xen-devel

Re: [Xen-devel] [PATCH] architecture-specific stuff in xend

On Mon, 2006-08-14 at 12:40 -0500, Hollis Blanchard wrote:
> Here is the update. Special thanks to Dan Stekloff and Daniel Miles, who
> tested on HVM and IA64, respectively.

...

> @@ -349,20 +340,8 @@ class HVMImageHandler(ImageHandler):
>          os.waitpid(self.pid, 0)
>          self.pid = 0
>  
> -    def getDomainMemory(self, mem_kb):
> -        """@see ImageHandler.getDomainMemory"""
> -        if os.uname()[4] == 'ia64':
> -            page_kb = 16
> -            # ROM size for guest firmware, ioreq page and xenstore page
> -            extra_pages = 1024 + 2
> -        else:
> -            page_kb = 4
> -            # This was derived emperically:
> -            #   2.4 MB overhead per 1024 MB RAM + 8 MB constant
> -            #   + 4 to avoid low-memory condition
> -            extra_mb = (2.4/1024) * (mem_kb/1024.0) + 12;
> -            extra_pages = int( math.ceil( extra_mb*1024 / page_kb ))
> -        return mem_kb + extra_pages * page_kb
> +    def getRequiredMemory(self, domain_kb):
> +        return arch.HVMRequiredMemory(domain_kb)
>  
>      def register_shutdown_watch(self):
>          """ add xen store watch on control/shutdown """

I'm actually not sure about this approach now. getRequiredMemory() could
more naturally be implemented in an ia64- or x86-specific HVM subclass.
Here is a totally untested patch illustrating the idea:

diff -r 8cca42e2610a tools/python/xen/xend/XendDomainInfo.py
--- a/tools/python/xen/xend/XendDomainInfo.py   Thu Aug 10 14:29:04 2006 +0100
+++ b/tools/python/xen/xend/XendDomainInfo.py   Mon Aug 14 19:57:38 2006 -0500
@@ -1279,23 +1279,14 @@ class XendDomainInfo:
                     cpu = [ int( cpus[v % len(cpus)] ) ]
                     xc.vcpu_setaffinity(self.domid, v, cpu)
 
-            # set domain maxmem in KiB
-            xc.domain_setmaxmem(self.domid, self.info['maxmem'] * 1024)
-
-            m = self.image.getDomainMemory(self.info['memory'] * 1024)
-            balloon.free(m)
-
-            init_reservation = self.info['memory'] * 1024
-            if os.uname()[4] in ('ia64', 'ppc64'):
-                # Workaround for architectures that don't yet support
-                # ballooning.
-                init_reservation = m
-                # Following line from xiantao.zhang@xxxxxxxxx
-                # Needed for IA64 until supports ballooning -- okay for PPC64?
-                xc.domain_setmaxmem(self.domid, m)
-
-            xc.domain_memory_increase_reservation(self.domid, init_reservation,
-                                                  0, 0)
+            # set memory limit
+            maxmem = self.image.getRequiredMemory(self.info['maxmem'] * 1024)
+            xc.domain_setmaxmem(self.domid, maxmem)
+
+            # initial memory allocation
+            mem_kb = self.image.getRequiredMemory(self.info['memory'] * 1024)
+            balloon.free(mem_kb)
+            xc.domain_memory_increase_reservation(self.domid, mem_kb, 0, 0)
 
             self.createChannels()
 
diff -r 8cca42e2610a tools/python/xen/xend/image.py
--- a/tools/python/xen/xend/image.py    Thu Aug 10 14:29:04 2006 +0100
+++ b/tools/python/xen/xend/image.py    Mon Aug 14 20:52:55 2006 -0500
@@ -27,6 +27,7 @@ from xen.xend.XendLogging import log
 from xen.xend.XendLogging import log
 from xen.xend.server.netif import randomMAC
 from xen.xend.xenstore.xswatch import xswatch
+from xen.xend import arch
 
 
 xc = xen.lowlevel.xc.xc()
@@ -141,17 +142,8 @@ class ImageHandler:
             raise VmError('Building domain failed: ostype=%s dom=%d err=%s'
                           % (self.ostype, self.vm.getDomid(), str(result)))
 
-
-    def getDomainMemory(self, mem_kb):
-        """@return The memory required, in KiB, by the domain to store the
-        given amount, also in KiB."""
-        if os.uname()[4] != 'ia64':
-            # A little extra because auto-ballooning is broken w.r.t. HVM
-            # guests. Also, slack is necessary for live migration since that
-            # uses shadow page tables.
-            if 'hvm' in xc.xeninfo()['xen_caps']:
-                mem_kb += 4*1024;
-        return mem_kb
+    def getRequiredMemory(self, domain_kb):
+        return domain_kb
 
     def buildDomain(self):
         """Build the domain. Define in subclass."""
@@ -192,8 +184,6 @@ class LinuxImageHandler(ImageHandler):
                               features       = self.vm.getFeatures())
 
 class HVMImageHandler(ImageHandler):
-
-    ostype = "hvm"
 
     def configure(self, imageConfig, deviceConfig):
         ImageHandler.configure(self, imageConfig, deviceConfig)
@@ -349,21 +339,6 @@ class HVMImageHandler(ImageHandler):
         os.waitpid(self.pid, 0)
         self.pid = 0
 
-    def getDomainMemory(self, mem_kb):
-        """@see ImageHandler.getDomainMemory"""
-        if os.uname()[4] == 'ia64':
-            page_kb = 16
-            # ROM size for guest firmware, ioreq page and xenstore page
-            extra_pages = 1024 + 2
-        else:
-            page_kb = 4
-            # This was derived emperically:
-            #   2.4 MB overhead per 1024 MB RAM + 8 MB constant
-            #   + 4 to avoid low-memory condition
-            extra_mb = (2.4/1024) * (mem_kb/1024.0) + 12;
-            extra_pages = int( math.ceil( extra_mb*1024 / page_kb ))
-        return mem_kb + extra_pages * page_kb
-
     def register_shutdown_watch(self):
         """ add xen store watch on control/shutdown """
         self.shutdownWatch = xswatch(self.vm.dompath + "/control/shutdown", \
@@ -400,15 +375,42 @@ class HVMImageHandler(ImageHandler):
 
         return 1 # Keep watching
 
-"""Table of image handler classes for virtual machine images.  Indexed by
-image type.
-"""
-imageHandlerClasses = {}
-
-
-for h in LinuxImageHandler, HVMImageHandler:
-    imageHandlerClasses[h.ostype] = h
-
+class IA64_HVM_ImageHandler(HVMImageHandler):
+
+    ostype = "hvm"
+
+    def getRequiredMemory(self, mem_kb):
+        page_kb = 16
+        # ROM size for guest firmware, ioreq page and xenstore page
+        extra_pages = 1024 + 2
+        return mem_kb + extra_pages * page_kb
+
+class X86_HVM_ImageHandler(HVMImageHandler):
+
+    ostype = "hvm"
+
+    def getRequiredMemory(self, mem_kb):
+        page_kb = 4
+        # This was derived empirically:
+        #   2.4 MB overhead per 1024 MB RAM + 8 MB constant
+        #   + 4 to avoid low-memory condition
+        extra_mb = (2.4/1024) * (mem_kb/1024.0) + 12
+        extra_pages = int( math.ceil( extra_mb*1024 / page_kb ))
+        return mem_kb + extra_pages * page_kb
+
+_handlers = {
+    "powerpc": {
+        "linux": LinuxImageHandler,
+    },
+    "ia64": {
+        "linux": LinuxImageHandler,
+        "hvm": IA64_HVM_ImageHandler,
+    },
+    "x86": {
+        "linux": LinuxImageHandler,
+        "hvm": X86_HVM_ImageHandler,
+    },
+}
 
 def findImageHandlerClass(image):
     """Find the image handler class for an image config.
@@ -416,10 +418,10 @@ def findImageHandlerClass(image):
     @param image config
     @return ImageHandler subclass or None
     """
-    ty = sxp.name(image)
-    if ty is None:
+    type = sxp.name(image)
+    if type is None:
         raise VmError('missing image type')
-    imageClass = imageHandlerClasses.get(ty)
-    if imageClass is None:
-        raise VmError('unknown image type: ' + ty)
-    return imageClass
+    try:
+        return _handlers[arch.type][type]
+    except KeyError:
+        raise VmError('unknown image type: ' + type)
diff -r 8cca42e2610a tools/python/xen/xend/arch.py
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/python/xen/xend/arch.py     Mon Aug 14 20:45:42 2006 -0500
@@ -0,0 +1,31 @@
+#!/usr/bin/env python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of version 2.1 of the GNU Lesser General Public
+# License as published by the Free Software Foundation.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+#
+# Copyright (C) IBM Corp. 2006
+#
+# Authors: Hollis Blanchard <hollisb@xxxxxxxxxx>
+
+import os
+
+_types = {
+       "i386": "x86",
+       "i486": "x86",
+       "i586": "x86",
+       "i686": "x86",
+       "ia64": "ia64",
+       "ppc": "powerpc",
+       "ppc64": "powerpc",
+}
+type = _types.get(os.uname()[4], "unknown")
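
For reference, the x86 overhead formula works out like this. This is a
standalone restatement of the patch's math (the function name is mine),
with a worked example for a 1024 MB guest:

import math

def x86_hvm_required_memory(mem_kb):
    page_kb = 4
    # 2.4 MB overhead per 1024 MB RAM + 8 MB constant + 4 MB slack
    extra_mb = (2.4/1024) * (mem_kb/1024.0) + 12
    extra_pages = int(math.ceil(extra_mb * 1024 / page_kb))
    return mem_kb + extra_pages * page_kb

# 1024 MB guest: 2.4 + 12 = 14.4 MB extra -> 3687 pages of 4 KiB
# -> 1048576 + 14748 = 1063324 KiB total
print x86_hvm_required_memory(1024 * 1024)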

Notice that findImageHandlerClass() had to change because we now have
two "hvm" handlers.

The PowerPC builder will layer on top of that patch:

diff -r acb9e95d892b tools/python/xen/xend/image.py
--- a/tools/python/xen/xend/image.py    Mon Aug 14 20:53:11 2006 -0500
+++ b/tools/python/xen/xend/image.py    Mon Aug 14 21:11:55 2006 -0500
@@ -28,6 +28,7 @@ from xen.xend.server.netif import random
 from xen.xend.server.netif import randomMAC
 from xen.xend.xenstore.xswatch import xswatch
 from xen.xend import arch
+from xen.xend import FlatDeviceTree
 
 
 xc = xen.lowlevel.xc.xc()
@@ -182,6 +183,38 @@ class LinuxImageHandler(ImageHandler):
                               cmdline        = self.cmdline,
                               ramdisk        = self.ramdisk,
                               features       = self.vm.getFeatures())
+
+class PPC_LinuxImageHandler(LinuxImageHandler):
+
+    ostype = "linux"
+
+    def configure(self, imageConfig, deviceConfig):
+        LinuxImageHandler.configure(self, imageConfig, deviceConfig)
+        self.imageConfig = imageConfig
+
+    def buildDomain(self):
+        store_evtchn = self.vm.getStorePort()
+        console_evtchn = self.vm.getConsolePort()
+
+        log.debug("dom            = %d", self.vm.getDomid())
+        log.debug("image          = %s", self.kernel)
+        log.debug("store_evtchn   = %d", store_evtchn)
+        log.debug("console_evtchn = %d", console_evtchn)
+        log.debug("cmdline        = %s", self.cmdline)
+        log.debug("ramdisk        = %s", self.ramdisk)
+        log.debug("vcpus          = %d", self.vm.getVCpuCount())
+        log.debug("features       = %s", self.vm.getFeatures())
+
+        devtree = FlatDeviceTree.build(self.imageConfig)
+
+        return xc.linux_build(dom            = self.vm.getDomid(),
+                              image          = self.kernel,
+                              store_evtchn   = store_evtchn,
+                              console_evtchn = console_evtchn,
+                              cmdline        = self.cmdline,
+                              ramdisk        = self.ramdisk,
+                              features       = self.vm.getFeatures(),
+                              arch_args      = devtree.to_bin())
 
 class HVMImageHandler(ImageHandler):
 
@@ -400,7 +433,7 @@ class X86_HVM_ImageHandler(HVMImageHandl
 
 _handlers = {
     "powerpc": {
-        "linux": LinuxImageHandler,
+        "linux": PPC_LinuxImageHandler,
     },
     "ia64": {
         "linux": LinuxImageHandler,


PowerPC needs to convert 'imageConfig' into a special data structure, as
you can see here. (I haven't attached FlatDeviceTree.py because it's
large and irrelevant for now.) Also, the xc.linux_build() stuff is going
to be changed drastically per Keir's preference, but you get the idea:
PowerPC needs extra communication with libxc.

This approach would locate xend architecture-specific code wherever it's
used, rather than in an architecture-specific module. That could
probably be changed if desired, but I think it will require mastering
__import__().
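
For completeness, the module-based alternative would look roughly like
this (untested, and the per-arch module names are purely hypothetical):

from xen.xend import arch

# Hypothetical layout: one module per architecture, e.g.
# xen/xend/arch_x86.py, each exporting the same handler classes.
# A non-empty fromlist makes __import__ return the submodule itself.
_archmod = __import__('xen.xend.arch_' + arch.type,
                      globals(), locals(), ['HVMImageHandler'])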

Thoughts?

-- 
Hollis Blanchard
IBM Linux Technology Center

