To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] Have a watch callback return 0 or 1 depending upon whether it would like to
From: Xen patchbot -unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Tue, 15 Nov 2005 21:16:20 +0000
Delivery-date: Tue, 15 Nov 2005 21:18:41 +0000
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User emellor@xxxxxxxxxxxxxxxxxxxxxx
# Node ID 210a5b625e30067c8f340452aa1020aa44325124
# Parent  151837f6c26b2cf70d8083915235f4907c9e84a1
Have a watch callback return 0 or 1 depending upon whether it would like to
continue to receive watches.  This means that it is not necessary to probe
around in the xswatch internals to unregister a watch.

Tidy up the hotplug watch handling, moving the nested function out to a
separate place (I don't think that this was a problem in the end, but it was
making me nervous as I was debugging the recent race condition, and I find it
clearer out of the class in any case).  Pass an integer code representing
hotplug status, once it has been parsed from the store, as there are now a few
different places we can detect failure, and it's cleaner to pass a code from
those places.

Small tidy-up to XendDomain, removing the unused releaseDomain field.
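
In short: a callback now returns 1 to keep its watch and 0 to have the
watcher thread unregister it, so callers no longer need to reach into
the xswatch internals to call unwatch.  A standalone sketch of the
convention (no Xen required; register/fire and both paths below are
made up to stand in for xswatch and the xenstore watcher thread -- the
real API is xswatch(path, fn, *args)):

watches = {}

def register(path, fn, *args):
    watches.setdefault(path, []).append((fn, args))

def fire(path):
    # Mirrors watchMain in the patch below: a falsy return value
    # unregisters the watch.
    for fn, args in list(watches.get(path, [])):
        if not fn(*args):
            watches[path].remove((fn, args))

def onReleaseDomain():
    print("domain released; still subscribed")
    return 1                           # keep receiving this watch

def hotplugDone(devid):
    print("device %s is ready; one-shot" % devid)
    return 0                           # the dispatcher unregisters us

register("@releaseDomain", onReleaseDomain)
register("/hypothetical/hotplug-status", hotplugDone, 0)
fire("@releaseDomain")
fire("@releaseDomain")                 # fires again: callback returned 1
fire("/hypothetical/hotplug-status")
fire("/hypothetical/hotplug-status")   # silent: callback returned 0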

diff -r 151837f6c26b -r 210a5b625e30 tools/python/xen/xend/XendDomain.py
--- a/tools/python/xen/xend/XendDomain.py       Tue Nov 15 17:47:39 2005
+++ b/tools/python/xen/xend/XendDomain.py       Tue Nov 15 18:08:11 2005
@@ -36,6 +36,7 @@
 from xen.xend.XendError import XendError
 from xen.xend.XendLogging import log
 from xen.xend.server import relocate
+from xen.xend.xenstore.xswatch import xswatch
 
 
 xc = xen.lowlevel.xc.new()
@@ -58,9 +59,11 @@
         # to import XendDomain from XendDomainInfo causes unbounded recursion.
         # So we stuff the XendDomain instance (self) into xroot's components.
         xroot.add_component("xen.xend.XendDomain", self)
+
         self.domains = {}
         self.domains_lock = threading.RLock()
-        self.watchReleaseDomain()
+
+        xswatch("@releaseDomain", self.onReleaseDomain)
 
         self.domains_lock.acquire()
         try:
@@ -112,11 +115,7 @@
             self.refresh()
         finally:
             self.domains_lock.release()
-            
-
-    def watchReleaseDomain(self):
-        from xen.xend.xenstore.xswatch import xswatch
-        self.releaseDomain = xswatch("@releaseDomain", self.onReleaseDomain)
+        return 1
 
 
     def xen_domains(self):
diff -r 151837f6c26b -r 210a5b625e30 tools/python/xen/xend/server/DevController.py
--- a/tools/python/xen/xend/server/DevController.py     Tue Nov 15 17:47:39 2005
+++ b/tools/python/xen/xend/server/DevController.py     Tue Nov 15 18:08:11 2005
@@ -29,6 +29,12 @@
 HOTPLUG_STATUS_NODE = "hotplug-status"
 HOTPLUG_STATUS_ERROR = "error"
 
+Connected = 1
+Died      = 2
+Error     = 3
+Missing   = 4
+Timeout   = 5
+
 xenbusState = {
     'Unknown'      : 0,
     'Initialising' : 1,
@@ -87,18 +93,28 @@
     def waitForDevice(self, devid):
         log.debug("Waiting for %s.", devid)
         
-        status, fn_ret = self.waitForBackend(devid)
-        if status:
+        status = self.waitForBackend(devid)
+
+        if status == Timeout:
             self.destroyDevice(devid)
-            raise VmError( ("Device %s (%s) could not be connected. "
-                            "Hotplug scripts not working") 
-                            % (devid, self.deviceClass))
-
-        elif fn_ret == HOTPLUG_STATUS_ERROR:
+            raise VmError("Device %s (%s) could not be connected. "
+                          "Hotplug scripts not working" %
+                          (devid, self.deviceClass))
+
+        elif status == Error:
             self.destroyDevice(devid)
-            raise VmError( ("Device %s (%s) could not be connected. "
-                            "Backend device not found!") 
-                            % (devid, self.deviceClass))
+            raise VmError("Device %s (%s) could not be connected. "
+                          "Backend device not found" %
+                          (devid, self.deviceClass))
+
+        elif status == Missing:
+            raise VmError("Device %s (%s) could not be connected. "
+                          "Device not found" % (devid, self.deviceClass))
+
+        elif status == Died:
+            self.destroyDevice(devid)
+            raise VmError("Device %s (%s) could not be connected. "
+                          "Device has died" % (devid, self.deviceClass))
 
 
     def reconfigureDevice(self, devid, config):
@@ -302,35 +318,22 @@
                 raise
 
 
-    def waitForBackend(self,devid):
-        ev = Event()
-
-        def hotplugStatus():
-            log.debug("hotplugStatus %d", devid)
-            
-            try:
-                status = self.readBackend(devid, HOTPLUG_STATUS_NODE)
-            except VmError:
-                status = "died"
-            if status is not None:
-                watch.xs.unwatch(backpath, watch)
-                hotplugStatus.value = status
-                ev.set()
-
-        hotplugStatus.value = None
+    def waitForBackend(self, devid):
+
         frontpath = self.frontendPath(devid)
         backpath = xstransact.Read(frontpath, "backend")
 
         if backpath:
-            watch = xswatch(backpath, hotplugStatus)
+            statusPath = backpath + '/' + HOTPLUG_STATUS_NODE
+            ev = Event()
+            result = { 'status': Timeout }
+            
+            xswatch(statusPath, hotplugStatusCallback, statusPath, ev, result)
 
             ev.wait(DEVICE_CREATE_TIMEOUT)
-            if ev.isSet():
-                return (0, hotplugStatus.value)
-            else:
-                return (-1, hotplugStatus.value)
-        else:
-            return (-1, "missing")
+            return result['status']
+        else:
+            return Missing
 
 
     def backendPath(self, backdom, devid):
@@ -352,3 +355,25 @@
     def frontendMiscPath(self):
         return "%s/device-misc/%s" % (self.vm.getDomainPath(),
                                       self.deviceClass)
+
+
+def hotplugStatusCallback(statusPath, ev, result):
+    log.debug("hotplugStatusCallback %s.", statusPath)
+
+    try:
+        status = xstransact.Read(statusPath)
+
+        if status is not None:
+            if status == HOTPLUG_STATUS_ERROR:
+                result['status'] = Error
+            else:
+                result['status'] = Connected
+        else:
+            return 1
+    except VmError:
+        result['status'] = Died
+
+    log.debug("hotplugStatusCallback %d.", result['status'])
+
+    ev.set()
+    return 0
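
The nested hotplugStatus closure is gone; state now flows through an
explicit Event and a mutable result dict, with Timeout as the default
that survives if the watch never fires.  A standalone sketch of that
pattern (threading.Timer stands in for the xenstore watcher thread
firing once, and read_status stands in for xstransact.Read(statusPath)):

import threading

Connected, Died, Error, Missing, Timeout = 1, 2, 3, 4, 5

def statusCallback(read_status, ev, result):
    status = read_status()        # stands in for xstransact.Read(statusPath)
    if status is None:
        return 1                  # node not written yet: keep watching
    if status == "error":         # HOTPLUG_STATUS_ERROR
        result['status'] = Error
    else:
        result['status'] = Connected
    ev.set()
    return 0                      # finished: the watcher drops this watch

ev = threading.Event()
result = {'status': Timeout}      # survives untouched if nothing fires
threading.Timer(0.1, statusCallback,
                args=(lambda: "connected", ev, result)).start()
ev.wait(5.0)                      # DEVICE_CREATE_TIMEOUT in the real code
print(result['status'])           # prints 1, i.e. Connected
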
diff -r 151837f6c26b -r 210a5b625e30 tools/python/xen/xend/xenstore/xswatch.py
--- a/tools/python/xen/xend/xenstore/xswatch.py Tue Nov 15 17:47:39 2005
+++ b/tools/python/xen/xend/xenstore/xswatch.py Tue Nov 15 18:08:11 2005
@@ -5,9 +5,7 @@
 # Public License.  See the file "COPYING" in the main directory of
 # this archive for more details.
 
-import select
 import threading
-from xen.lowlevel import xs
 from xen.xend.xenstore.xsutil import xshandle
 
 from xen.xend.XendLogging import log
@@ -20,37 +18,42 @@
     xslock = threading.Lock()
     
     def __init__(self, path, fn, *args, **kwargs):
+        self.path = path
         self.fn = fn
         self.args = args
         self.kwargs = kwargs
         xswatch.watchStart()
         xswatch.xs.watch(path, self)
 
+
     def watchStart(cls):
         cls.xslock.acquire()
-        if cls.watchThread:
+        try:
+            if cls.watchThread:
+                return
+            cls.xs = xshandle()
+            cls.watchThread = threading.Thread(name="Watcher",
+                                               target=cls.watchMain)
+            cls.watchThread.setDaemon(True)
+            cls.watchThread.start()
+        finally:
             cls.xslock.release()
-            return
-        cls.xs = xshandle()
-        cls.watchThread = threading.Thread(name="Watcher",
-                                           target=cls.watchMain)
-        cls.watchThread.setDaemon(True)
-        cls.watchThread.start()
-        cls.xslock.release()
 
     watchStart = classmethod(watchStart)
+
 
     def watchMain(cls):
         while True:
             try:
                 we = cls.xs.read_watch()
                 watch = we[1]
-                watch.fn(*watch.args, **watch.kwargs)
+                res = watch.fn(*watch.args, **watch.kwargs)
+                if not res:
+                    cls.xs.unwatch(watch.path, watch)
             except:
                 log.exception("read_watch failed")
                 # Ignore this exception -- there's no point throwing it
                 # further on because that will just kill the watcher thread,
                 # which achieves nothing.
 
-
     watchMain = classmethod(watchMain)
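
Beyond the 0/1 dispatch change, watchStart now releases its lock via
try/finally on every exit path, including the early return.  A minimal
standalone sketch of that start-once pattern (the Watcher class below
is a stand-in for xswatch, keeping the patch's Python 2-era idioms; the
real watchMain runs the read_watch/dispatch loop):

import threading

class Watcher:
    watchThread = None
    xslock = threading.Lock()

    def watchStart(cls):
        cls.xslock.acquire()
        try:
            if cls.watchThread:
                return            # early return: finally still unlocks
            cls.watchThread = threading.Thread(name="Watcher",
                                               target=cls.watchMain)
            cls.watchThread.setDaemon(True)
            cls.watchThread.start()
        finally:
            cls.xslock.release()
    watchStart = classmethod(watchStart)

    def watchMain(cls):
        pass                      # the read_watch loop would live here
    watchMain = classmethod(watchMain)

Watcher.watchStart()
Watcher.watchStart()              # second call is a harmless no-op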

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
