| 
Hi,
I've been using GlusterFS for a while now in different scenarios, and now
I'm facing a really strange problem — or at least it seems so to me.
The situation is that I've got a GlusterFS setup with six nodes and several
clients (Xen servers) mounting the shared filesystem. The idea is to have
one big shared storage pool for all the VMs I've got.
When I try to run a VM created with Xen 3.2 which is stored on Gluster,
it won't start.
It says:
Error: Device 2049 (vbd) could not be connected. Backend device not found.
And if you look at the Xen log file:
*****************
[2008-07-07 18:51:31 4020] DEBUG (__init__:1072) 
XendDomainInfo.create(['vm', ['name', 'xen_testdedicat1'], ['memory', 
'256'], ['on_poweroff', 'destroy'], ['on_reboot', 'restart'], 
['on_crash', 'restart'], ['vcpus', 1], ['on_xend_start', 'ignore'], 
['on_xend_stop', 'ignore'], ['image', ['linux', ['kernel', 
'/boot/vmlinuz-2.6.18-6-xen-amd64'], ['ramdisk', 
'/boot/initrd.img-2.6.18-6-xen-amd64'], ['root', '/dev/sda1 ro']]], 
['device', ['vbd', ['uname', 
'file:/mnt/gluster/domains/xen_testdedicat1/disk.img'], ['dev', 'sda1'], 
['mode', 'w']]], ['device', ['vif', ['ip', '192.168.1.164']]]])
[2008-07-07 18:51:31 4020] DEBUG (__init__:1072) 
XendDomainInfo.constructDomain
[2008-07-07 18:51:31 4020] DEBUG (__init__:1072) Balloon: 262956 KiB 
free; need 2048; done. 
[2008-07-07 18:51:31 4020] DEBUG (__init__:1072) Adding Domain: 2
[2008-07-07 18:51:31 4020] DEBUG (__init__:1072) 
XendDomainInfo.initDomain: 2 256
[2008-07-07 18:51:31 4020] DEBUG (__init__:1072) 
_initDomain:shadow_memory=0x0, memory_static_max=0x10000000, 
memory_static_min=0x0.
[2008-07-07 18:51:31 4020] DEBUG (__init__:1072) Balloon: 262948 KiB 
free; need 262144; done.
[2008-07-07 18:51:31 4020] INFO (__init__:1072) buildDomain os=linux 
dom=2 vcpus=1 
[2008-07-07 18:51:31 4020] DEBUG (__init__:1072) domid          = 2
[2008-07-07 18:51:31 4020] DEBUG (__init__:1072) memsize        = 256
[2008-07-07 18:51:31 4020] DEBUG (__init__:1072) image          = 
/boot/vmlinuz-2.6.18-6-xen-amd64 
[2008-07-07 18:51:31 4020] DEBUG (__init__:1072) store_evtchn   = 1
[2008-07-07 18:51:31 4020] DEBUG (__init__:1072) console_evtchn = 2
[2008-07-07 18:51:31 4020] DEBUG (__init__:1072) cmdline        = 
root=/dev/sda1 ro
[2008-07-07 18:51:31 4020] DEBUG (__init__:1072) ramdisk        = 
/boot/initrd.img-2.6.18-6-xen-amd64 
[2008-07-07 18:51:31 4020] DEBUG (__init__:1072) vcpus          = 1
[2008-07-07 18:51:31 4020] DEBUG (__init__:1072) features       =
[2008-07-07 18:51:31 4020] INFO (__init__:1072) createDevice: vbd : 
{'uuid': 'a4d3fb99-4d54-687e-1e5e-b3c82264547d', 'bootable': 1, 
'driver': 'paravirtualised', 'dev': 'sda1', 'uname': 
'file:/mnt/gluster/domains/xen_testdedicat1/disk.img', 'mode': 'w'}
[2008-07-07 18:51:31 4020] DEBUG (__init__:1072) DevController: writing 
{'virtual-device': '2049', 'device-type': 'disk', 'protocol': 
'x86_64-abi', 'backend-id': '0', 'state': '1', 'backend': 
'/local/domain/0/backend/vbd/2/2049'} to /local/domain/2/device/vbd/2049.
[2008-07-07 18:51:31 4020] DEBUG (__init__:1072) DevController: writing 
{'domain': 'xen_testdedicat1', 'frontend': 
'/local/domain/2/device/vbd/2049', 'uuid': 
'a4d3fb99-4d54-687e-1e5e-b3c82264547d', 'dev': 'sda1', 'state': '1', 
'params': '/mnt/gluster/domains/xen_testdedicat1/disk.img', 'mode': 'w', 
'online': '1', 'frontend-id': '2', 'type': 'file'} to 
/local/domain/0/backend/vbd/2/2049.
[2008-07-07 18:51:31 4020] INFO (__init__:1072) createDevice: vif : 
{'ip': '192.168.1.164', 'mac': '00:16:3e:29:f1:ad', 'uuid': 
'2746c385-a8e6-e5e7-00bc-32fc7f3d6272'}
[2008-07-07 18:51:31 4020] DEBUG (__init__:1072) DevController: writing 
{'mac': '00:16:3e:29:f1:ad', 'handle': '0', 'protocol': 'x86_64-abi', 
'backend-id': '0', 'state': '1', 'backend': 
'/local/domain/0/backend/vif/2/0'} to /local/domain/2/device/vif/0.
[2008-07-07 18:51:31 4020] DEBUG (__init__:1072) DevController: writing 
{'domain': 'xen_testdedicat1', 'handle': '0', 'uuid': 
'2746c385-a8e6-e5e7-00bc-32fc7f3d6272', 'script': 
'/etc/xen/scripts/vif-bridge', 'ip': '192.168.1.164', 'state': '1', 
'frontend': '/local/domain/2/device/vif/0', 'mac': '00:16:3e:29:f1:ad', 
'online': '1', 'frontend-id': '2'} to /local/domain/0/backend/vif/2/0.
[2008-07-07 18:51:32 4020] DEBUG (__init__:1072) Storing VM details: 
{'on_xend_stop': 'ignore', 'shadow_memory': '0', 'uuid': 
'34b70208-d172-6d75-344e-888579b73c2c', 'on_reboot': 'restart', 
'start_time': '1215449492.96', 'on_poweroff': 'destroy', 
'on_xend_start': 'ignore', 'on_crash': 'restart', 'xend/restart_count': 
'0', 'vcpus': '1', 'vcpu_avail': '1', 'image': "(linux (kernel 
/boot/vmlinuz-2.6.18-6-xen-amd64) (ramdisk 
/boot/initrd.img-2.6.18-6-xen-amd64) (args 'root=/dev/sda1 ro ') (notes 
(FEATURES 
'writable_page_tables|writable_descriptor_tables|auto_translated_physmap|pae_pgdir_above_4gb|supervisor_mode_kernel') 
(VIRT_BASE 18446744071562067968) (GUEST_VERSION 2.6) (PADDR_OFFSET 
18446744071562067968) (GUEST_OS linux) (HYPERCALL_PAGE 
18446744071564189696) (LOADER generic) (ENTRY 18446744071564165120) 
(XEN_VERSION xen-3.0)))", 'name': 'xen_testdedicat1'}
[2008-07-07 18:51:32 4020] DEBUG (__init__:1072) Storing domain details: 
{'console/ring-ref': '2214313', 'image/entry': '18446744071564165120', 
'console/port': '2', 'store/ring-ref': '2214314', 'image/loader': 
'generic', 'vm': '/vm/34b70208-d172-6d75-344e-888579b73c2c', 
'control/platform-feature-multiprocessor-suspend': '1', 
'image/guest-os': 'linux', 'image/features/writable-descriptor-tables': 
'1', 'image/virt-base': '18446744071562067968', 'memory/target': 
'262144', 'image/guest-version': '2.6', 
'image/features/supervisor-mode-kernel': '1', 'console/limit': 
'1048576', 'image/paddr-offset': '18446744071562067968', 
'image/hypercall-page': '18446744071564189696', 'cpu/0/availability': 
'online', 'image/features/pae-pgdir-above-4gb': '1', 
'image/features/writable-page-tables': '1', 'console/type': 
'xenconsoled', 'image/features/auto-translated-physmap': '1', 'name': 
'xen_testdedicat1', 'domid': '2', 'image/xen-version': 'xen-3.0', 
'store/port': '1'}
[2008-07-07 18:51:32 4020] DEBUG (__init__:1072) DevController: writing 
{'protocol': 'x86_64-abi', 'state': '1', 'backend-id': '0', 'backend': 
'/local/domain/0/backend/console/2/0'} to /local/domain/2/device/console/0.
[2008-07-07 18:51:32 4020] DEBUG (__init__:1072) DevController: writing 
{'domain': 'xen_testdedicat1', 'protocol': 'vt100', 'uuid': 
'254941ec-2571-00da-95a9-a8d6fcce4414', 'frontend': 
'/local/domain/2/device/console/0', 'state': '1', 'location': '2', 
'online': '1', 'frontend-id': '2'} to /local/domain/0/backend/console/2/0.
[2008-07-07 18:51:32 4020] DEBUG (__init__:1072) 
XendDomainInfo.handleShutdownWatch 
[2008-07-07 18:51:32 4020] DEBUG (__init__:1072) Waiting for devices vif.
[2008-07-07 18:51:32 4020] DEBUG (__init__:1072) Waiting for 0.
[2008-07-07 18:51:32 4020] DEBUG (__init__:1072) hotplugStatusCallback 
/local/domain/0/backend/vif/2/0/hotplug-status.
[2008-07-07 18:51:32 4020] DEBUG (__init__:1072) hotplugStatusCallback 
/local/domain/0/backend/vif/2/0/hotplug-status. 
[2008-07-07 18:51:32 4020] DEBUG (__init__:1072) hotplugStatusCallback 1.
[2008-07-07 18:51:32 4020] DEBUG (__init__:1072) Waiting for devices vbd.
[2008-07-07 18:51:32 4020] DEBUG (__init__:1072) Waiting for 2049.
[2008-07-07 18:51:32 4020] DEBUG (__init__:1072) hotplugStatusCallback 
/local/domain/0/backend/vbd/2/2049/hotplug-status.
[2008-07-07 18:51:32 4020] DEBUG (__init__:1072) hotplugStatusCallback 
/local/domain/0/backend/vbd/2/2049/hotplug-status. 
[2008-07-07 18:51:32 4020] DEBUG (__init__:1072) hotplugStatusCallback 2.
[2008-07-07 18:51:32 4020] DEBUG (__init__:1072) XendDomainInfo.destroy: 
domid=2
[2008-07-07 18:51:32 4020] DEBUG (__init__:1072) 
XendDomainInfo.destroyDomain(2) 
[2008-07-07 18:51:32 4020] DEBUG (__init__:1072) Destroying device model
[2008-07-07 18:51:32 4020] DEBUG (__init__:1072) Releasing devices
[2008-07-07 18:51:32 4020] DEBUG (__init__:1072) Removing vif/0
[2008-07-07 18:51:32 4020] DEBUG (__init__:1072) 
XendDomainInfo.destroyDevice: deviceClass = vif, device = vif/0 
[2008-07-07 18:51:32 4020] DEBUG (__init__:1072) Removing vbd/2049
[2008-07-07 18:51:32 4020] DEBUG (__init__:1072) 
XendDomainInfo.destroyDevice: deviceClass = vbd, device = vbd/2049 
[2008-07-07 18:51:32 4020] DEBUG (__init__:1072) Removing console/0
[2008-07-07 18:51:32 4020] DEBUG (__init__:1072) 
XendDomainInfo.destroyDevice: deviceClass = console, device = console/0 
*****************
The nodes don't log any errors or warnings at all, and the Xen server
which is trying to run the VM says this:
************
2008-07-07 17:43:07 E [afr.c:2391:afr_writev_cbk] grup1: 
(path=/domains/xen_testdedicat1/disk.img child=espai2) op_ret=-1 op_errno=22
2008-07-07 17:43:07 E [fuse-bridge.c:1645:fuse_writev_cbk] 
glusterfs-fuse: 656: WRITE => -1 (22)
2008-07-07 17:43:07 E [afr.c:2699:afr_flush] grup1: afrfdp->fdstate[] is 
0, returning ENOTCONN
2008-07-07 17:43:07 E [fuse-bridge.c:945:fuse_err_cbk] glusterfs-fuse: 
657: (16) ERR => -1 (107) 
**************
I'm using these software versions:
linux debian etch, kernel 2.6.18-6-xen-amd64
xen-hypervisor-3.2-1-amd64
fuse-2.7.3glfs10
glusterfs--mainline--2.5--patch-788
The thing is, however, that if I move the virtual machine created with
Xen 3.2 from the shared Gluster mount point to a local path, it runs
without any problem at all...
Is it that Gluster is not fast enough to provide Xen with the data
needed to launch the VM? Is there anything I could do to fix that?
These are the spec files:
Node side:
***********
volume espa
       type storage/posix
       option directory /mnt/compartit
end-volume
volume spai
       type performance/io-threads
       option thread-count 4
       option cache-size 32MB
       subvolumes espa
end-volume
volume espai
       type performance/write-behind
       option aggregate-size 1MB
       option flush-behind on
       subvolumes spai
end-volume
volume ultim
       type protocol/server
       subvolumes espai
       option transport-type tcp/server
       option auth.ip.espai.allow *
end-volume
***********
Client/Xen side:
***********
volume espai1
   type protocol/client
   option transport-type tcp/client
   option remote-host 192.168.1.204
   option remote-subvolume espai
end-volume
volume namespace1
   type protocol/client
   option transport-type tcp/client
   option remote-host 192.168.1.204
   option remote-subvolume nm
end-volume
volume espai2
   type protocol/client
   option transport-type tcp/client
   option remote-host 192.168.1.206
   option remote-subvolume espai
end-volume
volume espai3
   type protocol/client
   option transport-type tcp/client
   option remote-host 192.168.1.213
   option remote-subvolume espai
end-volume
volume espai4
   type protocol/client
   option transport-type tcp/client
   option remote-host 192.168.1.161
   option remote-subvolume espai
end-volume
volume namespace2
   type protocol/client
   option transport-type tcp/client
   option remote-host 192.168.1.161
   option remote-subvolume nm
end-volume
volume espai5
   type protocol/client
   option transport-type tcp/client
   option remote-host 192.168.1.162
   option remote-subvolume espai
end-volume
volume espai6
   type protocol/client
   option transport-type tcp/client
   option remote-host 192.168.1.163
   option remote-subvolume espai
end-volume
volume grup1
   type cluster/afr
   subvolumes espai1 espai2
end-volume
volume grup2
   type cluster/afr
   subvolumes espai3 espai4
end-volume
volume grup3
   type cluster/afr
   subvolumes espai5 espai6
end-volume
volume nm1
   type cluster/afr
   subvolumes namespace1 namespace2
end-volume
volume ultim
   type cluster/unify
   subvolumes grup1 grup2 grup3
   option scheduler rr
   option namespace nm1
end-volume
volume iot
 type performance/io-threads
 option thread-count 4
 option cache-size 32MB
 subvolumes ultim
end-volume
volume ioc
type performance/io-cache
option cache-size 64MB
option page-size 1MB
option force-revalidate-timeout 2
subvolumes iot
end-volume
***********
I've already spent a while playing with the spec file — adding and
removing write and read buffers — but the final result is always the same:
the VM won't run.
Any ideas?
Thank you.
_______________________________________________
Xen-users mailing list
Xen-users@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-users
 |