Hi,
you're using different device names:
root = '/dev/sda1 ...'
and
disk = [ '....,hda1,....' ]
Besides this, I'm a bit confused by your boot message, which mentions xvda1 ...
All of these device naming schemes are valid, but you need to make sure you
use ONE of them consistently,
e.g.
root = '/dev/sda1 ...'
disk = [ '......,sda1.....' ]
Additionally, your guest's /etc/fstab should reflect this, in this case
containing e.g.
/dev/sda1 / your_filesystem (e.g. ext3) ...
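Putting it together with the image path from your config, a consistent setup
could look like this (only a sketch; if your guest kernel's blkfront exposes
the disk as xvda1, as your boot log suggests, use xvda1 in all three places
instead):

disk = [ 'file:/etc/xen/Ubuntu-8.04.img,sda1,w' ]
root = "/dev/sda1 ro"

and, inside the guest image, /etc/fstab:

/dev/sda1   /   ext3   defaults   0   1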
--
Stephan Seitz
Senior System Administrator
*netz-haut* e.K.
multimedia communication
zweierweg 22
97074 würzburg
phone: +49 931 2876247
fax: +49 931 2876248
web: http://www.netz-haut.de/
court of registration: amtsgericht würzburg, hra 5054
> -----Original Message-----
> From: xen-users-bounces@xxxxxxxxxxxxxxxxxxx [mailto:xen-users-
> bounces@xxxxxxxxxxxxxxxxxxx] On Behalf Of John Duff
> Sent: Thursday, 2 July 2009 09:21
> To: xen-users@xxxxxxxxxxxxxxxxxxx
> Subject: [Xen-users] [Xen create] ALERT! /dev/sda1 does not exist
>
>
> Hi,
>
> I wanted to ask this on elli_dbergs's thread 'problem with xm create',
> but it seems that my message was never going to be published (I waited
> 24 hours before figuring it was taking way too much time).
>
> So I'm working with Xen unstable, installed dom0 over Ubuntu 8.10, no
> problem so far apparently. I reboot into Xen, then go to /etc/xen to get
> some example config files, modify one to match my configuration, run
> 'xend start', and then I try to create the domU with
>
>
> 'xm create xmtest -c' // xmtest being my config file
>
>
> I get the following error
>
> Begin: Waiting for root file system... ...
> [ 0.495098] blkfront: xvda1: barriers enabled
> [ 1.500021] Clocksource tsc unstable (delta = 363108584 ns)
> Done.
> Gave up waiting for root device. Common problems:
> - Boot args (cat /proc/cmdline)
> - Check rootdelay= (did the system wait long enough?)
> - Check root= (did the system wait for the right device?)
> - Missing modules (cat /proc/modules; ls /dev)
> ALERT! /dev/sda1 does not exist. Dropping to a shell!
>
>
>
> And here is the config file I use:
>
>
>
> # -*- mode: python; -*-
> #============================================================================
> # Python configuration setup for 'xm create'.
> # This script sets the parameters used when a domain is created using
> # 'xm create'.
> # You use a separate script for each domain you want to create, or
> # you can set the parameters for the domain on the xm command line.
> #============================================================================
>
> #----------------------------------------------------------------------------
> # Kernel image file.
> kernel = "/boot/vmlinuz-2.6.27-7-generic"
>
> # Optional ramdisk.
> ramdisk = "/boot/initrd.img-2.6.27-7-generic"
>
> # The domain build function. Default is 'linux'.
> #builder='linux'
>
> # Initial memory allocation (in megabytes) for the new domain.
> #
> # WARNING: Creating a domain with insufficient memory may cause out of
> # memory errors. The domain needs enough memory to boot kernel
> # and modules. Allocating less than 32MBs is not recommended.
> memory = 256
>
> # A name for your domain. All domains must have different names.
> name = "xmtest"
>
> # 128-bit UUID for the domain. The default behavior is to generate a new
> # UUID on each call to 'xm create'.
> uuid = "06ed00fe-1162-4fc4-b5d8-11993ee4a8b9"
>
> # List of which CPUS this domain is allowed to use, default Xen picks
> #cpus = "" # leave to Xen to pick
> #cpus = "0" # all vcpus run on CPU0
> #cpus = "0-3,5,^1" # all vcpus run on cpus 0,2,3,5
> #cpus = ["2", "3"] # VCPU0 runs on CPU2, VCPU1 runs on CPU3
>
> # Number of Virtual CPUS to use, default is 1
> #vcpus = 1
>
> #----------------------------------------------------------------------------
> # Define network interfaces.
>
> # By default, no network interfaces are configured. You may have one
> # created with sensible defaults using an empty vif clause:
> #
> # vif = [ '' ]
> #
> # or optionally override backend, bridge, ip, mac, script, type, or
> # vifname:
> #
> # vif = [ 'mac=00:16:3e:00:00:11, bridge=xenbr0' ]
> #
> # or more than one interface may be configured:
> #
> # vif = [ '', 'bridge=xenbr1' ]
>
> #vif = [ '' ]
>
> #----------------------------------------------------------------------------
> # Define the disk devices you want the domain to have access to, and
> # what you want them accessible as.
> # Each disk entry is of the form phy:UNAME,DEV,MODE
> # where UNAME is the device, DEV is the device name the domain will see,
> # and MODE is r for read-only, w for read-write.
>
> disk = [ 'file:/etc/xen/Ubuntu-8.04.img,hda1,w' ]
>
> #----------------------------------------------------------------------------
> # Define frame buffer device.
> #
> # By default, no frame buffer device is configured.
> #
> # To create one using the SDL backend and sensible defaults:
> #
> # vfb = [ 'sdl=1' ]
> #
> # This uses environment variables XAUTHORITY and DISPLAY. You
> # can override that:
> #
> # vfb = [ 'sdl=1,xauthority=/home/bozo/.Xauthority,display=:1' ]
> #
> # To create one using the VNC backend and sensible defaults:
> #
> # vfb = [ 'vnc=1' ]
> #
> # The backend listens on 127.0.0.1 port 5900+N by default, where N is
> # the domain ID. You can override both address and N:
> #
> # vfb = [ 'vnc=1,vnclisten=127.0.0.1,vncdisplay=1' ]
> #
> # Or you can bind the first unused port above 5900:
> #
> # vfb = [ 'vnc=1,vnclisten=0.0.0.0,vncunused=1' ]
> #
> # You can override the password:
> #
> # vfb = [ 'vnc=1,vncpasswd=MYPASSWD' ]
> #
> # Empty password disables authentication. Defaults to the vncpasswd
> # configured in xend-config.sxp.
>
> #----------------------------------------------------------------------------
> # Define to which TPM instance the user domain should communicate.
> # The vtpm entry is of the form 'instance=INSTANCE,backend=DOM'
> # where INSTANCE indicates the instance number of the TPM the VM
> # should be talking to and DOM provides the domain where the backend
> # is located.
> # Note that no two virtual machines should try to connect to the same
> # TPM instance. The handling of all TPM instances does require
> # some management effort in so far that VM configuration files (and thus
> # a VM) should be associated with a TPM instance throughout the lifetime
> # of the VM / VM configuration file. The instance number must be
> # greater or equal to 1.
> #vtpm = [ 'instance=1,backend=0' ]
>
> #----------------------------------------------------------------------------
> # Set the kernel command line for the new domain.
> # You only need to define the IP parameters and hostname if the domain's
> # IP config doesn't, e.g. in ifcfg-eth0 or via DHCP.
> # You can use 'extra' to set the runlevel and custom environment
> # variables used by custom rc scripts (e.g. VMID=, usr= ).
>
> # Set if you want dhcp to allocate the IP address.
> #dhcp="dhcp"
> # Set netmask.
> #netmask=
> # Set default gateway.
> #gateway=
> # Set the hostname.
> #hostname= "vm%d" % vmid
>
> # Set root device.
> root = "/dev/sda1 ro"
>
> # Root device for nfs.
> #root = "/dev/nfs"
> # The nfs server.
> #nfs_server = '192.0.2.1'
> # Root directory on the nfs server.
> #nfs_root = '/full/path/to/root/directory'
>
> # Sets runlevel 4.
> extra = "4"
>
> #----------------------------------------------------------------------------
> # Configure the behaviour when a domain exits. There are three 'reasons'
> # for a domain to stop: poweroff, reboot, and crash. For each of these
> # you may specify:
> #
> # "destroy", meaning that the domain is cleaned up as normal;
> # "restart", meaning that a new domain is started in place of
> the
> old
> # one;
> # "preserve", meaning that no clean-up is done until the domain
> is
> # manually destroyed (using xm destroy, for
> example); or
> # "rename-restart", meaning that the old domain is not cleaned up,
> but is
> # renamed and a new domain started in its place.
> #
> # In the event a domain stops due to a crash, you have the additional
> # options:
> #
> # "coredump-destroy", meaning dump the crashed domain's core and then
> #                     destroy;
> # "coredump-restart", meaning dump the crashed domain's core and then
> #                     restart.
> #
> # The default is
> #
> # on_poweroff = 'destroy'
> # on_reboot = 'restart'
> # on_crash = 'restart'
> #
> # For backwards compatibility we also support the deprecated option
> # restart
> #
> # restart = 'onreboot' means on_poweroff = 'destroy'
> #                            on_reboot   = 'restart'
> #                            on_crash    = 'destroy'
> #
> # restart = 'always'   means on_poweroff = 'restart'
> #                            on_reboot   = 'restart'
> #                            on_crash    = 'restart'
> #
> # restart = 'never'    means on_poweroff = 'destroy'
> #                            on_reboot   = 'destroy'
> #                            on_crash    = 'destroy'
>
> #on_poweroff = 'destroy'
> #on_reboot = 'restart'
> #on_crash = 'restart'
>
> #----------------------------------------------------------------------------
> # Configure PVSCSI devices:
> #
> #vscsi=[ 'PDEV, VDEV' ]
> #
> # PDEV gives the physical SCSI device to be attached to the specified
> # guest domain, in one of the following identifier formats:
> #  - XX:XX:XX:XX (4-tuple with decimal notation, meaning
> #                 "host:channel:target:lun")
> #  - /dev/sdxx or sdx
> #  - /dev/stxx or stx
> #  - /dev/sgxx or sgx
> #  - result of 'scsi_id -gu -s'.
> #      ex. # scsi_id -gu -s /block/sdb
> #          36000b5d0006a0000006a0257004c0000
> #
> # VDEV gives the virtual SCSI device, as a 4-tuple (XX:XX:XX:XX), under
> # which the specified guest domain will see it.
> #
>
> #vscsi = [ '/dev/sdx, 0:0:0:0' ]
>
> #============================================================================
>
> extra = 'xencons=tty'
>
>
>
> Many of those things are probably totally useless, but as I did not
> want to make any mistake, I just modified an example config file with
> my own information.
> I should point out that I'm not really used to Xen, so should I have
> made some really silly mistakes, please forgive me :(
> Regards,
>
>
> John
> --
> View this message in context:
> http://www.nabble.com/-Xen-create--ALERT%21--dev-sda1-does-not-exist-tp24302140p24302140.html
> Sent from the Xen - User mailing list archive at Nabble.com.
>
>
> _______________________________________________
> Xen-users mailing list
> Xen-users@xxxxxxxxxxxxxxxxxxx
> http://lists.xensource.com/xen-users
_______________________________________________
Xen-users mailing list
Xen-users@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-users