# HG changeset patch
# User vh249@xxxxxxxxxxxxxxxxxxxxxxxx
# Node ID fa660d79f69573459949c847743f6341f466c7d0
# Parent 30ecae4339d57d7c1c3ee04eb7f9d34df6078ecb
modify build system to drop the trailing version digits from the 2.4 and 2.6 sparse tree names
Signed-off-by: Vincent Hanquez <vincent@xxxxxxxxxxxxx>
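
This renames the Linux sparse trees from version-specific names
(e.g. linux-2.6.11-xen-sparse) to series names (linux-2.4-xen-sparse,
linux-2.6-xen-sparse). LINUX_VER can therefore no longer be derived from
the sparse-tree directory name in buildconfigs/Rules.mk; instead each
buildconfigs/mk.* file now pins it explicitly, along the lines of:

    LINUX_SERIES = 2.4
    LINUX_VER    = 2.4.30   # pinned here, no longer derived from the tree name
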
diff -r 30ecae4339d5 -r fa660d79f695 buildconfigs/Rules.mk
--- a/buildconfigs/Rules.mk Thu Aug 4 01:13:46 2005
+++ b/buildconfigs/Rules.mk Tue Aug 9 15:17:45 2005
@@ -15,7 +15,6 @@
# Expand Linux series to Linux version
LINUX_SERIES ?= 2.6
-LINUX_VER ?= $(patsubst linux-%-xen-sparse,%,$(wildcard linux-$(LINUX_SERIES)*-xen-sparse))
# Setup Linux search path
LINUX_SRC_PATH ?= .:..
@@ -50,10 +49,14 @@
OS_VER = $(NETBSD_VER)
endif
-$(patsubst %,pristine-%/.valid-pristine,$(ALLSPARSETREES)) : pristine-%/.valid-pristine: %.tar.bz2
+pristine-%: pristine-%/.valid-pristine
+ @true
+
+pristine-%/.valid-pristine: %.tar.bz2
rm -rf tmp-pristine-$* $(@D)
mkdir -p tmp-pristine-$*
tar -C tmp-pristine-$* -jxf $<
+ -@rm tmp-pristine-$*/pax_global_header
mv tmp-pristine-$*/* $(@D)
@rm -rf tmp-pristine-$*
touch $(@D)/.hgskip
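
The rewritten pristine rules above use a stamp-file idiom: the directory
target delegates to a hidden .valid-pristine stamp whose recipe unpacks the
tarball. A minimal self-contained sketch of the same idiom, with a
hypothetical tree name "foo" standing in for the pattern stem:

    pristine-foo: pristine-foo/.valid-pristine
    	@true                                    # all real work is in the stamp rule

    pristine-foo/.valid-pristine: foo.tar.bz2
    	rm -rf tmp-pristine-foo pristine-foo
    	mkdir -p tmp-pristine-foo
    	tar -C tmp-pristine-foo -jxf $<
    	-@rm tmp-pristine-foo/pax_global_header  # pax-format tars extract this pseudo-file; '-' tolerates its absence
    	mv tmp-pristine-foo/* pristine-foo       # tarball's single top-level dir is renamed into place
    	@rm -rf tmp-pristine-foo
    	touch $@                                 # stamp records a complete unpack
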
diff -r 30ecae4339d5 -r fa660d79f695 buildconfigs/mk.linux-2.4-xen0
--- a/buildconfigs/mk.linux-2.4-xen0 Thu Aug 4 01:13:46 2005
+++ b/buildconfigs/mk.linux-2.4-xen0 Tue Aug 9 15:17:45 2005
@@ -2,6 +2,7 @@
OS = linux
LINUX_SERIES = 2.4
+LINUX_VER = 2.4.30
EXTRAVERSION = xen0
@@ -28,7 +29,7 @@
rm -rf $(LINUX_DIR)
cp -al $(<D) $(LINUX_DIR)
# Apply arch-xen patches
- ( cd linux-$(LINUX_VER)-xen-sparse ; \
+ ( cd linux-$(LINUX_SERIES)-xen-sparse ; \
./mkbuildtree ../$(LINUX_DIR) )
# add ebtables patch
( cd $(LINUX_DIR) ; patch -p1 -F3 < ../patches/tmp/ebtables.diff )
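
Note that LINUX_VER is pinned with a plain '=' here rather than '?='; a
command-line override still wins, since variables set on the make command
line take precedence over assignments in the makefile:

    # hypothetical invocation; the real entry point is the top-level
    # Makefile, which this patch does not touch
    make -f buildconfigs/mk.linux-2.4-xen0 LINUX_VER=2.4.31 <target>
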
diff -r 30ecae4339d5 -r fa660d79f695 buildconfigs/mk.linux-2.4-xenU
--- a/buildconfigs/mk.linux-2.4-xenU Thu Aug 4 01:13:46 2005
+++ b/buildconfigs/mk.linux-2.4-xenU Tue Aug 9 15:17:45 2005
@@ -2,6 +2,7 @@
OS = linux
LINUX_SERIES = 2.4
+LINUX_VER = 2.4.30
EXTRAVERSION = xenU
@@ -19,11 +20,11 @@
fi
$(MAKE) -C $(LINUX_DIR) ARCH=xen INSTALL_PATH=$(DESTDIR) install
-$(LINUX_DIR)/include/linux/autoconf.h: ref-linux-$(LINUX_VER)/.valid-ref
+$(LINUX_DIR)/include/linux/autoconf.h: ref-$(OS)-$(LINUX_VER)/.valid-ref
rm -rf $(LINUX_DIR)
cp -al $(<D) $(LINUX_DIR)
# Apply arch-xen patches
- ( cd linux-$(LINUX_VER)-xen-sparse ; \
+ ( cd linux-$(LINUX_SERIES)-xen-sparse ; \
./mkbuildtree ../$(LINUX_DIR) )
# Re-use config from install dir if one exists else use default config
CONFIG_VERSION=$$(sed -ne 's/^EXTRAVERSION = //p' $(LINUX_DIR)/Makefile); \
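
With OS set to linux at the top of this file, the new spelling expands to
the same path as before; the rewrite just stops hard-coding the OS
component of the ref-tree name:

    # OS = linux, LINUX_VER = 2.4.30 in this file, so:
    # ref-$(OS)-$(LINUX_VER)/.valid-ref  ->  ref-linux-2.4.30/.valid-ref
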
diff -r 30ecae4339d5 -r fa660d79f695 buildconfigs/mk.linux-2.6-xen0
--- a/buildconfigs/mk.linux-2.6-xen0 Thu Aug 4 01:13:46 2005
+++ b/buildconfigs/mk.linux-2.6-xen0 Tue Aug 9 15:17:45 2005
@@ -2,6 +2,7 @@
OS = linux
LINUX_SERIES = 2.6
+LINUX_VER = 2.6.11
EXTRAVERSION = xen0
@@ -23,7 +24,7 @@
rm -rf $(LINUX_DIR)
cp -al $(<D) $(LINUX_DIR)
# Apply arch-xen patches
- ( cd linux-$(LINUX_VER)-xen-sparse ; \
+ ( cd linux-$(LINUX_SERIES)-xen-sparse ; \
./mkbuildtree ../$(LINUX_DIR) )
# Re-use config from install dir if one exists else use default config
CONFIG_VERSION=$$(sed -ne 's/^EXTRAVERSION = //p' $(LINUX_DIR)/Makefile); \
diff -r 30ecae4339d5 -r fa660d79f695 buildconfigs/mk.linux-2.6-xenU
--- a/buildconfigs/mk.linux-2.6-xenU Thu Aug 4 01:13:46 2005
+++ b/buildconfigs/mk.linux-2.6-xenU Tue Aug 9 15:17:45 2005
@@ -2,6 +2,7 @@
OS = linux
LINUX_SERIES = 2.6
+LINUX_VER = 2.6.11
EXTRAVERSION = xenU
@@ -23,7 +24,7 @@
rm -rf $(LINUX_DIR)
cp -al $(<D) $(LINUX_DIR)
# Apply arch-xen patches
- ( cd linux-$(LINUX_VER)-xen-sparse ; \
+ ( cd linux-$(LINUX_SERIES)-xen-sparse ; \
./mkbuildtree ../$(LINUX_DIR) )
# Re-use config from install dir if one exists else use default config
CONFIG_VERSION=$$(sed -ne 's/^EXTRAVERSION = //p' $(LINUX_DIR)/Makefile); \
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/Makefile
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/Makefile Tue Aug 9 15:17:45 2005
@@ -0,0 +1,592 @@
+VERSION = 2
+PATCHLEVEL = 4
+SUBLEVEL = 30
+EXTRAVERSION =
+
+KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
+
+# SUBARCH always tells us the underlying machine architecture.
+# Unless overridden, by default ARCH is equivalent to SUBARCH.
+# This will be overridden for Xen and UML builds.
+SUBARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ -e s/arm.*/arm/ -e s/sa110/arm/)
+ARCH ?= $(SUBARCH)
+
+## XXX The following hack can be discarded after users have adjusted to the
+## architectural name change 'xeno' -> 'xen'.
+ifeq ($(ARCH),xeno)
+ ARCH := xen
+endif
+
+KERNELPATH=kernel-$(shell echo $(KERNELRELEASE) | sed -e "s/-//g")
+
+CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
+ else if [ -x /bin/bash ]; then echo /bin/bash; \
+ else echo sh; fi ; fi)
+TOPDIR := $(shell /bin/pwd)
+
+HPATH = $(TOPDIR)/include
+FINDHPATH = $(HPATH)/asm $(HPATH)/linux $(HPATH)/scsi $(HPATH)/net $(HPATH)/math-emu
+
+HOSTCC = gcc
+HOSTCFLAGS = -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer
+
+CROSS_COMPILE =
+
+#
+# Include the make variables (CC, etc...)
+#
+
+AS = $(CROSS_COMPILE)as
+LD = $(CROSS_COMPILE)ld
+CC = $(CROSS_COMPILE)gcc
+CPP = $(CC) -E
+AR = $(CROSS_COMPILE)ar
+NM = $(CROSS_COMPILE)nm
+STRIP = $(CROSS_COMPILE)strip
+OBJCOPY = $(CROSS_COMPILE)objcopy
+OBJDUMP = $(CROSS_COMPILE)objdump
+MAKEFILES = $(TOPDIR)/.config
+GENKSYMS = /sbin/genksyms
+DEPMOD = /sbin/depmod
+MODFLAGS = -DMODULE
+CFLAGS_KERNEL =
+PERL = perl
+AWK = awk
+RPM := $(shell if [ -x "/usr/bin/rpmbuild" ]; then echo rpmbuild; \
+ else echo rpm; fi)
+
+export VERSION PATCHLEVEL SUBLEVEL EXTRAVERSION KERNELRELEASE ARCH \
+ CONFIG_SHELL TOPDIR HPATH HOSTCC HOSTCFLAGS CROSS_COMPILE AS LD CC \
+	CPP AR NM STRIP OBJCOPY OBJDUMP MAKE MAKEFILES GENKSYMS MODFLAGS PERL AWK
+
+all: do-it-all
+
+#
+# Make "config" the default target if there is no configuration file or
+# "depend" the target if there is no top-level dependency information.
+#
+
+ifeq (.config,$(wildcard .config))
+include .config
+ifeq (.depend,$(wildcard .depend))
+include .depend
+do-it-all: Version vmlinux
+else
+CONFIGURATION = depend
+do-it-all: depend
+endif
+else
+CONFIGURATION = config
+do-it-all: config
+endif
+
+#
+# INSTALL_PATH specifies where to place the updated kernel and system map
+# images. Uncomment if you want to place them anywhere other than root.
+#
+
+#export INSTALL_PATH=/boot
+
+#
+# INSTALL_MOD_PATH specifies a prefix to MODLIB for module directory
+# relocations required by build roots. This is not defined in the
+# makefile but the argument can be passed to make if needed.
+#
+
+MODLIB = $(INSTALL_MOD_PATH)/lib/modules/$(KERNELRELEASE)
+export MODLIB
+
+#
+# standard CFLAGS
+#
+
+CPPFLAGS := -D__KERNEL__ -I$(HPATH)
+
+CFLAGS := $(CPPFLAGS) -Wall -Wstrict-prototypes -Wno-trigraphs -O2 \
+ -fno-strict-aliasing -fno-common
+ifndef CONFIG_FRAME_POINTER
+CFLAGS += -fomit-frame-pointer
+endif
+AFLAGS := -D__ASSEMBLY__ $(CPPFLAGS)
+
+#
+# ROOT_DEV specifies the default root-device when making the image.
+# This can be either FLOPPY, CURRENT, /dev/xxxx or empty, in which case
+# the default of FLOPPY is used by 'build'.
+# This is i386 specific.
+#
+
+export ROOT_DEV = CURRENT
+
+#
+# If you want to preset the SVGA mode, uncomment the next line and
+# set SVGA_MODE to whatever number you want.
+# Set it to -DSVGA_MODE=NORMAL_VGA if you just want the EGA/VGA mode.
+# The number is the same as you would ordinarily press at bootup.
+# This is i386 specific.
+#
+
+export SVGA_MODE = -DSVGA_MODE=NORMAL_VGA
+
+#
+# If you want the RAM disk device, define this to be the size in blocks.
+# This is i386 specific.
+#
+
+#export RAMDISK = -DRAMDISK=512
+
+CORE_FILES =kernel/kernel.o mm/mm.o fs/fs.o ipc/ipc.o
+NETWORKS =net/network.o
+
+LIBS =$(TOPDIR)/lib/lib.a
+SUBDIRS =kernel drivers mm fs net ipc lib crypto
+
+DRIVERS-n :=
+DRIVERS-y :=
+DRIVERS-m :=
+DRIVERS- :=
+
+DRIVERS-$(CONFIG_ACPI_BOOT) += drivers/acpi/acpi.o
+DRIVERS-$(CONFIG_PARPORT) += drivers/parport/driver.o
+DRIVERS-y += drivers/char/char.o \
+ drivers/block/block.o \
+ drivers/misc/misc.o \
+ drivers/net/net.o
+DRIVERS-$(CONFIG_AGP) += drivers/char/agp/agp.o
+DRIVERS-$(CONFIG_DRM_NEW) += drivers/char/drm/drm.o
+DRIVERS-$(CONFIG_DRM_OLD) += drivers/char/drm-4.0/drm.o
+DRIVERS-$(CONFIG_NUBUS) += drivers/nubus/nubus.a
+DRIVERS-$(CONFIG_NET_FC) += drivers/net/fc/fc.o
+DRIVERS-$(CONFIG_DEV_APPLETALK) += drivers/net/appletalk/appletalk.o
+DRIVERS-$(CONFIG_TR) += drivers/net/tokenring/tr.o
+DRIVERS-$(CONFIG_WAN) += drivers/net/wan/wan.o
+DRIVERS-$(CONFIG_ARCNET) += drivers/net/arcnet/arcnetdrv.o
+DRIVERS-$(CONFIG_ATM) += drivers/atm/atm.o
+DRIVERS-$(CONFIG_IDE) += drivers/ide/idedriver.o
+DRIVERS-$(CONFIG_FC4) += drivers/fc4/fc4.a
+DRIVERS-$(CONFIG_SCSI) += drivers/scsi/scsidrv.o
+DRIVERS-$(CONFIG_FUSION_BOOT) += drivers/message/fusion/fusion.o
+DRIVERS-$(CONFIG_IEEE1394) += drivers/ieee1394/ieee1394drv.o
+
+ifneq ($(CONFIG_CD_NO_IDESCSI)$(CONFIG_BLK_DEV_IDECD)$(CONFIG_BLK_DEV_SR)$(CONFIG_PARIDE_PCD),)
+DRIVERS-y += drivers/cdrom/driver.o
+endif
+
+DRIVERS-$(CONFIG_SOUND) += drivers/sound/sounddrivers.o
+DRIVERS-$(CONFIG_PCI) += drivers/pci/driver.o
+DRIVERS-$(CONFIG_MTD) += drivers/mtd/mtdlink.o
+DRIVERS-$(CONFIG_PCMCIA) += drivers/pcmcia/pcmcia.o
+DRIVERS-$(CONFIG_NET_PCMCIA) += drivers/net/pcmcia/pcmcia_net.o
+DRIVERS-$(CONFIG_NET_WIRELESS) += drivers/net/wireless/wireless_net.o
+DRIVERS-$(CONFIG_PCMCIA_CHRDEV) += drivers/char/pcmcia/pcmcia_char.o
+DRIVERS-$(CONFIG_DIO) += drivers/dio/dio.a
+DRIVERS-$(CONFIG_SBUS) += drivers/sbus/sbus_all.o
+DRIVERS-$(CONFIG_ZORRO) += drivers/zorro/driver.o
+DRIVERS-$(CONFIG_FC4) += drivers/fc4/fc4.a
+DRIVERS-$(CONFIG_PPC32) += drivers/macintosh/macintosh.o
+DRIVERS-$(CONFIG_MAC) += drivers/macintosh/macintosh.o
+DRIVERS-$(CONFIG_ISAPNP) += drivers/pnp/pnp.o
+DRIVERS-$(CONFIG_I2C) += drivers/i2c/i2c.o
+DRIVERS-$(CONFIG_VT) += drivers/video/video.o
+DRIVERS-$(CONFIG_PARIDE) += drivers/block/paride/paride.a
+DRIVERS-$(CONFIG_HAMRADIO) += drivers/net/hamradio/hamradio.o
+DRIVERS-$(CONFIG_TC) += drivers/tc/tc.a
+DRIVERS-$(CONFIG_USB) += drivers/usb/usbdrv.o
+DRIVERS-$(CONFIG_USB_GADGET) += drivers/usb/gadget/built-in.o
+DRIVERS-y +=drivers/media/media.o
+DRIVERS-$(CONFIG_INPUT) += drivers/input/inputdrv.o
+DRIVERS-$(CONFIG_HIL) += drivers/hil/hil.o
+DRIVERS-$(CONFIG_I2O) += drivers/message/i2o/i2o.o
+DRIVERS-$(CONFIG_IRDA) += drivers/net/irda/irda.o
+DRIVERS-$(CONFIG_PHONE) += drivers/telephony/telephony.o
+DRIVERS-$(CONFIG_MD) += drivers/md/mddev.o
+DRIVERS-$(CONFIG_GSC) += drivers/gsc/gscbus.o
+DRIVERS-$(CONFIG_BLUEZ) += drivers/bluetooth/bluetooth.o
+DRIVERS-$(CONFIG_HOTPLUG_PCI) += drivers/hotplug/vmlinux-obj.o
+DRIVERS-$(CONFIG_ISDN_BOOL) += drivers/isdn/vmlinux-obj.o
+DRIVERS-$(CONFIG_CRYPTO) += crypto/crypto.o
+
+DRIVERS := $(DRIVERS-y)
+
+
+# files removed with 'make clean'
+CLEAN_FILES = \
+ kernel/ksyms.lst include/linux/compile.h \
+ vmlinux System.map \
+ .tmp* \
+ drivers/char/consolemap_deftbl.c drivers/video/promcon_tbl.c \
+ drivers/char/conmakehash \
+ drivers/char/drm/*-mod.c \
+ drivers/pci/devlist.h drivers/pci/classlist.h drivers/pci/gen-devlist \
+ drivers/zorro/devlist.h drivers/zorro/gen-devlist \
+ drivers/sound/bin2hex drivers/sound/hex2hex \
+ drivers/atm/fore200e_mkfirm drivers/atm/{pca,sba}*{.bin,.bin1,.bin2} \
+ drivers/scsi/aic7xxx/aicasm/aicasm \
+ drivers/scsi/aic7xxx/aicasm/aicasm_gram.c \
+ drivers/scsi/aic7xxx/aicasm/aicasm_gram.h \
+ drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.c \
+ drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.h \
+ drivers/scsi/aic7xxx/aicasm/aicasm_macro_scan.c \
+ drivers/scsi/aic7xxx/aicasm/aicasm_scan.c \
+ drivers/scsi/aic7xxx/aicasm/aicdb.h \
+ drivers/scsi/aic7xxx/aicasm/y.tab.h \
+ drivers/scsi/53c700_d.h \
+ drivers/tc/lk201-map.c \
+ net/khttpd/make_times_h \
+ net/khttpd/times.h \
+ submenu* \
+ drivers/ieee1394/oui.c
+# directories removed with 'make clean'
+CLEAN_DIRS = \
+ modules
+
+# files removed with 'make mrproper'
+MRPROPER_FILES = \
+ include/linux/autoconf.h include/linux/version.h \
+ lib/crc32table.h lib/gen_crc32table \
+ drivers/net/hamradio/soundmodem/sm_tbl_{afsk1200,afsk2666,fsk9600}.h \
+ drivers/net/hamradio/soundmodem/sm_tbl_{hapn4800,psk4800}.h \
+ drivers/net/hamradio/soundmodem/sm_tbl_{afsk2400_7,afsk2400_8}.h \
+ drivers/net/hamradio/soundmodem/gentbl \
+ drivers/sound/*_boot.h drivers/sound/.*.boot \
+ drivers/sound/msndinit.c \
+ drivers/sound/msndperm.c \
+ drivers/sound/pndsperm.c \
+ drivers/sound/pndspini.c \
+ drivers/atm/fore200e_*_fw.c drivers/atm/.fore200e_*.fw \
+ .version .config* config.in config.old \
+ scripts/tkparse scripts/kconfig.tk scripts/kconfig.tmp \
+ scripts/lxdialog/*.o scripts/lxdialog/lxdialog \
+ .menuconfig.log \
+ include/asm \
+ .hdepend scripts/mkdep scripts/split-include scripts/docproc \
+ $(TOPDIR)/include/linux/modversions.h \
+ kernel.spec
+
+# directories removed with 'make mrproper'
+MRPROPER_DIRS = \
+ include/config \
+ $(TOPDIR)/include/linux/modules
+
+
+include arch/$(ARCH)/Makefile
+
+# Extra cflags for kbuild 2.4. The default is to forbid includes by kernel code
+# from user space headers. Some UML code requires user space headers, in the
+# UML Makefiles add 'kbuild_2_4_nostdinc :=' before include Rules.make. No
+# other kernel code should include user space headers, if you need
+# 'kbuild_2_4_nostdinc :=' or -I/usr/include for kernel code and you are not UML
+# then your code is broken! KAO.
+
+kbuild_2_4_nostdinc := -nostdinc -iwithprefix include
+export kbuild_2_4_nostdinc
+
+export CPPFLAGS CFLAGS CFLAGS_KERNEL AFLAGS AFLAGS_KERNEL
+
+export NETWORKS DRIVERS LIBS HEAD LDFLAGS LINKFLAGS MAKEBOOT ASFLAGS
+
+.S.s:
+ $(CPP) $(AFLAGS) $(AFLAGS_KERNEL) -traditional -o $*.s $<
+.S.o:
+ $(CC) $(AFLAGS) $(AFLAGS_KERNEL) -traditional -c -o $*.o $<
+
+Version: dummy
+ @rm -f include/linux/compile.h
+
+boot: vmlinux
+ @$(MAKE) CFLAGS="$(CFLAGS) $(CFLAGS_KERNEL)" -C arch/$(ARCH)/boot
+
+vmlinux: include/linux/version.h $(CONFIGURATION) init/main.o init/version.o init/do_mounts.o linuxsubdirs
+ $(LD) $(LINKFLAGS) $(HEAD) init/main.o init/version.o init/do_mounts.o \
+ --start-group \
+ $(CORE_FILES) \
+ $(DRIVERS) \
+ $(NETWORKS) \
+ $(LIBS) \
+ --end-group \
+ -o vmlinux
+	$(NM) vmlinux | grep -v '\(compiled\)\|\(\.o$$\)\|\( [aUw] \)\|\(\.\.ng$$\)\|\(LASH[RL]DI\)' | sort > System.map
+
+symlinks:
+ rm -f include/asm
+ ( cd include ; ln -sf asm-$(ARCH) asm)
+ @if [ ! -d include/linux/modules ]; then \
+ mkdir include/linux/modules; \
+ fi
+
+oldconfig: symlinks
+ $(CONFIG_SHELL) scripts/Configure -d arch/$(ARCH)/config.in
+
+xconfig: symlinks
+ $(MAKE) -C scripts kconfig.tk
+ wish -f scripts/kconfig.tk
+
+menuconfig: include/linux/version.h symlinks
+ $(MAKE) -C scripts/lxdialog all
+ $(CONFIG_SHELL) scripts/Menuconfig arch/$(ARCH)/config.in
+
+config: symlinks
+ $(CONFIG_SHELL) scripts/Configure arch/$(ARCH)/config.in
+
+include/config/MARKER: scripts/split-include include/linux/autoconf.h
+ scripts/split-include include/linux/autoconf.h include/config
+ @ touch include/config/MARKER
+
+linuxsubdirs: $(patsubst %, _dir_%, $(SUBDIRS))
+
+$(patsubst %, _dir_%, $(SUBDIRS)) : dummy include/linux/version.h include/config/MARKER
+ $(MAKE) CFLAGS="$(CFLAGS) $(CFLAGS_KERNEL)" -C $(patsubst _dir_%, %, $@)
+
+$(TOPDIR)/include/linux/version.h: include/linux/version.h
+$(TOPDIR)/include/linux/compile.h: include/linux/compile.h
+
+newversion:
+ . scripts/mkversion > .tmpversion
+ @mv -f .tmpversion .version
+
+uts_len := 64
+uts_truncate := sed -e 's/\(.\{1,$(uts_len)\}\).*/\1/'
+
+include/linux/compile.h: $(CONFIGURATION) include/linux/version.h newversion
+ @echo -n \#`cat .version` > .ver1
+ @if [ -n "$(CONFIG_SMP)" ] ; then echo -n " SMP" >> .ver1; fi
+ @if [ -f .name ]; then echo -n \-`cat .name` >> .ver1; fi
+ @LANG=C echo ' '`date` >> .ver1
+ @echo \#define UTS_VERSION \"`cat .ver1 | $(uts_truncate)`\" > .ver
+ @LANG=C echo \#define LINUX_COMPILE_TIME \"`date +%T`\" >> .ver
+ @echo \#define LINUX_COMPILE_BY \"`whoami`\" >> .ver
+	@echo \#define LINUX_COMPILE_HOST \"`hostname | $(uts_truncate)`\" >> .ver
+ @([ -x /bin/dnsdomainname ] && /bin/dnsdomainname > .ver1) || \
+ ([ -x /bin/domainname ] && /bin/domainname > .ver1) || \
+ echo > .ver1
+	@echo \#define LINUX_COMPILE_DOMAIN \"`cat .ver1 | $(uts_truncate)`\" >> .ver
+	@echo \#define LINUX_COMPILER \"`$(CC) $(CFLAGS) -v 2>&1 | tail -n 1`\" >> .ver
+ @mv -f .ver $@
+ @rm -f .ver1
+
+include/linux/version.h: ./Makefile
+	@expr length "$(KERNELRELEASE)" \<= $(uts_len) > /dev/null || \
+	  (echo KERNELRELEASE \"$(KERNELRELEASE)\" exceeds $(uts_len) characters >&2; false)
+	@echo \#define UTS_RELEASE \"$(KERNELRELEASE)\" > .ver
+	@echo \#define LINUX_VERSION_CODE `expr $(VERSION) \\* 65536 + $(PATCHLEVEL) \\* 256 + $(SUBLEVEL)` >> .ver
+	@echo '#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))' >>.ver
+ @mv -f .ver $@
+
+comma := ,
+
+init/version.o: init/version.c include/linux/compile.h include/config/MARKER
+	$(CC) $(CFLAGS) $(CFLAGS_KERNEL) -DUTS_MACHINE='"$(SUBARCH)"' -DKBUILD_BASENAME=$(subst $(comma),_,$(subst -,_,$(*F))) -c -o init/version.o init/version.c
+
+init/main.o: init/main.c include/config/MARKER
+	$(CC) $(CFLAGS) $(CFLAGS_KERNEL) $(PROFILING) -DKBUILD_BASENAME=$(subst $(comma),_,$(subst -,_,$(*F))) -c -o $@ $<
+
+init/do_mounts.o: init/do_mounts.c include/config/MARKER
+	$(CC) $(CFLAGS) $(CFLAGS_KERNEL) $(PROFILING) -DKBUILD_BASENAME=$(subst $(comma),_,$(subst -,_,$(*F))) -c -o $@ $<
+
+fs lib mm ipc kernel drivers net: dummy
+ $(MAKE) CFLAGS="$(CFLAGS) $(CFLAGS_KERNEL)" $(subst $@, _dir_$@, $@)
+
+TAGS: dummy
+	{ find include/asm-${ARCH} -name '*.h' -print ; \
+	find include -type d \( -name "asm-*" -o -name config \) -prune -o -name '*.h' -print ; \
+	find $(SUBDIRS) init arch/${ARCH} -name '*.[chS]' ; } | grep -v SCCS | grep -v '\.svn' | etags -
+
+# Exuberant ctags works better with -I
+tags: dummy
+	CTAGSF=`ctags --version | grep -i exuberant >/dev/null && echo "-I __initdata,__exitdata,EXPORT_SYMBOL,EXPORT_SYMBOL_NOVERS"`; \
+	ctags $$CTAGSF `find include/asm-$(ARCH) -name '*.h'` && \
+	find include -type d \( -name "asm-*" -o -name config \) -prune -o -name '*.h' -print | xargs ctags $$CTAGSF -a && \
+	find $(SUBDIRS) init -name '*.[ch]' | xargs ctags $$CTAGSF -a
+
+ifdef CONFIG_MODULES
+ifdef CONFIG_MODVERSIONS
+MODFLAGS += -DMODVERSIONS -include $(HPATH)/linux/modversions.h
+endif
+
+.PHONY: modules
+modules: $(patsubst %, _mod_%, $(SUBDIRS))
+
+.PHONY: $(patsubst %, _mod_%, $(SUBDIRS))
+$(patsubst %, _mod_%, $(SUBDIRS)) : include/linux/version.h include/config/MARKER
+	$(MAKE) -C $(patsubst _mod_%, %, $@) CFLAGS="$(CFLAGS) $(MODFLAGS)" MAKING_MODULES=1 modules
+
+.PHONY: modules_install
+modules_install: _modinst_ $(patsubst %, _modinst_%, $(SUBDIRS)) _modinst_post
+
+.PHONY: _modinst_
+_modinst_:
+ @rm -rf $(MODLIB)/kernel
+ @rm -f $(MODLIB)/build
+ @mkdir -p $(MODLIB)/kernel
+ @ln -s $(TOPDIR) $(MODLIB)/build
+
+# If System.map exists, run depmod. This deliberately does not have a
+# dependency on System.map since that would run the dependency tree on
+# vmlinux. This depmod is only for convenience to give the initial
+# boot a modules.dep even before / is mounted read-write. However the
+# boot script depmod is the master version.
+ifeq "$(strip $(INSTALL_MOD_PATH))" ""
+depmod_opts :=
+else
+depmod_opts := -b $(INSTALL_MOD_PATH) -r
+endif
+.PHONY: _modinst_post
+_modinst_post: _modinst_post_pcmcia
+	if [ -r System.map ]; then $(DEPMOD) -ae -F System.map $(depmod_opts) $(KERNELRELEASE); fi
+
+# Backwards compatibility symlinks for people still using old versions
+# of pcmcia-cs with hard coded pathnames on insmod. Remove
+# _modinst_post_pcmcia for kernel 2.4.1.
+.PHONY: _modinst_post_pcmcia
+_modinst_post_pcmcia:
+ cd $(MODLIB); \
+ mkdir -p pcmcia; \
+	find kernel -path '*/pcmcia/*' -name '*.o' | xargs -i -r ln -sf ../{} pcmcia
+
+.PHONY: $(patsubst %, _modinst_%, $(SUBDIRS))
+$(patsubst %, _modinst_%, $(SUBDIRS)) :
+ $(MAKE) -C $(patsubst _modinst_%, %, $@) modules_install
+
+# modules disabled....
+
+else
+modules modules_install: dummy
+ @echo
+ @echo "The present kernel configuration has modules disabled."
+ @echo "Type 'make config' and enable loadable module support."
+ @echo "Then build a kernel with module support enabled."
+ @echo
+ @exit 1
+endif
+
+clean: archclean
+	find . \( -name '*.[oas]' -o -name core -o -name '.*.flags' \) -type f -print \
+		| grep -v lxdialog/ | xargs rm -f
+ rm -f $(CLEAN_FILES)
+ rm -rf $(CLEAN_DIRS)
+ $(MAKE) -C Documentation/DocBook clean
+
+mrproper: clean archmrproper
+ find . \( -size 0 -o -name .depend \) -type f -print | xargs rm -f
+ rm -f $(MRPROPER_FILES)
+ rm -rf $(MRPROPER_DIRS)
+ $(MAKE) -C Documentation/DocBook mrproper
+
+distclean: mrproper
+ rm -f core `find . \( -not -type d \) -and \
+ \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
+ -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
+		-o -name '.*.rej' -o -name '.SUMS' -o -size 0 \) -type f -print` TAGS tags
+
+backup: mrproper
+ cd .. && tar cf - linux/ | gzip -9 > backup.gz
+ sync
+
+sgmldocs:
+ chmod 755 $(TOPDIR)/scripts/docgen
+ chmod 755 $(TOPDIR)/scripts/gen-all-syms
+ chmod 755 $(TOPDIR)/scripts/kernel-doc
+ $(MAKE) -C $(TOPDIR)/Documentation/DocBook books
+
+psdocs: sgmldocs
+ $(MAKE) -C Documentation/DocBook ps
+
+pdfdocs: sgmldocs
+ $(MAKE) -C Documentation/DocBook pdf
+
+htmldocs: sgmldocs
+ $(MAKE) -C Documentation/DocBook html
+
+mandocs:
+ chmod 755 $(TOPDIR)/scripts/kernel-doc
+ chmod 755 $(TOPDIR)/scripts/split-man
+ $(MAKE) -C Documentation/DocBook man
+
+sums:
+ find . -type f -print | sort | xargs sum > .SUMS
+
+dep-files: scripts/mkdep archdep include/linux/version.h
+ rm -f .depend .hdepend
+	$(MAKE) $(patsubst %,_sfdep_%,$(SUBDIRS)) _FASTDEP_ALL_SUB_DIRS="$(SUBDIRS)"
+ifdef CONFIG_MODVERSIONS
+	$(MAKE) update-modverfile
+endif
+	scripts/mkdep -- `find $(FINDHPATH) \( -name SCCS -o -name .svn \) -prune -o -follow -name \*.h ! -name modversions.h -print` > .hdepend
+ scripts/mkdep -- init/*.c > .depend
+
+ifdef CONFIG_MODVERSIONS
+MODVERFILE := $(TOPDIR)/include/linux/modversions.h
+else
+MODVERFILE :=
+endif
+export MODVERFILE
+
+depend dep: dep-files
+
+checkconfig:
+	find * -name '*.[hcS]' -type f -print | sort | xargs $(PERL) -w scripts/checkconfig.pl
+
+checkhelp:
+	find * -name [cC]onfig.in -print | sort | xargs $(PERL) -w scripts/checkhelp.pl
+
+checkincludes:
+	find * -name '*.[hcS]' -type f -print | sort | xargs $(PERL) -w scripts/checkincludes.pl
+
+ifdef CONFIGURATION
+..$(CONFIGURATION):
+ @echo
+	@echo "You have a bad or nonexistent" .$(CONFIGURATION) ": running 'make" $(CONFIGURATION)"'"
+ @echo
+ $(MAKE) $(CONFIGURATION)
+ @echo
+ @echo "Successful. Try re-making (ignore the error that follows)"
+ @echo
+ exit 1
+
+#dummy: ..$(CONFIGURATION)
+dummy:
+
+else
+
+dummy:
+
+endif
+
+include Rules.make
+
+#
+# This generates dependencies for the .h files.
+#
+
+scripts/mkdep: scripts/mkdep.c
+ $(HOSTCC) $(HOSTCFLAGS) -o scripts/mkdep scripts/mkdep.c
+
+scripts/split-include: scripts/split-include.c
+ $(HOSTCC) $(HOSTCFLAGS) -o scripts/split-include scripts/split-include.c
+
+#
+# RPM target
+#
+# If you do a make spec before packing the tarball you can rpm -ta it
+#
+spec:
+ . scripts/mkspec >kernel.spec
+
+#
+# Build a tar ball, generate an rpm from it and pack the result
+# There are two bits of magic here
+# 1) The use of /. to avoid tar packing just the symlink
+# 2) Removing the .dep files as they have source paths in them that
+# will become invalid
+#
+rpm: clean spec
+	find . \( -size 0 -o -name .depend -o -name .hdepend \) -type f -print | xargs rm -f
+ set -e; \
+ cd $(TOPDIR)/.. ; \
+ ln -sf $(TOPDIR) $(KERNELPATH) ; \
+ tar -cvz --exclude CVS -f $(KERNELPATH).tar.gz $(KERNELPATH)/. ; \
+ rm $(KERNELPATH) ; \
+ cd $(TOPDIR) ; \
+ . scripts/mkversion > .version ; \
+ $(RPM) -ta $(TOPDIR)/../$(KERNELPATH).tar.gz ; \
+ rm $(TOPDIR)/../$(KERNELPATH).tar.gz
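
For reference, the include/linux/version.h rule in the Makefile above packs
the version as (VERSION << 16) + (PATCHLEVEL << 8) + SUBLEVEL, so for the
2.4.30 tree added here:

    $ expr 2 \* 65536 + 4 \* 256 + 30
    132126        # 0x02041e, i.e. KERNEL_VERSION(2,4,30)
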
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/arch/xen/Makefile
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/arch/xen/Makefile Tue Aug 9 15:17:45 2005
@@ -0,0 +1,137 @@
+#
+# xen/Makefile
+#
+# This file is included by the global makefile so that you can add your own
+# architecture-specific flags and dependencies. Remember to have actions
+# for "archclean" and "archdep" for cleaning up and making dependencies for
+# this architecture
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 1994 by Linus Torvalds
+#
+# 19990713 Artur Skawina <skawina@xxxxxxxxxxxxx>
+# Added '-march' and '-mpreferred-stack-boundary' support
+#
+
+# If no .config file exists then use the appropriate defconfig-* file
+ifneq (.config,$(wildcard .config))
+DUMMYX:=$(shell cp $(TOPDIR)/arch/xen/defconfig$(EXTRAVERSION) $(TOPDIR)/.config)
+-include $(TOPDIR)/.config
+endif
+
+LD=$(CROSS_COMPILE)ld -m elf_i386
+OBJCOPY=$(CROSS_COMPILE)objcopy -R .note -R .comment -S
+LDFLAGS=-e stext
+LINKFLAGS =-T $(TOPDIR)/arch/xen/vmlinux.lds $(LDFLAGS)
+
+CFLAGS += -pipe
+
+check_gcc = $(shell if $(CC) $(1) -S -o /dev/null -xc /dev/null > /dev/null 2>&1; then echo "$(1)"; else echo "$(2)"; fi)
+
+# prevent gcc from keeping the stack 16 byte aligned
+CFLAGS += $(call check_gcc,-mpreferred-stack-boundary=2,)
+
+ifdef CONFIG_M686
+CFLAGS += -march=i686
+endif
+
+ifdef CONFIG_MPENTIUMIII
+CFLAGS += -march=i686
+endif
+
+ifdef CONFIG_MPENTIUM4
+CFLAGS += -march=i686
+endif
+
+ifdef CONFIG_MK7
+CFLAGS += $(call check_gcc,-march=athlon,-march=i686 -malign-functions=4)
+endif
+
+# Disable unit-at-a-time mode, it makes gcc use a lot more stack
+# due to the lack of sharing of stacklots.
+CFLAGS += $(call check_gcc,-fno-unit-at-a-time,)
+
+HEAD := arch/xen/kernel/head.o arch/xen/kernel/init_task.o
+
+SUBDIRS += arch/xen/kernel arch/xen/mm arch/xen/lib
+SUBDIRS += arch/xen/drivers/console
+SUBDIRS += arch/xen/drivers/evtchn
+SUBDIRS += arch/xen/drivers/blkif
+SUBDIRS += arch/xen/drivers/netif
+SUBDIRS += arch/xen/drivers/balloon
+ifdef CONFIG_XEN_PRIVILEGED_GUEST
+SUBDIRS += arch/xen/drivers/dom0
+endif
+
+CORE_FILES += arch/xen/kernel/kernel.o arch/xen/mm/mm.o
+CORE_FILES += arch/xen/drivers/evtchn/drv.o
+CORE_FILES += arch/xen/drivers/console/drv.o
+DRIVERS += arch/xen/drivers/blkif/drv.o
+DRIVERS += arch/xen/drivers/netif/drv.o
+ifdef CONFIG_XEN_PRIVILEGED_GUEST
+CORE_FILES += arch/xen/drivers/dom0/drv.o
+endif
+CORE_FILES += arch/xen/drivers/balloon/drv.o
+LIBS := $(TOPDIR)/arch/xen/lib/lib.a $(LIBS) $(TOPDIR)/arch/xen/lib/lib.a
+
+arch/xen/kernel: dummy
+ $(MAKE) linuxsubdirs SUBDIRS=arch/xen/kernel
+
+arch/xen/mm: dummy
+ $(MAKE) linuxsubdirs SUBDIRS=arch/xen/mm
+
+arch/xen/drivers/console: dummy
+ $(MAKE) linuxsubdirs SUBDIRS=arch/xen/drivers/console
+
+arch/xen/drivers/network: dummy
+ $(MAKE) linuxsubdirs SUBDIRS=arch/xen/drivers/network
+
+arch/xen/drivers/block: dummy
+ $(MAKE) linuxsubdirs SUBDIRS=arch/xen/drivers/block
+
+arch/xen/drivers/dom0: dummy
+ $(MAKE) linuxsubdirs SUBDIRS=arch/xen/drivers/dom0
+
+arch/xen/drivers/balloon: dummy
+ $(MAKE) linuxsubdirs SUBDIRS=arch/xen/drivers/balloon
+
+MAKEBOOT = $(MAKE) -C arch/$(ARCH)/boot
+
+vmlinux: arch/xen/vmlinux.lds
+
+FORCE: ;
+
+.PHONY: bzImage compressed clean archclean archmrproper archdep
+
+
+bzImage: vmlinux
+ @$(MAKEBOOT) bzImage
+
+INSTALL_NAME ?= $(KERNELRELEASE)
+install: bzImage
+ mkdir -p $(INSTALL_PATH)/boot
+	ln -f -s vmlinuz-$(INSTALL_NAME)$(INSTALL_SUFFIX) $(INSTALL_PATH)/boot/vmlinuz-$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(XENGUEST)$(INSTALL_SUFFIX)
+	rm -f $(INSTALL_PATH)/boot/vmlinuz-$(INSTALL_NAME)$(INSTALL_SUFFIX)
+	install -m0644 arch/$(ARCH)/boot/bzImage $(INSTALL_PATH)/boot/vmlinuz-$(INSTALL_NAME)$(INSTALL_SUFFIX)
+	install -m0644 vmlinux $(INSTALL_PATH)/boot/vmlinux-syms-$(INSTALL_NAME)$(INSTALL_SUFFIX)
+	install -m0664 .config $(INSTALL_PATH)/boot/config-$(INSTALL_NAME)$(INSTALL_SUFFIX)
+	install -m0664 System.map $(INSTALL_PATH)/boot/System.map-$(INSTALL_NAME)$(INSTALL_SUFFIX)
+	ln -f -s vmlinuz-$(INSTALL_NAME)$(INSTALL_SUFFIX) $(INSTALL_PATH)/boot/vmlinuz-$(VERSION).$(PATCHLEVEL)$(XENGUEST)$(INSTALL_SUFFIX)
+
+%_config: arch/xen/defconfig-%
+ rm -f .config arch/xen/defconfig
+ cp -f arch/xen/defconfig-$(@:_config=) arch/xen/defconfig
+ cp -f arch/xen/defconfig-$(@:_config=) .config
+
+
+archclean:
+ @$(MAKEBOOT) clean
+
+archmrproper:
+ rm -f include/asm-xen/xen-public/arch
+
+archdep:
+ @$(MAKEBOOT) dep
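
The %_config pattern rule above is how a build selects one of the
defconfigs added later in this patch; a plausible sequence (illustrative
only, assuming ARCH=xen is in effect so that this arch Makefile gets
included by the top-level one):

    make ARCH=xen xen0_config   # copies arch/xen/defconfig-xen0 over .config
    make ARCH=xen oldconfig     # re-runs scripts/Configure against those defaults
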
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/arch/xen/boot/Makefile
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/arch/xen/boot/Makefile Tue Aug 9 15:17:45 2005
@@ -0,0 +1,13 @@
+#
+# arch/xen/boot/Makefile
+#
+
+bzImage: $(TOPDIR)/vmlinux
+ $(OBJCOPY) $< Image
+ gzip -f -9 < Image > $@
+ rm -f Image
+
+dep:
+
+clean:
+ rm -f bzImage Image
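
There is no real-mode boot stub under Xen, so "bzImage" here is only a
name: the rule strips the ELF image and gzips it. Roughly the equivalent
shell, with $(OBJCOPY) expanded as defined in arch/xen/Makefile:

    objcopy -R .note -R .comment -S vmlinux Image
    gzip -9 < Image > bzImage   # a flat gzipped image is all the domain builder needs
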
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/arch/xen/config.in
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/arch/xen/config.in Tue Aug 9 15:17:45 2005
@@ -0,0 +1,325 @@
+#
+# For a description of the syntax of this configuration file,
+# see Documentation/kbuild/config-language.txt.
+#
+mainmenu_name "Linux Kernel Configuration"
+
+define_bool CONFIG_XEN y
+
+define_bool CONFIG_X86 y
+define_bool CONFIG_ISA y
+define_bool CONFIG_SBUS n
+
+define_bool CONFIG_UID16 y
+
+mainmenu_option next_comment
+comment 'Xen'
+bool 'Support for privileged operations (domain 0)' CONFIG_XEN_PRIVILEGED_GUEST
+bool 'Device-driver domain (physical device access)' CONFIG_XEN_PHYSDEV_ACCESS
+bool 'Scrub memory before freeing it to Xen' CONFIG_XEN_SCRUB_PAGES
+bool 'Network-device frontend driver' CONFIG_XEN_NETDEV_FRONTEND
+bool 'Block-device frontend driver' CONFIG_XEN_BLKDEV_FRONTEND
+endmenu
+# The IBM S/390 patch needs this.
+define_bool CONFIG_NO_IDLE_HZ y
+
+if [ "$CONFIG_XEN_PHYSDEV_ACCESS" == "y" ]; then
+ define_bool CONFIG_FOREIGN_PAGES y
+else
+ define_bool CONFIG_FOREIGN_PAGES n
+ define_bool CONFIG_NETDEVICES y
+ define_bool CONFIG_VT n
+fi
+
+mainmenu_option next_comment
+comment 'Code maturity level options'
+bool 'Prompt for development and/or incomplete code/drivers' CONFIG_EXPERIMENTAL
+endmenu
+
+mainmenu_option next_comment
+comment 'Loadable module support'
+bool 'Enable loadable module support' CONFIG_MODULES
+if [ "$CONFIG_MODULES" = "y" ]; then
+ bool ' Set version information on all module symbols' CONFIG_MODVERSIONS
+ bool ' Kernel module loader' CONFIG_KMOD
+fi
+endmenu
+
+mainmenu_option next_comment
+comment 'Processor type and features'
+choice 'Processor family' \
+ "Pentium-Pro/Celeron/Pentium-II CONFIG_M686 \
+ Pentium-III/Celeron(Coppermine) CONFIG_MPENTIUMIII \
+ Pentium-4 CONFIG_MPENTIUM4 \
+ Athlon/Duron/K7 CONFIG_MK7 \
+ Opteron/Athlon64/Hammer/K8 CONFIG_MK8 \
+ VIA-C3-2 CONFIG_MVIAC3_2" Pentium-Pro
+
+ define_bool CONFIG_X86_WP_WORKS_OK y
+ define_bool CONFIG_X86_INVLPG y
+ define_bool CONFIG_X86_CMPXCHG y
+ define_bool CONFIG_X86_XADD y
+ define_bool CONFIG_X86_BSWAP y
+ define_bool CONFIG_X86_POPAD_OK y
+ define_bool CONFIG_RWSEM_GENERIC_SPINLOCK n
+ define_bool CONFIG_RWSEM_XCHGADD_ALGORITHM y
+
+ define_bool CONFIG_X86_GOOD_APIC y
+ define_bool CONFIG_X86_PGE y
+ define_bool CONFIG_X86_USE_PPRO_CHECKSUM y
+ define_bool CONFIG_X86_TSC y
+
+if [ "$CONFIG_M686" = "y" ]; then
+ define_int CONFIG_X86_L1_CACHE_SHIFT 5
+fi
+if [ "$CONFIG_MPENTIUMIII" = "y" ]; then
+ define_int CONFIG_X86_L1_CACHE_SHIFT 5
+fi
+if [ "$CONFIG_MPENTIUM4" = "y" ]; then
+ define_int CONFIG_X86_L1_CACHE_SHIFT 7
+fi
+if [ "$CONFIG_MK8" = "y" ]; then
+ define_bool CONFIG_MK7 y
+fi
+if [ "$CONFIG_MK7" = "y" ]; then
+ define_int CONFIG_X86_L1_CACHE_SHIFT 6
+ define_bool CONFIG_X86_USE_3DNOW y
+fi
+if [ "$CONFIG_MVIAC3_2" = "y" ]; then
+ define_int CONFIG_X86_L1_CACHE_SHIFT 5
+fi
+
+#if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
+#	tristate 'BIOS Enhanced Disk Drive calls determine boot disk (EXPERIMENTAL)' CONFIG_EDD
+#fi
+
+choice 'High Memory Support' \
+ "off CONFIG_NOHIGHMEM \
+ 4GB CONFIG_HIGHMEM4G" off
+# 64GB CONFIG_HIGHMEM64G" off
+if [ "$CONFIG_HIGHMEM4G" = "y" ]; then
+ define_bool CONFIG_HIGHMEM y
+fi
+if [ "$CONFIG_HIGHMEM64G" = "y" ]; then
+ define_bool CONFIG_HIGHMEM y
+ define_bool CONFIG_X86_PAE y
+fi
+
+if [ "$CONFIG_HIGHMEM" = "y" ]; then
+ bool 'HIGHMEM I/O support' CONFIG_HIGHIO
+fi
+
+define_int CONFIG_FORCE_MAX_ZONEORDER 11
+
+#bool 'Symmetric multi-processing support' CONFIG_SMP
+#if [ "$CONFIG_SMP" = "y" -a "$CONFIG_X86_CMPXCHG" = "y" ]; then
+# define_bool CONFIG_HAVE_DEC_LOCK y
+#fi
+endmenu
+
+mainmenu_option next_comment
+comment 'General setup'
+
+bool 'Networking support' CONFIG_NET
+
+if [ "$CONFIG_XEN_PHYSDEV_ACCESS" = "y" ]; then
+ bool 'PCI support' CONFIG_PCI
+ source drivers/pci/Config.in
+
+ bool 'Support for hot-pluggable devices' CONFIG_HOTPLUG
+
+ if [ "$CONFIG_HOTPLUG" = "y" ] ; then
+ source drivers/pcmcia/Config.in
+ source drivers/hotplug/Config.in
+ else
+ define_bool CONFIG_PCMCIA n
+ define_bool CONFIG_HOTPLUG_PCI n
+ fi
+fi
+
+bool 'System V IPC' CONFIG_SYSVIPC
+bool 'BSD Process Accounting' CONFIG_BSD_PROCESS_ACCT
+bool 'Sysctl support' CONFIG_SYSCTL
+if [ "$CONFIG_PROC_FS" = "y" ]; then
+ choice 'Kernel core (/proc/kcore) format' \
+ "ELF CONFIG_KCORE_ELF \
+ A.OUT CONFIG_KCORE_AOUT" ELF
+fi
+tristate 'Kernel support for a.out binaries' CONFIG_BINFMT_AOUT
+bool 'Kernel support for ELF binaries' CONFIG_BINFMT_ELF
+tristate 'Kernel support for MISC binaries' CONFIG_BINFMT_MISC
+bool 'Select task to kill on out of memory condition' CONFIG_OOM_KILLER
+
+endmenu
+
+if [ "$CONFIG_XEN_PHYSDEV_ACCESS" = "y" ]; then
+ source drivers/mtd/Config.in
+
+ source drivers/parport/Config.in
+
+ source drivers/pnp/Config.in
+
+ source drivers/block/Config.in
+
+ source drivers/md/Config.in
+fi
+
+if [ "$CONFIG_NET" = "y" ]; then
+ source net/Config.in
+fi
+
+if [ "$CONFIG_XEN_PHYSDEV_ACCESS" = "y" ]; then
+ mainmenu_option next_comment
+ comment 'ATA/IDE/MFM/RLL support'
+
+ tristate 'ATA/IDE/MFM/RLL support' CONFIG_IDE
+
+ if [ "$CONFIG_IDE" != "n" ]; then
+ source drivers/ide/Config.in
+ else
+ define_bool CONFIG_BLK_DEV_HD n
+ fi
+ endmenu
+fi
+
+mainmenu_option next_comment
+comment 'SCSI support'
+
+tristate 'SCSI support' CONFIG_SCSI
+
+if [ "$CONFIG_SCSI" != "n" ]; then
+ source drivers/scsi/Config.in
+fi
+endmenu
+
+if [ "$CONFIG_XEN_PHYSDEV_ACCESS" = "y" ]; then
+ source drivers/message/fusion/Config.in
+
+ source drivers/ieee1394/Config.in
+
+ source drivers/message/i2o/Config.in
+
+ if [ "$CONFIG_NET" = "y" ]; then
+ mainmenu_option next_comment
+ comment 'Network device support'
+
+ bool 'Network device support' CONFIG_NETDEVICES
+ if [ "$CONFIG_NETDEVICES" = "y" ]; then
+ source drivers/net/Config.in
+ if [ "$CONFIG_ATM" = "y" -o "$CONFIG_ATM" = "m" ]; then
+ source drivers/atm/Config.in
+ fi
+ fi
+ endmenu
+ fi
+
+ source net/ax25/Config.in
+
+ source net/irda/Config.in
+
+ mainmenu_option next_comment
+ comment 'ISDN subsystem'
+ if [ "$CONFIG_NET" != "n" ]; then
+ tristate 'ISDN support' CONFIG_ISDN
+ if [ "$CONFIG_ISDN" != "n" ]; then
+ source drivers/isdn/Config.in
+ fi
+ fi
+ endmenu
+
+ if [ "$CONFIG_ISA" = "y" ]; then
+ mainmenu_option next_comment
+ comment 'Old CD-ROM drivers (not SCSI, not IDE)'
+
+ bool 'Support non-SCSI/IDE/ATAPI CDROM drives' CONFIG_CD_NO_IDESCSI
+ if [ "$CONFIG_CD_NO_IDESCSI" != "n" ]; then
+ source drivers/cdrom/Config.in
+ fi
+ endmenu
+ fi
+
+ #
+ # input before char - char/joystick depends on it. As does USB.
+ #
+ source drivers/input/Config.in
+else
+ #
+ # Block device driver configuration
+ #
+ mainmenu_option next_comment
+ comment 'Block devices'
+ tristate 'Loopback device support' CONFIG_BLK_DEV_LOOP
+ dep_tristate 'Network block device support' CONFIG_BLK_DEV_NBD $CONFIG_NET
+ tristate 'RAM disk support' CONFIG_BLK_DEV_RAM
+ if [ "$CONFIG_BLK_DEV_RAM" = "y" -o "$CONFIG_BLK_DEV_RAM" = "m" ]; then
+ int ' Default RAM disk size' CONFIG_BLK_DEV_RAM_SIZE 4096
+ fi
+  dep_bool '  Initial RAM disk (initrd) support' CONFIG_BLK_DEV_INITRD $CONFIG_BLK_DEV_RAM
+ bool 'Per partition statistics in /proc/partitions' CONFIG_BLK_STATS
+ define_bool CONFIG_BLK_DEV_HD n
+ endmenu
+fi
+
+source drivers/char/Config.in
+
+if [ "$CONFIG_XEN_PHYSDEV_ACCESS" = "y" ]; then
+ source drivers/media/Config.in
+fi
+
+source fs/Config.in
+
+mainmenu_option next_comment
+comment 'Console drivers'
+
+define_bool CONFIG_XEN_CONSOLE y
+
+if [ "$CONFIG_VT" = "y" ]; then
+ bool 'VGA text console' CONFIG_VGA_CONSOLE
+ bool 'Dummy console' CONFIG_DUMMY_CONSOLE
+ if [ "$CONFIG_XEN_PHYSDEV_ACCESS" = "y" ]; then
+ bool 'Video mode selection support' CONFIG_VIDEO_SELECT
+ if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
+      tristate 'MDA text console (dual-headed) (EXPERIMENTAL)' CONFIG_MDA_CONSOLE
+ source drivers/video/Config.in
+ fi
+ fi
+fi
+endmenu
+
+if [ "$CONFIG_XEN_PHYSDEV_ACCESS" = "y" ]; then
+ mainmenu_option next_comment
+ comment 'Sound'
+
+ tristate 'Sound card support' CONFIG_SOUND
+ if [ "$CONFIG_SOUND" != "n" ]; then
+ source drivers/sound/Config.in
+ fi
+ endmenu
+
+ source drivers/usb/Config.in
+
+ source net/bluetooth/Config.in
+fi
+
+mainmenu_option next_comment
+comment 'Kernel hacking'
+
+bool 'Kernel debugging' CONFIG_DEBUG_KERNEL
+if [ "$CONFIG_DEBUG_KERNEL" != "n" ]; then
+ bool ' Check for stack overflows' CONFIG_DEBUG_STACKOVERFLOW
+ bool ' Debug high memory support' CONFIG_DEBUG_HIGHMEM
+ bool ' Debug memory allocations' CONFIG_DEBUG_SLAB
+ bool ' Memory mapped I/O debugging' CONFIG_DEBUG_IOVIRT
+ bool ' Magic SysRq key' CONFIG_MAGIC_SYSRQ
+ bool ' Spinlock debugging' CONFIG_DEBUG_SPINLOCK
+ bool ' Verbose BUG() reporting (adds 70K)' CONFIG_DEBUG_BUGVERBOSE
+ bool ' Load all symbols for debugging' CONFIG_KALLSYMS
+ bool ' Compile the kernel with frame pointers' CONFIG_FRAME_POINTER
+fi
+
+int 'Kernel messages buffer length shift (0 = default)' CONFIG_LOG_BUF_SHIFT 0
+
+endmenu
+
+source crypto/Config.in
+source lib/Config.in
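
The defconfig files that follow are generated from this config.in by the
normal 2.4 config machinery (their headers say "Automatically generated by
make menuconfig: don't edit"); regenerating one after editing config.in is
roughly:

    # hypothetical regeneration of the xen0 defconfig
    make ARCH=xen menuconfig            # drives scripts/Menuconfig over this file
    cp .config arch/xen/defconfig-xen0
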
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/arch/xen/defconfig-xen0
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/arch/xen/defconfig-xen0 Tue Aug 9 15:17:45 2005
@@ -0,0 +1,924 @@
+#
+# Automatically generated by make menuconfig: don't edit
+#
+CONFIG_XEN=y
+CONFIG_X86=y
+CONFIG_ISA=y
+# CONFIG_SBUS is not set
+CONFIG_UID16=y
+
+#
+# Xen
+#
+CONFIG_XEN_PRIVILEGED_GUEST=y
+CONFIG_XEN_PHYSDEV_ACCESS=y
+CONFIG_XEN_SCRUB_PAGES=y
+CONFIG_XEN_NETDEV_FRONTEND=y
+CONFIG_XEN_BLKDEV_FRONTEND=y
+CONFIG_NO_IDLE_HZ=y
+CONFIG_FOREIGN_PAGES=y
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+CONFIG_MODVERSIONS=y
+CONFIG_KMOD=y
+
+#
+# Processor type and features
+#
+CONFIG_M686=y
+# CONFIG_MPENTIUMIII is not set
+# CONFIG_MPENTIUM4 is not set
+# CONFIG_MK7 is not set
+# CONFIG_MK8 is not set
+# CONFIG_MVIAC3_2 is not set
+CONFIG_X86_WP_WORKS_OK=y
+CONFIG_X86_INVLPG=y
+CONFIG_X86_CMPXCHG=y
+CONFIG_X86_XADD=y
+CONFIG_X86_BSWAP=y
+CONFIG_X86_POPAD_OK=y
+# CONFIG_RWSEM_GENERIC_SPINLOCK is not set
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_X86_GOOD_APIC=y
+CONFIG_X86_PGE=y
+CONFIG_X86_USE_PPRO_CHECKSUM=y
+CONFIG_X86_TSC=y
+CONFIG_X86_L1_CACHE_SHIFT=5
+CONFIG_NOHIGHMEM=y
+# CONFIG_HIGHMEM4G is not set
+CONFIG_FORCE_MAX_ZONEORDER=11
+
+#
+# General setup
+#
+CONFIG_NET=y
+CONFIG_PCI=y
+CONFIG_PCI_NAMES=y
+CONFIG_HOTPLUG=y
+
+#
+# PCMCIA/CardBus support
+#
+# CONFIG_PCMCIA is not set
+
+#
+# PCI Hotplug Support
+#
+# CONFIG_HOTPLUG_PCI is not set
+# CONFIG_HOTPLUG_PCI_COMPAQ is not set
+# CONFIG_HOTPLUG_PCI_COMPAQ_NVRAM is not set
+# CONFIG_HOTPLUG_PCI_SHPC is not set
+# CONFIG_HOTPLUG_PCI_SHPC_POLL_EVENT_MODE is not set
+# CONFIG_HOTPLUG_PCI_PCIE is not set
+# CONFIG_HOTPLUG_PCI_PCIE_POLL_EVENT_MODE is not set
+CONFIG_SYSVIPC=y
+# CONFIG_BSD_PROCESS_ACCT is not set
+CONFIG_SYSCTL=y
+CONFIG_KCORE_ELF=y
+# CONFIG_KCORE_AOUT is not set
+CONFIG_BINFMT_AOUT=y
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_MISC is not set
+# CONFIG_OOM_KILLER is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+# CONFIG_MTD is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play configuration
+#
+CONFIG_PNP=y
+# CONFIG_ISAPNP is not set
+
+#
+# Block devices
+#
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_DEV_XD is not set
+# CONFIG_PARIDE is not set
+# CONFIG_BLK_CPQ_DA is not set
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_CISS_SCSI_TAPE is not set
+# CONFIG_CISS_MONITOR_THREAD is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+# CONFIG_BLK_DEV_UMEM is not set
+# CONFIG_BLK_DEV_SX8 is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_NBD=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=4096
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_BLK_STATS is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=y
+# CONFIG_MD_RAID0 is not set
+CONFIG_MD_RAID1=y
+# CONFIG_MD_RAID5 is not set
+# CONFIG_MD_MULTIPATH is not set
+CONFIG_BLK_DEV_LVM=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+CONFIG_PACKET_MMAP=y
+# CONFIG_NETLINK_DEV is not set
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+CONFIG_FILTER=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+# CONFIG_IP_PNP_BOOTP is not set
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_INET_ECN is not set
+# CONFIG_SYN_COOKIES is not set
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_IP_NF_CONNTRACK=m
+CONFIG_IP_NF_FTP=m
+# CONFIG_IP_NF_AMANDA is not set
+CONFIG_IP_NF_TFTP=m
+CONFIG_IP_NF_IRC=m
+# CONFIG_IP_NF_QUEUE is not set
+CONFIG_IP_NF_IPTABLES=y
+# CONFIG_IP_NF_MATCH_LIMIT is not set
+# CONFIG_IP_NF_MATCH_MAC is not set
+# CONFIG_IP_NF_MATCH_PKTTYPE is not set
+# CONFIG_IP_NF_MATCH_MARK is not set
+# CONFIG_IP_NF_MATCH_MULTIPORT is not set
+# CONFIG_IP_NF_MATCH_TOS is not set
+# CONFIG_IP_NF_MATCH_RECENT is not set
+# CONFIG_IP_NF_MATCH_ECN is not set
+# CONFIG_IP_NF_MATCH_DSCP is not set
+# CONFIG_IP_NF_MATCH_AH_ESP is not set
+# CONFIG_IP_NF_MATCH_LENGTH is not set
+# CONFIG_IP_NF_MATCH_TTL is not set
+# CONFIG_IP_NF_MATCH_TCPMSS is not set
+# CONFIG_IP_NF_MATCH_HELPER is not set
+CONFIG_IP_NF_MATCH_STATE=m
+CONFIG_IP_NF_MATCH_CONNTRACK=m
+# CONFIG_IP_NF_MATCH_UNCLEAN is not set
+# CONFIG_IP_NF_MATCH_OWNER is not set
+CONFIG_IP_NF_MATCH_PHYSDEV=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+# CONFIG_IP_NF_TARGET_MIRROR is not set
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_NAT_NEEDED=y
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+# CONFIG_IP_NF_NAT_SNMP_BASIC is not set
+CONFIG_IP_NF_NAT_IRC=m
+CONFIG_IP_NF_NAT_FTP=m
+CONFIG_IP_NF_NAT_TFTP=m
+# CONFIG_IP_NF_MANGLE is not set
+CONFIG_IP_NF_TARGET_LOG=y
+CONFIG_IP_NF_TARGET_ULOG=y
+# CONFIG_IP_NF_TARGET_TCPMSS is not set
+# CONFIG_IP_NF_ARPTABLES is not set
+
+#
+# IP: Virtual Server Configuration
+#
+# CONFIG_IP_VS is not set
+# CONFIG_IPV6 is not set
+# CONFIG_KHTTPD is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+# CONFIG_ATM is not set
+CONFIG_VLAN_8021Q=y
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_DECNET is not set
+CONFIG_BRIDGE=y
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_IPF=m
+CONFIG_BRIDGE_EBT_ARPF=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_VLANF=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_MARKF=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_LLC is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
+# CONFIG_NET_HW_FLOWCONTROL is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+
+#
+# ATA/IDE/MFM/RLL support
+#
+CONFIG_IDE=y
+
+#
+# IDE, ATA and ATAPI Block devices
+#
+CONFIG_BLK_DEV_IDE=y
+# CONFIG_BLK_DEV_HD_IDE is not set
+# CONFIG_BLK_DEV_HD is not set
+# CONFIG_BLK_DEV_IDE_SATA is not set
+CONFIG_BLK_DEV_IDEDISK=y
+CONFIG_IDEDISK_MULTI_MODE=y
+CONFIG_IDEDISK_STROKE=y
+# CONFIG_BLK_DEV_IDECS is not set
+# CONFIG_BLK_DEV_DELKIN is not set
+CONFIG_BLK_DEV_IDECD=y
+CONFIG_BLK_DEV_IDETAPE=y
+CONFIG_BLK_DEV_IDEFLOPPY=y
+CONFIG_BLK_DEV_IDESCSI=y
+CONFIG_IDE_TASK_IOCTL=y
+CONFIG_BLK_DEV_CMD640=y
+CONFIG_BLK_DEV_CMD640_ENHANCED=y
+# CONFIG_BLK_DEV_ISAPNP is not set
+CONFIG_BLK_DEV_IDEPCI=y
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_IDEPCI_SHARE_IRQ=y
+CONFIG_BLK_DEV_IDEDMA_PCI=y
+CONFIG_BLK_DEV_OFFBOARD=y
+# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
+CONFIG_IDEDMA_PCI_AUTO=y
+# CONFIG_IDEDMA_ONLYDISK is not set
+CONFIG_BLK_DEV_IDEDMA=y
+# CONFIG_IDEDMA_PCI_WIP is not set
+CONFIG_BLK_DEV_ADMA100=y
+CONFIG_BLK_DEV_AEC62XX=y
+CONFIG_BLK_DEV_ALI15X3=y
+CONFIG_WDC_ALI15X3=y
+CONFIG_BLK_DEV_AMD74XX=y
+CONFIG_AMD74XX_OVERRIDE=y
+# CONFIG_BLK_DEV_ATIIXP is not set
+CONFIG_BLK_DEV_CMD64X=y
+CONFIG_BLK_DEV_TRIFLEX=y
+CONFIG_BLK_DEV_CY82C693=y
+CONFIG_BLK_DEV_CS5530=y
+CONFIG_BLK_DEV_HPT34X=y
+# CONFIG_HPT34X_AUTODMA is not set
+CONFIG_BLK_DEV_HPT366=y
+CONFIG_BLK_DEV_PIIX=y
+CONFIG_BLK_DEV_NS87415=y
+# CONFIG_BLK_DEV_OPTI621 is not set
+CONFIG_BLK_DEV_PDC202XX_OLD=y
+CONFIG_PDC202XX_BURST=y
+CONFIG_BLK_DEV_PDC202XX_NEW=y
+CONFIG_PDC202XX_FORCE=y
+CONFIG_BLK_DEV_RZ1000=y
+CONFIG_BLK_DEV_SC1200=y
+CONFIG_BLK_DEV_SVWKS=y
+CONFIG_BLK_DEV_SIIMAGE=y
+CONFIG_BLK_DEV_SIS5513=y
+CONFIG_BLK_DEV_SLC90E66=y
+CONFIG_BLK_DEV_TRM290=y
+CONFIG_BLK_DEV_VIA82CXXX=y
+CONFIG_IDE_CHIPSETS=y
+# CONFIG_BLK_DEV_4DRIVES is not set
+# CONFIG_BLK_DEV_ALI14XX is not set
+# CONFIG_BLK_DEV_DTC2278 is not set
+# CONFIG_BLK_DEV_HT6560B is not set
+# CONFIG_BLK_DEV_PDC4030 is not set
+# CONFIG_BLK_DEV_QD65XX is not set
+# CONFIG_BLK_DEV_UMC8672 is not set
+CONFIG_IDEDMA_AUTO=y
+# CONFIG_IDEDMA_IVB is not set
+# CONFIG_DMA_NONPCI is not set
+CONFIG_BLK_DEV_PDC202XX=y
+# CONFIG_BLK_DEV_ATARAID is not set
+# CONFIG_BLK_DEV_ATARAID_PDC is not set
+# CONFIG_BLK_DEV_ATARAID_HPT is not set
+# CONFIG_BLK_DEV_ATARAID_MEDLEY is not set
+# CONFIG_BLK_DEV_ATARAID_SII is not set
+
+#
+# SCSI support
+#
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_SD_EXTRA_DEVS=40
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+CONFIG_CHR_DEV_SG=y
+# CONFIG_SCSI_DEBUG_QUEUES is not set
+# CONFIG_SCSI_MULTI_LUN is not set
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+
+#
+# SCSI low-level drivers
+#
+# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
+# CONFIG_SCSI_7000FASST is not set
+# CONFIG_SCSI_ACARD is not set
+# CONFIG_SCSI_AHA152X is not set
+# CONFIG_SCSI_AHA1542 is not set
+# CONFIG_SCSI_AHA1740 is not set
+CONFIG_SCSI_AACRAID=y
+CONFIG_SCSI_AIC7XXX=y
+CONFIG_AIC7XXX_CMDS_PER_DEVICE=32
+CONFIG_AIC7XXX_RESET_DELAY_MS=15000
+# CONFIG_AIC7XXX_PROBE_EISA_VL is not set
+# CONFIG_AIC7XXX_BUILD_FIRMWARE is not set
+# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
+CONFIG_AIC7XXX_DEBUG_MASK=0
+# CONFIG_AIC7XXX_REG_PRETTY_PRINT is not set
+CONFIG_SCSI_AIC79XX=y
+CONFIG_AIC79XX_CMDS_PER_DEVICE=32
+CONFIG_AIC79XX_RESET_DELAY_MS=15000
+# CONFIG_AIC79XX_BUILD_FIRMWARE is not set
+# CONFIG_AIC79XX_ENABLE_RD_STRM is not set
+# CONFIG_AIC79XX_DEBUG_ENABLE is not set
+CONFIG_AIC79XX_DEBUG_MASK=0
+# CONFIG_AIC79XX_REG_PRETTY_PRINT is not set
+# CONFIG_SCSI_DPT_I2O is not set
+# CONFIG_SCSI_ADVANSYS is not set
+# CONFIG_SCSI_IN2000 is not set
+# CONFIG_SCSI_AM53C974 is not set
+CONFIG_SCSI_MEGARAID=y
+# CONFIG_SCSI_MEGARAID2 is not set
+CONFIG_SCSI_SATA=y
+# CONFIG_SCSI_SATA_AHCI is not set
+# CONFIG_SCSI_SATA_SVW is not set
+CONFIG_SCSI_ATA_PIIX=y
+# CONFIG_SCSI_SATA_NV is not set
+# CONFIG_SCSI_SATA_QSTOR is not set
+CONFIG_SCSI_SATA_PROMISE=y
+CONFIG_SCSI_SATA_SX4=y
+CONFIG_SCSI_SATA_SIL=y
+CONFIG_SCSI_SATA_SIS=y
+# CONFIG_SCSI_SATA_ULI is not set
+CONFIG_SCSI_SATA_VIA=y
+CONFIG_SCSI_SATA_VITESSE=y
+CONFIG_SCSI_BUSLOGIC=y
+# CONFIG_SCSI_OMIT_FLASHPOINT is not set
+# CONFIG_SCSI_CPQFCTS is not set
+# CONFIG_SCSI_DMX3191D is not set
+# CONFIG_SCSI_DTC3280 is not set
+# CONFIG_SCSI_EATA is not set
+# CONFIG_SCSI_EATA_DMA is not set
+# CONFIG_SCSI_EATA_PIO is not set
+# CONFIG_SCSI_FUTURE_DOMAIN is not set
+# CONFIG_SCSI_GDTH is not set
+# CONFIG_SCSI_GENERIC_NCR5380 is not set
+# CONFIG_SCSI_IPS is not set
+# CONFIG_SCSI_INITIO is not set
+# CONFIG_SCSI_INIA100 is not set
+# CONFIG_SCSI_NCR53C406A is not set
+# CONFIG_SCSI_NCR53C7xx is not set
+CONFIG_SCSI_SYM53C8XX_2=y
+CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
+CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
+CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
+# CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set
+# CONFIG_SCSI_PAS16 is not set
+# CONFIG_SCSI_PCI2000 is not set
+# CONFIG_SCSI_PCI2220I is not set
+# CONFIG_SCSI_PSI240I is not set
+# CONFIG_SCSI_QLOGIC_FAS is not set
+# CONFIG_SCSI_QLOGIC_ISP is not set
+# CONFIG_SCSI_QLOGIC_FC is not set
+# CONFIG_SCSI_QLOGIC_1280 is not set
+# CONFIG_SCSI_SEAGATE is not set
+# CONFIG_SCSI_SIM710 is not set
+# CONFIG_SCSI_SYM53C416 is not set
+# CONFIG_SCSI_DC390T is not set
+# CONFIG_SCSI_T128 is not set
+# CONFIG_SCSI_U14_34F is not set
+# CONFIG_SCSI_ULTRASTOR is not set
+# CONFIG_SCSI_NSP32 is not set
+# CONFIG_SCSI_DEBUG is not set
+
+#
+# Fusion MPT device support
+#
+# CONFIG_FUSION is not set
+# CONFIG_FUSION_BOOT is not set
+# CONFIG_FUSION_ISENSE is not set
+# CONFIG_FUSION_CTL is not set
+# CONFIG_FUSION_LAN is not set
+
+#
+# IEEE 1394 (FireWire) support (EXPERIMENTAL)
+#
+# CONFIG_IEEE1394 is not set
+
+#
+# I2O device support
+#
+# CONFIG_I2O is not set
+# CONFIG_I2O_PCI is not set
+# CONFIG_I2O_BLOCK is not set
+# CONFIG_I2O_LAN is not set
+# CONFIG_I2O_SCSI is not set
+# CONFIG_I2O_PROC is not set
+
+#
+# Network device support
+#
+CONFIG_NETDEVICES=y
+
+#
+# ARCnet devices
+#
+# CONFIG_ARCNET is not set
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_ETHERTAP is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+CONFIG_NET_ETHERNET=y
+# CONFIG_SUNLANCE is not set
+# CONFIG_HAPPYMEAL is not set
+# CONFIG_SUNBMAC is not set
+# CONFIG_SUNQE is not set
+# CONFIG_SUNGEM is not set
+CONFIG_NET_VENDOR_3COM=y
+# CONFIG_EL1 is not set
+# CONFIG_EL2 is not set
+# CONFIG_ELPLUS is not set
+# CONFIG_EL16 is not set
+# CONFIG_EL3 is not set
+# CONFIG_3C515 is not set
+# CONFIG_ELMC is not set
+# CONFIG_ELMC_II is not set
+CONFIG_VORTEX=y
+# CONFIG_TYPHOON is not set
+# CONFIG_LANCE is not set
+# CONFIG_NET_VENDOR_SMC is not set
+# CONFIG_NET_VENDOR_RACAL is not set
+# CONFIG_AT1700 is not set
+# CONFIG_DEPCA is not set
+# CONFIG_HP100 is not set
+# CONFIG_NET_ISA is not set
+CONFIG_NET_PCI=y
+CONFIG_PCNET32=y
+# CONFIG_AMD8111_ETH is not set
+# CONFIG_ADAPTEC_STARFIRE is not set
+# CONFIG_AC3200 is not set
+# CONFIG_APRICOT is not set
+# CONFIG_B44 is not set
+# CONFIG_CS89x0 is not set
+# CONFIG_TULIP is not set
+# CONFIG_DE4X5 is not set
+# CONFIG_DGRS is not set
+# CONFIG_DM9102 is not set
+# CONFIG_EEPRO100 is not set
+# CONFIG_EEPRO100_PIO is not set
+CONFIG_E100=y
+# CONFIG_LNE390 is not set
+# CONFIG_FEALNX is not set
+# CONFIG_NATSEMI is not set
+CONFIG_NE2K_PCI=y
+# CONFIG_FORCEDETH is not set
+# CONFIG_NE3210 is not set
+# CONFIG_ES3210 is not set
+# CONFIG_8139CP is not set
+# CONFIG_8139TOO is not set
+# CONFIG_8139TOO_PIO is not set
+# CONFIG_8139TOO_TUNE_TWISTER is not set
+# CONFIG_8139TOO_8129 is not set
+# CONFIG_8139_OLD_RX_RESET is not set
+# CONFIG_SIS900 is not set
+# CONFIG_EPIC100 is not set
+# CONFIG_SUNDANCE is not set
+# CONFIG_SUNDANCE_MMIO is not set
+# CONFIG_TLAN is not set
+# CONFIG_VIA_RHINE is not set
+# CONFIG_VIA_RHINE_MMIO is not set
+# CONFIG_WINBOND_840 is not set
+# CONFIG_NET_POCKET is not set
+
+#
+# Ethernet (1000 Mbit)
+#
+# CONFIG_ACENIC is not set
+# CONFIG_DL2K is not set
+CONFIG_E1000=y
+# CONFIG_E1000_NAPI is not set
+# CONFIG_MYRI_SBUS is not set
+# CONFIG_NS83820 is not set
+# CONFIG_HAMACHI is not set
+# CONFIG_YELLOWFIN is not set
+# CONFIG_R8169 is not set
+# CONFIG_SK98LIN is not set
+CONFIG_TIGON3=y
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+# CONFIG_PLIP is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# Token Ring devices
+#
+# CONFIG_TR is not set
+# CONFIG_NET_FC is not set
+# CONFIG_RCPCI is not set
+# CONFIG_SHAPER is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+
+#
+# Amateur Radio support
+#
+# CONFIG_HAMRADIO is not set
+
+#
+# IrDA (infrared) support
+#
+# CONFIG_IRDA is not set
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Old CD-ROM drivers (not SCSI, not IDE)
+#
+# CONFIG_CD_NO_IDESCSI is not set
+
+#
+# Input core support
+#
+# CONFIG_INPUT is not set
+# CONFIG_INPUT_KEYBDEV is not set
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_UINPUT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+# CONFIG_SERIAL is not set
+# CONFIG_SERIAL_EXTENDED is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+CONFIG_UNIX98_PTYS=y
+CONFIG_UNIX98_PTY_COUNT=256
+
+#
+# I2C support
+#
+# CONFIG_I2C is not set
+
+#
+# Mice
+#
+# CONFIG_BUSMOUSE is not set
+CONFIG_MOUSE=y
+CONFIG_PSMOUSE=y
+# CONFIG_82C710_MOUSE is not set
+# CONFIG_PC110_PAD is not set
+# CONFIG_MK712_MOUSE is not set
+
+#
+# Joysticks
+#
+# CONFIG_INPUT_GAMEPORT is not set
+# CONFIG_QIC02_TAPE is not set
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_IPMI_PANIC_EVENT is not set
+# CONFIG_IPMI_DEVICE_INTERFACE is not set
+# CONFIG_IPMI_KCS is not set
+# CONFIG_IPMI_WATCHDOG is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+# CONFIG_SCx200 is not set
+# CONFIG_SCx200_GPIO is not set
+# CONFIG_AMD_RNG is not set
+# CONFIG_INTEL_RNG is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_AMD_PM768 is not set
+# CONFIG_NVRAM is not set
+# CONFIG_RTC is not set
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+# CONFIG_SONYPI is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_FTAPE is not set
+# CONFIG_AGP is not set
+
+#
+# Direct Rendering Manager (XFree86 DRI support)
+#
+# CONFIG_DRM is not set
+# CONFIG_MWAVE is not set
+# CONFIG_OBMOUSE is not set
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# File systems
+#
+# CONFIG_QUOTA is not set
+# CONFIG_QFMT_V2 is not set
+CONFIG_AUTOFS_FS=y
+CONFIG_AUTOFS4_FS=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_REISERFS_CHECK is not set
+# CONFIG_REISERFS_PROC_INFO is not set
+# CONFIG_ADFS_FS is not set
+# CONFIG_ADFS_FS_RW is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BEFS_DEBUG is not set
+# CONFIG_BFS_FS is not set
+CONFIG_EXT3_FS=y
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_UMSDOS_FS=y
+CONFIG_VFAT_FS=y
+# CONFIG_EFS_FS is not set
+# CONFIG_JFFS_FS is not set
+# CONFIG_JFFS2_FS is not set
+# CONFIG_CRAMFS is not set
+CONFIG_TMPFS=y
+CONFIG_RAMFS=y
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+# CONFIG_JFS_FS is not set
+# CONFIG_JFS_DEBUG is not set
+# CONFIG_JFS_STATISTICS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_NTFS_FS is not set
+# CONFIG_NTFS_RW is not set
+# CONFIG_HPFS_FS is not set
+CONFIG_PROC_FS=y
+# CONFIG_DEVFS_FS is not set
+# CONFIG_DEVFS_MOUNT is not set
+# CONFIG_DEVFS_DEBUG is not set
+CONFIG_DEVPTS_FS=y
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_QNX4FS_RW is not set
+# CONFIG_ROMFS_FS is not set
+CONFIG_EXT2_FS=y
+# CONFIG_SYSV_FS is not set
+# CONFIG_UDF_FS is not set
+# CONFIG_UDF_RW is not set
+# CONFIG_UFS_FS is not set
+# CONFIG_UFS_FS_WRITE is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_XFS_QUOTA is not set
+# CONFIG_XFS_RT is not set
+# CONFIG_XFS_TRACE is not set
+# CONFIG_XFS_DEBUG is not set
+
+#
+# Network File Systems
+#
+# CONFIG_CODA_FS is not set
+# CONFIG_INTERMEZZO_FS is not set
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_DIRECTIO is not set
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=y
+CONFIG_NFSD_V3=y
+# CONFIG_NFSD_TCP is not set
+CONFIG_SUNRPC=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+# CONFIG_SMB_FS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_NCPFS_PACKET_SIGNING is not set
+# CONFIG_NCPFS_IOCTL_LOCKING is not set
+# CONFIG_NCPFS_STRONG is not set
+# CONFIG_NCPFS_NFS_NS is not set
+# CONFIG_NCPFS_OS2_NS is not set
+# CONFIG_NCPFS_SMALLDOS is not set
+# CONFIG_NCPFS_NLS is not set
+# CONFIG_NCPFS_EXTRAS is not set
+CONFIG_ZISOFS_FS=y
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_BSD_DISKLABEL is not set
+# CONFIG_MINIX_SUBPARTITION is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_UNIXWARE_DISKLABEL is not set
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_EFI_PARTITION is not set
+# CONFIG_SMB_NLS is not set
+CONFIG_NLS=y
+
+#
+# Native Language Support
+#
+CONFIG_NLS_DEFAULT="iso8859-1"
+# CONFIG_NLS_CODEPAGE_437 is not set
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+
+#
+# Console drivers
+#
+CONFIG_XEN_CONSOLE=y
+CONFIG_VGA_CONSOLE=y
+CONFIG_DUMMY_CONSOLE=y
+# CONFIG_VIDEO_SELECT is not set
+# CONFIG_MDA_CONSOLE is not set
+
+#
+# Frame-buffer support
+#
+# CONFIG_FB is not set
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+# CONFIG_USB is not set
+
+#
+# Support for USB gadgets
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# Bluetooth support
+#
+# CONFIG_BLUEZ is not set
+
+#
+# Kernel hacking
+#
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_STACKOVERFLOW is not set
+# CONFIG_DEBUG_HIGHMEM is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_IOVIRT is not set
+# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_KALLSYMS=y
+# CONFIG_FRAME_POINTER is not set
+CONFIG_LOG_BUF_SHIFT=0
+
+#
+# Cryptographic options
+#
+CONFIG_CRYPTO=y
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=m
+CONFIG_CRYPTO_SHA1=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+# CONFIG_CRYPTO_WP512 is not set
+CONFIG_CRYPTO_DES=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_AES=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_DEFLATE=m
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Library routines
+#
+# CONFIG_CRC32 is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=m
+# CONFIG_FW_LOADER is not set
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/arch/xen/defconfig-xenU
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/arch/xen/defconfig-xenU Tue Aug 9 15:17:45 2005
@@ -0,0 +1,560 @@
+#
+# Automatically generated make config: don't edit
+#
+CONFIG_XEN=y
+CONFIG_X86=y
+CONFIG_ISA=y
+# CONFIG_SBUS is not set
+CONFIG_UID16=y
+
+#
+# Xen
+#
+# CONFIG_XEN_PRIVILEGED_GUEST is not set
+# CONFIG_XEN_PHYSDEV_ACCESS is not set
+CONFIG_XEN_SCRUB_PAGES=y
+CONFIG_XEN_NETDEV_FRONTEND=y
+CONFIG_XEN_BLKDEV_FRONTEND=y
+CONFIG_NO_IDLE_HZ=y
+# CONFIG_FOREIGN_PAGES is not set
+CONFIG_NETDEVICES=y
+# CONFIG_VT is not set
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+CONFIG_MODVERSIONS=y
+CONFIG_KMOD=y
+
+#
+# Processor type and features
+#
+CONFIG_M686=y
+# CONFIG_MPENTIUMIII is not set
+# CONFIG_MPENTIUM4 is not set
+# CONFIG_MK7 is not set
+# CONFIG_MK8 is not set
+# CONFIG_MVIAC3_2 is not set
+CONFIG_X86_WP_WORKS_OK=y
+CONFIG_X86_INVLPG=y
+CONFIG_X86_CMPXCHG=y
+CONFIG_X86_XADD=y
+CONFIG_X86_BSWAP=y
+CONFIG_X86_POPAD_OK=y
+# CONFIG_RWSEM_GENERIC_SPINLOCK is not set
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_X86_GOOD_APIC=y
+CONFIG_X86_PGE=y
+CONFIG_X86_USE_PPRO_CHECKSUM=y
+CONFIG_X86_TSC=y
+CONFIG_X86_L1_CACHE_SHIFT=5
+CONFIG_NOHIGHMEM=y
+# CONFIG_HIGHMEM4G is not set
+CONFIG_FORCE_MAX_ZONEORDER=11
+
+#
+# General setup
+#
+CONFIG_NET=y
+CONFIG_SYSVIPC=y
+# CONFIG_BSD_PROCESS_ACCT is not set
+CONFIG_SYSCTL=y
+CONFIG_KCORE_ELF=y
+# CONFIG_KCORE_AOUT is not set
+CONFIG_BINFMT_AOUT=y
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_MISC is not set
+# CONFIG_OOM_KILLER is not set
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+CONFIG_PACKET_MMAP=y
+# CONFIG_NETLINK_DEV is not set
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+CONFIG_FILTER=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+# CONFIG_IP_PNP_BOOTP is not set
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_INET_ECN is not set
+# CONFIG_SYN_COOKIES is not set
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_IP_NF_CONNTRACK=y
+CONFIG_IP_NF_FTP=y
+# CONFIG_IP_NF_AMANDA is not set
+CONFIG_IP_NF_TFTP=y
+CONFIG_IP_NF_IRC=y
+# CONFIG_IP_NF_QUEUE is not set
+CONFIG_IP_NF_IPTABLES=y
+# CONFIG_IP_NF_MATCH_LIMIT is not set
+# CONFIG_IP_NF_MATCH_MAC is not set
+# CONFIG_IP_NF_MATCH_PKTTYPE is not set
+# CONFIG_IP_NF_MATCH_MARK is not set
+# CONFIG_IP_NF_MATCH_MULTIPORT is not set
+# CONFIG_IP_NF_MATCH_TOS is not set
+# CONFIG_IP_NF_MATCH_RECENT is not set
+# CONFIG_IP_NF_MATCH_ECN is not set
+# CONFIG_IP_NF_MATCH_DSCP is not set
+# CONFIG_IP_NF_MATCH_AH_ESP is not set
+# CONFIG_IP_NF_MATCH_LENGTH is not set
+# CONFIG_IP_NF_MATCH_TTL is not set
+# CONFIG_IP_NF_MATCH_TCPMSS is not set
+# CONFIG_IP_NF_MATCH_HELPER is not set
+CONFIG_IP_NF_MATCH_STATE=y
+CONFIG_IP_NF_MATCH_CONNTRACK=y
+# CONFIG_IP_NF_MATCH_UNCLEAN is not set
+# CONFIG_IP_NF_MATCH_OWNER is not set
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+# CONFIG_IP_NF_TARGET_MIRROR is not set
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_NAT_NEEDED=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+# CONFIG_IP_NF_NAT_SNMP_BASIC is not set
+CONFIG_IP_NF_NAT_IRC=y
+CONFIG_IP_NF_NAT_FTP=y
+CONFIG_IP_NF_NAT_TFTP=y
+# CONFIG_IP_NF_MANGLE is not set
+CONFIG_IP_NF_TARGET_LOG=y
+CONFIG_IP_NF_TARGET_ULOG=y
+# CONFIG_IP_NF_TARGET_TCPMSS is not set
+# CONFIG_IP_NF_ARPTABLES is not set
+
+#
+# IP: Virtual Server Configuration
+#
+# CONFIG_IP_VS is not set
+# CONFIG_IPV6 is not set
+# CONFIG_KHTTPD is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+# CONFIG_ATM is not set
+CONFIG_VLAN_8021Q=y
+
+#
+#
+#
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_DECNET is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_LLC is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
+# CONFIG_NET_HW_FLOWCONTROL is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+
+#
+# SCSI support
+#
+CONFIG_SCSI=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+CONFIG_SD_EXTRA_DEVS=40
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+CONFIG_CHR_DEV_SG=y
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+# CONFIG_SCSI_DEBUG_QUEUES is not set
+# CONFIG_SCSI_MULTI_LUN is not set
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+
+#
+# SCSI low-level drivers
+#
+# CONFIG_SCSI_7000FASST is not set
+# CONFIG_SCSI_ACARD is not set
+# CONFIG_SCSI_AHA152X is not set
+# CONFIG_SCSI_AHA1542 is not set
+# CONFIG_SCSI_AHA1740 is not set
+# CONFIG_SCSI_AACRAID is not set
+# CONFIG_SCSI_AIC7XXX is not set
+# CONFIG_SCSI_AIC79XX is not set
+# CONFIG_SCSI_AIC7XXX_OLD is not set
+# CONFIG_SCSI_DPT_I2O is not set
+# CONFIG_SCSI_ADVANSYS is not set
+# CONFIG_SCSI_IN2000 is not set
+# CONFIG_SCSI_AM53C974 is not set
+# CONFIG_SCSI_MEGARAID is not set
+# CONFIG_SCSI_MEGARAID2 is not set
+# CONFIG_SCSI_SATA is not set
+# CONFIG_SCSI_SATA_AHCI is not set
+# CONFIG_SCSI_SATA_SVW is not set
+# CONFIG_SCSI_ATA_PIIX is not set
+# CONFIG_SCSI_SATA_NV is not set
+# CONFIG_SCSI_SATA_QSTOR is not set
+# CONFIG_SCSI_SATA_PROMISE is not set
+# CONFIG_SCSI_SATA_SX4 is not set
+# CONFIG_SCSI_SATA_SIL is not set
+# CONFIG_SCSI_SATA_SIS is not set
+# CONFIG_SCSI_SATA_ULI is not set
+# CONFIG_SCSI_SATA_VIA is not set
+# CONFIG_SCSI_SATA_VITESSE is not set
+# CONFIG_SCSI_BUSLOGIC is not set
+# CONFIG_SCSI_DMX3191D is not set
+# CONFIG_SCSI_DTC3280 is not set
+# CONFIG_SCSI_EATA is not set
+# CONFIG_SCSI_EATA_DMA is not set
+# CONFIG_SCSI_EATA_PIO is not set
+# CONFIG_SCSI_FUTURE_DOMAIN is not set
+# CONFIG_SCSI_GDTH is not set
+# CONFIG_SCSI_GENERIC_NCR5380 is not set
+# CONFIG_SCSI_IPS is not set
+# CONFIG_SCSI_INITIO is not set
+# CONFIG_SCSI_INIA100 is not set
+# CONFIG_SCSI_PPA is not set
+# CONFIG_SCSI_IMM is not set
+# CONFIG_SCSI_NCR53C406A is not set
+# CONFIG_SCSI_NCR53C7xx is not set
+# CONFIG_SCSI_PAS16 is not set
+# CONFIG_SCSI_PCI2000 is not set
+# CONFIG_SCSI_PCI2220I is not set
+# CONFIG_SCSI_PSI240I is not set
+# CONFIG_SCSI_QLOGIC_FAS is not set
+# CONFIG_SCSI_SEAGATE is not set
+# CONFIG_SCSI_SIM710 is not set
+# CONFIG_SCSI_SYM53C416 is not set
+# CONFIG_SCSI_T128 is not set
+# CONFIG_SCSI_U14_34F is not set
+# CONFIG_SCSI_ULTRASTOR is not set
+# CONFIG_SCSI_NSP32 is not set
+# CONFIG_SCSI_DEBUG is not set
+
+#
+# Block devices
+#
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_NBD=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=4096
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_BLK_STATS is not set
+# CONFIG_BLK_DEV_HD is not set
+
+#
+# Character devices
+#
+# CONFIG_VT is not set
+# CONFIG_SERIAL is not set
+# CONFIG_SERIAL_EXTENDED is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+CONFIG_UNIX98_PTYS=y
+CONFIG_UNIX98_PTY_COUNT=256
+# CONFIG_PRINTER is not set
+# CONFIG_PPDEV is not set
+# CONFIG_TIPAR is not set
+
+#
+# I2C support
+#
+# CONFIG_I2C is not set
+
+#
+# Mice
+#
+# CONFIG_BUSMOUSE is not set
+CONFIG_MOUSE=y
+CONFIG_PSMOUSE=y
+# CONFIG_82C710_MOUSE is not set
+# CONFIG_PC110_PAD is not set
+# CONFIG_MK712_MOUSE is not set
+
+#
+# Joysticks
+#
+# CONFIG_INPUT_GAMEPORT is not set
+# CONFIG_INPUT_NS558 is not set
+# CONFIG_INPUT_LIGHTNING is not set
+# CONFIG_INPUT_PCIGAME is not set
+# CONFIG_INPUT_CS461X is not set
+# CONFIG_INPUT_EMU10K1 is not set
+# CONFIG_INPUT_SERIO is not set
+# CONFIG_INPUT_SERPORT is not set
+
+#
+# Joysticks
+#
+# CONFIG_INPUT_ANALOG is not set
+# CONFIG_INPUT_A3D is not set
+# CONFIG_INPUT_ADI is not set
+# CONFIG_INPUT_COBRA is not set
+# CONFIG_INPUT_GF2K is not set
+# CONFIG_INPUT_GRIP is not set
+# CONFIG_INPUT_INTERACT is not set
+# CONFIG_INPUT_TMDC is not set
+# CONFIG_INPUT_SIDEWINDER is not set
+# CONFIG_INPUT_IFORCE_USB is not set
+# CONFIG_INPUT_IFORCE_232 is not set
+# CONFIG_INPUT_WARRIOR is not set
+# CONFIG_INPUT_MAGELLAN is not set
+# CONFIG_INPUT_SPACEORB is not set
+# CONFIG_INPUT_SPACEBALL is not set
+# CONFIG_INPUT_STINGER is not set
+# CONFIG_INPUT_DB9 is not set
+# CONFIG_INPUT_GAMECON is not set
+# CONFIG_INPUT_TURBOGRAFX is not set
+# CONFIG_QIC02_TAPE is not set
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_IPMI_PANIC_EVENT is not set
+# CONFIG_IPMI_DEVICE_INTERFACE is not set
+# CONFIG_IPMI_KCS is not set
+# CONFIG_IPMI_WATCHDOG is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+# CONFIG_SCx200 is not set
+# CONFIG_SCx200_GPIO is not set
+# CONFIG_AMD_RNG is not set
+# CONFIG_INTEL_RNG is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_AMD_PM768 is not set
+# CONFIG_NVRAM is not set
+# CONFIG_RTC is not set
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+# CONFIG_SONYPI is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_FTAPE is not set
+# CONFIG_AGP is not set
+
+#
+# Direct Rendering Manager (XFree86 DRI support)
+#
+# CONFIG_DRM is not set
+# CONFIG_MWAVE is not set
+# CONFIG_OBMOUSE is not set
+
+#
+# File systems
+#
+# CONFIG_QUOTA is not set
+# CONFIG_QFMT_V2 is not set
+CONFIG_AUTOFS_FS=y
+CONFIG_AUTOFS4_FS=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_REISERFS_CHECK is not set
+# CONFIG_REISERFS_PROC_INFO is not set
+# CONFIG_ADFS_FS is not set
+# CONFIG_ADFS_FS_RW is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BEFS_DEBUG is not set
+# CONFIG_BFS_FS is not set
+CONFIG_EXT3_FS=y
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_UMSDOS_FS=y
+CONFIG_VFAT_FS=y
+# CONFIG_EFS_FS is not set
+# CONFIG_JFFS_FS is not set
+# CONFIG_JFFS2_FS is not set
+# CONFIG_CRAMFS is not set
+CONFIG_TMPFS=y
+CONFIG_RAMFS=y
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+# CONFIG_JFS_FS is not set
+# CONFIG_JFS_DEBUG is not set
+# CONFIG_JFS_STATISTICS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_NTFS_FS is not set
+# CONFIG_NTFS_RW is not set
+# CONFIG_HPFS_FS is not set
+CONFIG_PROC_FS=y
+# CONFIG_DEVFS_FS is not set
+# CONFIG_DEVFS_MOUNT is not set
+# CONFIG_DEVFS_DEBUG is not set
+CONFIG_DEVPTS_FS=y
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_QNX4FS_RW is not set
+# CONFIG_ROMFS_FS is not set
+CONFIG_EXT2_FS=y
+# CONFIG_SYSV_FS is not set
+# CONFIG_UDF_FS is not set
+# CONFIG_UDF_RW is not set
+# CONFIG_UFS_FS is not set
+# CONFIG_UFS_FS_WRITE is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_XFS_QUOTA is not set
+# CONFIG_XFS_RT is not set
+# CONFIG_XFS_TRACE is not set
+# CONFIG_XFS_DEBUG is not set
+
+#
+# Network File Systems
+#
+# CONFIG_CODA_FS is not set
+# CONFIG_INTERMEZZO_FS is not set
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_DIRECTIO is not set
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=y
+CONFIG_NFSD_V3=y
+# CONFIG_NFSD_TCP is not set
+CONFIG_SUNRPC=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+# CONFIG_SMB_FS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_NCPFS_PACKET_SIGNING is not set
+# CONFIG_NCPFS_IOCTL_LOCKING is not set
+# CONFIG_NCPFS_STRONG is not set
+# CONFIG_NCPFS_NFS_NS is not set
+# CONFIG_NCPFS_OS2_NS is not set
+# CONFIG_NCPFS_SMALLDOS is not set
+# CONFIG_NCPFS_NLS is not set
+# CONFIG_NCPFS_EXTRAS is not set
+CONFIG_ZISOFS_FS=y
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_BSD_DISKLABEL is not set
+# CONFIG_MINIX_SUBPARTITION is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_UNIXWARE_DISKLABEL is not set
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_EFI_PARTITION is not set
+# CONFIG_SMB_NLS is not set
+CONFIG_NLS=y
+
+#
+# Native Language Support
+#
+CONFIG_NLS_DEFAULT="iso8859-1"
+# CONFIG_NLS_CODEPAGE_437 is not set
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+
+#
+# Console drivers
+#
+CONFIG_XEN_CONSOLE=y
+
+#
+# Kernel hacking
+#
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_STACKOVERFLOW is not set
+# CONFIG_DEBUG_HIGHMEM is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_IOVIRT is not set
+# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_KALLSYMS=y
+# CONFIG_FRAME_POINTER is not set
+CONFIG_LOG_BUF_SHIFT=0
+
+#
+# Cryptographic options
+#
+# CONFIG_CRYPTO is not set
+
+#
+# Library routines
+#
+# CONFIG_CRC32 is not set
+CONFIG_ZLIB_INFLATE=y
+# CONFIG_ZLIB_DEFLATE is not set
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/arch/xen/drivers/balloon/Makefile
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/arch/xen/drivers/balloon/Makefile Tue Aug 9 15:17:45 2005
@@ -0,0 +1,4 @@
+O_TARGET := drv.o
+export-objs := balloon.o
+obj-y := balloon.o
+include $(TOPDIR)/Rules.make
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/arch/xen/drivers/blkif/Makefile
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/arch/xen/drivers/blkif/Makefile Tue Aug 9 15:17:45 2005
@@ -0,0 +1,10 @@
+
+O_TARGET := drv.o
+
+subdir-$(CONFIG_XEN_BLKDEV_FRONTEND) += frontend
+obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += frontend/drv.o
+
+subdir-$(CONFIG_XEN_PHYSDEV_ACCESS) += backend
+obj-$(CONFIG_XEN_PHYSDEV_ACCESS) += backend/drv.o
+
+include $(TOPDIR)/Rules.make
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/arch/xen/drivers/blkif/backend/Makefile
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/arch/xen/drivers/blkif/backend/Makefile Tue Aug 9 15:17:45 2005
@@ -0,0 +1,3 @@
+O_TARGET := drv.o
+obj-y := main.o control.o interface.o vbd.o
+include $(TOPDIR)/Rules.make
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/arch/xen/drivers/blkif/frontend/Makefile
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/arch/xen/drivers/blkif/frontend/Makefile Tue Aug 9 15:17:45 2005
@@ -0,0 +1,3 @@
+O_TARGET := drv.o
+obj-y := blkfront.o vbd.o
+include $(TOPDIR)/Rules.make
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/arch/xen/drivers/blkif/frontend/common.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/arch/xen/drivers/blkif/frontend/common.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,93 @@
+/******************************************************************************
+ * arch/xen/drivers/blkif/frontend/common.h
+ *
+ * Shared definitions between all levels of XenoLinux Virtual block devices.
+ */
+
+#ifndef __XEN_DRIVERS_COMMON_H__
+#define __XEN_DRIVERS_COMMON_H__
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/hdreg.h>
+#include <linux/blkdev.h>
+#include <linux/major.h>
+#include <asm-xen/xen-public/xen.h>
+#include <asm/io.h>
+#include <asm/atomic.h>
+#include <asm/uaccess.h>
+#include <asm-xen/xen-public/io/blkif.h>
+
+#if 1
+#define IPRINTK(fmt, args...) \
+ printk(KERN_INFO "xen_blk: " fmt, ##args)
+#else
+#define IPRINTK(fmt, args...) ((void)0)
+#endif
+
+#if 1
+#define WPRINTK(fmt, args...) \
+ printk(KERN_WARNING "xen_blk: " fmt, ##args)
+#else
+#define WPRINTK(fmt, args...) ((void)0)
+#endif
+
+#if 0
+#define DPRINTK(_f, _a...) printk ( KERN_ALERT _f , ## _a )
+#else
+#define DPRINTK(_f, _a...) ((void)0)
+#endif
+
+#if 0
+#define DPRINTK_IOCTL(_f, _a...) printk ( KERN_ALERT _f , ## _a )
+#else
+#define DPRINTK_IOCTL(_f, _a...) ((void)0)
+#endif
+
+/* Private gendisk->flags[] values. */
+#define GENHD_FL_XEN 2 /* Is unit a Xen block device? */
+#define GENHD_FL_VIRT_PARTNS 4 /* Are unit partitions virtual? */
+
+/*
+ * We have one of these per vbd, whether ide, scsi or 'other'.
+ * They hang in an array off the gendisk structure. We may end up putting
+ * all kinds of interesting stuff here :-)
+ */
+typedef struct xl_disk {
+ int usage;
+} xl_disk_t;
+
+extern int blkif_open(struct inode *inode, struct file *filep);
+extern int blkif_release(struct inode *inode, struct file *filep);
+extern int blkif_ioctl(struct inode *inode, struct file *filep,
+ unsigned command, unsigned long argument);
+extern int blkif_check(kdev_t dev);
+extern int blkif_revalidate(kdev_t dev);
+extern void blkif_control_send(blkif_request_t *req, blkif_response_t *rsp);
+extern void do_blkif_request (request_queue_t *rq);
+
+extern void xlvbd_update_vbds(void);
+
+static inline xl_disk_t *xldev_to_xldisk(kdev_t xldev)
+{
+ struct gendisk *gd = get_gendisk(xldev);
+
+ if ( gd == NULL )
+ return NULL;
+
+ return (xl_disk_t *)gd->real_devices +
+ (MINOR(xldev) >> gd->minor_shift);
+}
+
+
+/* Virtual block-device subsystem. */
+extern int xlvbd_init(void);
+extern void xlvbd_cleanup(void);
+
+#endif /* __XEN_DRIVERS_COMMON_H__ */
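
A quick way to see the minor-number arithmetic used by xldev_to_xldisk() above: each unit on a major owns 2^minor_shift consecutive minors, so shifting the minor recovers the per-unit index and masking recovers the partition number. A minimal standalone sketch of that arithmetic (userspace, illustrative only; the shift values match the XLIDE/XLSCSI constants defined in vbd.c below):

#include <stdio.h>

/* Shift values as defined in frontend/vbd.c below (illustrative only). */
#define XLIDE_PARTN_SHIFT  6   /* 64 minors per ide unit  */
#define XLSCSI_PARTN_SHIFT 4   /* 16 minors per scsi unit */

static void split_minor(int minor, int shift)
{
    int unit  = minor >> shift;              /* index into gd->real_devices[] */
    int partn = minor & ((1 << shift) - 1);  /* partition within the unit */
    printf("minor %3d -> unit %d, partition %d\n", minor, unit, partn);
}

int main(void)
{
    split_minor(65, XLIDE_PARTN_SHIFT);   /* e.g. hdb1: unit 1, partition 1 */
    split_minor(17, XLSCSI_PARTN_SHIFT);  /* e.g. sdb1: unit 1, partition 1 */
    return 0;
}
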
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/arch/xen/drivers/blkif/frontend/vbd.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/arch/xen/drivers/blkif/frontend/vbd.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,564 @@
+/******************************************************************************
+ * arch/xen/drivers/blkif/frontend/vbd.c
+ *
+ * Xenolinux virtual block-device driver.
+ *
+ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
+ * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
+ */
+
+#include "common.h"
+#include <linux/blk.h>
+
+/*
+ * For convenience we distinguish between ide, scsi and 'other' (i.e.
+ * potentially combinations of the two) in the naming scheme and in a few
+ * other places (like default readahead, etc).
+ */
+#define XLIDE_MAJOR_NAME "hd"
+#define XLSCSI_MAJOR_NAME "sd"
+#define XLVBD_MAJOR_NAME "xvd"
+
+#define XLIDE_DEVS_PER_MAJOR 2
+#define XLSCSI_DEVS_PER_MAJOR 16
+#define XLVBD_DEVS_PER_MAJOR 16
+
+#define XLIDE_PARTN_SHIFT 6 /* amount to shift minor to get 'real' minor */
+#define XLIDE_MAX_PART (1 << XLIDE_PARTN_SHIFT) /* minors per ide vbd */
+
+#define XLSCSI_PARTN_SHIFT 4 /* amount to shift minor to get 'real' minor */
+#define XLSCSI_MAX_PART (1 << XLSCSI_PARTN_SHIFT) /* minors per scsi vbd */
+
+#define XLVBD_PARTN_SHIFT 4 /* amount to shift minor to get 'real' minor */
+#define XLVBD_MAX_PART (1 << XLVBD_PARTN_SHIFT) /* minors per 'other' vbd */
+
+/* The below are for the generic drivers/block/ll_rw_block.c code. */
+static int xlide_blksize_size[256];
+static int xlide_hardsect_size[256];
+static int xlide_max_sectors[256];
+static int xlscsi_blksize_size[256];
+static int xlscsi_hardsect_size[256];
+static int xlscsi_max_sectors[256];
+static int xlvbd_blksize_size[256];
+static int xlvbd_hardsect_size[256];
+static int xlvbd_max_sectors[256];
+
+/* Information about our VBDs. */
+#define MAX_VBDS 64
+static int nr_vbds;
+static vdisk_t *vbd_info;
+
+static struct block_device_operations xlvbd_block_fops =
+{
+ open: blkif_open,
+ release: blkif_release,
+ ioctl: blkif_ioctl,
+ check_media_change: blkif_check,
+ revalidate: blkif_revalidate,
+};
+
+static int xlvbd_get_vbd_info(vdisk_t *disk_info)
+{
+ vdisk_t *buf = (vdisk_t *)__get_free_page(GFP_KERNEL);
+ blkif_request_t req;
+ blkif_response_t rsp;
+ int nr;
+
+ memset(&req, 0, sizeof(req));
+ req.operation = BLKIF_OP_PROBE;
+ req.nr_segments = 1;
+ req.frame_and_sects[0] = virt_to_machine(buf) | 7;
+
+ blkif_control_send(&req, &rsp);
+
+ if ( rsp.status <= 0 )
+ {
+ printk(KERN_ALERT "Could not probe disks (%d)\n", rsp.status);
+ return -1;
+ }
+
+ if ( (nr = rsp.status) > MAX_VBDS )
+ nr = MAX_VBDS;
+ memcpy(disk_info, buf, nr * sizeof(vdisk_t));
+
+ return nr;
+}
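
The probe above hands a whole page to the backend: frame_and_sects[] packs a page-aligned machine address together with first/last 512-byte sector numbers in the low bits, so the "| 7" marks sectors 0..7, i.e. the full 4 KB page. A sketch of that packing, with the field layout assumed from the asm-xen/xen-public/io/blkif.h header this file includes (the header itself is not part of this patch):

#include <stdio.h>
#include <stdint.h>

/* Assumed packing per the blkif interface header (not in this patch):
 * low bits carry first_sect << 3 | last_sect; the rest is the page address. */
static uint32_t fas(uint32_t maddr, unsigned first_sect, unsigned last_sect)
{
    return (maddr & ~0xfffu) | (first_sect << 3) | last_sect;
}

int main(void)
{
    printf("%#x\n", fas(0x12345000u, 0, 7)); /* whole 4 KB page: addr | 7 */
    return 0;
}
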
+
+/*
+ * xlvbd_init_device - initialise a VBD device
+ * @disk: a vdisk_t describing the VBD
+ *
+ * Takes a vdisk_t * that describes a VBD the domain has access to.
+ * Performs appropriate initialisation and registration of the device.
+ *
+ * Care needs to be taken when making re-entrant calls to ensure that
+ * corruption does not occur. Also, devices that are in use should not have
+ * their details updated. This is the caller's responsibility.
+ */
+static int xlvbd_init_device(vdisk_t *xd)
+{
+ int device = xd->device;
+ int major = MAJOR(device);
+ int minor = MINOR(device);
+ int is_ide = IDE_DISK_MAJOR(major); /* is this an ide device? */
+ int is_scsi= SCSI_BLK_MAJOR(major); /* is this a scsi device? */
+ char *major_name;
+ struct gendisk *gd;
+ struct block_device *bd;
+ xl_disk_t *disk;
+ int i, rc = 0, max_part, partno;
+ unsigned long capacity;
+
+ unsigned char buf[64];
+
+ if ( (bd = bdget(device)) == NULL )
+ return -1;
+
+ /*
+ * Update of partition info, and check of usage count, is protected
+ * by the per-block-device semaphore.
+ */
+ down(&bd->bd_sem);
+
+ if ( ((disk = xldev_to_xldisk(device)) != NULL) && (disk->usage != 0) )
+ {
+ printk(KERN_ALERT "VBD update failed - in use [dev=%x]\n", device);
+ rc = -1;
+ goto out;
+ }
+
+ if ( is_ide ) {
+
+ major_name = XLIDE_MAJOR_NAME;
+ max_part = XLIDE_MAX_PART;
+
+ } else if ( is_scsi ) {
+
+ major_name = XLSCSI_MAJOR_NAME;
+ max_part = XLSCSI_MAX_PART;
+
+ } else if (VDISK_VIRTUAL(xd->info)) {
+
+ major_name = XLVBD_MAJOR_NAME;
+ max_part = XLVBD_MAX_PART;
+
+ } else {
+
+ /* SMH: hmm - probably a CCISS driver or sim; assume CCISS for now */
+ printk(KERN_ALERT "Assuming device %02x:%02x is CCISS/SCSI\n",
+ major, minor);
+ is_scsi = 1;
+ major_name = "cciss";
+ max_part = XLSCSI_MAX_PART;
+
+ }
+
+ partno = minor & (max_part - 1);
+
+ if ( (gd = get_gendisk(device)) == NULL )
+ {
+ rc = register_blkdev(major, major_name, &xlvbd_block_fops);
+ if ( rc < 0 )
+ {
+ printk(KERN_ALERT "XL VBD: can't get major %d\n", major);
+ goto out;
+ }
+
+ if ( is_ide )
+ {
+ blksize_size[major] = xlide_blksize_size;
+ hardsect_size[major] = xlide_hardsect_size;
+ max_sectors[major] = xlide_max_sectors;
+ read_ahead[major] = 8;
+ }
+ else if ( is_scsi )
+ {
+ blksize_size[major] = xlscsi_blksize_size;
+ hardsect_size[major] = xlscsi_hardsect_size;
+ max_sectors[major] = xlscsi_max_sectors;
+ read_ahead[major] = 8;
+ }
+ else
+ {
+ blksize_size[major] = xlvbd_blksize_size;
+ hardsect_size[major] = xlvbd_hardsect_size;
+ max_sectors[major] = xlvbd_max_sectors;
+ read_ahead[major] = 8;
+ }
+
+ blk_init_queue(BLK_DEFAULT_QUEUE(major), do_blkif_request);
+
+ /*
+ * Turn off barking 'headactive' mode. We dequeue buffer heads as
+ * soon as we pass them to the back-end driver.
+ */
+ blk_queue_headactive(BLK_DEFAULT_QUEUE(major), 0);
+
+ /* Construct an appropriate gendisk structure. */
+ gd = kmalloc(sizeof(struct gendisk), GFP_KERNEL);
+ gd->major = major;
+ gd->major_name = major_name;
+
+ gd->max_p = max_part;
+ if ( is_ide )
+ {
+ gd->minor_shift = XLIDE_PARTN_SHIFT;
+ gd->nr_real = XLIDE_DEVS_PER_MAJOR;
+ }
+ else if ( is_scsi )
+ {
+ gd->minor_shift = XLSCSI_PARTN_SHIFT;
+ gd->nr_real = XLSCSI_DEVS_PER_MAJOR;
+ }
+ else
+ {
+ gd->minor_shift = XLVBD_PARTN_SHIFT;
+ gd->nr_real = XLVBD_DEVS_PER_MAJOR;
+ }
+
+ /*
+ ** The sizes[] and part[] arrays hold the sizes and other
+ ** information about every partition with this 'major' (i.e.
+ ** every disk sharing the 8 bit prefix * max partns per disk)
+ */
+ gd->sizes = kmalloc(max_part*gd->nr_real*sizeof(int), GFP_KERNEL);
+ gd->part = kmalloc(max_part*gd->nr_real*sizeof(struct hd_struct),
+ GFP_KERNEL);
+ memset(gd->sizes, 0, max_part * gd->nr_real * sizeof(int));
+ memset(gd->part, 0, max_part * gd->nr_real
+ * sizeof(struct hd_struct));
+
+
+ gd->real_devices = kmalloc(gd->nr_real * sizeof(xl_disk_t),
+ GFP_KERNEL);
+ memset(gd->real_devices, 0, gd->nr_real * sizeof(xl_disk_t));
+
+ gd->next = NULL;
+ gd->fops = &xlvbd_block_fops;
+
+ gd->de_arr = kmalloc(gd->nr_real * sizeof(*gd->de_arr),
+ GFP_KERNEL);
+ gd->flags = kmalloc(gd->nr_real * sizeof(*gd->flags), GFP_KERNEL);
+
+ memset(gd->de_arr, 0, gd->nr_real * sizeof(*gd->de_arr));
+ memset(gd->flags, 0, gd->nr_real * sizeof(*gd->flags));
+
+ add_gendisk(gd);
+
+ blk_size[major] = gd->sizes;
+ }
+
+ if ( VDISK_READONLY(xd->info) )
+ set_device_ro(device, 1);
+
+ gd->flags[minor >> gd->minor_shift] |= GENHD_FL_XEN;
+
+ /* NB. Linux 2.4 only handles 32-bit sector offsets and capacities. */
+ capacity = (unsigned long)xd->capacity;
+
+ if ( partno != 0 )
+ {
+ /*
+ * If this was previously set up as a real disc we will have set
+ * up partition-table information. Virtual partitions override
+ * 'real' partitions, and the two cannot coexist on a device.
+ */
+ if ( !(gd->flags[minor >> gd->minor_shift] & GENHD_FL_VIRT_PARTNS) &&
+ (gd->sizes[minor & ~(max_part-1)] != 0) )
+ {
+ /*
+ * Any non-zero sub-partition entries must be cleaned out before
+ * installing 'virtual' partition entries. The two types cannot
+ * coexist, and virtual partitions are favoured.
+ */
+ kdev_t dev = device & ~(max_part-1);
+ for ( i = max_part - 1; i > 0; i-- )
+ {
+ invalidate_device(dev+i, 1);
+ gd->part[MINOR(dev+i)].start_sect = 0;
+ gd->part[MINOR(dev+i)].nr_sects = 0;
+ gd->sizes[MINOR(dev+i)] = 0;
+ }
+ printk(KERN_ALERT
+ "Virtual partitions found for /dev/%s - ignoring any "
+ "real partition information we may have found.\n",
+ disk_name(gd, MINOR(device), buf));
+ }
+
+ /* Need to skankily setup 'partition' information */
+ gd->part[minor].start_sect = 0;
+ gd->part[minor].nr_sects = capacity;
+ gd->sizes[minor] = capacity >>(BLOCK_SIZE_BITS-9);
+
+ gd->flags[minor >> gd->minor_shift] |= GENHD_FL_VIRT_PARTNS;
+ }
+ else
+ {
+ gd->part[minor].nr_sects = capacity;
+ gd->sizes[minor] = capacity>>(BLOCK_SIZE_BITS-9);
+
+ /* Some final fix-ups depending on the device type */
+ switch ( VDISK_TYPE(xd->info) )
+ {
+ case VDISK_TYPE_CDROM:
+ case VDISK_TYPE_FLOPPY:
+ case VDISK_TYPE_TAPE:
+ gd->flags[minor >> gd->minor_shift] |= GENHD_FL_REMOVABLE;
+ printk(KERN_ALERT
+ "Skipping partition check on %s /dev/%s\n",
+ VDISK_TYPE(xd->info)==VDISK_TYPE_CDROM ? "cdrom" :
+ (VDISK_TYPE(xd->info)==VDISK_TYPE_TAPE ? "tape" :
+ "floppy"), disk_name(gd, MINOR(device), buf));
+ break;
+
+ case VDISK_TYPE_DISK:
+ /* Only check partitions on real discs (not virtual!). */
+ if ( gd->flags[minor>>gd->minor_shift] & GENHD_FL_VIRT_PARTNS )
+ {
+ printk(KERN_ALERT
+ "Skipping partition check on virtual /dev/%s\n",
+ disk_name(gd, MINOR(device), buf));
+ break;
+ }
+ register_disk(gd, device, gd->max_p, &xlvbd_block_fops, capacity);
+ break;
+
+ default:
+ printk(KERN_ALERT "XenoLinux: unknown device type %d\n",
+ VDISK_TYPE(xd->info));
+ break;
+ }
+ }
+
+ out:
+ up(&bd->bd_sem);
+ bdput(bd);
+ return rc;
+}
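
One detail of the capacity handling above: the sizes[] entries are in 1 KB blocks while xd->capacity is in 512-byte sectors, hence the >> (BLOCK_SIZE_BITS - 9), which is a shift by one given 2.4's BLOCK_SIZE_BITS of 10. A tiny standalone sketch of the conversion, with a made-up capacity:

#include <stdio.h>

#define BLOCK_SIZE_BITS 10   /* 1 KB blocks, as in 2.4's <linux/fs.h> */

int main(void)
{
    unsigned long capacity = 4194304UL;  /* made-up: 2 GB in 512-byte sectors */
    unsigned long kblocks  = capacity >> (BLOCK_SIZE_BITS - 9); /* sectors -> 1 KB blocks */
    printf("%lu sectors = %lu KB\n", capacity, kblocks);
    return 0;
}
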
+
+
+/*
+ * xlvbd_remove_device - remove a device node if possible
+ * @device: numeric device ID
+ *
+ * Updates the gendisk structure and invalidates devices.
+ *
+ * This is OK for now but in future, should perhaps consider where this should
+ * deallocate gendisks / unregister devices.
+ */
+static int xlvbd_remove_device(int device)
+{
+ int i, rc = 0, minor = MINOR(device);
+ struct gendisk *gd;
+ struct block_device *bd;
+ xl_disk_t *disk = NULL;
+
+ if ( (bd = bdget(device)) == NULL )
+ return -1;
+
+ /*
+ * Update of partition info, and check of usage count, is protected
+ * by the per-block-device semaphore.
+ */
+ down(&bd->bd_sem);
+
+ if ( ((gd = get_gendisk(device)) == NULL) ||
+ ((disk = xldev_to_xldisk(device)) == NULL) )
+ BUG();
+
+ if ( disk->usage != 0 )
+ {
+ printk(KERN_ALERT "VBD removal failed - in use [dev=%x]\n", device);
+ rc = -1;
+ goto out;
+ }
+
+ if ( (minor & (gd->max_p-1)) != 0 )
+ {
+ /* 1: The VBD is mapped to a partition rather than a whole unit. */
+ invalidate_device(device, 1);
+ gd->part[minor].start_sect = 0;
+ gd->part[minor].nr_sects = 0;
+ gd->sizes[minor] = 0;
+
+ /* Clear the consists-of-virtual-partitions flag if possible. */
+ gd->flags[minor >> gd->minor_shift] &= ~GENHD_FL_VIRT_PARTNS;
+ for ( i = 1; i < gd->max_p; i++ )
+ if ( gd->sizes[(minor & ~(gd->max_p-1)) + i] != 0 )
+ gd->flags[minor >> gd->minor_shift] |= GENHD_FL_VIRT_PARTNS;
+
+ /*
+ * If all virtual partitions are now gone, and a 'whole unit' VBD is
+ * present, then we can try to grok the unit's real partition table.
+ */
+ if ( !(gd->flags[minor >> gd->minor_shift] & GENHD_FL_VIRT_PARTNS) &&
+ (gd->sizes[minor & ~(gd->max_p-1)] != 0) &&
+ !(gd->flags[minor >> gd->minor_shift] & GENHD_FL_REMOVABLE) )
+ {
+ register_disk(gd,
+ device&~(gd->max_p-1),
+ gd->max_p,
+ &xlvbd_block_fops,
+ gd->part[minor&~(gd->max_p-1)].nr_sects);
+ }
+ }
+ else
+ {
+ /*
+ * 2: The VBD is mapped to an entire 'unit'. Clear all partitions.
+ * NB. The partition entries are only cleared if there are no VBDs
+ * mapped to individual partitions on this unit.
+ */
+ i = gd->max_p - 1; /* Default: clear subpartitions as well. */
+ if ( gd->flags[minor >> gd->minor_shift] & GENHD_FL_VIRT_PARTNS )
+ i = 0; /* 'Virtual' mode: only clear the 'whole unit' entry. */
+ while ( i >= 0 )
+ {
+ invalidate_device(device+i, 1);
+ gd->part[minor+i].start_sect = 0;
+ gd->part[minor+i].nr_sects = 0;
+ gd->sizes[minor+i] = 0;
+ i--;
+ }
+ }
+
+ out:
+ up(&bd->bd_sem);
+ bdput(bd);
+ return rc;
+}
+
+/*
+ * xlvbd_update_vbds - reprobes the VBD status and updates the driver state
+ * accordingly. The VBDs need to be updated in this way when the domain is
+ * initialised and also each time we receive an XLBLK_UPDATE event.
+ */
+void xlvbd_update_vbds(void)
+{
+ int i, j, k, old_nr, new_nr;
+ vdisk_t *old_info, *new_info, *merged_info;
+
+ old_info = vbd_info;
+ old_nr = nr_vbds;
+
+ new_info = kmalloc(MAX_VBDS * sizeof(vdisk_t), GFP_KERNEL);
+ if (!new_info)
+ return;
+
+ if ( unlikely((new_nr = xlvbd_get_vbd_info(new_info)) < 0) )
+ goto out;
+
+ /*
+ * Final list maximum size is old list + new list. This occurs only when
+ * old list and new list do not overlap at all, and we cannot yet destroy
+ * VBDs in the old list because the usage counts are busy.
+ */
+ merged_info = kmalloc((old_nr + new_nr) * sizeof(vdisk_t), GFP_KERNEL);
+ if (!merged_info)
+ goto out;
+
+ /* @i tracks old list; @j tracks new list; @k tracks merged list. */
+ i = j = k = 0;
+
+ while ( (i < old_nr) && (j < new_nr) )
+ {
+ if ( old_info[i].device < new_info[j].device )
+ {
+ if ( xlvbd_remove_device(old_info[i].device) != 0 )
+ memcpy(&merged_info[k++], &old_info[i], sizeof(vdisk_t));
+ i++;
+ }
+ else if ( old_info[i].device > new_info[j].device )
+ {
+ if ( xlvbd_init_device(&new_info[j]) == 0 )
+ memcpy(&merged_info[k++], &new_info[j], sizeof(vdisk_t));
+ j++;
+ }
+ else
+ {
+ if ( ((old_info[i].capacity == new_info[j].capacity) &&
+ (old_info[i].info == new_info[j].info)) ||
+ (xlvbd_remove_device(old_info[i].device) != 0) )
+ memcpy(&merged_info[k++], &old_info[i], sizeof(vdisk_t));
+ else if ( xlvbd_init_device(&new_info[j]) == 0 )
+ memcpy(&merged_info[k++], &new_info[j], sizeof(vdisk_t));
+ i++; j++;
+ }
+ }
+
+ for ( ; i < old_nr; i++ )
+ {
+ if ( xlvbd_remove_device(old_info[i].device) != 0 )
+ memcpy(&merged_info[k++], &old_info[i], sizeof(vdisk_t));
+ }
+
+ for ( ; j < new_nr; j++ )
+ {
+ if ( xlvbd_init_device(&new_info[j]) == 0 )
+ memcpy(&merged_info[k++], &new_info[j], sizeof(vdisk_t));
+ }
+
+ vbd_info = merged_info;
+ nr_vbds = k;
+
+ kfree(old_info);
+out:
+ kfree(new_info);
+}
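
The loop above is a standard two-pointer merge over two device-sorted arrays: devices only in the old list are removed (or kept if still busy), devices only in the new list are initialised, and devices present in both are kept or re-initialised depending on whether their capacity/info changed. A stripped-down sketch of the control flow, with plain ints standing in for vdisk_t and hypothetical remove/init stubs in place of the real driver calls:

#include <stdio.h>

/* Hypothetical stand-ins for xlvbd_remove_device()/xlvbd_init_device();
 * in the driver either may fail, which keeps the old entry. */
static int remove_dev(int d) { printf("remove %d\n", d); return 0; }
static int init_dev(int d)   { printf("init   %d\n", d); return 0; }

/* Two-pointer merge of sorted device lists, mirroring xlvbd_update_vbds().
 * Returns the number of entries written to merged[]. */
static int merge_vbds(const int *old, int old_nr,
                      const int *new, int new_nr, int *merged)
{
    int i = 0, j = 0, k = 0;

    while ( (i < old_nr) && (j < new_nr) )
    {
        if ( old[i] < new[j] )            /* vanished: try to remove */
        {
            if ( remove_dev(old[i]) != 0 )
                merged[k++] = old[i];     /* still busy: keep it */
            i++;
        }
        else if ( old[i] > new[j] )       /* brand new: try to initialise */
        {
            if ( init_dev(new[j]) == 0 )
                merged[k++] = new[j];
            j++;
        }
        else                              /* in both lists: keep */
        {
            merged[k++] = old[i];
            i++; j++;
        }
    }
    for ( ; i < old_nr; i++ )             /* old tail: remove or keep */
        if ( remove_dev(old[i]) != 0 )
            merged[k++] = old[i];
    for ( ; j < new_nr; j++ )             /* new tail: initialise */
        if ( init_dev(new[j]) == 0 )
            merged[k++] = new[j];
    return k;
}

int main(void)
{
    int old[] = { 1, 2, 5 }, new[] = { 2, 3, 5 }, merged[6];
    int n = merge_vbds(old, 3, new, 3, merged), k;
    for ( k = 0; k < n; k++ )
        printf("kept %d\n", merged[k]);
    return 0;
}
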
+
+
+/*
+ * Set up all the linux device goop for the virtual block devices (vbd's) that
+ * we know about. Note that although from the backend driver's p.o.v. VBDs are
+ * addressed simply as an opaque 16-bit device number, the domain creation tools
+ * conventionally allocate these numbers to correspond to those used by 'real'
+ * linux -- this is just for convenience as it means e.g. that the same
+ * /etc/fstab can be used when booting with or without Xen.
+ */
+int xlvbd_init(void)
+{
+ int i;
+
+ /*
+ * If compiled as a module, we don't support unloading yet. We therefore
+ * permanently increment the reference count to disallow it.
+ */
+ SET_MODULE_OWNER(&xlvbd_block_fops);
+ MOD_INC_USE_COUNT;
+
+ /* Initialize the global arrays. */
+ for ( i = 0; i < 256; i++ )
+ {
+ xlide_blksize_size[i] = 1024;
+ xlide_hardsect_size[i] = 512;
+ xlide_max_sectors[i] = 512;
+
+ xlscsi_blksize_size[i] = 1024;
+ xlscsi_hardsect_size[i] = 512;
+ xlscsi_max_sectors[i] = 512;
+
+ xlvbd_blksize_size[i] = 512;
+ xlvbd_hardsect_size[i] = 512;
+ xlvbd_max_sectors[i] = 512;
+ }
+
+ vbd_info = kmalloc(MAX_VBDS * sizeof(vdisk_t), GFP_KERNEL);
+ if (!vbd_info)
+ return -ENOMEM;
+
+ nr_vbds = xlvbd_get_vbd_info(vbd_info);
+
+ if ( nr_vbds < 0 )
+ {
+ kfree(vbd_info);
+ vbd_info = NULL;
+ nr_vbds = 0;
+ }
+ else
+ {
+ for ( i = 0; i < nr_vbds; i++ )
+ xlvbd_init_device(&vbd_info[i]);
+ }
+
+ return 0;
+}
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/arch/xen/drivers/console/Makefile
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/arch/xen/drivers/console/Makefile Tue Aug 9 15:17:45 2005
@@ -0,0 +1,3 @@
+O_TARGET := drv.o
+obj-$(CONFIG_XEN_CONSOLE) := console.o
+include $(TOPDIR)/Rules.make
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/arch/xen/drivers/dom0/Makefile
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/arch/xen/drivers/dom0/Makefile Tue Aug 9 15:17:45 2005
@@ -0,0 +1,3 @@
+O_TARGET := drv.o
+obj-y := core.o
+include $(TOPDIR)/Rules.make
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/arch/xen/drivers/evtchn/Makefile
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/arch/xen/drivers/evtchn/Makefile Tue Aug 9 15:17:45 2005
@@ -0,0 +1,3 @@
+O_TARGET := drv.o
+obj-y := evtchn.o
+include $(TOPDIR)/Rules.make
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/arch/xen/drivers/netif/Makefile
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/arch/xen/drivers/netif/Makefile Tue Aug 9 15:17:45 2005
@@ -0,0 +1,10 @@
+
+O_TARGET := drv.o
+
+subdir-$(CONFIG_XEN_NETDEV_FRONTEND) += frontend
+obj-$(CONFIG_XEN_NETDEV_FRONTEND) += frontend/drv.o
+
+subdir-$(CONFIG_XEN_PHYSDEV_ACCESS) += backend
+obj-$(CONFIG_XEN_PHYSDEV_ACCESS) += backend/drv.o
+
+include $(TOPDIR)/Rules.make
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/arch/xen/drivers/netif/backend/Makefile
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/arch/xen/drivers/netif/backend/Makefile Tue Aug 9 15:17:45 2005
@@ -0,0 +1,4 @@
+O_TARGET := drv.o
+export-objs := interface.o
+obj-y := main.o control.o interface.o
+include $(TOPDIR)/Rules.make
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/arch/xen/drivers/netif/frontend/Makefile
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/arch/xen/drivers/netif/frontend/Makefile Tue Aug 9 15:17:45 2005
@@ -0,0 +1,3 @@
+O_TARGET := drv.o
+obj-y := main.o
+include $(TOPDIR)/Rules.make
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/arch/xen/kernel/Makefile
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/arch/xen/kernel/Makefile Tue Aug 9 15:17:45 2005
@@ -0,0 +1,20 @@
+
+.S.o:
+ $(CC) $(AFLAGS) -traditional -c $< -o $*.o
+
+all: kernel.o head.o init_task.o
+
+O_TARGET := kernel.o
+
+export-objs := i386_ksyms.o skbuff.o ctrl_if.o
+
+obj-y := process.o semaphore.o signal.o entry.o traps.o irq.o \
+ ptrace.o ioport.o ldt.o setup.o time.o sys_i386.o \
+ i386_ksyms.o i387.o evtchn.o ctrl_if.o pci-dma.o \
+ reboot.o fixup.o skbuff.o
+
+ifdef CONFIG_PCI
+obj-y += pci-i386.o pci-pc.o
+endif
+
+include $(TOPDIR)/Rules.make
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/arch/xen/kernel/entry.S
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/arch/xen/kernel/entry.S Tue Aug 9 15:17:45 2005
@@ -0,0 +1,779 @@
+/*
+ * linux/arch/i386/entry.S
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+/*
+ * entry.S contains the system-call and fault low-level handling routines.
+ * This also contains the timer-interrupt handler, as well as all interrupts
+ * and faults that can result in a task-switch.
+ *
+ * NOTE: This code handles signal-recognition, which happens every time
+ * after a timer-interrupt and after each system call.
+ *
+ * I changed all the .align's to 4 (16 byte alignment), as that's faster
+ * on a 486.
+ *
+ * Stack layout in 'ret_to_user':
+ * ptrace needs to have all regs on the stack.
+ * if the order here is changed, it needs to be
+ * updated in fork.c:copy_process, signal.c:do_signal,
+ * ptrace.c and ptrace.h
+ *
+ * 0(%esp) - %ebx
+ * 4(%esp) - %ecx
+ * 8(%esp) - %edx
+ * C(%esp) - %esi
+ * 10(%esp) - %edi
+ * 14(%esp) - %ebp
+ * 18(%esp) - %eax
+ * 1C(%esp) - %ds
+ * 20(%esp) - %es
+ * 24(%esp) - orig_eax
+ * 28(%esp) - %eip
+ * 2C(%esp) - %cs
+ * 30(%esp) - %eflags
+ * 34(%esp) - %oldesp
+ * 38(%esp) - %oldss
+ *
+ * "current" is in register %ebx during any slow entries.
+ */
+
+#include <linux/config.h>
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <asm/segment.h>
+#include <asm/smp.h>
+
+EBX = 0x00
+ECX = 0x04
+EDX = 0x08
+ESI = 0x0C
+EDI = 0x10
+EBP = 0x14
+EAX = 0x18
+DS = 0x1C
+ES = 0x20
+ORIG_EAX = 0x24
+EIP = 0x28
+CS = 0x2C
+EFLAGS = 0x30
+OLDESP = 0x34
+OLDSS = 0x38
+
+CF_MASK = 0x00000001
+TF_MASK = 0x00000100
+IF_MASK = 0x00000200
+DF_MASK = 0x00000400
+NT_MASK = 0x00004000
+
+/* Offsets into task_struct. */
+state = 0
+flags = 4
+sigpending = 8
+addr_limit = 12
+exec_domain = 16
+need_resched = 20
+tsk_ptrace = 24
+processor = 52
+
+/* Offsets into shared_info_t. */
+#define evtchn_upcall_pending /* 0 */
+#define evtchn_upcall_mask 1
+
+ENOSYS = 38
+
+
+#define SAVE_ALL \
+ cld; \
+ pushl %es; \
+ pushl %ds; \
+ pushl %eax; \
+ pushl %ebp; \
+ pushl %edi; \
+ pushl %esi; \
+ pushl %edx; \
+ pushl %ecx; \
+ pushl %ebx; \
+ movl $(__KERNEL_DS),%edx; \
+ movl %edx,%ds; \
+ movl %edx,%es;
+
+#define RESTORE_ALL \
+ popl %ebx; \
+ popl %ecx; \
+ popl %edx; \
+ popl %esi; \
+ popl %edi; \
+ popl %ebp; \
+ popl %eax; \
+1: popl %ds; \
+2: popl %es; \
+ addl $4,%esp; \
+3: iret; \
+.section .fixup,"ax"; \
+4: movl $0,(%esp); \
+ jmp 1b; \
+5: movl $0,(%esp); \
+ jmp 2b; \
+6: pushl %ss; \
+ popl %ds; \
+ pushl %ss; \
+ popl %es; \
+ pushl $11; \
+ call do_exit; \
+.previous; \
+.section __ex_table,"a";\
+ .align 4; \
+ .long 1b,4b; \
+ .long 2b,5b; \
+ .long 3b,6b; \
+.previous
+
+#define GET_CURRENT(reg) \
+ movl $-8192, reg; \
+ andl %esp, reg
+
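
GET_CURRENT relies on the 2.4 convention that each task's task_struct and kernel stack share one 8 KB, 8 KB-aligned allocation, so masking the stack pointer with -8192 lands on the task_struct. A userspace sketch of the mask arithmetic (the pointer value is made up):

#include <stdio.h>
#include <stdint.h>

#define STACK_SIZE 8192u   /* 2.4: task_struct + kernel stack in one 8 KB block */

int main(void)
{
    uintptr_t esp     = 0xc5013f2cu;                        /* made-up kernel %esp */
    uintptr_t current = esp & ~(uintptr_t)(STACK_SIZE - 1); /* movl $-8192; andl %esp */
    printf("esp=%#lx -> current=%#lx\n",
           (unsigned long)esp, (unsigned long)current);
    return 0;
}
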
+ENTRY(lcall7)
+ pushfl # We get a different stack layout with call
+ pushl %eax # gates, which has to be cleaned up later..
+ SAVE_ALL
+ movl EIP(%esp),%eax # due to call gates, this is eflags, not eip..
+ movl CS(%esp),%edx # this is eip..
+ movl EFLAGS(%esp),%ecx # and this is cs..
+ movl %eax,EFLAGS(%esp) #
+ andl $~(NT_MASK|TF_MASK|DF_MASK), %eax
+ pushl %eax
+ popfl
+ movl %edx,EIP(%esp) # Now we move them to their "normal" places
+ movl %ecx,CS(%esp) #
+ movl %esp,%ebx
+ pushl %ebx
+ andl $-8192,%ebx # GET_CURRENT
+ movl exec_domain(%ebx),%edx # Get the execution domain
+ movl 4(%edx),%edx # Get the lcall7 handler for the domain
+ pushl $0x7
+ call *%edx
+ addl $4, %esp
+ popl %eax
+ jmp ret_to_user
+
+ENTRY(lcall27)
+ pushfl # We get a different stack layout with call
+ pushl %eax # gates, which has to be cleaned up later..
+ SAVE_ALL
+ movl EIP(%esp),%eax # due to call gates, this is eflags, not eip..
+ movl CS(%esp),%edx # this is eip..
+ movl EFLAGS(%esp),%ecx # and this is cs..
+ movl %eax,EFLAGS(%esp) #
+ andl $~(NT_MASK|TF_MASK|DF_MASK), %eax
+ pushl %eax
+ popfl
+ movl %edx,EIP(%esp) # Now we move them to their "normal" places
+ movl %ecx,CS(%esp) #
+ movl %esp,%ebx
+ pushl %ebx
+ andl $-8192,%ebx # GET_CURRENT
+ movl exec_domain(%ebx),%edx # Get the execution domain
+ movl 4(%edx),%edx # Get the lcall7 handler for the domain
+ pushl $0x27
+ call *%edx
+ addl $4, %esp
+ popl %eax
+ jmp ret_to_user
+
+ENTRY(ret_from_fork)
+ pushl %ebx
+ call SYMBOL_NAME(schedule_tail)
+ addl $4, %esp
+ GET_CURRENT(%ebx)
+ testb $0x02,tsk_ptrace(%ebx) # PT_TRACESYS
+ jne tracesys_exit
+ jmp ret_to_user
+
+/*
+ * Return to user mode is not as complex as all this looks,
+ * but we want the default path for a system call return to
+ * go as quickly as possible which is why some of this is
+ * less clear than it otherwise should be.
+ */
+ENTRY(system_call)
+ pushl %eax # save orig_eax
+ SAVE_ALL
+ GET_CURRENT(%ebx)
+ testb $0x02,tsk_ptrace(%ebx) # PT_TRACESYS
+ jne tracesys
+ cmpl $(NR_syscalls),%eax
+ jae badsys
+ call *SYMBOL_NAME(sys_call_table)(,%eax,4)
+ movl %eax,EAX(%esp) # save the return value
+ret_to_user:
+ movl SYMBOL_NAME(HYPERVISOR_shared_info),%esi
+ movb $1,evtchn_upcall_mask(%esi) # make tests atomic
+ret_to_user_nocli:
+ cmpl $0,need_resched(%ebx)
+ jne reschedule
+ cmpl $0,sigpending(%ebx)
+ je safesti # ensure need_resched updates are seen
+/*signal_return:*/
+ movb $0,evtchn_upcall_mask(%esi) # reenable event callbacks
+ movl %esp,%eax
+ xorl %edx,%edx
+ call SYMBOL_NAME(do_signal)
+ jmp safesti
+
+ ALIGN
+restore_all:
+ RESTORE_ALL
+
+ ALIGN
+tracesys:
+ movl $-ENOSYS,EAX(%esp)
+ call SYMBOL_NAME(syscall_trace)
+ movl ORIG_EAX(%esp),%eax
+ cmpl $(NR_syscalls),%eax
+ jae tracesys_exit
+ call *SYMBOL_NAME(sys_call_table)(,%eax,4)
+ movl %eax,EAX(%esp) # save the return value
+tracesys_exit:
+ call SYMBOL_NAME(syscall_trace)
+ jmp ret_to_user
+badsys:
+ movl $-ENOSYS,EAX(%esp)
+ jmp ret_to_user
+
+ ALIGN
+ENTRY(ret_from_intr)
+ GET_CURRENT(%ebx)
+ret_from_exception:
+ movb CS(%esp),%al
+ testl $2,%eax
+ jne ret_to_user
+ jmp restore_all
+
+ ALIGN
+reschedule:
+ movb $0,evtchn_upcall_mask(%esi) # reenable event callbacks
+ call SYMBOL_NAME(schedule) # test
+ jmp ret_to_user
+
+ENTRY(divide_error)
+ pushl $0 # no error code
+ pushl $ SYMBOL_NAME(do_divide_error)
+ ALIGN
+error_code:
+ pushl %ds
+ pushl %eax
+ xorl %eax,%eax
+ pushl %ebp
+ pushl %edi
+ pushl %esi
+ pushl %edx
+ decl %eax # eax = -1
+ pushl %ecx
+ pushl %ebx
+ GET_CURRENT(%ebx)
+ cld
+ movl %es,%ecx
+ movl ORIG_EAX(%esp), %esi # get the error code
+ movl ES(%esp), %edi # get the function address
+ movl %eax, ORIG_EAX(%esp)
+ movl %ecx, ES(%esp)
+ movl %esp,%edx
+ pushl %esi # push the error code
+ pushl %edx # push the pt_regs pointer
+ movl $(__KERNEL_DS),%edx
+ movl %edx,%ds
+ movl %edx,%es
+ call *%edi
+ addl $8,%esp
+ jmp ret_from_exception
+
+# A note on the "critical region" in our callback handler.
+# We want to avoid stacking callback handlers due to events occurring
+# during handling of the last event. To do this, we keep events disabled
+# until we've done all processing. HOWEVER, we must enable events before
+# popping the stack frame (can't be done atomically) and so it would still
+# be possible to get enough handler activations to overflow the stack.
+# Although unlikely, bugs of that kind are hard to track down, so we'd
+# like to avoid the possibility.
+# So, on entry to the handler we detect whether we interrupted an
+# existing activation in its critical region -- if so, we pop the current
+# activation and restart the handler using the previous one.
+ENTRY(hypervisor_callback)
+ pushl %eax
+ SAVE_ALL
+ GET_CURRENT(%ebx)
+ movl EIP(%esp),%eax
+ cmpl $scrit,%eax
+ jb 11f
+ cmpl $ecrit,%eax
+ jb critical_region_fixup
+11: push %esp
+ call evtchn_do_upcall
+ add $4,%esp
+ movl SYMBOL_NAME(HYPERVISOR_shared_info),%esi
+ movb CS(%esp),%cl
+ test $2,%cl # slow return to ring 2 or 3
+ jne ret_to_user_nocli
+safesti:movb $0,evtchn_upcall_mask(%esi) # reenable event callbacks
+scrit: /**** START OF CRITICAL REGION ****/
+ testb $0xFF,evtchn_upcall_pending(%esi)
+ jnz 14f # process more events if necessary...
+ RESTORE_ALL
+14: movb $1,evtchn_upcall_mask(%esi)
+ jmp 11b
+ecrit: /**** END OF CRITICAL REGION ****/
+# [How we do the fixup]. We want to merge the current stack frame with the
+# just-interrupted frame. How we do this depends on where in the critical
+# region the interrupted handler was executing, and so how many saved
+# registers are in each frame. We do this quickly using the lookup table
+# 'critical_fixup_table'. For each byte offset in the critical region, it
+# provides the number of bytes which have already been popped from the
+# interrupted stack frame.
+critical_region_fixup:
+ addl $critical_fixup_table-scrit,%eax
+ movzbl (%eax),%eax # %eax contains num bytes popped
+ mov %esp,%esi
+ add %eax,%esi # %esi points at end of src region
+ mov %esp,%edi
+ add $0x34,%edi # %edi points at end of dst region
+ mov %eax,%ecx
+ shr $2,%ecx # convert bytes to words
+ je 16f # skip loop if nothing to copy
+15: subl $4,%esi # pre-decrementing copy loop
+ subl $4,%edi
+ movl (%esi),%eax
+ movl %eax,(%edi)
+ loop 15b
+16: movl %edi,%esp # final %edi is top of merged stack
+ jmp 11b
+
+critical_fixup_table:
+ .byte 0x00,0x00,0x00 # testb $0xFF,(%esi)
+ .byte 0x00,0x00 # jnz 14f
+ .byte 0x00 # pop %ebx
+ .byte 0x04 # pop %ecx
+ .byte 0x08 # pop %edx
+ .byte 0x0c # pop %esi
+ .byte 0x10 # pop %edi
+ .byte 0x14 # pop %ebp
+ .byte 0x18 # pop %eax
+ .byte 0x1c # pop %ds
+ .byte 0x20 # pop %es
+ .byte 0x24,0x24,0x24 # add $4,%esp
+ .byte 0x28 # iret
+ .byte 0x00,0x00,0x00,0x00 # movb $1,4(%esi)
+ .byte 0x00,0x00 # jmp 11b
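
In C terms, the fixup amounts to sliding the bottom 'popped' bytes of the new frame (the registers the interrupted RESTORE_ALL had already popped and the new SAVE_ALL re-saved) up against the remainder of the old frame, yielding one complete 0x34-byte frame. A hypothetical userspace rendering of that copy, where popped is the byte count looked up in critical_fixup_table:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define FRAME_BYTES 0x34   /* full frame: ebx..eax, ds, es, orig_eax, eip, cs, eflags */

/* Hypothetical rendering of critical_region_fixup: returns the merged
 * frame's new stack pointer. The regions overlap; memmove handles that,
 * which the asm does by copying backwards (pre-decrementing). */
static uint8_t *merge_frames(uint8_t *esp, size_t popped)
{
    memmove(esp + FRAME_BYTES - popped, esp, popped);
    return esp + FRAME_BYTES - popped;
}

int main(void)
{
    uint8_t stack[2 * FRAME_BYTES] = { 0 };
    /* e.g. interrupted at the 'pop %eax' instruction: 0x18 bytes popped */
    uint8_t *merged = merge_frames(stack, 0x18);
    printf("frame moved up by %d bytes\n", (int)(merged - stack));
    return 0;
}
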
+
+# Hypervisor uses this for application faults while it executes.
+ENTRY(failsafe_callback)
+1: popl %ds
+2: popl %es
+3: popl %fs
+4: popl %gs
+5: iret
+.section .fixup,"ax"; \
+6: movl $0,(%esp); \
+ jmp 1b; \
+7: movl $0,(%esp); \
+ jmp 2b; \
+8: movl $0,(%esp); \
+ jmp 3b; \
+9: movl $0,(%esp); \
+ jmp 4b; \
+10: pushl %ss; \
+ popl %ds; \
+ pushl %ss; \
+ popl %es; \
+ pushl $11; \
+ call do_exit; \
+.previous; \
+.section __ex_table,"a";\
+ .align 4; \
+ .long 1b,6b; \
+ .long 2b,7b; \
+ .long 3b,8b; \
+ .long 4b,9b; \
+ .long 5b,10b; \
+.previous
+
+ENTRY(coprocessor_error)
+ pushl $0
+ pushl $ SYMBOL_NAME(do_coprocessor_error)
+ jmp error_code
+
+ENTRY(simd_coprocessor_error)
+ pushl $0
+ pushl $ SYMBOL_NAME(do_simd_coprocessor_error)
+ jmp error_code
+
+ENTRY(device_not_available)
+ pushl $-1 # mark this as an int
+ SAVE_ALL
+ GET_CURRENT(%ebx)
+ call SYMBOL_NAME(math_state_restore)
+ jmp ret_from_exception
+
+ENTRY(debug)
+ pushl $0
+ pushl $ SYMBOL_NAME(do_debug)
+ jmp error_code
+
+ENTRY(int3)
+ pushl $0
+ pushl $ SYMBOL_NAME(do_int3)
+ jmp error_code
+
+ENTRY(overflow)
+ pushl $0
+ pushl $ SYMBOL_NAME(do_overflow)
+ jmp error_code
+
+ENTRY(bounds)
+ pushl $0
+ pushl $ SYMBOL_NAME(do_bounds)
+ jmp error_code
+
+ENTRY(invalid_op)
+ pushl $0
+ pushl $ SYMBOL_NAME(do_invalid_op)
+ jmp error_code
+
+ENTRY(coprocessor_segment_overrun)
+ pushl $0
+ pushl $ SYMBOL_NAME(do_coprocessor_segment_overrun)
+ jmp error_code
+
+ENTRY(double_fault)
+ pushl $ SYMBOL_NAME(do_double_fault)
+ jmp error_code
+
+ENTRY(invalid_TSS)
+ pushl $ SYMBOL_NAME(do_invalid_TSS)
+ jmp error_code
+
+ENTRY(segment_not_present)
+ pushl $ SYMBOL_NAME(do_segment_not_present)
+ jmp error_code
+
+ENTRY(stack_segment)
+ pushl $ SYMBOL_NAME(do_stack_segment)
+ jmp error_code
+
+ENTRY(general_protection)
+ pushl $ SYMBOL_NAME(do_general_protection)
+ jmp error_code
+
+ENTRY(alignment_check)
+ pushl $ SYMBOL_NAME(do_alignment_check)
+ jmp error_code
+
+# This handler is special, because it gets an extra value on its stack,
+# which is the linear faulting address.
+#define PAGE_FAULT_STUB(_name1, _name2) \
+ENTRY(_name1) \
+ pushl %ds ; \
+ pushl %eax ; \
+ xorl %eax,%eax ; \
+ pushl %ebp ; \
+ pushl %edi ; \
+ pushl %esi ; \
+ pushl %edx ; \
+ decl %eax /* eax = -1 */ ; \
+ pushl %ecx ; \
+ pushl %ebx ; \
+ GET_CURRENT(%ebx) ; \
+ cld ; \
+ movl %es,%ecx ; \
+ movl ORIG_EAX(%esp), %esi /* get the error code */ ; \
+ movl ES(%esp), %edi /* get the faulting address */ ; \
+ movl %eax, ORIG_EAX(%esp) ; \
+ movl %ecx, ES(%esp) ; \
+ movl %esp,%edx ; \
+ pushl %edi /* push the faulting address */ ; \
+ pushl %esi /* push the error code */ ; \
+ pushl %edx /* push the pt_regs pointer */ ; \
+ movl $(__KERNEL_DS),%edx ; \
+ movl %edx,%ds ; \
+ movl %edx,%es ; \
+ call SYMBOL_NAME(_name2) ; \
+ addl $12,%esp ; \
+ jmp ret_from_exception ;
+PAGE_FAULT_STUB(page_fault, do_page_fault)
+
+ENTRY(machine_check)
+ pushl $0
+ pushl $ SYMBOL_NAME(do_machine_check)
+ jmp error_code
+
+ENTRY(fixup_4gb_segment)
+ pushl $ SYMBOL_NAME(do_fixup_4gb_segment)
+ jmp error_code
+
+.data
+ENTRY(sys_call_table)
+ .long SYMBOL_NAME(sys_ni_syscall) /* 0 - old "setup()" system call*/
+ .long SYMBOL_NAME(sys_exit)
+ .long SYMBOL_NAME(sys_fork)
+ .long SYMBOL_NAME(sys_read)
+ .long SYMBOL_NAME(sys_write)
+ .long SYMBOL_NAME(sys_open) /* 5 */
+ .long SYMBOL_NAME(sys_close)
+ .long SYMBOL_NAME(sys_waitpid)
+ .long SYMBOL_NAME(sys_creat)
+ .long SYMBOL_NAME(sys_link)
+ .long SYMBOL_NAME(sys_unlink) /* 10 */
+ .long SYMBOL_NAME(sys_execve)
+ .long SYMBOL_NAME(sys_chdir)
+ .long SYMBOL_NAME(sys_time)
+ .long SYMBOL_NAME(sys_mknod)
+ .long SYMBOL_NAME(sys_chmod) /* 15 */
+ .long SYMBOL_NAME(sys_lchown16)
+ .long SYMBOL_NAME(sys_ni_syscall) /* old break syscall holder */
+ .long SYMBOL_NAME(sys_stat)
+ .long SYMBOL_NAME(sys_lseek)
+ .long SYMBOL_NAME(sys_getpid) /* 20 */
+ .long SYMBOL_NAME(sys_mount)
+ .long SYMBOL_NAME(sys_oldumount)
+ .long SYMBOL_NAME(sys_setuid16)
+ .long SYMBOL_NAME(sys_getuid16)
+ .long SYMBOL_NAME(sys_stime) /* 25 */
+ .long SYMBOL_NAME(sys_ptrace)
+ .long SYMBOL_NAME(sys_alarm)
+ .long SYMBOL_NAME(sys_fstat)
+ .long SYMBOL_NAME(sys_pause)
+ .long SYMBOL_NAME(sys_utime) /* 30 */
+ .long SYMBOL_NAME(sys_ni_syscall) /* old stty syscall holder */
+ .long SYMBOL_NAME(sys_ni_syscall) /* old gtty syscall holder */
+ .long SYMBOL_NAME(sys_access)
+ .long SYMBOL_NAME(sys_nice)
+ .long SYMBOL_NAME(sys_ni_syscall) /* 35 */ /* old ftime syscall holder */
+ .long SYMBOL_NAME(sys_sync)
+ .long SYMBOL_NAME(sys_kill)
+ .long SYMBOL_NAME(sys_rename)
+ .long SYMBOL_NAME(sys_mkdir)
+ .long SYMBOL_NAME(sys_rmdir) /* 40 */
+ .long SYMBOL_NAME(sys_dup)
+ .long SYMBOL_NAME(sys_pipe)
+ .long SYMBOL_NAME(sys_times)
+ .long SYMBOL_NAME(sys_ni_syscall) /* old prof syscall holder */
+ .long SYMBOL_NAME(sys_brk) /* 45 */
+ .long SYMBOL_NAME(sys_setgid16)
+ .long SYMBOL_NAME(sys_getgid16)
+ .long SYMBOL_NAME(sys_signal)
+ .long SYMBOL_NAME(sys_geteuid16)
+ .long SYMBOL_NAME(sys_getegid16) /* 50 */
+ .long SYMBOL_NAME(sys_acct)
+ .long SYMBOL_NAME(sys_umount) /* recycled never used phys() */
+ .long SYMBOL_NAME(sys_ni_syscall) /* old lock syscall holder */
+ .long SYMBOL_NAME(sys_ioctl)
+ .long SYMBOL_NAME(sys_fcntl) /* 55 */
+ .long SYMBOL_NAME(sys_ni_syscall) /* old mpx syscall holder */
+ .long SYMBOL_NAME(sys_setpgid)
+ .long SYMBOL_NAME(sys_ni_syscall) /* old ulimit syscall holder */
+ .long SYMBOL_NAME(sys_olduname)
+ .long SYMBOL_NAME(sys_umask) /* 60 */
+ .long SYMBOL_NAME(sys_chroot)
+ .long SYMBOL_NAME(sys_ustat)
+ .long SYMBOL_NAME(sys_dup2)
+ .long SYMBOL_NAME(sys_getppid)
+ .long SYMBOL_NAME(sys_getpgrp) /* 65 */
+ .long SYMBOL_NAME(sys_setsid)
+ .long SYMBOL_NAME(sys_sigaction)
+ .long SYMBOL_NAME(sys_sgetmask)
+ .long SYMBOL_NAME(sys_ssetmask)
+ .long SYMBOL_NAME(sys_setreuid16) /* 70 */
+ .long SYMBOL_NAME(sys_setregid16)
+ .long SYMBOL_NAME(sys_sigsuspend)
+ .long SYMBOL_NAME(sys_sigpending)
+ .long SYMBOL_NAME(sys_sethostname)
+ .long SYMBOL_NAME(sys_setrlimit) /* 75 */
+ .long SYMBOL_NAME(sys_old_getrlimit)
+ .long SYMBOL_NAME(sys_getrusage)
+ .long SYMBOL_NAME(sys_gettimeofday)
+ .long SYMBOL_NAME(sys_settimeofday)
+ .long SYMBOL_NAME(sys_getgroups16) /* 80 */
+ .long SYMBOL_NAME(sys_setgroups16)
+ .long SYMBOL_NAME(old_select)
+ .long SYMBOL_NAME(sys_symlink)
+ .long SYMBOL_NAME(sys_lstat)
+ .long SYMBOL_NAME(sys_readlink) /* 85 */
+ .long SYMBOL_NAME(sys_uselib)
+ .long SYMBOL_NAME(sys_swapon)
+ .long SYMBOL_NAME(sys_reboot)
+ .long SYMBOL_NAME(old_readdir)
+ .long SYMBOL_NAME(old_mmap) /* 90 */
+ .long SYMBOL_NAME(sys_munmap)
+ .long SYMBOL_NAME(sys_truncate)
+ .long SYMBOL_NAME(sys_ftruncate)
+ .long SYMBOL_NAME(sys_fchmod)
+ .long SYMBOL_NAME(sys_fchown16) /* 95 */
+ .long SYMBOL_NAME(sys_getpriority)
+ .long SYMBOL_NAME(sys_setpriority)
+ .long SYMBOL_NAME(sys_ni_syscall) /* old profil syscall holder */
+ .long SYMBOL_NAME(sys_statfs)
+ .long SYMBOL_NAME(sys_fstatfs) /* 100 */
+ .long SYMBOL_NAME(sys_ioperm)
+ .long SYMBOL_NAME(sys_socketcall)
+ .long SYMBOL_NAME(sys_syslog)
+ .long SYMBOL_NAME(sys_setitimer)
+ .long SYMBOL_NAME(sys_getitimer) /* 105 */
+ .long SYMBOL_NAME(sys_newstat)
+ .long SYMBOL_NAME(sys_newlstat)
+ .long SYMBOL_NAME(sys_newfstat)
+ .long SYMBOL_NAME(sys_uname)
+ .long SYMBOL_NAME(sys_iopl) /* 110 */
+ .long SYMBOL_NAME(sys_vhangup)
+ .long SYMBOL_NAME(sys_ni_syscall) /* old "idle" system call */
+ .long SYMBOL_NAME(sys_ni_syscall) /* was VM86 */
+ .long SYMBOL_NAME(sys_wait4)
+ .long SYMBOL_NAME(sys_swapoff) /* 115 */
+ .long SYMBOL_NAME(sys_sysinfo)
+ .long SYMBOL_NAME(sys_ipc)
+ .long SYMBOL_NAME(sys_fsync)
+ .long SYMBOL_NAME(sys_sigreturn)
+ .long SYMBOL_NAME(sys_clone) /* 120 */
+ .long SYMBOL_NAME(sys_setdomainname)
+ .long SYMBOL_NAME(sys_newuname)
+ .long SYMBOL_NAME(sys_modify_ldt)
+ .long SYMBOL_NAME(sys_adjtimex)
+ .long SYMBOL_NAME(sys_mprotect) /* 125 */
+ .long SYMBOL_NAME(sys_sigprocmask)
+ .long SYMBOL_NAME(sys_create_module)
+ .long SYMBOL_NAME(sys_init_module)
+ .long SYMBOL_NAME(sys_delete_module)
+ .long SYMBOL_NAME(sys_get_kernel_syms) /* 130 */
+ .long SYMBOL_NAME(sys_quotactl)
+ .long SYMBOL_NAME(sys_getpgid)
+ .long SYMBOL_NAME(sys_fchdir)
+ .long SYMBOL_NAME(sys_bdflush)
+ .long SYMBOL_NAME(sys_sysfs) /* 135 */
+ .long SYMBOL_NAME(sys_personality)
+ .long SYMBOL_NAME(sys_ni_syscall) /* for afs_syscall */
+ .long SYMBOL_NAME(sys_setfsuid16)
+ .long SYMBOL_NAME(sys_setfsgid16)
+ .long SYMBOL_NAME(sys_llseek) /* 140 */
+ .long SYMBOL_NAME(sys_getdents)
+ .long SYMBOL_NAME(sys_select)
+ .long SYMBOL_NAME(sys_flock)
+ .long SYMBOL_NAME(sys_msync)
+ .long SYMBOL_NAME(sys_readv) /* 145 */
+ .long SYMBOL_NAME(sys_writev)
+ .long SYMBOL_NAME(sys_getsid)
+ .long SYMBOL_NAME(sys_fdatasync)
+ .long SYMBOL_NAME(sys_sysctl)
+ .long SYMBOL_NAME(sys_mlock) /* 150 */
+ .long SYMBOL_NAME(sys_munlock)
+ .long SYMBOL_NAME(sys_mlockall)
+ .long SYMBOL_NAME(sys_munlockall)
+ .long SYMBOL_NAME(sys_sched_setparam)
+ .long SYMBOL_NAME(sys_sched_getparam) /* 155 */
+ .long SYMBOL_NAME(sys_sched_setscheduler)
+ .long SYMBOL_NAME(sys_sched_getscheduler)
+ .long SYMBOL_NAME(sys_sched_yield)
+ .long SYMBOL_NAME(sys_sched_get_priority_max)
+ .long SYMBOL_NAME(sys_sched_get_priority_min) /* 160 */
+ .long SYMBOL_NAME(sys_sched_rr_get_interval)
+ .long SYMBOL_NAME(sys_nanosleep)
+ .long SYMBOL_NAME(sys_mremap)
+ .long SYMBOL_NAME(sys_setresuid16)
+ .long SYMBOL_NAME(sys_getresuid16) /* 165 */
+ .long SYMBOL_NAME(sys_ni_syscall) /* was VM86 */
+ .long SYMBOL_NAME(sys_query_module)
+ .long SYMBOL_NAME(sys_poll)
+ .long SYMBOL_NAME(sys_nfsservctl)
+ .long SYMBOL_NAME(sys_setresgid16) /* 170 */
+ .long SYMBOL_NAME(sys_getresgid16)
+ .long SYMBOL_NAME(sys_prctl)
+ .long SYMBOL_NAME(sys_rt_sigreturn)
+ .long SYMBOL_NAME(sys_rt_sigaction)
+ .long SYMBOL_NAME(sys_rt_sigprocmask) /* 175 */
+ .long SYMBOL_NAME(sys_rt_sigpending)
+ .long SYMBOL_NAME(sys_rt_sigtimedwait)
+ .long SYMBOL_NAME(sys_rt_sigqueueinfo)
+ .long SYMBOL_NAME(sys_rt_sigsuspend)
+ .long SYMBOL_NAME(sys_pread) /* 180 */
+ .long SYMBOL_NAME(sys_pwrite)
+ .long SYMBOL_NAME(sys_chown16)
+ .long SYMBOL_NAME(sys_getcwd)
+ .long SYMBOL_NAME(sys_capget)
+ .long SYMBOL_NAME(sys_capset) /* 185 */
+ .long SYMBOL_NAME(sys_sigaltstack)
+ .long SYMBOL_NAME(sys_sendfile)
+ .long SYMBOL_NAME(sys_ni_syscall) /* streams1 */
+ .long SYMBOL_NAME(sys_ni_syscall) /* streams2 */
+ .long SYMBOL_NAME(sys_vfork) /* 190 */
+ .long SYMBOL_NAME(sys_getrlimit)
+ .long SYMBOL_NAME(sys_mmap2)
+ .long SYMBOL_NAME(sys_truncate64)
+ .long SYMBOL_NAME(sys_ftruncate64)
+ .long SYMBOL_NAME(sys_stat64) /* 195 */
+ .long SYMBOL_NAME(sys_lstat64)
+ .long SYMBOL_NAME(sys_fstat64)
+ .long SYMBOL_NAME(sys_lchown)
+ .long SYMBOL_NAME(sys_getuid)
+ .long SYMBOL_NAME(sys_getgid) /* 200 */
+ .long SYMBOL_NAME(sys_geteuid)
+ .long SYMBOL_NAME(sys_getegid)
+ .long SYMBOL_NAME(sys_setreuid)
+ .long SYMBOL_NAME(sys_setregid)
+ .long SYMBOL_NAME(sys_getgroups) /* 205 */
+ .long SYMBOL_NAME(sys_setgroups)
+ .long SYMBOL_NAME(sys_fchown)
+ .long SYMBOL_NAME(sys_setresuid)
+ .long SYMBOL_NAME(sys_getresuid)
+ .long SYMBOL_NAME(sys_setresgid) /* 210 */
+ .long SYMBOL_NAME(sys_getresgid)
+ .long SYMBOL_NAME(sys_chown)
+ .long SYMBOL_NAME(sys_setuid)
+ .long SYMBOL_NAME(sys_setgid)
+ .long SYMBOL_NAME(sys_setfsuid) /* 215 */
+ .long SYMBOL_NAME(sys_setfsgid)
+ .long SYMBOL_NAME(sys_pivot_root)
+ .long SYMBOL_NAME(sys_mincore)
+ .long SYMBOL_NAME(sys_madvise)
+ .long SYMBOL_NAME(sys_getdents64) /* 220 */
+ .long SYMBOL_NAME(sys_fcntl64)
+ .long SYMBOL_NAME(sys_ni_syscall) /* reserved for TUX */
+ .long SYMBOL_NAME(sys_ni_syscall) /* Reserved for Security */
+ .long SYMBOL_NAME(sys_gettid)
+ .long SYMBOL_NAME(sys_readahead) /* 225 */
+ .long SYMBOL_NAME(sys_setxattr)
+ .long SYMBOL_NAME(sys_lsetxattr)
+ .long SYMBOL_NAME(sys_fsetxattr)
+ .long SYMBOL_NAME(sys_getxattr)
+ .long SYMBOL_NAME(sys_lgetxattr) /* 230 */
+ .long SYMBOL_NAME(sys_fgetxattr)
+ .long SYMBOL_NAME(sys_listxattr)
+ .long SYMBOL_NAME(sys_llistxattr)
+ .long SYMBOL_NAME(sys_flistxattr)
+ .long SYMBOL_NAME(sys_removexattr) /* 235 */
+ .long SYMBOL_NAME(sys_lremovexattr)
+ .long SYMBOL_NAME(sys_fremovexattr)
+ .long SYMBOL_NAME(sys_tkill)
+ .long SYMBOL_NAME(sys_sendfile64)
+ .long SYMBOL_NAME(sys_ni_syscall) /* 240 reserved for futex */
+ .long SYMBOL_NAME(sys_ni_syscall) /* reserved for sched_setaffinity */
+ .long SYMBOL_NAME(sys_ni_syscall) /* reserved for sched_getaffinity */
+ .long SYMBOL_NAME(sys_ni_syscall) /* sys_set_thread_area */
+ .long SYMBOL_NAME(sys_ni_syscall) /* sys_get_thread_area */
+ .long SYMBOL_NAME(sys_ni_syscall) /* 245 sys_io_setup */
+ .long SYMBOL_NAME(sys_ni_syscall) /* sys_io_destroy */
+ .long SYMBOL_NAME(sys_ni_syscall) /* sys_io_getevents */
+ .long SYMBOL_NAME(sys_ni_syscall) /* sys_io_submit */
+ .long SYMBOL_NAME(sys_ni_syscall) /* sys_io_cancel */
+ .long SYMBOL_NAME(sys_ni_syscall) /* 250 sys_alloc_hugepages */
+ .long SYMBOL_NAME(sys_ni_syscall) /* sys_free_hugepages */
+ .long SYMBOL_NAME(sys_ni_syscall) /* sys_exit_group */
+ .long SYMBOL_NAME(sys_ni_syscall) /* sys_lookup_dcookie */
+ .long SYMBOL_NAME(sys_ni_syscall) /* sys_epoll_create */
+ .long SYMBOL_NAME(sys_ni_syscall) /* sys_epoll_ctl 255 */
+ .long SYMBOL_NAME(sys_ni_syscall) /* sys_epoll_wait */
+ .long SYMBOL_NAME(sys_ni_syscall) /* sys_remap_file_pages */
+ .long SYMBOL_NAME(sys_ni_syscall) /* sys_set_tid_address */
+
+ .rept NR_syscalls-(.-sys_call_table)/4
+ .long SYMBOL_NAME(sys_ni_syscall)
+ .endr
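
The .rept/.endr pair pads the table out to NR_syscalls entries, so any
syscall number that lacks an implementation lands on sys_ni_syscall
instead of jumping through a wild pointer. A minimal C sketch of the
same idea follows; it is illustrative only (the real dispatch lives in
entry.S, and the NR_syscalls value here is an assumed stand-in):

#include <stdio.h>

#define NR_syscalls 256            /* assumed value for the sketch */
#define ENOSYS      38

typedef long (*syscall_fn_t)(void);

static long sys_ni_syscall(void) { return -ENOSYS; }

static syscall_fn_t sys_call_table[NR_syscalls];

static long dispatch(unsigned int nr)
{
    if (nr >= NR_syscalls)
        return -ENOSYS;
    return sys_call_table[nr]();   /* padded slots hit sys_ni_syscall */
}

int main(void)
{
    int i;
    for (i = 0; i < NR_syscalls; i++)
        sys_call_table[i] = sys_ni_syscall; /* what .rept/.endr achieves */
    printf("%ld\n", dispatch(250));         /* -38: not implemented */
    return 0;
}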
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/arch/xen/kernel/head.S
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/arch/xen/kernel/head.S Tue Aug 9 15:17:45 2005
@@ -0,0 +1,39 @@
+
+.section __xen_guest
+ .asciz "GUEST_OS=linux,GUEST_VER=2.4,XEN_VER=2.0,VIRT_BASE=0xC0000000"
+
+.text
+#include <linux/config.h>
+#include <linux/threads.h>
+#include <linux/linkage.h>
+#include <asm/segment.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/desc.h>
+
+ENTRY(stext)
+ENTRY(_stext)
+ cld
+ lss stack_start,%esp
+ /* Copy the necessary stuff from xen_start_info structure. */
+ mov $SYMBOL_NAME(xen_start_info_union),%edi
+ mov $128,%ecx
+ rep movsl
+ jmp SYMBOL_NAME(start_kernel)
+
+ENTRY(stack_start)
+ .long SYMBOL_NAME(init_task_union)+8192, __KERNEL_DS
+
+.org 0x1000
+ENTRY(empty_zero_page)
+
+.org 0x2000
+ENTRY(default_ldt)
+
+.org 0x3000
+ENTRY(cpu0_pte_quicklist)
+
+.org 0x3400
+ENTRY(cpu0_pgd_quicklist)
+
+.org 0x3800
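
The entry point above loads a stack, then copies 128 longwords (512
bytes) of start-of-day information into xen_start_info_union before
jumping to start_kernel; %esi holds the source pointer on entry from
Xen. A hedged C equivalent of that rep movsl sequence (names are
illustrative, not part of this tree):

#include <stdint.h>
#include <string.h>

#define START_INFO_LONGS 128   /* matches the $128 loaded into %ecx */

/* dst stands in for xen_start_info_union; src for the %esi handover */
void copy_start_info(uint32_t *dst, const uint32_t *src)
{
    memcpy(dst, src, START_INFO_LONGS * sizeof(uint32_t));
}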
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/arch/xen/kernel/i386_ksyms.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/arch/xen/kernel/i386_ksyms.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,180 @@
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/smp.h>
+#include <linux/user.h>
+#include <linux/elfcore.h>
+#include <linux/mca.h>
+#include <linux/sched.h>
+#include <linux/in6.h>
+#include <linux/interrupt.h>
+#include <linux/smp_lock.h>
+#include <linux/pm.h>
+#include <linux/pci.h>
+#include <linux/apm_bios.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/tty.h>
+
+#include <asm/semaphore.h>
+#include <asm/processor.h>
+#include <asm/i387.h>
+#include <asm/uaccess.h>
+#include <asm/checksum.h>
+#include <asm/io.h>
+#include <asm/hardirq.h>
+#include <asm/delay.h>
+#include <asm/irq.h>
+#include <asm/mmx.h>
+#include <asm/desc.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+
+extern void dump_thread(struct pt_regs *, struct user *);
+extern spinlock_t rtc_lock;
+
+#if defined(CONFIG_APMXXX) || defined(CONFIG_APM_MODULEXXX)
+extern void machine_real_restart(unsigned char *, int);
+EXPORT_SYMBOL(machine_real_restart);
+extern void default_idle(void);
+EXPORT_SYMBOL(default_idle);
+#endif
+
+#ifdef CONFIG_SMP
+extern void FASTCALL( __write_lock_failed(rwlock_t *rw));
+extern void FASTCALL( __read_lock_failed(rwlock_t *rw));
+#endif
+
+#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_HD) || defined(CONFIG_BLK_DEV_IDE_MODULE) || defined(CONFIG_BLK_DEV_HD_MODULE)
+extern struct drive_info_struct drive_info;
+EXPORT_SYMBOL(drive_info);
+#endif
+
+// XXX extern unsigned long get_cmos_time(void);
+
+/* platform dependent support */
+EXPORT_SYMBOL(boot_cpu_data);
+EXPORT_SYMBOL(dump_thread);
+EXPORT_SYMBOL(dump_fpu);
+EXPORT_SYMBOL(dump_extended_fpu);
+EXPORT_SYMBOL(__ioremap);
+EXPORT_SYMBOL(iounmap);
+EXPORT_SYMBOL(enable_irq);
+EXPORT_SYMBOL(disable_irq);
+EXPORT_SYMBOL(disable_irq_nosync);
+EXPORT_SYMBOL(probe_irq_mask);
+EXPORT_SYMBOL(kernel_thread);
+EXPORT_SYMBOL(pm_idle);
+EXPORT_SYMBOL(pm_power_off);
+EXPORT_SYMBOL(apm_info);
+//EXPORT_SYMBOL(gdt);
+EXPORT_SYMBOL(empty_zero_page);
+EXPORT_SYMBOL(phys_to_machine_mapping);
+
+
+#ifdef CONFIG_DEBUG_IOVIRT
+EXPORT_SYMBOL(__io_virt_debug);
+#endif
+
+EXPORT_SYMBOL_NOVERS(__down_failed);
+EXPORT_SYMBOL_NOVERS(__down_failed_interruptible);
+EXPORT_SYMBOL_NOVERS(__down_failed_trylock);
+EXPORT_SYMBOL_NOVERS(__up_wakeup);
+/* Networking helper routines. */
+EXPORT_SYMBOL(csum_partial_copy_generic);
+/* Delay loops */
+EXPORT_SYMBOL(__ndelay);
+EXPORT_SYMBOL(__udelay);
+EXPORT_SYMBOL(__delay);
+EXPORT_SYMBOL(__const_udelay);
+
+EXPORT_SYMBOL_NOVERS(__get_user_1);
+EXPORT_SYMBOL_NOVERS(__get_user_2);
+EXPORT_SYMBOL_NOVERS(__get_user_4);
+
+EXPORT_SYMBOL(strtok);
+EXPORT_SYMBOL(strpbrk);
+EXPORT_SYMBOL(strstr);
+
+EXPORT_SYMBOL(strncpy_from_user);
+EXPORT_SYMBOL(__strncpy_from_user);
+EXPORT_SYMBOL(clear_user);
+EXPORT_SYMBOL(__clear_user);
+EXPORT_SYMBOL(__generic_copy_from_user);
+EXPORT_SYMBOL(__generic_copy_to_user);
+EXPORT_SYMBOL(strnlen_user);
+
+
+EXPORT_SYMBOL(pci_alloc_consistent);
+EXPORT_SYMBOL(pci_free_consistent);
+
+#ifdef CONFIG_PCI
+EXPORT_SYMBOL(pcibios_penalize_isa_irq);
+EXPORT_SYMBOL(pci_mem_start);
+#endif
+
+
+#ifdef CONFIG_X86_USE_3DNOW
+EXPORT_SYMBOL(_mmx_memcpy);
+EXPORT_SYMBOL(mmx_clear_page);
+EXPORT_SYMBOL(mmx_copy_page);
+#endif
+
+#ifdef CONFIG_SMP
+EXPORT_SYMBOL(cpu_data);
+EXPORT_SYMBOL(kernel_flag_cacheline);
+EXPORT_SYMBOL(smp_num_cpus);
+EXPORT_SYMBOL(cpu_online_map);
+EXPORT_SYMBOL_NOVERS(__write_lock_failed);
+EXPORT_SYMBOL_NOVERS(__read_lock_failed);
+
+/* Global SMP irq stuff */
+EXPORT_SYMBOL(synchronize_irq);
+EXPORT_SYMBOL(global_irq_holder);
+EXPORT_SYMBOL(__global_cli);
+EXPORT_SYMBOL(__global_sti);
+EXPORT_SYMBOL(__global_save_flags);
+EXPORT_SYMBOL(__global_restore_flags);
+EXPORT_SYMBOL(smp_call_function);
+
+/* TLB flushing */
+EXPORT_SYMBOL(flush_tlb_page);
+
+/* HT support */
+EXPORT_SYMBOL(smp_num_siblings);
+EXPORT_SYMBOL(cpu_sibling_map);
+#endif
+
+#ifdef CONFIG_X86_IO_APIC
+EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
+#endif
+
+#ifdef CONFIG_VT
+EXPORT_SYMBOL(screen_info);
+#endif
+
+EXPORT_SYMBOL(get_wchan);
+
+EXPORT_SYMBOL(rtc_lock);
+
+#undef memcpy
+#undef memset
+extern void * memset(void *,int,__kernel_size_t);
+extern void * memcpy(void *,const void *,__kernel_size_t);
+EXPORT_SYMBOL_NOVERS(memcpy);
+EXPORT_SYMBOL_NOVERS(memset);
+
+#ifdef CONFIG_HAVE_DEC_LOCK
+EXPORT_SYMBOL(atomic_dec_and_lock);
+#endif
+
+#ifdef CONFIG_MULTIQUAD
+EXPORT_SYMBOL(xquad_portio);
+#endif
+
+#include <asm/xen_proc.h>
+EXPORT_SYMBOL(create_xen_proc_entry);
+EXPORT_SYMBOL(remove_xen_proc_entry);
+
+EXPORT_SYMBOL(evtchn_do_upcall);
+EXPORT_SYMBOL(force_evtchn_callback);
+EXPORT_SYMBOL(HYPERVISOR_shared_info);
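
Each EXPORT_SYMBOL above adds an entry to the kernel's symbol table so
loadable modules can resolve it at insmod time. A hedged sketch of a
2.4-style module picking up one of these exports, kernel_thread()
(module boilerplate per 2.4 conventions; the demo names are invented):

#include <linux/module.h>
#include <linux/sched.h>

static int worker(void *unused)
{
    /* ... do some work in process context, then exit ... */
    return 0;
}

static int __init demo_init(void)
{
    /* resolves against EXPORT_SYMBOL(kernel_thread) above */
    int pid = kernel_thread(worker, NULL, CLONE_FS | CLONE_FILES);
    return pid < 0 ? pid : 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);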
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/arch/xen/kernel/irq.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/arch/xen/kernel/irq.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,1242 @@
+/*
+ * linux/arch/i386/kernel/irq.c
+ *
+ * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
+ *
+ * This file contains the code used by various IRQ handling routines:
+ * asking for different IRQ's should be done through these routines
+ * instead of just grabbing them. Thus setups with different IRQ numbers
+ * shouldn't result in any weird surprises, and installing new handlers
+ * should be easier.
+ */
+
+/*
+ * (mostly architecture independent, will move to kernel/irq.c in 2.5.)
+ *
+ * IRQs are in fact implemented a bit like signal handlers for the kernel.
+ * Naturally it's not a 1:1 relation, but there are similarities.
+ */
+
+#include <linux/config.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/timex.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/smp_lock.h>
+#include <linux/init.h>
+#include <linux/kernel_stat.h>
+#include <linux/irq.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+
+#include <asm/atomic.h>
+#include <asm/io.h>
+#include <asm/smp.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/uaccess.h>
+#include <asm/pgalloc.h>
+#include <asm/delay.h>
+#include <asm/desc.h>
+#include <asm/irq.h>
+
+
+
+/*
+ * Linux has a controller-independent x86 interrupt architecture.
+ * Every controller has a 'controller-template', that is used
+ * by the main code to do the right thing. Each driver-visible
+ * interrupt source is transparently wired to the appropriate
+ * controller. Thus drivers need not be aware of the
+ * interrupt-controller.
+ *
+ * Various interrupt controllers we handle: 8259 PIC, SMP IO-APIC,
+ * PIIX4's internal 8259 PIC and SGI's Visual Workstation Cobalt (IO-)APIC.
+ * (IO-APICs assumed to be messaging to Pentium local-APICs)
+ *
+ * The code is designed to be easily extended with new/different
+ * interrupt controllers, without having to do assembly magic.
+ */
+
+/*
+ * Controller mappings for all interrupt sources:
+ */
+irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned =
+ { [0 ... NR_IRQS-1] = { 0, &no_irq_type, NULL, 0, SPIN_LOCK_UNLOCKED}};
+
+static void register_irq_proc (unsigned int irq);
+
+/*
+ * Special irq handlers.
+ */
+
+void no_action(int cpl, void *dev_id, struct pt_regs *regs) { }
+
+/*
+ * Generic no controller code
+ */
+
+static void enable_none(unsigned int irq) { }
+static unsigned int startup_none(unsigned int irq) { return 0; }
+static void disable_none(unsigned int irq) { }
+static void ack_none(unsigned int irq)
+{
+/*
+ * 'what should we do if we get a hw irq event on an illegal vector'.
+ * Each architecture has to answer this itself; it doesn't deserve
+ * a generic callback, I think.
+ */
+#if CONFIG_X86
+ printk("unexpected IRQ trap at vector %02x\n", irq);
+#ifdef CONFIG_X86_LOCAL_APIC
+ /*
+ * Currently unexpected vectors happen only on SMP and APIC.
+ * We _must_ ack these because every local APIC has only N
+ * irq slots per priority level, and a 'hanging, unacked' IRQ
+ * holds up an irq slot - in excessive cases (when multiple
+ * unexpected vectors occur) that might lock up the APIC
+ * completely.
+ */
+ ack_APIC_irq();
+#endif
+#endif
+}
+
+/* startup is the same as "enable", shutdown is same as "disable" */
+#define shutdown_none disable_none
+#define end_none enable_none
+
+struct hw_interrupt_type no_irq_type = {
+ "none",
+ startup_none,
+ shutdown_none,
+ enable_none,
+ disable_none,
+ ack_none,
+ end_none
+};
+
+atomic_t irq_err_count;
+#ifdef CONFIG_X86_IO_APIC
+#ifdef APIC_MISMATCH_DEBUG
+atomic_t irq_mis_count;
+#endif
+#endif
+
+/*
+ * Generic, controller-independent functions:
+ */
+
+int show_interrupts(struct seq_file *p, void *v)
+{
+ int i, j;
+ struct irqaction * action;
+
+ seq_printf(p, " ");
+ for (j=0; j<smp_num_cpus; j++)
+ seq_printf(p, "CPU%d ",j);
+ seq_putc(p,'\n');
+
+ for (i = 0 ; i < NR_IRQS ; i++) {
+ action = irq_desc[i].action;
+ if (!action)
+ continue;
+ seq_printf(p, "%3d: ",i);
+#ifndef CONFIG_SMP
+ seq_printf(p, "%10u ", kstat_irqs(i));
+#else
+ for (j = 0; j < smp_num_cpus; j++)
+ seq_printf(p, "%10u ",
+ kstat.irqs[cpu_logical_map(j)][i]);
+#endif
+ seq_printf(p, " %14s", irq_desc[i].handler->typename);
+ seq_printf(p, " %s", action->name);
+
+ for (action=action->next; action; action = action->next)
+ seq_printf(p, ", %s", action->name);
+ seq_putc(p,'\n');
+ }
+ seq_printf(p, "NMI: ");
+ for (j = 0; j < smp_num_cpus; j++)
+ seq_printf(p, "%10u ",
+ nmi_count(cpu_logical_map(j)));
+ seq_printf(p, "\n");
+#if CONFIG_X86_LOCAL_APIC
+ seq_printf(p, "LOC: ");
+ for (j = 0; j < smp_num_cpus; j++)
+ seq_printf(p, "%10u ",
+ apic_timer_irqs[cpu_logical_map(j)]);
+ seq_printf(p, "\n");
+#endif
+ seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
+#ifdef CONFIG_X86_IO_APIC
+#ifdef APIC_MISMATCH_DEBUG
+ seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
+#endif
+#endif
+
+ return 0;
+}
+
+
+/*
+ * Global interrupt locks for SMP. Allow interrupts to come in on any
+ * CPU, yet make cli/sti act globally to protect critical regions..
+ */
+
+#ifdef CONFIG_SMP
+unsigned char global_irq_holder = NO_PROC_ID;
+unsigned volatile long global_irq_lock; /* pedantic: long for set_bit --RR */
+
+extern void show_stack(unsigned long* esp);
+
+static void show(char * str)
+{
+ int i;
+ int cpu = smp_processor_id();
+
+ printk("\n%s, CPU %d:\n", str, cpu);
+ printk("irq: %d [",irqs_running());
+ for(i=0;i < smp_num_cpus;i++)
+ printk(" %d",local_irq_count(i));
+ printk(" ]\nbh: %d [",spin_is_locked(&global_bh_lock) ? 1 : 0);
+ for(i=0;i < smp_num_cpus;i++)
+ printk(" %d",local_bh_count(i));
+
+ printk(" ]\nStack dumps:");
+ for(i = 0; i < smp_num_cpus; i++) {
+ unsigned long esp;
+ if (i == cpu)
+ continue;
+ printk("\nCPU %d:",i);
+ esp = init_tss[i].esp0;
+ if (!esp) {
+ /* tss->esp0 is set to NULL in cpu_init(),
+ * it's initialized when the cpu returns to user
+ * space. -- manfreds
+ */
+ printk(" <unknown> ");
+ continue;
+ }
+ esp &= ~(THREAD_SIZE-1);
+ esp += sizeof(struct task_struct);
+ show_stack((void*)esp);
+ }
+ printk("\nCPU %d:",cpu);
+ show_stack(NULL);
+ printk("\n");
+}
+
+#define MAXCOUNT 100000000
+
+/*
+ * I had a lockup scenario where a tight loop doing
+ * spin_unlock()/spin_lock() on CPU#1 was racing with
+ * spin_lock() on CPU#0. CPU#0 should have noticed spin_unlock(), but
+ * apparently the spin_unlock() information did not make it
+ * through to CPU#0 ... nasty, is this by design, do we have to limit
+ * 'memory update oscillation frequency' artificially like here?
+ *
+ * Such 'high frequency update' races can be avoided by careful design, but
+ * some of our major constructs like spinlocks use similar techniques,
+ * it would be nice to clarify this issue. Set this define to 0 if you
+ * want to check whether your system freezes. I suspect the delay done
+ * by SYNC_OTHER_CORES() is in correlation with 'snooping latency', but
+ * I thought that such things are guaranteed by design, since we use
+ * the 'LOCK' prefix.
+ */
+#define SUSPECTED_CPU_OR_CHIPSET_BUG_WORKAROUND 0
+
+#if SUSPECTED_CPU_OR_CHIPSET_BUG_WORKAROUND
+# define SYNC_OTHER_CORES(x) udelay(x+1)
+#else
+/*
+ * We have to allow irqs to arrive between __sti and __cli
+ */
+# define SYNC_OTHER_CORES(x) __asm__ __volatile__ ("nop")
+#endif
+
+static inline void wait_on_irq(int cpu)
+{
+ int count = MAXCOUNT;
+
+ for (;;) {
+
+ /*
+ * Wait until all interrupts are gone. Wait
+ * for bottom half handlers unless we're
+ * already executing in one..
+ */
+ if (!irqs_running())
+ if (local_bh_count(cpu) || !spin_is_locked(&global_bh_lock))
+ break;
+
+ /* Duh, we have to loop. Release the lock to avoid deadlocks */
+ clear_bit(0,&global_irq_lock);
+
+ for (;;) {
+ if (!--count) {
+ show("wait_on_irq");
+ count = ~0;
+ }
+ __sti();
+ SYNC_OTHER_CORES(cpu);
+ __cli();
+ if (irqs_running())
+ continue;
+ if (global_irq_lock)
+ continue;
+ if (!local_bh_count(cpu) && spin_is_locked(&global_bh_lock))
+ continue;
+ if (!test_and_set_bit(0,&global_irq_lock))
+ break;
+ }
+ }
+}
+
+/*
+ * This is called when we want to synchronize with
+ * interrupts. We may for example tell a device to
+ * stop sending interrupts: but to make sure there
+ * are no interrupts that are executing on another
+ * CPU we need to call this function.
+ */
+void synchronize_irq(void)
+{
+ if (irqs_running()) {
+ /* Stupid approach */
+ cli();
+ sti();
+ }
+}
+
+static inline void get_irqlock(int cpu)
+{
+ if (test_and_set_bit(0,&global_irq_lock)) {
+ /* do we already hold the lock? */
+ if ((unsigned char) cpu == global_irq_holder)
+ return;
+ /* Uhhuh.. Somebody else got it. Wait.. */
+ do {
+ do {
+ rep_nop();
+ } while (test_bit(0,&global_irq_lock));
+ } while (test_and_set_bit(0,&global_irq_lock));
+ }
+ /*
+ * We also have to make sure that nobody else is running
+ * in an interrupt context.
+ */
+ wait_on_irq(cpu);
+
+ /*
+ * Ok, finally..
+ */
+ global_irq_holder = cpu;
+}
+
+/*
+ * A global "cli()" while in an interrupt context
+ * turns into just a local cli(). Interrupts
+ * should use spinlocks for the (very unlikely)
+ * case that they ever want to protect against
+ * each other.
+ *
+ * If we already have local interrupts disabled,
+ * this will not turn a local disable into a
+ * global one (problems with spinlocks: this makes
+ * save_flags+cli+sti usable inside a spinlock).
+ */
+void __global_cli(void)
+{
+ unsigned int flags;
+
+ __save_flags(flags);
+ if (!flags) {
+ int cpu = smp_processor_id();
+ __cli();
+ if (!local_irq_count(cpu))
+ get_irqlock(cpu);
+ }
+}
+
+void __global_sti(void)
+{
+ int cpu = smp_processor_id();
+
+ if (!local_irq_count(cpu))
+ release_irqlock(cpu);
+ __sti();
+}
+
+/*
+ * SMP flags value to restore to:
+ * 0 - global cli
+ * 1 - global sti
+ * 2 - local cli
+ * 3 - local sti
+ */
+unsigned long __global_save_flags(void)
+{
+ int retval;
+ int local_enabled;
+ unsigned long flags;
+ int cpu = smp_processor_id();
+
+ __save_flags(flags);
+ local_enabled = !flags;
+ /* default to local */
+ retval = 2 + local_enabled;
+
+ /* check for global flags if we're not in an interrupt */
+ if (!local_irq_count(cpu)) {
+ if (local_enabled)
+ retval = 1;
+ if (global_irq_holder == cpu)
+ retval = 0;
+ }
+ return retval;
+}
+
+void __global_restore_flags(unsigned long flags)
+{
+ switch (flags) {
+ case 0:
+ __global_cli();
+ break;
+ case 1:
+ __global_sti();
+ break;
+ case 2:
+ __cli();
+ break;
+ case 3:
+ __sti();
+ break;
+ default:
+ printk("global_restore_flags: %08lx (%08lx)\n",
+ flags, (&flags)[-1]);
+ }
+}
+
+#endif
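
The four-valued encoding means a caller can save whatever interrupt
state it finds, disable, and later restore exactly that state, whether
the disable was local or global. A hedged sketch of the classic 2.4
calling pattern (on SMP, save_flags/cli/restore_flags expand to the
__global_* routines above; the function name is invented):

#include <linux/sched.h>
#include <asm/system.h>

static void touch_shared_data(void)
{
    unsigned long flags;

    save_flags(flags);   /* records one of the four states above */
    cli();               /* global cli unless already in irq context */
    /* ... critical section against interrupt handlers ... */
    restore_flags(flags);
}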
+
+/*
+ * This should really return information about whether
+ * we should do bottom half handling etc. Right now we
+ * end up _always_ checking the bottom half, which is a
+ * waste of time and is not what some drivers would
+ * prefer.
+ */
+int handle_IRQ_event(unsigned int irq, struct pt_regs * regs, struct irqaction * action)
+{
+ int status;
+ int cpu = smp_processor_id();
+
+ irq_enter(cpu, irq);
+
+ status = 1; /* Force the "do bottom halves" bit */
+
+ if (!(action->flags & SA_INTERRUPT))
+ __sti();
+
+ do {
+ status |= action->flags;
+ action->handler(irq, action->dev_id, regs);
+ action = action->next;
+ } while (action);
+ if (status & SA_SAMPLE_RANDOM)
+ add_interrupt_randomness(irq);
+ __cli();
+
+ irq_exit(cpu, irq);
+
+ return status;
+}
+
+/*
+ * Generic enable/disable code: this just calls
+ * down into the PIC-specific version for the actual
+ * hardware disable after having gotten the irq
+ * controller lock.
+ */
+
+/**
+ * disable_irq_nosync - disable an irq without waiting
+ * @irq: Interrupt to disable
+ *
+ * Disable the selected interrupt line. Disables and Enables are
+ * nested.
+ * Unlike disable_irq(), this function does not ensure existing
+ * instances of the IRQ handler have completed before returning.
+ *
+ * This function may be called from IRQ context.
+ */
+
+inline void disable_irq_nosync(unsigned int irq)
+{
+ irq_desc_t *desc = irq_desc + irq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&desc->lock, flags);
+ if (!desc->depth++) {
+ desc->status |= IRQ_DISABLED;
+ desc->handler->disable(irq);
+ }
+ spin_unlock_irqrestore(&desc->lock, flags);
+}
+
+/**
+ * disable_irq - disable an irq and wait for completion
+ * @irq: Interrupt to disable
+ *
+ * Disable the selected interrupt line. Enables and Disables are
+ * nested.
+ * This function waits for any pending IRQ handlers for this interrupt
+ * to complete before returning. If you use this function while
+ * holding a resource the IRQ handler may need you will deadlock.
+ *
+ * This function may be called - with care - from IRQ context.
+ */
+
+void disable_irq(unsigned int irq)
+{
+ disable_irq_nosync(irq);
+
+ if (!local_irq_count(smp_processor_id())) {
+ do {
+ barrier();
+ cpu_relax();
+ } while (irq_desc[irq].status & IRQ_INPROGRESS);
+ }
+}
+
+/**
+ * enable_irq - enable handling of an irq
+ * @irq: Interrupt to enable
+ *
+ * Undoes the effect of one call to disable_irq(). If this
+ * matches the last disable, processing of interrupts on this
+ * IRQ line is re-enabled.
+ *
+ * This function may be called from IRQ context.
+ */
+
+void enable_irq(unsigned int irq)
+{
+ irq_desc_t *desc = irq_desc + irq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&desc->lock, flags);
+ switch (desc->depth) {
+ case 1: {
+ unsigned int status = desc->status & ~IRQ_DISABLED;
+ desc->status = status;
+ if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
+ desc->status = status | IRQ_REPLAY;
+ hw_resend_irq(desc->handler,irq);
+ }
+ desc->handler->enable(irq);
+ /* fall-through */
+ }
+ default:
+ desc->depth--;
+ break;
+ case 0:
+ printk("enable_irq(%u) unbalanced from %p\n", irq,
+ __builtin_return_address(0));
+ }
+ spin_unlock_irqrestore(&desc->lock, flags);
+}
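
Because the depth counter nests, a driver can bracket reconfiguration
with a disable/enable pair without worrying about other paths that may
also have the line disabled. A hedged usage sketch (the function name
is invented; header placement follows the 2.4 tree layout):

#include <asm/irq.h>

static void reprogram_device(unsigned int irq)
{
    disable_irq(irq);   /* waits for any running handler to finish */
    /* ... reprogram the hardware while its handler is quiesced ... */
    enable_irq(irq);    /* line goes live again only when depth hits 0 */
}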
+
+/*
+ * do_IRQ handles all normal device IRQ's (the special
+ * SMP cross-CPU interrupts have their own specific
+ * handlers).
+ */
+asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
+{
+ /*
+ * We ack quickly, we don't want the irq controller
+ * thinking we're snobs just because some other CPU has
+ * disabled global interrupts (we have already done the
+ * INT_ACK cycles, it's too late to try to pretend to the
+ * controller that we aren't taking the interrupt).
+ *
+ * 0 return value means that this irq is already being
+ * handled by some other CPU. (or is disabled)
+ */
+ int irq = regs->orig_eax & 0xff; /* high bits used in ret_from_ code */
+ int cpu = smp_processor_id();
+ irq_desc_t *desc = irq_desc + irq;
+ struct irqaction * action;
+ unsigned int status;
+#ifdef CONFIG_DEBUG_STACKOVERFLOW
+ long esp;
+
+ /* Debugging check for stack overflow: is there less than 1KB free? */
+ __asm__ __volatile__("andl %%esp,%0" : "=r" (esp) : "0" (8191));
+ if (unlikely(esp < (sizeof(struct task_struct) + 1024))) {
+ extern void show_stack(unsigned long *);
+
+ printk("do_IRQ: stack overflow: %ld\n",
+ esp - sizeof(struct task_struct));
+ __asm__ __volatile__("movl %%esp,%0" : "=r" (esp));
+ show_stack((void *)esp);
+ }
+#endif
+
+ kstat.irqs[cpu][irq]++;
+ spin_lock(&desc->lock);
+ desc->handler->ack(irq);
+ /*
+ REPLAY is when Linux resends an IRQ that was dropped earlier
+ WAITING is used by probe to mark irqs that are being tested
+ */
+ status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
+ status |= IRQ_PENDING; /* we _want_ to handle it */
+
+ /*
+ * If the IRQ is disabled for whatever reason, we cannot
+ * use the action we have.
+ */
+ action = NULL;
+ if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
+ action = desc->action;
+ status &= ~IRQ_PENDING; /* we commit to handling */
+ status |= IRQ_INPROGRESS; /* we are handling it */
+ }
+ desc->status = status;
+
+ /*
+ * If there is no IRQ handler or it was disabled, exit early.
+ Since we set PENDING, if another processor is handling
+ a different instance of this same irq, the other processor
+ will take care of it.
+ */
+ if (!action)
+ goto out;
+
+ /*
+ * Edge triggered interrupts need to remember
+ * pending events.
+ * This applies to any hw interrupts that allow a second
+ * instance of the same irq to arrive while we are in do_IRQ
+ * or in the handler. But the code here only handles the _second_
+ * instance of the irq, not the third or fourth. So it is mostly
+ * useful for irq hardware that does not mask cleanly in an
+ * SMP environment.
+ */
+ for (;;) {
+ spin_unlock(&desc->lock);
+ handle_IRQ_event(irq, regs, action);
+ spin_lock(&desc->lock);
+
+ if (!(desc->status & IRQ_PENDING))
+ break;
+ desc->status &= ~IRQ_PENDING;
+ }
+ desc->status &= ~IRQ_INPROGRESS;
+out:
+ /*
+ * The ->end() handler has to deal with interrupts which got
+ * disabled while the handler was running.
+ */
+ desc->handler->end(irq);
+ spin_unlock(&desc->lock);
+
+ if (softirq_pending(cpu))
+ do_softirq();
+ return 1;
+}
+
+/**
+ * request_irq - allocate an interrupt line
+ * @irq: Interrupt line to allocate
+ * @handler: Function to be called when the IRQ occurs
+ * @irqflags: Interrupt type flags
+ * @devname: An ascii name for the claiming device
+ * @dev_id: A cookie passed back to the handler function
+ *
+ * This call allocates interrupt resources and enables the
+ * interrupt line and IRQ handling. From the point this
+ * call is made your handler function may be invoked. Since
+ * your handler function must clear any interrupt the board
+ * raises, you must take care both to initialise your hardware
+ * and to set up the interrupt handler in the right order.
+ *
+ * Dev_id must be globally unique. Normally the address of the
+ * device data structure is used as the cookie. Since the handler
+ * receives this value it makes sense to use it.
+ *
+ * If your interrupt is shared you must pass a non NULL dev_id
+ * as this is required when freeing the interrupt.
+ *
+ * Flags:
+ *
+ * SA_SHIRQ Interrupt is shared
+ *
+ * SA_INTERRUPT Disable local interrupts while processing
+ *
+ * SA_SAMPLE_RANDOM The interrupt can be used for entropy
+ *
+ */
+
+int request_irq(unsigned int irq,
+ void (*handler)(int, void *, struct pt_regs *),
+ unsigned long irqflags,
+ const char * devname,
+ void *dev_id)
+{
+ int retval;
+ struct irqaction * action;
+
+#if 1
+ /*
+ * Sanity-check: shared interrupts should REALLY pass in
+ * a real dev-ID, otherwise we'll have trouble later trying
+ * to figure out which interrupt is which (messes up the
+ * interrupt freeing logic etc).
+ */
+ if (irqflags & SA_SHIRQ) {
+ if (!dev_id)
+ printk("Bad boy: %s (at 0x%x) called us without a
dev_id!\n", devname, (&irq)[-1]);
+ }
+#endif
+
+ if (irq >= NR_IRQS)
+ return -EINVAL;
+ if (!handler)
+ return -EINVAL;
+
+ action = (struct irqaction *)
+ kmalloc(sizeof(struct irqaction), GFP_KERNEL);
+ if (!action)
+ return -ENOMEM;
+
+ action->handler = handler;
+ action->flags = irqflags;
+ action->mask = 0;
+ action->name = devname;
+ action->next = NULL;
+ action->dev_id = dev_id;
+
+ retval = setup_irq(irq, action);
+ if (retval)
+ kfree(action);
+ return retval;
+}
+
+/*
+ * Internal function to unregister an irqaction - typically used to
+ * deallocate special interrupts that are part of the architecture.
+ */
+int teardown_irq(unsigned int irq, struct irqaction * old)
+{
+ irq_desc_t *desc;
+ struct irqaction **p;
+ unsigned long flags;
+
+ if (irq >= NR_IRQS)
+ return -ENOENT;
+
+ desc = irq_desc + irq;
+ spin_lock_irqsave(&desc->lock,flags);
+ p = &desc->action;
+ for (;;) {
+ struct irqaction * action = *p;
+ if (action) {
+ struct irqaction **pp = p;
+ p = &action->next;
+ if (action != old)
+ continue;
+
+ /* Found it - now remove it from the list of entries */
+ *pp = action->next;
+ if (!desc->action) {
+ desc->status |= IRQ_DISABLED;
+ desc->handler->shutdown(irq);
+ }
+ spin_unlock_irqrestore(&desc->lock,flags);
+
+#ifdef CONFIG_SMP
+ /* Wait to make sure it's not being used on another CPU */
+ while (desc->status & IRQ_INPROGRESS) {
+ barrier();
+ cpu_relax();
+ }
+#endif
+ return 0;
+ }
+ printk("Trying to free free IRQ%d\n",irq);
+ spin_unlock_irqrestore(&desc->lock,flags);
+ return -ENOENT;
+ }
+}
+
+/**
+ * free_irq - free an interrupt
+ * @irq: Interrupt line to free
+ * @dev_id: Device identity to free
+ *
+ * Remove an interrupt handler. The handler is removed and if the
+ * interrupt line is no longer in use by any driver it is disabled.
+ * On a shared IRQ the caller must ensure the interrupt is disabled
+ * on the card it drives before calling this function. The function
+ * does not return until any executing interrupts for this IRQ
+ * have completed.
+ *
+ * This function may be called from interrupt context.
+ *
+ * Bugs: Attempting to free an irq in a handler for the same irq hangs
+ * the machine.
+ */
+
+void free_irq(unsigned int irq, void *dev_id)
+{
+ irq_desc_t *desc;
+ struct irqaction *action;
+ unsigned long flags;
+
+ if (irq >= NR_IRQS)
+ return;
+
+ desc = irq_desc + irq;
+ spin_lock_irqsave(&desc->lock,flags);
+ for (action = desc->action; action != NULL; action = action->next) {
+ if (action->dev_id != dev_id)
+ continue;
+
+ spin_unlock_irqrestore(&desc->lock,flags);
+
+ if (teardown_irq(irq, action) == 0)
+ kfree(action);
+ return;
+ }
+ printk("Trying to free free IRQ%d\n",irq);
+ spin_unlock_irqrestore(&desc->lock,flags);
+ return;
+}
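
A hedged sketch of the request_irq()/free_irq() contract documented
above: a shared handler must pass a unique, non-NULL dev_id, and the
same cookie is required to release the line. The device names and
structure are invented for illustration; the handler signature matches
the 2.4 prototype taken by request_irq() above.

#include <linux/sched.h>
#include <linux/interrupt.h>

struct mydev { int unit; };
static struct mydev dev0 = { 0 };

static void mydev_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
    struct mydev *d = dev_id;   /* the cookie passed to request_irq() */
    /* ... acknowledge and service the board ... */
    (void)d;
}

int mydev_attach(unsigned int irq)
{
    /* SA_SHIRQ: willing to share; dev_id disambiguates on free */
    return request_irq(irq, mydev_interrupt, SA_SHIRQ, "mydev", &dev0);
}

void mydev_detach(unsigned int irq)
{
    free_irq(irq, &dev0);       /* must match the dev_id used above */
}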
+
+/*
+ * IRQ autodetection code..
+ *
+ * This depends on the fact that any interrupt that
+ * comes in on to an unassigned handler will get stuck
+ * with "IRQ_WAITING" cleared and the interrupt
+ * disabled.
+ */
+
+static DECLARE_MUTEX(probe_sem);
+
+/**
+ * probe_irq_on - begin an interrupt autodetect
+ *
+ * Commence probing for an interrupt. The interrupts are scanned
+ * and a mask of potential interrupt lines is returned.
+ *
+ */
+
+unsigned long probe_irq_on(void)
+{
+ unsigned int i;
+ irq_desc_t *desc;
+ unsigned long val;
+ unsigned long delay;
+
+ down(&probe_sem);
+ /*
+ * something may have generated an irq long ago and we want to
+ * flush such a longstanding irq before considering it as spurious.
+ */
+ for (i = NR_PIRQS-1; i > 0; i--) {
+ desc = irq_desc + i;
+
+ spin_lock_irq(&desc->lock);
+ if (!irq_desc[i].action)
+ irq_desc[i].handler->startup(i);
+ spin_unlock_irq(&desc->lock);
+ }
+
+ /* Wait for longstanding interrupts to trigger. */
+ for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
+ /* about 20ms delay */ synchronize_irq();
+
+ /*
+ * enable any unassigned irqs
+ * (we must startup again here because if a longstanding irq
+ * happened in the previous stage, it may have masked itself)
+ */
+ for (i = NR_PIRQS-1; i > 0; i--) {
+ desc = irq_desc + i;
+
+ spin_lock_irq(&desc->lock);
+ if (!desc->action) {
+ desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
+ if (desc->handler->startup(i))
+ desc->status |= IRQ_PENDING;
+ }
+ spin_unlock_irq(&desc->lock);
+ }
+
+ /*
+ * Wait for spurious interrupts to trigger
+ */
+ for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
+ /* about 100ms delay */ synchronize_irq();
+
+ /*
+ * Now filter out any obviously spurious interrupts
+ */
+ val = 0;
+ for (i = 0; i < NR_PIRQS; i++) {
+ irq_desc_t *desc = irq_desc + i;
+ unsigned int status;
+
+ spin_lock_irq(&desc->lock);
+ status = desc->status;
+
+ if (status & IRQ_AUTODETECT) {
+ /* It triggered already - consider it spurious. */
+ if (!(status & IRQ_WAITING)) {
+ desc->status = status & ~IRQ_AUTODETECT;
+ desc->handler->shutdown(i);
+ } else
+ if (i < 32)
+ val |= 1 << i;
+ }
+ spin_unlock_irq(&desc->lock);
+ }
+
+ return val;
+}
+
+/*
+ * Return a mask of triggered interrupts (this
+ * can handle only legacy ISA interrupts).
+ */
+
+/**
+ * probe_irq_mask - scan a bitmap of interrupt lines
+ * @val: mask of interrupts to consider
+ *
+ * Scan the ISA bus interrupt lines and return a bitmap of
+ * active interrupts. The interrupt probe logic state is then
+ * returned to its previous value.
+ *
+ * Note: we need to scan all the irq's even though we will
+ * only return ISA irq numbers - just so that we reset them
+ * all to a known state.
+ */
+unsigned int probe_irq_mask(unsigned long val)
+{
+ int i;
+ unsigned int mask;
+
+ mask = 0;
+ for (i = 0; i < NR_PIRQS; i++) {
+ irq_desc_t *desc = irq_desc + i;
+ unsigned int status;
+
+ spin_lock_irq(&desc->lock);
+ status = desc->status;
+
+ if (status & IRQ_AUTODETECT) {
+ if (i < 16 && !(status & IRQ_WAITING))
+ mask |= 1 << i;
+
+ desc->status = status & ~IRQ_AUTODETECT;
+ desc->handler->shutdown(i);
+ }
+ spin_unlock_irq(&desc->lock);
+ }
+ up(&probe_sem);
+
+ return mask & val;
+}
+
+/*
+ * Return the one interrupt that triggered (this can
+ * handle any interrupt source).
+ */
+
+/**
+ * probe_irq_off - end an interrupt autodetect
+ * @val: mask of potential interrupts (unused)
+ *
+ * Scans the unused interrupt lines and returns the line which
+ * appears to have triggered the interrupt. If no interrupt was
+ * found then zero is returned. If more than one interrupt is
+ * found then minus the first candidate is returned to indicate
+ *	there is doubt.
+ *
+ * The interrupt probe logic state is returned to its previous
+ * value.
+ *
+ *	BUGS: When used in a module (which arguably shouldn't happen)
+ * nothing prevents two IRQ probe callers from overlapping. The
+ * results of this are non-optimal.
+ */
+
+int probe_irq_off(unsigned long val)
+{
+ int i, irq_found, nr_irqs;
+
+ nr_irqs = 0;
+ irq_found = 0;
+ for (i = 0; i < NR_PIRQS; i++) {
+ irq_desc_t *desc = irq_desc + i;
+ unsigned int status;
+
+ spin_lock_irq(&desc->lock);
+ status = desc->status;
+
+ if (status & IRQ_AUTODETECT) {
+ if (!(status & IRQ_WAITING)) {
+ if (!nr_irqs)
+ irq_found = i;
+ nr_irqs++;
+ }
+ desc->status = status & ~IRQ_AUTODETECT;
+ desc->handler->shutdown(i);
+ }
+ spin_unlock_irq(&desc->lock);
+ }
+ up(&probe_sem);
+
+ if (nr_irqs > 1)
+ irq_found = -irq_found;
+ return irq_found;
+}
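
A hedged sketch of the autodetect protocol implemented above: arm the
detector, provoke the card into raising its interrupt, then ask which
line fired. The function and callback names are invented; header
placement follows the 2.4 tree layout.

#include <linux/sched.h>

int mydev_find_irq(void (*trigger_test_interrupt)(void))
{
    unsigned long mask;
    int irq;

    mask = probe_irq_on();      /* start autodetection on unused lines */
    trigger_test_interrupt();   /* make the hardware raise its interrupt */
    irq = probe_irq_off(mask);  /* >0: the line; 0: none; <0: ambiguous */

    return irq;
}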
+
+/* this was setup_x86_irq but it seems pretty generic */
+int setup_irq(unsigned int irq, struct irqaction * new)
+{
+ int shared = 0;
+ unsigned long flags;
+ struct irqaction *old, **p;
+ irq_desc_t *desc = irq_desc + irq;
+
+ /*
+ * Some drivers like serial.c use request_irq() heavily,
+ * so we have to be careful not to interfere with a
+ * running system.
+ */
+ if (new->flags & SA_SAMPLE_RANDOM) {
+ /*
+ * This function might sleep, so we want to call it first,
+ * outside of the atomic block.
+ * Yes, this might clear the entropy pool if the wrong
+ * driver is loaded without actually installing a new
+ * handler, but is that really a problem? Only the
+ * sysadmin is able to do this.
+ */
+ rand_initialize_irq(irq);
+ }
+
+ /*
+ * The following block of code has to be executed atomically
+ */
+ spin_lock_irqsave(&desc->lock,flags);
+ p = &desc->action;
+ if ((old = *p) != NULL) {
+ /* Can't share interrupts unless both agree to */
+ if (!(old->flags & new->flags & SA_SHIRQ)) {
+ spin_unlock_irqrestore(&desc->lock,flags);
+ return -EBUSY;
+ }
+
+ /* add new interrupt at end of irq queue */
+ do {
+ p = &old->next;
+ old = *p;
+ } while (old);
+ shared = 1;
+ }
+
+ *p = new;
+
+ if (!shared) {
+ desc->depth = 0;
+ desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING | IRQ_INPROGRESS);
+ desc->handler->startup(irq);
+ }
+ spin_unlock_irqrestore(&desc->lock,flags);
+
+ register_irq_proc(irq);
+ return 0;
+}
+
+static struct proc_dir_entry * root_irq_dir;
+static struct proc_dir_entry * irq_dir [NR_IRQS];
+
+#define HEX_DIGITS 8
+
+static unsigned int parse_hex_value (const char *buffer,
+ unsigned long count, unsigned long *ret)
+{
+ unsigned char hexnum [HEX_DIGITS];
+ unsigned long value;
+ int i;
+
+ if (!count)
+ return -EINVAL;
+ if (count > HEX_DIGITS)
+ count = HEX_DIGITS;
+ if (copy_from_user(hexnum, buffer, count))
+ return -EFAULT;
+
+ /*
+ * Parse the first 8 characters as a hex string, any non-hex char
+ * is end-of-string. '00e1', 'e1', '00E1', 'E1' are all the same.
+ */
+ value = 0;
+
+ for (i = 0; i < count; i++) {
+ unsigned int c = hexnum[i];
+
+ switch (c) {
+ case '0' ... '9': c -= '0'; break;
+ case 'a' ... 'f': c -= 'a'-10; break;
+ case 'A' ... 'F': c -= 'A'-10; break;
+ default:
+ goto out;
+ }
+ value = (value << 4) | c;
+ }
+out:
+ *ret = value;
+ return 0;
+}
+
+#if CONFIG_SMP
+
+static struct proc_dir_entry * smp_affinity_entry [NR_IRQS];
+
+static unsigned long irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = ~0UL };
+static int irq_affinity_read_proc (char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ if (count < HEX_DIGITS+1)
+ return -EINVAL;
+ return sprintf (page, "%08lx\n", irq_affinity[(long)data]);
+}
+
+static int irq_affinity_write_proc (struct file *file, const char *buffer,
+ unsigned long count, void *data)
+{
+ int irq = (long) data, full_count = count, err;
+ unsigned long new_value;
+
+ if (!irq_desc[irq].handler->set_affinity)
+ return -EIO;
+
+ err = parse_hex_value(buffer, count, &new_value);
+ if (err)
+ return err;
+
+ /*
+ * Do not allow disabling IRQs completely - it's too easy a
+ * way to make the system unusable accidentally :-) At least
+ * one online CPU still has to be targeted.
+ */
+ if (!(new_value & cpu_online_map))
+ return -EINVAL;
+
+ irq_affinity[irq] = new_value;
+ irq_desc[irq].handler->set_affinity(irq, new_value);
+
+ return full_count;
+}
+
+#endif
+
+static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ unsigned long *mask = (unsigned long *) data;
+ if (count < HEX_DIGITS+1)
+ return -EINVAL;
+ return sprintf (page, "%08lx\n", *mask);
+}
+
+static int prof_cpu_mask_write_proc (struct file *file, const char *buffer,
+ unsigned long count, void *data)
+{
+ unsigned long *mask = (unsigned long *) data, full_count = count, err;
+ unsigned long new_value;
+
+ err = parse_hex_value(buffer, count, &new_value);
+ if (err)
+ return err;
+
+ *mask = new_value;
+ return full_count;
+}
+
+#define MAX_NAMELEN 10
+
+static void register_irq_proc (unsigned int irq)
+{
+ char name [MAX_NAMELEN];
+
+ if (!root_irq_dir || (irq_desc[irq].handler == &no_irq_type) ||
+ irq_dir[irq])
+ return;
+
+ memset(name, 0, MAX_NAMELEN);
+ sprintf(name, "%d", irq);
+
+ /* create /proc/irq/1234 */
+ irq_dir[irq] = proc_mkdir(name, root_irq_dir);
+
+#if CONFIG_SMP
+ {
+ struct proc_dir_entry *entry;
+
+ /* create /proc/irq/1234/smp_affinity */
+ entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);
+
+ if (entry) {
+ entry->nlink = 1;
+ entry->data = (void *)(long)irq;
+ entry->read_proc = irq_affinity_read_proc;
+ entry->write_proc = irq_affinity_write_proc;
+ }
+
+ smp_affinity_entry[irq] = entry;
+ }
+#endif
+}
+
+unsigned long prof_cpu_mask = -1;
+
+void init_irq_proc (void)
+{
+ struct proc_dir_entry *entry;
+ int i;
+
+ /* create /proc/irq */
+ root_irq_dir = proc_mkdir("irq", 0);
+
+ /* create /proc/irq/prof_cpu_mask */
+ entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);
+
+ if (!entry)
+ return;
+
+ entry->nlink = 1;
+ entry->data = (void *)&prof_cpu_mask;
+ entry->read_proc = prof_cpu_mask_read_proc;
+ entry->write_proc = prof_cpu_mask_write_proc;
+
+ /*
+ * Create entries for all existing IRQs.
+ */
+ for (i = 0; i < NR_IRQS; i++)
+ register_irq_proc(i);
+}
+
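
The /proc entries registered above expose the affinity mask to
userspace. A hedged userspace sketch writing the hex CPU mask that
irq_affinity_write_proc() parses, binding IRQ 10 to CPU 1 (assumes
root, and that IRQ 10's controller implements set_affinity):

#include <stdio.h>

int main(void)
{
    /* path layout per register_irq_proc() above */
    FILE *f = fopen("/proc/irq/10/smp_affinity", "w");
    if (!f)
        return 1;
    fprintf(f, "00000002\n");  /* bit 1 => CPU 1; read by parse_hex_value */
    fclose(f);
    return 0;
}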
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/arch/xen/kernel/ldt.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/arch/xen/kernel/ldt.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,271 @@
+/*
+ * linux/kernel/ldt.c
+ *
+ * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
+ * Copyright (C) 1999 Ingo Molnar <mingo@xxxxxxxxxx>
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/ldt.h>
+#include <asm/desc.h>
+
+#ifdef CONFIG_SMP /* avoids "defined but not used" warning */
+static void flush_ldt(void *mm)
+{
+ if (current->active_mm)
+ load_LDT(&current->active_mm->context);
+}
+#endif
+
+static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
+{
+ void *oldldt;
+ void *newldt;
+ int oldsize;
+
+ if (mincount <= pc->size)
+ return 0;
+ oldsize = pc->size;
+ mincount = (mincount+511)&(~511);
+ if (mincount*LDT_ENTRY_SIZE > PAGE_SIZE)
+ newldt = vmalloc(mincount*LDT_ENTRY_SIZE);
+ else
+ newldt = kmalloc(mincount*LDT_ENTRY_SIZE, GFP_KERNEL);
+
+ if (!newldt)
+ return -ENOMEM;
+
+ if (oldsize)
+ memcpy(newldt, pc->ldt, oldsize*LDT_ENTRY_SIZE);
+
+ oldldt = pc->ldt;
+ memset(newldt+oldsize*LDT_ENTRY_SIZE, 0, (mincount-oldsize)*LDT_ENTRY_SIZE);
+ wmb();
+ pc->ldt = newldt;
+ pc->size = mincount;
+ if (reload) {
+ make_pages_readonly(
+ pc->ldt,
+ (pc->size*LDT_ENTRY_SIZE)/PAGE_SIZE);
+ load_LDT(pc);
+ flush_page_update_queue();
+#ifdef CONFIG_SMP
+ if (current->mm->cpu_vm_mask != (1<<smp_processor_id()))
+ smp_call_function(flush_ldt, 0, 1, 1);
+#endif
+ }
+ wmb();
+ if (oldsize) {
+ if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
+ vfree(oldldt);
+ else
+ kfree(oldldt);
+ }
+ return 0;
+}
+
+static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
+{
+ int err = alloc_ldt(new, old->size, 0);
+ if (err < 0) {
+ printk(KERN_WARNING "ldt allocation failed\n");
+ new->size = 0;
+ return err;
+ }
+ memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
+ make_pages_readonly(new->ldt, (new->size*LDT_ENTRY_SIZE)/PAGE_SIZE);
+ return 0;
+}
+
+/*
+ * we do not have to muck with descriptors here, that is
+ * done in switch_mm() as needed.
+ */
+int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+ struct mm_struct * old_mm;
+ int retval = 0;
+
+ init_MUTEX(&mm->context.sem);
+ mm->context.size = 0;
+ old_mm = current->mm;
+ if (old_mm && old_mm->context.size > 0) {
+ down(&old_mm->context.sem);
+ retval = copy_ldt(&mm->context, &old_mm->context);
+ up(&old_mm->context.sem);
+ }
+ return retval;
+}
+
+/*
+ * No need to lock the MM as we are the last user
+ * Do not touch the ldt register, we are already
+ * in the next thread.
+ */
+void destroy_context(struct mm_struct *mm)
+{
+ if (mm->context.size) {
+ make_pages_writable(
+ mm->context.ldt,
+ (mm->context.size*LDT_ENTRY_SIZE)/PAGE_SIZE);
+ flush_page_update_queue();
+ if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
+ vfree(mm->context.ldt);
+ else
+ kfree(mm->context.ldt);
+ mm->context.size = 0;
+ }
+}
+
+static int read_ldt(void * ptr, unsigned long bytecount)
+{
+ int err;
+ unsigned long size;
+ struct mm_struct * mm = current->mm;
+
+ if (!mm->context.size)
+ return 0;
+ if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
+ bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
+
+ down(&mm->context.sem);
+ size = mm->context.size*LDT_ENTRY_SIZE;
+ if (size > bytecount)
+ size = bytecount;
+
+ err = 0;
+ if (copy_to_user(ptr, mm->context.ldt, size))
+ err = -EFAULT;
+ up(&mm->context.sem);
+ if (err < 0)
+ return err;
+ if (size != bytecount) {
+ /* zero-fill the rest */
+ clear_user(ptr+size, bytecount-size);
+ }
+ return bytecount;
+}
+
+static int read_default_ldt(void * ptr, unsigned long bytecount)
+{
+ int err;
+ unsigned long size;
+ void *address;
+
+ err = 0;
+ address = &default_ldt[0];
+ size = 5*sizeof(struct desc_struct);
+ if (size > bytecount)
+ size = bytecount;
+
+ err = size;
+ if (copy_to_user(ptr, address, size))
+ err = -EFAULT;
+
+ return err;
+}
+
+static int write_ldt(void * ptr, unsigned long bytecount, int oldmode)
+{
+ struct mm_struct * mm = current->mm;
+ __u32 entry_1, entry_2, *lp;
+ unsigned long mach_lp;
+ int error;
+ struct modify_ldt_ldt_s ldt_info;
+
+ error = -EINVAL;
+ if (bytecount != sizeof(ldt_info))
+ goto out;
+ error = -EFAULT;
+ if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
+ goto out;
+
+ error = -EINVAL;
+ if (ldt_info.entry_number >= LDT_ENTRIES)
+ goto out;
+ if (ldt_info.contents == 3) {
+ if (oldmode)
+ goto out;
+ if (ldt_info.seg_not_present == 0)
+ goto out;
+ }
+
+ down(&mm->context.sem);
+ if (ldt_info.entry_number >= mm->context.size) {
+ error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1);
+ if (error < 0)
+ goto out_unlock;
+ }
+
+ lp = (__u32 *) ((ldt_info.entry_number << 3) + (char *) mm->context.ldt);
+ mach_lp = arbitrary_virt_to_machine(lp);
+
+ /* Allow LDTs to be cleared by the user. */
+ if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
+ if (oldmode ||
+ (ldt_info.contents == 0 &&
+ ldt_info.read_exec_only == 1 &&
+ ldt_info.seg_32bit == 0 &&
+ ldt_info.limit_in_pages == 0 &&
+ ldt_info.seg_not_present == 1 &&
+ ldt_info.useable == 0 )) {
+ entry_1 = 0;
+ entry_2 = 0;
+ goto install;
+ }
+ }
+
+ entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
+ (ldt_info.limit & 0x0ffff);
+ entry_2 = (ldt_info.base_addr & 0xff000000) |
+ ((ldt_info.base_addr & 0x00ff0000) >> 16) |
+ (ldt_info.limit & 0xf0000) |
+ ((ldt_info.read_exec_only ^ 1) << 9) |
+ (ldt_info.contents << 10) |
+ ((ldt_info.seg_not_present ^ 1) << 15) |
+ (ldt_info.seg_32bit << 22) |
+ (ldt_info.limit_in_pages << 23) |
+ 0x7000;
+ if (!oldmode)
+ entry_2 |= (ldt_info.useable << 20);
+
+ /* Install the new entry ... */
+install:
+ error = HYPERVISOR_update_descriptor(mach_lp, entry_1, entry_2);
+
+out_unlock:
+ up(&mm->context.sem);
+out:
+ return error;
+}
+
+asmlinkage int sys_modify_ldt(int func, void *ptr, unsigned long bytecount)
+{
+ int ret = -ENOSYS;
+
+ switch (func) {
+ case 0:
+ ret = read_ldt(ptr, bytecount);
+ break;
+ case 1:
+ ret = write_ldt(ptr, bytecount, 1);
+ break;
+ case 2:
+ ret = read_default_ldt(ptr, bytecount);
+ break;
+ case 0x11:
+ ret = write_ldt(ptr, bytecount, 0);
+ break;
+ }
+ return ret;
+}
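
A hedged userspace sketch of driving sys_modify_ldt() above (func 1 is
the original write mode per the switch; 0x11 is the extended mode).
The struct layout follows the 2.4-era <asm/ldt.h>; newer headers name
the same structure user_desc. The field values are illustrative only.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/ldt.h>

int main(void)
{
    struct modify_ldt_ldt_s e;

    memset(&e, 0, sizeof(e));
    e.entry_number   = 0;
    e.base_addr      = 0x1000;   /* illustrative values only */
    e.limit          = 0xfff;
    e.seg_32bit      = 1;
    e.read_exec_only = 0;
    e.contents       = 0;        /* data segment */

    /* lands in write_ldt(), which installs the descriptor via
     * HYPERVISOR_update_descriptor() on a Xen kernel */
    if (syscall(SYS_modify_ldt, 1, &e, sizeof(e)) != 0) {
        perror("modify_ldt");
        return 1;
    }
    printf("LDT entry 0 installed\n");
    return 0;
}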
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/arch/xen/kernel/pci-pc.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/arch/xen/kernel/pci-pc.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,260 @@
+/*
+ * Low-Level PCI Support for PC
+ *
+ * (c) 1999--2000 Martin Mares <mj@xxxxxx>
+ *
+ * Adjusted to use Xen's interface by Rolf Neugebauer, Intel Research Cambridge
+ * Further modifications by Keir Fraser, University of Cambridge
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+
+#include <asm/segment.h>
+#include <asm/io.h>
+
+#include <asm-xen/xen-public/xen.h>
+#include <asm-xen/xen-public/physdev.h>
+
+#include "pci-i386.h"
+
+/*
+ * NB. The following interface functions are not included here:
+ * 1. void eisa_set_level_irq(unsigned int irq)
+ * 2. irq_routing_table * __devinit pcibios_get_irq_routing_table(void)
+ * 3. int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
+ * All are used by the ACPI driver. This should be ported to Xen if it is
+ * ever required -- Xen is the ultimate source for IRQ-routing knowledge.
+ */
+
+struct pci_ops *pci_root_ops = NULL;
+
+int (*pci_config_read)(int seg, int bus, int dev, int fn,
+ int reg, int len, u32 *value) = NULL;
+int (*pci_config_write)(int seg, int bus, int dev, int fn,
+ int reg, int len, u32 value) = NULL;
+
+unsigned int pci_probe = PCI_PROBE_BIOS;
+
+struct pci_fixup pcibios_fixups[] = { { 0 } };
+
+static int pci_confx_read(int seg, int bus, int dev, int fn, int reg,
+ int len, u32 *value)
+{
+ int ret;
+ physdev_op_t op;
+
+ if (bus > 255 || dev > 31 || fn > 7 || reg > 255)
+ return -EINVAL;
+
+ op.cmd = PHYSDEVOP_PCI_CFGREG_READ;
+ op.u.pci_cfgreg_read.bus = bus;
+ op.u.pci_cfgreg_read.dev = dev;
+ op.u.pci_cfgreg_read.func = fn;
+ op.u.pci_cfgreg_read.reg = reg;
+ op.u.pci_cfgreg_read.len = len;
+
+ if ( (ret = HYPERVISOR_physdev_op(&op)) != 0 )
+ return ret;
+
+ *value = op.u.pci_cfgreg_read.value;
+
+ return 0;
+}
+
+static int pci_confx_write(int seg, int bus, int dev, int fn, int reg,
+ int len, u32 value)
+{
+ int ret;
+ physdev_op_t op;
+
+ if ((bus > 255 || dev > 31 || fn > 7 || reg > 255))
+ return -EINVAL;
+
+ op.cmd = PHYSDEVOP_PCI_CFGREG_WRITE;
+ op.u.pci_cfgreg_write.bus = bus;
+ op.u.pci_cfgreg_write.dev = dev;
+ op.u.pci_cfgreg_write.func = fn;
+ op.u.pci_cfgreg_write.reg = reg;
+ op.u.pci_cfgreg_write.len = len;
+ op.u.pci_cfgreg_write.value = value;
+
+ if ( (ret = HYPERVISOR_physdev_op(&op)) != 0 )
+ return ret;
+ return 0;
+}
+
+
+static int pci_confx_read_config_byte(struct pci_dev *dev,
+ int where, u8 *value)
+{
+ int result;
+ u32 data;
+
+ result = pci_confx_read(0, dev->bus->number, PCI_SLOT(dev->devfn),
+ PCI_FUNC(dev->devfn), where, 1, &data);
+
+ *value = (u8)data;
+
+ return result;
+}
+
+static int pci_confx_read_config_word(struct pci_dev *dev,
+ int where, u16 *value)
+{
+ int result;
+ u32 data;
+
+ result = pci_confx_read(0, dev->bus->number, PCI_SLOT(dev->devfn),
+ PCI_FUNC(dev->devfn), where, 2, &data);
+
+ *value = (u16)data;
+
+ return result;
+}
+
+static int pci_confx_read_config_dword(struct pci_dev *dev,
+ int where, u32 *value)
+{
+ return pci_confx_read(0, dev->bus->number, PCI_SLOT(dev->devfn),
+ PCI_FUNC(dev->devfn), where, 4, value);
+}
+
+static int pci_confx_write_config_byte(struct pci_dev *dev,
+ int where, u8 value)
+{
+ return pci_confx_write(0, dev->bus->number, PCI_SLOT(dev->devfn),
+ PCI_FUNC(dev->devfn), where, 1, value);
+}
+
+static int pci_confx_write_config_word(struct pci_dev *dev,
+ int where, u16 value)
+{
+ return pci_confx_write(0, dev->bus->number, PCI_SLOT(dev->devfn),
+ PCI_FUNC(dev->devfn), where, 2, value);
+}
+
+static int pci_confx_write_config_dword(struct pci_dev *dev,
+ int where, u32 value)
+{
+ return pci_confx_write(0, dev->bus->number, PCI_SLOT(dev->devfn),
+ PCI_FUNC(dev->devfn), where, 4, value);
+}
+
+static struct pci_ops pci_conf_xen = {
+ pci_confx_read_config_byte,
+ pci_confx_read_config_word,
+ pci_confx_read_config_dword,
+ pci_confx_write_config_byte,
+ pci_confx_write_config_word,
+ pci_confx_write_config_dword
+};
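
Installing this structure as pci_root_ops (done in pcibios_init()
below) is what routes the kernel's generic configuration-space
accessors through Xen. A hedged sketch of the effect (the helper name
is invented):

#include <linux/pci.h>

/* pci_read_config_byte() dispatches via the bus ops, here pci_conf_xen,
 * so this call ends up in pci_confx_read_config_byte() and, from there,
 * in a PHYSDEVOP_PCI_CFGREG_READ hypercall. */
static u8 read_header_type(struct pci_dev *dev)
{
    u8 val = 0;
    pci_read_config_byte(dev, PCI_HEADER_TYPE, &val);
    return val;
}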
+
+void pcibios_penalize_isa_irq(int irq)
+{
+ /* nothing */
+}
+
+void __devinit pcibios_fixup_bus(struct pci_bus *b)
+{
+ pci_read_bridge_bases(b);
+}
+
+struct pci_bus * __devinit pcibios_scan_root(int busnum)
+{
+ struct list_head *list;
+ struct pci_bus *bus;
+
+ list_for_each ( list, &pci_root_buses )
+ {
+ bus = pci_bus_b(list);
+ if ( bus->number == busnum )
+ return bus;
+ }
+
+ printk("PCI: Probing PCI hardware (bus %02x)\n", busnum);
+ return pci_scan_bus(busnum, pci_root_ops, NULL);
+}
+
+void __init pcibios_init(void)
+{
+ int bus;
+ physdev_op_t op;
+
+ if ( !pci_probe )
+ return;
+
+ pci_root_ops = &pci_conf_xen;
+ pci_config_read = pci_confx_read;
+ pci_config_write = pci_confx_write;
+
+ pcibios_set_cacheline_size();
+
+ op.cmd = PHYSDEVOP_PCI_PROBE_ROOT_BUSES;
+ if ( HYPERVISOR_physdev_op(&op) != 0 )
+ {
+ printk(KERN_WARNING "PCI: System does not support PCI\n");
+ return;
+ }
+
+ printk(KERN_INFO "PCI: Probing PCI hardware\n");
+ for ( bus = 0; bus < 256; bus++ )
+ if ( test_bit(bus, &op.u.pci_probe_root_buses.busmask[0]) )
+ (void)pcibios_scan_root(bus);
+
+ pcibios_resource_survey();
+}
+
+char * __devinit pcibios_setup(char *str)
+{
+ if ( !strcmp(str, "off") )
+ pci_probe = 0;
+ return NULL;
+}
+
+unsigned int pcibios_assign_all_busses(void)
+{
+ return 0;
+}
+
+int pcibios_enable_device(struct pci_dev *dev, int mask)
+{
+ int err;
+ u8 pin;
+ physdev_op_t op;
+
+ /* Inform Xen that we are going to use this device. */
+ op.cmd = PHYSDEVOP_PCI_INITIALISE_DEVICE;
+ op.u.pci_initialise_device.bus = dev->bus->number;
+ op.u.pci_initialise_device.dev = PCI_SLOT(dev->devfn);
+ op.u.pci_initialise_device.func = PCI_FUNC(dev->devfn);
+ if ( (err = HYPERVISOR_physdev_op(&op)) != 0 )
+ return err;
+
+ /* Now we can bind to the very final IRQ line. */
+ pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &pin);
+ dev->irq = pin;
+
+ /* Turn on device I/O and memory access as necessary. */
+ if ( (err = pcibios_enable_resources(dev, mask)) < 0 )
+ return err;
+
+ /* Sanity-check that an interrupt-producing device is routed to an IRQ. */
+ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
+ if ( pin != 0 )
+ {
+ if ( dev->irq != 0 )
+ printk(KERN_INFO "PCI: Obtained IRQ %d for device %s\n",
+ dev->irq, dev->slot_name);
+ else
+ printk(KERN_WARNING "PCI: No IRQ known for interrupt pin %c of "
+ "device %s.\n", 'A' + pin - 1, dev->slot_name);
+ }
+
+ return 0;
+}
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/arch/xen/kernel/process.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/arch/xen/kernel/process.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,460 @@
+/*
+ * linux/arch/i386/kernel/process.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ *
+ * Pentium III FXSR, SSE support
+ * Gareth Hughes <gareth@xxxxxxxxxxx>, May 2000
+ */
+
+/*
+ * This file handles the architecture-dependent parts of process handling..
+ */
+
+#define __KERNEL_SYSCALLS__
+#include <stdarg.h>
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/user.h>
+#include <linux/a.out.h>
+#include <linux/interrupt.h>
+#include <linux/config.h>
+#include <linux/delay.h>
+#include <linux/reboot.h>
+#include <linux/init.h>
+#include <linux/mc146818rtc.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/ldt.h>
+#include <asm/processor.h>
+#include <asm/i387.h>
+#include <asm/desc.h>
+#include <asm/mmu_context.h>
+#include <asm/multicall.h>
+#include <asm-xen/xen-public/dom0_ops.h>
+
+#include <linux/irq.h>
+
+asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
+
+int hlt_counter;
+
+/*
+ * Power management idle function, if any..
+ */
+void (*pm_idle)(void);
+
+/*
+ * Power off function, if any
+ */
+void (*pm_power_off)(void);
+
+void disable_hlt(void)
+{
+ hlt_counter++;
+}
+
+void enable_hlt(void)
+{
+ hlt_counter--;
+}
+
+/*
+ * The idle thread. There's no useful work to be
+ * done, so just try to conserve power and have a
+ * low exit latency (ie sit in a loop waiting for
+ * somebody to say that they'd like to reschedule)
+ */
+void cpu_idle (void)
+{
+ extern int set_timeout_timer(void);
+
+ /* Endless idle loop with no priority at all. */
+ init_idle();
+ current->nice = 20;
+ current->counter = -100;
+
+ for ( ; ; )
+ {
+ while ( !current->need_resched )
+ {
+ __cli();
+ if ( current->need_resched )
+ {
+ /* The race-free check for events failed. */
+ __sti();
+ break;
+ }
+ else if ( set_timeout_timer() == 0 )
+ {
+ /* NB. Blocking re-enables events in a race-free manner. */
+ HYPERVISOR_block();
+ }
+ else
+ {
+ /* No race here: yielding will get us the CPU again anyway. */
+ __sti();
+ HYPERVISOR_yield();
+ }
+ }
+ schedule();
+ check_pgt_cache();
+ }
+}
+
+extern void show_trace(unsigned long* esp);
+
+void show_regs(struct pt_regs * regs)
+{
+ printk("\n");
+ printk("Pid: %d, comm: %20s\n", current->pid, current->comm);
+ printk("EIP: %04x:[<%08lx>] CPU: %d",0xffff & regs->xcs,regs->eip,
smp_processor_id());
+ if (regs->xcs & 2)
+ printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp);
+ printk(" EFLAGS: %08lx %s\n",regs->eflags, print_tainted());
+ printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
+ regs->eax,regs->ebx,regs->ecx,regs->edx);
+ printk("ESI: %08lx EDI: %08lx EBP: %08lx",
+ regs->esi, regs->edi, regs->ebp);
+ printk(" DS: %04x ES: %04x\n",
+ 0xffff & regs->xds,0xffff & regs->xes);
+
+ show_trace(&regs->esp);
+}
+
+
+/*
+ * Create a kernel thread
+ */
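+/*
+ * NB. The clone is issued with a direct "int $0x80"; parent and child are
+ * told apart by comparing %esp, since the child runs on a new stack. The
+ * child then calls fn(arg) and terminates via sys_exit.
+ */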
+int arch_kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
+{
+ long retval, d0;
+
+ __asm__ __volatile__(
+ "movl %%esp,%%esi\n\t"
+ "int $0x80\n\t" /* Linux/i386 system call */
+ "cmpl %%esp,%%esi\n\t" /* child or parent? */
+ "je 1f\n\t" /* parent - jump */
+ /* Load the argument into eax, and push it. That way, it does
+ * not matter whether the called function is compiled with
+ * -mregparm or not. */
+ "movl %4,%%eax\n\t"
+ "pushl %%eax\n\t"
+ "call *%5\n\t" /* call fn */
+ "movl %3,%0\n\t" /* exit */
+ "int $0x80\n"
+ "1:\t"
+ :"=&a" (retval), "=&S" (d0)
+ :"0" (__NR_clone), "i" (__NR_exit),
+ "r" (arg), "r" (fn),
+ "b" (flags | CLONE_VM)
+ : "memory");
+
+ return retval;
+}
+
+/*
+ * Free current thread data structures etc..
+ */
+void exit_thread(void)
+{
+ /* nothing to do ... */
+}
+
+void flush_thread(void)
+{
+ struct task_struct *tsk = current;
+
+ memset(tsk->thread.debugreg, 0, sizeof(unsigned long)*8);
+
+ /*
+ * Forget coprocessor state..
+ */
+ clear_fpu(tsk);
+ tsk->used_math = 0;
+}
+
+void release_thread(struct task_struct *dead_task)
+{
+ if (dead_task->mm) {
+ // temporary debugging check
+ if (dead_task->mm->context.size) {
+ printk("WARNING: dead process %8s still has LDT? <%p/%08x>\n",
+ dead_task->comm,
+ dead_task->mm->context.ldt,
+ dead_task->mm->context.size);
+ BUG();
+ }
+ }
+ //release_x86_irqs(dead_task);
+}
+
+
+/*
+ * Save a segment.
+ */
+#define savesegment(seg,value) \
+ asm volatile("movl %%" #seg ",%0":"=m" (*(int *)&(value)))
+
+int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
+ unsigned long unused,
+ struct task_struct * p, struct pt_regs * regs)
+{
+ struct pt_regs * childregs;
+ unsigned long eflags;
+
+ childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p)) - 1;
+ struct_cpy(childregs, regs);
+ childregs->eax = 0;
+ childregs->esp = esp;
+
+ p->thread.esp = (unsigned long) childregs;
+ p->thread.esp0 = (unsigned long) (childregs+1);
+
+ p->thread.eip = (unsigned long) ret_from_fork;
+
+ savesegment(fs,p->thread.fs);
+ savesegment(gs,p->thread.gs);
+
+ unlazy_fpu(current);
+ struct_cpy(&p->thread.i387, &current->thread.i387);
+
+
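+ /* EFLAGS bits 12-13 hold the I/O privilege level; copy it to the child. */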
+ __asm__ __volatile__ ( "pushfl; popl %0" : "=r" (eflags) : );
+ p->thread.io_pl = (eflags >> 12) & 3;
+
+ return 0;
+}
+
+/*
+ * fill in the user structure for a core dump..
+ */
+void dump_thread(struct pt_regs * regs, struct user * dump)
+{
+ int i;
+
+/* changed the size calculations - should hopefully work better. lbt */
+ dump->magic = CMAGIC;
+ dump->start_code = 0;
+ dump->start_stack = regs->esp & ~(PAGE_SIZE - 1);
+ dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
+ dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
+ dump->u_dsize -= dump->u_tsize;
+ dump->u_ssize = 0;
+ for (i = 0; i < 8; i++)
+ dump->u_debugreg[i] = current->thread.debugreg[i];
+
+ if (dump->start_stack < TASK_SIZE)
+ dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;
+
+ dump->regs.ebx = regs->ebx;
+ dump->regs.ecx = regs->ecx;
+ dump->regs.edx = regs->edx;
+ dump->regs.esi = regs->esi;
+ dump->regs.edi = regs->edi;
+ dump->regs.ebp = regs->ebp;
+ dump->regs.eax = regs->eax;
+ dump->regs.ds = regs->xds;
+ dump->regs.es = regs->xes;
+ savesegment(fs,dump->regs.fs);
+ savesegment(gs,dump->regs.gs);
+ dump->regs.orig_eax = regs->orig_eax;
+ dump->regs.eip = regs->eip;
+ dump->regs.cs = regs->xcs;
+ dump->regs.eflags = regs->eflags;
+ dump->regs.esp = regs->esp;
+ dump->regs.ss = regs->xss;
+
+ dump->u_fpvalid = dump_fpu (regs, &dump->i387);
+}
+
+/*
+ * switch_to(x,y) should switch tasks from x to y.
+ *
+ * We fsave/fwait so that an exception goes off at the right time
+ * (as a call from the fsave or fwait in effect) rather than to
+ * the wrong process. Lazy FP saving no longer makes any sense
+ * with modern CPU's, and this simplifies a lot of things (SMP
+ * and UP become the same).
+ *
+ * NOTE! We used to use the x86 hardware context switching. The
+ * reason for not using it any more becomes apparent when you
+ * try to recover gracefully from saved state that is no longer
+ * valid (stale segment register values in particular). With the
+ * hardware task-switch, there is no way to fix up bad state in
+ * a reasonable manner.
+ *
+ * The fact that Intel documents the hardware task-switching to
+ * be slow is a fairly red herring - this code is not noticeably
+ * faster. However, there _is_ some room for improvement here,
+ * so the performance issues may eventually be a valid point.
+ * More important, however, is the fact that this allows us much
+ * more flexibility.
+ */
+void fastcall __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+{
+ struct thread_struct *next = &next_p->thread;
+
+ __cli();
+
+ /*
+ * We clobber FS and GS here so that we avoid a GPF when restoring previous
+ * task's FS/GS values in Xen when the LDT is switched. If we don't do this
+ * then we can end up erroneously re-flushing the page-update queue when
+ * we 'execute_multicall_list'.
+ */
+ __asm__ __volatile__ (
+ "xorl %%eax,%%eax; movl %%eax,%%fs; movl %%eax,%%gs" : : : "eax" );
+
+ MULTICALL_flush_page_update_queue();
+
+ /*
+ * This is basically 'unlazy_fpu', except that we queue a multicall to
+ * indicate FPU task switch, rather than synchronously trapping to Xen.
+ */
+ if ( prev_p->flags & PF_USEDFPU )
+ {
+ if ( cpu_has_fxsr )
+ asm volatile( "fxsave %0 ; fnclex"
+ : "=m" (prev_p->thread.i387.fxsave) );
+ else
+ asm volatile( "fnsave %0 ; fwait"
+ : "=m" (prev_p->thread.i387.fsave) );
+ prev_p->flags &= ~PF_USEDFPU;
+ queue_multicall0(__HYPERVISOR_fpu_taskswitch);
+ }
+
+ queue_multicall2(__HYPERVISOR_stack_switch, __KERNEL_DS, next->esp0);
+ if ( xen_start_info.flags & SIF_PRIVILEGED )
+ {
+ dom0_op_t op;
+ op.cmd = DOM0_IOPL;
+ op.u.iopl.domain = DOMID_SELF;
+ op.u.iopl.iopl = next->io_pl;
+ op.interface_version = DOM0_INTERFACE_VERSION;
+ queue_multicall1(__HYPERVISOR_dom0_op, (unsigned long)&op);
+ }
+
+ /* EXECUTE ALL TASK SWITCH XEN SYSCALLS AT THIS POINT. */
+ execute_multicall_list();
+ __sti();
+
+ /*
+ * Restore %fs and %gs.
+ */
+ loadsegment(fs, next->fs);
+ loadsegment(gs, next->gs);
+
+ /*
+ * Now maybe reload the debug registers
+ */
+ if ( next->debugreg[7] != 0 )
+ {
+ HYPERVISOR_set_debugreg(0, next->debugreg[0]);
+ HYPERVISOR_set_debugreg(1, next->debugreg[1]);
+ HYPERVISOR_set_debugreg(2, next->debugreg[2]);
+ HYPERVISOR_set_debugreg(3, next->debugreg[3]);
+ /* no 4 and 5 */
+ HYPERVISOR_set_debugreg(6, next->debugreg[6]);
+ HYPERVISOR_set_debugreg(7, next->debugreg[7]);
+ }
+}
+
+asmlinkage int sys_fork(struct pt_regs regs)
+{
+ return do_fork(SIGCHLD, regs.esp, &regs, 0);
+}
+
+asmlinkage int sys_clone(struct pt_regs regs)
+{
+ unsigned long clone_flags;
+ unsigned long newsp;
+
+ clone_flags = regs.ebx;
+ newsp = regs.ecx;
+ if (!newsp)
+ newsp = regs.esp;
+ return do_fork(clone_flags, newsp, &regs, 0);
+}
+
+/*
+ * This is trivial, and on the face of it looks like it
+ * could equally well be done in user mode.
+ *
+ * Not so, for quite unobvious reasons - register pressure.
+ * In user mode vfork() cannot have a stack frame, and if
+ * done by calling the "clone()" system call directly, you
+ * do not have enough call-clobbered registers to hold all
+ * the information you need.
+ */
+asmlinkage int sys_vfork(struct pt_regs regs)
+{
+ return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.esp, &regs, 0);
+}
+
+/*
+ * sys_execve() executes a new program.
+ */
+asmlinkage int sys_execve(struct pt_regs regs)
+{
+ int error;
+ char * filename;
+
+ filename = getname((char *) regs.ebx);
+ error = PTR_ERR(filename);
+ if (IS_ERR(filename))
+ goto out;
+ error = do_execve(filename, (char **) regs.ecx, (char **) regs.edx, &regs);
+ if (error == 0)
+ current->ptrace &= ~PT_DTRACE;
+ putname(filename);
+ out:
+ return error;
+}
+
+/*
+ * These bracket the sleeping functions..
+ */
+extern void scheduling_functions_start_here(void);
+extern void scheduling_functions_end_here(void);
+#define first_sched ((unsigned long) scheduling_functions_start_here)
+#define last_sched ((unsigned long) scheduling_functions_end_here)
+
+unsigned long get_wchan(struct task_struct *p)
+{
+ unsigned long ebp, esp, eip;
+ unsigned long stack_page;
+ int count = 0;
+ if (!p || p == current || p->state == TASK_RUNNING)
+ return 0;
+ stack_page = (unsigned long)p;
+ esp = p->thread.esp;
+ if (!stack_page || esp < stack_page || esp > 8188+stack_page)
+ return 0;
+ /* include/asm-i386/system.h:switch_to() pushes ebp last. */
+ ebp = *(unsigned long *) esp;
+ do {
+ if (ebp < stack_page || ebp > 8184+stack_page)
+ return 0;
+ eip = *(unsigned long *) (ebp+4);
+ if (eip < first_sched || eip >= last_sched)
+ return eip;
+ ebp = *(unsigned long *) ebp;
+ } while (count++ < 16);
+ return 0;
+}
+#undef last_sched
+#undef first_sched
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/arch/xen/kernel/setup.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/arch/xen/kernel/setup.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,1222 @@
+/*
+ * linux/arch/i386/kernel/setup.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ */
+
+/*
+ * This file handles the architecture-dependent parts of initialization
+ */
+
+#define __KERNEL_SYSCALLS__
+static int errno;
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/user.h>
+#include <linux/a.out.h>
+#include <linux/tty.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/apm_bios.h>
+#ifdef CONFIG_BLK_DEV_RAM
+#include <linux/blk.h>
+#endif
+#include <linux/highmem.h>
+#include <linux/bootmem.h>
+#include <linux/seq_file.h>
+#include <linux/reboot.h>
+#include <asm/processor.h>
+#include <linux/console.h>
+#include <linux/module.h>
+#include <asm/mtrr.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/smp.h>
+#include <asm/msr.h>
+#include <asm/desc.h>
+#include <asm/dma.h>
+#include <asm/mpspec.h>
+#include <asm/mmu_context.h>
+#include <asm/ctrl_if.h>
+#include <asm/hypervisor.h>
+#include <asm-xen/xen-public/dom0_ops.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/tqueue.h>
+#include <net/pkt_sched.h> /* dev_(de)activate */
+
+/*
+ * Point at the empty zero page to start with. We map the real shared_info
+ * page as soon as fixmap is up and running.
+ */
+shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
+
+unsigned long *phys_to_machine_mapping, *pfn_to_mfn_frame_list;
+
+multicall_entry_t multicall_list[8];
+int nr_multicall_ents = 0;
+
+/*
+ * Machine setup..
+ */
+
+char ignore_irq13; /* set if exception 16 works */
+struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
+
+unsigned long mmu_cr4_features;
+
+unsigned char * vgacon_mmap;
+
+/*
+ * Bus types ..
+ */
+#ifdef CONFIG_EISA
+int EISA_bus;
+#endif
+int MCA_bus;
+
+/* for MCA, but anyone else can use it if they want */
+unsigned int machine_id;
+unsigned int machine_submodel_id;
+unsigned int BIOS_revision;
+unsigned int mca_pentium_flag;
+
+/* For PCI or other memory-mapped resources */
+unsigned long pci_mem_start = 0x10000000;
+
+/*
+ * Setup options
+ */
+struct drive_info_struct { char dummy[32]; } drive_info;
+struct screen_info screen_info;
+struct apm_info apm_info;
+struct sys_desc_table_struct {
+ unsigned short length;
+ unsigned char table[0];
+};
+
+unsigned char aux_device_present;
+
+extern int root_mountflags;
+extern char _text, _etext, _edata, _end;
+
+extern int blk_nohighio;
+
+int enable_acpi_smp_table;
+
+/* Raw start-of-day parameters from the hypervisor. */
+union xen_start_info_union xen_start_info_union;
+
+#define COMMAND_LINE_SIZE 256
+static char command_line[COMMAND_LINE_SIZE];
+char saved_command_line[COMMAND_LINE_SIZE];
+
+/* parse_mem_cmdline()
+ * returns the value of the mem= boot param converted to pages or 0
+ */
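+/* Example: with 4kB pages, "mem=512M" converts to (512 << 20) >> 12 = 131072. */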
+static int __init parse_mem_cmdline (char ** cmdline_p)
+{
+ char c = ' ', *to = command_line, *from = saved_command_line;
+ int len = 0;
+ unsigned long long bytes;
+ int mem_param = 0;
+
+ /* Save unparsed command line copy for /proc/cmdline */
+ memcpy(saved_command_line, xen_start_info.cmd_line, COMMAND_LINE_SIZE);
+ saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
+
+ for (;;) {
+ /*
+ * "mem=nopentium" disables the 4MB page tables.
+ * "mem=XXX[kKmM]" defines a memory region from HIGH_MEM
+ * to <mem>, overriding the bios size.
+ * "mem=XXX[KkmM]@XXX[KkmM]" defines a memory region from
+ * <start> to <start>+<mem>, overriding the bios size.
+ */
+ if (c == ' ' && !memcmp(from, "mem=", 4)) {
+ if (to != command_line)
+ to--;
+ if (!memcmp(from+4, "nopentium", 9)) {
+ from += 9+4;
+ } else if (!memcmp(from+4, "exactmap", 8)) {
+ from += 8+4;
+ } else {
+ bytes = memparse(from+4, &from);
+ mem_param = bytes>>PAGE_SHIFT;
+ if (*from == '@')
+ (void)memparse(from+1, &from);
+ }
+ }
+
+ c = *(from++);
+ if (!c)
+ break;
+ if (COMMAND_LINE_SIZE <= ++len)
+ break;
+ *(to++) = c;
+ }
+ *to = '\0';
+ *cmdline_p = command_line;
+
+ return mem_param;
+}
+
+/*
+ * Every exception-fixup table is sorted (i.e., the kernel main table, and
+ * every module table). Some elements may be out of order if they reference
+ * text.init, for example.
+ */
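+/*
+ * Simple insertion sort keyed on the faulting instruction address, so that
+ * lookups which assume an ordered table (e.g. binary search) stay correct.
+ */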
+static void sort_exception_table(struct exception_table_entry *start,
+ struct exception_table_entry *end)
+{
+ struct exception_table_entry *p, *q, tmp;
+
+ for ( p = start; p < end; p++ )
+ {
+ for ( q = p-1; q > start; q-- )
+ if ( p->insn > q->insn )
+ break;
+ if ( ++q != p )
+ {
+ tmp = *p;
+ memmove(q+1, q, (p-q)*sizeof(*p));
+ *q = tmp;
+ }
+ }
+}
+
+int xen_module_init(struct module *mod)
+{
+ sort_exception_table(mod->ex_table_start, mod->ex_table_end);
+ return 0;
+}
+
+void __init setup_arch(char **cmdline_p)
+{
+ int i,j;
+ unsigned long bootmap_size, start_pfn, lmax_low_pfn;
+ int mem_param; /* user specified memory size in pages */
+ int boot_pfn; /* low pages available for bootmem */
+
+ extern void hypervisor_callback(void);
+ extern void failsafe_callback(void);
+
+ extern unsigned long cpu0_pte_quicklist[];
+ extern unsigned long cpu0_pgd_quicklist[];
+
+ extern const struct exception_table_entry __start___ex_table[];
+ extern const struct exception_table_entry __stop___ex_table[];
+
+ extern char _stext;
+
+ /* Force a quick death if the kernel panics. */
+ extern int panic_timeout;
+ if ( panic_timeout == 0 )
+ panic_timeout = 1;
+
+ /* Ensure that the kernel exception-fixup table is sorted. */
+ sort_exception_table(__start___ex_table, __stop___ex_table);
+
+#ifndef CONFIG_HIGHIO
+ blk_nohighio = 1;
+#endif
+
+ HYPERVISOR_vm_assist(VMASST_CMD_enable,
+ VMASST_TYPE_4gb_segments);
+
+ HYPERVISOR_set_callbacks(
+ __KERNEL_CS, (unsigned long)hypervisor_callback,
+ __KERNEL_CS, (unsigned long)failsafe_callback);
+
+ boot_cpu_data.pgd_quick = cpu0_pgd_quicklist;
+ boot_cpu_data.pte_quick = cpu0_pte_quicklist;
+
+ /* This must be initialized to UNNAMED_MAJOR for ipconfig to work
+ properly. Setting ROOT_DEV to default to /dev/ram0 breaks initrd. */
+ ROOT_DEV = MKDEV(UNNAMED_MAJOR,0);
+ memset(&drive_info, 0, sizeof(drive_info));
+ memset(&screen_info, 0, sizeof(screen_info));
+
+ /* This is drawn from a dump from vgacon:startup in standard Linux. */
+ screen_info.orig_video_mode = 3;
+ screen_info.orig_video_isVGA = 1;
+ screen_info.orig_video_lines = 25;
+ screen_info.orig_video_cols = 80;
+ screen_info.orig_video_ega_bx = 3;
+ screen_info.orig_video_points = 16;
+
+ memset(&apm_info.bios, 0, sizeof(apm_info.bios));
+ aux_device_present = 0;
+#ifdef CONFIG_BLK_DEV_RAM
+ rd_image_start = 0;
+ rd_prompt = 0;
+ rd_doload = 0;
+#endif
+
+ root_mountflags &= ~MS_RDONLY;
+ init_mm.start_code = (unsigned long) &_text;
+ init_mm.end_code = (unsigned long) &_etext;
+ init_mm.end_data = (unsigned long) &_edata;
+ init_mm.brk = (unsigned long) &_end;
+
+ /* The mem= kernel command line param overrides the detected amount
+ * of memory. For xenolinux, if this override is larger than detected
+ * memory, then boot using only detected memory and make provisions to
+ * use all of the override value. The hypervisor can give this
+ * domain more memory later on and it will be added to the free
+ * lists at that time. See claim_new_pages() in
+ * arch/xen/drivers/balloon/balloon.c
+ */
+ mem_param = parse_mem_cmdline(cmdline_p);
+ if (mem_param < xen_start_info.nr_pages)
+ mem_param = xen_start_info.nr_pages;
+
+#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
+#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
+#define PFN_PHYS(x) ((x) << PAGE_SHIFT)
+
+/*
+ * 128MB for vmalloc(), iomap(), kmap(), and fixaddr mappings.
+ */
+#define VMALLOC_RESERVE (unsigned long)(128 << 20)
+#define MAXMEM (unsigned long)(HYPERVISOR_VIRT_START-PAGE_OFFSET-VMALLOC_RESERVE)
+#define MAXMEM_PFN PFN_DOWN(MAXMEM)
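+/* 1<<20 frames of 4kB == the 4GB boundary addressable without PAE. */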
+#define MAX_NONPAE_PFN (1 << 20)
+
+ /*
+ * Determine low and high memory ranges:
+ */
+ lmax_low_pfn = max_pfn = mem_param;
+ if (lmax_low_pfn > MAXMEM_PFN) {
+ lmax_low_pfn = MAXMEM_PFN;
+#ifndef CONFIG_HIGHMEM
+ /* Maximum memory usable is what is directly addressable */
+ printk(KERN_WARNING "Warning only %ldMB will be used.\n",
+ MAXMEM>>20);
+ if (max_pfn > MAX_NONPAE_PFN)
+ printk(KERN_WARNING "Use a PAE enabled kernel.\n");
+ else
+ printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
+ max_pfn = lmax_low_pfn;
+#else /* !CONFIG_HIGHMEM */
+#ifndef CONFIG_X86_PAE
+ if (max_pfn > MAX_NONPAE_PFN) {
+ max_pfn = MAX_NONPAE_PFN;
+ printk(KERN_WARNING "Warning only 4GB will be used.\n");
+ printk(KERN_WARNING "Use a PAE enabled kernel.\n");
+ }
+#endif /* !CONFIG_X86_PAE */
+#endif /* !CONFIG_HIGHMEM */
+ }
+
+#ifdef CONFIG_HIGHMEM
+ highstart_pfn = highend_pfn = max_pfn;
+ if (max_pfn > MAXMEM_PFN) {
+ highstart_pfn = MAXMEM_PFN;
+ printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
+ pages_to_mb(highend_pfn - highstart_pfn));
+ }
+#endif
+
+ phys_to_machine_mapping = (unsigned long *)xen_start_info.mfn_list;
+ cur_pgd = init_mm.pgd = (pgd_t *)xen_start_info.pt_base;
+
+ start_pfn = (__pa(xen_start_info.pt_base) >> PAGE_SHIFT) +
+ xen_start_info.nr_pt_frames;
+
+ /*
+ * Initialize the boot-time allocator, and free up all RAM. Then reserve
+ * space for OS image, initrd, phys->machine table, bootstrap page table,
+ * and the bootmem bitmap.
+ * NB. There is definitely enough room for the bootmem bitmap in the
+ * bootstrap page table. We are guaranteed to get >=512kB unused 'padding'
+ * for our own use after all bootstrap elements
+ * (see asm-xen/xen-public/xen.h).
+ */
+ boot_pfn = min((int)xen_start_info.nr_pages,lmax_low_pfn);
+ bootmap_size = init_bootmem(start_pfn,boot_pfn);
+ free_bootmem(0, PFN_PHYS(boot_pfn));
+ reserve_bootmem(__pa(&_stext),
+ PFN_PHYS(start_pfn) + bootmap_size + PAGE_SIZE-1 -
+ __pa(&_stext));
+
+ /* init_bootmem() set the global max_low_pfn to boot_pfn. Now max_low_pfn
+ * can be set to the override value.
+ */
+ max_low_pfn = lmax_low_pfn;
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ if ( xen_start_info.mod_start != 0 )
+ {
+ if ( (__pa(xen_start_info.mod_start) + xen_start_info.mod_len) <=
+ (max_low_pfn << PAGE_SHIFT) )
+ {
+ initrd_start = xen_start_info.mod_start;
+ initrd_end = initrd_start + xen_start_info.mod_len;
+ initrd_below_start_ok = 1;
+ }
+ else
+ {
+ printk(KERN_ERR "initrd extends beyond end of memory "
+ "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
+ __pa(xen_start_info.mod_start) + xen_start_info.mod_len,
+ max_low_pfn << PAGE_SHIFT);
+ initrd_start = 0;
+ }
+ }
+#endif
+
+ paging_init();
+
+ /* Make sure we have a correctly sized P->M table. */
+ if ( max_pfn != xen_start_info.nr_pages )
+ {
+ phys_to_machine_mapping = alloc_bootmem_low_pages(
+ max_pfn * sizeof(unsigned long));
+ if ( max_pfn > xen_start_info.nr_pages )
+ {
+ memset(phys_to_machine_mapping, ~0,
+ max_pfn * sizeof(unsigned long));
+ memcpy(phys_to_machine_mapping,
+ (unsigned long *)xen_start_info.mfn_list,
+ xen_start_info.nr_pages * sizeof(unsigned long));
+ }
+ else
+ {
+ memcpy(phys_to_machine_mapping,
+ (unsigned long *)xen_start_info.mfn_list,
+ max_pfn * sizeof(unsigned long));
+ if (HYPERVISOR_dom_mem_op(
+ MEMOP_decrease_reservation,
+ (unsigned long *)xen_start_info.mfn_list + max_pfn,
+ xen_start_info.nr_pages - max_pfn, 0) !=
+ (xen_start_info.nr_pages - max_pfn))
+ BUG();
+ }
+ free_bootmem(__pa(xen_start_info.mfn_list),
+ PFN_PHYS(PFN_UP(xen_start_info.nr_pages *
+ sizeof(unsigned long))));
+ }
+
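+ /*
+ * Publish the P->M table to Xen: each entry of pfn_to_mfn_frame_list is
+ * the machine frame holding one page of phys_to_machine_mapping, and the
+ * shared-info page records where the list itself lives.
+ */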
+ pfn_to_mfn_frame_list = alloc_bootmem_low_pages(PAGE_SIZE);
+ for ( i=0, j=0; i < max_pfn; i+=(PAGE_SIZE/sizeof(unsigned long)), j++ )
+ {
+ pfn_to_mfn_frame_list[j] =
+ virt_to_machine(&phys_to_machine_mapping[i]) >> PAGE_SHIFT;
+ }
+ HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list =
+ virt_to_machine(pfn_to_mfn_frame_list) >> PAGE_SHIFT;
+
+ /* If we are a privileged guest OS then we should request IO privileges. */
+ if ( xen_start_info.flags & SIF_PRIVILEGED )
+ {
+ dom0_op_t op;
+ op.cmd = DOM0_IOPL;
+ op.u.iopl.domain = DOMID_SELF;
+ op.u.iopl.iopl = 1;
+ if( HYPERVISOR_dom0_op(&op) != 0 )
+ panic("Unable to obtain IOPL, despite being SIF_PRIVILEGED");
+ current->thread.io_pl = 1;
+ }
+
+ if (xen_start_info.flags & SIF_INITDOMAIN )
+ {
+ if( !(xen_start_info.flags & SIF_PRIVILEGED) )
+ panic("Xen granted us console access but not privileged status");
+
+#if defined(CONFIG_VT)
+#if defined(CONFIG_VGA_CONSOLE)
+ conswitchp = &vga_con;
+#elif defined(CONFIG_DUMMY_CONSOLE)
+ conswitchp = &dummy_con;
+#endif
+#endif
+ }
+}
+
+static int cachesize_override __initdata = -1;
+static int __init cachesize_setup(char *str)
+{
+ get_option (&str, &cachesize_override);
+ return 1;
+}
+__setup("cachesize=", cachesize_setup);
+
+static int __init highio_setup(char *str)
+{
+ printk("i386: disabling HIGHMEM block I/O\n");
+ blk_nohighio = 1;
+ return 1;
+}
+__setup("nohighio", highio_setup);
+
+static int __init get_model_name(struct cpuinfo_x86 *c)
+{
+ unsigned int *v;
+ char *p, *q;
+
+ if (cpuid_eax(0x80000000) < 0x80000004)
+ return 0;
+
+ v = (unsigned int *) c->x86_model_id;
+ cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
+ cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
+ cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
+ c->x86_model_id[48] = 0;
+
+ /* Intel chips right-justify this string for some dumb reason;
+ undo that brain damage */
+ p = q = &c->x86_model_id[0];
+ while ( *p == ' ' )
+ p++;
+ if ( p != q ) {
+ while ( *p )
+ *q++ = *p++;
+ while ( q <= &c->x86_model_id[48] )
+ *q++ = '\0'; /* Zero-pad the rest */
+ }
+
+ return 1;
+}
+
+
+static void __init display_cacheinfo(struct cpuinfo_x86 *c)
+{
+ unsigned int n, dummy, ecx, edx, l2size;
+
+ n = cpuid_eax(0x80000000);
+
+ if (n >= 0x80000005) {
+ cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
+ printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK
(%d bytes/line)\n",
+ edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
+ c->x86_cache_size=(ecx>>24)+(edx>>24);
+ }
+
+ if (n < 0x80000006) /* Some chips just have a large L1. */
+ return;
+
+ ecx = cpuid_ecx(0x80000006);
+ l2size = ecx >> 16;
+
+ /* AMD errata T13 (order #21922) */
+ if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {
+ if (c->x86_model == 3 && c->x86_mask == 0) /* Duron Rev A0 */
+ l2size = 64;
+ if (c->x86_model == 4 &&
+ (c->x86_mask==0 || c->x86_mask==1)) /* Tbird rev A1/A2 */
+ l2size = 256;
+ }
+
+ /* Intel PIII Tualatin. This comes in two flavours.
+ * One has 256kb of cache, the other 512. We have no way
+ * to determine which, so we use a boottime override
+ * for the 512kb model, and assume 256 otherwise.
+ */
+ if ((c->x86_vendor == X86_VENDOR_INTEL) && (c->x86 == 6) &&
+ (c->x86_model == 11) && (l2size == 0))
+ l2size = 256;
+
+ if (c->x86_vendor == X86_VENDOR_CENTAUR) {
+ /* VIA C3 CPUs (670-68F) need further shifting. */
+ if ((c->x86 == 6) &&
+ ((c->x86_model == 7) || (c->x86_model == 8))) {
+ l2size >>= 8;
+ }
+
+ /* VIA also screwed up Nehemiah stepping 1, and made
+ it return '65KB' instead of '64KB'
+ - Note, it seems this may only be in engineering samples. */
+ if ((c->x86==6) && (c->x86_model==9) &&
+ (c->x86_mask==1) && (l2size==65))
+ l2size -= 1;
+ }
+
+ /* Allow user to override all this if necessary. */
+ if (cachesize_override != -1)
+ l2size = cachesize_override;
+
+ if ( l2size == 0 )
+ return; /* Again, no L2 cache is possible */
+
+ c->x86_cache_size = l2size;
+
+ printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
+ l2size, ecx & 0xFF);
+}
+
+static void __init init_c3(struct cpuinfo_x86 *c)
+{
+ /* Test for Centaur Extended Feature Flags presence */
+ if (cpuid_eax(0xC0000000) >= 0xC0000001) {
+ /* store Centaur Extended Feature Flags as
+ * word 5 of the CPU capability bit array
+ */
+ c->x86_capability[5] = cpuid_edx(0xC0000001);
+ }
+
+ switch (c->x86_model) {
+ case 9: /* Nehemiah */
+ default:
+ get_model_name(c);
+ display_cacheinfo(c);
+ break;
+ }
+}
+
+static void __init init_centaur(struct cpuinfo_x86 *c)
+{
+ /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
+ 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
+ clear_bit(0*32+31, &c->x86_capability);
+
+ switch (c->x86) {
+ case 6:
+ init_c3(c);
+ break;
+ default:
+ panic("Unsupported Centaur CPU (%i)\n", c->x86);
+ }
+}
+
+static int __init init_amd(struct cpuinfo_x86 *c)
+{
+ int r;
+
+ /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
+ 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
+ clear_bit(0*32+31, &c->x86_capability);
+
+ r = get_model_name(c);
+
+ switch(c->x86)
+ {
+ case 5: /* We don't like AMD K6 */
+ panic("Unsupported AMD processor\n");
+ case 6: /* An Athlon/Duron. We can trust the BIOS probably */
+ break;
+ }
+
+ display_cacheinfo(c);
+ return r;
+}
+
+
+static void __init init_intel(struct cpuinfo_x86 *c)
+{
+ char *p = NULL;
+ unsigned int l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
+
+ if (c->cpuid_level > 1) {
+ /* supports eax=2 call */
+ int i, j, n;
+ int regs[4];
+ unsigned char *dp = (unsigned char *)regs;
+
+ /* Number of times to iterate */
+ n = cpuid_eax(2) & 0xFF;
+
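+ /* CPUID leaf 2 packs one-byte cache/TLB descriptors into EAX..EDX;
+ the low byte of EAX gives the iteration count. */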
+ for ( i = 0 ; i < n ; i++ ) {
+ cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
+
+ /* If bit 31 is set, this is an unknown format */
+ for ( j = 0 ; j < 3 ; j++ ) {
+ if ( regs[j] < 0 ) regs[j] = 0;
+ }
+
+ /* Byte 0 is level count, not a descriptor */
+ for ( j = 1 ; j < 16 ; j++ ) {
+ unsigned char des = dp[j];
+ unsigned char dl, dh;
+ unsigned int cs;
+
+ dh = des >> 4;
+ dl = des & 0x0F;
+
+ /* Black magic... */
+
+ switch ( dh )
+ {
+ case 0:
+ switch ( dl ) {
+ case 6:
+ /* L1 I cache */
+ l1i += 8;
+ break;
+ case 8:
+ /* L1 I cache */
+ l1i += 16;
+ break;
+ case 10:
+ /* L1 D cache */
+ l1d += 8;
+ break;
+ case 12:
+ /* L1 D cache */
+ l1d += 16;
+ break;
+ default:;
+ /* TLB, or unknown */
+ }
+ break;
+ case 2:
+ if ( dl ) {
+ /* L3 cache */
+ cs = (dl-1) << 9;
+ l3 += cs;
+ }
+ break;
+ case 4:
+ if ( c->x86 > 6 && dl ) {
+ /* P4 family */
+ /* L3 cache */
+ cs = 128 << (dl-1);
+ l3 += cs;
+ break;
+ }
+ /* else same as 8 - fall through */
+ case 8:
+ if ( dl ) {
+ /* L2 cache */
+ cs = 128 << (dl-1);
+ l2 += cs;
+ }
+ break;
+ case 6:
+ if (dl > 5) {
+ /* L1 D cache */
+ cs = 8<<(dl-6);
+ l1d += cs;
+ }
+ break;
+ case 7:
+ if ( dl >= 8 )
+ {
+ /* L2 cache */
+ cs = 64<<(dl-8);
+ l2 += cs;
+ } else {
+ /* L0 I cache, count as L1 */
+ cs = dl ? (16 << (dl-1)) : 12;
+ l1i += cs;
+ }
+ break;
+ default:
+ /* TLB, or something else we don't know about */
+ break;
+ }
+ }
+ }
+ if ( l1i || l1d )
+ printk(KERN_INFO "CPU: L1 I cache: %dK, L1 D cache: %dK\n",
+ l1i, l1d);
+ if ( l2 )
+ printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
+ if ( l3 )
+ printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);
+
+ /*
+ * This assumes the L3 cache is shared; it typically lives in
+ * the northbridge. The L1 caches are included by the L2
+ * cache, and so should not be included for the purpose of
+ * SMP switching weights.
+ */
+ c->x86_cache_size = l2 ? l2 : (l1i+l1d);
+ }
+
+ /* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it */
+ if ( c->x86 == 6 && c->x86_model < 3 && c->x86_mask < 3 )
+ clear_bit(X86_FEATURE_SEP, &c->x86_capability);
+
+ /* Names for the Pentium II/Celeron processors
+ detectable only by also checking the cache size.
+ Dixon is NOT a Celeron. */
+ if (c->x86 == 6) {
+ switch (c->x86_model) {
+ case 5:
+ if (l2 == 0)
+ p = "Celeron (Covington)";
+ if (l2 == 256)
+ p = "Mobile Pentium II (Dixon)";
+ break;
+
+ case 6:
+ if (l2 == 128)
+ p = "Celeron (Mendocino)";
+ break;
+
+ case 8:
+ if (l2 == 128)
+ p = "Celeron (Coppermine)";
+ break;
+ }
+ }
+
+ if ( p )
+ strcpy(c->x86_model_id, p);
+}
+
+void __init get_cpu_vendor(struct cpuinfo_x86 *c)
+{
+ char *v = c->x86_vendor_id;
+
+ if (!strcmp(v, "GenuineIntel"))
+ c->x86_vendor = X86_VENDOR_INTEL;
+ else if (!strcmp(v, "AuthenticAMD"))
+ c->x86_vendor = X86_VENDOR_AMD;
+ else if (!strcmp(v, "CentaurHauls"))
+ c->x86_vendor = X86_VENDOR_CENTAUR;
+ else
+ c->x86_vendor = X86_VENDOR_UNKNOWN;
+}
+
+struct cpu_model_info {
+ int vendor;
+ int family;
+ char *model_names[16];
+};
+
+/* Naming convention should be: <Name> [(<Codename>)] */
+/* This table is only used if init_<vendor>() below doesn't set the name; */
+/* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */
+static struct cpu_model_info cpu_models[] __initdata = {
+ { X86_VENDOR_INTEL, 6,
+ { "Pentium Pro A-step", "Pentium Pro", NULL, "Pentium II (Klamath)",
+ NULL, "Pentium II (Deschutes)", "Mobile Pentium II",
+ "Pentium III (Katmai)", "Pentium III (Coppermine)", NULL,
+ "Pentium III (Cascades)", NULL, NULL, NULL, NULL }},
+ { X86_VENDOR_AMD, 6, /* Is this really necessary? */
+ { "Athlon", "Athlon",
+ "Athlon", NULL, "Athlon", NULL,
+ NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL }}
+};
+
+/* Look up CPU names by table lookup. */
+static char __init *table_lookup_model(struct cpuinfo_x86 *c)
+{
+ struct cpu_model_info *info = cpu_models;
+ int i;
+
+ if ( c->x86_model >= 16 )
+ return NULL; /* Range check */
+
+ for ( i = 0 ; i < sizeof(cpu_models)/sizeof(struct cpu_model_info) ; i++ ) {
+ if ( info->vendor == c->x86_vendor &&
+ info->family == c->x86 ) {
+ return info->model_names[c->x86_model];
+ }
+ info++;
+ }
+ return NULL; /* Not found */
+}
+
+
+
+/* Standard macro to see if a specific flag is changeable */
+static inline int flag_is_changeable_p(u32 flag)
+{
+ u32 f1, f2;
+
+ asm("pushfl\n\t"
+ "pushfl\n\t"
+ "popl %0\n\t"
+ "movl %0,%1\n\t"
+ "xorl %2,%0\n\t"
+ "pushl %0\n\t"
+ "popfl\n\t"
+ "pushfl\n\t"
+ "popl %0\n\t"
+ "popfl\n\t"
+ : "=&r" (f1), "=&r" (f2)
+ : "ir" (flag));
+
+ return ((f1^f2) & flag) != 0;
+}
+
+
+/* Probe for the CPUID instruction */
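+/* CPUID exists iff software can toggle the ID bit (bit 21) in EFLAGS. */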
+static int __init have_cpuid_p(void)
+{
+ return flag_is_changeable_p(X86_EFLAGS_ID);
+}
+
+
+
+#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
+unsigned char eddnr;
+struct edd_info edd[EDDMAXNR];
+unsigned int edd_disk80_sig;
+/**
+ * copy_edd() - Copy the BIOS EDD information
+ * from empty_zero_page into a safe place.
+ *
+ */
+static inline void copy_edd(void)
+{
+ eddnr = EDD_NR;
+ memcpy(edd, EDD_BUF, sizeof(edd));
+ edd_disk80_sig = DISK80_SIGNATURE_BUFFER;
+}
+#else
+static inline void copy_edd(void) {}
+#endif
+
+/*
+ * This does the hard work of actually picking apart the CPU stuff...
+ */
+void __init identify_cpu(struct cpuinfo_x86 *c)
+{
+ int junk, i;
+ u32 xlvl, tfms;
+
+ c->loops_per_jiffy = loops_per_jiffy;
+ c->x86_cache_size = -1;
+ c->x86_vendor = X86_VENDOR_UNKNOWN;
+ c->cpuid_level = -1; /* CPUID not detected */
+ c->x86_model = c->x86_mask = 0; /* So far unknown... */
+ c->x86_vendor_id[0] = '\0'; /* Unset */
+ c->x86_model_id[0] = '\0'; /* Unset */
+ memset(&c->x86_capability, 0, sizeof c->x86_capability);
+ c->hard_math = 1;
+
+ if ( !have_cpuid_p() ) {
+ panic("Processor must support CPUID\n");
+ } else {
+ /* CPU does have CPUID */
+
+ /* Get vendor name */
+ cpuid(0x00000000, &c->cpuid_level,
+ (int *)&c->x86_vendor_id[0],
+ (int *)&c->x86_vendor_id[8],
+ (int *)&c->x86_vendor_id[4]);
+
+ get_cpu_vendor(c);
+ /* Initialize the standard set of capabilities */
+ /* Note that the vendor-specific code below might override */
+
+ /* Intel-defined flags: level 0x00000001 */
+ if ( c->cpuid_level >= 0x00000001 ) {
+ u32 capability, excap;
+ cpuid(0x00000001, &tfms, &junk, &excap, &capability);
+ c->x86_capability[0] = capability;
+ c->x86_capability[4] = excap;
+ c->x86 = (tfms >> 8) & 15;
+ c->x86_model = (tfms >> 4) & 15;
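+ /* Family 0xf encodes extra family/model bits in the extended fields. */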
+ if (c->x86 == 0xf) {
+ c->x86 += (tfms >> 20) & 0xff;
+ c->x86_model += ((tfms >> 16) & 0xF) << 4;
+ }
+ c->x86_mask = tfms & 15;
+ } else {
+ /* Have CPUID level 0 only - unheard of */
+ c->x86 = 4;
+ }
+
+ /* AMD-defined flags: level 0x80000001 */
+ xlvl = cpuid_eax(0x80000000);
+ if ( (xlvl & 0xffff0000) == 0x80000000 ) {
+ if ( xlvl >= 0x80000001 )
+ c->x86_capability[1] = cpuid_edx(0x80000001);
+ if ( xlvl >= 0x80000004 )
+ get_model_name(c); /* Default name */
+ }
+
+ /* Transmeta-defined flags: level 0x80860001 */
+ xlvl = cpuid_eax(0x80860000);
+ if ( (xlvl & 0xffff0000) == 0x80860000 ) {
+ if ( xlvl >= 0x80860001 )
+ c->x86_capability[2] = cpuid_edx(0x80860001);
+ }
+ }
+
+ printk(KERN_DEBUG "CPU: Before vendor init, caps: %08x %08x %08x, vendor =
%d\n",
+ c->x86_capability[0],
+ c->x86_capability[1],
+ c->x86_capability[2],
+ c->x86_vendor);
+
+ /*
+ * Vendor-specific initialization. In this section we
+ * canonicalize the feature flags, meaning if there are
+ * features a certain CPU supports which CPUID doesn't
+ * tell us, CPUID claiming incorrect flags, or other bugs,
+ * we handle them here.
+ *
+ * At the end of this section, c->x86_capability better
+ * indicate the features this CPU genuinely supports!
+ */
+ switch ( c->x86_vendor ) {
+ case X86_VENDOR_AMD:
+ init_amd(c);
+ break;
+
+ case X86_VENDOR_INTEL:
+ init_intel(c);
+ break;
+
+ case X86_VENDOR_CENTAUR:
+ init_centaur(c);
+ break;
+
+ default:
+ printk("Unsupported CPU vendor (%d) -- please report!\n",
+ c->x86_vendor);
+ }
+
+ printk(KERN_DEBUG "CPU: After vendor init, caps: %08x %08x %08x %08x\n",
+ c->x86_capability[0],
+ c->x86_capability[1],
+ c->x86_capability[2],
+ c->x86_capability[3]);
+
+
+ /* If the model name is still unset, do table lookup. */
+ if ( !c->x86_model_id[0] ) {
+ char *p;
+ p = table_lookup_model(c);
+ if ( p )
+ strcpy(c->x86_model_id, p);
+ else
+ /* Last resort... */
+ sprintf(c->x86_model_id, "%02x/%02x",
+ c->x86_vendor, c->x86_model);
+ }
+
+ /* Now the feature flags better reflect actual CPU features! */
+
+ printk(KERN_DEBUG "CPU: After generic, caps: %08x %08x %08x %08x\n",
+ c->x86_capability[0],
+ c->x86_capability[1],
+ c->x86_capability[2],
+ c->x86_capability[3]);
+
+ /*
+ * On SMP, boot_cpu_data holds the common feature set between
+ * all CPUs; so make sure that we indicate which features are
+ * common between the CPUs. The first time this routine gets
+ * executed, c == &boot_cpu_data.
+ */
+ if ( c != &boot_cpu_data ) {
+ /* AND the already accumulated flags with these */
+ for ( i = 0 ; i < NCAPINTS ; i++ )
+ boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
+ }
+
+ printk(KERN_DEBUG "CPU: Common caps: %08x %08x %08x %08x\n",
+ boot_cpu_data.x86_capability[0],
+ boot_cpu_data.x86_capability[1],
+ boot_cpu_data.x86_capability[2],
+ boot_cpu_data.x86_capability[3]);
+}
+
+
+/* These need to match <asm/processor.h> */
+static char *cpu_vendor_names[] __initdata = {
+ "Intel", "Cyrix", "AMD", "UMC", "NexGen", "Centaur", "Rise", "Transmeta" };
+
+
+void __init print_cpu_info(struct cpuinfo_x86 *c)
+{
+ char *vendor = NULL;
+
+ if (c->x86_vendor < sizeof(cpu_vendor_names)/sizeof(char *))
+ vendor = cpu_vendor_names[c->x86_vendor];
+ else if (c->cpuid_level >= 0)
+ vendor = c->x86_vendor_id;
+
+ if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
+ printk("%s ", vendor);
+
+ if (!c->x86_model_id[0])
+ printk("%d86", c->x86);
+ else
+ printk("%s", c->x86_model_id);
+
+ if (c->x86_mask || c->cpuid_level >= 0)
+ printk(" stepping %02x\n", c->x86_mask);
+ else
+ printk("\n");
+}
+
+/*
+ * Get CPU information for use by the procfs.
+ */
+static int show_cpuinfo(struct seq_file *m, void *v)
+{
+ /*
+ * These flag bits must match the definitions in <asm/cpufeature.h>.
+ * NULL means this bit is undefined or reserved; either way it doesn't
+ * have meaning as far as Linux is concerned. Note that it's important
+ * to realize there is a difference between this table and CPUID -- if
+ * applications want to get the raw CPUID data, they should access
+ * /dev/cpu/<cpu_nr>/cpuid instead.
+ */
+ static char *x86_cap_flags[] = {
+ /* Intel-defined */
+ "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
+ "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
+ "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
+ "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe",
+
+ /* AMD-defined */
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, "mp", NULL, NULL, "mmxext", NULL,
+ NULL, NULL, NULL, NULL, NULL, "lm", "3dnowext", "3dnow",
+
+ /* Transmeta-defined */
+ "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+
+ /* Other (Linux-defined) */
+ "cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr",
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+
+ /* Intel-defined (#2) */
+ "pni", NULL, NULL, "monitor", "ds_cpl", NULL, NULL, "tm2",
+ "est", NULL, "cid", NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+
+ /* VIA/Cyrix/Centaur-defined */
+ NULL, NULL, "xstore", NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+
+ };
+ struct cpuinfo_x86 *c = v;
+ int i, n = c - cpu_data;
+ int fpu_exception;
+
+#ifdef CONFIG_SMP
+ if (!(cpu_online_map & (1<<n)))
+ return 0;
+#endif
+ seq_printf(m, "processor\t: %d\n"
+ "vendor_id\t: %s\n"
+ "cpu family\t: %d\n"
+ "model\t\t: %d\n"
+ "model name\t: %s\n",
+ n,
+ c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
+ c->x86,
+ c->x86_model,
+ c->x86_model_id[0] ? c->x86_model_id : "unknown");
+
+ if (c->x86_mask || c->cpuid_level >= 0)
+ seq_printf(m, "stepping\t: %d\n", c->x86_mask);
+ else
+ seq_printf(m, "stepping\t: unknown\n");
+
+ if ( test_bit(X86_FEATURE_TSC, &c->x86_capability) ) {
+ seq_printf(m, "cpu MHz\t\t: %lu.%03lu\n",
+ cpu_khz / 1000, (cpu_khz % 1000));
+ }
+
+ /* Cache size */
+ if (c->x86_cache_size >= 0)
+ seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
+
+ /* We use exception 16 if we have hardware math and we've either seen
+ it or the CPU claims it is internal */
+ fpu_exception = c->hard_math && (ignore_irq13 || cpu_has_fpu);
+ seq_printf(m, "fdiv_bug\t: %s\n"
+ "hlt_bug\t\t: %s\n"
+ "f00f_bug\t: %s\n"
+ "coma_bug\t: %s\n"
+ "fpu\t\t: %s\n"
+ "fpu_exception\t: %s\n"
+ "cpuid level\t: %d\n"
+ "wp\t\t: %s\n"
+ "flags\t\t:",
+ c->fdiv_bug ? "yes" : "no",
+ c->hlt_works_ok ? "no" : "yes",
+ c->f00f_bug ? "yes" : "no",
+ c->coma_bug ? "yes" : "no",
+ c->hard_math ? "yes" : "no",
+ fpu_exception ? "yes" : "no",
+ c->cpuid_level,
+ c->wp_works_ok ? "yes" : "no");
+
+ for ( i = 0 ; i < 32*NCAPINTS ; i++ )
+ if ( test_bit(i, &c->x86_capability) &&
+ x86_cap_flags[i] != NULL )
+ seq_printf(m, " %s", x86_cap_flags[i]);
+
+ seq_printf(m, "\nbogomips\t: %lu.%02lu\n\n",
+ c->loops_per_jiffy/(500000/HZ),
+ (c->loops_per_jiffy/(5000/HZ)) % 100);
+ return 0;
+}
+
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+ return *pos < NR_CPUS ? cpu_data + *pos : NULL;
+}
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ ++*pos;
+ return c_start(m, pos);
+}
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+struct seq_operations cpuinfo_op = {
+ start: c_start,
+ next: c_next,
+ stop: c_stop,
+ show: show_cpuinfo,
+};
+
+unsigned long cpu_initialized __initdata = 0;
+
+/*
+ * cpu_init() initializes state that is per-CPU. Some data is already
+ * initialized (naturally) in the bootstrap process, such as the GDT
+ * and IDT. We reload them nevertheless, this function acts as a
+ * 'CPU state barrier', nothing should get across.
+ */
+void __init cpu_init (void)
+{
+ int nr = smp_processor_id();
+
+ if (test_and_set_bit(nr, &cpu_initialized)) {
+ printk(KERN_WARNING "CPU#%d already initialized!\n", nr);
+ for (;;) __sti();
+ }
+ printk(KERN_INFO "Initializing CPU#%d\n", nr);
+
+ /*
+ * set up and load the per-CPU TSS and LDT
+ */
+ atomic_inc(&init_mm.mm_count);
+ current->active_mm = &init_mm;
+ if(current->mm)
+ BUG();
+ enter_lazy_tlb(&init_mm, current, nr);
+
+ HYPERVISOR_stack_switch(__KERNEL_DS, current->thread.esp0);
+
+ load_LDT(&init_mm.context);
+ flush_page_update_queue();
+
+ /* Force FPU initialization. */
+ current->flags &= ~PF_USEDFPU;
+ current->used_math = 0;
+ stts();
+}
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/arch/xen/kernel/signal.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/arch/xen/kernel/signal.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,717 @@
+/*
+ * linux/arch/i386/kernel/signal.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ *
+ * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
+ * 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes
+ */
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/ptrace.h>
+#include <linux/unistd.h>
+#include <linux/stddef.h>
+#include <linux/tty.h>
+#include <linux/personality.h>
+#include <asm/ucontext.h>
+#include <asm/uaccess.h>
+#include <asm/i387.h>
+
+#define DEBUG_SIG 0
+
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
+int FASTCALL(do_signal(struct pt_regs *regs, sigset_t *oldset));
+
+int copy_siginfo_to_user(siginfo_t *to, siginfo_t *from)
+{
+ if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
+ return -EFAULT;
+ if (from->si_code < 0)
+ return __copy_to_user(to, from, sizeof(siginfo_t));
+ else {
+ int err;
+
+ /* If you change siginfo_t structure, please be sure
+ this code is fixed accordingly.
+ It should never copy any pad contained in the structure
+ to avoid security leaks, but must copy the generic
+ 3 ints plus the relevant union member. */
+ err = __put_user(from->si_signo, &to->si_signo);
+ err |= __put_user(from->si_errno, &to->si_errno);
+ err |= __put_user((short)from->si_code, &to->si_code);
+ /* First 32bits of unions are always present. */
+ err |= __put_user(from->si_pid, &to->si_pid);
+ switch (from->si_code >> 16) {
+ case __SI_FAULT >> 16:
+ break;
+ case __SI_CHLD >> 16:
+ err |= __put_user(from->si_utime, &to->si_utime);
+ err |= __put_user(from->si_stime, &to->si_stime);
+ err |= __put_user(from->si_status, &to->si_status);
+ default:
+ err |= __put_user(from->si_uid, &to->si_uid);
+ break;
+ /* case __SI_RT: This is not generated by the kernel as of now. */
+ }
+ return err;
+ }
+}
+
+/*
+ * Atomically swap in the new signal mask, and wait for a signal.
+ */
+asmlinkage int
+sys_sigsuspend(int history0, int history1, old_sigset_t mask)
+{
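+ /* On i386 the syscall arguments overlay the saved register frame, so
+ the address of the first argument recovers struct pt_regs. */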
+ struct pt_regs * regs = (struct pt_regs *) &history0;
+ sigset_t saveset;
+
+ mask &= _BLOCKABLE;
+ spin_lock_irq(&current->sigmask_lock);
+ saveset = current->blocked;
+ siginitset(&current->blocked, mask);
+ recalc_sigpending(current);
+ spin_unlock_irq(&current->sigmask_lock);
+
+ regs->eax = -EINTR;
+ while (1) {
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ if (do_signal(regs, &saveset))
+ return -EINTR;
+ }
+}
+
+asmlinkage int
+sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize)
+{
+ struct pt_regs * regs = (struct pt_regs *) &unewset;
+ sigset_t saveset, newset;
+
+ /* XXX: Don't preclude handling different sized sigset_t's. */
+ if (sigsetsize != sizeof(sigset_t))
+ return -EINVAL;
+
+ if (copy_from_user(&newset, unewset, sizeof(newset)))
+ return -EFAULT;
+ sigdelsetmask(&newset, ~_BLOCKABLE);
+
+ spin_lock_irq(&current->sigmask_lock);
+ saveset = current->blocked;
+ current->blocked = newset;
+ recalc_sigpending(current);
+ spin_unlock_irq(&current->sigmask_lock);
+
+ regs->eax = -EINTR;
+ while (1) {
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ if (do_signal(regs, &saveset))
+ return -EINTR;
+ }
+}
+
+asmlinkage int
+sys_sigaction(int sig, const struct old_sigaction *act,
+ struct old_sigaction *oact)
+{
+ struct k_sigaction new_ka, old_ka;
+ int ret;
+
+ if (act) {
+ old_sigset_t mask;
+ if (verify_area(VERIFY_READ, act, sizeof(*act)) ||
+ __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
+ __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
+ return -EFAULT;
+ __get_user(new_ka.sa.sa_flags, &act->sa_flags);
+ __get_user(mask, &act->sa_mask);
+ siginitset(&new_ka.sa.sa_mask, mask);
+ }
+
+ ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+
+ if (!ret && oact) {
+ if (verify_area(VERIFY_WRITE, oact, sizeof(*oact)) ||
+ __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
+ __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
+ return -EFAULT;
+ __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
+ __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
+ }
+
+ return ret;
+}
+
+asmlinkage int
+sys_sigaltstack(const stack_t *uss, stack_t *uoss)
+{
+ struct pt_regs *regs = (struct pt_regs *) &uss;
+ return do_sigaltstack(uss, uoss, regs->esp);
+}
+
+
+/*
+ * Do a signal return; undo the signal stack.
+ */
+
+struct sigframe
+{
+ char *pretcode;
+ int sig;
+ struct sigcontext sc;
+ struct _fpstate fpstate;
+ unsigned long extramask[_NSIG_WORDS-1];
+ char retcode[8];
+};
+
+struct rt_sigframe
+{
+ char *pretcode;
+ int sig;
+ struct siginfo *pinfo;
+ void *puc;
+ struct siginfo info;
+ struct ucontext uc;
+ struct _fpstate fpstate;
+ char retcode[8];
+};
+
+static int
+restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc, int *peax)
+{
+ unsigned int err = 0;
+
+#define COPY(x) err |= __get_user(regs->x, &sc->x)
+
+#define COPY_SEG(seg) \
+ { unsigned short tmp; \
+ err |= __get_user(tmp, &sc->seg); \
+ regs->x##seg = tmp; }
+
+#define COPY_SEG_STRICT(seg) \
+ { unsigned short tmp; \
+ err |= __get_user(tmp, &sc->seg); \
+ regs->x##seg = tmp|3; }
+
+#define GET_SEG(seg) \
+ { unsigned short tmp; \
+ err |= __get_user(tmp, &sc->seg); \
+ loadsegment(seg,tmp); }
+
+ GET_SEG(gs);
+ GET_SEG(fs);
+ COPY_SEG(es);
+ COPY_SEG(ds);
+ COPY(edi);
+ COPY(esi);
+ COPY(ebp);
+ COPY(esp);
+ COPY(ebx);
+ COPY(edx);
+ COPY(ecx);
+ COPY(eip);
+ COPY_SEG_STRICT(cs);
+ COPY_SEG_STRICT(ss);
+
+ {
+ unsigned int tmpflags;
+ err |= __get_user(tmpflags, &sc->eflags);
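+ /* 0x40DD5 = AC|OF|DF|TF|SF|ZF|AF|PF|CF: only these flags may be
+ restored from the signal frame. */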
+ regs->eflags = (regs->eflags & ~0x40DD5) | (tmpflags & 0x40DD5);
+ regs->orig_eax = -1; /* disable syscall checks */
+ }
+
+ {
+ struct _fpstate * buf;
+ err |= __get_user(buf, &sc->fpstate);
+ if (buf) {
+ if (verify_area(VERIFY_READ, buf, sizeof(*buf)))
+ goto badframe;
+ err |= restore_i387(buf);
+ }
+ }
+
+ err |= __get_user(*peax, &sc->eax);
+ return err;
+
+badframe:
+ return 1;
+}
+
+asmlinkage int sys_sigreturn(unsigned long __unused)
+{
+ struct pt_regs *regs = (struct pt_regs *) &__unused;
+ struct sigframe *frame = (struct sigframe *)(regs->esp - 8);
+ sigset_t set;
+ int eax;
+
+ if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
+ goto badframe;
+ if (__get_user(set.sig[0], &frame->sc.oldmask)
+ || (_NSIG_WORDS > 1
+ && __copy_from_user(&set.sig[1], &frame->extramask,
+ sizeof(frame->extramask))))
+ goto badframe;
+
+ sigdelsetmask(&set, ~_BLOCKABLE);
+ spin_lock_irq(&current->sigmask_lock);
+ current->blocked = set;
+ recalc_sigpending(current);
+ spin_unlock_irq(&current->sigmask_lock);
+
+ if (restore_sigcontext(regs, &frame->sc, &eax))
+ goto badframe;
+ return eax;
+
+badframe:
+ force_sig(SIGSEGV, current);
+ return 0;
+}
+
+asmlinkage int sys_rt_sigreturn(unsigned long __unused)
+{
+ struct pt_regs *regs = (struct pt_regs *) &__unused;
+ struct rt_sigframe *frame = (struct rt_sigframe *)(regs->esp - 4);
+ sigset_t set;
+ stack_t st;
+ int eax;
+
+ if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
+ goto badframe;
+ if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+ goto badframe;
+
+ sigdelsetmask(&set, ~_BLOCKABLE);
+ spin_lock_irq(&current->sigmask_lock);
+ current->blocked = set;
+ recalc_sigpending(current);
+ spin_unlock_irq(&current->sigmask_lock);
+
+ if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &eax))
+ goto badframe;
+
+ if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
+ goto badframe;
+ /* It is more difficult to avoid calling this function than to
+ call it and ignore errors. */
+ do_sigaltstack(&st, NULL, regs->esp);
+
+ return eax;
+
+badframe:
+ force_sig(SIGSEGV, current);
+ return 0;
+}
+
+/*
+ * Set up a signal frame.
+ */
+
+static int
+setup_sigcontext(struct sigcontext *sc, struct _fpstate *fpstate,
+ struct pt_regs *regs, unsigned long mask)
+{
+ int tmp, err = 0;
+
+ tmp = 0;
+ __asm__("movl %%gs,%0" : "=r"(tmp): "0"(tmp));
+ err |= __put_user(tmp, (unsigned int *)&sc->gs);
+ __asm__("movl %%fs,%0" : "=r"(tmp): "0"(tmp));
+ err |= __put_user(tmp, (unsigned int *)&sc->fs);
+
+ err |= __put_user(regs->xes, (unsigned int *)&sc->es);
+ err |= __put_user(regs->xds, (unsigned int *)&sc->ds);
+ err |= __put_user(regs->edi, &sc->edi);
+ err |= __put_user(regs->esi, &sc->esi);
+ err |= __put_user(regs->ebp, &sc->ebp);
+ err |= __put_user(regs->esp, &sc->esp);
+ err |= __put_user(regs->ebx, &sc->ebx);
+ err |= __put_user(regs->edx, &sc->edx);
+ err |= __put_user(regs->ecx, &sc->ecx);
+ err |= __put_user(regs->eax, &sc->eax);
+ err |= __put_user(current->thread.trap_no, &sc->trapno);
+ err |= __put_user(current->thread.error_code, &sc->err);
+ err |= __put_user(regs->eip, &sc->eip);
+ err |= __put_user(regs->xcs, (unsigned int *)&sc->cs);
+ err |= __put_user(regs->eflags, &sc->eflags);
+ err |= __put_user(regs->esp, &sc->esp_at_signal);
+ err |= __put_user(regs->xss, (unsigned int *)&sc->ss);
+
+ tmp = save_i387(fpstate);
+ if (tmp < 0)
+ err = 1;
+ else
+ err |= __put_user(tmp ? fpstate : NULL, &sc->fpstate);
+
+ /* non-iBCS2 extensions.. */
+ err |= __put_user(mask, &sc->oldmask);
+ err |= __put_user(current->thread.cr2, &sc->cr2);
+
+ return err;
+}
+
+/*
+ * Determine which stack to use..
+ */
+static inline void *
+get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
+{
+ unsigned long esp;
+
+ /* Default to using normal stack */
+ esp = regs->esp;
+
+ /* This is the X/Open sanctioned signal stack switching. */
+ if (ka->sa.sa_flags & SA_ONSTACK) {
+ if (sas_ss_flags(esp) == 0)
+ esp = current->sas_ss_sp + current->sas_ss_size;
+ }
+
+ /* This is the legacy signal stack switching. */
+ else if ((regs->xss & 0xffff) != __USER_DS &&
+ !(ka->sa.sa_flags & SA_RESTORER) &&
+ ka->sa.sa_restorer) {
+ esp = (unsigned long) ka->sa.sa_restorer;
+ }
+
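+ /* Round the frame down to an 8-byte boundary. */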
+ return (void *)((esp - frame_size) & -8ul);
+}
+
+static void setup_frame(int sig, struct k_sigaction *ka,
+ sigset_t *set, struct pt_regs * regs)
+{
+ struct sigframe *frame;
+ int err = 0;
+
+ frame = get_sigframe(ka, regs, sizeof(*frame));
+
+ if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+ goto give_sigsegv;
+
+ err |= __put_user((current->exec_domain
+ && current->exec_domain->signal_invmap
+ && sig < 32
+ ? current->exec_domain->signal_invmap[sig]
+ : sig),
+ &frame->sig);
+ if (err)
+ goto give_sigsegv;
+
+ err |= setup_sigcontext(&frame->sc, &frame->fpstate, regs, set->sig[0]);
+ if (err)
+ goto give_sigsegv;
+
+ if (_NSIG_WORDS > 1) {
+ err |= __copy_to_user(frame->extramask, &set->sig[1],
+ sizeof(frame->extramask));
+ }
+ if (err)
+ goto give_sigsegv;
+
+ /* Set up to return from userspace. If provided, use a stub
+ already in userspace. */
+ if (ka->sa.sa_flags & SA_RESTORER) {
+ err |= __put_user(ka->sa.sa_restorer, &frame->pretcode);
+ } else {
+ err |= __put_user(frame->retcode, &frame->pretcode);
+ /* This is popl %eax ; movl $,%eax ; int $0x80 */
+ err |= __put_user(0xb858, (short *)(frame->retcode+0));
+ err |= __put_user(__NR_sigreturn, (int *)(frame->retcode+2));
+ err |= __put_user(0x80cd, (short *)(frame->retcode+6));
+ }
+
+ if (err)
+ goto give_sigsegv;
+
+ /* Set up registers for signal handler */
+ regs->esp = (unsigned long) frame;
+ regs->eip = (unsigned long) ka->sa.sa_handler;
+
+ set_fs(USER_DS);
+ regs->xds = __USER_DS;
+ regs->xes = __USER_DS;
+ regs->xss = __USER_DS;
+ regs->xcs = __USER_CS;
+ regs->eflags &= ~TF_MASK;
+
+#if DEBUG_SIG
+ printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n",
+ current->comm, current->pid, frame, regs->eip, frame->pretcode);
+#endif
+
+ return;
+
+give_sigsegv:
+ if (sig == SIGSEGV)
+ ka->sa.sa_handler = SIG_DFL;
+ force_sig(SIGSEGV, current);
+}
+
+static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+ sigset_t *set, struct pt_regs * regs)
+{
+ struct rt_sigframe *frame;
+ int err = 0;
+
+ frame = get_sigframe(ka, regs, sizeof(*frame));
+
+ if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+ goto give_sigsegv;
+
+ err |= __put_user((current->exec_domain
+ && current->exec_domain->signal_invmap
+ && sig < 32
+ ? current->exec_domain->signal_invmap[sig]
+ : sig),
+ &frame->sig);
+ err |= __put_user(&frame->info, &frame->pinfo);
+ err |= __put_user(&frame->uc, &frame->puc);
+ err |= copy_siginfo_to_user(&frame->info, info);
+ if (err)
+ goto give_sigsegv;
+
+ /* Create the ucontext. */
+ err |= __put_user(0, &frame->uc.uc_flags);
+ err |= __put_user(0, &frame->uc.uc_link);
+ err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
+ err |= __put_user(sas_ss_flags(regs->esp),
+ &frame->uc.uc_stack.ss_flags);
+ err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+ err |= setup_sigcontext(&frame->uc.uc_mcontext, &frame->fpstate,
+ regs, set->sig[0]);
+ err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+ if (err)
+ goto give_sigsegv;
+
+ /* Set up to return from userspace. If provided, use a stub
+ already in userspace. */
+ if (ka->sa.sa_flags & SA_RESTORER) {
+ err |= __put_user(ka->sa.sa_restorer, &frame->pretcode);
+ } else {
+ err |= __put_user(frame->retcode, &frame->pretcode);
+ /* This is movl $,%eax ; int $0x80 */
+ err |= __put_user(0xb8, (char *)(frame->retcode+0));
+ err |= __put_user(__NR_rt_sigreturn, (int *)(frame->retcode+1));
+ err |= __put_user(0x80cd, (short *)(frame->retcode+5));
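+		/* Little-endian: 0xb8 imm32 = movl $__NR_rt_sigreturn,%eax
+		 * (imm32 at +1), then 0xcd 0x80 = int $0x80 at +5. */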
+ }
+
+ if (err)
+ goto give_sigsegv;
+
+ /* Set up registers for signal handler */
+ regs->esp = (unsigned long) frame;
+ regs->eip = (unsigned long) ka->sa.sa_handler;
+
+ set_fs(USER_DS);
+ regs->xds = __USER_DS;
+ regs->xes = __USER_DS;
+ regs->xss = __USER_DS;
+ regs->xcs = __USER_CS;
+ regs->eflags &= ~TF_MASK;
+
+#if DEBUG_SIG
+ printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n",
+ current->comm, current->pid, frame, regs->eip, frame->pretcode);
+#endif
+
+ return;
+
+give_sigsegv:
+ if (sig == SIGSEGV)
+ ka->sa.sa_handler = SIG_DFL;
+ force_sig(SIGSEGV, current);
+}
+
+/*
+ * OK, we're invoking a handler
+ */
+
+static void
+handle_signal(unsigned long sig, struct k_sigaction *ka,
+ siginfo_t *info, sigset_t *oldset, struct pt_regs * regs)
+{
+ /* Are we from a system call? */
+ if (regs->orig_eax >= 0) {
+ /* If so, check system call restarting.. */
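+		/* (The 'eip -= 2' below backs up over the two-byte
+		 * 'int $0x80', so the syscall is re-issued on return.) */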
+ switch (regs->eax) {
+ case -ERESTARTNOHAND:
+ regs->eax = -EINTR;
+ break;
+
+ case -ERESTARTSYS:
+ if (!(ka->sa.sa_flags & SA_RESTART)) {
+ regs->eax = -EINTR;
+ break;
+ }
+ /* fallthrough */
+ case -ERESTARTNOINTR:
+ regs->eax = regs->orig_eax;
+ regs->eip -= 2;
+ }
+ }
+
+ /* Set up the stack frame */
+ if (ka->sa.sa_flags & SA_SIGINFO)
+ setup_rt_frame(sig, ka, info, oldset, regs);
+ else
+ setup_frame(sig, ka, oldset, regs);
+
+ if (ka->sa.sa_flags & SA_ONESHOT)
+ ka->sa.sa_handler = SIG_DFL;
+
+ if (!(ka->sa.sa_flags & SA_NODEFER)) {
+		spin_lock_irq(&current->sigmask_lock);
+		sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+		sigaddset(&current->blocked,sig);
+		recalc_sigpending(current);
+		spin_unlock_irq(&current->sigmask_lock);
+ }
+}
+
+/*
+ * Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle. Thus you cannot kill init even with a SIGKILL even by
+ * mistake.
+ */
+int fastcall do_signal(struct pt_regs *regs, sigset_t *oldset)
+{
+ siginfo_t info;
+ struct k_sigaction *ka;
+
+ /*
+ * We want the common case to go fast, which
+ * is why we may in certain cases get here from
+ * kernel mode. Just return without doing anything
+ * if so.
+ */
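+	/* Under Xen, bit 1 of the saved CS is set only in user-mode
+	 * frames; the guest kernel itself runs in a lower ring. */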
+ if ((regs->xcs & 2) != 2)
+ return 1;
+
+ if (!oldset)
+		oldset = &current->blocked;
+
+ for (;;) {
+ unsigned long signr;
+
+		spin_lock_irq(&current->sigmask_lock);
+		signr = dequeue_signal(&current->blocked, &info);
+		spin_unlock_irq(&current->sigmask_lock);
+
+ if (!signr)
+ break;
+
+ if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
+ /* Let the debugger run. */
+ current->exit_code = signr;
+ current->state = TASK_STOPPED;
+ notify_parent(current, SIGCHLD);
+ schedule();
+
+ /* We're back. Did the debugger cancel the sig? */
+ if (!(signr = current->exit_code))
+ continue;
+ current->exit_code = 0;
+
+ /* The debugger continued. Ignore SIGSTOP. */
+ if (signr == SIGSTOP)
+ continue;
+
+ /* Update the siginfo structure. Is this good? */
+ if (signr != info.si_signo) {
+ info.si_signo = signr;
+ info.si_errno = 0;
+ info.si_code = SI_USER;
+ info.si_pid = current->p_pptr->pid;
+ info.si_uid = current->p_pptr->uid;
+ }
+
+ /* If the (new) signal is now blocked, requeue it. */
+			if (sigismember(&current->blocked, signr)) {
+ send_sig_info(signr, &info, current);
+ continue;
+ }
+ }
+
+		ka = &current->sig->action[signr-1];
+ if (ka->sa.sa_handler == SIG_IGN) {
+ if (signr != SIGCHLD)
+ continue;
+ /* Check for SIGCHLD: it's special. */
+ while (sys_wait4(-1, NULL, WNOHANG, NULL) > 0)
+ /* nothing */;
+ continue;
+ }
+
+ if (ka->sa.sa_handler == SIG_DFL) {
+ int exit_code = signr;
+
+ /* Init gets no signals it doesn't want. */
+ if (current->pid == 1)
+ continue;
+
+ switch (signr) {
+ case SIGCONT: case SIGCHLD: case SIGWINCH: case SIGURG:
+ continue;
+
+ case SIGTSTP: case SIGTTIN: case SIGTTOU:
+ if (is_orphaned_pgrp(current->pgrp))
+ continue;
+ /* FALLTHRU */
+
+ case SIGSTOP: {
+ struct signal_struct *sig;
+ current->state = TASK_STOPPED;
+ current->exit_code = signr;
+ sig = current->p_pptr->sig;
+				if (sig && !(sig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
+ notify_parent(current, SIGCHLD);
+ schedule();
+ continue;
+ }
+
+ case SIGQUIT: case SIGILL: case SIGTRAP:
+ case SIGABRT: case SIGFPE: case SIGSEGV:
+ case SIGBUS: case SIGSYS: case SIGXCPU: case SIGXFSZ:
+ if (do_coredump(signr, regs))
+ exit_code |= 0x80;
+ /* FALLTHRU */
+
+ default:
+ sig_exit(signr, exit_code, &info);
+ /* NOTREACHED */
+ }
+ }
+
+ /* Reenable any watchpoints before delivering the
+ * signal to user space. The processor register will
+ * have been cleared if the watchpoint triggered
+ * inside the kernel.
+ */
+ if ( current->thread.debugreg[7] != 0 )
+ HYPERVISOR_set_debugreg(7, current->thread.debugreg[7]);
+
+ /* Whee! Actually deliver the signal. */
+ handle_signal(signr, ka, &info, oldset, regs);
+ return 1;
+ }
+
+ /* Did we come from a system call? */
+ if (regs->orig_eax >= 0) {
+ /* Restart the system call - no handlers present */
+ if (regs->eax == -ERESTARTNOHAND ||
+ regs->eax == -ERESTARTSYS ||
+ regs->eax == -ERESTARTNOINTR) {
+ regs->eax = regs->orig_eax;
+ regs->eip -= 2;
+ }
+ }
+ return 0;
+}
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/arch/xen/kernel/time.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/arch/xen/kernel/time.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,721 @@
+/* -*- Mode:C; c-basic-offset:4; tab-width:4 -*-
+ ****************************************************************************
+ * (C) 2002-2003 - Rolf Neugebauer - Intel Research Cambridge
+ * (C) 2002-2003 - Keir Fraser - University of Cambridge
+ ****************************************************************************
+ *
+ * File: arch/xen/kernel/time.c
+ * Author: Rolf Neugebauer and Keir Fraser
+ *
+ * Description: Interface with Xen to get correct notion of time
+ */
+
+/*
+ * linux/arch/i386/kernel/time.c
+ *
+ * Copyright (C) 1991, 1992, 1995 Linus Torvalds
+ *
+ * This file contains the PC-specific time handling details:
+ * reading the RTC at bootup, etc..
+ * 1994-07-02 Alan Modra
+ * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
+ * 1995-03-26 Markus Kuhn
+ * fixed 500 ms bug at call to set_rtc_mmss, fixed DS12887
+ * precision CMOS clock update
+ * 1996-05-03 Ingo Molnar
+ * fixed time warps in do_[slow|fast]_gettimeoffset()
+ * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
+ * "A Kernel Model for Precision Timekeeping" by Dave Mills
+ * 1998-09-05 (Various)
+ * More robust do_fast_gettimeoffset() algorithm implemented
+ * (works with APM, Cyrix 6x86MX and Centaur C6),
+ * monotonic gettimeofday() with fast_get_timeoffset(),
+ * drift-proof precision TSC calibration on boot
+ * (C. Scott Ananian <cananian@xxxxxxxxxxxxxxxxxxxx>, Andrew D.
+ * Balsa <andrebalsa@xxxxxxxxxx>, Philip Gladstone <philip@xxxxxxxxxx>;
+ * ported from 2.0.35 Jumbo-9 by Michael Krause <m.krause@xxxxxxxxxxxxx>).
+ * 1998-12-16 Andrea Arcangeli
+ * Fixed Jumbo-9 code in 2.1.131: do_gettimeofday was missing 1 jiffy
+ * because was not accounting lost_ticks.
+ * 1998-12-24 Copyright (C) 1998 Andrea Arcangeli
+ * Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
+ * serialize accesses to xtime/lost_ticks).
+ */
+
+#include <asm/smp.h>
+#include <asm/irq.h>
+#include <asm/msr.h>
+#include <asm/delay.h>
+#include <asm/mpspec.h>
+#include <asm/uaccess.h>
+#include <asm/processor.h>
+
+#include <asm/div64.h>
+#include <asm/hypervisor.h>
+#include <asm-xen/xen-public/dom0_ops.h>
+
+#include <linux/mc146818rtc.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/irq.h>
+#include <linux/sysctl.h>
+#include <linux/sysrq.h>
+
+spinlock_t rtc_lock = SPIN_LOCK_UNLOCKED;
+extern rwlock_t xtime_lock;
+extern unsigned long wall_jiffies;
+
+unsigned long cpu_khz; /* get this from Xen, used elsewhere */
+
+static unsigned int rdtsc_bitshift;
+static u32 st_scale_f; /* convert ticks -> usecs */
+static u32 st_scale_i; /* convert ticks -> usecs */
+
+/* These are periodically updated in shared_info, and then copied here. */
+static u32 shadow_tsc_stamp;
+static u64 shadow_system_time;
+static u32 shadow_time_version;
+static struct timeval shadow_tv;
+
+/*
+ * We use this to ensure that gettimeofday() is monotonically increasing. We
+ * only break this guarantee if the wall clock jumps backwards "a long way".
+ */
+static struct timeval last_seen_tv = {0,0};
+
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+/* Periodically propagate synchronised time base to the RTC and to Xen. */
+static long last_update_to_rtc, last_update_to_xen;
+#endif
+
+/* Periodically take synchronised time base from Xen, if we need it. */
+static long last_update_from_xen; /* UTC seconds when last read Xen clock. */
+
+/* Keep track of last time we did processing/updating of jiffies and xtime. */
+static u64 processed_system_time; /* System time (ns) at last processing. */
+
+#define NS_PER_TICK (1000000000ULL/HZ)
+
+#ifndef NSEC_PER_SEC
+#define NSEC_PER_SEC (1000000000L)
+#endif
+
+#define HANDLE_USEC_UNDERFLOW(_tv) \
+ do { \
+ while ( (_tv).tv_usec < 0 ) \
+ { \
+ (_tv).tv_usec += 1000000; \
+ (_tv).tv_sec--; \
+ } \
+ } while ( 0 )
+#define HANDLE_USEC_OVERFLOW(_tv) \
+ do { \
+ while ( (_tv).tv_usec >= 1000000 ) \
+ { \
+ (_tv).tv_usec -= 1000000; \
+ (_tv).tv_sec++; \
+ } \
+ } while ( 0 )
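+/* Both macros renormalise a timeval so that 0 <= tv_usec < 1000000,
+ * carrying whole seconds into or out of tv_sec. */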
+static inline void __normalize_time(time_t *sec, s64 *nsec)
+{
+ while (*nsec >= NSEC_PER_SEC) {
+ (*nsec) -= NSEC_PER_SEC;
+ (*sec)++;
+ }
+ while (*nsec < 0) {
+ (*nsec) += NSEC_PER_SEC;
+ (*sec)--;
+ }
+}
+
+/* Dynamically-mapped IRQs. */
+static int time_irq, debug_irq;
+
+/* Does this guest OS track Xen time, or set its wall clock independently? */
+static int independent_wallclock = 0;
+static int __init __independent_wallclock(char *str)
+{
+ independent_wallclock = 1;
+ return 1;
+}
+__setup("independent_wallclock", __independent_wallclock);
+#define INDEPENDENT_WALLCLOCK() \
+ (independent_wallclock || (xen_start_info.flags & SIF_INITDOMAIN))
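+/* The initial domain always manages its own wall clock; other domains do
+ * so only if "independent_wallclock" is given on the command line. */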
+
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+/*
+ * In order to set the CMOS clock precisely, set_rtc_mmss has to be
+ * called 500 ms after the second nowtime has started, because when
+ * nowtime is written into the registers of the CMOS clock, it will
+ * jump to the next second precisely 500 ms later. Check the Motorola
+ * MC146818A or Dallas DS12887 data sheet for details.
+ *
+ * BUG: This routine does not handle hour overflow properly; it just
+ * sets the minutes. Usually you'll only notice that after reboot!
+ */
+static int set_rtc_mmss(unsigned long nowtime)
+{
+ int retval = 0;
+ int real_seconds, real_minutes, cmos_minutes;
+ unsigned char save_control, save_freq_select;
+
+ /* gets recalled with irq locally disabled */
+ spin_lock(&rtc_lock);
+ save_control = CMOS_READ(RTC_CONTROL);
+ CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
+
+ save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
+ CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
+
+ cmos_minutes = CMOS_READ(RTC_MINUTES);
+ if ( !(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD )
+ BCD_TO_BIN(cmos_minutes);
+
+ /*
+ * since we're only adjusting minutes and seconds, don't interfere with
+ * hour overflow. This avoids messing with unknown time zones but requires
+ * your RTC not to be off by more than 15 minutes
+ */
+ real_seconds = nowtime % 60;
+ real_minutes = nowtime / 60;
+ if ( ((abs(real_minutes - cmos_minutes) + 15)/30) & 1 )
+ real_minutes += 30; /* correct for half hour time zone */
+ real_minutes %= 60;
+
+ if ( abs(real_minutes - cmos_minutes) < 30 )
+ {
+ if ( !(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD )
+ {
+ BIN_TO_BCD(real_seconds);
+ BIN_TO_BCD(real_minutes);
+ }
+ CMOS_WRITE(real_seconds,RTC_SECONDS);
+ CMOS_WRITE(real_minutes,RTC_MINUTES);
+ }
+ else
+ {
+ printk(KERN_WARNING
+ "set_rtc_mmss: can't update from %d to %d\n",
+ cmos_minutes, real_minutes);
+ retval = -1;
+ }
+
+ /* The following flags have to be released exactly in this order,
+ * otherwise the DS12887 (popular MC146818A clone with integrated
+ * battery and quartz) will not reset the oscillator and will not
+ * update precisely 500 ms later. You won't find this mentioned in
+ * the Dallas Semiconductor data sheets, but who believes data
+ * sheets anyway ... -- Markus Kuhn
+ */
+ CMOS_WRITE(save_control, RTC_CONTROL);
+ CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
+ spin_unlock(&rtc_lock);
+
+ return retval;
+}
+#endif
+
+
+/*
+ * Reads a consistent set of time-base values from Xen, into a shadow data
+ * area. Must be called with the xtime_lock held for writing.
+ */
+static void __get_time_values_from_xen(void)
+{
+ do {
+ shadow_time_version = HYPERVISOR_shared_info->time_version2;
+ rmb();
+ shadow_tv.tv_sec = HYPERVISOR_shared_info->wc_sec;
+ shadow_tv.tv_usec = HYPERVISOR_shared_info->wc_usec;
+ shadow_tsc_stamp =
+ (u32)(HYPERVISOR_shared_info->tsc_timestamp >> rdtsc_bitshift);
+ shadow_system_time = HYPERVISOR_shared_info->system_time;
+ rmb();
+ }
+ while ( shadow_time_version != HYPERVISOR_shared_info->time_version1 );
+}
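+/*
+ * The two version fields bracket Xen's updates of the time values, in the
+ * style of a seqlock: if an update raced with the copy above, the versions
+ * will not match and the copy is retried.
+ */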
+
+#define TIME_VALUES_UP_TO_DATE \
+ ({ rmb(); (shadow_time_version == HYPERVISOR_shared_info->time_version2); })
+
+
+/*
+ * Returns the system time elapsed, in usecs, since the current shadow_timestamp
+ * was calculated. Must be called with the xtime_lock held for reading.
+ */
+static inline unsigned long __get_time_delta_usecs(void)
+{
+ s32 delta_tsc;
+ u32 low;
+ u64 delta, tsc;
+
+ rdtscll(tsc);
+ low = (u32)(tsc >> rdtsc_bitshift);
+ delta_tsc = (s32)(low - shadow_tsc_stamp);
+ if ( unlikely(delta_tsc < 0) ) delta_tsc = 0;
+ delta = ((u64)delta_tsc * st_scale_f);
+ delta >>= 32;
+ delta += ((u64)delta_tsc * st_scale_i);
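+    /* i.e. delta = delta_tsc * (st_scale_i + st_scale_f / 2^32): a 32.32
+     * fixed-point multiply taking shifted TSC ticks to microseconds. */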
+
+ return (unsigned long)delta;
+}
+
+
+/*
+ * Returns the current time-of-day in UTC timeval format.
+ */
+void do_gettimeofday(struct timeval *tv)
+{
+ unsigned long flags, lost;
+ struct timeval _tv;
+ s64 nsec;
+
+ again:
+ read_lock_irqsave(&xtime_lock, flags);
+
+ _tv.tv_usec = __get_time_delta_usecs();
+ if ( (lost = (jiffies - wall_jiffies)) != 0 )
+ _tv.tv_usec += lost * (1000000 / HZ);
+ _tv.tv_sec = xtime.tv_sec;
+ _tv.tv_usec += xtime.tv_usec;
+
+ nsec = shadow_system_time - processed_system_time;
+ __normalize_time(&_tv.tv_sec, &nsec);
+ _tv.tv_usec += (long)nsec / 1000L;
+
+ if ( unlikely(!TIME_VALUES_UP_TO_DATE) )
+ {
+ /*
+ * We may have blocked for a long time, rendering our calculations
+ * invalid (e.g. the time delta may have overflowed). Detect that
+ * and recalculate with fresh values.
+ */
+ read_unlock_irqrestore(&xtime_lock, flags);
+ write_lock_irqsave(&xtime_lock, flags);
+ __get_time_values_from_xen();
+ write_unlock_irqrestore(&xtime_lock, flags);
+ goto again;
+ }
+
+ HANDLE_USEC_OVERFLOW(_tv);
+
+ /* Ensure that time-of-day is monotonically increasing. */
+ if ( (_tv.tv_sec < last_seen_tv.tv_sec) ||
+ ((_tv.tv_sec == last_seen_tv.tv_sec) &&
+ (_tv.tv_usec < last_seen_tv.tv_usec)) )
+ _tv = last_seen_tv;
+ last_seen_tv = _tv;
+
+ read_unlock_irqrestore(&xtime_lock, flags);
+
+ *tv = _tv;
+}
+
+
+/*
+ * Sets the current time-of-day based on passed-in UTC timeval parameter.
+ */
+void do_settimeofday(struct timeval *tv)
+{
+ struct timeval newtv;
+ s64 nsec;
+ suseconds_t usec;
+
+ if ( !INDEPENDENT_WALLCLOCK() )
+ return;
+
+ write_lock_irq(&xtime_lock);
+
+ /*
+     * If we block for long enough that our time delta overflows, our
+     * shadow time values become stale. Detect that here and retry the
+     * calculation with fresh values.
+ */
+ again:
+ usec = tv->tv_usec - __get_time_delta_usecs();
+
+ nsec = shadow_system_time - processed_system_time;
+ __normalize_time(&tv->tv_sec, &nsec);
+ usec -= (long)nsec / 1000L;
+
+ if ( unlikely(!TIME_VALUES_UP_TO_DATE) )
+ {
+ __get_time_values_from_xen();
+ goto again;
+ }
+ tv->tv_usec = usec;
+
+ HANDLE_USEC_UNDERFLOW(*tv);
+
+ newtv = *tv;
+
+ tv->tv_usec -= (jiffies - wall_jiffies) * (1000000 / HZ);
+ HANDLE_USEC_UNDERFLOW(*tv);
+
+ xtime = *tv;
+ time_adjust = 0; /* stop active adjtime() */
+ time_status |= STA_UNSYNC;
+ time_maxerror = NTP_PHASE_LIMIT;
+ time_esterror = NTP_PHASE_LIMIT;
+
+ /* Reset all our running time counts. They make no sense now. */
+ last_seen_tv.tv_sec = 0;
+ last_update_from_xen = 0;
+
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+ if ( xen_start_info.flags & SIF_INITDOMAIN )
+ {
+ dom0_op_t op;
+ last_update_to_rtc = last_update_to_xen = 0;
+ op.cmd = DOM0_SETTIME;
+ op.u.settime.secs = newtv.tv_sec;
+ op.u.settime.usecs = newtv.tv_usec;
+ op.u.settime.system_time = shadow_system_time;
+ write_unlock_irq(&xtime_lock);
+ HYPERVISOR_dom0_op(&op);
+ }
+ else
+#endif
+ {
+ write_unlock_irq(&xtime_lock);
+ }
+}
+
+
+asmlinkage long sys_stime(int *tptr)
+{
+ int value;
+ struct timeval tv;
+
+ if ( !capable(CAP_SYS_TIME) )
+ return -EPERM;
+
+ if ( get_user(value, tptr) )
+ return -EFAULT;
+
+ tv.tv_sec = value;
+ tv.tv_usec = 0;
+
+ do_settimeofday(&tv);
+
+ return 0;
+}
+
+
+/* Convert jiffies to system time. Call with xtime_lock held for reading. */
+static inline u64 __jiffies_to_st(unsigned long j)
+{
+ return processed_system_time + ((j - jiffies) * NS_PER_TICK);
+}
+
+
+static inline void do_timer_interrupt(int irq, void *dev_id,
+ struct pt_regs *regs)
+{
+ s64 delta;
+ unsigned long ticks = 0;
+ long sec_diff;
+
+ do {
+ __get_time_values_from_xen();
+
+ delta = (s64)(shadow_system_time +
+ ((s64)__get_time_delta_usecs() * 1000LL) -
+ processed_system_time);
+ }
+ while ( !TIME_VALUES_UP_TO_DATE );
+
+ if ( unlikely(delta < 0) )
+ {
+ printk("Timer ISR: Time went backwards: %lld\n", delta);
+ return;
+ }
+
+ /* Process elapsed jiffies since last call. */
+ while ( delta >= NS_PER_TICK )
+ {
+ ticks++;
+ delta -= NS_PER_TICK;
+ processed_system_time += NS_PER_TICK;
+ }
+
+ if ( ticks != 0 )
+ {
+ do_timer_ticks(ticks);
+
+ if ( user_mode(regs) )
+ update_process_times_us(ticks, 0);
+ else
+ update_process_times_us(0, ticks);
+ }
+
+ /*
+ * Take synchronised time from Xen once a minute if we're not
+ * synchronised ourselves, and we haven't chosen to keep an independent
+ * time base.
+ */
+ if ( !INDEPENDENT_WALLCLOCK() &&
+ ((time_status & STA_UNSYNC) != 0) &&
+ (xtime.tv_sec > (last_update_from_xen + 60)) )
+ {
+ /* Adjust shadow timeval for jiffies that haven't updated xtime yet. */
+ shadow_tv.tv_usec -= (jiffies - wall_jiffies) * (1000000/HZ);
+ HANDLE_USEC_UNDERFLOW(shadow_tv);
+
+ /*
+ * Reset our running time counts if they are invalidated by a warp
+ * backwards of more than 500ms.
+ */
+ sec_diff = xtime.tv_sec - shadow_tv.tv_sec;
+ if ( unlikely(abs(sec_diff) > 1) ||
+ unlikely(((sec_diff * 1000000) +
+ xtime.tv_usec - shadow_tv.tv_usec) > 500000) )
+ {
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+ last_update_to_rtc = last_update_to_xen = 0;
+#endif
+ last_seen_tv.tv_sec = 0;
+ }
+
+ /* Update our unsynchronised xtime appropriately. */
+ xtime = shadow_tv;
+
+ last_update_from_xen = xtime.tv_sec;
+ }
+
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+ if ( (xen_start_info.flags & SIF_INITDOMAIN) &&
+ ((time_status & STA_UNSYNC) == 0) )
+ {
+ /* Send synchronised time to Xen approximately every minute. */
+ if ( xtime.tv_sec > (last_update_to_xen + 60) )
+ {
+ dom0_op_t op;
+ struct timeval tv = xtime;
+
+ tv.tv_usec += (jiffies - wall_jiffies) * (1000000/HZ);
+ HANDLE_USEC_OVERFLOW(tv);
+
+ op.cmd = DOM0_SETTIME;
+ op.u.settime.secs = tv.tv_sec;
+ op.u.settime.usecs = tv.tv_usec;
+ op.u.settime.system_time = shadow_system_time;
+ HYPERVISOR_dom0_op(&op);
+
+ last_update_to_xen = xtime.tv_sec;
+ }
+
+ /*
+ * If we have an externally synchronized Linux clock, then update CMOS
+ * clock accordingly every ~11 minutes. Set_rtc_mmss() has to be called
+ * as close as possible to 500 ms before the new second starts.
+ */
+ if ( (xtime.tv_sec > (last_update_to_rtc + 660)) &&
+ (xtime.tv_usec >= (500000 - ((unsigned) tick) / 2)) &&
+ (xtime.tv_usec <= (500000 + ((unsigned) tick) / 2)) )
+ {
+ if ( set_rtc_mmss(xtime.tv_sec) == 0 )
+ last_update_to_rtc = xtime.tv_sec;
+ else
+ last_update_to_rtc = xtime.tv_sec - 600;
+ }
+ }
+#endif
+}
+
+
+static void timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ write_lock(&xtime_lock);
+ do_timer_interrupt(irq, NULL, regs);
+ write_unlock(&xtime_lock);
+}
+
+static struct irqaction irq_timer = {
+ timer_interrupt,
+ SA_INTERRUPT,
+ 0,
+ "timer",
+ NULL,
+ NULL
+};
+
+
+/*
+ * This function works out when the next timer function has to be
+ * executed (by looking at the timer list) and sets the Xen one-shot
+ * domain timer to the appropriate value. This is typically called in
+ * cpu_idle() before the domain blocks.
+ *
+ * The function returns a non-0 value on error conditions.
+ *
+ * It must be called with interrupts disabled.
+ */
+extern spinlock_t timerlist_lock;
+int set_timeout_timer(void)
+{
+ struct timer_list *timer;
+ u64 alarm = 0;
+ int ret = 0;
+
+ spin_lock(&timerlist_lock);
+
+ /*
+ * This is safe against long blocking (since calculations are not based on
+ * TSC deltas). It is also safe against warped system time since
+ * suspend-resume is cooperative and we would first get locked out. It is
+ * safe against normal updates of jiffies since interrupts are off.
+ */
+ if ( (timer = next_timer_event()) != NULL )
+ alarm = __jiffies_to_st(timer->expires);
+
+ /* Tasks on the timer task queue expect to be executed on the next tick. */
+ if ( TQ_ACTIVE(tq_timer) )
+ alarm = __jiffies_to_st(jiffies + 1);
+
+ /* Failure is pretty bad, but we'd best soldier on. */
+ if ( HYPERVISOR_set_timer_op(alarm) != 0 )
+ ret = -1;
+
+ spin_unlock(&timerlist_lock);
+
+ return ret;
+}
+
+
+/* Time debugging. */
+static void dbg_time_int(int irq, void *dev_id, struct pt_regs *ptregs)
+{
+ unsigned long flags, j;
+ u64 s_now, j_st;
+ struct timeval s_tv, tv;
+
+ struct timer_list *timer;
+ u64 t_st;
+
+ read_lock_irqsave(&xtime_lock, flags);
+ s_tv.tv_sec = shadow_tv.tv_sec;
+ s_tv.tv_usec = shadow_tv.tv_usec;
+ s_now = shadow_system_time;
+ read_unlock_irqrestore(&xtime_lock, flags);
+
+ do_gettimeofday(&tv);
+
+ j = jiffies;
+ j_st = __jiffies_to_st(j);
+
+ timer = next_timer_event();
+ t_st = __jiffies_to_st(timer->expires);
+
+ printk(KERN_ALERT "time: shadow_st=0x%X:%08X\n",
+ (u32)(s_now>>32), (u32)s_now);
+ printk(KERN_ALERT "time: wct=%lds %ldus shadow_wct=%lds %ldus\n",
+ tv.tv_sec, tv.tv_usec, s_tv.tv_sec, s_tv.tv_usec);
+ printk(KERN_ALERT "time: jiffies=%lu(0x%X:%08X) timeout=%lu(0x%X:%08X)\n",
+ jiffies,(u32)(j_st>>32), (u32)j_st,
+ timer->expires,(u32)(t_st>>32), (u32)t_st);
+ printk(KERN_ALERT "time: processed_system_time=0x%X:%08X\n",
+ (u32)(processed_system_time>>32), (u32)processed_system_time);
+
+#ifdef CONFIG_MAGIC_SYSRQ
+ handle_sysrq('t',NULL,NULL,NULL);
+#endif
+}
+
+static struct irqaction dbg_time = {
+ dbg_time_int,
+ SA_SHIRQ,
+ 0,
+ "timer_dbg",
+ &dbg_time_int,
+ NULL
+};
+
+void __init time_init(void)
+{
+ unsigned long long alarm;
+ u64 __cpu_khz, __cpu_ghz, cpu_freq, scale, scale2;
+ unsigned int cpu_ghz;
+
+ __cpu_khz = __cpu_ghz = cpu_freq = HYPERVISOR_shared_info->cpu_freq;
+ do_div(__cpu_khz, 1000UL);
+ cpu_khz = (u32)__cpu_khz;
+ do_div(__cpu_ghz, 1000000000UL);
+ cpu_ghz = (unsigned int)__cpu_ghz;
+
+ printk("Xen reported: %lu.%03lu MHz processor.\n",
+ cpu_khz / 1000, cpu_khz % 1000);
+
+ xtime.tv_sec = HYPERVISOR_shared_info->wc_sec;
+ xtime.tv_usec = HYPERVISOR_shared_info->wc_usec;
+ processed_system_time = shadow_system_time;
+
+ for ( rdtsc_bitshift = 0; cpu_ghz != 0; rdtsc_bitshift++, cpu_ghz >>= 1 )
+ continue;
+
+ scale = 1000000LL << (32 + rdtsc_bitshift);
+ do_div(scale, (u32)cpu_freq);
+
+ if ( (cpu_freq >> 32) != 0 )
+ {
+ scale2 = 1000000LL << rdtsc_bitshift;
+ do_div(scale2, (u32)(cpu_freq>>32));
+ scale += scale2;
+ }
+
+ st_scale_f = scale & 0xffffffff;
+ st_scale_i = scale >> 32;
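+    /*
+     * 'scale' is a 32.32 fixed-point count of usecs per shifted TSC tick:
+     * scale = 10^6 * 2^(32+rdtsc_bitshift) / cpu_freq. (Illustrative
+     * example: at 2GHz, rdtsc_bitshift is 2, a shifted tick is 2ns, and
+     * scale is roughly 0.002 * 2^32.)
+     */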
+
+ __get_time_values_from_xen();
+ processed_system_time = shadow_system_time;
+
+ time_irq = bind_virq_to_irq(VIRQ_TIMER);
+ debug_irq = bind_virq_to_irq(VIRQ_DEBUG);
+
+ (void)setup_irq(time_irq, &irq_timer);
+ (void)setup_irq(debug_irq, &dbg_time);
+
+ rdtscll(alarm);
+}
+
+void time_suspend(void)
+{
+}
+
+void time_resume(void)
+{
+ unsigned long flags;
+ write_lock_irqsave(&xtime_lock, flags);
+ /* Get timebases for new environment. */
+ __get_time_values_from_xen();
+ /* Reset our own concept of passage of system time. */
+ processed_system_time = shadow_system_time;
+ /* Accept a warp in UTC (wall-clock) time. */
+ last_seen_tv.tv_sec = 0;
+ /* Make sure we resync UTC time with Xen on next timer interrupt. */
+ last_update_from_xen = 0;
+ write_unlock_irqrestore(&xtime_lock, flags);
+}
+
+/*
+ * /proc/sys/xen: This really belongs in another file. It can stay here for
+ * now however.
+ */
+static ctl_table xen_subtable[] = {
+ {1, "independent_wallclock", &independent_wallclock,
+ sizeof(independent_wallclock), 0644, NULL, proc_dointvec},
+ {0}
+};
+static ctl_table xen_table[] = {
+ {123, "xen", NULL, 0, 0555, xen_subtable},
+ {0}
+};
+static int __init xen_sysctl_init(void)
+{
+ (void)register_sysctl_table(xen_table, 0);
+ return 0;
+}
+__initcall(xen_sysctl_init);
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/arch/xen/kernel/traps.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/arch/xen/kernel/traps.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,628 @@
+/*
+ * linux/arch/i386/traps.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ *
+ * Pentium III FXSR, SSE support
+ * Gareth Hughes <gareth@xxxxxxxxxxx>, May 2000
+ */
+
+/*
+ * 'Traps.c' handles hardware traps and faults after we have saved some
+ * state in 'asm.s'.
+ */
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/timer.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/highmem.h>
+
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/atomic.h>
+#include <asm/debugreg.h>
+#include <asm/desc.h>
+#include <asm/i387.h>
+
+#include <asm/smp.h>
+#include <asm/pgalloc.h>
+
+#include <asm/hypervisor.h>
+
+#include <linux/irq.h>
+#include <linux/module.h>
+
+asmlinkage int system_call(void);
+asmlinkage void lcall7(void);
+asmlinkage void lcall27(void);
+
+asmlinkage void divide_error(void);
+asmlinkage void debug(void);
+asmlinkage void int3(void);
+asmlinkage void overflow(void);
+asmlinkage void bounds(void);
+asmlinkage void invalid_op(void);
+asmlinkage void device_not_available(void);
+asmlinkage void double_fault(void);
+asmlinkage void coprocessor_segment_overrun(void);
+asmlinkage void invalid_TSS(void);
+asmlinkage void segment_not_present(void);
+asmlinkage void stack_segment(void);
+asmlinkage void general_protection(void);
+asmlinkage void page_fault(void);
+asmlinkage void coprocessor_error(void);
+asmlinkage void simd_coprocessor_error(void);
+asmlinkage void alignment_check(void);
+asmlinkage void fixup_4gb_segment(void);
+asmlinkage void machine_check(void);
+
+int kstack_depth_to_print = 24;
+
+
+/*
+ * If the address is either in the .text section of the
+ * kernel, or in the vmalloc'ed module regions, it *may*
+ * be the address of a calling routine
+ */
+
+#ifdef CONFIG_MODULES
+
+extern struct module *module_list;
+extern struct module kernel_module;
+
+static inline int kernel_text_address(unsigned long addr)
+{
+ int retval = 0;
+ struct module *mod;
+
+ if (addr >= (unsigned long) &_stext &&
+ addr <= (unsigned long) &_etext)
+ return 1;
+
+ for (mod = module_list; mod != &kernel_module; mod = mod->next) {
+ /* mod_bound tests for addr being inside the vmalloc'ed
+ * module area. Of course it'd be better to test only
+ * for the .text subset... */
+ if (mod_bound(addr, 0, mod)) {
+ retval = 1;
+ break;
+ }
+ }
+
+ return retval;
+}
+
+#else
+
+static inline int kernel_text_address(unsigned long addr)
+{
+ return (addr >= (unsigned long) &_stext &&
+ addr <= (unsigned long) &_etext);
+}
+
+#endif
+
+void show_trace(unsigned long * stack)
+{
+ int i;
+ unsigned long addr;
+
+ if (!stack)
+ stack = (unsigned long*)&stack;
+
+ printk("Call Trace: ");
+ i = 1;
+ while (((long) stack & (THREAD_SIZE-1)) != 0) {
+ addr = *stack++;
+ if (kernel_text_address(addr)) {
+ if (i && ((i % 6) == 0))
+ printk("\n ");
+ printk("[<%08lx>] ", addr);
+ i++;
+ }
+ }
+ printk("\n");
+}
+
+void show_trace_task(struct task_struct *tsk)
+{
+ unsigned long esp = tsk->thread.esp;
+
+ /* User space on another CPU? */
+ if ((esp ^ (unsigned long)tsk) & (PAGE_MASK<<1))
+ return;
+ show_trace((unsigned long *)esp);
+}
+
+void show_stack(unsigned long * esp)
+{
+ unsigned long *stack;
+ int i;
+
+ // debugging aid: "show_stack(NULL);" prints the
+ // back trace for this cpu.
+
+ if(esp==NULL)
+ esp=(unsigned long*)&esp;
+
+ stack = esp;
+ for(i=0; i < kstack_depth_to_print; i++) {
+ if (((long) stack & (THREAD_SIZE-1)) == 0)
+ break;
+ if (i && ((i % 8) == 0))
+ printk("\n ");
+ printk("%08lx ", *stack++);
+ }
+ printk("\n");
+ show_trace(esp);
+}
+
+void show_registers(struct pt_regs *regs)
+{
+ int in_kernel = 1;
+ unsigned long esp;
+ unsigned short ss;
+
+	esp = (unsigned long) (&regs->esp);
+ ss = __KERNEL_DS;
+ if (regs->xcs & 2) {
+ in_kernel = 0;
+ esp = regs->esp;
+ ss = regs->xss & 0xffff;
+ }
+ printk(KERN_ALERT "CPU: %d\n", smp_processor_id() );
+ printk(KERN_ALERT "EIP: %04x:[<%08lx>] %s\n",
+ 0xffff & regs->xcs, regs->eip, print_tainted());
+ printk(KERN_ALERT "EFLAGS: %08lx\n",regs->eflags);
+ printk(KERN_ALERT "eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n",
+ regs->eax, regs->ebx, regs->ecx, regs->edx);
+ printk(KERN_ALERT "esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n",
+ regs->esi, regs->edi, regs->ebp, esp);
+ printk(KERN_ALERT "ds: %04x es: %04x ss: %04x\n",
+ regs->xds & 0xffff, regs->xes & 0xffff, ss);
+ printk(KERN_ALERT "Process %s (pid: %d, stackpage=%08lx)",
+ current->comm, current->pid, 4096+(unsigned long)current);
+ /*
+ * When in-kernel, we also print out the stack and code at the
+ * time of the fault..
+ */
+ if (in_kernel) {
+
+ printk(KERN_ALERT "\nStack: ");
+ show_stack((unsigned long*)esp);
+
+#if 0
+ {
+ int i;
+ printk(KERN_ALERT "\nCode: ");
+ if(regs->eip < PAGE_OFFSET)
+ goto bad;
+
+ for(i=0;i<20;i++)
+ {
+ unsigned char c;
+			if(__get_user(c, &((unsigned char*)regs->eip)[i])) {
+bad:
+ printk(KERN_ALERT " Bad EIP value.");
+ break;
+ }
+ printk("%02x ", c);
+ }
+ }
+#endif
+ }
+ printk(KERN_ALERT "\n");
+}
+
+spinlock_t die_lock = SPIN_LOCK_UNLOCKED;
+
+void die(const char * str, struct pt_regs * regs, long err)
+{
+ console_verbose();
+ spin_lock_irq(&die_lock);
+ bust_spinlocks(1);
+ printk("%s: %04lx\n", str, err & 0xffff);
+ show_registers(regs);
+ bust_spinlocks(0);
+ spin_unlock_irq(&die_lock);
+ do_exit(SIGSEGV);
+}
+
+static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
+{
+ if (!(2 & regs->xcs))
+ die(str, regs, err);
+}
+
+
+static void inline do_trap(int trapnr, int signr, char *str,
+ struct pt_regs * regs, long error_code,
+ siginfo_t *info)
+{
+ if (!(regs->xcs & 2))
+ goto kernel_trap;
+
+ /*trap_signal:*/ {
+ struct task_struct *tsk = current;
+ tsk->thread.error_code = error_code;
+ tsk->thread.trap_no = trapnr;
+ if (info)
+ force_sig_info(signr, info, tsk);
+ else
+ force_sig(signr, tsk);
+ return;
+ }
+
+ kernel_trap: {
+ unsigned long fixup = search_exception_table(regs->eip);
+ if (fixup)
+ regs->eip = fixup;
+ else
+ die(str, regs, error_code);
+ return;
+ }
+}
+
+#define DO_ERROR(trapnr, signr, str, name) \
+asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
+{ \
+ do_trap(trapnr, signr, str, regs, error_code, NULL); \
+}
+
+#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
+asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
+{ \
+ siginfo_t info; \
+ info.si_signo = signr; \
+ info.si_errno = 0; \
+ info.si_code = sicode; \
+ info.si_addr = (void *)siaddr; \
+ do_trap(trapnr, signr, str, regs, error_code, &info); \
+}
+
+DO_ERROR_INFO( 0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->eip)
+DO_ERROR( 3, SIGTRAP, "int3", int3)
+DO_ERROR( 4, SIGSEGV, "overflow", overflow)
+DO_ERROR( 5, SIGSEGV, "bounds", bounds)
+DO_ERROR_INFO( 6, SIGILL, "invalid operand", invalid_op, ILL_ILLOPN, regs->eip)
+DO_ERROR( 7, SIGSEGV, "device not available", device_not_available)
+DO_ERROR( 8, SIGSEGV, "double fault", double_fault)
+DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
+DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
+DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
+DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
+DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
+DO_ERROR(18, SIGBUS, "machine check", machine_check)
+
+asmlinkage void do_general_protection(struct pt_regs * regs, long error_code)
+{
+ /*
+ * If we trapped on an LDT access then ensure that the default_ldt is
+ * loaded, if nothing else. We load default_ldt lazily because LDT
+ * switching costs time and many applications don't need it.
+ */
+ if ( unlikely((error_code & 6) == 4) )
+ {
+ unsigned long ldt;
+ __asm__ __volatile__ ( "sldt %0" : "=r" (ldt) );
+ if ( ldt == 0 )
+ {
+ mmu_update_t u;
+ u.ptr = MMU_EXTENDED_COMMAND;
+ u.ptr |= (unsigned long)&default_ldt[0];
+ u.val = MMUEXT_SET_LDT | (5 << MMUEXT_CMD_SHIFT);
+ if ( unlikely(HYPERVISOR_mmu_update(&u, 1, NULL) < 0) )
+ {
+ show_trace(NULL);
+ panic("Failed to install default LDT");
+ }
+ return;
+ }
+ }
+
+ if (!(regs->xcs & 2))
+ goto gp_in_kernel;
+
+ current->thread.error_code = error_code;
+ current->thread.trap_no = 13;
+ force_sig(SIGSEGV, current);
+ return;
+
+gp_in_kernel:
+ {
+ unsigned long fixup;
+ fixup = search_exception_table(regs->eip);
+ if (fixup) {
+ regs->eip = fixup;
+ return;
+ }
+ die("general protection fault", regs, error_code);
+ }
+}
+
+
+asmlinkage void do_debug(struct pt_regs * regs, long error_code)
+{
+ unsigned int condition;
+ struct task_struct *tsk = current;
+ siginfo_t info;
+
+ condition = HYPERVISOR_get_debugreg(6);
+
+ /* Mask out spurious debug traps due to lazy DR7 setting */
+ if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
+ if (!tsk->thread.debugreg[7])
+ goto clear_dr7;
+ }
+
+ /* Save debug status register where ptrace can see it */
+ tsk->thread.debugreg[6] = condition;
+
+ /* Mask out spurious TF errors due to lazy TF clearing */
+ if (condition & DR_STEP) {
+ /*
+ * The TF error should be masked out only if the current
+ * process is not traced and if the TRAP flag has been set
+ * previously by a tracing process (condition detected by
+ * the PT_DTRACE flag); remember that the i386 TRAP flag
+ * can be modified by the process itself in user mode,
+ * allowing programs to debug themselves without the ptrace()
+ * interface.
+ */
+ if ((tsk->ptrace & (PT_DTRACE|PT_PTRACED)) == PT_DTRACE)
+ goto clear_TF;
+ }
+
+ /* Ok, finally something we can handle */
+ tsk->thread.trap_no = 1;
+ tsk->thread.error_code = error_code;
+ info.si_signo = SIGTRAP;
+ info.si_errno = 0;
+ info.si_code = TRAP_BRKPT;
+
+ /* If this is a kernel mode trap, save the user PC on entry to
+	 * the kernel: that is what the debugger can make sense of.
+ */
+ info.si_addr = ((regs->xcs & 2) == 0) ? (void *)tsk->thread.eip :
+ (void *)regs->eip;
+ force_sig_info(SIGTRAP, &info, tsk);
+
+ /* Disable additional traps. They'll be re-enabled when
+ * the signal is delivered.
+ */
+ clear_dr7:
+ HYPERVISOR_set_debugreg(7, 0);
+ return;
+
+ clear_TF:
+ regs->eflags &= ~TF_MASK;
+ return;
+}
+
+
+/*
+ * Note that we play around with the 'TS' bit in an attempt to get
+ * the correct behaviour even in the presence of the asynchronous
+ * IRQ13 behaviour
+ */
+void math_error(void *eip)
+{
+ struct task_struct * task;
+ siginfo_t info;
+ unsigned short cwd, swd;
+
+ /*
+ * Save the info for the exception handler and clear the error.
+ */
+ task = current;
+ save_init_fpu(task);
+ task->thread.trap_no = 16;
+ task->thread.error_code = 0;
+ info.si_signo = SIGFPE;
+ info.si_errno = 0;
+ info.si_code = __SI_FAULT;
+ info.si_addr = eip;
+ /*
+ * (~cwd & swd) will mask out exceptions that are not set to unmasked
+ * status. 0x3f is the exception bits in these regs, 0x200 is the
+ * C1 reg you need in case of a stack fault, 0x040 is the stack
+ * fault bit. We should only be taking one exception at a time,
+ * so if this combination doesn't produce any single exception,
+	 * then we have a bad program that isn't synchronizing its FPU usage
+ * and it will suffer the consequences since we won't be able to
+ * fully reproduce the context of the exception
+ */
+ cwd = get_fpu_cwd(task);
+ swd = get_fpu_swd(task);
+ switch (((~cwd) & swd & 0x3f) | (swd & 0x240)) {
+ case 0x000:
+ default:
+ break;
+ case 0x001: /* Invalid Op */
+ case 0x041: /* Stack Fault */
+ case 0x241: /* Stack Fault | Direction */
+ info.si_code = FPE_FLTINV;
+ break;
+ case 0x002: /* Denormalize */
+ case 0x010: /* Underflow */
+ info.si_code = FPE_FLTUND;
+ break;
+ case 0x004: /* Zero Divide */
+ info.si_code = FPE_FLTDIV;
+ break;
+ case 0x008: /* Overflow */
+ info.si_code = FPE_FLTOVF;
+ break;
+ case 0x020: /* Precision */
+ info.si_code = FPE_FLTRES;
+ break;
+ }
+ force_sig_info(SIGFPE, &info, task);
+}
+
+asmlinkage void do_coprocessor_error(struct pt_regs * regs, long error_code)
+{
+ ignore_irq13 = 1;
+ math_error((void *)regs->eip);
+}
+
+void simd_math_error(void *eip)
+{
+ struct task_struct * task;
+ siginfo_t info;
+ unsigned short mxcsr;
+
+ /*
+ * Save the info for the exception handler and clear the error.
+ */
+ task = current;
+ save_init_fpu(task);
+ task->thread.trap_no = 19;
+ task->thread.error_code = 0;
+ info.si_signo = SIGFPE;
+ info.si_errno = 0;
+ info.si_code = __SI_FAULT;
+ info.si_addr = eip;
+ /*
+ * The SIMD FPU exceptions are handled a little differently, as there
+ * is only a single status/control register. Thus, to determine which
+ * unmasked exception was caught we must mask the exception mask bits
+ * at 0x1f80, and then use these to mask the exception bits at 0x3f.
+ */
+ mxcsr = get_fpu_mxcsr(task);
+ switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
+ case 0x000:
+ default:
+ break;
+ case 0x001: /* Invalid Op */
+ info.si_code = FPE_FLTINV;
+ break;
+ case 0x002: /* Denormalize */
+ case 0x010: /* Underflow */
+ info.si_code = FPE_FLTUND;
+ break;
+ case 0x004: /* Zero Divide */
+ info.si_code = FPE_FLTDIV;
+ break;
+ case 0x008: /* Overflow */
+ info.si_code = FPE_FLTOVF;
+ break;
+ case 0x020: /* Precision */
+ info.si_code = FPE_FLTRES;
+ break;
+ }
+ force_sig_info(SIGFPE, &info, task);
+}
+
+asmlinkage void do_simd_coprocessor_error(struct pt_regs * regs,
+ long error_code)
+{
+ if (cpu_has_xmm) {
+ /* Handle SIMD FPU exceptions on PIII+ processors. */
+ ignore_irq13 = 1;
+ simd_math_error((void *)regs->eip);
+ } else {
+ die_if_kernel("cache flush denied", regs, error_code);
+ current->thread.trap_no = 19;
+ current->thread.error_code = error_code;
+ force_sig(SIGSEGV, current);
+ }
+}
+
+/*
+ * 'math_state_restore()' saves the current math information in the
+ * old math state array, and gets the new ones from the current task
+ *
+ * Careful.. There are problems with IBM-designed IRQ13 behaviour.
+ * Don't touch unless you *really* know how it works.
+ */
+asmlinkage void math_state_restore(struct pt_regs regs)
+{
+ /*
+ * A trap in kernel mode can be ignored. It'll be the fast XOR or
+ * copying libraries, which will correctly save/restore state and
+ * reset the TS bit in CR0.
+ */
+ if ( (regs.xcs & 2) == 0 )
+ return;
+
+ if (current->used_math) {
+ restore_fpu(current);
+ } else {
+ init_fpu();
+ }
+ current->flags |= PF_USEDFPU; /* So we fnsave on switch_to() */
+}
+
+
+#define _set_gate(gate_addr,type,dpl,addr) \
+do { \
+ int __d0, __d1; \
+ __asm__ __volatile__ ("movw %%dx,%%ax\n\t" \
+ "movw %4,%%dx\n\t" \
+ "movl %%eax,%0\n\t" \
+ "movl %%edx,%1" \
+ :"=m" (*((long *) (gate_addr))), \
+ "=m" (*(1+(long *) (gate_addr))), "=&a" (__d0), "=&d" (__d1) \
+ :"i" ((short) (0x8000+(dpl<<13)+(type<<8))), \
+ "3" ((char *) (addr)),"2" (__KERNEL_CS << 16)); \
+} while (0)
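+/*
+ * Builds a gate descriptor in place: the first longword holds the selector
+ * and the low half of the handler address, the second the present/DPL/type
+ * bits and the high half. Type 12 with DPL 3 (see set_call_gate) gives a
+ * 32-bit call gate that user space may invoke.
+ */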
+
+static void __init set_call_gate(void *a, void *addr)
+{
+ _set_gate(a,12,3,addr);
+}
+
+
+/* NB. All these are "trap gates" (i.e. events_mask isn't cleared). */
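+/* Entries are { vector, DPL, code selector, handler address }; the DPL-3
+ * entries (int3, overflow, bounds, system_call) may be raised directly
+ * from user space. */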
+static trap_info_t trap_table[] = {
+ { 0, 0, __KERNEL_CS, (unsigned long)divide_error },
+ { 1, 0, __KERNEL_CS, (unsigned long)debug },
+ { 3, 3, __KERNEL_CS, (unsigned long)int3 },
+ { 4, 3, __KERNEL_CS, (unsigned long)overflow },
+ { 5, 3, __KERNEL_CS, (unsigned long)bounds },
+ { 6, 0, __KERNEL_CS, (unsigned long)invalid_op },
+ { 7, 0, __KERNEL_CS, (unsigned long)device_not_available },
+ { 8, 0, __KERNEL_CS, (unsigned long)double_fault },
+ { 9, 0, __KERNEL_CS, (unsigned long)coprocessor_segment_overrun },
+ { 10, 0, __KERNEL_CS, (unsigned long)invalid_TSS },
+ { 11, 0, __KERNEL_CS, (unsigned long)segment_not_present },
+ { 12, 0, __KERNEL_CS, (unsigned long)stack_segment },
+ { 13, 0, __KERNEL_CS, (unsigned long)general_protection },
+ { 14, 0, __KERNEL_CS, (unsigned long)page_fault },
+ { 15, 0, __KERNEL_CS, (unsigned long)fixup_4gb_segment },
+ { 16, 0, __KERNEL_CS, (unsigned long)coprocessor_error },
+ { 17, 0, __KERNEL_CS, (unsigned long)alignment_check },
+ { 18, 0, __KERNEL_CS, (unsigned long)machine_check },
+ { 19, 0, __KERNEL_CS, (unsigned long)simd_coprocessor_error },
+ { SYSCALL_VECTOR,
+ 3, __KERNEL_CS, (unsigned long)system_call },
+ { 0, 0, 0, 0 }
+};
+
+
+void __init trap_init(void)
+{
+ HYPERVISOR_set_trap_table(trap_table);
+ HYPERVISOR_set_fast_trap(SYSCALL_VECTOR);
+
+ /*
+ * The default LDT is a single-entry callgate to lcall7 for iBCS and a
+ * callgate to lcall27 for Solaris/x86 binaries.
+ */
+ clear_page(&default_ldt[0]);
+ set_call_gate(&default_ldt[0],lcall7);
+ set_call_gate(&default_ldt[4],lcall27);
+ __make_page_readonly(&default_ldt[0]);
+
+ cpu_init();
+}
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/arch/xen/lib/Makefile
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/arch/xen/lib/Makefile Tue Aug 9 15:17:45 2005
@@ -0,0 +1,15 @@
+
+.S.o:
+ $(CC) $(AFLAGS) -c $< -o $*.o
+
+L_TARGET = lib.a
+
+obj-y = checksum.o old-checksum.o delay.o \
+ usercopy.o getuser.o \
+ memcpy.o strstr.o xen_proc.o
+
+obj-$(CONFIG_X86_USE_3DNOW) += mmx.o
+obj-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o
+obj-$(CONFIG_DEBUG_IOVIRT) += iodebug.o
+
+include $(TOPDIR)/Rules.make
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/arch/xen/lib/delay.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/arch/xen/lib/delay.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,52 @@
+/*
+ * Precise Delay Loops for i386
+ *
+ * Copyright (C) 1993 Linus Torvalds
+ * Copyright (C) 1997 Martin Mares <mj@xxxxxxxxxxxxxxxxxxxxxxxx>
+ *
+ * The __delay function must _NOT_ be inlined as its execution time
+ * depends wildly on alignment on many x86 processors. The additional
+ * jump magic is needed to get the timing stable on all the CPU's
+ * we have to worry about.
+ */
+
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <asm/processor.h>
+#include <asm/delay.h>
+
+#ifdef CONFIG_SMP
+#include <asm/smp.h>
+#endif
+
+void __delay(unsigned long loops)
+{
+ unsigned long bclock, now;
+
+ rdtscl(bclock);
+ do
+ {
+ rep_nop();
+ rdtscl(now);
+ } while ((now-bclock) < loops);
+}
+
+inline void __const_udelay(unsigned long xloops)
+{
+ int d0;
+ __asm__("mull %0"
+ :"=d" (xloops), "=&a" (d0)
+ :"1" (xloops),"0" (current_cpu_data.loops_per_jiffy));
+ __delay(xloops * HZ);
+}
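+/*
+ * The mull above keeps the high 32 bits of xloops * loops_per_jiffy, i.e.
+ * xloops * loops_per_jiffy / 2^32. With xloops = usecs * (2^32/10^6) as
+ * passed by __udelay() below, the delay comes to
+ * usecs * loops_per_jiffy * HZ / 10^6 loops -- one microsecond's worth of
+ * calibrated loops per microsecond requested.
+ */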
+
+void __udelay(unsigned long usecs)
+{
+ __const_udelay(usecs * 0x000010c6); /* 2**32 / 1000000 */
+}
+
+void __ndelay(unsigned long nsecs)
+{
+ __const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
+}
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/arch/xen/mm/Makefile
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/arch/xen/mm/Makefile Tue Aug 9 15:17:45 2005
@@ -0,0 +1,16 @@
+#
+# Makefile for the linux i386-specific parts of the memory manager.
+#
+# Note! Dependencies are done automagically by 'make dep', which also
+# removes any old dependencies. DON'T put your own dependencies here
+# unless it's something special (ie not a .c file).
+#
+# Note 2! The CFLAGS definition is now in the main makefile...
+
+O_TARGET := mm.o
+
+obj-y := init.o fault.o extable.o pageattr.o hypervisor.o ioremap.o
+
+export-objs := pageattr.o
+
+include $(TOPDIR)/Rules.make
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/arch/xen/mm/fault.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/arch/xen/mm/fault.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,306 @@
+/*
+ * linux/arch/i386/mm/fault.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ */
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/tty.h>
+#include <linux/vt_kern.h> /* For unblank_screen() */
+
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/pgalloc.h>
+#include <asm/hardirq.h>
+
+extern void die(const char *,struct pt_regs *,long);
+
+pgd_t *cur_pgd;
+
+extern spinlock_t timerlist_lock;
+
+/*
+ * Unlock any spinlocks which will prevent us from getting the
+ * message out (timerlist_lock is acquired through the
+ * console unblank code)
+ */
+void bust_spinlocks(int yes)
+{
+ spin_lock_init(&timerlist_lock);
+ if (yes) {
+ oops_in_progress = 1;
+ } else {
+ int loglevel_save = console_loglevel;
+#ifdef CONFIG_VT
+ unblank_screen();
+#endif
+ oops_in_progress = 0;
+ /*
+ * OK, the message is on the console. Now we call printk()
+ * without oops_in_progress set so that printk will give klogd
+ * a poke. Hold onto your hats...
+ */
+		console_loglevel = 15;	/* NMI oopser may have shut the console up */
+ printk(" ");
+ console_loglevel = loglevel_save;
+ }
+}
+
+/*
+ * This routine handles page faults. It determines the address,
+ * and the problem, and then passes it off to one of the appropriate
+ * routines.
+ *
+ * error_code:
+ * bit 0 == 0 means no page found, 1 means protection fault
+ * bit 1 == 0 means read, 1 means write
+ * bit 2 == 0 means kernel, 1 means user-mode
+ */
+asmlinkage void do_page_fault(struct pt_regs *regs,
+ unsigned long error_code,
+ unsigned long address)
+{
+ struct task_struct *tsk = current;
+ struct mm_struct *mm;
+ struct vm_area_struct * vma;
+ unsigned long page;
+ unsigned long fixup;
+ int write;
+ siginfo_t info;
+
+ /* Set the "privileged fault" bit to something sane. */
+ error_code &= 3;
+ error_code |= (regs->xcs & 2) << 1;
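+	/* Under Xen, bit 1 of the saved CS distinguishes user mode from the
+	 * guest kernel, so shifting it left once rebuilds the i386 "user
+	 * mode" bit (bit 2) of the hardware error code. */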
+
+ if ( flush_page_update_queue() != 0 )
+ return;
+
+ /*
+ * We fault-in kernel-space virtual memory on-demand. The
+ * 'reference' page table is init_mm.pgd.
+ *
+ * NOTE! We MUST NOT take any locks for this case. We may
+ * be in an interrupt or a critical region, and should
+ * only copy the information from the master page table,
+ * nothing more.
+ *
+ * This verifies that the fault happens in kernel space
+ * (error_code & 4) == 0, and that the fault was not a
+ * protection error (error_code & 1) == 0.
+ */
+ if (address >= TASK_SIZE && !(error_code & 5))
+ goto vmalloc_fault;
+
+ mm = tsk->mm;
+ info.si_code = SEGV_MAPERR;
+
+ /*
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+ if (in_interrupt() || !mm)
+ goto no_context;
+
+ down_read(&mm->mmap_sem);
+
+ vma = find_vma(mm, address);
+ if (!vma)
+ goto bad_area;
+ if (vma->vm_start <= address)
+ goto good_area;
+ if (!(vma->vm_flags & VM_GROWSDOWN))
+ goto bad_area;
+ if (error_code & 4) {
+ /*
+ * accessing the stack below %esp is always a bug.
+ * The "+ 32" is there due to some instructions (like
+ * pusha) doing post-decrement on the stack and that
+ * doesn't show up until later..
+ */
+ if (address + 32 < regs->esp)
+ goto bad_area;
+ }
+ if (expand_stack(vma, address))
+ goto bad_area;
+/*
+ * Ok, we have a good vm_area for this memory access, so
+ * we can handle it..
+ */
+good_area:
+ info.si_code = SEGV_ACCERR;
+ write = 0;
+ switch (error_code & 3) {
+ default: /* 3: write, present */
+ /* fall through */
+ case 2: /* write, not present */
+ if (!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ write++;
+ break;
+ case 1: /* read, present */
+ goto bad_area;
+ case 0: /* read, not present */
+ if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+ goto bad_area;
+ }
+
+ survive:
+ /*
+ * If for any reason at all we couldn't handle the fault,
+ * make sure we exit gracefully rather than endlessly redo
+ * the fault.
+ */
+ switch (handle_mm_fault(mm, vma, address, write)) {
+ case 1:
+ tsk->min_flt++;
+ break;
+ case 2:
+ tsk->maj_flt++;
+ break;
+ case 0:
+ goto do_sigbus;
+ default:
+ goto out_of_memory;
+ }
+
+ up_read(&mm->mmap_sem);
+ return;
+
+/*
+ * Something tried to access memory that isn't in our memory map..
+ * Fix it, but check if it's kernel or user first..
+ */
+bad_area:
+ up_read(&mm->mmap_sem);
+
+ /* User mode accesses just cause a SIGSEGV */
+ if (error_code & 4) {
+ tsk->thread.cr2 = address;
+ /* Kernel addresses are always protection faults */
+ tsk->thread.error_code = error_code | (address >= TASK_SIZE);
+ tsk->thread.trap_no = 14;
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+ /* info.si_code has been set above */
+ info.si_addr = (void *)address;
+ force_sig_info(SIGSEGV, &info, tsk);
+ return;
+ }
+
+no_context:
+ /* Are we prepared to handle this kernel fault? */
+ if ((fixup = search_exception_table(regs->eip)) != 0) {
+ regs->eip = fixup;
+ return;
+ }
+
+/*
+ * Oops. The kernel tried to access some bad page. We'll have to
+ * terminate things with extreme prejudice.
+ */
+
+ bust_spinlocks(1);
+
+ if (address < PAGE_SIZE)
+		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
+ else
+ printk(KERN_ALERT "Unable to handle kernel paging request");
+ printk(" at virtual address %08lx\n",address);
+ printk(" printing eip:\n");
+ printk("%08lx\n", regs->eip);
+ page = ((unsigned long *) cur_pgd)[address >> 22];
+ printk(KERN_ALERT "*pde=%08lx(%08lx)\n", page, machine_to_phys(page));
+ if (page & 1) {
+ page &= PAGE_MASK;
+ address &= 0x003ff000;
+ page = machine_to_phys(page);
+ page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
+ printk(KERN_ALERT "*pte=%08lx(%08lx)\n", page,
+ machine_to_phys(page));
+ }
+ die("Oops", regs, error_code);
+ bust_spinlocks(0);
+ do_exit(SIGKILL);
+
+/*
+ * We ran out of memory, or some other thing happened to us that made
+ * us unable to handle the page fault gracefully.
+ */
+out_of_memory:
+ if (tsk->pid == 1) {
+ yield();
+ goto survive;
+ }
+ up_read(&mm->mmap_sem);
+ printk("VM: killing process %s\n", tsk->comm);
+ if (error_code & 4)
+ do_exit(SIGKILL);
+ goto no_context;
+
+do_sigbus:
+ up_read(&mm->mmap_sem);
+
+ /*
+ * Send a sigbus, regardless of whether we were in kernel
+ * or user mode.
+ */
+ tsk->thread.cr2 = address;
+ tsk->thread.error_code = error_code;
+ tsk->thread.trap_no = 14;
+ info.si_signo = SIGBUS;
+ info.si_errno = 0;
+ info.si_code = BUS_ADRERR;
+ info.si_addr = (void *)address;
+ force_sig_info(SIGBUS, &info, tsk);
+
+ /* Kernel mode? Handle exceptions or die */
+ if (!(error_code & 4))
+ goto no_context;
+ return;
+
+vmalloc_fault:
+ {
+ /*
+ * Synchronize this task's top level page-table
+ * with the 'reference' page table.
+ *
+ * Do _not_ use "tsk" here. We might be inside
+ * an interrupt in the middle of a task switch..
+ */
+ int offset = __pgd_offset(address);
+ pgd_t *pgd, *pgd_k;
+ pmd_t *pmd, *pmd_k;
+ pte_t *pte_k;
+
+ pgd = offset + cur_pgd;
+ pgd_k = init_mm.pgd + offset;
+
+ if (!pgd_present(*pgd_k))
+ goto no_context;
+ set_pgd(pgd, *pgd_k);
+
+ pmd = pmd_offset(pgd, address);
+ pmd_k = pmd_offset(pgd_k, address);
+ if (!pmd_present(*pmd_k))
+ goto no_context;
+ set_pmd(pmd, *pmd_k);
+ XEN_flush_page_update_queue(); /* flush PMD update */
+
+ pte_k = pte_offset(pmd_k, address);
+ if (!pte_present(*pte_k))
+ goto no_context;
+ return;
+ }
+}
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/arch/xen/mm/init.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/arch/xen/mm/init.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,487 @@
+/*
+ * linux/arch/i386/mm/init.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ *
+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
+ */
+
+#include <linux/config.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/smp.h>
+#include <linux/init.h>
+#ifdef CONFIG_BLK_DEV_INITRD
+#include <linux/blk.h>
+#endif
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <linux/bootmem.h>
+#include <linux/slab.h>
+
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/dma.h>
+#include <asm/apic.h>
+#include <asm/tlb.h>
+
+/* XEN: We *cannot* use mmx_clear_page() this early. Force dumb memset(). */
+#undef clear_page
+#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
+
+mmu_gather_t mmu_gathers[NR_CPUS];
+unsigned long highstart_pfn, highend_pfn;
+static unsigned long totalram_pages;
+static unsigned long totalhigh_pages;
+
+int do_check_pgt_cache(int low, int high)
+{
+ int freed = 0;
+ if(pgtable_cache_size > high) {
+ do {
+ if (!QUICKLIST_EMPTY(pgd_quicklist)) {
+ free_pgd_slow(get_pgd_fast());
+ freed++;
+ }
+ if (!QUICKLIST_EMPTY(pte_quicklist)) {
+ pte_free_slow(pte_alloc_one_fast(NULL, 0));
+ freed++;
+ }
+ } while(pgtable_cache_size > low);
+ }
+ return freed;
+}
+
+/*
+ * NOTE: pagetable_init allocates all the fixmap pagetables contiguously in
+ * physical space so we can cache the place of the first one and move
+ * around without checking the pgd every time.
+ */
+
+#if CONFIG_HIGHMEM
+pte_t *kmap_pte;
+pgprot_t kmap_prot;
+
+#define kmap_get_fixmap_pte(vaddr) \
+ pte_offset(pmd_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr))
+
+void __init kmap_init(void)
+{
+ unsigned long kmap_vstart;
+
+ /* cache the first kmap pte */
+ kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
+ kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
+
+ kmap_prot = PAGE_KERNEL;
+}
+#endif /* CONFIG_HIGHMEM */
+
+void show_mem(void)
+{
+ int i, total = 0, reserved = 0;
+ int shared = 0, cached = 0;
+ int highmem = 0;
+
+ printk("Mem-info:\n");
+ show_free_areas();
+ printk("Free swap: %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
+ i = max_mapnr;
+ while (i-- > 0) {
+ total++;
+ if (PageHighMem(mem_map+i))
+ highmem++;
+ if (PageReserved(mem_map+i))
+ reserved++;
+ else if (PageSwapCache(mem_map+i))
+ cached++;
+ else if (page_count(mem_map+i))
+ shared += page_count(mem_map+i) - 1;
+ }
+ printk("%d pages of RAM\n", total);
+ printk("%d pages of HIGHMEM\n",highmem);
+ printk("%d reserved pages\n",reserved);
+ printk("%d pages shared\n",shared);
+ printk("%d pages swap cached\n",cached);
+ printk("%ld pages in page table cache\n",pgtable_cache_size);
+ show_buffers();
+}
+
+/* References to section boundaries */
+
+extern char _text, _etext, _edata, __bss_start, _end;
+extern char __init_begin, __init_end;
+
+static inline void set_pte_phys (unsigned long vaddr,
+ unsigned long phys, pgprot_t prot)
+{
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ pgd = init_mm.pgd + __pgd_offset(vaddr);
+ if (pgd_none(*pgd)) {
+ printk("PAE BUG #00!\n");
+ return;
+ }
+ pmd = pmd_offset(pgd, vaddr);
+ if (pmd_none(*pmd)) {
+ printk("PAE BUG #01!\n");
+ return;
+ }
+ pte = pte_offset(pmd, vaddr);
+
+ queue_l1_entry_update(pte, phys | pgprot_val(prot));
+
+ /*
+ * It's enough to flush this one mapping.
+ * (PGE mappings get flushed as well)
+ */
+ __flush_tlb_one(vaddr);
+}
+
+void __set_fixmap(enum fixed_addresses idx, unsigned long phys,
+ pgprot_t flags)
+{
+ unsigned long address = __fix_to_virt(idx);
+
+ if (idx >= __end_of_fixed_addresses) {
+ printk("Invalid __set_fixmap\n");
+ return;
+ }
+ set_pte_phys(address, phys, flags);
+}
+
+void clear_fixmap(enum fixed_addresses idx)
+{
+ set_pte_phys(__fix_to_virt(idx), 0, __pgprot(0));
+}
+
+static void __init fixrange_init (unsigned long start,
+ unsigned long end, pgd_t *pgd_base)
+{
+ pgd_t *pgd, *kpgd;
+ pmd_t *pmd, *kpmd;
+ pte_t *pte, *kpte;
+ int i, j;
+ unsigned long vaddr;
+
+ vaddr = start;
+ i = __pgd_offset(vaddr);
+ j = __pmd_offset(vaddr);
+ pgd = pgd_base + i;
+
+ for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
+#if CONFIG_X86_PAE
+ if (pgd_none(*pgd)) {
+ pmd = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+ set_pgd(pgd, __pgd(__pa(pmd) + 0x1));
+ if (pmd != pmd_offset(pgd, 0))
+ printk("PAE BUG #02!\n");
+ }
+ pmd = pmd_offset(pgd, vaddr);
+#else
+ pmd = (pmd_t *)pgd;
+#endif
+ for (; (j < PTRS_PER_PMD) && (vaddr != end); pmd++, j++) {
+ if (pmd_none(*pmd)) {
+ pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+ clear_page(pte);
+ kpgd = pgd_offset_k((unsigned long)pte);
+ kpmd = pmd_offset(kpgd, (unsigned long)pte);
+ kpte = pte_offset(kpmd, (unsigned long)pte);
+ queue_l1_entry_update(kpte,
+ (*(unsigned long *)kpte)&~_PAGE_RW);
+
+ set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte)));
+ }
+ vaddr += PMD_SIZE;
+ }
+ j = 0;
+ }
+
+ XEN_flush_page_update_queue();
+}
+
+
+static void __init pagetable_init (void)
+{
+ unsigned long vaddr, end, ram_end;
+ pgd_t *kpgd, *pgd, *pgd_base;
+ int i, j, k;
+ pmd_t *kpmd, *pmd;
+ pte_t *kpte, *pte, *pte_base;
+
+ ram_end = end = (unsigned long)__va(max_low_pfn * PAGE_SIZE);
+ if ( xen_start_info.nr_pages < max_low_pfn )
+ ram_end = (unsigned long)__va(xen_start_info.nr_pages * PAGE_SIZE);
+
+ pgd_base = init_mm.pgd;
+ i = __pgd_offset(PAGE_OFFSET);
+ pgd = pgd_base + i;
+
+ for (; i < PTRS_PER_PGD; pgd++, i++) {
+ vaddr = i*PGDIR_SIZE;
+ if (vaddr >= end)
+ break;
+ pmd = (pmd_t *)pgd;
+ for (j = 0; j < PTRS_PER_PMD; pmd++, j++) {
+ vaddr = i*PGDIR_SIZE + j*PMD_SIZE;
+ if (vaddr >= end)
+ break;
+
+ /* Filled in for us already? */
+ if ( pmd_val(*pmd) & _PAGE_PRESENT )
+ continue;
+
+ pte_base = pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+ clear_page(pte_base);
+
+ for (k = 0; k < PTRS_PER_PTE; pte++, k++) {
+ vaddr = i*PGDIR_SIZE + j*PMD_SIZE + k*PAGE_SIZE;
+ if (vaddr >= ram_end)
+ break;
+ *pte = mk_pte_phys(__pa(vaddr), PAGE_KERNEL);
+ }
+ kpgd = pgd_offset_k((unsigned long)pte_base);
+ kpmd = pmd_offset(kpgd, (unsigned long)pte_base);
+ kpte = pte_offset(kpmd, (unsigned long)pte_base);
+ queue_l1_entry_update(kpte,
+ (*(unsigned long *)kpte)&~_PAGE_RW);
+ set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte_base)));
+ XEN_flush_page_update_queue();
+ }
+ }
+
+ /*
+ * Fixed mappings, only the page table structure has to be
+ * created - mappings will be set by set_fixmap():
+ */
+ vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
+ fixrange_init(vaddr, HYPERVISOR_VIRT_START, init_mm.pgd);
+
+#if CONFIG_HIGHMEM
+ /*
+ * Permanent kmaps:
+ */
+ vaddr = PKMAP_BASE;
+ fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, init_mm.pgd);
+
+ pgd = init_mm.pgd + __pgd_offset(vaddr);
+ pmd = pmd_offset(pgd, vaddr);
+ pte = pte_offset(pmd, vaddr);
+ pkmap_page_table = pte;
+#endif
+}
+
+static void __init zone_sizes_init(void)
+{
+ unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
+ unsigned int max_dma, high, low;
+
+ max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
+ low = max_low_pfn;
+ high = highend_pfn;
+
+ if (low < max_dma)
+ zones_size[ZONE_DMA] = low;
+ else {
+ zones_size[ZONE_DMA] = max_dma;
+ zones_size[ZONE_NORMAL] = low - max_dma;
+#ifdef CONFIG_HIGHMEM
+ zones_size[ZONE_HIGHMEM] = high - low;
+#endif
+ }
+ free_area_init(zones_size);
+}
+
+void __init paging_init(void)
+{
+ pagetable_init();
+
+ zone_sizes_init();
+ /* Switch to the real shared_info page, and clear the dummy page. */
+ set_fixmap(FIX_SHARED_INFO, xen_start_info.shared_info);
+ HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
+ memset(empty_zero_page, 0, sizeof(empty_zero_page));
+
+#ifdef CONFIG_HIGHMEM
+ kmap_init();
+#endif
+}
+
+static inline int page_is_ram (unsigned long pagenr)
+{
+ return 1;
+}
+
+#ifdef CONFIG_HIGHMEM
+void __init one_highpage_init(struct page *page, int free_page)
+{
+ ClearPageReserved(page);
+ set_bit(PG_highmem, &page->flags);
+ atomic_set(&page->count, 1);
+ if ( free_page )
+ __free_page(page);
+ totalhigh_pages++;
+}
+#endif /* CONFIG_HIGHMEM */
+
+static void __init set_max_mapnr_init(void)
+{
+#ifdef CONFIG_HIGHMEM
+ highmem_start_page = mem_map + highstart_pfn;
+ max_mapnr = num_physpages = highend_pfn;
+ num_mappedpages = max_low_pfn;
+#else
+ max_mapnr = num_mappedpages = num_physpages = max_low_pfn;
+#endif
+}
+
+static int __init free_pages_init(void)
+{
+#ifdef CONFIG_HIGHMEM
+ int bad_ppro = 0;
+#endif
+ int reservedpages, pfn;
+
+ /* Add only boot_pfn pages of low memory to the free list.
+ * max_low_pfn may be sized for pages yet to be allocated from the
+ * hypervisor, or it may be set to override the xen_start_info
+ * amount of memory.
+ */
+ int boot_pfn = min(xen_start_info.nr_pages,max_low_pfn);
+
+ /* this will put all low memory onto the freelists */
+ totalram_pages += free_all_bootmem();
+ /* XEN: init and count low-mem pages outside initial allocation. */
+ for (pfn = boot_pfn; pfn < max_low_pfn; pfn++) {
+ ClearPageReserved(&mem_map[pfn]);
+ atomic_set(&mem_map[pfn].count, 1);
+ totalram_pages++;
+ }
+
+ reservedpages = 0;
+ for (pfn = 0; pfn < boot_pfn ; pfn++) {
+ /*
+ * Only count reserved RAM pages
+ */
+ if (page_is_ram(pfn) && PageReserved(mem_map+pfn))
+ reservedpages++;
+ }
+#ifdef CONFIG_HIGHMEM
+ for (pfn = highend_pfn-1; pfn >= highstart_pfn; pfn--)
+ one_highpage_init((struct page *) (mem_map + pfn),
+ (pfn < xen_start_info.nr_pages));
+ totalram_pages += totalhigh_pages;
+#endif
+ return reservedpages;
+}
+
+void __init mem_init(void)
+{
+ int codesize, reservedpages, datasize, initsize;
+
+ if (!mem_map)
+ BUG();
+
+#ifdef CONFIG_HIGHMEM
+ /* check that fixmap and pkmap do not overlap */
+ if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
+ printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");
+ printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
+ PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, FIXADDR_START);
+ BUG();
+ }
+#endif
+
+ set_max_mapnr_init();
+
+ high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
+
+ /* clear the zero-page */
+ memset(empty_zero_page, 0, PAGE_SIZE);
+
+ reservedpages = free_pages_init();
+
+ codesize = (unsigned long) &_etext - (unsigned long) &_text;
+ datasize = (unsigned long) &_edata - (unsigned long) &_etext;
+ initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
+
+ printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk
reserved, %dk data, %dk init, %ldk highmem)\n",
+ (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
+ max_mapnr << (PAGE_SHIFT-10),
+ codesize >> 10,
+ reservedpages << (PAGE_SHIFT-10),
+ datasize >> 10,
+ initsize >> 10,
+ (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
+ );
+
+ boot_cpu_data.wp_works_ok = 1;
+}
+
+void free_initmem(void)
+{
+ unsigned long addr;
+
+ addr = (unsigned long)(&__init_begin);
+ for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
+ ClearPageReserved(virt_to_page(addr));
+ set_page_count(virt_to_page(addr), 1);
+ free_page(addr);
+ totalram_pages++;
+ }
+ printk (KERN_INFO "Freeing unused kernel memory: %dk freed\n",
(&__init_end - &__init_begin) >> 10);
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+ if (start < end)
+ printk (KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start)
>> 10);
+ for (; start < end; start += PAGE_SIZE) {
+ ClearPageReserved(virt_to_page(start));
+ set_page_count(virt_to_page(start), 1);
+ free_page(start);
+ totalram_pages++;
+ }
+}
+#endif
+
+void si_meminfo(struct sysinfo *val)
+{
+ val->totalram = max_pfn;
+ val->sharedram = 0;
+ val->freeram = nr_free_pages();
+ val->bufferram = atomic_read(&buffermem_pages);
+ val->totalhigh = max_pfn-max_low_pfn;
+ val->freehigh = nr_free_highpages();
+ val->mem_unit = PAGE_SIZE;
+ return;
+}
+
+#if defined(CONFIG_X86_PAE)
+struct kmem_cache_s *pae_pgd_cachep;
+void __init pgtable_cache_init(void)
+{
+ /*
+ * PAE pgds must be 16-byte aligned:
+ */
+ pae_pgd_cachep = kmem_cache_create("pae_pgd", 32, 0,
+ SLAB_HWCACHE_ALIGN | SLAB_MUST_HWCACHE_ALIGN, NULL, NULL);
+ if (!pae_pgd_cachep)
+ panic("init_pae(): Cannot alloc pae_pgd SLAB cache");
+}
+#endif /* CONFIG_X86_PAE */
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/arch/xen/mm/ioremap.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/arch/xen/mm/ioremap.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,271 @@
+/*
+ * arch/xen/mm/ioremap.c
+ *
+ * Re-map IO memory to kernel address space so that we can access it.
+ *
+ * (C) Copyright 1995 1996 Linus Torvalds
+ *
+ * Modifications for Xenolinux (c) 2003-2004 Keir Fraser
+ */
+
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/vmalloc.h>
+#include <asm/io.h>
+#include <asm/pgalloc.h>
+#include <asm/uaccess.h>
+#include <asm/tlb.h>
+#include <asm/mmu.h>
+
+#if defined(CONFIG_XEN_PRIVILEGED_GUEST)
+
+/* These hacky macros avoid phys->machine translations. */
+#define __direct_pte(x) ((pte_t) { (x) } )
+#define __direct_mk_pte(page_nr,pgprot) \
+ __direct_pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
+#define direct_mk_pte_phys(physpage, pgprot) \
+ __direct_mk_pte((physpage) >> PAGE_SHIFT, pgprot)
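+
+/*
+ * A minimal usage sketch (the address is illustrative): for a
+ * page-aligned machine frame such as 0xB8000,
+ *
+ *     pte_t pte = direct_mk_pte_phys(0xB8000, PAGE_KERNEL);
+ *
+ * simply yields (0xB8000 | pgprot_val(PAGE_KERNEL)) as the PTE value,
+ * with no phys-to-machine lookup in between.
+ */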
+
+static inline void direct_remap_area_pte(pte_t *pte,
+ unsigned long address,
+ unsigned long size,
+ mmu_update_t **v)
+{
+ unsigned long end;
+
+ address &= ~PMD_MASK;
+ end = address + size;
+ if (end > PMD_SIZE)
+ end = PMD_SIZE;
+ if (address >= end)
+ BUG();
+
+ do {
+ (*v)->ptr = virt_to_machine(pte);
+ (*v)++;
+ address += PAGE_SIZE;
+ pte++;
+ } while (address && (address < end));
+}
+
+static inline int direct_remap_area_pmd(struct mm_struct *mm,
+ pmd_t *pmd,
+ unsigned long address,
+ unsigned long size,
+ mmu_update_t **v)
+{
+ unsigned long end;
+
+ address &= ~PGDIR_MASK;
+ end = address + size;
+ if (end > PGDIR_SIZE)
+ end = PGDIR_SIZE;
+ if (address >= end)
+ BUG();
+ do {
+ pte_t *pte = pte_alloc(mm, pmd, address);
+ if (!pte)
+ return -ENOMEM;
+ direct_remap_area_pte(pte, address, end - address, v);
+
+ address = (address + PMD_SIZE) & PMD_MASK;
+ pmd++;
+ } while (address && (address < end));
+ return 0;
+}
+
+int __direct_remap_area_pages(struct mm_struct *mm,
+ unsigned long address,
+ unsigned long size,
+ mmu_update_t *v)
+{
+ pgd_t * dir;
+ unsigned long end = address + size;
+
+ dir = pgd_offset(mm, address);
+ flush_cache_all();
+ if (address >= end)
+ BUG();
+ spin_lock(&mm->page_table_lock);
+ do {
+ pmd_t *pmd = pmd_alloc(mm, dir, address);
+ if (!pmd)
+ return -ENOMEM;
+ direct_remap_area_pmd(mm, pmd, address, end - address, &v);
+ address = (address + PGDIR_SIZE) & PGDIR_MASK;
+ dir++;
+
+ } while (address && (address < end));
+ spin_unlock(&mm->page_table_lock);
+ flush_tlb_all();
+ return 0;
+}
+
+
+int direct_remap_area_pages(struct mm_struct *mm,
+ unsigned long address,
+ unsigned long machine_addr,
+ unsigned long size,
+ pgprot_t prot,
+ domid_t domid)
+{
+ int i;
+ unsigned long start_address;
+#define MAX_DIRECTMAP_MMU_QUEUE 130
+ mmu_update_t u[MAX_DIRECTMAP_MMU_QUEUE], *w, *v;
+
+ u[0].ptr = MMU_EXTENDED_COMMAND;
+ u[0].val = MMUEXT_SET_FOREIGNDOM;
+ u[0].val |= (unsigned long)domid << 16;
+ v = w = &u[1];
+
+ start_address = address;
+
+ for( i = 0; i < size; i += PAGE_SIZE )
+ {
+ if ( (v - u) == MAX_DIRECTMAP_MMU_QUEUE )
+ {
+ /* Fill in the PTE pointers. */
+ __direct_remap_area_pages( mm,
+ start_address,
+ address-start_address,
+ w);
+
+ if ( HYPERVISOR_mmu_update(u, v - u, NULL) < 0 )
+ return -EFAULT;
+ v = w;
+ start_address = address;
+ }
+
+ /*
+ * Fill in the machine address: PTE ptr is done later by
+ * __direct_remap_area_pages().
+ */
+ v->val = (machine_addr & PAGE_MASK) | pgprot_val(prot);
+
+ machine_addr += PAGE_SIZE;
+ address += PAGE_SIZE;
+ v++;
+ }
+
+ if ( v != w )
+ {
+ /* get the ptep's filled in */
+ __direct_remap_area_pages(mm,
+ start_address,
+ address-start_address,
+ w);
+ if ( unlikely(HYPERVISOR_mmu_update(u, v - u, NULL) < 0) )
+ return -EFAULT;
+ }
+
+ return 0;
+}
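+
+/*
+ * Note on the batching above: u[0] always carries the
+ * MMUEXT_SET_FOREIGNDOM command, so at most MAX_DIRECTMAP_MMU_QUEUE - 1
+ * (i.e. 129) PTE updates are queued before HYPERVISOR_mmu_update()
+ * flushes them, after which the window restarts at w = &u[1].
+ */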
+
+
+#endif /* CONFIG_XEN_PRIVILEGED_GUEST */
+
+
+/*
+ * Remap an arbitrary machine address space into the kernel virtual
+ * address space. Needed when a privileged instance of Xenolinux wants
+ * to access space outside its world directly.
+ *
+ * NOTE! We need to allow non-page-aligned mappings too: we will obviously
+ * have to convert them into an offset in a page-aligned mapping, but the
+ * caller shouldn't need to know that small detail.
+ */
+void * __ioremap(unsigned long machine_addr,
+ unsigned long size,
+ unsigned long flags)
+{
+#if defined(CONFIG_XEN_PRIVILEGED_GUEST)
+ void * addr;
+ struct vm_struct * area;
+ unsigned long offset, last_addr;
+ pgprot_t prot;
+
+ /* Don't allow wraparound or zero size */
+ last_addr = machine_addr + size - 1;
+ if (!size || last_addr < machine_addr)
+ return NULL;
+
+ /* Mappings have to be page-aligned */
+ offset = machine_addr & ~PAGE_MASK;
+ machine_addr &= PAGE_MASK;
+ size = PAGE_ALIGN(last_addr+1) - machine_addr;
+
+ /* Ok, go for it */
+ area = get_vm_area(size, VM_IOREMAP);
+ if (!area)
+ return NULL;
+ addr = area->addr;
+ prot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY |
+ _PAGE_ACCESSED | flags);
+ if (direct_remap_area_pages(&init_mm, VMALLOC_VMADDR(addr),
+ machine_addr, size, prot, 0)) {
+ vfree(addr);
+ return NULL;
+ }
+ return (void *) (offset + (char *)addr);
+#else
+ return NULL;
+#endif
+}
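+
+/*
+ * A hypothetical usage sketch, assuming a privileged domain and a
+ * machine address mach_addr that it is permitted to map:
+ *
+ *     void *regs = __ioremap(mach_addr, 2 * PAGE_SIZE, 0);
+ *     if (regs != NULL) {
+ *         ...            (access the device through regs)
+ *         iounmap(regs);
+ *     }
+ */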
+
+void iounmap(void *addr)
+{
+ vfree((void *)((unsigned long)addr & PAGE_MASK));
+}
+
+/* Implementation of boot-time ioremap, for the purpose of providing access
+to the VGA console for privileged domains. Unlike boot-time ioremap on
+other architectures, ours is permanent and not reclaimed when the vmalloc
+infrastructure is started. */
+
+void __init *bt_ioremap(unsigned long machine_addr, unsigned long size)
+{
+ unsigned long offset, last_addr;
+ unsigned int nrpages;
+ enum fixed_addresses idx;
+
+ /* Don't allow wraparound or zero size */
+ last_addr = machine_addr + size - 1;
+ if (!size || last_addr < machine_addr)
+ return NULL;
+
+ /*
+ * Mappings have to be page-aligned
+ */
+ offset = machine_addr & ~PAGE_MASK;
+ machine_addr &= PAGE_MASK;
+ size = PAGE_ALIGN(last_addr) - machine_addr;
+
+ /*
+ * Mappings have to fit in the FIX_BTMAP area.
+ */
+ nrpages = size >> PAGE_SHIFT;
+ if (nrpages > NR_FIX_BTMAPS)
+ return NULL;
+
+ /*
+ * Ok, go for it..
+ */
+ idx = FIX_BTMAP_BEGIN;
+ while (nrpages > 0) {
+ __set_fixmap(idx, machine_addr, PAGE_KERNEL);
+ machine_addr += PAGE_SIZE;
+ --idx;
+ --nrpages;
+ }
+
+ flush_tlb_all();
+
+ return (void*) (offset + fix_to_virt(FIX_BTMAP_BEGIN));
+}
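+
+/*
+ * A minimal sketch of the VGA-console case mentioned above (the size is
+ * illustrative; 80x25 cells of 2 bytes fit in one FIX_BTMAP page):
+ *
+ *     char *vga = bt_ioremap(0xB8000, 80 * 25 * 2);
+ *
+ * The mapping lives in the FIX_BTMAP fixmap slots and, per the comment
+ * above, is never reclaimed.
+ */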
+
+
+#if 0 /* We don't support these functions. They shouldn't be required. */
+void __init bt_iounmap(void *addr, unsigned long size) {}
+#endif
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/arch/xen/vmlinux.lds
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/arch/xen/vmlinux.lds Tue Aug 9 15:17:45 2005
@@ -0,0 +1,75 @@
+/* ld script to make i386 Linux kernel
+ * Written by Martin Mares <mj@xxxxxxxxxxxxxxxxxxxxxxxx>;
+ */
+OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
+OUTPUT_ARCH(i386)
+ENTRY(_start)
+SECTIONS
+{
+ . = 0xC0000000 + 0x100000;
+ _text = .; /* Text and read-only data */
+ .text : {
+ *(.text)
+ *(.fixup)
+ *(.gnu.warning)
+ } = 0x9090
+
+ _etext = .; /* End of text section */
+
+ .rodata : { *(.rodata) *(.rodata.*) }
+ .kstrtab : { *(.kstrtab) }
+
+ . = ALIGN(16); /* Exception table */
+ __start___ex_table = .;
+ __ex_table : { *(__ex_table) }
+ __stop___ex_table = .;
+
+ __start___ksymtab = .; /* Kernel symbol table */
+ __ksymtab : { *(__ksymtab) }
+ __stop___ksymtab = .;
+
+ .data : { /* Data */
+ *(.data)
+ CONSTRUCTORS
+ }
+
+ _edata = .; /* End of data section */
+
+ . = ALIGN(8192); /* init_task */
+ .data.init_task : { *(.data.init_task) }
+
+ . = ALIGN(4096); /* Init code and data */
+ __init_begin = .;
+ .text.init : { *(.text.init) }
+ .data.init : { *(.data.init) }
+ . = ALIGN(16);
+ __setup_start = .;
+ .setup.init : { *(.setup.init) }
+ __setup_end = .;
+ __initcall_start = .;
+ .initcall.init : { *(.initcall.init) }
+ __initcall_end = .;
+ . = ALIGN(4096);
+ __init_end = .;
+
+ . = ALIGN(4096);
+ .data.page_aligned : { *(.data.idt) }
+
+ . = ALIGN(32);
+ .data.cacheline_aligned : { *(.data.cacheline_aligned) }
+
+ __bss_start = .; /* BSS */
+ .bss : {
+ *(.bss)
+ }
+ _end = . ;
+
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+}
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/drivers/block/ll_rw_blk.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/drivers/block/ll_rw_blk.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,1663 @@
+/*
+ * linux/drivers/block/ll_rw_blk.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
+ * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@xxxxxxx> SuSE
+ * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@xxxxxxx>
+ * kernel-doc documentation started by NeilBrown <neilb@xxxxxxxxxxxxxxx> - July 2000
+ */
+
+/*
+ * This handles all read/write requests to block devices
+ */
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/kernel_stat.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/config.h>
+#include <linux/locks.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/init.h>
+#include <linux/smp_lock.h>
+#include <linux/completion.h>
+#include <linux/bootmem.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <linux/blk.h>
+#include <linux/highmem.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+/*
+ * MAC Floppy IWM hooks
+ */
+
+#ifdef CONFIG_MAC_FLOPPY_IWM
+extern int mac_floppy_init(void);
+#endif
+
+/*
+ * For the allocated request tables
+ */
+static kmem_cache_t *request_cachep;
+
+/*
+ * The "disk" task queue is used to start the actual requests
+ * after a plug
+ */
+DECLARE_TASK_QUEUE(tq_disk);
+
+/*
+ * Protect the request list against multiple users..
+ *
+ * With this spinlock the Linux block IO subsystem is 100% SMP threaded
+ * from the IRQ event side, and almost 100% SMP threaded from the syscall
+ * side (we still have to protect against block device array operations,
+ * and the do_request() side is still unsafe; the kernel lock protects
+ * this part currently).
+ *
+ * there is a fair chance that things will work just OK if these functions
+ * are called with no global kernel lock held ...
+ */
+spinlock_t io_request_lock = SPIN_LOCK_UNLOCKED;
+
+/* This specifies how many sectors to read ahead on the disk. */
+
+int read_ahead[MAX_BLKDEV];
+
+/* blk_dev_struct is:
+ * *request_fn
+ * *current_request
+ */
+struct blk_dev_struct blk_dev[MAX_BLKDEV]; /* initialized by blk_dev_init() */
+
+/*
+ * blk_size contains the size of all block-devices in units of 1024 byte
+ * sectors:
+ *
+ * blk_size[MAJOR][MINOR]
+ *
+ * if (!blk_size[MAJOR]) then no minor size checking is done.
+ */
+int * blk_size[MAX_BLKDEV];
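+
+/*
+ * Example: a disk of 1048576kB (1GB) would have
+ * blk_size[MAJOR][MINOR] = 1048576; generic_make_request() below
+ * doubles that into a limit of 2097152 512-byte sectors.
+ */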
+
+/*
+ * blksize_size contains the size of all block-devices:
+ *
+ * blksize_size[MAJOR][MINOR]
+ *
+ * if (!blksize_size[MAJOR]) then 1024 bytes is assumed.
+ */
+int * blksize_size[MAX_BLKDEV];
+
+/*
+ * hardsect_size contains the size of the hardware sector of a device.
+ *
+ * hardsect_size[MAJOR][MINOR]
+ *
+ * if (!hardsect_size[MAJOR])
+ * then 512 bytes is assumed.
+ * else
+ * sector_size is hardsect_size[MAJOR][MINOR]
+ * This is currently set by some scsi devices and read by the msdos fs driver.
+ * Other uses may appear later.
+ */
+int * hardsect_size[MAX_BLKDEV];
+
+/*
+ * The following tunes the read-ahead algorithm in mm/filemap.c
+ */
+int * max_readahead[MAX_BLKDEV];
+
+/*
+ * Max number of sectors per request
+ */
+int * max_sectors[MAX_BLKDEV];
+
+unsigned long blk_max_low_pfn, blk_max_pfn;
+int blk_nohighio = 0;
+
+int block_dump = 0;
+
+static struct timer_list writeback_timer;
+
+static inline int get_max_sectors(kdev_t dev)
+{
+ if (!max_sectors[MAJOR(dev)])
+ return MAX_SECTORS;
+ return max_sectors[MAJOR(dev)][MINOR(dev)];
+}
+
+static inline request_queue_t *__blk_get_queue(kdev_t dev)
+{
+ struct blk_dev_struct *bdev = blk_dev + MAJOR(dev);
+
+ if (bdev->queue)
+ return bdev->queue(dev);
+ else
+ return &blk_dev[MAJOR(dev)].request_queue;
+}
+
+request_queue_t *blk_get_queue(kdev_t dev)
+{
+ return __blk_get_queue(dev);
+}
+
+static int __blk_cleanup_queue(struct request_list *list)
+{
+ struct list_head *head = &list->free;
+ struct request *rq;
+ int i = 0;
+
+ while (!list_empty(head)) {
+ rq = list_entry(head->next, struct request, queue);
+ list_del(&rq->queue);
+ kmem_cache_free(request_cachep, rq);
+ i++;
+ };
+
+ if (i != list->count)
+ printk("request list leak!\n");
+
+ list->count = 0;
+ return i;
+}
+
+/**
+ * blk_cleanup_queue: - release a &request_queue_t when it is no longer needed
+ * @q: the request queue to be released
+ *
+ * Description:
+ * blk_cleanup_queue is the pair to blk_init_queue(). It should
+ * be called when a request queue is being released; typically
+ * when a block device is being de-registered. Currently, its
+ * primary task is to free all the &struct request structures that
+ * were allocated to the queue.
+ * Caveat:
+ * Hopefully the low level driver will have finished any
+ * outstanding requests first...
+ **/
+void blk_cleanup_queue(request_queue_t * q)
+{
+ int count = q->nr_requests;
+
+ count -= __blk_cleanup_queue(&q->rq);
+
+ if (count)
+ printk("blk_cleanup_queue: leaked requests (%d)\n", count);
+ if (atomic_read(&q->nr_sectors))
+ printk("blk_cleanup_queue: leaked sectors (%d)\n",
atomic_read(&q->nr_sectors));
+
+ memset(q, 0, sizeof(*q));
+}
+
+/**
+ * blk_queue_headactive - indicate whether head of request queue may be active
+ * @q: The queue which this applies to.
+ * @active: A flag indicating whether the head of the queue is active.
+ *
+ * Description:
+ * The driver for a block device may choose to leave the currently active
+ * request on the request queue, removing it only when it has completed.
+ * The queue handling routines assume this by default for safety reasons
+ * and will not involve the head of the request queue in any merging or
+ * reordering of requests when the queue is unplugged (and thus may be
+ * working on this particular request).
+ *
+ * If a driver removes requests from the queue before processing them, then
+ * it may indicate that it does so, thereby allowing the head of the queue
+ * to be involved in merging and reordering. This is done by calling
+ * blk_queue_headactive() with an @active flag of %0.
+ *
+ * If a driver processes several requests at once, it must remove them (or
+ * at least all but one of them) from the request queue.
+ *
+ * When a queue is plugged the head will be assumed to be inactive.
+ **/
+
+void blk_queue_headactive(request_queue_t * q, int active)
+{
+ q->head_active = active;
+}
+
+/**
+ * blk_queue_throttle_sectors - indicates you will call sector throttling funcs
+ * @q: The queue which this applies to.
+ * @active: A flag indicating whether you want sector throttling on
+ *
+ * Description:
+ * The sector throttling code allows us to put a limit on the number of
+ * sectors pending I/O to the disk at a given time; passing @active nonzero
+ * indicates you will call blk_started_sectors and blk_finished_sectors in
+ * addition to calling blk_started_io and blk_finished_io in order to
+ * keep track of the number of sectors in flight.
+ **/
+
+void blk_queue_throttle_sectors(request_queue_t * q, int active)
+{
+ q->can_throttle = active;
+}
+
+/**
+ * blk_queue_make_request - define an alternate make_request function for a device
+ * @q: the request queue for the device to be affected
+ * @mfn: the alternate make_request function
+ *
+ * Description:
+ * The normal way for &struct buffer_heads to be passed to a device
+ * driver is for them to be collected into requests on a request
+ * queue, and then to allow the device driver to select requests
+ * off that queue when it is ready. This works well for many block
+ * devices. However some block devices (typically virtual devices
+ * such as md or lvm) do not benefit from the processing on the
+ * request queue, and are served best by having the requests passed
+ * directly to them. This can be achieved by providing a function
+ * to blk_queue_make_request().
+ *
+ * Caveat:
+ * The driver that does this *must* be able to deal appropriately
+ * with buffers in "highmemory", either by calling bh_kmap() to get
+ * a kernel mapping, or by calling create_bounce() to create a
+ * buffer in normal memory.
+ **/
+
+void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
+{
+ q->make_request_fn = mfn;
+}
+
+/**
+ * blk_queue_bounce_limit - set bounce buffer limit for queue
+ * @q: the request queue for the device
+ * @dma_addr: bus address limit
+ *
+ * Description:
+ * Different hardware can have different requirements as to what pages
+ * it can do I/O directly to. A low level driver can call
+ * blk_queue_bounce_limit to have lower memory pages allocated as bounce
+ * buffers for doing I/O to pages residing above @dma_addr. By default
+ * the block layer sets this to the highest numbered "low" memory page.
+ **/
+void blk_queue_bounce_limit(request_queue_t *q, u64 dma_addr)
+{
+ unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT;
+ unsigned long mb = dma_addr >> 20;
+ static request_queue_t *old_q;
+
+ /*
+ * keep this for debugging for now...
+ */
+ if (dma_addr != BLK_BOUNCE_HIGH && q != old_q) {
+ old_q = q;
+ printk("blk: queue %p, ", q);
+ if (dma_addr == BLK_BOUNCE_ANY)
+ printk("no I/O memory limit\n");
+ else
+ printk("I/O limit %luMb (mask 0x%Lx)\n", mb,
+ (long long) dma_addr);
+ }
+
+ q->bounce_pfn = bounce_pfn;
+}
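+
+/*
+ * A minimal sketch (hypothetical driver): hardware that can only DMA
+ * into the first 16MB would request bounce buffers with
+ *
+ *     blk_queue_bounce_limit(q, 0x00ffffff);
+ *
+ * while a controller with no addressing limit passes BLK_BOUNCE_ANY.
+ */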
+
+
+/*
+ * can we merge the two segments, or do we need to start a new one?
+ */
+static inline int __blk_seg_merge_ok(struct buffer_head *bh, struct buffer_head *nxt)
+{
+ /*
+ * if bh and nxt are contiguous and don't cross a 4G boundary, it's ok
+ */
+ if (BH_CONTIG(bh, nxt) && BH_PHYS_4G(bh, nxt))
+ return 1;
+
+ return 0;
+}
+
+int blk_seg_merge_ok(struct buffer_head *bh, struct buffer_head *nxt)
+{
+ return __blk_seg_merge_ok(bh, nxt);
+}
+
+static inline int ll_new_segment(request_queue_t *q, struct request *req, int max_segments)
+{
+ if (req->nr_segments < max_segments) {
+ req->nr_segments++;
+ return 1;
+ }
+ return 0;
+}
+
+static int ll_back_merge_fn(request_queue_t *q, struct request *req,
+ struct buffer_head *bh, int max_segments)
+{
+ if (__blk_seg_merge_ok(req->bhtail, bh))
+ return 1;
+
+ return ll_new_segment(q, req, max_segments);
+}
+
+static int ll_front_merge_fn(request_queue_t *q, struct request *req,
+ struct buffer_head *bh, int max_segments)
+{
+ if (__blk_seg_merge_ok(bh, req->bh))
+ return 1;
+
+ return ll_new_segment(q, req, max_segments);
+}
+
+static int ll_merge_requests_fn(request_queue_t *q, struct request *req,
+ struct request *next, int max_segments)
+{
+ int total_segments = req->nr_segments + next->nr_segments;
+
+ if (__blk_seg_merge_ok(req->bhtail, next->bh))
+ total_segments--;
+
+ if (total_segments > max_segments)
+ return 0;
+
+ req->nr_segments = total_segments;
+ return 1;
+}
+
+/*
+ * "plug" the device if there are no outstanding requests: this will
+ * force the transfer to start only after we have put all the requests
+ * on the list.
+ *
+ * This is called with interrupts off and no requests on the queue.
+ * (and with the request spinlock acquired)
+ */
+static void generic_plug_device(request_queue_t *q, kdev_t dev)
+{
+ /*
+ * no need to replug device
+ */
+ if (!list_empty(&q->queue_head) || q->plugged)
+ return;
+
+ q->plugged = 1;
+ queue_task(&q->plug_tq, &tq_disk);
+}
+
+/*
+ * remove the plug and let it rip..
+ */
+static inline void __generic_unplug_device(request_queue_t *q)
+{
+ if (q->plugged) {
+ q->plugged = 0;
+ if (!list_empty(&q->queue_head))
+ q->request_fn(q);
+ }
+}
+
+void generic_unplug_device(void *data)
+{
+ request_queue_t *q = (request_queue_t *) data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&io_request_lock, flags);
+ __generic_unplug_device(q);
+ spin_unlock_irqrestore(&io_request_lock, flags);
+}
+
+/** blk_grow_request_list
+ * @q: The &request_queue_t
+ * @nr_requests: how many requests are desired
+ *
+ * More free requests are added to the queue's free lists, bringing
+ * the total number of requests to @nr_requests.
+ *
+ * The requests are added equally to the request queue's read
+ * and write freelists.
+ *
+ * This function can sleep.
+ *
+ * Returns the (new) number of requests which the queue has available.
+ */
+int blk_grow_request_list(request_queue_t *q, int nr_requests, int max_queue_sectors)
+{
+ unsigned long flags;
+ /* Several broken drivers assume that this function doesn't sleep,
+ * this causes system hangs during boot.
+ * As a temporary fix, make the function non-blocking.
+ */
+ spin_lock_irqsave(&io_request_lock, flags);
+ while (q->nr_requests < nr_requests) {
+ struct request *rq;
+
+ rq = kmem_cache_alloc(request_cachep, SLAB_ATOMIC);
+ if (rq == NULL)
+ break;
+ memset(rq, 0, sizeof(*rq));
+ rq->rq_status = RQ_INACTIVE;
+ list_add(&rq->queue, &q->rq.free);
+ q->rq.count++;
+
+ q->nr_requests++;
+ }
+
+ /*
+ * Wakeup waiters after both one quarter of the
+ * max-in-flight queue and one quarter of the requests
+ * are available again.
+ */
+
+ q->batch_requests = q->nr_requests / 4;
+ if (q->batch_requests > 32)
+ q->batch_requests = 32;
+ q->batch_sectors = max_queue_sectors / 4;
+
+ q->max_queue_sectors = max_queue_sectors;
+
+ BUG_ON(!q->batch_sectors);
+ atomic_set(&q->nr_sectors, 0);
+
+ spin_unlock_irqrestore(&io_request_lock, flags);
+ return q->nr_requests;
+}
+
+static void blk_init_free_list(request_queue_t *q)
+{
+ struct sysinfo si;
+ int megs; /* Total memory, in megabytes */
+ int nr_requests, max_queue_sectors = MAX_QUEUE_SECTORS;
+
+ INIT_LIST_HEAD(&q->rq.free);
+ q->rq.count = 0;
+ q->rq.pending[READ] = q->rq.pending[WRITE] = 0;
+ q->nr_requests = 0;
+
+ si_meminfo(&si);
+ megs = si.totalram >> (20 - PAGE_SHIFT);
+ nr_requests = MAX_NR_REQUESTS;
+ if (megs < 30) {
+ nr_requests /= 2;
+ max_queue_sectors /= 2;
+ }
+ /* notice early if anybody screwed the defaults */
+ BUG_ON(!nr_requests);
+ BUG_ON(!max_queue_sectors);
+
+ blk_grow_request_list(q, nr_requests, max_queue_sectors);
+
+ init_waitqueue_head(&q->wait_for_requests);
+
+ spin_lock_init(&q->queue_lock);
+}
+
+static int __make_request(request_queue_t * q, int rw, struct buffer_head * bh);
+
+/**
+ * blk_init_queue - prepare a request queue for use with a block device
+ * @q: The &request_queue_t to be initialised
+ * @rfn: The function to be called to process requests that have been
+ * placed on the queue.
+ *
+ * Description:
+ * If a block device wishes to use the standard request handling procedures,
+ * which sorts requests and coalesces adjacent requests, then it must
+ * call blk_init_queue(). The function @rfn will be called when there
+ * are requests on the queue that need to be processed. If the device
+ * supports plugging, then @rfn may not be called immediately when requests
+ * are available on the queue, but may be called at some time later instead.
+ * Plugged queues are generally unplugged when a buffer belonging to one
+ * of the requests on the queue is needed, or due to memory pressure.
+ *
+ * @rfn is not required, or even expected, to remove all requests off the
+ * queue, but only as many as it can handle at a time. If it does leave
+ * requests on the queue, it is responsible for arranging that the requests
+ * get dealt with eventually.
+ *
+ * A global spin lock $io_request_lock must be held while manipulating the
+ * requests on the request queue.
+ *
+ * The request on the head of the queue is by default assumed to be
+ * potentially active, and it is not considered for re-ordering or merging
+ * whenever the given queue is unplugged. This behaviour can be changed with
+ * blk_queue_headactive().
+ *
+ * Note:
+ * blk_init_queue() must be paired with a blk_cleanup_queue() call
+ * when the block device is deactivated (such as at module unload).
+ **/
+void blk_init_queue(request_queue_t * q, request_fn_proc * rfn)
+{
+ INIT_LIST_HEAD(&q->queue_head);
+ elevator_init(&q->elevator, ELEVATOR_LINUS);
+ blk_init_free_list(q);
+ q->request_fn = rfn;
+ q->back_merge_fn = ll_back_merge_fn;
+ q->front_merge_fn = ll_front_merge_fn;
+ q->merge_requests_fn = ll_merge_requests_fn;
+ q->make_request_fn = __make_request;
+ q->plug_tq.sync = 0;
+ q->plug_tq.routine = &generic_unplug_device;
+ q->plug_tq.data = q;
+ q->plugged = 0;
+ q->can_throttle = 0;
+
+ /*
+ * These booleans describe the queue properties. We set the
+ * default (and most common) values here. Other drivers can
+ * use the appropriate functions to alter the queue properties as needed.
+ */
+ q->plug_device_fn = generic_plug_device;
+ q->head_active = 1;
+
+ blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
+}
+
+#define blkdev_free_rq(list) list_entry((list)->next, struct request, queue);
+/*
+ * Get a free request. io_request_lock must be held and interrupts
+ * disabled on the way in. Returns NULL if there are no free requests.
+ */
+static struct request *get_request(request_queue_t *q, int rw)
+{
+ struct request *rq = NULL;
+ struct request_list *rl = &q->rq;
+
+ if (blk_oversized_queue(q)) {
+ int rlim = q->nr_requests >> 5;
+
+ if (rlim < 4)
+ rlim = 4;
+
+ /*
+ * if its a write, or we have more than a handful of reads
+ * pending, bail out
+ */
+ if ((rw == WRITE) || (rw == READ && rl->pending[READ] > rlim))
+ return NULL;
+ if (blk_oversized_queue_reads(q))
+ return NULL;
+ }
+
+ if (!list_empty(&rl->free)) {
+ rq = blkdev_free_rq(&rl->free);
+ list_del(&rq->queue);
+ rl->count--;
+ rl->pending[rw]++;
+ rq->rq_status = RQ_ACTIVE;
+ rq->cmd = rw;
+ rq->special = NULL;
+ rq->q = q;
+ }
+
+ return rq;
+}
+
+/*
+ * Here's the request allocation design, low latency version:
+ *
+ * 1: Blocking on request exhaustion is a key part of I/O throttling.
+ *
+ * 2: We want to be `fair' to all requesters. We must avoid starvation, and
+ * attempt to ensure that all requesters sleep for a similar duration. Hence
+ * no stealing requests when there are other processes waiting.
+ *
+ * There used to be more here, attempting to allow a process to send in a
+ * number of requests once it has woken up. But, there's no way to
+ * tell if a process has just been woken up, or if it is a new process
+ * coming in to steal requests from the waiters. So, we give up and force
+ * everyone to wait fairly.
+ *
+ * So here's what we do:
+ *
+ * a) A READA requester fails if free_requests < batch_requests
+ *
+ * We don't want READA requests to prevent sleepers from ever
+ * waking. Note that READA is used extremely rarely - a few
+ * filesystems use it for directory readahead.
+ *
+ * When a process wants a new request:
+ *
+ * b) If free_requests == 0, the requester sleeps in FIFO manner, and
+ * the queue full condition is set. The full condition is not
+ * cleared until there are no longer any waiters. Once the full
+ * condition is set, all new io must wait, hopefully for a very
+ * short period of time.
+ *
+ * When a request is released:
+ *
+ * c) If free_requests < batch_requests, do nothing.
+ *
+ * d) If free_requests >= batch_requests, wake up a single waiter.
+ *
+ * As each waiter gets a request, he wakes another waiter. We do this
+ * to prevent a race where an unplug might get run before a request makes
+ * its way onto the queue. The result is a cascade of wakeups, so delaying
+ * the initial wakeup until we've got batch_requests available helps avoid
+ * wakeups where there aren't any requests available yet.
+ */
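+
+/*
+ * A worked example of (c)/(d): with nr_requests = 128,
+ * blk_grow_request_list() computes batch_requests = 128 / 4 = 32 (also
+ * the hard cap), so the first sleeper is woken only once 32 requests
+ * are free again; each waiter that then gets a request wakes the next
+ * one via get_request_wait_wakeup().
+ */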
+
+static struct request *__get_request_wait(request_queue_t *q, int rw)
+{
+ register struct request *rq;
+ DECLARE_WAITQUEUE(wait, current);
+
+ add_wait_queue_exclusive(&q->wait_for_requests, &wait);
+
+ do {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ spin_lock_irq(&io_request_lock);
+ if (blk_oversized_queue(q) || q->rq.count == 0) {
+ __generic_unplug_device(q);
+ spin_unlock_irq(&io_request_lock);
+ schedule();
+ spin_lock_irq(&io_request_lock);
+ }
+ rq = get_request(q, rw);
+ spin_unlock_irq(&io_request_lock);
+ } while (rq == NULL);
+ remove_wait_queue(&q->wait_for_requests, &wait);
+ current->state = TASK_RUNNING;
+
+ return rq;
+}
+
+static void get_request_wait_wakeup(request_queue_t *q, int rw)
+{
+ /*
+ * avoid losing an unplug if a second __get_request_wait did the
+ * generic_unplug_device while our __get_request_wait was running
+ * w/o the queue_lock held and w/ our request out of the queue.
+ */
+ if (waitqueue_active(&q->wait_for_requests))
+ wake_up(&q->wait_for_requests);
+}
+
+/* RO fail safe mechanism */
+
+static long ro_bits[MAX_BLKDEV][8];
+
+int is_read_only(kdev_t dev)
+{
+ int minor,major;
+
+ major = MAJOR(dev);
+ minor = MINOR(dev);
+ if (major < 0 || major >= MAX_BLKDEV) return 0;
+ return ro_bits[major][minor >> 5] & (1 << (minor & 31));
+}
+
+void set_device_ro(kdev_t dev,int flag)
+{
+ int minor,major;
+
+ major = MAJOR(dev);
+ minor = MINOR(dev);
+ if (major < 0 || major >= MAX_BLKDEV) return;
+ if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
+ else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
+}
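+
+/*
+ * Bitmap layout example: minor 37 lands in ro_bits[major][37 >> 5],
+ * i.e. word 1, bit 37 & 31 = 5; each long word tracks 32 minors.
+ */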
+
+inline void drive_stat_acct (kdev_t dev, int rw,
+ unsigned long nr_sectors, int new_io)
+{
+ unsigned int major = MAJOR(dev);
+ unsigned int index;
+
+ index = disk_index(dev);
+ if ((index >= DK_MAX_DISK) || (major >= DK_MAX_MAJOR))
+ return;
+
+ kstat.dk_drive[major][index] += new_io;
+ if (rw == READ) {
+ kstat.dk_drive_rio[major][index] += new_io;
+ kstat.dk_drive_rblk[major][index] += nr_sectors;
+ } else if (rw == WRITE) {
+ kstat.dk_drive_wio[major][index] += new_io;
+ kstat.dk_drive_wblk[major][index] += nr_sectors;
+ } else
+ printk(KERN_ERR "drive_stat_acct: cmd not R/W?\n");
+}
+
+#ifdef CONFIG_BLK_STATS
+/*
+ * Return up to two hd_structs on which to do IO accounting for a given
+ * request.
+ *
+ * On a partitioned device, we want to account both against the partition
+ * and against the whole disk.
+ */
+static void locate_hd_struct(struct request *req,
+ struct hd_struct **hd1,
+ struct hd_struct **hd2)
+{
+ struct gendisk *gd;
+
+ *hd1 = NULL;
+ *hd2 = NULL;
+
+ gd = get_gendisk(req->rq_dev);
+ if (gd && gd->part) {
+ /* Mask out the partition bits: account for the entire disk */
+ int devnr = MINOR(req->rq_dev) >> gd->minor_shift;
+ int whole_minor = devnr << gd->minor_shift;
+
+ *hd1 = &gd->part[whole_minor];
+ if (whole_minor != MINOR(req->rq_dev))
+ *hd2= &gd->part[MINOR(req->rq_dev)];
+ }
+}
+
+/*
+ * Round off the performance stats on an hd_struct.
+ *
+ * The average IO queue length and utilisation statistics are maintained
+ * by observing the current state of the queue length and the amount of
+ * time it has been in this state for.
+ * Normally, that accounting is done on IO completion, but that can result
+ * in more than a second's worth of IO being accounted for within any one
+ * second, leading to >100% utilisation. To deal with that, we do a
+ * round-off before returning the results when reading /proc/partitions,
+ * accounting immediately for all queue usage up to the current jiffies and
+ * restarting the counters again.
+ */
+void disk_round_stats(struct hd_struct *hd)
+{
+ unsigned long now = jiffies;
+
+ hd->aveq += (hd->ios_in_flight * (jiffies - hd->last_queue_change));
+ hd->last_queue_change = now;
+
+ if (hd->ios_in_flight)
+ hd->io_ticks += (now - hd->last_idle_time);
+ hd->last_idle_time = now;
+}
+
+static inline void down_ios(struct hd_struct *hd)
+{
+ disk_round_stats(hd);
+ --hd->ios_in_flight;
+}
+
+static inline void up_ios(struct hd_struct *hd)
+{
+ disk_round_stats(hd);
+ ++hd->ios_in_flight;
+}
+
+static void account_io_start(struct hd_struct *hd, struct request *req,
+ int merge, int sectors)
+{
+ switch (req->cmd) {
+ case READ:
+ if (merge)
+ hd->rd_merges++;
+ hd->rd_sectors += sectors;
+ break;
+ case WRITE:
+ if (merge)
+ hd->wr_merges++;
+ hd->wr_sectors += sectors;
+ break;
+ }
+ if (!merge)
+ up_ios(hd);
+}
+
+static void account_io_end(struct hd_struct *hd, struct request *req)
+{
+ unsigned long duration = jiffies - req->start_time;
+ switch (req->cmd) {
+ case READ:
+ hd->rd_ticks += duration;
+ hd->rd_ios++;
+ break;
+ case WRITE:
+ hd->wr_ticks += duration;
+ hd->wr_ios++;
+ break;
+ }
+ down_ios(hd);
+}
+
+void req_new_io(struct request *req, int merge, int sectors)
+{
+ struct hd_struct *hd1, *hd2;
+
+ locate_hd_struct(req, &hd1, &hd2);
+ if (hd1)
+ account_io_start(hd1, req, merge, sectors);
+ if (hd2)
+ account_io_start(hd2, req, merge, sectors);
+}
+
+void req_merged_io(struct request *req)
+{
+ struct hd_struct *hd1, *hd2;
+
+ locate_hd_struct(req, &hd1, &hd2);
+ if (hd1)
+ down_ios(hd1);
+ if (hd2)
+ down_ios(hd2);
+}
+
+void req_finished_io(struct request *req)
+{
+ struct hd_struct *hd1, *hd2;
+
+ locate_hd_struct(req, &hd1, &hd2);
+ if (hd1)
+ account_io_end(hd1, req);
+ if (hd2)
+ account_io_end(hd2, req);
+}
+EXPORT_SYMBOL(req_finished_io);
+#endif /* CONFIG_BLK_STATS */
+
+/*
+ * add-request adds a request to the linked list.
+ * io_request_lock is held and interrupts disabled, as we muck with the
+ * request queue list.
+ *
+ * By this point, req->cmd is always either READ/WRITE, never READA,
+ * which is important for drive_stat_acct() above.
+ */
+static inline void add_request(request_queue_t * q, struct request * req,
+ struct list_head *insert_here)
+{
+ drive_stat_acct(req->rq_dev, req->cmd, req->nr_sectors, 1);
+
+ if (!q->plugged && q->head_active && insert_here == &q->queue_head) {
+ spin_unlock_irq(&io_request_lock);
+ BUG();
+ }
+
+ /*
+ * elevator indicated where it wants this request to be
+ * inserted at elevator_merge time
+ */
+ list_add(&req->queue, insert_here);
+}
+
+/*
+ * Must be called with io_request_lock held and interrupts disabled
+ */
+void blkdev_release_request(struct request *req)
+{
+ request_queue_t *q = req->q;
+
+ req->rq_status = RQ_INACTIVE;
+ req->q = NULL;
+
+ /*
+ * Request may not have originated from ll_rw_blk. if not,
+ * assume it has free buffers and check waiters
+ */
+ if (q) {
+ struct request_list *rl = &q->rq;
+ int oversized_batch = 0;
+
+ if (q->can_throttle)
+ oversized_batch = blk_oversized_queue_batch(q);
+ rl->count++;
+ /*
+ * paranoia check
+ */
+ if (req->cmd == READ || req->cmd == WRITE)
+ rl->pending[req->cmd]--;
+ if (rl->pending[READ] > q->nr_requests)
+ printk("blk: reads: %u\n", rl->pending[READ]);
+ if (rl->pending[WRITE] > q->nr_requests)
+ printk("blk: writes: %u\n", rl->pending[WRITE]);
+ if (rl->pending[READ] + rl->pending[WRITE] > q->nr_requests)
+ printk("blk: r/w: %u + %u > %u\n", rl->pending[READ],
rl->pending[WRITE], q->nr_requests);
+ list_add(&req->queue, &rl->free);
+ if (rl->count >= q->batch_requests && !oversized_batch) {
+ smp_mb();
+ if (waitqueue_active(&q->wait_for_requests))
+ wake_up(&q->wait_for_requests);
+ }
+ }
+}
+
+/*
+ * Has to be called with the request spinlock acquired
+ */
+static void attempt_merge(request_queue_t * q,
+ struct request *req,
+ int max_sectors,
+ int max_segments)
+{
+ struct request *next;
+
+ next = blkdev_next_request(req);
+ if (req->sector + req->nr_sectors != next->sector)
+ return;
+ if (req->cmd != next->cmd
+ || req->rq_dev != next->rq_dev
+ || req->nr_sectors + next->nr_sectors > max_sectors
+ || next->waiting)
+ return;
+ /*
+ * If we are not allowed to merge these requests, then
+ * return. If we are allowed to merge, then the count
+ * will have been updated to the appropriate number,
+ * and we shouldn't do it here too.
+ */
+ if (!q->merge_requests_fn(q, req, next, max_segments))
+ return;
+
+ q->elevator.elevator_merge_req_fn(req, next);
+
+ /* At this point we have either done a back merge
+ * or front merge. We need the smaller start_time of
+ * the merged requests to be the current request
+ * for accounting purposes.
+ */
+ if (time_after(req->start_time, next->start_time))
+ req->start_time = next->start_time;
+
+ req->bhtail->b_reqnext = next->bh;
+ req->bhtail = next->bhtail;
+ req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors;
+ list_del(&next->queue);
+
+ /* One last thing: we have removed a request, so we now have one
+ less expected IO to complete for accounting purposes. */
+ req_merged_io(req);
+
+ blkdev_release_request(next);
+}
+
+static inline void attempt_back_merge(request_queue_t * q,
+ struct request *req,
+ int max_sectors,
+ int max_segments)
+{
+ if (&req->queue == q->queue_head.prev)
+ return;
+ attempt_merge(q, req, max_sectors, max_segments);
+}
+
+static inline void attempt_front_merge(request_queue_t * q,
+ struct list_head * head,
+ struct request *req,
+ int max_sectors,
+ int max_segments)
+{
+ struct list_head * prev;
+
+ prev = req->queue.prev;
+ if (head == prev)
+ return;
+ attempt_merge(q, blkdev_entry_to_request(prev), max_sectors, max_segments);
+}
+
+static int __make_request(request_queue_t * q, int rw,
+ struct buffer_head * bh)
+{
+ unsigned int sector, count, sync;
+ int max_segments = MAX_SEGMENTS;
+ struct request * req, *freereq = NULL;
+ int rw_ahead, max_sectors, el_ret;
+ struct list_head *head, *insert_here;
+ int latency;
+ elevator_t *elevator = &q->elevator;
+ int should_wake = 0;
+
+ count = bh->b_size >> 9;
+ sector = bh->b_rsector;
+ sync = test_and_clear_bit(BH_Sync, &bh->b_state);
+
+ rw_ahead = 0; /* normal case; gets changed below for READA */
+ switch (rw) {
+ case READA:
+#if 0 /* bread() misinterprets failed READA attempts as IO errors on SMP */
+ rw_ahead = 1;
+#endif
+ rw = READ; /* drop into READ */
+ case READ:
+ case WRITE:
+ latency = elevator_request_latency(elevator, rw);
+ break;
+ default:
+ BUG();
+ goto end_io;
+ }
+
+ /* We'd better have a real physical mapping!
+ Check this bit only if the buffer was dirty and just locked
+ down by us so at this point flushpage will block and
+ won't clear the mapped bit under us. */
+ if (!buffer_mapped(bh))
+ BUG();
+
+ /*
+ * Temporary solution - in 2.5 this will be done by the lowlevel
+ * driver. Create a bounce buffer if the buffer data points into
+ * high memory - keep the original buffer otherwise.
+ */
+ bh = blk_queue_bounce(q, rw, bh);
+
+/* look for a free request. */
+ /*
+ * Try to coalesce the new request with old requests
+ */
+ max_sectors = get_max_sectors(bh->b_rdev);
+
+ req = NULL;
+ head = &q->queue_head;
+ /*
+ * Now we acquire the request spinlock, we have to be mega careful
+ * not to schedule or do something nonatomic
+ */
+ spin_lock_irq(&io_request_lock);
+
+again:
+ insert_here = head->prev;
+
+ if (list_empty(head)) {
+ q->plug_device_fn(q, bh->b_rdev); /* is atomic */
+ goto get_rq;
+ } else if (q->head_active && !q->plugged)
+ head = head->next;
+
+ el_ret = elevator->elevator_merge_fn(q, &req, head, bh, rw,max_sectors);
+ switch (el_ret) {
+
+ case ELEVATOR_BACK_MERGE:
+ if (!q->back_merge_fn(q, req, bh, max_segments)) {
+ insert_here = &req->queue;
+ break;
+ }
+ req->bhtail->b_reqnext = bh;
+ req->bhtail = bh;
+ req->nr_sectors = req->hard_nr_sectors += count;
+ blk_started_io(count);
+ blk_started_sectors(req, count);
+ drive_stat_acct(req->rq_dev, req->cmd, count, 0);
+ req_new_io(req, 1, count);
+ attempt_back_merge(q, req, max_sectors, max_segments);
+ goto out;
+
+ case ELEVATOR_FRONT_MERGE:
+ if (!q->front_merge_fn(q, req, bh, max_segments)) {
+ insert_here = req->queue.prev;
+ break;
+ }
+ bh->b_reqnext = req->bh;
+ req->bh = bh;
+ /*
+ * may not be valid, but queues not having bounce
+ * enabled for highmem pages must not look at
+ * ->buffer anyway
+ */
+ req->buffer = bh->b_data;
+ req->current_nr_sectors = req->hard_cur_sectors = count;
+ req->sector = req->hard_sector = sector;
+ req->nr_sectors = req->hard_nr_sectors += count;
+ blk_started_io(count);
+ blk_started_sectors(req, count);
+ drive_stat_acct(req->rq_dev, req->cmd, count, 0);
+ req_new_io(req, 1, count);
+ attempt_front_merge(q, head, req, max_sectors, max_segments);
+ goto out;
+
+ /*
+ * elevator says don't/can't merge. get new request
+ */
+ case ELEVATOR_NO_MERGE:
+ /*
+ * use elevator hints as to where to insert the
+ * request. if no hints, just add it to the back
+ * of the queue
+ */
+ if (req)
+ insert_here = &req->queue;
+ break;
+
+ default:
+ printk("elevator returned crap (%d)\n", el_ret);
+ BUG();
+ }
+
+get_rq:
+ if (freereq) {
+ req = freereq;
+ freereq = NULL;
+ } else {
+ /*
+ * See description above __get_request_wait()
+ */
+ if (rw_ahead) {
+ if (q->rq.count < q->batch_requests || blk_oversized_queue_batch(q)) {
+ spin_unlock_irq(&io_request_lock);
+ goto end_io;
+ }
+ req = get_request(q, rw);
+ if (req == NULL)
+ BUG();
+ } else {
+ req = get_request(q, rw);
+ if (req == NULL) {
+ spin_unlock_irq(&io_request_lock);
+ freereq = __get_request_wait(q, rw);
+ head = &q->queue_head;
+ spin_lock_irq(&io_request_lock);
+ should_wake = 1;
+ goto again;
+ }
+ }
+ }
+
+/* fill up the request-info, and add it to the queue */
+ req->elevator_sequence = latency;
+ req->cmd = rw;
+ req->errors = 0;
+ req->hard_sector = req->sector = sector;
+ req->hard_nr_sectors = req->nr_sectors = count;
+ req->current_nr_sectors = req->hard_cur_sectors = count;
+ req->nr_segments = 1; /* Always 1 for a new request. */
+ req->nr_hw_segments = 1; /* Always 1 for a new request. */
+ req->buffer = bh->b_data;
+ req->waiting = NULL;
+ req->bh = bh;
+ req->bhtail = bh;
+ req->rq_dev = bh->b_rdev;
+ req->start_time = jiffies;
+ req_new_io(req, 0, count);
+ blk_started_io(count);
+ blk_started_sectors(req, count);
+ add_request(q, req, insert_here);
+out:
+ if (freereq)
+ blkdev_release_request(freereq);
+ if (should_wake)
+ get_request_wait_wakeup(q, rw);
+ if (sync)
+ __generic_unplug_device(q);
+ spin_unlock_irq(&io_request_lock);
+ return 0;
+end_io:
+ bh->b_end_io(bh, test_bit(BH_Uptodate, &bh->b_state));
+ return 0;
+}
+
+/**
+ * generic_make_request: hand a buffer head to its device driver for I/O
+ * @rw: READ, WRITE, or READA - what sort of I/O is desired.
+ * @bh: The buffer head describing the location in memory and on the device.
+ *
+ * generic_make_request() is used to make I/O requests of block
+ * devices. It is passed a &struct buffer_head and a &rw value. The
+ * %READ and %WRITE options are (hopefully) obvious in meaning. The
+ * %READA value means that a read is required, but that the driver is
+ * free to fail the request if, for example, it cannot get needed
+ * resources immediately.
+ *
+ * generic_make_request() does not return any status. The
+ * success/failure status of the request, along with notification of
+ * completion, is delivered asynchronously through the bh->b_end_io
+ * function described (one day) elsewhere.
+ *
+ * The caller of generic_make_request must make sure that b_page,
+ * b_addr, b_size are set to describe the memory buffer, that b_rdev
+ * and b_rsector are set to describe the device address, and the
+ * b_end_io and optionally b_private are set to describe how
+ * completion notification should be signaled. BH_Mapped should also
+ * be set (to confirm that b_dev and b_blocknr are valid).
+ *
+ * generic_make_request and the drivers it calls may use b_reqnext,
+ * and may change b_rdev and b_rsector. So the values of these fields
+ * should NOT be depended on after the call to generic_make_request.
+ * Because of this, the caller should record the device address
+ * information in b_dev and b_blocknr.
+ *
+ * Apart from those fields mentioned above, no other fields, and in
+ * particular, no other flags, are changed by generic_make_request or
+ * any lower level drivers.
+ * */
+void generic_make_request (int rw, struct buffer_head * bh)
+{
+ int major = MAJOR(bh->b_rdev);
+ int minorsize = 0;
+ request_queue_t *q;
+
+ if (!bh->b_end_io)
+ BUG();
+
+ /* Test device size, when known. */
+ if (blk_size[major])
+ minorsize = blk_size[major][MINOR(bh->b_rdev)];
+ if (minorsize) {
+ unsigned long maxsector = (minorsize << 1) + 1;
+ unsigned long sector = bh->b_rsector;
+ unsigned int count = bh->b_size >> 9;
+
+ if (maxsector < count || maxsector - count < sector) {
+ /* Yecch */
+ bh->b_state &= ~(1 << BH_Dirty);
+
+ /* This may well happen - the kernel calls bread()
+ without checking the size of the device, e.g.,
+ when mounting a device. */
+ printk(KERN_INFO
+ "attempt to access beyond end of device\n");
+ printk(KERN_INFO "%s: rw=%d, want=%ld, limit=%d\n",
+ kdevname(bh->b_rdev), rw,
+ (sector + count)>>1, minorsize);
+
+ bh->b_end_io(bh, 0);
+ return;
+ }
+ }
+
+ /*
+ * Resolve the mapping until finished. (drivers are
+ * still free to implement/resolve their own stacking
+ * by explicitly returning 0)
+ */
+ /* NOTE: we don't repeat the blk_size check for each new device.
+ * Stacking drivers are expected to know what they are doing.
+ */
+ do {
+ q = __blk_get_queue(bh->b_rdev);
+ if (!q) {
+ printk(KERN_ERR
+ "generic_make_request: Trying to access "
+ "nonexistent block-device %s (%ld)\n",
+ kdevname(bh->b_rdev), bh->b_rsector);
+ buffer_IO_error(bh);
+ break;
+ }
+ } while (q->make_request_fn(q, rw, bh));
+}
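+
+/*
+ * A minimal caller sketch, following the interface contract above
+ * (my_end_io and target_sector are hypothetical names):
+ *
+ *     bh->b_rdev = bh->b_dev;
+ *     bh->b_rsector = target_sector;
+ *     bh->b_end_io = my_end_io;
+ *     set_bit(BH_Mapped, &bh->b_state);
+ *     generic_make_request(READ, bh);
+ */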
+
+
+/**
+ * submit_bh: submit a buffer_head to the block device later for I/O
+ * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
+ * @bh: The &struct buffer_head which describes the I/O
+ *
+ * submit_bh() is very similar in purpose to generic_make_request(), and
+ * uses that function to do most of the work.
+ *
+ * The extra functionality provided by submit_bh is to determine
+ * b_rsector from b_blocknr and b_size, and to set b_rdev from b_dev.
+ * This is appropriate for IO requests that come from the buffer
+ * cache and page cache which (currently) always use aligned blocks.
+ */
+void submit_bh(int rw, struct buffer_head * bh)
+{
+ int count = bh->b_size >> 9;
+
+ if (!test_bit(BH_Lock, &bh->b_state))
+ BUG();
+
+ set_bit(BH_Req, &bh->b_state);
+ set_bit(BH_Launder, &bh->b_state);
+
+ /*
+ * First step, 'identity mapping' - RAID or LVM might
+ * further remap this.
+ */
+ bh->b_rdev = bh->b_dev;
+ bh->b_rsector = bh->b_blocknr * count;
+
+ get_bh(bh);
+ generic_make_request(rw, bh);
+
+ /* fix race condition with wait_on_buffer() */
+ smp_mb(); /* spin_unlock may have inclusive semantics */
+ if (waitqueue_active(&bh->b_wait))
+ wake_up(&bh->b_wait);
+
+ if (block_dump)
+ printk(KERN_DEBUG "%s: %s block %lu/%u on %s\n", current->comm,
rw == WRITE ? "WRITE" : "READ", bh->b_rsector, count, kdevname(bh->b_rdev));
+
+ put_bh(bh);
+ switch (rw) {
+ case WRITE:
+ kstat.pgpgout += count;
+ break;
+ default:
+ kstat.pgpgin += count;
+ break;
+ }
+}
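+
+/*
+ * Sector arithmetic example for the identity mapping above: with
+ * b_size = 1024, count = 1024 >> 9 = 2, so block number 100 maps to
+ * b_rsector = 100 * 2 = 200 512-byte sectors into the device.
+ */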
+
+/**
+ * ll_rw_block: low-level access to block devices
+ * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
+ * @nr: number of &struct buffer_heads in the array
+ * @bhs: array of pointers to &struct buffer_head
+ *
+ * ll_rw_block() takes an array of pointers to &struct buffer_heads,
+ * and requests an I/O operation on them, either a %READ or a %WRITE.
+ * The third %READA option is described in the documentation for
+ * generic_make_request() which ll_rw_block() calls.
+ *
+ * This function provides extra functionality that is not in
+ * generic_make_request() that is relevant to buffers in the buffer
+ * cache or page cache. In particular it drops any buffer that it
+ * cannot get a lock on (with the BH_Lock state bit), any buffer that
+ * appears to be clean when doing a write request, and any buffer that
+ * appears to be up-to-date when doing a read request. Further it marks
+ * as clean buffers that are processed for writing (the buffer cache
+ * won't assume that they are actually clean until the buffer gets
+ * unlocked).
+ *
+ * ll_rw_block sets b_end_io to a simple completion handler that marks
+ * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
+ * any waiters. A client that needs a more interesting completion
+ * routine should call submit_bh() (or generic_make_request())
+ * directly.
+ *
+ * Caveat:
+ * All of the buffers must be for the same device, and must also be
+ * of the current approved size for the device. */
+
+void ll_rw_block(int rw, int nr, struct buffer_head * bhs[])
+{
+ unsigned int major;
+ int correct_size;
+ int i;
+
+ if (!nr)
+ return;
+
+ major = MAJOR(bhs[0]->b_dev);
+
+ /* Determine correct block size for this device. */
+ correct_size = get_hardsect_size(bhs[0]->b_dev);
+
+ /* Verify requested block sizes. */
+ for (i = 0; i < nr; i++) {
+ struct buffer_head *bh = bhs[i];
+ if (bh->b_size % correct_size) {
+ printk(KERN_NOTICE "ll_rw_block: device %s: "
+ "only %d-char blocks implemented (%u)\n",
+ kdevname(bhs[0]->b_dev),
+ correct_size, bh->b_size);
+ goto sorry;
+ }
+ }
+
+ if ((rw & WRITE) && is_read_only(bhs[0]->b_dev)) {
+ printk(KERN_NOTICE "Can't write to read-only device %s\n",
+ kdevname(bhs[0]->b_dev));
+ goto sorry;
+ }
+
+ for (i = 0; i < nr; i++) {
+ struct buffer_head *bh = bhs[i];
+
+ lock_buffer(bh);
+
+ /* We have the buffer lock */
+ atomic_inc(&bh->b_count);
+ bh->b_end_io = end_buffer_io_sync;
+
+ switch(rw) {
+ case WRITE:
+ if (!atomic_set_buffer_clean(bh))
+ /* Hmmph! Nothing to write */
+ goto end_io;
+ __mark_buffer_clean(bh);
+ break;
+
+ case READA:
+ case READ:
+ if (buffer_uptodate(bh))
+ /* Hmmph! Already have it */
+ goto end_io;
+ break;
+ default:
+ BUG();
+ end_io:
+ bh->b_end_io(bh, test_bit(BH_Uptodate, &bh->b_state));
+ continue;
+ }
+
+ submit_bh(rw, bh);
+ }
+ return;
+
+sorry:
+ /* Make sure we don't get infinite dirty retries.. */
+ for (i = 0; i < nr; i++)
+ mark_buffer_clean(bhs[i]);
+}
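
The classic consumer of this interface is the bread()-style synchronous read; a sketch under the usual 2.4 buffer-cache assumptions (getblk() returns a referenced buffer):

/* Sketch of a bread()-style caller of ll_rw_block(). */
static struct buffer_head *example_bread(kdev_t dev, int block, int size)
{
        struct buffer_head *bh = getblk(dev, block, size);

        if (buffer_uptodate(bh))
                return bh;                /* already cached; no I/O needed */
        ll_rw_block(READ, 1, &bh);
        wait_on_buffer(bh);
        if (buffer_uptodate(bh))
                return bh;
        brelse(bh);                       /* the read failed */
        return NULL;
}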
+
+#ifdef CONFIG_STRAM_SWAP
+extern int stram_device_init (void);
+#endif
+
+static void blk_writeback_timer(unsigned long data)
+{
+ wakeup_bdflush();
+ wakeup_kupdate();
+}
+
+/**
+ * end_that_request_first - end I/O on one buffer.
+ * @req: the request being processed
+ * @uptodate: 0 for I/O error
+ * @name: the name printed for an I/O error
+ *
+ * Description:
+ * Ends I/O on the first buffer attached to @req, and sets it up
+ * for the next buffer_head (if any) in the cluster.
+ *
+ * Return:
+ * 0 - we are done with this request, call end_that_request_last()
+ * 1 - still buffers pending for this request
+ *
+ * Caveat:
+ * Drivers implementing their own end_request handling must call
+ * blk_finished_io() appropriately.
+ **/
+
+int end_that_request_first (struct request *req, int uptodate, char *name)
+{
+ struct buffer_head * bh;
+ int nsect;
+
+ req->errors = 0;
+ if (!uptodate)
+ printk("end_request: I/O error, dev %s (%s), sector %lu\n",
+ kdevname(req->rq_dev), name, req->sector);
+
+ if ((bh = req->bh) != NULL) {
+ nsect = bh->b_size >> 9;
+ blk_finished_io(nsect);
+ blk_finished_sectors(req, nsect);
+ req->bh = bh->b_reqnext;
+ bh->b_reqnext = NULL;
+ bh->b_end_io(bh, uptodate);
+ if ((bh = req->bh) != NULL) {
+ req->hard_sector += nsect;
+ req->hard_nr_sectors -= nsect;
+ req->sector = req->hard_sector;
+ req->nr_sectors = req->hard_nr_sectors;
+
+ req->current_nr_sectors = bh->b_size >> 9;
+ req->hard_cur_sectors = req->current_nr_sectors;
+ if (req->nr_sectors < req->current_nr_sectors) {
+ req->nr_sectors = req->current_nr_sectors;
+ printk("end_request: buffer-list destroyed\n");
+ }
+ req->buffer = bh->b_data;
+ return 1;
+ }
+ }
+ return 0;
+}
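
The 0/1 return value implies the usual driver completion idiom; a sketch (queue locking elided, the caller is assumed to hold io_request_lock, and "exampledrv" is a placeholder name):

/* Sketch of a driver completing the request at the head of its queue. */
static void example_end_request(struct request *req, int uptodate)
{
        while (end_that_request_first(req, uptodate, "exampledrv"))
                ; /* more buffer_heads pending in this clustered request */
        blkdev_dequeue_request(req);
        end_that_request_last(req);
}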
+
+extern int laptop_mode;
+
+void end_that_request_last(struct request *req)
+{
+ struct completion *waiting = req->waiting;
+
+ /*
+ * schedule the writeout of pending dirty data when the disk is idle
+ */
+ if (laptop_mode && req->cmd == READ)
+ mod_timer(&writeback_timer, jiffies + 5 * HZ);
+
+ req_finished_io(req);
+ blkdev_release_request(req);
+ if (waiting)
+ complete(waiting);
+}
+
+int __init blk_dev_init(void)
+{
+ struct blk_dev_struct *dev;
+
+ request_cachep = kmem_cache_create("blkdev_requests",
+ sizeof(struct request),
+ 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
+
+ if (!request_cachep)
+ panic("Can't create request pool slab cache\n");
+
+ for (dev = blk_dev + MAX_BLKDEV; dev-- != blk_dev;)
+ dev->queue = NULL;
+
+ memset(ro_bits,0,sizeof(ro_bits));
+ memset(max_readahead, 0, sizeof(max_readahead));
+ memset(max_sectors, 0, sizeof(max_sectors));
+
+ blk_max_low_pfn = max_low_pfn - 1;
+ blk_max_pfn = max_pfn - 1;
+
+ init_timer(&writeback_timer);
+ writeback_timer.function = blk_writeback_timer;
+
+#ifdef CONFIG_AMIGA_Z2RAM
+ z2_init();
+#endif
+#ifdef CONFIG_STRAM_SWAP
+ stram_device_init();
+#endif
+#ifdef CONFIG_ISP16_CDI
+ isp16_init();
+#endif
+#ifdef CONFIG_BLK_DEV_PS2
+ ps2esdi_init();
+#endif
+#ifdef CONFIG_BLK_DEV_XD
+ xd_init();
+#endif
+#ifdef CONFIG_BLK_DEV_MFM
+ mfm_init();
+#endif
+#ifdef CONFIG_PARIDE
+ { extern void paride_init(void); paride_init(); };
+#endif
+#ifdef CONFIG_MAC_FLOPPY
+ swim3_init();
+#endif
+#ifdef CONFIG_BLK_DEV_SWIM_IOP
+ swimiop_init();
+#endif
+#ifdef CONFIG_AMIGA_FLOPPY
+ amiga_floppy_init();
+#endif
+#ifdef CONFIG_ATARI_FLOPPY
+ atari_floppy_init();
+#endif
+#ifdef CONFIG_BLK_DEV_FD
+ floppy_init();
+#else
+#if defined(__i386__) && !defined(CONFIG_XEN) /* Do we even need this? */
+ outb_p(0xc, 0x3f2);
+#endif
+#endif
+#ifdef CONFIG_CDU31A
+ cdu31a_init();
+#endif
+#ifdef CONFIG_ATARI_ACSI
+ acsi_init();
+#endif
+#ifdef CONFIG_MCD
+ mcd_init();
+#endif
+#ifdef CONFIG_MCDX
+ mcdx_init();
+#endif
+#ifdef CONFIG_SBPCD
+ sbpcd_init();
+#endif
+#ifdef CONFIG_AZTCD
+ aztcd_init();
+#endif
+#ifdef CONFIG_CDU535
+ sony535_init();
+#endif
+#ifdef CONFIG_GSCD
+ gscd_init();
+#endif
+#ifdef CONFIG_CM206
+ cm206_init();
+#endif
+#ifdef CONFIG_OPTCD
+ optcd_init();
+#endif
+#ifdef CONFIG_SJCD
+ sjcd_init();
+#endif
+#ifdef CONFIG_APBLOCK
+ ap_init();
+#endif
+#ifdef CONFIG_DDV
+ ddv_init();
+#endif
+#ifdef CONFIG_MDISK
+ mdisk_init();
+#endif
+#ifdef CONFIG_DASD
+ dasd_init();
+#endif
+#if defined(CONFIG_S390_TAPE) && defined(CONFIG_S390_TAPE_BLOCK)
+ tapeblock_init();
+#endif
+#ifdef CONFIG_BLK_DEV_XPRAM
+ xpram_init();
+#endif
+
+#ifdef CONFIG_SUN_JSFLASH
+ jsfd_init();
+#endif
+
+#if defined(CONFIG_XEN_BLKDEV_FRONTEND)
+ xlblk_init();
+#endif
+
+ return 0;
+};
+
+EXPORT_SYMBOL(io_request_lock);
+EXPORT_SYMBOL(end_that_request_first);
+EXPORT_SYMBOL(end_that_request_last);
+EXPORT_SYMBOL(blk_grow_request_list);
+EXPORT_SYMBOL(blk_init_queue);
+EXPORT_SYMBOL(blk_get_queue);
+EXPORT_SYMBOL(blk_cleanup_queue);
+EXPORT_SYMBOL(blk_queue_headactive);
+EXPORT_SYMBOL(blk_queue_throttle_sectors);
+EXPORT_SYMBOL(blk_queue_make_request);
+EXPORT_SYMBOL(generic_make_request);
+EXPORT_SYMBOL(blkdev_release_request);
+EXPORT_SYMBOL(generic_unplug_device);
+EXPORT_SYMBOL(blk_queue_bounce_limit);
+EXPORT_SYMBOL(blk_max_low_pfn);
+EXPORT_SYMBOL(blk_max_pfn);
+EXPORT_SYMBOL(blk_seg_merge_ok);
+EXPORT_SYMBOL(blk_nohighio);
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/drivers/char/Makefile
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/drivers/char/Makefile Tue Aug 9 15:17:45 2005
@@ -0,0 +1,361 @@
+#
+# Makefile for the kernel character device drivers.
+#
+# Note! Dependencies are done automagically by 'make dep', which also
+# removes any old dependencies. DON'T put your own dependencies here
+# unless it's something special (ie not a .c file).
+#
+# Note 2! The CFLAGS definitions are now inherited from the
+# parent makes..
+#
+
+#
+# This file contains the font map for the default (hardware) font
+#
+FONTMAPFILE = cp437.uni
+
+O_TARGET := char.o
+
+obj-y += mem.o tty_io.o n_tty.o tty_ioctl.o raw.o pty.o misc.o random.o
+
+# All of the (potential) objects that export symbols.
+# This list comes from 'grep -l EXPORT_SYMBOL *.[hc]'.
+
+export-objs := busmouse.o console.o keyboard.o sysrq.o \
+ misc.o pty.o random.o selection.o serial.o \
+ sonypi.o tty_io.o tty_ioctl.o generic_serial.o \
+ au1000_gpio.o vac-serial.o hp_psaux.o nvram.o \
+ scx200.o fetchop.o
+
+mod-subdirs := joystick ftape drm drm-4.0 pcmcia
+
+list-multi :=
+
+KEYMAP =defkeymap.o
+KEYBD =pc_keyb.o
+CONSOLE =console.o
+SERIAL =serial.o
+
+ifeq ($(ARCH),xen)
+ ifneq ($(CONFIG_XEN_PHYSDEV_ACCESS),y)
+ KEYBD =
+ endif
+endif
+
+ifeq ($(ARCH),s390)
+ KEYMAP =
+ KEYBD =
+ CONSOLE =
+ SERIAL =
+endif
+
+ifeq ($(ARCH),mips)
+ ifneq ($(CONFIG_PC_KEYB),y)
+ KEYBD =
+ endif
+ ifeq ($(CONFIG_VR41XX_KIU),y)
+ KEYMAP =
+ KEYBD = vr41xx_keyb.o
+ endif
+endif
+
+ifeq ($(ARCH),s390x)
+ KEYMAP =
+ KEYBD =
+ CONSOLE =
+ SERIAL =
+endif
+
+ifeq ($(ARCH),m68k)
+ ifdef CONFIG_AMIGA
+ KEYBD = amikeyb.o
+ else
+ ifndef CONFIG_MAC
+ KEYBD =
+ endif
+ endif
+ SERIAL =
+endif
+
+ifeq ($(ARCH),parisc)
+ ifdef CONFIG_GSC_PS2
+ KEYBD = hp_psaux.o hp_keyb.o
+ else
+ KEYBD =
+ endif
+ ifdef CONFIG_SERIAL_MUX
+ CONSOLE += mux.o
+ endif
+ ifdef CONFIG_PDC_CONSOLE
+ CONSOLE += pdc_console.o
+ endif
+endif
+
+ifdef CONFIG_Q40
+ KEYBD += q40_keyb.o
+ SERIAL = serial.o
+endif
+
+ifdef CONFIG_APOLLO
+ KEYBD += dn_keyb.o
+endif
+
+ifeq ($(ARCH),parisc)
+ ifdef CONFIG_GSC_PS2
+ KEYBD = hp_psaux.o hp_keyb.o
+ else
+ KEYBD =
+ endif
+ ifdef CONFIG_PDC_CONSOLE
+ CONSOLE += pdc_console.o
+ endif
+endif
+
+ifeq ($(ARCH),arm)
+ ifneq ($(CONFIG_PC_KEYMAP),y)
+ KEYMAP =
+ endif
+ ifneq ($(CONFIG_PC_KEYB),y)
+ KEYBD =
+ endif
+endif
+
+ifeq ($(ARCH),sh)
+ KEYMAP =
+ KEYBD =
+ CONSOLE =
+ ifeq ($(CONFIG_SH_HP600),y)
+ KEYMAP = defkeymap.o
+ KEYBD = scan_keyb.o hp600_keyb.o
+ CONSOLE = console.o
+ endif
+ ifeq ($(CONFIG_SH_DMIDA),y)
+ # DMIDA does not connect the HD64465 PS/2 keyboard port
+ # but we allow for USB keyboards to be plugged in.
+ KEYMAP = defkeymap.o
+ KEYBD = # hd64465_keyb.o pc_keyb.o
+ CONSOLE = console.o
+ endif
+ ifeq ($(CONFIG_SH_EC3104),y)
+ KEYMAP = defkeymap.o
+ KEYBD = ec3104_keyb.o
+ CONSOLE = console.o
+ endif
+ ifeq ($(CONFIG_SH_DREAMCAST),y)
+ KEYMAP = defkeymap.o
+ KEYBD =
+ CONSOLE = console.o
+ endif
+endif
+
+ifeq ($(CONFIG_DECSTATION),y)
+ KEYMAP =
+ KEYBD =
+endif
+
+ifeq ($(CONFIG_BAGET_MIPS),y)
+ KEYBD =
+ SERIAL = vac-serial.o
+endif
+
+ifeq ($(CONFIG_NINO),y)
+ SERIAL =
+endif
+
+ifneq ($(CONFIG_SUN_SERIAL),)
+ SERIAL =
+endif
+
+ifeq ($(CONFIG_QTRONIX_KEYBOARD),y)
+ KEYBD = qtronix.o
+ KEYMAP = qtronixmap.o
+endif
+
+ifeq ($(CONFIG_DUMMY_KEYB),y)
+ KEYBD = dummy_keyb.o
+endif
+
+obj-$(CONFIG_VT) += vt.o vc_screen.o consolemap.o consolemap_deftbl.o $(CONSOLE) selection.o
+obj-$(CONFIG_SERIAL) += $(SERIAL)
+obj-$(CONFIG_PARPORT_SERIAL) += parport_serial.o
+obj-$(CONFIG_SERIAL_HCDP) += hcdp_serial.o
+obj-$(CONFIG_SERIAL_21285) += serial_21285.o
+obj-$(CONFIG_SERIAL_SA1100) += serial_sa1100.o
+obj-$(CONFIG_SERIAL_AMBA) += serial_amba.o
+obj-$(CONFIG_TS_AU1X00_ADS7846) += au1000_ts.o
+obj-$(CONFIG_SERIAL_DEC) += decserial.o
+
+ifndef CONFIG_SUN_KEYBOARD
+ obj-$(CONFIG_VT) += keyboard.o $(KEYMAP) $(KEYBD)
+else
+ obj-$(CONFIG_PCI) += keyboard.o $(KEYMAP)
+endif
+
+obj-$(CONFIG_HIL) += hp_keyb.o
+obj-$(CONFIG_MAGIC_SYSRQ) += sysrq.o
+obj-$(CONFIG_ATARI_DSP56K) += dsp56k.o
+obj-$(CONFIG_ROCKETPORT) += rocket.o
+obj-$(CONFIG_MOXA_SMARTIO) += mxser.o
+obj-$(CONFIG_MOXA_INTELLIO) += moxa.o
+obj-$(CONFIG_DIGI) += pcxx.o
+obj-$(CONFIG_DIGIEPCA) += epca.o
+obj-$(CONFIG_CYCLADES) += cyclades.o
+obj-$(CONFIG_STALLION) += stallion.o
+obj-$(CONFIG_ISTALLION) += istallion.o
+obj-$(CONFIG_SIBYTE_SB1250_DUART) += sb1250_duart.o
+obj-$(CONFIG_COMPUTONE) += ip2.o ip2main.o
+obj-$(CONFIG_RISCOM8) += riscom8.o
+obj-$(CONFIG_ISI) += isicom.o
+obj-$(CONFIG_ESPSERIAL) += esp.o
+obj-$(CONFIG_SYNCLINK) += synclink.o
+obj-$(CONFIG_SYNCLINKMP) += synclinkmp.o
+obj-$(CONFIG_N_HDLC) += n_hdlc.o
+obj-$(CONFIG_SPECIALIX) += specialix.o
+obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o
+obj-$(CONFIG_A2232) += ser_a2232.o generic_serial.o
+obj-$(CONFIG_SX) += sx.o generic_serial.o
+obj-$(CONFIG_RIO) += rio/rio.o generic_serial.o
+obj-$(CONFIG_SH_SCI) += sh-sci.o generic_serial.o
+obj-$(CONFIG_SERIAL167) += serial167.o
+obj-$(CONFIG_MVME147_SCC) += generic_serial.o vme_scc.o
+obj-$(CONFIG_MVME162_SCC) += generic_serial.o vme_scc.o
+obj-$(CONFIG_BVME6000_SCC) += generic_serial.o vme_scc.o
+obj-$(CONFIG_HVC_CONSOLE) += hvc_console.o
+obj-$(CONFIG_SERIAL_TX3912) += generic_serial.o serial_tx3912.o
+obj-$(CONFIG_TXX927_SERIAL) += serial_txx927.o
+obj-$(CONFIG_SERIAL_TXX9) += generic_serial.o serial_txx9.o
+obj-$(CONFIG_IP22_SERIAL) += sgiserial.o
+obj-$(CONFIG_AU1X00_UART) += au1x00-serial.o
+obj-$(CONFIG_SGI_L1_SERIAL) += sn_serial.o
+
+subdir-$(CONFIG_RIO) += rio
+subdir-$(CONFIG_INPUT) += joystick
+
+obj-$(CONFIG_ATIXL_BUSMOUSE) += atixlmouse.o
+obj-$(CONFIG_LOGIBUSMOUSE) += logibusmouse.o
+obj-$(CONFIG_PRINTER) += lp.o
+obj-$(CONFIG_TIPAR) += tipar.o
+obj-$(CONFIG_OBMOUSE) += obmouse.o
+
+ifeq ($(CONFIG_INPUT),y)
+obj-y += joystick/js.o
+endif
+
+obj-$(CONFIG_FETCHOP) += fetchop.o
+obj-$(CONFIG_BUSMOUSE) += busmouse.o
+obj-$(CONFIG_DTLK) += dtlk.o
+obj-$(CONFIG_R3964) += n_r3964.o
+obj-$(CONFIG_APPLICOM) += applicom.o
+obj-$(CONFIG_SONYPI) += sonypi.o
+obj-$(CONFIG_MS_BUSMOUSE) += msbusmouse.o
+obj-$(CONFIG_82C710_MOUSE) += qpmouse.o
+obj-$(CONFIG_AMIGAMOUSE) += amigamouse.o
+obj-$(CONFIG_ATARIMOUSE) += atarimouse.o
+obj-$(CONFIG_ADBMOUSE) += adbmouse.o
+obj-$(CONFIG_PC110_PAD) += pc110pad.o
+obj-$(CONFIG_MK712_MOUSE) += mk712.o
+obj-$(CONFIG_RTC) += rtc.o
+obj-$(CONFIG_GEN_RTC) += genrtc.o
+obj-$(CONFIG_EFI_RTC) += efirtc.o
+obj-$(CONFIG_SGI_DS1286) += ds1286.o
+obj-$(CONFIG_MIPS_RTC) += mips_rtc.o
+obj-$(CONFIG_SGI_IP27_RTC) += ip27-rtc.o
+ifeq ($(CONFIG_PPC),)
+ obj-$(CONFIG_NVRAM) += nvram.o
+endif
+obj-$(CONFIG_TOSHIBA) += toshiba.o
+obj-$(CONFIG_I8K) += i8k.o
+obj-$(CONFIG_DS1620) += ds1620.o
+obj-$(CONFIG_DS1742) += ds1742.o
+obj-$(CONFIG_INTEL_RNG) += i810_rng.o
+obj-$(CONFIG_AMD_RNG) += amd768_rng.o
+obj-$(CONFIG_HW_RANDOM) += hw_random.o
+obj-$(CONFIG_AMD_PM768) += amd76x_pm.o
+obj-$(CONFIG_BRIQ_PANEL) += briq_panel.o
+
+obj-$(CONFIG_ITE_GPIO) += ite_gpio.o
+obj-$(CONFIG_AU1X00_GPIO) += au1000_gpio.o
+obj-$(CONFIG_AU1X00_USB_TTY) += au1000_usbtty.o
+obj-$(CONFIG_AU1X00_USB_RAW) += au1000_usbraw.o
+obj-$(CONFIG_COBALT_LCD) += lcd.o
+
+obj-$(CONFIG_QIC02_TAPE) += tpqic02.o
+
+subdir-$(CONFIG_FTAPE) += ftape
+subdir-$(CONFIG_DRM_OLD) += drm-4.0
+subdir-$(CONFIG_DRM_NEW) += drm
+subdir-$(CONFIG_PCMCIA) += pcmcia
+subdir-$(CONFIG_AGP) += agp
+
+ifeq ($(CONFIG_FTAPE),y)
+obj-y += ftape/ftape.o
+endif
+
+obj-$(CONFIG_H8) += h8.o
+obj-$(CONFIG_PPDEV) += ppdev.o
+obj-$(CONFIG_DZ) += dz.o
+obj-$(CONFIG_NWBUTTON) += nwbutton.o
+obj-$(CONFIG_NWFLASH) += nwflash.o
+obj-$(CONFIG_SCx200) += scx200.o
+obj-$(CONFIG_SCx200_GPIO) += scx200_gpio.o
+
+# Only one watchdog can succeed. We probe the hardware watchdog
+# drivers first, then the softdog driver. This means if your hardware
+# watchdog dies or is 'borrowed' for some reason the software watchdog
+# still gives you some cover.
+
+obj-$(CONFIG_PCWATCHDOG) += pcwd.o
+obj-$(CONFIG_ACQUIRE_WDT) += acquirewdt.o
+obj-$(CONFIG_ADVANTECH_WDT) += advantechwdt.o
+obj-$(CONFIG_IB700_WDT) += ib700wdt.o
+obj-$(CONFIG_MIXCOMWD) += mixcomwd.o
+obj-$(CONFIG_60XX_WDT) += sbc60xxwdt.o
+obj-$(CONFIG_W83877F_WDT) += w83877f_wdt.o
+obj-$(CONFIG_SC520_WDT) += sc520_wdt.o
+obj-$(CONFIG_WDT) += wdt.o
+obj-$(CONFIG_WDTPCI) += wdt_pci.o
+obj-$(CONFIG_21285_WATCHDOG) += wdt285.o
+obj-$(CONFIG_977_WATCHDOG) += wdt977.o
+obj-$(CONFIG_I810_TCO) += i810-tco.o
+obj-$(CONFIG_MACHZ_WDT) += machzwd.o
+obj-$(CONFIG_SH_WDT) += shwdt.o
+obj-$(CONFIG_EUROTECH_WDT) += eurotechwdt.o
+obj-$(CONFIG_ALIM7101_WDT) += alim7101_wdt.o
+obj-$(CONFIG_ALIM1535_WDT) += alim1535d_wdt.o
+obj-$(CONFIG_INDYDOG) += indydog.o
+obj-$(CONFIG_SC1200_WDT) += sc1200wdt.o
+obj-$(CONFIG_SCx200_WDT) += scx200_wdt.o
+obj-$(CONFIG_WAFER_WDT) += wafer5823wdt.o
+obj-$(CONFIG_SOFT_WATCHDOG) += softdog.o
+obj-$(CONFIG_INDYDOG) += indydog.o
+obj-$(CONFIG_8xx_WDT) += mpc8xx_wdt.o
+
+subdir-$(CONFIG_MWAVE) += mwave
+ifeq ($(CONFIG_MWAVE),y)
+ obj-y += mwave/mwave.o
+endif
+
+subdir-$(CONFIG_IPMI_HANDLER) += ipmi
+ifeq ($(CONFIG_IPMI_HANDLER),y)
+ obj-y += ipmi/ipmi.o
+endif
+
+include $(TOPDIR)/Rules.make
+
+fastdep:
+
+conmakehash: conmakehash.c
+ $(HOSTCC) $(HOSTCFLAGS) -o conmakehash conmakehash.c
+
+consolemap_deftbl.c: $(FONTMAPFILE) conmakehash
+ ./conmakehash $(FONTMAPFILE) > consolemap_deftbl.c
+
+consolemap_deftbl.o: consolemap_deftbl.c $(TOPDIR)/include/linux/types.h
+
+.DELETE_ON_ERROR:
+
+defkeymap.c: defkeymap.map
+ set -e ; loadkeys --mktable $< | sed -e 's/^static *//' > $@
+
+qtronixmap.c: qtronixmap.map
+ set -e ; loadkeys --mktable $< | sed -e 's/^static *//' > $@
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/drivers/char/mem.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/drivers/char/mem.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,812 @@
+/*
+ * linux/drivers/char/mem.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ *
+ * Added devfs support.
+ * Jan-11-1998, C. Scott Ananian <cananian@xxxxxxxxxxxxxxxxxxxx>
+ * Shared /dev/zero mmaping support, Feb 2000, Kanoj Sarcar <kanoj@xxxxxxx>
+ *
+ * MODIFIED FOR XEN by Keir Fraser, 10th July 2003.
+ * Linux running on Xen has strange semantics for /dev/mem and /dev/kmem!!
+ * 1. mmap will not work on /dev/kmem
+ * 2. mmap on /dev/mem interprets the 'file offset' as a machine address
+ * rather than a physical address.
+ * I don't believe anyone sane mmaps /dev/kmem, but /dev/mem is mmapped
+ * to get at memory-mapped I/O spaces (eg. the VESA X server does this).
+ * For this to work at all we need to expect machine addresses.
+ * Reading/writing of /dev/kmem expects kernel virtual addresses, as usual.
+ * Reading/writing of /dev/mem expects 'physical addresses' as usual -- this
+ * is because /dev/mem can only read/write existing kernel mappings, which
+ * will be normal RAM, and we should present pseudo-physical layout for all
+ * except I/O (which is the sticky case that mmap is hacked to deal with).
+ */
+
+#include <linux/config.h>
+#include <linux/mm.h>
+#include <linux/miscdevice.h>
+#include <linux/tpqic02.h>
+#include <linux/ftape.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mman.h>
+#include <linux/random.h>
+#include <linux/init.h>
+#include <linux/raw.h>
+#include <linux/tty.h>
+#include <linux/capability.h>
+#include <linux/ptrace.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/pgalloc.h>
+
+#ifdef CONFIG_I2C
+extern int i2c_init_all(void);
+#endif
+#ifdef CONFIG_FB
+extern void fbmem_init(void);
+#endif
+#ifdef CONFIG_PROM_CONSOLE
+extern void prom_con_init(void);
+#endif
+#ifdef CONFIG_MDA_CONSOLE
+extern void mda_console_init(void);
+#endif
+#if defined(CONFIG_S390_TAPE) && defined(CONFIG_S390_TAPE_CHAR)
+extern void tapechar_init(void);
+#endif
+
+static ssize_t do_write_mem(struct file * file, void *p, unsigned long realp,
+ const char * buf, size_t count, loff_t *ppos)
+{
+ ssize_t written;
+
+ written = 0;
+#if defined(__sparc__) || defined(__mc68000__)
+ /* we don't have page 0 mapped on sparc and m68k.. */
+ if (realp < PAGE_SIZE) {
+ unsigned long sz = PAGE_SIZE-realp;
+ if (sz > count) sz = count;
+ /* Hmm. Do something? */
+ buf+=sz;
+ p+=sz;
+ count-=sz;
+ written+=sz;
+ }
+#endif
+ if (copy_from_user(p, buf, count))
+ return -EFAULT;
+ written += count;
+ *ppos = realp + written;
+ return written;
+}
+
+
+/*
+ * This function reads the *physical* memory. The f_pos points directly to the
+ * memory location.
+ */
+static ssize_t read_mem(struct file * file, char * buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long p = *ppos;
+ unsigned long end_mem;
+ ssize_t read;
+
+ end_mem = __pa(high_memory);
+ if (p >= end_mem)
+ return 0;
+ if (count > end_mem - p)
+ count = end_mem - p;
+ read = 0;
+#if defined(__sparc__) || defined(__mc68000__)
+ /* we don't have page 0 mapped on sparc and m68k.. */
+ if (p < PAGE_SIZE) {
+ unsigned long sz = PAGE_SIZE-p;
+ if (sz > count)
+ sz = count;
+ if (sz > 0) {
+ if (clear_user(buf, sz))
+ return -EFAULT;
+ buf += sz;
+ p += sz;
+ count -= sz;
+ read += sz;
+ }
+ }
+#endif
+ if (copy_to_user(buf, __va(p), count))
+ return -EFAULT;
+ read += count;
+ *ppos = p + read;
+ return read;
+}
+
+static ssize_t write_mem(struct file * file, const char * buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long p = *ppos;
+ unsigned long end_mem;
+
+ end_mem = __pa(high_memory);
+ if (p >= end_mem)
+ return 0;
+ if (count > end_mem - p)
+ count = end_mem - p;
+ return do_write_mem(file, __va(p), p, buf, count, ppos);
+}
+
+#ifndef pgprot_noncached
+
+/*
+ * This should probably be per-architecture in <asm/pgtable.h>
+ */
+static inline pgprot_t pgprot_noncached(pgprot_t _prot)
+{
+ unsigned long prot = pgprot_val(_prot);
+
+#if defined(__i386__) || defined(__x86_64__)
+ /* On PPro and successors, PCD alone doesn't always mean
+ uncached because of interactions with the MTRRs. PCD | PWT
+ means definitely uncached. */
+ if (boot_cpu_data.x86 > 3)
+ prot |= _PAGE_PCD | _PAGE_PWT;
+#elif defined(__powerpc__)
+ prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;
+#elif defined(__mc68000__)
+#ifdef SUN3_PAGE_NOCACHE
+ if (MMU_IS_SUN3)
+ prot |= SUN3_PAGE_NOCACHE;
+ else
+#endif
+ if (MMU_IS_851 || MMU_IS_030)
+ prot |= _PAGE_NOCACHE030;
+ /* Use no-cache mode, serialized */
+ else if (MMU_IS_040 || MMU_IS_060)
+ prot = (prot & _CACHEMASK040) | _PAGE_NOCACHE_S;
+#endif
+
+ return __pgprot(prot);
+}
+
+#endif /* !pgprot_noncached */
+
+/*
+ * Architectures vary in how they handle caching for addresses
+ * outside of main memory.
+ */
+static inline int noncached_address(unsigned long addr)
+{
+#if defined(__i386__)
+ /*
+ * On the PPro and successors, the MTRRs are used to set
+ * memory types for physical addresses outside main memory,
+ * so blindly setting PCD or PWT on those pages is wrong.
+ * For Pentiums and earlier, the surround logic should disable
+ * caching for the high addresses through the KEN pin, but
+ * we maintain the tradition of paranoia in this code.
+ */
+ return !( test_bit(X86_FEATURE_MTRR, &boot_cpu_data.x86_capability) ||
+ test_bit(X86_FEATURE_K6_MTRR, &boot_cpu_data.x86_capability) ||
+ test_bit(X86_FEATURE_CYRIX_ARR, &boot_cpu_data.x86_capability) ||
+ test_bit(X86_FEATURE_CENTAUR_MCR, &boot_cpu_data.x86_capability) )
+ && addr >= __pa(high_memory);
+#else
+ return addr >= __pa(high_memory);
+#endif
+}
+
+#if !defined(CONFIG_XEN)
+static int mmap_mem(struct file * file, struct vm_area_struct * vma)
+{
+ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+
+ /*
+ * Accessing memory above the top of what the kernel knows about or
+ * through a file pointer that was marked O_SYNC will be
+ * done non-cached.
+ */
+ if (noncached_address(offset) || (file->f_flags & O_SYNC))
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ /* Don't try to swap out physical pages.. */
+ vma->vm_flags |= VM_RESERVED;
+
+ /*
+ * Don't dump addresses that are not real memory to a core file.
+ */
+ if (offset >= __pa(high_memory) || (file->f_flags & O_SYNC))
+ vma->vm_flags |= VM_IO;
+
+ if (remap_page_range(vma->vm_start, offset, vma->vm_end-vma->vm_start,
+ vma->vm_page_prot))
+ return -EAGAIN;
+ return 0;
+}
+#elif !defined(CONFIG_XEN_PRIVILEGED_GUEST)
+static int mmap_mem(struct file * file, struct vm_area_struct * vma)
+{
+ return -ENXIO;
+}
+#else
+static int mmap_mem(struct file * file, struct vm_area_struct * vma)
+{
+ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+
+ if (!(xen_start_info.flags & SIF_PRIVILEGED))
+ return -ENXIO;
+
+ /* DONTCOPY is essential for Xen as copy_page_range is broken. */
+ vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ if (direct_remap_area_pages(vma->vm_mm, vma->vm_start, offset,
+ vma->vm_end-vma->vm_start, vma->vm_page_prot,
+ DOMID_IO))
+ return -EAGAIN;
+ return 0;
+}
+#endif /* CONFIG_XEN */
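
Given the machine-address semantics described at the top of this file, a user-space consumer such as an X server maps I/O space roughly like this (a sketch; the 0xA0000 offset and the leaked fd are purely illustrative):

/* User-space sketch, not part of the patch: map one page of
 * memory-mapped I/O through /dev/mem. Under Xen the mmap offset
 * is interpreted as a machine address. */
#include <fcntl.h>
#include <sys/mman.h>

void *example_map_io_page(void)
{
        int fd = open("/dev/mem", O_RDWR | O_SYNC);

        if (fd < 0)
                return MAP_FAILED;
        return mmap(0, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
                    fd, 0xA0000 /* illustrative machine address */);
}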
+
+/*
+ * This function reads the *virtual* memory as seen by the kernel.
+ */
+static ssize_t read_kmem(struct file *file, char *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long p = *ppos;
+ ssize_t read = 0;
+ ssize_t virtr = 0;
+ char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
+
+ if (p < (unsigned long) high_memory) {
+ read = count;
+ if (count > (unsigned long) high_memory - p)
+ read = (unsigned long) high_memory - p;
+
+#if defined(__sparc__) || defined(__mc68000__)
+ /* we don't have page 0 mapped on sparc and m68k.. */
+ if (p < PAGE_SIZE && read > 0) {
+ size_t tmp = PAGE_SIZE - p;
+ if (tmp > read) tmp = read;
+ if (clear_user(buf, tmp))
+ return -EFAULT;
+ buf += tmp;
+ p += tmp;
+ read -= tmp;
+ count -= tmp;
+ }
+#endif
+ if (copy_to_user(buf, (char *)p, read))
+ return -EFAULT;
+ p += read;
+ buf += read;
+ count -= read;
+ }
+
+ if (count > 0) {
+ kbuf = (char *)__get_free_page(GFP_KERNEL);
+ if (!kbuf)
+ return -ENOMEM;
+ while (count > 0) {
+ int len = count;
+
+ if (len > PAGE_SIZE)
+ len = PAGE_SIZE;
+ len = vread(kbuf, (char *)p, len);
+ if (!len)
+ break;
+ if (copy_to_user(buf, kbuf, len)) {
+ free_page((unsigned long)kbuf);
+ return -EFAULT;
+ }
+ count -= len;
+ buf += len;
+ virtr += len;
+ p += len;
+ }
+ free_page((unsigned long)kbuf);
+ }
+ *ppos = p;
+ return virtr + read;
+}
+
+extern long vwrite(char *buf, char *addr, unsigned long count);
+
+/*
+ * This function writes to the *virtual* memory as seen by the kernel.
+ */
+static ssize_t write_kmem(struct file * file, const char * buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long p = *ppos;
+ ssize_t wrote = 0;
+ ssize_t virtr = 0;
+ char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
+
+ if (p < (unsigned long) high_memory) {
+ wrote = count;
+ if (count > (unsigned long) high_memory - p)
+ wrote = (unsigned long) high_memory - p;
+
+ wrote = do_write_mem(file, (void*)p, p, buf, wrote, ppos);
+
+ p += wrote;
+ buf += wrote;
+ count -= wrote;
+ }
+
+ if (count > 0) {
+ kbuf = (char *)__get_free_page(GFP_KERNEL);
+ if (!kbuf)
+ return -ENOMEM;
+ while (count > 0) {
+ int len = count;
+
+ if (len > PAGE_SIZE)
+ len = PAGE_SIZE;
+ if (len && copy_from_user(kbuf, buf, len)) {
+ free_page((unsigned long)kbuf);
+ return -EFAULT;
+ }
+ len = vwrite(kbuf, (char *)p, len);
+ count -= len;
+ buf += len;
+ virtr += len;
+ p += len;
+ }
+ free_page((unsigned long)kbuf);
+ }
+
+ *ppos = p;
+ return virtr + wrote;
+}
+
+#if defined(CONFIG_ISA) || !defined(__mc68000__)
+static ssize_t read_port(struct file * file, char * buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long i = *ppos;
+ char *tmp = buf;
+
+ if (verify_area(VERIFY_WRITE,buf,count))
+ return -EFAULT;
+ while (count-- > 0 && i < 65536) {
+ if (__put_user(inb(i),tmp) < 0)
+ return -EFAULT;
+ i++;
+ tmp++;
+ }
+ *ppos = i;
+ return tmp-buf;
+}
+
+static ssize_t write_port(struct file * file, const char * buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long i = *ppos;
+ const char * tmp = buf;
+
+ if (verify_area(VERIFY_READ,buf,count))
+ return -EFAULT;
+ while (count-- > 0 && i < 65536) {
+ char c;
+ if (__get_user(c, tmp))
+ return -EFAULT;
+ outb(c,i);
+ i++;
+ tmp++;
+ }
+ *ppos = i;
+ return tmp-buf;
+}
+#endif
+
+static ssize_t read_null(struct file * file, char * buf,
+ size_t count, loff_t *ppos)
+{
+ return 0;
+}
+
+static ssize_t write_null(struct file * file, const char * buf,
+ size_t count, loff_t *ppos)
+{
+ return count;
+}
+
+/*
+ * For fun, we are using the MMU for this.
+ */
+static inline size_t read_zero_pagealigned(char * buf, size_t size)
+{
+ struct mm_struct *mm;
+ struct vm_area_struct * vma;
+ unsigned long addr=(unsigned long)buf;
+
+ mm = current->mm;
+ /* Oops, this was forgotten before. -ben */
+ down_read(&mm->mmap_sem);
+
+ /* For private mappings, just map in zero pages. */
+ for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
+ unsigned long count;
+
+ if (vma->vm_start > addr || (vma->vm_flags & VM_WRITE) == 0)
+ goto out_up;
+ if (vma->vm_flags & VM_SHARED)
+ break;
+ count = vma->vm_end - addr;
+ if (count > size)
+ count = size;
+
+ zap_page_range(mm, addr, count);
+ zeromap_page_range(addr, count, PAGE_COPY);
+
+ size -= count;
+ buf += count;
+ addr += count;
+ if (size == 0)
+ goto out_up;
+ }
+
+ up_read(&mm->mmap_sem);
+
+ /* The shared case is hard. Let's do the conventional zeroing. */
+ do {
+ unsigned long unwritten = clear_user(buf, PAGE_SIZE);
+ if (unwritten)
+ return size + unwritten - PAGE_SIZE;
+ if (current->need_resched)
+ schedule();
+ buf += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ } while (size);
+
+ return size;
+out_up:
+ up_read(&mm->mmap_sem);
+ return size;
+}
+
+static ssize_t read_zero(struct file * file, char * buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long left, unwritten, written = 0;
+
+ if (!count)
+ return 0;
+
+ if (!access_ok(VERIFY_WRITE, buf, count))
+ return -EFAULT;
+
+ left = count;
+
+ /* do we want to be clever? Arbitrary cut-off */
+ if (count >= PAGE_SIZE*4) {
+ unsigned long partial;
+
+ /* How much left of the page? */
+ partial = (PAGE_SIZE-1) & -(unsigned long) buf;
+ unwritten = clear_user(buf, partial);
+ written = partial - unwritten;
+ if (unwritten)
+ goto out;
+ left -= partial;
+ buf += partial;
+ unwritten = read_zero_pagealigned(buf, left & PAGE_MASK);
+ written += (left & PAGE_MASK) - unwritten;
+ if (unwritten)
+ goto out;
+ buf += left & PAGE_MASK;
+ left &= ~PAGE_MASK;
+ }
+ unwritten = clear_user(buf, left);
+ written += left - unwritten;
+out:
+ return written ? written : -EFAULT;
+}
+
+static int mmap_zero(struct file * file, struct vm_area_struct * vma)
+{
+ if (vma->vm_flags & VM_SHARED)
+ return shmem_zero_setup(vma);
+ if (zeromap_page_range(vma->vm_start, vma->vm_end - vma->vm_start, vma->vm_page_prot))
+ return -EAGAIN;
+ return 0;
+}
+
+static ssize_t write_full(struct file * file, const char * buf,
+ size_t count, loff_t *ppos)
+{
+ return -ENOSPC;
+}
+
+/*
+ * Special lseek() function for /dev/null and /dev/zero. Most notably, you
+ * can fopen() both devices with "a" now. This was previously impossible.
+ * -- SRB.
+ */
+
+static loff_t null_lseek(struct file * file, loff_t offset, int orig)
+{
+ return file->f_pos = 0;
+}
+
+/*
+ * The memory devices use the full 32/64 bits of the offset, and so we cannot
+ * check against negative addresses: they are ok. The return value is weird,
+ * though, in that case (0).
+ *
+ * also note that seeking relative to the "end of file" isn't supported:
+ * it has no meaning, so it returns -EINVAL.
+ */
+static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
+{
+ loff_t ret;
+
+ switch (orig) {
+ case 0:
+ file->f_pos = offset;
+ ret = file->f_pos;
+ force_successful_syscall_return();
+ break;
+ case 1:
+ file->f_pos += offset;
+ ret = file->f_pos;
+ force_successful_syscall_return();
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ return ret;
+}
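
In user-space terms this means /dev/mem is addressed with a plain lseek() on the physical offset (SEEK_END returns -EINVAL); a hedged sketch:

/* User-space sketch: read len bytes starting at physical address phys. */
#include <fcntl.h>
#include <unistd.h>

ssize_t example_read_phys(unsigned long phys, void *buf, size_t len)
{
        int fd = open("/dev/mem", O_RDONLY);
        ssize_t n = -1;

        if (fd >= 0) {
                if (lseek(fd, (off_t)phys, SEEK_SET) != (off_t)-1)
                        n = read(fd, buf, len);
                close(fd);
        }
        return n;
}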
+
+static int open_port(struct inode * inode, struct file * filp)
+{
+ return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
+}
+
+struct page *kmem_vm_nopage(struct vm_area_struct *vma, unsigned long address, int write)
+{
+ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+ unsigned long kaddr;
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *ptep, pte;
+ struct page *page = NULL;
+
+ /* address is user VA; convert to kernel VA of desired page */
+ kaddr = (address - vma->vm_start) + offset;
+ kaddr = VMALLOC_VMADDR(kaddr);
+
+ spin_lock(&init_mm.page_table_lock);
+
+ /* Lookup page structure for kernel VA */
+ pgd = pgd_offset(&init_mm, kaddr);
+ if (pgd_none(*pgd) || pgd_bad(*pgd))
+ goto out;
+ pmd = pmd_offset(pgd, kaddr);
+ if (pmd_none(*pmd) || pmd_bad(*pmd))
+ goto out;
+ ptep = pte_offset(pmd, kaddr);
+ if (!ptep)
+ goto out;
+ pte = *ptep;
+ if (!pte_present(pte))
+ goto out;
+ if (write && !pte_write(pte))
+ goto out;
+ page = pte_page(pte);
+ if (!VALID_PAGE(page)) {
+ page = NULL;
+ goto out;
+ }
+
+ /* Increment reference count on page */
+ get_page(page);
+
+out:
+ spin_unlock(&init_mm.page_table_lock);
+
+ return page;
+}
+
+struct vm_operations_struct kmem_vm_ops = {
+ nopage: kmem_vm_nopage,
+};
+
+static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
+{
+ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+ unsigned long size = vma->vm_end - vma->vm_start;
+
+ /*
+ * If the user is not attempting to mmap a high memory address then
+ * the standard mmap_mem mechanism will work. High memory addresses
+ * need special handling, as remap_page_range expects a physically-
+ * contiguous range of kernel addresses (such as obtained in kmalloc).
+ */
+ if ((offset + size) < (unsigned long) high_memory)
+ return mmap_mem(file, vma);
+
+ /*
+ * Accessing memory above the top of what the kernel knows about or
+ * through a file pointer that was marked O_SYNC will be
+ * done non-cached.
+ */
+ if (noncached_address(offset) || (file->f_flags & O_SYNC))
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ /* Don't do anything here; "nopage" will fill the holes */
+ vma->vm_ops = &kmem_vm_ops;
+
+ /* Don't try to swap out physical pages.. */
+ vma->vm_flags |= VM_RESERVED;
+
+ /*
+ * Don't dump addresses that are not real memory to a core file.
+ */
+ vma->vm_flags |= VM_IO;
+
+ return 0;
+}
+
+#define zero_lseek null_lseek
+#define full_lseek null_lseek
+#define write_zero write_null
+#define read_full read_zero
+#define open_mem open_port
+#define open_kmem open_mem
+
+static struct file_operations mem_fops = {
+ llseek: memory_lseek,
+ read: read_mem,
+ write: write_mem,
+ mmap: mmap_mem,
+ open: open_mem,
+};
+
+static struct file_operations kmem_fops = {
+ llseek: memory_lseek,
+ read: read_kmem,
+ write: write_kmem,
+#if !defined(CONFIG_XEN)
+ mmap: mmap_kmem,
+#endif
+ open: open_kmem,
+};
+
+static struct file_operations null_fops = {
+ llseek: null_lseek,
+ read: read_null,
+ write: write_null,
+};
+
+#if defined(CONFIG_ISA) || !defined(__mc68000__)
+static struct file_operations port_fops = {
+ llseek: memory_lseek,
+ read: read_port,
+ write: write_port,
+ open: open_port,
+};
+#endif
+
+static struct file_operations zero_fops = {
+ llseek: zero_lseek,
+ read: read_zero,
+ write: write_zero,
+ mmap: mmap_zero,
+};
+
+static struct file_operations full_fops = {
+ llseek: full_lseek,
+ read: read_full,
+ write: write_full,
+};
+
+static int memory_open(struct inode * inode, struct file * filp)
+{
+ switch (MINOR(inode->i_rdev)) {
+ case 1:
+ filp->f_op = &mem_fops;
+ break;
+ case 2:
+ filp->f_op = &kmem_fops;
+ break;
+ case 3:
+ filp->f_op = &null_fops;
+ break;
+#if defined(CONFIG_ISA) || !defined(__mc68000__)
+ case 4:
+ filp->f_op = &port_fops;
+ break;
+#endif
+ case 5:
+ filp->f_op = &zero_fops;
+ break;
+ case 7:
+ filp->f_op = &full_fops;
+ break;
+ case 8:
+ filp->f_op = &random_fops;
+ break;
+ case 9:
+ filp->f_op = &urandom_fops;
+ break;
+ default:
+ return -ENXIO;
+ }
+ if (filp->f_op && filp->f_op->open)
+ return filp->f_op->open(inode,filp);
+ return 0;
+}
+
+void __init memory_devfs_register (void)
+{
+ /* These are never unregistered */
+ static const struct {
+ unsigned short minor;
+ char *name;
+ umode_t mode;
+ struct file_operations *fops;
+ } list[] = { /* list of minor devices */
+ {1, "mem", S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
+ {2, "kmem", S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops},
+ {3, "null", S_IRUGO | S_IWUGO, &null_fops},
+#if defined(CONFIG_ISA) || !defined(__mc68000__)
+ {4, "port", S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},
+#endif
+ {5, "zero", S_IRUGO | S_IWUGO, &zero_fops},
+ {7, "full", S_IRUGO | S_IWUGO, &full_fops},
+ {8, "random", S_IRUGO | S_IWUSR, &random_fops},
+ {9, "urandom", S_IRUGO | S_IWUSR, &urandom_fops}
+ };
+ int i;
+
+ for (i=0; i<(sizeof(list)/sizeof(*list)); i++)
+ devfs_register (NULL, list[i].name, DEVFS_FL_NONE,
+ MEM_MAJOR, list[i].minor,
+ list[i].mode | S_IFCHR,
+ list[i].fops, NULL);
+}
+
+static struct file_operations memory_fops = {
+ open: memory_open, /* just a selector for the real open */
+};
+
+int __init chr_dev_init(void)
+{
+ if (devfs_register_chrdev(MEM_MAJOR,"mem",&memory_fops))
+ printk("unable to get major %d for memory devs\n", MEM_MAJOR);
+ memory_devfs_register();
+ rand_initialize();
+#ifdef CONFIG_I2C
+ i2c_init_all();
+#endif
+#if defined (CONFIG_FB)
+ fbmem_init();
+#endif
+#if defined (CONFIG_PROM_CONSOLE)
+ prom_con_init();
+#endif
+#if defined (CONFIG_MDA_CONSOLE)
+ mda_console_init();
+#endif
+ tty_init();
+#ifdef CONFIG_M68K_PRINTER
+ lp_m68k_init();
+#endif
+ misc_init();
+#if CONFIG_QIC02_TAPE
+ qic02_tape_init();
+#endif
+#ifdef CONFIG_FTAPE
+ ftape_init();
+#endif
+#if defined(CONFIG_S390_TAPE) && defined(CONFIG_S390_TAPE_CHAR)
+ tapechar_init();
+#endif
+ return 0;
+}
+
+__initcall(chr_dev_init);
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/drivers/char/tty_io.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/drivers/char/tty_io.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,2891 @@
+/*
+ * linux/drivers/char/tty_io.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+/*
+ * 'tty_io.c' gives an orthogonal feeling to tty's, be they consoles
+ * or rs-channels. It also implements echoing, cooked mode etc.
+ *
+ * Kill-line thanks to John T Kohl, who also corrected VMIN = VTIME = 0.
+ *
+ * Modified by Theodore Ts'o, 9/14/92, to dynamically allocate the
+ * tty_struct and tty_queue structures. Previously there was an array
+ * of 256 tty_struct's which was statically allocated, and the
+ * tty_queue structures were allocated at boot time. Both are now
+ * dynamically allocated only when the tty is open.
+ *
+ * Also restructured routines so that there is more of a separation
+ * between the high-level tty routines (tty_io.c and tty_ioctl.c) and
+ * the low-level tty routines (serial.c, pty.c, console.c). This
+ * makes for cleaner and more compact code. -TYT, 9/17/92
+ *
+ * Modified by Fred N. van Kempen, 01/29/93, to add line disciplines
+ * which can be dynamically activated and de-activated by the line
+ * discipline handling modules (like SLIP).
+ *
+ * NOTE: pay no attention to the line discipline code (yet); its
+ * interface is still subject to change in this version...
+ * -- TYT, 1/31/92
+ *
+ * Added functionality to the OPOST tty handling. No delays, but all
+ * other bits should be there.
+ * -- Nick Holloway <alfie@xxxxxxxxxxxxxxxxx>, 27th May 1993.
+ *
+ * Rewrote canonical mode and added more termios flags.
+ * -- julian@xxxxxxxxxxxxxxxxxxxxxx (J. Cowley), 13Jan94
+ *
+ * Reorganized FASYNC support so mouse code can share it.
+ * -- ctm@xxxxxxxx, 9Sep95
+ *
+ * New TIOCLINUX variants added.
+ * -- mj@xxxxxxxxxxxxxxxxx, 19-Nov-95
+ *
+ * Restrict vt switching via ioctl()
+ * -- grif@xxxxxxxxxx, 5-Dec-95
+ *
+ * Move console and virtual terminal code to more appropriate files,
+ * implement CONFIG_VT and generalize console device interface.
+ * -- Marko Kohtala <Marko.Kohtala@xxxxxx>, March 97
+ *
+ * Rewrote init_dev and release_dev to eliminate races.
+ * -- Bill Hawes <whawes@xxxxxxxx>, June 97
+ *
+ * Added devfs support.
+ * -- C. Scott Ananian <cananian@xxxxxxxxxxxxxxxxxxxx>, 13-Jan-1998
+ *
+ * Added support for a Unix98-style ptmx device.
+ * -- C. Scott Ananian <cananian@xxxxxxxxxxxxxxxxxxxx>, 14-Jan-1998
+ *
+ * Reduced memory usage for older ARM systems
+ * -- Russell King <rmk@xxxxxxxxxxxxxxxx>
+ *
+ * Move do_SAK() into process context. Less stack use in devfs functions.
+ * alloc_tty_struct() always uses kmalloc() -- Andrew Morton <andrewm@xxxxxxxxxx> 17Mar01
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/major.h>
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/fcntl.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <linux/devpts_fs.h>
+#include <linux/file.h>
+#include <linux/console.h>
+#include <linux/timer.h>
+#include <linux/ctype.h>
+#include <linux/kd.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/proc_fs.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/smp_lock.h>
+
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+
+#include <linux/kbd_kern.h>
+#include <linux/vt_kern.h>
+#include <linux/selection.h>
+#include <linux/devfs_fs_kernel.h>
+
+#include <linux/kmod.h>
+
+#ifdef CONFIG_XEN_CONSOLE
+extern void xen_console_init(void);
+#endif
+
+#ifdef CONFIG_VT
+extern void con_init_devfs (void);
+#endif
+
+extern void disable_early_printk(void);
+
+#define CONSOLE_DEV MKDEV(TTY_MAJOR,0)
+#define TTY_DEV MKDEV(TTYAUX_MAJOR,0)
+#define SYSCONS_DEV MKDEV(TTYAUX_MAJOR,1)
+#define PTMX_DEV MKDEV(TTYAUX_MAJOR,2)
+
+#undef TTY_DEBUG_HANGUP
+
+#define TTY_PARANOIA_CHECK 1
+#define CHECK_TTY_COUNT 1
+
+struct termios tty_std_termios; /* for the benefit of tty drivers */
+struct tty_driver *tty_drivers; /* linked list of tty drivers */
+
+#ifdef CONFIG_UNIX98_PTYS
+extern struct tty_driver ptm_driver[]; /* Unix98 pty masters; for /dev/ptmx */
+extern struct tty_driver pts_driver[]; /* Unix98 pty slaves; for /dev/ptmx */
+#endif
+
+static void initialize_tty_struct(struct tty_struct *tty);
+
+static ssize_t tty_read(struct file *, char *, size_t, loff_t *);
+static ssize_t tty_write(struct file *, const char *, size_t, loff_t *);
+static unsigned int tty_poll(struct file *, poll_table *);
+static int tty_open(struct inode *, struct file *);
+static int tty_release(struct inode *, struct file *);
+int tty_ioctl(struct inode * inode, struct file * file,
+ unsigned int cmd, unsigned long arg);
+static int tty_fasync(int fd, struct file * filp, int on);
+extern int vme_scc_init (void);
+extern long vme_scc_console_init(void);
+extern int serial167_init(void);
+extern long serial167_console_init(void);
+extern void console_8xx_init(void);
+extern void au1x00_serial_console_init(void);
+extern int rs_8xx_init(void);
+extern void mac_scc_console_init(void);
+extern void hwc_console_init(void);
+extern void hwc_tty_init(void);
+extern void con3215_init(void);
+extern void tty3215_init(void);
+extern void tub3270_con_init(void);
+extern void tub3270_init(void);
+extern void rs285_console_init(void);
+extern void sa1100_rs_console_init(void);
+extern void sgi_serial_console_init(void);
+extern void sn_sal_serial_console_init(void);
+extern void sci_console_init(void);
+extern void dec_serial_console_init(void);
+extern void tx3912_console_init(void);
+extern void tx3912_rs_init(void);
+extern void txx927_console_init(void);
+extern void txx9_rs_init(void);
+extern void txx9_serial_console_init(void);
+extern void sb1250_serial_console_init(void);
+extern void arc_console_init(void);
+extern int hvc_console_init(void);
+
+#ifndef MIN
+#define MIN(a,b) ((a) < (b) ? (a) : (b))
+#endif
+#ifndef MAX
+#define MAX(a,b) ((a) < (b) ? (b) : (a))
+#endif
+
+static struct tty_struct *alloc_tty_struct(void)
+{
+ struct tty_struct *tty;
+
+ tty = kmalloc(sizeof(struct tty_struct), GFP_KERNEL);
+ if (tty)
+ memset(tty, 0, sizeof(struct tty_struct));
+ return tty;
+}
+
+static inline void free_tty_struct(struct tty_struct *tty)
+{
+ kfree(tty);
+}
+
+/*
+ * This routine returns the name of tty.
+ */
+static char *
+_tty_make_name(struct tty_struct *tty, const char *name, char *buf)
+{
+ int idx = (tty)?MINOR(tty->device) - tty->driver.minor_start:0;
+
+ if (!tty) /* Hmm. NULL pointer. That's fun. */
+ strcpy(buf, "NULL tty");
+ else
+ sprintf(buf, name,
+ idx + tty->driver.name_base);
+
+ return buf;
+}
+
+#define TTY_NUMBER(tty) (MINOR((tty)->device) - (tty)->driver.minor_start + \
+ (tty)->driver.name_base)
+
+char *tty_name(struct tty_struct *tty, char *buf)
+{
+ return _tty_make_name(tty, (tty)?tty->driver.name:NULL, buf);
+}
+
+inline int tty_paranoia_check(struct tty_struct *tty, kdev_t device,
+ const char *routine)
+{
+#ifdef TTY_PARANOIA_CHECK
+ static const char badmagic[] = KERN_WARNING
+ "Warning: bad magic number for tty struct (%s) in %s\n";
+ static const char badtty[] = KERN_WARNING
+ "Warning: null TTY for (%s) in %s\n";
+
+ if (!tty) {
+ printk(badtty, kdevname(device), routine);
+ return 1;
+ }
+ if (tty->magic != TTY_MAGIC) {
+ printk(badmagic, kdevname(device), routine);
+ return 1;
+ }
+#endif
+ return 0;
+}
+
+static int check_tty_count(struct tty_struct *tty, const char *routine)
+{
+#ifdef CHECK_TTY_COUNT
+ struct list_head *p;
+ int count = 0;
+
+ file_list_lock();
+ for(p = tty->tty_files.next; p != &tty->tty_files; p = p->next) {
+ if(list_entry(p, struct file, f_list)->private_data == tty)
+ count++;
+ }
+ file_list_unlock();
+ if (tty->driver.type == TTY_DRIVER_TYPE_PTY &&
+ tty->driver.subtype == PTY_TYPE_SLAVE &&
+ tty->link && tty->link->count)
+ count++;
+ if (tty->count != count) {
+ printk(KERN_WARNING "Warning: dev (%s) tty->count(%d) "
+ "!= #fd's(%d) in %s\n",
+ kdevname(tty->device), tty->count, count, routine);
+ return count;
+ }
+#endif
+ return 0;
+}
+
+/*
+ * This is probably overkill for real world processors but
+ * they are not on hot paths so a little discipline won't do
+ * any harm.
+ */
+
+static void tty_set_termios_ldisc(struct tty_struct *tty, int num)
+{
+ down(&tty->termios_sem);
+ tty->termios->c_line = num;
+ up(&tty->termios_sem);
+}
+
+/*
+ * This guards the refcounted line discipline lists. The lock
+ * must be taken with irqs off because there are hangup path
+ * callers who will do ldisc lookups and cannot sleep.
+ */
+
+spinlock_t tty_ldisc_lock = SPIN_LOCK_UNLOCKED;
+DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_wait);
+struct tty_ldisc tty_ldiscs[NR_LDISCS]; /* line disc dispatch table */
+
+int tty_register_ldisc(int disc, struct tty_ldisc *new_ldisc)
+{
+
+ unsigned long flags;
+ int ret = 0;
+
+ if (disc < N_TTY || disc >= NR_LDISCS)
+ return -EINVAL;
+
+ spin_lock_irqsave(&tty_ldisc_lock, flags);
+ if (new_ldisc) {
+ tty_ldiscs[disc] = *new_ldisc;
+ tty_ldiscs[disc].num = disc;
+ tty_ldiscs[disc].flags |= LDISC_FLAG_DEFINED;
+ tty_ldiscs[disc].refcount = 0;
+ } else {
+ if(tty_ldiscs[disc].refcount)
+ ret = -EBUSY;
+ else
+ tty_ldiscs[disc].flags &= ~LDISC_FLAG_DEFINED;
+ }
+ spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+
+ return ret;
+
+}
+
+
+EXPORT_SYMBOL(tty_register_ldisc);
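
For reference, a module plugs into this table roughly as follows (a sketch; the N_MOUSE slot and the stub callbacks are illustrative, and passing NULL as the ldisc unregisters):

/* Sketch of registering a line discipline with tty_register_ldisc(). */
static int example_open(struct tty_struct *tty) { return 0; }
static void example_close(struct tty_struct *tty) { }

static struct tty_ldisc example_ldisc = {
        magic:  TTY_LDISC_MAGIC,
        name:   "example",
        open:   example_open,
        close:  example_close,
};

int example_init(void)
{
        return tty_register_ldisc(N_MOUSE, &example_ldisc);
}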
+
+struct tty_ldisc *tty_ldisc_get(int disc)
+{
+ unsigned long flags;
+ struct tty_ldisc *ld;
+
+ if (disc < N_TTY || disc >= NR_LDISCS)
+ return NULL;
+
+ spin_lock_irqsave(&tty_ldisc_lock, flags);
+
+ ld = &tty_ldiscs[disc];
+ /* Check the entry is defined */
+ if(ld->flags & LDISC_FLAG_DEFINED)
+ ld->refcount++;
+ else
+ ld = NULL;
+ spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+ return ld;
+}
+
+EXPORT_SYMBOL_GPL(tty_ldisc_get);
+
+void tty_ldisc_put(int disc)
+{
+ struct tty_ldisc *ld;
+ unsigned long flags;
+
+ if (disc < N_TTY || disc >= NR_LDISCS)
+ BUG();
+
+ spin_lock_irqsave(&tty_ldisc_lock, flags);
+ ld = &tty_ldiscs[disc];
+ if(ld->refcount <= 0)
+ BUG();
+ ld->refcount--;
+ spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+}
+
+EXPORT_SYMBOL_GPL(tty_ldisc_put);
+
+void tty_ldisc_assign(struct tty_struct *tty, struct tty_ldisc *ld)
+{
+ tty->ldisc = *ld;
+ tty->ldisc.refcount = 0;
+}
+
+/**
+ * tty_ldisc_try - internal helper
+ * @tty: the tty
+ *
+ * Make a single attempt to grab and bump the refcount on
+ * the tty ldisc. Return 0 on failure or 1 on success. This is
+ * used to implement both the waiting and non waiting versions
+ * of tty_ldisc_ref
+ */
+
+static int tty_ldisc_try(struct tty_struct *tty)
+{
+ unsigned long flags;
+ struct tty_ldisc *ld;
+ int ret = 0;
+
+ spin_lock_irqsave(&tty_ldisc_lock, flags);
+ ld = &tty->ldisc;
+ if(test_bit(TTY_LDISC, &tty->flags))
+ {
+ ld->refcount++;
+ ret = 1;
+ }
+ spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+ return ret;
+}
+
+/**
+ * tty_ldisc_ref_wait - wait for the tty ldisc
+ * @tty: tty device
+ *
+ * Dereference the line discipline for the terminal and take a
+ * reference to it. If the line discipline is in flux then
+ * wait patiently until it changes.
+ *
+ * Note: Must not be called from an IRQ/timer context. The caller
+ * must also be careful not to hold other locks that will deadlock
+ * against a discipline change, such as an existing ldisc reference
+ * (which we check for)
+ */
+
+struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *tty)
+{
+ /* wait_event is a macro */
+ wait_event(tty_ldisc_wait, tty_ldisc_try(tty));
+ return &tty->ldisc;
+}
+
+EXPORT_SYMBOL_GPL(tty_ldisc_ref_wait);
+
+/**
+ * tty_ldisc_ref - get the tty ldisc
+ * @tty: tty device
+ *
+ * Dereference the line discipline for the terminal and take a
+ * reference to it. If the line discipline is in flux then
+ * return NULL. Can be called from IRQ and timer functions.
+ */
+
+struct tty_ldisc *tty_ldisc_ref(struct tty_struct *tty)
+{
+ if(tty_ldisc_try(tty))
+ return &tty->ldisc;
+ return NULL;
+}
+
+EXPORT_SYMBOL_GPL(tty_ldisc_ref);
+
+
+void tty_ldisc_deref(struct tty_ldisc *ld)
+{
+
+ unsigned long flags;
+
+ if(ld == NULL)
+ BUG();
+
+ spin_lock_irqsave(&tty_ldisc_lock, flags);
+ if(ld->refcount == 0)
+ printk(KERN_EMERG "tty_ldisc_deref: no references.\n");
+ else
+ ld->refcount--;
+ if(ld->refcount == 0)
+ wake_up(&tty_ldisc_wait);
+ spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+}
+
+EXPORT_SYMBOL_GPL(tty_ldisc_deref);
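
Taken together these primitives define a small protocol: IRQ/timer context uses tty_ldisc_ref() and must tolerate NULL, process context may block in tty_ldisc_ref_wait(), and every successful reference is paired with tty_ldisc_deref(). A sketch of the IRQ-side idiom (tty_wakeup() below is a real in-tree example):

/* Sketch: feeding received data to the ldisc from interrupt context. */
static void example_rx(struct tty_struct *tty,
                       const unsigned char *buf, int count)
{
        struct tty_ldisc *ld = tty_ldisc_ref(tty); /* NULL while in flux */

        if (ld) {
                if (ld->receive_buf)
                        ld->receive_buf(tty, buf, NULL, count);
                tty_ldisc_deref(ld);               /* pair every ref */
        }
}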
+
+/**
+ * tty_ldisc_enable - allow ldisc use
+ * @tty: terminal to activate ldisc on
+ *
+ * Set the TTY_LDISC flag when the line discipline can be called
+ * again. Do necessary wakeups for existing sleepers.
+ *
+ * Note: nobody should set this bit except via this function. Clearing
+ * directly is allowed.
+ */
+
+static void tty_ldisc_enable(struct tty_struct *tty)
+{
+ set_bit(TTY_LDISC, &tty->flags);
+ wake_up(&tty_ldisc_wait);
+}
+
+/**
+ * tty_set_ldisc - set line discipline
+ * @tty: the terminal to set
+ * @ldisc: the line discipline
+ *
+ * Set the discipline of a tty line. Must be called from a process
+ * context.
+ */
+
+static int tty_set_ldisc(struct tty_struct *tty, int ldisc)
+{
+ int retval = 0;
+ struct tty_ldisc o_ldisc;
+ char buf[64];
+ unsigned long flags;
+ struct tty_ldisc *ld;
+
+ if ((ldisc < N_TTY) || (ldisc >= NR_LDISCS))
+ return -EINVAL;
+
+restart:
+
+ if (tty->ldisc.num == ldisc)
+ return 0; /* We are already in the desired discipline */
+
+ ld = tty_ldisc_get(ldisc);
+ /* Eduardo Blanco <ejbs@xxxxxxxxxxxx> */
+ /* Cyrus Durgin <cider@xxxxxxxxxxxxx> */
+ if (ld == NULL)
+ {
+ char modname [20];
+ sprintf(modname, "tty-ldisc-%d", ldisc);
+ request_module (modname);
+ ld = tty_ldisc_get(ldisc);
+ }
+
+ if (ld == NULL)
+ return -EINVAL;
+
+
+ o_ldisc = tty->ldisc;
+ tty_wait_until_sent(tty, 0);
+
+ /*
+ * Make sure we don't change while someone holds a
+ * reference to the line discipline. The TTY_LDISC bit
+ * prevents anyone taking a reference once it is clear.
+ * We need the lock to avoid racing reference takers.
+ */
+
+ spin_lock_irqsave(&tty_ldisc_lock, flags);
+ if(tty->ldisc.refcount)
+ {
+ /* Free the new ldisc we grabbed. Must drop the lock
+ first. */
+ spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+ tty_ldisc_put(ldisc);
+ /*
+ * There are several reasons we may be busy, including
+ * random momentary I/O traffic. We must therefore
+ * retry. We could distinguish between blocking ops
+ * and retries if we made tty_ldisc_wait() smarter. That
+ * is up for discussion.
+ */
+ if(wait_event_interruptible(tty_ldisc_wait, tty->ldisc.refcount == 0) < 0)
+ return -ERESTARTSYS;
+ goto restart;
+ }
+ clear_bit(TTY_LDISC, &tty->flags);
+ clear_bit(TTY_DONT_FLIP, &tty->flags);
+ spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+
+ /*
+ * From this point on we know nobody has an ldisc
+ * usage reference, nor can they obtain one until
+ * we say so later on.
+ */
+
+ /*
+ * Wait for ->hangup_work and ->flip.work handlers to terminate
+ */
+ run_task_queue(&tq_timer);
+ flush_scheduled_tasks();
+
+ /* Shutdown the current discipline. */
+ if (tty->ldisc.close)
+ (tty->ldisc.close)(tty);
+
+ /* Now set up the new line discipline. */
+ tty_ldisc_assign(tty, ld);
+ tty_set_termios_ldisc(tty, ldisc);
+ if (tty->ldisc.open)
+ retval = (tty->ldisc.open)(tty);
+ if (retval < 0) {
+ tty_ldisc_put(ldisc);
+ /* There is an outstanding reference here so this is safe */
+ tty_ldisc_assign(tty, tty_ldisc_get(o_ldisc.num));
+ tty_set_termios_ldisc(tty, tty->ldisc.num);
+ if (tty->ldisc.open && (tty->ldisc.open(tty) < 0)) {
+ tty_ldisc_put(o_ldisc.num);
+ /* This driver is always present */
+ tty_ldisc_assign(tty, tty_ldisc_get(N_TTY));
+ tty_set_termios_ldisc(tty, N_TTY);
+ if (tty->ldisc.open) {
+ int r = tty->ldisc.open(tty);
+
+ if (r < 0)
+ panic("Couldn't open N_TTY ldisc for "
+ "%s --- error %d.",
+ tty_name(tty, buf), r);
+ }
+ }
+ }
+ /* At this point we hold a reference to the new ldisc and a
+ reference to the old ldisc. If we ended up flipping back
+ to the existing ldisc we have two references to it */
+
+ if (tty->ldisc.num != o_ldisc.num && tty->driver.set_ldisc)
+ tty->driver.set_ldisc(tty);
+
+ tty_ldisc_put(o_ldisc.num);
+
+ /*
+ * Allow ldisc referencing to occur as soon as the driver
+ * ldisc callback completes.
+ */
+ tty_ldisc_enable(tty);
+
+ return retval;
+}
+
+/*
+ * This routine returns a tty driver structure, given a device number
+ */
+struct tty_driver *get_tty_driver(kdev_t device)
+{
+ int major, minor;
+ struct tty_driver *p;
+
+ minor = MINOR(device);
+ major = MAJOR(device);
+
+ for (p = tty_drivers; p; p = p->next) {
+ if (p->major != major)
+ continue;
+ if (minor < p->minor_start)
+ continue;
+ if (minor >= p->minor_start + p->num)
+ continue;
+ return p;
+ }
+ return NULL;
+}
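
The lookup matches on major and the half-open minor range [minor_start, minor_start + num); a driver becomes resolvable once it registers with those fields set. A sketch with illustrative values (a real driver fills in many more fields before tty_register_driver()):

/* Sketch: the tty_driver fields get_tty_driver() matches against. */
static struct tty_driver example_driver;

static void example_setup(void)
{
        example_driver.magic       = TTY_DRIVER_MAGIC;
        example_driver.major       = 0;  /* 0 = dynamically assigned */
        example_driver.minor_start = 0;
        example_driver.num         = 4;  /* minors 0..3 */
        /* after tty_register_driver(&example_driver), get_tty_driver()
         * resolves MKDEV(example_driver.major, 0..3) to this driver */
}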
+
+/*
+ * If we try to write to, or set the state of, a terminal and we're
+ * not in the foreground, send a SIGTTOU. If the signal is blocked or
+ * ignored, go ahead and perform the operation. (POSIX 7.2)
+ */
+int tty_check_change(struct tty_struct * tty)
+{
+ if (current->tty != tty)
+ return 0;
+ if (tty->pgrp <= 0) {
+ printk(KERN_WARNING "tty_check_change: tty->pgrp <= 0!\n");
+ return 0;
+ }
+ if (current->pgrp == tty->pgrp)
+ return 0;
+ if (is_ignored(SIGTTOU))
+ return 0;
+ if (is_orphaned_pgrp(current->pgrp))
+ return -EIO;
+ (void) kill_pg(current->pgrp,SIGTTOU,1);
+ return -ERESTARTSYS;
+}
+
+static ssize_t hung_up_tty_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos)
+{
+ /* Can't seek (pread) on ttys. */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+ return 0;
+}
+
+static ssize_t hung_up_tty_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos)
+{
+ /* Can't seek (pwrite) on ttys. */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+ return -EIO;
+}
+
+/* No kernel lock held - none needed ;) */
+static unsigned int hung_up_tty_poll(struct file * filp, poll_table * wait)
+{
+ return POLLIN | POLLOUT | POLLERR | POLLHUP | POLLRDNORM | POLLWRNORM;
+}
+
+static int hung_up_tty_ioctl(struct inode * inode, struct file * file,
+ unsigned int cmd, unsigned long arg)
+{
+ return cmd == TIOCSPGRP ? -ENOTTY : -EIO;
+}
+
+static struct file_operations tty_fops = {
+ llseek: no_llseek,
+ read: tty_read,
+ write: tty_write,
+ poll: tty_poll,
+ ioctl: tty_ioctl,
+ open: tty_open,
+ release: tty_release,
+ fasync: tty_fasync,
+};
+
+static struct file_operations hung_up_tty_fops = {
+ llseek: no_llseek,
+ read: hung_up_tty_read,
+ write: hung_up_tty_write,
+ poll: hung_up_tty_poll,
+ ioctl: hung_up_tty_ioctl,
+ release: tty_release,
+};
+
+static spinlock_t redirect_lock = SPIN_LOCK_UNLOCKED;
+static struct file *redirect;
+
+/**
+ * tty_wakeup - request more data
+ * @tty: terminal
+ *
+ * Internal and external helper for wakeups of tty. This function
+ * informs the line discipline if present that the driver is ready
+ * to receive more output data.
+ */
+
+void tty_wakeup(struct tty_struct *tty)
+{
+ struct tty_ldisc *ld;
+
+ if (test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags)) {
+ ld = tty_ldisc_ref(tty);
+ if(ld) {
+ if(ld->write_wakeup)
+ ld->write_wakeup(tty);
+ tty_ldisc_deref(ld);
+ }
+ }
+ wake_up_interruptible(&tty->write_wait);
+}
+
+/*
+ * tty_wakeup/tty_ldisc_flush are actually _GPL exports but we can't do
+ * that in 2.4 for modutils compat reasons.
+ */
+EXPORT_SYMBOL(tty_wakeup);
+
+
+void tty_ldisc_flush(struct tty_struct *tty)
+{
+ struct tty_ldisc *ld = tty_ldisc_ref(tty);
+ if(ld) {
+ if(ld->flush_buffer)
+ ld->flush_buffer(tty);
+ tty_ldisc_deref(ld);
+ }
+}
+
+
+/*
+ * tty_wakeup/tty_ldisc_flush are actually _GPL exports but we can't do
+ * that in 2.4 for modutils compat reasons.
+ */
+EXPORT_SYMBOL(tty_ldisc_flush);
+
+void do_tty_hangup(void *data)
+{
+ struct tty_struct *tty = (struct tty_struct *) data;
+ struct file * cons_filp = NULL;
+ struct file *f = NULL;
+ struct task_struct *p;
+ struct list_head *l;
+ struct tty_ldisc *ld;
+ int closecount = 0, n;
+
+ if (!tty)
+ return;
+
+ /* inuse_filps is protected by the single kernel lock */
+ lock_kernel();
+
+ spin_lock(&redirect_lock);
+ if (redirect && redirect->private_data == tty) {
+ f = redirect;
+ redirect = NULL;
+ }
+ spin_unlock(&redirect_lock);
+
+ check_tty_count(tty, "do_tty_hangup");
+ file_list_lock();
+ for (l = tty->tty_files.next; l != &tty->tty_files; l = l->next) {
+ struct file * filp = list_entry(l, struct file, f_list);
+ if (filp->f_dentry->d_inode->i_rdev == CONSOLE_DEV ||
+ filp->f_dentry->d_inode->i_rdev == SYSCONS_DEV) {
+ cons_filp = filp;
+ continue;
+ }
+ if (filp->f_op != &tty_fops)
+ continue;
+ closecount++;
+ tty_fasync(-1, filp, 0); /* can't block */
+ filp->f_op = &hung_up_tty_fops;
+ }
+ file_list_unlock();
+
+ /* FIXME! What are the locking issues here? This may be overdoing things.. */
+ ld = tty_ldisc_ref(tty);
+ if(ld != NULL)
+ {
+ if (ld->flush_buffer)
+ ld->flush_buffer(tty);
+ if (tty->driver.flush_buffer)
+ tty->driver.flush_buffer(tty);
+ if ((test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags)) && ld->write_wakeup)
+ ld->write_wakeup(tty);
+ if (ld->hangup)
+ ld->hangup(tty);
+ }
+
+ /* FIXME: Once we trust the LDISC code better we can wait here for
+ ldisc completion and fix the driver call race */
+
+ wake_up_interruptible(&tty->write_wait);
+ wake_up_interruptible(&tty->read_wait);
+
+ /*
+ * Shutdown the current line discipline, and reset it to
+ * N_TTY.
+ */
+
+ if (tty->driver.flags & TTY_DRIVER_RESET_TERMIOS)
+ {
+ down(&tty->termios_sem);
+ *tty->termios = tty->driver.init_termios;
+ up(&tty->termios_sem);
+ }
+
+ /* Defer ldisc switch */
+ /* tty_deferred_ldisc_switch(N_TTY)
+ This should get done automatically when the port closes and
+ tty_release is called */
+
+ read_lock(&tasklist_lock);
+ for_each_task(p) {
+ if ((tty->session > 0) && (p->session == tty->session) &&
+ p->leader) {
+ send_sig(SIGHUP,p,1);
+ send_sig(SIGCONT,p,1);
+ if (tty->pgrp > 0)
+ p->tty_old_pgrp = tty->pgrp;
+ }
+ if (p->tty == tty)
+ p->tty = NULL;
+ }
+ read_unlock(&tasklist_lock);
+
+ tty->flags = 0;
+ tty->session = 0;
+ tty->pgrp = -1;
+ tty->ctrl_status = 0;
+ /*
+ * If one of the devices matches a console pointer, we
+ * cannot just call hangup() because that will cause
+ * tty->count and state->count to go out of sync.
+ * So we just call close() the right number of times.
+ */
+ if (cons_filp) {
+ if (tty->driver.close)
+ for (n = 0; n < closecount; n++)
+ tty->driver.close(tty, cons_filp);
+ } else if (tty->driver.hangup)
+ (tty->driver.hangup)(tty);
+
+ /* We don't want to have driver/ldisc interactions beyond
+ the ones we did here. The driver layer expects no
+ calls after ->hangup() from the ldisc side. However we
+ can't yet guarantee all that */
+
+ set_bit(TTY_HUPPED, &tty->flags);
+ if(ld) {
+ tty_ldisc_enable(tty);
+ tty_ldisc_deref(ld);
+ }
+ unlock_kernel();
+ if (f)
+ fput(f);
+}
+
+void tty_hangup(struct tty_struct * tty)
+{
+#ifdef TTY_DEBUG_HANGUP
+ char buf[64];
+
+ printk(KERN_DEBUG "%s hangup...\n", tty_name(tty, buf));
+#endif
+ schedule_task(&tty->tq_hangup);
+}
+
+void tty_vhangup(struct tty_struct * tty)
+{
+#ifdef TTY_DEBUG_HANGUP
+ char buf[64];
+
+ printk(KERN_DEBUG "%s vhangup...\n", tty_name(tty, buf));
+#endif
+ do_tty_hangup((void *) tty);
+}
+
+int tty_hung_up_p(struct file * filp)
+{
+ return (filp->f_op == &hung_up_tty_fops);
+}
+
+/*
+ * This function is typically called only by the session leader, when
+ * it wants to disassociate itself from its controlling tty.
+ *
+ * It performs the following functions:
+ * (1) Sends a SIGHUP and SIGCONT to the foreground process group
+ * (2) Clears the tty from being controlling the session
+ * (3) Clears the controlling tty for all processes in the
+ * session group.
+ *
+ * The argument on_exit is set to 1 if called when a process is
+ * exiting; it is 0 if called by the ioctl TIOCNOTTY.
+ */
+void disassociate_ctty(int on_exit)
+{
+ struct tty_struct *tty = current->tty;
+ struct task_struct *p;
+ int tty_pgrp = -1;
+
+ if (tty) {
+ tty_pgrp = tty->pgrp;
+ if (on_exit && tty->driver.type != TTY_DRIVER_TYPE_PTY)
+ tty_vhangup(tty);
+ } else {
+ if (current->tty_old_pgrp) {
+ kill_pg(current->tty_old_pgrp, SIGHUP, on_exit);
+ kill_pg(current->tty_old_pgrp, SIGCONT, on_exit);
+ }
+ return;
+ }
+ if (tty_pgrp > 0) {
+ kill_pg(tty_pgrp, SIGHUP, on_exit);
+ if (!on_exit)
+ kill_pg(tty_pgrp, SIGCONT, on_exit);
+ }
+
+ current->tty_old_pgrp = 0;
+ tty->session = 0;
+ tty->pgrp = -1;
+
+ read_lock(&tasklist_lock);
+ for_each_task(p)
+ if (p->session == current->session)
+ p->tty = NULL;
+ read_unlock(&tasklist_lock);
+}
+
+void stop_tty(struct tty_struct *tty)
+{
+ if (tty->stopped)
+ return;
+ tty->stopped = 1;
+ if (tty->link && tty->link->packet) {
+ tty->ctrl_status &= ~TIOCPKT_START;
+ tty->ctrl_status |= TIOCPKT_STOP;
+ wake_up_interruptible(&tty->link->read_wait);
+ }
+ if (tty->driver.stop)
+ (tty->driver.stop)(tty);
+}
+
+void start_tty(struct tty_struct *tty)
+{
+ if (!tty->stopped || tty->flow_stopped)
+ return;
+ tty->stopped = 0;
+ if (tty->link && tty->link->packet) {
+ tty->ctrl_status &= ~TIOCPKT_STOP;
+ tty->ctrl_status |= TIOCPKT_START;
+ wake_up_interruptible(&tty->link->read_wait);
+ }
+ if (tty->driver.start)
+ (tty->driver.start)(tty);
+ /* If we have a running line discipline it may need kicking */
+ tty_wakeup(tty);
+}
+
+static ssize_t tty_read(struct file * file, char * buf, size_t count,
+ loff_t *ppos)
+{
+ int i;
+ struct tty_struct * tty;
+ struct inode *inode;
+ struct tty_ldisc *ld;
+
+ /* Can't seek (pread) on ttys. */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
+ tty = (struct tty_struct *)file->private_data;
+ inode = file->f_dentry->d_inode;
+ if (tty_paranoia_check(tty, inode->i_rdev, "tty_read"))
+ return -EIO;
+ if (!tty || (test_bit(TTY_IO_ERROR, &tty->flags)))
+ return -EIO;
+
+ /* This check not only needs to be done before reading, but also
+ whenever read_chan() gets woken up after sleeping, so I've
+ moved it to there. This should only be done for the N_TTY
+ line discipline, anyway. Same goes for write_chan(). -- jlc. */
+#if 0
+ if ((inode->i_rdev != CONSOLE_DEV) && /* don't stop on /dev/console */
+ (tty->pgrp > 0) &&
+ (current->tty == tty) &&
+ (tty->pgrp != current->pgrp))
+ if (is_ignored(SIGTTIN) || is_orphaned_pgrp(current->pgrp))
+ return -EIO;
+ else {
+ (void) kill_pg(current->pgrp, SIGTTIN, 1);
+ return -ERESTARTSYS;
+ }
+#endif
+ /* We want to wait for the line discipline to sort out in this
+ situation */
+ ld = tty_ldisc_ref_wait(tty);
+ lock_kernel();
+ if (ld->read)
+ i = (ld->read)(tty,file,buf,count);
+ else
+ i = -EIO;
+ tty_ldisc_deref(ld);
+ unlock_kernel();
+ if (i > 0)
+ inode->i_atime = CURRENT_TIME;
+ return i;
+}
+
+/*
+ * Split writes up in sane blocksizes to avoid
+ * denial-of-service type attacks
+ */
+static inline ssize_t do_tty_write(
+ ssize_t (*write)(struct tty_struct *, struct file *, const unsigned char *, size_t),
+ struct tty_struct *tty,
+ struct file *file,
+ const unsigned char *buf,
+ size_t count)
+{
+ ssize_t ret = 0, written = 0;
+
+ if (file->f_flags & O_NONBLOCK) {
+ if (down_trylock(&tty->atomic_write))
+ return -EAGAIN;
+ }
+ else {
+ if (down_interruptible(&tty->atomic_write))
+ return -ERESTARTSYS;
+ }
+ if ( test_bit(TTY_NO_WRITE_SPLIT, &tty->flags) ) {
+ lock_kernel();
+ written = write(tty, file, buf, count);
+ unlock_kernel();
+ } else {
+ for (;;) {
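+ /* Chunk size is the larger of two pages or 16KB; between chunks
+ we honour pending signals and reschedule, so one big write
+ cannot monopolize the CPU. */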
+ unsigned long size = MAX(PAGE_SIZE*2,16384);
+ if (size > count)
+ size = count;
+ lock_kernel();
+ ret = write(tty, file, buf, size);
+ unlock_kernel();
+ if (ret <= 0)
+ break;
+ written += ret;
+ buf += ret;
+ count -= ret;
+ if (!count)
+ break;
+ ret = -ERESTARTSYS;
+ if (signal_pending(current))
+ break;
+ if (current->need_resched)
+ schedule();
+ }
+ }
+ if (written) {
+ file->f_dentry->d_inode->i_mtime = CURRENT_TIME;
+ ret = written;
+ }
+ up(&tty->atomic_write);
+ return ret;
+}
+
+
+static ssize_t tty_write(struct file * file, const char * buf, size_t count,
+ loff_t *ppos)
+{
+ int is_console;
+ struct tty_struct * tty;
+ struct inode *inode = file->f_dentry->d_inode;
+ ssize_t ret;
+ struct tty_ldisc *ld;
+
+ /* Can't seek (pwrite) on ttys. */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
+ /*
+ * For now, we redirect writes from /dev/console as
+ * well as /dev/tty0.
+ */
+ inode = file->f_dentry->d_inode;
+ is_console = (inode->i_rdev == SYSCONS_DEV ||
+ inode->i_rdev == CONSOLE_DEV);
+
+ if (is_console) {
+ struct file *p = NULL;
+
+ spin_lock(&redirect_lock);
+ if (redirect) {
+ get_file(redirect);
+ p = redirect;
+ }
+ spin_unlock(&redirect_lock);
+
+ if (p) {
+ ssize_t res = p->f_op->write(p, buf, count, &p->f_pos);
+ fput(p);
+ return res;
+ }
+ }
+
+ tty = (struct tty_struct *)file->private_data;
+ if (tty_paranoia_check(tty, inode->i_rdev, "tty_write"))
+ return -EIO;
+ if (!tty || !tty->driver.write || (test_bit(TTY_IO_ERROR, &tty->flags)))
+ return -EIO;
+#if 0
+ if (!is_console && L_TOSTOP(tty) && (tty->pgrp > 0) &&
+ (current->tty == tty) && (tty->pgrp != current->pgrp)) {
+ if (is_orphaned_pgrp(current->pgrp))
+ return -EIO;
+ if (!is_ignored(SIGTTOU)) {
+ (void) kill_pg(current->pgrp, SIGTTOU, 1);
+ return -ERESTARTSYS;
+ }
+ }
+#endif
+
+ ld = tty_ldisc_ref_wait(tty);
+ if (!ld->write)
+ ret = -EIO;
+ else
+ ret = do_tty_write(ld->write, tty, file,
+ (const unsigned char __user *)buf, count);
+ tty_ldisc_deref(ld);
+ return ret;
+}
+
+/* Semaphore to protect creating and releasing a tty. This is shared with
+ vt.c for deeply disgusting hack reasons */
+static DECLARE_MUTEX(tty_sem);
+
+static void down_tty_sem(int index)
+{
+ down(&tty_sem);
+}
+
+static void up_tty_sem(int index)
+{
+ up(&tty_sem);
+}
+
+static void release_mem(struct tty_struct *tty, int idx);
+
+/*
+ * WSH 06/09/97: Rewritten to remove races and properly clean up after a
+ * failed open. The new code protects the open with a semaphore, so it's
+ * really quite straightforward. The semaphore locking can probably be
+ * relaxed for the (most common) case of reopening a tty.
+ */
+static int init_dev(kdev_t device, struct tty_struct **ret_tty)
+{
+ struct tty_struct *tty, *o_tty;
+ struct termios *tp, **tp_loc, *o_tp, **o_tp_loc;
+ struct termios *ltp, **ltp_loc, *o_ltp, **o_ltp_loc;
+ struct tty_driver *driver;
+ int retval=0;
+ int idx;
+
+ driver = get_tty_driver(device);
+ if (!driver)
+ return -ENODEV;
+
+ idx = MINOR(device) - driver->minor_start;
+
+ /*
+ * Check whether we need to acquire the tty semaphore to avoid
+ * race conditions. For now, play it safe.
+ */
+ down_tty_sem(idx);
+
+ /* check whether we're reopening an existing tty */
+ tty = driver->table[idx];
+ if (tty) goto fast_track;
+
+ /*
+ * First time open is complex, especially for PTY devices.
+ * This code guarantees that either everything succeeds and the
+ * TTY is ready for operation, or else the table slots are vacated
+ * and the allocated memory released. (Except that the termios
+ * and locked termios may be retained.)
+ */
+
+ o_tty = NULL;
+ tp = o_tp = NULL;
+ ltp = o_ltp = NULL;
+
+ tty = alloc_tty_struct();
+ if(!tty)
+ goto fail_no_mem;
+ initialize_tty_struct(tty);
+ tty->device = device;
+ tty->driver = *driver;
+
+ tp_loc = &driver->termios[idx];
+ if (!*tp_loc) {
+ tp = (struct termios *) kmalloc(sizeof(struct termios),
+ GFP_KERNEL);
+ if (!tp)
+ goto free_mem_out;
+ *tp = driver->init_termios;
+ }
+
+ ltp_loc = &driver->termios_locked[idx];
+ if (!*ltp_loc) {
+ ltp = (struct termios *) kmalloc(sizeof(struct termios),
+ GFP_KERNEL);
+ if (!ltp)
+ goto free_mem_out;
+ memset(ltp, 0, sizeof(struct termios));
+ }
+
+ if (driver->type == TTY_DRIVER_TYPE_PTY) {
+ o_tty = alloc_tty_struct();
+ if (!o_tty)
+ goto free_mem_out;
+ initialize_tty_struct(o_tty);
+ o_tty->device = (kdev_t) MKDEV(driver->other->major,
+ driver->other->minor_start + idx);
+ o_tty->driver = *driver->other;
+
+ o_tp_loc = &driver->other->termios[idx];
+ if (!*o_tp_loc) {
+ o_tp = (struct termios *)
+ kmalloc(sizeof(struct termios), GFP_KERNEL);
+ if (!o_tp)
+ goto free_mem_out;
+ *o_tp = driver->other->init_termios;
+ }
+
+ o_ltp_loc = &driver->other->termios_locked[idx];
+ if (!*o_ltp_loc) {
+ o_ltp = (struct termios *)
+ kmalloc(sizeof(struct termios), GFP_KERNEL);
+ if (!o_ltp)
+ goto free_mem_out;
+ memset(o_ltp, 0, sizeof(struct termios));
+ }
+
+ /*
+ * Everything allocated ... set up the o_tty structure.
+ */
+ driver->other->table[idx] = o_tty;
+ if (!*o_tp_loc)
+ *o_tp_loc = o_tp;
+ if (!*o_ltp_loc)
+ *o_ltp_loc = o_ltp;
+ o_tty->termios = *o_tp_loc;
+ o_tty->termios_locked = *o_ltp_loc;
+ (*driver->other->refcount)++;
+ if (driver->subtype == PTY_TYPE_MASTER)
+ o_tty->count++;
+
+ /* Establish the links in both directions */
+ tty->link = o_tty;
+ o_tty->link = tty;
+ }
+
+ /*
+ * All structures have been allocated, so now we install them.
+ * Failures after this point use release_mem to clean up, so
+ * there's no need to null out the local pointers.
+ */
+ driver->table[idx] = tty;
+
+ if (!*tp_loc)
+ *tp_loc = tp;
+ if (!*ltp_loc)
+ *ltp_loc = ltp;
+ tty->termios = *tp_loc;
+ tty->termios_locked = *ltp_loc;
+ (*driver->refcount)++;
+ tty->count++;
+
+ /*
+ * Structures all installed ... call the ldisc open routines.
+ * If we fail here just call release_mem to clean up. No need
+ * to decrement the use counts, as release_mem doesn't care.
+ */
+ if (tty->ldisc.open) {
+ retval = (tty->ldisc.open)(tty);
+ if (retval)
+ goto release_mem_out;
+ }
+ if (o_tty && o_tty->ldisc.open) {
+ retval = (o_tty->ldisc.open)(o_tty);
+ if (retval) {
+ if (tty->ldisc.close)
+ (tty->ldisc.close)(tty);
+ goto release_mem_out;
+ }
+ set_bit(TTY_LDISC, &o_tty->flags);
+ tty_ldisc_enable(o_tty);
+ }
+ tty_ldisc_enable(tty);
+ goto success;
+
+ /*
+ * This fast open can be used if the tty is already open.
+ * No memory is allocated, and the only failures are from
+ * attempting to open a closing tty or attempting multiple
+ * opens on a pty master.
+ */
+fast_track:
+ if (test_bit(TTY_CLOSING, &tty->flags)) {
+ retval = -EIO;
+ goto end_init;
+ }
+ if (driver->type == TTY_DRIVER_TYPE_PTY &&
+ driver->subtype == PTY_TYPE_MASTER) {
+ /*
+ * special case for PTY masters: only one open permitted,
+ * and the slave side open count is incremented as well.
+ */
+ if (tty->count) {
+ retval = -EIO;
+ goto end_init;
+ }
+ tty->link->count++;
+ }
+ tty->count++;
+ tty->driver = *driver; /* N.B. why do this every time?? */
+ /* FIXME */
+ if(!test_bit(TTY_LDISC, &tty->flags))
+ printk(KERN_ERR "init_dev but no ldisc\n");
+success:
+ *ret_tty = tty;
+
+ /* All paths come through here to release the semaphore */
+end_init:
+ up_tty_sem(idx);
+ return retval;
+
+ /* Release locally allocated memory ... nothing placed in slots */
+free_mem_out:
+ if (o_tp)
+ kfree(o_tp);
+ if (o_tty)
+ free_tty_struct(o_tty);
+ if (ltp)
+ kfree(ltp);
+ if (tp)
+ kfree(tp);
+ free_tty_struct(tty);
+
+fail_no_mem:
+ retval = -ENOMEM;
+ goto end_init;
+
+ /* call the tty release_mem routine to clean out this slot */
+release_mem_out:
+ printk(KERN_INFO "init_dev: ldisc open failed, "
+ "clearing slot %d\n", idx);
+ release_mem(tty, idx);
+ goto end_init;
+}
+
+/*
+ * Releases memory associated with a tty structure, and clears out the
+ * driver table slots.
+ */
+static void release_mem(struct tty_struct *tty, int idx)
+{
+ struct tty_struct *o_tty;
+ struct termios *tp;
+
+ if ((o_tty = tty->link) != NULL) {
+ o_tty->driver.table[idx] = NULL;
+ if (o_tty->driver.flags & TTY_DRIVER_RESET_TERMIOS) {
+ tp = o_tty->driver.termios[idx];
+ o_tty->driver.termios[idx] = NULL;
+ kfree(tp);
+ }
+ o_tty->magic = 0;
+ (*o_tty->driver.refcount)--;
+ list_del_init(&o_tty->tty_files);
+ free_tty_struct(o_tty);
+ }
+
+ tty->driver.table[idx] = NULL;
+ if (tty->driver.flags & TTY_DRIVER_RESET_TERMIOS) {
+ tp = tty->driver.termios[idx];
+ tty->driver.termios[idx] = NULL;
+ kfree(tp);
+ }
+ tty->magic = 0;
+ (*tty->driver.refcount)--;
+ list_del_init(&tty->tty_files);
+ free_tty_struct(tty);
+}
+
+/*
+ * Even releasing the tty structures is a tricky business.. We have
+ * to be very careful that the structures are all released at the
+ * same time, as interrupts might otherwise get the wrong pointers.
+ *
+ * WSH 09/09/97: rewritten to avoid some nasty race conditions that could
+ * lead to double frees or releasing memory still in use.
+ */
+static void release_dev(struct file * filp)
+{
+ struct tty_struct *tty, *o_tty;
+ int pty_master, tty_closing, o_tty_closing, do_sleep;
+ int idx;
+ char buf[64];
+ unsigned long flags;
+
+ tty = (struct tty_struct *)filp->private_data;
+ if (tty_paranoia_check(tty, filp->f_dentry->d_inode->i_rdev, "release_dev"))
+ return;
+
+ check_tty_count(tty, "release_dev");
+
+ tty_fasync(-1, filp, 0);
+
+ idx = MINOR(tty->device) - tty->driver.minor_start;
+ pty_master = (tty->driver.type == TTY_DRIVER_TYPE_PTY &&
+ tty->driver.subtype == PTY_TYPE_MASTER);
+ o_tty = tty->link;
+
+#ifdef TTY_PARANOIA_CHECK
+ if (idx < 0 || idx >= tty->driver.num) {
+ printk(KERN_DEBUG "release_dev: bad idx when trying to "
+ "free (%s)\n", kdevname(tty->device));
+ return;
+ }
+ if (tty != tty->driver.table[idx]) {
+ printk(KERN_DEBUG "release_dev: driver.table[%d] not tty "
+ "for (%s)\n", idx, kdevname(tty->device));
+ return;
+ }
+ if (tty->termios != tty->driver.termios[idx]) {
+ printk(KERN_DEBUG "release_dev: driver.termios[%d] not termios "
+ "for (%s)\n",
+ idx, kdevname(tty->device));
+ return;
+ }
+ if (tty->termios_locked != tty->driver.termios_locked[idx]) {
+ printk(KERN_DEBUG "release_dev: driver.termios_locked[%d] not "
+ "termios_locked for (%s)\n",
+ idx, kdevname(tty->device));
+ return;
+ }
+#endif
+
+#ifdef TTY_DEBUG_HANGUP
+ printk(KERN_DEBUG "release_dev of %s (tty count=%d)...",
+ tty_name(tty, buf), tty->count);
+#endif
+
+#ifdef TTY_PARANOIA_CHECK
+ if (tty->driver.other) {
+ if (o_tty != tty->driver.other->table[idx]) {
+ printk(KERN_DEBUG "release_dev: other->table[%d] "
+ "not o_tty for (%s)\n",
+ idx, kdevname(tty->device));
+ return;
+ }
+ if (o_tty->termios != tty->driver.other->termios[idx]) {
+ printk(KERN_DEBUG "release_dev: other->termios[%d] "
+ "not o_termios for (%s)\n",
+ idx, kdevname(tty->device));
+ return;
+ }
+ if (o_tty->termios_locked !=
+ tty->driver.other->termios_locked[idx]) {
+ printk(KERN_DEBUG "release_dev: other->termios_locked["
+ "%d] not o_termios_locked for (%s)\n",
+ idx, kdevname(tty->device));
+ return;
+ }
+ if (o_tty->link != tty) {
+ printk(KERN_DEBUG "release_dev: bad pty pointers\n");
+ return;
+ }
+ }
+#endif
+
+ if (tty->driver.close)
+ tty->driver.close(tty, filp);
+
+ /*
+ * Sanity check: if tty->count is going to zero, there shouldn't be
+ * any waiters on tty->read_wait or tty->write_wait. We test the
+ * wait queues and kick everyone out _before_ actually starting to
+ * close. This ensures that we won't block while releasing the tty
+ * structure.
+ *
+ * The test for the o_tty closing is necessary, since the master and
+ * slave sides may close in any order. If the slave side closes out
+ * first, its count will be one, since the master side holds an open.
+ * Thus this test wouldn't be triggered at the time the slave closes,
+ * so we do it now.
+ *
+ * Note that it's possible for the tty to be opened again while we're
+ * flushing out waiters. By recalculating the closing flags before
+ * each iteration we avoid any problems.
+ */
+ while (1) {
+ tty_closing = tty->count <= 1;
+ o_tty_closing = o_tty &&
+ (o_tty->count <= (pty_master ? 1 : 0));
+ do_sleep = 0;
+
+ if (tty_closing) {
+ if (waitqueue_active(&tty->read_wait)) {
+ wake_up(&tty->read_wait);
+ do_sleep++;
+ }
+ if (waitqueue_active(&tty->write_wait)) {
+ wake_up(&tty->write_wait);
+ do_sleep++;
+ }
+ }
+ if (o_tty_closing) {
+ if (waitqueue_active(&o_tty->read_wait)) {
+ wake_up(&o_tty->read_wait);
+ do_sleep++;
+ }
+ if (waitqueue_active(&o_tty->write_wait)) {
+ wake_up(&o_tty->write_wait);
+ do_sleep++;
+ }
+ }
+ if (!do_sleep)
+ break;
+
+ printk(KERN_WARNING "release_dev: %s: read/write wait queue "
+ "active!\n", tty_name(tty, buf));
+ schedule();
+ }
+
+ /*
+ * The closing flags are now consistent with the open counts on
+ * both sides, and we've completed the last operation that could
+ * block, so it's safe to proceed with closing.
+ */
+ if (pty_master) {
+ if (--o_tty->count < 0) {
+ printk(KERN_WARNING "release_dev: bad pty slave count "
+ "(%d) for %s\n",
+ o_tty->count, tty_name(o_tty, buf));
+ o_tty->count = 0;
+ }
+ }
+ if (--tty->count < 0) {
+ printk(KERN_WARNING "release_dev: bad tty->count (%d) for %s\n",
+ tty->count, tty_name(tty, buf));
+ tty->count = 0;
+ }
+
+ /*
+ * We've decremented tty->count, so we should zero out
+ * filp->private_data, to break the link between the tty and
+ * the file descriptor. Otherwise if filp_close() blocks before
+ * the file descriptor is removed from the inuse_filp
+ * list, check_tty_count() could observe a discrepancy and
+ * printk a warning message to the user.
+ */
+ filp->private_data = 0;
+
+ /*
+ * Perform some housekeeping before deciding whether to return.
+ *
+ * Set the TTY_CLOSING flag if this was the last open. In the
+ * case of a pty we may have to wait around for the other side
+ * to close, and TTY_CLOSING makes sure we can't be reopened.
+ */
+ if(tty_closing)
+ set_bit(TTY_CLOSING, &tty->flags);
+ if(o_tty_closing)
+ set_bit(TTY_CLOSING, &o_tty->flags);
+
+ /*
+ * If _either_ side is closing, make sure there aren't any
+ * processes that still think tty or o_tty is their controlling
+ * tty.
+ */
+ if (tty_closing || o_tty_closing) {
+ struct task_struct *p;
+
+ read_lock(&tasklist_lock);
+ for_each_task(p) {
+ if (p->tty == tty || (o_tty && p->tty == o_tty))
+ p->tty = NULL;
+ }
+ read_unlock(&tasklist_lock);
+ }
+
+ /* check whether both sides are closing ... */
+ if (!tty_closing || (o_tty && !o_tty_closing))
+ return;
+
+#ifdef TTY_DEBUG_HANGUP
+ printk(KERN_DEBUG "freeing tty structure...");
+#endif
+
+ /*
+ * Prevent flush_to_ldisc() from rescheduling the work for later. Then
+ * kill any delayed work. As this is the final close it does not
+ * race with the set_ldisc code path.
+ */
+ clear_bit(TTY_LDISC, &tty->flags);
+ clear_bit(TTY_DONT_FLIP, &tty->flags);
+
+ /*
+ * Wait for ->hangup_work and ->flip.work handlers to terminate
+ */
+
+ run_task_queue(&tq_timer);
+ flush_scheduled_tasks();
+
+ /*
+ * Wait for any short term users (we know they are just driver
+ * side waiters, as the file is closing, so the user count on the
+ * file side is zero).
+ */
+
+ spin_lock_irqsave(&tty_ldisc_lock, flags);
+ while(tty->ldisc.refcount)
+ {
+ spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+ wait_event(tty_ldisc_wait, tty->ldisc.refcount == 0);
+ spin_lock_irqsave(&tty_ldisc_lock, flags);
+ }
+ spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+
+ /*
+ * Shutdown the current line discipline, and reset it to N_TTY.
+ * N.B. why reset ldisc when we're releasing the memory??
+ * FIXME: this MUST get fixed for the new reflocking
+ */
+ if (tty->ldisc.close)
+ (tty->ldisc.close)(tty);
+ tty_ldisc_put(tty->ldisc.num);
+
+ /*
+ * Switch the line discipline back
+ */
+ tty_ldisc_assign(tty, tty_ldisc_get(N_TTY));
+ tty_set_termios_ldisc(tty,N_TTY);
+
+ if (o_tty) {
+ /* FIXME: could o_tty be in setldisc here ? */
+ clear_bit(TTY_LDISC, &o_tty->flags);
+ if (o_tty->ldisc.close)
+ (o_tty->ldisc.close)(o_tty);
+ tty_ldisc_put(o_tty->ldisc.num);
+ tty_ldisc_assign(o_tty, tty_ldisc_get(N_TTY));
+ tty_set_termios_ldisc(o_tty,N_TTY);
+ }
+
+ /*
+ * The release_mem function takes care of the details of clearing
+ * the slots and preserving the termios structure.
+ */
+ release_mem(tty, idx);
+}
+
+/*
+ * tty_open and tty_release keep up the tty count that contains the
+ * number of opens done on a tty. We cannot use the inode-count, as
+ * different inodes might point to the same tty.
+ *
+ * Open-counting is needed for pty masters, as well as for keeping
+ * track of serial lines: DTR is dropped when the last close happens.
+ * (This is not done solely through tty->count, now. - Ted 1/27/92)
+ *
+ * The termios state of a pty is reset on first open so that
+ * settings don't persist across reuse.
+ */
+static int tty_open(struct inode * inode, struct file * filp)
+{
+ struct tty_struct *tty;
+ int noctty, retval;
+ kdev_t device;
+ unsigned short saved_flags;
+ char buf[64];
+
+ saved_flags = filp->f_flags;
+retry_open:
+ noctty = filp->f_flags & O_NOCTTY;
+ device = inode->i_rdev;
+ if (device == TTY_DEV) {
+ if (!current->tty)
+ return -ENXIO;
+ device = current->tty->device;
+ filp->f_flags |= O_NONBLOCK; /* Don't let /dev/tty block */
+ /* noctty = 1; */
+ }
+#ifdef CONFIG_VT
+ if (device == CONSOLE_DEV) {
+ extern int fg_console;
+ device = MKDEV(TTY_MAJOR, fg_console + 1);
+ noctty = 1;
+ }
+#endif
+ if (device == SYSCONS_DEV) {
+ struct console *c = console_drivers;
+ while(c && !c->device)
+ c = c->next;
+ if (!c)
+ return -ENODEV;
+ device = c->device(c);
+ filp->f_flags |= O_NONBLOCK; /* Don't let /dev/console block */
+ noctty = 1;
+ }
+
+ if (device == PTMX_DEV) {
+#ifdef CONFIG_UNIX98_PTYS
+
+ /* find a free pty. */
+ int major, minor;
+ struct tty_driver *driver;
+
+ /* find a device that is not in use. */
+ retval = -1;
+ for ( major = 0 ; major < UNIX98_NR_MAJORS ; major++ ) {
+ driver = &ptm_driver[major];
+ for (minor = driver->minor_start ;
+ minor < driver->minor_start + driver->num ;
+ minor++) {
+ device = MKDEV(driver->major, minor);
+ if (!init_dev(device, &tty)) goto ptmx_found; /* ok! */
+ }
+ }
+ return -EIO; /* no free ptys */
+ ptmx_found:
+ set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
+ minor -= driver->minor_start;
+ devpts_pty_new(driver->other->name_base + minor, MKDEV(driver->other->major, minor + driver->other->minor_start));
+ tty_register_devfs(&pts_driver[major], DEVFS_FL_DEFAULT,
+ pts_driver[major].minor_start + minor);
+ noctty = 1;
+ goto init_dev_done;
+
+#else /* CONFIG_UNIX_98_PTYS */
+
+ return -ENODEV;
+
+#endif /* CONFIG_UNIX_98_PTYS */
+ }
+
+ retval = init_dev(device, &tty);
+ if (retval)
+ return retval;
+
+#ifdef CONFIG_UNIX98_PTYS
+init_dev_done:
+#endif
+ filp->private_data = tty;
+ file_move(filp, &tty->tty_files);
+ check_tty_count(tty, "tty_open");
+ if (tty->driver.type == TTY_DRIVER_TYPE_PTY &&
+ tty->driver.subtype == PTY_TYPE_MASTER)
+ noctty = 1;
+#ifdef TTY_DEBUG_HANGUP
+ printk(KERN_DEBUG "opening %s...", tty_name(tty, buf));
+#endif
+ if (tty->driver.open)
+ retval = tty->driver.open(tty, filp);
+ else
+ retval = -ENODEV;
+ filp->f_flags = saved_flags;
+
+ if (!retval && test_bit(TTY_EXCLUSIVE, &tty->flags) && !suser())
+ retval = -EBUSY;
+
+ if (retval) {
+#ifdef TTY_DEBUG_HANGUP
+ printk(KERN_DEBUG "error %d in opening %s...", retval,
+ tty_name(tty, buf));
+#endif
+
+ release_dev(filp);
+ if (retval != -ERESTARTSYS)
+ return retval;
+ if (signal_pending(current))
+ return retval;
+ schedule();
+ /*
+ * Need to reset f_op in case a hangup happened.
+ */
+ filp->f_op = &tty_fops;
+ goto retry_open;
+ }
+ if (!noctty &&
+ current->leader &&
+ !current->tty &&
+ tty->session == 0) {
+ task_lock(current);
+ current->tty = tty;
+ task_unlock(current);
+ current->tty_old_pgrp = 0;
+ tty->session = current->session;
+ tty->pgrp = current->pgrp;
+ }
+ if ((tty->driver.type == TTY_DRIVER_TYPE_SERIAL) &&
+ (tty->driver.subtype == SERIAL_TYPE_CALLOUT) &&
+ (tty->count == 1)) {
+ static int nr_warns;
+ if (nr_warns < 5) {
+ printk(KERN_WARNING "tty_io.c: "
+ "process %d (%s) used obsolete /dev/%s - "
+ "update software to use /dev/ttyS%d\n",
+ current->pid, current->comm,
+ tty_name(tty, buf), TTY_NUMBER(tty));
+ nr_warns++;
+ }
+ }
+ return 0;
+}
+
+static int tty_release(struct inode * inode, struct file * filp)
+{
+ lock_kernel();
+ release_dev(filp);
+ unlock_kernel();
+ return 0;
+}
+
+/* No kernel lock held - fine */
+static unsigned int tty_poll(struct file * filp, poll_table * wait)
+{
+ struct tty_struct * tty;
+ struct tty_ldisc *ld;
+ int ret = 0;
+
+ tty = (struct tty_struct *)filp->private_data;
+ if (tty_paranoia_check(tty, filp->f_dentry->d_inode->i_rdev, "tty_poll"))
+ return 0;
+
+ ld = tty_ldisc_ref_wait(tty);
+ if (ld->poll)
+ ret = (ld->poll)(tty, filp, wait);
+ tty_ldisc_deref(ld);
+ return ret;
+}
+
+static int tty_fasync(int fd, struct file * filp, int on)
+{
+ struct tty_struct * tty;
+ int retval;
+
+ tty = (struct tty_struct *)filp->private_data;
+ if (tty_paranoia_check(tty, filp->f_dentry->d_inode->i_rdev, "tty_fasync"))
+ return 0;
+
+ retval = fasync_helper(fd, filp, on, &tty->fasync);
+ if (retval <= 0)
+ return retval;
+
+ if (on) {
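+ /* With no reader currently waiting, async notification should
+ fire for every single byte received. */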
+ if (!waitqueue_active(&tty->read_wait))
+ tty->minimum_to_wake = 1;
+ if (filp->f_owner.pid == 0) {
+ filp->f_owner.pid = (-tty->pgrp) ? : current->pid;
+ filp->f_owner.uid = current->uid;
+ filp->f_owner.euid = current->euid;
+ }
+ } else {
+ if (!tty->fasync && !waitqueue_active(&tty->read_wait))
+ tty->minimum_to_wake = N_TTY_BUF_SIZE;
+ }
+ return 0;
+}
+
+static int tiocsti(struct tty_struct *tty, char * arg)
+{
+ char ch, mbz = 0;
+ struct tty_ldisc *ld;
+
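+ /* TIOCSTI: simulate terminal input by pushing one user-supplied
+ byte straight into the line discipline's receive path. */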
+ if ((current->tty != tty) && !suser())
+ return -EPERM;
+ if (get_user(ch, arg))
+ return -EFAULT;
+ ld = tty_ldisc_ref_wait(tty);
+ ld->receive_buf(tty, &ch, &mbz, 1);
+ tty_ldisc_deref(ld);
+ return 0;
+}
+
+static int tiocgwinsz(struct tty_struct *tty, struct winsize * arg)
+{
+ if (copy_to_user(arg, &tty->winsize, sizeof(*arg)))
+ return -EFAULT;
+ return 0;
+}
+
+static int tiocswinsz(struct tty_struct *tty, struct tty_struct *real_tty,
+ struct winsize * arg)
+{
+ struct winsize tmp_ws;
+
+ if (copy_from_user(&tmp_ws, arg, sizeof(*arg)))
+ return -EFAULT;
+ if (!memcmp(&tmp_ws, &tty->winsize, sizeof(*arg)))
+ return 0;
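+ /* The size really changed: signal SIGWINCH to the foreground
+ process group on each side of a pty pair. */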
+ if (tty->pgrp > 0)
+ kill_pg(tty->pgrp, SIGWINCH, 1);
+ if ((real_tty->pgrp != tty->pgrp) && (real_tty->pgrp > 0))
+ kill_pg(real_tty->pgrp, SIGWINCH, 1);
+ tty->winsize = tmp_ws;
+ real_tty->winsize = tmp_ws;
+ return 0;
+}
+
+static int tioccons(struct inode *inode, struct file *file)
+{
+ if (inode->i_rdev == SYSCONS_DEV ||
+ inode->i_rdev == CONSOLE_DEV) {
+ struct file *f;
+ if (!suser())
+ return -EPERM;
+ spin_lock(&redirect_lock);
+ f = redirect;
+ redirect = NULL;
+ spin_unlock(&redirect_lock);
+ if (f)
+ fput(f);
+ return 0;
+ }
+ spin_lock(&redirect_lock);
+ if (redirect) {
+ spin_unlock(&redirect_lock);
+ return -EBUSY;
+ }
+ get_file(file);
+ redirect = file;
+ spin_unlock(&redirect_lock);
+ return 0;
+}
+
+
+static int fionbio(struct file *file, int *arg)
+{
+ int nonblock;
+
+ if (get_user(nonblock, arg))
+ return -EFAULT;
+
+ if (nonblock)
+ file->f_flags |= O_NONBLOCK;
+ else
+ file->f_flags &= ~O_NONBLOCK;
+ return 0;
+}
+
+static int tiocsctty(struct tty_struct *tty, int arg)
+{
+ if (current->leader &&
+ (current->session == tty->session))
+ return 0;
+ /*
+ * The process must be a session leader and
+ * not have a controlling tty already.
+ */
+ if (!current->leader || current->tty)
+ return -EPERM;
+ if (tty->session > 0) {
+ /*
+ * This tty is already the controlling
+ * tty for another session group!
+ */
+ if ((arg == 1) && suser()) {
+ /*
+ * Steal it away
+ */
+ struct task_struct *p;
+
+ read_lock(&tasklist_lock);
+ for_each_task(p)
+ if (p->tty == tty)
+ p->tty = NULL;
+ read_unlock(&tasklist_lock);
+ } else
+ return -EPERM;
+ }
+ task_lock(current);
+ current->tty = tty;
+ task_unlock(current);
+ current->tty_old_pgrp = 0;
+ tty->session = current->session;
+ tty->pgrp = current->pgrp;
+ return 0;
+}
+
+static int tiocgpgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t *arg)
+{
+ /*
+ * (tty == real_tty) is a cheap way of
+ * testing if the tty is NOT a master pty.
+ */
+ if (tty == real_tty && current->tty != real_tty)
+ return -ENOTTY;
+ return put_user(real_tty->pgrp, arg);
+}
+
+static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t *arg)
+{
+ pid_t pgrp;
+ int retval = tty_check_change(real_tty);
+
+ if (retval == -EIO)
+ return -ENOTTY;
+ if (retval)
+ return retval;
+ if (!current->tty ||
+ (current->tty != real_tty) ||
+ (real_tty->session != current->session))
+ return -ENOTTY;
+ if (get_user(pgrp, (pid_t *) arg))
+ return -EFAULT;
+ if (pgrp < 0)
+ return -EINVAL;
+ if (session_of_pgrp(pgrp) != current->session)
+ return -EPERM;
+ real_tty->pgrp = pgrp;
+ return 0;
+}
+
+static int tiocgsid(struct tty_struct *tty, struct tty_struct *real_tty, pid_t *arg)
+{
+ /*
+ * (tty == real_tty) is a cheap way of
+ * testing if the tty is NOT a master pty.
+ */
+ if (tty == real_tty && current->tty != real_tty)
+ return -ENOTTY;
+ if (real_tty->session <= 0)
+ return -ENOTTY;
+ return put_user(real_tty->session, arg);
+}
+
+static int tiocttygstruct(struct tty_struct *tty, struct tty_struct *arg)
+{
+ if (copy_to_user(arg, tty, sizeof(*arg)))
+ return -EFAULT;
+ return 0;
+}
+
+static int tiocsetd(struct tty_struct *tty, int *arg)
+{
+ int ldisc;
+
+ if (get_user(ldisc, arg))
+ return -EFAULT;
+ return tty_set_ldisc(tty, ldisc);
+}
+
+static int send_break(struct tty_struct *tty, int duration)
+{
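+ /* Assert break, sleep for the requested number of jiffies unless a
+ signal is already pending, then clear the break again. */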
+ tty->driver.break_ctl(tty, -1);
+ if (!signal_pending(current)) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(duration);
+ }
+ tty->driver.break_ctl(tty, 0);
+ if (signal_pending(current))
+ return -EINTR;
+ return 0;
+}
+
+static int tty_generic_brk(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg)
+{
+ if (cmd == TCSBRK && arg)
+ {
+ /* tcdrain case */
+ int retval = tty_check_change(tty);
+ if (retval)
+ return retval;
+ tty_wait_until_sent(tty, 0);
+ if (signal_pending(current))
+ return -EINTR;
+ }
+ return 0;
+}
+
+/*
+ * Split this up, as gcc can choke on it otherwise..
+ */
+int tty_ioctl(struct inode * inode, struct file * file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct tty_struct *tty, *real_tty;
+ int retval;
+ struct tty_ldisc *ld;
+
+ tty = (struct tty_struct *)file->private_data;
+ if (tty_paranoia_check(tty, inode->i_rdev, "tty_ioctl"))
+ return -EINVAL;
+
+ real_tty = tty;
+ if (tty->driver.type == TTY_DRIVER_TYPE_PTY &&
+ tty->driver.subtype == PTY_TYPE_MASTER)
+ real_tty = tty->link;
+
+ /*
+ * Break handling by driver
+ */
+ if (!tty->driver.break_ctl) {
+ switch(cmd) {
+ case TIOCSBRK:
+ case TIOCCBRK:
+ if (tty->driver.ioctl)
+ return tty->driver.ioctl(tty, file, cmd, arg);
+ return -EINVAL;
+
+ /* These two ioctl's always return success; even if */
+ /* the driver doesn't support them. */
+ case TCSBRK:
+ case TCSBRKP:
+ retval = -ENOIOCTLCMD;
+ if (tty->driver.ioctl)
+ retval = tty->driver.ioctl(tty, file, cmd, arg);
+ /* Not driver handled */
+ if (retval == -ENOIOCTLCMD)
+ retval = tty_generic_brk(tty, file, cmd, arg);
+ return retval;
+ }
+ }
+
+ /*
+ * Factor out some common prep work
+ */
+ switch (cmd) {
+ case TIOCSETD:
+ case TIOCSBRK:
+ case TIOCCBRK:
+ case TCSBRK:
+ case TCSBRKP:
+ retval = tty_check_change(tty);
+ if (retval)
+ return retval;
+ if (cmd != TIOCCBRK) {
+ tty_wait_until_sent(tty, 0);
+ if (signal_pending(current))
+ return -EINTR;
+ }
+ break;
+ }
+
+ switch (cmd) {
+ case TIOCSTI:
+ return tiocsti(tty, (char *)arg);
+ case TIOCGWINSZ:
+ return tiocgwinsz(tty, (struct winsize *) arg);
+ case TIOCSWINSZ:
+ return tiocswinsz(tty, real_tty, (struct winsize *) arg);
+ case TIOCCONS:
+ return real_tty!=tty ? -EINVAL : tioccons(inode, file);
+ case FIONBIO:
+ return fionbio(file, (int *) arg);
+ case TIOCEXCL:
+ set_bit(TTY_EXCLUSIVE, &tty->flags);
+ return 0;
+ case TIOCNXCL:
+ clear_bit(TTY_EXCLUSIVE, &tty->flags);
+ return 0;
+ case TIOCNOTTY:
+ if (current->tty != tty)
+ return -ENOTTY;
+ if (current->leader)
+ disassociate_ctty(0);
+ task_lock(current);
+ current->tty = NULL;
+ task_unlock(current);
+ return 0;
+ case TIOCSCTTY:
+ return tiocsctty(tty, arg);
+ case TIOCGPGRP:
+ return tiocgpgrp(tty, real_tty, (pid_t *) arg);
+ case TIOCSPGRP:
+ return tiocspgrp(tty, real_tty, (pid_t *) arg);
+ case TIOCGSID:
+ return tiocgsid(tty, real_tty, (pid_t *) arg);
+ case TIOCGETD:
+ /* FIXME: check this is ok */
+ return put_user(tty->ldisc.num, (int *) arg);
+ case TIOCSETD:
+ return tiocsetd(tty, (int *) arg);
+#ifdef CONFIG_VT
+ case TIOCLINUX:
+ return tioclinux(tty, arg);
+#endif
+ case TIOCTTYGSTRUCT:
+ return tiocttygstruct(tty, (struct tty_struct *) arg);
+
+ /*
+ * Break handling
+ */
+ case TIOCSBRK: /* Turn break on, unconditionally */
+ tty->driver.break_ctl(tty, -1);
+ return 0;
+
+ case TIOCCBRK: /* Turn break off, unconditionally */
+ tty->driver.break_ctl(tty, 0);
+ return 0;
+ case TCSBRK: /* SVID version: non-zero arg --> no break */
+ /*
+ * XXX is the above comment correct, or the
+ * code below correct? Is this ioctl used at
+ * all by anyone?
+ */
+ if (!arg)
+ return send_break(tty, HZ/4);
+ return 0;
+ case TCSBRKP: /* support for POSIX tcsendbreak() */
+ return send_break(tty, arg ? arg*(HZ/10) : HZ/4);
+ }
+ if (tty->driver.ioctl) {
+ retval = (tty->driver.ioctl)(tty, file, cmd, arg);
+ if (retval != -ENOIOCTLCMD)
+ return retval;
+ }
+ ld = tty_ldisc_ref_wait(tty);
+ retval = -EINVAL;
+ if (ld->ioctl) {
+ retval = ld->ioctl(tty, file, cmd, arg);
+ if (retval == -ENOIOCTLCMD)
+ retval = -EINVAL;
+ }
+ tty_ldisc_deref(ld);
+ return retval;
+}
+
+
+/*
+ * This implements the "Secure Attention Key" --- the idea is to
+ * prevent trojan horses by killing all processes associated with this
+ * tty when the user hits the "Secure Attention Key". Required for
+ * super-paranoid applications --- see the Orange Book for more details.
+ *
+ * This code could be nicer; ideally it should send a HUP, wait a few
+ * seconds, then send a INT, and then a KILL signal. But you then
+ * have to coordinate with the init process, since all processes associated
+ * with the current tty must be dead before the new getty is allowed
+ * to spawn.
+ *
+ * Now, if it would be correct ;-/ The current code has a nasty hole -
+ * it doesn't catch files in flight. We may send the descriptor to ourselves
+ * via AF_UNIX socket, close it and later fetch from socket. FIXME.
+ *
+ * Nasty bug: do_SAK is being called in interrupt context. This can
+ * deadlock. We punt it up to process context. AKPM - 16Mar2001
+ */
+static void __do_SAK(void *arg)
+{
+ struct tty_struct *tty = arg;
+#ifdef TTY_SOFT_SAK
+ tty_hangup(tty);
+#else
+ struct task_struct *p;
+ int session;
+ int i;
+ struct file *filp;
+ struct tty_ldisc *disc;
+
+ if (!tty)
+ return;
+ session = tty->session;
+ /* We don't want an ldisc switch during this */
+ disc = tty_ldisc_ref(tty);
+ if (disc && disc->flush_buffer)
+ disc->flush_buffer(tty);
+ tty_ldisc_deref(disc);
+
+ if (tty->driver.flush_buffer)
+ tty->driver.flush_buffer(tty);
+
+ read_lock(&tasklist_lock);
+ for_each_task(p) {
+ if ((p->tty == tty) ||
+ ((session > 0) && (p->session == session))) {
+ send_sig(SIGKILL, p, 1);
+ continue;
+ }
+ task_lock(p);
+ if (p->files) {
+ read_lock(&p->files->file_lock);
+ for (i=0; i < p->files->max_fds; i++) {
+ filp = fcheck_files(p->files, i);
+ if (filp && (filp->f_op == &tty_fops) &&
+ (filp->private_data == tty)) {
+ send_sig(SIGKILL, p, 1);
+ break;
+ }
+ }
+ read_unlock(&p->files->file_lock);
+ }
+ task_unlock(p);
+ }
+ read_unlock(&tasklist_lock);
+#endif
+}
+
+/*
+ * The tq handling here is a little racy - tty->SAK_tq may already be queued.
+ * But there's no mechanism to fix that without futzing with tqueue_lock.
+ * Fortunately we don't need to worry, because if ->SAK_tq is already queued,
+ * the values which we write to it will be identical to the values which it
+ * already has. --akpm
+ */
+void do_SAK(struct tty_struct *tty)
+{
+ if (!tty)
+ return;
+ PREPARE_TQUEUE(&tty->SAK_tq, __do_SAK, tty);
+ schedule_task(&tty->SAK_tq);
+}
+
+/*
+ * This routine is called out of the software interrupt to flush data
+ * from the flip buffer to the line discipline.
+ */
+static void flush_to_ldisc(void *private_)
+{
+ struct tty_struct *tty = (struct tty_struct *) private_;
+ unsigned char *cp;
+ char *fp;
+ int count;
+ unsigned long flags;
+ struct tty_ldisc *disc;
+
+ disc = tty_ldisc_ref(tty);
+ if (disc == NULL) /* !TTY_LDISC */
+ return;
+
+ if (test_bit(TTY_DONT_FLIP, &tty->flags)) {
+ queue_task(&tty->flip.tqueue, &tq_timer);
+ goto out;
+ }
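+ /* Swap the two halves of the flip buffer: drain the half the driver
+ just filled while interrupt-time input lands in the other half. */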
+ if (tty->flip.buf_num) {
+ cp = tty->flip.char_buf + TTY_FLIPBUF_SIZE;
+ fp = tty->flip.flag_buf + TTY_FLIPBUF_SIZE;
+ tty->flip.buf_num = 0;
+
+ save_flags(flags); cli();
+ tty->flip.char_buf_ptr = tty->flip.char_buf;
+ tty->flip.flag_buf_ptr = tty->flip.flag_buf;
+ } else {
+ cp = tty->flip.char_buf;
+ fp = tty->flip.flag_buf;
+ tty->flip.buf_num = 1;
+
+ save_flags(flags); cli();
+ tty->flip.char_buf_ptr = tty->flip.char_buf + TTY_FLIPBUF_SIZE;
+ tty->flip.flag_buf_ptr = tty->flip.flag_buf + TTY_FLIPBUF_SIZE;
+ }
+ count = tty->flip.count;
+ tty->flip.count = 0;
+ restore_flags(flags);
+
+ disc->receive_buf(tty, cp, fp, count);
+out:
+ tty_ldisc_deref(disc);
+}
+
+/*
+ * Call the ldisc flush directly from a driver. This function may
+ * return an error and need retrying by the user.
+ */
+
+int tty_push_data(struct tty_struct *tty, unsigned char *cp, unsigned char *fp, int count)
+{
+ int ret = 0;
+ struct tty_ldisc *disc;
+
+ disc = tty_ldisc_ref(tty);
+ if(test_bit(TTY_DONT_FLIP, &tty->flags))
+ ret = -EAGAIN;
+ else if(disc == NULL)
+ ret = -EIO;
+ else
+ disc->receive_buf(tty, cp, fp, count);
+ tty_ldisc_deref(disc);
+ return ret;
+}
+
+/*
+ * Routine which returns the baud rate of the tty
+ *
+ * Note that the baud_table needs to be kept in sync with the
+ * include/asm/termbits.h file.
+ */
+static int baud_table[] = {
+ 0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
+ 9600, 19200, 38400, 57600, 115200, 230400, 460800,
+#ifdef __sparc__
+ 76800, 153600, 307200, 614400, 921600
+#else
+ 500000, 576000, 921600, 1000000, 1152000, 1500000, 2000000,
+ 2500000, 3000000, 3500000, 4000000
+#endif
+};
+
+static int n_baud_table = sizeof(baud_table)/sizeof(int);
+
+int tty_get_baud_rate(struct tty_struct *tty)
+{
+ unsigned int cflag, i;
+
+ cflag = tty->termios->c_cflag;
+
+ i = cflag & CBAUD;
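+ /* Extended CBAUDEX codes map to the table slots that follow B38400
+ (index 15), hence the +15 offset below. */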
+ if (i & CBAUDEX) {
+ i &= ~CBAUDEX;
+ if (i < 1 || i+15 >= n_baud_table)
+ tty->termios->c_cflag &= ~CBAUDEX;
+ else
+ i += 15;
+ }
+ if (i==15 && tty->alt_speed) {
+ if (!tty->warned) {
+ printk(KERN_WARNING "Use of setserial/setrocket to "
+ "set SPD_* flags is deprecated\n");
+ tty->warned = 1;
+ }
+ return(tty->alt_speed);
+ }
+
+ return baud_table[i];
+}
+
+void tty_flip_buffer_push(struct tty_struct *tty)
+{
+ if (tty->low_latency)
+ flush_to_ldisc((void *) tty);
+ else
+ queue_task(&tty->flip.tqueue, &tq_timer);
+}
+
+/*
+ * This subroutine initializes a tty structure.
+ */
+static void initialize_tty_struct(struct tty_struct *tty)
+{
+ memset(tty, 0, sizeof(struct tty_struct));
+ tty->magic = TTY_MAGIC;
+ tty_ldisc_assign(tty, tty_ldisc_get(N_TTY));
+ tty->pgrp = -1;
+ tty->flip.char_buf_ptr = tty->flip.char_buf;
+ tty->flip.flag_buf_ptr = tty->flip.flag_buf;
+ tty->flip.tqueue.routine = flush_to_ldisc;
+ tty->flip.tqueue.data = tty;
+ init_MUTEX(&tty->flip.pty_sem);
+ init_MUTEX(&tty->termios_sem);
+ init_waitqueue_head(&tty->write_wait);
+ init_waitqueue_head(&tty->read_wait);
+ tty->tq_hangup.routine = do_tty_hangup;
+ tty->tq_hangup.data = tty;
+ sema_init(&tty->atomic_read, 1);
+ sema_init(&tty->atomic_write, 1);
+ spin_lock_init(&tty->read_lock);
+ INIT_LIST_HEAD(&tty->tty_files);
+ INIT_TQUEUE(&tty->SAK_tq, 0, 0);
+}
+
+/*
+ * The default put_char routine if the driver did not define one.
+ */
+void tty_default_put_char(struct tty_struct *tty, unsigned char ch)
+{
+ tty->driver.write(tty, 0, &ch, 1);
+}
+
+/*
+ * Register a tty device described by <driver>, with minor number <minor>.
+ */
+void tty_register_devfs (struct tty_driver *driver, unsigned int flags, unsigned minor)
+{
+#ifdef CONFIG_DEVFS_FS
+ umode_t mode = S_IFCHR | S_IRUSR | S_IWUSR;
+ kdev_t device = MKDEV (driver->major, minor);
+ int idx = minor - driver->minor_start;
+ char buf[32];
+
+ switch (device) {
+ case TTY_DEV:
+ case PTMX_DEV:
+ mode |= S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH;
+ break;
+ default:
+ if (driver->major == PTY_MASTER_MAJOR)
+ mode |= S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH;
+ break;
+ }
+ if ( (minor < driver->minor_start) ||
+ (minor >= driver->minor_start + driver->num) ) {
+ printk(KERN_ERR "Attempt to register invalid minor number "
+ "with devfs (%d:%d).\n", (int)driver->major,(int)minor);
+ return;
+ }
+# ifdef CONFIG_UNIX98_PTYS
+ if ( (driver->major >= UNIX98_PTY_SLAVE_MAJOR) &&
+ (driver->major < UNIX98_PTY_SLAVE_MAJOR + UNIX98_NR_MAJORS) )
+ flags |= DEVFS_FL_CURRENT_OWNER;
+# endif
+ sprintf(buf, driver->name, idx + driver->name_base);
+ devfs_register (NULL, buf, flags | DEVFS_FL_DEFAULT,
+ driver->major, minor, mode, &tty_fops, NULL);
+#endif /* CONFIG_DEVFS_FS */
+}
+
+void tty_unregister_devfs (struct tty_driver *driver, unsigned minor)
+{
+#ifdef CONFIG_DEVFS_FS
+ void * handle;
+ int idx = minor - driver->minor_start;
+ char buf[32];
+
+ sprintf(buf, driver->name, idx + driver->name_base);
+ handle = devfs_find_handle (NULL, buf, driver->major, minor,
+ DEVFS_SPECIAL_CHR, 0);
+ devfs_unregister (handle);
+#endif /* CONFIG_DEVFS_FS */
+}
+
+EXPORT_SYMBOL(tty_register_devfs);
+EXPORT_SYMBOL(tty_unregister_devfs);
+
+/*
+ * Called by a tty driver to register itself.
+ */
+int tty_register_driver(struct tty_driver *driver)
+{
+ int error;
+ int i;
+
+ if (driver->flags & TTY_DRIVER_INSTALLED)
+ return 0;
+
+ error = devfs_register_chrdev(driver->major, driver->name, &tty_fops);
+ if (error < 0)
+ return error;
+ else if(driver->major == 0)
+ driver->major = error;
+
+ if (!driver->put_char)
+ driver->put_char = tty_default_put_char;
+
+ driver->prev = 0;
+ driver->next = tty_drivers;
+ if (tty_drivers) tty_drivers->prev = driver;
+ tty_drivers = driver;
+
+ if ( !(driver->flags & TTY_DRIVER_NO_DEVFS) ) {
+ for(i = 0; i < driver->num; i++)
+ tty_register_devfs(driver, 0, driver->minor_start + i);
+ }
+ proc_tty_register_driver(driver);
+ return error;
+}
+
+/*
+ * Called by a tty driver to unregister itself.
+ */
+int tty_unregister_driver(struct tty_driver *driver)
+{
+ int retval;
+ struct tty_driver *p;
+ int i, found = 0;
+ struct termios *tp;
+ const char *othername = NULL;
+
+ if (*driver->refcount)
+ return -EBUSY;
+
+ for (p = tty_drivers; p; p = p->next) {
+ if (p == driver)
+ found++;
+ else if (p->major == driver->major)
+ othername = p->name;
+ }
+
+ if (!found)
+ return -ENOENT;
+
+ if (othername == NULL) {
+ retval = devfs_unregister_chrdev(driver->major, driver->name);
+ if (retval)
+ return retval;
+ } else
+ devfs_register_chrdev(driver->major, othername, &tty_fops);
+
+ if (driver->prev)
+ driver->prev->next = driver->next;
+ else
+ tty_drivers = driver->next;
+
+ if (driver->next)
+ driver->next->prev = driver->prev;
+
+ /*
+ * Free the termios and termios_locked structures because
+ * we don't want to get memory leaks when modular tty
+ * drivers are removed from the kernel.
+ */
+ for (i = 0; i < driver->num; i++) {
+ tp = driver->termios[i];
+ if (tp) {
+ driver->termios[i] = NULL;
+ kfree(tp);
+ }
+ tp = driver->termios_locked[i];
+ if (tp) {
+ driver->termios_locked[i] = NULL;
+ kfree(tp);
+ }
+ tty_unregister_devfs(driver, driver->minor_start + i);
+ }
+ proc_tty_unregister_driver(driver);
+ return 0;
+}
+
+
+/*
+ * Initialize the console device. This is called *early*, so
+ * we can't necessarily depend on lots of kernel help here.
+ * Just do some early initializations, and do the complex setup
+ * later.
+ */
+void __init console_init(void)
+{
+ /* Setup the default TTY line discipline. */
+ memset(tty_ldiscs, 0, NR_LDISCS*sizeof(struct tty_ldisc));
+ (void) tty_register_ldisc(N_TTY, &tty_ldisc_N_TTY);
+
+ /*
+ * Set up the standard termios. Individual tty drivers may
+ * deviate from this; this is used as a template.
+ */
+ memset(&tty_std_termios, 0, sizeof(struct termios));
+ memcpy(tty_std_termios.c_cc, INIT_C_CC, NCCS);
+ tty_std_termios.c_iflag = ICRNL | IXON;
+ tty_std_termios.c_oflag = OPOST | ONLCR;
+ tty_std_termios.c_cflag = B38400 | CS8 | CREAD | HUPCL;
+ tty_std_termios.c_lflag = ISIG | ICANON | ECHO | ECHOE | ECHOK |
+ ECHOCTL | ECHOKE | IEXTEN;
+
+ /*
+ * set up the console device so that later boot sequences can
+ * inform about problems etc..
+ */
+#ifdef CONFIG_EARLY_PRINTK
+ disable_early_printk();
+#endif
+
+#ifdef CONFIG_XEN_CONSOLE
+ xen_console_init();
+#endif
+
+#ifdef CONFIG_HVC_CONSOLE
+ hvc_console_init();
+#endif
+
+#ifdef CONFIG_VT
+ con_init();
+#endif
+#ifdef CONFIG_AU1X00_SERIAL_CONSOLE
+ au1x00_serial_console_init();
+#endif
+#ifdef CONFIG_SERIAL_CONSOLE
+#if (defined(CONFIG_8xx) || defined(CONFIG_CPM2))
+ console_8xx_init();
+#elif defined(CONFIG_MAC_SERIAL) && defined(CONFIG_SERIAL)
+ if (_machine == _MACH_Pmac)
+ mac_scc_console_init();
+ else
+ serial_console_init();
+#elif defined(CONFIG_MAC_SERIAL)
+ mac_scc_console_init();
+#elif defined(CONFIG_PARISC)
+ pdc_console_init();
+#elif defined(CONFIG_SERIAL)
+ serial_console_init();
+#endif /* CONFIG_8xx */
+#if defined(CONFIG_MVME162_SCC) || defined(CONFIG_BVME6000_SCC) || defined(CONFIG_MVME147_SCC)
+ vme_scc_console_init();
+#endif
+#if defined(CONFIG_SERIAL167)
+ serial167_console_init();
+#endif
+#if defined(CONFIG_SH_SCI)
+ sci_console_init();
+#endif
+#endif
+#ifdef CONFIG_SERIAL_DEC_CONSOLE
+ dec_serial_console_init();
+#endif
+#ifdef CONFIG_TN3270_CONSOLE
+ tub3270_con_init();
+#endif
+#ifdef CONFIG_TN3215
+ con3215_init();
+#endif
+#ifdef CONFIG_HWC
+ hwc_console_init();
+#endif
+#ifdef CONFIG_STDIO_CONSOLE
+ stdio_console_init();
+#endif
+#ifdef CONFIG_SERIAL_21285_CONSOLE
+ rs285_console_init();
+#endif
+#ifdef CONFIG_SERIAL_SA1100_CONSOLE
+ sa1100_rs_console_init();
+#endif
+#ifdef CONFIG_ARC_CONSOLE
+ arc_console_init();
+#endif
+#ifdef CONFIG_SERIAL_AMBA_CONSOLE
+ ambauart_console_init();
+#endif
+#ifdef CONFIG_SERIAL_TX3912_CONSOLE
+ tx3912_console_init();
+#endif
+#ifdef CONFIG_TXX927_SERIAL_CONSOLE
+ txx927_console_init();
+#endif
+#ifdef CONFIG_SERIAL_TXX9_CONSOLE
+ txx9_serial_console_init();
+#endif
+#ifdef CONFIG_SIBYTE_SB1250_DUART_CONSOLE
+ sb1250_serial_console_init();
+#endif
+#ifdef CONFIG_IP22_SERIAL
+ sgi_serial_console_init();
+#endif
+}
+
+static struct tty_driver dev_tty_driver, dev_syscons_driver;
+#ifdef CONFIG_UNIX98_PTYS
+static struct tty_driver dev_ptmx_driver;
+#endif
+#ifdef CONFIG_VT
+static struct tty_driver dev_console_driver;
+#endif
+
+/*
+ * Ok, now we can initialize the rest of the tty devices and can count
+ * on memory allocations, interrupts etc..
+ */
+void __init tty_init(void)
+{
+ /*
+ * dev_tty_driver and dev_console_driver are actually magic
+ * devices which get redirected at open time. Nevertheless,
+ * we register them so that register_chrdev is called
+ * appropriately.
+ */
+ memset(&dev_tty_driver, 0, sizeof(struct tty_driver));
+ dev_tty_driver.magic = TTY_DRIVER_MAGIC;
+ dev_tty_driver.driver_name = "/dev/tty";
+ dev_tty_driver.name = dev_tty_driver.driver_name + 5;
+ dev_tty_driver.name_base = 0;
+ dev_tty_driver.major = TTYAUX_MAJOR;
+ dev_tty_driver.minor_start = 0;
+ dev_tty_driver.num = 1;
+ dev_tty_driver.type = TTY_DRIVER_TYPE_SYSTEM;
+ dev_tty_driver.subtype = SYSTEM_TYPE_TTY;
+
+ if (tty_register_driver(&dev_tty_driver))
+ panic("Couldn't register /dev/tty driver\n");
+
+ dev_syscons_driver = dev_tty_driver;
+ dev_syscons_driver.driver_name = "/dev/console";
+ dev_syscons_driver.name = dev_syscons_driver.driver_name + 5;
+ dev_syscons_driver.major = TTYAUX_MAJOR;
+ dev_syscons_driver.minor_start = 1;
+ dev_syscons_driver.type = TTY_DRIVER_TYPE_SYSTEM;
+ dev_syscons_driver.subtype = SYSTEM_TYPE_SYSCONS;
+
+ if (tty_register_driver(&dev_syscons_driver))
+ panic("Couldn't register /dev/console driver\n");
+
+ /* console calls tty_register_driver() before kmalloc() works.
+ * Thus, we can't devfs_register() then. Do so now, instead.
+ */
+#ifdef CONFIG_VT
+ con_init_devfs();
+#endif
+
+#ifdef CONFIG_UNIX98_PTYS
+ dev_ptmx_driver = dev_tty_driver;
+ dev_ptmx_driver.driver_name = "/dev/ptmx";
+ dev_ptmx_driver.name = dev_ptmx_driver.driver_name + 5;
+ dev_ptmx_driver.major= MAJOR(PTMX_DEV);
+ dev_ptmx_driver.minor_start = MINOR(PTMX_DEV);
+ dev_ptmx_driver.type = TTY_DRIVER_TYPE_SYSTEM;
+ dev_ptmx_driver.subtype = SYSTEM_TYPE_SYSPTMX;
+
+ if (tty_register_driver(&dev_ptmx_driver))
+ panic("Couldn't register /dev/ptmx driver\n");
+#endif
+
+#ifdef CONFIG_VT
+ dev_console_driver = dev_tty_driver;
+ dev_console_driver.driver_name = "/dev/vc/0";
+ dev_console_driver.name = dev_console_driver.driver_name + 5;
+ dev_console_driver.major = TTY_MAJOR;
+ dev_console_driver.type = TTY_DRIVER_TYPE_SYSTEM;
+ dev_console_driver.subtype = SYSTEM_TYPE_CONSOLE;
+
+ if (tty_register_driver(&dev_console_driver))
+ panic("Couldn't register /dev/tty0 driver\n");
+
+ kbd_init();
+#endif
+
+#ifdef CONFIG_SGI_L1_SERIAL_CONSOLE
+ if (ia64_platform_is("sn2")) {
+ sn_sal_serial_console_init();
+ return; /* only one console right now for SN2 */
+ }
+#endif
+#ifdef CONFIG_ESPSERIAL /* init ESP before rs, so rs doesn't see the port */
+ espserial_init();
+#endif
+#if defined(CONFIG_MVME162_SCC) || defined(CONFIG_BVME6000_SCC) || defined(CONFIG_MVME147_SCC)
+ vme_scc_init();
+#endif
+#ifdef CONFIG_SERIAL_TX3912
+ tx3912_rs_init();
+#endif
+#ifdef CONFIG_ROCKETPORT
+ rp_init();
+#endif
+#ifdef CONFIG_SERIAL167
+ serial167_init();
+#endif
+#ifdef CONFIG_CYCLADES
+ cy_init();
+#endif
+#ifdef CONFIG_STALLION
+ stl_init();
+#endif
+#ifdef CONFIG_ISTALLION
+ stli_init();
+#endif
+#ifdef CONFIG_DIGI
+ pcxe_init();
+#endif
+#ifdef CONFIG_DIGIEPCA
+ pc_init();
+#endif
+#ifdef CONFIG_SPECIALIX
+ specialix_init();
+#endif
+#if (defined(CONFIG_8xx) || defined(CONFIG_CPM2))
+ rs_8xx_init();
+#endif /* CONFIG_8xx */
+ pty_init();
+#ifdef CONFIG_MOXA_SMARTIO
+ mxser_init();
+#endif
+#ifdef CONFIG_MOXA_INTELLIO
+ moxa_init();
+#endif
+#ifdef CONFIG_VT
+ vcs_init();
+#endif
+#ifdef CONFIG_TN3270
+ tub3270_init();
+#endif
+#ifdef CONFIG_TN3215
+ tty3215_init();
+#endif
+#ifdef CONFIG_HWC
+ hwc_tty_init();
+#endif
+#ifdef CONFIG_A2232
+ a2232board_init();
+#endif
+}
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/drivers/scsi/aic7xxx/Makefile
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/drivers/scsi/aic7xxx/Makefile Tue Aug 9 15:17:45 2005
@@ -0,0 +1,97 @@
+#
+# drivers/scsi/aic7xxx/Makefile
+#
+# Makefile for the Linux aic7xxx SCSI driver.
+#
+
+O_TARGET := aic7xxx_drv.o
+
+list-multi := aic7xxx.o aic79xx.o
+
+obj-$(CONFIG_SCSI_AIC7XXX) += aic7xxx.o
+ifeq ($(CONFIG_PCI),y)
+obj-$(CONFIG_SCSI_AIC79XX) += aic79xx.o
+endif
+
+EXTRA_CFLAGS += -I$(TOPDIR)/drivers/scsi -Werror
+#EXTRA_CFLAGS += -g
+
+# Platform Specific Files
+obj-aic7xxx = aic7xxx_osm.o aic7xxx_proc.o
+
+# Core Files
+obj-aic7xxx += aic7xxx_core.o aic7xxx_93cx6.o
+ifeq ($(CONFIG_AIC7XXX_REG_PRETTY_PRINT),y)
+obj-aic7xxx += aic7xxx_reg_print.o
+endif
+
+#EISA Specific Files
+AIC7XXX_EISA_ARCH = $(filter i386 alpha xen,$(ARCH))
+ifneq ($(AIC7XXX_EISA_ARCH),)
+obj-aic7xxx += aic7770.o
+# Platform Specific EISA Files
+obj-aic7xxx += aic7770_osm.o
+endif
+
+#PCI Specific Files
+ifeq ($(CONFIG_PCI),y)
+obj-aic7xxx += aic7xxx_pci.o
+# Platform Specific PCI Files
+obj-aic7xxx += aic7xxx_osm_pci.o
+endif
+
+# Platform Specific U320 Files
+obj-aic79xx = aic79xx_osm.o aic79xx_proc.o aic79xx_osm_pci.o
+# Core Files
+obj-aic79xx += aic79xx_core.o aic79xx_pci.o
+ifeq ($(CONFIG_AIC79XX_REG_PRETTY_PRINT),y)
+obj-aic79xx += aic79xx_reg_print.o
+endif
+
+# Override our module destination
+MOD_DESTDIR = $(shell cd .. && $(CONFIG_SHELL) $(TOPDIR)/scripts/pathdown.sh)
+
+include $(TOPDIR)/Rules.make
+
+aic7xxx_core.o: aic7xxx_seq.h
+$(obj-aic7xxx): aic7xxx_reg.h
+aic7xxx.o: aic7xxx_seq.h aic7xxx_reg.h $(obj-aic7xxx)
+ $(LD) $(LD_RFLAG) -r -o $@ $(obj-aic7xxx)
+
+aic79xx_core.o: aic79xx_seq.h
+$(obj-aic79xx): aic79xx_reg.h
+aic79xx.o: aic79xx_seq.h aic79xx_reg.h $(obj-aic79xx)
+ $(LD) $(LD_RFLAG) -r -o $@ $(obj-aic79xx)
+
+ifeq ($(CONFIG_AIC7XXX_BUILD_FIRMWARE),y)
+aic7xxx_gen = aic7xxx_seq.h aic7xxx_reg.h
+ifeq ($(CONFIG_AIC7XXX_REG_PRETTY_PRINT),y)
+aic7xxx_gen += aic7xxx_reg_print.c
+aic7xxx_asm_cmd = aicasm/aicasm -I. -r aic7xxx_reg.h \
+ -p aic7xxx_reg_print.c -i aic7xxx_osm.h \
+ -o aic7xxx_seq.h aic7xxx.seq
+else
+aic7xxx_asm_cmd = aicasm/aicasm -I. -r aic7xxx_reg.h \
+ -o aic7xxx_seq.h aic7xxx.seq
+endif
+$(aic7xxx_gen): aic7xxx.seq aic7xxx.reg aicasm/aicasm
+ $(aic7xxx_asm_cmd)
+endif
+
+ifeq ($(CONFIG_AIC79XX_BUILD_FIRMWARE),y)
+aic79xx_gen = aic79xx_seq.h aic79xx_reg.h
+ifeq ($(CONFIG_AIC79XX_REG_PRETTY_PRINT),y)
+aic79xx_gen += aic79xx_reg_print.c
+aic79xx_asm_cmd = aicasm/aicasm -I. -r aic79xx_reg.h \
+ -p aic79xx_reg_print.c -i aic79xx_osm.h \
+ -o aic79xx_seq.h aic79xx.seq
+else
+aic79xx_asm_cmd = aicasm/aicasm -I. -r aic79xx_reg.h \
+ -o aic79xx_seq.h aic79xx.seq
+endif
+$(aic79xx_gen): aic79xx.seq aic79xx.reg aicasm/aicasm
+ $(aic79xx_asm_cmd)
+endif
+
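+# The firmware assembler itself is rebuilt whenever any of its C, header,
+# yacc or lex sources change.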
+aicasm/aicasm: aicasm/*.[chyl]
+ $(MAKE) -C aicasm
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/fs/exec.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/fs/exec.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,1179 @@
+/*
+ * linux/fs/exec.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+/*
+ * #!-checking implemented by tytso.
+ */
+/*
+ * Demand-loading implemented 01.12.91 - no need to read anything but
+ * the header into memory. The inode of the executable is put into
+ * "current->executable", and page faults do the actual loading. Clean.
+ *
+ * Once more I can proudly say that linux stood up to being changed: it
+ * was less than 2 hours work to get demand-loading completely implemented.
+ *
+ * Demand loading changed July 1993 by Eric Youngdale. Use mmap instead,
+ * current->executable is only used by the procfs. This allows a dispatch
+ * table to check for several different types of binary formats. We keep
+ * trying until we recognize the file or we run out of supported binary
+ * formats.
+ */
+
+#include <linux/config.h>
+#include <linux/slab.h>
+#include <linux/file.h>
+#include <linux/mman.h>
+#include <linux/a.out.h>
+#include <linux/stat.h>
+#include <linux/fcntl.h>
+#include <linux/smp_lock.h>
+#include <linux/init.h>
+#include <linux/pagemap.h>
+#include <linux/highmem.h>
+#include <linux/spinlock.h>
+#include <linux/personality.h>
+#include <linux/swap.h>
+#include <linux/utsname.h>
+#define __NO_VERSION__
+#include <linux/module.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgalloc.h>
+#include <asm/mmu_context.h>
+
+#ifdef CONFIG_KMOD
+#include <linux/kmod.h>
+#endif
+
+int core_uses_pid;
+char core_pattern[65] = "core";
+int core_setuid_ok = 0;
+/* The maximal length of core_pattern is also specified in sysctl.c */
+
+static struct linux_binfmt *formats;
+static rwlock_t binfmt_lock = RW_LOCK_UNLOCKED;
+
+int register_binfmt(struct linux_binfmt * fmt)
+{
+ struct linux_binfmt ** tmp = &formats;
+
+ if (!fmt)
+ return -EINVAL;
+ if (fmt->next)
+ return -EBUSY;
+ write_lock(&binfmt_lock);
+ while (*tmp) {
+ if (fmt == *tmp) {
+ write_unlock(&binfmt_lock);
+ return -EBUSY;
+ }
+ tmp = &(*tmp)->next;
+ }
+ fmt->next = formats;
+ formats = fmt;
+ write_unlock(&binfmt_lock);
+ return 0;
+}
+
+int unregister_binfmt(struct linux_binfmt * fmt)
+{
+ struct linux_binfmt ** tmp = &formats;
+
+ write_lock(&binfmt_lock);
+ while (*tmp) {
+ if (fmt == *tmp) {
+ *tmp = fmt->next;
+ write_unlock(&binfmt_lock);
+ return 0;
+ }
+ tmp = &(*tmp)->next;
+ }
+ write_unlock(&binfmt_lock);
+ return -EINVAL;
+}
+
+static inline void put_binfmt(struct linux_binfmt * fmt)
+{
+ if (fmt->module)
+ __MOD_DEC_USE_COUNT(fmt->module);
+}
+
+/*
+ * Note that a shared library must be both readable and executable due to
+ * security reasons.
+ *
+ * Also note that we take the address to load from the file itself.
+ */
+asmlinkage long sys_uselib(const char * library)
+{
+ struct file * file;
+ struct nameidata nd;
+ int error;
+
+ error = user_path_walk(library, &nd);
+ if (error)
+ goto out;
+
+ error = -EINVAL;
+ if (!S_ISREG(nd.dentry->d_inode->i_mode))
+ goto exit;
+
+ error = permission(nd.dentry->d_inode, MAY_READ | MAY_EXEC);
+ if (error)
+ goto exit;
+
+ file = dentry_open(nd.dentry, nd.mnt, O_RDONLY);
+ error = PTR_ERR(file);
+ if (IS_ERR(file))
+ goto out;
+
+ error = -ENOEXEC;
+ if(file->f_op && file->f_op->read) {
+ struct linux_binfmt * fmt;
+
+ read_lock(&binfmt_lock);
+ for (fmt = formats ; fmt ; fmt = fmt->next) {
+ if (!fmt->load_shlib)
+ continue;
+ if (!try_inc_mod_count(fmt->module))
+ continue;
+ read_unlock(&binfmt_lock);
+ error = fmt->load_shlib(file);
+ read_lock(&binfmt_lock);
+ put_binfmt(fmt);
+ if (error != -ENOEXEC)
+ break;
+ }
+ read_unlock(&binfmt_lock);
+ }
+ fput(file);
+out:
+ return error;
+exit:
+ path_release(&nd);
+ goto out;
+}
+
+/*
+ * count() counts the number of arguments/envelopes
+ */
+static int count(char ** argv, int max)
+{
+ int i = 0;
+
+ if (argv != NULL) {
+ for (;;) {
+ char * p;
+
+ if (get_user(p, argv))
+ return -EFAULT;
+ if (!p)
+ break;
+ argv++;
+ if(++i > max)
+ return -E2BIG;
+ }
+ }
+ return i;
+}
+
+/*
+ * 'copy_strings()' copies argument/envelope strings from user
+ * memory to free pages in kernel mem. These are in a format ready
+ * to be put directly into the top of new user memory.
+ */
+int copy_strings(int argc,char ** argv, struct linux_binprm *bprm)
+{
+ struct page *kmapped_page = NULL;
+ char *kaddr = NULL;
+ int ret;
+
+ while (argc-- > 0) {
+ char *str;
+ int len;
+ unsigned long pos;
+
+ if (get_user(str, argv+argc) ||
+ !(len = strnlen_user(str, bprm->p))) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ if (bprm->p < len) {
+ ret = -E2BIG;
+ goto out;
+ }
+
+ bprm->p -= len;
+ /* XXX: add architecture specific overflow check here. */
+ pos = bprm->p;
+
+ while (len > 0) {
+ int i, new, err;
+ int offset, bytes_to_copy;
+ struct page *page;
+
+ offset = pos % PAGE_SIZE;
+ i = pos/PAGE_SIZE;
+ page = bprm->page[i];
+ new = 0;
+ if (!page) {
+ page = alloc_page(GFP_HIGHUSER);
+ bprm->page[i] = page;
+ if (!page) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ new = 1;
+ }
+
+ if (page != kmapped_page) {
+ if (kmapped_page)
+ kunmap(kmapped_page);
+ kmapped_page = page;
+ kaddr = kmap(kmapped_page);
+ }
+ if (new && offset)
+ memset(kaddr, 0, offset);
+ bytes_to_copy = PAGE_SIZE - offset;
+ if (bytes_to_copy > len) {
+ bytes_to_copy = len;
+ if (new)
+ memset(kaddr+offset+len, 0,
+ PAGE_SIZE-offset-len);
+ }
+ err = copy_from_user(kaddr+offset, str, bytes_to_copy);
+ if (err) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ pos += bytes_to_copy;
+ str += bytes_to_copy;
+ len -= bytes_to_copy;
+ }
+ }
+ ret = 0;
+out:
+ if (kmapped_page)
+ kunmap(kmapped_page);
+ return ret;
+}
+
+/*
+ * Like copy_strings, but get argv and its values from kernel memory.
+ */
+int copy_strings_kernel(int argc,char ** argv, struct linux_binprm *bprm)
+{
+ int r;
+ mm_segment_t oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ r = copy_strings(argc, argv, bprm);
+ set_fs(oldfs);
+ return r;
+}
+
+/*
+ * This routine is used to map in a page into an address space: needed by
+ * execve() for the initial stack and environment pages.
+ *
+ * tsk->mmap_sem is held for writing.
+ */
+void put_dirty_page(struct task_struct * tsk, struct page *page, unsigned long address)
+{
+ pgd_t * pgd;
+ pmd_t * pmd;
+ pte_t * pte;
+ struct vm_area_struct *vma;
+ pgprot_t prot = PAGE_COPY;
+
+ if (page_count(page) != 1)
+ printk(KERN_ERR "mem_map disagrees with %p at %08lx\n", page, address);
+ pgd = pgd_offset(tsk->mm, address);
+
+ spin_lock(&tsk->mm->page_table_lock);
+ pmd = pmd_alloc(tsk->mm, pgd, address);
+ if (!pmd)
+ goto out;
+ pte = pte_alloc(tsk->mm, pmd, address);
+ if (!pte)
+ goto out;
+ if (!pte_none(*pte))
+ goto out;
+ lru_cache_add(page);
+ flush_dcache_page(page);
+ flush_page_to_ram(page);
+ /* lookup is cheap because there is only a single entry in the list */
+ vma = find_vma(tsk->mm, address);
+ if (vma)
+ prot = vma->vm_page_prot;
+ set_pte(pte, pte_mkdirty(pte_mkwrite(mk_pte(page, prot))));
+ XEN_flush_page_update_queue();
+ tsk->mm->rss++;
+ spin_unlock(&tsk->mm->page_table_lock);
+
+ /* no need for flush_tlb */
+ return;
+out:
+ spin_unlock(&tsk->mm->page_table_lock);
+ __free_page(page);
+ force_sig(SIGKILL, tsk);
+ return;
+}
+
+int setup_arg_pages(struct linux_binprm *bprm)
+{
+ unsigned long stack_base;
+ struct vm_area_struct *mpnt;
+ int i, ret;
+
+ stack_base = STACK_TOP - MAX_ARG_PAGES*PAGE_SIZE;
+
+ bprm->p += stack_base;
+ if (bprm->loader)
+ bprm->loader += stack_base;
+ bprm->exec += stack_base;
+
+ mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+ if (!mpnt)
+ return -ENOMEM;
+
+ down_write(&current->mm->mmap_sem);
+ {
+ mpnt->vm_mm = current->mm;
+ mpnt->vm_start = PAGE_MASK & (unsigned long) bprm->p;
+ mpnt->vm_end = STACK_TOP;
+ mpnt->vm_flags = VM_STACK_FLAGS;
+ mpnt->vm_page_prot = protection_map[VM_STACK_FLAGS & 0x7];
+ mpnt->vm_ops = NULL;
+ mpnt->vm_pgoff = 0;
+ mpnt->vm_file = NULL;
+ mpnt->vm_private_data = (void *) 0;
+ if ((ret = insert_vm_struct(current->mm, mpnt))) {
+ up_write(&current->mm->mmap_sem);
+ kmem_cache_free(vm_area_cachep, mpnt);
+ return ret;
+ }
+ current->mm->total_vm = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
+ }
+
+ for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
+ struct page *page = bprm->page[i];
+ if (page) {
+ bprm->page[i] = NULL;
+ put_dirty_page(current,page,stack_base);
+ }
+ stack_base += PAGE_SIZE;
+ }
+ up_write(&current->mm->mmap_sem);
+
+ return 0;
+}
+
+struct file *open_exec(const char *name)
+{
+ struct nameidata nd;
+ struct inode *inode;
+ struct file *file;
+ int err = 0;
+
+ err = path_lookup(name, LOOKUP_FOLLOW|LOOKUP_POSITIVE, &nd);
+ file = ERR_PTR(err);
+ if (!err) {
+ inode = nd.dentry->d_inode;
+ file = ERR_PTR(-EACCES);
+ if (!(nd.mnt->mnt_flags & MNT_NOEXEC) &&
+ S_ISREG(inode->i_mode)) {
+ int err = permission(inode, MAY_EXEC);
+ if (!err && !(inode->i_mode & 0111))
+ err = -EACCES;
+ file = ERR_PTR(err);
+ if (!err) {
+ file = dentry_open(nd.dentry, nd.mnt, O_RDONLY);
+ if (!IS_ERR(file)) {
+ err = deny_write_access(file);
+ if (err) {
+ fput(file);
+ file = ERR_PTR(err);
+ }
+ }
+out:
+ return file;
+ }
+ }
+ path_release(&nd);
+ }
+ goto out;
+}
+
+int kernel_read(struct file *file, unsigned long offset,
+ char * addr, unsigned long count)
+{
+ mm_segment_t old_fs;
+ loff_t pos = offset;
+ int result = -ENOSYS;
+
+ if (!file->f_op->read)
+ goto fail;
+ old_fs = get_fs();
+ set_fs(get_ds());
+ result = file->f_op->read(file, addr, count, &pos);
+ set_fs(old_fs);
+fail:
+ return result;
+}
+
+static int exec_mmap(void)
+{
+ struct mm_struct * mm, * old_mm;
+
+ old_mm = current->mm;
+
+ if (old_mm && atomic_read(&old_mm->mm_users) == 1) {
+ mm_release();
+ down_write(&old_mm->mmap_sem);
+ exit_mmap(old_mm);
+ up_write(&old_mm->mmap_sem);
+ return 0;
+ }
+
+
+ mm = mm_alloc();
+ if (mm) {
+ struct mm_struct *active_mm;
+
+ if (init_new_context(current, mm)) {
+ mmdrop(mm);
+ return -ENOMEM;
+ }
+
+ /* Add it to the list of mm's */
+ spin_lock(&mmlist_lock);
+ list_add(&mm->mmlist, &init_mm.mmlist);
+ mmlist_nr++;
+ spin_unlock(&mmlist_lock);
+
+ task_lock(current);
+ active_mm = current->active_mm;
+ current->mm = mm;
+ current->active_mm = mm;
+ task_unlock(current);
+ activate_mm(active_mm, mm);
+ mm_release();
+ if (old_mm) {
+ if (active_mm != old_mm) BUG();
+ mmput(old_mm);
+ return 0;
+ }
+ mmdrop(active_mm);
+ return 0;
+ }
+ return -ENOMEM;
+}
+
+/*
+ * This function makes sure the current process has its own signal table,
+ * so that flush_signal_handlers can later reset the handlers without
+ * disturbing other processes. (Other processes might share the signal
+ * table via the CLONE_SIGNAL option to clone().)
+ */
+
+static inline int make_private_signals(void)
+{
+ struct signal_struct * newsig;
+
+ if (atomic_read(&current->sig->count) <= 1)
+ return 0;
+ newsig = kmem_cache_alloc(sigact_cachep, GFP_KERNEL);
+ if (newsig == NULL)
+ return -ENOMEM;
+ spin_lock_init(&newsig->siglock);
+ atomic_set(&newsig->count, 1);
+ memcpy(newsig->action, current->sig->action, sizeof(newsig->action));
+ spin_lock_irq(&current->sigmask_lock);
+ current->sig = newsig;
+ spin_unlock_irq(&current->sigmask_lock);
+ return 0;
+}
+
+/*
+ * If make_private_signals() made a copy of the signal table, decrement the
+ * refcount of the original table, and free it if necessary.
+ * We don't do that in make_private_signals() so that we can back off
+ * in flush_old_exec() if an error occurs after calling make_private_signals().
+ */
+
+static inline void release_old_signals(struct signal_struct * oldsig)
+{
+ if (current->sig == oldsig)
+ return;
+ if (atomic_dec_and_test(&oldsig->count))
+ kmem_cache_free(sigact_cachep, oldsig);
+}
+
+/*
+ * These functions flush out all traces of the currently running executable
+ * so that a new one can be started
+ */
+
+static inline void flush_old_files(struct files_struct * files)
+{
+ long j = -1;
+
+ write_lock(&files->file_lock);
+ for (;;) {
+ unsigned long set, i;
+
+ j++;
+ i = j * __NFDBITS;
+ if (i >= files->max_fds || i >= files->max_fdset)
+ break;
+ set = files->close_on_exec->fds_bits[j];
+ if (!set)
+ continue;
+ files->close_on_exec->fds_bits[j] = 0;
+ write_unlock(&files->file_lock);
+ for ( ; set ; i++,set >>= 1) {
+ if (set & 1) {
+ sys_close(i);
+ }
+ }
+ write_lock(&files->file_lock);
+
+ }
+ write_unlock(&files->file_lock);
+}
+
+/*
+ * An execve() will automatically "de-thread" the process.
+ * Note: we don't have to hold the tasklist_lock to test
+ * whether we might need to do this. If we're not part of
+ * a thread group, there is no way we can become one
+ * dynamically. And if we are, we only need to protect the
+ * unlink - even if we race with the last other thread exit,
+ * at worst the list_del_init() might end up being a no-op.
+ */
+static inline void de_thread(struct task_struct *tsk)
+{
+ if (!list_empty(&tsk->thread_group)) {
+ write_lock_irq(&tasklist_lock);
+ list_del_init(&tsk->thread_group);
+ write_unlock_irq(&tasklist_lock);
+ }
+
+ /* Minor oddity: this might stay the same. */
+ tsk->tgid = tsk->pid;
+}
+
+void get_task_comm(char *buf, struct task_struct *tsk)
+{
+ /* buf must be at least sizeof(tsk->comm) in size */
+ task_lock(tsk);
+ memcpy(buf, tsk->comm, sizeof(tsk->comm));
+ task_unlock(tsk);
+}
+
+void set_task_comm(struct task_struct *tsk, char *buf)
+{
+ task_lock(tsk);
+ strncpy(tsk->comm, buf, sizeof(tsk->comm));
+ tsk->comm[sizeof(tsk->comm)-1]='\0';
+ task_unlock(tsk);
+}
+
+int flush_old_exec(struct linux_binprm * bprm)
+{
+ char * name;
+ int i, ch, retval;
+ struct signal_struct * oldsig;
+ struct files_struct * files;
+ char tcomm[sizeof(current->comm)];
+
+ /*
+ * Make sure we have a private signal table
+ */
+ oldsig = current->sig;
+ retval = make_private_signals();
+ if (retval) goto flush_failed;
+
+ /*
+ * Make sure we have private file handles. Ask the
+ * fork helper to do the work for us and the exit
+ * helper to do the cleanup of the old one.
+ */
+
+ files = current->files; /* refcounted so safe to hold */
+ retval = unshare_files();
+ if(retval)
+ goto flush_failed;
+
+ /*
+ * Release all of the old mmap stuff
+ */
+ retval = exec_mmap();
+ if (retval) goto mmap_failed;
+
+ /* This is the point of no return */
+ steal_locks(files);
+ put_files_struct(files);
+ release_old_signals(oldsig);
+
+ current->sas_ss_sp = current->sas_ss_size = 0;
+
+ if (current->euid == current->uid && current->egid == current->gid) {
+ current->mm->dumpable = 1;
+ current->task_dumpable = 1;
+ }
+ name = bprm->filename;
+ for (i=0; (ch = *(name++)) != '\0';) {
+ if (ch == '/')
+ i = 0;
+ else
+ if (i < (sizeof(tcomm) - 1))
+ tcomm[i++] = ch;
+ }
+ tcomm[i] = '\0';
+ set_task_comm(current, tcomm);
+
+ flush_thread();
+
+ de_thread(current);
+
+ if (bprm->e_uid != current->euid || bprm->e_gid != current->egid ||
+ permission(bprm->file->f_dentry->d_inode,MAY_READ))
+ current->mm->dumpable = 0;
+
+ /* An exec changes our domain. We are no longer part of the thread group */
+
+ current->self_exec_id++;
+
+ flush_signal_handlers(current);
+ flush_old_files(current->files);
+
+ return 0;
+
+mmap_failed:
+ put_files_struct(current->files);
+ current->files = files;
+flush_failed:
+ spin_lock_irq(&current->sigmask_lock);
+ if (current->sig != oldsig) {
+ kmem_cache_free(sigact_cachep, current->sig);
+ current->sig = oldsig;
+ }
+ spin_unlock_irq(&current->sigmask_lock);
+ return retval;
+}
+
+/*
+ * We mustn't allow tracing of suid binaries, unless
+ * the tracer has the capability to trace anything..
+ */
+static inline int must_not_trace_exec(struct task_struct * p)
+{
+ return (p->ptrace & PT_PTRACED) && !(p->ptrace & PT_PTRACE_CAP);
+}
+
+/*
+ * Fill the binprm structure from the inode.
+ * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
+ */
+int prepare_binprm(struct linux_binprm *bprm)
+{
+ int mode;
+ struct inode * inode = bprm->file->f_dentry->d_inode;
+
+ mode = inode->i_mode;
+ /*
+ * Check execute perms again - if the caller has CAP_DAC_OVERRIDE,
+ * vfs_permission lets a non-executable through
+ */
+ if (!(mode & 0111)) /* with at least _one_ execute bit set */
+ return -EACCES;
+ if (bprm->file->f_op == NULL)
+ return -EACCES;
+
+ bprm->e_uid = current->euid;
+ bprm->e_gid = current->egid;
+
+ if(!(bprm->file->f_vfsmnt->mnt_flags & MNT_NOSUID)) {
+ /* Set-uid? */
+ if (mode & S_ISUID)
+ bprm->e_uid = inode->i_uid;
+
+ /* Set-gid? */
+ /*
+ * If setgid is set but no group execute bit then this
+ * is a candidate for mandatory locking, not a setgid
+ * executable.
+ */
+ if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))
+ bprm->e_gid = inode->i_gid;
+ }
+
+ /* We don't have VFS support for capabilities yet */
+ cap_clear(bprm->cap_inheritable);
+ cap_clear(bprm->cap_permitted);
+ cap_clear(bprm->cap_effective);
+
+ /* To support inheritance of root-permissions and suid-root
+ * executables under compatibility mode, we raise all three
+ * capability sets for the file.
+ *
+ * If only the real uid is 0, we only raise the inheritable
+ * and permitted sets of the executable file.
+ */
+
+ if (!issecure(SECURE_NOROOT)) {
+ if (bprm->e_uid == 0 || current->uid == 0) {
+ cap_set_full(bprm->cap_inheritable);
+ cap_set_full(bprm->cap_permitted);
+ }
+ if (bprm->e_uid == 0)
+ cap_set_full(bprm->cap_effective);
+ }
+
+ memset(bprm->buf,0,BINPRM_BUF_SIZE);
+ return kernel_read(bprm->file,0,bprm->buf,BINPRM_BUF_SIZE);
+}
+
+/*
+ * This function is used to produce the new IDs and capabilities
+ * from the old ones and the file's capabilities.
+ *
+ * The formula used for evolving capabilities is:
+ *
+ * pI' = pI
+ * (***) pP' = (fP & X) | (fI & pI)
+ * pE' = pP' & fE [NB. fE is 0 or ~0]
+ *
+ * I=Inheritable, P=Permitted, E=Effective // p=process, f=file
+ * ' indicates post-exec(), and X is the global 'cap_bset'.
+ *
+ */
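+/*
+ * Worked example (illustrative values): for a setuid-root binary under
+ * the compatibility rules in prepare_binprm() above, fP = fI = fE = ~0.
+ * With pI = 0 and X = cap_bset = ~0 this gives
+ * pP' = (~0 & ~0) | (~0 & 0) = ~0 and pE' = ~0 & ~0 = ~0,
+ * i.e. full permitted and effective sets after exec.
+ */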
+
+void compute_creds(struct linux_binprm *bprm)
+{
+ kernel_cap_t new_permitted, working;
+ int do_unlock = 0;
+
+ new_permitted = cap_intersect(bprm->cap_permitted, cap_bset);
+ working = cap_intersect(bprm->cap_inheritable,
+ current->cap_inheritable);
+ new_permitted = cap_combine(new_permitted, working);
+
+ if (bprm->e_uid != current->uid || bprm->e_gid != current->gid ||
+ !cap_issubset(new_permitted, current->cap_permitted)) {
+ current->mm->dumpable = 0;
+
+ lock_kernel();
+ if (must_not_trace_exec(current)
+ || atomic_read(&current->fs->count) > 1
+ || atomic_read(&current->files->count) > 1
+ || atomic_read(&current->sig->count) > 1) {
+ if(!capable(CAP_SETUID)) {
+ bprm->e_uid = current->uid;
+ bprm->e_gid = current->gid;
+ }
+ if(!capable(CAP_SETPCAP)) {
+ new_permitted = cap_intersect(new_permitted,
+ current->cap_permitted);
+ }
+ }
+ do_unlock = 1;
+ }
+
+
+ /* For init, we want to retain the capabilities set
+ * in the init_task struct. Thus we skip the usual
+ * capability rules */
+ if (current->pid != 1) {
+ current->cap_permitted = new_permitted;
+ current->cap_effective =
+ cap_intersect(new_permitted, bprm->cap_effective);
+ }
+
+ /* AUD: Audit candidate if current->cap_effective is set */
+
+ current->suid = current->euid = current->fsuid = bprm->e_uid;
+ current->sgid = current->egid = current->fsgid = bprm->e_gid;
+
+ if(do_unlock)
+ unlock_kernel();
+ current->keep_capabilities = 0;
+}
+
+
+void remove_arg_zero(struct linux_binprm *bprm)
+{
+ if (bprm->argc) {
+ unsigned long offset;
+ char * kaddr;
+ struct page *page;
+
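+ /* Unconventional loop: it jumps into its own body. Starting
+ * at argv[0]'s first byte it advances bprm->p one byte at a
+ * time, kmapping each argument page as it is crossed, until
+ * argv[0]'s terminating NUL has been consumed. */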
+ offset = bprm->p % PAGE_SIZE;
+ goto inside;
+
+ while (bprm->p++, *(kaddr+offset++)) {
+ if (offset != PAGE_SIZE)
+ continue;
+ offset = 0;
+ kunmap(page);
+inside:
+ page = bprm->page[bprm->p/PAGE_SIZE];
+ kaddr = kmap(page);
+ }
+ kunmap(page);
+ bprm->argc--;
+ }
+}
+
+/*
+ * cycle the list of binary formats handler, until one recognizes the image
+ */
+int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
+{
+ int try,retval=0;
+ struct linux_binfmt *fmt;
+#ifdef __alpha__
+ /* handle /sbin/loader.. */
+ {
+ struct exec * eh = (struct exec *) bprm->buf;
+
+ if (!bprm->loader && eh->fh.f_magic == 0x183 &&
+ (eh->fh.f_flags & 0x3000) == 0x3000)
+ {
+ struct file * file;
+ unsigned long loader;
+
+ allow_write_access(bprm->file);
+ fput(bprm->file);
+ bprm->file = NULL;
+
+ loader = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);
+
+ file = open_exec("/sbin/loader");
+ retval = PTR_ERR(file);
+ if (IS_ERR(file))
+ return retval;
+
+ /* Remember if the application is TASO. */
+ bprm->sh_bang = eh->ah.entry < 0x100000000;
+
+ bprm->file = file;
+ bprm->loader = loader;
+ retval = prepare_binprm(bprm);
+ if (retval<0)
+ return retval;
+ /* should call search_binary_handler recursively here,
+ but it does not matter */
+ }
+ }
+#endif
+ /* kernel module loader fixup */
+ /* so we don't try to run modprobe in kernel space. */
+ set_fs(USER_DS);
+ for (try=0; try<2; try++) {
+ read_lock(&binfmt_lock);
+ for (fmt = formats ; fmt ; fmt = fmt->next) {
+ int (*fn)(struct linux_binprm *, struct pt_regs *) = fmt->load_binary;
+ if (!fn)
+ continue;
+ if (!try_inc_mod_count(fmt->module))
+ continue;
+ read_unlock(&binfmt_lock);
+ retval = fn(bprm, regs);
+ if (retval >= 0) {
+ put_binfmt(fmt);
+ allow_write_access(bprm->file);
+ if (bprm->file)
+ fput(bprm->file);
+ bprm->file = NULL;
+ current->did_exec = 1;
+ return retval;
+ }
+ read_lock(&binfmt_lock);
+ put_binfmt(fmt);
+ if (retval != -ENOEXEC)
+ break;
+ if (!bprm->file) {
+ read_unlock(&binfmt_lock);
+ return retval;
+ }
+ }
+ read_unlock(&binfmt_lock);
+ if (retval != -ENOEXEC) {
+ break;
+#ifdef CONFIG_KMOD
+ }else{
+#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
+ char modname[20];
+ if (printable(bprm->buf[0]) &&
+ printable(bprm->buf[1]) &&
+ printable(bprm->buf[2]) &&
+ printable(bprm->buf[3]))
+ break; /* -ENOEXEC */
+ sprintf(modname, "binfmt-%04x", *(unsigned short *)(&bprm->buf[2]));
+ request_module(modname);
+#endif
+ }
+ }
+ return retval;
+}
+
+
+/*
+ * sys_execve() executes a new program.
+ */
+int do_execve(char * filename, char ** argv, char ** envp, struct pt_regs * regs)
+{
+ struct linux_binprm bprm;
+ struct file *file;
+ int retval;
+ int i;
+
+ file = open_exec(filename);
+
+ retval = PTR_ERR(file);
+ if (IS_ERR(file))
+ return retval;
+
+ bprm.p = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);
+ memset(bprm.page, 0, MAX_ARG_PAGES*sizeof(bprm.page[0]));
+
+ bprm.file = file;
+ bprm.filename = filename;
+ bprm.sh_bang = 0;
+ bprm.loader = 0;
+ bprm.exec = 0;
+ if ((bprm.argc = count(argv, bprm.p / sizeof(void *))) < 0) {
+ allow_write_access(file);
+ fput(file);
+ return bprm.argc;
+ }
+
+ if ((bprm.envc = count(envp, bprm.p / sizeof(void *))) < 0) {
+ allow_write_access(file);
+ fput(file);
+ return bprm.envc;
+ }
+
+ retval = prepare_binprm(&bprm);
+ if (retval < 0)
+ goto out;
+
+ retval = copy_strings_kernel(1, &bprm.filename, &bprm);
+ if (retval < 0)
+ goto out;
+
+ bprm.exec = bprm.p;
+ retval = copy_strings(bprm.envc, envp, &bprm);
+ if (retval < 0)
+ goto out;
+
+ retval = copy_strings(bprm.argc, argv, &bprm);
+ if (retval < 0)
+ goto out;
+
+ retval = search_binary_handler(&bprm,regs);
+ if (retval >= 0)
+ /* execve success */
+ return retval;
+
+out:
+ /* Something went wrong, return the inode and free the argument pages*/
+ allow_write_access(bprm.file);
+ if (bprm.file)
+ fput(bprm.file);
+
+ for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
+ struct page * page = bprm.page[i];
+ if (page)
+ __free_page(page);
+ }
+
+ return retval;
+}
+
+void set_binfmt(struct linux_binfmt *new)
+{
+ struct linux_binfmt *old = current->binfmt;
+ if (new && new->module)
+ __MOD_INC_USE_COUNT(new->module);
+ current->binfmt = new;
+ if (old && old->module)
+ __MOD_DEC_USE_COUNT(old->module);
+}
+
+#define CORENAME_MAX_SIZE 64
+
+/* format_corename will inspect the pattern parameter, and output a
+ * name into corename, which must have space for at least
+ * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
+ */
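+/*
+ * Example (illustrative): pattern "core.%p.%h" for pid 1234 on a host
+ * named "dom0" expands to "core.1234.dom0". A lone trailing '%' ends
+ * expansion early, and "%%" emits a literal '%'.
+ */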
+void format_corename(char *corename, const char *pattern, long signr)
+{
+ const char *pat_ptr = pattern;
+ char *out_ptr = corename;
+ char *const out_end = corename + CORENAME_MAX_SIZE;
+ int rc;
+ int pid_in_pattern = 0;
+
+ /* Repeat as long as we have more pattern to process and more output space */
+ while (*pat_ptr) {
+ if (*pat_ptr != '%') {
+ if (out_ptr == out_end)
+ goto out;
+ *out_ptr++ = *pat_ptr++;
+ } else {
+ switch (*++pat_ptr) {
+ case 0:
+ goto out;
+ /* Double percent, output one percent */
+ case '%':
+ if (out_ptr == out_end)
+ goto out;
+ *out_ptr++ = '%';
+ break;
+ /* pid */
+ case 'p':
+ pid_in_pattern = 1;
+ rc = snprintf(out_ptr, out_end - out_ptr,
+ "%d", current->pid);
+ if (rc > out_end - out_ptr)
+ goto out;
+ out_ptr += rc;
+ break;
+ /* uid */
+ case 'u':
+ rc = snprintf(out_ptr, out_end - out_ptr,
+ "%d", current->uid);
+ if (rc > out_end - out_ptr)
+ goto out;
+ out_ptr += rc;
+ break;
+ /* gid */
+ case 'g':
+ rc = snprintf(out_ptr, out_end - out_ptr,
+ "%d", current->gid);
+ if (rc > out_end - out_ptr)
+ goto out;
+ out_ptr += rc;
+ break;
+ /* signal that caused the coredump */
+ case 's':
+ rc = snprintf(out_ptr, out_end - out_ptr,
+ "%ld", signr);
+ if (rc > out_end - out_ptr)
+ goto out;
+ out_ptr += rc;
+ break;
+ /* UNIX time of coredump */
+ case 't': {
+ struct timeval tv;
+ do_gettimeofday(&tv);
+ rc = snprintf(out_ptr, out_end - out_ptr,
+ "%ld", tv.tv_sec);
+ if (rc > out_end - out_ptr)
+ goto out;
+ out_ptr += rc;
+ break;
+ }
+ /* hostname */
+ case 'h':
+ down_read(&uts_sem);
+ rc = snprintf(out_ptr, out_end - out_ptr,
+ "%s", system_utsname.nodename);
+ up_read(&uts_sem);
+ if (rc > out_end - out_ptr)
+ goto out;
+ out_ptr += rc;
+ break;
+ /* executable */
+ case 'e':
+ rc = snprintf(out_ptr, out_end - out_ptr,
+ "%s", current->comm);
+ if (rc > out_end - out_ptr)
+ goto out;
+ out_ptr += rc;
+ break;
+ default:
+ break;
+ }
+ ++pat_ptr;
+ }
+ }
+ /* Backward compatibility with core_uses_pid:
+ *
+ * If core_pattern does not include a %p (as is the default)
+ * and core_uses_pid is set, then .%pid will be appended to
+ * the filename */
+ if (!pid_in_pattern
+ && (core_uses_pid || atomic_read(&current->mm->mm_users) != 1)) {
+ rc = snprintf(out_ptr, out_end - out_ptr,
+ ".%d", current->pid);
+ if (rc > out_end - out_ptr)
+ goto out;
+ out_ptr += rc;
+ }
+ out:
+ *out_ptr = 0;
+}
+
+int do_coredump(long signr, struct pt_regs * regs)
+{
+ struct linux_binfmt * binfmt;
+ char corename[CORENAME_MAX_SIZE + 1];
+ struct file * file;
+ struct inode * inode;
+ int retval = 0;
+ int fsuid = current->fsuid;
+
+ lock_kernel();
+ binfmt = current->binfmt;
+ if (!binfmt || !binfmt->core_dump)
+ goto fail;
+ if (!is_dumpable(current))
+ {
+ if(!core_setuid_ok || !current->task_dumpable)
+ goto fail;
+ current->fsuid = 0;
+ }
+ current->mm->dumpable = 0;
+ if (current->rlim[RLIMIT_CORE].rlim_cur < binfmt->min_coredump)
+ goto fail;
+
+ format_corename(corename, core_pattern, signr);
+ file = filp_open(corename, O_CREAT | 2 | O_NOFOLLOW, 0600);
+ if (IS_ERR(file))
+ goto fail;
+ inode = file->f_dentry->d_inode;
+ if (inode->i_nlink > 1)
+ goto close_fail; /* multiple links - don't dump */
+ if (d_unhashed(file->f_dentry))
+ goto close_fail;
+
+ if (!S_ISREG(inode->i_mode))
+ goto close_fail;
+ if (!file->f_op)
+ goto close_fail;
+ if (!file->f_op->write)
+ goto close_fail;
+ if (do_truncate(file->f_dentry, 0) != 0)
+ goto close_fail;
+
+ retval = binfmt->core_dump(signr, regs, file);
+
+close_fail:
+ filp_close(file, NULL);
+fail:
+ if (fsuid != current->fsuid)
+ current->fsuid = fsuid;
+ unlock_kernel();
+ return retval;
+}
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/include/asm-xen/bugs.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/include/asm-xen/bugs.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,53 @@
+/*
+ * include/asm-i386/bugs.h
+ *
+ * Copyright (C) 1994 Linus Torvalds
+ *
+ * Cyrix stuff, June 1998 by:
+ * - Rafael R. Reilova (moved everything from head.S),
+ * <rreilova@xxxxxxxxxxxx>
+ * - Channing Corn (tests & fixes),
+ * - Andrew D. Balsa (code cleanup).
+ *
+ * Pentium III FXSR, SSE support
+ * Gareth Hughes <gareth@xxxxxxxxxxx>, May 2000
+ */
+
+/*
+ * This is included by init/main.c to check for architecture-dependent bugs.
+ *
+ * Needs:
+ * void check_bugs(void);
+ */
+
+#include <linux/config.h>
+#include <asm/processor.h>
+#include <asm/i387.h>
+#include <asm/msr.h>
+
+
+static void __init check_fpu(void)
+{
+ boot_cpu_data.fdiv_bug = 0;
+}
+
+static void __init check_hlt(void)
+{
+ boot_cpu_data.hlt_works_ok = 1;
+}
+
+static void __init check_bugs(void)
+{
+ extern void __init boot_init_fpu(void);
+
+ identify_cpu(&boot_cpu_data);
+ boot_init_fpu();
+#ifndef CONFIG_SMP
+ printk("CPU: ");
+ print_cpu_info(&boot_cpu_data);
+#endif
+ check_fpu();
+ check_hlt();
+ system_utsname.machine[1] = '0' +
+ (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
+}
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/include/asm-xen/desc.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/include/asm-xen/desc.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,41 @@
+#ifndef __ARCH_DESC_H
+#define __ARCH_DESC_H
+
+#include <asm/ldt.h>
+
+#ifndef __ASSEMBLY__
+
+struct desc_struct {
+ unsigned long a,b;
+};
+
+struct Xgt_desc_struct {
+ unsigned short size;
+ unsigned long address __attribute__((packed));
+};
+
+extern struct desc_struct default_ldt[];
+
+static inline void clear_LDT(void)
+{
+ /*
+ * NB. We load the default_ldt for lcall7/27 handling on demand, as
+ * it slows down context switching. No one uses it anyway.
+ */
+ queue_set_ldt(0, 0);
+}
+
+static inline void load_LDT(mm_context_t *pc)
+{
+ void *segments = pc->ldt;
+ int count = pc->size;
+
+ if ( count == 0 )
+ segments = NULL;
+
+ queue_set_ldt((unsigned long)segments, count);
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ARCH_DESC_H */
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/include/asm-xen/fixmap.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/include/asm-xen/fixmap.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,105 @@
+/*
+ * fixmap.h: compile-time virtual memory allocation
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998 Ingo Molnar
+ *
+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
+ */
+
+#ifndef _ASM_FIXMAP_H
+#define _ASM_FIXMAP_H
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <asm/apicdef.h>
+#include <asm/page.h>
+#ifdef CONFIG_HIGHMEM
+#include <linux/threads.h>
+#include <asm/kmap_types.h>
+#endif
+
+/*
+ * Here we define all the compile-time 'special' virtual
+ * addresses. The point is to have a constant address at
+ * compile time, but to set the physical address only
+ * in the boot process. We allocate these special addresses
+ * from the end of virtual memory (0xfffff000) backwards.
+ * Also this lets us do fail-safe vmalloc(), we
+ * can guarantee that these special addresses and
+ * vmalloc()-ed addresses never overlap.
+ *
+ * These 'compile-time allocated' memory buffers are
+ * fixed-size 4k pages (or larger if used with an increment
+ * higher than 1). Use fixmap_set(idx,phys) to associate
+ * physical memory with fixmap indices.
+ *
+ * TLB entries of such buffers will not be flushed across
+ * task switches.
+ */
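+/*
+ * For instance, fix_to_virt(FIX_SHARED_INFO) evaluates to
+ * FIXADDR_TOP - (FIX_SHARED_INFO << PAGE_SHIFT), placing the Xen
+ * shared-info page at a compile-time-constant address just below
+ * HYPERVISOR_VIRT_START.
+ */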
+
+enum fixed_addresses {
+#ifdef CONFIG_HIGHMEM
+ FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
+ FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
+#endif
+ FIX_BLKRING_BASE,
+ FIX_NETRING0_BASE,
+ FIX_NETRING1_BASE,
+ FIX_NETRING2_BASE,
+ FIX_NETRING3_BASE,
+ FIX_SHARED_INFO,
+ FIX_GNTTAB,
+#ifdef CONFIG_VGA_CONSOLE
+#define NR_FIX_BTMAPS 32 /* 128KB For the Dom0 VGA Console A0000-C0000 */
+#else
+#define NR_FIX_BTMAPS 1 /* in case anyone wants it in future... */
+#endif
+ FIX_BTMAP_END,
+ FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS - 1,
+ /* our bt_ioremap is permanent, unlike other architectures */
+
+ __end_of_permanent_fixed_addresses,
+ __end_of_fixed_addresses = __end_of_permanent_fixed_addresses
+};
+
+extern void __set_fixmap (enum fixed_addresses idx,
+ unsigned long phys, pgprot_t flags);
+
+#define set_fixmap(idx, phys) \
+ __set_fixmap(idx, phys, PAGE_KERNEL)
+/*
+ * Some hardware wants to get fixmapped without caching.
+ */
+#define set_fixmap_nocache(idx, phys) \
+ __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
+
+extern void clear_fixmap(enum fixed_addresses idx);
+
+/*
+ * used by vmalloc.c.
+ *
+ * Leave one empty page between vmalloc'ed areas and
+ * the start of the fixmap, and leave one page empty
+ * at the top of mem..
+ */
+#define FIXADDR_TOP (HYPERVISOR_VIRT_START - 2*PAGE_SIZE)
+#define __FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
+#define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE)
+
+#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
+
+/*
+ * 'index to address' translation. If anyone tries to use the idx
+ * directly without translation, we catch the bug with a NULL-dereference
+ * kernel oops. Illegal ranges of incoming indices are caught too.
+ */
+static inline unsigned long fix_to_virt(unsigned int idx)
+{
+ return __fix_to_virt(idx);
+}
+
+#endif
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/include/asm-xen/highmem.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/include/asm-xen/highmem.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,132 @@
+/*
+ * highmem.h: virtual kernel memory mappings for high memory
+ *
+ * Used in CONFIG_HIGHMEM systems for memory pages which
+ * are not addressable by direct kernel virtual addresses.
+ *
+ * Copyright (C) 1999 Gerhard Wichert, Siemens AG
+ * Gerhard.Wichert@xxxxxxxxxxxxxx
+ *
+ *
+ * Redesigned the x86 32-bit VM architecture to deal with
+ * up to 16 Terabyte physical memory. With current x86 CPUs
+ * we now support up to 64 Gigabytes physical RAM.
+ *
+ * Copyright (C) 1999 Ingo Molnar <mingo@xxxxxxxxxx>
+ */
+
+#ifndef _ASM_HIGHMEM_H
+#define _ASM_HIGHMEM_H
+
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <asm/kmap_types.h>
+#include <asm/pgtable.h>
+
+#ifdef CONFIG_DEBUG_HIGHMEM
+#define HIGHMEM_DEBUG 1
+#else
+#define HIGHMEM_DEBUG 0
+#endif
+
+/* declarations for highmem.c */
+extern unsigned long highstart_pfn, highend_pfn;
+
+extern pte_t *kmap_pte;
+extern pgprot_t kmap_prot;
+extern pte_t *pkmap_page_table;
+
+extern void kmap_init(void) __init;
+
+/*
+ * Right now we initialize only a single pte table. It can be extended
+ * easily, subsequent pte tables have to be allocated in one physical
+ * chunk of RAM.
+ */
+#define PKMAP_BASE (HYPERVISOR_VIRT_START - (1<<23))
+#ifdef CONFIG_X86_PAE
+#define LAST_PKMAP 512
+#else
+#define LAST_PKMAP 1024
+#endif
+#define LAST_PKMAP_MASK (LAST_PKMAP-1)
+#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
+#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
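+/*
+ * Arithmetic note: the kmap window starts 8MB (1<<23) below
+ * HYPERVISOR_VIRT_START; with 1024 (non-PAE) 4k entries it covers
+ * 4MB of virtual address space.
+ */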
+
+extern void * FASTCALL(kmap_high(struct page *page, int nonblocking));
+extern void FASTCALL(kunmap_high(struct page *page));
+
+#define kmap(page) __kmap(page, 0)
+#define kmap_nonblock(page) __kmap(page, 1)
+
+static inline void *__kmap(struct page *page, int nonblocking)
+{
+ if (in_interrupt())
+ out_of_line_bug();
+ if (page < highmem_start_page)
+ return page_address(page);
+ return kmap_high(page, nonblocking);
+}
+
+static inline void kunmap(struct page *page)
+{
+ if (in_interrupt())
+ out_of_line_bug();
+ if (page < highmem_start_page)
+ return;
+ kunmap_high(page);
+}
+
+/*
+ * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
+ * gives a more generic (and caching) interface. But kmap_atomic can
+ * be used in IRQ contexts, so in some (very limited) cases we need
+ * it.
+ */
+static inline void *kmap_atomic(struct page *page, enum km_type type)
+{
+ enum fixed_addresses idx;
+ unsigned long vaddr;
+
+ if (page < highmem_start_page)
+ return page_address(page);
+
+ idx = type + KM_TYPE_NR*smp_processor_id();
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+#if HIGHMEM_DEBUG
+ if (!pte_none(*(kmap_pte-idx)))
+ out_of_line_bug();
+#endif
+ set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
+ __flush_tlb_one(vaddr);
+
+ return (void*) vaddr;
+}
+
+static inline void kunmap_atomic(void *kvaddr, enum km_type type)
+{
+#if HIGHMEM_DEBUG
+ unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
+ enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
+
+ if (vaddr < FIXADDR_START) // FIXME
+ return;
+
+ if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
+ out_of_line_bug();
+
+ /*
+ * force other mappings to Oops if they'll try to access
+ * this pte without first remapping it
+ */
+ pte_clear(kmap_pte-idx);
+ __flush_tlb_one(vaddr);
+#endif
+}
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_HIGHMEM_H */
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/include/asm-xen/hw_irq.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/include/asm-xen/hw_irq.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,61 @@
+#ifndef _ASM_HW_IRQ_H
+#define _ASM_HW_IRQ_H
+
+/*
+ * linux/include/asm/hw_irq.h
+ *
+ * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
+ */
+
+#include <linux/config.h>
+#include <linux/smp.h>
+#include <asm/atomic.h>
+#include <asm/irq.h>
+
+#define SYSCALL_VECTOR 0x80
+
+extern int irq_vector[NR_IRQS];
+
+extern atomic_t irq_err_count;
+extern atomic_t irq_mis_count;
+
+extern char _stext, _etext;
+
+extern unsigned long prof_cpu_mask;
+extern unsigned int * prof_buffer;
+extern unsigned long prof_len;
+extern unsigned long prof_shift;
+
+/*
+ * x86 profiling function, SMP safe. We might want to do this in
+ * assembly totally?
+ */
+static inline void x86_do_profile (unsigned long eip)
+{
+ if (!prof_buffer)
+ return;
+
+ /*
+ * Only measure the CPUs specified by /proc/irq/prof_cpu_mask.
+ * (default is all CPUs.)
+ */
+ if (!((1<<smp_processor_id()) & prof_cpu_mask))
+ return;
+
+ eip -= (unsigned long) &_stext;
+ eip >>= prof_shift;
+ /*
+ * Don't ignore out-of-bounds EIP values silently,
+ * put them into the last histogram slot, so if
+ * present, they will show up as a sharp peak.
+ */
+ if (eip > prof_len-1)
+ eip = prof_len-1;
+ atomic_inc((atomic_t *)&prof_buffer[eip]);
+}
+
+static inline void hw_resend_irq(struct hw_interrupt_type *h,
+ unsigned int i)
+{}
+
+#endif /* _ASM_HW_IRQ_H */
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/include/asm-xen/io.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/include/asm-xen/io.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,457 @@
+#ifndef _ASM_IO_H
+#define _ASM_IO_H
+
+#include <linux/config.h>
+
+/*
+ * This file contains the definitions for the x86 IO instructions
+ * inb/inw/inl/outb/outw/outl and the "string versions" of the same
+ * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
+ * versions of the single-IO instructions (inb_p/inw_p/..).
+ *
+ * This file is not meant to be obfuscating: it's just complicated
+ * to (a) handle it all in a way that makes gcc able to optimize it
+ * as well as possible and (b) trying to avoid writing the same thing
+ * over and over again with slight variations and possibly making a
+ * mistake somewhere.
+ */
+
+/*
+ * Thanks to James van Artsdalen for a better timing-fix than
+ * the two short jumps: using outb's to a nonexistent port seems
+ * to guarantee better timings even on fast machines.
+ *
+ * On the other hand, I'd like to be sure of a non-existent port:
+ * I feel a bit unsafe about using 0x80 (should be safe, though)
+ *
+ * Linus
+ */
+
+ /*
+ * Bit simplified and optimized by Jan Hubicka
+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
+ *
+ * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
+ * isa_read[wl] and isa_write[wl] fixed
+ * - Arnaldo Carvalho de Melo <acme@xxxxxxxxxxxxxxxx>
+ */
+
+#define IO_SPACE_LIMIT 0xffff
+
+#define XQUAD_PORTIO_BASE 0xfe400000
+#define XQUAD_PORTIO_QUAD 0x40000 /* 256k per quad. */
+#define XQUAD_PORTIO_LEN 0x80000 /* Only remapping first 2 quads */
+
+#ifdef __KERNEL__
+
+#include <linux/vmalloc.h>
+
+/*
+ * Temporary debugging check to catch old code using
+ * unmapped ISA addresses. Will be removed in 2.4.
+ */
+#if CONFIG_DEBUG_IOVIRT
+ extern void *__io_virt_debug(unsigned long x, const char *file, int line);
+ extern unsigned long __io_phys_debug(unsigned long x, const char *file, int line);
+ #define __io_virt(x) __io_virt_debug((unsigned long)(x), __FILE__, __LINE__)
+//#define __io_phys(x) __io_phys_debug((unsigned long)(x), __FILE__, __LINE__)
+#else
+ #define __io_virt(x) ((void *)(x))
+//#define __io_phys(x) __pa(x)
+#endif
+
+/**
+ * virt_to_phys - map virtual addresses to physical
+ * @address: address to remap
+ *
+ * The returned physical address is the physical (CPU) mapping for
+ * the memory address given. It is only valid to use this function on
+ * addresses directly mapped or allocated via kmalloc.
+ *
+ * This function does not give bus mappings for DMA transfers. In
+ * almost all conceivable cases a device driver should not be using
+ * this function
+ */
+
+static inline unsigned long virt_to_phys(volatile void * address)
+{
+ return __pa(address);
+}
+
+/**
+ * phys_to_virt - map physical address to virtual
+ * @address: address to remap
+ *
+ * The returned virtual address is a current CPU mapping for
+ * the memory address given. It is only valid to use this function on
+ * addresses that have a kernel mapping
+ *
+ * This function does not handle bus mappings for DMA transfers. In
+ * almost all conceivable cases a device driver should not be using
+ * this function
+ */
+
+static inline void * phys_to_virt(unsigned long address)
+{
+ return __va(address);
+}
+
+/*
+ * We define page_to_phys 'incorrectly' because it is used when merging blkdev
+ * requests, and the correct thing to do there is to use machine addresses.
+ */
+#define page_to_phys(_x) phys_to_machine(((_x) - mem_map) << PAGE_SHIFT)
+
+extern void * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
+
+/**
+ * ioremap - map bus memory into CPU space
+ * @offset: bus address of the memory
+ * @size: size of the resource to map
+ *
+ * ioremap performs a platform specific sequence of operations to
+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
+ * writew/writel functions and the other mmio helpers. The returned
+ * address is not guaranteed to be usable directly as a virtual
+ * address.
+ */
+
+static inline void * ioremap (unsigned long offset, unsigned long size)
+{
+ return __ioremap(offset, size, 0);
+}
+
+/**
+ * ioremap_nocache - map bus memory into CPU space
+ * @offset: bus address of the memory
+ * @size: size of the resource to map
+ *
+ * ioremap_nocache performs a platform specific sequence of operations to
+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
+ * writew/writel functions and the other mmio helpers. The returned
+ * address is not guaranteed to be usable directly as a virtual
+ * address.
+ *
+ * This version of ioremap ensures that the memory is marked uncachable
+ * on the CPU as well as honouring existing caching rules from things like
+ * the PCI bus. Note that there are other caches and buffers on many
+ * busses. In particular, driver authors should read up on PCI writes
+ *
+ * It's useful if some control registers are in such an area and
+ * write combining or read caching is not desirable:
+ */
+
+static inline void * ioremap_nocache (unsigned long offset, unsigned long size)
+{
+ return __ioremap(offset, size, _PAGE_PCD);
+}
+
+extern void iounmap(void *addr);
+
+/*
+ * bt_ioremap() and bt_iounmap() are for temporary early boot-time
+ * mappings, before the real ioremap() is functional.
+ * A boot-time mapping is currently limited to at most 16 pages.
+ */
+extern void *bt_ioremap(unsigned long offset, unsigned long size);
+extern void bt_iounmap(void *addr, unsigned long size);
+
+#define virt_to_bus(_x) phys_to_machine(virt_to_phys(_x))
+#define bus_to_virt(_x) phys_to_virt(machine_to_phys(_x))
+#define page_to_bus(_x) phys_to_machine(((_x) - mem_map) << PAGE_SHIFT)
+#define bus_to_phys(_x) machine_to_phys(_x)
+#define bus_to_page(_x) (mem_map + (bus_to_phys(_x) >> PAGE_SHIFT))
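+/*
+ * Under Xen a guest's "physical" addresses are pseudo-physical, so bus
+ * addresses handed to devices must be real machine addresses; hence
+ * the conversions above all go through phys_to_machine() and
+ * machine_to_phys().
+ */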
+
+/*
+ * readX/writeX() are used to access memory mapped devices. On some
+ * architectures the memory mapped IO stuff needs to be accessed
+ * differently. On the x86 architecture, we just read/write the
+ * memory location directly.
+ */
+
+#define readb(addr) (*(volatile unsigned char *) __io_virt(addr))
+#define readw(addr) (*(volatile unsigned short *) __io_virt(addr))
+#define readl(addr) (*(volatile unsigned int *) __io_virt(addr))
+#define __raw_readb readb
+#define __raw_readw readw
+#define __raw_readl readl
+
+#define writeb(b,addr) (*(volatile unsigned char *) __io_virt(addr) = (b))
+#define writew(b,addr) (*(volatile unsigned short *) __io_virt(addr) = (b))
+#define writel(b,addr) (*(volatile unsigned int *) __io_virt(addr) = (b))
+#define __raw_writeb writeb
+#define __raw_writew writew
+#define __raw_writel writel
+
+#define memset_io(a,b,c) __memset(__io_virt(a),(b),(c))
+#define memcpy_fromio(a,b,c) __memcpy((a),__io_virt(b),(c))
+#define memcpy_toio(a,b,c) __memcpy(__io_virt(a),(b),(c))
+
+/*
+ * ISA space is 'always mapped' on a typical x86 system, no need to
+ * explicitly ioremap() it. The fact that the ISA IO space is mapped
+ * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
+ * are physical addresses. The following constant pointer can be
+ * used as the IO-area pointer (it can be iounmapped as well, so the
+ * analogy with PCI is quite large):
+ */
+#define __ISA_IO_base ((char *)(PAGE_OFFSET))
+
+#define isa_readb(a) readb(__ISA_IO_base + (a))
+#define isa_readw(a) readw(__ISA_IO_base + (a))
+#define isa_readl(a) readl(__ISA_IO_base + (a))
+#define isa_writeb(b,a) writeb(b,__ISA_IO_base + (a))
+#define isa_writew(w,a) writew(w,__ISA_IO_base + (a))
+#define isa_writel(l,a) writel(l,__ISA_IO_base + (a))
+#define isa_memset_io(a,b,c) memset_io(__ISA_IO_base + (a),(b),(c))
+#define isa_memcpy_fromio(a,b,c) memcpy_fromio((a),__ISA_IO_base + (b),(c))
+#define isa_memcpy_toio(a,b,c) memcpy_toio(__ISA_IO_base + (a),(b),(c))
+
+
+/*
+ * Again, i386 does not require a mem-IO-specific function.
+ */
+
+#define eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),__io_virt(b),(c),(d))
+#define isa_eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),__io_virt(__ISA_IO_base + (b)),(c),(d))
+
+/**
+ * check_signature - find BIOS signatures
+ * @io_addr: mmio address to check
+ * @signature: signature block
+ * @length: length of signature
+ *
+ * Perform a signature comparison with the mmio address io_addr. This
+ * address should have been obtained by ioremap.
+ * Returns 1 on a match.
+ */
+
+static inline int check_signature(unsigned long io_addr,
+ const unsigned char *signature, int length)
+{
+ int retval = 0;
+ do {
+ if (readb(io_addr) != *signature)
+ goto out;
+ io_addr++;
+ signature++;
+ length--;
+ } while (length);
+ retval = 1;
+out:
+ return retval;
+}
+
+/**
+ * isa_check_signature - find BIOS signatures
+ * @io_addr: mmio address to check
+ * @signature: signature block
+ * @length: length of signature
+ *
+ * Perform a signature comparison with the ISA mmio address io_addr.
+ * Returns 1 on a match.
+ *
+ * This function is deprecated. New drivers should use ioremap and
+ * check_signature.
+ */
+
+
+static inline int isa_check_signature(unsigned long io_addr,
+ const unsigned char *signature, int length)
+{
+ int retval = 0;
+ do {
+ if (isa_readb(io_addr) != *signature)
+ goto out;
+ io_addr++;
+ signature++;
+ length--;
+ } while (length);
+ retval = 1;
+out:
+ return retval;
+}
+
+/*
+ * Cache management
+ *
+ * This needed for two cases
+ * 1. Out of order aware processors
+ * 2. Accidentally out of order processors (PPro errata #51)
+ */
+
+#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)
+
+static inline void flush_write_buffers(void)
+{
+ __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory");
+}
+
+#define dma_cache_inv(_start,_size) flush_write_buffers()
+#define dma_cache_wback(_start,_size) flush_write_buffers()
+#define dma_cache_wback_inv(_start,_size) flush_write_buffers()
+
+#else
+
+/* Nothing to do */
+
+#define dma_cache_inv(_start,_size) do { } while (0)
+#define dma_cache_wback(_start,_size) do { } while (0)
+#define dma_cache_wback_inv(_start,_size) do { } while (0)
+#define flush_write_buffers()
+
+#endif
+
+#endif /* __KERNEL__ */
+
+#ifdef SLOW_IO_BY_JUMPING
+#define __SLOW_DOWN_IO "\njmp 1f\n1:\tjmp 1f\n1:"
+#elif defined(__UNSAFE_IO__)
+#define __SLOW_DOWN_IO "\noutb %%al,$0x80"
+#else
+#define __SLOW_DOWN_IO "\n1: outb %%al,$0x80\n" \
+ "2:\n" \
+ ".section __ex_table,\"a\"\n\t" \
+ ".align 4\n\t" \
+ ".long 1b,2b\n" \
+ ".previous"
+#endif
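+/*
+ * The __ex_table entry above (".long 1b,2b") makes the port-0x80 write
+ * recoverable: if the domain lacks I/O privilege and the outb faults,
+ * the fault handler resumes at label 2 and the delay is simply
+ * skipped instead of oopsing.
+ */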
+
+#ifdef REALLY_SLOW_IO
+#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO
+#else
+#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO
+#endif
+
+#ifdef CONFIG_MULTIQUAD
+extern void *xquad_portio; /* Where the IO area was mapped */
+#endif /* CONFIG_MULTIQUAD */
+
+/*
+ * Talk about misusing macros..
+ */
+#define __OUT1(s,x) \
+static inline void out##s(unsigned x value, unsigned short port) {
+
+#ifdef __UNSAFE_IO__
+#define __OUT2(s,s1,s2) \
+__asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1"
+#else
+#define __OUT2(s,s1,s2) \
+__asm__ __volatile__ ("1: out" #s " %" s1 "0,%" s2 "1\n" \
+ "2:\n" \
+ ".section __ex_table,\"a\"\n\t" \
+ ".align 4\n\t" \
+ ".long 1b,2b\n" \
+ ".previous"
+#endif
+
+#if defined (CONFIG_MULTIQUAD) && !defined(STANDALONE)
+#define __OUTQ(s,ss,x) /* Do the equivalent of the portio op on quads */ \
+static inline void out##ss(unsigned x value, unsigned short port) { \
+ if (xquad_portio) \
+ write##s(value, (unsigned long) xquad_portio + port); \
+ else /* We're still in early boot, running on quad 0 */ \
+ out##ss##_local(value, port); \
+} \
+static inline void out##ss##_quad(unsigned x value, unsigned short port, int quad) { \
+ if (xquad_portio) \
+ write##s(value, (unsigned long) xquad_portio + (XQUAD_PORTIO_QUAD*quad) + port); \
+}
+
+#define __INQ(s,ss) /* Do the equivalent of the portio op on quads */ \
+static inline RETURN_TYPE in##ss(unsigned short port) { \
+ if (xquad_portio) \
+ return read##s((unsigned long) xquad_portio + port); \
+ else /* We're still in early boot, running on quad 0 */ \
+ return in##ss##_local(port); \
+} \
+static inline RETURN_TYPE in##ss##_quad(unsigned short port, int quad) { \
+ if (xquad_portio) \
+ return read##s((unsigned long) xquad_portio + (XQUAD_PORTIO_QUAD*quad) + port); \
+ else\
+ return 0;\
+}
+#endif /* CONFIG_MULTIQUAD && !STANDALONE */
+
+#if !defined(CONFIG_MULTIQUAD) || defined(STANDALONE)
+#define __OUT(s,s1,x) \
+__OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \
+__OUT1(s##_p,x) __OUT2(s,s1,"w") __FULL_SLOW_DOWN_IO : : "a" (value), "Nd" (port));}
+#else
+/* Make the default portio routines operate on quad 0 */
+#define __OUT(s,s1,x) \
+__OUT1(s##_local,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \
+__OUT1(s##_p_local,x) __OUT2(s,s1,"w") __FULL_SLOW_DOWN_IO : : "a" (value), "Nd" (port));} \
+__OUTQ(s,s,x) \
+__OUTQ(s,s##_p,x)
+#endif /* !CONFIG_MULTIQUAD || STANDALONE */
+
+#define __IN1(s) \
+static inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v;
+
+#ifdef __UNSAFE_IO__
+#define __IN2(s,s1,s2) \
+__asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0"
+#else
+#define __IN2(s,s1,s2) \
+__asm__ __volatile__ ("1: in" #s " %" s2 "1,%" s1 "0\n" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: mov" #s " $~0,%" s1 "0\n\t" \
+ "jmp 2b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n\t" \
+ ".align 4\n\t" \
+ ".long 1b,3b\n" \
+ ".previous"
+#endif
+
+#if !defined(CONFIG_MULTIQUAD) || defined(STANDALONE)
+#define __IN(s,s1,i...) \
+__IN1(s) __IN2(s,s1,"w") : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
+__IN1(s##_p) __IN2(s,s1,"w") __FULL_SLOW_DOWN_IO : "=a" (_v) : "Nd" (port) ,##i ); return _v; }
+#else
+/* Make the default portio routines operate on quad 0 */
+#define __IN(s,s1,i...) \
+__IN1(s##_local) __IN2(s,s1,"w") : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
+__IN1(s##_p_local) __IN2(s,s1,"w") __FULL_SLOW_DOWN_IO : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
+__INQ(s,s) \
+__INQ(s,s##_p)
+#endif /* !CONFIG_MULTIQUAD || STANDALONE */
+
+#define __INS(s) \
+static inline void ins##s(unsigned short port, void * addr, unsigned long count) \
+{ __asm__ __volatile__ ("rep ; ins" #s \
+: "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
+
+#define __OUTS(s) \
+static inline void outs##s(unsigned short port, const void * addr, unsigned long count) \
+{ __asm__ __volatile__ ("rep ; outs" #s \
+: "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
+
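+/*
+ * The instantiations below generate the usual port I/O family; e.g.
+ * __OUT(b,"b",char) expands (roughly) to outb(value, port) plus a
+ * pausing outb_p(value, port), and __IN(b,"") likewise yields
+ * inb(port) and inb_p(port).
+ */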
+#define RETURN_TYPE unsigned char
+__IN(b,"")
+#undef RETURN_TYPE
+#define RETURN_TYPE unsigned short
+__IN(w,"")
+#undef RETURN_TYPE
+#define RETURN_TYPE unsigned int
+__IN(l,"")
+#undef RETURN_TYPE
+
+__OUT(b,"b",char)
+__OUT(w,"w",short)
+__OUT(l,,int)
+
+__INS(b)
+__INS(w)
+__INS(l)
+
+__OUTS(b)
+__OUTS(w)
+__OUTS(l)
+
+#endif
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/include/asm-xen/irq.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/include/asm-xen/irq.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,65 @@
+#ifndef _ASM_IRQ_H
+#define _ASM_IRQ_H
+
+/*
+ * linux/include/asm/irq.h
+ *
+ * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
+ *
+ * IRQ/IPI changes taken from work by Thomas Radke
+ * <tomsoft@xxxxxxxxxxxxxxxxxxxxxxxxx>
+ */
+
+#include <linux/config.h>
+#include <asm/hypervisor.h>
+#include <asm/ptrace.h>
+
+/*
+ * The flat IRQ space is divided into two regions:
+ * 1. A one-to-one mapping of real physical IRQs. This space is only used
+ * if we have physical device-access privilege. This region is at the
+ * start of the IRQ space so that existing device drivers do not need
+ * to be modified to translate physical IRQ numbers into our IRQ space.
+ * 2. A dynamic mapping of inter-domain and Xen-sourced virtual IRQs. These
+ * are bound using the provided bind/unbind functions.
+ */
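+/*
+ * Example (illustrative): with the constants below, physical IRQ 14
+ * stays at Linux irq 14 (pirq_to_irq(14) == 14), while the first
+ * dynamically bound event channel lands at irq 128
+ * (dynirq_to_irq(0) == 128).
+ */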
+
+#define PIRQ_BASE 0
+#define NR_PIRQS 128
+
+#define DYNIRQ_BASE (PIRQ_BASE + NR_PIRQS)
+#define NR_DYNIRQS 128
+
+#define NR_IRQS (NR_PIRQS + NR_DYNIRQS)
+
+#define pirq_to_irq(_x) ((_x) + PIRQ_BASE)
+#define irq_to_pirq(_x) ((_x) - PIRQ_BASE)
+
+#define dynirq_to_irq(_x) ((_x) + DYNIRQ_BASE)
+#define irq_to_dynirq(_x) ((_x) - DYNIRQ_BASE)
+
+/* Dynamic binding of event channels and VIRQ sources to Linux IRQ space. */
+extern int bind_virq_to_irq(int virq);
+extern void unbind_virq_from_irq(int virq);
+extern int bind_evtchn_to_irq(int evtchn);
+extern void unbind_evtchn_from_irq(int evtchn);
+
+static __inline__ int irq_cannonicalize(int irq)
+{
+ return (irq == 2) ? 9 : irq;
+}
+
+extern void disable_irq(unsigned int);
+extern void disable_irq_nosync(unsigned int);
+extern void enable_irq(unsigned int);
+
+extern void irq_suspend(void);
+extern void irq_resume(void);
+
+
+#define CPU_MASK_NONE 0
+
+/* XXX SMH: no-op for compat w/ 2.6 shared files */
+#define irq_ctx_init(cpu) do { ; } while (0)
+
+#endif /* _ASM_IRQ_H */
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/include/asm-xen/keyboard.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/include/asm-xen/keyboard.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,74 @@
+/*
+ * linux/include/asm-i386/keyboard.h
+ *
+ * Created 3 Nov 1996 by Geert Uytterhoeven
+ */
+
+/*
+ * This file contains the i386 architecture specific keyboard definitions
+ */
+
+#ifndef _I386_KEYBOARD_H
+#define _I386_KEYBOARD_H
+
+#ifdef __KERNEL__
+
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <linux/kd.h>
+#include <linux/pm.h>
+#include <asm/io.h>
+
+#define KEYBOARD_IRQ 1
+#define DISABLE_KBD_DURING_INTERRUPTS 0
+
+extern int pckbd_setkeycode(unsigned int scancode, unsigned int keycode);
+extern int pckbd_getkeycode(unsigned int scancode);
+extern int pckbd_translate(unsigned char scancode, unsigned char *keycode,
+ char raw_mode);
+extern char pckbd_unexpected_up(unsigned char keycode);
+extern void pckbd_leds(unsigned char leds);
+extern void pckbd_init_hw(void);
+extern int pckbd_pm_resume(struct pm_dev *, pm_request_t, void *);
+extern pm_callback pm_kbd_request_override;
+extern unsigned char pckbd_sysrq_xlate[128];
+
+#define kbd_setkeycode pckbd_setkeycode
+#define kbd_getkeycode pckbd_getkeycode
+#define kbd_translate pckbd_translate
+#define kbd_unexpected_up pckbd_unexpected_up
+#define kbd_leds pckbd_leds
+#define kbd_init_hw pckbd_init_hw
+#define kbd_sysrq_xlate pckbd_sysrq_xlate
+
+#define SYSRQ_KEY 0x54
+
+#define kbd_controller_present() (xen_start_info.flags & SIF_INITDOMAIN)
+
+/* resource allocation */
+#define kbd_request_region()
+#define kbd_request_irq(handler) request_irq(KEYBOARD_IRQ, handler, 0, \
+ "keyboard", NULL)
+
+/* How to access the keyboard macros on this platform. */
+#define kbd_read_input() inb(KBD_DATA_REG)
+#define kbd_read_status() inb(KBD_STATUS_REG)
+#define kbd_write_output(val) outb(val, KBD_DATA_REG)
+#define kbd_write_command(val) outb(val, KBD_CNTL_REG)
+
+/* Some stoneage hardware needs delays after some operations. */
+#define kbd_pause() do { } while(0)
+
+/*
+ * Machine specific bits for the PS/2 driver
+ */
+
+#define AUX_IRQ 12
+
+#define aux_request_irq(hand, dev_id) \
+ request_irq(AUX_IRQ, hand, SA_SHIRQ, "PS/2 Mouse", dev_id)
+
+#define aux_free_irq(dev_id) free_irq(AUX_IRQ, dev_id)
+
+#endif /* __KERNEL__ */
+#endif /* _I386_KEYBOARD_H */
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/include/asm-xen/mmu_context.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/include/asm-xen/mmu_context.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,74 @@
+#ifndef __I386_MMU_CONTEXT_H
+#define __I386_MMU_CONTEXT_H
+
+#include <linux/config.h>
+#include <asm/desc.h>
+#include <asm/atomic.h>
+#include <asm/pgalloc.h>
+
+/*
+ * hooks to add arch specific data into the mm struct.
+ * Note that destroy_context is called even if init_new_context
+ * fails.
+ */
+int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
+void destroy_context(struct mm_struct *mm);
+
+#ifdef CONFIG_SMP
+
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+{
+ if(cpu_tlbstate[cpu].state == TLBSTATE_OK)
+ cpu_tlbstate[cpu].state = TLBSTATE_LAZY;
+}
+#else
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+{
+}
+#endif
+
+extern pgd_t *cur_pgd;
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk, unsigned cpu)
+{
+ if (prev != next) {
+ /* stop flush ipis for the previous mm */
+ clear_bit(cpu, &prev->cpu_vm_mask);
+#ifdef CONFIG_SMP
+ cpu_tlbstate[cpu].state = TLBSTATE_OK;
+ cpu_tlbstate[cpu].active_mm = next;
+#endif
+
+ /* Re-load page tables */
+ cur_pgd = next->pgd;
+ queue_pt_switch(__pa(cur_pgd));
+ /* load_LDT, if either the previous or next thread
+ * has a non-default LDT.
+ */
+ if (next->context.size+prev->context.size)
+ load_LDT(&next->context);
+ }
+#ifdef CONFIG_SMP
+ else {
+ cpu_tlbstate[cpu].state = TLBSTATE_OK;
+ if(cpu_tlbstate[cpu].active_mm != next)
+ out_of_line_bug();
+ if(!test_and_set_bit(cpu, &next->cpu_vm_mask)) {
+ /* We were in lazy tlb mode and leave_mm disabled
+ * tlb flush IPI delivery. We must reload %cr3.
+ */
+ cur_pgd = next->pgd;
+ queue_pt_switch(__pa(cur_pgd));
+ load_LDT(next);
+ }
+ }
+#endif
+}
+
+#define activate_mm(prev, next) \
+do { \
+ switch_mm((prev),(next),NULL,smp_processor_id()); \
+ flush_page_update_queue(); \
+} while ( 0 )
+
+#endif
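
[Example -- not part of the patch] Note that switch_mm() never touches %cr3 directly: the page-table switch is queued, and nothing reaches Xen until the update queue is flushed. That is exactly what the activate_mm() macro above packages up; a hedged restatement with an illustrative name:

    /* Sketch only: what a context switch amounts to on this port. */
    static void example_activate(struct mm_struct *prev, struct mm_struct *next)
    {
        switch_mm(prev, next, NULL, smp_processor_id()); /* queues pt switch */
        flush_page_update_queue();   /* one hypercall applies the queue */
    }
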
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/include/asm-xen/module.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/include/asm-xen/module.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,14 @@
+#ifndef _ASM_I386_MODULE_H
+#define _ASM_I386_MODULE_H
+/*
+ * This file contains the i386 architecture specific module code.
+ */
+
+extern int xen_module_init(struct module *mod);
+
+#define module_map(x) vmalloc(x)
+#define module_unmap(x) vfree(x)
+#define module_arch_init(x) xen_module_init(x)
+#define arch_init_modules(x) do { } while (0)
+
+#endif /* _ASM_I386_MODULE_H */
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/include/asm-xen/msr.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/include/asm-xen/msr.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,138 @@
+#ifndef __ASM_MSR_H
+#define __ASM_MSR_H
+
+/*
+ * Access to machine-specific registers (available on 586 and better only)
+ * Note: the rd* operations modify the parameters directly (without using
+ * pointer indirection), this allows gcc to optimize better
+ */
+
+#define rdmsr(msr,val1,val2) \
+{ \
+ dom0_op_t op; \
+ op.cmd = DOM0_MSR; \
+ op.u.msr.write = 0; \
+ op.u.msr.msr = msr; \
+ op.u.msr.cpu_mask = (1 << current->processor); \
+ HYPERVISOR_dom0_op(&op); \
+ val1 = op.u.msr.out1; \
+ val2 = op.u.msr.out2; \
+}
+
+#define wrmsr(msr,val1,val2) \
+{ \
+ dom0_op_t op; \
+ op.cmd = DOM0_MSR; \
+ op.u.msr.write = 1; \
+ op.u.msr.cpu_mask = (1 << current->processor); \
+ op.u.msr.msr = msr; \
+ op.u.msr.in1 = val1; \
+ op.u.msr.in2 = val2; \
+ HYPERVISOR_dom0_op(&op); \
+}
+
+#define rdtsc(low,high) \
+ __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
+
+#define rdtscl(low) \
+ __asm__ __volatile__("rdtsc" : "=a" (low) : : "edx")
+
+#define rdtscll(val) \
+ __asm__ __volatile__("rdtsc" : "=A" (val))
+
+#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
+
+#define rdpmc(counter,low,high) \
+ __asm__ __volatile__("rdpmc" \
+ : "=a" (low), "=d" (high) \
+ : "c" (counter))
+
+/* symbolic names for some interesting MSRs */
+/* Intel defined MSRs. */
+#define MSR_IA32_P5_MC_ADDR 0
+#define MSR_IA32_P5_MC_TYPE 1
+#define MSR_IA32_PLATFORM_ID 0x17
+#define MSR_IA32_EBL_CR_POWERON 0x2a
+
+#define MSR_IA32_APICBASE 0x1b
+#define MSR_IA32_APICBASE_BSP (1<<8)
+#define MSR_IA32_APICBASE_ENABLE (1<<11)
+#define MSR_IA32_APICBASE_BASE (0xfffff<<12)
+
+#define MSR_IA32_UCODE_WRITE 0x79
+#define MSR_IA32_UCODE_REV 0x8b
+
+#define MSR_IA32_BBL_CR_CTL 0x119
+
+#define MSR_IA32_MCG_CAP 0x179
+#define MSR_IA32_MCG_STATUS 0x17a
+#define MSR_IA32_MCG_CTL 0x17b
+
+#define MSR_IA32_THERM_CONTROL 0x19a
+#define MSR_IA32_THERM_INTERRUPT 0x19b
+#define MSR_IA32_THERM_STATUS 0x19c
+#define MSR_IA32_MISC_ENABLE 0x1a0
+
+#define MSR_IA32_DEBUGCTLMSR 0x1d9
+#define MSR_IA32_LASTBRANCHFROMIP 0x1db
+#define MSR_IA32_LASTBRANCHTOIP 0x1dc
+#define MSR_IA32_LASTINTFROMIP 0x1dd
+#define MSR_IA32_LASTINTTOIP 0x1de
+
+#define MSR_IA32_MC0_CTL 0x400
+#define MSR_IA32_MC0_STATUS 0x401
+#define MSR_IA32_MC0_ADDR 0x402
+#define MSR_IA32_MC0_MISC 0x403
+
+#define MSR_P6_PERFCTR0 0xc1
+#define MSR_P6_PERFCTR1 0xc2
+#define MSR_P6_EVNTSEL0 0x186
+#define MSR_P6_EVNTSEL1 0x187
+
+#define MSR_IA32_PERF_STATUS 0x198
+#define MSR_IA32_PERF_CTL 0x199
+
+/* AMD Defined MSRs */
+#define MSR_K6_EFER 0xC0000080
+#define MSR_K6_STAR 0xC0000081
+#define MSR_K6_WHCR 0xC0000082
+#define MSR_K6_UWCCR 0xC0000085
+#define MSR_K6_EPMR 0xC0000086
+#define MSR_K6_PSOR 0xC0000087
+#define MSR_K6_PFIR 0xC0000088
+
+#define MSR_K7_EVNTSEL0 0xC0010000
+#define MSR_K7_PERFCTR0 0xC0010004
+#define MSR_K7_HWCR 0xC0010015
+#define MSR_K7_CLK_CTL 0xC001001b
+#define MSR_K7_FID_VID_CTL 0xC0010041
+#define MSR_K7_VID_STATUS 0xC0010042
+
+/* Centaur-Hauls/IDT defined MSRs. */
+#define MSR_IDT_FCR1 0x107
+#define MSR_IDT_FCR2 0x108
+#define MSR_IDT_FCR3 0x109
+#define MSR_IDT_FCR4 0x10a
+
+#define MSR_IDT_MCR0 0x110
+#define MSR_IDT_MCR1 0x111
+#define MSR_IDT_MCR2 0x112
+#define MSR_IDT_MCR3 0x113
+#define MSR_IDT_MCR4 0x114
+#define MSR_IDT_MCR5 0x115
+#define MSR_IDT_MCR6 0x116
+#define MSR_IDT_MCR7 0x117
+#define MSR_IDT_MCR_CTRL 0x120
+
+/* VIA Cyrix defined MSRs*/
+#define MSR_VIA_FCR 0x1107
+#define MSR_VIA_LONGHAUL 0x110a
+#define MSR_VIA_BCR2 0x1147
+
+/* Transmeta defined MSRs */
+#define MSR_TMTA_LONGRUN_CTRL 0x80868010
+#define MSR_TMTA_LONGRUN_FLAGS 0x80868011
+#define MSR_TMTA_LRTI_READOUT 0x80868018
+#define MSR_TMTA_LRTI_VOLT_MHZ 0x8086801a
+
+#endif /* __ASM_MSR_H */
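
[Example -- not part of the patch] Unlike the native instructions, rdmsr and wrmsr here cost a full DOM0_MSR hypercall and therefore only work from a privileged domain, on the CPUs selected by cpu_mask. Usage is unchanged from native Linux; the function name below is illustrative:

    /* Sketch only: reading an MSR through the dom0_op path above. */
    static void example_read_apicbase(void)
    {
        unsigned long lo, hi;
        rdmsr(MSR_IA32_APICBASE, lo, hi);  /* one HYPERVISOR_dom0_op() */
        printk("APICBASE = %08lx:%08lx\n", hi, lo);
    }
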
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/include/asm-xen/page.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/include/asm-xen/page.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,182 @@
+#ifndef _I386_PAGE_H
+#define _I386_PAGE_H
+
+/* PAGE_SHIFT determines the page size */
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (1UL << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE-1))
+
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+
+#include <linux/config.h>
+#include <linux/string.h>
+#include <asm/types.h>
+#include <asm-xen/xen-public/xen.h>
+
+#ifdef CONFIG_XEN_SCRUB_PAGES
+#define scrub_pages(_p,_n) memset((void *)(_p), 0, (_n) << PAGE_SHIFT)
+#else
+#define scrub_pages(_p,_n) ((void)0)
+#endif
+
+#ifdef CONFIG_X86_USE_3DNOW
+
+#include <asm/mmx.h>
+
+#define clear_page(page) mmx_clear_page((void *)(page))
+#define copy_page(to,from) mmx_copy_page(to,from)
+
+#else
+
+/*
+ * On older X86 processors it's not a win to use MMX here it seems.
+ * Maybe the K6-III ?
+ */
+
+#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
+#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE)
+
+#endif
+
+#define clear_user_page(page, vaddr) clear_page(page)
+#define copy_user_page(to, from, vaddr) copy_page(to, from)
+
+/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
+extern unsigned long *phys_to_machine_mapping;
+#define pfn_to_mfn(_pfn) (phys_to_machine_mapping[(_pfn)])
+#define mfn_to_pfn(_mfn) (machine_to_phys_mapping[(_mfn)])
+static inline unsigned long phys_to_machine(unsigned long phys)
+{
+ unsigned long machine = pfn_to_mfn(phys >> PAGE_SHIFT);
+ machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
+ return machine;
+}
+static inline unsigned long machine_to_phys(unsigned long machine)
+{
+ unsigned long phys = mfn_to_pfn(machine >> PAGE_SHIFT);
+ phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
+ return phys;
+}
+
+/*
+ * These are used to make use of C type-checking..
+ */
+#if CONFIG_X86_PAE
+typedef struct { unsigned long pte_low, pte_high; } pte_t;
+typedef struct { unsigned long long pmd; } pmd_t;
+typedef struct { unsigned long long pgd; } pgd_t;
+#define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
+#else
+typedef struct { unsigned long pte_low; } pte_t;
+typedef struct { unsigned long pmd; } pmd_t;
+typedef struct { unsigned long pgd; } pgd_t;
+static inline unsigned long pte_val(pte_t x)
+{
+ unsigned long ret = x.pte_low;
+ if ( (ret & 1) ) ret = machine_to_phys(ret);
+ return ret;
+}
+#define pte_val_ma(x) ((x).pte_low)
+#endif
+#define PTE_MASK PAGE_MASK
+
+typedef struct { unsigned long pgprot; } pgprot_t;
+
+static inline unsigned long pmd_val(pmd_t x)
+{
+ unsigned long ret = x.pmd;
+ if ( (ret & 1) ) ret = machine_to_phys(ret);
+ return ret;
+}
+#define pgd_val(x) ({ BUG(); (unsigned long)0; })
+#define pgprot_val(x) ((x).pgprot)
+
+static inline pte_t __pte(unsigned long x)
+{
+ if ( (x & 1) ) x = phys_to_machine(x);
+ return ((pte_t) { (x) });
+}
+static inline pmd_t __pmd(unsigned long x)
+{
+ if ( (x & 1) ) x = phys_to_machine(x);
+ return ((pmd_t) { (x) });
+}
+#define __pgd(x) ({ BUG(); (pgprot_t) { 0 }; })
+#define __pgprot(x) ((pgprot_t) { (x) } )
+
+#endif /* !__ASSEMBLY__ */
+
+/* to align the pointer to the (next) page boundary */
+#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
+
+/*
+ * This handles the memory map.. We could make this a config
+ * option, but too many people screw it up, and too few need
+ * it.
+ *
+ * A __PAGE_OFFSET of 0xC0000000 means that the kernel has
+ * a virtual address space of one gigabyte, which limits the
+ * amount of physical memory you can use to about 950MB.
+ *
+ * If you want more physical memory than this then see the CONFIG_HIGHMEM4G
+ * and CONFIG_HIGHMEM64G options in the kernel configuration.
+ */
+
+#define __PAGE_OFFSET (0xC0000000)
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Tell the user there is some problem. Beep too, so we can
+ * see^H^H^Hhear bugs in early bootup as well!
+ * The offending file and line are encoded after the "officially
+ * undefined" opcode for parsing in the trap handler.
+ */
+
+#if 1 /* Set to zero for a slightly smaller kernel */
+#define BUG() \
+ __asm__ __volatile__( "ud2\n" \
+ "\t.word %c0\n" \
+ "\t.long %c1\n" \
+ : : "i" (__LINE__), "i" (__FILE__))
+#else
+#define BUG() __asm__ __volatile__("ud2\n")
+#endif
+
+#define PAGE_BUG(page) do { \
+ BUG(); \
+} while (0)
+
+/* Pure 2^n version of get_order */
+static __inline__ int get_order(unsigned long size)
+{
+ int order;
+
+ size = (size-1) >> (PAGE_SHIFT-1);
+ order = -1;
+ do {
+ size >>= 1;
+ order++;
+ } while (size);
+ return order;
+}
+
+#endif /* __ASSEMBLY__ */
+
+#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
+#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
+#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
+#define virt_to_page(kaddr) (mem_map + (__pa(kaddr) >> PAGE_SHIFT))
+#define VALID_PAGE(page) ((page - mem_map) < max_mapnr)
+
+#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+/* VIRT <-> MACHINE conversion */
+#define virt_to_machine(_a) (phys_to_machine(__pa(_a)))
+#define machine_to_virt(_m) (__va(machine_to_phys(_m)))
+
+#endif /* __KERNEL__ */
+
+#endif /* _I386_PAGE_H */
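
[Example -- not part of the patch] The conversion helpers translate only the frame number through the p2m/m2p tables; the offset within the page passes through untouched, so the round trip is exact for any frame in our reservation. A worked sketch (illustrative name):

    /* Sketch only: phys -> machine -> phys is the identity for our RAM. */
    static void example_round_trip(unsigned long phys)
    {
        unsigned long mach = phys_to_machine(phys);
        /* (mach & ~PAGE_MASK) == (phys & ~PAGE_MASK): offset preserved */
        if ( machine_to_phys(mach) != phys )
            BUG();  /* would mean the m2p and p2m tables disagree */
    }
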
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/include/asm-xen/pci.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/include/asm-xen/pci.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,283 @@
+#ifndef __i386_PCI_H
+#define __i386_PCI_H
+
+#include <linux/config.h>
+
+#ifdef __KERNEL__
+
+/* Can be used to override the logic in pci_scan_bus for skipping
+ already-configured bus numbers - to be used for buggy BIOSes
+ or architectures with incomplete PCI setup by the loader */
+
+#ifdef CONFIG_PCI
+extern unsigned int pcibios_assign_all_busses(void);
+#else
+#define pcibios_assign_all_busses() 0
+#endif
+#define pcibios_scan_all_fns() 0
+
+extern unsigned long pci_mem_start;
+#define PCIBIOS_MIN_IO 0x1000
+#define PCIBIOS_MIN_MEM (pci_mem_start)
+
+void pcibios_config_init(void);
+struct pci_bus * pcibios_scan_root(int bus);
+extern int (*pci_config_read)(int seg, int bus, int dev, int fn, int reg, int len, u32 *value);
+extern int (*pci_config_write)(int seg, int bus, int dev, int fn, int reg, int len, u32 value);
+
+void pcibios_set_master(struct pci_dev *dev);
+void pcibios_penalize_isa_irq(int irq);
+struct irq_routing_table *pcibios_get_irq_routing_table(void);
+int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
+
+/* Dynamic DMA mapping stuff.
+ * i386 has everything mapped statically.
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <asm/scatterlist.h>
+#include <linux/string.h>
+#include <asm/io.h>
+
+struct pci_dev;
+
+/* The networking and block device layers use this boolean for bounce
+ * buffer decisions.
+ */
+#define PCI_DMA_BUS_IS_PHYS (0)
+
+/* Allocate and map kernel buffer using consistent mode DMA for a device.
+ * hwdev should be valid struct pci_dev pointer for PCI devices,
+ * NULL for PCI-like buses (ISA, EISA).
+ * Returns non-NULL cpu-view pointer to the buffer if successful and
+ * sets *dma_addrp to the pci side dma address as well, else *dma_addrp
+ * is undefined.
+ */
+extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
+ dma_addr_t *dma_handle);
+
+/* Free and unmap a consistent DMA buffer.
+ * cpu_addr is what was returned from pci_alloc_consistent,
+ * size must be the same as what was passed into pci_alloc_consistent,
+ * and likewise dma_addr must be the same as what *dma_addrp was set to.
+ *
+ * References to the memory and mappings associated with cpu_addr/dma_addr
+ * past this call are illegal.
+ */
+extern void pci_free_consistent(struct pci_dev *hwdev, size_t size,
+ void *vaddr, dma_addr_t dma_handle);
+
+/* Map a single buffer of the indicated size for DMA in streaming mode.
+ * The 32-bit bus address to use is returned.
+ *
+ * Once the device is given the dma address, the device owns this memory
+ * until either pci_unmap_single or pci_dma_sync_single is performed.
+ */
+static inline dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr,
+ size_t size, int direction)
+{
+ if (direction == PCI_DMA_NONE)
+ out_of_line_bug();
+ flush_write_buffers();
+ return virt_to_bus(ptr);
+}
+
+/* Unmap a single streaming mode DMA translation. The dma_addr and size
+ * must match what was provided for in a previous pci_map_single call. All
+ * other usages are undefined.
+ *
+ * After this call, reads by the cpu to the buffer are guaranteed to see
+ * whatever the device wrote there.
+ */
+static inline void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
+ size_t size, int direction)
+{
+ if (direction == PCI_DMA_NONE)
+ out_of_line_bug();
+ /* Nothing to do */
+}
+
+/*
+ * pci_{map,unmap}_single_page maps a kernel page to a dma_addr_t. identical
+ * to pci_map_single, but takes a struct page instead of a virtual address
+ */
+static inline dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
+ unsigned long offset, size_t size, int direction)
+{
+ if (direction == PCI_DMA_NONE)
+ out_of_line_bug();
+
+ return page_to_bus(page) + offset;
+}
+
+static inline void pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address,
+ size_t size, int direction)
+{
+ if (direction == PCI_DMA_NONE)
+ out_of_line_bug();
+ /* Nothing to do */
+}
+
+/* pci_unmap_{page,single} is a nop so... */
+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
+#define pci_unmap_addr(PTR, ADDR_NAME) (0)
+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
+#define pci_unmap_len(PTR, LEN_NAME) (0)
+#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
+
+/* Map a set of buffers described by scatterlist in streaming
+ * mode for DMA. This is the scatter-gather version of the
+ * above pci_map_single interface. Here the scatter gather list
+ * elements are each tagged with the appropriate dma address
+ * and length. They are obtained via sg_dma_{address,length}(SG).
+ *
+ * NOTE: An implementation may be able to use a smaller number of
+ * DMA address/length pairs than there are SG table elements.
+ * (for example via virtual mapping capabilities)
+ * The routine returns the number of addr/length pairs actually
+ * used, at most nents.
+ *
+ * Device ownership issues as mentioned above for pci_map_single are
+ * the same here.
+ */
+static inline int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
+ int nents, int direction)
+{
+ int i;
+
+ if (direction == PCI_DMA_NONE)
+ out_of_line_bug();
+
+ /*
+ * temporary 2.4 hack
+ */
+ for (i = 0; i < nents; i++ ) {
+ if (sg[i].address && sg[i].page)
+ out_of_line_bug();
+ else if (!sg[i].address && !sg[i].page)
+ out_of_line_bug();
+
+ if (sg[i].address)
+ sg[i].dma_address = virt_to_bus(sg[i].address);
+ else
+ sg[i].dma_address = page_to_bus(sg[i].page) + sg[i].offset;
+ }
+
+ flush_write_buffers();
+ return nents;
+}
+
+/* Unmap a set of streaming mode DMA translations.
+ * Again, cpu read rules concerning calls here are the same as for
+ * pci_unmap_single() above.
+ */
+static inline void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
+ int nents, int direction)
+{
+ if (direction == PCI_DMA_NONE)
+ out_of_line_bug();
+ /* Nothing to do */
+}
+
+/* Make physical memory consistent for a single
+ * streaming mode DMA translation after a transfer.
+ *
+ * If you perform a pci_map_single() but wish to interrogate the
+ * buffer using the cpu, yet do not wish to teardown the PCI dma
+ * mapping, you must call this function before doing so. At the
+ * next point you give the PCI dma address back to the card, the
+ * device again owns the buffer.
+ */
+static inline void pci_dma_sync_single(struct pci_dev *hwdev,
+ dma_addr_t dma_handle,
+ size_t size, int direction)
+{
+ if (direction == PCI_DMA_NONE)
+ out_of_line_bug();
+ flush_write_buffers();
+}
+
+/* Make physical memory consistent for a set of streaming
+ * mode DMA translations after a transfer.
+ *
+ * The same as pci_dma_sync_single but for a scatter-gather list,
+ * same rules and usage.
+ */
+static inline void pci_dma_sync_sg(struct pci_dev *hwdev,
+ struct scatterlist *sg,
+ int nelems, int direction)
+{
+ if (direction == PCI_DMA_NONE)
+ out_of_line_bug();
+ flush_write_buffers();
+}
+
+/* Return whether the given PCI device DMA address mask can
+ * be supported properly. For example, if your device can
+ * only drive the low 24-bits during PCI bus mastering, then
+ * you would pass 0x00ffffff as the mask to this function.
+ */
+static inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask)
+{
+ /*
+ * we fall back to GFP_DMA when the mask isn't all 1s,
+ * so we can't guarantee allocations that must be
+ * within a tighter range than GFP_DMA..
+ */
+ if(mask < 0x00ffffff)
+ return 0;
+
+ return 1;
+}
+
+/* This is always fine. */
+#define pci_dac_dma_supported(pci_dev, mask) (1)
+
+static __inline__ dma64_addr_t
+pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page, unsigned long offset, int direction)
+{
+ return ((dma64_addr_t) page_to_bus(page) +
+ (dma64_addr_t) offset);
+}
+
+static __inline__ struct page *
+pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr)
+{
+ return bus_to_page(dma_addr);
+}
+
+static __inline__ unsigned long
+pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr)
+{
+ return (dma_addr & ~PAGE_MASK);
+}
+
+static __inline__ void
+pci_dac_dma_sync_single(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
+{
+ flush_write_buffers();
+}
+
+/* These macros should be used after a pci_map_sg call has been done
+ * to get bus addresses of each of the SG entries and their lengths.
+ * You should only work with the number of sg entries pci_map_sg
+ * returns.
+ */
+#define sg_dma_address(sg) ((sg)->dma_address)
+#define sg_dma_len(sg) ((sg)->length)
+
+/* Return the index of the PCI controller for device. */
+static inline int pci_controller_num(struct pci_dev *dev)
+{
+ return 0;
+}
+
+#define HAVE_PCI_MMAP
+extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+ enum pci_mmap_state mmap_state, int write_combine);
+
+#endif /* __KERNEL__ */
+
+#endif /* __i386_PCI_H */
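
[Example -- not part of the patch] On this port the streaming-DMA calls reduce to virt_to_bus() plus write-buffer flushes, but drivers must still follow the usual map/sync/unmap protocol so the same code stays correct on architectures where these do real work. Sketch, names illustrative:

    /* Sketch only: canonical streaming-DMA sequence with the inlines above. */
    static void example_dma(struct pci_dev *pdev, void *buf, size_t len)
    {
        dma_addr_t bus = pci_map_single(pdev, buf, len, PCI_DMA_FROMDEVICE);
        /* ... hand 'bus' to the device and wait for the transfer ... */
        pci_dma_sync_single(pdev, bus, len, PCI_DMA_FROMDEVICE);
        pci_unmap_single(pdev, bus, len, PCI_DMA_FROMDEVICE);
    }
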
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/include/asm-xen/pgalloc.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/include/asm-xen/pgalloc.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,285 @@
+#ifndef _I386_PGALLOC_H
+#define _I386_PGALLOC_H
+
+#include <linux/config.h>
+#include <asm/processor.h>
+#include <asm/fixmap.h>
+#include <asm/hypervisor.h>
+#include <linux/threads.h>
+
+/*
+ * Quick lists are aligned so that least significant bits of array pointer
+ * are all zero when list is empty, and all one when list is full.
+ */
+#define QUICKLIST_ENTRIES 256
+#define QUICKLIST_EMPTY(_l) !((unsigned long)(_l) & ((QUICKLIST_ENTRIES*4)-1))
+#define QUICKLIST_FULL(_l) QUICKLIST_EMPTY((_l)+1)
+#define pgd_quicklist (current_cpu_data.pgd_quick)
+#define pmd_quicklist (current_cpu_data.pmd_quick)
+#define pte_quicklist (current_cpu_data.pte_quick)
+#define pgtable_cache_size (current_cpu_data.pgtable_cache_sz)
+
+#define pmd_populate(mm, pmd, pte) \
+ do { \
+ set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte))); \
+ XEN_flush_page_update_queue(); \
+ } while ( 0 )
+
+/*
+ * Allocate and free page tables.
+ */
+
+#if defined (CONFIG_X86_PAE)
+
+#error "no PAE support as yet"
+
+/*
+ * We can't include <linux/slab.h> here, thus these uglinesses.
+ */
+struct kmem_cache_s;
+
+extern struct kmem_cache_s *pae_pgd_cachep;
+extern void *kmem_cache_alloc(struct kmem_cache_s *, int);
+extern void kmem_cache_free(struct kmem_cache_s *, void *);
+
+
+static inline pgd_t *get_pgd_slow(void)
+{
+ int i;
+ pgd_t *pgd = kmem_cache_alloc(pae_pgd_cachep, GFP_KERNEL);
+
+ if (pgd) {
+ for (i = 0; i < USER_PTRS_PER_PGD; i++) {
+ unsigned long pmd = __get_free_page(GFP_KERNEL);
+ if (!pmd)
+ goto out_oom;
+ clear_page(pmd);
+ set_pgd(pgd + i, __pgd(1 + __pa(pmd)));
+ }
+ memcpy(pgd + USER_PTRS_PER_PGD,
+ init_mm.pgd + USER_PTRS_PER_PGD,
+ (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+ }
+ return pgd;
+out_oom:
+ for (i--; i >= 0; i--)
+ free_page((unsigned long)__va(pgd_val(pgd[i])-1));
+ kmem_cache_free(pae_pgd_cachep, pgd);
+ return NULL;
+}
+
+#else
+
+static inline pgd_t *get_pgd_slow(void)
+{
+ pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
+
+ if (pgd) {
+ memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
+ memcpy(pgd + USER_PTRS_PER_PGD,
+ init_mm.pgd + USER_PTRS_PER_PGD,
+ (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+ __make_page_readonly(pgd);
+ queue_pgd_pin(__pa(pgd));
+ }
+ return pgd;
+}
+
+#endif /* CONFIG_X86_PAE */
+
+static inline pgd_t *get_pgd_fast(void)
+{
+ unsigned long ret;
+
+ if ( !QUICKLIST_EMPTY(pgd_quicklist) ) {
+ ret = *(--pgd_quicklist);
+ pgtable_cache_size--;
+
+ } else
+ ret = (unsigned long)get_pgd_slow();
+ return (pgd_t *)ret;
+}
+
+static inline void free_pgd_slow(pgd_t *pgd)
+{
+#if defined(CONFIG_X86_PAE)
+#error
+ int i;
+
+ for (i = 0; i < USER_PTRS_PER_PGD; i++)
+ free_page((unsigned long)__va(pgd_val(pgd[i])-1));
+ kmem_cache_free(pae_pgd_cachep, pgd);
+#else
+ queue_pgd_unpin(__pa(pgd));
+ __make_page_writable(pgd);
+ free_page((unsigned long)pgd);
+#endif
+}
+
+static inline void free_pgd_fast(pgd_t *pgd)
+{
+ if ( !QUICKLIST_FULL(pgd_quicklist) ) {
+ *(pgd_quicklist++) = (unsigned long)pgd;
+ pgtable_cache_size++;
+ } else
+ free_pgd_slow(pgd);
+}
+
+static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+ pte_t *pte;
+
+ pte = (pte_t *) __get_free_page(GFP_KERNEL);
+ if (pte)
+ {
+ clear_page(pte);
+ __make_page_readonly(pte);
+ queue_pte_pin(__pa(pte));
+ }
+ return pte;
+
+}
+
+static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm,
+ unsigned long address)
+{
+ unsigned long ret = 0;
+ if ( !QUICKLIST_EMPTY(pte_quicklist) ) {
+ ret = *(--pte_quicklist);
+ pgtable_cache_size--;
+ }
+ return (pte_t *)ret;
+}
+
+static __inline__ void pte_free_slow(pte_t *pte)
+{
+ queue_pte_unpin(__pa(pte));
+ __make_page_writable(pte);
+ free_page((unsigned long)pte);
+}
+
+static inline void pte_free_fast(pte_t *pte)
+{
+ if ( !QUICKLIST_FULL(pte_quicklist) ) {
+ *(pte_quicklist++) = (unsigned long)pte;
+ pgtable_cache_size++;
+ } else
+ pte_free_slow(pte);
+}
+
+#define pte_free(pte) pte_free_fast(pte)
+#define pgd_free(pgd) free_pgd_fast(pgd)
+#define pgd_alloc(mm) get_pgd_fast()
+
+/*
+ * allocating and freeing a pmd is trivial: the 1-entry pmd is
+ * inside the pgd, so has no extra memory associated with it.
+ * (In the PAE case we free the pmds as part of the pgd.)
+ */
+
+#define pmd_alloc_one_fast(mm, addr) ({ BUG(); ((pmd_t *)1); })
+#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
+#define pmd_free_slow(x) do { } while (0)
+#define pmd_free_fast(x) do { } while (0)
+#define pmd_free(x) do { } while (0)
+#define pgd_populate(mm, pmd, pte) BUG()
+
+extern int do_check_pgt_cache(int, int);
+
+/*
+ * TLB flushing:
+ *
+ * - flush_tlb() flushes the current mm struct TLBs
+ * - flush_tlb_all() flushes all processes TLBs
+ * - flush_tlb_mm(mm) flushes the specified mm context TLB's
+ * - flush_tlb_page(vma, vmaddr) flushes one page
+ * - flush_tlb_range(mm, start, end) flushes a range of pages
+ * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
+ *
+ * ..but the i386 has somewhat limited tlb flushing capabilities,
+ * and page-granular flushes are available only on i486 and up.
+ */
+
+#ifndef CONFIG_SMP
+
+#define flush_tlb() __flush_tlb()
+#define flush_tlb_all() __flush_tlb_all()
+#define local_flush_tlb() __flush_tlb()
+
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+ if (mm == current->active_mm) queue_tlb_flush();
+ XEN_flush_page_update_queue();
+}
+
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ if (vma->vm_mm == current->active_mm) queue_invlpg(addr);
+ XEN_flush_page_update_queue();
+}
+
+static inline void flush_tlb_range(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ if (mm == current->active_mm) queue_tlb_flush();
+ XEN_flush_page_update_queue();
+}
+
+#else
+#error no guestos SMP support yet...
+#include <asm/smp.h>
+
+#define local_flush_tlb() \
+ __flush_tlb()
+
+extern void flush_tlb_all(void);
+extern void flush_tlb_current_task(void);
+extern void flush_tlb_mm(struct mm_struct *);
+extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
+
+#define flush_tlb() flush_tlb_current_task()
+
+static inline void flush_tlb_range(struct mm_struct * mm, unsigned long start, unsigned long end)
+{
+ flush_tlb_mm(mm);
+}
+
+#define TLBSTATE_OK 1
+#define TLBSTATE_LAZY 2
+
+struct tlb_state
+{
+ struct mm_struct *active_mm;
+ int state;
+} ____cacheline_aligned;
+extern struct tlb_state cpu_tlbstate[NR_CPUS];
+
+#endif /* CONFIG_SMP */
+
+static inline void flush_tlb_pgtables(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ /* i386 does not keep any page table caches in TLB */
+ XEN_flush_page_update_queue();
+}
+
+/*
+ * NB. The 'domid' field should be zero if mapping I/O space (non RAM).
+ * Otherwise it identifies the owner of the memory that is being mapped.
+ */
+extern int direct_remap_area_pages(struct mm_struct *mm,
+ unsigned long address,
+ unsigned long machine_addr,
+ unsigned long size,
+ pgprot_t prot,
+ domid_t domid);
+
+extern int __direct_remap_area_pages(struct mm_struct *mm,
+ unsigned long address,
+ unsigned long size,
+ mmu_update_t *v);
+
+
+
+#endif /* _I386_PGALLOC_H */
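
[Example -- not part of the patch] The quicklist macros work because the free-list array spans QUICKLIST_ENTRIES*4 = 1024 bytes and is 1024-byte aligned: the stack pointer's low ten bits are all zero exactly when it sits at the array base (empty), and FULL applies the same test one slot further on, which only succeeds at the next 1KB boundary. Sketch of the pop path, mirroring pte_alloc_one_fast() above (name illustrative):

    /* Sketch only: pop a cached pte page, or NULL for the slow path. */
    static pte_t *example_pop_pte(void)
    {
        if ( !QUICKLIST_EMPTY(pte_quicklist) ) {
            pte_t *pte = (pte_t *)*(--pte_quicklist);
            pgtable_cache_size--;
            return pte;
        }
        return NULL;
    }
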
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/include/asm-xen/pgtable-2level.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/include/asm-xen/pgtable-2level.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,103 @@
+#ifndef _I386_PGTABLE_2LEVEL_H
+#define _I386_PGTABLE_2LEVEL_H
+
+/*
+ * traditional i386 two-level paging structure:
+ */
+
+#define PGDIR_SHIFT 22
+#define PTRS_PER_PGD 1024
+
+/*
+ * the i386 is two-level, so we don't really have any
+ * PMD directory physically.
+ */
+#define PMD_SHIFT 22
+#define PTRS_PER_PMD 1
+
+#define PTRS_PER_PTE 1024
+
+#define pte_ERROR(e) \
+ printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
+#define pmd_ERROR(e) \
+ printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
+#define pgd_ERROR(e) \
+ printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+/*
+ * The "pgd_xxx()" functions here are trivial for a folded two-level
+ * setup: the pgd is never bad, and a pmd always exists (as it's folded
+ * into the pgd entry)
+ */
+static inline int pgd_none(pgd_t pgd) { return 0; }
+static inline int pgd_bad(pgd_t pgd) { return 0; }
+static inline int pgd_present(pgd_t pgd) { return 1; }
+#define pgd_clear(xp) do { } while (0)
+
+#define set_pte(pteptr, pteval) queue_l1_entry_update(pteptr, (pteval).pte_low)
+#define set_pte_atomic(pteptr, pteval) queue_l1_entry_update(pteptr, (pteval).pte_low)
+#define set_pmd(pmdptr, pmdval) queue_l2_entry_update((pmdptr), (pmdval))
+#define set_pgd(pgdptr, pgdval) ((void)0)
+
+#define pgd_page(pgd) \
+((unsigned long) __va(pgd_val(pgd) & PAGE_MASK))
+
+static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
+{
+ return (pmd_t *) dir;
+}
+
+#define pte_same(a, b) ((a).pte_low == (b).pte_low)
+
+/*
+ * We detect special mappings in one of two ways:
+ * 1. If the MFN is an I/O page then Xen will set the m2p entry
+ * to be outside our maximum possible pseudophys range.
+ * 2. If the MFN belongs to a different domain then we will certainly
+ * not have MFN in our p2m table. Conversely, if the page is ours,
+ * then we'll have p2m(m2p(MFN))==MFN.
+ * If we detect a special mapping then it doesn't have a 'struct page'.
+ * We force !VALID_PAGE() by returning an out-of-range pointer.
+ *
+ * NB. These checks require that, for any MFN that is not in our reservation,
+ * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
+ * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
+ * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
+ *
+ * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
+ * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
+ * require. In all the cases we care about, the high bit gets shifted out
+ * (e.g., phys_to_machine()) so behaviour there is correct.
+ */
+#define INVALID_P2M_ENTRY (~0UL)
+#define FOREIGN_FRAME(_m) ((_m) | (1UL<<((sizeof(unsigned long)*8)-1)))
+#define pte_page(_pte) \
+({ \
+ unsigned long mfn = (_pte).pte_low >> PAGE_SHIFT; \
+ unsigned long pfn = mfn_to_pfn(mfn); \
+ if ( (pfn >= max_mapnr) || (pfn_to_mfn(pfn) != mfn) ) \
+ pfn = max_mapnr; /* special: force !VALID_PAGE() */ \
+ &mem_map[pfn]; \
+})
+
+#define pte_none(x) (!(x).pte_low)
+#define __mk_pte(page_nr,pgprot) __pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
+
+/*
+ * A note on implementation of this atomic 'get-and-clear' operation.
+ * This is actually very simple because XenoLinux can only run on a single
+ * processor. Therefore, we cannot race other processors setting the 'accessed'
+ * or 'dirty' bits on a page-table entry.
+ * Even if pages are shared between domains, that is not a problem because
+ * each domain will have separate page tables, with their own versions of
+ * accessed & dirty state.
+ */
+static inline pte_t ptep_get_and_clear(pte_t *xp)
+{
+ pte_t pte = *xp;
+ if ( !pte_none(pte) )
+ queue_l1_entry_update(xp, 0);
+ return pte;
+}
+
+#endif /* _I386_PGTABLE_2LEVEL_H */
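
[Example -- not part of the patch] The pte_page() macro above implements check 2 from the comment: a frame belongs to this domain iff the p2m and m2p tables agree on it. Restated as a helper (hypothetical name):

    /* Sketch only: the ownership test hidden inside pte_page(). */
    static inline int example_mfn_is_ours(unsigned long mfn)
    {
        unsigned long pfn = mfn_to_pfn(mfn);       /* m2p lookup */
        return (pfn < max_mapnr) && (pfn_to_mfn(pfn) == mfn);
    }
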
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/include/asm-xen/pgtable.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/include/asm-xen/pgtable.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,370 @@
+#ifndef _I386_PGTABLE_H
+#define _I386_PGTABLE_H
+
+#include <linux/config.h>
+
+/*
+ * The Linux memory management assumes a three-level page table setup. On
+ * the i386, we use that, but "fold" the mid level into the top-level page
+ * table, so that we physically have the same two-level page table as the
+ * i386 mmu expects.
+ *
+ * This file contains the functions and defines necessary to modify and use
+ * the i386 page table tree.
+ */
+#ifndef __ASSEMBLY__
+#include <asm/processor.h>
+#include <asm/hypervisor.h>
+#include <linux/threads.h>
+#include <asm/fixmap.h>
+
+#ifndef _I386_BITOPS_H
+#include <asm/bitops.h>
+#endif
+
+#define swapper_pg_dir 0
+extern void paging_init(void);
+
+/* Caches aren't brain-dead on the intel. */
+#define flush_cache_all() do { } while (0)
+#define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_range(mm, start, end) do { } while (0)
+#define flush_cache_page(vma, vmaddr) do { } while (0)
+#define flush_page_to_ram(page) do { } while (0)
+#define flush_dcache_page(page) do { } while (0)
+#define flush_icache_range(start, end) do { } while (0)
+#define flush_icache_page(vma,pg) do { } while (0)
+#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
+
+extern unsigned long pgkern_mask;
+
+#define __flush_tlb() ({ queue_tlb_flush(); XEN_flush_page_update_queue(); })
+#define __flush_tlb_global() __flush_tlb()
+#define __flush_tlb_all() __flush_tlb_global()
+#define __flush_tlb_one(addr) ({ queue_invlpg(addr); XEN_flush_page_update_queue(); })
+#define __flush_tlb_single(addr) ({ queue_invlpg(addr); XEN_flush_page_update_queue(); })
+
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+extern unsigned long empty_zero_page[1024];
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * The Linux x86 paging architecture is 'compile-time dual-mode', it
+ * implements both the traditional 2-level x86 page tables and the
+ * newer 3-level PAE-mode page tables.
+ */
+#ifndef __ASSEMBLY__
+#if CONFIG_X86_PAE
+# include <asm/pgtable-3level.h>
+
+/*
+ * Need to initialise the X86 PAE caches
+ */
+extern void pgtable_cache_init(void);
+
+#else
+# include <asm/pgtable-2level.h>
+
+/*
+ * No page table caches to initialise
+ */
+#define pgtable_cache_init() do { } while (0)
+
+#endif
+#endif
+
+#define PMD_SIZE (1UL << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE-1))
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
+#define FIRST_USER_PGD_NR 0
+
+#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
+#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
+
+#define TWOLEVEL_PGDIR_SHIFT 22
+#define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT)
+#define BOOT_KERNEL_PGD_PTRS (1024-BOOT_USER_PGD_PTRS)
+
+
+#ifndef __ASSEMBLY__
+/* 4MB is just a nice "safety zone". Also, we align to a fresh pde. */
+#define VMALLOC_OFFSET (4*1024*1024)
+extern void * high_memory;
+#define VMALLOC_START (((unsigned long) high_memory + 2*VMALLOC_OFFSET-1) & \
+ ~(VMALLOC_OFFSET-1))
+#define VMALLOC_VMADDR(x) ((unsigned long)(x))
+#if CONFIG_HIGHMEM
+# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
+#else
+# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
+#endif
+
+#define _PAGE_BIT_PRESENT 0
+#define _PAGE_BIT_RW 1
+#define _PAGE_BIT_USER 2
+#define _PAGE_BIT_PWT 3
+#define _PAGE_BIT_PCD 4
+#define _PAGE_BIT_ACCESSED 5
+#define _PAGE_BIT_DIRTY 6
+#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page, Pentium+, if present.. */
+#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
+
+#define _PAGE_PRESENT 0x001
+#define _PAGE_RW 0x002
+#define _PAGE_USER 0x004
+#define _PAGE_PWT 0x008
+#define _PAGE_PCD 0x010
+#define _PAGE_ACCESSED 0x020
+#define _PAGE_DIRTY 0x040
+#define _PAGE_PSE 0x080 /* 4 MB (or 2MB) page, Pentium+, if present.. */
+#define _PAGE_GLOBAL 0x100 /* Global TLB entry PPro+ */
+
+#define _PAGE_PROTNONE 0x080 /* If not present */
+
+#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+
+#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
+#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
+#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
+#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
+
+#define __PAGE_KERNEL \
+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
+#define __PAGE_KERNEL_NOCACHE \
+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED)
+#define __PAGE_KERNEL_RO \
+ (_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED)
+
+#if 0
+#define MAKE_GLOBAL(x) __pgprot((x) | _PAGE_GLOBAL)
+#else
+#define MAKE_GLOBAL(x) __pgprot(x)
+#endif
+
+#define PAGE_KERNEL MAKE_GLOBAL(__PAGE_KERNEL)
+#define PAGE_KERNEL_RO MAKE_GLOBAL(__PAGE_KERNEL_RO)
+#define PAGE_KERNEL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)
+
+/*
+ * The i386 can't do page protection for execute, and considers that
+ * the same are read. Also, write permissions imply read permissions.
+ * This is the closest we can get..
+ */
+#define __P000 PAGE_NONE
+#define __P001 PAGE_READONLY
+#define __P010 PAGE_COPY
+#define __P011 PAGE_COPY
+#define __P100 PAGE_READONLY
+#define __P101 PAGE_READONLY
+#define __P110 PAGE_COPY
+#define __P111 PAGE_COPY
+
+#define __S000 PAGE_NONE
+#define __S001 PAGE_READONLY
+#define __S010 PAGE_SHARED
+#define __S011 PAGE_SHARED
+#define __S100 PAGE_READONLY
+#define __S101 PAGE_READONLY
+#define __S110 PAGE_SHARED
+#define __S111 PAGE_SHARED
+
+#define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
+#define pte_clear(xp) queue_l1_entry_update(xp, 0)
+
+#define pmd_none(x) (!(x).pmd)
+#define pmd_present(x) ((x).pmd & _PAGE_PRESENT)
+#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
+#define pmd_bad(x) (((x).pmd & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
+
+
+#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
+
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
+static inline int pte_read(pte_t pte) { return (pte).pte_low & _PAGE_USER; }
+static inline int pte_exec(pte_t pte) { return (pte).pte_low & _PAGE_USER; }
+static inline int pte_dirty(pte_t pte) { return (pte).pte_low & _PAGE_DIRTY; }
+static inline int pte_young(pte_t pte) { return (pte).pte_low & _PAGE_ACCESSED; }
+static inline int pte_write(pte_t pte) { return (pte).pte_low & _PAGE_RW; }
+
+static inline pte_t pte_rdprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_USER; return pte; }
+static inline pte_t pte_exprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_USER; return pte; }
+static inline pte_t pte_mkclean(pte_t pte) { (pte).pte_low &= ~_PAGE_DIRTY; return pte; }
+static inline pte_t pte_mkold(pte_t pte) { (pte).pte_low &= ~_PAGE_ACCESSED; return pte; }
+static inline pte_t pte_wrprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_RW; return pte; }
+static inline pte_t pte_mkread(pte_t pte) { (pte).pte_low |= _PAGE_USER; return pte; }
+static inline pte_t pte_mkexec(pte_t pte) { (pte).pte_low |= _PAGE_USER; return pte; }
+static inline pte_t pte_mkdirty(pte_t pte) { (pte).pte_low |= _PAGE_DIRTY; return pte; }
+static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte_low |= _PAGE_ACCESSED; return pte; }
+static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte_low |= _PAGE_RW; return pte; }
+
+static inline int ptep_test_and_clear_dirty(pte_t *ptep)
+{
+ unsigned long pteval = *(unsigned long *)ptep;
+ int ret = pteval & _PAGE_DIRTY;
+ if ( ret ) queue_l1_entry_update(ptep, pteval & ~_PAGE_DIRTY);
+ return ret;
+}
+static inline int ptep_test_and_clear_young(pte_t *ptep)
+{
+ unsigned long pteval = *(unsigned long *)ptep;
+ int ret = pteval & _PAGE_ACCESSED;
+ if ( ret ) queue_l1_entry_update(ptep, pteval & ~_PAGE_ACCESSED);
+ return ret;
+}
+static inline void ptep_set_wrprotect(pte_t *ptep)
+{
+ unsigned long pteval = *(unsigned long *)ptep;
+ if ( (pteval & _PAGE_RW) )
+ queue_l1_entry_update(ptep, pteval & ~_PAGE_RW);
+}
+static inline void ptep_mkdirty(pte_t *ptep)
+{
+ unsigned long pteval = *(unsigned long *)ptep;
+ if ( !(pteval & _PAGE_DIRTY) )
+ queue_l1_entry_update(ptep, pteval | _PAGE_DIRTY);
+}
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+
+#define mk_pte(page, pgprot) __mk_pte((page) - mem_map, (pgprot))
+
+/* This takes a physical page address that is used by the remapping functions */
+#define mk_pte_phys(physpage, pgprot) __mk_pte((physpage) >> PAGE_SHIFT, pgprot)
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ pte.pte_low &= _PAGE_CHG_MASK;
+ pte.pte_low |= pgprot_val(newprot);
+ return pte;
+}
+
+#define page_pte(page) page_pte_prot(page, __pgprot(0))
+
+#define pmd_page(pmd) \
+((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
+
+/* to find an entry in a page-table-directory. */
+#define pgd_index(address) ((address >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
+
+#define __pgd_offset(address) pgd_index(address)
+
+#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
+
+/* to find an entry in a kernel page-table-directory */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
+#define __pmd_offset(address) \
+ (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
+
+/* Find an entry in the third-level page table.. */
+#define __pte_offset(address) \
+ ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_offset(dir, address) ((pte_t *) pmd_page(*(dir)) + \
+ __pte_offset(address))
+
+/*
+ * The i386 doesn't have any external MMU info: the kernel page
+ * tables contain all the necessary information.
+ */
+#define update_mmu_cache(vma,address,pte) do { } while (0)
+
+/* Encode and de-code a swap entry */
+#define SWP_TYPE(x) (((x).val >> 1) & 0x3f)
+#define SWP_OFFSET(x) ((x).val >> 8)
+#define SWP_ENTRY(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
+#define pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low })
+#define swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+struct page;
+int change_page_attr(struct page *, int, pgprot_t prot);
+
+static inline void __make_page_readonly(void *va)
+{
+ pgd_t *pgd = pgd_offset_k((unsigned long)va);
+ pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
+ pte_t *pte = pte_offset(pmd, (unsigned long)va);
+ queue_l1_entry_update(pte, (*(unsigned long *)pte)&~_PAGE_RW);
+}
+
+static inline void __make_page_writable(void *va)
+{
+ pgd_t *pgd = pgd_offset_k((unsigned long)va);
+ pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
+ pte_t *pte = pte_offset(pmd, (unsigned long)va);
+ queue_l1_entry_update(pte, (*(unsigned long *)pte)|_PAGE_RW);
+}
+
+static inline void make_page_readonly(void *va)
+{
+ pgd_t *pgd = pgd_offset_k((unsigned long)va);
+ pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
+ pte_t *pte = pte_offset(pmd, (unsigned long)va);
+ queue_l1_entry_update(pte, (*(unsigned long *)pte)&~_PAGE_RW);
+ if ( (unsigned long)va >= VMALLOC_START )
+ __make_page_readonly(machine_to_virt(
+ *(unsigned long *)pte&PAGE_MASK));
+}
+
+static inline void make_page_writable(void *va)
+{
+ pgd_t *pgd = pgd_offset_k((unsigned long)va);
+ pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
+ pte_t *pte = pte_offset(pmd, (unsigned long)va);
+ queue_l1_entry_update(pte, (*(unsigned long *)pte)|_PAGE_RW);
+ if ( (unsigned long)va >= VMALLOC_START )
+ __make_page_writable(machine_to_virt(
+ *(unsigned long *)pte&PAGE_MASK));
+}
+
+static inline void make_pages_readonly(void *va, unsigned int nr)
+{
+ while ( nr-- != 0 )
+ {
+ make_page_readonly(va);
+ va = (void *)((unsigned long)va + PAGE_SIZE);
+ }
+}
+
+static inline void make_pages_writable(void *va, unsigned int nr)
+{
+ while ( nr-- != 0 )
+ {
+ make_page_writable(va);
+ va = (void *)((unsigned long)va + PAGE_SIZE);
+ }
+}
+
+static inline unsigned long arbitrary_virt_to_machine(void *va)
+{
+ pgd_t *pgd = pgd_offset_k((unsigned long)va);
+ pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
+ pte_t *pte = pte_offset(pmd, (unsigned long)va);
+ unsigned long pa = (*(unsigned long *)pte) & PAGE_MASK;
+ return pa | ((unsigned long)va & (PAGE_SIZE-1));
+}
+
+#endif /* !__ASSEMBLY__ */
+
+/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
+#define PageSkip(page) (0)
+#define kern_addr_valid(addr) (1)
+
+#define io_remap_page_range remap_page_range
+
+#endif /* _I386_PGTABLE_H */
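
[Example -- not part of the patch] The recursion in make_page_readonly() exists because a vmalloc'd page is mapped twice -- once through the vmalloc alias and once in the direct 1:1 region -- and Xen refuses to pin a page-table page while any writable mapping of it remains. Typical use, with an illustrative name:

    /* Sketch only: drop _PAGE_RW on both mappings, then tell Xen. */
    static void example_protect(void *va)  /* va may be a vmalloc address */
    {
        make_page_readonly(va);
        XEN_flush_page_update_queue();
    }
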
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/include/asm-xen/processor.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/include/asm-xen/processor.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,483 @@
+/*
+ * include/asm-i386/processor.h
+ *
+ * Copyright (C) 1994 Linus Torvalds
+ */
+
+#ifndef __ASM_I386_PROCESSOR_H
+#define __ASM_I386_PROCESSOR_H
+
+#include <asm/vm86.h>
+#include <asm/math_emu.h>
+#include <asm/segment.h>
+#include <asm/page.h>
+#include <asm/types.h>
+#include <asm/sigcontext.h>
+#include <asm/cpufeature.h>
+#include <linux/cache.h>
+#include <linux/config.h>
+#include <linux/threads.h>
+
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ */
+#define current_text_addr() ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })
+
+/*
+ * CPU type and hardware bug flags. Kept separately for each CPU.
+ * Members of this structure are referenced in head.S, so think twice
+ * before touching them. [mj]
+ */
+
+struct cpuinfo_x86 {
+ __u8 x86; /* CPU family */
+ __u8 x86_vendor; /* CPU vendor */
+ __u8 x86_model;
+ __u8 x86_mask;
+ char wp_works_ok; /* It doesn't on 386's */
+ char hlt_works_ok; /* Problems on some 486Dx4's and old 386's */
+ char hard_math;
+ char rfu;
+ int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */
+ __u32 x86_capability[NCAPINTS];
+ char x86_vendor_id[16];
+ char x86_model_id[64];
+ int x86_cache_size; /* in KB - valid for CPUS which support this
+ call */
+ int fdiv_bug;
+ int f00f_bug;
+ int coma_bug;
+ unsigned long loops_per_jiffy;
+ unsigned long *pgd_quick;
+ unsigned long *pmd_quick;
+ unsigned long *pte_quick;
+ unsigned long pgtable_cache_sz;
+} __attribute__((__aligned__(SMP_CACHE_BYTES)));
+
+#define X86_VENDOR_INTEL 0
+#define X86_VENDOR_CYRIX 1
+#define X86_VENDOR_AMD 2
+#define X86_VENDOR_UMC 3
+#define X86_VENDOR_NEXGEN 4
+#define X86_VENDOR_CENTAUR 5
+#define X86_VENDOR_RISE 6
+#define X86_VENDOR_TRANSMETA 7
+#define X86_VENDOR_NSC 8
+#define X86_VENDOR_SIS 9
+#define X86_VENDOR_UNKNOWN 0xff
+
+/*
+ * capabilities of CPUs
+ */
+
+extern struct cpuinfo_x86 boot_cpu_data;
+extern struct tss_struct init_tss[NR_CPUS];
+
+#ifdef CONFIG_SMP
+extern struct cpuinfo_x86 cpu_data[];
+#define current_cpu_data cpu_data[smp_processor_id()]
+#else
+#define cpu_data (&boot_cpu_data)
+#define current_cpu_data boot_cpu_data
+#endif
+
+extern char ignore_irq13;
+
+extern void identify_cpu(struct cpuinfo_x86 *);
+extern void print_cpu_info(struct cpuinfo_x86 *);
+
+/*
+ * EFLAGS bits
+ */
+#define X86_EFLAGS_CF 0x00000001 /* Carry Flag */
+#define X86_EFLAGS_PF 0x00000004 /* Parity Flag */
+#define X86_EFLAGS_AF 0x00000010 /* Auxiliary carry Flag */
+#define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */
+#define X86_EFLAGS_SF 0x00000080 /* Sign Flag */
+#define X86_EFLAGS_TF 0x00000100 /* Trap Flag */
+#define X86_EFLAGS_IF 0x00000200 /* Interrupt Flag */
+#define X86_EFLAGS_DF 0x00000400 /* Direction Flag */
+#define X86_EFLAGS_OF 0x00000800 /* Overflow Flag */
+#define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */
+#define X86_EFLAGS_NT 0x00004000 /* Nested Task */
+#define X86_EFLAGS_RF 0x00010000 /* Resume Flag */
+#define X86_EFLAGS_VM 0x00020000 /* Virtual Mode */
+#define X86_EFLAGS_AC 0x00040000 /* Alignment Check */
+#define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */
+#define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */
+#define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */
+
+/*
+ * Generic CPUID function
+ */
+static inline void cpuid(int op, int *eax, int *ebx, int *ecx, int *edx)
+{
+ __asm__("cpuid"
+ : "=a" (*eax),
+ "=b" (*ebx),
+ "=c" (*ecx),
+ "=d" (*edx)
+ : "0" (op));
+}
+
+/*
+ * CPUID functions returning a single datum
+ */
+static inline unsigned int cpuid_eax(unsigned int op)
+{
+ unsigned int eax;
+
+ __asm__("cpuid"
+ : "=a" (eax)
+ : "0" (op)
+ : "bx", "cx", "dx");
+ return eax;
+}
+static inline unsigned int cpuid_ebx(unsigned int op)
+{
+ unsigned int eax, ebx;
+
+ __asm__("cpuid"
+ : "=a" (eax), "=b" (ebx)
+ : "0" (op)
+ : "cx", "dx" );
+ return ebx;
+}
+static inline unsigned int cpuid_ecx(unsigned int op)
+{
+ unsigned int eax, ecx;
+
+ __asm__("cpuid"
+ : "=a" (eax), "=c" (ecx)
+ : "0" (op)
+ : "bx", "dx" );
+ return ecx;
+}
+static inline unsigned int cpuid_edx(unsigned int op)
+{
+ unsigned int eax, edx;
+
+ __asm__("cpuid"
+ : "=a" (eax), "=d" (edx)
+ : "0" (op)
+ : "bx", "cx");
+ return edx;
+}
+
+/*
+ * Intel CPU features in CR4
+ */
+#define X86_CR4_VME 0x0001 /* enable vm86 extensions */
+#define X86_CR4_PVI 0x0002 /* virtual interrupts flag enable */
+#define X86_CR4_TSD 0x0004 /* disable time stamp at ipl 3 */
+#define X86_CR4_DE 0x0008 /* enable debugging extensions */
+#define X86_CR4_PSE 0x0010 /* enable page size extensions */
+#define X86_CR4_PAE 0x0020 /* enable physical address extensions */
+#define X86_CR4_MCE 0x0040 /* Machine check enable */
+#define X86_CR4_PGE 0x0080 /* enable global pages */
+#define X86_CR4_PCE 0x0100 /* enable performance counters at ipl 3 */
+#define X86_CR4_OSFXSR 0x0200 /* enable fast FPU save and restore */
+#define X86_CR4_OSXMMEXCPT 0x0400 /* enable unmasked SSE exceptions */
+
+#define load_cr3(pgdir) \
+ asm volatile("movl %0,%%cr3": :"r" (__pa(pgdir)));
+
+extern unsigned long mmu_cr4_features;
+
+#include <asm/hypervisor.h>
+
+static inline void set_in_cr4 (unsigned long mask)
+{
+ BUG();
+}
+
+static inline void clear_in_cr4 (unsigned long mask)
+{
+ BUG();
+}
+
+/*
+ * Cyrix CPU configuration register indexes
+ */
+#define CX86_CCR0 0xc0
+#define CX86_CCR1 0xc1
+#define CX86_CCR2 0xc2
+#define CX86_CCR3 0xc3
+#define CX86_CCR4 0xe8
+#define CX86_CCR5 0xe9
+#define CX86_CCR6 0xea
+#define CX86_CCR7 0xeb
+#define CX86_DIR0 0xfe
+#define CX86_DIR1 0xff
+#define CX86_ARR_BASE 0xc4
+#define CX86_RCR_BASE 0xdc
+
+/*
+ * Cyrix CPU indexed register access macros
+ */
+
+#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })
+
+#define setCx86(reg, data) do { \
+ outb((reg), 0x22); \
+ outb((data), 0x23); \
+} while (0)
+
+/*
+ * Bus types (default is ISA, but people can check others with these..)
+ */
+#ifdef CONFIG_EISA
+extern int EISA_bus;
+#else
+#define EISA_bus (0)
+#endif
+extern int MCA_bus;
+
+/* from system description table in BIOS. Mostly for MCA use, but
+others may find it useful. */
+extern unsigned int machine_id;
+extern unsigned int machine_submodel_id;
+extern unsigned int BIOS_revision;
+extern unsigned int mca_pentium_flag;
+
+/*
+ * User space process size: 3GB (default).
+ */
+#define TASK_SIZE (PAGE_OFFSET)
+
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE (TASK_SIZE / 3)
+
+/*
+ * Size of io_bitmap in longwords: 32 is ports 0-0x3ff.
+ */
+#define IO_BITMAP_SIZE 32
+#define IO_BITMAP_BYTES (IO_BITMAP_SIZE * 4)
+#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
+#define INVALID_IO_BITMAP_OFFSET 0x8000
+
+struct i387_fsave_struct {
+ long cwd;
+ long swd;
+ long twd;
+ long fip;
+ long fcs;
+ long foo;
+ long fos;
+ long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */
+ long status; /* software status information */
+};
+
+struct i387_fxsave_struct {
+ unsigned short cwd;
+ unsigned short swd;
+ unsigned short twd;
+ unsigned short fop;
+ long fip;
+ long fcs;
+ long foo;
+ long fos;
+ long mxcsr;
+ long reserved;
+ long st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
+ long xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */
+ long padding[56];
+} __attribute__ ((aligned (16)));
+
+struct i387_soft_struct {
+ long cwd;
+ long swd;
+ long twd;
+ long fip;
+ long fcs;
+ long foo;
+ long fos;
+ long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */
+ unsigned char ftop, changed, lookahead, no_update, rm, alimit;
+ struct info *info;
+ unsigned long entry_eip;
+};
+
+union i387_union {
+ struct i387_fsave_struct fsave;
+ struct i387_fxsave_struct fxsave;
+ struct i387_soft_struct soft;
+};
+
+typedef struct {
+ unsigned long seg;
+} mm_segment_t;
+
+struct tss_struct {
+ unsigned short back_link,__blh;
+ unsigned long esp0;
+ unsigned short ss0,__ss0h;
+ unsigned long esp1;
+ unsigned short ss1,__ss1h;
+ unsigned long esp2;
+ unsigned short ss2,__ss2h;
+ unsigned long __cr3;
+ unsigned long eip;
+ unsigned long eflags;
+ unsigned long eax,ecx,edx,ebx;
+ unsigned long esp;
+ unsigned long ebp;
+ unsigned long esi;
+ unsigned long edi;
+ unsigned short es, __esh;
+ unsigned short cs, __csh;
+ unsigned short ss, __ssh;
+ unsigned short ds, __dsh;
+ unsigned short fs, __fsh;
+ unsigned short gs, __gsh;
+ unsigned short ldt, __ldth;
+ unsigned short trace, bitmap;
+ unsigned long io_bitmap[IO_BITMAP_SIZE+1];
+ /*
+ * pads the TSS to be cacheline-aligned (size is 0x100)
+ */
+ unsigned long __cacheline_filler[5];
+};
+
+struct thread_struct {
+ unsigned long esp0;
+ unsigned long eip;
+ unsigned long esp;
+ unsigned long fs;
+ unsigned long gs;
+ unsigned int io_pl;
+/* Hardware debugging registers */
+ unsigned long debugreg[8]; /* %%db0-7 debug registers */
+/* fault info */
+ unsigned long cr2, trap_no, error_code;
+/* floating point info */
+ union i387_union i387;
+/* virtual 86 mode info */
+ struct vm86_struct * vm86_info;
+ unsigned long screen_bitmap;
+ unsigned long v86flags, v86mask, saved_esp0;
+};
+
+#define INIT_THREAD { sizeof(init_stack) + (long) &init_stack, \
+ 0, 0, 0, 0, 0, 0, {0}, 0, 0, 0, {{0}}, 0, 0, 0, 0, 0 }
+
+#define INIT_TSS { \
+ 0,0, /* back_link, __blh */ \
+ sizeof(init_stack) + (long) &init_stack, /* esp0 */ \
+ __KERNEL_DS, 0, /* ss0 */ \
+ 0,0,0,0,0,0, /* stack1, stack2 */ \
+ 0, /* cr3 */ \
+ 0,0, /* eip,eflags */ \
+ 0,0,0,0, /* eax,ecx,edx,ebx */ \
+ 0,0,0,0, /* esp,ebp,esi,edi */ \
+ 0,0,0,0,0,0, /* es,cs,ss */ \
+ 0,0,0,0,0,0, /* ds,fs,gs */ \
+ 0,0, /* ldt */ \
+ 0, INVALID_IO_BITMAP_OFFSET, /* trace, bitmap */ \
+ {~0, } /* ioperm */ \
+}
+
+#define start_thread(regs, new_eip, new_esp) do { \
+ __asm__("movl %0,%%fs ; movl %0,%%gs": :"r" (0)); \
+ set_fs(USER_DS); \
+ regs->xds = __USER_DS; \
+ regs->xes = __USER_DS; \
+ regs->xss = __USER_DS; \
+ regs->xcs = __USER_CS; \
+ regs->eip = new_eip; \
+ regs->esp = new_esp; \
+} while (0)
+
+/* Forward declaration, a strange C thing */
+struct task_struct;
+struct mm_struct;
+
+/* Free all resources held by a thread. */
+extern void release_thread(struct task_struct *);
+/*
+ * create a kernel thread without removing it from tasklists
+ */
+extern int arch_kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
+
+/* Copy and release all segment info associated with a VM
+ * Unusable due to lack of error handling, use {init_new,destroy}_context
+ * instead.
+ */
+static inline void copy_segments(struct task_struct *p, struct mm_struct * mm) { }
+static inline void release_segments(struct mm_struct * mm) { }
+
+/*
+ * Return saved PC of a blocked thread.
+ */
+static inline unsigned long thread_saved_pc(struct thread_struct *t)
+{
+ return ((unsigned long *)t->esp)[3];
+}
+
+unsigned long get_wchan(struct task_struct *p);
+#define KSTK_EIP(tsk) (((unsigned long *)(4096+(unsigned long)(tsk)))[1019])
+#define KSTK_ESP(tsk) (((unsigned long *)(4096+(unsigned long)(tsk)))[1022])
+
+#define THREAD_SIZE (2*PAGE_SIZE)
+#define alloc_task_struct() ((struct task_struct *) __get_free_pages(GFP_KERNEL,1))
+#define free_task_struct(p) free_pages((unsigned long) (p), 1)
+#define get_task_struct(tsk) atomic_inc(&virt_to_page(tsk)->count)
+
+#define init_task (init_task_union.task)
+#define init_stack (init_task_union.stack)
+
+struct microcode {
+ unsigned int hdrver;
+ unsigned int rev;
+ unsigned int date;
+ unsigned int sig;
+ unsigned int cksum;
+ unsigned int ldrver;
+ unsigned int pf;
+ unsigned int reserved[5];
+ unsigned int bits[500];
+};
+
+/* '6' because it used to be for P6 only (but now covers Pentium 4 as well) */
+#define MICROCODE_IOCFREE _IO('6',0)
+
+/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
+static inline void rep_nop(void)
+{
+ __asm__ __volatile__("rep;nop" ::: "memory");
+}
+
+#define cpu_relax() rep_nop()
+
+/* Prefetch instructions for Pentium III and AMD Athlon */
+#if defined(CONFIG_MPENTIUMIII) || defined (CONFIG_MPENTIUM4)
+
+#define ARCH_HAS_PREFETCH
+extern inline void prefetch(const void *x)
+{
+ __asm__ __volatile__ ("prefetchnta (%0)" : : "r"(x));
+}
+
+#elif CONFIG_X86_USE_3DNOW
+
+#define ARCH_HAS_PREFETCH
+#define ARCH_HAS_PREFETCHW
+#define ARCH_HAS_SPINLOCK_PREFETCH
+
+extern inline void prefetch(const void *x)
+{
+ __asm__ __volatile__ ("prefetch (%0)" : : "r"(x));
+}
+
+extern inline void prefetchw(const void *x)
+{
+ __asm__ __volatile__ ("prefetchw (%0)" : : "r"(x));
+}
+#define spin_lock_prefetch(x) prefetchw(x)
+
+#endif
+
+#endif /* __ASM_I386_PROCESSOR_H */
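
Usage note (illustrative only, not applied by this patch): cpu_relax() above
is the intended drop-in for busy-wait loops. A minimal sketch, assuming a
hypothetical flag polled across CPUs:

	static volatile int ready;		/* hypothetical flag */

	static void wait_for_ready(void)
	{
		while (!ready)
			cpu_relax();	/* REP;NOP hints a spin-wait loop */
	}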
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/include/asm-xen/queues.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/include/asm-xen/queues.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,20 @@
+
+/* Work-queue emulation over task queues. Pretty simple. */
+
+#ifndef __QUEUES_H__
+#define __QUEUES_H__
+
+#include <linux/version.h>
+#include <linux/list.h>
+#include <linux/tqueue.h>
+
+#define DECLARE_TQUEUE(_name, _fn, _arg) \
+ struct tq_struct _name = { LIST_HEAD_INIT((_name).list), 0, _fn, _arg }
+#define DECLARE_WORK(_name, _fn, _arg) DECLARE_TQUEUE(_name, _fn, _arg)
+
+#define work_struct tq_struct
+#define INIT_WORK(_work, _fn, _arg) INIT_TQUEUE(_work, _fn, _arg)
+
+#define schedule_work(_w) schedule_task(_w)
+
+#endif /* __QUEUES_H__ */
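
Usage note (illustrative only, not applied by this patch): these shims let
2.6-style work-queue callers build against 2.4 task queues. A minimal
sketch, with a hypothetical handler:

	static void tx_action(void *data);		/* hypothetical */
	static DECLARE_WORK(tx_work, tx_action, NULL);

	/* e.g. from an interrupt handler: defer to process context */
	schedule_work(&tx_work);

On 2.4 this expands to a struct tq_struct scheduled via schedule_task();
the same source compiles unchanged against the real 2.6 work-queue API.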
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/include/asm-xen/segment.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/include/asm-xen/segment.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,15 @@
+#ifndef _ASM_SEGMENT_H
+#define _ASM_SEGMENT_H
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+#endif
+#include <asm-xen/xen-public/xen.h>
+
+#define __KERNEL_CS FLAT_RING1_CS
+#define __KERNEL_DS FLAT_RING1_DS
+
+#define __USER_CS FLAT_RING3_CS
+#define __USER_DS FLAT_RING3_DS
+
+#endif
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/include/asm-xen/smp.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/include/asm-xen/smp.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,102 @@
+#ifndef __ASM_SMP_H
+#define __ASM_SMP_H
+
+/*
+ * We need the APIC definitions automatically as part of 'smp.h'
+ */
+#ifndef __ASSEMBLY__
+#include <linux/config.h>
+#include <linux/threads.h>
+#include <linux/ptrace.h>
+#endif
+
+#ifdef CONFIG_X86_LOCAL_APIC
+#ifndef __ASSEMBLY__
+#include <asm/bitops.h>
+#include <asm/mpspec.h>
+#ifdef CONFIG_X86_IO_APIC
+#include <asm/io_apic.h>
+#endif
+#include <asm/apic.h>
+#endif
+#endif
+
+#ifdef CONFIG_SMP
+#ifndef __ASSEMBLY__
+
+/*
+ * Private routines/data
+ */
+
+extern void smp_alloc_memory(void);
+extern unsigned long phys_cpu_present_map;
+extern unsigned long cpu_online_map;
+extern volatile unsigned long smp_invalidate_needed;
+extern int pic_mode;
+extern int smp_num_siblings;
+extern int cpu_sibling_map[];
+
+extern void smp_flush_tlb(void);
+extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
+extern void fastcall smp_send_reschedule(int cpu);
+extern void smp_invalidate_rcv(void); /* Process an NMI */
+extern void (*mtrr_hook) (void);
+extern void zap_low_mappings (void);
+
+/*
+ * On x86 all CPUs are mapped 1:1 to the APIC space.
+ * This simplifies scheduling and IPI sending and
+ * compresses data structures.
+ */
+static inline int cpu_logical_map(int cpu)
+{
+ return cpu;
+}
+static inline int cpu_number_map(int cpu)
+{
+ return cpu;
+}
+
+/*
+ * Some lowlevel functions might want to know about
+ * the real APIC ID <-> CPU # mapping.
+ */
+#define MAX_APICID 256
+extern volatile int cpu_to_physical_apicid[NR_CPUS];
+extern volatile int physical_apicid_to_cpu[MAX_APICID];
+extern volatile int cpu_to_logical_apicid[NR_CPUS];
+extern volatile int logical_apicid_to_cpu[MAX_APICID];
+
+/*
+ * General functions that each host system must provide.
+ */
+
+extern void smp_boot_cpus(void);
+extern void smp_store_cpu_info(int id); /* Store per CPU info (like the initial udelay numbers) */
+
+/*
+ * This function is needed by all SMP systems. It must _always_ be valid
+ * from the initial startup. We map APIC_BASE very early in page_setup(),
+ * so this is correct in the x86 case.
+ */
+
+#define smp_processor_id() (current->processor)
+
+#endif /* !__ASSEMBLY__ */
+
+#define NO_PROC_ID 0xFF /* No processor magic marker */
+
+/*
+ * This magic constant controls our willingness to transfer
+ * a process across CPUs. Such a transfer incurs misses on the L1
+ * cache, and on a P6 or P5 with multiple L2 caches L2 hits. My
+ * gut feeling is this will vary by board in value. For a board
+ * with separate L2 cache it probably depends also on the RSS, and
+ * for a board with shared L2 cache it ought to decay fast as other
+ * processes are run.
+ */
+
+#define PROC_CHANGE_PENALTY 15 /* Schedule penalty */
+
+#endif
+#endif
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/include/asm-xen/system.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/include/asm-xen/system.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,424 @@
+#ifndef __ASM_SYSTEM_H
+#define __ASM_SYSTEM_H
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <asm/synch_bitops.h>
+#include <asm/segment.h>
+#include <asm/hypervisor.h>
+#include <asm/evtchn.h>
+
+#ifdef __KERNEL__
+
+struct task_struct;
+extern void FASTCALL(__switch_to(struct task_struct *prev,
+ struct task_struct *next));
+
+#define prepare_to_switch() \
+do { \
+ struct thread_struct *__t = &current->thread; \
+ __asm__ __volatile__ ( "movl %%fs,%0" : "=m" (*(int *)&__t->fs) ); \
+ __asm__ __volatile__ ( "movl %%gs,%0" : "=m" (*(int *)&__t->gs) ); \
+} while (0)
+#define switch_to(prev,next,last) do { \
+ asm volatile("pushl %%esi\n\t" \
+ "pushl %%edi\n\t" \
+ "pushl %%ebp\n\t" \
+ "movl %%esp,%0\n\t" /* save ESP */ \
+ "movl %3,%%esp\n\t" /* restore ESP */ \
+ "movl $1f,%1\n\t" /* save EIP */ \
+ "pushl %4\n\t" /* restore EIP */ \
+ "jmp __switch_to\n" \
+ "1:\t" \
+ "popl %%ebp\n\t" \
+ "popl %%edi\n\t" \
+ "popl %%esi\n\t" \
+ :"=m" (prev->thread.esp),"=m" (prev->thread.eip), \
+ "=b" (last) \
+ :"m" (next->thread.esp),"m" (next->thread.eip), \
+ "a" (prev), "d" (next), \
+ "b" (prev)); \
+} while (0)
+
+#define _set_base(addr,base) do { unsigned long __pr; \
+__asm__ __volatile__ ("movw %%dx,%1\n\t" \
+ "rorl $16,%%edx\n\t" \
+ "movb %%dl,%2\n\t" \
+ "movb %%dh,%3" \
+ :"=&d" (__pr) \
+ :"m" (*((addr)+2)), \
+ "m" (*((addr)+4)), \
+ "m" (*((addr)+7)), \
+ "0" (base) \
+ ); } while(0)
+
+#define _set_limit(addr,limit) do { unsigned long __lr; \
+__asm__ __volatile__ ("movw %%dx,%1\n\t" \
+ "rorl $16,%%edx\n\t" \
+ "movb %2,%%dh\n\t" \
+ "andb $0xf0,%%dh\n\t" \
+ "orb %%dh,%%dl\n\t" \
+ "movb %%dl,%2" \
+ :"=&d" (__lr) \
+ :"m" (*(addr)), \
+ "m" (*((addr)+6)), \
+ "0" (limit) \
+ ); } while(0)
+
+#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
+#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1)>>12 )
+
+static inline unsigned long _get_base(char * addr)
+{
+ unsigned long __base;
+ __asm__("movb %3,%%dh\n\t"
+ "movb %2,%%dl\n\t"
+ "shll $16,%%edx\n\t"
+ "movw %1,%%dx"
+ :"=&d" (__base)
+ :"m" (*((addr)+2)),
+ "m" (*((addr)+4)),
+ "m" (*((addr)+7)));
+ return __base;
+}
+
+#define get_base(ldt) _get_base( ((char *)&(ldt)) )
+
+/*
+ * Load a segment. Fall back on loading the zero
+ * segment if something goes wrong..
+ */
+#define loadsegment(seg,value) \
+ asm volatile("\n" \
+ "1:\t" \
+ "movl %0,%%" #seg "\n" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3:\t" \
+ "pushl $0\n\t" \
+ "popl %%" #seg "\n\t" \
+ "jmp 2b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n\t" \
+ ".align 4\n\t" \
+ ".long 1b,3b\n" \
+ ".previous" \
+ : :"m" (*(unsigned int *)&(value)))
+
+/* NB. 'clts' is done for us by Xen during virtual trap. */
+#define clts() ((void)0)
+#define stts() (HYPERVISOR_fpu_taskswitch())
+
+#endif /* __KERNEL__ */
+
+/**
+ * __ffs - find first bit in word.
+ * @word: The word to search
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ *
+ * Taken from 2.6 for Xen.
+ */
+static inline unsigned long __ffs(unsigned long word)
+{
+ __asm__("bsfl %1,%0"
+ :"=r" (word)
+ :"rm" (word));
+ return word;
+}
+
+static inline unsigned long get_limit(unsigned long segment)
+{
+ unsigned long __limit;
+ __asm__("lsll %1,%0"
+ :"=r" (__limit):"r" (segment));
+ return __limit+1;
+}
+
+#define nop() __asm__ __volatile__ ("nop")
+
+#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
+
+#define tas(ptr) (xchg((ptr),1))
+
+struct __xchg_dummy { unsigned long a[100]; };
+#define __xg(x) ((struct __xchg_dummy *)(x))
+
+
+/*
+ * The semantics of XCHGCMP8B are a bit strange, this is why
+ * there is a loop and the loading of %%eax and %%edx has to
+ * be inside. This inlines well in most cases, the cached
+ * cost is around ~38 cycles. (in the future we might want
+ * to do an SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
+ * might have an implicit FPU-save as a cost, so it's not
+ * clear which path to go.)
+ *
+ * cmpxchg8b must be used with the lock prefix here to allow
+ * the instruction to be executed atomically, see page 3-102
+ * of the instruction set reference 24319102.pdf. We need
+ * the reader side to see the coherent 64bit value.
+ */
+static inline void __set_64bit (unsigned long long * ptr,
+ unsigned int low, unsigned int high)
+{
+ __asm__ __volatile__ (
+ "\n1:\t"
+ "movl (%0), %%eax\n\t"
+ "movl 4(%0), %%edx\n\t"
+ "lock cmpxchg8b (%0)\n\t"
+ "jnz 1b"
+ : /* no outputs */
+ : "D"(ptr),
+ "b"(low),
+ "c"(high)
+ : "ax","dx","memory");
+}
+
+static inline void __set_64bit_constant (unsigned long long *ptr,
+ unsigned long long value)
+{
+ __set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
+}
+#define ll_low(x) *(((unsigned int*)&(x))+0)
+#define ll_high(x) *(((unsigned int*)&(x))+1)
+
+static inline void __set_64bit_var (unsigned long long *ptr,
+ unsigned long long value)
+{
+ __set_64bit(ptr,ll_low(value), ll_high(value));
+}
+
+#define set_64bit(ptr,value) \
+(__builtin_constant_p(value) ? \
+ __set_64bit_constant(ptr, value) : \
+ __set_64bit_var(ptr, value) )
+
+#define _set_64bit(ptr,value) \
+(__builtin_constant_p(value) ? \
+ __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
+ __set_64bit(ptr, ll_low(value), ll_high(value)) )
+
+/*
+ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
+ * Note 2: xchg has side effect, so that attribute volatile is necessary,
+ * but generally the primitive is invalid, *ptr is output argument. --ANK
+ */
+static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+{
+ switch (size) {
+ case 1:
+ __asm__ __volatile__("xchgb %b0,%1"
+ :"=q" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ case 2:
+ __asm__ __volatile__("xchgw %w0,%1"
+ :"=r" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ case 4:
+ __asm__ __volatile__("xchgl %0,%1"
+ :"=r" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ }
+ return x;
+}
+
+/*
+ * Atomic compare and exchange. Compare OLD with MEM, if identical,
+ * store NEW in MEM. Return the initial value in MEM. Success is
+ * indicated by comparing RETURN with OLD.
+ */
+
+#ifdef CONFIG_X86_CMPXCHG
+#define __HAVE_ARCH_CMPXCHG 1
+
+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+ unsigned long new, int size)
+{
+ unsigned long prev;
+ switch (size) {
+ case 1:
+ __asm__ __volatile__("lock cmpxchgb %b1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ case 2:
+ __asm__ __volatile__("lock cmpxchgw %w1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ case 4:
+ __asm__ __volatile__("lock cmpxchgl %1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ }
+ return old;
+}
+
+#define cmpxchg(ptr,o,n)\
+ ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
+ (unsigned long)(n),sizeof(*(ptr))))
+
+#else
+/* Compiling for a 386 proper. Is it worth implementing via cli/sti? */
+#endif
+
+/*
+ * Force strict CPU ordering.
+ * And yes, this is required on UP too when we're talking
+ * to devices.
+ *
+ * For now, "wmb()" doesn't actually do anything, as all
+ * Intel CPU's follow what Intel calls a *Processor Order*,
+ * in which all writes are seen in the program order even
+ * outside the CPU.
+ *
+ * I expect future Intel CPU's to have a weaker ordering,
+ * but I'd also expect them to finally get their act together
+ * and add some real memory barriers if so.
+ *
+ * Some non intel clones support out of order store. wmb() ceases to be a
+ * nop for these.
+ */
+
+#define mb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
+#define rmb() mb()
+
+#ifdef CONFIG_X86_OOSTORE
+#define wmb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
+#else
+#define wmb() __asm__ __volatile__ ("": : :"memory")
+#endif
+
+#ifdef CONFIG_SMP
+#define smp_mb() mb()
+#define smp_rmb() rmb()
+#define smp_wmb() wmb()
+#define set_mb(var, value) do { xchg(&var, value); } while (0)
+#else
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#define set_mb(var, value) do { var = value; barrier(); } while (0)
+#endif
+
+#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+
+#define safe_halt() ((void)0)
+
+/*
+ * The use of 'barrier' in the following reflects their use as local-lock
+ * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following
+ * critical operations are executed. All critical operations must complete
+ * /before/ reentrancy is permitted (e.g., __sti()). Alpha architecture also
+ * includes these barriers, for example.
+ */
+
+#define __cli() \
+do { \
+ HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask = 1; \
+ barrier(); \
+} while (0)
+
+#define __sti() \
+do { \
+ shared_info_t *_shared = HYPERVISOR_shared_info; \
+ barrier(); \
+ _shared->vcpu_data[0].evtchn_upcall_mask = 0; \
+ barrier(); /* unmask then check (avoid races) */ \
+ if ( unlikely(_shared->vcpu_data[0].evtchn_upcall_pending) ) \
+ force_evtchn_callback(); \
+} while (0)
+
+#define __save_flags(x) \
+do { \
+ (x) = HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask; \
+} while (0)
+
+#define __restore_flags(x) \
+do { \
+ shared_info_t *_shared = HYPERVISOR_shared_info; \
+ barrier(); \
+ if ( (_shared->vcpu_data[0].evtchn_upcall_mask = x) == 0 ) { \
+ barrier(); /* unmask then check (avoid races) */ \
+ if ( unlikely(_shared->vcpu_data[0].evtchn_upcall_pending) ) \
+ force_evtchn_callback(); \
+ } \
+} while (0)
+
+#define __save_and_cli(x) \
+do { \
+ (x) = HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask; \
+ HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask = 1; \
+ barrier(); \
+} while (0)
+
+#define __save_and_sti(x) \
+do { \
+ shared_info_t *_shared = HYPERVISOR_shared_info; \
+ barrier(); \
+ (x) = _shared->vcpu_data[0].evtchn_upcall_mask; \
+ _shared->vcpu_data[0].evtchn_upcall_mask = 0; \
+ barrier(); /* unmask then check (avoid races) */ \
+ if ( unlikely(_shared->vcpu_data[0].evtchn_upcall_pending) ) \
+ force_evtchn_callback(); \
+} while (0)
+
+#define local_irq_save(x) __save_and_cli(x)
+#define local_irq_set(x) __save_and_sti(x)
+#define local_irq_restore(x) __restore_flags(x)
+#define local_irq_disable() __cli()
+#define local_irq_enable() __sti()
+
+
+#ifdef CONFIG_SMP
+#error no SMP
+extern void __global_cli(void);
+extern void __global_sti(void);
+extern unsigned long __global_save_flags(void);
+extern void __global_restore_flags(unsigned long);
+#define cli() __global_cli()
+#define sti() __global_sti()
+#define save_flags(x) ((x)=__global_save_flags())
+#define restore_flags(x) __global_restore_flags(x)
+#define save_and_cli(x) do { save_flags(x); cli(); } while(0);
+#define save_and_sti(x) do { save_flags(x); sti(); } while(0);
+
+#else
+
+#define cli() __cli()
+#define sti() __sti()
+#define save_flags(x) __save_flags(x)
+#define restore_flags(x) __restore_flags(x)
+#define save_and_cli(x) __save_and_cli(x)
+#define save_and_sti(x) __save_and_sti(x)
+
+#endif
+
+/*
+ * disable hlt during certain critical i/o operations
+ */
+#define HAVE_DISABLE_HLT
+void disable_hlt(void);
+void enable_hlt(void);
+
+extern unsigned long dmi_broken;
+extern int is_sony_vaio_laptop;
+
+#define BROKEN_ACPI_Sx 0x0001
+#define BROKEN_INIT_AFTER_S1 0x0002
+#define BROKEN_PNP_BIOS 0x0004
+
+#endif
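
Usage note (illustrative only, not applied by this patch): under Xen the
interrupt flag is replaced by the per-VCPU evtchn_upcall_mask in the shared
info page, so masking is a plain store while unmasking must re-check for
pending events, as the macros above do. The usual driver idiom is
unchanged:

	unsigned long flags;

	local_irq_save(flags);		/* evtchn_upcall_mask = 1 */
	/* ... touch state shared with the event-channel upcall ... */
	local_irq_restore(flags);	/* may force_evtchn_callback() */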
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/include/asm-xen/vga.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/include/asm-xen/vga.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,42 @@
+/*
+ * Access to VGA videoram
+ *
+ * (c) 1998 Martin Mares <mj@xxxxxx>
+ */
+
+#ifndef _LINUX_ASM_VGA_H_
+#define _LINUX_ASM_VGA_H_
+
+#include <asm/io.h>
+
+extern unsigned char *vgacon_mmap;
+
+static unsigned long VGA_MAP_MEM(unsigned long x)
+{
+ if( vgacon_mmap == NULL )
+ {
+ /* This is our first time in this function. This whole thing
+ is a rather grim hack. We know we're going to get asked
+ to map a 32KB region between 0xb0000 and 0xb8000 because
+ that's what VGAs are. We used the boot time permanent
+ fixed map region, and map it to machine pages.
+ */
+ if( x != 0xb8000 )
+ panic("Argghh! VGA Console is weird. 1:%08lx\n",x);
+
+ vgacon_mmap = (unsigned char*) bt_ioremap( 0xa0000, 128*1024 );
+ return (unsigned long) (vgacon_mmap+x-0xa0000);
+ }
+ else
+ {
+ if( x != 0xc0000 && x != 0xa0000 ) /* vidmem_end or charmap fonts */
+ panic("Argghh! VGA Console is weird. 2:%08lx\n",x);
+ return (unsigned long) (vgacon_mmap+x-0xa0000);
+ }
+ return 0;
+}
+
+static inline unsigned char vga_readb(unsigned char * x) { return (*(x)); }
+static inline void vga_writeb(unsigned char x, unsigned char *y) { *(y) = (x); }
+
+#endif
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/include/asm-xen/xor.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/include/asm-xen/xor.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,879 @@
+/*
+ * include/asm-i386/xor.h
+ *
+ * Optimized RAID-5 checksumming functions for MMX and SSE.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * You should have received a copy of the GNU General Public License
+ * (for example /usr/src/linux/COPYING); if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * High-speed RAID5 checksumming functions utilizing MMX instructions.
+ * Copyright (C) 1998 Ingo Molnar.
+ */
+
+#define FPU_SAVE \
+ do { \
+ if (!(current->flags & PF_USEDFPU)) \
+ clts(); \
+ __asm__ __volatile__ ("fsave %0; fwait": "=m"(fpu_save[0])); \
+ } while (0)
+
+#define FPU_RESTORE \
+ do { \
+ __asm__ __volatile__ ("frstor %0": : "m"(fpu_save[0])); \
+ if (!(current->flags & PF_USEDFPU)) \
+ stts(); \
+ } while (0)
+
+#define LD(x,y) " movq 8*("#x")(%1), %%mm"#y" ;\n"
+#define ST(x,y) " movq %%mm"#y", 8*("#x")(%1) ;\n"
+#define XO1(x,y) " pxor 8*("#x")(%2), %%mm"#y" ;\n"
+#define XO2(x,y) " pxor 8*("#x")(%3), %%mm"#y" ;\n"
+#define XO3(x,y) " pxor 8*("#x")(%4), %%mm"#y" ;\n"
+#define XO4(x,y) " pxor 8*("#x")(%5), %%mm"#y" ;\n"
+
+
+static void
+xor_pII_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+{
+ unsigned long lines = bytes >> 7;
+ char fpu_save[108];
+
+ FPU_SAVE;
+
+ __asm__ __volatile__ (
+#undef BLOCK
+#define BLOCK(i) \
+ LD(i,0) \
+ LD(i+1,1) \
+ LD(i+2,2) \
+ LD(i+3,3) \
+ XO1(i,0) \
+ ST(i,0) \
+ XO1(i+1,1) \
+ ST(i+1,1) \
+ XO1(i+2,2) \
+ ST(i+2,2) \
+ XO1(i+3,3) \
+ ST(i+3,3)
+
+ " .align 32 ;\n"
+ " 1: ;\n"
+
+ BLOCK(0)
+ BLOCK(4)
+ BLOCK(8)
+ BLOCK(12)
+
+ " addl $128, %1 ;\n"
+ " addl $128, %2 ;\n"
+ " decl %0 ;\n"
+ " jnz 1b ;\n"
+ : "+r" (lines),
+ "+r" (p1), "+r" (p2)
+ :
+ : "memory");
+
+ FPU_RESTORE;
+}
+
+static void
+xor_pII_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3)
+{
+ unsigned long lines = bytes >> 7;
+ char fpu_save[108];
+
+ FPU_SAVE;
+
+ __asm__ __volatile__ (
+#undef BLOCK
+#define BLOCK(i) \
+ LD(i,0) \
+ LD(i+1,1) \
+ LD(i+2,2) \
+ LD(i+3,3) \
+ XO1(i,0) \
+ XO1(i+1,1) \
+ XO1(i+2,2) \
+ XO1(i+3,3) \
+ XO2(i,0) \
+ ST(i,0) \
+ XO2(i+1,1) \
+ ST(i+1,1) \
+ XO2(i+2,2) \
+ ST(i+2,2) \
+ XO2(i+3,3) \
+ ST(i+3,3)
+
+ " .align 32 ;\n"
+ " 1: ;\n"
+
+ BLOCK(0)
+ BLOCK(4)
+ BLOCK(8)
+ BLOCK(12)
+
+ " addl $128, %1 ;\n"
+ " addl $128, %2 ;\n"
+ " addl $128, %3 ;\n"
+ " decl %0 ;\n"
+ " jnz 1b ;\n"
+ : "+r" (lines),
+ "+r" (p1), "+r" (p2), "+r" (p3)
+ :
+ : "memory");
+
+ FPU_RESTORE;
+}
+
+static void
+xor_pII_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4)
+{
+ unsigned long lines = bytes >> 7;
+ char fpu_save[108];
+
+ FPU_SAVE;
+
+ __asm__ __volatile__ (
+#undef BLOCK
+#define BLOCK(i) \
+ LD(i,0) \
+ LD(i+1,1) \
+ LD(i+2,2) \
+ LD(i+3,3) \
+ XO1(i,0) \
+ XO1(i+1,1) \
+ XO1(i+2,2) \
+ XO1(i+3,3) \
+ XO2(i,0) \
+ XO2(i+1,1) \
+ XO2(i+2,2) \
+ XO2(i+3,3) \
+ XO3(i,0) \
+ ST(i,0) \
+ XO3(i+1,1) \
+ ST(i+1,1) \
+ XO3(i+2,2) \
+ ST(i+2,2) \
+ XO3(i+3,3) \
+ ST(i+3,3)
+
+ " .align 32 ;\n"
+ " 1: ;\n"
+
+ BLOCK(0)
+ BLOCK(4)
+ BLOCK(8)
+ BLOCK(12)
+
+ " addl $128, %1 ;\n"
+ " addl $128, %2 ;\n"
+ " addl $128, %3 ;\n"
+ " addl $128, %4 ;\n"
+ " decl %0 ;\n"
+ " jnz 1b ;\n"
+ : "+r" (lines),
+ "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
+ :
+ : "memory");
+
+ FPU_RESTORE;
+}
+
+
+static void
+xor_pII_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4, unsigned long *p5)
+{
+ unsigned long lines = bytes >> 7;
+ char fpu_save[108];
+
+ FPU_SAVE;
+
+ /* need to save/restore p4/p5 manually otherwise gcc's 10 argument
+ limit gets exceeded (+ counts as two arguments) */
+ __asm__ __volatile__ (
+ " pushl %4\n"
+ " pushl %5\n"
+#undef BLOCK
+#define BLOCK(i) \
+ LD(i,0) \
+ LD(i+1,1) \
+ LD(i+2,2) \
+ LD(i+3,3) \
+ XO1(i,0) \
+ XO1(i+1,1) \
+ XO1(i+2,2) \
+ XO1(i+3,3) \
+ XO2(i,0) \
+ XO2(i+1,1) \
+ XO2(i+2,2) \
+ XO2(i+3,3) \
+ XO3(i,0) \
+ XO3(i+1,1) \
+ XO3(i+2,2) \
+ XO3(i+3,3) \
+ XO4(i,0) \
+ ST(i,0) \
+ XO4(i+1,1) \
+ ST(i+1,1) \
+ XO4(i+2,2) \
+ ST(i+2,2) \
+ XO4(i+3,3) \
+ ST(i+3,3)
+
+ " .align 32 ;\n"
+ " 1: ;\n"
+
+ BLOCK(0)
+ BLOCK(4)
+ BLOCK(8)
+ BLOCK(12)
+
+ " addl $128, %1 ;\n"
+ " addl $128, %2 ;\n"
+ " addl $128, %3 ;\n"
+ " addl $128, %4 ;\n"
+ " addl $128, %5 ;\n"
+ " decl %0 ;\n"
+ " jnz 1b ;\n"
+ " popl %5\n"
+ " popl %4\n"
+ : "+r" (lines),
+ "+r" (p1), "+r" (p2), "+r" (p3)
+ : "r" (p4), "r" (p5)
+ : "memory");
+
+ FPU_RESTORE;
+}
+
+#undef LD
+#undef XO1
+#undef XO2
+#undef XO3
+#undef XO4
+#undef ST
+#undef BLOCK
+
+static void
+xor_p5_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+{
+ unsigned long lines = bytes >> 6;
+ char fpu_save[108];
+
+ FPU_SAVE;
+
+ __asm__ __volatile__ (
+ " .align 32 ;\n"
+ " 1: ;\n"
+ " movq (%1), %%mm0 ;\n"
+ " movq 8(%1), %%mm1 ;\n"
+ " pxor (%2), %%mm0 ;\n"
+ " movq 16(%1), %%mm2 ;\n"
+ " movq %%mm0, (%1) ;\n"
+ " pxor 8(%2), %%mm1 ;\n"
+ " movq 24(%1), %%mm3 ;\n"
+ " movq %%mm1, 8(%1) ;\n"
+ " pxor 16(%2), %%mm2 ;\n"
+ " movq 32(%1), %%mm4 ;\n"
+ " movq %%mm2, 16(%1) ;\n"
+ " pxor 24(%2), %%mm3 ;\n"
+ " movq 40(%1), %%mm5 ;\n"
+ " movq %%mm3, 24(%1) ;\n"
+ " pxor 32(%2), %%mm4 ;\n"
+ " movq 48(%1), %%mm6 ;\n"
+ " movq %%mm4, 32(%1) ;\n"
+ " pxor 40(%2), %%mm5 ;\n"
+ " movq 56(%1), %%mm7 ;\n"
+ " movq %%mm5, 40(%1) ;\n"
+ " pxor 48(%2), %%mm6 ;\n"
+ " pxor 56(%2), %%mm7 ;\n"
+ " movq %%mm6, 48(%1) ;\n"
+ " movq %%mm7, 56(%1) ;\n"
+
+ " addl $64, %1 ;\n"
+ " addl $64, %2 ;\n"
+ " decl %0 ;\n"
+ " jnz 1b ;\n"
+ : "+r" (lines),
+ "+r" (p1), "+r" (p2)
+ :
+ : "memory");
+
+ FPU_RESTORE;
+}
+
+static void
+xor_p5_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3)
+{
+ unsigned long lines = bytes >> 6;
+ char fpu_save[108];
+
+ FPU_SAVE;
+
+ __asm__ __volatile__ (
+ " .align 32,0x90 ;\n"
+ " 1: ;\n"
+ " movq (%1), %%mm0 ;\n"
+ " movq 8(%1), %%mm1 ;\n"
+ " pxor (%2), %%mm0 ;\n"
+ " movq 16(%1), %%mm2 ;\n"
+ " pxor 8(%2), %%mm1 ;\n"
+ " pxor (%3), %%mm0 ;\n"
+ " pxor 16(%2), %%mm2 ;\n"
+ " movq %%mm0, (%1) ;\n"
+ " pxor 8(%3), %%mm1 ;\n"
+ " pxor 16(%3), %%mm2 ;\n"
+ " movq 24(%1), %%mm3 ;\n"
+ " movq %%mm1, 8(%1) ;\n"
+ " movq 32(%1), %%mm4 ;\n"
+ " movq 40(%1), %%mm5 ;\n"
+ " pxor 24(%2), %%mm3 ;\n"
+ " movq %%mm2, 16(%1) ;\n"
+ " pxor 32(%2), %%mm4 ;\n"
+ " pxor 24(%3), %%mm3 ;\n"
+ " pxor 40(%2), %%mm5 ;\n"
+ " movq %%mm3, 24(%1) ;\n"
+ " pxor 32(%3), %%mm4 ;\n"
+ " pxor 40(%3), %%mm5 ;\n"
+ " movq 48(%1), %%mm6 ;\n"
+ " movq %%mm4, 32(%1) ;\n"
+ " movq 56(%1), %%mm7 ;\n"
+ " pxor 48(%2), %%mm6 ;\n"
+ " movq %%mm5, 40(%1) ;\n"
+ " pxor 56(%2), %%mm7 ;\n"
+ " pxor 48(%3), %%mm6 ;\n"
+ " pxor 56(%3), %%mm7 ;\n"
+ " movq %%mm6, 48(%1) ;\n"
+ " movq %%mm7, 56(%1) ;\n"
+
+ " addl $64, %1 ;\n"
+ " addl $64, %2 ;\n"
+ " addl $64, %3 ;\n"
+ " decl %0 ;\n"
+ " jnz 1b ;\n"
+ : "+r" (lines),
+ "+r" (p1), "+r" (p2), "+r" (p3)
+ :
+ : "memory" );
+
+ FPU_RESTORE;
+}
+
+static void
+xor_p5_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4)
+{
+ unsigned long lines = bytes >> 6;
+ char fpu_save[108];
+
+ FPU_SAVE;
+
+ __asm__ __volatile__ (
+ " .align 32,0x90 ;\n"
+ " 1: ;\n"
+ " movq (%1), %%mm0 ;\n"
+ " movq 8(%1), %%mm1 ;\n"
+ " pxor (%2), %%mm0 ;\n"
+ " movq 16(%1), %%mm2 ;\n"
+ " pxor 8(%2), %%mm1 ;\n"
+ " pxor (%3), %%mm0 ;\n"
+ " pxor 16(%2), %%mm2 ;\n"
+ " pxor 8(%3), %%mm1 ;\n"
+ " pxor (%4), %%mm0 ;\n"
+ " movq 24(%1), %%mm3 ;\n"
+ " pxor 16(%3), %%mm2 ;\n"
+ " pxor 8(%4), %%mm1 ;\n"
+ " movq %%mm0, (%1) ;\n"
+ " movq 32(%1), %%mm4 ;\n"
+ " pxor 24(%2), %%mm3 ;\n"
+ " pxor 16(%4), %%mm2 ;\n"
+ " movq %%mm1, 8(%1) ;\n"
+ " movq 40(%1), %%mm5 ;\n"
+ " pxor 32(%2), %%mm4 ;\n"
+ " pxor 24(%3), %%mm3 ;\n"
+ " movq %%mm2, 16(%1) ;\n"
+ " pxor 40(%2), %%mm5 ;\n"
+ " pxor 32(%3), %%mm4 ;\n"
+ " pxor 24(%4), %%mm3 ;\n"
+ " movq %%mm3, 24(%1) ;\n"
+ " movq 56(%1), %%mm7 ;\n"
+ " movq 48(%1), %%mm6 ;\n"
+ " pxor 40(%3), %%mm5 ;\n"
+ " pxor 32(%4), %%mm4 ;\n"
+ " pxor 48(%2), %%mm6 ;\n"
+ " movq %%mm4, 32(%1) ;\n"
+ " pxor 56(%2), %%mm7 ;\n"
+ " pxor 40(%4), %%mm5 ;\n"
+ " pxor 48(%3), %%mm6 ;\n"
+ " pxor 56(%3), %%mm7 ;\n"
+ " movq %%mm5, 40(%1) ;\n"
+ " pxor 48(%4), %%mm6 ;\n"
+ " pxor 56(%4), %%mm7 ;\n"
+ " movq %%mm6, 48(%1) ;\n"
+ " movq %%mm7, 56(%1) ;\n"
+
+ " addl $64, %1 ;\n"
+ " addl $64, %2 ;\n"
+ " addl $64, %3 ;\n"
+ " addl $64, %4 ;\n"
+ " decl %0 ;\n"
+ " jnz 1b ;\n"
+ : "+r" (lines),
+ "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
+ :
+ : "memory");
+
+ FPU_RESTORE;
+}
+
+static void
+xor_p5_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4, unsigned long *p5)
+{
+ unsigned long lines = bytes >> 6;
+ char fpu_save[108];
+
+ FPU_SAVE;
+
+ /* need to save p4/p5 manually to not exceed gcc's 10 argument limit */
+ __asm__ __volatile__ (
+ " pushl %4\n"
+ " pushl %5\n"
+ " .align 32,0x90 ;\n"
+ " 1: ;\n"
+ " movq (%1), %%mm0 ;\n"
+ " movq 8(%1), %%mm1 ;\n"
+ " pxor (%2), %%mm0 ;\n"
+ " pxor 8(%2), %%mm1 ;\n"
+ " movq 16(%1), %%mm2 ;\n"
+ " pxor (%3), %%mm0 ;\n"
+ " pxor 8(%3), %%mm1 ;\n"
+ " pxor 16(%2), %%mm2 ;\n"
+ " pxor (%4), %%mm0 ;\n"
+ " pxor 8(%4), %%mm1 ;\n"
+ " pxor 16(%3), %%mm2 ;\n"
+ " movq 24(%1), %%mm3 ;\n"
+ " pxor (%5), %%mm0 ;\n"
+ " pxor 8(%5), %%mm1 ;\n"
+ " movq %%mm0, (%1) ;\n"
+ " pxor 16(%4), %%mm2 ;\n"
+ " pxor 24(%2), %%mm3 ;\n"
+ " movq %%mm1, 8(%1) ;\n"
+ " pxor 16(%5), %%mm2 ;\n"
+ " pxor 24(%3), %%mm3 ;\n"
+ " movq 32(%1), %%mm4 ;\n"
+ " movq %%mm2, 16(%1) ;\n"
+ " pxor 24(%4), %%mm3 ;\n"
+ " pxor 32(%2), %%mm4 ;\n"
+ " movq 40(%1), %%mm5 ;\n"
+ " pxor 24(%5), %%mm3 ;\n"
+ " pxor 32(%3), %%mm4 ;\n"
+ " pxor 40(%2), %%mm5 ;\n"
+ " movq %%mm3, 24(%1) ;\n"
+ " pxor 32(%4), %%mm4 ;\n"
+ " pxor 40(%3), %%mm5 ;\n"
+ " movq 48(%1), %%mm6 ;\n"
+ " movq 56(%1), %%mm7 ;\n"
+ " pxor 32(%5), %%mm4 ;\n"
+ " pxor 40(%4), %%mm5 ;\n"
+ " pxor 48(%2), %%mm6 ;\n"
+ " pxor 56(%2), %%mm7 ;\n"
+ " movq %%mm4, 32(%1) ;\n"
+ " pxor 48(%3), %%mm6 ;\n"
+ " pxor 56(%3), %%mm7 ;\n"
+ " pxor 40(%5), %%mm5 ;\n"
+ " pxor 48(%4), %%mm6 ;\n"
+ " pxor 56(%4), %%mm7 ;\n"
+ " movq %%mm5, 40(%1) ;\n"
+ " pxor 48(%5), %%mm6 ;\n"
+ " pxor 56(%5), %%mm7 ;\n"
+ " movq %%mm6, 48(%1) ;\n"
+ " movq %%mm7, 56(%1) ;\n"
+
+ " addl $64, %1 ;\n"
+ " addl $64, %2 ;\n"
+ " addl $64, %3 ;\n"
+ " addl $64, %4 ;\n"
+ " addl $64, %5 ;\n"
+ " decl %0 ;\n"
+ " jnz 1b ;\n"
+ " popl %5\n"
+ " popl %4\n"
+ : "+g" (lines),
+ "+r" (p1), "+r" (p2), "+r" (p3)
+ : "r" (p4), "r" (p5)
+ : "memory");
+
+ FPU_RESTORE;
+}
+
+static struct xor_block_template xor_block_pII_mmx = {
+ name: "pII_mmx",
+ do_2: xor_pII_mmx_2,
+ do_3: xor_pII_mmx_3,
+ do_4: xor_pII_mmx_4,
+ do_5: xor_pII_mmx_5,
+};
+
+static struct xor_block_template xor_block_p5_mmx = {
+ name: "p5_mmx",
+ do_2: xor_p5_mmx_2,
+ do_3: xor_p5_mmx_3,
+ do_4: xor_p5_mmx_4,
+ do_5: xor_p5_mmx_5,
+};
+
+#undef FPU_SAVE
+#undef FPU_RESTORE
+
+/*
+ * Cache avoiding checksumming functions utilizing KNI instructions
+ * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo)
+ */
+
+#define XMMS_SAVE \
+ if (!(current->flags & PF_USEDFPU)) \
+ clts(); \
+ __asm__ __volatile__ ( \
+ "movups %%xmm0,(%1) ;\n\t" \
+ "movups %%xmm1,0x10(%1) ;\n\t" \
+ "movups %%xmm2,0x20(%1) ;\n\t" \
+ "movups %%xmm3,0x30(%1) ;\n\t" \
+ : "=&r" (cr0) \
+ : "r" (xmm_save) \
+ : "memory")
+
+#define XMMS_RESTORE \
+ __asm__ __volatile__ ( \
+ "sfence ;\n\t" \
+ "movups (%1),%%xmm0 ;\n\t" \
+ "movups 0x10(%1),%%xmm1 ;\n\t" \
+ "movups 0x20(%1),%%xmm2 ;\n\t" \
+ "movups 0x30(%1),%%xmm3 ;\n\t" \
+ : \
+ : "r" (cr0), "r" (xmm_save) \
+ : "memory"); \
+ if (!(current->flags & PF_USEDFPU)) \
+ stts()
+
+#define ALIGN16 __attribute__((aligned(16)))
+
+#define OFFS(x) "16*("#x")"
+#define PF_OFFS(x) "256+16*("#x")"
+#define PF0(x) " prefetchnta "PF_OFFS(x)"(%1) ;\n"
+#define LD(x,y) " movaps "OFFS(x)"(%1), %%xmm"#y" ;\n"
+#define ST(x,y) " movaps %%xmm"#y", "OFFS(x)"(%1) ;\n"
+#define PF1(x) " prefetchnta "PF_OFFS(x)"(%2) ;\n"
+#define PF2(x) " prefetchnta "PF_OFFS(x)"(%3) ;\n"
+#define PF3(x) " prefetchnta "PF_OFFS(x)"(%4) ;\n"
+#define PF4(x) " prefetchnta "PF_OFFS(x)"(%5) ;\n"
+#define PF5(x) " prefetchnta "PF_OFFS(x)"(%6) ;\n"
+#define XO1(x,y) " xorps "OFFS(x)"(%2), %%xmm"#y" ;\n"
+#define XO2(x,y) " xorps "OFFS(x)"(%3), %%xmm"#y" ;\n"
+#define XO3(x,y) " xorps "OFFS(x)"(%4), %%xmm"#y" ;\n"
+#define XO4(x,y) " xorps "OFFS(x)"(%5), %%xmm"#y" ;\n"
+#define XO5(x,y) " xorps "OFFS(x)"(%6), %%xmm"#y" ;\n"
+
+
+static void
+xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+{
+ unsigned long lines = bytes >> 8;
+ char xmm_save[16*4] ALIGN16;
+ int cr0;
+
+ XMMS_SAVE;
+
+ __asm__ __volatile__ (
+#undef BLOCK
+#define BLOCK(i) \
+ LD(i,0) \
+ LD(i+1,1) \
+ PF1(i) \
+ PF1(i+2) \
+ LD(i+2,2) \
+ LD(i+3,3) \
+ PF0(i+4) \
+ PF0(i+6) \
+ XO1(i,0) \
+ XO1(i+1,1) \
+ XO1(i+2,2) \
+ XO1(i+3,3) \
+ ST(i,0) \
+ ST(i+1,1) \
+ ST(i+2,2) \
+ ST(i+3,3) \
+
+
+ PF0(0)
+ PF0(2)
+
+ " .align 32 ;\n"
+ " 1: ;\n"
+
+ BLOCK(0)
+ BLOCK(4)
+ BLOCK(8)
+ BLOCK(12)
+
+ " addl $256, %1 ;\n"
+ " addl $256, %2 ;\n"
+ " decl %0 ;\n"
+ " jnz 1b ;\n"
+ : "+r" (lines),
+ "+r" (p1), "+r" (p2)
+ :
+ : "memory");
+
+ XMMS_RESTORE;
+}
+
+static void
+xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3)
+{
+ unsigned long lines = bytes >> 8;
+ char xmm_save[16*4] ALIGN16;
+ int cr0;
+
+ XMMS_SAVE;
+
+ __asm__ __volatile__ (
+#undef BLOCK
+#define BLOCK(i) \
+ PF1(i) \
+ PF1(i+2) \
+ LD(i,0) \
+ LD(i+1,1) \
+ LD(i+2,2) \
+ LD(i+3,3) \
+ PF2(i) \
+ PF2(i+2) \
+ PF0(i+4) \
+ PF0(i+6) \
+ XO1(i,0) \
+ XO1(i+1,1) \
+ XO1(i+2,2) \
+ XO1(i+3,3) \
+ XO2(i,0) \
+ XO2(i+1,1) \
+ XO2(i+2,2) \
+ XO2(i+3,3) \
+ ST(i,0) \
+ ST(i+1,1) \
+ ST(i+2,2) \
+ ST(i+3,3) \
+
+
+ PF0(0)
+ PF0(2)
+
+ " .align 32 ;\n"
+ " 1: ;\n"
+
+ BLOCK(0)
+ BLOCK(4)
+ BLOCK(8)
+ BLOCK(12)
+
+ " addl $256, %1 ;\n"
+ " addl $256, %2 ;\n"
+ " addl $256, %3 ;\n"
+ " decl %0 ;\n"
+ " jnz 1b ;\n"
+ : "+r" (lines),
+ "+r" (p1), "+r"(p2), "+r"(p3)
+ :
+ : "memory" );
+
+ XMMS_RESTORE;
+}
+
+static void
+xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4)
+{
+ unsigned long lines = bytes >> 8;
+ char xmm_save[16*4] ALIGN16;
+ int cr0;
+
+ XMMS_SAVE;
+
+ __asm__ __volatile__ (
+#undef BLOCK
+#define BLOCK(i) \
+ PF1(i) \
+ PF1(i+2) \
+ LD(i,0) \
+ LD(i+1,1) \
+ LD(i+2,2) \
+ LD(i+3,3) \
+ PF2(i) \
+ PF2(i+2) \
+ XO1(i,0) \
+ XO1(i+1,1) \
+ XO1(i+2,2) \
+ XO1(i+3,3) \
+ PF3(i) \
+ PF3(i+2) \
+ PF0(i+4) \
+ PF0(i+6) \
+ XO2(i,0) \
+ XO2(i+1,1) \
+ XO2(i+2,2) \
+ XO2(i+3,3) \
+ XO3(i,0) \
+ XO3(i+1,1) \
+ XO3(i+2,2) \
+ XO3(i+3,3) \
+ ST(i,0) \
+ ST(i+1,1) \
+ ST(i+2,2) \
+ ST(i+3,3) \
+
+
+ PF0(0)
+ PF0(2)
+
+ " .align 32 ;\n"
+ " 1: ;\n"
+
+ BLOCK(0)
+ BLOCK(4)
+ BLOCK(8)
+ BLOCK(12)
+
+ " addl $256, %1 ;\n"
+ " addl $256, %2 ;\n"
+ " addl $256, %3 ;\n"
+ " addl $256, %4 ;\n"
+ " decl %0 ;\n"
+ " jnz 1b ;\n"
+ : "+r" (lines),
+ "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
+ :
+ : "memory" );
+
+ XMMS_RESTORE;
+}
+
+static void
+xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4, unsigned long *p5)
+{
+ unsigned long lines = bytes >> 8;
+ char xmm_save[16*4] ALIGN16;
+ int cr0;
+
+ XMMS_SAVE;
+
+ /* need to save p4/p5 manually to not exceed gcc's 10 argument limit */
+ __asm__ __volatile__ (
+ " pushl %4\n"
+ " pushl %5\n"
+#undef BLOCK
+#define BLOCK(i) \
+ PF1(i) \
+ PF1(i+2) \
+ LD(i,0) \
+ LD(i+1,1) \
+ LD(i+2,2) \
+ LD(i+3,3) \
+ PF2(i) \
+ PF2(i+2) \
+ XO1(i,0) \
+ XO1(i+1,1) \
+ XO1(i+2,2) \
+ XO1(i+3,3) \
+ PF3(i) \
+ PF3(i+2) \
+ XO2(i,0) \
+ XO2(i+1,1) \
+ XO2(i+2,2) \
+ XO2(i+3,3) \
+ PF4(i) \
+ PF4(i+2) \
+ PF0(i+4) \
+ PF0(i+6) \
+ XO3(i,0) \
+ XO3(i+1,1) \
+ XO3(i+2,2) \
+ XO3(i+3,3) \
+ XO4(i,0) \
+ XO4(i+1,1) \
+ XO4(i+2,2) \
+ XO4(i+3,3) \
+ ST(i,0) \
+ ST(i+1,1) \
+ ST(i+2,2) \
+ ST(i+3,3) \
+
+
+ PF0(0)
+ PF0(2)
+
+ " .align 32 ;\n"
+ " 1: ;\n"
+
+ BLOCK(0)
+ BLOCK(4)
+ BLOCK(8)
+ BLOCK(12)
+
+ " addl $256, %1 ;\n"
+ " addl $256, %2 ;\n"
+ " addl $256, %3 ;\n"
+ " addl $256, %4 ;\n"
+ " addl $256, %5 ;\n"
+ " decl %0 ;\n"
+ " jnz 1b ;\n"
+ " popl %5\n"
+ " popl %4\n"
+ : "+r" (lines),
+ "+r" (p1), "+r" (p2), "+r" (p3)
+ : "r" (p4), "r" (p5)
+ : "memory");
+
+ XMMS_RESTORE;
+}
+
+static struct xor_block_template xor_block_pIII_sse = {
+ name: "pIII_sse",
+ do_2: xor_sse_2,
+ do_3: xor_sse_3,
+ do_4: xor_sse_4,
+ do_5: xor_sse_5,
+};
+
+/* Also try the generic routines. */
+#include <asm-generic/xor.h>
+
+#undef XOR_TRY_TEMPLATES
+#define XOR_TRY_TEMPLATES \
+ do { \
+ xor_speed(&xor_block_8regs); \
+ xor_speed(&xor_block_32regs); \
+ if (cpu_has_xmm) \
+ xor_speed(&xor_block_pIII_sse); \
+ if (md_cpu_has_mmx()) { \
+ xor_speed(&xor_block_pII_mmx); \
+ xor_speed(&xor_block_p5_mmx); \
+ } \
+ } while (0)
+
+/* We force the use of the SSE xor block because it can write around L2.
+ We may also be able to load into the L1 only depending on how the cpu
+ deals with a load to a line that is being prefetched. */
+#define XOR_SELECT_TEMPLATE(FASTEST) \
+ (cpu_has_xmm ? &xor_block_pIII_sse : FASTEST)
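
Usage note (illustrative only, not applied by this patch): at boot the md
code benchmarks each template via XOR_TRY_TEMPLATES, and XOR_SELECT_TEMPLATE
then pins the SSE variant when cpu_has_xmm, since its non-temporal stores
write around L2. A hedged sketch of invoking a selected template on one
page, assuming the usual active_template pointer set by that calibration:

	extern struct xor_block_template *active_template;

	static void xor_one_page(unsigned long *dst, unsigned long *src)
	{
		/* dst ^= src over a page, via the fastest measured routine */
		active_template->do_2(PAGE_SIZE, dst, src);
	}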
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/include/linux/blk.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/include/linux/blk.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,409 @@
+#ifndef _BLK_H
+#define _BLK_H
+
+#include <linux/blkdev.h>
+#include <linux/locks.h>
+#include <linux/config.h>
+#include <linux/spinlock.h>
+
+/*
+ * Spinlock for protecting the request queue which
+ * is mucked around with in interrupts on potentially
+ * multiple CPU's..
+ */
+extern spinlock_t io_request_lock;
+
+/*
+ * Initialization functions.
+ */
+extern int isp16_init(void);
+extern int cdu31a_init(void);
+extern int acsi_init(void);
+extern int mcd_init(void);
+extern int mcdx_init(void);
+extern int sbpcd_init(void);
+extern int aztcd_init(void);
+extern int sony535_init(void);
+extern int gscd_init(void);
+extern int cm206_init(void);
+extern int optcd_init(void);
+extern int sjcd_init(void);
+extern int cdi_init(void);
+extern int hd_init(void);
+extern int ide_init(void);
+extern int xd_init(void);
+extern int mfm_init(void);
+extern int loop_init(void);
+extern int md_init(void);
+extern int ap_init(void);
+extern int ddv_init(void);
+extern int z2_init(void);
+extern int swim3_init(void);
+extern int swimiop_init(void);
+extern int amiga_floppy_init(void);
+extern int atari_floppy_init(void);
+extern int ez_init(void);
+extern int bpcd_init(void);
+extern int ps2esdi_init(void);
+extern int jsfd_init(void);
+extern int viodasd_init(void);
+extern int viocd_init(void);
+
+#if defined(CONFIG_ARCH_S390)
+extern int dasd_init(void);
+extern int xpram_init(void);
+extern int tapeblock_init(void);
+#endif /* CONFIG_ARCH_S390 */
+
+#if defined(CONFIG_XEN)
+extern int xlblk_init(void);
+#endif /* CONFIG_XEN */
+
+extern void set_device_ro(kdev_t dev,int flag);
+void add_blkdev_randomness(int major);
+
+extern int floppy_init(void);
+extern int rd_doload; /* 1 = load ramdisk, 0 = don't load */
+extern int rd_prompt; /* 1 = prompt for ramdisk, 0 = don't prompt */
+extern int rd_image_start; /* starting block # of image */
+
+#ifdef CONFIG_BLK_DEV_INITRD
+
+#define INITRD_MINOR 250 /* shouldn't collide with /dev/ram* too soon ... */
+
+extern unsigned long initrd_start,initrd_end;
+extern int initrd_below_start_ok; /* 1 if it is not an error if initrd_start < memory_start */
+void initrd_init(void);
+
+#endif
+
+
+/*
+ * end_request() and friends. Must be called with the request queue spinlock
+ * acquired. All functions called within end_request() _must_be_ atomic.
+ *
+ * Several drivers define their own end_request and call
+ * end_that_request_first() and end_that_request_last()
+ * for parts of the original function. This prevents
+ * code duplication in drivers.
+ */
+
+static inline void blkdev_dequeue_request(struct request * req)
+{
+ list_del(&req->queue);
+}
+
+int end_that_request_first(struct request *req, int uptodate, char *name);
+void end_that_request_last(struct request *req);
+
+#if defined(MAJOR_NR) || defined(IDE_DRIVER)
+
+#undef DEVICE_ON
+#undef DEVICE_OFF
+
+/*
+ * Add entries as needed.
+ */
+
+#ifdef IDE_DRIVER
+
+#define DEVICE_NR(device) (MINOR(device) >> PARTN_BITS)
+#define DEVICE_NAME "ide"
+
+#elif (MAJOR_NR == RAMDISK_MAJOR)
+
+/* ram disk */
+#define DEVICE_NAME "ramdisk"
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_NO_RANDOM
+
+#elif (MAJOR_NR == Z2RAM_MAJOR)
+
+/* Zorro II Ram */
+#define DEVICE_NAME "Z2RAM"
+#define DEVICE_REQUEST do_z2_request
+#define DEVICE_NR(device) (MINOR(device))
+
+#elif (MAJOR_NR == FLOPPY_MAJOR)
+
+static void floppy_off(unsigned int nr);
+
+#define DEVICE_NAME "floppy"
+#define DEVICE_INTR do_floppy
+#define DEVICE_REQUEST do_fd_request
+#define DEVICE_NR(device) ( (MINOR(device) & 3) | ((MINOR(device) & 0x80 ) >> 5 ))
+#define DEVICE_OFF(device) floppy_off(DEVICE_NR(device))
+
+#elif (MAJOR_NR == HD_MAJOR)
+
+/* Hard disk: timeout is 6 seconds. */
+#define DEVICE_NAME "hard disk"
+#define DEVICE_INTR do_hd
+#define TIMEOUT_VALUE (6*HZ)
+#define DEVICE_REQUEST do_hd_request
+#define DEVICE_NR(device) (MINOR(device)>>6)
+
+#elif (SCSI_DISK_MAJOR(MAJOR_NR))
+
+#define DEVICE_NAME "scsidisk"
+#define TIMEOUT_VALUE (2*HZ)
+#define DEVICE_NR(device) (((MAJOR(device) & SD_MAJOR_MASK) << (8 - 4)) + (MINOR(device) >> 4))
+
+/* Kludge to use the same number for both char and block major numbers */
+#elif (MAJOR_NR == MD_MAJOR) && defined(MD_DRIVER)
+
+#define DEVICE_NAME "Multiple devices driver"
+#define DEVICE_REQUEST do_md_request
+#define DEVICE_NR(device) (MINOR(device))
+
+#elif (MAJOR_NR == SCSI_TAPE_MAJOR)
+
+#define DEVICE_NAME "scsitape"
+#define DEVICE_INTR do_st
+#define DEVICE_NR(device) (MINOR(device) & 0x7f)
+
+#elif (MAJOR_NR == OSST_MAJOR)
+
+#define DEVICE_NAME "onstream"
+#define DEVICE_INTR do_osst
+#define DEVICE_NR(device) (MINOR(device) & 0x7f)
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == SCSI_CDROM_MAJOR)
+
+#define DEVICE_NAME "CD-ROM"
+#define DEVICE_NR(device) (MINOR(device))
+
+#elif (MAJOR_NR == XT_DISK_MAJOR)
+
+#define DEVICE_NAME "xt disk"
+#define DEVICE_REQUEST do_xd_request
+#define DEVICE_NR(device) (MINOR(device) >> 6)
+
+#elif (MAJOR_NR == PS2ESDI_MAJOR)
+
+#define DEVICE_NAME "PS/2 ESDI"
+#define DEVICE_REQUEST do_ps2esdi_request
+#define DEVICE_NR(device) (MINOR(device) >> 6)
+
+#elif (MAJOR_NR == CDU31A_CDROM_MAJOR)
+
+#define DEVICE_NAME "CDU31A"
+#define DEVICE_REQUEST do_cdu31a_request
+#define DEVICE_NR(device) (MINOR(device))
+
+#elif (MAJOR_NR == ACSI_MAJOR) && (defined(CONFIG_ATARI_ACSI) || defined(CONFIG_ATARI_ACSI_MODULE))
+
+#define DEVICE_NAME "ACSI"
+#define DEVICE_INTR do_acsi
+#define DEVICE_REQUEST do_acsi_request
+#define DEVICE_NR(device) (MINOR(device) >> 4)
+
+#elif (MAJOR_NR == MITSUMI_CDROM_MAJOR)
+
+#define DEVICE_NAME "Mitsumi CD-ROM"
+/* #define DEVICE_INTR do_mcd */
+#define DEVICE_REQUEST do_mcd_request
+#define DEVICE_NR(device) (MINOR(device))
+
+#elif (MAJOR_NR == MITSUMI_X_CDROM_MAJOR)
+
+#define DEVICE_NAME "Mitsumi CD-ROM"
+/* #define DEVICE_INTR do_mcdx */
+#define DEVICE_REQUEST do_mcdx_request
+#define DEVICE_NR(device) (MINOR(device))
+
+#elif (MAJOR_NR == MATSUSHITA_CDROM_MAJOR)
+
+#define DEVICE_NAME "Matsushita CD-ROM controller #1"
+#define DEVICE_REQUEST do_sbpcd_request
+#define DEVICE_NR(device) (MINOR(device))
+
+#elif (MAJOR_NR == MATSUSHITA_CDROM2_MAJOR)
+
+#define DEVICE_NAME "Matsushita CD-ROM controller #2"
+#define DEVICE_REQUEST do_sbpcd2_request
+#define DEVICE_NR(device) (MINOR(device))
+
+#elif (MAJOR_NR == MATSUSHITA_CDROM3_MAJOR)
+
+#define DEVICE_NAME "Matsushita CD-ROM controller #3"
+#define DEVICE_REQUEST do_sbpcd3_request
+#define DEVICE_NR(device) (MINOR(device))
+
+#elif (MAJOR_NR == MATSUSHITA_CDROM4_MAJOR)
+
+#define DEVICE_NAME "Matsushita CD-ROM controller #4"
+#define DEVICE_REQUEST do_sbpcd4_request
+#define DEVICE_NR(device) (MINOR(device))
+
+#elif (MAJOR_NR == AZTECH_CDROM_MAJOR)
+
+#define DEVICE_NAME "Aztech CD-ROM"
+#define DEVICE_REQUEST do_aztcd_request
+#define DEVICE_NR(device) (MINOR(device))
+
+#elif (MAJOR_NR == CDU535_CDROM_MAJOR)
+
+#define DEVICE_NAME "SONY-CDU535"
+#define DEVICE_INTR do_cdu535
+#define DEVICE_REQUEST do_cdu535_request
+#define DEVICE_NR(device) (MINOR(device))
+
+#elif (MAJOR_NR == GOLDSTAR_CDROM_MAJOR)
+
+#define DEVICE_NAME "Goldstar R420"
+#define DEVICE_REQUEST do_gscd_request
+#define DEVICE_NR(device) (MINOR(device))
+
+#elif (MAJOR_NR == CM206_CDROM_MAJOR)
+#define DEVICE_NAME "Philips/LMS CD-ROM cm206"
+#define DEVICE_REQUEST do_cm206_request
+#define DEVICE_NR(device) (MINOR(device))
+
+#elif (MAJOR_NR == OPTICS_CDROM_MAJOR)
+
+#define DEVICE_NAME "DOLPHIN 8000AT CD-ROM"
+#define DEVICE_REQUEST do_optcd_request
+#define DEVICE_NR(device) (MINOR(device))
+
+#elif (MAJOR_NR == SANYO_CDROM_MAJOR)
+
+#define DEVICE_NAME "Sanyo H94A CD-ROM"
+#define DEVICE_REQUEST do_sjcd_request
+#define DEVICE_NR(device) (MINOR(device))
+
+#elif (MAJOR_NR == APBLOCK_MAJOR)
+
+#define DEVICE_NAME "apblock"
+#define DEVICE_REQUEST ap_request
+#define DEVICE_NR(device) (MINOR(device))
+
+#elif (MAJOR_NR == DDV_MAJOR)
+
+#define DEVICE_NAME "ddv"
+#define DEVICE_REQUEST ddv_request
+#define DEVICE_NR(device) (MINOR(device)>>PARTN_BITS)
+
+#elif (MAJOR_NR == MFM_ACORN_MAJOR)
+
+#define DEVICE_NAME "mfm disk"
+#define DEVICE_INTR do_mfm
+#define DEVICE_REQUEST do_mfm_request
+#define DEVICE_NR(device) (MINOR(device) >> 6)
+
+#elif (MAJOR_NR == NBD_MAJOR)
+
+#define DEVICE_NAME "nbd"
+#define DEVICE_REQUEST do_nbd_request
+#define DEVICE_NR(device) (MINOR(device))
+
+#elif (MAJOR_NR == MDISK_MAJOR)
+
+#define DEVICE_NAME "mdisk"
+#define DEVICE_REQUEST mdisk_request
+#define DEVICE_NR(device) (MINOR(device))
+
+#elif (MAJOR_NR == DASD_MAJOR)
+
+#define DEVICE_NAME "dasd"
+#define DEVICE_REQUEST do_dasd_request
+#define DEVICE_NR(device) (MINOR(device) >> PARTN_BITS)
+
+#elif (MAJOR_NR == I2O_MAJOR)
+
+#define DEVICE_NAME "I2O block"
+#define DEVICE_REQUEST i2ob_request
+#define DEVICE_NR(device) (MINOR(device)>>4)
+
+#elif (MAJOR_NR == COMPAQ_SMART2_MAJOR)
+
+#define DEVICE_NAME "ida"
+#define TIMEOUT_VALUE (25*HZ)
+#define DEVICE_REQUEST do_ida_request
+#define DEVICE_NR(device) (MINOR(device) >> 4)
+
+#endif /* MAJOR_NR == whatever */
+
+/* provide DEVICE_xxx defaults, if not explicitly defined
+ * above in the MAJOR_NR==xxx if-elif tree */
+#ifndef DEVICE_ON
+#define DEVICE_ON(device) do {} while (0)
+#endif
+#ifndef DEVICE_OFF
+#define DEVICE_OFF(device) do {} while (0)
+#endif
+
+#if (MAJOR_NR != SCSI_TAPE_MAJOR) && (MAJOR_NR != OSST_MAJOR)
+#if !defined(IDE_DRIVER)
+
+#ifndef CURRENT
+#define CURRENT blkdev_entry_next_request(&blk_dev[MAJOR_NR].request_queue.queue_head)
+#endif
+#ifndef QUEUE_EMPTY
+#define QUEUE_EMPTY list_empty(&blk_dev[MAJOR_NR].request_queue.queue_head)
+#endif
+
+#ifndef DEVICE_NAME
+#define DEVICE_NAME "unknown"
+#endif
+
+#define CURRENT_DEV DEVICE_NR(CURRENT->rq_dev)
+
+#ifdef DEVICE_INTR
+static void (*DEVICE_INTR)(void) = NULL;
+#endif
+
+#define SET_INTR(x) (DEVICE_INTR = (x))
+
+#ifdef DEVICE_REQUEST
+static void (DEVICE_REQUEST)(request_queue_t *);
+#endif
+
+#ifdef DEVICE_INTR
+#define CLEAR_INTR SET_INTR(NULL)
+#else
+#define CLEAR_INTR
+#endif
+
+#define INIT_REQUEST \
+ if (QUEUE_EMPTY) {\
+ CLEAR_INTR; \
+ return; \
+ } \
+ if (MAJOR(CURRENT->rq_dev) != MAJOR_NR) \
+ panic(DEVICE_NAME ": request list destroyed"); \
+ if (CURRENT->bh) { \
+ if (!buffer_locked(CURRENT->bh)) \
+ panic(DEVICE_NAME ": block not locked"); \
+ }
+
+#endif /* !defined(IDE_DRIVER) */
+
+
+#ifndef LOCAL_END_REQUEST /* If we have our own end_request, we do not want to include this mess */
+
+#if ! SCSI_BLK_MAJOR(MAJOR_NR) && (MAJOR_NR != COMPAQ_SMART2_MAJOR)
+
+static inline void end_request(int uptodate) {
+ struct request *req = CURRENT;
+
+ if (end_that_request_first(req, uptodate, DEVICE_NAME))
+ return;
+
+#ifndef DEVICE_NO_RANDOM
+ add_blkdev_randomness(MAJOR(req->rq_dev));
+#endif
+ DEVICE_OFF(req->rq_dev);
+ blkdev_dequeue_request(req);
+ end_that_request_last(req);
+}
+
+#endif /* ! SCSI_BLK_MAJOR(MAJOR_NR) */
+#endif /* LOCAL_END_REQUEST */
+
+#endif /* (MAJOR_NR != SCSI_TAPE_MAJOR) */
+#endif /* defined(MAJOR_NR) || defined(IDE_DRIVER) */
+
+#endif /* _BLK_H */
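
Usage note (illustrative only, not applied by this patch): a block driver
instantiates this machinery by defining MAJOR_NR and DEVICE_REQUEST before
including <linux/blk.h>. A minimal request-function sketch in that style
(names hypothetical):

	#define MAJOR_NR	MY_MAJOR	/* hypothetical driver major */
	#define DEVICE_NAME	"mydisk"
	#define DEVICE_REQUEST	do_my_request
	#include <linux/blk.h>

	static void do_my_request(request_queue_t *q)
	{
		for (;;) {
			INIT_REQUEST;		/* returns when queue empty */
			/* ... perform or start the transfer for CURRENT ... */
			end_request(1);		/* 1 = success; dequeues CURRENT */
		}
	}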
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/include/linux/highmem.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/include/linux/highmem.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,137 @@
+#ifndef _LINUX_HIGHMEM_H
+#define _LINUX_HIGHMEM_H
+
+#include <linux/config.h>
+#include <asm/pgalloc.h>
+
+#ifdef CONFIG_HIGHMEM
+
+extern struct page *highmem_start_page;
+
+#include <asm/highmem.h>
+
+/* declarations for linux/mm/highmem.c */
+unsigned int nr_free_highpages(void);
+void kmap_flush_unused(void);
+
+extern struct buffer_head *create_bounce(int rw, struct buffer_head * bh_orig);
+
+static inline char *bh_kmap(struct buffer_head *bh)
+{
+ return kmap(bh->b_page) + bh_offset(bh);
+}
+
+static inline void bh_kunmap(struct buffer_head *bh)
+{
+ kunmap(bh->b_page);
+}
+
+/*
+ * remember to add offset! and never ever reenable interrupts between a
+ * bh_kmap_irq and bh_kunmap_irq!!
+ */
+static inline char *bh_kmap_irq(struct buffer_head *bh, unsigned long *flags)
+{
+ unsigned long addr;
+
+ __save_flags(*flags);
+
+ /*
+ * could be low
+ */
+ if (!PageHighMem(bh->b_page))
+ return bh->b_data;
+
+ /*
+ * it's a highmem page
+ */
+ __cli();
+ addr = (unsigned long) kmap_atomic(bh->b_page, KM_BH_IRQ);
+
+ if (addr & ~PAGE_MASK)
+ BUG();
+
+ return (char *) addr + bh_offset(bh);
+}
+
+static inline void bh_kunmap_irq(char *buffer, unsigned long *flags)
+{
+ unsigned long ptr = (unsigned long) buffer & PAGE_MASK;
+
+ kunmap_atomic((void *) ptr, KM_BH_IRQ);
+ __restore_flags(*flags);
+}
+
+#else /* CONFIG_HIGHMEM */
+
+static inline unsigned int nr_free_highpages(void) { return 0; }
+static inline void kmap_flush_unused(void) { }
+
+static inline void *kmap(struct page *page) { return page_address(page); }
+
+#define kunmap(page) do { } while (0)
+
+#define kmap_atomic(page,idx) kmap(page)
+#define kunmap_atomic(page,idx) kunmap(page)
+
+#define bh_kmap(bh) ((bh)->b_data)
+#define bh_kunmap(bh) do { } while (0)
+#define kmap_nonblock(page) kmap(page)
+#define bh_kmap_irq(bh, flags) ((bh)->b_data)
+#define bh_kunmap_irq(bh, flags) do { *(flags) = 0; } while (0)
+
+#endif /* CONFIG_HIGHMEM */
+
+/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
+static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
+{
+ void *addr = kmap_atomic(page, KM_USER0);
+ clear_user_page(addr, vaddr);
+ kunmap_atomic(addr, KM_USER0);
+}
+
+static inline void clear_highpage(struct page *page)
+{
+ clear_page(kmap(page));
+ kunmap(page);
+}
+
+/*
+ * Same but also flushes aliased cache contents to RAM.
+ */
+static inline void memclear_highpage_flush(struct page *page, unsigned int offset, unsigned int size)
+{
+ char *kaddr;
+
+ if (offset + size > PAGE_SIZE)
+ out_of_line_bug();
+ kaddr = kmap(page);
+ memset(kaddr + offset, 0, size);
+ flush_dcache_page(page);
+ flush_page_to_ram(page);
+ kunmap(page);
+}
+
+static inline void copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr)
+{
+ char *vfrom, *vto;
+
+ vfrom = kmap_atomic(from, KM_USER0);
+ vto = kmap_atomic(to, KM_USER1);
+ copy_user_page(vto, vfrom, vaddr);
+ kunmap_atomic(vfrom, KM_USER0);
+ kunmap_atomic(vto, KM_USER1);
+}
+
+static inline void copy_highpage(struct page *to, struct page *from)
+{
+ char *vfrom, *vto;
+
+ vfrom = kmap_atomic(from, KM_USER0);
+ vto = kmap_atomic(to, KM_USER1);
+ copy_page(vto, vfrom);
+ kunmap_atomic(vfrom, KM_USER0);
+ kunmap_atomic(vto, KM_USER1);
+}
+
+#endif /* _LINUX_HIGHMEM_H */
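
Usage note (illustrative only, not applied by this patch): the pairing
contract for the _irq variants above is strict, since the atomic kmap slot
is reused and interrupts stay off between map and unmap. A minimal sketch:

	unsigned long flags;
	char *data;

	data = bh_kmap_irq(bh, &flags);	/* saves flags; __cli() if highmem */
	memset(data, 0, bh->b_size);	/* no sleeping or __sti() in here */
	bh_kunmap_irq(data, &flags);	/* unmaps and restores flags */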
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/include/linux/irq.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/include/linux/irq.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,80 @@
+#ifndef __irq_h
+#define __irq_h
+
+/*
+ * Please do not include this file in generic code. There is currently
+ * no requirement for any architecture to implement anything held
+ * within this file.
+ *
+ * Thanks. --rmk
+ */
+
+#include <linux/config.h>
+
+#if !defined(CONFIG_ARCH_S390)
+
+#include <linux/cache.h>
+#include <linux/spinlock.h>
+
+#include <asm/irq.h>
+#include <asm/ptrace.h>
+
+/*
+ * IRQ line status.
+ */
+#define IRQ_INPROGRESS 1 /* IRQ handler active - do not enter! */
+#define IRQ_DISABLED 2 /* IRQ disabled - do not enter! */
+#define IRQ_PENDING 4 /* IRQ pending - replay on enable */
+#define IRQ_REPLAY 8 /* IRQ has been replayed but not acked yet */
+#define IRQ_AUTODETECT 16 /* IRQ is being autodetected */
+#define IRQ_WAITING 32 /* IRQ not yet seen - for autodetection */
+#define IRQ_LEVEL 64 /* IRQ level triggered */
+#define IRQ_MASKED 128 /* IRQ masked - shouldn't be seen again */
+#define IRQ_PER_CPU 256 /* IRQ is per CPU */
+
+/*
+ * Interrupt controller descriptor. This is all we need
+ * to describe about the low-level hardware.
+ */
+struct hw_interrupt_type {
+ const char * typename;
+ unsigned int (*startup)(unsigned int irq);
+ void (*shutdown)(unsigned int irq);
+ void (*enable)(unsigned int irq);
+ void (*disable)(unsigned int irq);
+ void (*ack)(unsigned int irq);
+ void (*end)(unsigned int irq);
+ void (*set_affinity)(unsigned int irq, unsigned long mask);
+};
+
+typedef struct hw_interrupt_type hw_irq_controller;
+
+/*
+ * This is the "IRQ descriptor", which contains various information
+ * about the irq, including what kind of hardware handling it has,
+ * whether it is disabled etc etc.
+ *
+ * Pad this out to 32 bytes for cache and indexing reasons.
+ */
+typedef struct {
+ unsigned int status; /* IRQ status */
+ hw_irq_controller *handler;
+ struct irqaction *action; /* IRQ action list */
+ unsigned int depth; /* nested irq disables */
+ spinlock_t lock;
+} ____cacheline_aligned irq_desc_t;
+
+extern irq_desc_t irq_desc [NR_IRQS];
+
+#include <asm/hw_irq.h> /* the arch dependent stuff */
+
+extern int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
+extern int setup_irq(unsigned int , struct irqaction * );
+extern int teardown_irq(unsigned int , struct irqaction * );
+
+extern hw_irq_controller no_irq_type; /* needed in every arch ? */
+extern void no_action(int cpl, void *dev_id, struct pt_regs *regs);
+
+#endif
+
+#endif /* __irq_h */
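
Usage note (illustrative only, not applied by this patch): struct
hw_interrupt_type is the hook set that Xen's event-channel layer supplies
in place of a PIC/APIC driver. A hedged sketch of the shape, with
hypothetical evtchn helpers:

	static unsigned int startup_dynirq(unsigned int irq)
	{
		unmask_evtchn(irq_to_evtchn(irq));	/* hypothetical */
		return 0;
	}

	static struct hw_interrupt_type dynirq_type = {
		typename: "Dynamic-irq",
		startup:  startup_dynirq,
		/* shutdown/enable/disable/ack/end wired up likewise */
	};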
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/include/linux/mm.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/include/linux/mm.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,727 @@
+#ifndef _LINUX_MM_H
+#define _LINUX_MM_H
+
+#include <linux/sched.h>
+#include <linux/errno.h>
+
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+#include <linux/string.h>
+#include <linux/list.h>
+#include <linux/mmzone.h>
+#include <linux/swap.h>
+#include <linux/rbtree.h>
+
+extern unsigned long max_mapnr;
+extern unsigned long num_physpages;
+extern unsigned long num_mappedpages;
+extern void * high_memory;
+extern int page_cluster;
+/* The inactive_clean lists are per zone. */
+extern struct list_head active_list;
+extern struct list_head inactive_list;
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/atomic.h>
+
+/*
+ * Linux kernel virtual memory manager primitives.
+ * The idea being to have a "virtual" mm in the same way
+ * we have a virtual fs - giving a cleaner interface to the
+ * mm details, and allowing different kinds of memory mappings
+ * (from shared memory to executable loading to arbitrary
+ * mmap() functions).
+ */
+
+/*
+ * This struct defines a memory VMM memory area. There is one of these
+ * per VM-area/task. A VM area is any part of the process virtual memory
+ * space that has a special rule for the page-fault handlers (ie a shared
+ * library, the executable area etc).
+ */
+struct vm_area_struct {
+ struct mm_struct * vm_mm; /* The address space we belong to. */
+ unsigned long vm_start; /* Our start address within vm_mm. */
+ unsigned long vm_end; /* The first byte after our end address
+ within vm_mm. */
+
+ /* linked list of VM areas per task, sorted by address */
+ struct vm_area_struct *vm_next;
+
+ pgprot_t vm_page_prot; /* Access permissions of this VMA. */
+ unsigned long vm_flags; /* Flags, listed below. */
+
+ rb_node_t vm_rb;
+
+ /*
+ * For areas with an address space and backing store,
+ * one of the address_space->i_mmap{,shared} lists,
+ * for shm areas, the list of attaches, otherwise unused.
+ */
+ struct vm_area_struct *vm_next_share;
+ struct vm_area_struct **vm_pprev_share;
+
+ /* Function pointers to deal with this struct. */
+ struct vm_operations_struct * vm_ops;
+
+ /* Information about our backing store: */
+ unsigned long vm_pgoff; /* Offset (within vm_file) in PAGE_SIZE
+ units, *not* PAGE_CACHE_SIZE */
+ struct file * vm_file; /* File we map to (can be NULL). */
+ unsigned long vm_raend; /* XXX: put full readahead info here. */
+ void * vm_private_data; /* was vm_pte (shared mem) */
+};
+
+/*
+ * vm_flags..
+ */
+#define VM_READ 0x00000001 /* currently active flags */
+#define VM_WRITE 0x00000002
+#define VM_EXEC 0x00000004
+#define VM_SHARED 0x00000008
+
+#define VM_MAYREAD 0x00000010 /* limits for mprotect() etc */
+#define VM_MAYWRITE 0x00000020
+#define VM_MAYEXEC 0x00000040
+#define VM_MAYSHARE 0x00000080
+
+#define VM_GROWSDOWN 0x00000100 /* general info on the segment */
+#define VM_GROWSUP 0x00000200
+#define VM_SHM 0x00000400 /* shared memory area, don't swap out */
+#define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */
+
+#define VM_EXECUTABLE 0x00001000
+#define VM_LOCKED 0x00002000
+#define VM_IO 0x00004000 /* Memory mapped I/O or similar */
+
+ /* Used by sys_madvise() */
+#define VM_SEQ_READ 0x00008000 /* App will access data sequentially */
+#define VM_RAND_READ 0x00010000 /* App will not benefit from clustered reads */
+
+#define VM_DONTCOPY 0x00020000 /* Do not copy this vma on fork */
+#define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */
+#define VM_RESERVED 0x00080000 /* Don't unmap it from swap_out */
+
+#ifndef VM_STACK_FLAGS
+#define VM_STACK_FLAGS 0x00000177
+#endif
+
+#define VM_READHINTMASK (VM_SEQ_READ | VM_RAND_READ)
+#define VM_ClearReadHint(v) (v)->vm_flags &= ~VM_READHINTMASK
+#define VM_NormalReadHint(v) (!((v)->vm_flags & VM_READHINTMASK))
+#define VM_SequentialReadHint(v) ((v)->vm_flags & VM_SEQ_READ)
+#define VM_RandomReadHint(v) ((v)->vm_flags & VM_RAND_READ)
+
+/* read ahead limits */
+extern int vm_min_readahead;
+extern int vm_max_readahead;
+
+/*
+ * mapping from the currently active vm_flags protection bits (the
+ * low four bits) to a page protection mask..
+ */
+extern pgprot_t protection_map[16];
+
+
+/*
+ * These are the virtual MM functions - opening of an area, closing and
+ * unmapping it (needed to keep files on disk up-to-date etc), pointer
+ * to the functions called when a no-page or a wp-page exception occurs.
+ */
+struct vm_operations_struct {
+ void (*open)(struct vm_area_struct * area);
+ void (*close)(struct vm_area_struct * area);
+	struct page * (*nopage)(struct vm_area_struct * area, unsigned long address, int unused);
+};
+
+/*
+ * Each physical page in the system has a struct page associated with
+ * it to keep track of whatever it is we are using the page for at the
+ * moment. Note that we have no way to track which tasks are using
+ * a page.
+ *
+ * Try to keep the most commonly accessed fields in single cache lines
+ * here (16 bytes or greater). This ordering should be particularly
+ * beneficial on 32-bit processors.
+ *
+ * The first line is data used in page cache lookup, the second line
+ * is used for linear searches (eg. clock algorithm scans).
+ *
+ * TODO: make this structure smaller, it could be as small as 32 bytes.
+ */
+typedef struct page {
+ struct list_head list; /* ->mapping has some page lists. */
+ struct address_space *mapping; /* The inode (or ...) we belong to. */
+ unsigned long index; /* Our offset within mapping. */
+	struct page *next_hash;		/* Next page sharing our hash bucket in
+					   the pagecache hash table. */
+ atomic_t count; /* Usage count, see below. */
+	unsigned long flags;		/* atomic flags, some possibly
+					   updated asynchronously */
+	struct list_head lru;		/* Pageout list, eg. active_list;
+					   protected by pagemap_lru_lock !! */
+ struct page **pprev_hash; /* Complement to *next_hash. */
+ struct buffer_head * buffers; /* Buffer maps us to a disk block. */
+
+ /*
+ * On machines where all RAM is mapped into kernel address space,
+ * we can simply calculate the virtual address. On machines with
+ * highmem some memory is mapped into kernel virtual memory
+ * dynamically, so we need a place to store that address.
+ * Note that this field could be 16 bits on x86 ... ;)
+ *
+ * Architectures with slow multiplication can define
+ * WANT_PAGE_VIRTUAL in asm/page.h
+ */
+#if defined(CONFIG_HIGHMEM) || defined(WANT_PAGE_VIRTUAL)
+	void *virtual;			/* Kernel virtual address (NULL if
+					   not kmapped, ie. highmem) */
+#endif /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
+} mem_map_t;
+
+/*
+ * Methods to modify the page usage count.
+ *
+ * What counts for a page usage:
+ * - cache mapping (page->mapping)
+ * - disk mapping (page->buffers)
+ * - page mapped in a task's page tables, each mapping
+ * is counted separately
+ *
+ * Also, many kernel routines increase the page count before a critical
+ * routine so they can be sure the page doesn't go away from under them.
+ */
+#define get_page(p) atomic_inc(&(p)->count)
+#define put_page(p) __free_page(p)
+#define put_page_testzero(p) atomic_dec_and_test(&(p)->count)
+#define page_count(p) atomic_read(&(p)->count)
+#define set_page_count(p,v) atomic_set(&(p)->count, v)
+
+static inline struct page *nth_page(struct page *page, int n)
+{
+ return page + n;
+}
+
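+/*
+ * Usage sketch (illustrative only): code that must keep a page alive
+ * across a blocking operation takes an extra reference first, e.g.
+ *
+ *	get_page(page);		(count: n -> n+1, page pinned)
+ *	... do work; the page cannot be freed under us ...
+ *	put_page(page);		(count: n+1 -> n, freed on reaching 0)
+ */
+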
+/*
+ * Various page->flags bits:
+ *
+ * PG_reserved is set for special pages, which can never be swapped
+ * out. Some of them might not even exist (eg empty_bad_page)...
+ *
+ * Multiple processes may "see" the same page. E.g. for untouched
+ * mappings of /dev/null, all processes see the same page full of
+ * zeroes, and text pages of executables and shared libraries have
+ * only one copy in memory, at most, normally.
+ *
+ * For the non-reserved pages, page->count denotes a reference count.
+ * page->count == 0 means the page is free.
+ * page->count == 1 means the page is used for exactly one purpose
+ * (e.g. a private data page of one process).
+ *
+ * A page may be used for kmalloc() or anyone else who does a
+ * __get_free_page(). In this case the page->count is at least 1, and
+ * all other fields are unused but should be 0 or NULL. The
+ * management of this page is the responsibility of the one who uses
+ * it.
+ *
+ * The other pages (we may call them "process pages") are completely
+ * managed by the Linux memory manager: I/O, buffers, swapping etc.
+ * The following discussion applies only to them.
+ *
+ * A page may belong to an inode's memory mapping. In this case,
+ * page->mapping is the pointer to the inode, and page->index is the
+ * file offset of the page, in units of PAGE_CACHE_SIZE.
+ *
+ * A page may have buffers allocated to it. In this case,
+ * page->buffers is a circular list of these buffer heads. Else,
+ * page->buffers == NULL.
+ *
+ * For pages belonging to inodes, the page->count is the number of
+ * attaches, plus 1 if buffers are allocated to the page, plus one
+ * for the page cache itself.
+ *
+ * All pages belonging to an inode are in these doubly linked lists:
+ * mapping->clean_pages, mapping->dirty_pages and mapping->locked_pages;
+ * using the page->list list_head. These fields are also used for
+ * freelist management (when page->count==0).
+ *
+ * There is also a hash table mapping (mapping,index) to the page
+ * in memory if present. The lists for this hash table use the fields
+ * page->next_hash and page->pprev_hash.
+ *
+ * All process pages can do I/O:
+ * - inode pages may need to be read from disk,
+ * - inode pages which have been modified and are MAP_SHARED may need
+ * to be written to disk,
+ * - private pages which have been modified may need to be swapped out
+ * to swap space and (later) to be read back into memory.
+ * During disk I/O, PG_locked is used. This bit is set before I/O
+ * and reset when I/O completes. page_waitqueue(page) is a wait queue of all
+ * tasks waiting for the I/O on this page to complete.
+ * PG_uptodate tells whether the page's contents are valid.
+ * When a read completes, the page becomes uptodate, unless a disk I/O
+ * error happened.
+ *
+ * For choosing which pages to swap out, inode pages carry a
+ * PG_referenced bit, which is set any time the system accesses
+ * that page through the (mapping,index) hash table. This referenced
+ * bit, together with the referenced bit in the page tables, is used
+ * to manipulate page->age and move the page across the active,
+ * inactive_dirty and inactive_clean lists.
+ *
+ * Note that the referenced bit, the page->lru list_head and the
+ * active, inactive_dirty and inactive_clean lists are protected by
+ * the pagemap_lru_lock, and *NOT* by the usual PG_locked bit!
+ *
+ * PG_skip is used on sparc/sparc64 architectures to "skip" certain
+ * parts of the address space.
+ *
+ * PG_error is set to indicate that an I/O error occurred on this page.
+ *
+ * PG_arch_1 is an architecture specific page state bit. The generic
+ * code guarantees that this bit is cleared for a page when it first
+ * is entered into the page cache.
+ *
+ * PG_highmem pages are not permanently mapped into the kernel virtual
+ * address space, they need to be kmapped separately for doing IO on
+ * the pages. The struct page (these bits with information) are always
+ * mapped into kernel address space...
+ */
+#define PG_locked 0 /* Page is locked. Don't touch. */
+#define PG_error 1
+#define PG_referenced 2
+#define PG_uptodate 3
+#define PG_dirty 4
+#define PG_unused 5
+#define PG_lru 6
+#define PG_active 7
+#define PG_slab 8
+#define PG_skip 10
+#define PG_highmem 11
+#define PG_checked 12 /* kill me in 2.5.<early>. */
+#define PG_arch_1 13
+#define PG_reserved 14
+#define PG_launder 15 /* written out by VM pressure.. */
+#define PG_fs_1 16 /* Filesystem specific */
+#define PG_foreign 21 /* Page belongs to foreign allocator */
+
+#ifndef arch_set_page_uptodate
+#define arch_set_page_uptodate(page)
+#endif
+
+/* Make it prettier to test the above... */
+#define UnlockPage(page) unlock_page(page)
+#define Page_Uptodate(page) test_bit(PG_uptodate, &(page)->flags)
+#ifndef SetPageUptodate
+#define SetPageUptodate(page) set_bit(PG_uptodate, &(page)->flags)
+#endif
+#define ClearPageUptodate(page) clear_bit(PG_uptodate, &(page)->flags)
+#define PageDirty(page) test_bit(PG_dirty, &(page)->flags)
+#define SetPageDirty(page) set_bit(PG_dirty, &(page)->flags)
+#define ClearPageDirty(page) clear_bit(PG_dirty, &(page)->flags)
+#define PageLocked(page) test_bit(PG_locked, &(page)->flags)
+#define LockPage(page) set_bit(PG_locked, &(page)->flags)
+#define TryLockPage(page) test_and_set_bit(PG_locked, &(page)->flags)
+#define PageChecked(page) test_bit(PG_checked, &(page)->flags)
+#define SetPageChecked(page) set_bit(PG_checked, &(page)->flags)
+#define ClearPageChecked(page) clear_bit(PG_checked, &(page)->flags)
+#define PageLaunder(page) test_bit(PG_launder, &(page)->flags)
+#define SetPageLaunder(page) set_bit(PG_launder, &(page)->flags)
+#define ClearPageLaunder(page) clear_bit(PG_launder, &(page)->flags)
+#define ClearPageArch1(page) clear_bit(PG_arch_1, &(page)->flags)
+
+/* A foreign page uses a custom destructor rather than the buddy allocator. */
+#ifdef CONFIG_FOREIGN_PAGES
+#define PageForeign(page) test_bit(PG_foreign, &(page)->flags)
+#define SetPageForeign(page, dtor) do { \
+ set_bit(PG_foreign, &(page)->flags); \
+ (page)->mapping = (void *)dtor; \
+} while (0)
+#define ClearPageForeign(page) do { \
+ clear_bit(PG_foreign, &(page)->flags); \
+ (page)->mapping = NULL; \
+} while (0)
+#define PageForeignDestructor(page) \
+ ( (void (*) (struct page *)) (page)->mapping )
+#else
+#define PageForeign(page) 0
+#define PageForeignDestructor(page) void
+#endif
+
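+/*
+ * Lifecycle sketch (illustrative; 'balloon_page_dtor' is a made-up name):
+ * an allocator outside the buddy system tags its pages with a destructor,
+ * and the final free is routed through it instead of the buddy allocator:
+ *
+ *	SetPageForeign(page, balloon_page_dtor);
+ *	...
+ *	if (PageForeign(page))
+ *		(PageForeignDestructor(page))(page);
+ */
+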
+/*
+ * The zone field is never updated after free_area_init_core()
+ * sets it, so none of the operations on it need to be atomic.
+ */
+#define NODE_SHIFT 4
+#define ZONE_SHIFT (BITS_PER_LONG - 8)
+
+struct zone_struct;
+extern struct zone_struct *zone_table[];
+
+static inline zone_t *page_zone(struct page *page)
+{
+ return zone_table[page->flags >> ZONE_SHIFT];
+}
+
+static inline void set_page_zone(struct page *page, unsigned long zone_num)
+{
+ page->flags &= ~(~0UL << ZONE_SHIFT);
+ page->flags |= zone_num << ZONE_SHIFT;
+}
+
+/*
+ * In order to avoid #ifdefs within C code itself, we define
+ * set_page_address to a noop for non-highmem machines, where
+ * the field isn't useful.
+ * The same is true for page_address() in arch-dependent code.
+ */
+#if defined(CONFIG_HIGHMEM) || defined(WANT_PAGE_VIRTUAL)
+
+#define set_page_address(page, address) \
+ do { \
+ (page)->virtual = (address); \
+ } while(0)
+
+#else /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
+#define set_page_address(page, address) do { } while(0)
+#endif /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
+
+/*
+ * Permanent address of a page. Obviously must never be
+ * called on a highmem page.
+ */
+#if defined(CONFIG_HIGHMEM) || defined(WANT_PAGE_VIRTUAL)
+
+#define page_address(page) ((page)->virtual)
+
+#else /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
+
+#define page_address(page) \
+ __va( (((page) - page_zone(page)->zone_mem_map) << PAGE_SHIFT) \
+ + page_zone(page)->zone_start_paddr)
+
+#endif /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
+
+extern void FASTCALL(set_page_dirty(struct page *));
+
+/*
+ * The first mb is necessary to safely close the critical section opened by the
+ * TryLockPage(), the second mb is necessary to enforce ordering between
+ * the clear_bit and the read of the waitqueue (to avoid SMP races with a
+ * parallel wait_on_page).
+ */
+#define PageError(page) test_bit(PG_error, &(page)->flags)
+#define SetPageError(page) set_bit(PG_error, &(page)->flags)
+#define ClearPageError(page) clear_bit(PG_error, &(page)->flags)
+#define PageReferenced(page) test_bit(PG_referenced, &(page)->flags)
+#define SetPageReferenced(page) set_bit(PG_referenced, &(page)->flags)
+#define ClearPageReferenced(page) clear_bit(PG_referenced, &(page)->flags)
+#define PageTestandClearReferenced(page)	test_and_clear_bit(PG_referenced, &(page)->flags)
+#define PageSlab(page) test_bit(PG_slab, &(page)->flags)
+#define PageSetSlab(page) set_bit(PG_slab, &(page)->flags)
+#define PageClearSlab(page) clear_bit(PG_slab, &(page)->flags)
+#define PageReserved(page) test_bit(PG_reserved, &(page)->flags)
+
+#define PageActive(page) test_bit(PG_active, &(page)->flags)
+#define SetPageActive(page) set_bit(PG_active, &(page)->flags)
+#define ClearPageActive(page) clear_bit(PG_active, &(page)->flags)
+
+#define PageLRU(page) test_bit(PG_lru, &(page)->flags)
+#define TestSetPageLRU(page) test_and_set_bit(PG_lru, &(page)->flags)
+#define TestClearPageLRU(page) test_and_clear_bit(PG_lru, &(page)->flags)
+
+#ifdef CONFIG_HIGHMEM
+#define PageHighMem(page) test_bit(PG_highmem, &(page)->flags)
+#else
+#define PageHighMem(page)	0 /* needed to optimize away at compile time */
+#endif
+
+#define SetPageReserved(page) set_bit(PG_reserved, &(page)->flags)
+#define ClearPageReserved(page)	clear_bit(PG_reserved, &(page)->flags)
+
+/*
+ * Error return values for the *_nopage functions
+ */
+#define NOPAGE_SIGBUS (NULL)
+#define NOPAGE_OOM ((struct page *) (-1))
+
+/* The array of struct pages */
+extern mem_map_t * mem_map;
+
+/*
+ * There is only one page-allocator function, and two main namespaces to
+ * it. The alloc_page*() variants return 'struct page *' and as such
+ * can allocate highmem pages, the *get*page*() variants return
+ * virtual kernel addresses to the allocated page(s).
+ */
+extern struct page * FASTCALL(_alloc_pages(unsigned int gfp_mask, unsigned int order));
+extern struct page * FASTCALL(__alloc_pages(unsigned int gfp_mask, unsigned int order, zonelist_t *zonelist));
+extern struct page * alloc_pages_node(int nid, unsigned int gfp_mask, unsigned int order);
+
+static inline struct page * alloc_pages(unsigned int gfp_mask, unsigned int order)
+{
+ /*
+ * Gets optimized away by the compiler.
+ */
+ if (order >= MAX_ORDER)
+ return NULL;
+ return _alloc_pages(gfp_mask, order);
+}
+
+#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
+
+extern unsigned long FASTCALL(__get_free_pages(unsigned int gfp_mask, unsigned int order));
+extern unsigned long FASTCALL(get_zeroed_page(unsigned int gfp_mask));
+
+#define __get_free_page(gfp_mask) \
+ __get_free_pages((gfp_mask),0)
+
+#define __get_dma_pages(gfp_mask, order) \
+ __get_free_pages((gfp_mask) | GFP_DMA,(order))
+
+/*
+ * The old interface name will be removed in 2.5:
+ */
+#define get_free_page get_zeroed_page
+
+/*
+ * There is only one 'core' page-freeing function.
+ */
+extern void FASTCALL(__free_pages(struct page *page, unsigned int order));
+extern void FASTCALL(free_pages(unsigned long addr, unsigned int order));
+
+#define __free_page(page) __free_pages((page), 0)
+#define free_page(addr) free_pages((addr),0)
+
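+/*
+ * Pairing sketch (illustrative only): frees must match the allocation
+ * namespace, since only the struct-page variants can return highmem:
+ *
+ *	struct page *p = alloc_pages(GFP_KERNEL, 2);	(pair with __free_pages(p, 2))
+ *	unsigned long a = __get_free_page(GFP_KERNEL);	(pair with free_page(a))
+ */
+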
+extern void show_free_areas(void);
+extern void show_free_areas_node(pg_data_t *pgdat);
+
+extern void clear_page_tables(struct mm_struct *, unsigned long, int);
+
+extern int fail_writepage(struct page *);
+struct page * shmem_nopage(struct vm_area_struct * vma, unsigned long address, int unused);
+struct file *shmem_file_setup(char * name, loff_t size);
+extern void shmem_lock(struct file * file, int lock);
+extern int shmem_zero_setup(struct vm_area_struct *);
+
+extern void zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size);
+extern int copy_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma);
+extern int remap_page_range(unsigned long from, unsigned long to, unsigned long size, pgprot_t prot);
+extern int zeromap_page_range(unsigned long from, unsigned long size, pgprot_t prot);
+
+extern int vmtruncate(struct inode * inode, loff_t offset);
+extern pmd_t *FASTCALL(__pmd_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address));
+extern pte_t *FASTCALL(pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address));
+extern int handle_mm_fault(struct mm_struct *mm,struct vm_area_struct *vma, unsigned long address, int write_access);
+extern int make_pages_present(unsigned long addr, unsigned long end);
+extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
+extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char *dst, int len);
+extern int ptrace_writedata(struct task_struct *tsk, char * src, unsigned long dst, int len);
+extern int ptrace_attach(struct task_struct *tsk);
+extern int ptrace_detach(struct task_struct *, unsigned int);
+extern void ptrace_disable(struct task_struct *);
+extern int ptrace_check_attach(struct task_struct *task, int kill);
+
+int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start,
+		int len, int write, int force, struct page **pages, struct vm_area_struct **vmas);
+
+/*
+ * On a two-level page table, this ends up being trivial. Thus the
+ * inlining and the symmetry break with pte_alloc() that does all
+ * of this out-of-line.
+ */
+static inline pmd_t *pmd_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+{
+ if (pgd_none(*pgd))
+ return __pmd_alloc(mm, pgd, address);
+ return pmd_offset(pgd, address);
+}
+
+extern int pgt_cache_water[2];
+extern int check_pgt_cache(void);
+
+extern void free_area_init(unsigned long * zones_size);
+extern void free_area_init_node(int nid, pg_data_t *pgdat, struct page *pmap,
+ unsigned long * zones_size, unsigned long zone_start_paddr,
+ unsigned long *zholes_size);
+extern void mem_init(void);
+extern void show_mem(void);
+extern void si_meminfo(struct sysinfo * val);
+extern void swapin_readahead(swp_entry_t);
+
+extern struct address_space swapper_space;
+#define PageSwapCache(page) ((page)->mapping == &swapper_space)
+
+static inline int is_page_cache_freeable(struct page * page)
+{
+ return page_count(page) - !!page->buffers == 1;
+}
+
+extern int FASTCALL(can_share_swap_page(struct page *));
+extern int FASTCALL(remove_exclusive_swap_page(struct page *));
+
+extern void __free_pte(pte_t);
+
+/* mmap.c */
+extern void lock_vma_mappings(struct vm_area_struct *);
+extern void unlock_vma_mappings(struct vm_area_struct *);
+extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
+extern void __insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
+extern void build_mmap_rb(struct mm_struct *);
+extern void exit_mmap(struct mm_struct *);
+
+extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
+
+extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long prot,
+ unsigned long flag, unsigned long pgoff);
+
+static inline unsigned long do_mmap(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long prot,
+ unsigned long flag, unsigned long offset)
+{
+ unsigned long ret = -EINVAL;
+ if ((offset + PAGE_ALIGN(len)) < offset)
+ goto out;
+ if (!(offset & ~PAGE_MASK))
+		ret = do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
+out:
+ return ret;
+}
+
+extern int do_munmap(struct mm_struct *, unsigned long, size_t);
+
+extern unsigned long do_brk(unsigned long, unsigned long);
+
+static inline void __vma_unlink(struct mm_struct * mm, struct vm_area_struct * vma, struct vm_area_struct * prev)
+{
+ prev->vm_next = vma->vm_next;
+ rb_erase(&vma->vm_rb, &mm->mm_rb);
+ if (mm->mmap_cache == vma)
+ mm->mmap_cache = prev;
+}
+
+static inline int can_vma_merge(struct vm_area_struct * vma, unsigned long vm_flags)
+{
+ if (!vma->vm_file && vma->vm_flags == vm_flags)
+ return 1;
+ else
+ return 0;
+}
+
+struct zone_t;
+/* filemap.c */
+extern void remove_inode_page(struct page *);
+extern unsigned long page_unuse(struct page *);
+extern void truncate_inode_pages(struct address_space *, loff_t);
+
+/* generic vm_area_ops exported for stackable file systems */
+extern int filemap_sync(struct vm_area_struct *, unsigned long, size_t, unsigned int);
+extern struct page *filemap_nopage(struct vm_area_struct *, unsigned long, int);
+
+/*
+ * GFP bitmasks..
+ */
+/* Zone modifiers in GFP_ZONEMASK (see linux/mmzone.h - low four bits) */
+#define __GFP_DMA 0x01
+#define __GFP_HIGHMEM 0x02
+
+/* Action modifiers - doesn't change the zoning */
+#define __GFP_WAIT 0x10 /* Can wait and reschedule? */
+#define __GFP_HIGH 0x20 /* Should access emergency pools? */
+#define __GFP_IO 0x40 /* Can start low memory physical IO? */
+#define __GFP_HIGHIO 0x80 /* Can start high mem physical IO? */
+#define __GFP_FS 0x100 /* Can call down to low-level FS? */
+
+#define GFP_NOHIGHIO (__GFP_HIGH | __GFP_WAIT | __GFP_IO)
+#define GFP_NOIO (__GFP_HIGH | __GFP_WAIT)
+#define GFP_NOFS (__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO)
+#define GFP_ATOMIC (__GFP_HIGH)
+#define GFP_USER	(             __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
+#define GFP_HIGHUSER	(             __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS | __GFP_HIGHMEM)
+#define GFP_KERNEL	(__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
+#define GFP_NFS		(__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
+#define GFP_KSWAPD	(             __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
+
+/* Flag - indicates that the buffer will be suitable for DMA. Ignored on some
+ platforms, used as appropriate on others */
+
+#define GFP_DMA __GFP_DMA
+
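+/*
+ * Composition note (illustrative only): the convenience masks above are
+ * unions of the action modifiers, e.g.
+ *
+ *	GFP_ATOMIC == __GFP_HIGH	(may not sleep; interrupt context)
+ *	GFP_KERNEL == __GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS
+ *
+ * so any caller that cannot sleep must avoid masks containing __GFP_WAIT.
+ */
+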
+static inline unsigned int pf_gfp_mask(unsigned int gfp_mask)
+{
+	/* avoid all memory balancing I/O methods if this task cannot block on I/O */
+ if (current->flags & PF_NOIO)
+ gfp_mask &= ~(__GFP_IO | __GFP_HIGHIO | __GFP_FS);
+
+ return gfp_mask;
+}
+
+/* vma is the first one with address < vma->vm_end,
+ * and even address < vma->vm_start. Have to extend vma. */
+static inline int expand_stack(struct vm_area_struct * vma, unsigned long address)
+{
+ unsigned long grow;
+
+ /*
+ * vma->vm_start/vm_end cannot change under us because the caller
+ * is required to hold the mmap_sem in read mode. We need the
+ * page_table_lock lock to serialize against concurrent expand_stacks.
+ */
+ address &= PAGE_MASK;
+ spin_lock(&vma->vm_mm->page_table_lock);
+
+ /* already expanded while we were spinning? */
+ if (vma->vm_start <= address) {
+ spin_unlock(&vma->vm_mm->page_table_lock);
+ return 0;
+ }
+
+ grow = (vma->vm_start - address) >> PAGE_SHIFT;
+	if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur ||
+	    ((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) > current->rlim[RLIMIT_AS].rlim_cur) {
+ spin_unlock(&vma->vm_mm->page_table_lock);
+ return -ENOMEM;
+ }
+
+	if ((vma->vm_flags & VM_LOCKED) &&
+	    ((vma->vm_mm->locked_vm + grow) << PAGE_SHIFT) > current->rlim[RLIMIT_MEMLOCK].rlim_cur) {
+ spin_unlock(&vma->vm_mm->page_table_lock);
+ return -ENOMEM;
+ }
+
+
+ vma->vm_start = address;
+ vma->vm_pgoff -= grow;
+ vma->vm_mm->total_vm += grow;
+ if (vma->vm_flags & VM_LOCKED)
+ vma->vm_mm->locked_vm += grow;
+ spin_unlock(&vma->vm_mm->page_table_lock);
+ return 0;
+}
+
+/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
+extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
+extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
+					     struct vm_area_struct **pprev);
+
+/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
+   NULL if none.  Assume start_addr < end_addr. */
+static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
+{
+ struct vm_area_struct * vma = find_vma(mm,start_addr);
+
+ if (vma && end_addr <= vma->vm_start)
+ vma = NULL;
+ return vma;
+}
+
+extern struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr);
+
+extern struct page * vmalloc_to_page(void *addr);
+
+#endif /* __KERNEL__ */
+
+#endif
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/include/linux/sched.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/include/linux/sched.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,971 @@
+#ifndef _LINUX_SCHED_H
+#define _LINUX_SCHED_H
+
+#include <asm/param.h> /* for HZ */
+
+extern unsigned long event;
+
+#include <linux/config.h>
+#include <linux/binfmts.h>
+#include <linux/threads.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/times.h>
+#include <linux/timex.h>
+#include <linux/rbtree.h>
+
+#include <asm/system.h>
+#include <asm/semaphore.h>
+#include <asm/page.h>
+#include <asm/ptrace.h>
+#include <asm/mmu.h>
+
+#include <linux/smp.h>
+#include <linux/tty.h>
+#include <linux/sem.h>
+#include <linux/signal.h>
+#include <linux/securebits.h>
+#include <linux/fs_struct.h>
+
+struct exec_domain;
+
+/*
+ * cloning flags:
+ */
+#define CSIGNAL		0x000000ff	/* signal mask to be sent at exit */
+#define CLONE_VM	0x00000100	/* set if VM shared between processes */
+#define CLONE_FS	0x00000200	/* set if fs info shared between processes */
+#define CLONE_FILES	0x00000400	/* set if open files shared between processes */
+#define CLONE_SIGHAND	0x00000800	/* set if signal handlers and blocked signals shared */
+#define CLONE_PID	0x00001000	/* set if pid shared */
+#define CLONE_PTRACE	0x00002000	/* set if we want to let tracing continue on the child too */
+#define CLONE_VFORK	0x00004000	/* set if the parent wants the child to wake it up on mm_release */
+#define CLONE_PARENT	0x00008000	/* set if we want to have the same parent as the cloner */
+#define CLONE_THREAD 0x00010000 /* Same thread group? */
+#define CLONE_NEWNS 0x00020000 /* New namespace group? */
+
+#define CLONE_SIGNAL (CLONE_SIGHAND | CLONE_THREAD)
+
+/*
+ * These are the constants used to fake the fixed-point load-average
+ * counting. Some notes:
+ * - 11 bit fractions expand to 22 bits by the multiplies: this gives
+ * a load-average precision of 10 bits integer + 11 bits fractional
+ * - if you want to count load-averages more often, you need more
+ * precision, or rounding will get you. With 2-second counting freq,
+ * the EXP_n values would be 1981, 2034 and 2043 if still using only
+ * 11 bit fractions.
+ */
+extern unsigned long avenrun[]; /* Load averages */
+
+#define FSHIFT 11 /* nr of bits of precision */
+#define FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-point */
+#define LOAD_FREQ (5*HZ) /* 5 sec intervals */
+#define EXP_1 1884 /* 1/exp(5sec/1min) as fixed-point */
+#define EXP_5 2014 /* 1/exp(5sec/5min) */
+#define EXP_15 2037 /* 1/exp(5sec/15min) */
+
+#define CALC_LOAD(load,exp,n) \
+ load *= exp; \
+ load += n*(FIXED_1-exp); \
+ load >>= FSHIFT;
+
+#define CT_TO_SECS(x) ((x) / HZ)
+#define CT_TO_USECS(x) (((x) % HZ) * 1000000/HZ)
+
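+/*
+ * Worked example (illustrative only): avenrun[] values are fixed-point
+ * with FIXED_1 == 2048 representing 1.0.  With three runnable tasks the
+ * caller passes n = 3*FIXED_1 = 6144, and one LOAD_FREQ tick computes
+ *
+ *	load = (load*EXP_1 + n*(FIXED_1 - EXP_1)) >> FSHIFT;
+ *
+ * which from load = 0 gives (6144*164) >> 11 = 492, i.e. about 0.24:
+ * the 1-minute average decays exponentially towards the running count.
+ */
+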
+extern int nr_running, nr_threads;
+extern int last_pid;
+
+#include <linux/fs.h>
+#include <linux/time.h>
+#include <linux/param.h>
+#include <linux/resource.h>
+#ifdef __KERNEL__
+#include <linux/timer.h>
+#endif
+
+#include <asm/processor.h>
+
+#define TASK_RUNNING 0
+#define TASK_INTERRUPTIBLE 1
+#define TASK_UNINTERRUPTIBLE 2
+#define TASK_ZOMBIE 4
+#define TASK_STOPPED 8
+
+#define __set_task_state(tsk, state_value) \
+ do { (tsk)->state = (state_value); } while (0)
+#define set_task_state(tsk, state_value) \
+ set_mb((tsk)->state, (state_value))
+
+#define __set_current_state(state_value) \
+ do { current->state = (state_value); } while (0)
+#define set_current_state(state_value) \
+ set_mb(current->state, (state_value))
+
+/*
+ * Scheduling policies
+ */
+#define SCHED_OTHER 0
+#define SCHED_FIFO 1
+#define SCHED_RR 2
+
+/*
+ * This is an additional bit set when we want to
+ * yield the CPU for one re-schedule..
+ */
+#define SCHED_YIELD 0x10
+
+struct sched_param {
+ int sched_priority;
+};
+
+struct completion;
+
+#ifdef __KERNEL__
+
+#include <linux/spinlock.h>
+
+/*
+ * This serializes "schedule()" and also protects
+ * the run-queue from deletions/modifications (but
+ * _adding_ to the beginning of the run-queue has
+ * a separate lock).
+ */
+extern rwlock_t tasklist_lock;
+extern spinlock_t runqueue_lock;
+extern spinlock_t mmlist_lock;
+
+extern void sched_init(void);
+extern void init_idle(void);
+extern void show_state(void);
+extern void cpu_init (void);
+extern void trap_init(void);
+extern void update_process_times(int user);
+#ifdef CONFIG_NO_IDLE_HZ
+extern void update_process_times_us(int user, int system);
+#endif
+extern void update_one_process(struct task_struct *p, unsigned long user,
+ unsigned long system, int cpu);
+
+#define MAX_SCHEDULE_TIMEOUT LONG_MAX
+extern signed long FASTCALL(schedule_timeout(signed long timeout));
+asmlinkage void schedule(void);
+
+extern int schedule_task(struct tq_struct *task);
+extern void flush_scheduled_tasks(void);
+extern int start_context_thread(void);
+extern int current_is_keventd(void);
+
+#if CONFIG_SMP
+extern void set_cpus_allowed(struct task_struct *p, unsigned long new_mask);
+#else
+# define set_cpus_allowed(p, new_mask) do { } while (0)
+#endif
+
+/*
+ * The default fd array needs to be at least BITS_PER_LONG,
+ * as this is the granularity returned by copy_fdset().
+ */
+#define NR_OPEN_DEFAULT BITS_PER_LONG
+
+struct namespace;
+/*
+ * Open file table structure
+ */
+struct files_struct {
+ atomic_t count;
+	rwlock_t file_lock;	/* Protects all the below members. Nests inside tsk->alloc_lock */
+ int max_fds;
+ int max_fdset;
+ int next_fd;
+ struct file ** fd; /* current fd array */
+ fd_set *close_on_exec;
+ fd_set *open_fds;
+ fd_set close_on_exec_init;
+ fd_set open_fds_init;
+ struct file * fd_array[NR_OPEN_DEFAULT];
+};
+
+#define INIT_FILES \
+{ \
+ count: ATOMIC_INIT(1), \
+ file_lock: RW_LOCK_UNLOCKED, \
+ max_fds: NR_OPEN_DEFAULT, \
+ max_fdset: __FD_SETSIZE, \
+ next_fd: 0, \
+ fd: &init_files.fd_array[0], \
+ close_on_exec: &init_files.close_on_exec_init, \
+ open_fds: &init_files.open_fds_init, \
+ close_on_exec_init: { { 0, } }, \
+ open_fds_init: { { 0, } }, \
+ fd_array: { NULL, } \
+}
+
+/* Maximum number of active map areas.. This is a random (large) number */
+#define DEFAULT_MAX_MAP_COUNT (65536)
+
+extern int max_map_count;
+
+struct mm_struct {
+ struct vm_area_struct * mmap; /* list of VMAs */
+ rb_root_t mm_rb;
+ struct vm_area_struct * mmap_cache; /* last find_vma result */
+ pgd_t * pgd;
+	atomic_t mm_users;			/* How many users with user space? */
+	atomic_t mm_count;			/* How many references to "struct mm_struct" (users count as 1) */
+	int map_count;				/* number of VMAs */
+	struct rw_semaphore mmap_sem;
+	spinlock_t page_table_lock;		/* Protects task page tables and mm->rss */
+
+	struct list_head mmlist;		/* List of all active mm's.  These are globally strung
+						 * together off init_mm.mmlist, and are protected
+						 * by mmlist_lock
+						 */
+
+ unsigned long start_code, end_code, start_data, end_data;
+ unsigned long start_brk, brk, start_stack;
+ unsigned long arg_start, arg_end, env_start, env_end;
+ unsigned long rss, total_vm, locked_vm;
+ unsigned long def_flags;
+ unsigned long cpu_vm_mask;
+ unsigned long swap_address;
+
+ unsigned dumpable:1;
+
+ /* Architecture-specific MM context */
+ mm_context_t context;
+};
+
+extern int mmlist_nr;
+
+#define INIT_MM(name) \
+{ \
+ mm_rb: RB_ROOT, \
+ pgd: swapper_pg_dir, \
+ mm_users: ATOMIC_INIT(2), \
+ mm_count: ATOMIC_INIT(1), \
+ mmap_sem: __RWSEM_INITIALIZER(name.mmap_sem), \
+ page_table_lock: SPIN_LOCK_UNLOCKED, \
+ mmlist: LIST_HEAD_INIT(name.mmlist), \
+}
+
+struct signal_struct {
+ atomic_t count;
+ struct k_sigaction action[_NSIG];
+ spinlock_t siglock;
+};
+
+
+#define INIT_SIGNALS { \
+ count: ATOMIC_INIT(1), \
+ action: { {{0,}}, }, \
+ siglock: SPIN_LOCK_UNLOCKED \
+}
+
+/*
+ * Some day this will be a full-fledged user tracking system..
+ */
+struct user_struct {
+ atomic_t __count; /* reference count */
+ atomic_t processes; /* How many processes does this user have? */
+ atomic_t files; /* How many open files does this user have? */
+
+ /* Hash table maintenance information */
+ struct user_struct *next, **pprev;
+ uid_t uid;
+};
+
+#define get_current_user() ({ \
+ struct user_struct *__tmp_user = current->user; \
+ atomic_inc(&__tmp_user->__count); \
+ __tmp_user; })
+
+extern struct user_struct root_user;
+#define INIT_USER (&root_user)
+
+struct task_struct {
+ /*
+ * offsets of these are hardcoded elsewhere - touch with care
+ */
+ volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
+ unsigned long flags; /* per process flags, defined below */
+ int sigpending;
+ mm_segment_t addr_limit; /* thread address space:
+					0-0xBFFFFFFF for user-thread
+ 0-0xFFFFFFFF for kernel-thread
+ */
+ struct exec_domain *exec_domain;
+ volatile long need_resched;
+ unsigned long ptrace;
+
+ int lock_depth; /* Lock depth */
+
+/*
+ * offset 32 begins here on 32-bit platforms. We keep
+ * all fields in a single cacheline that are needed for
+ * the goodness() loop in schedule().
+ */
+ long counter;
+ long nice;
+ unsigned long policy;
+ struct mm_struct *mm;
+ int processor;
+ /*
+ * cpus_runnable is ~0 if the process is not running on any
+ * CPU. It's (1 << cpu) if it's running on a CPU. This mask
+ * is updated under the runqueue lock.
+ *
+ * To determine whether a process might run on a CPU, this
+ * mask is AND-ed with cpus_allowed.
+ */
+ unsigned long cpus_runnable, cpus_allowed;
+ /*
+ * (only the 'next' pointer fits into the cacheline, but
+ * that's just fine.)
+ */
+ struct list_head run_list;
+ unsigned long sleep_time;
+
+ struct task_struct *next_task, *prev_task;
+ struct mm_struct *active_mm;
+ struct list_head local_pages;
+ unsigned int allocation_order, nr_local_pages;
+
+/* task state */
+ struct linux_binfmt *binfmt;
+ int exit_code, exit_signal;
+ int pdeath_signal; /* The signal sent when the parent dies */
+ /* ??? */
+ unsigned long personality;
+ int did_exec:1;
+ unsigned task_dumpable:1;
+ pid_t pid;
+ pid_t pgrp;
+ pid_t tty_old_pgrp;
+ pid_t session;
+ pid_t tgid;
+ /* boolean value for session group leader */
+ int leader;
+ /*
+	 * pointers to (original) parent process, youngest child, younger sibling,
+ * older sibling, respectively. (p->father can be replaced with
+ * p->p_pptr->pid)
+ */
+ struct task_struct *p_opptr, *p_pptr, *p_cptr, *p_ysptr, *p_osptr;
+ struct list_head thread_group;
+
+ /* PID hash table linkage. */
+ struct task_struct *pidhash_next;
+ struct task_struct **pidhash_pprev;
+
+ wait_queue_head_t wait_chldexit; /* for wait4() */
+ struct completion *vfork_done; /* for vfork() */
+ unsigned long rt_priority;
+ unsigned long it_real_value, it_prof_value, it_virt_value;
+ unsigned long it_real_incr, it_prof_incr, it_virt_incr;
+ struct timer_list real_timer;
+ struct tms times;
+ unsigned long start_time;
+ long per_cpu_utime[NR_CPUS], per_cpu_stime[NR_CPUS];
+/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
+ unsigned long min_flt, maj_flt, nswap, cmin_flt, cmaj_flt, cnswap;
+ int swappable:1;
+/* process credentials */
+ uid_t uid,euid,suid,fsuid;
+ gid_t gid,egid,sgid,fsgid;
+ int ngroups;
+ gid_t groups[NGROUPS];
+ kernel_cap_t cap_effective, cap_inheritable, cap_permitted;
+ int keep_capabilities:1;
+ struct user_struct *user;
+/* limits */
+ struct rlimit rlim[RLIM_NLIMITS];
+ unsigned short used_math;
+ char comm[16];
+/* file system info */
+ int link_count, total_link_count;
+ struct tty_struct *tty; /* NULL if no tty */
+ unsigned int locks; /* How many file locks are being held */
+/* ipc stuff */
+ struct sem_undo *semundo;
+ struct sem_queue *semsleeping;
+/* CPU-specific state of this task */
+ struct thread_struct thread;
+/* filesystem information */
+ struct fs_struct *fs;
+/* open file information */
+ struct files_struct *files;
+/* namespace */
+ struct namespace *namespace;
+/* signal handlers */
+ spinlock_t sigmask_lock; /* Protects signal and blocked */
+ struct signal_struct *sig;
+
+ sigset_t blocked;
+ struct sigpending pending;
+
+ unsigned long sas_ss_sp;
+ size_t sas_ss_size;
+ int (*notifier)(void *priv);
+ void *notifier_data;
+ sigset_t *notifier_mask;
+
+/* Thread group tracking */
+ u32 parent_exec_id;
+ u32 self_exec_id;
+/* Protection of (de-)allocation: mm, files, fs, tty */
+ spinlock_t alloc_lock;
+
+/* journalling filesystem info */
+ void *journal_info;
+};
+
+/*
+ * Per process flags
+ */
+#define PF_ALIGNWARN 0x00000001 /* Print alignment warning msgs */
+ /* Not implemented yet, only for 486*/
+#define PF_STARTING 0x00000002 /* being created */
+#define PF_EXITING 0x00000004 /* getting shut down */
+#define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */
+#define PF_SUPERPRIV 0x00000100 /* used super-user privileges */
+#define PF_DUMPCORE 0x00000200 /* dumped core */
+#define PF_SIGNALED 0x00000400 /* killed by a signal */
+#define PF_MEMALLOC 0x00000800 /* Allocating memory */
+#define PF_MEMDIE 0x00001000 /* Killed for out-of-memory */
+#define PF_FREE_PAGES 0x00002000 /* per process page freeing */
+#define PF_NOIO		0x00004000	/* avoid generating further I/O */
+#define PF_FSTRANS 0x00008000 /* inside a filesystem transaction */
+
+#define PF_USEDFPU 0x00100000 /* task used FPU this quantum (SMP) */
+
+/*
+ * Ptrace flags
+ */
+
+#define PT_PTRACED 0x00000001
+#define PT_TRACESYS 0x00000002
+#define PT_DTRACE 0x00000004 /* delayed trace (used on m68k, i386) */
+#define PT_TRACESYSGOOD 0x00000008
+#define PT_PTRACE_CAP 0x00000010 /* ptracer can follow suid-exec */
+
+#define is_dumpable(tsk)    ((tsk)->task_dumpable && (tsk)->mm && (tsk)->mm->dumpable)
+
+/*
+ * Limit the stack to some sane default: root can always
+ * increase this limit if needed.. 8MB seems reasonable.
+ */
+#define _STK_LIM (8*1024*1024)
+
+#define DEF_COUNTER (10*HZ/100) /* 100 ms time slice */
+#define MAX_COUNTER (20*HZ/100)
+#define DEF_NICE (0)
+
+extern void yield(void);
+
+/*
+ * The default (Linux) execution domain.
+ */
+extern struct exec_domain default_exec_domain;
+
+/*
+ * INIT_TASK is used to set up the first task table, touch at
+ * your own risk! Base=0, limit=0x1fffff (=2MB)
+ */
+#define INIT_TASK(tsk) \
+{ \
+ state: 0, \
+ flags: 0, \
+    sigpending:		0,						\
+    addr_limit:		KERNEL_DS,					\
+ exec_domain: &default_exec_domain, \
+    lock_depth:		-1,						\
+ counter: DEF_COUNTER, \
+ nice: DEF_NICE, \
+ policy: SCHED_OTHER, \
+    mm:			NULL,						\
+ active_mm: &init_mm, \
+ cpus_runnable: ~0UL, \
+ cpus_allowed: ~0UL, \
+ run_list: LIST_HEAD_INIT(tsk.run_list), \
+ next_task: &tsk, \
+ prev_task: &tsk, \
+ p_opptr: &tsk, \
+ p_pptr: &tsk, \
+ thread_group: LIST_HEAD_INIT(tsk.thread_group), \
+ wait_chldexit: __WAIT_QUEUE_HEAD_INITIALIZER(tsk.wait_chldexit),\
+    real_timer:		{						\
+ function: it_real_fn \
+ }, \
+ cap_effective: CAP_INIT_EFF_SET, \
+ cap_inheritable: CAP_INIT_INH_SET, \
+ cap_permitted: CAP_FULL_SET, \
+ keep_capabilities: 0, \
+ rlim: INIT_RLIMITS, \
+ user: INIT_USER, \
+ comm: "swapper", \
+ thread: INIT_THREAD, \
+    fs:			&init_fs,					\
+ files: &init_files, \
+ sigmask_lock: SPIN_LOCK_UNLOCKED, \
+ sig: &init_signals, \
+ pending: { NULL, &tsk.pending.head, {{0}}}, \
+ blocked: {{0}}, \
+    alloc_lock:		SPIN_LOCK_UNLOCKED,				\
+ journal_info: NULL, \
+}
+
+
+#ifndef INIT_TASK_SIZE
+# define INIT_TASK_SIZE 2048*sizeof(long)
+#endif
+
+union task_union {
+ struct task_struct task;
+ unsigned long stack[INIT_TASK_SIZE/sizeof(long)];
+};
+
+extern union task_union init_task_union;
+
+extern struct mm_struct init_mm;
+extern struct task_struct *init_tasks[NR_CPUS];
+
+/* PID hashing. (shouldn't this be dynamic?) */
+#define PIDHASH_SZ (4096 >> 2)
+extern struct task_struct *pidhash[PIDHASH_SZ];
+
+#define pid_hashfn(x) ((((x) >> 8) ^ (x)) & (PIDHASH_SZ - 1))
+
+static inline void hash_pid(struct task_struct *p)
+{
+ struct task_struct **htable = &pidhash[pid_hashfn(p->pid)];
+
+ if((p->pidhash_next = *htable) != NULL)
+ (*htable)->pidhash_pprev = &p->pidhash_next;
+ *htable = p;
+ p->pidhash_pprev = htable;
+}
+
+static inline void unhash_pid(struct task_struct *p)
+{
+ if(p->pidhash_next)
+ p->pidhash_next->pidhash_pprev = p->pidhash_pprev;
+ *p->pidhash_pprev = p->pidhash_next;
+}
+
+static inline struct task_struct *find_task_by_pid(int pid)
+{
+ struct task_struct *p, **htable = &pidhash[pid_hashfn(pid)];
+
+ for(p = *htable; p && p->pid != pid; p = p->pidhash_next)
+ ;
+
+ return p;
+}
+
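+/*
+ * Lookup sketch (illustrative only); the hash is protected by
+ * tasklist_lock, which callers must hold:
+ *
+ *	read_lock(&tasklist_lock);
+ *	p = find_task_by_pid(pid);
+ *	if (p)
+ *		... use p ...
+ *	read_unlock(&tasklist_lock);
+ */
+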
+#define task_has_cpu(tsk) ((tsk)->cpus_runnable != ~0UL)
+
+static inline void task_set_cpu(struct task_struct *tsk, unsigned int cpu)
+{
+ tsk->processor = cpu;
+ tsk->cpus_runnable = 1UL << cpu;
+}
+
+static inline void task_release_cpu(struct task_struct *tsk)
+{
+ tsk->cpus_runnable = ~0UL;
+}
+
+/* per-UID process charging. */
+extern struct user_struct * alloc_uid(uid_t);
+extern void free_uid(struct user_struct *);
+extern void switch_uid(struct user_struct *);
+
+#include <asm/current.h>
+
+extern unsigned long volatile jiffies;
+extern unsigned long itimer_ticks;
+extern unsigned long itimer_next;
+extern struct timeval xtime;
+extern void do_timer(struct pt_regs *);
+#ifdef CONFIG_NO_IDLE_HZ
+extern void do_timer_ticks(int ticks);
+#endif
+
+extern unsigned int * prof_buffer;
+extern unsigned long prof_len;
+extern unsigned long prof_shift;
+
+#define CURRENT_TIME (xtime.tv_sec)
+
+extern void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode, int nr));
+extern void FASTCALL(__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr));
+extern void FASTCALL(sleep_on(wait_queue_head_t *q));
+extern long FASTCALL(sleep_on_timeout(wait_queue_head_t *q,
+ signed long timeout));
+extern void FASTCALL(interruptible_sleep_on(wait_queue_head_t *q));
+extern long FASTCALL(interruptible_sleep_on_timeout(wait_queue_head_t *q,
+ signed long timeout));
+extern int FASTCALL(wake_up_process(struct task_struct * tsk));
+
+#define wake_up(x)			__wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1)
+#define wake_up_nr(x, nr)		__wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr)
+#define wake_up_all(x)			__wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0)
+#define wake_up_sync(x)			__wake_up_sync((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1)
+#define wake_up_sync_nr(x, nr)		__wake_up_sync((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr)
+#define wake_up_interruptible(x)	__wake_up((x),TASK_INTERRUPTIBLE, 1)
+#define wake_up_interruptible_nr(x, nr)	__wake_up((x),TASK_INTERRUPTIBLE, nr)
+#define wake_up_interruptible_all(x)	__wake_up((x),TASK_INTERRUPTIBLE, 0)
+#define wake_up_interruptible_sync(x)	__wake_up_sync((x),TASK_INTERRUPTIBLE, 1)
+#define wake_up_interruptible_sync_nr(x, nr) __wake_up_sync((x),TASK_INTERRUPTIBLE, nr)
+asmlinkage long sys_wait4(pid_t pid,unsigned int * stat_addr, int options, struct rusage * ru);
+
+extern int in_group_p(gid_t);
+extern int in_egroup_p(gid_t);
+
+extern void proc_caches_init(void);
+extern void flush_signals(struct task_struct *);
+extern void flush_signal_handlers(struct task_struct *);
+extern void sig_exit(int, int, struct siginfo *);
+extern int dequeue_signal(sigset_t *, siginfo_t *);
+extern void block_all_signals(int (*notifier)(void *priv), void *priv,
+ sigset_t *mask);
+extern void unblock_all_signals(void);
+extern int send_sig_info(int, struct siginfo *, struct task_struct *);
+extern int force_sig_info(int, struct siginfo *, struct task_struct *);
+extern int kill_pg_info(int, struct siginfo *, pid_t);
+extern int kill_sl_info(int, struct siginfo *, pid_t);
+extern int kill_proc_info(int, struct siginfo *, pid_t);
+extern void notify_parent(struct task_struct *, int);
+extern void do_notify_parent(struct task_struct *, int);
+extern void force_sig(int, struct task_struct *);
+extern int send_sig(int, struct task_struct *, int);
+extern int kill_pg(pid_t, int, int);
+extern int kill_sl(pid_t, int, int);
+extern int kill_proc(pid_t, int, int);
+extern int do_sigaction(int, const struct k_sigaction *, struct k_sigaction *);
+extern int do_sigaltstack(const stack_t *, stack_t *, unsigned long);
+
+static inline int signal_pending(struct task_struct *p)
+{
+ return (p->sigpending != 0);
+}
+
+/*
+ * Re-calculate pending state from the set of locally pending
+ * signals, globally pending signals, and blocked signals.
+ */
+static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
+{
+ unsigned long ready;
+ long i;
+
+ switch (_NSIG_WORDS) {
+ default:
+ for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
+ ready |= signal->sig[i] &~ blocked->sig[i];
+ break;
+
+ case 4: ready = signal->sig[3] &~ blocked->sig[3];
+ ready |= signal->sig[2] &~ blocked->sig[2];
+ ready |= signal->sig[1] &~ blocked->sig[1];
+ ready |= signal->sig[0] &~ blocked->sig[0];
+ break;
+
+ case 2: ready = signal->sig[1] &~ blocked->sig[1];
+ ready |= signal->sig[0] &~ blocked->sig[0];
+ break;
+
+ case 1: ready = signal->sig[0] &~ blocked->sig[0];
+ }
+ return ready != 0;
+}
+
+/* Reevaluate whether the task has signals pending delivery.
+ This is required every time the blocked sigset_t changes.
+ All callers should have t->sigmask_lock. */
+
+static inline void recalc_sigpending(struct task_struct *t)
+{
+ t->sigpending = has_pending_signals(&t->pending.signal, &t->blocked);
+}
+
+/* True if we are on the alternate signal stack. */
+
+static inline int on_sig_stack(unsigned long sp)
+{
+ return (sp - current->sas_ss_sp < current->sas_ss_size);
+}
+
+static inline int sas_ss_flags(unsigned long sp)
+{
+ return (current->sas_ss_size == 0 ? SS_DISABLE
+ : on_sig_stack(sp) ? SS_ONSTACK : 0);
+}
+
+extern int request_irq(unsigned int,
+ void (*handler)(int, void *, struct pt_regs *),
+ unsigned long, const char *, void *);
+extern void free_irq(unsigned int, void *);
+
+/*
+ * This has now become a routine instead of a macro, it sets a flag if
+ * it returns true (to do BSD-style accounting where the process is flagged
+ * if it uses root privs). The implication of this is that you should do
+ * normal permissions checks first, and check suser() last.
+ *
+ * [Dec 1997 -- Chris Evans]
+ * For correctness, the above considerations need to be extended to
+ * fsuser(). This is done, along with moving fsuser() checks to be
+ * last.
+ *
+ * These will be removed, but in the mean time, when the SECURE_NOROOT
+ * flag is set, uids don't grant privilege.
+ */
+static inline int suser(void)
+{
+ if (!issecure(SECURE_NOROOT) && current->euid == 0) {
+ current->flags |= PF_SUPERPRIV;
+ return 1;
+ }
+ return 0;
+}
+
+static inline int fsuser(void)
+{
+ if (!issecure(SECURE_NOROOT) && current->fsuid == 0) {
+ current->flags |= PF_SUPERPRIV;
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * capable() checks for a particular capability.
+ * New privilege checks should use this interface, rather than suser() or
+ * fsuser(). See include/linux/capability.h for defined capabilities.
+ */
+
+static inline int capable(int cap)
+{
+#if 1 /* ok now */
+ if (cap_raised(current->cap_effective, cap))
+#else
+ if (cap_is_fs_cap(cap) ? current->fsuid == 0 : current->euid == 0)
+#endif
+ {
+ current->flags |= PF_SUPERPRIV;
+ return 1;
+ }
+ return 0;
+}
+
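+/*
+ * Usage sketch (illustrative only): privilege checks name the specific
+ * capability instead of testing euid == 0 directly, e.g.
+ *
+ *	if (!capable(CAP_SYS_ADMIN))
+ *		return -EPERM;
+ */
+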
+/*
+ * Routines for handling mm_structs
+ */
+extern struct mm_struct * mm_alloc(void);
+
+extern struct mm_struct * start_lazy_tlb(void);
+extern void end_lazy_tlb(struct mm_struct *mm);
+
+/* mmdrop drops the mm and the page tables */
+extern void FASTCALL(__mmdrop(struct mm_struct *));
+static inline void mmdrop(struct mm_struct * mm)
+{
+ if (atomic_dec_and_test(&mm->mm_count))
+ __mmdrop(mm);
+}
+
+/* mmput gets rid of the mappings and all user-space */
+extern void mmput(struct mm_struct *);
+/* Remove the current task's stale references to the old mm_struct */
+extern void mm_release(void);
+
+/*
+ * Routines for handling the fd arrays
+ */
+extern struct file ** alloc_fd_array(int);
+extern int expand_fd_array(struct files_struct *, int nr);
+extern void free_fd_array(struct file **, int);
+
+extern fd_set *alloc_fdset(int);
+extern int expand_fdset(struct files_struct *, int nr);
+extern void free_fdset(fd_set *, int);
+
+extern int copy_thread(int, unsigned long, unsigned long, unsigned long,
struct task_struct *, struct pt_regs *);
+extern void flush_thread(void);
+extern void exit_thread(void);
+
+extern void exit_mm(struct task_struct *);
+extern void exit_files(struct task_struct *);
+extern void exit_sighand(struct task_struct *);
+
+extern void reparent_to_init(void);
+extern void daemonize(void);
+
+extern int do_execve(char *, char **, char **, struct pt_regs *);
+extern int do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned
long);
+
+extern void set_task_comm(struct task_struct *tsk, char *from);
+extern void get_task_comm(char *to, struct task_struct *tsk);
+
+extern void FASTCALL(add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait));
+extern void FASTCALL(add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait));
+extern void FASTCALL(remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait));
+
+extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
+
+#define __wait_event(wq, condition) \
+do { \
+ wait_queue_t __wait; \
+ init_waitqueue_entry(&__wait, current); \
+ \
+ add_wait_queue(&wq, &__wait); \
+ for (;;) { \
+ set_current_state(TASK_UNINTERRUPTIBLE); \
+ if (condition) \
+ break; \
+ schedule(); \
+ } \
+ current->state = TASK_RUNNING; \
+ remove_wait_queue(&wq, &__wait); \
+} while (0)
+
+#define wait_event(wq, condition) \
+do { \
+ if (condition) \
+ break; \
+ __wait_event(wq, condition); \
+} while (0)
+
+#define __wait_event_interruptible(wq, condition, ret) \
+do { \
+ wait_queue_t __wait; \
+ init_waitqueue_entry(&__wait, current); \
+ \
+ add_wait_queue(&wq, &__wait); \
+ for (;;) { \
+ set_current_state(TASK_INTERRUPTIBLE); \
+ if (condition) \
+ break; \
+ if (!signal_pending(current)) { \
+ schedule(); \
+ continue; \
+ } \
+ ret = -ERESTARTSYS; \
+ break; \
+ } \
+ current->state = TASK_RUNNING; \
+ remove_wait_queue(&wq, &__wait); \
+} while (0)
+
+#define wait_event_interruptible(wq, condition)				\
+({ \
+ int __ret = 0; \
+ if (!(condition)) \
+ __wait_event_interruptible(wq, condition, __ret); \
+ __ret; \
+})
+
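+/*
+ * Usage sketch (illustrative only), assuming a driver-private wait queue
+ * 'wq' and completion flag 'done':
+ *
+ *	sleeper:	if (wait_event_interruptible(wq, done))
+ *				return -ERESTARTSYS;	(woken by a signal)
+ *	waker:		done = 1;
+ *			wake_up(&wq);
+ */
+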
+#define REMOVE_LINKS(p) do { \
+ (p)->next_task->prev_task = (p)->prev_task; \
+ (p)->prev_task->next_task = (p)->next_task; \
+ if ((p)->p_osptr) \
+ (p)->p_osptr->p_ysptr = (p)->p_ysptr; \
+ if ((p)->p_ysptr) \
+ (p)->p_ysptr->p_osptr = (p)->p_osptr; \
+ else \
+ (p)->p_pptr->p_cptr = (p)->p_osptr; \
+ } while (0)
+
+#define SET_LINKS(p) do { \
+ (p)->next_task = &init_task; \
+ (p)->prev_task = init_task.prev_task; \
+ init_task.prev_task->next_task = (p); \
+ init_task.prev_task = (p); \
+ (p)->p_ysptr = NULL; \
+ if (((p)->p_osptr = (p)->p_pptr->p_cptr) != NULL) \
+ (p)->p_osptr->p_ysptr = p; \
+ (p)->p_pptr->p_cptr = p; \
+ } while (0)
+
+#define for_each_task(p) \
+ for (p = &init_task ; (p = p->next_task) != &init_task ; )
+
+#define for_each_thread(task) \
+	for (task = next_thread(current) ; task != current ; task = next_thread(task))
+
+#define next_thread(p) \
+ list_entry((p)->thread_group.next, struct task_struct, thread_group)
+
+#define thread_group_leader(p) (p->pid == p->tgid)
+
+static inline void del_from_runqueue(struct task_struct * p)
+{
+ nr_running--;
+ p->sleep_time = jiffies;
+ list_del(&p->run_list);
+ p->run_list.next = NULL;
+}
+
+static inline int task_on_runqueue(struct task_struct *p)
+{
+ return (p->run_list.next != NULL);
+}
+
+static inline void unhash_process(struct task_struct *p)
+{
+ if (task_on_runqueue(p))
+ out_of_line_bug();
+ write_lock_irq(&tasklist_lock);
+ nr_threads--;
+ unhash_pid(p);
+ REMOVE_LINKS(p);
+ list_del(&p->thread_group);
+ write_unlock_irq(&tasklist_lock);
+}
+
+/* Protects ->fs, ->files, ->mm, and synchronises with wait4().  Nests inside tasklist_lock */
+static inline void task_lock(struct task_struct *p)
+{
+ spin_lock(&p->alloc_lock);
+}
+
+static inline void task_unlock(struct task_struct *p)
+{
+ spin_unlock(&p->alloc_lock);
+}
+
+/* write full pathname into buffer and return start of pathname */
+static inline char * d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
+ char *buf, int buflen)
+{
+ char *res;
+ struct vfsmount *rootmnt;
+ struct dentry *root;
+	read_lock(&current->fs->lock);
+	rootmnt = mntget(current->fs->rootmnt);
+	root = dget(current->fs->root);
+	read_unlock(&current->fs->lock);
+ spin_lock(&dcache_lock);
+ res = __d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
+ spin_unlock(&dcache_lock);
+ dput(root);
+ mntput(rootmnt);
+ return res;
+}
+
+static inline int need_resched(void)
+{
+ return (unlikely(current->need_resched));
+}
+
+extern void __cond_resched(void);
+static inline void cond_resched(void)
+{
+ if (need_resched())
+ __cond_resched();
+}
+
+#endif /* __KERNEL__ */
+#endif
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/include/linux/skbuff.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/include/linux/skbuff.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,1181 @@
+/*
+ * Definitions for the 'struct sk_buff' memory handlers.
+ *
+ * Authors:
+ * Alan Cox, <gw4pts@xxxxxxxxxxxxxxx>
+ * Florian La Roche, <rzsfl@xxxxxxxxxxxx>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_SKBUFF_H
+#define _LINUX_SKBUFF_H
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/cache.h>
+
+#include <asm/atomic.h>
+#include <asm/types.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+
+#define HAVE_ALLOC_SKB /* For the drivers to know */
+#define HAVE_ALIGNABLE_SKB /* Ditto 8) */
+#define SLAB_SKB /* Slabified skbuffs */
+
+#define CHECKSUM_NONE 0
+#define CHECKSUM_HW 1
+#define CHECKSUM_UNNECESSARY 2
+
+#define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES-1)) & ~(SMP_CACHE_BYTES-1))
+#define SKB_MAX_ORDER(X,ORDER)	(((PAGE_SIZE<<(ORDER)) - (X) - sizeof(struct skb_shared_info))&~(SMP_CACHE_BYTES-1))
+#define SKB_MAX_HEAD(X) (SKB_MAX_ORDER((X),0))
+#define SKB_MAX_ALLOC (SKB_MAX_ORDER(0,2))
+
+/* A. Checksumming of received packets by device.
+ *
+ * NONE: device failed to checksum this packet.
+ * skb->csum is undefined.
+ *
+ * UNNECESSARY: device parsed the packet and claims the checksum is verified.
+ *	skb->csum is undefined.
+ *	It is a bad option, but, unfortunately, many vendors do this.
+ *	Apparently with the secret goal of selling you a new device
+ *	when you add a new protocol to your host, e.g. IPv6. 8)
+ *
+ * HW: the most generic way. Device supplied checksum of _all_
+ * the packet as seen by netif_rx in skb->csum.
+ * NOTE: Even if device supports only some protocols, but
+ * is able to produce some skb->csum, it MUST use HW,
+ * not UNNECESSARY.
+ *
+ * B. Checksumming on output.
+ *
+ * NONE: skb is checksummed by protocol or csum is not required.
+ *
+ * HW: device is required to csum packet as seen by hard_start_xmit
+ * from skb->h.raw to the end and to record the checksum
+ * at skb->h.raw+skb->csum.
+ *
+ * Device must show its capabilities in dev->features, set
+ * at device setup time.
+ *	NETIF_F_HW_CSUM	- a clever device, able to checksum everything.
+ *	NETIF_F_NO_CSUM	- loopback or reliable single hop media.
+ *	NETIF_F_IP_CSUM	- device is dumb, able to csum only
+ *			  TCP/UDP over IPv4. Sigh. Vendors like it this
+ *			  way for some unknown reason. Though, see the
+ *			  comment above about CHECKSUM_UNNECESSARY. 8)
+ *
+ * Any questions? No questions, good. --ANK
+ */
+
+#ifdef __i386__
+#define NET_CALLER(arg) (*(((void**)&arg)-1))
+#else
+#define NET_CALLER(arg) __builtin_return_address(0)
+#endif
+
+#ifdef CONFIG_NETFILTER
+struct nf_conntrack {
+ atomic_t use;
+ void (*destroy)(struct nf_conntrack *);
+};
+
+struct nf_ct_info {
+ struct nf_conntrack *master;
+};
+#endif
+
+struct sk_buff_head {
+ /* These two members must be first. */
+ struct sk_buff * next;
+ struct sk_buff * prev;
+
+ __u32 qlen;
+ spinlock_t lock;
+};
+
+struct sk_buff;
+
+#define MAX_SKB_FRAGS 6
+
+typedef struct skb_frag_struct skb_frag_t;
+
+struct skb_frag_struct
+{
+ struct page *page;
+ __u16 page_offset;
+ __u16 size;
+};
+
+/* This data is invariant across clones and lives at
+ * the end of the header data, ie. at skb->end.
+ */
+struct skb_shared_info {
+ atomic_t dataref;
+ unsigned int nr_frags;
+ struct sk_buff *frag_list;
+ skb_frag_t frags[MAX_SKB_FRAGS];
+};
+
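+/*
+ * Access sketch (illustrative only): the shared info lives at skb->end,
+ * conventionally reached through an skb_shinfo()-style accessor such as
+ *
+ *	((struct skb_shared_info *)((skb)->end))->nr_frags
+ */
+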
+struct sk_buff {
+ /* These two members must be first. */
+	struct sk_buff	* next;			/* Next buffer in list				*/
+	struct sk_buff	* prev;			/* Previous buffer in list			*/
+
+	struct sk_buff_head * list;		/* List we are on				*/
+	struct sock	*sk;			/* Socket we are owned by			*/
+	struct timeval	stamp;			/* Time we arrived				*/
+	struct net_device	*dev;		/* Device we arrived on/are leaving by		*/
+	struct net_device	*real_dev;	/* For support of point to point protocols
+						   (e.g. 802.3ad) over bonding, we must save the
+						   physical device that got the packet before
+						   replacing skb->dev with the virtual device.  */
+
+ /* Transport layer header */
+ union
+ {
+ struct tcphdr *th;
+ struct udphdr *uh;
+ struct icmphdr *icmph;
+ struct igmphdr *igmph;
+ struct iphdr *ipiph;
+ struct spxhdr *spxh;
+ unsigned char *raw;
+ } h;
+
+ /* Network layer header */
+ union
+ {
+ struct iphdr *iph;
+ struct ipv6hdr *ipv6h;
+ struct arphdr *arph;
+ struct ipxhdr *ipxh;
+ unsigned char *raw;
+ } nh;
+
+ /* Link layer header */
+ union
+ {
+ struct ethhdr *ethernet;
+ unsigned char *raw;
+ } mac;
+
+ struct dst_entry *dst;
+
+ /*
+ * This is the control buffer. It is free to use for every
+ * layer. Please put your private variables there. If you
+ * want to keep them across layers you have to do a skb_clone()
+ * first. This is owned by whoever has the skb queued ATM.
+ */
+ char cb[48];
+
+ unsigned int len; /* Length of actual data */
+ unsigned int data_len;
+ unsigned int csum; /* Checksum */
+ unsigned char __unused, /* Dead field, may be reused */
+ cloned, /* head may be cloned (check refcnt to be sure). */
+ pkt_type, /* Packet class */
+ ip_summed; /* Driver fed us an IP checksum */
+ __u32 priority; /* Packet queueing priority */
+ atomic_t users; /* User count - see datagram.c,tcp.c */
+ unsigned short protocol; /* Packet protocol from driver. */
+ unsigned short security; /* Security level of packet */
+ unsigned int truesize; /* Buffer size */
+
+ unsigned char *head; /* Head of buffer */
+ unsigned char *data; /* Data head pointer */
+ unsigned char *tail; /* Tail pointer */
+ unsigned char *end; /* End pointer */
+
+ void (*destructor)(struct sk_buff *); /* Destruct function */
+#ifdef CONFIG_NETFILTER
+ /* Can be used for communication between hooks. */
+ unsigned long nfmark;
+ /* Cache info */
+ __u32 nfcache;
+ /* Associated connection, if any */
+ struct nf_ct_info *nfct;
+#ifdef CONFIG_NETFILTER_DEBUG
+ unsigned int nf_debug;
+#endif
+#endif /*CONFIG_NETFILTER*/
+
+#if defined(CONFIG_HIPPI)
+ union{
+ __u32 ifield;
+ } private;
+#endif
+
+#ifdef CONFIG_NET_SCHED
+ __u32 tc_index; /* traffic control index */
+#endif
+};
+
+#ifdef __KERNEL__
+/*
+ * Handling routines are only of interest to the kernel
+ */
+#include <linux/slab.h>
+
+#include <asm/system.h>
+
+extern void __kfree_skb(struct sk_buff *skb);
+extern struct sk_buff * alloc_skb(unsigned int size, int priority);
+extern struct sk_buff * alloc_skb_from_cache(kmem_cache_t *cp, unsigned int size, int priority);
+extern void kfree_skbmem(struct sk_buff *skb);
+extern struct sk_buff * skb_clone(struct sk_buff *skb, int priority);
+extern struct sk_buff * skb_copy(const struct sk_buff *skb, int priority);
+extern struct sk_buff * pskb_copy(struct sk_buff *skb, int gfp_mask);
+extern int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, int gfp_mask);
+extern struct sk_buff * skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom);
+extern struct sk_buff * skb_copy_expand(const struct sk_buff *skb, int newheadroom, int newtailroom, int priority);
+extern struct sk_buff * skb_pad(struct sk_buff *skb, int pad);
+#define dev_kfree_skb(a) kfree_skb(a)
+extern void skb_over_panic(struct sk_buff *skb, int len, void *here);
+extern void skb_under_panic(struct sk_buff *skb, int len, void *here);
+
+/* Internal */
+#define skb_shinfo(SKB) ((struct skb_shared_info *)((SKB)->end))
+
+/**
+ * skb_queue_empty - check if a queue is empty
+ * @list: queue head
+ *
+ * Returns true if the queue is empty, false otherwise.
+ */
+
+static inline int skb_queue_empty(struct sk_buff_head *list)
+{
+ return (list->next == (struct sk_buff *) list);
+}
+
+/**
+ * skb_get - reference buffer
+ * @skb: buffer to reference
+ *
+ * Makes another reference to a socket buffer and returns a pointer
+ * to the buffer.
+ */
+
+static inline struct sk_buff *skb_get(struct sk_buff *skb)
+{
+ atomic_inc(&skb->users);
+ return skb;
+}
+
+/*
+ * If users == 1, we are the only owner and can avoid a redundant
+ * atomic change.
+ */
+
+/**
+ * kfree_skb - free an sk_buff
+ * @skb: buffer to free
+ *
+ * Drop a reference to the buffer and free it if the usage count has
+ * hit zero.
+ */
+
+static inline void kfree_skb(struct sk_buff *skb)
+{
+ if (likely(atomic_read(&skb->users) == 1))
+ smp_rmb();
+ else if (likely(!atomic_dec_and_test(&skb->users)))
+ return;
+ __kfree_skb(skb);
+}
+
+/**
+ * skb_cloned - is the buffer a clone
+ * @skb: buffer to check
+ *
+ * Returns true if the buffer was generated with skb_clone() and is
+ * one of multiple shared copies of the buffer. Cloned buffers are
+ * shared data so must not be written to under normal circumstances.
+ */
+
+static inline int skb_cloned(struct sk_buff *skb)
+{
+ return skb->cloned && atomic_read(&skb_shinfo(skb)->dataref) != 1;
+}
+
+/**
+ * skb_shared - is the buffer shared
+ * @skb: buffer to check
+ *
+ * Returns true if more than one person has a reference to this
+ * buffer.
+ */
+
+static inline int skb_shared(struct sk_buff *skb)
+{
+ return (atomic_read(&skb->users) != 1);
+}
+
+/**
+ * skb_share_check - check if buffer is shared and if so clone it
+ * @skb: buffer to check
+ * @pri: priority for memory allocation
+ *
+ * If the buffer is shared the buffer is cloned and the old copy
+ * drops a reference. A new clone with a single reference is returned.
+ * If the buffer is not shared the original buffer is returned. When
+ * called from interrupt context or with spinlocks held, @pri must
+ * be %GFP_ATOMIC.
+ *
+ * NULL is returned on a memory allocation failure.
+ */
+
+static inline struct sk_buff *skb_share_check(struct sk_buff *skb, int pri)
+{
+ if (skb_shared(skb)) {
+ struct sk_buff *nskb;
+ nskb = skb_clone(skb, pri);
+ kfree_skb(skb);
+ return nskb;
+ }
+ return skb;
+}
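+
+/*
+ * Typical use of skb_share_check() (an editorial example): a receive
+ * handler that may modify the buffer first makes sure it holds the
+ * only reference:
+ *
+ *     skb = skb_share_check(skb, GFP_ATOMIC);
+ *     if (skb == NULL)
+ *             return;         (clone failed; original already freed)
+ */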
+
+
+/*
+ * Copy shared buffers into a new sk_buff. We effectively do COW on
+ * packets to handle cases where we have both a local reader and a
+ * forwarding path, and a couple of other messy ones. The normal one
+ * is tcpdumping a packet that's being forwarded.
+ */
+
+/**
+ * skb_unshare - make a copy of a shared buffer
+ * @skb: buffer to check
+ * @pri: priority for memory allocation
+ *
+ * If the socket buffer is a clone then this function creates a new
+ * copy of the data, drops a reference count on the old copy and returns
+ * the new copy with the reference count at 1. If the buffer is not a clone
+ * the original buffer is returned. When called with a spinlock held or
+ * from interrupt context, @pri must be %GFP_ATOMIC.
+ *
+ * %NULL is returned on a memory allocation failure.
+ */
+
+static inline struct sk_buff *skb_unshare(struct sk_buff *skb, int pri)
+{
+ struct sk_buff *nskb;
+ if(!skb_cloned(skb))
+ return skb;
+ nskb=skb_copy(skb, pri);
+ kfree_skb(skb); /* Free our shared copy */
+ return nskb;
+}
+
+/**
+ * skb_peek
+ * @list_: list to peek at
+ *
+ * Peek an &sk_buff. Unlike most other operations you _MUST_
+ * be careful with this one. A peek leaves the buffer on the
+ * list and someone else may run off with it. You must hold
+ * the appropriate locks or have a private queue to do this.
+ *
+ * Returns %NULL for an empty list or a pointer to the head element.
+ * The reference count is not incremented and the reference is therefore
+ * volatile. Use with caution.
+ */
+
+static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
+{
+ struct sk_buff *list = ((struct sk_buff *)list_)->next;
+ if (list == (struct sk_buff *)list_)
+ list = NULL;
+ return list;
+}
+
+/**
+ * skb_peek_tail
+ * @list_: list to peek at
+ *
+ * Peek an &sk_buff. Unlike most other operations you _MUST_
+ * be careful with this one. A peek leaves the buffer on the
+ * list and someone else may run off with it. You must hold
+ * the appropriate locks or have a private queue to do this.
+ *
+ * Returns %NULL for an empty list or a pointer to the tail element.
+ * The reference count is not incremented and the reference is therefore
+ * volatile. Use with caution.
+ */
+
+static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
+{
+ struct sk_buff *list = ((struct sk_buff *)list_)->prev;
+ if (list == (struct sk_buff *)list_)
+ list = NULL;
+ return list;
+}
+
+/**
+ * skb_queue_len - get queue length
+ * @list_: list to measure
+ *
+ * Return the length of an &sk_buff queue.
+ */
+
+static inline __u32 skb_queue_len(struct sk_buff_head *list_)
+{
+ return(list_->qlen);
+}
+
+static inline void skb_queue_head_init(struct sk_buff_head *list)
+{
+ spin_lock_init(&list->lock);
+ list->prev = (struct sk_buff *)list;
+ list->next = (struct sk_buff *)list;
+ list->qlen = 0;
+}
+
+/*
+ * Insert an sk_buff at the start of a list.
+ *
+ * The "__skb_xxxx()" functions are the non-atomic ones that
+ * can only be called with interrupts disabled.
+ */
+
+/**
+ * __skb_queue_head - queue a buffer at the list head
+ * @list: list to use
+ * @newsk: buffer to queue
+ *
+ * Queue a buffer at the start of a list. This function takes no locks
+ * and you must therefore hold required locks before calling it.
+ *
+ * A buffer cannot be placed on two lists at the same time.
+ */
+
+static inline void __skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
+{
+ struct sk_buff *prev, *next;
+
+ newsk->list = list;
+ list->qlen++;
+ prev = (struct sk_buff *)list;
+ next = prev->next;
+ newsk->next = next;
+ newsk->prev = prev;
+ next->prev = newsk;
+ prev->next = newsk;
+}
+
+
+/**
+ * skb_queue_head - queue a buffer at the list head
+ * @list: list to use
+ * @newsk: buffer to queue
+ *
+ * Queue a buffer at the start of the list. This function takes the
+ * list lock and can be used safely with other locking &sk_buff functions.
+ *
+ * A buffer cannot be placed on two lists at the same time.
+ */
+
+static inline void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&list->lock, flags);
+ __skb_queue_head(list, newsk);
+ spin_unlock_irqrestore(&list->lock, flags);
+}
+
+/**
+ * __skb_queue_tail - queue a buffer at the list tail
+ * @list: list to use
+ * @newsk: buffer to queue
+ *
+ * Queue a buffer at the end of a list. This function takes no locks
+ * and you must therefore hold required locks before calling it.
+ *
+ * A buffer cannot be placed on two lists at the same time.
+ */
+
+
+static inline void __skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
+{
+ struct sk_buff *prev, *next;
+
+ newsk->list = list;
+ list->qlen++;
+ next = (struct sk_buff *)list;
+ prev = next->prev;
+ newsk->next = next;
+ newsk->prev = prev;
+ next->prev = newsk;
+ prev->next = newsk;
+}
+
+/**
+ * skb_queue_tail - queue a buffer at the list tail
+ * @list: list to use
+ * @newsk: buffer to queue
+ *
+ * Queue a buffer at the tail of the list. This function takes the
+ * list lock and can be used safely with other locking &sk_buff functions.
+ *
+ * A buffer cannot be placed on two lists at the same time.
+ */
+
+static inline void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&list->lock, flags);
+ __skb_queue_tail(list, newsk);
+ spin_unlock_irqrestore(&list->lock, flags);
+}
+
+/**
+ * __skb_dequeue - remove from the head of the queue
+ * @list: list to dequeue from
+ *
+ * Remove the head of the list. This function does not take any locks
+ * so must be used with appropriate locks held only. The head item is
+ * returned or %NULL if the list is empty.
+ */
+
+static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
+{
+ struct sk_buff *next, *prev, *result;
+
+ prev = (struct sk_buff *) list;
+ next = prev->next;
+ result = NULL;
+ if (next != prev) {
+ result = next;
+ next = next->next;
+ list->qlen--;
+ next->prev = prev;
+ prev->next = next;
+ result->next = NULL;
+ result->prev = NULL;
+ result->list = NULL;
+ }
+ return result;
+}
+
+/**
+ * skb_dequeue - remove from the head of the queue
+ * @list: list to dequeue from
+ *
+ * Remove the head of the list. The list lock is taken so the function
+ * may be used safely with other locking list functions. The head item is
+ * returned or %NULL if the list is empty.
+ */
+
+static inline struct sk_buff *skb_dequeue(struct sk_buff_head *list)
+{
+ unsigned long flags;
+ struct sk_buff *result;
+
+ spin_lock_irqsave(&list->lock, flags);
+ result = __skb_dequeue(list);
+ spin_unlock_irqrestore(&list->lock, flags);
+ return result;
+}
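+
+/*
+ * A small producer/consumer sketch for the queue primitives above
+ * (editorial, not from the original source; handle() is a
+ * placeholder):
+ *
+ *     struct sk_buff_head q;
+ *
+ *     skb_queue_head_init(&q);
+ *     skb_queue_tail(&q, skb);                (producer side)
+ *     ...
+ *     while ((skb = skb_dequeue(&q)) != NULL)
+ *             handle(skb);                    (consumer side)
+ *
+ * The locking variants take the queue spinlock with interrupts
+ * disabled, so they are usable from any context.
+ */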
+
+/*
+ * Insert a packet on a list.
+ */
+
+static inline void __skb_insert(struct sk_buff *newsk,
+ struct sk_buff * prev, struct sk_buff *next,
+ struct sk_buff_head * list)
+{
+ newsk->next = next;
+ newsk->prev = prev;
+ next->prev = newsk;
+ prev->next = newsk;
+ newsk->list = list;
+ list->qlen++;
+}
+
+/**
+ * skb_insert - insert a buffer
+ * @old: buffer to insert before
+ * @newsk: buffer to insert
+ *
+ * Place a packet before a given packet in a list. The list locks are taken
+ * and this function is atomic with respect to other list locked calls.
+ * A buffer cannot be placed on two lists at the same time.
+ */
+
+static inline void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&old->list->lock, flags);
+ __skb_insert(newsk, old->prev, old, old->list);
+ spin_unlock_irqrestore(&old->list->lock, flags);
+}
+
+/*
+ * Place a packet after a given packet in a list.
+ */
+
+static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk)
+{
+ __skb_insert(newsk, old, old->next, old->list);
+}
+
+/**
+ * skb_append - append a buffer
+ * @old: buffer to insert after
+ * @newsk: buffer to insert
+ *
+ * Place a packet after a given packet in a list. The list locks are taken
+ * and this function is atomic with respect to other list locked calls.
+ * A buffer cannot be placed on two lists at the same time.
+ */
+
+
+static inline void skb_append(struct sk_buff *old, struct sk_buff *newsk)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&old->list->lock, flags);
+ __skb_append(old, newsk);
+ spin_unlock_irqrestore(&old->list->lock, flags);
+}
+
+/*
+ * remove sk_buff from list. _Must_ be called atomically, and with
+ * the list known..
+ */
+
+static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
+{
+ struct sk_buff * next, * prev;
+
+ list->qlen--;
+ next = skb->next;
+ prev = skb->prev;
+ skb->next = NULL;
+ skb->prev = NULL;
+ skb->list = NULL;
+ next->prev = prev;
+ prev->next = next;
+}
+
+/**
+ * skb_unlink - remove a buffer from a list
+ * @skb: buffer to remove
+ *
+ * Remove a buffer from its list. The list locks are taken and this
+ * function is atomic with respect to other list locked calls.
+ *
+ * Works even without knowing the list it is sitting on, which can be
+ * handy at times. It also means that THE LIST MUST EXIST when you
+ * unlink. Thus a list must have its contents unlinked before it is
+ * destroyed.
+ */
+
+static inline void skb_unlink(struct sk_buff *skb)
+{
+ struct sk_buff_head *list = skb->list;
+
+ if(list) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&list->lock, flags);
+ if(skb->list == list)
+ __skb_unlink(skb, skb->list);
+ spin_unlock_irqrestore(&list->lock, flags);
+ }
+}
+
+/* XXX: more streamlined implementation */
+
+/**
+ * __skb_dequeue_tail - remove from the tail of the queue
+ * @list: list to dequeue from
+ *
+ * Remove the tail of the list. This function does not take any locks
+ * so must be used with appropriate locks held only. The tail item is
+ * returned or %NULL if the list is empty.
+ */
+
+static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
+{
+ struct sk_buff *skb = skb_peek_tail(list);
+ if (skb)
+ __skb_unlink(skb, list);
+ return skb;
+}
+
+/**
+ * skb_dequeue_tail - remove from the tail of the queue
+ * @list: list to dequeue from
+ *
+ * Remove the tail of the list. The list lock is taken so the function
+ * may be used safely with other locking list functions. The tail item is
+ * returned or %NULL if the list is empty.
+ */
+
+static inline struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
+{
+ unsigned long flags;
+ struct sk_buff *result;
+
+ spin_lock_irqsave(&list->lock, flags);
+ result = __skb_dequeue_tail(list);
+ spin_unlock_irqrestore(&list->lock, flags);
+ return result;
+}
+
+static inline int skb_is_nonlinear(const struct sk_buff *skb)
+{
+ return skb->data_len;
+}
+
+static inline unsigned int skb_headlen(const struct sk_buff *skb)
+{
+ return skb->len - skb->data_len;
+}
+
+#define SKB_PAGE_ASSERT(skb) do { if (skb_shinfo(skb)->nr_frags) out_of_line_bug(); } while (0)
+#define SKB_FRAG_ASSERT(skb) do { if (skb_shinfo(skb)->frag_list) out_of_line_bug(); } while (0)
+#define SKB_LINEAR_ASSERT(skb) do { if (skb_is_nonlinear(skb)) out_of_line_bug(); } while (0)
+
+/*
+ * Add data to an sk_buff
+ */
+
+static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
+{
+ unsigned char *tmp=skb->tail;
+ SKB_LINEAR_ASSERT(skb);
+ skb->tail+=len;
+ skb->len+=len;
+ return tmp;
+}
+
+/**
+ * skb_put - add data to a buffer
+ * @skb: buffer to use
+ * @len: amount of data to add
+ *
+ * This function extends the used data area of the buffer. If this would
+ * exceed the total buffer size the kernel will panic. A pointer to the
+ * first byte of the extra data is returned.
+ */
+
+static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
+{
+ unsigned char *tmp=skb->tail;
+ SKB_LINEAR_ASSERT(skb);
+ skb->tail+=len;
+ skb->len+=len;
+ if(skb->tail>skb->end) {
+ skb_over_panic(skb, len, current_text_addr());
+ }
+ return tmp;
+}
+
+static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
+{
+ skb->data-=len;
+ skb->len+=len;
+ return skb->data;
+}
+
+/**
+ * skb_push - add data to the start of a buffer
+ * @skb: buffer to use
+ * @len: amount of data to add
+ *
+ * This function extends the used data area of the buffer at the buffer
+ * start. If this would exceed the total buffer headroom the kernel will
+ * panic. A pointer to the first byte of the extra data is returned.
+ */
+
+static inline unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
+{
+ skb->data-=len;
+ skb->len+=len;
+ if(skb->data<skb->head) {
+ skb_under_panic(skb, len, current_text_addr());
+ }
+ return skb->data;
+}
+
+static inline char *__skb_pull(struct sk_buff *skb, unsigned int len)
+{
+ skb->len-=len;
+ if (skb->len < skb->data_len)
+ out_of_line_bug();
+ return skb->data+=len;
+}
+
+/**
+ * skb_pull - remove data from the start of a buffer
+ * @skb: buffer to use
+ * @len: amount of data to remove
+ *
+ * This function removes data from the start of a buffer, returning
+ * the memory to the headroom. A pointer to the next data in the buffer
+ * is returned. Once the data has been pulled future pushes will overwrite
+ * the old data.
+ */
+
+static inline unsigned char * skb_pull(struct sk_buff *skb, unsigned int len)
+{
+ if (len > skb->len)
+ return NULL;
+ return __skb_pull(skb,len);
+}
+
+extern unsigned char * __pskb_pull_tail(struct sk_buff *skb, int delta);
+
+static inline char *__pskb_pull(struct sk_buff *skb, unsigned int len)
+{
+ if (len > skb_headlen(skb) &&
+ __pskb_pull_tail(skb, len-skb_headlen(skb)) == NULL)
+ return NULL;
+ skb->len -= len;
+ return skb->data += len;
+}
+
+static inline unsigned char * pskb_pull(struct sk_buff *skb, unsigned int len)
+{
+ if (len > skb->len)
+ return NULL;
+ return __pskb_pull(skb,len);
+}
+
+static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
+{
+ if (len <= skb_headlen(skb))
+ return 1;
+ if (len > skb->len)
+ return 0;
+ return (__pskb_pull_tail(skb, len-skb_headlen(skb)) != NULL);
+}
+
+/**
+ * skb_headroom - bytes at buffer head
+ * @skb: buffer to check
+ *
+ * Return the number of bytes of free space at the head of an &sk_buff.
+ */
+
+static inline int skb_headroom(const struct sk_buff *skb)
+{
+ return skb->data-skb->head;
+}
+
+/**
+ * skb_tailroom - bytes at buffer end
+ * @skb: buffer to check
+ *
+ * Return the number of bytes of free space at the tail of an sk_buff
+ */
+
+static inline int skb_tailroom(const struct sk_buff *skb)
+{
+ return skb_is_nonlinear(skb) ? 0 : skb->end-skb->tail;
+}
+
+/**
+ * skb_reserve - adjust headroom
+ * @skb: buffer to alter
+ * @len: bytes to move
+ *
+ * Increase the headroom of an empty &sk_buff by reducing the tail
+ * room. This is only allowed for an empty buffer.
+ */
+
+static inline void skb_reserve(struct sk_buff *skb, unsigned int len)
+{
+ skb->data+=len;
+ skb->tail+=len;
+}
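+
+/*
+ * The canonical pattern for building an outgoing packet with the
+ * helpers above (an editorial sketch; struct my_hdr is a
+ * hypothetical header type):
+ *
+ *     skb = alloc_skb(sizeof(struct my_hdr) + data_len, GFP_KERNEL);
+ *     if (skb == NULL)
+ *             return -ENOMEM;
+ *     skb_reserve(skb, sizeof(struct my_hdr));        (leave headroom)
+ *     memcpy(skb_put(skb, data_len), data, data_len); (append payload)
+ *     hdr = (struct my_hdr *)skb_push(skb, sizeof(struct my_hdr));
+ *
+ * skb_reserve() may only be used on an empty buffer; skb_push() then
+ * consumes the reserved headroom from the front.
+ */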
+
+extern int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc);
+
+static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
+{
+ if (!skb->data_len) {
+ skb->len = len;
+ skb->tail = skb->data+len;
+ } else {
+ ___pskb_trim(skb, len, 0);
+ }
+}
+
+/**
+ * skb_trim - remove end from a buffer
+ * @skb: buffer to alter
+ * @len: new length
+ *
+ * Cut the length of a buffer down by removing data from the tail. If
+ * the buffer is already under the length specified it is not modified.
+ */
+
+static inline void skb_trim(struct sk_buff *skb, unsigned int len)
+{
+ if (skb->len > len) {
+ __skb_trim(skb, len);
+ }
+}
+
+
+static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
+{
+ if (!skb->data_len) {
+ skb->len = len;
+ skb->tail = skb->data+len;
+ return 0;
+ } else {
+ return ___pskb_trim(skb, len, 1);
+ }
+}
+
+static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
+{
+ if (len < skb->len)
+ return __pskb_trim(skb, len);
+ return 0;
+}
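+
+/*
+ * Example (editorial): stripping trailing padding once the true
+ * payload length is known, e.g. an Ethernet frame padded out to the
+ * 60-byte minimum:
+ *
+ *     if (pskb_trim(skb, real_len))
+ *             goto drop;              (reallocation failed)
+ *
+ * skb_trim() can be used instead when the buffer is known to be
+ * linear.
+ */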
+
+/**
+ * skb_orphan - orphan a buffer
+ * @skb: buffer to orphan
+ *
+ * If a buffer currently has an owner then we call the owner's
+ * destructor function and make the @skb unowned. The buffer continues
+ * to exist but is no longer charged to its former owner.
+ */
+
+
+static inline void skb_orphan(struct sk_buff *skb)
+{
+ if (skb->destructor)
+ skb->destructor(skb);
+ skb->destructor = NULL;
+ skb->sk = NULL;
+}
+
+/**
+ * skb_queue_purge - empty a list
+ * @list: list to empty
+ *
+ * Delete all buffers on an &sk_buff list. Each buffer is removed from
+ * the list and one reference dropped. This function takes the list
+ * lock and is atomic with respect to other list locking functions.
+ */
+
+
+static inline void skb_queue_purge(struct sk_buff_head *list)
+{
+ struct sk_buff *skb;
+ while ((skb=skb_dequeue(list))!=NULL)
+ kfree_skb(skb);
+}
+
+/**
+ * __skb_queue_purge - empty a list
+ * @list: list to empty
+ *
+ * Delete all buffers on an &sk_buff list. Each buffer is removed from
+ * the list and one reference dropped. This function does not take the
+ * list lock and the caller must hold the relevant locks to use it.
+ */
+
+
+static inline void __skb_queue_purge(struct sk_buff_head *list)
+{
+ struct sk_buff *skb;
+ while ((skb=__skb_dequeue(list))!=NULL)
+ kfree_skb(skb);
+}
+
+/**
+ * __dev_alloc_skb - allocate an skbuff for sending
+ * @length: length to allocate
+ * @gfp_mask: get_free_pages mask, passed to alloc_skb
+ *
+ * Allocate a new &sk_buff and assign it a usage count of one. The
+ * buffer has unspecified headroom built in. Users should allocate
+ * the headroom they think they need without accounting for the
+ * built in space. The built in space is used for optimisations.
+ *
+ * %NULL is returned if there is no free memory.
+ */
+#ifndef CONFIG_XEN
+static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
+ int gfp_mask)
+{
+ struct sk_buff *skb = alloc_skb(length+16, gfp_mask);
+ if (skb)
+ skb_reserve(skb,16);
+ return skb;
+}
+#else
+extern struct sk_buff *__dev_alloc_skb(unsigned int length, int gfp_mask);
+#endif
+
+/**
+ * dev_alloc_skb - allocate an skbuff for sending
+ * @length: length to allocate
+ *
+ * Allocate a new &sk_buff and assign it a usage count of one. The
+ * buffer has unspecified headroom built in. Users should allocate
+ * the headroom they think they need without accounting for the
+ * built in space. The built in space is used for optimisations.
+ *
+ * %NULL is returned if there is no free memory. Although this function
+ * allocates memory it can be called from an interrupt.
+ */
+
+static inline struct sk_buff *dev_alloc_skb(unsigned int length)
+{
+ return __dev_alloc_skb(length, GFP_ATOMIC);
+}
+
+/**
+ * skb_cow - copy header of skb when it is required
+ * @skb: buffer to cow
+ * @headroom: needed headroom
+ *
+ * If the skb passed lacks sufficient headroom or its data part
+ * is shared, data is reallocated. If reallocation fails, an error
+ * is returned and the original skb is not changed.
+ *
+ * The result is skb with writable area skb->head...skb->tail
+ * and at least @headroom of space at head.
+ */
+
+static inline int
+skb_cow(struct sk_buff *skb, unsigned int headroom)
+{
+ int delta = (headroom > 16 ? headroom : 16) - skb_headroom(skb);
+
+ if (delta < 0)
+ delta = 0;
+
+ if (delta || skb_cloned(skb))
+ return pskb_expand_head(skb, (delta+15)&~15, 0, GFP_ATOMIC);
+ return 0;
+}
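+
+/*
+ * Typical skb_cow() use (an illustrative sketch): a forwarding path
+ * about to rewrite headers first makes the header area private:
+ *
+ *     if (skb_cow(skb, dev->hard_header_len)) {
+ *             kfree_skb(skb);
+ *             return -ENOMEM;
+ *     }
+ *     (skb->head ... skb->tail is now safe to write)
+ */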
+
+/**
+ * skb_padto - pad an skbuff up to a minimal size
+ * @skb: buffer to pad
+ * @len: minimal length
+ *
+ * Pads up a buffer to ensure the trailing bytes exist and are
+ * blanked. If the buffer already contains sufficient data it
+ * is untouched. Returns the buffer, which may be a replacement
+ * for the original, or NULL for out of memory - in which case
+ * the original buffer is still freed.
+ */
+
+static inline struct sk_buff *skb_padto(struct sk_buff *skb, unsigned int len)
+{
+ unsigned int size = skb->len;
+ if(likely(size >= len))
+ return skb;
+ return skb_pad(skb, len-size);
+}
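+
+/*
+ * Example use of skb_padto() (editorial): a driver padding a short
+ * Ethernet frame to the 60-byte minimum before handing it to the
+ * hardware:
+ *
+ *     skb = skb_padto(skb, ETH_ZLEN);
+ *     if (skb == NULL)
+ *             return 0;       (out of memory; original already freed)
+ */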
+
+/**
+ * skb_linearize - convert paged skb to linear one
+ * @skb: buffer to linearize
+ * @gfp: allocation mode
+ *
+ * If there is no free memory, -ENOMEM is returned; otherwise zero
+ * is returned and the old skb data is released. */
+int skb_linearize(struct sk_buff *skb, int gfp);
+
+static inline void *kmap_skb_frag(const skb_frag_t *frag)
+{
+#ifdef CONFIG_HIGHMEM
+ if (in_irq())
+ out_of_line_bug();
+
+ local_bh_disable();
+#endif
+ return kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ);
+}
+
+static inline void kunmap_skb_frag(void *vaddr)
+{
+ kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
+#ifdef CONFIG_HIGHMEM
+ local_bh_enable();
+#endif
+}
+
+#define skb_queue_walk(queue, skb) \
+ for (skb = (queue)->next; \
+ (skb != (struct sk_buff *)(queue)); \
+ skb=skb->next)
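+
+/*
+ * skb_queue_walk() iterates without unlinking, so the caller must
+ * hold the queue lock for the duration (an editorial example):
+ *
+ *     spin_lock_irqsave(&q.lock, flags);
+ *     skb_queue_walk(&q, skb)
+ *             bytes += skb->len;
+ *     spin_unlock_irqrestore(&q.lock, flags);
+ */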
+
+
+extern struct sk_buff * skb_recv_datagram(struct sock *sk, unsigned flags, int noblock, int *err);
+extern unsigned int datagram_poll(struct file *file, struct socket *sock, struct poll_table_struct *wait);
+extern int skb_copy_datagram(const struct sk_buff *from, int offset, char *to, int size);
+extern int skb_copy_datagram_iovec(const struct sk_buff *from, int offset, struct iovec *to, int size);
+extern int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset, u8 *to, int len, unsigned int *csump);
+extern int skb_copy_and_csum_datagram_iovec(const struct sk_buff *skb, int hlen, struct iovec *iov);
+extern void skb_free_datagram(struct sock * sk, struct sk_buff *skb);
+
+extern unsigned int skb_checksum(const struct sk_buff *skb, int offset, int len, unsigned int csum);
+extern int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
+extern unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to, int len, unsigned int csum);
+extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
+
+extern void skb_init(void);
+extern void skb_add_mtu(int mtu);
+
+#ifdef CONFIG_NETFILTER
+static inline void
+nf_conntrack_put(struct nf_ct_info *nfct)
+{
+ if (nfct && atomic_dec_and_test(&nfct->master->use))
+ nfct->master->destroy(nfct->master);
+}
+static inline void
+nf_conntrack_get(struct nf_ct_info *nfct)
+{
+ if (nfct)
+ atomic_inc(&nfct->master->use);
+}
+static inline void
+nf_reset(struct sk_buff *skb)
+{
+ nf_conntrack_put(skb->nfct);
+ skb->nfct = NULL;
+#ifdef CONFIG_NETFILTER_DEBUG
+ skb->nf_debug = 0;
+#endif
+}
+#else /* CONFIG_NETFILTER */
+static inline void nf_reset(struct sk_buff *skb) {}
+#endif /* CONFIG_NETFILTER */
+
+#endif /* __KERNEL__ */
+#endif /* _LINUX_SKBUFF_H */
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/include/linux/timer.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/include/linux/timer.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,77 @@
+#ifndef _LINUX_TIMER_H
+#define _LINUX_TIMER_H
+
+#include <linux/config.h>
+#include <linux/list.h>
+
+/*
+ * In Linux 2.4, static timers have been removed from the kernel.
+ * Timers may be dynamically created and destroyed, and should be initialized
+ * by a call to init_timer() upon creation.
+ *
+ * The "data" field enables use of a common timeout function for several
+ * timeouts. You can use this field to distinguish between the different
+ * invocations.
+ */
+struct timer_list {
+ struct list_head list;
+ unsigned long expires;
+ unsigned long data;
+ void (*function)(unsigned long);
+};
+
+extern void add_timer(struct timer_list * timer);
+extern int del_timer(struct timer_list * timer);
+#ifdef CONFIG_NO_IDLE_HZ
+extern struct timer_list *next_timer_event(void);
+#endif
+
+#ifdef CONFIG_SMP
+extern int del_timer_sync(struct timer_list * timer);
+extern void sync_timers(void);
+#else
+#define del_timer_sync(t) del_timer(t)
+#define sync_timers() do { } while (0)
+#endif
+
+/*
+ * mod_timer is a more efficient way to update the expire field of an
+ * active timer (if the timer is inactive it will be activated)
+ * mod_timer(a,b) is equivalent to del_timer(a); a->expires = b; add_timer(a).
+ * If the timer is known to be not pending (ie, in the handler), mod_timer
+ * is less efficient than a->expires = b; add_timer(a).
+ */
+int mod_timer(struct timer_list *timer, unsigned long expires);
+
+extern void it_real_fn(unsigned long);
+
+static inline void init_timer(struct timer_list * timer)
+{
+ timer->list.next = timer->list.prev = NULL;
+}
+
+static inline int timer_pending (const struct timer_list * timer)
+{
+ return timer->list.next != NULL;
+}
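+
+/*
+ * Canonical dynamic timer usage (an editorial sketch; my_timeout()
+ * and my_cookie are hypothetical):
+ *
+ *     struct timer_list t;
+ *
+ *     init_timer(&t);
+ *     t.function = my_timeout;
+ *     t.data = (unsigned long)my_cookie;
+ *     t.expires = jiffies + 5 * HZ;           (about five seconds)
+ *     add_timer(&t);
+ *
+ * The timer must be deleted with del_timer()/del_timer_sync() before
+ * the memory holding it can be freed.
+ */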
+
+/*
+ * These inlines deal with timer wrapping correctly. You are
+ * strongly encouraged to use them
+ * 1. Because people otherwise forget
+ * 2. Because if the timer wrap changes in future you won't have to
+ * alter your driver code.
+ *
+ * time_after(a,b) returns true if the time a is after time b.
+ *
+ * Do this with "<0" and ">=0" to only test the sign of the result. A
+ * good compiler would generate better code (and a really good compiler
+ * wouldn't care). Gcc is currently neither.
+ */
+#define time_after(a,b) ((long)(b) - (long)(a) < 0)
+#define time_before(a,b) time_after(b,a)
+
+#define time_after_eq(a,b) ((long)(a) - (long)(b) >= 0)
+#define time_before_eq(a,b) time_after_eq(b,a)
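+
+/*
+ * A worked example (editorial): suppose jiffies is close to wrapping,
+ * say 0xfffffff0 on a 32-bit counter, and we set
+ *
+ *     timeout = jiffies + HZ;         (wraps to a small value)
+ *
+ * A naive "jiffies > timeout" is immediately true after the wrap,
+ * but time_after(jiffies, timeout) computes the signed difference
+ * and stays false until the timeout has genuinely passed.
+ */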
+
+#endif
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/kernel/time.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/kernel/time.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,415 @@
+/*
+ * linux/kernel/time.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ *
+ * This file contains the interface functions for the various
+ * time related system calls: time, stime, gettimeofday, settimeofday,
+ * adjtime
+ */
+/*
+ * Modification history kernel/time.c
+ *
+ * 1993-09-02 Philip Gladstone
+ * Created file with time related functions from sched.c and adjtimex()
+ * 1993-10-08 Torsten Duwe
+ * adjtime interface update and CMOS clock write code
+ * 1995-08-13 Torsten Duwe
+ * kernel PLL updated to 1994-12-13 specs (rfc-1589)
+ * 1999-01-16 Ulrich Windl
+ * Introduced error checking for many cases in adjtimex().
+ * Updated NTP code according to technical memorandum Jan '96
+ * "A Kernel Model for Precision Timekeeping" by Dave Mills
+ * Allow time_constant larger than MAXTC(6) for NTP v4 (MAXTC == 10)
+ * (Even though the technical memorandum forbids it)
+ */
+
+#include <linux/mm.h>
+#include <linux/timex.h>
+#include <linux/smp_lock.h>
+
+#include <asm/uaccess.h>
+
+/*
+ * The timezone where the local system is located. Used as a default by some
+ * programs who obtain this value by using gettimeofday.
+ */
+struct timezone sys_tz;
+
+/* The xtime_lock is not only serializing the xtime read/writes but it's also
+ serializing all accesses to the global NTP variables now. */
+extern rwlock_t xtime_lock;
+
+#if !defined(__alpha__) && !defined(__ia64__)
+
+/*
+ * sys_time() can be implemented in user-level using
+ * sys_gettimeofday(). Is this for backwards compatibility? If so,
+ * why not move it into the appropriate arch directory (for those
+ * architectures that need it).
+ *
+ * XXX This function is NOT 64-bit clean!
+ */
+asmlinkage long sys_time(int * tloc)
+{
+ struct timeval now;
+ int i;
+
+ do_gettimeofday(&now);
+ i = now.tv_sec;
+ if (tloc) {
+ if (put_user(i,tloc))
+ i = -EFAULT;
+ }
+ return i;
+}
+
+#if !defined(CONFIG_XEN)
+
+/*
+ * sys_stime() can be implemented in user-level using
+ * sys_settimeofday(). Is this for backwards compatibility? If so,
+ * why not move it into the appropriate arch directory (for those
+ * architectures that need it).
+ */
+
+asmlinkage long sys_stime(int * tptr)
+{
+ int value;
+
+ if (!capable(CAP_SYS_TIME))
+ return -EPERM;
+ if (get_user(value, tptr))
+ return -EFAULT;
+ write_lock_irq(&xtime_lock);
+ vxtime_lock();
+ xtime.tv_sec = value;
+ xtime.tv_usec = 0;
+ vxtime_unlock();
+ time_adjust = 0; /* stop active adjtime() */
+ time_status |= STA_UNSYNC;
+ time_maxerror = NTP_PHASE_LIMIT;
+ time_esterror = NTP_PHASE_LIMIT;
+ write_unlock_irq(&xtime_lock);
+ return 0;
+}
+
+#endif
+
+#endif
+
+asmlinkage long sys_gettimeofday(struct timeval *tv, struct timezone *tz)
+{
+ if (tv) {
+ struct timeval ktv;
+ do_gettimeofday(&ktv);
+ if (copy_to_user(tv, &ktv, sizeof(ktv)))
+ return -EFAULT;
+ }
+ if (tz) {
+ if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
+ return -EFAULT;
+ }
+ return 0;
+}
+
+/*
+ * Adjust the time obtained from the CMOS to be UTC time instead of
+ * local time.
+ *
+ * This is ugly, but preferable to the alternatives. Otherwise we
+ * would either need to write a program to do it in /etc/rc (and risk
+ * confusion if the program gets run more than once; it would also be
+ * hard to make the program warp the clock precisely n hours) or
+ * compile in the timezone information into the kernel. Bad, bad....
+ *
+ * - TYT, 1992-01-01
+ *
+ * The best thing to do is to keep the CMOS clock in universal time (UTC)
+ * as real UNIX machines always do it. This avoids all headaches about
+ * daylight saving times and warping kernel clocks.
+ */
+inline static void warp_clock(void)
+{
+ write_lock_irq(&xtime_lock);
+ vxtime_lock();
+ xtime.tv_sec += sys_tz.tz_minuteswest * 60;
+ vxtime_unlock();
+ write_unlock_irq(&xtime_lock);
+}
+
+/*
+ * In case for some reason the CMOS clock has not already been running
+ * in UTC, but in some local time: The first time we set the timezone,
+ * we will warp the clock so that it is ticking UTC time instead of
+ * local time. Presumably, if someone is setting the timezone then we
+ * are running in an environment where the programs understand about
+ * timezones. This should be done at boot time in the /etc/rc script,
+ * as soon as possible, so that the clock can be set right. Otherwise,
+ * various programs will get confused when the clock gets warped.
+ */
+
+int do_sys_settimeofday(struct timeval *tv, struct timezone *tz)
+{
+ static int firsttime = 1;
+
+ if (!capable(CAP_SYS_TIME))
+ return -EPERM;
+
+ if (tz) {
+ /* SMP safe, global irq locking makes it work. */
+ sys_tz = *tz;
+ if (firsttime) {
+ firsttime = 0;
+ if (!tv)
+ warp_clock();
+ }
+ }
+ if (tv)
+ {
+ /* SMP safe, again the code in arch/foo/time.c should
+ * globally block out interrupts when it runs.
+ */
+ do_settimeofday(tv);
+ }
+ return 0;
+}
+
+asmlinkage long sys_settimeofday(struct timeval *tv, struct timezone *tz)
+{
+ struct timeval new_tv;
+ struct timezone new_tz;
+
+ if (tv) {
+ if (copy_from_user(&new_tv, tv, sizeof(*tv)))
+ return -EFAULT;
+ }
+ if (tz) {
+ if (copy_from_user(&new_tz, tz, sizeof(*tz)))
+ return -EFAULT;
+ }
+
+ return do_sys_settimeofday(tv ? &new_tv : NULL, tz ? &new_tz : NULL);
+}
+
+long pps_offset; /* pps time offset (us) */
+long pps_jitter = MAXTIME; /* time dispersion (jitter) (us) */
+
+long pps_freq; /* frequency offset (scaled ppm) */
+long pps_stabil = MAXFREQ; /* frequency dispersion (scaled ppm) */
+
+long pps_valid = PPS_VALID; /* pps signal watchdog counter */
+
+int pps_shift = PPS_SHIFT; /* interval duration (s) (shift) */
+
+long pps_jitcnt; /* jitter limit exceeded */
+long pps_calcnt; /* calibration intervals */
+long pps_errcnt; /* calibration errors */
+long pps_stbcnt; /* stability limit exceeded */
+
+/* hook for a loadable hardpps kernel module */
+void (*hardpps_ptr)(struct timeval *);
+
+/* adjtimex mainly allows reading (and writing, if superuser) of
+ * kernel time-keeping variables. Used by xntpd.
+ */
+int do_adjtimex(struct timex *txc)
+{
+ long ltemp, mtemp, save_adjust;
+ int result;
+
+ /* In order to modify anything, you gotta be super-user! */
+ if (txc->modes && !capable(CAP_SYS_TIME))
+ return -EPERM;
+
+ /* Now we validate the data before disabling interrupts */
+
+ if ((txc->modes & ADJ_OFFSET_SINGLESHOT) == ADJ_OFFSET_SINGLESHOT)
+ /* singleshot must not be used with any other mode bits */
+ if (txc->modes != ADJ_OFFSET_SINGLESHOT)
+ return -EINVAL;
+
+ if (txc->modes != ADJ_OFFSET_SINGLESHOT && (txc->modes & ADJ_OFFSET))
+ /* adjustment Offset limited to +- .512 seconds */
+ if (txc->offset <= - MAXPHASE || txc->offset >= MAXPHASE )
+ return -EINVAL;
+
+ /* if the quartz is off by more than 10% something is VERY wrong ! */
+ if (txc->modes & ADJ_TICK)
+ if (txc->tick < 900000/HZ || txc->tick > 1100000/HZ)
+ return -EINVAL;
+
+ write_lock_irq(&xtime_lock);
+ result = time_state; /* mostly `TIME_OK' */
+
+ /* Save for later - semantics of adjtime is to return old value */
+ save_adjust = time_adjust;
+
+#if 0 /* STA_CLOCKERR is never set yet */
+ time_status &= ~STA_CLOCKERR; /* reset STA_CLOCKERR */
+#endif
+ /* If there are input parameters, then process them */
+ if (txc->modes)
+ {
+ if (txc->modes & ADJ_STATUS) /* only set allowed bits */
+ time_status = (txc->status & ~STA_RONLY) |
+ (time_status & STA_RONLY);
+
+ if (txc->modes & ADJ_FREQUENCY) { /* p. 22 */
+ if (txc->freq > MAXFREQ || txc->freq < -MAXFREQ) {
+ result = -EINVAL;
+ goto leave;
+ }
+ time_freq = txc->freq - pps_freq;
+ }
+
+ if (txc->modes & ADJ_MAXERROR) {
+ if (txc->maxerror < 0 || txc->maxerror >= NTP_PHASE_LIMIT) {
+ result = -EINVAL;
+ goto leave;
+ }
+ time_maxerror = txc->maxerror;
+ }
+
+ if (txc->modes & ADJ_ESTERROR) {
+ if (txc->esterror < 0 || txc->esterror >= NTP_PHASE_LIMIT) {
+ result = -EINVAL;
+ goto leave;
+ }
+ time_esterror = txc->esterror;
+ }
+
+ if (txc->modes & ADJ_TIMECONST) { /* p. 24 */
+ if (txc->constant < 0) { /* NTP v4 uses values > 6 */
+ result = -EINVAL;
+ goto leave;
+ }
+ time_constant = txc->constant;
+ }
+
+ if (txc->modes & ADJ_OFFSET) { /* values checked earlier */
+ if (txc->modes == ADJ_OFFSET_SINGLESHOT) {
+ /* adjtime() is independent from ntp_adjtime() */
+ time_adjust = txc->offset;
+ }
+ else if ( time_status & (STA_PLL | STA_PPSTIME) ) {
+ ltemp = (time_status & (STA_PPSTIME | STA_PPSSIGNAL)) ==
+ (STA_PPSTIME | STA_PPSSIGNAL) ?
+ pps_offset : txc->offset;
+
+ /*
+ * Scale the phase adjustment and
+ * clamp to the operating range.
+ */
+ if (ltemp > MAXPHASE)
+ time_offset = MAXPHASE << SHIFT_UPDATE;
+ else if (ltemp < -MAXPHASE)
+ time_offset = -(MAXPHASE << SHIFT_UPDATE);
+ else
+ time_offset = ltemp << SHIFT_UPDATE;
+
+ /*
+ * Select whether the frequency is to be controlled
+ * and in which mode (PLL or FLL). Clamp to the operating
+ * range. Ugly multiply/divide should be replaced someday.
+ */
+
+ if (time_status & STA_FREQHOLD || time_reftime == 0)
+ time_reftime = xtime.tv_sec;
+ mtemp = xtime.tv_sec - time_reftime;
+ time_reftime = xtime.tv_sec;
+ if (time_status & STA_FLL) {
+ if (mtemp >= MINSEC) {
+ ltemp = (time_offset / mtemp) << (SHIFT_USEC -
+ SHIFT_UPDATE);
+ if (ltemp < 0)
+ time_freq -= -ltemp >> SHIFT_KH;
+ else
+ time_freq += ltemp >> SHIFT_KH;
+ } else /* calibration interval too short (p. 12) */
+ result = TIME_ERROR;
+ } else { /* PLL mode */
+ if (mtemp < MAXSEC) {
+ ltemp *= mtemp;
+ if (ltemp < 0)
+ time_freq -= -ltemp >> (time_constant +
+ time_constant +
+ SHIFT_KF - SHIFT_USEC);
+ else
+ time_freq += ltemp >> (time_constant +
+ time_constant +
+ SHIFT_KF - SHIFT_USEC);
+ } else /* calibration interval too long (p. 12) */
+ result = TIME_ERROR;
+ }
+ if (time_freq > time_tolerance)
+ time_freq = time_tolerance;
+ else if (time_freq < -time_tolerance)
+ time_freq = -time_tolerance;
+ } /* STA_PLL || STA_PPSTIME */
+ } /* txc->modes & ADJ_OFFSET */
+ if (txc->modes & ADJ_TICK) {
+ /* if the quartz is off by more than 10% something is
+ VERY wrong ! */
+ if (txc->tick < 900000/HZ || txc->tick > 1100000/HZ) {
+ result = -EINVAL;
+ goto leave;
+ }
+ tick = txc->tick;
+ }
+ } /* txc->modes */
+leave: if ((time_status & (STA_UNSYNC|STA_CLOCKERR)) != 0
+ || ((time_status & (STA_PPSFREQ|STA_PPSTIME)) != 0
+ && (time_status & STA_PPSSIGNAL) == 0)
+ /* p. 24, (b) */
+ || ((time_status & (STA_PPSTIME|STA_PPSJITTER))
+ == (STA_PPSTIME|STA_PPSJITTER))
+ /* p. 24, (c) */
+ || ((time_status & STA_PPSFREQ) != 0
+ && (time_status & (STA_PPSWANDER|STA_PPSERROR)) != 0))
+ /* p. 24, (d) */
+ result = TIME_ERROR;
+
+ if ((txc->modes & ADJ_OFFSET_SINGLESHOT) == ADJ_OFFSET_SINGLESHOT)
+ txc->offset = save_adjust;
+ else {
+ if (time_offset < 0)
+ txc->offset = -(-time_offset >> SHIFT_UPDATE);
+ else
+ txc->offset = time_offset >> SHIFT_UPDATE;
+ }
+ txc->freq = time_freq + pps_freq;
+ txc->maxerror = time_maxerror;
+ txc->esterror = time_esterror;
+ txc->status = time_status;
+ txc->constant = time_constant;
+ txc->precision = time_precision;
+ txc->tolerance = time_tolerance;
+ txc->tick = tick;
+ txc->ppsfreq = pps_freq;
+ txc->jitter = pps_jitter >> PPS_AVG;
+ txc->shift = pps_shift;
+ txc->stabil = pps_stabil;
+ txc->jitcnt = pps_jitcnt;
+ txc->calcnt = pps_calcnt;
+ txc->errcnt = pps_errcnt;
+ txc->stbcnt = pps_stbcnt;
+ write_unlock_irq(&xtime_lock);
+ do_gettimeofday(&txc->time);
+ return(result);
+}
+
+asmlinkage long sys_adjtimex(struct timex *txc_p)
+{
+ struct timex txc; /* Local copy of parameter */
+ int ret;
+
+ /* Copy the user data space into the kernel copy
+ * structure. But bear in mind that the structures
+ * may change
+ */
+ if(copy_from_user(&txc, txc_p, sizeof(struct timex)))
+ return -EFAULT;
+ ret = do_adjtimex(&txc);
+ return copy_to_user(txc_p, &txc, sizeof(struct timex)) ? -EFAULT : ret;
+}
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/kernel/timer.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/kernel/timer.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,968 @@
+/*
+ * linux/kernel/timer.c
+ *
+ * Kernel internal timers, kernel timekeeping, basic process system calls
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ *
+ * 1997-01-28 Modified by Finn Arne Gangstad to make timers scale better.
+ *
+ * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
+ * "A Kernel Model for Precision Timekeeping" by Dave Mills
+ * 1998-12-24 Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
+ * serialize accesses to xtime/lost_ticks).
+ * Copyright (C) 1998 Andrea Arcangeli
+ * 1999-03-10 Improved NTP compatibility by Ulrich Windl
+ */
+
+#include <linux/config.h>
+#include <linux/mm.h>
+#include <linux/timex.h>
+#include <linux/delay.h>
+#include <linux/smp_lock.h>
+#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
+
+#include <asm/uaccess.h>
+
+/*
+ * Timekeeping variables
+ */
+
+long tick = (1000000 + HZ/2) / HZ; /* timer interrupt period */
+
+/* The current time */
+struct timeval xtime __attribute__ ((aligned (16)));
+
+/* Don't completely fail for HZ > 500. */
+int tickadj = 500/HZ ? : 1; /* microsecs */
+
+DECLARE_TASK_QUEUE(tq_timer);
+DECLARE_TASK_QUEUE(tq_immediate);
+
+/*
+ * phase-lock loop variables
+ */
+/* TIME_ERROR prevents overwriting the CMOS clock */
+int time_state = TIME_OK; /* clock synchronization status */
+int time_status = STA_UNSYNC; /* clock status bits */
+long time_offset; /* time adjustment (us) */
+long time_constant = 2; /* pll time constant */
+long time_tolerance = MAXFREQ; /* frequency tolerance (ppm) */
+long time_precision = 1; /* clock precision (us) */
+long time_maxerror = NTP_PHASE_LIMIT; /* maximum error (us) */
+long time_esterror = NTP_PHASE_LIMIT; /* estimated error (us) */
+long time_phase; /* phase offset (scaled us) */
+long time_freq = ((1000000 + HZ/2) % HZ - HZ/2) << SHIFT_USEC;
+ /* frequency offset (scaled ppm)*/
+long time_adj; /* tick adjust (scaled 1 / HZ) */
+long time_reftime; /* time at last adjustment (s) */
+
+long time_adjust;
+long time_adjust_step;
+
+unsigned long event;
+
+extern int do_setitimer(int, struct itimerval *, struct itimerval *);
+
+unsigned long volatile jiffies;
+
+unsigned int * prof_buffer;
+unsigned long prof_len;
+unsigned long prof_shift;
+
+/*
+ * Event timer code
+ */
+#define TVN_BITS 6
+#define TVR_BITS 8
+#define TVN_SIZE (1 << TVN_BITS)
+#define TVR_SIZE (1 << TVR_BITS)
+#define TVN_MASK (TVN_SIZE - 1)
+#define TVR_MASK (TVR_SIZE - 1)
+
+struct timer_vec {
+ int index;
+ struct list_head vec[TVN_SIZE];
+};
+
+struct timer_vec_root {
+ int index;
+ struct list_head vec[TVR_SIZE];
+};
+
+static struct timer_vec tv5;
+static struct timer_vec tv4;
+static struct timer_vec tv3;
+static struct timer_vec tv2;
+static struct timer_vec_root tv1;
+
+static struct timer_vec * const tvecs[] = {
+ (struct timer_vec *)&tv1, &tv2, &tv3, &tv4, &tv5
+};
+
+static struct list_head * run_timer_list_running;
+
+#define NOOF_TVECS (sizeof(tvecs) / sizeof(tvecs[0]))
+
+void init_timervecs (void)
+{
+ int i;
+
+ for (i = 0; i < TVN_SIZE; i++) {
+ INIT_LIST_HEAD(tv5.vec + i);
+ INIT_LIST_HEAD(tv4.vec + i);
+ INIT_LIST_HEAD(tv3.vec + i);
+ INIT_LIST_HEAD(tv2.vec + i);
+ }
+ for (i = 0; i < TVR_SIZE; i++)
+ INIT_LIST_HEAD(tv1.vec + i);
+}
+
+static unsigned long timer_jiffies;
+
+static inline void internal_add_timer(struct timer_list *timer)
+{
+ /*
+ * must be cli-ed when calling this
+ */
+ unsigned long expires = timer->expires;
+ unsigned long idx = expires - timer_jiffies;
+ struct list_head * vec;
+
+ if (run_timer_list_running)
+ vec = run_timer_list_running;
+ else if (idx < TVR_SIZE) {
+ int i = expires & TVR_MASK;
+ vec = tv1.vec + i;
+ } else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
+ int i = (expires >> TVR_BITS) & TVN_MASK;
+ vec = tv2.vec + i;
+ } else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
+ int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
+ vec = tv3.vec + i;
+ } else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
+ int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
+ vec = tv4.vec + i;
+ } else if ((signed long) idx < 0) {
+ /* can happen if you add a timer with expires == jiffies,
+ * or you set a timer to go off in the past
+ */
+ vec = tv1.vec + tv1.index;
+ } else if (idx <= 0xffffffffUL) {
+ int i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
+ vec = tv5.vec + i;
+ } else {
+ /* Can only get here on architectures with 64-bit jiffies */
+ INIT_LIST_HEAD(&timer->list);
+ return;
+ }
+ /*
+ * Timers are FIFO!
+ */
+ list_add(&timer->list, vec->prev);
+}
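+
+/*
+ * A worked example of the bucketing above (editorial): with
+ * TVR_BITS == 8 and TVN_BITS == 6, a timer due in idx == 300 ticks
+ * does not fit in tv1 (capacity 256 ticks), so it is placed in tv2
+ * at slot (expires >> 8) & 63 and cascaded back into tv1 when tv2's
+ * index reaches that slot.
+ */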
+
+/* Initialize both explicitly - let's try to have them in the same cache line */
+spinlock_t timerlist_lock = SPIN_LOCK_UNLOCKED;
+
+#ifdef CONFIG_SMP
+volatile struct timer_list * volatile running_timer;
+#define timer_enter(t) do { running_timer = t; mb(); } while (0)
+#define timer_exit() do { running_timer = NULL; } while (0)
+#define timer_is_running(t) (running_timer == t)
+#define timer_synchronize(t) while (timer_is_running(t)) barrier()
+#else
+#define timer_enter(t) do { } while (0)
+#define timer_exit() do { } while (0)
+#endif
+
+void add_timer(struct timer_list *timer)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&timerlist_lock, flags);
+ if (timer_pending(timer))
+ goto bug;
+ internal_add_timer(timer);
+ spin_unlock_irqrestore(&timerlist_lock, flags);
+ return;
+bug:
+ spin_unlock_irqrestore(&timerlist_lock, flags);
+ printk("bug: kernel timer added twice at %p.\n",
+ __builtin_return_address(0));
+}
+
+static inline int detach_timer (struct timer_list *timer)
+{
+ if (!timer_pending(timer))
+ return 0;
+ list_del(&timer->list);
+ return 1;
+}
+
+int mod_timer(struct timer_list *timer, unsigned long expires)
+{
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&timerlist_lock, flags);
+ timer->expires = expires;
+ ret = detach_timer(timer);
+ internal_add_timer(timer);
+ spin_unlock_irqrestore(&timerlist_lock, flags);
+ return ret;
+}
+
+int del_timer(struct timer_list * timer)
+{
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&timerlist_lock, flags);
+ ret = detach_timer(timer);
+ timer->list.next = timer->list.prev = NULL;
+ spin_unlock_irqrestore(&timerlist_lock, flags);
+ return ret;
+}
+
+#ifdef CONFIG_SMP
+void sync_timers(void)
+{
+ spin_unlock_wait(&global_bh_lock);
+}
+
+/*
+ * SMP specific function to delete periodic timer.
+ * Caller must disable by some means restarting the timer
+ * for new. Upon exit the timer is not queued and handler is not running
+ * on any CPU. It returns number of times, which timer was deleted
+ * (for reference counting).
+ */
+
+int del_timer_sync(struct timer_list * timer)
+{
+ int ret = 0;
+
+ for (;;) {
+ unsigned long flags;
+ int running;
+
+ spin_lock_irqsave(&timerlist_lock, flags);
+ ret += detach_timer(timer);
+ timer->list.next = timer->list.prev = 0;
+ running = timer_is_running(timer);
+ spin_unlock_irqrestore(&timerlist_lock, flags);
+
+ if (!running)
+ break;
+
+ timer_synchronize(timer);
+ }
+
+ return ret;
+}
+#endif
+
+
+static inline void cascade_timers(struct timer_vec *tv)
+{
+ /* cascade all the timers from tv up one level */
+ struct list_head *head, *curr, *next;
+
+ head = tv->vec + tv->index;
+ curr = head->next;
+ /*
+ * We are removing _all_ timers from the list, so we don't have to
+ * detach them individually, just clear the list afterwards.
+ */
+ while (curr != head) {
+ struct timer_list *tmp;
+
+ tmp = list_entry(curr, struct timer_list, list);
+ next = curr->next;
+ list_del(curr); // not needed
+ internal_add_timer(tmp);
+ curr = next;
+ }
+ INIT_LIST_HEAD(head);
+ tv->index = (tv->index + 1) & TVN_MASK;
+}
+
+static inline void run_timer_list(void)
+{
+ spin_lock_irq(&timerlist_lock);
+ while ((long)(jiffies - timer_jiffies) >= 0) {
+ LIST_HEAD(queued);
+ struct list_head *head, *curr;
+ if (!tv1.index) {
+ int n = 1;
+ do {
+ cascade_timers(tvecs[n]);
+ } while (tvecs[n]->index == 1 && ++n < NOOF_TVECS);
+ }
+ run_timer_list_running = &queued;
+repeat:
+ head = tv1.vec + tv1.index;
+ curr = head->next;
+ if (curr != head) {
+ struct timer_list *timer;
+ void (*fn)(unsigned long);
+ unsigned long data;
+
+ timer = list_entry(curr, struct timer_list, list);
+ fn = timer->function;
+ data= timer->data;
+
+ detach_timer(timer);
+ timer->list.next = timer->list.prev = NULL;
+ timer_enter(timer);
+ spin_unlock_irq(&timerlist_lock);
+ fn(data);
+ spin_lock_irq(&timerlist_lock);
+ timer_exit();
+ goto repeat;
+ }
+ run_timer_list_running = NULL;
+ ++timer_jiffies;
+ tv1.index = (tv1.index + 1) & TVR_MASK;
+
+ curr = queued.next;
+ while (curr != &queued) {
+ struct timer_list *timer;
+
+ timer = list_entry(curr, struct timer_list, list);
+ curr = curr->next;
+ internal_add_timer(timer);
+ }
+ }
+ spin_unlock_irq(&timerlist_lock);
+}
+
+#ifdef CONFIG_NO_IDLE_HZ
+/*
+ * Find out when the next timer event is due to happen. This
+ * is used on S/390 to stop all activity when all CPUs are idle,
+ * and in XenoLinux to achieve the same.
+ * The timerlist_lock must be acquired before calling this function.
+ */
+struct timer_list *next_timer_event(void)
+{
+ struct timer_list *nte, *tmp;
+ struct list_head *lst;
+ int i, j;
+
+ /* Look for the next timer event in tv1. */
+ i = 0;
+ j = tvecs[0]->index;
+ do {
+ struct list_head *head = tvecs[0]->vec + j;
+ if (!list_empty(head)) {
+ nte = list_entry(head->next, struct timer_list, list);
+ goto found;
+ }
+ j = (j + 1) & TVR_MASK;
+ } while (j != tv1.index);
+
+ /* No event found in tv1. Check tv2-tv5. */
+ for (i = 1; i < NOOF_TVECS; i++) {
+ j = tvecs[i]->index;
+ do {
+ nte = NULL;
+ list_for_each(lst, tvecs[i]->vec + j) {
+ tmp = list_entry(lst, struct timer_list, list);
+ if (nte == NULL ||
+ time_before(tmp->expires, nte->expires))
+ nte = tmp;
+ }
+ if (nte)
+ goto found;
+ j = (j + 1) & TVN_MASK;
+ } while (j != tvecs[i]->index);
+ }
+ return NULL;
+found:
+ /* Found timer event in tvecs[i]->vec[j] */
+ if (j < tvecs[i]->index && i < NOOF_TVECS-1) {
+ /*
+ * The search wrapped. We need to look at the next list
+ * from tvecs[i+1] that would cascade into tvecs[i].
+ */
+ list_for_each(lst, tvecs[i+1]->vec+tvecs[i+1]->index) {
+ tmp = list_entry(lst, struct timer_list, list);
+ if (time_before(tmp->expires, nte->expires))
+ nte = tmp;
+ }
+ }
+ return nte;
+}
+#endif
+
+spinlock_t tqueue_lock = SPIN_LOCK_UNLOCKED;
+
+void tqueue_bh(void)
+{
+ run_task_queue(&tq_timer);
+}
+
+void immediate_bh(void)
+{
+ run_task_queue(&tq_immediate);
+}
+
+/*
+ * this routine handles the overflow of the microsecond field
+ *
+ * The tricky bits of code to handle the accurate clock support
+ * were provided by Dave Mills (Mills@xxxxxxxx) of NTP fame.
+ * They were originally developed for SUN and DEC kernels.
+ * All the kudos should go to Dave for this stuff.
+ *
+ */
+static void second_overflow(void)
+{
+ long ltemp;
+
+ /* Bump the maxerror field */
+ time_maxerror += time_tolerance >> SHIFT_USEC;
+ if ( time_maxerror > NTP_PHASE_LIMIT ) {
+ time_maxerror = NTP_PHASE_LIMIT;
+ time_status |= STA_UNSYNC;
+ }
+
+ /*
+ * Leap second processing. If in leap-insert state at
+ * the end of the day, the system clock is set back one
+ * second; if in leap-delete state, the system clock is
+ * set ahead one second. The microtime() routine or
+ * external clock driver will insure that reported time
+ * is always monotonic. The ugly divides should be
+ * replaced.
+ */
+ switch (time_state) {
+
+ case TIME_OK:
+ if (time_status & STA_INS)
+ time_state = TIME_INS;
+ else if (time_status & STA_DEL)
+ time_state = TIME_DEL;
+ break;
+
+ case TIME_INS:
+ if (xtime.tv_sec % 86400 == 0) {
+ xtime.tv_sec--;
+ time_state = TIME_OOP;
+ printk(KERN_NOTICE "Clock: inserting leap second 23:59:60 UTC\n");
+ }
+ break;
+
+ case TIME_DEL:
+ if ((xtime.tv_sec + 1) % 86400 == 0) {
+ xtime.tv_sec++;
+ time_state = TIME_WAIT;
+ printk(KERN_NOTICE "Clock: deleting leap second 23:59:59 UTC\n");
+ }
+ break;
+
+ case TIME_OOP:
+ time_state = TIME_WAIT;
+ break;
+
+ case TIME_WAIT:
+ if (!(time_status & (STA_INS | STA_DEL)))
+ time_state = TIME_OK;
+ }
+
+ /*
+ * Compute the phase adjustment for the next second. In
+ * PLL mode, the offset is reduced by a fixed factor
+ * times the time constant. In FLL mode the offset is
+ * used directly. In either mode, the maximum phase
+ * adjustment for each second is clamped so as to spread
+ * the adjustment over not more than the number of
+ * seconds between updates.
+ */
+ if (time_offset < 0) {
+ ltemp = -time_offset;
+ if (!(time_status & STA_FLL))
+ ltemp >>= SHIFT_KG + time_constant;
+ if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
+ ltemp = (MAXPHASE / MINSEC) << SHIFT_UPDATE;
+ time_offset += ltemp;
+ time_adj = -ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
+ } else {
+ ltemp = time_offset;
+ if (!(time_status & STA_FLL))
+ ltemp >>= SHIFT_KG + time_constant;
+ if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
+ ltemp = (MAXPHASE / MINSEC) << SHIFT_UPDATE;
+ time_offset -= ltemp;
+ time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
+ }
+
+ /*
+ * Compute the frequency estimate and additional phase
+ * adjustment due to frequency error for the next
+ * second. When the PPS signal is engaged, gnaw on the
+ * watchdog counter and update the frequency computed by
+ * the pll and the PPS signal.
+ */
+ pps_valid++;
+ if (pps_valid == PPS_VALID) { /* PPS signal lost */
+ pps_jitter = MAXTIME;
+ pps_stabil = MAXFREQ;
+ time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
+ STA_PPSWANDER | STA_PPSERROR);
+ }
+ ltemp = time_freq + pps_freq;
+ if (ltemp < 0)
+ time_adj -= -ltemp >>
+ (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
+ else
+ time_adj += ltemp >>
+ (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
+
+#if HZ == 100
+ /* Compensate for (HZ==100) != (1 << SHIFT_HZ).
+ * Add 25% and 3.125% to get 128.125; => only 0.125% error (p. 14)
+ */
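+ /*
+ * A worked check of the shift arithmetic (SHIFT_HZ == 7 covers
+ * HZ == 100): time_adj was scaled as if HZ were 1 << 7 = 128, so it
+ * needs a factor of 128/100 = 1.28. The two shifts below give
+ * x + x/4 + x/32 = 1.28125 * x, overshooting by roughly 0.1%.
+ */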
+ if (time_adj < 0)
+ time_adj -= (-time_adj >> 2) + (-time_adj >> 5);
+ else
+ time_adj += (time_adj >> 2) + (time_adj >> 5);
+#endif
+}
+
+/* in the NTP reference this is called "hardclock()" */
+static void update_wall_time_one_tick(void)
+{
+ if ( (time_adjust_step = time_adjust) != 0 ) {
+ /* We are doing an adjtime thing.
+ *
+ * Prepare time_adjust_step to be within bounds.
+ * Note that a positive time_adjust means we want the clock
+ * to run faster.
+ *
+ * Limit the amount of the step to be in the range
+ * -tickadj .. +tickadj
+ */
+ if (time_adjust > tickadj)
+ time_adjust_step = tickadj;
+ else if (time_adjust < -tickadj)
+ time_adjust_step = -tickadj;
+
+ /* Reduce by this step the amount of time left */
+ time_adjust -= time_adjust_step;
+ }
+ xtime.tv_usec += tick + time_adjust_step;
+ /*
+ * Advance the phase, once it gets to one microsecond, then
+ * advance the tick more.
+ */
+ time_phase += time_adj;
+ if (time_phase <= -FINEUSEC) {
+ long ltemp = -time_phase >> SHIFT_SCALE;
+ time_phase += ltemp << SHIFT_SCALE;
+ xtime.tv_usec -= ltemp;
+ }
+ else if (time_phase >= FINEUSEC) {
+ long ltemp = time_phase >> SHIFT_SCALE;
+ time_phase -= ltemp << SHIFT_SCALE;
+ xtime.tv_usec += ltemp;
+ }
+}
+
+/*
+ * Using a loop looks inefficient, but "ticks" is
+ * usually just one (we shouldn't be losing ticks,
+ * we're doing it this way mainly for interrupt
+ * latency reasons, not because we think we'll
+ * have lots of lost timer ticks).
+ */
+static void update_wall_time(unsigned long ticks)
+{
+ do {
+ ticks--;
+ update_wall_time_one_tick();
+ } while (ticks);
+
+ while (xtime.tv_usec >= 1000000) {
+ xtime.tv_usec -= 1000000;
+ xtime.tv_sec++;
+ second_overflow();
+ }
+}
+
+static inline void do_process_times(struct task_struct *p,
+ unsigned long user, unsigned long system)
+{
+ unsigned long psecs;
+
+ psecs = (p->times.tms_utime += user);
+ psecs += (p->times.tms_stime += system);
+ if (psecs / HZ > p->rlim[RLIMIT_CPU].rlim_cur) {
+ /* Send SIGXCPU every second.. */
+ if (!(psecs % HZ))
+ send_sig(SIGXCPU, p, 1);
+ /* and SIGKILL when we go over max.. */
+ if (psecs / HZ > p->rlim[RLIMIT_CPU].rlim_max)
+ send_sig(SIGKILL, p, 1);
+ }
+}
+
+static inline void do_it_virt(struct task_struct * p, unsigned long ticks)
+{
+ unsigned long it_virt = p->it_virt_value;
+
+ if (it_virt) {
+ it_virt -= ticks;
+ if (!it_virt) {
+ it_virt = p->it_virt_incr;
+ send_sig(SIGVTALRM, p, 1);
+ }
+ p->it_virt_value = it_virt;
+ }
+}
+
+static inline void do_it_prof(struct task_struct *p)
+{
+ unsigned long it_prof = p->it_prof_value;
+
+ if (it_prof) {
+ if (--it_prof == 0) {
+ it_prof = p->it_prof_incr;
+ send_sig(SIGPROF, p, 1);
+ }
+ p->it_prof_value = it_prof;
+ }
+}
+
+void update_one_process(struct task_struct *p, unsigned long user,
+ unsigned long system, int cpu)
+{
+ p->per_cpu_utime[cpu] += user;
+ p->per_cpu_stime[cpu] += system;
+ do_process_times(p, user, system);
+ do_it_virt(p, user);
+ do_it_prof(p);
+}
+
+/*
+ * Called from the timer interrupt handler to charge one tick to the current
+ * process. user_tick is 1 if the tick is user time, 0 for system.
+ */
+void update_process_times(int user_tick)
+{
+ struct task_struct *p = current;
+ int cpu = smp_processor_id(), system = user_tick ^ 1;
+
+ update_one_process(p, user_tick, system, cpu);
+ if (p->pid) {
+ if (--p->counter <= 0) {
+ p->counter = 0;
+ /*
+ * SCHED_FIFO is priority preemption, so this is
+ * not the place to decide whether to reschedule a
+ * SCHED_FIFO task or not - Bhavesh Davda
+ */
+ if (p->policy != SCHED_FIFO) {
+ p->need_resched = 1;
+ }
+ }
+ if (p->nice > 0)
+ kstat.per_cpu_nice[cpu] += user_tick;
+ else
+ kstat.per_cpu_user[cpu] += user_tick;
+ kstat.per_cpu_system[cpu] += system;
+ } else if (local_bh_count(cpu) || local_irq_count(cpu) > 1)
+ kstat.per_cpu_system[cpu] += system;
+}
+
+/*
+ * Called from the timer interrupt handler to charge a couple of ticks
+ * to the current process.
+ */
+void update_process_times_us(int user_ticks, int system_ticks)
+{
+ struct task_struct *p = current;
+ int cpu = smp_processor_id();
+
+ update_one_process(p, user_ticks, system_ticks, cpu);
+ if (p->pid) {
+ p->counter -= user_ticks + system_ticks;
+ if (p->counter <= 0) {
+ p->counter = 0;
+ p->need_resched = 1;
+ }
+ if (p->nice > 0)
+ kstat.per_cpu_nice[cpu] += user_ticks;
+ else
+ kstat.per_cpu_user[cpu] += user_ticks;
+ kstat.per_cpu_system[cpu] += system_ticks;
+ } else if (local_bh_count(cpu) || local_irq_count(cpu) > 1)
+ kstat.per_cpu_system[cpu] += system_ticks;
+}
+
+/*
+ * Nr of active tasks - counted in fixed-point numbers
+ */
+static unsigned long count_active_tasks(void)
+{
+ struct task_struct *p;
+ unsigned long nr = 0;
+
+ read_lock(&tasklist_lock);
+ for_each_task(p) {
+ if ((p->state == TASK_RUNNING ||
+ (p->state & TASK_UNINTERRUPTIBLE)))
+ nr += FIXED_1;
+ }
+ read_unlock(&tasklist_lock);
+ return nr;
+}
+
+/*
+ * Hmm.. Changed this, as the GNU make sources (load.c) seems to
+ * imply that avenrun[] is the standard name for this kind of thing.
+ * Nothing else seems to be standardized: the fractional size etc
+ * all seem to differ on different machines.
+ */
+unsigned long avenrun[3];
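+
+/*
+ * Concretely (with the usual <linux/sched.h> constants FSHIFT = 11,
+ * FIXED_1 = 1 << 11): avenrun[0] == 1065 encodes a one-minute load of
+ * 1065/2048, about 0.52. CALC_LOAD below is the fixed-point decay
+ *
+ * load = (load * exp + n * (FIXED_1 - exp)) >> FSHIFT
+ *
+ * evaluated once per LOAD_FREQ (5 second) interval.
+ */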
+
+static inline void calc_load(unsigned long ticks)
+{
+ unsigned long active_tasks; /* fixed-point */
+ static int count = LOAD_FREQ;
+
+ count -= ticks;
+ while (count < 0) {
+ count += LOAD_FREQ;
+ active_tasks = count_active_tasks();
+ CALC_LOAD(avenrun[0], EXP_1, active_tasks);
+ CALC_LOAD(avenrun[1], EXP_5, active_tasks);
+ CALC_LOAD(avenrun[2], EXP_15, active_tasks);
+ }
+}
+
+/* jiffies at the most recent update of wall time */
+unsigned long wall_jiffies;
+
+/*
+ * This lock protects us from SMP races while playing with xtime. -arca
+ */
+rwlock_t xtime_lock = RW_LOCK_UNLOCKED;
+
+static inline void update_times(void)
+{
+ unsigned long ticks;
+
+ /*
+ * update_times() is run from the raw timer_bh handler so we
+ * just know that the irqs are locally enabled and so we don't
+ * need to save/restore the flags of the local CPU here. -arca
+ */
+ write_lock_irq(&xtime_lock);
+ vxtime_lock();
+
+ ticks = jiffies - wall_jiffies;
+ if (ticks) {
+ wall_jiffies += ticks;
+ update_wall_time(ticks);
+ }
+ vxtime_unlock();
+ write_unlock_irq(&xtime_lock);
+ calc_load(ticks);
+}
+
+void timer_bh(void)
+{
+ update_times();
+ run_timer_list();
+}
+
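+/*
+ * jiffies is declared volatile; the casts below discard that qualifier
+ * so the increment can compile to a plain add, which is safe as the
+ * timer interrupt is the sole writer of jiffies.
+ */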
+void do_timer(struct pt_regs *regs)
+{
+ (*(unsigned long *)&jiffies)++;
+#ifndef CONFIG_SMP
+ /* SMP process accounting uses the local APIC timer */
+
+ update_process_times(user_mode(regs));
+#endif
+ mark_bh(TIMER_BH);
+ if (TQ_ACTIVE(tq_timer))
+ mark_bh(TQUEUE_BH);
+}
+
+void do_timer_ticks(int ticks)
+{
+ (*(unsigned long *)&jiffies) += ticks;
+ mark_bh(TIMER_BH);
+ if (TQ_ACTIVE(tq_timer))
+ mark_bh(TQUEUE_BH);
+}
+
+#if !defined(__alpha__) && !defined(__ia64__)
+
+/*
+ * For backwards compatibility? This can be done in libc so Alpha
+ * and all newer ports shouldn't need it.
+ */
+asmlinkage unsigned long sys_alarm(unsigned int seconds)
+{
+ struct itimerval it_new, it_old;
+ unsigned int oldalarm;
+
+ it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
+ it_new.it_value.tv_sec = seconds;
+ it_new.it_value.tv_usec = 0;
+ do_setitimer(ITIMER_REAL, &it_new, &it_old);
+ oldalarm = it_old.it_value.tv_sec;
+ /* ehhh.. We can't return 0 if we have an alarm pending.. */
+ /* And we'd better return too much than too little anyway */
+ if (it_old.it_value.tv_usec)
+ oldalarm++;
+ return oldalarm;
+}
+
+#endif
+
+#ifndef __alpha__
+
+/*
+ * The Alpha uses getxpid, getxuid, and getxgid instead. Maybe this
+ * should be moved into arch/i386 instead?
+ */
+
+/**
+ * sys_getpid - return the thread group id of the current process
+ *
+ * Note, despite the name, this returns the tgid not the pid. The tgid and
+ * the pid are identical unless CLONE_THREAD was specified on clone() in
+ * which case the tgid is the same in all threads of the same group.
+ *
+ * This is SMP safe as current->tgid does not change.
+ */
+asmlinkage long sys_getpid(void)
+{
+ return current->tgid;
+}
+
+/*
+ * This is not strictly SMP safe: p_opptr could change
+ * from under us. However, rather than getting any lock
+ * we can use an optimistic algorithm: get the parent
+ * pid, and go back and check that the parent is still
+ * the same. If it has changed (which is extremely unlikely
+ * indeed), we just try again..
+ *
+ * NOTE! This depends on the fact that even if we _do_
+ * get an old value of "parent", we can happily dereference
+ * the pointer: we just can't necessarily trust the result
+ * until we know that the parent pointer is valid.
+ *
+ * The "mb()" macro is a memory barrier - a synchronizing
+ * event. It also makes sure that gcc doesn't optimize
+ * away the necessary memory references.. The barrier doesn't
+ * have to have all that strong semantics: on x86 we don't
+ * really require a synchronizing instruction, for example.
+ * The barrier is more important for code generation than
+ * for any real memory ordering semantics (even if there is
+ * a small window for a race, using the old pointer is
+ * harmless for a while).
+ */
+asmlinkage long sys_getppid(void)
+{
+ int pid;
+ struct task_struct * me = current;
+ struct task_struct * parent;
+
+ parent = me->p_opptr;
+ for (;;) {
+ pid = parent->pid;
+#if CONFIG_SMP
+{
+ struct task_struct *old = parent;
+ mb();
+ parent = me->p_opptr;
+ if (old != parent)
+ continue;
+}
+#endif
+ break;
+ }
+ return pid;
+}
+
+asmlinkage long sys_getuid(void)
+{
+ /* Only we change this so SMP safe */
+ return current->uid;
+}
+
+asmlinkage long sys_geteuid(void)
+{
+ /* Only we change this so SMP safe */
+ return current->euid;
+}
+
+asmlinkage long sys_getgid(void)
+{
+ /* Only we change this so SMP safe */
+ return current->gid;
+}
+
+asmlinkage long sys_getegid(void)
+{
+ /* Only we change this so SMP safe */
+ return current->egid;
+}
+
+#endif
+
+/* Thread ID - the internal kernel "pid" */
+asmlinkage long sys_gettid(void)
+{
+ return current->pid;
+}
+
+asmlinkage long sys_nanosleep(struct timespec *rqtp, struct timespec *rmtp)
+{
+ struct timespec t;
+ unsigned long expire;
+
+ if(copy_from_user(&t, rqtp, sizeof(struct timespec)))
+ return -EFAULT;
+
+ if (t.tv_nsec >= 1000000000L || t.tv_nsec < 0 || t.tv_sec < 0)
+ return -EINVAL;
+
+
+ if (t.tv_sec == 0 && t.tv_nsec <= 2000000L &&
+ current->policy != SCHED_OTHER)
+ {
+ /*
+ * Short delay requests up to 2 ms will be handled with
+ * high precision by a busy wait for all real-time processes.
+ *
+ * It's important on SMP not to do this while holding locks.
+ */
+ udelay((t.tv_nsec + 999) / 1000);
+ return 0;
+ }
+
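+ /*
+ * timespec_to_jiffies() rounds up, and the extra jiffy added below
+ * (whenever the request is non-zero) covers the partial tick we may
+ * be in right now, so the sleep lasts at least the requested time.
+ */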
+ expire = timespec_to_jiffies(&t) + (t.tv_sec || t.tv_nsec);
+
+ current->state = TASK_INTERRUPTIBLE;
+ expire = schedule_timeout(expire);
+
+ if (expire) {
+ if (rmtp) {
+ jiffies_to_timespec(expire, &t);
+ if (copy_to_user(rmtp, &t, sizeof(struct timespec)))
+ return -EFAULT;
+ }
+ return -EINTR;
+ }
+ return 0;
+}
+
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/mkbuildtree
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/mkbuildtree Tue Aug 9 15:17:45 2005
@@ -0,0 +1,283 @@
+#!/bin/bash
+
+# mkbuildtree <build tree>
+#
+# Creates symbolic links in <build tree> for the sparse tree
+# in the current directory.
+
+# Script to determine the relative path between two directories.
+# Copyright (c) D. J. Hawkey Jr. 2002
+# Fixed for Xen project by K. Fraser in 2003.
+abs_to_rel ()
+{
+ local CWD SRCPATH
+
+ if [ "$1" != "/" -a "${1##*[^/]}" = "/" ]; then
+ SRCPATH=${1%?}
+ else
+ SRCPATH=$1
+ fi
+ if [ "$2" != "/" -a "${2##*[^/]}" = "/" ]; then
+ DESTPATH=${2%?}
+ else
+ DESTPATH=$2
+ fi
+
+ CWD=$PWD
+ [ "${1%%[^/]*}" != "/" ] && cd $1 && SRCPATH=$PWD
+ [ "${2%%[^/]*}" != "/" ] && cd $2 && DESTPATH=$PWD
+ [ "$CWD" != "$PWD" ] && cd $CWD
+
+ BASEPATH=$SRCPATH
+
+ [ "$SRCPATH" = "$DESTPATH" ] && DESTPATH="." && return
+ [ "$SRCPATH" = "/" ] && DESTPATH=${DESTPATH#?} && return
+
+ while [ "$BASEPATH/" != "${DESTPATH%${DESTPATH#$BASEPATH/}}" ]; do
+ BASEPATH=${BASEPATH%/*}
+ done
+
+ SRCPATH=${SRCPATH#$BASEPATH}
+ DESTPATH=${DESTPATH#$BASEPATH}
+ DESTPATH=${DESTPATH#?}
+ while [ -n "$SRCPATH" ]; do
+ SRCPATH=${SRCPATH%/*}
+ DESTPATH="../$DESTPATH"
+ done
+
+ [ -z "$BASEPATH" ] && BASEPATH="/"
+ [ "${DESTPATH##*[^/]}" = "/" ] && DESTPATH=${DESTPATH%?}
+}
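+
+# Example with hypothetical paths: "abs_to_rel /home/xen/build /home/xen/src"
+# leaves DESTPATH="../src", i.e. the second argument expressed relative to
+# the first.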
+
+# relative_lndir <target_dir>
+# Creates a tree of symlinks in the current working directory that mirror
+# real files in <target_dir>. <target_dir> should be relative to the current
+# working directory. Symlinks in <target_dir> are ignored. Source-control files
+# are ignored.
+relative_lndir ()
+{
+ local SYMLINK_DIR REAL_DIR pref i j
+ SYMLINK_DIR=$PWD
+ REAL_DIR=$1
+ (
+ cd $REAL_DIR
+ for i in `find . -type d | grep -v SCCS`; do
+ [ -d $SYMLINK_DIR/$i ] || mkdir -p $SYMLINK_DIR/$i
+ (
+ cd $i
+ pref=`echo $i | sed -e 's#/[^/]*#../#g' -e 's#^\.##'`
+ for j in `find . -maxdepth 1 -type f -o -type l`; do
+ ln -sf ${pref}${REAL_DIR}/$i/$j ${SYMLINK_DIR}/$i/$j
+ done
+ )
+ done
+ )
+}
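+
+# Example with a hypothetical layout: with REAL_DIR=../sparse, the real file
+# ../sparse/mm/memory.c yields the symlink ./mm/memory.c pointing back at
+# ../../sparse/mm/memory.c (modulo harmless "./" components left by find).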
+
+[ "$1" == "" ] && { echo "Syntax: $0 <linux tree to xenify>"; exit 1; }
+
+# Get absolute path to the destination directory
+pushd . >/dev/null
+cd ${1} || { echo "cannot cd to ${1}"; exit 1; }
+AD=$PWD
+popd >/dev/null
+
+# Get absolute path to the source directory
+AS=`pwd`
+
+# Get path to source, relative to destination
+abs_to_rel ${AD} ${AS}
+RS=$DESTPATH
+
+# Remove old copies of files and directories at the destination
+for i in `find . -type f -o -type l` ; do rm -f ${AD}/${i#./} ; done
+
+# We now work from the destination directory
+cd ${AD} || { echo "cannot cd to ${AD}"; exit 1; }
+
+# Remove old symlinks
+for i in `find . -type l`; do rm -f $i; done
+
+# Create symlinks of files and directories which exist in the sparse source
+relative_lndir ${RS}
+rm -f mkbuildtree
+
+set ${RS}/../linux-2.6.*-xen-sparse
+[ "$1" == "${RS}/../linux-2.6.*-xen-parse" ] && { echo "no Linux 2.6 sparse
tree at ${RS}/../linux-2.6.*-xen-sparse"; exit 1; }
+LINUX_26="$1"
+
+
+# Create links to the shared definitions of the Xen interfaces.
+rm -rf ${AD}/include/asm-xen/xen-public
+mkdir ${AD}/include/asm-xen/xen-public
+cd ${AD}/include/asm-xen/xen-public
+relative_lndir ../../../${RS}/../xen/include/public
+
+# Create a link to the shared definitions for the control interface
+cd ${AD}/include/asm-xen
+
+## Symlinks for files:
+## - which are identical in the i386 and xen-i386 architecture-dependent
+## subdirectories.
+## - which are identical in the Linux 2.6 and Linux 2.4 ports.
+
+cd ${AD}/include/asm-xen
+ln -sf ../asm-i386/a.out.h
+ln -sf ../asm-i386/apicdef.h
+ln -sf ../asm-i386/apic.h
+ln -sf ../asm-i386/atomic.h
+ln -sf ../asm-i386/bitops.h
+ln -sf ../asm-i386/boot.h
+ln -sf ../asm-i386/byteorder.h
+ln -sf ../asm-i386/cache.h
+ln -sf ../asm-i386/checksum.h
+ln -sf ../asm-i386/cpufeature.h
+ln -sf ../asm-i386/current.h
+ln -sf ../asm-i386/debugreg.h
+ln -sf ../asm-i386/delay.h
+ln -sf ../asm-i386/div64.h
+ln -sf ../asm-i386/dma.h
+ln -sf ../asm-i386/elf.h
+ln -sf ../asm-i386/errno.h
+ln -sf ../asm-i386/fcntl.h
+ln -sf ../asm-i386/floppy.h
+ln -sf ../asm-i386/hardirq.h
+ln -sf ../asm-i386/hdreg.h
+ln -sf ../asm-i386/i387.h
+ln -sf ../asm-i386/ide.h
+ln -sf ../asm-i386/init.h
+ln -sf ../asm-i386/io_apic.h
+ln -sf ../asm-i386/ioctl.h
+ln -sf ../asm-i386/ioctls.h
+ln -sf ../asm-i386/ipcbuf.h
+ln -sf ../asm-i386/ipc.h
+ln -sf ../asm-i386/kmap_types.h
+ln -sf ../asm-i386/ldt.h
+ln -sf ../asm-i386/linux_logo.h
+ln -sf ../asm-i386/locks.h
+ln -sf ../asm-i386/math_emu.h
+ln -sf ../asm-i386/mc146818rtc.h
+ln -sf ../asm-i386/mca_dma.h
+ln -sf ../asm-i386/mman.h
+ln -sf ../asm-i386/mmu.h
+ln -sf ../asm-i386/mmx.h
+ln -sf ../asm-i386/mpspec.h
+ln -sf ../asm-i386/msgbuf.h
+ln -sf ../asm-i386/mtrr.h
+ln -sf ../asm-i386/namei.h
+ln -sf ../asm-i386/param.h
+ln -sf ../asm-i386/parport.h
+ln -sf ../asm-i386/pgtable-3level.h
+ln -sf ../asm-i386/poll.h
+ln -sf ../asm-i386/posix_types.h
+ln -sf ../asm-i386/ptrace.h
+ln -sf ../asm-i386/resource.h
+ln -sf ../asm-i386/rwlock.h
+ln -sf ../asm-i386/rwsem.h
+ln -sf ../asm-i386/scatterlist.h
+ln -sf ../asm-i386/semaphore.h
+ln -sf ../asm-i386/sembuf.h
+ln -sf ../asm-i386/serial.h
+ln -sf ../asm-i386/setup.h
+ln -sf ../asm-i386/shmbuf.h
+ln -sf ../asm-i386/shmparam.h
+ln -sf ../asm-i386/sigcontext.h
+ln -sf ../asm-i386/siginfo.h
+ln -sf ../asm-i386/signal.h
+ln -sf ../asm-i386/smplock.h
+ln -sf ../asm-i386/socket.h
+ln -sf ../asm-i386/sockios.h
+ln -sf ../asm-i386/softirq.h
+ln -sf ../asm-i386/spinlock.h
+ln -sf ../asm-i386/statfs.h
+ln -sf ../asm-i386/stat.h
+ln -sf ../asm-i386/string-486.h
+ln -sf ../asm-i386/string.h
+ln -sf ../asm-i386/termbits.h
+ln -sf ../asm-i386/termios.h
+ln -sf ../asm-i386/timex.h
+ln -sf ../asm-i386/tlb.h
+ln -sf ../asm-i386/types.h
+ln -sf ../asm-i386/uaccess.h
+ln -sf ../asm-i386/ucontext.h
+ln -sf ../asm-i386/unaligned.h
+ln -sf ../asm-i386/unistd.h
+ln -sf ../asm-i386/user.h
+ln -sf ../asm-i386/vm86.h
+ln -sf ../../${LINUX_26}/include/asm-xen/balloon.h
+ln -sf ../../${LINUX_26}/include/asm-xen/ctrl_if.h
+ln -sf ../../${LINUX_26}/include/asm-xen/evtchn.h
+ln -sf ../../${LINUX_26}/include/asm-xen/hypervisor.h
+ln -sf ../../${LINUX_26}/include/asm-xen/multicall.h
+ln -sf ../../${LINUX_26}/include/asm-xen/xen_proc.h
+ln -sf ../../${LINUX_26}/include/asm-xen/asm-i386/synch_bitops.h
+
+mkdir -p linux-public && cd linux-public
+ln -sf ../../../${LINUX_26}/include/asm-xen/linux-public/privcmd.h
+ln -sf ../../../${LINUX_26}/include/asm-xen/linux-public/suspend.h
+
+cd ${AD}/arch/xen/kernel
+ln -sf ../../i386/kernel/i387.c
+ln -sf ../../i386/kernel/init_task.c
+ln -sf ../../i386/kernel/pci-i386.c
+ln -sf ../../i386/kernel/pci-i386.h
+ln -sf ../../i386/kernel/ptrace.c
+ln -sf ../../i386/kernel/semaphore.c
+ln -sf ../../i386/kernel/sys_i386.c
+ln -sf ../../../${LINUX_26}/arch/xen/kernel/ctrl_if.c
+ln -sf ../../../${LINUX_26}/arch/xen/kernel/evtchn.c
+ln -sf ../../../${LINUX_26}/arch/xen/kernel/fixup.c
+ln -sf ../../../${LINUX_26}/arch/xen/kernel/reboot.c
+ln -sf ../../../${LINUX_26}/arch/xen/kernel/skbuff.c
+ln -sf ../../../${LINUX_26}/arch/xen/i386/kernel/ioport.c
+ln -sf ../../../${LINUX_26}/arch/xen/i386/kernel/pci-dma.c
+
+cd ${AD}/arch/xen/lib
+ln -sf ../../i386/lib/checksum.S
+ln -sf ../../i386/lib/dec_and_lock.c
+ln -sf ../../i386/lib/getuser.S
+ln -sf ../../i386/lib/iodebug.c
+ln -sf ../../i386/lib/memcpy.c
+ln -sf ../../i386/lib/mmx.c
+ln -sf ../../i386/lib/old-checksum.c
+ln -sf ../../i386/lib/strstr.c
+ln -sf ../../i386/lib/usercopy.c
+ln -sf ../../../${LINUX_26}/arch/xen/kernel/xen_proc.c
+
+cd ${AD}/arch/xen/mm
+ln -sf ../../i386/mm/extable.c
+ln -sf ../../i386/mm/pageattr.c
+ln -sf ../../../${LINUX_26}/arch/xen/i386/mm/hypervisor.c
+
+cd ${AD}/arch/xen/drivers/balloon
+ln -sf ../../../../${LINUX_26}/drivers/xen/balloon/balloon.c
+
+cd ${AD}/arch/xen/drivers/console
+ln -sf ../../../../${LINUX_26}/drivers/xen/console/console.c
+
+cd ${AD}/arch/xen/drivers/dom0
+ln -sf ../../../../${LINUX_26}/drivers/xen/privcmd/privcmd.c core.c
+
+cd ${AD}/arch/xen/drivers/evtchn
+ln -sf ../../../../${LINUX_26}/drivers/xen/evtchn/evtchn.c
+
+cd ${AD}/arch/xen/drivers/netif/frontend
+ln -sf ../../../../../${LINUX_26}/drivers/xen/netfront/netfront.c main.c
+
+cd ${AD}/arch/xen/drivers/netif/backend
+ln -sf ../../../../../${LINUX_26}/drivers/xen/netback/common.h
+ln -sf ../../../../../${LINUX_26}/drivers/xen/netback/control.c
+ln -sf ../../../../../${LINUX_26}/drivers/xen/netback/interface.c
+ln -sf ../../../../../${LINUX_26}/drivers/xen/netback/netback.c main.c
+
+cd ${AD}/arch/xen/drivers/blkif/backend
+ln -sf ../../../../../${LINUX_26}/drivers/xen/blkback/common.h
+ln -sf ../../../../../${LINUX_26}/drivers/xen/blkback/blkback.c main.c
+ln -sf ../../../../../${LINUX_26}/drivers/xen/blkback/control.c
+ln -sf ../../../../../${LINUX_26}/drivers/xen/blkback/interface.c
+ln -sf ../../../../../${LINUX_26}/drivers/xen/blkback/vbd.c
+
+cd ${AD}/arch/xen/drivers/blkif/frontend
+ln -sf ../../../../../${LINUX_26}/drivers/xen/blkfront/blkfront.c
+
+
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/mm/highmem.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/mm/highmem.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,462 @@
+/*
+ * High memory handling common code and variables.
+ *
+ * (C) 1999 Andrea Arcangeli, SuSE GmbH, andrea@xxxxxxx
+ * Gerhard Wichert, Siemens AG, Gerhard.Wichert@xxxxxxxxxxxxxx
+ *
+ *
+ * Redesigned the x86 32-bit VM architecture to deal with
+ * 64-bit physical space. With current x86 CPUs this
+ * means up to 64 Gigabytes physical RAM.
+ *
+ * Rewrote high memory support to move the page cache into
+ * high memory. Implemented permanent (schedulable) kmaps
+ * based on Linus' idea.
+ *
+ * Copyright (C) 1999 Ingo Molnar <mingo@xxxxxxxxxx>
+ */
+
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/highmem.h>
+#include <linux/swap.h>
+#include <linux/slab.h>
+
+/*
+ * Virtual_count is not a pure "count".
+ * 0 means that it is not mapped, and has not been mapped
+ * since a TLB flush - it is usable.
+ * 1 means that there are no users, but it has been mapped
+ * since the last TLB flush - so we can't use it.
+ * n means that there are (n-1) current users of it.
+ */
+static int pkmap_count[LAST_PKMAP];
+static unsigned int last_pkmap_nr;
+static spinlock_cacheline_t kmap_lock_cacheline = {SPIN_LOCK_UNLOCKED};
+#define kmap_lock kmap_lock_cacheline.lock
+
+pte_t * pkmap_page_table;
+
+static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);
+
+static void flush_all_zero_pkmaps(void)
+{
+ int i;
+
+ flush_cache_all();
+
+ for (i = 0; i < LAST_PKMAP; i++) {
+ struct page *page;
+
+ /*
+ * zero means we don't have anything to do,
+ * >1 means that it is still in use. Only
+ * a count of 1 means that it is free but
+ * needs to be unmapped
+ */
+ if (pkmap_count[i] != 1)
+ continue;
+ pkmap_count[i] = 0;
+
+ /* sanity check */
+ if (pte_none(pkmap_page_table[i]))
+ BUG();
+
+ /*
+ * Don't need an atomic fetch-and-clear op here;
+ * no-one has the page mapped, and cannot get at
+ * its virtual address (and hence PTE) without first
+ * getting the kmap_lock (which is held here).
+ * So no dangers, even with speculative execution.
+ */
+ page = pte_page(pkmap_page_table[i]);
+ pte_clear(&pkmap_page_table[i]);
+
+ page->virtual = NULL;
+ }
+ flush_tlb_all();
+}
+
+static inline unsigned long map_new_virtual(struct page *page, int nonblocking)
+{
+ unsigned long vaddr;
+ int count;
+
+start:
+ count = LAST_PKMAP;
+ /* Find an empty entry */
+ for (;;) {
+ last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK;
+ if (!last_pkmap_nr) {
+ flush_all_zero_pkmaps();
+ count = LAST_PKMAP;
+ }
+ if (!pkmap_count[last_pkmap_nr])
+ break; /* Found a usable entry */
+ if (--count)
+ continue;
+
+ if (nonblocking)
+ return 0;
+
+ /*
+ * Sleep for somebody else to unmap their entries
+ */
+ {
+ DECLARE_WAITQUEUE(wait, current);
+
+ current->state = TASK_UNINTERRUPTIBLE;
+ add_wait_queue(&pkmap_map_wait, &wait);
+ spin_unlock(&kmap_lock);
+ schedule();
+ remove_wait_queue(&pkmap_map_wait, &wait);
+ spin_lock(&kmap_lock);
+
+ /* Somebody else might have mapped it while we slept */
+ if (page->virtual)
+ return (unsigned long) page->virtual;
+
+ /* Re-start */
+ goto start;
+ }
+ }
+ vaddr = PKMAP_ADDR(last_pkmap_nr);
+ set_pte(&(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
+ XEN_flush_page_update_queue();
+
+ pkmap_count[last_pkmap_nr] = 1;
+ page->virtual = (void *) vaddr;
+
+ return vaddr;
+}
+
+void kmap_flush_unused(void)
+{
+ spin_lock(&kmap_lock);
+ flush_all_zero_pkmaps();
+ spin_unlock(&kmap_lock);
+}
+
+void fastcall *kmap_high(struct page *page, int nonblocking)
+{
+ unsigned long vaddr;
+
+ /*
+ * For highmem pages, we can't trust "virtual" until
+ * after we have the lock.
+ *
+ * We cannot call this from interrupts, as it may block
+ */
+ spin_lock(&kmap_lock);
+ vaddr = (unsigned long) page->virtual;
+ if (!vaddr) {
+ vaddr = map_new_virtual(page, nonblocking);
+ if (!vaddr)
+ goto out;
+ }
+ pkmap_count[PKMAP_NR(vaddr)]++;
+ if (pkmap_count[PKMAP_NR(vaddr)] < 2)
+ BUG();
+ out:
+ spin_unlock(&kmap_lock);
+ return (void*) vaddr;
+}
+
+void fastcall kunmap_high(struct page *page)
+{
+ unsigned long vaddr;
+ unsigned long nr;
+ int need_wakeup;
+
+ spin_lock(&kmap_lock);
+ vaddr = (unsigned long) page->virtual;
+ if (!vaddr)
+ BUG();
+ nr = PKMAP_NR(vaddr);
+
+ /*
+ * A count must never go down to zero
+ * without a TLB flush!
+ */
+ need_wakeup = 0;
+ switch (--pkmap_count[nr]) {
+ case 0:
+ BUG();
+ case 1:
+ /*
+ * Avoid an unnecessary wake_up() function call.
+ * The common case is pkmap_count[] == 1, but
+ * no waiters.
+ * The tasks queued in the wait-queue are guarded
+ * by both the lock in the wait-queue-head and by
+ * the kmap_lock. As the kmap_lock is held here,
+ * no need for the wait-queue-head's lock. Simply
+ * test if the queue is empty.
+ */
+ need_wakeup = waitqueue_active(&pkmap_map_wait);
+ }
+ spin_unlock(&kmap_lock);
+
+ /* do wake-up, if needed, race-free outside of the spin lock */
+ if (need_wakeup)
+ wake_up(&pkmap_map_wait);
+}
+
+#define POOL_SIZE 32
+
+/*
+ * This lock gets no contention at all, normally.
+ */
+static spinlock_t emergency_lock = SPIN_LOCK_UNLOCKED;
+
+int nr_emergency_pages;
+static LIST_HEAD(emergency_pages);
+
+int nr_emergency_bhs;
+static LIST_HEAD(emergency_bhs);
+
+/*
+ * Simple bounce buffer support for highmem pages.
+ * This will be moved to the block layer in 2.5.
+ */
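+/*
+ * In brief: a driver that can only DMA to low memory is handed a low-memory
+ * copy of a highmem buffer_head. For WRITE the data is copied down before
+ * the I/O starts (copy_from_high_bh); for READ it is copied back up in the
+ * completion handler (copy_to_high_bh_irq). The pools above keep POOL_SIZE
+ * pages and buffer_heads in reserve so progress remains possible even when
+ * GFP_NOHIGHIO allocations fail.
+ */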
+
+static inline void copy_from_high_bh (struct buffer_head *to,
+ struct buffer_head *from)
+{
+ struct page *p_from;
+ char *vfrom;
+
+ p_from = from->b_page;
+
+ vfrom = kmap_atomic(p_from, KM_USER0);
+ memcpy(to->b_data, vfrom + bh_offset(from), to->b_size);
+ kunmap_atomic(vfrom, KM_USER0);
+}
+
+static inline void copy_to_high_bh_irq (struct buffer_head *to,
+ struct buffer_head *from)
+{
+ struct page *p_to;
+ char *vto;
+ unsigned long flags;
+
+ p_to = to->b_page;
+ __save_flags(flags);
+ __cli();
+ vto = kmap_atomic(p_to, KM_BOUNCE_READ);
+ memcpy(vto + bh_offset(to), from->b_data, to->b_size);
+ kunmap_atomic(vto, KM_BOUNCE_READ);
+ __restore_flags(flags);
+}
+
+static inline void bounce_end_io (struct buffer_head *bh, int uptodate)
+{
+ struct page *page;
+ struct buffer_head *bh_orig = (struct buffer_head *)(bh->b_private);
+ unsigned long flags;
+
+ bh_orig->b_end_io(bh_orig, uptodate);
+
+ page = bh->b_page;
+
+ spin_lock_irqsave(&emergency_lock, flags);
+ if (nr_emergency_pages >= POOL_SIZE)
+ __free_page(page);
+ else {
+ /*
+ * We are abusing page->list to manage
+ * the highmem emergency pool:
+ */
+ list_add(&page->list, &emergency_pages);
+ nr_emergency_pages++;
+ }
+
+ if (nr_emergency_bhs >= POOL_SIZE) {
+#ifdef HIGHMEM_DEBUG
+ /* Don't clobber the constructed slab cache */
+ init_waitqueue_head(&bh->b_wait);
+#endif
+ kmem_cache_free(bh_cachep, bh);
+ } else {
+ /*
+ * Ditto in the bh case, here we abuse b_inode_buffers:
+ */
+ list_add(&bh->b_inode_buffers, &emergency_bhs);
+ nr_emergency_bhs++;
+ }
+ spin_unlock_irqrestore(&emergency_lock, flags);
+}
+
+static __init int init_emergency_pool(void)
+{
+ struct sysinfo i;
+ si_meminfo(&i);
+ si_swapinfo(&i);
+
+ if (!i.totalhigh)
+ return 0;
+
+ spin_lock_irq(&emergency_lock);
+ while (nr_emergency_pages < POOL_SIZE) {
+ struct page * page = alloc_page(GFP_ATOMIC);
+ if (!page) {
+ printk("couldn't refill highmem emergency pages");
+ break;
+ }
+ list_add(&page->list, &emergency_pages);
+ nr_emergency_pages++;
+ }
+ while (nr_emergency_bhs < POOL_SIZE) {
+ struct buffer_head * bh = kmem_cache_alloc(bh_cachep, SLAB_ATOMIC);
+ if (!bh) {
+ printk("couldn't refill highmem emergency bhs");
+ break;
+ }
+ list_add(&bh->b_inode_buffers, &emergency_bhs);
+ nr_emergency_bhs++;
+ }
+ spin_unlock_irq(&emergency_lock);
+ printk("allocated %d pages and %d bhs reserved for the highmem
bounces\n",
+ nr_emergency_pages, nr_emergency_bhs);
+
+ return 0;
+}
+
+__initcall(init_emergency_pool);
+
+static void bounce_end_io_write (struct buffer_head *bh, int uptodate)
+{
+ bounce_end_io(bh, uptodate);
+}
+
+static void bounce_end_io_read (struct buffer_head *bh, int uptodate)
+{
+ struct buffer_head *bh_orig = (struct buffer_head *)(bh->b_private);
+
+ if (uptodate)
+ copy_to_high_bh_irq(bh_orig, bh);
+ bounce_end_io(bh, uptodate);
+}
+
+struct page *alloc_bounce_page (void)
+{
+ struct list_head *tmp;
+ struct page *page;
+
+ page = alloc_page(GFP_NOHIGHIO);
+ if (page)
+ return page;
+ /*
+ * No luck. First, kick the VM so it doesn't idle around while
+ * we are using up our emergency rations.
+ */
+ wakeup_bdflush();
+
+repeat_alloc:
+ /*
+ * Try to allocate from the emergency pool.
+ */
+ tmp = &emergency_pages;
+ spin_lock_irq(&emergency_lock);
+ if (!list_empty(tmp)) {
+ page = list_entry(tmp->next, struct page, list);
+ list_del(tmp->next);
+ nr_emergency_pages--;
+ }
+ spin_unlock_irq(&emergency_lock);
+ if (page)
+ return page;
+
+ /* we need to wait for I/O completion */
+ run_task_queue(&tq_disk);
+
+ yield();
+ goto repeat_alloc;
+}
+
+struct buffer_head *alloc_bounce_bh (void)
+{
+ struct list_head *tmp;
+ struct buffer_head *bh;
+
+ bh = kmem_cache_alloc(bh_cachep, SLAB_NOHIGHIO);
+ if (bh)
+ return bh;
+ /*
+ * No luck. First, kick the VM so it doesn't idle around while
+ * we are using up our emergency rations.
+ */
+ wakeup_bdflush();
+
+repeat_alloc:
+ /*
+ * Try to allocate from the emergency pool.
+ */
+ tmp = &emergency_bhs;
+ spin_lock_irq(&emergency_lock);
+ if (!list_empty(tmp)) {
+ bh = list_entry(tmp->next, struct buffer_head, b_inode_buffers);
+ list_del(tmp->next);
+ nr_emergency_bhs--;
+ }
+ spin_unlock_irq(&emergency_lock);
+ if (bh)
+ return bh;
+
+ /* we need to wait for I/O completion */
+ run_task_queue(&tq_disk);
+
+ yield();
+ goto repeat_alloc;
+}
+
+struct buffer_head * create_bounce(int rw, struct buffer_head * bh_orig)
+{
+ struct page *page;
+ struct buffer_head *bh;
+
+ if (!PageHighMem(bh_orig->b_page))
+ return bh_orig;
+
+ bh = alloc_bounce_bh();
+ /*
+ * This is wasteful for 1k buffers, but this is a stopgap measure
+ * and we are being ineffective anyway. This approach simplifies
+ * things immensely. On boxes with more than 4GB RAM this should
+ * not be an issue anyway.
+ */
+ page = alloc_bounce_page();
+
+ set_bh_page(bh, page, 0);
+
+ bh->b_next = NULL;
+ bh->b_blocknr = bh_orig->b_blocknr;
+ bh->b_size = bh_orig->b_size;
+ bh->b_list = -1;
+ bh->b_dev = bh_orig->b_dev;
+ bh->b_count = bh_orig->b_count;
+ bh->b_rdev = bh_orig->b_rdev;
+ bh->b_state = bh_orig->b_state;
+#ifdef HIGHMEM_DEBUG
+ bh->b_flushtime = jiffies;
+ bh->b_next_free = NULL;
+ bh->b_prev_free = NULL;
+ /* bh->b_this_page */
+ bh->b_reqnext = NULL;
+ bh->b_pprev = NULL;
+#endif
+ /* bh->b_page */
+ if (rw == WRITE) {
+ bh->b_end_io = bounce_end_io_write;
+ copy_from_high_bh(bh, bh_orig);
+ } else
+ bh->b_end_io = bounce_end_io_read;
+ bh->b_private = (void *)bh_orig;
+ bh->b_rsector = bh_orig->b_rsector;
+#ifdef HIGHMEM_DEBUG
+ memset(&bh->b_wait, -1, sizeof(bh->b_wait));
+#endif
+
+ return bh;
+}
+
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/mm/memory.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/mm/memory.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,1548 @@
+/*
+ * linux/mm/memory.c
+ *
+ * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
+ */
+
+/*
+ * demand-loading started 01.12.91 - seems it is high on the list of
+ * things wanted, and it should be easy to implement. - Linus
+ */
+
+/*
+ * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
+ * pages started 02.12.91, seems to work. - Linus.
+ *
+ * Tested sharing by executing about 30 /bin/sh: under the old kernel it
+ * would have taken more than the 6M I have free, but it worked well as
+ * far as I could see.
+ *
+ * Also corrected some "invalidate()"s - I wasn't doing enough of them.
+ */
+
+/*
+ * Real VM (paging to/from disk) started 18.12.91. Much more work and
+ * thought has to go into this. Oh, well..
+ * 19.12.91 - works, somewhat. Sometimes I get faults, don't know why.
+ * Found it. Everything seems to work now.
+ * 20.12.91 - Ok, making the swap-device changeable like the root.
+ */
+
+/*
+ * 05.04.94 - Multi-page memory management added for v1.1.
+ * Idea by Alex Bligh (alex@xxxxxxxxxxxxxxx)
+ *
+ * 16.07.99 - Support of BIGMEM added by Gerhard Wichert, Siemens AG
+ * (Gerhard.Wichert@xxxxxxxxxxxxxx)
+ */
+
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/swap.h>
+#include <linux/smp_lock.h>
+#include <linux/swapctl.h>
+#include <linux/iobuf.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <linux/module.h>
+
+#include <asm/pgalloc.h>
+#include <asm/uaccess.h>
+#include <asm/tlb.h>
+
+unsigned long max_mapnr;
+unsigned long num_physpages;
+unsigned long num_mappedpages;
+void * high_memory;
+struct page *highmem_start_page;
+
+/*
+ * We special-case the C-O-W ZERO_PAGE, because it's such
+ * a common occurrence (no need to read the page to know
+ * that it's zero - better for the cache and memory subsystem).
+ */
+static inline void copy_cow_page(struct page * from, struct page * to, unsigned long address)
+{
+ if (from == ZERO_PAGE(address)) {
+ clear_user_highpage(to, address);
+ return;
+ }
+ copy_user_highpage(to, from, address);
+}
+
+mem_map_t * mem_map;
+
+/*
+ * Called by TLB shootdown
+ */
+void __free_pte(pte_t pte)
+{
+ struct page *page = pte_page(pte);
+ if ((!VALID_PAGE(page)) || PageReserved(page))
+ return;
+ if (pte_dirty(pte))
+ set_page_dirty(page);
+ free_page_and_swap_cache(page);
+}
+
+
+/*
+ * Note: this doesn't free the actual pages themselves. That
+ * has been handled earlier when unmapping all the memory regions.
+ */
+static inline void free_one_pmd(pmd_t * dir)
+{
+ pte_t * pte;
+
+ if (pmd_none(*dir))
+ return;
+ if (pmd_bad(*dir)) {
+ pmd_ERROR(*dir);
+ pmd_clear(dir);
+ return;
+ }
+ pte = pte_offset(dir, 0);
+ pmd_clear(dir);
+ pte_free(pte);
+}
+
+static inline void free_one_pgd(pgd_t * dir)
+{
+ int j;
+ pmd_t * pmd;
+
+ if (pgd_none(*dir))
+ return;
+ if (pgd_bad(*dir)) {
+ pgd_ERROR(*dir);
+ pgd_clear(dir);
+ return;
+ }
+ pmd = pmd_offset(dir, 0);
+ pgd_clear(dir);
+ for (j = 0; j < PTRS_PER_PMD ; j++) {
+ prefetchw(pmd+j+(PREFETCH_STRIDE/16));
+ free_one_pmd(pmd+j);
+ }
+ pmd_free(pmd);
+}
+
+/* Low and high watermarks for page table cache.
+ The system should try to have pgt_cache_water[0] <= cache elements <= pgt_cache_water[1]
+ */
+int pgt_cache_water[2] = { 25, 50 };
+
+/* Returns the number of pages freed */
+int check_pgt_cache(void)
+{
+ return do_check_pgt_cache(pgt_cache_water[0], pgt_cache_water[1]);
+}
+
+
+/*
+ * This function clears all user-level page tables of a process - this
+ * is needed by execve(), so that old pages aren't in the way.
+ */
+void clear_page_tables(struct mm_struct *mm, unsigned long first, int nr)
+{
+ pgd_t * page_dir = mm->pgd;
+
+ spin_lock(&mm->page_table_lock);
+ page_dir += first;
+ do {
+ free_one_pgd(page_dir);
+ page_dir++;
+ } while (--nr);
+ XEN_flush_page_update_queue();
+ spin_unlock(&mm->page_table_lock);
+
+ /* keep the page table cache within bounds */
+ check_pgt_cache();
+}
+
+#define PTE_TABLE_MASK ((PTRS_PER_PTE-1) * sizeof(pte_t))
+#define PMD_TABLE_MASK ((PTRS_PER_PMD-1) * sizeof(pmd_t))
+
+/*
+ * copy one vm_area from one task to the other. Assumes the page tables
+ * already present in the new task to be cleared in the whole range
+ * covered by this vma.
+ *
+ * 08Jan98 Merged into one routine from several inline routines to reduce
+ * variable count and make things faster. -jj
+ *
+ * dst->page_table_lock is held on entry and exit,
+ * but may be dropped within pmd_alloc() and pte_alloc().
+ */
+int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
+ struct vm_area_struct *vma)
+{
+ pgd_t * src_pgd, * dst_pgd;
+ unsigned long address = vma->vm_start;
+ unsigned long end = vma->vm_end;
+ unsigned long cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
+
+ src_pgd = pgd_offset(src, address)-1;
+ dst_pgd = pgd_offset(dst, address)-1;
+
+ for (;;) {
+ pmd_t * src_pmd, * dst_pmd;
+
+ src_pgd++; dst_pgd++;
+
+ /* copy_pmd_range */
+
+ if (pgd_none(*src_pgd))
+ goto skip_copy_pmd_range;
+ if (pgd_bad(*src_pgd)) {
+ pgd_ERROR(*src_pgd);
+ pgd_clear(src_pgd);
+skip_copy_pmd_range: address = (address + PGDIR_SIZE) & PGDIR_MASK;
+ if (!address || (address >= end))
+ goto out;
+ continue;
+ }
+
+ src_pmd = pmd_offset(src_pgd, address);
+ dst_pmd = pmd_alloc(dst, dst_pgd, address);
+ if (!dst_pmd)
+ goto nomem;
+
+ do {
+ pte_t * src_pte, * dst_pte;
+
+ /* copy_pte_range */
+
+ if (pmd_none(*src_pmd))
+ goto skip_copy_pte_range;
+ if (pmd_bad(*src_pmd)) {
+ pmd_ERROR(*src_pmd);
+ pmd_clear(src_pmd);
+skip_copy_pte_range: address = (address + PMD_SIZE) & PMD_MASK;
+ if (address >= end)
+ goto out;
+ goto cont_copy_pmd_range;
+ }
+
+ src_pte = pte_offset(src_pmd, address);
+ dst_pte = pte_alloc(dst, dst_pmd, address);
+ if (!dst_pte)
+ goto nomem;
+
+ spin_lock(&src->page_table_lock);
+ do {
+ pte_t pte = *src_pte;
+ struct page *ptepage;
+
+ /* copy_one_pte */
+
+ if (pte_none(pte))
+ goto cont_copy_pte_range_noset;
+ if (!pte_present(pte)) {
+ swap_duplicate(pte_to_swp_entry(pte));
+ goto cont_copy_pte_range;
+ }
+ ptepage = pte_page(pte);
+ if ((!VALID_PAGE(ptepage)) ||
+ PageReserved(ptepage))
+ goto cont_copy_pte_range;
+
+ /* If it's a COW mapping, write protect it both in the parent and the child */
+ if (cow && pte_write(pte)) {
+ /* XEN modification: modified ordering here to avoid RaW hazard. */
+ pte = *src_pte;
+ pte = pte_wrprotect(pte);
+ ptep_set_wrprotect(src_pte);
+ }
+
+ /* If it's a shared mapping, mark it clean in the child */
+ if (vma->vm_flags & VM_SHARED)
+ pte = pte_mkclean(pte);
+ pte = pte_mkold(pte);
+ get_page(ptepage);
+ dst->rss++;
+
+cont_copy_pte_range: set_pte(dst_pte, pte);
+cont_copy_pte_range_noset: address += PAGE_SIZE;
+ if (address >= end)
+ goto out_unlock;
+ src_pte++;
+ dst_pte++;
+ } while ((unsigned long)src_pte & PTE_TABLE_MASK);
+ spin_unlock(&src->page_table_lock);
+
+cont_copy_pmd_range: src_pmd++;
+ dst_pmd++;
+ } while ((unsigned long)src_pmd & PMD_TABLE_MASK);
+ }
+out_unlock:
+ spin_unlock(&src->page_table_lock);
+out:
+ return 0;
+nomem:
+ return -ENOMEM;
+}
+
+/*
+ * Return indicates whether a page was freed so caller can adjust rss
+ */
+static inline void forget_pte(pte_t page)
+{
+ if (!pte_none(page)) {
+ printk("forget_pte: old mapping existed!\n");
+ BUG();
+ }
+}
+
+static inline int zap_pte_range(mmu_gather_t *tlb, pmd_t * pmd, unsigned long address, unsigned long size)
+{
+ unsigned long offset;
+ pte_t * ptep;
+ int freed = 0;
+
+ if (pmd_none(*pmd))
+ return 0;
+ if (pmd_bad(*pmd)) {
+ pmd_ERROR(*pmd);
+ pmd_clear(pmd);
+ return 0;
+ }
+ ptep = pte_offset(pmd, address);
+ offset = address & ~PMD_MASK;
+ if (offset + size > PMD_SIZE)
+ size = PMD_SIZE - offset;
+ size &= PAGE_MASK;
+ for (offset=0; offset < size; ptep++, offset += PAGE_SIZE) {
+ pte_t pte = *ptep;
+ if (pte_none(pte))
+ continue;
+ if (pte_present(pte)) {
+ struct page *page = pte_page(pte);
+ if (VALID_PAGE(page) && !PageReserved(page))
+ freed ++;
+ /* This will eventually call __free_pte on the pte. */
+ tlb_remove_page(tlb, ptep, address + offset);
+ } else {
+ free_swap_and_cache(pte_to_swp_entry(pte));
+ pte_clear(ptep);
+ }
+ }
+
+ return freed;
+}
+
+static inline int zap_pmd_range(mmu_gather_t *tlb, pgd_t * dir, unsigned long address, unsigned long size)
+{
+ pmd_t * pmd;
+ unsigned long end;
+ int freed;
+
+ if (pgd_none(*dir))
+ return 0;
+ if (pgd_bad(*dir)) {
+ pgd_ERROR(*dir);
+ pgd_clear(dir);
+ return 0;
+ }
+ pmd = pmd_offset(dir, address);
+ end = address + size;
+ if (end > ((address + PGDIR_SIZE) & PGDIR_MASK))
+ end = ((address + PGDIR_SIZE) & PGDIR_MASK);
+ freed = 0;
+ do {
+ freed += zap_pte_range(tlb, pmd, address, end - address);
+ address = (address + PMD_SIZE) & PMD_MASK;
+ pmd++;
+ } while (address < end);
+ return freed;
+}
+
+/*
+ * remove user pages in a given range.
+ */
+void zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size)
+{
+ mmu_gather_t *tlb;
+ pgd_t * dir;
+ unsigned long start = address, end = address + size;
+ int freed = 0;
+
+ dir = pgd_offset(mm, address);
+
+ /*
+ * This is a long-lived spinlock. That's fine.
+ * There's no contention, because the page table
+ * lock only protects against kswapd anyway, and
+ * even if kswapd happened to be looking at this
+ * process we _want_ it to get stuck.
+ */
+ if (address >= end)
+ BUG();
+ spin_lock(&mm->page_table_lock);
+ flush_cache_range(mm, address, end);
+ tlb = tlb_gather_mmu(mm);
+
+ do {
+ freed += zap_pmd_range(tlb, dir, address, end - address);
+ address = (address + PGDIR_SIZE) & PGDIR_MASK;
+ dir++;
+ } while (address && (address < end));
+
+ /* this will flush any remaining tlb entries */
+ tlb_finish_mmu(tlb, start, end);
+
+ /*
+ * Update rss for the mm_struct (not necessarily current->mm)
+ * Notice that rss is an unsigned long.
+ */
+ if (mm->rss > freed)
+ mm->rss -= freed;
+ else
+ mm->rss = 0;
+ spin_unlock(&mm->page_table_lock);
+}
+
+/*
+ * Do a quick page-table lookup for a single page.
+ */
+static struct page * follow_page(struct mm_struct *mm, unsigned long address, int write)
+{
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *ptep, pte;
+
+ pgd = pgd_offset(mm, address);
+ if (pgd_none(*pgd) || pgd_bad(*pgd))
+ goto out;
+
+ pmd = pmd_offset(pgd, address);
+ if (pmd_none(*pmd) || pmd_bad(*pmd))
+ goto out;
+
+ ptep = pte_offset(pmd, address);
+ if (!ptep)
+ goto out;
+
+ pte = *ptep;
+ if (pte_present(pte)) {
+ if (!write ||
+ (pte_write(pte) && pte_dirty(pte)))
+ return pte_page(pte);
+ }
+
+out:
+ return 0;
+}
+
+/*
+ * Given a physical address, is there a useful struct page pointing to
+ * it? This may become more complex in the future if we start dealing
+ * with IO-aperture pages in kiobufs.
+ */
+
+static inline struct page * get_page_map(struct page *page)
+{
+ if (!VALID_PAGE(page))
+ return 0;
+ return page;
+}
+
+/*
+ * Please read Documentation/cachetlb.txt before using this function,
+ * accessing foreign memory spaces can cause cache coherency problems.
+ *
+ * Accessing a VM_IO area is even more dangerous, therefore the function
+ * fails if pages is != NULL and a VM_IO area is found.
+ */
+int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start,
+ int len, int write, int force, struct page **pages, struct vm_area_struct **vmas)
+{
+ int i;
+ unsigned int flags;
+
+ /*
+ * Require read or write permissions.
+ * If 'force' is set, we only require the "MAY" flags.
+ */
+ flags = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
+ flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
+ i = 0;
+
+ do {
+ struct vm_area_struct * vma;
+
+ vma = find_extend_vma(mm, start);
+
+ if ( !vma || (pages && vma->vm_flags & VM_IO) || !(flags & vma->vm_flags) )
+ return i ? : -EFAULT;
+
+ spin_lock(&mm->page_table_lock);
+ do {
+ struct page *map;
+ while (!(map = follow_page(mm, start, write))) {
+ spin_unlock(&mm->page_table_lock);
+ switch (handle_mm_fault(mm, vma, start, write)) {
+ case 1:
+ tsk->min_flt++;
+ break;
+ case 2:
+ tsk->maj_flt++;
+ break;
+ case 0:
+ if (i) return i;
+ return -EFAULT;
+ default:
+ if (i) return i;
+ return -ENOMEM;
+ }
+ spin_lock(&mm->page_table_lock);
+ }
+ if (pages) {
+ pages[i] = get_page_map(map);
+ /* FIXME: call the correct function,
+ * depending on the type of the found page
+ */
+ if (!pages[i] || PageReserved(pages[i])) {
+ if (pages[i] != ZERO_PAGE(start))
+ goto bad_page;
+ } else
+ page_cache_get(pages[i]);
+ }
+ if (vmas)
+ vmas[i] = vma;
+ i++;
+ start += PAGE_SIZE;
+ len--;
+ } while(len && start < vma->vm_end);
+ spin_unlock(&mm->page_table_lock);
+ } while(len);
+out:
+ return i;
+
+ /*
+ * We found an invalid page in the VMA. Release all we have
+ * so far and fail.
+ */
+bad_page:
+ spin_unlock(&mm->page_table_lock);
+ while (i--)
+ page_cache_release(pages[i]);
+ i = -EFAULT;
+ goto out;
+}
+
+EXPORT_SYMBOL(get_user_pages);
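+
+/*
+ * Calling convention, for reference: the caller must hold mm->mmap_sem
+ * (map_user_kiobuf() below is a canonical example); on success the return
+ * value is the number of pages actually mapped, possibly fewer than
+ * requested, and a negative errno is returned on total failure.
+ */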
+
+/*
+ * Force in an entire range of pages from the current process's user VA,
+ * and pin them in physical memory.
+ */
+#define dprintk(x...)
+
+int map_user_kiobuf(int rw, struct kiobuf *iobuf, unsigned long va, size_t len)
+{
+ int pgcount, err;
+ struct mm_struct * mm;
+
+ /* Make sure the iobuf is not already mapped somewhere. */
+ if (iobuf->nr_pages)
+ return -EINVAL;
+
+ mm = current->mm;
+ dprintk ("map_user_kiobuf: begin\n");
+
+ pgcount = (va + len + PAGE_SIZE - 1)/PAGE_SIZE - va/PAGE_SIZE;
+ /* mapping 0 bytes is not permitted */
+ if (!pgcount) BUG();
+ err = expand_kiobuf(iobuf, pgcount);
+ if (err)
+ return err;
+
+ iobuf->locked = 0;
+ iobuf->offset = va & (PAGE_SIZE-1);
+ iobuf->length = len;
+
+ /* Try to fault in all of the necessary pages */
+ down_read(&mm->mmap_sem);
+ /* rw==READ means read from disk, write into memory area */
+ err = get_user_pages(current, mm, va, pgcount,
+ (rw==READ), 0, iobuf->maplist, NULL);
+ up_read(&mm->mmap_sem);
+ if (err < 0) {
+ unmap_kiobuf(iobuf);
+ dprintk ("map_user_kiobuf: end %d\n", err);
+ return err;
+ }
+ iobuf->nr_pages = err;
+ while (pgcount--) {
+ /* FIXME: flush superfluous for rw==READ,
+ * probably wrong function for rw==WRITE
+ */
+ flush_dcache_page(iobuf->maplist[pgcount]);
+ }
+ dprintk ("map_user_kiobuf: end OK\n");
+ return 0;
+}
+
+/*
+ * Mark all of the pages in a kiobuf as dirty
+ *
+ * We need to be able to deal with short reads from disk: if an IO error
+ * occurs, the number of bytes read into memory may be less than the
+ * size of the kiobuf, so we have to stop marking pages dirty once the
+ * requested byte count has been reached.
+ *
+ * Must be called from process context - set_page_dirty() takes VFS locks.
+ */
+
+void mark_dirty_kiobuf(struct kiobuf *iobuf, int bytes)
+{
+ int index, offset, remaining;
+ struct page *page;
+
+ index = iobuf->offset >> PAGE_SHIFT;
+ offset = iobuf->offset & ~PAGE_MASK;
+ remaining = bytes;
+ if (remaining > iobuf->length)
+ remaining = iobuf->length;
+
+ while (remaining > 0 && index < iobuf->nr_pages) {
+ page = iobuf->maplist[index];
+
+ if (!PageReserved(page))
+ set_page_dirty(page);
+
+ remaining -= (PAGE_SIZE - offset);
+ offset = 0;
+ index++;
+ }
+}
+
+/*
+ * Unmap all of the pages referenced by a kiobuf. We release the pages,
+ * and unlock them if they were locked.
+ */
+
+void unmap_kiobuf (struct kiobuf *iobuf)
+{
+ int i;
+ struct page *map;
+
+ for (i = 0; i < iobuf->nr_pages; i++) {
+ map = iobuf->maplist[i];
+ if (map) {
+ if (iobuf->locked)
+ UnlockPage(map);
+ /* FIXME: cache flush missing for rw==READ
+ * FIXME: call the correct reference counting function
+ */
+ page_cache_release(map);
+ }
+ }
+
+ iobuf->nr_pages = 0;
+ iobuf->locked = 0;
+}
+
+
+/*
+ * Lock down all of the pages of a kiovec for IO.
+ *
+ * If any page is mapped twice in the kiovec, we return the error -EINVAL.
+ *
+ * The optional wait parameter causes the lock call to block until all
+ * pages can be locked if set. If wait==0, the lock operation is
+ * aborted if any locked pages are found and -EAGAIN is returned.
+ */
+
+int lock_kiovec(int nr, struct kiobuf *iovec[], int wait)
+{
+ struct kiobuf *iobuf;
+ int i, j;
+ struct page *page, **ppage;
+ int doublepage = 0;
+ int repeat = 0;
+
+ repeat:
+
+ for (i = 0; i < nr; i++) {
+ iobuf = iovec[i];
+
+ if (iobuf->locked)
+ continue;
+
+ ppage = iobuf->maplist;
+ for (j = 0; j < iobuf->nr_pages; ppage++, j++) {
+ page = *ppage;
+ if (!page)
+ continue;
+
+ if (TryLockPage(page)) {
+ while (j--) {
+ struct page *tmp = *--ppage;
+ if (tmp)
+ UnlockPage(tmp);
+ }
+ goto retry;
+ }
+ }
+ iobuf->locked = 1;
+ }
+
+ return 0;
+
+ retry:
+
+ /*
+ * We couldn't lock one of the pages. Undo the locking so far,
+ * wait on the page we got to, and try again.
+ */
+
+ unlock_kiovec(nr, iovec);
+ if (!wait)
+ return -EAGAIN;
+
+ /*
+ * Did the release also unlock the page we got stuck on?
+ */
+ if (!PageLocked(page)) {
+ /*
+ * If so, we may well have the page mapped twice
+ * in the IO address range. Bad news. Of
+ * course, it _might_ just be a coincidence,
+ * but if it happens more than once, chances
+ * are we have a double-mapped page.
+ */
+ if (++doublepage >= 3)
+ return -EINVAL;
+
+ /* Try again... */
+ wait_on_page(page);
+ }
+
+ if (++repeat < 16)
+ goto repeat;
+ return -EAGAIN;
+}
+
+/*
+ * Unlock all of the pages of a kiovec after IO.
+ */
+
+int unlock_kiovec(int nr, struct kiobuf *iovec[])
+{
+ struct kiobuf *iobuf;
+ int i, j;
+ struct page *page, **ppage;
+
+ for (i = 0; i < nr; i++) {
+ iobuf = iovec[i];
+
+ if (!iobuf->locked)
+ continue;
+ iobuf->locked = 0;
+
+ ppage = iobuf->maplist;
+ for (j = 0; j < iobuf->nr_pages; ppage++, j++) {
+ page = *ppage;
+ if (!page)
+ continue;
+ UnlockPage(page);
+ }
+ }
+ return 0;
+}
+
+static inline void zeromap_pte_range(pte_t * pte, unsigned long address,
+ unsigned long size, pgprot_t prot)
+{
+ unsigned long end;
+
+ address &= ~PMD_MASK;
+ end = address + size;
+ if (end > PMD_SIZE)
+ end = PMD_SIZE;
+ do {
+ pte_t zero_pte = pte_wrprotect(mk_pte(ZERO_PAGE(address), prot));
+ pte_t oldpage = ptep_get_and_clear(pte);
+ set_pte(pte, zero_pte);
+ forget_pte(oldpage);
+ address += PAGE_SIZE;
+ pte++;
+ } while (address && (address < end));
+}
+
+static inline int zeromap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address,
+ unsigned long size, pgprot_t prot)
+{
+ unsigned long end;
+
+ address &= ~PGDIR_MASK;
+ end = address + size;
+ if (end > PGDIR_SIZE)
+ end = PGDIR_SIZE;
+ do {
+ pte_t * pte = pte_alloc(mm, pmd, address);
+ if (!pte)
+ return -ENOMEM;
+ zeromap_pte_range(pte, address, end - address, prot);
+ address = (address + PMD_SIZE) & PMD_MASK;
+ pmd++;
+ } while (address && (address < end));
+ return 0;
+}
+
+int zeromap_page_range(unsigned long address, unsigned long size, pgprot_t prot)
+{
+ int error = 0;
+ pgd_t * dir;
+ unsigned long beg = address;
+ unsigned long end = address + size;
+ struct mm_struct *mm = current->mm;
+
+ dir = pgd_offset(mm, address);
+ flush_cache_range(mm, beg, end);
+ if (address >= end)
+ BUG();
+
+ spin_lock(&mm->page_table_lock);
+ do {
+ pmd_t *pmd = pmd_alloc(mm, dir, address);
+ error = -ENOMEM;
+ if (!pmd)
+ break;
+ error = zeromap_pmd_range(mm, pmd, address, end - address, prot);
+ if (error)
+ break;
+ address = (address + PGDIR_SIZE) & PGDIR_MASK;
+ dir++;
+ } while (address && (address < end));
+ spin_unlock(&mm->page_table_lock);
+ flush_tlb_range(mm, beg, end);
+ return error;
+}
+
+/*
+ * Maps a range of physical memory into the requested pages. The old
+ * mappings are removed. Any references to nonexistent pages result
+ * in null mappings (currently treated as "copy-on-access").
+ */
+static inline void remap_pte_range(pte_t * pte, unsigned long address, unsigned long size,
+ unsigned long phys_addr, pgprot_t prot)
+{
+ unsigned long end;
+
+ address &= ~PMD_MASK;
+ end = address + size;
+ if (end > PMD_SIZE)
+ end = PMD_SIZE;
+ do {
+ struct page *page;
+ pte_t oldpage;
+ oldpage = ptep_get_and_clear(pte);
+
+ page = virt_to_page(__va(phys_addr));
+ if ((!VALID_PAGE(page)) || PageReserved(page))
+ set_pte(pte, mk_pte_phys(phys_addr, prot));
+ forget_pte(oldpage);
+ address += PAGE_SIZE;
+ phys_addr += PAGE_SIZE;
+ pte++;
+ } while (address && (address < end));
+}
+
+static inline int remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
+ unsigned long phys_addr, pgprot_t prot)
+{
+ unsigned long end;
+
+ address &= ~PGDIR_MASK;
+ end = address + size;
+ if (end > PGDIR_SIZE)
+ end = PGDIR_SIZE;
+ phys_addr -= address;
+ do {
+ pte_t * pte = pte_alloc(mm, pmd, address);
+ if (!pte)
+ return -ENOMEM;
+ remap_pte_range(pte, address, end - address, address + phys_addr, prot);
+ address = (address + PMD_SIZE) & PMD_MASK;
+ pmd++;
+ } while (address && (address < end));
+ return 0;
+}
+
+/* Note: this is only safe if the mm semaphore is held when called. */
+int remap_page_range(unsigned long from, unsigned long phys_addr, unsigned long size, pgprot_t prot)
+{
+ int error = 0;
+ pgd_t * dir;
+ unsigned long beg = from;
+ unsigned long end = from + size;
+ struct mm_struct *mm = current->mm;
+
+ phys_addr -= from;
+ dir = pgd_offset(mm, from);
+ flush_cache_range(mm, beg, end);
+ if (from >= end)
+ BUG();
+
+ spin_lock(&mm->page_table_lock);
+ do {
+ pmd_t *pmd = pmd_alloc(mm, dir, from);
+ error = -ENOMEM;
+ if (!pmd)
+ break;
+ error = remap_pmd_range(mm, pmd, from, end - from, phys_addr + from, prot);
+ if (error)
+ break;
+ from = (from + PGDIR_SIZE) & PGDIR_MASK;
+ dir++;
+ } while (from && (from < end));
+ spin_unlock(&mm->page_table_lock);
+ flush_tlb_range(mm, beg, end);
+ return error;
+}
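+
+/*
+ * A minimal usage sketch (hypothetical driver; mydrv_phys_base is assumed
+ * and not part of this file). A 2.4-style mmap() method would map a
+ * physical region into user space roughly like this:
+ *
+ *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
+ *	{
+ *		unsigned long size = vma->vm_end - vma->vm_start;
+ *		unsigned long phys = mydrv_phys_base +
+ *				     (vma->vm_pgoff << PAGE_SHIFT);
+ *		if (remap_page_range(vma->vm_start, phys, size,
+ *				     vma->vm_page_prot))
+ *			return -EAGAIN;
+ *		return 0;
+ *	}
+ *
+ * The mmap() path already holds the mm semaphore, satisfying the note
+ * above.
+ */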
+
+/*
+ * Establish a new mapping:
+ * - flush the old one
+ * - update the page tables
+ * - inform the TLB about the new one
+ *
+ * We hold the mm semaphore for reading and vma->vm_mm->page_table_lock
+ */
+static inline void establish_pte(struct vm_area_struct * vma, unsigned long address, pte_t *page_table, pte_t entry)
+{
+#ifdef CONFIG_XEN
+ if ( likely(vma->vm_mm == current->mm) ) {
+ XEN_flush_page_update_queue();
+ HYPERVISOR_update_va_mapping(address>>PAGE_SHIFT, entry, UVMF_INVLPG);
+ } else {
+ set_pte(page_table, entry);
+ flush_tlb_page(vma, address);
+ }
+#else
+ set_pte(page_table, entry);
+ flush_tlb_page(vma, address);
+#endif
+ update_mmu_cache(vma, address, entry);
+}
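+
+/*
+ * A note on the Xen branch above: for the common current->mm case, the
+ * queued page-table updates are flushed and a single
+ * HYPERVISOR_update_va_mapping hypercall both installs the new pte and
+ * invalidates the stale TLB entry (UVMF_INVLPG), which is cheaper than
+ * queueing a set_pte() and issuing a separate flush_tlb_page().
+ */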
+
+/*
+ * We hold the mm semaphore for reading and vma->vm_mm->page_table_lock
+ */
+static inline void break_cow(struct vm_area_struct * vma, struct page * new_page, unsigned long address,
+ pte_t *page_table)
+{
+ flush_page_to_ram(new_page);
+ flush_cache_page(vma, address);
+ establish_pte(vma, address, page_table, pte_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot))));
+}
+
+/*
+ * This routine handles present pages, when users try to write
+ * to a shared page. It is done by copying the page to a new address
+ * and decrementing the shared-page counter for the old page.
+ *
+ * Goto-purists beware: the only reason for goto's here is that it results
+ * in better assembly code.. The "default" path will see no jumps at all.
+ *
+ * Note that this routine assumes that the protection checks have been
+ * done by the caller (the low-level page fault routine in most cases).
+ * Thus we can safely just mark it writable once we've done any necessary
+ * COW.
+ *
+ * We also mark the page dirty at this point even though the page will
+ * change only once the write actually happens. This avoids a few races,
+ * and potentially makes it more efficient.
+ *
+ * We hold the mm semaphore and the page_table_lock on entry and exit
+ * with the page_table_lock released.
+ */
+static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
+ unsigned long address, pte_t *page_table, pte_t pte)
+{
+ struct page *old_page, *new_page;
+
+ old_page = pte_page(pte);
+ if (!VALID_PAGE(old_page))
+ goto bad_wp_page;
+
+ if (!TryLockPage(old_page)) {
+ int reuse = can_share_swap_page(old_page);
+ unlock_page(old_page);
+ if (reuse) {
+ flush_cache_page(vma, address);
+ establish_pte(vma, address, page_table,
pte_mkyoung(pte_mkdirty(pte_mkwrite(pte))));
+ spin_unlock(&mm->page_table_lock);
+ return 1; /* Minor fault */
+ }
+ }
+
+ /*
+ * Ok, we need to copy. Oh, well..
+ */
+ page_cache_get(old_page);
+ spin_unlock(&mm->page_table_lock);
+
+ new_page = alloc_page(GFP_HIGHUSER);
+ if (!new_page)
+ goto no_mem;
+ copy_cow_page(old_page,new_page,address);
+
+ /*
+ * Re-check the pte - we dropped the lock
+ */
+ spin_lock(&mm->page_table_lock);
+ if (pte_same(*page_table, pte)) {
+ if (PageReserved(old_page))
+ ++mm->rss;
+ break_cow(vma, new_page, address, page_table);
+ if (vm_anon_lru)
+ lru_cache_add(new_page);
+
+ /* Free the old page.. */
+ new_page = old_page;
+ }
+ spin_unlock(&mm->page_table_lock);
+ page_cache_release(new_page);
+ page_cache_release(old_page);
+ return 1; /* Minor fault */
+
+bad_wp_page:
+ spin_unlock(&mm->page_table_lock);
+ printk("do_wp_page: bogus page at address %08lx (page
0x%lx)\n",address,(unsigned long)old_page);
+ return -1;
+no_mem:
+ page_cache_release(old_page);
+ return -1;
+}
+
+static void vmtruncate_list(struct vm_area_struct *mpnt, unsigned long pgoff)
+{
+ do {
+ struct mm_struct *mm = mpnt->vm_mm;
+ unsigned long start = mpnt->vm_start;
+ unsigned long end = mpnt->vm_end;
+ unsigned long len = end - start;
+ unsigned long diff;
+
+ /* mapping wholly truncated? */
+ if (mpnt->vm_pgoff >= pgoff) {
+ zap_page_range(mm, start, len);
+ continue;
+ }
+
+ /* mapping wholly unaffected? */
+ len = len >> PAGE_SHIFT;
+ diff = pgoff - mpnt->vm_pgoff;
+ if (diff >= len)
+ continue;
+
+ /* Ok, partially affected.. */
+ start += diff << PAGE_SHIFT;
+ len = (len - diff) << PAGE_SHIFT;
+ zap_page_range(mm, start, len);
+ } while ((mpnt = mpnt->vm_next_share) != NULL);
+}
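+/*
+ * Worked example (illustrative): a vma mapping file pages 10..19
+ * (vm_pgoff == 10, ten pages long) truncated at pgoff == 15 is only
+ * partially affected: diff = 15 - 10 = 5, so the zap starts
+ * 5 << PAGE_SHIFT bytes into the vma and covers the last five pages.
+ */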
+
+/*
+ * Handle all mappings that got truncated by a "truncate()"
+ * system call.
+ *
+ * NOTE! We have to be ready to update the memory sharing
+ * between the file and the memory map for a potential last
+ * incomplete page. Ugly, but necessary.
+ */
+int vmtruncate(struct inode * inode, loff_t offset)
+{
+ unsigned long pgoff;
+ struct address_space *mapping = inode->i_mapping;
+ unsigned long limit;
+
+ if (inode->i_size < offset)
+ goto do_expand;
+ inode->i_size = offset;
+ spin_lock(&mapping->i_shared_lock);
+ if (!mapping->i_mmap && !mapping->i_mmap_shared)
+ goto out_unlock;
+
+ pgoff = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ if (mapping->i_mmap != NULL)
+ vmtruncate_list(mapping->i_mmap, pgoff);
+ if (mapping->i_mmap_shared != NULL)
+ vmtruncate_list(mapping->i_mmap_shared, pgoff);
+
+out_unlock:
+ spin_unlock(&mapping->i_shared_lock);
+ truncate_inode_pages(mapping, offset);
+ goto out_truncate;
+
+do_expand:
+ limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
+ if (limit != RLIM_INFINITY && offset > limit)
+ goto out_sig;
+ if (offset > inode->i_sb->s_maxbytes)
+ goto out;
+ inode->i_size = offset;
+
+out_truncate:
+ if (inode->i_op && inode->i_op->truncate) {
+ lock_kernel();
+ inode->i_op->truncate(inode);
+ unlock_kernel();
+ }
+ return 0;
+out_sig:
+ send_sig(SIGXFSZ, current, 0);
+out:
+ return -EFBIG;
+}
+
+/*
+ * Primitive swap readahead code. We simply read an aligned block of
+ * (1 << page_cluster) entries in the swap area. This method is chosen
+ * because it doesn't cost us any seek time. We also make sure to queue
+ * the 'original' request together with the readahead ones...
+ */
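+/*
+ * For illustration, assuming the common page_cluster value of 4:
+ * a fault on swap offset 0x123 prompts reads of the aligned block of
+ * 1 << 4 = 16 entries at offsets 0x120..0x12f, valid_swaphandles()
+ * supplying the starting offset and the number of handles to read.
+ */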
+void swapin_readahead(swp_entry_t entry)
+{
+ int i, num;
+ struct page *new_page;
+ unsigned long offset;
+
+ /*
+ * Get the number of handles we should do readahead io to.
+ */
+ num = valid_swaphandles(entry, &offset);
+ for (i = 0; i < num; offset++, i++) {
+ /* Ok, do the async read-ahead now */
+ new_page = read_swap_cache_async(SWP_ENTRY(SWP_TYPE(entry), offset));
+ if (!new_page)
+ break;
+ page_cache_release(new_page);
+ }
+ return;
+}
+
+/*
+ * We hold the mm semaphore and the page_table_lock on entry and
+ * should release the pagetable lock on exit..
+ */
+static int do_swap_page(struct mm_struct * mm,
+ struct vm_area_struct * vma, unsigned long address,
+ pte_t * page_table, pte_t orig_pte, int write_access)
+{
+ struct page *page;
+ swp_entry_t entry = pte_to_swp_entry(orig_pte);
+ pte_t pte;
+ int ret = 1;
+
+ spin_unlock(&mm->page_table_lock);
+ page = lookup_swap_cache(entry);
+ if (!page) {
+ swapin_readahead(entry);
+ page = read_swap_cache_async(entry);
+ if (!page) {
+ /*
+ * Back out if somebody else faulted in this pte while
+ * we released the page table lock.
+ */
+ int retval;
+ spin_lock(&mm->page_table_lock);
+ retval = pte_same(*page_table, orig_pte) ? -1 : 1;
+ spin_unlock(&mm->page_table_lock);
+ return retval;
+ }
+
+ /* Had to read the page from swap area: Major fault */
+ ret = 2;
+ }
+
+ mark_page_accessed(page);
+
+ lock_page(page);
+
+ /*
+ * Back out if somebody else faulted in this pte while we
+ * released the page table lock.
+ */
+ spin_lock(&mm->page_table_lock);
+ if (!pte_same(*page_table, orig_pte)) {
+ spin_unlock(&mm->page_table_lock);
+ unlock_page(page);
+ page_cache_release(page);
+ return 1;
+ }
+
+ /* The page isn't present yet, go ahead with the fault. */
+
+ swap_free(entry);
+ if (vm_swap_full())
+ remove_exclusive_swap_page(page);
+
+ mm->rss++;
+ pte = mk_pte(page, vma->vm_page_prot);
+ if (write_access && can_share_swap_page(page))
+ pte = pte_mkdirty(pte_mkwrite(pte));
+ unlock_page(page);
+
+ flush_page_to_ram(page);
+ flush_icache_page(vma, page);
+#ifdef CONFIG_XEN
+ if ( likely(vma->vm_mm == current->mm) ) {
+ XEN_flush_page_update_queue();
+ HYPERVISOR_update_va_mapping(address>>PAGE_SHIFT, pte, 0);
+ } else {
+ set_pte(page_table, pte);
+ XEN_flush_page_update_queue();
+ }
+#else
+ set_pte(page_table, pte);
+#endif
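+ /*
+ * Illustrative note: in the Xen case a fault in the currently
+ * active address space is installed with one direct
+ * HYPERVISOR_update_va_mapping() hypercall on the virtual address,
+ * while a foreign mm falls back to set_pte(), whose queued
+ * page-table update is pushed out by XEN_flush_page_update_queue().
+ */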
+
+ /* No need to invalidate - it was non-present before */
+ update_mmu_cache(vma, address, pte);
+ spin_unlock(&mm->page_table_lock);
+ return ret;
+}
+
+/*
+ * We are called with the MM semaphore and page_table_lock
+ * spinlock held to protect against concurrent faults in
+ * multithreaded programs.
+ */
+static int do_anonymous_page(struct mm_struct * mm, struct vm_area_struct * vma, pte_t *page_table, int write_access, unsigned long addr)
+{
+ pte_t entry;
+
+ /* Read-only mapping of ZERO_PAGE. */
+ entry = pte_wrprotect(mk_pte(ZERO_PAGE(addr), vma->vm_page_prot));
+
+ /* ..except if it's a write access */
+ if (write_access) {
+ struct page *page;
+
+ /* Allocate our own private page. */
+ spin_unlock(&mm->page_table_lock);
+
+ page = alloc_page(GFP_HIGHUSER);
+ if (!page)
+ goto no_mem;
+ clear_user_highpage(page, addr);
+
+ spin_lock(&mm->page_table_lock);
+ if (!pte_none(*page_table)) {
+ page_cache_release(page);
+ spin_unlock(&mm->page_table_lock);
+ return 1;
+ }
+ mm->rss++;
+ flush_page_to_ram(page);
+ entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
+ if (vm_anon_lru)
+ lru_cache_add(page);
+ mark_page_accessed(page);
+ }
+
+#ifdef CONFIG_XEN
+ if ( likely(vma->vm_mm == current->mm) ) {
+ XEN_flush_page_update_queue();
+ HYPERVISOR_update_va_mapping(addr>>PAGE_SHIFT, entry, 0);
+ } else {
+ set_pte(page_table, entry);
+ XEN_flush_page_update_queue();
+ }
+#else
+ set_pte(page_table, entry);
+#endif
+
+ /* No need to invalidate - it was non-present before */
+ update_mmu_cache(vma, addr, entry);
+ spin_unlock(&mm->page_table_lock);
+ return 1; /* Minor fault */
+
+no_mem:
+ return -1;
+}
+
+/*
+ * do_no_page() tries to create a new page mapping. It aggressively
+ * tries to share with existing pages, but makes a separate copy if
+ * the "write_access" parameter is true in order to avoid the next
+ * page fault.
+ *
+ * As this is called only for pages that do not currently exist, we
+ * do not need to flush old virtual caches or the TLB.
+ *
+ * This is called with the MM semaphore held and the page table
+ * spinlock held. Exit with the spinlock released.
+ */
+static int do_no_page(struct mm_struct * mm, struct vm_area_struct * vma,
+ unsigned long address, int write_access, pte_t *page_table)
+{
+ struct page * new_page;
+ pte_t entry;
+
+ if (!vma->vm_ops || !vma->vm_ops->nopage)
+ return do_anonymous_page(mm, vma, page_table, write_access, address);
+ spin_unlock(&mm->page_table_lock);
+
+ new_page = vma->vm_ops->nopage(vma, address & PAGE_MASK, 0);
+
+ if (new_page == NULL) /* no page was available -- SIGBUS */
+ return 0;
+ if (new_page == NOPAGE_OOM)
+ return -1;
+
+ /*
+ * Should we do an early C-O-W break?
+ */
+ if (write_access && !(vma->vm_flags & VM_SHARED)) {
+ struct page * page = alloc_page(GFP_HIGHUSER);
+ if (!page) {
+ page_cache_release(new_page);
+ return -1;
+ }
+ copy_user_highpage(page, new_page, address);
+ page_cache_release(new_page);
+ if (vm_anon_lru)
+ lru_cache_add(page);
+ new_page = page;
+ }
+
+ spin_lock(&mm->page_table_lock);
+ /*
+ * This silly early PAGE_DIRTY setting removes a race
+ * due to the bad i386 page protection. But it's valid
+ * for other architectures too.
+ *
+ * Note that if write_access is true, we either now have
+ * an exclusive copy of the page, or this is a shared mapping,
+ * so we can make it writable and dirty to avoid having to
+ * handle that later.
+ */
+ /* Only go through if we didn't race with anybody else... */
+ if (pte_none(*page_table)) {
+ if (!PageReserved(new_page))
+ ++mm->rss;
+ flush_page_to_ram(new_page);
+ flush_icache_page(vma, new_page);
+ entry = mk_pte(new_page, vma->vm_page_prot);
+ if (write_access)
+ entry = pte_mkwrite(pte_mkdirty(entry));
+#ifdef CONFIG_XEN
+ if ( likely(vma->vm_mm == current->mm) ) {
+ XEN_flush_page_update_queue();
+ HYPERVISOR_update_va_mapping(address>>PAGE_SHIFT, entry, 0);
+ } else {
+ set_pte(page_table, entry);
+ XEN_flush_page_update_queue();
+ }
+#else
+ set_pte(page_table, entry);
+#endif
+ } else {
+ /* One of our sibling threads was faster, back out. */
+ page_cache_release(new_page);
+ spin_unlock(&mm->page_table_lock);
+ return 1;
+ }
+
+ /* no need to invalidate: a not-present page shouldn't be cached */
+ update_mmu_cache(vma, address, entry);
+ spin_unlock(&mm->page_table_lock);
+ return 2; /* Major fault */
+}
+
+/*
+ * These routines also need to handle stuff like marking pages dirty
+ * and/or accessed for architectures that don't do it in hardware (most
+ * RISC architectures). The early dirtying is also good on the i386.
+ *
+ * There is also a hook called "update_mmu_cache()" that architectures
+ * with external mmu caches can use to update those (ie the Sparc or
+ * PowerPC hashed page tables that act as extended TLBs).
+ *
+ * Note the "page_table_lock". It is to protect against kswapd removing
+ * pages from under us. Note that kswapd only ever _removes_ pages, never
+ * adds them. As such, once we have noticed that the page is not present,
+ * we can drop the lock early.
+ *
+ * The adding of pages is protected by the MM semaphore (which we hold),
+ * so we don't need to worry about a page suddenly being added into
+ * our VM.
+ *
+ * We enter with the pagetable spinlock held, we are supposed to
+ * release it when done.
+ */
+static inline int handle_pte_fault(struct mm_struct *mm,
+ struct vm_area_struct * vma, unsigned long address,
+ int write_access, pte_t * pte)
+{
+ pte_t entry;
+
+ entry = *pte;
+ if (!pte_present(entry)) {
+ /*
+ * If it truly wasn't present, we know that kswapd
+ * and the PTE updates will not touch it later. So
+ * drop the lock.
+ */
+ if (pte_none(entry))
+ return do_no_page(mm, vma, address, write_access, pte);
+ return do_swap_page(mm, vma, address, pte, entry, write_access);
+ }
+
+ if (write_access) {
+ if (!pte_write(entry))
+ return do_wp_page(mm, vma, address, pte, entry);
+
+ entry = pte_mkdirty(entry);
+ }
+ entry = pte_mkyoung(entry);
+ establish_pte(vma, address, pte, entry);
+ spin_unlock(&mm->page_table_lock);
+ return 1;
+}
+
+/*
+ * By the time we get here, we already hold the mm semaphore
+ */
+int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct * vma,
+ unsigned long address, int write_access)
+{
+ pgd_t *pgd;
+ pmd_t *pmd;
+
+ current->state = TASK_RUNNING;
+ pgd = pgd_offset(mm, address);
+
+ /*
+ * We need the page table lock to synchronize with kswapd
+ * and the SMP-safe atomic PTE updates.
+ */
+ spin_lock(&mm->page_table_lock);
+ pmd = pmd_alloc(mm, pgd, address);
+
+ if (pmd) {
+ pte_t * pte = pte_alloc(mm, pmd, address);
+ if (pte)
+ return handle_pte_fault(mm, vma, address, write_access, pte);
+ }
+ spin_unlock(&mm->page_table_lock);
+ return -1;
+}
+
+/*
+ * Allocate page middle directory.
+ *
+ * We've already handled the fast-path in-line, and we own the
+ * page table lock.
+ *
+ * On a two-level page table, this ends up actually being entirely
+ * optimized away.
+ */
+pmd_t fastcall *__pmd_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+{
+ pmd_t *new;
+
+ /* "fast" allocation can happen without dropping the lock.. */
+ new = pmd_alloc_one_fast(mm, address);
+ if (!new) {
+ spin_unlock(&mm->page_table_lock);
+ new = pmd_alloc_one(mm, address);
+ spin_lock(&mm->page_table_lock);
+ if (!new)
+ return NULL;
+
+ /*
+ * Because we dropped the lock, we should re-check the
+ * entry, as somebody else could have populated it..
+ */
+ if (!pgd_none(*pgd)) {
+ pmd_free(new);
+ check_pgt_cache();
+ goto out;
+ }
+ }
+ pgd_populate(mm, pgd, new);
+out:
+ return pmd_offset(pgd, address);
+}
+
+/*
+ * Allocate the page table directory.
+ *
+ * We've already handled the fast-path in-line, and we own the
+ * page table lock.
+ */
+pte_t fastcall *pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
+{
+ if (pmd_none(*pmd)) {
+ pte_t *new;
+
+ /* "fast" allocation can happen without dropping the lock.. */
+ new = pte_alloc_one_fast(mm, address);
+ if (!new) {
+ XEN_flush_page_update_queue();
+ spin_unlock(&mm->page_table_lock);
+ new = pte_alloc_one(mm, address);
+ spin_lock(&mm->page_table_lock);
+ if (!new)
+ return NULL;
+
+ /*
+ * Because we dropped the lock, we should re-check the
+ * entry, as somebody else could have populated it..
+ */
+ if (!pmd_none(*pmd)) {
+ pte_free(new);
+ check_pgt_cache();
+ goto out;
+ }
+ }
+ pmd_populate(mm, pmd, new);
+ }
+out:
+ return pte_offset(pmd, address);
+}
+
+int make_pages_present(unsigned long addr, unsigned long end)
+{
+ int ret, len, write;
+ struct vm_area_struct * vma;
+
+ vma = find_vma(current->mm, addr);
+ write = (vma->vm_flags & VM_WRITE) != 0;
+ if (addr >= end)
+ BUG();
+ if (end > vma->vm_end)
+ BUG();
+ len = (end+PAGE_SIZE-1)/PAGE_SIZE-addr/PAGE_SIZE;
+ ret = get_user_pages(current, current->mm, addr,
+ len, write, 0, NULL, NULL);
+ return ret == len ? 0 : -1;
+}
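+/*
+ * Worked example (illustrative): addr == 0x2000, end == 0x5001 gives
+ * len = (0x5001 + PAGE_SIZE - 1)/PAGE_SIZE - 0x2000/PAGE_SIZE = 6 - 2 = 4,
+ * i.e. the partial trailing page is rounded up and faulted in as well.
+ */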
+
+struct page * vmalloc_to_page(void * vmalloc_addr)
+{
+ unsigned long addr = (unsigned long) vmalloc_addr;
+ struct page *page = NULL;
+ pmd_t *pmd;
+ pte_t *pte;
+ pgd_t *pgd;
+
+ pgd = pgd_offset_k(addr);
+ if (!pgd_none(*pgd)) {
+ pmd = pmd_offset(pgd, addr);
+ if (!pmd_none(*pmd)) {
+ pte = pte_offset(pmd, addr);
+ if (pte_present(*pte)) {
+ page = pte_page(*pte);
+ }
+ }
+ }
+ return page;
+}
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/mm/mprotect.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/mm/mprotect.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,344 @@
+/*
+ * linux/mm/mprotect.c
+ *
+ * (C) Copyright 1994 Linus Torvalds
+ */
+#include <linux/slab.h>
+#include <linux/smp_lock.h>
+#include <linux/shm.h>
+#include <linux/mman.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+
+static inline void change_pte_range(pmd_t * pmd, unsigned long address,
+ unsigned long size, pgprot_t newprot)
+{
+ pte_t * pte;
+ unsigned long end;
+
+ if (pmd_none(*pmd))
+ return;
+ if (pmd_bad(*pmd)) {
+ pmd_ERROR(*pmd);
+ pmd_clear(pmd);
+ return;
+ }
+ pte = pte_offset(pmd, address);
+ address &= ~PMD_MASK;
+ end = address + size;
+ if (end > PMD_SIZE)
+ end = PMD_SIZE;
+ do {
+ if (pte_present(*pte)) {
+ pte_t entry;
+
+ /* Avoid an SMP race with hardware updated dirty/clean
+ * bits by wiping the pte and then setting the new pte
+ * into place.
+ */
+ entry = ptep_get_and_clear(pte);
+ set_pte(pte, pte_modify(entry, newprot));
+ }
+ address += PAGE_SIZE;
+ pte++;
+ } while (address && (address < end));
+}
+
+static inline void change_pmd_range(pgd_t * pgd, unsigned long address,
+ unsigned long size, pgprot_t newprot)
+{
+ pmd_t * pmd;
+ unsigned long end;
+
+ if (pgd_none(*pgd))
+ return;
+ if (pgd_bad(*pgd)) {
+ pgd_ERROR(*pgd);
+ pgd_clear(pgd);
+ return;
+ }
+ pmd = pmd_offset(pgd, address);
+ address &= ~PGDIR_MASK;
+ end = address + size;
+ if (end > PGDIR_SIZE)
+ end = PGDIR_SIZE;
+ do {
+ change_pte_range(pmd, address, end - address, newprot);
+ address = (address + PMD_SIZE) & PMD_MASK;
+ pmd++;
+ } while (address && (address < end));
+}
+
+static void change_protection(unsigned long start, unsigned long end, pgprot_t newprot)
+{
+ pgd_t *dir;
+ unsigned long beg = start;
+
+ dir = pgd_offset(current->mm, start);
+ flush_cache_range(current->mm, beg, end);
+ if (start >= end)
+ BUG();
+ spin_lock(¤t->mm->page_table_lock);
+ do {
+ change_pmd_range(dir, start, end - start, newprot);
+ start = (start + PGDIR_SIZE) & PGDIR_MASK;
+ dir++;
+ } while (start && (start < end));
+ spin_unlock(¤t->mm->page_table_lock);
+ flush_tlb_range(current->mm, beg, end);
+ return;
+}
+
+static inline int mprotect_fixup_all(struct vm_area_struct * vma, struct vm_area_struct ** pprev,
+ int newflags, pgprot_t prot)
+{
+ struct vm_area_struct * prev = *pprev;
+ struct mm_struct * mm = vma->vm_mm;
+
+ if (prev && prev->vm_end == vma->vm_start && can_vma_merge(prev, newflags) &&
+ !vma->vm_file && !(vma->vm_flags & VM_SHARED)) {
+ spin_lock(&mm->page_table_lock);
+ prev->vm_end = vma->vm_end;
+ __vma_unlink(mm, vma, prev);
+ spin_unlock(&mm->page_table_lock);
+
+ kmem_cache_free(vm_area_cachep, vma);
+ mm->map_count--;
+
+ return 0;
+ }
+
+ spin_lock(&mm->page_table_lock);
+ vma->vm_flags = newflags;
+ vma->vm_page_prot = prot;
+ spin_unlock(&mm->page_table_lock);
+
+ *pprev = vma;
+
+ return 0;
+}
+
+static inline int mprotect_fixup_start(struct vm_area_struct * vma, struct vm_area_struct ** pprev,
+ unsigned long end,
+ int newflags, pgprot_t prot)
+{
+ struct vm_area_struct * n, * prev = *pprev;
+
+ *pprev = vma;
+
+ if (prev && prev->vm_end == vma->vm_start && can_vma_merge(prev, newflags) &&
+ !vma->vm_file && !(vma->vm_flags & VM_SHARED)) {
+ spin_lock(&vma->vm_mm->page_table_lock);
+ prev->vm_end = end;
+ vma->vm_start = end;
+ spin_unlock(&vma->vm_mm->page_table_lock);
+
+ return 0;
+ }
+ n = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+ if (!n)
+ return -ENOMEM;
+ *n = *vma;
+ n->vm_end = end;
+ n->vm_flags = newflags;
+ n->vm_raend = 0;
+ n->vm_page_prot = prot;
+ if (n->vm_file)
+ get_file(n->vm_file);
+ if (n->vm_ops && n->vm_ops->open)
+ n->vm_ops->open(n);
+ vma->vm_pgoff += (end - vma->vm_start) >> PAGE_SHIFT;
+ lock_vma_mappings(vma);
+ spin_lock(&vma->vm_mm->page_table_lock);
+ vma->vm_start = end;
+ __insert_vm_struct(current->mm, n);
+ spin_unlock(&vma->vm_mm->page_table_lock);
+ unlock_vma_mappings(vma);
+
+ return 0;
+}
+
+static inline int mprotect_fixup_end(struct vm_area_struct * vma, struct vm_area_struct ** pprev,
+ unsigned long start,
+ int newflags, pgprot_t prot)
+{
+ struct vm_area_struct * n;
+
+ n = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+ if (!n)
+ return -ENOMEM;
+ *n = *vma;
+ n->vm_start = start;
+ n->vm_pgoff += (n->vm_start - vma->vm_start) >> PAGE_SHIFT;
+ n->vm_flags = newflags;
+ n->vm_raend = 0;
+ n->vm_page_prot = prot;
+ if (n->vm_file)
+ get_file(n->vm_file);
+ if (n->vm_ops && n->vm_ops->open)
+ n->vm_ops->open(n);
+ lock_vma_mappings(vma);
+ spin_lock(&vma->vm_mm->page_table_lock);
+ vma->vm_end = start;
+ __insert_vm_struct(current->mm, n);
+ spin_unlock(&vma->vm_mm->page_table_lock);
+ unlock_vma_mappings(vma);
+
+ *pprev = n;
+
+ return 0;
+}
+
+static inline int mprotect_fixup_middle(struct vm_area_struct * vma, struct vm_area_struct ** pprev,
+ unsigned long start, unsigned long end,
+ int newflags, pgprot_t prot)
+{
+ struct vm_area_struct * left, * right;
+
+ left = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+ if (!left)
+ return -ENOMEM;
+ right = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+ if (!right) {
+ kmem_cache_free(vm_area_cachep, left);
+ return -ENOMEM;
+ }
+ *left = *vma;
+ *right = *vma;
+ left->vm_end = start;
+ right->vm_start = end;
+ right->vm_pgoff += (right->vm_start - left->vm_start) >> PAGE_SHIFT;
+ left->vm_raend = 0;
+ right->vm_raend = 0;
+ if (vma->vm_file)
+ atomic_add(2,&vma->vm_file->f_count);
+ if (vma->vm_ops && vma->vm_ops->open) {
+ vma->vm_ops->open(left);
+ vma->vm_ops->open(right);
+ }
+ vma->vm_pgoff += (start - vma->vm_start) >> PAGE_SHIFT;
+ vma->vm_raend = 0;
+ vma->vm_page_prot = prot;
+ lock_vma_mappings(vma);
+ spin_lock(&vma->vm_mm->page_table_lock);
+ vma->vm_start = start;
+ vma->vm_end = end;
+ vma->vm_flags = newflags;
+ __insert_vm_struct(current->mm, left);
+ __insert_vm_struct(current->mm, right);
+ spin_unlock(&vma->vm_mm->page_table_lock);
+ unlock_vma_mappings(vma);
+
+ *pprev = right;
+
+ return 0;
+}
+
+static int mprotect_fixup(struct vm_area_struct * vma, struct vm_area_struct ** pprev,
+ unsigned long start, unsigned long end, unsigned int newflags)
+{
+ pgprot_t newprot;
+ int error;
+
+ if (newflags == vma->vm_flags) {
+ *pprev = vma;
+ return 0;
+ }
+ newprot = protection_map[newflags & 0xf];
+ if (start == vma->vm_start) {
+ if (end == vma->vm_end)
+ error = mprotect_fixup_all(vma, pprev, newflags, newprot);
+ else
+ error = mprotect_fixup_start(vma, pprev, end, newflags, newprot);
+ } else if (end == vma->vm_end)
+ error = mprotect_fixup_end(vma, pprev, start, newflags, newprot);
+ else
+ error = mprotect_fixup_middle(vma, pprev, start, end, newflags, newprot);
+
+ if (error)
+ return error;
+
+ change_protection(start, end, newprot);
+ return 0;
+}
+
+asmlinkage long sys_mprotect(unsigned long start, size_t len, unsigned long prot)
+{
+ unsigned long nstart, end, tmp;
+ struct vm_area_struct * vma, * next, * prev;
+ int error = -EINVAL;
+
+ if (start & ~PAGE_MASK)
+ return -EINVAL;
+ len = PAGE_ALIGN(len);
+ end = start + len;
+ if (end < start)
+ return -ENOMEM;
+ if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC))
+ return -EINVAL;
+ if (end == start)
+ return 0;
+
+ down_write(¤t->mm->mmap_sem);
+
+ vma = find_vma_prev(current->mm, start, &prev);
+ error = -ENOMEM;
+ if (!vma || vma->vm_start > start)
+ goto out;
+
+#if defined(CONFIG_XEN_PRIVILEGED_GUEST)
+ /* mprotect() unsupported for I/O mappings in Xenolinux. */
+ error = -EINVAL;
+ if (vma->vm_flags & VM_IO)
+ goto out;
+#endif
+
+ for (nstart = start ; ; ) {
+ unsigned int newflags;
+ int last = 0;
+
+ /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
+
+ newflags = prot | (vma->vm_flags & ~(PROT_READ | PROT_WRITE | PROT_EXEC));
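+ /*
+ * Illustrative note: the low nibble of vm_flags holds VM_READ,
+ * VM_WRITE, VM_EXEC and VM_SHARED, and the next nibble the
+ * corresponding VM_MAY* bits, so (newflags & ~(newflags >> 4)) & 0xf
+ * is non-zero exactly when a permission is requested whose VM_MAY*
+ * counterpart is clear.
+ */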
+ if ((newflags & ~(newflags >> 4)) & 0xf) {
+ error = -EACCES;
+ goto out;
+ }
+
+ if (vma->vm_end > end) {
+ error = mprotect_fixup(vma, &prev, nstart, end, newflags);
+ goto out;
+ }
+ if (vma->vm_end == end)
+ last = 1;
+
+ tmp = vma->vm_end;
+ next = vma->vm_next;
+ error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
+ if (error)
+ goto out;
+ if (last)
+ break;
+ nstart = tmp;
+ vma = next;
+ if (!vma || vma->vm_start != nstart) {
+ error = -ENOMEM;
+ goto out;
+ }
+ }
+ if (next && prev->vm_end == next->vm_start && can_vma_merge(next, prev->vm_flags) &&
+ !prev->vm_file && !(prev->vm_flags & VM_SHARED)) {
+ spin_lock(&prev->vm_mm->page_table_lock);
+ prev->vm_end = next->vm_end;
+ __vma_unlink(prev->vm_mm, next, prev);
+ spin_unlock(&prev->vm_mm->page_table_lock);
+
+ kmem_cache_free(vm_area_cachep, next);
+ prev->vm_mm->map_count--;
+ }
+out:
+ up_write(¤t->mm->mmap_sem);
+ return error;
+}
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/mm/mremap.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/mm/mremap.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,392 @@
+/*
+ * linux/mm/mremap.c
+ *
+ * (C) Copyright 1996 Linus Torvalds
+ */
+
+#include <linux/slab.h>
+#include <linux/smp_lock.h>
+#include <linux/shm.h>
+#include <linux/mman.h>
+#include <linux/swap.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgalloc.h>
+
+extern int vm_enough_memory(long pages);
+
+static inline pte_t *get_one_pte(struct mm_struct *mm, unsigned long addr)
+{
+ pgd_t * pgd;
+ pmd_t * pmd;
+ pte_t * pte = NULL;
+
+ pgd = pgd_offset(mm, addr);
+ if (pgd_none(*pgd))
+ goto end;
+ if (pgd_bad(*pgd)) {
+ pgd_ERROR(*pgd);
+ pgd_clear(pgd);
+ goto end;
+ }
+
+ pmd = pmd_offset(pgd, addr);
+ if (pmd_none(*pmd))
+ goto end;
+ if (pmd_bad(*pmd)) {
+ pmd_ERROR(*pmd);
+ pmd_clear(pmd);
+ goto end;
+ }
+
+ pte = pte_offset(pmd, addr);
+ if (pte_none(*pte))
+ pte = NULL;
+end:
+ return pte;
+}
+
+static inline pte_t *alloc_one_pte(struct mm_struct *mm, unsigned long addr)
+{
+ pmd_t * pmd;
+ pte_t * pte = NULL;
+
+ pmd = pmd_alloc(mm, pgd_offset(mm, addr), addr);
+ if (pmd)
+ pte = pte_alloc(mm, pmd, addr);
+ return pte;
+}
+
+static inline int copy_one_pte(struct mm_struct *mm, pte_t * src, pte_t * dst)
+{
+ int error = 0;
+ pte_t pte;
+
+ if (!pte_none(*src)) {
+ pte = ptep_get_and_clear(src);
+ if (!dst) {
+ /* No dest? We must put it back. */
+ dst = src;
+ error++;
+ }
+ set_pte(dst, pte);
+ }
+ return error;
+}
+
+static int move_one_page(struct mm_struct *mm, unsigned long old_addr, unsigned long new_addr)
+{
+ int error = 0;
+ pte_t * src, * dst;
+
+ spin_lock(&mm->page_table_lock);
+ src = get_one_pte(mm, old_addr);
+ if (src) {
+ dst = alloc_one_pte(mm, new_addr);
+ src = get_one_pte(mm, old_addr);
+ if (src)
+ error = copy_one_pte(mm, src, dst);
+ }
+ spin_unlock(&mm->page_table_lock);
+ return error;
+}
+
+static int move_page_tables(struct mm_struct * mm,
+ unsigned long new_addr, unsigned long old_addr, unsigned long len)
+{
+ unsigned long offset = len;
+
+ flush_cache_range(mm, old_addr, old_addr + len);
+
+ /*
+ * This is not the clever way to do this, but we're taking the
+ * easy way out on the assumption that most remappings will be
+ * only a few pages.. This also makes error recovery easier.
+ */
+ while (offset) {
+ offset -= PAGE_SIZE;
+ if (move_one_page(mm, old_addr + offset, new_addr + offset))
+ goto oops_we_failed;
+ }
+ flush_tlb_range(mm, old_addr, old_addr + len);
+ return 0;
+
+ /*
+ * Ok, the move failed because we didn't have enough pages for
+ * the new page table tree. This is unlikely, but we have to
+ * take the possibility into account. In that case we just move
+ * all the pages back (this will work, because we still have
+ * the old page tables)
+ */
+oops_we_failed:
+ XEN_flush_page_update_queue();
+ flush_cache_range(mm, new_addr, new_addr + len);
+ while ((offset += PAGE_SIZE) < len)
+ move_one_page(mm, new_addr + offset, old_addr + offset);
+ XEN_flush_page_update_queue();
+ zap_page_range(mm, new_addr, len);
+ return -1;
+}
+
+static inline unsigned long move_vma(struct vm_area_struct * vma,
+ unsigned long addr, unsigned long old_len, unsigned long new_len,
+ unsigned long new_addr)
+{
+ struct mm_struct * mm = vma->vm_mm;
+ struct vm_area_struct * new_vma, * next, * prev;
+ int allocated_vma;
+
+ new_vma = NULL;
+ next = find_vma_prev(mm, new_addr, &prev);
+ if (next) {
+ if (prev && prev->vm_end == new_addr &&
+ can_vma_merge(prev, vma->vm_flags) && !vma->vm_file && !(vma->vm_flags & VM_SHARED)) {
+ spin_lock(&mm->page_table_lock);
+ prev->vm_end = new_addr + new_len;
+ spin_unlock(&mm->page_table_lock);
+ new_vma = prev;
+ if (next != prev->vm_next)
+ BUG();
+ if (prev->vm_end == next->vm_start && can_vma_merge(next, prev->vm_flags)) {
+ spin_lock(&mm->page_table_lock);
+ prev->vm_end = next->vm_end;
+ __vma_unlink(mm, next, prev);
+ spin_unlock(&mm->page_table_lock);
+
+ mm->map_count--;
+ kmem_cache_free(vm_area_cachep, next);
+ }
+ } else if (next->vm_start == new_addr + new_len &&
+ can_vma_merge(next, vma->vm_flags) && !vma->vm_file && !(vma->vm_flags & VM_SHARED)) {
+ spin_lock(&mm->page_table_lock);
+ next->vm_start = new_addr;
+ spin_unlock(&mm->page_table_lock);
+ new_vma = next;
+ }
+ } else {
+ prev = find_vma(mm, new_addr-1);
+ if (prev && prev->vm_end == new_addr &&
+ can_vma_merge(prev, vma->vm_flags) && !vma->vm_file && !(vma->vm_flags & VM_SHARED)) {
+ spin_lock(&mm->page_table_lock);
+ prev->vm_end = new_addr + new_len;
+ spin_unlock(&mm->page_table_lock);
+ new_vma = prev;
+ }
+ }
+
+ allocated_vma = 0;
+ if (!new_vma) {
+ new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+ if (!new_vma)
+ goto out;
+ allocated_vma = 1;
+ }
+
+ if (!move_page_tables(current->mm, new_addr, addr, old_len)) {
+ unsigned long vm_locked = vma->vm_flags & VM_LOCKED;
+
+ if (allocated_vma) {
+ *new_vma = *vma;
+ new_vma->vm_start = new_addr;
+ new_vma->vm_end = new_addr+new_len;
+ new_vma->vm_pgoff += (addr-vma->vm_start) >> PAGE_SHIFT;
+ new_vma->vm_raend = 0;
+ if (new_vma->vm_file)
+ get_file(new_vma->vm_file);
+ if (new_vma->vm_ops && new_vma->vm_ops->open)
+ new_vma->vm_ops->open(new_vma);
+ insert_vm_struct(current->mm, new_vma);
+ }
+
+ /* XXX: possible errors masked, mapping might remain */
+ do_munmap(current->mm, addr, old_len);
+
+ current->mm->total_vm += new_len >> PAGE_SHIFT;
+ if (vm_locked) {
+ current->mm->locked_vm += new_len >> PAGE_SHIFT;
+ if (new_len > old_len)
+ make_pages_present(new_addr + old_len,
+ new_addr + new_len);
+ }
+ return new_addr;
+ }
+ if (allocated_vma)
+ kmem_cache_free(vm_area_cachep, new_vma);
+ out:
+ return -ENOMEM;
+}
+
+/*
+ * Expand (or shrink) an existing mapping, potentially moving it at the
+ * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
+ *
+ * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
+ * This option implies MREMAP_MAYMOVE.
+ */
+unsigned long do_mremap(unsigned long addr,
+ unsigned long old_len, unsigned long new_len,
+ unsigned long flags, unsigned long new_addr)
+{
+ struct vm_area_struct *vma;
+ unsigned long ret = -EINVAL;
+
+ if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
+ goto out;
+
+ if (addr & ~PAGE_MASK)
+ goto out;
+
+ old_len = PAGE_ALIGN(old_len);
+ new_len = PAGE_ALIGN(new_len);
+
+ if (old_len > TASK_SIZE || addr > TASK_SIZE - old_len)
+ goto out;
+
+ if (addr >= TASK_SIZE)
+ goto out;
+
+ /* new_addr is only valid if MREMAP_FIXED is specified */
+ if (flags & MREMAP_FIXED) {
+ if (new_addr & ~PAGE_MASK)
+ goto out;
+ if (!(flags & MREMAP_MAYMOVE))
+ goto out;
+
+ if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
+ goto out;
+
+ if (new_addr >= TASK_SIZE)
+ goto out;
+
+ /*
+ * Allow new_len == 0 only if new_addr == addr
+ * to preserve truncation in place (that was working
+ * safe and some app may depend on it).
+ */
+ if (unlikely(!new_len && new_addr != addr))
+ goto out;
+
+ /* Check if the location we're moving into overlaps the
+ * old location at all, and fail if it does.
+ */
+ if ((new_addr <= addr) && (new_addr+new_len) > addr)
+ goto out;
+
+ if ((addr <= new_addr) && (addr+old_len) > new_addr)
+ goto out;
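+ /*
+ * Worked example (illustrative): moving old_len == 0x2000 bytes
+ * from addr == 0x1000 to new_addr == 0x2000 fails here, since
+ * addr <= new_addr < addr + old_len makes the two ranges overlap.
+ */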
+
+ ret = do_munmap(current->mm, new_addr, new_len);
+ if (ret && new_len)
+ goto out;
+ }
+
+ /*
+ * Always allow a shrinking remap: that just unmaps
+ * the unnecessary pages..
+ */
+ if (old_len >= new_len) {
+ ret = do_munmap(current->mm, addr+new_len, old_len - new_len);
+ if (ret && old_len != new_len)
+ goto out;
+ ret = addr;
+ if (!(flags & MREMAP_FIXED) || (new_addr == addr))
+ goto out;
+ }
+
+ /*
+ * Ok, we need to grow.. or relocate.
+ */
+ ret = -EFAULT;
+ vma = find_vma(current->mm, addr);
+ if (!vma || vma->vm_start > addr)
+ goto out;
+ /* We can't remap across vm area boundaries */
+ if (old_len > vma->vm_end - addr)
+ goto out;
+ if (vma->vm_flags & VM_DONTEXPAND) {
+ if (new_len > old_len)
+ goto out;
+ }
+ if (vma->vm_flags & VM_LOCKED) {
+ unsigned long locked = current->mm->locked_vm << PAGE_SHIFT;
+ locked += new_len - old_len;
+ ret = -EAGAIN;
+ if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
+ goto out;
+ }
+ ret = -ENOMEM;
+ if ((current->mm->total_vm << PAGE_SHIFT) + (new_len - old_len)
+ > current->rlim[RLIMIT_AS].rlim_cur)
+ goto out;
+ /* Private writable mapping? Check memory availability.. */
+ if ((vma->vm_flags & (VM_SHARED | VM_WRITE)) == VM_WRITE &&
+ !(flags & MAP_NORESERVE) &&
+ !vm_enough_memory((new_len - old_len) >> PAGE_SHIFT))
+ goto out;
+
+#if defined(CONFIG_XEN_PRIVILEGED_GUEST)
+ /* mremap() unsupported for I/O mappings in Xenolinux. */
+ ret = -EINVAL;
+ if (vma->vm_flags & VM_IO)
+ goto out;
+#endif
+
+ /* old_len exactly to the end of the area..
+ * And we're not relocating the area.
+ */
+ if (old_len == vma->vm_end - addr &&
+ !((flags & MREMAP_FIXED) && (addr != new_addr)) &&
+ (old_len != new_len || !(flags & MREMAP_MAYMOVE))) {
+ unsigned long max_addr = TASK_SIZE;
+ if (vma->vm_next)
+ max_addr = vma->vm_next->vm_start;
+ /* can we just expand the current mapping? */
+ if (max_addr - addr >= new_len) {
+ int pages = (new_len - old_len) >> PAGE_SHIFT;
+ spin_lock(&vma->vm_mm->page_table_lock);
+ vma->vm_end = addr + new_len;
+ spin_unlock(&vma->vm_mm->page_table_lock);
+ current->mm->total_vm += pages;
+ if (vma->vm_flags & VM_LOCKED) {
+ current->mm->locked_vm += pages;
+ make_pages_present(addr + old_len,
+ addr + new_len);
+ }
+ ret = addr;
+ goto out;
+ }
+ }
+
+ /*
+ * We weren't able to just expand or shrink the area,
+ * we need to create a new one and move it..
+ */
+ ret = -ENOMEM;
+ if (flags & MREMAP_MAYMOVE) {
+ if (!(flags & MREMAP_FIXED)) {
+ unsigned long map_flags = 0;
+ if (vma->vm_flags & VM_SHARED)
+ map_flags |= MAP_SHARED;
+
+ new_addr = get_unmapped_area(vma->vm_file, 0, new_len, vma->vm_pgoff, map_flags);
+ ret = new_addr;
+ if (new_addr & ~PAGE_MASK)
+ goto out;
+ }
+ ret = move_vma(vma, addr, old_len, new_len, new_addr);
+ }
+out:
+ return ret;
+}
+
+asmlinkage unsigned long sys_mremap(unsigned long addr,
+ unsigned long old_len, unsigned long new_len,
+ unsigned long flags, unsigned long new_addr)
+{
+ unsigned long ret;
+
+ down_write(¤t->mm->mmap_sem);
+ ret = do_mremap(addr, old_len, new_len, flags, new_addr);
+ up_write(¤t->mm->mmap_sem);
+ return ret;
+}
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/mm/page_alloc.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/mm/page_alloc.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,972 @@
+/*
+ * linux/mm/page_alloc.c
+ *
+ * Manages the free list, the system allocates free pages here.
+ * Note that kmalloc() lives in slab.c
+ *
+ * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
+ * Swap reorganised 29.12.95, Stephen Tweedie
+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
+ * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
+ * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
+ * Zone balancing, Kanoj Sarcar, SGI, Jan 2000
+ */
+
+#include <linux/config.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/swapctl.h>
+#include <linux/interrupt.h>
+#include <linux/pagemap.h>
+#include <linux/bootmem.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+int nr_swap_pages;
+int nr_active_pages;
+int nr_inactive_pages;
+LIST_HEAD(inactive_list);
+LIST_HEAD(active_list);
+pg_data_t *pgdat_list;
+
+/*
+ *
+ * The zone_table array is used to look up the address of the
+ * struct zone corresponding to a given zone number (ZONE_DMA,
+ * ZONE_NORMAL, or ZONE_HIGHMEM).
+ */
+zone_t *zone_table[MAX_NR_ZONES*MAX_NR_NODES];
+EXPORT_SYMBOL(zone_table);
+
+static char *zone_names[MAX_NR_ZONES] = { "DMA", "Normal", "HighMem" };
+static int zone_balance_ratio[MAX_NR_ZONES] __initdata = { 128, 128, 128, };
+static int zone_balance_min[MAX_NR_ZONES] __initdata = { 20 , 20, 20, };
+static int zone_balance_max[MAX_NR_ZONES] __initdata = { 255 , 255, 255, };
+static int lower_zone_reserve_ratio[MAX_NR_ZONES-1] = { 256, 32 };
+
+int vm_gfp_debug = 0;
+
+static void FASTCALL(__free_pages_ok (struct page *page, unsigned int order));
+
+static spinlock_t free_pages_ok_no_irq_lock = SPIN_LOCK_UNLOCKED;
+struct page * free_pages_ok_no_irq_head;
+
+static void do_free_pages_ok_no_irq(void * arg)
+{
+ struct page * page, * __page;
+
+ spin_lock_irq(&free_pages_ok_no_irq_lock);
+
+ page = free_pages_ok_no_irq_head;
+ free_pages_ok_no_irq_head = NULL;
+
+ spin_unlock_irq(&free_pages_ok_no_irq_lock);
+
+ while (page) {
+ __page = page;
+ page = page->next_hash;
+ __free_pages_ok(__page, __page->index);
+ }
+}
+
+static struct tq_struct free_pages_ok_no_irq_task = {
+ .routine = do_free_pages_ok_no_irq,
+};
+
+
+/*
+ * Temporary debugging check.
+ */
+#define BAD_RANGE(zone, page) \
+( \
+ (((page) - mem_map) >= ((zone)->zone_start_mapnr+(zone)->size)) \
+ || (((page) - mem_map) < (zone)->zone_start_mapnr) \
+ || ((zone) != page_zone(page)) \
+)
+
+/*
+ * Freeing function for a buddy system allocator.
+ * Contrary to prior comments, this is *NOT* hairy, and there
+ * is no reason for anyone not to understand it.
+ *
+ * The concept of a buddy system is to maintain direct-mapped tables
+ * (containing bit values) for memory blocks of various "orders".
+ * The bottom level table contains the map for the smallest allocatable
+ * units of memory (here, pages), and each level above it describes
+ * pairs of units from the levels below, hence, "buddies".
+ * At a high level, all that happens here is marking the table entry
+ * at the bottom level available, and propagating the changes upward
+ * as necessary, plus some accounting needed to play nicely with other
+ * parts of the VM system.
+ * At each level, we keep one bit for each pair of blocks, which
+ * is set to 1 iff only one of the pair is allocated. So when we
+ * are allocating or freeing one, we can derive the state of the
+ * other. That is, if we allocate a small block, and both were
+ * free, the remainder of the region must be split into blocks.
+ * If a block is freed, and its buddy is also free, then this
+ * triggers coalescing into a block of larger size.
+ *
+ * -- wli
+ */
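+/*
+ * Worked example (illustrative): freeing the order-2 block at
+ * page_idx == 8 gives mask == ~0UL << 2, hence -mask == 4 and the
+ * buddy at page_idx ^ 4 == 12; the pair's state bit sits at
+ * index == page_idx >> (1 + 2) == 1 in the order-2 bitmap.
+ */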
+
+static void fastcall __free_pages_ok (struct page *page, unsigned int order)
+{
+ unsigned long index, page_idx, mask, flags;
+ free_area_t *area;
+ struct page *base;
+ zone_t *zone;
+
+ if (PageForeign(page))
+ return (PageForeignDestructor(page))(page);
+
+ /*
+ * Yes, think what happens when other parts of the kernel take
+ * a reference to a page in order to pin it for io. -ben
+ */
+ if (PageLRU(page)) {
+ if (unlikely(in_interrupt())) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&free_pages_ok_no_irq_lock, flags);
+ page->next_hash = free_pages_ok_no_irq_head;
+ free_pages_ok_no_irq_head = page;
+ page->index = order;
+
+ spin_unlock_irqrestore(&free_pages_ok_no_irq_lock, flags);
+
+ schedule_task(&free_pages_ok_no_irq_task);
+ return;
+ }
+
+ lru_cache_del(page);
+ }
+
+ if (page->buffers)
+ BUG();
+ if (page->mapping)
+ BUG();
+ if (!VALID_PAGE(page))
+ BUG();
+ if (PageLocked(page))
+ BUG();
+ if (PageActive(page))
+ BUG();
+ ClearPageReferenced(page);
+ ClearPageDirty(page);
+
+ if (current->flags & PF_FREE_PAGES)
+ goto local_freelist;
+ back_local_freelist:
+
+ zone = page_zone(page);
+
+ mask = (~0UL) << order;
+ base = zone->zone_mem_map;
+ page_idx = page - base;
+ if (page_idx & ~mask)
+ BUG();
+ index = page_idx >> (1 + order);
+
+ area = zone->free_area + order;
+
+ spin_lock_irqsave(&zone->lock, flags);
+
+ zone->free_pages -= mask;
+
+ while (mask + (1 << (MAX_ORDER-1))) {
+ struct page *buddy1, *buddy2;
+
+ if (area >= zone->free_area + MAX_ORDER)
+ BUG();
+ if (!__test_and_change_bit(index, area->map))
+ /*
+ * the buddy page is still allocated.
+ */
+ break;
+ /*
+ * Move the buddy up one level.
+ * This code is taking advantage of the identity:
+ * -mask = 1+~mask
+ */
+ buddy1 = base + (page_idx ^ -mask);
+ buddy2 = base + page_idx;
+ if (BAD_RANGE(zone,buddy1))
+ BUG();
+ if (BAD_RANGE(zone,buddy2))
+ BUG();
+
+ list_del(&buddy1->list);
+ mask <<= 1;
+ area++;
+ index >>= 1;
+ page_idx &= mask;
+ }
+ list_add(&(base + page_idx)->list, &area->free_list);
+
+ spin_unlock_irqrestore(&zone->lock, flags);
+ return;
+
+ local_freelist:
+ if (current->nr_local_pages)
+ goto back_local_freelist;
+ if (in_interrupt())
+ goto back_local_freelist;
+
+ list_add(&page->list, ¤t->local_pages);
+ page->index = order;
+ current->nr_local_pages++;
+}
+
+#define MARK_USED(index, order, area) \
+ __change_bit((index) >> (1+(order)), (area)->map)
+
+static inline struct page * expand (zone_t *zone, struct page *page,
+ unsigned long index, int low, int high, free_area_t * area)
+{
+ unsigned long size = 1 << high;
+
+ while (high > low) {
+ if (BAD_RANGE(zone,page))
+ BUG();
+ area--;
+ high--;
+ size >>= 1;
+ list_add(&(page)->list, &(area)->free_list);
+ MARK_USED(index, high, area);
+ index += size;
+ page += size;
+ }
+ if (BAD_RANGE(zone,page))
+ BUG();
+ return page;
+}
+
+static FASTCALL(struct page * rmqueue(zone_t *zone, unsigned int order));
+static struct page * fastcall rmqueue(zone_t *zone, unsigned int order)
+{
+ free_area_t * area = zone->free_area + order;
+ unsigned int curr_order = order;
+ struct list_head *head, *curr;
+ unsigned long flags;
+ struct page *page;
+
+ spin_lock_irqsave(&zone->lock, flags);
+ do {
+ head = &area->free_list;
+ curr = head->next;
+
+ if (curr != head) {
+ unsigned int index;
+
+ page = list_entry(curr, struct page, list);
+ if (BAD_RANGE(zone,page))
+ BUG();
+ list_del(curr);
+ index = page - zone->zone_mem_map;
+ if (curr_order != MAX_ORDER-1)
+ MARK_USED(index, curr_order, area);
+ zone->free_pages -= 1UL << order;
+
+ page = expand(zone, page, index, order, curr_order, area);
+ spin_unlock_irqrestore(&zone->lock, flags);
+
+ set_page_count(page, 1);
+ if (BAD_RANGE(zone,page))
+ BUG();
+ if (PageLRU(page))
+ BUG();
+ if (PageActive(page))
+ BUG();
+ return page;
+ }
+ curr_order++;
+ area++;
+ } while (curr_order < MAX_ORDER);
+ spin_unlock_irqrestore(&zone->lock, flags);
+
+ return NULL;
+}
+
+#ifndef CONFIG_DISCONTIGMEM
+struct page * fastcall _alloc_pages(unsigned int gfp_mask, unsigned int order)
+{
+ return __alloc_pages(gfp_mask, order,
+ contig_page_data.node_zonelists+(gfp_mask & GFP_ZONEMASK));
+}
+#endif
+
+static struct page * FASTCALL(balance_classzone(zone_t *, unsigned int, unsigned int, int *));
+static struct page * fastcall balance_classzone(zone_t * classzone, unsigned int gfp_mask, unsigned int order, int * freed)
+{
+ struct page * page = NULL;
+ int __freed;
+
+ if (in_interrupt())
+ BUG();
+
+ current->allocation_order = order;
+ current->flags |= PF_MEMALLOC | PF_FREE_PAGES;
+
+ __freed = try_to_free_pages_zone(classzone, gfp_mask);
+
+ current->flags &= ~(PF_MEMALLOC | PF_FREE_PAGES);
+
+ if (current->nr_local_pages) {
+ struct list_head * entry, * local_pages;
+ struct page * tmp;
+ int nr_pages;
+
+ local_pages = ¤t->local_pages;
+
+ if (likely(__freed)) {
+ /* pick from the last inserted so we're lifo */
+ entry = local_pages->next;
+ do {
+ tmp = list_entry(entry, struct page, list);
+ if (tmp->index == order && memclass(page_zone(tmp), classzone)) {
+ list_del(entry);
+ current->nr_local_pages--;
+ set_page_count(tmp, 1);
+ page = tmp;
+
+ if (page->buffers)
+ BUG();
+ if (page->mapping)
+ BUG();
+ if (!VALID_PAGE(page))
+ BUG();
+ if (PageLocked(page))
+ BUG();
+ if (PageLRU(page))
+ BUG();
+ if (PageActive(page))
+ BUG();
+ if (PageDirty(page))
+ BUG();
+
+ break;
+ }
+ } while ((entry = entry->next) != local_pages);
+ }
+
+ nr_pages = current->nr_local_pages;
+ /* free in reverse order so that the global order will be lifo */
+ while ((entry = local_pages->prev) != local_pages) {
+ list_del(entry);
+ tmp = list_entry(entry, struct page, list);
+ __free_pages_ok(tmp, tmp->index);
+ if (!nr_pages--)
+ BUG();
+ }
+ current->nr_local_pages = 0;
+ }
+
+ *freed = __freed;
+ return page;
+}
+
+static inline unsigned long zone_free_pages(zone_t * zone, unsigned int order)
+{
+ long free = zone->free_pages - (1UL << order);
+ return free >= 0 ? free : 0;
+}
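+/*
+ * Illustrative note: the watermarks consulted below are sized in
+ * free_area_init_core() as min == mask, low == 2*mask, high == 3*mask;
+ * e.g. a zone clamped to mask == 255 pages gets thresholds of
+ * 255/510/765 free pages for its min/low/high tiers.
+ */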
+
+/*
+ * This is the 'heart' of the zoned buddy allocator:
+ */
+struct page * fastcall __alloc_pages(unsigned int gfp_mask, unsigned int order, zonelist_t *zonelist)
+{
+ zone_t **zone, * classzone;
+ struct page * page;
+ int freed, class_idx;
+
+ zone = zonelist->zones;
+ classzone = *zone;
+ class_idx = zone_idx(classzone);
+
+ for (;;) {
+ zone_t *z = *(zone++);
+ if (!z)
+ break;
+
+ if (zone_free_pages(z, order) > z->watermarks[class_idx].low) {
+ page = rmqueue(z, order);
+ if (page)
+ return page;
+ }
+ }
+
+ classzone->need_balance = 1;
+ mb();
+ if (waitqueue_active(&kswapd_wait))
+ wake_up_interruptible(&kswapd_wait);
+
+ zone = zonelist->zones;
+ for (;;) {
+ unsigned long min;
+ zone_t *z = *(zone++);
+ if (!z)
+ break;
+
+ min = z->watermarks[class_idx].min;
+ if (!(gfp_mask & __GFP_WAIT))
+ min >>= 2;
+ if (zone_free_pages(z, order) > min) {
+ page = rmqueue(z, order);
+ if (page)
+ return page;
+ }
+ }
+
+ /* here we're in the low on memory slow path */
+
+ if ((current->flags & PF_MEMALLOC) &&
+ (!in_interrupt() || (current->flags & PF_MEMDIE))) {
+ zone = zonelist->zones;
+ for (;;) {
+ zone_t *z = *(zone++);
+ if (!z)
+ break;
+
+ page = rmqueue(z, order);
+ if (page)
+ return page;
+ }
+ return NULL;
+ }
+
+ /* Atomic allocations - we can't balance anything */
+ if (!(gfp_mask & __GFP_WAIT))
+ goto out;
+
+ rebalance:
+ page = balance_classzone(classzone, gfp_mask, order, &freed);
+ if (page)
+ return page;
+
+ zone = zonelist->zones;
+ if (likely(freed)) {
+ for (;;) {
+ zone_t *z = *(zone++);
+ if (!z)
+ break;
+
+ if (zone_free_pages(z, order) > z->watermarks[class_idx].min) {
+ page = rmqueue(z, order);
+ if (page)
+ return page;
+ }
+ }
+ goto rebalance;
+ } else {
+ /*
+ * Check whether some other task has been killed meanwhile;
+ * in such a case we can let the allocation succeed.
+ */
+ for (;;) {
+ zone_t *z = *(zone++);
+ if (!z)
+ break;
+
+ if (zone_free_pages(z, order) > z->watermarks[class_idx].high) {
+ page = rmqueue(z, order);
+ if (page)
+ return page;
+ }
+ }
+ }
+
+ out:
+ printk(KERN_NOTICE "__alloc_pages: %u-order allocation failed
(gfp=0x%x/%i)\n",
+ order, gfp_mask, !!(current->flags & PF_MEMALLOC));
+ if (unlikely(vm_gfp_debug))
+ dump_stack();
+ return NULL;
+}
+
+/*
+ * Common helper functions.
+ */
+fastcall unsigned long __get_free_pages(unsigned int gfp_mask, unsigned int order)
+{
+ struct page * page;
+
+ page = alloc_pages(gfp_mask, order);
+ if (!page)
+ return 0;
+ return (unsigned long) page_address(page);
+}
+
+fastcall unsigned long get_zeroed_page(unsigned int gfp_mask)
+{
+ struct page * page;
+
+ page = alloc_pages(gfp_mask, 0);
+ if (page) {
+ void *address = page_address(page);
+ clear_page(address);
+ return (unsigned long) address;
+ }
+ return 0;
+}
+
+fastcall void __free_pages(struct page *page, unsigned int order)
+{
+ if (!PageReserved(page) && put_page_testzero(page))
+ __free_pages_ok(page, order);
+}
+
+fastcall void free_pages(unsigned long addr, unsigned int order)
+{
+ if (addr != 0)
+ __free_pages(virt_to_page(addr), order);
+}
+
+/*
+ * Total amount of free (allocatable) RAM:
+ */
+unsigned int nr_free_pages (void)
+{
+ unsigned int sum = 0;
+ zone_t *zone;
+
+ for_each_zone(zone)
+ sum += zone->free_pages;
+
+ return sum;
+}
+
+/*
+ * Amount of free RAM allocatable as buffer memory:
+ */
+unsigned int nr_free_buffer_pages (void)
+{
+ pg_data_t *pgdat;
+ unsigned int sum = 0;
+ zonelist_t *zonelist;
+ zone_t **zonep, *zone;
+
+ for_each_pgdat(pgdat) {
+ int class_idx;
+ zonelist = pgdat->node_zonelists + (GFP_USER & GFP_ZONEMASK);
+ zonep = zonelist->zones;
+ zone = *zonep;
+ class_idx = zone_idx(zone);
+
+ sum += zone->nr_cache_pages;
+ for (; zone; zone = *zonep++) {
+ int free = zone->free_pages - zone->watermarks[class_idx].high;
+ if (free <= 0)
+ continue;
+ sum += free;
+ }
+ }
+
+ return sum;
+}
+
+#if CONFIG_HIGHMEM
+unsigned int nr_free_highpages (void)
+{
+ pg_data_t *pgdat;
+ unsigned int pages = 0;
+
+ for_each_pgdat(pgdat)
+ pages += pgdat->node_zones[ZONE_HIGHMEM].free_pages;
+
+ return pages;
+}
+
+unsigned int freeable_lowmem(void)
+{
+ unsigned int pages = 0;
+ pg_data_t *pgdat;
+
+ for_each_pgdat(pgdat) {
+ pages += pgdat->node_zones[ZONE_DMA].free_pages;
+ pages += pgdat->node_zones[ZONE_DMA].nr_active_pages;
+ pages += pgdat->node_zones[ZONE_DMA].nr_inactive_pages;
+ pages += pgdat->node_zones[ZONE_NORMAL].free_pages;
+ pages += pgdat->node_zones[ZONE_NORMAL].nr_active_pages;
+ pages += pgdat->node_zones[ZONE_NORMAL].nr_inactive_pages;
+ }
+
+ return pages;
+}
+#endif
+
+#define K(x) ((x) << (PAGE_SHIFT-10))
+
+/*
+ * Show free area list (used inside shift_scroll-lock stuff)
+ * We also calculate the percentage fragmentation. We do this by counting the
+ * memory on each free list with the exception of the first item on the list.
+ */
+void show_free_areas_core(pg_data_t *pgdat)
+{
+ unsigned int order;
+ unsigned type;
+ pg_data_t *tmpdat = pgdat;
+
+ printk("Free pages: %6dkB (%6dkB HighMem)\n",
+ K(nr_free_pages()),
+ K(nr_free_highpages()));
+
+ while (tmpdat) {
+ zone_t *zone;
+ for (zone = tmpdat->node_zones;
+ zone < tmpdat->node_zones + MAX_NR_ZONES; zone++)
+ printk("Zone:%s freepages:%6lukB\n",
+ zone->name,
+ K(zone->free_pages));
+
+ tmpdat = tmpdat->node_next;
+ }
+
+ printk("( Active: %d, inactive: %d, free: %d )\n",
+ nr_active_pages,
+ nr_inactive_pages,
+ nr_free_pages());
+
+ for (type = 0; type < MAX_NR_ZONES; type++) {
+ struct list_head *head, *curr;
+ zone_t *zone = pgdat->node_zones + type;
+ unsigned long nr, total, flags;
+
+ total = 0;
+ if (zone->size) {
+ spin_lock_irqsave(&zone->lock, flags);
+ for (order = 0; order < MAX_ORDER; order++) {
+ head = &(zone->free_area + order)->free_list;
+ curr = head;
+ nr = 0;
+ for (;;) {
+ if ((curr = curr->next) == head)
+ break;
+ nr++;
+ }
+ total += nr * (1 << order);
+ printk("%lu*%lukB ", nr, K(1UL) << order);
+ }
+ spin_unlock_irqrestore(&zone->lock, flags);
+ }
+ printk("= %lukB)\n", K(total));
+ }
+
+#ifdef SWAP_CACHE_INFO
+ show_swap_cache_info();
+#endif
+}
+
+void show_free_areas(void)
+{
+ show_free_areas_core(pgdat_list);
+}
+
+/*
+ * Builds allocation fallback zone lists.
+ */
+static inline void build_zonelists(pg_data_t *pgdat)
+{
+ int i, j, k;
+
+ for (i = 0; i <= GFP_ZONEMASK; i++) {
+ zonelist_t *zonelist;
+ zone_t *zone;
+
+ zonelist = pgdat->node_zonelists + i;
+ memset(zonelist, 0, sizeof(*zonelist));
+
+ j = 0;
+ k = ZONE_NORMAL;
+ if (i & __GFP_HIGHMEM)
+ k = ZONE_HIGHMEM;
+ if (i & __GFP_DMA)
+ k = ZONE_DMA;
+
+ switch (k) {
+ default:
+ BUG();
+ /*
+ * fallthrough:
+ */
+ case ZONE_HIGHMEM:
+ zone = pgdat->node_zones + ZONE_HIGHMEM;
+ if (zone->size) {
+#ifndef CONFIG_HIGHMEM
+ BUG();
+#endif
+ zonelist->zones[j++] = zone;
+ }
+ case ZONE_NORMAL:
+ zone = pgdat->node_zones + ZONE_NORMAL;
+ if (zone->size)
+ zonelist->zones[j++] = zone;
+ case ZONE_DMA:
+ zone = pgdat->node_zones + ZONE_DMA;
+ if (zone->size)
+ zonelist->zones[j++] = zone;
+ }
+ zonelist->zones[j++] = NULL;
+ }
+}
+
+/*
+ * Helper functions to size the waitqueue hash table.
+ * Essentially these want to choose hash table sizes sufficiently
+ * large so that collisions trying to wait on pages are rare.
+ * But in fact, the number of active page waitqueues on typical
+ * systems is ridiculously low, less than 200. So this is even
+ * conservative, even though it seems large.
+ *
+ * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
+ * waitqueues, i.e. the size of the waitq table given the number of pages.
+ */
+#define PAGES_PER_WAITQUEUE 256
+
+static inline unsigned long wait_table_size(unsigned long pages)
+{
+ unsigned long size = 1;
+
+ pages /= PAGES_PER_WAITQUEUE;
+
+ while (size < pages)
+ size <<= 1;
+
+ /*
+ * Once we have dozens or even hundreds of threads sleeping
+ * on IO we've got bigger problems than wait queue collision.
+ * Limit the size of the wait table to a reasonable size.
+ */
+ size = min(size, 4096UL);
+
+ return size;
+}
+
+/*
+ * This is an integer logarithm so that shifts can be used later
+ * to extract the more random high bits from the multiplicative
+ * hash function before the remainder is taken.
+ */
+static inline unsigned long wait_table_bits(unsigned long size)
+{
+ return ffz(~size);
+}
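+/*
+ * Worked example (illustrative): a 1 GiB zone of 4 KiB pages holds
+ * 262144 pages, so wait_table_size() yields 262144/256 == 1024
+ * entries, and wait_table_bits(1024) == ffz(~1024) == 10, the log2
+ * used to derive the hash shift.
+ */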
+
+#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
+
+/*
+ * Set up the zone data structures:
+ * - mark all pages reserved
+ * - mark all memory queues empty
+ * - clear the memory bitmaps
+ */
+void __init free_area_init_core(int nid, pg_data_t *pgdat, struct page **gmap,
+ unsigned long *zones_size, unsigned long zone_start_paddr,
+ unsigned long *zholes_size, struct page *lmem_map)
+{
+ unsigned long i, j;
+ unsigned long map_size;
+ unsigned long totalpages, offset, realtotalpages;
+ const unsigned long zone_required_alignment = 1UL << (MAX_ORDER-1);
+
+ if (zone_start_paddr & ~PAGE_MASK)
+ BUG();
+
+ totalpages = 0;
+ for (i = 0; i < MAX_NR_ZONES; i++) {
+ unsigned long size = zones_size[i];
+ totalpages += size;
+ }
+ realtotalpages = totalpages;
+ if (zholes_size)
+ for (i = 0; i < MAX_NR_ZONES; i++)
+ realtotalpages -= zholes_size[i];
+
+ printk("On node %d totalpages: %lu\n", nid, realtotalpages);
+
+ /*
+ * Some architectures (with lots of mem and discontiguous memory
+ * maps) have to search for a good mem_map area:
+ * For discontigmem, the conceptual mem map array starts from
+ * PAGE_OFFSET, we need to align the actual array onto a mem map
+ * boundary, so that MAP_NR works.
+ */
+ map_size = (totalpages + 1)*sizeof(struct page);
+ if (lmem_map == (struct page *)0) {
+ lmem_map = (struct page *) alloc_bootmem_node(pgdat, map_size);
+ lmem_map = (struct page *)(PAGE_OFFSET +
+ MAP_ALIGN((unsigned long)lmem_map - PAGE_OFFSET));
+ }
+ *gmap = pgdat->node_mem_map = lmem_map;
+ pgdat->node_size = totalpages;
+ pgdat->node_start_paddr = zone_start_paddr;
+ pgdat->node_start_mapnr = (lmem_map - mem_map);
+ pgdat->nr_zones = 0;
+
+ offset = lmem_map - mem_map;
+ for (j = 0; j < MAX_NR_ZONES; j++) {
+ zone_t *zone = pgdat->node_zones + j;
+ unsigned long mask;
+ unsigned long size, realsize;
+ int idx;
+
+ zone_table[nid * MAX_NR_ZONES + j] = zone;
+ realsize = size = zones_size[j];
+ if (zholes_size)
+ realsize -= zholes_size[j];
+
+ printk("zone(%lu): %lu pages.\n", j, size);
+ zone->size = size;
+ zone->realsize = realsize;
+ zone->name = zone_names[j];
+ zone->lock = SPIN_LOCK_UNLOCKED;
+ zone->zone_pgdat = pgdat;
+ zone->free_pages = 0;
+ zone->need_balance = 0;
+ zone->nr_active_pages = zone->nr_inactive_pages = 0;
+
+
+ if (!size)
+ continue;
+
+ /*
+ * The per-page waitqueue mechanism uses hashed waitqueues
+ * per zone.
+ */
+ zone->wait_table_size = wait_table_size(size);
+ zone->wait_table_shift =
+ BITS_PER_LONG - wait_table_bits(zone->wait_table_size);
+ zone->wait_table = (wait_queue_head_t *)
+ alloc_bootmem_node(pgdat, zone->wait_table_size
+ * sizeof(wait_queue_head_t));
+
+ for(i = 0; i < zone->wait_table_size; ++i)
+ init_waitqueue_head(zone->wait_table + i);
+
+ pgdat->nr_zones = j+1;
+
+ mask = (realsize / zone_balance_ratio[j]);
+ if (mask < zone_balance_min[j])
+ mask = zone_balance_min[j];
+ else if (mask > zone_balance_max[j])
+ mask = zone_balance_max[j];
+ zone->watermarks[j].min = mask;
+ zone->watermarks[j].low = mask*2;
+ zone->watermarks[j].high = mask*3;
+ /* now set the watermarks of the lower zones in the "j" classzone */
+ for (idx = j-1; idx >= 0; idx--) {
+ zone_t * lower_zone = pgdat->node_zones + idx;
+ unsigned long lower_zone_reserve;
+ if (!lower_zone->size)
+ continue;
+
+ mask = lower_zone->watermarks[idx].min;
+ lower_zone->watermarks[j].min = mask;
+ lower_zone->watermarks[j].low = mask*2;
+ lower_zone->watermarks[j].high = mask*3;
+
+ /* now the brainer part */
+ lower_zone_reserve = realsize / lower_zone_reserve_ratio[idx];
+ lower_zone->watermarks[j].min += lower_zone_reserve;
+ lower_zone->watermarks[j].low += lower_zone_reserve;
+ lower_zone->watermarks[j].high += lower_zone_reserve;
+
+ realsize += lower_zone->realsize;
+ }
+
+ zone->zone_mem_map = mem_map + offset;
+ zone->zone_start_mapnr = offset;
+ zone->zone_start_paddr = zone_start_paddr;
+
+ if ((zone_start_paddr >> PAGE_SHIFT) & (zone_required_alignment-1))
+ printk("BUG: wrong zone alignment, it will crash\n");
+
+ /*
+ * Initially all pages are reserved - free ones are freed
+ * up by free_all_bootmem() once the early boot process is
+ * done. Non-atomic initialization, single-pass.
+ */
+ for (i = 0; i < size; i++) {
+ struct page *page = mem_map + offset + i;
+ set_page_zone(page, nid * MAX_NR_ZONES + j);
+ set_page_count(page, 0);
+ SetPageReserved(page);
+ INIT_LIST_HEAD(&page->list);
+ if (j != ZONE_HIGHMEM)
+ set_page_address(page, __va(zone_start_paddr));
+ zone_start_paddr += PAGE_SIZE;
+ }
+
+ offset += size;
+ for (i = 0; ; i++) {
+ unsigned long bitmap_size;
+
+ INIT_LIST_HEAD(&zone->free_area[i].free_list);
+ if (i == MAX_ORDER-1) {
+ zone->free_area[i].map = NULL;
+ break;
+ }
+
+ /*
+ * Page buddy system uses "index >> (i+1)",
+ * where "index" is at most "size-1".
+ *
+ * The extra "+3" is to round down to byte
+ * size (8 bits per byte assumption). Thus
+ * we get "(size-1) >> (i+4)" as the last byte
+ * we can access.
+ *
+ * The "+1" is because we want to round the
+ * byte allocation up rather than down. So
+ * we should have had a "+7" before we shifted
+ * down by three. Also, we have to add one as
+ * we actually _use_ the last bit (it's [0,n]
+ * inclusive, not [0,n[).
+ *
+ * So we actually had +7+1 before we shift
+ * down by 3. But (n+8) >> 3 == (n >> 3) + 1
+ * (modulo overflows, which we do not have).
+ *
+ * Finally, we LONG_ALIGN because all bitmap
+ * operations are on longs.
+ */
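+ /*
+ * Worked example (illustrative): size == 1024 pages at i == 0
+ * gives bitmap_size = 1023 >> 4 == 63 bytes, 64 after the +1
+ * and LONG_ALIGN, i.e. 512 bits covering pair indices 0..511.
+ */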
+ bitmap_size = (size-1) >> (i+4);
+ bitmap_size = LONG_ALIGN(bitmap_size+1);
+ zone->free_area[i].map =
+ (unsigned long *) alloc_bootmem_node(pgdat, bitmap_size);
+ }
+ }
+ build_zonelists(pgdat);
+}
+
+void __init free_area_init(unsigned long *zones_size)
+{
+ free_area_init_core(0, &contig_page_data, &mem_map, zones_size, 0, 0, 0);
+}
+
+static int __init setup_mem_frac(char *str)
+{
+ int j = 0;
+
+ while (get_option(&str, &zone_balance_ratio[j++]) == 2);
+ printk("setup_mem_frac: ");
+ for (j = 0; j < MAX_NR_ZONES; j++) printk("%d ", zone_balance_ratio[j]);
+ printk("\n");
+ return 1;
+}
+
+__setup("memfrac=", setup_mem_frac);
+
+static int __init setup_lower_zone_reserve(char *str)
+{
+ int j = 0;
+
+ while (get_option(&str, &lower_zone_reserve_ratio[j++]) == 2);
+ printk("setup_lower_zone_reserve: ");
+ for (j = 0; j < MAX_NR_ZONES-1; j++) printk("%d ", lower_zone_reserve_ratio[j]);
+ printk("\n");
+ return 1;
+}
+
+__setup("lower_zone_reserve=", setup_lower_zone_reserve);
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/mm/swapfile.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/mm/swapfile.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,1269 @@
+/*
+ * linux/mm/swapfile.c
+ *
+ * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
+ * Swap reorganised 29.12.95, Stephen Tweedie
+ */
+
+#include <linux/slab.h>
+#include <linux/smp_lock.h>
+#include <linux/kernel_stat.h>
+#include <linux/swap.h>
+#include <linux/swapctl.h>
+#include <linux/blkdev.h> /* for blk_size */
+#include <linux/vmalloc.h>
+#include <linux/pagemap.h>
+#include <linux/shm.h>
+
+#include <asm/pgtable.h>
+
+spinlock_t swaplock = SPIN_LOCK_UNLOCKED;
+unsigned int nr_swapfiles;
+int total_swap_pages;
+static int swap_overflow;
+
+static const char Bad_file[] = "Bad swap file entry ";
+static const char Unused_file[] = "Unused swap file entry ";
+static const char Bad_offset[] = "Bad swap offset entry ";
+static const char Unused_offset[] = "Unused swap offset entry ";
+
+struct swap_list_t swap_list = {-1, -1};
+
+struct swap_info_struct swap_info[MAX_SWAPFILES];
+
+#define SWAPFILE_CLUSTER 256
+
+static inline int scan_swap_map(struct swap_info_struct *si)
+{
+ unsigned long offset;
+ /*
+ * We try to cluster swap pages by allocating them
+ * sequentially in swap. Once we've allocated
+ * SWAPFILE_CLUSTER pages this way, however, we resort to
+ * first-free allocation, starting a new cluster. This
+ * prevents us from scattering swap pages all over the entire
+ * swap partition, so that we reduce overall disk seek times
+ * between swap pages. -- sct */
+ if (si->cluster_nr) {
+ while (si->cluster_next <= si->highest_bit) {
+ offset = si->cluster_next++;
+ if (si->swap_map[offset])
+ continue;
+ si->cluster_nr--;
+ goto got_page;
+ }
+ }
+ si->cluster_nr = SWAPFILE_CLUSTER;
+
+ /* try to find an empty (even not aligned) cluster. */
+ offset = si->lowest_bit;
+ check_next_cluster:
+ if (offset+SWAPFILE_CLUSTER-1 <= si->highest_bit)
+ {
+ int nr;
+ for (nr = offset; nr < offset+SWAPFILE_CLUSTER; nr++)
+ if (si->swap_map[nr])
+ {
+ offset = nr+1;
+ goto check_next_cluster;
+ }
+ /* We found a completely empty cluster, so start
+ * using it.
+ */
+ goto got_page;
+ }
+ /* No luck, so now go fine-grained as usual. -Andrea */
+ for (offset = si->lowest_bit; offset <= si->highest_bit ; offset++) {
+ if (si->swap_map[offset])
+ continue;
+ si->lowest_bit = offset+1;
+ got_page:
+ if (offset == si->lowest_bit)
+ si->lowest_bit++;
+ if (offset == si->highest_bit)
+ si->highest_bit--;
+ if (si->lowest_bit > si->highest_bit) {
+ si->lowest_bit = si->max;
+ si->highest_bit = 0;
+ }
+ si->swap_map[offset] = 1;
+ nr_swap_pages--;
+ si->cluster_next = offset+1;
+ return offset;
+ }
+ si->lowest_bit = si->max;
+ si->highest_bit = 0;
+ return 0;
+}
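
The clustering policy in scan_swap_map() can be modelled in a few lines of
user-space C. This is a simplified sketch, not the kernel code: a plain
array stands in for swap_map, and the lowest_bit/highest_bit bookkeeping
and locking are omitted:

    #include <stdio.h>

    #define CLUSTER 4            /* stand-in for SWAPFILE_CLUSTER */
    #define NSLOTS  16

    static unsigned char map[NSLOTS];   /* 0 = free, nonzero = in use */

    /* Find CLUSTER consecutive free slots, as the code above does;
     * fall back to the first free slot if no empty cluster exists. */
    static int pick_slot(void)
    {
        int base, n;

        for (base = 0; base + CLUSTER - 1 < NSLOTS; base++) {
            for (n = base; n < base + CLUSTER; n++)
                if (map[n])
                    break;
            if (n == base + CLUSTER)
                return base;             /* empty cluster found */
        }
        for (base = 0; base < NSLOTS; base++)  /* fine-grained fallback */
            if (!map[base])
                return base;
        return -1;                       /* swap full */
    }

    int main(void)
    {
        map[0] = map[5] = 1;
        printf("allocating at %d\n", pick_slot());  /* prints 1 */
        return 0;
    }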
+
+swp_entry_t get_swap_page(void)
+{
+ struct swap_info_struct * p;
+ unsigned long offset;
+ swp_entry_t entry;
+ int type, wrapped = 0;
+
+ entry.val = 0; /* Out of memory */
+ swap_list_lock();
+ type = swap_list.next;
+ if (type < 0)
+ goto out;
+ if (nr_swap_pages <= 0)
+ goto out;
+
+ while (1) {
+ p = &swap_info[type];
+ if ((p->flags & SWP_WRITEOK) == SWP_WRITEOK) {
+ swap_device_lock(p);
+ offset = scan_swap_map(p);
+ swap_device_unlock(p);
+ if (offset) {
+ entry = SWP_ENTRY(type,offset);
+ type = swap_info[type].next;
+ if (type < 0 ||
+ p->prio != swap_info[type].prio) {
+ swap_list.next = swap_list.head;
+ } else {
+ swap_list.next = type;
+ }
+ goto out;
+ }
+ }
+ type = p->next;
+ if (!wrapped) {
+ if (type < 0 || p->prio != swap_info[type].prio) {
+ type = swap_list.head;
+ wrapped = 1;
+ }
+ } else
+ if (type < 0)
+ goto out; /* out of swap space */
+ }
+out:
+ swap_list_unlock();
+ return entry;
+}
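
get_swap_page() keeps devices of equal priority in rotation and only falls
back to lower-priority devices when the higher ones are full. A small
sketch of just the next-start decision, with made-up device entries:

    #include <stdio.h>

    /* Stand-in swap devices with priorities, highest first, as in swap_list. */
    struct dev { int prio; int next; };

    static struct dev devs[] = {
        { 10, 1 },   /* type 0 -> type 1 (same prio): round-robin pair */
        { 10, 2 },
        {  5, -1 },  /* lower priority: used only when the pair is full */
    };

    /* After allocating from 'type', pick where the next search starts:
     * stay within the same-priority group, else restart at the head. */
    static int next_start(int type, int head)
    {
        int nxt = devs[type].next;
        return (nxt < 0 || devs[type].prio != devs[nxt].prio) ? head : nxt;
    }

    int main(void)
    {
        printf("%d %d %d\n", next_start(0, 0), next_start(1, 0), next_start(2, 0));
        /* prints "1 0 0": types 0 and 1 alternate; type 2 wraps to the head */
        return 0;
    }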
+
+static struct swap_info_struct * swap_info_get(swp_entry_t entry)
+{
+ struct swap_info_struct * p;
+ unsigned long offset, type;
+
+ if (!entry.val)
+ goto out;
+ type = SWP_TYPE(entry);
+ if (type >= nr_swapfiles)
+ goto bad_nofile;
+ p = & swap_info[type];
+ if (!(p->flags & SWP_USED))
+ goto bad_device;
+ offset = SWP_OFFSET(entry);
+ if (offset >= p->max)
+ goto bad_offset;
+ if (!p->swap_map[offset])
+ goto bad_free;
+ swap_list_lock();
+ if (p->prio > swap_info[swap_list.next].prio)
+ swap_list.next = type;
+ swap_device_lock(p);
+ return p;
+
+bad_free:
+ printk(KERN_ERR "swap_free: %s%08lx\n", Unused_offset, entry.val);
+ goto out;
+bad_offset:
+ printk(KERN_ERR "swap_free: %s%08lx\n", Bad_offset, entry.val);
+ goto out;
+bad_device:
+ printk(KERN_ERR "swap_free: %s%08lx\n", Unused_file, entry.val);
+ goto out;
+bad_nofile:
+ printk(KERN_ERR "swap_free: %s%08lx\n", Bad_file, entry.val);
+out:
+ return NULL;
+}
+
+static void swap_info_put(struct swap_info_struct * p)
+{
+ swap_device_unlock(p);
+ swap_list_unlock();
+}
+
+static int swap_entry_free(struct swap_info_struct *p, unsigned long offset)
+{
+ int count = p->swap_map[offset];
+
+ if (count < SWAP_MAP_MAX) {
+ count--;
+ p->swap_map[offset] = count;
+ if (!count) {
+ if (offset < p->lowest_bit)
+ p->lowest_bit = offset;
+ if (offset > p->highest_bit)
+ p->highest_bit = offset;
+ nr_swap_pages++;
+ }
+ }
+ return count;
+}
+
+/*
+ * Caller has made sure that the swapdevice corresponding to entry
+ * is still around or has not been recycled.
+ */
+void swap_free(swp_entry_t entry)
+{
+ struct swap_info_struct * p;
+
+ p = swap_info_get(entry);
+ if (p) {
+ swap_entry_free(p, SWP_OFFSET(entry));
+ swap_info_put(p);
+ }
+}
+
+/*
+ * Check if we're the only user of a swap page,
+ * when the page is locked.
+ */
+static int exclusive_swap_page(struct page *page)
+{
+ int retval = 0;
+ struct swap_info_struct * p;
+ swp_entry_t entry;
+
+ entry.val = page->index;
+ p = swap_info_get(entry);
+ if (p) {
+ /* Is the only swap cache user the cache itself? */
+ if (p->swap_map[SWP_OFFSET(entry)] == 1) {
+ /* Recheck the page count with the pagecache lock held.. */
+ spin_lock(&pagecache_lock);
+ if (page_count(page) - !!page->buffers == 2)
+ retval = 1;
+ spin_unlock(&pagecache_lock);
+ }
+ swap_info_put(p);
+ }
+ return retval;
+}
+
+/*
+ * We can use this swap cache entry directly
+ * if there are no other references to it.
+ *
+ * Here "exclusive_swap_page()" does the real
+ * work, but we opportunistically check whether
+ * we need to get all the locks first..
+ */
+int fastcall can_share_swap_page(struct page *page)
+{
+ int retval = 0;
+
+ if (!PageLocked(page))
+ BUG();
+ switch (page_count(page)) {
+ case 3:
+ if (!page->buffers)
+ break;
+ /* Fallthrough */
+ case 2:
+ if (!PageSwapCache(page))
+ break;
+ retval = exclusive_swap_page(page);
+ break;
+ case 1:
+ if (PageReserved(page))
+ break;
+ retval = 1;
+ }
+ return retval;
+}
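
The reference arithmetic used here and in exclusive_swap_page(),
"page_count(page) - !!page->buffers == 2", is subtle: the two expected
references are the caller and the swap cache, and an attached buffer head
contributes one more that must be discounted. A stand-alone illustration:

    #include <stdio.h>

    /* Model of the check above: the page is exclusive when the only
     * counted users are the caller and the swap cache itself. */
    static int exclusive(int page_count, int has_buffers)
    {
        return page_count - !!has_buffers == 2;
    }

    int main(void)
    {
        printf("%d\n", exclusive(2, 0));  /* caller + cache           -> 1 */
        printf("%d\n", exclusive(3, 1));  /* caller + cache + buffers -> 1 */
        printf("%d\n", exclusive(3, 0));  /* a third user             -> 0 */
        return 0;
    }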
+
+/*
+ * Work out if there are any other processes sharing this
+ * swap cache page. Free it if you can. Return success.
+ */
+int fastcall remove_exclusive_swap_page(struct page *page)
+{
+ int retval;
+ struct swap_info_struct * p;
+ swp_entry_t entry;
+
+ if (!PageLocked(page))
+ BUG();
+ if (!PageSwapCache(page))
+ return 0;
+ if (page_count(page) - !!page->buffers != 2) /* 2: us + cache */
+ return 0;
+
+ entry.val = page->index;
+ p = swap_info_get(entry);
+ if (!p)
+ return 0;
+
+ /* Is the only swap cache user the cache itself? */
+ retval = 0;
+ if (p->swap_map[SWP_OFFSET(entry)] == 1) {
+ /* Recheck the page count with the pagecache lock held.. */
+ spin_lock(&pagecache_lock);
+ if (page_count(page) - !!page->buffers == 2) {
+ __delete_from_swap_cache(page);
+ SetPageDirty(page);
+ retval = 1;
+ }
+ spin_unlock(&pagecache_lock);
+ }
+ swap_info_put(p);
+
+ if (retval) {
+ block_flushpage(page, 0);
+ swap_free(entry);
+ page_cache_release(page);
+ }
+
+ return retval;
+}
+
+/*
+ * Free the swap entry like above, but also try to
+ * free the page cache entry if it is the last user.
+ */
+void free_swap_and_cache(swp_entry_t entry)
+{
+ struct swap_info_struct * p;
+ struct page *page = NULL;
+
+ p = swap_info_get(entry);
+ if (p) {
+ if (swap_entry_free(p, SWP_OFFSET(entry)) == 1)
+ page = find_trylock_page(&swapper_space, entry.val);
+ swap_info_put(p);
+ }
+ if (page) {
+ page_cache_get(page);
+ /* Only cache user (+us), or swap space full? Free it! */
+ if (page_count(page) - !!page->buffers == 2 || vm_swap_full()) {
+ delete_from_swap_cache(page);
+ SetPageDirty(page);
+ }
+ UnlockPage(page);
+ page_cache_release(page);
+ }
+}
+
+/*
+ * The swap entry has been read in advance, and we return 1 to indicate
+ * that the page has been used or is no longer needed.
+ *
+ * Always set the resulting pte to be nowrite (the same as COW pages
+ * after one process has exited). We don't know just how many PTEs will
+ * share this swap entry, so be cautious and let do_wp_page work out
+ * what to do if a write is requested later.
+ */
+/* mmlist_lock and vma->vm_mm->page_table_lock are held */
+static inline void unuse_pte(struct vm_area_struct * vma, unsigned long address,
+ pte_t *dir, swp_entry_t entry, struct page* page)
+{
+ pte_t pte = *dir;
+
+ if (likely(pte_to_swp_entry(pte).val != entry.val))
+ return;
+ if (unlikely(pte_none(pte) || pte_present(pte)))
+ return;
+ get_page(page);
+ set_pte(dir, pte_mkold(mk_pte(page, vma->vm_page_prot)));
+ swap_free(entry);
+ ++vma->vm_mm->rss;
+}
+
+/* mmlist_lock and vma->vm_mm->page_table_lock are held */
+static inline void unuse_pmd(struct vm_area_struct * vma, pmd_t *dir,
+ unsigned long address, unsigned long size, unsigned long offset,
+ swp_entry_t entry, struct page* page)
+{
+ pte_t * pte;
+ unsigned long end;
+
+ if (pmd_none(*dir))
+ return;
+ if (pmd_bad(*dir)) {
+ pmd_ERROR(*dir);
+ pmd_clear(dir);
+ return;
+ }
+ pte = pte_offset(dir, address);
+ offset += address & PMD_MASK;
+ address &= ~PMD_MASK;
+ end = address + size;
+ if (end > PMD_SIZE)
+ end = PMD_SIZE;
+ do {
+ unuse_pte(vma, offset+address-vma->vm_start, pte, entry, page);
+ address += PAGE_SIZE;
+ pte++;
+ } while (address && (address < end));
+}
+
+/* mmlist_lock and vma->vm_mm->page_table_lock are held */
+static inline void unuse_pgd(struct vm_area_struct * vma, pgd_t *dir,
+ unsigned long address, unsigned long size,
+ swp_entry_t entry, struct page* page)
+{
+ pmd_t * pmd;
+ unsigned long offset, end;
+
+ if (pgd_none(*dir))
+ return;
+ if (pgd_bad(*dir)) {
+ pgd_ERROR(*dir);
+ pgd_clear(dir);
+ return;
+ }
+ pmd = pmd_offset(dir, address);
+ offset = address & PGDIR_MASK;
+ address &= ~PGDIR_MASK;
+ end = address + size;
+ if (end > PGDIR_SIZE)
+ end = PGDIR_SIZE;
+ if (address >= end)
+ BUG();
+ do {
+ unuse_pmd(vma, pmd, address, end - address, offset, entry,
+ page);
+ address = (address + PMD_SIZE) & PMD_MASK;
+ pmd++;
+ } while (address && (address < end));
+}
+
+/* mmlist_lock and vma->vm_mm->page_table_lock are held */
+static void unuse_vma(struct vm_area_struct * vma, pgd_t *pgdir,
+ swp_entry_t entry, struct page* page)
+{
+ unsigned long start = vma->vm_start, end = vma->vm_end;
+
+ if (start >= end)
+ BUG();
+ do {
+ unuse_pgd(vma, pgdir, start, end - start, entry, page);
+ start = (start + PGDIR_SIZE) & PGDIR_MASK;
+ pgdir++;
+ } while (start && (start < end));
+}
+
+static void unuse_process(struct mm_struct * mm,
+ swp_entry_t entry, struct page* page)
+{
+ struct vm_area_struct* vma;
+
+ /*
+ * Go through process' page directory.
+ */
+ spin_lock(&mm->page_table_lock);
+ for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ pgd_t * pgd = pgd_offset(mm, vma->vm_start);
+ unuse_vma(vma, pgd, entry, page);
+ }
+ XEN_flush_page_update_queue();
+ spin_unlock(&mm->page_table_lock);
+ return;
+}
+
+/*
+ * Scan swap_map from current position to next entry still in use.
+ * Recycle to start on reaching the end, returning 0 when empty.
+ */
+static int find_next_to_unuse(struct swap_info_struct *si, int prev)
+{
+ int max = si->max;
+ int i = prev;
+ int count;
+
+ /*
+ * No need for swap_device_lock(si) here: we're just looking
+ * for whether an entry is in use, not modifying it; false
+ * hits are okay, and sys_swapoff() has already prevented new
+ * allocations from this area (while holding swap_list_lock()).
+ */
+ for (;;) {
+ if (++i >= max) {
+ if (!prev) {
+ i = 0;
+ break;
+ }
+ /*
+ * No entries in use at top of swap_map,
+ * loop back to start and recheck there.
+ */
+ max = prev + 1;
+ prev = 0;
+ i = 1;
+ }
+ count = si->swap_map[i];
+ if (count && count != SWAP_MAP_BAD)
+ break;
+ }
+ return i;
+}
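
The wrap-around in find_next_to_unuse() is easy to misread: after reaching
the top it rescans only the slots below the previous position, and returns
0 once everything has been covered. A user-space copy of the control flow,
with an illustrative map:

    #include <stdio.h>

    #define SWAP_MAP_BAD 0x8000

    /* Advance from 'prev' to the next in-use slot, wrapping to the
     * start once, returning 0 when no entry remains in use. */
    static int find_next(const int *map, int max, int prev)
    {
        int i = prev;
        for (;;) {
            if (++i >= max) {
                if (!prev)
                    return 0;        /* scanned everything: empty */
                max = prev + 1;      /* recheck the slots we skipped */
                prev = 0;
                i = 1;
            }
            if (map[i] && map[i] != SWAP_MAP_BAD)
                return i;
        }
    }

    int main(void)
    {
        int map[] = { 0, 0, 0, 2, 0 };        /* slot 3 still in use */
        printf("%d\n", find_next(map, 5, 4)); /* wraps, prints 3 */
        return 0;
    }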
+
+/*
+ * We completely avoid races by reading each swap page in advance,
+ * and then search for the process using it. All the necessary
+ * page table adjustments can then be made atomically.
+ */
+static int try_to_unuse(unsigned int type)
+{
+ struct swap_info_struct * si = &swap_info[type];
+ struct mm_struct *start_mm;
+ unsigned short *swap_map;
+ unsigned short swcount;
+ struct page *page;
+ swp_entry_t entry;
+ int i = 0;
+ int retval = 0;
+ int reset_overflow = 0;
+ int shmem;
+
+ /*
+ * When searching mms for an entry, a good strategy is to
+ * start at the first mm we freed the previous entry from
+ * (though actually we don't notice whether we or coincidence
+ * freed the entry). Initialize this start_mm with a hold.
+ *
+ * A simpler strategy would be to start at the last mm we
+ * freed the previous entry from; but that would take less
+ * advantage of mmlist ordering (now preserved by swap_out()),
+ * which clusters forked address spaces together, most recent
+ * child immediately after parent. If we race with dup_mmap(),
+ * we very much want to resolve parent before child, otherwise
+ * we may miss some entries: using last mm would invert that.
+ */
+ start_mm = &init_mm;
+ atomic_inc(&init_mm.mm_users);
+
+ /*
+ * Keep on scanning until all entries have gone. Usually,
+ * one pass through swap_map is enough, but not necessarily:
+ * mmput() removes mm from mmlist before exit_mmap() and its
+ * zap_page_range(). That's not too bad, those entries are
+ * on their way out, and handled faster there than here.
+ * do_munmap() behaves similarly, taking the range out of mm's
+ * vma list before zap_page_range(). But unfortunately, when
+ * unmapping a part of a vma, it takes the whole out first,
+ * then reinserts what's left after (might even reschedule if
+ * open() method called) - so swap entries may be invisible
+ * to swapoff for a while, then reappear - but that is rare.
+ */
+ while ((i = find_next_to_unuse(si, i))) {
+ /*
+ * Get a page for the entry, using the existing swap
+ * cache page if there is one. Otherwise, get a clean
+ * page and read the swap into it.
+ */
+ swap_map = &si->swap_map[i];
+ entry = SWP_ENTRY(type, i);
+ page = read_swap_cache_async(entry);
+ if (!page) {
+ /*
+ * Either swap_duplicate() failed because entry
+ * has been freed independently, and will not be
+ * reused since sys_swapoff() already disabled
+ * allocation from here, or alloc_page() failed.
+ */
+ if (!*swap_map)
+ continue;
+ retval = -ENOMEM;
+ break;
+ }
+
+ /*
+ * Don't hold on to start_mm if it looks like exiting.
+ */
+ if (atomic_read(&start_mm->mm_users) == 1) {
+ mmput(start_mm);
+ start_mm = &init_mm;
+ atomic_inc(&init_mm.mm_users);
+ }
+
+ /*
+ * Wait for and lock page. When do_swap_page races with
+ * try_to_unuse, do_swap_page can handle the fault much
+ * faster than try_to_unuse can locate the entry. This
+ * apparently redundant "wait_on_page" lets try_to_unuse
+ * defer to do_swap_page in such a case - in some tests,
+ * do_swap_page and try_to_unuse repeatedly compete.
+ */
+ wait_on_page(page);
+ lock_page(page);
+
+ /*
+ * Remove all references to entry, without blocking.
+ * Whenever we reach init_mm, there's no address space
+ * to search, but use it as a reminder to search shmem.
+ */
+ shmem = 0;
+ swcount = *swap_map;
+ if (swcount > 1) {
+ flush_page_to_ram(page);
+ if (start_mm == &init_mm)
+ shmem = shmem_unuse(entry, page);
+ else
+ unuse_process(start_mm, entry, page);
+ }
+ if (*swap_map > 1) {
+ int set_start_mm = (*swap_map >= swcount);
+ struct list_head *p = &start_mm->mmlist;
+ struct mm_struct *new_start_mm = start_mm;
+ struct mm_struct *mm;
+
+ spin_lock(&mmlist_lock);
+ while (*swap_map > 1 &&
+ (p = p->next) != &start_mm->mmlist) {
+ mm = list_entry(p, struct mm_struct, mmlist);
+ swcount = *swap_map;
+ if (mm == &init_mm) {
+ set_start_mm = 1;
+ spin_unlock(&mmlist_lock);
+ shmem = shmem_unuse(entry, page);
+ spin_lock(&mmlist_lock);
+ } else
+ unuse_process(mm, entry, page);
+ if (set_start_mm && *swap_map < swcount) {
+ new_start_mm = mm;
+ set_start_mm = 0;
+ }
+ }
+ atomic_inc(&new_start_mm->mm_users);
+ spin_unlock(&mmlist_lock);
+ mmput(start_mm);
+ start_mm = new_start_mm;
+ }
+
+ /*
+ * How could swap count reach 0x7fff when the maximum
+ * pid is 0x7fff, and there's no way to repeat a swap
+ * page within an mm (except in shmem, where it's the
+ * shared object which takes the reference count)?
+ * We believe SWAP_MAP_MAX cannot occur in Linux 2.4.
+ *
+ * If that's wrong, then we should worry more about
+ * exit_mmap() and do_munmap() cases described above:
+ * we might be resetting SWAP_MAP_MAX too early here.
+ * We know "Undead"s can happen, they're okay, so don't
+ * report them; but do report if we reset SWAP_MAP_MAX.
+ */
+ if (*swap_map == SWAP_MAP_MAX) {
+ swap_list_lock();
+ swap_device_lock(si);
+ nr_swap_pages++;
+ *swap_map = 1;
+ swap_device_unlock(si);
+ swap_list_unlock();
+ reset_overflow = 1;
+ }
+
+ /*
+ * If a reference remains (rare), we would like to leave
+ * the page in the swap cache; but try_to_swap_out could
+ * then re-duplicate the entry once we drop page lock,
+ * so we might loop indefinitely; also, that page could
+ * not be swapped out to other storage meanwhile. So:
+ * delete from cache even if there's another reference,
+ * after ensuring that the data has been saved to disk -
+ * since if the reference remains (rarer), it will be
+ * read from disk into another page. Splitting into two
+ * pages would be incorrect if swap supported "shared
+ * private" pages, but they are handled by tmpfs files.
+ *
+ * Note shmem_unuse already deleted swappage from cache,
+ * unless corresponding filepage found already in cache:
+ * in which case it left swappage in cache, lowered its
+ * swap count to pass quickly through the loops above,
+ * and now we must reincrement count to try again later.
+ */
+ if ((*swap_map > 1) && PageDirty(page) && PageSwapCache(page)) {
+ rw_swap_page(WRITE, page);
+ lock_page(page);
+ }
+ if (PageSwapCache(page)) {
+ if (shmem)
+ swap_duplicate(entry);
+ else
+ delete_from_swap_cache(page);
+ }
+
+ /*
+ * So we could skip searching mms once swap count went
+ * to 1, we did not mark any present ptes as dirty: must
+ * mark page dirty so try_to_swap_out will preserve it.
+ */
+ SetPageDirty(page);
+ UnlockPage(page);
+ page_cache_release(page);
+
+ /*
+ * Make sure that we aren't completely killing
+ * interactive performance. Interruptible check on
+ * signal_pending() would be nice, but changes the spec?
+ */
+ if (current->need_resched)
+ schedule();
+ }
+
+ mmput(start_mm);
+ if (reset_overflow) {
+ printk(KERN_WARNING "swapoff: cleared swap entry overflow\n");
+ swap_overflow = 0;
+ }
+ return retval;
+}
+
+asmlinkage long sys_swapoff(const char * specialfile)
+{
+ struct swap_info_struct * p = NULL;
+ unsigned short *swap_map;
+ struct nameidata nd;
+ int i, type, prev;
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ err = user_path_walk(specialfile, &nd);
+ if (err)
+ goto out;
+
+ lock_kernel();
+ prev = -1;
+ swap_list_lock();
+ for (type = swap_list.head; type >= 0; type = swap_info[type].next) {
+ p = swap_info + type;
+ if ((p->flags & SWP_WRITEOK) == SWP_WRITEOK) {
+ if (p->swap_file == nd.dentry ||
+ (S_ISBLK(nd.dentry->d_inode->i_mode) &&
+ p->swap_device == nd.dentry->d_inode->i_rdev))
+ break;
+ }
+ prev = type;
+ }
+ err = -EINVAL;
+ if (type < 0) {
+ swap_list_unlock();
+ goto out_dput;
+ }
+
+ if (prev < 0) {
+ swap_list.head = p->next;
+ } else {
+ swap_info[prev].next = p->next;
+ }
+ if (type == swap_list.next) {
+ /* just pick something that's safe... */
+ swap_list.next = swap_list.head;
+ }
+ nr_swap_pages -= p->pages;
+ total_swap_pages -= p->pages;
+ p->flags = SWP_USED;
+ swap_list_unlock();
+ unlock_kernel();
+ err = try_to_unuse(type);
+ lock_kernel();
+ if (err) {
+ /* re-insert swap space back into swap_list */
+ swap_list_lock();
+ for (prev = -1, i = swap_list.head; i >= 0; prev = i, i = swap_info[i].next)
+ if (p->prio >= swap_info[i].prio)
+ break;
+ p->next = i;
+ if (prev < 0)
+ swap_list.head = swap_list.next = p - swap_info;
+ else
+ swap_info[prev].next = p - swap_info;
+ nr_swap_pages += p->pages;
+ total_swap_pages += p->pages;
+ p->flags = SWP_WRITEOK;
+ swap_list_unlock();
+ goto out_dput;
+ }
+ if (p->swap_device)
+ blkdev_put(p->swap_file->d_inode->i_bdev, BDEV_SWAP);
+ path_release(&nd);
+
+ swap_list_lock();
+ swap_device_lock(p);
+ nd.mnt = p->swap_vfsmnt;
+ nd.dentry = p->swap_file;
+ p->swap_vfsmnt = NULL;
+ p->swap_file = NULL;
+ p->swap_device = 0;
+ p->max = 0;
+ swap_map = p->swap_map;
+ p->swap_map = NULL;
+ p->flags = 0;
+ swap_device_unlock(p);
+ swap_list_unlock();
+ vfree(swap_map);
+ err = 0;
+
+out_dput:
+ unlock_kernel();
+ path_release(&nd);
+out:
+ return err;
+}
+
+int get_swaparea_info(char *buf)
+{
+ char * page = (char *) __get_free_page(GFP_KERNEL);
+ struct swap_info_struct *ptr = swap_info;
+ int i, j, len = 0, usedswap;
+
+ if (!page)
+ return -ENOMEM;
+
+ len += sprintf(buf, "Filename\t\t\tType\t\tSize\tUsed\tPriority\n");
+ for (i = 0 ; i < nr_swapfiles ; i++, ptr++) {
+ if ((ptr->flags & SWP_USED) && ptr->swap_map) {
+ char * path = d_path(ptr->swap_file, ptr->swap_vfsmnt,
+ page, PAGE_SIZE);
+
+ len += sprintf(buf + len, "%-31s ", path);
+
+ if (!ptr->swap_device)
+ len += sprintf(buf + len, "file\t\t");
+ else
+ len += sprintf(buf + len, "partition\t");
+
+ usedswap = 0;
+ for (j = 0; j < ptr->max; ++j)
+ switch (ptr->swap_map[j]) {
+ case SWAP_MAP_BAD:
+ case 0:
+ continue;
+ default:
+ usedswap++;
+ }
+ len += sprintf(buf + len, "%d\t%d\t%d\n", ptr->pages << (PAGE_SHIFT - 10),
+ usedswap << (PAGE_SHIFT - 10), ptr->prio);
+ }
+ }
+ free_page((unsigned long) page);
+ return len;
+}
+
+int is_swap_partition(kdev_t dev) {
+ struct swap_info_struct *ptr = swap_info;
+ int i;
+
+ for (i = 0 ; i < nr_swapfiles ; i++, ptr++) {
+ if (ptr->flags & SWP_USED)
+ if (ptr->swap_device == dev)
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Written 01/25/92 by Simmule Turner, heavily changed by Linus.
+ *
+ * The swapon system call
+ */
+asmlinkage long sys_swapon(const char * specialfile, int swap_flags)
+{
+ struct swap_info_struct * p;
+ struct nameidata nd;
+ struct inode * swap_inode;
+ unsigned int type;
+ int i, j, prev;
+ int error;
+ static int least_priority = 0;
+ union swap_header *swap_header = 0;
+ int swap_header_version;
+ int nr_good_pages = 0;
+ unsigned long maxpages = 1;
+ int swapfilesize;
+ struct block_device *bdev = NULL;
+ unsigned short *swap_map;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ lock_kernel();
+ swap_list_lock();
+ p = swap_info;
+ for (type = 0 ; type < nr_swapfiles ; type++,p++)
+ if (!(p->flags & SWP_USED))
+ break;
+ error = -EPERM;
+ if (type >= MAX_SWAPFILES) {
+ swap_list_unlock();
+ goto out;
+ }
+ if (type >= nr_swapfiles)
+ nr_swapfiles = type+1;
+ p->flags = SWP_USED;
+ p->swap_file = NULL;
+ p->swap_vfsmnt = NULL;
+ p->swap_device = 0;
+ p->swap_map = NULL;
+ p->lowest_bit = 0;
+ p->highest_bit = 0;
+ p->cluster_nr = 0;
+ p->sdev_lock = SPIN_LOCK_UNLOCKED;
+ p->next = -1;
+ if (swap_flags & SWAP_FLAG_PREFER) {
+ p->prio =
+ (swap_flags & SWAP_FLAG_PRIO_MASK)>>SWAP_FLAG_PRIO_SHIFT;
+ } else {
+ p->prio = --least_priority;
+ }
+ swap_list_unlock();
+ error = user_path_walk(specialfile, &nd);
+ if (error)
+ goto bad_swap_2;
+
+ p->swap_file = nd.dentry;
+ p->swap_vfsmnt = nd.mnt;
+ swap_inode = nd.dentry->d_inode;
+ error = -EINVAL;
+
+ if (S_ISBLK(swap_inode->i_mode)) {
+ kdev_t dev = swap_inode->i_rdev;
+ struct block_device_operations *bdops;
+ devfs_handle_t de;
+
+ if (is_mounted(dev)) {
+ error = -EBUSY;
+ goto bad_swap_2;
+ }
+
+ p->swap_device = dev;
+ set_blocksize(dev, PAGE_SIZE);
+
+ bd_acquire(swap_inode);
+ bdev = swap_inode->i_bdev;
+ de = devfs_get_handle_from_inode(swap_inode);
+ bdops = devfs_get_ops(de); /* Increments module use count */
+ if (bdops) bdev->bd_op = bdops;
+
+ error = blkdev_get(bdev, FMODE_READ|FMODE_WRITE, 0, BDEV_SWAP);
+ devfs_put_ops(de);/*Decrement module use count now we're safe*/
+ if (error)
+ goto bad_swap_2;
+ set_blocksize(dev, PAGE_SIZE);
+ error = -ENODEV;
+ if (!dev || (blk_size[MAJOR(dev)] &&
+ !blk_size[MAJOR(dev)][MINOR(dev)]))
+ goto bad_swap;
+ swapfilesize = 0;
+ if (blk_size[MAJOR(dev)])
+ swapfilesize = blk_size[MAJOR(dev)][MINOR(dev)]
+ >> (PAGE_SHIFT - 10);
+ } else if (S_ISREG(swap_inode->i_mode))
+ swapfilesize = swap_inode->i_size >> PAGE_SHIFT;
+ else
+ goto bad_swap;
+
+ error = -EBUSY;
+ for (i = 0 ; i < nr_swapfiles ; i++) {
+ struct swap_info_struct *q = &swap_info[i];
+ if (i == type || !q->swap_file)
+ continue;
+ if (swap_inode->i_mapping == q->swap_file->d_inode->i_mapping)
+ goto bad_swap;
+ }
+
+ swap_header = (void *) __get_free_page(GFP_USER);
+ if (!swap_header) {
+ printk("Unable to start swapping: out of memory :-)\n");
+ error = -ENOMEM;
+ goto bad_swap;
+ }
+
+ lock_page(virt_to_page(swap_header));
+ rw_swap_page_nolock(READ, SWP_ENTRY(type,0), (char *) swap_header);
+
+ if (!memcmp("SWAP-SPACE",swap_header->magic.magic,10))
+ swap_header_version = 1;
+ else if (!memcmp("SWAPSPACE2",swap_header->magic.magic,10))
+ swap_header_version = 2;
+ else {
+ printk("Unable to find swap-space signature\n");
+ error = -EINVAL;
+ goto bad_swap;
+ }
+
+ switch (swap_header_version) {
+ case 1:
+ memset(((char *) swap_header)+PAGE_SIZE-10,0,10);
+ j = 0;
+ p->lowest_bit = 0;
+ p->highest_bit = 0;
+ for (i = 1 ; i < 8*PAGE_SIZE ; i++) {
+ if (test_bit(i,(char *) swap_header)) {
+ if (!p->lowest_bit)
+ p->lowest_bit = i;
+ p->highest_bit = i;
+ maxpages = i+1;
+ j++;
+ }
+ }
+ nr_good_pages = j;
+ p->swap_map = vmalloc(maxpages * sizeof(short));
+ if (!p->swap_map) {
+ error = -ENOMEM;
+ goto bad_swap;
+ }
+ for (i = 1 ; i < maxpages ; i++) {
+ if (test_bit(i,(char *) swap_header))
+ p->swap_map[i] = 0;
+ else
+ p->swap_map[i] = SWAP_MAP_BAD;
+ }
+ break;
+
+ case 2:
+ /* Check the swap header's sub-version and the size of
+ the swap file and bad block lists */
+ if (swap_header->info.version != 1) {
+ printk(KERN_WARNING
+ "Unable to handle swap header version %d\n",
+ swap_header->info.version);
+ error = -EINVAL;
+ goto bad_swap;
+ }
+
+ p->lowest_bit = 1;
+ maxpages = SWP_OFFSET(SWP_ENTRY(0,~0UL)) - 1;
+ if (maxpages > swap_header->info.last_page)
+ maxpages = swap_header->info.last_page;
+ p->highest_bit = maxpages - 1;
+
+ error = -EINVAL;
+ if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
+ goto bad_swap;
+
+ /* OK, set up the swap map and apply the bad block list */
+ if (!(p->swap_map = vmalloc(maxpages * sizeof(short)))) {
+ error = -ENOMEM;
+ goto bad_swap;
+ }
+
+ error = 0;
+ memset(p->swap_map, 0, maxpages * sizeof(short));
+ for (i=0; i<swap_header->info.nr_badpages; i++) {
+ int page = swap_header->info.badpages[i];
+ if (page <= 0 || page >= swap_header->info.last_page)
+ error = -EINVAL;
+ else
+ p->swap_map[page] = SWAP_MAP_BAD;
+ }
+ nr_good_pages = swap_header->info.last_page -
+ swap_header->info.nr_badpages -
+ 1 /* header page */;
+ if (error)
+ goto bad_swap;
+ }
+
+ if (swapfilesize && maxpages > swapfilesize) {
+ printk(KERN_WARNING
+ "Swap area shorter than signature indicates\n");
+ error = -EINVAL;
+ goto bad_swap;
+ }
+ if (!nr_good_pages) {
+ printk(KERN_WARNING "Empty swap-file\n");
+ error = -EINVAL;
+ goto bad_swap;
+ }
+ p->swap_map[0] = SWAP_MAP_BAD;
+ swap_list_lock();
+ swap_device_lock(p);
+ p->max = maxpages;
+ p->flags = SWP_WRITEOK;
+ p->pages = nr_good_pages;
+ nr_swap_pages += nr_good_pages;
+ total_swap_pages += nr_good_pages;
+ printk(KERN_INFO "Adding Swap: %dk swap-space (priority %d)\n",
+ nr_good_pages<<(PAGE_SHIFT-10), p->prio);
+
+ /* insert swap space into swap_list: */
+ prev = -1;
+ for (i = swap_list.head; i >= 0; i = swap_info[i].next) {
+ if (p->prio >= swap_info[i].prio) {
+ break;
+ }
+ prev = i;
+ }
+ p->next = i;
+ if (prev < 0) {
+ swap_list.head = swap_list.next = p - swap_info;
+ } else {
+ swap_info[prev].next = p - swap_info;
+ }
+ swap_device_unlock(p);
+ swap_list_unlock();
+ error = 0;
+ goto out;
+bad_swap:
+ if (bdev)
+ blkdev_put(bdev, BDEV_SWAP);
+bad_swap_2:
+ swap_list_lock();
+ swap_map = p->swap_map;
+ nd.mnt = p->swap_vfsmnt;
+ nd.dentry = p->swap_file;
+ p->swap_device = 0;
+ p->swap_file = NULL;
+ p->swap_vfsmnt = NULL;
+ p->swap_map = NULL;
+ p->flags = 0;
+ if (!(swap_flags & SWAP_FLAG_PREFER))
+ ++least_priority;
+ swap_list_unlock();
+ if (swap_map)
+ vfree(swap_map);
+ path_release(&nd);
+out:
+ if (swap_header)
+ free_page((long) swap_header);
+ unlock_kernel();
+ return error;
+}
+
+void si_swapinfo(struct sysinfo *val)
+{
+ unsigned int i;
+ unsigned long nr_to_be_unused = 0;
+
+ swap_list_lock();
+ for (i = 0; i < nr_swapfiles; i++) {
+ unsigned int j;
+ if (swap_info[i].flags != SWP_USED)
+ continue;
+ for (j = 0; j < swap_info[i].max; ++j) {
+ switch (swap_info[i].swap_map[j]) {
+ case 0:
+ case SWAP_MAP_BAD:
+ continue;
+ default:
+ nr_to_be_unused++;
+ }
+ }
+ }
+ val->freeswap = nr_swap_pages + nr_to_be_unused;
+ val->totalswap = total_swap_pages + nr_to_be_unused;
+ swap_list_unlock();
+}
+
+/*
+ * Verify that a swap entry is valid and increment its swap map count.
+ *
+ * Note: if swap_map[] reaches SWAP_MAP_MAX the entries are treated as
+ * "permanent", but will be reclaimed by the next swapoff.
+ */
+int swap_duplicate(swp_entry_t entry)
+{
+ struct swap_info_struct * p;
+ unsigned long offset, type;
+ int result = 0;
+
+ type = SWP_TYPE(entry);
+ if (type >= nr_swapfiles)
+ goto bad_file;
+ p = type + swap_info;
+ offset = SWP_OFFSET(entry);
+
+ swap_device_lock(p);
+ if (offset < p->max && p->swap_map[offset]) {
+ if (p->swap_map[offset] < SWAP_MAP_MAX - 1) {
+ p->swap_map[offset]++;
+ result = 1;
+ } else if (p->swap_map[offset] <= SWAP_MAP_MAX) {
+ if (swap_overflow++ < 5)
+ printk(KERN_WARNING "swap_dup: swap entry
overflow\n");
+ p->swap_map[offset] = SWAP_MAP_MAX;
+ result = 1;
+ }
+ }
+ swap_device_unlock(p);
+out:
+ return result;
+
+bad_file:
+ printk(KERN_ERR "swap_dup: %s%08lx\n", Bad_file, entry.val);
+ goto out;
+}
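
The count handling in swap_duplicate() clamps at SWAP_MAP_MAX rather than
failing, making the entry "permanent" until the next swapoff (see the
overflow discussion in try_to_unuse() above). A stand-alone model of just
that saturation logic:

    #include <stdio.h>

    #define SWAP_MAP_MAX 0x7fff

    /* Increment a swap share count, clamping at SWAP_MAP_MAX. */
    static int dup(unsigned short *count)
    {
        if (*count < SWAP_MAP_MAX - 1) {
            (*count)++;
            return 1;
        }
        if (*count <= SWAP_MAP_MAX) {
            *count = SWAP_MAP_MAX;   /* sticky until the next swapoff */
            return 1;
        }
        return 0;
    }

    int main(void)
    {
        unsigned short c = SWAP_MAP_MAX - 1;
        dup(&c);
        printf("%#x\n", c);          /* prints 0x7fff and stays there */
        return 0;
    }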
+
+/*
+ * Prior swap_duplicate protects against swap device deletion.
+ */
+void get_swaphandle_info(swp_entry_t entry, unsigned long *offset,
+ kdev_t *dev, struct inode **swapf)
+{
+ unsigned long type;
+ struct swap_info_struct *p;
+
+ type = SWP_TYPE(entry);
+ if (type >= nr_swapfiles) {
+ printk(KERN_ERR "rw_swap_page: %s%08lx\n", Bad_file, entry.val);
+ return;
+ }
+
+ p = &swap_info[type];
+ *offset = SWP_OFFSET(entry);
+ if (*offset >= p->max && *offset != 0) {
+ printk(KERN_ERR "rw_swap_page: %s%08lx\n", Bad_offset,
entry.val);
+ return;
+ }
+ if (p->swap_map && !p->swap_map[*offset]) {
+ printk(KERN_ERR "rw_swap_page: %s%08lx\n", Unused_offset,
entry.val);
+ return;
+ }
+ if (!(p->flags & SWP_USED)) {
+ printk(KERN_ERR "rw_swap_page: %s%08lx\n", Unused_file,
entry.val);
+ return;
+ }
+
+ if (p->swap_device) {
+ *dev = p->swap_device;
+ } else if (p->swap_file) {
+ *swapf = p->swap_file->d_inode;
+ } else {
+ printk(KERN_ERR "rw_swap_page: no swap file or device\n");
+ }
+ return;
+}
+
+/*
+ * swap_device_lock prevents swap_map being freed. Don't grab an extra
+ * reference on the swaphandle, it doesn't matter if it becomes unused.
+ */
+int valid_swaphandles(swp_entry_t entry, unsigned long *offset)
+{
+ int ret = 0, i = 1 << page_cluster;
+ unsigned long toff;
+ struct swap_info_struct *swapdev = SWP_TYPE(entry) + swap_info;
+
+ if (!page_cluster) /* no readahead */
+ return 0;
+ toff = (SWP_OFFSET(entry) >> page_cluster) << page_cluster;
+ if (!toff) /* first page is swap header */
+ toff++, i--;
+ *offset = toff;
+
+ swap_device_lock(swapdev);
+ do {
+ /* Don't read-ahead past the end of the swap area */
+ if (toff >= swapdev->max)
+ break;
+ /* Don't read in free or bad pages */
+ if (!swapdev->swap_map[toff])
+ break;
+ if (swapdev->swap_map[toff] == SWAP_MAP_BAD)
+ break;
+ toff++;
+ ret++;
+ } while (--i);
+ swap_device_unlock(swapdev);
+ return ret;
+}
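
The readahead window computed above is the power-of-two cluster containing
the faulting offset, found by rounding down with a shift pair. A short
illustration with made-up values:

    #include <stdio.h>

    int main(void)
    {
        unsigned long page_cluster = 3;     /* window of 8 pages */
        unsigned long offset = 21;
        unsigned long toff = (offset >> page_cluster) << page_cluster;

        printf("read slots %lu..%lu\n", toff, toff + (1UL << page_cluster) - 1);
        /* offset 21 with a 3-bit cluster -> slots 16..23 */
        return 0;
    }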
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/mm/vmalloc.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/mm/vmalloc.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,385 @@
+/*
+ * linux/mm/vmalloc.c
+ *
+ * Copyright (C) 1993 Linus Torvalds
+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
+ * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@xxxxxxxxxxx>, May 2000
+ */
+
+#include <linux/config.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/spinlock.h>
+#include <linux/highmem.h>
+#include <linux/smp_lock.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgalloc.h>
+
+rwlock_t vmlist_lock = RW_LOCK_UNLOCKED;
+struct vm_struct * vmlist;
+
+static inline void free_area_pte(pmd_t * pmd, unsigned long address, unsigned long size)
+{
+ pte_t * pte;
+ unsigned long end;
+
+ if (pmd_none(*pmd))
+ return;
+ if (pmd_bad(*pmd)) {
+ pmd_ERROR(*pmd);
+ pmd_clear(pmd);
+ return;
+ }
+ pte = pte_offset(pmd, address);
+ address &= ~PMD_MASK;
+ end = address + size;
+ if (end > PMD_SIZE)
+ end = PMD_SIZE;
+ do {
+ pte_t page;
+ page = ptep_get_and_clear(pte);
+ address += PAGE_SIZE;
+ pte++;
+ if (pte_none(page))
+ continue;
+ if (pte_present(page)) {
+ struct page *ptpage = pte_page(page);
+ if (VALID_PAGE(ptpage) && (!PageReserved(ptpage)))
+ __free_page(ptpage);
+ continue;
+ }
+ printk(KERN_CRIT "Whee.. Swapped out page in kernel page
table\n");
+ } while (address < end);
+}
+
+static inline void free_area_pmd(pgd_t * dir, unsigned long address, unsigned long size)
+{
+ pmd_t * pmd;
+ unsigned long end;
+
+ if (pgd_none(*dir))
+ return;
+ if (pgd_bad(*dir)) {
+ pgd_ERROR(*dir);
+ pgd_clear(dir);
+ return;
+ }
+ pmd = pmd_offset(dir, address);
+ address &= ~PGDIR_MASK;
+ end = address + size;
+ if (end > PGDIR_SIZE)
+ end = PGDIR_SIZE;
+ do {
+ free_area_pte(pmd, address, end - address);
+ address = (address + PMD_SIZE) & PMD_MASK;
+ pmd++;
+ } while (address < end);
+}
+
+void vmfree_area_pages(unsigned long address, unsigned long size)
+{
+ pgd_t * dir;
+ unsigned long end = address + size;
+
+ dir = pgd_offset_k(address);
+ flush_cache_all();
+ do {
+ free_area_pmd(dir, address, end - address);
+ address = (address + PGDIR_SIZE) & PGDIR_MASK;
+ dir++;
+ } while (address && (address < end));
+ flush_tlb_all();
+}
+
+static inline int alloc_area_pte (pte_t * pte, unsigned long address,
+ unsigned long size, int gfp_mask,
+ pgprot_t prot, struct page ***pages)
+{
+ unsigned long end;
+
+ address &= ~PMD_MASK;
+ end = address + size;
+ if (end > PMD_SIZE)
+ end = PMD_SIZE;
+ do {
+ struct page * page;
+
+ if (!pages) {
+ spin_unlock(&init_mm.page_table_lock);
+ page = alloc_page(gfp_mask);
+ spin_lock(&init_mm.page_table_lock);
+ } else {
+ page = (**pages);
+ (*pages)++;
+
+ /* Add a reference to the page so we can free later */
+ if (page)
+ atomic_inc(&page->count);
+
+ }
+ if (!pte_none(*pte))
+ printk(KERN_ERR "alloc_area_pte: page already
exists\n");
+ if (!page)
+ return -ENOMEM;
+ set_pte(pte, mk_pte(page, prot));
+ address += PAGE_SIZE;
+ pte++;
+ } while (address < end);
+ return 0;
+}
+
+static inline int alloc_area_pmd(pmd_t * pmd, unsigned long address,
+ unsigned long size, int gfp_mask,
+ pgprot_t prot, struct page ***pages)
+{
+ unsigned long end;
+
+ address &= ~PGDIR_MASK;
+ end = address + size;
+ if (end > PGDIR_SIZE)
+ end = PGDIR_SIZE;
+ do {
+ pte_t * pte = pte_alloc(&init_mm, pmd, address);
+ if (!pte)
+ return -ENOMEM;
+ if (alloc_area_pte(pte, address, end - address,
+ gfp_mask, prot, pages))
+ return -ENOMEM;
+ address = (address + PMD_SIZE) & PMD_MASK;
+ pmd++;
+ } while (address < end);
+ return 0;
+}
+
+/*static inline*/ int __vmalloc_area_pages (unsigned long address,
+ unsigned long size,
+ int gfp_mask,
+ pgprot_t prot,
+ struct page ***pages)
+{
+ pgd_t * dir;
+ unsigned long start = address;
+ unsigned long end = address + size;
+
+ dir = pgd_offset_k(address);
+ spin_lock(&init_mm.page_table_lock);
+ do {
+ pmd_t *pmd;
+
+ pmd = pmd_alloc(&init_mm, dir, address);
+ if (!pmd)
+ goto err;
+
+ if (alloc_area_pmd(pmd, address, end - address, gfp_mask, prot, pages))
+ goto err; // The kernel NEVER reclaims pmds, so no need to undo pmd_alloc() here
+
+ address = (address + PGDIR_SIZE) & PGDIR_MASK;
+ dir++;
+ } while (address && (address < end));
+ spin_unlock(&init_mm.page_table_lock);
+ flush_cache_all();
+ XEN_flush_page_update_queue();
+ return 0;
+err:
+ spin_unlock(&init_mm.page_table_lock);
+ flush_cache_all();
+ if (address > start)
+ vmfree_area_pages(start, address - start);
+ return -ENOMEM;
+}
+
+int vmalloc_area_pages(unsigned long address, unsigned long size,
+ int gfp_mask, pgprot_t prot)
+{
+ return __vmalloc_area_pages(address, size, gfp_mask, prot, NULL);
+}
+
+struct vm_struct * get_vm_area(unsigned long size, unsigned long flags)
+{
+ unsigned long addr, next;
+ struct vm_struct **p, *tmp, *area;
+
+ area = (struct vm_struct *) kmalloc(sizeof(*area), GFP_KERNEL);
+ if (!area)
+ return NULL;
+
+ size += PAGE_SIZE;
+ if (!size) {
+ kfree (area);
+ return NULL;
+ }
+
+ addr = VMALLOC_START;
+ write_lock(&vmlist_lock);
+ for (p = &vmlist; (tmp = *p) ; p = &tmp->next) {
+ if ((size + addr) < addr)
+ goto out;
+ if (size + addr <= (unsigned long) tmp->addr)
+ break;
+ next = tmp->size + (unsigned long) tmp->addr;
+ if (next > addr)
+ addr = next;
+ if (addr > VMALLOC_END-size)
+ goto out;
+ }
+ area->flags = flags;
+ area->addr = (void *)addr;
+ area->size = size;
+ area->next = *p;
+ *p = area;
+ write_unlock(&vmlist_lock);
+ return area;
+
+out:
+ write_unlock(&vmlist_lock);
+ kfree(area);
+ return NULL;
+}
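
get_vm_area() adds one extra PAGE_SIZE so every mapping is followed by an
unmapped guard page, then walks the address-sorted vmlist looking for the
first hole big enough. A simplified first-fit sketch over made-up areas
(the overflow and VMALLOC_END checks are omitted):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    /* First-fit hole search over a sorted list of (addr, size) areas,
     * mirroring the vmlist walk above. Addresses are illustrative. */
    struct area { unsigned long addr, size; };

    static unsigned long find_hole(const struct area *v, int n,
                                   unsigned long start, unsigned long size)
    {
        unsigned long addr = start;
        int i;

        size += PAGE_SIZE;                    /* trailing guard page */
        for (i = 0; i < n; i++) {
            if (addr + size <= v[i].addr)
                break;                        /* hole before this area */
            if (v[i].addr + v[i].size > addr)
                addr = v[i].addr + v[i].size; /* skip past it */
        }
        return addr;
    }

    int main(void)
    {
        struct area v[] = { { 0x1000, 0x3000 }, { 0x8000, 0x1000 } };
        printf("%#lx\n", find_hole(v, 2, 0x1000, 0x2000));  /* 0x4000 */
        return 0;
    }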
+
+void __vfree(void * addr, int free_area_pages)
+{
+ struct vm_struct **p, *tmp;
+
+ if (!addr)
+ return;
+ if ((PAGE_SIZE-1) & (unsigned long) addr) {
+ printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
+ return;
+ }
+ write_lock(&vmlist_lock);
+ for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
+ if (tmp->addr == addr) {
+ *p = tmp->next;
+ if (free_area_pages)
+ vmfree_area_pages(VMALLOC_VMADDR(tmp->addr), tmp->size);
+ write_unlock(&vmlist_lock);
+ kfree(tmp);
+ return;
+ }
+ }
+ write_unlock(&vmlist_lock);
+ printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", addr);
+}
+
+void vfree(void * addr)
+{
+ __vfree(addr,1);
+}
+
+void * __vmalloc (unsigned long size, int gfp_mask, pgprot_t prot)
+{
+ void * addr;
+ struct vm_struct *area;
+
+ size = PAGE_ALIGN(size);
+ if (!size || (size >> PAGE_SHIFT) > num_physpages)
+ return NULL;
+ area = get_vm_area(size, VM_ALLOC);
+ if (!area)
+ return NULL;
+ addr = area->addr;
+ if (__vmalloc_area_pages(VMALLOC_VMADDR(addr), size, gfp_mask,
+ prot, NULL)) {
+ __vfree(addr, 0);
+ return NULL;
+ }
+ return addr;
+}
+
+void * vmap(struct page **pages, int count,
+ unsigned long flags, pgprot_t prot)
+{
+ void * addr;
+ struct vm_struct *area;
+ unsigned long size = count << PAGE_SHIFT;
+
+ if (!size || size > (max_mapnr << PAGE_SHIFT))
+ return NULL;
+ area = get_vm_area(size, flags);
+ if (!area) {
+ return NULL;
+ }
+ addr = area->addr;
+ if (__vmalloc_area_pages(VMALLOC_VMADDR(addr), size, 0,
+ prot, &pages)) {
+ __vfree(addr, 0);
+ return NULL;
+ }
+ return addr;
+}
+
+long vread(char *buf, char *addr, unsigned long count)
+{
+ struct vm_struct *tmp;
+ char *vaddr, *buf_start = buf;
+ unsigned long n;
+
+ /* Don't allow overflow */
+ if ((unsigned long) addr + count < count)
+ count = -(unsigned long) addr;
+
+ read_lock(&vmlist_lock);
+ for (tmp = vmlist; tmp; tmp = tmp->next) {
+ vaddr = (char *) tmp->addr;
+ if (addr >= vaddr + tmp->size - PAGE_SIZE)
+ continue;
+ while (addr < vaddr) {
+ if (count == 0)
+ goto finished;
+ *buf = '\0';
+ buf++;
+ addr++;
+ count--;
+ }
+ n = vaddr + tmp->size - PAGE_SIZE - addr;
+ do {
+ if (count == 0)
+ goto finished;
+ *buf = *addr;
+ buf++;
+ addr++;
+ count--;
+ } while (--n > 0);
+ }
+finished:
+ read_unlock(&vmlist_lock);
+ return buf - buf_start;
+}
+
+long vwrite(char *buf, char *addr, unsigned long count)
+{
+ struct vm_struct *tmp;
+ char *vaddr, *buf_start = buf;
+ unsigned long n;
+
+ /* Don't allow overflow */
+ if ((unsigned long) addr + count < count)
+ count = -(unsigned long) addr;
+
+ read_lock(&vmlist_lock);
+ for (tmp = vmlist; tmp; tmp = tmp->next) {
+ vaddr = (char *) tmp->addr;
+ if (addr >= vaddr + tmp->size - PAGE_SIZE)
+ continue;
+ while (addr < vaddr) {
+ if (count == 0)
+ goto finished;
+ buf++;
+ addr++;
+ count--;
+ }
+ n = vaddr + tmp->size - PAGE_SIZE - addr;
+ do {
+ if (count == 0)
+ goto finished;
+ *addr = *buf;
+ buf++;
+ addr++;
+ count--;
+ } while (--n > 0);
+ }
+finished:
+ read_unlock(&vmlist_lock);
+ return buf - buf_start;
+}
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4-xen-sparse/net/core/skbuff.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.4-xen-sparse/net/core/skbuff.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,1309 @@
+/*
+ * Routines having to do with the 'struct sk_buff' memory handlers.
+ *
+ * Authors: Alan Cox <iiitac@xxxxxxxxxxxxxx>
+ * Florian La Roche <rzsfl@xxxxxxxxxxxx>
+ *
+ * Version: $Id: skbuff.c,v 1.90 2001/11/07 05:56:19 davem Exp $
+ *
+ * Fixes:
+ * Alan Cox : Fixed the worst of the load balancer bugs.
+ * Dave Platt : Interrupt stacking fix.
+ * Richard Kooijman : Timestamp fixes.
+ * Alan Cox : Changed buffer format.
+ * Alan Cox : destructor hook for AF_UNIX etc.
+ * Linus Torvalds : Better skb_clone.
+ * Alan Cox : Added skb_copy.
+ * Alan Cox : Added all the changed routines Linus
+ * only put in the headers
+ * Ray VanTassle : Fixed --skb->lock in free
+ * Alan Cox : skb_copy copy arp field
+ * Andi Kleen : slabified it.
+ *
+ * NOTE:
+ * The __skb_ routines should be called with interrupts
+ * disabled, or you better be *real* sure that the operation is atomic
+ * with respect to whatever list is being frobbed (e.g. via lock_sock()
+ * or via disabling bottom half handlers, etc).
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/*
+ * The functions in this file will not compile correctly with gcc 2.4.x
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/inet.h>
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+#include <linux/string.h>
+#include <linux/skbuff.h>
+#include <linux/cache.h>
+#include <linux/rtnetlink.h>
+#include <linux/init.h>
+#include <linux/highmem.h>
+
+#include <net/protocol.h>
+#include <net/dst.h>
+#include <net/sock.h>
+#include <net/checksum.h>
+
+#include <asm/uaccess.h>
+#include <asm/system.h>
+
+int sysctl_hot_list_len = 128;
+
+static kmem_cache_t *skbuff_head_cache;
+
+static union {
+ struct sk_buff_head list;
+ char pad[SMP_CACHE_BYTES];
+} skb_head_pool[NR_CPUS];
+
+/*
+ * Keep out-of-line to prevent kernel bloat.
+ * __builtin_return_address is not used because it is not always
+ * reliable.
+ */
+
+/**
+ * skb_over_panic - private function
+ * @skb: buffer
+ * @sz: size
+ * @here: address
+ *
+ * Out of line support code for skb_put(). Not user callable.
+ */
+
+void skb_over_panic(struct sk_buff *skb, int sz, void *here)
+{
+ printk("skput:over: %p:%d put:%d dev:%s",
+ here, skb->len, sz, skb->dev ? skb->dev->name : "<NULL>");
+ BUG();
+}
+
+/**
+ * skb_under_panic - private function
+ * @skb: buffer
+ * @sz: size
+ * @here: address
+ *
+ * Out of line support code for skb_push(). Not user callable.
+ */
+
+
+void skb_under_panic(struct sk_buff *skb, int sz, void *here)
+{
+ printk("skput:under: %p:%d put:%d dev:%s",
+ here, skb->len, sz, skb->dev ? skb->dev->name : "<NULL>");
+ BUG();
+}
+
+static __inline__ struct sk_buff *skb_head_from_pool(void)
+{
+ struct sk_buff_head *list = &skb_head_pool[smp_processor_id()].list;
+
+ if (skb_queue_len(list)) {
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ skb = __skb_dequeue(list);
+ local_irq_restore(flags);
+ return skb;
+ }
+ return NULL;
+}
+
+static __inline__ void skb_head_to_pool(struct sk_buff *skb)
+{
+ struct sk_buff_head *list = &skb_head_pool[smp_processor_id()].list;
+
+ if (skb_queue_len(list) < sysctl_hot_list_len) {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ __skb_queue_head(list, skb);
+ local_irq_restore(flags);
+
+ return;
+ }
+ kmem_cache_free(skbuff_head_cache, skb);
+}
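
The two helpers above implement a bounded per-CPU free list of sk_buff
heads, so most allocations and frees avoid the slab entirely. A single-CPU
user-space model of the same push/pop-with-bound behaviour (names are
illustrative, interrupts and per-CPU indexing omitted):

    #include <stdio.h>
    #include <stdlib.h>

    #define HOT_LIST_LEN 128   /* stand-in for sysctl_hot_list_len */

    struct head { struct head *next; };

    static struct head *pool;
    static int pool_len;

    static struct head *head_from_pool(void)
    {
        struct head *h = pool;
        if (h) {
            pool = h->next;
            pool_len--;
        }
        return h;                /* NULL -> caller allocates fresh */
    }

    static void head_to_pool(struct head *h)
    {
        if (pool_len < HOT_LIST_LEN) {
            h->next = pool;      /* push onto the hot list */
            pool = h;
            pool_len++;
        } else {
            free(h);             /* pool full: really free it */
        }
    }

    int main(void)
    {
        struct head *h = malloc(sizeof(*h));
        head_to_pool(h);
        printf("reused: %s\n", head_from_pool() == h ? "yes" : "no");
        return 0;
    }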
+
+
+/* Allocate a new skbuff. We do this ourselves so we can fill in a few
+ * 'private' fields and also do memory statistics to find all the
+ * [BEEP] leaks.
+ *
+ */
+
+/**
+ * alloc_skb - allocate a network buffer
+ * @size: size to allocate
+ * @gfp_mask: allocation mask
+ *
+ * Allocate a new &sk_buff. The returned buffer has no headroom and a
+ * tail room of size bytes. The object has a reference count of one.
+ * The return is the buffer. On a failure the return is %NULL.
+ *
+ * Buffers may only be allocated from interrupts using a @gfp_mask of
+ * %GFP_ATOMIC.
+ */
+
+struct sk_buff *alloc_skb(unsigned int size,int gfp_mask)
+{
+ struct sk_buff *skb;
+ u8 *data;
+
+ if (in_interrupt() && (gfp_mask & __GFP_WAIT)) {
+ static int count = 0;
+ if (++count < 5) {
+ printk(KERN_ERR "alloc_skb called nonatomically "
+ "from interrupt %p\n", NET_CALLER(size));
+ BUG();
+ }
+ gfp_mask &= ~__GFP_WAIT;
+ }
+
+ /* Get the HEAD */
+ skb = skb_head_from_pool();
+ if (skb == NULL) {
+ skb = kmem_cache_alloc(skbuff_head_cache, gfp_mask & ~__GFP_DMA);
+ if (skb == NULL)
+ goto nohead;
+ }
+
+ /* Get the DATA. Size must match skb_add_mtu(). */
+ size = SKB_DATA_ALIGN(size);
+ data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
+ if (data == NULL)
+ goto nodata;
+
+ /* XXX: does not include slab overhead */
+ skb->truesize = size + sizeof(struct sk_buff);
+
+ /* Load the data pointers. */
+ skb->head = data;
+ skb->data = data;
+ skb->tail = data;
+ skb->end = data + size;
+
+ /* Set up other state */
+ skb->len = 0;
+ skb->cloned = 0;
+ skb->data_len = 0;
+
+ atomic_set(&skb->users, 1);
+ atomic_set(&(skb_shinfo(skb)->dataref), 1);
+ skb_shinfo(skb)->nr_frags = 0;
+ skb_shinfo(skb)->frag_list = NULL;
+ return skb;
+
+nodata:
+ skb_head_to_pool(skb);
+nohead:
+ return NULL;
+}
+
+/**
+ * alloc_skb_from_cache - allocate a network buffer
+ * @cp: kmem_cache from which to allocate the data area
+ * (object size must be big enough for @size bytes + skb overheads)
+ * @size: size to allocate
+ * @gfp_mask: allocation mask
+ *
+ * Allocate a new &sk_buff. The returned buffer has no headroom and a
+ * tail room of size bytes. The object has a reference count of one.
+ * The return is the buffer. On a failure the return is %NULL.
+ *
+ * Buffers may only be allocated from interrupts using a @gfp_mask of
+ * %GFP_ATOMIC.
+ */
+
+struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
+ unsigned int size, int gfp_mask)
+{
+ struct sk_buff *skb;
+ u8 *data;
+
+ if (in_interrupt() && (gfp_mask & __GFP_WAIT)) {
+ static int count = 0;
+ if (++count < 5) {
+ printk(KERN_ERR "alloc_skb called nonatomically "
+ "from interrupt %p\n", NET_CALLER(size));
+ BUG();
+ }
+ gfp_mask &= ~__GFP_WAIT;
+ }
+
+ /* Get the HEAD */
+ skb = skb_head_from_pool();
+ if (skb == NULL) {
+ skb = kmem_cache_alloc(skbuff_head_cache, gfp_mask & ~__GFP_DMA);
+ if (skb == NULL)
+ goto nohead;
+ }
+
+ /* Get the DATA. */
+ size = SKB_DATA_ALIGN(size);
+ data = kmem_cache_alloc(cp, gfp_mask);
+ if (data == NULL)
+ goto nodata;
+
+ /* XXX: does not include slab overhead */
+ skb->truesize = size + sizeof(struct sk_buff);
+
+ /* Load the data pointers. */
+ skb->head = data;
+ skb->data = data;
+ skb->tail = data;
+ skb->end = data + size;
+
+ /* Set up other state */
+ skb->len = 0;
+ skb->cloned = 0;
+ skb->data_len = 0;
+
+ atomic_set(&skb->users, 1);
+ atomic_set(&(skb_shinfo(skb)->dataref), 1);
+ skb_shinfo(skb)->nr_frags = 0;
+ skb_shinfo(skb)->frag_list = NULL;
+ return skb;
+
+nodata:
+ skb_head_to_pool(skb);
+nohead:
+ return NULL;
+}
+
+
+/*
+ * Slab constructor for a skb head.
+ */
+static inline void skb_headerinit(void *p, kmem_cache_t *cache,
+ unsigned long flags)
+{
+ struct sk_buff *skb = p;
+
+ skb->next = NULL;
+ skb->prev = NULL;
+ skb->list = NULL;
+ skb->sk = NULL;
+ skb->stamp.tv_sec=0; /* No idea about time */
+ skb->dev = NULL;
+ skb->real_dev = NULL;
+ skb->dst = NULL;
+ memset(skb->cb, 0, sizeof(skb->cb));
+ skb->pkt_type = PACKET_HOST; /* Default type */
+ skb->ip_summed = 0;
+ skb->priority = 0;
+ skb->security = 0; /* By default packets are insecure */
+ skb->destructor = NULL;
+
+#ifdef CONFIG_NETFILTER
+ skb->nfmark = skb->nfcache = 0;
+ skb->nfct = NULL;
+#ifdef CONFIG_NETFILTER_DEBUG
+ skb->nf_debug = 0;
+#endif
+#endif
+#ifdef CONFIG_NET_SCHED
+ skb->tc_index = 0;
+#endif
+}
+
+static void skb_drop_fraglist(struct sk_buff *skb)
+{
+ struct sk_buff *list = skb_shinfo(skb)->frag_list;
+
+ skb_shinfo(skb)->frag_list = NULL;
+
+ do {
+ struct sk_buff *this = list;
+ list = list->next;
+ kfree_skb(this);
+ } while (list);
+}
+
+static void skb_clone_fraglist(struct sk_buff *skb)
+{
+ struct sk_buff *list;
+
+ for (list = skb_shinfo(skb)->frag_list; list; list=list->next)
+ skb_get(list);
+}
+
+static void skb_release_data(struct sk_buff *skb)
+{
+ if (!skb->cloned ||
+ atomic_dec_and_test(&(skb_shinfo(skb)->dataref))) {
+ if (skb_shinfo(skb)->nr_frags) {
+ int i;
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+ put_page(skb_shinfo(skb)->frags[i].page);
+ }
+
+ if (skb_shinfo(skb)->frag_list)
+ skb_drop_fraglist(skb);
+
+ kfree(skb->head);
+ }
+}
+
+/*
+ * Free an skbuff by memory without cleaning the state.
+ */
+void kfree_skbmem(struct sk_buff *skb)
+{
+ skb_release_data(skb);
+ skb_head_to_pool(skb);
+}
+
+/**
+ * __kfree_skb - private function
+ * @skb: buffer
+ *
+ * Free an sk_buff. Release anything attached to the buffer.
+ * Clean the state. This is an internal helper function. Users should
+ * always call kfree_skb
+ */
+
+void __kfree_skb(struct sk_buff *skb)
+{
+ if (skb->list) {
+ printk(KERN_WARNING "Warning: kfree_skb passed an skb still "
+ "on a list (from %p).\n", NET_CALLER(skb));
+ BUG();
+ }
+
+ dst_release(skb->dst);
+ if(skb->destructor) {
+ if (in_irq()) {
+ printk(KERN_WARNING "Warning: kfree_skb on hard IRQ
%p\n",
+ NET_CALLER(skb));
+ }
+ skb->destructor(skb);
+ }
+#ifdef CONFIG_NETFILTER
+ nf_conntrack_put(skb->nfct);
+#endif
+ skb_headerinit(skb, NULL, 0); /* clean state */
+ kfree_skbmem(skb);
+}
+
+/**
+ * skb_clone - duplicate an sk_buff
+ * @skb: buffer to clone
+ * @gfp_mask: allocation priority
+ *
+ * Duplicate an &sk_buff. The new one is not owned by a socket. Both
+ * copies share the same packet data but not structure. The new
+ * buffer has a reference count of 1. If the allocation fails the
+ * function returns %NULL otherwise the new buffer is returned.
+ *
+ * If this function is called from an interrupt gfp_mask() must be
+ * %GFP_ATOMIC.
+ */
+
+struct sk_buff *skb_clone(struct sk_buff *skb, int gfp_mask)
+{
+ struct sk_buff *n;
+
+ n = skb_head_from_pool();
+ if (!n) {
+ n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
+ if (!n)
+ return NULL;
+ }
+
+#define C(x) n->x = skb->x
+
+ n->next = n->prev = NULL;
+ n->list = NULL;
+ n->sk = NULL;
+ C(stamp);
+ C(dev);
+ C(real_dev);
+ C(h);
+ C(nh);
+ C(mac);
+ C(dst);
+ dst_clone(n->dst);
+ memcpy(n->cb, skb->cb, sizeof(skb->cb));
+ C(len);
+ C(data_len);
+ C(csum);
+ n->cloned = 1;
+ C(pkt_type);
+ C(ip_summed);
+ C(priority);
+ atomic_set(&n->users, 1);
+ C(protocol);
+ C(security);
+ C(truesize);
+ C(head);
+ C(data);
+ C(tail);
+ C(end);
+ n->destructor = NULL;
+#ifdef CONFIG_NETFILTER
+ C(nfmark);
+ C(nfcache);
+ C(nfct);
+#ifdef CONFIG_NETFILTER_DEBUG
+ C(nf_debug);
+#endif
+#endif /*CONFIG_NETFILTER*/
+#if defined(CONFIG_HIPPI)
+ C(private);
+#endif
+#ifdef CONFIG_NET_SCHED
+ C(tc_index);
+#endif
+
+ atomic_inc(&(skb_shinfo(skb)->dataref));
+ skb->cloned = 1;
+#ifdef CONFIG_NETFILTER
+ nf_conntrack_get(skb->nfct);
+#endif
+ return n;
+}
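
skb_clone() copies only the header: both buffers end up pointing at the
same data, whose lifetime is governed by the shared dataref count, and
both are marked cloned. A minimal model of that ownership rule (structures
are stand-ins, not the kernel types):

    #include <stdio.h>

    struct shinfo { int dataref; };
    struct skb { char *data; struct shinfo *sh; int cloned; };

    static void clone(struct skb *n, struct skb *old)
    {
        *n = *old;                 /* copy header fields */
        n->cloned = 1;
        old->cloned = 1;
        old->sh->dataref++;        /* the data now has two users */
    }

    static int release_data(struct skb *s)
    {
        return !s->cloned || --s->sh->dataref == 0;  /* 1 -> free data */
    }

    int main(void)
    {
        struct shinfo sh = { 1 };
        char buf[64];
        struct skb a = { buf, &sh, 0 }, b;

        clone(&b, &a);
        printf("%d %d\n", release_data(&a), release_data(&b)); /* 0 1 */
        return 0;
    }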
+
+static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
+{
+ /*
+ * Shift between the two data areas in bytes
+ */
+ unsigned long offset = new->data - old->data;
+
+ new->list=NULL;
+ new->sk=NULL;
+ new->dev=old->dev;
+ new->real_dev=old->real_dev;
+ new->priority=old->priority;
+ new->protocol=old->protocol;
+ new->dst=dst_clone(old->dst);
+ new->h.raw=old->h.raw+offset;
+ new->nh.raw=old->nh.raw+offset;
+ new->mac.raw=old->mac.raw+offset;
+ memcpy(new->cb, old->cb, sizeof(old->cb));
+ atomic_set(&new->users, 1);
+ new->pkt_type=old->pkt_type;
+ new->stamp=old->stamp;
+ new->destructor = NULL;
+ new->security=old->security;
+#ifdef CONFIG_NETFILTER
+ new->nfmark=old->nfmark;
+ new->nfcache=old->nfcache;
+ new->nfct=old->nfct;
+ nf_conntrack_get(new->nfct);
+#ifdef CONFIG_NETFILTER_DEBUG
+ new->nf_debug=old->nf_debug;
+#endif
+#endif
+#ifdef CONFIG_NET_SCHED
+ new->tc_index = old->tc_index;
+#endif
+}
+
+/**
+ * skb_copy - create private copy of an sk_buff
+ * @skb: buffer to copy
+ * @gfp_mask: allocation priority
+ *
+ * Make a copy of both an &sk_buff and its data. This is used when the
+ * caller wishes to modify the data and needs a private copy of the
+ * data to alter. Returns %NULL on failure or the pointer to the buffer
+ * on success. The returned buffer has a reference count of 1.
+ *
+ * As by-product this function converts non-linear &sk_buff to linear
+ * one, so that &sk_buff becomes completely private and caller is allowed
+ * to modify all the data of returned buffer. This means that this
+ * function is not recommended for use in circumstances when only
+ * header is going to be modified. Use pskb_copy() instead.
+ */
+
+struct sk_buff *skb_copy(const struct sk_buff *skb, int gfp_mask)
+{
+ struct sk_buff *n;
+ int headerlen = skb->data-skb->head;
+
+ /*
+ * Allocate the copy buffer
+ */
+ n=alloc_skb(skb->end - skb->head + skb->data_len, gfp_mask);
+ if(n==NULL)
+ return NULL;
+
+ /* Set the data pointer */
+ skb_reserve(n,headerlen);
+ /* Set the tail pointer and length */
+ skb_put(n,skb->len);
+ n->csum = skb->csum;
+ n->ip_summed = skb->ip_summed;
+
+ if (skb_copy_bits(skb, -headerlen, n->head, headerlen+skb->len))
+ BUG();
+
+ copy_skb_header(n, skb);
+
+ return n;
+}
+
+/* Keep head the same: replace data */
+int skb_linearize(struct sk_buff *skb, int gfp_mask)
+{
+ unsigned int size;
+ u8 *data;
+ long offset;
+ int headerlen = skb->data - skb->head;
+ int expand = (skb->tail+skb->data_len) - skb->end;
+
+ if (skb_shared(skb))
+ BUG();
+
+ if (expand <= 0)
+ expand = 0;
+
+ size = (skb->end - skb->head + expand);
+ size = SKB_DATA_ALIGN(size);
+ data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
+ if (data == NULL)
+ return -ENOMEM;
+
+ /* Copy entire thing */
+ if (skb_copy_bits(skb, -headerlen, data, headerlen+skb->len))
+ BUG();
+
+ /* Offset between the two in bytes */
+ offset = data - skb->head;
+
+ /* Free old data. */
+ skb_release_data(skb);
+
+ skb->head = data;
+ skb->end = data + size;
+
+ /* Set up new pointers */
+ skb->h.raw += offset;
+ skb->nh.raw += offset;
+ skb->mac.raw += offset;
+ skb->tail += offset;
+ skb->data += offset;
+
+ /* Set up shinfo */
+ atomic_set(&(skb_shinfo(skb)->dataref), 1);
+ skb_shinfo(skb)->nr_frags = 0;
+ skb_shinfo(skb)->frag_list = NULL;
+
+ /* We are no longer a clone, even if we were. */
+ skb->cloned = 0;
+
+ skb->tail += skb->data_len;
+ skb->data_len = 0;
+ return 0;
+}
+
+
+/**
+ * pskb_copy - create copy of an sk_buff with private head.
+ * @skb: buffer to copy
+ * @gfp_mask: allocation priority
+ *
+ * Make a copy of both an &sk_buff and part of its data, located
+ * in header. Fragmented data remain shared. This is used when
+ * the caller wishes to modify only header of &sk_buff and needs
+ * private copy of the header to alter. Returns %NULL on failure
+ * or the pointer to the buffer on success.
+ * The returned buffer has a reference count of 1.
+ */
+
+struct sk_buff *pskb_copy(struct sk_buff *skb, int gfp_mask)
+{
+ struct sk_buff *n;
+
+ /*
+ * Allocate the copy buffer
+ */
+ n=alloc_skb(skb->end - skb->head, gfp_mask);
+ if(n==NULL)
+ return NULL;
+
+ /* Set the data pointer */
+ skb_reserve(n,skb->data-skb->head);
+ /* Set the tail pointer and length */
+ skb_put(n,skb_headlen(skb));
+ /* Copy the bytes */
+ memcpy(n->data, skb->data, n->len);
+ n->csum = skb->csum;
+ n->ip_summed = skb->ip_summed;
+
+ n->data_len = skb->data_len;
+ n->len = skb->len;
+
+ if (skb_shinfo(skb)->nr_frags) {
+ int i;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
+ get_page(skb_shinfo(n)->frags[i].page);
+ }
+ skb_shinfo(n)->nr_frags = i;
+ }
+
+ if (skb_shinfo(skb)->frag_list) {
+ skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
+ skb_clone_fraglist(n);
+ }
+
+ copy_skb_header(n, skb);
+
+ return n;
+}
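+
+/*
+ * Example (illustrative sketch): rewriting only protocol headers, so
+ * the fragmented payload may stay shared:
+ *
+ *    struct sk_buff *n = pskb_copy(skb, GFP_ATOMIC);
+ *    if (n == NULL)
+ *        return NULL;
+ *    n->nh.iph->ttl--;    (header bytes are private in the copy)
+ *    ip_send_check(n->nh.iph);
+ */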
+
+/**
+ * pskb_expand_head - reallocate header of &sk_buff
+ * @skb: buffer to reallocate
+ * @nhead: room to add at head
+ * @ntail: room to add at tail
+ * @gfp_mask: allocation priority
+ *
+ * Expands (or creates an identical copy, if &nhead and &ntail are zero)
+ * the header of the skb. The &sk_buff itself is not changed and MUST
+ * have a reference count of 1. Returns zero on success, or an error
+ * code if expansion failed; in the latter case the &sk_buff is not
+ * changed.
+ *
+ * All the pointers pointing into skb header may change and must be
+ * reloaded after call to this function.
+ */
+
+int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, int gfp_mask)
+{
+ int i;
+ u8 *data;
+ int size = nhead + (skb->end - skb->head) + ntail;
+ long off;
+
+ if (skb_shared(skb))
+ BUG();
+
+ size = SKB_DATA_ALIGN(size);
+
+ data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
+ if (data == NULL)
+ goto nodata;
+
+ /* Copy only real data... and, alas, header. This should be
+ * optimized for the cases when header is void. */
+ memcpy(data+nhead, skb->head, skb->tail-skb->head);
+ memcpy(data+size, skb->end, sizeof(struct skb_shared_info));
+
+ for (i=0; i<skb_shinfo(skb)->nr_frags; i++)
+ get_page(skb_shinfo(skb)->frags[i].page);
+
+ if (skb_shinfo(skb)->frag_list)
+ skb_clone_fraglist(skb);
+
+ skb_release_data(skb);
+
+ off = (data+nhead) - skb->head;
+
+ skb->head = data;
+ skb->end = data+size;
+
+ skb->data += off;
+ skb->tail += off;
+ skb->mac.raw += off;
+ skb->h.raw += off;
+ skb->nh.raw += off;
+ skb->cloned = 0;
+ atomic_set(&skb_shinfo(skb)->dataref, 1);
+ return 0;
+
+nodata:
+ return -ENOMEM;
+}
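+
+/*
+ * Example (illustrative sketch): growing headroom in place. Any cached
+ * pointers into the old header are stale afterwards and must be
+ * reloaded, as the doc comment above warns:
+ *
+ *    if (pskb_expand_head(skb, 16, 0, GFP_ATOMIC))
+ *        return -ENOMEM;
+ *    iph = skb->nh.iph;    (reload: skb->head may have moved)
+ */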
+
+/* Make private copy of skb with writable head and some headroom */
+
+struct sk_buff *
+skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
+{
+ struct sk_buff *skb2;
+ int delta = headroom - skb_headroom(skb);
+
+ if (delta <= 0)
+ return pskb_copy(skb, GFP_ATOMIC);
+
+ skb2 = skb_clone(skb, GFP_ATOMIC);
+ if (skb2 == NULL ||
+ !pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC))
+ return skb2;
+
+ kfree_skb(skb2);
+ return NULL;
+}
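+
+/*
+ * Example (illustrative sketch): a tunnel path that needs room to
+ * prepend an outer header; the original is dropped either way, since
+ * a clone of it backs the returned buffer:
+ *
+ *    struct sk_buff *nskb = skb_realloc_headroom(skb, sizeof(struct iphdr));
+ *    kfree_skb(skb);
+ *    if (nskb == NULL)
+ *        return -ENOMEM;
+ */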
+
+
+/**
+ * skb_copy_expand - copy and expand sk_buff
+ * @skb: buffer to copy
+ * @newheadroom: new free bytes at head
+ * @newtailroom: new free bytes at tail
+ * @gfp_mask: allocation priority
+ *
+ * Make a copy of both an &sk_buff and its data and, while doing so,
+ * allocate additional space.
+ *
+ * This is used when the caller wishes to modify the data and needs a
+ * private copy of the data to alter as well as more space for new fields.
+ * Returns %NULL on failure or the pointer to the buffer
+ * on success. The returned buffer has a reference count of 1.
+ *
+ * You must pass %GFP_ATOMIC as the allocation priority if this function
+ * is called from an interrupt.
+ */
+
+
+struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
+ int newheadroom,
+ int newtailroom,
+ int gfp_mask)
+{
+ struct sk_buff *n;
+
+ /*
+ * Allocate the copy buffer
+ */
+
+ n=alloc_skb(newheadroom + skb->len + newtailroom,
+ gfp_mask);
+ if(n==NULL)
+ return NULL;
+
+ skb_reserve(n,newheadroom);
+
+ /* Set the tail pointer and length */
+ skb_put(n,skb->len);
+
+ /* Copy the data only. */
+ if (skb_copy_bits(skb, 0, n->data, skb->len))
+ BUG();
+
+ copy_skb_header(n, skb);
+ return n;
+}
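+
+/*
+ * Example (illustrative sketch): copying while reserving room for
+ * encapsulation at both ends of the buffer (the 16-byte margins are
+ * chosen arbitrarily here):
+ *
+ *    struct sk_buff *n = skb_copy_expand(skb, 16, 16, GFP_ATOMIC);
+ *    if (n == NULL)
+ *        return NULL;
+ */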
+
+/**
+ * skb_pad - zero pad the tail of an skb
+ * @skb: buffer to pad
+ * @pad: space to pad
+ *
+ * Ensure that a buffer is followed by a padding area that is zero
+ * filled. Used by network drivers which may DMA or transfer data
+ * beyond the buffer end onto the wire.
+ *
+ * May return NULL in out of memory cases.
+ */
+
+struct sk_buff *skb_pad(struct sk_buff *skb, int pad)
+{
+ struct sk_buff *nskb;
+
+ /* If the skbuff is non-linear, tailroom is always zero. */
+ if(skb_tailroom(skb) >= pad)
+ {
+ memset(skb->data+skb->len, 0, pad);
+ return skb;
+ }
+
+ nskb = skb_copy_expand(skb, skb_headroom(skb), skb_tailroom(skb) + pad, GFP_ATOMIC);
+ kfree_skb(skb);
+ if(nskb)
+ memset(nskb->data+nskb->len, 0, pad);
+ return nskb;
+}
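+
+/*
+ * Example (illustrative sketch): padding a runt Ethernet frame to the
+ * 60-byte minimum before handing it to hardware. Note that skb_pad()
+ * consumes the original buffer when it has to reallocate, so only the
+ * returned pointer may be used afterwards:
+ *
+ *    if (skb->len < ETH_ZLEN) {
+ *        skb = skb_pad(skb, ETH_ZLEN - skb->len);
+ *        if (skb == NULL)
+ *            return 0;    (original already freed)
+ *    }
+ */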
+
+/* Trims skb to length len. It can change skb pointers if "realloc" is 1.
+ * If realloc==0 and trimming is impossible without changing the data,
+ * it is a BUG().
+ */
+
+int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc)
+{
+ int offset = skb_headlen(skb);
+ int nfrags = skb_shinfo(skb)->nr_frags;
+ int i;
+
+ for (i=0; i<nfrags; i++) {
+ int end = offset + skb_shinfo(skb)->frags[i].size;
+ if (end > len) {
+ if (skb_cloned(skb)) {
+ if (!realloc)
+ BUG();
+ if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+ return -ENOMEM;
+ }
+ if (len <= offset) {
+ put_page(skb_shinfo(skb)->frags[i].page);
+ skb_shinfo(skb)->nr_frags--;
+ } else {
+ skb_shinfo(skb)->frags[i].size = len-offset;
+ }
+ }
+ offset = end;
+ }
+
+ if (offset < len) {
+ skb->data_len -= skb->len - len;
+ skb->len = len;
+ } else {
+ if (len <= skb_headlen(skb)) {
+ skb->len = len;
+ skb->data_len = 0;
+ skb->tail = skb->data + len;
+ if (skb_shinfo(skb)->frag_list && !skb_cloned(skb))
+ skb_drop_fraglist(skb);
+ } else {
+ skb->data_len -= skb->len - len;
+ skb->len = len;
+ }
+ }
+
+ return 0;
+}
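+
+/*
+ * Example (illustrative sketch): callers normally reach this through the
+ * pskb_trim() wrapper, e.g. to drop link-layer padding beyond the IP
+ * total length:
+ *
+ *    if (pskb_trim(skb, ntohs(skb->nh.iph->tot_len)))
+ *        goto drop;
+ */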
+
+/**
+ * __pskb_pull_tail - advance tail of skb header
+ * @skb: buffer to reallocate
+ * @delta: number of bytes to advance tail
+ *
+ * This function makes sense only on a fragmented &sk_buff: it expands
+ * the header, moving its tail forward and copying the necessary data
+ * from the fragmented part.
+ *
+ * &sk_buff MUST have reference count of 1.
+ *
+ * Returns %NULL (and the &sk_buff does not change) if the pull failed,
+ * or the value of the new tail of the skb on success.
+ *
+ * All the pointers pointing into skb header may change and must be
+ * reloaded after call to this function.
+ */
+
+/* Moves tail of skb head forward, copying data from fragmented part,
+ * when it is necessary.
+ * 1. It may fail due to malloc failure.
+ * 2. It may change skb pointers.
+ *
+ * It is pretty complicated. Luckily, it is called only in exceptional cases.
+ */
+unsigned char * __pskb_pull_tail(struct sk_buff *skb, int delta)
+{
+ int i, k, eat;
+
+ /* If the skb does not have enough free space at the tail, get a new
+ * buffer plus 128 bytes for future expansions. If we have enough
+ * tailroom, reallocate without expansion only if the skb is cloned.
+ */
+ eat = (skb->tail+delta) - skb->end;
+
+ if (eat > 0 || skb_cloned(skb)) {
+ if (pskb_expand_head(skb, 0, eat>0 ? eat+128 : 0, GFP_ATOMIC))
+ return NULL;
+ }
+
+ if (skb_copy_bits(skb, skb_headlen(skb), skb->tail, delta))
+ BUG();
+
+ /* Optimization: no fragments, no reasons to preestimate
+ * size of pulled pages. Superb.
+ */
+ if (skb_shinfo(skb)->frag_list == NULL)
+ goto pull_pages;
+
+ /* Estimate size of pulled pages. */
+ eat = delta;
+ for (i=0; i<skb_shinfo(skb)->nr_frags; i++) {
+ if (skb_shinfo(skb)->frags[i].size >= eat)
+ goto pull_pages;
+ eat -= skb_shinfo(skb)->frags[i].size;
+ }
+
+ /* If we need to update the frag list, we are in trouble.
+ * Certainly, it is possible to add an offset to the skb data,
+ * but taking into account that pulling is expected to
+ * be a very rare operation, it is worth fighting against
+ * further bloating of the skb head and crucifying ourselves here
+ * instead. Pure masochism, indeed. 8)8)
+ */
+ if (eat) {
+ struct sk_buff *list = skb_shinfo(skb)->frag_list;
+ struct sk_buff *clone = NULL;
+ struct sk_buff *insp = NULL;
+
+ do {
+ if (list == NULL)
+ BUG();
+
+ if (list->len <= eat) {
+ /* Eaten as whole. */
+ eat -= list->len;
+ list = list->next;
+ insp = list;
+ } else {
+ /* Eaten partially. */
+
+ if (skb_shared(list)) {
+ /* Sucks! We need to fork list. :-( */
+ clone = skb_clone(list, GFP_ATOMIC);
+ if (clone == NULL)
+ return NULL;
+ insp = list->next;
+ list = clone;
+ } else {
+ /* This may be pulled without
+ * problems. */
+ insp = list;
+ }
+ if (pskb_pull(list, eat) == NULL) {
+ if (clone)
+ kfree_skb(clone);
+ return NULL;
+ }
+ break;
+ }
+ } while (eat);
+
+ /* Free pulled out fragments. */
+ while ((list = skb_shinfo(skb)->frag_list) != insp) {
+ skb_shinfo(skb)->frag_list = list->next;
+ kfree_skb(list);
+ }
+ /* And insert new clone at head. */
+ if (clone) {
+ clone->next = list;
+ skb_shinfo(skb)->frag_list = clone;
+ }
+ }
+ /* Success! Now we may commit changes to skb data. */
+
+pull_pages:
+ eat = delta;
+ k = 0;
+ for (i=0; i<skb_shinfo(skb)->nr_frags; i++) {
+ if (skb_shinfo(skb)->frags[i].size <= eat) {
+ put_page(skb_shinfo(skb)->frags[i].page);
+ eat -= skb_shinfo(skb)->frags[i].size;
+ } else {
+ skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
+ if (eat) {
+ skb_shinfo(skb)->frags[k].page_offset += eat;
+ skb_shinfo(skb)->frags[k].size -= eat;
+ eat = 0;
+ }
+ k++;
+ }
+ }
+ skb_shinfo(skb)->nr_frags = k;
+
+ skb->tail += delta;
+ skb->data_len -= delta;
+
+ return skb->tail;
+}
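+
+/*
+ * Example (illustrative sketch): this is normally reached through
+ * pskb_may_pull() when a protocol handler needs the first bytes of the
+ * packet to be linear:
+ *
+ *    if (!pskb_may_pull(skb, sizeof(struct iphdr)))
+ *        goto drop;    (too short, or reallocation failed)
+ *    iph = skb->nh.iph;    (reload: the header may have moved)
+ */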
+
+/* Copy some data bits from skb to kernel buffer. */
+
+int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
+{
+ int i, copy;
+ int start = skb->len - skb->data_len;
+
+ if (offset > (int)skb->len-len)
+ goto fault;
+
+ /* Copy header. */
+ if ((copy = start-offset) > 0) {
+ if (copy > len)
+ copy = len;
+ memcpy(to, skb->data + offset, copy);
+ if ((len -= copy) == 0)
+ return 0;
+ offset += copy;
+ to += copy;
+ }
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ int end;
+
+ BUG_TRAP(start <= offset+len);
+
+ end = start + skb_shinfo(skb)->frags[i].size;
+ if ((copy = end-offset) > 0) {
+ u8 *vaddr;
+
+ if (copy > len)
+ copy = len;
+
+ vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
+ memcpy(to, vaddr+skb_shinfo(skb)->frags[i].page_offset+
+ offset-start, copy);
+ kunmap_skb_frag(vaddr);
+
+ if ((len -= copy) == 0)
+ return 0;
+ offset += copy;
+ to += copy;
+ }
+ start = end;
+ }
+
+ if (skb_shinfo(skb)->frag_list) {
+ struct sk_buff *list;
+
+ for (list = skb_shinfo(skb)->frag_list; list; list=list->next) {
+ int end;
+
+ BUG_TRAP(start <= offset+len);
+
+ end = start + list->len;
+ if ((copy = end-offset) > 0) {
+ if (copy > len)
+ copy = len;
+ if (skb_copy_bits(list, offset-start, to, copy))
+ goto fault;
+ if ((len -= copy) == 0)
+ return 0;
+ offset += copy;
+ to += copy;
+ }
+ start = end;
+ }
+ }
+ if (len == 0)
+ return 0;
+
+fault:
+ return -EFAULT;
+}
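+
+/*
+ * Example (illustrative sketch): gathering a possibly fragmented
+ * transport header into a flat buffer on the stack ("thoff" is a
+ * stand-in for the transport-header offset):
+ *
+ *    struct tcphdr th;
+ *    if (skb_copy_bits(skb, thoff, &th, sizeof(th)))
+ *        goto drop;    (packet shorter than claimed)
+ */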
+
+/* Checksum skb data. */
+
+unsigned int skb_checksum(const struct sk_buff *skb, int offset, int len, unsigned int csum)
+{
+ int i, copy;
+ int start = skb->len - skb->data_len;
+ int pos = 0;
+
+ /* Checksum header. */
+ if ((copy = start-offset) > 0) {
+ if (copy > len)
+ copy = len;
+ csum = csum_partial(skb->data+offset, copy, csum);
+ if ((len -= copy) == 0)
+ return csum;
+ offset += copy;
+ pos = copy;
+ }
+
+ for (i=0; i<skb_shinfo(skb)->nr_frags; i++) {
+ int end;
+
+ BUG_TRAP(start <= offset+len);
+
+ end = start + skb_shinfo(skb)->frags[i].size;
+ if ((copy = end-offset) > 0) {
+ unsigned int csum2;
+ u8 *vaddr;
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ if (copy > len)
+ copy = len;
+ vaddr = kmap_skb_frag(frag);
+ csum2 = csum_partial(vaddr + frag->page_offset +
+ offset-start, copy, 0);
+ kunmap_skb_frag(vaddr);
+ csum = csum_block_add(csum, csum2, pos);
+ if (!(len -= copy))
+ return csum;
+ offset += copy;
+ pos += copy;
+ }
+ start = end;
+ }
+
+ if (skb_shinfo(skb)->frag_list) {
+ struct sk_buff *list;
+
+ for (list = skb_shinfo(skb)->frag_list; list; list=list->next) {
+ int end;
+
+ BUG_TRAP(start <= offset+len);
+
+ end = start + list->len;
+ if ((copy = end-offset) > 0) {
+ unsigned int csum2;
+ if (copy > len)
+ copy = len;
+ csum2 = skb_checksum(list, offset-start, copy, 0);
+ csum = csum_block_add(csum, csum2, pos);
+ if ((len -= copy) == 0)
+ return csum;
+ offset += copy;
+ pos += copy;
+ }
+ start = end;
+ }
+ }
+ if (len == 0)
+ return csum;
+
+ BUG();
+ return csum;
+}
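+
+/*
+ * Example (illustrative sketch): verifying a received Internet checksum
+ * across the linear header and all fragments; the folded sum of a valid
+ * packet is zero:
+ *
+ *    if (csum_fold(skb_checksum(skb, 0, skb->len, 0)))
+ *        goto csum_error;
+ */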
+
+/* Both of above in one bottle. */
+
+unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to, int len, unsigned int csum)
+{
+ int i, copy;
+ int start = skb->len - skb->data_len;
+ int pos = 0;
+
+ /* Copy header. */
+ if ((copy = start-offset) > 0) {
+ if (copy > len)
+ copy = len;
+ csum = csum_partial_copy_nocheck(skb->data+offset, to, copy, csum);
+ if ((len -= copy) == 0)
+ return csum;
+ offset += copy;
+ to += copy;
+ pos = copy;
+ }
+
+ for (i=0; i<skb_shinfo(skb)->nr_frags; i++) {
+ int end;
+
+ BUG_TRAP(start <= offset+len);
+
+ end = start + skb_shinfo(skb)->frags[i].size;
+ if ((copy = end-offset) > 0) {
+ unsigned int csum2;
+ u8 *vaddr;
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ if (copy > len)
+ copy = len;
+ vaddr = kmap_skb_frag(frag);
+ csum2 = csum_partial_copy_nocheck(vaddr + frag->page_offset + offset-start, to, copy, 0);
+ kunmap_skb_frag(vaddr);
+ csum = csum_block_add(csum, csum2, pos);
+ if (!(len -= copy))
+ return csum;
+ offset += copy;
+ to += copy;
+ pos += copy;
+ }
+ start = end;
+ }
+
+ if (skb_shinfo(skb)->frag_list) {
+ struct sk_buff *list;
+
+ for (list = skb_shinfo(skb)->frag_list; list; list=list->next) {
+ unsigned int csum2;
+ int end;
+
+ BUG_TRAP(start <= offset+len);
+
+ end = start + list->len;
+ if ((copy = end-offset) > 0) {
+ if (copy > len)
+ copy = len;
+ csum2 = skb_copy_and_csum_bits(list, offset-start, to, copy, 0);
+ csum = csum_block_add(csum, csum2, pos);
+ if ((len -= copy) == 0)
+ return csum;
+ offset += copy;
+ to += copy;
+ pos += copy;
+ }
+ start = end;
+ }
+ }
+ if (len == 0)
+ return csum;
+
+ BUG();
+ return csum;
+}
+
+void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
+{
+ unsigned int csum;
+ long csstart;
+
+ if (skb->ip_summed == CHECKSUM_HW)
+ csstart = skb->h.raw - skb->data;
+ else
+ csstart = skb->len - skb->data_len;
+
+ if (csstart > skb->len - skb->data_len)
+ BUG();
+
+ memcpy(to, skb->data, csstart);
+
+ csum = 0;
+ if (csstart != skb->len)
+ csum = skb_copy_and_csum_bits(skb, csstart, to+csstart,
+ skb->len-csstart, 0);
+
+ if (skb->ip_summed == CHECKSUM_HW) {
+ long csstuff = csstart + skb->csum;
+
+ *((unsigned short *)(to + csstuff)) = csum_fold(csum);
+ }
+}
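+
+/*
+ * Example (illustrative sketch): a PIO driver copying a packet into its
+ * transmit window while completing a deferred CHECKSUM_HW checksum in
+ * the same pass ("txbuf" is a stand-in for the device buffer):
+ *
+ *    skb_copy_and_csum_dev(skb, txbuf);
+ *    (then kick the hardware to send skb->len bytes from txbuf)
+ */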
+
+#if 0
+/*
+ * Tune the memory allocator for a new MTU size.
+ */
+void skb_add_mtu(int mtu)
+{
+ /* Must match allocation in alloc_skb */
+ mtu = SKB_DATA_ALIGN(mtu) + sizeof(struct skb_shared_info);
+
+ kmem_add_cache_size(mtu);
+}
+#endif
+
+void __init skb_init(void)
+{
+ int i;
+
+ skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
+ sizeof(struct sk_buff),
+ 0,
+ SLAB_HWCACHE_ALIGN,
+ skb_headerinit, NULL);
+ if (!skbuff_head_cache)
+ panic("cannot create skbuff cache");
+
+ for (i=0; i<NR_CPUS; i++)
+ skb_queue_head_init(&skb_head_pool[i].list);
+}
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/arch/xen/Kconfig
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/arch/xen/Kconfig Tue Aug 9 15:17:45 2005
@@ -0,0 +1,152 @@
+#
+# For a description of the syntax of this configuration file,
+# see Documentation/kbuild/kconfig-language.txt.
+#
+
+mainmenu "Linux Kernel Configuration"
+
+config XEN
+ bool
+ default y
+ help
+ This is the Linux Xen port.
+
+config ARCH_XEN
+ bool
+ default y
+
+
+config NO_IDLE_HZ
+ bool
+ default y
+
+
+menu "XEN"
+
+config XEN_PRIVILEGED_GUEST
+ bool "Privileged Guest (domain 0)"
+ default n
+ select XEN_PHYSDEV_ACCESS
+ help
+ Support for privileged operation (domain 0)
+
+config XEN_PHYSDEV_ACCESS
+ bool "Physical device access"
+ default XEN_PRIVILEGED_GUEST
+ help
+ Assume access is available to physical hardware devices
+ (e.g., hard drives, network cards). This allows you to configure
+ such devices and also includes some low-level support that is
+ otherwise not compiled into the kernel.
+
+config XEN_BLKDEV_BACKEND
+ bool "Block-device backend driver"
+ depends on XEN_PHYSDEV_ACCESS
+ default y
+ help
+ The block-device backend driver allows the kernel to export its
+ block devices to other guests via a high-performance shared-memory
+ interface.
+
+config XEN_NETDEV_BACKEND
+ bool "Network-device backend driver"
+ depends on XEN_PHYSDEV_ACCESS
+ default y
+ help
+ The network-device backend driver allows the kernel to export its
+ network devices to other guests via a high-performance shared-memory
+ interface.
+
+config XEN_BLKDEV_FRONTEND
+ bool "Block-device frontend driver"
+ default y
+ help
+ The block-device frontend driver allows the kernel to access block
+ devices mounted within another guest OS. Unless you are building a
+ dedicated device-driver domain or your master control domain
+ (domain 0), you almost certainly want to say Y here.
+
+config XEN_NETDEV_FRONTEND
+ bool "Network-device frontend driver"
+ default y
+ help
+ The network-device frontend driver allows the kernel to access
+ network interfaces within another guest OS. Unless you are building a
+ dedicated device-driver domain or your master control domain
+ (domain 0), you almost certainly want to say Y here.
+
+config XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER
+ bool "Pipelined transmitter (DANGEROUS)"
+ depends on XEN_NETDEV_FRONTEND
+ default n
+ help
+ The driver will assume that the backend is pipelining packets for
+ transmission: whenever packets are pending in the remote backend,
+ the driver will not send asynchronous notifications when it queues
+ additional packets for transmission.
+ If the backend is a dumb domain, such as a transparent Ethernet
+ bridge with no local IP interface, it is safe to say Y here to get
+ slightly lower network overhead.
+ If the backend has a local IP interface; or may be doing smart things
+ like reassembling packets to perform firewall filtering; or if you
+ are unsure; or if you experience network hangs when this option is
+ enabled; then you must say N here.
+
+config XEN_WRITABLE_PAGETABLES
+ bool
+ default y
+
+config XEN_SCRUB_PAGES
+ bool "Scrub memory before freeing it to Xen"
+ default y
+ help
+ Erase memory contents before freeing it back to Xen's global
+ pool. This ensures that any secrets contained within that
+ memory (e.g., private keys) cannot be found by other guests that
+ may be running on the machine. Most people will want to say Y here.
+ If security is not a concern then you may increase performance by
+ saying N.
+
+choice
+ prompt "Processor Type"
+ default X86
+
+config X86
+ bool "X86"
+ help
+ Choose this option if your computer is an X86 architecture.
+
+config X86_64
+ bool "X86_64"
+ help
+ Choose this option if your computer is an X86_64 architecture.
+
+endchoice
+
+endmenu
+
+config HAVE_ARCH_DEV_ALLOC_SKB
+ bool
+ default y
+
+source "init/Kconfig"
+
+if X86
+source "arch/xen/i386/Kconfig"
+endif
+
+menu "Executable file formats"
+
+source "fs/Kconfig.binfmt"
+
+endmenu
+
+source "arch/xen/Kconfig.drivers"
+
+source "fs/Kconfig"
+
+source "security/Kconfig"
+
+source "crypto/Kconfig"
+
+source "lib/Kconfig"
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/arch/xen/Kconfig.drivers
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/arch/xen/Kconfig.drivers Tue Aug 9 15:17:45 2005
@@ -0,0 +1,94 @@
+# arch/xen/Kconfig.drivers
+
+menu "Device Drivers"
+
+source "drivers/base/Kconfig"
+
+if XEN_PHYSDEV_ACCESS
+source "drivers/mtd/Kconfig"
+source "drivers/parport/Kconfig"
+source "drivers/pnp/Kconfig"
+endif
+
+source "drivers/block/Kconfig"
+
+if XEN_PHYSDEV_ACCESS
+source "drivers/ide/Kconfig"
+endif
+
+source "drivers/scsi/Kconfig"
+
+if XEN_PHYSDEV_ACCESS
+source "drivers/cdrom/Kconfig"
+endif
+
+source "drivers/md/Kconfig"
+
+if XEN_PHYSDEV_ACCESS
+source "drivers/message/fusion/Kconfig"
+source "drivers/ieee1394/Kconfig"
+source "drivers/message/i2o/Kconfig"
+endif
+
+source "net/Kconfig"
+
+if XEN_PHYSDEV_ACCESS
+source "drivers/isdn/Kconfig"
+source "drivers/telephony/Kconfig"
+source "drivers/input/Kconfig"
+source "drivers/char/Kconfig"
+source "drivers/i2c/Kconfig"
+source "drivers/w1/Kconfig"
+source "drivers/misc/Kconfig"
+source "drivers/media/Kconfig"
+source "drivers/video/Kconfig"
+source "sound/Kconfig"
+source "drivers/usb/Kconfig"
+source "drivers/mmc/Kconfig"
+source "drivers/infiniband/Kconfig"
+endif
+
+if !XEN_PHYSDEV_ACCESS
+
+menu "Character devices"
+
+config UNIX98_PTYS
+ bool
+ default y
+
+config LEGACY_PTYS
+ bool "Legacy (BSD) PTY support"
+ default y
+ ---help---
+ A pseudo terminal (PTY) is a software device consisting of two
+ halves: a master and a slave. The slave device behaves identically to
+ a physical terminal; the master device is used by a process to
+ read data from and write data to the slave, thereby emulating a
+ terminal. Typical programs for the master side are telnet servers
+ and xterms.
+
+ Linux has traditionally used the BSD-like names /dev/ptyxx
+ for masters and /dev/ttyxx for slaves of pseudo
+ terminals. This scheme has a number of problems, including
+ security. This option enables these legacy devices; on most
+ systems, it is safe to say N.
+
+
+config LEGACY_PTY_COUNT
+ int "Maximum number of legacy PTY in use"
+ depends on LEGACY_PTYS
+ range 1 256
+ default "256"
+ ---help---
+ The maximum number of legacy PTYs that can be used at any one time.
+ The default is 256, and should be more than enough. Embedded
+ systems may want to reduce this to save memory.
+
+ When not in use, each legacy PTY occupies 12 bytes on 32-bit
+ architectures and 24 bytes on 64-bit architectures.
+
+endmenu
+
+endif
+
+endmenu
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/arch/xen/Makefile
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/arch/xen/Makefile Tue Aug 9 15:17:45 2005
@@ -0,0 +1,81 @@
+#
+# xen/Makefile
+#
+# This file is included by the global makefile so that you can add your own
+# architecture-specific flags and dependencies. Remember to include
+# actions for "archclean", cleaning up for this architecture.
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2004 by Christian Limpach
+#
+
+XENARCH := $(subst ",,$(CONFIG_XENARCH))
+
+# pick up headers from include/asm-xen/asm in preference over include/asm
+NOSTDINC_FLAGS = -nostdinc -iwithprefix include/asm-xen -Iinclude/asm-xen -iwithprefix include
+
+# make uname return the processor arch
+UTS_MACHINE := $(XENARCH)
+
+core-y += arch/xen/kernel/
+
+include/.asm-ignore: include/asm
+ @rm -f include/.asm-ignore
+ @mv include/asm include/.asm-ignore
+ @echo ' SYMLINK include/asm -> include/asm-$(XENARCH)'
+ $(Q)if [ ! -d include ]; then mkdir -p include; fi;
+ @ln -fsn $(srctree)/include/asm-$(XENARCH) include/asm
+
+include/asm-xen/asm:
+ @echo ' SYMLINK $@ -> include/asm-xen/asm-$(XENARCH)'
+ @mkdir -p include/asm-xen
+ @ln -fsn $(srctree)/include/asm-xen/asm-$(XENARCH) $@
+
+arch/xen/arch:
+ @rm -f $@
+ @mkdir -p arch/xen
+ @ln -fsn $(srctree)/arch/xen/$(XENARCH) $@
+
+arch/$(XENARCH)/kernel/asm-offsets.s: include/asm include/linux/version.h \
+ include/config/MARKER
+
+include/asm-$(ARCH)/asm_offsets.h: arch/$(XENARCH)/kernel/asm-offsets.s
+ $(call filechk,gen-asm-offsets)
+
+prepare: include/.asm-ignore include/asm-xen/asm \
+ arch/xen/arch include/asm-$(ARCH)/asm_offsets.h ;
+
+all: vmlinuz
+
+vmlinuz: vmlinux
+ $(Q)$(MAKE) $(build)=arch/xen/boot vmlinuz
+
+XINSTALL_NAME ?= $(KERNELRELEASE)
+install: vmlinuz
+ mkdir -p $(INSTALL_PATH)/boot
+ ln -f -s vmlinuz-$(XINSTALL_NAME)$(INSTALL_SUFFIX) $(INSTALL_PATH)/boot/vmlinuz-$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(XENGUEST)$(INSTALL_SUFFIX)
+ rm -f $(INSTALL_PATH)/boot/vmlinuz-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
+ install -m0644 vmlinuz $(INSTALL_PATH)/boot/vmlinuz-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
+ install -m0644 vmlinux $(INSTALL_PATH)/boot/vmlinux-syms-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
+ install -m0664 .config $(INSTALL_PATH)/boot/config-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
+ install -m0664 System.map $(INSTALL_PATH)/boot/System.map-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
+ ln -f -s vmlinuz-$(XINSTALL_NAME)$(INSTALL_SUFFIX) $(INSTALL_PATH)/boot/vmlinuz-$(VERSION).$(PATCHLEVEL)$(XENGUEST)$(INSTALL_SUFFIX)
+ mkdir -p $(INSTALL_PATH)/usr/include/xen/linux
+ install -m0644 include/asm-xen/linux-public/*.h $(INSTALL_PATH)/usr/include/xen/linux
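+
+# Example (illustrative sketch, assuming CONFIG_XENARCH="i386" is already
+# set in .config): a typical build-and-install sequence for this tree:
+#
+#   make ARCH=xen vmlinuz
+#   make ARCH=xen INSTALL_PATH=/path/to/guest/root install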
+
+archclean:
+ @if [ -e arch/xen/arch ]; then $(MAKE) $(clean)=arch/xen/arch; fi;
+ @rm -f arch/xen/arch include/.asm-ignore include/asm-xen/asm
+ @rm -f vmlinux-stripped vmlinuz
+
+define archhelp
+ echo '* vmlinuz - Compressed kernel image'
+ echo ' install - Install kernel image and config file'
+endef
+
+ifneq ($(XENARCH),)
+include $(srctree)/arch/xen/$(XENARCH)/Makefile
+endif
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/arch/xen/boot/Makefile
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/arch/xen/boot/Makefile Tue Aug 9 15:17:45 2005
@@ -0,0 +1,8 @@
+
+OBJCOPYFLAGS := -g --strip-unneeded
+
+vmlinuz: vmlinux-stripped FORCE
+ $(call if_changed,gzip)
+
+vmlinux-stripped: vmlinux FORCE
+ $(call if_changed,objcopy)
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig Tue Aug 9 15:17:45 2005
@@ -0,0 +1,1204 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.11-xen0
+# Tue May 3 13:22:55 2005
+#
+CONFIG_XEN=y
+CONFIG_ARCH_XEN=y
+CONFIG_NO_IDLE_HZ=y
+
+#
+# XEN
+#
+CONFIG_XEN_PRIVILEGED_GUEST=y
+CONFIG_XEN_PHYSDEV_ACCESS=y
+CONFIG_XEN_BLKDEV_BACKEND=y
+CONFIG_XEN_NETDEV_BACKEND=y
+CONFIG_XEN_BLKDEV_FRONTEND=y
+CONFIG_XEN_NETDEV_FRONTEND=y
+# CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
+CONFIG_XEN_WRITABLE_PAGETABLES=y
+CONFIG_XEN_SCRUB_PAGES=y
+CONFIG_X86=y
+# CONFIG_X86_64 is not set
+CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+# CONFIG_CLEAN_COMPILE is not set
+CONFIG_BROKEN=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_LOCK_KERNEL=y
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION=""
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+CONFIG_SYSCTL=y
+# CONFIG_AUDIT is not set
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_HOTPLUG=y
+CONFIG_KOBJECT_UEVENT=y
+# CONFIG_IKCONFIG is not set
+# CONFIG_EMBEDDED is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SHMEM=y
+CONFIG_CC_ALIGN_FUNCTIONS=0
+CONFIG_CC_ALIGN_LABELS=0
+CONFIG_CC_ALIGN_LOOPS=0
+CONFIG_CC_ALIGN_JUMPS=0
+# CONFIG_TINY_SHMEM is not set
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_OBSOLETE_MODPARM=y
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_KMOD=y
+
+#
+# X86 Processor Configuration
+#
+CONFIG_XENARCH="i386"
+CONFIG_MMU=y
+CONFIG_UID16=y
+CONFIG_GENERIC_ISA_DMA=y
+CONFIG_GENERIC_IOMAP=y
+# CONFIG_M386 is not set
+# CONFIG_M486 is not set
+# CONFIG_M586 is not set
+# CONFIG_M586TSC is not set
+# CONFIG_M586MMX is not set
+# CONFIG_M686 is not set
+# CONFIG_MPENTIUMII is not set
+# CONFIG_MPENTIUMIII is not set
+# CONFIG_MPENTIUMM is not set
+CONFIG_MPENTIUM4=y
+# CONFIG_MK6 is not set
+# CONFIG_MK7 is not set
+# CONFIG_MK8 is not set
+# CONFIG_MCRUSOE is not set
+# CONFIG_MEFFICEON is not set
+# CONFIG_MWINCHIPC6 is not set
+# CONFIG_MWINCHIP2 is not set
+# CONFIG_MWINCHIP3D is not set
+# CONFIG_MCYRIXIII is not set
+# CONFIG_MVIAC3_2 is not set
+# CONFIG_X86_GENERIC is not set
+CONFIG_X86_CMPXCHG=y
+CONFIG_X86_XADD=y
+CONFIG_X86_L1_CACHE_SHIFT=7
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_X86_WP_WORKS_OK=y
+CONFIG_X86_INVLPG=y
+CONFIG_X86_BSWAP=y
+CONFIG_X86_POPAD_OK=y
+CONFIG_X86_GOOD_APIC=y
+CONFIG_X86_INTEL_USERCOPY=y
+CONFIG_X86_USE_PPRO_CHECKSUM=y
+# CONFIG_HPET_TIMER is not set
+# CONFIG_HPET_EMULATE_RTC is not set
+# CONFIG_SMP is not set
+CONFIG_PREEMPT=y
+CONFIG_PREEMPT_BKL=y
+CONFIG_MICROCODE=y
+CONFIG_X86_CPUID=y
+
+#
+# Firmware Drivers
+#
+# CONFIG_EDD is not set
+CONFIG_NOHIGHMEM=y
+# CONFIG_HIGHMEM4G is not set
+CONFIG_MTRR=y
+CONFIG_HAVE_DEC_LOCK=y
+# CONFIG_REGPARM is not set
+
+#
+# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
+#
+CONFIG_PCI=y
+CONFIG_PCI_DIRECT=y
+CONFIG_PCI_LEGACY_PROC=y
+# CONFIG_PCI_NAMES is not set
+CONFIG_ISA=y
+# CONFIG_EISA is not set
+# CONFIG_MCA is not set
+# CONFIG_SCx200 is not set
+
+#
+# PCCARD (PCMCIA/CardBus) support
+#
+# CONFIG_PCCARD is not set
+
+#
+# PC-card bridges
+#
+CONFIG_PCMCIA_PROBE=y
+
+#
+# PCI Hotplug Support
+#
+# CONFIG_HOTPLUG_PCI is not set
+
+#
+# Kernel hacking
+#
+CONFIG_DEBUG_KERNEL=y
+CONFIG_EARLY_PRINTK=y
+# CONFIG_DEBUG_STACKOVERFLOW is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_DEBUG_SLAB is not set
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_PAGEALLOC is not set
+# CONFIG_DEBUG_INFO is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_FRAME_POINTER is not set
+# CONFIG_4KSTACKS is not set
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_X86_BIOS_REBOOT=y
+CONFIG_PC=y
+
+#
+# Executable file formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+# CONFIG_STANDALONE is not set
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_FW_LOADER is not set
+# CONFIG_DEBUG_DRIVER is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+# CONFIG_MTD is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+# CONFIG_PNP is not set
+
+#
+# Block devices
+#
+CONFIG_BLK_DEV_FD=y
+# CONFIG_BLK_DEV_XD is not set
+# CONFIG_BLK_CPQ_DA is not set
+CONFIG_BLK_CPQ_CISS_DA=y
+# CONFIG_CISS_SCSI_TAPE is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+# CONFIG_BLK_DEV_UMEM is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_SX8 is not set
+# CONFIG_BLK_DEV_UB is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=4096
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_LBD is not set
+# CONFIG_CDROM_PKTCDVD is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_ATA_OVER_ETH is not set
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDE=y
+
+#
+# Please see Documentation/ide.txt for help/info on IDE drives
+#
+# CONFIG_BLK_DEV_IDE_SATA is not set
+# CONFIG_BLK_DEV_HD_IDE is not set
+CONFIG_BLK_DEV_IDEDISK=y
+# CONFIG_IDEDISK_MULTI_MODE is not set
+CONFIG_BLK_DEV_IDECD=y
+# CONFIG_BLK_DEV_IDETAPE is not set
+# CONFIG_BLK_DEV_IDEFLOPPY is not set
+# CONFIG_BLK_DEV_IDESCSI is not set
+# CONFIG_IDE_TASK_IOCTL is not set
+
+#
+# IDE chipset support/bugfixes
+#
+CONFIG_IDE_GENERIC=y
+# CONFIG_BLK_DEV_CMD640 is not set
+CONFIG_BLK_DEV_IDEPCI=y
+# CONFIG_IDEPCI_SHARE_IRQ is not set
+# CONFIG_BLK_DEV_OFFBOARD is not set
+CONFIG_BLK_DEV_GENERIC=y
+# CONFIG_BLK_DEV_OPTI621 is not set
+# CONFIG_BLK_DEV_RZ1000 is not set
+CONFIG_BLK_DEV_IDEDMA_PCI=y
+# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
+CONFIG_IDEDMA_PCI_AUTO=y
+# CONFIG_IDEDMA_ONLYDISK is not set
+# CONFIG_BLK_DEV_AEC62XX is not set
+# CONFIG_BLK_DEV_ALI15X3 is not set
+# CONFIG_BLK_DEV_AMD74XX is not set
+# CONFIG_BLK_DEV_ATIIXP is not set
+# CONFIG_BLK_DEV_CMD64X is not set
+# CONFIG_BLK_DEV_TRIFLEX is not set
+# CONFIG_BLK_DEV_CY82C693 is not set
+# CONFIG_BLK_DEV_CS5520 is not set
+# CONFIG_BLK_DEV_CS5530 is not set
+# CONFIG_BLK_DEV_HPT34X is not set
+# CONFIG_BLK_DEV_HPT366 is not set
+# CONFIG_BLK_DEV_SC1200 is not set
+CONFIG_BLK_DEV_PIIX=y
+# CONFIG_BLK_DEV_NS87415 is not set
+# CONFIG_BLK_DEV_PDC202XX_OLD is not set
+# CONFIG_BLK_DEV_PDC202XX_NEW is not set
+CONFIG_BLK_DEV_SVWKS=y
+# CONFIG_BLK_DEV_SIIMAGE is not set
+# CONFIG_BLK_DEV_SIS5513 is not set
+# CONFIG_BLK_DEV_SLC90E66 is not set
+# CONFIG_BLK_DEV_TRM290 is not set
+# CONFIG_BLK_DEV_VIA82CXXX is not set
+# CONFIG_IDE_ARM is not set
+# CONFIG_IDE_CHIPSETS is not set
+CONFIG_BLK_DEV_IDEDMA=y
+# CONFIG_IDEDMA_IVB is not set
+CONFIG_IDEDMA_AUTO=y
+# CONFIG_BLK_DEV_HD is not set
+
+#
+# SCSI device support
+#
+CONFIG_SCSI=y
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+# CONFIG_CHR_DEV_SG is not set
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+# CONFIG_SCSI_MULTI_LUN is not set
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+
+#
+# SCSI Transport Attributes
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+
+#
+# SCSI low-level drivers
+#
+CONFIG_BLK_DEV_3W_XXXX_RAID=y
+# CONFIG_SCSI_3W_9XXX is not set
+# CONFIG_SCSI_7000FASST is not set
+# CONFIG_SCSI_ACARD is not set
+# CONFIG_SCSI_AHA152X is not set
+# CONFIG_SCSI_AHA1542 is not set
+CONFIG_SCSI_AACRAID=y
+CONFIG_SCSI_AIC7XXX=y
+CONFIG_AIC7XXX_CMDS_PER_DEVICE=32
+CONFIG_AIC7XXX_RESET_DELAY_MS=15000
+CONFIG_AIC7XXX_DEBUG_ENABLE=y
+CONFIG_AIC7XXX_DEBUG_MASK=0
+CONFIG_AIC7XXX_REG_PRETTY_PRINT=y
+# CONFIG_SCSI_AIC7XXX_OLD is not set
+CONFIG_SCSI_AIC79XX=y
+CONFIG_AIC79XX_CMDS_PER_DEVICE=32
+CONFIG_AIC79XX_RESET_DELAY_MS=15000
+# CONFIG_AIC79XX_ENABLE_RD_STRM is not set
+CONFIG_AIC79XX_DEBUG_ENABLE=y
+CONFIG_AIC79XX_DEBUG_MASK=0
+CONFIG_AIC79XX_REG_PRETTY_PRINT=y
+# CONFIG_SCSI_DPT_I2O is not set
+# CONFIG_SCSI_ADVANSYS is not set
+# CONFIG_SCSI_IN2000 is not set
+CONFIG_MEGARAID_NEWGEN=y
+# CONFIG_MEGARAID_MM is not set
+CONFIG_SCSI_SATA=y
+# CONFIG_SCSI_SATA_AHCI is not set
+# CONFIG_SCSI_SATA_SVW is not set
+CONFIG_SCSI_ATA_PIIX=y
+# CONFIG_SCSI_SATA_NV is not set
+CONFIG_SCSI_SATA_PROMISE=y
+# CONFIG_SCSI_SATA_QSTOR is not set
+CONFIG_SCSI_SATA_SX4=y
+CONFIG_SCSI_SATA_SIL=y
+# CONFIG_SCSI_SATA_SIS is not set
+# CONFIG_SCSI_SATA_ULI is not set
+# CONFIG_SCSI_SATA_VIA is not set
+# CONFIG_SCSI_SATA_VITESSE is not set
+CONFIG_SCSI_BUSLOGIC=y
+# CONFIG_SCSI_OMIT_FLASHPOINT is not set
+# CONFIG_SCSI_CPQFCTS is not set
+# CONFIG_SCSI_DMX3191D is not set
+# CONFIG_SCSI_DTC3280 is not set
+# CONFIG_SCSI_EATA is not set
+# CONFIG_SCSI_EATA_PIO is not set
+# CONFIG_SCSI_FUTURE_DOMAIN is not set
+# CONFIG_SCSI_GDTH is not set
+# CONFIG_SCSI_GENERIC_NCR5380 is not set
+# CONFIG_SCSI_GENERIC_NCR5380_MMIO is not set
+# CONFIG_SCSI_IPS is not set
+# CONFIG_SCSI_INITIO is not set
+# CONFIG_SCSI_INIA100 is not set
+# CONFIG_SCSI_NCR53C406A is not set
+# CONFIG_SCSI_SYM53C8XX_2 is not set
+# CONFIG_SCSI_IPR is not set
+# CONFIG_SCSI_PAS16 is not set
+# CONFIG_SCSI_PCI2000 is not set
+# CONFIG_SCSI_PCI2220I is not set
+# CONFIG_SCSI_PSI240I is not set
+# CONFIG_SCSI_QLOGIC_FAS is not set
+# CONFIG_SCSI_QLOGIC_ISP is not set
+# CONFIG_SCSI_QLOGIC_FC is not set
+# CONFIG_SCSI_QLOGIC_1280 is not set
+CONFIG_SCSI_QLA2XXX=y
+# CONFIG_SCSI_QLA21XX is not set
+# CONFIG_SCSI_QLA22XX is not set
+# CONFIG_SCSI_QLA2300 is not set
+# CONFIG_SCSI_QLA2322 is not set
+# CONFIG_SCSI_QLA6312 is not set
+# CONFIG_SCSI_SEAGATE is not set
+# CONFIG_SCSI_SYM53C416 is not set
+# CONFIG_SCSI_DC395x is not set
+# CONFIG_SCSI_DC390T is not set
+# CONFIG_SCSI_T128 is not set
+# CONFIG_SCSI_U14_34F is not set
+# CONFIG_SCSI_ULTRASTOR is not set
+# CONFIG_SCSI_NSP32 is not set
+# CONFIG_SCSI_DEBUG is not set
+
+#
+# Old CD-ROM drivers (not SCSI, not IDE)
+#
+# CONFIG_CD_NO_IDESCSI is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+# CONFIG_MD_LINEAR is not set
+CONFIG_MD_RAID0=y
+CONFIG_MD_RAID1=y
+# CONFIG_MD_RAID10 is not set
+CONFIG_MD_RAID5=y
+# CONFIG_MD_RAID6 is not set
+# CONFIG_MD_MULTIPATH is not set
+# CONFIG_MD_FAULTY is not set
+CONFIG_BLK_DEV_DM=y
+# CONFIG_DM_CRYPT is not set
+CONFIG_DM_SNAPSHOT=y
+CONFIG_DM_MIRROR=y
+# CONFIG_DM_ZERO is not set
+
+#
+# Fusion MPT device support
+#
+CONFIG_FUSION=y
+CONFIG_FUSION_MAX_SGE=40
+# CONFIG_FUSION_CTL is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+# CONFIG_IEEE1394 is not set
+
+#
+# I2O device support
+#
+# CONFIG_I2O is not set
+
+#
+# Networking support
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+# CONFIG_NETLINK_DEV is not set
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+# CONFIG_IP_PNP_BOOTP is not set
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_IP_TCPDIAG=y
+# CONFIG_IP_TCPDIAG_IPV6 is not set
+
+#
+# IP: Virtual Server Configuration
+#
+# CONFIG_IP_VS is not set
+# CONFIG_IPV6 is not set
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+CONFIG_BRIDGE_NETFILTER=y
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_IP_NF_CONNTRACK=m
+CONFIG_IP_NF_CT_ACCT=y
+# CONFIG_IP_NF_CONNTRACK_MARK is not set
+# CONFIG_IP_NF_CT_PROTO_SCTP is not set
+CONFIG_IP_NF_FTP=m
+# CONFIG_IP_NF_IRC is not set
+# CONFIG_IP_NF_TFTP is not set
+# CONFIG_IP_NF_AMANDA is not set
+# CONFIG_IP_NF_QUEUE is not set
+CONFIG_IP_NF_IPTABLES=m
+# CONFIG_IP_NF_MATCH_LIMIT is not set
+CONFIG_IP_NF_MATCH_IPRANGE=m
+# CONFIG_IP_NF_MATCH_MAC is not set
+# CONFIG_IP_NF_MATCH_PKTTYPE is not set
+# CONFIG_IP_NF_MATCH_MARK is not set
+# CONFIG_IP_NF_MATCH_MULTIPORT is not set
+# CONFIG_IP_NF_MATCH_TOS is not set
+# CONFIG_IP_NF_MATCH_RECENT is not set
+# CONFIG_IP_NF_MATCH_ECN is not set
+# CONFIG_IP_NF_MATCH_DSCP is not set
+# CONFIG_IP_NF_MATCH_AH_ESP is not set
+# CONFIG_IP_NF_MATCH_LENGTH is not set
+# CONFIG_IP_NF_MATCH_TTL is not set
+# CONFIG_IP_NF_MATCH_TCPMSS is not set
+# CONFIG_IP_NF_MATCH_HELPER is not set
+# CONFIG_IP_NF_MATCH_STATE is not set
+# CONFIG_IP_NF_MATCH_CONNTRACK is not set
+# CONFIG_IP_NF_MATCH_OWNER is not set
+# CONFIG_IP_NF_MATCH_PHYSDEV is not set
+# CONFIG_IP_NF_MATCH_ADDRTYPE is not set
+# CONFIG_IP_NF_MATCH_REALM is not set
+# CONFIG_IP_NF_MATCH_SCTP is not set
+# CONFIG_IP_NF_MATCH_COMMENT is not set
+# CONFIG_IP_NF_MATCH_HASHLIMIT is not set
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+# CONFIG_IP_NF_TARGET_LOG is not set
+# CONFIG_IP_NF_TARGET_ULOG is not set
+# CONFIG_IP_NF_TARGET_TCPMSS is not set
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_NAT_NEEDED=y
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+# CONFIG_IP_NF_TARGET_REDIRECT is not set
+# CONFIG_IP_NF_TARGET_NETMAP is not set
+# CONFIG_IP_NF_TARGET_SAME is not set
+# CONFIG_IP_NF_NAT_SNMP_BASIC is not set
+CONFIG_IP_NF_NAT_FTP=m
+# CONFIG_IP_NF_MANGLE is not set
+# CONFIG_IP_NF_RAW is not set
+# CONFIG_IP_NF_ARPTABLES is not set
+
+#
+# Bridge: Netfilter Configuration
+#
+# CONFIG_BRIDGE_NF_EBTABLES is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+# CONFIG_ATM is not set
+CONFIG_BRIDGE=y
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+# CONFIG_NET_CLS_ROUTE is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+
+#
+# ARCnet devices
+#
+# CONFIG_ARCNET is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+# CONFIG_HAPPYMEAL is not set
+# CONFIG_SUNGEM is not set
+CONFIG_NET_VENDOR_3COM=y
+# CONFIG_EL1 is not set
+# CONFIG_EL2 is not set
+# CONFIG_ELPLUS is not set
+# CONFIG_EL16 is not set
+# CONFIG_EL3 is not set
+# CONFIG_3C515 is not set
+CONFIG_VORTEX=y
+# CONFIG_TYPHOON is not set
+# CONFIG_LANCE is not set
+# CONFIG_NET_VENDOR_SMC is not set
+# CONFIG_NET_VENDOR_RACAL is not set
+
+#
+# Tulip family network device support
+#
+CONFIG_NET_TULIP=y
+# CONFIG_DE2104X is not set
+CONFIG_TULIP=y
+# CONFIG_TULIP_MWI is not set
+# CONFIG_TULIP_MMIO is not set
+# CONFIG_TULIP_NAPI is not set
+# CONFIG_DE4X5 is not set
+# CONFIG_WINBOND_840 is not set
+# CONFIG_DM9102 is not set
+# CONFIG_AT1700 is not set
+# CONFIG_DEPCA is not set
+# CONFIG_HP100 is not set
+# CONFIG_NET_ISA is not set
+CONFIG_NET_PCI=y
+CONFIG_PCNET32=y
+# CONFIG_AMD8111_ETH is not set
+# CONFIG_ADAPTEC_STARFIRE is not set
+# CONFIG_AC3200 is not set
+# CONFIG_APRICOT is not set
+# CONFIG_B44 is not set
+# CONFIG_FORCEDETH is not set
+# CONFIG_CS89x0 is not set
+# CONFIG_DGRS is not set
+# CONFIG_EEPRO100 is not set
+CONFIG_E100=y
+# CONFIG_E100_NAPI is not set
+# CONFIG_FEALNX is not set
+# CONFIG_NATSEMI is not set
+CONFIG_NE2K_PCI=y
+# CONFIG_8139CP is not set
+CONFIG_8139TOO=y
+CONFIG_8139TOO_PIO=y
+# CONFIG_8139TOO_TUNE_TWISTER is not set
+# CONFIG_8139TOO_8129 is not set
+# CONFIG_8139_OLD_RX_RESET is not set
+# CONFIG_SIS900 is not set
+# CONFIG_EPIC100 is not set
+# CONFIG_SUNDANCE is not set
+# CONFIG_TLAN is not set
+CONFIG_VIA_RHINE=y
+# CONFIG_VIA_RHINE_MMIO is not set
+# CONFIG_NET_POCKET is not set
+
+#
+# Ethernet (1000 Mbit)
+#
+CONFIG_ACENIC=y
+# CONFIG_ACENIC_OMIT_TIGON_I is not set
+# CONFIG_DL2K is not set
+CONFIG_E1000=y
+# CONFIG_E1000_NAPI is not set
+# CONFIG_NS83820 is not set
+# CONFIG_HAMACHI is not set
+# CONFIG_YELLOWFIN is not set
+# CONFIG_R8169 is not set
+# CONFIG_SK98LIN is not set
+# CONFIG_VIA_VELOCITY is not set
+CONFIG_TIGON3=y
+
+#
+# Ethernet (10000 Mbit)
+#
+# CONFIG_IXGB is not set
+# CONFIG_S2IO is not set
+
+#
+# Token Ring devices
+#
+# CONFIG_TR is not set
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NET_FC is not set
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Telephony Support
+#
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_TSDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input I/O drivers
+#
+# CONFIG_GAMEPORT is not set
+CONFIG_SOUND_GAMEPORT=y
+CONFIG_SERIO=y
+CONFIG_SERIO_I8042=y
+CONFIG_SERIO_SERPORT=y
+# CONFIG_SERIO_CT82C710 is not set
+# CONFIG_SERIO_PCIPS2 is not set
+CONFIG_SERIO_LIBPS2=y
+# CONFIG_SERIO_RAW is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+CONFIG_KEYBOARD_ATKBD=y
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+CONFIG_INPUT_MOUSE=y
+CONFIG_MOUSE_PS2=y
+# CONFIG_MOUSE_SERIAL is not set
+# CONFIG_MOUSE_INPORT is not set
+# CONFIG_MOUSE_LOGIBM is not set
+# CONFIG_MOUSE_PC110PAD is not set
+# CONFIG_MOUSE_VSXXXAA is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_NVRAM is not set
+# CONFIG_RTC is not set
+# CONFIG_GEN_RTC is not set
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+# CONFIG_SONYPI is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_FTAPE is not set
+CONFIG_AGP=m
+CONFIG_AGP_ALI=m
+CONFIG_AGP_ATI=m
+CONFIG_AGP_AMD=m
+CONFIG_AGP_AMD64=m
+CONFIG_AGP_INTEL=m
+CONFIG_AGP_INTEL_MCH=m
+CONFIG_AGP_NVIDIA=m
+CONFIG_AGP_SIS=m
+CONFIG_AGP_SWORKS=m
+CONFIG_AGP_VIA=m
+# CONFIG_AGP_EFFICEON is not set
+CONFIG_DRM=m
+CONFIG_DRM_TDFX=m
+# CONFIG_DRM_GAMMA is not set
+CONFIG_DRM_R128=m
+CONFIG_DRM_RADEON=m
+CONFIG_DRM_I810=m
+CONFIG_DRM_I830=m
+CONFIG_DRM_I915=m
+CONFIG_DRM_MGA=m
+CONFIG_DRM_SIS=m
+# CONFIG_MWAVE is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_HANGCHECK_TIMER is not set
+
+#
+# I2C support
+#
+# CONFIG_I2C is not set
+
+#
+# Dallas's 1-wire bus
+#
+# CONFIG_W1 is not set
+
+#
+# Misc devices
+#
+# CONFIG_IBM_ASM is not set
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# Graphics support
+#
+# CONFIG_FB is not set
+# CONFIG_VIDEO_SELECT is not set
+
+#
+# Console display driver support
+#
+CONFIG_VGA_CONSOLE=y
+# CONFIG_MDA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+CONFIG_USB=y
+# CONFIG_USB_DEBUG is not set
+
+#
+# Miscellaneous USB options
+#
+# CONFIG_USB_DEVICEFS is not set
+# CONFIG_USB_BANDWIDTH is not set
+# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_OTG is not set
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_EHCI_HCD is not set
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_UHCI_HCD=y
+# CONFIG_USB_SL811_HCD is not set
+
+#
+# USB Device Class drivers
+#
+# CONFIG_USB_BLUETOOTH_TTY is not set
+# CONFIG_USB_ACM is not set
+# CONFIG_USB_PRINTER is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
+#
+# CONFIG_USB_STORAGE is not set
+
+#
+# USB Input Devices
+#
+CONFIG_USB_HID=y
+CONFIG_USB_HIDINPUT=y
+# CONFIG_HID_FF is not set
+# CONFIG_USB_HIDDEV is not set
+# CONFIG_USB_AIPTEK is not set
+# CONFIG_USB_WACOM is not set
+# CONFIG_USB_KBTAB is not set
+# CONFIG_USB_POWERMATE is not set
+# CONFIG_USB_MTOUCH is not set
+# CONFIG_USB_EGALAX is not set
+# CONFIG_USB_XPAD is not set
+# CONFIG_USB_ATI_REMOTE is not set
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+# CONFIG_USB_HPUSBSCSI is not set
+
+#
+# USB Multimedia devices
+#
+# CONFIG_USB_DABUSB is not set
+
+#
+# Video4Linux support is needed for USB Multimedia device support
+#
+
+#
+# USB Network Adapters
+#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_USBNET is not set
+
+#
+# USB port drivers
+#
+
+#
+# USB Serial Converter support
+#
+# CONFIG_USB_SERIAL is not set
+
+#
+# USB Miscellaneous drivers
+#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_AUERSWALD is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_PHIDGETKIT is not set
+# CONFIG_USB_PHIDGETSERVO is not set
+# CONFIG_USB_IDMOUSE is not set
+
+#
+# USB ATM/DSL drivers
+#
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# MMC/SD Card support
+#
+# CONFIG_MMC is not set
+
+#
+# InfiniBand support
+#
+# CONFIG_INFINIBAND is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_XATTR=y
+# CONFIG_EXT3_FS_POSIX_ACL is not set
+# CONFIG_EXT3_FS_SECURITY is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+CONFIG_REISERFS_FS=y
+# CONFIG_REISERFS_CHECK is not set
+# CONFIG_REISERFS_PROC_INFO is not set
+# CONFIG_REISERFS_FS_XATTR is not set
+# CONFIG_JFS_FS is not set
+
+#
+# XFS support
+#
+# CONFIG_XFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_QUOTA is not set
+CONFIG_DNOTIFY=y
+CONFIG_AUTOFS_FS=y
+# CONFIG_AUTOFS4_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_ZISOFS_FS=y
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_SYSFS=y
+# CONFIG_DEVFS_FS is not set
+# CONFIG_DEVPTS_FS_XATTR is not set
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_XATTR is not set
+# CONFIG_HUGETLBFS is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V4 is not set
+# CONFIG_NFS_DIRECTIO is not set
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+# CONFIG_NFSD_V4 is not set
+CONFIG_NFSD_TCP=y
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_EXPORTFS=m
+CONFIG_SUNRPC=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+
+#
+# Native Language Support
+#
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+CONFIG_CRYPTO=y
+CONFIG_CRYPTO_HMAC=y
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=m
+CONFIG_CRYPTO_SHA1=m
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_WP512 is not set
+CONFIG_CRYPTO_DES=m
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_AES_586 is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+CONFIG_CRYPTO_CRC32C=m
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Hardware crypto devices
+#
+# CONFIG_CRYPTO_DEV_PADLOCK is not set
+
+#
+# Library routines
+#
+# CONFIG_CRC_CCITT is not set
+CONFIG_CRC32=y
+CONFIG_LIBCRC32C=y
+CONFIG_ZLIB_INFLATE=y
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig Tue Aug 9 15:17:45 2005
@@ -0,0 +1,536 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.11-xenU
+# Wed Apr 13 23:18:37 2005
+#
+CONFIG_XEN=y
+CONFIG_ARCH_XEN=y
+CONFIG_NO_IDLE_HZ=y
+
+#
+# XEN
+#
+# CONFIG_XEN_PRIVILEGED_GUEST is not set
+# CONFIG_XEN_PHYSDEV_ACCESS is not set
+CONFIG_XEN_BLKDEV_FRONTEND=y
+CONFIG_XEN_NETDEV_FRONTEND=y
+# CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
+CONFIG_XEN_WRITABLE_PAGETABLES=y
+CONFIG_XEN_SCRUB_PAGES=y
+CONFIG_X86=y
+# CONFIG_X86_64 is not set
+CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_CLEAN_COMPILE=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_LOCK_KERNEL=y
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION=""
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+CONFIG_SYSCTL=y
+# CONFIG_AUDIT is not set
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_HOTPLUG=y
+CONFIG_KOBJECT_UEVENT=y
+# CONFIG_IKCONFIG is not set
+# CONFIG_EMBEDDED is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SHMEM=y
+CONFIG_CC_ALIGN_FUNCTIONS=0
+CONFIG_CC_ALIGN_LABELS=0
+CONFIG_CC_ALIGN_LOOPS=0
+CONFIG_CC_ALIGN_JUMPS=0
+# CONFIG_TINY_SHMEM is not set
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_OBSOLETE_MODPARM=y
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_KMOD=y
+
+#
+# X86 Processor Configuration
+#
+CONFIG_XENARCH="i386"
+CONFIG_MMU=y
+CONFIG_UID16=y
+CONFIG_GENERIC_ISA_DMA=y
+CONFIG_GENERIC_IOMAP=y
+# CONFIG_M386 is not set
+# CONFIG_M486 is not set
+# CONFIG_M586 is not set
+# CONFIG_M586TSC is not set
+# CONFIG_M586MMX is not set
+# CONFIG_M686 is not set
+# CONFIG_MPENTIUMII is not set
+# CONFIG_MPENTIUMIII is not set
+# CONFIG_MPENTIUMM is not set
+CONFIG_MPENTIUM4=y
+# CONFIG_MK6 is not set
+# CONFIG_MK7 is not set
+# CONFIG_MK8 is not set
+# CONFIG_MCRUSOE is not set
+# CONFIG_MEFFICEON is not set
+# CONFIG_MWINCHIPC6 is not set
+# CONFIG_MWINCHIP2 is not set
+# CONFIG_MWINCHIP3D is not set
+# CONFIG_MCYRIXIII is not set
+# CONFIG_MVIAC3_2 is not set
+# CONFIG_X86_GENERIC is not set
+CONFIG_X86_CMPXCHG=y
+CONFIG_X86_XADD=y
+CONFIG_X86_L1_CACHE_SHIFT=7
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_X86_WP_WORKS_OK=y
+CONFIG_X86_INVLPG=y
+CONFIG_X86_BSWAP=y
+CONFIG_X86_POPAD_OK=y
+CONFIG_X86_GOOD_APIC=y
+CONFIG_X86_INTEL_USERCOPY=y
+CONFIG_X86_USE_PPRO_CHECKSUM=y
+# CONFIG_HPET_TIMER is not set
+# CONFIG_HPET_EMULATE_RTC is not set
+# CONFIG_SMP is not set
+CONFIG_PREEMPT=y
+CONFIG_PREEMPT_BKL=y
+CONFIG_X86_CPUID=y
+
+#
+# Firmware Drivers
+#
+# CONFIG_EDD is not set
+CONFIG_NOHIGHMEM=y
+# CONFIG_HIGHMEM4G is not set
+CONFIG_HAVE_DEC_LOCK=y
+# CONFIG_REGPARM is not set
+
+#
+# Kernel hacking
+#
+CONFIG_DEBUG_KERNEL=y
+CONFIG_EARLY_PRINTK=y
+# CONFIG_DEBUG_STACKOVERFLOW is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_DEBUG_SLAB is not set
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_PAGEALLOC is not set
+# CONFIG_DEBUG_INFO is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_FRAME_POINTER is not set
+# CONFIG_4KSTACKS is not set
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_X86_BIOS_REBOOT=y
+CONFIG_PC=y
+
+#
+# Executable file formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_FW_LOADER is not set
+# CONFIG_DEBUG_DRIVER is not set
+
+#
+# Block devices
+#
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=m
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=4096
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_LBD is not set
+# CONFIG_CDROM_PKTCDVD is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_ATA_OVER_ETH is not set
+
+#
+# SCSI device support
+#
+CONFIG_SCSI=m
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=m
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+# CONFIG_CHR_DEV_SG is not set
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+# CONFIG_SCSI_MULTI_LUN is not set
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+
+#
+# SCSI Transport Attributes
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+
+#
+# SCSI low-level drivers
+#
+# CONFIG_SCSI_SATA is not set
+# CONFIG_SCSI_DEBUG is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+# CONFIG_MD is not set
+
+#
+# Networking support
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+# CONFIG_NETLINK_DEV is not set
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_PNP=y
+# CONFIG_IP_PNP_DHCP is not set
+# CONFIG_IP_PNP_BOOTP is not set
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_IP_TCPDIAG=y
+# CONFIG_IP_TCPDIAG_IPV6 is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETFILTER is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+# CONFIG_NET_CLS_ROUTE is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+# CONFIG_NET_ETHERNET is not set
+
+#
+# Ethernet (1000 Mbit)
+#
+
+#
+# Ethernet (10000 Mbit)
+#
+
+#
+# Token Ring devices
+#
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+
+#
+# Character devices
+#
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_XATTR=y
+# CONFIG_EXT3_FS_POSIX_ACL is not set
+# CONFIG_EXT3_FS_SECURITY is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+CONFIG_REISERFS_FS=y
+# CONFIG_REISERFS_CHECK is not set
+# CONFIG_REISERFS_PROC_INFO is not set
+# CONFIG_REISERFS_FS_XATTR is not set
+# CONFIG_JFS_FS is not set
+
+#
+# XFS support
+#
+# CONFIG_XFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_QUOTA is not set
+CONFIG_DNOTIFY=y
+CONFIG_AUTOFS_FS=y
+CONFIG_AUTOFS4_FS=y
+
+#
+# CD-ROM/DVD Filesystems
+#
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_ZISOFS_FS=y
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_SYSFS=y
+# CONFIG_DEVFS_FS is not set
+CONFIG_DEVPTS_FS_XATTR=y
+# CONFIG_DEVPTS_FS_SECURITY is not set
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_XATTR is not set
+# CONFIG_HUGETLBFS is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V4 is not set
+# CONFIG_NFS_DIRECTIO is not set
+# CONFIG_NFSD is not set
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_SUNRPC=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+
+#
+# Native Language Support
+#
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+CONFIG_CRYPTO=y
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=m
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_WP512 is not set
+# CONFIG_CRYPTO_DES is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_AES_586 is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+CONFIG_CRYPTO_CRC32C=m
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Hardware crypto devices
+#
+# CONFIG_CRYPTO_DEV_PADLOCK is not set
+
+#
+# Library routines
+#
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC32 is not set
+CONFIG_LIBCRC32C=m
+CONFIG_ZLIB_INFLATE=y
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/arch/xen/i386/Kconfig
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/Kconfig Tue Aug 9 15:17:45 2005
@@ -0,0 +1,974 @@
+#
+# For a description of the syntax of this configuration file,
+# see Documentation/kbuild/kconfig-language.txt.
+#
+
+menu "X86 Processor Configuration"
+
+config XENARCH
+ string
+ default i386
+
+config MMU
+ bool
+ default y
+
+config SBUS
+ bool
+
+config UID16
+ bool
+ default y
+
+config GENERIC_ISA_DMA
+ bool
+ default y
+
+config GENERIC_IOMAP
+ bool
+ default y
+
+choice
+ prompt "Processor family"
+ default M686
+
+config M386
+ bool "386"
+ ---help---
+ This is the processor type of your CPU. This information is used for
+ optimizing purposes. In order to compile a kernel that can run on
+ all x86 CPU types (albeit not optimally fast), you can specify
+ "386" here.
+
+ The kernel will not necessarily run on earlier architectures than
+ the one you have chosen, e.g. a Pentium optimized kernel will run on
+ a PPro, but not necessarily on a i486.
+
+ Here are the settings recommended for greatest speed:
+ - "386" for the AMD/Cyrix/Intel 386DX/DXL/SL/SLC/SX, Cyrix/TI
+ 486DLC/DLC2, UMC 486SX-S and NexGen Nx586. Only "386" kernels
+ will run on a 386 class machine.
+ - "486" for the AMD/Cyrix/IBM/Intel 486DX/DX2/DX4 or
+ SL/SLC/SLC2/SLC3/SX/SX2 and UMC U5D or U5S.
+ - "586" for generic Pentium CPUs lacking the TSC
+ (time stamp counter) register.
+ - "Pentium-Classic" for the Intel Pentium.
+ - "Pentium-MMX" for the Intel Pentium MMX.
+ - "Pentium-Pro" for the Intel Pentium Pro.
+ - "Pentium-II" for the Intel Pentium II or pre-Coppermine Celeron.
+ - "Pentium-III" for the Intel Pentium III or Coppermine Celeron.
+ - "Pentium-4" for the Intel Pentium 4 or P4-based Celeron.
+ - "K6" for the AMD K6, K6-II and K6-III (aka K6-3D).
+ - "Athlon" for the AMD K7 family (Athlon/Duron/Thunderbird).
+ - "Crusoe" for the Transmeta Crusoe series.
+ - "Efficeon" for the Transmeta Efficeon series.
+ - "Winchip-C6" for original IDT Winchip.
+ - "Winchip-2" for IDT Winchip 2.
+ - "Winchip-2A" for IDT Winchips with 3dNow! capabilities.
+ - "CyrixIII/VIA C3" for VIA Cyrix III or VIA C3.
+	  - "VIA C3-2" for VIA C3-2 "Nehemiah" (model 9 and above).
+
+ If you don't know what to do, choose "386".
+
+config M486
+ bool "486"
+ help
+ Select this for a 486 series processor, either Intel or one of the
+ compatible processors from AMD, Cyrix, IBM, or Intel. Includes DX,
+ DX2, and DX4 variants; also SL/SLC/SLC2/SLC3/SX/SX2 and UMC U5D or
+ U5S.
+
+config M586
+ bool "586/K5/5x86/6x86/6x86MX"
+ help
+	  Select this for a 586 or 686 series processor such as the AMD K5,
+ the Cyrix 5x86, 6x86 and 6x86MX. This choice does not
+ assume the RDTSC (Read Time Stamp Counter) instruction.
+
+config M586TSC
+ bool "Pentium-Classic"
+ help
+ Select this for a Pentium Classic processor with the RDTSC (Read
+ Time Stamp Counter) instruction for benchmarking.
+
+config M586MMX
+ bool "Pentium-MMX"
+ help
+ Select this for a Pentium with the MMX graphics/multimedia
+ extended instructions.
+
+config M686
+ bool "Pentium-Pro"
+ help
+ Select this for Intel Pentium Pro chips. This enables the use of
+ Pentium Pro extended instructions, and disables the init-time guard
+ against the f00f bug found in earlier Pentiums.
+
+config MPENTIUMII
+ bool "Pentium-II/Celeron(pre-Coppermine)"
+ help
+ Select this for Intel chips based on the Pentium-II and
+ pre-Coppermine Celeron core. This option enables an unaligned
+ copy optimization, compiles the kernel with optimization flags
+ tailored for the chip, and applies any applicable Pentium Pro
+ optimizations.
+
+config MPENTIUMIII
+ bool "Pentium-III/Celeron(Coppermine)/Pentium-III Xeon"
+ help
+ Select this for Intel chips based on the Pentium-III and
+ Celeron-Coppermine core. This option enables use of some
+ extended prefetch instructions in addition to the Pentium II
+ extensions.
+
+config MPENTIUMM
+ bool "Pentium M"
+ help
+ Select this for Intel Pentium M (not Pentium-4 M)
+ notebook chips.
+
+config MPENTIUM4
+ bool "Pentium-4/Celeron(P4-based)/Pentium-4 M/Xeon"
+ help
+ Select this for Intel Pentium 4 chips. This includes the
+ Pentium 4, P4-based Celeron and Xeon, and Pentium-4 M
+ (not Pentium M) chips. This option enables compile flags
+ optimized for the chip, uses the correct cache shift, and
+ applies any applicable Pentium III optimizations.
+
+config MK6
+ bool "K6/K6-II/K6-III"
+ help
+ Select this for an AMD K6-family processor. Enables use of
+ some extended instructions, and passes appropriate optimization
+ flags to GCC.
+
+config MK7
+ bool "Athlon/Duron/K7"
+ help
+ Select this for an AMD Athlon K7-family processor. Enables use of
+ some extended instructions, and passes appropriate optimization
+ flags to GCC.
+
+config MK8
+ bool "Opteron/Athlon64/Hammer/K8"
+ help
+	  Select this for an AMD Opteron or Athlon64 Hammer-family processor. Enables
+ use of some extended instructions, and passes appropriate optimization
+ flags to GCC.
+
+config MCRUSOE
+ bool "Crusoe"
+ help
+ Select this for a Transmeta Crusoe processor. Treats the processor
+ like a 586 with TSC, and sets some GCC optimization flags (like a
+ Pentium Pro with no alignment requirements).
+
+config MEFFICEON
+ bool "Efficeon"
+ help
+ Select this for a Transmeta Efficeon processor.
+
+config MWINCHIPC6
+ bool "Winchip-C6"
+ help
+ Select this for an IDT Winchip C6 chip. Linux and GCC
+ treat this chip as a 586TSC with some extended instructions
+ and alignment requirements.
+
+config MWINCHIP2
+ bool "Winchip-2"
+ help
+ Select this for an IDT Winchip-2. Linux and GCC
+ treat this chip as a 586TSC with some extended instructions
+ and alignment requirements.
+
+config MWINCHIP3D
+ bool "Winchip-2A/Winchip-3"
+ help
+ Select this for an IDT Winchip-2A or 3. Linux and GCC
+ treat this chip as a 586TSC with some extended instructions
+	  and alignment requirements. Also enable out of order memory
+ stores for this CPU, which can increase performance of some
+ operations.
+
+config MCYRIXIII
+ bool "CyrixIII/VIA-C3"
+ help
+ Select this for a Cyrix III or C3 chip. Presently Linux and GCC
+ treat this chip as a generic 586. Whilst the CPU is 686 class,
+ it lacks the cmov extension which gcc assumes is present when
+ generating 686 code.
+ Note that Nehemiah (Model 9) and above will not boot with this
+ kernel due to them lacking the 3DNow! instructions used in earlier
+ incarnations of the CPU.
+
+config MVIAC3_2
+ bool "VIA C3-2 (Nehemiah)"
+ help
+ Select this for a VIA C3 "Nehemiah". Selecting this enables usage
+ of SSE and tells gcc to treat the CPU as a 686.
+ Note, this kernel will not boot on older (pre model 9) C3s.
+
+endchoice
+
+config X86_GENERIC
+ bool "Generic x86 support"
+ help
+ Instead of just including optimizations for the selected
+ x86 variant (e.g. PII, Crusoe or Athlon), include some more
+ generic optimizations as well. This will make the kernel
+ perform better on x86 CPUs other than that selected.
+
+ This is really intended for distributors who need more
+ generic optimizations.
+
+#
+# Define implied options from the CPU selection here
+#
+config X86_CMPXCHG
+ bool
+ depends on !M386
+ default y
+
+config X86_XADD
+ bool
+ depends on !M386
+ default y
+
+config X86_L1_CACHE_SHIFT
+ int
+ default "7" if MPENTIUM4 || X86_GENERIC
+ default "4" if X86_ELAN || M486 || M386
+	default "5" if MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2
+ default "6" if MK7 || MK8 || MPENTIUMM
+
+config RWSEM_GENERIC_SPINLOCK
+ bool
+ depends on M386
+ default y
+
+config RWSEM_XCHGADD_ALGORITHM
+ bool
+ depends on !M386
+ default y
+
+config GENERIC_CALIBRATE_DELAY
+ bool
+ default y
+
+config X86_PPRO_FENCE
+ bool
+ depends on M686 || M586MMX || M586TSC || M586 || M486 || M386
+ default y
+
+config X86_F00F_BUG
+ bool
+ depends on M586MMX || M586TSC || M586 || M486 || M386
+ default y
+
+config X86_WP_WORKS_OK
+ bool
+ depends on !M386
+ default y
+
+config X86_INVLPG
+ bool
+ depends on !M386
+ default y
+
+config X86_BSWAP
+ bool
+ depends on !M386
+ default y
+
+config X86_POPAD_OK
+ bool
+ depends on !M386
+ default y
+
+config X86_ALIGNMENT_16
+ bool
+	depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2
+ default y
+
+config X86_GOOD_APIC
+ bool
+	depends on MK7 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || MK8 || MEFFICEON
+ default y
+
+config X86_INTEL_USERCOPY
+ bool
+	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON
+ default y
+
+config X86_USE_PPRO_CHECKSUM
+ bool
+	depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MEFFICEON
+ default y
+
+config X86_USE_3DNOW
+ bool
+ depends on MCYRIXIII || MK7
+ default y
+
+config X86_OOSTORE
+ bool
+ depends on (MWINCHIP3D || MWINCHIP2 || MWINCHIPC6) && MTRR
+ default y
+
+config HPET_TIMER
+ bool
+ default n
+#config HPET_TIMER
+# bool "HPET Timer Support"
+# help
+# This enables the use of the HPET for the kernel's internal timer.
+# HPET is the next generation timer replacing legacy 8254s.
+# You can safely choose Y here. However, HPET will only be
+# activated if the platform and the BIOS support this feature.
+# Otherwise the 8254 will be used for timing services.
+#
+# Choose N to continue using the legacy 8254 timer.
+
+config HPET_EMULATE_RTC
+ def_bool HPET_TIMER && RTC=y
+
+config SMP
+ bool
+ default n
+#config SMP
+# bool "Symmetric multi-processing support"
+# ---help---
+# This enables support for systems with more than one CPU. If you have
+# a system with only one CPU, like most personal computers, say N. If
+# you have a system with more than one CPU, say Y.
+#
+# If you say N here, the kernel will run on single and multiprocessor
+# machines, but will use only one CPU of a multiprocessor machine. If
+# you say Y here, the kernel will run on many, but not all,
+# singleprocessor machines. On a singleprocessor machine, the kernel
+# will run faster if you say N here.
+#
+# Note that if you say Y here and choose architecture "586" or
+# "Pentium" under "Processor family", the kernel will not work on 486
+# architectures. Similarly, multiprocessor kernels for the "PPro"
+# architecture may not work on all Pentium based boards.
+#
+# People using multiprocessor machines who say Y here should also say
+# Y to "Enhanced Real Time Clock Support", below. The "Advanced Power
+# Management" code will be disabled if you say Y here.
+#
+# See also the <file:Documentation/smp.txt>,
+# <file:Documentation/i386/IO-APIC.txt>,
+# <file:Documentation/nmi_watchdog.txt> and the SMP-HOWTO available at
+# <http://www.tldp.org/docs.html#howto>.
+#
+# If you don't know what to do here, say N.
+
+config NR_CPUS
+ int "Maximum number of CPUs (2-255)"
+ range 2 255
+ depends on SMP
+ default "32" if X86_NUMAQ || X86_SUMMIT || X86_BIGSMP || X86_ES7000
+ default "8"
+ help
+ This allows you to specify the maximum number of CPUs which this
+ kernel will support. The maximum supported value is 255 and the
+ minimum value which makes sense is 2.
+
+ This is purely to save memory - each supported CPU adds
+ approximately eight kilobytes to the kernel image.
+
+config SCHED_SMT
+ bool "SMT (Hyperthreading) scheduler support"
+ depends on SMP
+	default n
+ help
+ SMT scheduler support improves the CPU scheduler's decision making
+ when dealing with Intel Pentium 4 chips with HyperThreading at a
+ cost of slightly increased overhead in some places. If unsure say
+ N here.
+
+config PREEMPT
+ bool "Preemptible Kernel"
+ help
+ This option reduces the latency of the kernel when reacting to
+ real-time or interactive events by allowing a low priority process to
+ be preempted even if it is in kernel mode executing a system call.
+ This allows applications to run more reliably even when the system is
+ under load.
+
+ Say Y here if you are building a kernel for a desktop, embedded
+ or real-time system. Say N if you are unsure.
+
+config PREEMPT_BKL
+ bool "Preempt The Big Kernel Lock"
+ depends on PREEMPT
+ default y
+ help
+ This option reduces the latency of the kernel by making the
+ big kernel lock preemptible.
+
+ Say Y here if you are building a kernel for a desktop system.
+ Say N if you are unsure.
+
+#config X86_TSC
+# bool
+#	depends on (MWINCHIP3D || MWINCHIP2 || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2) && !X86_NUMAQ
+# default y
+
+#config X86_MCE
+# bool "Machine Check Exception"
+# depends on !X86_VOYAGER
+# ---help---
+# Machine Check Exception support allows the processor to notify the
+# kernel if it detects a problem (e.g. overheating, component failure).
+# The action the kernel takes depends on the severity of the problem,
+#	  ranging from a warning message on the console, to halting the machine.
+# Your processor must be a Pentium or newer to support this - check the
+# flags in /proc/cpuinfo for mce. Note that some older Pentium systems
+# have a design flaw which leads to false MCE events - hence MCE is
+# disabled on all P5 processors, unless explicitly enabled with "mce"
+# as a boot argument. Similarly, if MCE is built in and creates a
+# problem on some new non-standard machine, you can boot with "nomce"
+# to disable it. MCE support simply ignores non-MCE processors like
+# the 386 and 486, so nearly everyone can say Y here.
+
+#config X86_MCE_NONFATAL
+#	tristate "Check for non-fatal errors on AMD Athlon/Duron / Intel Pentium 4"
+# depends on X86_MCE
+# help
+#	  Enabling this feature starts a timer that triggers every 5 seconds which
+# will look at the machine check registers to see if anything happened.
+# Non-fatal problems automatically get corrected (but still logged).
+# Disable this if you don't want to see these messages.
+#	  Seeing the messages this option prints out may be indicative of dying hardware,
+# or out-of-spec (ie, overclocked) hardware.
+# This option only does something on certain CPUs.
+# (AMD Athlon/Duron and Intel Pentium 4)
+
+#config X86_MCE_P4THERMAL
+# bool "check for P4 thermal throttling interrupt."
+# depends on X86_MCE && (X86_UP_APIC || SMP)
+# help
+# Enabling this feature will cause a message to be printed when the P4
+# enters thermal throttling.
+
+config MICROCODE
+ tristate "/dev/cpu/microcode - Intel IA32 CPU microcode support"
+ depends on XEN_PRIVILEGED_GUEST
+ ---help---
+ If you say Y here and also to "/dev file system support" in the
+ 'File systems' section, you will be able to update the microcode on
+ Intel processors in the IA32 family, e.g. Pentium Pro, Pentium II,
+ Pentium III, Pentium 4, Xeon etc. You will obviously need the
+ actual microcode binary data itself which is not shipped with the
+ Linux kernel.
+
+ For latest news and information on obtaining all the required
+ ingredients for this driver, check:
+ <http://www.urbanmyth.org/microcode/>.
+
+ To compile this driver as a module, choose M here: the
+ module will be called microcode.
+
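+# A hedged userspace sketch of the interface described above (assumption:
+# the old microcode driver expects the whole raw image in a single write()
+# to /dev/cpu/microcode; the input file name below is a placeholder):
+#
+#	#include <fcntl.h>
+#	#include <stdlib.h>
+#	#include <sys/stat.h>
+#	#include <unistd.h>
+#
+#	int main(void)
+#	{
+#		struct stat st;
+#		char *buf;
+#		int in = open("microcode.dat", O_RDONLY);	/* placeholder */
+#		int dev = open("/dev/cpu/microcode", O_WRONLY);
+#
+#		if (in < 0 || dev < 0 || fstat(in, &st) < 0)
+#			return 1;
+#		buf = malloc(st.st_size);
+#		if (!buf || read(in, buf, st.st_size) != st.st_size)
+#			return 1;
+#		return write(dev, buf, st.st_size) == st.st_size ? 0 : 1;
+#	}
+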
+#config X86_MSR
+# tristate "/dev/cpu/*/msr - Model-specific register support"
+# help
+# This device gives privileged processes access to the x86
+# Model-Specific Registers (MSRs). It is a character device with
+# major 202 and minors 0 to 31 for /dev/cpu/0/msr to /dev/cpu/31/msr.
+# MSR accesses are directed to a specific CPU on multi-processor
+# systems.
+
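+# A sketch of the interface the commented-out entry above describes,
+# assuming the msr driver convention that the file offset selects the
+# register number and reads are 8 bytes wide (here MSR 0x10, the
+# time-stamp counter, on CPU 0):
+#
+#	#include <fcntl.h>
+#	#include <stdint.h>
+#	#include <unistd.h>
+#
+#	uint64_t rdmsr_cpu0(uint32_t reg)
+#	{
+#		uint64_t val = 0;
+#		int fd = open("/dev/cpu/0/msr", O_RDONLY);
+#
+#		if (fd >= 0) {
+#			pread(fd, &val, sizeof(val), reg);	/* offset = MSR index */
+#			close(fd);
+#		}
+#		return val;
+#	}
+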
+config X86_CPUID
+ tristate "/dev/cpu/*/cpuid - CPU information support"
+ help
+ This device gives processes access to the x86 CPUID instruction to
+ be executed on a specific processor. It is a character device
+ with major 203 and minors 0 to 31 for /dev/cpu/0/cpuid to
+ /dev/cpu/31/cpuid.
+
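+# A minimal userspace sketch of the device described above, assuming the
+# cpuid driver convention that the file offset selects the input EAX leaf
+# and each read returns the 16 bytes EAX, EBX, ECX, EDX:
+#
+#	#include <fcntl.h>
+#	#include <stdio.h>
+#	#include <unistd.h>
+#
+#	int main(void)
+#	{
+#		unsigned int r[4];
+#		int fd = open("/dev/cpu/0/cpuid", O_RDONLY);
+#
+#		if (fd < 0 || pread(fd, r, sizeof(r), 0) != sizeof(r))
+#			return 1;
+#		/* leaf 0: EAX = max level, EBX/EDX/ECX = vendor string */
+#		printf("max leaf %u, vendor %.4s%.4s%.4s\n",
+#		       r[0], (char *)&r[1], (char *)&r[3], (char *)&r[2]);
+#		close(fd);
+#		return 0;
+#	}
+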
+source "drivers/firmware/Kconfig"
+
+choice
+ prompt "High Memory Support"
+ default NOHIGHMEM
+
+config NOHIGHMEM
+ bool "off"
+ ---help---
+ Linux can use up to 64 Gigabytes of physical memory on x86 systems.
+ However, the address space of 32-bit x86 processors is only 4
+ Gigabytes large. That means that, if you have a large amount of
+ physical memory, not all of it can be "permanently mapped" by the
+ kernel. The physical memory that's not permanently mapped is called
+ "high memory".
+
+ If you are compiling a kernel which will never run on a machine with
+ more than 1 Gigabyte total physical RAM, answer "off" here (default
+ choice and suitable for most users). This will result in a "3GB/1GB"
+ split: 3GB are mapped so that each process sees a 3GB virtual memory
+ space and the remaining part of the 4GB virtual memory space is used
+ by the kernel to permanently map as much physical memory as
+ possible.
+
+ If the machine has between 1 and 4 Gigabytes physical RAM, then
+ answer "4GB" here.
+
+ If more than 4 Gigabytes is used then answer "64GB" here. This
+ selection turns Intel PAE (Physical Address Extension) mode on.
+ PAE implements 3-level paging on IA32 processors. PAE is fully
+ supported by Linux, PAE mode is implemented on all recent Intel
+ processors (Pentium Pro and better). NOTE: If you say "64GB" here,
+ then the kernel will not boot on CPUs that don't support PAE!
+
+ The actual amount of total physical memory will either be
+ auto detected or can be forced by using a kernel command line option
+ such as "mem=256M". (Try "man bootparam" or see the documentation of
+ your boot loader (lilo or loadlin) about how to pass options to the
+ kernel at boot time.)
+
+ If unsure, say "off".
+
+config HIGHMEM4G
+ bool "4GB"
+ help
+ Select this if you have a 32-bit processor and between 1 and 4
+ gigabytes of physical RAM.
+
+#config HIGHMEM64G
+# bool "64GB"
+# help
+# Select this if you have a 32-bit processor and more than 4
+# gigabytes of physical RAM.
+
+endchoice
+
+config HIGHMEM
+ bool
+ depends on HIGHMEM64G || HIGHMEM4G
+ default y
+
+config X86_PAE
+ bool
+ depends on HIGHMEM64G
+ default y
+
+# Common NUMA Features
+config NUMA
+ bool "Numa Memory Allocation and Scheduler Support"
+	depends on SMP && HIGHMEM64G && (X86_NUMAQ || X86_GENERICARCH || (X86_SUMMIT && ACPI))
+ default n if X86_PC
+ default y if (X86_NUMAQ || X86_SUMMIT)
+
+# Need comments to help the hapless user trying to turn on NUMA support
+comment "NUMA (NUMA-Q) requires SMP, 64GB highmem support"
+ depends on X86_NUMAQ && (!HIGHMEM64G || !SMP)
+
+comment "NUMA (Summit) requires SMP, 64GB highmem support, ACPI"
+ depends on X86_SUMMIT && (!HIGHMEM64G || !ACPI)
+
+config DISCONTIGMEM
+ bool
+ depends on NUMA
+ default y
+
+config HAVE_ARCH_BOOTMEM_NODE
+ bool
+ depends on NUMA
+ default y
+
+#config HIGHPTE
+# bool "Allocate 3rd-level pagetables from highmem"
+# depends on HIGHMEM4G || HIGHMEM64G
+# help
+# The VM uses one page table entry for each page of physical memory.
+# For systems with a lot of RAM, this can be wasteful of precious
+# low memory. Setting this option will put user-space page table
+# entries in high memory.
+
+config MTRR
+ bool
+ depends on XEN_PRIVILEGED_GUEST
+ default y
+
+#config MTRR
+# bool "MTRR (Memory Type Range Register) support"
+# ---help---
+# On Intel P6 family processors (Pentium Pro, Pentium II and later)
+# the Memory Type Range Registers (MTRRs) may be used to control
+# processor access to memory ranges. This is most useful if you have
+# a video (VGA) card on a PCI or AGP bus. Enabling write-combining
+# allows bus write transfers to be combined into a larger transfer
+# before bursting over the PCI/AGP bus. This can increase performance
+# of image write operations 2.5 times or more. Saying Y here creates a
+# /proc/mtrr file which may be used to manipulate your processor's
+# MTRRs. Typically the X server should use this.
+#
+# This code has a reasonably generic interface so that similar
+# control registers on other processors can be easily supported
+# as well:
+#
+# The Cyrix 6x86, 6x86MX and M II processors have Address Range
+# Registers (ARRs) which provide a similar functionality to MTRRs. For
+# these, the ARRs are used to emulate the MTRRs.
+# The AMD K6-2 (stepping 8 and above) and K6-3 processors have two
+# MTRRs. The Centaur C6 (WinChip) has 8 MCRs, allowing
+# write-combining. All of these processors are supported by this code
+# and it makes sense to say Y here if you have one of them.
+#
+# Saying Y here also fixes a problem with buggy SMP BIOSes which only
+# set the MTRRs for the boot CPU and not for the secondary CPUs. This
+# can lead to all sorts of problems, so it's good to say Y here.
+#
+# You can safely say Y even if your machine doesn't have MTRRs, you'll
+# just add about 9 KB to your kernel.
+#
+# See <file:Documentation/mtrr.txt> for more information.
+
+config IRQBALANCE
+ bool "Enable kernel irq balancing"
+ depends on SMP && X86_IO_APIC
+ default y
+ help
+ The default yes will allow the kernel to do irq load balancing.
+ Saying no will keep the kernel from doing irq load balancing.
+
+config HAVE_DEC_LOCK
+ bool
+ depends on (SMP || PREEMPT) && X86_CMPXCHG
+ default y
+
+# turning this on wastes a bunch of space.
+# Summit needs it only when NUMA is on
+config BOOT_IOREMAP
+ bool
+ depends on (((X86_SUMMIT || X86_GENERICARCH) && NUMA) || (X86 && EFI))
+ default y
+
+config REGPARM
+ bool "Use register arguments (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ default n
+ help
+ Compile the kernel with -mregparm=3. This uses a different ABI
+ and passes the first three arguments of a function call in registers.
+	  This will probably break binary-only modules.
+
+ This feature is only enabled for gcc-3.0 and later - earlier compilers
+ generate incorrect output with certain kernel constructs when
+ -mregparm=3 is used.
+
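+# The ABI change described above can be reproduced per-function with GCC's
+# regparm attribute; a minimal sketch of the convention that -mregparm=3
+# applies globally:
+#
+#	/* a, b and c arrive in %eax, %edx and %ecx instead of on the stack */
+#	int __attribute__((regparm(3))) add3(int a, int b, int c)
+#	{
+#		return a + b + c;
+#	}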
+
+if XEN_PHYSDEV_ACCESS
+
+menu "Bus options (PCI, PCMCIA, EISA, MCA, ISA)"
+
+config X86_VISWS_APIC
+ bool
+ depends on X86_VISWS
+ default y
+
+config X86_LOCAL_APIC
+ bool
+ depends on (X86_VISWS || SMP) && !X86_VOYAGER
+ default y
+
+config X86_IO_APIC
+ bool
+ depends on SMP && !(X86_VISWS || X86_VOYAGER)
+ default y
+
+config PCI
+ bool "PCI support" if !X86_VISWS
+ depends on !X86_VOYAGER
+ default y if X86_VISWS
+ help
+ Find out whether you have a PCI motherboard. PCI is the name of a
+ bus system, i.e. the way the CPU talks to the other stuff inside
+ your box. Other bus systems are ISA, EISA, MicroChannel (MCA) or
+ VESA. If you have PCI, say Y, otherwise N.
+
+ The PCI-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>, contains valuable
+ information about which PCI hardware does work under Linux and which
+ doesn't.
+
+#choice
+# prompt "PCI access mode"
+# depends on PCI && !X86_VISWS
+# default PCI_GOANY
+# ---help---
+# On PCI systems, the BIOS can be used to detect the PCI devices and
+# determine their configuration. However, some old PCI motherboards
+# have BIOS bugs and may crash if this is done. Also, some embedded
+# PCI-based systems don't have any BIOS at all. Linux can also try to
+# detect the PCI hardware directly without using the BIOS.
+#
+# With this option, you can specify how Linux should detect the
+# PCI devices. If you choose "BIOS", the BIOS will be used,
+# if you choose "Direct", the BIOS won't be used, and if you
+# choose "MMConfig", then PCI Express MMCONFIG will be used.
+# If you choose "Any", the kernel will try MMCONFIG, then the
+#	  direct access method and fall back to the BIOS if that doesn't
+# work. If unsure, go with the default, which is "Any".
+#
+#config PCI_GOBIOS
+# bool "BIOS"
+#
+#config PCI_GOMMCONFIG
+# bool "MMConfig"
+#
+#config PCI_GODIRECT
+# bool "Direct"
+#
+#config PCI_GOANY
+# bool "Any"
+#
+#endchoice
+#
+#config PCI_BIOS
+# bool
+# depends on !X86_VISWS && PCI && (PCI_GOBIOS || PCI_GOANY)
+# default y
+#
+#config PCI_DIRECT
+# bool
+# depends on PCI && ((PCI_GODIRECT || PCI_GOANY) || X86_VISWS)
+# default y
+
+config PCI_DIRECT
+ bool
+ depends on PCI
+ default y
+
+source "drivers/pci/pcie/Kconfig"
+
+source "drivers/pci/Kconfig"
+
+config ISA
+ bool "ISA support"
+ depends on !(X86_VOYAGER || X86_VISWS)
+ help
+ Find out whether you have ISA slots on your motherboard. ISA is the
+ name of a bus system, i.e. the way the CPU talks to the other stuff
+ inside your box. Other bus systems are PCI, EISA, MicroChannel
+ (MCA) or VESA. ISA is an older system, now being displaced by PCI;
+ newer boards don't support it. If you have ISA, say Y, otherwise N.
+
+config EISA
+ bool "EISA support"
+ depends on ISA
+ ---help---
+ The Extended Industry Standard Architecture (EISA) bus was
+ developed as an open alternative to the IBM MicroChannel bus.
+
+ The EISA bus provided some of the features of the IBM MicroChannel
+ bus while maintaining backward compatibility with cards made for
+ the older ISA bus. The EISA bus saw limited use between 1988 and
+ 1995 when it was made obsolete by the PCI bus.
+
+ Say Y here if you are building a kernel for an EISA-based machine.
+
+ Otherwise, say N.
+
+source "drivers/eisa/Kconfig"
+
+config MCA
+ bool "MCA support"
+ depends on !(X86_VISWS || X86_VOYAGER)
+ help
+ MicroChannel Architecture is found in some IBM PS/2 machines and
+ laptops. It is a bus system similar to PCI or ISA. See
+ <file:Documentation/mca.txt> (and especially the web page given
+ there) before attempting to build an MCA bus kernel.
+
+config MCA
+ depends on X86_VOYAGER
+ default y if X86_VOYAGER
+
+source "drivers/mca/Kconfig"
+
+config SCx200
+ tristate "NatSemi SCx200 support"
+ depends on !X86_VOYAGER
+ help
+ This provides basic support for the National Semiconductor SCx200
+ processor. Right now this is just a driver for the GPIO pins.
+
+ If you don't know what to do here, say N.
+
+ This support is also available as a module. If compiled as a
+ module, it will be called scx200.
+
+source "drivers/pcmcia/Kconfig"
+
+source "drivers/pci/hotplug/Kconfig"
+
+endmenu
+
+endif
+
+menu "Kernel hacking"
+
+config DEBUG_KERNEL
+ bool "Kernel debugging"
+ help
+ Say Y here if you are developing drivers or trying to debug and
+ identify kernel problems.
+
+config EARLY_PRINTK
+ bool "Early printk" if EMBEDDED
+ default y
+ help
+ Write kernel log output directly into the VGA buffer or to a serial
+ port.
+
+ This is useful for kernel debugging when your machine crashes very
+ early before the console code is initialized. For normal operation
+ it is not recommended because it looks ugly and doesn't cooperate
+	  with klogd/syslogd or the X server. You should normally say N here,
+ unless you want to debug such a crash.
+
+config DEBUG_STACKOVERFLOW
+ bool "Check for stack overflows"
+ depends on DEBUG_KERNEL
+
+config DEBUG_STACK_USAGE
+ bool "Stack utilization instrumentation"
+ depends on DEBUG_KERNEL
+ help
+ Enables the display of the minimum amount of free stack which each
+ task has ever had available in the sysrq-T and sysrq-P debug output.
+
+ This option will slow down process creation somewhat.
+
+config DEBUG_SLAB
+ bool "Debug memory allocations"
+ depends on DEBUG_KERNEL
+ help
+ Say Y here to have the kernel do limited verification on memory
+ allocation as well as poisoning memory on free to catch use of freed
+ memory.
+
+config MAGIC_SYSRQ
+ bool "Magic SysRq key"
+ depends on DEBUG_KERNEL
+ help
+ If you say Y here, you will have some control over the system even
+ if the system crashes for example during kernel debugging (e.g., you
+ will be able to flush the buffer cache to disk, reboot the system
+ immediately or dump some status information). This is accomplished
+ by pressing various keys while holding SysRq (Alt+PrintScreen). It
+ also works on a serial console (on PC hardware at least), if you
+ send a BREAK and then within 5 seconds a command keypress. The
+ keys are documented in <file:Documentation/sysrq.txt>. Don't say Y
+ unless you really know what this hack does.
+
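+# Besides the console key combinations above, a command key can be injected
+# from userspace (as root) through /proc/sysrq-trigger; a sketch equivalent
+# to Alt+SysRq+t:
+#
+#	#include <fcntl.h>
+#	#include <unistd.h>
+#
+#	int main(void)
+#	{
+#		int fd = open("/proc/sysrq-trigger", O_WRONLY);
+#
+#		if (fd < 0)
+#			return 1;
+#		write(fd, "t", 1);	/* dump task state to the console */
+#		close(fd);
+#		return 0;
+#	}
+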
+config DEBUG_SPINLOCK
+ bool "Spinlock debugging"
+ depends on DEBUG_KERNEL
+ help
+ Say Y here and build SMP to catch missing spinlock initialization
+ and certain other kinds of spinlock errors commonly made. This is
+ best used in conjunction with the NMI watchdog so that spinlock
+ deadlocks are also debuggable.
+
+config DEBUG_PAGEALLOC
+ bool "Page alloc debugging"
+ depends on DEBUG_KERNEL
+ help
+ Unmap pages from the kernel linear mapping after free_pages().
+ This results in a large slowdown, but helps to find certain types
+ of memory corruptions.
+
+config DEBUG_HIGHMEM
+ bool "Highmem debugging"
+ depends on DEBUG_KERNEL && HIGHMEM
+ help
+	  This option enables additional error checking for high memory systems.
+ Disable for production systems.
+
+config DEBUG_INFO
+ bool "Compile the kernel with debug info"
+ depends on DEBUG_KERNEL
+ help
+ If you say Y here the resulting kernel image will include
+ debugging info resulting in a larger kernel image.
+ Say Y here only if you plan to use gdb to debug the kernel.
+ If you don't debug the kernel, you can say N.
+
+config DEBUG_SPINLOCK_SLEEP
+ bool "Sleep-inside-spinlock checking"
+ help
+ If you say Y here, various routines which may sleep will become very
+ noisy if they are called with a spinlock held.
+
+config FRAME_POINTER
+ bool "Compile the kernel with frame pointers"
+ help
+ If you say Y here the resulting kernel image will be slightly larger
+ and slower, but it will give very useful debugging information.
+ If you don't debug the kernel, you can say N, but we may not be able
+ to solve problems without frame pointers.
+
+config 4KSTACKS
+ bool "Use 4Kb for kernel stacks instead of 8Kb"
+ help
+ If you say Y here the kernel will use a 4Kb stacksize for the
+ kernel stack attached to each process/thread. This facilitates
+ running more threads on a system and also reduces the pressure
+ on the VM subsystem for higher order allocations. This option
+ will also use IRQ stacks to compensate for the reduced stackspace.
+
+config X86_FIND_SMP_CONFIG
+ bool
+ depends on X86_LOCAL_APIC || X86_VOYAGER
+ default y
+
+config X86_MPPARSE
+ bool
+ depends on X86_LOCAL_APIC && !X86_VISWS
+ default y
+
+endmenu
+
+#
+# Use the generic interrupt handling code in kernel/irq/:
+#
+config GENERIC_HARDIRQS
+ bool
+ default y
+
+config GENERIC_IRQ_PROBE
+ bool
+ default y
+
+config X86_SMP
+ bool
+ depends on SMP && !X86_VOYAGER
+ default y
+
+config X86_HT
+ bool
+ depends on SMP && !(X86_VISWS || X86_VOYAGER)
+ default y
+
+config X86_BIOS_REBOOT
+ bool
+ depends on !(X86_VISWS || X86_VOYAGER)
+ default y
+
+config X86_TRAMPOLINE
+ bool
+ depends on X86_SMP || (X86_VOYAGER && SMP)
+ default y
+
+config PC
+ bool
+ depends on X86 && !EMBEDDED
+ default y
+
+endmenu
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/arch/xen/i386/Makefile
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/Makefile Tue Aug 9 15:17:45 2005
@@ -0,0 +1,102 @@
+#
+# i386/Makefile
+#
+# This file is included by the global makefile so that you can add your own
+# architecture-specific flags and dependencies. Remember to have actions
+# for "archclean" cleaning up for this architecture.
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 1994 by Linus Torvalds
+#
+# 19990713 Artur Skawina <skawina@xxxxxxxxxxxxx>
+# Added '-march' and '-mpreferred-stack-boundary' support
+#
+
+XENARCH := $(subst ",,$(CONFIG_XENARCH))
+
+LDFLAGS := -m elf_i386
+LDFLAGS_vmlinux :=
+CHECK := $(CHECK) -D__i386__=1
+
+CFLAGS += -m32
+AFLAGS += -m32
+
+CFLAGS += -pipe -msoft-float
+
+# prevent gcc from keeping the stack 16 byte aligned
+CFLAGS += $(call cc-option,-mpreferred-stack-boundary=2,)
+
+align := $(cc-option-align)
+cflags-$(CONFIG_M386) += -march=i386
+cflags-$(CONFIG_M486) += -march=i486
+cflags-$(CONFIG_M586) += -march=i586
+cflags-$(CONFIG_M586TSC) += -march=i586
+cflags-$(CONFIG_M586MMX)	+= $(call cc-option,-march=pentium-mmx,-march=i586)
+cflags-$(CONFIG_M686) += -march=i686
+cflags-$(CONFIG_MPENTIUMII) += -march=i686 $(call cc-option,-mtune=pentium2)
+cflags-$(CONFIG_MPENTIUMIII) += -march=i686 $(call cc-option,-mtune=pentium3)
+cflags-$(CONFIG_MPENTIUMM) += -march=i686 $(call cc-option,-mtune=pentium3)
+cflags-$(CONFIG_MPENTIUM4) += -march=i686 $(call cc-option,-mtune=pentium4)
+cflags-$(CONFIG_MK6) += -march=k6
+# Please note that patches that add -march=athlon-xp and friends are pointless.
+# They make zero difference whatsoever to performance at this time.
+cflags-$(CONFIG_MK7)		+= $(call cc-option,-march=athlon,-march=i686 $(align)-functions=4)
+cflags-$(CONFIG_MK8)		+= $(call cc-option,-march=k8,$(call cc-option,-march=athlon,-march=i686 $(align)-functions=4))
+cflags-$(CONFIG_MCRUSOE)	+= -march=i686 $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
+cflags-$(CONFIG_MEFFICEON)	+= -march=i686 $(call cc-option,-mtune=pentium3) $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
+cflags-$(CONFIG_MWINCHIPC6)	+= $(call cc-option,-march=winchip-c6,-march=i586)
+cflags-$(CONFIG_MWINCHIP2) += $(call cc-option,-march=winchip2,-march=i586)
+cflags-$(CONFIG_MWINCHIP3D) += $(call cc-option,-march=winchip2,-march=i586)
+cflags-$(CONFIG_MCYRIXIII)	+= $(call cc-option,-march=c3,-march=i486) $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
+cflags-$(CONFIG_MVIAC3_2) += $(call cc-option,-march=c3-2,-march=i686)
+
+# AMD Elan support
+cflags-$(CONFIG_X86_ELAN) += -march=i486
+
+# -mregparm=3 works ok on gcc-3.0 and later
+#
+GCC_VERSION := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-version.sh $(CC))
+cflags-$(CONFIG_REGPARM) += $(shell if [ $(GCC_VERSION) -ge 0300 ] ; then echo "-mregparm=3"; fi ;)
+
+# Disable unit-at-a-time mode, it makes gcc use a lot more stack
+# due to the lack of sharing of stacklots.
+CFLAGS += $(call cc-option,-fno-unit-at-a-time,)
+
+CFLAGS += $(cflags-y)
+
+head-y := arch/xen/i386/kernel/head.o arch/xen/i386/kernel/init_task.o
+
+libs-y += arch/i386/lib/
+core-y += arch/xen/i386/kernel/ \
+ arch/xen/i386/mm/ \
+ arch/i386/crypto/
+# \
+# arch/xen/$(mcore-y)/
+drivers-$(CONFIG_MATH_EMULATION) += arch/i386/math-emu/
+drivers-$(CONFIG_PCI) += arch/xen/i386/pci/
+# must be linked after kernel/
+drivers-$(CONFIG_OPROFILE) += arch/i386/oprofile/
+drivers-$(CONFIG_PM) += arch/i386/power/
+
+# for clean
+obj- += kernel/ mm/ pci/
+#obj- += ../../i386/lib/ ../../i386/mm/
+#../../i386/$(mcore-y)/
+#obj- += ../../i386/pci/ ../../i386/oprofile/ ../../i386/power/
+
+xenflags-y += -Iinclude/asm-xen/asm-i386/mach-xen \
+ -Iinclude/asm-i386/mach-default
+CFLAGS += $(xenflags-y)
+AFLAGS += $(xenflags-y)
+
+prepare: include/asm-$(XENARCH)/asm_offsets.h
+CLEAN_FILES += include/asm-$(XENARCH)/asm_offsets.h
+
+arch/$(XENARCH)/kernel/asm-offsets.s: include/asm include/.asm-ignore \
+ include/linux/version.h include/config/MARKER
+
+include/asm-$(XENARCH)/asm_offsets.h: arch/$(XENARCH)/kernel/asm-offsets.s
+ $(call filechk,gen-asm-offsets)
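+# The rule above relies on kbuild's gen-asm-offsets filter, which scans the
+# compiler-generated .s file for "->" marker lines. A sketch of the C-side
+# idiom that produces them (the symbol name here is illustrative):
+#
+#	#include <linux/sched.h>
+#	#include <linux/stddef.h>
+#
+#	#define DEFINE(sym, val) \
+#		asm volatile("\n->" #sym " %0 " #val : : "i" (val))
+#
+#	void foo(void)
+#	{
+#		DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
+#	}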
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/arch/xen/i386/kernel/Makefile
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/Makefile Tue Aug 9 15:17:45 2005
@@ -0,0 +1,93 @@
+#
+# Makefile for the linux kernel.
+#
+
+XENARCH := $(subst ",,$(CONFIG_XENARCH))
+
+CFLAGS += -Iarch/$(XENARCH)/kernel
+
+extra-y := head.o init_task.o
+
+obj-y := process.o signal.o entry.o traps.o \
+ time.o ioport.o ldt.o setup.o \
+ pci-dma.o i386_ksyms.o
+
+c-obj-y := semaphore.o irq.o vm86.o \
+ ptrace.o sys_i386.o \
+ i387.o dmi_scan.o bootflag.o \
+ doublefault.o quirks.o
+s-obj-y :=
+
+obj-y += cpu/
+obj-y += timers/
+c-obj-$(CONFIG_ACPI_BOOT) += acpi/
+#c-obj-$(CONFIG_X86_BIOS_REBOOT) += reboot.o
+c-obj-$(CONFIG_MCA) += mca.o
+c-obj-$(CONFIG_X86_MSR) += msr.o
+c-obj-$(CONFIG_X86_CPUID) += cpuid.o
+obj-$(CONFIG_MICROCODE) += microcode.o
+c-obj-$(CONFIG_APM) += apm.o
+c-obj-$(CONFIG_X86_SMP) += smp.o smpboot.o
+c-obj-$(CONFIG_X86_TRAMPOLINE) += trampoline.o
+c-obj-$(CONFIG_X86_MPPARSE) += mpparse.o
+c-obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o
+c-obj-$(CONFIG_X86_IO_APIC) += io_apic.o
+c-obj-$(CONFIG_X86_NUMAQ) += numaq.o
+c-obj-$(CONFIG_X86_SUMMIT_NUMA) += summit.o
+c-obj-$(CONFIG_MODULES) += module.o
+c-obj-y += sysenter.o
+obj-y += vsyscall.o
+c-obj-$(CONFIG_ACPI_SRAT) += srat.o
+c-obj-$(CONFIG_HPET_TIMER) += time_hpet.o
+c-obj-$(CONFIG_EFI) += efi.o efi_stub.o
+c-obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+
+EXTRA_AFLAGS := -traditional
+
+c-obj-$(CONFIG_SCx200) += scx200.o
+
+# vsyscall.o contains the vsyscall DSO images as __initdata.
+# We must build both images before we can assemble it.
+# Note: kbuild does not track this dependency due to usage of .incbin
+$(obj)/vsyscall.o: $(obj)/vsyscall-int80.so $(obj)/vsyscall-sysenter.so
+targets += $(foreach F,int80 sysenter,vsyscall-$F.o vsyscall-$F.so)
+targets += vsyscall.lds
+
+# The DSO images are built using a special linker script.
+quiet_cmd_syscall = SYSCALL $@
+ cmd_syscall = $(CC) -nostdlib -m32 $(SYSCFLAGS_$(@F)) \
+ -Wl,-T,$(filter-out FORCE,$^) -o $@
+
+export CPPFLAGS_vsyscall.lds += -P -C -U$(ARCH)
+
+vsyscall-flags = -shared -s -Wl,-soname=linux-gate.so.1
+SYSCFLAGS_vsyscall-sysenter.so = $(vsyscall-flags)
+SYSCFLAGS_vsyscall-int80.so = $(vsyscall-flags)
+
+$(obj)/vsyscall-int80.so $(obj)/vsyscall-sysenter.so: \
+$(obj)/vsyscall-%.so: $(src)/vsyscall.lds $(obj)/vsyscall-%.o FORCE
+ $(call if_changed,syscall)
+
+# We also create a special relocatable object that should mirror the symbol
+# table and layout of the linked DSO. With ld -R we can then refer to
+# these symbols in the kernel code rather than hand-coded addresses.
+extra-y += vsyscall-syms.o
+$(obj)/built-in.o: $(obj)/vsyscall-syms.o
+$(obj)/built-in.o: ld_flags += -R $(obj)/vsyscall-syms.o
+
+SYSCFLAGS_vsyscall-syms.o = -r
+$(obj)/vsyscall-syms.o: $(src)/vsyscall.lds $(obj)/vsyscall-sysenter.o FORCE
+ $(call if_changed,syscall)
+
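+# What the -R link above buys, sketched: kernel C code can then take the
+# address of a symbol defined inside the vsyscall DSO by name (as the
+# signal-delivery code does with __kernel_sigreturn) instead of using a
+# hand-coded address:
+#
+#	extern char __kernel_sigreturn;
+#
+#	void *sigreturn_stub = &__kernel_sigreturn; /* resolved via vsyscall-syms.o */
+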
+c-link := init_task.o
+s-link := vsyscall-int80.o vsyscall-sysenter.o vsyscall-sigreturn.o vsyscall.lds.o
+
+$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-link)) $(patsubst %.o,$(obj)/%.S,$(s-obj-y) $(s-link)):
+ @ln -fsn $(srctree)/arch/i386/kernel/$(notdir $@) $@
+
+$(obj)/vsyscall-int80.S: $(obj)/vsyscall-sigreturn.S
+
+obj-y += $(c-obj-y) $(s-obj-y)
+
+clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-) $(c-link))
+clean-files += $(patsubst %.o,%.S,$(s-obj-y) $(s-obj-) $(s-link))
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/arch/xen/i386/kernel/cpu/Makefile
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/cpu/Makefile Tue Aug 9 15:17:45 2005
@@ -0,0 +1,31 @@
+#
+# Makefile for x86-compatible CPU details and quirks
+#
+
+CFLAGS += -Iarch/i386/kernel/cpu
+
+obj-y := common.o
+c-obj-y += proc.o
+
+c-obj-y += amd.o
+c-obj-y += cyrix.o
+c-obj-y += centaur.o
+c-obj-y += transmeta.o
+c-obj-y += intel.o intel_cacheinfo.o
+c-obj-y += rise.o
+c-obj-y += nexgen.o
+c-obj-y += umc.o
+
+#obj-$(CONFIG_X86_MCE) += ../../../../i386/kernel/cpu/mcheck/
+
+obj-$(CONFIG_MTRR) += mtrr/
+#obj-$(CONFIG_CPU_FREQ) += ../../../../i386/kernel/cpu/cpufreq/
+
+c-link :=
+
+$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-link)):
+ @ln -fsn $(srctree)/arch/i386/kernel/cpu/$(notdir $@) $@
+
+obj-y += $(c-obj-y)
+
+clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-) $(c-link))
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/arch/xen/i386/kernel/cpu/common.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/cpu/common.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,643 @@
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/smp.h>
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <asm/semaphore.h>
+#include <asm/processor.h>
+#include <asm/i387.h>
+#include <asm/msr.h>
+#include <asm/io.h>
+#include <asm/mmu_context.h>
+#ifdef CONFIG_X86_LOCAL_APIC
+#include <asm/mpspec.h>
+#include <asm/apic.h>
+#include <mach_apic.h>
+#endif
+#include <asm-xen/hypervisor.h>
+
+#include "cpu.h"
+
+DEFINE_PER_CPU(struct desc_struct, cpu_gdt_table[GDT_ENTRIES]);
+EXPORT_PER_CPU_SYMBOL(cpu_gdt_table);
+
+static int cachesize_override __initdata = -1;
+static int disable_x86_fxsr __initdata = 0;
+static int disable_x86_serial_nr __initdata = 1;
+
+struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
+
+extern void mcheck_init(struct cpuinfo_x86 *c);
+
+extern void machine_specific_modify_cpu_capabilities(struct cpuinfo_x86 *c);
+
+extern int disable_pse;
+
+static void default_init(struct cpuinfo_x86 * c)
+{
+ /* Not much we can do here... */
+ /* Check if at least it has cpuid */
+ if (c->cpuid_level == -1) {
+ /* No cpuid. It must be an ancient CPU */
+ if (c->x86 == 4)
+ strcpy(c->x86_model_id, "486");
+ else if (c->x86 == 3)
+ strcpy(c->x86_model_id, "386");
+ }
+}
+
+static struct cpu_dev default_cpu = {
+ .c_init = default_init,
+};
+static struct cpu_dev * this_cpu = &default_cpu;
+
+static int __init cachesize_setup(char *str)
+{
+ get_option (&str, &cachesize_override);
+ return 1;
+}
+__setup("cachesize=", cachesize_setup);
+
+int __init get_model_name(struct cpuinfo_x86 *c)
+{
+ unsigned int *v;
+ char *p, *q;
+
+ if (cpuid_eax(0x80000000) < 0x80000004)
+ return 0;
+
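+	/* CPUID 0x80000002..4 each return 16 of the 48 brand-string bytes */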
+ v = (unsigned int *) c->x86_model_id;
+ cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
+ cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
+ cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
+ c->x86_model_id[48] = 0;
+
+ /* Intel chips right-justify this string for some dumb reason;
+ undo that brain damage */
+ p = q = &c->x86_model_id[0];
+ while ( *p == ' ' )
+ p++;
+ if ( p != q ) {
+ while ( *p )
+ *q++ = *p++;
+ while ( q <= &c->x86_model_id[48] )
+ *q++ = '\0'; /* Zero-pad the rest */
+ }
+
+ return 1;
+}
+
+
+void __init display_cacheinfo(struct cpuinfo_x86 *c)
+{
+ unsigned int n, dummy, ecx, edx, l2size;
+
+ n = cpuid_eax(0x80000000);
+
+ if (n >= 0x80000005) {
+ cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
+		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
+ edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
+ c->x86_cache_size=(ecx>>24)+(edx>>24);
+ }
+
+	if (n < 0x80000006)	/* Some chips just have a large L1. */
+ return;
+
+ ecx = cpuid_ecx(0x80000006);
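+	/* CPUID 0x80000006: ECX[31:16] = L2 size in KB, ECX[7:0] = line size */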
+ l2size = ecx >> 16;
+
+ /* do processor-specific cache resizing */
+ if (this_cpu->c_size_cache)
+ l2size = this_cpu->c_size_cache(c,l2size);
+
+ /* Allow user to override all this if necessary. */
+ if (cachesize_override != -1)
+ l2size = cachesize_override;
+
+ if ( l2size == 0 )
+ return; /* Again, no L2 cache is possible */
+
+ c->x86_cache_size = l2size;
+
+ printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
+ l2size, ecx & 0xFF);
+}
+
+/* Naming convention should be: <Name> [(<Codename>)] */
+/* This table is only used if init_<vendor>() below doesn't set the model name; */
+/* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */
+
+/* Look up CPU names by table lookup. */
+static char __init *table_lookup_model(struct cpuinfo_x86 *c)
+{
+ struct cpu_model_info *info;
+
+ if ( c->x86_model >= 16 )
+ return NULL; /* Range check */
+
+ if (!this_cpu)
+ return NULL;
+
+ info = this_cpu->c_models;
+
+ while (info && info->family) {
+ if (info->family == c->x86)
+ return info->model_names[c->x86_model];
+ info++;
+ }
+ return NULL; /* Not found */
+}
+
+
+void __init get_cpu_vendor(struct cpuinfo_x86 *c, int early)
+{
+ char *v = c->x86_vendor_id;
+ int i;
+
+ for (i = 0; i < X86_VENDOR_NUM; i++) {
+ if (cpu_devs[i]) {
+ if (!strcmp(v,cpu_devs[i]->c_ident[0]) ||
+ (cpu_devs[i]->c_ident[1] &&
+ !strcmp(v,cpu_devs[i]->c_ident[1]))) {
+ c->x86_vendor = i;
+ if (!early)
+ this_cpu = cpu_devs[i];
+ break;
+ }
+ }
+ }
+}
+
+
+static int __init x86_fxsr_setup(char * s)
+{
+ disable_x86_fxsr = 1;
+ return 1;
+}
+__setup("nofxsr", x86_fxsr_setup);
+
+
+/* Standard macro to see if a specific flag is changeable */
+static inline int flag_is_changeable_p(u32 flag)
+{
+ u32 f1, f2;
+
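+	/* Toggle the requested EFLAGS bit (e.g. EFLAGS.ID, bit 21) and read
+	 * the flags back: the bit sticks only on CPUs that implement it. */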
+ asm("pushfl\n\t"
+ "pushfl\n\t"
+ "popl %0\n\t"
+ "movl %0,%1\n\t"
+ "xorl %2,%0\n\t"
+ "pushl %0\n\t"
+ "popfl\n\t"
+ "pushfl\n\t"
+ "popl %0\n\t"
+ "popfl\n\t"
+ : "=&r" (f1), "=&r" (f2)
+ : "ir" (flag));
+
+ return ((f1^f2) & flag) != 0;
+}
+
+
+/* Probe for the CPUID instruction */
+int __init have_cpuid_p(void)
+{
+ return flag_is_changeable_p(X86_EFLAGS_ID);
+}
+
+/* Do minimum CPU detection early.
+   Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
+ The others are not touched to avoid unwanted side effects. */
+void __init early_cpu_detect(void)
+{
+ struct cpuinfo_x86 *c = &boot_cpu_data;
+
+ c->x86_cache_alignment = 32;
+
+ if (!have_cpuid_p())
+ return;
+
+ /* Get vendor name */
+ cpuid(0x00000000, &c->cpuid_level,
+ (int *)&c->x86_vendor_id[0],
+ (int *)&c->x86_vendor_id[8],
+ (int *)&c->x86_vendor_id[4]);
+
+ get_cpu_vendor(c, 1);
+
+ c->x86 = 4;
+ if (c->cpuid_level >= 0x00000001) {
+ u32 junk, tfms, cap0, misc;
+ cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
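+		/* tfms (EAX): stepping[3:0], model[7:4], family[11:8],
+		 * extended model[19:16], extended family[27:20] */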
+ c->x86 = (tfms >> 8) & 15;
+ c->x86_model = (tfms >> 4) & 15;
+ if (c->x86 == 0xf) {
+ c->x86 += (tfms >> 20) & 0xff;
+ c->x86_model += ((tfms >> 16) & 0xF) << 4;
+ }
+ c->x86_mask = tfms & 15;
+ if (cap0 & (1<<19))
+ c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
+ }
+
+ early_intel_workaround(c);
+}
+
+void __init generic_identify(struct cpuinfo_x86 * c)
+{
+ u32 tfms, xlvl;
+ int junk;
+
+ if (have_cpuid_p()) {
+ /* Get vendor name */
+ cpuid(0x00000000, &c->cpuid_level,
+ (int *)&c->x86_vendor_id[0],
+ (int *)&c->x86_vendor_id[8],
+ (int *)&c->x86_vendor_id[4]);
+
+ get_cpu_vendor(c, 0);
+ /* Initialize the standard set of capabilities */
+ /* Note that the vendor-specific code below might override */
+
+ /* Intel-defined flags: level 0x00000001 */
+ if ( c->cpuid_level >= 0x00000001 ) {
+ u32 capability, excap;
+ cpuid(0x00000001, &tfms, &junk, &excap, &capability);
+ c->x86_capability[0] = capability;
+ c->x86_capability[4] = excap;
+ c->x86 = (tfms >> 8) & 15;
+ c->x86_model = (tfms >> 4) & 15;
+ if (c->x86 == 0xf) {
+ c->x86 += (tfms >> 20) & 0xff;
+ c->x86_model += ((tfms >> 16) & 0xF) << 4;
+ }
+ c->x86_mask = tfms & 15;
+ } else {
+ /* Have CPUID level 0 only - unheard of */
+ c->x86 = 4;
+ }
+
+ /* AMD-defined flags: level 0x80000001 */
+ xlvl = cpuid_eax(0x80000000);
+ if ( (xlvl & 0xffff0000) == 0x80000000 ) {
+ if ( xlvl >= 0x80000001 ) {
+ c->x86_capability[1] = cpuid_edx(0x80000001);
+ c->x86_capability[6] = cpuid_ecx(0x80000001);
+ }
+ if ( xlvl >= 0x80000004 )
+ get_model_name(c); /* Default name */
+ }
+ }
+}
+
+static void __init squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
+{
+ if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) {
+ /* Disable processor serial number */
+ unsigned long lo,hi;
+ rdmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
+ lo |= 0x200000;
+ wrmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
+ printk(KERN_NOTICE "CPU serial number disabled.\n");
+ clear_bit(X86_FEATURE_PN, c->x86_capability);
+
+ /* Disabling the serial number may affect the cpuid level */
+ c->cpuid_level = cpuid_eax(0);
+ }
+}
+
+static int __init x86_serial_nr_setup(char *s)
+{
+ disable_x86_serial_nr = 0;
+ return 1;
+}
+__setup("serialnumber", x86_serial_nr_setup);
+
+
+
+/*
+ * This does the hard work of actually picking apart the CPU stuff...
+ */
+void __init identify_cpu(struct cpuinfo_x86 *c)
+{
+ int i;
+
+ c->loops_per_jiffy = loops_per_jiffy;
+ c->x86_cache_size = -1;
+ c->x86_vendor = X86_VENDOR_UNKNOWN;
+ c->cpuid_level = -1; /* CPUID not detected */
+ c->x86_model = c->x86_mask = 0; /* So far unknown... */
+ c->x86_vendor_id[0] = '\0'; /* Unset */
+ c->x86_model_id[0] = '\0'; /* Unset */
+ c->x86_num_cores = 1;
+ memset(&c->x86_capability, 0, sizeof c->x86_capability);
+
+ if (!have_cpuid_p()) {
+ /* First of all, decide if this is a 486 or higher */
+ /* It's a 486 if we can modify the AC flag */
+ if ( flag_is_changeable_p(X86_EFLAGS_AC) )
+ c->x86 = 4;
+ else
+ c->x86 = 3;
+ }
+
+ generic_identify(c);
+
+ printk(KERN_DEBUG "CPU: After generic identify, caps:");
+ for (i = 0; i < NCAPINTS; i++)
+ printk(" %08lx", c->x86_capability[i]);
+ printk("\n");
+
+ if (this_cpu->c_identify) {
+ this_cpu->c_identify(c);
+
+ printk(KERN_DEBUG "CPU: After vendor identify, caps:");
+ for (i = 0; i < NCAPINTS; i++)
+ printk(" %08lx", c->x86_capability[i]);
+ printk("\n");
+ }
+
+ /*
+ * Vendor-specific initialization. In this section we
+ * canonicalize the feature flags, meaning if there are
+ * features a certain CPU supports which CPUID doesn't
+ * tell us, CPUID claiming incorrect flags, or other bugs,
+ * we handle them here.
+ *
+ * At the end of this section, c->x86_capability better
+ * indicate the features this CPU genuinely supports!
+ */
+ if (this_cpu->c_init)
+ this_cpu->c_init(c);
+
+ /* Disable the PN if appropriate */
+ squash_the_stupid_serial_number(c);
+
+ /*
+ * The vendor-specific functions might have changed features. Now
+ * we do "generic changes."
+ */
+
+ /* TSC disabled? */
+ if ( tsc_disable )
+ clear_bit(X86_FEATURE_TSC, c->x86_capability);
+
+ /* FXSR disabled? */
+ if (disable_x86_fxsr) {
+ clear_bit(X86_FEATURE_FXSR, c->x86_capability);
+ clear_bit(X86_FEATURE_XMM, c->x86_capability);
+ }
+
+ if (disable_pse)
+ clear_bit(X86_FEATURE_PSE, c->x86_capability);
+
+ /* If the model name is still unset, do table lookup. */
+ if ( !c->x86_model_id[0] ) {
+ char *p;
+ p = table_lookup_model(c);
+ if ( p )
+ strcpy(c->x86_model_id, p);
+ else
+ /* Last resort... */
+ sprintf(c->x86_model_id, "%02x/%02x",
+ c->x86_vendor, c->x86_model);
+ }
+
+ machine_specific_modify_cpu_capabilities(c);
+
+ /* Now the feature flags better reflect actual CPU features! */
+
+ printk(KERN_DEBUG "CPU: After all inits, caps:");
+ for (i = 0; i < NCAPINTS; i++)
+ printk(" %08lx", c->x86_capability[i]);
+ printk("\n");
+
+ /*
+ * On SMP, boot_cpu_data holds the common feature set between
+ * all CPUs; so make sure that we indicate which features are
+ * common between the CPUs. The first time this routine gets
+ * executed, c == &boot_cpu_data.
+ */
+ if ( c != &boot_cpu_data ) {
+ /* AND the already accumulated flags with these */
+ for ( i = 0 ; i < NCAPINTS ; i++ )
+ boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
+ }
+
+ /* Init Machine Check Exception if available. */
+#ifdef CONFIG_X86_MCE
+ mcheck_init(c);
+#endif
+}
+/*
+ * Perform early boot up checks for a valid TSC. See arch/i386/kernel/time.c
+ */
+
+void __init dodgy_tsc(void)
+{
+ if (( boot_cpu_data.x86_vendor == X86_VENDOR_CYRIX ) ||
+ ( boot_cpu_data.x86_vendor == X86_VENDOR_NSC ))
+ cpu_devs[X86_VENDOR_CYRIX]->c_init(&boot_cpu_data);
+}
+
+#ifdef CONFIG_X86_HT
+void __init detect_ht(struct cpuinfo_x86 *c)
+{
+ u32 eax, ebx, ecx, edx;
+ int index_lsb, index_msb, tmp;
+ int cpu = smp_processor_id();
+
+ if (!cpu_has(c, X86_FEATURE_HT))
+ return;
+
+ cpuid(1, &eax, &ebx, &ecx, &edx);
+ smp_num_siblings = (ebx & 0xff0000) >> 16;
+
+ if (smp_num_siblings == 1) {
+ printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
+ } else if (smp_num_siblings > 1 ) {
+ index_lsb = 0;
+ index_msb = 31;
+
+ if (smp_num_siblings > NR_CPUS) {
+ printk(KERN_WARNING "CPU: Unsupported number of the
siblings %d", smp_num_siblings);
+ smp_num_siblings = 1;
+ return;
+ }
+ tmp = smp_num_siblings;
+ while ((tmp & 1) == 0) {
+ tmp >>=1 ;
+ index_lsb++;
+ }
+ tmp = smp_num_siblings;
+ while ((tmp & 0x80000000 ) == 0) {
+ tmp <<=1 ;
+ index_msb--;
+ }
+ if (index_lsb != index_msb )
+ index_msb++;
+ phys_proc_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
+
+ printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
+ phys_proc_id[cpu]);
+ }
+}
+#endif
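The two shift loops compute ceil(log2(smp_num_siblings)): index_lsb counts trailing zero bits, index_msb locates the highest set bit, and the final increment rounds up when the sibling count is not a power of two. A small standalone model of the same arithmetic (values chosen for illustration):

    /* With siblings = 2: lsb = 1, msb = 1, so the package ID is apicid >> 1. */
    unsigned int siblings = 2, tmp, lsb = 0, msb = 31;
    for (tmp = siblings; (tmp & 1) == 0; tmp >>= 1)
            lsb++;
    for (tmp = siblings; (tmp & 0x80000000) == 0; tmp <<= 1)
            msb--;
    if (lsb != msb)        /* non-power-of-two counts round up */
            msb++;
    /* phys_proc_id = (initial APIC ID from ebx[31:24]) >> msb */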
+
+void __init print_cpu_info(struct cpuinfo_x86 *c)
+{
+ char *vendor = NULL;
+
+ if (c->x86_vendor < X86_VENDOR_NUM)
+ vendor = this_cpu->c_vendor;
+ else if (c->cpuid_level >= 0)
+ vendor = c->x86_vendor_id;
+
+ if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
+ printk("%s ", vendor);
+
+ if (!c->x86_model_id[0])
+ printk("%d86", c->x86);
+ else
+ printk("%s", c->x86_model_id);
+
+ if (c->x86_mask || c->cpuid_level >= 0)
+ printk(" stepping %02x\n", c->x86_mask);
+ else
+ printk("\n");
+}
+
+cpumask_t cpu_initialized __initdata = CPU_MASK_NONE;
+
+/* This is hacky. :)
+ * We're emulating future behavior.
+ * In the future, the cpu-specific init functions will be called implicitly
+ * via the magic of initcalls.
+ * They will insert themselves into the cpu_devs structure.
+ * Then, when cpu_init() is called, we can just iterate over that array.
+ */
+
+extern int intel_cpu_init(void);
+extern int cyrix_init_cpu(void);
+extern int nsc_init_cpu(void);
+extern int amd_init_cpu(void);
+extern int centaur_init_cpu(void);
+extern int transmeta_init_cpu(void);
+extern int rise_init_cpu(void);
+extern int nexgen_init_cpu(void);
+extern int umc_init_cpu(void);
+void early_cpu_detect(void);
+
+void __init early_cpu_init(void)
+{
+ intel_cpu_init();
+ cyrix_init_cpu();
+ nsc_init_cpu();
+ amd_init_cpu();
+ centaur_init_cpu();
+ transmeta_init_cpu();
+ rise_init_cpu();
+ nexgen_init_cpu();
+ umc_init_cpu();
+ early_cpu_detect();
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+ /* pse is not compatible with on-the-fly unmapping,
+ * disable it even if the cpus claim to support it.
+ */
+ clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
+ disable_pse = 1;
+#endif
+}
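The registration pattern described in the comment block above looks roughly like this in each vendor file (an illustrative sketch; init_intel is a hypothetical hook name, while the cpu_dev fields follow the 2.6 cpu.h layout):

    static struct cpu_dev intel_cpu_dev __initdata = {
            .c_vendor = "Intel",
            .c_ident  = { "GenuineIntel" },
            .c_init   = init_intel,      /* hypothetical vendor init hook */
    };

    int __init intel_cpu_init(void)
    {
            cpu_devs[X86_VENDOR_INTEL] = &intel_cpu_dev;
            return 0;
    }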
+
+void __init cpu_gdt_init(struct Xgt_desc_struct *gdt_descr)
+{
+ unsigned long frames[gdt_descr->size >> PAGE_SHIFT];
+ unsigned long va;
+ int f;
+
+ for (va = gdt_descr->address, f = 0;
+ va < gdt_descr->address + gdt_descr->size;
+ va += PAGE_SIZE, f++) {
+ frames[f] = virt_to_machine(va) >> PAGE_SHIFT;
+ make_page_readonly((void *)va);
+ }
+ flush_page_update_queue();
+ if (HYPERVISOR_set_gdt(frames, gdt_descr->size / 8))
+ BUG();
+ lgdt_finish();
+}
+
+/*
+ * cpu_init() initializes state that is per-CPU. Some data is already
+ * initialized (naturally) in the bootstrap process, such as the GDT
+ * and IDT. We reload them nevertheless, this function acts as a
+ * 'CPU state barrier', nothing should get across.
+ */
+void __init cpu_init (void)
+{
+ int cpu = smp_processor_id();
+ struct tss_struct * t = &per_cpu(init_tss, cpu);
+ struct thread_struct *thread = &current->thread;
+
+ if (cpu_test_and_set(cpu, cpu_initialized)) {
+ printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
+ for (;;) local_irq_enable();
+ }
+ printk(KERN_INFO "Initializing CPU#%d\n", cpu);
+
+ if (cpu_has_vme || cpu_has_de)
+ clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
+ if (tsc_disable && cpu_has_tsc) {
+ printk(KERN_NOTICE "Disabling TSC...\n");
+ /**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
+ clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
+ set_in_cr4(X86_CR4_TSD);
+ }
+
+ /*
+ * Set up the per-thread TLS descriptor cache:
+ */
+ memcpy(thread->tls_array, &get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN],
+ GDT_ENTRY_TLS_ENTRIES * 8);
+
+ cpu_gdt_init(&cpu_gdt_descr[cpu]);
+
+ /*
+ * Delete NT
+ */
+ __asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl");
+
+ /*
+ * Set up and load the per-CPU TSS and LDT
+ */
+ atomic_inc(&init_mm.mm_count);
+ current->active_mm = &init_mm;
+ if (current->mm)
+ BUG();
+ enter_lazy_tlb(&init_mm, current);
+
+ load_esp0(t, thread);
+
+ load_LDT(&init_mm.context);
+
+ /* Clear %fs and %gs. */
+ asm volatile ("xorl %eax, %eax; movl %eax, %fs; movl %eax, %gs");
+
+ /* Clear all 6 debug registers: */
+
+#define CD(register) HYPERVISOR_set_debugreg(register, 0)
+
+ CD(0); CD(1); CD(2); CD(3); /* no db4 and db5 */; CD(6); CD(7);
+
+#undef CD
+
+ /*
+ * Force FPU initialization:
+ */
+ current_thread_info()->status = 0;
+ clear_used_math();
+ mxcsr_feature_mask_init();
+}
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/arch/xen/i386/kernel/cpu/mtrr/Makefile
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/cpu/mtrr/Makefile Tue Aug 9 15:17:45 2005
@@ -0,0 +1,16 @@
+obj-y := main.o
+c-obj-y := if.o
+
+c-link :=
+
+$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-link)): $(obj)/mtrr.h
+ @ln -fsn $(srctree)/arch/i386/kernel/cpu/mtrr/$(notdir $@) $@
+
+$(patsubst %.o,$(obj)/%.c,$(obj-y)): $(obj)/mtrr.h
+
+$(obj)/mtrr.h:
+ @ln -fsn $(srctree)/arch/i386/kernel/cpu/mtrr/mtrr.h $@
+
+obj-y += $(c-obj-y)
+
+clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-) $(c-link))
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/arch/xen/i386/kernel/cpu/mtrr/main.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/cpu/mtrr/main.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,165 @@
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <linux/ctype.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <asm/uaccess.h>
+
+#include <asm/mtrr.h>
+#include "mtrr.h"
+
+void generic_get_mtrr(unsigned int reg, unsigned long *base,
+ unsigned int *size, mtrr_type * type)
+{
+ dom0_op_t op;
+
+ op.cmd = DOM0_READ_MEMTYPE;
+ op.u.read_memtype.reg = reg;
+ (void)HYPERVISOR_dom0_op(&op);
+
+ *size = op.u.read_memtype.nr_pfns;
+ *base = op.u.read_memtype.pfn;
+ *type = op.u.read_memtype.type;
+}
+
+struct mtrr_ops generic_mtrr_ops = {
+ .use_intel_if = 1,
+ .get = generic_get_mtrr,
+};
+
+struct mtrr_ops *mtrr_if = &generic_mtrr_ops;
+unsigned int num_var_ranges;
+unsigned int *usage_table;
+
+void __init set_num_var_ranges(void)
+{
+ dom0_op_t op;
+
+ for (num_var_ranges = 0; ; num_var_ranges++) {
+ op.cmd = DOM0_READ_MEMTYPE;
+ op.u.read_memtype.reg = num_var_ranges;
+ if (HYPERVISOR_dom0_op(&op) != 0)
+ break;
+ }
+}
+
+static void __init init_table(void)
+{
+ int i, max;
+
+ max = num_var_ranges;
+ if ((usage_table = kmalloc(max * sizeof *usage_table, GFP_KERNEL))
+ == NULL) {
+ printk(KERN_ERR "mtrr: could not allocate\n");
+ return;
+ }
+ for (i = 0; i < max; i++)
+ usage_table[i] = 0;
+}
+
+int mtrr_add_page(unsigned long base, unsigned long size,
+ unsigned int type, char increment)
+{
+ int error;
+ dom0_op_t op;
+
+ op.cmd = DOM0_ADD_MEMTYPE;
+ op.u.add_memtype.pfn = base;
+ op.u.add_memtype.nr_pfns = size;
+ op.u.add_memtype.type = type;
+ if ((error = HYPERVISOR_dom0_op(&op)))
+ return error;
+
+ if (increment)
+ ++usage_table[op.u.add_memtype.reg];
+
+ return op.u.add_memtype.reg;
+}
+
+int
+mtrr_add(unsigned long base, unsigned long size, unsigned int type,
+ char increment)
+{
+ if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
+ printk(KERN_WARNING "mtrr: size and base must be multiples of 4
kiB\n");
+ printk(KERN_DEBUG "mtrr: size: 0x%lx base: 0x%lx\n", size,
base);
+ return -EINVAL;
+ }
+ return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
+ increment);
+}
+
+int mtrr_del_page(int reg, unsigned long base, unsigned long size)
+{
+ int i, max;
+ mtrr_type ltype;
+ unsigned long lbase;
+ unsigned int lsize;
+ int error = -EINVAL;
+ dom0_op_t op;
+
+ max = num_var_ranges;
+ if (reg < 0) {
+ /* Search for existing MTRR */
+ for (i = 0; i < max; ++i) {
+ mtrr_if->get(i, &lbase, &lsize, &ltype);
+ if (lbase == base && lsize == size) {
+ reg = i;
+ break;
+ }
+ }
+ if (reg < 0) {
+ printk(KERN_DEBUG "mtrr: no MTRR for %lx000,%lx000
found\n", base,
+ size);
+ goto out;
+ }
+ }
+ if (usage_table[reg] < 1) {
+ printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg);
+ goto out;
+ }
+ if (--usage_table[reg] < 1) {
+ op.cmd = DOM0_DEL_MEMTYPE;
+ op.u.del_memtype.handle = 0;
+ op.u.del_memtype.reg = reg;
+ (void)HYPERVISOR_dom0_op(&op);
+ }
+ error = reg;
+ out:
+ return error;
+}
+
+int
+mtrr_del(int reg, unsigned long base, unsigned long size)
+{
+ if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
+ printk(KERN_INFO "mtrr: size and base must be multiples of 4
kiB\n");
+ printk(KERN_DEBUG "mtrr: size: 0x%lx base: 0x%lx\n", size,
base);
+ return -EINVAL;
+ }
+ return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
+}
+
+EXPORT_SYMBOL(mtrr_add);
+EXPORT_SYMBOL(mtrr_del);
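Drivers call the exported pair exactly as on native Linux; only the backend differs, issuing DOM0_ADD_MEMTYPE/DOM0_DEL_MEMTYPE hypercalls instead of programming the MSRs directly. An illustrative caller (the address and size are chosen arbitrarily):

    /* Mark a 4MB framebuffer at 0xf8000000 write-combining. */
    int reg = mtrr_add(0xf8000000UL, 0x400000UL, MTRR_TYPE_WRCOMB, 1);
    if (reg < 0)
            printk(KERN_WARNING "mtrr_add failed: %d\n", reg);
    /* ... use the region ... */
    if (reg >= 0)
            mtrr_del(reg, 0xf8000000UL, 0x400000UL);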
+
+static int __init mtrr_init(void)
+{
+ struct cpuinfo_x86 *c = &boot_cpu_data;
+
+ if (!(xen_start_info.flags & SIF_PRIVILEGED))
+ return -ENODEV;
+
+ if ((!cpu_has(c, X86_FEATURE_MTRR)) &&
+ (!cpu_has(c, X86_FEATURE_K6_MTRR)) &&
+ (!cpu_has(c, X86_FEATURE_CYRIX_ARR)) &&
+ (!cpu_has(c, X86_FEATURE_CENTAUR_MCR)))
+ return -ENODEV;
+
+ set_num_var_ranges();
+ init_table();
+
+ return 0;
+}
+
+subsys_initcall(mtrr_init);
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/arch/xen/i386/kernel/entry.S
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/entry.S Tue Aug 9 15:17:45 2005
@@ -0,0 +1,1031 @@
+/*
+ * linux/arch/i386/entry.S
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+/*
+ * entry.S contains the system-call and fault low-level handling routines.
+ * This also contains the timer-interrupt handler, as well as all interrupts
+ * and faults that can result in a task-switch.
+ *
+ * NOTE: This code handles signal-recognition, which happens every time
+ * after a timer-interrupt and after each system call.
+ *
+ * I changed all the .align's to 4 (16 byte alignment), as that's faster
+ * on a 486.
+ *
+ * Stack layout in 'ret_from_system_call':
+ * ptrace needs to have all regs on the stack.
+ * if the order here is changed, it needs to be
+ * updated in fork.c:copy_process, signal.c:do_signal,
+ * ptrace.c and ptrace.h
+ *
+ * 0(%esp) - %ebx
+ * 4(%esp) - %ecx
+ * 8(%esp) - %edx
+ * C(%esp) - %esi
+ * 10(%esp) - %edi
+ * 14(%esp) - %ebp
+ * 18(%esp) - %eax
+ * 1C(%esp) - %ds
+ * 20(%esp) - %es
+ * 24(%esp) - orig_eax
+ * 28(%esp) - %eip
+ * 2C(%esp) - %cs
+ * 30(%esp) - %eflags
+ * 34(%esp) - %oldesp
+ * 38(%esp) - %oldss
+ *
+ * "current" is in register %ebx during any slow entries.
+ */
+
+#include <linux/config.h>
+#include <linux/linkage.h>
+#include <asm/thread_info.h>
+#include <asm/errno.h>
+#include <asm/segment.h>
+#include <asm/smp.h>
+#include <asm/page.h>
+#include "irq_vectors.h"
+#include <asm-xen/xen-public/xen.h>
+
+#define nr_syscalls ((syscall_table_size)/4)
+
+EBX = 0x00
+ECX = 0x04
+EDX = 0x08
+ESI = 0x0C
+EDI = 0x10
+EBP = 0x14
+EAX = 0x18
+DS = 0x1C
+ES = 0x20
+ORIG_EAX = 0x24
+EIP = 0x28
+CS = 0x2C
+EVENT_MASK = 0x2E
+EFLAGS = 0x30
+OLDESP = 0x34
+OLDSS = 0x38
+
+CF_MASK = 0x00000001
+TF_MASK = 0x00000100
+IF_MASK = 0x00000200
+DF_MASK = 0x00000400
+NT_MASK = 0x00004000
+VM_MASK = 0x00020000
+
+/* Offsets into shared_info_t. */
+#define evtchn_upcall_pending /* 0 */
+#define evtchn_upcall_mask 1
+
+#define XEN_GET_VCPU_INFO(reg) movl HYPERVISOR_shared_info,reg
+#define XEN_BLOCK_EVENTS(reg) movb $1,evtchn_upcall_mask(reg)
+#define XEN_UNBLOCK_EVENTS(reg) movb $0,evtchn_upcall_mask(reg)
+#define XEN_SAVE_UPCALL_MASK(reg,tmp,off) \
+ movb evtchn_upcall_mask(reg), tmp; \
+ movb tmp, off(%esp)
+
+#define XEN_TEST_PENDING(reg) testb $0xFF,evtchn_upcall_pending(reg)
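In C terms these macros reduce to byte loads and stores on the shared info page (a sketch; the vcpu_data field name is assumed from the xen-public headers of this era, and the fixed offsets 0 and 1 above rely on VCPU 0's info sitting at the start of the structure):

    shared_info_t *s = HYPERVISOR_shared_info;

    s->vcpu_data[0].evtchn_upcall_mask = 1;       /* XEN_BLOCK_EVENTS   */
    s->vcpu_data[0].evtchn_upcall_mask = 0;       /* XEN_UNBLOCK_EVENTS */
    if (s->vcpu_data[0].evtchn_upcall_pending)    /* XEN_TEST_PENDING   */
            /* more events to deliver */;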
+
+#ifdef CONFIG_PREEMPT
+#define preempt_stop XEN_BLOCK_EVENTS(%esi)
+#else
+#define preempt_stop
+#define resume_kernel restore_all
+#endif
+
+#define SAVE_ALL_NO_EVENTMASK \
+ cld; \
+ pushl %es; \
+ pushl %ds; \
+ pushl %eax; \
+ pushl %ebp; \
+ pushl %edi; \
+ pushl %esi; \
+ pushl %edx; \
+ pushl %ecx; \
+ pushl %ebx; \
+ movl $(__USER_DS), %edx; \
+ movl %edx, %ds; \
+ movl %edx, %es;
+
+#define SAVE_ALL \
+ SAVE_ALL_NO_EVENTMASK; \
+ XEN_GET_VCPU_INFO(%esi); \
+ XEN_SAVE_UPCALL_MASK(%esi,%dl,EVENT_MASK)
+
+#define RESTORE_INT_REGS \
+ popl %ebx; \
+ popl %ecx; \
+ popl %edx; \
+ popl %esi; \
+ popl %edi; \
+ popl %ebp; \
+ popl %eax
+
+#define RESTORE_REGS \
+ RESTORE_INT_REGS; \
+1: popl %ds; \
+2: popl %es; \
+.section .fixup,"ax"; \
+3: movl $0,(%esp); \
+ jmp 1b; \
+4: movl $0,(%esp); \
+ jmp 2b; \
+.previous; \
+.section __ex_table,"a";\
+ .align 4; \
+ .long 1b,3b; \
+ .long 2b,4b; \
+.previous
+
+
+#define RESTORE_ALL \
+ RESTORE_REGS \
+ addl $4, %esp; \
+1: iret; \
+.section .fixup,"ax"; \
+2: movl $(__USER_DS), %edx; \
+ movl %edx, %ds; \
+ movl %edx, %es; \
+ movl $11,%eax; \
+ call do_exit; \
+.previous; \
+.section __ex_table,"a";\
+ .align 4; \
+ .long 1b,2b; \
+.previous
+
+
+ENTRY(ret_from_fork)
+ pushl %eax
+ call schedule_tail
+ GET_THREAD_INFO(%ebp)
+ popl %eax
+ XEN_GET_VCPU_INFO(%esi)
+ jmp syscall_exit
+
+/*
+ * Return to user mode is not as complex as all this looks,
+ * but we want the default path for a system call return to
+ * go as quickly as possible which is why some of this is
+ * less clear than it otherwise should be.
+ */
+
+ # userspace resumption stub bypassing syscall exit tracing
+ ALIGN
+ret_from_exception:
+ preempt_stop
+ret_from_intr:
+ GET_THREAD_INFO(%ebp)
+ movl EFLAGS(%esp), %eax # mix EFLAGS and CS
+ movb CS(%esp), %al
+ testl $(VM_MASK | 2), %eax
+ jz resume_kernel # returning to kernel or vm86-space
+ENTRY(resume_userspace)
+ XEN_GET_VCPU_INFO(%esi)
+ XEN_BLOCK_EVENTS(%esi) # make sure we don't miss an interrupt
+ # setting need_resched or sigpending
+ # between sampling and the iret
+ movl TI_flags(%ebp), %ecx
+ andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
+ # int/exception return?
+ jne work_pending
+ jmp restore_all
+
+#ifdef CONFIG_PREEMPT
+ENTRY(resume_kernel)
+ XEN_GET_VCPU_INFO(%esi)
+ XEN_BLOCK_EVENTS(%esi)
+ cmpl $0,TI_preempt_count(%ebp) # non-zero preempt_count ?
+ jnz restore_all
+need_resched:
+ movl TI_flags(%ebp), %ecx # need_resched set ?
+ testb $_TIF_NEED_RESCHED, %cl
+ jz restore_all
+ testb $0xFF,EVENT_MASK(%esp) # interrupts off (exception path) ?
+ jnz restore_all
+ call preempt_schedule_irq
+ jmp need_resched
+#endif
+
+/* SYSENTER_RETURN points to after the "sysenter" instruction in
+ the vsyscall page. See vsyscall-sysenter.S, which defines the symbol. */
+
+ # sysenter call handler stub
+ENTRY(sysenter_entry)
+ movl TSS_sysenter_esp0(%esp),%esp
+sysenter_past_esp:
+ sti
+ pushl $(__USER_DS)
+ pushl %ebp
+ pushfl
+ pushl $(__USER_CS)
+ pushl $SYSENTER_RETURN
+
+/*
+ * Load the potential sixth argument from user stack.
+ * Careful about security.
+ */
+ cmpl $__PAGE_OFFSET-3,%ebp
+ jae syscall_fault
+1: movl (%ebp),%ebp
+.section __ex_table,"a"
+ .align 4
+ .long 1b,syscall_fault
+.previous
+
+ pushl %eax
+ SAVE_ALL
+ GET_THREAD_INFO(%ebp)
+
+ testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
+ jnz syscall_trace_entry
+ cmpl $(nr_syscalls), %eax
+ jae syscall_badsys
+ call *sys_call_table(,%eax,4)
+ movl %eax,EAX(%esp)
+ cli
+ movl TI_flags(%ebp), %ecx
+ testw $_TIF_ALLWORK_MASK, %cx
+ jne syscall_exit_work
+/* if something modifies registers it must also disable sysexit */
+ movl EIP(%esp), %edx
+ movl OLDESP(%esp), %ecx
+ xorl %ebp,%ebp
+ sti
+ sysexit
+
+
+ # system call handler stub
+ENTRY(system_call)
+ pushl %eax # save orig_eax
+ SAVE_ALL
+ GET_THREAD_INFO(%ebp)
+ # system call tracing in operation
+ testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
+ jnz syscall_trace_entry
+ cmpl $(nr_syscalls), %eax
+ jae syscall_badsys
+syscall_call:
+ call *sys_call_table(,%eax,4)
+ movl %eax,EAX(%esp) # store the return value
+syscall_exit:
+ XEN_BLOCK_EVENTS(%esi) # make sure we don't miss an interrupt
+ # setting need_resched or sigpending
+ # between sampling and the iret
+ movl TI_flags(%ebp), %ecx
+ testw $_TIF_ALLWORK_MASK, %cx # current->work
+ jne syscall_exit_work
+restore_all:
+ testl $VM_MASK, EFLAGS(%esp)
+ jnz resume_vm86
+ movb EVENT_MASK(%esp), %al
+ notb %al # %al == ~saved_mask
+ andb evtchn_upcall_mask(%esi),%al
+ andb $1,%al # %al == mask & ~saved_mask
+ jnz restore_all_enable_events # != 0 => reenable event delivery
+ RESTORE_ALL
+
+resume_vm86:
+ XEN_UNBLOCK_EVENTS(%esi)
+ RESTORE_REGS
+ movl %eax,(%esp)
+ movl $__HYPERVISOR_switch_vm86,%eax
+ int $0x82
+ ud2
+
+ # perform work that needs to be done immediately before resumption
+ ALIGN
+work_pending:
+ testb $_TIF_NEED_RESCHED, %cl
+ jz work_notifysig
+work_resched:
+ call schedule
+ XEN_BLOCK_EVENTS(%esi) # make sure we don't miss an interrupt
+ # setting need_resched or sigpending
+ # between sampling and the iret
+ movl TI_flags(%ebp), %ecx
+ andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
+ # than syscall tracing?
+ jz restore_all
+ testb $_TIF_NEED_RESCHED, %cl
+ jnz work_resched
+
+work_notifysig: # deal with pending signals and
+ # notify-resume requests
+ testl $VM_MASK, EFLAGS(%esp)
+ movl %esp, %eax
+ jne work_notifysig_v86 # returning to kernel-space or
+ # vm86-space
+ xorl %edx, %edx
+ call do_notify_resume
+ jmp restore_all
+
+ ALIGN
+work_notifysig_v86:
+ pushl %ecx # save ti_flags for do_notify_resume
+ call save_v86_state # %eax contains pt_regs pointer
+ popl %ecx
+ movl %eax, %esp
+ xorl %edx, %edx
+ call do_notify_resume
+ jmp restore_all
+
+ # perform syscall exit tracing
+ ALIGN
+syscall_trace_entry:
+ movl $-ENOSYS,EAX(%esp)
+ movl %esp, %eax
+ xorl %edx,%edx
+ call do_syscall_trace
+ movl ORIG_EAX(%esp), %eax
+ cmpl $(nr_syscalls), %eax
+ jnae syscall_call
+ jmp syscall_exit
+
+ # perform syscall exit tracing
+ ALIGN
+syscall_exit_work:
+ testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
+ jz work_pending
+ XEN_UNBLOCK_EVENTS(%esi) # could let do_syscall_trace() call
+ # schedule() instead
+ movl %esp, %eax
+ movl $1, %edx
+ call do_syscall_trace
+ jmp resume_userspace
+
+ ALIGN
+syscall_fault:
+ pushl %eax # save orig_eax
+ SAVE_ALL
+ GET_THREAD_INFO(%ebp)
+ movl $-EFAULT,EAX(%esp)
+ jmp resume_userspace
+
+ ALIGN
+syscall_badsys:
+ movl $-ENOSYS,EAX(%esp)
+ jmp resume_userspace
+
+#if 0 /* XEN */
+/*
+ * Build the entry stubs and pointer table with
+ * some assembler magic.
+ */
+.data
+ENTRY(interrupt)
+.text
+
+vector=0
+ENTRY(irq_entries_start)
+.rept NR_IRQS
+ ALIGN
+1: pushl $vector-256
+ jmp common_interrupt
+.data
+ .long 1b
+.text
+vector=vector+1
+.endr
+
+ ALIGN
+common_interrupt:
+ SAVE_ALL
+ movl %esp,%eax
+ call do_IRQ
+ jmp ret_from_intr
+
+#define BUILD_INTERRUPT(name, nr) \
+ENTRY(name) \
+ pushl $nr-256; \
+ SAVE_ALL \
+ movl %esp,%eax; \
+ call smp_/**/name; \
+ jmp ret_from_intr;
+
+/* The include is where all of the SMP etc. interrupts come from */
+#include "entry_arch.h"
+#endif /* XEN */
+
+ENTRY(divide_error)
+ pushl $0 # no error code
+ pushl $do_divide_error
+ ALIGN
+error_code:
+ pushl %ds
+ pushl %eax
+ xorl %eax, %eax
+ pushl %ebp
+ pushl %edi
+ pushl %esi
+ pushl %edx
+ decl %eax # eax = -1
+ pushl %ecx
+ pushl %ebx
+ cld
+ movl %es, %ecx
+ movl ES(%esp), %edi # get the function address
+ movl ORIG_EAX(%esp), %edx # get the error code
+ movl %eax, ORIG_EAX(%esp)
+ movl %ecx, ES(%esp)
+ movl $(__USER_DS), %ecx
+ movl %ecx, %ds
+ movl %ecx, %es
+ movl %esp,%eax # pt_regs pointer
+ XEN_GET_VCPU_INFO(%esi)
+ XEN_SAVE_UPCALL_MASK(%esi,%bl,EVENT_MASK)
+ call *%edi
+ jmp ret_from_exception
+
+# A note on the "critical region" in our callback handler.
+# We want to avoid stacking callback handlers due to events occurring
+# during handling of the last event. To do this, we keep events disabled
+# until we've done all processing. HOWEVER, we must enable events before
+# popping the stack frame (can't be done atomically) and so it would still
+# be possible to get enough handler activations to overflow the stack.
+# Although unlikely, bugs of that kind are hard to track down, so we'd
+# like to avoid the possibility.
+# So, on entry to the handler we detect whether we interrupted an
+# existing activation in its critical region -- if so, we pop the current
+# activation and restart the handler using the previous one.
+ENTRY(hypervisor_callback)
+ pushl %eax
+ SAVE_ALL_NO_EVENTMASK
+ movl EIP(%esp),%eax
+ cmpl $scrit,%eax
+ jb 11f
+ cmpl $ecrit,%eax
+ jb critical_region_fixup
+11: XEN_GET_VCPU_INFO(%esi)
+ movb $0, EVENT_MASK(%esp)
+ push %esp
+ call evtchn_do_upcall
+ add $4,%esp
+ jmp ret_from_intr
+
+ ALIGN
+restore_all_enable_events:
+ XEN_UNBLOCK_EVENTS(%esi)
+scrit: /**** START OF CRITICAL REGION ****/
+ XEN_TEST_PENDING(%esi)
+ jnz 14f # process more events if necessary...
+ RESTORE_ALL
+14: XEN_BLOCK_EVENTS(%esi)
+ jmp 11b
+ecrit: /**** END OF CRITICAL REGION ****/
+# [How we do the fixup]. We want to merge the current stack frame with the
+# just-interrupted frame. How we do this depends on where in the critical
+# region the interrupted handler was executing, and so how many saved
+# registers are in each frame. We do this quickly using the lookup table
+# 'critical_fixup_table'. For each byte offset in the critical region, it
+# provides the number of bytes which have already been popped from the
+# interrupted stack frame.
+critical_region_fixup:
+ addl $critical_fixup_table-scrit,%eax
+ movzbl (%eax),%eax # %eax contains num bytes popped
+ mov %esp,%esi
+ add %eax,%esi # %esi points at end of src region
+ mov %esp,%edi
+ add $0x34,%edi # %edi points at end of dst region
+ mov %eax,%ecx
+ shr $2,%ecx # convert bytes to words
+ je 16f # skip loop if nothing to copy
+15: subl $4,%esi # pre-decrementing copy loop
+ subl $4,%edi
+ movl (%esi),%eax
+ movl %eax,(%edi)
+ loop 15b
+16: movl %edi,%esp # final %edi is top of merged stack
+ jmp 11b
+
+critical_fixup_table:
+ .byte 0x00,0x00,0x00 # testb $0xff,(%esi) = XEN_TEST_PENDING
+ .byte 0x00,0x00 # jnz 14f
+ .byte 0x00 # pop %ebx
+ .byte 0x04 # pop %ecx
+ .byte 0x08 # pop %edx
+ .byte 0x0c # pop %esi
+ .byte 0x10 # pop %edi
+ .byte 0x14 # pop %ebp
+ .byte 0x18 # pop %eax
+ .byte 0x1c # pop %ds
+ .byte 0x20 # pop %es
+ .byte 0x24,0x24,0x24 # add $4,%esp
+ .byte 0x28 # iret
+ .byte 0x00,0x00,0x00,0x00 # movb $1,1(%esi)
+ .byte 0x00,0x00 # jmp 11b
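A C model of the fixup may be easier to follow: the table maps each instruction offset inside scrit..ecrit to the number of bytes the interrupted RESTORE_ALL had already popped, and the handler slides its freshly saved frame down over the partially popped one before restarting at 11b. A sketch (variable names are illustrative, not symbols from the patch):

    unsigned char popped = critical_fixup_table[interrupted_eip - scrit];
    unsigned long *src = (unsigned long *)(esp + popped);  /* end of new frame */
    unsigned long *dst = (unsigned long *)(esp + 0x34);    /* end of old frame */
    int words = popped / 4;

    while (words--)
            *--dst = *--src;        /* pre-decrementing copy, as in the asm */
    esp = (unsigned long)dst;       /* resume with the merged frame */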
+
+# Hypervisor uses this for application faults while it executes.
+ENTRY(failsafe_callback)
+1: popl %ds
+2: popl %es
+3: popl %fs
+4: popl %gs
+ subl $4,%esp
+ SAVE_ALL
+ jmp ret_from_exception
+.section .fixup,"ax"; \
+6: movl $0,(%esp); \
+ jmp 1b; \
+7: movl $0,(%esp); \
+ jmp 2b; \
+8: movl $0,(%esp); \
+ jmp 3b; \
+9: movl $0,(%esp); \
+ jmp 4b; \
+.previous; \
+.section __ex_table,"a";\
+ .align 4; \
+ .long 1b,6b; \
+ .long 2b,7b; \
+ .long 3b,8b; \
+ .long 4b,9b; \
+.previous
+
+ENTRY(coprocessor_error)
+ pushl $0
+ pushl $do_coprocessor_error
+ jmp error_code
+
+ENTRY(simd_coprocessor_error)
+ pushl $0
+ pushl $do_simd_coprocessor_error
+ jmp error_code
+
+ENTRY(device_not_available)
+ pushl $-1 # mark this as an int
+ SAVE_ALL
+ preempt_stop
+ call math_state_restore
+ jmp ret_from_exception
+
+/*
+ * Debug traps and NMI can happen at the one SYSENTER instruction
+ * that sets up the real kernel stack. Check here, since we can't
+ * allow the wrong stack to be used.
+ *
+ * "TSS_sysenter_esp0+12" is because the NMI/debug handler will have
+ * already pushed 3 words if it hits on the sysenter instruction:
+ * eflags, cs and eip.
+ *
+ * We just load the right stack, and push the three (known) values
+ * by hand onto the new stack - while updating the return eip past
+ * the instruction that would have done it for sysenter.
+ */
+#define FIX_STACK(offset, ok, label) \
+ cmpw $__KERNEL_CS,4(%esp); \
+ jne ok; \
+label: \
+ movl TSS_sysenter_esp0+offset(%esp),%esp; \
+ pushfl; \
+ pushl $__KERNEL_CS; \
+ pushl $sysenter_past_esp
+
+ENTRY(debug)
+ cmpl $sysenter_entry,(%esp)
+ jne debug_stack_correct
+ FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
+debug_stack_correct:
+ pushl $-1 # mark this as an int
+ SAVE_ALL
+ xorl %edx,%edx # error code 0
+ movl %esp,%eax # pt_regs pointer
+ call do_debug
+ testl %eax,%eax
+ jnz restore_all
+ jmp ret_from_exception
+
+#if 0 /* XEN */
+/*
+ * NMI is doubly nasty. It can happen _while_ we're handling
+ * a debug fault, and the debug fault hasn't yet been able to
+ * clear up the stack. So we first check whether we got an
+ * NMI on the sysenter entry path, but after that we need to
+ * check whether we got an NMI on the debug path where the debug
+ * fault happened on the sysenter path.
+ */
+ENTRY(nmi)
+ cmpl $sysenter_entry,(%esp)
+ je nmi_stack_fixup
+ pushl %eax
+ movl %esp,%eax
+ /* Do not access memory above the end of our stack page,
+ * it might not exist.
+ */
+ andl $(THREAD_SIZE-1),%eax
+ cmpl $(THREAD_SIZE-20),%eax
+ popl %eax
+ jae nmi_stack_correct
+ cmpl $sysenter_entry,12(%esp)
+ je nmi_debug_stack_check
+nmi_stack_correct:
+ pushl %eax
+ SAVE_ALL
+ xorl %edx,%edx # zero error code
+ movl %esp,%eax # pt_regs pointer
+ call do_nmi
+ RESTORE_ALL
+
+nmi_stack_fixup:
+ FIX_STACK(12,nmi_stack_correct, 1)
+ jmp nmi_stack_correct
+nmi_debug_stack_check:
+ cmpw $__KERNEL_CS,16(%esp)
+ jne nmi_stack_correct
+ cmpl $debug - 1,(%esp)
+ jle nmi_stack_correct
+ cmpl $debug_esp_fix_insn,(%esp)
+ jle nmi_debug_stack_fixup
+nmi_debug_stack_fixup:
+ FIX_STACK(24,nmi_stack_correct, 1)
+ jmp nmi_stack_correct
+#endif /* XEN */
+
+ENTRY(int3)
+ pushl $-1 # mark this as an int
+ SAVE_ALL
+ xorl %edx,%edx # zero error code
+ movl %esp,%eax # pt_regs pointer
+ call do_int3
+ testl %eax,%eax
+ jnz restore_all
+ jmp ret_from_exception
+
+ENTRY(overflow)
+ pushl $0
+ pushl $do_overflow
+ jmp error_code
+
+ENTRY(bounds)
+ pushl $0
+ pushl $do_bounds
+ jmp error_code
+
+ENTRY(invalid_op)
+ pushl $0
+ pushl $do_invalid_op
+ jmp error_code
+
+ENTRY(coprocessor_segment_overrun)
+ pushl $0
+ pushl $do_coprocessor_segment_overrun
+ jmp error_code
+
+ENTRY(invalid_TSS)
+ pushl $do_invalid_TSS
+ jmp error_code
+
+ENTRY(segment_not_present)
+ pushl $do_segment_not_present
+ jmp error_code
+
+ENTRY(stack_segment)
+ pushl $do_stack_segment
+ jmp error_code
+
+ENTRY(general_protection)
+ pushl $do_general_protection
+ jmp error_code
+
+ENTRY(alignment_check)
+ pushl $do_alignment_check
+ jmp error_code
+
+# This handler is special, because it gets an extra value on its stack,
+# which is the linear faulting address.
+# fastcall register usage: %eax = pt_regs, %edx = error code,
+# %ecx = fault address
+ENTRY(page_fault)
+ pushl %ds
+ pushl %eax
+ xorl %eax, %eax
+ pushl %ebp
+ pushl %edi
+ pushl %esi
+ pushl %edx
+ decl %eax /* eax = -1 */
+ pushl %ecx
+ pushl %ebx
+ cld
+ movl %es,%edi
+ movl ES(%esp), %ecx /* get the faulting address */
+ movl ORIG_EAX(%esp), %edx /* get the error code */
+ movl %eax, ORIG_EAX(%esp)
+ movl %edi, ES(%esp)
+ movl $(__KERNEL_DS),%eax
+ movl %eax, %ds
+ movl %eax, %es
+ movl %esp,%eax /* pt_regs pointer */
+ XEN_GET_VCPU_INFO(%esi)
+ XEN_SAVE_UPCALL_MASK(%esi,%bl,EVENT_MASK)
+ call do_page_fault
+ jmp ret_from_exception
+
+#ifdef CONFIG_X86_MCE
+ENTRY(machine_check)
+ pushl $0
+ pushl machine_check_vector
+ jmp error_code
+#endif
+
+ENTRY(fixup_4gb_segment)
+ pushl $do_fixup_4gb_segment
+ jmp error_code
+
+.data
+ENTRY(sys_call_table)
+ .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
+ .long sys_exit
+ .long sys_fork
+ .long sys_read
+ .long sys_write
+ .long sys_open /* 5 */
+ .long sys_close
+ .long sys_waitpid
+ .long sys_creat
+ .long sys_link
+ .long sys_unlink /* 10 */
+ .long sys_execve
+ .long sys_chdir
+ .long sys_time
+ .long sys_mknod
+ .long sys_chmod /* 15 */
+ .long sys_lchown16
+ .long sys_ni_syscall /* old break syscall holder */
+ .long sys_stat
+ .long sys_lseek
+ .long sys_getpid /* 20 */
+ .long sys_mount
+ .long sys_oldumount
+ .long sys_setuid16
+ .long sys_getuid16
+ .long sys_stime /* 25 */
+ .long sys_ptrace
+ .long sys_alarm
+ .long sys_fstat
+ .long sys_pause
+ .long sys_utime /* 30 */
+ .long sys_ni_syscall /* old stty syscall holder */
+ .long sys_ni_syscall /* old gtty syscall holder */
+ .long sys_access
+ .long sys_nice
+ .long sys_ni_syscall /* 35 - old ftime syscall holder */
+ .long sys_sync
+ .long sys_kill
+ .long sys_rename
+ .long sys_mkdir
+ .long sys_rmdir /* 40 */
+ .long sys_dup
+ .long sys_pipe
+ .long sys_times
+ .long sys_ni_syscall /* old prof syscall holder */
+ .long sys_brk /* 45 */
+ .long sys_setgid16
+ .long sys_getgid16
+ .long sys_signal
+ .long sys_geteuid16
+ .long sys_getegid16 /* 50 */
+ .long sys_acct
+ .long sys_umount /* recycled never used phys() */
+ .long sys_ni_syscall /* old lock syscall holder */
+ .long sys_ioctl
+ .long sys_fcntl /* 55 */
+ .long sys_ni_syscall /* old mpx syscall holder */
+ .long sys_setpgid
+ .long sys_ni_syscall /* old ulimit syscall holder */
+ .long sys_olduname
+ .long sys_umask /* 60 */
+ .long sys_chroot
+ .long sys_ustat
+ .long sys_dup2
+ .long sys_getppid
+ .long sys_getpgrp /* 65 */
+ .long sys_setsid
+ .long sys_sigaction
+ .long sys_sgetmask
+ .long sys_ssetmask
+ .long sys_setreuid16 /* 70 */
+ .long sys_setregid16
+ .long sys_sigsuspend
+ .long sys_sigpending
+ .long sys_sethostname
+ .long sys_setrlimit /* 75 */
+ .long sys_old_getrlimit
+ .long sys_getrusage
+ .long sys_gettimeofday
+ .long sys_settimeofday
+ .long sys_getgroups16 /* 80 */
+ .long sys_setgroups16
+ .long old_select
+ .long sys_symlink
+ .long sys_lstat
+ .long sys_readlink /* 85 */
+ .long sys_uselib
+ .long sys_swapon
+ .long sys_reboot
+ .long old_readdir
+ .long old_mmap /* 90 */
+ .long sys_munmap
+ .long sys_truncate
+ .long sys_ftruncate
+ .long sys_fchmod
+ .long sys_fchown16 /* 95 */
+ .long sys_getpriority
+ .long sys_setpriority
+ .long sys_ni_syscall /* old profil syscall holder */
+ .long sys_statfs
+ .long sys_fstatfs /* 100 */
+ .long sys_ioperm
+ .long sys_socketcall
+ .long sys_syslog
+ .long sys_setitimer
+ .long sys_getitimer /* 105 */
+ .long sys_newstat
+ .long sys_newlstat
+ .long sys_newfstat
+ .long sys_uname
+ .long sys_iopl /* 110 */
+ .long sys_vhangup
+ .long sys_ni_syscall /* old "idle" system call */
+ .long sys_vm86old
+ .long sys_wait4
+ .long sys_swapoff /* 115 */
+ .long sys_sysinfo
+ .long sys_ipc
+ .long sys_fsync
+ .long sys_sigreturn
+ .long sys_clone /* 120 */
+ .long sys_setdomainname
+ .long sys_newuname
+ .long sys_modify_ldt
+ .long sys_adjtimex
+ .long sys_mprotect /* 125 */
+ .long sys_sigprocmask
+ .long sys_ni_syscall /* old "create_module" */
+ .long sys_init_module
+ .long sys_delete_module
+ .long sys_ni_syscall /* 130: old "get_kernel_syms" */
+ .long sys_quotactl
+ .long sys_getpgid
+ .long sys_fchdir
+ .long sys_bdflush
+ .long sys_sysfs /* 135 */
+ .long sys_personality
+ .long sys_ni_syscall /* reserved for afs_syscall */
+ .long sys_setfsuid16
+ .long sys_setfsgid16
+ .long sys_llseek /* 140 */
+ .long sys_getdents
+ .long sys_select
+ .long sys_flock
+ .long sys_msync
+ .long sys_readv /* 145 */
+ .long sys_writev
+ .long sys_getsid
+ .long sys_fdatasync
+ .long sys_sysctl
+ .long sys_mlock /* 150 */
+ .long sys_munlock
+ .long sys_mlockall
+ .long sys_munlockall
+ .long sys_sched_setparam
+ .long sys_sched_getparam /* 155 */
+ .long sys_sched_setscheduler
+ .long sys_sched_getscheduler
+ .long sys_sched_yield
+ .long sys_sched_get_priority_max
+ .long sys_sched_get_priority_min /* 160 */
+ .long sys_sched_rr_get_interval
+ .long sys_nanosleep
+ .long sys_mremap
+ .long sys_setresuid16
+ .long sys_getresuid16 /* 165 */
+ .long sys_vm86
+ .long sys_ni_syscall /* Old sys_query_module */
+ .long sys_poll
+ .long sys_nfsservctl
+ .long sys_setresgid16 /* 170 */
+ .long sys_getresgid16
+ .long sys_prctl
+ .long sys_rt_sigreturn
+ .long sys_rt_sigaction
+ .long sys_rt_sigprocmask /* 175 */
+ .long sys_rt_sigpending
+ .long sys_rt_sigtimedwait
+ .long sys_rt_sigqueueinfo
+ .long sys_rt_sigsuspend
+ .long sys_pread64 /* 180 */
+ .long sys_pwrite64
+ .long sys_chown16
+ .long sys_getcwd
+ .long sys_capget
+ .long sys_capset /* 185 */
+ .long sys_sigaltstack
+ .long sys_sendfile
+ .long sys_ni_syscall /* reserved for streams1 */
+ .long sys_ni_syscall /* reserved for streams2 */
+ .long sys_vfork /* 190 */
+ .long sys_getrlimit
+ .long sys_mmap2
+ .long sys_truncate64
+ .long sys_ftruncate64
+ .long sys_stat64 /* 195 */
+ .long sys_lstat64
+ .long sys_fstat64
+ .long sys_lchown
+ .long sys_getuid
+ .long sys_getgid /* 200 */
+ .long sys_geteuid
+ .long sys_getegid
+ .long sys_setreuid
+ .long sys_setregid
+ .long sys_getgroups /* 205 */
+ .long sys_setgroups
+ .long sys_fchown
+ .long sys_setresuid
+ .long sys_getresuid
+ .long sys_setresgid /* 210 */
+ .long sys_getresgid
+ .long sys_chown
+ .long sys_setuid
+ .long sys_setgid
+ .long sys_setfsuid /* 215 */
+ .long sys_setfsgid
+ .long sys_pivot_root
+ .long sys_mincore
+ .long sys_madvise
+ .long sys_getdents64 /* 220 */
+ .long sys_fcntl64
+ .long sys_ni_syscall /* reserved for TUX */
+ .long sys_ni_syscall
+ .long sys_gettid
+ .long sys_readahead /* 225 */
+ .long sys_setxattr
+ .long sys_lsetxattr
+ .long sys_fsetxattr
+ .long sys_getxattr
+ .long sys_lgetxattr /* 230 */
+ .long sys_fgetxattr
+ .long sys_listxattr
+ .long sys_llistxattr
+ .long sys_flistxattr
+ .long sys_removexattr /* 235 */
+ .long sys_lremovexattr
+ .long sys_fremovexattr
+ .long sys_tkill
+ .long sys_sendfile64
+ .long sys_futex /* 240 */
+ .long sys_sched_setaffinity
+ .long sys_sched_getaffinity
+ .long sys_set_thread_area
+ .long sys_get_thread_area
+ .long sys_io_setup /* 245 */
+ .long sys_io_destroy
+ .long sys_io_getevents
+ .long sys_io_submit
+ .long sys_io_cancel
+ .long sys_fadvise64 /* 250 */
+ .long sys_ni_syscall
+ .long sys_exit_group
+ .long sys_lookup_dcookie
+ .long sys_epoll_create
+ .long sys_epoll_ctl /* 255 */
+ .long sys_epoll_wait
+ .long sys_remap_file_pages
+ .long sys_set_tid_address
+ .long sys_timer_create
+ .long sys_timer_settime /* 260 */
+ .long sys_timer_gettime
+ .long sys_timer_getoverrun
+ .long sys_timer_delete
+ .long sys_clock_settime
+ .long sys_clock_gettime /* 265 */
+ .long sys_clock_getres
+ .long sys_clock_nanosleep
+ .long sys_statfs64
+ .long sys_fstatfs64
+ .long sys_tgkill /* 270 */
+ .long sys_utimes
+ .long sys_fadvise64_64
+ .long sys_ni_syscall /* sys_vserver */
+ .long sys_mbind
+ .long sys_get_mempolicy
+ .long sys_set_mempolicy
+ .long sys_mq_open
+ .long sys_mq_unlink
+ .long sys_mq_timedsend
+ .long sys_mq_timedreceive /* 280 */
+ .long sys_mq_notify
+ .long sys_mq_getsetattr
+ .long sys_ni_syscall /* reserved for kexec */
+ .long sys_waitid
+ .long sys_ni_syscall /* 285 */ /* available */
+ .long sys_add_key
+ .long sys_request_key
+ .long sys_keyctl
+
+syscall_table_size=(.-sys_call_table)
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/arch/xen/i386/kernel/head.S
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/head.S Tue Aug 9 15:17:45 2005
@@ -0,0 +1,187 @@
+
+#include <linux/config.h>
+
+.section __xen_guest
+ .ascii "GUEST_OS=linux,GUEST_VER=2.6"
+ .ascii ",XEN_VER=2.0"
+ .ascii ",VIRT_BASE=0xC0000000"
+ .ascii ",LOADER=generic"
+ .ascii ",PT_MODE_WRITABLE"
+ .byte 0
+
+.text
+#include <linux/threads.h>
+#include <linux/linkage.h>
+#include <asm/segment.h>
+#include <asm/thread_info.h>
+#include <asm/asm_offsets.h>
+#include <asm-xen/xen-public/arch-x86_32.h>
+
+/*
+ * References to members of the new_cpu_data structure.
+ */
+
+#define X86 new_cpu_data+CPUINFO_x86
+#define X86_VENDOR new_cpu_data+CPUINFO_x86_vendor
+#define X86_MODEL new_cpu_data+CPUINFO_x86_model
+#define X86_MASK new_cpu_data+CPUINFO_x86_mask
+#define X86_HARD_MATH new_cpu_data+CPUINFO_hard_math
+#define X86_CPUID new_cpu_data+CPUINFO_cpuid_level
+#define X86_CAPABILITY new_cpu_data+CPUINFO_x86_capability
+#define X86_VENDOR_ID new_cpu_data+CPUINFO_x86_vendor_id
+
+ENTRY(startup_32)
+ cld
+
+ /* Copy the necessary stuff from xen_start_info structure. */
+ mov $xen_start_info_union,%edi
+ mov $128,%ecx
+ rep movsl
+
+#ifdef CONFIG_SMP
+ENTRY(startup_32_smp)
+ cld
+#endif /* CONFIG_SMP */
+
+ /* Set up the stack pointer */
+ lss stack_start,%esp
+
+checkCPUtype:
+
+ /* get vendor info */
+ xorl %eax,%eax # call CPUID with 0 -> return vendor ID
+ cpuid
+ movl %eax,X86_CPUID # save CPUID level
+ movl %ebx,X86_VENDOR_ID # lo 4 chars
+ movl %edx,X86_VENDOR_ID+4 # next 4 chars
+ movl %ecx,X86_VENDOR_ID+8 # last 4 chars
+
+ movl $1,%eax # Use the CPUID instruction to get CPU type
+ cpuid
+ movb %al,%cl # save reg for future use
+ andb $0x0f,%ah # mask processor family
+ movb %ah,X86
+ andb $0xf0,%al # mask model
+ shrb $4,%al
+ movb %al,X86_MODEL
+ andb $0x0f,%cl # mask mask revision
+ movb %cl,X86_MASK
+ movl %edx,X86_CAPABILITY
+
+ incb ready
+
+ xorl %eax,%eax # Clear FS/GS and LDT
+ movl %eax,%fs
+ movl %eax,%gs
+ cld # gcc2 wants the direction flag cleared at all times
+
+#ifdef CONFIG_SMP
+ movb ready, %cl
+ cmpb $1,%cl
+ je 1f # the first CPU calls start_kernel
+ # all other CPUs call initialize_secondary
+ call initialize_secondary
+ jmp L6
+1:
+#endif /* CONFIG_SMP */
+ call start_kernel
+L6:
+ jmp L6 # main should never return here, but
+ # just in case, we know what happens.
+
+ENTRY(lgdt_finish)
+ movl $(__KERNEL_DS),%eax # reload all the segment registers
+ movw %ax,%ss # after changing gdt.
+
+ movl $(__USER_DS),%eax # DS/ES contains default USER segment
+ movw %ax,%ds
+ movw %ax,%es
+
+ popl %eax # reload CS by intersegment return
+ pushl $(__KERNEL_CS)
+ pushl %eax
+ lret
+
+ENTRY(stack_start)
+ .long init_thread_union+THREAD_SIZE
+ .long __BOOT_DS
+
+ready: .byte 0
+
+.globl idt_descr
+.globl cpu_gdt_descr
+
+ ALIGN
+ .word 0 # 32-bit align idt_desc.address
+idt_descr:
+ .word IDT_ENTRIES*8-1 # idt contains 256 entries
+ .long idt_table
+
+# boot GDT descriptor (later on used by CPU#0):
+ .word 0 # 32 bit align gdt_desc.address
+cpu_gdt_descr:
+ .word GDT_SIZE
+ .long cpu_gdt_table
+
+ .fill NR_CPUS-1,8,0 # space for the other GDT descriptors
+
+.org 0x1000
+ENTRY(empty_zero_page)
+
+.org 0x2000
+ENTRY(swapper_pg_dir)
+
+.org 0x3000
+ENTRY(cpu_gdt_table)
+ .quad 0x0000000000000000 /* NULL descriptor */
+ .quad 0x0000000000000000 /* 0x0b reserved */
+ .quad 0x0000000000000000 /* 0x13 reserved */
+ .quad 0x0000000000000000 /* 0x1b reserved */
+ .quad 0x0000000000000000 /* 0x20 unused */
+ .quad 0x0000000000000000 /* 0x28 unused */
+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
+ .quad 0x0000000000000000 /* 0x4b reserved */
+ .quad 0x0000000000000000 /* 0x53 reserved */
+ .quad 0x0000000000000000 /* 0x5b reserved */
+
+ .quad 0x00cfbb000000c3ff /* 0x60 kernel 4GB code at 0x00000000 */
+ .quad 0x00cfb3000000c3ff /* 0x68 kernel 4GB data at 0x00000000 */
+ .quad 0x00cffb000000c3ff /* 0x73 user 4GB code at 0x00000000 */
+ .quad 0x00cff3000000c3ff /* 0x7b user 4GB data at 0x00000000 */
+
+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
+
+ /* Segments used for calling PnP BIOS */
+ .quad 0x0000000000000000 /* 0x90 32-bit code */
+ .quad 0x0000000000000000 /* 0x98 16-bit code */
+ .quad 0x0000000000000000 /* 0xa0 16-bit data */
+ .quad 0x0000000000000000 /* 0xa8 16-bit data */
+ .quad 0x0000000000000000 /* 0xb0 16-bit data */
+ /*
+ * The APM segments have byte granularity and their bases
+ * and limits are set at run time.
+ */
+ .quad 0x0000000000000000 /* 0xb8 APM CS code */
+ .quad 0x0000000000000000 /* 0xc0 APM CS 16 code (16 bit) */
+ .quad 0x0000000000000000 /* 0xc8 APM DS data */
+
+ .quad 0x0000000000000000 /* 0xd0 - unused */
+ .quad 0x0000000000000000 /* 0xd8 - unused */
+ .quad 0x0000000000000000 /* 0xe0 - unused */
+ .quad 0x0000000000000000 /* 0xe8 - unused */
+ .quad 0x0000000000000000 /* 0xf0 - unused */
+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
+ .fill GDT_ENTRIES-32,8,0
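Decoding the flat descriptors at 0x60-0x7b shows why this GDT differs from native Linux (a bit-level reading, with the rationale inferred rather than taken from the patch):

    0x00cfbb000000c3ff  ->  base        = 0x00000000
                            limit[19:0] = 0xfc3ff 4K pages (segment ends at 0xFC400000)
                            access 0xbb = present, DPL 1, code (exec/read)
                            flags  0xc  = 4K granularity, 32-bit

The limit stops short of 4GB so guest segments cannot reach the top of the virtual address space that Xen reserves for itself; this matters because ring 1 gives the guest kernel no paging-level separation from ring 0. The 0xfb/0xf3 user entries are the same segments at DPL 3.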
+
+.org 0x4000
+ENTRY(default_ldt)
+
+.org 0x5000
+/*
+ * Real beginning of normal "text" segment
+ */
+ENTRY(stext)
+ENTRY(_stext)
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/arch/xen/i386/kernel/i386_ksyms.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/i386_ksyms.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,194 @@
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/smp.h>
+#include <linux/user.h>
+#include <linux/elfcore.h>
+#include <linux/mca.h>
+#include <linux/sched.h>
+#include <linux/in6.h>
+#include <linux/interrupt.h>
+#include <linux/smp_lock.h>
+#include <linux/pm.h>
+#include <linux/pci.h>
+#include <linux/apm_bios.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/tty.h>
+#include <linux/highmem.h>
+#include <linux/time.h>
+
+#include <asm/semaphore.h>
+#include <asm/processor.h>
+#include <asm/i387.h>
+#include <asm/uaccess.h>
+#include <asm/checksum.h>
+#include <asm/io.h>
+#include <asm/delay.h>
+#include <asm/irq.h>
+#include <asm/mmx.h>
+#include <asm/desc.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+#include <asm/nmi.h>
+#include <asm/ist.h>
+#include <asm/kdebug.h>
+
+extern void dump_thread(struct pt_regs *, struct user *);
+extern spinlock_t rtc_lock;
+
+/* This is definitely a GPL-only symbol */
+EXPORT_SYMBOL_GPL(cpu_gdt_table);
+
+#if defined(CONFIG_APM_MODULE)
+extern void machine_real_restart(unsigned char *, int);
+EXPORT_SYMBOL(machine_real_restart);
+extern void default_idle(void);
+EXPORT_SYMBOL(default_idle);
+#endif
+
+#ifdef CONFIG_SMP
+extern void FASTCALL( __write_lock_failed(rwlock_t *rw));
+extern void FASTCALL( __read_lock_failed(rwlock_t *rw));
+#endif
+
+#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_HD) || defined(CONFIG_BLK_DEV_IDE_MODULE) || defined(CONFIG_BLK_DEV_HD_MODULE)
+extern struct drive_info_struct drive_info;
+EXPORT_SYMBOL(drive_info);
+#endif
+
+extern unsigned long cpu_khz;
+extern unsigned long get_cmos_time(void);
+
+/* platform dependent support */
+EXPORT_SYMBOL(boot_cpu_data);
+#ifdef CONFIG_DISCONTIGMEM
+EXPORT_SYMBOL(node_data);
+EXPORT_SYMBOL(physnode_map);
+#endif
+#ifdef CONFIG_X86_NUMAQ
+EXPORT_SYMBOL(xquad_portio);
+#endif
+EXPORT_SYMBOL(dump_thread);
+EXPORT_SYMBOL(dump_fpu);
+EXPORT_SYMBOL_GPL(kernel_fpu_begin);
+EXPORT_SYMBOL(__ioremap);
+EXPORT_SYMBOL(ioremap_nocache);
+EXPORT_SYMBOL(iounmap);
+EXPORT_SYMBOL(kernel_thread);
+EXPORT_SYMBOL(pm_idle);
+#ifdef CONFIG_APM
+EXPORT_SYMBOL(pm_power_off);
+#endif
+EXPORT_SYMBOL(get_cmos_time);
+EXPORT_SYMBOL(cpu_khz);
+EXPORT_SYMBOL(apm_info);
+
+EXPORT_SYMBOL(__down_failed);
+EXPORT_SYMBOL(__down_failed_interruptible);
+EXPORT_SYMBOL(__down_failed_trylock);
+EXPORT_SYMBOL(__up_wakeup);
+/* Networking helper routines. */
+EXPORT_SYMBOL(csum_partial_copy_generic);
+/* Delay loops */
+EXPORT_SYMBOL(__ndelay);
+EXPORT_SYMBOL(__udelay);
+EXPORT_SYMBOL(__delay);
+EXPORT_SYMBOL(__const_udelay);
+
+EXPORT_SYMBOL(__get_user_1);
+EXPORT_SYMBOL(__get_user_2);
+EXPORT_SYMBOL(__get_user_4);
+
+EXPORT_SYMBOL(strpbrk);
+EXPORT_SYMBOL(strstr);
+
+EXPORT_SYMBOL(strncpy_from_user);
+EXPORT_SYMBOL(__strncpy_from_user);
+EXPORT_SYMBOL(clear_user);
+EXPORT_SYMBOL(__clear_user);
+EXPORT_SYMBOL(__copy_from_user_ll);
+EXPORT_SYMBOL(__copy_to_user_ll);
+EXPORT_SYMBOL(strnlen_user);
+
+EXPORT_SYMBOL(dma_alloc_coherent);
+EXPORT_SYMBOL(dma_free_coherent);
+
+#ifdef CONFIG_PCI
+EXPORT_SYMBOL(pcibios_penalize_isa_irq);
+EXPORT_SYMBOL(pci_mem_start);
+#endif
+
+#ifdef CONFIG_PCI_BIOS
+EXPORT_SYMBOL(pcibios_set_irq_routing);
+EXPORT_SYMBOL(pcibios_get_irq_routing_table);
+#endif
+
+#ifdef CONFIG_X86_USE_3DNOW
+EXPORT_SYMBOL(_mmx_memcpy);
+EXPORT_SYMBOL(mmx_clear_page);
+EXPORT_SYMBOL(mmx_copy_page);
+#endif
+
+#ifdef CONFIG_X86_HT
+EXPORT_SYMBOL(smp_num_siblings);
+EXPORT_SYMBOL(cpu_sibling_map);
+#endif
+
+#ifdef CONFIG_SMP
+EXPORT_SYMBOL(cpu_data);
+EXPORT_SYMBOL(cpu_online_map);
+EXPORT_SYMBOL(cpu_callout_map);
+EXPORT_SYMBOL(__write_lock_failed);
+EXPORT_SYMBOL(__read_lock_failed);
+
+/* Global SMP stuff */
+EXPORT_SYMBOL(smp_call_function);
+
+/* TLB flushing */
+EXPORT_SYMBOL(flush_tlb_page);
+EXPORT_SYMBOL_GPL(flush_tlb_all);
+#endif
+
+#ifdef CONFIG_X86_IO_APIC
+EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
+#endif
+
+#ifdef CONFIG_MCA
+EXPORT_SYMBOL(machine_id);
+#endif
+
+#ifdef CONFIG_VT
+EXPORT_SYMBOL(screen_info);
+#endif
+
+EXPORT_SYMBOL(get_wchan);
+
+EXPORT_SYMBOL(rtc_lock);
+
+EXPORT_SYMBOL_GPL(set_nmi_callback);
+EXPORT_SYMBOL_GPL(unset_nmi_callback);
+
+#undef memcmp
+extern int memcmp(const void *,const void *,__kernel_size_t);
+EXPORT_SYMBOL(memcmp);
+
+EXPORT_SYMBOL(register_die_notifier);
+#ifdef CONFIG_HAVE_DEC_LOCK
+EXPORT_SYMBOL(_atomic_dec_and_lock);
+#endif
+
+EXPORT_SYMBOL(__PAGE_KERNEL);
+
+#ifdef CONFIG_HIGHMEM
+EXPORT_SYMBOL(kmap);
+EXPORT_SYMBOL(kunmap);
+EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(kunmap_atomic);
+EXPORT_SYMBOL(kmap_atomic_to_page);
+#endif
+
+#if defined(CONFIG_X86_SPEEDSTEP_SMI) || defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE)
+EXPORT_SYMBOL(ist_info);
+#endif
+
+EXPORT_SYMBOL(csum_partial);
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/arch/xen/i386/kernel/ioport.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/ioport.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,51 @@
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/ioport.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/stddef.h>
+#include <linux/slab.h>
+#include <asm-xen/xen-public/dom0_ops.h>
+
+asmlinkage long sys_iopl(unsigned int new_io_pl)
+{
+ unsigned int old_io_pl = current->thread.io_pl;
+ dom0_op_t op;
+
+ if (new_io_pl > 3)
+ return -EINVAL;
+
+ /* Need "raw I/O" privileges for direct port access. */
+ if ((new_io_pl > old_io_pl) && !capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
+ if (!(xen_start_info.flags & SIF_PRIVILEGED))
+ return -EPERM;
+
+ /* Maintain OS privileges even if user attempts to relinquish them. */
+ if (new_io_pl == 0)
+ new_io_pl = 1;
+
+ /* Change our version of the privilege levels. */
+ current->thread.io_pl = new_io_pl;
+
+ /* Force the change at ring 0. */
+ op.cmd = DOM0_IOPL;
+ op.u.iopl.domain = DOMID_SELF;
+ op.u.iopl.iopl = new_io_pl;
+ HYPERVISOR_dom0_op(&op);
+
+ return 0;
+}
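From userspace the call looks the same as on native Linux; the difference is only that ring-0 enforcement is delegated to Xen through the DOM0_IOPL operation. A minimal illustrative caller:

    #include <stdio.h>
    #include <sys/io.h>

    int main(void)
    {
            if (iopl(3) < 0) {          /* ends up in sys_iopl() above */
                    perror("iopl");
                    return 1;
            }
            outb(0x80, 0x80);           /* harmless write to the POST port */
            return 0;
    }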
+
+asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
+{
+#if 0
+ printk(KERN_INFO "ioperm not fully supported - %s\n",
+ turn_on ? "set iopl to 3" : "ignore resource release");
+#endif
+ return turn_on ? sys_iopl(3) : 0;
+}
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/arch/xen/i386/kernel/ldt.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/ldt.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,267 @@
+/*
+ * linux/kernel/ldt.c
+ *
+ * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
+ * Copyright (C) 1999 Ingo Molnar <mingo@xxxxxxxxxx>
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/ldt.h>
+#include <asm/desc.h>
+
+#ifdef CONFIG_SMP /* avoids "defined but not used" warning */
+static void flush_ldt(void *null)
+{
+ if (current->active_mm)
+ load_LDT(&current->active_mm->context);
+}
+#endif
+
+static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
+{
+ void *oldldt;
+ void *newldt;
+ int oldsize;
+
+ if (mincount <= pc->size)
+ return 0;
+ oldsize = pc->size;
+ mincount = (mincount+511)&(~511);
+ if (mincount*LDT_ENTRY_SIZE > PAGE_SIZE)
+ newldt = vmalloc(mincount*LDT_ENTRY_SIZE);
+ else
+ newldt = kmalloc(mincount*LDT_ENTRY_SIZE, GFP_KERNEL);
+
+ if (!newldt)
+ return -ENOMEM;
+
+ if (oldsize)
+ memcpy(newldt, pc->ldt, oldsize*LDT_ENTRY_SIZE);
+ oldldt = pc->ldt;
+ memset(newldt+oldsize*LDT_ENTRY_SIZE, 0, (mincount-oldsize)*LDT_ENTRY_SIZE);
+ pc->ldt = newldt;
+ wmb();
+ pc->size = mincount;
+ wmb();
+
+ if (reload) {
+#ifdef CONFIG_SMP
+ cpumask_t mask;
+ preempt_disable();
+#endif
+ make_pages_readonly(pc->ldt, (pc->size * LDT_ENTRY_SIZE) /
+ PAGE_SIZE);
+ load_LDT(pc);
+#ifdef CONFIG_SMP
+ mask = cpumask_of_cpu(smp_processor_id());
+ if (!cpus_equal(current->mm->cpu_vm_mask, mask))
+ smp_call_function(flush_ldt, NULL, 1, 1);
+ preempt_enable();
+#endif
+ }
+ if (oldsize) {
+ make_pages_writable(oldldt, (oldsize * LDT_ENTRY_SIZE) /
+ PAGE_SIZE);
+ flush_page_update_queue();
+ if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
+ vfree(oldldt);
+ else
+ kfree(oldldt);
+ }
+ return 0;
+}
+
+static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
+{
+ int err = alloc_ldt(new, old->size, 0);
+ if (err < 0)
+ return err;
+ memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
+ make_pages_readonly(new->ldt, (new->size * LDT_ENTRY_SIZE) /
+ PAGE_SIZE);
+ flush_page_update_queue();
+ return 0;
+}
+
+/*
+ * we do not have to muck with descriptors here, that is
+ * done in switch_mm() as needed.
+ */
+int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+ struct mm_struct * old_mm;
+ int retval = 0;
+
+ init_MUTEX(&mm->context.sem);
+ mm->context.size = 0;
+ old_mm = current->mm;
+ if (old_mm && old_mm->context.size > 0) {
+ down(&old_mm->context.sem);
+ retval = copy_ldt(&mm->context, &old_mm->context);
+ up(&old_mm->context.sem);
+ }
+ return retval;
+}
+
+/*
+ * No need to lock the MM as we are the last user
+ */
+void destroy_context(struct mm_struct *mm)
+{
+ if (mm->context.size) {
+ if (mm == current->active_mm)
+ clear_LDT();
+ make_pages_writable(mm->context.ldt,
+ (mm->context.size * LDT_ENTRY_SIZE) /
+ PAGE_SIZE);
+ flush_page_update_queue();
+ if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
+ vfree(mm->context.ldt);
+ else
+ kfree(mm->context.ldt);
+ mm->context.size = 0;
+ }
+}
+
+static int read_ldt(void __user * ptr, unsigned long bytecount)
+{
+ int err;
+ unsigned long size;
+ struct mm_struct * mm = current->mm;
+
+ if (!mm->context.size)
+ return 0;
+ if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
+ bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
+
+ down(&mm->context.sem);
+ size = mm->context.size*LDT_ENTRY_SIZE;
+ if (size > bytecount)
+ size = bytecount;
+
+ err = 0;
+ if (copy_to_user(ptr, mm->context.ldt, size))
+ err = -EFAULT;
+ up(&mm->context.sem);
+ if (err < 0)
+ goto error_return;
+ if (size != bytecount) {
+ /* zero-fill the rest */
+ if (clear_user(ptr+size, bytecount-size) != 0) {
+ err = -EFAULT;
+ goto error_return;
+ }
+ }
+ return bytecount;
+error_return:
+ return err;
+}
+
+static int read_default_ldt(void __user * ptr, unsigned long bytecount)
+{
+ int err;
+ unsigned long size;
+ void *address;
+
+ err = 0;
+ address = &default_ldt[0];
+ size = 5*sizeof(struct desc_struct);
+ if (size > bytecount)
+ size = bytecount;
+
+ err = size;
+ if (copy_to_user(ptr, address, size))
+ err = -EFAULT;
+
+ return err;
+}
+
+static int write_ldt(void __user * ptr, unsigned long bytecount, int oldmode)
+{
+ struct mm_struct * mm = current->mm;
+ __u32 entry_1, entry_2, *lp;
+ unsigned long mach_lp;
+ int error;
+ struct user_desc ldt_info;
+
+ error = -EINVAL;
+ if (bytecount != sizeof(ldt_info))
+ goto out;
+ error = -EFAULT;
+ if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
+ goto out;
+
+ error = -EINVAL;
+ if (ldt_info.entry_number >= LDT_ENTRIES)
+ goto out;
+ if (ldt_info.contents == 3) {
+ if (oldmode)
+ goto out;
+ if (ldt_info.seg_not_present == 0)
+ goto out;
+ }
+
+ down(&mm->context.sem);
+ if (ldt_info.entry_number >= mm->context.size) {
+ error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1);
+ if (error < 0)
+ goto out_unlock;
+ }
+
+ lp = (__u32 *) ((ldt_info.entry_number << 3) + (char *) mm->context.ldt);
+ mach_lp = arbitrary_virt_to_machine(lp);
+
+ /* Allow LDTs to be cleared by the user. */
+ if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
+ if (oldmode || LDT_empty(&ldt_info)) {
+ entry_1 = 0;
+ entry_2 = 0;
+ goto install;
+ }
+ }
+
+ entry_1 = LDT_entry_a(&ldt_info);
+ entry_2 = LDT_entry_b(&ldt_info);
+ if (oldmode)
+ entry_2 &= ~(1 << 20);
+
+ /* Install the new entry ... */
+install:
+ error = HYPERVISOR_update_descriptor(mach_lp, entry_1, entry_2);
+
+out_unlock:
+ up(&mm->context.sem);
+out:
+ return error;
+}
+
+asmlinkage int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
+{
+ int ret = -ENOSYS;
+
+ switch (func) {
+ case 0:
+ ret = read_ldt(ptr, bytecount);
+ break;
+ case 1:
+ ret = write_ldt(ptr, bytecount, 1);
+ break;
+ case 2:
+ ret = read_default_ldt(ptr, bytecount);
+ break;
+ case 0x11:
+ ret = write_ldt(ptr, bytecount, 0);
+ break;
+ }
+ return ret;
+}
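A hedged userspace sketch of the interface, useful for seeing which path reaches the hypercall-backed install (func 0x11 selects write_ldt() in new mode; 1 would be the legacy mode):

    #include <asm/ldt.h>          /* struct user_desc */
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Install an illustrative 4KB 32-bit segment in LDT slot 0. */
    int install_ldt_entry(void)
    {
            struct user_desc d;

            memset(&d, 0, sizeof(d));
            d.entry_number = 0;
            d.base_addr    = 0x1000;  /* arbitrary example base */
            d.limit        = 0xfff;
            d.seg_32bit    = 1;
            return syscall(SYS_modify_ldt, 0x11, &d, sizeof(d));
    }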
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/arch/xen/i386/kernel/microcode.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/microcode.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,163 @@
+/*
+ * Intel CPU Microcode Update Driver for Linux
+ *
+ * Copyright (C) 2000-2004 Tigran Aivazian
+ *
+ * This driver allows to upgrade microcode on Intel processors
+ * belonging to IA-32 family - PentiumPro, Pentium II,
+ * Pentium III, Xeon, Pentium 4, etc.
+ *
+ * Reference: Section 8.10 of Volume III, Intel Pentium 4 Manual,
+ * Order Number 245472 or free download from:
+ *
+ * http://developer.intel.com/design/pentium4/manuals/245472.htm
+ *
+ * For more information, go to http://www.urbanmyth.org/microcode
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+//#define DEBUG /* pr_debug */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/miscdevice.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/syscalls.h>
+
+#include <asm/msr.h>
+#include <asm/uaccess.h>
+#include <asm/processor.h>
+
+MODULE_DESCRIPTION("Intel CPU (IA-32) Microcode Update Driver");
+MODULE_AUTHOR("Tigran Aivazian <tigran@xxxxxxxxxxx>");
+MODULE_LICENSE("GPL");
+
+#define MICROCODE_VERSION "1.14-xen"
+
+#define DEFAULT_UCODE_DATASIZE (2000) /* 2000 bytes */
+#define MC_HEADER_SIZE (sizeof (microcode_header_t)) /* 48 bytes */
+#define DEFAULT_UCODE_TOTALSIZE (DEFAULT_UCODE_DATASIZE + MC_HEADER_SIZE) /* 2048 bytes */
+
+/* no concurrent ->write()s are allowed on /dev/cpu/microcode */
+static DECLARE_MUTEX(microcode_sem);
+
+static void __user *user_buffer; /* user area microcode data buffer */
+static unsigned int user_buffer_size; /* its size */
+
+static int microcode_open (struct inode *unused1, struct file *unused2)
+{
+ return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
+}
+
+
+static int do_microcode_update (void)
+{
+ int err;
+ dom0_op_t op;
+
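+	/* Pin the user buffer in memory so the hypervisor can read it directly. */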
+ err = sys_mlock((unsigned long)user_buffer, user_buffer_size);
+ if (err != 0)
+ return err;
+
+ op.cmd = DOM0_MICROCODE;
+ op.u.microcode.data = user_buffer;
+ op.u.microcode.length = user_buffer_size;
+ err = HYPERVISOR_dom0_op(&op);
+
+ (void)sys_munlock((unsigned long)user_buffer, user_buffer_size);
+
+ return err;
+}
+
+static ssize_t microcode_write (struct file *file, const char __user *buf, size_t len, loff_t *ppos)
+{
+ ssize_t ret;
+
+ if (len < DEFAULT_UCODE_TOTALSIZE) {
+ printk(KERN_ERR "microcode: not enough data\n");
+ return -EINVAL;
+ }
+
+ if ((len >> PAGE_SHIFT) > num_physpages) {
+		printk(KERN_ERR "microcode: too much data (max %ld pages)\n", num_physpages);
+ return -EINVAL;
+ }
+
+	down(&microcode_sem);
+
+ user_buffer = (void __user *) buf;
+ user_buffer_size = (int) len;
+
+ ret = do_microcode_update();
+ if (!ret)
+ ret = (ssize_t)len;
+
+	up(&microcode_sem);
+
+ return ret;
+}
+
+static int microcode_ioctl (struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ /*
+ * XXX: will be removed after microcode_ctl
+ * is updated to ignore failure of this ioctl()
+ */
+ case MICROCODE_IOCFREE:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+ return -EINVAL;
+}
+
+static struct file_operations microcode_fops = {
+ .owner = THIS_MODULE,
+ .write = microcode_write,
+ .ioctl = microcode_ioctl,
+ .open = microcode_open,
+};
+
+static struct miscdevice microcode_dev = {
+ .minor = MICROCODE_MINOR,
+ .name = "microcode",
+ .devfs_name = "cpu/microcode",
+	.fops = &microcode_fops,
+};
+
+static int __init microcode_init (void)
+{
+ int error;
+
+	error = misc_register(&microcode_dev);
+ if (error) {
+ printk(KERN_ERR
+ "microcode: can't misc_register on minor=%d\n",
+ MICROCODE_MINOR);
+ return error;
+ }
+
+ printk(KERN_INFO
+	       "IA-32 Microcode Update Driver: v" MICROCODE_VERSION " <tigran@xxxxxxxxxxx>\n");
+ return 0;
+}
+
+static void __exit microcode_exit (void)
+{
+	misc_deregister(&microcode_dev);
+	printk(KERN_INFO "IA-32 Microcode Update Driver v" MICROCODE_VERSION " unregistered\n");
+}
+
+module_init(microcode_init)
+module_exit(microcode_exit)
+MODULE_ALIAS_MISCDEV(MICROCODE_MINOR);
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/arch/xen/i386/kernel/pci-dma.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/pci-dma.c	Tue Aug 9 15:17:45 2005
@@ -0,0 +1,363 @@
+/*
+ * Dynamic DMA mapping support.
+ *
+ * On i386 there is no hardware dynamic DMA address translation,
+ * so consistent alloc/free are merely page allocation/freeing.
+ * The rest of the dynamic DMA mapping interface is implemented
+ * in asm/pci.h.
+ */
+
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/pci.h>
+#include <linux/version.h>
+#include <asm/io.h>
+#include <asm-xen/balloon.h>
+
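+/* When building against 2.4, map the 2.6 pagetable accessors onto their 2.4 equivalents (2.4 has no pud level). */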
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+#define pte_offset_kernel pte_offset
+#define pud_t pgd_t
+#define pud_offset(d, va) d
+#endif
+
+struct dma_coherent_mem {
+ void *virt_base;
+ u32 device_base;
+ int size;
+ int flags;
+ unsigned long *bitmap;
+};
+
+static void
+xen_contig_memory(unsigned long vstart, unsigned int order)
+{
+ /*
+ * Ensure multi-page extents are contiguous in machine memory.
+ * This code could be cleaned up some, and the number of
+ * hypercalls reduced.
+ */
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+ unsigned long pfn, i, flags;
+
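+	/* Scrub the old contents so nothing leaks to other domains once the pages are handed back to Xen. */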
+ scrub_pages(vstart, 1 << order);
+
+ balloon_lock(flags);
+
+ /* 1. Zap current PTEs, giving away the underlying pages. */
+ for (i = 0; i < (1<<order); i++) {
+ pgd = pgd_offset_k(vstart + (i*PAGE_SIZE));
+ pud = pud_offset(pgd, vstart + (i*PAGE_SIZE));
+ pmd = pmd_offset(pud, vstart + (i*PAGE_SIZE));
+ pte = pte_offset_kernel(pmd, vstart + (i*PAGE_SIZE));
+ pfn = pte_val_ma(*pte) >> PAGE_SHIFT;
+ queue_l1_entry_update(pte, 0);
+ phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
+ INVALID_P2M_ENTRY;
+ flush_page_update_queue();
+ if (HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation,
+ &pfn, 1, 0) != 1) BUG();
+ }
+ /* 2. Get a new contiguous memory extent. */
+ if (HYPERVISOR_dom_mem_op(MEMOP_increase_reservation,
+ &pfn, 1, order) != 1) BUG();
+ /* 3. Map the new extent in place of old pages. */
+ for (i = 0; i < (1<<order); i++) {
+ pgd = pgd_offset_k(vstart + (i*PAGE_SIZE));
+ pud = pud_offset(pgd, vstart + (i*PAGE_SIZE));
+ pmd = pmd_offset(pud, vstart + (i*PAGE_SIZE));
+ pte = pte_offset_kernel(pmd, vstart + (i*PAGE_SIZE));
+ queue_l1_entry_update(pte,
+ ((pfn+i)<<PAGE_SHIFT)|__PAGE_KERNEL);
+ queue_machphys_update(pfn+i, (__pa(vstart)>>PAGE_SHIFT)+i);
+ phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] = pfn+i;
+ }
+ /* Flush updates through and flush the TLB. */
+ xen_tlb_flush();
+
+ balloon_unlock(flags);
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
+ dma_addr_t *dma_handle)
+#else
+void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, int gfp)
+#endif
+{
+ void *ret;
+ unsigned int order = get_order(size);
+ unsigned long vstart;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+ int gfp = GFP_ATOMIC;
+
+ if (hwdev == NULL || ((u32)hwdev->dma_mask < 0xffffffff))
+ gfp |= GFP_DMA;
+#else
+ struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
+
+ /* ignore region specifiers */
+ gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
+
+ if (mem) {
+ int page = bitmap_find_free_region(mem->bitmap, mem->size,
+ order);
+ if (page >= 0) {
+ *dma_handle = mem->device_base + (page << PAGE_SHIFT);
+ ret = mem->virt_base + (page << PAGE_SHIFT);
+ memset(ret, 0, size);
+ return ret;
+ }
+ if (mem->flags & DMA_MEMORY_EXCLUSIVE)
+ return NULL;
+ }
+
+ if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
+ gfp |= GFP_DMA;
+#endif
+
+ vstart = __get_free_pages(gfp, order);
+ ret = (void *)vstart;
+
+ if (ret != NULL) {
+ xen_contig_memory(vstart, order);
+
+ memset(ret, 0, size);
+ *dma_handle = virt_to_bus(ret);
+ }
+ return ret;
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+void pci_free_consistent(struct pci_dev *hwdev, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
+{
+ free_pages((unsigned long)vaddr, get_order(size));
+}
+#else
+
+void dma_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
+{
+ struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
+ int order = get_order(size);
+
+	if (mem && vaddr >= mem->virt_base && vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
+ int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
+
+ bitmap_release_region(mem->bitmap, page, order);
+ } else
+ free_pages((unsigned long)vaddr, order);
+}
+
+int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
+ dma_addr_t device_addr, size_t size, int flags)
+{
+ void __iomem *mem_base;
+ int pages = size >> PAGE_SHIFT;
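+	/* One bit per page, rounded up to whole 32-bit words. */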
+ int bitmap_size = (pages + 31)/32;
+
+ if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
+ goto out;
+ if (!size)
+ goto out;
+ if (dev->dma_mem)
+ goto out;
+
+ /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
+
+ mem_base = ioremap(bus_addr, size);
+ if (!mem_base)
+ goto out;
+
+ dev->dma_mem = kmalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
+ if (!dev->dma_mem)
+ goto out;
+ memset(dev->dma_mem, 0, sizeof(struct dma_coherent_mem));
+ dev->dma_mem->bitmap = kmalloc(bitmap_size, GFP_KERNEL);
+ if (!dev->dma_mem->bitmap)
+ goto free1_out;
+ memset(dev->dma_mem->bitmap, 0, bitmap_size);
+
+ dev->dma_mem->virt_base = mem_base;
+ dev->dma_mem->device_base = device_addr;
+ dev->dma_mem->size = pages;
+ dev->dma_mem->flags = flags;
+
+ if (flags & DMA_MEMORY_MAP)
+ return DMA_MEMORY_MAP;
+
+ return DMA_MEMORY_IO;
+
+ free1_out:
+ kfree(dev->dma_mem->bitmap);
+ out:
+ return 0;
+}
+EXPORT_SYMBOL(dma_declare_coherent_memory);
+
+void dma_release_declared_memory(struct device *dev)
+{
+ struct dma_coherent_mem *mem = dev->dma_mem;
+
+ if(!mem)
+ return;
+ dev->dma_mem = NULL;
+ iounmap(mem->virt_base);
+ kfree(mem->bitmap);
+ kfree(mem);
+}
+EXPORT_SYMBOL(dma_release_declared_memory);
+
+void *dma_mark_declared_memory_occupied(struct device *dev,
+ dma_addr_t device_addr, size_t size)
+{
+ struct dma_coherent_mem *mem = dev->dma_mem;
+	int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ int pos, err;
+
+ if (!mem)
+ return ERR_PTR(-EINVAL);
+
+ pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
+ err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
+ if (err != 0)
+ return ERR_PTR(err);
+ return mem->virt_base + (pos << PAGE_SHIFT);
+}
+EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
+
+static LIST_HEAD(dma_map_head);
+static DEFINE_SPINLOCK(dma_map_lock);
+struct dma_map_entry {
+ struct list_head list;
+ dma_addr_t dma;
+ char *bounce, *host;
+ size_t size;
+};
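+/* True if dma address d lies inside the bounce buffer described by entry e. */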
+#define DMA_MAP_MATCHES(e,d) (((e)->dma<=(d)) && (((e)->dma+(e)->size)>(d)))
+
+dma_addr_t
+dma_map_single(struct device *dev, void *ptr, size_t size,
+ enum dma_data_direction direction)
+{
+ struct dma_map_entry *ent;
+ void *bnc;
+ dma_addr_t dma;
+ unsigned long flags;
+
+ BUG_ON(direction == DMA_NONE);
+
+ /*
+ * Even if size is sub-page, the buffer may still straddle a page
+ * boundary. Take into account buffer start offset. All other calls are
+ * conservative and always search the dma_map list if it's non-empty.
+ */
+ if ((((unsigned int)ptr & ~PAGE_MASK) + size) <= PAGE_SIZE) {
+ dma = virt_to_bus(ptr);
+ } else {
+ BUG_ON((bnc = dma_alloc_coherent(dev, size, &dma, 0)) == NULL);
+ BUG_ON((ent = kmalloc(sizeof(*ent), GFP_KERNEL)) == NULL);
+ if (direction != DMA_FROM_DEVICE)
+ memcpy(bnc, ptr, size);
+ ent->dma = dma;
+ ent->bounce = bnc;
+ ent->host = ptr;
+ ent->size = size;
+ spin_lock_irqsave(&dma_map_lock, flags);
+ list_add(&ent->list, &dma_map_head);
+ spin_unlock_irqrestore(&dma_map_lock, flags);
+ }
+
+ flush_write_buffers();
+ return dma;
+}
+EXPORT_SYMBOL(dma_map_single);
+
+void
+dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction direction)
+{
+ struct dma_map_entry *ent;
+ unsigned long flags;
+
+ BUG_ON(direction == DMA_NONE);
+
+ /* Fast-path check: are there any multi-page DMA mappings? */
+ if (!list_empty(&dma_map_head)) {
+ spin_lock_irqsave(&dma_map_lock, flags);
+ list_for_each_entry ( ent, &dma_map_head, list ) {
+ if (DMA_MAP_MATCHES(ent, dma_addr)) {
+ list_del(&ent->list);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&dma_map_lock, flags);
+ if (&ent->list != &dma_map_head) {
+ BUG_ON(dma_addr != ent->dma);
+ BUG_ON(size != ent->size);
+ if (direction != DMA_TO_DEVICE)
+ memcpy(ent->host, ent->bounce, size);
+ dma_free_coherent(dev, size, ent->bounce, ent->dma);
+ kfree(ent);
+ }
+ }
+}
+EXPORT_SYMBOL(dma_unmap_single);
+
+void
+dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
+{
+ struct dma_map_entry *ent;
+ unsigned long flags, off;
+
+ /* Fast-path check: are there any multi-page DMA mappings? */
+ if (!list_empty(&dma_map_head)) {
+ spin_lock_irqsave(&dma_map_lock, flags);
+ list_for_each_entry ( ent, &dma_map_head, list )
+ if (DMA_MAP_MATCHES(ent, dma_handle))
+ break;
+ spin_unlock_irqrestore(&dma_map_lock, flags);
+ if (&ent->list != &dma_map_head) {
+ off = dma_handle - ent->dma;
+ BUG_ON((off + size) > ent->size);
+ /*if (direction != DMA_TO_DEVICE)*/
+ memcpy(ent->host+off, ent->bounce+off, size);
+ }
+ }
+}
+EXPORT_SYMBOL(dma_sync_single_for_cpu);
+
+void
+dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
+{
+ struct dma_map_entry *ent;
+ unsigned long flags, off;
+
+ /* Fast-path check: are there any multi-page DMA mappings? */
+ if (!list_empty(&dma_map_head)) {
+ spin_lock_irqsave(&dma_map_lock, flags);
+ list_for_each_entry ( ent, &dma_map_head, list )
+ if (DMA_MAP_MATCHES(ent, dma_handle))
+ break;
+ spin_unlock_irqrestore(&dma_map_lock, flags);
+ if (&ent->list != &dma_map_head) {
+ off = dma_handle - ent->dma;
+ BUG_ON((off + size) > ent->size);
+ /*if (direction != DMA_FROM_DEVICE)*/
+ memcpy(ent->bounce+off, ent->host+off, size);
+ }
+ }
+
+ flush_write_buffers();
+}
+EXPORT_SYMBOL(dma_sync_single_for_device);
+
+#endif
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/arch/xen/i386/kernel/process.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/process.c	Tue Aug 9 15:17:45 2005
@@ -0,0 +1,779 @@
+/*
+ * linux/arch/i386/kernel/process.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ *
+ * Pentium III FXSR, SSE support
+ * Gareth Hughes <gareth@xxxxxxxxxxx>, May 2000
+ */
+
+/*
+ * This file handles the architecture-dependent parts of process handling..
+ */
+
+#include <stdarg.h>
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/elfcore.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/stddef.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/user.h>
+#include <linux/a.out.h>
+#include <linux/interrupt.h>
+#include <linux/config.h>
+#include <linux/utsname.h>
+#include <linux/delay.h>
+#include <linux/reboot.h>
+#include <linux/init.h>
+#include <linux/mc146818rtc.h>
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+#include <linux/ptrace.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/ldt.h>
+#include <asm/processor.h>
+#include <asm/i387.h>
+#include <asm/irq.h>
+#include <asm/desc.h>
+#include <asm-xen/multicall.h>
+#include <asm-xen/xen-public/dom0_ops.h>
+#ifdef CONFIG_MATH_EMULATION
+#include <asm/math_emu.h>
+#endif
+
+#include <linux/irq.h>
+#include <linux/err.h>
+
+asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
+
+int hlt_counter;
+
+unsigned long boot_option_idle_override = 0;
+EXPORT_SYMBOL(boot_option_idle_override);
+
+/*
+ * Return saved PC of a blocked thread.
+ */
+unsigned long thread_saved_pc(struct task_struct *tsk)
+{
+ return ((unsigned long *)tsk->thread.esp)[3];
+}
+
+/*
+ * Power management idle function, if any..
+ */
+void (*pm_idle)(void);
+static cpumask_t cpu_idle_map;
+
+void disable_hlt(void)
+{
+ hlt_counter++;
+}
+
+EXPORT_SYMBOL(disable_hlt);
+
+void enable_hlt(void)
+{
+ hlt_counter--;
+}
+
+EXPORT_SYMBOL(enable_hlt);
+
+/* XXX XEN doesn't use default_idle(), poll_idle(). Use xen_idle() instead. */
+extern void stop_hz_timer(void);
+extern void start_hz_timer(void);
+void xen_idle(void)
+{
+ local_irq_disable();
+
+ if (need_resched()) {
+ local_irq_enable();
+ } else {
+ stop_hz_timer();
+ HYPERVISOR_block(); /* implicit local_irq_enable() */
+ start_hz_timer();
+ }
+}
+
+/*
+ * The idle thread. There's no useful work to be
+ * done, so just try to conserve power and have a
+ * low exit latency (ie sit in a loop waiting for
+ * somebody to say that they'd like to reschedule)
+ */
+void cpu_idle (void)
+{
+ int cpu = _smp_processor_id();
+
+ /* endless idle loop with no priority at all */
+ while (1) {
+ while (!need_resched()) {
+
+ if (cpu_isset(cpu, cpu_idle_map))
+ cpu_clear(cpu, cpu_idle_map);
+ rmb();
+
+ irq_stat[cpu].idle_timestamp = jiffies;
+ xen_idle();
+ }
+ schedule();
+ }
+}
+
+void cpu_idle_wait(void)
+{
+ int cpu;
+ cpumask_t map;
+
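+	/* Flag every online CPU, then wait until each has passed through the idle loop (which clears its flag). */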
+ for_each_online_cpu(cpu)
+ cpu_set(cpu, cpu_idle_map);
+
+ wmb();
+ do {
+ ssleep(1);
+ cpus_and(map, cpu_idle_map, cpu_online_map);
+ } while (!cpus_empty(map));
+}
+EXPORT_SYMBOL_GPL(cpu_idle_wait);
+
+/* XXX XEN doesn't use mwait_idle(), select_idle_routine(), idle_setup(). */
+/* Always use xen_idle() instead. */
+void __init select_idle_routine(const struct cpuinfo_x86 *c) {}
+
+void show_regs(struct pt_regs * regs)
+{
+ printk("\n");
+ printk("Pid: %d, comm: %20s\n", current->pid, current->comm);
+	printk("EIP: %04x:[<%08lx>] CPU: %d\n",0xffff & regs->xcs,regs->eip, smp_processor_id());
+ print_symbol("EIP is at %s\n", regs->eip);
+
+ if (regs->xcs & 2)
+ printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp);
+ printk(" EFLAGS: %08lx %s (%s)\n",
+ regs->eflags, print_tainted(), system_utsname.release);
+ printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
+ regs->eax,regs->ebx,regs->ecx,regs->edx);
+ printk("ESI: %08lx EDI: %08lx EBP: %08lx",
+ regs->esi, regs->edi, regs->ebp);
+ printk(" DS: %04x ES: %04x\n",
+ 0xffff & regs->xds,0xffff & regs->xes);
+
+	show_trace(NULL, &regs->esp);
+}
+
+/*
+ * This gets run with %ebx containing the
+ * function to call, and %edx containing
+ * the "args".
+ */
+extern void kernel_thread_helper(void);
+__asm__(".section .text\n"
+ ".align 4\n"
+ "kernel_thread_helper:\n\t"
+ "movl %edx,%eax\n\t"
+ "pushl %edx\n\t"
+ "call *%ebx\n\t"
+ "pushl %eax\n\t"
+ "call do_exit\n"
+ ".previous");
+
+/*
+ * Create a kernel thread
+ */
+int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
+{
+ struct pt_regs regs;
+
+	memset(&regs, 0, sizeof(regs));
+
+ regs.ebx = (unsigned long) fn;
+ regs.edx = (unsigned long) arg;
+
+ regs.xds = __USER_DS;
+ regs.xes = __USER_DS;
+ regs.orig_eax = -1;
+ regs.eip = (unsigned long) kernel_thread_helper;
+ regs.xcs = __KERNEL_CS;
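+	/* Bit 1 of EFLAGS is reserved and must always be set, hence the 0x2. */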
+ regs.eflags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2;
+
+ /* Ok, create the new process.. */
+	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
+}
+
+/*
+ * Free current thread data structures etc..
+ */
+void exit_thread(void)
+{
+ struct task_struct *tsk = current;
+ struct thread_struct *t = &tsk->thread;
+
+ /* The process may have allocated an io port bitmap... nuke it. */
+ if (unlikely(NULL != t->io_bitmap_ptr)) {
+ int cpu = get_cpu();
+ struct tss_struct *tss = &per_cpu(init_tss, cpu);
+
+ kfree(t->io_bitmap_ptr);
+ t->io_bitmap_ptr = NULL;
+ /*
+ * Careful, clear this in the TSS too:
+ */
+ memset(tss->io_bitmap, 0xff, tss->io_bitmap_max);
+ t->io_bitmap_max = 0;
+ tss->io_bitmap_owner = NULL;
+ tss->io_bitmap_max = 0;
+ tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
+ put_cpu();
+ }
+}
+
+void flush_thread(void)
+{
+ struct task_struct *tsk = current;
+
+ memset(tsk->thread.debugreg, 0, sizeof(unsigned long)*8);
+ memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
+ /*
+ * Forget coprocessor state..
+ */
+ clear_fpu(tsk);
+ clear_used_math();
+}
+
+void release_thread(struct task_struct *dead_task)
+{
+ if (dead_task->mm) {
+ // temporary debugging check
+ if (dead_task->mm->context.size) {
+			printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
+ dead_task->comm,
+ dead_task->mm->context.ldt,
+ dead_task->mm->context.size);
+ BUG();
+ }
+ }
+
+ release_vm86_irqs(dead_task);
+}
+
+/*
+ * This gets called before we allocate a new thread and copy
+ * the current task into it.
+ */
+void prepare_to_copy(struct task_struct *tsk)
+{
+ unlazy_fpu(tsk);
+}
+
+int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
+ unsigned long unused,
+ struct task_struct * p, struct pt_regs * regs)
+{
+ struct pt_regs * childregs;
+ struct task_struct *tsk;
+ int err;
+ unsigned long eflags;
+
+	childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p->thread_info)) - 1;
+ *childregs = *regs;
+ childregs->eax = 0;
+ childregs->esp = esp;
+
+ p->thread.esp = (unsigned long) childregs;
+ p->thread.esp0 = (unsigned long) (childregs+1);
+
+ p->thread.eip = (unsigned long) ret_from_fork;
+
+ savesegment(fs,p->thread.fs);
+ savesegment(gs,p->thread.gs);
+
+ tsk = current;
+ if (unlikely(NULL != tsk->thread.io_bitmap_ptr)) {
+ p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
+ if (!p->thread.io_bitmap_ptr) {
+ p->thread.io_bitmap_max = 0;
+ return -ENOMEM;
+ }
+ memcpy(p->thread.io_bitmap_ptr, tsk->thread.io_bitmap_ptr,
+ IO_BITMAP_BYTES);
+ }
+
+ /*
+ * Set a new TLS for the child thread?
+ */
+ if (clone_flags & CLONE_SETTLS) {
+ struct desc_struct *desc;
+ struct user_desc info;
+ int idx;
+
+ err = -EFAULT;
+		if (copy_from_user(&info, (void __user *)childregs->esi, sizeof(info)))
+ goto out;
+ err = -EINVAL;
+ if (LDT_empty(&info))
+ goto out;
+
+ idx = info.entry_number;
+ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+ goto out;
+
+ desc = p->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
+ desc->a = LDT_entry_a(&info);
+ desc->b = LDT_entry_b(&info);
+ }
+
+
+ __asm__ __volatile__ ( "pushfl; popl %0" : "=r" (eflags) : );
+ p->thread.io_pl = (eflags >> 12) & 3;
+
+ err = 0;
+ out:
+ if (err && p->thread.io_bitmap_ptr) {
+ kfree(p->thread.io_bitmap_ptr);
+ p->thread.io_bitmap_max = 0;
+ }
+ return err;
+}
+
+/*
+ * fill in the user structure for a core dump..
+ */
+void dump_thread(struct pt_regs * regs, struct user * dump)
+{
+ int i;
+
+/* changed the size calculations - should hopefully work better. lbt */
+ dump->magic = CMAGIC;
+ dump->start_code = 0;
+ dump->start_stack = regs->esp & ~(PAGE_SIZE - 1);
+ dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
+	dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
+ dump->u_dsize -= dump->u_tsize;
+ dump->u_ssize = 0;
+ for (i = 0; i < 8; i++)
+ dump->u_debugreg[i] = current->thread.debugreg[i];
+
+ if (dump->start_stack < TASK_SIZE)
+		dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;
+
+ dump->regs.ebx = regs->ebx;
+ dump->regs.ecx = regs->ecx;
+ dump->regs.edx = regs->edx;
+ dump->regs.esi = regs->esi;
+ dump->regs.edi = regs->edi;
+ dump->regs.ebp = regs->ebp;
+ dump->regs.eax = regs->eax;
+ dump->regs.ds = regs->xds;
+ dump->regs.es = regs->xes;
+ savesegment(fs,dump->regs.fs);
+ savesegment(gs,dump->regs.gs);
+ dump->regs.orig_eax = regs->orig_eax;
+ dump->regs.eip = regs->eip;
+ dump->regs.cs = regs->xcs;
+ dump->regs.eflags = regs->eflags;
+ dump->regs.esp = regs->esp;
+ dump->regs.ss = regs->xss;
+
+ dump->u_fpvalid = dump_fpu (regs, &dump->i387);
+}
+
+/*
+ * Capture the user space registers if the task is not running (in user space)
+ */
+int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
+{
+ struct pt_regs ptregs;
+
+	ptregs = *(struct pt_regs *)
+		((unsigned long)tsk->thread_info+THREAD_SIZE - sizeof(ptregs));
+ ptregs.xcs &= 0xffff;
+ ptregs.xds &= 0xffff;
+ ptregs.xes &= 0xffff;
+ ptregs.xss &= 0xffff;
+
+ elf_core_copy_regs(regs, &ptregs);
+
+ boot_option_idle_override = 1;
+ return 1;
+}
+
+static inline void
+handle_io_bitmap(struct thread_struct *next, struct tss_struct *tss)
+{
+ if (!next->io_bitmap_ptr) {
+ /*
+ * Disable the bitmap via an invalid offset. We still cache
+ * the previous bitmap owner and the IO bitmap contents:
+ */
+ tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
+ return;
+ }
+ if (likely(next == tss->io_bitmap_owner)) {
+ /*
+ * Previous owner of the bitmap (hence the bitmap content)
+	 * matches the next task, we don't have to do anything but
+ * to set a valid offset in the TSS:
+ */
+ tss->io_bitmap_base = IO_BITMAP_OFFSET;
+ return;
+ }
+ /*
+	 * Lazy TSS I/O bitmap copy. We set an invalid offset here and
+	 * let the task get a GPF if it performs an I/O instruction.
+	 * The GPF handler verifies that the faulting task has a valid
+	 * I/O bitmap and, if so, does the real copy and restarts the
+	 * instruction. This saves us redundant copies when the
+	 * currently switched task does not perform any I/O during its
+	 * timeslice.
+ */
+ tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
+}
+/*
+ * This special macro can be used to load a debugging register
+ */
+#define loaddebug(thread,register) \
+ HYPERVISOR_set_debugreg((register), \
+ (thread->debugreg[register]))
+
+/*
+ * switch_to(x,y) should switch tasks from x to y.
+ *
+ * We fsave/fwait so that an exception goes off at the right time
+ * (as a call from the fsave or fwait in effect) rather than to
+ * the wrong process. Lazy FP saving no longer makes any sense
+ * with modern CPU's, and this simplifies a lot of things (SMP
+ * and UP become the same).
+ *
+ * NOTE! We used to use the x86 hardware context switching. The
+ * reason for not using it any more becomes apparent when you
+ * try to recover gracefully from saved state that is no longer
+ * valid (stale segment register values in particular). With the
+ * hardware task-switch, there is no way to fix up bad state in
+ * a reasonable manner.
+ *
+ * The fact that Intel documents the hardware task-switching to
+ * be slow is a fairly red herring - this code is not noticeably
+ * faster. However, there _is_ some room for improvement here,
+ * so the performance issues may eventually be a valid point.
+ * More important, however, is the fact that this allows us much
+ * more flexibility.
+ *
+ * The return value (in %eax) will be the "prev" task after
+ * the task-switch, and shows up in ret_from_fork in entry.S,
+ * for example.
+ */
+struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+{
+ struct thread_struct *prev = &prev_p->thread,
+ *next = &next_p->thread;
+ int cpu = smp_processor_id();
+ struct tss_struct *tss = &per_cpu(init_tss, cpu);
+ dom0_op_t op;
+
+ /* NB. No need to disable interrupts as already done in sched.c */
+ /* __cli(); */
+
+ /*
+ * Save away %fs and %gs. No need to save %es and %ds, as
+ * those are always kernel segments while inside the kernel.
+ */
+ asm volatile("movl %%fs,%0":"=m" (*(int *)&prev->fs));
+ asm volatile("movl %%gs,%0":"=m" (*(int *)&prev->gs));
+
+ /*
+ * We clobber FS and GS here so that we avoid a GPF when restoring
+ * previous task's FS/GS values in Xen when the LDT is switched.
+ */
+ __asm__ __volatile__ (
+ "xorl %%eax,%%eax; movl %%eax,%%fs; movl %%eax,%%gs" : : :
+ "eax" );
+
+ MULTICALL_flush_page_update_queue();
+
+	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
+
+ /*
+ * This is basically '__unlazy_fpu', except that we queue a
+ * multicall to indicate FPU task switch, rather than
+ * synchronously trapping to Xen.
+ */
+ if (prev_p->thread_info->status & TS_USEDFPU) {
+ __save_init_fpu(prev_p); /* _not_ save_init_fpu() */
+ queue_multicall0(__HYPERVISOR_fpu_taskswitch);
+ }
+
+ /*
+ * Reload esp0, LDT and the page table pointer:
+ * This is load_esp0(tss, next) with a multicall.
+ */
+ tss->esp0 = next->esp0;
+ queue_multicall2(__HYPERVISOR_stack_switch, tss->ss0, tss->esp0);
+
+ /*
+ * Load the per-thread Thread-Local Storage descriptor.
+ * This is load_TLS(next, cpu) with multicalls.
+ */
+#define C(i) do { \
+ if (unlikely(next->tls_array[i].a != prev->tls_array[i].a || \
+ next->tls_array[i].b != prev->tls_array[i].b)) \
+ queue_multicall3(__HYPERVISOR_update_descriptor, \
+ virt_to_machine(&get_cpu_gdt_table(cpu) \
+ [GDT_ENTRY_TLS_MIN + i]), \
+ ((u32 *)&next->tls_array[i])[0], \
+ ((u32 *)&next->tls_array[i])[1]); \
+} while (0)
+ C(0); C(1); C(2);
+#undef C
+
+ if (xen_start_info.flags & SIF_PRIVILEGED) {
+ op.cmd = DOM0_IOPL;
+ op.u.iopl.domain = DOMID_SELF;
+ op.u.iopl.iopl = next->io_pl;
+ op.interface_version = DOM0_INTERFACE_VERSION;
+ queue_multicall1(__HYPERVISOR_dom0_op, (unsigned long)&op);
+ }
+
+ /* EXECUTE ALL TASK SWITCH XEN SYSCALLS AT THIS POINT. */
+ execute_multicall_list();
+ /* __sti(); */
+
+ /*
+ * Restore %fs and %gs if needed.
+ */
+ if (unlikely(next->fs | next->gs)) {
+ loadsegment(fs, next->fs);
+ loadsegment(gs, next->gs);
+ }
+
+ /*
+ * Now maybe reload the debug registers
+ */
+ if (unlikely(next->debugreg[7])) {
+ loaddebug(next, 0);
+ loaddebug(next, 1);
+ loaddebug(next, 2);
+ loaddebug(next, 3);
+ /* no 4 and 5 */
+ loaddebug(next, 6);
+ loaddebug(next, 7);
+ }
+
+ if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr))
+ handle_io_bitmap(next, tss);
+
+ return prev_p;
+}
+
+asmlinkage int sys_fork(struct pt_regs regs)
+{
+	return do_fork(SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
+}
+
+asmlinkage int sys_clone(struct pt_regs regs)
+{
+ unsigned long clone_flags;
+ unsigned long newsp;
+ int __user *parent_tidptr, *child_tidptr;
+
+ clone_flags = regs.ebx;
+ newsp = regs.ecx;
+ parent_tidptr = (int __user *)regs.edx;
+ child_tidptr = (int __user *)regs.edi;
+ if (!newsp)
+ newsp = regs.esp;
+	return do_fork(clone_flags, newsp, &regs, 0, parent_tidptr, child_tidptr);
+}
+
+/*
+ * This is trivial, and on the face of it looks like it
+ * could equally well be done in user mode.
+ *
+ * Not so, for quite unobvious reasons - register pressure.
+ * In user mode vfork() cannot have a stack frame, and if
+ * done by calling the "clone()" system call directly, you
+ * do not have enough call-clobbered registers to hold all
+ * the information you need.
+ */
+asmlinkage int sys_vfork(struct pt_regs regs)
+{
+	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
+}
+
+/*
+ * sys_execve() executes a new program.
+ */
+asmlinkage int sys_execve(struct pt_regs regs)
+{
+ int error;
+ char * filename;
+
+ filename = getname((char __user *) regs.ebx);
+ error = PTR_ERR(filename);
+ if (IS_ERR(filename))
+ goto out;
+ error = do_execve(filename,
+ (char __user * __user *) regs.ecx,
+ (char __user * __user *) regs.edx,
+			  &regs);
+ if (error == 0) {
+ task_lock(current);
+ current->ptrace &= ~PT_DTRACE;
+ task_unlock(current);
+ /* Make sure we don't return using sysenter.. */
+ set_thread_flag(TIF_IRET);
+ }
+ putname(filename);
+out:
+ return error;
+}
+
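+/* Highest stack offsets at which a saved esp/ebp may still sit; used by get_wchan() to bound the frame walk. */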
+#define top_esp (THREAD_SIZE - sizeof(unsigned long))
+#define top_ebp (THREAD_SIZE - 2*sizeof(unsigned long))
+
+unsigned long get_wchan(struct task_struct *p)
+{
+ unsigned long ebp, esp, eip;
+ unsigned long stack_page;
+ int count = 0;
+ if (!p || p == current || p->state == TASK_RUNNING)
+ return 0;
+ stack_page = (unsigned long)p->thread_info;
+ esp = p->thread.esp;
+ if (!stack_page || esp < stack_page || esp > top_esp+stack_page)
+ return 0;
+ /* include/asm-i386/system.h:switch_to() pushes ebp last. */
+ ebp = *(unsigned long *) esp;
+ do {
+ if (ebp < stack_page || ebp > top_ebp+stack_page)
+ return 0;
+ eip = *(unsigned long *) (ebp+4);
+ if (!in_sched_functions(eip))
+ return eip;
+ ebp = *(unsigned long *) ebp;
+ } while (count++ < 16);
+ return 0;
+}
+
+/*
+ * sys_alloc_thread_area: get a yet unused TLS descriptor index.
+ */
+static int get_free_idx(void)
+{
+	struct thread_struct *t = &current->thread;
+ int idx;
+
+ for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
+ if (desc_empty(t->tls_array + idx))
+ return idx + GDT_ENTRY_TLS_MIN;
+ return -ESRCH;
+}
+
+/*
+ * Set a given TLS descriptor:
+ */
+asmlinkage int sys_set_thread_area(struct user_desc __user *u_info)
+{
+	struct thread_struct *t = &current->thread;
+ struct user_desc info;
+ struct desc_struct *desc;
+ int cpu, idx;
+
+ if (copy_from_user(&info, u_info, sizeof(info)))
+ return -EFAULT;
+ idx = info.entry_number;
+
+ /*
+ * index -1 means the kernel should try to find and
+ * allocate an empty descriptor:
+ */
+ if (idx == -1) {
+ idx = get_free_idx();
+ if (idx < 0)
+ return idx;
+ if (put_user(idx, &u_info->entry_number))
+ return -EFAULT;
+ }
+
+ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+ return -EINVAL;
+
+ desc = t->tls_array + idx - GDT_ENTRY_TLS_MIN;
+
+ /*
+ * We must not get preempted while modifying the TLS.
+ */
+ cpu = get_cpu();
+
+ if (LDT_empty(&info)) {
+ desc->a = 0;
+ desc->b = 0;
+ } else {
+ desc->a = LDT_entry_a(&info);
+ desc->b = LDT_entry_b(&info);
+ }
+ load_TLS(t, cpu);
+
+ put_cpu();
+
+ return 0;
+}
+
+/*
+ * Get the current Thread-Local Storage area:
+ */
+
+#define GET_BASE(desc) ( \
+ (((desc)->a >> 16) & 0x0000ffff) | \
+ (((desc)->b << 16) & 0x00ff0000) | \
+ ( (desc)->b & 0xff000000) )
+
+#define GET_LIMIT(desc) ( \
+ ((desc)->a & 0x0ffff) | \
+ ((desc)->b & 0xf0000) )
+
+#define GET_32BIT(desc) (((desc)->b >> 22) & 1)
+#define GET_CONTENTS(desc) (((desc)->b >> 10) & 3)
+#define GET_WRITABLE(desc) (((desc)->b >> 9) & 1)
+#define GET_LIMIT_PAGES(desc) (((desc)->b >> 23) & 1)
+#define GET_PRESENT(desc) (((desc)->b >> 15) & 1)
+#define GET_USEABLE(desc) (((desc)->b >> 20) & 1)
+
+asmlinkage int sys_get_thread_area(struct user_desc __user *u_info)
+{
+ struct user_desc info;
+ struct desc_struct *desc;
+ int idx;
+
+ if (get_user(idx, &u_info->entry_number))
+ return -EFAULT;
+ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+ return -EINVAL;
+
+ desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
+
+ info.entry_number = idx;
+ info.base_addr = GET_BASE(desc);
+ info.limit = GET_LIMIT(desc);
+ info.seg_32bit = GET_32BIT(desc);
+ info.contents = GET_CONTENTS(desc);
+ info.read_exec_only = !GET_WRITABLE(desc);
+ info.limit_in_pages = GET_LIMIT_PAGES(desc);
+ info.seg_not_present = !GET_PRESENT(desc);
+ info.useable = GET_USEABLE(desc);
+
+ if (copy_to_user(u_info, &info, sizeof(info)))
+ return -EFAULT;
+ return 0;
+}
+
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/arch/xen/i386/kernel/setup.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/setup.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,1638 @@
+/*
+ * linux/arch/i386/kernel/setup.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ *
+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
+ *
+ * Memory region support
+ * David Parsons <orc@xxxxxxxxxxxxxx>, July-August 1999
+ *
+ * Added E820 sanitization routine (removes overlapping memory regions);
+ * Brian Moyle <bmoyle@xxxxxxxxxx>, February 2001
+ *
+ * Moved CPU detection code to cpu/${cpu}.c
+ * Patrick Mochel <mochel@xxxxxxxx>, March 2002
+ *
+ * Provisions for empty E820 memory regions (reported by certain BIOSes).
+ * Alex Achenbach <xela@xxxxxxx>, December 2002.
+ *
+ */
+
+/*
+ * This file handles the architecture-dependent parts of initialization
+ */
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/ioport.h>
+#include <linux/acpi.h>
+#include <linux/apm_bios.h>
+#include <linux/initrd.h>
+#include <linux/bootmem.h>
+#include <linux/seq_file.h>
+#include <linux/console.h>
+#include <linux/mca.h>
+#include <linux/root_dev.h>
+#include <linux/highmem.h>
+#include <linux/module.h>
+#include <linux/efi.h>
+#include <linux/init.h>
+#include <linux/edd.h>
+#include <linux/kernel.h>
+#include <linux/notifier.h>
+#include <video/edid.h>
+#include <asm/e820.h>
+#include <asm/mpspec.h>
+#include <asm/setup.h>
+#include <asm/arch_hooks.h>
+#include <asm/sections.h>
+#include <asm/io_apic.h>
+#include <asm/ist.h>
+#include <asm/io.h>
+#include <asm-xen/hypervisor.h>
+#include "setup_arch_pre.h"
+#include <bios_ebda.h>
+
+/* Allows setting of maximum possible memory size */
+static unsigned long xen_override_max_pfn;
+
+static int xen_panic_event(struct notifier_block *, unsigned long, void *);
+static struct notifier_block xen_panic_block = {
+ xen_panic_event, NULL, 0 /* try to go last */
+};
+
+int disable_pse __initdata = 0;
+
+/*
+ * Machine setup..
+ */
+
+#ifdef CONFIG_EFI
+int efi_enabled = 0;
+EXPORT_SYMBOL(efi_enabled);
+#endif
+
+/* cpu data as detected by the assembly code in head.S */
+struct cpuinfo_x86 new_cpu_data __initdata = { 0, 0, 0, 0, -1, 0, 1, 0, -1 };
+/* common cpu data for all cpus */
+struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1, 0, 1, 0, -1 };
+
+unsigned long mmu_cr4_features;
+EXPORT_SYMBOL_GPL(mmu_cr4_features);
+
+#ifdef CONFIG_ACPI_INTERPRETER
+ int acpi_disabled = 0;
+#else
+ int acpi_disabled = 1;
+#endif
+EXPORT_SYMBOL(acpi_disabled);
+
+#ifdef CONFIG_ACPI_BOOT
+int __initdata acpi_force = 0;
+extern acpi_interrupt_flags acpi_sci_flags;
+#endif
+
+/* for MCA, but anyone else can use it if they want */
+unsigned int machine_id;
+unsigned int machine_submodel_id;
+unsigned int BIOS_revision;
+unsigned int mca_pentium_flag;
+
+/* For PCI or other memory-mapped resources */
+unsigned long pci_mem_start = 0x10000000;
+
+/* Boot loader ID as an integer, for the benefit of proc_dointvec */
+int bootloader_type;
+
+/* user-defined highmem size */
+static unsigned int highmem_pages = -1;
+
+/*
+ * Setup options
+ */
+struct drive_info_struct { char dummy[32]; } drive_info;
+struct screen_info screen_info;
+struct apm_info apm_info;
+struct sys_desc_table_struct {
+ unsigned short length;
+ unsigned char table[0];
+};
+struct edid_info edid_info;
+struct ist_info ist_info;
+struct e820map e820;
+
+unsigned char aux_device_present;
+
+extern void early_cpu_init(void);
+extern void dmi_scan_machine(void);
+extern void generic_apic_probe(char *);
+extern int root_mountflags;
+
+unsigned long saved_videomode;
+
+#define RAMDISK_IMAGE_START_MASK 0x07FF
+#define RAMDISK_PROMPT_FLAG 0x8000
+#define RAMDISK_LOAD_FLAG 0x4000
+
+static char command_line[COMMAND_LINE_SIZE];
+
+unsigned char __initdata boot_params[PARAM_SIZE];
+
+static struct resource data_resource = {
+ .name = "Kernel data",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_BUSY | IORESOURCE_MEM
+};
+
+static struct resource code_resource = {
+ .name = "Kernel code",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_BUSY | IORESOURCE_MEM
+};
+
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+static struct resource system_rom_resource = {
+ .name = "System ROM",
+ .start = 0xf0000,
+ .end = 0xfffff,
+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+};
+
+static struct resource extension_rom_resource = {
+ .name = "Extension ROM",
+ .start = 0xe0000,
+ .end = 0xeffff,
+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+};
+
+static struct resource adapter_rom_resources[] = { {
+ .name = "Adapter ROM",
+ .start = 0xc8000,
+ .end = 0,
+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+}, {
+ .name = "Adapter ROM",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+}, {
+ .name = "Adapter ROM",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+}, {
+ .name = "Adapter ROM",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+}, {
+ .name = "Adapter ROM",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+}, {
+ .name = "Adapter ROM",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+} };
+
+#define ADAPTER_ROM_RESOURCES \
+ (sizeof adapter_rom_resources / sizeof adapter_rom_resources[0])
+
+static struct resource video_rom_resource = {
+ .name = "Video ROM",
+ .start = 0xc0000,
+ .end = 0xc7fff,
+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+};
+#endif
+
+static struct resource video_ram_resource = {
+ .name = "Video RAM area",
+ .start = 0xa0000,
+ .end = 0xbffff,
+ .flags = IORESOURCE_BUSY | IORESOURCE_MEM
+};
+
+static struct resource standard_io_resources[] = { {
+ .name = "dma1",
+ .start = 0x0000,
+ .end = 0x001f,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
+}, {
+ .name = "pic1",
+ .start = 0x0020,
+ .end = 0x0021,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
+}, {
+ .name = "timer0",
+ .start = 0x0040,
+ .end = 0x0043,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
+}, {
+ .name = "timer1",
+ .start = 0x0050,
+ .end = 0x0053,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
+}, {
+ .name = "keyboard",
+ .start = 0x0060,
+ .end = 0x006f,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
+}, {
+ .name = "dma page reg",
+ .start = 0x0080,
+ .end = 0x008f,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
+}, {
+ .name = "pic2",
+ .start = 0x00a0,
+ .end = 0x00a1,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
+}, {
+ .name = "dma2",
+ .start = 0x00c0,
+ .end = 0x00df,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
+}, {
+ .name = "fpu",
+ .start = 0x00f0,
+ .end = 0x00ff,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
+} };
+
+#define STANDARD_IO_RESOURCES \
+ (sizeof standard_io_resources / sizeof standard_io_resources[0])
+
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
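+/* Option ROMs begin with the 0xAA55 signature word. */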
+#define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
+
+static int __init romchecksum(unsigned char *rom, unsigned long length)
+{
+ unsigned char *p, sum = 0;
+
+ for (p = rom; p < rom + length; p++)
+ sum += *p;
+ return sum == 0;
+}
+
+static void __init probe_roms(void)
+{
+ unsigned long start, length, upper;
+ unsigned char *rom;
+ int i;
+
+ /* video rom */
+ upper = adapter_rom_resources[0].start;
+ for (start = video_rom_resource.start; start < upper; start += 2048) {
+ rom = isa_bus_to_virt(start);
+ if (!romsignature(rom))
+ continue;
+
+ video_rom_resource.start = start;
+
+ /* 0 < length <= 0x7f * 512, historically */
+ length = rom[2] * 512;
+
+ /* if checksum okay, trust length byte */
+ if (length && romchecksum(rom, length))
+ video_rom_resource.end = start + length - 1;
+
+ request_resource(&iomem_resource, &video_rom_resource);
+ break;
+ }
+
+ start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
+ if (start < upper)
+ start = upper;
+
+ /* system rom */
+ request_resource(&iomem_resource, &system_rom_resource);
+ upper = system_rom_resource.start;
+
+ /* check for extension rom (ignore length byte!) */
+ rom = isa_bus_to_virt(extension_rom_resource.start);
+ if (romsignature(rom)) {
+		length = extension_rom_resource.end - extension_rom_resource.start + 1;
+ if (romchecksum(rom, length)) {
+			request_resource(&iomem_resource, &extension_rom_resource);
+ upper = extension_rom_resource.start;
+ }
+ }
+
+ /* check for adapter roms on 2k boundaries */
+ for (i = 0; i < ADAPTER_ROM_RESOURCES && start < upper; start += 2048) {
+ rom = isa_bus_to_virt(start);
+ if (!romsignature(rom))
+ continue;
+
+ /* 0 < length <= 0x7f * 512, historically */
+ length = rom[2] * 512;
+
+ /* but accept any length that fits if checksum okay */
+		if (!length || start + length > upper || !romchecksum(rom, length))
+ continue;
+
+ adapter_rom_resources[i].start = start;
+ adapter_rom_resources[i].end = start + length - 1;
+ request_resource(&iomem_resource, &adapter_rom_resources[i]);
+
+ start = adapter_rom_resources[i++].end & ~2047UL;
+ }
+}
+#endif
+
+/*
+ * Point at the empty zero page to start with. We map the real shared_info
+ * page as soon as fixmap is up and running.
+ */
+shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
+EXPORT_SYMBOL(HYPERVISOR_shared_info);
+
+unsigned int *phys_to_machine_mapping, *pfn_to_mfn_frame_list;
+EXPORT_SYMBOL(phys_to_machine_mapping);
+
+multicall_entry_t multicall_list[8];
+int nr_multicall_ents = 0;
+
+/* Raw start-of-day parameters from the hypervisor. */
+union xen_start_info_union xen_start_info_union;
+
+static void __init limit_regions(unsigned long long size)
+{
+ unsigned long long current_addr = 0;
+ int i;
+
+ if (efi_enabled) {
+ for (i = 0; i < memmap.nr_map; i++) {
+ current_addr = memmap.map[i].phys_addr +
+ (memmap.map[i].num_pages << 12);
+ if (memmap.map[i].type == EFI_CONVENTIONAL_MEMORY) {
+ if (current_addr >= size) {
+ memmap.map[i].num_pages -=
+					(((current_addr-size) + PAGE_SIZE-1) >> PAGE_SHIFT);
+ memmap.nr_map = i + 1;
+ return;
+ }
+ }
+ }
+ }
+ for (i = 0; i < e820.nr_map; i++) {
+ if (e820.map[i].type == E820_RAM) {
+ current_addr = e820.map[i].addr + e820.map[i].size;
+ if (current_addr >= size) {
+ e820.map[i].size -= current_addr-size;
+ e820.nr_map = i + 1;
+ return;
+ }
+ }
+ }
+}
+
+static void __init add_memory_region(unsigned long long start,
+ unsigned long long size, int type)
+{
+ int x;
+
+ if (!efi_enabled) {
+ x = e820.nr_map;
+
+ if (x == E820MAX) {
+			printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
+ return;
+ }
+
+ e820.map[x].addr = start;
+ e820.map[x].size = size;
+ e820.map[x].type = type;
+ e820.nr_map++;
+ }
+} /* add_memory_region */
+
+#define E820_DEBUG 1
+
+static void __init print_memory_map(char *who)
+{
+ int i;
+
+ for (i = 0; i < e820.nr_map; i++) {
+ printk(" %s: %016Lx - %016Lx ", who,
+ e820.map[i].addr,
+ e820.map[i].addr + e820.map[i].size);
+ switch (e820.map[i].type) {
+ case E820_RAM: printk("(usable)\n");
+ break;
+ case E820_RESERVED:
+ printk("(reserved)\n");
+ break;
+ case E820_ACPI:
+ printk("(ACPI data)\n");
+ break;
+ case E820_NVS:
+ printk("(ACPI NVS)\n");
+ break;
+ default: printk("type %lu\n", e820.map[i].type);
+ break;
+ }
+ }
+}
+
+#if 0
+/*
+ * Sanitize the BIOS e820 map.
+ *
+ * Some e820 responses include overlapping entries. The following
+ * replaces the original e820 map with a new one, removing overlaps.
+ *
+ */
+struct change_member {
+ struct e820entry *pbios; /* pointer to original bios entry */
+ unsigned long long addr; /* address for this change point */
+};
+struct change_member change_point_list[2*E820MAX] __initdata;
+struct change_member *change_point[2*E820MAX] __initdata;
+struct e820entry *overlap_list[E820MAX] __initdata;
+struct e820entry new_bios[E820MAX] __initdata;
+
+static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
+{
+ struct change_member *change_tmp;
+ unsigned long current_type, last_type;
+ unsigned long long last_addr;
+ int chgidx, still_changing;
+ int overlap_entries;
+ int new_bios_entry;
+ int old_nr, new_nr, chg_nr;
+ int i;
+
+ /*
+	   Visually we're performing the following (1,2,3,4 = memory types)...
+
+ Sample memory map (w/overlaps):
+ ____22__________________
+ ______________________4_
+ ____1111________________
+ _44_____________________
+ 11111111________________
+ ____________________33__
+ ___________44___________
+ __________33333_________
+ ______________22________
+ ___________________2222_
+ _________111111111______
+ _____________________11_
+ _________________4______
+
+ Sanitized equivalent (no overlap):
+ 1_______________________
+ _44_____________________
+ ___1____________________
+ ____22__________________
+ ______11________________
+ _________1______________
+ __________3_____________
+ ___________44___________
+ _____________33_________
+ _______________2________
+ ________________1_______
+ _________________4______
+ ___________________2____
+ ____________________33__
+ ______________________4_
+ */
+
+ /* if there's only one memory region, don't bother */
+ if (*pnr_map < 2)
+ return -1;
+
+ old_nr = *pnr_map;
+
+ /* bail out if we find any unreasonable addresses in bios map */
+ for (i=0; i<old_nr; i++)
+ if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
+ return -1;
+
+ /* create pointers for initial change-point information (for sorting) */
+ for (i=0; i < 2*old_nr; i++)
+ change_point[i] = &change_point_list[i];
+
+ /* record all known change-points (starting and ending addresses),
+ omitting those that are for empty memory regions */
+ chgidx = 0;
+ for (i=0; i < old_nr; i++) {
+ if (biosmap[i].size != 0) {
+ change_point[chgidx]->addr = biosmap[i].addr;
+ change_point[chgidx++]->pbios = &biosmap[i];
+			change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size;
+ change_point[chgidx++]->pbios = &biosmap[i];
+ }
+ }
+ chg_nr = chgidx; /* true number of change-points */
+
+ /* sort change-point list by memory addresses (low -> high) */
+ still_changing = 1;
+ while (still_changing) {
+ still_changing = 0;
+ for (i=1; i < chg_nr; i++) {
+ /* if <current_addr> > <last_addr>, swap */
+			/* or, if current=<start_addr> & last=<end_addr>, swap */
+ if ((change_point[i]->addr < change_point[i-1]->addr) ||
+			    ((change_point[i]->addr == change_point[i-1]->addr) &&
+			     (change_point[i]->addr == change_point[i]->pbios->addr) &&
+			     (change_point[i-1]->addr != change_point[i-1]->pbios->addr))
+ )
+ {
+ change_tmp = change_point[i];
+ change_point[i] = change_point[i-1];
+ change_point[i-1] = change_tmp;
+ still_changing=1;
+ }
+ }
+ }
+
+ /* create a new bios memory map, removing overlaps */
+ overlap_entries=0; /* number of entries in the overlap table */
+ new_bios_entry=0; /* index for creating new bios map entries */
+ last_type = 0; /* start with undefined memory type */
+ last_addr = 0; /* start with 0 as last starting address */
+	/* loop through change-points, determining effect on the new bios map */
+ for (chgidx=0; chgidx < chg_nr; chgidx++)
+ {
+ /* keep track of all overlapping bios entries */
+		if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr)
+ {
+			/* add map entry to overlap list (> 1 entry implies an overlap) */
+			overlap_list[overlap_entries++]=change_point[chgidx]->pbios;
+ }
+ else
+ {
+			/* remove entry from list (order independent, so swap with last) */
+ for (i=0; i<overlap_entries; i++)
+ {
+				if (overlap_list[i] == change_point[chgidx]->pbios)
+					overlap_list[i] = overlap_list[overlap_entries-1];
+ }
+ overlap_entries--;
+ }
+		/* if there are overlapping entries, decide which "type" to use */
+		/* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
+ current_type = 0;
+ for (i=0; i<overlap_entries; i++)
+ if (overlap_list[i]->type > current_type)
+ current_type = overlap_list[i]->type;
+		/* continue building up new bios map based on this information */
+ if (current_type != last_type) {
+ if (last_type != 0) {
+ new_bios[new_bios_entry].size =
+ change_point[chgidx]->addr - last_addr;
+				/* move forward only if the new size was non-zero */
+ if (new_bios[new_bios_entry].size != 0)
+ if (++new_bios_entry >= E820MAX)
+						break; /* no more space left for new bios entries */
+ }
+ if (current_type != 0) {
+				new_bios[new_bios_entry].addr = change_point[chgidx]->addr;
+ new_bios[new_bios_entry].type = current_type;
+ last_addr=change_point[chgidx]->addr;
+ }
+ last_type = current_type;
+ }
+ }
+ new_nr = new_bios_entry; /* retain count for new bios entries */
+
+ /* copy new bios mapping into original location */
+ memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry));
+ *pnr_map = new_nr;
+
+ return 0;
+}
+
+/*
+ * Copy the BIOS e820 map into a safe place.
+ *
+ * Sanity-check it while we're at it..
+ *
+ * If we're lucky and live on a modern system, the setup code
+ * will have given us a memory map that we can use to properly
+ * set up memory. If we aren't, we'll fake a memory map.
+ *
+ * We check to see that the memory map contains at least 2 elements
+ * before we'll use it, because the detection code in setup.S may
+ * not be perfect and most every PC known to man has two memory
+ * regions: one from 0 to 640k, and one from 1mb up. (The IBM
+ * thinkpad 560x, for example, does not cooperate with the memory
+ * detection code.)
+ */
+static int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
+{
+ /* Only one memory region (or negative)? Ignore it */
+ if (nr_map < 2)
+ return -1;
+
+ do {
+ unsigned long long start = biosmap->addr;
+ unsigned long long size = biosmap->size;
+ unsigned long long end = start + size;
+ unsigned long type = biosmap->type;
+
+ /* Overflow in 64 bits? Ignore the memory map. */
+ if (start > end)
+ return -1;
+
+ /*
+ * Some BIOSes claim RAM in the 640k - 1M region.
+ * Not right. Fix it up.
+ */
+ if (type == E820_RAM) {
+ if (start < 0x100000ULL && end > 0xA0000ULL) {
+ if (start < 0xA0000ULL)
+				add_memory_region(start, 0xA0000ULL-start, type);
+ if (end <= 0x100000ULL)
+ continue;
+ start = 0x100000ULL;
+ size = end - start;
+ }
+ }
+ add_memory_region(start, size, type);
+ } while (biosmap++,--nr_map);
+ return 0;
+}
+#endif
+
+#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
+struct edd edd;
+#ifdef CONFIG_EDD_MODULE
+EXPORT_SYMBOL(edd);
+#endif
+/**
+ * copy_edd() - Copy the BIOS EDD information
+ * from boot_params into a safe place.
+ *
+ */
+static inline void copy_edd(void)
+{
+ memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
+ memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
+ edd.mbr_signature_nr = EDD_MBR_SIG_NR;
+ edd.edd_info_nr = EDD_NR;
+}
+#else
+static inline void copy_edd(void)
+{
+}
+#endif
+
+/*
+ * Do NOT EVER look at the BIOS memory size location.
+ * It does not work on many machines.
+ */
+#define LOWMEMSIZE() (0x9f000)
+
+static void __init parse_cmdline_early (char ** cmdline_p)
+{
+ char c = ' ', *to = command_line, *from = saved_command_line;
+ int len = 0;
+ int userdef = 0;
+
+ memcpy(saved_command_line, xen_start_info.cmd_line, MAX_CMDLINE);
+ /* Save unparsed command line copy for /proc/cmdline */
+ saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
+
+ for (;;) {
+ if (c != ' ')
+ goto next_char;
+ /*
+ * "mem=nopentium" disables the 4MB page tables.
+ * "mem=XXX[kKmM]" defines a memory region from HIGH_MEM
+ * to <mem>, overriding the bios size.
+ * "memmap=XXX[KkmM]@XXX[KkmM]" defines a memory region from
+ * <start> to <start>+<mem>, overriding the bios size.
+ *
+ * HPA tells me bootloaders need to parse mem=, so no new
+ * option should be mem= [also see Documentation/i386/boot.txt]
+ */
+ if (!memcmp(from, "mem=", 4)) {
+ if (to != command_line)
+ to--;
+ if (!memcmp(from+4, "nopentium", 9)) {
+ from += 9+4;
+				clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
+ disable_pse = 1;
+ } else {
+ /* If the user specifies memory size, we
+ * limit the BIOS-provided memory map to
+ * that size. exactmap can be used to specify
+ * the exact map. mem=number can be used to
+ * trim the existing memory map.
+ */
+ unsigned long long mem_size;
+
+ mem_size = memparse(from+4, &from);
+#if 0
+ limit_regions(mem_size);
+ userdef=1;
+#else
+ xen_override_max_pfn =
+ (unsigned long)(mem_size>>PAGE_SHIFT);
+#endif
+ }
+ }
+
+ else if (!memcmp(from, "memmap=", 7)) {
+ if (to != command_line)
+ to--;
+ if (!memcmp(from+7, "exactmap", 8)) {
+ from += 8+7;
+ e820.nr_map = 0;
+ userdef = 1;
+ } else {
+ /* If the user specifies memory size, we
+ * limit the BIOS-provided memory map to
+ * that size. exactmap can be used to specify
+ * the exact map. mem=number can be used to
+ * trim the existing memory map.
+ */
+ unsigned long long start_at, mem_size;
+
+ mem_size = memparse(from+7, &from);
+ if (*from == '@') {
+ start_at = memparse(from+1, &from);
+					add_memory_region(start_at, mem_size, E820_RAM);
+ } else if (*from == '#') {
+ start_at = memparse(from+1, &from);
+					add_memory_region(start_at, mem_size, E820_ACPI);
+ } else if (*from == '$') {
+ start_at = memparse(from+1, &from);
+					add_memory_region(start_at, mem_size, E820_RESERVED);
+ } else {
+ limit_regions(mem_size);
+ userdef=1;
+ }
+ }
+ }
+
+ else if (!memcmp(from, "noexec=", 7))
+ noexec_setup(from + 7);
+
+
+#ifdef CONFIG_X86_SMP
+ /*
+ * If the BIOS enumerates physical processors before logical,
+ * maxcpus=N at enumeration-time can be used to disable HT.
+ */
+ else if (!memcmp(from, "maxcpus=", 8)) {
+ extern unsigned int maxcpus;
+
+ maxcpus = simple_strtoul(from + 8, NULL, 0);
+ }
+#endif
+
+#ifdef CONFIG_ACPI_BOOT
+		/* "acpi=off" disables both ACPI table parsing and interpreter */
+ else if (!memcmp(from, "acpi=off", 8)) {
+ disable_acpi();
+ }
+
+ /* acpi=force to over-ride black-list */
+ else if (!memcmp(from, "acpi=force", 10)) {
+ acpi_force = 1;
+ acpi_ht = 1;
+ acpi_disabled = 0;
+ }
+
+ /* acpi=strict disables out-of-spec workarounds */
+ else if (!memcmp(from, "acpi=strict", 11)) {
+ acpi_strict = 1;
+ }
+
+ /* Limit ACPI just to boot-time to enable HT */
+ else if (!memcmp(from, "acpi=ht", 7)) {
+ if (!acpi_force)
+ disable_acpi();
+ acpi_ht = 1;
+ }
+
+ /* "pci=noacpi" disable ACPI IRQ routing and PCI scan */
+ else if (!memcmp(from, "pci=noacpi", 10)) {
+ acpi_disable_pci();
+ }
+ /* "acpi=noirq" disables ACPI interrupt routing */
+ else if (!memcmp(from, "acpi=noirq", 10)) {
+ acpi_noirq_set();
+ }
+
+ else if (!memcmp(from, "acpi_sci=edge", 13))
+ acpi_sci_flags.trigger = 1;
+
+ else if (!memcmp(from, "acpi_sci=level", 14))
+ acpi_sci_flags.trigger = 3;
+
+ else if (!memcmp(from, "acpi_sci=high", 13))
+ acpi_sci_flags.polarity = 1;
+
+ else if (!memcmp(from, "acpi_sci=low", 12))
+ acpi_sci_flags.polarity = 3;
+
+#ifdef CONFIG_X86_IO_APIC
+ else if (!memcmp(from, "acpi_skip_timer_override", 24))
+ acpi_skip_timer_override = 1;
+#endif
+
+#ifdef CONFIG_X86_LOCAL_APIC
+ /* disable IO-APIC */
+ else if (!memcmp(from, "noapic", 6))
+ disable_ioapic_setup();
+#endif /* CONFIG_X86_LOCAL_APIC */
+#endif /* CONFIG_ACPI_BOOT */
+
+ /*
+ * highmem=size forces highmem to be exactly 'size' bytes.
+ * This works even on boxes that have no highmem otherwise.
+ * This also works to reduce highmem size on bigger boxes.
+ */
+ else if (!memcmp(from, "highmem=", 8))
+ highmem_pages = memparse(from+8, &from) >> PAGE_SHIFT;
+
+ /*
+ * vmalloc=size forces the vmalloc area to be exactly 'size'
+ * bytes. This can be used to increase (or decrease) the
+ * vmalloc area - the default is 128m.
+ */
+ else if (!memcmp(from, "vmalloc=", 8))
+ __VMALLOC_RESERVE = memparse(from+8, &from);
+
+ next_char:
+ c = *(from++);
+ if (!c)
+ break;
+ if (COMMAND_LINE_SIZE <= ++len)
+ break;
+ *(to++) = c;
+ }
+ *to = '\0';
+ *cmdline_p = command_line;
+ if (userdef) {
+ printk(KERN_INFO "user-defined physical RAM map:\n");
+ print_memory_map("user");
+ }
+}
+
+#if 0 /* !XEN */
+/*
+ * Callback for efi_memory_walk.
+ */
+static int __init
+efi_find_max_pfn(unsigned long start, unsigned long end, void *arg)
+{
+ unsigned long *max_pfn = arg, pfn;
+
+ if (start < end) {
+ pfn = PFN_UP(end -1);
+ if (pfn > *max_pfn)
+ *max_pfn = pfn;
+ }
+ return 0;
+}
+
+
+/*
+ * Find the highest page frame number we have available
+ */
+void __init find_max_pfn(void)
+{
+ int i;
+
+ max_pfn = 0;
+ if (efi_enabled) {
+ efi_memmap_walk(efi_find_max_pfn, &max_pfn);
+ return;
+ }
+
+ for (i = 0; i < e820.nr_map; i++) {
+ unsigned long start, end;
+ /* RAM? */
+ if (e820.map[i].type != E820_RAM)
+ continue;
+ start = PFN_UP(e820.map[i].addr);
+ end = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
+ if (start >= end)
+ continue;
+ if (end > max_pfn)
+ max_pfn = end;
+ }
+}
+#else
+/* We don't use the fake e820 because we need to respond to user override. */
+void __init find_max_pfn(void)
+{
+ if ( xen_override_max_pfn < xen_start_info.nr_pages )
+ xen_override_max_pfn = xen_start_info.nr_pages;
+ max_pfn = xen_override_max_pfn;
+}
+#endif /* XEN */
+
+/*
+ * Determine low and high memory ranges:
+ */
+unsigned long __init find_max_low_pfn(void)
+{
+ unsigned long max_low_pfn;
+
+ max_low_pfn = max_pfn;
+ if (max_low_pfn > MAXMEM_PFN) {
+ if (highmem_pages == -1)
+ highmem_pages = max_pfn - MAXMEM_PFN;
+ if (highmem_pages + MAXMEM_PFN < max_pfn)
+ max_pfn = MAXMEM_PFN + highmem_pages;
+ if (highmem_pages + MAXMEM_PFN > max_pfn) {
+ printk("only %luMB highmem pages available, ignoring
highmem size of %uMB.\n", pages_to_mb(max_pfn - MAXMEM_PFN),
pages_to_mb(highmem_pages));
+ highmem_pages = 0;
+ }
+ max_low_pfn = MAXMEM_PFN;
+#ifndef CONFIG_HIGHMEM
+ /* Maximum memory usable is what is directly addressable */
+ printk(KERN_WARNING "Warning only %ldMB will be used.\n",
+ MAXMEM>>20);
+ if (max_pfn > MAX_NONPAE_PFN)
+ printk(KERN_WARNING "Use a PAE enabled kernel.\n");
+ else
+ printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
+ max_pfn = MAXMEM_PFN;
+#else /* !CONFIG_HIGHMEM */
+#ifndef CONFIG_X86_PAE
+ if (max_pfn > MAX_NONPAE_PFN) {
+ max_pfn = MAX_NONPAE_PFN;
+ printk(KERN_WARNING "Warning only 4GB will be used.\n");
+ printk(KERN_WARNING "Use a PAE enabled kernel.\n");
+ }
+#endif /* !CONFIG_X86_PAE */
+#endif /* !CONFIG_HIGHMEM */
+ } else {
+ if (highmem_pages == -1)
+ highmem_pages = 0;
+#ifdef CONFIG_HIGHMEM
+ if (highmem_pages >= max_pfn) {
+ printk(KERN_ERR "highmem size specified (%uMB) is
bigger than pages available (%luMB)!.\n", pages_to_mb(highmem_pages),
pages_to_mb(max_pfn));
+ highmem_pages = 0;
+ }
+ if (highmem_pages) {
+ if (max_low_pfn-highmem_pages < 64*1024*1024/PAGE_SIZE){
+ printk(KERN_ERR "highmem size %uMB results in
smaller than 64MB lowmem, ignoring it.\n", pages_to_mb(highmem_pages));
+ highmem_pages = 0;
+ }
+ max_low_pfn -= highmem_pages;
+ }
+#else
+ if (highmem_pages)
+ printk(KERN_ERR "ignoring highmem size on non-highmem
kernel!\n");
+#endif
+ }
+ return max_low_pfn;
+}
+
+#ifndef CONFIG_DISCONTIGMEM
+
+/*
+ * Free all available memory for boot time allocation. Used
+ * as a callback function by efi_memory_walk()
+ */
+
+static int __init
+free_available_memory(unsigned long start, unsigned long end, void *arg)
+{
+ /* check max_low_pfn */
+ if (start >= ((max_low_pfn + 1) << PAGE_SHIFT))
+ return 0;
+ if (end >= ((max_low_pfn + 1) << PAGE_SHIFT))
+ end = (max_low_pfn + 1) << PAGE_SHIFT;
+ if (start < end)
+ free_bootmem(start, end - start);
+
+ return 0;
+}
+/*
+ * Register fully available low RAM pages with the bootmem allocator.
+ */
+static void __init register_bootmem_low_pages(unsigned long max_low_pfn)
+{
+ int i;
+
+ if (efi_enabled) {
+ efi_memmap_walk(free_available_memory, NULL);
+ return;
+ }
+ for (i = 0; i < e820.nr_map; i++) {
+ unsigned long curr_pfn, last_pfn, size;
+ /*
+ * Reserve usable low memory
+ */
+ if (e820.map[i].type != E820_RAM)
+ continue;
+ /*
+ * We are rounding up the start address of usable memory:
+ */
+ curr_pfn = PFN_UP(e820.map[i].addr);
+ if (curr_pfn >= max_low_pfn)
+ continue;
+ /*
+ * ... and at the end of the usable range downwards:
+ */
+ last_pfn = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
+
+ if (last_pfn > max_low_pfn)
+ last_pfn = max_low_pfn;
+
+ /*
+ * .. finally, did all the rounding and playing
+ * around just make the area go away?
+ */
+ if (last_pfn <= curr_pfn)
+ continue;
+
+ size = last_pfn - curr_pfn;
+ free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
+ }
+}
+
+/*
+ * workaround for Dell systems that neglect to reserve EBDA
+ */
+static void __init reserve_ebda_region(void)
+{
+ unsigned int addr;
+ addr = get_bios_ebda();
+ if (addr)
+ reserve_bootmem(addr, PAGE_SIZE);
+}
+
+static unsigned long __init setup_memory(void)
+{
+ unsigned long bootmap_size, start_pfn, max_low_pfn;
+
+ /*
+ * partially used pages are not usable - thus
+ * we are rounding upwards:
+ */
+ start_pfn = PFN_UP(__pa(xen_start_info.pt_base)) + xen_start_info.nr_pt_frames;
+
+ find_max_pfn();
+
+ max_low_pfn = find_max_low_pfn();
+
+#ifdef CONFIG_HIGHMEM
+ highstart_pfn = highend_pfn = max_pfn;
+ if (max_pfn > max_low_pfn) {
+ highstart_pfn = max_low_pfn;
+ }
+ printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
+ pages_to_mb(highend_pfn - highstart_pfn));
+#endif
+ printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
+ pages_to_mb(max_low_pfn));
+ /*
+ * Initialize the boot-time allocator (with low memory only):
+ */
+ bootmap_size = init_bootmem(start_pfn, max_low_pfn);
+
+ register_bootmem_low_pages(max_low_pfn);
+
+ /*
+ * Reserve the bootmem bitmap itself as well. We do this in two
+ * steps (first step was init_bootmem()) because this catches
+ * the (very unlikely) case of us accidentally initializing the
+ * bootmem allocator with an invalid RAM area.
+ */
+ reserve_bootmem(HIGH_MEMORY, (PFN_PHYS(start_pfn) +
+ bootmap_size + PAGE_SIZE-1) - (HIGH_MEMORY));
+
+ /* reserve EBDA region, it's a 4K region */
+ reserve_ebda_region();
+
+ /* could be an AMD 768MPX chipset. Reserve a page before VGA to prevent
+    PCI prefetch into it (errata #56). Usually the page is reserved anyway,
+    unless you have no PS/2 mouse plugged in. */
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+ boot_cpu_data.x86 == 6)
+ reserve_bootmem(0xa0000 - 4096, 4096);
+
+#ifdef CONFIG_SMP
+ /*
+ * But first pinch a few for the stack/trampoline stuff
+ * FIXME: Don't need the extra page at 4K, but need to fix
+ * trampoline before removing it. (see the GDT stuff)
+ */
+ reserve_bootmem(PAGE_SIZE, PAGE_SIZE);
+#endif
+#ifdef CONFIG_ACPI_SLEEP
+ /*
+ * Reserve low memory region for sleep support.
+ */
+ acpi_reserve_bootmem();
+#endif
+#ifdef CONFIG_X86_FIND_SMP_CONFIG
+ /*
+ * Find and reserve possible boot-time SMP configuration:
+ */
+ find_smp_config();
+#endif
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ if (xen_start_info.mod_start) {
+ if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
+ /*reserve_bootmem(INITRD_START, INITRD_SIZE);*/
+ initrd_start = INITRD_START + PAGE_OFFSET;
+ initrd_end = initrd_start+INITRD_SIZE;
+ initrd_below_start_ok = 1;
+ }
+ else {
+ printk(KERN_ERR "initrd extends beyond end of memory "
+ "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
+ INITRD_START + INITRD_SIZE,
+ max_low_pfn << PAGE_SHIFT);
+ initrd_start = 0;
+ }
+ }
+#endif
+
+ phys_to_machine_mapping = (unsigned int *)xen_start_info.mfn_list;
+
+ return max_low_pfn;
+}
+#else
+extern unsigned long setup_memory(void);
+#endif /* !CONFIG_DISCONTIGMEM */
+
+/*
+ * Request address space for all standard RAM and ROM resources
+ * and also for regions reported as reserved by the e820.
+ */
+static void __init
+legacy_init_iomem_resources(struct resource *code_resource, struct resource *data_resource)
+{
+ int i;
+
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+ probe_roms();
+#endif
+ for (i = 0; i < e820.nr_map; i++) {
+ struct resource *res;
+ if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL)
+ continue;
+ res = alloc_bootmem_low(sizeof(struct resource));
+ switch (e820.map[i].type) {
+ case E820_RAM: res->name = "System RAM"; break;
+ case E820_ACPI: res->name = "ACPI Tables"; break;
+ case E820_NVS: res->name = "ACPI Non-volatile Storage"; break;
+ default: res->name = "reserved";
+ }
+ res->start = e820.map[i].addr;
+ res->end = res->start + e820.map[i].size - 1;
+ res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ request_resource(&iomem_resource, res);
+ if (e820.map[i].type == E820_RAM) {
+ /*
+ * We don't know which RAM region contains kernel data,
+ * so we try it repeatedly and let the resource manager
+ * test it.
+ */
+ request_resource(res, code_resource);
+ request_resource(res, data_resource);
+ }
+ }
+}
+
+/*
+ * Request address space for all standard resources
+ */
+static void __init register_memory(void)
+{
+ unsigned long gapstart, gapsize;
+ unsigned long long last;
+ int i;
+
+ if (efi_enabled)
+ efi_initialize_iomem_resources(&code_resource, &data_resource);
+ else
+ legacy_init_iomem_resources(&code_resource, &data_resource);
+
+ /* EFI systems may still have VGA */
+ request_resource(&iomem_resource, &video_ram_resource);
+
+ /* request I/O space for devices used on all i[345]86 PCs */
+ for (i = 0; i < STANDARD_IO_RESOURCES; i++)
+ request_resource(&ioport_resource, &standard_io_resources[i]);
+
+ /*
+ * Search for the biggest gap in the low 32 bits of the e820
+ * memory space.
+ */
+ last = 0x100000000ull;
+ gapstart = 0x10000000;
+ gapsize = 0x400000;
+ i = e820.nr_map;
+ while (--i >= 0) {
+ unsigned long long start = e820.map[i].addr;
+ unsigned long long end = start + e820.map[i].size;
+
+ /*
+ * Since "last" is at most 4GB, we know we'll
+ * fit in 32 bits if this condition is true
+ */
+ if (last > end) {
+ unsigned long gap = last - end;
+
+ if (gap > gapsize) {
+ gapsize = gap;
+ gapstart = end;
+ }
+ }
+ if (start < last)
+ last = start;
+ }
+
+ /*
+ * Start allocating dynamic PCI memory a bit into the gap,
+ * aligned up to the nearest megabyte.
+ *
+ * Question: should we try to pad it up a bit (do something
+ * like " + (gapsize >> 3)" in there too?). We now have the
+ * technology.
+ */
+ pci_mem_start = (gapstart + 0xfffff) & ~0xfffff;
+
+ printk("Allocating PCI resources starting at %08lx (gap:
%08lx:%08lx)\n",
+ pci_mem_start, gapstart, gapsize);
+}
+
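
The gap search in register_memory() above walks the e820 map from the last
entry down, relying on the entries being sorted by start address, and keeps
the largest hole below 4GB as the window for dynamic PCI allocations. A
self-contained sketch with made-up regions (illustrative, not from any real
machine):

    #include <stdio.h>

    struct region { unsigned long long addr, size; };

    int main(void)
    {
            /* hypothetical e820-like map, sorted by start address */
            struct region map[] = {
                    { 0x00000000ULL, 0x0009f000ULL },
                    { 0x00100000ULL, 0x3ff00000ULL },   /* ends at 1 GB */
                    { 0xfec00000ULL, 0x00140000ULL },
            };
            unsigned long long last = 0x100000000ULL;
            unsigned long long gapstart = 0x10000000, gapsize = 0x400000;
            int i = sizeof(map) / sizeof(map[0]);

            while (--i >= 0) {
                    unsigned long long start = map[i].addr;
                    unsigned long long end = start + map[i].size;

                    if (last > end && last - end > gapsize) {
                            gapsize = last - end;
                            gapstart = end;
                    }
                    if (start < last)
                            last = start;
            }
            /* prints: gap at 0x40000000, size 0xbec00000 */
            printf("gap at %#llx, size %#llx\n", gapstart, gapsize);
            return 0;
    }
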
+/* Use inline assembly to define this because the nops are defined
+ as inline assembly strings in the include files and we cannot
+ get them easily into strings. */
+asm("\t.data\nintelnops: "
+ GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
+ GENERIC_NOP7 GENERIC_NOP8);
+asm("\t.data\nk8nops: "
+ K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
+ K8_NOP7 K8_NOP8);
+asm("\t.data\nk7nops: "
+ K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
+ K7_NOP7 K7_NOP8);
+
+extern unsigned char intelnops[], k8nops[], k7nops[];
+static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
+ NULL,
+ intelnops,
+ intelnops + 1,
+ intelnops + 1 + 2,
+ intelnops + 1 + 2 + 3,
+ intelnops + 1 + 2 + 3 + 4,
+ intelnops + 1 + 2 + 3 + 4 + 5,
+ intelnops + 1 + 2 + 3 + 4 + 5 + 6,
+ intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
+};
+static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
+ NULL,
+ k8nops,
+ k8nops + 1,
+ k8nops + 1 + 2,
+ k8nops + 1 + 2 + 3,
+ k8nops + 1 + 2 + 3 + 4,
+ k8nops + 1 + 2 + 3 + 4 + 5,
+ k8nops + 1 + 2 + 3 + 4 + 5 + 6,
+ k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
+};
+static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
+ NULL,
+ k7nops,
+ k7nops + 1,
+ k7nops + 1 + 2,
+ k7nops + 1 + 2 + 3,
+ k7nops + 1 + 2 + 3 + 4,
+ k7nops + 1 + 2 + 3 + 4 + 5,
+ k7nops + 1 + 2 + 3 + 4 + 5 + 6,
+ k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
+};
+static struct nop {
+ int cpuid;
+ unsigned char **noptable;
+} noptypes[] = {
+ { X86_FEATURE_K8, k8_nops },
+ { X86_FEATURE_K7, k7_nops },
+ { -1, NULL }
+};
+
+/* Replace instructions with better alternatives for this CPU type.
+
+ This runs before SMP is initialized to avoid SMP problems with
+ self-modifying code. This implies that asymmetric systems where
+ APs have fewer capabilities than the boot processor are not handled.
+ In this case boot with "noreplacement". */
+void apply_alternatives(void *start, void *end)
+{
+ struct alt_instr *a;
+ int diff, i, k;
+ unsigned char **noptable = intel_nops;
+ for (i = 0; noptypes[i].cpuid >= 0; i++) {
+ if (boot_cpu_has(noptypes[i].cpuid)) {
+ noptable = noptypes[i].noptable;
+ break;
+ }
+ }
+ for (a = start; (void *)a < end; a++) {
+ if (!boot_cpu_has(a->cpuid))
+ continue;
+ BUG_ON(a->replacementlen > a->instrlen);
+ memcpy(a->instr, a->replacement, a->replacementlen);
+ diff = a->instrlen - a->replacementlen;
+ /* Pad the rest with nops */
+ for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
+ k = diff;
+ if (k > ASM_NOP_MAX)
+ k = ASM_NOP_MAX;
+ memcpy(a->instr + i, noptable[k], k);
+ }
+ }
+}
+
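
The padding loop in apply_alternatives() fills the leftover bytes of a
patched instruction with the largest multi-byte nops available, at most
ASM_NOP_MAX at a time. A standalone sketch of just that loop, with 0x90
standing in for the per-CPU nop tables (hypothetical lengths, not from the
patch):

    #include <stdio.h>
    #include <string.h>

    #define ASM_NOP_MAX 8

    int main(void)
    {
            unsigned char instr[16] = { 0 };
            int instrlen = 11, replacementlen = 2;   /* assumed sizes */
            int i, k, diff = instrlen - replacementlen;

            for (i = replacementlen; diff > 0; diff -= k, i += k) {
                    k = diff > ASM_NOP_MAX ? ASM_NOP_MAX : diff;
                    memset(instr + i, 0x90, k);  /* noptable[k] in the kernel */
                    printf("padded %d byte(s) at offset %d\n", k, i);
            }
            /* prints: padded 8 byte(s) at offset 2, then 1 byte at offset 10 */
            return 0;
    }
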
+static int no_replacement __initdata = 0;
+
+void __init alternative_instructions(void)
+{
+ extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
+ if (no_replacement)
+ return;
+ apply_alternatives(__alt_instructions, __alt_instructions_end);
+}
+
+static int __init noreplacement_setup(char *s)
+{
+ no_replacement = 1;
+ return 0;
+}
+
+__setup("noreplacement", noreplacement_setup);
+
+static char * __init machine_specific_memory_setup(void);
+
+#ifdef CONFIG_MCA
+static void set_mca_bus(int x)
+{
+ MCA_bus = x;
+}
+#else
+static void set_mca_bus(int x) { }
+#endif
+
+/*
+ * Determine if we were loaded by an EFI loader. If so, then we have also been
+ * passed the efi memmap, systab, etc., so we should use these data structures
+ * for initialization. Note, the efi init code path is determined by the
+ * global efi_enabled. This allows the same kernel image to be used on existing
+ * systems (with a traditional BIOS) as well as on EFI systems.
+ */
+void __init setup_arch(char **cmdline_p)
+{
+ int i, j;
+ unsigned long max_low_pfn;
+
+ /* Force a quick death if the kernel panics. */
+ extern int panic_timeout;
+ if (panic_timeout == 0)
+ panic_timeout = 1;
+
+ /* Register a call for panic conditions. */
+ notifier_chain_register(&panic_notifier_list, &xen_panic_block);
+
+ HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
+
+ memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
+ early_cpu_init();
+
+ /*
+ * FIXME: This isn't an official loader_type right
+ * now but does currently work with elilo.
+ * If we were configured as an EFI kernel, check to make
+ * sure that we were loaded correctly from elilo and that
+ * the system table is valid. If not, then initialize normally.
+ */
+#ifdef CONFIG_EFI
+ if ((LOADER_TYPE == 0x50) && EFI_SYSTAB)
+ efi_enabled = 1;
+#endif
+
+ /* This must be initialized to UNNAMED_MAJOR for ipconfig to work
+ properly. Setting ROOT_DEV to default to /dev/ram0 breaks initrd.
+ */
+ ROOT_DEV = MKDEV(UNNAMED_MAJOR,0);
+ drive_info = DRIVE_INFO;
+ screen_info = SCREEN_INFO;
+ edid_info = EDID_INFO;
+ apm_info.bios = APM_BIOS_INFO;
+ ist_info = IST_INFO;
+ saved_videomode = VIDEO_MODE;
+ if( SYS_DESC_TABLE.length != 0 ) {
+ set_mca_bus(SYS_DESC_TABLE.table[3] & 0x2);
+ machine_id = SYS_DESC_TABLE.table[0];
+ machine_submodel_id = SYS_DESC_TABLE.table[1];
+ BIOS_revision = SYS_DESC_TABLE.table[2];
+ }
+ aux_device_present = AUX_DEVICE_INFO;
+ bootloader_type = LOADER_TYPE;
+
+#ifdef CONFIG_XEN_PHYSDEV_ACCESS
+ /* This is drawn from a dump from vgacon:startup in standard Linux. */
+ screen_info.orig_video_mode = 3;
+ screen_info.orig_video_isVGA = 1;
+ screen_info.orig_video_lines = 25;
+ screen_info.orig_video_cols = 80;
+ screen_info.orig_video_ega_bx = 3;
+ screen_info.orig_video_points = 16;
+#endif
+
+#ifdef CONFIG_BLK_DEV_RAM
+ rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
+ rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
+ rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
+#endif
+ ARCH_SETUP
+ if (efi_enabled)
+ efi_init();
+ else {
+ printk(KERN_INFO "BIOS-provided physical RAM map:\n");
+ print_memory_map(machine_specific_memory_setup());
+ }
+
+ copy_edd();
+
+ if (!MOUNT_ROOT_RDONLY)
+ root_mountflags &= ~MS_RDONLY;
+ init_mm.start_code = (unsigned long) _text;
+ init_mm.end_code = (unsigned long) _etext;
+ init_mm.end_data = (unsigned long) _edata;
+ init_mm.brk = (PFN_UP(__pa(xen_start_info.pt_base)) +
+ xen_start_info.nr_pt_frames) << PAGE_SHIFT;
+
+ /* XEN: This is nonsense: kernel may not even be contiguous in RAM. */
+ /*code_resource.start = virt_to_phys(_text);*/
+ /*code_resource.end = virt_to_phys(_etext)-1;*/
+ /*data_resource.start = virt_to_phys(_etext);*/
+ /*data_resource.end = virt_to_phys(_edata)-1;*/
+
+ parse_cmdline_early(cmdline_p);
+
+ max_low_pfn = setup_memory();
+
+ /*
+ * NOTE: before this point _nobody_ is allowed to allocate
+ * any memory using the bootmem allocator. Although the
+ * allocator is now initialised, only the first 8Mb of the kernel
+ * virtual address space has been mapped. All allocations before
+ * paging_init() has completed must use the alloc_bootmem_low_pages()
+ * variant (which allocates DMA'able memory) and care must be taken
+ * not to exceed the 8Mb limit.
+ */
+
+#ifdef CONFIG_SMP
+ smp_alloc_memory(); /* AP processor realmode stacks in low memory*/
+#endif
+ paging_init();
+
+ /* Make sure we have a correctly sized P->M table. */
+ if (max_pfn != xen_start_info.nr_pages) {
+ phys_to_machine_mapping = alloc_bootmem_low_pages(
+ max_pfn * sizeof(unsigned long));
+
+ if (max_pfn > xen_start_info.nr_pages) {
+ /* set to INVALID_P2M_ENTRY */
+ memset(phys_to_machine_mapping, ~0,
+ max_pfn * sizeof(unsigned long));
+ memcpy(phys_to_machine_mapping,
+ (unsigned long *)xen_start_info.mfn_list,
+ xen_start_info.nr_pages * sizeof(unsigned long));
+ } else {
+ memcpy(phys_to_machine_mapping,
+ (unsigned long *)xen_start_info.mfn_list,
+ max_pfn * sizeof(unsigned long));
+ if (HYPERVISOR_dom_mem_op(
+ MEMOP_decrease_reservation,
+ (unsigned long *)xen_start_info.mfn_list + max_pfn,
+ xen_start_info.nr_pages - max_pfn, 0) !=
+ (xen_start_info.nr_pages - max_pfn)) BUG();
+ }
+ free_bootmem(
+ __pa(xen_start_info.mfn_list),
+ PFN_PHYS(PFN_UP(xen_start_info.nr_pages *
+ sizeof(unsigned long))));
+ }
+
+ pfn_to_mfn_frame_list = alloc_bootmem_low_pages(PAGE_SIZE);
+ for ( i=0, j=0; i < max_pfn; i+=(PAGE_SIZE/sizeof(unsigned long)), j++ )
+ {
+ pfn_to_mfn_frame_list[j] =
+ virt_to_machine(&phys_to_machine_mapping[i]) >> PAGE_SHIFT;
+ }
+ HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list =
+ virt_to_machine(pfn_to_mfn_frame_list) >> PAGE_SHIFT;
+
+
+ /*
+ * NOTE: at this point the bootmem allocator is fully available.
+ */
+
+#ifdef CONFIG_EARLY_PRINTK
+ {
+ char *s = strstr(*cmdline_p, "earlyprintk=");
+ if (s) {
+ extern void setup_early_printk(char *);
+
+ setup_early_printk(s);
+ printk("early console enabled\n");
+ }
+ }
+#endif
+
+
+ dmi_scan_machine();
+
+#ifdef CONFIG_X86_GENERICARCH
+ generic_apic_probe(*cmdline_p);
+#endif
+ if (efi_enabled)
+ efi_map_memmap();
+
+ /*
+ * Parse the ACPI tables for possible boot-time SMP configuration.
+ */
+ acpi_boot_table_init();
+ acpi_boot_init();
+
+#ifdef CONFIG_X86_LOCAL_APIC
+ if (smp_found_config)
+ get_smp_config();
+#endif
+
+ /* XXX Disable irqdebug until we have a way to avoid interrupt
+ * conflicts. */
+ noirqdebug_setup("");
+
+ register_memory();
+
+ /* If we are a privileged guest OS then we should request IO privs. */
+ if (xen_start_info.flags & SIF_PRIVILEGED) {
+ dom0_op_t op;
+ op.cmd = DOM0_IOPL;
+ op.u.iopl.domain = DOMID_SELF;
+ op.u.iopl.iopl = 1;
+ if (HYPERVISOR_dom0_op(&op) != 0)
+ panic("Unable to obtain IOPL, despite SIF_PRIVILEGED");
+ current->thread.io_pl = 1;
+ }
+
+ if (xen_start_info.flags & SIF_INITDOMAIN) {
+ if (!(xen_start_info.flags & SIF_PRIVILEGED))
+ panic("Xen granted us console access "
+ "but not privileged status");
+
+#ifdef CONFIG_VT
+#if defined(CONFIG_VGA_CONSOLE)
+ if (!efi_enabled ||
+ (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
+ conswitchp = &vga_con;
+#elif defined(CONFIG_DUMMY_CONSOLE)
+ conswitchp = &dummy_con;
+#endif
+#endif
+ } else {
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+ extern const struct consw xennull_con;
+ extern int console_use_vt;
+#if defined(CONFIG_VGA_CONSOLE)
+ /* disable VGA driver */
+ ORIG_VIDEO_ISVGA = VIDEO_TYPE_VLFB;
+#endif
+ conswitchp = &xennull_con;
+ console_use_vt = 0;
+#endif
+ }
+}
+
+static int
+xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
+{
+ HYPERVISOR_crash();
+ /* we're never actually going to get here... */
+ return NOTIFY_DONE;
+}
+
+#include "setup_arch_post.h"
+/*
+ * Local Variables:
+ * mode:c
+ * c-file-style:"k&r"
+ * c-basic-offset:8
+ * End:
+ */
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/arch/xen/i386/kernel/signal.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/signal.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,656 @@
+/*
+ * linux/arch/i386/kernel/signal.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ *
+ * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
+ * 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes
+ */
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/unistd.h>
+#include <linux/stddef.h>
+#include <linux/personality.h>
+#include <linux/suspend.h>
+#include <linux/ptrace.h>
+#include <linux/elf.h>
+#include <asm/processor.h>
+#include <asm/ucontext.h>
+#include <asm/uaccess.h>
+#include <asm/i387.h>
+#include "sigframe.h"
+
+#define DEBUG_SIG 0
+
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
+/*
+ * Atomically swap in the new signal mask, and wait for a signal.
+ */
+asmlinkage int
+sys_sigsuspend(int history0, int history1, old_sigset_t mask)
+{
+ struct pt_regs * regs = (struct pt_regs *) &history0;
+ sigset_t saveset;
+
+ mask &= _BLOCKABLE;
+ spin_lock_irq(&current->sighand->siglock);
+ saveset = current->blocked;
+ siginitset(&current->blocked, mask);
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+
+ regs->eax = -EINTR;
+ while (1) {
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ if (do_signal(regs, &saveset))
+ return -EINTR;
+ }
+}
+
+asmlinkage int
+sys_rt_sigsuspend(struct pt_regs regs)
+{
+ sigset_t saveset, newset;
+
+ /* XXX: Don't preclude handling different sized sigset_t's. */
+ if (regs.ecx != sizeof(sigset_t))
+ return -EINVAL;
+
+ if (copy_from_user(&newset, (sigset_t __user *)regs.ebx, sizeof(newset)))
+ return -EFAULT;
+ sigdelsetmask(&newset, ~_BLOCKABLE);
+
+ spin_lock_irq(&current->sighand->siglock);
+ saveset = current->blocked;
+ current->blocked = newset;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+
+ regs.eax = -EINTR;
+ while (1) {
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ if (do_signal(&regs, &saveset))
+ return -EINTR;
+ }
+}
+
+asmlinkage int
+sys_sigaction(int sig, const struct old_sigaction __user *act,
+ struct old_sigaction __user *oact)
+{
+ struct k_sigaction new_ka, old_ka;
+ int ret;
+
+ if (act) {
+ old_sigset_t mask;
+ if (verify_area(VERIFY_READ, act, sizeof(*act)) ||
+ __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
+ __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
+ return -EFAULT;
+ __get_user(new_ka.sa.sa_flags, &act->sa_flags);
+ __get_user(mask, &act->sa_mask);
+ siginitset(&new_ka.sa.sa_mask, mask);
+ }
+
+ ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+
+ if (!ret && oact) {
+ if (verify_area(VERIFY_WRITE, oact, sizeof(*oact)) ||
+ __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
+ __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
+ return -EFAULT;
+ __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
+ __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
+ }
+
+ return ret;
+}
+
+asmlinkage int
+sys_sigaltstack(unsigned long ebx)
+{
+ /* This is needed to make gcc realize it doesn't own the "struct pt_regs" */
+ struct pt_regs *regs = (struct pt_regs *)&ebx;
+ const stack_t __user *uss = (const stack_t __user *)ebx;
+ stack_t __user *uoss = (stack_t __user *)regs->ecx;
+
+ return do_sigaltstack(uss, uoss, regs->esp);
+}
+
+
+/*
+ * Do a signal return; undo the signal stack.
+ */
+
+static int
+restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *peax)
+{
+ unsigned int err = 0;
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
+#define COPY(x) err |= __get_user(regs->x, &sc->x)
+
+#define COPY_SEG(seg) \
+ { unsigned short tmp; \
+ err |= __get_user(tmp, &sc->seg); \
+ regs->x##seg = tmp; }
+
+#define COPY_SEG_STRICT(seg) \
+ { unsigned short tmp; \
+ err |= __get_user(tmp, &sc->seg); \
+ regs->x##seg = tmp|3; }
+
+#define GET_SEG(seg) \
+ { unsigned short tmp; \
+ err |= __get_user(tmp, &sc->seg); \
+ loadsegment(seg,tmp); }
+
+#define FIX_EFLAGS (X86_EFLAGS_AC | X86_EFLAGS_OF | X86_EFLAGS_DF | \
+ X86_EFLAGS_TF | X86_EFLAGS_SF | X86_EFLAGS_ZF | \
+ X86_EFLAGS_AF | X86_EFLAGS_PF | X86_EFLAGS_CF)
+
+ GET_SEG(gs);
+ GET_SEG(fs);
+ COPY_SEG(es);
+ COPY_SEG(ds);
+ COPY(edi);
+ COPY(esi);
+ COPY(ebp);
+ COPY(esp);
+ COPY(ebx);
+ COPY(edx);
+ COPY(ecx);
+ COPY(eip);
+ COPY_SEG_STRICT(cs);
+ COPY_SEG_STRICT(ss);
+
+ {
+ unsigned int tmpflags;
+ err |= __get_user(tmpflags, &sc->eflags);
+ regs->eflags = (regs->eflags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
+ regs->orig_eax = -1; /* disable syscall checks */
+ }
+
+ {
+ struct _fpstate __user * buf;
+ err |= __get_user(buf, &sc->fpstate);
+ if (buf) {
+ if (verify_area(VERIFY_READ, buf, sizeof(*buf)))
+ goto badframe;
+ err |= restore_i387(buf);
+ } else {
+ struct task_struct *me = current;
+ if (used_math()) {
+ clear_fpu(me);
+ clear_used_math();
+ }
+ }
+ }
+
+ err |= __get_user(*peax, &sc->eax);
+ return err;
+
+badframe:
+ return 1;
+}
+
+asmlinkage int sys_sigreturn(unsigned long __unused)
+{
+ struct pt_regs *regs = (struct pt_regs *) &__unused;
+ struct sigframe __user *frame = (struct sigframe __user *)(regs->esp - 8);
+ sigset_t set;
+ int eax;
+
+ if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
+ goto badframe;
+ if (__get_user(set.sig[0], &frame->sc.oldmask)
+ || (_NSIG_WORDS > 1
+ && __copy_from_user(&set.sig[1], &frame->extramask,
+ sizeof(frame->extramask))))
+ goto badframe;
+
+ sigdelsetmask(&set, ~_BLOCKABLE);
+ spin_lock_irq(&current->sighand->siglock);
+ current->blocked = set;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+
+ if (restore_sigcontext(regs, &frame->sc, &eax))
+ goto badframe;
+ return eax;
+
+badframe:
+ force_sig(SIGSEGV, current);
+ return 0;
+}
+
+asmlinkage int sys_rt_sigreturn(unsigned long __unused)
+{
+ struct pt_regs *regs = (struct pt_regs *) &__unused;
+ struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(regs->esp - 4);
+ sigset_t set;
+ int eax;
+
+ if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
+ goto badframe;
+ if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+ goto badframe;
+
+ sigdelsetmask(&set, ~_BLOCKABLE);
+ spin_lock_irq(&current->sighand->siglock);
+ current->blocked = set;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+
+ if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &eax))
+ goto badframe;
+
+ if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->esp) == -EFAULT)
+ goto badframe;
+
+ return eax;
+
+badframe:
+ force_sig(SIGSEGV, current);
+ return 0;
+}
+
+/*
+ * Set up a signal frame.
+ */
+
+static int
+setup_sigcontext(struct sigcontext __user *sc, struct _fpstate __user *fpstate,
+ struct pt_regs *regs, unsigned long mask)
+{
+ int tmp, err = 0;
+
+ tmp = 0;
+ __asm__("movl %%gs,%0" : "=r"(tmp): "0"(tmp));
+ err |= __put_user(tmp, (unsigned int __user *)&sc->gs);
+ __asm__("movl %%fs,%0" : "=r"(tmp): "0"(tmp));
+ err |= __put_user(tmp, (unsigned int __user *)&sc->fs);
+
+ err |= __put_user(regs->xes, (unsigned int __user *)&sc->es);
+ err |= __put_user(regs->xds, (unsigned int __user *)&sc->ds);
+ err |= __put_user(regs->edi, &sc->edi);
+ err |= __put_user(regs->esi, &sc->esi);
+ err |= __put_user(regs->ebp, &sc->ebp);
+ err |= __put_user(regs->esp, &sc->esp);
+ err |= __put_user(regs->ebx, &sc->ebx);
+ err |= __put_user(regs->edx, &sc->edx);
+ err |= __put_user(regs->ecx, &sc->ecx);
+ err |= __put_user(regs->eax, &sc->eax);
+ err |= __put_user(current->thread.trap_no, &sc->trapno);
+ err |= __put_user(current->thread.error_code, &sc->err);
+ err |= __put_user(regs->eip, &sc->eip);
+ err |= __put_user(regs->xcs, (unsigned int __user *)&sc->cs);
+ err |= __put_user(regs->eflags, &sc->eflags);
+ err |= __put_user(regs->esp, &sc->esp_at_signal);
+ err |= __put_user(regs->xss, (unsigned int __user *)&sc->ss);
+
+ tmp = save_i387(fpstate);
+ if (tmp < 0)
+ err = 1;
+ else
+ err |= __put_user(tmp ? fpstate : NULL, &sc->fpstate);
+
+ /* non-iBCS2 extensions.. */
+ err |= __put_user(mask, &sc->oldmask);
+ err |= __put_user(current->thread.cr2, &sc->cr2);
+
+ return err;
+}
+
+/*
+ * Determine which stack to use..
+ */
+static inline void __user *
+get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
+{
+ unsigned long esp;
+
+ /* Default to using normal stack */
+ esp = regs->esp;
+
+ /* This is the X/Open sanctioned signal stack switching. */
+ if (ka->sa.sa_flags & SA_ONSTACK) {
+ if (sas_ss_flags(esp) == 0)
+ esp = current->sas_ss_sp + current->sas_ss_size;
+ }
+
+ /* This is the legacy signal stack switching. */
+ else if ((regs->xss & 0xffff) != __USER_DS &&
+ !(ka->sa.sa_flags & SA_RESTORER) &&
+ ka->sa.sa_restorer) {
+ esp = (unsigned long) ka->sa.sa_restorer;
+ }
+
+ return (void __user *)((esp - frame_size) & -8ul);
+}
+
+/* These symbols are defined with the addresses in the vsyscall page.
+ See vsyscall-sigreturn.S. */
+extern void __user __kernel_sigreturn;
+extern void __user __kernel_rt_sigreturn;
+
+static void setup_frame(int sig, struct k_sigaction *ka,
+ sigset_t *set, struct pt_regs * regs)
+{
+ void __user *restorer;
+ struct sigframe __user *frame;
+ int err = 0;
+ int usig;
+
+ frame = get_sigframe(ka, regs, sizeof(*frame));
+
+ if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+ goto give_sigsegv;
+
+ usig = current_thread_info()->exec_domain
+ && current_thread_info()->exec_domain->signal_invmap
+ && sig < 32
+ ? current_thread_info()->exec_domain->signal_invmap[sig]
+ : sig;
+
+ err = __put_user(usig, &frame->sig);
+ if (err)
+ goto give_sigsegv;
+
+ err = setup_sigcontext(&frame->sc, &frame->fpstate, regs, set->sig[0]);
+ if (err)
+ goto give_sigsegv;
+
+ if (_NSIG_WORDS > 1) {
+ err = __copy_to_user(&frame->extramask, &set->sig[1],
+ sizeof(frame->extramask));
+ if (err)
+ goto give_sigsegv;
+ }
+
+ restorer = &__kernel_sigreturn;
+ if (ka->sa.sa_flags & SA_RESTORER)
+ restorer = ka->sa.sa_restorer;
+
+ /* Set up to return from userspace. */
+ err |= __put_user(restorer, &frame->pretcode);
+
+ /*
+ * This is popl %eax ; movl $,%eax ; int $0x80
+ *
+ * WE DO NOT USE IT ANY MORE! It's only left here for historical
+ * reasons and because gdb uses it as a signature to notice
+ * signal handler stack frames.
+ */
+ err |= __put_user(0xb858, (short __user *)(frame->retcode+0));
+ err |= __put_user(__NR_sigreturn, (int __user *)(frame->retcode+2));
+ err |= __put_user(0x80cd, (short __user *)(frame->retcode+6));
+
+ if (err)
+ goto give_sigsegv;
+
+ /* Set up registers for signal handler */
+ regs->esp = (unsigned long) frame;
+ regs->eip = (unsigned long) ka->sa.sa_handler;
+ regs->eax = (unsigned long) sig;
+ regs->edx = (unsigned long) 0;
+ regs->ecx = (unsigned long) 0;
+
+ set_fs(USER_DS);
+ regs->xds = __USER_DS;
+ regs->xes = __USER_DS;
+ regs->xss = __USER_DS;
+ regs->xcs = __USER_CS;
+
+ /*
+ * Clear TF when entering the signal handler, but
+ * notify any tracer that was single-stepping it.
+ * The tracer may want to single-step inside the
+ * handler too.
+ */
+ regs->eflags &= ~TF_MASK;
+ if (test_thread_flag(TIF_SINGLESTEP))
+ ptrace_notify(SIGTRAP);
+
+#if DEBUG_SIG
+ printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n",
+ current->comm, current->pid, frame, regs->eip, frame->pretcode);
+#endif
+
+ return;
+
+give_sigsegv:
+ force_sigsegv(sig, current);
+}
+
+static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+ sigset_t *set, struct pt_regs * regs)
+{
+ void __user *restorer;
+ struct rt_sigframe __user *frame;
+ int err = 0;
+ int usig;
+
+ frame = get_sigframe(ka, regs, sizeof(*frame));
+
+ if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+ goto give_sigsegv;
+
+ usig = current_thread_info()->exec_domain
+ && current_thread_info()->exec_domain->signal_invmap
+ && sig < 32
+ ? current_thread_info()->exec_domain->signal_invmap[sig]
+ : sig;
+
+ err |= __put_user(usig, &frame->sig);
+ err |= __put_user(&frame->info, &frame->pinfo);
+ err |= __put_user(&frame->uc, &frame->puc);
+ err |= copy_siginfo_to_user(&frame->info, info);
+ if (err)
+ goto give_sigsegv;
+
+ /* Create the ucontext. */
+ err |= __put_user(0, &frame->uc.uc_flags);
+ err |= __put_user(0, &frame->uc.uc_link);
+ err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
+ err |= __put_user(sas_ss_flags(regs->esp),
+ &frame->uc.uc_stack.ss_flags);
+ err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+ err |= setup_sigcontext(&frame->uc.uc_mcontext, &frame->fpstate,
+ regs, set->sig[0]);
+ err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+ if (err)
+ goto give_sigsegv;
+
+ /* Set up to return from userspace. */
+ restorer = &__kernel_rt_sigreturn;
+ if (ka->sa.sa_flags & SA_RESTORER)
+ restorer = ka->sa.sa_restorer;
+ err |= __put_user(restorer, &frame->pretcode);
+
+ /*
+ * This is movl $,%eax ; int $0x80
+ *
+ * WE DO NOT USE IT ANY MORE! It's only left here for historical
+ * reasons and because gdb uses it as a signature to notice
+ * signal handler stack frames.
+ */
+ err |= __put_user(0xb8, (char __user *)(frame->retcode+0));
+ err |= __put_user(__NR_rt_sigreturn, (int __user *)(frame->retcode+1));
+ err |= __put_user(0x80cd, (short __user *)(frame->retcode+5));
+
+ if (err)
+ goto give_sigsegv;
+
+ /* Set up registers for signal handler */
+ regs->esp = (unsigned long) frame;
+ regs->eip = (unsigned long) ka->sa.sa_handler;
+ regs->eax = (unsigned long) usig;
+ regs->edx = (unsigned long) &frame->info;
+ regs->ecx = (unsigned long) &frame->uc;
+
+ set_fs(USER_DS);
+ regs->xds = __USER_DS;
+ regs->xes = __USER_DS;
+ regs->xss = __USER_DS;
+ regs->xcs = __USER_CS;
+
+ /*
+ * Clear TF when entering the signal handler, but
+ * notify any tracer that was single-stepping it.
+ * The tracer may want to single-step inside the
+ * handler too.
+ */
+ regs->eflags &= ~TF_MASK;
+ if (test_thread_flag(TIF_SINGLESTEP))
+ ptrace_notify(SIGTRAP);
+
+#if DEBUG_SIG
+ printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n",
+ current->comm, current->pid, frame, regs->eip, frame->pretcode);
+#endif
+
+ return;
+
+give_sigsegv:
+ force_sigsegv(sig, current);
+}
+
+/*
+ * OK, we're invoking a handler
+ */
+
+static void
+handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
+ sigset_t *oldset, struct pt_regs * regs)
+{
+ /* Are we from a system call? */
+ if (regs->orig_eax >= 0) {
+ /* If so, check system call restarting.. */
+ switch (regs->eax) {
+ case -ERESTART_RESTARTBLOCK:
+ case -ERESTARTNOHAND:
+ regs->eax = -EINTR;
+ break;
+
+ case -ERESTARTSYS:
+ if (!(ka->sa.sa_flags & SA_RESTART)) {
+ regs->eax = -EINTR;
+ break;
+ }
+ /* fallthrough */
+ case -ERESTARTNOINTR:
+ regs->eax = regs->orig_eax;
+ regs->eip -= 2;
+ }
+ }
+
+ /* Set up the stack frame */
+ if (ka->sa.sa_flags & SA_SIGINFO)
+ setup_rt_frame(sig, ka, info, oldset, regs);
+ else
+ setup_frame(sig, ka, oldset, regs);
+
+ if (!(ka->sa.sa_flags & SA_NODEFER)) {
+ spin_lock_irq(&current->sighand->siglock);
+ sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
+ sigaddset(&current->blocked, sig);
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+ }
+}
+
+/*
+ * Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle. Thus you cannot kill init even with a SIGKILL even by
+ * mistake.
+ */
+int fastcall do_signal(struct pt_regs *regs, sigset_t *oldset)
+{
+ siginfo_t info;
+ int signr;
+ struct k_sigaction ka;
+
+ /*
+ * We want the common case to go fast, which
+ * is why we may in certain cases get here from
+ * kernel mode. Just return without doing anything
+ * if so.
+ */
+ if ((regs->xcs & 2) != 2)
+ return 1;
+
+ if (current->flags & PF_FREEZE) {
+ refrigerator(0);
+ goto no_signal;
+ }
+
+ if (!oldset)
+ oldset = &current->blocked;
+
+ signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+ if (signr > 0) {
+ /* Reenable any watchpoints before delivering the
+ * signal to user space. The processor register will
+ * have been cleared if the watchpoint triggered
+ * inside the kernel.
+ */
+ if (unlikely(current->thread.debugreg[7])) {
+ HYPERVISOR_set_debugreg(7,
+ current->thread.debugreg[7]);
+ }
+
+ /* Whee! Actually deliver the signal. */
+ handle_signal(signr, &info, &ka, oldset, regs);
+ return 1;
+ }
+
+ no_signal:
+ /* Did we come from a system call? */
+ if (regs->orig_eax >= 0) {
+ /* Restart the system call - no handlers present */
+ if (regs->eax == -ERESTARTNOHAND ||
+ regs->eax == -ERESTARTSYS ||
+ regs->eax == -ERESTARTNOINTR) {
+ regs->eax = regs->orig_eax;
+ regs->eip -= 2;
+ }
+ if (regs->eax == -ERESTART_RESTARTBLOCK){
+ regs->eax = __NR_restart_syscall;
+ regs->eip -= 2;
+ }
+ }
+ return 0;
+}
+
+/*
+ * notification of userspace execution resumption
+ * - triggered by current->work.notify_resume
+ */
+__attribute__((regparm(3)))
+void do_notify_resume(struct pt_regs *regs, sigset_t *oldset,
+ __u32 thread_info_flags)
+{
+ /* Pending single-step? */
+ if (thread_info_flags & _TIF_SINGLESTEP) {
+ regs->eflags |= TF_MASK;
+ clear_thread_flag(TIF_SINGLESTEP);
+ }
+ /* deal with pending signal delivery */
+ if (thread_info_flags & _TIF_SIGPENDING)
+ do_signal(regs,oldset);
+
+ clear_thread_flag(TIF_IRET);
+}
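
Before moving on to time.c: the magic constants written into frame->retcode
by setup_frame() above encode a little-endian x86 instruction sequence. A
sketch that reassembles the classic 8-byte sigreturn trampoline and prints
its bytes (little-endian layout assumed; 119 is believed to be
__NR_sigreturn on i386 of this era, so treat the value as an assumption):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            unsigned char retcode[8];
            unsigned short w;
            int nr = 119;                 /* assumed __NR_sigreturn on i386 */

            w = 0xb858;                   /* 0x58 = popl %eax; 0xb8 = movl $imm32,%eax */
            memcpy(retcode + 0, &w, 2);
            memcpy(retcode + 2, &nr, 4);  /* the immediate operand of movl */
            w = 0x80cd;                   /* 0xcd 0x80 = int $0x80 */
            memcpy(retcode + 6, &w, 2);

            for (int i = 0; i < 8; i++)
                    printf("%02x ", retcode[i]);
            printf("\n");                 /* 58 b8 77 00 00 00 cd 80 */
            return 0;
    }
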
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/arch/xen/i386/kernel/time.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/time.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,737 @@
+/*
+ * linux/arch/i386/kernel/time.c
+ *
+ * Copyright (C) 1991, 1992, 1995 Linus Torvalds
+ *
+ * This file contains the PC-specific time handling details:
+ * reading the RTC at bootup, etc..
+ * 1994-07-02 Alan Modra
+ * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
+ * 1995-03-26 Markus Kuhn
+ * fixed 500 ms bug at call to set_rtc_mmss, fixed DS12887
+ * precision CMOS clock update
+ * 1996-05-03 Ingo Molnar
+ * fixed time warps in do_[slow|fast]_gettimeoffset()
+ * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
+ * "A Kernel Model for Precision Timekeeping" by Dave Mills
+ * 1998-09-05 (Various)
+ * More robust do_fast_gettimeoffset() algorithm implemented
+ * (works with APM, Cyrix 6x86MX and Centaur C6),
+ * monotonic gettimeofday() with fast_get_timeoffset(),
+ * drift-proof precision TSC calibration on boot
+ * (C. Scott Ananian <cananian@xxxxxxxxxxxxxxxxxxxx>, Andrew D.
+ * Balsa <andrebalsa@xxxxxxxxxx>, Philip Gladstone <philip@xxxxxxxxxx>;
+ * ported from 2.0.35 Jumbo-9 by Michael Krause <m.krause@xxxxxxxxxxxxx>).
+ * 1998-12-16 Andrea Arcangeli
+ * Fixed Jumbo-9 code in 2.1.131: do_gettimeofday was missing 1 jiffy
+ * because was not accounting lost_ticks.
+ * 1998-12-24 Copyright (C) 1998 Andrea Arcangeli
+ * Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
+ * serialize accesses to xtime/lost_ticks).
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/time.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/module.h>
+#include <linux/sysdev.h>
+#include <linux/bcd.h>
+#include <linux/efi.h>
+#include <linux/mca.h>
+#include <linux/sysctl.h>
+
+#include <asm/io.h>
+#include <asm/smp.h>
+#include <asm/irq.h>
+#include <asm/msr.h>
+#include <asm/delay.h>
+#include <asm/mpspec.h>
+#include <asm/uaccess.h>
+#include <asm/processor.h>
+#include <asm/timer.h>
+
+#include "mach_time.h"
+
+#include <linux/timex.h>
+#include <linux/config.h>
+
+#include <asm/hpet.h>
+
+#include <asm/arch_hooks.h>
+
+#include "io_ports.h"
+
+extern spinlock_t i8259A_lock;
+int pit_latch_buggy; /* extern */
+
+u64 jiffies_64 = INITIAL_JIFFIES;
+
+EXPORT_SYMBOL(jiffies_64);
+
+unsigned long cpu_khz; /* Detected as we calibrate the TSC */
+
+extern unsigned long wall_jiffies;
+
+DEFINE_SPINLOCK(rtc_lock);
+
+DEFINE_SPINLOCK(i8253_lock);
+EXPORT_SYMBOL(i8253_lock);
+
+extern struct init_timer_opts timer_tsc_init;
+extern struct timer_opts timer_tsc;
+struct timer_opts *cur_timer = &timer_tsc;
+
+/* These are periodically updated in shared_info, and then copied here. */
+u32 shadow_tsc_stamp;
+u64 shadow_system_time;
+static u32 shadow_time_version;
+static struct timeval shadow_tv;
+extern u64 processed_system_time;
+
+/*
+ * We use this to ensure that gettimeofday() is monotonically increasing. We
+ * only break this guarantee if the wall clock jumps backwards "a long way".
+ */
+static struct timeval last_seen_tv = {0,0};
+
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+/* Periodically propagate synchronised time base to the RTC and to Xen. */
+static long last_rtc_update, last_update_to_xen;
+#endif
+
+/* Periodically take synchronised time base from Xen, if we need it. */
+static long last_update_from_xen; /* UTC seconds when last read Xen clock. */
+
+/* Keep track of last time we did processing/updating of jiffies and xtime. */
+u64 processed_system_time; /* System time (ns) at last processing. */
+
+#define NS_PER_TICK (1000000000ULL/HZ)
+
+#define HANDLE_USEC_UNDERFLOW(_tv) do { \
+ while ((_tv).tv_usec < 0) { \
+ (_tv).tv_usec += USEC_PER_SEC; \
+ (_tv).tv_sec--; \
+ } \
+} while (0)
+#define HANDLE_USEC_OVERFLOW(_tv) do { \
+ while ((_tv).tv_usec >= USEC_PER_SEC) { \
+ (_tv).tv_usec -= USEC_PER_SEC; \
+ (_tv).tv_sec++; \
+ } \
+} while (0)
+static inline void __normalize_time(time_t *sec, s64 *nsec)
+{
+ while (*nsec >= NSEC_PER_SEC) {
+ (*nsec) -= NSEC_PER_SEC;
+ (*sec)++;
+ }
+ while (*nsec < 0) {
+ (*nsec) += NSEC_PER_SEC;
+ (*sec)--;
+ }
+}
+
+/* Does this guest OS track Xen time, or set its wall clock independently? */
+static int independent_wallclock = 0;
+static int __init __independent_wallclock(char *str)
+{
+ independent_wallclock = 1;
+ return 1;
+}
+__setup("independent_wallclock", __independent_wallclock);
+#define INDEPENDENT_WALLCLOCK() \
+ (independent_wallclock || (xen_start_info.flags & SIF_INITDOMAIN))
+
+/*
+ * Reads a consistent set of time-base values from Xen, into a shadow data
+ * area. Must be called with the xtime_lock held for writing.
+ */
+static void __get_time_values_from_xen(void)
+{
+ shared_info_t *s = HYPERVISOR_shared_info;
+
+ do {
+ shadow_time_version = s->time_version2;
+ rmb();
+ shadow_tv.tv_sec = s->wc_sec;
+ shadow_tv.tv_usec = s->wc_usec;
+ shadow_tsc_stamp = (u32)s->tsc_timestamp;
+ shadow_system_time = s->system_time;
+ rmb();
+ }
+ while (shadow_time_version != s->time_version1);
+
+ cur_timer->mark_offset();
+}
+
+#define TIME_VALUES_UP_TO_DATE \
+ ({ rmb(); (shadow_time_version == HYPERVISOR_shared_info->time_version2); })
+
+/*
+ * This version of gettimeofday has microsecond resolution
+ * and better than microsecond precision on fast x86 machines with TSC.
+ */
+void do_gettimeofday(struct timeval *tv)
+{
+ unsigned long seq;
+ unsigned long usec, sec;
+ unsigned long max_ntp_tick;
+ unsigned long flags;
+ s64 nsec;
+
+ do {
+ unsigned long lost;
+
+ seq = read_seqbegin(&xtime_lock);
+
+ usec = cur_timer->get_offset();
+ lost = jiffies - wall_jiffies;
+
+ /*
+ * If time_adjust is negative then NTP is slowing the clock
+ * so make sure not to go into next possible interval.
+ * Better to lose some accuracy than have time go backwards..
+ */
+ if (unlikely(time_adjust < 0)) {
+ max_ntp_tick = (USEC_PER_SEC / HZ) - tickadj;
+ usec = min(usec, max_ntp_tick);
+
+ if (lost)
+ usec += lost * max_ntp_tick;
+ }
+ else if (unlikely(lost))
+ usec += lost * (USEC_PER_SEC / HZ);
+
+ sec = xtime.tv_sec;
+ usec += (xtime.tv_nsec / NSEC_PER_USEC);
+
+ nsec = shadow_system_time - processed_system_time;
+ __normalize_time(&sec, &nsec);
+ usec += (long)nsec / NSEC_PER_USEC;
+
+ if (unlikely(!TIME_VALUES_UP_TO_DATE)) {
+ /*
+ * We may have blocked for a long time,
+ * rendering our calculations invalid
+ * (e.g. the time delta may have
+ * overflowed). Detect that and recalculate
+ * with fresh values.
+ */
+ write_seqlock_irqsave(&xtime_lock, flags);
+ __get_time_values_from_xen();
+ write_sequnlock_irqrestore(&xtime_lock, flags);
+ continue;
+ }
+ } while (read_seqretry(&xtime_lock, seq));
+
+ while (usec >= USEC_PER_SEC) {
+ usec -= USEC_PER_SEC;
+ sec++;
+ }
+
+ /* Ensure that time-of-day is monotonically increasing. */
+ if ((sec < last_seen_tv.tv_sec) ||
+ ((sec == last_seen_tv.tv_sec) && (usec < last_seen_tv.tv_usec))) {
+ sec = last_seen_tv.tv_sec;
+ usec = last_seen_tv.tv_usec;
+ } else {
+ last_seen_tv.tv_sec = sec;
+ last_seen_tv.tv_usec = usec;
+ }
+
+ tv->tv_sec = sec;
+ tv->tv_usec = usec;
+}
+
+EXPORT_SYMBOL(do_gettimeofday);
+
+int do_settimeofday(struct timespec *tv)
+{
+ time_t wtm_sec, sec = tv->tv_sec;
+ long wtm_nsec;
+ s64 nsec;
+ struct timespec xentime;
+
+ if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
+ return -EINVAL;
+
+ if (!INDEPENDENT_WALLCLOCK())
+ return 0; /* Silent failure? */
+
+ write_seqlock_irq(&xtime_lock);
+
+ /*
+ * Ensure we don't get blocked for so long that our time delta
+ * overflows. If that were to happen then our shadow time values would
+ * be stale, so we can retry with fresh ones.
+ */
+ again:
+ nsec = (s64)tv->tv_nsec -
+ ((s64)cur_timer->get_offset() * (s64)NSEC_PER_USEC);
+ if (unlikely(!TIME_VALUES_UP_TO_DATE)) {
+ __get_time_values_from_xen();
+ goto again;
+ }
+
+ __normalize_time(&sec, &nsec);
+ set_normalized_timespec(&xentime, sec, nsec);
+
+ /*
+ * This is revolting. We need to set "xtime" correctly. However, the
+ * value in this location is the value at the most recent update of
+ * wall time. Discover what correction gettimeofday() would have
+ * made, and then undo it!
+ */
+ nsec -= (jiffies - wall_jiffies) * TICK_NSEC;
+
+ nsec -= (shadow_system_time - processed_system_time);
+
+ __normalize_time(&sec, &nsec);
+ wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
+ wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
+
+ set_normalized_timespec(&xtime, sec, nsec);
+ set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
+
+ time_adjust = 0; /* stop active adjtime() */
+ time_status |= STA_UNSYNC;
+ time_maxerror = NTP_PHASE_LIMIT;
+ time_esterror = NTP_PHASE_LIMIT;
+
+ /* Reset all our running time counts. They make no sense now. */
+ last_seen_tv.tv_sec = 0;
+ last_update_from_xen = 0;
+
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+ if (xen_start_info.flags & SIF_INITDOMAIN) {
+ dom0_op_t op;
+ last_rtc_update = last_update_to_xen = 0;
+ op.cmd = DOM0_SETTIME;
+ op.u.settime.secs = xentime.tv_sec;
+ op.u.settime.usecs = xentime.tv_nsec / NSEC_PER_USEC;
+ op.u.settime.system_time = shadow_system_time;
+ write_sequnlock_irq(&xtime_lock);
+ HYPERVISOR_dom0_op(&op);
+ } else
+#endif
+ write_sequnlock_irq(&xtime_lock);
+
+ clock_was_set();
+ return 0;
+}
+
+EXPORT_SYMBOL(do_settimeofday);
+
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+static int set_rtc_mmss(unsigned long nowtime)
+{
+ int retval;
+
+ /* gets recalled with irq locally disabled */
+ spin_lock(&rtc_lock);
+ if (efi_enabled)
+ retval = efi_set_rtc_mmss(nowtime);
+ else
+ retval = mach_set_rtc_mmss(nowtime);
+ spin_unlock(&rtc_lock);
+
+ return retval;
+}
+#endif
+
+/* monotonic_clock(): returns # of nanoseconds passed since time_init()
+ * Note: This function is required to return accurate
+ * time even in the absence of multiple timer ticks.
+ */
+unsigned long long monotonic_clock(void)
+{
+ return cur_timer->monotonic_clock();
+}
+EXPORT_SYMBOL(monotonic_clock);
+
+#if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER)
+unsigned long profile_pc(struct pt_regs *regs)
+{
+ unsigned long pc = instruction_pointer(regs);
+
+ if (in_lock_functions(pc))
+ return *(unsigned long *)(regs->ebp + 4);
+
+ return pc;
+}
+EXPORT_SYMBOL(profile_pc);
+#endif
+
+/*
+ * timer_interrupt() needs to keep up the real-time clock,
+ * as well as call the "do_timer()" routine every clocktick
+ */
+static inline void do_timer_interrupt(int irq, void *dev_id,
+ struct pt_regs *regs)
+{
+ time_t wtm_sec, sec;
+ s64 delta, nsec;
+ long sec_diff, wtm_nsec;
+
+ do {
+ __get_time_values_from_xen();
+
+ delta = (s64)(shadow_system_time +
+ ((s64)cur_timer->get_offset() *
+ (s64)NSEC_PER_USEC) -
+ processed_system_time);
+ }
+ while (!TIME_VALUES_UP_TO_DATE);
+
+ if (unlikely(delta < 0)) {
+ printk("Timer ISR: Time went backwards: %lld %lld %lld %lld\n",
+ delta, shadow_system_time,
+ ((s64)cur_timer->get_offset() * (s64)NSEC_PER_USEC),
+ processed_system_time);
+ return;
+ }
+
+ /* Process elapsed jiffies since last call. */
+ while (delta >= NS_PER_TICK) {
+ delta -= NS_PER_TICK;
+ processed_system_time += NS_PER_TICK;
+ do_timer(regs);
+#ifndef CONFIG_SMP
+ update_process_times(user_mode(regs));
+#endif
+ if (regs)
+ profile_tick(CPU_PROFILING, regs);
+ }
+
+ /*
+ * Take synchronised time from Xen once a minute if we're not
+ * synchronised ourselves, and we haven't chosen to keep an independent
+ * time base.
+ */
+ if (!INDEPENDENT_WALLCLOCK() &&
+ ((time_status & STA_UNSYNC) != 0) &&
+ (xtime.tv_sec > (last_update_from_xen + 60))) {
+ /* Adjust shadow for jiffies that haven't updated xtime yet. */
+ shadow_tv.tv_usec -=
+ (jiffies - wall_jiffies) * (USEC_PER_SEC / HZ);
+ HANDLE_USEC_UNDERFLOW(shadow_tv);
+
+ /*
+ * Reset our running time counts if they are invalidated by
+ * a warp backwards of more than 500ms.
+ */
+ sec_diff = xtime.tv_sec - shadow_tv.tv_sec;
+ if (unlikely(abs(sec_diff) > 1) ||
+ unlikely(((sec_diff * USEC_PER_SEC) +
+ (xtime.tv_nsec / NSEC_PER_USEC) -
+ shadow_tv.tv_usec) > 500000)) {
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+ last_rtc_update = last_update_to_xen = 0;
+#endif
+ last_seen_tv.tv_sec = 0;
+ }
+
+ /* Update our unsynchronised xtime appropriately. */
+ sec = shadow_tv.tv_sec;
+ nsec = shadow_tv.tv_usec * NSEC_PER_USEC;
+
+ __normalize_time(&sec, &nsec);
+ wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
+ wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
+
+ set_normalized_timespec(&xtime, sec, nsec);
+ set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
+
+ last_update_from_xen = sec;
+ }
+
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+ if (!(xen_start_info.flags & SIF_INITDOMAIN))
+ return;
+
+ /* Send synchronised time to Xen approximately every minute. */
+ if (((time_status & STA_UNSYNC) == 0) &&
+ (xtime.tv_sec > (last_update_to_xen + 60))) {
+ dom0_op_t op;
+ struct timeval tv;
+
+ tv.tv_sec = xtime.tv_sec;
+ tv.tv_usec = xtime.tv_nsec / NSEC_PER_USEC;
+ tv.tv_usec += (jiffies - wall_jiffies) * (USEC_PER_SEC/HZ);
+ HANDLE_USEC_OVERFLOW(tv);
+
+ op.cmd = DOM0_SETTIME;
+ op.u.settime.secs = tv.tv_sec;
+ op.u.settime.usecs = tv.tv_usec;
+ op.u.settime.system_time = shadow_system_time;
+ HYPERVISOR_dom0_op(&op);
+
+ last_update_to_xen = xtime.tv_sec;
+ }
+
+ /*
+ * If we have an externally synchronized Linux clock, then update
+ * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
+ * called as close as possible to 500 ms before the new second starts.
+ */
+ if ((time_status & STA_UNSYNC) == 0 &&
+ xtime.tv_sec > last_rtc_update + 660 &&
+ (xtime.tv_nsec / 1000)
+ >= USEC_AFTER - ((unsigned) TICK_SIZE) / 2 &&
+ (xtime.tv_nsec / 1000)
+ <= USEC_BEFORE + ((unsigned) TICK_SIZE) / 2) {
+ /* horrible...FIXME */
+ if (efi_enabled) {
+ if (efi_set_rtc_mmss(xtime.tv_sec) == 0)
+ last_rtc_update = xtime.tv_sec;
+ else
+ last_rtc_update = xtime.tv_sec - 600;
+ } else if (set_rtc_mmss(xtime.tv_sec) == 0)
+ last_rtc_update = xtime.tv_sec;
+ else
+ last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
+ }
+#endif
+}
+
+/*
+ * This is the same as the above, except we _also_ save the current
+ * Time Stamp Counter value at the time of the timer interrupt, so that
+ * we later on can estimate the time of day more exactly.
+ */
+irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ /*
+ * Here we are in the timer irq handler. We just have irqs locally
+ * disabled but we don't know if the timer_bh is running on the other
+ * CPU. We need to avoid an SMP race with it. NOTE: we don't need
+ * the irq version of write_lock because as just said we have irq
+ * locally disabled. -arca
+ */
+ write_seqlock(&xtime_lock);
+ do_timer_interrupt(irq, NULL, regs);
+ write_sequnlock(&xtime_lock);
+ return IRQ_HANDLED;
+}
+
+/* not static: needed by APM */
+unsigned long get_cmos_time(void)
+{
+ unsigned long retval;
+
+ spin_lock(&rtc_lock);
+
+ if (efi_enabled)
+ retval = efi_get_time();
+ else
+ retval = mach_get_cmos_time();
+
+ spin_unlock(&rtc_lock);
+
+ return retval;
+}
+
+static long clock_cmos_diff, sleep_start;
+
+static int timer_suspend(struct sys_device *dev, u32 state)
+{
+ /*
+ * Estimate time zone so that set_time can update the clock
+ */
+ clock_cmos_diff = -get_cmos_time();
+ clock_cmos_diff += get_seconds();
+ sleep_start = get_cmos_time();
+ return 0;
+}
+
+static int timer_resume(struct sys_device *dev)
+{
+ unsigned long flags;
+ unsigned long sec;
+ unsigned long sleep_length;
+
+#ifdef CONFIG_HPET_TIMER
+ if (is_hpet_enabled())
+ hpet_reenable();
+#endif
+ sec = get_cmos_time() + clock_cmos_diff;
+ sleep_length = (get_cmos_time() - sleep_start) * HZ;
+ write_seqlock_irqsave(&xtime_lock, flags);
+ xtime.tv_sec = sec;
+ xtime.tv_nsec = 0;
+ write_sequnlock_irqrestore(&xtime_lock, flags);
+ jiffies += sleep_length;
+ wall_jiffies += sleep_length;
+ return 0;
+}
+
+static struct sysdev_class timer_sysclass = {
+ .resume = timer_resume,
+ .suspend = timer_suspend,
+ set_kset_name("timer"),
+};
+
+
+/* XXX this driverfs stuff should probably go elsewhere later -john */
+static struct sys_device device_timer = {
+ .id = 0,
+ .cls = &timer_sysclass,
+};
+
+static int time_init_device(void)
+{
+ int error = sysdev_class_register(&timer_sysclass);
+ if (!error)
+ error = sysdev_register(&device_timer);
+ return error;
+}
+
+device_initcall(time_init_device);
+
+#ifdef CONFIG_HPET_TIMER
+extern void (*late_time_init)(void);
+/* Duplicate of time_init() below, with hpet_enable part added */
+void __init hpet_time_init(void)
+{
+ xtime.tv_sec = get_cmos_time();
+ xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
+ set_normalized_timespec(&wall_to_monotonic,
+ -xtime.tv_sec, -xtime.tv_nsec);
+
+ if (hpet_enable() >= 0) {
+ printk("Using HPET for base-timer\n");
+ }
+
+ cur_timer = select_timer();
+ printk(KERN_INFO "Using %s for high-res timesource\n",cur_timer->name);
+
+ time_init_hook();
+}
+#endif
+
+/* Dynamically-mapped IRQ. */
+static int TIMER_IRQ;
+
+static struct irqaction irq_timer = {
+ timer_interrupt, SA_INTERRUPT, CPU_MASK_NONE, "timer",
+ NULL, NULL
+};
+
+void __init time_init(void)
+{
+#ifdef CONFIG_HPET_TIMER
+ if (is_hpet_capable()) {
+ /*
+ * HPET initialization needs to do memory-mapped io. So, let
+ * us do a late initialization after mem_init().
+ */
+ late_time_init = hpet_time_init;
+ return;
+ }
+#endif
+ __get_time_values_from_xen();
+ xtime.tv_sec = shadow_tv.tv_sec;
+ xtime.tv_nsec = shadow_tv.tv_usec * NSEC_PER_USEC;
+ set_normalized_timespec(&wall_to_monotonic,
+ -xtime.tv_sec, -xtime.tv_nsec);
+ processed_system_time = shadow_system_time;
+
+ if (timer_tsc_init.init(NULL) != 0)
+ BUG();
+ printk(KERN_INFO "Using %s for high-res timesource\n",cur_timer->name);
+
+ TIMER_IRQ = bind_virq_to_irq(VIRQ_TIMER);
+
+ (void)setup_irq(TIMER_IRQ, &irq_timer);
+}
+
+/* Convert jiffies to system time. Call with xtime_lock held for reading. */
+static inline u64 __jiffies_to_st(unsigned long j)
+{
+ long delta = j - jiffies;
+ /* NB. The next check can trigger in some wrap-around cases, but
+ * that's ok -- we'll just end up with a shorter timeout. */
+ if (delta < 1)
+ delta = 1;
+ return processed_system_time + (delta * NS_PER_TICK);
+}
+
+/*
+ * stop_hz_timer / start_hz_timer - enter/exit 'tickless mode' on an idle cpu
+ * These functions are based on implementations from arch/s390/kernel/time.c
+ */
+void stop_hz_timer(void)
+{
+ unsigned int cpu = smp_processor_id();
+ unsigned long j;
+
+ /* s390 does this /before/ checking rcu_pending(). We copy them. */
+ cpu_set(cpu, nohz_cpu_mask);
+
+ /* Leave ourselves in 'tick mode' if rcu or softirq pending. */
+ if (rcu_pending(cpu) || local_softirq_pending()) {
+ cpu_clear(cpu, nohz_cpu_mask);
+ j = jiffies + 1;
+ } else {
+ j = next_timer_interrupt();
+ }
+
+ BUG_ON(HYPERVISOR_set_timer_op(__jiffies_to_st(j)) != 0);
+}
+
+void start_hz_timer(void)
+{
+ cpu_clear(smp_processor_id(), nohz_cpu_mask);
+}
+
+void time_suspend(void)
+{
+ /* nothing */
+}
+
+/* No locking required. We are only CPU running, and interrupts are off. */
+void time_resume(void)
+{
+ if (timer_tsc_init.init(NULL) != 0)
+ BUG();
+
+ /* Get timebases for new environment. */
+ __get_time_values_from_xen();
+
+ /* Reset our own concept of passage of system time. */
+ processed_system_time = shadow_system_time;
+
+ /* Accept a warp in UTC (wall-clock) time. */
+ last_seen_tv.tv_sec = 0;
+
+ /* Make sure we resync UTC time with Xen on next timer interrupt. */
+ last_update_from_xen = 0;
+}
+
+/*
+ * /proc/sys/xen: This really belongs in another file. It can stay here for
+ * now however.
+ */
+static ctl_table xen_subtable[] = {
+ {1, "independent_wallclock", &independent_wallclock,
+ sizeof(independent_wallclock), 0644, NULL, proc_dointvec},
+ {0}
+};
+static ctl_table xen_table[] = {
+ {123, "xen", NULL, 0, 0555, xen_subtable},
+ {0}
+};
+static int __init xen_sysctl_init(void)
+{
+ (void)register_sysctl_table(xen_table, 0);
+ return 0;
+}
+__initcall(xen_sysctl_init);
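To exercise the knob registered above from userspace, a hypothetical sketch (the /proc path is inferred from the "xen"/"independent_wallclock" ctl_table names; not part of the patch):

    #include <stdio.h>

    int main(void)
    {
        /* Assumed path: /proc/sys/xen/independent_wallclock, per the
         * ctl_table entries above. */
        FILE *f = fopen("/proc/sys/xen/independent_wallclock", "w");
        if (!f)
            return 1;
        fputs("1\n", f); /* let this domain track its own wall clock */
        return fclose(f) ? 1 : 0;
    }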
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/arch/xen/i386/kernel/timers/Makefile
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/timers/Makefile Tue Aug 9 15:17:45 2005
@@ -0,0 +1,17 @@
+#
+# Makefile for x86 timers
+#
+
+XENARCH := $(subst ",,$(CONFIG_XENARCH))
+
+obj-y := timer_tsc.o
+c-obj-y :=
+
+c-link :=
+
+$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-link)):
+ @ln -fsn $(srctree)/arch/i386/kernel/timers/$(notdir $@) $@
+
+obj-y += $(c-obj-y)
+
+clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-) $(c-link))
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/arch/xen/i386/kernel/timers/timer_tsc.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/timers/timer_tsc.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,379 @@
+/*
+ * This code largely moved from arch/i386/kernel/time.c.
+ * See comments there for proper credits.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/timex.h>
+#include <linux/errno.h>
+#include <linux/cpufreq.h>
+#include <linux/string.h>
+#include <linux/jiffies.h>
+
+#include <asm/timer.h>
+#include <asm/io.h>
+/* processor.h for tsc_disable flag */
+#include <asm/processor.h>
+
+#include "io_ports.h"
+#include "mach_timer.h"
+
+#include <asm/hpet.h>
+
+#ifdef CONFIG_HPET_TIMER
+static unsigned long hpet_usec_quotient;
+static unsigned long hpet_last;
+static struct timer_opts timer_tsc;
+#endif
+
+static inline void cpufreq_delayed_get(void);
+
+int tsc_disable __initdata = 0;
+
+extern spinlock_t i8253_lock;
+
+static int use_tsc;
+
+static unsigned long long monotonic_base;
+static u32 monotonic_offset;
+static seqlock_t monotonic_lock = SEQLOCK_UNLOCKED;
+
+/* convert from cycles(64bits) => nanoseconds (64bits)
+ * basic equation:
+ * ns = cycles / (freq / ns_per_sec)
+ * ns = cycles * (ns_per_sec / freq)
+ * ns = cycles * (10^9 / (cpu_mhz * 10^6))
+ * ns = cycles * (10^3 / cpu_mhz)
+ *
+ * Then we use scaling math (suggested by george@xxxxxxxxxx) to get:
+ * ns = cycles * (10^3 * SC / cpu_mhz) / SC
+ * ns = cycles * cyc2ns_scale / SC
+ *
+ * And since SC is a constant power of two, we can convert the div
+ * into a shift.
+ * -johnstul@xxxxxxxxxx "math is hard, lets go shopping!"
+ */
+static unsigned long cyc2ns_scale;
+#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
+
+static inline void set_cyc2ns_scale(unsigned long cpu_mhz)
+{
+ cyc2ns_scale = (1000 << CYC2NS_SCALE_FACTOR)/cpu_mhz;
+}
+
+static inline unsigned long long cycles_2_ns(unsigned long long cyc)
+{
+ return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
+}
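A worked instance of the scaling with illustrative numbers (not from the patch):

    /* For cpu_mhz == 2400: cyc2ns_scale = (1000 << 10) / 2400 == 426.
     * One second of cycles then converts as
     *   (2400000000ULL * 426) >> 10 == 998437500 ns,
     * about 0.16% low -- the rounding cost of the power-of-two scale. */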
+
+/* Cached *multiplier* to convert TSC counts to microseconds.
+ * (see the equation below).
+ * Equal to 2^32 * (1 / (clocks per usec) ).
+ * Initialized in time_init.
+ */
+static unsigned long fast_gettimeoffset_quotient;
+
+extern u32 shadow_tsc_stamp;
+extern u64 shadow_system_time;
+
+static unsigned long get_offset_tsc(void)
+{
+ register unsigned long eax, edx;
+
+ /* Read the Time Stamp Counter */
+
+ rdtsc(eax,edx);
+
+ /* .. relative to previous jiffy (32 bits is enough) */
+ eax -= shadow_tsc_stamp;
+
+ /*
+ * Time offset = (tsc_low delta) * fast_gettimeoffset_quotient
+ * = (tsc_low delta) * (usecs_per_clock)
+ * = (tsc_low delta) * (usecs_per_jiffy / clocks_per_jiffy)
+ *
+ * Using a mull instead of a divl saves up to 31 clock cycles
+ * in the critical path.
+ */
+
+ __asm__("mull %2"
+ :"=a" (eax), "=d" (edx)
+ :"rm" (fast_gettimeoffset_quotient),
+ "0" (eax));
+
+ /* our adjusted time offset in microseconds */
+ return edx;
+}
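The mull is a 32x32->64 multiply that keeps only the high half; in portable C the same computation would read (sketch, not in the patch):

    /* return (u32)(((u64)eax * fast_gettimeoffset_quotient) >> 32);
     * i.e. delta_cycles * (2^32 / clocks_per_usec) / 2^32 == usecs. */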
+
+static unsigned long long monotonic_clock_tsc(void)
+{
+ unsigned long long last_offset, this_offset, base;
+ unsigned seq;
+
+ /* atomically read monotonic base & last_offset */
+ do {
+ seq = read_seqbegin(&monotonic_lock);
+ last_offset = monotonic_offset;
+ base = monotonic_base;
+ } while (read_seqretry(&monotonic_lock, seq));
+
+ /* Read the Time Stamp Counter */
+ rdtscll(this_offset);
+
+ /* return the value in ns */
+ return base + cycles_2_ns(this_offset - last_offset);
+}
+
+/*
+ * Scheduler clock - returns current time in nanosec units.
+ */
+unsigned long long sched_clock(void)
+{
+ unsigned long long this_offset;
+
+ /*
+ * In the NUMA case we dont use the TSC as they are not
+ * synchronized across all CPUs.
+ */
+#ifndef CONFIG_NUMA
+ if (!use_tsc)
+#endif
+ /* no locking but a rare wrong value is not a big deal */
+ return jiffies_64 * (1000000000 / HZ);
+
+ /* Read the Time Stamp Counter */
+ rdtscll(this_offset);
+
+ /* return the value in ns */
+ return cycles_2_ns(this_offset);
+}
+
+
+static void mark_offset_tsc(void)
+{
+
+ /* update the monotonic base value */
+ write_seqlock(&monotonic_lock);
+ monotonic_base = shadow_system_time;
+ monotonic_offset = shadow_tsc_stamp;
+ write_sequnlock(&monotonic_lock);
+}
+
+static void delay_tsc(unsigned long loops)
+{
+ unsigned long bclock, now;
+
+ rdtscl(bclock);
+ do
+ {
+ rep_nop();
+ rdtscl(now);
+ } while ((now-bclock) < loops);
+}
+
+#ifdef CONFIG_HPET_TIMER
+static void mark_offset_tsc_hpet(void)
+{
+ unsigned long long this_offset, last_offset;
+ unsigned long offset, temp, hpet_current;
+
+ write_seqlock(&monotonic_lock);
+ last_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
+ /*
+ * It is important that these two operations happen almost at
+ * the same time. We do the RDTSC stuff first, since it's
+ * faster. To avoid any inconsistencies, we need interrupts
+ * disabled locally.
+ */
+ /*
+ * Interrupts are just disabled locally since the timer irq
+ * has the SA_INTERRUPT flag set. -arca
+ */
+ /* read Pentium cycle counter */
+
+ hpet_current = hpet_readl(HPET_COUNTER);
+ rdtsc(last_tsc_low, last_tsc_high);
+
+ /* lost tick compensation */
+ offset = hpet_readl(HPET_T0_CMP) - hpet_tick;
+ if (unlikely(((offset - hpet_last) > hpet_tick) && (hpet_last != 0))) {
+ int lost_ticks = (offset - hpet_last) / hpet_tick;
+ jiffies_64 += lost_ticks;
+ }
+ hpet_last = hpet_current;
+
+ /* update the monotonic base value */
+ this_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
+ monotonic_base += cycles_2_ns(this_offset - last_offset);
+ write_sequnlock(&monotonic_lock);
+
+ /* calculate delay_at_last_interrupt */
+ /*
+ * Time offset = (hpet delta) * ( usecs per HPET clock )
+ * = (hpet delta) * ( usecs per tick / HPET clocks per tick)
+ * = (hpet delta) * ( hpet_usec_quotient ) / (2^32)
+ * Where,
+ * hpet_usec_quotient = (2^32 * usecs per tick)/HPET clocks per tick
+ */
+ delay_at_last_interrupt = hpet_current - offset;
+ ASM_MUL64_REG(temp, delay_at_last_interrupt,
+ hpet_usec_quotient, delay_at_last_interrupt);
+}
+#endif
+
+
+#ifdef CONFIG_CPU_FREQ
+#include <linux/workqueue.h>
+
+static unsigned int cpufreq_delayed_issched = 0;
+static unsigned int cpufreq_init = 0;
+static struct work_struct cpufreq_delayed_get_work;
+
+static void handle_cpufreq_delayed_get(void *v)
+{
+ unsigned int cpu;
+ for_each_online_cpu(cpu) {
+ cpufreq_get(cpu);
+ }
+ cpufreq_delayed_issched = 0;
+}
+
+/* If we notice lost ticks, schedule a call to cpufreq_get(), as it
+ * verifies that the CPU frequency the timing core thinks the CPU is
+ * running at is still correct.
+ */
+static inline void cpufreq_delayed_get(void)
+{
+ if (cpufreq_init && !cpufreq_delayed_issched) {
+ cpufreq_delayed_issched = 1;
+ printk(KERN_DEBUG "Losing some ticks... checking if CPU frequency changed.\n");
+ schedule_work(&cpufreq_delayed_get_work);
+ }
+}
+
+/* If the CPU frequency is scaled, TSC-based delays will need a different
+ * loops_per_jiffy value to function properly.
+ */
+
+static unsigned int ref_freq = 0;
+static unsigned long loops_per_jiffy_ref = 0;
+
+#ifndef CONFIG_SMP
+static unsigned long fast_gettimeoffset_ref = 0;
+static unsigned long cpu_khz_ref = 0;
+#endif
+
+static int
+time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ struct cpufreq_freqs *freq = data;
+
+ if (val != CPUFREQ_RESUMECHANGE)
+ write_seqlock_irq(&xtime_lock);
+ if (!ref_freq) {
+ ref_freq = freq->old;
+ loops_per_jiffy_ref = cpu_data[freq->cpu].loops_per_jiffy;
+#ifndef CONFIG_SMP
+ fast_gettimeoffset_ref = fast_gettimeoffset_quotient;
+ cpu_khz_ref = cpu_khz;
+#endif
+ }
+
+ if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
+ (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
+ (val == CPUFREQ_RESUMECHANGE)) {
+ if (!(freq->flags & CPUFREQ_CONST_LOOPS))
+ cpu_data[freq->cpu].loops_per_jiffy = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);
+#ifndef CONFIG_SMP
+ if (cpu_khz)
+ cpu_khz = cpufreq_scale(cpu_khz_ref, ref_freq, freq->new);
+ if (use_tsc) {
+ if (!(freq->flags & CPUFREQ_CONST_LOOPS)) {
+ fast_gettimeoffset_quotient = cpufreq_scale(fast_gettimeoffset_ref, freq->new, ref_freq);
+ set_cyc2ns_scale(cpu_khz/1000);
+ }
+ }
+#endif
+ }
+
+ if (val != CPUFREQ_RESUMECHANGE)
+ write_sequnlock_irq(&xtime_lock);
+
+ return 0;
+}
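For reference, cpufreq_scale(old, div, mult) computes old * mult / div, so the rescaling above is linear; a worked example (illustrative values):

    /* With loops_per_jiffy_ref calibrated at ref_freq == 1000000 kHz
     * and a transition to freq->new == 2000000 kHz, the scaled
     * loops_per_jiffy doubles, keeping udelay() calibrated. */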
+
+static struct notifier_block time_cpufreq_notifier_block = {
+ .notifier_call = time_cpufreq_notifier
+};
+
+
+static int __init cpufreq_tsc(void)
+{
+ int ret;
+ INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
+ ret = cpufreq_register_notifier(&time_cpufreq_notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ if (!ret)
+ cpufreq_init = 1;
+ return ret;
+}
+core_initcall(cpufreq_tsc);
+
+#else /* CONFIG_CPU_FREQ */
+static inline void cpufreq_delayed_get(void) { return; }
+#endif
+
+
+static int init_tsc(char* override)
+{
+ u64 __cpu_khz;
+
+ __cpu_khz = HYPERVISOR_shared_info->cpu_freq;
+ do_div(__cpu_khz, 1000);
+ cpu_khz = (u32)__cpu_khz;
+ printk(KERN_INFO "Xen reported: %lu.%03lu MHz processor.\n",
+ cpu_khz / 1000, cpu_khz % 1000);
+
+ /* (10^6 * 2^32) / cpu_hz = (10^3 * 2^32) / cpu_khz =
+ (2^32 * 1 / (clocks/us)) */
+ {
+ unsigned long eax=0, edx=1000;
+ __asm__("divl %2"
+ :"=a" (fast_gettimeoffset_quotient), "=d" (edx)
+ :"r" (cpu_khz),
+ "0" (eax), "1" (edx));
+ }
+
+ set_cyc2ns_scale(cpu_khz/1000);
+
+ use_tsc = 1;
+
+ return 0;
+}
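The divl computes fast_gettimeoffset_quotient = (1000 << 32) / cpu_khz; an equivalent portable sketch (not in the patch):

    /* u64 q = 1000ULL << 32;
     * do_div(q, cpu_khz); // q == 2^32 / (TSC clocks per usec)
     * fast_gettimeoffset_quotient = (unsigned long)q;
     * e.g. cpu_khz == 2400000 yields q ~= 1789569. */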
+
+static int __init tsc_setup(char *str)
+{
+ printk(KERN_WARNING "notsc: cannot disable TSC in Xen/Linux.\n");
+ return 1;
+}
+__setup("notsc", tsc_setup);
+
+
+
+/************************************************************/
+
+/* tsc timer_opts struct */
+struct timer_opts timer_tsc = {
+ .name = "tsc",
+ .mark_offset = mark_offset_tsc,
+ .get_offset = get_offset_tsc,
+ .monotonic_clock = monotonic_clock_tsc,
+ .delay = delay_tsc,
+};
+
+struct init_timer_opts timer_tsc_init = {
+ .init = init_tsc,
+ .opts = &timer_tsc,
+};
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/arch/xen/i386/kernel/traps.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/traps.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,980 @@
+/*
+ * linux/arch/i386/traps.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ *
+ * Pentium III FXSR, SSE support
+ * Gareth Hughes <gareth@xxxxxxxxxxx>, May 2000
+ */
+
+/*
+ * 'Traps.c' handles hardware traps and faults after we have saved some
+ * state in 'asm.s'.
+ */
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/timer.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/highmem.h>
+#include <linux/kallsyms.h>
+#include <linux/ptrace.h>
+#include <linux/utsname.h>
+#include <linux/kprobes.h>
+
+#ifdef CONFIG_EISA
+#include <linux/ioport.h>
+#include <linux/eisa.h>
+#endif
+
+#ifdef CONFIG_MCA
+#include <linux/mca.h>
+#endif
+
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/atomic.h>
+#include <asm/debugreg.h>
+#include <asm/desc.h>
+#include <asm/i387.h>
+#include <asm/nmi.h>
+
+#include <asm/smp.h>
+#include <asm/arch_hooks.h>
+#include <asm/kdebug.h>
+
+#include <linux/irq.h>
+#include <linux/module.h>
+
+#include "mach_traps.h"
+
+asmlinkage int system_call(void);
+
+/* Do we ignore FPU interrupts ? */
+char ignore_fpu_irq = 0;
+
+/*
+ * The IDT has to be page-aligned to simplify the Pentium
+ * F0 0F bug workaround.. We have a special link segment
+ * for this.
+ */
+struct desc_struct idt_table[256] __attribute__((__section__(".data.idt"))) = { {0, 0}, };
+
+asmlinkage void divide_error(void);
+asmlinkage void debug(void);
+asmlinkage void nmi(void);
+asmlinkage void int3(void);
+asmlinkage void overflow(void);
+asmlinkage void bounds(void);
+asmlinkage void invalid_op(void);
+asmlinkage void device_not_available(void);
+asmlinkage void coprocessor_segment_overrun(void);
+asmlinkage void invalid_TSS(void);
+asmlinkage void segment_not_present(void);
+asmlinkage void stack_segment(void);
+asmlinkage void general_protection(void);
+asmlinkage void page_fault(void);
+asmlinkage void coprocessor_error(void);
+asmlinkage void simd_coprocessor_error(void);
+asmlinkage void alignment_check(void);
+asmlinkage void fixup_4gb_segment(void);
+asmlinkage void machine_check(void);
+
+static int kstack_depth_to_print = 24;
+struct notifier_block *i386die_chain;
+static DEFINE_SPINLOCK(die_notifier_lock);
+
+int register_die_notifier(struct notifier_block *nb)
+{
+ int err = 0;
+ unsigned long flags;
+ spin_lock_irqsave(&die_notifier_lock, flags);
+ err = notifier_chain_register(&i386die_chain, nb);
+ spin_unlock_irqrestore(&die_notifier_lock, flags);
+ return err;
+}
+
+static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
+{
+ return p > (void *)tinfo &&
+ p < (void *)tinfo + THREAD_SIZE - 3;
+}
+
+static inline unsigned long print_context_stack(struct thread_info *tinfo,
+ unsigned long *stack, unsigned long ebp)
+{
+ unsigned long addr;
+
+#ifdef CONFIG_FRAME_POINTER
+ while (valid_stack_ptr(tinfo, (void *)ebp)) {
+ addr = *(unsigned long *)(ebp + 4);
+ printk(" [<%08lx>] ", addr);
+ print_symbol("%s", addr);
+ printk("\n");
+ ebp = *(unsigned long *)ebp;
+ }
+#else
+ while (valid_stack_ptr(tinfo, stack)) {
+ addr = *stack++;
+ if (__kernel_text_address(addr)) {
+ printk(" [<%08lx>]", addr);
+ print_symbol(" %s", addr);
+ printk("\n");
+ }
+ }
+#endif
+ return ebp;
+}
+
+void show_trace(struct task_struct *task, unsigned long * stack)
+{
+ unsigned long ebp;
+
+ if (!task)
+ task = current;
+
+ if (task == current) {
+ /* Grab ebp right from our regs */
+ asm ("movl %%ebp, %0" : "=r" (ebp) : );
+ } else {
+ /* ebp is the last reg pushed by switch_to */
+ ebp = *(unsigned long *) task->thread.esp;
+ }
+
+ while (1) {
+ struct thread_info *context;
+ context = (struct thread_info *)
+ ((unsigned long)stack & (~(THREAD_SIZE - 1)));
+ ebp = print_context_stack(context, stack, ebp);
+ stack = (unsigned long*)context->previous_esp;
+ if (!stack)
+ break;
+ printk(" =======================\n");
+ }
+}
+
+void show_stack(struct task_struct *task, unsigned long *esp)
+{
+ unsigned long *stack;
+ int i;
+
+ if (esp == NULL) {
+ if (task)
+ esp = (unsigned long*)task->thread.esp;
+ else
+ esp = (unsigned long *)&esp;
+ }
+
+ stack = esp;
+ for(i = 0; i < kstack_depth_to_print; i++) {
+ if (kstack_end(stack))
+ break;
+ if (i && ((i % 8) == 0))
+ printk("\n ");
+ printk("%08lx ", *stack++);
+ }
+ printk("\nCall Trace:\n");
+ show_trace(task, esp);
+}
+
+/*
+ * The architecture-independent dump_stack generator
+ */
+void dump_stack(void)
+{
+ unsigned long stack;
+
+ show_trace(current, &stack);
+}
+
+EXPORT_SYMBOL(dump_stack);
+
+void show_registers(struct pt_regs *regs)
+{
+ int i;
+ int in_kernel = 1;
+ unsigned long esp;
+ unsigned short ss;
+
+ esp = (unsigned long) (&regs->esp);
+ ss = __KERNEL_DS;
+ if (regs->xcs & 2) {
+ in_kernel = 0;
+ esp = regs->esp;
+ ss = regs->xss & 0xffff;
+ }
+ print_modules();
+ printk("CPU: %d\nEIP: %04x:[<%08lx>] %s VLI\nEFLAGS: %08lx"
+ " (%s) \n",
+ smp_processor_id(), 0xffff & regs->xcs, regs->eip,
+ print_tainted(), regs->eflags, system_utsname.release);
+ print_symbol("EIP is at %s\n", regs->eip);
+ printk("eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n",
+ regs->eax, regs->ebx, regs->ecx, regs->edx);
+ printk("esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n",
+ regs->esi, regs->edi, regs->ebp, esp);
+ printk("ds: %04x es: %04x ss: %04x\n",
+ regs->xds & 0xffff, regs->xes & 0xffff, ss);
+ printk("Process %s (pid: %d, threadinfo=%p task=%p)",
+ current->comm, current->pid, current_thread_info(), current);
+ /*
+ * When in-kernel, we also print out the stack and code at the
+ * time of the fault..
+ */
+ if (in_kernel) {
+ u8 *eip;
+
+ printk("\nStack: ");
+ show_stack(NULL, (unsigned long*)esp);
+
+ printk("Code: ");
+
+ eip = (u8 *)regs->eip - 43;
+ for (i = 0; i < 64; i++, eip++) {
+ unsigned char c;
+
+ if (eip < (u8 *)PAGE_OFFSET || __get_user(c, eip)) {
+ printk(" Bad EIP value.");
+ break;
+ }
+ if (eip == (u8 *)regs->eip)
+ printk("<%02x> ", c);
+ else
+ printk("%02x ", c);
+ }
+ }
+ printk("\n");
+}
+
+static void handle_BUG(struct pt_regs *regs)
+{
+ unsigned short ud2;
+ unsigned short line;
+ char *file;
+ char c;
+ unsigned long eip;
+
+ if (regs->xcs & 2)
+ goto no_bug; /* Not in kernel */
+
+ eip = regs->eip;
+
+ if (eip < PAGE_OFFSET)
+ goto no_bug;
+ if (__get_user(ud2, (unsigned short *)eip))
+ goto no_bug;
+ if (ud2 != 0x0b0f)
+ goto no_bug;
+ if (__get_user(line, (unsigned short *)(eip + 2)))
+ goto bug;
+ if (__get_user(file, (char **)(eip + 4)) ||
+ (unsigned long)file < PAGE_OFFSET || __get_user(c, file))
+ file = "<bad filename>";
+
+ printk("------------[ cut here ]------------\n");
+ printk(KERN_ALERT "kernel BUG at %s:%d!\n", file, line);
+
+no_bug:
+ return;
+
+ /* Here we know it was a BUG but file-n-line is unavailable */
+bug:
+ printk("Kernel BUG\n");
+}
+
+void die(const char * str, struct pt_regs * regs, long err)
+{
+ static struct {
+ spinlock_t lock;
+ u32 lock_owner;
+ int lock_owner_depth;
+ } die = {
+ .lock = SPIN_LOCK_UNLOCKED,
+ .lock_owner = -1,
+ .lock_owner_depth = 0
+ };
+ static int die_counter;
+
+ if (die.lock_owner != _smp_processor_id()) {
+ console_verbose();
+ spin_lock_irq(&die.lock);
+ die.lock_owner = smp_processor_id();
+ die.lock_owner_depth = 0;
+ bust_spinlocks(1);
+ }
+
+ if (++die.lock_owner_depth < 3) {
+ int nl = 0;
+ handle_BUG(regs);
+ printk(KERN_ALERT "%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
+#ifdef CONFIG_PREEMPT
+ printk("PREEMPT ");
+ nl = 1;
+#endif
+#ifdef CONFIG_SMP
+ printk("SMP ");
+ nl = 1;
+#endif
+#ifdef CONFIG_DEBUG_PAGEALLOC
+ printk("DEBUG_PAGEALLOC");
+ nl = 1;
+#endif
+ if (nl)
+ printk("\n");
+ notify_die(DIE_OOPS, (char *)str, regs, err, 255, SIGSEGV);
+ show_registers(regs);
+ } else
+ printk(KERN_ERR "Recursive die() failure, output suppressed\n");
+
+ bust_spinlocks(0);
+ die.lock_owner = -1;
+ spin_unlock_irq(&die.lock);
+ if (in_interrupt())
+ panic("Fatal exception in interrupt");
+
+ if (panic_on_oops) {
+ printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(5 * HZ);
+ panic("Fatal exception");
+ }
+ do_exit(SIGSEGV);
+}
+
+static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
+{
+ if (!(regs->eflags & VM_MASK) && !(2 & regs->xcs))
+ die(str, regs, err);
+}
+
+static void do_trap(int trapnr, int signr, char *str, int vm86,
+ struct pt_regs * regs, long error_code, siginfo_t *info)
+{
+ if (regs->eflags & VM_MASK) {
+ if (vm86)
+ goto vm86_trap;
+ goto trap_signal;
+ }
+
+ if (!(regs->xcs & 2))
+ goto kernel_trap;
+
+ trap_signal: {
+ struct task_struct *tsk = current;
+ tsk->thread.error_code = error_code;
+ tsk->thread.trap_no = trapnr;
+ if (info)
+ force_sig_info(signr, info, tsk);
+ else
+ force_sig(signr, tsk);
+ return;
+ }
+
+ kernel_trap: {
+ if (!fixup_exception(regs))
+ die(str, regs, error_code);
+ return;
+ }
+
+ vm86_trap: {
+ int ret = handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, trapnr);
+ if (ret) goto trap_signal;
+ return;
+ }
+}
+
+#define DO_ERROR(trapnr, signr, str, name) \
+fastcall void do_##name(struct pt_regs * regs, long error_code) \
+{ \
+ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
+ == NOTIFY_STOP) \
+ return; \
+ do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \
+}
+
+#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
+fastcall void do_##name(struct pt_regs * regs, long error_code) \
+{ \
+ siginfo_t info; \
+ info.si_signo = signr; \
+ info.si_errno = 0; \
+ info.si_code = sicode; \
+ info.si_addr = (void __user *)siaddr; \
+ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
+ == NOTIFY_STOP) \
+ return; \
+ do_trap(trapnr, signr, str, 0, regs, error_code, &info); \
+}
+
+#define DO_VM86_ERROR(trapnr, signr, str, name) \
+fastcall void do_##name(struct pt_regs * regs, long error_code) \
+{ \
+ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
+ == NOTIFY_STOP) \
+ return; \
+ do_trap(trapnr, signr, str, 1, regs, error_code, NULL); \
+}
+
+#define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
+fastcall void do_##name(struct pt_regs * regs, long error_code) \
+{ \
+ siginfo_t info; \
+ info.si_signo = signr; \
+ info.si_errno = 0; \
+ info.si_code = sicode; \
+ info.si_addr = (void __user *)siaddr; \
+ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
+ == NOTIFY_STOP) \
+ return; \
+ do_trap(trapnr, signr, str, 1, regs, error_code, &info); \
+}
+
+DO_VM86_ERROR_INFO( 0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->eip)
+#ifndef CONFIG_KPROBES
+DO_VM86_ERROR( 3, SIGTRAP, "int3", int3)
+#endif
+DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
+DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
+DO_ERROR_INFO( 6, SIGILL, "invalid operand", invalid_op, ILL_ILLOPN, regs->eip)
+DO_VM86_ERROR( 7, SIGSEGV, "device not available", device_not_available)
+DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
+DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
+DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
+DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
+DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
+#ifdef CONFIG_X86_MCE
+DO_ERROR(18, SIGBUS, "machine check", machine_check)
+#endif
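For readers unfamiliar with these macros, one invocation expanded mechanically (sketch, derived from DO_ERROR above):

    /* DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS) generates:
     *
     * fastcall void do_invalid_TSS(struct pt_regs * regs, long error_code)
     * {
     *     if (notify_die(DIE_TRAP, "invalid TSS", regs, error_code, 10,
     *                    SIGSEGV) == NOTIFY_STOP)
     *         return;
     *     do_trap(10, SIGSEGV, "invalid TSS", 0, regs, error_code, NULL);
     * }
     */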
+
+fastcall void do_general_protection(struct pt_regs * regs, long error_code)
+{
+ /*
+ * If we trapped on an LDT access then ensure that the default_ldt is
+ * loaded, if nothing else. We load default_ldt lazily because LDT
+ * switching costs time and many applications don't need it.
+ */
+ if (unlikely((error_code & 6) == 4)) {
+ unsigned long ldt;
+ __asm__ __volatile__ ("sldt %0" : "=r" (ldt));
+ if (ldt == 0) {
+ mmu_update_t u;
+ u.ptr = MMU_EXTENDED_COMMAND;
+ u.ptr |= (unsigned long)&default_ldt[0];
+ u.val = MMUEXT_SET_LDT | (5 << MMUEXT_CMD_SHIFT);
+ if (unlikely(HYPERVISOR_mmu_update(&u, 1, NULL) < 0)) {
+ show_trace(NULL, (unsigned long *)&u);
+ panic("Failed to install default LDT");
+ }
+ return;
+ }
+ }
+
+ if (regs->eflags & VM_MASK)
+ goto gp_in_vm86;
+
+ if (!(regs->xcs & 2))
+ goto gp_in_kernel;
+
+ current->thread.error_code = error_code;
+ current->thread.trap_no = 13;
+ force_sig(SIGSEGV, current);
+ return;
+
+gp_in_vm86:
+ local_irq_enable();
+ handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
+ return;
+
+gp_in_kernel:
+ if (!fixup_exception(regs)) {
+ if (notify_die(DIE_GPF, "general protection fault", regs,
+ error_code, 13, SIGSEGV) == NOTIFY_STOP)
+ return;
+ die("general protection fault", regs, error_code);
+ }
+}
+
+static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
+{
+ printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
+ printk("You probably have a hardware problem with your RAM chips\n");
+
+ /* Clear and disable the memory parity error line. */
+ clear_mem_error(reason);
+}
+
+static void io_check_error(unsigned char reason, struct pt_regs * regs)
+{
+ unsigned long i;
+
+ printk("NMI: IOCK error (debug interrupt?)\n");
+ show_registers(regs);
+
+ /* Re-enable the IOCK line, wait for a few seconds */
+ reason = (reason & 0xf) | 8;
+ outb(reason, 0x61);
+ i = 2000;
+ while (--i) udelay(1000);
+ reason &= ~8;
+ outb(reason, 0x61);
+}
+
+static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
+{
+#ifdef CONFIG_MCA
+ /* Might actually be able to figure out what the guilty party
+ * is. */
+ if( MCA_bus ) {
+ mca_handle_nmi();
+ return;
+ }
+#endif
+ printk("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
+ reason, smp_processor_id());
+ printk("Dazed and confused, but trying to continue\n");
+ printk("Do you have a strange power saving mode enabled?\n");
+}
+
+static DEFINE_SPINLOCK(nmi_print_lock);
+
+void die_nmi (struct pt_regs *regs, const char *msg)
+{
+ spin_lock(&nmi_print_lock);
+ /*
+ * We are in trouble anyway, lets at least try
+ * to get a message out.
+ */
+ bust_spinlocks(1);
+ printk(msg);
+ printk(" on CPU%d, eip %08lx, registers:\n",
+ smp_processor_id(), regs->eip);
+ show_registers(regs);
+ printk("console shuts up ...\n");
+ console_silent();
+ spin_unlock(&nmi_print_lock);
+ bust_spinlocks(0);
+ do_exit(SIGSEGV);
+}
+
+static void default_do_nmi(struct pt_regs * regs)
+{
+ unsigned char reason = 0;
+
+ /* Only the BSP gets external NMIs from the system. */
+ if (!smp_processor_id())
+ reason = get_nmi_reason();
+
+ if (!(reason & 0xc0)) {
+ if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 0, SIGINT)
+ == NOTIFY_STOP)
+ return;
+#ifdef CONFIG_X86_LOCAL_APIC
+ /*
+ * Ok, so this is none of the documented NMI sources,
+ * so it must be the NMI watchdog.
+ */
+ if (nmi_watchdog) {
+ nmi_watchdog_tick(regs);
+ return;
+ }
+#endif
+ unknown_nmi_error(reason, regs);
+ return;
+ }
+ if (notify_die(DIE_NMI, "nmi", regs, reason, 0, SIGINT) == NOTIFY_STOP)
+ return;
+ if (reason & 0x80)
+ mem_parity_error(reason, regs);
+ if (reason & 0x40)
+ io_check_error(reason, regs);
+ /*
+ * Reassert NMI in case it became active meanwhile
+ * as it's edge-triggered.
+ */
+ reassert_nmi();
+}
+
+static int dummy_nmi_callback(struct pt_regs * regs, int cpu)
+{
+ return 0;
+}
+
+static nmi_callback_t nmi_callback = dummy_nmi_callback;
+
+fastcall void do_nmi(struct pt_regs * regs, long error_code)
+{
+ int cpu;
+
+ nmi_enter();
+
+ cpu = smp_processor_id();
+ ++nmi_count(cpu);
+
+ if (!nmi_callback(regs, cpu))
+ default_do_nmi(regs);
+
+ nmi_exit();
+}
+
+void set_nmi_callback(nmi_callback_t callback)
+{
+ nmi_callback = callback;
+}
+
+void unset_nmi_callback(void)
+{
+ nmi_callback = dummy_nmi_callback;
+}
+
+#ifdef CONFIG_KPROBES
+fastcall int do_int3(struct pt_regs *regs, long error_code)
+{
+ if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
+ == NOTIFY_STOP)
+ return 1;
+ /* This is an interrupt gate, because kprobes wants interrupts
+ disabled. Normal trap handlers don't. */
+ restore_interrupts(regs);
+ do_trap(3, SIGTRAP, "int3", 1, regs, error_code, NULL);
+ return 0;
+}
+#endif
+
+/*
+ * Our handling of the processor debug registers is non-trivial.
+ * We do not clear them on entry and exit from the kernel. Therefore
+ * it is possible to get a watchpoint trap here from inside the kernel.
+ * However, the code in ./ptrace.c has ensured that the user can
+ * only set watchpoints on userspace addresses. Therefore the in-kernel
+ * watchpoint trap can only occur in code which is reading/writing
+ * from user space. Such code must not hold kernel locks (since it
+ * can equally take a page fault), therefore it is safe to call
+ * force_sig_info even though that claims and releases locks.
+ *
+ * Code in ./signal.c ensures that the debug control register
+ * is restored before we deliver any signal, and therefore that
+ * user code runs with the correct debug control register even though
+ * we clear it here.
+ *
+ * Being careful here means that we don't have to be as careful in a
+ * lot of more complicated places (task switching can be a bit lazy
+ * about restoring all the debug state, and ptrace doesn't have to
+ * find every occurrence of the TF bit that could be saved away even
+ * by user code)
+ */
+fastcall void do_debug(struct pt_regs * regs, long error_code)
+{
+ unsigned int condition;
+ struct task_struct *tsk = current;
+
+ condition = HYPERVISOR_get_debugreg(6);
+
+ if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
+ SIGTRAP) == NOTIFY_STOP)
+ return;
+#if 0
+ /* It's safe to allow irq's after DR6 has been saved */
+ if (regs->eflags & X86_EFLAGS_IF)
+ local_irq_enable();
+#endif
+
+ /* Mask out spurious debug traps due to lazy DR7 setting */
+ if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
+ if (!tsk->thread.debugreg[7])
+ goto clear_dr7;
+ }
+
+ if (regs->eflags & VM_MASK)
+ goto debug_vm86;
+
+ /* Save debug status register where ptrace can see it */
+ tsk->thread.debugreg[6] = condition;
+
+ /*
+ * Single-stepping through TF: make sure we ignore any events in
+ * kernel space (but re-enable TF when returning to user mode).
+ * And if the event was due to a debugger (PT_DTRACE), clear the
+ * TF flag so that register information is correct.
+ */
+ if (condition & DR_STEP) {
+ /*
+ * We already checked v86 mode above, so we can
+ * check for kernel mode by just checking the CPL
+ * of CS.
+ */
+ if ((regs->xcs & 2) == 0)
+ goto clear_TF_reenable;
+
+ if (likely(tsk->ptrace & PT_DTRACE)) {
+ tsk->ptrace &= ~PT_DTRACE;
+ regs->eflags &= ~TF_MASK;
+ }
+ }
+
+ /* Ok, finally something we can handle */
+ send_sigtrap(tsk, regs, error_code);
+
+ /* Disable additional traps. They'll be re-enabled when
+ * the signal is delivered.
+ */
+clear_dr7:
+ HYPERVISOR_set_debugreg(7, 0);
+ return;
+
+debug_vm86:
+ handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
+ return;
+
+clear_TF_reenable:
+ set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
+ regs->eflags &= ~TF_MASK;
+ return;
+}
+
+/*
+ * Note that we play around with the 'TS' bit in an attempt to get
+ * the correct behaviour even in the presence of the asynchronous
+ * IRQ13 behaviour
+ */
+void math_error(void __user *eip)
+{
+ struct task_struct * task;
+ siginfo_t info;
+ unsigned short cwd, swd;
+
+ /*
+ * Save the info for the exception handler and clear the error.
+ */
+ task = current;
+ save_init_fpu(task);
+ task->thread.trap_no = 16;
+ task->thread.error_code = 0;
+ info.si_signo = SIGFPE;
+ info.si_errno = 0;
+ info.si_code = __SI_FAULT;
+ info.si_addr = eip;
+ /*
+ * (~cwd & swd) will mask out exceptions that are not set to unmasked
+ * status. 0x3f is the exception bits in these regs, 0x200 is the
+ * C1 reg you need in case of a stack fault, 0x040 is the stack
+ * fault bit. We should only be taking one exception at a time,
+ * so if this combination doesn't produce any single exception,
+ * then we have a bad program that isn't synchronizing its FPU usage
+ * and it will suffer the consequences since we won't be able to
+ * fully reproduce the context of the exception
+ */
+ cwd = get_fpu_cwd(task);
+ swd = get_fpu_swd(task);
+ switch (((~cwd) & swd & 0x3f) | (swd & 0x240)) {
+ case 0x000:
+ default:
+ break;
+ case 0x001: /* Invalid Op */
+ case 0x041: /* Stack Fault */
+ case 0x241: /* Stack Fault | Direction */
+ info.si_code = FPE_FLTINV;
+ /* Should we clear the SF or let user space do it ???? */
+ break;
+ case 0x002: /* Denormalize */
+ case 0x010: /* Underflow */
+ info.si_code = FPE_FLTUND;
+ break;
+ case 0x004: /* Zero Divide */
+ info.si_code = FPE_FLTDIV;
+ break;
+ case 0x008: /* Overflow */
+ info.si_code = FPE_FLTOVF;
+ break;
+ case 0x020: /* Precision */
+ info.si_code = FPE_FLTRES;
+ break;
+ }
+ force_sig_info(SIGFPE, &info, task);
+}
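A worked decode of the switch above (illustrative values):

    /* With the default control word 0x037f all exceptions are masked,
     * so (~cwd & swd & 0x3f) == 0 and the default case is taken. If a
     * task unmasks divide-by-zero (clears cwd bit 2) and then divides
     * by zero (swd bit 2 set), the switch value is 0x004 and si_code
     * becomes FPE_FLTDIV. */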
+
+fastcall void do_coprocessor_error(struct pt_regs * regs, long error_code)
+{
+ ignore_fpu_irq = 1;
+ math_error((void __user *)regs->eip);
+}
+
+void simd_math_error(void __user *eip)
+{
+ struct task_struct * task;
+ siginfo_t info;
+ unsigned short mxcsr;
+
+ /*
+ * Save the info for the exception handler and clear the error.
+ */
+ task = current;
+ save_init_fpu(task);
+ task->thread.trap_no = 19;
+ task->thread.error_code = 0;
+ info.si_signo = SIGFPE;
+ info.si_errno = 0;
+ info.si_code = __SI_FAULT;
+ info.si_addr = eip;
+ /*
+ * The SIMD FPU exceptions are handled a little differently, as there
+ * is only a single status/control register. Thus, to determine which
+ * unmasked exception was caught we must mask the exception mask bits
+ * at 0x1f80, and then use these to mask the exception bits at 0x3f.
+ */
+ mxcsr = get_fpu_mxcsr(task);
+ switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
+ case 0x000:
+ default:
+ break;
+ case 0x001: /* Invalid Op */
+ info.si_code = FPE_FLTINV;
+ break;
+ case 0x002: /* Denormalize */
+ case 0x010: /* Underflow */
+ info.si_code = FPE_FLTUND;
+ break;
+ case 0x004: /* Zero Divide */
+ info.si_code = FPE_FLTDIV;
+ break;
+ case 0x008: /* Overflow */
+ info.si_code = FPE_FLTOVF;
+ break;
+ case 0x020: /* Precision */
+ info.si_code = FPE_FLTRES;
+ break;
+ }
+ force_sig_info(SIGFPE, &info, task);
+}
+
+fastcall void do_simd_coprocessor_error(struct pt_regs * regs,
+ long error_code)
+{
+ if (cpu_has_xmm) {
+ /* Handle SIMD FPU exceptions on PIII+ processors. */
+ ignore_fpu_irq = 1;
+ simd_math_error((void __user *)regs->eip);
+ } else {
+ /*
+ * Handle strange cache flush from user space exception
+ * in all other cases. This is undocumented behaviour.
+ */
+ if (regs->eflags & VM_MASK) {
+ handle_vm86_fault((struct kernel_vm86_regs *)regs,
+ error_code);
+ return;
+ }
+ die_if_kernel("cache flush denied", regs, error_code);
+ current->thread.trap_no = 19;
+ current->thread.error_code = error_code;
+ force_sig(SIGSEGV, current);
+ }
+}
+
+/*
+ * 'math_state_restore()' saves the current math information in the
+ * old math state array, and gets the new ones from the current task
+ *
+ * Careful.. There are problems with IBM-designed IRQ13 behaviour.
+ * Don't touch unless you *really* know how it works.
+ *
+ * Must be called with kernel preemption disabled (in this case,
+ * local interrupts are disabled at the call-site in entry.S).
+ */
+asmlinkage void math_state_restore(struct pt_regs regs)
+{
+ struct thread_info *thread = current_thread_info();
+ struct task_struct *tsk = thread->task;
+
+ /*
+ * A trap in kernel mode can be ignored. It'll be the fast XOR or
+ * copying libraries, which will correctly save/restore state and
+ * reset the TS bit in CR0.
+ */
+ if ((regs.xcs & 2) == 0)
+ return;
+
+ clts(); /* Allow maths ops (or we recurse) */
+ if (!tsk_used_math(tsk))
+ init_fpu(tsk);
+ restore_fpu(tsk);
+ thread->status |= TS_USEDFPU; /* So we fnsave on switch_to() */
+}
+
+#ifndef CONFIG_MATH_EMULATION
+
+asmlinkage void math_emulate(long arg)
+{
+ printk("math-emulation not enabled and no coprocessor found.\n");
+ printk("killing %s.\n",current->comm);
+ force_sig(SIGFPE,current);
+ schedule();
+}
+
+#endif /* CONFIG_MATH_EMULATION */
+
+#ifdef CONFIG_X86_F00F_BUG
+void __init trap_init_f00f_bug(void)
+{
+ __set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);
+
+ /*
+ * Update the IDT descriptor and reload the IDT so that
+ * it uses the read-only mapped virtual address.
+ */
+ idt_descr.address = fix_to_virt(FIX_F00F_IDT);
+ __asm__ __volatile__("lidt %0" : : "m" (idt_descr));
+}
+#endif
+
+
+/* NB. All these are "trap gates" (i.e. events_mask isn't cleared). */
+static trap_info_t trap_table[] = {
+ { 0, 0, __KERNEL_CS, (unsigned long)divide_error },
+ { 1, 0, __KERNEL_CS, (unsigned long)debug },
+ { 3, 3, __KERNEL_CS, (unsigned long)int3 },
+ { 4, 3, __KERNEL_CS, (unsigned long)overflow },
+ { 5, 3, __KERNEL_CS, (unsigned long)bounds },
+ { 6, 0, __KERNEL_CS, (unsigned long)invalid_op },
+ { 7, 0, __KERNEL_CS, (unsigned long)device_not_available },
+ { 9, 0, __KERNEL_CS, (unsigned long)coprocessor_segment_overrun },
+ { 10, 0, __KERNEL_CS, (unsigned long)invalid_TSS },
+ { 11, 0, __KERNEL_CS, (unsigned long)segment_not_present },
+ { 12, 0, __KERNEL_CS, (unsigned long)stack_segment },
+ { 13, 0, __KERNEL_CS, (unsigned long)general_protection },
+ { 14, 0, __KERNEL_CS, (unsigned long)page_fault },
+ { 15, 0, __KERNEL_CS, (unsigned long)fixup_4gb_segment },
+ { 16, 0, __KERNEL_CS, (unsigned long)coprocessor_error },
+ { 17, 0, __KERNEL_CS, (unsigned long)alignment_check },
+#ifdef CONFIG_X86_MCE
+ { 18, 0, __KERNEL_CS, (unsigned long)machine_check },
+#endif
+ { 19, 0, __KERNEL_CS, (unsigned long)simd_coprocessor_error },
+ { SYSCALL_VECTOR, 3, __KERNEL_CS, (unsigned long)system_call },
+ { 0, 0, 0, 0 }
+};
+
+void __init trap_init(void)
+{
+ HYPERVISOR_set_trap_table(trap_table);
+ HYPERVISOR_set_fast_trap(SYSCALL_VECTOR);
+
+ /*
+ * default LDT is a single-entry callgate to lcall7 for iBCS
+ * and a callgate to lcall27 for Solaris/x86 binaries
+ */
+ make_lowmem_page_readonly(&default_ldt[0]);
+ xen_flush_page_update_queue();
+
+ /*
+ * Should be a barrier for any external CPU state.
+ */
+ cpu_init();
+}
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/arch/xen/i386/kernel/vsyscall.S
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/vsyscall.S Tue Aug 9 15:17:45 2005
@@ -0,0 +1,15 @@
+#include <linux/init.h>
+
+__INITDATA
+
+ .globl vsyscall_int80_start, vsyscall_int80_end
+vsyscall_int80_start:
+ .incbin "arch/xen/i386/kernel/vsyscall-int80.so"
+vsyscall_int80_end:
+
+ .globl vsyscall_sysenter_start, vsyscall_sysenter_end
+vsyscall_sysenter_start:
+ .incbin "arch/xen/i386/kernel/vsyscall-sysenter.so"
+vsyscall_sysenter_end:
+
+__FINIT
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/arch/xen/i386/mm/Makefile
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/Makefile Tue Aug 9 15:17:45 2005
@@ -0,0 +1,24 @@
+#
+# Makefile for the linux i386-specific parts of the memory manager.
+#
+
+XENARCH := $(subst ",,$(CONFIG_XENARCH))
+
+CFLAGS += -Iarch/$(XENARCH)/mm
+
+obj-y := init.o pgtable.o fault.o ioremap.o pageattr.o hypervisor.o
+c-obj-y := extable.o mmap.o
+
+c-obj-$(CONFIG_DISCONTIGMEM) += discontig.o
+c-obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
+obj-$(CONFIG_HIGHMEM) += highmem.o
+c-obj-$(CONFIG_BOOT_IOREMAP) += boot_ioremap.o
+
+c-link :=
+
+$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-link)):
+ @ln -fsn $(srctree)/arch/i386/mm/$(notdir $@) $@
+
+obj-y += $(c-obj-y)
+
+clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-) $(c-link))
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/arch/xen/i386/mm/fault.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/fault.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,561 @@
+/*
+ * linux/arch/i386/mm/fault.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ */
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/tty.h>
+#include <linux/vt_kern.h> /* For unblank_screen() */
+#include <linux/highmem.h>
+#include <linux/module.h>
+
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/desc.h>
+#include <asm/kdebug.h>
+
+extern void die(const char *,struct pt_regs *,long);
+
+pgd_t *cur_pgd; /* XXXsmp */
+
+/*
+ * Unlock any spinlocks which will prevent us from getting the
+ * message out
+ */
+void bust_spinlocks(int yes)
+{
+ int loglevel_save = console_loglevel;
+
+ if (yes) {
+ oops_in_progress = 1;
+ return;
+ }
+#ifdef CONFIG_VT
+ unblank_screen();
+#endif
+ oops_in_progress = 0;
+ /*
+ * OK, the message is on the console. Now we call printk()
+ * without oops_in_progress set so that printk will give klogd
+ * a poke. Hold onto your hats...
+ */
+ console_loglevel = 15; /* NMI oopser may have shut the console up */
+ printk(" ");
+ console_loglevel = loglevel_save;
+}
+
+/*
+ * Return EIP plus the CS segment base. The segment limit is also
+ * adjusted, clamped to the kernel/user address space (whichever is
+ * appropriate), and returned in *eip_limit.
+ *
+ * The segment is checked, because it might have been changed by another
+ * task between the original faulting instruction and here.
+ *
+ * If CS is no longer a valid code segment, or if EIP is beyond the
+ * limit, or if it is a kernel address when CS is not a kernel segment,
+ * then the returned value will be greater than *eip_limit.
+ *
+ * This is slow, but is very rarely executed.
+ */
+static inline unsigned long get_segment_eip(struct pt_regs *regs,
+ unsigned long *eip_limit)
+{
+ unsigned long eip = regs->eip;
+ unsigned seg = regs->xcs & 0xffff;
+ u32 seg_ar, seg_limit, base, *desc;
+
+ /* The standard kernel/user address space limit. */
+ *eip_limit = (seg & 2) ? USER_DS.seg : KERNEL_DS.seg;
+
+ /* Unlikely, but must come before segment checks. */
+ if (unlikely((regs->eflags & VM_MASK) != 0))
+ return eip + (seg << 4);
+
+ /* By far the most common cases. */
+ if (likely(seg == __USER_CS || seg == __KERNEL_CS))
+ return eip;
+
+ /* Check the segment exists, is within the current LDT/GDT size,
+ that kernel/user (ring 0..3) has the appropriate privilege,
+ that it's a code segment, and get the limit. */
+ __asm__ ("larl %3,%0; lsll %3,%1"
+ : "=&r" (seg_ar), "=r" (seg_limit) : "0" (0), "rm" (seg));
+ if ((~seg_ar & 0x9800) || eip > seg_limit) {
+ *eip_limit = 0;
+ return 1; /* So that returned eip > *eip_limit. */
+ }
+
+ /* Get the GDT/LDT descriptor base.
+ When you look for races in this code remember that
+ LDT and other horrors are only used in user space. */
+ if (seg & (1<<2)) {
+ /* Must lock the LDT while reading it. */
+ down(&current->mm->context.sem);
+ desc = current->mm->context.ldt;
+ desc = (void *)desc + (seg & ~7);
+ } else {
+ /* Must disable preemption while reading the GDT. */
+ desc = (u32 *)get_cpu_gdt_table(get_cpu());
+ desc = (void *)desc + (seg & ~7);
+ }
+
+ /* Decode the code segment base from the descriptor */
+ base = get_desc_base((unsigned long *)desc);
+
+ if (seg & (1<<2)) {
+ up(&current->mm->context.sem);
+ } else
+ put_cpu();
+
+ /* Adjust EIP and segment limit, and clamp at the kernel limit.
+ It's legitimate for segments to wrap at 0xffffffff. */
+ seg_limit += base;
+ if (seg_limit < *eip_limit && seg_limit >= base)
+ *eip_limit = seg_limit;
+ return eip + base;
+}
+
+/*
+ * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
+ * Check that here and ignore it.
+ */
+static int __is_prefetch(struct pt_regs *regs, unsigned long addr)
+{
+ unsigned long limit;
+ unsigned long instr = get_segment_eip (regs, &limit);
+ int scan_more = 1;
+ int prefetch = 0;
+ int i;
+
+ for (i = 0; scan_more && i < 15; i++) {
+ unsigned char opcode;
+ unsigned char instr_hi;
+ unsigned char instr_lo;
+
+ if (instr > limit)
+ break;
+ if (__get_user(opcode, (unsigned char *) instr))
+ break;
+
+ instr_hi = opcode & 0xf0;
+ instr_lo = opcode & 0x0f;
+ instr++;
+
+ switch (instr_hi) {
+ case 0x20:
+ case 0x30:
+ /* Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes. */
+ scan_more = ((instr_lo & 7) == 0x6);
+ break;
+
+ case 0x60:
+ /* 0x64 thru 0x67 are valid prefixes in all modes. */
+ scan_more = (instr_lo & 0xC) == 0x4;
+ break;
+ case 0xF0:
+ /* 0xF0, 0xF2, and 0xF3 are valid prefixes */
+ scan_more = !instr_lo || (instr_lo>>1) == 1;
+ break;
+ case 0x00:
+ /* Prefetch instruction is 0x0F0D or 0x0F18 */
+ scan_more = 0;
+ if (instr > limit)
+ break;
+ if (__get_user(opcode, (unsigned char *) instr))
+ break;
+ prefetch = (instr_lo == 0xF) &&
+ (opcode == 0x0D || opcode == 0x18);
+ break;
+ default:
+ scan_more = 0;
+ break;
+ }
+ }
+ return prefetch;
+}
+
+static inline int is_prefetch(struct pt_regs *regs, unsigned long addr,
+ unsigned long error_code)
+{
+ if (unlikely(boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+ boot_cpu_data.x86 >= 6)) {
+ /* Catch an obscure case of prefetch inside an NX page. */
+ if (nx_enabled && (error_code & 16))
+ return 0;
+ return __is_prefetch(regs, addr);
+ }
+ return 0;
+}
+
+fastcall void do_invalid_op(struct pt_regs *, unsigned long);
+
+/*
+ * This routine handles page faults. It determines the address,
+ * and the problem, and then passes it off to one of the appropriate
+ * routines.
+ *
+ * error_code:
+ * bit 0 == 0 means no page found, 1 means protection fault
+ * bit 1 == 0 means read, 1 means write
+ * bit 2 == 0 means kernel, 1 means user-mode
+ */
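A worked decode of the error_code bits (illustrative):

    /* error_code == 6 (user, write, not-present) is an ordinary
     * first-touch write fault from userspace; error_code == 5
     * (user, read, protection) is a userspace read that violated
     * page protections. */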
+fastcall void do_page_fault(struct pt_regs *regs, unsigned long error_code,
+ unsigned long address)
+{
+ struct task_struct *tsk;
+ struct mm_struct *mm;
+ struct vm_area_struct * vma;
+ unsigned long page;
+ int write;
+ siginfo_t info;
+
+ /* Set the "privileged fault" bit to something sane. */
+ error_code &= 3;
+ error_code |= (regs->xcs & 2) << 1;
+ if (regs->eflags & X86_EFLAGS_VM)
+ error_code |= 4;
+
+ if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
+ SIGSEGV) == NOTIFY_STOP)
+ return;
+#if 0
+ /* It's safe to allow irq's after cr2 has been saved */
+ if (regs->eflags & (X86_EFLAGS_IF|VM_MASK))
+ local_irq_enable();
+#endif
+
+ tsk = current;
+
+ info.si_code = SEGV_MAPERR;
+
+ /*
+ * We fault-in kernel-space virtual memory on-demand. The
+ * 'reference' page table is init_mm.pgd.
+ *
+ * NOTE! We MUST NOT take any locks for this case. We may
+ * be in an interrupt or a critical region, and should
+ * only copy the information from the master page table,
+ * nothing more.
+ *
+ * This verifies that the fault happens in kernel space
+ * (error_code & 4) == 0, and that the fault was not a
+ * protection error (error_code & 1) == 0.
+ */
+ if (unlikely(address >= TASK_SIZE)) {
+ if (!(error_code & 5))
+ goto vmalloc_fault;
+ /*
+ * Don't take the mm semaphore here. If we fixup a prefetch
+ * fault we could otherwise deadlock.
+ */
+ goto bad_area_nosemaphore;
+ }
+
+ mm = tsk->mm;
+
+ /*
+ * If we're in an interrupt, have no user context or are running in an
+ * atomic region then we must not take the fault..
+ */
+ if (in_atomic() || !mm)
+ goto bad_area_nosemaphore;
+
+ /* When running in the kernel we expect faults to occur only to
+ * addresses in user space. All other faults represent errors in the
+ * kernel and should generate an OOPS. Unfortunately, in the case of an
+ * erroneous fault occurring in a code path which already holds mmap_sem
+ * we will deadlock attempting to validate the fault against the
+ * address space. Luckily the kernel only validly references user
+ * space from well defined areas of code, which are listed in the
+ * exceptions table.
+ *
+ * As the vast majority of faults will be valid we will only perform
+ * the source reference check when there is a possibility of a deadlock.
+ * Attempt to lock the address space, if we cannot we then validate the
+ * source. If this is invalid we can skip the address space check,
+ * thus avoiding the deadlock.
+ */
+ if (!down_read_trylock(&mm->mmap_sem)) {
+ if ((error_code & 4) == 0 &&
+ !search_exception_tables(regs->eip))
+ goto bad_area_nosemaphore;
+ down_read(&mm->mmap_sem);
+ }
+
+ vma = find_vma(mm, address);
+ if (!vma)
+ goto bad_area;
+ if (vma->vm_start <= address)
+ goto good_area;
+ if (!(vma->vm_flags & VM_GROWSDOWN))
+ goto bad_area;
+ if (error_code & 4) {
+ /*
+ * accessing the stack below %esp is always a bug.
+ * The "+ 32" is there due to some instructions (like
+ * pusha) doing post-decrement on the stack and that
+ * doesn't show up until later..
+ */
+ if (address + 32 < regs->esp)
+ goto bad_area;
+ }
+ if (expand_stack(vma, address))
+ goto bad_area;
+/*
+ * Ok, we have a good vm_area for this memory access, so
+ * we can handle it..
+ */
+good_area:
+ info.si_code = SEGV_ACCERR;
+ write = 0;
+ switch (error_code & 3) {
+ default: /* 3: write, present */
+#ifdef TEST_VERIFY_AREA
+ if (regs->cs == KERNEL_CS)
+ printk("WP fault at %08lx\n", regs->eip);
+#endif
+ /* fall through */
+ case 2: /* write, not present */
+ if (!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ write++;
+ break;
+ case 1: /* read, present */
+ goto bad_area;
+ case 0: /* read, not present */
+ if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+ goto bad_area;
+ }
+
+ survive:
+ /*
+ * If for any reason at all we couldn't handle the fault,
+ * make sure we exit gracefully rather than endlessly redo
+ * the fault.
+ */
+ switch (handle_mm_fault(mm, vma, address, write)) {
+ case VM_FAULT_MINOR:
+ tsk->min_flt++;
+ break;
+ case VM_FAULT_MAJOR:
+ tsk->maj_flt++;
+ break;
+ case VM_FAULT_SIGBUS:
+ goto do_sigbus;
+ case VM_FAULT_OOM:
+ goto out_of_memory;
+ default:
+ BUG();
+ }
+
+ /*
+ * Did it hit the DOS screen memory VA from vm86 mode?
+ */
+ if (regs->eflags & VM_MASK) {
+ unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
+ if (bit < 32)
+ tsk->thread.screen_bitmap |= 1 << bit;
+ }
+ up_read(&mm->mmap_sem);
+ return;
+
+/*
+ * Something tried to access memory that isn't in our memory map..
+ * Fix it, but check if it's kernel or user first..
+ */
+bad_area:
+ up_read(&mm->mmap_sem);
+
+bad_area_nosemaphore:
+ /* User mode accesses just cause a SIGSEGV */
+ if (error_code & 4) {
+ /*
+ * Valid to do another page fault here because this one came
+ * from user space.
+ */
+ if (is_prefetch(regs, address, error_code))
+ return;
+
+ tsk->thread.cr2 = address;
+ /* Kernel addresses are always protection faults */
+ tsk->thread.error_code = error_code | (address >= TASK_SIZE);
+ tsk->thread.trap_no = 14;
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+ /* info.si_code has been set above */
+ info.si_addr = (void __user *)address;
+ force_sig_info(SIGSEGV, &info, tsk);
+ return;
+ }
+
+#ifdef CONFIG_X86_F00F_BUG
+ /*
+ * Pentium F0 0F C7 C8 bug workaround.
+ */
+ if (boot_cpu_data.f00f_bug) {
+ unsigned long nr;
+
+ nr = (address - idt_descr.address) >> 3;
+
+ if (nr == 6) {
+ do_invalid_op(regs, 0);
+ return;
+ }
+ }
+#endif
+
+no_context:
+ /* Are we prepared to handle this kernel fault? */
+ if (fixup_exception(regs))
+ return;
+
+ /*
+ * Valid to do another page fault here, because if this fault
+ * had been triggered by is_prefetch fixup_exception would have
+ * handled it.
+ */
+ if (is_prefetch(regs, address, error_code))
+ return;
+
+/*
+ * Oops. The kernel tried to access some bad page. We'll have to
+ * terminate things with extreme prejudice.
+ */
+
+ bust_spinlocks(1);
+
+#ifdef CONFIG_X86_PAE
+ if (error_code & 16) {
+ pte_t *pte = lookup_address(address);
+
+ if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
+ printk(KERN_CRIT "kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n", current->uid);
+ }
+#endif
+ if (address < PAGE_SIZE)
+ printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
+ else
+ printk(KERN_ALERT "Unable to handle kernel paging request");
+ printk(" at virtual address %08lx\n",address);
+ printk(KERN_ALERT " printing eip:\n");
+ printk("%08lx\n", regs->eip);
+ page = ((unsigned long *) cur_pgd)[address >> 22];
+ printk(KERN_ALERT "*pde = ma %08lx pa %08lx\n", page,
+ machine_to_phys(page));
+ /*
+ * We must not directly access the pte in the highpte
+ * case, the page table might be allocated in highmem.
+ * And lets rather not kmap-atomic the pte, just in case
+ * it's allocated already.
+ */
+#ifndef CONFIG_HIGHPTE
+ if (page & 1) {
+ page &= PAGE_MASK;
+ address &= 0x003ff000;
+ page = machine_to_phys(page);
+ page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
+ printk(KERN_ALERT "*pte = ma %08lx pa %08lx\n", page,
+ machine_to_phys(page));
+ }
+#endif
+ show_trace(NULL, (unsigned long *)&regs[1]);
+ die("Oops", regs, error_code);
+ bust_spinlocks(0);
+ do_exit(SIGKILL);
+
+/*
+ * We ran out of memory, or some other thing happened to us that made
+ * us unable to handle the page fault gracefully.
+ */
+out_of_memory:
+ up_read(&mm->mmap_sem);
+ if (tsk->pid == 1) {
+ yield();
+ down_read(&mm->mmap_sem);
+ goto survive;
+ }
+ printk("VM: killing process %s\n", tsk->comm);
+ if (error_code & 4)
+ do_exit(SIGKILL);
+ goto no_context;
+
+do_sigbus:
+ up_read(&mm->mmap_sem);
+
+ /* Kernel mode? Handle exceptions or die */
+ if (!(error_code & 4))
+ goto no_context;
+
+ /* User space => ok to do another page fault */
+ if (is_prefetch(regs, address, error_code))
+ return;
+
+ tsk->thread.cr2 = address;
+ tsk->thread.error_code = error_code;
+ tsk->thread.trap_no = 14;
+ info.si_signo = SIGBUS;
+ info.si_errno = 0;
+ info.si_code = BUS_ADRERR;
+ info.si_addr = (void __user *)address;
+ force_sig_info(SIGBUS, &info, tsk);
+ return;
+
+vmalloc_fault:
+ {
+ /*
+ * Synchronize this task's top level page-table
+ * with the 'reference' page table.
+ *
+ * Do _not_ use "tsk" here. We might be inside
+ * an interrupt in the middle of a task switch..
+ */
+ int index = pgd_index(address);
+ pgd_t *pgd, *pgd_k;
+ pud_t *pud, *pud_k;
+ pmd_t *pmd, *pmd_k;
+ pte_t *pte_k;
+
+ pgd = index + cur_pgd;
+ pgd_k = init_mm.pgd + index;
+
+ if (!pgd_present(*pgd_k))
+ goto no_context;
+
+ /*
+ * set_pgd(pgd, *pgd_k); here would be useless on PAE
+ * and redundant with the set_pmd() on non-PAE. As would
+ * set_pud.
+ */
+
+ pud = pud_offset(pgd, address);
+ pud_k = pud_offset(pgd_k, address);
+ if (!pud_present(*pud_k))
+ goto no_context;
+
+ pmd = pmd_offset(pud, address);
+ pmd_k = pmd_offset(pud_k, address);
+ if (!pmd_present(*pmd_k))
+ goto no_context;
+ set_pmd(pmd, *pmd_k);
+ xen_flush_page_update_queue(); /* flush PMD update */
+
+ pte_k = pte_offset_kernel(pmd_k, address);
+ if (!pte_present(*pte_k))
+ goto no_context;
+ return;
+ }
+}
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/arch/xen/i386/mm/highmem.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/highmem.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,100 @@
+#include <linux/highmem.h>
+
+void *kmap(struct page *page)
+{
+ might_sleep();
+ if (!PageHighMem(page))
+ return page_address(page);
+ return kmap_high(page);
+}
+
+void kunmap(struct page *page)
+{
+ if (in_interrupt())
+ BUG();
+ if (!PageHighMem(page))
+ return;
+ kunmap_high(page);
+}
+
+/*
+ * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
+ * no global lock is needed and because the kmap code must perform a global TLB
+ * invalidation when the kmap pool wraps.
+ *
+ * However, when holding an atomic kmap it is not legal to sleep, so atomic
+ * kmaps are appropriate for short, tight code paths only.
+ */
+static void *__kmap_atomic(struct page *page, enum km_type type, pgprot_t prot)
+{
+ enum fixed_addresses idx;
+ unsigned long vaddr;
+
+ /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+ inc_preempt_count();
+ if (!PageHighMem(page))
+ return page_address(page);
+
+ idx = type + KM_TYPE_NR*smp_processor_id();
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+#ifdef CONFIG_DEBUG_HIGHMEM
+ if (!pte_none(*(kmap_pte-idx)))
+ BUG();
+#endif
+ set_pte(kmap_pte-idx, mk_pte(page, prot));
+ __flush_tlb_one(vaddr);
+
+ return (void*) vaddr;
+}
+
+void *kmap_atomic(struct page *page, enum km_type type)
+{
+ return __kmap_atomic(page, type, kmap_prot);
+}
+
+/* Same as kmap_atomic but with PAGE_KERNEL_RO page protection. */
+void *kmap_atomic_pte(struct page *page, enum km_type type)
+{
+ return __kmap_atomic(page, type, PAGE_KERNEL_RO);
+}
+
+void kunmap_atomic(void *kvaddr, enum km_type type)
+{
+#ifdef CONFIG_DEBUG_HIGHMEM
+ unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
+ enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
+
+ if (vaddr < FIXADDR_START) { // FIXME
+ dec_preempt_count();
+ preempt_check_resched();
+ return;
+ }
+
+ if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
+ BUG();
+
+ /*
+ * force other mappings to Oops if they try to access
+ * this pte without first remapping it
+ */
+ pte_clear(kmap_pte-idx);
+ __flush_tlb_one(vaddr);
+#endif
+
+ dec_preempt_count();
+ preempt_check_resched();
+}
+
+struct page *kmap_atomic_to_page(void *ptr)
+{
+ unsigned long idx, vaddr = (unsigned long)ptr;
+ pte_t *pte;
+
+ if (vaddr < FIXADDR_START)
+ return virt_to_page(ptr);
+
+ idx = virt_to_fix(vaddr);
+ pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
+ return pte_page(*pte);
+}
+
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/arch/xen/i386/mm/hypervisor.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/hypervisor.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,395 @@
+/******************************************************************************
+ * mm/hypervisor.c
+ *
+ * Update page tables via the hypervisor.
+ *
+ * Copyright (c) 2002-2004, K A Fraser
+ *
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm-xen/hypervisor.h>
+#include <asm-xen/multicall.h>
+#include <asm-xen/balloon.h>
+
+/*
+ * This suffices to protect us if we ever move to SMP domains.
+ * Further, it protects us against interrupts. At the very least, this is
+ * required for the network driver which flushes the update queue before
+ * pushing new receive buffers.
+ */
+static spinlock_t update_lock = SPIN_LOCK_UNLOCKED;
+
+/* Linux 2.6 isn't using the traditional batched interface. */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+#define QUEUE_SIZE 2048
+#define pte_offset_kernel pte_offset
+#define pmd_val_ma(v) ((v).pmd)
+#define pud_t pgd_t
+#define pud_offset(d, va) d
+#else
+#define QUEUE_SIZE 128
+#define pmd_val_ma(v) ((v).pud.pgd.pgd)
+#endif
+
+static mmu_update_t update_queue[QUEUE_SIZE];
+unsigned int mmu_update_queue_idx = 0;
+#define idx mmu_update_queue_idx
+
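+/*
+ * Updates accumulate in update_queue[] and are submitted to Xen in a
+ * single mmu_update hypercall, either when the queue fills (see
+ * increment_index()) or when a caller explicitly flushes.
+ */
+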
+/*
+ * MULTICALL_flush_page_update_queue:
+ * This is a version of the flush which queues as part of a multicall.
+ */
+void MULTICALL_flush_page_update_queue(void)
+{
+ unsigned long flags;
+ unsigned int _idx;
+ spin_lock_irqsave(&update_lock, flags);
+ if ( (_idx = idx) != 0 )
+ {
+ idx = 0;
+ wmb(); /* Make sure index is cleared first to avoid double updates. */
+ queue_multicall3(__HYPERVISOR_mmu_update,
+ (unsigned long)update_queue,
+ (unsigned long)_idx,
+ (unsigned long)NULL);
+ }
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+static inline void __flush_page_update_queue(void)
+{
+ unsigned int _idx = idx;
+ idx = 0;
+ wmb(); /* Make sure index is cleared first to avoid double updates. */
+ if ( unlikely(HYPERVISOR_mmu_update(update_queue, _idx, NULL) < 0) )
+ {
+ printk(KERN_ALERT "Failed to execute MMU updates.\n");
+ BUG();
+ }
+}
+
+void _flush_page_update_queue(void)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ if ( idx != 0 ) __flush_page_update_queue();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+static inline void increment_index(void)
+{
+ idx++;
+ if ( unlikely(idx == QUEUE_SIZE) ) __flush_page_update_queue();
+}
+
+static inline void increment_index_and_flush(void)
+{
+ idx++;
+ __flush_page_update_queue();
+}
+
+void queue_l1_entry_update(pte_t *ptr, unsigned long val)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ update_queue[idx].ptr = virt_to_machine(ptr);
+ update_queue[idx].val = val;
+ increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void queue_l2_entry_update(pmd_t *ptr, pmd_t val)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ update_queue[idx].ptr = virt_to_machine(ptr);
+ update_queue[idx].val = pmd_val_ma(val);
+ increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void queue_pt_switch(unsigned long ptr)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ update_queue[idx].ptr = phys_to_machine(ptr);
+ update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
+ update_queue[idx].val = MMUEXT_NEW_BASEPTR;
+ increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void queue_tlb_flush(void)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ update_queue[idx].ptr = MMU_EXTENDED_COMMAND;
+ update_queue[idx].val = MMUEXT_TLB_FLUSH;
+ increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void queue_invlpg(unsigned long ptr)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ update_queue[idx].ptr = MMU_EXTENDED_COMMAND;
+ update_queue[idx].ptr |= ptr & PAGE_MASK;
+ update_queue[idx].val = MMUEXT_INVLPG;
+ increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void queue_pgd_pin(unsigned long ptr)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ update_queue[idx].ptr = phys_to_machine(ptr);
+ update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
+ update_queue[idx].val = MMUEXT_PIN_L2_TABLE;
+ increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void queue_pgd_unpin(unsigned long ptr)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ update_queue[idx].ptr = phys_to_machine(ptr);
+ update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
+ update_queue[idx].val = MMUEXT_UNPIN_TABLE;
+ increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void queue_pte_pin(unsigned long ptr)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ update_queue[idx].ptr = phys_to_machine(ptr);
+ update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
+ update_queue[idx].val = MMUEXT_PIN_L1_TABLE;
+ increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void queue_pte_unpin(unsigned long ptr)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ update_queue[idx].ptr = phys_to_machine(ptr);
+ update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
+ update_queue[idx].val = MMUEXT_UNPIN_TABLE;
+ increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void queue_set_ldt(unsigned long ptr, unsigned long len)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ update_queue[idx].ptr = MMU_EXTENDED_COMMAND | ptr;
+ update_queue[idx].val = MMUEXT_SET_LDT | (len << MMUEXT_CMD_SHIFT);
+ increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void queue_machphys_update(unsigned long mfn, unsigned long pfn)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ update_queue[idx].ptr = (mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
+ update_queue[idx].val = pfn;
+ increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+/* queue and flush versions of the above */
+void xen_l1_entry_update(pte_t *ptr, unsigned long val)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ update_queue[idx].ptr = virt_to_machine(ptr);
+ update_queue[idx].val = val;
+ increment_index_and_flush();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void xen_l2_entry_update(pmd_t *ptr, pmd_t val)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ update_queue[idx].ptr = virt_to_machine(ptr);
+ update_queue[idx].val = pmd_val_ma(val);
+ increment_index_and_flush();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void xen_pt_switch(unsigned long ptr)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ update_queue[idx].ptr = phys_to_machine(ptr);
+ update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
+ update_queue[idx].val = MMUEXT_NEW_BASEPTR;
+ increment_index_and_flush();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void xen_tlb_flush(void)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ update_queue[idx].ptr = MMU_EXTENDED_COMMAND;
+ update_queue[idx].val = MMUEXT_TLB_FLUSH;
+ increment_index_and_flush();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void xen_invlpg(unsigned long ptr)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ update_queue[idx].ptr = MMU_EXTENDED_COMMAND;
+ update_queue[idx].ptr |= ptr & PAGE_MASK;
+ update_queue[idx].val = MMUEXT_INVLPG;
+ increment_index_and_flush();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void xen_pgd_pin(unsigned long ptr)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ update_queue[idx].ptr = phys_to_machine(ptr);
+ update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
+ update_queue[idx].val = MMUEXT_PIN_L2_TABLE;
+ increment_index_and_flush();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void xen_pgd_unpin(unsigned long ptr)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ update_queue[idx].ptr = phys_to_machine(ptr);
+ update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
+ update_queue[idx].val = MMUEXT_UNPIN_TABLE;
+ increment_index_and_flush();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void xen_pte_pin(unsigned long ptr)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ update_queue[idx].ptr = phys_to_machine(ptr);
+ update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
+ update_queue[idx].val = MMUEXT_PIN_L1_TABLE;
+ increment_index_and_flush();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void xen_pte_unpin(unsigned long ptr)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ update_queue[idx].ptr = phys_to_machine(ptr);
+ update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
+ update_queue[idx].val = MMUEXT_UNPIN_TABLE;
+ increment_index_and_flush();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void xen_set_ldt(unsigned long ptr, unsigned long len)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ update_queue[idx].ptr = MMU_EXTENDED_COMMAND | ptr;
+ update_queue[idx].val = MMUEXT_SET_LDT | (len << MMUEXT_CMD_SHIFT);
+ increment_index_and_flush();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void xen_machphys_update(unsigned long mfn, unsigned long pfn)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ update_queue[idx].ptr = (mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
+ update_queue[idx].val = pfn;
+ increment_index_and_flush();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+#ifdef CONFIG_XEN_PHYSDEV_ACCESS
+
+unsigned long allocate_empty_lowmem_region(unsigned long pages)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+ unsigned long *pfn_array;
+ unsigned long vstart;
+ unsigned long i;
+ unsigned int order = get_order(pages*PAGE_SIZE);
+
+ vstart = __get_free_pages(GFP_KERNEL, order);
+ if ( vstart == 0 )
+ return 0UL;
+
+ scrub_pages(vstart, 1 << order);
+
+ pfn_array = vmalloc((1<<order) * sizeof(*pfn_array));
+ if ( pfn_array == NULL )
+ BUG();
+
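+ /* For each page: note its machine frame, zap its PTE, and mark the
+ * pseudo-physical slot invalid so the frame can be ballooned out. */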
+ for ( i = 0; i < (1<<order); i++ )
+ {
+ pgd = pgd_offset_k( (vstart + (i*PAGE_SIZE)));
+ pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
+ pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
+ pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
+ pfn_array[i] = pte->pte_low >> PAGE_SHIFT;
+ queue_l1_entry_update(pte, 0);
+ phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
+ INVALID_P2M_ENTRY;
+ }
+
+ /* Flush updates through and flush the TLB. */
+ xen_tlb_flush();
+
+ balloon_put_pages(pfn_array, 1 << order);
+
+ vfree(pfn_array);
+
+ return vstart;
+}
+
+#endif /* CONFIG_XEN_PHYSDEV_ACCESS */
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/arch/xen/i386/mm/init.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/init.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,804 @@
+/*
+ * linux/arch/i386/mm/init.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ *
+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/hugetlb.h>
+#include <linux/swap.h>
+#include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <linux/bootmem.h>
+#include <linux/slab.h>
+#include <linux/proc_fs.h>
+#include <linux/efi.h>
+
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/dma.h>
+#include <asm/fixmap.h>
+#include <asm/e820.h>
+#include <asm/apic.h>
+#include <asm/tlb.h>
+#include <asm/tlbflush.h>
+#include <asm/sections.h>
+#include <asm-xen/hypervisor.h>
+
+unsigned int __VMALLOC_RESERVE = 128 << 20;
+
+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+unsigned long highstart_pfn, highend_pfn;
+
+static int noinline do_test_wp_bit(void);
+
+/*
+ * Creates a middle page table and puts a pointer to it in the
+ * given global directory entry. This only returns the gd entry
+ * in non-PAE compilation mode, since the middle layer is folded.
+ */
+static pmd_t * __init one_md_table_init(pgd_t *pgd)
+{
+ pud_t *pud;
+ pmd_t *pmd_table;
+
+#ifdef CONFIG_X86_PAE
+ pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+ set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
+ pud = pud_offset(pgd, 0);
+ if (pmd_table != pmd_offset(pud, 0))
+ BUG();
+#else
+ pud = pud_offset(pgd, 0);
+ pmd_table = pmd_offset(pud, 0);
+#endif
+
+ return pmd_table;
+}
+
+/*
+ * Create a page table and place a pointer to it in a middle page
+ * directory entry.
+ */
+static pte_t * __init one_page_table_init(pmd_t *pmd)
+{
+ if (pmd_none(*pmd)) {
+ pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+ make_page_readonly(page_table);
+ set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
+ if (page_table != pte_offset_kernel(pmd, 0))
+ BUG();
+
+ return page_table;
+ }
+
+ return pte_offset_kernel(pmd, 0);
+}
+
+/*
+ * This function initializes a certain range of kernel virtual memory
+ * with new bootmem page tables, everywhere page tables are missing in
+ * the given range.
+ */
+
+/*
+ * NOTE: The pagetables are allocated contiguous on the physical space
+ * so we can cache the place of the first one and move around without
+ * checking the pgd every time.
+ */
+static void __init page_table_range_init (unsigned long start, unsigned long end, pgd_t *pgd_base)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ int pgd_idx, pmd_idx;
+ unsigned long vaddr;
+
+ vaddr = start;
+ pgd_idx = pgd_index(vaddr);
+ pmd_idx = pmd_index(vaddr);
+ pgd = pgd_base + pgd_idx;
+
+ for ( ; (pgd_idx < PTRS_PER_PGD_NO_HV) && (vaddr != end); pgd++, pgd_idx++) {
+ if (pgd_none(*pgd))
+ one_md_table_init(pgd);
+ pud = pud_offset(pgd, vaddr);
+ pmd = pmd_offset(pud, vaddr);
+ for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) {
+ if (pmd_none(*pmd))
+ one_page_table_init(pmd);
+
+ vaddr += PMD_SIZE;
+ }
+ pmd_idx = 0;
+ }
+}
+
+static inline int is_kernel_text(unsigned long addr)
+{
+ if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
+ return 1;
+ return 0;
+}
+
+/*
+ * This maps the physical memory to kernel virtual address space, a total
+ * of max_low_pfn pages, by creating page tables starting from address
+ * PAGE_OFFSET.
+ */
+static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
+{
+ unsigned long pfn;
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *pte;
+ int pgd_idx, pmd_idx, pte_ofs;
+
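+ /* XEN: only xen_start_info.nr_pages frames are populated at boot;
+ * clamp the range mapped here so unbacked pfns get no PTEs. */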
+ unsigned long max_ram_pfn = xen_start_info.nr_pages;
+ if (max_ram_pfn > max_low_pfn)
+ max_ram_pfn = max_low_pfn;
+
+ pgd_idx = pgd_index(PAGE_OFFSET);
+ pgd = pgd_base + pgd_idx;
+ pfn = 0;
+ pmd_idx = pmd_index(PAGE_OFFSET);
+ pte_ofs = pte_index(PAGE_OFFSET);
+
+ for (; pgd_idx < PTRS_PER_PGD_NO_HV; pgd++, pgd_idx++) {
+ pmd = one_md_table_init(pgd);
+ if (pfn >= max_low_pfn)
+ continue;
+ pmd += pmd_idx;
+ for (; pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn; pmd++, pmd_idx++) {
+ unsigned int address = pfn * PAGE_SIZE + PAGE_OFFSET;
+
+ /* Map with big pages if possible, otherwise create normal page tables. */
+ if (cpu_has_pse) {
+ unsigned int address2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE + PAGE_OFFSET + PAGE_SIZE-1;
+
+ if (is_kernel_text(address) || is_kernel_text(address2))
+ set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
+ else
+ set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE));
+ pfn += PTRS_PER_PTE;
+ } else {
+ pte = one_page_table_init(pmd);
+
+ pte += pte_ofs;
+ for (; pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn; pte++, pfn++, pte_ofs++) {
+ /* XEN: Only map initial RAM allocation. */
+ if ((pfn >= max_ram_pfn) || pte_present(*pte))
+ continue;
+ if (is_kernel_text(address))
+ set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
+ else
+ set_pte(pte, pfn_pte(pfn, PAGE_KERNEL));
+ }
+ pte_ofs = 0;
+ }
+ flush_page_update_queue();
+ }
+ pmd_idx = 0;
+ }
+}
+
+static inline int page_kills_ppro(unsigned long pagenr)
+{
+ if (pagenr >= 0x70000 && pagenr <= 0x7003F)
+ return 1;
+ return 0;
+}
+
+extern int is_available_memory(efi_memory_desc_t *);
+
+static inline int page_is_ram(unsigned long pagenr)
+{
+ int i;
+ unsigned long addr, end;
+
+ if (efi_enabled) {
+ efi_memory_desc_t *md;
+
+ for (i = 0; i < memmap.nr_map; i++) {
+ md = &memmap.map[i];
+ if (!is_available_memory(md))
+ continue;
+ addr = (md->phys_addr+PAGE_SIZE-1) >> PAGE_SHIFT;
+ end = (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >> PAGE_SHIFT;
+
+ if ((pagenr >= addr) && (pagenr < end))
+ return 1;
+ }
+ return 0;
+ }
+
+ for (i = 0; i < e820.nr_map; i++) {
+
+ if (e820.map[i].type != E820_RAM) /* not usable memory */
+ continue;
+ /*
+ * !!!FIXME!!! Some BIOSen report areas as RAM that
+ * are not. Notably the 640->1Mb area. We need a sanity
+ * check here.
+ */
+ addr = (e820.map[i].addr+PAGE_SIZE-1) >> PAGE_SHIFT;
+ end = (e820.map[i].addr+e820.map[i].size) >> PAGE_SHIFT;
+ if ((pagenr >= addr) && (pagenr < end))
+ return 1;
+ }
+ return 0;
+}
+
+#ifdef CONFIG_HIGHMEM
+pte_t *kmap_pte;
+pgprot_t kmap_prot;
+
+EXPORT_SYMBOL(kmap_prot);
+EXPORT_SYMBOL(kmap_pte);
+
+#define kmap_get_fixmap_pte(vaddr) \
+ pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), (vaddr)), (vaddr))
+
+void __init kmap_init(void)
+{
+ unsigned long kmap_vstart;
+
+ /* cache the first kmap pte */
+ kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
+ kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
+
+ kmap_prot = PAGE_KERNEL;
+}
+
+void __init permanent_kmaps_init(pgd_t *pgd_base)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+ unsigned long vaddr;
+
+ vaddr = PKMAP_BASE;
+ page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
+
+ pgd = swapper_pg_dir + pgd_index(vaddr);
+ pud = pud_offset(pgd, vaddr);
+ pmd = pmd_offset(pud, vaddr);
+ pte = pte_offset_kernel(pmd, vaddr);
+ pkmap_page_table = pte;
+}
+
+void __init one_highpage_init(struct page *page, int pfn, int bad_ppro)
+{
+ if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
+ ClearPageReserved(page);
+ set_bit(PG_highmem, &page->flags);
+ set_page_count(page, 1);
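+ /* XEN: only pages backed by a machine frame (i.e., within the
+ * initial allocation) are released to the page allocator. */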
+ if (pfn < xen_start_info.nr_pages)
+ __free_page(page);
+ totalhigh_pages++;
+ } else
+ SetPageReserved(page);
+}
+
+#ifndef CONFIG_DISCONTIGMEM
+void __init set_highmem_pages_init(int bad_ppro)
+{
+ int pfn;
+ for (pfn = highstart_pfn; pfn < highend_pfn; pfn++)
+ one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
+ totalram_pages += totalhigh_pages;
+}
+#else
+extern void set_highmem_pages_init(int);
+#endif /* !CONFIG_DISCONTIGMEM */
+
+#else
+#define kmap_init() do { } while (0)
+#define permanent_kmaps_init(pgd_base) do { } while (0)
+#define set_highmem_pages_init(bad_ppro) do { } while (0)
+#endif /* CONFIG_HIGHMEM */
+
+unsigned long long __PAGE_KERNEL = _PAGE_KERNEL;
+unsigned long long __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;
+
+#ifndef CONFIG_DISCONTIGMEM
+#define remap_numa_kva() do {} while (0)
+#else
+extern void __init remap_numa_kva(void);
+#endif
+
+static void __init pagetable_init (void)
+{
+ unsigned long vaddr;
+ pgd_t *pgd_base = swapper_pg_dir;
+ pgd_t *old_pgd = (pgd_t *)xen_start_info.pt_base;
+
+#ifdef CONFIG_X86_PAE
+ int i;
+ /* Init entries of the first-level page table to the zero page */
+ for (i = 0; i < PTRS_PER_PGD; i++)
+ set_pgd(pgd_base + i, __pgd(__pa(empty_zero_page) | _PAGE_PRESENT));
+#endif
+
+ /* Enable PSE if available */
+ if (cpu_has_pse) {
+ set_in_cr4(X86_CR4_PSE);
+ }
+
+ /* Enable PGE if available */
+ if (cpu_has_pge) {
+ set_in_cr4(X86_CR4_PGE);
+ __PAGE_KERNEL |= _PAGE_GLOBAL;
+ __PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
+ }
+
+ /*
+ * Switch to proper mm_init page directory. Initialise from the current
+ * page directory, write-protect the new page directory, then switch to
+ * it. We clean up by write-enabling and then freeing the old page dir.
+ */
+ memcpy(pgd_base, old_pgd, PTRS_PER_PGD_NO_HV*sizeof(pgd_t));
+ make_page_readonly(pgd_base);
+ queue_pgd_pin(__pa(pgd_base));
+ load_cr3(pgd_base);
+ queue_pgd_unpin(__pa(old_pgd));
+ make_page_writable(old_pgd);
+ __flush_tlb_all();
+ free_bootmem(__pa(old_pgd), PAGE_SIZE);
+
+ kernel_physical_mapping_init(pgd_base);
+ remap_numa_kva();
+
+ /*
+ * Fixed mappings, only the page table structure has to be
+ * created - mappings will be set by set_fixmap():
+ */
+ vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
+ page_table_range_init(vaddr, 0, pgd_base);
+
+ permanent_kmaps_init(pgd_base);
+
+#ifdef CONFIG_X86_PAE
+ /*
+ * Add low memory identity-mappings - SMP needs it when
+ * starting up on an AP from real-mode. In the non-PAE
+ * case we already have these mappings through head.S.
+ * All user-space mappings are explicitly cleared after
+ * SMP startup.
+ */
+ pgd_base[0] = pgd_base[USER_PTRS_PER_PGD];
+#endif
+}
+
+#if defined(CONFIG_PM_DISK) || defined(CONFIG_SOFTWARE_SUSPEND)
+/*
+ * Swap suspend & friends need this for resume because things like the intel-agp
+ * driver might have split up a kernel 4MB mapping.
+ */
+char __nosavedata swsusp_pg_dir[PAGE_SIZE]
+ __attribute__ ((aligned (PAGE_SIZE)));
+
+static inline void save_pg_dir(void)
+{
+ memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
+}
+#else
+static inline void save_pg_dir(void)
+{
+}
+#endif
+
+void zap_low_mappings (void)
+{
+ int i;
+
+ save_pg_dir();
+
+ /*
+ * Zap initial low-memory mappings.
+ *
+ * Note that "pgd_clear()" doesn't do it for
+ * us, because pgd_clear() is a no-op on i386.
+ */
+ for (i = 0; i < USER_PTRS_PER_PGD; i++)
+#ifdef CONFIG_X86_PAE
+ set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
+#else
+ set_pgd(swapper_pg_dir+i, __pgd(0));
+#endif
+ flush_tlb_all();
+}
+
+#ifndef CONFIG_DISCONTIGMEM
+void __init zone_sizes_init(void)
+{
+ unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
+ unsigned int /*max_dma,*/ high, low;
+
+ /*
+ * XEN: Our notion of "DMA memory" is fake when running over Xen.
+ * We simply put all RAM in the DMA zone so that those drivers which
+ * needlessly specify GFP_DMA do not get starved of RAM unnecessarily.
+ * Those drivers that *do* require lowmem are screwed anyway when
+ * running over Xen!
+ */
+ /*max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;*/
+ low = max_low_pfn;
+ high = highend_pfn;
+
+ /*if (low < max_dma)*/
+ zones_size[ZONE_DMA] = low;
+ /*else*/ {
+ /*zones_size[ZONE_DMA] = max_dma;*/
+ /*zones_size[ZONE_NORMAL] = low - max_dma;*/
+#ifdef CONFIG_HIGHMEM
+ zones_size[ZONE_HIGHMEM] = high - low;
+#endif
+ }
+ free_area_init(zones_size);
+}
+#else
+extern void zone_sizes_init(void);
+#endif /* !CONFIG_DISCONTIGMEM */
+
+static int disable_nx __initdata = 0;
+u64 __supported_pte_mask = ~_PAGE_NX;
+
+/*
+ * noexec = on|off
+ *
+ * Control non executable mappings.
+ *
+ * on Enable
+ * off Disable
+ */
+void __init noexec_setup(const char *str)
+{
+ if (!strncmp(str, "on",2) && cpu_has_nx) {
+ __supported_pte_mask |= _PAGE_NX;
+ disable_nx = 0;
+ } else if (!strncmp(str,"off",3)) {
+ disable_nx = 1;
+ __supported_pte_mask &= ~_PAGE_NX;
+ }
+}
+
+int nx_enabled = 0;
+#ifdef CONFIG_X86_PAE
+
+static void __init set_nx(void)
+{
+ unsigned int v[4], l, h;
+
+ if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
+ cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
+ if ((v[3] & (1 << 20)) && !disable_nx) {
+ rdmsr(MSR_EFER, l, h);
+ l |= EFER_NX;
+ wrmsr(MSR_EFER, l, h);
+ nx_enabled = 1;
+ __supported_pte_mask |= _PAGE_NX;
+ }
+ }
+}
+
+/*
+ * Enables/disables executability of a given kernel page and
+ * returns the previous setting.
+ */
+int __init set_kernel_exec(unsigned long vaddr, int enable)
+{
+ pte_t *pte;
+ int ret = 1;
+
+ if (!nx_enabled)
+ goto out;
+
+ pte = lookup_address(vaddr);
+ BUG_ON(!pte);
+
+ if (!pte_exec_kernel(*pte))
+ ret = 0;
+
+ if (enable)
+ pte->pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
+ else
+ pte->pte_high |= 1 << (_PAGE_BIT_NX - 32);
+ __flush_tlb_all();
+out:
+ return ret;
+}
+
+#endif
+
+/*
+ * paging_init() sets up the page tables - note that the first 8MB are
+ * already mapped by head.S.
+ *
+ * This routine also unmaps the page at virtual kernel address 0, so
+ * that we can trap those pesky NULL-reference errors in the kernel.
+ */
+void __init paging_init(void)
+{
+#ifdef CONFIG_XEN_PHYSDEV_ACCESS
+ int i;
+#endif
+
+#ifdef CONFIG_X86_PAE
+ set_nx();
+ if (nx_enabled)
+ printk("NX (Execute Disable) protection: active\n");
+#endif
+
+ pagetable_init();
+
+#ifdef CONFIG_X86_PAE
+ /*
+ * We will bail out later - printk doesn't work right now so
+ * the user would just see a hanging kernel.
+ */
+ if (cpu_has_pae)
+ set_in_cr4(X86_CR4_PAE);
+#endif
+ __flush_tlb_all();
+
+ kmap_init();
+ zone_sizes_init();
+
+ /* Switch to the real shared_info page, and clear the dummy page. */
+ flush_page_update_queue();
+ set_fixmap_ma(FIX_SHARED_INFO, xen_start_info.shared_info);
+ HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
+ memset(empty_zero_page, 0, sizeof(empty_zero_page));
+
+#ifdef CONFIG_XEN_PHYSDEV_ACCESS
+ /* Setup mapping of lower 1st MB */
+ for (i = 0; i < NR_FIX_ISAMAPS; i++)
+ if (xen_start_info.flags & SIF_PRIVILEGED)
+ set_fixmap_ma(FIX_ISAMAP_BEGIN - i, i * PAGE_SIZE);
+ else
+ set_fixmap_ma_ro(FIX_ISAMAP_BEGIN - i,
+ virt_to_machine(empty_zero_page));
+#endif
+}
+
+/*
+ * Test if the WP bit works in supervisor mode. It isn't supported on 386's
+ * and also on some strange 486's (NexGen etc.). All 586+'s are OK. This
+ * used to involve black magic jumps to work around some nasty CPU bugs,
+ * but fortunately the switch to using exceptions got rid of all that.
+ */
+
+void __init test_wp_bit(void)
+{
+ printk("Checking if this processor honours the WP bit even in
supervisor mode... ");
+
+ /* Any page-aligned address will do, the test is non-destructive */
+ __set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
+ boot_cpu_data.wp_works_ok = do_test_wp_bit();
+ clear_fixmap(FIX_WP_TEST);
+
+ if (!boot_cpu_data.wp_works_ok) {
+ printk("No.\n");
+#ifdef CONFIG_X86_WP_WORKS_OK
+ panic("This kernel doesn't support CPU's with broken WP.
Recompile it for a 386!");
+#endif
+ } else {
+ printk("Ok.\n");
+ }
+}
+
+#ifndef CONFIG_DISCONTIGMEM
+static void __init set_max_mapnr_init(void)
+{
+#ifdef CONFIG_HIGHMEM
+ max_mapnr = num_physpages = highend_pfn;
+#else
+ max_mapnr = num_physpages = max_low_pfn;
+#endif
+}
+#define __free_all_bootmem() free_all_bootmem()
+#else
+#define __free_all_bootmem() free_all_bootmem_node(NODE_DATA(0))
+extern void set_max_mapnr_init(void);
+#endif /* !CONFIG_DISCONTIGMEM */
+
+static struct kcore_list kcore_mem, kcore_vmalloc;
+
+void __init mem_init(void)
+{
+ extern int ppro_with_ram_bug(void);
+ int codesize, reservedpages, datasize, initsize;
+ int tmp;
+ int bad_ppro;
+ unsigned long pfn;
+
+#ifndef CONFIG_DISCONTIGMEM
+ if (!mem_map)
+ BUG();
+#endif
+
+ bad_ppro = ppro_with_ram_bug();
+
+#ifdef CONFIG_HIGHMEM
+ /* check that fixmap and pkmap do not overlap */
+ if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
+ printk(KERN_ERR "fixmap and kmap areas overlap - this will
crash\n");
+ printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
+ PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
FIXADDR_START);
+ BUG();
+ }
+#endif
+
+ set_max_mapnr_init();
+
+#ifdef CONFIG_HIGHMEM
+ high_memory = (void *) __va(highstart_pfn * PAGE_SIZE);
+#else
+ high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
+#endif
+ printk("vmalloc area: %lx-%lx, maxmem %lx\n",
+ VMALLOC_START,VMALLOC_END,MAXMEM);
+ BUG_ON(VMALLOC_START > VMALLOC_END);
+
+ /* this will put all low memory onto the freelists */
+ totalram_pages += __free_all_bootmem();
+ /* XEN: init and count low-mem pages outside initial allocation. */
+ for (pfn = xen_start_info.nr_pages; pfn < max_low_pfn; pfn++) {
+ ClearPageReserved(&mem_map[pfn]);
+ set_page_count(&mem_map[pfn], 1);
+ totalram_pages++;
+ }
+
+ reservedpages = 0;
+ for (tmp = 0; tmp < max_low_pfn; tmp++)
+ /*
+ * Only count reserved RAM pages
+ */
+ if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
+ reservedpages++;
+
+ set_highmem_pages_init(bad_ppro);
+
+ codesize = (unsigned long) &_etext - (unsigned long) &_text;
+ datasize = (unsigned long) &_edata - (unsigned long) &_etext;
+ initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
+
+ kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
+ kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
+ VMALLOC_END-VMALLOC_START);
+
+ printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk
reserved, %dk data, %dk init, %ldk highmem)\n",
+ (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
+ num_physpages << (PAGE_SHIFT-10),
+ codesize >> 10,
+ reservedpages << (PAGE_SHIFT-10),
+ datasize >> 10,
+ initsize >> 10,
+ (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
+ );
+
+#ifdef CONFIG_X86_PAE
+ if (!cpu_has_pae)
+ panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!");
+#endif
+ if (boot_cpu_data.wp_works_ok < 0)
+ test_wp_bit();
+
+ /*
+ * Subtle. SMP is doing its boot stuff late (because it has to
+ * fork idle threads) - but it also needs low mappings for the
+ * protected-mode entry to work. We zap these entries only after
+ * the WP-bit has been tested.
+ */
+#ifndef CONFIG_SMP
+ zap_low_mappings();
+#endif
+}
+
+kmem_cache_t *pgd_cache;
+kmem_cache_t *pmd_cache;
+kmem_cache_t *pte_cache;
+
+void __init pgtable_cache_init(void)
+{
+ pte_cache = kmem_cache_create("pte",
+ PTRS_PER_PTE*sizeof(pte_t),
+ PTRS_PER_PTE*sizeof(pte_t),
+ 0,
+ pte_ctor,
+ pte_dtor);
+ if (!pte_cache)
+ panic("pgtable_cache_init(): Cannot create pte cache");
+ if (PTRS_PER_PMD > 1) {
+ pmd_cache = kmem_cache_create("pmd",
+ PTRS_PER_PMD*sizeof(pmd_t),
+ PTRS_PER_PMD*sizeof(pmd_t),
+ 0,
+ pmd_ctor,
+ NULL);
+ if (!pmd_cache)
+ panic("pgtable_cache_init(): cannot create pmd cache");
+ }
+ pgd_cache = kmem_cache_create("pgd",
+ PTRS_PER_PGD*sizeof(pgd_t),
+ PTRS_PER_PGD*sizeof(pgd_t),
+ 0,
+ pgd_ctor,
+ pgd_dtor);
+ if (!pgd_cache)
+ panic("pgtable_cache_init(): Cannot create pgd cache");
+}
+
+/*
+ * This function cannot be __init, since exceptions don't work in that
+ * section. Put this after the callers, so that it cannot be inlined.
+ */
+static int noinline do_test_wp_bit(void)
+{
+ char tmp_reg;
+ int flag;
+
+ __asm__ __volatile__(
+ " movb %0,%1 \n"
+ "1: movb %1,%0 \n"
+ " xorl %2,%2 \n"
+ "2: \n"
+ ".section __ex_table,\"a\"\n"
+ " .align 4 \n"
+ " .long 1b,2b \n"
+ ".previous \n"
+ :"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
+ "=q" (tmp_reg),
+ "=r" (flag)
+ :"2" (1)
+ :"memory");
+
+ return flag;
+}
+
+void free_initmem(void)
+{
+ unsigned long addr;
+
+ addr = (unsigned long)(&__init_begin);
+ for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
+ ClearPageReserved(virt_to_page(addr));
+ set_page_count(virt_to_page(addr), 1);
+ memset((void *)addr, 0xcc, PAGE_SIZE);
+ free_page(addr);
+ totalram_pages++;
+ }
+ printk (KERN_INFO "Freeing unused kernel memory: %dk freed\n",
(__init_end - __init_begin) >> 10);
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+ if (start < end)
+ printk (KERN_INFO "Freeing initrd memory: %ldk freed\n", (end -
start) >> 10);
+ for (; start < end; start += PAGE_SIZE) {
+ ClearPageReserved(virt_to_page(start));
+ set_page_count(virt_to_page(start), 1);
+ free_page(start);
+ totalram_pages++;
+ }
+}
+#endif
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/arch/xen/i386/mm/ioremap.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/ioremap.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,450 @@
+/*
+ * arch/i386/mm/ioremap.c
+ *
+ * Re-map IO memory to kernel address space so that we can access it.
+ * This is needed for high PCI addresses that aren't mapped in the
+ * 640k-1MB IO memory area on PC's
+ *
+ * (C) Copyright 1995 1996 Linus Torvalds
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <asm/io.h>
+#include <asm/fixmap.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+
+#ifndef CONFIG_XEN_PHYSDEV_ACCESS
+
+void * __ioremap(unsigned long phys_addr, unsigned long size,
+ unsigned long flags)
+{
+ return NULL;
+}
+
+void *ioremap_nocache (unsigned long phys_addr, unsigned long size)
+{
+ return NULL;
+}
+
+void iounmap(volatile void __iomem *addr)
+{
+}
+
+void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
+{
+ return NULL;
+}
+
+void __init bt_iounmap(void *addr, unsigned long size)
+{
+}
+
+#else
+
+/*
+ * Does @address reside within a non-highmem page that is local to this virtual
+ * machine (i.e., not an I/O page, nor a memory page belonging to another VM)?
+ * See the comment that accompanies pte_pfn() in pgtable-2level.h to understand
+ * why this works.
+ */
+static inline int is_local_lowmem(unsigned long address)
+{
+ extern unsigned long max_low_pfn;
+ unsigned long mfn = address >> PAGE_SHIFT;
+ unsigned long pfn = mfn_to_pfn(mfn);
+ return ((pfn < max_low_pfn) && (pfn_to_mfn(pfn) == mfn));
+}
+
+/*
+ * Generic mapping function (not visible outside):
+ */
+
+/*
+ * Remap an arbitrary physical address space into the kernel virtual
+ * address space. Needed when the kernel wants to access high addresses
+ * directly.
+ *
+ * NOTE! We need to allow non-page-aligned mappings too: we will obviously
+ * have to convert them into an offset in a page-aligned mapping, but the
+ * caller shouldn't need to know that small detail.
+ */
+void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
+{
+ void __iomem * addr;
+ struct vm_struct * area;
+ unsigned long offset, last_addr;
+ domid_t domid = DOMID_IO;
+
+ /* Don't allow wraparound or zero size */
+ last_addr = phys_addr + size - 1;
+ if (!size || last_addr < phys_addr)
+ return NULL;
+
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+ /*
+ * Don't remap the low PCI/ISA area, it's always mapped..
+ */
+ if (phys_addr >= 0x0 && last_addr < 0x100000)
+ return isa_bus_to_virt(phys_addr);
+#endif
+
+ /*
+ * Don't allow anybody to remap normal RAM that we're using..
+ */
+ if (is_local_lowmem(phys_addr)) {
+ char *t_addr, *t_end;
+ struct page *page;
+
+ t_addr = bus_to_virt(phys_addr);
+ t_end = t_addr + (size - 1);
+
+ for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
+ if(!PageReserved(page))
+ return NULL;
+
+ domid = DOMID_LOCAL;
+ }
+
+ /*
+ * Mappings have to be page-aligned
+ */
+ offset = phys_addr & ~PAGE_MASK;
+ phys_addr &= PAGE_MASK;
+ size = PAGE_ALIGN(last_addr+1) - phys_addr;
+
+ /*
+ * Ok, go for it..
+ */
+ area = get_vm_area(size, VM_IOREMAP | (flags << 20));
+ if (!area)
+ return NULL;
+ area->phys_addr = phys_addr;
+ addr = (void __iomem *) area->addr;
+ if (direct_remap_area_pages(&init_mm, (unsigned long) addr, phys_addr,
+ size, __pgprot(_PAGE_PRESENT | _PAGE_RW |
+ _PAGE_DIRTY | _PAGE_ACCESSED
+ | flags), domid)) {
+ vunmap((void __force *) addr);
+ return NULL;
+ }
+ return (void __iomem *) (offset + (char __iomem *)addr);
+}
+
+
+/**
+ * ioremap_nocache - map bus memory into CPU space
+ * @offset: bus address of the memory
+ * @size: size of the resource to map
+ *
+ * ioremap_nocache performs a platform specific sequence of operations to
+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
+ * writew/writel functions and the other mmio helpers. The returned
+ * address is not guaranteed to be usable directly as a virtual
+ * address.
+ *
+ * This version of ioremap ensures that the memory is marked uncachable
+ * on the CPU as well as honouring existing caching rules from things like
+ * the PCI bus. Note that there are other caches and buffers on many
+ * busses. In particular driver authors should read up on PCI writes
+ *
+ * It's useful if some control registers are in such an area and
+ * write combining or read caching is not desirable:
+ *
+ * Must be freed with iounmap.
+ */
+
+void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
+{
+ unsigned long last_addr;
+ void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD);
+ if (!p)
+ return p;
+
+ /* Guaranteed to be > phys_addr, as per __ioremap() */
+ last_addr = phys_addr + size - 1;
+
+ if (is_local_lowmem(last_addr)) {
+ struct page *ppage = virt_to_page(bus_to_virt(phys_addr));
+ unsigned long npages;
+
+ phys_addr &= PAGE_MASK;
+
+ /* This might overflow and become zero.. */
+ last_addr = PAGE_ALIGN(last_addr);
+
+ /* .. but that's ok, because modulo-2**n arithmetic will make
+ * the page-aligned "last - first" come out right.
+ */
+ npages = (last_addr - phys_addr) >> PAGE_SHIFT;
+
+ if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) {
+ iounmap(p);
+ p = NULL;
+ }
+ global_flush_tlb();
+ }
+
+ return p;
+}
+
+void iounmap(volatile void __iomem *addr)
+{
+ struct vm_struct *p;
+ if ((void __force *) addr <= high_memory)
+ return;
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+ if ((unsigned long) addr >= fix_to_virt(FIX_ISAMAP_BEGIN))
+ return;
+#endif
+ p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
+ if (!p) {
+ printk("__iounmap: bad address %p\n", addr);
+ return;
+ }
+
+ if ((p->flags >> 20) && is_local_lowmem(p->phys_addr)) {
+ /* p->size includes the guard page, but cpa doesn't like that */
+ change_page_attr(virt_to_page(bus_to_virt(p->phys_addr)),
+ (p->size - PAGE_SIZE) >> PAGE_SHIFT,
+ PAGE_KERNEL);
+ global_flush_tlb();
+ }
+ kfree(p);
+}
+
+void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
+{
+ unsigned long offset, last_addr;
+ unsigned int nrpages;
+ enum fixed_addresses idx;
+
+ /* Don't allow wraparound or zero size */
+ last_addr = phys_addr + size - 1;
+ if (!size || last_addr < phys_addr)
+ return NULL;
+
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+ /*
+ * Don't remap the low PCI/ISA area, it's always mapped..
+ */
+ if (phys_addr >= 0x0 && last_addr < 0x100000)
+ return isa_bus_to_virt(phys_addr);
+#endif
+
+ /*
+ * Mappings have to be page-aligned
+ */
+ offset = phys_addr & ~PAGE_MASK;
+ phys_addr &= PAGE_MASK;
+ size = PAGE_ALIGN(last_addr) - phys_addr;
+
+ /*
+ * Mappings have to fit in the FIX_BTMAP area.
+ */
+ nrpages = size >> PAGE_SHIFT;
+ if (nrpages > NR_FIX_BTMAPS)
+ return NULL;
+
+ /*
+ * Ok, go for it..
+ */
+ idx = FIX_BTMAP_BEGIN;
+ while (nrpages > 0) {
+ set_fixmap_ma(idx, phys_addr);
+ phys_addr += PAGE_SIZE;
+ --idx;
+ --nrpages;
+ }
+ return (void*) (offset + fix_to_virt(FIX_BTMAP_BEGIN));
+}
+
+void __init bt_iounmap(void *addr, unsigned long size)
+{
+ unsigned long virt_addr;
+ unsigned long offset;
+ unsigned int nrpages;
+ enum fixed_addresses idx;
+
+ virt_addr = (unsigned long)addr;
+ if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN))
+ return;
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+ if (virt_addr >= fix_to_virt(FIX_ISAMAP_BEGIN))
+ return;
+#endif
+ offset = virt_addr & ~PAGE_MASK;
+ nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;
+
+ idx = FIX_BTMAP_BEGIN;
+ while (nrpages > 0) {
+ clear_fixmap(idx);
+ --idx;
+ --nrpages;
+ }
+}
+
+#endif /* CONFIG_XEN_PHYSDEV_ACCESS */
+
+/* These hacky macros avoid phys->machine translations. */
+#define __direct_pte(x) ((pte_t) { (x) } )
+#define __direct_mk_pte(page_nr,pgprot) \
+ __direct_pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
+#define direct_mk_pte_phys(physpage, pgprot) \
+ __direct_mk_pte((physpage) >> PAGE_SHIFT, pgprot)
+
+static inline void direct_remap_area_pte(pte_t *pte,
+ unsigned long address,
+ unsigned long size,
+ mmu_update_t **v)
+{
+ unsigned long end;
+
+ address &= ~PMD_MASK;
+ end = address + size;
+ if (end > PMD_SIZE)
+ end = PMD_SIZE;
+ if (address >= end)
+ BUG();
+
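+ /* Record the machine address of each PTE slot; the new PTE values
+ * were filled in beforehand by direct_remap_area_pages(). */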
+ do {
+ (*v)->ptr = virt_to_machine(pte);
+ (*v)++;
+ address += PAGE_SIZE;
+ pte++;
+ } while (address && (address < end));
+}
+
+static inline int direct_remap_area_pmd(struct mm_struct *mm,
+ pmd_t *pmd,
+ unsigned long address,
+ unsigned long size,
+ mmu_update_t **v)
+{
+ unsigned long end;
+
+ address &= ~PGDIR_MASK;
+ end = address + size;
+ if (end > PGDIR_SIZE)
+ end = PGDIR_SIZE;
+ if (address >= end)
+ BUG();
+ do {
+ pte_t *pte = (mm == &init_mm) ?
+ pte_alloc_kernel(mm, pmd, address) :
+ pte_alloc_map(mm, pmd, address);
+ if (!pte)
+ return -ENOMEM;
+ direct_remap_area_pte(pte, address, end - address, v);
+ pte_unmap(pte);
+ address = (address + PMD_SIZE) & PMD_MASK;
+ pmd++;
+ } while (address && (address < end));
+ return 0;
+}
+
+int __direct_remap_area_pages(struct mm_struct *mm,
+ unsigned long address,
+ unsigned long size,
+ mmu_update_t *v)
+{
+ pgd_t * dir;
+ unsigned long end = address + size;
+ int error;
+
+ dir = pgd_offset(mm, address);
+ if (address >= end)
+ BUG();
+ spin_lock(&mm->page_table_lock);
+ do {
+ pud_t *pud;
+ pmd_t *pmd;
+
+ error = -ENOMEM;
+ pud = pud_alloc(mm, dir, address);
+ if (!pud)
+ break;
+ pmd = pmd_alloc(mm, pud, address);
+ if (!pmd)
+ break;
+ error = 0;
+ direct_remap_area_pmd(mm, pmd, address, end - address, &v);
+ address = (address + PGDIR_SIZE) & PGDIR_MASK;
+ dir++;
+
+ } while (address && (address < end));
+ spin_unlock(&mm->page_table_lock);
+ return error;
+}
+
+
+int direct_remap_area_pages(struct mm_struct *mm,
+ unsigned long address,
+ unsigned long machine_addr,
+ unsigned long size,
+ pgprot_t prot,
+ domid_t domid)
+{
+ int i;
+ unsigned long start_address;
+#define MAX_DIRECTMAP_MMU_QUEUE 130
+ mmu_update_t u[MAX_DIRECTMAP_MMU_QUEUE], *w, *v;
+
+ v = w = &u[0];
+ if (domid != DOMID_LOCAL) {
+ u[0].ptr = MMU_EXTENDED_COMMAND;
+ u[0].val = MMUEXT_SET_FOREIGNDOM;
+ u[0].val |= (unsigned long)domid << 16;
+ v = w = &u[1];
+ }
+
+ start_address = address;
+
+ flush_cache_all();
+
+ for (i = 0; i < size; i += PAGE_SIZE) {
+ if ((v - u) == MAX_DIRECTMAP_MMU_QUEUE) {
+ /* Fill in the PTE pointers. */
+ __direct_remap_area_pages(mm,
+ start_address,
+ address-start_address,
+ w);
+
+ if (HYPERVISOR_mmu_update(u, v - u, NULL) < 0)
+ return -EFAULT;
+ v = w;
+ start_address = address;
+ }
+
+ /*
+ * Fill in the machine address: PTE ptr is done later by
+ * __direct_remap_area_pages().
+ */
+ v->val = (machine_addr & PAGE_MASK) | pgprot_val(prot);
+
+ machine_addr += PAGE_SIZE;
+ address += PAGE_SIZE;
+ v++;
+ }
+
+ if (v != w) {
+ /* get the ptep's filled in */
+ __direct_remap_area_pages(mm,
+ start_address,
+ address-start_address,
+ w);
+ if (unlikely(HYPERVISOR_mmu_update(u, v - u, NULL) < 0))
+ return -EFAULT;
+ }
+
+ flush_tlb_all();
+
+ return 0;
+}
+
+EXPORT_SYMBOL(direct_remap_area_pages);
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/arch/xen/i386/mm/pageattr.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/pageattr.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,226 @@
+/*
+ * Copyright 2002 Andi Kleen, SuSE Labs.
+ * Thanks to Ben LaHaise for precious feedback.
+ */
+
+#include <linux/config.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/highmem.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <asm/uaccess.h>
+#include <asm/processor.h>
+#include <asm/tlbflush.h>
+
+static DEFINE_SPINLOCK(cpa_lock);
+static struct list_head df_list = LIST_HEAD_INIT(df_list);
+
+
+pte_t *lookup_address(unsigned long address)
+{
+ pgd_t *pgd = pgd_offset_k(address);
+ pud_t *pud;
+ pmd_t *pmd;
+ if (pgd_none(*pgd))
+ return NULL;
+ pud = pud_offset(pgd, address);
+ if (pud_none(*pud))
+ return NULL;
+ pmd = pmd_offset(pud, address);
+ if (pmd_none(*pmd))
+ return NULL;
+ if (pmd_large(*pmd))
+ return (pte_t *)pmd;
+ return pte_offset_kernel(pmd, address);
+}
+
+static struct page *split_large_page(unsigned long address, pgprot_t prot)
+{
+ int i;
+ unsigned long addr;
+ struct page *base;
+ pte_t *pbase;
+
+ spin_unlock_irq(&cpa_lock);
+ base = alloc_pages(GFP_KERNEL, 0);
+ spin_lock_irq(&cpa_lock);
+ if (!base)
+ return NULL;
+
+ address = __pa(address);
+ addr = address & LARGE_PAGE_MASK;
+ pbase = (pte_t *)page_address(base);
+ for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
+ pbase[i] = pfn_pte(addr >> PAGE_SHIFT,
+ addr == address ? prot : PAGE_KERNEL);
+ }
+ return base;
+}
+
+static void flush_kernel_map(void *dummy)
+{
+ /* Could use CLFLUSH here if the CPU supports it (Hammer,P4) */
+ if (boot_cpu_data.x86_model >= 4)
+ wbinvd();
+ /* Flush all to work around Errata in early athlons regarding
+ * large page flushing.
+ */
+ __flush_tlb_all();
+}
+
+static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
+{
+ struct page *page;
+ unsigned long flags;
+
+ set_pte_atomic(kpte, pte); /* change init_mm */
+ if (PTRS_PER_PMD > 1)
+ return;
+
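+ /* Non-PAE only: kernel pmd entries are per-pgd, so propagate the
+ * update into every cached pgd on pgd_list. */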
+ spin_lock_irqsave(&pgd_lock, flags);
+ for (page = pgd_list; page; page = (struct page *)page->index) {
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pgd = (pgd_t *)page_address(page) + pgd_index(address);
+ pud = pud_offset(pgd, address);
+ pmd = pmd_offset(pud, address);
+ set_pte_atomic((pte_t *)pmd, pte);
+ }
+ spin_unlock_irqrestore(&pgd_lock, flags);
+}
+
+/*
+ * No more special protections in this 2/4MB area - revert to a
+ * large page again.
+ */
+static inline void revert_page(struct page *kpte_page, unsigned long address)
+{
+ pte_t *linear = (pte_t *)
+ pmd_offset(pud_offset(pgd_offset_k(address), address), address);
+ set_pmd_pte(linear, address,
+ pfn_pte((__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT,
+ PAGE_KERNEL_LARGE));
+}
+
+static int
+__change_page_attr(struct page *page, pgprot_t prot)
+{
+ pte_t *kpte;
+ unsigned long address;
+ struct page *kpte_page;
+
+ BUG_ON(PageHighMem(page));
+ address = (unsigned long)page_address(page);
+
+ kpte = lookup_address(address);
+ if (!kpte)
+ return -EINVAL;
+ kpte_page = virt_to_page(kpte);
+ if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) {
+ if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
+ set_pte_batched(kpte, mk_pte(page, prot));
+ } else {
+ struct page *split = split_large_page(address, prot);
+ if (!split)
+ return -ENOMEM;
+ set_pmd_pte(kpte,address,mk_pte(split, PAGE_KERNEL));
+ kpte_page = split;
+ }
+ get_page(kpte_page);
+ } else if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
+ set_pte_batched(kpte, mk_pte(page, PAGE_KERNEL));
+ __put_page(kpte_page);
+ } else
+ BUG();
+
+ /*
+ * If the pte was reserved, it means it was created at boot
+ * time (not via split_large_page) and in turn we must not
+ * replace it with a largepage.
+ */
+ if (!PageReserved(kpte_page)) {
+ /* memleak and potential failed 2M page regeneration */
+ BUG_ON(!page_count(kpte_page));
+
+ if (cpu_has_pse && (page_count(kpte_page) == 1)) {
+ list_add(&kpte_page->lru, &df_list);
+ revert_page(kpte_page, address);
+ }
+ }
+ return 0;
+}
+
+static inline void flush_map(void)
+{
+ on_each_cpu(flush_kernel_map, NULL, 1, 1);
+}
+
+/*
+ * Change the page attributes of a page in the linear mapping.
+ *
+ * This should be used when a page is mapped with a different caching policy
+ * than write-back somewhere - some CPUs do not like it when mappings with
+ * different caching policies exist. This changes the page attributes of the
+ * in-kernel linear mapping too.
+ *
+ * The caller needs to ensure that there are no conflicting mappings elsewhere.
+ * This function only deals with the kernel linear map.
+ *
+ * Caller must call global_flush_tlb() after this.
+ */
+int change_page_attr(struct page *page, int numpages, pgprot_t prot)
+{
+ int err = 0;
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cpa_lock, flags);
+ for (i = 0; i < numpages; i++, page++) {
+ err = __change_page_attr(page, prot);
+ if (err)
+ break;
+ }
+ flush_page_update_queue();
+ spin_unlock_irqrestore(&cpa_lock, flags);
+ return err;
+}
+
+void global_flush_tlb(void)
+{
+ LIST_HEAD(l);
+ struct list_head* n;
+
+ BUG_ON(irqs_disabled());
+
+ spin_lock_irq(&cpa_lock);
+ list_splice_init(&df_list, &l);
+ spin_unlock_irq(&cpa_lock);
+ flush_map();
+ n = l.next;
+ while (n != &l) {
+ struct page *pg = list_entry(n, struct page, lru);
+ n = n->next;
+ __free_page(pg);
+ }
+}
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+void kernel_map_pages(struct page *page, int numpages, int enable)
+{
+ if (PageHighMem(page))
+ return;
+ /* the return value is ignored - the calls cannot fail,
+ * large pages are disabled at boot time.
+ */
+ change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
+ /* we should perform an IPI and flush all tlbs,
+ * but that can deadlock->flush only current cpu.
+ */
+ __flush_tlb_all();
+}
+#endif
+
+EXPORT_SYMBOL(change_page_attr);
+EXPORT_SYMBOL(global_flush_tlb);
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/arch/xen/i386/mm/pgtable.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/pgtable.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,433 @@
+/*
+ * linux/arch/i386/mm/pgtable.c
+ */
+
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/smp.h>
+#include <linux/highmem.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/spinlock.h>
+
+#include <asm/system.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/fixmap.h>
+#include <asm/e820.h>
+#include <asm/tlb.h>
+#include <asm/tlbflush.h>
+#include <asm/io.h>
+
+#include <asm-xen/foreign_page.h>
+
+void show_mem(void)
+{
+ int total = 0, reserved = 0;
+ int shared = 0, cached = 0;
+ int highmem = 0;
+ struct page *page;
+ pg_data_t *pgdat;
+ unsigned long i;
+
+ printk("Mem-info:\n");
+ show_free_areas();
+ printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
+ for_each_pgdat(pgdat) {
+ for (i = 0; i < pgdat->node_spanned_pages; ++i) {
+ page = pgdat->node_mem_map + i;
+ total++;
+ if (PageHighMem(page))
+ highmem++;
+ if (PageReserved(page))
+ reserved++;
+ else if (PageSwapCache(page))
+ cached++;
+ else if (page_count(page))
+ shared += page_count(page) - 1;
+ }
+ }
+ printk("%d pages of RAM\n", total);
+ printk("%d pages of HIGHMEM\n",highmem);
+ printk("%d reserved pages\n",reserved);
+ printk("%d pages shared\n",shared);
+ printk("%d pages swap cached\n",cached);
+}
+
+/*
+ * Associate a virtual page frame with a given physical page frame
+ * and protection flags for that frame.
+ */
+static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ pgd = swapper_pg_dir + pgd_index(vaddr);
+ if (pgd_none(*pgd)) {
+ BUG();
+ return;
+ }
+ pud = pud_offset(pgd, vaddr);
+ if (pud_none(*pud)) {
+ BUG();
+ return;
+ }
+ pmd = pmd_offset(pud, vaddr);
+ if (pmd_none(*pmd)) {
+ BUG();
+ return;
+ }
+ pte = pte_offset_kernel(pmd, vaddr);
+ /* <pfn,flags> stored as-is, to permit clearing entries */
+ set_pte(pte, pfn_pte(pfn, flags));
+
+ /*
+ * It's enough to flush this one mapping.
+ * (PGE mappings get flushed as well)
+ */
+ __flush_tlb_one(vaddr);
+}
+
+/*
+ * Associate a virtual page frame with a given physical page frame
+ * and protection flags for that frame.
+ */
+static void set_pte_pfn_ma(unsigned long vaddr, unsigned long pfn,
+ pgprot_t flags)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ pgd = swapper_pg_dir + pgd_index(vaddr);
+ if (pgd_none(*pgd)) {
+ BUG();
+ return;
+ }
+ pud = pud_offset(pgd, vaddr);
+ if (pud_none(*pud)) {
+ BUG();
+ return;
+ }
+ pmd = pmd_offset(pud, vaddr);
+ if (pmd_none(*pmd)) {
+ BUG();
+ return;
+ }
+ pte = pte_offset_kernel(pmd, vaddr);
+ /* <pfn,flags> stored as-is, to permit clearing entries */
+ set_pte(pte, pfn_pte_ma(pfn, flags));
+
+ /*
+ * It's enough to flush this one mapping.
+ * (PGE mappings get flushed as well)
+ */
+ __flush_tlb_one(vaddr);
+}
+
+/*
+ * Associate a large virtual page frame with a given physical page frame
+ * and protection flags for that frame. pfn is for the base of the page,
+ * vaddr is what the page gets mapped to - both must be properly aligned.
+ * The pmd must already be instantiated. Assumes PAE mode.
+ */
+void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+
+ if (vaddr & (PMD_SIZE-1)) { /* vaddr is misaligned */
+ printk ("set_pmd_pfn: vaddr misaligned\n");
+ return; /* BUG(); */
+ }
+ if (pfn & (PTRS_PER_PTE-1)) { /* pfn is misaligned */
+ printk ("set_pmd_pfn: pfn misaligned\n");
+ return; /* BUG(); */
+ }
+ pgd = swapper_pg_dir + pgd_index(vaddr);
+ if (pgd_none(*pgd)) {
+ printk ("set_pmd_pfn: pgd_none\n");
+ return; /* BUG(); */
+ }
+ pud = pud_offset(pgd, vaddr);
+ pmd = pmd_offset(pud, vaddr);
+ set_pmd(pmd, pfn_pmd(pfn, flags));
+ /*
+ * It's enough to flush this one mapping.
+ * (PGE mappings get flushed as well)
+ */
+ __flush_tlb_one(vaddr);
+}
+
+void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
+{
+ unsigned long address = __fix_to_virt(idx);
+
+ if (idx >= __end_of_fixed_addresses) {
+ BUG();
+ return;
+ }
+ set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
+}
+
+void __set_fixmap_ma (enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
+{
+ unsigned long address = __fix_to_virt(idx);
+
+ if (idx >= __end_of_fixed_addresses) {
+ BUG();
+ return;
+ }
+ set_pte_pfn_ma(address, phys >> PAGE_SHIFT, flags);
+}
+
+pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+{
+ pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+ if (pte) {
+ make_page_readonly(pte);
+ xen_flush_page_update_queue();
+ }
+ return pte;
+}
+
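+/*
+ * Xen requires page-table pages to be mapped read-only and pinned while
+ * in use; the slab ctor/dtor pair moves pte pages in and out of that
+ * state as the cache hands them out and reclaims them.
+ */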
+void pte_ctor(void *pte, kmem_cache_t *cache, unsigned long unused)
+{
+ struct page *page = virt_to_page(pte);
+ SetPageForeign(page, pte_free);
+ set_page_count(page, 1);
+
+ clear_page(pte);
+ make_page_readonly(pte);
+ queue_pte_pin(__pa(pte));
+ flush_page_update_queue();
+}
+
+void pte_dtor(void *pte, kmem_cache_t *cache, unsigned long unused)
+{
+ struct page *page = virt_to_page(pte);
+ ClearPageForeign(page);
+
+ queue_pte_unpin(__pa(pte));
+ make_page_writable(pte);
+ flush_page_update_queue();
+}
+
+struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+ pte_t *ptep;
+
+#ifdef CONFIG_HIGHPTE
+ struct page *pte;
+
+ pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
+ if (pte == NULL)
+ return pte;
+ if (PageHighMem(pte))
+ return pte;
+ /* not a highmem page -- free page and grab one from the cache */
+ __free_page(pte);
+#endif
+ ptep = kmem_cache_alloc(pte_cache, GFP_KERNEL);
+ if (ptep)
+ return virt_to_page(ptep);
+ return NULL;
+}
+
+void pte_free(struct page *pte)
+{
+ set_page_count(pte, 1);
+#ifdef CONFIG_HIGHPTE
+ if (!PageHighMem(pte))
+#endif
+ kmem_cache_free(pte_cache,
+ phys_to_virt(page_to_pseudophys(pte)));
+#ifdef CONFIG_HIGHPTE
+ else
+ __free_page(pte);
+#endif
+}
+
+void pmd_ctor(void *pmd, kmem_cache_t *cache, unsigned long flags)
+{
+ memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
+}
+
+/*
+ * List of all pgd's needed for non-PAE so it can invalidate entries
+ * in both cached and uncached pgd's; not needed for PAE since the
+ * kernel pmd is shared. If PAE were not to share the pmd a similar
+ * tactic would be needed. This is essentially codepath-based locking
+ * against pageattr.c; it is the unique case in which a valid change
+ * of kernel pagetables can't be lazily synchronized by vmalloc faults.
+ * vmalloc faults work because attached pagetables are never freed.
+ * The locking scheme was chosen on the basis of manfred's
+ * recommendations and having no core impact whatsoever.
+ * -- wli
+ */
+DEFINE_SPINLOCK(pgd_lock);
+struct page *pgd_list;
+
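+/*
+ * The list is threaded through the pgd pages themselves: page->index
+ * points at the next page and page->private back at the previous link,
+ * giving O(1) unlink in pgd_list_del().
+ */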
+static inline void pgd_list_add(pgd_t *pgd)
+{
+ struct page *page = virt_to_page(pgd);
+ page->index = (unsigned long)pgd_list;
+ if (pgd_list)
+ pgd_list->private = (unsigned long)&page->index;
+ pgd_list = page;
+ page->private = (unsigned long)&pgd_list;
+}
+
+static inline void pgd_list_del(pgd_t *pgd)
+{
+ struct page *next, **pprev, *page = virt_to_page(pgd);
+ next = (struct page *)page->index;
+ pprev = (struct page **)page->private;
+ *pprev = next;
+ if (next)
+ next->private = (unsigned long)pprev;
+}
+
+void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
+{
+ unsigned long flags;
+
+ if (PTRS_PER_PMD == 1)
+ spin_lock_irqsave(&pgd_lock, flags);
+
+ memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
+ swapper_pg_dir + USER_PTRS_PER_PGD,
+ (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+
+ if (PTRS_PER_PMD > 1)
+ goto out;
+
+ pgd_list_add(pgd);
+ spin_unlock_irqrestore(&pgd_lock, flags);
+ memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
+ out:
+ make_page_readonly(pgd);
+ queue_pgd_pin(__pa(pgd));
+ flush_page_update_queue();
+}
+
+/* never called when PTRS_PER_PMD > 1 */
+void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused)
+{
+ unsigned long flags; /* can be called from interrupt context */
+
+ queue_pgd_unpin(__pa(pgd));
+ make_page_writable(pgd);
+ flush_page_update_queue();
+
+ if (PTRS_PER_PMD > 1)
+ return;
+
+ spin_lock_irqsave(&pgd_lock, flags);
+ pgd_list_del(pgd);
+ spin_unlock_irqrestore(&pgd_lock, flags);
+}
+
+pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+ int i;
+ pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);
+
+ if (PTRS_PER_PMD == 1 || !pgd)
+ return pgd;
+
+ for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
+ pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
+ if (!pmd)
+ goto out_oom;
+ set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
+ }
+ return pgd;
+
+out_oom:
+ for (i--; i >= 0; i--)
+ kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
+ kmem_cache_free(pgd_cache, pgd);
+ return NULL;
+}
+
+void pgd_free(pgd_t *pgd)
+{
+ int i;
+
+ /* in the PAE case user pgd entries are overwritten before usage */
+ if (PTRS_PER_PMD > 1)
+ for (i = 0; i < USER_PTRS_PER_PGD; ++i)
+ kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
+ /* in the non-PAE case, clear_page_range() clears user pgd entries */
+ kmem_cache_free(pgd_cache, pgd);
+}
+
+void make_lowmem_page_readonly(void *va)
+{
+ pte_t *pte = virt_to_ptep(va);
+ queue_l1_entry_update(pte, (*(unsigned long *)pte)&~_PAGE_RW);
+}
+
+void make_lowmem_page_writable(void *va)
+{
+ pte_t *pte = virt_to_ptep(va);
+ queue_l1_entry_update(pte, (*(unsigned long *)pte)|_PAGE_RW);
+}
+
+void make_page_readonly(void *va)
+{
+ pte_t *pte = virt_to_ptep(va);
+ queue_l1_entry_update(pte, (*(unsigned long *)pte)&~_PAGE_RW);
+ if ( (unsigned long)va >= (unsigned long)high_memory )
+ {
+ unsigned long phys;
+ phys = machine_to_phys(*(unsigned long *)pte & PAGE_MASK);
+#ifdef CONFIG_HIGHMEM
+ if ( (phys >> PAGE_SHIFT) < highstart_pfn )
+#endif
+ make_lowmem_page_readonly(phys_to_virt(phys));
+ }
+}
+
+void make_page_writable(void *va)
+{
+ pte_t *pte = virt_to_ptep(va);
+ queue_l1_entry_update(pte, (*(unsigned long *)pte)|_PAGE_RW);
+ if ( (unsigned long)va >= (unsigned long)high_memory )
+ {
+ unsigned long phys;
+ phys = machine_to_phys(*(unsigned long *)pte & PAGE_MASK);
+#ifdef CONFIG_HIGHMEM
+ if ( (phys >> PAGE_SHIFT) < highstart_pfn )
+#endif
+ make_lowmem_page_writable(phys_to_virt(phys));
+ }
+}
+
+void make_pages_readonly(void *va, unsigned int nr)
+{
+ while ( nr-- != 0 )
+ {
+ make_page_readonly(va);
+ va = (void *)((unsigned long)va + PAGE_SIZE);
+ }
+}
+
+void make_pages_writable(void *va, unsigned int nr)
+{
+ while ( nr-- != 0 )
+ {
+ make_page_writable(va);
+ va = (void *)((unsigned long)va + PAGE_SIZE);
+ }
+}
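
The pgd_list_add()/pgd_list_del() pair above reuses two struct page fields as an intrusive doubly-linked list: page->index holds the next element and page->private holds a pointer back to the previous element's "next" slot, so deletion is O(1) and never walks the list. A minimal user-space sketch of the same pointer-to-pointer technique, with plain structs standing in for struct page (all names here are illustrative, not from the patch):

#include <assert.h>
#include <stddef.h>

struct node {
	struct node *next;	/* plays the role of page->index */
	struct node **pprev;	/* plays the role of page->private */
};

static struct node *list_head;

static void node_add(struct node *n)
{
	n->next = list_head;
	if (list_head)
		list_head->pprev = &n->next;
	list_head = n;
	n->pprev = &list_head;
}

static void node_del(struct node *n)
{
	*n->pprev = n->next;	/* unlink without searching the list */
	if (n->next)
		n->next->pprev = n->pprev;
}

int main(void)
{
	struct node a = {0}, b = {0};
	node_add(&a);
	node_add(&b);
	node_del(&a);
	assert(list_head == &b && b.next == NULL);
	return 0;
}
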
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/arch/xen/i386/pci/Makefile
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/pci/Makefile Tue Aug 9 15:17:45 2005
@@ -0,0 +1,31 @@
+XENARCH := $(subst ",,$(CONFIG_XENARCH))
+
+CFLAGS += -Iarch/$(XENARCH)/pci
+
+c-obj-y := i386.o
+
+c-obj-$(CONFIG_PCI_BIOS) += pcbios.o
+c-obj-$(CONFIG_PCI_MMCONFIG) += mmconfig.o
+obj-$(CONFIG_PCI_DIRECT) += direct.o
+
+c-pci-y := fixup.o
+c-pci-$(CONFIG_ACPI_PCI) += acpi.o
+c-pci-y += legacy.o
+pci-y += irq.o
+
+c-pci-$(CONFIG_X86_VISWS) := visws.o fixup.o
+pci-$(CONFIG_X86_VISWS) :=
+c-pci-$(CONFIG_X86_NUMAQ) := numa.o
+pci-$(CONFIG_X86_NUMAQ) := irq.o
+
+obj-y += $(pci-y)
+c-obj-y += $(c-pci-y) common.o
+
+c-link :=
+
+$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-link)):
+ @ln -fsn $(srctree)/arch/i386/pci/$(notdir $@) $@
+
+obj-y += $(c-obj-y)
+
+clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-) $(c-link))
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/arch/xen/i386/pci/direct.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/pci/direct.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,81 @@
+/*
+ * direct.c - Low-level direct PCI config space access
+ */
+
+#include <linux/pci.h>
+#include <linux/init.h>
+#include "pci.h"
+
+#include <asm-xen/xen-public/xen.h>
+#include <asm-xen/xen-public/physdev.h>
+
+/*
+ * Functions for accessing PCI configuration space via Xen physdev-op hypercalls
+ */
+
+static int pci_conf_read (int seg, int bus, int devfn, int reg, int len, u32 *value)
+{
+ unsigned long flags;
+ physdev_op_t op;
+ int ret;
+
+ if (!value || (bus > 255) || (devfn > 255) || (reg > 255))
+ return -EINVAL;
+
+ spin_lock_irqsave(&pci_config_lock, flags);
+
+ op.cmd = PHYSDEVOP_PCI_CFGREG_READ;
+ op.u.pci_cfgreg_read.bus = bus;
+ op.u.pci_cfgreg_read.dev = (devfn & ~0x7) >> 3;
+ op.u.pci_cfgreg_read.func = devfn & 0x7;
+ op.u.pci_cfgreg_read.reg = reg;
+ op.u.pci_cfgreg_read.len = len;
+
+ ret = HYPERVISOR_physdev_op(&op);
+ if (ret == 0)
+ *value = op.u.pci_cfgreg_read.value;
+
+ spin_unlock_irqrestore(&pci_config_lock, flags);
+
+ return ret;
+}
+
+static int pci_conf_write (int seg, int bus, int devfn, int reg, int len, u32 value)
+{
+ unsigned long flags;
+ physdev_op_t op;
+ int ret;
+
+ if ((bus > 255) || (devfn > 255) || (reg > 255))
+ return -EINVAL;
+
+ spin_lock_irqsave(&pci_config_lock, flags);
+
+ op.cmd = PHYSDEVOP_PCI_CFGREG_WRITE;
+ op.u.pci_cfgreg_write.bus = bus;
+ op.u.pci_cfgreg_write.dev = (devfn & ~0x7) >> 3;
+ op.u.pci_cfgreg_write.func = devfn & 0x7;
+ op.u.pci_cfgreg_write.reg = reg;
+ op.u.pci_cfgreg_write.len = len;
+ op.u.pci_cfgreg_write.value = value;
+
+ ret = HYPERVISOR_physdev_op(&op);
+
+ spin_unlock_irqrestore(&pci_config_lock, flags);
+
+ return ret;
+}
+
+struct pci_raw_ops pci_direct_xen = {
+ .read = pci_conf_read,
+ .write = pci_conf_write,
+};
+
+static int __init pci_direct_init(void)
+{
+ printk(KERN_INFO "PCI: Using configuration type Xen\n");
+ raw_pci_ops = &pci_direct_xen;
+ return 0;
+}
+
+arch_initcall(pci_direct_init);
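
Once pci_direct_init() runs, every config-space access from the generic i386 PCI layer funnels through the two hypercall wrappers above. A hedged sketch of how a kernel-side caller would reach them through the raw-ops table (assuming the 2.6-era struct pci_raw_ops interface shown above and the local "pci.h" declaration of raw_pci_ops; the bus/slot values are illustrative):

#include <linux/pci.h>
#include "pci.h"	/* declares raw_pci_ops in this era */

static void example_probe_one(void)
{
	u32 id;
	int devfn = PCI_DEVFN(3, 0);	/* slot 3, function 0 */

	/* A 4-byte read at offset 0 returns vendor ID | device ID << 16. */
	if (raw_pci_ops->read(0 /* seg */, 0 /* bus */, devfn,
	                      PCI_VENDOR_ID, 4, &id) == 0)
		printk(KERN_INFO "PCI 00:03.0 id %08x\n", id);
}
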
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/arch/xen/i386/pci/irq.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/pci/irq.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,155 @@
+/*
+ * Low-Level PCI Support for PC -- Routing of Interrupts
+ *
+ * (c) 1999--2000 Martin Mares <mj@xxxxxx>
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <asm/io.h>
+#include <asm/smp.h>
+#include <asm/io_apic.h>
+#include <asm/hw_irq.h>
+#include <linux/acpi.h>
+
+#include "pci.h"
+
+#include <asm-xen/xen-public/xen.h>
+#include <asm-xen/xen-public/physdev.h>
+
+static int pirq_enable_irq(struct pci_dev *dev);
+
+/*
+ * Never use: 0, 1, 2 (timer, keyboard, and cascade)
+ * Avoid using: 13, 14 and 15 (FP error and IDE).
+ * Penalize: 3, 4, 6, 7, 12 (known ISA uses: serial, floppy, parallel and mouse)
+ */
+unsigned int pcibios_irq_mask = 0xfff8;
+
+static int pirq_penalty[16] = {
+ 1000000, 1000000, 1000000, 1000, 1000, 0, 1000, 1000,
+ 0, 0, 0, 0, 1000, 100000, 100000, 100000
+};
+
+int (*pcibios_enable_irq)(struct pci_dev *dev) = NULL;
+
+
+static int __init pcibios_irq_init(void)
+{
+ int bus;
+ physdev_op_t op;
+
+ DBG("PCI: IRQ init\n");
+
+ if (pcibios_enable_irq || raw_pci_ops == NULL)
+ return 0;
+
+ op.cmd = PHYSDEVOP_PCI_PROBE_ROOT_BUSES;
+ if (HYPERVISOR_physdev_op(&op) != 0) {
+ printk(KERN_WARNING "PCI: System does not support PCI\n");
+ return 0;
+ }
+
+ printk(KERN_INFO "PCI: Probing PCI hardware\n");
+ for (bus = 0; bus < 256; bus++)
+ if (test_bit(bus, (unsigned long *)
+ &op.u.pci_probe_root_buses.busmask[0]))
+ (void)pcibios_scan_root(bus);
+
+ pcibios_enable_irq = pirq_enable_irq;
+
+ return 0;
+}
+
+subsys_initcall(pcibios_irq_init);
+
+
+static void pirq_penalize_isa_irq(int irq)
+{
+ /*
+ * If any ISAPnP device reports an IRQ in its list of possible
+ * IRQ's, we try to avoid assigning it to PCI devices.
+ */
+ if (irq < 16)
+ pirq_penalty[irq] += 100;
+}
+
+void pcibios_penalize_isa_irq(int irq)
+{
+#ifdef CONFIG_ACPI_PCI
+ if (!acpi_noirq)
+ acpi_penalize_isa_irq(irq);
+ else
+#endif
+ pirq_penalize_isa_irq(irq);
+}
+
+static int pirq_enable_irq(struct pci_dev *dev)
+{
+ int err;
+ u8 pin;
+ physdev_op_t op;
+
+ /* Inform Xen that we are going to use this device. */
+ op.cmd = PHYSDEVOP_PCI_INITIALISE_DEVICE;
+ op.u.pci_initialise_device.bus = dev->bus->number;
+ op.u.pci_initialise_device.dev = PCI_SLOT(dev->devfn);
+ op.u.pci_initialise_device.func = PCI_FUNC(dev->devfn);
+ if ( (err = HYPERVISOR_physdev_op(&op)) != 0 )
+ return err;
+
+ /* Now we can bind to the very final IRQ line. */
+ pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &pin);
+ dev->irq = pin;
+
+ /* Sanity-check that an interrupt-producing device is routed
+ * to an IRQ. */
+ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
+ if (pin != 0) {
+ if (dev->irq != 0)
+ printk(KERN_INFO "PCI: Obtained IRQ %d for device %s\n",
+ dev->irq, dev->slot_name);
+ else
+ printk(KERN_WARNING "PCI: No IRQ known for interrupt "
+ "pin %c of device %s.\n", 'A' + pin - 1,
+ dev->slot_name);
+ }
+
+ return 0;
+}
+
+int pci_vector_resources(int last, int nr_released)
+{
+ int count = nr_released;
+
+ int next = last;
+ int offset = (last % 8);
+
+ while (next < FIRST_SYSTEM_VECTOR) {
+ next += 8;
+#ifdef CONFIG_X86_64
+ if (next == IA32_SYSCALL_VECTOR)
+ continue;
+#else
+ if (next == SYSCALL_VECTOR)
+ continue;
+#endif
+ count++;
+ if (next >= FIRST_SYSTEM_VECTOR) {
+ if (offset%8) {
+ next = FIRST_DEVICE_VECTOR + offset;
+ offset++;
+ continue;
+ }
+ count--;
+ }
+ }
+
+ return count;
+}
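
pirq_enable_irq() above unpacks devfn with PCI_SLOT()/PCI_FUNC() before handing the address to Xen. The encoding, from <linux/pci.h>, simply packs the slot into bits 7:3 and the function into bits 2:0:

#define PCI_DEVFN(slot, func)	((((slot) & 0x1f) << 3) | ((func) & 0x07))
#define PCI_SLOT(devfn)		(((devfn) >> 3) & 0x1f)
#define PCI_FUNC(devfn)		((devfn) & 0x07)

/* Example: slot 2, function 1 packs to devfn 0x11. */
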
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/arch/xen/kernel/Makefile
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/arch/xen/kernel/Makefile Tue Aug 9 15:17:45 2005
@@ -0,0 +1,14 @@
+#
+# Makefile for the linux kernel.
+#
+
+XENARCH := $(subst ",,$(CONFIG_XENARCH))
+
+CPPFLAGS_vmlinux.lds += -U$(XENARCH)
+
+$(obj)/vmlinux.lds.S:
+ @ln -fsn $(srctree)/arch/$(XENARCH)/kernel/vmlinux.lds.S $@
+
+extra-y += vmlinux.lds
+
+obj-y := ctrl_if.o evtchn.o fixup.o reboot.o xen_proc.o skbuff.o devmem.o
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/arch/xen/kernel/ctrl_if.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/arch/xen/kernel/ctrl_if.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,549 @@
+/******************************************************************************
+ * ctrl_if.c
+ *
+ * Management functions for special interface to the domain controller.
+ *
+ * Copyright (c) 2004, K A Fraser
+ *
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <asm-xen/ctrl_if.h>
+#include <asm-xen/evtchn.h>
+
+#if 0
+#define DPRINTK(_f, _a...) printk(KERN_ALERT "(file=%s, line=%d) " _f, \
+ __FILE__ , __LINE__ , ## _a )
+#else
+#define DPRINTK(_f, _a...) ((void)0)
+#endif
+
+/*
+ * Only used by initial domain which must create its own control-interface
+ * event channel. This value is picked up by the user-space domain controller
+ * via an ioctl.
+ */
+int initdom_ctrlif_domcontroller_port = -1;
+
+static int ctrl_if_evtchn;
+static int ctrl_if_irq;
+static spinlock_t ctrl_if_lock;
+
+static struct irqaction ctrl_if_irq_action;
+
+static CONTROL_RING_IDX ctrl_if_tx_resp_cons;
+static CONTROL_RING_IDX ctrl_if_rx_req_cons;
+
+/* Incoming message requests. */
+ /* Primary message type -> message handler. */
+static ctrl_msg_handler_t ctrl_if_rxmsg_handler[256];
+ /* Primary message type -> callback in process context? */
+static unsigned long ctrl_if_rxmsg_blocking_context[256/sizeof(unsigned long)];
+ /* Is it late enough during bootstrap to use schedule_task()? */
+static int safe_to_schedule_task;
+ /* Queue up messages to be handled in process context. */
+static ctrl_msg_t ctrl_if_rxmsg_deferred[CONTROL_RING_SIZE];
+static CONTROL_RING_IDX ctrl_if_rxmsg_deferred_prod;
+static CONTROL_RING_IDX ctrl_if_rxmsg_deferred_cons;
+
+/* Incoming message responses: message identifier -> message handler/id. */
+static struct {
+ ctrl_msg_handler_t fn;
+ unsigned long id;
+} ctrl_if_txmsg_id_mapping[CONTROL_RING_SIZE];
+
+/* For received messages that must be deferred to process context. */
+static void __ctrl_if_rxmsg_deferred(void *unused);
+static DECLARE_WORK(ctrl_if_rxmsg_deferred_work,
+ __ctrl_if_rxmsg_deferred,
+ NULL);
+
+/* Deferred callbacks for people waiting for space in the transmit ring. */
+static DECLARE_TASK_QUEUE(ctrl_if_tx_tq);
+
+static DECLARE_WAIT_QUEUE_HEAD(ctrl_if_tx_wait);
+static void __ctrl_if_tx_tasklet(unsigned long data);
+static DECLARE_TASKLET(ctrl_if_tx_tasklet, __ctrl_if_tx_tasklet, 0);
+
+static void __ctrl_if_rx_tasklet(unsigned long data);
+static DECLARE_TASKLET(ctrl_if_rx_tasklet, __ctrl_if_rx_tasklet, 0);
+
+#define get_ctrl_if() ((control_if_t *)((char *)HYPERVISOR_shared_info + 2048))
+#define TX_FULL(_c) \
+ (((_c)->tx_req_prod - ctrl_if_tx_resp_cons) == CONTROL_RING_SIZE)
+
+static void ctrl_if_notify_controller(void)
+{
+ notify_via_evtchn(ctrl_if_evtchn);
+}
+
+static void ctrl_if_rxmsg_default_handler(ctrl_msg_t *msg, unsigned long id)
+{
+ msg->length = 0;
+ ctrl_if_send_response(msg);
+}
+
+static void __ctrl_if_tx_tasklet(unsigned long data)
+{
+ control_if_t *ctrl_if = get_ctrl_if();
+ ctrl_msg_t *msg;
+ int was_full = TX_FULL(ctrl_if);
+ CONTROL_RING_IDX rp;
+
+ rp = ctrl_if->tx_resp_prod;
+ rmb(); /* Ensure we see all requests up to 'rp'. */
+
+ while ( ctrl_if_tx_resp_cons != rp )
+ {
+ msg = &ctrl_if->tx_ring[MASK_CONTROL_IDX(ctrl_if_tx_resp_cons)];
+
+ DPRINTK("Rx-Rsp %u/%u :: %d/%d\n",
+ ctrl_if_tx_resp_cons,
+ ctrl_if->tx_resp_prod,
+ msg->type, msg->subtype);
+
+ /* Execute the callback handler, if one was specified. */
+ if ( msg->id != 0xFF )
+ {
+ (*ctrl_if_txmsg_id_mapping[msg->id].fn)(
+ msg, ctrl_if_txmsg_id_mapping[msg->id].id);
+ smp_mb(); /* Execute, /then/ free. */
+ ctrl_if_txmsg_id_mapping[msg->id].fn = NULL;
+ }
+
+ /*
+ * Step over the message in the ring /after/ finishing reading it. As
+ * soon as the index is updated then the message may get blown away.
+ */
+ smp_mb();
+ ctrl_if_tx_resp_cons++;
+ }
+
+ if ( was_full && !TX_FULL(ctrl_if) )
+ {
+ wake_up(&ctrl_if_tx_wait);
+ run_task_queue(&ctrl_if_tx_tq);
+ }
+}
+
+static void __ctrl_if_rxmsg_deferred(void *unused)
+{
+ ctrl_msg_t *msg;
+ CONTROL_RING_IDX dp;
+
+ dp = ctrl_if_rxmsg_deferred_prod;
+ rmb(); /* Ensure we see all deferred requests up to 'dp'. */
+
+ while ( ctrl_if_rxmsg_deferred_cons != dp )
+ {
+ msg = &ctrl_if_rxmsg_deferred[MASK_CONTROL_IDX(
+ ctrl_if_rxmsg_deferred_cons++)];
+ (*ctrl_if_rxmsg_handler[msg->type])(msg, 0);
+ }
+}
+
+static void __ctrl_if_rx_tasklet(unsigned long data)
+{
+ control_if_t *ctrl_if = get_ctrl_if();
+ ctrl_msg_t msg, *pmsg;
+ CONTROL_RING_IDX rp, dp;
+
+ dp = ctrl_if_rxmsg_deferred_prod;
+ rp = ctrl_if->rx_req_prod;
+ rmb(); /* Ensure we see all requests up to 'rp'. */
+
+ while ( ctrl_if_rx_req_cons != rp )
+ {
+ pmsg = &ctrl_if->rx_ring[MASK_CONTROL_IDX(ctrl_if_rx_req_cons++)];
+ memcpy(&msg, pmsg, offsetof(ctrl_msg_t, msg));
+
+ DPRINTK("Rx-Req %u/%u :: %d/%d\n",
+ ctrl_if_rx_req_cons-1,
+ ctrl_if->rx_req_prod,
+ msg.type, msg.subtype);
+
+ if ( msg.length != 0 )
+ memcpy(msg.msg, pmsg->msg, msg.length);
+
+ if ( test_bit(msg.type,
+ (unsigned long *)&ctrl_if_rxmsg_blocking_context) )
+ memcpy(&ctrl_if_rxmsg_deferred[MASK_CONTROL_IDX(dp++)],
+ &msg, offsetof(ctrl_msg_t, msg) + msg.length);
+ else
+ (*ctrl_if_rxmsg_handler[msg.type])(&msg, 0);
+ }
+
+ if ( dp != ctrl_if_rxmsg_deferred_prod )
+ {
+ wmb();
+ ctrl_if_rxmsg_deferred_prod = dp;
+ schedule_work(&ctrl_if_rxmsg_deferred_work);
+ }
+}
+
+static irqreturn_t ctrl_if_interrupt(int irq, void *dev_id,
+ struct pt_regs *regs)
+{
+ control_if_t *ctrl_if = get_ctrl_if();
+
+ if ( ctrl_if_tx_resp_cons != ctrl_if->tx_resp_prod )
+ tasklet_schedule(&ctrl_if_tx_tasklet);
+
+ if ( ctrl_if_rx_req_cons != ctrl_if->rx_req_prod )
+ tasklet_schedule(&ctrl_if_rx_tasklet);
+
+ return IRQ_HANDLED;
+}
+
+int
+ctrl_if_send_message_noblock(
+ ctrl_msg_t *msg,
+ ctrl_msg_handler_t hnd,
+ unsigned long id)
+{
+ control_if_t *ctrl_if = get_ctrl_if();
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&ctrl_if_lock, flags);
+
+ if ( TX_FULL(ctrl_if) )
+ {
+ spin_unlock_irqrestore(&ctrl_if_lock, flags);
+ return -EAGAIN;
+ }
+
+ msg->id = 0xFF;
+ if ( hnd != NULL )
+ {
+ for ( i = 0; ctrl_if_txmsg_id_mapping[i].fn != NULL; i++ )
+ continue;
+ ctrl_if_txmsg_id_mapping[i].fn = hnd;
+ ctrl_if_txmsg_id_mapping[i].id = id;
+ msg->id = i;
+ }
+
+ DPRINTK("Tx-Req %u/%u :: %d/%d\n",
+ ctrl_if->tx_req_prod,
+ ctrl_if_tx_resp_cons,
+ msg->type, msg->subtype);
+
+ memcpy(&ctrl_if->tx_ring[MASK_CONTROL_IDX(ctrl_if->tx_req_prod)],
+ msg, sizeof(*msg));
+ wmb(); /* Write the message before letting the controller peek at it. */
+ ctrl_if->tx_req_prod++;
+
+ spin_unlock_irqrestore(&ctrl_if_lock, flags);
+
+ ctrl_if_notify_controller();
+
+ return 0;
+}
+
+int
+ctrl_if_send_message_block(
+ ctrl_msg_t *msg,
+ ctrl_msg_handler_t hnd,
+ unsigned long id,
+ long wait_state)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ int rc;
+
+ /* Fast path. */
+ if ( (rc = ctrl_if_send_message_noblock(msg, hnd, id)) != -EAGAIN )
+ return rc;
+
+ add_wait_queue(&ctrl_if_tx_wait, &wait);
+
+ for ( ; ; )
+ {
+ set_current_state(wait_state);
+
+ if ( (rc = ctrl_if_send_message_noblock(msg, hnd, id)) != -EAGAIN )
+ break;
+
+ rc = -ERESTARTSYS;
+ if ( signal_pending(current) && (wait_state == TASK_INTERRUPTIBLE) )
+ break;
+
+ schedule();
+ }
+
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&ctrl_if_tx_wait, &wait);
+
+ return rc;
+}
+
+/* Allow a response-callback handler to find context of a blocked requester. */
+struct rsp_wait {
+ ctrl_msg_t *msg; /* Buffer for the response message. */
+ struct task_struct *task; /* The task that is blocked on the response. */
+ int done; /* Indicate to 'task' that response is rcv'ed. */
+};
+
+static void __ctrl_if_get_response(ctrl_msg_t *msg, unsigned long id)
+{
+ struct rsp_wait *wait = (struct rsp_wait *)id;
+ struct task_struct *task = wait->task;
+
+ memcpy(wait->msg, msg, sizeof(*msg));
+ wmb();
+ wait->done = 1;
+
+ wake_up_process(task);
+}
+
+int
+ctrl_if_send_message_and_get_response(
+ ctrl_msg_t *msg,
+ ctrl_msg_t *rmsg,
+ long wait_state)
+{
+ struct rsp_wait wait;
+ int rc;
+
+ wait.msg = rmsg;
+ wait.done = 0;
+ wait.task = current;
+
+ if ( (rc = ctrl_if_send_message_block(msg, __ctrl_if_get_response,
+ (unsigned long)&wait,
+ wait_state)) != 0 )
+ return rc;
+
+ for ( ; ; )
+ {
+ /* NB. Can't easily support TASK_INTERRUPTIBLE here. */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ if ( wait.done )
+ break;
+ schedule();
+ }
+
+ set_current_state(TASK_RUNNING);
+ return 0;
+}
+
+int
+ctrl_if_enqueue_space_callback(
+ struct tq_struct *task)
+{
+ control_if_t *ctrl_if = get_ctrl_if();
+
+ /* Fast path. */
+ if ( !TX_FULL(ctrl_if) )
+ return 0;
+
+ (void)queue_task(task, &ctrl_if_tx_tq);
+
+ /*
+ * We may race execution of the task queue, so return re-checked status. If
+ * the task is not executed despite the ring being non-full then we will
+ * certainly return 'not full'.
+ */
+ smp_mb();
+ return TX_FULL(ctrl_if);
+}
+
+void
+ctrl_if_send_response(
+ ctrl_msg_t *msg)
+{
+ control_if_t *ctrl_if = get_ctrl_if();
+ unsigned long flags;
+ ctrl_msg_t *dmsg;
+
+ /*
+ * NB. The response may be the original request message, modified in-place.
+ * In this situation we may have src==dst, so no copying is required.
+ */
+ spin_lock_irqsave(&ctrl_if_lock, flags);
+
+ DPRINTK("Tx-Rsp %u :: %d/%d\n",
+ ctrl_if->rx_resp_prod,
+ msg->type, msg->subtype);
+
+ dmsg = &ctrl_if->rx_ring[MASK_CONTROL_IDX(ctrl_if->rx_resp_prod)];
+ if ( dmsg != msg )
+ memcpy(dmsg, msg, sizeof(*msg));
+
+ wmb(); /* Write the message before letting the controller peek at it. */
+ ctrl_if->rx_resp_prod++;
+
+ spin_unlock_irqrestore(&ctrl_if_lock, flags);
+
+ ctrl_if_notify_controller();
+}
+
+int
+ctrl_if_register_receiver(
+ u8 type,
+ ctrl_msg_handler_t hnd,
+ unsigned int flags)
+{
+ unsigned long _flags;
+ int inuse;
+
+ spin_lock_irqsave(&ctrl_if_lock, _flags);
+
+ inuse = (ctrl_if_rxmsg_handler[type] != ctrl_if_rxmsg_default_handler);
+
+ if ( inuse )
+ {
+ printk(KERN_INFO "Receiver %p already established for control "
+ "messages of type %d.\n", ctrl_if_rxmsg_handler[type], type);
+ }
+ else
+ {
+ ctrl_if_rxmsg_handler[type] = hnd;
+ clear_bit(type, (unsigned long *)&ctrl_if_rxmsg_blocking_context);
+ if ( flags == CALLBACK_IN_BLOCKING_CONTEXT )
+ {
+ set_bit(type, (unsigned long *)&ctrl_if_rxmsg_blocking_context);
+ if ( !safe_to_schedule_task )
+ BUG();
+ }
+ }
+
+ spin_unlock_irqrestore(&ctrl_if_lock, _flags);
+
+ return !inuse;
+}
+
+void
+ctrl_if_unregister_receiver(
+ u8 type,
+ ctrl_msg_handler_t hnd)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctrl_if_lock, flags);
+
+ if ( ctrl_if_rxmsg_handler[type] != hnd )
+ printk(KERN_INFO "Receiver %p is not registered for control "
+ "messages of type %d.\n", hnd, type);
+ else
+ ctrl_if_rxmsg_handler[type] = ctrl_if_rxmsg_default_handler;
+
+ spin_unlock_irqrestore(&ctrl_if_lock, flags);
+
+ /* Ensure that @hnd will not be executed after this function returns. */
+ tasklet_unlock_wait(&ctrl_if_rx_tasklet);
+}
+
+void ctrl_if_suspend(void)
+{
+ teardown_irq(ctrl_if_irq, &ctrl_if_irq_action);
+ unbind_evtchn_from_irq(ctrl_if_evtchn);
+}
+
+void ctrl_if_resume(void)
+{
+ control_if_t *ctrl_if = get_ctrl_if();
+
+ if ( xen_start_info.flags & SIF_INITDOMAIN )
+ {
+ /*
+ * The initial domain must create its own domain-controller link.
+ * The controller is probably not running at this point, but will
+ * pick up its end of the event channel from
+ */
+ evtchn_op_t op;
+ op.cmd = EVTCHNOP_bind_interdomain;
+ op.u.bind_interdomain.dom1 = DOMID_SELF;
+ op.u.bind_interdomain.dom2 = DOMID_SELF;
+ op.u.bind_interdomain.port1 = 0;
+ op.u.bind_interdomain.port2 = 0;
+ if ( HYPERVISOR_event_channel_op(&op) != 0 )
+ BUG();
+ xen_start_info.domain_controller_evtchn = op.u.bind_interdomain.port1;
+ initdom_ctrlif_domcontroller_port = op.u.bind_interdomain.port2;
+ }
+
+ /* Sync up with shared indexes. */
+ ctrl_if_tx_resp_cons = ctrl_if->tx_resp_prod;
+ ctrl_if_rx_req_cons = ctrl_if->rx_resp_prod;
+
+ ctrl_if_evtchn = xen_start_info.domain_controller_evtchn;
+ ctrl_if_irq = bind_evtchn_to_irq(ctrl_if_evtchn);
+
+ memset(&ctrl_if_irq_action, 0, sizeof(ctrl_if_irq_action));
+ ctrl_if_irq_action.handler = ctrl_if_interrupt;
+ ctrl_if_irq_action.name = "ctrl-if";
+ (void)setup_irq(ctrl_if_irq, &ctrl_if_irq_action);
+}
+
+void __init ctrl_if_init(void)
+{
+ int i;
+
+ for ( i = 0; i < 256; i++ )
+ ctrl_if_rxmsg_handler[i] = ctrl_if_rxmsg_default_handler;
+
+ spin_lock_init(&ctrl_if_lock);
+
+ ctrl_if_resume();
+}
+
+
+/* This is called after it is safe to call schedule_task(). */
+static int __init ctrl_if_late_setup(void)
+{
+ safe_to_schedule_task = 1;
+ return 0;
+}
+__initcall(ctrl_if_late_setup);
+
+
+/*
+ * !! The following are DANGEROUS FUNCTIONS !!
+ * Use with care [for example, see xencons_force_flush()].
+ */
+
+int ctrl_if_transmitter_empty(void)
+{
+ return (get_ctrl_if()->tx_req_prod == ctrl_if_tx_resp_cons);
+}
+
+void ctrl_if_discard_responses(void)
+{
+ ctrl_if_tx_resp_cons = get_ctrl_if()->tx_resp_prod;
+}
+
+EXPORT_SYMBOL(ctrl_if_send_message_noblock);
+EXPORT_SYMBOL(ctrl_if_send_message_block);
+EXPORT_SYMBOL(ctrl_if_send_message_and_get_response);
+EXPORT_SYMBOL(ctrl_if_enqueue_space_callback);
+EXPORT_SYMBOL(ctrl_if_send_response);
+EXPORT_SYMBOL(ctrl_if_register_receiver);
+EXPORT_SYMBOL(ctrl_if_unregister_receiver);
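
The exported entry points above compose into a simple RPC pattern: build a ctrl_msg_t, send it, and sleep until the controller's response lands. A hedged sketch of a hypothetical caller (assumes <asm-xen/ctrl_if.h>; CMSG_EXAMPLE and CMSG_EXAMPLE_PING are illustrative message types, not ones defined by this patch):

static int example_ping_controller(void)
{
	ctrl_msg_t req, rsp;
	int rc;

	memset(&req, 0, sizeof(req));
	req.type    = CMSG_EXAMPLE;	/* assumed registered message type */
	req.subtype = CMSG_EXAMPLE_PING;
	req.length  = 0;

	/* Blocks uninterruptibly until the controller replies into rsp. */
	rc = ctrl_if_send_message_and_get_response(&req, &rsp,
	                                           TASK_UNINTERRUPTIBLE);
	if (rc == 0)
		printk(KERN_INFO "controller replied %d/%d, %d bytes\n",
		       rsp.type, rsp.subtype, rsp.length);
	return rc;
}
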
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/arch/xen/kernel/devmem.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/arch/xen/kernel/devmem.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,158 @@
+/*
+ * Originally from linux/drivers/char/mem.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ *
+ * Added devfs support.
+ * Jan-11-1998, C. Scott Ananian <cananian@xxxxxxxxxxxxxxxxxxxx>
+ * Shared /dev/zero mmaping support, Feb 2000, Kanoj Sarcar <kanoj@xxxxxxx>
+ */
+
+#include <linux/config.h>
+#include <linux/mm.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mman.h>
+#include <linux/random.h>
+#include <linux/init.h>
+#include <linux/raw.h>
+#include <linux/tty.h>
+#include <linux/capability.h>
+#include <linux/smp_lock.h>
+#include <linux/devfs_fs_kernel.h>
+#include <linux/ptrace.h>
+#include <linux/device.h>
+#include <asm/pgalloc.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+
+static inline int uncached_access(struct file *file, unsigned long addr)
+{
+ if (file->f_flags & O_SYNC)
+ return 1;
+ /* Xen sets correct MTRR type on non-RAM for us. */
+ return 0;
+}
+
+/*
+ * This function reads the *physical* memory. The f_pos points directly to the
+ * memory location.
+ */
+static ssize_t read_mem(struct file * file, char __user * buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long i, p = *ppos;
+ ssize_t read = -EFAULT;
+ void *v;
+
+ if ((v = ioremap(p, count)) == NULL) {
+ /*
+ * Some programs (e.g., dmidecode) groove off into weird RAM
+ * areas where no tables can possibly exist (because Xen will
+ * have stomped on them!). These programs get rather upset if
+ * we let them know that Xen failed their access, so we fake
+ * out a read of all zeroes. :-)
+ */
+ for (i = 0; i < count; i++)
+ if (put_user(0, buf+i))
+ return -EFAULT;
+ return count;
+ }
+ if (copy_to_user(buf, v, count))
+ goto out;
+
+ read = count;
+ *ppos += read;
+out:
+ iounmap(v);
+ return read;
+}
+
+static ssize_t write_mem(struct file * file, const char __user * buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long p = *ppos;
+ ssize_t written = -EFAULT;
+ void *v;
+
+ if ((v = ioremap(p, count)) == NULL)
+ return -EFAULT;
+ if (copy_from_user(v, buf, count))
+ goto out;
+
+ written = count;
+ *ppos += written;
+out:
+ iounmap(v);
+ return written;
+}
+
+static int mmap_mem(struct file * file, struct vm_area_struct * vma)
+{
+ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+ int uncached;
+
+ uncached = uncached_access(file, offset);
+ if (uncached)
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ /* Don't try to swap out physical pages.. */
+ vma->vm_flags |= VM_RESERVED;
+
+ /*
+ * Don't dump addresses that are not real memory to a core file.
+ */
+ if (uncached)
+ vma->vm_flags |= VM_IO;
+
+ if (io_remap_page_range(vma, vma->vm_start, offset,
+ vma->vm_end-vma->vm_start, vma->vm_page_prot))
+ return -EAGAIN;
+
+ return 0;
+}
+
+/*
+ * The memory devices use the full 32/64 bits of the offset, and so we cannot
+ * check against negative addresses: they are ok. The return value is weird,
+ * though, in that case (0).
+ *
+ * also note that seeking relative to the "end of file" isn't supported:
+ * it has no meaning, so it returns -EINVAL.
+ */
+static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
+{
+ loff_t ret;
+
+ down(&file->f_dentry->d_inode->i_sem);
+ switch (orig) {
+ case 0:
+ file->f_pos = offset;
+ ret = file->f_pos;
+ force_successful_syscall_return();
+ break;
+ case 1:
+ file->f_pos += offset;
+ ret = file->f_pos;
+ force_successful_syscall_return();
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ up(&file->f_dentry->d_inode->i_sem);
+ return ret;
+}
+
+static int open_mem(struct inode * inode, struct file * filp)
+{
+ return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
+}
+
+struct file_operations mem_fops = {
+ .llseek = memory_lseek,
+ .read = read_mem,
+ .write = write_mem,
+ .mmap = mmap_mem,
+ .open = open_mem,
+};
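
Since read_mem() above interprets f_pos as a physical address (and fakes all-zero reads where ioremap() fails), a user-space tool reads physical memory by seeking first. A minimal sketch, which needs CAP_SYS_RAWIO to pass open_mem(); the target address is illustrative:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[16];
	int fd = open("/dev/mem", O_RDONLY);

	if (fd < 0)
		return 1;
	/* Seek to physical address 0xF0000 (traditional BIOS ROM area). */
	if (lseek(fd, 0xF0000, SEEK_SET) == (off_t)-1 ||
	    read(fd, buf, sizeof(buf)) != sizeof(buf)) {
		close(fd);
		return 1;
	}
	printf("first byte at 0xF0000: %02x\n", buf[0]);
	close(fd);
	return 0;
}
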
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,535 @@
+/******************************************************************************
+ * evtchn.c
+ *
+ * Communication via Xen event channels.
+ *
+ * Copyright (c) 2002-2004, K A Fraser
+ *
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/kernel_stat.h>
+#include <linux/version.h>
+#include <asm/atomic.h>
+#include <asm/system.h>
+#include <asm/ptrace.h>
+#include <asm/synch_bitops.h>
+#include <asm-xen/xen-public/event_channel.h>
+#include <asm-xen/xen-public/physdev.h>
+#include <asm-xen/ctrl_if.h>
+#include <asm-xen/hypervisor.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+EXPORT_SYMBOL(force_evtchn_callback);
+EXPORT_SYMBOL(evtchn_do_upcall);
+#endif
+
+/*
+ * This lock protects updates to the following mapping and reference-count
+ * arrays. The lock does not need to be acquired to read the mapping tables.
+ */
+static spinlock_t irq_mapping_update_lock;
+
+/* IRQ <-> event-channel mappings. */
+static int evtchn_to_irq[NR_EVENT_CHANNELS];
+static int irq_to_evtchn[NR_IRQS];
+
+/* IRQ <-> VIRQ mapping. */
+static int virq_to_irq[NR_VIRQS];
+
+/* Reference counts for bindings to IRQs. */
+static int irq_bindcount[NR_IRQS];
+
+/* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
+static unsigned long pirq_needs_unmask_notify[NR_PIRQS/sizeof(unsigned long)];
+
+/* Upcall to generic IRQ layer. */
+#ifdef CONFIG_X86
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9)
+extern fastcall unsigned int do_IRQ(struct pt_regs *regs);
+#else
+extern asmlinkage unsigned int do_IRQ(struct pt_regs *regs);
+#endif
+#define do_IRQ(irq, regs) do { \
+ (regs)->orig_eax = (irq); \
+ do_IRQ((regs)); \
+} while (0)
+#endif
+
+#define VALID_EVTCHN(_chn) ((_chn) >= 0)
+
+/*
+ * Force a proper event-channel callback from Xen after clearing the
+ * callback mask. We do this in a very simple manner, by making a call
+ * down into Xen. The pending flag will be checked by Xen on return.
+ */
+void force_evtchn_callback(void)
+{
+ (void)HYPERVISOR_xen_version(0);
+}
+
+/* NB. Interrupts are disabled on entry. */
+asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
+{
+ unsigned long l1, l2;
+ unsigned int l1i, l2i, port;
+ int irq;
+ shared_info_t *s = HYPERVISOR_shared_info;
+
+ s->vcpu_data[0].evtchn_upcall_pending = 0;
+
+ /* NB. No need for a barrier here -- XCHG is a barrier on x86. */
+ l1 = xchg(&s->evtchn_pending_sel, 0);
+ while ( l1 != 0 )
+ {
+ l1i = __ffs(l1);
+ l1 &= ~(1 << l1i);
+
+ l2 = s->evtchn_pending[l1i] & ~s->evtchn_mask[l1i];
+ while ( l2 != 0 )
+ {
+ l2i = __ffs(l2);
+ l2 &= ~(1 << l2i);
+
+ port = (l1i << 5) + l2i;
+ if ( (irq = evtchn_to_irq[port]) != -1 )
+ do_IRQ(irq, regs);
+ else
+ evtchn_device_upcall(port);
+ }
+ }
+}
+
+static int find_unbound_irq(void)
+{
+ int irq;
+
+ for ( irq = 0; irq < NR_IRQS; irq++ )
+ if ( irq_bindcount[irq] == 0 )
+ break;
+
+ if ( irq == NR_IRQS )
+ panic("No available IRQ to bind to: increase NR_IRQS!\n");
+
+ return irq;
+}
+
+int bind_virq_to_irq(int virq)
+{
+ evtchn_op_t op;
+ int evtchn, irq;
+
+ spin_lock(&irq_mapping_update_lock);
+
+ if ( (irq = virq_to_irq[virq]) == -1 )
+ {
+ op.cmd = EVTCHNOP_bind_virq;
+ op.u.bind_virq.virq = virq;
+ if ( HYPERVISOR_event_channel_op(&op) != 0 )
+ panic("Failed to bind virtual IRQ %d\n", virq);
+ evtchn = op.u.bind_virq.port;
+
+ irq = find_unbound_irq();
+ evtchn_to_irq[evtchn] = irq;
+ irq_to_evtchn[irq] = evtchn;
+
+ virq_to_irq[virq] = irq;
+ }
+
+ irq_bindcount[irq]++;
+
+ spin_unlock(&irq_mapping_update_lock);
+
+ return irq;
+}
+
+void unbind_virq_from_irq(int virq)
+{
+ evtchn_op_t op;
+ int irq = virq_to_irq[virq];
+ int evtchn = irq_to_evtchn[irq];
+
+ spin_lock(&irq_mapping_update_lock);
+
+ if ( --irq_bindcount[irq] == 0 )
+ {
+ op.cmd = EVTCHNOP_close;
+ op.u.close.dom = DOMID_SELF;
+ op.u.close.port = evtchn;
+ if ( HYPERVISOR_event_channel_op(&op) != 0 )
+ panic("Failed to unbind virtual IRQ %d\n", virq);
+
+ evtchn_to_irq[evtchn] = -1;
+ irq_to_evtchn[irq] = -1;
+ virq_to_irq[virq] = -1;
+ }
+
+ spin_unlock(&irq_mapping_update_lock);
+}
+
+int bind_evtchn_to_irq(int evtchn)
+{
+ int irq;
+
+ spin_lock(&irq_mapping_update_lock);
+
+ if ( (irq = evtchn_to_irq[evtchn]) == -1 )
+ {
+ irq = find_unbound_irq();
+ evtchn_to_irq[evtchn] = irq;
+ irq_to_evtchn[irq] = evtchn;
+ }
+
+ irq_bindcount[irq]++;
+
+ spin_unlock(&irq_mapping_update_lock);
+
+ return irq;
+}
+
+void unbind_evtchn_from_irq(int evtchn)
+{
+ int irq = evtchn_to_irq[evtchn];
+
+ spin_lock(&irq_mapping_update_lock);
+
+ if ( --irq_bindcount[irq] == 0 )
+ {
+ evtchn_to_irq[evtchn] = -1;
+ irq_to_evtchn[irq] = -1;
+ }
+
+ spin_unlock(&irq_mapping_update_lock);
+}
+
+
+/*
+ * Interface to generic handling in irq.c
+ */
+
+static unsigned int startup_dynirq(unsigned int irq)
+{
+ int evtchn = irq_to_evtchn[irq];
+
+ if ( !VALID_EVTCHN(evtchn) )
+ return 0;
+ unmask_evtchn(evtchn);
+ return 0;
+}
+
+static void shutdown_dynirq(unsigned int irq)
+{
+ int evtchn = irq_to_evtchn[irq];
+
+ if ( !VALID_EVTCHN(evtchn) )
+ return;
+ mask_evtchn(evtchn);
+}
+
+static void enable_dynirq(unsigned int irq)
+{
+ int evtchn = irq_to_evtchn[irq];
+
+ unmask_evtchn(evtchn);
+}
+
+static void disable_dynirq(unsigned int irq)
+{
+ int evtchn = irq_to_evtchn[irq];
+
+ mask_evtchn(evtchn);
+}
+
+static void ack_dynirq(unsigned int irq)
+{
+ int evtchn = irq_to_evtchn[irq];
+
+ mask_evtchn(evtchn);
+ clear_evtchn(evtchn);
+}
+
+static void end_dynirq(unsigned int irq)
+{
+ int evtchn = irq_to_evtchn[irq];
+
+ if ( !(irq_desc[irq].status & IRQ_DISABLED) )
+ unmask_evtchn(evtchn);
+}
+
+static struct hw_interrupt_type dynirq_type = {
+ "Dynamic-irq",
+ startup_dynirq,
+ shutdown_dynirq,
+ enable_dynirq,
+ disable_dynirq,
+ ack_dynirq,
+ end_dynirq,
+ NULL
+};
+
+static inline void pirq_unmask_notify(int pirq)
+{
+ physdev_op_t op;
+ if ( unlikely(test_bit(pirq, &pirq_needs_unmask_notify[0])) )
+ {
+ op.cmd = PHYSDEVOP_IRQ_UNMASK_NOTIFY;
+ (void)HYPERVISOR_physdev_op(&op);
+ }
+}
+
+static inline void pirq_query_unmask(int pirq)
+{
+ physdev_op_t op;
+ op.cmd = PHYSDEVOP_IRQ_STATUS_QUERY;
+ op.u.irq_status_query.irq = pirq;
+ (void)HYPERVISOR_physdev_op(&op);
+ clear_bit(pirq, &pirq_needs_unmask_notify[0]);
+ if ( op.u.irq_status_query.flags & PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY )
+ set_bit(pirq, &pirq_needs_unmask_notify[0]);
+}
+
+/*
+ * On startup, if there is no action associated with the IRQ then we are
+ * probing. In this case we should not share with others as it will confuse us.
+ */
+#define probing_irq(_irq) (irq_desc[(_irq)].action == NULL)
+
+static unsigned int startup_pirq(unsigned int irq)
+{
+ evtchn_op_t op;
+ int evtchn;
+
+ op.cmd = EVTCHNOP_bind_pirq;
+ op.u.bind_pirq.pirq = irq;
+ /* NB. We are happy to share unless we are probing. */
+ op.u.bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
+ if ( HYPERVISOR_event_channel_op(&op) != 0 )
+ {
+ if ( !probing_irq(irq) ) /* Some failures are expected when probing. */
+ printk(KERN_INFO "Failed to obtain physical IRQ %d\n", irq);
+ return 0;
+ }
+ evtchn = op.u.bind_pirq.port;
+
+ pirq_query_unmask(irq_to_pirq(irq));
+
+ evtchn_to_irq[evtchn] = irq;
+ irq_to_evtchn[irq] = evtchn;
+
+ unmask_evtchn(evtchn);
+ pirq_unmask_notify(irq_to_pirq(irq));
+
+ return 0;
+}
+
+static void shutdown_pirq(unsigned int irq)
+{
+ evtchn_op_t op;
+ int evtchn = irq_to_evtchn[irq];
+
+ if ( !VALID_EVTCHN(evtchn) )
+ return;
+
+ mask_evtchn(evtchn);
+
+ op.cmd = EVTCHNOP_close;
+ op.u.close.dom = DOMID_SELF;
+ op.u.close.port = evtchn;
+ if ( HYPERVISOR_event_channel_op(&op) != 0 )
+ panic("Failed to unbind physical IRQ %d\n", irq);
+
+ evtchn_to_irq[evtchn] = -1;
+ irq_to_evtchn[irq] = -1;
+}
+
+static void enable_pirq(unsigned int irq)
+{
+ int evtchn = irq_to_evtchn[irq];
+ if ( !VALID_EVTCHN(evtchn) )
+ return;
+ unmask_evtchn(evtchn);
+ pirq_unmask_notify(irq_to_pirq(irq));
+}
+
+static void disable_pirq(unsigned int irq)
+{
+ int evtchn = irq_to_evtchn[irq];
+ if ( !VALID_EVTCHN(evtchn) )
+ return;
+ mask_evtchn(evtchn);
+}
+
+static void ack_pirq(unsigned int irq)
+{
+ int evtchn = irq_to_evtchn[irq];
+ if ( !VALID_EVTCHN(evtchn) )
+ return;
+ mask_evtchn(evtchn);
+ clear_evtchn(evtchn);
+}
+
+static void end_pirq(unsigned int irq)
+{
+ int evtchn = irq_to_evtchn[irq];
+ if ( !VALID_EVTCHN(evtchn) )
+ return;
+ if ( !(irq_desc[irq].status & IRQ_DISABLED) )
+ {
+ unmask_evtchn(evtchn);
+ pirq_unmask_notify(irq_to_pirq(irq));
+ }
+}
+
+static struct hw_interrupt_type pirq_type = {
+ "Phys-irq",
+ startup_pirq,
+ shutdown_pirq,
+ enable_pirq,
+ disable_pirq,
+ ack_pirq,
+ end_pirq,
+ NULL
+};
+
+static irqreturn_t misdirect_interrupt(int irq, void *dev_id,
+ struct pt_regs *regs)
+{
+ /* nothing */
+ return IRQ_HANDLED;
+}
+
+static struct irqaction misdirect_action = {
+ misdirect_interrupt,
+ SA_INTERRUPT,
+ CPU_MASK_NONE,
+ "misdirect",
+ NULL,
+ NULL
+};
+
+void irq_suspend(void)
+{
+ int pirq, virq, irq, evtchn;
+
+ /* Unbind VIRQs from event channels. */
+ for ( virq = 0; virq < NR_VIRQS; virq++ )
+ {
+ if ( (irq = virq_to_irq[virq]) == -1 )
+ continue;
+ evtchn = irq_to_evtchn[irq];
+
+ /* Mark the event channel as unused in our table. */
+ evtchn_to_irq[evtchn] = -1;
+ irq_to_evtchn[irq] = -1;
+ }
+
+ /* Check that no PIRQs are still bound. */
+ for ( pirq = 0; pirq < NR_PIRQS; pirq++ )
+ if ( (evtchn = irq_to_evtchn[pirq_to_irq(pirq)]) != -1 )
+ panic("Suspend attempted while PIRQ %d bound to evtchn %d.\n",
+ pirq, evtchn);
+}
+
+void irq_resume(void)
+{
+ evtchn_op_t op;
+ int virq, irq, evtchn;
+
+ for ( evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++ )
+ mask_evtchn(evtchn); /* New event-channel space is not 'live' yet. */
+
+ for ( virq = 0; virq < NR_VIRQS; virq++ )
+ {
+ if ( (irq = virq_to_irq[virq]) == -1 )
+ continue;
+
+ /* Get a new binding from Xen. */
+ op.cmd = EVTCHNOP_bind_virq;
+ op.u.bind_virq.virq = virq;
+ if ( HYPERVISOR_event_channel_op(&op) != 0 )
+ panic("Failed to bind virtual IRQ %d\n", virq);
+ evtchn = op.u.bind_virq.port;
+
+ /* Record the new mapping. */
+ evtchn_to_irq[evtchn] = irq;
+ irq_to_evtchn[irq] = evtchn;
+
+ /* Ready for use. */
+ unmask_evtchn(evtchn);
+ }
+}
+
+void __init init_IRQ(void)
+{
+ int i;
+
+ irq_ctx_init(0);
+
+ spin_lock_init(&irq_mapping_update_lock);
+
+ /* No VIRQ -> IRQ mappings. */
+ for ( i = 0; i < NR_VIRQS; i++ )
+ virq_to_irq[i] = -1;
+
+ /* No event-channel -> IRQ mappings. */
+ for ( i = 0; i < NR_EVENT_CHANNELS; i++ )
+ {
+ evtchn_to_irq[i] = -1;
+ mask_evtchn(i); /* No event channels are 'live' right now. */
+ }
+
+ /* No IRQ -> event-channel mappings. */
+ for ( i = 0; i < NR_IRQS; i++ )
+ irq_to_evtchn[i] = -1;
+
+ for ( i = 0; i < NR_DYNIRQS; i++ )
+ {
+ /* Dynamic IRQ space is currently unbound. Zero the refcnts. */
+ irq_bindcount[dynirq_to_irq(i)] = 0;
+
+ irq_desc[dynirq_to_irq(i)].status = IRQ_DISABLED;
+ irq_desc[dynirq_to_irq(i)].action = 0;
+ irq_desc[dynirq_to_irq(i)].depth = 1;
+ irq_desc[dynirq_to_irq(i)].handler = &dynirq_type;
+ }
+
+ for ( i = 0; i < NR_PIRQS; i++ )
+ {
+ /* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
+ irq_bindcount[pirq_to_irq(i)] = 1;
+
+ irq_desc[pirq_to_irq(i)].status = IRQ_DISABLED;
+ irq_desc[pirq_to_irq(i)].action = 0;
+ irq_desc[pirq_to_irq(i)].depth = 1;
+ irq_desc[pirq_to_irq(i)].handler = &pirq_type;
+ }
+
+ (void)setup_irq(bind_virq_to_irq(VIRQ_MISDIRECT), &misdirect_action);
+
+ /* This needs to be done early, but after the IRQ subsystem is alive. */
+ ctrl_if_init();
+}
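
evtchn_do_upcall() above walks a two-level pending structure: each set bit i in the 32-bit selector word says word i of evtchn_pending[] needs scanning, so a port number decodes as (l1i << 5) + l2i. A self-contained user-space sketch of the same scan over the fixed 32x32 layout this 32-bit-era code assumes:

#include <stdio.h>
#include <strings.h>	/* ffs() */

/* Bit i of 'sel' means word i of pending[] is non-empty;
 * port = (l1i << 5) + l2i, exactly as in the upcall above. */
static void scan_pending(unsigned int sel, unsigned int pending[32])
{
	while (sel != 0) {
		int l1i = ffs(sel) - 1;
		sel &= ~(1u << l1i);

		unsigned int l2 = pending[l1i];
		while (l2 != 0) {
			int l2i = ffs(l2) - 1;
			l2 &= ~(1u << l2i);
			printf("port %d pending\n", (l1i << 5) + l2i);
		}
	}
}

int main(void)
{
	unsigned int pending[32] = {0};

	pending[1] = 1u << 4;		/* port 36 = (1 << 5) + 4 */
	scan_pending(1u << 1, pending);	/* selector says "scan word 1" */
	return 0;
}
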
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/arch/xen/kernel/fixup.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/arch/xen/kernel/fixup.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,83 @@
+/******************************************************************************
+ * fixup.c
+ *
+ * Binary-rewriting of certain IA32 instructions, on notification by Xen.
+ * Used to avoid repeated slow emulation of common instructions used by the
+ * user-space TLS (Thread-Local Storage) libraries.
+ *
+ * **** NOTE ****
+ * Issues with the binary rewriting have caused it to be removed. Instead
+ * we rely on Xen's emulator to boot the kernel, and then print a banner
+ * message recommending that the user disables /lib/tls.
+ *
+ * Copyright (c) 2004, K A Fraser
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/version.h>
+
+#define DP(_f) printk(KERN_ALERT " " _f "\n")
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+#define __LINKAGE fastcall
+#else
+#define __LINKAGE asmlinkage
+#endif
+
+__LINKAGE void do_fixup_4gb_segment(struct pt_regs *regs, long error_code)
+{
+ static unsigned long printed = 0;
+ int i;
+
+ if ( !test_and_set_bit(0, &printed) )
+ {
+ HYPERVISOR_vm_assist(VMASST_CMD_disable,
+ VMASST_TYPE_4gb_segments_notify);
+
+ DP("");
+ DP("***************************************************************");
+ DP("***************************************************************");
+ DP("** WARNING: Currently emulating unsupported memory accesses **");
+ DP("** in /lib/tls libraries. The emulation is very **");
+ DP("** slow. To ensure full performance you should **");
+ DP("** execute the following as root: **");
+ DP("** mv /lib/tls /lib/tls.disabled **");
+ DP("***************************************************************");
+ DP("***************************************************************");
+ DP("");
+
+ for ( i = 5; i > 0; i-- )
+ {
+ printk("Pausing... %d", i);
+ mdelay(1000);
+ printk("\b\b\b\b\b\b\b\b\b\b\b\b");
+ }
+ printk("Continuing...\n\n");
+ }
+}
+
+static int __init fixup_init(void)
+{
+ HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments_notify);
+ return 0;
+}
+__initcall(fixup_init);
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/arch/xen/kernel/reboot.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/arch/xen/kernel/reboot.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,256 @@
+
+#define __KERNEL_SYSCALLS__
+static int errno;
+#include <linux/errno.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/unistd.h>
+#include <linux/module.h>
+#include <linux/reboot.h>
+#include <linux/sysrq.h>
+#include <asm/irq.h>
+#include <asm/mmu_context.h>
+#include <asm-xen/ctrl_if.h>
+#include <asm-xen/hypervisor.h>
+#include <asm-xen/xen-public/dom0_ops.h>
+#include <asm-xen/linux-public/suspend.h>
+#include <asm-xen/queues.h>
+
+void machine_restart(char * __unused)
+{
+ /* We really want to get pending console data out before we die. */
+ extern void xencons_force_flush(void);
+ xencons_force_flush();
+ HYPERVISOR_reboot();
+}
+
+void machine_halt(void)
+{
+ machine_power_off();
+}
+
+void machine_power_off(void)
+{
+ /* We really want to get pending console data out before we die. */
+ extern void xencons_force_flush(void);
+ xencons_force_flush();
+ HYPERVISOR_shutdown();
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+int reboot_thru_bios = 0; /* for dmi_scan.c */
+EXPORT_SYMBOL(machine_restart);
+EXPORT_SYMBOL(machine_halt);
+EXPORT_SYMBOL(machine_power_off);
+#endif
+
+
+/******************************************************************************
+ * Stop/pickle callback handling.
+ */
+
+/* Ignore multiple shutdown requests. */
+static int shutting_down = -1;
+
+static void __do_suspend(void)
+{
+ int i, j;
+ suspend_record_t *suspend_record;
+
+ /* Hmmm... a cleaner interface to suspend/resume blkdevs would be nice. */
+ /* XXX SMH: yes it would :-( */
+#ifdef CONFIG_XEN_BLKDEV_FRONTEND
+ extern void blkdev_suspend(void);
+ extern void blkdev_resume(void);
+#else
+#define blkdev_suspend() do{}while(0)
+#define blkdev_resume() do{}while(0)
+#endif
+
+#ifdef CONFIG_XEN_NETDEV_FRONTEND
+ extern void netif_suspend(void);
+ extern void netif_resume(void);
+#else
+#define netif_suspend() do{}while(0)
+#define netif_resume() do{}while(0)
+#endif
+
+ extern void time_suspend(void);
+ extern void time_resume(void);
+ extern unsigned long max_pfn;
+ extern unsigned int *pfn_to_mfn_frame_list;
+
+ suspend_record = (suspend_record_t *)__get_free_page(GFP_KERNEL);
+ if ( suspend_record == NULL )
+ goto out;
+
+ suspend_record->nr_pfns = max_pfn; /* final number of pfns */
+
+ __cli();
+
+ netif_suspend();
+
+ blkdev_suspend();
+
+ time_suspend();
+
+ ctrl_if_suspend();
+
+ irq_suspend();
+
+ HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
+ clear_fixmap(FIX_SHARED_INFO);
+
+ memcpy(&suspend_record->resume_info, &xen_start_info,
+ sizeof(xen_start_info));
+
+ HYPERVISOR_suspend(virt_to_machine(suspend_record) >> PAGE_SHIFT);
+
+ HYPERVISOR_vm_assist(VMASST_CMD_enable,
+ VMASST_TYPE_4gb_segments);
+#ifdef CONFIG_XEN_WRITABLE_PAGETABLES
+ HYPERVISOR_vm_assist(VMASST_CMD_enable,
+ VMASST_TYPE_writable_pagetables);
+#endif
+
+ shutting_down = -1;
+
+ memcpy(&xen_start_info, &suspend_record->resume_info,
+ sizeof(xen_start_info));
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+ set_fixmap_ma(FIX_SHARED_INFO, xen_start_info.shared_info);
+#else
+ set_fixmap(FIX_SHARED_INFO, xen_start_info.shared_info);
+#endif
+
+ HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
+
+ memset(empty_zero_page, 0, PAGE_SIZE);
+
+ for ( i=0, j=0; i < max_pfn; i+=(PAGE_SIZE/sizeof(unsigned long)), j++ )
+ {
+ pfn_to_mfn_frame_list[j] =
+ virt_to_machine(&phys_to_machine_mapping[i]) >> PAGE_SHIFT;
+ }
+ HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list =
+ virt_to_machine(pfn_to_mfn_frame_list) >> PAGE_SHIFT;
+
+
+ irq_resume();
+
+ ctrl_if_resume();
+
+ time_resume();
+
+ blkdev_resume();
+
+ netif_resume();
+
+ __sti();
+
+ out:
+ if ( suspend_record != NULL )
+ free_page((unsigned long)suspend_record);
+}
+
+static int shutdown_process(void *__unused)
+{
+ static char *envp[] = { "HOME=/", "TERM=linux",
+ "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
+ static char *restart_argv[] = { "/sbin/shutdown", "-r", "now", NULL };
+ static char *poweroff_argv[] = { "/sbin/halt", "-p", NULL };
+
+ extern asmlinkage long sys_reboot(int magic1, int magic2,
+ unsigned int cmd, void *arg);
+
+ daemonize(
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+ "shutdown"
+#endif
+ );
+
+ switch ( shutting_down )
+ {
+ case CMSG_SHUTDOWN_POWEROFF:
+ if ( execve("/sbin/halt", poweroff_argv, envp) < 0 )
+ {
+ sys_reboot(LINUX_REBOOT_MAGIC1,
+ LINUX_REBOOT_MAGIC2,
+ LINUX_REBOOT_CMD_POWER_OFF,
+ NULL);
+ }
+ break;
+
+ case CMSG_SHUTDOWN_REBOOT:
+ if ( execve("/sbin/shutdown", restart_argv, envp) < 0 )
+ {
+ sys_reboot(LINUX_REBOOT_MAGIC1,
+ LINUX_REBOOT_MAGIC2,
+ LINUX_REBOOT_CMD_RESTART,
+ NULL);
+ }
+ break;
+ }
+
+ shutting_down = -1; /* could try again */
+
+ return 0;
+}
+
+static void __shutdown_handler(void *unused)
+{
+ int err;
+
+ if ( shutting_down != CMSG_SHUTDOWN_SUSPEND )
+ {
+ err = kernel_thread(shutdown_process, NULL, CLONE_FS | CLONE_FILES);
+ if ( err < 0 )
+ printk(KERN_ALERT "Error creating shutdown process!\n");
+ }
+ else
+ {
+ __do_suspend();
+ }
+}
+
+static void shutdown_handler(ctrl_msg_t *msg, unsigned long id)
+{
+ static DECLARE_WORK(shutdown_work, __shutdown_handler, NULL);
+
+ if ( msg->subtype == CMSG_SHUTDOWN_SYSRQ )
+ {
+ int sysrq = ((shutdown_sysrq_t *)&msg->msg[0])->key;
+
+#ifdef CONFIG_MAGIC_SYSRQ
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+ handle_sysrq(sysrq, NULL, NULL);
+#else
+ handle_sysrq(sysrq, NULL, NULL, NULL);
+#endif
+#endif
+ }
+ else if ( (shutting_down == -1) &&
+ ((msg->subtype == CMSG_SHUTDOWN_POWEROFF) ||
+ (msg->subtype == CMSG_SHUTDOWN_REBOOT) ||
+ (msg->subtype == CMSG_SHUTDOWN_SUSPEND)) )
+ {
+ shutting_down = msg->subtype;
+ schedule_work(&shutdown_work);
+ }
+ else
+ {
+ printk("Ignore spurious shutdown request\n");
+ }
+
+ ctrl_if_send_response(msg);
+}
+
+static int __init setup_shutdown_event(void)
+{
+ ctrl_if_register_receiver(CMSG_SHUTDOWN, shutdown_handler, 0);
+ return 0;
+}
+
+__initcall(setup_shutdown_event);
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/arch/xen/kernel/skbuff.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/arch/xen/kernel/skbuff.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,47 @@
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <asm/io.h>
+#include <asm/page.h>
+
+EXPORT_SYMBOL(__dev_alloc_skb);
+
+/* Referenced in netback.c. */
+/*static*/ kmem_cache_t *skbuff_cachep;
+
+/* Size must be cacheline-aligned (alloc_skb uses SKB_DATA_ALIGN). */
+#define XEN_SKB_SIZE \
+ ((PAGE_SIZE - sizeof(struct skb_shared_info)) & ~(SMP_CACHE_BYTES - 1))
+
+struct sk_buff *__dev_alloc_skb(unsigned int length, int gfp_mask)
+{
+ struct sk_buff *skb;
+ skb = alloc_skb_from_cache(skbuff_cachep, length + 16, gfp_mask);
+ if ( likely(skb != NULL) )
+ skb_reserve(skb, 16);
+ return skb;
+}
+
+static void skbuff_ctor(void *buf, kmem_cache_t *cachep, unsigned long unused)
+{
+ scrub_pages(buf, 1);
+}
+
+static int __init skbuff_init(void)
+{
+ skbuff_cachep = kmem_cache_create(
+ "xen-skb", PAGE_SIZE, PAGE_SIZE, 0, skbuff_ctor, NULL);
+ return 0;
+}
+__initcall(skbuff_init);
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/arch/xen/kernel/xen_proc.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/arch/xen/kernel/xen_proc.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,18 @@
+
+#include <linux/config.h>
+#include <linux/proc_fs.h>
+
+static struct proc_dir_entry *xen_base;
+
+struct proc_dir_entry *create_xen_proc_entry(const char *name, mode_t mode)
+{
+ if ( xen_base == NULL )
+ if ( (xen_base = proc_mkdir("xen", &proc_root)) == NULL )
+ panic("Couldn't create /proc/xen");
+ return create_proc_entry(name, mode, xen_base);
+}
+
+void remove_xen_proc_entry(const char *name)
+{
+ remove_proc_entry(name, xen_base);
+}
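
create_xen_proc_entry() above lazily creates /proc/xen and returns a bare entry for the caller to fill in. A hedged sketch of a typical 2.6.11-era user (read_proc-style interface of that period; the entry name "example" and the handler are invented for illustration):

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/proc_fs.h>

static int example_read(char *page, char **start, off_t off,
                        int count, int *eof, void *data)
{
	*eof = 1;
	return sprintf(page, "hello from /proc/xen/example\n");
}

static int __init example_init(void)
{
	struct proc_dir_entry *e = create_xen_proc_entry("example", 0444);
	if (e == NULL)
		return -ENOMEM;
	e->read_proc = example_read;	/* populate the returned entry */
	return 0;
}
__initcall(example_init);
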
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/arch/xen/x86_64/kernel/early_printk.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/early_printk.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,242 @@
+#include <linux/config.h>
+#include <linux/console.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <asm/io.h>
+#include <asm/processor.h>
+
+/* Simple VGA output */
+
+#ifdef __i386__
+#define VGABASE (__ISA_IO_base + 0xb8000)
+#else
+#define VGABASE ((void __iomem *)0xffffffff800b8000UL)
+#endif
+
+#define MAX_YPOS 25
+#define MAX_XPOS 80
+
+static int current_ypos = 1, current_xpos = 0;
+
+static void early_vga_write(struct console *con, const char *str, unsigned n)
+{
+ char c;
+ int i, k, j;
+
+ while ((c = *str++) != '\0' && n-- > 0) {
+ if (current_ypos >= MAX_YPOS) {
+ /* scroll 1 line up */
+ for (k = 1, j = 0; k < MAX_YPOS; k++, j++) {
+ for (i = 0; i < MAX_XPOS; i++) {
+ writew(readw(VGABASE + 2*(MAX_XPOS*k + i)),
+ VGABASE + 2*(MAX_XPOS*j + i));
+ }
+ }
+ for (i = 0; i < MAX_XPOS; i++)
+ writew(0x720, VGABASE + 2*(MAX_XPOS*j + i));
+ current_ypos = MAX_YPOS-1;
+ }
+ if (c == '\n') {
+ current_xpos = 0;
+ current_ypos++;
+ } else if (c != '\r') {
+ writew(((0x7 << 8) | (unsigned short) c),
+ VGABASE + 2*(MAX_XPOS*current_ypos +
+ current_xpos++));
+ if (current_xpos >= MAX_XPOS) {
+ current_xpos = 0;
+ current_ypos++;
+ }
+ }
+ }
+}
+
+static struct console early_vga_console = {
+ .name = "earlyvga",
+ .write = early_vga_write,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+};
+
+#ifndef CONFIG_XEN
+/* Serial functions loosely based on a similar package from Klaus P. Gerlicher */
+
+int early_serial_base = 0x3f8; /* ttyS0 */
+
+#define XMTRDY 0x20
+
+#define DLAB 0x80
+
+#define TXR 0 /* Transmit register (WRITE) */
+#define RXR 0 /* Receive register (READ) */
+#define IER 1 /* Interrupt Enable */
+#define IIR 2 /* Interrupt ID */
+#define FCR 2 /* FIFO control */
+#define LCR 3 /* Line control */
+#define MCR 4 /* Modem control */
+#define LSR 5 /* Line Status */
+#define MSR 6 /* Modem Status */
+#define DLL 0 /* Divisor Latch Low */
+#define DLH 1 /* Divisor latch High */
+
+static int early_serial_putc(unsigned char ch)
+{
+ unsigned timeout = 0xffff;
+ while ((inb(early_serial_base + LSR) & XMTRDY) == 0 && --timeout)
+ cpu_relax();
+ outb(ch, early_serial_base + TXR);
+ return timeout ? 0 : -1;
+}
+
+static void early_serial_write(struct console *con, const char *s, unsigned n)
+{
+ while (*s && n-- > 0) {
+ early_serial_putc(*s);
+ if (*s == '\n')
+ early_serial_putc('\r');
+ s++;
+ }
+}
+
+#define DEFAULT_BAUD 9600
+
+static __init void early_serial_init(char *s)
+{
+ unsigned char c;
+ unsigned divisor;
+ unsigned baud = DEFAULT_BAUD;
+ char *e;
+
+ if (*s == ',')
+ ++s;
+
+ if (*s) {
+ unsigned port;
+ if (!strncmp(s,"0x",2)) {
+ early_serial_base = simple_strtoul(s, &e, 16);
+ } else {
+ static int bases[] = { 0x3f8, 0x2f8 };
+
+ if (!strncmp(s,"ttyS",4))
+ s += 4;
+ port = simple_strtoul(s, &e, 10);
+ if (port > 1 || s == e)
+ port = 0;
+ early_serial_base = bases[port];
+ }
+ s += strcspn(s, ",");
+ if (*s == ',')
+ s++;
+ }
+
+ outb(0x3, early_serial_base + LCR); /* 8n1 */
+ outb(0, early_serial_base + IER); /* no interrupt */
+ outb(0, early_serial_base + FCR); /* no fifo */
+ outb(0x3, early_serial_base + MCR); /* DTR + RTS */
+
+ if (*s) {
+ baud = simple_strtoul(s, &e, 0);
+ if (baud == 0 || s == e)
+ baud = DEFAULT_BAUD;
+ }
+
+ divisor = 115200 / baud;
+ c = inb(early_serial_base + LCR);
+ outb(c | DLAB, early_serial_base + LCR);
+ outb(divisor & 0xff, early_serial_base + DLL);
+ outb((divisor >> 8) & 0xff, early_serial_base + DLH);
+ outb(c & ~DLAB, early_serial_base + LCR);
+}
+#else
+
+static void
+early_serial_write(struct console *con, const char *s, unsigned count)
+{
+ int n;
+
+ while (count > 0) {
+ n = HYPERVISOR_console_io(CONSOLEIO_write, count, (char *)s);
+ if (n <= 0)
+ break;
+ count -= n;
+ s += n;
+ }
+}
+
+static __init void early_serial_init(char *s)
+{
+}
+#endif
+
+static struct console early_serial_console = {
+ .name = "earlyser",
+ .write = early_serial_write,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+};
+
+/* Direct interface for emergencies */
+struct console *early_console = &early_vga_console;
+static int early_console_initialized = 0;
+
+void early_printk(const char *fmt, ...)
+{
+ char buf[512];
+ int n;
+ va_list ap;
+
+ va_start(ap,fmt);
+ n = vscnprintf(buf,512,fmt,ap);
+ early_console->write(early_console,buf,n);
+ va_end(ap);
+}
+
+static int keep_early;
+
+int __init setup_early_printk(char *opt)
+{
+ char *space;
+ char buf[256];
+
+ if (early_console_initialized)
+ return -1;
+
+ opt = strchr(opt, '=') + 1;
+
+ strlcpy(buf,opt,sizeof(buf));
+ space = strchr(buf, ' ');
+ if (space)
+ *space = 0;
+
+ if (strstr(buf,"keep"))
+ keep_early = 1;
+
+ if (!strncmp(buf, "serial", 6)) {
+ early_serial_init(buf + 6);
+ early_console = &early_serial_console;
+ } else if (!strncmp(buf, "ttyS", 4)) {
+ early_serial_init(buf);
+ early_console = &early_serial_console;
+ } else if (!strncmp(buf, "vga", 3)) {
+ early_console = &early_vga_console;
+ }
+ early_console_initialized = 1;
+ register_console(early_console);
+ return 0;
+}
+
+void __init disable_early_printk(void)
+{
+ if (!early_console_initialized || !early_console)
+ return;
+ if (!keep_early) {
+ printk("disabling early console\n");
+ unregister_console(early_console);
+ early_console_initialized = 0;
+ } else {
+ printk("keeping early console\n");
+ }
+}
+
+__setup("earlyprintk=", setup_early_printk);
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/drivers/Makefile
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/drivers/Makefile Tue Aug 9 15:17:45 2005
@@ -0,0 +1,66 @@
+#
+# Makefile for the Linux kernel device drivers.
+#
+# 15 Sep 2000, Christoph Hellwig <hch@xxxxxxxxxxxxx>
+# Rewritten to use lists instead of if-statements.
+#
+
+obj-$(CONFIG_PCI) += pci/
+obj-$(CONFIG_PARISC) += parisc/
+obj-y += video/
+obj-$(CONFIG_ACPI_BOOT) += acpi/
+# PnP must come after ACPI since it will eventually need to check if acpi
+# was used and do nothing if so
+obj-$(CONFIG_PNP) += pnp/
+
+# char/ comes before serial/ etc so that the VT console is the boot-time
+# default.
+obj-y += char/
+
+# i810fb and intelfb depend on char/agp/
+obj-$(CONFIG_FB_I810) += video/i810/
+obj-$(CONFIG_FB_INTEL) += video/intelfb/
+
+# we also need input/serio early so serio bus is initialized by the time
+# serial drivers start registering their serio ports
+obj-$(CONFIG_SERIO) += input/serio/
+obj-y += serial/
+obj-$(CONFIG_PARPORT) += parport/
+obj-y += base/ block/ misc/ net/ media/
+obj-$(CONFIG_NUBUS) += nubus/
+obj-$(CONFIG_ATM) += atm/
+obj-$(CONFIG_PPC_PMAC) += macintosh/
+obj-$(CONFIG_ARCH_XEN) += xen/
+obj-$(CONFIG_IDE) += ide/
+obj-$(CONFIG_FC4) += fc4/
+obj-$(CONFIG_SCSI) += scsi/
+obj-$(CONFIG_FUSION) += message/
+obj-$(CONFIG_IEEE1394) += ieee1394/
+obj-y += cdrom/
+obj-$(CONFIG_MTD) += mtd/
+obj-$(CONFIG_PCCARD) += pcmcia/
+obj-$(CONFIG_DIO) += dio/
+obj-$(CONFIG_SBUS) += sbus/
+obj-$(CONFIG_ZORRO) += zorro/
+obj-$(CONFIG_MAC) += macintosh/
+obj-$(CONFIG_ATA_OVER_ETH) += block/aoe/
+obj-$(CONFIG_PARIDE) += block/paride/
+obj-$(CONFIG_TC) += tc/
+obj-$(CONFIG_USB) += usb/
+obj-$(CONFIG_USB_GADGET) += usb/gadget/
+obj-$(CONFIG_INPUT) += input/
+obj-$(CONFIG_GAMEPORT) += input/gameport/
+obj-$(CONFIG_I2O) += message/
+obj-$(CONFIG_I2C) += i2c/
+obj-$(CONFIG_W1) += w1/
+obj-$(CONFIG_PHONE) += telephony/
+obj-$(CONFIG_MD) += md/
+obj-$(CONFIG_BT) += bluetooth/
+obj-$(CONFIG_ISDN) += isdn/
+obj-$(CONFIG_MCA) += mca/
+obj-$(CONFIG_EISA) += eisa/
+obj-$(CONFIG_CPU_FREQ) += cpufreq/
+obj-$(CONFIG_MMC) += mmc/
+obj-$(CONFIG_INFINIBAND) += infiniband/
+obj-y += firmware/
+obj-$(CONFIG_CRYPTO) += crypto/
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/drivers/char/mem.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/drivers/char/mem.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,733 @@
+/*
+ * linux/drivers/char/mem.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ *
+ * Added devfs support.
+ * Jan-11-1998, C. Scott Ananian <cananian@xxxxxxxxxxxxxxxxxxxx>
+ * Shared /dev/zero mmaping support, Feb 2000, Kanoj Sarcar <kanoj@xxxxxxx>
+ */
+
+#include <linux/config.h>
+#include <linux/mm.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mman.h>
+#include <linux/random.h>
+#include <linux/init.h>
+#include <linux/raw.h>
+#include <linux/tty.h>
+#include <linux/capability.h>
+#include <linux/smp_lock.h>
+#include <linux/devfs_fs_kernel.h>
+#include <linux/ptrace.h>
+#include <linux/device.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+
+#ifdef CONFIG_IA64
+# include <linux/efi.h>
+#endif
+
+#if defined(CONFIG_S390_TAPE) && defined(CONFIG_S390_TAPE_CHAR)
+extern void tapechar_init(void);
+#endif
+
+/*
+ * Architectures vary in how they handle caching for addresses
+ * outside of main memory.
+ *
+ */
+static inline int uncached_access(struct file *file, unsigned long addr)
+{
+#if defined(__i386__)
+ /*
+ * On the PPro and successors, the MTRRs are used to set
+ * memory types for physical addresses outside main memory,
+ * so blindly setting PCD or PWT on those pages is wrong.
+ * For Pentiums and earlier, the surround logic should disable
+ * caching for the high addresses through the KEN pin, but
+ * we maintain the tradition of paranoia in this code.
+ */
+ if (file->f_flags & O_SYNC)
+ return 1;
+ return !( test_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability) ||
+ test_bit(X86_FEATURE_K6_MTRR, boot_cpu_data.x86_capability) ||
+		  test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) ||
+		  test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability) )
+ && addr >= __pa(high_memory);
+#elif defined(__x86_64__)
+ /*
+ * This is broken because it can generate memory type aliases,
+ * which can cause cache corruptions
+ * But it is only available for root and we have to be bug-to-bug
+ * compatible with i386.
+ */
+ if (file->f_flags & O_SYNC)
+ return 1;
+	/* same behaviour as i386. PAT always set to cached and MTRRs control the
+ caching behaviour.
+ Hopefully a full PAT implementation will fix that soon. */
+ return 0;
+#elif defined(CONFIG_IA64)
+ /*
+	 * On ia64, we ignore O_SYNC because we cannot tolerate memory attribute aliases.
+ */
+ return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
+#elif defined(CONFIG_PPC64)
+ /* On PPC64, we always do non-cacheable access to the IO hole and
+ * cacheable elsewhere. Cache paradox can checkstop the CPU and
+ * the high_memory heuristic below is wrong on machines with memory
+ * above the IO hole... Ah, and of course, XFree86 doesn't pass
+ * O_SYNC when mapping us to tap IO space. Surprised ?
+ */
+ return !page_is_ram(addr >> PAGE_SHIFT);
+#else
+ /*
+	 * Accessing memory above the top the kernel knows about or through a file pointer
+ * that was marked O_SYNC will be done non-cached.
+ */
+ if (file->f_flags & O_SYNC)
+ return 1;
+ return addr >= __pa(high_memory);
+#endif
+}
+
+#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
+static inline int valid_phys_addr_range(unsigned long addr, size_t *count)
+{
+ unsigned long end_mem;
+
+ end_mem = __pa(high_memory);
+ if (addr >= end_mem)
+ return 0;
+
+ if (*count > end_mem - addr)
+ *count = end_mem - addr;
+
+ return 1;
+}
+#endif
+
+static ssize_t do_write_mem(void *p, unsigned long realp,
+ const char __user * buf, size_t count, loff_t *ppos)
+{
+ ssize_t written;
+ unsigned long copied;
+
+ written = 0;
+#if defined(__sparc__) || (defined(__mc68000__) && defined(CONFIG_MMU))
+ /* we don't have page 0 mapped on sparc and m68k.. */
+ if (realp < PAGE_SIZE) {
+ unsigned long sz = PAGE_SIZE-realp;
+ if (sz > count) sz = count;
+ /* Hmm. Do something? */
+ buf+=sz;
+ p+=sz;
+ count-=sz;
+ written+=sz;
+ }
+#endif
+ copied = copy_from_user(p, buf, count);
+ if (copied) {
+ ssize_t ret = written + (count - copied);
+
+ if (ret)
+ return ret;
+ return -EFAULT;
+ }
+ written += count;
+ *ppos += written;
+ return written;
+}
+
+#ifndef ARCH_HAS_DEV_MEM
+/*
+ * This function reads the *physical* memory. The f_pos points directly to the
+ * memory location.
+ */
+static ssize_t read_mem(struct file * file, char __user * buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long p = *ppos;
+ ssize_t read;
+
+ if (!valid_phys_addr_range(p, &count))
+ return -EFAULT;
+ read = 0;
+#if defined(__sparc__) || (defined(__mc68000__) && defined(CONFIG_MMU))
+ /* we don't have page 0 mapped on sparc and m68k.. */
+ if (p < PAGE_SIZE) {
+ unsigned long sz = PAGE_SIZE-p;
+ if (sz > count)
+ sz = count;
+ if (sz > 0) {
+ if (clear_user(buf, sz))
+ return -EFAULT;
+ buf += sz;
+ p += sz;
+ count -= sz;
+ read += sz;
+ }
+ }
+#endif
+ if (copy_to_user(buf, __va(p), count))
+ return -EFAULT;
+ read += count;
+ *ppos += read;
+ return read;
+}
+
+static ssize_t write_mem(struct file * file, const char __user * buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long p = *ppos;
+
+ if (!valid_phys_addr_range(p, &count))
+ return -EFAULT;
+ return do_write_mem(__va(p), p, buf, count, ppos);
+}
+#endif
+
+static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
+{
+#ifdef pgprot_noncached
+ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+ int uncached;
+
+ uncached = uncached_access(file, offset);
+ if (uncached)
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+#endif
+
+ /* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
+ if (remap_pfn_range(vma,
+ vma->vm_start,
+ vma->vm_pgoff,
+ vma->vm_end-vma->vm_start,
+ vma->vm_page_prot))
+ return -EAGAIN;
+ return 0;
+}
+
+extern long vread(char *buf, char *addr, unsigned long count);
+extern long vwrite(char *buf, char *addr, unsigned long count);
+
+/*
+ * This function reads the *virtual* memory as seen by the kernel.
+ */
+static ssize_t read_kmem(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long p = *ppos;
+ ssize_t read = 0;
+ ssize_t virtr = 0;
+ char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
+
+ if (p < (unsigned long) high_memory) {
+ read = count;
+ if (count > (unsigned long) high_memory - p)
+ read = (unsigned long) high_memory - p;
+
+#if defined(__sparc__) || (defined(__mc68000__) && defined(CONFIG_MMU))
+ /* we don't have page 0 mapped on sparc and m68k.. */
+ if (p < PAGE_SIZE && read > 0) {
+ size_t tmp = PAGE_SIZE - p;
+ if (tmp > read) tmp = read;
+ if (clear_user(buf, tmp))
+ return -EFAULT;
+ buf += tmp;
+ p += tmp;
+ read -= tmp;
+ count -= tmp;
+ }
+#endif
+ if (copy_to_user(buf, (char *)p, read))
+ return -EFAULT;
+ p += read;
+ buf += read;
+ count -= read;
+ }
+
+ if (count > 0) {
+ kbuf = (char *)__get_free_page(GFP_KERNEL);
+ if (!kbuf)
+ return -ENOMEM;
+ while (count > 0) {
+ int len = count;
+
+ if (len > PAGE_SIZE)
+ len = PAGE_SIZE;
+ len = vread(kbuf, (char *)p, len);
+ if (!len)
+ break;
+ if (copy_to_user(buf, kbuf, len)) {
+ free_page((unsigned long)kbuf);
+ return -EFAULT;
+ }
+ count -= len;
+ buf += len;
+ virtr += len;
+ p += len;
+ }
+ free_page((unsigned long)kbuf);
+ }
+ *ppos = p;
+ return virtr + read;
+}
+
+/*
+ * This function writes to the *virtual* memory as seen by the kernel.
+ */
+static ssize_t write_kmem(struct file * file, const char __user * buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long p = *ppos;
+ ssize_t wrote = 0;
+ ssize_t virtr = 0;
+ ssize_t written;
+ char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
+
+ if (p < (unsigned long) high_memory) {
+
+ wrote = count;
+ if (count > (unsigned long) high_memory - p)
+ wrote = (unsigned long) high_memory - p;
+
+ written = do_write_mem((void*)p, p, buf, wrote, ppos);
+ if (written != wrote)
+ return written;
+ wrote = written;
+ p += wrote;
+ buf += wrote;
+ count -= wrote;
+ }
+
+ if (count > 0) {
+ kbuf = (char *)__get_free_page(GFP_KERNEL);
+ if (!kbuf)
+ return wrote ? wrote : -ENOMEM;
+ while (count > 0) {
+ int len = count;
+
+ if (len > PAGE_SIZE)
+ len = PAGE_SIZE;
+ if (len) {
+ written = copy_from_user(kbuf, buf, len);
+ if (written) {
+ ssize_t ret;
+
+ free_page((unsigned long)kbuf);
+ ret = wrote + virtr + (len - written);
+ return ret ? ret : -EFAULT;
+ }
+ }
+ len = vwrite(kbuf, (char *)p, len);
+ count -= len;
+ buf += len;
+ virtr += len;
+ p += len;
+ }
+ free_page((unsigned long)kbuf);
+ }
+
+ *ppos = p;
+ return virtr + wrote;
+}
+
+#if defined(CONFIG_ISA) || !defined(__mc68000__)
+static ssize_t read_port(struct file * file, char __user * buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long i = *ppos;
+ char __user *tmp = buf;
+
+ if (verify_area(VERIFY_WRITE,buf,count))
+ return -EFAULT;
+ while (count-- > 0 && i < 65536) {
+ if (__put_user(inb(i),tmp) < 0)
+ return -EFAULT;
+ i++;
+ tmp++;
+ }
+ *ppos = i;
+ return tmp-buf;
+}
+
+static ssize_t write_port(struct file * file, const char __user * buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long i = *ppos;
+ const char __user * tmp = buf;
+
+ if (verify_area(VERIFY_READ,buf,count))
+ return -EFAULT;
+ while (count-- > 0 && i < 65536) {
+ char c;
+ if (__get_user(c, tmp))
+ return -EFAULT;
+ outb(c,i);
+ i++;
+ tmp++;
+ }
+ *ppos = i;
+ return tmp-buf;
+}
+#endif
+
+static ssize_t read_null(struct file * file, char __user * buf,
+ size_t count, loff_t *ppos)
+{
+ return 0;
+}
+
+static ssize_t write_null(struct file * file, const char __user * buf,
+ size_t count, loff_t *ppos)
+{
+ return count;
+}
+
+#ifdef CONFIG_MMU
+/*
+ * For fun, we are using the MMU for this.
+ */
+static inline size_t read_zero_pagealigned(char __user * buf, size_t size)
+{
+ struct mm_struct *mm;
+ struct vm_area_struct * vma;
+ unsigned long addr=(unsigned long)buf;
+
+ mm = current->mm;
+ /* Oops, this was forgotten before. -ben */
+ down_read(&mm->mmap_sem);
+
+ /* For private mappings, just map in zero pages. */
+ for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
+ unsigned long count;
+
+ if (vma->vm_start > addr || (vma->vm_flags & VM_WRITE) == 0)
+ goto out_up;
+ if (vma->vm_flags & (VM_SHARED | VM_HUGETLB))
+ break;
+ count = vma->vm_end - addr;
+ if (count > size)
+ count = size;
+
+ zap_page_range(vma, addr, count, NULL);
+ zeromap_page_range(vma, addr, count, PAGE_COPY);
+
+ size -= count;
+ buf += count;
+ addr += count;
+ if (size == 0)
+ goto out_up;
+ }
+
+ up_read(&mm->mmap_sem);
+
+ /* The shared case is hard. Let's do the conventional zeroing. */
+ do {
+ unsigned long unwritten = clear_user(buf, PAGE_SIZE);
+ if (unwritten)
+ return size + unwritten - PAGE_SIZE;
+ cond_resched();
+ buf += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ } while (size);
+
+ return size;
+out_up:
+ up_read(&mm->mmap_sem);
+ return size;
+}
+
+static ssize_t read_zero(struct file * file, char __user * buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long left, unwritten, written = 0;
+
+ if (!count)
+ return 0;
+
+ if (!access_ok(VERIFY_WRITE, buf, count))
+ return -EFAULT;
+
+ left = count;
+
+ /* do we want to be clever? Arbitrary cut-off */
+ if (count >= PAGE_SIZE*4) {
+ unsigned long partial;
+
+ /* How much left of the page? */
+ partial = (PAGE_SIZE-1) & -(unsigned long) buf;
+ unwritten = clear_user(buf, partial);
+ written = partial - unwritten;
+ if (unwritten)
+ goto out;
+ left -= partial;
+ buf += partial;
+ unwritten = read_zero_pagealigned(buf, left & PAGE_MASK);
+ written += (left & PAGE_MASK) - unwritten;
+ if (unwritten)
+ goto out;
+ buf += left & PAGE_MASK;
+ left &= ~PAGE_MASK;
+ }
+ unwritten = clear_user(buf, left);
+ written += left - unwritten;
+out:
+ return written ? written : -EFAULT;
+}
+
+static int mmap_zero(struct file * file, struct vm_area_struct * vma)
+{
+ if (vma->vm_flags & VM_SHARED)
+ return shmem_zero_setup(vma);
+	if (zeromap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start, vma->vm_page_prot))
+ return -EAGAIN;
+ return 0;
+}
+#else /* CONFIG_MMU */
+static ssize_t read_zero(struct file * file, char * buf,
+ size_t count, loff_t *ppos)
+{
+ size_t todo = count;
+
+ while (todo) {
+ size_t chunk = todo;
+
+ if (chunk > 4096)
+ chunk = 4096; /* Just for latency reasons */
+ if (clear_user(buf, chunk))
+ return -EFAULT;
+ buf += chunk;
+ todo -= chunk;
+ cond_resched();
+ }
+ return count;
+}
+
+static int mmap_zero(struct file * file, struct vm_area_struct * vma)
+{
+ return -ENOSYS;
+}
+#endif /* CONFIG_MMU */
+
+static ssize_t write_full(struct file * file, const char __user * buf,
+ size_t count, loff_t *ppos)
+{
+ return -ENOSPC;
+}
+
+/*
+ * Special lseek() function for /dev/null and /dev/zero. Most notably, you
+ * can fopen() both devices with "a" now. This was previously impossible.
+ * -- SRB.
+ */
+
+static loff_t null_lseek(struct file * file, loff_t offset, int orig)
+{
+ return file->f_pos = 0;
+}
+
+/*
+ * The memory devices use the full 32/64 bits of the offset, and so we cannot
+ * check against negative addresses: they are ok. The return value is weird,
+ * though, in that case (0).
+ *
+ * also note that seeking relative to the "end of file" isn't supported:
+ * it has no meaning, so it returns -EINVAL.
+ */
+static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
+{
+ loff_t ret;
+
+ down(&file->f_dentry->d_inode->i_sem);
+ switch (orig) {
+ case 0:
+ file->f_pos = offset;
+ ret = file->f_pos;
+ force_successful_syscall_return();
+ break;
+ case 1:
+ file->f_pos += offset;
+ ret = file->f_pos;
+ force_successful_syscall_return();
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ up(&file->f_dentry->d_inode->i_sem);
+ return ret;
+}
+
+static int open_port(struct inode * inode, struct file * filp)
+{
+ return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
+}
+
+#define mmap_mem mmap_kmem
+#define zero_lseek null_lseek
+#define full_lseek null_lseek
+#define write_zero write_null
+#define read_full read_zero
+#define open_mem open_port
+#define open_kmem open_mem
+
+#ifndef ARCH_HAS_DEV_MEM
+static struct file_operations mem_fops = {
+ .llseek = memory_lseek,
+ .read = read_mem,
+ .write = write_mem,
+ .mmap = mmap_mem,
+ .open = open_mem,
+};
+#else
+extern struct file_operations mem_fops;
+#endif
+
+static struct file_operations kmem_fops = {
+ .llseek = memory_lseek,
+ .read = read_kmem,
+ .write = write_kmem,
+ .mmap = mmap_kmem,
+ .open = open_kmem,
+};
+
+static struct file_operations null_fops = {
+ .llseek = null_lseek,
+ .read = read_null,
+ .write = write_null,
+};
+
+#if defined(CONFIG_ISA) || !defined(__mc68000__)
+static struct file_operations port_fops = {
+ .llseek = memory_lseek,
+ .read = read_port,
+ .write = write_port,
+ .open = open_port,
+};
+#endif
+
+static struct file_operations zero_fops = {
+ .llseek = zero_lseek,
+ .read = read_zero,
+ .write = write_zero,
+ .mmap = mmap_zero,
+};
+
+static struct file_operations full_fops = {
+ .llseek = full_lseek,
+ .read = read_full,
+ .write = write_full,
+};
+
+static ssize_t kmsg_write(struct file * file, const char __user * buf,
+ size_t count, loff_t *ppos)
+{
+ char *tmp;
+ int ret;
+
+ tmp = kmalloc(count + 1, GFP_KERNEL);
+ if (tmp == NULL)
+ return -ENOMEM;
+ ret = -EFAULT;
+ if (!copy_from_user(tmp, buf, count)) {
+ tmp[count] = 0;
+ ret = printk("%s", tmp);
+ }
+ kfree(tmp);
+ return ret;
+}
+
+static struct file_operations kmsg_fops = {
+ .write = kmsg_write,
+};
+
+static int memory_open(struct inode * inode, struct file * filp)
+{
+ switch (iminor(inode)) {
+ case 1:
+ filp->f_op = &mem_fops;
+ break;
+ case 2:
+ filp->f_op = &kmem_fops;
+ break;
+ case 3:
+ filp->f_op = &null_fops;
+ break;
+#if defined(CONFIG_ISA) || !defined(__mc68000__)
+ case 4:
+ filp->f_op = &port_fops;
+ break;
+#endif
+ case 5:
+ filp->f_op = &zero_fops;
+ break;
+ case 7:
+ filp->f_op = &full_fops;
+ break;
+ case 8:
+ filp->f_op = &random_fops;
+ break;
+ case 9:
+ filp->f_op = &urandom_fops;
+ break;
+ case 11:
+ filp->f_op = &kmsg_fops;
+ break;
+ default:
+ return -ENXIO;
+ }
+ if (filp->f_op && filp->f_op->open)
+ return filp->f_op->open(inode,filp);
+ return 0;
+}
+
+static struct file_operations memory_fops = {
+ .open = memory_open, /* just a selector for the real open */
+};
+
+static const struct {
+ unsigned int minor;
+ char *name;
+ umode_t mode;
+ struct file_operations *fops;
+} devlist[] = { /* list of minor devices */
+ {1, "mem", S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
+ {2, "kmem", S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops},
+ {3, "null", S_IRUGO | S_IWUGO, &null_fops},
+#if defined(CONFIG_ISA) || !defined(__mc68000__)
+ {4, "port", S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},
+#endif
+ {5, "zero", S_IRUGO | S_IWUGO, &zero_fops},
+ {7, "full", S_IRUGO | S_IWUGO, &full_fops},
+ {8, "random", S_IRUGO | S_IWUSR, &random_fops},
+ {9, "urandom", S_IRUGO | S_IWUSR, &urandom_fops},
+ {11,"kmsg", S_IRUGO | S_IWUSR, &kmsg_fops},
+};
+
+static struct class_simple *mem_class;
+
+static int __init chr_dev_init(void)
+{
+ int i;
+
+ if (register_chrdev(MEM_MAJOR,"mem",&memory_fops))
+ printk("unable to get major %d for memory devs\n", MEM_MAJOR);
+
+ mem_class = class_simple_create(THIS_MODULE, "mem");
+ for (i = 0; i < ARRAY_SIZE(devlist); i++) {
+ class_simple_device_add(mem_class,
+ MKDEV(MEM_MAJOR, devlist[i].minor),
+ NULL, devlist[i].name);
+ devfs_mk_cdev(MKDEV(MEM_MAJOR, devlist[i].minor),
+ S_IFCHR | devlist[i].mode, devlist[i].name);
+ }
+
+ return 0;
+}
+
+fs_initcall(chr_dev_init);
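For orientation, a hedged userspace sketch of the path these minors serve: mapping a page of physical memory through /dev/mem. open_mem() above (an alias for open_port()) demands CAP_SYS_RAWIO, O_SYNC makes uncached_access() return 1 on most architectures, and mmap_mem()/mmap_kmem() treat vm_pgoff as a physical page frame. The offset 0xb8000 (VGA text memory) is only an example:

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	/* requires CAP_SYS_RAWIO; O_SYNC requests an uncached mapping */
	int fd = open("/dev/mem", O_RDWR | O_SYNC);
	if (fd < 0)
		return 1;

	/* offset 0xb8000 becomes vm_pgoff 0xb8 after the PAGE_SHIFT shift */
	volatile unsigned short *vga = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
					    MAP_SHARED, fd, 0xb8000);
	if (vga == MAP_FAILED)
		return 1;

	printf("first VGA cell: 0x%04x\n", vga[0]);
	munmap((void *)vga, 4096);
	close(fd);
	return 0;
}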
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/drivers/char/tty_io.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/drivers/char/tty_io.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,2988 @@
+/*
+ * linux/drivers/char/tty_io.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+/*
+ * 'tty_io.c' gives an orthogonal feeling to tty's, be they consoles
+ * or rs-channels. It also implements echoing, cooked mode etc.
+ *
+ * Kill-line thanks to John T Kohl, who also corrected VMIN = VTIME = 0.
+ *
+ * Modified by Theodore Ts'o, 9/14/92, to dynamically allocate the
+ * tty_struct and tty_queue structures. Previously there was an array
+ * of 256 tty_struct's which was statically allocated, and the
+ * tty_queue structures were allocated at boot time. Both are now
+ * dynamically allocated only when the tty is open.
+ *
+ * Also restructured routines so that there is more of a separation
+ * between the high-level tty routines (tty_io.c and tty_ioctl.c) and
+ * the low-level tty routines (serial.c, pty.c, console.c). This
+ * makes for cleaner and more compact code. -TYT, 9/17/92
+ *
+ * Modified by Fred N. van Kempen, 01/29/93, to add line disciplines
+ * which can be dynamically activated and de-activated by the line
+ * discipline handling modules (like SLIP).
+ *
+ * NOTE: pay no attention to the line discipline code (yet); its
+ * interface is still subject to change in this version...
+ * -- TYT, 1/31/92
+ *
+ * Added functionality to the OPOST tty handling. No delays, but all
+ * other bits should be there.
+ * -- Nick Holloway <alfie@xxxxxxxxxxxxxxxxx>, 27th May 1993.
+ *
+ * Rewrote canonical mode and added more termios flags.
+ * -- julian@xxxxxxxxxxxxxxxxxxxxxx (J. Cowley), 13Jan94
+ *
+ * Reorganized FASYNC support so mouse code can share it.
+ * -- ctm@xxxxxxxx, 9Sep95
+ *
+ * New TIOCLINUX variants added.
+ * -- mj@xxxxxxxxxxxxxxxxx, 19-Nov-95
+ *
+ * Restrict vt switching via ioctl()
+ * -- grif@xxxxxxxxxx, 5-Dec-95
+ *
+ * Move console and virtual terminal code to more appropriate files,
+ * implement CONFIG_VT and generalize console device interface.
+ * -- Marko Kohtala <Marko.Kohtala@xxxxxx>, March 97
+ *
+ * Rewrote init_dev and release_dev to eliminate races.
+ * -- Bill Hawes <whawes@xxxxxxxx>, June 97
+ *
+ * Added devfs support.
+ * -- C. Scott Ananian <cananian@xxxxxxxxxxxxxxxxxxxx>, 13-Jan-1998
+ *
+ * Added support for a Unix98-style ptmx device.
+ * -- C. Scott Ananian <cananian@xxxxxxxxxxxxxxxxxxxx>, 14-Jan-1998
+ *
+ * Reduced memory usage for older ARM systems
+ * -- Russell King <rmk@xxxxxxxxxxxxxxxx>
+ *
+ * Move do_SAK() into process context. Less stack use in devfs functions.
+ * alloc_tty_struct() always uses kmalloc() -- Andrew Morton
<andrewm@xxxxxxxxxx> 17Mar01
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/major.h>
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/fcntl.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <linux/devpts_fs.h>
+#include <linux/file.h>
+#include <linux/console.h>
+#include <linux/timer.h>
+#include <linux/ctype.h>
+#include <linux/kd.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/proc_fs.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/smp_lock.h>
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <linux/wait.h>
+#include <linux/bitops.h>
+
+#include <asm/uaccess.h>
+#include <asm/system.h>
+
+#include <linux/kbd_kern.h>
+#include <linux/vt_kern.h>
+#include <linux/selection.h>
+#include <linux/devfs_fs_kernel.h>
+
+#include <linux/kmod.h>
+
+#undef TTY_DEBUG_HANGUP
+
+#define TTY_PARANOIA_CHECK 1
+#define CHECK_TTY_COUNT 1
+
+struct termios tty_std_termios = { /* for the benefit of tty drivers */
+ .c_iflag = ICRNL | IXON,
+ .c_oflag = OPOST | ONLCR,
+ .c_cflag = B38400 | CS8 | CREAD | HUPCL,
+ .c_lflag = ISIG | ICANON | ECHO | ECHOE | ECHOK |
+ ECHOCTL | ECHOKE | IEXTEN,
+ .c_cc = INIT_C_CC
+};
+
+EXPORT_SYMBOL(tty_std_termios);
+
+/* This list gets poked at by procfs and various bits of boot up code. This
+ could do with some rationalisation such as pulling the tty proc function
+ into this file */
+
+LIST_HEAD(tty_drivers); /* linked list of tty drivers */
+
+/* Semaphore to protect creating and releasing a tty. This is shared with
+ vt.c for deeply disgusting hack reasons */
+DECLARE_MUTEX(tty_sem);
+
+int console_use_vt = 1;
+
+#ifdef CONFIG_UNIX98_PTYS
+extern struct tty_driver *ptm_driver; /* Unix98 pty masters; for /dev/ptmx */
+extern int pty_limit; /* Config limit on Unix98 ptys */
+static DEFINE_IDR(allocated_ptys);
+static DECLARE_MUTEX(allocated_ptys_lock);
+static int ptmx_open(struct inode *, struct file *);
+#endif
+
+extern void disable_early_printk(void);
+
+static void initialize_tty_struct(struct tty_struct *tty);
+
+static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
+static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
+ssize_t redirected_tty_write(struct file *, const char __user *, size_t, loff_t *);
+static unsigned int tty_poll(struct file *, poll_table *);
+static int tty_open(struct inode *, struct file *);
+static int tty_release(struct inode *, struct file *);
+int tty_ioctl(struct inode * inode, struct file * file,
+ unsigned int cmd, unsigned long arg);
+static int tty_fasync(int fd, struct file * filp, int on);
+extern void rs_360_init(void);
+static void release_mem(struct tty_struct *tty, int idx);
+
+
+static struct tty_struct *alloc_tty_struct(void)
+{
+ struct tty_struct *tty;
+
+ tty = kmalloc(sizeof(struct tty_struct), GFP_KERNEL);
+ if (tty)
+ memset(tty, 0, sizeof(struct tty_struct));
+ return tty;
+}
+
+static inline void free_tty_struct(struct tty_struct *tty)
+{
+ kfree(tty->write_buf);
+ kfree(tty);
+}
+
+#define TTY_NUMBER(tty) ((tty)->index + (tty)->driver->name_base)
+
+char *tty_name(struct tty_struct *tty, char *buf)
+{
+ if (!tty) /* Hmm. NULL pointer. That's fun. */
+ strcpy(buf, "NULL tty");
+ else
+ strcpy(buf, tty->name);
+ return buf;
+}
+
+EXPORT_SYMBOL(tty_name);
+
+inline int tty_paranoia_check(struct tty_struct *tty, struct inode *inode,
+ const char *routine)
+{
+#ifdef TTY_PARANOIA_CHECK
+ if (!tty) {
+ printk(KERN_WARNING
+ "null TTY for (%d:%d) in %s\n",
+ imajor(inode), iminor(inode), routine);
+ return 1;
+ }
+ if (tty->magic != TTY_MAGIC) {
+ printk(KERN_WARNING
+ "bad magic number for tty struct (%d:%d) in %s\n",
+ imajor(inode), iminor(inode), routine);
+ return 1;
+ }
+#endif
+ return 0;
+}
+
+static int check_tty_count(struct tty_struct *tty, const char *routine)
+{
+#ifdef CHECK_TTY_COUNT
+ struct list_head *p;
+ int count = 0;
+
+ file_list_lock();
+ list_for_each(p, &tty->tty_files) {
+ count++;
+ }
+ file_list_unlock();
+ if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
+ tty->driver->subtype == PTY_TYPE_SLAVE &&
+ tty->link && tty->link->count)
+ count++;
+ if (tty->count != count) {
+ printk(KERN_WARNING "Warning: dev (%s) tty->count(%d) "
+ "!= #fd's(%d) in %s\n",
+ tty->name, tty->count, count, routine);
+ return count;
+ }
+#endif
+ return 0;
+}
+
+/*
+ * This is probably overkill for real world processors but
+ * they are not on hot paths so a little discipline won't do
+ * any harm.
+ */
+
+static void tty_set_termios_ldisc(struct tty_struct *tty, int num)
+{
+ down(&tty->termios_sem);
+ tty->termios->c_line = num;
+ up(&tty->termios_sem);
+}
+
+/*
+ * This guards the refcounted line discipline lists. The lock
+ * must be taken with irqs off because there are hangup path
+ * callers who will do ldisc lookups and cannot sleep.
+ */
+
+static DEFINE_SPINLOCK(tty_ldisc_lock);
+static DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_wait);
+static struct tty_ldisc tty_ldiscs[NR_LDISCS];	/* line disc dispatch table */
+
+int tty_register_ldisc(int disc, struct tty_ldisc *new_ldisc)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ if (disc < N_TTY || disc >= NR_LDISCS)
+ return -EINVAL;
+
+ spin_lock_irqsave(&tty_ldisc_lock, flags);
+ if (new_ldisc) {
+ tty_ldiscs[disc] = *new_ldisc;
+ tty_ldiscs[disc].num = disc;
+ tty_ldiscs[disc].flags |= LDISC_FLAG_DEFINED;
+ tty_ldiscs[disc].refcount = 0;
+ } else {
+ if(tty_ldiscs[disc].refcount)
+ ret = -EBUSY;
+ else
+ tty_ldiscs[disc].flags &= ~LDISC_FLAG_DEFINED;
+ }
+ spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+
+ return ret;
+}
+
+EXPORT_SYMBOL(tty_register_ldisc);
+
+struct tty_ldisc *tty_ldisc_get(int disc)
+{
+ unsigned long flags;
+ struct tty_ldisc *ld;
+
+ if (disc < N_TTY || disc >= NR_LDISCS)
+ return NULL;
+
+ spin_lock_irqsave(&tty_ldisc_lock, flags);
+
+ ld = &tty_ldiscs[disc];
+ /* Check the entry is defined */
+ if(ld->flags & LDISC_FLAG_DEFINED)
+ {
+ /* If the module is being unloaded we can't use it */
+ if (!try_module_get(ld->owner))
+ ld = NULL;
+ else /* lock it */
+ ld->refcount++;
+ }
+ else
+ ld = NULL;
+ spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+ return ld;
+}
+
+EXPORT_SYMBOL_GPL(tty_ldisc_get);
+
+void tty_ldisc_put(int disc)
+{
+ struct tty_ldisc *ld;
+ unsigned long flags;
+
+ if (disc < N_TTY || disc >= NR_LDISCS)
+ BUG();
+
+ spin_lock_irqsave(&tty_ldisc_lock, flags);
+ ld = &tty_ldiscs[disc];
+ if(ld->refcount == 0)
+ BUG();
+ ld->refcount --;
+ module_put(ld->owner);
+ spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+}
+
+EXPORT_SYMBOL_GPL(tty_ldisc_put);
+
+static void tty_ldisc_assign(struct tty_struct *tty, struct tty_ldisc *ld)
+{
+ tty->ldisc = *ld;
+ tty->ldisc.refcount = 0;
+}
+
+/**
+ * tty_ldisc_try - internal helper
+ * @tty: the tty
+ *
+ * Make a single attempt to grab and bump the refcount on
+ * the tty ldisc. Return 0 on failure or 1 on success. This is
+ * used to implement both the waiting and non waiting versions
+ * of tty_ldisc_ref
+ */
+
+static int tty_ldisc_try(struct tty_struct *tty)
+{
+ unsigned long flags;
+ struct tty_ldisc *ld;
+ int ret = 0;
+
+ spin_lock_irqsave(&tty_ldisc_lock, flags);
+ ld = &tty->ldisc;
+ if(test_bit(TTY_LDISC, &tty->flags))
+ {
+ ld->refcount++;
+ ret = 1;
+ }
+ spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+ return ret;
+}
+
+/**
+ * tty_ldisc_ref_wait - wait for the tty ldisc
+ * @tty: tty device
+ *
+ * Dereference the line discipline for the terminal and take a
+ * reference to it. If the line discipline is in flux then
+ * wait patiently until it changes.
+ *
+ * Note: Must not be called from an IRQ/timer context. The caller
+ * must also be careful not to hold other locks that will deadlock
+ * against a discipline change, such as an existing ldisc reference
+ * (which we check for)
+ */
+
+struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *tty)
+{
+ /* wait_event is a macro */
+ wait_event(tty_ldisc_wait, tty_ldisc_try(tty));
+ if(tty->ldisc.refcount == 0)
+ printk(KERN_ERR "tty_ldisc_ref_wait\n");
+ return &tty->ldisc;
+}
+
+EXPORT_SYMBOL_GPL(tty_ldisc_ref_wait);
+
+/**
+ * tty_ldisc_ref - get the tty ldisc
+ * @tty: tty device
+ *
+ * Dereference the line discipline for the terminal and take a
+ * reference to it. If the line discipline is in flux then
+ * return NULL. Can be called from IRQ and timer functions.
+ */
+
+struct tty_ldisc *tty_ldisc_ref(struct tty_struct *tty)
+{
+ if(tty_ldisc_try(tty))
+ return &tty->ldisc;
+ return NULL;
+}
+
+EXPORT_SYMBOL_GPL(tty_ldisc_ref);
+
+/**
+ * tty_ldisc_deref - free a tty ldisc reference
+ * @ld: reference to free up
+ *
+ * Undoes the effect of tty_ldisc_ref or tty_ldisc_ref_wait. May
+ * be called in IRQ context.
+ */
+
+void tty_ldisc_deref(struct tty_ldisc *ld)
+{
+ unsigned long flags;
+
+ if(ld == NULL)
+ BUG();
+
+ spin_lock_irqsave(&tty_ldisc_lock, flags);
+ if(ld->refcount == 0)
+ printk(KERN_ERR "tty_ldisc_deref: no references.\n");
+ else
+ ld->refcount--;
+ if(ld->refcount == 0)
+ wake_up(&tty_ldisc_wait);
+ spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+}
+
+EXPORT_SYMBOL_GPL(tty_ldisc_deref);
+
+/**
+ * tty_ldisc_enable - allow ldisc use
+ * @tty: terminal to activate ldisc on
+ *
+ * Set the TTY_LDISC flag when the line discipline can be called
+ *	again. Do necessary wakeups for existing sleepers.
+ *
+ * Note: nobody should set this bit except via this function. Clearing
+ * directly is allowed.
+ */
+
+static void tty_ldisc_enable(struct tty_struct *tty)
+{
+ set_bit(TTY_LDISC, &tty->flags);
+ wake_up(&tty_ldisc_wait);
+}
+
+/**
+ * tty_set_ldisc - set line discipline
+ * @tty: the terminal to set
+ * @ldisc: the line discipline
+ *
+ * Set the discipline of a tty line. Must be called from a process
+ * context.
+ */
+
+static int tty_set_ldisc(struct tty_struct *tty, int ldisc)
+{
+ int retval = 0;
+ struct tty_ldisc o_ldisc;
+ char buf[64];
+ int work;
+ unsigned long flags;
+ struct tty_ldisc *ld;
+
+ if ((ldisc < N_TTY) || (ldisc >= NR_LDISCS))
+ return -EINVAL;
+
+restart:
+
+ if (tty->ldisc.num == ldisc)
+ return 0; /* We are already in the desired discipline */
+
+ ld = tty_ldisc_get(ldisc);
+ /* Eduardo Blanco <ejbs@xxxxxxxxxxxx> */
+ /* Cyrus Durgin <cider@xxxxxxxxxxxxx> */
+ if (ld == NULL) {
+ request_module("tty-ldisc-%d", ldisc);
+ ld = tty_ldisc_get(ldisc);
+ }
+ if (ld == NULL)
+ return -EINVAL;
+
+ o_ldisc = tty->ldisc;
+
+ tty_wait_until_sent(tty, 0);
+
+ /*
+ * Make sure we don't change while someone holds a
+ * reference to the line discipline. The TTY_LDISC bit
+ * prevents anyone taking a reference once it is clear.
+ * We need the lock to avoid racing reference takers.
+ */
+
+ spin_lock_irqsave(&tty_ldisc_lock, flags);
+ if(tty->ldisc.refcount)
+ {
+		/* Free the new ldisc we grabbed. Must drop the lock
+		   first. */
+ spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+ tty_ldisc_put(ldisc);
+ /*
+ * There are several reasons we may be busy, including
+ * random momentary I/O traffic. We must therefore
+ * retry. We could distinguish between blocking ops
+ * and retries if we made tty_ldisc_wait() smarter. That
+ * is up for discussion.
+ */
+		if(wait_event_interruptible(tty_ldisc_wait, tty->ldisc.refcount == 0) < 0)
+ return -ERESTARTSYS;
+ goto restart;
+ }
+ clear_bit(TTY_LDISC, &tty->flags);
+ clear_bit(TTY_DONT_FLIP, &tty->flags);
+ spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+
+ /*
+ * From this point on we know nobody has an ldisc
+ * usage reference, nor can they obtain one until
+ * we say so later on.
+ */
+
+ work = cancel_delayed_work(&tty->flip.work);
+ /*
+ * Wait for ->hangup_work and ->flip.work handlers to terminate
+ */
+
+ flush_scheduled_work();
+ /* Shutdown the current discipline. */
+ if (tty->ldisc.close)
+ (tty->ldisc.close)(tty);
+
+ /* Now set up the new line discipline. */
+ tty_ldisc_assign(tty, ld);
+ tty_set_termios_ldisc(tty, ldisc);
+ if (tty->ldisc.open)
+ retval = (tty->ldisc.open)(tty);
+ if (retval < 0) {
+ tty_ldisc_put(ldisc);
+ /* There is an outstanding reference here so this is safe */
+ tty_ldisc_assign(tty, tty_ldisc_get(o_ldisc.num));
+ tty_set_termios_ldisc(tty, tty->ldisc.num);
+ if (tty->ldisc.open && (tty->ldisc.open(tty) < 0)) {
+ tty_ldisc_put(o_ldisc.num);
+ /* This driver is always present */
+ tty_ldisc_assign(tty, tty_ldisc_get(N_TTY));
+ tty_set_termios_ldisc(tty, N_TTY);
+ if (tty->ldisc.open) {
+ int r = tty->ldisc.open(tty);
+
+ if (r < 0)
+ panic("Couldn't open N_TTY ldisc for "
+ "%s --- error %d.",
+ tty_name(tty, buf), r);
+ }
+ }
+ }
+	/* At this point we hold a reference to the new ldisc and
+	   a reference to the old ldisc. If we ended up flipping back
+ to the existing ldisc we have two references to it */
+
+ if (tty->ldisc.num != o_ldisc.num && tty->driver->set_ldisc)
+ tty->driver->set_ldisc(tty);
+
+ tty_ldisc_put(o_ldisc.num);
+
+ /*
+ * Allow ldisc referencing to occur as soon as the driver
+ * ldisc callback completes.
+ */
+
+ tty_ldisc_enable(tty);
+
+ /* Restart it in case no characters kick it off. Safe if
+ already running */
+ if(work)
+ schedule_delayed_work(&tty->flip.work, 1);
+ return retval;
+}
+
+/*
+ * This routine returns a tty driver structure, given a device number
+ */
+static struct tty_driver *get_tty_driver(dev_t device, int *index)
+{
+ struct tty_driver *p;
+
+ list_for_each_entry(p, &tty_drivers, tty_drivers) {
+ dev_t base = MKDEV(p->major, p->minor_start);
+ if (device < base || device >= base + p->num)
+ continue;
+ *index = device - base;
+ return p;
+ }
+ return NULL;
+}
+
+/*
+ * If we try to write to, or set the state of, a terminal and we're
+ * not in the foreground, send a SIGTTOU. If the signal is blocked or
+ * ignored, go ahead and perform the operation. (POSIX 7.2)
+ */
+int tty_check_change(struct tty_struct * tty)
+{
+ if (current->signal->tty != tty)
+ return 0;
+ if (tty->pgrp <= 0) {
+ printk(KERN_WARNING "tty_check_change: tty->pgrp <= 0!\n");
+ return 0;
+ }
+ if (process_group(current) == tty->pgrp)
+ return 0;
+ if (is_ignored(SIGTTOU))
+ return 0;
+ if (is_orphaned_pgrp(process_group(current)))
+ return -EIO;
+ (void) kill_pg(process_group(current), SIGTTOU, 1);
+ return -ERESTARTSYS;
+}
+
+EXPORT_SYMBOL(tty_check_change);
+
+static ssize_t hung_up_tty_read(struct file * file, char __user * buf,
+ size_t count, loff_t *ppos)
+{
+ return 0;
+}
+
+static ssize_t hung_up_tty_write(struct file * file, const char __user * buf,
+ size_t count, loff_t *ppos)
+{
+ return -EIO;
+}
+
+/* No kernel lock held - none needed ;) */
+static unsigned int hung_up_tty_poll(struct file * filp, poll_table * wait)
+{
+ return POLLIN | POLLOUT | POLLERR | POLLHUP | POLLRDNORM | POLLWRNORM;
+}
+
+static int hung_up_tty_ioctl(struct inode * inode, struct file * file,
+ unsigned int cmd, unsigned long arg)
+{
+ return cmd == TIOCSPGRP ? -ENOTTY : -EIO;
+}
+
+static struct file_operations tty_fops = {
+ .llseek = no_llseek,
+ .read = tty_read,
+ .write = tty_write,
+ .poll = tty_poll,
+ .ioctl = tty_ioctl,
+ .open = tty_open,
+ .release = tty_release,
+ .fasync = tty_fasync,
+};
+
+#ifdef CONFIG_UNIX98_PTYS
+static struct file_operations ptmx_fops = {
+ .llseek = no_llseek,
+ .read = tty_read,
+ .write = tty_write,
+ .poll = tty_poll,
+ .ioctl = tty_ioctl,
+ .open = ptmx_open,
+ .release = tty_release,
+ .fasync = tty_fasync,
+};
+#endif
+
+static struct file_operations console_fops = {
+ .llseek = no_llseek,
+ .read = tty_read,
+ .write = redirected_tty_write,
+ .poll = tty_poll,
+ .ioctl = tty_ioctl,
+ .open = tty_open,
+ .release = tty_release,
+ .fasync = tty_fasync,
+};
+
+static struct file_operations hung_up_tty_fops = {
+ .llseek = no_llseek,
+ .read = hung_up_tty_read,
+ .write = hung_up_tty_write,
+ .poll = hung_up_tty_poll,
+ .ioctl = hung_up_tty_ioctl,
+ .release = tty_release,
+};
+
+static DEFINE_SPINLOCK(redirect_lock);
+static struct file *redirect;
+
+/**
+ * tty_wakeup - request more data
+ * @tty: terminal
+ *
+ * Internal and external helper for wakeups of tty. This function
+ * informs the line discipline if present that the driver is ready
+ * to receive more output data.
+ */
+
+void tty_wakeup(struct tty_struct *tty)
+{
+ struct tty_ldisc *ld;
+
+ if (test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags)) {
+ ld = tty_ldisc_ref(tty);
+ if(ld) {
+ if(ld->write_wakeup)
+ ld->write_wakeup(tty);
+ tty_ldisc_deref(ld);
+ }
+ }
+ wake_up_interruptible(&tty->write_wait);
+}
+
+EXPORT_SYMBOL_GPL(tty_wakeup);
+
+/**
+ * tty_ldisc_flush - flush line discipline queue
+ * @tty: tty
+ *
+ * Flush the line discipline queue (if any) for this tty. If there
+ * is no line discipline active this is a no-op.
+ */
+
+void tty_ldisc_flush(struct tty_struct *tty)
+{
+ struct tty_ldisc *ld = tty_ldisc_ref(tty);
+ if(ld) {
+ if(ld->flush_buffer)
+ ld->flush_buffer(tty);
+ tty_ldisc_deref(ld);
+ }
+}
+
+EXPORT_SYMBOL_GPL(tty_ldisc_flush);
+
+/*
+ * This can be called by the "eventd" kernel thread. That is process synchronous,
+ * but doesn't hold any locks, so we need to make sure we have the appropriate
+ * locks for what we're doing..
+ */
+static void do_tty_hangup(void *data)
+{
+ struct tty_struct *tty = (struct tty_struct *) data;
+ struct file * cons_filp = NULL;
+ struct file *filp, *f = NULL;
+ struct task_struct *p;
+ struct tty_ldisc *ld;
+ int closecount = 0, n;
+
+ if (!tty)
+ return;
+
+ /* inuse_filps is protected by the single kernel lock */
+ lock_kernel();
+
+ spin_lock(&redirect_lock);
+ if (redirect && redirect->private_data == tty) {
+ f = redirect;
+ redirect = NULL;
+ }
+ spin_unlock(&redirect_lock);
+
+ check_tty_count(tty, "do_tty_hangup");
+ file_list_lock();
+ /* This breaks for file handles being sent over AF_UNIX sockets ? */
+ list_for_each_entry(filp, &tty->tty_files, f_list) {
+ if (filp->f_op->write == redirected_tty_write)
+ cons_filp = filp;
+ if (filp->f_op->write != tty_write)
+ continue;
+ closecount++;
+ tty_fasync(-1, filp, 0); /* can't block */
+ filp->f_op = &hung_up_tty_fops;
+ }
+ file_list_unlock();
+
+	/* FIXME! What are the locking issues here? This may be overdoing things..
+	 * this question is especially important now that we've removed the irqlock. */
+
+ ld = tty_ldisc_ref(tty);
+ if(ld != NULL) /* We may have no line discipline at this point */
+ {
+ if (ld->flush_buffer)
+ ld->flush_buffer(tty);
+ if (tty->driver->flush_buffer)
+ tty->driver->flush_buffer(tty);
+ if ((test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags)) &&
+ ld->write_wakeup)
+ ld->write_wakeup(tty);
+ if (ld->hangup)
+ ld->hangup(tty);
+ }
+
+ /* FIXME: Once we trust the LDISC code better we can wait here for
+ ldisc completion and fix the driver call race */
+
+ wake_up_interruptible(&tty->write_wait);
+ wake_up_interruptible(&tty->read_wait);
+
+ /*
+ * Shutdown the current line discipline, and reset it to
+ * N_TTY.
+ */
+ if (tty->driver->flags & TTY_DRIVER_RESET_TERMIOS)
+ {
+ down(&tty->termios_sem);
+ *tty->termios = tty->driver->init_termios;
+ up(&tty->termios_sem);
+ }
+
+ /* Defer ldisc switch */
+ /* tty_deferred_ldisc_switch(N_TTY);
+
+ This should get done automatically when the port closes and
+ tty_release is called */
+
+ read_lock(&tasklist_lock);
+ if (tty->session > 0) {
+ do_each_task_pid(tty->session, PIDTYPE_SID, p) {
+ if (p->signal->tty == tty)
+ p->signal->tty = NULL;
+ if (!p->signal->leader)
+ continue;
+ send_group_sig_info(SIGHUP, SEND_SIG_PRIV, p);
+ send_group_sig_info(SIGCONT, SEND_SIG_PRIV, p);
+ if (tty->pgrp > 0)
+ p->signal->tty_old_pgrp = tty->pgrp;
+ } while_each_task_pid(tty->session, PIDTYPE_SID, p);
+ }
+ read_unlock(&tasklist_lock);
+
+ tty->flags = 0;
+ tty->session = 0;
+ tty->pgrp = -1;
+ tty->ctrl_status = 0;
+ /*
+ * If one of the devices matches a console pointer, we
+ * cannot just call hangup() because that will cause
+ * tty->count and state->count to go out of sync.
+ * So we just call close() the right number of times.
+ */
+ if (cons_filp) {
+ if (tty->driver->close)
+ for (n = 0; n < closecount; n++)
+ tty->driver->close(tty, cons_filp);
+ } else if (tty->driver->hangup)
+ (tty->driver->hangup)(tty);
+
+ /* We don't want to have driver/ldisc interactions beyond
+ the ones we did here. The driver layer expects no
+ calls after ->hangup() from the ldisc side. However we
+ can't yet guarantee all that */
+
+ set_bit(TTY_HUPPED, &tty->flags);
+ if (ld) {
+ tty_ldisc_enable(tty);
+ tty_ldisc_deref(ld);
+ }
+ unlock_kernel();
+ if (f)
+ fput(f);
+}
+
+void tty_hangup(struct tty_struct * tty)
+{
+#ifdef TTY_DEBUG_HANGUP
+ char buf[64];
+
+ printk(KERN_DEBUG "%s hangup...\n", tty_name(tty, buf));
+#endif
+ schedule_work(&tty->hangup_work);
+}
+
+EXPORT_SYMBOL(tty_hangup);
+
+void tty_vhangup(struct tty_struct * tty)
+{
+#ifdef TTY_DEBUG_HANGUP
+ char buf[64];
+
+ printk(KERN_DEBUG "%s vhangup...\n", tty_name(tty, buf));
+#endif
+ do_tty_hangup((void *) tty);
+}
+EXPORT_SYMBOL(tty_vhangup);
+
+int tty_hung_up_p(struct file * filp)
+{
+ return (filp->f_op == &hung_up_tty_fops);
+}
+
+EXPORT_SYMBOL(tty_hung_up_p);
+
+/*
+ * This function is typically called only by the session leader, when
+ * it wants to disassociate itself from its controlling tty.
+ *
+ * It performs the following functions:
+ * (1) Sends a SIGHUP and SIGCONT to the foreground process group
+ * (2) Clears the tty from being controlling the session
+ * (3) Clears the controlling tty for all processes in the
+ * session group.
+ *
+ * The argument on_exit is set to 1 if called when a process is
+ * exiting; it is 0 if called by the ioctl TIOCNOTTY.
+ */
+void disassociate_ctty(int on_exit)
+{
+ struct tty_struct *tty;
+ struct task_struct *p;
+ int tty_pgrp = -1;
+
+ lock_kernel();
+
+ down(&tty_sem);
+ tty = current->signal->tty;
+ if (tty) {
+ tty_pgrp = tty->pgrp;
+ up(&tty_sem);
+ if (on_exit && tty->driver->type != TTY_DRIVER_TYPE_PTY)
+ tty_vhangup(tty);
+ } else {
+ if (current->signal->tty_old_pgrp) {
+ kill_pg(current->signal->tty_old_pgrp, SIGHUP, on_exit);
+			kill_pg(current->signal->tty_old_pgrp, SIGCONT, on_exit);
+ }
+ up(&tty_sem);
+ unlock_kernel();
+ return;
+ }
+ if (tty_pgrp > 0) {
+ kill_pg(tty_pgrp, SIGHUP, on_exit);
+ if (!on_exit)
+ kill_pg(tty_pgrp, SIGCONT, on_exit);
+ }
+
+ /* Must lock changes to tty_old_pgrp */
+ down(&tty_sem);
+ current->signal->tty_old_pgrp = 0;
+ tty->session = 0;
+ tty->pgrp = -1;
+
+ /* Now clear signal->tty under the lock */
+ read_lock(&tasklist_lock);
+ do_each_task_pid(current->signal->session, PIDTYPE_SID, p) {
+ p->signal->tty = NULL;
+ } while_each_task_pid(current->signal->session, PIDTYPE_SID, p);
+ read_unlock(&tasklist_lock);
+ up(&tty_sem);
+ unlock_kernel();
+}
+
+void stop_tty(struct tty_struct *tty)
+{
+ if (tty->stopped)
+ return;
+ tty->stopped = 1;
+ if (tty->link && tty->link->packet) {
+ tty->ctrl_status &= ~TIOCPKT_START;
+ tty->ctrl_status |= TIOCPKT_STOP;
+ wake_up_interruptible(&tty->link->read_wait);
+ }
+ if (tty->driver->stop)
+ (tty->driver->stop)(tty);
+}
+
+EXPORT_SYMBOL(stop_tty);
+
+void start_tty(struct tty_struct *tty)
+{
+ if (!tty->stopped || tty->flow_stopped)
+ return;
+ tty->stopped = 0;
+ if (tty->link && tty->link->packet) {
+ tty->ctrl_status &= ~TIOCPKT_STOP;
+ tty->ctrl_status |= TIOCPKT_START;
+ wake_up_interruptible(&tty->link->read_wait);
+ }
+ if (tty->driver->start)
+ (tty->driver->start)(tty);
+
+ /* If we have a running line discipline it may need kicking */
+ tty_wakeup(tty);
+ wake_up_interruptible(&tty->write_wait);
+}
+
+EXPORT_SYMBOL(start_tty);
+
+static ssize_t tty_read(struct file * file, char __user * buf, size_t count,
+ loff_t *ppos)
+{
+ int i;
+ struct tty_struct * tty;
+ struct inode *inode;
+ struct tty_ldisc *ld;
+
+ tty = (struct tty_struct *)file->private_data;
+ inode = file->f_dentry->d_inode;
+ if (tty_paranoia_check(tty, inode, "tty_read"))
+ return -EIO;
+ if (!tty || (test_bit(TTY_IO_ERROR, &tty->flags)))
+ return -EIO;
+
+ /* We want to wait for the line discipline to sort out in this
+ situation */
+ ld = tty_ldisc_ref_wait(tty);
+ lock_kernel();
+ if (ld->read)
+ i = (ld->read)(tty,file,buf,count);
+ else
+ i = -EIO;
+ tty_ldisc_deref(ld);
+ unlock_kernel();
+ if (i > 0)
+ inode->i_atime = current_fs_time(inode->i_sb);
+ return i;
+}
+
+/*
+ * Split writes up in sane blocksizes to avoid
+ * denial-of-service type attacks
+ */
+static inline ssize_t do_tty_write(
+	ssize_t (*write)(struct tty_struct *, struct file *, const unsigned char *, size_t),
+ struct tty_struct *tty,
+ struct file *file,
+ const char __user *buf,
+ size_t count)
+{
+ ssize_t ret = 0, written = 0;
+ unsigned int chunk;
+
+ if (down_interruptible(&tty->atomic_write)) {
+ return -ERESTARTSYS;
+ }
+
+ /*
+ * We chunk up writes into a temporary buffer. This
+ * simplifies low-level drivers immensely, since they
+ * don't have locking issues and user mode accesses.
+ *
+ * But if TTY_NO_WRITE_SPLIT is set, we should use a
+ * big chunk-size..
+ *
+ * The default chunk-size is 2kB, because the NTTY
+ * layer has problems with bigger chunks. It will
+ * claim to be able to handle more characters than
+ * it actually does.
+ */
+ chunk = 2048;
+ if (test_bit(TTY_NO_WRITE_SPLIT, &tty->flags))
+ chunk = 65536;
+ if (count < chunk)
+ chunk = count;
+
+ /* write_buf/write_cnt is protected by the atomic_write semaphore */
+ if (tty->write_cnt < chunk) {
+ unsigned char *buf;
+
+ if (chunk < 1024)
+ chunk = 1024;
+
+ buf = kmalloc(chunk, GFP_KERNEL);
+ if (!buf) {
+ up(&tty->atomic_write);
+ return -ENOMEM;
+ }
+ kfree(tty->write_buf);
+ tty->write_cnt = chunk;
+ tty->write_buf = buf;
+ }
+
+ /* Do the write .. */
+ for (;;) {
+ size_t size = count;
+ if (size > chunk)
+ size = chunk;
+ ret = -EFAULT;
+ if (copy_from_user(tty->write_buf, buf, size))
+ break;
+ lock_kernel();
+ ret = write(tty, file, tty->write_buf, size);
+ unlock_kernel();
+ if (ret <= 0)
+ break;
+ written += ret;
+ buf += ret;
+ count -= ret;
+ if (!count)
+ break;
+ ret = -ERESTARTSYS;
+ if (signal_pending(current))
+ break;
+ cond_resched();
+ }
+ if (written) {
+ struct inode *inode = file->f_dentry->d_inode;
+ inode->i_mtime = current_fs_time(inode->i_sb);
+ ret = written;
+ }
+ up(&tty->atomic_write);
+ return ret;
+}
+
+
+static ssize_t tty_write(struct file * file, const char __user * buf, size_t count,
+ loff_t *ppos)
+{
+ struct tty_struct * tty;
+ struct inode *inode = file->f_dentry->d_inode;
+ ssize_t ret;
+ struct tty_ldisc *ld;
+
+ tty = (struct tty_struct *)file->private_data;
+ if (tty_paranoia_check(tty, inode, "tty_write"))
+ return -EIO;
+	if (!tty || !tty->driver->write || (test_bit(TTY_IO_ERROR, &tty->flags)))
+ return -EIO;
+
+ ld = tty_ldisc_ref_wait(tty);
+ if (!ld->write)
+ ret = -EIO;
+ else
+ ret = do_tty_write(ld->write, tty, file, buf, count);
+ tty_ldisc_deref(ld);
+ return ret;
+}
+
+ssize_t redirected_tty_write(struct file * file, const char __user * buf, size_t count,
+ loff_t *ppos)
+{
+ struct file *p = NULL;
+
+ spin_lock(&redirect_lock);
+ if (redirect) {
+ get_file(redirect);
+ p = redirect;
+ }
+ spin_unlock(&redirect_lock);
+
+ if (p) {
+ ssize_t res;
+ res = vfs_write(p, buf, count, &p->f_pos);
+ fput(p);
+ return res;
+ }
+
+ return tty_write(file, buf, count, ppos);
+}
+
+static char ptychar[] = "pqrstuvwxyzabcde";
+
+static inline void pty_line_name(struct tty_driver *driver, int index, char *p)
+{
+ int i = index + driver->name_base;
+ /* ->name is initialized to "ttyp", but "tty" is expected */
+ sprintf(p, "%s%c%x",
+		driver->subtype == PTY_TYPE_SLAVE ? "tty" : driver->name,
+ ptychar[i >> 4 & 0xf], i & 0xf);
+}
+
+static inline void tty_line_name(struct tty_driver *driver, int index, char *p)
+{
+ sprintf(p, "%s%d", driver->name, index + driver->name_base);
+}
+
+/*
+ * WSH 06/09/97: Rewritten to remove races and properly clean up after a
+ * failed open. The new code protects the open with a semaphore, so it's
+ * really quite straightforward. The semaphore locking can probably be
+ * relaxed for the (most common) case of reopening a tty.
+ */
+static int init_dev(struct tty_driver *driver, int idx,
+ struct tty_struct **ret_tty)
+{
+ struct tty_struct *tty, *o_tty;
+ struct termios *tp, **tp_loc, *o_tp, **o_tp_loc;
+ struct termios *ltp, **ltp_loc, *o_ltp, **o_ltp_loc;
+ int retval=0;
+
+ /* check whether we're reopening an existing tty */
+ if (driver->flags & TTY_DRIVER_DEVPTS_MEM) {
+ tty = devpts_get_tty(idx);
+ if (tty && driver->subtype == PTY_TYPE_MASTER)
+ tty = tty->link;
+ } else {
+ tty = driver->ttys[idx];
+ }
+ if (tty) goto fast_track;
+
+ /*
+ * First time open is complex, especially for PTY devices.
+ * This code guarantees that either everything succeeds and the
+ * TTY is ready for operation, or else the table slots are vacated
+ * and the allocated memory released. (Except that the termios
+ * and locked termios may be retained.)
+ */
+
+ if (!try_module_get(driver->owner)) {
+ retval = -ENODEV;
+ goto end_init;
+ }
+
+ o_tty = NULL;
+ tp = o_tp = NULL;
+ ltp = o_ltp = NULL;
+
+ tty = alloc_tty_struct();
+ if(!tty)
+ goto fail_no_mem;
+ initialize_tty_struct(tty);
+ tty->driver = driver;
+ tty->index = idx;
+ tty_line_name(driver, idx, tty->name);
+
+ if (driver->flags & TTY_DRIVER_DEVPTS_MEM) {
+ tp_loc = &tty->termios;
+ ltp_loc = &tty->termios_locked;
+ } else {
+ tp_loc = &driver->termios[idx];
+ ltp_loc = &driver->termios_locked[idx];
+ }
+
+ if (!*tp_loc) {
+ tp = (struct termios *) kmalloc(sizeof(struct termios),
+ GFP_KERNEL);
+ if (!tp)
+ goto free_mem_out;
+ *tp = driver->init_termios;
+ }
+
+ if (!*ltp_loc) {
+ ltp = (struct termios *) kmalloc(sizeof(struct termios),
+ GFP_KERNEL);
+ if (!ltp)
+ goto free_mem_out;
+ memset(ltp, 0, sizeof(struct termios));
+ }
+
+ if (driver->type == TTY_DRIVER_TYPE_PTY) {
+ o_tty = alloc_tty_struct();
+ if (!o_tty)
+ goto free_mem_out;
+ initialize_tty_struct(o_tty);
+ o_tty->driver = driver->other;
+ o_tty->index = idx;
+ tty_line_name(driver->other, idx, o_tty->name);
+
+ if (driver->flags & TTY_DRIVER_DEVPTS_MEM) {
+ o_tp_loc = &o_tty->termios;
+ o_ltp_loc = &o_tty->termios_locked;
+ } else {
+ o_tp_loc = &driver->other->termios[idx];
+ o_ltp_loc = &driver->other->termios_locked[idx];
+ }
+
+ if (!*o_tp_loc) {
+ o_tp = (struct termios *)
+ kmalloc(sizeof(struct termios), GFP_KERNEL);
+ if (!o_tp)
+ goto free_mem_out;
+ *o_tp = driver->other->init_termios;
+ }
+
+ if (!*o_ltp_loc) {
+ o_ltp = (struct termios *)
+ kmalloc(sizeof(struct termios), GFP_KERNEL);
+ if (!o_ltp)
+ goto free_mem_out;
+ memset(o_ltp, 0, sizeof(struct termios));
+ }
+
+ /*
+ * Everything allocated ... set up the o_tty structure.
+ */
+ if (!(driver->other->flags & TTY_DRIVER_DEVPTS_MEM)) {
+ driver->other->ttys[idx] = o_tty;
+ }
+ if (!*o_tp_loc)
+ *o_tp_loc = o_tp;
+ if (!*o_ltp_loc)
+ *o_ltp_loc = o_ltp;
+ o_tty->termios = *o_tp_loc;
+ o_tty->termios_locked = *o_ltp_loc;
+ driver->other->refcount++;
+ if (driver->subtype == PTY_TYPE_MASTER)
+ o_tty->count++;
+
+ /* Establish the links in both directions */
+ tty->link = o_tty;
+ o_tty->link = tty;
+ }
+
+ /*
+ * All structures have been allocated, so now we install them.
+ * Failures after this point use release_mem to clean up, so
+ * there's no need to null out the local pointers.
+ */
+ if (!(driver->flags & TTY_DRIVER_DEVPTS_MEM)) {
+ driver->ttys[idx] = tty;
+ }
+
+ if (!*tp_loc)
+ *tp_loc = tp;
+ if (!*ltp_loc)
+ *ltp_loc = ltp;
+ tty->termios = *tp_loc;
+ tty->termios_locked = *ltp_loc;
+ driver->refcount++;
+ tty->count++;
+
+ /*
+ * Structures all installed ... call the ldisc open routines.
+ * If we fail here just call release_mem to clean up. No need
+ * to decrement the use counts, as release_mem doesn't care.
+ */
+
+ if (tty->ldisc.open) {
+ retval = (tty->ldisc.open)(tty);
+ if (retval)
+ goto release_mem_out;
+ }
+ if (o_tty && o_tty->ldisc.open) {
+ retval = (o_tty->ldisc.open)(o_tty);
+ if (retval) {
+ if (tty->ldisc.close)
+ (tty->ldisc.close)(tty);
+ goto release_mem_out;
+ }
+ tty_ldisc_enable(o_tty);
+ }
+ tty_ldisc_enable(tty);
+ goto success;
+
+ /*
+ * This fast open can be used if the tty is already open.
+ * No memory is allocated, and the only failures are from
+ * attempting to open a closing tty or attempting multiple
+ * opens on a pty master.
+ */
+fast_track:
+ if (test_bit(TTY_CLOSING, &tty->flags)) {
+ retval = -EIO;
+ goto end_init;
+ }
+ if (driver->type == TTY_DRIVER_TYPE_PTY &&
+ driver->subtype == PTY_TYPE_MASTER) {
+ /*
+ * special case for PTY masters: only one open permitted,
+ * and the slave side open count is incremented as well.
+ */
+ if (tty->count) {
+ retval = -EIO;
+ goto end_init;
+ }
+ tty->link->count++;
+ }
+ tty->count++;
+ tty->driver = driver; /* N.B. why do this every time?? */
+
+ /* FIXME */
+ if(!test_bit(TTY_LDISC, &tty->flags))
+ printk(KERN_ERR "init_dev but no ldisc\n");
+success:
+ *ret_tty = tty;
+
+ /* All paths come through here to release the semaphore */
+end_init:
+ return retval;
+
+ /* Release locally allocated memory ... nothing placed in slots */
+free_mem_out:
+ if (o_tp)
+ kfree(o_tp);
+ if (o_tty)
+ free_tty_struct(o_tty);
+ if (ltp)
+ kfree(ltp);
+ if (tp)
+ kfree(tp);
+ free_tty_struct(tty);
+
+fail_no_mem:
+ module_put(driver->owner);
+ retval = -ENOMEM;
+ goto end_init;
+
+ /* call the tty release_mem routine to clean out this slot */
+release_mem_out:
+ printk(KERN_INFO "init_dev: ldisc open failed, "
+ "clearing slot %d\n", idx);
+ release_mem(tty, idx);
+ goto end_init;
+}
+
+/*
+ * Releases memory associated with a tty structure, and clears out the
+ * driver table slots.
+ */
+static void release_mem(struct tty_struct *tty, int idx)
+{
+ struct tty_struct *o_tty;
+ struct termios *tp;
+ int devpts = tty->driver->flags & TTY_DRIVER_DEVPTS_MEM;
+
+ if ((o_tty = tty->link) != NULL) {
+ if (!devpts)
+ o_tty->driver->ttys[idx] = NULL;
+ if (o_tty->driver->flags & TTY_DRIVER_RESET_TERMIOS) {
+ tp = o_tty->termios;
+ if (!devpts)
+ o_tty->driver->termios[idx] = NULL;
+ kfree(tp);
+
+ tp = o_tty->termios_locked;
+ if (!devpts)
+ o_tty->driver->termios_locked[idx] = NULL;
+ kfree(tp);
+ }
+ o_tty->magic = 0;
+ o_tty->driver->refcount--;
+ file_list_lock();
+ list_del_init(&o_tty->tty_files);
+ file_list_unlock();
+ free_tty_struct(o_tty);
+ }
+
+ if (!devpts)
+ tty->driver->ttys[idx] = NULL;
+ if (tty->driver->flags & TTY_DRIVER_RESET_TERMIOS) {
+ tp = tty->termios;
+ if (!devpts)
+ tty->driver->termios[idx] = NULL;
+ kfree(tp);
+
+ tp = tty->termios_locked;
+ if (!devpts)
+ tty->driver->termios_locked[idx] = NULL;
+ kfree(tp);
+ }
+
+ tty->magic = 0;
+ tty->driver->refcount--;
+ file_list_lock();
+ list_del_init(&tty->tty_files);
+ file_list_unlock();
+ module_put(tty->driver->owner);
+ free_tty_struct(tty);
+}
+
+/*
+ * Even releasing the tty structures is a tricky business.. We have
+ * to be very careful that the structures are all released at the
+ * same time, as interrupts might otherwise get the wrong pointers.
+ *
+ * WSH 09/09/97: rewritten to avoid some nasty race conditions that could
+ * lead to double frees or releasing memory still in use.
+ */
+static void release_dev(struct file * filp)
+{
+ struct tty_struct *tty, *o_tty;
+ int pty_master, tty_closing, o_tty_closing, do_sleep;
+ int devpts_master, devpts;
+ int idx;
+ char buf[64];
+ unsigned long flags;
+
+ tty = (struct tty_struct *)filp->private_data;
+ if (tty_paranoia_check(tty, filp->f_dentry->d_inode, "release_dev"))
+ return;
+
+ check_tty_count(tty, "release_dev");
+
+ tty_fasync(-1, filp, 0);
+
+ idx = tty->index;
+ pty_master = (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
+ tty->driver->subtype == PTY_TYPE_MASTER);
+ devpts = (tty->driver->flags & TTY_DRIVER_DEVPTS_MEM) != 0;
+ devpts_master = pty_master && devpts;
+ o_tty = tty->link;
+
+#ifdef TTY_PARANOIA_CHECK
+ if (idx < 0 || idx >= tty->driver->num) {
+ printk(KERN_DEBUG "release_dev: bad idx when trying to "
+ "free (%s)\n", tty->name);
+ return;
+ }
+ if (!(tty->driver->flags & TTY_DRIVER_DEVPTS_MEM)) {
+ if (tty != tty->driver->ttys[idx]) {
+			printk(KERN_DEBUG "release_dev: driver.table[%d] not tty "
+			       "for (%s)\n", idx, tty->name);
+ return;
+ }
+ if (tty->termios != tty->driver->termios[idx]) {
+			printk(KERN_DEBUG "release_dev: driver.termios[%d] not termios "
+ "for (%s)\n",
+ idx, tty->name);
+ return;
+ }
+ if (tty->termios_locked != tty->driver->termios_locked[idx]) {
+			printk(KERN_DEBUG "release_dev: driver.termios_locked[%d] not "
+ "termios_locked for (%s)\n",
+ idx, tty->name);
+ return;
+ }
+ }
+#endif
+
+#ifdef TTY_DEBUG_HANGUP
+ printk(KERN_DEBUG "release_dev of %s (tty count=%d)...",
+ tty_name(tty, buf), tty->count);
+#endif
+
+#ifdef TTY_PARANOIA_CHECK
+ if (tty->driver->other &&
+ !(tty->driver->flags & TTY_DRIVER_DEVPTS_MEM)) {
+ if (o_tty != tty->driver->other->ttys[idx]) {
+ printk(KERN_DEBUG "release_dev: other->table[%d] "
+ "not o_tty for (%s)\n",
+ idx, tty->name);
+ return;
+ }
+ if (o_tty->termios != tty->driver->other->termios[idx]) {
+ printk(KERN_DEBUG "release_dev: other->termios[%d] "
+ "not o_termios for (%s)\n",
+ idx, tty->name);
+ return;
+ }
+ if (o_tty->termios_locked !=
+ tty->driver->other->termios_locked[idx]) {
+ printk(KERN_DEBUG "release_dev: other->termios_locked["
+ "%d] not o_termios_locked for (%s)\n",
+ idx, tty->name);
+ return;
+ }
+ if (o_tty->link != tty) {
+ printk(KERN_DEBUG "release_dev: bad pty pointers\n");
+ return;
+ }
+ }
+#endif
+ if (tty->driver->close)
+ tty->driver->close(tty, filp);
+
+ /*
+ * Sanity check: if tty->count is going to zero, there shouldn't be
+ * any waiters on tty->read_wait or tty->write_wait. We test the
+ * wait queues and kick everyone out _before_ actually starting to
+ * close. This ensures that we won't block while releasing the tty
+ * structure.
+ *
+ * The test for the o_tty closing is necessary, since the master and
+ * slave sides may close in any order. If the slave side closes out
+ * first, its count will be one, since the master side holds an open.
+ * Thus this test wouldn't be triggered at the time the slave closes,
+ * so we do it now.
+ *
+ * Note that it's possible for the tty to be opened again while we're
+ * flushing out waiters. By recalculating the closing flags before
+ * each iteration we avoid any problems.
+ */
+ while (1) {
+ /* Guard against races with tty->count changes elsewhere and
+ opens on /dev/tty */
+
+ down(&tty_sem);
+ tty_closing = tty->count <= 1;
+ o_tty_closing = o_tty &&
+ (o_tty->count <= (pty_master ? 1 : 0));
+ up(&tty_sem);
+ do_sleep = 0;
+
+ if (tty_closing) {
+ if (waitqueue_active(&tty->read_wait)) {
+ wake_up(&tty->read_wait);
+ do_sleep++;
+ }
+ if (waitqueue_active(&tty->write_wait)) {
+ wake_up(&tty->write_wait);
+ do_sleep++;
+ }
+ }
+ if (o_tty_closing) {
+ if (waitqueue_active(&o_tty->read_wait)) {
+ wake_up(&o_tty->read_wait);
+ do_sleep++;
+ }
+ if (waitqueue_active(&o_tty->write_wait)) {
+ wake_up(&o_tty->write_wait);
+ do_sleep++;
+ }
+ }
+ if (!do_sleep)
+ break;
+
+ printk(KERN_WARNING "release_dev: %s: read/write wait queue "
+ "active!\n", tty_name(tty, buf));
+ schedule();
+ }
+
+ /*
+ * The closing flags are now consistent with the open counts on
+ * both sides, and we've completed the last operation that could
+ * block, so it's safe to proceed with closing.
+ */
+
+ down(&tty_sem);
+ if (pty_master) {
+ if (--o_tty->count < 0) {
+ printk(KERN_WARNING "release_dev: bad pty slave count "
+ "(%d) for %s\n",
+ o_tty->count, tty_name(o_tty, buf));
+ o_tty->count = 0;
+ }
+ }
+ if (--tty->count < 0) {
+ printk(KERN_WARNING "release_dev: bad tty->count (%d) for %s\n",
+ tty->count, tty_name(tty, buf));
+ tty->count = 0;
+ }
+ up(&tty_sem);
+
+ /*
+ * We've decremented tty->count, so we need to remove this file
+ * descriptor off the tty->tty_files list; this serves two
+ * purposes:
+ * - check_tty_count sees the correct number of file descriptors
+ * associated with this tty.
+ * - do_tty_hangup no longer sees this file descriptor as
+ * something that needs to be handled for hangups.
+ */
+ file_kill(filp);
+ filp->private_data = NULL;
+
+ /*
+ * Perform some housekeeping before deciding whether to return.
+ *
+ * Set the TTY_CLOSING flag if this was the last open. In the
+ * case of a pty we may have to wait around for the other side
+ * to close, and TTY_CLOSING makes sure we can't be reopened.
+ */
+ if(tty_closing)
+ set_bit(TTY_CLOSING, &tty->flags);
+ if(o_tty_closing)
+ set_bit(TTY_CLOSING, &o_tty->flags);
+
+ /*
+ * If _either_ side is closing, make sure there aren't any
+ * processes that still think tty or o_tty is their controlling
+ * tty.
+ */
+ if (tty_closing || o_tty_closing) {
+ struct task_struct *p;
+
+ read_lock(&tasklist_lock);
+ do_each_task_pid(tty->session, PIDTYPE_SID, p) {
+ p->signal->tty = NULL;
+ } while_each_task_pid(tty->session, PIDTYPE_SID, p);
+ if (o_tty)
+ do_each_task_pid(o_tty->session, PIDTYPE_SID, p) {
+ p->signal->tty = NULL;
+ } while_each_task_pid(o_tty->session, PIDTYPE_SID, p);
+ read_unlock(&tasklist_lock);
+ }
+
+ /* check whether both sides are closing ... */
+ if (!tty_closing || (o_tty && !o_tty_closing))
+ return;
+
+#ifdef TTY_DEBUG_HANGUP
+ printk(KERN_DEBUG "freeing tty structure...");
+#endif
+ /*
+ * Prevent flush_to_ldisc() from rescheduling the work for later. Then
+ * kill any delayed work. As this is the final close it does not
+ * race with the set_ldisc code path.
+ */
+ clear_bit(TTY_LDISC, &tty->flags);
+ clear_bit(TTY_DONT_FLIP, &tty->flags);
+ cancel_delayed_work(&tty->flip.work);
+
+ /*
+ * Wait for ->hangup_work and ->flip.work handlers to terminate
+ */
+
+ flush_scheduled_work();
+
+ /*
+ * Wait for any short term users (we know they are just driver
+ * side waiters as the file is closing so user count on the file
+	 * side is zero).
+ */
+ spin_lock_irqsave(&tty_ldisc_lock, flags);
+ while(tty->ldisc.refcount)
+ {
+ spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+ wait_event(tty_ldisc_wait, tty->ldisc.refcount == 0);
+ spin_lock_irqsave(&tty_ldisc_lock, flags);
+ }
+ spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+ /*
+ * Shutdown the current line discipline, and reset it to N_TTY.
+ * N.B. why reset ldisc when we're releasing the memory??
+ *
+ * FIXME: this MUST get fixed for the new reflocking
+ */
+ if (tty->ldisc.close)
+ (tty->ldisc.close)(tty);
+ tty_ldisc_put(tty->ldisc.num);
+
+ /*
+ * Switch the line discipline back
+ */
+ tty_ldisc_assign(tty, tty_ldisc_get(N_TTY));
+ tty_set_termios_ldisc(tty,N_TTY);
+ if (o_tty) {
+ /* FIXME: could o_tty be in setldisc here ? */
+ clear_bit(TTY_LDISC, &o_tty->flags);
+ if (o_tty->ldisc.close)
+ (o_tty->ldisc.close)(o_tty);
+ tty_ldisc_put(o_tty->ldisc.num);
+ tty_ldisc_assign(o_tty, tty_ldisc_get(N_TTY));
+ tty_set_termios_ldisc(o_tty,N_TTY);
+ }
+ /*
+ * The release_mem function takes care of the details of clearing
+ * the slots and preserving the termios structure.
+ */
+ release_mem(tty, idx);
+
+#ifdef CONFIG_UNIX98_PTYS
+ /* Make this pty number available for reallocation */
+ if (devpts) {
+ down(&allocated_ptys_lock);
+ idr_remove(&allocated_ptys, idx);
+ up(&allocated_ptys_lock);
+ }
+#endif
+
+}
+
+/*
+ * tty_open and tty_release keep up the tty count that contains the
+ * number of opens done on a tty. We cannot use the inode-count, as
+ * different inodes might point to the same tty.
+ *
+ * Open-counting is needed for pty masters, as well as for keeping
+ * track of serial lines: DTR is dropped when the last close happens.
+ * (This is not done solely through tty->count, now. - Ted 1/27/92)
+ *
+ * The termios state of a pty is reset on first open so that
+ * settings don't persist across reuse.
+ */
+static int tty_open(struct inode * inode, struct file * filp)
+{
+ struct tty_struct *tty;
+ int noctty, retval;
+ struct tty_driver *driver;
+ int index;
+ dev_t device = inode->i_rdev;
+ unsigned short saved_flags = filp->f_flags;
+
+ nonseekable_open(inode, filp);
+
+retry_open:
+ noctty = filp->f_flags & O_NOCTTY;
+ index = -1;
+ retval = 0;
+
+ down(&tty_sem);
+
+ if (device == MKDEV(TTYAUX_MAJOR,0)) {
+ if (!current->signal->tty) {
+ up(&tty_sem);
+ return -ENXIO;
+ }
+ driver = current->signal->tty->driver;
+ index = current->signal->tty->index;
+ filp->f_flags |= O_NONBLOCK; /* Don't let /dev/tty block */
+ /* noctty = 1; */
+ goto got_driver;
+ }
+#ifdef CONFIG_VT
+ if (console_use_vt && (device == MKDEV(TTY_MAJOR,0))) {
+ extern int fg_console;
+ extern struct tty_driver *console_driver;
+ driver = console_driver;
+ index = fg_console;
+ noctty = 1;
+ goto got_driver;
+ }
+#endif
+ if (device == MKDEV(TTYAUX_MAJOR,1)) {
+ driver = console_device(&index);
+ if (driver) {
+ /* Don't let /dev/console block */
+ filp->f_flags |= O_NONBLOCK;
+ noctty = 1;
+ goto got_driver;
+ }
+ up(&tty_sem);
+ return -ENODEV;
+ }
+
+ driver = get_tty_driver(device, &index);
+ if (!driver) {
+ up(&tty_sem);
+ return -ENODEV;
+ }
+got_driver:
+ retval = init_dev(driver, index, &tty);
+ up(&tty_sem);
+ if (retval)
+ return retval;
+
+ filp->private_data = tty;
+ file_move(filp, &tty->tty_files);
+ check_tty_count(tty, "tty_open");
+ if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
+ tty->driver->subtype == PTY_TYPE_MASTER)
+ noctty = 1;
+#ifdef TTY_DEBUG_HANGUP
+ printk(KERN_DEBUG "opening %s...", tty->name);
+#endif
+ if (!retval) {
+ if (tty->driver->open)
+ retval = tty->driver->open(tty, filp);
+ else
+ retval = -ENODEV;
+ }
+ filp->f_flags = saved_flags;
+
+	if (!retval && test_bit(TTY_EXCLUSIVE, &tty->flags) && !capable(CAP_SYS_ADMIN))
+ retval = -EBUSY;
+
+ if (retval) {
+#ifdef TTY_DEBUG_HANGUP
+ printk(KERN_DEBUG "error %d in opening %s...", retval,
+ tty->name);
+#endif
+ release_dev(filp);
+ if (retval != -ERESTARTSYS)
+ return retval;
+ if (signal_pending(current))
+ return retval;
+ schedule();
+ /*
+ * Need to reset f_op in case a hangup happened.
+ */
+ if (filp->f_op == &hung_up_tty_fops)
+ filp->f_op = &tty_fops;
+ goto retry_open;
+ }
+ if (!noctty &&
+ current->signal->leader &&
+ !current->signal->tty &&
+ tty->session == 0) {
+ task_lock(current);
+ current->signal->tty = tty;
+ task_unlock(current);
+ current->signal->tty_old_pgrp = 0;
+ tty->session = current->signal->session;
+ tty->pgrp = process_group(current);
+ }
+ return 0;
+}
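
For reference, a minimal userspace sketch of this open path (the device path is an assumption): passing O_NOCTTY suppresses the controlling-tty attach performed at the end of tty_open().

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* O_NOCTTY keeps tty_open() from attaching this tty as our
	 * controlling terminal even if we are a session leader. */
	int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY | O_NONBLOCK);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	write(fd, "ping\r\n", 6);
	close(fd);
	return 0;
}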
+
+#ifdef CONFIG_UNIX98_PTYS
+static int ptmx_open(struct inode * inode, struct file * filp)
+{
+ struct tty_struct *tty;
+ int retval;
+ int index;
+ int idr_ret;
+
+ nonseekable_open(inode, filp);
+
+ /* find a device that is not in use. */
+ down(&allocated_ptys_lock);
+ if (!idr_pre_get(&allocated_ptys, GFP_KERNEL)) {
+ up(&allocated_ptys_lock);
+ return -ENOMEM;
+ }
+ idr_ret = idr_get_new(&allocated_ptys, NULL, &index);
+ if (idr_ret < 0) {
+ up(&allocated_ptys_lock);
+ if (idr_ret == -EAGAIN)
+ return -ENOMEM;
+ return -EIO;
+ }
+ if (index >= pty_limit) {
+ idr_remove(&allocated_ptys, index);
+ up(&allocated_ptys_lock);
+ return -EIO;
+ }
+ up(&allocated_ptys_lock);
+
+ down(&tty_sem);
+ retval = init_dev(ptm_driver, index, &tty);
+ up(&tty_sem);
+
+ if (retval)
+ goto out;
+
+ set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
+ filp->private_data = tty;
+ file_move(filp, &tty->tty_files);
+
+ retval = -ENOMEM;
+ if (devpts_pty_new(tty->link))
+ goto out1;
+
+ check_tty_count(tty, "tty_open");
+ retval = ptm_driver->open(tty, filp);
+ if (!retval)
+ return 0;
+out1:
+ release_dev(filp);
+out:
+ down(&allocated_ptys_lock);
+ idr_remove(&allocated_ptys, index);
+ up(&allocated_ptys_lock);
+ return retval;
+}
+#endif
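
A hedged userspace counterpart to ptmx_open(): glibc's grantpt()/unlockpt() clear the TTY_PTY_LOCK bit set above, and ptsname() maps back to the devpts node created by devpts_pty_new().

#define _XOPEN_SOURCE 600
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	int master = open("/dev/ptmx", O_RDWR | O_NOCTTY);
	if (master < 0) {
		perror("open /dev/ptmx");
		return 1;
	}
	/* grantpt()/unlockpt() undo the slave lock taken in ptmx_open() */
	if (grantpt(master) < 0 || unlockpt(master) < 0) {
		perror("grantpt/unlockpt");
		return 1;
	}
	printf("slave: %s\n", ptsname(master));	/* e.g. /dev/pts/0 */
	close(master);
	return 0;
}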
+
+static int tty_release(struct inode * inode, struct file * filp)
+{
+ lock_kernel();
+ release_dev(filp);
+ unlock_kernel();
+ return 0;
+}
+
+/* No kernel lock held - fine */
+static unsigned int tty_poll(struct file * filp, poll_table * wait)
+{
+ struct tty_struct * tty;
+ struct tty_ldisc *ld;
+ int ret = 0;
+
+ tty = (struct tty_struct *)filp->private_data;
+ if (tty_paranoia_check(tty, filp->f_dentry->d_inode, "tty_poll"))
+ return 0;
+
+ ld = tty_ldisc_ref_wait(tty);
+ if (ld->poll)
+ ret = (ld->poll)(tty, filp, wait);
+ tty_ldisc_deref(ld);
+ return ret;
+}
+
+static int tty_fasync(int fd, struct file * filp, int on)
+{
+ struct tty_struct * tty;
+ int retval;
+
+ tty = (struct tty_struct *)filp->private_data;
+ if (tty_paranoia_check(tty, filp->f_dentry->d_inode, "tty_fasync"))
+ return 0;
+
+ retval = fasync_helper(fd, filp, on, &tty->fasync);
+ if (retval <= 0)
+ return retval;
+
+ if (on) {
+ if (!waitqueue_active(&tty->read_wait))
+ tty->minimum_to_wake = 1;
+ retval = f_setown(filp, (-tty->pgrp) ? : current->pid, 0);
+ if (retval)
+ return retval;
+ } else {
+ if (!tty->fasync && !waitqueue_active(&tty->read_wait))
+ tty->minimum_to_wake = N_TTY_BUF_SIZE;
+ }
+ return 0;
+}
+
+static int tiocsti(struct tty_struct *tty, char __user *p)
+{
+ char ch, mbz = 0;
+ struct tty_ldisc *ld;
+
+ if ((current->signal->tty != tty) && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (get_user(ch, p))
+ return -EFAULT;
+ ld = tty_ldisc_ref_wait(tty);
+ ld->receive_buf(tty, &ch, &mbz, 1);
+ tty_ldisc_deref(ld);
+ return 0;
+}
+
+static int tiocgwinsz(struct tty_struct *tty, struct winsize __user * arg)
+{
+ if (copy_to_user(arg, &tty->winsize, sizeof(*arg)))
+ return -EFAULT;
+ return 0;
+}
+
+static int tiocswinsz(struct tty_struct *tty, struct tty_struct *real_tty,
+ struct winsize __user * arg)
+{
+ struct winsize tmp_ws;
+
+ if (copy_from_user(&tmp_ws, arg, sizeof(*arg)))
+ return -EFAULT;
+ if (!memcmp(&tmp_ws, &tty->winsize, sizeof(*arg)))
+ return 0;
+#ifdef CONFIG_VT
+ if (tty->driver->type == TTY_DRIVER_TYPE_CONSOLE) {
+ unsigned int currcons = tty->index;
+ int rc;
+
+ acquire_console_sem();
+ rc = vc_resize(currcons, tmp_ws.ws_col, tmp_ws.ws_row);
+ release_console_sem();
+ if (rc)
+ return -ENXIO;
+ }
+#endif
+ if (tty->pgrp > 0)
+ kill_pg(tty->pgrp, SIGWINCH, 1);
+ if ((real_tty->pgrp != tty->pgrp) && (real_tty->pgrp > 0))
+ kill_pg(real_tty->pgrp, SIGWINCH, 1);
+ tty->winsize = tmp_ws;
+ real_tty->winsize = tmp_ws;
+ return 0;
+}
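
From userspace the pair above is reached with TIOCGWINSZ/TIOCSWINSZ; a small sketch:

#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct winsize ws;

	if (ioctl(STDIN_FILENO, TIOCGWINSZ, &ws) < 0) {
		perror("TIOCGWINSZ");
		return 1;
	}
	printf("%hu rows x %hu cols\n", ws.ws_row, ws.ws_col);
	/* Writing the struct back with TIOCSWINSZ would land in
	 * tiocswinsz() and raise SIGWINCH in the foreground group. */
	return 0;
}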
+
+static int tioccons(struct file *file)
+{
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (file->f_op->write == redirected_tty_write) {
+ struct file *f;
+ spin_lock(&redirect_lock);
+ f = redirect;
+ redirect = NULL;
+ spin_unlock(&redirect_lock);
+ if (f)
+ fput(f);
+ return 0;
+ }
+ spin_lock(&redirect_lock);
+ if (redirect) {
+ spin_unlock(&redirect_lock);
+ return -EBUSY;
+ }
+ get_file(file);
+ redirect = file;
+ spin_unlock(&redirect_lock);
+ return 0;
+}
+
+
+static int fionbio(struct file *file, int __user *p)
+{
+ int nonblock;
+
+ if (get_user(nonblock, p))
+ return -EFAULT;
+
+ if (nonblock)
+ file->f_flags |= O_NONBLOCK;
+ else
+ file->f_flags &= ~O_NONBLOCK;
+ return 0;
+}
+
+static int tiocsctty(struct tty_struct *tty, int arg)
+{
+ task_t *p;
+
+ if (current->signal->leader &&
+ (current->signal->session == tty->session))
+ return 0;
+ /*
+ * The process must be a session leader and
+ * not have a controlling tty already.
+ */
+ if (!current->signal->leader || current->signal->tty)
+ return -EPERM;
+ if (tty->session > 0) {
+ /*
+ * This tty is already the controlling
+ * tty for another session group!
+ */
+ if ((arg == 1) && capable(CAP_SYS_ADMIN)) {
+ /*
+ * Steal it away
+ */
+
+ read_lock(&tasklist_lock);
+ do_each_task_pid(tty->session, PIDTYPE_SID, p) {
+ p->signal->tty = NULL;
+ } while_each_task_pid(tty->session, PIDTYPE_SID, p);
+ read_unlock(&tasklist_lock);
+ } else
+ return -EPERM;
+ }
+ task_lock(current);
+ current->signal->tty = tty;
+ task_unlock(current);
+ current->signal->tty_old_pgrp = 0;
+ tty->session = current->signal->session;
+ tty->pgrp = process_group(current);
+ return 0;
+}
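
A sketch of the caller's side of tiocsctty() (device path is an assumption): the process must first become a session leader, and arg 0 refuses to steal a tty that already belongs to another session.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	if (setsid() < 0)	/* become session leader, drop any ctty */
		perror("setsid");

	int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, TIOCSCTTY, 0) < 0)	/* claim it, without stealing */
		perror("TIOCSCTTY");
	return 0;
}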
+
+static int tiocgpgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p)
+{
+ /*
+ * (tty == real_tty) is a cheap way of
+ * testing if the tty is NOT a master pty.
+ */
+ if (tty == real_tty && current->signal->tty != real_tty)
+ return -ENOTTY;
+ return put_user(real_tty->pgrp, p);
+}
+
+static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p)
+{
+ pid_t pgrp;
+ int retval = tty_check_change(real_tty);
+
+ if (retval == -EIO)
+ return -ENOTTY;
+ if (retval)
+ return retval;
+ if (!current->signal->tty ||
+ (current->signal->tty != real_tty) ||
+ (real_tty->session != current->signal->session))
+ return -ENOTTY;
+ if (get_user(pgrp, p))
+ return -EFAULT;
+ if (pgrp < 0)
+ return -EINVAL;
+ if (session_of_pgrp(pgrp) != current->signal->session)
+ return -EPERM;
+ real_tty->pgrp = pgrp;
+ return 0;
+}
+
+static int tiocgsid(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p)
+{
+ /*
+ * (tty == real_tty) is a cheap way of
+ * testing if the tty is NOT a master pty.
+ */
+ if (tty == real_tty && current->signal->tty != real_tty)
+ return -ENOTTY;
+ if (real_tty->session <= 0)
+ return -ENOTTY;
+ return put_user(real_tty->session, p);
+}
+
+static int tiocsetd(struct tty_struct *tty, int __user *p)
+{
+ int ldisc;
+
+ if (get_user(ldisc, p))
+ return -EFAULT;
+ return tty_set_ldisc(tty, ldisc);
+}
+
+static int send_break(struct tty_struct *tty, int duration)
+{
+ tty->driver->break_ctl(tty, -1);
+ if (!signal_pending(current)) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(duration);
+ }
+ tty->driver->break_ctl(tty, 0);
+ if (signal_pending(current))
+ return -EINTR;
+ return 0;
+}
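
send_break() is what tcsendbreak() ends up calling through TCSBRK/TCSBRKP; a minimal sketch (port name is an assumption):

#include <fcntl.h>
#include <stdio.h>
#include <termios.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* duration 0 -> the kernel holds the line in break state ~HZ/4 */
	if (tcsendbreak(fd, 0) < 0)
		perror("tcsendbreak");
	close(fd);
	return 0;
}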
+
+static int
+tty_tiocmget(struct tty_struct *tty, struct file *file, int __user *p)
+{
+ int retval = -EINVAL;
+
+ if (tty->driver->tiocmget) {
+ retval = tty->driver->tiocmget(tty, file);
+
+ if (retval >= 0)
+ retval = put_user(retval, p);
+ }
+ return retval;
+}
+
+static int
+tty_tiocmset(struct tty_struct *tty, struct file *file, unsigned int cmd,
+ unsigned __user *p)
+{
+ int retval = -EINVAL;
+
+ if (tty->driver->tiocmset) {
+ unsigned int set, clear, val;
+
+ retval = get_user(val, p);
+ if (retval)
+ return retval;
+
+ set = clear = 0;
+ switch (cmd) {
+ case TIOCMBIS:
+ set = val;
+ break;
+ case TIOCMBIC:
+ clear = val;
+ break;
+ case TIOCMSET:
+ set = val;
+ clear = ~val;
+ break;
+ }
+
+ set &= TIOCM_DTR|TIOCM_RTS|TIOCM_OUT1|TIOCM_OUT2|TIOCM_LOOP;
+ clear &= TIOCM_DTR|TIOCM_RTS|TIOCM_OUT1|TIOCM_OUT2|TIOCM_LOOP;
+
+ retval = tty->driver->tiocmset(tty, file, set, clear);
+ }
+ return retval;
+}
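
The same bit masking is visible from userspace; a sketch raising DTR/RTS and reading the status bits back (port name is an assumption):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);
	int bits = TIOCM_DTR | TIOCM_RTS;
	int status;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, TIOCMBIS, &bits) < 0)	/* set -> tty_tiocmset() */
		perror("TIOCMBIS");
	if (ioctl(fd, TIOCMGET, &status) == 0)	/* -> tty_tiocmget() */
		printf("CTS %s\n", (status & TIOCM_CTS) ? "high" : "low");
	close(fd);
	return 0;
}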
+
+/*
+ * Split this up, as gcc can choke on it otherwise..
+ */
+int tty_ioctl(struct inode * inode, struct file * file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct tty_struct *tty, *real_tty;
+ void __user *p = (void __user *)arg;
+ int retval;
+ struct tty_ldisc *ld;
+
+ tty = (struct tty_struct *)file->private_data;
+ if (tty_paranoia_check(tty, inode, "tty_ioctl"))
+ return -EINVAL;
+
+ real_tty = tty;
+ if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
+ tty->driver->subtype == PTY_TYPE_MASTER)
+ real_tty = tty->link;
+
+ /*
+ * Break handling by driver
+ */
+ if (!tty->driver->break_ctl) {
+ switch(cmd) {
+ case TIOCSBRK:
+ case TIOCCBRK:
+ if (tty->driver->ioctl)
+ return tty->driver->ioctl(tty, file, cmd, arg);
+ return -EINVAL;
+
+ /* These two ioctl's always return success; even if */
+ /* the driver doesn't support them. */
+ case TCSBRK:
+ case TCSBRKP:
+ if (!tty->driver->ioctl)
+ return 0;
+ retval = tty->driver->ioctl(tty, file, cmd, arg);
+ if (retval == -ENOIOCTLCMD)
+ retval = 0;
+ return retval;
+ }
+ }
+
+ /*
+ * Factor out some common prep work
+ */
+ switch (cmd) {
+ case TIOCSETD:
+ case TIOCSBRK:
+ case TIOCCBRK:
+ case TCSBRK:
+ case TCSBRKP:
+ retval = tty_check_change(tty);
+ if (retval)
+ return retval;
+ if (cmd != TIOCCBRK) {
+ tty_wait_until_sent(tty, 0);
+ if (signal_pending(current))
+ return -EINTR;
+ }
+ break;
+ }
+
+ switch (cmd) {
+ case TIOCSTI:
+ return tiocsti(tty, p);
+ case TIOCGWINSZ:
+ return tiocgwinsz(tty, p);
+ case TIOCSWINSZ:
+ return tiocswinsz(tty, real_tty, p);
+ case TIOCCONS:
+ return real_tty!=tty ? -EINVAL : tioccons(file);
+ case FIONBIO:
+ return fionbio(file, p);
+ case TIOCEXCL:
+ set_bit(TTY_EXCLUSIVE, &tty->flags);
+ return 0;
+ case TIOCNXCL:
+ clear_bit(TTY_EXCLUSIVE, &tty->flags);
+ return 0;
+ case TIOCNOTTY:
+ if (current->signal->tty != tty)
+ return -ENOTTY;
+ if (current->signal->leader)
+ disassociate_ctty(0);
+ task_lock(current);
+ current->signal->tty = NULL;
+ task_unlock(current);
+ return 0;
+ case TIOCSCTTY:
+ return tiocsctty(tty, arg);
+ case TIOCGPGRP:
+ return tiocgpgrp(tty, real_tty, p);
+ case TIOCSPGRP:
+ return tiocspgrp(tty, real_tty, p);
+ case TIOCGSID:
+ return tiocgsid(tty, real_tty, p);
+ case TIOCGETD:
+ /* FIXME: check this is ok */
+ return put_user(tty->ldisc.num, (int __user *)p);
+ case TIOCSETD:
+ return tiocsetd(tty, p);
+#ifdef CONFIG_VT
+ case TIOCLINUX:
+ return tioclinux(tty, arg);
+#endif
+ /*
+ * Break handling
+ */
+ case TIOCSBRK: /* Turn break on, unconditionally */
+ tty->driver->break_ctl(tty, -1);
+ return 0;
+
+ case TIOCCBRK: /* Turn break off, unconditionally */
+ tty->driver->break_ctl(tty, 0);
+ return 0;
+ case TCSBRK: /* SVID version: non-zero arg --> no break */
+ /*
+ * XXX is the above comment correct, or the
+ * code below correct? Is this ioctl used at
+ * all by anyone?
+ */
+ if (!arg)
+ return send_break(tty, HZ/4);
+ return 0;
+ case TCSBRKP: /* support for POSIX tcsendbreak() */
+ return send_break(tty, arg ? arg*(HZ/10) : HZ/4);
+
+ case TIOCMGET:
+ return tty_tiocmget(tty, file, p);
+
+ case TIOCMSET:
+ case TIOCMBIC:
+ case TIOCMBIS:
+ return tty_tiocmset(tty, file, cmd, p);
+ }
+ if (tty->driver->ioctl) {
+ retval = (tty->driver->ioctl)(tty, file, cmd, arg);
+ if (retval != -ENOIOCTLCMD)
+ return retval;
+ }
+ ld = tty_ldisc_ref_wait(tty);
+ retval = -EINVAL;
+ if (ld->ioctl) {
+ retval = ld->ioctl(tty, file, cmd, arg);
+ if (retval == -ENOIOCTLCMD)
+ retval = -EINVAL;
+ }
+ tty_ldisc_deref(ld);
+ return retval;
+}
+
+
+/*
+ * This implements the "Secure Attention Key" --- the idea is to
+ * prevent trojan horses by killing all processes associated with this
+ * tty when the user hits the "Secure Attention Key". Required for
+ * super-paranoid applications --- see the Orange Book for more details.
+ *
+ * This code could be nicer; ideally it should send a HUP, wait a few
+ * seconds, then send a INT, and then a KILL signal. But you then
+ * have to coordinate with the init process, since all processes associated
+ * with the current tty must be dead before the new getty is allowed
+ * to spawn.
+ *
+ * Now, if it would be correct ;-/ The current code has a nasty hole -
+ * it doesn't catch files in flight. We may send the descriptor to ourselves
+ * via AF_UNIX socket, close it and later fetch from socket. FIXME.
+ *
+ * Nasty bug: do_SAK is being called in interrupt context. This can
+ * deadlock. We punt it up to process context. AKPM - 16Mar2001
+ */
+static void __do_SAK(void *arg)
+{
+#ifdef TTY_SOFT_SAK
+ tty_hangup(tty);
+#else
+ struct tty_struct *tty = arg;
+ struct task_struct *p;
+ int session;
+ int i;
+ struct file *filp;
+ struct tty_ldisc *disc;
+
+ if (!tty)
+ return;
+ session = tty->session;
+
+ /* We don't want an ldisc switch during this */
+ disc = tty_ldisc_ref(tty);
+ if (disc && disc->flush_buffer)
+ disc->flush_buffer(tty);
+ tty_ldisc_deref(disc);
+
+ if (tty->driver->flush_buffer)
+ tty->driver->flush_buffer(tty);
+
+ read_lock(&tasklist_lock);
+ do_each_task_pid(session, PIDTYPE_SID, p) {
+ if (p->signal->tty == tty || session > 0) {
+ printk(KERN_NOTICE "SAK: killed process %d"
+ " (%s): p->signal->session==tty->session\n",
+ p->pid, p->comm);
+ send_sig(SIGKILL, p, 1);
+ continue;
+ }
+ task_lock(p);
+ if (p->files) {
+ spin_lock(&p->files->file_lock);
+ for (i=0; i < p->files->max_fds; i++) {
+ filp = fcheck_files(p->files, i);
+ if (!filp)
+ continue;
+ if (filp->f_op->read == tty_read &&
+ filp->private_data == tty) {
+				printk(KERN_NOTICE "SAK: killed process %d"
+ " (%s): fd#%d opened to the tty\n",
+ p->pid, p->comm, i);
+ send_sig(SIGKILL, p, 1);
+ break;
+ }
+ }
+ spin_unlock(&p->files->file_lock);
+ }
+ task_unlock(p);
+ } while_each_task_pid(session, PIDTYPE_SID, p);
+ read_unlock(&tasklist_lock);
+#endif
+}
+
+/*
+ * The tq handling here is a little racy - tty->SAK_work may already be queued.
+ * Fortunately we don't need to worry, because if ->SAK_work is already queued,
+ * the values which we write to it will be identical to the values which it
+ * already has. --akpm
+ */
+void do_SAK(struct tty_struct *tty)
+{
+ if (!tty)
+ return;
+ PREPARE_WORK(&tty->SAK_work, __do_SAK, tty);
+ schedule_work(&tty->SAK_work);
+}
+
+EXPORT_SYMBOL(do_SAK);
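
The punt-to-process-context trick used by do_SAK() is reusable wherever interrupt code needs to sleep; a sketch with hypothetical names against the 2.6-era workqueue API:

#include <linux/interrupt.h>
#include <linux/workqueue.h>

static void my_handler(void *arg)
{
	/* keventd process context: may sleep, take semaphores, etc. */
}

static struct work_struct my_work;

static irqreturn_t my_irq(int irq, void *dev_id, struct pt_regs *regs)
{
	/* interrupt context: defer the part that might sleep */
	PREPARE_WORK(&my_work, my_handler, dev_id);
	schedule_work(&my_work);
	return IRQ_HANDLED;
}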
+
+/*
+ * This routine is called out of the software interrupt to flush data
+ * from the flip buffer to the line discipline.
+ */
+
+static void flush_to_ldisc(void *private_)
+{
+ struct tty_struct *tty = (struct tty_struct *) private_;
+ unsigned char *cp;
+ char *fp;
+ int count;
+ unsigned long flags;
+ struct tty_ldisc *disc;
+
+ disc = tty_ldisc_ref(tty);
+ if (disc == NULL) /* !TTY_LDISC */
+ return;
+
+ if (test_bit(TTY_DONT_FLIP, &tty->flags)) {
+ /*
+ * Do it after the next timer tick:
+ */
+ schedule_delayed_work(&tty->flip.work, 1);
+ goto out;
+ }
+ spin_lock_irqsave(&tty->read_lock, flags);
+ if (tty->flip.buf_num) {
+ cp = tty->flip.char_buf + TTY_FLIPBUF_SIZE;
+ fp = tty->flip.flag_buf + TTY_FLIPBUF_SIZE;
+ tty->flip.buf_num = 0;
+ tty->flip.char_buf_ptr = tty->flip.char_buf;
+ tty->flip.flag_buf_ptr = tty->flip.flag_buf;
+ } else {
+ cp = tty->flip.char_buf;
+ fp = tty->flip.flag_buf;
+ tty->flip.buf_num = 1;
+ tty->flip.char_buf_ptr = tty->flip.char_buf + TTY_FLIPBUF_SIZE;
+ tty->flip.flag_buf_ptr = tty->flip.flag_buf + TTY_FLIPBUF_SIZE;
+ }
+ count = tty->flip.count;
+ tty->flip.count = 0;
+ spin_unlock_irqrestore(&tty->read_lock, flags);
+
+ disc->receive_buf(tty, cp, fp, count);
+out:
+ tty_ldisc_deref(disc);
+}
+
+/*
+ * Routine which returns the baud rate of the tty
+ *
+ * Note that the baud_table needs to be kept in sync with the
+ * include/asm/termbits.h file.
+ */
+static int baud_table[] = {
+ 0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
+ 9600, 19200, 38400, 57600, 115200, 230400, 460800,
+#ifdef __sparc__
+ 76800, 153600, 307200, 614400, 921600
+#else
+ 500000, 576000, 921600, 1000000, 1152000, 1500000, 2000000,
+ 2500000, 3000000, 3500000, 4000000
+#endif
+};
+
+static int n_baud_table = ARRAY_SIZE(baud_table);
+
+/**
+ * tty_termios_baud_rate
+ * @termios: termios structure
+ *
+ * Convert termios baud rate data into a speed. This should be called
+ * with the termios lock held if this termios is a terminal termios
+ * structure. May change the termios data.
+ */
+
+int tty_termios_baud_rate(struct termios *termios)
+{
+ unsigned int cbaud;
+
+ cbaud = termios->c_cflag & CBAUD;
+
+ if (cbaud & CBAUDEX) {
+ cbaud &= ~CBAUDEX;
+
+ if (cbaud < 1 || cbaud + 15 > n_baud_table)
+ termios->c_cflag &= ~CBAUDEX;
+ else
+ cbaud += 15;
+ }
+ return baud_table[cbaud];
+}
+
+EXPORT_SYMBOL(tty_termios_baud_rate);
+
+/**
+ * tty_get_baud_rate - get tty bit rates
+ * @tty: tty to query
+ *
+ * Returns the baud rate as an integer for this terminal. The
+ * termios lock must be held by the caller and the terminal bit
+ * flags may be updated.
+ */
+
+int tty_get_baud_rate(struct tty_struct *tty)
+{
+ int baud = tty_termios_baud_rate(tty->termios);
+
+ if (baud == 38400 && tty->alt_speed) {
+ if (!tty->warned) {
+ printk(KERN_WARNING "Use of setserial/setrocket to "
+ "set SPD_* flags is deprecated\n");
+ tty->warned = 1;
+ }
+ baud = tty->alt_speed;
+ }
+
+ return baud;
+}
+
+EXPORT_SYMBOL(tty_get_baud_rate);
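
Userspace sees only the encoded CBAUD index; cfsetospeed()/cfgetospeed() manipulate the same bits this table decodes:

#include <stdio.h>
#include <termios.h>
#include <unistd.h>

int main(void)
{
	struct termios tio;

	if (tcgetattr(STDIN_FILENO, &tio) < 0) {
		perror("tcgetattr");
		return 1;
	}
	cfsetospeed(&tio, B38400);	/* stores the index, not 38400 */
	printf("speed code 0x%lx (B38400 = 0x%lx)\n",
	       (unsigned long)cfgetospeed(&tio), (unsigned long)B38400);
	/* tcsetattr(STDIN_FILENO, TCSANOW, &tio) would apply it */
	return 0;
}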
+
+/**
+ * tty_flip_buffer_push - terminal
+ * @tty: tty to push
+ *
+ * Queue a push of the terminal flip buffers to the line discipline. This
+ * function must not be called from IRQ context if tty->low_latency is set.
+ *
+ * In the event of the queue being busy for flipping the work will be
+ * held off and retried later.
+ */
+
+void tty_flip_buffer_push(struct tty_struct *tty)
+{
+ if (tty->low_latency)
+ flush_to_ldisc((void *) tty);
+ else
+ schedule_delayed_work(&tty->flip.work, 1);
+}
+
+EXPORT_SYMBOL(tty_flip_buffer_push);
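
On the producing side, a driver of this vintage typically queues received bytes and then pushes; a sketch with a hypothetical receive routine:

#include <linux/tty.h>
#include <linux/tty_flip.h>

static void my_rx_chars(struct tty_struct *tty,
			const unsigned char *buf, int n)
{
	int i;

	for (i = 0; i < n; i++)
		tty_insert_flip_char(tty, buf[i], TTY_NORMAL);
	/* Hand the flip buffer to the ldisc; with tty->low_latency set
	 * this calls flush_to_ldisc() synchronously. */
	tty_flip_buffer_push(tty);
}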
+
+/*
+ * This subroutine initializes a tty structure.
+ */
+static void initialize_tty_struct(struct tty_struct *tty)
+{
+ memset(tty, 0, sizeof(struct tty_struct));
+ tty->magic = TTY_MAGIC;
+ tty_ldisc_assign(tty, tty_ldisc_get(N_TTY));
+ tty->pgrp = -1;
+ tty->flip.char_buf_ptr = tty->flip.char_buf;
+ tty->flip.flag_buf_ptr = tty->flip.flag_buf;
+ INIT_WORK(&tty->flip.work, flush_to_ldisc, tty);
+ init_MUTEX(&tty->flip.pty_sem);
+ init_MUTEX(&tty->termios_sem);
+ init_waitqueue_head(&tty->write_wait);
+ init_waitqueue_head(&tty->read_wait);
+ INIT_WORK(&tty->hangup_work, do_tty_hangup, tty);
+ sema_init(&tty->atomic_read, 1);
+ sema_init(&tty->atomic_write, 1);
+ spin_lock_init(&tty->read_lock);
+ INIT_LIST_HEAD(&tty->tty_files);
+ INIT_WORK(&tty->SAK_work, NULL, NULL);
+}
+
+/*
+ * The default put_char routine if the driver did not define one.
+ */
+static void tty_default_put_char(struct tty_struct *tty, unsigned char ch)
+{
+ tty->driver->write(tty, &ch, 1);
+}
+
+static struct class_simple *tty_class;
+
+/**
+ * tty_register_device - register a tty device
+ * @driver: the tty driver that describes the tty device
+ * @index: the index in the tty driver for this tty device
+ * @device: a struct device that is associated with this tty device.
+ * This field is optional, if there is no known struct device for this
+ * tty device it can be set to NULL safely.
+ *
+ * This call is required to be made to register an individual tty device if
+ * the tty driver's flags have the TTY_DRIVER_NO_DEVFS bit set. If that
+ * bit is not set, this function should not be called.
+ */
+void tty_register_device(struct tty_driver *driver, unsigned index,
+ struct device *device)
+{
+ char name[64];
+ dev_t dev = MKDEV(driver->major, driver->minor_start) + index;
+
+ if (index >= driver->num) {
+ printk(KERN_ERR "Attempt to register invalid tty line number "
+ " (%d).\n", index);
+ return;
+ }
+
+ devfs_mk_cdev(dev, S_IFCHR | S_IRUSR | S_IWUSR,
+ "%s%d", driver->devfs_name, index + driver->name_base);
+
+ if (driver->type == TTY_DRIVER_TYPE_PTY)
+ pty_line_name(driver, index, name);
+ else
+ tty_line_name(driver, index, name);
+ class_simple_device_add(tty_class, dev, device, name);
+}
+
+/**
+ * tty_unregister_device - unregister a tty device
+ * @driver: the tty driver that describes the tty device
+ * @index: the index in the tty driver for this tty device
+ *
+ * If a tty device is registered with a call to tty_register_device() then
+ * this function must be made when the tty device is gone.
+ */
+void tty_unregister_device(struct tty_driver *driver, unsigned index)
+{
+ devfs_remove("%s%d", driver->devfs_name, index + driver->name_base);
+	class_simple_device_remove(MKDEV(driver->major, driver->minor_start) + index);
+}
+
+EXPORT_SYMBOL(tty_register_device);
+EXPORT_SYMBOL(tty_unregister_device);
+
+struct tty_driver *alloc_tty_driver(int lines)
+{
+ struct tty_driver *driver;
+
+ driver = kmalloc(sizeof(struct tty_driver), GFP_KERNEL);
+ if (driver) {
+ memset(driver, 0, sizeof(struct tty_driver));
+ driver->magic = TTY_DRIVER_MAGIC;
+ driver->num = lines;
+ /* later we'll move allocation of tables here */
+ }
+ return driver;
+}
+
+void put_tty_driver(struct tty_driver *driver)
+{
+ kfree(driver);
+}
+
+void tty_set_operations(struct tty_driver *driver, struct tty_operations *op)
+{
+ driver->open = op->open;
+ driver->close = op->close;
+ driver->write = op->write;
+ driver->put_char = op->put_char;
+ driver->flush_chars = op->flush_chars;
+ driver->write_room = op->write_room;
+ driver->chars_in_buffer = op->chars_in_buffer;
+ driver->ioctl = op->ioctl;
+ driver->set_termios = op->set_termios;
+ driver->throttle = op->throttle;
+ driver->unthrottle = op->unthrottle;
+ driver->stop = op->stop;
+ driver->start = op->start;
+ driver->hangup = op->hangup;
+ driver->break_ctl = op->break_ctl;
+ driver->flush_buffer = op->flush_buffer;
+ driver->set_ldisc = op->set_ldisc;
+ driver->wait_until_sent = op->wait_until_sent;
+ driver->send_xchar = op->send_xchar;
+ driver->read_proc = op->read_proc;
+ driver->write_proc = op->write_proc;
+ driver->tiocmget = op->tiocmget;
+ driver->tiocmset = op->tiocmset;
+}
+
+
+EXPORT_SYMBOL(alloc_tty_driver);
+EXPORT_SYMBOL(put_tty_driver);
+EXPORT_SYMBOL(tty_set_operations);
+
+/*
+ * Called by a tty driver to register itself.
+ */
+int tty_register_driver(struct tty_driver *driver)
+{
+ int error;
+ int i;
+ dev_t dev;
+ void **p = NULL;
+
+ if (driver->flags & TTY_DRIVER_INSTALLED)
+ return 0;
+
+ if (!(driver->flags & TTY_DRIVER_DEVPTS_MEM)) {
+ p = kmalloc(driver->num * 3 * sizeof(void *), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+ memset(p, 0, driver->num * 3 * sizeof(void *));
+ }
+
+ if (!driver->major) {
+		error = alloc_chrdev_region(&dev, driver->minor_start, driver->num,
+ (char*)driver->name);
+ if (!error) {
+ driver->major = MAJOR(dev);
+ driver->minor_start = MINOR(dev);
+ }
+ } else {
+ dev = MKDEV(driver->major, driver->minor_start);
+ error = register_chrdev_region(dev, driver->num,
+ (char*)driver->name);
+ }
+ if (error < 0) {
+ kfree(p);
+ return error;
+ }
+
+ if (p) {
+ driver->ttys = (struct tty_struct **)p;
+ driver->termios = (struct termios **)(p + driver->num);
+		driver->termios_locked = (struct termios **)(p + driver->num * 2);
+ } else {
+ driver->ttys = NULL;
+ driver->termios = NULL;
+ driver->termios_locked = NULL;
+ }
+
+ cdev_init(&driver->cdev, &tty_fops);
+ driver->cdev.owner = driver->owner;
+ error = cdev_add(&driver->cdev, dev, driver->num);
+ if (error) {
+ cdev_del(&driver->cdev);
+ unregister_chrdev_region(dev, driver->num);
+ driver->ttys = NULL;
+ driver->termios = driver->termios_locked = NULL;
+ kfree(p);
+ return error;
+ }
+
+ if (!driver->put_char)
+ driver->put_char = tty_default_put_char;
+
+ list_add(&driver->tty_drivers, &tty_drivers);
+
+ if ( !(driver->flags & TTY_DRIVER_NO_DEVFS) ) {
+ for(i = 0; i < driver->num; i++)
+ tty_register_device(driver, i, NULL);
+ }
+ proc_tty_register_driver(driver);
+ return 0;
+}
+
+EXPORT_SYMBOL(tty_register_driver);
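
Putting the registration API together, a minimal, hypothetical driver skeleton for this kernel generation might look like:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>

static struct tty_driver *my_driver;

static int my_open(struct tty_struct *tty, struct file *filp) { return 0; }
static void my_close(struct tty_struct *tty, struct file *filp) { }
static int my_write(struct tty_struct *tty, const unsigned char *buf, int n)
{
	return n;	/* pretend everything was sent */
}
static int my_write_room(struct tty_struct *tty) { return 255; }

static struct tty_operations my_ops = {
	.open		= my_open,
	.close		= my_close,
	.write		= my_write,
	.write_room	= my_write_room,
};

static int __init my_init(void)
{
	my_driver = alloc_tty_driver(1);
	if (!my_driver)
		return -ENOMEM;
	my_driver->owner	= THIS_MODULE;
	my_driver->driver_name	= "mytty";
	my_driver->name		= "ttyMY";
	my_driver->type		= TTY_DRIVER_TYPE_SERIAL;
	my_driver->subtype	= SERIAL_TYPE_NORMAL;
	my_driver->init_termios	= tty_std_termios;
	tty_set_operations(my_driver, &my_ops);
	return tty_register_driver(my_driver);	/* major 0 => dynamic */
}
module_init(my_init);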
+
+/*
+ * Called by a tty driver to unregister itself.
+ */
+int tty_unregister_driver(struct tty_driver *driver)
+{
+ int i;
+ struct termios *tp;
+ void *p;
+
+ if (driver->refcount)
+ return -EBUSY;
+
+ unregister_chrdev_region(MKDEV(driver->major, driver->minor_start),
+ driver->num);
+
+ list_del(&driver->tty_drivers);
+
+ /*
+ * Free the termios and termios_locked structures because
+ * we don't want to get memory leaks when modular tty
+ * drivers are removed from the kernel.
+ */
+ for (i = 0; i < driver->num; i++) {
+ tp = driver->termios[i];
+ if (tp) {
+ driver->termios[i] = NULL;
+ kfree(tp);
+ }
+ tp = driver->termios_locked[i];
+ if (tp) {
+ driver->termios_locked[i] = NULL;
+ kfree(tp);
+ }
+ if (!(driver->flags & TTY_DRIVER_NO_DEVFS))
+ tty_unregister_device(driver, i);
+ }
+ p = driver->ttys;
+ proc_tty_unregister_driver(driver);
+ driver->ttys = NULL;
+ driver->termios = driver->termios_locked = NULL;
+ kfree(p);
+ cdev_del(&driver->cdev);
+ return 0;
+}
+
+EXPORT_SYMBOL(tty_unregister_driver);
+
+
+/*
+ * Initialize the console device. This is called *early*, so
+ * we can't necessarily depend on lots of kernel help here.
+ * Just do some early initializations, and do the complex setup
+ * later.
+ */
+void __init console_init(void)
+{
+ initcall_t *call;
+
+ /* Setup the default TTY line discipline. */
+ (void) tty_register_ldisc(N_TTY, &tty_ldisc_N_TTY);
+
+ /*
+ * set up the console device so that later boot sequences can
+ * inform about problems etc..
+ */
+#ifdef CONFIG_EARLY_PRINTK
+ disable_early_printk();
+#endif
+#ifdef CONFIG_SERIAL_68360
+ /* This is not a console initcall. I know not what it's doing here.
+ So I haven't moved it. dwmw2 */
+ rs_360_init();
+#endif
+ call = __con_initcall_start;
+ while (call < __con_initcall_end) {
+ (*call)();
+ call++;
+ }
+}
+
+#ifdef CONFIG_VT
+extern int vty_init(void);
+#endif
+
+static int __init tty_class_init(void)
+{
+ tty_class = class_simple_create(THIS_MODULE, "tty");
+ if (IS_ERR(tty_class))
+ return PTR_ERR(tty_class);
+ return 0;
+}
+
+postcore_initcall(tty_class_init);
+
+/* 3/2004 jmc: why do these devices exist? */
+
+static struct cdev tty_cdev, console_cdev;
+#ifdef CONFIG_UNIX98_PTYS
+static struct cdev ptmx_cdev;
+#endif
+#ifdef CONFIG_VT
+static struct cdev vc0_cdev;
+#endif
+
+/*
+ * Ok, now we can initialize the rest of the tty devices and can count
+ * on memory allocations, interrupts etc..
+ */
+static int __init tty_init(void)
+{
+ cdev_init(&tty_cdev, &tty_fops);
+ if (cdev_add(&tty_cdev, MKDEV(TTYAUX_MAJOR, 0), 1) ||
+ register_chrdev_region(MKDEV(TTYAUX_MAJOR, 0), 1, "/dev/tty") < 0)
+ panic("Couldn't register /dev/tty driver\n");
+ devfs_mk_cdev(MKDEV(TTYAUX_MAJOR, 0), S_IFCHR|S_IRUGO|S_IWUGO, "tty");
+ class_simple_device_add(tty_class, MKDEV(TTYAUX_MAJOR, 0), NULL, "tty");
+
+ cdev_init(&console_cdev, &console_fops);
+ if (cdev_add(&console_cdev, MKDEV(TTYAUX_MAJOR, 1), 1) ||
+	    register_chrdev_region(MKDEV(TTYAUX_MAJOR, 1), 1, "/dev/console") < 0)
+ panic("Couldn't register /dev/console driver\n");
+	devfs_mk_cdev(MKDEV(TTYAUX_MAJOR, 1), S_IFCHR|S_IRUSR|S_IWUSR, "console");
+	class_simple_device_add(tty_class, MKDEV(TTYAUX_MAJOR, 1), NULL, "console");
+
+#ifdef CONFIG_UNIX98_PTYS
+ cdev_init(&ptmx_cdev, &ptmx_fops);
+ if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
+ register_chrdev_region(MKDEV(TTYAUX_MAJOR, 2), 1, "/dev/ptmx") < 0)
+ panic("Couldn't register /dev/ptmx driver\n");
+ devfs_mk_cdev(MKDEV(TTYAUX_MAJOR, 2), S_IFCHR|S_IRUGO|S_IWUGO, "ptmx");
+	class_simple_device_add(tty_class, MKDEV(TTYAUX_MAJOR, 2), NULL, "ptmx");
+#endif
+
+#ifdef CONFIG_VT
+ if (console_use_vt) {
+ cdev_init(&vc0_cdev, &console_fops);
+ if (cdev_add(&vc0_cdev, MKDEV(TTY_MAJOR, 0), 1) ||
+ register_chrdev_region(MKDEV(TTY_MAJOR, 0), 1,
+ "/dev/vc/0") < 0)
+ panic("Couldn't register /dev/tty0 driver\n");
+ devfs_mk_cdev(MKDEV(TTY_MAJOR, 0), S_IFCHR|S_IRUSR|S_IWUSR,
+ "vc/0");
+ class_simple_device_add(tty_class, MKDEV(TTY_MAJOR, 0), NULL,
+ "tty0");
+
+ vty_init();
+ }
+#endif
+ return 0;
+}
+module_init(tty_init);
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/drivers/xen/Makefile
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/Makefile Tue Aug 9 15:17:45 2005
@@ -0,0 +1,12 @@
+
+
+obj-y += console/
+obj-y += evtchn/
+obj-y += balloon/
+
+obj-$(CONFIG_XEN_PRIVILEGED_GUEST) += privcmd/
+obj-$(CONFIG_XEN_BLKDEV_BACKEND) += blkback/
+obj-$(CONFIG_XEN_NETDEV_BACKEND) += netback/
+obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += blkfront/
+obj-$(CONFIG_XEN_NETDEV_FRONTEND) += netfront/
+
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/drivers/xen/balloon/Makefile
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/balloon/Makefile Tue Aug 9 15:17:45 2005
@@ -0,0 +1,2 @@
+
+obj-y += balloon.o
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/drivers/xen/balloon/balloon.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/balloon/balloon.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,458 @@
+/******************************************************************************
+ * balloon.c
+ *
+ * Xen balloon driver - enables returning/claiming memory to/from Xen.
+ *
+ * Copyright (c) 2003, B Dragovic
+ * Copyright (c) 2003-2004, M Williamson, K Fraser
+ *
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/smp_lock.h>
+#include <linux/pagemap.h>
+#include <linux/bootmem.h>
+#include <linux/highmem.h>
+#include <linux/vmalloc.h>
+#include <asm-xen/xen_proc.h>
+#include <asm-xen/hypervisor.h>
+#include <asm-xen/ctrl_if.h>
+#include <asm-xen/balloon.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/uaccess.h>
+#include <asm/tlb.h>
+#include <linux/list.h>
+
+static struct proc_dir_entry *balloon_pde;
+
+static DECLARE_MUTEX(balloon_mutex);
+spinlock_t balloon_lock = SPIN_LOCK_UNLOCKED;
+
+/* We aim for 'current allocation' == 'target allocation'. */
+static unsigned long current_pages;
+static unsigned long target_pages;
+
+/* We may hit the hard limit in Xen. If we do then we remember it. */
+static unsigned long hard_limit;
+
+/*
+ * Drivers may alter the memory reservation independently, but they must
+ * inform the balloon driver so that we can avoid hitting the hard limit.
+ */
+static unsigned long driver_pages;
+
+/* List of ballooned pages, threaded through the mem_map array. */
+static LIST_HEAD(ballooned_pages);
+static unsigned long balloon_low, balloon_high;
+
+/* Main work function, always executed in process context. */
+static void balloon_process(void *unused);
+static DECLARE_WORK(balloon_worker, balloon_process, NULL);
+static struct timer_list balloon_timer;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+/* Use the private and mapping fields of struct page as a list. */
+#define PAGE_TO_LIST(p) ( (struct list_head *)&p->private )
+#define LIST_TO_PAGE(l) ( list_entry( ((unsigned long *)l), \
+ struct page, private ) )
+#define UNLIST_PAGE(p) do { list_del(PAGE_TO_LIST(p)); \
+ p->mapping = NULL; \
+ p->private = 0; } while(0)
+#else
+/* There's a dedicated list field in struct page we can use. */
+#define PAGE_TO_LIST(p) ( &p->list )
+#define LIST_TO_PAGE(l) ( list_entry(l, struct page, list) )
+#define UNLIST_PAGE(p) ( list_del(&p->list) )
+#define pte_offset_kernel pte_offset
+#define pud_t pgd_t
+#define pud_offset(d, va) d
+#define pud_none(d) 0
+#define pud_bad(d) 0
+#define subsys_initcall(_fn) __initcall(_fn)
+#define pfn_to_page(_pfn) (mem_map + (_pfn))
+#endif
+
+#define IPRINTK(fmt, args...) \
+ printk(KERN_INFO "xen_mem: " fmt, ##args)
+#define WPRINTK(fmt, args...) \
+ printk(KERN_WARNING "xen_mem: " fmt, ##args)
+
+/* balloon_append: add the given page to the balloon. */
+static void balloon_append(struct page *page)
+{
+ /* Low memory is re-populated first, so highmem pages go at list tail. */
+ if ( PageHighMem(page) )
+ {
+ list_add_tail(PAGE_TO_LIST(page), &ballooned_pages);
+ balloon_high++;
+ }
+ else
+ {
+ list_add(PAGE_TO_LIST(page), &ballooned_pages);
+ balloon_low++;
+ }
+}
+
+/* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
+static struct page *balloon_retrieve(void)
+{
+ struct page *page;
+
+ if ( list_empty(&ballooned_pages) )
+ return NULL;
+
+ page = LIST_TO_PAGE(ballooned_pages.next);
+ UNLIST_PAGE(page);
+
+ if ( PageHighMem(page) )
+ balloon_high--;
+ else
+ balloon_low--;
+
+ return page;
+}
+
+static inline pte_t *get_ptep(unsigned long addr)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+
+ pgd = pgd_offset_k(addr);
+ if ( pgd_none(*pgd) || pgd_bad(*pgd) ) BUG();
+
+ pud = pud_offset(pgd, addr);
+ if ( pud_none(*pud) || pud_bad(*pud) ) BUG();
+
+ pmd = pmd_offset(pud, addr);
+ if ( pmd_none(*pmd) || pmd_bad(*pmd) ) BUG();
+
+ return pte_offset_kernel(pmd, addr);
+}
+
+static void balloon_alarm(unsigned long unused)
+{
+ schedule_work(&balloon_worker);
+}
+
+static unsigned long current_target(void)
+{
+ unsigned long target = min(target_pages, hard_limit);
+ if ( target > (current_pages + balloon_low + balloon_high) )
+ target = current_pages + balloon_low + balloon_high;
+ return target;
+}
+
+/*
+ * We avoid multiple worker processes conflicting via the balloon mutex.
+ * We may of course race updates of the target counts (which are protected
+ * by the balloon lock), or with changes to the Xen hard limit, but we will
+ * recover from these in time.
+ */
+static void balloon_process(void *unused)
+{
+ unsigned long *mfn_list, pfn, i, flags;
+ struct page *page;
+ long credit, debt, rc;
+ void *v;
+
+ down(&balloon_mutex);
+
+ retry:
+ mfn_list = NULL;
+
+ if ( (credit = current_target() - current_pages) > 0 )
+ {
+ mfn_list = (unsigned long *)vmalloc(credit * sizeof(*mfn_list));
+ if ( mfn_list == NULL )
+ goto out;
+
+ balloon_lock(flags);
+ rc = HYPERVISOR_dom_mem_op(
+ MEMOP_increase_reservation, mfn_list, credit, 0);
+ balloon_unlock(flags);
+ if ( rc < credit )
+ {
+ /* We hit the Xen hard limit: reprobe. */
+ if ( HYPERVISOR_dom_mem_op(
+ MEMOP_decrease_reservation, mfn_list, rc, 0) != rc )
+ BUG();
+ hard_limit = current_pages + rc - driver_pages;
+ vfree(mfn_list);
+ goto retry;
+ }
+
+ for ( i = 0; i < credit; i++ )
+ {
+ if ( (page = balloon_retrieve()) == NULL )
+ BUG();
+
+ pfn = page - mem_map;
+ if ( phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY )
+ BUG();
+
+ /* Update P->M and M->P tables. */
+ phys_to_machine_mapping[pfn] = mfn_list[i];
+ queue_machphys_update(mfn_list[i], pfn);
+
+ /* Link back into the page tables if it's not a highmem page. */
+ if ( pfn < max_low_pfn )
+ queue_l1_entry_update(
+ get_ptep((unsigned long)__va(pfn << PAGE_SHIFT)),
+ (mfn_list[i] << PAGE_SHIFT) | pgprot_val(PAGE_KERNEL));
+
+ /* Finally, relinquish the memory back to the system allocator. */
+ ClearPageReserved(page);
+ set_page_count(page, 1);
+ __free_page(page);
+ }
+
+ current_pages += credit;
+ }
+ else if ( credit < 0 )
+ {
+ debt = -credit;
+
+ mfn_list = (unsigned long *)vmalloc(debt * sizeof(*mfn_list));
+ if ( mfn_list == NULL )
+ goto out;
+
+ for ( i = 0; i < debt; i++ )
+ {
+ if ( (page = alloc_page(GFP_HIGHUSER)) == NULL )
+ {
+ debt = i;
+ break;
+ }
+
+ pfn = page - mem_map;
+ mfn_list[i] = phys_to_machine_mapping[pfn];
+
+ if ( !PageHighMem(page) )
+ {
+ v = phys_to_virt(pfn << PAGE_SHIFT);
+ scrub_pages(v, 1);
+ queue_l1_entry_update(get_ptep((unsigned long)v), 0);
+ }
+#ifdef CONFIG_XEN_SCRUB_PAGES
+ else
+ {
+ v = kmap(page);
+ scrub_pages(v, 1);
+ kunmap(page);
+ }
+#endif
+ }
+
+ /* Ensure that ballooned highmem pages don't have cached mappings. */
+ kmap_flush_unused();
+
+ /* Flush updates through and flush the TLB. */
+ xen_tlb_flush();
+
+ /* No more mappings: invalidate pages in P2M and add to balloon. */
+ for ( i = 0; i < debt; i++ )
+ {
+ pfn = mfn_to_pfn(mfn_list[i]);
+ phys_to_machine_mapping[pfn] = INVALID_P2M_ENTRY;
+ balloon_append(pfn_to_page(pfn));
+ }
+
+ if ( HYPERVISOR_dom_mem_op(
+ MEMOP_decrease_reservation, mfn_list, debt, 0) != debt )
+ BUG();
+
+ current_pages -= debt;
+ }
+
+ out:
+ if ( mfn_list != NULL )
+ vfree(mfn_list);
+
+ /* Schedule more work if there is some still to be done. */
+ if ( current_target() != current_pages )
+ mod_timer(&balloon_timer, jiffies + HZ);
+
+ up(&balloon_mutex);
+}
+
+/* Resets the Xen limit, sets new target, and kicks off processing. */
+static void set_new_target(unsigned long target)
+{
+ /* No need for lock. Not read-modify-write updates. */
+ hard_limit = ~0UL;
+ target_pages = target;
+ schedule_work(&balloon_worker);
+}
+
+static void balloon_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
+{
+ switch ( msg->subtype )
+ {
+ case CMSG_MEM_REQUEST_SET:
+ {
+ mem_request_t *req = (mem_request_t *)&msg->msg[0];
+ if ( msg->length != sizeof(mem_request_t) )
+ goto parse_error;
+ set_new_target(req->target);
+ req->status = 0;
+ }
+ break;
+ default:
+ goto parse_error;
+ }
+
+ ctrl_if_send_response(msg);
+ return;
+
+ parse_error:
+ msg->length = 0;
+ ctrl_if_send_response(msg);
+}
+
+static int balloon_write(struct file *file, const char __user *buffer,
+ unsigned long count, void *data)
+{
+ char memstring[64], *endchar;
+ unsigned long long target_bytes;
+
+ if ( !capable(CAP_SYS_ADMIN) )
+ return -EPERM;
+
+ if ( count <= 1 )
+ return -EBADMSG; /* runt */
+ if ( count > sizeof(memstring) )
+ return -EFBIG; /* too long */
+
+ if ( copy_from_user(memstring, buffer, count) )
+ return -EFAULT;
+ memstring[sizeof(memstring)-1] = '\0';
+
+ target_bytes = memparse(memstring, &endchar);
+ set_new_target(target_bytes >> PAGE_SHIFT);
+
+ return count;
+}
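
From dom0 or the guest this is normally driven through the proc file; a userspace sketch requesting a 512 MB target (memparse() accepts the usual K/M/G suffixes):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *target = "512M\n";
	int fd = open("/proc/xen/balloon", O_WRONLY);

	if (fd < 0) {
		perror("open /proc/xen/balloon");
		return 1;
	}
	if (write(fd, target, strlen(target)) < 0)
		perror("write");
	close(fd);
	return 0;
}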
+
+static int balloon_read(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ int len;
+
+#define K(_p) ((_p)<<(PAGE_SHIFT-10))
+ len = sprintf(
+ page,
+ "Current allocation: %8lu kB\n"
+ "Requested target: %8lu kB\n"
+ "Low-mem balloon: %8lu kB\n"
+ "High-mem balloon: %8lu kB\n"
+ "Xen hard limit: ",
+ K(current_pages), K(target_pages), K(balloon_low), K(balloon_high));
+
+ if ( hard_limit != ~0UL )
+ len += sprintf(
+ page + len,
+ "%8lu kB (inc. %8lu kB driver headroom)\n",
+ K(hard_limit), K(driver_pages));
+ else
+ len += sprintf(
+ page + len,
+ " ??? kB\n");
+
+ *eof = 1;
+ return len;
+}
+
+static int __init balloon_init(void)
+{
+ unsigned long pfn;
+ struct page *page;
+
+ IPRINTK("Initialising balloon driver.\n");
+
+ current_pages = min(xen_start_info.nr_pages, max_pfn);
+ target_pages = current_pages;
+ balloon_low = 0;
+ balloon_high = 0;
+ driver_pages = 0UL;
+ hard_limit = ~0UL;
+
+ init_timer(&balloon_timer);
+ balloon_timer.data = 0;
+ balloon_timer.function = balloon_alarm;
+
+ if ( (balloon_pde = create_xen_proc_entry("balloon", 0644)) == NULL )
+ {
+ WPRINTK("Unable to create /proc/xen/balloon.\n");
+ return -1;
+ }
+
+ balloon_pde->read_proc = balloon_read;
+ balloon_pde->write_proc = balloon_write;
+
+ (void)ctrl_if_register_receiver(CMSG_MEM_REQUEST, balloon_ctrlif_rx, 0);
+
+ /* Initialise the balloon with excess memory space. */
+ for ( pfn = xen_start_info.nr_pages; pfn < max_pfn; pfn++ )
+ {
+ page = &mem_map[pfn];
+ if ( !PageReserved(page) )
+ balloon_append(page);
+ }
+
+ return 0;
+}
+
+subsys_initcall(balloon_init);
+
+void balloon_update_driver_allowance(long delta)
+{
+ unsigned long flags;
+ balloon_lock(flags);
+ driver_pages += delta; /* non-atomic update */
+ balloon_unlock(flags);
+}
+
+void balloon_put_pages(unsigned long *mfn_list, unsigned long nr_mfns)
+{
+ unsigned long flags;
+
+ balloon_lock(flags);
+ if ( HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation,
+ mfn_list, nr_mfns, 0) != nr_mfns )
+ BUG();
+ current_pages -= nr_mfns; /* non-atomic update */
+ balloon_unlock(flags);
+
+ schedule_work(&balloon_worker);
+}
+
+EXPORT_SYMBOL(balloon_update_driver_allowance);
+EXPORT_SYMBOL(balloon_put_pages);
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/drivers/xen/blkback/Makefile
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/blkback/Makefile Tue Aug 9 15:17:45 2005
@@ -0,0 +1,2 @@
+
+obj-y := blkback.o control.o interface.o vbd.o
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,645 @@
+/******************************************************************************
+ * arch/xen/drivers/blkif/backend/main.c
+ *
+ * Back-end of the driver for virtual block devices. This portion of the
+ * driver exports a 'unified' block-device interface that can be accessed
+ * by any operating system that implements a compatible front end. A
+ * reference front-end implementation can be found in:
+ * arch/xen/drivers/blkif/frontend
+ *
+ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
+ */
+
+#include "common.h"
+
+/*
+ * These are rather arbitrary. They are fairly large because adjacent requests
+ * pulled from a communication ring are quite likely to end up being part of
+ * the same scatter/gather request at the disc.
+ *
+ * ** TRY INCREASING 'MAX_PENDING_REQS' IF WRITE SPEEDS SEEM TOO LOW **
+ * This will increase the chances of being able to write whole tracks.
+ * 64 should be enough to keep us competitive with Linux.
+ */
+#define MAX_PENDING_REQS 64
+#define BATCH_PER_DOMAIN 16
+
+static unsigned long mmap_vstart;
+#define MMAP_PAGES_PER_REQUEST \
+ (BLKIF_MAX_SEGMENTS_PER_REQUEST + 1)
+#define MMAP_PAGES \
+ (MAX_PENDING_REQS * MMAP_PAGES_PER_REQUEST)
+#define MMAP_VADDR(_req,_seg) \
+ (mmap_vstart + \
+ ((_req) * MMAP_PAGES_PER_REQUEST * PAGE_SIZE) + \
+ ((_seg) * PAGE_SIZE))
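+/*
+ * Layout note (illustrative): each pending request owns a contiguous
+ * window of MMAP_PAGES_PER_REQUEST pages starting at mmap_vstart. The
+ * extra page beyond BLKIF_MAX_SEGMENTS_PER_REQUEST leaves room for the
+ * additional physical segment that vbd_translate() can return when a
+ * virtual segment overruns an extent boundary.
+ */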
+
+/*
+ * Each outstanding request that we've passed to the lower device layers has a
+ * 'pending_req' allocated to it. Each buffer_head that completes decrements
+ * the pendcnt towards zero. When it hits zero, the specified domain has a
+ * response queued for it, with the saved 'id' passed back.
+ */
+typedef struct {
+ blkif_t *blkif;
+ unsigned long id;
+ int nr_pages;
+ atomic_t pendcnt;
+ unsigned short operation;
+ int status;
+ void *bounce_page;
+ unsigned int bounce_off, bounce_len;
+} pending_req_t;
+
+/*
+ * We can't allocate pending_req's in order, since they may complete out of
+ * order. We therefore maintain an allocation ring. This ring also indicates
+ * when enough work has been passed down -- at that point the allocation ring
+ * will be empty.
+ */
+static pending_req_t pending_reqs[MAX_PENDING_REQS];
+static unsigned char pending_ring[MAX_PENDING_REQS];
+static spinlock_t pend_prod_lock = SPIN_LOCK_UNLOCKED;
+/* NB. We use a different index type to differentiate from shared blk rings. */
+typedef unsigned int PEND_RING_IDX;
+#define MASK_PEND_IDX(_i) ((_i)&(MAX_PENDING_REQS-1))
+static PEND_RING_IDX pending_prod, pending_cons;
+#define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)
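+/*
+ * Worked example (illustrative): pending_ring[] holds indices of free
+ * pending_reqs[] slots; dispatch consumes one at pending_cons and
+ * completion returns one at pending_prod. Starting from pending_cons
+ * == 0 and pending_prod == MAX_PENDING_REQS (64), dispatching three
+ * requests and completing one gives pending_cons == 3, pending_prod ==
+ * 65, and NR_PENDING_REQS == 64 - 65 + 3 == 2 requests in flight.
+ */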
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+static kmem_cache_t *buffer_head_cachep;
+#else
+static request_queue_t *plugged_queue;
+static inline void flush_plugged_queue(void)
+{
+ request_queue_t *q = plugged_queue;
+ if ( q != NULL )
+ {
+ if ( q->unplug_fn != NULL )
+ q->unplug_fn(q);
+ blk_put_queue(q);
+ plugged_queue = NULL;
+ }
+}
+#endif
+
+static int do_block_io_op(blkif_t *blkif, int max_to_do);
+static void dispatch_probe(blkif_t *blkif, blkif_request_t *req);
+static void dispatch_rw_block_io(blkif_t *blkif, blkif_request_t *req);
+static void make_response(blkif_t *blkif, unsigned long id,
+ unsigned short op, int st);
+
+static void fast_flush_area(int idx, int nr_pages)
+{
+ multicall_entry_t mcl[MMAP_PAGES_PER_REQUEST];
+ int i;
+
+ for ( i = 0; i < nr_pages; i++ )
+ {
+ mcl[i].op = __HYPERVISOR_update_va_mapping;
+ mcl[i].args[0] = MMAP_VADDR(idx, i) >> PAGE_SHIFT;
+ mcl[i].args[1] = 0;
+ mcl[i].args[2] = 0;
+ }
+
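+ /*
+ * Note (illustrative): all the mapping updates are batched into one
+ * multicall, and only the final entry asks for UVMF_FLUSH_TLB, so the
+ * whole area is torn down with a single TLB flush rather than one
+ * flush per page.
+ */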
+ mcl[nr_pages-1].args[2] = UVMF_FLUSH_TLB;
+ if ( unlikely(HYPERVISOR_multicall(mcl, nr_pages) != 0) )
+ BUG();
+}
+
+
+/******************************************************************
+ * BLOCK-DEVICE SCHEDULER LIST MAINTENANCE
+ */
+
+static struct list_head blkio_schedule_list;
+static spinlock_t blkio_schedule_list_lock;
+
+static int __on_blkdev_list(blkif_t *blkif)
+{
+ return blkif->blkdev_list.next != NULL;
+}
+
+static void remove_from_blkdev_list(blkif_t *blkif)
+{
+ unsigned long flags;
+ if ( !__on_blkdev_list(blkif) ) return;
+ spin_lock_irqsave(&blkio_schedule_list_lock, flags);
+ if ( __on_blkdev_list(blkif) )
+ {
+ list_del(&blkif->blkdev_list);
+ blkif->blkdev_list.next = NULL;
+ blkif_put(blkif);
+ }
+ spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
+}
+
+static void add_to_blkdev_list_tail(blkif_t *blkif)
+{
+ unsigned long flags;
+ if ( __on_blkdev_list(blkif) ) return;
+ spin_lock_irqsave(&blkio_schedule_list_lock, flags);
+ if ( !__on_blkdev_list(blkif) && (blkif->status == CONNECTED) )
+ {
+ list_add_tail(&blkif->blkdev_list, &blkio_schedule_list);
+ blkif_get(blkif);
+ }
+ spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
+}
+
+
+/******************************************************************
+ * SCHEDULER FUNCTIONS
+ */
+
+static DECLARE_WAIT_QUEUE_HEAD(blkio_schedule_wait);
+
+static int blkio_schedule(void *arg)
+{
+ DECLARE_WAITQUEUE(wq, current);
+
+ blkif_t *blkif;
+ struct list_head *ent;
+
+ daemonize(
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+ "xenblkd"
+#endif
+ );
+
+ for ( ; ; )
+ {
+ /* Wait for work to do. */
+ add_wait_queue(&blkio_schedule_wait, &wq);
+ set_current_state(TASK_INTERRUPTIBLE);
+ if ( (NR_PENDING_REQS == MAX_PENDING_REQS) ||
+ list_empty(&blkio_schedule_list) )
+ schedule();
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(&blkio_schedule_wait, &wq);
+
+ /* Queue up a batch of requests. */
+ while ( (NR_PENDING_REQS < MAX_PENDING_REQS) &&
+ !list_empty(&blkio_schedule_list) )
+ {
+ ent = blkio_schedule_list.next;
+ blkif = list_entry(ent, blkif_t, blkdev_list);
+ blkif_get(blkif);
+ remove_from_blkdev_list(blkif);
+ if ( do_block_io_op(blkif, BATCH_PER_DOMAIN) )
+ add_to_blkdev_list_tail(blkif);
+ blkif_put(blkif);
+ }
+
+ /* Push the batch through to disc. */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+ run_task_queue(&tq_disk);
+#else
+ flush_plugged_queue();
+#endif
+ }
+}
+
+static void maybe_trigger_blkio_schedule(void)
+{
+ /*
+ * Needed so that two processes which together make the following predicate
+ * true don't both read stale values and evaluate the predicate
+ * incorrectly. Incredibly unlikely to stall the scheduler on x86, but...
+ */
+ smp_mb();
+
+ if ( (NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
+ !list_empty(&blkio_schedule_list) )
+ wake_up(&blkio_schedule_wait);
+}
+
+
+
+/******************************************************************
+ * COMPLETION CALLBACK -- Called as bh->b_end_io()
+ */
+
+static void __end_block_io_op(pending_req_t *pending_req, int uptodate)
+{
+ unsigned long flags;
+
+ /* An error fails the entire request. */
+ if ( !uptodate )
+ {
+ DPRINTK("Buffer not up-to-date at end of operation\n");
+ pending_req->status = BLKIF_RSP_ERROR;
+ }
+
+ if ( atomic_dec_and_test(&pending_req->pendcnt) )
+ {
+ int pending_idx = pending_req - pending_reqs;
+ if ( unlikely(pending_req->bounce_page != NULL) )
+ {
+ memcpy((void *)(MMAP_VADDR(pending_idx, 0) +
+ pending_req->bounce_off),
+ (void *)((unsigned long)pending_req->bounce_page +
+ pending_req->bounce_off),
+ pending_req->bounce_len);
+ free_page((unsigned long)pending_req->bounce_page);
+ }
+ fast_flush_area(pending_idx, pending_req->nr_pages);
+ make_response(pending_req->blkif, pending_req->id,
+ pending_req->operation, pending_req->status);
+ blkif_put(pending_req->blkif);
+ spin_lock_irqsave(&pend_prod_lock, flags);
+ pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
+ spin_unlock_irqrestore(&pend_prod_lock, flags);
+ maybe_trigger_blkio_schedule();
+ }
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+static void end_block_io_op(struct buffer_head *bh, int uptodate)
+{
+ __end_block_io_op(bh->b_private, uptodate);
+ kmem_cache_free(buffer_head_cachep, bh);
+}
+#else
+static int end_block_io_op(struct bio *bio, unsigned int done, int error)
+{
+ if ( bio->bi_size != 0 )
+ return 1;
+ __end_block_io_op(bio->bi_private, !error);
+ bio_put(bio);
+ return error;
+}
+#endif
+
+
+/******************************************************************************
+ * NOTIFICATION FROM GUEST OS.
+ */
+
+irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs)
+{
+ blkif_t *blkif = dev_id;
+ add_to_blkdev_list_tail(blkif);
+ maybe_trigger_blkio_schedule();
+ return IRQ_HANDLED;
+}
+
+
+
+/******************************************************************
+ * DOWNWARD CALLS -- These interface with the block-device layer proper.
+ */
+
+static int do_block_io_op(blkif_t *blkif, int max_to_do)
+{
+ blkif_ring_t *blk_ring = blkif->blk_ring_base;
+ blkif_request_t *req;
+ BLKIF_RING_IDX i, rp;
+ int more_to_do = 0;
+
+ rp = blk_ring->req_prod;
+ rmb(); /* Ensure we see queued requests up to 'rp'. */
+
+ /* Take items off the comms ring, taking care not to overflow. */
+ for ( i = blkif->blk_req_cons;
+ (i != rp) && ((i-blkif->blk_resp_prod) != BLKIF_RING_SIZE);
+ i++ )
+ {
+ if ( (max_to_do-- == 0) || (NR_PENDING_REQS == MAX_PENDING_REQS) )
+ {
+ more_to_do = 1;
+ break;
+ }
+
+ req = &blk_ring->ring[MASK_BLKIF_IDX(i)].req;
+ switch ( req->operation )
+ {
+ case BLKIF_OP_READ:
+ case BLKIF_OP_WRITE:
+ dispatch_rw_block_io(blkif, req);
+ break;
+
+ case BLKIF_OP_PROBE:
+ dispatch_probe(blkif, req);
+ break;
+
+ default:
+ DPRINTK("error: unknown block io operation [%d]\n",
+ blk_ring->ring[i].req.operation);
+ make_response(blkif, blk_ring->ring[i].req.id,
+ blk_ring->ring[i].req.operation, BLKIF_RSP_ERROR);
+ break;
+ }
+ }
+
+ blkif->blk_req_cons = i;
+ return more_to_do;
+}
+
+static void dispatch_probe(blkif_t *blkif, blkif_request_t *req)
+{
+ int rsp = BLKIF_RSP_ERROR;
+ int pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
+
+ /* We expect one buffer only. */
+ if ( unlikely(req->nr_segments != 1) )
+ goto out;
+
+ /* Make sure the buffer is page-sized. */
+ if ( (blkif_first_sect(req->frame_and_sects[0]) != 0) ||
+ (blkif_last_sect(req->frame_and_sects[0]) != 7) )
+ goto out;
+
+ if ( HYPERVISOR_update_va_mapping_otherdomain(
+ MMAP_VADDR(pending_idx, 0) >> PAGE_SHIFT,
+ (pte_t) { (req->frame_and_sects[0] & PAGE_MASK) | __PAGE_KERNEL },
+ 0, blkif->domid) )
+ goto out;
+
+ rsp = vbd_probe(blkif, (vdisk_t *)MMAP_VADDR(pending_idx, 0),
+ PAGE_SIZE / sizeof(vdisk_t));
+
+ out:
+ fast_flush_area(pending_idx, 1);
+ make_response(blkif, req->id, req->operation, rsp);
+}
+
+static void dispatch_rw_block_io(blkif_t *blkif, blkif_request_t *req)
+{
+ extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]);
+ int operation = (req->operation == BLKIF_OP_WRITE) ? WRITE : READ;
+ short nr_sects;
+ unsigned long buffer, fas;
+ int i, tot_sects, pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
+ pending_req_t *pending_req;
+ unsigned long remap_prot;
+ multicall_entry_t mcl[MMAP_PAGES_PER_REQUEST];
+
+ /* We map virtual scatter/gather segments to physical segments. */
+ int new_segs, nr_psegs = 0;
+ phys_seg_t phys_seg[BLKIF_MAX_SEGMENTS_PER_REQUEST + 1];
+
+ /* Check that number of segments is sane. */
+ if ( unlikely(req->nr_segments == 0) ||
+ unlikely(req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST) )
+ {
+ DPRINTK("Bad number of segments in request (%d)\n", req->nr_segments);
+ goto bad_descriptor;
+ }
+
+ /*
+ * Check each address/size pair is sane, and convert into a
+ * physical device and block offset. Note that if the offset and size
+ * crosses a virtual extent boundary, we may end up with more
+ * physical scatter/gather segments than virtual segments.
+ */
+ for ( i = tot_sects = 0; i < req->nr_segments; i++, tot_sects += nr_sects )
+ {
+ fas = req->frame_and_sects[i];
+ buffer = (fas & PAGE_MASK) | (blkif_first_sect(fas) << 9);
+ nr_sects = blkif_last_sect(fas) - blkif_first_sect(fas) + 1;
+
+ if ( nr_sects <= 0 )
+ goto bad_descriptor;
+
+ phys_seg[nr_psegs].dev = req->device;
+ phys_seg[nr_psegs].sector_number = req->sector_number + tot_sects;
+ phys_seg[nr_psegs].buffer = buffer;
+ phys_seg[nr_psegs].nr_sects = nr_sects;
+
+ /* Translate the request into the relevant 'physical device' */
+ new_segs = vbd_translate(&phys_seg[nr_psegs], blkif, operation);
+ if ( new_segs < 0 )
+ {
+ DPRINTK("access denied: %s of [%llu,%llu] on dev=%04x\n",
+ operation == READ ? "read" : "write",
+ req->sector_number + tot_sects,
+ req->sector_number + tot_sects + nr_sects,
+ req->device);
+ goto bad_descriptor;
+ }
+
+ nr_psegs += new_segs;
+ ASSERT(nr_psegs <= (BLKIF_MAX_SEGMENTS_PER_REQUEST+1));
+ }
+
+ /* Nonsensical zero-sized request? */
+ if ( unlikely(nr_psegs == 0) )
+ goto bad_descriptor;
+
+ if ( operation == READ )
+ remap_prot = _PAGE_PRESENT|_PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_RW;
+ else
+ remap_prot = _PAGE_PRESENT|_PAGE_DIRTY|_PAGE_ACCESSED;
+
+ for ( i = 0; i < nr_psegs; i++ )
+ {
+ mcl[i].op = __HYPERVISOR_update_va_mapping_otherdomain;
+ mcl[i].args[0] = MMAP_VADDR(pending_idx, i) >> PAGE_SHIFT;
+ mcl[i].args[1] = (phys_seg[i].buffer & PAGE_MASK) | remap_prot;
+ mcl[i].args[2] = 0;
+ mcl[i].args[3] = blkif->domid;
+
+ phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx, i))>>PAGE_SHIFT] =
+ FOREIGN_FRAME(phys_seg[i].buffer >> PAGE_SHIFT);
+ }
+
+ if ( unlikely(HYPERVISOR_multicall(mcl, nr_psegs) != 0) )
+ BUG();
+
+ for ( i = 0; i < nr_psegs; i++ )
+ {
+ if ( unlikely(mcl[i].args[5] != 0) )
+ {
+ DPRINTK("invalid buffer -- could not remap it\n");
+ fast_flush_area(pending_idx, nr_psegs);
+ goto bad_descriptor;
+ }
+ }
+
+ pending_req = &pending_reqs[pending_idx];
+ pending_req->blkif = blkif;
+ pending_req->id = req->id;
+ pending_req->operation = operation;
+ pending_req->status = BLKIF_RSP_OKAY;
+ pending_req->nr_pages = nr_psegs;
+ pending_req->bounce_page = NULL;
+ atomic_set(&pending_req->pendcnt, nr_psegs);
+ pending_cons++;
+
+ blkif_get(blkif);
+
+ /* Now we pass each segment down to the real blkdev layer. */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+ for ( i = 0; i < nr_psegs; i++ )
+ {
+ struct buffer_head *bh;
+
+ bh = kmem_cache_alloc(buffer_head_cachep, GFP_ATOMIC);
+ if ( unlikely(bh == NULL) )
+ {
+ __end_block_io_op(pending_req, 0);
+ continue;
+ }
+
+ memset(bh, 0, sizeof (struct buffer_head));
+
+ init_waitqueue_head(&bh->b_wait);
+ bh->b_size = phys_seg[i].nr_sects << 9;
+ bh->b_dev = phys_seg[i].dev;
+ bh->b_rdev = phys_seg[i].dev;
+ bh->b_rsector = (unsigned long)phys_seg[i].sector_number;
+ bh->b_data = (char *)MMAP_VADDR(pending_idx, i) +
+ (phys_seg[i].buffer & ~PAGE_MASK);
+ bh->b_page = virt_to_page(MMAP_VADDR(pending_idx, i));
+ bh->b_end_io = end_block_io_op;
+ bh->b_private = pending_req;
+
+ bh->b_state = (1 << BH_Mapped) | (1 << BH_Lock) |
+ (1 << BH_Req) | (1 << BH_Launder);
+ if ( operation == WRITE )
+ bh->b_state |= (1 << BH_JBD) | (1 << BH_Req) | (1 << BH_Uptodate);
+
+ atomic_set(&bh->b_count, 1);
+
+ /* Dispatch a single request. We'll flush it to disc later. */
+ generic_make_request(operation, bh);
+ }
+#else
+ for ( i = 0; i < nr_psegs; i++ )
+ {
+ struct bio *bio;
+ request_queue_t *q;
+
+ bio = bio_alloc(GFP_ATOMIC, 1);
+ if ( unlikely(bio == NULL) )
+ {
+ __end_block_io_op(pending_req, 0);
+ continue;
+ }
+
+ bio->bi_bdev = phys_seg[i].bdev;
+ bio->bi_private = pending_req;
+ bio->bi_end_io = end_block_io_op;
+ bio->bi_sector = phys_seg[i].sector_number;
+
+ /* Is the request misaligned with respect to hardware sector size? */
+ if ( ((bio->bi_sector | phys_seg[i].nr_sects) &
+ ((bdev_hardsect_size(bio->bi_bdev) >> 9) - 1)) )
+ {
+ /* We can't bounce scatter-gather requests. */
+ if ( (nr_psegs != 1) ||
+ ((pending_req->bounce_page = (void *)
+ __get_free_page(GFP_KERNEL)) == NULL) )
+ {
+ printk("xen_blk: Unaligned scatter-gather request!\n");
+ bio_put(bio);
+ __end_block_io_op(pending_req, 0);
+ continue;
+ }
+
+ /* Record offset and length within a bounce page. */
+ pending_req->bounce_off = (bio->bi_sector << 9) & ~PAGE_MASK;
+ pending_req->bounce_len = phys_seg[i].nr_sects << 9;
+
+ /* Submit a page-aligned I/O. */
+ bio->bi_sector &= ~((PAGE_SIZE >> 9) - 1);
+ bio_add_page(
+ bio, virt_to_page(pending_req->bounce_page), PAGE_SIZE, 0);
+ }
+ else
+ {
+ bio_add_page(
+ bio,
+ virt_to_page(MMAP_VADDR(pending_idx, i)),
+ phys_seg[i].nr_sects << 9,
+ phys_seg[i].buffer & ~PAGE_MASK);
+ }
+
+ if ( (q = bdev_get_queue(bio->bi_bdev)) != plugged_queue )
+ {
+ flush_plugged_queue();
+ blk_get_queue(q);
+ plugged_queue = q;
+ }
+
+ submit_bio(operation, bio);
+ }
+#endif
+
+ return;
+
+ bad_descriptor:
+ make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
+}
+
+
+
+/******************************************************************
+ * MISCELLANEOUS SETUP / TEARDOWN / DEBUGGING
+ */
+
+
+static void make_response(blkif_t *blkif, unsigned long id,
+ unsigned short op, int st)
+{
+ blkif_response_t *resp;
+ unsigned long flags;
+
+ /* Place on the response ring for the relevant domain. */
+ spin_lock_irqsave(&blkif->blk_ring_lock, flags);
+ resp = &blkif->blk_ring_base->
+ ring[MASK_BLKIF_IDX(blkif->blk_resp_prod)].resp;
+ resp->id = id;
+ resp->operation = op;
+ resp->status = st;
+ wmb(); /* Ensure other side can see the response fields. */
+ blkif->blk_ring_base->resp_prod = ++blkif->blk_resp_prod;
+ spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
+
+ /* Kick the relevant domain. */
+ notify_via_evtchn(blkif->evtchn);
+}
+
+void blkif_deschedule(blkif_t *blkif)
+{
+ remove_from_blkdev_list(blkif);
+}
+
+static int __init blkif_init(void)
+{
+ int i;
+
+ if ( !(xen_start_info.flags & SIF_INITDOMAIN) &&
+ !(xen_start_info.flags & SIF_BLK_BE_DOMAIN) )
+ return 0;
+
+ blkif_interface_init();
+
+ if ( (mmap_vstart = allocate_empty_lowmem_region(MMAP_PAGES)) == 0 )
+ BUG();
+
+ pending_cons = 0;
+ pending_prod = MAX_PENDING_REQS;
+ memset(pending_reqs, 0, sizeof(pending_reqs));
+ for ( i = 0; i < MAX_PENDING_REQS; i++ )
+ pending_ring[i] = i;
+
+ spin_lock_init(&blkio_schedule_list_lock);
+ INIT_LIST_HEAD(&blkio_schedule_list);
+
+ if ( kernel_thread(blkio_schedule, 0, CLONE_FS | CLONE_FILES) < 0 )
+ BUG();
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+ buffer_head_cachep = kmem_cache_create(
+ "buffer_head_cache", sizeof(struct buffer_head),
+ 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
+#endif
+
+ blkif_ctrlif_init();
+
+ return 0;
+}
+
+__initcall(blkif_init);
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/drivers/xen/blkback/common.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/blkback/common.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,120 @@
+
+#ifndef __BLKIF__BACKEND__COMMON_H__
+#define __BLKIF__BACKEND__COMMON_H__
+
+#include <linux/config.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/rbtree.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/blkdev.h>
+#include <asm/io.h>
+#include <asm/setup.h>
+#include <asm/pgalloc.h>
+#include <asm-xen/ctrl_if.h>
+#include <asm-xen/hypervisor.h>
+#include <asm-xen/xen-public/io/blkif.h>
+
+#if 0
+#define ASSERT(_p) \
+ if ( !(_p) ) { printk("Assertion '%s' failed, line %d, file %s", #_p , \
+ __LINE__, __FILE__); *(int*)0=0; }
+#define DPRINTK(_f, _a...) printk(KERN_ALERT "(file=%s, line=%d) " _f, \
+ __FILE__ , __LINE__ , ## _a )
+#else
+#define ASSERT(_p) ((void)0)
+#define DPRINTK(_f, _a...) ((void)0)
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+typedef struct rb_root rb_root_t;
+typedef struct rb_node rb_node_t;
+#else
+struct block_device;
+#endif
+
+typedef struct blkif_st {
+ /* Unique identifier for this interface. */
+ domid_t domid;
+ unsigned int handle;
+ /* Physical parameters of the comms window. */
+ unsigned long shmem_frame;
+ unsigned int evtchn;
+ int irq;
+ /* Comms information. */
+ blkif_ring_t *blk_ring_base; /* ioremap()'ed ptr to shmem_frame. */
+ BLKIF_RING_IDX blk_req_cons; /* Request consumer. */
+ BLKIF_RING_IDX blk_resp_prod; /* Private version of resp. producer. */
+ /* VBDs attached to this interface. */
+ rb_root_t vbd_rb; /* Mapping from 16-bit vdevices to VBDs. */
+ spinlock_t vbd_lock; /* Protects VBD mapping. */
+ /* Private fields. */
+ enum { DISCONNECTED, DISCONNECTING, CONNECTED } status;
+ /*
+ * DISCONNECT response is deferred until pending requests are ack'ed.
+ * We therefore need to store the id from the original request.
+ */
+ u8 disconnect_rspid;
+ struct blkif_st *hash_next;
+ struct list_head blkdev_list;
+ spinlock_t blk_ring_lock;
+ atomic_t refcnt;
+
+ struct work_struct work;
+} blkif_t;
+
+void blkif_create(blkif_be_create_t *create);
+void blkif_destroy(blkif_be_destroy_t *destroy);
+void blkif_connect(blkif_be_connect_t *connect);
+int blkif_disconnect(blkif_be_disconnect_t *disconnect, u8 rsp_id);
+void blkif_disconnect_complete(blkif_t *blkif);
+blkif_t *blkif_find_by_handle(domid_t domid, unsigned int handle);
+#define blkif_get(_b) (atomic_inc(&(_b)->refcnt))
+#define blkif_put(_b) \
+ do { \
+ if ( atomic_dec_and_test(&(_b)->refcnt) ) \
+ blkif_disconnect_complete(_b); \
+ } while (0)
+
+/* An entry in a list of xen_extents. */
+typedef struct _blkif_extent_le {
+ blkif_extent_t extent; /* an individual extent */
+ struct _blkif_extent_le *next; /* and a pointer to the next */
+ struct block_device *bdev;
+} blkif_extent_le_t;
+
+typedef struct _vbd {
+ blkif_vdev_t vdevice; /* what the domain refers to this vbd as */
+ unsigned char readonly; /* Non-zero -> read-only */
+ unsigned char type; /* VDISK_TYPE_xxx */
+ blkif_extent_le_t *extents; /* list of xen_extents making up this vbd */
+ rb_node_t rb; /* for linking into R-B tree lookup struct */
+} vbd_t;
+
+void vbd_create(blkif_be_vbd_create_t *create);
+void vbd_grow(blkif_be_vbd_grow_t *grow);
+void vbd_shrink(blkif_be_vbd_shrink_t *shrink);
+void vbd_destroy(blkif_be_vbd_destroy_t *delete);
+int vbd_probe(blkif_t *blkif, vdisk_t *vbd_info, int max_vbds);
+void destroy_all_vbds(blkif_t *blkif);
+
+/* Describes a [partial] disk extent (part of a block io request) */
+typedef struct {
+ unsigned short dev;
+ unsigned short nr_sects;
+ struct block_device *bdev;
+ unsigned long buffer;
+ blkif_sector_t sector_number;
+} phys_seg_t;
+
+int vbd_translate(phys_seg_t *pseg, blkif_t *blkif, int operation);
+
+void blkif_interface_init(void);
+void blkif_ctrlif_init(void);
+
+void blkif_deschedule(blkif_t *blkif);
+
+irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs);
+
+#endif /* __BLKIF__BACKEND__COMMON_H__ */
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/drivers/xen/blkback/control.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/blkback/control.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,87 @@
+/******************************************************************************
+ * arch/xen/drivers/blkif/backend/control.c
+ *
+ * Routines for interfacing with the control plane.
+ *
+ * Copyright (c) 2004, Keir Fraser
+ */
+
+#include "common.h"
+
+static void blkif_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
+{
+ DPRINTK("Received blkif backend message, subtype=%d\n", msg->subtype);
+
+ switch ( msg->subtype )
+ {
+ case CMSG_BLKIF_BE_CREATE:
+ if ( msg->length != sizeof(blkif_be_create_t) )
+ goto parse_error;
+ blkif_create((blkif_be_create_t *)&msg->msg[0]);
+ break;
+ case CMSG_BLKIF_BE_DESTROY:
+ if ( msg->length != sizeof(blkif_be_destroy_t) )
+ goto parse_error;
+ blkif_destroy((blkif_be_destroy_t *)&msg->msg[0]);
+ break;
+ case CMSG_BLKIF_BE_CONNECT:
+ if ( msg->length != sizeof(blkif_be_connect_t) )
+ goto parse_error;
+ blkif_connect((blkif_be_connect_t *)&msg->msg[0]);
+ break;
+ case CMSG_BLKIF_BE_DISCONNECT:
+ if ( msg->length != sizeof(blkif_be_disconnect_t) )
+ goto parse_error;
+ if ( !blkif_disconnect((blkif_be_disconnect_t *)&msg->msg[0],msg->id) )
+ return; /* Sending the response is deferred until later. */
+ break;
+ case CMSG_BLKIF_BE_VBD_CREATE:
+ if ( msg->length != sizeof(blkif_be_vbd_create_t) )
+ goto parse_error;
+ vbd_create((blkif_be_vbd_create_t *)&msg->msg[0]);
+ break;
+ case CMSG_BLKIF_BE_VBD_DESTROY:
+ if ( msg->length != sizeof(blkif_be_vbd_destroy_t) )
+ goto parse_error;
+ vbd_destroy((blkif_be_vbd_destroy_t *)&msg->msg[0]);
+ break;
+ case CMSG_BLKIF_BE_VBD_GROW:
+ if ( msg->length != sizeof(blkif_be_vbd_grow_t) )
+ goto parse_error;
+ vbd_grow((blkif_be_vbd_grow_t *)&msg->msg[0]);
+ break;
+ case CMSG_BLKIF_BE_VBD_SHRINK:
+ if ( msg->length != sizeof(blkif_be_vbd_shrink_t) )
+ goto parse_error;
+ vbd_shrink((blkif_be_vbd_shrink_t *)&msg->msg[0]);
+ break;
+ default:
+ goto parse_error;
+ }
+
+ ctrl_if_send_response(msg);
+ return;
+
+ parse_error:
+ DPRINTK("Parse error while reading message subtype %d, len %d\n",
+ msg->subtype, msg->length);
+ msg->length = 0;
+ ctrl_if_send_response(msg);
+}
+
+void blkif_ctrlif_init(void)
+{
+ ctrl_msg_t cmsg;
+ blkif_be_driver_status_t st;
+
+ (void)ctrl_if_register_receiver(CMSG_BLKIF_BE, blkif_ctrlif_rx,
+ CALLBACK_IN_BLOCKING_CONTEXT);
+
+ /* Send a driver-UP notification to the domain controller. */
+ cmsg.type = CMSG_BLKIF_BE;
+ cmsg.subtype = CMSG_BLKIF_BE_DRIVER_STATUS;
+ cmsg.length = sizeof(blkif_be_driver_status_t);
+ st.status = BLKIF_DRIVER_STATUS_UP;
+ memcpy(cmsg.msg, &st, sizeof(st));
+ ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
+}
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/drivers/xen/blkback/interface.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/blkback/interface.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,246 @@
+/******************************************************************************
+ * arch/xen/drivers/blkif/backend/interface.c
+ *
+ * Block-device interface management.
+ *
+ * Copyright (c) 2004, Keir Fraser
+ */
+
+#include "common.h"
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+#define VMALLOC_VMADDR(x) ((unsigned long)(x))
+#endif
+
+#define BLKIF_HASHSZ 1024
+#define BLKIF_HASH(_d,_h) (((int)(_d)^(int)(_h))&(BLKIF_HASHSZ-1))
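+/*
+ * Illustrative example: the hash XOR-folds the domain id with the
+ * interface handle and masks to the table size, so e.g.
+ * BLKIF_HASH(5, 2) == (5 ^ 2) & 1023 == 7.
+ */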
+
+static kmem_cache_t *blkif_cachep;
+static blkif_t *blkif_hash[BLKIF_HASHSZ];
+
+blkif_t *blkif_find_by_handle(domid_t domid, unsigned int handle)
+{
+ blkif_t *blkif = blkif_hash[BLKIF_HASH(domid, handle)];
+ while ( (blkif != NULL) &&
+ ((blkif->domid != domid) || (blkif->handle != handle)) )
+ blkif = blkif->hash_next;
+ return blkif;
+}
+
+static void __blkif_disconnect_complete(void *arg)
+{
+ blkif_t *blkif = (blkif_t *)arg;
+ ctrl_msg_t cmsg;
+ blkif_be_disconnect_t disc;
+
+ /*
+ * These can't be done in blkif_disconnect() because at that point there
+ * may be outstanding requests at the disc whose asynchronous responses
+ * must still be notified to the remote driver.
+ */
+ unbind_evtchn_from_irq(blkif->evtchn);
+ vfree(blkif->blk_ring_base);
+
+ /* Construct the deferred response message. */
+ cmsg.type = CMSG_BLKIF_BE;
+ cmsg.subtype = CMSG_BLKIF_BE_DISCONNECT;
+ cmsg.id = blkif->disconnect_rspid;
+ cmsg.length = sizeof(blkif_be_disconnect_t);
+ disc.domid = blkif->domid;
+ disc.blkif_handle = blkif->handle;
+ disc.status = BLKIF_BE_STATUS_OKAY;
+ memcpy(cmsg.msg, &disc, sizeof(disc));
+
+ /*
+ * Make sure message is constructed /before/ status change, because
+ * after the status change the 'blkif' structure could be deallocated at
+ * any time. Also make sure we send the response /after/ status change,
+ * as otherwise a subsequent CONNECT request could spuriously fail if
+ * another CPU doesn't see the status change yet.
+ */
+ mb();
+ if ( blkif->status != DISCONNECTING )
+ BUG();
+ blkif->status = DISCONNECTED;
+ mb();
+
+ /* Send the successful response. */
+ ctrl_if_send_response(&cmsg);
+}
+
+void blkif_disconnect_complete(blkif_t *blkif)
+{
+ INIT_WORK(&blkif->work, __blkif_disconnect_complete, (void *)blkif);
+ schedule_work(&blkif->work);
+}
+
+void blkif_create(blkif_be_create_t *create)
+{
+ domid_t domid = create->domid;
+ unsigned int handle = create->blkif_handle;
+ blkif_t **pblkif, *blkif;
+
+ if ( (blkif = kmem_cache_alloc(blkif_cachep, GFP_KERNEL)) == NULL )
+ {
+ DPRINTK("Could not create blkif: out of memory\n");
+ create->status = BLKIF_BE_STATUS_OUT_OF_MEMORY;
+ return;
+ }
+
+ memset(blkif, 0, sizeof(*blkif));
+ blkif->domid = domid;
+ blkif->handle = handle;
+ blkif->status = DISCONNECTED;
+ spin_lock_init(&blkif->vbd_lock);
+ spin_lock_init(&blkif->blk_ring_lock);
+ atomic_set(&blkif->refcnt, 0);
+
+ pblkif = &blkif_hash[BLKIF_HASH(domid, handle)];
+ while ( *pblkif != NULL )
+ {
+ if ( ((*pblkif)->domid == domid) && ((*pblkif)->handle == handle) )
+ {
+ DPRINTK("Could not create blkif: already exists\n");
+ create->status = BLKIF_BE_STATUS_INTERFACE_EXISTS;
+ kmem_cache_free(blkif_cachep, blkif);
+ return;
+ }
+ pblkif = &(*pblkif)->hash_next;
+ }
+
+ blkif->hash_next = *pblkif;
+ *pblkif = blkif;
+
+ DPRINTK("Successfully created blkif\n");
+ create->status = BLKIF_BE_STATUS_OKAY;
+}
+
+void blkif_destroy(blkif_be_destroy_t *destroy)
+{
+ domid_t domid = destroy->domid;
+ unsigned int handle = destroy->blkif_handle;
+ blkif_t **pblkif, *blkif;
+
+ pblkif = &blkif_hash[BLKIF_HASH(domid, handle)];
+ while ( (blkif = *pblkif) != NULL )
+ {
+ if ( (blkif->domid == domid) && (blkif->handle == handle) )
+ {
+ if ( blkif->status != DISCONNECTED )
+ goto still_connected;
+ goto destroy;
+ }
+ pblkif = &blkif->hash_next;
+ }
+
+ destroy->status = BLKIF_BE_STATUS_INTERFACE_NOT_FOUND;
+ return;
+
+ still_connected:
+ destroy->status = BLKIF_BE_STATUS_INTERFACE_CONNECTED;
+ return;
+
+ destroy:
+ *pblkif = blkif->hash_next;
+ destroy_all_vbds(blkif);
+ kmem_cache_free(blkif_cachep, blkif);
+ destroy->status = BLKIF_BE_STATUS_OKAY;
+}
+
+void blkif_connect(blkif_be_connect_t *connect)
+{
+ domid_t domid = connect->domid;
+ unsigned int handle = connect->blkif_handle;
+ unsigned int evtchn = connect->evtchn;
+ unsigned long shmem_frame = connect->shmem_frame;
+ struct vm_struct *vma;
+ pgprot_t prot;
+ int error;
+ blkif_t *blkif;
+
+ blkif = blkif_find_by_handle(domid, handle);
+ if ( unlikely(blkif == NULL) )
+ {
+ DPRINTK("blkif_connect attempted for non-existent blkif (%u,%u)\n",
+ connect->domid, connect->blkif_handle);
+ connect->status = BLKIF_BE_STATUS_INTERFACE_NOT_FOUND;
+ return;
+ }
+
+ if ( (vma = get_vm_area(PAGE_SIZE, VM_IOREMAP)) == NULL )
+ {
+ connect->status = BLKIF_BE_STATUS_OUT_OF_MEMORY;
+ return;
+ }
+
+ prot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED);
+ error = direct_remap_area_pages(&init_mm, VMALLOC_VMADDR(vma->addr),
+ shmem_frame<<PAGE_SHIFT, PAGE_SIZE,
+ prot, domid);
+ if ( error != 0 )
+ {
+ if ( error == -ENOMEM )
+ connect->status = BLKIF_BE_STATUS_OUT_OF_MEMORY;
+ else if ( error == -EFAULT )
+ connect->status = BLKIF_BE_STATUS_MAPPING_ERROR;
+ else
+ connect->status = BLKIF_BE_STATUS_ERROR;
+ vfree(vma->addr);
+ return;
+ }
+
+ if ( blkif->status != DISCONNECTED )
+ {
+ connect->status = BLKIF_BE_STATUS_INTERFACE_CONNECTED;
+ vfree(vma->addr);
+ return;
+ }
+
+ blkif->evtchn = evtchn;
+ blkif->irq = bind_evtchn_to_irq(evtchn);
+ blkif->shmem_frame = shmem_frame;
+ blkif->blk_ring_base = (blkif_ring_t *)vma->addr;
+ blkif->status = CONNECTED;
+ blkif_get(blkif);
+
+ request_irq(blkif->irq, blkif_be_int, 0, "blkif-backend", blkif);
+
+ connect->status = BLKIF_BE_STATUS_OKAY;
+}
+
+int blkif_disconnect(blkif_be_disconnect_t *disconnect, u8 rsp_id)
+{
+ domid_t domid = disconnect->domid;
+ unsigned int handle = disconnect->blkif_handle;
+ blkif_t *blkif;
+
+ blkif = blkif_find_by_handle(domid, handle);
+ if ( unlikely(blkif == NULL) )
+ {
+ DPRINTK("blkif_disconnect attempted for non-existent blkif"
+ " (%u,%u)\n", disconnect->domid, disconnect->blkif_handle);
+ disconnect->status = BLKIF_BE_STATUS_INTERFACE_NOT_FOUND;
+ return 1; /* Caller will send response error message. */
+ }
+
+ if ( blkif->status == CONNECTED )
+ {
+ blkif->status = DISCONNECTING;
+ blkif->disconnect_rspid = rsp_id;
+ wmb(); /* Let other CPUs see the status change. */
+ free_irq(blkif->irq, blkif);
+ blkif_deschedule(blkif);
+ blkif_put(blkif);
+ return 0; /* Caller should not send response message. */
+ }
+
+ disconnect->status = BLKIF_BE_STATUS_OKAY;
+ return 1;
+}
+
+void __init blkif_interface_init(void)
+{
+ blkif_cachep = kmem_cache_create("blkif_cache", sizeof(blkif_t),
+ 0, 0, NULL, NULL);
+ memset(blkif_hash, 0, sizeof(blkif_hash));
+}
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/drivers/xen/blkback/vbd.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/blkback/vbd.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,583 @@
+/******************************************************************************
+ * blkback/vbd.c
+ *
+ * Routines for managing virtual block devices (VBDs).
+ *
+ * NOTE: vbd_lock protects updates to the rb_tree against concurrent lookups
+ * in vbd_translate. All other lookups are implicitly protected because the
+ * only caller (the control message dispatch routine) serializes the calls.
+ *
+ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
+ */
+
+#include "common.h"
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+static dev_t vbd_map_devnum(blkif_pdev_t);
+#define bdev_put(_b) blkdev_put(_b)
+#else
+#define bdev_put(_b) ((void)0)
+#endif
+
+void vbd_create(blkif_be_vbd_create_t *create)
+{
+ vbd_t *vbd;
+ rb_node_t **rb_p, *rb_parent = NULL;
+ blkif_t *blkif;
+ blkif_vdev_t vdevice = create->vdevice;
+
+ blkif = blkif_find_by_handle(create->domid, create->blkif_handle);
+ if ( unlikely(blkif == NULL) )
+ {
+ DPRINTK("vbd_create attempted for non-existent blkif (%u,%u)\n",
+ create->domid, create->blkif_handle);
+ create->status = BLKIF_BE_STATUS_INTERFACE_NOT_FOUND;
+ return;
+ }
+
+ rb_p = &blkif->vbd_rb.rb_node;
+ while ( *rb_p != NULL )
+ {
+ rb_parent = *rb_p;
+ vbd = rb_entry(rb_parent, vbd_t, rb);
+ if ( vdevice < vbd->vdevice )
+ {
+ rb_p = &rb_parent->rb_left;
+ }
+ else if ( vdevice > vbd->vdevice )
+ {
+ rb_p = &rb_parent->rb_right;
+ }
+ else
+ {
+ DPRINTK("vbd_create attempted for already existing vbd\n");
+ create->status = BLKIF_BE_STATUS_VBD_EXISTS;
+ return;
+ }
+ }
+
+ if ( unlikely((vbd = kmalloc(sizeof(vbd_t), GFP_KERNEL)) == NULL) )
+ {
+ DPRINTK("vbd_create: out of memory\n");
+ create->status = BLKIF_BE_STATUS_OUT_OF_MEMORY;
+ return;
+ }
+
+ vbd->vdevice = vdevice;
+ vbd->readonly = create->readonly;
+ vbd->type = VDISK_TYPE_DISK;
+ vbd->extents = NULL;
+
+ spin_lock(&blkif->vbd_lock);
+ rb_link_node(&vbd->rb, rb_parent, rb_p);
+ rb_insert_color(&vbd->rb, &blkif->vbd_rb);
+ spin_unlock(&blkif->vbd_lock);
+
+ DPRINTK("Successful creation of vdev=%04x (dom=%u)\n",
+ vdevice, create->domid);
+ create->status = BLKIF_BE_STATUS_OKAY;
+}
+
+
+/* Grow a VBD by appending a new extent. Fails if the VBD doesn't exist. */
+void vbd_grow(blkif_be_vbd_grow_t *grow)
+{
+ blkif_t *blkif;
+ blkif_extent_le_t **px, *x;
+ vbd_t *vbd = NULL;
+ rb_node_t *rb;
+ blkif_vdev_t vdevice = grow->vdevice;
+ unsigned long sz;
+
+ blkif = blkif_find_by_handle(grow->domid, grow->blkif_handle);
+ if ( unlikely(blkif == NULL) )
+ {
+ DPRINTK("vbd_grow attempted for non-existent blkif (%u,%u)\n",
+ grow->domid, grow->blkif_handle);
+ grow->status = BLKIF_BE_STATUS_INTERFACE_NOT_FOUND;
+ return;
+ }
+
+ rb = blkif->vbd_rb.rb_node;
+ while ( rb != NULL )
+ {
+ vbd = rb_entry(rb, vbd_t, rb);
+ if ( vdevice < vbd->vdevice )
+ rb = rb->rb_left;
+ else if ( vdevice > vbd->vdevice )
+ rb = rb->rb_right;
+ else
+ break;
+ }
+
+ if ( unlikely(vbd == NULL) || unlikely(vbd->vdevice != vdevice) )
+ {
+ DPRINTK("vbd_grow: attempted to append extent to non-existent VBD.\n");
+ grow->status = BLKIF_BE_STATUS_VBD_NOT_FOUND;
+ return;
+ }
+
+ if ( grow->extent.sector_start > 0 )
+ {
+ DPRINTK("vbd_grow: dev %08x start not zero.\n", grow->extent.device);
+ grow->status = BLKIF_BE_STATUS_EXTENT_NOT_FOUND;
+ return;
+ }
+
+ if ( unlikely((x = kmalloc(sizeof(blkif_extent_le_t),
+ GFP_KERNEL)) == NULL) )
+ {
+ DPRINTK("vbd_grow: out of memory\n");
+ grow->status = BLKIF_BE_STATUS_OUT_OF_MEMORY;
+ return;
+ }
+
+ /* Mask to 16-bit for compatibility with old tools */
+ x->extent.device = grow->extent.device & 0xffff;
+ x->extent.sector_start = grow->extent.sector_start;
+ x->extent.sector_length = grow->extent.sector_length;
+ x->next = (blkif_extent_le_t *)NULL;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+ x->bdev = open_by_devnum(vbd_map_devnum(x->extent.device),
+ vbd->readonly ? FMODE_READ : FMODE_WRITE);
+ if ( IS_ERR(x->bdev) )
+ {
+ DPRINTK("vbd_grow: device %08x doesn't exist.\n", x->extent.device);
+ grow->status = BLKIF_BE_STATUS_EXTENT_NOT_FOUND;
+ goto out;
+ }
+ /* XXXcl maybe bd_claim? */
+
+ if ( (x->bdev->bd_disk == NULL) )
+ {
+ DPRINTK("vbd_grow: device %08x doesn't exist.\n", x->extent.device);
+ grow->status = BLKIF_BE_STATUS_EXTENT_NOT_FOUND;
+ bdev_put(x->bdev);
+ goto out;
+ }
+
+ /* get size in sectors */
+ if ( x->bdev->bd_part )
+ sz = x->bdev->bd_part->nr_sects;
+ else
+ sz = x->bdev->bd_disk->capacity;
+
+ vbd->type = (x->bdev->bd_disk->flags & GENHD_FL_CD) ?
+ VDISK_TYPE_CDROM : VDISK_TYPE_DISK;
+
+#else
+ if( !blk_size[MAJOR(x->extent.device)] )
+ {
+ DPRINTK("vbd_grow: device %08x doesn't exist.\n", x->extent.device);
+ grow->status = BLKIF_BE_STATUS_EXTENT_NOT_FOUND;
+ goto out;
+ }
+
+ /* convert blocks (1KB) to sectors */
+ sz = blk_size[MAJOR(x->extent.device)][MINOR(x->extent.device)] * 2;
+
+ if ( sz == 0 )
+ {
+ DPRINTK("vbd_grow: device %08x zero size!\n", x->extent.device);
+ grow->status = BLKIF_BE_STATUS_EXTENT_NOT_FOUND;
+ goto out;
+ }
+#endif
+
+ /*
+ * NB. This test assumes sector_start == 0, which is always the case
+ * in Xen 1.3. In fact the whole grow/shrink interface could do with
+ * some simplification.
+ */
+ if ( x->extent.sector_length > sz )
+ x->extent.sector_length = sz;
+
+ DPRINTK("vbd_grow: requested_len %llu actual_len %lu\n",
+ x->extent.sector_length, sz);
+
+ for ( px = &vbd->extents; *px != NULL; px = &(*px)->next )
+ continue;
+
+ *px = x; /* ATOMIC: no need for vbd_lock. */
+
+ DPRINTK("Successful grow of vdev=%04x (dom=%u)\n",
+ vdevice, grow->domid);
+
+ grow->status = BLKIF_BE_STATUS_OKAY;
+ return;
+
+ out:
+ kfree(x);
+}
+
+
+void vbd_shrink(blkif_be_vbd_shrink_t *shrink)
+{
+ blkif_t *blkif;
+ blkif_extent_le_t **px, *x;
+ vbd_t *vbd = NULL;
+ rb_node_t *rb;
+ blkif_vdev_t vdevice = shrink->vdevice;
+
+ blkif = blkif_find_by_handle(shrink->domid, shrink->blkif_handle);
+ if ( unlikely(blkif == NULL) )
+ {
+ DPRINTK("vbd_shrink attempted for non-existent blkif (%u,%u)\n",
+ shrink->domid, shrink->blkif_handle);
+ shrink->status = BLKIF_BE_STATUS_INTERFACE_NOT_FOUND;
+ return;
+ }
+
+ rb = blkif->vbd_rb.rb_node;
+ while ( rb != NULL )
+ {
+ vbd = rb_entry(rb, vbd_t, rb);
+ if ( vdevice < vbd->vdevice )
+ rb = rb->rb_left;
+ else if ( vdevice > vbd->vdevice )
+ rb = rb->rb_right;
+ else
+ break;
+ }
+
+ if ( unlikely(vbd == NULL) || unlikely(vbd->vdevice != vdevice) )
+ {
+ shrink->status = BLKIF_BE_STATUS_VBD_NOT_FOUND;
+ return;
+ }
+
+ if ( unlikely(vbd->extents == NULL) )
+ {
+ shrink->status = BLKIF_BE_STATUS_EXTENT_NOT_FOUND;
+ return;
+ }
+
+ /* Find the last extent. We now know that there is at least one. */
+ for ( px = &vbd->extents; (*px)->next != NULL; px = &(*px)->next )
+ continue;
+
+ x = *px;
+ *px = x->next; /* ATOMIC: no need for vbd_lock. */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+ bdev_put(x->bdev);
+#endif
+ kfree(x);
+
+ shrink->status = BLKIF_BE_STATUS_OKAY;
+}
+
+
+void vbd_destroy(blkif_be_vbd_destroy_t *destroy)
+{
+ blkif_t *blkif;
+ vbd_t *vbd;
+ rb_node_t *rb;
+ blkif_extent_le_t *x, *t;
+ blkif_vdev_t vdevice = destroy->vdevice;
+
+ blkif = blkif_find_by_handle(destroy->domid, destroy->blkif_handle);
+ if ( unlikely(blkif == NULL) )
+ {
+ DPRINTK("vbd_destroy attempted for non-existent blkif (%u,%u)\n",
+ destroy->domid, destroy->blkif_handle);
+ destroy->status = BLKIF_BE_STATUS_INTERFACE_NOT_FOUND;
+ return;
+ }
+
+ rb = blkif->vbd_rb.rb_node;
+ while ( rb != NULL )
+ {
+ vbd = rb_entry(rb, vbd_t, rb);
+ if ( vdevice < vbd->vdevice )
+ rb = rb->rb_left;
+ else if ( vdevice > vbd->vdevice )
+ rb = rb->rb_right;
+ else
+ goto found;
+ }
+
+ destroy->status = BLKIF_BE_STATUS_VBD_NOT_FOUND;
+ return;
+
+ found:
+ spin_lock(&blkif->vbd_lock);
+ rb_erase(rb, &blkif->vbd_rb);
+ spin_unlock(&blkif->vbd_lock);
+
+ x = vbd->extents;
+ kfree(vbd);
+
+ while ( x != NULL )
+ {
+ t = x->next;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+ bdev_put(x->bdev);
+#endif
+ kfree(x);
+ x = t;
+ }
+}
+
+
+void destroy_all_vbds(blkif_t *blkif)
+{
+ vbd_t *vbd;
+ rb_node_t *rb;
+ blkif_extent_le_t *x, *t;
+
+ spin_lock(&blkif->vbd_lock);
+
+ while ( (rb = blkif->vbd_rb.rb_node) != NULL )
+ {
+ vbd = rb_entry(rb, vbd_t, rb);
+
+ rb_erase(rb, &blkif->vbd_rb);
+ x = vbd->extents;
+ kfree(vbd);
+
+ while ( x != NULL )
+ {
+ t = x->next;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+ bdev_put(x->bdev);
+#endif
+ kfree(x);
+ x = t;
+ }
+ }
+
+ spin_unlock(&blkif->vbd_lock);
+}
+
+
+static int vbd_probe_single(blkif_t *blkif, vdisk_t *vbd_info, vbd_t *vbd)
+{
+ blkif_extent_le_t *x;
+
+ vbd_info->device = vbd->vdevice;
+ vbd_info->info = vbd->type;
+ if ( vbd->readonly )
+ vbd_info->info |= VDISK_FLAG_RO;
+ vbd_info->capacity = 0ULL;
+ for ( x = vbd->extents; x != NULL; x = x->next )
+ vbd_info->capacity += x->extent.sector_length;
+
+ return 0;
+}
+
+
+int vbd_probe(blkif_t *blkif, vdisk_t *vbd_info, int max_vbds)
+{
+ int rc = 0, nr_vbds = 0;
+ rb_node_t *rb;
+
+ spin_lock(&blkif->vbd_lock);
+
+ if ( (rb = blkif->vbd_rb.rb_node) == NULL )
+ goto out;
+
+ new_subtree:
+ /* STEP 1. Find least node (it'll be left-most). */
+ while ( rb->rb_left != NULL )
+ rb = rb->rb_left;
+
+ for ( ; ; )
+ {
+ /* STEP 2. Dealt with left subtree. Now process current node. */
+ if ( (rc = vbd_probe_single(blkif, &vbd_info[nr_vbds],
+ rb_entry(rb, vbd_t, rb))) != 0 )
+ goto out;
+ if ( ++nr_vbds == max_vbds )
+ goto out;
+
+ /* STEP 3. Process right subtree, if any. */
+ if ( rb->rb_right != NULL )
+ {
+ rb = rb->rb_right;
+ goto new_subtree;
+ }
+
+ /* STEP 4. Done both subtrees. Head back through ancestors. */
+ for ( ; ; )
+ {
+ /* We're done when we get back to the root node. */
+ if ( rb->rb_parent == NULL )
+ goto out;
+ /* If we are left of parent, then parent is next to process. */
+ if ( rb->rb_parent->rb_left == rb )
+ break;
+ /* If we are right of parent, then we climb to grandparent. */
+ rb = rb->rb_parent;
+ }
+
+ rb = rb->rb_parent;
+ }
+
+ out:
+ spin_unlock(&blkif->vbd_lock);
+ return (rc == 0) ? nr_vbds : rc;
+}
+
+
+int vbd_translate(phys_seg_t *pseg, blkif_t *blkif, int operation)
+{
+ blkif_extent_le_t *x;
+ vbd_t *vbd;
+ rb_node_t *rb;
+ blkif_sector_t sec_off;
+ unsigned long nr_secs;
+
+ /* Take the vbd_lock because another thread could be updating the tree. */
+ spin_lock(&blkif->vbd_lock);
+
+ rb = blkif->vbd_rb.rb_node;
+ while ( rb != NULL )
+ {
+ vbd = rb_entry(rb, vbd_t, rb);
+ if ( pseg->dev < vbd->vdevice )
+ rb = rb->rb_left;
+ else if ( pseg->dev > vbd->vdevice )
+ rb = rb->rb_right;
+ else
+ goto found;
+ }
+
+ DPRINTK("vbd_translate; domain %u attempted to access "
+ "non-existent VBD.\n", blkif->domid);
+
+ spin_unlock(&blkif->vbd_lock);
+ return -ENODEV;
+
+ found:
+
+ if ( (operation == WRITE) && vbd->readonly )
+ {
+ spin_unlock(&blkif->vbd_lock);
+ return -EACCES;
+ }
+
+ /*
+ * Now iterate through the list of blkif_extents, working out which should
+ * be used to perform the translation.
+ */
+ sec_off = pseg->sector_number;
+ nr_secs = pseg->nr_sects;
+ for ( x = vbd->extents; x != NULL; x = x->next )
+ {
+ if ( sec_off < x->extent.sector_length )
+ {
+ pseg->dev = x->extent.device;
+ pseg->bdev = x->bdev;
+ pseg->sector_number = x->extent.sector_start + sec_off;
+ if ( unlikely((sec_off + nr_secs) > x->extent.sector_length) )
+ goto overrun;
+ spin_unlock(&blkif->vbd_lock);
+ return 1;
+ }
+ sec_off -= x->extent.sector_length;
+ }
+
+ DPRINTK("vbd_translate: end of vbd.\n");
+ spin_unlock(&blkif->vbd_lock);
+ return -EACCES;
+
+ /*
+ * Here we deal with overrun onto the following extent. We don't deal with
+ * overrun of more than one boundary since each request is restricted to
+ * 2^9 512-byte sectors, so it should be trivial for control software to
+ * ensure that extents are large enough to prevent excessive overrun.
+ */
+ overrun:
+
+ /* Adjust length of first chunk to run to end of first extent. */
+ pseg[0].nr_sects = x->extent.sector_length - sec_off;
+
+ /* Set second chunk buffer and length to start where first chunk ended. */
+ pseg[1].buffer = pseg[0].buffer + (pseg[0].nr_sects << 9);
+ pseg[1].nr_sects = nr_secs - pseg[0].nr_sects;
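+ /*
+ * Worked example (illustrative): with a first extent of 100 sectors, a
+ * 20-sector request starting at virtual sector 90 ends up as
+ * pseg[0].nr_sects == 10 (the tail of extent 0) and pseg[1].nr_sects
+ * == 10, with pseg[1].buffer advanced by 10 << 9 bytes so the second
+ * chunk begins at the start of the next extent.
+ */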
+
+ /* Now move to the next extent. Check it exists and is long enough! */
+ if ( unlikely((x = x->next) == NULL) ||
+ unlikely(x->extent.sector_length < pseg[1].nr_sects) )
+ {
+ DPRINTK("vbd_translate: multiple overruns or end of vbd.\n");
+ spin_unlock(&blkif->vbd_lock);
+ return -EACCES;
+ }
+
+ /* Store the real device and start sector for the second chunk. */
+ pseg[1].dev = x->extent.device;
+ pseg[1].bdev = x->bdev;
+ pseg[1].sector_number = x->extent.sector_start;
+
+ spin_unlock(&blkif->vbd_lock);
+ return 2;
+}
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+
+#define MAJOR_XEN(dev) ((dev)>>8)
+#define MINOR_XEN(dev) ((dev) & 0xff)
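+/*
+ * Illustrative note: a blkif_pdev_t cookie packs the Xen-visible major
+ * number in bits 15-8 and the minor in bits 7-0, so a cookie of 0x0301
+ * decodes to major 3, minor 1.
+ */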
+
+#ifndef FANCY_REMAPPING
+static dev_t vbd_map_devnum(blkif_pdev_t cookie)
+{
+ int major = MAJOR_XEN(cookie);
+ int minor = MINOR_XEN(cookie);
+
+ return MKDEV(major, minor);
+}
+#else
+#define XEN_IDE0_MAJOR IDE0_MAJOR
+#define XEN_IDE1_MAJOR IDE1_MAJOR
+#define XEN_IDE2_MAJOR IDE2_MAJOR
+#define XEN_IDE3_MAJOR IDE3_MAJOR
+#define XEN_IDE4_MAJOR IDE4_MAJOR
+#define XEN_IDE5_MAJOR IDE5_MAJOR
+#define XEN_IDE6_MAJOR IDE6_MAJOR
+#define XEN_IDE7_MAJOR IDE7_MAJOR
+#define XEN_IDE8_MAJOR IDE8_MAJOR
+#define XEN_IDE9_MAJOR IDE9_MAJOR
+#define XEN_SCSI_DISK0_MAJOR SCSI_DISK0_MAJOR
+#define XEN_SCSI_DISK1_MAJOR SCSI_DISK1_MAJOR
+#define XEN_SCSI_DISK2_MAJOR SCSI_DISK2_MAJOR
+#define XEN_SCSI_DISK3_MAJOR SCSI_DISK3_MAJOR
+#define XEN_SCSI_DISK4_MAJOR SCSI_DISK4_MAJOR
+#define XEN_SCSI_DISK5_MAJOR SCSI_DISK5_MAJOR
+#define XEN_SCSI_DISK6_MAJOR SCSI_DISK6_MAJOR
+#define XEN_SCSI_DISK7_MAJOR SCSI_DISK7_MAJOR
+#define XEN_SCSI_CDROM_MAJOR SCSI_CDROM_MAJOR
+
+static dev_t vbd_map_devnum(blkif_pdev_t cookie)
+{
+ int new_major;
+ int major = MAJOR_XEN(cookie);
+ int minor = MINOR_XEN(cookie);
+
+ switch (major) {
+ case XEN_IDE0_MAJOR: new_major = IDE0_MAJOR; break;
+ case XEN_IDE1_MAJOR: new_major = IDE1_MAJOR; break;
+ case XEN_IDE2_MAJOR: new_major = IDE2_MAJOR; break;
+ case XEN_IDE3_MAJOR: new_major = IDE3_MAJOR; break;
+ case XEN_IDE4_MAJOR: new_major = IDE4_MAJOR; break;
+ case XEN_IDE5_MAJOR: new_major = IDE5_MAJOR; break;
+ case XEN_IDE6_MAJOR: new_major = IDE6_MAJOR; break;
+ case XEN_IDE7_MAJOR: new_major = IDE7_MAJOR; break;
+ case XEN_IDE8_MAJOR: new_major = IDE8_MAJOR; break;
+ case XEN_IDE9_MAJOR: new_major = IDE9_MAJOR; break;
+ case XEN_SCSI_DISK0_MAJOR: new_major = SCSI_DISK0_MAJOR; break;
+ case XEN_SCSI_DISK1_MAJOR ... XEN_SCSI_DISK7_MAJOR:
+ new_major = SCSI_DISK1_MAJOR + major - XEN_SCSI_DISK1_MAJOR;
+ break;
+ case XEN_SCSI_CDROM_MAJOR: new_major = SCSI_CDROM_MAJOR; break;
+ default: new_major = 0; break;
+ }
+
+ return MKDEV(new_major, minor);
+}
+#endif
+
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) */
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/drivers/xen/blkfront/Kconfig
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/blkfront/Kconfig Tue Aug 9 15:17:45 2005
@@ -0,0 +1,6 @@
+
+config XENBLOCK
+ tristate "Block device driver"
+ depends on ARCH_XEN
+ help
+ Block device driver for Xen
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/drivers/xen/blkfront/Makefile
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/blkfront/Makefile Tue Aug 9 15:17:45 2005
@@ -0,0 +1,3 @@
+
+obj-y := blkfront.o vbd.o
+
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,1410 @@
+/******************************************************************************
+ * blkfront.c
+ *
+ * XenLinux virtual block-device driver.
+ *
+ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
+ * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
+ * Copyright (c) 2004, Christian Limpach
+ *
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+#include "block.h"
+#else
+#include "common.h"
+#include <linux/blk.h>
+#include <linux/tqueue.h>
+#endif
+
+#include <linux/cdrom.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <scsi/scsi.h>
+#include <asm-xen/ctrl_if.h>
+
+typedef unsigned char byte; /* from linux/ide.h */
+
+/* Control whether runtime update of vbds is enabled. */
+#define ENABLE_VBD_UPDATE 1
+
+#if ENABLE_VBD_UPDATE
+static void vbd_update(void);
+#else
+static void vbd_update(void) {}
+#endif
+
+#define BLKIF_STATE_CLOSED 0
+#define BLKIF_STATE_DISCONNECTED 1
+#define BLKIF_STATE_CONNECTED 2
+
+static int blkif_handle = 0;
+static unsigned int blkif_state = BLKIF_STATE_CLOSED;
+static unsigned int blkif_evtchn = 0;
+static unsigned int blkif_irq = 0;
+
+static int blkif_control_rsp_valid;
+static blkif_response_t blkif_control_rsp;
+
+static blkif_ring_t *blk_ring = NULL;
+static BLKIF_RING_IDX resp_cons; /* Response consumer for comms ring. */
+static BLKIF_RING_IDX req_prod; /* Private request producer. */
+
+unsigned long rec_ring_free;
+blkif_request_t rec_ring[BLKIF_RING_SIZE];
+
+static int recovery = 0; /* "Recovery in progress" flag. Protected
+ * by the blkif_io_lock */
+
+/* We plug the I/O ring if the driver is suspended or if the ring is full. */
+#define BLKIF_RING_FULL (((req_prod - resp_cons) == BLKIF_RING_SIZE) || \
+ (blkif_state != BLKIF_STATE_CONNECTED))
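+/*
+ * Note (illustrative): req_prod is our private producer and resp_cons
+ * the response consumer, so (req_prod - resp_cons) counts requests the
+ * backend has not yet answered; the ring is full when a whole
+ * BLKIF_RING_SIZE of them are outstanding.
+ */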
+
+static void kick_pending_request_queues(void);
+
+int __init xlblk_init(void);
+
+void blkif_completion( blkif_request_t *req );
+
+static inline int GET_ID_FROM_FREELIST( void )
+{
+ unsigned long free = rec_ring_free;
+
+ if ( free > BLKIF_RING_SIZE )
+ BUG();
+
+ rec_ring_free = rec_ring[free].id;
+
+ rec_ring[free].id = 0x0fffffee; /* debug */
+
+ return free;
+}
+
+static inline void ADD_ID_TO_FREELIST( unsigned long id )
+{
+ rec_ring[id].id = rec_ring_free;
+ rec_ring_free = id;
+}
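+
+/*
+ * Note (illustrative): the free list is threaded through the id fields
+ * of rec_ring[] itself. rec_ring_free names the first free slot and
+ * each free slot's id field names the next, so GET_ID_FROM_FREELIST()
+ * pops the head and ADD_ID_TO_FREELIST() pushes a slot back, both in
+ * O(1).
+ */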
+
+
+/************************ COMMON CODE (inlined) ************************/
+
+/* Kernel-specific definitions used in the common code */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+#define DISABLE_SCATTERGATHER()
+#else
+static int sg_operation = -1;
+#define DISABLE_SCATTERGATHER() (sg_operation = -1)
+#endif
+
+static inline void translate_req_to_pfn(blkif_request_t *xreq,
+ blkif_request_t *req)
+{
+ int i;
+
+ xreq->operation = req->operation;
+ xreq->nr_segments = req->nr_segments;
+ xreq->device = req->device;
+ /* preserve id */
+ xreq->sector_number = req->sector_number;
+
+ for ( i = 0; i < req->nr_segments; i++ )
+ xreq->frame_and_sects[i] = machine_to_phys(req->frame_and_sects[i]);
+}
+
+static inline void translate_req_to_mfn(blkif_request_t *xreq,
+ blkif_request_t *req)
+{
+ int i;
+
+ xreq->operation = req->operation;
+ xreq->nr_segments = req->nr_segments;
+ xreq->device = req->device;
+ xreq->id = req->id; /* copy id (unlike above) */
+ xreq->sector_number = req->sector_number;
+
+ for ( i = 0; i < req->nr_segments; i++ )
+ xreq->frame_and_sects[i] = phys_to_machine(req->frame_and_sects[i]);
+}
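+
+/*
+ * Note (an inference, not stated explicitly here): the private copies
+ * in rec_ring[] are kept in pfn space via translate_req_to_pfn() so
+ * that, after a save/restore, translate_req_to_mfn() can rebuild valid
+ * machine-frame requests for reissue even though the pfn-to-mfn
+ * mapping may have changed.
+ */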
+
+
+static inline void flush_requests(void)
+{
+ DISABLE_SCATTERGATHER();
+ wmb(); /* Ensure the backend sees the requests before the new req_prod. */
+ blk_ring->req_prod = req_prod;
+ notify_via_evtchn(blkif_evtchn);
+}
+
+
+
+
+/************************** KERNEL VERSION 2.6 **************************/
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+
+module_init(xlblk_init);
+
+#if ENABLE_VBD_UPDATE
+static void update_vbds_task(void *unused)
+{
+ xlvbd_update_vbds();
+}
+
+static void vbd_update(void)
+{
+ static DECLARE_WORK(update_tq, update_vbds_task, NULL);
+ schedule_work(&update_tq);
+}
+#endif /* ENABLE_VBD_UPDATE */
+
+static void kick_pending_request_queues(void)
+{
+
+ if ( (xlbd_blk_queue != NULL) &&
+ test_bit(QUEUE_FLAG_STOPPED, &xlbd_blk_queue->queue_flags) )
+ {
+ blk_start_queue(xlbd_blk_queue);
+ /* XXXcl call to request_fn should not be needed but
+ * we get stuck without... needs investigating
+ */
+ xlbd_blk_queue->request_fn(xlbd_blk_queue);
+ }
+
+}
+
+
+int blkif_open(struct inode *inode, struct file *filep)
+{
+ struct gendisk *gd = inode->i_bdev->bd_disk;
+ struct xlbd_disk_info *di = (struct xlbd_disk_info *)gd->private_data;
+
+ /* Update of usage count is protected by per-device semaphore. */
+ di->mi->usage++;
+
+ return 0;
+}
+
+
+int blkif_release(struct inode *inode, struct file *filep)
+{
+ struct gendisk *gd = inode->i_bdev->bd_disk;
+ struct xlbd_disk_info *di = (struct xlbd_disk_info *)gd->private_data;
+
+ /*
+ * When usage drops to zero it may allow more VBD updates to occur.
+ * Update of usage count is protected by a per-device semaphore.
+ */
+ if (--di->mi->usage == 0) {
+ vbd_update();
+ }
+
+ return 0;
+}
+
+
+int blkif_ioctl(struct inode *inode, struct file *filep,
+ unsigned command, unsigned long argument)
+{
+ int i;
+ /* struct gendisk *gd = inode->i_bdev->bd_disk; */
+
+ DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, dev: 0x%04x\n",
+ command, (long)argument, inode->i_rdev);
+
+ switch (command) {
+
+ case HDIO_GETGEO:
+ /* return ENOSYS to use defaults */
+ return -ENOSYS;
+
+ case CDROMMULTISESSION:
+ DPRINTK("FIXME: support multisession CDs later\n");
+ for ( i = 0; i < sizeof(struct cdrom_multisession); i++ )
+ if ( put_user(0, (byte *)(argument + i)) ) return -EFAULT;
+ return 0;
+
+ default:
+ /*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n",
+ command);*/
+ return -EINVAL; /* same return as native Linux */
+ }
+
+ return 0;
+}
+
+#if 0
+/* check media change: should probably do something here in some cases :-) */
+int blkif_check(kdev_t dev)
+{
+ DPRINTK("blkif_check\n");
+ return 0;
+}
+
+int blkif_revalidate(kdev_t dev)
+{
+ struct block_device *bd;
+ struct gendisk *gd;
+ xen_block_t *disk;
+ unsigned long capacity;
+ int i, rc = 0;
+
+ if ( (bd = bdget(dev)) == NULL )
+ return -EINVAL;
+
+ /*
+ * Update of partition info, and check of usage count, is protected
+ * by the per-block-device semaphore.
+ */
+ down(&bd->bd_sem);
+
+ if ( ((gd = get_gendisk(dev)) == NULL) ||
+ ((disk = xldev_to_xldisk(dev)) == NULL) ||
+ ((capacity = gd->part[MINOR(dev)].nr_sects) == 0) )
+ {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if ( disk->usage > 1 )
+ {
+ rc = -EBUSY;
+ goto out;
+ }
+
+ /* Only reread partition table if VBDs aren't mapped to partitions. */
+ if ( !(gd->flags[MINOR(dev) >> gd->minor_shift] & GENHD_FL_VIRT_PARTNS) )
+ {
+ for ( i = gd->max_p - 1; i >= 0; i-- )
+ {
+ invalidate_device(dev+i, 1);
+ gd->part[MINOR(dev+i)].start_sect = 0;
+ gd->part[MINOR(dev+i)].nr_sects = 0;
+ gd->sizes[MINOR(dev+i)] = 0;
+ }
+
+ grok_partitions(gd, MINOR(dev)>>gd->minor_shift, gd->max_p, capacity);
+ }
+
+ out:
+ up(&bd->bd_sem);
+ bdput(bd);
+ return rc;
+}
+#endif
+
+/*
+ * blkif_queue_request
+ *
+ * request block io
+ *
+ * id: for guest use only.
+ * operation: BLKIF_OP_{READ,WRITE,PROBE}
+ * buffer: buffer to read/write into. this should be a
+ * virtual address in the guest os.
+ */
+static int blkif_queue_request(struct request *req)
+{
+ struct xlbd_disk_info *di =
+ (struct xlbd_disk_info *)req->rq_disk->private_data;
+ unsigned long buffer_ma;
+ blkif_request_t *ring_req;
+ struct bio *bio;
+ struct bio_vec *bvec;
+ int idx;
+ unsigned long id;
+ unsigned int fsect, lsect;
+
+ if ( unlikely(blkif_state != BLKIF_STATE_CONNECTED) )
+ return 1;
+
+ /* Fill out a communications ring structure. */
+ ring_req = &blk_ring->ring[MASK_BLKIF_IDX(req_prod)].req;
+ id = GET_ID_FROM_FREELIST();
+ rec_ring[id].id = (unsigned long) req;
+
+ ring_req->id = id;
+ ring_req->operation = rq_data_dir(req) ? BLKIF_OP_WRITE :
+ BLKIF_OP_READ;
+ ring_req->sector_number = (blkif_sector_t)req->sector;
+ ring_req->device = di->xd_device;
+
+ ring_req->nr_segments = 0;
+ rq_for_each_bio(bio, req)
+ {
+ bio_for_each_segment(bvec, bio, idx)
+ {
+ if ( ring_req->nr_segments == BLKIF_MAX_SEGMENTS_PER_REQUEST )
+ BUG();
+ buffer_ma = page_to_phys(bvec->bv_page);
+ fsect = bvec->bv_offset >> 9;
+ lsect = fsect + (bvec->bv_len >> 9) - 1;
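+ /*
+ * Worked example (illustrative): a 1024-byte bvec at page offset
+ * 512 gives fsect == 1 and lsect == 2, so the entry below encodes
+ * the page's machine address in its upper bits with (1 << 3) | 2
+ * in the low six bits, the same first/last-sector encoding the
+ * backend decodes via blkif_first_sect()/blkif_last_sect().
+ */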
+ ring_req->frame_and_sects[ring_req->nr_segments++] =
+ buffer_ma | (fsect << 3) | lsect;
+ }
+ }
+
+ req_prod++;
+
+ /* Keep a private copy so we can reissue requests when recovering. */
+ translate_req_to_pfn(&rec_ring[id], ring_req);
+
+ return 0;
+}
+
+
+/*
+ * do_blkif_request
+ * read a block; request is in a request queue
+ */
+void do_blkif_request(request_queue_t *rq)
+{
+ struct request *req;
+ int queued;
+
+ DPRINTK("Entered do_blkif_request\n");
+
+ queued = 0;
+
+ while ((req = elv_next_request(rq)) != NULL) {
+ if (!blk_fs_request(req)) {
+ end_request(req, 0);
+ continue;
+ }
+
+ if ( BLKIF_RING_FULL )
+ {
+ blk_stop_queue(rq);
+ break;
+ }
+ DPRINTK("do_blk_req %p: cmd %p, sec %lx, (%u/%li) buffer:%p [%s]\n",
+ req, req->cmd, req->sector, req->current_nr_sectors,
+ req->nr_sectors, req->buffer,
+ rq_data_dir(req) ? "write" : "read");
+ blkdev_dequeue_request(req);
+ if (blkif_queue_request(req)) {
+ blk_stop_queue(rq);
+ break;
+ }
+ queued++;
+ }
+
+ if (queued != 0)
+ flush_requests();
+}
+
+
+static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs)
+{
+ struct request *req;
+ blkif_response_t *bret;
+ BLKIF_RING_IDX i, rp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&blkif_io_lock, flags);
+
+ if ( unlikely(blkif_state == BLKIF_STATE_CLOSED) ||
+ unlikely(recovery) )
+ {
+ spin_unlock_irqrestore(&blkif_io_lock, flags);
+ return IRQ_HANDLED;
+ }
+
+ rp = blk_ring->resp_prod;
+ rmb(); /* Ensure we see queued responses up to 'rp'. */
+
+ for ( i = resp_cons; i != rp; i++ )
+ {
+ unsigned long id;
+ bret = &blk_ring->ring[MASK_BLKIF_IDX(i)].resp;
+
+ id = bret->id;
+ req = (struct request *)rec_ring[id].id;
+
+ blkif_completion( &rec_ring[id] );
+
+ ADD_ID_TO_FREELIST(id); /* overwrites req */
+
+ switch ( bret->operation )
+ {
+ case BLKIF_OP_READ:
+ case BLKIF_OP_WRITE:
+ if ( unlikely(bret->status != BLKIF_RSP_OKAY) )
+ DPRINTK("Bad return from blkdev data request: %x\n",
+ bret->status);
+
+ if ( unlikely(end_that_request_first
+ (req,
+ (bret->status == BLKIF_RSP_OKAY),
+ req->hard_nr_sectors)) )
+ BUG();
+ end_that_request_last(req);
+
+ break;
+ case BLKIF_OP_PROBE:
+ memcpy(&blkif_control_rsp, bret, sizeof(*bret));
+ blkif_control_rsp_valid = 1;
+ break;
+ default:
+ BUG();
+ }
+ }
+
+ resp_cons = i;
+
+ kick_pending_request_queues();
+
+ spin_unlock_irqrestore(&blkif_io_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+#else
+/************************** KERNEL VERSION 2.4 **************************/
+
+static kdev_t sg_dev;
+static unsigned long sg_next_sect;
+
+/*
+ * Request queues with outstanding work, but ring is currently full.
+ * We need no special lock here, as we always access this with the
+ * blkif_io_lock held. We only need a small maximum list.
+ */
+#define MAX_PENDING 8
+static request_queue_t *pending_queues[MAX_PENDING];
+static int nr_pending;
+
+
+#define blkif_io_lock io_request_lock
+
+/*============================================================================*/
+#if ENABLE_VBD_UPDATE
+
+/*
+ * blkif_update_int/update_vbds_task - handle VBD update events.
+ * Schedule a task for keventd to run, which will update the VBDs and perform
+ * the corresponding updates to our view of VBD state.
+ */
+static void update_vbds_task(void *unused)
+{
+ xlvbd_update_vbds();
+}
+
+static void vbd_update(void)
+{
+ static struct tq_struct update_tq;
+ update_tq.routine = update_vbds_task;
+ schedule_task(&update_tq);
+}
+
+#endif /* ENABLE_VBD_UPDATE */
+/*============================================================================*/
+
+
+static void kick_pending_request_queues(void)
+{
+ /* We kick pending request queues if the ring is reasonably empty. */
+ if ( (nr_pending != 0) &&
+ ((req_prod - resp_cons) < (BLKIF_RING_SIZE >> 1)) )
+ {
+ /* Attempt to drain the queue, but bail if the ring becomes full. */
+ while ( (nr_pending != 0) && !BLKIF_RING_FULL )
+ do_blkif_request(pending_queues[--nr_pending]);
+ }
+}
+
+int blkif_open(struct inode *inode, struct file *filep)
+{
+ short xldev = inode->i_rdev;
+ struct gendisk *gd = get_gendisk(xldev);
+ xl_disk_t *disk = xldev_to_xldisk(inode->i_rdev);
+ short minor = MINOR(xldev);
+
+ if ( gd->part[minor].nr_sects == 0 )
+ {
+ /*
+ * Device either doesn't exist, or has zero capacity; we use a few
+ * cheesy heuristics to return the relevant error code
+ */
+ if ( (gd->sizes[minor >> gd->minor_shift] != 0) ||
+ ((minor & (gd->max_p - 1)) != 0) )
+ {
+ /*
+ * We have a real device, but no such partition, or we just have a
+ * partition number, so guess that this is the problem.
+ */
+ return -ENXIO; /* no such device or address */
+ }
+ else if ( gd->flags[minor >> gd->minor_shift] & GENHD_FL_REMOVABLE )
+ {
+ /* This is a removable device => assume that media is missing. */
+ return -ENOMEDIUM; /* media not present (this is a guess) */
+ }
+ else
+ {
+ /* Just go for the general 'no such device' error. */
+ return -ENODEV; /* no such device */
+ }
+ }
+
+ /* Update of usage count is protected by per-device semaphore. */
+ disk->usage++;
+
+ return 0;
+}
+
+
+int blkif_release(struct inode *inode, struct file *filep)
+{
+ xl_disk_t *disk = xldev_to_xldisk(inode->i_rdev);
+
+ /*
+ * When usage drops to zero it may allow more VBD updates to occur.
+ * Update of usage count is protected by a per-device semaphore.
+ */
+ if ( --disk->usage == 0 ) {
+ vbd_update();
+ }
+
+ return 0;
+}
+
+
+int blkif_ioctl(struct inode *inode, struct file *filep,
+ unsigned command, unsigned long argument)
+{
+ kdev_t dev = inode->i_rdev;
+ struct hd_geometry *geo = (struct hd_geometry *)argument;
+ struct gendisk *gd;
+ struct hd_struct *part;
+ int i;
+ unsigned short cylinders;
+ byte heads, sectors;
+
+ /* NB. No need to check permissions. That is done for us. */
+
+ DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, dev: 0x%04x\n",
+ command, (long) argument, dev);
+
+ gd = get_gendisk(dev);
+ part = &gd->part[MINOR(dev)];
+
+ switch ( command )
+ {
+ case BLKGETSIZE:
+ DPRINTK_IOCTL(" BLKGETSIZE: %x %lx\n", BLKGETSIZE, part->nr_sects);
+ return put_user(part->nr_sects, (unsigned long *) argument);
+
+ case BLKGETSIZE64:
+ DPRINTK_IOCTL(" BLKGETSIZE64: %x %llx\n", BLKGETSIZE64,
+ (u64)part->nr_sects * 512);
+ return put_user((u64)part->nr_sects * 512, (u64 *) argument);
+
+ case BLKRRPART: /* re-read partition table */
+ DPRINTK_IOCTL(" BLKRRPART: %x\n", BLKRRPART);
+ return blkif_revalidate(dev);
+
+ case BLKSSZGET:
+ return hardsect_size[MAJOR(dev)][MINOR(dev)];
+
+ case BLKBSZGET: /* get block size */
+ DPRINTK_IOCTL(" BLKBSZGET: %x\n", BLKBSZGET);
+ break;
+
+ case BLKBSZSET: /* set block size */
+ DPRINTK_IOCTL(" BLKBSZSET: %x\n", BLKBSZSET);
+ break;
+
+ case BLKRASET: /* set read-ahead */
+ DPRINTK_IOCTL(" BLKRASET: %x\n", BLKRASET);
+ break;
+
+ case BLKRAGET: /* get read-ahead */
+ DPRINTK_IOCTL(" BLKRAFET: %x\n", BLKRAGET);
+ break;
+
+ case HDIO_GETGEO:
+ DPRINTK_IOCTL(" HDIO_GETGEO: %x\n", HDIO_GETGEO);
+ if (!argument) return -EINVAL;
+
+ /* We don't have real geometry info, but let's at least return
+ values consistent with the size of the device */
+
+ heads = 0xff;
+ sectors = 0x3f;
+ cylinders = part->nr_sects / (heads * sectors);
+
+ if (put_user(0x00, (unsigned long *) &geo->start)) return -EFAULT;
+ if (put_user(heads, (byte *)&geo->heads)) return -EFAULT;
+ if (put_user(sectors, (byte *)&geo->sectors)) return -EFAULT;
+ if (put_user(cylinders, (unsigned short *)&geo->cylinders)) return -EFAULT;
+
+ return 0;
+
+ case HDIO_GETGEO_BIG:
+ DPRINTK_IOCTL(" HDIO_GETGEO_BIG: %x\n", HDIO_GETGEO_BIG);
+ if (!argument) return -EINVAL;
+
+ /* We don't have real geometry info, but let's at least return
+ values consistent with the size of the device */
+
+ heads = 0xff;
+ sectors = 0x3f;
+ cylinders = part->nr_sects / (heads * sectors);
+
+ if (put_user(0x00, (unsigned long *) &geo->start)) return -EFAULT;
+ if (put_user(heads, (byte *)&geo->heads)) return -EFAULT;
+ if (put_user(sectors, (byte *)&geo->sectors)) return -EFAULT;
+ if (put_user(cylinders, (unsigned int *) &geo->cylinders)) return -EFAULT;
+
+ return 0;
+
+ case CDROMMULTISESSION:
+ DPRINTK("FIXME: support multisession CDs later\n");
+ for ( i = 0; i < sizeof(struct cdrom_multisession); i++ )
+ if ( put_user(0, (byte *)(argument + i)) ) return -EFAULT;
+ return 0;
+
+ case SCSI_IOCTL_GET_BUS_NUMBER:
+ DPRINTK("FIXME: SCSI_IOCTL_GET_BUS_NUMBER ioctl in XL blkif");
+ return -ENOSYS;
+
+ default:
+ WPRINTK("ioctl %08x not supported by XL blkif\n", command);
+ return -ENOSYS;
+ }
+
+ return 0;
+}
+
+
+
+/* check media change: should probably do something here in some cases :-) */
+int blkif_check(kdev_t dev)
+{
+ DPRINTK("blkif_check\n");
+ return 0;
+}
+
+int blkif_revalidate(kdev_t dev)
+{
+ struct block_device *bd;
+ struct gendisk *gd;
+ xl_disk_t *disk;
+ unsigned long capacity;
+ int i, rc = 0;
+
+ if ( (bd = bdget(dev)) == NULL )
+ return -EINVAL;
+
+ /*
+ * Update of partition info, and check of usage count, is protected
+ * by the per-block-device semaphore.
+ */
+ down(&bd->bd_sem);
+
+ if ( ((gd = get_gendisk(dev)) == NULL) ||
+ ((disk = xldev_to_xldisk(dev)) == NULL) ||
+ ((capacity = gd->part[MINOR(dev)].nr_sects) == 0) )
+ {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if ( disk->usage > 1 )
+ {
+ rc = -EBUSY;
+ goto out;
+ }
+
+ /* Only reread partition table if VBDs aren't mapped to partitions. */
+ if ( !(gd->flags[MINOR(dev) >> gd->minor_shift] & GENHD_FL_VIRT_PARTNS) )
+ {
+ for ( i = gd->max_p - 1; i >= 0; i-- )
+ {
+ invalidate_device(dev+i, 1);
+ gd->part[MINOR(dev+i)].start_sect = 0;
+ gd->part[MINOR(dev+i)].nr_sects = 0;
+ gd->sizes[MINOR(dev+i)] = 0;
+ }
+
+ grok_partitions(gd, MINOR(dev)>>gd->minor_shift, gd->max_p, capacity);
+ }
+
+ out:
+ up(&bd->bd_sem);
+ bdput(bd);
+ return rc;
+}
+
+
+/*
+ * blkif_queue_request
+ *
+ * request block io
+ *
+ * id: for guest use only.
+ * operation: BLKIF_OP_{READ,WRITE,PROBE}
+ * buffer: buffer to read/write into. this should be a
+ * virtual address in the guest os.
+ */
+static int blkif_queue_request(unsigned long id,
+ int operation,
+ char * buffer,
+ unsigned long sector_number,
+ unsigned short nr_sectors,
+ kdev_t device)
+{
+ unsigned long buffer_ma = virt_to_bus(buffer);
+ unsigned long xid;
+ struct gendisk *gd;
+ blkif_request_t *req;
+ struct buffer_head *bh;
+ unsigned int fsect, lsect;
+
+ fsect = (buffer_ma & ~PAGE_MASK) >> 9;
+ lsect = fsect + nr_sectors - 1;
+
+ /* Buffer must be sector-aligned. Extent mustn't cross a page boundary. */
+ if ( unlikely((buffer_ma & ((1<<9)-1)) != 0) )
+ BUG();
+ if ( lsect > 7 )
+ BUG();
+
+ buffer_ma &= PAGE_MASK;
+
+ if ( unlikely(blkif_state != BLKIF_STATE_CONNECTED) )
+ return 1;
+
+ switch ( operation )
+ {
+
+ case BLKIF_OP_READ:
+ case BLKIF_OP_WRITE:
+ gd = get_gendisk(device);
+
+ /*
+ * Update the sector_number we'll pass down as appropriate; note that
+ * we could sanity-check that the resulting sector lies within this
+ * partition, but the backend driver does that anyhow.
+ */
+ sector_number += gd->part[MINOR(device)].start_sect;
+
+ /*
+ * If this unit doesn't consist of virtual partitions then we clear
+ * the partn bits from the device number.
+ */
+ if ( !(gd->flags[MINOR(device)>>gd->minor_shift] &
+ GENHD_FL_VIRT_PARTNS) )
+ device &= ~(gd->max_p - 1);
+
+ if ( (sg_operation == operation) &&
+ (sg_dev == device) &&
+ (sg_next_sect == sector_number) )
+ {
+
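+ /* Contiguous with the last queued request: chain this buffer head
+ onto it and add another segment to the existing ring slot. */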
+ req = &blk_ring->ring[MASK_BLKIF_IDX(req_prod-1)].req;
+ bh = (struct buffer_head *)id;
+
+ bh->b_reqnext = (struct buffer_head *)rec_ring[req->id].id;
+
+
+ rec_ring[req->id].id = id;
+
+ req->frame_and_sects[req->nr_segments] =
+ buffer_ma | (fsect<<3) | lsect;
+ if ( ++req->nr_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST )
+ sg_next_sect += nr_sectors;
+ else
+ DISABLE_SCATTERGATHER();
+
+ /* Update the copy of the request in the recovery ring. */
+ translate_req_to_pfn(&rec_ring[req->id], req );
+
+ return 0;
+ }
+ else if ( BLKIF_RING_FULL )
+ {
+ return 1;
+ }
+ else
+ {
+ sg_operation = operation;
+ sg_dev = device;
+ sg_next_sect = sector_number + nr_sectors;
+ }
+ break;
+
+ default:
+ panic("unknown op %d\n", operation);
+ }
+
+ /* Fill out a communications ring structure. */
+ req = &blk_ring->ring[MASK_BLKIF_IDX(req_prod)].req;
+
+ xid = GET_ID_FROM_FREELIST();
+ rec_ring[xid].id = id;
+
+ req->id = xid;
+ req->operation = operation;
+ req->sector_number = (blkif_sector_t)sector_number;
+ req->device = device;
+ req->nr_segments = 1;
+ req->frame_and_sects[0] = buffer_ma | (fsect<<3) | lsect;
+
+ req_prod++;
+
+ /* Keep a private copy so we can reissue requests when recovering. */
+ translate_req_to_pfn(&rec_ring[xid], req );
+
+ return 0;
+}
+
+
+/*
+ * do_blkif_request
+ * read a block; request is in a request queue
+ */
+void do_blkif_request(request_queue_t *rq)
+{
+ struct request *req;
+ struct buffer_head *bh, *next_bh;
+ int rw, nsect, full, queued = 0;
+
+ DPRINTK("Entered do_blkif_request\n");
+
+ while ( !rq->plugged && !list_empty(&rq->queue_head))
+ {
+ if ( (req = blkdev_entry_next_request(&rq->queue_head)) == NULL )
+ goto out;
+
+ DPRINTK("do_blkif_request %p: cmd %i, sec %lx, (%li/%li) bh:%p\n",
+ req, req->cmd, req->sector,
+ req->current_nr_sectors, req->nr_sectors, req->bh);
+
+ rw = req->cmd;
+ if ( rw == READA )
+ rw = READ;
+ if ( unlikely((rw != READ) && (rw != WRITE)) )
+ panic("XenoLinux Virtual Block Device: bad cmd: %d\n", rw);
+
+ req->errors = 0;
+
+ bh = req->bh;
+ while ( bh != NULL )
+ {
+ next_bh = bh->b_reqnext;
+ bh->b_reqnext = NULL;
+
+ full = blkif_queue_request(
+ (unsigned long)bh,
+ (rw == READ) ? BLKIF_OP_READ : BLKIF_OP_WRITE,
+ bh->b_data, bh->b_rsector, bh->b_size>>9, bh->b_rdev);
+
+ if ( full )
+ {
+ bh->b_reqnext = next_bh;
+ pending_queues[nr_pending++] = rq;
+ if ( unlikely(nr_pending >= MAX_PENDING) )
+ BUG();
+ goto out;
+ }
+
+ queued++;
+
+ /* Dequeue the buffer head from the request. */
+ nsect = bh->b_size >> 9;
+ bh = req->bh = next_bh;
+
+ if ( bh != NULL )
+ {
+ /* There's another buffer head to do. Update the request. */
+ req->hard_sector += nsect;
+ req->hard_nr_sectors -= nsect;
+ req->sector = req->hard_sector;
+ req->nr_sectors = req->hard_nr_sectors;
+ req->current_nr_sectors = bh->b_size >> 9;
+ req->buffer = bh->b_data;
+ }
+ else
+ {
+ /* That was the last buffer head. Finalise the request. */
+ if ( unlikely(end_that_request_first(req, 1, "XenBlk")) )
+ BUG();
+ blkdev_dequeue_request(req);
+ end_that_request_last(req);
+ }
+ }
+ }
+
+ out:
+ if ( queued != 0 )
+ flush_requests();
+}
+
+
+static void blkif_int(int irq, void *dev_id, struct pt_regs *ptregs)
+{
+ BLKIF_RING_IDX i, rp;
+ unsigned long flags;
+ struct buffer_head *bh, *next_bh;
+
+ spin_lock_irqsave(&io_request_lock, flags);
+
+ if ( unlikely(blkif_state == BLKIF_STATE_CLOSED || recovery) )
+ {
+ spin_unlock_irqrestore(&io_request_lock, flags);
+ return;
+ }
+
+ rp = blk_ring->resp_prod;
+ rmb(); /* Ensure we see queued responses up to 'rp'. */
+
+ for ( i = resp_cons; i != rp; i++ )
+ {
+ unsigned long id;
+ blkif_response_t *bret = &blk_ring->ring[MASK_BLKIF_IDX(i)].resp;
+
+ id = bret->id;
+ bh = (struct buffer_head *)rec_ring[id].id;
+
+ blkif_completion( &rec_ring[id] );
+
+ ADD_ID_TO_FREELIST(id);
+
+ switch ( bret->operation )
+ {
+ case BLKIF_OP_READ:
+ case BLKIF_OP_WRITE:
+ if ( unlikely(bret->status != BLKIF_RSP_OKAY) )
+ DPRINTK("Bad return from blkdev data request: %lx\n",
+ bret->status);
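+ /* A single ring entry may carry a chain of scatter-gather buffer
+ heads; complete each one in turn. */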
+ for ( ; bh != NULL; bh = next_bh )
+ {
+ next_bh = bh->b_reqnext;
+ bh->b_reqnext = NULL;
+ bh->b_end_io(bh, bret->status == BLKIF_RSP_OKAY);
+ }
+
+ break;
+ case BLKIF_OP_PROBE:
+ memcpy(&blkif_control_rsp, bret, sizeof(*bret));
+ blkif_control_rsp_valid = 1;
+ break;
+ default:
+ BUG();
+ }
+ }
+
+ resp_cons = i;
+
+ kick_pending_request_queues();
+
+ spin_unlock_irqrestore(&io_request_lock, flags);
+}
+
+#endif
+
+/***************************** COMMON CODE *******************************/
+
+
+void blkif_control_send(blkif_request_t *req, blkif_response_t *rsp)
+{
+ unsigned long flags, id;
+
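+ /* Wait for a free ring slot, then re-check under the lock, since the
+ interrupt handler also produces and consumes entries. */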
+ retry:
+ while ( (req_prod - resp_cons) == BLKIF_RING_SIZE )
+ {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(1);
+ }
+
+ spin_lock_irqsave(&blkif_io_lock, flags);
+ if ( (req_prod - resp_cons) == BLKIF_RING_SIZE )
+ {
+ spin_unlock_irqrestore(&blkif_io_lock, flags);
+ goto retry;
+ }
+
+ DISABLE_SCATTERGATHER();
+ blk_ring->ring[MASK_BLKIF_IDX(req_prod)].req = *req;
+
+ id = GET_ID_FROM_FREELIST();
+ blk_ring->ring[MASK_BLKIF_IDX(req_prod)].req.id = id;
+ rec_ring[id].id = (unsigned long) req;
+
+ translate_req_to_pfn( &rec_ring[id], req );
+
+ req_prod++;
+ flush_requests();
+
+ spin_unlock_irqrestore(&blkif_io_lock, flags);
+
+ while ( !blkif_control_rsp_valid )
+ {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(1);
+ }
+
+ memcpy(rsp, &blkif_control_rsp, sizeof(*rsp));
+ blkif_control_rsp_valid = 0;
+}
+
+
+/* Send a driver status notification to the domain controller. */
+static void send_driver_status(int ok)
+{
+ ctrl_msg_t cmsg = {
+ .type = CMSG_BLKIF_FE,
+ .subtype = CMSG_BLKIF_FE_DRIVER_STATUS,
+ .length = sizeof(blkif_fe_driver_status_t),
+ };
+ blkif_fe_driver_status_t *msg = (void*)cmsg.msg;
+
+ msg->status = (ok ? BLKIF_DRIVER_STATUS_UP : BLKIF_DRIVER_STATUS_DOWN);
+
+ ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
+}
+
+/* Tell the controller to bring up the interface. */
+static void blkif_send_interface_connect(void)
+{
+ ctrl_msg_t cmsg = {
+ .type = CMSG_BLKIF_FE,
+ .subtype = CMSG_BLKIF_FE_INTERFACE_CONNECT,
+ .length = sizeof(blkif_fe_interface_connect_t),
+ };
+ blkif_fe_interface_connect_t *msg = (void*)cmsg.msg;
+
+ msg->handle = 0;
+ msg->shmem_frame = (virt_to_machine(blk_ring) >> PAGE_SHIFT);
+
+ ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
+}
+
+static void blkif_free(void)
+{
+ /* Prevent new requests being issued until we fix things up. */
+ spin_lock_irq(&blkif_io_lock);
+ recovery = 1;
+ blkif_state = BLKIF_STATE_DISCONNECTED;
+ spin_unlock_irq(&blkif_io_lock);
+
+ /* Free resources associated with old device channel. */
+ if ( blk_ring != NULL )
+ {
+ free_page((unsigned long)blk_ring);
+ blk_ring = NULL;
+ }
+ free_irq(blkif_irq, NULL);
+ blkif_irq = 0;
+
+ unbind_evtchn_from_irq(blkif_evtchn);
+ blkif_evtchn = 0;
+}
+
+static void blkif_close(void)
+{
+}
+
+/* Move from CLOSED to DISCONNECTED state. */
+static void blkif_disconnect(void)
+{
+ if ( blk_ring != NULL )
+ free_page((unsigned long)blk_ring);
+ blk_ring = (blkif_ring_t *)__get_free_page(GFP_KERNEL);
+ blk_ring->req_prod = blk_ring->resp_prod = resp_cons = req_prod = 0;
+ blkif_state = BLKIF_STATE_DISCONNECTED;
+ blkif_send_interface_connect();
+}
+
+static void blkif_reset(void)
+{
+ blkif_free();
+ blkif_disconnect();
+}
+
+static void blkif_recover(void)
+{
+ int i;
+
+ /* Hmm, requests might be re-ordered when we re-issue them.
+ * This will need to be fixed once we have barriers */
+
+ /* Stage 1 : Find active and move to safety. */
+ for ( i = 0; i < BLKIF_RING_SIZE; i++ )
+ {
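+ /* Active entries hold a kernel pointer (>= PAGE_OFFSET); free entries
+ hold a small free-list index, which distinguishes the two. */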
+ if ( rec_ring[i].id >= PAGE_OFFSET )
+ {
+ translate_req_to_mfn(
+ &blk_ring->ring[req_prod].req, &rec_ring[i]);
+ req_prod++;
+ }
+ }
+
+ /* Stage 2 : Set up shadow list. */
+ for ( i = 0; i < req_prod; i++ )
+ {
+ rec_ring[i].id = blk_ring->ring[i].req.id;
+ blk_ring->ring[i].req.id = i;
+ translate_req_to_pfn(&rec_ring[i], &blk_ring->ring[i].req);
+ }
+
+ /* Stage 3 : Set up free list. */
+ for ( ; i < BLKIF_RING_SIZE; i++ )
+ rec_ring[i].id = i+1;
+ rec_ring_free = req_prod;
+ rec_ring[BLKIF_RING_SIZE-1].id = 0x0fffffff;
+
+ /* blk_ring->req_prod will be set when we flush_requests(). */
+ wmb();
+
+ /* Switch off recovery mode, using a memory barrier to ensure that
+ * it's seen before we flush requests - we don't want to miss any
+ * interrupts. */
+ recovery = 0;
+ wmb();
+
+ /* Kicks things back into life. */
+ flush_requests();
+
+ /* Now it is safe to let other people use the interface. */
+ blkif_state = BLKIF_STATE_CONNECTED;
+}
+
+static void blkif_connect(blkif_fe_interface_status_t *status)
+{
+ int err = 0;
+
+ blkif_evtchn = status->evtchn;
+ blkif_irq = bind_evtchn_to_irq(blkif_evtchn);
+
+ err = request_irq(blkif_irq, blkif_int, SA_SAMPLE_RANDOM, "blkif", NULL);
+ if ( err )
+ {
+ WPRINTK("request_irq failed (err=%d)\n", err);
+ return;
+ }
+
+ if ( recovery )
+ {
+ blkif_recover();
+ }
+ else
+ {
+ /* Transition to connected in case we need to do
+ * a partition probe on a whole disk. */
+ blkif_state = BLKIF_STATE_CONNECTED;
+
+ /* Probe for discs attached to the interface. */
+ xlvbd_init();
+ }
+
+ /* Kick pending requests. */
+ spin_lock_irq(&blkif_io_lock);
+ kick_pending_request_queues();
+ spin_unlock_irq(&blkif_io_lock);
+}
+
+static void unexpected(blkif_fe_interface_status_t *status)
+{
+ DPRINTK(" Unexpected blkif status %u in state %u\n",
+ status->status, blkif_state);
+}
+
+static void blkif_status(blkif_fe_interface_status_t *status)
+{
+ if ( status->handle != blkif_handle )
+ {
+ WPRINTK(" Invalid blkif: handle=%u", status->handle);
+ return;
+ }
+
+ switch ( status->status )
+ {
+ case BLKIF_INTERFACE_STATUS_CLOSED:
+ switch ( blkif_state )
+ {
+ case BLKIF_STATE_CLOSED:
+ unexpected(status);
+ break;
+ case BLKIF_STATE_DISCONNECTED:
+ case BLKIF_STATE_CONNECTED:
+ unexpected(status);
+ blkif_close();
+ break;
+ }
+ break;
+
+ case BLKIF_INTERFACE_STATUS_DISCONNECTED:
+ switch ( blkif_state )
+ {
+ case BLKIF_STATE_CLOSED:
+ blkif_disconnect();
+ break;
+ case BLKIF_STATE_DISCONNECTED:
+ case BLKIF_STATE_CONNECTED:
+ /* unexpected(status); */ /* occurs during suspend/resume */
+ blkif_reset();
+ break;
+ }
+ break;
+
+ case BLKIF_INTERFACE_STATUS_CONNECTED:
+ switch ( blkif_state )
+ {
+ case BLKIF_STATE_CLOSED:
+ unexpected(status);
+ blkif_disconnect();
+ blkif_connect(status);
+ break;
+ case BLKIF_STATE_DISCONNECTED:
+ blkif_connect(status);
+ break;
+ case BLKIF_STATE_CONNECTED:
+ unexpected(status);
+ blkif_connect(status);
+ break;
+ }
+ break;
+
+ case BLKIF_INTERFACE_STATUS_CHANGED:
+ switch ( blkif_state )
+ {
+ case BLKIF_STATE_CLOSED:
+ case BLKIF_STATE_DISCONNECTED:
+ unexpected(status);
+ break;
+ case BLKIF_STATE_CONNECTED:
+ vbd_update();
+ break;
+ }
+ break;
+
+ default:
+ WPRINTK(" Invalid blkif status: %d\n", status->status);
+ break;
+ }
+}
+
+
+static void blkif_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
+{
+ switch ( msg->subtype )
+ {
+ case CMSG_BLKIF_FE_INTERFACE_STATUS:
+ if ( msg->length != sizeof(blkif_fe_interface_status_t) )
+ goto parse_error;
+ blkif_status((blkif_fe_interface_status_t *)
+ &msg->msg[0]);
+ break;
+ default:
+ goto parse_error;
+ }
+
+ ctrl_if_send_response(msg);
+ return;
+
+ parse_error:
+ msg->length = 0;
+ ctrl_if_send_response(msg);
+}
+
+int wait_for_blkif(void)
+{
+ int err = 0;
+ int i;
+ send_driver_status(1);
+
+ /*
+ * We should read 'nr_interfaces' from response message and wait
+ * for notifications before proceeding. For now we assume that we
+ * will be notified of exactly one interface.
+ */
+ for ( i=0; (blkif_state != BLKIF_STATE_CONNECTED) && (i < 10*HZ); i++ )
+ {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(1);
+ }
+
+ if ( blkif_state != BLKIF_STATE_CONNECTED )
+ {
+ WPRINTK("Timeout connecting to device!\n");
+ err = -ENOSYS;
+ }
+ return err;
+}
+
+int __init xlblk_init(void)
+{
+ int i;
+
+ if ( (xen_start_info.flags & SIF_INITDOMAIN) ||
+ (xen_start_info.flags & SIF_BLK_BE_DOMAIN) )
+ return 0;
+
+ IPRINTK("Initialising virtual block device driver\n");
+
+ rec_ring_free = 0;
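+ /* Chain every entry into the free list via its id field; 0x0fffffff
+ terminates the chain. */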
+ for ( i = 0; i < BLKIF_RING_SIZE; i++ )
+ rec_ring[i].id = i+1;
+ rec_ring[BLKIF_RING_SIZE-1].id = 0x0fffffff;
+
+ (void)ctrl_if_register_receiver(CMSG_BLKIF_FE, blkif_ctrlif_rx,
+ CALLBACK_IN_BLOCKING_CONTEXT);
+
+ wait_for_blkif();
+
+ return 0;
+}
+
+void blkdev_suspend(void)
+{
+}
+
+void blkdev_resume(void)
+{
+ send_driver_status(1);
+}
+
+/* XXXXX THIS IS A TEMPORARY FUNCTION UNTIL WE GET GRANT TABLES */
+
+void blkif_completion(blkif_request_t *req)
+{
+ int i;
+
+ switch ( req->operation )
+ {
+ case BLKIF_OP_READ:
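+ /* Re-assert the machine-to-physical mapping of each frame a completed
+ read touched -- a stop-gap until grant tables exist (see note above). */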
+ for ( i = 0; i < req->nr_segments; i++ )
+ {
+ unsigned long pfn = req->frame_and_sects[i] >> PAGE_SHIFT;
+ unsigned long mfn = phys_to_machine_mapping[pfn];
+ xen_machphys_update(mfn, pfn);
+ }
+ break;
+ }
+
+}
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/drivers/xen/blkfront/block.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/blkfront/block.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,125 @@
+/******************************************************************************
+ * block.h
+ *
+ * Shared definitions between all levels of XenLinux Virtual block devices.
+ *
+ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
+ * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
+ * Copyright (c) 2004-2005, Christian Limpach
+ *
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef __XEN_DRIVERS_BLOCK_H__
+#define __XEN_DRIVERS_BLOCK_H__
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/hdreg.h>
+#include <linux/blkdev.h>
+#include <linux/major.h>
+#include <linux/devfs_fs_kernel.h>
+#include <asm-xen/xen-public/xen.h>
+#include <asm-xen/xen-public/io/blkif.h>
+#include <asm/io.h>
+#include <asm/atomic.h>
+#include <asm/uaccess.h>
+
+#if 1
+#define IPRINTK(fmt, args...) \
+ printk(KERN_INFO "xen_blk: " fmt, ##args)
+#else
+#define IPRINTK(fmt, args...) ((void)0)
+#endif
+
+#if 1
+#define WPRINTK(fmt, args...) \
+ printk(KERN_WARNING "xen_blk: " fmt, ##args)
+#else
+#define WPRINTK(fmt, args...) ((void)0)
+#endif
+
+#if 0
+#define DPRINTK(_f, _a...) printk ( KERN_ALERT _f , ## _a )
+#else
+#define DPRINTK(_f, _a...) ((void)0)
+#endif
+
+#if 0
+#define DPRINTK_IOCTL(_f, _a...) printk ( KERN_ALERT _f , ## _a )
+#else
+#define DPRINTK_IOCTL(_f, _a...) ((void)0)
+#endif
+
+struct xlbd_type_info {
+ int partn_shift;
+ int disks_per_major;
+ char *devname;
+ char *diskname;
+};
+
+/*
+ * We have one of these per vbd, whether ide, scsi or 'other'. They
+ * hang in private_data off the gendisk structure. We may end up
+ * putting all kinds of interesting stuff here :-)
+ */
+struct xlbd_major_info {
+ int major;
+ int index;
+ int usage;
+ struct xlbd_type_info *type;
+};
+
+struct xlbd_disk_info {
+ int xd_device;
+ struct xlbd_major_info *mi;
+};
+
+typedef struct xen_block {
+ int usage;
+} xen_block_t;
+
+extern struct request_queue *xlbd_blk_queue;
+extern spinlock_t blkif_io_lock;
+
+extern int blkif_open(struct inode *inode, struct file *filep);
+extern int blkif_release(struct inode *inode, struct file *filep);
+extern int blkif_ioctl(struct inode *inode, struct file *filep,
+ unsigned command, unsigned long argument);
+extern int blkif_check(dev_t dev);
+extern int blkif_revalidate(dev_t dev);
+extern void blkif_control_send(blkif_request_t *req, blkif_response_t *rsp);
+extern void do_blkif_request (request_queue_t *rq);
+
+extern void xlvbd_update_vbds(void);
+
+/* Virtual block-device subsystem. */
+extern int xlvbd_init(void);
+extern void xlvbd_cleanup(void);
+
+#endif /* __XEN_DRIVERS_BLOCK_H__ */
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/drivers/xen/blkfront/vbd.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/blkfront/vbd.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,494 @@
+/******************************************************************************
+ * vbd.c
+ *
+ * XenLinux virtual block-device driver (xvd).
+ *
+ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
+ * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
+ * Copyright (c) 2004-2005, Christian Limpach
+ *
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "block.h"
+#include <linux/blkdev.h>
+#include <linux/list.h>
+
+/*
+ * For convenience we distinguish between ide, scsi and 'other' (i.e.,
+ * potentially combinations of the two) in the naming scheme and in a few other
+ * places.
+ */
+
+#define NUM_IDE_MAJORS 10
+#define NUM_SCSI_MAJORS 9
+#define NUM_VBD_MAJORS 1
+
+struct lvdisk
+{
+ blkif_sector_t capacity; /* 0: Size in terms of 512-byte sectors. */
+ blkif_vdev_t device; /* 8: Device number (opaque 16 bit value). */
+ u16 info;
+ struct list_head list;
+};
+
+static struct xlbd_type_info xlbd_ide_type = {
+ .partn_shift = 6,
+ .disks_per_major = 2,
+ .devname = "ide",
+ .diskname = "hd",
+};
+
+static struct xlbd_type_info xlbd_scsi_type = {
+ .partn_shift = 4,
+ .disks_per_major = 16,
+ .devname = "sd",
+ .diskname = "sd",
+};
+
+static struct xlbd_type_info xlbd_vbd_type = {
+ .partn_shift = 4,
+ .disks_per_major = 16,
+ .devname = "xvd",
+ .diskname = "xvd",
+};
+
+static struct xlbd_major_info *major_info[NUM_IDE_MAJORS + NUM_SCSI_MAJORS +
+ NUM_VBD_MAJORS];
+
+#define XLBD_MAJOR_IDE_START 0
+#define XLBD_MAJOR_SCSI_START (NUM_IDE_MAJORS)
+#define XLBD_MAJOR_VBD_START (NUM_IDE_MAJORS + NUM_SCSI_MAJORS)
+
+#define XLBD_MAJOR_IDE_RANGE XLBD_MAJOR_IDE_START ... XLBD_MAJOR_SCSI_START - 1
+#define XLBD_MAJOR_SCSI_RANGE XLBD_MAJOR_SCSI_START ... XLBD_MAJOR_VBD_START - 1
+#define XLBD_MAJOR_VBD_RANGE XLBD_MAJOR_VBD_START ... XLBD_MAJOR_VBD_START + NUM_VBD_MAJORS - 1
+
+/* Information about our VBDs. */
+#define MAX_VBDS 64
+struct list_head vbds_list;
+
+struct request_queue *xlbd_blk_queue = NULL;
+
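+/* A Xen vdisk device number packs the major in its top 8 bits and the
+ * minor in its bottom 8. */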
+#define MAJOR_XEN(dev) ((dev)>>8)
+#define MINOR_XEN(dev) ((dev) & 0xff)
+
+static struct block_device_operations xlvbd_block_fops =
+{
+ .owner = THIS_MODULE,
+ .open = blkif_open,
+ .release = blkif_release,
+ .ioctl = blkif_ioctl,
+};
+
+spinlock_t blkif_io_lock = SPIN_LOCK_UNLOCKED;
+
+static struct lvdisk *xlvbd_device_alloc(void)
+{
+ struct lvdisk *disk;
+
+ disk = kmalloc(sizeof(*disk), GFP_KERNEL);
+ if (disk != NULL) {
+ memset(disk, 0, sizeof(*disk));
+ INIT_LIST_HEAD(&disk->list);
+ }
+ return disk;
+}
+
+static void xlvbd_device_free(struct lvdisk *disk)
+{
+ list_del(&disk->list);
+ kfree(disk);
+}
+
+static vdisk_t *xlvbd_probe(int *ret)
+{
+ blkif_response_t rsp;
+ blkif_request_t req;
+ vdisk_t *disk_info = NULL;
+ unsigned long buf;
+ int nr;
+
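+ /* BLKIF_OP_PROBE fills the page we pass down with an array of vdisk_t
+ records; rsp.status is the count, or an error if <= 0. */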
+ buf = __get_free_page(GFP_KERNEL);
+ if ((void *)buf == NULL)
+ goto out;
+
+ memset(&req, 0, sizeof(req));
+ req.operation = BLKIF_OP_PROBE;
+ req.nr_segments = 1;
+#ifdef CONFIG_XEN_BLKDEV_GRANT
+ blkif_control_probe_send(&req, &rsp,
+ (unsigned long)(virt_to_machine(buf)));
+#else
+ req.frame_and_sects[0] = virt_to_machine(buf) | 7;
+
+ blkif_control_send(&req, &rsp);
+#endif
+ if ( rsp.status <= 0 ) {
+ WPRINTK("Could not probe disks (%d)\n", rsp.status);
+ goto out;
+ }
+ nr = rsp.status;
+ if ( nr > MAX_VBDS )
+ nr = MAX_VBDS;
+
+ disk_info = kmalloc(nr * sizeof(vdisk_t), GFP_KERNEL);
+ if (disk_info != NULL)
+ memcpy(disk_info, (void *) buf, nr * sizeof(vdisk_t));
+
+ if (ret != NULL)
+ *ret = nr;
+
+out:
+ free_page(buf);
+ return disk_info;
+}
+
+static struct xlbd_major_info *xlbd_alloc_major_info(
+ int major, int minor, int index)
+{
+ struct xlbd_major_info *ptr;
+
+ ptr = kmalloc(sizeof(struct xlbd_major_info), GFP_KERNEL);
+ if (ptr == NULL)
+ return NULL;
+
+ memset(ptr, 0, sizeof(struct xlbd_major_info));
+
+ ptr->major = major;
+
+ switch (index) {
+ case XLBD_MAJOR_IDE_RANGE:
+ ptr->type = &xlbd_ide_type;
+ ptr->index = index - XLBD_MAJOR_IDE_START;
+ break;
+ case XLBD_MAJOR_SCSI_RANGE:
+ ptr->type = &xlbd_scsi_type;
+ ptr->index = index - XLBD_MAJOR_SCSI_START;
+ break;
+ case XLBD_MAJOR_VBD_RANGE:
+ ptr->type = &xlbd_vbd_type;
+ ptr->index = index - XLBD_MAJOR_VBD_START;
+ break;
+ }
+
+ if (register_blkdev(ptr->major, ptr->type->devname)) {
+ WPRINTK("can't get major %d with name %s\n",
+ ptr->major, ptr->type->devname);
+ kfree(ptr);
+ return NULL;
+ }
+
+ devfs_mk_dir(ptr->type->devname);
+ major_info[index] = ptr;
+ return ptr;
+}
+
+static struct xlbd_major_info *xlbd_get_major_info(int device)
+{
+ int major, minor, index;
+
+ major = MAJOR_XEN(device);
+ minor = MINOR_XEN(device);
+
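+ /* Map the Linux major onto the flat major_info[] index: IDE majors
+ occupy 0-9, SCSI 10-18, and anything else the single VBD slot (19). */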
+ switch (major) {
+ case IDE0_MAJOR: index = 0; break;
+ case IDE1_MAJOR: index = 1; break;
+ case IDE2_MAJOR: index = 2; break;
+ case IDE3_MAJOR: index = 3; break;
+ case IDE4_MAJOR: index = 4; break;
+ case IDE5_MAJOR: index = 5; break;
+ case IDE6_MAJOR: index = 6; break;
+ case IDE7_MAJOR: index = 7; break;
+ case IDE8_MAJOR: index = 8; break;
+ case IDE9_MAJOR: index = 9; break;
+ case SCSI_DISK0_MAJOR: index = 10; break;
+ case SCSI_DISK1_MAJOR ... SCSI_DISK7_MAJOR:
+ index = 11 + major - SCSI_DISK1_MAJOR;
+ break;
+ case SCSI_CDROM_MAJOR: index = 18; break;
+ default: index = 19; break;
+ }
+
+ return ((major_info[index] != NULL) ? major_info[index] :
+ xlbd_alloc_major_info(major, minor, index));
+}
+
+static int xlvbd_blk_queue_alloc(struct xlbd_type_info *type)
+{
+ xlbd_blk_queue = blk_init_queue(do_blkif_request, &blkif_io_lock);
+ if (xlbd_blk_queue == NULL)
+ return -1;
+
+ elevator_init(xlbd_blk_queue, "noop");
+
+ /*
+ * Turn off barking 'headactive' mode. We dequeue
+ * buffer heads as soon as we pass them to back-end
+ * driver.
+ */
+ blk_queue_headactive(xlbd_blk_queue, 0);
+
+ /* Hard sector size and max sectors impersonate the equiv. hardware. */
+ blk_queue_hardsect_size(xlbd_blk_queue, 512);
+ blk_queue_max_sectors(xlbd_blk_queue, 512);
+
+ /* Each segment in a request is up to an aligned page in size. */
+ blk_queue_segment_boundary(xlbd_blk_queue, PAGE_SIZE - 1);
+ blk_queue_max_segment_size(xlbd_blk_queue, PAGE_SIZE);
+
+ /* Ensure a merged request will fit in a single I/O ring slot. */
+ blk_queue_max_phys_segments(xlbd_blk_queue, BLKIF_MAX_SEGMENTS_PER_REQUEST);
+ blk_queue_max_hw_segments(xlbd_blk_queue, BLKIF_MAX_SEGMENTS_PER_REQUEST);
+
+ /* Make sure buffer addresses are sector-aligned. */
+ blk_queue_dma_alignment(xlbd_blk_queue, 511);
+ return 0;
+}
+
+struct gendisk *xlvbd_alloc_gendisk(
+ struct xlbd_major_info *mi, int minor, vdisk_t *disk)
+{
+ struct gendisk *gd;
+ struct xlbd_disk_info *di;
+ int nr_minors = 1;
+
+ di = kmalloc(sizeof(struct xlbd_disk_info), GFP_KERNEL);
+ if (di == NULL)
+ goto out;
+ di->mi = mi;
+ di->xd_device = disk->device;
+
+ if ((minor & ((1 << mi->type->partn_shift) - 1)) == 0)
+ nr_minors = 1 << mi->type->partn_shift;
+
+ gd = alloc_disk(nr_minors);
+ if ( !gd )
+ goto out;
+
+ if (nr_minors > 1)
+ sprintf(gd->disk_name, "%s%c", mi->type->diskname,
+ 'a' + mi->index * mi->type->disks_per_major +
+ (minor >> mi->type->partn_shift));
+ else
+ sprintf(gd->disk_name, "%s%c%d", mi->type->diskname,
+ 'a' + mi->index * mi->type->disks_per_major +
+ (minor >> mi->type->partn_shift),
+ minor & ((1 << mi->type->partn_shift) - 1));
+
+ gd->major = mi->major;
+ gd->first_minor = minor;
+ gd->fops = &xlvbd_block_fops;
+ gd->private_data = di;
+ set_capacity(gd, disk->capacity);
+
+ if ((xlbd_blk_queue == NULL) && xlvbd_blk_queue_alloc(mi->type))
+ goto out_gendisk;
+
+ if (VDISK_READONLY(disk->info))
+ set_disk_ro(gd, 1);
+
+ if (VDISK_TYPE(disk->info) == VDISK_TYPE_CDROM)
+ gd->flags |= GENHD_FL_REMOVABLE | GENHD_FL_CD;
+
+ gd->queue = xlbd_blk_queue;
+ add_disk(gd);
+ return gd;
+
+out_gendisk:
+ del_gendisk(gd);
+out:
+ kfree(di);
+ return NULL;
+}
+
+static int xlvbd_device_add(struct list_head *list, vdisk_t *disk)
+{
+ struct lvdisk *new;
+ int minor;
+ dev_t device;
+ struct block_device *bd;
+ struct gendisk *gd;
+ struct xlbd_major_info *mi;
+
+ mi = xlbd_get_major_info(disk->device);
+ if (mi == NULL)
+ return -EPERM;
+
+ new = xlvbd_device_alloc();
+ if (new == NULL)
+ return -1;
+ new->capacity = disk->capacity;
+ new->device = disk->device;
+ new->info = disk->info;
+
+ minor = MINOR_XEN(disk->device);
+ device = MKDEV(mi->major, minor);
+
+ bd = bdget(device);
+ if (bd == NULL)
+ goto out;
+
+ gd = xlvbd_alloc_gendisk(mi, minor, disk);
+ if (gd == NULL)
+ goto out_bd;
+
+ list_add(&new->list, list);
+out_bd:
+ bdput(bd);
+out:
+ return 0;
+}
+
+static int xlvbd_device_del(struct lvdisk *disk)
+{
+ dev_t device;
+ struct block_device *bd;
+ struct gendisk *gd;
+ struct xlbd_disk_info *di;
+ int ret = 0, unused;
+
+ device = MKDEV(MAJOR_XEN(disk->device), MINOR_XEN(disk->device));
+
+ bd = bdget(device);
+ if (bd == NULL)
+ return -1;
+
+ gd = get_gendisk(device, &unused);
+ di = gd->private_data;
+
+ if (di->mi->usage != 0) {
+ WPRINTK("disk removal failed: used [dev=%x]\n", device);
+ ret = -1;
+ goto out;
+ }
+
+ del_gendisk(gd);
+
+ xlvbd_device_free(disk);
+out:
+ bdput(bd);
+ return ret;
+}
+
+static int xlvbd_device_update(struct lvdisk *ldisk, vdisk_t *disk)
+{
+ dev_t device;
+ struct block_device *bd;
+ struct gendisk *gd;
+ int unused;
+
+ if ((ldisk->capacity == disk->capacity) && (ldisk->info == disk->info))
+ return 0;
+
+ device = MKDEV(MAJOR_XEN(ldisk->device), MINOR_XEN(ldisk->device));
+
+ bd = bdget(device);
+ if (bd == NULL)
+ return -1;
+
+ gd = get_gendisk(device, &unused);
+ set_capacity(gd, disk->capacity);
+ ldisk->capacity = disk->capacity;
+
+ bdput(bd);
+
+ return 0;
+}
+
+void xlvbd_refresh(void)
+{
+ vdisk_t *newdisks;
+ struct list_head *tmp, *tmp2;
+ struct lvdisk *disk;
+ int i, nr;
+
+ newdisks = xlvbd_probe(&nr);
+ if (newdisks == NULL) {
+ WPRINTK("failed to probe\n");
+ return;
+ }
+
+ i = 0;
+ list_for_each_safe(tmp, tmp2, &vbds_list) {
+ disk = list_entry(tmp, struct lvdisk, list);
+
+ for (i = 0; i < nr; i++) {
+ if ( !newdisks[i].device )
+ continue;
+ if ( disk->device == newdisks[i].device ) {
+ xlvbd_device_update(disk, &newdisks[i]);
+ newdisks[i].device = 0;
+ break;
+ }
+ }
+ if (i == nr) {
+ /* No new disk matched this one: the device has gone away. */
+ xlvbd_device_del(disk);
+ }
+ }
+ for (i = 0; i < nr; i++)
+ if ( newdisks[i].device )
+ xlvbd_device_add(&vbds_list, &newdisks[i]);
+ kfree(newdisks);
+}
+
+/*
+ * xlvbd_update_vbds - re-probes the VBD status and performs the corresponding
+ * updates to driver state. The VBDs need to be updated in this way when the
+ * domain is initialised and also each time we receive an XLBLK_UPDATE event.
+ */
+void xlvbd_update_vbds(void)
+{
+ xlvbd_refresh();
+}
+
+/*
+ * Set up all the linux device goop for the virtual block devices
+ * (vbd's) that we know about. Note that although from the backend
+ * driver's p.o.v. VBDs are addressed simply as an opaque 16-bit device
+ * number, the domain creation tools conventionally allocate these
+ * numbers to correspond to those used by 'real' linux -- this is just
+ * for convenience as it means e.g. that the same /etc/fstab can be
+ * used when booting with or without Xen.
+ */
+int xlvbd_init(void)
+{
+ int i, nr;
+ vdisk_t *disks;
+
+ INIT_LIST_HEAD(&vbds_list);
+
+ memset(major_info, 0, sizeof(major_info));
+
+ disks = xlvbd_probe(&nr);
+ if (disks == NULL) {
+ WPRINTK("failed to probe\n");
+ return -1;
+ }
+
+ for (i = 0; i < nr; i++)
+ xlvbd_device_add(&vbds_list, &disks[i]);
+
+ kfree(disks);
+ return 0;
+}
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/drivers/xen/console/Makefile
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/console/Makefile Tue Aug 9 15:17:45 2005
@@ -0,0 +1,2 @@
+
+obj-y := console.o
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/drivers/xen/console/console.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/console/console.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,781 @@
+/******************************************************************************
+ * console.c
+ *
+ * Virtual console driver.
+ *
+ * Copyright (c) 2002-2004, K A Fraser.
+ *
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/config.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial.h>
+#include <linux/major.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/bootmem.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <asm-xen/xen-public/event_channel.h>
+#include <asm-xen/hypervisor.h>
+#include <asm-xen/evtchn.h>
+#include <asm-xen/ctrl_if.h>
+
+/*
+ * Modes:
+ * 'xencons=off' [XC_OFF]: Console is disabled.
+ * 'xencons=tty' [XC_TTY]: Console attached to '/dev/tty[0-9]+'.
+ * 'xencons=ttyS' [XC_SERIAL]: Console attached to '/dev/ttyS[0-9]+'.
+ * [XC_DEFAULT]: DOM0 -> XC_SERIAL ; all others -> XC_TTY.
+ *
+ * NB. In mode XC_TTY, we create dummy consoles for tty2-63. This suppresses
+ * warnings from standard distro startup scripts.
+ */
+static enum { XC_OFF, XC_DEFAULT, XC_TTY, XC_SERIAL } xc_mode = XC_DEFAULT;
+
+static int __init xencons_setup(char *str)
+{
+ if ( !strcmp(str, "tty") )
+ xc_mode = XC_TTY;
+ else if ( !strcmp(str, "ttyS") )
+ xc_mode = XC_SERIAL;
+ else if ( !strcmp(str, "off") )
+ xc_mode = XC_OFF;
+ return 1;
+}
+__setup("xencons=", xencons_setup);
+
+/* The kernel and user-land drivers share a common transmit buffer. */
+static unsigned int wbuf_size = 4096;
+#define WBUF_MASK(_i) ((_i)&(wbuf_size-1))
+static char *wbuf;
+static unsigned int wc, wp; /* write_cons, write_prod */
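+/* wc and wp run freely; WBUF_MASK() reduces them modulo the power-of-two
+ * buffer size, and the ring is full when wp - wc == wbuf_size. */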
+
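+/* Round any requested buffer size up to a power of two so WBUF_MASK()
+ * stays valid. */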
+static int __init xencons_bufsz_setup(char *str)
+{
+ unsigned int goal;
+ goal = simple_strtoul(str, NULL, 0);
+ while ( wbuf_size < goal )
+ wbuf_size <<= 1;
+ return 1;
+}
+__setup("xencons_bufsz=", xencons_bufsz_setup);
+
+/* This lock protects accesses to the common transmit buffer. */
+static spinlock_t xencons_lock = SPIN_LOCK_UNLOCKED;
+
+/* Common transmit-kick routine. */
+static void __xencons_tx_flush(void);
+
+/* This task is used to defer sending console data until there is space. */
+static void xencons_tx_flush_task_routine(void *data);
+
+static DECLARE_TQUEUE(xencons_tx_flush_task,
+ xencons_tx_flush_task_routine,
+ NULL);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+static struct tty_driver *xencons_driver;
+#else
+static struct tty_driver xencons_driver;
+#endif
+
+
+/******************** Kernel console driver ********************************/
+
+static void kcons_write(
+ struct console *c, const char *s, unsigned int count)
+{
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&xencons_lock, flags);
+
+ for ( i = 0; i < count; i++ )
+ {
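+ /* Stop when fewer than two slots remain: an LF may expand to CRLF below. */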
+ if ( (wp - wc) >= (wbuf_size - 1) )
+ break;
+ if ( (wbuf[WBUF_MASK(wp++)] = s[i]) == '\n' )
+ wbuf[WBUF_MASK(wp++)] = '\r';
+ }
+
+ __xencons_tx_flush();
+
+ spin_unlock_irqrestore(&xencons_lock, flags);
+}
+
+static void kcons_write_dom0(
+ struct console *c, const char *s, unsigned int count)
+{
+ int rc;
+
+ while ( count > 0 )
+ {
+ if ( (rc = HYPERVISOR_console_io(CONSOLEIO_write,
+ count, (char *)s)) > 0 )
+ {
+ count -= rc;
+ s += rc;
+ }
+ else
+ break;
+ }
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+static struct tty_driver *kcons_device(struct console *c, int *index)
+{
+ *index = c->index;
+ return xencons_driver;
+}
+#else
+static kdev_t kcons_device(struct console *c)
+{
+ return MKDEV(TTY_MAJOR, (xc_mode == XC_SERIAL) ? 64 : 1);
+}
+#endif
+
+static struct console kcons_info = {
+ device: kcons_device,
+ flags: CON_PRINTBUFFER,
+ index: -1
+};
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+#define __RETCODE 0
+static int __init xen_console_init(void)
+#else
+#define __RETCODE
+void xen_console_init(void)
+#endif
+{
+ if ( xen_start_info.flags & SIF_INITDOMAIN )
+ {
+ if ( xc_mode == XC_DEFAULT )
+ xc_mode = XC_SERIAL;
+ kcons_info.write = kcons_write_dom0;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+ if ( xc_mode == XC_SERIAL )
+ kcons_info.flags |= CON_ENABLED;
+#endif
+ }
+ else
+ {
+ if ( xc_mode == XC_DEFAULT )
+ xc_mode = XC_TTY;
+ kcons_info.write = kcons_write;
+ }
+
+ if ( xc_mode == XC_OFF )
+ return __RETCODE;
+
+ if ( xc_mode == XC_SERIAL )
+ strcpy(kcons_info.name, "ttyS");
+ else
+ strcpy(kcons_info.name, "tty");
+
+ wbuf = alloc_bootmem(wbuf_size);
+
+ register_console(&kcons_info);
+ return __RETCODE;
+}
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+console_initcall(xen_console_init);
+#endif
+
+/*** Useful function for console debugging -- goes straight to Xen. ***/
+asmlinkage int xprintk(const char *fmt, ...)
+{
+ va_list args;
+ int printk_len;
+ static char printk_buf[1024];
+
+ /* Emit the output into the temporary buffer */
+ va_start(args, fmt);
+ printk_len = vsnprintf(printk_buf, sizeof(printk_buf), fmt, args);
+ va_end(args);
+
+ /* Send the processed output directly to Xen. */
+ kcons_write_dom0(NULL, printk_buf, printk_len);
+
+ return 0;
+}
+
+/*** Forcibly flush console data before dying. ***/
+void xencons_force_flush(void)
+{
+ ctrl_msg_t msg;
+ int sz;
+
+ /* Emergency console is synchronous, so there's nothing to flush. */
+ if ( xen_start_info.flags & SIF_INITDOMAIN )
+ return;
+
+ /*
+ * We use dangerous control-interface functions that require a quiescent
+ * system and no interrupts. Try to ensure this with a global cli().
+ */
+ local_irq_disable(); /* XXXsmp */
+
+ /* Spin until console data is flushed through to the domain controller. */
+ while ( (wc != wp) && !ctrl_if_transmitter_empty() )
+ {
+ /* Interrupts are disabled -- we must manually reap responses. */
+ ctrl_if_discard_responses();
+
+ if ( (sz = wp - wc) == 0 )
+ continue;
+ if ( sz > sizeof(msg.msg) )
+ sz = sizeof(msg.msg);
+ if ( sz > (wbuf_size - WBUF_MASK(wc)) )
+ sz = wbuf_size - WBUF_MASK(wc);
+
+ msg.type = CMSG_CONSOLE;
+ msg.subtype = CMSG_CONSOLE_DATA;
+ msg.length = sz;
+ memcpy(msg.msg, &wbuf[WBUF_MASK(wc)], sz);
+
+ if ( ctrl_if_send_message_noblock(&msg, NULL, 0) == 0 )
+ wc += sz;
+ }
+}
+
+
+/******************** User-space console driver (/dev/console) ************/
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+#define DRV(_d) (_d)
+#define TTY_INDEX(_tty) ((_tty)->index)
+#else
+static int xencons_refcount;
+static struct tty_struct *xencons_table[MAX_NR_CONSOLES];
+#define DRV(_d) (&(_d))
+#define TTY_INDEX(_tty) (MINOR((_tty)->device) - xencons_driver.minor_start)
+#endif
+
+static struct termios *xencons_termios[MAX_NR_CONSOLES];
+static struct termios *xencons_termios_locked[MAX_NR_CONSOLES];
+static struct tty_struct *xencons_tty;
+static int xencons_priv_irq;
+static char x_char;
+
+/* Non-privileged receive callback. */
+static void xencons_rx(ctrl_msg_t *msg, unsigned long id)
+{
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&xencons_lock, flags);
+ if ( xencons_tty != NULL )
+ {
+ for ( i = 0; i < msg->length; i++ )
+ tty_insert_flip_char(xencons_tty, msg->msg[i], 0);
+ tty_flip_buffer_push(xencons_tty);
+ }
+ spin_unlock_irqrestore(&xencons_lock, flags);
+
+ msg->length = 0;
+ ctrl_if_send_response(msg);
+}
+
+/* Privileged and non-privileged transmit worker. */
+static void __xencons_tx_flush(void)
+{
+ int sz, work_done = 0;
+ ctrl_msg_t msg;
+
+ if ( xen_start_info.flags & SIF_INITDOMAIN )
+ {
+ if ( x_char )
+ {
+ kcons_write_dom0(NULL, &x_char, 1);
+ x_char = 0;
+ work_done = 1;
+ }
+
+ while ( wc != wp )
+ {
+ sz = wp - wc;
+ if ( sz > (wbuf_size - WBUF_MASK(wc)) )
+ sz = wbuf_size - WBUF_MASK(wc);
+ kcons_write_dom0(NULL, &wbuf[WBUF_MASK(wc)], sz);
+ wc += sz;
+ work_done = 1;
+ }
+ }
+ else
+ {
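+ /* Non-privileged domains send console data as control-interface
+ messages; if the channel is full, defer via the space callback. */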
+ while ( x_char )
+ {
+ msg.type = CMSG_CONSOLE;
+ msg.subtype = CMSG_CONSOLE_DATA;
+ msg.length = 1;
+ msg.msg[0] = x_char;
+
+ if ( ctrl_if_send_message_noblock(&msg, NULL, 0) == 0 )
+ x_char = 0;
+ else if ( ctrl_if_enqueue_space_callback(&xencons_tx_flush_task) )
+ break;
+
+ work_done = 1;
+ }
+
+ while ( wc != wp )
+ {
+ sz = wp - wc;
+ if ( sz > sizeof(msg.msg) )
+ sz = sizeof(msg.msg);
+ if ( sz > (wbuf_size - WBUF_MASK(wc)) )
+ sz = wbuf_size - WBUF_MASK(wc);
+
+ msg.type = CMSG_CONSOLE;
+ msg.subtype = CMSG_CONSOLE_DATA;
+ msg.length = sz;
+ memcpy(msg.msg, &wbuf[WBUF_MASK(wc)], sz);
+
+ if ( ctrl_if_send_message_noblock(&msg, NULL, 0) == 0 )
+ wc += sz;
+ else if ( ctrl_if_enqueue_space_callback(&xencons_tx_flush_task) )
+ break;
+
+ work_done = 1;
+ }
+ }
+
+ if ( work_done && (xencons_tty != NULL) )
+ {
+ wake_up_interruptible(&xencons_tty->write_wait);
+ if ( (xencons_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
+ (xencons_tty->ldisc.write_wakeup != NULL) )
+ (xencons_tty->ldisc.write_wakeup)(xencons_tty);
+ }
+}
+
+/* Non-privileged transmit kicker. */
+static void xencons_tx_flush_task_routine(void *data)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&xencons_lock, flags);
+ __xencons_tx_flush();
+ spin_unlock_irqrestore(&xencons_lock, flags);
+}
+
+/* Privileged receive callback and transmit kicker. */
+static irqreturn_t xencons_priv_interrupt(int irq, void *dev_id,
+ struct pt_regs *regs)
+{
+ static char rbuf[16];
+ int i, l;
+ unsigned long flags;
+
+ spin_lock_irqsave(&xencons_lock, flags);
+
+ if ( xencons_tty != NULL )
+ {
+ /* Receive work. */
+ while ( (l = HYPERVISOR_console_io(CONSOLEIO_read, 16, rbuf)) > 0 )
+ for ( i = 0; i < l; i++ )
+ tty_insert_flip_char(xencons_tty, rbuf[i], 0);
+ if ( xencons_tty->flip.count != 0 )
+ tty_flip_buffer_push(xencons_tty);
+ }
+
+ /* Transmit work. */
+ __xencons_tx_flush();
+
+ spin_unlock_irqrestore(&xencons_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static int xencons_write_room(struct tty_struct *tty)
+{
+ return wbuf_size - (wp - wc);
+}
+
+static int xencons_chars_in_buffer(struct tty_struct *tty)
+{
+ return wp - wc;
+}
+
+static void xencons_send_xchar(struct tty_struct *tty, char ch)
+{
+ unsigned long flags;
+
+ if ( TTY_INDEX(tty) != 0 )
+ return;
+
+ spin_lock_irqsave(&xencons_lock, flags);
+ x_char = ch;
+ __xencons_tx_flush();
+ spin_unlock_irqrestore(&xencons_lock, flags);
+}
+
+static void xencons_throttle(struct tty_struct *tty)
+{
+ if ( TTY_INDEX(tty) != 0 )
+ return;
+
+ if ( I_IXOFF(tty) )
+ xencons_send_xchar(tty, STOP_CHAR(tty));
+}
+
+static void xencons_unthrottle(struct tty_struct *tty)
+{
+ if ( TTY_INDEX(tty) != 0 )
+ return;
+
+ if ( I_IXOFF(tty) )
+ {
+ if ( x_char != 0 )
+ x_char = 0;
+ else
+ xencons_send_xchar(tty, START_CHAR(tty));
+ }
+}
+
+static void xencons_flush_buffer(struct tty_struct *tty)
+{
+ unsigned long flags;
+
+ if ( TTY_INDEX(tty) != 0 )
+ return;
+
+ spin_lock_irqsave(&xencons_lock, flags);
+ wc = wp = 0;
+ spin_unlock_irqrestore(&xencons_lock, flags);
+}
+
+static inline int __xencons_put_char(int ch)
+{
+ char _ch = (char)ch;
+ if ( (wp - wc) == wbuf_size )
+ return 0;
+ wbuf[WBUF_MASK(wp++)] = _ch;
+ return 1;
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+static int xencons_write(struct tty_struct *tty, const unsigned char *buf,
+ int count)
+{
+ int i;
+ unsigned long flags;
+
+ if ( TTY_INDEX(tty) != 0 )
+ return count;
+
+ spin_lock_irqsave(&xencons_lock, flags);
+
+ for ( i = 0; i < count; i++ )
+ if ( !__xencons_put_char(buf[i]) )
+ break;
+
+ if ( i != 0 )
+ __xencons_tx_flush();
+
+ spin_unlock_irqrestore(&xencons_lock, flags);
+
+ return i;
+}
+#else
+static int xencons_write(struct tty_struct *tty, int from_user,
+ const u_char *buf, int count)
+{
+ int i;
+ unsigned long flags;
+
+ if ( from_user && verify_area(VERIFY_READ, buf, count) )
+ return -EINVAL;
+
+ if ( TTY_INDEX(tty) != 0 )
+ return count;
+
+ spin_lock_irqsave(&xencons_lock, flags);
+
+ for ( i = 0; i < count; i++ )
+ {
+ char ch;
+ if ( from_user )
+ __get_user(ch, buf + i);
+ else
+ ch = buf[i];
+ if ( !__xencons_put_char(ch) )
+ break;
+ }
+
+ if ( i != 0 )
+ __xencons_tx_flush();
+
+ spin_unlock_irqrestore(&xencons_lock, flags);
+
+ return i;
+}
+#endif
+
+static void xencons_put_char(struct tty_struct *tty, u_char ch)
+{
+ unsigned long flags;
+
+ if ( TTY_INDEX(tty) != 0 )
+ return;
+
+ spin_lock_irqsave(&xencons_lock, flags);
+ (void)__xencons_put_char(ch);
+ spin_unlock_irqrestore(&xencons_lock, flags);
+}
+
+static void xencons_flush_chars(struct tty_struct *tty)
+{
+ unsigned long flags;
+
+ if ( TTY_INDEX(tty) != 0 )
+ return;
+
+ spin_lock_irqsave(&xencons_lock, flags);
+ __xencons_tx_flush();
+ spin_unlock_irqrestore(&xencons_lock, flags);
+}
+
+static void xencons_wait_until_sent(struct tty_struct *tty, int timeout)
+{
+ unsigned long orig_jiffies = jiffies;
+
+ if ( TTY_INDEX(tty) != 0 )
+ return;
+
+ while ( DRV(tty->driver)->chars_in_buffer(tty) )
+ {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(1);
+ if ( signal_pending(current) )
+ break;
+ if ( (timeout != 0) && time_after(jiffies, orig_jiffies + timeout) )
+ break;
+ }
+
+ set_current_state(TASK_RUNNING);
+}
+
+static int xencons_open(struct tty_struct *tty, struct file *filp)
+{
+ unsigned long flags;
+
+ if ( TTY_INDEX(tty) != 0 )
+ return 0;
+
+ spin_lock_irqsave(&xencons_lock, flags);
+ tty->driver_data = NULL;
+ if ( xencons_tty == NULL )
+ xencons_tty = tty;
+ __xencons_tx_flush();
+ spin_unlock_irqrestore(&xencons_lock, flags);
+
+ return 0;
+}
+
+static void xencons_close(struct tty_struct *tty, struct file *filp)
+{
+ unsigned long flags;
+
+ if ( TTY_INDEX(tty) != 0 )
+ return;
+
+ if ( tty->count == 1 )
+ {
+ tty->closing = 1;
+ tty_wait_until_sent(tty, 0);
+ if ( DRV(tty->driver)->flush_buffer != NULL )
+ DRV(tty->driver)->flush_buffer(tty);
+ if ( tty->ldisc.flush_buffer != NULL )
+ tty->ldisc.flush_buffer(tty);
+ tty->closing = 0;
+ spin_lock_irqsave(&xencons_lock, flags);
+ xencons_tty = NULL;
+ spin_unlock_irqrestore(&xencons_lock, flags);
+ }
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+static struct tty_operations xencons_ops = {
+ .open = xencons_open,
+ .close = xencons_close,
+ .write = xencons_write,
+ .write_room = xencons_write_room,
+ .put_char = xencons_put_char,
+ .flush_chars = xencons_flush_chars,
+ .chars_in_buffer = xencons_chars_in_buffer,
+ .send_xchar = xencons_send_xchar,
+ .flush_buffer = xencons_flush_buffer,
+ .throttle = xencons_throttle,
+ .unthrottle = xencons_unthrottle,
+ .wait_until_sent = xencons_wait_until_sent,
+};
+
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+static const char *xennullcon_startup(void)
+{
+ return NULL;
+}
+
+static int xennullcon_dummy(void)
+{
+ return 0;
+}
+
+#define DUMMY (void *)xennullcon_dummy
+
+/*
+ * The console `switch' structure for the dummy console
+ *
+ * Most of the operations are dummies.
+ */
+
+const struct consw xennull_con = {
+ .owner = THIS_MODULE,
+ .con_startup = xennullcon_startup,
+ .con_init = DUMMY,
+ .con_deinit = DUMMY,
+ .con_clear = DUMMY,
+ .con_putc = DUMMY,
+ .con_putcs = DUMMY,
+ .con_cursor = DUMMY,
+ .con_scroll = DUMMY,
+ .con_bmove = DUMMY,
+ .con_switch = DUMMY,
+ .con_blank = DUMMY,
+ .con_font_set = DUMMY,
+ .con_font_get = DUMMY,
+ .con_font_default = DUMMY,
+ .con_font_copy = DUMMY,
+ .con_set_palette = DUMMY,
+ .con_scrolldelta = DUMMY,
+};
+#endif
+#endif
+
+static int __init xencons_init(void)
+{
+ int rc;
+
+ if ( xc_mode == XC_OFF )
+ return 0;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+ xencons_driver = alloc_tty_driver((xc_mode == XC_SERIAL) ?
+ 1 : MAX_NR_CONSOLES);
+ if ( xencons_driver == NULL )
+ return -ENOMEM;
+#else
+ memset(&xencons_driver, 0, sizeof(struct tty_driver));
+ xencons_driver.magic = TTY_DRIVER_MAGIC;
+ xencons_driver.refcount = &xencons_refcount;
+ xencons_driver.table = xencons_table;
+ xencons_driver.num = (xc_mode == XC_SERIAL) ? 1 : MAX_NR_CONSOLES;
+#endif
+
+ DRV(xencons_driver)->major = TTY_MAJOR;
+ DRV(xencons_driver)->type = TTY_DRIVER_TYPE_SERIAL;
+ DRV(xencons_driver)->subtype = SERIAL_TYPE_NORMAL;
+ DRV(xencons_driver)->init_termios = tty_std_termios;
+ DRV(xencons_driver)->flags =
+ TTY_DRIVER_REAL_RAW | TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_NO_DEVFS;
+ DRV(xencons_driver)->termios = xencons_termios;
+ DRV(xencons_driver)->termios_locked = xencons_termios_locked;
+
+ if ( xc_mode == XC_SERIAL )
+ {
+ DRV(xencons_driver)->name = "ttyS";
+ DRV(xencons_driver)->minor_start = 64;
+ DRV(xencons_driver)->name_base = 0;
+ }
+ else
+ {
+ DRV(xencons_driver)->name = "tty";
+ DRV(xencons_driver)->minor_start = 1;
+ DRV(xencons_driver)->name_base = 1;
+ }
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+ tty_set_operations(xencons_driver, &xencons_ops);
+#else
+ xencons_driver.open = xencons_open;
+ xencons_driver.close = xencons_close;
+ xencons_driver.write = xencons_write;
+ xencons_driver.write_room = xencons_write_room;
+ xencons_driver.put_char = xencons_put_char;
+ xencons_driver.flush_chars = xencons_flush_chars;
+ xencons_driver.chars_in_buffer = xencons_chars_in_buffer;
+ xencons_driver.send_xchar = xencons_send_xchar;
+ xencons_driver.flush_buffer = xencons_flush_buffer;
+ xencons_driver.throttle = xencons_throttle;
+ xencons_driver.unthrottle = xencons_unthrottle;
+ xencons_driver.wait_until_sent = xencons_wait_until_sent;
+#endif
+
+ if ( (rc = tty_register_driver(DRV(xencons_driver))) != 0 )
+ {
+ printk("WARNING: Failed to register Xen virtual "
+ "console driver as '%s%d'\n",
+ DRV(xencons_driver)->name, DRV(xencons_driver)->name_base);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+ put_tty_driver(xencons_driver);
+ xencons_driver = NULL;
+#endif
+ return rc;
+ }
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+ tty_register_device(xencons_driver, 0, NULL);
+#endif
+
+ if ( xen_start_info.flags & SIF_INITDOMAIN )
+ {
+ xencons_priv_irq = bind_virq_to_irq(VIRQ_CONSOLE);
+ (void)request_irq(xencons_priv_irq,
+ xencons_priv_interrupt, 0, "console", NULL);
+ }
+ else
+ {
+ (void)ctrl_if_register_receiver(CMSG_CONSOLE, xencons_rx, 0);
+ }
+
+ printk("Xen virtual console successfully installed as %s\n",
+ DRV(xencons_driver)->name);
+
+ return 0;
+}
+
+module_init(xencons_init);
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/drivers/xen/evtchn/Makefile
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/evtchn/Makefile Tue Aug 9 15:17:45 2005
@@ -0,0 +1,2 @@
+
+obj-y := evtchn.o
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/drivers/xen/evtchn/evtchn.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/evtchn/evtchn.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,429 @@
+/******************************************************************************
+ * evtchn.c
+ *
+ * Xenolinux driver for receiving and demuxing event-channel signals.
+ *
+ * Copyright (c) 2004, K A Fraser
+ * Multi-process extensions Copyright (c) 2004, Steven Smith
+ *
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/miscdevice.h>
+#include <linux/major.h>
+#include <linux/proc_fs.h>
+#include <linux/stat.h>
+#include <linux/poll.h>
+#include <linux/irq.h>
+#include <linux/init.h>
+#include <asm-xen/evtchn.h>
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+#include <linux/devfs_fs_kernel.h>
+#define OLD_DEVFS
+#else
+#include <linux/gfp.h>
+#endif
+
+#ifdef OLD_DEVFS
+/* NB. This must be shared amongst drivers if more things go in /dev/xen */
+static devfs_handle_t xen_dev_dir;
+#endif
+
+struct per_user_data {
+ /* Notification ring, accessed via /dev/xen/evtchn. */
+# define RING_SIZE 2048 /* 2048 16-bit entries */
+# define RING_MASK(_i) ((_i)&(RING_SIZE-1))
+ u16 *ring;
+ unsigned int ring_cons, ring_prod, ring_overflow;
+
+ /* Processes wait on this queue when ring is empty. */
+ wait_queue_head_t evtchn_wait;
+ struct fasync_struct *evtchn_async_queue;
+};
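+
+/*
+ * Sizing note: RING_SIZE is 2048 u16 entries, i.e. 4096 bytes, so the
+ * ring exactly fills the single page allocated in evtchn_open()
+ * (assuming a 4kB PAGE_SIZE, as on x86). RING_MASK() relies on
+ * RING_SIZE being a power of two.
+ */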
+
+/* Who's bound to each port? */
+static struct per_user_data *port_user[NR_EVENT_CHANNELS];
+static spinlock_t port_user_lock;
+
+void evtchn_device_upcall(int port)
+{
+ struct per_user_data *u;
+
+ spin_lock(&port_user_lock);
+
+ mask_evtchn(port);
+ clear_evtchn(port);
+
+ if ( (u = port_user[port]) != NULL )
+ {
+ if ( (u->ring_prod - u->ring_cons) < RING_SIZE )
+ {
+ u->ring[RING_MASK(u->ring_prod)] = (u16)port;
+ if ( u->ring_cons == u->ring_prod++ )
+ {
+ wake_up_interruptible(&u->evtchn_wait);
+ kill_fasync(&u->evtchn_async_queue, SIGIO, POLL_IN);
+ }
+ }
+ else
+ {
+ u->ring_overflow = 1;
+ }
+ }
+
+ spin_unlock(&port_user_lock);
+}
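+
+/*
+ * The wake-up test above compares against the pre-increment producer,
+ * so readers are woken only on the empty-to-non-empty transition;
+ * events queued behind an already-pending one generate no redundant
+ * wake-ups.
+ */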
+
+static ssize_t evtchn_read(struct file *file, char *buf,
+ size_t count, loff_t *ppos)
+{
+ int rc;
+ unsigned int c, p, bytes1 = 0, bytes2 = 0;
+ DECLARE_WAITQUEUE(wait, current);
+ struct per_user_data *u = file->private_data;
+
+ add_wait_queue(&u->evtchn_wait, &wait);
+
+ count &= ~1; /* even number of bytes */
+
+ if ( count == 0 )
+ {
+ rc = 0;
+ goto out;
+ }
+
+ if ( count > PAGE_SIZE )
+ count = PAGE_SIZE;
+
+ for ( ; ; )
+ {
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ if ( (c = u->ring_cons) != (p = u->ring_prod) )
+ break;
+
+ if ( u->ring_overflow )
+ {
+ rc = -EFBIG;
+ goto out;
+ }
+
+ if ( file->f_flags & O_NONBLOCK )
+ {
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ if ( signal_pending(current) )
+ {
+ rc = -ERESTARTSYS;
+ goto out;
+ }
+
+ schedule();
+ }
+
+ /* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
+ if ( ((c ^ p) & RING_SIZE) != 0 )
+ {
+ bytes1 = (RING_SIZE - RING_MASK(c)) * sizeof(u16);
+ bytes2 = RING_MASK(p) * sizeof(u16);
+ }
+ else
+ {
+ bytes1 = (p - c) * sizeof(u16);
+ bytes2 = 0;
+ }
+
+ /* Truncate chunks according to caller's maximum byte count. */
+ if ( bytes1 > count )
+ {
+ bytes1 = count;
+ bytes2 = 0;
+ }
+ else if ( (bytes1 + bytes2) > count )
+ {
+ bytes2 = count - bytes1;
+ }
+
+ if ( copy_to_user(buf, &u->ring[RING_MASK(c)], bytes1) ||
+ ((bytes2 != 0) && copy_to_user(&buf[bytes1], &u->ring[0], bytes2)) )
+ {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ u->ring_cons += (bytes1 + bytes2) / sizeof(u16);
+
+ rc = bytes1 + bytes2;
+
+ out:
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(&u->evtchn_wait, &wait);
+ return rc;
+}
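+
+/*
+ * Worked example of the chunk split above: with RING_SIZE == 2048, a
+ * consumer at c == 2046 and a producer at p == 2050 differ in the
+ * RING_SIZE bit, so the four pending ports straddle the wrap point:
+ * bytes1 covers entries 2046-2047 (RING_SIZE - RING_MASK(c) == 2
+ * entries) and bytes2 covers entries 0-1 (RING_MASK(p) == 2 entries).
+ */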
+
+static ssize_t evtchn_write(struct file *file, const char *buf,
+ size_t count, loff_t *ppos)
+{
+ int rc, i;
+ u16 *kbuf = (u16 *)__get_free_page(GFP_KERNEL);
+ struct per_user_data *u = file->private_data;
+
+ if ( kbuf == NULL )
+ return -ENOMEM;
+
+ count &= ~1; /* even number of bytes */
+
+ if ( count == 0 )
+ {
+ rc = 0;
+ goto out;
+ }
+
+ if ( count > PAGE_SIZE )
+ count = PAGE_SIZE;
+
+ if ( copy_from_user(kbuf, buf, count) != 0 )
+ {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ spin_lock_irq(&port_user_lock);
+ for ( i = 0; i < (count/2); i++ )
+ if ( (kbuf[i] < NR_EVENT_CHANNELS) && (port_user[kbuf[i]] == u) )
+ unmask_evtchn(kbuf[i]);
+ spin_unlock_irq(&port_user_lock);
+
+ rc = count;
+
+ out:
+ free_page((unsigned long)kbuf);
+ return rc;
+}
+
+static int evtchn_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ int rc = 0;
+ struct per_user_data *u = file->private_data;
+
+ spin_lock_irq(&port_user_lock);
+
+ switch ( cmd )
+ {
+ case EVTCHN_RESET:
+ /* Initialise the ring to empty. Clear errors. */
+ u->ring_cons = u->ring_prod = u->ring_overflow = 0;
+ break;
+
+ case EVTCHN_BIND:
+ if ( arg >= NR_EVENT_CHANNELS )
+ {
+ rc = -EINVAL;
+ }
+ else if ( port_user[arg] != NULL )
+ {
+ rc = -EISCONN;
+ }
+ else
+ {
+ port_user[arg] = u;
+ unmask_evtchn(arg);
+ }
+ break;
+
+ case EVTCHN_UNBIND:
+ if ( arg >= NR_EVENT_CHANNELS )
+ {
+ rc = -EINVAL;
+ }
+ else if ( port_user[arg] != u )
+ {
+ rc = -ENOTCONN;
+ }
+ else
+ {
+ port_user[arg] = NULL;
+ mask_evtchn(arg);
+ }
+ break;
+
+ default:
+ rc = -ENOSYS;
+ break;
+ }
+
+ spin_unlock_irq(&port_user_lock);
+
+ return rc;
+}
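+
+/*
+ * Minimal sketch of the userspace protocol implemented by the ioctl
+ * above together with evtchn_read()/evtchn_write(). Illustrative
+ * only: error handling is omitted and 'port' stands for a hypothetical
+ * already-allocated event channel.
+ *
+ * int fd = open("/dev/xen/evtchn", O_RDWR);
+ * u16 port = 5;
+ * ioctl(fd, EVTCHN_BIND, port); (route the port to this fd)
+ * read(fd, &port, sizeof(port)); (blocks until an event fires)
+ * (handle the event, then re-enable it:)
+ * write(fd, &port, sizeof(port));
+ */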
+
+static unsigned int evtchn_poll(struct file *file, poll_table *wait)
+{
+ unsigned int mask = POLLOUT | POLLWRNORM;
+ struct per_user_data *u = file->private_data;
+
+ poll_wait(file, &u->evtchn_wait, wait);
+ if ( u->ring_cons != u->ring_prod )
+ mask |= POLLIN | POLLRDNORM;
+ if ( u->ring_overflow )
+ mask = POLLERR;
+ return mask;
+}
+
+static int evtchn_fasync(int fd, struct file *filp, int on)
+{
+ struct per_user_data *u = filp->private_data;
+ return fasync_helper(fd, filp, on, &u->evtchn_async_queue);
+}
+
+static int evtchn_open(struct inode *inode, struct file *filp)
+{
+ struct per_user_data *u;
+
+ if ( (u = kmalloc(sizeof(*u), GFP_KERNEL)) == NULL )
+ return -ENOMEM;
+
+ memset(u, 0, sizeof(*u));
+ init_waitqueue_head(&u->evtchn_wait);
+
+ if ( (u->ring = (u16 *)__get_free_page(GFP_KERNEL)) == NULL )
+ {
+ kfree(u);
+ return -ENOMEM;
+ }
+
+ filp->private_data = u;
+
+ return 0;
+}
+
+static int evtchn_release(struct inode *inode, struct file *filp)
+{
+ int i;
+ struct per_user_data *u = filp->private_data;
+
+ spin_lock_irq(&port_user_lock);
+
+ free_page((unsigned long)u->ring);
+
+ for ( i = 0; i < NR_EVENT_CHANNELS; i++ )
+ {
+ if ( port_user[i] == u )
+ {
+ port_user[i] = NULL;
+ mask_evtchn(i);
+ }
+ }
+
+ spin_unlock_irq(&port_user_lock);
+
+ return 0;
+}
+
+static struct file_operations evtchn_fops = {
+ .owner = THIS_MODULE,
+ .read = evtchn_read,
+ .write = evtchn_write,
+ .ioctl = evtchn_ioctl,
+ .poll = evtchn_poll,
+ .fasync = evtchn_fasync,
+ .open = evtchn_open,
+ .release = evtchn_release,
+};
+
+static struct miscdevice evtchn_miscdev = {
+ .minor = EVTCHN_MINOR,
+ .name = "evtchn",
+ .fops = &evtchn_fops,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+ .devfs_name = "misc/evtchn",
+#endif
+};
+
+static int __init evtchn_init(void)
+{
+#ifdef OLD_DEVFS
+ devfs_handle_t symlink_handle;
+ int pos;
+ char link_dest[64];
+#endif
+ int err;
+
+ spin_lock_init(&port_user_lock);
+ memset(port_user, 0, sizeof(port_user));
+
+ /* (DEVFS) create '/dev/misc/evtchn'. */
+ err = misc_register(&evtchn_miscdev);
+ if ( err != 0 )
+ {
+ printk(KERN_ALERT "Could not register /dev/misc/evtchn\n");
+ return err;
+ }
+
+#ifdef OLD_DEVFS
+ /* (DEVFS) create directory '/dev/xen'. */
+ xen_dev_dir = devfs_mk_dir(NULL, "xen", NULL);
+
+ /* (DEVFS) &link_dest[pos] == '../misc/evtchn'. */
+ pos = devfs_generate_path(evtchn_miscdev.devfs_handle,
+ &link_dest[3],
+ sizeof(link_dest) - 3);
+ if ( pos >= 0 )
+ strncpy(&link_dest[pos], "../", 3);
+
+ /* (DEVFS) symlink '/dev/xen/evtchn' -> '../misc/evtchn'. */
+ (void)devfs_mk_symlink(xen_dev_dir,
+ "evtchn",
+ DEVFS_FL_DEFAULT,
+ &link_dest[pos],
+ &symlink_handle,
+ NULL);
+
+ /* (DEVFS) automatically destroy the symlink with its destination. */
+ devfs_auto_unregister(evtchn_miscdev.devfs_handle, symlink_handle);
+#endif
+
+ printk("Event-channel device installed.\n");
+
+ return 0;
+}
+
+static void evtchn_cleanup(void)
+{
+ misc_deregister(&evtchn_miscdev);
+}
+
+module_init(evtchn_init);
+module_exit(evtchn_cleanup);
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/drivers/xen/netback/Makefile
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/Makefile Tue Aug 9 15:17:45 2005
@@ -0,0 +1,2 @@
+
+obj-y := netback.o control.o interface.o
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/drivers/xen/netback/common.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/common.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,100 @@
+/******************************************************************************
+ * arch/xen/drivers/netif/backend/common.h
+ */
+
+#ifndef __NETIF__BACKEND__COMMON_H__
+#define __NETIF__BACKEND__COMMON_H__
+
+#include <linux/config.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <asm-xen/ctrl_if.h>
+#include <asm-xen/xen-public/io/netif.h>
+#include <asm/io.h>
+#include <asm/pgalloc.h>
+
+#if 0
+#define ASSERT(_p) \
+ if ( !(_p) ) { printk("Assertion '%s' failed, line %d, file %s", #_p , \
+ __LINE__, __FILE__); *(int*)0=0; }
+#define DPRINTK(_f, _a...) printk(KERN_ALERT "(file=%s, line=%d) " _f, \
+ __FILE__ , __LINE__ , ## _a )
+#else
+#define ASSERT(_p) ((void)0)
+#define DPRINTK(_f, _a...) ((void)0)
+#endif
+
+typedef struct netif_st {
+ /* Unique identifier for this interface. */
+ domid_t domid;
+ unsigned int handle;
+
+ /* Physical parameters of the comms window. */
+ unsigned long tx_shmem_frame;
+ unsigned long rx_shmem_frame;
+ unsigned int evtchn;
+ int irq;
+
+ /* The shared rings and indexes. */
+ netif_tx_interface_t *tx;
+ netif_rx_interface_t *rx;
+
+ /* Private indexes into shared ring. */
+ NETIF_RING_IDX rx_req_cons;
+ NETIF_RING_IDX rx_resp_prod; /* private version of shared variable */
+ NETIF_RING_IDX tx_req_cons;
+ NETIF_RING_IDX tx_resp_prod; /* private version of shared variable */
+
+ /* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
+ unsigned long credit_bytes;
+ unsigned long credit_usec;
+ unsigned long remaining_credit;
+ struct timer_list credit_timeout;
+
+ /* Miscellaneous private stuff. */
+ enum { DISCONNECTED, DISCONNECTING, CONNECTED } status;
+ int active;
+ /*
+ * DISCONNECT response is deferred until pending requests are ack'ed.
+ * We therefore need to store the id from the original request.
+ */
+ u8 disconnect_rspid;
+ struct netif_st *hash_next;
+ struct list_head list; /* scheduling list */
+ atomic_t refcnt;
+ struct net_device *dev;
+ struct net_device_stats stats;
+
+ struct work_struct work;
+} netif_t;
+
+void netif_create(netif_be_create_t *create);
+void netif_destroy(netif_be_destroy_t *destroy);
+void netif_connect(netif_be_connect_t *connect);
+int netif_disconnect(netif_be_disconnect_t *disconnect, u8 rsp_id);
+void netif_disconnect_complete(netif_t *netif);
+netif_t *netif_find_by_handle(domid_t domid, unsigned int handle);
+#define netif_get(_b) (atomic_inc(&(_b)->refcnt))
+#define netif_put(_b) \
+ do { \
+ if ( atomic_dec_and_test(&(_b)->refcnt) ) \
+ netif_disconnect_complete(_b); \
+ } while (0)
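+
+/*
+ * Reference-counting sketch: every place that stashes a netif pointer
+ * (the rx packet queue, the tx scheduling list, a live connection)
+ * takes a reference with netif_get() and drops it with netif_put().
+ * The final netif_put() after a DISCONNECT triggers
+ * netif_disconnect_complete(), which sends the deferred response.
+ */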
+
+void netif_interface_init(void);
+void netif_ctrlif_init(void);
+
+void netif_schedule_work(netif_t *netif);
+void netif_deschedule_work(netif_t *netif);
+
+int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev);
+struct net_device_stats *netif_be_get_stats(struct net_device *dev);
+irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs);
+
+#endif /* __NETIF__BACKEND__COMMON_H__ */
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/drivers/xen/netback/control.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/control.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,65 @@
+/******************************************************************************
+ * arch/xen/drivers/netif/backend/control.c
+ *
+ * Routines for interfacing with the control plane.
+ *
+ * Copyright (c) 2004, Keir Fraser
+ */
+
+#include "common.h"
+
+static void netif_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
+{
+ switch ( msg->subtype )
+ {
+ case CMSG_NETIF_BE_CREATE:
+ if ( msg->length != sizeof(netif_be_create_t) )
+ goto parse_error;
+ netif_create((netif_be_create_t *)&msg->msg[0]);
+ break;
+ case CMSG_NETIF_BE_DESTROY:
+ if ( msg->length != sizeof(netif_be_destroy_t) )
+ goto parse_error;
+ netif_destroy((netif_be_destroy_t *)&msg->msg[0]);
+ break;
+ case CMSG_NETIF_BE_CONNECT:
+ if ( msg->length != sizeof(netif_be_connect_t) )
+ goto parse_error;
+ netif_connect((netif_be_connect_t *)&msg->msg[0]);
+ break;
+ case CMSG_NETIF_BE_DISCONNECT:
+ if ( msg->length != sizeof(netif_be_disconnect_t) )
+ goto parse_error;
+ if ( !netif_disconnect((netif_be_disconnect_t *)&msg->msg[0],msg->id) )
+ return; /* Sending the response is deferred until later. */
+ break;
+ default:
+ goto parse_error;
+ }
+
+ ctrl_if_send_response(msg);
+ return;
+
+ parse_error:
+ DPRINTK("Parse error while reading message subtype %d, len %d\n",
+ msg->subtype, msg->length);
+ msg->length = 0;
+ ctrl_if_send_response(msg);
+}
+
+void netif_ctrlif_init(void)
+{
+ ctrl_msg_t cmsg;
+ netif_be_driver_status_t st;
+
+ (void)ctrl_if_register_receiver(CMSG_NETIF_BE, netif_ctrlif_rx,
+ CALLBACK_IN_BLOCKING_CONTEXT);
+
+ /* Send a driver-UP notification to the domain controller. */
+ cmsg.type = CMSG_NETIF_BE;
+ cmsg.subtype = CMSG_NETIF_BE_DRIVER_STATUS;
+ cmsg.length = sizeof(netif_be_driver_status_t);
+ st.status = NETIF_DRIVER_STATUS_UP;
+ memcpy(cmsg.msg, &st, sizeof(st));
+ ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
+}
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/drivers/xen/netback/interface.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/interface.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,340 @@
+/******************************************************************************
+ * arch/xen/drivers/netif/backend/interface.c
+ *
+ * Network-device interface management.
+ *
+ * Copyright (c) 2004, Keir Fraser
+ */
+
+#include "common.h"
+#include <linux/rtnetlink.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+#define VMALLOC_VMADDR(x) ((unsigned long)(x))
+#endif
+
+#define NETIF_HASHSZ 1024
+#define NETIF_HASH(_d,_h) (((int)(_d)^(int)(_h))&(NETIF_HASHSZ-1))
+
+static netif_t *netif_hash[NETIF_HASHSZ];
+
+netif_t *netif_find_by_handle(domid_t domid, unsigned int handle)
+{
+ netif_t *netif = netif_hash[NETIF_HASH(domid, handle)];
+ while ( (netif != NULL) &&
+ ((netif->domid != domid) || (netif->handle != handle)) )
+ netif = netif->hash_next;
+ return netif;
+}
+
+static void __netif_up(netif_t *netif)
+{
+ struct net_device *dev = netif->dev;
+ spin_lock_bh(&dev->xmit_lock);
+ netif->active = 1;
+ spin_unlock_bh(&dev->xmit_lock);
+ (void)request_irq(netif->irq, netif_be_int, 0, dev->name, netif);
+ netif_schedule_work(netif);
+}
+
+static void __netif_down(netif_t *netif)
+{
+ struct net_device *dev = netif->dev;
+ spin_lock_bh(&dev->xmit_lock);
+ netif->active = 0;
+ spin_unlock_bh(&dev->xmit_lock);
+ free_irq(netif->irq, netif);
+ netif_deschedule_work(netif);
+}
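+
+/*
+ * Locking note: 'active' is flipped under dev->xmit_lock, which the
+ * 2.6 stack holds around hard_start_xmit() (no NETIF_F_LLTX is set
+ * here), so netif_be_start_xmit() never sees it change mid-transmit.
+ */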
+
+static int net_open(struct net_device *dev)
+{
+ netif_t *netif = netdev_priv(dev);
+ if ( netif->status == CONNECTED )
+ __netif_up(netif);
+ netif_start_queue(dev);
+ return 0;
+}
+
+static int net_close(struct net_device *dev)
+{
+ netif_t *netif = netdev_priv(dev);
+ netif_stop_queue(dev);
+ if ( netif->status == CONNECTED )
+ __netif_down(netif);
+ return 0;
+}
+
+static void __netif_disconnect_complete(void *arg)
+{
+ netif_t *netif = (netif_t *)arg;
+ ctrl_msg_t cmsg;
+ netif_be_disconnect_t disc;
+
+ /*
+ * These can't be done in netif_disconnect() because at that point there
+ * may be outstanding requests in the network stack whose asynchronous
+ * responses must still be notified to the remote driver.
+ */
+ unbind_evtchn_from_irq(netif->evtchn);
+ vfree(netif->tx); /* Frees netif->rx as well. */
+
+ /* Construct the deferred response message. */
+ cmsg.type = CMSG_NETIF_BE;
+ cmsg.subtype = CMSG_NETIF_BE_DISCONNECT;
+ cmsg.id = netif->disconnect_rspid;
+ cmsg.length = sizeof(netif_be_disconnect_t);
+ disc.domid = netif->domid;
+ disc.netif_handle = netif->handle;
+ disc.status = NETIF_BE_STATUS_OKAY;
+ memcpy(cmsg.msg, &disc, sizeof(disc));
+
+ /*
+ * Make sure message is constructed /before/ status change, because
+ * after the status change the 'netif' structure could be deallocated at
+ * any time. Also make sure we send the response /after/ status change,
+ * as otherwise a subsequent CONNECT request could spuriously fail if
+ * another CPU doesn't see the status change yet.
+ */
+ mb();
+ if ( netif->status != DISCONNECTING )
+ BUG();
+ netif->status = DISCONNECTED;
+ mb();
+
+ /* Send the successful response. */
+ ctrl_if_send_response(&cmsg);
+}
+
+void netif_disconnect_complete(netif_t *netif)
+{
+ INIT_WORK(&netif->work, __netif_disconnect_complete, (void *)netif);
+ schedule_work(&netif->work);
+}
+
+void netif_create(netif_be_create_t *create)
+{
+ int err = 0;
+ domid_t domid = create->domid;
+ unsigned int handle = create->netif_handle;
+ struct net_device *dev;
+ netif_t **pnetif, *netif;
+ char name[IFNAMSIZ] = {};
+
+ snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
+ dev = alloc_netdev(sizeof(netif_t), name, ether_setup);
+ if ( dev == NULL )
+ {
+ DPRINTK("Could not create netif: out of memory\n");
+ create->status = NETIF_BE_STATUS_OUT_OF_MEMORY;
+ return;
+ }
+
+ netif = netdev_priv(dev);
+ memset(netif, 0, sizeof(*netif));
+ netif->domid = domid;
+ netif->handle = handle;
+ netif->status = DISCONNECTED;
+ atomic_set(&netif->refcnt, 0);
+ netif->dev = dev;
+
+ netif->credit_bytes = netif->remaining_credit = ~0UL;
+ netif->credit_usec = 0UL;
+ /*init_ac_timer(&netif->credit_timeout);*/
+
+ pnetif = &netif_hash[NETIF_HASH(domid, handle)];
+ while ( *pnetif != NULL )
+ {
+ if ( ((*pnetif)->domid == domid) && ((*pnetif)->handle == handle) )
+ {
+ DPRINTK("Could not create netif: already exists\n");
+ create->status = NETIF_BE_STATUS_INTERFACE_EXISTS;
+ free_netdev(dev);
+ return;
+ }
+ pnetif = &(*pnetif)->hash_next;
+ }
+
+ dev->hard_start_xmit = netif_be_start_xmit;
+ dev->get_stats = netif_be_get_stats;
+ dev->open = net_open;
+ dev->stop = net_close;
+
+ /* Disable queuing. */
+ dev->tx_queue_len = 0;
+
+ /*
+ * Initialise a dummy MAC address. We choose the numerically largest
+ * non-broadcast address to prevent the address getting stolen by an
+ * Ethernet bridge for STP purposes. (FE:FF:FF:FF:FF:FF)
+ */
+ memset(dev->dev_addr, 0xFF, ETH_ALEN);
+ dev->dev_addr[0] &= ~0x01;
+
+ rtnl_lock();
+ err = register_netdevice(dev);
+ rtnl_unlock();
+
+ if ( err != 0 )
+ {
+ DPRINTK("Could not register new net device %s: err=%d\n",
+ dev->name, err);
+ create->status = NETIF_BE_STATUS_OUT_OF_MEMORY;
+ free_netdev(dev);
+ return;
+ }
+
+ netif->hash_next = *pnetif;
+ *pnetif = netif;
+
+ DPRINTK("Successfully created netif\n");
+ create->status = NETIF_BE_STATUS_OKAY;
+}
+
+void netif_destroy(netif_be_destroy_t *destroy)
+{
+ domid_t domid = destroy->domid;
+ unsigned int handle = destroy->netif_handle;
+ netif_t **pnetif, *netif;
+
+ pnetif = &netif_hash[NETIF_HASH(domid, handle)];
+ while ( (netif = *pnetif) != NULL )
+ {
+ if ( (netif->domid == domid) && (netif->handle == handle) )
+ {
+ if ( netif->status != DISCONNECTED )
+ goto still_connected;
+ goto destroy;
+ }
+ pnetif = &netif->hash_next;
+ }
+
+ destroy->status = NETIF_BE_STATUS_INTERFACE_NOT_FOUND;
+ return;
+
+ still_connected:
+ destroy->status = NETIF_BE_STATUS_INTERFACE_CONNECTED;
+ return;
+
+ destroy:
+ *pnetif = netif->hash_next;
+ unregister_netdev(netif->dev);
+ free_netdev(netif->dev);
+ destroy->status = NETIF_BE_STATUS_OKAY;
+}
+
+void netif_connect(netif_be_connect_t *connect)
+{
+ domid_t domid = connect->domid;
+ unsigned int handle = connect->netif_handle;
+ unsigned int evtchn = connect->evtchn;
+ unsigned long tx_shmem_frame = connect->tx_shmem_frame;
+ unsigned long rx_shmem_frame = connect->rx_shmem_frame;
+ struct vm_struct *vma;
+ pgprot_t prot;
+ int error;
+ netif_t *netif;
+#if 0
+ struct net_device *eth0_dev;
+#endif
+
+ netif = netif_find_by_handle(domid, handle);
+ if ( unlikely(netif == NULL) )
+ {
+ DPRINTK("netif_connect attempted for non-existent netif (%u,%u)\n",
+ connect->domid, connect->netif_handle);
+ connect->status = NETIF_BE_STATUS_INTERFACE_NOT_FOUND;
+ return;
+ }
+
+ if ( netif->status != DISCONNECTED )
+ {
+ connect->status = NETIF_BE_STATUS_INTERFACE_CONNECTED;
+ return;
+ }
+
+ if ( (vma = get_vm_area(2*PAGE_SIZE, VM_IOREMAP)) == NULL )
+ {
+ connect->status = NETIF_BE_STATUS_OUT_OF_MEMORY;
+ return;
+ }
+
+ prot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED);
+ error = direct_remap_area_pages(&init_mm,
+ VMALLOC_VMADDR(vma->addr),
+ tx_shmem_frame<<PAGE_SHIFT, PAGE_SIZE,
+ prot, domid);
+ error |= direct_remap_area_pages(&init_mm,
+ VMALLOC_VMADDR(vma->addr) + PAGE_SIZE,
+ rx_shmem_frame<<PAGE_SHIFT, PAGE_SIZE,
+ prot, domid);
+ if ( error != 0 )
+ {
+ if ( error == -ENOMEM )
+ connect->status = NETIF_BE_STATUS_OUT_OF_MEMORY;
+ else if ( error == -EFAULT )
+ connect->status = NETIF_BE_STATUS_MAPPING_ERROR;
+ else
+ connect->status = NETIF_BE_STATUS_ERROR;
+ vfree(vma->addr);
+ return;
+ }
+
+ netif->evtchn = evtchn;
+ netif->irq = bind_evtchn_to_irq(evtchn);
+ netif->tx_shmem_frame = tx_shmem_frame;
+ netif->rx_shmem_frame = rx_shmem_frame;
+ netif->tx =
+ (netif_tx_interface_t *)vma->addr;
+ netif->rx =
+ (netif_rx_interface_t *)((char *)vma->addr + PAGE_SIZE);
+ netif->tx->resp_prod = netif->rx->resp_prod = 0;
+ netif_get(netif);
+ wmb(); /* Other CPUs see new state before interface is started. */
+
+ rtnl_lock();
+ netif->status = CONNECTED;
+ wmb();
+ if ( netif_running(netif->dev) )
+ __netif_up(netif);
+ rtnl_unlock();
+
+ connect->status = NETIF_BE_STATUS_OKAY;
+}
+
+int netif_disconnect(netif_be_disconnect_t *disconnect, u8 rsp_id)
+{
+ domid_t domid = disconnect->domid;
+ unsigned int handle = disconnect->netif_handle;
+ netif_t *netif;
+
+ netif = netif_find_by_handle(domid, handle);
+ if ( unlikely(netif == NULL) )
+ {
+ DPRINTK("netif_disconnect attempted for non-existent netif"
+ " (%u,%u)\n", disconnect->domid, disconnect->netif_handle);
+ disconnect->status = NETIF_BE_STATUS_INTERFACE_NOT_FOUND;
+ return 1; /* Caller will send response error message. */
+ }
+
+ if ( netif->status == CONNECTED )
+ {
+ rtnl_lock();
+ netif->status = DISCONNECTING;
+ netif->disconnect_rspid = rsp_id;
+ wmb();
+ if ( netif_running(netif->dev) )
+ __netif_down(netif);
+ rtnl_unlock();
+ netif_put(netif);
+ return 0; /* Caller should not send response message. */
+ }
+
+ disconnect->status = NETIF_BE_STATUS_OKAY;
+ return 1;
+}
+
+void netif_interface_init(void)
+{
+ memset(netif_hash, 0, sizeof(netif_hash));
+}
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/drivers/xen/netback/loopback.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/loopback.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,148 @@
+/******************************************************************************
+ * netback/loopback.c
+ *
+ * A two-interface loopback device to emulate a local netfront-netback
+ * connection. This ensures that local packet delivery looks identical
+ * to inter-domain delivery. Most importantly, packets delivered locally
+ * originating from other domains will get *copied* when they traverse this
+ * driver. This prevents unbounded delays in socket-buffer queues from
+ * causing the netback driver to "seize up".
+ *
+ * This driver creates a symmetric pair of loopback interfaces with names
+ * vif0.0 and veth0. The intention is that 'vif0.0' is bound to an Ethernet
+ * bridge, just like a proper netback interface, while a local IP interface
+ * is configured on 'veth0'.
+ *
+ * As with a real netback interface, vif0.0 is configured with a suitable
+ * dummy MAC address. No default is provided for veth0: a reasonable strategy
+ * is to transfer eth0's MAC address to veth0, and give eth0 a dummy address
+ * (to avoid confusing the Etherbridge).
+ *
+ * Copyright (c) 2005 K A Fraser
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <net/dst.h>
+
+struct net_private {
+ struct net_device *loopback_dev;
+ struct net_device_stats stats;
+};
+
+static int loopback_open(struct net_device *dev)
+{
+ struct net_private *np = netdev_priv(dev);
+ memset(&np->stats, 0, sizeof(np->stats));
+ netif_start_queue(dev);
+ return 0;
+}
+
+static int loopback_close(struct net_device *dev)
+{
+ netif_stop_queue(dev);
+ return 0;
+}
+
+static int loopback_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct net_private *np = netdev_priv(dev);
+
+ dst_release(skb->dst);
+ skb->dst = NULL;
+
+ skb_orphan(skb);
+
+ np->stats.tx_bytes += skb->len;
+ np->stats.tx_packets++;
+
+ /* Switch to loopback context. */
+ dev = np->loopback_dev;
+ np = netdev_priv(dev);
+
+ np->stats.rx_bytes += skb->len;
+ np->stats.rx_packets++;
+
+ skb->pkt_type = PACKET_HOST; /* overridden by eth_type_trans() */
+ skb->protocol = eth_type_trans(skb, dev);
+ skb->dev = dev;
+ dev->last_rx = jiffies;
+ netif_rx(skb);
+
+ return 0;
+}
+
+static struct net_device_stats *loopback_get_stats(struct net_device *dev)
+{
+ struct net_private *np = netdev_priv(dev);
+ return &np->stats;
+}
+
+static void loopback_construct(struct net_device *dev, struct net_device *lo)
+{
+ struct net_private *np = netdev_priv(dev);
+
+ np->loopback_dev = lo;
+
+ dev->open = loopback_open;
+ dev->stop = loopback_close;
+ dev->hard_start_xmit = loopback_start_xmit;
+ dev->get_stats = loopback_get_stats;
+
+ dev->tx_queue_len = 0;
+
+ /*
+ * We do not set a jumbo MTU on the interface. Otherwise the network
+ * stack will try to send large packets that will get dropped by the
+ * Ethernet bridge (unless the physical Ethernet interface is configured
+ * to transfer jumbo packets). If a larger MTU is desired then the system
+ * administrator can specify it using the 'ifconfig' command.
+ */
+ /*dev->mtu = 16*1024;*/
+}
+
+static int __init loopback_init(void)
+{
+ struct net_device *dev1, *dev2;
+ int err = -ENOMEM;
+
+ dev1 = alloc_netdev(sizeof(struct net_private), "vif0.0", ether_setup);
+ dev2 = alloc_netdev(sizeof(struct net_private), "veth0", ether_setup);
+ if ( (dev1 == NULL) || (dev2 == NULL) )
+ goto fail;
+
+ loopback_construct(dev1, dev2);
+ loopback_construct(dev2, dev1);
+
+ /*
+ * Initialise a dummy MAC address for the 'dummy backend' interface. We
+ * choose the numerically largest non-broadcast address to prevent the
+ * address getting stolen by an Ethernet bridge for STP purposes.
+ */
+ memset(dev1->dev_addr, 0xFF, ETH_ALEN);
+ dev1->dev_addr[0] &= ~0x01;
+
+ if ( (err = register_netdev(dev1)) != 0 )
+ goto fail;
+
+ if ( (err = register_netdev(dev2)) != 0 )
+ {
+ unregister_netdev(dev1);
+ goto fail;
+ }
+
+ return 0;
+
+ fail:
+ if ( dev1 != NULL )
+ free_netdev(dev1); /* alloc_netdev() allocations must not be kfree()d */
+ if ( dev2 != NULL )
+ free_netdev(dev2);
+ return err;
+}
+
+module_init(loopback_init);
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/drivers/xen/netback/netback.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/netback.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,794 @@
+/******************************************************************************
+ * drivers/xen/netback/netback.c
+ *
+ * Back-end of the driver for virtual network devices. This portion of the
+ * driver exports a 'unified' network-device interface that can be accessed
+ * by any operating system that implements a compatible front end. A
+ * reference front-end implementation can be found in:
+ * drivers/xen/netfront/netfront.c
+ *
+ * Copyright (c) 2002-2004, K A Fraser
+ */
+
+#include "common.h"
+#include <asm-xen/balloon.h>
+
+static void netif_idx_release(u16 pending_idx);
+static void netif_page_release(struct page *page);
+static void make_tx_response(netif_t *netif,
+ u16 id,
+ s8 st);
+static int make_rx_response(netif_t *netif,
+ u16 id,
+ s8 st,
+ memory_t addr,
+ u16 size);
+
+static void net_tx_action(unsigned long unused);
+static DECLARE_TASKLET(net_tx_tasklet, net_tx_action, 0);
+
+static void net_rx_action(unsigned long unused);
+static DECLARE_TASKLET(net_rx_tasklet, net_rx_action, 0);
+
+static struct timer_list net_timer;
+
+static struct sk_buff_head rx_queue;
+static multicall_entry_t rx_mcl[NETIF_RX_RING_SIZE*2];
+static mmu_update_t rx_mmu[NETIF_RX_RING_SIZE*3];
+static unsigned char rx_notify[NR_EVENT_CHANNELS];
+
+/* Don't currently gate addition of an interface to the tx scheduling list. */
+#define tx_work_exists(_if) (1)
+
+#define MAX_PENDING_REQS 256
+static unsigned long mmap_vstart;
+#define MMAP_VADDR(_req) (mmap_vstart + ((_req) * PAGE_SIZE))
+
+#define PKT_PROT_LEN 64
+
+static struct {
+ netif_tx_request_t req;
+ netif_t *netif;
+} pending_tx_info[MAX_PENDING_REQS];
+static u16 pending_ring[MAX_PENDING_REQS];
+typedef unsigned int PEND_RING_IDX;
+#define MASK_PEND_IDX(_i) ((_i)&(MAX_PENDING_REQS-1))
+static PEND_RING_IDX pending_prod, pending_cons;
+#define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)
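+
+/*
+ * Accounting note: pending_prod and pending_cons are free-running.
+ * pending_prod starts at MAX_PENDING_REQS with the ring full of free
+ * indices (see netback_init()), so NR_PENDING_REQS is the number of
+ * slots currently in use: 0 when idle, MAX_PENDING_REQS when every
+ * slot is mapped.
+ */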
+
+/* Freed TX SKBs get batched on this ring before return to pending_ring. */
+static u16 dealloc_ring[MAX_PENDING_REQS];
+static PEND_RING_IDX dealloc_prod, dealloc_cons;
+
+static struct sk_buff_head tx_queue;
+static multicall_entry_t tx_mcl[MAX_PENDING_REQS];
+
+static struct list_head net_schedule_list;
+static spinlock_t net_schedule_list_lock;
+
+#define MAX_MFN_ALLOC 64
+static unsigned long mfn_list[MAX_MFN_ALLOC];
+static unsigned int alloc_index = 0;
+static spinlock_t mfn_lock = SPIN_LOCK_UNLOCKED;
+
+static unsigned long alloc_mfn(void)
+{
+ unsigned long mfn = 0, flags;
+ spin_lock_irqsave(&mfn_lock, flags);
+ if ( unlikely(alloc_index == 0) )
+ alloc_index = HYPERVISOR_dom_mem_op(
+ MEMOP_increase_reservation, mfn_list, MAX_MFN_ALLOC, 0);
+ if ( alloc_index != 0 )
+ mfn = mfn_list[--alloc_index];
+ spin_unlock_irqrestore(&mfn_lock, flags);
+ return mfn;
+}
+
+static void free_mfn(unsigned long mfn)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&mfn_lock, flags);
+ if ( alloc_index != MAX_MFN_ALLOC )
+ mfn_list[alloc_index++] = mfn;
+ else if ( HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation,
+ &mfn, 1, 0) != 1 )
+ BUG();
+ spin_unlock_irqrestore(&mfn_lock, flags);
+}
+
+static inline void maybe_schedule_tx_action(void)
+{
+ smp_mb();
+ if ( (NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
+ !list_empty(&net_schedule_list) )
+ tasklet_schedule(&net_tx_tasklet);
+}
+
+/*
+ * A gross way of confirming the origin of an skb data page. The slab
+ * allocator abuses a field in the page struct to cache the kmem_cache_t ptr.
+ */
+static inline int is_xen_skb(struct sk_buff *skb)
+{
+ extern kmem_cache_t *skbuff_cachep;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+ kmem_cache_t *cp = (kmem_cache_t *)virt_to_page(skb->head)->lru.next;
+#else
+ kmem_cache_t *cp = (kmem_cache_t *)virt_to_page(skb->head)->list.next;
+#endif
+ return (cp == skbuff_cachep);
+}
+
+int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ netif_t *netif = netdev_priv(dev);
+
+ ASSERT(skb->dev == dev);
+
+ /* Drop the packet if the target domain has no receive buffers. */
+ if ( !netif->active ||
+ (netif->rx_req_cons == netif->rx->req_prod) ||
+ ((netif->rx_req_cons-netif->rx_resp_prod) == NETIF_RX_RING_SIZE) )
+ goto drop;
+
+ /*
+ * We do not copy the packet unless:
+ * 1. The data is shared; or
+ * 2. The data is not allocated from our special cache.
+ * NB. We also couldn't cope with fragmented packets, but we won't get
+ * any because we do not advertise the NETIF_F_SG feature.
+ */
+ if ( skb_shared(skb) || skb_cloned(skb) || !is_xen_skb(skb) )
+ {
+ int hlen = skb->data - skb->head;
+ struct sk_buff *nskb = dev_alloc_skb(hlen + skb->len);
+ if ( unlikely(nskb == NULL) )
+ goto drop;
+ skb_reserve(nskb, hlen);
+ __skb_put(nskb, skb->len);
+ (void)skb_copy_bits(skb, -hlen, nskb->data - hlen, skb->len + hlen);
+ nskb->dev = skb->dev;
+ dev_kfree_skb(skb);
+ skb = nskb;
+ }
+
+ netif->rx_req_cons++;
+ netif_get(netif);
+
+ skb_queue_tail(&rx_queue, skb);
+ tasklet_schedule(&net_rx_tasklet);
+
+ return 0;
+
+ drop:
+ netif->stats.tx_dropped++;
+ dev_kfree_skb(skb);
+ return 0;
+}
+
+#if 0
+static void xen_network_done_notify(void)
+{
+ static struct net_device *eth0_dev = NULL;
+ if ( unlikely(eth0_dev == NULL) )
+ eth0_dev = __dev_get_by_name("eth0");
+ netif_rx_schedule(eth0_dev);
+}
+/*
+ * Add following to poll() function in NAPI driver (Tigon3 is example):
+ * if ( xen_network_done() )
+ * tg3_enable_ints(tp);
+ */
+int xen_network_done(void)
+{
+ return skb_queue_empty(&rx_queue);
+}
+#endif
+
+static void net_rx_action(unsigned long unused)
+{
+ netif_t *netif;
+ s8 status;
+ u16 size, id, evtchn;
+ mmu_update_t *mmu;
+ multicall_entry_t *mcl;
+ unsigned long vdata, mdata, new_mfn;
+ struct sk_buff_head rxq;
+ struct sk_buff *skb;
+ u16 notify_list[NETIF_RX_RING_SIZE];
+ int notify_nr = 0;
+
+ skb_queue_head_init(&rxq);
+
+ mcl = rx_mcl;
+ mmu = rx_mmu;
+ while ( (skb = skb_dequeue(&rx_queue)) != NULL )
+ {
+ netif = netdev_priv(skb->dev);
+ vdata = (unsigned long)skb->data;
+ mdata = virt_to_machine(vdata);
+
+ /* Memory squeeze? Back off for an arbitrary while. */
+ if ( (new_mfn = alloc_mfn()) == 0 )
+ {
+ if ( net_ratelimit() )
+ printk(KERN_WARNING "Memory squeeze in netback driver.\n");
+ mod_timer(&net_timer, jiffies + HZ);
+ skb_queue_head(&rx_queue, skb);
+ break;
+ }
+
+ /*
+ * Set the new P2M table entry before reassigning the old data page.
+ * Heed the comment in pgtable-2level.h:pte_page(). :-)
+ */
+ phys_to_machine_mapping[__pa(skb->data) >> PAGE_SHIFT] = new_mfn;
+
+ mmu[0].ptr = (new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
+ mmu[0].val = __pa(vdata) >> PAGE_SHIFT;
+ mmu[1].ptr = MMU_EXTENDED_COMMAND;
+ mmu[1].val = MMUEXT_SET_FOREIGNDOM;
+ mmu[1].val |= (unsigned long)netif->domid << 16;
+ mmu[2].ptr = (mdata & PAGE_MASK) | MMU_EXTENDED_COMMAND;
+ mmu[2].val = MMUEXT_REASSIGN_PAGE;
+
+ mcl[0].op = __HYPERVISOR_update_va_mapping;
+ mcl[0].args[0] = vdata >> PAGE_SHIFT;
+ mcl[0].args[1] = (new_mfn << PAGE_SHIFT) | __PAGE_KERNEL;
+ mcl[0].args[2] = 0;
+ mcl[1].op = __HYPERVISOR_mmu_update;
+ mcl[1].args[0] = (unsigned long)mmu;
+ mcl[1].args[1] = 3;
+ mcl[1].args[2] = 0;
+
+ mcl += 2;
+ mmu += 3;
+
+ __skb_queue_tail(&rxq, skb);
+
+ /* Filled the batch queue? */
+ if ( (mcl - rx_mcl) == ARRAY_SIZE(rx_mcl) )
+ break;
+ }
+
+ if ( mcl == rx_mcl )
+ return;
+
+ mcl[-2].args[2] = UVMF_FLUSH_TLB;
+ if ( unlikely(HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl) != 0) )
+ BUG();
+
+ mcl = rx_mcl;
+ mmu = rx_mmu;
+ while ( (skb = __skb_dequeue(&rxq)) != NULL )
+ {
+ netif = netdev_priv(skb->dev);
+ size = skb->tail - skb->data;
+
+ /* Rederive the machine addresses. */
+ new_mfn = mcl[0].args[1] >> PAGE_SHIFT;
+ mdata = ((mmu[2].ptr & PAGE_MASK) |
+ ((unsigned long)skb->data & ~PAGE_MASK));
+
+ atomic_set(&(skb_shinfo(skb)->dataref), 1);
+ skb_shinfo(skb)->nr_frags = 0;
+ skb_shinfo(skb)->frag_list = NULL;
+
+ netif->stats.tx_bytes += size;
+ netif->stats.tx_packets++;
+
+ /* The update_va_mapping() must not fail. */
+ if ( unlikely(mcl[0].args[5] != 0) )
+ BUG();
+
+ /* Check the reassignment error code. */
+ status = NETIF_RSP_OKAY;
+ if ( unlikely(mcl[1].args[5] != 0) )
+ {
+ DPRINTK("Failed MMU update transferring to DOM%u\n", netif->domid);
+ free_mfn(mdata >> PAGE_SHIFT);
+ status = NETIF_RSP_ERROR;
+ }
+
+ evtchn = netif->evtchn;
+ id = netif->rx->ring[MASK_NETIF_RX_IDX(netif->rx_resp_prod)].req.id;
+ if ( make_rx_response(netif, id, status, mdata, size) &&
+ (rx_notify[evtchn] == 0) )
+ {
+ rx_notify[evtchn] = 1;
+ notify_list[notify_nr++] = evtchn;
+ }
+
+ netif_put(netif);
+ dev_kfree_skb(skb);
+
+ mcl += 2;
+ mmu += 3;
+ }
+
+ while ( notify_nr != 0 )
+ {
+ evtchn = notify_list[--notify_nr];
+ rx_notify[evtchn] = 0;
+ notify_via_evtchn(evtchn);
+ }
+
+ /* More work to do? */
+ if ( !skb_queue_empty(&rx_queue) && !timer_pending(&net_timer) )
+ tasklet_schedule(&net_rx_tasklet);
+#if 0
+ else
+ xen_network_done_notify();
+#endif
+}
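+
+/*
+ * Summary of the receive path above, since the multicall batch is
+ * dense: for each skb the backend (1) allocates a fresh machine frame
+ * to replace the data page it is about to give away, (2) updates the
+ * phys-to-machine table and the kernel mapping with a batched
+ * update_va_mapping, and (3) issues MMUEXT_SET_FOREIGNDOM plus
+ * MMUEXT_REASSIGN_PAGE so the hypervisor transfers the old frame to
+ * the guest. The second loop walks the completed batch, posts rx
+ * responses, and notifies each event channel at most once.
+ */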
+
+static void net_alarm(unsigned long unused)
+{
+ tasklet_schedule(&net_rx_tasklet);
+}
+
+struct net_device_stats *netif_be_get_stats(struct net_device *dev)
+{
+ netif_t *netif = netdev_priv(dev);
+ return &netif->stats;
+}
+
+static int __on_net_schedule_list(netif_t *netif)
+{
+ return netif->list.next != NULL;
+}
+
+static void remove_from_net_schedule_list(netif_t *netif)
+{
+ spin_lock_irq(&net_schedule_list_lock);
+ if ( likely(__on_net_schedule_list(netif)) )
+ {
+ list_del(&netif->list);
+ netif->list.next = NULL;
+ netif_put(netif);
+ }
+ spin_unlock_irq(&net_schedule_list_lock);
+}
+
+static void add_to_net_schedule_list_tail(netif_t *netif)
+{
+ if ( __on_net_schedule_list(netif) )
+ return;
+
+ spin_lock_irq(&net_schedule_list_lock);
+ if ( !__on_net_schedule_list(netif) && netif->active )
+ {
+ list_add_tail(&netif->list, &net_schedule_list);
+ netif_get(netif);
+ }
+ spin_unlock_irq(&net_schedule_list_lock);
+}
+
+void netif_schedule_work(netif_t *netif)
+{
+ if ( (netif->tx_req_cons != netif->tx->req_prod) &&
+ ((netif->tx_req_cons-netif->tx_resp_prod) != NETIF_TX_RING_SIZE) )
+ {
+ add_to_net_schedule_list_tail(netif);
+ maybe_schedule_tx_action();
+ }
+}
+
+void netif_deschedule_work(netif_t *netif)
+{
+ remove_from_net_schedule_list(netif);
+}
+
+#if 0
+static void tx_credit_callback(unsigned long data)
+{
+ netif_t *netif = (netif_t *)data;
+ netif->remaining_credit = netif->credit_bytes;
+ netif_schedule_work(netif);
+}
+#endif
+
+static void net_tx_action(unsigned long unused)
+{
+ struct list_head *ent;
+ struct sk_buff *skb;
+ netif_t *netif;
+ netif_tx_request_t txreq;
+ u16 pending_idx;
+ NETIF_RING_IDX i;
+ multicall_entry_t *mcl;
+ PEND_RING_IDX dc, dp;
+ unsigned int data_len;
+
+ if ( (dc = dealloc_cons) == (dp = dealloc_prod) )
+ goto skip_dealloc;
+
+ mcl = tx_mcl;
+ while ( dc != dp )
+ {
+ pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)];
+ mcl[0].op = __HYPERVISOR_update_va_mapping;
+ mcl[0].args[0] = MMAP_VADDR(pending_idx) >> PAGE_SHIFT;
+ mcl[0].args[1] = 0;
+ mcl[0].args[2] = 0;
+ mcl++;
+ }
+
+ mcl[-1].args[2] = UVMF_FLUSH_TLB;
+ if ( unlikely(HYPERVISOR_multicall(tx_mcl, mcl - tx_mcl) != 0) )
+ BUG();
+
+ mcl = tx_mcl;
+ while ( dealloc_cons != dp )
+ {
+ /* The update_va_mapping() must not fail. */
+ if ( unlikely(mcl[0].args[5] != 0) )
+ BUG();
+
+ pending_idx = dealloc_ring[MASK_PEND_IDX(dealloc_cons++)];
+
+ netif = pending_tx_info[pending_idx].netif;
+
+ make_tx_response(netif, pending_tx_info[pending_idx].req.id,
+ NETIF_RSP_OKAY);
+
+ pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
+
+ /*
+ * Scheduling checks must happen after the above response is posted.
+ * This avoids a possible race with a guest OS on another CPU if that
+ * guest is testing against 'resp_prod' when deciding whether to notify
+ * us when it queues additional packets.
+ */
+ mb();
+ if ( (netif->tx_req_cons != netif->tx->req_prod) &&
+ ((netif->tx_req_cons-netif->tx_resp_prod) != NETIF_TX_RING_SIZE) )
+ add_to_net_schedule_list_tail(netif);
+
+ netif_put(netif);
+
+ mcl++;
+ }
+
+ skip_dealloc:
+ mcl = tx_mcl;
+ while ( (NR_PENDING_REQS < MAX_PENDING_REQS) &&
+ !list_empty(&net_schedule_list) )
+ {
+ /* Get a netif from the list with work to do. */
+ ent = net_schedule_list.next;
+ netif = list_entry(ent, netif_t, list);
+ netif_get(netif);
+ remove_from_net_schedule_list(netif);
+
+ /* Work to do? */
+ i = netif->tx_req_cons;
+ if ( (i == netif->tx->req_prod) ||
+ ((i-netif->tx_resp_prod) == NETIF_TX_RING_SIZE) )
+ {
+ netif_put(netif);
+ continue;
+ }
+
+ netif->tx->req_cons = ++netif->tx_req_cons;
+
+ /*
+ * 1. Ensure that we see the request when we copy it.
+ * 2. Ensure that frontend sees updated req_cons before we check
+ * for more work to schedule.
+ */
+ mb();
+
+ memcpy(&txreq, &netif->tx->ring[MASK_NETIF_TX_IDX(i)].req,
+ sizeof(txreq));
+
+#if 0
+ /* Credit-based scheduling. */
+ if ( txreq.size > netif->remaining_credit )
+ {
+ s_time_t now = NOW(), next_credit =
+ netif->credit_timeout.expires + MICROSECS(netif->credit_usec);
+ if ( next_credit <= now )
+ {
+ netif->credit_timeout.expires = now;
+ netif->remaining_credit = netif->credit_bytes;
+ }
+ else
+ {
+ netif->remaining_credit = 0;
+ netif->credit_timeout.expires = next_credit;
+ netif->credit_timeout.data = (unsigned long)netif;
+ netif->credit_timeout.function = tx_credit_callback;
+ netif->credit_timeout.cpu = smp_processor_id();
+ add_ac_timer(&netif->credit_timeout);
+ break;
+ }
+ }
+ netif->remaining_credit -= txreq.size;
+#endif
+
+ netif_schedule_work(netif);
+
+ if ( unlikely(txreq.size < ETH_HLEN) ||
+ unlikely(txreq.size > ETH_FRAME_LEN) )
+ {
+ DPRINTK("Bad packet size: %d\n", txreq.size);
+ make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
+ netif_put(netif);
+ continue;
+ }
+
+ /* The payload must not cross a page boundary, as it cannot be fragmented. */
+ if ( unlikely(((txreq.addr & ~PAGE_MASK) + txreq.size) >= PAGE_SIZE) )
+ {
+ DPRINTK("txreq.addr: %lx, size: %u, end: %lu\n",
+ txreq.addr, txreq.size,
+ (txreq.addr &~PAGE_MASK) + txreq.size);
+ make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
+ netif_put(netif);
+ continue;
+ }
+
+ pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
+
+ data_len = (txreq.size > PKT_PROT_LEN) ? PKT_PROT_LEN : txreq.size;
+
+ if ( unlikely((skb = alloc_skb(data_len+16, GFP_ATOMIC)) == NULL) )
+ {
+ DPRINTK("Can't allocate a skb in start_xmit.\n");
+ make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
+ netif_put(netif);
+ break;
+ }
+
+ /* Packets passed to netif_rx() must have some headroom. */
+ skb_reserve(skb, 16);
+
+ mcl[0].op = __HYPERVISOR_update_va_mapping_otherdomain;
+ mcl[0].args[0] = MMAP_VADDR(pending_idx) >> PAGE_SHIFT;
+ mcl[0].args[1] = (txreq.addr & PAGE_MASK) | __PAGE_KERNEL;
+ mcl[0].args[2] = 0;
+ mcl[0].args[3] = netif->domid;
+ mcl++;
+
+ memcpy(&pending_tx_info[pending_idx].req, &txreq, sizeof(txreq));
+ pending_tx_info[pending_idx].netif = netif;
+ *((u16 *)skb->data) = pending_idx;
+
+ __skb_queue_tail(&tx_queue, skb);
+
+ pending_cons++;
+
+ /* Filled the batch queue? */
+ if ( (mcl - tx_mcl) == ARRAY_SIZE(tx_mcl) )
+ break;
+ }
+
+ if ( mcl == tx_mcl )
+ return;
+
+ if ( unlikely(HYPERVISOR_multicall(tx_mcl, mcl - tx_mcl) != 0) )
+ BUG();
+
+ mcl = tx_mcl;
+ while ( (skb = __skb_dequeue(&tx_queue)) != NULL )
+ {
+ pending_idx = *((u16 *)skb->data);
+ netif = pending_tx_info[pending_idx].netif;
+ memcpy(&txreq, &pending_tx_info[pending_idx].req, sizeof(txreq));
+
+ /* Check the remap error code. */
+ if ( unlikely(mcl[0].args[5] != 0) )
+ {
+ DPRINTK("Bad page frame\n");
+ make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
+ netif_put(netif);
+ kfree_skb(skb);
+ mcl++;
+ pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
+ continue;
+ }
+
+ phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx)) >> PAGE_SHIFT] =
+ FOREIGN_FRAME(txreq.addr >> PAGE_SHIFT);
+
+ data_len = (txreq.size > PKT_PROT_LEN) ? PKT_PROT_LEN : txreq.size;
+
+ __skb_put(skb, data_len);
+ memcpy(skb->data,
+ (void *)(MMAP_VADDR(pending_idx)|(txreq.addr&~PAGE_MASK)),
+ data_len);
+
+ if ( data_len < txreq.size )
+ {
+ /* Append the packet payload as a fragment. */
+ skb_shinfo(skb)->frags[0].page =
+ virt_to_page(MMAP_VADDR(pending_idx));
+ skb_shinfo(skb)->frags[0].size = txreq.size - data_len;
+ skb_shinfo(skb)->frags[0].page_offset =
+ (txreq.addr + data_len) & ~PAGE_MASK;
+ skb_shinfo(skb)->nr_frags = 1;
+ }
+ else
+ {
+ /* Schedule a response immediately. */
+ netif_idx_release(pending_idx);
+ }
+
+ skb->data_len = txreq.size - data_len;
+ skb->len += skb->data_len;
+
+ skb->dev = netif->dev;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ netif->stats.rx_bytes += txreq.size;
+ netif->stats.rx_packets++;
+
+ netif_rx(skb);
+ netif->dev->last_rx = jiffies;
+
+ mcl++;
+ }
+}
+
+static void netif_idx_release(u16 pending_idx)
+{
+ static spinlock_t _lock = SPIN_LOCK_UNLOCKED;
+ unsigned long flags;
+
+ spin_lock_irqsave(&_lock, flags);
+ dealloc_ring[MASK_PEND_IDX(dealloc_prod++)] = pending_idx;
+ spin_unlock_irqrestore(&_lock, flags);
+
+ tasklet_schedule(&net_tx_tasklet);
+}
+
+static void netif_page_release(struct page *page)
+{
+ u16 pending_idx = page - virt_to_page(mmap_vstart);
+
+ /* Ready for next use. */
+ set_page_count(page, 1);
+
+ netif_idx_release(pending_idx);
+}
+
+irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs)
+{
+ netif_t *netif = dev_id;
+ if ( tx_work_exists(netif) )
+ {
+ add_to_net_schedule_list_tail(netif);
+ maybe_schedule_tx_action();
+ }
+ return IRQ_HANDLED;
+}
+
+static void make_tx_response(netif_t *netif,
+ u16 id,
+ s8 st)
+{
+ NETIF_RING_IDX i = netif->tx_resp_prod;
+ netif_tx_response_t *resp;
+
+ resp = &netif->tx->ring[MASK_NETIF_TX_IDX(i)].resp;
+ resp->id = id;
+ resp->status = st;
+ wmb();
+ netif->tx->resp_prod = netif->tx_resp_prod = ++i;
+
+ mb(); /* Update producer before checking event threshold. */
+ if ( i == netif->tx->event )
+ notify_via_evtchn(netif->evtchn);
+}
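+
+/*
+ * Note on the notification test above: the frontend publishes in
+ * tx->event the response-producer value at which it next wants an
+ * interrupt, so comparing the post-increment producer against it lets
+ * a frontend that is keeping up suppress most notifications.
+ */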
+
+static int make_rx_response(netif_t *netif,
+ u16 id,
+ s8 st,
+ memory_t addr,
+ u16 size)
+{
+ NETIF_RING_IDX i = netif->rx_resp_prod;
+ netif_rx_response_t *resp;
+
+ resp = &netif->rx->ring[MASK_NETIF_RX_IDX(i)].resp;
+ resp->addr = addr;
+ resp->id = id;
+ resp->status = (s16)size;
+ if ( st < 0 )
+ resp->status = (s16)st;
+ wmb();
+ netif->rx->resp_prod = netif->rx_resp_prod = ++i;
+
+ mb(); /* Update producer before checking event threshold. */
+ return (i == netif->rx->event);
+}
+
+static irqreturn_t netif_be_dbg(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct list_head *ent;
+ netif_t *netif;
+ int i = 0;
+
+ printk(KERN_ALERT "netif_schedule_list:\n");
+ spin_lock_irq(&net_schedule_list_lock);
+
+ list_for_each ( ent, &net_schedule_list )
+ {
+ netif = list_entry(ent, netif_t, list);
+ printk(KERN_ALERT " %d: private(rx_req_cons=%08x rx_resp_prod=%08x\n",
+ i, netif->rx_req_cons, netif->rx_resp_prod);
+ printk(KERN_ALERT " tx_req_cons=%08x tx_resp_prod=%08x)\n",
+ netif->tx_req_cons, netif->tx_resp_prod);
+ printk(KERN_ALERT " shared(rx_req_prod=%08x rx_resp_prod=%08x\n",
+ netif->rx->req_prod, netif->rx->resp_prod);
+ printk(KERN_ALERT " rx_event=%08x tx_req_prod=%08x\n",
+ netif->rx->event, netif->tx->req_prod);
+ printk(KERN_ALERT " tx_resp_prod=%08x, tx_event=%08x)\n",
+ netif->tx->resp_prod, netif->tx->event);
+ i++;
+ }
+
+ spin_unlock_irq(&net_schedule_list_lock);
+ printk(KERN_ALERT " ** End of netif_schedule_list **\n");
+
+ return IRQ_HANDLED;
+}
+
+static int __init netback_init(void)
+{
+ int i;
+ struct page *page;
+
+ if ( !(xen_start_info.flags & SIF_NET_BE_DOMAIN) &&
+ !(xen_start_info.flags & SIF_INITDOMAIN) )
+ return 0;
+
+ printk("Initialising Xen netif backend\n");
+
+ /* We can increase reservation by this much in net_rx_action(). */
+ balloon_update_driver_allowance(NETIF_RX_RING_SIZE);
+
+ skb_queue_head_init(&rx_queue);
+ skb_queue_head_init(&tx_queue);
+
+ init_timer(&net_timer);
+ net_timer.data = 0;
+ net_timer.function = net_alarm;
+
+ netif_interface_init();
+
+ if ( (mmap_vstart = allocate_empty_lowmem_region(MAX_PENDING_REQS)) == 0 )
+ BUG();
+
+ for ( i = 0; i < MAX_PENDING_REQS; i++ )
+ {
+ page = virt_to_page(MMAP_VADDR(i));
+ set_page_count(page, 1);
+ SetPageForeign(page, netif_page_release);
+ }
+
+ pending_cons = 0;
+ pending_prod = MAX_PENDING_REQS;
+ for ( i = 0; i < MAX_PENDING_REQS; i++ )
+ pending_ring[i] = i;
+
+ spin_lock_init(&net_schedule_list_lock);
+ INIT_LIST_HEAD(&net_schedule_list);
+
+ netif_ctrlif_init();
+
+ (void)request_irq(bind_virq_to_irq(VIRQ_DEBUG),
+ netif_be_dbg, SA_SHIRQ,
+ "net-be-dbg", &netif_be_dbg);
+
+ return 0;
+}
+
+static void netback_cleanup(void)
+{
+ BUG();
+}
+
+module_init(netback_init);
+module_exit(netback_cleanup);
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/drivers/xen/netfront/Kconfig
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/netfront/Kconfig Tue Aug 9 15:17:45 2005
@@ -0,0 +1,6 @@
+
+config XENNET
+ tristate "Xen network driver"
+ depends on NETDEVICES && ARCH_XEN
+ help
+ Network driver for Xen
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/drivers/xen/netfront/Makefile
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/netfront/Makefile Tue Aug 9 15:17:45 2005
@@ -0,0 +1,2 @@
+
+obj-y := netfront.o
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,1480 @@
+/******************************************************************************
+ * Virtual network driver for conversing with remote driver backends.
+ *
+ * Copyright (c) 2002-2004, K A Fraser
+ *
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/proc_fs.h>
+#include <net/sock.h>
+#include <net/pkt_sched.h>
+#include <net/arp.h>
+#include <net/route.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm-xen/evtchn.h>
+#include <asm-xen/ctrl_if.h>
+#include <asm-xen/xen-public/io/netif.h>
+#include <asm-xen/balloon.h>
+#include <asm/page.h>
+
+#ifndef __GFP_NOWARN
+#define __GFP_NOWARN 0
+#endif
+#define alloc_xen_skb(_l) __dev_alloc_skb((_l), GFP_ATOMIC|__GFP_NOWARN)
+
+#define init_skb_shinfo(_skb) \
+ do { \
+ atomic_set(&(skb_shinfo(_skb)->dataref), 1); \
+ skb_shinfo(_skb)->nr_frags = 0; \
+ skb_shinfo(_skb)->frag_list = NULL; \
+ } while (0)
+
+/* Allow headroom on each rx pkt for Ethernet header, alignment padding, ... */
+#define RX_HEADROOM 200
+
+/*
+ * If the backend driver is pipelining transmit requests then we can be very
+ * aggressive in avoiding new-packet notifications -- only need to send a
+ * notification if there are no outstanding unreceived responses.
+ * If the backend may be buffering our transmit buffers for any reason then we
+ * are rather more conservative.
+ */
+#ifdef CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER
+#define TX_TEST_IDX resp_prod /* aggressive: any outstanding responses? */
+#else
+#define TX_TEST_IDX req_cons /* conservative: not seen all our requests? */
+#endif
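+/*
+ * NB. network_start_xmit() tests np->tx->TX_TEST_IDX after queueing a
+ * request and notifies the backend only if that index has caught up
+ * with the slot just filled -- i.e. only when the backend may
+ * otherwise never notice the new work.
+ */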
+
+static void network_tx_buf_gc(struct net_device *dev);
+static void network_alloc_rx_buffers(struct net_device *dev);
+
+static unsigned long rx_pfn_array[NETIF_RX_RING_SIZE];
+static multicall_entry_t rx_mcl[NETIF_RX_RING_SIZE+1];
+static mmu_update_t rx_mmu[NETIF_RX_RING_SIZE];
+
+#ifdef CONFIG_PROC_FS
+static int xennet_proc_init(void);
+static int xennet_proc_addif(struct net_device *dev);
+static void xennet_proc_delif(struct net_device *dev);
+#else
+#define xennet_proc_init() (0)
+#define xennet_proc_addif(d) (0)
+#define xennet_proc_delif(d) ((void)0)
+#endif
+
+static struct list_head dev_list;
+
+struct net_private
+{
+ struct list_head list;
+ struct net_device *dev;
+
+ struct net_device_stats stats;
+ NETIF_RING_IDX rx_resp_cons, tx_resp_cons;
+ unsigned int tx_full;
+
+ netif_tx_interface_t *tx;
+ netif_rx_interface_t *rx;
+
+ spinlock_t tx_lock;
+ spinlock_t rx_lock;
+
+ unsigned int handle;
+ unsigned int evtchn;
+ unsigned int irq;
+
+ /* What is the status of our connection to the remote backend? */
+#define BEST_CLOSED 0
+#define BEST_DISCONNECTED 1
+#define BEST_CONNECTED 2
+ unsigned int backend_state;
+
+ /* Is this interface open or closed (down or up)? */
+#define UST_CLOSED 0
+#define UST_OPEN 1
+ unsigned int user_state;
+
+ /* Receive-ring batched refills. */
+#define RX_MIN_TARGET 8
+#define RX_MAX_TARGET NETIF_RX_RING_SIZE
+ int rx_min_target, rx_max_target, rx_target;
+ struct sk_buff_head rx_batch;
+
+ /*
+ * {tx,rx}_skbs store outstanding skbuffs. The first entry in each
+ * array is an index into a chain of free entries.
+ */
+ struct sk_buff *tx_skbs[NETIF_TX_RING_SIZE+1];
+ struct sk_buff *rx_skbs[NETIF_RX_RING_SIZE+1];
+};
+
+/* Access macros for acquiring/freeing slots in {tx,rx}_skbs[]. */
+#define ADD_ID_TO_FREELIST(_list, _id) \
+ (_list)[(_id)] = (_list)[0]; \
+ (_list)[0] = (void *)(unsigned long)(_id);
+#define GET_ID_FROM_FREELIST(_list) \
+ ({ unsigned long _id = (unsigned long)(_list)[0]; \
+ (_list)[0] = (_list)[_id]; \
+ (unsigned short)_id; })
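+/*
+ * Example: slot 0 always holds the index of the first free slot.
+ * ADD_ID_TO_FREELIST pushes an id by chaining the old head behind it;
+ * GET_ID_FROM_FREELIST pops the head and promotes the id chained
+ * behind it. Free-chain indices and real skb pointers share the same
+ * array; network_connect() relies on indices being < __PAGE_OFFSET to
+ * tell them apart.
+ */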
+
+static char *status_name[] = {
+ [NETIF_INTERFACE_STATUS_CLOSED] = "closed",
+ [NETIF_INTERFACE_STATUS_DISCONNECTED] = "disconnected",
+ [NETIF_INTERFACE_STATUS_CONNECTED] = "connected",
+ [NETIF_INTERFACE_STATUS_CHANGED] = "changed",
+};
+
+static char *be_state_name[] = {
+ [BEST_CLOSED] = "closed",
+ [BEST_DISCONNECTED] = "disconnected",
+ [BEST_CONNECTED] = "connected",
+};
+
+#if DEBUG
+#define DPRINTK(fmt, args...) \
+ printk(KERN_ALERT "xen_net (%s:%d) " fmt, __FUNCTION__, __LINE__, ##args)
+#else
+#define DPRINTK(fmt, args...) ((void)0)
+#endif
+#define IPRINTK(fmt, args...) \
+ printk(KERN_INFO "xen_net: " fmt, ##args)
+#define WPRINTK(fmt, args...) \
+ printk(KERN_WARNING "xen_net: " fmt, ##args)
+
+static struct net_device *find_dev_by_handle(unsigned int handle)
+{
+ struct list_head *ent;
+ struct net_private *np;
+ list_for_each (ent, &dev_list) {
+ np = list_entry(ent, struct net_private, list);
+ if (np->handle == handle)
+ return np->dev;
+ }
+ return NULL;
+}
+
+/** Network interface info. */
+struct netif_ctrl {
+ /** Number of interfaces. */
+ int interface_n;
+ /** Number of connected interfaces. */
+ int connected_n;
+ /** Error code. */
+ int err;
+ int up;
+};
+
+static struct netif_ctrl netctrl;
+
+static void netctrl_init(void)
+{
+ memset(&netctrl, 0, sizeof(netctrl));
+ netctrl.up = NETIF_DRIVER_STATUS_DOWN;
+}
+
+/** Get or set a network interface error.
+ */
+static int netctrl_err(int err)
+{
+ if ((err < 0) && !netctrl.err)
+ netctrl.err = err;
+ return netctrl.err;
+}
+
+/** Test if all network interfaces are connected.
+ *
+ * @return 1 if all connected, 0 if not, negative error code otherwise
+ */
+static int netctrl_connected(void)
+{
+ int ok;
+
+ if (netctrl.err)
+ ok = netctrl.err;
+ else if (netctrl.up == NETIF_DRIVER_STATUS_UP)
+ ok = (netctrl.connected_n == netctrl.interface_n);
+ else
+ ok = 0;
+
+ return ok;
+}
+
+/** Count the connected network interfaces.
+ *
+ * @return connected count
+ */
+static int netctrl_connected_count(void)
+{
+
+ struct list_head *ent;
+ struct net_private *np;
+ unsigned int connected;
+
+ connected = 0;
+
+ list_for_each(ent, &dev_list) {
+ np = list_entry(ent, struct net_private, list);
+ if (np->backend_state == BEST_CONNECTED)
+ connected++;
+ }
+
+ netctrl.connected_n = connected;
+ DPRINTK("> connected_n=%d interface_n=%d\n",
+ netctrl.connected_n, netctrl.interface_n);
+ return connected;
+}
+
+/** Send a packet on a net device to encourage switches to learn the
+ * MAC. We send a fake ARP request.
+ *
+ * @param dev device
+ * @return 0 on success, error code otherwise
+ */
+static int send_fake_arp(struct net_device *dev)
+{
+ struct sk_buff *skb;
+ u32 src_ip, dst_ip;
+
+ dst_ip = INADDR_BROADCAST;
+ src_ip = inet_select_addr(dev, dst_ip, RT_SCOPE_LINK);
+
+ /* No IP? Then nothing to do. */
+ if (src_ip == 0)
+ return 0;
+
+ skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
+ dst_ip, dev, src_ip,
+ /*dst_hw*/ NULL, /*src_hw*/ NULL,
+ /*target_hw*/ dev->dev_addr);
+ if (skb == NULL)
+ return -ENOMEM;
+
+ return dev_queue_xmit(skb);
+}
+
+static int network_open(struct net_device *dev)
+{
+ struct net_private *np = netdev_priv(dev);
+
+ memset(&np->stats, 0, sizeof(np->stats));
+
+ np->user_state = UST_OPEN;
+
+ network_alloc_rx_buffers(dev);
+ np->rx->event = np->rx_resp_cons + 1;
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+static void network_tx_buf_gc(struct net_device *dev)
+{
+ NETIF_RING_IDX i, prod;
+ unsigned short id;
+ struct net_private *np = netdev_priv(dev);
+ struct sk_buff *skb;
+
+ if (np->backend_state != BEST_CONNECTED)
+ return;
+
+ do {
+ prod = np->tx->resp_prod;
+ rmb(); /* Ensure we see responses up to 'rp'. */
+
+ for (i = np->tx_resp_cons; i != prod; i++) {
+ id = np->tx->ring[MASK_NETIF_TX_IDX(i)].resp.id;
+ skb = np->tx_skbs[id];
+ ADD_ID_TO_FREELIST(np->tx_skbs, id);
+ dev_kfree_skb_irq(skb);
+ }
+
+ np->tx_resp_cons = prod;
+
+ /*
+ * Set a new event, then check for race with update of tx_cons. Note
+ * that it is essential to schedule a callback, no matter how few
+ * buffers are pending. Even if there is space in the transmit ring,
+ * higher layers may be blocked because too much data is outstanding:
+ * in such cases notification from Xen is likely to be the only kick
+ * that we'll get.
+ */
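+ /* I.e. request the next event once roughly half of the currently
+ * outstanding requests have been responded to. */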
+ np->tx->event =
+ prod + ((np->tx->req_prod - prod) >> 1) + 1;
+ mb();
+ } while (prod != np->tx->resp_prod);
+
+ if (np->tx_full && ((np->tx->req_prod - prod) < NETIF_TX_RING_SIZE)) {
+ np->tx_full = 0;
+ if (np->user_state == UST_OPEN)
+ netif_wake_queue(dev);
+ }
+}
+
+
+static void network_alloc_rx_buffers(struct net_device *dev)
+{
+ unsigned short id;
+ struct net_private *np = netdev_priv(dev);
+ struct sk_buff *skb;
+ int i, batch_target;
+ NETIF_RING_IDX req_prod = np->rx->req_prod;
+
+ if (unlikely(np->backend_state != BEST_CONNECTED))
+ return;
+
+ /*
+ * Allocate skbuffs greedily, even though we batch updates to the
+ * receive ring. This creates a less bursty demand on the memory allocator,
+ * so should reduce the chance of failed allocation requests both for
+ * ourselves and for other kernel subsystems.
+ */
+ batch_target = np->rx_target - (req_prod - np->rx_resp_cons);
+ for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
+ if (unlikely((skb = alloc_xen_skb(dev->mtu + RX_HEADROOM)) == NULL))
+ break;
+ __skb_queue_tail(&np->rx_batch, skb);
+ }
+
+ /* Is the batch large enough to be worthwhile? */
+ if (i < (np->rx_target/2))
+ return;
+
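+ /*
+ * For each buffer: zap its PTE, remove the page from the
+ * pseudo-physical map, and return the underlying machine frame to
+ * Xen via decrease_reservation. The backend transfers a
+ * packet-bearing frame back for each request, which netif_poll()
+ * later remaps (the page-flipping receive path).
+ */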
+ for (i = 0; ; i++) {
+ if ((skb = __skb_dequeue(&np->rx_batch)) == NULL)
+ break;
+
+ skb->dev = dev;
+
+ id = GET_ID_FROM_FREELIST(np->rx_skbs);
+
+ np->rx_skbs[id] = skb;
+
+ np->rx->ring[MASK_NETIF_RX_IDX(req_prod + i)].req.id = id;
+
+ rx_pfn_array[i] = virt_to_machine(skb->head) >> PAGE_SHIFT;
+
+ /* Remove this page from pseudo phys map before passing back to Xen. */
+ phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT]
+ = INVALID_P2M_ENTRY;
+
+ rx_mcl[i].op = __HYPERVISOR_update_va_mapping;
+ rx_mcl[i].args[0] = (unsigned long)skb->head >> PAGE_SHIFT;
+ rx_mcl[i].args[1] = 0;
+ rx_mcl[i].args[2] = 0;
+ }
+
+ /*
+ * We may have allocated buffers which have entries outstanding in the page
+ * update queue -- make sure we flush those first!
+ */
+ flush_page_update_queue();
+
+ /* After all PTEs have been zapped we blow away stale TLB entries. */
+ rx_mcl[i-1].args[2] = UVMF_FLUSH_TLB;
+
+ /* Give away a batch of pages. */
+ rx_mcl[i].op = __HYPERVISOR_dom_mem_op;
+ rx_mcl[i].args[0] = MEMOP_decrease_reservation;
+ rx_mcl[i].args[1] = (unsigned long)rx_pfn_array;
+ rx_mcl[i].args[2] = (unsigned long)i;
+ rx_mcl[i].args[3] = 0;
+ rx_mcl[i].args[4] = DOMID_SELF;
+
+ /* Tell the balloon driver what is going on. */
+ balloon_update_driver_allowance(i);
+
+ /* Zap PTEs and give away pages in one big multicall. */
+ (void)HYPERVISOR_multicall(rx_mcl, i+1);
+
+ /* Check return status of HYPERVISOR_dom_mem_op(). */
+ if (unlikely(rx_mcl[i].args[5] != i))
+ panic("Unable to reduce memory reservation\n");
+
+ /* Above is a suitable barrier to ensure backend will see requests. */
+ np->rx->req_prod = req_prod + i;
+
+ /* Adjust our floating fill target if we risked running out of buffers. */
+ if (((req_prod - np->rx->resp_prod) < (np->rx_target / 4)) &&
+ ((np->rx_target *= 2) > np->rx_max_target))
+ np->rx_target = np->rx_max_target;
+}
+
+
+static int network_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ unsigned short id;
+ struct net_private *np = netdev_priv(dev);
+ netif_tx_request_t *tx;
+ NETIF_RING_IDX i;
+
+ if (unlikely(np->tx_full)) {
+ printk(KERN_ALERT "%s: full queue wasn't stopped!\n", dev->name);
+ netif_stop_queue(dev);
+ goto drop;
+ }
+
+ if (unlikely((((unsigned long)skb->data & ~PAGE_MASK) + skb->len) >=
+ PAGE_SIZE)) {
+ struct sk_buff *nskb;
+ if (unlikely((nskb = alloc_xen_skb(skb->len)) == NULL))
+ goto drop;
+ skb_put(nskb, skb->len);
+ memcpy(nskb->data, skb->data, skb->len);
+ nskb->dev = skb->dev;
+ dev_kfree_skb(skb);
+ skb = nskb;
+ }
+
+ spin_lock_irq(&np->tx_lock);
+
+ if (np->backend_state != BEST_CONNECTED) {
+ spin_unlock_irq(&np->tx_lock);
+ goto drop;
+ }
+
+ i = np->tx->req_prod;
+
+ id = GET_ID_FROM_FREELIST(np->tx_skbs);
+ np->tx_skbs[id] = skb;
+
+ tx = &np->tx->ring[MASK_NETIF_TX_IDX(i)].req;
+
+ tx->id = id;
+ tx->addr = virt_to_machine(skb->data);
+ tx->size = skb->len;
+
+ wmb(); /* Ensure that backend will see the request. */
+ np->tx->req_prod = i + 1;
+
+ network_tx_buf_gc(dev);
+
+ if ((i - np->tx_resp_cons) == (NETIF_TX_RING_SIZE - 1)) {
+ np->tx_full = 1;
+ netif_stop_queue(dev);
+ }
+
+ spin_unlock_irq(&np->tx_lock);
+
+ np->stats.tx_bytes += skb->len;
+ np->stats.tx_packets++;
+
+ /* Only notify Xen if we really have to. */
+ mb();
+ if (np->tx->TX_TEST_IDX == i)
+ notify_via_evtchn(np->evtchn);
+
+ return 0;
+
+ drop:
+ np->stats.tx_dropped++;
+ dev_kfree_skb(skb);
+ return 0;
+}
+
+static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs)
+{
+ struct net_device *dev = dev_id;
+ struct net_private *np = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&np->tx_lock, flags);
+ network_tx_buf_gc(dev);
+ spin_unlock_irqrestore(&np->tx_lock, flags);
+
+ if ((np->rx_resp_cons != np->rx->resp_prod) && (np->user_state == UST_OPEN))
+ netif_rx_schedule(dev);
+
+ return IRQ_HANDLED;
+}
+
+
+static int netif_poll(struct net_device *dev, int *pbudget)
+{
+ struct net_private *np = netdev_priv(dev);
+ struct sk_buff *skb, *nskb;
+ netif_rx_response_t *rx;
+ NETIF_RING_IDX i, rp;
+ mmu_update_t *mmu = rx_mmu;
+ multicall_entry_t *mcl = rx_mcl;
+ int work_done, budget, more_to_do = 1;
+ struct sk_buff_head rxq;
+ unsigned long flags;
+
+ spin_lock(&np->rx_lock);
+
+ if (np->backend_state != BEST_CONNECTED) {
+ spin_unlock(&np->rx_lock);
+ return 0;
+ }
+
+ skb_queue_head_init(&rxq);
+
+ if ((budget = *pbudget) > dev->quota)
+ budget = dev->quota;
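+ /* Standard NAPI accounting: handle at most min(*pbudget, dev->quota)
+ * responses in this poll. */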
+
+ rp = np->rx->resp_prod;
+ rmb(); /* Ensure we see queued responses up to 'rp'. */
+
+ for (i = np->rx_resp_cons, work_done = 0;
+ (i != rp) && (work_done < budget);
+ i++, work_done++) {
+ rx = &np->rx->ring[MASK_NETIF_RX_IDX(i)].resp;
+
+ /*
+ * An error here is very odd. Usually indicates a backend bug,
+ * low-memory condition, or that we didn't have reservation headroom.
+ */
+ if (unlikely(rx->status <= 0)) {
+ if (net_ratelimit())
+ printk(KERN_WARNING "Bad rx buffer (memory squeeze?).\n");
+ np->rx->ring[MASK_NETIF_RX_IDX(np->rx->req_prod)].req.id = rx->id;
+ wmb();
+ np->rx->req_prod++;
+ work_done--;
+ continue;
+ }
+
+ skb = np->rx_skbs[rx->id];
+ ADD_ID_TO_FREELIST(np->rx_skbs, rx->id);
+
+ /* NB. We handle skb overflow later. */
+ skb->data = skb->head + (rx->addr & ~PAGE_MASK);
+ skb->len = rx->status;
+ skb->tail = skb->data + skb->len;
+
+ np->stats.rx_packets++;
+ np->stats.rx_bytes += rx->status;
+
+ /* Remap the page. */
+ mmu->ptr = (rx->addr & PAGE_MASK) | MMU_MACHPHYS_UPDATE;
+ mmu->val = __pa(skb->head) >> PAGE_SHIFT;
+ mmu++;
+ mcl->op = __HYPERVISOR_update_va_mapping;
+ mcl->args[0] = (unsigned long)skb->head >> PAGE_SHIFT;
+ mcl->args[1] = (rx->addr & PAGE_MASK) | __PAGE_KERNEL;
+ mcl->args[2] = 0;
+ mcl++;
+
+ phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] =
+ rx->addr >> PAGE_SHIFT;
+
+ __skb_queue_tail(&rxq, skb);
+ }
+
+ /* Some pages are no longer absent... */
+ balloon_update_driver_allowance(-work_done);
+
+ /* Do all the remapping work, and M->P updates, in one big hypercall. */
+ if (likely((mcl - rx_mcl) != 0)) {
+ mcl->op = __HYPERVISOR_mmu_update;
+ mcl->args[0] = (unsigned long)rx_mmu;
+ mcl->args[1] = mmu - rx_mmu;
+ mcl->args[2] = 0;
+ mcl++;
+ (void)HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl);
+ }
+
+ while ((skb = __skb_dequeue(&rxq)) != NULL) {
+ /*
+ * Enough room in skbuff for the data we were passed? Also, Linux
+ * expects at least 16 bytes headroom in each receive buffer.
+ */
+ if (unlikely(skb->tail > skb->end) ||
+ unlikely((skb->data - skb->head) < 16)) {
+ nskb = NULL;
+
+ /* Only copy the packet if it fits in the current MTU. */
+ if (skb->len <= (dev->mtu + ETH_HLEN)) {
+ if ((skb->tail > skb->end) && net_ratelimit())
+ printk(KERN_INFO "Received packet needs %d bytes more "
+ "headroom.\n", skb->tail - skb->end);
+
+ if ((nskb = alloc_xen_skb(skb->len + 2)) != NULL) {
+ skb_reserve(nskb, 2);
+ skb_put(nskb, skb->len);
+ memcpy(nskb->data, skb->data, skb->len);
+ nskb->dev = skb->dev;
+ }
+ }
+ else if (net_ratelimit())
+ printk(KERN_INFO "Received packet too big for MTU "
+ "(%d > %d)\n", skb->len - ETH_HLEN, dev->mtu);
+
+ /* Reinitialise and then destroy the old skbuff. */
+ skb->len = 0;
+ skb->tail = skb->data;
+ init_skb_shinfo(skb);
+ dev_kfree_skb(skb);
+
+ /* Switch old for new, if we copied the buffer. */
+ if ((skb = nskb) == NULL)
+ continue;
+ }
+
+ /* Set the shared-info area, which is hidden behind the real data. */
+ init_skb_shinfo(skb);
+
+ /* Ethernet-specific work. Delayed to here as it peeks the header. */
+ skb->protocol = eth_type_trans(skb, dev);
+
+ /* Pass it up. */
+ netif_receive_skb(skb);
+ dev->last_rx = jiffies;
+ }
+
+ np->rx_resp_cons = i;
+
+ /* If we get a callback with very few responses, reduce fill target. */
+ /* NB. Note exponential increase, linear decrease. */
+ if (((np->rx->req_prod - np->rx->resp_prod) > ((3*np->rx_target) / 4)) &&
+ (--np->rx_target < np->rx_min_target))
+ np->rx_target = np->rx_min_target;
+
+ network_alloc_rx_buffers(dev);
+
+ *pbudget -= work_done;
+ dev->quota -= work_done;
+
+ if (work_done < budget) {
+ local_irq_save(flags);
+
+ np->rx->event = i + 1;
+
+ /* Deal with hypervisor racing our resetting of rx_event. */
+ mb();
+ if (np->rx->resp_prod == i) {
+ __netif_rx_complete(dev);
+ more_to_do = 0;
+ }
+
+ local_irq_restore(flags);
+ }
+
+ spin_unlock(&np->rx_lock);
+
+ return more_to_do;
+}
+
+
+static int network_close(struct net_device *dev)
+{
+ struct net_private *np = netdev_priv(dev);
+ np->user_state = UST_CLOSED;
+ netif_stop_queue(np->dev);
+ return 0;
+}
+
+
+static struct net_device_stats *network_get_stats(struct net_device *dev)
+{
+ struct net_private *np = netdev_priv(dev);
+ return &np->stats;
+}
+
+
+static void network_connect(struct net_device *dev,
+ netif_fe_interface_status_t *status)
+{
+ struct net_private *np;
+ int i, requeue_idx;
+ netif_tx_request_t *tx;
+
+ np = netdev_priv(dev);
+ spin_lock_irq(&np->tx_lock);
+ spin_lock(&np->rx_lock);
+
+ /* Recovery procedure: */
+
+ /* Step 1: Reinitialise variables. */
+ np->rx_resp_cons = np->tx_resp_cons = np->tx_full = 0;
+ np->rx->event = np->tx->event = 1;
+
+ /* Step 2: Rebuild the RX and TX ring contents.
+ * NB. We could just free the queued TX packets now but we hope
+ * that sending them out might do some good. We have to rebuild
+ * the RX ring because some of our pages are currently flipped out
+ * so we can't just free the RX skbs.
+ * NB2. Freelist index entries are always going to be less than
+ * __PAGE_OFFSET, whereas pointers to skbs will always be equal or
+ * greater than __PAGE_OFFSET: we use this property to distinguish
+ * them.
+ */
+
+ /* Rebuild the TX buffer freelist and the TX ring itself.
+ * NB. This reorders packets. We could keep more private state
+ * to avoid this but maybe it doesn't matter so much given the
+ * interface has been down.
+ */
+ for (requeue_idx = 0, i = 1; i <= NETIF_TX_RING_SIZE; i++) {
+ if ((unsigned long)np->tx_skbs[i] >= __PAGE_OFFSET) {
+ struct sk_buff *skb = np->tx_skbs[i];
+
+ tx = &np->tx->ring[requeue_idx++].req;
+
+ tx->id = i;
+ tx->addr = virt_to_machine(skb->data);
+ tx->size = skb->len;
+
+ np->stats.tx_bytes += skb->len;
+ np->stats.tx_packets++;
+ }
+ }
+ wmb();
+ np->tx->req_prod = requeue_idx;
+
+ /* Rebuild the RX buffer freelist and the RX ring itself. */
+ for (requeue_idx = 0, i = 1; i <= NETIF_RX_RING_SIZE; i++)
+ if ((unsigned long)np->rx_skbs[i] >= __PAGE_OFFSET)
+ np->rx->ring[requeue_idx++].req.id = i;
+ wmb();
+ np->rx->req_prod = requeue_idx;
+
+ /* Step 3: All public and private state should now be sane. Get
+ * ready to start sending and receiving packets and give the driver
+ * domain a kick because we've probably just requeued some
+ * packets.
+ */
+ np->backend_state = BEST_CONNECTED;
+ wmb();
+ notify_via_evtchn(status->evtchn);
+ network_tx_buf_gc(dev);
+
+ if (np->user_state == UST_OPEN)
+ netif_start_queue(dev);
+
+ spin_unlock(&np->rx_lock);
+ spin_unlock_irq(&np->tx_lock);
+}
+
+static void vif_show(struct net_private *np)
+{
+#if DEBUG
+ if (np) {
+ IPRINTK("<vif handle=%u %s(%s) evtchn=%u irq=%u tx=%p rx=%p>\n",
+ np->handle,
+ be_state_name[np->backend_state],
+ np->user_state ? "open" : "closed",
+ np->evtchn,
+ np->irq,
+ np->tx,
+ np->rx);
+ } else {
+ IPRINTK("<vif NULL>\n");
+ }
+#endif
+}
+
+/* Send a connect message to xend to tell it to bring up the interface. */
+static void send_interface_connect(struct net_private *np)
+{
+ ctrl_msg_t cmsg = {
+ .type = CMSG_NETIF_FE,
+ .subtype = CMSG_NETIF_FE_INTERFACE_CONNECT,
+ .length = sizeof(netif_fe_interface_connect_t),
+ };
+ netif_fe_interface_connect_t *msg = (void*)cmsg.msg;
+
+ msg->handle = np->handle;
+ msg->tx_shmem_frame = (virt_to_machine(np->tx) >> PAGE_SHIFT);
+ msg->rx_shmem_frame = (virt_to_machine(np->rx) >> PAGE_SHIFT);
+
+ ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
+}
+
+/* Send a driver status notification to the domain controller. */
+static int send_driver_status(int ok)
+{
+ int err = 0;
+ ctrl_msg_t cmsg = {
+ .type = CMSG_NETIF_FE,
+ .subtype = CMSG_NETIF_FE_DRIVER_STATUS,
+ .length = sizeof(netif_fe_driver_status_t),
+ };
+ netif_fe_driver_status_t *msg = (void*)cmsg.msg;
+
+ msg->status = (ok ? NETIF_DRIVER_STATUS_UP : NETIF_DRIVER_STATUS_DOWN);
+ err = ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
+ return err;
+}
+
+/* Stop network device and free tx/rx queues and irq.
+ */
+static void vif_release(struct net_private *np)
+{
+ /* Stop old i/f to prevent errors whilst we rebuild the state. */
+ spin_lock_irq(&np->tx_lock);
+ spin_lock(&np->rx_lock);
+ netif_stop_queue(np->dev);
+ /* np->backend_state = BEST_DISCONNECTED; */
+ spin_unlock(&np->rx_lock);
+ spin_unlock_irq(&np->tx_lock);
+
+ /* Free resources. */
+ if(np->tx != NULL){
+ free_irq(np->irq, np->dev);
+ unbind_evtchn_from_irq(np->evtchn);
+ free_page((unsigned long)np->tx);
+ free_page((unsigned long)np->rx);
+ np->irq = 0;
+ np->evtchn = 0;
+ np->tx = NULL;
+ np->rx = NULL;
+ }
+}
+
+/* Release vif resources and close it down completely.
+ */
+static void vif_close(struct net_private *np)
+{
+ WPRINTK("Unexpected netif-CLOSED message in state %s\n",
+ be_state_name[np->backend_state]);
+ vif_release(np);
+ np->backend_state = BEST_CLOSED;
+ /* todo: take dev down and free. */
+ vif_show(np);
+}
+
+/* Move the vif into disconnected state.
+ * Allocates tx/rx pages.
+ * Sends connect message to xend.
+ */
+static void vif_disconnect(struct net_private *np)
+{
+ if(np->tx) free_page((unsigned long)np->tx);
+ if(np->rx) free_page((unsigned long)np->rx);
+ /* np->tx and np->rx should already be NULL here; the frees above are defensive. */
+ np->tx = (netif_tx_interface_t *)__get_free_page(GFP_KERNEL);
+ np->rx = (netif_rx_interface_t *)__get_free_page(GFP_KERNEL);
+ memset(np->tx, 0, PAGE_SIZE);
+ memset(np->rx, 0, PAGE_SIZE);
+ np->backend_state = BEST_DISCONNECTED;
+ send_interface_connect(np);
+ vif_show(np);
+}
+
+/* Begin interface recovery.
+ *
+ * NB. Whilst we're recovering, we turn the carrier state off. We
+ * take measures to ensure that this device isn't used for
+ * anything. We also stop the queue for this device. Various
+ * different approaches (e.g. continuing to buffer packets) have
+ * been tested but don't appear to improve the overall impact on
+ * TCP connections.
+ *
+ * TODO: (MAW) Change the Xend<->Guest protocol so that a recovery
+ * is initiated by a special "RESET" message - disconnect could
+ * just mean we're not allowed to use this interface any more.
+ */
+static void vif_reset(struct net_private *np)
+{
+ IPRINTK("Attempting to reconnect network interface: handle=%u\n",
+ np->handle);
+ vif_release(np);
+ vif_disconnect(np);
+ vif_show(np);
+}
+
+/* Move the vif into connected state.
+ * Sets the mac and event channel from the message.
+ * Binds the irq to the event channel.
+ */
+static void
+vif_connect(struct net_private *np, netif_fe_interface_status_t *status)
+{
+ struct net_device *dev = np->dev;
+ memcpy(dev->dev_addr, status->mac, ETH_ALEN);
+ network_connect(dev, status);
+ np->evtchn = status->evtchn;
+ np->irq = bind_evtchn_to_irq(np->evtchn);
+ (void)request_irq(np->irq, netif_int, SA_SAMPLE_RANDOM, dev->name, dev);
+ netctrl_connected_count();
+ (void)send_fake_arp(dev);
+ vif_show(np);
+}
+
+
+/** Create a network device.
+ * @param handle device handle
+ * @param val return parameter for created device
+ * @return 0 on success, error code otherwise
+ */
+static int create_netdev(int handle, struct net_device **val)
+{
+ int i, err = 0;
+ struct net_device *dev = NULL;
+ struct net_private *np = NULL;
+
+ if ((dev = alloc_etherdev(sizeof(struct net_private))) == NULL) {
+ printk(KERN_WARNING "%s> alloc_etherdev failed.\n", __FUNCTION__);
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ np = netdev_priv(dev);
+ np->backend_state = BEST_CLOSED;
+ np->user_state = UST_CLOSED;
+ np->handle = handle;
+
+ spin_lock_init(&np->tx_lock);
+ spin_lock_init(&np->rx_lock);
+
+ skb_queue_head_init(&np->rx_batch);
+ np->rx_target = RX_MIN_TARGET;
+ np->rx_min_target = RX_MIN_TARGET;
+ np->rx_max_target = RX_MAX_TARGET;
+
+ /* Initialise {tx,rx}_skbs to be a free chain containing every entry. */
+ for (i = 0; i <= NETIF_TX_RING_SIZE; i++)
+ np->tx_skbs[i] = (void *)(i+1);
+ for (i = 0; i <= NETIF_RX_RING_SIZE; i++)
+ np->rx_skbs[i] = (void *)(i+1);
+
+ dev->open = network_open;
+ dev->hard_start_xmit = network_start_xmit;
+ dev->stop = network_close;
+ dev->get_stats = network_get_stats;
+ dev->poll = netif_poll;
+ dev->weight = 64;
+
+ if ((err = register_netdev(dev)) != 0) {
+ printk(KERN_WARNING "%s> register_netdev err=%d\n", __FUNCTION__, err);
+ goto exit;
+ }
+
+ if ((err = xennet_proc_addif(dev)) != 0) {
+ unregister_netdev(dev);
+ goto exit;
+ }
+
+ np->dev = dev;
+ list_add(&np->list, &dev_list);
+
+ exit:
+ if ((err != 0) && (dev != NULL))
+ kfree(dev);
+ else if (val != NULL)
+ *val = dev;
+ return err;
+}
+
+/* Get the target interface for a status message.
+ * Creates the interface when it makes sense.
+ * The returned interface may be null when there is no error.
+ *
+ * @param status status message
+ * @param np return parameter for interface state
+ * @return 0 on success, error code otherwise
+ */
+static int
+target_vif(netif_fe_interface_status_t *status, struct net_private **np)
+{
+ int err = 0;
+ struct net_device *dev;
+
+ DPRINTK("> handle=%d\n", status->handle);
+ if (status->handle < 0) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ if ((dev = find_dev_by_handle(status->handle)) != NULL)
+ goto exit;
+
+ if (status->status == NETIF_INTERFACE_STATUS_CLOSED)
+ goto exit;
+ if (status->status == NETIF_INTERFACE_STATUS_CHANGED)
+ goto exit;
+
+ /* It's a new interface in a good state - create it. */
+ DPRINTK("> create device...\n");
+ if ((err = create_netdev(status->handle, &dev)) != 0)
+ goto exit;
+
+ netctrl.interface_n++;
+
+ exit:
+ if (np != NULL)
+ *np = ((dev && !err) ? netdev_priv(dev) : NULL);
+ DPRINTK("< err=%d\n", err);
+ return err;
+}
+
+/* Handle an interface status message. */
+static void netif_interface_status(netif_fe_interface_status_t *status)
+{
+ int err = 0;
+ struct net_private *np = NULL;
+
+ DPRINTK("> status=%s handle=%d\n",
+ status_name[status->status], status->handle);
+
+ if ((err = target_vif(status, &np)) != 0) {
+ WPRINTK("Invalid netif: handle=%u\n", status->handle);
+ return;
+ }
+
+ if (np == NULL) {
+ DPRINTK("> no vif\n");
+ return;
+ }
+
+ switch (status->status) {
+ case NETIF_INTERFACE_STATUS_CLOSED:
+ switch (np->backend_state) {
+ case BEST_CLOSED:
+ case BEST_DISCONNECTED:
+ case BEST_CONNECTED:
+ vif_close(np);
+ break;
+ }
+ break;
+
+ case NETIF_INTERFACE_STATUS_DISCONNECTED:
+ switch (np->backend_state) {
+ case BEST_CLOSED:
+ vif_disconnect(np);
+ break;
+ case BEST_DISCONNECTED:
+ case BEST_CONNECTED:
+ vif_reset(np);
+ break;
+ }
+ break;
+
+ case NETIF_INTERFACE_STATUS_CONNECTED:
+ switch (np->backend_state) {
+ case BEST_CLOSED:
+ WPRINTK("Unexpected netif status %s in state %s\n",
+ status_name[status->status],
+ be_state_name[np->backend_state]);
+ vif_disconnect(np);
+ vif_connect(np, status);
+ break;
+ case BEST_DISCONNECTED:
+ vif_connect(np, status);
+ break;
+ }
+ break;
+
+ case NETIF_INTERFACE_STATUS_CHANGED:
+ /*
+ * The domain controller is notifying us that a device has been
+ * added or removed.
+ */
+ break;
+
+ default:
+ WPRINTK("Invalid netif status code %d\n", status->status);
+ break;
+ }
+
+ vif_show(np);
+}
+
+/*
+ * Initialize the network control interface.
+ */
+static void netif_driver_status(netif_fe_driver_status_t *status)
+{
+ netctrl.up = status->status;
+ netctrl_connected_count();
+}
+
+/* Receive handler for control messages. */
+static void netif_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
+{
+
+ switch (msg->subtype) {
+ case CMSG_NETIF_FE_INTERFACE_STATUS:
+ if (msg->length != sizeof(netif_fe_interface_status_t))
+ goto error;
+ netif_interface_status((netif_fe_interface_status_t *) &msg->msg[0]);
+ break;
+
+ case CMSG_NETIF_FE_DRIVER_STATUS:
+ if (msg->length != sizeof(netif_fe_driver_status_t))
+ goto error;
+ netif_driver_status((netif_fe_driver_status_t *) &msg->msg[0]);
+ break;
+
+ error:
+ default:
+ msg->length = 0;
+ break;
+ }
+
+ ctrl_if_send_response(msg);
+}
+
+
+#if 1
+/* Wait for all interfaces to be connected.
+ *
+ * This works OK, but we'd like to use the probing mode (see below).
+ */
+static int probe_interfaces(void)
+{
+ int err = 0, conn = 0;
+ int wait_i, wait_n = 100;
+
+ DPRINTK(">\n");
+
+ for (wait_i = 0; wait_i < wait_n; wait_i++) {
+ DPRINTK("> wait_i=%d\n", wait_i);
+ conn = netctrl_connected();
+ if(conn) break;
+ DPRINTK("> schedule_timeout...\n");
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(10);
+ }
+
+ DPRINTK("> wait finished...\n");
+ if (conn <= 0) {
+ err = netctrl_err(-ENETDOWN);
+ WPRINTK("Failed to connect all virtual interfaces: err=%d\n", err);
+ }
+
+ DPRINTK("< err=%d\n", err);
+
+ return err;
+}
+#else
+/* Probe for interfaces until no more are found.
+ *
+ * This is the mode we'd like to use, but at the moment it panics the kernel.
+*/
+static int probe_interfaces(void)
+{
+ int err = 0;
+ int wait_i, wait_n = 100;
+ ctrl_msg_t cmsg = {
+ .type = CMSG_NETIF_FE,
+ .subtype = CMSG_NETIF_FE_INTERFACE_STATUS,
+ .length = sizeof(netif_fe_interface_status_t),
+ };
+ netif_fe_interface_status_t msg = {};
+ ctrl_msg_t rmsg = {};
+ netif_fe_interface_status_t *reply = (void*)rmsg.msg;
+ int state = TASK_UNINTERRUPTIBLE;
+ u32 query = -1;
+
+ DPRINTK(">\n");
+
+ netctrl.interface_n = 0;
+ for (wait_i = 0; wait_i < wait_n; wait_i++) {
+ DPRINTK("> wait_i=%d query=%d\n", wait_i, query);
+ msg.handle = query;
+ memcpy(cmsg.msg, &msg, sizeof(msg));
+ DPRINTK("> set_current_state...\n");
+ set_current_state(state);
+ DPRINTK("> rmsg=%p msg=%p, reply=%p\n", &rmsg, rmsg.msg, reply);
+ DPRINTK("> sending...\n");
+ err = ctrl_if_send_message_and_get_response(&cmsg, &rmsg, state);
+ DPRINTK("> err=%d\n", err);
+ if(err) goto exit;
+ DPRINTK("> rmsg=%p msg=%p, reply=%p\n", &rmsg, rmsg.msg, reply);
+ if((int)reply->handle < 0) {
+ // No more interfaces.
+ break;
+ }
+ query = -reply->handle - 2;
+ DPRINTK(">netif_interface_status ...\n");
+ netif_interface_status(reply);
+ }
+
+ exit:
+ if (err) {
+ err = netctrl_err(-ENETDOWN);
+ WPRINTK("Connecting virtual network interfaces failed: err=%d\n", err);
+ }
+
+ DPRINTK("< err=%d\n", err);
+ return err;
+}
+
+#endif
+
+/*
+ * We use this notifier to send out a fake ARP reply to reset switches and
+ * router ARP caches when an IP interface is brought up on a VIF.
+ */
+static int
+inetdev_notify(struct notifier_block *this, unsigned long event, void *ptr)
+{
+ struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
+ struct net_device *dev = ifa->ifa_dev->dev;
+ struct list_head *ent;
+ struct net_private *np;
+
+ if (event != NETDEV_UP)
+ goto out;
+
+ list_for_each (ent, &dev_list) {
+ np = list_entry(ent, struct net_private, list);
+ if (np->dev == dev)
+ (void)send_fake_arp(dev);
+ }
+
+ out:
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block notifier_inetdev = {
+ .notifier_call = inetdev_notify,
+ .next = NULL,
+ .priority = 0
+};
+
+static int __init netif_init(void)
+{
+ int err = 0;
+
+ if (xen_start_info.flags & SIF_INITDOMAIN)
+ return 0;
+
+ if ((err = xennet_proc_init()) != 0)
+ return err;
+
+ IPRINTK("Initialising virtual ethernet driver.\n");
+ INIT_LIST_HEAD(&dev_list);
+ (void)register_inetaddr_notifier(¬ifier_inetdev);
+ netctrl_init();
+ (void)ctrl_if_register_receiver(CMSG_NETIF_FE, netif_ctrlif_rx,
+ CALLBACK_IN_BLOCKING_CONTEXT);
+ send_driver_status(1);
+ err = probe_interfaces();
+ if (err)
+ ctrl_if_unregister_receiver(CMSG_NETIF_FE, netif_ctrlif_rx);
+
+ DPRINTK("< err=%d\n", err);
+ return err;
+}
+
+static void vif_suspend(struct net_private *np)
+{
+ /* Avoid having tx/rx stuff happen until we're ready. */
+ free_irq(np->irq, np->dev);
+ unbind_evtchn_from_irq(np->evtchn);
+}
+
+static void vif_resume(struct net_private *np)
+{
+ /*
+ * Connect regardless of whether IFF_UP flag set.
+ * Stop bad things from happening until we're back up.
+ */
+ np->backend_state = BEST_DISCONNECTED;
+ memset(np->tx, 0, PAGE_SIZE);
+ memset(np->rx, 0, PAGE_SIZE);
+
+ send_interface_connect(np);
+}
+
+void netif_suspend(void)
+{
+ struct list_head *ent;
+ struct net_private *np;
+
+ list_for_each (ent, &dev_list) {
+ np = list_entry(ent, struct net_private, list);
+ vif_suspend(np);
+ }
+}
+
+void netif_resume(void)
+{
+ struct list_head *ent;
+ struct net_private *np;
+
+ list_for_each (ent, &dev_list) {
+ np = list_entry(ent, struct net_private, list);
+ vif_resume(np);
+ }
+}
+
+#ifdef CONFIG_PROC_FS
+
+#define TARGET_MIN 0UL
+#define TARGET_MAX 1UL
+#define TARGET_CUR 2UL
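+/*
+ * The handlers below encode which target they act on in the low two
+ * bits of their 'data' pointer, relying on struct net_device pointers
+ * being at least 4-byte aligned.
+ */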
+
+static int xennet_proc_read(
+ char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+ struct net_device *dev = (struct net_device *)((unsigned long)data & ~3UL);
+ struct net_private *np = netdev_priv(dev);
+ int len = 0, which_target = (int)data & 3;
+
+ switch (which_target)
+ {
+ case TARGET_MIN:
+ len = sprintf(page, "%d\n", np->rx_min_target);
+ break;
+ case TARGET_MAX:
+ len = sprintf(page, "%d\n", np->rx_max_target);
+ break;
+ case TARGET_CUR:
+ len = sprintf(page, "%d\n", np->rx_target);
+ break;
+ }
+
+ *eof = 1;
+ return len;
+}
+
+static int xennet_proc_write(
+ struct file *file, const char __user *buffer,
+ unsigned long count, void *data)
+{
+ struct net_device *dev = (struct net_device *)((unsigned long)data & ~3UL);
+ struct net_private *np = netdev_priv(dev);
+ int which_target = (int)data & 3;
+ char string[64];
+ long target;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (count <= 1)
+ return -EBADMSG; /* runt */
+ if (count > sizeof(string))
+ return -EFBIG; /* too long */
+
+ if (copy_from_user(string, buffer, count))
+ return -EFAULT;
+ string[sizeof(string)-1] = '\0';
+
+ target = simple_strtol(string, NULL, 10);
+ if (target < RX_MIN_TARGET)
+ target = RX_MIN_TARGET;
+ if (target > RX_MAX_TARGET)
+ target = RX_MAX_TARGET;
+
+ spin_lock(&np->rx_lock);
+
+ switch (which_target)
+ {
+ case TARGET_MIN:
+ if (target > np->rx_max_target)
+ np->rx_max_target = target;
+ np->rx_min_target = target;
+ if (target > np->rx_target)
+ np->rx_target = target;
+ break;
+ case TARGET_MAX:
+ if (target < np->rx_min_target)
+ np->rx_min_target = target;
+ np->rx_max_target = target;
+ if (target < np->rx_target)
+ np->rx_target = target;
+ break;
+ case TARGET_CUR:
+ break;
+ }
+
+ network_alloc_rx_buffers(dev);
+
+ spin_unlock(&np->rx_lock);
+
+ return count;
+}
+
+static int xennet_proc_init(void)
+{
+ if (proc_mkdir("xen/net", NULL) == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+static int xennet_proc_addif(struct net_device *dev)
+{
+ struct proc_dir_entry *dir, *min, *max, *cur;
+ char name[30];
+
+ sprintf(name, "xen/net/%s", dev->name);
+
+ dir = proc_mkdir(name, NULL);
+ if (!dir)
+ goto nomem;
+
+ min = create_proc_entry("rxbuf_min", 0644, dir);
+ max = create_proc_entry("rxbuf_max", 0644, dir);
+ cur = create_proc_entry("rxbuf_cur", 0444, dir);
+ if (!min || !max || !cur)
+ goto nomem;
+
+ min->read_proc = xennet_proc_read;
+ min->write_proc = xennet_proc_write;
+ min->data = (void *)((unsigned long)dev | TARGET_MIN);
+
+ max->read_proc = xennet_proc_read;
+ max->write_proc = xennet_proc_write;
+ max->data = (void *)((unsigned long)dev | TARGET_MAX);
+
+ cur->read_proc = xennet_proc_read;
+ cur->write_proc = xennet_proc_write;
+ cur->data = (void *)((unsigned long)dev | TARGET_CUR);
+
+ return 0;
+
+ nomem:
+ xennet_proc_delif(dev);
+ return -ENOMEM;
+}
+
+static void xennet_proc_delif(struct net_device *dev)
+{
+ char name[30];
+
+ sprintf(name, "xen/net/%s/rxbuf_min", dev->name);
+ remove_proc_entry(name, NULL);
+
+ sprintf(name, "xen/net/%s/rxbuf_max", dev->name);
+ remove_proc_entry(name, NULL);
+
+ sprintf(name, "xen/net/%s/rxbuf_cur", dev->name);
+ remove_proc_entry(name, NULL);
+
+ sprintf(name, "xen/net/%s", dev->name);
+ remove_proc_entry(name, NULL);
+}
+
+#endif
+
+module_init(netif_init);
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/drivers/xen/privcmd/Makefile
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/privcmd/Makefile Tue Aug 9 15:17:45 2005
@@ -0,0 +1,2 @@
+
+obj-y := privcmd.o
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,220 @@
+/******************************************************************************
+ * privcmd.c
+ *
+ * Interface to privileged domain-0 commands.
+ *
+ * Copyright (c) 2002-2004, K A Fraser, B Dragovic
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/swap.h>
+#include <linux/smp_lock.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <linux/seq_file.h>
+
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/uaccess.h>
+#include <asm/tlb.h>
+#include <asm-xen/linux-public/privcmd.h>
+#include <asm-xen/xen-public/dom0_ops.h>
+#include <asm-xen/xen_proc.h>
+
+static struct proc_dir_entry *privcmd_intf;
+
+static int privcmd_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long data)
+{
+ int ret = -ENOSYS;
+
+ switch ( cmd )
+ {
+ case IOCTL_PRIVCMD_HYPERCALL:
+ {
+ privcmd_hypercall_t hypercall;
+
+ if ( copy_from_user(&hypercall, (void *)data, sizeof(hypercall)) )
+ return -EFAULT;
+
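+ /*
+ * Presumably privcmd_hypercall_t holds the hypercall number followed
+ * by five argument words: %eax is loaded from offset 0 and
+ * %ebx..%edi from offsets 4..20, then TRAP_INSTR (the Xen hypercall
+ * trap) is executed with the clobbered registers saved around it.
+ */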
+ __asm__ __volatile__ (
+ "pushl %%ebx; pushl %%ecx; pushl %%edx; pushl %%esi; pushl %%edi; "
+ "movl 4(%%eax),%%ebx ;"
+ "movl 8(%%eax),%%ecx ;"
+ "movl 12(%%eax),%%edx ;"
+ "movl 16(%%eax),%%esi ;"
+ "movl 20(%%eax),%%edi ;"
+ "movl (%%eax),%%eax ;"
+ TRAP_INSTR "; "
+ "popl %%edi; popl %%esi; popl %%edx; popl %%ecx; popl %%ebx"
+ : "=a" (ret) : "0" (&hypercall) : "memory" );
+
+ }
+ break;
+
+ case IOCTL_PRIVCMD_INITDOMAIN_EVTCHN:
+ {
+ extern int initdom_ctrlif_domcontroller_port;
+ ret = initdom_ctrlif_domcontroller_port;
+ }
+ break;
+
+#if defined(CONFIG_XEN_PRIVILEGED_GUEST)
+ case IOCTL_PRIVCMD_MMAP:
+ {
+#define PRIVCMD_MMAP_SZ 32
+ privcmd_mmap_t mmapcmd;
+ privcmd_mmap_entry_t msg[PRIVCMD_MMAP_SZ], *p;
+ int i, rc;
+
+ if ( copy_from_user(&mmapcmd, (void *)data, sizeof(mmapcmd)) )
+ return -EFAULT;
+
+ p = mmapcmd.entry;
+
+ for (i=0; i<mmapcmd.num; i+=PRIVCMD_MMAP_SZ, p+=PRIVCMD_MMAP_SZ)
+ {
+ int j, n = ((mmapcmd.num-i)>PRIVCMD_MMAP_SZ)?
+ PRIVCMD_MMAP_SZ:(mmapcmd.num-i);
+ if ( copy_from_user(&msg, p, n*sizeof(privcmd_mmap_entry_t)) )
+ return -EFAULT;
+
+ for ( j = 0; j < n; j++ )
+ {
+ struct vm_area_struct *vma =
+ find_vma( current->mm, msg[j].va );
+
+ if ( !vma )
+ return -EINVAL;
+
+ if ( msg[j].va > PAGE_OFFSET )
+ return -EINVAL;
+
+ if ( (msg[j].va + (msg[j].npages<<PAGE_SHIFT)) > vma->vm_end )
+ return -EINVAL;
+
+ if ( (rc = direct_remap_area_pages(vma->vm_mm,
+ msg[j].va&PAGE_MASK,
+ msg[j].mfn<<PAGE_SHIFT,
+ msg[j].npages<<PAGE_SHIFT,
+ vma->vm_page_prot,
+ mmapcmd.dom)) < 0 )
+ return rc;
+ }
+ }
+ ret = 0;
+ }
+ break;
+
+ case IOCTL_PRIVCMD_MMAPBATCH:
+ {
+#define MAX_DIRECTMAP_MMU_QUEUE 130
+ mmu_update_t u[MAX_DIRECTMAP_MMU_QUEUE], *w, *v;
+ privcmd_mmapbatch_t m;
+ struct vm_area_struct *vma = NULL;
+ unsigned long *p, addr;
+ unsigned long mfn;
+ int i;
+
+ if ( copy_from_user(&m, (void *)data, sizeof(m)) )
+ { ret = -EFAULT; goto batch_err; }
+
+ vma = find_vma( current->mm, m.addr );
+
+ if ( !vma )
+ { ret = -EINVAL; goto batch_err; }
+
+ if ( m.addr > PAGE_OFFSET )
+ { ret = -EFAULT; goto batch_err; }
+
+ if ( (m.addr + (m.num<<PAGE_SHIFT)) > vma->vm_end )
+ { ret = -EFAULT; goto batch_err; }
+
+ u[0].ptr = MMU_EXTENDED_COMMAND;
+ u[0].val = MMUEXT_SET_FOREIGNDOM;
+ u[0].val |= (unsigned long)m.dom << 16;
+ v = w = &u[1];
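+ /*
+ * u[0] selects the foreign domain whose frames are being mapped;
+ * each pass of the loop below rebuilds u[1] for one page and issues
+ * the two-element update, so a failed page can be reported back
+ * individually via the 0xF0000000 marker.
+ */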
+
+ p = m.arr;
+ addr = m.addr;
+ for ( i = 0; i < m.num; i++, addr += PAGE_SIZE, p++ )
+ {
+ if ( get_user(mfn, p) )
+ return -EFAULT;
+
+ v->val = (mfn << PAGE_SHIFT) | pgprot_val(vma->vm_page_prot);
+
+ __direct_remap_area_pages(vma->vm_mm,
+ addr,
+ PAGE_SIZE,
+ v);
+
+ if ( unlikely(HYPERVISOR_mmu_update(u, v - u + 1, NULL) < 0) )
+ put_user( 0xF0000000 | mfn, p );
+
+ v = w;
+ }
+ ret = 0;
+ break;
+
+ batch_err:
+ printk("batch_err ret=%d vma=%p addr=%lx num=%d arr=%p %lx-%lx\n",
+ ret, vma, m.addr, m.num, m.arr, vma->vm_start, vma->vm_end);
+ break;
+ }
+ break;
+#endif
+
+ case IOCTL_PRIVCMD_GET_MACH2PHYS_START_MFN:
+ {
+ unsigned long m2p_start_mfn =
+ HYPERVISOR_shared_info->arch.mfn_to_pfn_start;
+
+ if( put_user( m2p_start_mfn, (unsigned long *) data ) )
+ ret = -EFAULT;
+ else
+ ret = 0;
+ }
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static int privcmd_mmap(struct file * file, struct vm_area_struct * vma)
+{
+ /* DONTCOPY is essential for Xen as copy_page_range is broken. */
+ vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
+
+ return 0;
+}
+
+static struct file_operations privcmd_file_ops = {
+ ioctl : privcmd_ioctl,
+ mmap: privcmd_mmap
+};
+
+
+static int __init privcmd_init(void)
+{
+ if ( !(xen_start_info.flags & SIF_PRIVILEGED) )
+ return 0;
+
+ privcmd_intf = create_xen_proc_entry("privcmd", 0400);
+ if ( privcmd_intf != NULL )
+ privcmd_intf->proc_fops = &privcmd_file_ops;
+
+ return 0;
+}
+
+__initcall(privcmd_init);
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/include/asm-generic/pgtable.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/include/asm-generic/pgtable.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,147 @@
+#ifndef _ASM_GENERIC_PGTABLE_H
+#define _ASM_GENERIC_PGTABLE_H
+
+#ifndef __HAVE_ARCH_PTEP_ESTABLISH
+/*
+ * Establish a new mapping:
+ * - flush the old one
+ * - update the page tables
+ * - inform the TLB about the new one
+ *
+ * We hold the mm semaphore for reading and vma->vm_mm->page_table_lock.
+ *
+ * Note: the old pte is known to not be writable, so we don't need to
+ * worry about dirty bits etc getting lost.
+ */
+#ifndef __HAVE_ARCH_SET_PTE_ATOMIC
+#define ptep_establish(__vma, __address, __ptep, __entry) \
+do { \
+ set_pte(__ptep, __entry); \
+ flush_tlb_page(__vma, __address); \
+} while (0)
+#else /* __HAVE_ARCH_SET_PTE_ATOMIC */
+#define ptep_establish(__vma, __address, __ptep, __entry) \
+do { \
+ set_pte_atomic(__ptep, __entry); \
+ flush_tlb_page(__vma, __address); \
+} while (0)
+#endif /* __HAVE_ARCH_SET_PTE_ATOMIC */
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+/*
+ * Largely same as above, but only sets the access flags (dirty,
+ * accessed, and writable). Furthermore, we know it always gets set
+ * to a "more permissive" setting, which allows most architectures
+ * to optimize this.
+ */
+#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
+do { \
+ set_pte(__ptep, __entry); \
+ flush_tlb_page(__vma, __address); \
+} while (0)
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_ESTABLISH_NEW
+/*
+ * Establish a mapping where none previously existed
+ */
+#define ptep_establish_new(__vma, __address, __ptep, __entry) \
+do { \
+ set_pte(__ptep, __entry); \
+} while (0)
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+static inline int ptep_test_and_clear_young(pte_t *ptep)
+{
+ pte_t pte = *ptep;
+ if (!pte_young(pte))
+ return 0;
+ set_pte(ptep, pte_mkold(pte));
+ return 1;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+#define ptep_clear_flush_young(__vma, __address, __ptep) \
+({ \
+ int __young = ptep_test_and_clear_young(__ptep); \
+ if (__young) \
+ flush_tlb_page(__vma, __address); \
+ __young; \
+})
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
+static inline int ptep_test_and_clear_dirty(pte_t *ptep)
+{
+ pte_t pte = *ptep;
+ if (!pte_dirty(pte))
+ return 0;
+ set_pte(ptep, pte_mkclean(pte));
+ return 1;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
+#define ptep_clear_flush_dirty(__vma, __address, __ptep) \
+({ \
+ int __dirty = ptep_test_and_clear_dirty(__ptep); \
+ if (__dirty) \
+ flush_tlb_page(__vma, __address); \
+ __dirty; \
+})
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
+static inline pte_t ptep_get_and_clear(pte_t *ptep)
+{
+ pte_t pte = *ptep;
+ pte_clear(ptep);
+ return pte;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
+#define ptep_clear_flush(__vma, __address, __ptep) \
+({ \
+ pte_t __pte = ptep_get_and_clear(__ptep); \
+ flush_tlb_page(__vma, __address); \
+ __pte; \
+})
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
+static inline void ptep_set_wrprotect(pte_t *ptep)
+{
+ pte_t old_pte = *ptep;
+ set_pte(ptep, pte_wrprotect(old_pte));
+}
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_MKDIRTY
+static inline void ptep_mkdirty(pte_t *ptep)
+{
+ pte_t old_pte = *ptep;
+ set_pte(ptep, pte_mkdirty(old_pte));
+}
+#endif
+
+#ifndef __HAVE_ARCH_PTE_SAME
+#define pte_same(A,B) (pte_val(A) == pte_val(B))
+#endif
+
+#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
+#define page_test_and_clear_dirty(page) (0)
+#endif
+
+#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
+#define page_test_and_clear_young(page) (0)
+#endif
+
+#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
+#define pgd_offset_gate(mm, addr) pgd_offset(mm, addr)
+#endif
+
+#endif /* _ASM_GENERIC_PGTABLE_H */
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/include/asm-xen/asm-i386/agp.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/agp.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,37 @@
+#ifndef AGP_H
+#define AGP_H 1
+
+#include <asm/pgtable.h>
+#include <asm/cacheflush.h>
+#include <asm/system.h>
+
+/*
+ * Functions to keep the agpgart mappings coherent with the MMU.
+ * The GART gives the CPU a physical alias of pages in memory. The alias region is
+ * mapped uncacheable. Make sure there are no conflicting mappings
+ * with different cachability attributes for the same page. This avoids
+ * data corruption on some CPUs.
+ */
+
+int map_page_into_agp(struct page *page);
+int unmap_page_from_agp(struct page *page);
+#define flush_agp_mappings() global_flush_tlb()
+
+/* Could use CLFLUSH here if the cpu supports it. But then it would
+ need to be called for each cacheline of the whole page so it may not be
+ worth it. Would need a page for it. */
+#define flush_agp_cache() wbinvd()
+
+/* Convert a physical address to an address suitable for the GART. */
+#define phys_to_gart(x) phys_to_machine(x)
+#define gart_to_phys(x) machine_to_phys(x)
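+/*
+ * NB. Under Xen the kernel's "physical" addresses are pseudo-physical;
+ * the GART needs real machine addresses, hence the translation above
+ * instead of the identity mapping used natively.
+ */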
+
+/* GATT allocation. Returns/accepts GATT kernel virtual address. */
+#define alloc_gatt_pages(order) ({ \
+ char *_t; dma_addr_t _d; \
+ _t = dma_alloc_coherent(NULL,PAGE_SIZE<<(order),&_d,GFP_KERNEL); \
+ _t; })
+#define free_gatt_pages(table, order) \
+ dma_free_coherent(NULL,PAGE_SIZE<<(order),(table),virt_to_bus(table))
+
+#endif
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/include/asm-xen/asm-i386/bugs.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/bugs.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,214 @@
+/*
+ * include/asm-i386/bugs.h
+ *
+ * Copyright (C) 1994 Linus Torvalds
+ *
+ * Cyrix stuff, June 1998 by:
+ * - Rafael R. Reilova (moved everything from head.S),
+ * <rreilova@xxxxxxxxxxxx>
+ * - Channing Corn (tests & fixes),
+ * - Andrew D. Balsa (code cleanup).
+ *
+ * Pentium III FXSR, SSE support
+ * Gareth Hughes <gareth@xxxxxxxxxxx>, May 2000
+ */
+
+/*
+ * This is included by init/main.c to check for architecture-dependent bugs.
+ *
+ * Needs:
+ * void check_bugs(void);
+ */
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <asm/processor.h>
+#include <asm/i387.h>
+#include <asm/msr.h>
+
+static int __init no_halt(char *s)
+{
+ boot_cpu_data.hlt_works_ok = 0;
+ return 1;
+}
+
+__setup("no-hlt", no_halt);
+
+static int __init mca_pentium(char *s)
+{
+ mca_pentium_flag = 1;
+ return 1;
+}
+
+__setup("mca-pentium", mca_pentium);
+
+static int __init no_387(char *s)
+{
+ boot_cpu_data.hard_math = 0;
+ write_cr0(0xE | read_cr0());
+ return 1;
+}
+
+__setup("no387", no_387);
+
+static double __initdata x = 4195835.0;
+static double __initdata y = 3145727.0;
+
+/*
+ * This used to check for exceptions..
+ * However, it turns out that to support that,
+ * the XMM trap handlers basically had to
+ * be buggy. So let's have a correct XMM trap
+ * handler, and forget about printing out
+ * some status at boot.
+ *
+ * We should really only care about bugs here
+ * anyway. Not features.
+ */
+static void __init check_fpu(void)
+{
+ if (!boot_cpu_data.hard_math) {
+#ifndef CONFIG_MATH_EMULATION
+ printk(KERN_EMERG "No coprocessor found and no math emulation present.\n");
+ printk(KERN_EMERG "Giving up.\n");
+ for (;;) ;
+#endif
+ return;
+ }
+
+/* Enable FXSR and company _before_ testing for FP problems. */
+ /*
+ * Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned.
+ */
+ if (offsetof(struct task_struct, thread.i387.fxsave) & 15) {
+ extern void __buggy_fxsr_alignment(void);
+ __buggy_fxsr_alignment();
+ }
+ if (cpu_has_fxsr) {
+ printk(KERN_INFO "Enabling fast FPU save and restore... ");
+ set_in_cr4(X86_CR4_OSFXSR);
+ printk("done.\n");
+ }
+ if (cpu_has_xmm) {
+ printk(KERN_INFO "Enabling unmasked SIMD FPU exception support... ");
+ set_in_cr4(X86_CR4_OSXMMEXCPT);
+ printk("done.\n");
+ }
+
+ /* Test for the divl bug.. */
+ __asm__("fninit\n\t"
+ "fldl %1\n\t"
+ "fdivl %2\n\t"
+ "fmull %2\n\t"
+ "fldl %1\n\t"
+ "fsubp %%st,%%st(1)\n\t"
+ "fistpl %0\n\t"
+ "fwait\n\t"
+ "fninit"
+ : "=m" (*&boot_cpu_data.fdiv_bug)
+ : "m" (*&x), "m" (*&y));
+ stts();
+ if (boot_cpu_data.fdiv_bug)
+ printk("Hmm, FPU with FDIV bug.\n");
+}
+
+static void __init check_hlt(void)
+{
+ printk(KERN_INFO "Checking 'hlt' instruction... ");
+ if (!boot_cpu_data.hlt_works_ok) {
+ printk("disabled\n");
+ return;
+ }
+ __asm__ __volatile__("hlt ; hlt ; hlt ; hlt");
+ printk("OK.\n");
+}
+
+/*
+ * Most 386 processors have a bug where a POPAD can lock the
+ * machine even from user space.
+ */
+
+static void __init check_popad(void)
+{
+#ifndef CONFIG_X86_POPAD_OK
+ int res, inp = (int) &res;
+
+ printk(KERN_INFO "Checking for popad bug... ");
+ __asm__ __volatile__(
+ "movl $12345678,%%eax; movl $0,%%edi; pusha; popa; movl
(%%edx,%%edi),%%ecx "
+ : "=&a" (res)
+ : "d" (inp)
+ : "ecx", "edi" );
+ /* If this fails, it means that any user program may lock the CPU hard. Too bad. */
+ if (res != 12345678) printk( "Buggy.\n" );
+ else printk( "OK.\n" );
+#endif
+}
+
+/*
+ * Check whether we are able to run this kernel safely on SMP.
+ *
+ * - In order to run on a i386, we need to be compiled for i386
+ * (due to lack of "invlpg" and working WP on a i386)
+ * - In order to run on anything without a TSC, we need to be
+ * compiled for a i486.
+ * - In order to support the local APIC on a buggy Pentium machine,
+ * we need to be compiled with CONFIG_X86_GOOD_APIC disabled,
+ * which happens implicitly if compiled for a Pentium or lower
+ * (unless an advanced selection of CPU features is used) as an
+ * otherwise config implies a properly working local APIC without
+ * the need to do extra reads from the APIC.
+*/
+
+static void __init check_config(void)
+{
+/*
+ * We'd better not be a i386 if we're configured to use some
+ * i486+ only features! (WP works in supervisor mode and the
+ * new "invlpg" and "bswap" instructions)
+ */
+#if defined(CONFIG_X86_WP_WORKS_OK) || defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_BSWAP)
+ if (boot_cpu_data.x86 == 3)
+ panic("Kernel requires i486+ for 'invlpg' and other features");
+#endif
+
+/*
+ * If we configured ourselves for a TSC, we'd better have one!
+ */
+#ifdef CONFIG_X86_TSC
+ if (!cpu_has_tsc)
+ panic("Kernel compiled for Pentium+, requires TSC feature!");
+#endif
+
+/*
+ * If we were told we had a good local APIC, check for buggy Pentia,
+ * i.e. all B steppings and the C2 stepping of P54C when using their
+ * integrated APIC (see 11AP erratum in "Pentium Processor
+ * Specification Update").
+ */
+#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_GOOD_APIC)
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL
+ && cpu_has_apic
+ && boot_cpu_data.x86 == 5
+ && boot_cpu_data.x86_model == 2
+ && (boot_cpu_data.x86_mask < 6 || boot_cpu_data.x86_mask == 11))
+ panic("Kernel compiled for PMMX+, assumes a local APIC without
the read-before-write bug!");
+#endif
+}
+
+extern void alternative_instructions(void);
+
+static void __init check_bugs(void)
+{
+ identify_cpu(&boot_cpu_data);
+#ifndef CONFIG_SMP
+ printk("CPU: ");
+ print_cpu_info(&boot_cpu_data);
+#endif
+ check_config();
+ check_fpu();
+ check_hlt();
+ check_popad();
+ system_utsname.machine[1] = '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
+ alternative_instructions();
+}
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/include/asm-xen/asm-i386/desc.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/desc.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,143 @@
+#ifndef __ARCH_DESC_H
+#define __ARCH_DESC_H
+
+#include <asm/ldt.h>
+#include <asm/segment.h>
+
+#ifndef __ASSEMBLY__
+
+#include <linux/preempt.h>
+#include <linux/smp.h>
+
+#include <asm/mmu.h>
+
+extern struct desc_struct cpu_gdt_table[NR_CPUS][GDT_ENTRIES];
+
+struct Xgt_desc_struct {
+ unsigned short size;
+ unsigned long address __attribute__((packed));
+ unsigned short pad;
+} __attribute__ ((packed));
+
+extern struct Xgt_desc_struct idt_descr, cpu_gdt_descr[NR_CPUS];
+
+#define load_TR_desc() __asm__ __volatile__("ltr %%ax"::"a" (GDT_ENTRY_TSS*8))
+#define load_LDT_desc() __asm__ __volatile__("lldt %%ax"::"a" (GDT_ENTRY_LDT*8))
+
+#define get_cpu_gdt_table(_cpu) ((struct desc_struct *)cpu_gdt_descr[(_cpu)].address)
+
+/*
+ * This is the ldt that every process will get unless we need
+ * something other than this.
+ */
+extern struct desc_struct default_ldt[];
+extern void set_intr_gate(unsigned int irq, void * addr);
+
+#define _set_tssldt_desc(n,addr,limit,type) \
+__asm__ __volatile__ ("movw %w3,0(%2)\n\t" \
+ "movw %%ax,2(%2)\n\t" \
+ "rorl $16,%%eax\n\t" \
+ "movb %%al,4(%2)\n\t" \
+ "movb %4,5(%2)\n\t" \
+ "movb $0,6(%2)\n\t" \
+ "movb %%ah,7(%2)\n\t" \
+ "rorl $16,%%eax" \
+ : "=m"(*(n)) : "a" (addr), "r"(n), "ir"(limit), "i"(type))
+
+static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, void *addr)
+{
+ _set_tssldt_desc(&get_cpu_gdt_table(cpu)[entry], (int)addr,
+ offsetof(struct tss_struct, __cacheline_filler) - 1, 0x89);
+}
+
+#define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
+
+static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int size)
+{
+ _set_tssldt_desc(&get_cpu_gdt_table(cpu)[GDT_ENTRY_LDT],
+ (int)addr, ((size << 3)-1), 0x82);
+}
+
+#define LDT_entry_a(info) \
+ ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
+
+#define LDT_entry_b(info) \
+ (((info)->base_addr & 0xff000000) | \
+ (((info)->base_addr & 0x00ff0000) >> 16) | \
+ ((info)->limit & 0xf0000) | \
+ (((info)->read_exec_only ^ 1) << 9) | \
+ ((info)->contents << 10) | \
+ (((info)->seg_not_present ^ 1) << 15) | \
+ ((info)->seg_32bit << 22) | \
+ ((info)->limit_in_pages << 23) | \
+ ((info)->useable << 20) | \
+ 0x7000)
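+/*
+ * LDT_entry_a/b assemble the low and high words of an x86 segment
+ * descriptor from a user_desc-style info block; the constant 0x7000
+ * sets S=1 (code/data segment) and DPL=3 so the entry is always
+ * user-accessible.
+ */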
+
+#define LDT_empty(info) (\
+ (info)->base_addr == 0 && \
+ (info)->limit == 0 && \
+ (info)->contents == 0 && \
+ (info)->read_exec_only == 1 && \
+ (info)->seg_32bit == 0 && \
+ (info)->limit_in_pages == 0 && \
+ (info)->seg_not_present == 1 && \
+ (info)->useable == 0 )
+
+#if TLS_SIZE != 24
+# error update this code.
+#endif
+
+static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
+{
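+ /*
+ * The GDT is not directly writable by the guest under Xen, so each
+ * TLS slot is updated through HYPERVISOR_update_descriptor, passing
+ * the machine address of the entry and the new descriptor as two
+ * 32-bit halves.
+ */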
+#define C(i) HYPERVISOR_update_descriptor(virt_to_machine(&get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]), ((u32 *)&t->tls_array[i])[0], ((u32 *)&t->tls_array[i])[1])
+ C(0); C(1); C(2);
+#undef C
+}
+
+static inline void clear_LDT(void)
+{
+ int cpu = get_cpu();
+
+ /*
+ * NB. We load the default_ldt for lcall7/27 handling on demand, as
+ * it slows down context switching. No one uses it anyway.
+ */
+ cpu = cpu; /* XXX avoid compiler warning */
+ queue_set_ldt(0UL, 0);
+ put_cpu();
+}
+
+/*
+ * load one particular LDT into the current CPU
+ */
+static inline void load_LDT_nolock(mm_context_t *pc, int cpu)
+{
+ void *segments = pc->ldt;
+ int count = pc->size;
+
+ if (likely(!count))
+ segments = NULL;
+
+ queue_set_ldt((unsigned long)segments, count);
+}
+
+static inline void load_LDT(mm_context_t *pc)
+{
+ int cpu = get_cpu();
+ load_LDT_nolock(pc, cpu);
+ flush_page_update_queue();
+ put_cpu();
+}
+
+static inline unsigned long get_desc_base(unsigned long *desc)
+{
+ unsigned long base;
+ base = ((desc[0] >> 16) & 0x0000ffff) |
+ ((desc[1] << 16) & 0x00ff0000) |
+ (desc[1] & 0xff000000);
+ return base;
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif
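A note on load_TLS() above: the C(i) helper issues one explicit hypercall per
TLS descriptor rather than writing the GDT directly. A hand-expanded,
illustrative sketch of the i = 0 case, using only definitions from this header:

	/* Illustrative expansion of C(0) in load_TLS(): one hypercall per
	 * TLS slot, addressed by the machine address of the GDT entry. */
	HYPERVISOR_update_descriptor(
		virt_to_machine(&get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + 0]),
		((u32 *)&t->tls_array[0])[0],	/* descriptor low 32 bits  */
		((u32 *)&t->tls_array[0])[1]);	/* descriptor high 32 bits */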
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/include/asm-xen/asm-i386/dma-mapping.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/dma-mapping.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,165 @@
+#ifndef _ASM_I386_DMA_MAPPING_H
+#define _ASM_I386_DMA_MAPPING_H
+
+#include <linux/mm.h>
+
+#include <asm/cache.h>
+#include <asm/io.h>
+#include <asm/scatterlist.h>
+
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+
+void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, int flag);
+
+void dma_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle);
+
+extern dma_addr_t
+dma_map_single(struct device *dev, void *ptr, size_t size,
+ enum dma_data_direction direction);
+
+extern void
+dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction direction);
+
+static inline int
+dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction direction)
+{
+ int i;
+
+ BUG_ON(direction == DMA_NONE);
+
+ for (i = 0; i < nents; i++ ) {
+ BUG_ON(!sg[i].page);
+
+ sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
+ }
+
+ flush_write_buffers();
+ return nents;
+}
+
+static inline dma_addr_t
+dma_map_page(struct device *dev, struct page *page, unsigned long offset,
+ size_t size, enum dma_data_direction direction)
+{
+ BUG_ON(direction == DMA_NONE);
+ return page_to_phys(page) + offset;
+}
+
+static inline void
+dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG_ON(direction == DMA_NONE);
+}
+
+
+static inline void
+dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
+ enum dma_data_direction direction)
+{
+ BUG_ON(direction == DMA_NONE);
+}
+
+extern void
+dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction);
+
+extern void
+dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction);
+
+static inline void
+dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction);
+}
+
+static inline void
+dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ dma_sync_single_for_device(dev, dma_handle+offset, size, direction);
+}
+
+static inline void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
+ enum dma_data_direction direction)
+{
+}
+
+static inline void
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
+ enum dma_data_direction direction)
+{
+ flush_write_buffers();
+}
+
+static inline int
+dma_mapping_error(dma_addr_t dma_addr)
+{
+ return 0;
+}
+
+static inline int
+dma_supported(struct device *dev, u64 mask)
+{
+ /*
+ * we fall back to GFP_DMA when the mask isn't all 1s,
+ * so we can't guarantee allocations that must be
+ * within a tighter range than GFP_DMA..
+ */
+ if(mask < 0x00ffffff)
+ return 0;
+
+ return 1;
+}
+
+static inline int
+dma_set_mask(struct device *dev, u64 mask)
+{
+ if(!dev->dma_mask || !dma_supported(dev, mask))
+ return -EIO;
+
+ *dev->dma_mask = mask;
+
+ return 0;
+}
+
+static inline int
+dma_get_cache_alignment(void)
+{
+ /* no easy way to get cache size on all x86, so return the
+ * maximum possible, to be safe */
+ return (1 << L1_CACHE_SHIFT_MAX);
+}
+
+#define dma_is_consistent(d) (1)
+
+static inline void
+dma_cache_sync(void *vaddr, size_t size,
+ enum dma_data_direction direction)
+{
+ flush_write_buffers();
+}
+
+#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
+extern int
+dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
+ dma_addr_t device_addr, size_t size, int flags);
+
+extern void
+dma_release_declared_memory(struct device *dev);
+
+extern void *
+dma_mark_declared_memory_occupied(struct device *dev,
+ dma_addr_t device_addr, size_t size);
+
+#endif
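Since the map/unmap operations above reduce to address arithmetic
(page_to_phys() plus offset, with no IOMMU), a driver uses this interface in
the usual streaming-DMA pattern. A minimal, hypothetical sketch — 'dev',
'buf' and 'len' are placeholders, not names from this patch:

	/* Hypothetical streaming-DMA usage of the interface above. */
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(handle))	/* always 0 in this implementation */
		return -EIO;
	/* ... point the device at 'handle' and wait for it to finish ... */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);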
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/include/asm-xen/asm-i386/fixmap.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/fixmap.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,170 @@
+/*
+ * fixmap.h: compile-time virtual memory allocation
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998 Ingo Molnar
+ *
+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
+ */
+
+#ifndef _ASM_FIXMAP_H
+#define _ASM_FIXMAP_H
+
+#include <linux/config.h>
+
+/* used by vmalloc.c, vsyscall.lds.S.
+ *
+ * Leave one empty page between vmalloc'ed areas and
+ * the start of the fixmap.
+ */
+#define __FIXADDR_TOP (HYPERVISOR_VIRT_START - 2 * PAGE_SIZE)
+
+#ifndef __ASSEMBLY__
+#include <linux/kernel.h>
+#include <asm/acpi.h>
+#include <asm/apicdef.h>
+#include <asm/page.h>
+#ifdef CONFIG_HIGHMEM
+#include <linux/threads.h>
+#include <asm/kmap_types.h>
+#endif
+
+/*
+ * Here we define all the compile-time 'special' virtual
+ * addresses. The point is to have a constant address at
+ * compile time, but to set the physical address only
+ * in the boot process. We allocate these special addresses
+ * from the end of virtual memory (0xfffff000) backwards.
+ * Also this lets us do fail-safe vmalloc(), we
+ * can guarantee that these special addresses and
+ * vmalloc()-ed addresses never overlap.
+ *
+ * these 'compile-time allocated' memory buffers are
+ * fixed-size 4k pages. (or larger if used with an increment
+ * higher than 1) use set_fixmap(idx,phys) to associate
+ * physical memory with fixmap indices.
+ *
+ * TLB entries of such buffers will not be flushed across
+ * task switches.
+ */
+enum fixed_addresses {
+ FIX_HOLE,
+ FIX_VSYSCALL,
+#ifdef CONFIG_X86_LOCAL_APIC
+ FIX_APIC_BASE, /* local (CPU) APIC -- required for SMP or not */
+#endif
+#ifdef CONFIG_X86_IO_APIC
+ FIX_IO_APIC_BASE_0,
+ FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
+#endif
+#ifdef CONFIG_X86_VISWS_APIC
+ FIX_CO_CPU, /* Cobalt timer */
+ FIX_CO_APIC, /* Cobalt APIC Redirection Table */
+ FIX_LI_PCIA, /* Lithium PCI Bridge A */
+ FIX_LI_PCIB, /* Lithium PCI Bridge B */
+#endif
+#ifdef CONFIG_X86_F00F_BUG
+ FIX_F00F_IDT, /* Virtual mapping for IDT */
+#endif
+#ifdef CONFIG_X86_CYCLONE_TIMER
+ FIX_CYCLONE_TIMER, /*cyclone timer register*/
+#endif
+#ifdef CONFIG_HIGHMEM
+ FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
+ FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
+#endif
+#ifdef CONFIG_ACPI_BOOT
+ FIX_ACPI_BEGIN,
+ FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
+#endif
+#ifdef CONFIG_PCI_MMCONFIG
+ FIX_PCIE_MCFG,
+#endif
+ FIX_SHARED_INFO,
+#ifdef CONFIG_XEN_PHYSDEV_ACCESS
+#define NR_FIX_ISAMAPS 256
+ FIX_ISAMAP_END,
+ FIX_ISAMAP_BEGIN = FIX_ISAMAP_END + NR_FIX_ISAMAPS - 1,
+#endif
+ __end_of_permanent_fixed_addresses,
+ /* temporary boot-time mappings, used before ioremap() is functional */
+#define NR_FIX_BTMAPS 16
+ FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
+ FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS - 1,
+ FIX_WP_TEST,
+ __end_of_fixed_addresses
+};
+
+extern void __set_fixmap (enum fixed_addresses idx,
+ unsigned long phys, pgprot_t flags);
+extern void __set_fixmap_ma (enum fixed_addresses idx,
+ unsigned long mach, pgprot_t flags);
+
+#define set_fixmap(idx, phys) \
+ __set_fixmap(idx, phys, PAGE_KERNEL)
+#define set_fixmap_ma(idx, phys) \
+ __set_fixmap_ma(idx, phys, PAGE_KERNEL)
+#define set_fixmap_ma_ro(idx, phys) \
+ __set_fixmap_ma(idx, phys, PAGE_KERNEL_RO)
+/*
+ * Some hardware wants to get fixmapped without caching.
+ */
+#define set_fixmap_nocache(idx, phys) \
+ __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
+
+#define clear_fixmap(idx) \
+ __set_fixmap(idx, 0, __pgprot(0))
+
+#define FIXADDR_TOP ((unsigned long)__FIXADDR_TOP)
+
+#define __FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
+#define __FIXADDR_BOOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
+#define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE)
+#define FIXADDR_BOOT_START (FIXADDR_TOP - __FIXADDR_BOOT_SIZE)
+
+#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
+#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
+
+/*
+ * This is the range that is readable by user mode, and things
+ * acting like user mode such as get_user_pages.
+ */
+#define FIXADDR_USER_START (__fix_to_virt(FIX_VSYSCALL))
+#define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE)
+
+
+extern void __this_fixmap_does_not_exist(void);
+
+/*
+ * 'index to address' translation. If anyone tries to use the idx
+ * directly without translation, we catch the bug with a NULL-dereference
+ * kernel oops. Illegal ranges of incoming indices are caught too.
+ */
+static __always_inline unsigned long fix_to_virt(const unsigned int idx)
+{
+ /*
+ * this branch gets completely eliminated after inlining,
+ * except when someone tries to use fixaddr indices in an
+ * illegal way. (such as mixing up address types or using
+ * out-of-range indices).
+ *
+ * If it doesn't get removed, the linker will complain
+ * loudly with a reasonably clear error message..
+ */
+ if (idx >= __end_of_fixed_addresses)
+ __this_fixmap_does_not_exist();
+
+ return __fix_to_virt(idx);
+}
+
+static inline unsigned long virt_to_fix(const unsigned long vaddr)
+{
+ BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
+ return __virt_to_fix(vaddr);
+}
+
+#endif /* !__ASSEMBLY__ */
+#endif
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/include/asm-xen/asm-i386/floppy.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/floppy.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,147 @@
+/*
+ * Architecture specific parts of the Floppy driver
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995
+ *
+ * Modifications for Xen are Copyright (c) 2004, Keir Fraser.
+ */
+#ifndef __ASM_XEN_I386_FLOPPY_H
+#define __ASM_XEN_I386_FLOPPY_H
+
+#include <linux/vmalloc.h>
+
+/* XEN: Hit DMA paths on the head. This trick from asm-m68k/floppy.h. */
+#include <asm/dma.h>
+#undef MAX_DMA_ADDRESS
+#define MAX_DMA_ADDRESS 0
+#define CROSS_64KB(a,s) (0)
+
+#define fd_inb(port) inb_p(port)
+#define fd_outb(value,port) outb_p(value,port)
+
+#define fd_request_dma() (0)
+#define fd_free_dma() ((void)0)
+#define fd_enable_irq() enable_irq(FLOPPY_IRQ)
+#define fd_disable_irq() disable_irq(FLOPPY_IRQ)
+#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL)
+#define fd_get_dma_residue() (virtual_dma_count + virtual_dma_residue)
+#define fd_dma_setup(addr, size, mode, io) vdma_dma_setup(addr, size, mode, io)
+/*
+ * Do not use vmalloc/vfree: floppy_release_irq_and_dma() gets called from
+ * softirq context via motor_off_callback. A generic bug we happen to trigger.
+ */
+#define fd_dma_mem_alloc(size) __get_free_pages(GFP_KERNEL, get_order(size))
+#define fd_dma_mem_free(addr, size) free_pages(addr, get_order(size))
+
+static int virtual_dma_count;
+static int virtual_dma_residue;
+static char *virtual_dma_addr;
+static int virtual_dma_mode;
+static int doing_pdma;
+
+static irqreturn_t floppy_hardint(int irq, void *dev_id, struct pt_regs * regs)
+{
+ register unsigned char st;
+ register int lcount;
+ register char *lptr;
+
+ if (!doing_pdma)
+ return floppy_interrupt(irq, dev_id, regs);
+
+ st = 1;
+ for(lcount=virtual_dma_count, lptr=virtual_dma_addr; lcount; lcount--, lptr++) {
+ st=inb(virtual_dma_port+4) & 0xa0 ;
+ if(st != 0xa0)
+ break;
+ if(virtual_dma_mode)
+ outb_p(*lptr, virtual_dma_port+5);
+ else
+ *lptr = inb_p(virtual_dma_port+5);
+ }
+ virtual_dma_count = lcount;
+ virtual_dma_addr = lptr;
+ st = inb(virtual_dma_port+4);
+
+ if(st == 0x20)
+ return IRQ_HANDLED;
+ if(!(st & 0x20)) {
+ virtual_dma_residue += virtual_dma_count;
+ virtual_dma_count=0;
+ doing_pdma = 0;
+ floppy_interrupt(irq, dev_id, regs);
+ return IRQ_HANDLED;
+ }
+ return IRQ_HANDLED;
+}
+
+static void fd_disable_dma(void)
+{
+ doing_pdma = 0;
+ virtual_dma_residue += virtual_dma_count;
+ virtual_dma_count=0;
+}
+
+static int fd_request_irq(void)
+{
+ return request_irq(FLOPPY_IRQ, floppy_hardint,SA_INTERRUPT,
+ "floppy", NULL);
+}
+
+static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io)
+{
+ doing_pdma = 1;
+ virtual_dma_port = io;
+ virtual_dma_mode = (mode == DMA_MODE_WRITE);
+ virtual_dma_addr = addr;
+ virtual_dma_count = size;
+ virtual_dma_residue = 0;
+ return 0;
+}
+
+/* XEN: This trick to force 'virtual DMA' is from include/asm-m68k/floppy.h. */
+#define FDC1 xen_floppy_init()
+static int FDC2 = -1;
+
+static int xen_floppy_init(void)
+{
+ use_virtual_dma = 1;
+ can_use_virtual_dma = 1;
+ return 0x3f0;
+}
+
+/*
+ * Floppy types are stored in the rtc's CMOS RAM and so rtc_lock
+ * is needed to prevent corrupted CMOS RAM in case "insmod floppy"
+ * coincides with another rtc CMOS user. Paul G.
+ */
+#define FLOPPY0_TYPE ({ \
+ unsigned long flags; \
+ unsigned char val; \
+ spin_lock_irqsave(&rtc_lock, flags); \
+ val = (CMOS_READ(0x10) >> 4) & 15; \
+ spin_unlock_irqrestore(&rtc_lock, flags); \
+ val; \
+})
+
+#define FLOPPY1_TYPE ({ \
+ unsigned long flags; \
+ unsigned char val; \
+ spin_lock_irqsave(&rtc_lock, flags); \
+ val = CMOS_READ(0x10) & 15; \
+ spin_unlock_irqrestore(&rtc_lock, flags); \
+ val; \
+})
+
+#define N_FDC 2
+#define N_DRIVE 8
+
+#define FLOPPY_MOTOR_MASK 0xf0
+
+#define EXTRA_FLOPPY_PARAMS
+
+#endif /* __ASM_XEN_I386_FLOPPY_H */
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/include/asm-xen/asm-i386/highmem.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/highmem.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,82 @@
+/*
+ * highmem.h: virtual kernel memory mappings for high memory
+ *
+ * Used in CONFIG_HIGHMEM systems for memory pages which
+ * are not addressable by direct kernel virtual addresses.
+ *
+ * Copyright (C) 1999 Gerhard Wichert, Siemens AG
+ * Gerhard.Wichert@xxxxxxxxxxxxxx
+ *
+ *
+ * Redesigned the x86 32-bit VM architecture to deal with
+ * up to 16 Terabyte physical memory. With current x86 CPUs
+ * we now support up to 64 Gigabytes physical RAM.
+ *
+ * Copyright (C) 1999 Ingo Molnar <mingo@xxxxxxxxxx>
+ */
+
+#ifndef _ASM_HIGHMEM_H
+#define _ASM_HIGHMEM_H
+
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+#include <linux/interrupt.h>
+#include <linux/threads.h>
+#include <asm/kmap_types.h>
+#include <asm/tlbflush.h>
+
+/* declarations for highmem.c */
+extern unsigned long highstart_pfn, highend_pfn;
+
+extern pte_t *kmap_pte;
+extern pgprot_t kmap_prot;
+extern pte_t *pkmap_page_table;
+
+extern void kmap_init(void);
+
+/*
+ * Right now we initialize only a single pte table. It can be extended
+ * easily, subsequent pte tables have to be allocated in one physical
+ * chunk of RAM.
+ */
+#ifdef CONFIG_X86_PAE
+#define LAST_PKMAP 512
+#else
+#define LAST_PKMAP 1024
+#endif
+/*
+ * Ordering is:
+ *
+ * FIXADDR_TOP
+ * fixed_addresses
+ * FIXADDR_START
+ * temp fixed addresses
+ * FIXADDR_BOOT_START
+ * Persistent kmap area
+ * PKMAP_BASE
+ * VMALLOC_END
+ * Vmalloc area
+ * VMALLOC_START
+ * high_memory
+ */
+#define PKMAP_BASE ( (FIXADDR_BOOT_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK )
+#define LAST_PKMAP_MASK (LAST_PKMAP-1)
+#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
+#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
+
+extern void * FASTCALL(kmap_high(struct page *page));
+extern void FASTCALL(kunmap_high(struct page *page));
+
+void *kmap(struct page *page);
+void kunmap(struct page *page);
+void *kmap_atomic(struct page *page, enum km_type type);
+void *kmap_atomic_pte(struct page *page, enum km_type type);
+void kunmap_atomic(void *kvaddr, enum km_type type);
+struct page *kmap_atomic_to_page(void *ptr);
+
+#define flush_cache_kmaps() do { } while (0)
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_HIGHMEM_H */
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/include/asm-xen/asm-i386/io.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/io.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,425 @@
+#ifndef _ASM_IO_H
+#define _ASM_IO_H
+
+#include <linux/config.h>
+#include <linux/string.h>
+#include <linux/compiler.h>
+
+/*
+ * This file contains the definitions for the x86 IO instructions
+ * inb/inw/inl/outb/outw/outl and the "string versions" of the same
+ * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
+ * versions of the single-IO instructions (inb_p/inw_p/..).
+ *
+ * This file is not meant to be obfuscating: it's just complicated
+ * to (a) handle it all in a way that makes gcc able to optimize it
+ * as well as possible and (b) trying to avoid writing the same thing
+ * over and over again with slight variations and possibly making a
+ * mistake somewhere.
+ */
+
+/*
+ * Thanks to James van Artsdalen for a better timing-fix than
+ * the two short jumps: using outb's to a nonexistent port seems
+ * to guarantee better timings even on fast machines.
+ *
+ * On the other hand, I'd like to be sure of a non-existent port:
+ * I feel a bit unsafe about using 0x80 (should be safe, though)
+ *
+ * Linus
+ */
+
+ /*
+ * Bit simplified and optimized by Jan Hubicka
+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
+ *
+ * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
+ * isa_read[wl] and isa_write[wl] fixed
+ * - Arnaldo Carvalho de Melo <acme@xxxxxxxxxxxxxxxx>
+ */
+
+#define IO_SPACE_LIMIT 0xffff
+
+#define XQUAD_PORTIO_BASE 0xfe400000
+#define XQUAD_PORTIO_QUAD 0x40000 /* 256k per quad. */
+
+#ifdef __KERNEL__
+
+#include <asm-generic/iomap.h>
+
+#include <linux/vmalloc.h>
+#include <asm/fixmap.h>
+
+/**
+ * virt_to_phys - map virtual addresses to physical
+ * @address: address to remap
+ *
+ * The returned physical address is the physical (CPU) mapping for
+ * the memory address given. It is only valid to use this function on
+ * addresses directly mapped or allocated via kmalloc.
+ *
+ * This function does not give bus mappings for DMA transfers. In
+ * almost all conceivable cases a device driver should not be using
+ * this function
+ */
+
+static inline unsigned long virt_to_phys(volatile void * address)
+{
+ return __pa(address);
+}
+
+/**
+ * phys_to_virt - map physical address to virtual
+ * @address: address to remap
+ *
+ * The returned virtual address is a current CPU mapping for
+ * the memory address given. It is only valid to use this function on
+ * addresses that have a kernel mapping
+ *
+ * This function does not handle bus mappings for DMA transfers. In
+ * almost all conceivable cases a device driver should not be using
+ * this function
+ */
+
+static inline void * phys_to_virt(unsigned long address)
+{
+ return __va(address);
+}
+
+/*
+ * Change "struct page" to physical address.
+ */
+#define page_to_pseudophys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
+#define page_to_phys(page) (phys_to_machine(page_to_pseudophys(page)))
+
+#define bio_to_pseudophys(bio) (page_to_pseudophys(bio_page((bio))) + \
+ (unsigned long) bio_offset((bio)))
+#define bvec_to_pseudophys(bv) (page_to_pseudophys((bv)->bv_page) + \
+ (unsigned long) (bv)->bv_offset)
+
+#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
+ (((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) && \
+ ((bvec_to_pseudophys((vec1)) + (vec1)->bv_len) == \
+ bvec_to_pseudophys((vec2))))
+
+extern void __iomem * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
+
+/**
+ * ioremap - map bus memory into CPU space
+ * @offset: bus address of the memory
+ * @size: size of the resource to map
+ *
+ * ioremap performs a platform specific sequence of operations to
+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
+ * writew/writel functions and the other mmio helpers. The returned
+ * address is not guaranteed to be usable directly as a virtual
+ * address.
+ */
+
+static inline void __iomem * ioremap(unsigned long offset, unsigned long size)
+{
+ return __ioremap(offset, size, 0);
+}
+
+extern void __iomem * ioremap_nocache(unsigned long offset, unsigned long size);
+extern void iounmap(volatile void __iomem *addr);
+
+/*
+ * bt_ioremap() and bt_iounmap() are for temporary early boot-time
+ * mappings, before the real ioremap() is functional.
+ * A boot-time mapping is currently limited to at most 16 pages.
+ */
+extern void *bt_ioremap(unsigned long offset, unsigned long size);
+extern void bt_iounmap(void *addr, unsigned long size);
+
+/*
+ * ISA I/O bus memory addresses are 1:1 with the physical address.
+ */
+#define isa_virt_to_bus(_x) isa_virt_to_bus_is_UNSUPPORTED->x
+#define isa_page_to_bus(_x) isa_page_to_bus_is_UNSUPPORTED->x
+#ifdef CONFIG_XEN_PHYSDEV_ACCESS
+#define isa_bus_to_virt(_x) (void *)(__fix_to_virt(FIX_ISAMAP_BEGIN) + (_x))
+#else
+#define isa_bus_to_virt(_x) isa_bus_to_virt_needs_PRIVILEGED_BUILD
+#endif
+
+/*
+ * However PCI ones are not necessarily 1:1 and therefore these interfaces
+ * are forbidden in portable PCI drivers.
+ *
+ * Allow them on x86 for legacy drivers, though.
+ */
+#define virt_to_bus(_x) phys_to_machine(__pa(_x))
+#define bus_to_virt(_x) __va(machine_to_phys(_x))
+
+/*
+ * readX/writeX() are used to access memory mapped devices. On some
+ * architectures the memory mapped IO stuff needs to be accessed
+ * differently. On the x86 architecture, we just read/write the
+ * memory location directly.
+ */
+
+static inline unsigned char readb(const volatile void __iomem *addr)
+{
+ return *(volatile unsigned char __force *) addr;
+}
+static inline unsigned short readw(const volatile void __iomem *addr)
+{
+ return *(volatile unsigned short __force *) addr;
+}
+static inline unsigned int readl(const volatile void __iomem *addr)
+{
+ return *(volatile unsigned int __force *) addr;
+}
+#define readb_relaxed(addr) readb(addr)
+#define readw_relaxed(addr) readw(addr)
+#define readl_relaxed(addr) readl(addr)
+#define __raw_readb readb
+#define __raw_readw readw
+#define __raw_readl readl
+
+static inline void writeb(unsigned char b, volatile void __iomem *addr)
+{
+ *(volatile unsigned char __force *) addr = b;
+}
+static inline void writew(unsigned short b, volatile void __iomem *addr)
+{
+ *(volatile unsigned short __force *) addr = b;
+}
+static inline void writel(unsigned int b, volatile void __iomem *addr)
+{
+ *(volatile unsigned int __force *) addr = b;
+}
+#define __raw_writeb writeb
+#define __raw_writew writew
+#define __raw_writel writel
+
+#define mmiowb()
+
+static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
+{
+ memset((void __force *) addr, val, count);
+}
+static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
+{
+ __memcpy(dst, (void __force *) src, count);
+}
+static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
+{
+ __memcpy((void __force *) dst, src, count);
+}
+
+/*
+ * ISA space is 'always mapped' on a typical x86 system, no need to
+ * explicitly ioremap() it. The fact that the ISA IO space is mapped
+ * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
+ * are physical addresses. The following constant pointer can be
+ * used as the IO-area pointer (it can be iounmapped as well, so the
+ * analogy with PCI is quite large):
+ */
+#define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET))
+
+#define isa_readb(a) readb(__ISA_IO_base + (a))
+#define isa_readw(a) readw(__ISA_IO_base + (a))
+#define isa_readl(a) readl(__ISA_IO_base + (a))
+#define isa_writeb(b,a) writeb(b,__ISA_IO_base + (a))
+#define isa_writew(w,a) writew(w,__ISA_IO_base + (a))
+#define isa_writel(l,a) writel(l,__ISA_IO_base + (a))
+#define isa_memset_io(a,b,c) memset_io(__ISA_IO_base + (a),(b),(c))
+#define isa_memcpy_fromio(a,b,c) memcpy_fromio((a),__ISA_IO_base + (b),(c))
+#define isa_memcpy_toio(a,b,c) memcpy_toio(__ISA_IO_base + (a),(b),(c))
+
+
+/*
+ * Again, i386 does not require mem IO specific functions.
+ */
+
+#define eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(void __force *)(b),(c),(d))
+#define isa_eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(void __force *)(__ISA_IO_base + (b)),(c),(d))
+
+/**
+ * check_signature - find BIOS signatures
+ * @io_addr: mmio address to check
+ * @signature: signature block
+ * @length: length of signature
+ *
+ * Perform a signature comparison with the mmio address io_addr. This
+ * address should have been obtained by ioremap.
+ * Returns 1 on a match.
+ */
+
+static inline int check_signature(volatile void __iomem * io_addr,
+ const unsigned char *signature, int length)
+{
+ int retval = 0;
+ do {
+ if (readb(io_addr) != *signature)
+ goto out;
+ io_addr++;
+ signature++;
+ length--;
+ } while (length);
+ retval = 1;
+out:
+ return retval;
+}
+
+/*
+ * Cache management
+ *
+ * This needed for two cases
+ * 1. Out of order aware processors
+ * 2. Accidentally out of order processors (PPro errata #51)
+ */
+
+#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)
+
+static inline void flush_write_buffers(void)
+{
+ __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory");
+}
+
+#define dma_cache_inv(_start,_size) flush_write_buffers()
+#define dma_cache_wback(_start,_size) flush_write_buffers()
+#define dma_cache_wback_inv(_start,_size) flush_write_buffers()
+
+#else
+
+/* Nothing to do */
+
+#define dma_cache_inv(_start,_size) do { } while (0)
+#define dma_cache_wback(_start,_size) do { } while (0)
+#define dma_cache_wback_inv(_start,_size) do { } while (0)
+#define flush_write_buffers()
+
+#endif
+
+#endif /* __KERNEL__ */
+
+#ifdef SLOW_IO_BY_JUMPING
+#define __SLOW_DOWN_IO "jmp 1f; 1: jmp 1f; 1:"
+#elif defined(__UNSAFE_IO__)
+#define __SLOW_DOWN_IO "outb %%al,$0x80;"
+#else
+#define __SLOW_DOWN_IO "\n1: outb %%al,$0x80\n" \
+ "2:\n" \
+ ".section __ex_table,\"a\"\n\t" \
+ ".align 4\n\t" \
+ ".long 1b,2b\n" \
+ ".previous"
+#endif
+
+static inline void slow_down_io(void) {
+ __asm__ __volatile__(
+ __SLOW_DOWN_IO
+#ifdef REALLY_SLOW_IO
+ __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO
+#endif
+ : : );
+}
+
+#ifdef CONFIG_X86_NUMAQ
+extern void *xquad_portio; /* Where the IO area was mapped */
+#define XQUAD_PORT_ADDR(port, quad) (xquad_portio + (XQUAD_PORTIO_QUAD*quad) + port)
+#define __BUILDIO(bwl,bw,type) \
+static inline void out##bwl##_quad(unsigned type value, int port, int quad) { \
+ if (xquad_portio) \
+ write##bwl(value, XQUAD_PORT_ADDR(port, quad)); \
+ else \
+ out##bwl##_local(value, port); \
+} \
+static inline void out##bwl(unsigned type value, int port) { \
+ out##bwl##_quad(value, port, 0); \
+} \
+static inline unsigned type in##bwl##_quad(int port, int quad) { \
+ if (xquad_portio) \
+ return read##bwl(XQUAD_PORT_ADDR(port, quad)); \
+ else \
+ return in##bwl##_local(port); \
+} \
+static inline unsigned type in##bwl(int port) { \
+ return in##bwl##_quad(port, 0); \
+}
+#else
+#define __BUILDIO(bwl,bw,type) \
+static inline void out##bwl(unsigned type value, int port) { \
+ out##bwl##_local(value, port); \
+} \
+static inline unsigned type in##bwl(int port) { \
+ return in##bwl##_local(port); \
+}
+#endif
+
+
+#if __UNSAFE_IO__
+#define ____BUILDIO(bwl,bw,type) \
+static inline void out##bwl##_local(unsigned type value, int port) { \
+ __asm__ __volatile__("out" #bwl " %" #bw "0, %w1" : : "a"(value),
"Nd"(port)); \
+} \
+static inline unsigned type in##bwl##_local(int port) { \
+ unsigned type value; \
+ __asm__ __volatile__("in" #bwl " %w1, %" #bw "0" : "=a"(value) :
"Nd"(port)); \
+ return value; \
+}
+#else
+#define ____BUILDIO(bwl,bw,type) \
+static inline void out##bwl##_local(unsigned type value, int port) { \
+ __asm__ __volatile__("1: out" #bwl " %" #bw "0, %w1\n" \
+ "2:\n" \
+ ".section __ex_table,\"a\"\n\t" \
+ ".align 4\n\t" \
+ ".long 1b,2b\n" \
+ ".previous" : : "a"(value), "Nd"(port)); \
+} \
+static inline unsigned type in##bwl##_local(int port) { \
+ unsigned type value; \
+ __asm__ __volatile__("1:in" #bwl " %w1, %" #bw "0\n" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: mov" #bwl " $~0,%" #bw "0\n\t" \
+ "jmp 2b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n\t" \
+ ".align 4\n\t" \
+ ".long 1b,3b\n" \
+ ".previous" : "=a"(value) : "Nd"(port)); \
+ return value; \
+}
+#endif
+
+#define BUILDIO(bwl,bw,type) \
+____BUILDIO(bwl,bw,type) \
+static inline void out##bwl##_local_p(unsigned type value, int port) { \
+ out##bwl##_local(value, port); \
+ slow_down_io(); \
+} \
+static inline unsigned type in##bwl##_local_p(int port) { \
+ unsigned type value = in##bwl##_local(port); \
+ slow_down_io(); \
+ return value; \
+} \
+__BUILDIO(bwl,bw,type) \
+static inline void out##bwl##_p(unsigned type value, int port) { \
+ out##bwl(value, port); \
+ slow_down_io(); \
+} \
+static inline unsigned type in##bwl##_p(int port) { \
+ unsigned type value = in##bwl(port); \
+ slow_down_io(); \
+ return value; \
+} \
+static inline void outs##bwl(int port, const void *addr, unsigned long count) { \
+ __asm__ __volatile__("rep; outs" #bwl : "+S"(addr), "+c"(count) : "d"(port)); \
+} \
+static inline void ins##bwl(int port, void *addr, unsigned long count) { \
+ __asm__ __volatile__("rep; ins" #bwl : "+D"(addr), "+c"(count) :
"d"(port)); \
+}
+
+BUILDIO(b,b,char)
+BUILDIO(w,w,short)
+BUILDIO(l,,int)
+
+/* We will be supplying our own /dev/mem implementation */
+#define ARCH_HAS_DEV_MEM
+
+#endif
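The Xen-specific twist in this header is that page_to_phys() routes through
phys_to_machine(), so addresses handed to devices are machine addresses while
__pa()/__va() stay in the pseudo-physical space. A small sketch, assuming
'page' is some valid struct page pointer:

	/* Pseudo-physical vs. machine addresses for the same page. */
	dma_addr_t pseudo = page_to_pseudophys(page);	/* pfn << PAGE_SHIFT */
	dma_addr_t mach   = page_to_phys(page);		/* machine address   */
	/* Devices are programmed with 'mach'; the kernel gets back to a
	 * virtual address via bus_to_virt(mach), i.e.
	 * __va(machine_to_phys(mach)). */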
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/include/asm-xen/asm-i386/mach-xen/irq_vectors.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/mach-xen/irq_vectors.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,131 @@
+/*
+ * This file should contain #defines for all of the interrupt vector
+ * numbers used by this architecture.
+ *
+ * In addition, there are some standard defines:
+ *
+ * FIRST_EXTERNAL_VECTOR:
+ * The first free place for external interrupts
+ *
+ * SYSCALL_VECTOR:
+ * The IRQ vector a syscall makes the user to kernel transition
+ * under.
+ *
+ * TIMER_IRQ:
+ * The IRQ number the timer interrupt comes in at.
+ *
+ * NR_IRQS:
+ * The total number of interrupt vectors (including all the
+ * architecture specific interrupts) needed.
+ *
+ */
+#ifndef _ASM_IRQ_VECTORS_H
+#define _ASM_IRQ_VECTORS_H
+
+/*
+ * IDT vectors usable for external interrupt sources start
+ * at 0x20:
+ */
+#define FIRST_EXTERNAL_VECTOR 0x20
+
+#define SYSCALL_VECTOR 0x80
+
+/*
+ * Vectors 0x20-0x2f are used for ISA interrupts.
+ */
+
+/*
+ * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
+ *
+ * some of the following vectors are 'rare', they are merged
+ * into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
+ * TLB, reschedule and local APIC vectors are performance-critical.
+ *
+ * Vectors 0xf0-0xfa are free (reserved for future Linux use).
+ */
+#define SPURIOUS_APIC_VECTOR 0xff
+#define ERROR_APIC_VECTOR 0xfe
+#define INVALIDATE_TLB_VECTOR 0xfd
+#define RESCHEDULE_VECTOR 0xfc
+#define CALL_FUNCTION_VECTOR 0xfb
+
+#define THERMAL_APIC_VECTOR 0xf0
+/*
+ * Local APIC timer IRQ vector is on a different priority level,
+ * to work around the 'lost local interrupt if more than 2 IRQ
+ * sources per level' errata.
+ */
+#define LOCAL_TIMER_VECTOR 0xef
+
+/*
+ * First APIC vector available to drivers: (vectors 0x30-0xee)
+ * we start at 0x31 to spread out vectors evenly between priority
+ * levels. (0x80 is the syscall vector)
+ */
+#define FIRST_DEVICE_VECTOR 0x31
+#define FIRST_SYSTEM_VECTOR 0xef
+
+#define TIMER_IRQ timer_irq
+
+/*
+ * 16 8259A IRQ's, 208 potential APIC interrupt sources.
+ * Right now the APIC is mostly only used for SMP.
+ * 256 vectors is an architectural limit. (we can have
+ * more than 256 devices theoretically, but they will
+ * have to use shared interrupts)
+ * Since vectors 0x00-0x1f are used/reserved for the CPU,
+ * the usable vector space is 0x20-0xff (224 vectors)
+ */
+
+#if 0
+/*
+ * The maximum number of vectors supported by i386 processors
+ * is limited to 256. For processors other than i386, NR_VECTORS
+ * should be changed accordingly.
+ */
+#define NR_VECTORS 256
+#endif
+
+#define FPU_IRQ 13
+
+#define FIRST_VM86_IRQ 3
+#define LAST_VM86_IRQ 15
+#define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15)
+
+/*
+ * The flat IRQ space is divided into two regions:
+ * 1. A one-to-one mapping of real physical IRQs. This space is only used
+ * if we have physical device-access privilege. This region is at the
+ * start of the IRQ space so that existing device drivers do not need
+ * to be modified to translate physical IRQ numbers into our IRQ space.
+ * 2. A dynamic mapping of inter-domain and Xen-sourced virtual IRQs. These
+ * are bound using the provided bind/unbind functions.
+ */
+
+#define PIRQ_BASE 0
+#define NR_PIRQS 128
+
+#define DYNIRQ_BASE (PIRQ_BASE + NR_PIRQS)
+#define NR_DYNIRQS 128
+
+#define NR_IRQS (NR_PIRQS + NR_DYNIRQS)
+#define NR_IRQ_VECTORS NR_IRQS
+
+#define pirq_to_irq(_x) ((_x) + PIRQ_BASE)
+#define irq_to_pirq(_x) ((_x) - PIRQ_BASE)
+
+#define dynirq_to_irq(_x) ((_x) + DYNIRQ_BASE)
+#define irq_to_dynirq(_x) ((_x) - DYNIRQ_BASE)
+
+#ifndef __ASSEMBLY__
+/* Dynamic binding of event channels and VIRQ sources to Linux IRQ space. */
+extern int bind_virq_to_irq(int virq);
+extern void unbind_virq_from_irq(int virq);
+extern int bind_evtchn_to_irq(int evtchn);
+extern void unbind_evtchn_from_irq(int evtchn);
+
+extern void irq_suspend(void);
+extern void irq_resume(void);
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_IRQ_VECTORS_H */
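With PIRQ_BASE = 0, NR_PIRQS = 128 and NR_DYNIRQS = 128, the flat IRQ space
above has 256 entries: physical IRQs occupy 0-127 and dynamic
(event-channel/VIRQ) IRQs occupy 128-255. For example:

	/* Mapping between the two sub-spaces and the flat Linux IRQ space. */
	int irq1 = pirq_to_irq(14);	/* physical IRQ 14 -> Linux IRQ 14  */
	int irq2 = dynirq_to_irq(5);	/* dynamic IRQ 5   -> Linux IRQ 133 */
	int dyn  = irq_to_dynirq(irq2);	/* and back again  -> 5             */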
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/include/asm-xen/asm-i386/mach-xen/setup_arch_post.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/mach-xen/setup_arch_post.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,46 @@
+/**
+ * machine_specific_memory_setup - Hook for machine specific memory setup.
+ *
+ * Description:
+ * This is included late in kernel/setup.c so that it can make
+ * use of all of the static functions.
+ **/
+
+static char * __init machine_specific_memory_setup(void)
+{
+ char *who;
+ unsigned long start_pfn, max_pfn;
+
+ who = "Xen";
+
+ start_pfn = 0;
+ max_pfn = xen_start_info.nr_pages;
+
+ e820.nr_map = 0;
+ add_memory_region(PFN_PHYS(start_pfn), PFN_PHYS(max_pfn) - PFN_PHYS(start_pfn), E820_RAM);
+
+ return who;
+}
+
+void __init machine_specific_modify_cpu_capabilities(struct cpuinfo_x86 *c)
+{
+ clear_bit(X86_FEATURE_VME, c->x86_capability);
+ clear_bit(X86_FEATURE_DE, c->x86_capability);
+ clear_bit(X86_FEATURE_PSE, c->x86_capability);
+ clear_bit(X86_FEATURE_PGE, c->x86_capability);
+ clear_bit(X86_FEATURE_SEP, c->x86_capability);
+ if (!(xen_start_info.flags & SIF_PRIVILEGED))
+ clear_bit(X86_FEATURE_MTRR, c->x86_capability);
+}
+
+extern void hypervisor_callback(void);
+extern void failsafe_callback(void);
+
+static void __init machine_specific_arch_setup(void)
+{
+ HYPERVISOR_set_callbacks(
+ __KERNEL_CS, (unsigned long)hypervisor_callback,
+ __KERNEL_CS, (unsigned long)failsafe_callback);
+
+ machine_specific_modify_cpu_capabilities(&boot_cpu_data);
+}
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/include/asm-xen/asm-i386/mach-xen/setup_arch_pre.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/mach-xen/setup_arch_pre.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,5 @@
+/* Hook to call BIOS initialisation function */
+
+#define ARCH_SETUP machine_specific_arch_setup();
+
+static void __init machine_specific_arch_setup(void);
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/include/asm-xen/asm-i386/mmu_context.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/mmu_context.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,74 @@
+#ifndef __I386_SCHED_H
+#define __I386_SCHED_H
+
+#include <linux/config.h>
+#include <asm/desc.h>
+#include <asm/atomic.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+
+/*
+ * Used for LDT copy/destruction.
+ */
+int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
+void destroy_context(struct mm_struct *mm);
+
+
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+#ifdef CONFIG_SMP
+ unsigned cpu = smp_processor_id();
+ if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
+ per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_LAZY;
+#endif
+}
+
+static inline void switch_mm(struct mm_struct *prev,
+ struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ int cpu = smp_processor_id();
+
+ if (likely(prev != next)) {
+ /* stop flush ipis for the previous mm */
+ cpu_clear(cpu, prev->cpu_vm_mask);
+#ifdef CONFIG_SMP
+ per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
+ per_cpu(cpu_tlbstate, cpu).active_mm = next;
+#endif
+ cpu_set(cpu, next->cpu_vm_mask);
+
+ /* Re-load page tables */
+ load_cr3(next->pgd);
+
+ /*
+ * load the LDT, if the LDT is different:
+ */
+ if (unlikely(prev->context.ldt != next->context.ldt))
+ load_LDT_nolock(&next->context, cpu);
+ }
+#ifdef CONFIG_SMP
+ else {
+ per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
+ BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next);
+
+ if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
+ /* We were in lazy tlb mode and leave_mm disabled
+ * tlb flush IPI delivery. We must reload %cr3.
+ */
+ load_cr3(next->pgd);
+ load_LDT_nolock(&next->context, cpu);
+ }
+ }
+#endif
+}
+
+#define deactivate_mm(tsk, mm) \
+ asm("movl %0,%%fs ; movl %0,%%gs": :"r" (0))
+
+#define activate_mm(prev, next) do { \
+ switch_mm((prev),(next),NULL); \
+ flush_page_update_queue(); \
+} while (0)
+
+#endif
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/include/asm-xen/asm-i386/msr.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/msr.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,272 @@
+#ifndef __ASM_MSR_H
+#define __ASM_MSR_H
+
+#include <linux/smp.h>
+#include <asm-xen/hypervisor.h>
+
+/*
+ * Access to machine-specific registers (available on 586 and better only)
+ * Note: the rd* operations modify the parameters directly (without using
+ * pointer indirection), this allows gcc to optimize better
+ */
+
+#define rdmsr(_msr,_val1,_val2) do { \
+ dom0_op_t op; \
+ op.cmd = DOM0_MSR; \
+ op.u.msr.write = 0; \
+ op.u.msr.msr = (_msr); \
+ op.u.msr.cpu_mask = (1 << smp_processor_id()); \
+ HYPERVISOR_dom0_op(&op); \
+ (_val1) = op.u.msr.out1; \
+ (_val2) = op.u.msr.out2; \
+} while(0)
+
+#define wrmsr(_msr,_val1,_val2) do { \
+ dom0_op_t op; \
+ op.cmd = DOM0_MSR; \
+ op.u.msr.write = 1; \
+ op.u.msr.cpu_mask = (1 << smp_processor_id()); \
+ op.u.msr.msr = (_msr); \
+ op.u.msr.in1 = (_val1); \
+ op.u.msr.in2 = (_val2); \
+ HYPERVISOR_dom0_op(&op); \
+} while(0)
+
+#define rdmsrl(msr,val) do { \
+ unsigned long l__,h__; \
+ rdmsr (msr, l__, h__); \
+ val = l__; \
+ val |= ((u64)h__<<32); \
+} while(0)
+
+static inline void wrmsrl (unsigned long msr, unsigned long long val)
+{
+ unsigned long lo, hi;
+ lo = (unsigned long) val;
+ hi = val >> 32;
+ wrmsr (msr, lo, hi);
+}
+
+#define rdtsc(low,high) \
+ __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
+
+#define rdtscl(low) \
+ __asm__ __volatile__("rdtsc" : "=a" (low) : : "edx")
+
+#define rdtscll(val) \
+ __asm__ __volatile__("rdtsc" : "=A" (val))
+
+#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
+
+#define rdpmc(counter,low,high) \
+ __asm__ __volatile__("rdpmc" \
+ : "=a" (low), "=d" (high) \
+ : "c" (counter))
+
+/* symbolic names for some interesting MSRs */
+/* Intel defined MSRs. */
+#define MSR_IA32_P5_MC_ADDR 0
+#define MSR_IA32_P5_MC_TYPE 1
+#define MSR_IA32_PLATFORM_ID 0x17
+#define MSR_IA32_EBL_CR_POWERON 0x2a
+
+#define MSR_IA32_APICBASE 0x1b
+#define MSR_IA32_APICBASE_BSP (1<<8)
+#define MSR_IA32_APICBASE_ENABLE (1<<11)
+#define MSR_IA32_APICBASE_BASE (0xfffff<<12)
+
+#define MSR_IA32_UCODE_WRITE 0x79
+#define MSR_IA32_UCODE_REV 0x8b
+
+#define MSR_P6_PERFCTR0 0xc1
+#define MSR_P6_PERFCTR1 0xc2
+
+#define MSR_IA32_BBL_CR_CTL 0x119
+
+#define MSR_IA32_SYSENTER_CS 0x174
+#define MSR_IA32_SYSENTER_ESP 0x175
+#define MSR_IA32_SYSENTER_EIP 0x176
+
+#define MSR_IA32_MCG_CAP 0x179
+#define MSR_IA32_MCG_STATUS 0x17a
+#define MSR_IA32_MCG_CTL 0x17b
+
+/* P4/Xeon+ specific */
+#define MSR_IA32_MCG_EAX 0x180
+#define MSR_IA32_MCG_EBX 0x181
+#define MSR_IA32_MCG_ECX 0x182
+#define MSR_IA32_MCG_EDX 0x183
+#define MSR_IA32_MCG_ESI 0x184
+#define MSR_IA32_MCG_EDI 0x185
+#define MSR_IA32_MCG_EBP 0x186
+#define MSR_IA32_MCG_ESP 0x187
+#define MSR_IA32_MCG_EFLAGS 0x188
+#define MSR_IA32_MCG_EIP 0x189
+#define MSR_IA32_MCG_RESERVED 0x18A
+
+#define MSR_P6_EVNTSEL0 0x186
+#define MSR_P6_EVNTSEL1 0x187
+
+#define MSR_IA32_PERF_STATUS 0x198
+#define MSR_IA32_PERF_CTL 0x199
+
+#define MSR_IA32_THERM_CONTROL 0x19a
+#define MSR_IA32_THERM_INTERRUPT 0x19b
+#define MSR_IA32_THERM_STATUS 0x19c
+#define MSR_IA32_MISC_ENABLE 0x1a0
+
+#define MSR_IA32_DEBUGCTLMSR 0x1d9
+#define MSR_IA32_LASTBRANCHFROMIP 0x1db
+#define MSR_IA32_LASTBRANCHTOIP 0x1dc
+#define MSR_IA32_LASTINTFROMIP 0x1dd
+#define MSR_IA32_LASTINTTOIP 0x1de
+
+#define MSR_IA32_MC0_CTL 0x400
+#define MSR_IA32_MC0_STATUS 0x401
+#define MSR_IA32_MC0_ADDR 0x402
+#define MSR_IA32_MC0_MISC 0x403
+
+/* Pentium IV performance counter MSRs */
+#define MSR_P4_BPU_PERFCTR0 0x300
+#define MSR_P4_BPU_PERFCTR1 0x301
+#define MSR_P4_BPU_PERFCTR2 0x302
+#define MSR_P4_BPU_PERFCTR3 0x303
+#define MSR_P4_MS_PERFCTR0 0x304
+#define MSR_P4_MS_PERFCTR1 0x305
+#define MSR_P4_MS_PERFCTR2 0x306
+#define MSR_P4_MS_PERFCTR3 0x307
+#define MSR_P4_FLAME_PERFCTR0 0x308
+#define MSR_P4_FLAME_PERFCTR1 0x309
+#define MSR_P4_FLAME_PERFCTR2 0x30a
+#define MSR_P4_FLAME_PERFCTR3 0x30b
+#define MSR_P4_IQ_PERFCTR0 0x30c
+#define MSR_P4_IQ_PERFCTR1 0x30d
+#define MSR_P4_IQ_PERFCTR2 0x30e
+#define MSR_P4_IQ_PERFCTR3 0x30f
+#define MSR_P4_IQ_PERFCTR4 0x310
+#define MSR_P4_IQ_PERFCTR5 0x311
+#define MSR_P4_BPU_CCCR0 0x360
+#define MSR_P4_BPU_CCCR1 0x361
+#define MSR_P4_BPU_CCCR2 0x362
+#define MSR_P4_BPU_CCCR3 0x363
+#define MSR_P4_MS_CCCR0 0x364
+#define MSR_P4_MS_CCCR1 0x365
+#define MSR_P4_MS_CCCR2 0x366
+#define MSR_P4_MS_CCCR3 0x367
+#define MSR_P4_FLAME_CCCR0 0x368
+#define MSR_P4_FLAME_CCCR1 0x369
+#define MSR_P4_FLAME_CCCR2 0x36a
+#define MSR_P4_FLAME_CCCR3 0x36b
+#define MSR_P4_IQ_CCCR0 0x36c
+#define MSR_P4_IQ_CCCR1 0x36d
+#define MSR_P4_IQ_CCCR2 0x36e
+#define MSR_P4_IQ_CCCR3 0x36f
+#define MSR_P4_IQ_CCCR4 0x370
+#define MSR_P4_IQ_CCCR5 0x371
+#define MSR_P4_ALF_ESCR0 0x3ca
+#define MSR_P4_ALF_ESCR1 0x3cb
+#define MSR_P4_BPU_ESCR0 0x3b2
+#define MSR_P4_BPU_ESCR1 0x3b3
+#define MSR_P4_BSU_ESCR0 0x3a0
+#define MSR_P4_BSU_ESCR1 0x3a1
+#define MSR_P4_CRU_ESCR0 0x3b8
+#define MSR_P4_CRU_ESCR1 0x3b9
+#define MSR_P4_CRU_ESCR2 0x3cc
+#define MSR_P4_CRU_ESCR3 0x3cd
+#define MSR_P4_CRU_ESCR4 0x3e0
+#define MSR_P4_CRU_ESCR5 0x3e1
+#define MSR_P4_DAC_ESCR0 0x3a8
+#define MSR_P4_DAC_ESCR1 0x3a9
+#define MSR_P4_FIRM_ESCR0 0x3a4
+#define MSR_P4_FIRM_ESCR1 0x3a5
+#define MSR_P4_FLAME_ESCR0 0x3a6
+#define MSR_P4_FLAME_ESCR1 0x3a7
+#define MSR_P4_FSB_ESCR0 0x3a2
+#define MSR_P4_FSB_ESCR1 0x3a3
+#define MSR_P4_IQ_ESCR0 0x3ba
+#define MSR_P4_IQ_ESCR1 0x3bb
+#define MSR_P4_IS_ESCR0 0x3b4
+#define MSR_P4_IS_ESCR1 0x3b5
+#define MSR_P4_ITLB_ESCR0 0x3b6
+#define MSR_P4_ITLB_ESCR1 0x3b7
+#define MSR_P4_IX_ESCR0 0x3c8
+#define MSR_P4_IX_ESCR1 0x3c9
+#define MSR_P4_MOB_ESCR0 0x3aa
+#define MSR_P4_MOB_ESCR1 0x3ab
+#define MSR_P4_MS_ESCR0 0x3c0
+#define MSR_P4_MS_ESCR1 0x3c1
+#define MSR_P4_PMH_ESCR0 0x3ac
+#define MSR_P4_PMH_ESCR1 0x3ad
+#define MSR_P4_RAT_ESCR0 0x3bc
+#define MSR_P4_RAT_ESCR1 0x3bd
+#define MSR_P4_SAAT_ESCR0 0x3ae
+#define MSR_P4_SAAT_ESCR1 0x3af
+#define MSR_P4_SSU_ESCR0 0x3be
+#define MSR_P4_SSU_ESCR1 0x3bf /* guess: not defined in manual */
+#define MSR_P4_TBPU_ESCR0 0x3c2
+#define MSR_P4_TBPU_ESCR1 0x3c3
+#define MSR_P4_TC_ESCR0 0x3c4
+#define MSR_P4_TC_ESCR1 0x3c5
+#define MSR_P4_U2L_ESCR0 0x3b0
+#define MSR_P4_U2L_ESCR1 0x3b1
+
+/* AMD Defined MSRs */
+#define MSR_K6_EFER 0xC0000080
+#define MSR_K6_STAR 0xC0000081
+#define MSR_K6_WHCR 0xC0000082
+#define MSR_K6_UWCCR 0xC0000085
+#define MSR_K6_EPMR 0xC0000086
+#define MSR_K6_PSOR 0xC0000087
+#define MSR_K6_PFIR 0xC0000088
+
+#define MSR_K7_EVNTSEL0 0xC0010000
+#define MSR_K7_EVNTSEL1 0xC0010001
+#define MSR_K7_EVNTSEL2 0xC0010002
+#define MSR_K7_EVNTSEL3 0xC0010003
+#define MSR_K7_PERFCTR0 0xC0010004
+#define MSR_K7_PERFCTR1 0xC0010005
+#define MSR_K7_PERFCTR2 0xC0010006
+#define MSR_K7_PERFCTR3 0xC0010007
+#define MSR_K7_HWCR 0xC0010015
+#define MSR_K7_CLK_CTL 0xC001001b
+#define MSR_K7_FID_VID_CTL 0xC0010041
+#define MSR_K7_FID_VID_STATUS 0xC0010042
+
+/* extended feature register */
+#define MSR_EFER 0xc0000080
+
+/* EFER bits: */
+
+/* Execute Disable enable */
+#define _EFER_NX 11
+#define EFER_NX (1<<_EFER_NX)
+
+/* Centaur-Hauls/IDT defined MSRs. */
+#define MSR_IDT_FCR1 0x107
+#define MSR_IDT_FCR2 0x108
+#define MSR_IDT_FCR3 0x109
+#define MSR_IDT_FCR4 0x10a
+
+#define MSR_IDT_MCR0 0x110
+#define MSR_IDT_MCR1 0x111
+#define MSR_IDT_MCR2 0x112
+#define MSR_IDT_MCR3 0x113
+#define MSR_IDT_MCR4 0x114
+#define MSR_IDT_MCR5 0x115
+#define MSR_IDT_MCR6 0x116
+#define MSR_IDT_MCR7 0x117
+#define MSR_IDT_MCR_CTRL 0x120
+
+/* VIA Cyrix defined MSRs*/
+#define MSR_VIA_FCR 0x1107
+#define MSR_VIA_LONGHAUL 0x110a
+#define MSR_VIA_RNG 0x110b
+#define MSR_VIA_BCR2 0x1147
+
+/* Transmeta defined MSRs */
+#define MSR_TMTA_LONGRUN_CTRL 0x80868010
+#define MSR_TMTA_LONGRUN_FLAGS 0x80868011
+#define MSR_TMTA_LRTI_READOUT 0x80868018
+#define MSR_TMTA_LRTI_VOLT_MHZ 0x8086801a
+
+#endif /* __ASM_MSR_H */
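Because rdmsr()/wrmsr() above are implemented as DOM0_MSR hypercalls rather
than raw rdmsr/wrmsr instructions, MSR access only succeeds in a sufficiently
privileged domain. A small usage sketch built from definitions in this header:

	/* Read and write back a 64-bit MSR on the current CPU. */
	unsigned long long val;
	rdmsrl(MSR_IA32_SYSENTER_EIP, val);	/* issues a DOM0_MSR dom0_op   */
	wrmsrl(MSR_IA32_SYSENTER_EIP, val);	/* two 32-bit halves via wrmsr */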
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/include/asm-xen/asm-i386/page.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/page.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,203 @@
+#ifndef _I386_PAGE_H
+#define _I386_PAGE_H
+
+/* PAGE_SHIFT determines the page size */
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (1UL << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE-1))
+
+#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
+#define LARGE_PAGE_SIZE (1UL << PMD_SHIFT)
+
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+
+#include <linux/config.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <asm-xen/xen-public/xen.h>
+#include <asm-xen/foreign_page.h>
+
+#define arch_free_page(_page,_order) \
+({ int foreign = PageForeign(_page); \
+ if (foreign) \
+ (PageForeignDestructor(_page))(_page); \
+ foreign; \
+})
+#define HAVE_ARCH_FREE_PAGE
+
+#ifdef CONFIG_XEN_SCRUB_PAGES
+#define scrub_pages(_p,_n) memset((void *)(_p), 0, (_n) << PAGE_SHIFT)
+#else
+#define scrub_pages(_p,_n) ((void)0)
+#endif
+
+#ifdef CONFIG_X86_USE_3DNOW
+
+#include <asm/mmx.h>
+
+#define clear_page(page) mmx_clear_page((void *)(page))
+#define copy_page(to,from) mmx_copy_page(to,from)
+
+#else
+
+#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+
+/*
+ * On older X86 processors it's not a win to use MMX here it seems.
+ * Maybe the K6-III ?
+ */
+
+#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
+#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE)
+
+#endif
+
+#define clear_user_page(page, vaddr, pg) clear_page(page)
+#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
+
+/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
+extern unsigned int *phys_to_machine_mapping;
+#define pfn_to_mfn(_pfn) ((unsigned long)(phys_to_machine_mapping[(_pfn)]))
+#define mfn_to_pfn(_mfn) ((unsigned long)(machine_to_phys_mapping[(_mfn)]))
+static inline unsigned long phys_to_machine(unsigned long phys)
+{
+ unsigned long machine = pfn_to_mfn(phys >> PAGE_SHIFT);
+ machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
+ return machine;
+}
+static inline unsigned long machine_to_phys(unsigned long machine)
+{
+ unsigned long phys = mfn_to_pfn(machine >> PAGE_SHIFT);
+ phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
+ return phys;
+}
+
+/*
+ * These are used to make use of C type-checking..
+ */
+extern int nx_enabled;
+#ifdef CONFIG_X86_PAE
+extern unsigned long long __supported_pte_mask;
+typedef struct { unsigned long pte_low, pte_high; } pte_t;
+typedef struct { unsigned long long pmd; } pmd_t;
+typedef struct { unsigned long long pgd; } pgd_t;
+typedef struct { unsigned long long pgprot; } pgprot_t;
+#define pmd_val(x) ((x).pmd)
+#define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
+#define __pmd(x) ((pmd_t) { (x) } )
+#define HPAGE_SHIFT 21
+#else
+typedef struct { unsigned long pte_low; } pte_t;
+typedef struct { unsigned long pgd; } pgd_t;
+typedef struct { unsigned long pgprot; } pgprot_t;
+#define boot_pte_t pte_t /* or would you rather have a typedef */
+#define pte_val(x) (((x).pte_low & 1) ? machine_to_phys((x).pte_low) : \
+ (x).pte_low)
+#define pte_val_ma(x) ((x).pte_low)
+#define HPAGE_SHIFT 22
+#endif
+#define PTE_MASK PAGE_MASK
+
+#ifdef CONFIG_HUGETLB_PAGE
+#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
+#define HPAGE_MASK (~(HPAGE_SIZE - 1))
+#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
+#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+#endif
+
+
+static inline unsigned long pgd_val(pgd_t x)
+{
+ unsigned long ret = x.pgd;
+ if (ret) ret = machine_to_phys(ret);
+ return ret;
+}
+#define pgprot_val(x) ((x).pgprot)
+
+#define __pte(x) ({ unsigned long _x = (x); \
+ (((_x)&1) ? ((pte_t) {phys_to_machine(_x)}) : ((pte_t) {(_x)})); })
+#define __pte_ma(x) ((pte_t) { (x) } )
+#define __pgd(x) ({ unsigned long _x = (x); \
+ (((_x)&1) ? ((pgd_t) {phys_to_machine(_x)}) : ((pgd_t) {(_x)})); })
+#define __pgprot(x) ((pgprot_t) { (x) } )
+
+#endif /* !__ASSEMBLY__ */
+
+/* to align the pointer to the (next) page boundary */
+#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
+
+/*
+ * This handles the memory map.. We could make this a config
+ * option, but too many people screw it up, and too few need
+ * it.
+ *
+ * A __PAGE_OFFSET of 0xC0000000 means that the kernel has
+ * a virtual address space of one gigabyte, which limits the
+ * amount of physical memory you can use to about 950MB.
+ *
+ * If you want more physical memory than this then see the CONFIG_HIGHMEM4G
+ * and CONFIG_HIGHMEM64G options in the kernel configuration.
+ */
+
+#ifndef __ASSEMBLY__
+
+/*
+ * This much address space is reserved for vmalloc() and iomap()
+ * as well as fixmap mappings.
+ */
+extern unsigned int __VMALLOC_RESERVE;
+
+/* Pure 2^n version of get_order */
+static __inline__ int get_order(unsigned long size)
+{
+ int order;
+
+ size = (size-1) >> (PAGE_SHIFT-1);
+ order = -1;
+ do {
+ size >>= 1;
+ order++;
+ } while (size);
+ return order;
+}
+
+extern int sysctl_legacy_va_layout;
+
+#endif /* __ASSEMBLY__ */
+
+#ifdef __ASSEMBLY__
+#define __PAGE_OFFSET (0xC0000000)
+#else
+#define __PAGE_OFFSET (0xC0000000UL)
+#endif
+
+
+#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
+#define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE)
+#define MAXMEM (HYPERVISOR_VIRT_START-__PAGE_OFFSET-__VMALLOC_RESERVE)
+#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
+#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
+#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
+#ifndef CONFIG_DISCONTIGMEM
+#define pfn_to_page(pfn) (mem_map + (pfn))
+#define page_to_pfn(page) ((unsigned long)((page) - mem_map))
+#define pfn_valid(pfn) ((pfn) < max_mapnr)
+#endif /* !CONFIG_DISCONTIGMEM */
+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+
+#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+
+#define VM_DATA_DEFAULT_FLAGS \
+ (VM_READ | VM_WRITE | \
+ ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+/* VIRT <-> MACHINE conversion */
+#define virt_to_machine(_a) (phys_to_machine(__pa(_a)))
+#define machine_to_virt(_m) (__va(machine_to_phys(_m)))
+
+#endif /* __KERNEL__ */
+
+#endif /* _I386_PAGE_H */
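The pseudo-physical <-> machine helpers above compose as expected, since the
page-offset bits pass through both conversions unchanged. A short round-trip
sketch; 'va' is an assumed directly-mapped kernel pointer:

	/* Round-trip a directly-mapped kernel address through machine space. */
	unsigned long ma = virt_to_machine(va);	/* phys_to_machine(__pa(va)) */
	unsigned long pa = machine_to_phys(ma);	/* back to pseudo-physical   */
	void *back = machine_to_virt(ma);	/* == __va(pa) == va         */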
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/include/asm-xen/asm-i386/param.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/param.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,23 @@
+#ifndef _ASMi386_PARAM_H
+#define _ASMi386_PARAM_H
+
+#ifdef __KERNEL__
+# define HZ 100 /* Internal kernel timer frequency */
+# define USER_HZ 100 /* .. some user interfaces are in "ticks" */
+# define CLOCKS_PER_SEC (USER_HZ) /* like times() */
+#endif
+
+#ifndef HZ
+#define HZ 100
+#endif
+
+#define EXEC_PAGESIZE 4096
+
+#ifndef NOGROUP
+#define NOGROUP (-1)
+#endif
+
+#define MAXHOSTNAMELEN 64 /* max length of hostname */
+#define COMMAND_LINE_SIZE 256
+
+#endif
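Since HZ and USER_HZ are both 100 here, one timer tick is 10 ms and the
kernel and user tick units coincide. A tiny sketch; 'ticks' is an assumed
jiffies count, not a name from this patch:

	/* 10 ms per tick at HZ == 100. */
	unsigned long ms  = ticks * (1000 / HZ);	/* ticks -> milliseconds */
	unsigned long sec = HZ;				/* ticks in one second   */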
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/include/asm-xen/asm-i386/pci.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/pci.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,117 @@
+#ifndef __i386_PCI_H
+#define __i386_PCI_H
+
+#include <linux/config.h>
+
+#ifdef __KERNEL__
+#include <linux/mm.h> /* for struct page */
+
+/* Can be used to override the logic in pci_scan_bus for skipping
+ already-configured bus numbers - to be used for buggy BIOSes
+ or architectures with incomplete PCI setup by the loader */
+
+#ifdef CONFIG_PCI
+extern unsigned int pcibios_assign_all_busses(void);
+#else
+#define pcibios_assign_all_busses() 0
+#endif
+#define pcibios_scan_all_fns(a, b) 0
+
+extern unsigned long pci_mem_start;
+#define PCIBIOS_MIN_IO 0x1000
+#define PCIBIOS_MIN_MEM (pci_mem_start)
+
+#define PCIBIOS_MIN_CARDBUS_IO 0x4000
+
+void pcibios_config_init(void);
+struct pci_bus * pcibios_scan_root(int bus);
+
+void pcibios_set_master(struct pci_dev *dev);
+void pcibios_penalize_isa_irq(int irq);
+struct irq_routing_table *pcibios_get_irq_routing_table(void);
+int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
+
+/* Dynamic DMA mapping stuff.
+ * i386 has everything mapped statically.
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <asm/scatterlist.h>
+#include <linux/string.h>
+#include <asm/io.h>
+
+struct pci_dev;
+
+/* The PCI address space does equal the physical memory
+ * address space. The networking and block device layers use
+ * this boolean for bounce buffer decisions.
+ */
+#define PCI_DMA_BUS_IS_PHYS (1)
+
+/* pci_unmap_{page,single} is a nop so... */
+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
+#define pci_unmap_addr(PTR, ADDR_NAME) (0)
+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
+#define pci_unmap_len(PTR, LEN_NAME) (0)
+#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
+
+/* This is always fine. */
+#define pci_dac_dma_supported(pci_dev, mask) (1)
+
+static inline dma64_addr_t
+pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page, unsigned long offset, int direction)
+{
+ return ((dma64_addr_t) page_to_phys(page) +
+ (dma64_addr_t) offset);
+}
+
+static inline struct page *
+pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr)
+{
+ return pfn_to_page(dma_addr >> PAGE_SHIFT);
+}
+
+static inline unsigned long
+pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr)
+{
+ return (dma_addr & ~PAGE_MASK);
+}
+
+static inline void
+pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
+{
+}
+
+static inline void
+pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
+{
+ flush_write_buffers();
+}
+
+#define HAVE_PCI_MMAP
+extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+ enum pci_mmap_state mmap_state, int write_combine);
+
+
+static inline void pcibios_add_platform_entries(struct pci_dev *dev)
+{
+}
+
+#endif /* __KERNEL__ */
+
+/* implement the pci_ DMA API in terms of the generic device dma_ one */
+#include <asm-generic/pci-dma-compat.h>
+
+/* generic pci stuff */
+#include <asm-generic/pci.h>
+
+/* On Xen we have to scan all functions since Xen hides bridges from
+ * us. If a bridge is at fn=0 and that slot has a multifunction
+ * device, we won't find the additional devices without scanning all
+ * functions. */
+#undef pcibios_scan_all_fns
+#define pcibios_scan_all_fns(a, b) 1
+
+#endif /* __i386_PCI_H */
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/include/asm-xen/asm-i386/pgalloc.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/pgalloc.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,52 @@
+#ifndef _I386_PGALLOC_H
+#define _I386_PGALLOC_H
+
+#include <linux/config.h>
+#include <asm/processor.h>
+#include <asm/fixmap.h>
+#include <linux/threads.h>
+#include <linux/mm.h> /* for struct page */
+#include <asm/io.h> /* for phys_to_virt and page_to_pseudophys */
+
+#define pmd_populate_kernel(mm, pmd, pte) \
+ set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)))
+
+#define pmd_populate(mm, pmd, pte) do { \
+ set_pmd(pmd, __pmd(_PAGE_TABLE + \
+ ((unsigned long long)page_to_pfn(pte) << \
+ (unsigned long long) PAGE_SHIFT))); \
+ flush_page_update_queue(); \
+} while (0)
+/*
+ * Allocate and free page tables.
+ */
+extern pgd_t *pgd_alloc(struct mm_struct *);
+extern void pgd_free(pgd_t *pgd);
+
+extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
+extern struct page *pte_alloc_one(struct mm_struct *, unsigned long);
+
+static inline void pte_free_kernel(pte_t *pte)
+{
+	/* Give the page back to Xen as an ordinary writable page (and
+	 * flush the queued update) before freeing it, not after: the
+	 * mapping must still be valid when make_page_writable() runs. */
+	make_page_writable(pte);
+	flush_page_update_queue();
+	free_page((unsigned long)pte);
+}
+
+extern void pte_free(struct page *pte);
+
+#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
+
+#ifdef CONFIG_X86_PAE
+/*
+ * In the PAE case we free the pmds as part of the pgd.
+ */
+#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
+#define pmd_free(x) do { } while (0)
+#define __pmd_free_tlb(tlb,x) do { } while (0)
+#define pud_populate(mm, pmd, pte) BUG()
+#endif
+
+#define check_pgt_cache() do { } while (0)
+
+#endif /* _I386_PGALLOC_H */
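
The 64-bit casts in pmd_populate() above matter under PAE, where a page
frame number shifted left by PAGE_SHIFT no longer fits in 32 bits. A
minimal host-side demonstration (frame number and flag value are
illustrative):

    /* build: cc -o pmdval pmdval.c */
    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
    	unsigned long long pfn = 0x120000;	/* phys 0x120000000, above 4GB */

    	unsigned long long ok  = (pfn << PAGE_SHIFT) | 0x067;	/* 64-bit shift */
    	unsigned int       bad = ((unsigned int)pfn << PAGE_SHIFT) | 0x067;

    	printf("64-bit entry: 0x%llx\n", ok);	/* 0x120000067 */
    	printf("32-bit entry: 0x%x\n", bad);	/* 0x20000067: top bits lost */
    	return 0;
    }
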
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/include/asm-xen/asm-i386/pgtable-2level-defs.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/pgtable-2level-defs.h	Tue Aug 9 15:17:45 2005
@@ -0,0 +1,19 @@
+#ifndef _I386_PGTABLE_2LEVEL_DEFS_H
+#define _I386_PGTABLE_2LEVEL_DEFS_H
+
+/*
+ * traditional i386 two-level paging structure:
+ */
+
+#define PGDIR_SHIFT 22
+#define PTRS_PER_PGD 1024
+#define PTRS_PER_PGD_NO_HV (HYPERVISOR_VIRT_START >> PGDIR_SHIFT)
+
+/*
+ * the i386 is two-level, so we don't really have any
+ * PMD directory physically.
+ */
+
+#define PTRS_PER_PTE 1024
+
+#endif /* _I386_PGTABLE_2LEVEL_DEFS_H */
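
PTRS_PER_PGD_NO_HV is the number of page-directory slots the guest may
touch; the rest belong to the hypervisor. A worked check, assuming the
HYPERVISOR_VIRT_START of 0xFC000000 that Xen/x86-32 used in this era
(the real constant comes from the Xen public headers):

    /* build: cc -o pgdents pgdents.c */
    #include <stdio.h>

    #define PGDIR_SHIFT		22
    #define PTRS_PER_PGD		1024
    #define HYPERVISOR_VIRT_START	0xFC000000UL	/* assumed value */
    #define PTRS_PER_PGD_NO_HV	(HYPERVISOR_VIRT_START >> PGDIR_SHIFT)

    int main(void)
    {
    	printf("guest pgd slots: %lu of %d\n",
    	       PTRS_PER_PGD_NO_HV, PTRS_PER_PGD);		/* 1008 of 1024 */
    	printf("Xen pgd slots:   %lu (top %lu MB)\n",
    	       PTRS_PER_PGD - PTRS_PER_PGD_NO_HV,
    	       (PTRS_PER_PGD - PTRS_PER_PGD_NO_HV) * 4UL);	/* 16 -> 64 MB */
    	return 0;
    }
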
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/include/asm-xen/asm-i386/pgtable-2level.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/pgtable-2level.h	Tue Aug 9 15:17:45 2005
@@ -0,0 +1,118 @@
+#ifndef _I386_PGTABLE_2LEVEL_H
+#define _I386_PGTABLE_2LEVEL_H
+
+#include <asm-generic/pgtable-nopmd.h>
+
+#define pte_ERROR(e) \
+ printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
+#define pgd_ERROR(e) \
+ printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+/*
+ * Certain architectures need to do special things when PTEs
+ * within a page table are directly modified. Thus, the following
+ * hook is made available.
+ */
+#define set_pte_batched(pteptr, pteval) \
+ queue_l1_entry_update(pteptr, (pteval).pte_low)
+#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
+#define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval)
+#define set_pmd(pmdptr, pmdval) xen_l2_entry_update((pmdptr), (pmdval))
+
+/*
+ * A note on implementation of this atomic 'get-and-clear' operation.
+ * This is actually very simple because Xen Linux can only run on a single
+ * processor. Therefore, we cannot race other processors setting the 'accessed'
+ * or 'dirty' bits on a page-table entry.
+ * Even if pages are shared between domains, that is not a problem because
+ * each domain will have separate page tables, with their own versions of
+ * accessed & dirty state.
+ */
+static inline pte_t ptep_get_and_clear(pte_t *xp)
+{
+ pte_t pte = *xp;
+ if (pte.pte_low)
+ set_pte(xp, __pte_ma(0));
+ return pte;
+}
+
+#define pte_same(a, b) ((a).pte_low == (b).pte_low)
+/*
+ * We detect special mappings in one of two ways:
+ * 1. If the MFN is an I/O page then Xen will set the m2p entry
+ * to be outside our maximum possible pseudophys range.
+ * 2. If the MFN belongs to a different domain then we will certainly
+ * not have MFN in our p2m table. Conversely, if the page is ours,
+ * then we'll have p2m(m2p(MFN))==MFN.
+ * If we detect a special mapping then it doesn't have a 'struct page'.
+ * We force !pfn_valid() by returning an out-of-range pointer.
+ *
+ * NB. These checks require that, for any MFN that is not in our reservation,
+ * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
+ * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
+ * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
+ *
+ * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
+ * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
+ * require. In all the cases we care about, the high bit gets shifted out
+ * (e.g., phys_to_machine()) so behaviour there is correct.
+ */
+#define INVALID_P2M_ENTRY (~0U)
+#define FOREIGN_FRAME(_m) ((_m) | (1UL<<((sizeof(unsigned long)*8)-1)))
+#define pte_pfn(_pte) \
+({ \
+ unsigned long mfn = (_pte).pte_low >> PAGE_SHIFT; \
+ unsigned long pfn = mfn_to_pfn(mfn); \
+ if ((pfn >= max_mapnr) || (pfn_to_mfn(pfn) != mfn)) \
+ pfn = max_mapnr; /* special: force !pfn_valid() */ \
+ pfn; \
+})
+
+#define pte_page(_pte) pfn_to_page(pte_pfn(_pte))
+
+#define pte_none(x) (!(x).pte_low)
+#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+#define pfn_pte_ma(pfn, prot)	__pte_ma(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+#define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+
+#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
+
+#define pmd_page_kernel(pmd) \
+((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
+
+/*
+ * All present user pages are user-executable:
+ */
+static inline int pte_exec(pte_t pte)
+{
+ return pte_user(pte);
+}
+
+/*
+ * All present pages are kernel-executable:
+ */
+static inline int pte_exec_kernel(pte_t pte)
+{
+ return 1;
+}
+
+/*
+ * Bits 0, 6 and 7 are taken, split up the 29 bits of offset
+ * into this range:
+ */
+#define PTE_FILE_MAX_BITS 29
+
+#define pte_to_pgoff(pte) \
+ ((((pte).pte_low >> 1) & 0x1f ) + (((pte).pte_low >> 8) << 5 ))
+
+#define pgoff_to_pte(off) \
+ ((pte_t) { (((off) & 0x1f) << 1) + (((off) >> 5) << 8) + _PAGE_FILE })
+
+/* Encode and de-code a swap entry */
+#define __swp_type(x) (((x).val >> 1) & 0x1f)
+#define __swp_offset(x) ((x).val >> 8)
+#define __swp_entry(type, offset)	((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
+#define __pte_to_swp_entry(pte)		((swp_entry_t) { (pte).pte_low })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+#endif /* _I386_PGTABLE_2LEVEL_H */
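
The pte_pfn() round trip above (pfn_to_mfn(mfn_to_pfn(mfn)) == mfn) is
the whole foreign-frame detector. A stand-alone sketch with toy
translation arrays standing in for the real m2p/p2m tables:

    /* build: cc -o m2p m2p.c */
    #include <stdio.h>

    #define MAX_MAPNR 1024			/* toy-sized reservation */

    static unsigned int p2m[MAX_MAPNR];		/* pfn -> mfn */
    static unsigned int m2p[4 * MAX_MAPNR];	/* mfn -> pfn */

    static unsigned int pte_pfn_check(unsigned int mfn)
    {
    	unsigned int pfn = m2p[mfn];
    	/* Same test as pte_pfn() above: the frame is ours only if the
    	 * round trip lands back on the mfn we started from. */
    	if (pfn >= MAX_MAPNR || p2m[pfn] != mfn)
    		return MAX_MAPNR;		/* forces !pfn_valid() */
    	return pfn;
    }

    int main(void)
    {
    	unsigned int i;

    	for (i = 0; i < MAX_MAPNR; i++) {	/* arbitrary machine layout */
    		p2m[i] = 3 * i;
    		m2p[3 * i] = i;
    	}
    	printf("own frame mfn=30 -> pfn %u\n", pte_pfn_check(30)); /* 10 */
    	printf("foreign   mfn=31 -> pfn %u\n", pte_pfn_check(31)); /* 1024 */
    	return 0;
    }
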
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/include/asm-xen/asm-i386/pgtable.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/pgtable.h	Tue Aug 9 15:17:45 2005
@@ -0,0 +1,506 @@
+#ifndef _I386_PGTABLE_H
+#define _I386_PGTABLE_H
+
+#include <linux/config.h>
+#include <asm-xen/hypervisor.h>
+
+/*
+ * The Linux memory management assumes a three-level page table setup. On
+ * the i386, we use that, but "fold" the mid level into the top-level page
+ * table, so that we physically have the same two-level page table as the
+ * i386 mmu expects.
+ *
+ * This file contains the functions and defines necessary to modify and use
+ * the i386 page table tree.
+ */
+#ifndef __ASSEMBLY__
+#include <asm/processor.h>
+#include <asm/fixmap.h>
+#include <linux/threads.h>
+
+#ifndef _I386_BITOPS_H
+#include <asm/bitops.h>
+#endif
+
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+extern unsigned long empty_zero_page[1024];
+extern pgd_t swapper_pg_dir[1024];
+extern kmem_cache_t *pgd_cache;
+extern kmem_cache_t *pmd_cache;
+extern kmem_cache_t *pte_cache;
+extern spinlock_t pgd_lock;
+extern struct page *pgd_list;
+
+void pte_ctor(void *, kmem_cache_t *, unsigned long);
+void pte_dtor(void *, kmem_cache_t *, unsigned long);
+void pmd_ctor(void *, kmem_cache_t *, unsigned long);
+void pgd_ctor(void *, kmem_cache_t *, unsigned long);
+void pgd_dtor(void *, kmem_cache_t *, unsigned long);
+void pgtable_cache_init(void);
+void paging_init(void);
+
+/*
+ * The Linux x86 paging architecture is 'compile-time dual-mode', it
+ * implements both the traditional 2-level x86 page tables and the
+ * newer 3-level PAE-mode page tables.
+ */
+#ifdef CONFIG_X86_PAE
+# include <asm/pgtable-3level-defs.h>
+# define PMD_SIZE (1UL << PMD_SHIFT)
+# define PMD_MASK (~(PMD_SIZE-1))
+#else
+# include <asm/pgtable-2level-defs.h>
+#endif
+
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
+#define FIRST_USER_PGD_NR 0
+
+#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
+#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
+
+#define TWOLEVEL_PGDIR_SHIFT 22
+#define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT)
+#define BOOT_KERNEL_PGD_PTRS (1024-BOOT_USER_PGD_PTRS)
+
+/* Just any arbitrary offset to the start of the vmalloc VM area: the
+ * current 8MB value just means that there will be a 8MB "hole" after the
+ * physical memory until the kernel virtual memory starts. That means that
+ * any out-of-bounds memory accesses will hopefully be caught.
+ * The vmalloc() routines leaves a hole of 4kB between each vmalloced
+ * area for the same reason. ;)
+ */
+#define VMALLOC_OFFSET (8*1024*1024)
+#define VMALLOC_START (((unsigned long) high_memory + vmalloc_earlyreserve + \
+ 2*VMALLOC_OFFSET-1) & ~(VMALLOC_OFFSET-1))
+#ifdef CONFIG_HIGHMEM
+# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
+#else
+# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
+#endif
+
+extern void *high_memory;
+extern unsigned long vmalloc_earlyreserve;
+
+/*
+ * The 4MB page is guessing.. Detailed in the infamous "Chapter H"
+ * of the Pentium details, but assuming intel did the straightforward
+ * thing, this bit set in the page directory entry just means that
+ * the page directory entry points directly to a 4MB-aligned block of
+ * memory.
+ */
+#define _PAGE_BIT_PRESENT 0
+#define _PAGE_BIT_RW 1
+#define _PAGE_BIT_USER 2
+#define _PAGE_BIT_PWT 3
+#define _PAGE_BIT_PCD 4
+#define _PAGE_BIT_ACCESSED 5
+#define _PAGE_BIT_DIRTY 6
+#define _PAGE_BIT_PSE		7	/* 4 MB (or 2MB) page, Pentium+, if present.. */
+#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
+#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
+#define _PAGE_BIT_UNUSED2 10
+#define _PAGE_BIT_UNUSED3 11
+#define _PAGE_BIT_NX 63
+
+#define _PAGE_PRESENT 0x001
+#define _PAGE_RW 0x002
+#define _PAGE_USER 0x004
+#define _PAGE_PWT 0x008
+#define _PAGE_PCD 0x010
+#define _PAGE_ACCESSED 0x020
+#define _PAGE_DIRTY 0x040
+#define _PAGE_PSE 0x080 /* 4 MB (or 2MB) page, Pentium+, if present.. */
+#define _PAGE_GLOBAL 0x100 /* Global TLB entry PPro+ */
+#define _PAGE_UNUSED1 0x200 /* available for programmer */
+#define _PAGE_UNUSED2 0x400
+#define _PAGE_UNUSED3 0x800
+
+#define _PAGE_FILE 0x040 /* set:pagecache unset:swap */
+#define _PAGE_PROTNONE 0x080 /* If not present */
+#ifdef CONFIG_X86_PAE
+#define _PAGE_NX (1ULL<<_PAGE_BIT_NX)
+#else
+#define _PAGE_NX 0
+#endif
+
+#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+
+#define PAGE_NONE \
+ __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
+#define PAGE_SHARED \
+ __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
+
+#define PAGE_SHARED_EXEC \
+ __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
+#define PAGE_COPY_NOEXEC \
+ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
+#define PAGE_COPY_EXEC \
+ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
+#define PAGE_COPY \
+ PAGE_COPY_NOEXEC
+#define PAGE_READONLY \
+ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
+#define PAGE_READONLY_EXEC \
+ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
+
+#define _PAGE_KERNEL \
+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
+#define _PAGE_KERNEL_EXEC \
+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
+
+extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
+#define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW)
+#define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD)
+#define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
+#define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
+
+#define PAGE_KERNEL __pgprot(__PAGE_KERNEL)
+#define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO)
+#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
+#define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE)
+#define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE)
+#define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC)
+
+/*
+ * The i386 can't do page protection for execute, and treats it the
+ * same as read. Also, write permissions imply read permissions.
+ * This is the closest we can get..
+ */
+#define __P000 PAGE_NONE
+#define __P001 PAGE_READONLY
+#define __P010 PAGE_COPY
+#define __P011 PAGE_COPY
+#define __P100 PAGE_READONLY_EXEC
+#define __P101 PAGE_READONLY_EXEC
+#define __P110 PAGE_COPY_EXEC
+#define __P111 PAGE_COPY_EXEC
+
+#define __S000 PAGE_NONE
+#define __S001 PAGE_READONLY
+#define __S010 PAGE_SHARED
+#define __S011 PAGE_SHARED
+#define __S100 PAGE_READONLY_EXEC
+#define __S101 PAGE_READONLY_EXEC
+#define __S110 PAGE_SHARED_EXEC
+#define __S111 PAGE_SHARED_EXEC
+
+/*
+ * Define this if things work differently on an i386 and an i486:
+ * it will (on an i486) warn about kernel memory accesses that are
+ * done without a 'verify_area(VERIFY_WRITE,..)'
+ */
+#undef TEST_VERIFY_AREA
+
+/* The boot page tables (all created as a single array) */
+extern unsigned long pg0[];
+
+#define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
+#define pte_clear(xp) do { set_pte(xp, __pte(0)); } while (0)
+
+#define pmd_none(x) (!pmd_val(x))
+/* pmd_present doesn't just test the _PAGE_PRESENT bit since wr.p.t.
+   (writable page tables) can temporarily clear it. */
+#define pmd_present(x) (pmd_val(x))
+/* pmd_clear below */
+#define pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER & ~_PAGE_PRESENT)) != (_KERNPG_TABLE & ~_PAGE_PRESENT))
+
+
+#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
+
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
+static inline int pte_user(pte_t pte)		{ return (pte).pte_low & _PAGE_USER; }
+static inline int pte_read(pte_t pte)		{ return (pte).pte_low & _PAGE_USER; }
+static inline int pte_dirty(pte_t pte)		{ return (pte).pte_low & _PAGE_DIRTY; }
+static inline int pte_young(pte_t pte)		{ return (pte).pte_low & _PAGE_ACCESSED; }
+static inline int pte_write(pte_t pte)		{ return (pte).pte_low & _PAGE_RW; }
+
+/*
+ * The following only works if pte_present() is not true.
+ */
+static inline int pte_file(pte_t pte)		{ return (pte).pte_low & _PAGE_FILE; }
+
+static inline pte_t pte_rdprotect(pte_t pte)	{ (pte).pte_low &= ~_PAGE_USER; return pte; }
+static inline pte_t pte_exprotect(pte_t pte)	{ (pte).pte_low &= ~_PAGE_USER; return pte; }
+static inline pte_t pte_mkclean(pte_t pte)	{ (pte).pte_low &= ~_PAGE_DIRTY; return pte; }
+static inline pte_t pte_mkold(pte_t pte)	{ (pte).pte_low &= ~_PAGE_ACCESSED; return pte; }
+static inline pte_t pte_wrprotect(pte_t pte)	{ (pte).pte_low &= ~_PAGE_RW; return pte; }
+static inline pte_t pte_mkread(pte_t pte)	{ (pte).pte_low |= _PAGE_USER; return pte; }
+static inline pte_t pte_mkexec(pte_t pte)	{ (pte).pte_low |= _PAGE_USER; return pte; }
+static inline pte_t pte_mkdirty(pte_t pte)	{ (pte).pte_low |= _PAGE_DIRTY; return pte; }
+static inline pte_t pte_mkyoung(pte_t pte)	{ (pte).pte_low |= _PAGE_ACCESSED; return pte; }
+static inline pte_t pte_mkwrite(pte_t pte)	{ (pte).pte_low |= _PAGE_RW; return pte; }
+
+#ifdef CONFIG_X86_PAE
+# include <asm/pgtable-3level.h>
+#else
+# include <asm/pgtable-2level.h>
+#endif
+
+static inline int ptep_test_and_clear_dirty(pte_t *ptep)
+{
+ pte_t pte = *ptep;
+ int ret = pte_dirty(pte);
+ if (ret)
+ xen_l1_entry_update(ptep, pte_mkclean(pte).pte_low);
+ return ret;
+}
+
+static inline int ptep_test_and_clear_young(pte_t *ptep)
+{
+ pte_t pte = *ptep;
+ int ret = pte_young(pte);
+ if (ret)
+ xen_l1_entry_update(ptep, pte_mkold(pte).pte_low);
+ return ret;
+}
+
+static inline void ptep_set_wrprotect(pte_t *ptep)
+{
+ pte_t pte = *ptep;
+ if (pte_write(pte))
+ set_pte(ptep, pte_wrprotect(pte));
+}
+
+static inline void ptep_mkdirty(pte_t *ptep)
+{
+ pte_t pte = *ptep;
+ if (!pte_dirty(pte))
+ xen_l1_entry_update(ptep, pte_mkdirty(pte).pte_low);
+}
+
+/*
+ * Macro to mark a page protection value as "uncacheable". On
+ * processors which do not support it, this is a no-op.
+ */
+#define pgprot_noncached(prot)	((boot_cpu_data.x86 > 3)					  \
+					 ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) : (prot))
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+
+#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
+#define mk_pte_huge(entry) ((entry).pte_low |= _PAGE_PRESENT | _PAGE_PSE)
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ pte.pte_low &= _PAGE_CHG_MASK;
+ pte.pte_low |= pgprot_val(newprot);
+#ifdef CONFIG_X86_PAE
+ /*
+ * Chop off the NX bit (if present), and add the NX portion of
+ * the newprot (if present):
+ */
+ pte.pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
+ pte.pte_high |= (pgprot_val(newprot) >> 32) & \
+ (__supported_pte_mask >> 32);
+#endif
+ return pte;
+}
+
+#define page_pte(page) page_pte_prot(page, __pgprot(0))
+
+#define pmd_clear(xp) do { \
+ set_pmd(xp, __pmd(0)); \
+ xen_flush_page_update_queue(); \
+} while (0)
+
+#define pmd_large(pmd) \
+((pmd_val(pmd) & (_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT))
+
+/*
+ * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD]
+ *
+ * this macro returns the index of the entry in the pgd page which would
+ * control the given virtual address
+ */
+#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
+#define pgd_index_k(addr) pgd_index(addr)
+
+/*
+ * pgd_offset() returns a (pgd_t *)
+ * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
+ */
+#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
+
+/*
+ * a shortcut which implies the use of the kernel's pgd, instead
+ * of a process's
+ */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
+/*
+ * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
+ *
+ * this macro returns the index of the entry in the pmd page which would
+ * control the given virtual address
+ */
+#define pmd_index(address) \
+ (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
+
+/*
+ * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
+ *
+ * this macro returns the index of the entry in the pte page which would
+ * control the given virtual address
+ */
+#define pte_index(address) \
+ (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_offset_kernel(dir, address) \
+ ((pte_t *) pmd_page_kernel(*(dir)) + pte_index(address))
+
+/*
+ * Helper function that returns the kernel pagetable entry controlling
+ * the virtual address 'address'. NULL means no pagetable entry present.
+ * NOTE: the return type is pte_t but if the pmd is PSE then we return it
+ * as a pte too.
+ */
+extern pte_t *lookup_address(unsigned long address);
+
+/*
+ * Make a given kernel text page executable/non-executable.
+ * Returns the previous executability setting of that page (which
+ * is used to restore the previous state). Used by the SMP bootup code.
+ * NOTE: this is an __init function for security reasons.
+ */
+#ifdef CONFIG_X86_PAE
+ extern int set_kernel_exec(unsigned long vaddr, int enable);
+#else
+	static inline int set_kernel_exec(unsigned long vaddr, int enable) { return 0; }
+#endif
+
+extern void noexec_setup(const char *str);
+
+#if defined(CONFIG_HIGHPTE)
+#define pte_offset_map(dir, address) \
+ ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + \
+ pte_index(address))
+#define pte_offset_map_nested(dir, address) \
+ ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE1) + \
+ pte_index(address))
+#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
+#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
+#else
+#define pte_offset_map(dir, address) \
+ ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
+#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
+#define pte_unmap(pte) do { } while (0)
+#define pte_unmap_nested(pte) do { } while (0)
+#endif
+
+/*
+ * The i386 doesn't have any external MMU info: the kernel page
+ * tables contain all the necessary information.
+ *
+ * Also, we only update the dirty/accessed state if we set
+ * the dirty bit by hand in the kernel, since the hardware
+ * will do the accessed bit for us, and we don't want to
+ * race with other CPU's that might be updating the dirty
+ * bit at the same time.
+ */
+#define update_mmu_cache(vma,address,pte) do { } while (0)
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
+ do { \
+ if (__dirty) { \
+ if ( likely((__vma)->vm_mm == current->mm) ) { \
+ xen_flush_page_update_queue(); \
+			HYPERVISOR_update_va_mapping((__address)>>PAGE_SHIFT, (__entry), UVMF_INVLPG); \
+ } else { \
+ xen_l1_entry_update((__ptep), (__entry).pte_low); \
+ flush_tlb_page((__vma), (__address)); \
+ } \
+ } \
+ } while (0)
+
+#define __HAVE_ARCH_PTEP_ESTABLISH
+#define ptep_establish(__vma, __address, __ptep, __entry) \
+do { \
+ ptep_set_access_flags(__vma, __address, __ptep, __entry, 1); \
+} while (0)
+
+#define __HAVE_ARCH_PTEP_ESTABLISH_NEW
+#define ptep_establish_new(__vma, __address, __ptep, __entry) \
+do { \
+ if (likely((__vma)->vm_mm == current->mm)) { \
+ xen_flush_page_update_queue(); \
+ HYPERVISOR_update_va_mapping((__address)>>PAGE_SHIFT, \
+ __entry, 0); \
+ } else { \
+ xen_l1_entry_update((__ptep), (__entry).pte_low); \
+ } \
+} while (0)
+
+/* NOTE: make_page* callers must call flush_page_update_queue() */
+void make_lowmem_page_readonly(void *va);
+void make_lowmem_page_writable(void *va);
+void make_page_readonly(void *va);
+void make_page_writable(void *va);
+void make_pages_readonly(void *va, unsigned int nr);
+void make_pages_writable(void *va, unsigned int nr);
+
+#define virt_to_ptep(__va) \
+({ \
+ pgd_t *__pgd = pgd_offset_k((unsigned long)(__va)); \
+ pud_t *__pud = pud_offset(__pgd, (unsigned long)(__va)); \
+ pmd_t *__pmd = pmd_offset(__pud, (unsigned long)(__va)); \
+ pte_offset_kernel(__pmd, (unsigned long)(__va)); \
+})
+
+#define arbitrary_virt_to_machine(__va)					\
+({ \
+ pte_t *__pte = virt_to_ptep(__va); \
+ unsigned long __pa = (*(unsigned long *)__pte) & PAGE_MASK; \
+ __pa | ((unsigned long)(__va) & (PAGE_SIZE-1)); \
+})
+
+#endif /* !__ASSEMBLY__ */
+
+#ifndef CONFIG_DISCONTIGMEM
+#define kern_addr_valid(addr) (1)
+#endif /* !CONFIG_DISCONTIGMEM */
+
+#define DOMID_LOCAL (0xFFFFU)
+int direct_remap_area_pages(struct mm_struct *mm,
+ unsigned long address,
+ unsigned long machine_addr,
+ unsigned long size,
+ pgprot_t prot,
+ domid_t domid);
+int __direct_remap_area_pages(struct mm_struct *mm,
+ unsigned long address,
+ unsigned long size,
+ mmu_update_t *v);
+
+#define io_remap_page_range(vma,from,phys,size,prot) \
+direct_remap_area_pages(vma->vm_mm,from,phys,size,prot,DOMID_IO)
+
+#define io_remap_pfn_range(vma,from,pfn,size,prot) \
+direct_remap_area_pages(vma->vm_mm,from,pfn<<PAGE_SHIFT,size,prot,DOMID_IO)
+
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+#define __HAVE_ARCH_PTEP_MKDIRTY
+#define __HAVE_ARCH_PTE_SAME
+#include <asm-generic/pgtable.h>
+
+#endif /* _I386_PGTABLE_H */
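
Worked example of the pgd_index()/pte_index() arithmetic defined above;
the sample address assumes the usual 3GB PAGE_OFFSET split:

    /* build: cc -o ptidx ptidx.c */
    #include <stdio.h>

    #define PAGE_SHIFT	12
    #define PGDIR_SHIFT	22
    #define PTRS_PER_PGD	1024
    #define PTRS_PER_PTE	1024

    #define pgd_index(a)	(((a) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
    #define pte_index(a)	(((a) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

    int main(void)
    {
    	unsigned long va = 0xC0123456UL;	/* a kernel address */

    	printf("va 0x%lx -> pgd slot %lu, pte slot %lu, page offset 0x%lx\n",
    	       va, pgd_index(va), pte_index(va), va & 0xFFFUL);
    	/* prints: pgd slot 768, pte slot 291, page offset 0x456 */
    	return 0;
    }
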
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/include/asm-xen/asm-i386/processor.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/processor.h	Tue Aug 9 15:17:45 2005
@@ -0,0 +1,684 @@
+/*
+ * include/asm-i386/processor.h
+ *
+ * Copyright (C) 1994 Linus Torvalds
+ */
+
+#ifndef __ASM_I386_PROCESSOR_H
+#define __ASM_I386_PROCESSOR_H
+
+#include <asm/vm86.h>
+#include <asm/math_emu.h>
+#include <asm/segment.h>
+#include <asm/page.h>
+#include <asm/types.h>
+#include <asm/sigcontext.h>
+#include <asm/cpufeature.h>
+#include <asm/msr.h>
+#include <asm/system.h>
+#include <linux/cache.h>
+#include <linux/config.h>
+#include <linux/threads.h>
+#include <asm/percpu.h>
+
+/* flag for disabling the tsc */
+extern int tsc_disable;
+
+struct desc_struct {
+ unsigned long a,b;
+};
+
+#define desc_empty(desc) \
+ (!((desc)->a + (desc)->b))
+
+#define desc_equal(desc1, desc2) \
+ (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ */
+#define current_text_addr() ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })
+
+/*
+ * CPU type and hardware bug flags. Kept separately for each CPU.
+ * Members of this structure are referenced in head.S, so think twice
+ * before touching them. [mj]
+ */
+
+struct cpuinfo_x86 {
+ __u8 x86; /* CPU family */
+ __u8 x86_vendor; /* CPU vendor */
+ __u8 x86_model;
+ __u8 x86_mask;
+ char wp_works_ok; /* It doesn't on 386's */
+ char hlt_works_ok; /* Problems on some 486Dx4's and old 386's */
+ char hard_math;
+ char rfu;
+	int	cpuid_level;	/* Maximum supported CPUID level, -1=no CPUID */
+ unsigned long x86_capability[NCAPINTS];
+ char x86_vendor_id[16];
+ char x86_model_id[64];
+ int x86_cache_size; /* in KB - valid for CPUS which support this
+ call */
+ int x86_cache_alignment; /* In bytes */
+ int fdiv_bug;
+ int f00f_bug;
+ int coma_bug;
+ unsigned long loops_per_jiffy;
+ unsigned char x86_num_cores;
+} __attribute__((__aligned__(SMP_CACHE_BYTES)));
+
+#define X86_VENDOR_INTEL 0
+#define X86_VENDOR_CYRIX 1
+#define X86_VENDOR_AMD 2
+#define X86_VENDOR_UMC 3
+#define X86_VENDOR_NEXGEN 4
+#define X86_VENDOR_CENTAUR 5
+#define X86_VENDOR_RISE 6
+#define X86_VENDOR_TRANSMETA 7
+#define X86_VENDOR_NSC 8
+#define X86_VENDOR_NUM 9
+#define X86_VENDOR_UNKNOWN 0xff
+
+/*
+ * capabilities of CPUs
+ */
+
+extern struct cpuinfo_x86 boot_cpu_data;
+extern struct cpuinfo_x86 new_cpu_data;
+extern struct tss_struct doublefault_tss;
+DECLARE_PER_CPU(struct tss_struct, init_tss);
+extern pgd_t *cur_pgd; /* XXXsmp */
+
+#ifdef CONFIG_SMP
+extern struct cpuinfo_x86 cpu_data[];
+#define current_cpu_data cpu_data[smp_processor_id()]
+#else
+#define cpu_data (&boot_cpu_data)
+#define current_cpu_data boot_cpu_data
+#endif
+
+extern int phys_proc_id[NR_CPUS];
+extern char ignore_fpu_irq;
+
+extern void identify_cpu(struct cpuinfo_x86 *);
+extern void print_cpu_info(struct cpuinfo_x86 *);
+extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
+extern void dodgy_tsc(void);
+
+#ifdef CONFIG_X86_HT
+extern void detect_ht(struct cpuinfo_x86 *c);
+#else
+static inline void detect_ht(struct cpuinfo_x86 *c) {}
+#endif
+
+/*
+ * EFLAGS bits
+ */
+#define X86_EFLAGS_CF 0x00000001 /* Carry Flag */
+#define X86_EFLAGS_PF 0x00000004 /* Parity Flag */
+#define X86_EFLAGS_AF	0x00000010 /* Auxiliary carry Flag */
+#define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */
+#define X86_EFLAGS_SF 0x00000080 /* Sign Flag */
+#define X86_EFLAGS_TF 0x00000100 /* Trap Flag */
+#define X86_EFLAGS_IF 0x00000200 /* Interrupt Flag */
+#define X86_EFLAGS_DF 0x00000400 /* Direction Flag */
+#define X86_EFLAGS_OF 0x00000800 /* Overflow Flag */
+#define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */
+#define X86_EFLAGS_NT 0x00004000 /* Nested Task */
+#define X86_EFLAGS_RF 0x00010000 /* Resume Flag */
+#define X86_EFLAGS_VM 0x00020000 /* Virtual Mode */
+#define X86_EFLAGS_AC 0x00040000 /* Alignment Check */
+#define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */
+#define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */
+#define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */
+
+/*
+ * Generic CPUID function
+ * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
+ * resulting in stale register contents being returned.
+ */
+static inline void cpuid(int op, int *eax, int *ebx, int *ecx, int *edx)
+{
+ __asm__("cpuid"
+ : "=a" (*eax),
+ "=b" (*ebx),
+ "=c" (*ecx),
+ "=d" (*edx)
+ : "0" (op), "c"(0));
+}
+
+/*
+ * CPUID functions returning a single datum
+ */
+static inline unsigned int cpuid_eax(unsigned int op)
+{
+ unsigned int eax;
+
+ __asm__("cpuid"
+ : "=a" (eax)
+ : "0" (op)
+ : "bx", "cx", "dx");
+ return eax;
+}
+static inline unsigned int cpuid_ebx(unsigned int op)
+{
+ unsigned int eax, ebx;
+
+ __asm__("cpuid"
+ : "=a" (eax), "=b" (ebx)
+ : "0" (op)
+ : "cx", "dx" );
+ return ebx;
+}
+static inline unsigned int cpuid_ecx(unsigned int op)
+{
+ unsigned int eax, ecx;
+
+ __asm__("cpuid"
+ : "=a" (eax), "=c" (ecx)
+ : "0" (op)
+ : "bx", "dx" );
+ return ecx;
+}
+static inline unsigned int cpuid_edx(unsigned int op)
+{
+ unsigned int eax, edx;
+
+ __asm__("cpuid"
+ : "=a" (eax), "=d" (edx)
+ : "0" (op)
+ : "bx", "cx");
+ return edx;
+}
+
+#define load_cr3(pgdir) do { \
+ queue_pt_switch(__pa(pgdir)); \
+ cur_pgd = pgdir; /* XXXsmp */ \
+} while (/* CONSTCOND */0)
+
+
+/*
+ * Intel CPU features in CR4
+ */
+#define X86_CR4_VME 0x0001 /* enable vm86 extensions */
+#define X86_CR4_PVI 0x0002 /* virtual interrupts flag enable */
+#define X86_CR4_TSD 0x0004 /* disable time stamp at ipl 3 */
+#define X86_CR4_DE 0x0008 /* enable debugging extensions */
+#define X86_CR4_PSE 0x0010 /* enable page size extensions */
+#define X86_CR4_PAE 0x0020 /* enable physical address extensions */
+#define X86_CR4_MCE 0x0040 /* Machine check enable */
+#define X86_CR4_PGE 0x0080 /* enable global pages */
+#define X86_CR4_PCE		0x0100		/* enable performance counters at ipl 3 */
+#define X86_CR4_OSFXSR 0x0200 /* enable fast FPU save and restore */
+#define X86_CR4_OSXMMEXCPT 0x0400 /* enable unmasked SSE exceptions */
+
+/*
+ * Save the cr4 feature set we're using (ie
+ * Pentium 4MB enable and PPro Global page
+ * enable), so that any CPU's that boot up
+ * after us can get the correct flags.
+ */
+extern unsigned long mmu_cr4_features;
+
+static inline void set_in_cr4 (unsigned long mask)
+{
+ mmu_cr4_features |= mask;
+ switch (mask) {
+ case X86_CR4_OSFXSR:
+ case X86_CR4_OSXMMEXCPT:
+ break;
+ default:
+ do {
+ const char *msg = "Xen unsupported cr4 update\n";
+ (void)HYPERVISOR_console_io(
+ CONSOLEIO_write, __builtin_strlen(msg),
+ (char *)msg);
+ BUG();
+ } while (0);
+ }
+}
+
+static inline void clear_in_cr4 (unsigned long mask)
+{
+ mmu_cr4_features &= ~mask;
+ __asm__("movl %%cr4,%%eax\n\t"
+ "andl %0,%%eax\n\t"
+ "movl %%eax,%%cr4\n"
+ : : "irg" (~mask)
+ :"ax");
+}
+
+/*
+ * NSC/Cyrix CPU configuration register indexes
+ */
+
+#define CX86_PCR0 0x20
+#define CX86_GCR 0xb8
+#define CX86_CCR0 0xc0
+#define CX86_CCR1 0xc1
+#define CX86_CCR2 0xc2
+#define CX86_CCR3 0xc3
+#define CX86_CCR4 0xe8
+#define CX86_CCR5 0xe9
+#define CX86_CCR6 0xea
+#define CX86_CCR7 0xeb
+#define CX86_PCR1 0xf0
+#define CX86_DIR0 0xfe
+#define CX86_DIR1 0xff
+#define CX86_ARR_BASE 0xc4
+#define CX86_RCR_BASE 0xdc
+
+/*
+ * NSC/Cyrix CPU indexed register access macros
+ */
+
+#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })
+
+#define setCx86(reg, data) do { \
+ outb((reg), 0x22); \
+ outb((data), 0x23); \
+} while (0)
+
+static inline void __monitor(const void *eax, unsigned long ecx,
+ unsigned long edx)
+{
+ /* "monitor %eax,%ecx,%edx;" */
+ asm volatile(
+ ".byte 0x0f,0x01,0xc8;"
+ : :"a" (eax), "c" (ecx), "d"(edx));
+}
+
+static inline void __mwait(unsigned long eax, unsigned long ecx)
+{
+ /* "mwait %eax,%ecx;" */
+ asm volatile(
+ ".byte 0x0f,0x01,0xc9;"
+ : :"a" (eax), "c" (ecx));
+}
+
+/* from system description table in BIOS. Mostly for MCA use, but
+others may find it useful. */
+extern unsigned int machine_id;
+extern unsigned int machine_submodel_id;
+extern unsigned int BIOS_revision;
+extern unsigned int mca_pentium_flag;
+
+/* Boot loader type from the setup header */
+extern int bootloader_type;
+
+/*
+ * User space process size: 3GB (default).
+ */
+#define TASK_SIZE (PAGE_OFFSET)
+
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
+
+#define HAVE_ARCH_PICK_MMAP_LAYOUT
+
+/*
+ * Size of io_bitmap.
+ */
+#define IO_BITMAP_BITS 65536
+#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
+#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
+#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
+#define INVALID_IO_BITMAP_OFFSET 0x8000
+#define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000
+
+struct i387_fsave_struct {
+ long cwd;
+ long swd;
+ long twd;
+ long fip;
+ long fcs;
+ long foo;
+ long fos;
+ long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */
+ long status; /* software status information */
+};
+
+struct i387_fxsave_struct {
+ unsigned short cwd;
+ unsigned short swd;
+ unsigned short twd;
+ unsigned short fop;
+ long fip;
+ long fcs;
+ long foo;
+ long fos;
+ long mxcsr;
+ long mxcsr_mask;
+ long st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
+ long xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */
+ long padding[56];
+} __attribute__ ((aligned (16)));
+
+struct i387_soft_struct {
+ long cwd;
+ long swd;
+ long twd;
+ long fip;
+ long fcs;
+ long foo;
+ long fos;
+ long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */
+ unsigned char ftop, changed, lookahead, no_update, rm, alimit;
+ struct info *info;
+ unsigned long entry_eip;
+};
+
+union i387_union {
+ struct i387_fsave_struct fsave;
+ struct i387_fxsave_struct fxsave;
+ struct i387_soft_struct soft;
+};
+
+typedef struct {
+ unsigned long seg;
+} mm_segment_t;
+
+struct thread_struct;
+
+struct tss_struct {
+ unsigned short back_link,__blh;
+ unsigned long esp0;
+ unsigned short ss0,__ss0h;
+ unsigned long esp1;
+	unsigned short	ss1,__ss1h;	/* ss1 is used to cache MSR_IA32_SYSENTER_CS */
+ unsigned long esp2;
+ unsigned short ss2,__ss2h;
+ unsigned long __cr3;
+ unsigned long eip;
+ unsigned long eflags;
+ unsigned long eax,ecx,edx,ebx;
+ unsigned long esp;
+ unsigned long ebp;
+ unsigned long esi;
+ unsigned long edi;
+ unsigned short es, __esh;
+ unsigned short cs, __csh;
+ unsigned short ss, __ssh;
+ unsigned short ds, __dsh;
+ unsigned short fs, __fsh;
+ unsigned short gs, __gsh;
+ unsigned short ldt, __ldth;
+ unsigned short trace, io_bitmap_base;
+ /*
+ * The extra 1 is there because the CPU will access an
+ * additional byte beyond the end of the IO permission
+ * bitmap. The extra byte must be all 1 bits, and must
+ * be within the limit.
+ */
+ unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
+ /*
+ * Cache the current maximum and the last task that used the bitmap:
+ */
+ unsigned long io_bitmap_max;
+ struct thread_struct *io_bitmap_owner;
+ /*
+ * pads the TSS to be cacheline-aligned (size is 0x100)
+ */
+ unsigned long __cacheline_filler[35];
+ /*
+ * .. and then another 0x100 bytes for emergency kernel stack
+ */
+ unsigned long stack[64];
+} __attribute__((packed));
+
+#define ARCH_MIN_TASKALIGN 16
+
+struct thread_struct {
+/* cached TLS descriptors. */
+ struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
+ unsigned long esp0;
+ unsigned long sysenter_cs;
+ unsigned long eip;
+ unsigned long esp;
+ unsigned long fs;
+ unsigned long gs;
+ unsigned int io_pl;
+/* Hardware debugging registers */
+ unsigned long debugreg[8]; /* %%db0-7 debug registers */
+/* fault info */
+ unsigned long cr2, trap_no, error_code;
+/* floating point info */
+ union i387_union i387;
+/* virtual 86 mode info */
+ struct vm86_struct __user * vm86_info;
+ unsigned long screen_bitmap;
+ unsigned long v86flags, v86mask, saved_esp0;
+ unsigned int saved_fs, saved_gs;
+/* IO permissions */
+ unsigned long *io_bitmap_ptr;
+/* max allowed port in the bitmap, in bytes: */
+ unsigned long io_bitmap_max;
+};
+
+#define INIT_THREAD { \
+ .vm86_info = NULL, \
+ .sysenter_cs = __KERNEL_CS, \
+ .io_bitmap_ptr = NULL, \
+}
+
+/*
+ * Note that the .io_bitmap member must be extra-big. This is because
+ * the CPU will access an additional byte beyond the end of the IO
+ * permission bitmap. The extra byte must be all 1 bits, and must
+ * be within the limit.
+ */
+#define INIT_TSS { \
+ .esp0 = sizeof(init_stack) + (long)&init_stack, \
+ .ss0 = __KERNEL_DS, \
+ .ss1 = __KERNEL_CS, \
+ .ldt = GDT_ENTRY_LDT, \
+ .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
+ .io_bitmap = { [ 0 ... IO_BITMAP_LONGS] = ~0 }, \
+}
+
+static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread)
+{
+ tss->esp0 = thread->esp0;
+	/* This can only happen when SEP is enabled, no need to test "SEP"arately */
+ if (unlikely(tss->ss1 != thread->sysenter_cs)) {
+ tss->ss1 = thread->sysenter_cs;
+ wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
+ }
+ HYPERVISOR_stack_switch(tss->ss0, tss->esp0);
+}
+
+#define start_thread(regs, new_eip, new_esp) do { \
+ __asm__("movl %0,%%fs ; movl %0,%%gs": :"r" (0)); \
+ set_fs(USER_DS); \
+ regs->xds = __USER_DS; \
+ regs->xes = __USER_DS; \
+ regs->xss = __USER_DS; \
+ regs->xcs = __USER_CS; \
+ regs->eip = new_eip; \
+ regs->esp = new_esp; \
+} while (0)
+
+/* Forward declaration, a strange C thing */
+struct task_struct;
+struct mm_struct;
+
+/* Free all resources held by a thread. */
+extern void release_thread(struct task_struct *);
+
+/* Prepare to copy thread state - unlazy all lazy status */
+extern void prepare_to_copy(struct task_struct *tsk);
+
+/*
+ * create a kernel thread without removing it from tasklists
+ */
+extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
+
+extern unsigned long thread_saved_pc(struct task_struct *tsk);
+void show_trace(struct task_struct *task, unsigned long *stack);
+
+unsigned long get_wchan(struct task_struct *p);
+
+#define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
+#define KSTK_TOP(info) \
+({ \
+ unsigned long *__ptr = (unsigned long *)(info); \
+ (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
+})
+
+#define task_pt_regs(task) \
+({ \
+ struct pt_regs *__regs__; \
+ __regs__ = (struct pt_regs *)KSTK_TOP((task)->thread_info); \
+ __regs__ - 1; \
+})
+
+#define KSTK_EIP(task) (task_pt_regs(task)->eip)
+#define KSTK_ESP(task) (task_pt_regs(task)->esp)
+
+
+struct microcode_header {
+ unsigned int hdrver;
+ unsigned int rev;
+ unsigned int date;
+ unsigned int sig;
+ unsigned int cksum;
+ unsigned int ldrver;
+ unsigned int pf;
+ unsigned int datasize;
+ unsigned int totalsize;
+ unsigned int reserved[3];
+};
+
+struct microcode {
+ struct microcode_header hdr;
+ unsigned int bits[0];
+};
+
+typedef struct microcode microcode_t;
+typedef struct microcode_header microcode_header_t;
+
+/* microcode format is extended from prescott processors */
+struct extended_signature {
+ unsigned int sig;
+ unsigned int pf;
+ unsigned int cksum;
+};
+
+struct extended_sigtable {
+ unsigned int count;
+ unsigned int cksum;
+ unsigned int reserved[3];
+ struct extended_signature sigs[0];
+};
+/* '6' because it used to be for P6 only (but now covers Pentium 4 as well) */
+#define MICROCODE_IOCFREE _IO('6',0)
+
+/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
+static inline void rep_nop(void)
+{
+ __asm__ __volatile__("rep;nop": : :"memory");
+}
+
+#define cpu_relax() rep_nop()
+
+/* generic versions from gas */
+#define GENERIC_NOP1 ".byte 0x90\n"
+#define GENERIC_NOP2 ".byte 0x89,0xf6\n"
+#define GENERIC_NOP3 ".byte 0x8d,0x76,0x00\n"
+#define GENERIC_NOP4 ".byte 0x8d,0x74,0x26,0x00\n"
+#define GENERIC_NOP5 GENERIC_NOP1 GENERIC_NOP4
+#define GENERIC_NOP6 ".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n"
+#define GENERIC_NOP7 ".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n"
+#define GENERIC_NOP8 GENERIC_NOP1 GENERIC_NOP7
+
+/* Opteron nops */
+#define K8_NOP1 GENERIC_NOP1
+#define K8_NOP2 ".byte 0x66,0x90\n"
+#define K8_NOP3 ".byte 0x66,0x66,0x90\n"
+#define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n"
+#define K8_NOP5 K8_NOP3 K8_NOP2
+#define K8_NOP6 K8_NOP3 K8_NOP3
+#define K8_NOP7 K8_NOP4 K8_NOP3
+#define K8_NOP8 K8_NOP4 K8_NOP4
+
+/* K7 nops */
+/* uses eax dependencies (arbitrary choice) */
+#define K7_NOP1 GENERIC_NOP1
+#define K7_NOP2 ".byte 0x8b,0xc0\n"
+#define K7_NOP3 ".byte 0x8d,0x04,0x20\n"
+#define K7_NOP4 ".byte 0x8d,0x44,0x20,0x00\n"
+#define K7_NOP5 K7_NOP4 ASM_NOP1
+#define K7_NOP6 ".byte 0x8d,0x80,0,0,0,0\n"
+#define K7_NOP7 ".byte 0x8D,0x04,0x05,0,0,0,0\n"
+#define K7_NOP8 K7_NOP7 ASM_NOP1
+
+#ifdef CONFIG_MK8
+#define ASM_NOP1 K8_NOP1
+#define ASM_NOP2 K8_NOP2
+#define ASM_NOP3 K8_NOP3
+#define ASM_NOP4 K8_NOP4
+#define ASM_NOP5 K8_NOP5
+#define ASM_NOP6 K8_NOP6
+#define ASM_NOP7 K8_NOP7
+#define ASM_NOP8 K8_NOP8
+#elif defined(CONFIG_MK7)
+#define ASM_NOP1 K7_NOP1
+#define ASM_NOP2 K7_NOP2
+#define ASM_NOP3 K7_NOP3
+#define ASM_NOP4 K7_NOP4
+#define ASM_NOP5 K7_NOP5
+#define ASM_NOP6 K7_NOP6
+#define ASM_NOP7 K7_NOP7
+#define ASM_NOP8 K7_NOP8
+#else
+#define ASM_NOP1 GENERIC_NOP1
+#define ASM_NOP2 GENERIC_NOP2
+#define ASM_NOP3 GENERIC_NOP3
+#define ASM_NOP4 GENERIC_NOP4
+#define ASM_NOP5 GENERIC_NOP5
+#define ASM_NOP6 GENERIC_NOP6
+#define ASM_NOP7 GENERIC_NOP7
+#define ASM_NOP8 GENERIC_NOP8
+#endif
+
+#define ASM_NOP_MAX 8
+
+/* Prefetch instructions for Pentium III and AMD Athlon */
+/* It's not worth caring about 3dnow! prefetches for the K6
+   because they are microcoded there and very slow.
+   However, we don't currently do prefetches for pre-XP Athlons;
+   that should be fixed. */
+#define ARCH_HAS_PREFETCH
+extern inline void prefetch(const void *x)
+{
+ alternative_input(ASM_NOP4,
+ "prefetchnta (%1)",
+ X86_FEATURE_XMM,
+ "r" (x));
+}
+
+#define ARCH_HAS_PREFETCH
+#define ARCH_HAS_PREFETCHW
+#define ARCH_HAS_SPINLOCK_PREFETCH
+
+/* 3dnow! prefetch to get an exclusive cache line. Useful for
+ spinlocks to avoid one state transition in the cache coherency protocol. */
+extern inline void prefetchw(const void *x)
+{
+ alternative_input(ASM_NOP4,
+ "prefetchw (%1)",
+ X86_FEATURE_3DNOW,
+ "r" (x));
+}
+#define spin_lock_prefetch(x) prefetchw(x)
+
+extern void select_idle_routine(const struct cpuinfo_x86 *c);
+
+#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
+
+extern unsigned long boot_option_idle_override;
+
+#endif /* __ASM_I386_PROCESSOR_H */
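
The cpuid() helper above can be exercised directly from user space on an
x86 host; the EBX,EDX,ECX layout of the vendor string is architectural.
A minimal sketch:

    /* build: cc -o vendor vendor.c   (x86/x86-64 host) */
    #include <stdio.h>
    #include <string.h>

    static void cpuid(int op, int *eax, int *ebx, int *ecx, int *edx)
    {
    	__asm__("cpuid"
    		: "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
    		: "0" (op), "c" (0));
    }

    int main(void)
    {
    	int a, b, c, d;
    	char vendor[13];

    	cpuid(0, &a, &b, &c, &d);
    	memcpy(vendor + 0, &b, 4);	/* "Genu" / "Auth" / ... */
    	memcpy(vendor + 4, &d, 4);
    	memcpy(vendor + 8, &c, 4);
    	vendor[12] = '\0';
    	printf("max leaf %d, vendor \"%s\"\n", a, vendor);
    	return 0;
    }
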
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/include/asm-xen/asm-i386/ptrace.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/ptrace.h	Tue Aug 9 15:17:45 2005
@@ -0,0 +1,69 @@
+#ifndef _I386_PTRACE_H
+#define _I386_PTRACE_H
+
+#define EBX 0
+#define ECX 1
+#define EDX 2
+#define ESI 3
+#define EDI 4
+#define EBP 5
+#define EAX 6
+#define DS 7
+#define ES 8
+#define FS 9
+#define GS 10
+#define ORIG_EAX 11
+#define EIP 12
+#define CS 13
+#define EFL 14
+#define UESP 15
+#define SS 16
+#define FRAME_SIZE 17
+
+/* this struct defines the way the registers are stored on the
+ stack during a system call. */
+
+struct pt_regs {
+ long ebx;
+ long ecx;
+ long edx;
+ long esi;
+ long edi;
+ long ebp;
+ long eax;
+ int xds;
+ int xes;
+ long orig_eax;
+ long eip;
+ int xcs;
+ long eflags;
+ long esp;
+ int xss;
+};
+
+/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
+#define PTRACE_GETREGS 12
+#define PTRACE_SETREGS 13
+#define PTRACE_GETFPREGS 14
+#define PTRACE_SETFPREGS 15
+#define PTRACE_GETFPXREGS 18
+#define PTRACE_SETFPXREGS 19
+
+#define PTRACE_OLDSETOPTIONS 21
+
+#define PTRACE_GET_THREAD_AREA 25
+#define PTRACE_SET_THREAD_AREA 26
+
+#ifdef __KERNEL__
+struct task_struct;
+extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code);
+#define user_mode(regs) ((VM_MASK & (regs)->eflags) || (2 & (regs)->xcs))
+#define instruction_pointer(regs) ((regs)->eip)
+#if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER)
+extern unsigned long profile_pc(struct pt_regs *regs);
+#else
+#define profile_pc(regs) instruction_pointer(regs)
+#endif
+#endif
+
+#endif
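
The pt_regs layout above is what a debugger manipulates through the
standard ptrace requests. A minimal user-space sketch using the regular
Linux ptrace(2) API (nothing Xen-specific):

    /* build: cc -o regs regs.c   (x86 Linux) */
    #include <stdio.h>
    #include <sys/ptrace.h>
    #include <sys/user.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(void)
    {
    	pid_t pid = fork();

    	if (pid == 0) {			/* child: ask to be traced */
    		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
    		execlp("true", "true", (char *)NULL);
    		_exit(1);
    	}
    	waitpid(pid, NULL, 0);		/* child stops at exec */

    	struct user_regs_struct regs;
    	if (ptrace(PTRACE_GETREGS, pid, NULL, &regs) == 0)
    #ifdef __i386__
    		printf("eip=%#lx esp=%#lx\n", (long)regs.eip, (long)regs.esp);
    #else
    		printf("rip=%#llx rsp=%#llx\n",
    		       (unsigned long long)regs.rip,
    		       (unsigned long long)regs.rsp);
    #endif
    	ptrace(PTRACE_CONT, pid, NULL, NULL);
    	waitpid(pid, NULL, 0);
    	return 0;
    }
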
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/include/asm-xen/asm-i386/segment.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/segment.h	Tue Aug 9 15:17:45 2005
@@ -0,0 +1,96 @@
+#ifndef _ASM_SEGMENT_H
+#define _ASM_SEGMENT_H
+
+/*
+ * The layout of the per-CPU GDT under Linux:
+ *
+ * 0 - null
+ * 1 - reserved
+ * 2 - reserved
+ * 3 - reserved
+ *
+ * 4 - unused <==== new cacheline
+ * 5 - unused
+ *
+ * ------- start of TLS (Thread-Local Storage) segments:
+ *
+ * 6 - TLS segment #1 [ glibc's TLS segment ]
+ * 7 - TLS segment #2 [ Wine's %fs Win32 segment ]
+ * 8 - TLS segment #3
+ * 9 - reserved
+ * 10 - reserved
+ * 11 - reserved
+ *
+ * ------- start of kernel segments:
+ *
+ * 12 - kernel code segment <==== new cacheline
+ * 13 - kernel data segment
+ * 14 - default user CS
+ * 15 - default user DS
+ * 16 - TSS
+ * 17 - LDT
+ * 18 - PNPBIOS support (16->32 gate)
+ * 19 - PNPBIOS support
+ * 20 - PNPBIOS support
+ * 21 - PNPBIOS support
+ * 22 - PNPBIOS support
+ * 23 - APM BIOS support
+ * 24 - APM BIOS support
+ * 25 - APM BIOS support
+ *
+ * 26 - unused
+ * 27 - unused
+ * 28 - unused
+ * 29 - unused
+ * 30 - unused
+ * 31 - TSS for double fault handler
+ */
+#define GDT_ENTRY_TLS_ENTRIES 3
+#define GDT_ENTRY_TLS_MIN 6
+#define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
+
+#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
+
+#define GDT_ENTRY_DEFAULT_USER_CS 14
+#define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS * 8 + 3)
+
+#define GDT_ENTRY_DEFAULT_USER_DS 15
+#define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS * 8 + 3)
+
+#define GDT_ENTRY_KERNEL_BASE 12
+
+#define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
+#define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8 + 1)
+
+#define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
+#define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8 + 1)
+
+#define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
+#define GDT_ENTRY_LDT (GDT_ENTRY_KERNEL_BASE + 5)
+
+#define GDT_ENTRY_PNPBIOS_BASE (GDT_ENTRY_KERNEL_BASE + 6)
+#define GDT_ENTRY_APMBIOS_BASE (GDT_ENTRY_KERNEL_BASE + 11)
+
+#define GDT_ENTRY_DOUBLEFAULT_TSS 31
+
+/*
+ * The GDT has LAST_RESERVED_GDT_ENTRY + 1 entries
+ */
+#define GDT_ENTRIES (LAST_RESERVED_GDT_ENTRY + 1)
+
+#define GDT_SIZE (GDT_ENTRIES * 8)
+
+/* Simple and small GDT entries for booting only */
+
+#define __BOOT_CS FLAT_GUESTOS_CS
+
+#define __BOOT_DS FLAT_GUESTOS_DS
+
+/*
+ * The interrupt descriptor table has room for 256 idt's,
+ * the global descriptor table is dependent on the number
+ * of tasks we can have..
+ */
+#define IDT_ENTRIES 256
+
+#endif
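
The selector values implied by the layout above are easy to verify by
hand. Note the RPL of 1 on the kernel selectors: a paravirtualised
guest kernel runs in ring 1 under Xen/x86-32, where native Linux would
use 0x60/0x68 (ring 0). A quick check:

    /* build: cc -o sel sel.c */
    #include <stdio.h>

    int main(void)
    {
    	/* selector = index * 8 | table (0 = GDT) | RPL */
    	printf("__KERNEL_CS = %#x\n", 12 * 8 + 1);	/* 0x61, ring 1 */
    	printf("__KERNEL_DS = %#x\n", 13 * 8 + 1);	/* 0x69, ring 1 */
    	printf("__USER_CS   = %#x\n", 14 * 8 + 3);	/* 0x73, ring 3 */
    	printf("__USER_DS   = %#x\n", 15 * 8 + 3);	/* 0x7b, ring 3 */
    	return 0;
    }
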
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/include/asm-xen/asm-i386/setup.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/setup.h	Tue Aug 9 15:17:45 2005
@@ -0,0 +1,66 @@
+/*
+ * Just a place holder. We don't want to have to test x86 before
+ * we include stuff
+ */
+
+#ifndef _i386_SETUP_H
+#define _i386_SETUP_H
+
+#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
+#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
+#define PFN_PHYS(x) ((x) << PAGE_SHIFT)
+
+/*
+ * Reserved space for vmalloc and iomap - defined in asm/page.h
+ */
+#define MAXMEM_PFN PFN_DOWN(MAXMEM)
+#define MAX_NONPAE_PFN (1 << 20)
+
+#define PARAM_SIZE 2048
+#define COMMAND_LINE_SIZE 256
+
+#define OLD_CL_MAGIC_ADDR 0x90020
+#define OLD_CL_MAGIC 0xA33F
+#define OLD_CL_BASE_ADDR 0x90000
+#define OLD_CL_OFFSET 0x90022
+#define NEW_CL_POINTER 0x228 /* Relative to real mode data */
+
+#ifndef __ASSEMBLY__
+/*
+ * This is set up by the setup-routine at boot-time
+ */
+extern unsigned char boot_params[PARAM_SIZE];
+
+#define PARAM (boot_params)
+#define SCREEN_INFO (*(struct screen_info *) (PARAM+0))
+#define EXT_MEM_K (*(unsigned short *) (PARAM+2))
+#define ALT_MEM_K (*(unsigned long *) (PARAM+0x1e0))
+#define E820_MAP_NR (*(char*) (PARAM+E820NR))
+#define E820_MAP ((struct e820entry *) (PARAM+E820MAP))
+#define APM_BIOS_INFO (*(struct apm_bios_info *) (PARAM+0x40))
+#define IST_INFO (*(struct ist_info *) (PARAM+0x60))
+#define DRIVE_INFO (*(struct drive_info_struct *) (PARAM+0x80))
+#define SYS_DESC_TABLE (*(struct sys_desc_table_struct*)(PARAM+0xa0))
+#define EFI_SYSTAB ((efi_system_table_t *) *((unsigned long *)(PARAM+0x1c4)))
+#define EFI_MEMDESC_SIZE (*((unsigned long *) (PARAM+0x1c8)))
+#define EFI_MEMDESC_VERSION (*((unsigned long *) (PARAM+0x1cc)))
+#define EFI_MEMMAP ((efi_memory_desc_t *) *((unsigned long *)(PARAM+0x1d0)))
+#define EFI_MEMMAP_SIZE (*((unsigned long *) (PARAM+0x1d4)))
+#define MOUNT_ROOT_RDONLY (*(unsigned short *) (PARAM+0x1F2))
+#define RAMDISK_FLAGS (*(unsigned short *) (PARAM+0x1F8))
+#define VIDEO_MODE (*(unsigned short *) (PARAM+0x1FA))
+#define ORIG_ROOT_DEV (*(unsigned short *) (PARAM+0x1FC))
+#define AUX_DEVICE_INFO (*(unsigned char *) (PARAM+0x1FF))
+#define LOADER_TYPE (*(unsigned char *) (PARAM+0x210))
+#define KERNEL_START (*(unsigned long *) (PARAM+0x214))
+#define INITRD_START (__pa(xen_start_info.mod_start))
+#define INITRD_SIZE (xen_start_info.mod_len)
+#define EDID_INFO (*(struct edid_info *) (PARAM+0x440))
+#define EDD_NR (*(unsigned char *) (PARAM+EDDNR))
+#define EDD_MBR_SIG_NR (*(unsigned char *) (PARAM+EDD_MBR_SIG_NR_BUF))
+#define EDD_MBR_SIGNATURE ((unsigned int *) (PARAM+EDD_MBR_SIG_BUF))
+#define EDD_BUF ((struct edd_info *) (PARAM+EDDBUF))
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _i386_SETUP_H */
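
Worked example of the PFN conversion macros defined at the top of this
file:

    /* build: cc -o pfn pfn.c */
    #include <stdio.h>

    #define PAGE_SHIFT	12
    #define PAGE_SIZE	(1UL << PAGE_SHIFT)
    #define PFN_UP(x)	(((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
    #define PFN_DOWN(x)	((x) >> PAGE_SHIFT)
    #define PFN_PHYS(x)	((x) << PAGE_SHIFT)

    int main(void)
    {
    	unsigned long addr = 0x12345;

    	printf("PFN_DOWN(0x%lx) = 0x%lx\n", addr, PFN_DOWN(addr)); /* 0x12 */
    	printf("PFN_UP(0x%lx)   = 0x%lx\n", addr, PFN_UP(addr));   /* 0x13 */
    	printf("PFN_PHYS(0x12)  = 0x%lx\n", PFN_PHYS(0x12UL));     /* 0x12000 */
    	return 0;
    }
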
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/include/asm-xen/asm-i386/synch_bitops.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/synch_bitops.h	Tue Aug 9 15:17:45 2005
@@ -0,0 +1,83 @@
+#ifndef __XEN_SYNCH_BITOPS_H__
+#define __XEN_SYNCH_BITOPS_H__
+
+/*
+ * Copyright 1992, Linus Torvalds.
+ * Heavily modified to provide guaranteed strong synchronisation
+ * when communicating with Xen or other guest OSes running on other CPUs.
+ */
+
+#include <linux/config.h>
+
+#define ADDR (*(volatile long *) addr)
+
+static __inline__ void synch_set_bit(int nr, volatile void * addr)
+{
+ __asm__ __volatile__ (
+ "lock btsl %1,%0"
+ : "=m" (ADDR) : "Ir" (nr) : "memory" );
+}
+
+static __inline__ void synch_clear_bit(int nr, volatile void * addr)
+{
+ __asm__ __volatile__ (
+ "lock btrl %1,%0"
+ : "=m" (ADDR) : "Ir" (nr) : "memory" );
+}
+
+static __inline__ void synch_change_bit(int nr, volatile void * addr)
+{
+ __asm__ __volatile__ (
+ "lock btcl %1,%0"
+ : "=m" (ADDR) : "Ir" (nr) : "memory" );
+}
+
+static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
+{
+ int oldbit;
+ __asm__ __volatile__ (
+ "lock btsl %2,%1\n\tsbbl %0,%0"
+ : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
+ return oldbit;
+}
+
+static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr)
+{
+ int oldbit;
+ __asm__ __volatile__ (
+ "lock btrl %2,%1\n\tsbbl %0,%0"
+ : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
+ return oldbit;
+}
+
+static __inline__ int synch_test_and_change_bit(int nr, volatile void * addr)
+{
+ int oldbit;
+
+ __asm__ __volatile__ (
+ "lock btcl %2,%1\n\tsbbl %0,%0"
+ : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
+ return oldbit;
+}
+
+static __inline__ int synch_const_test_bit(int nr, const volatile void * addr)
+{
+ return ((1UL << (nr & 31)) &
+ (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
+}
+
+static __inline__ int synch_var_test_bit(int nr, volatile void * addr)
+{
+ int oldbit;
+ __asm__ __volatile__ (
+ "btl %2,%1\n\tsbbl %0,%0"
+ : "=r" (oldbit) : "m" (ADDR), "Ir" (nr) );
+ return oldbit;
+}
+
+#define synch_test_bit(nr,addr) \
+(__builtin_constant_p(nr) ? \
+ synch_const_test_bit((nr),(addr)) : \
+ synch_var_test_bit((nr),(addr)))
+
+#endif /* __XEN_SYNCH_BITOPS_H__ */
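
These locked primitives are what the event-channel code uses to consume
pending bits that Xen sets from outside the guest; the atomic
test-and-clear guarantees each event is observed exactly once. A
stand-alone sketch on an x86 host (the port/pending naming is
illustrative):

    /* build: cc -o synch synch.c   (x86/x86-64 host) */
    #include <stdio.h>

    #define ADDR (*(volatile long *) addr)

    static __inline__ int synch_test_and_clear_bit(int nr, volatile void *addr)
    {
    	int oldbit;
    	__asm__ __volatile__ (
    		"lock btrl %2,%1\n\tsbbl %0,%0"
    		: "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
    	return oldbit;
    }

    int main(void)
    {
    	unsigned long pending = 0;
    	int port;

    	pending |= 1UL << 3;	/* Xen would set this on event delivery */
    	for (port = 0; port < 8; port++)
    		if (synch_test_and_clear_bit(port, &pending))
    			printf("event on port %d\n", port);	/* fires once */
    	printf("pending now %#lx\n", pending);			/* 0 */
    	return 0;
    }
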
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/include/asm-xen/asm-i386/system.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/system.h	Tue Aug 9 15:17:45 2005
@@ -0,0 +1,522 @@
+#ifndef __ASM_SYSTEM_H
+#define __ASM_SYSTEM_H
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <asm/synch_bitops.h>
+#include <asm/segment.h>
+#include <asm/cpufeature.h>
+#include <asm-xen/hypervisor.h>
+#include <asm-xen/evtchn.h>
+
+#ifdef __KERNEL__
+
+struct task_struct;	/* one of the stranger aspects of C forward declarations.. */
+extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
+
+#define switch_to(prev,next,last) do { \
+ unsigned long esi,edi; \
+ asm volatile("pushfl\n\t" \
+ "pushl %%ebp\n\t" \
+ "movl %%esp,%0\n\t" /* save ESP */ \
+ "movl %5,%%esp\n\t" /* restore ESP */ \
+ "movl $1f,%1\n\t" /* save EIP */ \
+ "pushl %6\n\t" /* restore EIP */ \
+ "jmp __switch_to\n" \
+ "1:\t" \
+ "popl %%ebp\n\t" \
+ "popfl" \
+ :"=m" (prev->thread.esp),"=m" (prev->thread.eip), \
+ "=a" (last),"=S" (esi),"=D" (edi) \
+ :"m" (next->thread.esp),"m" (next->thread.eip), \
+ "2" (prev), "d" (next)); \
+} while (0)
+
+#define _set_base(addr,base) do { unsigned long __pr; \
+__asm__ __volatile__ ("movw %%dx,%1\n\t" \
+ "rorl $16,%%edx\n\t" \
+ "movb %%dl,%2\n\t" \
+ "movb %%dh,%3" \
+ :"=&d" (__pr) \
+ :"m" (*((addr)+2)), \
+ "m" (*((addr)+4)), \
+ "m" (*((addr)+7)), \
+ "0" (base) \
+ ); } while(0)
+
+#define _set_limit(addr,limit) do { unsigned long __lr; \
+__asm__ __volatile__ ("movw %%dx,%1\n\t" \
+ "rorl $16,%%edx\n\t" \
+ "movb %2,%%dh\n\t" \
+ "andb $0xf0,%%dh\n\t" \
+ "orb %%dh,%%dl\n\t" \
+ "movb %%dl,%2" \
+ :"=&d" (__lr) \
+ :"m" (*(addr)), \
+ "m" (*((addr)+6)), \
+ "0" (limit) \
+ ); } while(0)
+
+#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
+#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1)>>12 )
+
+static inline unsigned long _get_base(char * addr)
+{
+ unsigned long __base;
+ __asm__("movb %3,%%dh\n\t"
+ "movb %2,%%dl\n\t"
+ "shll $16,%%edx\n\t"
+ "movw %1,%%dx"
+ :"=&d" (__base)
+ :"m" (*((addr)+2)),
+ "m" (*((addr)+4)),
+ "m" (*((addr)+7)));
+ return __base;
+}
+
+#define get_base(ldt) _get_base( ((char *)&(ldt)) )
+
+/*
+ * Load a segment. Fall back on loading the zero
+ * segment if something goes wrong..
+ */
+#define loadsegment(seg,value) \
+ asm volatile("\n" \
+ "1:\t" \
+ "movl %0,%%" #seg "\n" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3:\t" \
+ "pushl $0\n\t" \
+ "popl %%" #seg "\n\t" \
+ "jmp 2b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n\t" \
+ ".align 4\n\t" \
+ ".long 1b,3b\n" \
+ ".previous" \
+ : :"m" (*(unsigned int *)&(value)))
+
+/*
+ * Save a segment register away
+ */
+#define savesegment(seg, value) \
+ asm volatile("movl %%" #seg ",%0":"=m" (*(int *)&(value)))
+
+/*
+ * Clear and set 'TS' bit respectively
+ */
+/* NB. 'clts' is done for us by Xen during virtual trap. */
+#define clts() ((void)0)
+#define read_cr0() \
+ BUG();
+#define write_cr0(x) \
+ BUG();
+
+#define read_cr4() \
+ BUG();
+#define write_cr4(x) \
+ BUG();
+#define stts() (HYPERVISOR_fpu_taskswitch())
+
+#endif /* __KERNEL__ */
+
+static inline void wbinvd(void)
+{
+ mmu_update_t u;
+ u.ptr = MMU_EXTENDED_COMMAND;
+ u.val = MMUEXT_FLUSH_CACHE;
+ (void)HYPERVISOR_mmu_update(&u, 1, NULL);
+}
+
+static inline unsigned long get_limit(unsigned long segment)
+{
+ unsigned long __limit;
+ __asm__("lsll %1,%0"
+ :"=r" (__limit):"r" (segment));
+ return __limit+1;
+}
+
+#define nop() __asm__ __volatile__ ("nop")
+
+#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
+
+#define tas(ptr) (xchg((ptr),1))
+
+struct __xchg_dummy { unsigned long a[100]; };
+#define __xg(x) ((struct __xchg_dummy *)(x))
+
+
+/*
+ * The semantics of XCHGCMP8B are a bit strange, this is why
+ * there is a loop and the loading of %%eax and %%edx has to
+ * be inside. This inlines well in most cases, the cached
+ * cost is around ~38 cycles. (in the future we might want
+ * to do an SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
+ * might have an implicit FPU-save as a cost, so it's not
+ * clear which path to go.)
+ *
+ * cmpxchg8b must be used with the lock prefix here to allow
+ * the instruction to be executed atomically, see page 3-102
+ * of the instruction set reference 24319102.pdf. We need
+ * the reader side to see the coherent 64bit value.
+ */
+static inline void __set_64bit (unsigned long long * ptr,
+ unsigned int low, unsigned int high)
+{
+ __asm__ __volatile__ (
+ "\n1:\t"
+ "movl (%0), %%eax\n\t"
+ "movl 4(%0), %%edx\n\t"
+ "lock cmpxchg8b (%0)\n\t"
+ "jnz 1b"
+ : /* no outputs */
+ : "D"(ptr),
+ "b"(low),
+ "c"(high)
+ : "ax","dx","memory");
+}
+
+static inline void __set_64bit_constant (unsigned long long *ptr,
+ unsigned long long value)
+{
+ __set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
+}
+#define ll_low(x) *(((unsigned int*)&(x))+0)
+#define ll_high(x) *(((unsigned int*)&(x))+1)
+
+static inline void __set_64bit_var (unsigned long long *ptr,
+ unsigned long long value)
+{
+ __set_64bit(ptr,ll_low(value), ll_high(value));
+}
+
+#define set_64bit(ptr,value) \
+(__builtin_constant_p(value) ? \
+ __set_64bit_constant(ptr, value) : \
+ __set_64bit_var(ptr, value) )
+
+#define _set_64bit(ptr,value) \
+(__builtin_constant_p(value) ? \
+ __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
+ __set_64bit(ptr, ll_low(value), ll_high(value)) )
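
[Editor's sketch] As a usage sketch, set_64bit() gives a 32-bit kernel an atomic 64-bit store, so a concurrent reader never observes a torn, half-written value (the variable name is hypothetical):

    static unsigned long long shared_counter;

    static void publish(unsigned long long v)
    {
        set_64bit(&shared_counter, v);  /* one atomic 64-bit store */
    }
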
+
+/*
+ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
+ * Note 2: xchg has a side effect, so the volatile attribute is necessary;
+ * strictly speaking the constraint is imprecise, since *ptr is really an
+ * output argument. --ANK
+ */
+static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+{
+ switch (size) {
+ case 1:
+ __asm__ __volatile__("xchgb %b0,%1"
+ :"=q" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ case 2:
+ __asm__ __volatile__("xchgw %w0,%1"
+ :"=r" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ case 4:
+ __asm__ __volatile__("xchgl %0,%1"
+ :"=r" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ }
+ return x;
+}
+
+/*
+ * Atomic compare and exchange. Compare OLD with MEM, if identical,
+ * store NEW in MEM. Return the initial value in MEM. Success is
+ * indicated by comparing RETURN with OLD.
+ */
+
+#ifdef CONFIG_X86_CMPXCHG
+#define __HAVE_ARCH_CMPXCHG 1
+#endif
+
+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+ unsigned long new, int size)
+{
+ unsigned long prev;
+ switch (size) {
+ case 1:
+ __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ case 2:
+ __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ case 4:
+ __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ }
+ return old;
+}
+
+#define cmpxchg(ptr,o,n)\
+ ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
+ (unsigned long)(n),sizeof(*(ptr))))
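
[Editor's sketch] A common pattern built on cmpxchg() is the optimistic retry loop: read the old value, compute the new one, and retry if another CPU raced with the update. A minimal sketch (the counter is hypothetical):

    static unsigned long hits;

    static void hits_add(unsigned long n)
    {
        unsigned long old;
        do {
            old = hits;                 /* snapshot current value */
        } while (cmpxchg(&hits, old, old + n) != old);  /* retry on race */
    }
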
+
+#ifdef __KERNEL__
+struct alt_instr {
+ __u8 *instr; /* original instruction */
+ __u8 *replacement;
+ __u8 cpuid; /* cpuid bit set for replacement */
+ __u8 instrlen; /* length of original instruction */
+ __u8 replacementlen; /* length of new instruction, <= instrlen */
+ __u8 pad;
+};
+#endif
+
+/*
+ * Alternative instructions for different CPU types or capabilities.
+ *
+ * This allows the kernel to use optimized instructions even in generic
+ * binary builds.
+ *
+ * The length of oldinstr must be greater than or equal to the length of
+ * newinstr. It can be padded with nops as needed.
+ *
+ * For non-barrier-like inlines please define new variants
+ * without volatile and memory clobber.
+ */
+#define alternative(oldinstr, newinstr, feature) \
+ asm volatile ("661:\n\t" oldinstr "\n662:\n" \
+ ".section .altinstructions,\"a\"\n" \
+ " .align 4\n" \
+ " .long 661b\n" /* label */ \
+ " .long 663f\n" /* new instruction */ \
+ " .byte %c0\n" /* feature bit */ \
+ " .byte 662b-661b\n" /* sourcelen */ \
+ " .byte 664f-663f\n" /* replacementlen */ \
+ ".previous\n" \
+ ".section .altinstr_replacement,\"ax\"\n" \
+ "663:\n\t" newinstr "\n664:\n" /* replacement */ \
+ ".previous" :: "i" (feature) : "memory")
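
[Editor's sketch] As a usage sketch, alternative() is exactly how the memory barriers further down are built: boot-time patching replaces the safe locked-add fallback with mfence once the CPU advertises the feature bit:

    #define my_mb() alternative("lock; addl $0,0(%%esp)", "mfence", \
                                X86_FEATURE_XMM2)
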
+
+/*
+ * Alternative inline assembly with input.
+ *
+ * Peculiarities:
+ * No memory clobber here.
+ * Argument numbers start with 1.
+ * Best is to use constraints that are fixed size (like (%1) ... "r")
+ * If you use variable sized constraints like "m" or "g" in the
+ * replacement make sure to pad to the worst case length.
+ */
+#define alternative_input(oldinstr, newinstr, feature, input...) \
+ asm volatile ("661:\n\t" oldinstr "\n662:\n" \
+ ".section .altinstructions,\"a\"\n" \
+ " .align 4\n" \
+ " .long 661b\n" /* label */ \
+ " .long 663f\n" /* new instruction */ \
+ " .byte %c0\n" /* feature bit */ \
+ " .byte 662b-661b\n" /* sourcelen */ \
+ " .byte 664f-663f\n" /* replacementlen */ \
+ ".previous\n" \
+ ".section .altinstr_replacement,\"ax\"\n" \
+ "663:\n\t" newinstr "\n664:\n" /* replacement */ \
+ ".previous" :: "i" (feature), ##input)
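
[Editor's sketch] A sketch in the spirit of the kernel's prefetch() helper, following the fixed-size "r" constraint advice above (the 4-byte nop encoding stands in for ASM_NOP4; the helper name is hypothetical):

    static inline void my_prefetch(const void *x)
    {
        alternative_input(".byte 0x8d,0x74,0x26,0x00", /* 4-byte nop */
                          "prefetchnta (%1)",          /* SSE prefetch */
                          X86_FEATURE_XMM,
                          "r" (x));
    }
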
+
+/*
+ * Force strict CPU ordering.
+ * And yes, this is required on UP too when we're talking
+ * to devices.
+ *
+ * For now, "wmb()" doesn't actually do anything, as all
+ * Intel CPU's follow what Intel calls a *Processor Order*,
+ * in which all writes are seen in the program order even
+ * outside the CPU.
+ *
+ * I expect future Intel CPU's to have a weaker ordering,
+ * but I'd also expect them to finally get their act together
+ * and add some real memory barriers if so.
+ *
+ * Some non intel clones support out of order store. wmb() ceases to be a
+ * nop for these.
+ */
+
+
+/*
+ * Actually only lfence would be needed for mb() because all stores done
+ * by the kernel should be already ordered. But keep a full barrier for now.
+ */
+
+#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
+#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
+
+/**
+ * read_barrier_depends - Flush all pending reads that subsequent reads
+ * depend on.
+ *
+ * No data-dependent reads from memory-like regions are ever reordered
+ * over this barrier. All reads preceding this primitive are guaranteed
+ * to access memory (but not necessarily other CPUs' caches) before any
+ * reads following this primitive that depend on the data returned by
+ * any of the preceding reads. This primitive is much lighter weight
+ * than rmb() on most CPUs, and is never heavier weight than rmb().
+ *
+ * These ordering constraints are respected by both the local CPU
+ * and the compiler.
+ *
+ * Ordering is not guaranteed by anything other than these primitives,
+ * not even by data dependencies. See the documentation for
+ * memory_barrier() for examples and URLs to more information.
+ *
+ * For example, the following code would force ordering (the initial
+ * value of "a" is zero, "b" is one, and "p" is "&a"):
+ *
+ * <programlisting>
+ * CPU 0 CPU 1
+ *
+ * b = 2;
+ * memory_barrier();
+ * p = &b; q = p;
+ * read_barrier_depends();
+ * d = *q;
+ * </programlisting>
+ *
+ * because the read of "*q" depends on the read of "p" and these
+ * two reads are separated by a read_barrier_depends(). However,
+ * the following code, with the same initial values for "a" and "b":
+ *
+ * <programlisting>
+ * CPU 0 CPU 1
+ *
+ * a = 2;
+ * memory_barrier();
+ * b = 3; y = b;
+ * read_barrier_depends();
+ * x = a;
+ * </programlisting>
+ *
+ * does not enforce ordering, since there is no data dependency between
+ * the read of "a" and the read of "b". Therefore, on some CPUs, such
+ * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
+ * in cases like this where there are no data dependencies.
+ **/
+
+#define read_barrier_depends() do { } while(0)
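
[Editor's sketch] Rendered as C with the smp_* wrappers defined below (using smp_wmb() in place of the text's memory_barrier()), the ordered example above is the classic pointer-publication pattern:

    /* CPU 0 (writer) */        /* CPU 1 (reader) */
    b = 2;                      q = p;
    smp_wmb();                  smp_read_barrier_depends();
    p = &b;                     d = *q;   /* guaranteed to see 2 */
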
+
+#ifdef CONFIG_X86_OOSTORE
+/* Actually there are no OOO-store-capable CPUs for now that do SSE,
+ but allow for the possibility already. */
+#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
+#else
+#define wmb() __asm__ __volatile__ ("": : :"memory")
+#endif
+
+#ifdef CONFIG_SMP
+#define smp_mb() mb()
+#define smp_rmb() rmb()
+#define smp_wmb() wmb()
+#define smp_read_barrier_depends() read_barrier_depends()
+#define set_mb(var, value) do { xchg(&var, value); } while (0)
+#else
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#define smp_read_barrier_depends() do { } while(0)
+#define set_mb(var, value) do { var = value; barrier(); } while (0)
+#endif
+
+#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+
+/* interrupt control.. */
+
+/*
+ * The use of 'barrier' in the following reflects their use as local-lock
+ * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following
+ * critical operations are executed. All critical operations must complete
+ * /before/ reentrancy is permitted (e.g., __sti()). Alpha architecture also
+ * includes these barriers, for example.
+ */
+
+#define __cli() \
+do { \
+ HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask = 1; \
+ barrier(); \
+} while (0)
+
+#define __sti() \
+do { \
+ shared_info_t *_shared = HYPERVISOR_shared_info; \
+ barrier(); \
+ _shared->vcpu_data[0].evtchn_upcall_mask = 0; \
+ barrier(); /* unmask then check (avoid races) */ \
+ if ( unlikely(_shared->vcpu_data[0].evtchn_upcall_pending) ) \
+ force_evtchn_callback(); \
+} while (0)
+
+#define __save_flags(x) \
+do { \
+ (x) = HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask; \
+} while (0)
+
+#define __restore_flags(x) \
+do { \
+ shared_info_t *_shared = HYPERVISOR_shared_info; \
+ barrier(); \
+ if ( (_shared->vcpu_data[0].evtchn_upcall_mask = (x)) == 0 ) { \
+ barrier(); /* unmask then check (avoid races) */ \
+ if ( unlikely(_shared->vcpu_data[0].evtchn_upcall_pending) ) \
+ force_evtchn_callback(); \
+ } \
+} while (0)
+
+#define safe_halt() ((void)0)
+
+#define __save_and_cli(x) \
+do { \
+ (x) = HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask; \
+ HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask = 1; \
+ barrier(); \
+} while (0)
+
+#define __save_and_sti(x) \
+do { \
+ shared_info_t *_shared = HYPERVISOR_shared_info; \
+ barrier(); \
+ (x) = _shared->vcpu_data[0].evtchn_upcall_mask; \
+ _shared->vcpu_data[0].evtchn_upcall_mask = 0; \
+ barrier(); /* unmask then check (avoid races) */ \
+ if ( unlikely(_shared->vcpu_data[0].evtchn_upcall_pending) ) \
+ force_evtchn_callback(); \
+} while (0)
+
+#define local_irq_save(x) __save_and_cli(x)
+#define local_irq_restore(x) __restore_flags(x)
+#define local_save_flags(x) __save_flags(x)
+#define local_irq_disable() __cli()
+#define local_irq_enable() __sti()
+
+#define irqs_disabled() HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask
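
[Editor's sketch] A minimal usage sketch of the virtualized flag interface above -- shaped exactly like native Linux code, but toggling the shared-info upcall mask rather than EFLAGS.IF:

    unsigned long flags;

    local_irq_save(flags);      /* mask event-channel upcalls */
    /* ... critical section: no upcalls are delivered ... */
    local_irq_restore(flags);   /* unmask; pending events then fire */
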
+
+/*
+ * disable hlt during certain critical i/o operations
+ */
+#define HAVE_DISABLE_HLT
+void disable_hlt(void);
+void enable_hlt(void);
+
+extern int es7000_plat;
+void cpu_idle_wait(void);
+
+#endif
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/include/asm-xen/asm-i386/tlbflush.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/tlbflush.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,109 @@
+#ifndef _I386_TLBFLUSH_H
+#define _I386_TLBFLUSH_H
+
+#include <linux/config.h>
+#include <linux/mm.h>
+#include <asm/processor.h>
+
+#define __flush_tlb() xen_tlb_flush()
+#define __flush_tlb_global() xen_tlb_flush()
+
+extern unsigned long pgkern_mask;
+
+# define __flush_tlb_all() \
+ do { \
+ if (cpu_has_pge) \
+ __flush_tlb_global(); \
+ else \
+ __flush_tlb(); \
+ } while (0)
+
+#define cpu_has_invlpg (boot_cpu_data.x86 > 3)
+
+#define __flush_tlb_single(addr) xen_invlpg(addr)
+
+#define __flush_tlb_one(addr) __flush_tlb_single(addr)
+
+/*
+ * TLB flushing:
+ *
+ * - flush_tlb() flushes the current mm struct TLBs
+ * - flush_tlb_all() flushes all processes TLBs
+ * - flush_tlb_mm(mm) flushes the specified mm context TLB's
+ * - flush_tlb_page(vma, vmaddr) flushes one page
+ * - flush_tlb_range(vma, start, end) flushes a range of pages
+ * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
+ * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
+ *
+ * ..but the i386 has somewhat limited tlb flushing capabilities,
+ * and page-granular flushes are available only on i486 and up.
+ */
+
+#ifndef CONFIG_SMP
+
+#define flush_tlb() __flush_tlb()
+#define flush_tlb_all() __flush_tlb_all()
+#define local_flush_tlb() __flush_tlb()
+
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+ if (mm == current->active_mm)
+ __flush_tlb();
+}
+
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ if (vma->vm_mm == current->active_mm)
+ __flush_tlb_one(addr);
+}
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ if (vma->vm_mm == current->active_mm)
+ __flush_tlb();
+}
+
+#else
+
+#include <asm/smp.h>
+
+#define local_flush_tlb() \
+ __flush_tlb()
+
+extern void flush_tlb_all(void);
+extern void flush_tlb_current_task(void);
+extern void flush_tlb_mm(struct mm_struct *);
+extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
+
+#define flush_tlb() flush_tlb_current_task()
+
+static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end)
+{
+ flush_tlb_mm(vma->vm_mm);
+}
+
+#define TLBSTATE_OK 1
+#define TLBSTATE_LAZY 2
+
+struct tlb_state
+{
+ struct mm_struct *active_mm;
+ int state;
+ char __cacheline_padding[L1_CACHE_BYTES-8];
+};
+DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
+
+
+#endif
+
+#define flush_tlb_kernel_range(start, end) flush_tlb_all()
+
+static inline void flush_tlb_pgtables(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ /* i386 does not keep any page table caches in TLB */
+}
+
+#endif /* _I386_TLBFLUSH_H */
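
[Editor's sketch] As a usage sketch, a page-table update pairs with the matching single-page flush, which under Xen becomes xen_invlpg() rather than a raw invlpg instruction (ptep, pte, vma, and addr are hypothetical):

    set_pte(ptep, pte);         /* update the mapping for 'addr' */
    flush_tlb_page(vma, addr);  /* invalidate the stale translation */
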
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/include/asm-xen/asm-i386/vga.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/vga.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,20 @@
+/*
+ * Access to VGA videoram
+ *
+ * (c) 1998 Martin Mares <mj@xxxxxx>
+ */
+
+#ifndef _LINUX_ASM_VGA_H_
+#define _LINUX_ASM_VGA_H_
+
+/*
+ * On the PC, we can just recalculate addresses and then
+ * access the videoram directly without any black magic.
+ */
+
+#define VGA_MAP_MEM(x) (unsigned long)isa_bus_to_virt(x)
+
+#define vga_readb(x) (*(x))
+#define vga_writeb(x,y) (*(y) = (x))
+
+#endif
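
[Editor's sketch] A usage sketch for the accessors above, writing one character cell of the text-mode framebuffer (address and values illustrative):

    char *vram = (char *)VGA_MAP_MEM(0xb8000);
    vga_writeb('X', vram);          /* character byte of cell 0 */
    vga_writeb(0x07, vram + 1);     /* attribute: grey on black */
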
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/include/asm-xen/asm-i386/xor.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/xor.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,884 @@
+/*
+ * include/asm-i386/xor.h
+ *
+ * Optimized RAID-5 checksumming functions for MMX and SSE.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * You should have received a copy of the GNU General Public License
+ * (for example /usr/src/linux/COPYING); if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * High-speed RAID5 checksumming functions utilizing MMX instructions.
+ * Copyright (C) 1998 Ingo Molnar.
+ */
+
+#define LD(x,y) " movq 8*("#x")(%1), %%mm"#y" ;\n"
+#define ST(x,y) " movq %%mm"#y", 8*("#x")(%1) ;\n"
+#define XO1(x,y) " pxor 8*("#x")(%2), %%mm"#y" ;\n"
+#define XO2(x,y) " pxor 8*("#x")(%3), %%mm"#y" ;\n"
+#define XO3(x,y) " pxor 8*("#x")(%4), %%mm"#y" ;\n"
+#define XO4(x,y) " pxor 8*("#x")(%5), %%mm"#y" ;\n"
+
+#include <asm/i387.h>
+
+static void
+xor_pII_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+{
+ unsigned long lines = bytes >> 7;
+
+ kernel_fpu_begin();
+
+ __asm__ __volatile__ (
+#undef BLOCK
+#define BLOCK(i) \
+ LD(i,0) \
+ LD(i+1,1) \
+ LD(i+2,2) \
+ LD(i+3,3) \
+ XO1(i,0) \
+ ST(i,0) \
+ XO1(i+1,1) \
+ ST(i+1,1) \
+ XO1(i+2,2) \
+ ST(i+2,2) \
+ XO1(i+3,3) \
+ ST(i+3,3)
+
+ " .align 32 ;\n"
+ " 1: ;\n"
+
+ BLOCK(0)
+ BLOCK(4)
+ BLOCK(8)
+ BLOCK(12)
+
+ " addl $128, %1 ;\n"
+ " addl $128, %2 ;\n"
+ " decl %0 ;\n"
+ " jnz 1b ;\n"
+ : "+r" (lines),
+ "+r" (p1), "+r" (p2)
+ :
+ : "memory");
+
+ kernel_fpu_end();
+}
+
+static void
+xor_pII_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3)
+{
+ unsigned long lines = bytes >> 7;
+
+ kernel_fpu_begin();
+
+ __asm__ __volatile__ (
+#undef BLOCK
+#define BLOCK(i) \
+ LD(i,0) \
+ LD(i+1,1) \
+ LD(i+2,2) \
+ LD(i+3,3) \
+ XO1(i,0) \
+ XO1(i+1,1) \
+ XO1(i+2,2) \
+ XO1(i+3,3) \
+ XO2(i,0) \
+ ST(i,0) \
+ XO2(i+1,1) \
+ ST(i+1,1) \
+ XO2(i+2,2) \
+ ST(i+2,2) \
+ XO2(i+3,3) \
+ ST(i+3,3)
+
+ " .align 32 ;\n"
+ " 1: ;\n"
+
+ BLOCK(0)
+ BLOCK(4)
+ BLOCK(8)
+ BLOCK(12)
+
+ " addl $128, %1 ;\n"
+ " addl $128, %2 ;\n"
+ " addl $128, %3 ;\n"
+ " decl %0 ;\n"
+ " jnz 1b ;\n"
+ : "+r" (lines),
+ "+r" (p1), "+r" (p2), "+r" (p3)
+ :
+ : "memory");
+
+ kernel_fpu_end();
+}
+
+static void
+xor_pII_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4)
+{
+ unsigned long lines = bytes >> 7;
+
+ kernel_fpu_begin();
+
+ __asm__ __volatile__ (
+#undef BLOCK
+#define BLOCK(i) \
+ LD(i,0) \
+ LD(i+1,1) \
+ LD(i+2,2) \
+ LD(i+3,3) \
+ XO1(i,0) \
+ XO1(i+1,1) \
+ XO1(i+2,2) \
+ XO1(i+3,3) \
+ XO2(i,0) \
+ XO2(i+1,1) \
+ XO2(i+2,2) \
+ XO2(i+3,3) \
+ XO3(i,0) \
+ ST(i,0) \
+ XO3(i+1,1) \
+ ST(i+1,1) \
+ XO3(i+2,2) \
+ ST(i+2,2) \
+ XO3(i+3,3) \
+ ST(i+3,3)
+
+ " .align 32 ;\n"
+ " 1: ;\n"
+
+ BLOCK(0)
+ BLOCK(4)
+ BLOCK(8)
+ BLOCK(12)
+
+ " addl $128, %1 ;\n"
+ " addl $128, %2 ;\n"
+ " addl $128, %3 ;\n"
+ " addl $128, %4 ;\n"
+ " decl %0 ;\n"
+ " jnz 1b ;\n"
+ : "+r" (lines),
+ "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
+ :
+ : "memory");
+
+ kernel_fpu_end();
+}
+
+
+static void
+xor_pII_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4, unsigned long *p5)
+{
+ unsigned long lines = bytes >> 7;
+
+ kernel_fpu_begin();
+
+ /* Make sure GCC forgets anything it knows about p4 or p5,
+ such that it won't pass to the asm volatile below a
+ register that is shared with any other variable. That's
+ because we modify p4 and p5 there, but we can't mark them
+ as read/write, otherwise we'd overflow the 10-asm-operands
+ limit of GCC < 3.1. */
+ __asm__ ("" : "+r" (p4), "+r" (p5));
+
+ __asm__ __volatile__ (
+#undef BLOCK
+#define BLOCK(i) \
+ LD(i,0) \
+ LD(i+1,1) \
+ LD(i+2,2) \
+ LD(i+3,3) \
+ XO1(i,0) \
+ XO1(i+1,1) \
+ XO1(i+2,2) \
+ XO1(i+3,3) \
+ XO2(i,0) \
+ XO2(i+1,1) \
+ XO2(i+2,2) \
+ XO2(i+3,3) \
+ XO3(i,0) \
+ XO3(i+1,1) \
+ XO3(i+2,2) \
+ XO3(i+3,3) \
+ XO4(i,0) \
+ ST(i,0) \
+ XO4(i+1,1) \
+ ST(i+1,1) \
+ XO4(i+2,2) \
+ ST(i+2,2) \
+ XO4(i+3,3) \
+ ST(i+3,3)
+
+ " .align 32 ;\n"
+ " 1: ;\n"
+
+ BLOCK(0)
+ BLOCK(4)
+ BLOCK(8)
+ BLOCK(12)
+
+ " addl $128, %1 ;\n"
+ " addl $128, %2 ;\n"
+ " addl $128, %3 ;\n"
+ " addl $128, %4 ;\n"
+ " addl $128, %5 ;\n"
+ " decl %0 ;\n"
+ " jnz 1b ;\n"
+ : "+r" (lines),
+ "+r" (p1), "+r" (p2), "+r" (p3)
+ : "r" (p4), "r" (p5)
+ : "memory");
+
+ /* p4 and p5 were modified, and now the variables are dead.
+ Clobber them just to be sure nobody does something stupid
+ like assuming they have some legal value. */
+ __asm__ ("" : "=r" (p4), "=r" (p5));
+
+ kernel_fpu_end();
+}
+
+#undef LD
+#undef XO1
+#undef XO2
+#undef XO3
+#undef XO4
+#undef ST
+#undef BLOCK
+
+static void
+xor_p5_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+{
+ unsigned long lines = bytes >> 6;
+
+ kernel_fpu_begin();
+
+ __asm__ __volatile__ (
+ " .align 32 ;\n"
+ " 1: ;\n"
+ " movq (%1), %%mm0 ;\n"
+ " movq 8(%1), %%mm1 ;\n"
+ " pxor (%2), %%mm0 ;\n"
+ " movq 16(%1), %%mm2 ;\n"
+ " movq %%mm0, (%1) ;\n"
+ " pxor 8(%2), %%mm1 ;\n"
+ " movq 24(%1), %%mm3 ;\n"
+ " movq %%mm1, 8(%1) ;\n"
+ " pxor 16(%2), %%mm2 ;\n"
+ " movq 32(%1), %%mm4 ;\n"
+ " movq %%mm2, 16(%1) ;\n"
+ " pxor 24(%2), %%mm3 ;\n"
+ " movq 40(%1), %%mm5 ;\n"
+ " movq %%mm3, 24(%1) ;\n"
+ " pxor 32(%2), %%mm4 ;\n"
+ " movq 48(%1), %%mm6 ;\n"
+ " movq %%mm4, 32(%1) ;\n"
+ " pxor 40(%2), %%mm5 ;\n"
+ " movq 56(%1), %%mm7 ;\n"
+ " movq %%mm5, 40(%1) ;\n"
+ " pxor 48(%2), %%mm6 ;\n"
+ " pxor 56(%2), %%mm7 ;\n"
+ " movq %%mm6, 48(%1) ;\n"
+ " movq %%mm7, 56(%1) ;\n"
+
+ " addl $64, %1 ;\n"
+ " addl $64, %2 ;\n"
+ " decl %0 ;\n"
+ " jnz 1b ;\n"
+ : "+r" (lines),
+ "+r" (p1), "+r" (p2)
+ :
+ : "memory");
+
+ kernel_fpu_end();
+}
+
+static void
+xor_p5_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3)
+{
+ unsigned long lines = bytes >> 6;
+
+ kernel_fpu_begin();
+
+ __asm__ __volatile__ (
+ " .align 32,0x90 ;\n"
+ " 1: ;\n"
+ " movq (%1), %%mm0 ;\n"
+ " movq 8(%1), %%mm1 ;\n"
+ " pxor (%2), %%mm0 ;\n"
+ " movq 16(%1), %%mm2 ;\n"
+ " pxor 8(%2), %%mm1 ;\n"
+ " pxor (%3), %%mm0 ;\n"
+ " pxor 16(%2), %%mm2 ;\n"
+ " movq %%mm0, (%1) ;\n"
+ " pxor 8(%3), %%mm1 ;\n"
+ " pxor 16(%3), %%mm2 ;\n"
+ " movq 24(%1), %%mm3 ;\n"
+ " movq %%mm1, 8(%1) ;\n"
+ " movq 32(%1), %%mm4 ;\n"
+ " movq 40(%1), %%mm5 ;\n"
+ " pxor 24(%2), %%mm3 ;\n"
+ " movq %%mm2, 16(%1) ;\n"
+ " pxor 32(%2), %%mm4 ;\n"
+ " pxor 24(%3), %%mm3 ;\n"
+ " pxor 40(%2), %%mm5 ;\n"
+ " movq %%mm3, 24(%1) ;\n"
+ " pxor 32(%3), %%mm4 ;\n"
+ " pxor 40(%3), %%mm5 ;\n"
+ " movq 48(%1), %%mm6 ;\n"
+ " movq %%mm4, 32(%1) ;\n"
+ " movq 56(%1), %%mm7 ;\n"
+ " pxor 48(%2), %%mm6 ;\n"
+ " movq %%mm5, 40(%1) ;\n"
+ " pxor 56(%2), %%mm7 ;\n"
+ " pxor 48(%3), %%mm6 ;\n"
+ " pxor 56(%3), %%mm7 ;\n"
+ " movq %%mm6, 48(%1) ;\n"
+ " movq %%mm7, 56(%1) ;\n"
+
+ " addl $64, %1 ;\n"
+ " addl $64, %2 ;\n"
+ " addl $64, %3 ;\n"
+ " decl %0 ;\n"
+ " jnz 1b ;\n"
+ : "+r" (lines),
+ "+r" (p1), "+r" (p2), "+r" (p3)
+ :
+ : "memory" );
+
+ kernel_fpu_end();
+}
+
+static void
+xor_p5_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4)
+{
+ unsigned long lines = bytes >> 6;
+
+ kernel_fpu_begin();
+
+ __asm__ __volatile__ (
+ " .align 32,0x90 ;\n"
+ " 1: ;\n"
+ " movq (%1), %%mm0 ;\n"
+ " movq 8(%1), %%mm1 ;\n"
+ " pxor (%2), %%mm0 ;\n"
+ " movq 16(%1), %%mm2 ;\n"
+ " pxor 8(%2), %%mm1 ;\n"
+ " pxor (%3), %%mm0 ;\n"
+ " pxor 16(%2), %%mm2 ;\n"
+ " pxor 8(%3), %%mm1 ;\n"
+ " pxor (%4), %%mm0 ;\n"
+ " movq 24(%1), %%mm3 ;\n"
+ " pxor 16(%3), %%mm2 ;\n"
+ " pxor 8(%4), %%mm1 ;\n"
+ " movq %%mm0, (%1) ;\n"
+ " movq 32(%1), %%mm4 ;\n"
+ " pxor 24(%2), %%mm3 ;\n"
+ " pxor 16(%4), %%mm2 ;\n"
+ " movq %%mm1, 8(%1) ;\n"
+ " movq 40(%1), %%mm5 ;\n"
+ " pxor 32(%2), %%mm4 ;\n"
+ " pxor 24(%3), %%mm3 ;\n"
+ " movq %%mm2, 16(%1) ;\n"
+ " pxor 40(%2), %%mm5 ;\n"
+ " pxor 32(%3), %%mm4 ;\n"
+ " pxor 24(%4), %%mm3 ;\n"
+ " movq %%mm3, 24(%1) ;\n"
+ " movq 56(%1), %%mm7 ;\n"
+ " movq 48(%1), %%mm6 ;\n"
+ " pxor 40(%3), %%mm5 ;\n"
+ " pxor 32(%4), %%mm4 ;\n"
+ " pxor 48(%2), %%mm6 ;\n"
+ " movq %%mm4, 32(%1) ;\n"
+ " pxor 56(%2), %%mm7 ;\n"
+ " pxor 40(%4), %%mm5 ;\n"
+ " pxor 48(%3), %%mm6 ;\n"
+ " pxor 56(%3), %%mm7 ;\n"
+ " movq %%mm5, 40(%1) ;\n"
+ " pxor 48(%4), %%mm6 ;\n"
+ " pxor 56(%4), %%mm7 ;\n"
+ " movq %%mm6, 48(%1) ;\n"
+ " movq %%mm7, 56(%1) ;\n"
+
+ " addl $64, %1 ;\n"
+ " addl $64, %2 ;\n"
+ " addl $64, %3 ;\n"
+ " addl $64, %4 ;\n"
+ " decl %0 ;\n"
+ " jnz 1b ;\n"
+ : "+r" (lines),
+ "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
+ :
+ : "memory");
+
+ kernel_fpu_end();
+}
+
+static void
+xor_p5_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4, unsigned long *p5)
+{
+ unsigned long lines = bytes >> 6;
+
+ kernel_fpu_begin();
+
+ /* Make sure GCC forgets anything it knows about p4 or p5,
+ such that it won't pass to the asm volatile below a
+ register that is shared with any other variable. That's
+ because we modify p4 and p5 there, but we can't mark them
+ as read/write, otherwise we'd overflow the 10-asm-operands
+ limit of GCC < 3.1. */
+ __asm__ ("" : "+r" (p4), "+r" (p5));
+
+ __asm__ __volatile__ (
+ " .align 32,0x90 ;\n"
+ " 1: ;\n"
+ " movq (%1), %%mm0 ;\n"
+ " movq 8(%1), %%mm1 ;\n"
+ " pxor (%2), %%mm0 ;\n"
+ " pxor 8(%2), %%mm1 ;\n"
+ " movq 16(%1), %%mm2 ;\n"
+ " pxor (%3), %%mm0 ;\n"
+ " pxor 8(%3), %%mm1 ;\n"
+ " pxor 16(%2), %%mm2 ;\n"
+ " pxor (%4), %%mm0 ;\n"
+ " pxor 8(%4), %%mm1 ;\n"
+ " pxor 16(%3), %%mm2 ;\n"
+ " movq 24(%1), %%mm3 ;\n"
+ " pxor (%5), %%mm0 ;\n"
+ " pxor 8(%5), %%mm1 ;\n"
+ " movq %%mm0, (%1) ;\n"
+ " pxor 16(%4), %%mm2 ;\n"
+ " pxor 24(%2), %%mm3 ;\n"
+ " movq %%mm1, 8(%1) ;\n"
+ " pxor 16(%5), %%mm2 ;\n"
+ " pxor 24(%3), %%mm3 ;\n"
+ " movq 32(%1), %%mm4 ;\n"
+ " movq %%mm2, 16(%1) ;\n"
+ " pxor 24(%4), %%mm3 ;\n"
+ " pxor 32(%2), %%mm4 ;\n"
+ " movq 40(%1), %%mm5 ;\n"
+ " pxor 24(%5), %%mm3 ;\n"
+ " pxor 32(%3), %%mm4 ;\n"
+ " pxor 40(%2), %%mm5 ;\n"
+ " movq %%mm3, 24(%1) ;\n"
+ " pxor 32(%4), %%mm4 ;\n"
+ " pxor 40(%3), %%mm5 ;\n"
+ " movq 48(%1), %%mm6 ;\n"
+ " movq 56(%1), %%mm7 ;\n"
+ " pxor 32(%5), %%mm4 ;\n"
+ " pxor 40(%4), %%mm5 ;\n"
+ " pxor 48(%2), %%mm6 ;\n"
+ " pxor 56(%2), %%mm7 ;\n"
+ " movq %%mm4, 32(%1) ;\n"
+ " pxor 48(%3), %%mm6 ;\n"
+ " pxor 56(%3), %%mm7 ;\n"
+ " pxor 40(%5), %%mm5 ;\n"
+ " pxor 48(%4), %%mm6 ;\n"
+ " pxor 56(%4), %%mm7 ;\n"
+ " movq %%mm5, 40(%1) ;\n"
+ " pxor 48(%5), %%mm6 ;\n"
+ " pxor 56(%5), %%mm7 ;\n"
+ " movq %%mm6, 48(%1) ;\n"
+ " movq %%mm7, 56(%1) ;\n"
+
+ " addl $64, %1 ;\n"
+ " addl $64, %2 ;\n"
+ " addl $64, %3 ;\n"
+ " addl $64, %4 ;\n"
+ " addl $64, %5 ;\n"
+ " decl %0 ;\n"
+ " jnz 1b ;\n"
+ : "+r" (lines),
+ "+r" (p1), "+r" (p2), "+r" (p3)
+ : "r" (p4), "r" (p5)
+ : "memory");
+
+ /* p4 and p5 were modified, and now the variables are dead.
+ Clobber them just to be sure nobody does something stupid
+ like assuming they have some legal value. */
+ __asm__ ("" : "=r" (p4), "=r" (p5));
+
+ kernel_fpu_end();
+}
+
+static struct xor_block_template xor_block_pII_mmx = {
+ .name = "pII_mmx",
+ .do_2 = xor_pII_mmx_2,
+ .do_3 = xor_pII_mmx_3,
+ .do_4 = xor_pII_mmx_4,
+ .do_5 = xor_pII_mmx_5,
+};
+
+static struct xor_block_template xor_block_p5_mmx = {
+ .name = "p5_mmx",
+ .do_2 = xor_p5_mmx_2,
+ .do_3 = xor_p5_mmx_3,
+ .do_4 = xor_p5_mmx_4,
+ .do_5 = xor_p5_mmx_5,
+};
+
+/*
+ * Cache avoiding checksumming functions utilizing KNI instructions
+ * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo)
+ */
+
+#define XMMS_SAVE do { \
+ preempt_disable(); \
+ if (!(current_thread_info()->status & TS_USEDFPU)) \
+ clts(); \
+ __asm__ __volatile__ ( \
+ "movups %%xmm0,(%1) ;\n\t" \
+ "movups %%xmm1,0x10(%1) ;\n\t" \
+ "movups %%xmm2,0x20(%1) ;\n\t" \
+ "movups %%xmm3,0x30(%1) ;\n\t" \
+ : "=&r" (cr0) \
+ : "r" (xmm_save) \
+ : "memory"); \
+} while(0)
+
+#define XMMS_RESTORE do { \
+ __asm__ __volatile__ ( \
+ "sfence ;\n\t" \
+ "movups (%1),%%xmm0 ;\n\t" \
+ "movups 0x10(%1),%%xmm1 ;\n\t" \
+ "movups 0x20(%1),%%xmm2 ;\n\t" \
+ "movups 0x30(%1),%%xmm3 ;\n\t" \
+ : \
+ : "r" (cr0), "r" (xmm_save) \
+ : "memory"); \
+ if (!(current_thread_info()->status & TS_USEDFPU)) \
+ stts(); \
+ preempt_enable(); \
+} while(0)
+
+#define ALIGN16 __attribute__((aligned(16)))
+
+#define OFFS(x) "16*("#x")"
+#define PF_OFFS(x) "256+16*("#x")"
+#define PF0(x) " prefetchnta "PF_OFFS(x)"(%1) ;\n"
+#define LD(x,y) " movaps "OFFS(x)"(%1), %%xmm"#y" ;\n"
+#define ST(x,y) " movaps %%xmm"#y", "OFFS(x)"(%1) ;\n"
+#define PF1(x) " prefetchnta "PF_OFFS(x)"(%2) ;\n"
+#define PF2(x) " prefetchnta "PF_OFFS(x)"(%3) ;\n"
+#define PF3(x) " prefetchnta "PF_OFFS(x)"(%4) ;\n"
+#define PF4(x) " prefetchnta "PF_OFFS(x)"(%5) ;\n"
+#define PF5(x) " prefetchnta "PF_OFFS(x)"(%6) ;\n"
+#define XO1(x,y) " xorps "OFFS(x)"(%2), %%xmm"#y" ;\n"
+#define XO2(x,y) " xorps "OFFS(x)"(%3), %%xmm"#y" ;\n"
+#define XO3(x,y) " xorps "OFFS(x)"(%4), %%xmm"#y" ;\n"
+#define XO4(x,y) " xorps "OFFS(x)"(%5), %%xmm"#y" ;\n"
+#define XO5(x,y) " xorps "OFFS(x)"(%6), %%xmm"#y" ;\n"
+
+
+static void
+xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+{
+ unsigned long lines = bytes >> 8;
+ char xmm_save[16*4] ALIGN16;
+ int cr0;
+
+ XMMS_SAVE;
+
+ __asm__ __volatile__ (
+#undef BLOCK
+#define BLOCK(i) \
+ LD(i,0) \
+ LD(i+1,1) \
+ PF1(i) \
+ PF1(i+2) \
+ LD(i+2,2) \
+ LD(i+3,3) \
+ PF0(i+4) \
+ PF0(i+6) \
+ XO1(i,0) \
+ XO1(i+1,1) \
+ XO1(i+2,2) \
+ XO1(i+3,3) \
+ ST(i,0) \
+ ST(i+1,1) \
+ ST(i+2,2) \
+ ST(i+3,3) \
+
+
+ PF0(0)
+ PF0(2)
+
+ " .align 32 ;\n"
+ " 1: ;\n"
+
+ BLOCK(0)
+ BLOCK(4)
+ BLOCK(8)
+ BLOCK(12)
+
+ " addl $256, %1 ;\n"
+ " addl $256, %2 ;\n"
+ " decl %0 ;\n"
+ " jnz 1b ;\n"
+ : "+r" (lines),
+ "+r" (p1), "+r" (p2)
+ :
+ : "memory");
+
+ XMMS_RESTORE;
+}
+
+static void
+xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3)
+{
+ unsigned long lines = bytes >> 8;
+ char xmm_save[16*4] ALIGN16;
+ int cr0;
+
+ XMMS_SAVE;
+
+ __asm__ __volatile__ (
+#undef BLOCK
+#define BLOCK(i) \
+ PF1(i) \
+ PF1(i+2) \
+ LD(i,0) \
+ LD(i+1,1) \
+ LD(i+2,2) \
+ LD(i+3,3) \
+ PF2(i) \
+ PF2(i+2) \
+ PF0(i+4) \
+ PF0(i+6) \
+ XO1(i,0) \
+ XO1(i+1,1) \
+ XO1(i+2,2) \
+ XO1(i+3,3) \
+ XO2(i,0) \
+ XO2(i+1,1) \
+ XO2(i+2,2) \
+ XO2(i+3,3) \
+ ST(i,0) \
+ ST(i+1,1) \
+ ST(i+2,2) \
+ ST(i+3,3) \
+
+
+ PF0(0)
+ PF0(2)
+
+ " .align 32 ;\n"
+ " 1: ;\n"
+
+ BLOCK(0)
+ BLOCK(4)
+ BLOCK(8)
+ BLOCK(12)
+
+ " addl $256, %1 ;\n"
+ " addl $256, %2 ;\n"
+ " addl $256, %3 ;\n"
+ " decl %0 ;\n"
+ " jnz 1b ;\n"
+ : "+r" (lines),
+ "+r" (p1), "+r"(p2), "+r"(p3)
+ :
+ : "memory" );
+
+ XMMS_RESTORE;
+}
+
+static void
+xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4)
+{
+ unsigned long lines = bytes >> 8;
+ char xmm_save[16*4] ALIGN16;
+ int cr0;
+
+ XMMS_SAVE;
+
+ __asm__ __volatile__ (
+#undef BLOCK
+#define BLOCK(i) \
+ PF1(i) \
+ PF1(i+2) \
+ LD(i,0) \
+ LD(i+1,1) \
+ LD(i+2,2) \
+ LD(i+3,3) \
+ PF2(i) \
+ PF2(i+2) \
+ XO1(i,0) \
+ XO1(i+1,1) \
+ XO1(i+2,2) \
+ XO1(i+3,3) \
+ PF3(i) \
+ PF3(i+2) \
+ PF0(i+4) \
+ PF0(i+6) \
+ XO2(i,0) \
+ XO2(i+1,1) \
+ XO2(i+2,2) \
+ XO2(i+3,3) \
+ XO3(i,0) \
+ XO3(i+1,1) \
+ XO3(i+2,2) \
+ XO3(i+3,3) \
+ ST(i,0) \
+ ST(i+1,1) \
+ ST(i+2,2) \
+ ST(i+3,3) \
+
+
+ PF0(0)
+ PF0(2)
+
+ " .align 32 ;\n"
+ " 1: ;\n"
+
+ BLOCK(0)
+ BLOCK(4)
+ BLOCK(8)
+ BLOCK(12)
+
+ " addl $256, %1 ;\n"
+ " addl $256, %2 ;\n"
+ " addl $256, %3 ;\n"
+ " addl $256, %4 ;\n"
+ " decl %0 ;\n"
+ " jnz 1b ;\n"
+ : "+r" (lines),
+ "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
+ :
+ : "memory" );
+
+ XMMS_RESTORE;
+}
+
+static void
+xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4, unsigned long *p5)
+{
+ unsigned long lines = bytes >> 8;
+ char xmm_save[16*4] ALIGN16;
+ int cr0;
+
+ XMMS_SAVE;
+
+ /* Make sure GCC forgets anything it knows about p4 or p5,
+ such that it won't pass to the asm volatile below a
+ register that is shared with any other variable. That's
+ because we modify p4 and p5 there, but we can't mark them
+ as read/write, otherwise we'd overflow the 10-asm-operands
+ limit of GCC < 3.1. */
+ __asm__ ("" : "+r" (p4), "+r" (p5));
+
+ __asm__ __volatile__ (
+#undef BLOCK
+#define BLOCK(i) \
+ PF1(i) \
+ PF1(i+2) \
+ LD(i,0) \
+ LD(i+1,1) \
+ LD(i+2,2) \
+ LD(i+3,3) \
+ PF2(i) \
+ PF2(i+2) \
+ XO1(i,0) \
+ XO1(i+1,1) \
+ XO1(i+2,2) \
+ XO1(i+3,3) \
+ PF3(i) \
+ PF3(i+2) \
+ XO2(i,0) \
+ XO2(i+1,1) \
+ XO2(i+2,2) \
+ XO2(i+3,3) \
+ PF4(i) \
+ PF4(i+2) \
+ PF0(i+4) \
+ PF0(i+6) \
+ XO3(i,0) \
+ XO3(i+1,1) \
+ XO3(i+2,2) \
+ XO3(i+3,3) \
+ XO4(i,0) \
+ XO4(i+1,1) \
+ XO4(i+2,2) \
+ XO4(i+3,3) \
+ ST(i,0) \
+ ST(i+1,1) \
+ ST(i+2,2) \
+ ST(i+3,3) \
+
+
+ PF0(0)
+ PF0(2)
+
+ " .align 32 ;\n"
+ " 1: ;\n"
+
+ BLOCK(0)
+ BLOCK(4)
+ BLOCK(8)
+ BLOCK(12)
+
+ " addl $256, %1 ;\n"
+ " addl $256, %2 ;\n"
+ " addl $256, %3 ;\n"
+ " addl $256, %4 ;\n"
+ " addl $256, %5 ;\n"
+ " decl %0 ;\n"
+ " jnz 1b ;\n"
+ : "+r" (lines),
+ "+r" (p1), "+r" (p2), "+r" (p3)
+ : "r" (p4), "r" (p5)
+ : "memory");
+
+ /* p4 and p5 were modified, and now the variables are dead.
+ Clobber them just to be sure nobody does something stupid
+ like assuming they have some legal value. */
+ __asm__ ("" : "=r" (p4), "=r" (p5));
+
+ XMMS_RESTORE;
+}
+
+static struct xor_block_template xor_block_pIII_sse = {
+ .name = "pIII_sse",
+ .do_2 = xor_sse_2,
+ .do_3 = xor_sse_3,
+ .do_4 = xor_sse_4,
+ .do_5 = xor_sse_5,
+};
+
+/* Also try the generic routines. */
+#include <asm-generic/xor.h>
+
+#undef XOR_TRY_TEMPLATES
+#define XOR_TRY_TEMPLATES \
+ do { \
+ xor_speed(&xor_block_8regs); \
+ xor_speed(&xor_block_8regs_p); \
+ xor_speed(&xor_block_32regs); \
+ xor_speed(&xor_block_32regs_p); \
+ if (cpu_has_xmm) \
+ xor_speed(&xor_block_pIII_sse); \
+ if (cpu_has_mmx) { \
+ xor_speed(&xor_block_pII_mmx); \
+ xor_speed(&xor_block_p5_mmx); \
+ } \
+ } while (0)
+
+/* We force the use of the SSE xor block because it can write around L2.
+   We may also be able to load into the L1 cache only, depending on how
+   the cpu deals with a load to a line that is being prefetched. */
+#define XOR_SELECT_TEMPLATE(FASTEST) \
+ (cpu_has_xmm ? &xor_block_pIII_sse : FASTEST)
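
[Editor's sketch] A sketch of how the chosen template is consumed by the RAID core, which benchmarks every xor_speed() candidate at boot and then calls through the winner (the name active_template follows the generic xor code; buffers are hypothetical):

    /* dst[i] ^= src[i] over one page */
    active_template->do_2(PAGE_SIZE, dst, src);
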
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/include/asm-xen/balloon.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/balloon.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,51 @@
+/******************************************************************************
+ * balloon.h
+ *
+ * Xen balloon driver - enables returning/claiming memory to/from Xen.
+ *
+ * Copyright (c) 2003, B Dragovic
+ * Copyright (c) 2003-2004, M Williamson, K Fraser
+ *
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef __ASM_BALLOON_H__
+#define __ASM_BALLOON_H__
+
+/*
+ * Inform the balloon driver that it should allow some slop for device-driver
+ * memory activities.
+ */
+extern void balloon_update_driver_allowance(long delta);
+
+/* Give up unmapped pages to the balloon driver. */
+extern void balloon_put_pages(unsigned long *mfn_list, unsigned long nr_mfns);
+
+/*
+ * Prevent the balloon driver from changing the memory reservation during
+ * a driver critical region.
+ */
+extern spinlock_t balloon_lock;
+#define balloon_lock(__flags) spin_lock_irqsave(&balloon_lock, __flags)
+#define balloon_unlock(__flags) spin_unlock_irqrestore(&balloon_lock, __flags)
+
+#endif /* __ASM_BALLOON_H__ */
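
[Editor's sketch] A usage sketch of the critical-region macros above (note that they deliberately shadow the balloon_lock spinlock's own name):

    unsigned long flags;

    balloon_lock(flags);
    /* ... exchange pages with Xen without the balloon racing us ... */
    balloon_unlock(flags);
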
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/include/asm-xen/ctrl_if.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/ctrl_if.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,160 @@
+/******************************************************************************
+ * ctrl_if.h
+ *
+ * Management functions for special interface to the domain controller.
+ *
+ * Copyright (c) 2004, K A Fraser
+ *
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef __ASM_XEN__CTRL_IF_H__
+#define __ASM_XEN__CTRL_IF_H__
+
+#include <asm-xen/hypervisor.h>
+#include <asm-xen/queues.h>
+
+typedef control_msg_t ctrl_msg_t;
+
+/*
+ * Callback function type. Called for asynchronous processing of received
+ * request messages, and responses to previously-transmitted request messages.
+ * The parameters are (@msg, @id).
+ * @msg: Original request/response message (not a copy). The message can be
+ * modified in-place by the handler (e.g., a response callback can
+ * turn a request message into a response message in place). The message
+ * is no longer accessible after the callback handler returns -- if the
+ * message is required to persist for longer, it must be copied.
+ * @id: (Response callbacks only) The 'id' that was specified when the
+ * original request message was queued for transmission.
+ */
+typedef void (*ctrl_msg_handler_t)(ctrl_msg_t *, unsigned long);
+
+/*
+ * Send @msg to the domain controller. Execute @hnd when a response is
+ * received, passing the response message and the specified @id. This
+ * operation will not block: it will return -EAGAIN if there is no space.
+ * Notes:
+ * 1. The @msg is copied if it is transmitted and so can be freed after this
+ * function returns.
+ * 2. If @hnd is NULL then no callback is executed.
+ */
+int
+ctrl_if_send_message_noblock(
+ ctrl_msg_t *msg,
+ ctrl_msg_handler_t hnd,
+ unsigned long id);
+
+/*
+ * Send @msg to the domain controller. Execute @hnd when a response is
+ * received, passing the response message and the specified @id. This
+ * operation will block until the message is sent, or a signal is received
+ * for the calling process (unless @wait_state is TASK_UNINTERRUPTIBLE).
+ * Notes:
+ * 1. The @msg is copied if it is transmitted and so can be freed after this
+ * function returns.
+ * 2. If @hnd is NULL then no callback is executed.
+ */
+int
+ctrl_if_send_message_block(
+ ctrl_msg_t *msg,
+ ctrl_msg_handler_t hnd,
+ unsigned long id,
+ long wait_state);
+
+/*
+ * Send @msg to the domain controller. Block until the response is received,
+ * and then copy it into the provided buffer, @rmsg.
+ */
+int
+ctrl_if_send_message_and_get_response(
+ ctrl_msg_t *msg,
+ ctrl_msg_t *rmsg,
+ long wait_state);
+
+/*
+ * Request a callback when there is /possibly/ space to immediately send a
+ * message to the domain controller. This function returns 0 if there is
+ * already space to transmit a message --- in this case the callback task /may/
+ * still be executed. If this function returns 1 then the callback /will/ be
+ * executed when space becomes available.
+ */
+int
+ctrl_if_enqueue_space_callback(
+ struct tq_struct *task);
+
+/*
+ * Send a response (@msg) to a message from the domain controller. This will
+ * never block.
+ * Notes:
+ * 1. The @msg is copied and so can be freed after this function returns.
+ * 2. The @msg may be the original request message, modified in-place.
+ */
+void
+ctrl_if_send_response(
+ ctrl_msg_t *msg);
+
+/*
+ * Register a receiver for typed messages from the domain controller. The
+ * handler (@hnd) is called for every received message of specified @type.
+ * Returns TRUE (non-zero) if the handler was successfully registered.
+ * If CALLBACK_IN_BLOCKING_CONTEXT is specified in @flags then callbacks will
+ * occur in a context in which it is safe to yield (i.e., process context).
+ */
+#define CALLBACK_IN_BLOCKING_CONTEXT 1
+int ctrl_if_register_receiver(
+ u8 type,
+ ctrl_msg_handler_t hnd,
+ unsigned int flags);
+
+/*
+ * Unregister a receiver for typed messages from the domain controller. The
+ * handler (@hnd) will not be executed after this function returns.
+ */
+void
+ctrl_if_unregister_receiver(
+ u8 type, ctrl_msg_handler_t hnd);
+
+/* Suspend/resume notifications. */
+void ctrl_if_suspend(void);
+void ctrl_if_resume(void);
+
+/* Start-of-day setup. */
+void ctrl_if_init(void);
+
+/*
+ * Returns TRUE if there are no outstanding message requests at the domain
+ * controller. This can be used to ensure that messages have really flushed
+ * through when it is not possible to use the response-callback interface.
+ * WARNING: If other subsystems are using the control interface then this
+ * function might never return TRUE!
+ */
+int ctrl_if_transmitter_empty(void); /* !! DANGEROUS FUNCTION !! */
+
+/*
+ * Manually discard response messages from the domain controller.
+ * WARNING: This is usually done automatically -- this function should only
+ * be called when normal interrupt mechanisms are disabled!
+ */
+void ctrl_if_discard_responses(void); /* !! DANGEROUS FUNCTION !! */
+
+#endif /* __ASM_XEN__CTRL_IF_H__ */
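
[Editor's sketch] A minimal sketch of the asynchronous request path documented above (the message fields and handler are placeholders):

    static void my_rsp_handler(ctrl_msg_t *msg, unsigned long id)
    {
        /* msg is the response, valid only until this handler returns */
    }

    static int send_req(void)
    {
        ctrl_msg_t msg = { .type = 0 /* placeholder */, .length = 0 };
        return ctrl_if_send_message_noblock(&msg, my_rsp_handler, 0);
    }
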
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/include/asm-xen/evtchn.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/evtchn.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,104 @@
+/******************************************************************************
+ * evtchn.h
+ *
+ * Communication via Xen event channels.
+ * Also definitions for the device that demuxes notifications to userspace.
+ *
+ * Copyright (c) 2004, K A Fraser
+ *
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef __ASM_EVTCHN_H__
+#define __ASM_EVTCHN_H__
+
+#include <linux/config.h>
+#include <asm-xen/hypervisor.h>
+#include <asm/ptrace.h>
+#include <asm/synch_bitops.h>
+#include <asm-xen/xen-public/event_channel.h>
+
+/*
+ * LOW-LEVEL DEFINITIONS
+ */
+
+/* Entry point for notifications into Linux subsystems. */
+asmlinkage void evtchn_do_upcall(struct pt_regs *regs);
+
+/* Entry point for notifications into the userland character device. */
+void evtchn_device_upcall(int port);
+
+static inline void mask_evtchn(int port)
+{
+ shared_info_t *s = HYPERVISOR_shared_info;
+ synch_set_bit(port, &s->evtchn_mask[0]);
+}
+
+static inline void unmask_evtchn(int port)
+{
+ shared_info_t *s = HYPERVISOR_shared_info;
+
+ synch_clear_bit(port, &s->evtchn_mask[0]);
+
+ /*
+ * The following is basically the equivalent of 'hw_resend_irq'. Just like
+ * a real IO-APIC we 'lose the interrupt edge' if the channel is masked.
+ */
+ if ( synch_test_bit (port, &s->evtchn_pending[0]) &&
+ !synch_test_and_set_bit(port>>5, &s->evtchn_pending_sel) )
+ {
+ s->vcpu_data[0].evtchn_upcall_pending = 1;
+ if ( !s->vcpu_data[0].evtchn_upcall_mask )
+ force_evtchn_callback();
+ }
+}
+
+static inline void clear_evtchn(int port)
+{
+ shared_info_t *s = HYPERVISOR_shared_info;
+ synch_clear_bit(port, &s->evtchn_pending[0]);
+}
+
+static inline int notify_via_evtchn(int port)
+{
+ evtchn_op_t op;
+ op.cmd = EVTCHNOP_send;
+ op.u.send.local_port = port;
+ return HYPERVISOR_event_channel_op(&op);
+}
+
+/*
+ * CHARACTER-DEVICE DEFINITIONS
+ */
+
+/* /dev/xen/evtchn resides at device number major=10, minor=201 */
+#define EVTCHN_MINOR 201
+
+/* /dev/xen/evtchn ioctls: */
+/* EVTCHN_RESET: Clear and reinit the event buffer. Clear error condition. */
+#define EVTCHN_RESET _IO('E', 1)
+/* EVTCHN_BIND: Bind to the specified event-channel port. */
+#define EVTCHN_BIND _IO('E', 2)
+/* EVTCHN_UNBIND: Unbind from the specified event-channel port. */
+#define EVTCHN_UNBIND _IO('E', 3)
+
+#endif /* __ASM_EVTCHN_H__ */
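
[Editor's sketch] A usage sketch of the port primitives above, in the shape of a typical ring-buffer event handler:

    clear_evtchn(port);         /* ack before processing */
    /* ... drain the shared ring ... */
    notify_via_evtchn(port);    /* EVTCHNOP_send to the remote end */
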
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/include/asm-xen/foreign_page.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/foreign_page.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,30 @@
+/******************************************************************************
+ * foreign_page.h
+ *
+ * Provide a "foreign" page type, that is owned by a foreign allocator and
+ * not the normal buddy allocator in page_alloc.c
+ *
+ * Copyright (c) 2004, K A Fraser
+ */
+
+#ifndef __ASM_XEN_FOREIGN_PAGE_H__
+#define __ASM_XEN_FOREIGN_PAGE_H__
+
+#define PG_foreign PG_arch_1
+
+#define PageForeign(page) test_bit(PG_foreign, &(page)->flags)
+
+#define SetPageForeign(page, dtor) do { \
+ set_bit(PG_foreign, &(page)->flags); \
+ (page)->mapping = (void *)dtor; \
+} while (0)
+
+#define ClearPageForeign(page) do { \
+ clear_bit(PG_foreign, &(page)->flags); \
+ (page)->mapping = NULL; \
+} while (0)
+
+#define PageForeignDestructor(page) \
+ ( (void (*) (struct page *)) (page)->mapping )
+
+#endif /* __ASM_XEN_FOREIGN_PAGE_H__ */
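
[Editor's sketch] A minimal sketch of the destructor protocol above (my_page_release is a hypothetical callback):

    static void my_page_release(struct page *page)
    {
        ClearPageForeign(page);
        /* hand the page back to its real owner here */
    }

    SetPageForeign(page, my_page_release);
    /* ... later, on the free path ... */
    if (PageForeign(page))
        (*PageForeignDestructor(page))(page);
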
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/include/asm-xen/hypervisor.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/hypervisor.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,582 @@
+/******************************************************************************
+ * hypervisor.h
+ *
+ * Linux-specific hypervisor handling.
+ *
+ * Copyright (c) 2002-2004, K A Fraser
+ *
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef __HYPERVISOR_H__
+#define __HYPERVISOR_H__
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <asm-xen/xen-public/xen.h>
+#include <asm-xen/xen-public/dom0_ops.h>
+#include <asm-xen/xen-public/io/domain_controller.h>
+#include <asm/ptrace.h>
+#include <asm/page.h>
+#if defined(__i386__)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+#include <asm-generic/pgtable-nopmd.h>
+#endif
+#endif
+
+/* arch/xen/i386/kernel/setup.c */
+union xen_start_info_union
+{
+ start_info_t xen_start_info;
+ char padding[512];
+};
+extern union xen_start_info_union xen_start_info_union;
+#define xen_start_info (xen_start_info_union.xen_start_info)
+
+/* arch/xen/kernel/evtchn.c */
+/* Force a proper event-channel callback from Xen. */
+void force_evtchn_callback(void);
+
+/* arch/xen/kernel/process.c */
+void xen_cpu_idle (void);
+
+/* arch/xen/i386/kernel/hypervisor.c */
+void do_hypervisor_callback(struct pt_regs *regs);
+
+/* arch/xen/i386/kernel/head.S */
+void lgdt_finish(void);
+
+/* arch/xen/i386/mm/hypervisor.c */
+/*
+ * NB. ptr values should be PHYSICAL, not MACHINE. 'vals' should already
+ * be MACHINE addresses.
+ */
+
+extern unsigned int mmu_update_queue_idx;
+
+void queue_l1_entry_update(pte_t *ptr, unsigned long val);
+void queue_l2_entry_update(pmd_t *ptr, pmd_t val);
+void queue_pt_switch(unsigned long ptr);
+void queue_tlb_flush(void);
+void queue_invlpg(unsigned long ptr);
+void queue_pgd_pin(unsigned long ptr);
+void queue_pgd_unpin(unsigned long ptr);
+void queue_pte_pin(unsigned long ptr);
+void queue_pte_unpin(unsigned long ptr);
+void queue_set_ldt(unsigned long ptr, unsigned long bytes);
+void queue_machphys_update(unsigned long mfn, unsigned long pfn);
+void xen_l1_entry_update(pte_t *ptr, unsigned long val);
+void xen_l2_entry_update(pmd_t *ptr, pmd_t val);
+void xen_pt_switch(unsigned long ptr);
+void xen_tlb_flush(void);
+void xen_invlpg(unsigned long ptr);
+void xen_pgd_pin(unsigned long ptr);
+void xen_pgd_unpin(unsigned long ptr);
+void xen_pte_pin(unsigned long ptr);
+void xen_pte_unpin(unsigned long ptr);
+void xen_set_ldt(unsigned long ptr, unsigned long bytes);
+void xen_machphys_update(unsigned long mfn, unsigned long pfn);
+
+void _flush_page_update_queue(void);
+static inline int flush_page_update_queue(void)
+{
+ unsigned int idx = mmu_update_queue_idx;
+ if ( idx != 0 ) _flush_page_update_queue();
+ return idx;
+}
+#define xen_flush_page_update_queue() (_flush_page_update_queue())
+#define XEN_flush_page_update_queue() (_flush_page_update_queue())
+void MULTICALL_flush_page_update_queue(void);
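
[Editor's sketch] A usage sketch of the queued-update interface above: batch page-table writes, then push them to Xen in a single mmu_update hypercall (ptep and new_val are hypothetical; note the PHYSICAL/MACHINE rule above):

    queue_l1_entry_update(ptep, new_val);
    queue_tlb_flush();
    flush_page_update_queue();  /* one hypercall flushes the batch */
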
+
+#ifdef CONFIG_XEN_PHYSDEV_ACCESS
+/* Allocate a contiguous empty region of low memory. Return virtual start. */
+unsigned long allocate_empty_lowmem_region(unsigned long pages);
+#endif
+
+/*
+ * Assembler stubs for hyper-calls.
+ */
+
+static inline int
+HYPERVISOR_set_trap_table(
+ trap_info_t *table)
+{
+ int ret;
+ unsigned long ignore;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ignore)
+ : "0" (__HYPERVISOR_set_trap_table), "1" (table)
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_mmu_update(
+ mmu_update_t *req, int count, int *success_count)
+{
+ int ret;
+ unsigned long ign1, ign2, ign3;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3)
+ : "0" (__HYPERVISOR_mmu_update), "1" (req), "2" (count),
+ "3" (success_count)
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_set_gdt(
+ unsigned long *frame_list, int entries)
+{
+ int ret;
+ unsigned long ign1, ign2;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2)
+ : "0" (__HYPERVISOR_set_gdt), "1" (frame_list), "2" (entries)
+ : "memory" );
+
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_stack_switch(
+ unsigned long ss, unsigned long esp)
+{
+ int ret;
+ unsigned long ign1, ign2;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2)
+ : "0" (__HYPERVISOR_stack_switch), "1" (ss), "2" (esp)
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_set_callbacks(
+ unsigned long event_selector, unsigned long event_address,
+ unsigned long failsafe_selector, unsigned long failsafe_address)
+{
+ int ret;
+ unsigned long ign1, ign2, ign3, ign4;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4)
+ : "0" (__HYPERVISOR_set_callbacks), "1" (event_selector),
+ "2" (event_address), "3" (failsafe_selector), "4" (failsafe_address)
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_fpu_taskswitch(
+ void)
+{
+ int ret;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret) : "0" (__HYPERVISOR_fpu_taskswitch) : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_yield(
+ void)
+{
+ int ret;
+ unsigned long ign;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign)
+ : "0" (__HYPERVISOR_sched_op), "1" (SCHEDOP_yield)
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_block(
+ void)
+{
+ int ret;
+ unsigned long ign1;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1)
+ : "0" (__HYPERVISOR_sched_op), "1" (SCHEDOP_block)
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_shutdown(
+ void)
+{
+ int ret;
+ unsigned long ign1;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1)
+ : "0" (__HYPERVISOR_sched_op),
+ "1" (SCHEDOP_shutdown | (SHUTDOWN_poweroff << SCHEDOP_reasonshift))
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_reboot(
+ void)
+{
+ int ret;
+ unsigned long ign1;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1)
+ : "0" (__HYPERVISOR_sched_op),
+ "1" (SCHEDOP_shutdown | (SHUTDOWN_reboot << SCHEDOP_reasonshift))
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_suspend(
+ unsigned long srec)
+{
+ int ret;
+ unsigned long ign1, ign2;
+
+ /* NB. On suspend, control software expects a suspend record in %esi. */
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=S" (ign2)
+ : "0" (__HYPERVISOR_sched_op),
+ "b" (SCHEDOP_shutdown | (SHUTDOWN_suspend << SCHEDOP_reasonshift)),
+ "S" (srec) : "memory");
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_crash(
+ void)
+{
+ int ret;
+ unsigned long ign1;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1)
+ : "0" (__HYPERVISOR_sched_op),
+ "1" (SCHEDOP_shutdown | (SHUTDOWN_crash << SCHEDOP_reasonshift))
+ : "memory" );
+
+ return ret;
+}
+
+static inline long
+HYPERVISOR_set_timer_op(
+ u64 timeout)
+{
+ int ret;
+ unsigned long timeout_hi = (unsigned long)(timeout>>32);
+ unsigned long timeout_lo = (unsigned long)timeout;
+ unsigned long ign1, ign2;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2)
+ : "0" (__HYPERVISOR_set_timer_op), "b" (timeout_hi), "c" (timeout_lo)
+ : "memory");
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_dom0_op(
+ dom0_op_t *dom0_op)
+{
+ int ret;
+ unsigned long ign1;
+
+ dom0_op->interface_version = DOM0_INTERFACE_VERSION;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1)
+ : "0" (__HYPERVISOR_dom0_op), "1" (dom0_op)
+ : "memory");
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_set_debugreg(
+ int reg, unsigned long value)
+{
+ int ret;
+ unsigned long ign1, ign2;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2)
+ : "0" (__HYPERVISOR_set_debugreg), "1" (reg), "2" (value)
+ : "memory" );
+
+ return ret;
+}
+
+static inline unsigned long
+HYPERVISOR_get_debugreg(
+ int reg)
+{
+ unsigned long ret;
+ unsigned long ign;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign)
+ : "0" (__HYPERVISOR_get_debugreg), "1" (reg)
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_update_descriptor(
+ unsigned long ma, unsigned long word1, unsigned long word2)
+{
+ int ret;
+ unsigned long ign1, ign2, ign3;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3)
+ : "0" (__HYPERVISOR_update_descriptor), "1" (ma), "2" (word1),
+ "3" (word2)
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_set_fast_trap(
+ int idx)
+{
+ int ret;
+ unsigned long ign;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign)
+ : "0" (__HYPERVISOR_set_fast_trap), "1" (idx)
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_dom_mem_op(
+ unsigned int op, unsigned long *extent_list,
+ unsigned long nr_extents, unsigned int extent_order)
+{
+ int ret;
+ unsigned long ign1, ign2, ign3, ign4, ign5;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4),
+ "=D" (ign5)
+ : "0" (__HYPERVISOR_dom_mem_op), "1" (op), "2" (extent_list),
+ "3" (nr_extents), "4" (extent_order), "5" (DOMID_SELF)
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_multicall(
+ void *call_list, int nr_calls)
+{
+ int ret;
+ unsigned long ign1, ign2;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2)
+ : "0" (__HYPERVISOR_multicall), "1" (call_list), "2" (nr_calls)
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_update_va_mapping(
+ unsigned long page_nr, pte_t new_val, unsigned long flags)
+{
+ int ret;
+ unsigned long ign1, ign2, ign3;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3)
+ : "0" (__HYPERVISOR_update_va_mapping),
+ "1" (page_nr), "2" ((new_val).pte_low), "3" (flags)
+ : "memory" );
+
+ if ( unlikely(ret < 0) )
+ {
+ printk(KERN_ALERT "Failed update VA mapping: %08lx, %08lx, %08lx\n",
+ page_nr, (new_val).pte_low, flags);
+ BUG();
+ }
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_event_channel_op(
+ void *op)
+{
+ int ret;
+ unsigned long ignore;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ignore)
+ : "0" (__HYPERVISOR_event_channel_op), "1" (op)
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_xen_version(
+ int cmd)
+{
+ int ret;
+ unsigned long ignore;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ignore)
+ : "0" (__HYPERVISOR_xen_version), "1" (cmd)
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_console_io(
+ int cmd, int count, char *str)
+{
+ int ret;
+ unsigned long ign1, ign2, ign3;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3)
+ : "0" (__HYPERVISOR_console_io), "1" (cmd), "2" (count), "3" (str)
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_physdev_op(
+ void *physdev_op)
+{
+ int ret;
+ unsigned long ign;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign)
+ : "0" (__HYPERVISOR_physdev_op), "1" (physdev_op)
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_grant_table_op(
+ unsigned int cmd, void *uop, unsigned int count)
+{
+ int ret;
+ unsigned long ign1, ign2, ign3;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3)
+ : "0" (__HYPERVISOR_grant_table_op), "1" (cmd), "2" (count), "3" (uop)
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_update_va_mapping_otherdomain(
+ unsigned long page_nr, pte_t new_val, unsigned long flags, domid_t domid)
+{
+ int ret;
+ unsigned long ign1, ign2, ign3, ign4;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4)
+ : "0" (__HYPERVISOR_update_va_mapping_otherdomain),
+ "1" (page_nr), "2" ((new_val).pte_low), "3" (flags), "4" (domid) :
+ "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_vm_assist(
+ unsigned int cmd, unsigned int type)
+{
+ int ret;
+ unsigned long ign1, ign2;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2)
+ : "0" (__HYPERVISOR_vm_assist), "1" (cmd), "2" (type)
+ : "memory" );
+
+ return ret;
+}
+
+#endif /* __HYPERVISOR_H__ */
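
Every wrapper above uses the same convention: the hypercall number travels in
%eax via the "0" matched constraint, arguments in %ebx/%ecx/%edx/%esi/%edi, and
TRAP_INSTR (defined elsewhere in this tree) traps into Xen. A minimal caller
sketch, assuming CONSOLEIO_write comes from the shared Xen public headers:

    /* Sketch only; CONSOLEIO_write is assumed from the public headers. */
    static void xen_puts(const char *s, int len)
    {
        (void)HYPERVISOR_console_io(CONSOLEIO_write, len, (char *)s);
    }
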
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/include/asm-xen/linux-public/privcmd.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/linux-public/privcmd.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,88 @@
+/******************************************************************************
+ * privcmd.h
+ *
+ * Interface to /proc/xen/privcmd.
+ *
+ * Copyright (c) 2003-2004, K A Fraser
+ *
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef __PRIVCMD_H__
+#define __PRIVCMD_H__
+
+typedef struct privcmd_hypercall
+{
+ unsigned long op;
+ unsigned long arg[5];
+} privcmd_hypercall_t;
+
+typedef struct privcmd_mmap_entry {
+ unsigned long va;
+ unsigned long mfn;
+ unsigned long npages;
+} privcmd_mmap_entry_t;
+
+typedef struct privcmd_mmap {
+ int num;
+ domid_t dom; /* target domain */
+ privcmd_mmap_entry_t *entry;
+} privcmd_mmap_t;
+
+typedef struct privcmd_mmapbatch {
+ int num; /* number of pages to populate */
+ domid_t dom; /* target domain */
+ unsigned long addr; /* virtual address */
+ unsigned long *arr; /* array of mfns - top nibble set on err */
+} privcmd_mmapbatch_t;
+
+typedef struct privcmd_blkmsg
+{
+ unsigned long op;
+ void *buf;
+ int buf_size;
+} privcmd_blkmsg_t;
+
+/*
+ * @cmd: IOCTL_PRIVCMD_HYPERCALL
+ * @arg: &privcmd_hypercall_t
+ * Return: Value returned from execution of the specified hypercall.
+ */
+#define IOCTL_PRIVCMD_HYPERCALL \
+ _IOC(_IOC_NONE, 'P', 0, sizeof(privcmd_hypercall_t))
+
+/*
+ * @cmd: IOCTL_PRIVCMD_INITDOMAIN_EVTCHN
+ * @arg: n/a
+ * Return: Port associated with domain-controller end of control event channel
+ * for the initial domain.
+ */
+#define IOCTL_PRIVCMD_INITDOMAIN_EVTCHN \
+ _IOC(_IOC_NONE, 'P', 1, 0)
+#define IOCTL_PRIVCMD_MMAP \
+ _IOC(_IOC_NONE, 'P', 2, sizeof(privcmd_mmap_t))
+#define IOCTL_PRIVCMD_MMAPBATCH \
+ _IOC(_IOC_NONE, 'P', 3, sizeof(privcmd_mmapbatch_t))
+#define IOCTL_PRIVCMD_GET_MACH2PHYS_START_MFN \
+ _IOC(_IOC_READ, 'P', 4, sizeof(unsigned long))
+
+#endif /* __PRIVCMD_H__ */
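
The ioctl numbers above are driven from privileged userspace through
/proc/xen/privcmd. A hedged sketch (not part of the patch); the hypercall
number is assumed from the shared public headers:

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>

    int get_xen_version(void)
    {
        privcmd_hypercall_t call = { __HYPERVISOR_xen_version, { 0, 0, 0, 0, 0 } };
        int fd = open("/proc/xen/privcmd", O_RDWR);
        int ret;

        if (fd < 0)
            return -1;
        /* Per the comment above, ioctl() returns the hypercall's result. */
        ret = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
        close(fd);
        return ret;
    }
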
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/include/asm-xen/linux-public/suspend.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/linux-public/suspend.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,43 @@
+/******************************************************************************
+ * suspend.h
+ *
+ * Copyright (c) 2003-2004, K A Fraser
+ *
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef __ASM_XEN_SUSPEND_H__
+#define __ASM_XEN_SUSPEND_H__
+
+typedef struct suspend_record_st {
+ /* To be filled in before resume. */
+ start_info_t resume_info;
+ /*
+ * The number of a machine frame containing, in sequence, the number of
+ * each machine frame that contains PFN -> MFN translation table data.
+ */
+ unsigned long pfn_to_mfn_frame_list;
+ /* Number of entries in the PFN -> MFN translation table. */
+ unsigned long nr_pfns;
+} suspend_record_t;
+
+#endif /* __ASM_XEN_SUSPEND_H__ */
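
For context, a guest fills in the translation-table fields and hands the
record's machine frame to HYPERVISOR_suspend() from hypervisor.h above; the
resume_info field is written by the control tools before resume. A sketch,
where virt_to_machine() and max_pfn are assumptions taken from elsewhere in
the sparse tree:

    static suspend_record_t suspend_record;    /* illustrative instance */

    static int do_suspend(unsigned long frame_list_mfn)
    {
        suspend_record.nr_pfns = max_pfn;
        suspend_record.pfn_to_mfn_frame_list = frame_list_mfn;
        return HYPERVISOR_suspend(virt_to_machine(&suspend_record) >> PAGE_SHIFT);
    }
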
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/include/asm-xen/multicall.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/multicall.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,107 @@
+/******************************************************************************
+ * multicall.h
+ *
+ * Copyright (c) 2003-2004, K A Fraser
+ *
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef __MULTICALL_H__
+#define __MULTICALL_H__
+
+#include <asm-xen/hypervisor.h>
+
+extern multicall_entry_t multicall_list[];
+extern int nr_multicall_ents;
+
+static inline void queue_multicall0(unsigned long op)
+{
+ int i = nr_multicall_ents;
+ multicall_list[i].op = op;
+ nr_multicall_ents = i+1;
+}
+
+static inline void queue_multicall1(unsigned long op, unsigned long arg1)
+{
+ int i = nr_multicall_ents;
+ multicall_list[i].op = op;
+ multicall_list[i].args[0] = arg1;
+ nr_multicall_ents = i+1;
+}
+
+static inline void queue_multicall2(
+ unsigned long op, unsigned long arg1, unsigned long arg2)
+{
+ int i = nr_multicall_ents;
+ multicall_list[i].op = op;
+ multicall_list[i].args[0] = arg1;
+ multicall_list[i].args[1] = arg2;
+ nr_multicall_ents = i+1;
+}
+
+static inline void queue_multicall3(
+ unsigned long op, unsigned long arg1, unsigned long arg2,
+ unsigned long arg3)
+{
+ int i = nr_multicall_ents;
+ multicall_list[i].op = op;
+ multicall_list[i].args[0] = arg1;
+ multicall_list[i].args[1] = arg2;
+ multicall_list[i].args[2] = arg3;
+ nr_multicall_ents = i+1;
+}
+
+static inline void queue_multicall4(
+ unsigned long op, unsigned long arg1, unsigned long arg2,
+ unsigned long arg3, unsigned long arg4)
+{
+ int i = nr_multicall_ents;
+ multicall_list[i].op = op;
+ multicall_list[i].args[0] = arg1;
+ multicall_list[i].args[1] = arg2;
+ multicall_list[i].args[2] = arg3;
+ multicall_list[i].args[3] = arg4;
+ nr_multicall_ents = i+1;
+}
+
+static inline void queue_multicall5(
+ unsigned long op, unsigned long arg1, unsigned long arg2,
+ unsigned long arg3, unsigned long arg4, unsigned long arg5)
+{
+ int i = nr_multicall_ents;
+ multicall_list[i].op = op;
+ multicall_list[i].args[0] = arg1;
+ multicall_list[i].args[1] = arg2;
+ multicall_list[i].args[2] = arg3;
+ multicall_list[i].args[3] = arg4;
+ multicall_list[i].args[4] = arg5;
+ nr_multicall_ents = i+1;
+}
+
+static inline void execute_multicall_list(void)
+{
+ if ( unlikely(nr_multicall_ents == 0) ) return;
+ (void)HYPERVISOR_multicall(multicall_list, nr_multicall_ents);
+ nr_multicall_ents = 0;
+}
+
+#endif /* __MULTICALL_H__ */
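
Usage is the obvious queue-then-flush pattern; note that nothing here locks
multicall_list, so callers are assumed to serialize externally. A sketch
(hypercall numbers come from the shared public headers):

    static void set_two_debugregs(void)
    {
        queue_multicall2(__HYPERVISOR_set_debugreg, 0, 0);
        queue_multicall2(__HYPERVISOR_set_debugreg, 1, 0);
        execute_multicall_list();   /* one trap into Xen instead of two */
    }
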
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/include/asm-xen/queues.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/queues.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,81 @@
+
+/*
+ * Oh dear. Task queues were removed from Linux 2.6 and replaced by work
+ * queues. Unfortunately the semantics are not the same. With task queues we
+ * can defer work until a particular event occurs -- this is not
+ * straightforwardly done with work queues (queued work is performed asap, or
+ * after some fixed timeout). Conversely, work queues are a (slightly) neater
+ * way of deferring work to a process context than using task queues in 2.4.
+ *
+ * This is a bit of a needless reimplementation -- should have just pulled
+ * the code from 2.4, but I tried leveraging work queues to simplify things.
+ * They didn't help. :-(
+ */
+
+#ifndef __QUEUES_H__
+#define __QUEUES_H__
+
+#include <linux/version.h>
+#include <linux/list.h>
+#include <linux/workqueue.h>
+
+struct tq_struct {
+ void (*fn)(void *);
+ void *arg;
+ struct list_head list;
+ unsigned long pending;
+};
+#define INIT_TQUEUE(_name, _fn, _arg) \
+ do { \
+ INIT_LIST_HEAD(&(_name)->list); \
+ (_name)->pending = 0; \
+ (_name)->fn = (_fn); (_name)->arg = (_arg); \
+ } while ( 0 )
+#define DECLARE_TQUEUE(_name, _fn, _arg) \
+ struct tq_struct _name = { (_fn), (_arg), LIST_HEAD_INIT((_name).list), 0 }
+
+typedef struct {
+ struct list_head list;
+ spinlock_t lock;
+} task_queue;
+#define DECLARE_TASK_QUEUE(_name) \
+ task_queue _name = { LIST_HEAD_INIT((_name).list), SPIN_LOCK_UNLOCKED }
+
+static inline int queue_task(struct tq_struct *tqe, task_queue *tql)
+{
+ unsigned long flags;
+ if ( test_and_set_bit(0, &tqe->pending) )
+ return 0;
+ spin_lock_irqsave(&tql->lock, flags);
+ list_add_tail(&tqe->list, &tql->list);
+ spin_unlock_irqrestore(&tql->lock, flags);
+ return 1;
+}
+
+static inline void run_task_queue(task_queue *tql)
+{
+ struct list_head head, *ent;
+ struct tq_struct *tqe;
+ unsigned long flags;
+ void (*fn)(void *);
+ void *arg;
+
+ spin_lock_irqsave(&tql->lock, flags);
+ list_add(&head, &tql->list);
+ list_del_init(&tql->list);
+ spin_unlock_irqrestore(&tql->lock, flags);
+
+ while ( !list_empty(&head) )
+ {
+ ent = head.next;
+ list_del_init(ent);
+ tqe = list_entry(ent, struct tq_struct, list);
+ fn = tqe->fn;
+ arg = tqe->arg;
+ wmb();
+ tqe->pending = 0;
+ fn(arg);
+ }
+}
+
+#endif /* __QUEUES_H__ */
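
A usage sketch for the shim (names illustrative): work is queued from any
context and drained when the event of interest fires; the pending bit makes
queue_task() a no-op while an entry is already outstanding.

    static void tx_work(void *unused)
    {
        /* deferred work goes here */
    }
    static DECLARE_TQUEUE(tx_tqe, tx_work, NULL);
    static DECLARE_TASK_QUEUE(tx_queue);

    static void defer_tx(void)
    {
        queue_task(&tx_tqe, &tx_queue);     /* idempotent while pending */
    }

    static void tx_event(void)              /* e.g. an event-channel callback */
    {
        run_task_queue(&tx_queue);
    }
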
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/include/asm-xen/xen_proc.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/xen_proc.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,13 @@
+
+#ifndef __ASM_XEN_PROC_H__
+#define __ASM_XEN_PROC_H__
+
+#include <linux/config.h>
+#include <linux/proc_fs.h>
+
+extern struct proc_dir_entry *create_xen_proc_entry(
+ const char *name, mode_t mode);
+extern void remove_xen_proc_entry(
+ const char *name);
+
+#endif /* __ASM_XEN_PROC_H__ */
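
A minimal, illustrative caller (not part of this patch):

    static int __init example_init(void)
    {
        struct proc_dir_entry *ent = create_xen_proc_entry("example", 0400);
        return (ent != NULL) ? 0 : -ENOMEM;
    }

    static void __exit example_exit(void)
    {
        remove_xen_proc_entry("example");
    }
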
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/include/linux/gfp.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/include/linux/gfp.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,138 @@
+#ifndef __LINUX_GFP_H
+#define __LINUX_GFP_H
+
+#include <linux/mmzone.h>
+#include <linux/stddef.h>
+#include <linux/linkage.h>
+#include <linux/config.h>
+
+struct vm_area_struct;
+
+/*
+ * GFP bitmasks..
+ */
+/* Zone modifiers in GFP_ZONEMASK (see linux/mmzone.h - low two bits) */
+#define __GFP_DMA 0x01
+#define __GFP_HIGHMEM 0x02
+
+/*
+ * Action modifiers - doesn't change the zoning
+ *
+ * __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt
+ * _might_ fail. This depends upon the particular VM implementation.
+ *
+ * __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
+ * cannot handle allocation failures.
+ *
+ * __GFP_NORETRY: The VM implementation must not retry indefinitely.
+ */
+#define __GFP_WAIT 0x10 /* Can wait and reschedule? */
+#define __GFP_HIGH 0x20 /* Should access emergency pools? */
+#define __GFP_IO 0x40 /* Can start physical IO? */
+#define __GFP_FS 0x80 /* Can call down to low-level FS? */
+#define __GFP_COLD 0x100 /* Cache-cold page required */
+#define __GFP_NOWARN 0x200 /* Suppress page allocation failure warning */
+#define __GFP_REPEAT 0x400 /* Retry the allocation. Might fail */
+#define __GFP_NOFAIL 0x800 /* Retry for ever. Cannot fail */
+#define __GFP_NORETRY 0x1000 /* Do not retry. Might fail */
+#define __GFP_NO_GROW 0x2000 /* Slab internal usage */
+#define __GFP_COMP 0x4000 /* Add compound page metadata */
+#define __GFP_ZERO 0x8000 /* Return zeroed page on success */
+
+#define __GFP_BITS_SHIFT 16 /* Room for 16 __GFP_FOO bits */
+#define __GFP_BITS_MASK ((1 << __GFP_BITS_SHIFT) - 1)
+
+/* if you forget to add the bitmask here kernel will crash, period */
+#define GFP_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS| \
+ __GFP_COLD|__GFP_NOWARN|__GFP_REPEAT| \
+ __GFP_NOFAIL|__GFP_NORETRY|__GFP_NO_GROW|__GFP_COMP)
+
+#define GFP_ATOMIC (__GFP_HIGH)
+#define GFP_NOIO (__GFP_WAIT)
+#define GFP_NOFS (__GFP_WAIT | __GFP_IO)
+#define GFP_KERNEL (__GFP_WAIT | __GFP_IO | __GFP_FS)
+#define GFP_USER (__GFP_WAIT | __GFP_IO | __GFP_FS)
+#define GFP_HIGHUSER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HIGHMEM)
+
+/* Flag - indicates that the buffer will be suitable for DMA. Ignored on some
+ platforms, used as appropriate on others */
+
+#define GFP_DMA __GFP_DMA
+
+
+/*
+ * There is only one page-allocator function, and two main namespaces to
+ * it. The alloc_page*() variants return 'struct page *' and as such
+ * can allocate highmem pages, the *get*page*() variants return
+ * virtual kernel addresses to the allocated page(s).
+ */
+
+/*
+ * We get the zone list from the current node and the gfp_mask.
+ * This zone list contains a maximum of MAXNODES*MAX_NR_ZONES zones.
+ *
+ * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets
+ * optimized to &contig_page_data at compile-time.
+ */
+
+/*
+ * If arch_free_page returns non-zero then the generic free_page code can
+ * immediately bail: the arch-specific function has done all the work.
+ */
+#ifndef HAVE_ARCH_FREE_PAGE
+#define arch_free_page(page, order) 0
+#endif
+
+extern struct page *
+FASTCALL(__alloc_pages(unsigned int, unsigned int, struct zonelist *));
+
+static inline struct page *alloc_pages_node(int nid, unsigned int gfp_mask,
+ unsigned int order)
+{
+ if (unlikely(order >= MAX_ORDER))
+ return NULL;
+
+ return __alloc_pages(gfp_mask, order,
+ NODE_DATA(nid)->node_zonelists + (gfp_mask & GFP_ZONEMASK));
+}
+
+#ifdef CONFIG_NUMA
+extern struct page *alloc_pages_current(unsigned gfp_mask, unsigned order);
+
+static inline struct page *
+alloc_pages(unsigned int gfp_mask, unsigned int order)
+{
+ if (unlikely(order >= MAX_ORDER))
+ return NULL;
+
+ return alloc_pages_current(gfp_mask, order);
+}
+extern struct page *alloc_page_vma(unsigned gfp_mask,
+ struct vm_area_struct *vma, unsigned long addr);
+#else
+#define alloc_pages(gfp_mask, order) \
+ alloc_pages_node(numa_node_id(), gfp_mask, order)
+#define alloc_page_vma(gfp_mask, vma, addr) alloc_pages(gfp_mask, 0)
+#endif
+#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
+
+extern unsigned long FASTCALL(__get_free_pages(unsigned int gfp_mask, unsigned int order));
+extern unsigned long FASTCALL(get_zeroed_page(unsigned int gfp_mask));
+
+#define __get_free_page(gfp_mask) \
+ __get_free_pages((gfp_mask),0)
+
+#define __get_dma_pages(gfp_mask, order) \
+ __get_free_pages((gfp_mask) | GFP_DMA,(order))
+
+extern void FASTCALL(__free_pages(struct page *page, unsigned int order));
+extern void FASTCALL(free_pages(unsigned long addr, unsigned int order));
+extern void FASTCALL(free_hot_page(struct page *page));
+extern void FASTCALL(free_cold_page(struct page *page));
+
+#define __free_page(page) __free_pages((page), 0)
+#define free_page(addr) free_pages((addr),0)
+
+void page_alloc_init(void);
+
+#endif /* __LINUX_GFP_H */
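
The two allocator namespaces described in the header comment look like this
in practice; a hedged sketch:

    static int gfp_demo(void)
    {
        struct page *page = alloc_page(GFP_HIGHUSER);       /* may be highmem */
        unsigned long addr = __get_free_page(GFP_KERNEL);   /* always mapped */

        if (page == NULL || addr == 0) {
            if (page)
                __free_page(page);
            if (addr)
                free_page(addr);
            return -ENOMEM;
        }
        /* ... use the memory ... */
        __free_page(page);
        free_page(addr);
        return 0;
    }
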
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/include/linux/highmem.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/include/linux/highmem.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,106 @@
+#ifndef _LINUX_HIGHMEM_H
+#define _LINUX_HIGHMEM_H
+
+#include <linux/config.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+
+#include <asm/cacheflush.h>
+
+#ifdef CONFIG_HIGHMEM
+
+#include <asm/highmem.h>
+
+/* declarations for linux/mm/highmem.c */
+unsigned int nr_free_highpages(void);
+void kmap_flush_unused(void);
+
+#else /* CONFIG_HIGHMEM */
+
+static inline unsigned int nr_free_highpages(void) { return 0; }
+static inline void kmap_flush_unused(void) { }
+
+static inline void *kmap(struct page *page)
+{
+ might_sleep();
+ return page_address(page);
+}
+
+#define kunmap(page) do { (void) (page); } while (0)
+
+#define kmap_atomic(page, idx) page_address(page)
+#define kunmap_atomic(addr, idx) do { } while (0)
+#define kmap_atomic_to_page(ptr) virt_to_page(ptr)
+
+#endif /* CONFIG_HIGHMEM */
+
+/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
+static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
+{
+ void *addr = kmap_atomic(page, KM_USER0);
+ clear_user_page(addr, vaddr, page);
+ kunmap_atomic(addr, KM_USER0);
+ /* Make sure this page is cleared on other CPUs too before using it */
+ smp_wmb();
+}
+
+#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+static inline struct page *
+alloc_zeroed_user_highpage(struct vm_area_struct *vma, unsigned long vaddr)
+{
+ struct page *page = alloc_page_vma(GFP_HIGHUSER, vma, vaddr);
+
+ if (page)
+ clear_user_highpage(page, vaddr);
+
+ return page;
+}
+#endif
+
+static inline void clear_highpage(struct page *page)
+{
+ void *kaddr = kmap_atomic(page, KM_USER0);
+ clear_page(kaddr);
+ kunmap_atomic(kaddr, KM_USER0);
+}
+
+/*
+ * Same but also flushes aliased cache contents to RAM.
+ */
+static inline void memclear_highpage_flush(struct page *page, unsigned int offset, unsigned int size)
+{
+ void *kaddr;
+
+ BUG_ON(offset + size > PAGE_SIZE);
+
+ kaddr = kmap_atomic(page, KM_USER0);
+ memset((char *)kaddr + offset, 0, size);
+ flush_dcache_page(page);
+ kunmap_atomic(kaddr, KM_USER0);
+}
+
+static inline void copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr)
+{
+ char *vfrom, *vto;
+
+ vfrom = kmap_atomic(from, KM_USER0);
+ vto = kmap_atomic(to, KM_USER1);
+ copy_user_page(vto, vfrom, vaddr, to);
+ kunmap_atomic(vfrom, KM_USER0);
+ kunmap_atomic(vto, KM_USER1);
+ /* Make sure the copied data is visible to other CPUs before using it */
+ smp_wmb();
+}
+
+static inline void copy_highpage(struct page *to, struct page *from)
+{
+ char *vfrom, *vto;
+
+ vfrom = kmap_atomic(from, KM_USER0);
+ vto = kmap_atomic(to, KM_USER1);
+ copy_page(vto, vfrom);
+ kunmap_atomic(vfrom, KM_USER0);
+ kunmap_atomic(vto, KM_USER1);
+}
+
+#endif /* _LINUX_HIGHMEM_H */
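
The atomic kmap API above is used like this (a sketch: the caller must not
sleep between map and unmap, and n is assumed to be at most PAGE_SIZE):

    static void zero_start_of_page(struct page *page, unsigned int n)
    {
        /* Collapses to plain page_address() when !CONFIG_HIGHMEM. */
        char *kaddr = kmap_atomic(page, KM_USER0);
        memset(kaddr, 0, n);
        kunmap_atomic(kaddr, KM_USER0);
    }
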
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/include/linux/irq.h
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/include/linux/irq.h Tue Aug 9 15:17:45 2005
@@ -0,0 +1,98 @@
+#ifndef __irq_h
+#define __irq_h
+
+/*
+ * Please do not include this file in generic code. There is currently
+ * no requirement for any architecture to implement anything held
+ * within this file.
+ *
+ * Thanks. --rmk
+ */
+
+#include <linux/config.h>
+
+#if !defined(CONFIG_ARCH_S390)
+
+#include <linux/linkage.h>
+#include <linux/cache.h>
+#include <linux/spinlock.h>
+#include <linux/cpumask.h>
+
+#include <asm/irq.h>
+#include <asm/ptrace.h>
+
+/*
+ * IRQ line status.
+ */
+#define IRQ_INPROGRESS 1 /* IRQ handler active - do not enter! */
+#define IRQ_DISABLED 2 /* IRQ disabled - do not enter! */
+#define IRQ_PENDING 4 /* IRQ pending - replay on enable */
+#define IRQ_REPLAY 8 /* IRQ has been replayed but not acked yet */
+#define IRQ_AUTODETECT 16 /* IRQ is being autodetected */
+#define IRQ_WAITING 32 /* IRQ not yet seen - for autodetection */
+#define IRQ_LEVEL 64 /* IRQ level triggered */
+#define IRQ_MASKED 128 /* IRQ masked - shouldn't be seen again */
+#define IRQ_PER_CPU 256 /* IRQ is per CPU */
+
+/*
+ * Interrupt controller descriptor. This is all we need
+ * to describe about the low-level hardware.
+ */
+struct hw_interrupt_type {
+ const char * typename;
+ unsigned int (*startup)(unsigned int irq);
+ void (*shutdown)(unsigned int irq);
+ void (*enable)(unsigned int irq);
+ void (*disable)(unsigned int irq);
+ void (*ack)(unsigned int irq);
+ void (*end)(unsigned int irq);
+ void (*set_affinity)(unsigned int irq, cpumask_t dest);
+};
+
+typedef struct hw_interrupt_type hw_irq_controller;
+
+/*
+ * This is the "IRQ descriptor", which contains various information
+ * about the irq, including what kind of hardware handling it has,
+ * whether it is disabled etc etc.
+ *
+ * Pad this out to 32 bytes for cache and indexing reasons.
+ */
+typedef struct irq_desc {
+ hw_irq_controller *handler;
+ void *handler_data;
+ struct irqaction *action; /* IRQ action list */
+ unsigned int status; /* IRQ status */
+ unsigned int depth; /* nested irq disables */
+ unsigned int irq_count; /* For detecting broken interrupts */
+ unsigned int irqs_unhandled;
+ spinlock_t lock;
+} ____cacheline_aligned irq_desc_t;
+
+extern irq_desc_t irq_desc [NR_IRQS];
+
+#include <asm/hw_irq.h> /* the arch dependent stuff */
+
+extern int setup_irq(unsigned int irq, struct irqaction * new);
+extern int teardown_irq(unsigned int irq, struct irqaction * old);
+
+#ifdef CONFIG_GENERIC_HARDIRQS
+extern cpumask_t irq_affinity[NR_IRQS];
+extern int no_irq_affinity;
+extern int noirqdebug_setup(char *str);
+
+extern fastcall int handle_IRQ_event(unsigned int irq, struct pt_regs *regs,
+ struct irqaction *action);
+extern fastcall unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs);
+extern void note_interrupt(unsigned int irq, irq_desc_t *desc, int action_ret);
+extern void report_bad_irq(unsigned int irq, irq_desc_t *desc, int action_ret);
+extern int can_request_irq(unsigned int irq, unsigned long irqflags);
+
+extern void init_irq_proc(void);
+#endif
+
+extern hw_irq_controller no_irq_type; /* needed in every arch ? */
+
+#endif
+
+#endif /* __irq_h */
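
An interrupt controller plugs into this descriptor by filling in a
hw_interrupt_type, much as the Xen event-channel code elsewhere in this
sparse tree does for its dynamic IRQs. The names below are illustrative only:

    static unsigned int example_startup(unsigned int irq)
    {
        return 0;   /* no pending interrupt to replay */
    }

    static void example_noop(unsigned int irq)
    {
    }

    static struct hw_interrupt_type example_irq_type = {
        .typename     = "example",
        .startup      = example_startup,
        .shutdown     = example_noop,
        .enable       = example_noop,
        .disable      = example_noop,
        .ack          = example_noop,
        .end          = example_noop,
        .set_affinity = NULL,
    };
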
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/kernel/irq/manage.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/kernel/irq/manage.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,392 @@
+/*
+ * linux/kernel/irq/manage.c
+ *
+ * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
+ *
+ * This file contains driver APIs to the irq subsystem.
+ */
+
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/interrupt.h>
+
+#include "internals.h"
+
+#ifdef CONFIG_SMP
+
+cpumask_t irq_affinity[NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };
+
+/**
+ * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
+ *
+ * This function waits for any pending IRQ handlers for this interrupt
+ * to complete before returning. If you use this function while
+ * holding a resource the IRQ handler may need you will deadlock.
+ *
+ * This function may be called - with care - from IRQ context.
+ */
+void synchronize_irq(unsigned int irq)
+{
+ struct irq_desc *desc = irq_desc + irq;
+
+ while (desc->status & IRQ_INPROGRESS)
+ cpu_relax();
+}
+
+EXPORT_SYMBOL(synchronize_irq);
+
+#endif
+
+/**
+ * disable_irq_nosync - disable an irq without waiting
+ * @irq: Interrupt to disable
+ *
+ * Disable the selected interrupt line. Disables and Enables are
+ * nested.
+ * Unlike disable_irq(), this function does not ensure existing
+ * instances of the IRQ handler have completed before returning.
+ *
+ * This function may be called from IRQ context.
+ */
+void disable_irq_nosync(unsigned int irq)
+{
+ irq_desc_t *desc = irq_desc + irq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&desc->lock, flags);
+ if (!desc->depth++) {
+ desc->status |= IRQ_DISABLED;
+ desc->handler->disable(irq);
+ }
+ spin_unlock_irqrestore(&desc->lock, flags);
+}
+
+EXPORT_SYMBOL(disable_irq_nosync);
+
+/**
+ * disable_irq - disable an irq and wait for completion
+ * @irq: Interrupt to disable
+ *
+ * Disable the selected interrupt line. Enables and Disables are
+ * nested.
+ * This function waits for any pending IRQ handlers for this interrupt
+ * to complete before returning. If you use this function while
+ * holding a resource the IRQ handler may need you will deadlock.
+ *
+ * This function may be called - with care - from IRQ context.
+ */
+void disable_irq(unsigned int irq)
+{
+ irq_desc_t *desc = irq_desc + irq;
+
+ disable_irq_nosync(irq);
+ if (desc->action)
+ synchronize_irq(irq);
+}
+
+EXPORT_SYMBOL(disable_irq);
+
+/**
+ * enable_irq - enable handling of an irq
+ * @irq: Interrupt to enable
+ *
+ * Undoes the effect of one call to disable_irq(). If this
+ * matches the last disable, processing of interrupts on this
+ * IRQ line is re-enabled.
+ *
+ * This function may be called from IRQ context.
+ */
+void enable_irq(unsigned int irq)
+{
+ irq_desc_t *desc = irq_desc + irq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&desc->lock, flags);
+ switch (desc->depth) {
+ case 0:
+ WARN_ON(1);
+ break;
+ case 1: {
+ unsigned int status = desc->status & ~IRQ_DISABLED;
+
+ desc->status = status;
+ if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
+ desc->status = status | IRQ_REPLAY;
+ hw_resend_irq(desc->handler,irq);
+ }
+ desc->handler->enable(irq);
+ /* fall-through */
+ }
+ default:
+ desc->depth--;
+ }
+ spin_unlock_irqrestore(&desc->lock, flags);
+}
+
+EXPORT_SYMBOL(enable_irq);
+
+/*
+ * Internal function that tells the architecture code whether a
+ * particular irq has been exclusively allocated or is available
+ * for driver use.
+ */
+int can_request_irq(unsigned int irq, unsigned long irqflags)
+{
+ struct irqaction *action;
+
+ if (irq >= NR_IRQS)
+ return 0;
+
+ action = irq_desc[irq].action;
+ if (action)
+ if (irqflags & action->flags & SA_SHIRQ)
+ action = NULL;
+
+ return !action;
+}
+
+/**
+ * setup_irq - register an irqaction structure
+ * @irq: Interrupt to register
+ * @irqaction: The irqaction structure to be registered
+ *
+ * Normally called by request_irq, this function can be used
+ * directly to allocate special interrupts that are part of the
+ * architecture.
+ */
+int setup_irq(unsigned int irq, struct irqaction * new)
+{
+ struct irq_desc *desc = irq_desc + irq;
+ struct irqaction *old, **p;
+ unsigned long flags;
+ int shared = 0;
+
+ if (desc->handler == &no_irq_type)
+ return -ENOSYS;
+ /*
+ * Some drivers like serial.c use request_irq() heavily,
+ * so we have to be careful not to interfere with a
+ * running system.
+ */
+ if (new->flags & SA_SAMPLE_RANDOM) {
+ /*
+ * This function might sleep, we want to call it first,
+ * outside of the atomic block.
+ * Yes, this might clear the entropy pool if the wrong
+ * driver is loaded by mistake without actually installing
+ * a new handler, but that is not really a problem: only
+ * the sysadmin is able to do this.
+ */
+ rand_initialize_irq(irq);
+ }
+
+ /*
+ * The following block of code has to be executed atomically
+ */
+ spin_lock_irqsave(&desc->lock,flags);
+ p = &desc->action;
+ if ((old = *p) != NULL) {
+ /* Can't share interrupts unless both agree to */
+ if (!(old->flags & new->flags & SA_SHIRQ)) {
+ spin_unlock_irqrestore(&desc->lock,flags);
+ return -EBUSY;
+ }
+
+ /* add new interrupt at end of irq queue */
+ do {
+ p = &old->next;
+ old = *p;
+ } while (old);
+ shared = 1;
+ }
+
+ *p = new;
+
+ if (!shared) {
+ desc->depth = 0;
+ desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT |
+ IRQ_WAITING | IRQ_INPROGRESS);
+ if (desc->handler->startup)
+ desc->handler->startup(irq);
+ else
+ desc->handler->enable(irq);
+ }
+ spin_unlock_irqrestore(&desc->lock,flags);
+
+ new->irq = irq;
+ register_irq_proc(irq);
+ new->dir = NULL;
+ register_handler_proc(irq, new);
+
+ return 0;
+}
+
+/*
+ * teardown_irq - unregister an irqaction
+ * @irq: Interrupt line being freed
+ * @old: Pointer to the irqaction that is to be unregistered
+ *
+ * This function is called by free_irq and does the actual
+ * business of unregistering the handler. It exists as a
+ * separate function to enable handlers to be unregistered
+ * for irqactions that have been allocated statically at
+ * boot time.
+ *
+ * This function must not be called from interrupt context.
+ */
+int teardown_irq(unsigned int irq, struct irqaction * old)
+{
+ struct irq_desc *desc;
+ struct irqaction **p;
+ unsigned long flags;
+
+ if (irq >= NR_IRQS)
+ return -ENOENT;
+
+ desc = irq_desc + irq;
+ spin_lock_irqsave(&desc->lock,flags);
+ p = &desc->action;
+ for (;;) {
+ struct irqaction * action = *p;
+
+ if (action) {
+ struct irqaction **pp = p;
+
+ p = &action->next;
+ if (action != old)
+ continue;
+
+ /* Found it - now remove it from the list of entries */
+ *pp = action->next;
+ if (!desc->action) {
+ desc->status |= IRQ_DISABLED;
+ if (desc->handler->shutdown)
+ desc->handler->shutdown(irq);
+ else
+ desc->handler->disable(irq);
+ }
+ spin_unlock_irqrestore(&desc->lock,flags);
+ unregister_handler_proc(irq, action);
+
+ /* Make sure it's not being used on another CPU */
+ synchronize_irq(irq);
+ return 0;
+ }
+ printk(KERN_ERR "Trying to teardown free IRQ%d\n",irq);
+ spin_unlock_irqrestore(&desc->lock,flags);
+ return -ENOENT;
+ }
+}
+
+/**
+ * free_irq - free an interrupt
+ * @irq: Interrupt line to free
+ * @dev_id: Device identity to free
+ *
+ * Remove an interrupt handler. The handler is removed and if the
+ * interrupt line is no longer in use by any driver it is disabled.
+ * On a shared IRQ the caller must ensure the interrupt is disabled
+ * on the card it drives before calling this function. The function
+ * does not return until any executing interrupts for this IRQ
+ * have completed.
+ *
+ * This function must not be called from interrupt context.
+ */
+void free_irq(unsigned int irq, void *dev_id)
+{
+ struct irq_desc *desc;
+ struct irqaction *action;
+ unsigned long flags;
+
+ if (irq >= NR_IRQS)
+ return;
+
+ desc = irq_desc + irq;
+ spin_lock_irqsave(&desc->lock,flags);
+ for (action = desc->action; action != NULL; action = action->next) {
+ if (action->dev_id != dev_id)
+ continue;
+
+ spin_unlock_irqrestore(&desc->lock,flags);
+
+ if (teardown_irq(irq, action) == 0)
+ kfree(action);
+ return;
+ }
+ printk(KERN_ERR "Trying to free free IRQ%d\n",irq);
+ spin_unlock_irqrestore(&desc->lock,flags);
+ return;
+}
+
+EXPORT_SYMBOL(free_irq);
+
+/**
+ * request_irq - allocate an interrupt line
+ * @irq: Interrupt line to allocate
+ * @handler: Function to be called when the IRQ occurs
+ * @irqflags: Interrupt type flags
+ * @devname: An ascii name for the claiming device
+ * @dev_id: A cookie passed back to the handler function
+ *
+ * This call allocates interrupt resources and enables the
+ * interrupt line and IRQ handling. From the point this
+ * call is made your handler function may be invoked. Since
+ * your handler function must clear any interrupt the board
+ * raises, you must take care both to initialise your hardware
+ * and to set up the interrupt handler in the right order.
+ *
+ * Dev_id must be globally unique. Normally the address of the
+ * device data structure is used as the cookie. Since the handler
+ * receives this value it makes sense to use it.
+ *
+ * If your interrupt is shared you must pass a non NULL dev_id
+ * as this is required when freeing the interrupt.
+ *
+ * Flags:
+ *
+ * SA_SHIRQ Interrupt is shared
+ * SA_INTERRUPT Disable local interrupts while processing
+ * SA_SAMPLE_RANDOM The interrupt can be used for entropy
+ *
+ */
+int request_irq(unsigned int irq,
+ irqreturn_t (*handler)(int, void *, struct pt_regs *),
+ unsigned long irqflags, const char * devname, void *dev_id)
+{
+ struct irqaction * action;
+ int retval;
+
+ /*
+ * Sanity-check: shared interrupts must pass in a real dev-ID,
+ * otherwise we'll have trouble later trying to figure out
+ * which interrupt is which (messes up the interrupt freeing
+ * logic etc).
+ */
+ if ((irqflags & SA_SHIRQ) && !dev_id)
+ return -EINVAL;
+ if (irq >= NR_IRQS)
+ return -EINVAL;
+ if (!handler)
+ return -EINVAL;
+
+ action = kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
+ if (!action)
+ return -ENOMEM;
+
+ action->handler = handler;
+ action->flags = irqflags;
+ cpus_clear(action->mask);
+ action->name = devname;
+ action->next = NULL;
+ action->dev_id = dev_id;
+
+ retval = setup_irq(irq, action);
+ if (retval)
+ kfree(action);
+
+ return retval;
+}
+
+EXPORT_SYMBOL(request_irq);
+
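
A conventional driver-side sketch for the API above (my_handler and the
cookie are illustrative):

    static irqreturn_t my_handler(int irq, void *dev_id, struct pt_regs *regs)
    {
        /* acknowledge the device here */
        return IRQ_HANDLED;
    }

    static int my_attach(unsigned int irq, void *cookie)
    {
        /* shared line, so a unique non-NULL dev_id is mandatory */
        return request_irq(irq, my_handler, SA_SHIRQ, "mydev", cookie);
    }

    static void my_detach(unsigned int irq, void *cookie)
    {
        free_irq(irq, cookie);      /* dev_id must match the request */
    }
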
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/mkbuildtree
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/mkbuildtree Tue Aug 9 15:17:45 2005
@@ -0,0 +1,111 @@
+#!/bin/bash
+
+# mkbuildtree <build tree>
+#
+# Creates symbolic links in <build tree> for the sparse tree
+# in the current directory.
+
+# Script to determine the relative path between two directories.
+# Copyright (c) D. J. Hawkey Jr. 2002
+# Fixed for Xen project by K. Fraser in 2003.
+abs_to_rel ()
+{
+ local CWD SRCPATH
+
+ if [ "$1" != "/" -a "${1##*[^/]}" = "/" ]; then
+ SRCPATH=${1%?}
+ else
+ SRCPATH=$1
+ fi
+ if [ "$2" != "/" -a "${2##*[^/]}" = "/" ]; then
+ DESTPATH=${2%?}
+ else
+ DESTPATH=$2
+ fi
+
+ CWD=$PWD
+ [ "${1%%[^/]*}" != "/" ] && cd $1 && SRCPATH=$PWD
+ [ "${2%%[^/]*}" != "/" ] && cd $2 && DESTPATH=$PWD
+ [ "$CWD" != "$PWD" ] && cd $CWD
+
+ BASEPATH=$SRCPATH
+
+ [ "$SRCPATH" = "$DESTPATH" ] && DESTPATH="." && return
+ [ "$SRCPATH" = "/" ] && DESTPATH=${DESTPATH#?} && return
+
+ while [ "$BASEPATH/" != "${DESTPATH%${DESTPATH#$BASEPATH/}}" ]; do
+ BASEPATH=${BASEPATH%/*}
+ done
+
+ SRCPATH=${SRCPATH#$BASEPATH}
+ DESTPATH=${DESTPATH#$BASEPATH}
+ DESTPATH=${DESTPATH#?}
+ while [ -n "$SRCPATH" ]; do
+ SRCPATH=${SRCPATH%/*}
+ DESTPATH="../$DESTPATH"
+ done
+
+ [ -z "$BASEPATH" ] && BASEPATH="/"
+ [ "${DESTPATH##*[^/]}" = "/" ] && DESTPATH=${DESTPATH%?}
+}
+
+# relative_lndir <target_dir>
+# Creates a tree of symlinks in the current working directory that mirror
+# real files in <target_dir>. <target_dir> should be relative to the current
+# working directory. Symlinks in <target_dir> are ignored. Source-control files
+# are ignored.
+relative_lndir ()
+{
+ local SYMLINK_DIR REAL_DIR pref i j
+ SYMLINK_DIR=$PWD
+ REAL_DIR=$1
+ (
+ cd $REAL_DIR
+ for i in `find . -type d | grep -v SCCS`; do
+ [ -d $SYMLINK_DIR/$i ] || mkdir -p $SYMLINK_DIR/$i
+ (
+ cd $i
+ pref=`echo $i | sed -e 's#/[^/]*#../#g' -e 's#^\.##'`
+ for j in `find . -maxdepth 1 -type f -o -type l`; do
+ ln -sf ${pref}${REAL_DIR}/$i/$j ${SYMLINK_DIR}/$i/$j
+ done
+ )
+ done
+ )
+}
+
+[ "$1" == "" ] && { echo "Syntax: $0 <linux tree to xenify>"; exit 1; }
+
+# Get absolute path to the destination directory
+pushd . >/dev/null
+cd ${1} || { echo "cannot cd to ${1}"; exit 1; }
+AD=$PWD
+popd >/dev/null
+
+# Get absolute path to the source directory
+AS=`pwd`
+
+# Get path to source, relative to destination
+abs_to_rel ${AD} ${AS}
+RS=$DESTPATH
+
+# Remove old copies of files and directories at the destination
+for i in `find . -type f -o -type l` ; do rm -f ${AD}/${i#./} ; done
+
+# We now work from the destination directory
+cd ${AD} || { echo "cannot cd to ${AD}"; exit 1; }
+
+# Remove old symlinks
+for i in `find . -type l`; do rm -f $i; done
+
+# Create symlinks of files and directories which exist in the sparse source
+relative_lndir ${RS}
+rm -f mkbuildtree
+
+
+# Create links to the shared definitions of the Xen interfaces.
+rm -rf ${AD}/include/asm-xen/xen-public
+mkdir ${AD}/include/asm-xen/xen-public
+cd ${AD}/include/asm-xen/xen-public
+relative_lndir ../../../${RS}/../xen/include/public
+
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/mm/highmem.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/mm/highmem.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,614 @@
+/*
+ * High memory handling common code and variables.
+ *
+ * (C) 1999 Andrea Arcangeli, SuSE GmbH, andrea@xxxxxxx
+ * Gerhard Wichert, Siemens AG, Gerhard.Wichert@xxxxxxxxxxxxxx
+ *
+ *
+ * Redesigned the x86 32-bit VM architecture to deal with
+ * 64-bit physical space. With current x86 CPUs this
+ * means up to 64 Gigabytes physical RAM.
+ *
+ * Rewrote high memory support to move the page cache into
+ * high memory. Implemented permanent (schedulable) kmaps
+ * based on Linus' idea.
+ *
+ * Copyright (C) 1999 Ingo Molnar <mingo@xxxxxxxxxx>
+ */
+
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/swap.h>
+#include <linux/bio.h>
+#include <linux/pagemap.h>
+#include <linux/mempool.h>
+#include <linux/blkdev.h>
+#include <linux/init.h>
+#include <linux/hash.h>
+#include <linux/highmem.h>
+#include <asm/tlbflush.h>
+
+static mempool_t *page_pool, *isa_page_pool;
+
+static void *page_pool_alloc(int gfp_mask, void *data)
+{
+ int gfp = gfp_mask | (int) (long) data;
+
+ return alloc_page(gfp);
+}
+
+static void page_pool_free(void *page, void *data)
+{
+ __free_page(page);
+}
+
+/*
+ * Virtual_count is not a pure "count".
+ * 0 means that it is not mapped, and has not been mapped
+ * since a TLB flush - it is usable.
+ * 1 means that there are no users, but it has been mapped
+ * since the last TLB flush - so we can't use it.
+ * n means that there are (n-1) current users of it.
+ */
+#ifdef CONFIG_HIGHMEM
+static int pkmap_count[LAST_PKMAP];
+static unsigned int last_pkmap_nr;
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);
+
+pte_t * pkmap_page_table;
+
+static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);
+
+static void flush_all_zero_pkmaps(void)
+{
+ int i;
+
+ flush_cache_kmaps();
+
+ for (i = 0; i < LAST_PKMAP; i++) {
+ struct page *page;
+
+ /*
+ * zero means we don't have anything to do,
+ * >1 means that it is still in use. Only
+ * a count of 1 means that it is free but
+ * needs to be unmapped
+ */
+ if (pkmap_count[i] != 1)
+ continue;
+ pkmap_count[i] = 0;
+
+ /* sanity check */
+ if (pte_none(pkmap_page_table[i]))
+ BUG();
+
+ /*
+ * Don't need an atomic fetch-and-clear op here;
+ * no-one has the page mapped, and cannot get at
+ * its virtual address (and hence PTE) without first
+ * getting the kmap_lock (which is held here).
+ * So no dangers, even with speculative execution.
+ */
+ page = pte_page(pkmap_page_table[i]);
+ pte_clear(&pkmap_page_table[i]);
+
+ set_page_address(page, NULL);
+ }
+ flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
+}
+
+static inline unsigned long map_new_virtual(struct page *page)
+{
+ unsigned long vaddr;
+ int count;
+
+start:
+ count = LAST_PKMAP;
+ /* Find an empty entry */
+ for (;;) {
+ last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK;
+ if (!last_pkmap_nr) {
+ flush_all_zero_pkmaps();
+ count = LAST_PKMAP;
+ }
+ if (!pkmap_count[last_pkmap_nr])
+ break; /* Found a usable entry */
+ if (--count)
+ continue;
+
+ /*
+ * Sleep for somebody else to unmap their entries
+ */
+ {
+ DECLARE_WAITQUEUE(wait, current);
+
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&pkmap_map_wait, &wait);
+ spin_unlock(&kmap_lock);
+ schedule();
+ remove_wait_queue(&pkmap_map_wait, &wait);
+ spin_lock(&kmap_lock);
+
+ /* Somebody else might have mapped it while we slept */
+ if (page_address(page))
+ return (unsigned long)page_address(page);
+
+ /* Re-start */
+ goto start;
+ }
+ }
+ vaddr = PKMAP_ADDR(last_pkmap_nr);
+ set_pte(&(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
+
+ pkmap_count[last_pkmap_nr] = 1;
+ set_page_address(page, (void *)vaddr);
+
+ return vaddr;
+}
+
+void kmap_flush_unused(void)
+{
+ spin_lock(&kmap_lock);
+ flush_all_zero_pkmaps();
+ spin_unlock(&kmap_lock);
+}
+
+EXPORT_SYMBOL(kmap_flush_unused);
+
+void fastcall *kmap_high(struct page *page)
+{
+ unsigned long vaddr;
+
+ /*
+ * For highmem pages, we can't trust "virtual" until
+ * after we have the lock.
+ *
+ * We cannot call this from interrupts, as it may block
+ */
+ spin_lock(&kmap_lock);
+ vaddr = (unsigned long)page_address(page);
+ if (!vaddr)
+ vaddr = map_new_virtual(page);
+ pkmap_count[PKMAP_NR(vaddr)]++;
+ if (pkmap_count[PKMAP_NR(vaddr)] < 2)
+ BUG();
+ spin_unlock(&kmap_lock);
+ return (void*) vaddr;
+}
+
+EXPORT_SYMBOL(kmap_high);
+
+void fastcall kunmap_high(struct page *page)
+{
+ unsigned long vaddr;
+ unsigned long nr;
+ int need_wakeup;
+
+ spin_lock(&kmap_lock);
+ vaddr = (unsigned long)page_address(page);
+ if (!vaddr)
+ BUG();
+ nr = PKMAP_NR(vaddr);
+
+ /*
+ * A count must never go down to zero
+ * without a TLB flush!
+ */
+ need_wakeup = 0;
+ switch (--pkmap_count[nr]) {
+ case 0:
+ BUG();
+ case 1:
+ /*
+ * Avoid an unnecessary wake_up() function call.
+ * The common case is pkmap_count[] == 1, but
+ * no waiters.
+ * The tasks queued in the wait-queue are guarded
+ * by both the lock in the wait-queue-head and by
+ * the kmap_lock. As the kmap_lock is held here,
+ * no need for the wait-queue-head's lock. Simply
+ * test if the queue is empty.
+ */
+ need_wakeup = waitqueue_active(&pkmap_map_wait);
+ }
+ spin_unlock(&kmap_lock);
+
+ /* do wake-up, if needed, race-free outside of the spin lock */
+ if (need_wakeup)
+ wake_up(&pkmap_map_wait);
+}
+
+EXPORT_SYMBOL(kunmap_high);
+
+#define POOL_SIZE 64
+
+static __init int init_emergency_pool(void)
+{
+ struct sysinfo i;
+ si_meminfo(&i);
+ si_swapinfo(&i);
+
+ if (!i.totalhigh)
+ return 0;
+
+ page_pool = mempool_create(POOL_SIZE, page_pool_alloc, page_pool_free, NULL);
+ if (!page_pool)
+ BUG();
+ printk("highmem bounce pool size: %d pages\n", POOL_SIZE);
+
+ return 0;
+}
+
+__initcall(init_emergency_pool);
+
+/*
+ * highmem version, map in to vec
+ */
+static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
+{
+ unsigned long flags;
+ unsigned char *vto;
+
+ local_irq_save(flags);
+ vto = kmap_atomic(to->bv_page, KM_BOUNCE_READ);
+ memcpy(vto + to->bv_offset, vfrom, to->bv_len);
+ kunmap_atomic(vto, KM_BOUNCE_READ);
+ local_irq_restore(flags);
+}
+
+#else /* CONFIG_HIGHMEM */
+
+#define bounce_copy_vec(to, vfrom) \
+ memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len)
+
+#endif
+
+#define ISA_POOL_SIZE 16
+
+/*
+ * gets called "every" time someone init's a queue with BLK_BOUNCE_ISA
+ * as the max address, so check if the pool has already been created.
+ */
+int init_emergency_isa_pool(void)
+{
+ if (isa_page_pool)
+ return 0;
+
+ isa_page_pool = mempool_create(ISA_POOL_SIZE, page_pool_alloc, page_pool_free, (void *) __GFP_DMA);
+ if (!isa_page_pool)
+ BUG();
+
+ printk("isa bounce pool size: %d pages\n", ISA_POOL_SIZE);
+ return 0;
+}
+
+/*
+ * Simple bounce buffer support for highmem pages. Depending on the
+ * queue gfp mask set, *to may or may not be a highmem page. kmap it
+ * always; it will do the Right Thing.
+ */
+static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
+{
+ unsigned char *vfrom;
+ struct bio_vec *tovec, *fromvec;
+ int i;
+
+ __bio_for_each_segment(tovec, to, i, 0) {
+ fromvec = from->bi_io_vec + i;
+
+ /*
+ * not bounced
+ */
+ if (tovec->bv_page == fromvec->bv_page)
+ continue;
+
+ /*
+ * fromvec->bv_offset and fromvec->bv_len might have been
+ * modified by the block layer, so use the original copy,
+ * bounce_copy_vec already uses tovec->bv_len
+ */
+ vfrom = page_address(fromvec->bv_page) + tovec->bv_offset;
+
+ flush_dcache_page(tovec->bv_page);
+ bounce_copy_vec(tovec, vfrom);
+ }
+}
+
+static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
+{
+ struct bio *bio_orig = bio->bi_private;
+ struct bio_vec *bvec, *org_vec;
+ int i;
+
+ if (test_bit(BIO_EOPNOTSUPP, &bio->bi_flags))
+ set_bit(BIO_EOPNOTSUPP, &bio_orig->bi_flags);
+
+ /*
+ * free up bounce indirect pages used
+ */
+ __bio_for_each_segment(bvec, bio, i, 0) {
+ org_vec = bio_orig->bi_io_vec + i;
+ if (bvec->bv_page == org_vec->bv_page)
+ continue;
+
+ mempool_free(bvec->bv_page, pool);
+ }
+
+ bio_endio(bio_orig, bio_orig->bi_size, err);
+ bio_put(bio);
+}
+
+static int bounce_end_io_write(struct bio *bio, unsigned int bytes_done, int err)
+{
+ if (bio->bi_size)
+ return 1;
+
+ bounce_end_io(bio, page_pool, err);
+ return 0;
+}
+
+static int bounce_end_io_write_isa(struct bio *bio, unsigned int bytes_done, int err)
+{
+ if (bio->bi_size)
+ return 1;
+
+ bounce_end_io(bio, isa_page_pool, err);
+ return 0;
+}
+
+static void __bounce_end_io_read(struct bio *bio, mempool_t *pool, int err)
+{
+ struct bio *bio_orig = bio->bi_private;
+
+ if (test_bit(BIO_UPTODATE, &bio->bi_flags))
+ copy_to_high_bio_irq(bio_orig, bio);
+
+ bounce_end_io(bio, pool, err);
+}
+
+static int bounce_end_io_read(struct bio *bio, unsigned int bytes_done, int err)
+{
+ if (bio->bi_size)
+ return 1;
+
+ __bounce_end_io_read(bio, page_pool, err);
+ return 0;
+}
+
+static int bounce_end_io_read_isa(struct bio *bio, unsigned int bytes_done, int err)
+{
+ if (bio->bi_size)
+ return 1;
+
+ __bounce_end_io_read(bio, isa_page_pool, err);
+ return 0;
+}
+
+static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig,
+ mempool_t *pool)
+{
+ struct page *page;
+ struct bio *bio = NULL;
+ int i, rw = bio_data_dir(*bio_orig);
+ struct bio_vec *to, *from;
+
+ bio_for_each_segment(from, *bio_orig, i) {
+ page = from->bv_page;
+
+ /*
+ * is destination page below bounce pfn?
+ */
+ if (page_to_pfn(page) < q->bounce_pfn)
+ continue;
+
+ /*
+ * irk, bounce it
+ */
+ if (!bio)
+ bio = bio_alloc(GFP_NOIO, (*bio_orig)->bi_vcnt);
+
+ to = bio->bi_io_vec + i;
+
+ to->bv_page = mempool_alloc(pool, q->bounce_gfp);
+ to->bv_len = from->bv_len;
+ to->bv_offset = from->bv_offset;
+
+ if (rw == WRITE) {
+ char *vto, *vfrom;
+
+ flush_dcache_page(from->bv_page);
+ vto = page_address(to->bv_page) + to->bv_offset;
+ vfrom = kmap(from->bv_page) + from->bv_offset;
+ memcpy(vto, vfrom, to->bv_len);
+ kunmap(from->bv_page);
+ }
+ }
+
+ /*
+ * no pages bounced
+ */
+ if (!bio)
+ return;
+
+ /*
+ * at least one page was bounced, fill in possible non-highmem
+ * pages
+ */
+ __bio_for_each_segment(from, *bio_orig, i, 0) {
+ to = bio_iovec_idx(bio, i);
+ if (!to->bv_page) {
+ to->bv_page = from->bv_page;
+ to->bv_len = from->bv_len;
+ to->bv_offset = from->bv_offset;
+ }
+ }
+
+ bio->bi_bdev = (*bio_orig)->bi_bdev;
+ bio->bi_flags |= (1 << BIO_BOUNCED);
+ bio->bi_sector = (*bio_orig)->bi_sector;
+ bio->bi_rw = (*bio_orig)->bi_rw;
+
+ bio->bi_vcnt = (*bio_orig)->bi_vcnt;
+ bio->bi_idx = (*bio_orig)->bi_idx;
+ bio->bi_size = (*bio_orig)->bi_size;
+
+ if (pool == page_pool) {
+ bio->bi_end_io = bounce_end_io_write;
+ if (rw == READ)
+ bio->bi_end_io = bounce_end_io_read;
+ } else {
+ bio->bi_end_io = bounce_end_io_write_isa;
+ if (rw == READ)
+ bio->bi_end_io = bounce_end_io_read_isa;
+ }
+
+ bio->bi_private = *bio_orig;
+ *bio_orig = bio;
+}
+
+void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
+{
+ mempool_t *pool;
+
+ /*
+ * for non-isa bounce case, just check if the bounce pfn is equal
+ * to or bigger than the highest pfn in the system -- in that case,
+ * don't waste time iterating over bio segments
+ */
+ if (!(q->bounce_gfp & GFP_DMA)) {
+ if (q->bounce_pfn >= blk_max_pfn)
+ return;
+ pool = page_pool;
+ } else {
+ BUG_ON(!isa_page_pool);
+ pool = isa_page_pool;
+ }
+
+ /*
+ * slow path
+ */
+ __blk_queue_bounce(q, bio_orig, pool);
+}
+
+EXPORT_SYMBOL(blk_queue_bounce);
+
+#if defined(HASHED_PAGE_VIRTUAL)
+
+#define PA_HASH_ORDER 7
+
+/*
+ * Describes one page->virtual association
+ */
+struct page_address_map {
+ struct page *page;
+ void *virtual;
+ struct list_head list;
+};
+
+/*
+ * page_address_map freelist, allocated from page_address_maps.
+ */
+static struct list_head page_address_pool; /* freelist */
+static spinlock_t pool_lock; /* protects page_address_pool */
+
+/*
+ * Hash table bucket
+ */
+static struct page_address_slot {
+ struct list_head lh; /* List of page_address_maps */
+ spinlock_t lock; /* Protect this bucket's list */
+} ____cacheline_aligned_in_smp page_address_htable[1<<PA_HASH_ORDER];
+
+static struct page_address_slot *page_slot(struct page *page)
+{
+ return &page_address_htable[hash_ptr(page, PA_HASH_ORDER)];
+}
+
+void *page_address(struct page *page)
+{
+ unsigned long flags;
+ void *ret;
+ struct page_address_slot *pas;
+
+ if (!PageHighMem(page))
+ return lowmem_page_address(page);
+
+ pas = page_slot(page);
+ ret = NULL;
+ spin_lock_irqsave(&pas->lock, flags);
+ if (!list_empty(&pas->lh)) {
+ struct page_address_map *pam;
+
+ list_for_each_entry(pam, &pas->lh, list) {
+ if (pam->page == page) {
+ ret = pam->virtual;
+ goto done;
+ }
+ }
+ }
+done:
+ spin_unlock_irqrestore(&pas->lock, flags);
+ return ret;
+}
+
+EXPORT_SYMBOL(page_address);
+
+void set_page_address(struct page *page, void *virtual)
+{
+ unsigned long flags;
+ struct page_address_slot *pas;
+ struct page_address_map *pam;
+
+ BUG_ON(!PageHighMem(page));
+
+ pas = page_slot(page);
+ if (virtual) { /* Add */
+ BUG_ON(list_empty(&page_address_pool));
+
+ spin_lock_irqsave(&pool_lock, flags);
+ pam = list_entry(page_address_pool.next,
+ struct page_address_map, list);
+ list_del(&pam->list);
+ spin_unlock_irqrestore(&pool_lock, flags);
+
+ pam->page = page;
+ pam->virtual = virtual;
+
+ spin_lock_irqsave(&pas->lock, flags);
+ list_add_tail(&pam->list, &pas->lh);
+ spin_unlock_irqrestore(&pas->lock, flags);
+ } else { /* Remove */
+ spin_lock_irqsave(&pas->lock, flags);
+ list_for_each_entry(pam, &pas->lh, list) {
+ if (pam->page == page) {
+ list_del(&pam->list);
+ spin_unlock_irqrestore(&pas->lock, flags);
+ spin_lock_irqsave(&pool_lock, flags);
+ list_add_tail(&pam->list, &page_address_pool);
+ spin_unlock_irqrestore(&pool_lock, flags);
+ goto done;
+ }
+ }
+ spin_unlock_irqrestore(&pas->lock, flags);
+ }
+done:
+ return;
+}
+
+static struct page_address_map page_address_maps[LAST_PKMAP];
+
+void __init page_address_init(void)
+{
+ int i;
+
+ INIT_LIST_HEAD(&page_address_pool);
+ for (i = 0; i < ARRAY_SIZE(page_address_maps); i++)
+ list_add(&page_address_maps[i].list, &page_address_pool);
+ for (i = 0; i < ARRAY_SIZE(page_address_htable); i++) {
+ INIT_LIST_HEAD(&page_address_htable[i].lh);
+ spin_lock_init(&page_address_htable[i].lock);
+ }
+ spin_lock_init(&pool_lock);
+}
+
+#endif /* defined(HASHED_PAGE_VIRTUAL) */
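
To place blk_queue_bounce() in context: a block driver's make_request
function calls it before inspecting the bio and may be handed back a
substituted, bounced bio. A hedged sketch:

    static int my_make_request(request_queue_t *q, struct bio *bio)
    {
        blk_queue_bounce(q, &bio);  /* may replace 'bio' with a bounced copy */
        /* ... hand 'bio' to the hardware as usual ... */
        return 0;
    }
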
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/mm/memory.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/mm/memory.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,2303 @@
+/*
+ * linux/mm/memory.c
+ *
+ * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
+ */
+
+/*
+ * demand-loading started 01.12.91 - seems it is high on the list of
+ * things wanted, and it should be easy to implement. - Linus
+ */
+
+/*
+ * Ok, demand-loading was easy, shared pages a little bit tricker. Shared
+ * pages started 02.12.91, seems to work. - Linus.
+ *
+ * Tested sharing by executing about 30 /bin/sh: under the old kernel it
+ * would have taken more than the 6M I have free, but it worked well as
+ * far as I could see.
+ *
+ * Also corrected some "invalidate()"s - I wasn't doing enough of them.
+ */
+
+/*
+ * Real VM (paging to/from disk) started 18.12.91. Much more work and
+ * thought has to go into this. Oh, well..
+ * 19.12.91 - works, somewhat. Sometimes I get faults, don't know why.
+ * Found it. Everything seems to work now.
+ * 20.12.91 - Ok, making the swap-device changeable like the root.
+ */
+
+/*
+ * 05.04.94 - Multi-page memory management added for v1.1.
+ * Idea by Alex Bligh (alex@xxxxxxxxxxxxxxx)
+ *
+ * 16.07.99 - Support of BIGMEM added by Gerhard Wichert, Siemens AG
+ * (Gerhard.Wichert@xxxxxxxxxxxxxx)
+ *
+ * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
+ */
+
+#include <linux/kernel_stat.h>
+#include <linux/mm.h>
+#include <linux/hugetlb.h>
+#include <linux/mman.h>
+#include <linux/swap.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <linux/rmap.h>
+#include <linux/acct.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <asm/pgalloc.h>
+#include <asm/uaccess.h>
+#include <asm/tlb.h>
+#include <asm/tlbflush.h>
+#include <asm/pgtable.h>
+
+#include <linux/swapops.h>
+#include <linux/elf.h>
+
+#ifndef CONFIG_DISCONTIGMEM
+/* use the per-pgdat data instead for discontigmem - mbligh */
+unsigned long max_mapnr;
+struct page *mem_map;
+
+EXPORT_SYMBOL(max_mapnr);
+EXPORT_SYMBOL(mem_map);
+#endif
+
+unsigned long num_physpages;
+/*
+ * A number of key systems in x86 including ioremap() rely on the assumption
+ * that high_memory defines the upper bound on direct map memory, then end
+ * of ZONE_NORMAL. Under CONFIG_DISCONTIG this means that max_low_pfn and
+ * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
+ * and ZONE_HIGHMEM.
+ */
+void * high_memory;
+unsigned long vmalloc_earlyreserve;
+
+EXPORT_SYMBOL(num_physpages);
+EXPORT_SYMBOL(high_memory);
+EXPORT_SYMBOL(vmalloc_earlyreserve);
+
+/*
+ * Note: this doesn't free the actual pages themselves. That
+ * has been handled earlier when unmapping all the memory regions.
+ */
+static inline void clear_pmd_range(struct mmu_gather *tlb, pmd_t *pmd, unsigned long start, unsigned long end)
+{
+ struct page *page;
+
+ if (pmd_none(*pmd))
+ return;
+ if (unlikely(pmd_bad(*pmd))) {
+ pmd_ERROR(*pmd);
+ pmd_clear(pmd);
+ return;
+ }
+ if (!((start | end) & ~PMD_MASK)) {
+ /* Only clear full, aligned ranges */
+ page = pmd_page(*pmd);
+ pmd_clear(pmd);
+ dec_page_state(nr_page_table_pages);
+ tlb->mm->nr_ptes--;
+ pte_free_tlb(tlb, page);
+ }
+}
+
+static inline void clear_pud_range(struct mmu_gather *tlb, pud_t *pud, unsigned long start, unsigned long end)
+{
+ unsigned long addr = start, next;
+ pmd_t *pmd, *__pmd;
+
+ if (pud_none(*pud))
+ return;
+ if (unlikely(pud_bad(*pud))) {
+ pud_ERROR(*pud);
+ pud_clear(pud);
+ return;
+ }
+
+ pmd = __pmd = pmd_offset(pud, start);
+ do {
+ next = (addr + PMD_SIZE) & PMD_MASK;
+ if (next > end || next <= addr)
+ next = end;
+
+ clear_pmd_range(tlb, pmd, addr, next);
+ pmd++;
+ addr = next;
+ } while (addr && (addr < end));
+
+ if (!((start | end) & ~PUD_MASK)) {
+ /* Only clear full, aligned ranges */
+ pud_clear(pud);
+ pmd_free_tlb(tlb, __pmd);
+ }
+}
+
+
+static inline void clear_pgd_range(struct mmu_gather *tlb, pgd_t *pgd, unsigned long start, unsigned long end)
+{
+ unsigned long addr = start, next;
+ pud_t *pud, *__pud;
+
+ if (pgd_none(*pgd))
+ return;
+ if (unlikely(pgd_bad(*pgd))) {
+ pgd_ERROR(*pgd);
+ pgd_clear(pgd);
+ return;
+ }
+
+ pud = __pud = pud_offset(pgd, start);
+ do {
+ next = (addr + PUD_SIZE) & PUD_MASK;
+ if (next > end || next <= addr)
+ next = end;
+
+ clear_pud_range(tlb, pud, addr, next);
+ pud++;
+ addr = next;
+ } while (addr && (addr < end));
+
+ if (!((start | end) & ~PGDIR_MASK)) {
+ /* Only clear full, aligned ranges */
+ pgd_clear(pgd);
+ pud_free_tlb(tlb, __pud);
+ }
+}
+
+/*
+ * This function clears user-level page tables of a process.
+ *
+ * Must be called with pagetable lock held.
+ */
+void clear_page_range(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+{
+ unsigned long addr = start, next;
+ pgd_t * pgd = pgd_offset(tlb->mm, start);
+ unsigned long i;
+
+ for (i = pgd_index(start); i <= pgd_index(end-1); i++) {
+ next = (addr + PGDIR_SIZE) & PGDIR_MASK;
+ if (next > end || next <= addr)
+ next = end;
+
+ clear_pgd_range(tlb, pgd, addr, next);
+ pgd++;
+ addr = next;
+ }
+}
+
+pte_t fastcall * pte_alloc_map(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
+{
+ if (!pmd_present(*pmd)) {
+ struct page *new;
+
+ spin_unlock(&mm->page_table_lock);
+ new = pte_alloc_one(mm, address);
+ spin_lock(&mm->page_table_lock);
+ if (!new)
+ return NULL;
+ /*
+ * Because we dropped the lock, we should re-check the
+ * entry, as somebody else could have populated it..
+ */
+ if (pmd_present(*pmd)) {
+ pte_free(new);
+ goto out;
+ }
+ mm->nr_ptes++;
+ inc_page_state(nr_page_table_pages);
+ pmd_populate(mm, pmd, new);
+ }
+out:
+ return pte_offset_map(pmd, address);
+}
+
+pte_t fastcall * pte_alloc_kernel(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
+{
+ if (!pmd_present(*pmd)) {
+ pte_t *new;
+
+ spin_unlock(&mm->page_table_lock);
+ new = pte_alloc_one_kernel(mm, address);
+ spin_lock(&mm->page_table_lock);
+ if (!new)
+ return NULL;
+
+ /*
+ * Because we dropped the lock, we should re-check the
+ * entry, as somebody else could have populated it..
+ */
+ if (pmd_present(*pmd)) {
+ pte_free_kernel(new);
+ goto out;
+ }
+ pmd_populate_kernel(mm, pmd, new);
+ }
+out:
+ return pte_offset_kernel(pmd, address);
+}
+
+/*
+ * Copy one vm_area from one task to the other. Assumes that the page
+ * tables in the new task are already clear throughout the whole range
+ * covered by this vma.
+ *
+ * dst->page_table_lock is held on entry and exit,
+ * but may be dropped within p[mg]d_alloc() and pte_alloc_map().
+ */
+
+static inline void
+copy_swap_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, pte_t pte)
+{
+ if (pte_file(pte))
+ return;
+ swap_duplicate(pte_to_swp_entry(pte));
+ if (list_empty(&dst_mm->mmlist)) {
+ spin_lock(&mmlist_lock);
+ list_add(&dst_mm->mmlist, &src_mm->mmlist);
+ spin_unlock(&mmlist_lock);
+ }
+}
+
+static inline void
+copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
+ pte_t *dst_pte, pte_t *src_pte, unsigned long vm_flags,
+ unsigned long addr)
+{
+ pte_t pte = *src_pte;
+ struct page *page;
+ unsigned long pfn;
+
+ /* pte contains position in swap, so copy. */
+ if (!pte_present(pte)) {
+ copy_swap_pte(dst_mm, src_mm, pte);
+ set_pte(dst_pte, pte);
+ return;
+ }
+ pfn = pte_pfn(pte);
+ /* If the pte points outside of valid memory, the
+ * mapping is assumed to be good, meaningful
+ * and not mapped via rmap - duplicate the
+ * mapping as is.
+ */
+ page = NULL;
+ if (pfn_valid(pfn))
+ page = pfn_to_page(pfn);
+
+ if (!page || PageReserved(page)) {
+ set_pte(dst_pte, pte);
+ return;
+ }
+
+ /*
+ * If it's a COW mapping, write protect it both
+ * in the parent and the child
+ */
+ if ((vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE) {
+ ptep_set_wrprotect(src_pte);
+ pte = *src_pte;
+ }
+
+ /*
+ * If it's a shared mapping, mark it clean in
+ * the child
+ */
+ if (vm_flags & VM_SHARED)
+ pte = pte_mkclean(pte);
+ pte = pte_mkold(pte);
+ get_page(page);
+ dst_mm->rss++;
+ if (PageAnon(page))
+ dst_mm->anon_rss++;
+ set_pte(dst_pte, pte);
+ page_dup_rmap(page);
+}
+
+static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
+ pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
+ unsigned long addr, unsigned long end)
+{
+ pte_t *src_pte, *dst_pte;
+ pte_t *s, *d;
+ unsigned long vm_flags = vma->vm_flags;
+
+ d = dst_pte = pte_alloc_map(dst_mm, dst_pmd, addr);
+ if (!dst_pte)
+ return -ENOMEM;
+
+ spin_lock(&src_mm->page_table_lock);
+ s = src_pte = pte_offset_map_nested(src_pmd, addr);
+ for (; addr < end; addr += PAGE_SIZE, s++, d++) {
+ if (pte_none(*s))
+ continue;
+ copy_one_pte(dst_mm, src_mm, d, s, vm_flags, addr);
+ }
+ pte_unmap_nested(src_pte);
+ pte_unmap(dst_pte);
+ spin_unlock(&src_mm->page_table_lock);
+ cond_resched_lock(&dst_mm->page_table_lock);
+ return 0;
+}
+
+static int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
+ pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
+ unsigned long addr, unsigned long end)
+{
+ pmd_t *src_pmd, *dst_pmd;
+ int err = 0;
+ unsigned long next;
+
+ src_pmd = pmd_offset(src_pud, addr);
+ dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
+ if (!dst_pmd)
+ return -ENOMEM;
+
+ for (; addr < end; addr = next, src_pmd++, dst_pmd++) {
+ next = (addr + PMD_SIZE) & PMD_MASK;
+ if (next > end || next <= addr)
+ next = end;
+ if (pmd_none(*src_pmd))
+ continue;
+ if (pmd_bad(*src_pmd)) {
+ pmd_ERROR(*src_pmd);
+ pmd_clear(src_pmd);
+ continue;
+ }
+ err = copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
+ vma, addr, next);
+ if (err)
+ break;
+ }
+ return err;
+}
+
+static int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
+ pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
+ unsigned long addr, unsigned long end)
+{
+ pud_t *src_pud, *dst_pud;
+ int err = 0;
+ unsigned long next;
+
+ src_pud = pud_offset(src_pgd, addr);
+ dst_pud = pud_alloc(dst_mm, dst_pgd, addr);
+ if (!dst_pud)
+ return -ENOMEM;
+
+ for (; addr < end; addr = next, src_pud++, dst_pud++) {
+ next = (addr + PUD_SIZE) & PUD_MASK;
+ if (next > end || next <= addr)
+ next = end;
+ if (pud_none(*src_pud))
+ continue;
+ if (pud_bad(*src_pud)) {
+ pud_ERROR(*src_pud);
+ pud_clear(src_pud);
+ continue;
+ }
+ err = copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
+ vma, addr, next);
+ if (err)
+ break;
+ }
+ return err;
+}
+
+int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
+ struct vm_area_struct *vma)
+{
+ pgd_t *src_pgd, *dst_pgd;
+ unsigned long addr, start, end, next;
+ int err = 0;
+
+ if (is_vm_hugetlb_page(vma))
+ return copy_hugetlb_page_range(dst, src, vma);
+
+ start = vma->vm_start;
+ src_pgd = pgd_offset(src, start);
+ dst_pgd = pgd_offset(dst, start);
+
+ end = vma->vm_end;
+ addr = start;
+ while (addr && (addr < end-1)) {
+ next = (addr + PGDIR_SIZE) & PGDIR_MASK;
+ if (next > end || next <= addr)
+ next = end;
+ if (pgd_none(*src_pgd))
+ goto next_pgd;
+ if (pgd_bad(*src_pgd)) {
+ pgd_ERROR(*src_pgd);
+ pgd_clear(src_pgd);
+ goto next_pgd;
+ }
+ err = copy_pud_range(dst, src, dst_pgd, src_pgd,
+ vma, addr, next);
+ if (err)
+ break;
+
+next_pgd:
+ src_pgd++;
+ dst_pgd++;
+ addr = next;
+ }
+
+ return err;
+}
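+
+/*
+ * Caller sketch (hedged; names taken loosely from the fork() path):
+ * dup_mmap() walks the parent's vma list and invokes this once per
+ * copied vma, roughly:
+ *
+ *	for (mpnt = current->mm->mmap; mpnt; mpnt = mpnt->vm_next) {
+ *		... set up the child vma "tmp" ...
+ *		retval = copy_page_range(mm, current->mm, tmp);
+ *	}
+ */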
+
+static void zap_pte_range(struct mmu_gather *tlb,
+ pmd_t *pmd, unsigned long address,
+ unsigned long size, struct zap_details *details)
+{
+ unsigned long offset;
+ pte_t *ptep;
+
+ if (pmd_none(*pmd))
+ return;
+ if (unlikely(pmd_bad(*pmd))) {
+ pmd_ERROR(*pmd);
+ pmd_clear(pmd);
+ return;
+ }
+ ptep = pte_offset_map(pmd, address);
+ offset = address & ~PMD_MASK;
+ if (offset + size > PMD_SIZE)
+ size = PMD_SIZE - offset;
+ size &= PAGE_MASK;
+ if (details && !details->check_mapping && !details->nonlinear_vma)
+ details = NULL;
+ for (offset=0; offset < size; ptep++, offset += PAGE_SIZE) {
+ pte_t pte = *ptep;
+ if (pte_none(pte))
+ continue;
+ if (pte_present(pte)) {
+ struct page *page = NULL;
+ unsigned long pfn = pte_pfn(pte);
+ if (pfn_valid(pfn)) {
+ page = pfn_to_page(pfn);
+ if (PageReserved(page))
+ page = NULL;
+ }
+ if (unlikely(details) && page) {
+ /*
+ * unmap_shared_mapping_pages() wants to
+ * invalidate cache without truncating:
+ * unmap shared but keep private pages.
+ */
+ if (details->check_mapping &&
+ details->check_mapping != page->mapping)
+ continue;
+ /*
+ * Each page->index must be checked when
+ * invalidating or truncating nonlinear.
+ */
+ if (details->nonlinear_vma &&
+ (page->index < details->first_index ||
+ page->index > details->last_index))
+ continue;
+ }
+ pte = ptep_get_and_clear(ptep);
+ tlb_remove_tlb_entry(tlb, ptep, address+offset);
+ if (unlikely(!page))
+ continue;
+ if (unlikely(details) && details->nonlinear_vma
+ && linear_page_index(details->nonlinear_vma,
+ address+offset) != page->index)
+ set_pte(ptep, pgoff_to_pte(page->index));
+ if (pte_dirty(pte))
+ set_page_dirty(page);
+ if (PageAnon(page))
+ tlb->mm->anon_rss--;
+ else if (pte_young(pte))
+ mark_page_accessed(page);
+ tlb->freed++;
+ page_remove_rmap(page);
+ tlb_remove_page(tlb, page);
+ continue;
+ }
+ /*
+ * If details->check_mapping, we leave swap entries;
+ * if details->nonlinear_vma, we leave file entries.
+ */
+ if (unlikely(details))
+ continue;
+ if (!pte_file(pte))
+ free_swap_and_cache(pte_to_swp_entry(pte));
+ pte_clear(ptep);
+ }
+ pte_unmap(ptep-1);
+}
+
+static void zap_pmd_range(struct mmu_gather *tlb,
+ pud_t *pud, unsigned long address,
+ unsigned long size, struct zap_details *details)
+{
+ pmd_t * pmd;
+ unsigned long end;
+
+ if (pud_none(*pud))
+ return;
+ if (unlikely(pud_bad(*pud))) {
+ pud_ERROR(*pud);
+ pud_clear(pud);
+ return;
+ }
+ pmd = pmd_offset(pud, address);
+ end = address + size;
+ if (end > ((address + PUD_SIZE) & PUD_MASK))
+ end = ((address + PUD_SIZE) & PUD_MASK);
+ do {
+ zap_pte_range(tlb, pmd, address, end - address, details);
+ address = (address + PMD_SIZE) & PMD_MASK;
+ pmd++;
+ } while (address && (address < end));
+}
+
+static void zap_pud_range(struct mmu_gather *tlb,
+ pgd_t * pgd, unsigned long address,
+ unsigned long end, struct zap_details *details)
+{
+ pud_t * pud;
+
+ if (pgd_none(*pgd))
+ return;
+ if (unlikely(pgd_bad(*pgd))) {
+ pgd_ERROR(*pgd);
+ pgd_clear(pgd);
+ return;
+ }
+ pud = pud_offset(pgd, address);
+ do {
+ zap_pmd_range(tlb, pud, address, end - address, details);
+ address = (address + PUD_SIZE) & PUD_MASK;
+ pud++;
+ } while (address && (address < end));
+}
+
+static void unmap_page_range(struct mmu_gather *tlb,
+ struct vm_area_struct *vma, unsigned long address,
+ unsigned long end, struct zap_details *details)
+{
+ unsigned long next;
+ pgd_t *pgd;
+ int i;
+
+ BUG_ON(address >= end);
+ pgd = pgd_offset(vma->vm_mm, address);
+ tlb_start_vma(tlb, vma);
+ for (i = pgd_index(address); i <= pgd_index(end-1); i++) {
+ next = (address + PGDIR_SIZE) & PGDIR_MASK;
+ if (next <= address || next > end)
+ next = end;
+ zap_pud_range(tlb, pgd, address, next, details);
+ address = next;
+ pgd++;
+ }
+ tlb_end_vma(tlb, vma);
+}
+
+#ifdef CONFIG_PREEMPT
+# define ZAP_BLOCK_SIZE (8 * PAGE_SIZE)
+#else
+/* No preempt: go for improved straight-line efficiency */
+# define ZAP_BLOCK_SIZE (1024 * PAGE_SIZE)
+#endif
+
+/**
+ * unmap_vmas - unmap a range of memory covered by a list of vma's
+ * @tlbp: address of the caller's struct mmu_gather
+ * @mm: the controlling mm_struct
+ * @vma: the starting vma
+ * @start_addr: virtual address at which to start unmapping
+ * @end_addr: virtual address at which to end unmapping
+ * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
+ * @details: details of nonlinear truncation or shared cache invalidation
+ *
+ * Returns the number of vma's which were covered by the unmapping.
+ *
+ * Unmap all pages in the vma list. Called under page_table_lock.
+ *
+ * We aim to not hold page_table_lock for too long (for scheduling latency
+ * reasons). So zap pages in ZAP_BLOCK_SIZE bytecounts. This means we need to
+ * return the ending mmu_gather to the caller.
+ *
+ * Only addresses between `start' and `end' will be unmapped.
+ *
+ * The VMA list must be sorted in ascending virtual address order.
+ *
+ * unmap_vmas() assumes that the caller will flush the whole unmapped address
+ * range after unmap_vmas() returns. So the only responsibility here is to
+ * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
+ * drops the lock and schedules.
+ */
+int unmap_vmas(struct mmu_gather **tlbp, struct mm_struct *mm,
+ struct vm_area_struct *vma, unsigned long start_addr,
+ unsigned long end_addr, unsigned long *nr_accounted,
+ struct zap_details *details)
+{
+ unsigned long zap_bytes = ZAP_BLOCK_SIZE;
+ unsigned long tlb_start = 0; /* For tlb_finish_mmu */
+ int tlb_start_valid = 0;
+ int ret = 0;
+ spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL;
+ int fullmm = tlb_is_full_mm(*tlbp);
+
+ for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
+ unsigned long start;
+ unsigned long end;
+
+ start = max(vma->vm_start, start_addr);
+ if (start >= vma->vm_end)
+ continue;
+ end = min(vma->vm_end, end_addr);
+ if (end <= vma->vm_start)
+ continue;
+
+ if (vma->vm_flags & VM_ACCOUNT)
+ *nr_accounted += (end - start) >> PAGE_SHIFT;
+
+ ret++;
+ while (start != end) {
+ unsigned long block;
+
+ if (!tlb_start_valid) {
+ tlb_start = start;
+ tlb_start_valid = 1;
+ }
+
+ if (is_vm_hugetlb_page(vma)) {
+ block = end - start;
+ unmap_hugepage_range(vma, start, end);
+ } else {
+ block = min(zap_bytes, end - start);
+ unmap_page_range(*tlbp, vma, start,
+ start + block, details);
+ }
+
+ start += block;
+ zap_bytes -= block;
+ if ((long)zap_bytes > 0)
+ continue;
+
+ tlb_finish_mmu(*tlbp, tlb_start, start);
+
+ if (need_resched() ||
+ need_lockbreak(&mm->page_table_lock) ||
+ (i_mmap_lock && need_lockbreak(i_mmap_lock))) {
+ if (i_mmap_lock) {
+ /* must reset count of rss freed */
+ *tlbp = tlb_gather_mmu(mm, fullmm);
+ details->break_addr = start;
+ goto out;
+ }
+ spin_unlock(&mm->page_table_lock);
+ cond_resched();
+ spin_lock(&mm->page_table_lock);
+ }
+
+ *tlbp = tlb_gather_mmu(mm, fullmm);
+ tlb_start_valid = 0;
+ zap_bytes = ZAP_BLOCK_SIZE;
+ }
+ }
+out:
+ return ret;
+}
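+
+/*
+ * Caller sketch (illustrative only; details vary by call site):
+ * tearing down a whole address space uses a full-mm gather over the
+ * entire range, under mm->page_table_lock as documented above:
+ *
+ *	tlb = tlb_gather_mmu(mm, 1);
+ *	unmap_vmas(&tlb, mm, mm->mmap, 0, ~0UL, &nr_accounted, NULL);
+ *	tlb_finish_mmu(tlb, 0, ~0UL);
+ */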
+
+/**
+ * zap_page_range - remove user pages in a given range
+ * @vma: vm_area_struct holding the applicable pages
+ * @address: starting address of pages to zap
+ * @size: number of bytes to zap
+ * @details: details of nonlinear truncation or shared cache invalidation
+ */
+void zap_page_range(struct vm_area_struct *vma, unsigned long address,
+ unsigned long size, struct zap_details *details)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ struct mmu_gather *tlb;
+ unsigned long end = address + size;
+ unsigned long nr_accounted = 0;
+
+ if (is_vm_hugetlb_page(vma)) {
+ zap_hugepage_range(vma, address, size);
+ return;
+ }
+
+ lru_add_drain();
+ spin_lock(&mm->page_table_lock);
+ tlb = tlb_gather_mmu(mm, 0);
+ unmap_vmas(&tlb, mm, vma, address, end, &nr_accounted, details);
+ tlb_finish_mmu(tlb, address, end);
+ acct_update_integrals();
+ spin_unlock(&mm->page_table_lock);
+}
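+
+/*
+ * Usage sketch (illustrative): madvise(MADV_DONTNEED) discards a
+ * range simply by zapping its ptes:
+ *
+ *	zap_page_range(vma, start, end - start, NULL);
+ *
+ * A NULL zap_details means no mapping or nonlinear filtering: every
+ * pte in the range is torn down.
+ */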
+
+/*
+ * Do a quick page-table lookup for a single page.
+ * mm->page_table_lock must be held.
+ */
+static struct page *
+__follow_page(struct mm_struct *mm, unsigned long address, int read, int write)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *ptep, pte;
+ unsigned long pfn;
+ struct page *page;
+
+ page = follow_huge_addr(mm, address, write);
+ if (! IS_ERR(page))
+ return page;
+
+ pgd = pgd_offset(mm, address);
+ if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
+ goto out;
+
+ pud = pud_offset(pgd, address);
+ if (pud_none(*pud) || unlikely(pud_bad(*pud)))
+ goto out;
+
+ pmd = pmd_offset(pud, address);
+ if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
+ goto out;
+ if (pmd_huge(*pmd))
+ return follow_huge_pmd(mm, address, pmd, write);
+
+ ptep = pte_offset_map(pmd, address);
+ if (!ptep)
+ goto out;
+
+ pte = *ptep;
+ pte_unmap(ptep);
+ if (pte_present(pte)) {
+ if (write && !pte_write(pte))
+ goto out;
+ if (read && !pte_read(pte))
+ goto out;
+ pfn = pte_pfn(pte);
+ if (pfn_valid(pfn)) {
+ page = pfn_to_page(pfn);
+ if (write && !pte_dirty(pte) && !PageDirty(page))
+ set_page_dirty(page);
+ mark_page_accessed(page);
+ return page;
+ }
+ }
+
+out:
+ return NULL;
+}
+
+struct page *
+follow_page(struct mm_struct *mm, unsigned long address, int write)
+{
+ return __follow_page(mm, address, /*read*/0, write);
+}
+
+int
+check_user_page_readable(struct mm_struct *mm, unsigned long address)
+{
+ return __follow_page(mm, address, /*read*/1, /*write*/0) != NULL;
+}
+
+EXPORT_SYMBOL(check_user_page_readable);
+
+/*
+ * Given a physical address, is there a useful struct page pointing to
+ * it? This may become more complex in the future if we start dealing
+ * with IO-aperture pages for direct-IO.
+ */
+
+static inline struct page *get_page_map(struct page *page)
+{
+ if (!pfn_valid(page_to_pfn(page)))
+ return NULL;
+ return page;
+}
+
+
+static inline int
+untouched_anonymous_page(struct mm_struct* mm, struct vm_area_struct *vma,
+ unsigned long address)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+
+ /* Check if the vma is for an anonymous mapping. */
+ if (vma->vm_ops && vma->vm_ops->nopage)
+ return 0;
+
+ /* Check if page directory entry exists. */
+ pgd = pgd_offset(mm, address);
+ if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
+ return 1;
+
+ pud = pud_offset(pgd, address);
+ if (pud_none(*pud) || unlikely(pud_bad(*pud)))
+ return 1;
+
+ /* Check if page middle directory entry exists. */
+ pmd = pmd_offset(pud, address);
+ if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
+ return 1;
+
+ /* There is a pte slot for 'address' in 'mm'. */
+ return 0;
+}
+
+
+int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, int len, int write, int force,
+ struct page **pages, struct vm_area_struct **vmas)
+{
+ int i;
+ unsigned int flags;
+
+ /*
+ * Require read or write permissions.
+ * If 'force' is set, we only require the "MAY" flags.
+ */
+ flags = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
+ flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
+ i = 0;
+
+ do {
+ struct vm_area_struct * vma;
+
+ vma = find_extend_vma(mm, start);
+ if (!vma && in_gate_area(tsk, start)) {
+ unsigned long pg = start & PAGE_MASK;
+ struct vm_area_struct *gate_vma = get_gate_vma(tsk);
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+ if (write) /* user gate pages are read-only */
+ return i ? : -EFAULT;
+ if (pg > TASK_SIZE)
+ pgd = pgd_offset_k(pg);
+ else
+ pgd = pgd_offset_gate(mm, pg);
+ BUG_ON(pgd_none(*pgd));
+ pud = pud_offset(pgd, pg);
+ BUG_ON(pud_none(*pud));
+ pmd = pmd_offset(pud, pg);
+ BUG_ON(pmd_none(*pmd));
+ pte = pte_offset_map(pmd, pg);
+ BUG_ON(pte_none(*pte));
+ if (pages) {
+ pages[i] = pte_page(*pte);
+ get_page(pages[i]);
+ }
+ pte_unmap(pte);
+ if (vmas)
+ vmas[i] = gate_vma;
+ i++;
+ start += PAGE_SIZE;
+ len--;
+ continue;
+ }
+
+ if (!vma || (vma->vm_flags & VM_IO)
+ || !(flags & vma->vm_flags))
+ return i ? : -EFAULT;
+
+ if (is_vm_hugetlb_page(vma)) {
+ i = follow_hugetlb_page(mm, vma, pages, vmas,
+ &start, &len, i);
+ continue;
+ }
+ spin_lock(&mm->page_table_lock);
+ do {
+ struct page *map;
+ int lookup_write = write;
+
+ cond_resched_lock(&mm->page_table_lock);
+ while (!(map = follow_page(mm, start, lookup_write))) {
+ /*
+ * Shortcut for anonymous pages. We don't want
+ * to force the creation of page tables for
+ * insanely big anonymously mapped areas that
+ * nobody touched so far. This is important
+ * for doing a core dump for these mappings.
+ */
+ if (!lookup_write &&
+ untouched_anonymous_page(mm,vma,start)) {
+ map = ZERO_PAGE(start);
+ break;
+ }
+ spin_unlock(&mm->page_table_lock);
+ switch (handle_mm_fault(mm,vma,start,write)) {
+ case VM_FAULT_MINOR:
+ tsk->min_flt++;
+ break;
+ case VM_FAULT_MAJOR:
+ tsk->maj_flt++;
+ break;
+ case VM_FAULT_SIGBUS:
+ return i ? i : -EFAULT;
+ case VM_FAULT_OOM:
+ return i ? i : -ENOMEM;
+ default:
+ BUG();
+ }
+ /*
+ * Now that we have performed a write fault
+ * and surely no longer have a shared page we
+ * shouldn't write, we shouldn't ignore an
+ * unwritable page in the page table if
+ * we are forcing write access.
+ */
+ lookup_write = write && !force;
+ spin_lock(&mm->page_table_lock);
+ }
+ if (pages) {
+ pages[i] = get_page_map(map);
+ if (!pages[i]) {
+ spin_unlock(&mm->page_table_lock);
+ while (i--)
+ page_cache_release(pages[i]);
+ i = -EFAULT;
+ goto out;
+ }
+ flush_dcache_page(pages[i]);
+ if (!PageReserved(pages[i]))
+ page_cache_get(pages[i]);
+ }
+ if (vmas)
+ vmas[i] = vma;
+ i++;
+ start += PAGE_SIZE;
+ len--;
+ } while(len && start < vma->vm_end);
+ spin_unlock(&mm->page_table_lock);
+ } while(len);
+out:
+ return i;
+}
+
+EXPORT_SYMBOL(get_user_pages);
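+
+/*
+ * Usage sketch (hedged; error handling trimmed): a driver pinning
+ * user pages for DMA typically calls this under the mm semaphore,
+ * here with write=1 and force=0, and drops each page reference once
+ * the I/O completes:
+ *
+ *	down_read(&current->mm->mmap_sem);
+ *	ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK,
+ *			     npages, 1, 0, pages, NULL);
+ *	up_read(&current->mm->mmap_sem);
+ *	... perform the I/O ...
+ *	while (ret-- > 0)
+ *		page_cache_release(pages[ret]);
+ */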
+
+static void zeromap_pte_range(pte_t * pte, unsigned long address,
+ unsigned long size, pgprot_t prot)
+{
+ unsigned long end;
+
+ address &= ~PMD_MASK;
+ end = address + size;
+ if (end > PMD_SIZE)
+ end = PMD_SIZE;
+ do {
+ pte_t zero_pte = pte_wrprotect(mk_pte(ZERO_PAGE(address), prot));
+ BUG_ON(!pte_none(*pte));
+ set_pte(pte, zero_pte);
+ address += PAGE_SIZE;
+ pte++;
+ } while (address && (address < end));
+}
+
+static inline int zeromap_pmd_range(struct mm_struct *mm, pmd_t * pmd,
+ unsigned long address, unsigned long size, pgprot_t prot)
+{
+ unsigned long base, end;
+
+ base = address & PUD_MASK;
+ address &= ~PUD_MASK;
+ end = address + size;
+ if (end > PUD_SIZE)
+ end = PUD_SIZE;
+ do {
+ pte_t * pte = pte_alloc_map(mm, pmd, base + address);
+ if (!pte)
+ return -ENOMEM;
+ zeromap_pte_range(pte, base + address, end - address, prot);
+ pte_unmap(pte);
+ address = (address + PMD_SIZE) & PMD_MASK;
+ pmd++;
+ } while (address && (address < end));
+ return 0;
+}
+
+static inline int zeromap_pud_range(struct mm_struct *mm, pud_t * pud,
+ unsigned long address,
+ unsigned long size, pgprot_t prot)
+{
+ unsigned long base, end;
+ int error = 0;
+
+ base = address & PGDIR_MASK;
+ address &= ~PGDIR_MASK;
+ end = address + size;
+ if (end > PGDIR_SIZE)
+ end = PGDIR_SIZE;
+ do {
+ pmd_t * pmd = pmd_alloc(mm, pud, base + address);
+ error = -ENOMEM;
+ if (!pmd)
+ break;
+ error = zeromap_pmd_range(mm, pmd, base + address,
+ end - address, prot);
+ if (error)
+ break;
+ address = (address + PUD_SIZE) & PUD_MASK;
+ pud++;
+ } while (address && (address < end));
+ return 0;
+}
+
+int zeromap_page_range(struct vm_area_struct *vma, unsigned long address,
+ unsigned long size, pgprot_t prot)
+{
+ int i;
+ int error = 0;
+ pgd_t * pgd;
+ unsigned long beg = address;
+ unsigned long end = address + size;
+ unsigned long next;
+ struct mm_struct *mm = vma->vm_mm;
+
+ pgd = pgd_offset(mm, address);
+ flush_cache_range(vma, beg, end);
+ BUG_ON(address >= end);
+ BUG_ON(end > vma->vm_end);
+
+ spin_lock(&mm->page_table_lock);
+ for (i = pgd_index(address); i <= pgd_index(end-1); i++) {
+ pud_t *pud = pud_alloc(mm, pgd, address);
+ error = -ENOMEM;
+ if (!pud)
+ break;
+ next = (address + PGDIR_SIZE) & PGDIR_MASK;
+ if (next <= beg || next > end)
+ next = end;
+ error = zeromap_pud_range(mm, pud, address,
+ next - address, prot);
+ if (error)
+ break;
+ address = next;
+ pgd++;
+ }
+ /*
+ * Why flush? zeromap_pte_range has a BUG_ON for !pte_none()
+ */
+ flush_tlb_range(vma, beg, end);
+ spin_unlock(&mm->page_table_lock);
+ return error;
+}
+
+/*
+ * Maps a range of physical memory into the requested pages. The old
+ * mappings are removed. Any references to nonexistent pages result
+ * in null mappings (currently treated as "copy-on-access").
+ */
+static inline void
+remap_pte_range(pte_t * pte, unsigned long address, unsigned long size,
+ unsigned long pfn, pgprot_t prot)
+{
+ unsigned long end;
+
+ address &= ~PMD_MASK;
+ end = address + size;
+ if (end > PMD_SIZE)
+ end = PMD_SIZE;
+ do {
+ BUG_ON(!pte_none(*pte));
+ if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
+ set_pte(pte, pfn_pte(pfn, prot));
+ address += PAGE_SIZE;
+ pfn++;
+ pte++;
+ } while (address && (address < end));
+}
+
+static inline int
+remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address,
+ unsigned long size, unsigned long pfn, pgprot_t prot)
+{
+ unsigned long base, end;
+
+ base = address & PUD_MASK;
+ address &= ~PUD_MASK;
+ end = address + size;
+ if (end > PUD_SIZE)
+ end = PUD_SIZE;
+ pfn -= (address >> PAGE_SHIFT);
+ do {
+ pte_t * pte = pte_alloc_map(mm, pmd, base + address);
+ if (!pte)
+ return -ENOMEM;
+ remap_pte_range(pte, base + address, end - address,
+ (address >> PAGE_SHIFT) + pfn, prot);
+ pte_unmap(pte);
+ address = (address + PMD_SIZE) & PMD_MASK;
+ pmd++;
+ } while (address && (address < end));
+ return 0;
+}
+
+static inline int remap_pud_range(struct mm_struct *mm, pud_t * pud,
+ unsigned long address, unsigned long size,
+ unsigned long pfn, pgprot_t prot)
+{
+ unsigned long base, end;
+ int error;
+
+ base = address & PGDIR_MASK;
+ address &= ~PGDIR_MASK;
+ end = address + size;
+ if (end > PGDIR_SIZE)
+ end = PGDIR_SIZE;
+ pfn -= address >> PAGE_SHIFT;
+ do {
+ pmd_t *pmd = pmd_alloc(mm, pud, base+address);
+ error = -ENOMEM;
+ if (!pmd)
+ break;
+ error = remap_pmd_range(mm, pmd, base + address, end - address,
+ (address >> PAGE_SHIFT) + pfn, prot);
+ if (error)
+ break;
+ address = (address + PUD_SIZE) & PUD_MASK;
+ pud++;
+ } while (address && (address < end));
+ return error;
+}
+
+/* Note: this is only safe if the mm semaphore is held when called. */
+int remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
+ unsigned long pfn, unsigned long size, pgprot_t prot)
+{
+ int error = 0;
+ pgd_t *pgd;
+ unsigned long beg = from;
+ unsigned long end = from + size;
+ unsigned long next;
+ struct mm_struct *mm = vma->vm_mm;
+ int i;
+
+ pfn -= from >> PAGE_SHIFT;
+ pgd = pgd_offset(mm, from);
+ flush_cache_range(vma, beg, end);
+ BUG_ON(from >= end);
+
+ /*
+ * Physically remapped pages are special. Tell the
+ * rest of the world about it:
+ * VM_IO tells people not to look at these pages
+ * (accesses can have side effects).
+ * VM_RESERVED tells swapout not to try to touch
+ * this region.
+ */
+ vma->vm_flags |= VM_IO | VM_RESERVED;
+
+ spin_lock(&mm->page_table_lock);
+ for (i = pgd_index(beg); i <= pgd_index(end-1); i++) {
+ pud_t *pud = pud_alloc(mm, pgd, from);
+ error = -ENOMEM;
+ if (!pud)
+ break;
+ next = (from + PGDIR_SIZE) & PGDIR_MASK;
+ if (next > end || next <= from)
+ next = end;
+ error = remap_pud_range(mm, pud, from, end - from,
+ pfn + (from >> PAGE_SHIFT), prot);
+ if (error)
+ break;
+ from = next;
+ pgd++;
+ }
+ /*
+ * Why flush? remap_pte_range has a BUG_ON for !pte_none()
+ */
+ flush_tlb_range(vma, beg, end);
+ spin_unlock(&mm->page_table_lock);
+
+ return error;
+}
+
+EXPORT_SYMBOL(remap_pfn_range);
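+
+/*
+ * Usage sketch (hedged): the canonical caller is a driver's mmap()
+ * method; "mydev_buf_pfn" is a hypothetical page frame number of a
+ * device buffer, used purely for illustration:
+ *
+ *	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
+ *	{
+ *		unsigned long size = vma->vm_end - vma->vm_start;
+ *
+ *		return remap_pfn_range(vma, vma->vm_start, mydev_buf_pfn,
+ *				       size, vma->vm_page_prot);
+ *	}
+ *
+ * The mm semaphore is held across ->mmap(), which satisfies the
+ * locking requirement noted above.
+ */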
+
+/*
+ * Do pte_mkwrite, but only if the vma says VM_WRITE. We do this when
+ * servicing faults for write access. In the normal case we always want
+ * pte_mkwrite. But get_user_pages can cause write faults for mappings
+ * that do not have writing enabled, when used by access_process_vm.
+ */
+static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
+{
+ if (likely(vma->vm_flags & VM_WRITE))
+ pte = pte_mkwrite(pte);
+ return pte;
+}
+
+/*
+ * We hold the mm semaphore for reading and vma->vm_mm->page_table_lock
+ */
+static inline void break_cow(struct vm_area_struct * vma, struct page * new_page, unsigned long address, pte_t *page_table)
+{
+ pte_t entry;
+
+ flush_cache_page(vma, address);
+ entry = maybe_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot)),
+ vma);
+ ptep_establish(vma, address, page_table, entry);
+ update_mmu_cache(vma, address, entry);
+}
+
+/*
+ * This routine handles present pages, when users try to write
+ * to a shared page. It is done by copying the page to a new address
+ * and decrementing the shared-page counter for the old page.
+ *
+ * Goto-purists beware: the only reason for goto's here is that it results
+ * in better assembly code.. The "default" path will see no jumps at all.
+ *
+ * Note that this routine assumes that the protection checks have been
+ * done by the caller (the low-level page fault routine in most cases).
+ * Thus we can safely just mark it writable once we've done any necessary
+ * COW.
+ *
+ * We also mark the page dirty at this point even though the page will
+ * change only once the write actually happens. This avoids a few races,
+ * and potentially makes it more efficient.
+ *
+ * We hold the mm semaphore and the page_table_lock on entry and exit
+ * with the page_table_lock released.
+ */
+static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
+ unsigned long address, pte_t *page_table, pmd_t *pmd, pte_t pte)
+{
+ struct page *old_page, *new_page;
+ unsigned long pfn = pte_pfn(pte);
+ pte_t entry;
+
+ if (unlikely(!pfn_valid(pfn))) {
+ /*
+ * This should really halt the system so it can be debugged or
+ * at least the kernel stops what it's doing before it corrupts
+ * data, but for the moment just pretend this is OOM.
+ */
+ pte_unmap(page_table);
+ printk(KERN_ERR "do_wp_page: bogus page at address %08lx\n",
+ address);
+ spin_unlock(&mm->page_table_lock);
+ return VM_FAULT_OOM;
+ }
+ old_page = pfn_to_page(pfn);
+
+ if (!TestSetPageLocked(old_page)) {
+ int reuse = can_share_swap_page(old_page);
+ unlock_page(old_page);
+ if (reuse) {
+ flush_cache_page(vma, address);
+ entry = maybe_mkwrite(pte_mkyoung(pte_mkdirty(pte)),
+ vma);
+ ptep_set_access_flags(vma, address, page_table, entry, 1);
+ update_mmu_cache(vma, address, entry);
+ pte_unmap(page_table);
+ spin_unlock(&mm->page_table_lock);
+ return VM_FAULT_MINOR;
+ }
+ }
+ pte_unmap(page_table);
+
+ /*
+ * Ok, we need to copy. Oh, well..
+ */
+ if (!PageReserved(old_page))
+ page_cache_get(old_page);
+ spin_unlock(&mm->page_table_lock);
+
+ if (unlikely(anon_vma_prepare(vma)))
+ goto no_new_page;
+ if (old_page == ZERO_PAGE(address)) {
+ new_page = alloc_zeroed_user_highpage(vma, address);
+ if (!new_page)
+ goto no_new_page;
+ } else {
+ new_page = alloc_page_vma(GFP_HIGHUSER, vma, address);
+ if (!new_page)
+ goto no_new_page;
+ copy_user_highpage(new_page, old_page, address);
+ }
+ /*
+ * Re-check the pte - we dropped the lock
+ */
+ spin_lock(&mm->page_table_lock);
+ page_table = pte_offset_map(pmd, address);
+ if (likely(pte_same(*page_table, pte))) {
+ if (PageAnon(old_page))
+ mm->anon_rss--;
+ if (PageReserved(old_page)) {
+ ++mm->rss;
+ acct_update_integrals();
+ update_mem_hiwater();
+ } else
+ page_remove_rmap(old_page);
+ break_cow(vma, new_page, address, page_table);
+ lru_cache_add_active(new_page);
+ page_add_anon_rmap(new_page, vma, address);
+
+ /* Free the old page.. */
+ new_page = old_page;
+ }
+ pte_unmap(page_table);
+ page_cache_release(new_page);
+ page_cache_release(old_page);
+ spin_unlock(&mm->page_table_lock);
+ return VM_FAULT_MINOR;
+
+no_new_page:
+ page_cache_release(old_page);
+ return VM_FAULT_OOM;
+}
+
+/*
+ * Helper functions for unmap_mapping_range().
+ *
+ * __ Notes on dropping i_mmap_lock to reduce latency while unmapping __
+ *
+ * We have to restart searching the prio_tree whenever we drop the lock,
+ * since the iterator is only valid while the lock is held, and anyway
+ * a later vma might be split and reinserted earlier while lock dropped.
+ *
+ * The list of nonlinear vmas could be handled more efficiently, using
+ * a placeholder, but handle it in the same way until a need is shown.
+ * It is important to search the prio_tree before nonlinear list: a vma
+ * may become nonlinear and be shifted from prio_tree to nonlinear list
+ * while the lock is dropped; but never shifted from list to prio_tree.
+ *
+ * In order to make forward progress despite restarting the search,
+ * vm_truncate_count is used to mark a vma as now dealt with, so we can
+ * quickly skip it next time around. Since the prio_tree search only
+ * shows us those vmas affected by unmapping the range in question, we
+ * can't efficiently keep all vmas in step with mapping->truncate_count:
+ * so instead reset them all whenever it wraps back to 0 (then go to 1).
+ * mapping->truncate_count and vma->vm_truncate_count are protected by
+ * i_mmap_lock.
+ *
+ * In order to make forward progress despite repeatedly restarting some
+ * large vma, note the break_addr set by unmap_vmas when it breaks out:
+ * and restart from that address when we reach that vma again. It might
+ * have been split or merged, shrunk or extended, but never shifted: so
+ * restart_addr remains valid so long as it remains in the vma's range.
+ * unmap_mapping_range forces truncate_count to leap over page-aligned
+ * values so we can save vma's restart_addr in its truncate_count field.
+ */
+#define is_restart_addr(truncate_count) (!((truncate_count) & ~PAGE_MASK))
+
+static void reset_vma_truncate_counts(struct address_space *mapping)
+{
+ struct vm_area_struct *vma;
+ struct prio_tree_iter iter;
+
+ vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, 0, ULONG_MAX)
+ vma->vm_truncate_count = 0;
+ list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
+ vma->vm_truncate_count = 0;
+}
+
+static int unmap_mapping_range_vma(struct vm_area_struct *vma,
+ unsigned long start_addr, unsigned long end_addr,
+ struct zap_details *details)
+{
+ unsigned long restart_addr;
+ int need_break;
+
+again:
+ restart_addr = vma->vm_truncate_count;
+ if (is_restart_addr(restart_addr) && start_addr < restart_addr) {
+ start_addr = restart_addr;
+ if (start_addr >= end_addr) {
+ /* Top of vma has been split off since last time */
+ vma->vm_truncate_count = details->truncate_count;
+ return 0;
+ }
+ }
+
+ details->break_addr = end_addr;
+ zap_page_range(vma, start_addr, end_addr - start_addr, details);
+
+ /*
+ * We cannot rely on the break test in unmap_vmas:
+ * on the one hand, we don't want to restart our loop
+ * just because that broke out for the page_table_lock;
+ * on the other hand, it does no test when vma is small.
+ */
+ need_break = need_resched() ||
+ need_lockbreak(details->i_mmap_lock);
+
+ if (details->break_addr >= end_addr) {
+ /* We have now completed this vma: mark it so */
+ vma->vm_truncate_count = details->truncate_count;
+ if (!need_break)
+ return 0;
+ } else {
+ /* Note restart_addr in vma's truncate_count field */
+ vma->vm_truncate_count = details->break_addr;
+ if (!need_break)
+ goto again;
+ }
+
+ spin_unlock(details->i_mmap_lock);
+ cond_resched();
+ spin_lock(details->i_mmap_lock);
+ return -EINTR;
+}
+
+static inline void unmap_mapping_range_tree(struct prio_tree_root *root,
+ struct zap_details *details)
+{
+ struct vm_area_struct *vma;
+ struct prio_tree_iter iter;
+ pgoff_t vba, vea, zba, zea;
+
+restart:
+ vma_prio_tree_foreach(vma, &iter, root,
+ details->first_index, details->last_index) {
+ /* Skip quickly over those we have already dealt with */
+ if (vma->vm_truncate_count == details->truncate_count)
+ continue;
+
+ vba = vma->vm_pgoff;
+ vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1;
+ /* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */
+ zba = details->first_index;
+ if (zba < vba)
+ zba = vba;
+ zea = details->last_index;
+ if (zea > vea)
+ zea = vea;
+
+ if (unmap_mapping_range_vma(vma,
+ ((zba - vba) << PAGE_SHIFT) + vma->vm_start,
+ ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
+ details) < 0)
+ goto restart;
+ }
+}
+
+static inline void unmap_mapping_range_list(struct list_head *head,
+ struct zap_details *details)
+{
+ struct vm_area_struct *vma;
+
+ /*
+ * In nonlinear VMAs there is no correspondence between virtual address
+ * offset and file offset. So we must perform an exhaustive search
+ * across *all* the pages in each nonlinear VMA, not just the pages
+ * whose virtual address lies outside the file truncation point.
+ */
+restart:
+ list_for_each_entry(vma, head, shared.vm_set.list) {
+ /* Skip quickly over those we have already dealt with */
+ if (vma->vm_truncate_count == details->truncate_count)
+ continue;
+ details->nonlinear_vma = vma;
+ if (unmap_mapping_range_vma(vma, vma->vm_start,
+ vma->vm_end, details) < 0)
+ goto restart;
+ }
+}
+
+/**
+ * unmap_mapping_range - unmap the portion of all mmaps
+ * in the specified address_space corresponding to the specified
+ * page range in the underlying file.
+ * @address_space: the address space containing mmaps to be unmapped.
+ * @holebegin: byte in first page to unmap, relative to the start of
+ * the underlying file. This will be rounded down to a PAGE_SIZE
+ * boundary. Note that this is different from vmtruncate(), which
+ * must keep the partial page. In contrast, we must get rid of
+ * partial pages.
+ * @holelen: size of prospective hole in bytes. This will be rounded
+ * up to a PAGE_SIZE boundary. A holelen of zero truncates to the
+ * end of the file.
+ * @even_cows: 1 when truncating a file, unmap even private COWed pages;
+ * but 0 when invalidating pagecache, don't throw away private data.
+ */
+void unmap_mapping_range(struct address_space *mapping,
+ loff_t const holebegin, loff_t const holelen, int even_cows)
+{
+ struct zap_details details;
+ pgoff_t hba = holebegin >> PAGE_SHIFT;
+ pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+ /* Check for overflow. */
+ if (sizeof(holelen) > sizeof(hlen)) {
+ long long holeend =
+ (holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ if (holeend & ~(long long)ULONG_MAX)
+ hlen = ULONG_MAX - hba + 1;
+ }
+
+ details.check_mapping = even_cows? NULL: mapping;
+ details.nonlinear_vma = NULL;
+ details.first_index = hba;
+ details.last_index = hba + hlen - 1;
+ if (details.last_index < details.first_index)
+ details.last_index = ULONG_MAX;
+ details.i_mmap_lock = &mapping->i_mmap_lock;
+
+ spin_lock(&mapping->i_mmap_lock);
+
+ /* serialize i_size write against truncate_count write */
+ smp_wmb();
+ /* Protect against page faults, and endless unmapping loops */
+ mapping->truncate_count++;
+ /*
+ * For archs where spin_lock has inclusive semantics like ia64
+ * this smp_mb() prevents the pagetable contents from being
+ * read before the truncate_count increment is visible to
+ * other cpus.
+ */
+ smp_mb();
+ if (unlikely(is_restart_addr(mapping->truncate_count))) {
+ if (mapping->truncate_count == 0)
+ reset_vma_truncate_counts(mapping);
+ mapping->truncate_count++;
+ }
+ details.truncate_count = mapping->truncate_count;
+
+ if (unlikely(!prio_tree_empty(&mapping->i_mmap)))
+ unmap_mapping_range_tree(&mapping->i_mmap, &details);
+ if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
+ unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
+ spin_unlock(&mapping->i_mmap_lock);
+}
+EXPORT_SYMBOL(unmap_mapping_range);
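+
+/*
+ * Usage sketch (illustrative): besides the truncate path in
+ * vmtruncate() below, pagecache invalidation can unmap every mapping
+ * of a file while preserving private COWed data by passing
+ * even_cows == 0; holelen == 0 means "through the end of the file":
+ *
+ *	unmap_mapping_range(inode->i_mapping, 0, 0, 0);
+ */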
+
+/*
+ * Handle all mappings that got truncated by a "truncate()"
+ * system call.
+ *
+ * NOTE! We have to be ready to update the memory sharing
+ * between the file and the memory map for a potential last
+ * incomplete page. Ugly, but necessary.
+ */
+int vmtruncate(struct inode * inode, loff_t offset)
+{
+ struct address_space *mapping = inode->i_mapping;
+ unsigned long limit;
+
+ if (inode->i_size < offset)
+ goto do_expand;
+ /*
+ * truncation of in-use swapfiles is disallowed - it would cause
+ * subsequent swapout to scribble on the now-freed blocks.
+ */
+ if (IS_SWAPFILE(inode))
+ goto out_busy;
+ i_size_write(inode, offset);
+ unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
+ truncate_inode_pages(mapping, offset);
+ goto out_truncate;
+
+do_expand:
+ limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
+ if (limit != RLIM_INFINITY && offset > limit)
+ goto out_sig;
+ if (offset > inode->i_sb->s_maxbytes)
+ goto out_big;
+ i_size_write(inode, offset);
+
+out_truncate:
+ if (inode->i_op && inode->i_op->truncate)
+ inode->i_op->truncate(inode);
+ return 0;
+out_sig:
+ send_sig(SIGXFSZ, current, 0);
+out_big:
+ return -EFBIG;
+out_busy:
+ return -ETXTBSY;
+}
+
+EXPORT_SYMBOL(vmtruncate);
+
+/*
+ * Primitive swap readahead code. We simply read an aligned block of
+ * (1 << page_cluster) entries in the swap area. This method is chosen
+ * because it doesn't cost us any seek time. We also make sure to queue
+ * the 'original' request together with the readahead ones...
+ *
+ * This has been extended to use the NUMA policies from the mm triggering
+ * the readahead.
+ *
+ * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
+ */
+void swapin_readahead(swp_entry_t entry, unsigned long addr, struct vm_area_struct *vma)
+{
+#ifdef CONFIG_NUMA
+ struct vm_area_struct *next_vma = vma ? vma->vm_next : NULL;
+#endif
+ int i, num;
+ struct page *new_page;
+ unsigned long offset;
+
+ /*
+ * Get the number of handles we should do readahead io to.
+ */
+ num = valid_swaphandles(entry, &offset);
+ for (i = 0; i < num; offset++, i++) {
+ /* Ok, do the async read-ahead now */
+ new_page = read_swap_cache_async(swp_entry(swp_type(entry),
+ offset), vma, addr);
+ if (!new_page)
+ break;
+ page_cache_release(new_page);
+#ifdef CONFIG_NUMA
+ /*
+ * Find the next applicable VMA for the NUMA policy.
+ */
+ addr += PAGE_SIZE;
+ if (addr == 0)
+ vma = NULL;
+ if (vma) {
+ if (addr >= vma->vm_end) {
+ vma = next_vma;
+ next_vma = vma ? vma->vm_next : NULL;
+ }
+ if (vma && addr < vma->vm_start)
+ vma = NULL;
+ } else {
+ if (next_vma && addr >= next_vma->vm_start) {
+ vma = next_vma;
+ next_vma = vma->vm_next;
+ }
+ }
+#endif
+ }
+ lru_add_drain(); /* Push any new pages onto the LRU now */
+}
+
+/*
+ * We hold the mm semaphore and the page_table_lock on entry and
+ * should release the pagetable lock on exit..
+ */
+static int do_swap_page(struct mm_struct * mm,
+ struct vm_area_struct * vma, unsigned long address,
+ pte_t *page_table, pmd_t *pmd, pte_t orig_pte, int write_access)
+{
+ struct page *page;
+ swp_entry_t entry = pte_to_swp_entry(orig_pte);
+ pte_t pte;
+ int ret = VM_FAULT_MINOR;
+
+ pte_unmap(page_table);
+ spin_unlock(&mm->page_table_lock);
+ page = lookup_swap_cache(entry);
+ if (!page) {
+ swapin_readahead(entry, address, vma);
+ page = read_swap_cache_async(entry, vma, address);
+ if (!page) {
+ /*
+ * Back out if somebody else faulted in this pte while
+ * we released the page table lock.
+ */
+ spin_lock(&mm->page_table_lock);
+ page_table = pte_offset_map(pmd, address);
+ if (likely(pte_same(*page_table, orig_pte)))
+ ret = VM_FAULT_OOM;
+ else
+ ret = VM_FAULT_MINOR;
+ pte_unmap(page_table);
+ spin_unlock(&mm->page_table_lock);
+ goto out;
+ }
+
+ /* Had to read the page from swap area: Major fault */
+ ret = VM_FAULT_MAJOR;
+ inc_page_state(pgmajfault);
+ grab_swap_token();
+ }
+
+ mark_page_accessed(page);
+ lock_page(page);
+
+ /*
+ * Back out if somebody else faulted in this pte while we
+ * released the page table lock.
+ */
+ spin_lock(&mm->page_table_lock);
+ page_table = pte_offset_map(pmd, address);
+ if (unlikely(!pte_same(*page_table, orig_pte))) {
+ pte_unmap(page_table);
+ spin_unlock(&mm->page_table_lock);
+ unlock_page(page);
+ page_cache_release(page);
+ ret = VM_FAULT_MINOR;
+ goto out;
+ }
+
+ /* The page isn't present yet, go ahead with the fault. */
+
+ swap_free(entry);
+ if (vm_swap_full())
+ remove_exclusive_swap_page(page);
+
+ mm->rss++;
+ acct_update_integrals();
+ update_mem_hiwater();
+
+ pte = mk_pte(page, vma->vm_page_prot);
+ if (write_access && can_share_swap_page(page)) {
+ pte = maybe_mkwrite(pte_mkdirty(pte), vma);
+ write_access = 0;
+ }
+ unlock_page(page);
+
+ flush_icache_page(vma, page);
+ set_pte(page_table, pte);
+ page_add_anon_rmap(page, vma, address);
+
+ if (write_access) {
+ if (do_wp_page(mm, vma, address,
+ page_table, pmd, pte) == VM_FAULT_OOM)
+ ret = VM_FAULT_OOM;
+ goto out;
+ }
+
+ /* No need to invalidate - it was non-present before */
+ update_mmu_cache(vma, address, pte);
+ pte_unmap(page_table);
+ spin_unlock(&mm->page_table_lock);
+out:
+ return ret;
+}
+
+/*
+ * We are called with the MM semaphore and page_table_lock
+ * spinlock held to protect against concurrent faults in
+ * multithreaded programs.
+ */
+static int
+do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ pte_t *page_table, pmd_t *pmd, int write_access,
+ unsigned long addr)
+{
+ pte_t entry;
+ struct page * page = ZERO_PAGE(addr);
+
+ /* Read-only mapping of ZERO_PAGE. */
+ entry = pte_wrprotect(mk_pte(ZERO_PAGE(addr), vma->vm_page_prot));
+
+ /* ..except if it's a write access */
+ if (write_access) {
+ /* Allocate our own private page. */
+ pte_unmap(page_table);
+ spin_unlock(&mm->page_table_lock);
+
+ if (unlikely(anon_vma_prepare(vma)))
+ goto no_mem;
+ page = alloc_zeroed_user_highpage(vma, addr);
+ if (!page)
+ goto no_mem;
+
+ spin_lock(&mm->page_table_lock);
+ page_table = pte_offset_map(pmd, addr);
+
+ if (!pte_none(*page_table)) {
+ pte_unmap(page_table);
+ page_cache_release(page);
+ spin_unlock(&mm->page_table_lock);
+ goto out;
+ }
+ mm->rss++;
+ acct_update_integrals();
+ update_mem_hiwater();
+ entry = maybe_mkwrite(pte_mkdirty(mk_pte(page,
+ vma->vm_page_prot)),
+ vma);
+ lru_cache_add_active(page);
+ SetPageReferenced(page);
+ page_add_anon_rmap(page, vma, addr);
+ }
+
+ ptep_establish_new(vma, addr, page_table, entry);
+ pte_unmap(page_table);
+
+ /* No need to invalidate - it was non-present before */
+ update_mmu_cache(vma, addr, entry);
+ spin_unlock(&mm->page_table_lock);
+out:
+ return VM_FAULT_MINOR;
+no_mem:
+ return VM_FAULT_OOM;
+}
+
+/*
+ * do_no_page() tries to create a new page mapping. It aggressively
+ * tries to share with existing pages, but makes a separate copy if
+ * the "write_access" parameter is true in order to avoid the next
+ * page fault.
+ *
+ * As this is called only for pages that do not currently exist, we
+ * do not need to flush old virtual caches or the TLB.
+ *
+ * This is called with the MM semaphore held and the page table
+ * spinlock held. Exit with the spinlock released.
+ */
+static int
+do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long address, int write_access, pte_t *page_table, pmd_t *pmd)
+{
+ struct page * new_page;
+ struct address_space *mapping = NULL;
+ pte_t entry;
+ unsigned int sequence = 0;
+ int ret = VM_FAULT_MINOR;
+ int anon = 0;
+
+ if (!vma->vm_ops || !vma->vm_ops->nopage)
+ return do_anonymous_page(mm, vma, page_table,
+ pmd, write_access, address);
+ pte_unmap(page_table);
+ spin_unlock(&mm->page_table_lock);
+
+ if (vma->vm_file) {
+ mapping = vma->vm_file->f_mapping;
+ sequence = mapping->truncate_count;
+ smp_rmb(); /* serializes i_size against truncate_count */
+ }
+retry:
+ cond_resched();
+ new_page = vma->vm_ops->nopage(vma, address & PAGE_MASK, &ret);
+ /*
+ * No smp_rmb is needed here as long as there's a full
+ * spin_lock/unlock sequence inside the ->nopage callback
+ * (for the pagecache lookup) that acts as an implicit
+ * smp_mb() and prevents the i_size read to happen
+ * after the next truncate_count read.
+ */
+
+ /* no page was available -- either SIGBUS or OOM */
+ if (new_page == NOPAGE_SIGBUS)
+ return VM_FAULT_SIGBUS;
+ if (new_page == NOPAGE_OOM)
+ return VM_FAULT_OOM;
+
+ /*
+ * Should we do an early C-O-W break?
+ */
+ if (write_access && !(vma->vm_flags & VM_SHARED)) {
+ struct page *page;
+
+ if (unlikely(anon_vma_prepare(vma)))
+ goto oom;
+ page = alloc_page_vma(GFP_HIGHUSER, vma, address);
+ if (!page)
+ goto oom;
+ copy_user_highpage(page, new_page, address);
+ page_cache_release(new_page);
+ new_page = page;
+ anon = 1;
+ }
+
+ spin_lock(&mm->page_table_lock);
+ /*
+ * For a file-backed vma, someone could have truncated or otherwise
+ * invalidated this page. If unmap_mapping_range got called,
+ * retry getting the page.
+ */
+ if (mapping && unlikely(sequence != mapping->truncate_count)) {
+ sequence = mapping->truncate_count;
+ spin_unlock(&mm->page_table_lock);
+ page_cache_release(new_page);
+ goto retry;
+ }
+ page_table = pte_offset_map(pmd, address);
+
+ /*
+ * This silly early PAGE_DIRTY setting removes a race
+ * due to the bad i386 page protection. But it's valid
+ * for other architectures too.
+ *
+ * Note that if write_access is true, we either now have
+ * an exclusive copy of the page, or this is a shared mapping,
+ * so we can make it writable and dirty to avoid having to
+ * handle that later.
+ */
+ /* Only go through if we didn't race with anybody else... */
+ if (pte_none(*page_table)) {
+ if (!PageReserved(new_page))
+ ++mm->rss;
+ acct_update_integrals();
+ update_mem_hiwater();
+
+ flush_icache_page(vma, new_page);
+ entry = mk_pte(new_page, vma->vm_page_prot);
+ if (write_access)
+ entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+ ptep_establish_new(vma, address, page_table, entry);
+ if (anon) {
+ lru_cache_add_active(new_page);
+ page_add_anon_rmap(new_page, vma, address);
+ } else
+ page_add_file_rmap(new_page);
+ pte_unmap(page_table);
+ } else {
+ /* One of our sibling threads was faster, back out. */
+ pte_unmap(page_table);
+ page_cache_release(new_page);
+ spin_unlock(&mm->page_table_lock);
+ goto out;
+ }
+
+ /* no need to invalidate: a not-present page shouldn't be cached */
+ update_mmu_cache(vma, address, entry);
+ spin_unlock(&mm->page_table_lock);
+out:
+ return ret;
+oom:
+ page_cache_release(new_page);
+ ret = VM_FAULT_OOM;
+ goto out;
+}
+
+/*
+ * Fault of a previously existing named mapping. Repopulate the pte
+ * from the encoded file_pte if possible. This enables swappable
+ * nonlinear vmas.
+ */
+static int do_file_page(struct mm_struct * mm, struct vm_area_struct * vma,
+ unsigned long address, int write_access, pte_t *pte, pmd_t *pmd)
+{
+ unsigned long pgoff;
+ int err;
+
+ BUG_ON(!vma->vm_ops || !vma->vm_ops->nopage);
+ /*
+ * Fall back to the linear mapping if the fs does not support
+ * ->populate:
+ */
+ if (!vma->vm_ops || !vma->vm_ops->populate ||
+ (write_access && !(vma->vm_flags & VM_SHARED))) {
+ pte_clear(pte);
+ return do_no_page(mm, vma, address, write_access, pte, pmd);
+ }
+
+ pgoff = pte_to_pgoff(*pte);
+
+ pte_unmap(pte);
+ spin_unlock(&mm->page_table_lock);
+
+ err = vma->vm_ops->populate(vma, address & PAGE_MASK, PAGE_SIZE, vma->vm_page_prot, pgoff, 0);
+ if (err == -ENOMEM)
+ return VM_FAULT_OOM;
+ if (err)
+ return VM_FAULT_SIGBUS;
+ return VM_FAULT_MAJOR;
+}
+
+/*
+ * These routines also need to handle stuff like marking pages dirty
+ * and/or accessed for architectures that don't do it in hardware (most
+ * RISC architectures). The early dirtying is also good on the i386.
+ *
+ * There is also a hook called "update_mmu_cache()" that architectures
+ * with external mmu caches can use to update those (ie the Sparc or
+ * PowerPC hashed page tables that act as extended TLBs).
+ *
+ * Note the "page_table_lock". It is to protect against kswapd removing
+ * pages from under us. Note that kswapd only ever _removes_ pages, never
+ * adds them. As such, once we have noticed that the page is not present,
+ * we can drop the lock early.
+ *
+ * The adding of pages is protected by the MM semaphore (which we hold),
+ * so we don't need to worry about a page suddenly being added into
+ * our VM.
+ *
+ * We enter with the pagetable spinlock held, we are supposed to
+ * release it when done.
+ */
+static inline int handle_pte_fault(struct mm_struct *mm,
+ struct vm_area_struct * vma, unsigned long address,
+ int write_access, pte_t *pte, pmd_t *pmd)
+{
+ pte_t entry;
+
+ entry = *pte;
+ if (!pte_present(entry)) {
+ /*
+ * If it truly wasn't present, we know that kswapd
+ * and the PTE updates will not touch it later. So
+ * drop the lock.
+ */
+ if (pte_none(entry))
+ return do_no_page(mm, vma, address, write_access, pte, pmd);
+ if (pte_file(entry))
+ return do_file_page(mm, vma, address, write_access, pte, pmd);
+ return do_swap_page(mm, vma, address, pte, pmd, entry, write_access);
+ }
+
+ if (write_access) {
+ if (!pte_write(entry))
+ return do_wp_page(mm, vma, address, pte, pmd, entry);
+
+ entry = pte_mkdirty(entry);
+ }
+ entry = pte_mkyoung(entry);
+ ptep_set_access_flags(vma, address, pte, entry, write_access);
+ update_mmu_cache(vma, address, entry);
+ pte_unmap(pte);
+ spin_unlock(&mm->page_table_lock);
+ return VM_FAULT_MINOR;
+}
+
+/*
+ * By the time we get here, we already hold the mm semaphore
+ */
+int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct * vma,
+ unsigned long address, int write_access)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ __set_current_state(TASK_RUNNING);
+
+ inc_page_state(pgfault);
+
+ if (is_vm_hugetlb_page(vma))
+ return VM_FAULT_SIGBUS; /* mapping truncation does this. */
+
+ /*
+ * We need the page table lock to synchronize with kswapd
+ * and the SMP-safe atomic PTE updates.
+ */
+ pgd = pgd_offset(mm, address);
+ spin_lock(&mm->page_table_lock);
+
+ pud = pud_alloc(mm, pgd, address);
+ if (!pud)
+ goto oom;
+
+ pmd = pmd_alloc(mm, pud, address);
+ if (!pmd)
+ goto oom;
+
+ pte = pte_alloc_map(mm, pmd, address);
+ if (!pte)
+ goto oom;
+
+ return handle_pte_fault(mm, vma, address, write_access, pte, pmd);
+
+ oom:
+ spin_unlock(&mm->page_table_lock);
+ return VM_FAULT_OOM;
+}
+
+#ifndef __ARCH_HAS_4LEVEL_HACK
+/*
+ * Allocate page upper directory.
+ *
+ * We've already handled the fast-path in-line, and we own the
+ * page table lock.
+ *
+ * On a two-level or three-level page table, this ends up actually being
+ * entirely optimized away.
+ */
+pud_t fastcall *__pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+{
+ pud_t *new;
+
+ spin_unlock(&mm->page_table_lock);
+ new = pud_alloc_one(mm, address);
+ spin_lock(&mm->page_table_lock);
+ if (!new)
+ return NULL;
+
+ /*
+ * Because we dropped the lock, we should re-check the
+ * entry, as somebody else could have populated it..
+ */
+ if (pgd_present(*pgd)) {
+ pud_free(new);
+ goto out;
+ }
+ pgd_populate(mm, pgd, new);
+ out:
+ return pud_offset(pgd, address);
+}
+
+/*
+ * Allocate page middle directory.
+ *
+ * We've already handled the fast-path in-line, and we own the
+ * page table lock.
+ *
+ * On a two-level page table, this ends up actually being entirely
+ * optimized away.
+ */
+pmd_t fastcall *__pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
+{
+ pmd_t *new;
+
+ spin_unlock(&mm->page_table_lock);
+ new = pmd_alloc_one(mm, address);
+ spin_lock(&mm->page_table_lock);
+ if (!new)
+ return NULL;
+
+ /*
+ * Because we dropped the lock, we should re-check the
+ * entry, as somebody else could have populated it..
+ */
+ if (pud_present(*pud)) {
+ pmd_free(new);
+ goto out;
+ }
+ pud_populate(mm, pud, new);
+ out:
+ return pmd_offset(pud, address);
+}
+#else
+pmd_t fastcall *__pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
+{
+ pmd_t *new;
+
+ spin_unlock(&mm->page_table_lock);
+ new = pmd_alloc_one(mm, address);
+ spin_lock(&mm->page_table_lock);
+ if (!new)
+ return NULL;
+
+ /*
+ * Because we dropped the lock, we should re-check the
+ * entry, as somebody else could have populated it..
+ */
+ if (pgd_present(*pud)) {
+ pmd_free(new);
+ goto out;
+ }
+ pgd_populate(mm, pud, new);
+out:
+ return pmd_offset(pud, address);
+}
+#endif
+
+int make_pages_present(unsigned long addr, unsigned long end)
+{
+ int ret, len, write;
+ struct vm_area_struct * vma;
+
+ vma = find_vma(current->mm, addr);
+ if (!vma)
+ return -1;
+ write = (vma->vm_flags & VM_WRITE) != 0;
+ if (addr >= end)
+ BUG();
+ if (end > vma->vm_end)
+ BUG();
+ len = (end+PAGE_SIZE-1)/PAGE_SIZE-addr/PAGE_SIZE;
+ ret = get_user_pages(current, current->mm, addr,
+ len, write, 0, NULL, NULL);
+ if (ret < 0)
+ return ret;
+ return ret == len ? 0 : -1;
+}
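+
+/*
+ * Usage sketch (hedged): mlock() is the natural caller; after a vma
+ * is marked VM_LOCKED its range is faulted in up front, roughly:
+ *
+ *	if (newflags & VM_LOCKED)
+ *		ret = make_pages_present(start, end);
+ */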
+
+/*
+ * Map a vmalloc()-space virtual address to the physical page.
+ */
+struct page * vmalloc_to_page(void * vmalloc_addr)
+{
+ unsigned long addr = (unsigned long) vmalloc_addr;
+ struct page *page = NULL;
+ pgd_t *pgd = pgd_offset_k(addr);
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *ptep, pte;
+
+ if (!pgd_none(*pgd)) {
+ pud = pud_offset(pgd, addr);
+ if (!pud_none(*pud)) {
+ pmd = pmd_offset(pud, addr);
+ if (!pmd_none(*pmd)) {
+ ptep = pte_offset_map(pmd, addr);
+ pte = *ptep;
+ if (pte_present(pte))
+ page = pte_page(pte);
+ pte_unmap(ptep);
+ }
+ }
+ }
+ return page;
+}
+
+EXPORT_SYMBOL(vmalloc_to_page);
+
+/*
+ * Map a vmalloc()-space virtual address to the physical page frame number.
+ */
+unsigned long vmalloc_to_pfn(void * vmalloc_addr)
+{
+ return page_to_pfn(vmalloc_to_page(vmalloc_addr));
+}
+
+EXPORT_SYMBOL(vmalloc_to_pfn);
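
A hedged sketch of how these two exports might be used from a 2.6-era module;
the module and its names are hypothetical, not part of this patch:

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

static int __init vmap_demo_init(void)
{
	void *buf = vmalloc(PAGE_SIZE);

	if (!buf)
		return -ENOMEM;
	/* walk the kernel page tables back to the backing frame */
	printk(KERN_INFO "page %p pfn %lu\n",
	       vmalloc_to_page(buf), vmalloc_to_pfn(buf));
	vfree(buf);
	return 0;
}
module_init(vmap_demo_init);
MODULE_LICENSE("GPL");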
+
+/*
+ * update_mem_hiwater
+ * - update per process rss and vm high water data
+ */
+void update_mem_hiwater(void)
+{
+ struct task_struct *tsk = current;
+
+ if (tsk->mm) {
+ if (tsk->mm->hiwater_rss < tsk->mm->rss)
+ tsk->mm->hiwater_rss = tsk->mm->rss;
+ if (tsk->mm->hiwater_vm < tsk->mm->total_vm)
+ tsk->mm->hiwater_vm = tsk->mm->total_vm;
+ }
+}
+
+#if !defined(__HAVE_ARCH_GATE_AREA)
+
+#if defined(AT_SYSINFO_EHDR)
+struct vm_area_struct gate_vma;
+
+static int __init gate_vma_init(void)
+{
+ gate_vma.vm_mm = NULL;
+ gate_vma.vm_start = FIXADDR_USER_START;
+ gate_vma.vm_end = FIXADDR_USER_END;
+ gate_vma.vm_page_prot = PAGE_READONLY;
+ gate_vma.vm_flags = 0;
+ return 0;
+}
+__initcall(gate_vma_init);
+#endif
+
+struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
+{
+#ifdef AT_SYSINFO_EHDR
+ return &gate_vma;
+#else
+ return NULL;
+#endif
+}
+
+int in_gate_area_no_task(unsigned long addr)
+{
+#ifdef AT_SYSINFO_EHDR
+ if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
+ return 1;
+#endif
+ return 0;
+}
+
+#endif /* __HAVE_ARCH_GATE_AREA */
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6-xen-sparse/mm/page_alloc.c
--- /dev/null Thu Aug 4 01:13:46 2005
+++ b/linux-2.6-xen-sparse/mm/page_alloc.c Tue Aug 9 15:17:45 2005
@@ -0,0 +1,2157 @@
+/*
+ * linux/mm/page_alloc.c
+ *
+ *  Manages the free list; the system allocates free pages here.
+ * Note that kmalloc() lives in slab.c
+ *
+ * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
+ * Swap reorganised 29.12.95, Stephen Tweedie
+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
+ * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
+ * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
+ * Zone balancing, Kanoj Sarcar, SGI, Jan 2000
+ * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
+ * (lots of bits borrowed from Ingo Molnar & Andrew Morton)
+ */
+
+#include <linux/config.h>
+#include <linux/stddef.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/interrupt.h>
+#include <linux/pagemap.h>
+#include <linux/bootmem.h>
+#include <linux/compiler.h>
+#include <linux/module.h>
+#include <linux/suspend.h>
+#include <linux/pagevec.h>
+#include <linux/blkdev.h>
+#include <linux/slab.h>
+#include <linux/notifier.h>
+#include <linux/topology.h>
+#include <linux/sysctl.h>
+#include <linux/cpu.h>
+#include <linux/nodemask.h>
+#include <linux/vmalloc.h>
+
+#include <asm/tlbflush.h>
+#include "internal.h"
+
+/* MCD - HACK: Find somewhere to initialize this EARLY, or make this initializer cleaner */
+nodemask_t node_online_map = { { [0] = 1UL } };
+nodemask_t node_possible_map = NODE_MASK_ALL;
+struct pglist_data *pgdat_list;
+unsigned long totalram_pages;
+unsigned long totalhigh_pages;
+long nr_swap_pages;
+/*
+ * results with 256, 32 in the lowmem_reserve sysctl:
+ * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
+ * 1G machine -> (16M dma, 784M normal, 224M high)
+ * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
+ * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
+ * HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
+ */
+int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = { 256, 32 };
+
+EXPORT_SYMBOL(totalram_pages);
+EXPORT_SYMBOL(nr_swap_pages);
+
+/*
+ * Used by page_zone() to look up the address of the struct zone whose
+ * id is encoded in the upper bits of page->flags
+ */
+struct zone *zone_table[1 << (ZONES_SHIFT + NODES_SHIFT)];
+EXPORT_SYMBOL(zone_table);
+
+static char *zone_names[MAX_NR_ZONES] = { "DMA", "Normal", "HighMem" };
+int min_free_kbytes = 1024;
+
+unsigned long __initdata nr_kernel_pages;
+unsigned long __initdata nr_all_pages;
+
+/*
+ * Temporary debugging check for pages not lying within a given zone.
+ */
+static int bad_range(struct zone *zone, struct page *page)
+{
+ if (page_to_pfn(page) >= zone->zone_start_pfn + zone->spanned_pages)
+ return 1;
+ if (page_to_pfn(page) < zone->zone_start_pfn)
+ return 1;
+#ifdef CONFIG_HOLES_IN_ZONE
+ if (!pfn_valid(page_to_pfn(page)))
+ return 1;
+#endif
+ if (zone != page_zone(page))
+ return 1;
+ return 0;
+}
+
+static void bad_page(const char *function, struct page *page)
+{
+ printk(KERN_EMERG "Bad page state at %s (in process '%s', page %p)\n",
+ function, current->comm, page);
+ printk(KERN_EMERG "flags:0x%0*lx mapping:%p mapcount:%d count:%d\n",
+ (int)(2*sizeof(page_flags_t)), (unsigned long)page->flags,
+ page->mapping, page_mapcount(page), page_count(page));
+ printk(KERN_EMERG "Backtrace:\n");
+ dump_stack();
+ printk(KERN_EMERG "Trying to fix it up, but a reboot is needed\n");
+ page->flags &= ~(1 << PG_private |
+ 1 << PG_locked |
+ 1 << PG_lru |
+ 1 << PG_active |
+ 1 << PG_dirty |
+ 1 << PG_swapcache |
+ 1 << PG_writeback);
+ set_page_count(page, 0);
+ reset_page_mapcount(page);
+ page->mapping = NULL;
+ tainted |= TAINT_BAD_PAGE;
+}
+
+#ifndef CONFIG_HUGETLB_PAGE
+#define prep_compound_page(page, order) do { } while (0)
+#define destroy_compound_page(page, order) do { } while (0)
+#else
+/*
+ * Higher-order pages are called "compound pages". They are structured thusly:
+ *
+ * The first PAGE_SIZE page is called the "head page".
+ *
+ * The remaining PAGE_SIZE pages are called "tail pages".
+ *
+ * All pages have PG_compound set. All pages have their ->private pointing at
+ * the head page (even the head page has this).
+ *
+ * The first tail page's ->mapping, if non-zero, holds the address of the
+ * compound page's put_page() function.
+ *
+ * The order of the allocation is stored in the first tail page's ->index
+ * This is only for debug at present. This usage means that zero-order pages
+ * may not be compound.
+ */
+static void prep_compound_page(struct page *page, unsigned long order)
+{
+ int i;
+ int nr_pages = 1 << order;
+
+ page[1].mapping = NULL;
+ page[1].index = order;
+ for (i = 0; i < nr_pages; i++) {
+ struct page *p = page + i;
+
+ SetPageCompound(p);
+ p->private = (unsigned long)page;
+ }
+}
+
+static void destroy_compound_page(struct page *page, unsigned long order)
+{
+ int i;
+ int nr_pages = 1 << order;
+
+ if (!PageCompound(page))
+ return;
+
+ if (page[1].index != order)
+ bad_page(__FUNCTION__, page);
+
+ for (i = 0; i < nr_pages; i++) {
+ struct page *p = page + i;
+
+ if (!PageCompound(p))
+ bad_page(__FUNCTION__, page);
+ if (p->private != (unsigned long)page)
+ bad_page(__FUNCTION__, page);
+ ClearPageCompound(p);
+ }
+}
+#endif /* CONFIG_HUGETLB_PAGE */
+
+/*
+ * function for dealing with page's order in buddy system.
+ * zone->lock is already acquired when we use these.
+ * So, we don't need atomic page->flags operations here.
+ */
+static inline unsigned long page_order(struct page *page) {
+ return page->private;
+}
+
+static inline void set_page_order(struct page *page, int order) {
+ page->private = order;
+ __SetPagePrivate(page);
+}
+
+static inline void rmv_page_order(struct page *page)
+{
+ __ClearPagePrivate(page);
+ page->private = 0;
+}
+
+/*
+ * This function checks whether a page is free and is the buddy.
+ * We can coalesce a page and its buddy if
+ * (a) the buddy is free &&
+ * (b) the buddy is on the buddy system &&
+ * (c) the page and its buddy have the same order.
+ * For recording a page's order, we use page->private and PG_private.
+ *
+ */
+static inline int page_is_buddy(struct page *page, int order)
+{
+ if (PagePrivate(page) &&
+ (page_order(page) == order) &&
+ !PageReserved(page) &&
+ page_count(page) == 0)
+ return 1;
+ return 0;
+}
+
+/*
+ * Freeing function for a buddy system allocator.
+ *
+ * The concept of a buddy system is to maintain direct-mapped table
+ * (containing bit values) for memory blocks of various "orders".
+ * The bottom level table contains the map for the smallest allocatable
+ * units of memory (here, pages), and each level above it describes
+ * pairs of units from the levels below, hence, "buddies".
+ * At a high level, all that happens here is marking the table entry
+ * at the bottom level available, and propagating the changes upward
+ * as necessary, plus some accounting needed to play nicely with other
+ * parts of the VM system.
+ * At each level, we keep a list of pages, each the head of a contiguous
+ * run of (1 << order) free pages marked with PG_private. A page's
+ * order is recorded in its page->private field.
+ * So when we are allocating or freeing one, we can derive the state of the
+ * other. That is, if we allocate a small block, and both were
+ * free, the remainder of the region must be split into blocks.
+ * If a block is freed, and its buddy is also free, then this
+ * triggers coalescing into a block of larger size.
+ *
+ * -- wli
+ */
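
The comment above comes down to simple index arithmetic: a block's buddy at
order o is found by toggling bit o of its page index, and the
"page_idx &= buddy_idx" step in the loop below yields the start of the merged
block. A minimal userspace sketch with hypothetical indices:

#include <stdio.h>

int main(void)
{
	/* hypothetical: free block of order 1 starting at page index 6 */
	unsigned long page_idx = 6, order = 1;
	unsigned long buddy_idx = page_idx ^ (1UL << order);	/* 4 */
	unsigned long merged    = page_idx & buddy_idx;		/* 4 */

	printf("buddy of %lu at order %lu is %lu; merged order-%lu block starts at %lu\n",
	       page_idx, order, buddy_idx, order + 1, merged);
	return 0;
}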
+
+static inline void __free_pages_bulk (struct page *page, struct page *base,
+ struct zone *zone, unsigned int order)
+{
+ unsigned long page_idx;
+ struct page *coalesced;
+ int order_size = 1 << order;
+
+ if (unlikely(order))
+ destroy_compound_page(page, order);
+
+ page_idx = page - base;
+
+ BUG_ON(page_idx & (order_size - 1));
+ BUG_ON(bad_range(zone, page));
+
+ zone->free_pages += order_size;
+ while (order < MAX_ORDER-1) {
+ struct free_area *area;
+ struct page *buddy;
+ int buddy_idx;
+
+ buddy_idx = (page_idx ^ (1 << order));
+ buddy = base + buddy_idx;
+ if (bad_range(zone, buddy))
+ break;
+ if (!page_is_buddy(buddy, order))
+ break;
+ /* Move the buddy up one level. */
+ list_del(&buddy->lru);
+ area = zone->free_area + order;
+ area->nr_free--;
+ rmv_page_order(buddy);
+ page_idx &= buddy_idx;
+ order++;
+ }
+ coalesced = base + page_idx;
+ set_page_order(coalesced, order);
+ list_add(&coalesced->lru, &zone->free_area[order].free_list);
+ zone->free_area[order].nr_free++;
+}
+
+static inline void free_pages_check(const char *function, struct page *page)
+{
+ if ( page_mapped(page) ||
+ page->mapping != NULL ||
+ page_count(page) != 0 ||
+ (page->flags & (
+ 1 << PG_lru |
+ 1 << PG_private |
+ 1 << PG_locked |
+ 1 << PG_active |
+ 1 << PG_reclaim |
+ 1 << PG_slab |
+ 1 << PG_swapcache |
+ 1 << PG_writeback )))
+ bad_page(function, page);
+ if (PageDirty(page))
+ ClearPageDirty(page);
+}
+
+/*
+ * Frees a list of pages.
+ * Assumes all pages on list are in same zone, and of same order.
+ * count is the number of pages to free, or 0 for all on the list.
+ *
+ * If the zone was previously in an "all pages pinned" state then look to
+ * see if this freeing clears that state.
+ *
+ * And clear the zone's pages_scanned counter, to hold off the "all pages are
+ * pinned" detection logic.
+ */
+static int
+free_pages_bulk(struct zone *zone, int count,
+ struct list_head *list, unsigned int order)
+{
+ unsigned long flags;
+ struct page *base, *page = NULL;
+ int ret = 0;
+
+ base = zone->zone_mem_map;
+ spin_lock_irqsave(&zone->lock, flags);
+ zone->all_unreclaimable = 0;
+ zone->pages_scanned = 0;
+ while (!list_empty(list) && count--) {
+ page = list_entry(list->prev, struct page, lru);
+		/* have to delete it here, since __free_pages_bulk manipulates the list */
+ list_del(&page->lru);
+ __free_pages_bulk(page, base, zone, order);
+ ret++;
+ }
+ spin_unlock_irqrestore(&zone->lock, flags);
+ return ret;
+}
+
+void __free_pages_ok(struct page *page, unsigned int order)
+{
+ LIST_HEAD(list);
+ int i;
+
+ if (arch_free_page(page, order))
+ return;
+
+ mod_page_state(pgfree, 1 << order);
+
+#ifndef CONFIG_MMU
+ if (order > 0)
+ for (i = 1 ; i < (1 << order) ; ++i)
+ __put_page(page + i);
+#endif
+
+ for (i = 0 ; i < (1 << order) ; ++i)
+ free_pages_check(__FUNCTION__, page + i);
+ list_add(&page->lru, &list);
+ kernel_map_pages(page, 1<<order, 0);
+ free_pages_bulk(page_zone(page), 1, &list, order);
+}
+
+
+/*
+ * The order of subdivision here is critical for the IO subsystem.
+ * Please do not alter this order without good reasons and regression
+ * testing. Specifically, as large blocks of memory are subdivided,
+ * the order in which smaller blocks are delivered depends on the order
+ * they're subdivided in this function. This is the primary factor
+ * influencing the order in which pages are delivered to the IO
+ * subsystem according to empirical testing, and this is also justified
+ * by considering the behavior of a buddy system containing a single
+ * large block of memory acted on by a series of small allocations.
+ * This behavior is a critical factor in sglist merging's success.
+ *
+ * -- wli
+ */
+static inline struct page *
+expand(struct zone *zone, struct page *page,
+ int low, int high, struct free_area *area)
+{
+ unsigned long size = 1 << high;
+
+ while (high > low) {
+ area--;
+ high--;
+ size >>= 1;
+ BUG_ON(bad_range(zone, &page[size]));
+ list_add(&page[size].lru, &area->free_list);
+ area->nr_free++;
+ set_page_order(&page[size], high);
+ }
+ return page;
+}
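
expand() peels the upper half off the block once per order until it reaches the
requested size; each peeled half goes back on the free list one order down. A
userspace sketch of the split sequence for a hypothetical order-3 block:

#include <stdio.h>

int main(void)
{
	/* hypothetical: satisfy an order-0 request from an order-3 block at index 0 */
	int low = 0, high = 3;
	unsigned long size = 1UL << high;

	while (high > low) {
		high--;
		size >>= 1;
		/* the upper half goes back on the order-`high` free list */
		printf("freed half at index %lu, order %d\n", size, high);
	}
	printf("allocated block at index 0, order %d\n", low);
	return 0;
}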
+
+void set_page_refs(struct page *page, int order)
+{
+#ifdef CONFIG_MMU
+ set_page_count(page, 1);
+#else
+ int i;
+
+ /*
+ * We need to reference all the pages for this order, otherwise if
+ * anyone accesses one of the pages with (get/put) it will be freed.
+ * - eg: access_process_vm()
+ */
+ for (i = 0; i < (1 << order); i++)
+ set_page_count(page + i, 1);
+#endif /* CONFIG_MMU */
+}
+
+/*
+ * This page is about to be returned from the page allocator
+ */
+static void prep_new_page(struct page *page, int order)
+{
+ if (page->mapping || page_mapped(page) ||
+ (page->flags & (
+ 1 << PG_private |
+ 1 << PG_locked |
+ 1 << PG_lru |
+ 1 << PG_active |
+ 1 << PG_dirty |
+ 1 << PG_reclaim |
+ 1 << PG_swapcache |
+ 1 << PG_writeback )))
+ bad_page(__FUNCTION__, page);
+
+ page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
+ 1 << PG_referenced | 1 << PG_arch_1 |
+ 1 << PG_checked | 1 << PG_mappedtodisk);
+ page->private = 0;
+ set_page_refs(page, order);
+ kernel_map_pages(page, 1 << order, 1);
+}
+
+/*
+ * Do the hard work of removing an element from the buddy allocator.
+ * Call me with the zone->lock already held.
+ */
+static struct page *__rmqueue(struct zone *zone, unsigned int order)
+{
+ struct free_area * area;
+ unsigned int current_order;
+ struct page *page;
+
+	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
+ area = zone->free_area + current_order;
+ if (list_empty(&area->free_list))
+ continue;
+
+ page = list_entry(area->free_list.next, struct page, lru);
+ list_del(&page->lru);
+ rmv_page_order(page);
+ area->nr_free--;
+ zone->free_pages -= 1UL << order;
+ return expand(zone, page, order, current_order, area);
+ }
+
+ return NULL;
+}
+
+/*
+ * Obtain a specified number of elements from the buddy allocator, all under
+ * a single hold of the lock, for efficiency. Add them to the supplied list.
+ * Returns the number of new pages which were placed at *list.
+ */
+static int rmqueue_bulk(struct zone *zone, unsigned int order,
+ unsigned long count, struct list_head *list)
+{
+ unsigned long flags;
+ int i;
+ int allocated = 0;
+ struct page *page;
+
+ spin_lock_irqsave(&zone->lock, flags);
+ for (i = 0; i < count; ++i) {
+ page = __rmqueue(zone, order);
+ if (page == NULL)
+ break;
+ allocated++;
+ list_add_tail(&page->lru, list);
+ }
+ spin_unlock_irqrestore(&zone->lock, flags);
+ return allocated;
+}
+
+#if defined(CONFIG_PM) || defined(CONFIG_HOTPLUG_CPU)
+static void __drain_pages(unsigned int cpu)
+{
+ struct zone *zone;
+ int i;
+
+ for_each_zone(zone) {
+ struct per_cpu_pageset *pset;
+
+ pset = &zone->pageset[cpu];
+ for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
+ struct per_cpu_pages *pcp;
+
+ pcp = &pset->pcp[i];
+ pcp->count -= free_pages_bulk(zone, pcp->count,
+ &pcp->list, 0);
+ }
+ }
+}
+#endif /* CONFIG_PM || CONFIG_HOTPLUG_CPU */
+
+#ifdef CONFIG_PM
+
+void mark_free_pages(struct zone *zone)
+{
+ unsigned long zone_pfn, flags;
+ int order;
+ struct list_head *curr;
+
+ if (!zone->spanned_pages)
+ return;
+
+ spin_lock_irqsave(&zone->lock, flags);
+ for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
+		ClearPageNosaveFree(pfn_to_page(zone_pfn + zone->zone_start_pfn));
+
+ for (order = MAX_ORDER - 1; order >= 0; --order)
+ list_for_each(curr, &zone->free_area[order].free_list) {
+ unsigned long start_pfn, i;
+
+			start_pfn = page_to_pfn(list_entry(curr, struct page, lru));
+
+ for (i=0; i < (1<<order); i++)
+ SetPageNosaveFree(pfn_to_page(start_pfn+i));
+ }
+ spin_unlock_irqrestore(&zone->lock, flags);
+}
+
+/*
+ * Spill all of this CPU's per-cpu pages back into the buddy allocator.
+ */
+void drain_local_pages(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ __drain_pages(smp_processor_id());
+ local_irq_restore(flags);
+}
+#endif /* CONFIG_PM */
+
+static void zone_statistics(struct zonelist *zonelist, struct zone *z)
+{
+#ifdef CONFIG_NUMA
+ unsigned long flags;
+ int cpu;
+ pg_data_t *pg = z->zone_pgdat;
+ pg_data_t *orig = zonelist->zones[0]->zone_pgdat;
+ struct per_cpu_pageset *p;
+
+ local_irq_save(flags);
+ cpu = smp_processor_id();
+ p = &z->pageset[cpu];
+ if (pg == orig) {
+ z->pageset[cpu].numa_hit++;
+ } else {
+ p->numa_miss++;
+ zonelist->zones[0]->pageset[cpu].numa_foreign++;
+ }
+ if (pg == NODE_DATA(numa_node_id()))
+ p->local_node++;
+ else
+ p->other_node++;
+ local_irq_restore(flags);
+#endif
+}
+
+/*
+ * Free a 0-order page
+ */
+static void FASTCALL(free_hot_cold_page(struct page *page, int cold));
+static void fastcall free_hot_cold_page(struct page *page, int cold)
+{
+ struct zone *zone = page_zone(page);
+ struct per_cpu_pages *pcp;
+ unsigned long flags;
+
+ if (arch_free_page(page, 0))
+ return;
+
+ kernel_map_pages(page, 1, 0);
+ inc_page_state(pgfree);
+ if (PageAnon(page))
+ page->mapping = NULL;
+ free_pages_check(__FUNCTION__, page);
+ pcp = &zone->pageset[get_cpu()].pcp[cold];
+ local_irq_save(flags);
+ if (pcp->count >= pcp->high)
+ pcp->count -= free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
+ list_add(&page->lru, &pcp->list);
+ pcp->count++;
+ local_irq_restore(flags);
+ put_cpu();
+}
+
+void fastcall free_hot_page(struct page *page)
+{
+ free_hot_cold_page(page, 0);
+}
+
+void fastcall free_cold_page(struct page *page)
+{
+ free_hot_cold_page(page, 1);
+}
+
+static inline void prep_zero_page(struct page *page, int order, int gfp_flags)
+{
+ int i;
+
+ BUG_ON((gfp_flags & (__GFP_WAIT | __GFP_HIGHMEM)) == __GFP_HIGHMEM);
+ for(i = 0; i < (1 << order); i++)
+ clear_highpage(page + i);
+}
+
+/*
+ * Really, prep_compound_page() should be called from __rmqueue_bulk(). But
+ * we cheat by calling it from here, in the order > 0 path. Saves a branch
+ * or two.
+ */
+static struct page *
+buffered_rmqueue(struct zone *zone, int order, int gfp_flags)
+{
+ unsigned long flags;
+ struct page *page = NULL;
+ int cold = !!(gfp_flags & __GFP_COLD);
+
+ if (order == 0) {
+ struct per_cpu_pages *pcp;
+
+ pcp = &zone->pageset[get_cpu()].pcp[cold];
+ local_irq_save(flags);
+ if (pcp->count <= pcp->low)
+ pcp->count += rmqueue_bulk(zone, 0,
+ pcp->batch, &pcp->list);
+ if (pcp->count) {
+ page = list_entry(pcp->list.next, struct page, lru);
+ list_del(&page->lru);
+ pcp->count--;
+ }
+ local_irq_restore(flags);
+ put_cpu();
+ }
+
+ if (page == NULL) {
+ spin_lock_irqsave(&zone->lock, flags);
+ page = __rmqueue(zone, order);
+ spin_unlock_irqrestore(&zone->lock, flags);
+ }
+
+ if (page != NULL) {
+ BUG_ON(bad_range(zone, page));
+ mod_page_state_zone(zone, pgalloc, 1 << order);
+ prep_new_page(page, order);
+
+ if (gfp_flags & __GFP_ZERO)
+ prep_zero_page(page, order, gfp_flags);
+
+ if (order && (gfp_flags & __GFP_COMP))
+ prep_compound_page(page, order);
+ }
+ return page;
+}
+
+/*
+ * Return 1 if free pages are above 'mark'. This takes into account the order
+ * of the allocation.
+ */
+int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
+ int classzone_idx, int can_try_harder, int gfp_high)
+{
+	/* free_pages may go negative - that's OK */
+ long min = mark, free_pages = z->free_pages - (1 << order) + 1;
+ int o;
+
+ if (gfp_high)
+ min -= min / 2;
+ if (can_try_harder)
+ min -= min / 4;
+
+ if (free_pages <= min + z->lowmem_reserve[classzone_idx])
+ return 0;
+ for (o = 0; o < order; o++) {
+ /* At the next order, this order's pages become unavailable */
+ free_pages -= z->free_area[o].nr_free << o;
+
+ /* Require fewer higher order pages to be free */
+ min >>= 1;
+
+ if (free_pages <= min)
+ return 0;
+ }
+ return 1;
+}
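
zone_watermark_ok() discounts lower-order pages as it walks up, halving the
requirement at each step, so a zone can fail a high-order check even with
plenty of order-0 pages free. A userspace model with hypothetical per-order
free counts:

#include <stdio.h>

#define MAX_ORDER 11

/* userspace model of zone_watermark_ok(); nr_free[] holds free blocks per order */
static int watermark_ok(long free_pages, unsigned long nr_free[MAX_ORDER],
			int order, long min, long lowmem_reserve)
{
	int o;

	free_pages -= (1L << order) - 1;
	if (free_pages <= min + lowmem_reserve)
		return 0;
	for (o = 0; o < order; o++) {
		/* at the next order, this order's pages become unavailable */
		free_pages -= nr_free[o] << o;
		min >>= 1;	/* require fewer higher-order pages free */
		if (free_pages <= min)
			return 0;
	}
	return 1;
}

int main(void)
{
	/* hypothetical zone: many order-0 pages, few high-order blocks */
	unsigned long nr_free[MAX_ORDER] = { 900, 40, 5 };
	long free = 900 + 40 * 2 + 5 * 4;	/* 1000 pages free in total */

	printf("order 0 ok: %d\n", watermark_ok(free, nr_free, 0, 128, 0)); /* 1 */
	printf("order 2 ok: %d\n", watermark_ok(free, nr_free, 2, 128, 0)); /* 0 */
	return 0;
}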
+
+/*
+ * This is the 'heart' of the zoned buddy allocator.
+ */
+struct page * fastcall
+__alloc_pages(unsigned int gfp_mask, unsigned int order,
+ struct zonelist *zonelist)
+{
+ const int wait = gfp_mask & __GFP_WAIT;
+ struct zone **zones, *z;
+ struct page *page;
+ struct reclaim_state reclaim_state;
+ struct task_struct *p = current;
+ int i;
+ int classzone_idx;
+ int do_retry;
+ int can_try_harder;
+ int did_some_progress;
+
+ might_sleep_if(wait);
+
+ /*
+	 * The caller may dip into the page reserves a bit more if it cannot
+	 * run direct reclaim, or if it has a realtime scheduling policy.
+ */
+ can_try_harder = (unlikely(rt_task(p)) && !in_interrupt()) || !wait;
+
+ zones = zonelist->zones; /* the list of zones suitable for gfp_mask */
+
+ if (unlikely(zones[0] == NULL)) {
+ /* Should this ever happen?? */
+ return NULL;
+ }
+
+ classzone_idx = zone_idx(zones[0]);
+
+ restart:
+ /* Go through the zonelist once, looking for a zone with enough free */
+ for (i = 0; (z = zones[i]) != NULL; i++) {
+
+ if (!zone_watermark_ok(z, order, z->pages_low,
+ classzone_idx, 0, 0))
+ continue;
+
+ page = buffered_rmqueue(z, order, gfp_mask);
+ if (page)
+ goto got_pg;
+ }
+
+ for (i = 0; (z = zones[i]) != NULL; i++)
+ wakeup_kswapd(z, order);
+
+ /*
+	 * Go through the zonelist again, letting __GFP_HIGH and allocations
+	 * from realtime tasks go deeper into the reserves.
+ */
+ for (i = 0; (z = zones[i]) != NULL; i++) {
+ if (!zone_watermark_ok(z, order, z->pages_min,
+ classzone_idx, can_try_harder,
+ gfp_mask & __GFP_HIGH))
+ continue;
+
+ page = buffered_rmqueue(z, order, gfp_mask);
+ if (page)
+ goto got_pg;
+ }
+
+ /* This allocation should allow future memory freeing. */
+	if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE))) && !in_interrupt()) {
+ /* go through the zonelist yet again, ignoring mins */
+ for (i = 0; (z = zones[i]) != NULL; i++) {
+ page = buffered_rmqueue(z, order, gfp_mask);
+ if (page)
+ goto got_pg;
+ }
+ goto nopage;
+ }
+
+ /* Atomic allocations - we can't balance anything */
+ if (!wait)
+ goto nopage;
+
+rebalance:
+ cond_resched();
+
+ /* We now go into synchronous reclaim */
+ p->flags |= PF_MEMALLOC;
+ reclaim_state.reclaimed_slab = 0;
+ p->reclaim_state = &reclaim_state;
+
+ did_some_progress = try_to_free_pages(zones, gfp_mask, order);
+
+ p->reclaim_state = NULL;
+ p->flags &= ~PF_MEMALLOC;
+
+ cond_resched();
+
+ if (likely(did_some_progress)) {
+ /*
+ * Go through the zonelist yet one more time, keep
+ * very high watermark here, this is only to catch
+ * a parallel oom killing, we must fail if we're still
+ * under heavy pressure.
+ */
+ for (i = 0; (z = zones[i]) != NULL; i++) {
+ if (!zone_watermark_ok(z, order, z->pages_min,
+ classzone_idx, can_try_harder,
+ gfp_mask & __GFP_HIGH))
+ continue;
+
+ page = buffered_rmqueue(z, order, gfp_mask);
+ if (page)
+ goto got_pg;
+ }
+ } else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
+ /*
+ * Go through the zonelist yet one more time, keep
+ * very high watermark here, this is only to catch
+ * a parallel oom killing, we must fail if we're still
+ * under heavy pressure.
+ */
+ for (i = 0; (z = zones[i]) != NULL; i++) {
+ if (!zone_watermark_ok(z, order, z->pages_high,
+ classzone_idx, 0, 0))
+ continue;
+
+ page = buffered_rmqueue(z, order, gfp_mask);
+ if (page)
+ goto got_pg;
+ }
+
+ out_of_memory(gfp_mask);
+ goto restart;
+ }
+
+ /*
+ * Don't let big-order allocations loop unless the caller explicitly
+ * requests that. Wait for some write requests to complete then retry.
+ *
+ * In this implementation, __GFP_REPEAT means __GFP_NOFAIL for order
+ * <= 3, but that may not be true in other implementations.
+ */
+ do_retry = 0;
+ if (!(gfp_mask & __GFP_NORETRY)) {
+ if ((order <= 3) || (gfp_mask & __GFP_REPEAT))
+ do_retry = 1;
+ if (gfp_mask & __GFP_NOFAIL)
+ do_retry = 1;
+ }
+ if (do_retry) {
+ blk_congestion_wait(WRITE, HZ/50);
+ goto rebalance;
+ }
+
+nopage:
+ if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
+ printk(KERN_WARNING "%s: page allocation failure."
+ " order:%d, mode:0x%x\n",
+ p->comm, order, gfp_mask);
+ dump_stack();
+ }
+ return NULL;
+got_pg:
+ zone_statistics(zonelist, z);
+ return page;
+}
+
+EXPORT_SYMBOL(__alloc_pages);
+
+/*
+ * Common helper functions.
+ */
+fastcall unsigned long __get_free_pages(unsigned int gfp_mask, unsigned int order)
+{
+ struct page * page;
+ page = alloc_pages(gfp_mask, order);
+ if (!page)
+ return 0;
+ return (unsigned long) page_address(page);
+}
+
+EXPORT_SYMBOL(__get_free_pages);
+
+fastcall unsigned long get_zeroed_page(unsigned int gfp_mask)
+{
+ struct page * page;
+
+ /*
+ * get_zeroed_page() returns a 32-bit address, which cannot represent
+ * a highmem page
+ */
+ BUG_ON(gfp_mask & __GFP_HIGHMEM);
+
+ page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
+ if (page)
+ return (unsigned long) page_address(page);
+ return 0;
+}
+
+EXPORT_SYMBOL(get_zeroed_page);
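
A hedged usage sketch for these helpers (kernel context assumed; the wrapper
function itself is hypothetical, not part of this patch):

#include <linux/gfp.h>
#include <linux/mm.h>

static int page_helpers_demo(void)
{
	unsigned long addr  = __get_free_pages(GFP_KERNEL, 2);	/* 4 contiguous pages */
	unsigned long zpage = get_zeroed_page(GFP_KERNEL);	/* one zeroed lowmem page */

	if (!addr || !zpage)
		goto out;
	/* ... use the buffers ... */
out:
	if (addr)
		free_pages(addr, 2);
	if (zpage)
		free_pages(zpage, 0);
	return 0;
}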
+
+void __pagevec_free(struct pagevec *pvec)
+{
+ int i = pagevec_count(pvec);
+
+ while (--i >= 0)
+ free_hot_cold_page(pvec->pages[i], pvec->cold);
+}
+
+fastcall void __free_pages(struct page *page, unsigned int order)
+{
+ if (!PageReserved(page) && put_page_testzero(page)) {
+ if (order == 0)
+ free_hot_page(page);
+ else
+ __free_pages_ok(page, order);
+ }
+}
+
+EXPORT_SYMBOL(__free_pages);
+
+fastcall void free_pages(unsigned long addr, unsigned int order)
+{
+ if (addr != 0) {
+ BUG_ON(!virt_addr_valid((void *)addr));
+ __free_pages(virt_to_page((void *)addr), order);
+ }
+}
+
+EXPORT_SYMBOL(free_pages);
+
+/*
+ * Total amount of free (allocatable) RAM:
+ */
+unsigned int nr_free_pages(void)
+{
+ unsigned int sum = 0;
+ struct zone *zone;
+
+ for_each_zone(zone)
+ sum += zone->free_pages;
+
+ return sum;
+}
+
+EXPORT_SYMBOL(nr_free_pages);
+
+#ifdef CONFIG_NUMA
+unsigned int nr_free_pages_pgdat(pg_data_t *pgdat)
+{
+ unsigned int i, sum = 0;
+
+ for (i = 0; i < MAX_NR_ZONES; i++)
+ sum += pgdat->node_zones[i].free_pages;
+
+ return sum;
+}
+#endif
+
+static unsigned int nr_free_zone_pages(int offset)
+{
+ pg_data_t *pgdat;
+ unsigned int sum = 0;
+
+ for_each_pgdat(pgdat) {
+ struct zonelist *zonelist = pgdat->node_zonelists + offset;
+ struct zone **zonep = zonelist->zones;
+ struct zone *zone;
+
+ for (zone = *zonep++; zone; zone = *zonep++) {
+ unsigned long size = zone->present_pages;
+ unsigned long high = zone->pages_high;
+ if (size > high)
+ sum += size - high;
+ }
+ }
+
+ return sum;
+}
+
+/*
+ * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
+ */
+unsigned int nr_free_buffer_pages(void)
+{
+ return nr_free_zone_pages(GFP_USER & GFP_ZONEMASK);
+}
+
+/*
+ * Amount of free RAM allocatable within all zones
+ */
+unsigned int nr_free_pagecache_pages(void)
+{
+ return nr_free_zone_pages(GFP_HIGHUSER & GFP_ZONEMASK);
+}
+
+#ifdef CONFIG_HIGHMEM
+unsigned int nr_free_highpages (void)
+{
+ pg_data_t *pgdat;
+ unsigned int pages = 0;
+
+ for_each_pgdat(pgdat)
+ pages += pgdat->node_zones[ZONE_HIGHMEM].free_pages;
+
+ return pages;
+}
+#endif
+
+#ifdef CONFIG_NUMA
+static void show_node(struct zone *zone)
+{
+ printk("Node %d ", zone->zone_pgdat->node_id);
+}
+#else
+#define show_node(zone) do { } while (0)
+#endif
+
+/*
+ * Accumulate the page_state information across all CPUs.
+ * The result is unavoidably approximate - it can change
+ * during and after execution of this function.
+ */
+static DEFINE_PER_CPU(struct page_state, page_states) = {0};
+
+atomic_t nr_pagecache = ATOMIC_INIT(0);
+EXPORT_SYMBOL(nr_pagecache);
+#ifdef CONFIG_SMP
+DEFINE_PER_CPU(long, nr_pagecache_local) = 0;
+#endif
+
+void __get_page_state(struct page_state *ret, int nr)
+{
+ int cpu = 0;
+
+ memset(ret, 0, sizeof(*ret));
+
+ cpu = first_cpu(cpu_online_map);
+ while (cpu < NR_CPUS) {
+ unsigned long *in, *out, off;
+
+ in = (unsigned long *)&per_cpu(page_states, cpu);
+
+ cpu = next_cpu(cpu, cpu_online_map);
+
+ if (cpu < NR_CPUS)
+ prefetch(&per_cpu(page_states, cpu));
+
+ out = (unsigned long *)ret;
+ for (off = 0; off < nr; off++)
+ *out++ += *in++;
+ }
+}
+
+void get_page_state(struct page_state *ret)
+{
+ int nr;
+
+ nr = offsetof(struct page_state, GET_PAGE_STATE_LAST);
+ nr /= sizeof(unsigned long);
+
+ __get_page_state(ret, nr + 1);
+}
+
+void get_full_page_state(struct page_state *ret)
+{
+ __get_page_state(ret, sizeof(*ret) / sizeof(unsigned long));
+}
+
+unsigned long __read_page_state(unsigned offset)
+{
+ unsigned long ret = 0;
+ int cpu;
+
+ for_each_online_cpu(cpu) {
+ unsigned long in;
+
+ in = (unsigned long)&per_cpu(page_states, cpu) + offset;
+ ret += *((unsigned long *)in);
+ }
+ return ret;
+}
+
+void __mod_page_state(unsigned offset, unsigned long delta)
+{
+ unsigned long flags;
+ void* ptr;
+
+ local_irq_save(flags);
+ ptr = &__get_cpu_var(page_states);
+ *(unsigned long*)(ptr + offset) += delta;
+ local_irq_restore(flags);
+}
+
+EXPORT_SYMBOL(__mod_page_state);
+
+void __get_zone_counts(unsigned long *active, unsigned long *inactive,
+ unsigned long *free, struct pglist_data *pgdat)
+{
+ struct zone *zones = pgdat->node_zones;
+ int i;
+
+ *active = 0;
+ *inactive = 0;
+ *free = 0;
+ for (i = 0; i < MAX_NR_ZONES; i++) {
+ *active += zones[i].nr_active;
+ *inactive += zones[i].nr_inactive;
+ *free += zones[i].free_pages;
+ }
+}
+
+void get_zone_counts(unsigned long *active,
+ unsigned long *inactive, unsigned long *free)
+{
+ struct pglist_data *pgdat;
+
+ *active = 0;
+ *inactive = 0;
+ *free = 0;
+ for_each_pgdat(pgdat) {
+ unsigned long l, m, n;
+ __get_zone_counts(&l, &m, &n, pgdat);
+ *active += l;
+ *inactive += m;
+ *free += n;
+ }
+}
+
+void si_meminfo(struct sysinfo *val)
+{
+ val->totalram = totalram_pages;
+ val->sharedram = 0;
+ val->freeram = nr_free_pages();
+ val->bufferram = nr_blockdev_pages();
+#ifdef CONFIG_HIGHMEM
+ val->totalhigh = totalhigh_pages;
+ val->freehigh = nr_free_highpages();
+#else
+ val->totalhigh = 0;
+ val->freehigh = 0;
+#endif
+ val->mem_unit = PAGE_SIZE;
+}
+
+EXPORT_SYMBOL(si_meminfo);
+
+#ifdef CONFIG_NUMA
+void si_meminfo_node(struct sysinfo *val, int nid)
+{
+ pg_data_t *pgdat = NODE_DATA(nid);
+
+ val->totalram = pgdat->node_present_pages;
+ val->freeram = nr_free_pages_pgdat(pgdat);
+ val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
+ val->freehigh = pgdat->node_zones[ZONE_HIGHMEM].free_pages;
+ val->mem_unit = PAGE_SIZE;
+}
+#endif
+
+#define K(x) ((x) << (PAGE_SHIFT-10))
+
+/*
+ * Show free area list (used inside shift_scroll-lock stuff)
+ * We also calculate the percentage fragmentation. We do this by counting the
+ * memory on each free list with the exception of the first item on the list.
+ */
+void show_free_areas(void)
+{
+ struct page_state ps;
+ int cpu, temperature;
+ unsigned long active;
+ unsigned long inactive;
+ unsigned long free;
+ struct zone *zone;
+
+ for_each_zone(zone) {
+ show_node(zone);
+ printk("%s per-cpu:", zone->name);
+
+ if (!zone->present_pages) {
+ printk(" empty\n");
+ continue;
+ } else
+ printk("\n");
+
+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
+ struct per_cpu_pageset *pageset;
+
+ if (!cpu_possible(cpu))
+ continue;
+
+ pageset = zone->pageset + cpu;
+
+ for (temperature = 0; temperature < 2; temperature++)
+ printk("cpu %d %s: low %d, high %d, batch %d\n",
+ cpu,
+ temperature ? "cold" : "hot",
+ pageset->pcp[temperature].low,
+ pageset->pcp[temperature].high,
+ pageset->pcp[temperature].batch);
+ }
+ }
+
+ get_page_state(&ps);
+ get_zone_counts(&active, &inactive, &free);
+
+ printk("\nFree pages: %11ukB (%ukB HighMem)\n",
+ K(nr_free_pages()),
+ K(nr_free_highpages()));
+
+ printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu "
+ "unstable:%lu free:%u slab:%lu mapped:%lu pagetables:%lu\n",
+ active,
+ inactive,
+ ps.nr_dirty,
+ ps.nr_writeback,
+ ps.nr_unstable,
+ nr_free_pages(),
+ ps.nr_slab,
+ ps.nr_mapped,
+ ps.nr_page_table_pages);
+
+ for_each_zone(zone) {
+ int i;
+
+ show_node(zone);
+ printk("%s"
+ " free:%lukB"
+ " min:%lukB"
+ " low:%lukB"
+ " high:%lukB"
+ " active:%lukB"
+ " inactive:%lukB"
+ " present:%lukB"
+ " pages_scanned:%lu"
+ " all_unreclaimable? %s"
+ "\n",
+ zone->name,
+ K(zone->free_pages),
+ K(zone->pages_min),
+ K(zone->pages_low),
+ K(zone->pages_high),
+ K(zone->nr_active),
+ K(zone->nr_inactive),
+ K(zone->present_pages),
+ zone->pages_scanned,
+ (zone->all_unreclaimable ? "yes" : "no")
+ );
+ printk("lowmem_reserve[]:");
+ for (i = 0; i < MAX_NR_ZONES; i++)
+ printk(" %lu", zone->lowmem_reserve[i]);
+ printk("\n");
+ }
+
+ for_each_zone(zone) {
+ unsigned long nr, flags, order, total = 0;
+
+ show_node(zone);
+ printk("%s: ", zone->name);
+ if (!zone->present_pages) {
+ printk("empty\n");
+ continue;
+ }
+
+ spin_lock_irqsave(&zone->lock, flags);
+ for (order = 0; order < MAX_ORDER; order++) {
+ nr = zone->free_area[order].nr_free;
+ total += nr << order;
+ printk("%lu*%lukB ", nr, K(1UL) << order);
+ }
+ spin_unlock_irqrestore(&zone->lock, flags);
+ printk("= %lukB\n", K(total));
+ }
+
+ show_swap_cache_info();
+}
+
+/*
+ * Builds allocation fallback zone lists.
+ */
+static int __init build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist, int j, int k)
+{
+ switch (k) {
+ struct zone *zone;
+ default:
+ BUG();
+ case ZONE_HIGHMEM:
+ zone = pgdat->node_zones + ZONE_HIGHMEM;
+ if (zone->present_pages) {
+#ifndef CONFIG_HIGHMEM
+ BUG();
+#endif
+ zonelist->zones[j++] = zone;
+ }
+ case ZONE_NORMAL:
+ zone = pgdat->node_zones + ZONE_NORMAL;
+ if (zone->present_pages)
+ zonelist->zones[j++] = zone;
+ case ZONE_DMA:
+ zone = pgdat->node_zones + ZONE_DMA;
+ if (zone->present_pages)
+ zonelist->zones[j++] = zone;
+ }
+
+ return j;
+}
+
+#ifdef CONFIG_NUMA
+#define MAX_NODE_LOAD (num_online_nodes())
+static int __initdata node_load[MAX_NUMNODES];
+/**
+ * find_next_best_node - find the next node that should appear in a given
+ * node's fallback list
+ * @node: node whose fallback list we're appending
+ * @used_node_mask: nodemask_t of already used nodes
+ *
+ * We use a number of factors to determine which is the next node that should
+ * appear on a given node's fallback list. The node should not have appeared
+ * already in @node's fallback list, and it should be the next closest node
+ * according to the distance array (which contains arbitrary distance values
+ * from each node to each node in the system), and should also prefer nodes
+ * with no CPUs, since presumably they'll have very little allocation pressure
+ * on them otherwise.
+ * It returns -1 if no node is found.
+ */
+static int __init find_next_best_node(int node, nodemask_t *used_node_mask)
+{
+ int i, n, val;
+ int min_val = INT_MAX;
+ int best_node = -1;
+
+ for_each_online_node(i) {
+ cpumask_t tmp;
+
+ /* Start from local node */
+ n = (node+i) % num_online_nodes();
+
+ /* Don't want a node to appear more than once */
+ if (node_isset(n, *used_node_mask))
+ continue;
+
+ /* Use the local node if we haven't already */
+ if (!node_isset(node, *used_node_mask)) {
+ best_node = node;
+ break;
+ }
+
+ /* Use the distance array to find the distance */
+ val = node_distance(node, n);
+
+ /* Give preference to headless and unused nodes */
+ tmp = node_to_cpumask(n);
+ if (!cpus_empty(tmp))
+ val += PENALTY_FOR_NODE_WITH_CPUS;
+
+ /* Slight preference for less loaded node */
+ val *= (MAX_NODE_LOAD*MAX_NUMNODES);
+ val += node_load[n];
+
+ if (val < min_val) {
+ min_val = val;
+ best_node = n;
+ }
+ }
+
+ if (best_node >= 0)
+ node_set(best_node, *used_node_mask);
+
+ return best_node;
+}
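
A userspace model of the scoring above: the node distance, plus a penalty for
nodes that have CPUs, scaled up, plus the accumulated round-robin load. All
values here are hypothetical, including PENALTY_FOR_NODE_WITH_CPUS:

#include <stdio.h>
#include <limits.h>

#define PENALTY_FOR_NODE_WITH_CPUS 1	/* hypothetical value */

int main(void)
{
	/* hypothetical 3-node system viewed from node 0 */
	int dist[3]     = { 10, 20, 20 };	/* node_distance(0, n) */
	int has_cpus[3] = { 1, 1, 0 };
	int load[3]     = { 0, 0, 0 };		/* node_load[] */
	int n, val, best = -1, min_val = INT_MAX;

	for (n = 1; n < 3; n++) {		/* node 0 already used */
		val = dist[n];
		if (has_cpus[n])
			val += PENALTY_FOR_NODE_WITH_CPUS;
		val *= 3 * 3;			/* MAX_NODE_LOAD * MAX_NUMNODES */
		val += load[n];
		if (val < min_val) {
			min_val = val;
			best = n;
		}
	}
	printf("next best node: %d\n", best);	/* 2: the headless node wins the tie */
	return 0;
}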
+
+static void __init build_zonelists(pg_data_t *pgdat)
+{
+ int i, j, k, node, local_node;
+ int prev_node, load;
+ struct zonelist *zonelist;
+ nodemask_t used_mask;
+
+ /* initialize zonelists */
+ for (i = 0; i < GFP_ZONETYPES; i++) {
+ zonelist = pgdat->node_zonelists + i;
+ memset(zonelist, 0, sizeof(*zonelist));
+ zonelist->zones[0] = NULL;
+ }
+
+ /* NUMA-aware ordering of nodes */
+ local_node = pgdat->node_id;
+ load = num_online_nodes();
+ prev_node = local_node;
+ nodes_clear(used_mask);
+ while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
+ /*
+ * We don't want to pressure a particular node.
+		 * So we add a penalty to the first node in the same
+		 * distance group, to round-robin among them.
+ */
+ if (node_distance(local_node, node) !=
+ node_distance(local_node, prev_node))
+ node_load[node] += load;
+ prev_node = node;
+ load--;
+ for (i = 0; i < GFP_ZONETYPES; i++) {
+ zonelist = pgdat->node_zonelists + i;
+ for (j = 0; zonelist->zones[j] != NULL; j++);
+
+ k = ZONE_NORMAL;
+ if (i & __GFP_HIGHMEM)
+ k = ZONE_HIGHMEM;
+ if (i & __GFP_DMA)
+ k = ZONE_DMA;
+
+			j = build_zonelists_node(NODE_DATA(node), zonelist, j, k);
+ zonelist->zones[j] = NULL;
+ }
+ }
+}
+
+#else /* CONFIG_NUMA */
+
+static void __init build_zonelists(pg_data_t *pgdat)
+{
+ int i, j, k, node, local_node;
+
+ local_node = pgdat->node_id;
+ for (i = 0; i < GFP_ZONETYPES; i++) {
+ struct zonelist *zonelist;
+
+ zonelist = pgdat->node_zonelists + i;
+ memset(zonelist, 0, sizeof(*zonelist));
+
+ j = 0;
+ k = ZONE_NORMAL;
+ if (i & __GFP_HIGHMEM)
+ k = ZONE_HIGHMEM;
+ if (i & __GFP_DMA)
+ k = ZONE_DMA;
+
+ j = build_zonelists_node(pgdat, zonelist, j, k);
+ /*
+ * Now we build the zonelist so that it contains the zones
+ * of all the other nodes.
+ * We don't want to pressure a particular node, so when
+ * building the zones for node N, we make sure that the
+ * zones coming right after the local ones are those from
+ * node N+1 (modulo N)
+ */
+ for (node = local_node + 1; node < MAX_NUMNODES; node++) {
+ if (!node_online(node))
+ continue;
+			j = build_zonelists_node(NODE_DATA(node), zonelist, j, k);
+ }
+ for (node = 0; node < local_node; node++) {
+ if (!node_online(node))
+ continue;
+			j = build_zonelists_node(NODE_DATA(node), zonelist, j, k);
+ }
+
+ zonelist->zones[j] = NULL;
+ }
+}
+
+#endif /* CONFIG_NUMA */
+
+void __init build_all_zonelists(void)
+{
+ int i;
+
+ for_each_online_node(i)
+ build_zonelists(NODE_DATA(i));
+ printk("Built %i zonelists\n", num_online_nodes());
+}
+
+/*
+ * Helper functions to size the waitqueue hash table.
+ * Essentially these want to choose hash table sizes sufficiently
+ * large so that collisions trying to wait on pages are rare.
+ * But in fact, the number of active page waitqueues on typical
+ * systems is ridiculously low, less than 200. So this is still
+ * conservative, even though it seems large.
+ *
+ * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
+ * waitqueues, i.e. the size of the waitq table given the number of pages.
+ */
+#define PAGES_PER_WAITQUEUE 256
+
+static inline unsigned long wait_table_size(unsigned long pages)
+{
+ unsigned long size = 1;
+
+ pages /= PAGES_PER_WAITQUEUE;
+
+ while (size < pages)
+ size <<= 1;
+
+ /*
+ * Once we have dozens or even hundreds of threads sleeping
+ * on IO we've got bigger problems than wait queue collision.
+ * Limit the size of the wait table to a reasonable size.
+ */
+ size = min(size, 4096UL);
+
+ return max(size, 4UL);
+}
+
+/*
+ * This is an integer logarithm so that shifts can be used later
+ * to extract the more random high bits from the multiplicative
+ * hash function before the remainder is taken.
+ */
+static inline unsigned long wait_table_bits(unsigned long size)
+{
+ return ffz(~size);
+}
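
For a power-of-two size, ffz(~size) is simply log2(size). A userspace sketch of
the table sizing for a hypothetical 512MB zone with 4K pages:

#include <stdio.h>

#define PAGES_PER_WAITQUEUE 256

int main(void)
{
	/* hypothetical zone of 131072 pages (512MB with 4K pages) */
	unsigned long pages = 131072, size = 1;
	int bits = 0;

	pages /= PAGES_PER_WAITQUEUE;	/* target: one queue per 256 pages */
	while (size < pages)
		size <<= 1;		/* round up to a power of two: 512 */
	if (size > 4096)
		size = 4096;		/* clamp as wait_table_size() does */
	if (size < 4)
		size = 4;
	while (!((size >> bits) & 1))
		bits++;			/* ffz(~size) == log2(size) here */
	printf("wait table: %lu entries, %d bits\n", size, bits);	/* 512, 9 */
	return 0;
}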
+
+#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
+
+static void __init calculate_zone_totalpages(struct pglist_data *pgdat,
+ unsigned long *zones_size, unsigned long *zholes_size)
+{
+ unsigned long realtotalpages, totalpages = 0;
+ int i;
+
+ for (i = 0; i < MAX_NR_ZONES; i++)
+ totalpages += zones_size[i];
+ pgdat->node_spanned_pages = totalpages;
+
+ realtotalpages = totalpages;
+ if (zholes_size)
+ for (i = 0; i < MAX_NR_ZONES; i++)
+ realtotalpages -= zholes_size[i];
+ pgdat->node_present_pages = realtotalpages;
+ printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
realtotalpages);
+}
+
+
+/*
+ * Initially all pages are reserved - free ones are freed
+ * up by free_all_bootmem() once the early boot process is
+ * done. Non-atomic initialization, single-pass.
+ */
+void __init memmap_init_zone(unsigned long size, int nid, unsigned long zone,
+ unsigned long start_pfn)
+{
+ struct page *start = pfn_to_page(start_pfn);
+ struct page *page;
+
+ for (page = start; page < (start + size); page++) {
+ set_page_zone(page, NODEZONE(nid, zone));
+ set_page_count(page, 0);
+ reset_page_mapcount(page);
+ SetPageReserved(page);
+ INIT_LIST_HEAD(&page->lru);
+#ifdef WANT_PAGE_VIRTUAL
+ /* The shift won't overflow because ZONE_NORMAL is below 4G. */
+ if (!is_highmem_idx(zone))
+ set_page_address(page, __va(start_pfn << PAGE_SHIFT));
+#endif
+ start_pfn++;
+ }
+}
+
+void zone_init_free_lists(struct pglist_data *pgdat, struct zone *zone,
+ unsigned long size)
+{
+ int order;
+ for (order = 0; order < MAX_ORDER ; order++) {
+ INIT_LIST_HEAD(&zone->free_area[order].free_list);
+ zone->free_area[order].nr_free = 0;
+ }
+}
+
+#ifndef __HAVE_ARCH_MEMMAP_INIT
+#define memmap_init(size, nid, zone, start_pfn) \
+ memmap_init_zone((size), (nid), (zone), (start_pfn))
+#endif
+
+/*
+ * Set up the zone data structures:
+ * - mark all pages reserved
+ * - mark all memory queues empty
+ * - clear the memory bitmaps
+ */
+static void __init free_area_init_core(struct pglist_data *pgdat,
+ unsigned long *zones_size, unsigned long *zholes_size)
+{
+ unsigned long i, j;
+ const unsigned long zone_required_alignment = 1UL << (MAX_ORDER-1);
+ int cpu, nid = pgdat->node_id;
+ unsigned long zone_start_pfn = pgdat->node_start_pfn;
+
+ pgdat->nr_zones = 0;
+ init_waitqueue_head(&pgdat->kswapd_wait);
+ pgdat->kswapd_max_order = 0;
+
+ for (j = 0; j < MAX_NR_ZONES; j++) {
+ struct zone *zone = pgdat->node_zones + j;
+ unsigned long size, realsize;
+ unsigned long batch;
+
+ zone_table[NODEZONE(nid, j)] = zone;
+ realsize = size = zones_size[j];
+ if (zholes_size)
+ realsize -= zholes_size[j];
+
+ if (j == ZONE_DMA || j == ZONE_NORMAL)
+ nr_kernel_pages += realsize;
+ nr_all_pages += realsize;
+
+ zone->spanned_pages = size;
+ zone->present_pages = realsize;
+ zone->name = zone_names[j];
+ spin_lock_init(&zone->lock);
+ spin_lock_init(&zone->lru_lock);
+ zone->zone_pgdat = pgdat;
+ zone->free_pages = 0;
+
+ zone->temp_priority = zone->prev_priority = DEF_PRIORITY;
+
+ /*
+ * The per-cpu-pages pools are set to around 1000th of the
+ * size of the zone. But no more than 1/4 of a meg - there's
+ * no point in going beyond the size of L2 cache.
+ *
+ * OK, so we don't know how big the cache is. So guess.
+ */
+ batch = zone->present_pages / 1024;
+ if (batch * PAGE_SIZE > 256 * 1024)
+ batch = (256 * 1024) / PAGE_SIZE;
+ batch /= 4; /* We effectively *= 4 below */
+ if (batch < 1)
+ batch = 1;
+
+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
+ struct per_cpu_pages *pcp;
+
+ pcp = &zone->pageset[cpu].pcp[0]; /* hot */
+ pcp->count = 0;
+ pcp->low = 2 * batch;
+ pcp->high = 6 * batch;
+ pcp->batch = 1 * batch;
+ INIT_LIST_HEAD(&pcp->list);
+
+ pcp = &zone->pageset[cpu].pcp[1]; /* cold */
+ pcp->count = 0;
+ pcp->low = 0;
+ pcp->high = 2 * batch;
+ pcp->batch = 1 * batch;
+ INIT_LIST_HEAD(&pcp->list);
+ }
+ printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%lu\n",
+ zone_names[j], realsize, batch);
+ INIT_LIST_HEAD(&zone->active_list);
+ INIT_LIST_HEAD(&zone->inactive_list);
+ zone->nr_scan_active = 0;
+ zone->nr_scan_inactive = 0;
+ zone->nr_active = 0;
+ zone->nr_inactive = 0;
+ if (!size)
+ continue;
+
+ /*
+ * The per-page waitqueue mechanism uses hashed waitqueues
+ * per zone.
+ */
+ zone->wait_table_size = wait_table_size(size);
+ zone->wait_table_bits =
+ wait_table_bits(zone->wait_table_size);
+ zone->wait_table = (wait_queue_head_t *)
+ alloc_bootmem_node(pgdat, zone->wait_table_size
+ * sizeof(wait_queue_head_t));
+
+ for(i = 0; i < zone->wait_table_size; ++i)
+ init_waitqueue_head(zone->wait_table + i);
+
+ pgdat->nr_zones = j+1;
+
+ zone->zone_mem_map = pfn_to_page(zone_start_pfn);
+ zone->zone_start_pfn = zone_start_pfn;
+
+ if ((zone_start_pfn) & (zone_required_alignment-1))
+ printk(KERN_CRIT "BUG: wrong zone alignment, it will
crash\n");
+
+ memmap_init(size, nid, j, zone_start_pfn);
+
+ zone_start_pfn += size;
+
+ zone_init_free_lists(pgdat, zone, zone->spanned_pages);
+ }
+}
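
A userspace sketch of the per-cpu batch sizing computed in the loop above, for
a hypothetical 1GB zone with 4K pages:

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	/* hypothetical zone of 262144 pages (1GB with 4K pages) */
	unsigned long present_pages = 262144;
	unsigned long batch = present_pages / 1024;	/* ~0.1% of the zone */

	if (batch * PAGE_SIZE > 256 * 1024)		/* cap at 1/4 MB */
		batch = (256 * 1024) / PAGE_SIZE;
	batch /= 4;					/* effectively *= 4 below */
	if (batch < 1)
		batch = 1;

	printf("batch=%lu; hot: low=%lu high=%lu; cold: low=0 high=%lu\n",
	       batch, 2 * batch, 6 * batch, 2 * batch);	/* 16; 32/96; 32 */
	return 0;
}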
+
+void __init node_alloc_mem_map(struct pglist_data *pgdat)
+{
+ unsigned long size;
+
+ size = (pgdat->node_spanned_pages + 1) * sizeof(struct page);
+ pgdat->node_mem_map = alloc_bootmem_node(pgdat, size);
+#ifndef CONFIG_DISCONTIGMEM
+ mem_map = contig_page_data.node_mem_map;
+#endif
+}
+
+void __init free_area_init_node(int nid, struct pglist_data *pgdat,
+ unsigned long *zones_size, unsigned long node_start_pfn,
+ unsigned long *zholes_size)
+{
+ pgdat->node_id = nid;
+ pgdat->node_start_pfn = node_start_pfn;
+ calculate_zone_totalpages(pgdat, zones_size, zholes_size);
+
+ if (!pfn_to_page(node_start_pfn))
+ node_alloc_mem_map(pgdat);
+
+ free_area_init_core(pgdat, zones_size, zholes_size);
+}
+
+#ifndef CONFIG_DISCONTIGMEM
+static bootmem_data_t contig_bootmem_data;
+struct pglist_data contig_page_data = { .bdata = &contig_bootmem_data };
+
+EXPORT_SYMBOL(contig_page_data);
+
+void __init free_area_init(unsigned long *zones_size)
+{
+ free_area_init_node(0, &contig_page_data, zones_size,
+ __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
+}
+#endif
+
+#ifdef CONFIG_PROC_FS
+
+#include <linux/seq_file.h>
+
+static void *frag_start(struct seq_file *m, loff_t *pos)
+{
+ pg_data_t *pgdat;
+ loff_t node = *pos;
+
+ for (pgdat = pgdat_list; pgdat && node; pgdat = pgdat->pgdat_next)
+ --node;
+
+ return pgdat;
+}
+
+static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
+{
+ pg_data_t *pgdat = (pg_data_t *)arg;
+
+ (*pos)++;
+ return pgdat->pgdat_next;
+}
+
+static void frag_stop(struct seq_file *m, void *arg)
+{
+}
+
+/*
+ * This walks the free areas for each zone.
+ */
+static int frag_show(struct seq_file *m, void *arg)
+{
+ pg_data_t *pgdat = (pg_data_t *)arg;
+ struct zone *zone;
+ struct zone *node_zones = pgdat->node_zones;
+ unsigned long flags;
+ int order;
+
+ for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
+ if (!zone->present_pages)
+ continue;
+
+ spin_lock_irqsave(&zone->lock, flags);
+ seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
+ for (order = 0; order < MAX_ORDER; ++order)
+ seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
+ spin_unlock_irqrestore(&zone->lock, flags);
+ seq_putc(m, '\n');
+ }
+ return 0;
+}
+
+struct seq_operations fragmentation_op = {
+ .start = frag_start,
+ .next = frag_next,
+ .stop = frag_stop,
+ .show = frag_show,
+};
+
+static char *vmstat_text[] = {
+ "nr_dirty",
+ "nr_writeback",
+ "nr_unstable",
+ "nr_page_table_pages",
+ "nr_mapped",
+ "nr_slab",
+
+ "pgpgin",
+ "pgpgout",
+ "pswpin",
+ "pswpout",
+ "pgalloc_high",
+
+ "pgalloc_normal",
+ "pgalloc_dma",
+ "pgfree",
+ "pgactivate",
+ "pgdeactivate",
+
+ "pgfault",
+ "pgmajfault",
+ "pgrefill_high",
+ "pgrefill_normal",
+ "pgrefill_dma",
+
+ "pgsteal_high",
+ "pgsteal_normal",
+ "pgsteal_dma",
+ "pgscan_kswapd_high",
+ "pgscan_kswapd_normal",
+
+ "pgscan_kswapd_dma",
+ "pgscan_direct_high",
+ "pgscan_direct_normal",
+ "pgscan_direct_dma",
+ "pginodesteal",
+
+ "slabs_scanned",
+ "kswapd_steal",
+ "kswapd_inodesteal",
+ "pageoutrun",
+ "allocstall",
+
+ "pgrotated",
+};
+
+static void *vmstat_start(struct seq_file *m, loff_t *pos)
+{
+ struct page_state *ps;
+
+ if (*pos >= ARRAY_SIZE(vmstat_text))
+ return NULL;
+
+ ps = kmalloc(sizeof(*ps), GFP_KERNEL);
+ m->private = ps;
+ if (!ps)
+ return ERR_PTR(-ENOMEM);
+ get_full_page_state(ps);
+ ps->pgpgin /= 2; /* sectors -> kbytes */
+ ps->pgpgout /= 2;
+ return (unsigned long *)ps + *pos;
+}
+
+static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
+{
+ (*pos)++;
+ if (*pos >= ARRAY_SIZE(vmstat_text))
+ return NULL;
+ return (unsigned long *)m->private + *pos;
+}
+
+static int vmstat_show(struct seq_file *m, void *arg)
+{
+ unsigned long *l = arg;
+ unsigned long off = l - (unsigned long *)m->private;
+
+ seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
+ return 0;
+}
+
+static void vmstat_stop(struct seq_file *m, void *arg)
+{
+ kfree(m->private);
+ m->private = NULL;
+}
+
+struct seq_operations vmstat_op = {
+ .start = vmstat_start,
+ .next = vmstat_next,
+ .stop = vmstat_stop,
+ .show = vmstat_show,
+};
+
+#endif /* CONFIG_PROC_FS */
+
+#ifdef CONFIG_HOTPLUG_CPU
+static int page_alloc_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ int cpu = (unsigned long)hcpu;
+ long *count;
+ unsigned long *src, *dest;
+
+ if (action == CPU_DEAD) {
+ int i;
+
+ /* Drain local pagecache count. */
+ count = &per_cpu(nr_pagecache_local, cpu);
+ atomic_add(*count, &nr_pagecache);
+ *count = 0;
+ local_irq_disable();
+ __drain_pages(cpu);
+
+ /* Add dead cpu's page_states to our own. */
+ dest = (unsigned long *)&__get_cpu_var(page_states);
+ src = (unsigned long *)&per_cpu(page_states, cpu);
+
+ for (i = 0; i < sizeof(struct page_state)/sizeof(unsigned long);
+ i++) {
+ dest[i] += src[i];
+ src[i] = 0;
+ }
+
+ local_irq_enable();
+ }
+ return NOTIFY_OK;
+}
+#endif /* CONFIG_HOTPLUG_CPU */
+
+void __init page_alloc_init(void)
+{
+ hotcpu_notifier(page_alloc_cpu_notify, 0);
+}
+
+/*
+ * setup_per_zone_lowmem_reserve - called whenever
+ * sysctl_lower_zone_reserve_ratio changes. Ensures that each zone
+ * has a correct pages reserved value, so an adequate number of
+ * pages are left in the zone after a successful __alloc_pages().
+ */
+static void setup_per_zone_lowmem_reserve(void)
+{
+ struct pglist_data *pgdat;
+ int j, idx;
+
+ for_each_pgdat(pgdat) {
+ for (j = 0; j < MAX_NR_ZONES; j++) {
+ struct zone * zone = pgdat->node_zones + j;
+ unsigned long present_pages = zone->present_pages;
+
+ zone->lowmem_reserve[j] = 0;
+
+ for (idx = j-1; idx >= 0; idx--) {
+				struct zone * lower_zone = pgdat->node_zones + idx;
+
+				lower_zone->lowmem_reserve[j] = present_pages / sysctl_lowmem_reserve_ratio[idx];
+ present_pages += lower_zone->present_pages;
+ }
+ }
+ }
+}
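
A userspace model of the reserve arithmetic with the default {256, 32} ratios;
the numbers reproduce the 1GB example in the comment at the top of this file:

#include <stdio.h>

#define MAX_NR_ZONES 3	/* DMA, Normal, HighMem */

int main(void)
{
	/* hypothetical 1GB x86 box, 4K pages: 16M DMA, 784M Normal, 224M HighMem */
	unsigned long present[MAX_NR_ZONES] = { 4096, 200704, 57344 };
	unsigned long reserve[MAX_NR_ZONES][MAX_NR_ZONES] = { { 0 } };
	int ratio[MAX_NR_ZONES - 1] = { 256, 32 };
	int j, idx;

	for (j = 0; j < MAX_NR_ZONES; j++) {
		unsigned long pages = present[j];

		for (idx = j - 1; idx >= 0; idx--) {
			reserve[idx][j] = pages / ratio[idx];
			pages += present[idx];
		}
	}
	printf("DMA reserve vs Normal alloc:   %lu pages\n", reserve[0][1]); /* 784 */
	printf("DMA reserve vs HighMem alloc:  %lu pages\n", reserve[0][2]); /* 1008 */
	printf("Normal reserve vs HighMem:     %lu pages\n", reserve[1][2]); /* 1792 */
	return 0;
}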
+
+/*
+ * setup_per_zone_pages_min - called when min_free_kbytes changes. Ensures
+ * that the pages_{min,low,high} values for each zone are set correctly
+ * with respect to min_free_kbytes.
+ */
+static void setup_per_zone_pages_min(void)
+{
+ unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
+ unsigned long lowmem_pages = 0;
+ struct zone *zone;
+ unsigned long flags;
+
+ /* Calculate total number of !ZONE_HIGHMEM pages */
+ for_each_zone(zone) {
+ if (!is_highmem(zone))
+ lowmem_pages += zone->present_pages;
+ }
+
+ for_each_zone(zone) {
+ spin_lock_irqsave(&zone->lru_lock, flags);
+ if (is_highmem(zone)) {
+ /*
+ * Often, highmem doesn't need to reserve any pages.
+ * But the pages_min/low/high values are also used for
+ * batching up page reclaim activity so we need a
+ * decent value here.
+ */
+ int min_pages;
+
+ min_pages = zone->present_pages / 1024;
+ if (min_pages < SWAP_CLUSTER_MAX)
+ min_pages = SWAP_CLUSTER_MAX;
+ if (min_pages > 128)
+ min_pages = 128;
+ zone->pages_min = min_pages;
+ } else {
+ /* if it's a lowmem zone, reserve a number of pages
+ * proportionate to the zone's size.
+ */
+ zone->pages_min = (pages_min * zone->present_pages) /
+ lowmem_pages;
+ }
+
+ /*
+ * When interpreting these watermarks, just keep in mind that:
+ * zone->pages_min == (zone->pages_min * 4) / 4;
+ */
+ zone->pages_low = (zone->pages_min * 5) / 4;
+ zone->pages_high = (zone->pages_min * 6) / 4;
+ spin_unlock_irqrestore(&zone->lru_lock, flags);
+ }
+}
+
+/*
+ * Initialise min_free_kbytes.
+ *
+ * For small machines we want it small (128k min). For large machines
+ * we want it large (64MB max). But it is not linear, because network
+ * bandwidth does not increase linearly with machine size. We use
+ *
+ * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
+ * min_free_kbytes = sqrt(lowmem_kbytes * 16)
+ *
+ * which yields
+ *
+ * 16MB: 512k
+ * 32MB: 724k
+ * 64MB: 1024k
+ * 128MB: 1448k
+ * 256MB: 2048k
+ * 512MB: 2896k
+ * 1024MB: 4096k
+ * 2048MB: 5792k
+ * 4096MB: 8192k
+ * 8192MB: 11584k
+ * 16384MB: 16384k
+ */
+static int __init init_per_zone_pages_min(void)
+{
+ unsigned long lowmem_kbytes;
+
+ lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
+
+ min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
+ if (min_free_kbytes < 128)
+ min_free_kbytes = 128;
+ if (min_free_kbytes > 65536)
+ min_free_kbytes = 65536;
+ setup_per_zone_pages_min();
+ setup_per_zone_lowmem_reserve();
+ return 0;
+}
+module_init(init_per_zone_pages_min)
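
A userspace check of the table in the comment above (link with -lm; int_sqrt()
truncates, as does the cast here):

#include <stdio.h>
#include <math.h>

int main(void)
{
	/* reproduce the table: min_free_kbytes = sqrt(lowmem_kbytes * 16) */
	unsigned long mb, min_free;

	for (mb = 16; mb <= 16384; mb *= 2) {
		min_free = (unsigned long)sqrt((double)(mb * 1024) * 16);
		if (min_free < 128)
			min_free = 128;
		if (min_free > 65536)
			min_free = 65536;
		printf("%6luMB: %luk\n", mb, min_free);	/* 16MB: 512k ... 16384MB: 16384k */
	}
	return 0;
}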
+
+/*
+ * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
+ * that we can call two helper functions whenever min_free_kbytes
+ * changes.
+ */
+int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
+	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
+{
+ proc_dointvec(table, write, file, buffer, length, ppos);
+ setup_per_zone_pages_min();
+ return 0;
+}
+
+/*
+ * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
+ * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
+ * whenever sysctl_lowmem_reserve_ratio changes.
+ *
+ * The reserve ratio obviously has absolutely no relation with the
+ * pages_min watermarks. The lowmem reserve ratio can only be understood
+ * as a function of the boot-time zone sizes.
+ */
+int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
+	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
+{
+ proc_dointvec_minmax(table, write, file, buffer, length, ppos);
+ setup_per_zone_lowmem_reserve();
+ return 0;
+}
+
+__initdata int hashdist = HASHDIST_DEFAULT;
+
+#ifdef CONFIG_NUMA
+static int __init set_hashdist(char *str)
+{
+ if (!str)
+ return 0;
+ hashdist = simple_strtoul(str, &str, 0);
+ return 1;
+}
+__setup("hashdist=", set_hashdist);
+#endif
+
+/*
+ * allocate a large system hash table from bootmem
+ * - it is assumed that the hash table must contain an exact power-of-2
+ * quantity of entries
+ * - limit is the number of hash buckets, not the total allocation size
+ */
+void *__init alloc_large_system_hash(const char *tablename,
+ unsigned long bucketsize,
+ unsigned long numentries,
+ int scale,
+ int flags,
+ unsigned int *_hash_shift,
+ unsigned int *_hash_mask,
+ unsigned long limit)
+{
+ unsigned long long max = limit;
+ unsigned long log2qty, size;
+ void *table = NULL;
+
+ /* allow the kernel cmdline to have a say */
+ if (!numentries) {
+ /* round applicable memory size up to nearest megabyte */
+		numentries = (flags & HASH_HIGHMEM) ? nr_all_pages : nr_kernel_pages;
+ numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
+ numentries >>= 20 - PAGE_SHIFT;
+ numentries <<= 20 - PAGE_SHIFT;
+
+ /* limit to 1 bucket per 2^scale bytes of low memory */
+ if (scale > PAGE_SHIFT)
+ numentries >>= (scale - PAGE_SHIFT);
+ else
+ numentries <<= (PAGE_SHIFT - scale);
+ }
+ /* rounded up to nearest power of 2 in size */
+ numentries = 1UL << (long_log2(numentries) + 1);
+
+ /* limit allocation size to 1/16 total memory by default */
+ if (max == 0) {
+ max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
+ do_div(max, bucketsize);
+ }
+
+ if (numentries > max)
+ numentries = max;
+
+ log2qty = long_log2(numentries);
+
+ do {
+ size = bucketsize << log2qty;
+ if (flags & HASH_EARLY)
+ table = alloc_bootmem(size);
+ else if (hashdist)
+ table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
+ else {
+ unsigned long order;
+			for (order = 0; ((1UL << order) << PAGE_SHIFT) < size; order++)
+ ;
+ table = (void*) __get_free_pages(GFP_ATOMIC, order);
+ }
+ } while (!table && size > PAGE_SIZE && --log2qty);
+
+ if (!table)
+ panic("Failed to allocate %s hash table\n", tablename);
+
+ printk("%s hash table entries: %d (order: %d, %lu bytes)\n",
+ tablename,
+ (1U << log2qty),
+ long_log2(size) - PAGE_SHIFT,
+ size);
+
+ if (_hash_shift)
+ *_hash_shift = log2qty;
+ if (_hash_mask)
+ *_hash_mask = (1 << log2qty) - 1;
+
+ return table;
+}
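
A hedged sketch of a typical caller, modeled on how the dentry-cache hash of
this era is sized; the call site and its parameters are recalled from memory,
not taken from this patch:

#include <linux/list.h>
#include <linux/bootmem.h>

static unsigned int d_hash_shift;
static unsigned int d_hash_mask;
static struct hlist_head *dentry_hashtable;

static void __init dcache_hash_init(void)
{
	dentry_hashtable =
		alloc_large_system_hash("Dentry cache",
					sizeof(struct hlist_head),
					0,		/* numentries: let the kernel size it */
					13,		/* scale: 1 bucket per 8KB of lowmem */
					HASH_EARLY,	/* allocate from bootmem */
					&d_hash_shift,
					&d_hash_mask,
					0);		/* limit: default (1/16 of memory) */
}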
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/Makefile
--- a/linux-2.4.30-xen-sparse/Makefile Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,592 +0,0 @@
-VERSION = 2
-PATCHLEVEL = 4
-SUBLEVEL = 30
-EXTRAVERSION =
-
-KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
-
-# SUBARCH always tells us the underlying machine architecture.
-# Unless overridden, by default ARCH is equivalent to SUBARCH.
-# This will be overridden for Xen and UML builds.
-SUBARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ -e s/arm.*/arm/ -e s/sa110/arm/)
-ARCH ?= $(SUBARCH)
-
-## XXX The following hack can be discarded after users have adjusted to the
-## architectural name change 'xeno' -> 'xen'.
-ifeq ($(ARCH),xeno)
- ARCH := xen
-endif
-
-KERNELPATH=kernel-$(shell echo $(KERNELRELEASE) | sed -e "s/-//g")
-
-CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
- else if [ -x /bin/bash ]; then echo /bin/bash; \
- else echo sh; fi ; fi)
-TOPDIR := $(shell /bin/pwd)
-
-HPATH = $(TOPDIR)/include
-FINDHPATH = $(HPATH)/asm $(HPATH)/linux $(HPATH)/scsi $(HPATH)/net $(HPATH)/math-emu
-
-HOSTCC = gcc
-HOSTCFLAGS = -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer
-
-CROSS_COMPILE =
-
-#
-# Include the make variables (CC, etc...)
-#
-
-AS = $(CROSS_COMPILE)as
-LD = $(CROSS_COMPILE)ld
-CC = $(CROSS_COMPILE)gcc
-CPP = $(CC) -E
-AR = $(CROSS_COMPILE)ar
-NM = $(CROSS_COMPILE)nm
-STRIP = $(CROSS_COMPILE)strip
-OBJCOPY = $(CROSS_COMPILE)objcopy
-OBJDUMP = $(CROSS_COMPILE)objdump
-MAKEFILES = $(TOPDIR)/.config
-GENKSYMS = /sbin/genksyms
-DEPMOD = /sbin/depmod
-MODFLAGS = -DMODULE
-CFLAGS_KERNEL =
-PERL = perl
-AWK = awk
-RPM := $(shell if [ -x "/usr/bin/rpmbuild" ]; then echo rpmbuild; \
- else echo rpm; fi)
-
-export VERSION PATCHLEVEL SUBLEVEL EXTRAVERSION KERNELRELEASE ARCH \
- CONFIG_SHELL TOPDIR HPATH HOSTCC HOSTCFLAGS CROSS_COMPILE AS LD CC \
- CPP AR NM STRIP OBJCOPY OBJDUMP MAKE MAKEFILES GENKSYMS MODFLAGS PERL AWK
-
-all: do-it-all
-
-#
-# Make "config" the default target if there is no configuration file or
-# "depend" the target if there is no top-level dependency information.
-#
-
-ifeq (.config,$(wildcard .config))
-include .config
-ifeq (.depend,$(wildcard .depend))
-include .depend
-do-it-all: Version vmlinux
-else
-CONFIGURATION = depend
-do-it-all: depend
-endif
-else
-CONFIGURATION = config
-do-it-all: config
-endif
-
-#
-# INSTALL_PATH specifies where to place the updated kernel and system map
-# images. Uncomment if you want to place them anywhere other than root.
-#
-
-#export INSTALL_PATH=/boot
-
-#
-# INSTALL_MOD_PATH specifies a prefix to MODLIB for module directory
-# relocations required by build roots. This is not defined in the
-# makefile but the argument can be passed to make if needed.
-#
-
-MODLIB = $(INSTALL_MOD_PATH)/lib/modules/$(KERNELRELEASE)
-export MODLIB
-
-#
-# standard CFLAGS
-#
-
-CPPFLAGS := -D__KERNEL__ -I$(HPATH)
-
-CFLAGS := $(CPPFLAGS) -Wall -Wstrict-prototypes -Wno-trigraphs -O2 \
- -fno-strict-aliasing -fno-common
-ifndef CONFIG_FRAME_POINTER
-CFLAGS += -fomit-frame-pointer
-endif
-AFLAGS := -D__ASSEMBLY__ $(CPPFLAGS)
-
-#
-# ROOT_DEV specifies the default root-device when making the image.
-# This can be either FLOPPY, CURRENT, /dev/xxxx or empty, in which case
-# the default of FLOPPY is used by 'build'.
-# This is i386 specific.
-#
-
-export ROOT_DEV = CURRENT
-
-#
-# If you want to preset the SVGA mode, uncomment the next line and
-# set SVGA_MODE to whatever number you want.
-# Set it to -DSVGA_MODE=NORMAL_VGA if you just want the EGA/VGA mode.
-# The number is the same as you would ordinarily press at bootup.
-# This is i386 specific.
-#
-
-export SVGA_MODE = -DSVGA_MODE=NORMAL_VGA
-
-#
-# If you want the RAM disk device, define this to be the size in blocks.
-# This is i386 specific.
-#
-
-#export RAMDISK = -DRAMDISK=512
-
-CORE_FILES =kernel/kernel.o mm/mm.o fs/fs.o ipc/ipc.o
-NETWORKS =net/network.o
-
-LIBS =$(TOPDIR)/lib/lib.a
-SUBDIRS =kernel drivers mm fs net ipc lib crypto
-
-DRIVERS-n :=
-DRIVERS-y :=
-DRIVERS-m :=
-DRIVERS- :=
-
-DRIVERS-$(CONFIG_ACPI_BOOT) += drivers/acpi/acpi.o
-DRIVERS-$(CONFIG_PARPORT) += drivers/parport/driver.o
-DRIVERS-y += drivers/char/char.o \
- drivers/block/block.o \
- drivers/misc/misc.o \
- drivers/net/net.o
-DRIVERS-$(CONFIG_AGP) += drivers/char/agp/agp.o
-DRIVERS-$(CONFIG_DRM_NEW) += drivers/char/drm/drm.o
-DRIVERS-$(CONFIG_DRM_OLD) += drivers/char/drm-4.0/drm.o
-DRIVERS-$(CONFIG_NUBUS) += drivers/nubus/nubus.a
-DRIVERS-$(CONFIG_NET_FC) += drivers/net/fc/fc.o
-DRIVERS-$(CONFIG_DEV_APPLETALK) += drivers/net/appletalk/appletalk.o
-DRIVERS-$(CONFIG_TR) += drivers/net/tokenring/tr.o
-DRIVERS-$(CONFIG_WAN) += drivers/net/wan/wan.o
-DRIVERS-$(CONFIG_ARCNET) += drivers/net/arcnet/arcnetdrv.o
-DRIVERS-$(CONFIG_ATM) += drivers/atm/atm.o
-DRIVERS-$(CONFIG_IDE) += drivers/ide/idedriver.o
-DRIVERS-$(CONFIG_FC4) += drivers/fc4/fc4.a
-DRIVERS-$(CONFIG_SCSI) += drivers/scsi/scsidrv.o
-DRIVERS-$(CONFIG_FUSION_BOOT) += drivers/message/fusion/fusion.o
-DRIVERS-$(CONFIG_IEEE1394) += drivers/ieee1394/ieee1394drv.o
-
-ifneq ($(CONFIG_CD_NO_IDESCSI)$(CONFIG_BLK_DEV_IDECD)$(CONFIG_BLK_DEV_SR)$(CONFIG_PARIDE_PCD),)
-DRIVERS-y += drivers/cdrom/driver.o
-endif
-
-DRIVERS-$(CONFIG_SOUND) += drivers/sound/sounddrivers.o
-DRIVERS-$(CONFIG_PCI) += drivers/pci/driver.o
-DRIVERS-$(CONFIG_MTD) += drivers/mtd/mtdlink.o
-DRIVERS-$(CONFIG_PCMCIA) += drivers/pcmcia/pcmcia.o
-DRIVERS-$(CONFIG_NET_PCMCIA) += drivers/net/pcmcia/pcmcia_net.o
-DRIVERS-$(CONFIG_NET_WIRELESS) += drivers/net/wireless/wireless_net.o
-DRIVERS-$(CONFIG_PCMCIA_CHRDEV) += drivers/char/pcmcia/pcmcia_char.o
-DRIVERS-$(CONFIG_DIO) += drivers/dio/dio.a
-DRIVERS-$(CONFIG_SBUS) += drivers/sbus/sbus_all.o
-DRIVERS-$(CONFIG_ZORRO) += drivers/zorro/driver.o
-DRIVERS-$(CONFIG_FC4) += drivers/fc4/fc4.a
-DRIVERS-$(CONFIG_PPC32) += drivers/macintosh/macintosh.o
-DRIVERS-$(CONFIG_MAC) += drivers/macintosh/macintosh.o
-DRIVERS-$(CONFIG_ISAPNP) += drivers/pnp/pnp.o
-DRIVERS-$(CONFIG_I2C) += drivers/i2c/i2c.o
-DRIVERS-$(CONFIG_VT) += drivers/video/video.o
-DRIVERS-$(CONFIG_PARIDE) += drivers/block/paride/paride.a
-DRIVERS-$(CONFIG_HAMRADIO) += drivers/net/hamradio/hamradio.o
-DRIVERS-$(CONFIG_TC) += drivers/tc/tc.a
-DRIVERS-$(CONFIG_USB) += drivers/usb/usbdrv.o
-DRIVERS-$(CONFIG_USB_GADGET) += drivers/usb/gadget/built-in.o
-DRIVERS-y +=drivers/media/media.o
-DRIVERS-$(CONFIG_INPUT) += drivers/input/inputdrv.o
-DRIVERS-$(CONFIG_HIL) += drivers/hil/hil.o
-DRIVERS-$(CONFIG_I2O) += drivers/message/i2o/i2o.o
-DRIVERS-$(CONFIG_IRDA) += drivers/net/irda/irda.o
-DRIVERS-$(CONFIG_PHONE) += drivers/telephony/telephony.o
-DRIVERS-$(CONFIG_MD) += drivers/md/mddev.o
-DRIVERS-$(CONFIG_GSC) += drivers/gsc/gscbus.o
-DRIVERS-$(CONFIG_BLUEZ) += drivers/bluetooth/bluetooth.o
-DRIVERS-$(CONFIG_HOTPLUG_PCI) += drivers/hotplug/vmlinux-obj.o
-DRIVERS-$(CONFIG_ISDN_BOOL) += drivers/isdn/vmlinux-obj.o
-DRIVERS-$(CONFIG_CRYPTO) += crypto/crypto.o
-
-DRIVERS := $(DRIVERS-y)
-
-
-# files removed with 'make clean'
-CLEAN_FILES = \
- kernel/ksyms.lst include/linux/compile.h \
- vmlinux System.map \
- .tmp* \
- drivers/char/consolemap_deftbl.c drivers/video/promcon_tbl.c \
- drivers/char/conmakehash \
- drivers/char/drm/*-mod.c \
- drivers/pci/devlist.h drivers/pci/classlist.h drivers/pci/gen-devlist \
- drivers/zorro/devlist.h drivers/zorro/gen-devlist \
- drivers/sound/bin2hex drivers/sound/hex2hex \
- drivers/atm/fore200e_mkfirm drivers/atm/{pca,sba}*{.bin,.bin1,.bin2} \
- drivers/scsi/aic7xxx/aicasm/aicasm \
- drivers/scsi/aic7xxx/aicasm/aicasm_gram.c \
- drivers/scsi/aic7xxx/aicasm/aicasm_gram.h \
- drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.c \
- drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.h \
- drivers/scsi/aic7xxx/aicasm/aicasm_macro_scan.c \
- drivers/scsi/aic7xxx/aicasm/aicasm_scan.c \
- drivers/scsi/aic7xxx/aicasm/aicdb.h \
- drivers/scsi/aic7xxx/aicasm/y.tab.h \
- drivers/scsi/53c700_d.h \
- drivers/tc/lk201-map.c \
- net/khttpd/make_times_h \
- net/khttpd/times.h \
- submenu* \
- drivers/ieee1394/oui.c
-# directories removed with 'make clean'
-CLEAN_DIRS = \
- modules
-
-# files removed with 'make mrproper'
-MRPROPER_FILES = \
- include/linux/autoconf.h include/linux/version.h \
- lib/crc32table.h lib/gen_crc32table \
- drivers/net/hamradio/soundmodem/sm_tbl_{afsk1200,afsk2666,fsk9600}.h \
- drivers/net/hamradio/soundmodem/sm_tbl_{hapn4800,psk4800}.h \
- drivers/net/hamradio/soundmodem/sm_tbl_{afsk2400_7,afsk2400_8}.h \
- drivers/net/hamradio/soundmodem/gentbl \
- drivers/sound/*_boot.h drivers/sound/.*.boot \
- drivers/sound/msndinit.c \
- drivers/sound/msndperm.c \
- drivers/sound/pndsperm.c \
- drivers/sound/pndspini.c \
- drivers/atm/fore200e_*_fw.c drivers/atm/.fore200e_*.fw \
- .version .config* config.in config.old \
- scripts/tkparse scripts/kconfig.tk scripts/kconfig.tmp \
- scripts/lxdialog/*.o scripts/lxdialog/lxdialog \
- .menuconfig.log \
- include/asm \
- .hdepend scripts/mkdep scripts/split-include scripts/docproc \
- $(TOPDIR)/include/linux/modversions.h \
- kernel.spec
-
-# directories removed with 'make mrproper'
-MRPROPER_DIRS = \
- include/config \
- $(TOPDIR)/include/linux/modules
-
-
-include arch/$(ARCH)/Makefile
-
-# Extra cflags for kbuild 2.4. The default is to forbid includes by kernel code
-# from user space headers. Some UML code requires user space headers, in the
-# UML Makefiles add 'kbuild_2_4_nostdinc :=' before include Rules.make. No
-# other kernel code should include user space headers, if you need
-# 'kbuild_2_4_nostdinc :=' or -I/usr/include for kernel code and you are not UML
-# then your code is broken! KAO.
-
-kbuild_2_4_nostdinc := -nostdinc -iwithprefix include
-export kbuild_2_4_nostdinc
-
-export CPPFLAGS CFLAGS CFLAGS_KERNEL AFLAGS AFLAGS_KERNEL
-
-export NETWORKS DRIVERS LIBS HEAD LDFLAGS LINKFLAGS MAKEBOOT ASFLAGS
-
-.S.s:
- $(CPP) $(AFLAGS) $(AFLAGS_KERNEL) -traditional -o $*.s $<
-.S.o:
- $(CC) $(AFLAGS) $(AFLAGS_KERNEL) -traditional -c -o $*.o $<
-
-Version: dummy
- @rm -f include/linux/compile.h
-
-boot: vmlinux
- @$(MAKE) CFLAGS="$(CFLAGS) $(CFLAGS_KERNEL)" -C arch/$(ARCH)/boot
-
-vmlinux: include/linux/version.h $(CONFIGURATION) init/main.o init/version.o init/do_mounts.o linuxsubdirs
- $(LD) $(LINKFLAGS) $(HEAD) init/main.o init/version.o init/do_mounts.o \
- --start-group \
- $(CORE_FILES) \
- $(DRIVERS) \
- $(NETWORKS) \
- $(LIBS) \
- --end-group \
- -o vmlinux
- $(NM) vmlinux | grep -v '\(compiled\)\|\(\.o$$\)\|\( [aUw] \)\|\(\.\.ng$$\)\|\(LASH[RL]DI\)' | sort > System.map
-
-symlinks:
- rm -f include/asm
- ( cd include ; ln -sf asm-$(ARCH) asm)
- @if [ ! -d include/linux/modules ]; then \
- mkdir include/linux/modules; \
- fi
-
-oldconfig: symlinks
- $(CONFIG_SHELL) scripts/Configure -d arch/$(ARCH)/config.in
-
-xconfig: symlinks
- $(MAKE) -C scripts kconfig.tk
- wish -f scripts/kconfig.tk
-
-menuconfig: include/linux/version.h symlinks
- $(MAKE) -C scripts/lxdialog all
- $(CONFIG_SHELL) scripts/Menuconfig arch/$(ARCH)/config.in
-
-config: symlinks
- $(CONFIG_SHELL) scripts/Configure arch/$(ARCH)/config.in
-
-include/config/MARKER: scripts/split-include include/linux/autoconf.h
- scripts/split-include include/linux/autoconf.h include/config
- @ touch include/config/MARKER
-
-linuxsubdirs: $(patsubst %, _dir_%, $(SUBDIRS))
-
-$(patsubst %, _dir_%, $(SUBDIRS)) : dummy include/linux/version.h include/config/MARKER
- $(MAKE) CFLAGS="$(CFLAGS) $(CFLAGS_KERNEL)" -C $(patsubst _dir_%, %, $@)
-
-$(TOPDIR)/include/linux/version.h: include/linux/version.h
-$(TOPDIR)/include/linux/compile.h: include/linux/compile.h
-
-newversion:
- . scripts/mkversion > .tmpversion
- @mv -f .tmpversion .version
-
-uts_len := 64
-uts_truncate := sed -e 's/\(.\{1,$(uts_len)\}\).*/\1/'
-
-include/linux/compile.h: $(CONFIGURATION) include/linux/version.h newversion
- @echo -n \#`cat .version` > .ver1
- @if [ -n "$(CONFIG_SMP)" ] ; then echo -n " SMP" >> .ver1; fi
- @if [ -f .name ]; then echo -n \-`cat .name` >> .ver1; fi
- @LANG=C echo ' '`date` >> .ver1
- @echo \#define UTS_VERSION \"`cat .ver1 | $(uts_truncate)`\" > .ver
- @LANG=C echo \#define LINUX_COMPILE_TIME \"`date +%T`\" >> .ver
- @echo \#define LINUX_COMPILE_BY \"`whoami`\" >> .ver
- @echo \#define LINUX_COMPILE_HOST \"`hostname | $(uts_truncate)`\" >> .ver
- @([ -x /bin/dnsdomainname ] && /bin/dnsdomainname > .ver1) || \
- ([ -x /bin/domainname ] && /bin/domainname > .ver1) || \
- echo > .ver1
- @echo \#define LINUX_COMPILE_DOMAIN \"`cat .ver1 | $(uts_truncate)`\" >> .ver
- @echo \#define LINUX_COMPILER \"`$(CC) $(CFLAGS) -v 2>&1 | tail -n 1`\" >> .ver
- @mv -f .ver $@
- @rm -f .ver1
-
-include/linux/version.h: ./Makefile
- @expr length "$(KERNELRELEASE)" \<= $(uts_len) > /dev/null || \
- (echo KERNELRELEASE \"$(KERNELRELEASE)\" exceeds $(uts_len) characters >&2; false)
- @echo \#define UTS_RELEASE \"$(KERNELRELEASE)\" > .ver
- @echo \#define LINUX_VERSION_CODE `expr $(VERSION) \\* 65536 + $(PATCHLEVEL) \\* 256 + $(SUBLEVEL)` >> .ver
- @echo '#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))' >>.ver
- @mv -f .ver $@
-
-comma := ,
-
-init/version.o: init/version.c include/linux/compile.h include/config/MARKER
- $(CC) $(CFLAGS) $(CFLAGS_KERNEL) -DUTS_MACHINE='"$(SUBARCH)"' -DKBUILD_BASENAME=$(subst $(comma),_,$(subst -,_,$(*F))) -c -o init/version.o init/version.c
-
-init/main.o: init/main.c include/config/MARKER
- $(CC) $(CFLAGS) $(CFLAGS_KERNEL) $(PROFILING) -DKBUILD_BASENAME=$(subst $(comma),_,$(subst -,_,$(*F))) -c -o $@ $<
-
-init/do_mounts.o: init/do_mounts.c include/config/MARKER
- $(CC) $(CFLAGS) $(CFLAGS_KERNEL) $(PROFILING) -DKBUILD_BASENAME=$(subst $(comma),_,$(subst -,_,$(*F))) -c -o $@ $<
-
-fs lib mm ipc kernel drivers net: dummy
- $(MAKE) CFLAGS="$(CFLAGS) $(CFLAGS_KERNEL)" $(subst $@, _dir_$@, $@)
-
-TAGS: dummy
- { find include/asm-${ARCH} -name '*.h' -print ; \
- find include -type d \( -name "asm-*" -o -name config \) -prune -o -name '*.h' -print ; \
- find $(SUBDIRS) init arch/${ARCH} -name '*.[chS]' ; } | grep -v SCCS | grep -v '\.svn' | etags -
-
-# Exuberant ctags works better with -I
-tags: dummy
- CTAGSF=`ctags --version | grep -i exuberant >/dev/null && echo "-I __initdata,__exitdata,EXPORT_SYMBOL,EXPORT_SYMBOL_NOVERS"`; \
- ctags $$CTAGSF `find include/asm-$(ARCH) -name '*.h'` && \
- find include -type d \( -name "asm-*" -o -name config \) -prune -o -name '*.h' -print | xargs ctags $$CTAGSF -a && \
- find $(SUBDIRS) init -name '*.[ch]' | xargs ctags $$CTAGSF -a
-
-ifdef CONFIG_MODULES
-ifdef CONFIG_MODVERSIONS
-MODFLAGS += -DMODVERSIONS -include $(HPATH)/linux/modversions.h
-endif
-
-.PHONY: modules
-modules: $(patsubst %, _mod_%, $(SUBDIRS))
-
-.PHONY: $(patsubst %, _mod_%, $(SUBDIRS))
-$(patsubst %, _mod_%, $(SUBDIRS)) : include/linux/version.h include/config/MARKER
- $(MAKE) -C $(patsubst _mod_%, %, $@) CFLAGS="$(CFLAGS) $(MODFLAGS)" MAKING_MODULES=1 modules
-
-.PHONY: modules_install
-modules_install: _modinst_ $(patsubst %, _modinst_%, $(SUBDIRS)) _modinst_post
-
-.PHONY: _modinst_
-_modinst_:
- @rm -rf $(MODLIB)/kernel
- @rm -f $(MODLIB)/build
- @mkdir -p $(MODLIB)/kernel
- @ln -s $(TOPDIR) $(MODLIB)/build
-
-# If System.map exists, run depmod. This deliberately does not have a
-# dependency on System.map since that would run the dependency tree on
-# vmlinux. This depmod is only for convenience to give the initial
-# boot a modules.dep even before / is mounted read-write. However the
-# boot script depmod is the master version.
-ifeq "$(strip $(INSTALL_MOD_PATH))" ""
-depmod_opts :=
-else
-depmod_opts := -b $(INSTALL_MOD_PATH) -r
-endif
-.PHONY: _modinst_post
-_modinst_post: _modinst_post_pcmcia
- if [ -r System.map ]; then $(DEPMOD) -ae -F System.map $(depmod_opts) $(KERNELRELEASE); fi
-
-# Backwards compatibility symlinks for people still using old versions
-# of pcmcia-cs with hard coded pathnames on insmod. Remove
-# _modinst_post_pcmcia for kernel 2.4.1.
-.PHONY: _modinst_post_pcmcia
-_modinst_post_pcmcia:
- cd $(MODLIB); \
- mkdir -p pcmcia; \
- find kernel -path '*/pcmcia/*' -name '*.o' | xargs -i -r ln -sf ../{} pcmcia
-
-.PHONY: $(patsubst %, _modinst_%, $(SUBDIRS))
-$(patsubst %, _modinst_%, $(SUBDIRS)) :
- $(MAKE) -C $(patsubst _modinst_%, %, $@) modules_install
-
-# modules disabled....
-
-else
-modules modules_install: dummy
- @echo
- @echo "The present kernel configuration has modules disabled."
- @echo "Type 'make config' and enable loadable module support."
- @echo "Then build a kernel with module support enabled."
- @echo
- @exit 1
-endif
-
-clean: archclean
- find . \( -name '*.[oas]' -o -name core -o -name '.*.flags' \) -type f -print \
- | grep -v lxdialog/ | xargs rm -f
- rm -f $(CLEAN_FILES)
- rm -rf $(CLEAN_DIRS)
- $(MAKE) -C Documentation/DocBook clean
-
-mrproper: clean archmrproper
- find . \( -size 0 -o -name .depend \) -type f -print | xargs rm -f
- rm -f $(MRPROPER_FILES)
- rm -rf $(MRPROPER_DIRS)
- $(MAKE) -C Documentation/DocBook mrproper
-
-distclean: mrproper
- rm -f core `find . \( -not -type d \) -and \
- \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
- -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
- -o -name '.*.rej' -o -name '.SUMS' -o -size 0 \) -type f -print` TAGS tags
-
-backup: mrproper
- cd .. && tar cf - linux/ | gzip -9 > backup.gz
- sync
-
-sgmldocs:
- chmod 755 $(TOPDIR)/scripts/docgen
- chmod 755 $(TOPDIR)/scripts/gen-all-syms
- chmod 755 $(TOPDIR)/scripts/kernel-doc
- $(MAKE) -C $(TOPDIR)/Documentation/DocBook books
-
-psdocs: sgmldocs
- $(MAKE) -C Documentation/DocBook ps
-
-pdfdocs: sgmldocs
- $(MAKE) -C Documentation/DocBook pdf
-
-htmldocs: sgmldocs
- $(MAKE) -C Documentation/DocBook html
-
-mandocs:
- chmod 755 $(TOPDIR)/scripts/kernel-doc
- chmod 755 $(TOPDIR)/scripts/split-man
- $(MAKE) -C Documentation/DocBook man
-
-sums:
- find . -type f -print | sort | xargs sum > .SUMS
-
-dep-files: scripts/mkdep archdep include/linux/version.h
- rm -f .depend .hdepend
- $(MAKE) $(patsubst %,_sfdep_%,$(SUBDIRS)) _FASTDEP_ALL_SUB_DIRS="$(SUBDIRS)"
-ifdef CONFIG_MODVERSIONS
- $(MAKE) update-modverfile
-endif
- scripts/mkdep -- `find $(FINDHPATH) \( -name SCCS -o -name .svn \) -prune -o -follow -name \*.h ! -name modversions.h -print` > .hdepend
- scripts/mkdep -- init/*.c > .depend
-
-ifdef CONFIG_MODVERSIONS
-MODVERFILE := $(TOPDIR)/include/linux/modversions.h
-else
-MODVERFILE :=
-endif
-export MODVERFILE
-
-depend dep: dep-files
-
-checkconfig:
- find * -name '*.[hcS]' -type f -print | sort | xargs $(PERL) -w scripts/checkconfig.pl
-
-checkhelp:
- find * -name [cC]onfig.in -print | sort | xargs $(PERL) -w scripts/checkhelp.pl
-
-checkincludes:
- find * -name '*.[hcS]' -type f -print | sort | xargs $(PERL) -w scripts/checkincludes.pl
-
-ifdef CONFIGURATION
-..$(CONFIGURATION):
- @echo
- @echo "You have a bad or nonexistent" .$(CONFIGURATION) ": running
'make" $(CONFIGURATION)"'"
- @echo
- $(MAKE) $(CONFIGURATION)
- @echo
- @echo "Successful. Try re-making (ignore the error that follows)"
- @echo
- exit 1
-
-#dummy: ..$(CONFIGURATION)
-dummy:
-
-else
-
-dummy:
-
-endif
-
-include Rules.make
-
-#
-# This generates dependencies for the .h files.
-#
-
-scripts/mkdep: scripts/mkdep.c
- $(HOSTCC) $(HOSTCFLAGS) -o scripts/mkdep scripts/mkdep.c
-
-scripts/split-include: scripts/split-include.c
- $(HOSTCC) $(HOSTCFLAGS) -o scripts/split-include scripts/split-include.c
-
-#
-# RPM target
-#
-# If you do a make spec before packing the tarball you can rpm -ta it
-#
-spec:
- . scripts/mkspec >kernel.spec
-
-#
-# Build a tar ball, generate an rpm from it and pack the result
-# There are two bits of magic here
-# 1) The use of /. to avoid tar packing just the symlink
-# 2) Removing the .dep files as they have source paths in them that
-# will become invalid
-#
-rpm: clean spec
- find . \( -size 0 -o -name .depend -o -name .hdepend \) -type f -print | xargs rm -f
- set -e; \
- cd $(TOPDIR)/.. ; \
- ln -sf $(TOPDIR) $(KERNELPATH) ; \
- tar -cvz --exclude CVS -f $(KERNELPATH).tar.gz $(KERNELPATH)/. ; \
- rm $(KERNELPATH) ; \
- cd $(TOPDIR) ; \
- . scripts/mkversion > .version ; \
- $(RPM) -ta $(TOPDIR)/../$(KERNELPATH).tar.gz ; \
- rm $(TOPDIR)/../$(KERNELPATH).tar.gz
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/arch/xen/Makefile
--- a/linux-2.4.30-xen-sparse/arch/xen/Makefile Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,137 +0,0 @@
-#
-# xen/Makefile
-#
-# This file is included by the global makefile so that you can add your own
-# architecture-specific flags and dependencies. Remember to have actions
-# for "archclean" and "archdep" for cleaning up and making dependencies for
-# this architecture
-#
-# This file is subject to the terms and conditions of the GNU General Public
-# License. See the file "COPYING" in the main directory of this archive
-# for more details.
-#
-# Copyright (C) 1994 by Linus Torvalds
-#
-# 19990713 Artur Skawina <skawina@xxxxxxxxxxxxx>
-# Added '-march' and '-mpreferred-stack-boundary' support
-#
-
-# If no .config file exists then use the appropriate defconfig-* file
-ifneq (.config,$(wildcard .config))
-DUMMYX:=$(shell cp $(TOPDIR)/arch/xen/defconfig$(EXTRAVERSION) $(TOPDIR)/.config)
--include $(TOPDIR)/.config
-endif
-
-LD=$(CROSS_COMPILE)ld -m elf_i386
-OBJCOPY=$(CROSS_COMPILE)objcopy -R .note -R .comment -S
-LDFLAGS=-e stext
-LINKFLAGS =-T $(TOPDIR)/arch/xen/vmlinux.lds $(LDFLAGS)
-
-CFLAGS += -pipe
-
-check_gcc = $(shell if $(CC) $(1) -S -o /dev/null -xc /dev/null > /dev/null 2>&1; then echo "$(1)"; else echo "$(2)"; fi)
-
-# prevent gcc from keeping the stack 16 byte aligned
-CFLAGS += $(call check_gcc,-mpreferred-stack-boundary=2,)
-
-ifdef CONFIG_M686
-CFLAGS += -march=i686
-endif
-
-ifdef CONFIG_MPENTIUMIII
-CFLAGS += -march=i686
-endif
-
-ifdef CONFIG_MPENTIUM4
-CFLAGS += -march=i686
-endif
-
-ifdef CONFIG_MK7
-CFLAGS += $(call check_gcc,-march=athlon,-march=i686 -malign-functions=4)
-endif
-
-# Disable unit-at-a-time mode, it makes gcc use a lot more stack
-# due to the lack of sharing of stacklots.
-CFLAGS += $(call check_gcc,-fno-unit-at-a-time,)
-
-HEAD := arch/xen/kernel/head.o arch/xen/kernel/init_task.o
-
-SUBDIRS += arch/xen/kernel arch/xen/mm arch/xen/lib
-SUBDIRS += arch/xen/drivers/console
-SUBDIRS += arch/xen/drivers/evtchn
-SUBDIRS += arch/xen/drivers/blkif
-SUBDIRS += arch/xen/drivers/netif
-SUBDIRS += arch/xen/drivers/balloon
-ifdef CONFIG_XEN_PRIVILEGED_GUEST
-SUBDIRS += arch/xen/drivers/dom0
-endif
-
-CORE_FILES += arch/xen/kernel/kernel.o arch/xen/mm/mm.o
-CORE_FILES += arch/xen/drivers/evtchn/drv.o
-CORE_FILES += arch/xen/drivers/console/drv.o
-DRIVERS += arch/xen/drivers/blkif/drv.o
-DRIVERS += arch/xen/drivers/netif/drv.o
-ifdef CONFIG_XEN_PRIVILEGED_GUEST
-CORE_FILES += arch/xen/drivers/dom0/drv.o
-endif
-CORE_FILES += arch/xen/drivers/balloon/drv.o
-LIBS := $(TOPDIR)/arch/xen/lib/lib.a $(LIBS) $(TOPDIR)/arch/xen/lib/lib.a
-
-arch/xen/kernel: dummy
- $(MAKE) linuxsubdirs SUBDIRS=arch/xen/kernel
-
-arch/xen/mm: dummy
- $(MAKE) linuxsubdirs SUBDIRS=arch/xen/mm
-
-arch/xen/drivers/console: dummy
- $(MAKE) linuxsubdirs SUBDIRS=arch/xen/drivers/console
-
-arch/xen/drivers/network: dummy
- $(MAKE) linuxsubdirs SUBDIRS=arch/xen/drivers/network
-
-arch/xen/drivers/block: dummy
- $(MAKE) linuxsubdirs SUBDIRS=arch/xen/drivers/block
-
-arch/xen/drivers/dom0: dummy
- $(MAKE) linuxsubdirs SUBDIRS=arch/xen/drivers/dom0
-
-arch/xen/drivers/balloon: dummy
- $(MAKE) linuxsubdirs SUBDIRS=arch/xen/drivers/balloon
-
-MAKEBOOT = $(MAKE) -C arch/$(ARCH)/boot
-
-vmlinux: arch/xen/vmlinux.lds
-
-FORCE: ;
-
-.PHONY: bzImage compressed clean archclean archmrproper archdep
-
-
-bzImage: vmlinux
- @$(MAKEBOOT) bzImage
-
-INSTALL_NAME ?= $(KERNELRELEASE)
-install: bzImage
- mkdir -p $(INSTALL_PATH)/boot
- ln -f -s vmlinuz-$(INSTALL_NAME)$(INSTALL_SUFFIX) $(INSTALL_PATH)/boot/vmlinuz-$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(XENGUEST)$(INSTALL_SUFFIX)
- rm -f $(INSTALL_PATH)/boot/vmlinuz-$(INSTALL_NAME)$(INSTALL_SUFFIX)
- install -m0644 arch/$(ARCH)/boot/bzImage $(INSTALL_PATH)/boot/vmlinuz-$(INSTALL_NAME)$(INSTALL_SUFFIX)
- install -m0644 vmlinux $(INSTALL_PATH)/boot/vmlinux-syms-$(INSTALL_NAME)$(INSTALL_SUFFIX)
- install -m0664 .config $(INSTALL_PATH)/boot/config-$(INSTALL_NAME)$(INSTALL_SUFFIX)
- install -m0664 System.map $(INSTALL_PATH)/boot/System.map-$(INSTALL_NAME)$(INSTALL_SUFFIX)
- ln -f -s vmlinuz-$(INSTALL_NAME)$(INSTALL_SUFFIX) $(INSTALL_PATH)/boot/vmlinuz-$(VERSION).$(PATCHLEVEL)$(XENGUEST)$(INSTALL_SUFFIX)
-
-%_config: arch/xen/defconfig-%
- rm -f .config arch/xen/defconfig
- cp -f arch/xen/defconfig-$(@:_config=) arch/xen/defconfig
- cp -f arch/xen/defconfig-$(@:_config=) .config
-
-
-archclean:
- @$(MAKEBOOT) clean
-
-archmrproper:
- rm -f include/asm-xen/xen-public/arch
-
-archdep:
- @$(MAKEBOOT) dep
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/arch/xen/boot/Makefile
--- a/linux-2.4.30-xen-sparse/arch/xen/boot/Makefile Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,13 +0,0 @@
-#
-# arch/xen/boot/Makefile
-#
-
-bzImage: $(TOPDIR)/vmlinux
- $(OBJCOPY) $< Image
- gzip -f -9 < Image > $@
- rm -f Image
-
-dep:
-
-clean:
- rm -f bzImage Image
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/arch/xen/config.in
--- a/linux-2.4.30-xen-sparse/arch/xen/config.in Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,325 +0,0 @@
-#
-# For a description of the syntax of this configuration file,
-# see Documentation/kbuild/config-language.txt.
-#
-mainmenu_name "Linux Kernel Configuration"
-
-define_bool CONFIG_XEN y
-
-define_bool CONFIG_X86 y
-define_bool CONFIG_ISA y
-define_bool CONFIG_SBUS n
-
-define_bool CONFIG_UID16 y
-
-mainmenu_option next_comment
-comment 'Xen'
-bool 'Support for privileged operations (domain 0)' CONFIG_XEN_PRIVILEGED_GUEST
-bool 'Device-driver domain (physical device access)' CONFIG_XEN_PHYSDEV_ACCESS
-bool 'Scrub memory before freeing it to Xen' CONFIG_XEN_SCRUB_PAGES
-bool 'Network-device frontend driver' CONFIG_XEN_NETDEV_FRONTEND
-bool 'Block-device frontend driver' CONFIG_XEN_BLKDEV_FRONTEND
-endmenu
-# The IBM S/390 patch needs this.
-define_bool CONFIG_NO_IDLE_HZ y
-
-if [ "$CONFIG_XEN_PHYSDEV_ACCESS" == "y" ]; then
- define_bool CONFIG_FOREIGN_PAGES y
-else
- define_bool CONFIG_FOREIGN_PAGES n
- define_bool CONFIG_NETDEVICES y
- define_bool CONFIG_VT n
-fi
-
-mainmenu_option next_comment
-comment 'Code maturity level options'
-bool 'Prompt for development and/or incomplete code/drivers' CONFIG_EXPERIMENTAL
-endmenu
-
-mainmenu_option next_comment
-comment 'Loadable module support'
-bool 'Enable loadable module support' CONFIG_MODULES
-if [ "$CONFIG_MODULES" = "y" ]; then
- bool ' Set version information on all module symbols' CONFIG_MODVERSIONS
- bool ' Kernel module loader' CONFIG_KMOD
-fi
-endmenu
-
-mainmenu_option next_comment
-comment 'Processor type and features'
-choice 'Processor family' \
- "Pentium-Pro/Celeron/Pentium-II CONFIG_M686 \
- Pentium-III/Celeron(Coppermine) CONFIG_MPENTIUMIII \
- Pentium-4 CONFIG_MPENTIUM4 \
- Athlon/Duron/K7 CONFIG_MK7 \
- Opteron/Athlon64/Hammer/K8 CONFIG_MK8 \
- VIA-C3-2 CONFIG_MVIAC3_2" Pentium-Pro
-
- define_bool CONFIG_X86_WP_WORKS_OK y
- define_bool CONFIG_X86_INVLPG y
- define_bool CONFIG_X86_CMPXCHG y
- define_bool CONFIG_X86_XADD y
- define_bool CONFIG_X86_BSWAP y
- define_bool CONFIG_X86_POPAD_OK y
- define_bool CONFIG_RWSEM_GENERIC_SPINLOCK n
- define_bool CONFIG_RWSEM_XCHGADD_ALGORITHM y
-
- define_bool CONFIG_X86_GOOD_APIC y
- define_bool CONFIG_X86_PGE y
- define_bool CONFIG_X86_USE_PPRO_CHECKSUM y
- define_bool CONFIG_X86_TSC y
-
-if [ "$CONFIG_M686" = "y" ]; then
- define_int CONFIG_X86_L1_CACHE_SHIFT 5
-fi
-if [ "$CONFIG_MPENTIUMIII" = "y" ]; then
- define_int CONFIG_X86_L1_CACHE_SHIFT 5
-fi
-if [ "$CONFIG_MPENTIUM4" = "y" ]; then
- define_int CONFIG_X86_L1_CACHE_SHIFT 7
-fi
-if [ "$CONFIG_MK8" = "y" ]; then
- define_bool CONFIG_MK7 y
-fi
-if [ "$CONFIG_MK7" = "y" ]; then
- define_int CONFIG_X86_L1_CACHE_SHIFT 6
- define_bool CONFIG_X86_USE_3DNOW y
-fi
-if [ "$CONFIG_MVIAC3_2" = "y" ]; then
- define_int CONFIG_X86_L1_CACHE_SHIFT 5
-fi
-
-#if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
-# tristate 'BIOS Enhanced Disk Drive calls determine boot disk (EXPERIMENTAL)' CONFIG_EDD
-#fi
-
-choice 'High Memory Support' \
- "off CONFIG_NOHIGHMEM \
- 4GB CONFIG_HIGHMEM4G" off
-# 64GB CONFIG_HIGHMEM64G" off
-if [ "$CONFIG_HIGHMEM4G" = "y" ]; then
- define_bool CONFIG_HIGHMEM y
-fi
-if [ "$CONFIG_HIGHMEM64G" = "y" ]; then
- define_bool CONFIG_HIGHMEM y
- define_bool CONFIG_X86_PAE y
-fi
-
-if [ "$CONFIG_HIGHMEM" = "y" ]; then
- bool 'HIGHMEM I/O support' CONFIG_HIGHIO
-fi
-
-define_int CONFIG_FORCE_MAX_ZONEORDER 11
-
-#bool 'Symmetric multi-processing support' CONFIG_SMP
-#if [ "$CONFIG_SMP" = "y" -a "$CONFIG_X86_CMPXCHG" = "y" ]; then
-# define_bool CONFIG_HAVE_DEC_LOCK y
-#fi
-endmenu
-
-mainmenu_option next_comment
-comment 'General setup'
-
-bool 'Networking support' CONFIG_NET
-
-if [ "$CONFIG_XEN_PHYSDEV_ACCESS" = "y" ]; then
- bool 'PCI support' CONFIG_PCI
- source drivers/pci/Config.in
-
- bool 'Support for hot-pluggable devices' CONFIG_HOTPLUG
-
- if [ "$CONFIG_HOTPLUG" = "y" ] ; then
- source drivers/pcmcia/Config.in
- source drivers/hotplug/Config.in
- else
- define_bool CONFIG_PCMCIA n
- define_bool CONFIG_HOTPLUG_PCI n
- fi
-fi
-
-bool 'System V IPC' CONFIG_SYSVIPC
-bool 'BSD Process Accounting' CONFIG_BSD_PROCESS_ACCT
-bool 'Sysctl support' CONFIG_SYSCTL
-if [ "$CONFIG_PROC_FS" = "y" ]; then
- choice 'Kernel core (/proc/kcore) format' \
- "ELF CONFIG_KCORE_ELF \
- A.OUT CONFIG_KCORE_AOUT" ELF
-fi
-tristate 'Kernel support for a.out binaries' CONFIG_BINFMT_AOUT
-bool 'Kernel support for ELF binaries' CONFIG_BINFMT_ELF
-tristate 'Kernel support for MISC binaries' CONFIG_BINFMT_MISC
-bool 'Select task to kill on out of memory condition' CONFIG_OOM_KILLER
-
-endmenu
-
-if [ "$CONFIG_XEN_PHYSDEV_ACCESS" = "y" ]; then
- source drivers/mtd/Config.in
-
- source drivers/parport/Config.in
-
- source drivers/pnp/Config.in
-
- source drivers/block/Config.in
-
- source drivers/md/Config.in
-fi
-
-if [ "$CONFIG_NET" = "y" ]; then
- source net/Config.in
-fi
-
-if [ "$CONFIG_XEN_PHYSDEV_ACCESS" = "y" ]; then
- mainmenu_option next_comment
- comment 'ATA/IDE/MFM/RLL support'
-
- tristate 'ATA/IDE/MFM/RLL support' CONFIG_IDE
-
- if [ "$CONFIG_IDE" != "n" ]; then
- source drivers/ide/Config.in
- else
- define_bool CONFIG_BLK_DEV_HD n
- fi
- endmenu
-fi
-
-mainmenu_option next_comment
-comment 'SCSI support'
-
-tristate 'SCSI support' CONFIG_SCSI
-
-if [ "$CONFIG_SCSI" != "n" ]; then
- source drivers/scsi/Config.in
-fi
-endmenu
-
-if [ "$CONFIG_XEN_PHYSDEV_ACCESS" = "y" ]; then
- source drivers/message/fusion/Config.in
-
- source drivers/ieee1394/Config.in
-
- source drivers/message/i2o/Config.in
-
- if [ "$CONFIG_NET" = "y" ]; then
- mainmenu_option next_comment
- comment 'Network device support'
-
- bool 'Network device support' CONFIG_NETDEVICES
- if [ "$CONFIG_NETDEVICES" = "y" ]; then
- source drivers/net/Config.in
- if [ "$CONFIG_ATM" = "y" -o "$CONFIG_ATM" = "m" ]; then
- source drivers/atm/Config.in
- fi
- fi
- endmenu
- fi
-
- source net/ax25/Config.in
-
- source net/irda/Config.in
-
- mainmenu_option next_comment
- comment 'ISDN subsystem'
- if [ "$CONFIG_NET" != "n" ]; then
- tristate 'ISDN support' CONFIG_ISDN
- if [ "$CONFIG_ISDN" != "n" ]; then
- source drivers/isdn/Config.in
- fi
- fi
- endmenu
-
- if [ "$CONFIG_ISA" = "y" ]; then
- mainmenu_option next_comment
- comment 'Old CD-ROM drivers (not SCSI, not IDE)'
-
- bool 'Support non-SCSI/IDE/ATAPI CDROM drives' CONFIG_CD_NO_IDESCSI
- if [ "$CONFIG_CD_NO_IDESCSI" != "n" ]; then
- source drivers/cdrom/Config.in
- fi
- endmenu
- fi
-
- #
- # input before char - char/joystick depends on it. As does USB.
- #
- source drivers/input/Config.in
-else
- #
- # Block device driver configuration
- #
- mainmenu_option next_comment
- comment 'Block devices'
- tristate 'Loopback device support' CONFIG_BLK_DEV_LOOP
- dep_tristate 'Network block device support' CONFIG_BLK_DEV_NBD $CONFIG_NET
- tristate 'RAM disk support' CONFIG_BLK_DEV_RAM
- if [ "$CONFIG_BLK_DEV_RAM" = "y" -o "$CONFIG_BLK_DEV_RAM" = "m" ]; then
- int ' Default RAM disk size' CONFIG_BLK_DEV_RAM_SIZE 4096
- fi
- dep_bool ' Initial RAM disk (initrd) support' CONFIG_BLK_DEV_INITRD
$CONFIG_BLK_DEV_RAM
- bool 'Per partition statistics in /proc/partitions' CONFIG_BLK_STATS
- define_bool CONFIG_BLK_DEV_HD n
- endmenu
-fi
-
-source drivers/char/Config.in
-
-if [ "$CONFIG_XEN_PHYSDEV_ACCESS" = "y" ]; then
- source drivers/media/Config.in
-fi
-
-source fs/Config.in
-
-mainmenu_option next_comment
-comment 'Console drivers'
-
-define_bool CONFIG_XEN_CONSOLE y
-
-if [ "$CONFIG_VT" = "y" ]; then
- bool 'VGA text console' CONFIG_VGA_CONSOLE
- bool 'Dummy console' CONFIG_DUMMY_CONSOLE
- if [ "$CONFIG_XEN_PHYSDEV_ACCESS" = "y" ]; then
- bool 'Video mode selection support' CONFIG_VIDEO_SELECT
- if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
- tristate 'MDA text console (dual-headed) (EXPERIMENTAL)' CONFIG_MDA_CONSOLE
- source drivers/video/Config.in
- fi
- fi
-fi
-endmenu
-
-if [ "$CONFIG_XEN_PHYSDEV_ACCESS" = "y" ]; then
- mainmenu_option next_comment
- comment 'Sound'
-
- tristate 'Sound card support' CONFIG_SOUND
- if [ "$CONFIG_SOUND" != "n" ]; then
- source drivers/sound/Config.in
- fi
- endmenu
-
- source drivers/usb/Config.in
-
- source net/bluetooth/Config.in
-fi
-
-mainmenu_option next_comment
-comment 'Kernel hacking'
-
-bool 'Kernel debugging' CONFIG_DEBUG_KERNEL
-if [ "$CONFIG_DEBUG_KERNEL" != "n" ]; then
- bool ' Check for stack overflows' CONFIG_DEBUG_STACKOVERFLOW
- bool ' Debug high memory support' CONFIG_DEBUG_HIGHMEM
- bool ' Debug memory allocations' CONFIG_DEBUG_SLAB
- bool ' Memory mapped I/O debugging' CONFIG_DEBUG_IOVIRT
- bool ' Magic SysRq key' CONFIG_MAGIC_SYSRQ
- bool ' Spinlock debugging' CONFIG_DEBUG_SPINLOCK
- bool ' Verbose BUG() reporting (adds 70K)' CONFIG_DEBUG_BUGVERBOSE
- bool ' Load all symbols for debugging' CONFIG_KALLSYMS
- bool ' Compile the kernel with frame pointers' CONFIG_FRAME_POINTER
-fi
-
-int 'Kernel messages buffer length shift (0 = default)' CONFIG_LOG_BUF_SHIFT 0
-
-endmenu
-
-source crypto/Config.in
-source lib/Config.in
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/arch/xen/defconfig-xen0
--- a/linux-2.4.30-xen-sparse/arch/xen/defconfig-xen0 Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,924 +0,0 @@
-#
-# Automatically generated by make menuconfig: don't edit
-#
-CONFIG_XEN=y
-CONFIG_X86=y
-CONFIG_ISA=y
-# CONFIG_SBUS is not set
-CONFIG_UID16=y
-
-#
-# Xen
-#
-CONFIG_XEN_PRIVILEGED_GUEST=y
-CONFIG_XEN_PHYSDEV_ACCESS=y
-CONFIG_XEN_SCRUB_PAGES=y
-CONFIG_XEN_NETDEV_FRONTEND=y
-CONFIG_XEN_BLKDEV_FRONTEND=y
-CONFIG_NO_IDLE_HZ=y
-CONFIG_FOREIGN_PAGES=y
-
-#
-# Code maturity level options
-#
-CONFIG_EXPERIMENTAL=y
-
-#
-# Loadable module support
-#
-CONFIG_MODULES=y
-CONFIG_MODVERSIONS=y
-CONFIG_KMOD=y
-
-#
-# Processor type and features
-#
-CONFIG_M686=y
-# CONFIG_MPENTIUMIII is not set
-# CONFIG_MPENTIUM4 is not set
-# CONFIG_MK7 is not set
-# CONFIG_MK8 is not set
-# CONFIG_MVIAC3_2 is not set
-CONFIG_X86_WP_WORKS_OK=y
-CONFIG_X86_INVLPG=y
-CONFIG_X86_CMPXCHG=y
-CONFIG_X86_XADD=y
-CONFIG_X86_BSWAP=y
-CONFIG_X86_POPAD_OK=y
-# CONFIG_RWSEM_GENERIC_SPINLOCK is not set
-CONFIG_RWSEM_XCHGADD_ALGORITHM=y
-CONFIG_X86_GOOD_APIC=y
-CONFIG_X86_PGE=y
-CONFIG_X86_USE_PPRO_CHECKSUM=y
-CONFIG_X86_TSC=y
-CONFIG_X86_L1_CACHE_SHIFT=5
-CONFIG_NOHIGHMEM=y
-# CONFIG_HIGHMEM4G is not set
-CONFIG_FORCE_MAX_ZONEORDER=11
-
-#
-# General setup
-#
-CONFIG_NET=y
-CONFIG_PCI=y
-CONFIG_PCI_NAMES=y
-CONFIG_HOTPLUG=y
-
-#
-# PCMCIA/CardBus support
-#
-# CONFIG_PCMCIA is not set
-
-#
-# PCI Hotplug Support
-#
-# CONFIG_HOTPLUG_PCI is not set
-# CONFIG_HOTPLUG_PCI_COMPAQ is not set
-# CONFIG_HOTPLUG_PCI_COMPAQ_NVRAM is not set
-# CONFIG_HOTPLUG_PCI_SHPC is not set
-# CONFIG_HOTPLUG_PCI_SHPC_POLL_EVENT_MODE is not set
-# CONFIG_HOTPLUG_PCI_PCIE is not set
-# CONFIG_HOTPLUG_PCI_PCIE_POLL_EVENT_MODE is not set
-CONFIG_SYSVIPC=y
-# CONFIG_BSD_PROCESS_ACCT is not set
-CONFIG_SYSCTL=y
-CONFIG_KCORE_ELF=y
-# CONFIG_KCORE_AOUT is not set
-CONFIG_BINFMT_AOUT=y
-CONFIG_BINFMT_ELF=y
-# CONFIG_BINFMT_MISC is not set
-# CONFIG_OOM_KILLER is not set
-
-#
-# Memory Technology Devices (MTD)
-#
-# CONFIG_MTD is not set
-
-#
-# Parallel port support
-#
-# CONFIG_PARPORT is not set
-
-#
-# Plug and Play configuration
-#
-CONFIG_PNP=y
-# CONFIG_ISAPNP is not set
-
-#
-# Block devices
-#
-# CONFIG_BLK_DEV_FD is not set
-# CONFIG_BLK_DEV_XD is not set
-# CONFIG_PARIDE is not set
-# CONFIG_BLK_CPQ_DA is not set
-# CONFIG_BLK_CPQ_CISS_DA is not set
-# CONFIG_CISS_SCSI_TAPE is not set
-# CONFIG_CISS_MONITOR_THREAD is not set
-# CONFIG_BLK_DEV_DAC960 is not set
-# CONFIG_BLK_DEV_UMEM is not set
-# CONFIG_BLK_DEV_SX8 is not set
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_NBD=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=4096
-CONFIG_BLK_DEV_INITRD=y
-# CONFIG_BLK_STATS is not set
-
-#
-# Multi-device support (RAID and LVM)
-#
-CONFIG_MD=y
-CONFIG_BLK_DEV_MD=y
-CONFIG_MD_LINEAR=y
-# CONFIG_MD_RAID0 is not set
-CONFIG_MD_RAID1=y
-# CONFIG_MD_RAID5 is not set
-# CONFIG_MD_MULTIPATH is not set
-CONFIG_BLK_DEV_LVM=y
-
-#
-# Networking options
-#
-CONFIG_PACKET=y
-CONFIG_PACKET_MMAP=y
-# CONFIG_NETLINK_DEV is not set
-CONFIG_NETFILTER=y
-# CONFIG_NETFILTER_DEBUG is not set
-CONFIG_FILTER=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-# CONFIG_IP_MULTICAST is not set
-# CONFIG_IP_ADVANCED_ROUTER is not set
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-# CONFIG_IP_PNP_BOOTP is not set
-# CONFIG_IP_PNP_RARP is not set
-# CONFIG_NET_IPIP is not set
-# CONFIG_NET_IPGRE is not set
-# CONFIG_ARPD is not set
-# CONFIG_INET_ECN is not set
-# CONFIG_SYN_COOKIES is not set
-
-#
-# IP: Netfilter Configuration
-#
-CONFIG_IP_NF_CONNTRACK=m
-CONFIG_IP_NF_FTP=m
-# CONFIG_IP_NF_AMANDA is not set
-CONFIG_IP_NF_TFTP=m
-CONFIG_IP_NF_IRC=m
-# CONFIG_IP_NF_QUEUE is not set
-CONFIG_IP_NF_IPTABLES=y
-# CONFIG_IP_NF_MATCH_LIMIT is not set
-# CONFIG_IP_NF_MATCH_MAC is not set
-# CONFIG_IP_NF_MATCH_PKTTYPE is not set
-# CONFIG_IP_NF_MATCH_MARK is not set
-# CONFIG_IP_NF_MATCH_MULTIPORT is not set
-# CONFIG_IP_NF_MATCH_TOS is not set
-# CONFIG_IP_NF_MATCH_RECENT is not set
-# CONFIG_IP_NF_MATCH_ECN is not set
-# CONFIG_IP_NF_MATCH_DSCP is not set
-# CONFIG_IP_NF_MATCH_AH_ESP is not set
-# CONFIG_IP_NF_MATCH_LENGTH is not set
-# CONFIG_IP_NF_MATCH_TTL is not set
-# CONFIG_IP_NF_MATCH_TCPMSS is not set
-# CONFIG_IP_NF_MATCH_HELPER is not set
-CONFIG_IP_NF_MATCH_STATE=m
-CONFIG_IP_NF_MATCH_CONNTRACK=m
-# CONFIG_IP_NF_MATCH_UNCLEAN is not set
-# CONFIG_IP_NF_MATCH_OWNER is not set
-CONFIG_IP_NF_MATCH_PHYSDEV=y
-CONFIG_IP_NF_FILTER=y
-CONFIG_IP_NF_TARGET_REJECT=y
-# CONFIG_IP_NF_TARGET_MIRROR is not set
-CONFIG_IP_NF_NAT=m
-CONFIG_IP_NF_NAT_NEEDED=y
-CONFIG_IP_NF_TARGET_MASQUERADE=m
-CONFIG_IP_NF_TARGET_REDIRECT=m
-# CONFIG_IP_NF_NAT_SNMP_BASIC is not set
-CONFIG_IP_NF_NAT_IRC=m
-CONFIG_IP_NF_NAT_FTP=m
-CONFIG_IP_NF_NAT_TFTP=m
-# CONFIG_IP_NF_MANGLE is not set
-CONFIG_IP_NF_TARGET_LOG=y
-CONFIG_IP_NF_TARGET_ULOG=y
-# CONFIG_IP_NF_TARGET_TCPMSS is not set
-# CONFIG_IP_NF_ARPTABLES is not set
-
-#
-# IP: Virtual Server Configuration
-#
-# CONFIG_IP_VS is not set
-# CONFIG_IPV6 is not set
-# CONFIG_KHTTPD is not set
-
-#
-# SCTP Configuration (EXPERIMENTAL)
-#
-# CONFIG_IP_SCTP is not set
-# CONFIG_ATM is not set
-CONFIG_VLAN_8021Q=y
-# CONFIG_IPX is not set
-# CONFIG_ATALK is not set
-# CONFIG_DECNET is not set
-CONFIG_BRIDGE=y
-CONFIG_BRIDGE_NF_EBTABLES=m
-CONFIG_BRIDGE_EBT_T_FILTER=m
-CONFIG_BRIDGE_EBT_T_NAT=m
-CONFIG_BRIDGE_EBT_BROUTE=m
-CONFIG_BRIDGE_EBT_LOG=m
-CONFIG_BRIDGE_EBT_IPF=m
-CONFIG_BRIDGE_EBT_ARPF=m
-CONFIG_BRIDGE_EBT_AMONG=m
-CONFIG_BRIDGE_EBT_LIMIT=m
-CONFIG_BRIDGE_EBT_VLANF=m
-CONFIG_BRIDGE_EBT_802_3=m
-CONFIG_BRIDGE_EBT_PKTTYPE=m
-CONFIG_BRIDGE_EBT_STP=m
-CONFIG_BRIDGE_EBT_MARKF=m
-CONFIG_BRIDGE_EBT_ARPREPLY=m
-CONFIG_BRIDGE_EBT_SNAT=m
-CONFIG_BRIDGE_EBT_DNAT=m
-CONFIG_BRIDGE_EBT_REDIRECT=m
-CONFIG_BRIDGE_EBT_MARK_T=m
-# CONFIG_X25 is not set
-# CONFIG_LAPB is not set
-# CONFIG_LLC is not set
-# CONFIG_NET_DIVERT is not set
-# CONFIG_ECONET is not set
-# CONFIG_WAN_ROUTER is not set
-# CONFIG_NET_FASTROUTE is not set
-# CONFIG_NET_HW_FLOWCONTROL is not set
-
-#
-# QoS and/or fair queueing
-#
-# CONFIG_NET_SCHED is not set
-
-#
-# Network testing
-#
-# CONFIG_NET_PKTGEN is not set
-
-#
-# ATA/IDE/MFM/RLL support
-#
-CONFIG_IDE=y
-
-#
-# IDE, ATA and ATAPI Block devices
-#
-CONFIG_BLK_DEV_IDE=y
-# CONFIG_BLK_DEV_HD_IDE is not set
-# CONFIG_BLK_DEV_HD is not set
-# CONFIG_BLK_DEV_IDE_SATA is not set
-CONFIG_BLK_DEV_IDEDISK=y
-CONFIG_IDEDISK_MULTI_MODE=y
-CONFIG_IDEDISK_STROKE=y
-# CONFIG_BLK_DEV_IDECS is not set
-# CONFIG_BLK_DEV_DELKIN is not set
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_IDETAPE=y
-CONFIG_BLK_DEV_IDEFLOPPY=y
-CONFIG_BLK_DEV_IDESCSI=y
-CONFIG_IDE_TASK_IOCTL=y
-CONFIG_BLK_DEV_CMD640=y
-CONFIG_BLK_DEV_CMD640_ENHANCED=y
-# CONFIG_BLK_DEV_ISAPNP is not set
-CONFIG_BLK_DEV_IDEPCI=y
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_IDEPCI_SHARE_IRQ=y
-CONFIG_BLK_DEV_IDEDMA_PCI=y
-CONFIG_BLK_DEV_OFFBOARD=y
-# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
-CONFIG_IDEDMA_PCI_AUTO=y
-# CONFIG_IDEDMA_ONLYDISK is not set
-CONFIG_BLK_DEV_IDEDMA=y
-# CONFIG_IDEDMA_PCI_WIP is not set
-CONFIG_BLK_DEV_ADMA100=y
-CONFIG_BLK_DEV_AEC62XX=y
-CONFIG_BLK_DEV_ALI15X3=y
-CONFIG_WDC_ALI15X3=y
-CONFIG_BLK_DEV_AMD74XX=y
-CONFIG_AMD74XX_OVERRIDE=y
-# CONFIG_BLK_DEV_ATIIXP is not set
-CONFIG_BLK_DEV_CMD64X=y
-CONFIG_BLK_DEV_TRIFLEX=y
-CONFIG_BLK_DEV_CY82C693=y
-CONFIG_BLK_DEV_CS5530=y
-CONFIG_BLK_DEV_HPT34X=y
-# CONFIG_HPT34X_AUTODMA is not set
-CONFIG_BLK_DEV_HPT366=y
-CONFIG_BLK_DEV_PIIX=y
-CONFIG_BLK_DEV_NS87415=y
-# CONFIG_BLK_DEV_OPTI621 is not set
-CONFIG_BLK_DEV_PDC202XX_OLD=y
-CONFIG_PDC202XX_BURST=y
-CONFIG_BLK_DEV_PDC202XX_NEW=y
-CONFIG_PDC202XX_FORCE=y
-CONFIG_BLK_DEV_RZ1000=y
-CONFIG_BLK_DEV_SC1200=y
-CONFIG_BLK_DEV_SVWKS=y
-CONFIG_BLK_DEV_SIIMAGE=y
-CONFIG_BLK_DEV_SIS5513=y
-CONFIG_BLK_DEV_SLC90E66=y
-CONFIG_BLK_DEV_TRM290=y
-CONFIG_BLK_DEV_VIA82CXXX=y
-CONFIG_IDE_CHIPSETS=y
-# CONFIG_BLK_DEV_4DRIVES is not set
-# CONFIG_BLK_DEV_ALI14XX is not set
-# CONFIG_BLK_DEV_DTC2278 is not set
-# CONFIG_BLK_DEV_HT6560B is not set
-# CONFIG_BLK_DEV_PDC4030 is not set
-# CONFIG_BLK_DEV_QD65XX is not set
-# CONFIG_BLK_DEV_UMC8672 is not set
-CONFIG_IDEDMA_AUTO=y
-# CONFIG_IDEDMA_IVB is not set
-# CONFIG_DMA_NONPCI is not set
-CONFIG_BLK_DEV_PDC202XX=y
-# CONFIG_BLK_DEV_ATARAID is not set
-# CONFIG_BLK_DEV_ATARAID_PDC is not set
-# CONFIG_BLK_DEV_ATARAID_HPT is not set
-# CONFIG_BLK_DEV_ATARAID_MEDLEY is not set
-# CONFIG_BLK_DEV_ATARAID_SII is not set
-
-#
-# SCSI support
-#
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_SD_EXTRA_DEVS=40
-# CONFIG_CHR_DEV_ST is not set
-# CONFIG_CHR_DEV_OSST is not set
-# CONFIG_BLK_DEV_SR is not set
-CONFIG_CHR_DEV_SG=y
-# CONFIG_SCSI_DEBUG_QUEUES is not set
-# CONFIG_SCSI_MULTI_LUN is not set
-# CONFIG_SCSI_CONSTANTS is not set
-# CONFIG_SCSI_LOGGING is not set
-
-#
-# SCSI low-level drivers
-#
-# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
-# CONFIG_SCSI_7000FASST is not set
-# CONFIG_SCSI_ACARD is not set
-# CONFIG_SCSI_AHA152X is not set
-# CONFIG_SCSI_AHA1542 is not set
-# CONFIG_SCSI_AHA1740 is not set
-CONFIG_SCSI_AACRAID=y
-CONFIG_SCSI_AIC7XXX=y
-CONFIG_AIC7XXX_CMDS_PER_DEVICE=32
-CONFIG_AIC7XXX_RESET_DELAY_MS=15000
-# CONFIG_AIC7XXX_PROBE_EISA_VL is not set
-# CONFIG_AIC7XXX_BUILD_FIRMWARE is not set
-# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
-CONFIG_AIC7XXX_DEBUG_MASK=0
-# CONFIG_AIC7XXX_REG_PRETTY_PRINT is not set
-CONFIG_SCSI_AIC79XX=y
-CONFIG_AIC79XX_CMDS_PER_DEVICE=32
-CONFIG_AIC79XX_RESET_DELAY_MS=15000
-# CONFIG_AIC79XX_BUILD_FIRMWARE is not set
-# CONFIG_AIC79XX_ENABLE_RD_STRM is not set
-# CONFIG_AIC79XX_DEBUG_ENABLE is not set
-CONFIG_AIC79XX_DEBUG_MASK=0
-# CONFIG_AIC79XX_REG_PRETTY_PRINT is not set
-# CONFIG_SCSI_DPT_I2O is not set
-# CONFIG_SCSI_ADVANSYS is not set
-# CONFIG_SCSI_IN2000 is not set
-# CONFIG_SCSI_AM53C974 is not set
-CONFIG_SCSI_MEGARAID=y
-# CONFIG_SCSI_MEGARAID2 is not set
-CONFIG_SCSI_SATA=y
-# CONFIG_SCSI_SATA_AHCI is not set
-# CONFIG_SCSI_SATA_SVW is not set
-CONFIG_SCSI_ATA_PIIX=y
-# CONFIG_SCSI_SATA_NV is not set
-# CONFIG_SCSI_SATA_QSTOR is not set
-CONFIG_SCSI_SATA_PROMISE=y
-CONFIG_SCSI_SATA_SX4=y
-CONFIG_SCSI_SATA_SIL=y
-CONFIG_SCSI_SATA_SIS=y
-# CONFIG_SCSI_SATA_ULI is not set
-CONFIG_SCSI_SATA_VIA=y
-CONFIG_SCSI_SATA_VITESSE=y
-CONFIG_SCSI_BUSLOGIC=y
-# CONFIG_SCSI_OMIT_FLASHPOINT is not set
-# CONFIG_SCSI_CPQFCTS is not set
-# CONFIG_SCSI_DMX3191D is not set
-# CONFIG_SCSI_DTC3280 is not set
-# CONFIG_SCSI_EATA is not set
-# CONFIG_SCSI_EATA_DMA is not set
-# CONFIG_SCSI_EATA_PIO is not set
-# CONFIG_SCSI_FUTURE_DOMAIN is not set
-# CONFIG_SCSI_GDTH is not set
-# CONFIG_SCSI_GENERIC_NCR5380 is not set
-# CONFIG_SCSI_IPS is not set
-# CONFIG_SCSI_INITIO is not set
-# CONFIG_SCSI_INIA100 is not set
-# CONFIG_SCSI_NCR53C406A is not set
-# CONFIG_SCSI_NCR53C7xx is not set
-CONFIG_SCSI_SYM53C8XX_2=y
-CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
-CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
-CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
-# CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set
-# CONFIG_SCSI_PAS16 is not set
-# CONFIG_SCSI_PCI2000 is not set
-# CONFIG_SCSI_PCI2220I is not set
-# CONFIG_SCSI_PSI240I is not set
-# CONFIG_SCSI_QLOGIC_FAS is not set
-# CONFIG_SCSI_QLOGIC_ISP is not set
-# CONFIG_SCSI_QLOGIC_FC is not set
-# CONFIG_SCSI_QLOGIC_1280 is not set
-# CONFIG_SCSI_SEAGATE is not set
-# CONFIG_SCSI_SIM710 is not set
-# CONFIG_SCSI_SYM53C416 is not set
-# CONFIG_SCSI_DC390T is not set
-# CONFIG_SCSI_T128 is not set
-# CONFIG_SCSI_U14_34F is not set
-# CONFIG_SCSI_ULTRASTOR is not set
-# CONFIG_SCSI_NSP32 is not set
-# CONFIG_SCSI_DEBUG is not set
-
-#
-# Fusion MPT device support
-#
-# CONFIG_FUSION is not set
-# CONFIG_FUSION_BOOT is not set
-# CONFIG_FUSION_ISENSE is not set
-# CONFIG_FUSION_CTL is not set
-# CONFIG_FUSION_LAN is not set
-
-#
-# IEEE 1394 (FireWire) support (EXPERIMENTAL)
-#
-# CONFIG_IEEE1394 is not set
-
-#
-# I2O device support
-#
-# CONFIG_I2O is not set
-# CONFIG_I2O_PCI is not set
-# CONFIG_I2O_BLOCK is not set
-# CONFIG_I2O_LAN is not set
-# CONFIG_I2O_SCSI is not set
-# CONFIG_I2O_PROC is not set
-
-#
-# Network device support
-#
-CONFIG_NETDEVICES=y
-
-#
-# ARCnet devices
-#
-# CONFIG_ARCNET is not set
-# CONFIG_DUMMY is not set
-# CONFIG_BONDING is not set
-# CONFIG_EQUALIZER is not set
-# CONFIG_TUN is not set
-# CONFIG_ETHERTAP is not set
-
-#
-# Ethernet (10 or 100Mbit)
-#
-CONFIG_NET_ETHERNET=y
-# CONFIG_SUNLANCE is not set
-# CONFIG_HAPPYMEAL is not set
-# CONFIG_SUNBMAC is not set
-# CONFIG_SUNQE is not set
-# CONFIG_SUNGEM is not set
-CONFIG_NET_VENDOR_3COM=y
-# CONFIG_EL1 is not set
-# CONFIG_EL2 is not set
-# CONFIG_ELPLUS is not set
-# CONFIG_EL16 is not set
-# CONFIG_EL3 is not set
-# CONFIG_3C515 is not set
-# CONFIG_ELMC is not set
-# CONFIG_ELMC_II is not set
-CONFIG_VORTEX=y
-# CONFIG_TYPHOON is not set
-# CONFIG_LANCE is not set
-# CONFIG_NET_VENDOR_SMC is not set
-# CONFIG_NET_VENDOR_RACAL is not set
-# CONFIG_AT1700 is not set
-# CONFIG_DEPCA is not set
-# CONFIG_HP100 is not set
-# CONFIG_NET_ISA is not set
-CONFIG_NET_PCI=y
-CONFIG_PCNET32=y
-# CONFIG_AMD8111_ETH is not set
-# CONFIG_ADAPTEC_STARFIRE is not set
-# CONFIG_AC3200 is not set
-# CONFIG_APRICOT is not set
-# CONFIG_B44 is not set
-# CONFIG_CS89x0 is not set
-# CONFIG_TULIP is not set
-# CONFIG_DE4X5 is not set
-# CONFIG_DGRS is not set
-# CONFIG_DM9102 is not set
-# CONFIG_EEPRO100 is not set
-# CONFIG_EEPRO100_PIO is not set
-CONFIG_E100=y
-# CONFIG_LNE390 is not set
-# CONFIG_FEALNX is not set
-# CONFIG_NATSEMI is not set
-CONFIG_NE2K_PCI=y
-# CONFIG_FORCEDETH is not set
-# CONFIG_NE3210 is not set
-# CONFIG_ES3210 is not set
-# CONFIG_8139CP is not set
-# CONFIG_8139TOO is not set
-# CONFIG_8139TOO_PIO is not set
-# CONFIG_8139TOO_TUNE_TWISTER is not set
-# CONFIG_8139TOO_8129 is not set
-# CONFIG_8139_OLD_RX_RESET is not set
-# CONFIG_SIS900 is not set
-# CONFIG_EPIC100 is not set
-# CONFIG_SUNDANCE is not set
-# CONFIG_SUNDANCE_MMIO is not set
-# CONFIG_TLAN is not set
-# CONFIG_VIA_RHINE is not set
-# CONFIG_VIA_RHINE_MMIO is not set
-# CONFIG_WINBOND_840 is not set
-# CONFIG_NET_POCKET is not set
-
-#
-# Ethernet (1000 Mbit)
-#
-# CONFIG_ACENIC is not set
-# CONFIG_DL2K is not set
-CONFIG_E1000=y
-# CONFIG_E1000_NAPI is not set
-# CONFIG_MYRI_SBUS is not set
-# CONFIG_NS83820 is not set
-# CONFIG_HAMACHI is not set
-# CONFIG_YELLOWFIN is not set
-# CONFIG_R8169 is not set
-# CONFIG_SK98LIN is not set
-CONFIG_TIGON3=y
-# CONFIG_FDDI is not set
-# CONFIG_HIPPI is not set
-# CONFIG_PLIP is not set
-# CONFIG_PPP is not set
-# CONFIG_SLIP is not set
-
-#
-# Wireless LAN (non-hamradio)
-#
-# CONFIG_NET_RADIO is not set
-
-#
-# Token Ring devices
-#
-# CONFIG_TR is not set
-# CONFIG_NET_FC is not set
-# CONFIG_RCPCI is not set
-# CONFIG_SHAPER is not set
-
-#
-# Wan interfaces
-#
-# CONFIG_WAN is not set
-
-#
-# Amateur Radio support
-#
-# CONFIG_HAMRADIO is not set
-
-#
-# IrDA (infrared) support
-#
-# CONFIG_IRDA is not set
-
-#
-# ISDN subsystem
-#
-# CONFIG_ISDN is not set
-
-#
-# Old CD-ROM drivers (not SCSI, not IDE)
-#
-# CONFIG_CD_NO_IDESCSI is not set
-
-#
-# Input core support
-#
-# CONFIG_INPUT is not set
-# CONFIG_INPUT_KEYBDEV is not set
-# CONFIG_INPUT_MOUSEDEV is not set
-# CONFIG_INPUT_JOYDEV is not set
-# CONFIG_INPUT_EVDEV is not set
-# CONFIG_INPUT_UINPUT is not set
-
-#
-# Character devices
-#
-CONFIG_VT=y
-CONFIG_VT_CONSOLE=y
-# CONFIG_SERIAL is not set
-# CONFIG_SERIAL_EXTENDED is not set
-# CONFIG_SERIAL_NONSTANDARD is not set
-CONFIG_UNIX98_PTYS=y
-CONFIG_UNIX98_PTY_COUNT=256
-
-#
-# I2C support
-#
-# CONFIG_I2C is not set
-
-#
-# Mice
-#
-# CONFIG_BUSMOUSE is not set
-CONFIG_MOUSE=y
-CONFIG_PSMOUSE=y
-# CONFIG_82C710_MOUSE is not set
-# CONFIG_PC110_PAD is not set
-# CONFIG_MK712_MOUSE is not set
-
-#
-# Joysticks
-#
-# CONFIG_INPUT_GAMEPORT is not set
-# CONFIG_QIC02_TAPE is not set
-# CONFIG_IPMI_HANDLER is not set
-# CONFIG_IPMI_PANIC_EVENT is not set
-# CONFIG_IPMI_DEVICE_INTERFACE is not set
-# CONFIG_IPMI_KCS is not set
-# CONFIG_IPMI_WATCHDOG is not set
-
-#
-# Watchdog Cards
-#
-# CONFIG_WATCHDOG is not set
-# CONFIG_SCx200 is not set
-# CONFIG_SCx200_GPIO is not set
-# CONFIG_AMD_RNG is not set
-# CONFIG_INTEL_RNG is not set
-# CONFIG_HW_RANDOM is not set
-# CONFIG_AMD_PM768 is not set
-# CONFIG_NVRAM is not set
-# CONFIG_RTC is not set
-# CONFIG_DTLK is not set
-# CONFIG_R3964 is not set
-# CONFIG_APPLICOM is not set
-# CONFIG_SONYPI is not set
-
-#
-# Ftape, the floppy tape device driver
-#
-# CONFIG_FTAPE is not set
-# CONFIG_AGP is not set
-
-#
-# Direct Rendering Manager (XFree86 DRI support)
-#
-# CONFIG_DRM is not set
-# CONFIG_MWAVE is not set
-# CONFIG_OBMOUSE is not set
-
-#
-# Multimedia devices
-#
-# CONFIG_VIDEO_DEV is not set
-
-#
-# File systems
-#
-# CONFIG_QUOTA is not set
-# CONFIG_QFMT_V2 is not set
-CONFIG_AUTOFS_FS=y
-CONFIG_AUTOFS4_FS=y
-# CONFIG_REISERFS_FS is not set
-# CONFIG_REISERFS_CHECK is not set
-# CONFIG_REISERFS_PROC_INFO is not set
-# CONFIG_ADFS_FS is not set
-# CONFIG_ADFS_FS_RW is not set
-# CONFIG_AFFS_FS is not set
-# CONFIG_HFS_FS is not set
-# CONFIG_HFSPLUS_FS is not set
-# CONFIG_BEFS_FS is not set
-# CONFIG_BEFS_DEBUG is not set
-# CONFIG_BFS_FS is not set
-CONFIG_EXT3_FS=y
-CONFIG_JBD=y
-# CONFIG_JBD_DEBUG is not set
-CONFIG_FAT_FS=y
-CONFIG_MSDOS_FS=y
-CONFIG_UMSDOS_FS=y
-CONFIG_VFAT_FS=y
-# CONFIG_EFS_FS is not set
-# CONFIG_JFFS_FS is not set
-# CONFIG_JFFS2_FS is not set
-# CONFIG_CRAMFS is not set
-CONFIG_TMPFS=y
-CONFIG_RAMFS=y
-CONFIG_ISO9660_FS=y
-CONFIG_JOLIET=y
-CONFIG_ZISOFS=y
-# CONFIG_JFS_FS is not set
-# CONFIG_JFS_DEBUG is not set
-# CONFIG_JFS_STATISTICS is not set
-# CONFIG_MINIX_FS is not set
-# CONFIG_VXFS_FS is not set
-# CONFIG_NTFS_FS is not set
-# CONFIG_NTFS_RW is not set
-# CONFIG_HPFS_FS is not set
-CONFIG_PROC_FS=y
-# CONFIG_DEVFS_FS is not set
-# CONFIG_DEVFS_MOUNT is not set
-# CONFIG_DEVFS_DEBUG is not set
-CONFIG_DEVPTS_FS=y
-# CONFIG_QNX4FS_FS is not set
-# CONFIG_QNX4FS_RW is not set
-# CONFIG_ROMFS_FS is not set
-CONFIG_EXT2_FS=y
-# CONFIG_SYSV_FS is not set
-# CONFIG_UDF_FS is not set
-# CONFIG_UDF_RW is not set
-# CONFIG_UFS_FS is not set
-# CONFIG_UFS_FS_WRITE is not set
-# CONFIG_XFS_FS is not set
-# CONFIG_XFS_QUOTA is not set
-# CONFIG_XFS_RT is not set
-# CONFIG_XFS_TRACE is not set
-# CONFIG_XFS_DEBUG is not set
-
-#
-# Network File Systems
-#
-# CONFIG_CODA_FS is not set
-# CONFIG_INTERMEZZO_FS is not set
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-# CONFIG_NFS_DIRECTIO is not set
-CONFIG_ROOT_NFS=y
-CONFIG_NFSD=y
-CONFIG_NFSD_V3=y
-# CONFIG_NFSD_TCP is not set
-CONFIG_SUNRPC=y
-CONFIG_LOCKD=y
-CONFIG_LOCKD_V4=y
-# CONFIG_SMB_FS is not set
-# CONFIG_NCP_FS is not set
-# CONFIG_NCPFS_PACKET_SIGNING is not set
-# CONFIG_NCPFS_IOCTL_LOCKING is not set
-# CONFIG_NCPFS_STRONG is not set
-# CONFIG_NCPFS_NFS_NS is not set
-# CONFIG_NCPFS_OS2_NS is not set
-# CONFIG_NCPFS_SMALLDOS is not set
-# CONFIG_NCPFS_NLS is not set
-# CONFIG_NCPFS_EXTRAS is not set
-CONFIG_ZISOFS_FS=y
-
-#
-# Partition Types
-#
-CONFIG_PARTITION_ADVANCED=y
-# CONFIG_ACORN_PARTITION is not set
-# CONFIG_OSF_PARTITION is not set
-# CONFIG_AMIGA_PARTITION is not set
-# CONFIG_ATARI_PARTITION is not set
-# CONFIG_MAC_PARTITION is not set
-CONFIG_MSDOS_PARTITION=y
-# CONFIG_BSD_DISKLABEL is not set
-# CONFIG_MINIX_SUBPARTITION is not set
-# CONFIG_SOLARIS_X86_PARTITION is not set
-# CONFIG_UNIXWARE_DISKLABEL is not set
-# CONFIG_LDM_PARTITION is not set
-# CONFIG_SGI_PARTITION is not set
-# CONFIG_ULTRIX_PARTITION is not set
-# CONFIG_SUN_PARTITION is not set
-# CONFIG_EFI_PARTITION is not set
-# CONFIG_SMB_NLS is not set
-CONFIG_NLS=y
-
-#
-# Native Language Support
-#
-CONFIG_NLS_DEFAULT="iso8559-1"
-# CONFIG_NLS_CODEPAGE_437 is not set
-# CONFIG_NLS_CODEPAGE_737 is not set
-# CONFIG_NLS_CODEPAGE_775 is not set
-# CONFIG_NLS_CODEPAGE_850 is not set
-# CONFIG_NLS_CODEPAGE_852 is not set
-# CONFIG_NLS_CODEPAGE_855 is not set
-# CONFIG_NLS_CODEPAGE_857 is not set
-# CONFIG_NLS_CODEPAGE_860 is not set
-# CONFIG_NLS_CODEPAGE_861 is not set
-# CONFIG_NLS_CODEPAGE_862 is not set
-# CONFIG_NLS_CODEPAGE_863 is not set
-# CONFIG_NLS_CODEPAGE_864 is not set
-# CONFIG_NLS_CODEPAGE_865 is not set
-# CONFIG_NLS_CODEPAGE_866 is not set
-# CONFIG_NLS_CODEPAGE_869 is not set
-# CONFIG_NLS_CODEPAGE_936 is not set
-# CONFIG_NLS_CODEPAGE_950 is not set
-# CONFIG_NLS_CODEPAGE_932 is not set
-# CONFIG_NLS_CODEPAGE_949 is not set
-# CONFIG_NLS_CODEPAGE_874 is not set
-# CONFIG_NLS_ISO8859_8 is not set
-# CONFIG_NLS_CODEPAGE_1250 is not set
-# CONFIG_NLS_CODEPAGE_1251 is not set
-CONFIG_NLS_ISO8859_1=y
-# CONFIG_NLS_ISO8859_2 is not set
-# CONFIG_NLS_ISO8859_3 is not set
-# CONFIG_NLS_ISO8859_4 is not set
-# CONFIG_NLS_ISO8859_5 is not set
-# CONFIG_NLS_ISO8859_6 is not set
-# CONFIG_NLS_ISO8859_7 is not set
-# CONFIG_NLS_ISO8859_9 is not set
-# CONFIG_NLS_ISO8859_13 is not set
-# CONFIG_NLS_ISO8859_14 is not set
-# CONFIG_NLS_ISO8859_15 is not set
-# CONFIG_NLS_KOI8_R is not set
-# CONFIG_NLS_KOI8_U is not set
-# CONFIG_NLS_UTF8 is not set
-
-#
-# Console drivers
-#
-CONFIG_XEN_CONSOLE=y
-CONFIG_VGA_CONSOLE=y
-CONFIG_DUMMY_CONSOLE=y
-# CONFIG_VIDEO_SELECT is not set
-# CONFIG_MDA_CONSOLE is not set
-
-#
-# Frame-buffer support
-#
-# CONFIG_FB is not set
-
-#
-# Sound
-#
-# CONFIG_SOUND is not set
-
-#
-# USB support
-#
-# CONFIG_USB is not set
-
-#
-# Support for USB gadgets
-#
-# CONFIG_USB_GADGET is not set
-
-#
-# Bluetooth support
-#
-# CONFIG_BLUEZ is not set
-
-#
-# Kernel hacking
-#
-CONFIG_DEBUG_KERNEL=y
-# CONFIG_DEBUG_STACKOVERFLOW is not set
-# CONFIG_DEBUG_HIGHMEM is not set
-# CONFIG_DEBUG_SLAB is not set
-# CONFIG_DEBUG_IOVIRT is not set
-# CONFIG_MAGIC_SYSRQ is not set
-# CONFIG_DEBUG_SPINLOCK is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
-CONFIG_KALLSYMS=y
-# CONFIG_FRAME_POINTER is not set
-CONFIG_LOG_BUF_SHIFT=0
-
-#
-# Cryptographic options
-#
-CONFIG_CRYPTO=y
-CONFIG_CRYPTO_HMAC=y
-CONFIG_CRYPTO_NULL=m
-CONFIG_CRYPTO_MD4=m
-CONFIG_CRYPTO_MD5=m
-CONFIG_CRYPTO_SHA1=m
-CONFIG_CRYPTO_SHA256=m
-CONFIG_CRYPTO_SHA512=m
-# CONFIG_CRYPTO_WP512 is not set
-CONFIG_CRYPTO_DES=m
-CONFIG_CRYPTO_BLOWFISH=m
-CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_AES=m
-CONFIG_CRYPTO_CAST5=m
-CONFIG_CRYPTO_CAST6=m
-# CONFIG_CRYPTO_TEA is not set
-# CONFIG_CRYPTO_KHAZAD is not set
-# CONFIG_CRYPTO_ANUBIS is not set
-CONFIG_CRYPTO_ARC4=m
-CONFIG_CRYPTO_DEFLATE=m
-# CONFIG_CRYPTO_MICHAEL_MIC is not set
-# CONFIG_CRYPTO_TEST is not set
-
-#
-# Library routines
-#
-# CONFIG_CRC32 is not set
-CONFIG_ZLIB_INFLATE=y
-CONFIG_ZLIB_DEFLATE=m
-# CONFIG_FW_LOADER is not set
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/arch/xen/defconfig-xenU
--- a/linux-2.4.30-xen-sparse/arch/xen/defconfig-xenU Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,560 +0,0 @@
-#
-# Automatically generated make config: don't edit
-#
-CONFIG_XEN=y
-CONFIG_X86=y
-CONFIG_ISA=y
-# CONFIG_SBUS is not set
-CONFIG_UID16=y
-
-#
-# Xen
-#
-# CONFIG_XEN_PRIVILEGED_GUEST is not set
-# CONFIG_XEN_PHYSDEV_ACCESS is not set
-CONFIG_XEN_SCRUB_PAGES=y
-CONFIG_XEN_NETDEV_FRONTEND=y
-CONFIG_XEN_BLKDEV_FRONTEND=y
-CONFIG_NO_IDLE_HZ=y
-# CONFIG_FOREIGN_PAGES is not set
-CONFIG_NETDEVICES=y
-# CONFIG_VT is not set
-
-#
-# Code maturity level options
-#
-CONFIG_EXPERIMENTAL=y
-
-#
-# Loadable module support
-#
-CONFIG_MODULES=y
-CONFIG_MODVERSIONS=y
-CONFIG_KMOD=y
-
-#
-# Processor type and features
-#
-CONFIG_M686=y
-# CONFIG_MPENTIUMIII is not set
-# CONFIG_MPENTIUM4 is not set
-# CONFIG_MK7 is not set
-# CONFIG_MK8 is not set
-# CONFIG_MVIAC3_2 is not set
-CONFIG_X86_WP_WORKS_OK=y
-CONFIG_X86_INVLPG=y
-CONFIG_X86_CMPXCHG=y
-CONFIG_X86_XADD=y
-CONFIG_X86_BSWAP=y
-CONFIG_X86_POPAD_OK=y
-# CONFIG_RWSEM_GENERIC_SPINLOCK is not set
-CONFIG_RWSEM_XCHGADD_ALGORITHM=y
-CONFIG_X86_GOOD_APIC=y
-CONFIG_X86_PGE=y
-CONFIG_X86_USE_PPRO_CHECKSUM=y
-CONFIG_X86_TSC=y
-CONFIG_X86_L1_CACHE_SHIFT=5
-CONFIG_NOHIGHMEM=y
-# CONFIG_HIGHMEM4G is not set
-CONFIG_FORCE_MAX_ZONEORDER=11
-
-#
-# General setup
-#
-CONFIG_NET=y
-CONFIG_SYSVIPC=y
-# CONFIG_BSD_PROCESS_ACCT is not set
-CONFIG_SYSCTL=y
-CONFIG_KCORE_ELF=y
-# CONFIG_KCORE_AOUT is not set
-CONFIG_BINFMT_AOUT=y
-CONFIG_BINFMT_ELF=y
-# CONFIG_BINFMT_MISC is not set
-# CONFIG_OOM_KILLER is not set
-
-#
-# Networking options
-#
-CONFIG_PACKET=y
-CONFIG_PACKET_MMAP=y
-# CONFIG_NETLINK_DEV is not set
-CONFIG_NETFILTER=y
-# CONFIG_NETFILTER_DEBUG is not set
-CONFIG_FILTER=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-# CONFIG_IP_MULTICAST is not set
-# CONFIG_IP_ADVANCED_ROUTER is not set
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-# CONFIG_IP_PNP_BOOTP is not set
-# CONFIG_IP_PNP_RARP is not set
-# CONFIG_NET_IPIP is not set
-# CONFIG_NET_IPGRE is not set
-# CONFIG_ARPD is not set
-# CONFIG_INET_ECN is not set
-# CONFIG_SYN_COOKIES is not set
-
-#
-# IP: Netfilter Configuration
-#
-CONFIG_IP_NF_CONNTRACK=y
-CONFIG_IP_NF_FTP=y
-# CONFIG_IP_NF_AMANDA is not set
-CONFIG_IP_NF_TFTP=y
-CONFIG_IP_NF_IRC=y
-# CONFIG_IP_NF_QUEUE is not set
-CONFIG_IP_NF_IPTABLES=y
-# CONFIG_IP_NF_MATCH_LIMIT is not set
-# CONFIG_IP_NF_MATCH_MAC is not set
-# CONFIG_IP_NF_MATCH_PKTTYPE is not set
-# CONFIG_IP_NF_MATCH_MARK is not set
-# CONFIG_IP_NF_MATCH_MULTIPORT is not set
-# CONFIG_IP_NF_MATCH_TOS is not set
-# CONFIG_IP_NF_MATCH_RECENT is not set
-# CONFIG_IP_NF_MATCH_ECN is not set
-# CONFIG_IP_NF_MATCH_DSCP is not set
-# CONFIG_IP_NF_MATCH_AH_ESP is not set
-# CONFIG_IP_NF_MATCH_LENGTH is not set
-# CONFIG_IP_NF_MATCH_TTL is not set
-# CONFIG_IP_NF_MATCH_TCPMSS is not set
-# CONFIG_IP_NF_MATCH_HELPER is not set
-CONFIG_IP_NF_MATCH_STATE=y
-CONFIG_IP_NF_MATCH_CONNTRACK=y
-# CONFIG_IP_NF_MATCH_UNCLEAN is not set
-# CONFIG_IP_NF_MATCH_OWNER is not set
-CONFIG_IP_NF_FILTER=y
-CONFIG_IP_NF_TARGET_REJECT=y
-# CONFIG_IP_NF_TARGET_MIRROR is not set
-CONFIG_IP_NF_NAT=y
-CONFIG_IP_NF_NAT_NEEDED=y
-CONFIG_IP_NF_TARGET_MASQUERADE=y
-CONFIG_IP_NF_TARGET_REDIRECT=y
-# CONFIG_IP_NF_NAT_SNMP_BASIC is not set
-CONFIG_IP_NF_NAT_IRC=y
-CONFIG_IP_NF_NAT_FTP=y
-CONFIG_IP_NF_NAT_TFTP=y
-# CONFIG_IP_NF_MANGLE is not set
-CONFIG_IP_NF_TARGET_LOG=y
-CONFIG_IP_NF_TARGET_ULOG=y
-# CONFIG_IP_NF_TARGET_TCPMSS is not set
-# CONFIG_IP_NF_ARPTABLES is not set
-
-#
-# IP: Virtual Server Configuration
-#
-# CONFIG_IP_VS is not set
-# CONFIG_IPV6 is not set
-# CONFIG_KHTTPD is not set
-
-#
-# SCTP Configuration (EXPERIMENTAL)
-#
-# CONFIG_IP_SCTP is not set
-# CONFIG_ATM is not set
-CONFIG_VLAN_8021Q=y
-
-#
-#
-#
-# CONFIG_IPX is not set
-# CONFIG_ATALK is not set
-# CONFIG_DECNET is not set
-# CONFIG_BRIDGE is not set
-# CONFIG_X25 is not set
-# CONFIG_LAPB is not set
-# CONFIG_LLC is not set
-# CONFIG_NET_DIVERT is not set
-# CONFIG_ECONET is not set
-# CONFIG_WAN_ROUTER is not set
-# CONFIG_NET_FASTROUTE is not set
-# CONFIG_NET_HW_FLOWCONTROL is not set
-
-#
-# QoS and/or fair queueing
-#
-# CONFIG_NET_SCHED is not set
-
-#
-# Network testing
-#
-# CONFIG_NET_PKTGEN is not set
-
-#
-# SCSI support
-#
-CONFIG_SCSI=y
-
-#
-# SCSI support type (disk, tape, CD-ROM)
-#
-CONFIG_BLK_DEV_SD=y
-CONFIG_SD_EXTRA_DEVS=40
-# CONFIG_CHR_DEV_ST is not set
-# CONFIG_CHR_DEV_OSST is not set
-# CONFIG_BLK_DEV_SR is not set
-CONFIG_CHR_DEV_SG=y
-
-#
-# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-#
-# CONFIG_SCSI_DEBUG_QUEUES is not set
-# CONFIG_SCSI_MULTI_LUN is not set
-# CONFIG_SCSI_CONSTANTS is not set
-# CONFIG_SCSI_LOGGING is not set
-
-#
-# SCSI low-level drivers
-#
-# CONFIG_SCSI_7000FASST is not set
-# CONFIG_SCSI_ACARD is not set
-# CONFIG_SCSI_AHA152X is not set
-# CONFIG_SCSI_AHA1542 is not set
-# CONFIG_SCSI_AHA1740 is not set
-# CONFIG_SCSI_AACRAID is not set
-# CONFIG_SCSI_AIC7XXX is not set
-# CONFIG_SCSI_AIC79XX is not set
-# CONFIG_SCSI_AIC7XXX_OLD is not set
-# CONFIG_SCSI_DPT_I2O is not set
-# CONFIG_SCSI_ADVANSYS is not set
-# CONFIG_SCSI_IN2000 is not set
-# CONFIG_SCSI_AM53C974 is not set
-# CONFIG_SCSI_MEGARAID is not set
-# CONFIG_SCSI_MEGARAID2 is not set
-# CONFIG_SCSI_SATA is not set
-# CONFIG_SCSI_SATA_AHCI is not set
-# CONFIG_SCSI_SATA_SVW is not set
-# CONFIG_SCSI_ATA_PIIX is not set
-# CONFIG_SCSI_SATA_NV is not set
-# CONFIG_SCSI_SATA_QSTOR is not set
-# CONFIG_SCSI_SATA_PROMISE is not set
-# CONFIG_SCSI_SATA_SX4 is not set
-# CONFIG_SCSI_SATA_SIL is not set
-# CONFIG_SCSI_SATA_SIS is not set
-# CONFIG_SCSI_SATA_ULI is not set
-# CONFIG_SCSI_SATA_VIA is not set
-# CONFIG_SCSI_SATA_VITESSE is not set
-# CONFIG_SCSI_BUSLOGIC is not set
-# CONFIG_SCSI_DMX3191D is not set
-# CONFIG_SCSI_DTC3280 is not set
-# CONFIG_SCSI_EATA is not set
-# CONFIG_SCSI_EATA_DMA is not set
-# CONFIG_SCSI_EATA_PIO is not set
-# CONFIG_SCSI_FUTURE_DOMAIN is not set
-# CONFIG_SCSI_GDTH is not set
-# CONFIG_SCSI_GENERIC_NCR5380 is not set
-# CONFIG_SCSI_IPS is not set
-# CONFIG_SCSI_INITIO is not set
-# CONFIG_SCSI_INIA100 is not set
-# CONFIG_SCSI_PPA is not set
-# CONFIG_SCSI_IMM is not set
-# CONFIG_SCSI_NCR53C406A is not set
-# CONFIG_SCSI_NCR53C7xx is not set
-# CONFIG_SCSI_PAS16 is not set
-# CONFIG_SCSI_PCI2000 is not set
-# CONFIG_SCSI_PCI2220I is not set
-# CONFIG_SCSI_PSI240I is not set
-# CONFIG_SCSI_QLOGIC_FAS is not set
-# CONFIG_SCSI_SEAGATE is not set
-# CONFIG_SCSI_SIM710 is not set
-# CONFIG_SCSI_SYM53C416 is not set
-# CONFIG_SCSI_T128 is not set
-# CONFIG_SCSI_U14_34F is not set
-# CONFIG_SCSI_ULTRASTOR is not set
-# CONFIG_SCSI_NSP32 is not set
-# CONFIG_SCSI_DEBUG is not set
-
-#
-# Block devices
-#
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_NBD=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=4096
-CONFIG_BLK_DEV_INITRD=y
-# CONFIG_BLK_STATS is not set
-# CONFIG_BLK_DEV_HD is not set
-
-#
-# Character devices
-#
-# CONFIG_VT is not set
-# CONFIG_SERIAL is not set
-# CONFIG_SERIAL_EXTENDED is not set
-# CONFIG_SERIAL_NONSTANDARD is not set
-CONFIG_UNIX98_PTYS=y
-CONFIG_UNIX98_PTY_COUNT=256
-# CONFIG_PRINTER is not set
-# CONFIG_PPDEV is not set
-# CONFIG_TIPAR is not set
-
-#
-# I2C support
-#
-# CONFIG_I2C is not set
-
-#
-# Mice
-#
-# CONFIG_BUSMOUSE is not set
-CONFIG_MOUSE=y
-CONFIG_PSMOUSE=y
-# CONFIG_82C710_MOUSE is not set
-# CONFIG_PC110_PAD is not set
-# CONFIG_MK712_MOUSE is not set
-
-#
-# Joysticks
-#
-# CONFIG_INPUT_GAMEPORT is not set
-# CONFIG_INPUT_NS558 is not set
-# CONFIG_INPUT_LIGHTNING is not set
-# CONFIG_INPUT_PCIGAME is not set
-# CONFIG_INPUT_CS461X is not set
-# CONFIG_INPUT_EMU10K1 is not set
-# CONFIG_INPUT_SERIO is not set
-# CONFIG_INPUT_SERPORT is not set
-
-#
-# Joysticks
-#
-# CONFIG_INPUT_ANALOG is not set
-# CONFIG_INPUT_A3D is not set
-# CONFIG_INPUT_ADI is not set
-# CONFIG_INPUT_COBRA is not set
-# CONFIG_INPUT_GF2K is not set
-# CONFIG_INPUT_GRIP is not set
-# CONFIG_INPUT_INTERACT is not set
-# CONFIG_INPUT_TMDC is not set
-# CONFIG_INPUT_SIDEWINDER is not set
-# CONFIG_INPUT_IFORCE_USB is not set
-# CONFIG_INPUT_IFORCE_232 is not set
-# CONFIG_INPUT_WARRIOR is not set
-# CONFIG_INPUT_MAGELLAN is not set
-# CONFIG_INPUT_SPACEORB is not set
-# CONFIG_INPUT_SPACEBALL is not set
-# CONFIG_INPUT_STINGER is not set
-# CONFIG_INPUT_DB9 is not set
-# CONFIG_INPUT_GAMECON is not set
-# CONFIG_INPUT_TURBOGRAFX is not set
-# CONFIG_QIC02_TAPE is not set
-# CONFIG_IPMI_HANDLER is not set
-# CONFIG_IPMI_PANIC_EVENT is not set
-# CONFIG_IPMI_DEVICE_INTERFACE is not set
-# CONFIG_IPMI_KCS is not set
-# CONFIG_IPMI_WATCHDOG is not set
-
-#
-# Watchdog Cards
-#
-# CONFIG_WATCHDOG is not set
-# CONFIG_SCx200 is not set
-# CONFIG_SCx200_GPIO is not set
-# CONFIG_AMD_RNG is not set
-# CONFIG_INTEL_RNG is not set
-# CONFIG_HW_RANDOM is not set
-# CONFIG_AMD_PM768 is not set
-# CONFIG_NVRAM is not set
-# CONFIG_RTC is not set
-# CONFIG_DTLK is not set
-# CONFIG_R3964 is not set
-# CONFIG_APPLICOM is not set
-# CONFIG_SONYPI is not set
-
-#
-# Ftape, the floppy tape device driver
-#
-# CONFIG_FTAPE is not set
-# CONFIG_AGP is not set
-
-#
-# Direct Rendering Manager (XFree86 DRI support)
-#
-# CONFIG_DRM is not set
-# CONFIG_MWAVE is not set
-# CONFIG_OBMOUSE is not set
-
-#
-# File systems
-#
-# CONFIG_QUOTA is not set
-# CONFIG_QFMT_V2 is not set
-CONFIG_AUTOFS_FS=y
-CONFIG_AUTOFS4_FS=y
-# CONFIG_REISERFS_FS is not set
-# CONFIG_REISERFS_CHECK is not set
-# CONFIG_REISERFS_PROC_INFO is not set
-# CONFIG_ADFS_FS is not set
-# CONFIG_ADFS_FS_RW is not set
-# CONFIG_AFFS_FS is not set
-# CONFIG_HFS_FS is not set
-# CONFIG_HFSPLUS_FS is not set
-# CONFIG_BEFS_FS is not set
-# CONFIG_BEFS_DEBUG is not set
-# CONFIG_BFS_FS is not set
-CONFIG_EXT3_FS=y
-CONFIG_JBD=y
-# CONFIG_JBD_DEBUG is not set
-CONFIG_FAT_FS=y
-CONFIG_MSDOS_FS=y
-CONFIG_UMSDOS_FS=y
-CONFIG_VFAT_FS=y
-# CONFIG_EFS_FS is not set
-# CONFIG_JFFS_FS is not set
-# CONFIG_JFFS2_FS is not set
-# CONFIG_CRAMFS is not set
-CONFIG_TMPFS=y
-CONFIG_RAMFS=y
-CONFIG_ISO9660_FS=y
-CONFIG_JOLIET=y
-CONFIG_ZISOFS=y
-# CONFIG_JFS_FS is not set
-# CONFIG_JFS_DEBUG is not set
-# CONFIG_JFS_STATISTICS is not set
-# CONFIG_MINIX_FS is not set
-# CONFIG_VXFS_FS is not set
-# CONFIG_NTFS_FS is not set
-# CONFIG_NTFS_RW is not set
-# CONFIG_HPFS_FS is not set
-CONFIG_PROC_FS=y
-# CONFIG_DEVFS_FS is not set
-# CONFIG_DEVFS_MOUNT is not set
-# CONFIG_DEVFS_DEBUG is not set
-CONFIG_DEVPTS_FS=y
-# CONFIG_QNX4FS_FS is not set
-# CONFIG_QNX4FS_RW is not set
-# CONFIG_ROMFS_FS is not set
-CONFIG_EXT2_FS=y
-# CONFIG_SYSV_FS is not set
-# CONFIG_UDF_FS is not set
-# CONFIG_UDF_RW is not set
-# CONFIG_UFS_FS is not set
-# CONFIG_UFS_FS_WRITE is not set
-# CONFIG_XFS_FS is not set
-# CONFIG_XFS_QUOTA is not set
-# CONFIG_XFS_RT is not set
-# CONFIG_XFS_TRACE is not set
-# CONFIG_XFS_DEBUG is not set
-
-#
-# Network File Systems
-#
-# CONFIG_CODA_FS is not set
-# CONFIG_INTERMEZZO_FS is not set
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-# CONFIG_NFS_DIRECTIO is not set
-CONFIG_ROOT_NFS=y
-CONFIG_NFSD=y
-CONFIG_NFSD_V3=y
-# CONFIG_NFSD_TCP is not set
-CONFIG_SUNRPC=y
-CONFIG_LOCKD=y
-CONFIG_LOCKD_V4=y
-# CONFIG_SMB_FS is not set
-# CONFIG_NCP_FS is not set
-# CONFIG_NCPFS_PACKET_SIGNING is not set
-# CONFIG_NCPFS_IOCTL_LOCKING is not set
-# CONFIG_NCPFS_STRONG is not set
-# CONFIG_NCPFS_NFS_NS is not set
-# CONFIG_NCPFS_OS2_NS is not set
-# CONFIG_NCPFS_SMALLDOS is not set
-# CONFIG_NCPFS_NLS is not set
-# CONFIG_NCPFS_EXTRAS is not set
-CONFIG_ZISOFS_FS=y
-
-#
-# Partition Types
-#
-CONFIG_PARTITION_ADVANCED=y
-# CONFIG_ACORN_PARTITION is not set
-# CONFIG_OSF_PARTITION is not set
-# CONFIG_AMIGA_PARTITION is not set
-# CONFIG_ATARI_PARTITION is not set
-# CONFIG_MAC_PARTITION is not set
-CONFIG_MSDOS_PARTITION=y
-# CONFIG_BSD_DISKLABEL is not set
-# CONFIG_MINIX_SUBPARTITION is not set
-# CONFIG_SOLARIS_X86_PARTITION is not set
-# CONFIG_UNIXWARE_DISKLABEL is not set
-# CONFIG_LDM_PARTITION is not set
-# CONFIG_SGI_PARTITION is not set
-# CONFIG_ULTRIX_PARTITION is not set
-# CONFIG_SUN_PARTITION is not set
-# CONFIG_EFI_PARTITION is not set
-# CONFIG_SMB_NLS is not set
-CONFIG_NLS=y
-
-#
-# Native Language Support
-#
-CONFIG_NLS_DEFAULT="iso8559-1"
-# CONFIG_NLS_CODEPAGE_437 is not set
-# CONFIG_NLS_CODEPAGE_737 is not set
-# CONFIG_NLS_CODEPAGE_775 is not set
-# CONFIG_NLS_CODEPAGE_850 is not set
-# CONFIG_NLS_CODEPAGE_852 is not set
-# CONFIG_NLS_CODEPAGE_855 is not set
-# CONFIG_NLS_CODEPAGE_857 is not set
-# CONFIG_NLS_CODEPAGE_860 is not set
-# CONFIG_NLS_CODEPAGE_861 is not set
-# CONFIG_NLS_CODEPAGE_862 is not set
-# CONFIG_NLS_CODEPAGE_863 is not set
-# CONFIG_NLS_CODEPAGE_864 is not set
-# CONFIG_NLS_CODEPAGE_865 is not set
-# CONFIG_NLS_CODEPAGE_866 is not set
-# CONFIG_NLS_CODEPAGE_869 is not set
-# CONFIG_NLS_CODEPAGE_936 is not set
-# CONFIG_NLS_CODEPAGE_950 is not set
-# CONFIG_NLS_CODEPAGE_932 is not set
-# CONFIG_NLS_CODEPAGE_949 is not set
-# CONFIG_NLS_CODEPAGE_874 is not set
-# CONFIG_NLS_ISO8859_8 is not set
-# CONFIG_NLS_CODEPAGE_1250 is not set
-# CONFIG_NLS_CODEPAGE_1251 is not set
-CONFIG_NLS_ISO8859_1=y
-# CONFIG_NLS_ISO8859_2 is not set
-# CONFIG_NLS_ISO8859_3 is not set
-# CONFIG_NLS_ISO8859_4 is not set
-# CONFIG_NLS_ISO8859_5 is not set
-# CONFIG_NLS_ISO8859_6 is not set
-# CONFIG_NLS_ISO8859_7 is not set
-# CONFIG_NLS_ISO8859_9 is not set
-# CONFIG_NLS_ISO8859_13 is not set
-# CONFIG_NLS_ISO8859_14 is not set
-# CONFIG_NLS_ISO8859_15 is not set
-# CONFIG_NLS_KOI8_R is not set
-# CONFIG_NLS_KOI8_U is not set
-# CONFIG_NLS_UTF8 is not set
-
-#
-# Console drivers
-#
-CONFIG_XEN_CONSOLE=y
-
-#
-# Kernel hacking
-#
-CONFIG_DEBUG_KERNEL=y
-# CONFIG_DEBUG_STACKOVERFLOW is not set
-# CONFIG_DEBUG_HIGHMEM is not set
-# CONFIG_DEBUG_SLAB is not set
-# CONFIG_DEBUG_IOVIRT is not set
-# CONFIG_MAGIC_SYSRQ is not set
-# CONFIG_DEBUG_SPINLOCK is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
-CONFIG_KALLSYMS=y
-# CONFIG_FRAME_POINTER is not set
-CONFIG_LOG_BUF_SHIFT=0
-
-#
-# Cryptographic options
-#
-# CONFIG_CRYPTO is not set
-
-#
-# Library routines
-#
-# CONFIG_CRC32 is not set
-CONFIG_ZLIB_INFLATE=y
-# CONFIG_ZLIB_DEFLATE is not set
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/arch/xen/drivers/balloon/Makefile
--- a/linux-2.4.30-xen-sparse/arch/xen/drivers/balloon/Makefile Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,4 +0,0 @@
-O_TARGET := drv.o
-export-objs := balloon.o
-obj-y := balloon.o
-include $(TOPDIR)/Rules.make
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/arch/xen/drivers/blkif/Makefile
--- a/linux-2.4.30-xen-sparse/arch/xen/drivers/blkif/Makefile Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,10 +0,0 @@
-
-O_TARGET := drv.o
-
-subdir-$(CONFIG_XEN_BLKDEV_FRONTEND) += frontend
-obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += frontend/drv.o
-
-subdir-$(CONFIG_XEN_PHYSDEV_ACCESS) += backend
-obj-$(CONFIG_XEN_PHYSDEV_ACCESS) += backend/drv.o
-
-include $(TOPDIR)/Rules.make
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/arch/xen/drivers/blkif/backend/Makefile
--- a/linux-2.4.30-xen-sparse/arch/xen/drivers/blkif/backend/Makefile Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,3 +0,0 @@
-O_TARGET := drv.o
-obj-y := main.o control.o interface.o vbd.o
-include $(TOPDIR)/Rules.make
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/arch/xen/drivers/blkif/frontend/Makefile
--- a/linux-2.4.30-xen-sparse/arch/xen/drivers/blkif/frontend/Makefile Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,3 +0,0 @@
-O_TARGET := drv.o
-obj-y := blkfront.o vbd.o
-include $(TOPDIR)/Rules.make
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/arch/xen/drivers/blkif/frontend/common.h
--- a/linux-2.4.30-xen-sparse/arch/xen/drivers/blkif/frontend/common.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,93 +0,0 @@
-/******************************************************************************
- * arch/xen/drivers/blkif/frontend/common.h
- *
- * Shared definitions between all levels of XenoLinux Virtual block devices.
- */
-
-#ifndef __XEN_DRIVERS_COMMON_H__
-#define __XEN_DRIVERS_COMMON_H__
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/hdreg.h>
-#include <linux/blkdev.h>
-#include <linux/major.h>
-#include <asm-xen/xen-public/xen.h>
-#include <asm/io.h>
-#include <asm/atomic.h>
-#include <asm/uaccess.h>
-#include <asm-xen/xen-public/io/blkif.h>
-
-#if 1
-#define IPRINTK(fmt, args...) \
- printk(KERN_INFO "xen_blk: " fmt, ##args)
-#else
-#define IPRINTK(fmt, args...) ((void)0)
-#endif
-
-#if 1
-#define WPRINTK(fmt, args...) \
- printk(KERN_WARNING "xen_blk: " fmt, ##args)
-#else
-#define WPRINTK(fmt, args...) ((void)0)
-#endif
-
-#if 0
-#define DPRINTK(_f, _a...) printk ( KERN_ALERT _f , ## _a )
-#else
-#define DPRINTK(_f, _a...) ((void)0)
-#endif
-
-#if 0
-#define DPRINTK_IOCTL(_f, _a...) printk ( KERN_ALERT _f , ## _a )
-#else
-#define DPRINTK_IOCTL(_f, _a...) ((void)0)
-#endif
-
-/* Private gendisk->flags[] values. */
-#define GENHD_FL_XEN 2 /* Is unit a Xen block device? */
-#define GENHD_FL_VIRT_PARTNS 4 /* Are unit partitions virtual? */
-
-/*
- * We have one of these per vbd, whether ide, scsi or 'other'.
- * They hang in an array off the gendisk structure. We may end up putting
- * all kinds of interesting stuff here :-)
- */
-typedef struct xl_disk {
- int usage;
-} xl_disk_t;
-
-extern int blkif_open(struct inode *inode, struct file *filep);
-extern int blkif_release(struct inode *inode, struct file *filep);
-extern int blkif_ioctl(struct inode *inode, struct file *filep,
- unsigned command, unsigned long argument);
-extern int blkif_check(kdev_t dev);
-extern int blkif_revalidate(kdev_t dev);
-extern void blkif_control_send(blkif_request_t *req, blkif_response_t *rsp);
-extern void do_blkif_request (request_queue_t *rq);
-
-extern void xlvbd_update_vbds(void);
-
-static inline xl_disk_t *xldev_to_xldisk(kdev_t xldev)
-{
- struct gendisk *gd = get_gendisk(xldev);
-
- if ( gd == NULL )
- return NULL;
-
- return (xl_disk_t *)gd->real_devices +
- (MINOR(xldev) >> gd->minor_shift);
-}
-
-
-/* Virtual block-device subsystem. */
-extern int xlvbd_init(void);
-extern void xlvbd_cleanup(void);
-
-#endif /* __XEN_DRIVERS_COMMON_H__ */
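
The xldev_to_xldisk() helper above locates per-unit state by shifting the
minor number right by the gendisk's minor_shift; the complementary low bits
give the partition number within the unit, as xlvbd_init_device() in vbd.c
below computes. A minimal standalone sketch of that arithmetic (not part of
the patch), assuming the 4-bit SCSI-style shift from XLSCSI_PARTN_SHIFT; the
minor number used is illustrative only:

    /* Split a minor number into unit index and partition number. */
    #include <stdio.h>

    #define PARTN_SHIFT 4               /* cf. XLSCSI_PARTN_SHIFT in vbd.c */
    #define MAX_PART (1 << PARTN_SHIFT)

    int main(void)
    {
        int minor  = 0x13;                    /* hypothetical minor number */
        int unit   = minor >> PARTN_SHIFT;    /* index into real_devices[] */
        int partno = minor & (MAX_PART - 1);  /* partition within the unit */
        printf("minor %#x -> unit %d, partition %d\n", minor, unit, partno);
        return 0;
    }
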
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/arch/xen/drivers/blkif/frontend/vbd.c
--- a/linux-2.4.30-xen-sparse/arch/xen/drivers/blkif/frontend/vbd.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,564 +0,0 @@
-/******************************************************************************
- * arch/xen/drivers/blkif/frontend/vbd.c
- *
- * Xenolinux virtual block-device driver.
- *
- * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
- * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
- */
-
-#include "common.h"
-#include <linux/blk.h>
-
-/*
- * For convenience we distinguish between ide, scsi and 'other' (i.e.
- * potentially combinations of the two) in the naming scheme and in a few
- * other places (like default readahead, etc).
- */
-#define XLIDE_MAJOR_NAME "hd"
-#define XLSCSI_MAJOR_NAME "sd"
-#define XLVBD_MAJOR_NAME "xvd"
-
-#define XLIDE_DEVS_PER_MAJOR 2
-#define XLSCSI_DEVS_PER_MAJOR 16
-#define XLVBD_DEVS_PER_MAJOR 16
-
-#define XLIDE_PARTN_SHIFT 6 /* amount to shift minor to get 'real' minor */
-#define XLIDE_MAX_PART (1 << XLIDE_PARTN_SHIFT) /* minors per ide vbd */
-
-#define XLSCSI_PARTN_SHIFT 4 /* amount to shift minor to get 'real' minor */
-#define XLSCSI_MAX_PART (1 << XLSCSI_PARTN_SHIFT) /* minors per scsi vbd */
-
-#define XLVBD_PARTN_SHIFT 4 /* amount to shift minor to get 'real' minor */
-#define XLVBD_MAX_PART (1 << XLVBD_PARTN_SHIFT) /* minors per 'other' vbd */
-
-/* The below are for the generic drivers/block/ll_rw_block.c code. */
-static int xlide_blksize_size[256];
-static int xlide_hardsect_size[256];
-static int xlide_max_sectors[256];
-static int xlscsi_blksize_size[256];
-static int xlscsi_hardsect_size[256];
-static int xlscsi_max_sectors[256];
-static int xlvbd_blksize_size[256];
-static int xlvbd_hardsect_size[256];
-static int xlvbd_max_sectors[256];
-
-/* Information about our VBDs. */
-#define MAX_VBDS 64
-static int nr_vbds;
-static vdisk_t *vbd_info;
-
-static struct block_device_operations xlvbd_block_fops =
-{
- open: blkif_open,
- release: blkif_release,
- ioctl: blkif_ioctl,
- check_media_change: blkif_check,
- revalidate: blkif_revalidate,
-};
-
-static int xlvbd_get_vbd_info(vdisk_t *disk_info)
-{
- vdisk_t *buf = (vdisk_t *)__get_free_page(GFP_KERNEL);
- blkif_request_t req;
- blkif_response_t rsp;
- int nr;
-
- memset(&req, 0, sizeof(req));
- req.operation = BLKIF_OP_PROBE;
- req.nr_segments = 1;
- req.frame_and_sects[0] = virt_to_machine(buf) | 7;
-
- blkif_control_send(&req, &rsp);
-
- if ( rsp.status <= 0 )
- {
- printk(KERN_ALERT "Could not probe disks (%d)\n", rsp.status);
- return -1;
- }
-
- if ( (nr = rsp.status) > MAX_VBDS )
- nr = MAX_VBDS;
- memcpy(disk_info, buf, nr * sizeof(vdisk_t));
-
- return nr;
-}
-
-/*
- * xlvbd_init_device - initialise a VBD device
- * @disk: a vdisk_t describing the VBD
- *
- * Takes a vdisk_t * that describes a VBD the domain has access to.
- * Performs appropriate initialisation and registration of the device.
- *
- * Care needs to be taken when making re-entrant calls to ensure that
- * corruption does not occur. Also, devices that are in use should not have
- * their details updated. This is the caller's responsibility.
- */
-static int xlvbd_init_device(vdisk_t *xd)
-{
- int device = xd->device;
- int major = MAJOR(device);
- int minor = MINOR(device);
- int is_ide = IDE_DISK_MAJOR(major); /* is this an ide device? */
- int is_scsi= SCSI_BLK_MAJOR(major); /* is this a scsi device? */
- char *major_name;
- struct gendisk *gd;
- struct block_device *bd;
- xl_disk_t *disk;
- int i, rc = 0, max_part, partno;
- unsigned long capacity;
-
- unsigned char buf[64];
-
- if ( (bd = bdget(device)) == NULL )
- return -1;
-
- /*
- * Update of partition info, and check of usage count, is protected
- * by the per-block-device semaphore.
- */
- down(&bd->bd_sem);
-
- if ( ((disk = xldev_to_xldisk(device)) != NULL) && (disk->usage != 0) )
- {
- printk(KERN_ALERT "VBD update failed - in use [dev=%x]\n", device);
- rc = -1;
- goto out;
- }
-
- if ( is_ide ) {
-
- major_name = XLIDE_MAJOR_NAME;
- max_part = XLIDE_MAX_PART;
-
- } else if ( is_scsi ) {
-
- major_name = XLSCSI_MAJOR_NAME;
- max_part = XLSCSI_MAX_PART;
-
- } else if (VDISK_VIRTUAL(xd->info)) {
-
- major_name = XLVBD_MAJOR_NAME;
- max_part = XLVBD_MAX_PART;
-
- } else {
-
- /* SMH: hmm - probably a CCISS driver or sim; assume CCISS for now */
- printk(KERN_ALERT "Assuming device %02x:%02x is CCISS/SCSI\n",
- major, minor);
- is_scsi = 1;
- major_name = "cciss";
- max_part = XLSCSI_MAX_PART;
-
- }
-
- partno = minor & (max_part - 1);
-
- if ( (gd = get_gendisk(device)) == NULL )
- {
- rc = register_blkdev(major, major_name, &xlvbd_block_fops);
- if ( rc < 0 )
- {
- printk(KERN_ALERT "XL VBD: can't get major %d\n", major);
- goto out;
- }
-
- if ( is_ide )
- {
- blksize_size[major] = xlide_blksize_size;
- hardsect_size[major] = xlide_hardsect_size;
- max_sectors[major] = xlide_max_sectors;
- read_ahead[major] = 8;
- }
- else if ( is_scsi )
- {
- blksize_size[major] = xlscsi_blksize_size;
- hardsect_size[major] = xlscsi_hardsect_size;
- max_sectors[major] = xlscsi_max_sectors;
- read_ahead[major] = 8;
- }
- else
- {
- blksize_size[major] = xlvbd_blksize_size;
- hardsect_size[major] = xlvbd_hardsect_size;
- max_sectors[major] = xlvbd_max_sectors;
- read_ahead[major] = 8;
- }
-
- blk_init_queue(BLK_DEFAULT_QUEUE(major), do_blkif_request);
-
- /*
- * Turn off barking 'headactive' mode. We dequeue buffer heads as
- * soon as we pass them to the back-end driver.
- */
- blk_queue_headactive(BLK_DEFAULT_QUEUE(major), 0);
-
- /* Construct an appropriate gendisk structure. */
- gd = kmalloc(sizeof(struct gendisk), GFP_KERNEL);
- gd->major = major;
- gd->major_name = major_name;
-
- gd->max_p = max_part;
- if ( is_ide )
- {
- gd->minor_shift = XLIDE_PARTN_SHIFT;
- gd->nr_real = XLIDE_DEVS_PER_MAJOR;
- }
- else if ( is_scsi )
- {
- gd->minor_shift = XLSCSI_PARTN_SHIFT;
- gd->nr_real = XLSCSI_DEVS_PER_MAJOR;
- }
- else
- {
- gd->minor_shift = XLVBD_PARTN_SHIFT;
- gd->nr_real = XLVBD_DEVS_PER_MAJOR;
- }
-
- /*
- ** The sizes[] and part[] arrays hold the sizes and other
- ** information about every partition with this 'major' (i.e.
- ** every disk sharing the 8 bit prefix * max partns per disk)
- */
- gd->sizes = kmalloc(max_part*gd->nr_real*sizeof(int), GFP_KERNEL);
- gd->part = kmalloc(max_part*gd->nr_real*sizeof(struct hd_struct),
- GFP_KERNEL);
- memset(gd->sizes, 0, max_part * gd->nr_real * sizeof(int));
- memset(gd->part, 0, max_part * gd->nr_real
- * sizeof(struct hd_struct));
-
-
- gd->real_devices = kmalloc(gd->nr_real * sizeof(xl_disk_t),
- GFP_KERNEL);
- memset(gd->real_devices, 0, gd->nr_real * sizeof(xl_disk_t));
-
- gd->next = NULL;
- gd->fops = &xlvbd_block_fops;
-
- gd->de_arr = kmalloc(gd->nr_real * sizeof(*gd->de_arr),
- GFP_KERNEL);
- gd->flags = kmalloc(gd->nr_real * sizeof(*gd->flags), GFP_KERNEL);
-
- memset(gd->de_arr, 0, gd->nr_real * sizeof(*gd->de_arr));
- memset(gd->flags, 0, gd->nr_real * sizeof(*gd->flags));
-
- add_gendisk(gd);
-
- blk_size[major] = gd->sizes;
- }
-
- if ( VDISK_READONLY(xd->info) )
- set_device_ro(device, 1);
-
- gd->flags[minor >> gd->minor_shift] |= GENHD_FL_XEN;
-
- /* NB. Linux 2.4 only handles 32-bit sector offsets and capacities. */
- capacity = (unsigned long)xd->capacity;
-
- if ( partno != 0 )
- {
- /*
- * If this was previously set up as a real disc we will have set
- * up partition-table information. Virtual partitions override
- * 'real' partitions, and the two cannot coexist on a device.
- */
- if ( !(gd->flags[minor >> gd->minor_shift] & GENHD_FL_VIRT_PARTNS) &&
- (gd->sizes[minor & ~(max_part-1)] != 0) )
- {
- /*
- * Any non-zero sub-partition entries must be cleaned out before
- * installing 'virtual' partition entries. The two types cannot
- * coexist, and virtual partitions are favoured.
- */
- kdev_t dev = device & ~(max_part-1);
- for ( i = max_part - 1; i > 0; i-- )
- {
- invalidate_device(dev+i, 1);
- gd->part[MINOR(dev+i)].start_sect = 0;
- gd->part[MINOR(dev+i)].nr_sects = 0;
- gd->sizes[MINOR(dev+i)] = 0;
- }
- printk(KERN_ALERT
- "Virtual partitions found for /dev/%s - ignoring any "
- "real partition information we may have found.\n",
- disk_name(gd, MINOR(device), buf));
- }
-
- /* Need to skankily setup 'partition' information */
- gd->part[minor].start_sect = 0;
- gd->part[minor].nr_sects = capacity;
- gd->sizes[minor] = capacity >>(BLOCK_SIZE_BITS-9);
-
- gd->flags[minor >> gd->minor_shift] |= GENHD_FL_VIRT_PARTNS;
- }
- else
- {
- gd->part[minor].nr_sects = capacity;
- gd->sizes[minor] = capacity>>(BLOCK_SIZE_BITS-9);
-
- /* Some final fix-ups depending on the device type */
- switch ( VDISK_TYPE(xd->info) )
- {
- case VDISK_TYPE_CDROM:
- case VDISK_TYPE_FLOPPY:
- case VDISK_TYPE_TAPE:
- gd->flags[minor >> gd->minor_shift] |= GENHD_FL_REMOVABLE;
- printk(KERN_ALERT
- "Skipping partition check on %s /dev/%s\n",
- VDISK_TYPE(xd->info)==VDISK_TYPE_CDROM ? "cdrom" :
- (VDISK_TYPE(xd->info)==VDISK_TYPE_TAPE ? "tape" :
- "floppy"), disk_name(gd, MINOR(device), buf));
- break;
-
- case VDISK_TYPE_DISK:
- /* Only check partitions on real discs (not virtual!). */
- if ( gd->flags[minor>>gd->minor_shift] & GENHD_FL_VIRT_PARTNS )
- {
- printk(KERN_ALERT
- "Skipping partition check on virtual /dev/%s\n",
- disk_name(gd, MINOR(device), buf));
- break;
- }
- register_disk(gd, device, gd->max_p, &xlvbd_block_fops, capacity);
- break;
-
- default:
- printk(KERN_ALERT "XenoLinux: unknown device type %d\n",
- VDISK_TYPE(xd->info));
- break;
- }
- }
-
- out:
- up(&bd->bd_sem);
- bdput(bd);
- return rc;
-}
-
-
-/*
- * xlvbd_remove_device - remove a device node if possible
- * @device: numeric device ID
- *
- * Updates the gendisk structure and invalidates devices.
- *
- * This is OK for now but in future, should perhaps consider where this should
- * deallocate gendisks / unregister devices.
- */
-static int xlvbd_remove_device(int device)
-{
- int i, rc = 0, minor = MINOR(device);
- struct gendisk *gd;
- struct block_device *bd;
- xl_disk_t *disk = NULL;
-
- if ( (bd = bdget(device)) == NULL )
- return -1;
-
- /*
- * Update of partition info, and check of usage count, is protected
- * by the per-block-device semaphore.
- */
- down(&bd->bd_sem);
-
- if ( ((gd = get_gendisk(device)) == NULL) ||
- ((disk = xldev_to_xldisk(device)) == NULL) )
- BUG();
-
- if ( disk->usage != 0 )
- {
- printk(KERN_ALERT "VBD removal failed - in use [dev=%x]\n", device);
- rc = -1;
- goto out;
- }
-
- if ( (minor & (gd->max_p-1)) != 0 )
- {
- /* 1: The VBD is mapped to a partition rather than a whole unit. */
- invalidate_device(device, 1);
- gd->part[minor].start_sect = 0;
- gd->part[minor].nr_sects = 0;
- gd->sizes[minor] = 0;
-
- /* Clear the consists-of-virtual-partitions flag if possible. */
- gd->flags[minor >> gd->minor_shift] &= ~GENHD_FL_VIRT_PARTNS;
- for ( i = 1; i < gd->max_p; i++ )
- if ( gd->sizes[(minor & ~(gd->max_p-1)) + i] != 0 )
- gd->flags[minor >> gd->minor_shift] |= GENHD_FL_VIRT_PARTNS;
-
- /*
- * If all virtual partitions are now gone, and a 'whole unit' VBD is
- * present, then we can try to grok the unit's real partition table.
- */
- if ( !(gd->flags[minor >> gd->minor_shift] & GENHD_FL_VIRT_PARTNS) &&
- (gd->sizes[minor & ~(gd->max_p-1)] != 0) &&
- !(gd->flags[minor >> gd->minor_shift] & GENHD_FL_REMOVABLE) )
- {
- register_disk(gd,
- device&~(gd->max_p-1),
- gd->max_p,
- &xlvbd_block_fops,
- gd->part[minor&~(gd->max_p-1)].nr_sects);
- }
- }
- else
- {
- /*
- * 2: The VBD is mapped to an entire 'unit'. Clear all partitions.
- * NB. The partition entries are only cleared if there are no VBDs
- * mapped to individual partitions on this unit.
- */
- i = gd->max_p - 1; /* Default: clear subpartitions as well. */
- if ( gd->flags[minor >> gd->minor_shift] & GENHD_FL_VIRT_PARTNS )
- i = 0; /* 'Virtual' mode: only clear the 'whole unit' entry. */
- while ( i >= 0 )
- {
- invalidate_device(device+i, 1);
- gd->part[minor+i].start_sect = 0;
- gd->part[minor+i].nr_sects = 0;
- gd->sizes[minor+i] = 0;
- i--;
- }
- }
-
- out:
- up(&bd->bd_sem);
- bdput(bd);
- return rc;
-}
-
-/*
- * xlvbd_update_vbds - reprobes the VBD status and performs updates driver
- * state. The VBDs need to be updated in this way when the domain is
- * initialised and also each time we receive an XLBLK_UPDATE event.
- */
-void xlvbd_update_vbds(void)
-{
- int i, j, k, old_nr, new_nr;
- vdisk_t *old_info, *new_info, *merged_info;
-
- old_info = vbd_info;
- old_nr = nr_vbds;
-
- new_info = kmalloc(MAX_VBDS * sizeof(vdisk_t), GFP_KERNEL);
- if (!new_info)
- return;
-
- if ( unlikely(new_nr = xlvbd_get_vbd_info(new_info)) < 0 )
- goto out;
-
- /*
- * Final list maximum size is old list + new list. This occurs only when
- * old list and new list do not overlap at all, and we cannot yet destroy
- * VBDs in the old list because the usage counts are busy.
- */
- merged_info = kmalloc((old_nr + new_nr) * sizeof(vdisk_t), GFP_KERNEL);
- if (!merged_info)
- goto out;
-
- /* @i tracks old list; @j tracks new list; @k tracks merged list. */
- i = j = k = 0;
-
- while ( (i < old_nr) && (j < new_nr) )
- {
- if ( old_info[i].device < new_info[j].device )
- {
- if ( xlvbd_remove_device(old_info[i].device) != 0 )
- memcpy(&merged_info[k++], &old_info[i], sizeof(vdisk_t));
- i++;
- }
- else if ( old_info[i].device > new_info[j].device )
- {
- if ( xlvbd_init_device(&new_info[j]) == 0 )
- memcpy(&merged_info[k++], &new_info[j], sizeof(vdisk_t));
- j++;
- }
- else
- {
- if ( ((old_info[i].capacity == new_info[j].capacity) &&
- (old_info[i].info == new_info[j].info)) ||
- (xlvbd_remove_device(old_info[i].device) != 0) )
- memcpy(&merged_info[k++], &old_info[i], sizeof(vdisk_t));
- else if ( xlvbd_init_device(&new_info[j]) == 0 )
- memcpy(&merged_info[k++], &new_info[j], sizeof(vdisk_t));
- i++; j++;
- }
- }
-
- for ( ; i < old_nr; i++ )
- {
- if ( xlvbd_remove_device(old_info[i].device) != 0 )
- memcpy(&merged_info[k++], &old_info[i], sizeof(vdisk_t));
- }
-
- for ( ; j < new_nr; j++ )
- {
- if ( xlvbd_init_device(&new_info[j]) == 0 )
- memcpy(&merged_info[k++], &new_info[j], sizeof(vdisk_t));
- }
-
- vbd_info = merged_info;
- nr_vbds = k;
-
- kfree(old_info);
-out:
- kfree(new_info);
-}
-
-
-/*
- * Set up all the linux device goop for the virtual block devices (vbd's) that
- * we know about. Note that although from the backend driver's p.o.v. VBDs are
- * addressed simply an opaque 16-bit device number, the domain creation tools
- * conventionally allocate these numbers to correspond to those used by 'real'
- * linux -- this is just for convenience as it means e.g. that the same
- * /etc/fstab can be used when booting with or without Xen.
- */
-int xlvbd_init(void)
-{
- int i;
-
- /*
- * If compiled as a module, we don't support unloading yet. We therefore
- * permanently increment the reference count to disallow it.
- */
- SET_MODULE_OWNER(&xlvbd_block_fops);
- MOD_INC_USE_COUNT;
-
- /* Initialize the global arrays. */
- for ( i = 0; i < 256; i++ )
- {
- xlide_blksize_size[i] = 1024;
- xlide_hardsect_size[i] = 512;
- xlide_max_sectors[i] = 512;
-
- xlscsi_blksize_size[i] = 1024;
- xlscsi_hardsect_size[i] = 512;
- xlscsi_max_sectors[i] = 512;
-
- xlvbd_blksize_size[i] = 512;
- xlvbd_hardsect_size[i] = 512;
- xlvbd_max_sectors[i] = 512;
- }
-
- vbd_info = kmalloc(MAX_VBDS * sizeof(vdisk_t), GFP_KERNEL);
- if (!vbd_info)
- return -ENOMEM;
-
- nr_vbds = xlvbd_get_vbd_info(vbd_info);
-
- if ( nr_vbds < 0 )
- {
- kfree(vbd_info);
- vbd_info = NULL;
- nr_vbds = 0;
- }
- else
- {
- for ( i = 0; i < nr_vbds; i++ )
- xlvbd_init_device(&vbd_info[i]);
- }
-
- return 0;
-}
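
xlvbd_update_vbds() above is a two-pointer merge of two lists sorted by
device number: entries present only in the old list are removed (and kept
only if still busy), entries present only in the new list are initialised,
and matching entries are reused unless their capacity or info changed. A
minimal standalone sketch of the walk (not part of the patch), reduced to
sorted integer arrays with illustrative contents:

    #include <stdio.h>

    int main(void)
    {
        int old_ids[] = { 1, 3, 4 }, new_ids[] = { 1, 2, 4 };
        int old_nr = 3, new_nr = 3, i = 0, j = 0;

        while (i < old_nr && j < new_nr) {
            if (old_ids[i] < new_ids[j])
                printf("remove %d\n", old_ids[i++]);  /* only in old list */
            else if (old_ids[i] > new_ids[j])
                printf("add %d\n", new_ids[j++]);     /* only in new list */
            else {
                printf("keep %d\n", old_ids[i]);      /* present in both */
                i++; j++;
            }
        }
        while (i < old_nr) printf("remove %d\n", old_ids[i++]);
        while (j < new_nr) printf("add %d\n", new_ids[j++]);
        return 0;
    }
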
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/arch/xen/drivers/console/Makefile
--- a/linux-2.4.30-xen-sparse/arch/xen/drivers/console/Makefile Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,3 +0,0 @@
-O_TARGET := drv.o
-obj-$(CONFIG_XEN_CONSOLE) := console.o
-include $(TOPDIR)/Rules.make
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/arch/xen/drivers/dom0/Makefile
--- a/linux-2.4.30-xen-sparse/arch/xen/drivers/dom0/Makefile Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,3 +0,0 @@
-O_TARGET := drv.o
-obj-y := core.o
-include $(TOPDIR)/Rules.make
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/arch/xen/drivers/evtchn/Makefile
--- a/linux-2.4.30-xen-sparse/arch/xen/drivers/evtchn/Makefile Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,3 +0,0 @@
-O_TARGET := drv.o
-obj-y := evtchn.o
-include $(TOPDIR)/Rules.make
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/arch/xen/drivers/netif/Makefile
--- a/linux-2.4.30-xen-sparse/arch/xen/drivers/netif/Makefile Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,10 +0,0 @@
-
-O_TARGET := drv.o
-
-subdir-$(CONFIG_XEN_NETDEV_FRONTEND) += frontend
-obj-$(CONFIG_XEN_NETDEV_FRONTEND) += frontend/drv.o
-
-subdir-$(CONFIG_XEN_PHYSDEV_ACCESS) += backend
-obj-$(CONFIG_XEN_PHYSDEV_ACCESS) += backend/drv.o
-
-include $(TOPDIR)/Rules.make
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/arch/xen/drivers/netif/backend/Makefile
--- a/linux-2.4.30-xen-sparse/arch/xen/drivers/netif/backend/Makefile Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,4 +0,0 @@
-O_TARGET := drv.o
-export-objs := interface.o
-obj-y := main.o control.o interface.o
-include $(TOPDIR)/Rules.make
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/arch/xen/drivers/netif/frontend/Makefile
--- a/linux-2.4.30-xen-sparse/arch/xen/drivers/netif/frontend/Makefile Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,3 +0,0 @@
-O_TARGET := drv.o
-obj-y := main.o
-include $(TOPDIR)/Rules.make
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/arch/xen/kernel/Makefile
--- a/linux-2.4.30-xen-sparse/arch/xen/kernel/Makefile Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,20 +0,0 @@
-
-.S.o:
- $(CC) $(AFLAGS) -traditional -c $< -o $*.o
-
-all: kernel.o head.o init_task.o
-
-O_TARGET := kernel.o
-
-export-objs := i386_ksyms.o skbuff.o ctrl_if.o
-
-obj-y := process.o semaphore.o signal.o entry.o traps.o irq.o \
- ptrace.o ioport.o ldt.o setup.o time.o sys_i386.o \
- i386_ksyms.o i387.o evtchn.o ctrl_if.o pci-dma.o \
- reboot.o fixup.o skbuff.o
-
-ifdef CONFIG_PCI
-obj-y += pci-i386.o pci-pc.o
-endif
-
-include $(TOPDIR)/Rules.make
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/arch/xen/kernel/entry.S
--- a/linux-2.4.30-xen-sparse/arch/xen/kernel/entry.S Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,779 +0,0 @@
-/*
- * linux/arch/i386/entry.S
- *
- * Copyright (C) 1991, 1992 Linus Torvalds
- */
-
-/*
- * entry.S contains the system-call and fault low-level handling routines.
- * This also contains the timer-interrupt handler, as well as all interrupts
- * and faults that can result in a task-switch.
- *
- * NOTE: This code handles signal-recognition, which happens every time
- * after a timer-interrupt and after each system call.
- *
- * I changed all the .align's to 4 (16 byte alignment), as that's faster
- * on a 486.
- *
- * Stack layout in 'ret_to_user':
- * ptrace needs to have all regs on the stack.
- * if the order here is changed, it needs to be
- * updated in fork.c:copy_process, signal.c:do_signal,
- * ptrace.c and ptrace.h
- *
- * 0(%esp) - %ebx
- * 4(%esp) - %ecx
- * 8(%esp) - %edx
- * C(%esp) - %esi
- * 10(%esp) - %edi
- * 14(%esp) - %ebp
- * 18(%esp) - %eax
- * 1C(%esp) - %ds
- * 20(%esp) - %es
- * 24(%esp) - orig_eax
- * 28(%esp) - %eip
- * 2C(%esp) - %cs
- * 30(%esp) - %eflags
- * 34(%esp) - %oldesp
- * 38(%esp) - %oldss
- *
- * "current" is in register %ebx during any slow entries.
- */
-
-#include <linux/config.h>
-#include <linux/sys.h>
-#include <linux/linkage.h>
-#include <asm/segment.h>
-#include <asm/smp.h>
-
-EBX = 0x00
-ECX = 0x04
-EDX = 0x08
-ESI = 0x0C
-EDI = 0x10
-EBP = 0x14
-EAX = 0x18
-DS = 0x1C
-ES = 0x20
-ORIG_EAX = 0x24
-EIP = 0x28
-CS = 0x2C
-EFLAGS = 0x30
-OLDESP = 0x34
-OLDSS = 0x38
-
-CF_MASK = 0x00000001
-TF_MASK = 0x00000100
-IF_MASK = 0x00000200
-DF_MASK = 0x00000400
-NT_MASK = 0x00004000
-
-/* Offsets into task_struct. */
-state = 0
-flags = 4
-sigpending = 8
-addr_limit = 12
-exec_domain = 16
-need_resched = 20
-tsk_ptrace = 24
-processor = 52
-
-/* Offsets into shared_info_t. */
-#define evtchn_upcall_pending /* 0 */
-#define evtchn_upcall_mask 1
-
-ENOSYS = 38
-
-
-#define SAVE_ALL \
- cld; \
- pushl %es; \
- pushl %ds; \
- pushl %eax; \
- pushl %ebp; \
- pushl %edi; \
- pushl %esi; \
- pushl %edx; \
- pushl %ecx; \
- pushl %ebx; \
- movl $(__KERNEL_DS),%edx; \
- movl %edx,%ds; \
- movl %edx,%es;
-
-#define RESTORE_ALL \
- popl %ebx; \
- popl %ecx; \
- popl %edx; \
- popl %esi; \
- popl %edi; \
- popl %ebp; \
- popl %eax; \
-1: popl %ds; \
-2: popl %es; \
- addl $4,%esp; \
-3: iret; \
-.section .fixup,"ax"; \
-4: movl $0,(%esp); \
- jmp 1b; \
-5: movl $0,(%esp); \
- jmp 2b; \
-6: pushl %ss; \
- popl %ds; \
- pushl %ss; \
- popl %es; \
- pushl $11; \
- call do_exit; \
-.previous; \
-.section __ex_table,"a";\
- .align 4; \
- .long 1b,4b; \
- .long 2b,5b; \
- .long 3b,6b; \
-.previous
-
-#define GET_CURRENT(reg) \
- movl $-8192, reg; \
- andl %esp, reg
-
-ENTRY(lcall7)
- pushfl # We get a different stack layout with call
- pushl %eax # gates, which has to be cleaned up later..
- SAVE_ALL
- movl EIP(%esp),%eax # due to call gates, this is eflags, not eip..
- movl CS(%esp),%edx # this is eip..
- movl EFLAGS(%esp),%ecx # and this is cs..
- movl %eax,EFLAGS(%esp) #
- andl $~(NT_MASK|TF_MASK|DF_MASK), %eax
- pushl %eax
- popfl
- movl %edx,EIP(%esp) # Now we move them to their "normal" places
- movl %ecx,CS(%esp) #
- movl %esp,%ebx
- pushl %ebx
- andl $-8192,%ebx # GET_CURRENT
- movl exec_domain(%ebx),%edx # Get the execution domain
- movl 4(%edx),%edx # Get the lcall7 handler for the domain
- pushl $0x7
- call *%edx
- addl $4, %esp
- popl %eax
- jmp ret_to_user
-
-ENTRY(lcall27)
- pushfl # We get a different stack layout with call
- pushl %eax # gates, which has to be cleaned up later..
- SAVE_ALL
- movl EIP(%esp),%eax # due to call gates, this is eflags, not eip..
- movl CS(%esp),%edx # this is eip..
- movl EFLAGS(%esp),%ecx # and this is cs..
- movl %eax,EFLAGS(%esp) #
- andl $~(NT_MASK|TF_MASK|DF_MASK), %eax
- pushl %eax
- popfl
- movl %edx,EIP(%esp) # Now we move them to their "normal" places
- movl %ecx,CS(%esp) #
- movl %esp,%ebx
- pushl %ebx
- andl $-8192,%ebx # GET_CURRENT
- movl exec_domain(%ebx),%edx # Get the execution domain
- movl 4(%edx),%edx # Get the lcall7 handler for the domain
- pushl $0x27
- call *%edx
- addl $4, %esp
- popl %eax
- jmp ret_to_user
-
-ENTRY(ret_from_fork)
- pushl %ebx
- call SYMBOL_NAME(schedule_tail)
- addl $4, %esp
- GET_CURRENT(%ebx)
- testb $0x02,tsk_ptrace(%ebx) # PT_TRACESYS
- jne tracesys_exit
- jmp ret_to_user
-
-/*
- * Return to user mode is not as complex as all this looks,
- * but we want the default path for a system call return to
- * go as quickly as possible which is why some of this is
- * less clear than it otherwise should be.
- */
-ENTRY(system_call)
- pushl %eax # save orig_eax
- SAVE_ALL
- GET_CURRENT(%ebx)
- testb $0x02,tsk_ptrace(%ebx) # PT_TRACESYS
- jne tracesys
- cmpl $(NR_syscalls),%eax
- jae badsys
- call *SYMBOL_NAME(sys_call_table)(,%eax,4)
- movl %eax,EAX(%esp) # save the return value
-ret_to_user:
- movl SYMBOL_NAME(HYPERVISOR_shared_info),%esi
- movb $1,evtchn_upcall_mask(%esi) # make tests atomic
-ret_to_user_nocli:
- cmpl $0,need_resched(%ebx)
- jne reschedule
- cmpl $0,sigpending(%ebx)
- je safesti # ensure need_resched updates are seen
-/*signal_return:*/
- movb $0,evtchn_upcall_mask(%esi) # reenable event callbacks
- movl %esp,%eax
- xorl %edx,%edx
- call SYMBOL_NAME(do_signal)
- jmp safesti
-
- ALIGN
-restore_all:
- RESTORE_ALL
-
- ALIGN
-tracesys:
- movl $-ENOSYS,EAX(%esp)
- call SYMBOL_NAME(syscall_trace)
- movl ORIG_EAX(%esp),%eax
- cmpl $(NR_syscalls),%eax
- jae tracesys_exit
- call *SYMBOL_NAME(sys_call_table)(,%eax,4)
- movl %eax,EAX(%esp) # save the return value
-tracesys_exit:
- call SYMBOL_NAME(syscall_trace)
- jmp ret_to_user
-badsys:
- movl $-ENOSYS,EAX(%esp)
- jmp ret_to_user
-
- ALIGN
-ENTRY(ret_from_intr)
- GET_CURRENT(%ebx)
-ret_from_exception:
- movb CS(%esp),%al
- testl $2,%eax
- jne ret_to_user
- jmp restore_all
-
- ALIGN
-reschedule:
- movb $0,evtchn_upcall_mask(%esi) # reenable event callbacks
- call SYMBOL_NAME(schedule) # test
- jmp ret_to_user
-
-ENTRY(divide_error)
- pushl $0 # no error code
- pushl $ SYMBOL_NAME(do_divide_error)
- ALIGN
-error_code:
- pushl %ds
- pushl %eax
- xorl %eax,%eax
- pushl %ebp
- pushl %edi
- pushl %esi
- pushl %edx
- decl %eax # eax = -1
- pushl %ecx
- pushl %ebx
- GET_CURRENT(%ebx)
- cld
- movl %es,%ecx
- movl ORIG_EAX(%esp), %esi # get the error code
- movl ES(%esp), %edi # get the function address
- movl %eax, ORIG_EAX(%esp)
- movl %ecx, ES(%esp)
- movl %esp,%edx
- pushl %esi # push the error code
- pushl %edx # push the pt_regs pointer
- movl $(__KERNEL_DS),%edx
- movl %edx,%ds
- movl %edx,%es
- call *%edi
- addl $8,%esp
- jmp ret_from_exception
-
-# A note on the "critical region" in our callback handler.
-# We want to avoid stacking callback handlers due to events occurring
-# during handling of the last event. To do this, we keep events disabled
-# until we've done all processing. HOWEVER, we must enable events before
-# popping the stack frame (can't be done atomically) and so it would still
-# be possible to get enough handler activations to overflow the stack.
-# Although unlikely, bugs of that kind are hard to track down, so we'd
-# like to avoid the possibility.
-# So, on entry to the handler we detect whether we interrupted an
-# existing activation in its critical region -- if so, we pop the current
-# activation and restart the handler using the previous one.
-ENTRY(hypervisor_callback)
- pushl %eax
- SAVE_ALL
- GET_CURRENT(%ebx)
- movl EIP(%esp),%eax
- cmpl $scrit,%eax
- jb 11f
- cmpl $ecrit,%eax
- jb critical_region_fixup
-11: push %esp
- call evtchn_do_upcall
- add $4,%esp
- movl SYMBOL_NAME(HYPERVISOR_shared_info),%esi
- movb CS(%esp),%cl
- test $2,%cl # slow return to ring 2 or 3
- jne ret_to_user_nocli
-safesti:movb $0,evtchn_upcall_mask(%esi) # reenable event callbacks
-scrit: /**** START OF CRITICAL REGION ****/
- testb $0xFF,evtchn_upcall_pending(%esi)
- jnz 14f # process more events if necessary...
- RESTORE_ALL
-14: movb $1,evtchn_upcall_mask(%esi)
- jmp 11b
-ecrit: /**** END OF CRITICAL REGION ****/
-# [How we do the fixup]. We want to merge the current stack frame with the
-# just-interrupted frame. How we do this depends on where in the critical
-# region the interrupted handler was executing, and so how many saved
-# registers are in each frame. We do this quickly using the lookup table
-# 'critical_fixup_table'. For each byte offset in the critical region, it
-# provides the number of bytes which have already been popped from the
-# interrupted stack frame.
-critical_region_fixup:
- addl $critical_fixup_table-scrit,%eax
- movzbl (%eax),%eax # %eax contains num bytes popped
- mov %esp,%esi
- add %eax,%esi # %esi points at end of src region
- mov %esp,%edi
- add $0x34,%edi # %edi points at end of dst region
- mov %eax,%ecx
- shr $2,%ecx # convert words to bytes
- je 16f # skip loop if nothing to copy
-15: subl $4,%esi # pre-decrementing copy loop
- subl $4,%edi
- movl (%esi),%eax
- movl %eax,(%edi)
- loop 15b
-16: movl %edi,%esp # final %edi is top of merged stack
- jmp 11b
-
-critical_fixup_table:
- .byte 0x00,0x00,0x00 # testb $0xFF,(%esi)
- .byte 0x00,0x00 # jnz 14f
- .byte 0x00 # pop %ebx
- .byte 0x04 # pop %ecx
- .byte 0x08 # pop %edx
- .byte 0x0c # pop %esi
- .byte 0x10 # pop %edi
- .byte 0x14 # pop %ebp
- .byte 0x18 # pop %eax
- .byte 0x1c # pop %ds
- .byte 0x20 # pop %es
- .byte 0x24,0x24,0x24 # add $4,%esp
- .byte 0x28 # iret
- .byte 0x00,0x00,0x00,0x00 # movb $1,4(%esi)
- .byte 0x00,0x00 # jmp 11b
-
-# Hypervisor uses this for application faults while it executes.
-ENTRY(failsafe_callback)
-1: popl %ds
-2: popl %es
-3: popl %fs
-4: popl %gs
-5: iret
-.section .fixup,"ax"; \
-6: movl $0,(%esp); \
- jmp 1b; \
-7: movl $0,(%esp); \
- jmp 2b; \
-8: movl $0,(%esp); \
- jmp 3b; \
-9: movl $0,(%esp); \
- jmp 4b; \
-10: pushl %ss; \
- popl %ds; \
- pushl %ss; \
- popl %es; \
- pushl $11; \
- call do_exit; \
-.previous; \
-.section __ex_table,"a";\
- .align 4; \
- .long 1b,6b; \
- .long 2b,7b; \
- .long 3b,8b; \
- .long 4b,9b; \
- .long 5b,10b; \
-.previous
-
-ENTRY(coprocessor_error)
- pushl $0
- pushl $ SYMBOL_NAME(do_coprocessor_error)
- jmp error_code
-
-ENTRY(simd_coprocessor_error)
- pushl $0
- pushl $ SYMBOL_NAME(do_simd_coprocessor_error)
- jmp error_code
-
-ENTRY(device_not_available)
- pushl $-1 # mark this as an int
- SAVE_ALL
- GET_CURRENT(%ebx)
- call SYMBOL_NAME(math_state_restore)
- jmp ret_from_exception
-
-ENTRY(debug)
- pushl $0
- pushl $ SYMBOL_NAME(do_debug)
- jmp error_code
-
-ENTRY(int3)
- pushl $0
- pushl $ SYMBOL_NAME(do_int3)
- jmp error_code
-
-ENTRY(overflow)
- pushl $0
- pushl $ SYMBOL_NAME(do_overflow)
- jmp error_code
-
-ENTRY(bounds)
- pushl $0
- pushl $ SYMBOL_NAME(do_bounds)
- jmp error_code
-
-ENTRY(invalid_op)
- pushl $0
- pushl $ SYMBOL_NAME(do_invalid_op)
- jmp error_code
-
-ENTRY(coprocessor_segment_overrun)
- pushl $0
- pushl $ SYMBOL_NAME(do_coprocessor_segment_overrun)
- jmp error_code
-
-ENTRY(double_fault)
- pushl $ SYMBOL_NAME(do_double_fault)
- jmp error_code
-
-ENTRY(invalid_TSS)
- pushl $ SYMBOL_NAME(do_invalid_TSS)
- jmp error_code
-
-ENTRY(segment_not_present)
- pushl $ SYMBOL_NAME(do_segment_not_present)
- jmp error_code
-
-ENTRY(stack_segment)
- pushl $ SYMBOL_NAME(do_stack_segment)
- jmp error_code
-
-ENTRY(general_protection)
- pushl $ SYMBOL_NAME(do_general_protection)
- jmp error_code
-
-ENTRY(alignment_check)
- pushl $ SYMBOL_NAME(do_alignment_check)
- jmp error_code
-
-# This handler is special, because it gets an extra value on its stack,
-# which is the linear faulting address.
-#define PAGE_FAULT_STUB(_name1, _name2) \
-ENTRY(_name1) \
- pushl %ds ; \
- pushl %eax ; \
- xorl %eax,%eax ; \
- pushl %ebp ; \
- pushl %edi ; \
- pushl %esi ; \
- pushl %edx ; \
- decl %eax /* eax = -1 */ ; \
- pushl %ecx ; \
- pushl %ebx ; \
- GET_CURRENT(%ebx) ; \
- cld ; \
- movl %es,%ecx ; \
- movl ORIG_EAX(%esp), %esi /* get the error code */ ; \
- movl ES(%esp), %edi /* get the faulting address */ ; \
- movl %eax, ORIG_EAX(%esp) ; \
- movl %ecx, ES(%esp) ; \
- movl %esp,%edx ; \
- pushl %edi /* push the faulting address */ ; \
- pushl %esi /* push the error code */ ; \
- pushl %edx /* push the pt_regs pointer */ ; \
- movl $(__KERNEL_DS),%edx ; \
- movl %edx,%ds ; \
- movl %edx,%es ; \
- call SYMBOL_NAME(_name2) ; \
- addl $12,%esp ; \
- jmp ret_from_exception ;
-PAGE_FAULT_STUB(page_fault, do_page_fault)
-
-ENTRY(machine_check)
- pushl $0
- pushl $ SYMBOL_NAME(do_machine_check)
- jmp error_code
-
-ENTRY(fixup_4gb_segment)
- pushl $ SYMBOL_NAME(do_fixup_4gb_segment)
- jmp error_code
-
-.data
-ENTRY(sys_call_table)
- .long SYMBOL_NAME(sys_ni_syscall) /* 0 - old "setup()" system call*/
- .long SYMBOL_NAME(sys_exit)
- .long SYMBOL_NAME(sys_fork)
- .long SYMBOL_NAME(sys_read)
- .long SYMBOL_NAME(sys_write)
- .long SYMBOL_NAME(sys_open) /* 5 */
- .long SYMBOL_NAME(sys_close)
- .long SYMBOL_NAME(sys_waitpid)
- .long SYMBOL_NAME(sys_creat)
- .long SYMBOL_NAME(sys_link)
- .long SYMBOL_NAME(sys_unlink) /* 10 */
- .long SYMBOL_NAME(sys_execve)
- .long SYMBOL_NAME(sys_chdir)
- .long SYMBOL_NAME(sys_time)
- .long SYMBOL_NAME(sys_mknod)
- .long SYMBOL_NAME(sys_chmod) /* 15 */
- .long SYMBOL_NAME(sys_lchown16)
- .long SYMBOL_NAME(sys_ni_syscall) /* old break syscall holder */
- .long SYMBOL_NAME(sys_stat)
- .long SYMBOL_NAME(sys_lseek)
- .long SYMBOL_NAME(sys_getpid) /* 20 */
- .long SYMBOL_NAME(sys_mount)
- .long SYMBOL_NAME(sys_oldumount)
- .long SYMBOL_NAME(sys_setuid16)
- .long SYMBOL_NAME(sys_getuid16)
- .long SYMBOL_NAME(sys_stime) /* 25 */
- .long SYMBOL_NAME(sys_ptrace)
- .long SYMBOL_NAME(sys_alarm)
- .long SYMBOL_NAME(sys_fstat)
- .long SYMBOL_NAME(sys_pause)
- .long SYMBOL_NAME(sys_utime) /* 30 */
- .long SYMBOL_NAME(sys_ni_syscall) /* old stty syscall holder */
- .long SYMBOL_NAME(sys_ni_syscall) /* old gtty syscall holder */
- .long SYMBOL_NAME(sys_access)
- .long SYMBOL_NAME(sys_nice)
- .long SYMBOL_NAME(sys_ni_syscall) /* 35 */ /* old ftime syscall holder */
- .long SYMBOL_NAME(sys_sync)
- .long SYMBOL_NAME(sys_kill)
- .long SYMBOL_NAME(sys_rename)
- .long SYMBOL_NAME(sys_mkdir)
- .long SYMBOL_NAME(sys_rmdir) /* 40 */
- .long SYMBOL_NAME(sys_dup)
- .long SYMBOL_NAME(sys_pipe)
- .long SYMBOL_NAME(sys_times)
- .long SYMBOL_NAME(sys_ni_syscall) /* old prof syscall holder */
- .long SYMBOL_NAME(sys_brk) /* 45 */
- .long SYMBOL_NAME(sys_setgid16)
- .long SYMBOL_NAME(sys_getgid16)
- .long SYMBOL_NAME(sys_signal)
- .long SYMBOL_NAME(sys_geteuid16)
- .long SYMBOL_NAME(sys_getegid16) /* 50 */
- .long SYMBOL_NAME(sys_acct)
- .long SYMBOL_NAME(sys_umount) /* recycled never used phys() */
- .long SYMBOL_NAME(sys_ni_syscall) /* old lock syscall holder */
- .long SYMBOL_NAME(sys_ioctl)
- .long SYMBOL_NAME(sys_fcntl) /* 55 */
- .long SYMBOL_NAME(sys_ni_syscall) /* old mpx syscall holder */
- .long SYMBOL_NAME(sys_setpgid)
- .long SYMBOL_NAME(sys_ni_syscall) /* old ulimit syscall holder */
- .long SYMBOL_NAME(sys_olduname)
- .long SYMBOL_NAME(sys_umask) /* 60 */
- .long SYMBOL_NAME(sys_chroot)
- .long SYMBOL_NAME(sys_ustat)
- .long SYMBOL_NAME(sys_dup2)
- .long SYMBOL_NAME(sys_getppid)
- .long SYMBOL_NAME(sys_getpgrp) /* 65 */
- .long SYMBOL_NAME(sys_setsid)
- .long SYMBOL_NAME(sys_sigaction)
- .long SYMBOL_NAME(sys_sgetmask)
- .long SYMBOL_NAME(sys_ssetmask)
- .long SYMBOL_NAME(sys_setreuid16) /* 70 */
- .long SYMBOL_NAME(sys_setregid16)
- .long SYMBOL_NAME(sys_sigsuspend)
- .long SYMBOL_NAME(sys_sigpending)
- .long SYMBOL_NAME(sys_sethostname)
- .long SYMBOL_NAME(sys_setrlimit) /* 75 */
- .long SYMBOL_NAME(sys_old_getrlimit)
- .long SYMBOL_NAME(sys_getrusage)
- .long SYMBOL_NAME(sys_gettimeofday)
- .long SYMBOL_NAME(sys_settimeofday)
- .long SYMBOL_NAME(sys_getgroups16) /* 80 */
- .long SYMBOL_NAME(sys_setgroups16)
- .long SYMBOL_NAME(old_select)
- .long SYMBOL_NAME(sys_symlink)
- .long SYMBOL_NAME(sys_lstat)
- .long SYMBOL_NAME(sys_readlink) /* 85 */
- .long SYMBOL_NAME(sys_uselib)
- .long SYMBOL_NAME(sys_swapon)
- .long SYMBOL_NAME(sys_reboot)
- .long SYMBOL_NAME(old_readdir)
- .long SYMBOL_NAME(old_mmap) /* 90 */
- .long SYMBOL_NAME(sys_munmap)
- .long SYMBOL_NAME(sys_truncate)
- .long SYMBOL_NAME(sys_ftruncate)
- .long SYMBOL_NAME(sys_fchmod)
- .long SYMBOL_NAME(sys_fchown16) /* 95 */
- .long SYMBOL_NAME(sys_getpriority)
- .long SYMBOL_NAME(sys_setpriority)
- .long SYMBOL_NAME(sys_ni_syscall) /* old profil syscall holder */
- .long SYMBOL_NAME(sys_statfs)
- .long SYMBOL_NAME(sys_fstatfs) /* 100 */
- .long SYMBOL_NAME(sys_ioperm)
- .long SYMBOL_NAME(sys_socketcall)
- .long SYMBOL_NAME(sys_syslog)
- .long SYMBOL_NAME(sys_setitimer)
- .long SYMBOL_NAME(sys_getitimer) /* 105 */
- .long SYMBOL_NAME(sys_newstat)
- .long SYMBOL_NAME(sys_newlstat)
- .long SYMBOL_NAME(sys_newfstat)
- .long SYMBOL_NAME(sys_uname)
- .long SYMBOL_NAME(sys_iopl) /* 110 */
- .long SYMBOL_NAME(sys_vhangup)
- .long SYMBOL_NAME(sys_ni_syscall) /* old "idle" system call */
- .long SYMBOL_NAME(sys_ni_syscall) /* was VM86 */
- .long SYMBOL_NAME(sys_wait4)
- .long SYMBOL_NAME(sys_swapoff) /* 115 */
- .long SYMBOL_NAME(sys_sysinfo)
- .long SYMBOL_NAME(sys_ipc)
- .long SYMBOL_NAME(sys_fsync)
- .long SYMBOL_NAME(sys_sigreturn)
- .long SYMBOL_NAME(sys_clone) /* 120 */
- .long SYMBOL_NAME(sys_setdomainname)
- .long SYMBOL_NAME(sys_newuname)
- .long SYMBOL_NAME(sys_modify_ldt)
- .long SYMBOL_NAME(sys_adjtimex)
- .long SYMBOL_NAME(sys_mprotect) /* 125 */
- .long SYMBOL_NAME(sys_sigprocmask)
- .long SYMBOL_NAME(sys_create_module)
- .long SYMBOL_NAME(sys_init_module)
- .long SYMBOL_NAME(sys_delete_module)
- .long SYMBOL_NAME(sys_get_kernel_syms) /* 130 */
- .long SYMBOL_NAME(sys_quotactl)
- .long SYMBOL_NAME(sys_getpgid)
- .long SYMBOL_NAME(sys_fchdir)
- .long SYMBOL_NAME(sys_bdflush)
- .long SYMBOL_NAME(sys_sysfs) /* 135 */
- .long SYMBOL_NAME(sys_personality)
- .long SYMBOL_NAME(sys_ni_syscall) /* for afs_syscall */
- .long SYMBOL_NAME(sys_setfsuid16)
- .long SYMBOL_NAME(sys_setfsgid16)
- .long SYMBOL_NAME(sys_llseek) /* 140 */
- .long SYMBOL_NAME(sys_getdents)
- .long SYMBOL_NAME(sys_select)
- .long SYMBOL_NAME(sys_flock)
- .long SYMBOL_NAME(sys_msync)
- .long SYMBOL_NAME(sys_readv) /* 145 */
- .long SYMBOL_NAME(sys_writev)
- .long SYMBOL_NAME(sys_getsid)
- .long SYMBOL_NAME(sys_fdatasync)
- .long SYMBOL_NAME(sys_sysctl)
- .long SYMBOL_NAME(sys_mlock) /* 150 */
- .long SYMBOL_NAME(sys_munlock)
- .long SYMBOL_NAME(sys_mlockall)
- .long SYMBOL_NAME(sys_munlockall)
- .long SYMBOL_NAME(sys_sched_setparam)
- .long SYMBOL_NAME(sys_sched_getparam) /* 155 */
- .long SYMBOL_NAME(sys_sched_setscheduler)
- .long SYMBOL_NAME(sys_sched_getscheduler)
- .long SYMBOL_NAME(sys_sched_yield)
- .long SYMBOL_NAME(sys_sched_get_priority_max)
- .long SYMBOL_NAME(sys_sched_get_priority_min) /* 160 */
- .long SYMBOL_NAME(sys_sched_rr_get_interval)
- .long SYMBOL_NAME(sys_nanosleep)
- .long SYMBOL_NAME(sys_mremap)
- .long SYMBOL_NAME(sys_setresuid16)
- .long SYMBOL_NAME(sys_getresuid16) /* 165 */
- .long SYMBOL_NAME(sys_ni_syscall) /* was VM86 */
- .long SYMBOL_NAME(sys_query_module)
- .long SYMBOL_NAME(sys_poll)
- .long SYMBOL_NAME(sys_nfsservctl)
- .long SYMBOL_NAME(sys_setresgid16) /* 170 */
- .long SYMBOL_NAME(sys_getresgid16)
- .long SYMBOL_NAME(sys_prctl)
- .long SYMBOL_NAME(sys_rt_sigreturn)
- .long SYMBOL_NAME(sys_rt_sigaction)
- .long SYMBOL_NAME(sys_rt_sigprocmask) /* 175 */
- .long SYMBOL_NAME(sys_rt_sigpending)
- .long SYMBOL_NAME(sys_rt_sigtimedwait)
- .long SYMBOL_NAME(sys_rt_sigqueueinfo)
- .long SYMBOL_NAME(sys_rt_sigsuspend)
- .long SYMBOL_NAME(sys_pread) /* 180 */
- .long SYMBOL_NAME(sys_pwrite)
- .long SYMBOL_NAME(sys_chown16)
- .long SYMBOL_NAME(sys_getcwd)
- .long SYMBOL_NAME(sys_capget)
- .long SYMBOL_NAME(sys_capset) /* 185 */
- .long SYMBOL_NAME(sys_sigaltstack)
- .long SYMBOL_NAME(sys_sendfile)
- .long SYMBOL_NAME(sys_ni_syscall) /* streams1 */
- .long SYMBOL_NAME(sys_ni_syscall) /* streams2 */
- .long SYMBOL_NAME(sys_vfork) /* 190 */
- .long SYMBOL_NAME(sys_getrlimit)
- .long SYMBOL_NAME(sys_mmap2)
- .long SYMBOL_NAME(sys_truncate64)
- .long SYMBOL_NAME(sys_ftruncate64)
- .long SYMBOL_NAME(sys_stat64) /* 195 */
- .long SYMBOL_NAME(sys_lstat64)
- .long SYMBOL_NAME(sys_fstat64)
- .long SYMBOL_NAME(sys_lchown)
- .long SYMBOL_NAME(sys_getuid)
- .long SYMBOL_NAME(sys_getgid) /* 200 */
- .long SYMBOL_NAME(sys_geteuid)
- .long SYMBOL_NAME(sys_getegid)
- .long SYMBOL_NAME(sys_setreuid)
- .long SYMBOL_NAME(sys_setregid)
- .long SYMBOL_NAME(sys_getgroups) /* 205 */
- .long SYMBOL_NAME(sys_setgroups)
- .long SYMBOL_NAME(sys_fchown)
- .long SYMBOL_NAME(sys_setresuid)
- .long SYMBOL_NAME(sys_getresuid)
- .long SYMBOL_NAME(sys_setresgid) /* 210 */
- .long SYMBOL_NAME(sys_getresgid)
- .long SYMBOL_NAME(sys_chown)
- .long SYMBOL_NAME(sys_setuid)
- .long SYMBOL_NAME(sys_setgid)
- .long SYMBOL_NAME(sys_setfsuid) /* 215 */
- .long SYMBOL_NAME(sys_setfsgid)
- .long SYMBOL_NAME(sys_pivot_root)
- .long SYMBOL_NAME(sys_mincore)
- .long SYMBOL_NAME(sys_madvise)
- .long SYMBOL_NAME(sys_getdents64) /* 220 */
- .long SYMBOL_NAME(sys_fcntl64)
- .long SYMBOL_NAME(sys_ni_syscall) /* reserved for TUX */
- .long SYMBOL_NAME(sys_ni_syscall) /* Reserved for Security */
- .long SYMBOL_NAME(sys_gettid)
- .long SYMBOL_NAME(sys_readahead) /* 225 */
- .long SYMBOL_NAME(sys_setxattr)
- .long SYMBOL_NAME(sys_lsetxattr)
- .long SYMBOL_NAME(sys_fsetxattr)
- .long SYMBOL_NAME(sys_getxattr)
- .long SYMBOL_NAME(sys_lgetxattr) /* 230 */
- .long SYMBOL_NAME(sys_fgetxattr)
- .long SYMBOL_NAME(sys_listxattr)
- .long SYMBOL_NAME(sys_llistxattr)
- .long SYMBOL_NAME(sys_flistxattr)
- .long SYMBOL_NAME(sys_removexattr) /* 235 */
- .long SYMBOL_NAME(sys_lremovexattr)
- .long SYMBOL_NAME(sys_fremovexattr)
- .long SYMBOL_NAME(sys_tkill)
- .long SYMBOL_NAME(sys_sendfile64)
- .long SYMBOL_NAME(sys_ni_syscall) /* 240 reserved for futex */
- .long SYMBOL_NAME(sys_ni_syscall) /* reserved for sched_setaffinity */
- .long SYMBOL_NAME(sys_ni_syscall) /* reserved for sched_getaffinity */
- .long SYMBOL_NAME(sys_ni_syscall) /* sys_set_thread_area */
- .long SYMBOL_NAME(sys_ni_syscall) /* sys_get_thread_area */
- .long SYMBOL_NAME(sys_ni_syscall) /* 245 sys_io_setup */
- .long SYMBOL_NAME(sys_ni_syscall) /* sys_io_destroy */
- .long SYMBOL_NAME(sys_ni_syscall) /* sys_io_getevents */
- .long SYMBOL_NAME(sys_ni_syscall) /* sys_io_submit */
- .long SYMBOL_NAME(sys_ni_syscall) /* sys_io_cancel */
- .long SYMBOL_NAME(sys_ni_syscall) /* 250 sys_alloc_hugepages */
- .long SYMBOL_NAME(sys_ni_syscall) /* sys_free_hugepages */
- .long SYMBOL_NAME(sys_ni_syscall) /* sys_exit_group */
- .long SYMBOL_NAME(sys_ni_syscall) /* sys_lookup_dcookie */
- .long SYMBOL_NAME(sys_ni_syscall) /* sys_epoll_create */
- .long SYMBOL_NAME(sys_ni_syscall) /* sys_epoll_ctl 255 */
- .long SYMBOL_NAME(sys_ni_syscall) /* sys_epoll_wait */
- .long SYMBOL_NAME(sys_ni_syscall) /* sys_remap_file_pages */
- .long SYMBOL_NAME(sys_ni_syscall) /* sys_set_tid_address */
-
- .rept NR_syscalls-(.-sys_call_table)/4
- .long SYMBOL_NAME(sys_ni_syscall)
- .endr
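
The trailing .rept block pads the table out to NR_syscalls entries, so any syscall number without an explicit handler lands on sys_ni_syscall and fails with -ENOSYS instead of jumping through a stale pointer. A minimal userspace sketch of the same padding idea; NR_SYSCALLS and the emitted count below are illustrative values, not taken from this tree:

    /* Sketch only: pad every unassigned slot with a stub. */
    #include <stdio.h>

    #define NR_SYSCALLS 270                /* assumed table size */

    typedef long (*syscall_fn)(void);

    static long sys_ni_stub(void) { return -38; /* -ENOSYS */ }

    static syscall_fn table[NR_SYSCALLS];

    int main(void)
    {
        int emitted = 259;                 /* assumed explicit entries */
        for (int i = emitted; i < NR_SYSCALLS; i++)
            table[i] = sys_ni_stub;
        printf("padded %d slots\n", NR_SYSCALLS - emitted);
        return 0;
    }
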
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/arch/xen/kernel/head.S
--- a/linux-2.4.30-xen-sparse/arch/xen/kernel/head.S Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,39 +0,0 @@
-
-.section __xen_guest
- .asciz "GUEST_OS=linux,GUEST_VER=2.4,XEN_VER=2.0,VIRT_BASE=0xC0000000"
-
-.text
-#include <linux/config.h>
-#include <linux/threads.h>
-#include <linux/linkage.h>
-#include <asm/segment.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/desc.h>
-
-ENTRY(stext)
-ENTRY(_stext)
- cld
- lss stack_start,%esp
- /* Copy the necessary stuff from xen_start_info structure. */
- mov $SYMBOL_NAME(xen_start_info_union),%edi
- mov $128,%ecx
- rep movsl
- jmp SYMBOL_NAME(start_kernel)
-
-ENTRY(stack_start)
- .long SYMBOL_NAME(init_task_union)+8192, __KERNEL_DS
-
-.org 0x1000
-ENTRY(empty_zero_page)
-
-.org 0x2000
-ENTRY(default_ldt)
-
-.org 0x3000
-ENTRY(cpu0_pte_quicklist)
-
-.org 0x3400
-ENTRY(cpu0_pgd_quicklist)
-
-.org 0x3800
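
The .org directives carve fixed, page-aligned slots out of the image: empty_zero_page at 0x1000, default_ldt at 0x2000, the two quicklist heads at 0x3000 and 0x3400, with the image padded out to 0x3800. A loose C analogue of those reservations, with sizes derived from the gaps between the offsets (illustrative only):

    /* Loose C analogue of the .org layout above. */
    static char empty_zero_page[0x1000] __attribute__((aligned(0x1000)));
    static char default_ldt[0x1000]     __attribute__((aligned(0x1000)));
    static char cpu0_pte_quicklist[0x400];
    static char cpu0_pgd_quicklist[0x400];
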
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/arch/xen/kernel/i386_ksyms.c
--- a/linux-2.4.30-xen-sparse/arch/xen/kernel/i386_ksyms.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,180 +0,0 @@
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/smp.h>
-#include <linux/user.h>
-#include <linux/elfcore.h>
-#include <linux/mca.h>
-#include <linux/sched.h>
-#include <linux/in6.h>
-#include <linux/interrupt.h>
-#include <linux/smp_lock.h>
-#include <linux/pm.h>
-#include <linux/pci.h>
-#include <linux/apm_bios.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/tty.h>
-
-#include <asm/semaphore.h>
-#include <asm/processor.h>
-#include <asm/i387.h>
-#include <asm/uaccess.h>
-#include <asm/checksum.h>
-#include <asm/io.h>
-#include <asm/hardirq.h>
-#include <asm/delay.h>
-#include <asm/irq.h>
-#include <asm/mmx.h>
-#include <asm/desc.h>
-#include <asm/pgtable.h>
-#include <asm/pgalloc.h>
-
-extern void dump_thread(struct pt_regs *, struct user *);
-extern spinlock_t rtc_lock;
-
-#if defined(CONFIG_APMXXX) || defined(CONFIG_APM_MODULEXXX)
-extern void machine_real_restart(unsigned char *, int);
-EXPORT_SYMBOL(machine_real_restart);
-extern void default_idle(void);
-EXPORT_SYMBOL(default_idle);
-#endif
-
-#ifdef CONFIG_SMP
-extern void FASTCALL( __write_lock_failed(rwlock_t *rw));
-extern void FASTCALL( __read_lock_failed(rwlock_t *rw));
-#endif
-
-#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_HD) || defined(CONFIG_BLK_DEV_IDE_MODULE) || defined(CONFIG_BLK_DEV_HD_MODULE)
-extern struct drive_info_struct drive_info;
-EXPORT_SYMBOL(drive_info);
-#endif
-
-// XXX extern unsigned long get_cmos_time(void);
-
-/* platform dependent support */
-EXPORT_SYMBOL(boot_cpu_data);
-EXPORT_SYMBOL(dump_thread);
-EXPORT_SYMBOL(dump_fpu);
-EXPORT_SYMBOL(dump_extended_fpu);
-EXPORT_SYMBOL(__ioremap);
-EXPORT_SYMBOL(iounmap);
-EXPORT_SYMBOL(enable_irq);
-EXPORT_SYMBOL(disable_irq);
-EXPORT_SYMBOL(disable_irq_nosync);
-EXPORT_SYMBOL(probe_irq_mask);
-EXPORT_SYMBOL(kernel_thread);
-EXPORT_SYMBOL(pm_idle);
-EXPORT_SYMBOL(pm_power_off);
-EXPORT_SYMBOL(apm_info);
-//EXPORT_SYMBOL(gdt);
-EXPORT_SYMBOL(empty_zero_page);
-EXPORT_SYMBOL(phys_to_machine_mapping);
-
-
-#ifdef CONFIG_DEBUG_IOVIRT
-EXPORT_SYMBOL(__io_virt_debug);
-#endif
-
-EXPORT_SYMBOL_NOVERS(__down_failed);
-EXPORT_SYMBOL_NOVERS(__down_failed_interruptible);
-EXPORT_SYMBOL_NOVERS(__down_failed_trylock);
-EXPORT_SYMBOL_NOVERS(__up_wakeup);
-/* Networking helper routines. */
-EXPORT_SYMBOL(csum_partial_copy_generic);
-/* Delay loops */
-EXPORT_SYMBOL(__ndelay);
-EXPORT_SYMBOL(__udelay);
-EXPORT_SYMBOL(__delay);
-EXPORT_SYMBOL(__const_udelay);
-
-EXPORT_SYMBOL_NOVERS(__get_user_1);
-EXPORT_SYMBOL_NOVERS(__get_user_2);
-EXPORT_SYMBOL_NOVERS(__get_user_4);
-
-EXPORT_SYMBOL(strtok);
-EXPORT_SYMBOL(strpbrk);
-EXPORT_SYMBOL(strstr);
-
-EXPORT_SYMBOL(strncpy_from_user);
-EXPORT_SYMBOL(__strncpy_from_user);
-EXPORT_SYMBOL(clear_user);
-EXPORT_SYMBOL(__clear_user);
-EXPORT_SYMBOL(__generic_copy_from_user);
-EXPORT_SYMBOL(__generic_copy_to_user);
-EXPORT_SYMBOL(strnlen_user);
-
-
-EXPORT_SYMBOL(pci_alloc_consistent);
-EXPORT_SYMBOL(pci_free_consistent);
-
-#ifdef CONFIG_PCI
-EXPORT_SYMBOL(pcibios_penalize_isa_irq);
-EXPORT_SYMBOL(pci_mem_start);
-#endif
-
-
-#ifdef CONFIG_X86_USE_3DNOW
-EXPORT_SYMBOL(_mmx_memcpy);
-EXPORT_SYMBOL(mmx_clear_page);
-EXPORT_SYMBOL(mmx_copy_page);
-#endif
-
-#ifdef CONFIG_SMP
-EXPORT_SYMBOL(cpu_data);
-EXPORT_SYMBOL(kernel_flag_cacheline);
-EXPORT_SYMBOL(smp_num_cpus);
-EXPORT_SYMBOL(cpu_online_map);
-EXPORT_SYMBOL_NOVERS(__write_lock_failed);
-EXPORT_SYMBOL_NOVERS(__read_lock_failed);
-
-/* Global SMP irq stuff */
-EXPORT_SYMBOL(synchronize_irq);
-EXPORT_SYMBOL(global_irq_holder);
-EXPORT_SYMBOL(__global_cli);
-EXPORT_SYMBOL(__global_sti);
-EXPORT_SYMBOL(__global_save_flags);
-EXPORT_SYMBOL(__global_restore_flags);
-EXPORT_SYMBOL(smp_call_function);
-
-/* TLB flushing */
-EXPORT_SYMBOL(flush_tlb_page);
-
-/* HT support */
-EXPORT_SYMBOL(smp_num_siblings);
-EXPORT_SYMBOL(cpu_sibling_map);
-#endif
-
-#ifdef CONFIG_X86_IO_APIC
-EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
-#endif
-
-#ifdef CONFIG_VT
-EXPORT_SYMBOL(screen_info);
-#endif
-
-EXPORT_SYMBOL(get_wchan);
-
-EXPORT_SYMBOL(rtc_lock);
-
-#undef memcpy
-#undef memset
-extern void * memset(void *,int,__kernel_size_t);
-extern void * memcpy(void *,const void *,__kernel_size_t);
-EXPORT_SYMBOL_NOVERS(memcpy);
-EXPORT_SYMBOL_NOVERS(memset);
-
-#ifdef CONFIG_HAVE_DEC_LOCK
-EXPORT_SYMBOL(atomic_dec_and_lock);
-#endif
-
-#ifdef CONFIG_MULTIQUAD
-EXPORT_SYMBOL(xquad_portio);
-#endif
-
-#include <asm/xen_proc.h>
-EXPORT_SYMBOL(create_xen_proc_entry);
-EXPORT_SYMBOL(remove_xen_proc_entry);
-
-EXPORT_SYMBOL(evtchn_do_upcall);
-EXPORT_SYMBOL(force_evtchn_callback);
-EXPORT_SYMBOL(HYPERVISOR_shared_info);
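
This file is purely an export table: each EXPORT_SYMBOL makes the named symbol resolvable when a module is loaded, and the Xen-specific exports at the bottom open the event-channel upcall path to modules. A hypothetical 2.4-style module that links only because of these exports:

    /* Hypothetical module; udelay() resolves via EXPORT_SYMBOL(__udelay). */
    #include <linux/module.h>
    #include <linux/kernel.h>
    #include <linux/delay.h>

    int init_module(void)
    {
        udelay(10);
        printk(KERN_INFO "demo module loaded\n");
        return 0;
    }

    void cleanup_module(void)
    {
    }
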
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/arch/xen/kernel/irq.c
--- a/linux-2.4.30-xen-sparse/arch/xen/kernel/irq.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,1242 +0,0 @@
-/*
- * linux/arch/i386/kernel/irq.c
- *
- * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
- *
- * This file contains the code used by various IRQ handling routines:
- * asking for different IRQ's should be done through these routines
- * instead of just grabbing them. Thus setups with different IRQ numbers
- * shouldn't result in any weird surprises, and installing new handlers
- * should be easier.
- */
-
-/*
- * (mostly architecture independent, will move to kernel/irq.c in 2.5.)
- *
- * IRQs are in fact implemented a bit like signal handlers for the kernel.
- * Naturally it's not a 1:1 relation, but there are similarities.
- */
-
-#include <linux/config.h>
-#include <linux/ptrace.h>
-#include <linux/errno.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/timex.h>
-#include <linux/slab.h>
-#include <linux/random.h>
-#include <linux/smp_lock.h>
-#include <linux/init.h>
-#include <linux/kernel_stat.h>
-#include <linux/irq.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-
-#include <asm/atomic.h>
-#include <asm/io.h>
-#include <asm/smp.h>
-#include <asm/system.h>
-#include <asm/bitops.h>
-#include <asm/uaccess.h>
-#include <asm/pgalloc.h>
-#include <asm/delay.h>
-#include <asm/desc.h>
-#include <asm/irq.h>
-
-
-
-/*
- * Linux has a controller-independent x86 interrupt architecture.
- * Every controller has a 'controller-template' that is used
- * by the main code to do the right thing. Each driver-visible
- * interrupt source is transparently wired to the appropriate
- * controller. Thus drivers need not be aware of the
- * interrupt-controller.
- *
- * Various interrupt controllers we handle: 8259 PIC, SMP IO-APIC,
- * PIIX4's internal 8259 PIC and SGI's Visual Workstation Cobalt (IO-)APIC.
- * (IO-APICs assumed to be messaging to Pentium local-APICs)
- *
- * the code is designed to be easily extended with new/different
- * interrupt controllers, without having to do assembly magic.
- */
-
-/*
- * Controller mappings for all interrupt sources:
- */
-irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned =
- { [0 ... NR_IRQS-1] = { 0, &no_irq_type, NULL, 0, SPIN_LOCK_UNLOCKED}};
-
-static void register_irq_proc (unsigned int irq);
-
-/*
- * Special irq handlers.
- */
-
-void no_action(int cpl, void *dev_id, struct pt_regs *regs) { }
-
-/*
- * Generic no controller code
- */
-
-static void enable_none(unsigned int irq) { }
-static unsigned int startup_none(unsigned int irq) { return 0; }
-static void disable_none(unsigned int irq) { }
-static void ack_none(unsigned int irq)
-{
-/*
- * 'what should we do if we get a hw irq event on an illegal vector'.
- * Each architecture has to answer this itself; it doesn't deserve
- * a generic callback, I think.
- */
-#if CONFIG_X86
- printk("unexpected IRQ trap at vector %02x\n", irq);
-#ifdef CONFIG_X86_LOCAL_APIC
- /*
- * Currently unexpected vectors happen only on SMP and APIC.
- * We _must_ ack these because every local APIC has only N
- * irq slots per priority level, and a 'hanging, unacked' IRQ
- * holds up an irq slot - in excessive cases (when multiple
- * unexpected vectors occur) that might lock up the APIC
- * completely.
- */
- ack_APIC_irq();
-#endif
-#endif
-}
-
-/* startup is the same as "enable", shutdown is same as "disable" */
-#define shutdown_none disable_none
-#define end_none enable_none
-
-struct hw_interrupt_type no_irq_type = {
- "none",
- startup_none,
- shutdown_none,
- enable_none,
- disable_none,
- ack_none,
- end_none
-};
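
The initializer is positional C89, so it must track the field order of hw_interrupt_type exactly. With C99 designated initializers (not available to this tree; shown purely for readability, field names assumed from the 2.4 definition) the same table would read:

    struct hw_interrupt_type no_irq_type = {
        .typename = "none",
        .startup  = startup_none,
        .shutdown = shutdown_none,
        .enable   = enable_none,
        .disable  = disable_none,
        .ack      = ack_none,
        .end      = end_none,
    };
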
-
-atomic_t irq_err_count;
-#ifdef CONFIG_X86_IO_APIC
-#ifdef APIC_MISMATCH_DEBUG
-atomic_t irq_mis_count;
-#endif
-#endif
-
-/*
- * Generic, controller-independent functions:
- */
-
-int show_interrupts(struct seq_file *p, void *v)
-{
- int i, j;
- struct irqaction * action;
-
- seq_printf(p, " ");
- for (j=0; j<smp_num_cpus; j++)
- seq_printf(p, "CPU%d ",j);
- seq_putc(p,'\n');
-
- for (i = 0 ; i < NR_IRQS ; i++) {
- action = irq_desc[i].action;
- if (!action)
- continue;
- seq_printf(p, "%3d: ",i);
-#ifndef CONFIG_SMP
- seq_printf(p, "%10u ", kstat_irqs(i));
-#else
- for (j = 0; j < smp_num_cpus; j++)
- seq_printf(p, "%10u ",
- kstat.irqs[cpu_logical_map(j)][i]);
-#endif
- seq_printf(p, " %14s", irq_desc[i].handler->typename);
- seq_printf(p, " %s", action->name);
-
- for (action=action->next; action; action = action->next)
- seq_printf(p, ", %s", action->name);
- seq_putc(p,'\n');
- }
- seq_printf(p, "NMI: ");
- for (j = 0; j < smp_num_cpus; j++)
- seq_printf(p, "%10u ",
- nmi_count(cpu_logical_map(j)));
- seq_printf(p, "\n");
-#if CONFIG_X86_LOCAL_APIC
- seq_printf(p, "LOC: ");
- for (j = 0; j < smp_num_cpus; j++)
- seq_printf(p, "%10u ",
- apic_timer_irqs[cpu_logical_map(j)]);
- seq_printf(p, "\n");
-#endif
- seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
-#ifdef CONFIG_X86_IO_APIC
-#ifdef APIC_MISMATCH_DEBUG
- seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
-#endif
-#endif
-
- return 0;
-}
-
-
-/*
- * Global interrupt locks for SMP. Allow interrupts to come in on any
- * CPU, yet make cli/sti act globally to protect critical regions..
- */
-
-#ifdef CONFIG_SMP
-unsigned char global_irq_holder = NO_PROC_ID;
-unsigned volatile long global_irq_lock; /* pedantic: long for set_bit --RR */
-
-extern void show_stack(unsigned long* esp);
-
-static void show(char * str)
-{
- int i;
- int cpu = smp_processor_id();
-
- printk("\n%s, CPU %d:\n", str, cpu);
- printk("irq: %d [",irqs_running());
- for(i=0;i < smp_num_cpus;i++)
- printk(" %d",local_irq_count(i));
- printk(" ]\nbh: %d [",spin_is_locked(&global_bh_lock) ? 1 : 0);
- for(i=0;i < smp_num_cpus;i++)
- printk(" %d",local_bh_count(i));
-
- printk(" ]\nStack dumps:");
- for(i = 0; i < smp_num_cpus; i++) {
- unsigned long esp;
- if (i == cpu)
- continue;
- printk("\nCPU %d:",i);
- esp = init_tss[i].esp0;
- if (!esp) {
- /* tss->esp0 is set to NULL in cpu_init(),
- * it's initialized when the cpu returns to user
- * space. -- manfreds
- */
- printk(" <unknown> ");
- continue;
- }
- esp &= ~(THREAD_SIZE-1);
- esp += sizeof(struct task_struct);
- show_stack((void*)esp);
- }
- printk("\nCPU %d:",cpu);
- show_stack(NULL);
- printk("\n");
-}
-
-#define MAXCOUNT 100000000
-
-/*
- * I had a lockup scenario where a tight loop doing
- * spin_unlock()/spin_lock() on CPU#1 was racing with
- * spin_lock() on CPU#0. CPU#0 should have noticed spin_unlock(), but
- * apparently the spin_unlock() information did not make it
- * through to CPU#0 ... nasty, is this by design, do we have to limit
- * 'memory update oscillation frequency' artificially like here?
- *
- * Such 'high frequency update' races can be avoided by careful design, but
- * some of our major constructs like spinlocks use similar techniques,
- * it would be nice to clarify this issue. Set this define to 0 if you
- * want to check whether your system freezes. I suspect the delay done
- * by SYNC_OTHER_CORES() is in correlation with 'snooping latency', but
- * I thought that such things are guaranteed by design, since we use
- * the 'LOCK' prefix.
- */
-#define SUSPECTED_CPU_OR_CHIPSET_BUG_WORKAROUND 0
-
-#if SUSPECTED_CPU_OR_CHIPSET_BUG_WORKAROUND
-# define SYNC_OTHER_CORES(x) udelay(x+1)
-#else
-/*
- * We have to allow irqs to arrive between __sti and __cli
- */
-# define SYNC_OTHER_CORES(x) __asm__ __volatile__ ("nop")
-#endif
-
-static inline void wait_on_irq(int cpu)
-{
- int count = MAXCOUNT;
-
- for (;;) {
-
- /*
- * Wait until all interrupts are gone. Wait
- * for bottom half handlers unless we're
- * already executing in one..
- */
- if (!irqs_running())
- if (local_bh_count(cpu) || !spin_is_locked(&global_bh_lock))
- break;
-
- /* Duh, we have to loop. Release the lock to avoid deadlocks */
- clear_bit(0,&global_irq_lock);
-
- for (;;) {
- if (!--count) {
- show("wait_on_irq");
- count = ~0;
- }
- __sti();
- SYNC_OTHER_CORES(cpu);
- __cli();
- if (irqs_running())
- continue;
- if (global_irq_lock)
- continue;
- if (!local_bh_count(cpu) && spin_is_locked(&global_bh_lock))
- continue;
- if (!test_and_set_bit(0,&global_irq_lock))
- break;
- }
- }
-}
-
-/*
- * This is called when we want to synchronize with
- * interrupts. We may for example tell a device to
- * stop sending interrupts: but to make sure there
- * are no interrupts that are executing on another
- * CPU we need to call this function.
- */
-void synchronize_irq(void)
-{
- if (irqs_running()) {
- /* Stupid approach */
- cli();
- sti();
- }
-}
-
-static inline void get_irqlock(int cpu)
-{
- if (test_and_set_bit(0,&global_irq_lock)) {
- /* do we already hold the lock? */
- if ((unsigned char) cpu == global_irq_holder)
- return;
- /* Uhhuh.. Somebody else got it. Wait.. */
- do {
- do {
- rep_nop();
- } while (test_bit(0,&global_irq_lock));
- } while (test_and_set_bit(0,&global_irq_lock));
- }
- /*
- * We also have to make sure that nobody else is running
- * in an interrupt context.
- */
- wait_on_irq(cpu);
-
- /*
- * Ok, finally..
- */
- global_irq_holder = cpu;
-}
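
The nested loops are the classic test-and-test-and-set spin: the expensive atomic test_and_set_bit() is only retried after a cheap read-only test_bit() has seen the lock clear, so contended CPUs spin in their own cache instead of hammering the bus. Distilled to its shape:

    /* Test-and-test-and-set, distilled from get_irqlock() above. */
    while (test_and_set_bit(0, &global_irq_lock)) {   /* atomic claim */
        do {
            rep_nop();                  /* pause; read-only spin */
        } while (test_bit(0, &global_irq_lock));
    }
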
-
-/*
- * A global "cli()" while in an interrupt context
- * turns into just a local cli(). Interrupts
- * should use spinlocks for the (very unlikely)
- * case that they ever want to protect against
- * each other.
- *
- * If we already have local interrupts disabled,
- * this will not turn a local disable into a
- * global one (problems with spinlocks: this makes
- * save_flags+cli+sti usable inside a spinlock).
- */
-void __global_cli(void)
-{
- unsigned int flags;
-
- __save_flags(flags);
- if (!flags) {
- int cpu = smp_processor_id();
- __cli();
- if (!local_irq_count(cpu))
- get_irqlock(cpu);
- }
-}
-
-void __global_sti(void)
-{
- int cpu = smp_processor_id();
-
- if (!local_irq_count(cpu))
- release_irqlock(cpu);
- __sti();
-}
-
-/*
- * SMP flags value to restore to:
- * 0 - global cli
- * 1 - global sti
- * 2 - local cli
- * 3 - local sti
- */
-unsigned long __global_save_flags(void)
-{
- int retval;
- int local_enabled;
- unsigned long flags;
- int cpu = smp_processor_id();
-
- __save_flags(flags);
- local_enabled = !flags;
- /* default to local */
- retval = 2 + local_enabled;
-
- /* check for global flags if we're not in an interrupt */
- if (!local_irq_count(cpu)) {
- if (local_enabled)
- retval = 1;
- if (global_irq_holder == cpu)
- retval = 0;
- }
- return retval;
-}
-
-void __global_restore_flags(unsigned long flags)
-{
- switch (flags) {
- case 0:
- __global_cli();
- break;
- case 1:
- __global_sti();
- break;
- case 2:
- __cli();
- break;
- case 3:
- __sti();
- break;
- default:
- printk("global_restore_flags: %08lx (%08lx)\n",
- flags, (&flags)[-1]);
- }
-}
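
Callers never decode the 0-3 states themselves; on SMP the familiar save_flags()/cli()/restore_flags() trio maps onto these functions, and the restore simply replays whichever state was captured. The intended pairing, sketched:

    unsigned long flags;

    flags = __global_save_flags();   /* capture one of states 0..3  */
    __global_cli();                  /* global cli unless in an irq */
    /* ... section shared with interrupt handlers ... */
    __global_restore_flags(flags);   /* replay the captured state   */
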
-
-#endif
-
-/*
- * This should really return information about whether
- * we should do bottom half handling etc. Right now we
- * end up _always_ checking the bottom half, which is a
- * waste of time and is not what some drivers would
- * prefer.
- */
-int handle_IRQ_event(unsigned int irq, struct pt_regs * regs, struct irqaction * action)
-{
- int status;
- int cpu = smp_processor_id();
-
- irq_enter(cpu, irq);
-
- status = 1; /* Force the "do bottom halves" bit */
-
- if (!(action->flags & SA_INTERRUPT))
- __sti();
-
- do {
- status |= action->flags;
- action->handler(irq, action->dev_id, regs);
- action = action->next;
- } while (action);
- if (status & SA_SAMPLE_RANDOM)
- add_interrupt_randomness(irq);
- __cli();
-
- irq_exit(cpu, irq);
-
- return status;
-}
-
-/*
- * Generic enable/disable code: this just calls
- * down into the PIC-specific version for the actual
- * hardware disable after having gotten the irq
- * controller lock.
- */
-
-/**
- * disable_irq_nosync - disable an irq without waiting
- * @irq: Interrupt to disable
- *
- * Disable the selected interrupt line. Disables and Enables are
- * nested.
- * Unlike disable_irq(), this function does not ensure existing
- * instances of the IRQ handler have completed before returning.
- *
- * This function may be called from IRQ context.
- */
-
-inline void disable_irq_nosync(unsigned int irq)
-{
- irq_desc_t *desc = irq_desc + irq;
- unsigned long flags;
-
- spin_lock_irqsave(&desc->lock, flags);
- if (!desc->depth++) {
- desc->status |= IRQ_DISABLED;
- desc->handler->disable(irq);
- }
- spin_unlock_irqrestore(&desc->lock, flags);
-}
-
-/**
- * disable_irq - disable an irq and wait for completion
- * @irq: Interrupt to disable
- *
- * Disable the selected interrupt line. Enables and Disables are
- * nested.
- * This function waits for any pending IRQ handlers for this interrupt
- * to complete before returning. If you use this function while
- * holding a resource the IRQ handler may need you will deadlock.
- *
- * This function may be called - with care - from IRQ context.
- */
-
-void disable_irq(unsigned int irq)
-{
- disable_irq_nosync(irq);
-
- if (!local_irq_count(smp_processor_id())) {
- do {
- barrier();
- cpu_relax();
- } while (irq_desc[irq].status & IRQ_INPROGRESS);
- }
-}
-
-/**
- * enable_irq - enable handling of an irq
- * @irq: Interrupt to enable
- *
- * Undoes the effect of one call to disable_irq(). If this
- * matches the last disable, processing of interrupts on this
- * IRQ line is re-enabled.
- *
- * This function may be called from IRQ context.
- */
-
-void enable_irq(unsigned int irq)
-{
- irq_desc_t *desc = irq_desc + irq;
- unsigned long flags;
-
- spin_lock_irqsave(&desc->lock, flags);
- switch (desc->depth) {
- case 1: {
- unsigned int status = desc->status & ~IRQ_DISABLED;
- desc->status = status;
- if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
- desc->status = status | IRQ_REPLAY;
- hw_resend_irq(desc->handler,irq);
- }
- desc->handler->enable(irq);
- /* fall-through */
- }
- default:
- desc->depth--;
- break;
- case 0:
- printk("enable_irq(%u) unbalanced from %p\n", irq,
- __builtin_return_address(0));
- }
- spin_unlock_irqrestore(&desc->lock, flags);
-}
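
Because desc->depth counts outstanding disables, the two calls nest: the line is physically re-enabled only when the final enable_irq() balances the first disable_irq(). For example:

    /* Nesting behaviour of the depth counter (illustrative). */
    disable_irq(irq);   /* depth 0 -> 1: line masked            */
    disable_irq(irq);   /* depth 1 -> 2: still masked           */
    enable_irq(irq);    /* depth 2 -> 1: still masked           */
    enable_irq(irq);    /* depth 1 -> 0: handler->enable() runs */
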
-
-/*
- * do_IRQ handles all normal device IRQ's (the special
- * SMP cross-CPU interrupts have their own specific
- * handlers).
- */
-asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
-{
- /*
- * We ack quickly, we don't want the irq controller
- * thinking we're snobs just because some other CPU has
- * disabled global interrupts (we have already done the
- * INT_ACK cycles, it's too late to try to pretend to the
- * controller that we aren't taking the interrupt).
- *
- * 0 return value means that this irq is already being
- * handled by some other CPU. (or is disabled)
- */
- int irq = regs->orig_eax & 0xff; /* high bits used in ret_from_ code */
- int cpu = smp_processor_id();
- irq_desc_t *desc = irq_desc + irq;
- struct irqaction * action;
- unsigned int status;
-#ifdef CONFIG_DEBUG_STACKOVERFLOW
- long esp;
-
- /* Debugging check for stack overflow: is there less than 1KB free? */
- __asm__ __volatile__("andl %%esp,%0" : "=r" (esp) : "0" (8191));
- if (unlikely(esp < (sizeof(struct task_struct) + 1024))) {
- extern void show_stack(unsigned long *);
-
- printk("do_IRQ: stack overflow: %ld\n",
- esp - sizeof(struct task_struct));
- __asm__ __volatile__("movl %%esp,%0" : "=r" (esp));
- show_stack((void *)esp);
- }
-#endif
-
- kstat.irqs[cpu][irq]++;
- spin_lock(&desc->lock);
- desc->handler->ack(irq);
- /*
- REPLAY is when Linux resends an IRQ that was dropped earlier
- WAITING is used by probe to mark irqs that are being tested
- */
- status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
- status |= IRQ_PENDING; /* we _want_ to handle it */
-
- /*
- * If the IRQ is disabled for whatever reason, we cannot
- * use the action we have.
- */
- action = NULL;
- if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
- action = desc->action;
- status &= ~IRQ_PENDING; /* we commit to handling */
- status |= IRQ_INPROGRESS; /* we are handling it */
- }
- desc->status = status;
-
- /*
- * If there is no IRQ handler or it was disabled, exit early.
- Since we set PENDING, if another processor is handling
- a different instance of this same irq, the other processor
- will take care of it.
- */
- if (!action)
- goto out;
-
- /*
- * Edge triggered interrupts need to remember
- * pending events.
- * This applies to any hw interrupts that allow a second
- * instance of the same irq to arrive while we are in do_IRQ
- * or in the handler. But the code here only handles the _second_
- * instance of the irq, not the third or fourth. So it is mostly
- * useful for irq hardware that does not mask cleanly in an
- * SMP environment.
- */
- for (;;) {
- spin_unlock(&desc->lock);
- handle_IRQ_event(irq, regs, action);
- spin_lock(&desc->lock);
-
- if (!(desc->status & IRQ_PENDING))
- break;
- desc->status &= ~IRQ_PENDING;
- }
- desc->status &= ~IRQ_INPROGRESS;
-out:
- /*
- * The ->end() handler has to deal with interrupts which got
- * disabled while the handler was running.
- */
- desc->handler->end(irq);
- spin_unlock(&desc->lock);
-
- if (softirq_pending(cpu))
- do_softirq();
- return 1;
-}
-
-/**
- * request_irq - allocate an interrupt line
- * @irq: Interrupt line to allocate
- * @handler: Function to be called when the IRQ occurs
- * @irqflags: Interrupt type flags
- * @devname: An ascii name for the claiming device
- * @dev_id: A cookie passed back to the handler function
- *
- * This call allocates interrupt resources and enables the
- * interrupt line and IRQ handling. From the point this
- * call is made your handler function may be invoked. Since
- * your handler function must clear any interrupt the board
- * raises, you must take care both to initialise your hardware
- * and to set up the interrupt handler in the right order.
- *
- * Dev_id must be globally unique. Normally the address of the
- * device data structure is used as the cookie. Since the handler
- * receives this value it makes sense to use it.
- *
- * If your interrupt is shared you must pass a non NULL dev_id
- * as this is required when freeing the interrupt.
- *
- * Flags:
- *
- * SA_SHIRQ Interrupt is shared
- *
- * SA_INTERRUPT Disable local interrupts while processing
- *
- * SA_SAMPLE_RANDOM The interrupt can be used for entropy
- *
- */
-
-int request_irq(unsigned int irq,
- void (*handler)(int, void *, struct pt_regs *),
- unsigned long irqflags,
- const char * devname,
- void *dev_id)
-{
- int retval;
- struct irqaction * action;
-
-#if 1
- /*
- * Sanity-check: shared interrupts should REALLY pass in
- * a real dev-ID, otherwise we'll have trouble later trying
- * to figure out which interrupt is which (messes up the
- * interrupt freeing logic etc).
- */
- if (irqflags & SA_SHIRQ) {
- if (!dev_id)
- printk("Bad boy: %s (at 0x%x) called us without a
dev_id!\n", devname, (&irq)[-1]);
- }
-#endif
-
- if (irq >= NR_IRQS)
- return -EINVAL;
- if (!handler)
- return -EINVAL;
-
- action = (struct irqaction *)
- kmalloc(sizeof(struct irqaction), GFP_KERNEL);
- if (!action)
- return -ENOMEM;
-
- action->handler = handler;
- action->flags = irqflags;
- action->mask = 0;
- action->name = devname;
- action->next = NULL;
- action->dev_id = dev_id;
-
- retval = setup_irq(irq, action);
- if (retval)
- kfree(action);
- return retval;
-}
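
A minimal 2.4-era caller, showing the handler signature this API expects and the request/free pairing; the device structure and names here are hypothetical:

    struct mydev { int irq; };          /* hypothetical device */

    static void mydev_interrupt(int irq, void *dev_id, struct pt_regs *regs)
    {
        /* acknowledge the board, then handle the event */
    }

    static int mydev_attach(struct mydev *dev)
    {
        /* shared line, so a unique dev_id cookie is mandatory */
        return request_irq(dev->irq, mydev_interrupt, SA_SHIRQ,
                           "mydev", dev);
    }

    static void mydev_detach(struct mydev *dev)
    {
        free_irq(dev->irq, dev);        /* same cookie finds our action */
    }
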
-
-/*
- * Internal function to unregister an irqaction - typically used to
- * deallocate special interrupts that are part of the architecture.
- */
-int teardown_irq(unsigned int irq, struct irqaction * old)
-{
- irq_desc_t *desc;
- struct irqaction **p;
- unsigned long flags;
-
- if (irq >= NR_IRQS)
- return -ENOENT;
-
- desc = irq_desc + irq;
- spin_lock_irqsave(&desc->lock,flags);
- p = &desc->action;
- for (;;) {
- struct irqaction * action = *p;
- if (action) {
- struct irqaction **pp = p;
- p = &action->next;
- if (action != old)
- continue;
-
- /* Found it - now remove it from the list of entries */
- *pp = action->next;
- if (!desc->action) {
- desc->status |= IRQ_DISABLED;
- desc->handler->shutdown(irq);
- }
- spin_unlock_irqrestore(&desc->lock,flags);
-
-#ifdef CONFIG_SMP
- /* Wait to make sure it's not being used on another CPU */
- while (desc->status & IRQ_INPROGRESS) {
- barrier();
- cpu_relax();
- }
-#endif
- return 0;
- }
- printk("Trying to free free IRQ%d\n",irq);
- spin_unlock_irqrestore(&desc->lock,flags);
- return -ENOENT;
- }
-}
-
-/**
- * free_irq - free an interrupt
- * @irq: Interrupt line to free
- * @dev_id: Device identity to free
- *
- * Remove an interrupt handler. The handler is removed and if the
- * interrupt line is no longer in use by any driver it is disabled.
- * On a shared IRQ the caller must ensure the interrupt is disabled
- * on the card it drives before calling this function. The function
- * does not return until any executing interrupts for this IRQ
- * have completed.
- *
- * This function may be called from interrupt context.
- *
- * Bugs: Attempting to free an irq in a handler for the same irq hangs
- * the machine.
- */
-
-void free_irq(unsigned int irq, void *dev_id)
-{
- irq_desc_t *desc;
- struct irqaction *action;
- unsigned long flags;
-
- if (irq >= NR_IRQS)
- return;
-
- desc = irq_desc + irq;
- spin_lock_irqsave(&desc->lock,flags);
- for (action = desc->action; action != NULL; action = action->next) {
- if (action->dev_id != dev_id)
- continue;
-
- spin_unlock_irqrestore(&desc->lock,flags);
-
- if (teardown_irq(irq, action) == 0)
- kfree(action);
- return;
- }
- printk("Trying to free free IRQ%d\n",irq);
- spin_unlock_irqrestore(&desc->lock,flags);
- return;
-}
-
-/*
- * IRQ autodetection code..
- *
- * This depends on the fact that any interrupt that
- * comes in on to an unassigned handler will get stuck
- * with "IRQ_WAITING" cleared and the interrupt
- * disabled.
- */
-
-static DECLARE_MUTEX(probe_sem);
-
-/**
- * probe_irq_on - begin an interrupt autodetect
- *
- * Commence probing for an interrupt. The interrupts are scanned
- * and a mask of potential interrupt lines is returned.
- *
- */
-
-unsigned long probe_irq_on(void)
-{
- unsigned int i;
- irq_desc_t *desc;
- unsigned long val;
- unsigned long delay;
-
- down(&probe_sem);
- /*
- * something may have generated an irq long ago and we want to
- * flush such a longstanding irq before considering it as spurious.
- */
- for (i = NR_PIRQS-1; i > 0; i--) {
- desc = irq_desc + i;
-
- spin_lock_irq(&desc->lock);
- if (!irq_desc[i].action)
- irq_desc[i].handler->startup(i);
- spin_unlock_irq(&desc->lock);
- }
-
- /* Wait for longstanding interrupts to trigger. */
- for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
- /* about 20ms delay */ synchronize_irq();
-
- /*
- * enable any unassigned irqs
- * (we must startup again here because if a longstanding irq
- * happened in the previous stage, it may have masked itself)
- */
- for (i = NR_PIRQS-1; i > 0; i--) {
- desc = irq_desc + i;
-
- spin_lock_irq(&desc->lock);
- if (!desc->action) {
- desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
- if (desc->handler->startup(i))
- desc->status |= IRQ_PENDING;
- }
- spin_unlock_irq(&desc->lock);
- }
-
- /*
- * Wait for spurious interrupts to trigger
- */
- for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
- /* about 100ms delay */ synchronize_irq();
-
- /*
- * Now filter out any obviously spurious interrupts
- */
- val = 0;
- for (i = 0; i < NR_PIRQS; i++) {
- irq_desc_t *desc = irq_desc + i;
- unsigned int status;
-
- spin_lock_irq(&desc->lock);
- status = desc->status;
-
- if (status & IRQ_AUTODETECT) {
- /* It triggered already - consider it spurious. */
- if (!(status & IRQ_WAITING)) {
- desc->status = status & ~IRQ_AUTODETECT;
- desc->handler->shutdown(i);
- } else
- if (i < 32)
- val |= 1 << i;
- }
- spin_unlock_irq(&desc->lock);
- }
-
- return val;
-}
-
-/*
- * Return a mask of triggered interrupts (this
- * can handle only legacy ISA interrupts).
- */
-
-/**
- * probe_irq_mask - scan a bitmap of interrupt lines
- * @val: mask of interrupts to consider
- *
- * Scan the ISA bus interrupt lines and return a bitmap of
- * active interrupts. The interrupt probe logic state is then
- * returned to its previous value.
- *
- * Note: we need to scan all the irq's even though we will
- * only return ISA irq numbers - just so that we reset them
- * all to a known state.
- */
-unsigned int probe_irq_mask(unsigned long val)
-{
- int i;
- unsigned int mask;
-
- mask = 0;
- for (i = 0; i < NR_PIRQS; i++) {
- irq_desc_t *desc = irq_desc + i;
- unsigned int status;
-
- spin_lock_irq(&desc->lock);
- status = desc->status;
-
- if (status & IRQ_AUTODETECT) {
- if (i < 16 && !(status & IRQ_WAITING))
- mask |= 1 << i;
-
- desc->status = status & ~IRQ_AUTODETECT;
- desc->handler->shutdown(i);
- }
- spin_unlock_irq(&desc->lock);
- }
- up(&probe_sem);
-
- return mask & val;
-}
-
-/*
- * Return the one interrupt that triggered (this can
- * handle any interrupt source).
- */
-
-/**
- * probe_irq_off - end an interrupt autodetect
- * @val: mask of potential interrupts (unused)
- *
- * Scans the unused interrupt lines and returns the line which
- * appears to have triggered the interrupt. If no interrupt was
- * found then zero is returned. If more than one interrupt is
- * found then minus the first candidate is returned to indicate
- * there is doubt.
- *
- * The interrupt probe logic state is returned to its previous
- * value.
- *
- * BUGS: When used in a module (which arguably shouldn't happen)
- * nothing prevents two IRQ probe callers from overlapping. The
- * results of this are non-optimal.
- */
-
-int probe_irq_off(unsigned long val)
-{
- int i, irq_found, nr_irqs;
-
- nr_irqs = 0;
- irq_found = 0;
- for (i = 0; i < NR_PIRQS; i++) {
- irq_desc_t *desc = irq_desc + i;
- unsigned int status;
-
- spin_lock_irq(&desc->lock);
- status = desc->status;
-
- if (status & IRQ_AUTODETECT) {
- if (!(status & IRQ_WAITING)) {
- if (!nr_irqs)
- irq_found = i;
- nr_irqs++;
- }
- desc->status = status & ~IRQ_AUTODETECT;
- desc->handler->shutdown(i);
- }
- spin_unlock_irq(&desc->lock);
- }
- up(&probe_sem);
-
- if (nr_irqs > 1)
- irq_found = -irq_found;
- return irq_found;
-}
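
Together the probe_* trio forms the autodetect protocol: arm every unassigned line, make the device raise its interrupt, then ask which line fired. The canonical sequence, with the device-specific trigger left hypothetical:

    int irq;
    unsigned long mask = probe_irq_on();    /* arm unassigned lines      */
    mydev_force_interrupt(dev);             /* hypothetical trigger step */
    udelay(100);                            /* give it time to arrive    */
    irq = probe_irq_off(mask);              /* 0: none; <0: ambiguous    */
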
-
-/* this was setup_x86_irq but it seems pretty generic */
-int setup_irq(unsigned int irq, struct irqaction * new)
-{
- int shared = 0;
- unsigned long flags;
- struct irqaction *old, **p;
- irq_desc_t *desc = irq_desc + irq;
-
- /*
- * Some drivers like serial.c use request_irq() heavily,
- * so we have to be careful not to interfere with a
- * running system.
- */
- if (new->flags & SA_SAMPLE_RANDOM) {
- /*
- * This function might sleep, we want to call it first,
- * outside of the atomic block.
- * Yes, this might clear the entropy pool if the wrong
- * driver is attempted to be loaded, without actually
- * installing a new handler. But is this really a problem?
- * Only the sysadmin is able to do this.
- */
- rand_initialize_irq(irq);
- }
-
- /*
- * The following block of code has to be executed atomically
- */
- spin_lock_irqsave(&desc->lock,flags);
- p = &desc->action;
- if ((old = *p) != NULL) {
- /* Can't share interrupts unless both agree to */
- if (!(old->flags & new->flags & SA_SHIRQ)) {
- spin_unlock_irqrestore(&desc->lock,flags);
- return -EBUSY;
- }
-
- /* add new interrupt at end of irq queue */
- do {
- p = &old->next;
- old = *p;
- } while (old);
- shared = 1;
- }
-
- *p = new;
-
- if (!shared) {
- desc->depth = 0;
- desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING |
IRQ_INPROGRESS);
- desc->handler->startup(irq);
- }
- spin_unlock_irqrestore(&desc->lock,flags);
-
- register_irq_proc(irq);
- return 0;
-}
-
-static struct proc_dir_entry * root_irq_dir;
-static struct proc_dir_entry * irq_dir [NR_IRQS];
-
-#define HEX_DIGITS 8
-
-static unsigned int parse_hex_value (const char *buffer,
- unsigned long count, unsigned long *ret)
-{
- unsigned char hexnum [HEX_DIGITS];
- unsigned long value;
- int i;
-
- if (!count)
- return -EINVAL;
- if (count > HEX_DIGITS)
- count = HEX_DIGITS;
- if (copy_from_user(hexnum, buffer, count))
- return -EFAULT;
-
- /*
- * Parse the first 8 characters as a hex string, any non-hex char
- * is end-of-string. '00e1', 'e1', '00E1', 'E1' are all the same.
- */
- value = 0;
-
- for (i = 0; i < count; i++) {
- unsigned int c = hexnum[i];
-
- switch (c) {
- case '0' ... '9': c -= '0'; break;
- case 'a' ... 'f': c -= 'a'-10; break;
- case 'A' ... 'F': c -= 'A'-10; break;
- default:
- goto out;
- }
- value = (value << 4) | c;
- }
-out:
- *ret = value;
- return 0;
-}
-
-#if CONFIG_SMP
-
-static struct proc_dir_entry * smp_affinity_entry [NR_IRQS];
-
-static unsigned long irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = ~0UL };
-static int irq_affinity_read_proc (char *page, char **start, off_t off,
- int count, int *eof, void *data)
-{
- if (count < HEX_DIGITS+1)
- return -EINVAL;
- return sprintf (page, "%08lx\n", irq_affinity[(long)data]);
-}
-
-static int irq_affinity_write_proc (struct file *file, const char *buffer,
- unsigned long count, void *data)
-{
- int irq = (long) data, full_count = count, err;
- unsigned long new_value;
-
- if (!irq_desc[irq].handler->set_affinity)
- return -EIO;
-
- err = parse_hex_value(buffer, count, &new_value);
-
- /*
- * Do not allow disabling IRQs completely - it's a too easy
- * way to make the system unusable accidentally :-) At least
- * one online CPU still has to be targeted.
- */
- if (!(new_value & cpu_online_map))
- return -EINVAL;
-
- irq_affinity[irq] = new_value;
- irq_desc[irq].handler->set_affinity(irq, new_value);
-
- return full_count;
-}
-
-#endif
-
-static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
- int count, int *eof, void *data)
-{
- unsigned long *mask = (unsigned long *) data;
- if (count < HEX_DIGITS+1)
- return -EINVAL;
- return sprintf (page, "%08lx\n", *mask);
-}
-
-static int prof_cpu_mask_write_proc (struct file *file, const char *buffer,
- unsigned long count, void *data)
-{
- unsigned long *mask = (unsigned long *) data, full_count = count, err;
- unsigned long new_value;
-
- err = parse_hex_value(buffer, count, &new_value);
- if (err)
- return err;
-
- *mask = new_value;
- return full_count;
-}
-
-#define MAX_NAMELEN 10
-
-static void register_irq_proc (unsigned int irq)
-{
- char name [MAX_NAMELEN];
-
- if (!root_irq_dir || (irq_desc[irq].handler == &no_irq_type) ||
- irq_dir[irq])
- return;
-
- memset(name, 0, MAX_NAMELEN);
- sprintf(name, "%d", irq);
-
- /* create /proc/irq/1234 */
- irq_dir[irq] = proc_mkdir(name, root_irq_dir);
-
-#if CONFIG_SMP
- {
- struct proc_dir_entry *entry;
-
- /* create /proc/irq/1234/smp_affinity */
- entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);
-
- if (entry) {
- entry->nlink = 1;
- entry->data = (void *)(long)irq;
- entry->read_proc = irq_affinity_read_proc;
- entry->write_proc = irq_affinity_write_proc;
- }
-
- smp_affinity_entry[irq] = entry;
- }
-#endif
-}
-
-unsigned long prof_cpu_mask = -1;
-
-void init_irq_proc (void)
-{
- struct proc_dir_entry *entry;
- int i;
-
- /* create /proc/irq */
- root_irq_dir = proc_mkdir("irq", 0);
-
- /* create /proc/irq/prof_cpu_mask */
- entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);
-
- if (!entry)
- return;
-
- entry->nlink = 1;
- entry->data = (void *)&prof_cpu_mask;
- entry->read_proc = prof_cpu_mask_read_proc;
- entry->write_proc = prof_cpu_mask_write_proc;
-
- /*
- * Create entries for all existing IRQs.
- */
- for (i = 0; i < NR_IRQS; i++)
- register_irq_proc(i);
-}
-
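
The result is one /proc/irq/<n>/smp_affinity file per registered IRQ plus /proc/irq/prof_cpu_mask, each accepting an up-to-8-digit hex CPU mask via parse_hex_value(). A userspace sketch of pinning IRQ 7 to CPU 1 (the path and mask are examples):

    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("/proc/irq/7/smp_affinity", O_WRONLY);
        if (fd < 0)
            return 1;
        write(fd, "00000002\n", 9);   /* hex mask: CPU 1 only */
        close(fd);
        return 0;
    }
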
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/arch/xen/kernel/ldt.c
--- a/linux-2.4.30-xen-sparse/arch/xen/kernel/ldt.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,271 +0,0 @@
-/*
- * linux/kernel/ldt.c
- *
- * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
- * Copyright (C) 1999 Ingo Molnar <mingo@xxxxxxxxxx>
- */
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
-
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <asm/ldt.h>
-#include <asm/desc.h>
-
-#ifdef CONFIG_SMP /* avoids "defined but not used" warning */
-static void flush_ldt(void *mm)
-{
- if (current->active_mm)
- load_LDT(&current->active_mm->context);
-}
-#endif
-
-static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
-{
- void *oldldt;
- void *newldt;
- int oldsize;
-
- if (mincount <= pc->size)
- return 0;
- oldsize = pc->size;
- mincount = (mincount+511)&(~511);
- if (mincount*LDT_ENTRY_SIZE > PAGE_SIZE)
- newldt = vmalloc(mincount*LDT_ENTRY_SIZE);
- else
- newldt = kmalloc(mincount*LDT_ENTRY_SIZE, GFP_KERNEL);
-
- if (!newldt)
- return -ENOMEM;
-
- if (oldsize)
- memcpy(newldt, pc->ldt, oldsize*LDT_ENTRY_SIZE);
-
- oldldt = pc->ldt;
- memset(newldt+oldsize*LDT_ENTRY_SIZE, 0, (mincount-oldsize)*LDT_ENTRY_SIZE);
- wmb();
- pc->ldt = newldt;
- pc->size = mincount;
- if (reload) {
- make_pages_readonly(
- pc->ldt,
- (pc->size*LDT_ENTRY_SIZE)/PAGE_SIZE);
- load_LDT(pc);
- flush_page_update_queue();
-#ifdef CONFIG_SMP
- if (current->mm->cpu_vm_mask != (1<<smp_processor_id()))
- smp_call_function(flush_ldt, 0, 1, 1);
-#endif
- }
- wmb();
- if (oldsize) {
- if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
- vfree(oldldt);
- else
- kfree(oldldt);
- }
- return 0;
-}
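
The rounding (mincount+511) & ~511 grows the LDT in 512-entry steps; at 8 bytes per descriptor that is exactly one 4096-byte page, which keeps make_pages_readonly()'s page-granular protection aligned with the allocation. Worked through:

    /* Worked example of the growth step (LDT_ENTRY_SIZE == 8). */
    int mincount = 5;                     /* caller touches entry 4 */
    mincount = (mincount + 511) & ~511;   /* rounds up to 512       */
    /* 512 * 8 == 4096 == PAGE_SIZE: one whole page is allocated and
     * later marked read-only for the hypervisor. */
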
-
-static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
-{
- int err = alloc_ldt(new, old->size, 0);
- if (err < 0) {
- printk(KERN_WARNING "ldt allocation failed\n");
- new->size = 0;
- return err;
- }
- memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
- make_pages_readonly(new->ldt, (new->size*LDT_ENTRY_SIZE)/PAGE_SIZE);
- return 0;
-}
-
-/*
- * we do not have to muck with descriptors here, that is
- * done in switch_mm() as needed.
- */
-int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
-{
- struct mm_struct * old_mm;
- int retval = 0;
-
- init_MUTEX(&mm->context.sem);
- mm->context.size = 0;
- old_mm = current->mm;
- if (old_mm && old_mm->context.size > 0) {
- down(&old_mm->context.sem);
- retval = copy_ldt(&mm->context, &old_mm->context);
- up(&old_mm->context.sem);
- }
- return retval;
-}
-
-/*
- * No need to lock the MM as we are the last user
- * Do not touch the ldt register, we are already
- * in the next thread.
- */
-void destroy_context(struct mm_struct *mm)
-{
- if (mm->context.size) {
- make_pages_writable(
- mm->context.ldt,
- (mm->context.size*LDT_ENTRY_SIZE)/PAGE_SIZE);
- flush_page_update_queue();
- if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
- vfree(mm->context.ldt);
- else
- kfree(mm->context.ldt);
- mm->context.size = 0;
- }
-}
-
-static int read_ldt(void * ptr, unsigned long bytecount)
-{
- int err;
- unsigned long size;
- struct mm_struct * mm = current->mm;
-
- if (!mm->context.size)
- return 0;
- if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
- bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
-
- down(&mm->context.sem);
- size = mm->context.size*LDT_ENTRY_SIZE;
- if (size > bytecount)
- size = bytecount;
-
- err = 0;
- if (copy_to_user(ptr, mm->context.ldt, size))
- err = -EFAULT;
- up(&mm->context.sem);
- if (err < 0)
- return err;
- if (size != bytecount) {
- /* zero-fill the rest */
- clear_user(ptr+size, bytecount-size);
- }
- return bytecount;
-}
-
-static int read_default_ldt(void * ptr, unsigned long bytecount)
-{
- int err;
- unsigned long size;
- void *address;
-
- err = 0;
- address = &default_ldt[0];
- size = 5*sizeof(struct desc_struct);
- if (size > bytecount)
- size = bytecount;
-
- err = size;
- if (copy_to_user(ptr, address, size))
- err = -EFAULT;
-
- return err;
-}
-
-static int write_ldt(void * ptr, unsigned long bytecount, int oldmode)
-{
- struct mm_struct * mm = current->mm;
- __u32 entry_1, entry_2, *lp;
- unsigned long mach_lp;
- int error;
- struct modify_ldt_ldt_s ldt_info;
-
- error = -EINVAL;
- if (bytecount != sizeof(ldt_info))
- goto out;
- error = -EFAULT;
- if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
- goto out;
-
- error = -EINVAL;
- if (ldt_info.entry_number >= LDT_ENTRIES)
- goto out;
- if (ldt_info.contents == 3) {
- if (oldmode)
- goto out;
- if (ldt_info.seg_not_present == 0)
- goto out;
- }
-
- down(&mm->context.sem);
- if (ldt_info.entry_number >= mm->context.size) {
- error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1);
- if (error < 0)
- goto out_unlock;
- }
-
- lp = (__u32 *) ((ldt_info.entry_number << 3) + (char *) mm->context.ldt);
- mach_lp = arbitrary_virt_to_machine(lp);
-
- /* Allow LDTs to be cleared by the user. */
- if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
- if (oldmode ||
- (ldt_info.contents == 0 &&
- ldt_info.read_exec_only == 1 &&
- ldt_info.seg_32bit == 0 &&
- ldt_info.limit_in_pages == 0 &&
- ldt_info.seg_not_present == 1 &&
- ldt_info.useable == 0 )) {
- entry_1 = 0;
- entry_2 = 0;
- goto install;
- }
- }
-
- entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
- (ldt_info.limit & 0x0ffff);
- entry_2 = (ldt_info.base_addr & 0xff000000) |
- ((ldt_info.base_addr & 0x00ff0000) >> 16) |
- (ldt_info.limit & 0xf0000) |
- ((ldt_info.read_exec_only ^ 1) << 9) |
- (ldt_info.contents << 10) |
- ((ldt_info.seg_not_present ^ 1) << 15) |
- (ldt_info.seg_32bit << 22) |
- (ldt_info.limit_in_pages << 23) |
- 0x7000;
- if (!oldmode)
- entry_2 |= (ldt_info.useable << 20);
-
- /* Install the new entry ... */
-install:
- error = HYPERVISOR_update_descriptor(mach_lp, entry_1, entry_2);
-
-out_unlock:
- up(&mm->context.sem);
-out:
- return error;
-}
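
entry_1 and entry_2 interleave base and limit exactly as an x86 segment descriptor demands: the low halves pack into the first word, the scattered high bits and attribute flags into the second. A standalone rendering of the same packing, with arbitrary example values:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t base = 0x12345678, limit = 0xabcde;  /* arbitrary */
        uint32_t entry_1 = ((base & 0x0000ffff) << 16) | (limit & 0x0ffff);
        uint32_t entry_2 = (base & 0xff000000)         |
                           ((base & 0x00ff0000) >> 16) |
                           (limit & 0xf0000)           |
                           0x7000;                     /* S=1, DPL=3 */
        printf("entry_1=%08x entry_2=%08x\n", entry_1, entry_2);
        return 0;
    }
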
-
-asmlinkage int sys_modify_ldt(int func, void *ptr, unsigned long bytecount)
-{
- int ret = -ENOSYS;
-
- switch (func) {
- case 0:
- ret = read_ldt(ptr, bytecount);
- break;
- case 1:
- ret = write_ldt(ptr, bytecount, 1);
- break;
- case 2:
- ret = read_default_ldt(ptr, bytecount);
- break;
- case 0x11:
- ret = write_ldt(ptr, bytecount, 0);
- break;
- }
- return ret;
-}
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/arch/xen/kernel/pci-pc.c
--- a/linux-2.4.30-xen-sparse/arch/xen/kernel/pci-pc.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,260 +0,0 @@
-/*
- * Low-Level PCI Support for PC
- *
- * (c) 1999--2000 Martin Mares <mj@xxxxxx>
- *
- * Adjusted to use Xen's interface by Rolf Neugebauer, Intel Research Cambridge
- * Further modifications by Keir Fraser, University of Cambridge
- */
-
-#include <linux/config.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/ioport.h>
-
-#include <asm/segment.h>
-#include <asm/io.h>
-
-#include <asm-xen/xen-public/xen.h>
-#include <asm-xen/xen-public/physdev.h>
-
-#include "pci-i386.h"
-
-/*
- * NB. The following interface functions are not included here:
- * 1. void eisa_set_level_irq(unsigned int irq)
- * 2. irq_routing_table * __devinit pcibios_get_irq_routing_table(void)
- * 3. int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
- * All are used by the ACPI driver. This should be ported to Xen if it is
- * ever required -- Xen is the ultimate source for IRQ-routing knowledge.
- */
-
-struct pci_ops *pci_root_ops = NULL;
-
-int (*pci_config_read)(int seg, int bus, int dev, int fn,
- int reg, int len, u32 *value) = NULL;
-int (*pci_config_write)(int seg, int bus, int dev, int fn,
- int reg, int len, u32 value) = NULL;
-
-unsigned int pci_probe = PCI_PROBE_BIOS;
-
-struct pci_fixup pcibios_fixups[] = { { 0 } };
-
-static int pci_confx_read(int seg, int bus, int dev, int fn, int reg,
- int len, u32 *value)
-{
- int ret;
- physdev_op_t op;
-
- if (bus > 255 || dev > 31 || fn > 7 || reg > 255)
- return -EINVAL;
-
- op.cmd = PHYSDEVOP_PCI_CFGREG_READ;
- op.u.pci_cfgreg_read.bus = bus;
- op.u.pci_cfgreg_read.dev = dev;
- op.u.pci_cfgreg_read.func = fn;
- op.u.pci_cfgreg_read.reg = reg;
- op.u.pci_cfgreg_read.len = len;
-
- if ( (ret = HYPERVISOR_physdev_op(&op)) != 0 )
- return ret;
-
- *value = op.u.pci_cfgreg_read.value;
-
- return 0;
-}
-
-static int pci_confx_write(int seg, int bus, int dev, int fn, int reg,
- int len, u32 value)
-{
- int ret;
- physdev_op_t op;
-
- if ((bus > 255 || dev > 31 || fn > 7 || reg > 255))
- return -EINVAL;
-
- op.cmd = PHYSDEVOP_PCI_CFGREG_WRITE;
- op.u.pci_cfgreg_write.bus = bus;
- op.u.pci_cfgreg_write.dev = dev;
- op.u.pci_cfgreg_write.func = fn;
- op.u.pci_cfgreg_write.reg = reg;
- op.u.pci_cfgreg_write.len = len;
- op.u.pci_cfgreg_write.value = value;
-
- if ( (ret = HYPERVISOR_physdev_op(&op)) != 0 )
- return ret;
- return 0;
-}
-
-
-static int pci_confx_read_config_byte(struct pci_dev *dev,
- int where, u8 *value)
-{
- int result;
- u32 data;
-
- result = pci_confx_read(0, dev->bus->number, PCI_SLOT(dev->devfn),
- PCI_FUNC(dev->devfn), where, 1, &data);
-
- *value = (u8)data;
-
- return result;
-}
-
-static int pci_confx_read_config_word(struct pci_dev *dev,
- int where, u16 *value)
-{
- int result;
- u32 data;
-
- result = pci_confx_read(0, dev->bus->number, PCI_SLOT(dev->devfn),
- PCI_FUNC(dev->devfn), where, 2, &data);
-
- *value = (u16)data;
-
- return result;
-}
-
-static int pci_confx_read_config_dword(struct pci_dev *dev,
- int where, u32 *value)
-{
- return pci_confx_read(0, dev->bus->number, PCI_SLOT(dev->devfn),
- PCI_FUNC(dev->devfn), where, 4, value);
-}
-
-static int pci_confx_write_config_byte(struct pci_dev *dev,
- int where, u8 value)
-{
- return pci_confx_write(0, dev->bus->number, PCI_SLOT(dev->devfn),
- PCI_FUNC(dev->devfn), where, 1, value);
-}
-
-static int pci_confx_write_config_word(struct pci_dev *dev,
- int where, u16 value)
-{
- return pci_confx_write(0, dev->bus->number, PCI_SLOT(dev->devfn),
- PCI_FUNC(dev->devfn), where, 2, value);
-}
-
-static int pci_confx_write_config_dword(struct pci_dev *dev,
- int where, u32 value)
-{
- return pci_confx_write(0, dev->bus->number, PCI_SLOT(dev->devfn),
- PCI_FUNC(dev->devfn), where, 4, value);
-}
-
-static struct pci_ops pci_conf_xen = {
- pci_confx_read_config_byte,
- pci_confx_read_config_word,
- pci_confx_read_config_dword,
- pci_confx_write_config_byte,
- pci_confx_write_config_word,
- pci_confx_write_config_dword
-};
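
Every entry in this ops table funnels into pci_confx_read()/pci_confx_write(), i.e. into a PHYSDEVOP_PCI_CFGREG_* hypercall, so the guest never touches configuration ports 0xCF8/0xCFC itself. Callers keep using the ordinary 2.4 wrappers, which dispatch through pci_root_ops; sketched here with a hypothetical dev:

    u8 rev;
    pci_read_config_byte(dev, PCI_REVISION_ID, &rev);
    /* -> pci_confx_read_config_byte() -> pci_confx_read()
     *    -> HYPERVISOR_physdev_op(PHYSDEVOP_PCI_CFGREG_READ) */
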
-
-void pcibios_penalize_isa_irq(int irq)
-{
- /* nothing */
-}
-
-void __devinit pcibios_fixup_bus(struct pci_bus *b)
-{
- pci_read_bridge_bases(b);
-}
-
-struct pci_bus * __devinit pcibios_scan_root(int busnum)
-{
- struct list_head *list;
- struct pci_bus *bus;
-
- list_for_each ( list, &pci_root_buses )
- {
- bus = pci_bus_b(list);
- if ( bus->number == busnum )
- return bus;
- }
-
- printk("PCI: Probing PCI hardware (bus %02x)\n", busnum);
- return pci_scan_bus(busnum, pci_root_ops, NULL);
-}
-
-void __init pcibios_init(void)
-{
- int bus;
- physdev_op_t op;
-
- if ( !pci_probe )
- return;
-
- pci_root_ops = &pci_conf_xen;
- pci_config_read = pci_confx_read;
- pci_config_write = pci_confx_write;
-
- pcibios_set_cacheline_size();
-
- op.cmd = PHYSDEVOP_PCI_PROBE_ROOT_BUSES;
- if ( HYPERVISOR_physdev_op(&op) != 0 )
- {
- printk(KERN_WARNING "PCI: System does not support PCI\n");
- return;
- }
-
- printk(KERN_INFO "PCI: Probing PCI hardware\n");
- for ( bus = 0; bus < 256; bus++ )
- if ( test_bit(bus, &op.u.pci_probe_root_buses.busmask[0]) )
- (void)pcibios_scan_root(bus);
-
- pcibios_resource_survey();
-}
-
-char * __devinit pcibios_setup(char *str)
-{
- if ( !strcmp(str, "off") )
- pci_probe = 0;
- return NULL;
-}
-
-unsigned int pcibios_assign_all_busses(void)
-{
- return 0;
-}
-
-int pcibios_enable_device(struct pci_dev *dev, int mask)
-{
- int err;
- u8 pin;
- physdev_op_t op;
-
- /* Inform Xen that we are going to use this device. */
- op.cmd = PHYSDEVOP_PCI_INITIALISE_DEVICE;
- op.u.pci_initialise_device.bus = dev->bus->number;
- op.u.pci_initialise_device.dev = PCI_SLOT(dev->devfn);
- op.u.pci_initialise_device.func = PCI_FUNC(dev->devfn);
- if ( (err = HYPERVISOR_physdev_op(&op)) != 0 )
- return err;
-
- /* Now we can bind to the very final IRQ line. */
- pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &pin);
- dev->irq = pin;
-
- /* Turn on device I/O and memory access as necessary. */
- if ( (err = pcibios_enable_resources(dev, mask)) < 0 )
- return err;
-
- /* Sanity-check that an interrupt-producing device is routed to an IRQ. */
- pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
- if ( pin != 0 )
- {
- if ( dev->irq != 0 )
- printk(KERN_INFO "PCI: Obtained IRQ %d for device %s\n",
- dev->irq, dev->slot_name);
- else
- printk(KERN_WARNING "PCI: No IRQ known for interrupt pin %c of "
- "device %s.\n", 'A' + pin - 1, dev->slot_name);
- }
-
- return 0;
-}
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/arch/xen/kernel/process.c
--- a/linux-2.4.30-xen-sparse/arch/xen/kernel/process.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,460 +0,0 @@
-/*
- * linux/arch/i386/kernel/process.c
- *
- * Copyright (C) 1995 Linus Torvalds
- *
- * Pentium III FXSR, SSE support
- * Gareth Hughes <gareth@xxxxxxxxxxx>, May 2000
- */
-
-/*
- * This file handles the architecture-dependent parts of process handling..
- */
-
-#define __KERNEL_SYSCALLS__
-#include <stdarg.h>
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/stddef.h>
-#include <linux/unistd.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/user.h>
-#include <linux/a.out.h>
-#include <linux/interrupt.h>
-#include <linux/config.h>
-#include <linux/delay.h>
-#include <linux/reboot.h>
-#include <linux/init.h>
-#include <linux/mc146818rtc.h>
-
-#include <asm/uaccess.h>
-#include <asm/pgtable.h>
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/ldt.h>
-#include <asm/processor.h>
-#include <asm/i387.h>
-#include <asm/desc.h>
-#include <asm/mmu_context.h>
-#include <asm/multicall.h>
-#include <asm-xen/xen-public/dom0_ops.h>
-
-#include <linux/irq.h>
-
-asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
-
-int hlt_counter;
-
-/*
- * Powermanagement idle function, if any..
- */
-void (*pm_idle)(void);
-
-/*
- * Power off function, if any
- */
-void (*pm_power_off)(void);
-
-void disable_hlt(void)
-{
- hlt_counter++;
-}
-
-void enable_hlt(void)
-{
- hlt_counter--;
-}
-
-/*
- * The idle thread. There's no useful work to be
- * done, so just try to conserve power and have a
- * low exit latency (ie sit in a loop waiting for
- * somebody to say that they'd like to reschedule)
- */
-void cpu_idle (void)
-{
- extern int set_timeout_timer(void);
-
- /* Endless idle loop with no priority at all. */
- init_idle();
- current->nice = 20;
- current->counter = -100;
-
- for ( ; ; )
- {
- while ( !current->need_resched )
- {
- __cli();
- if ( current->need_resched )
- {
- /* The race-free check for events failed. */
- __sti();
- break;
- }
- else if ( set_timeout_timer() == 0 )
- {
- /* NB. Blocking reenable events in a race-free manner. */
- HYPERVISOR_block();
- }
- else
- {
- /* No race here: yielding will get us the CPU again anyway. */
- __sti();
- HYPERVISOR_yield();
- }
- }
- schedule();
- check_pgt_cache();
- }
-}
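
The loop's shape is the standard race-free blocking idiom: disable events, re-check the wakeup condition, and only then block, relying on HYPERVISOR_block() to re-enable event delivery atomically so a wakeup arriving between the check and the block is never lost. Distilled, with the timer branch omitted:

    __cli();                     /* close the race window             */
    if (current->need_resched)
        __sti();                 /* work already pending: don't block */
    else
        HYPERVISOR_block();      /* atomically re-enables events      */
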
-
-extern void show_trace(unsigned long* esp);
-
-void show_regs(struct pt_regs * regs)
-{
- printk("\n");
- printk("Pid: %d, comm: %20s\n", current->pid, current->comm);
- printk("EIP: %04x:[<%08lx>] CPU: %d",0xffff & regs->xcs,regs->eip,
smp_processor_id());
- if (regs->xcs & 2)
- printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp);
- printk(" EFLAGS: %08lx %s\n",regs->eflags, print_tainted());
- printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
- regs->eax,regs->ebx,regs->ecx,regs->edx);
- printk("ESI: %08lx EDI: %08lx EBP: %08lx",
- regs->esi, regs->edi, regs->ebp);
- printk(" DS: %04x ES: %04x\n",
- 0xffff & regs->xds,0xffff & regs->xes);
-
- show_trace(&regs->esp);
-}
-
-
-/*
- * Create a kernel thread
- */
-int arch_kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
-{
- long retval, d0;
-
- __asm__ __volatile__(
- "movl %%esp,%%esi\n\t"
- "int $0x80\n\t" /* Linux/i386 system call */
- "cmpl %%esp,%%esi\n\t" /* child or parent? */
- "je 1f\n\t" /* parent - jump */
- /* Load the argument into eax, and push it. That way, it does
- * not matter whether the called function is compiled with
- * -mregparm or not. */
- "movl %4,%%eax\n\t"
- "pushl %%eax\n\t"
- "call *%5\n\t" /* call fn */
- "movl %3,%0\n\t" /* exit */
- "int $0x80\n"
- "1:\t"
- :"=&a" (retval), "=&S" (d0)
- :"0" (__NR_clone), "i" (__NR_exit),
- "r" (arg), "r" (fn),
- "b" (flags | CLONE_VM)
- : "memory");
-
- return retval;
-}
-
-/*
- * Free current thread data structures etc..
- */
-void exit_thread(void)
-{
- /* nothing to do ... */
-}
-
-void flush_thread(void)
-{
- struct task_struct *tsk = current;
-
- memset(tsk->thread.debugreg, 0, sizeof(unsigned long)*8);
-
- /*
- * Forget coprocessor state..
- */
- clear_fpu(tsk);
- tsk->used_math = 0;
-}
-
-void release_thread(struct task_struct *dead_task)
-{
- if (dead_task->mm) {
- // temporary debugging check
- if (dead_task->mm->context.size) {
- printk("WARNING: dead process %8s still has LDT? <%p/%08x>\n",
- dead_task->comm,
- dead_task->mm->context.ldt,
- dead_task->mm->context.size);
- BUG();
- }
- }
- //release_x86_irqs(dead_task);
-}
-
-
-/*
- * Save a segment.
- */
-#define savesegment(seg,value) \
- asm volatile("movl %%" #seg ",%0":"=m" (*(int *)&(value)))
-
-int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
- unsigned long unused,
- struct task_struct * p, struct pt_regs * regs)
-{
- struct pt_regs * childregs;
- unsigned long eflags;
-
- childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p)) - 1;
- struct_cpy(childregs, regs);
- childregs->eax = 0;
- childregs->esp = esp;
-
- p->thread.esp = (unsigned long) childregs;
- p->thread.esp0 = (unsigned long) (childregs+1);
-
- p->thread.eip = (unsigned long) ret_from_fork;
-
- savesegment(fs,p->thread.fs);
- savesegment(gs,p->thread.gs);
-
- unlazy_fpu(current);
- struct_cpy(&p->thread.i387, &current->thread.i387);
-
-
- __asm__ __volatile__ ( "pushfl; popl %0" : "=r" (eflags) : );
- p->thread.io_pl = (eflags >> 12) & 3;
-
- return 0;
-}
-
-/*
- * fill in the user structure for a core dump..
- */
-void dump_thread(struct pt_regs * regs, struct user * dump)
-{
- int i;
-
-/* changed the size calculations - should hopefully work better. lbt */
- dump->magic = CMAGIC;
- dump->start_code = 0;
- dump->start_stack = regs->esp & ~(PAGE_SIZE - 1);
- dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
- dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
- dump->u_dsize -= dump->u_tsize;
- dump->u_ssize = 0;
- for (i = 0; i < 8; i++)
- dump->u_debugreg[i] = current->thread.debugreg[i];
-
- if (dump->start_stack < TASK_SIZE)
- dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;
-
- dump->regs.ebx = regs->ebx;
- dump->regs.ecx = regs->ecx;
- dump->regs.edx = regs->edx;
- dump->regs.esi = regs->esi;
- dump->regs.edi = regs->edi;
- dump->regs.ebp = regs->ebp;
- dump->regs.eax = regs->eax;
- dump->regs.ds = regs->xds;
- dump->regs.es = regs->xes;
- savesegment(fs,dump->regs.fs);
- savesegment(gs,dump->regs.gs);
- dump->regs.orig_eax = regs->orig_eax;
- dump->regs.eip = regs->eip;
- dump->regs.cs = regs->xcs;
- dump->regs.eflags = regs->eflags;
- dump->regs.esp = regs->esp;
- dump->regs.ss = regs->xss;
-
- dump->u_fpvalid = dump_fpu (regs, &dump->i387);
-}
-
-/*
- * switch_to(x,y) should switch tasks from x to y.
- *
- * We fsave/fwait so that an exception goes off at the right time
- * (as a call from the fsave or fwait in effect) rather than to
- * the wrong process. Lazy FP saving no longer makes any sense
- * with modern CPU's, and this simplifies a lot of things (SMP
- * and UP become the same).
- *
- * NOTE! We used to use the x86 hardware context switching. The
- * reason for not using it any more becomes apparent when you
- * try to recover gracefully from saved state that is no longer
- * valid (stale segment register values in particular). With the
- * hardware task-switch, there is no way to fix up bad state in
- * a reasonable manner.
- *
- * The fact that Intel documents the hardware task-switching to
- * be slow is a fairly red herring - this code is not noticeably
- * faster. However, there _is_ some room for improvement here,
- * so the performance issues may eventually be a valid point.
- * More important, however, is the fact that this allows us much
- * more flexibility.
- */
-void fastcall __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
-{
- struct thread_struct *next = &next_p->thread;
-
- __cli();
-
- /*
- * We clobber FS and GS here so that we avoid a GPF when restoring previous
- * task's FS/GS values in Xen when the LDT is switched. If we don't do this
- * then we can end up erroneously re-flushing the page-update queue when
- * we 'execute_multicall_list'.
- */
- __asm__ __volatile__ (
- "xorl %%eax,%%eax; movl %%eax,%%fs; movl %%eax,%%gs" : : : "eax" );
-
- MULTICALL_flush_page_update_queue();
-
- /*
- * This is basically 'unlazy_fpu', except that we queue a multicall to
- * indicate FPU task switch, rather than synchronously trapping to Xen.
- */
- if ( prev_p->flags & PF_USEDFPU )
- {
- if ( cpu_has_fxsr )
- asm volatile( "fxsave %0 ; fnclex"
- : "=m" (prev_p->thread.i387.fxsave) );
- else
- asm volatile( "fnsave %0 ; fwait"
- : "=m" (prev_p->thread.i387.fsave) );
- prev_p->flags &= ~PF_USEDFPU;
- queue_multicall0(__HYPERVISOR_fpu_taskswitch);
- }
-
- queue_multicall2(__HYPERVISOR_stack_switch, __KERNEL_DS, next->esp0);
- if ( xen_start_info.flags & SIF_PRIVILEGED )
- {
- dom0_op_t op;
- op.cmd = DOM0_IOPL;
- op.u.iopl.domain = DOMID_SELF;
- op.u.iopl.iopl = next->io_pl;
- op.interface_version = DOM0_INTERFACE_VERSION;
- queue_multicall1(__HYPERVISOR_dom0_op, (unsigned long)&op);
- }
-
- /* EXECUTE ALL TASK SWITCH XEN SYSCALLS AT THIS POINT. */
- execute_multicall_list();
- __sti();
-
- /*
- * Restore %fs and %gs.
- */
- loadsegment(fs, next->fs);
- loadsegment(gs, next->gs);
-
- /*
- * Now maybe reload the debug registers
- */
- if ( next->debugreg[7] != 0 )
- {
- HYPERVISOR_set_debugreg(0, next->debugreg[0]);
- HYPERVISOR_set_debugreg(1, next->debugreg[1]);
- HYPERVISOR_set_debugreg(2, next->debugreg[2]);
- HYPERVISOR_set_debugreg(3, next->debugreg[3]);
- /* no 4 and 5 */
- HYPERVISOR_set_debugreg(6, next->debugreg[6]);
- HYPERVISOR_set_debugreg(7, next->debugreg[7]);
- }
-}
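
Each queue_multicall*() above only appends to a buffer; execute_multicall_list() then traps into Xen once, so the whole context switch costs a single guest/hypervisor transition instead of one per hypercall. A rough sketch of that batching idea; the struct layout and issue_multicall() are illustrative, not Xen's actual interface:

    struct pending_call { unsigned long op; unsigned long args[2]; };

    static struct pending_call batch[8];
    static int batch_len;

    static void queue_call(unsigned long op, unsigned long a0, unsigned long a1)
    {
        batch[batch_len].op = op;          /* record, but do not trap yet */
        batch[batch_len].args[0] = a0;
        batch[batch_len].args[1] = a1;
        batch_len++;
    }

    static void flush_calls(void)
    {
        issue_multicall(batch, batch_len); /* one trap executes them all */
        batch_len = 0;
    }
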
-
-asmlinkage int sys_fork(struct pt_regs regs)
-{
- return do_fork(SIGCHLD, regs.esp, &regs, 0);
-}
-
-asmlinkage int sys_clone(struct pt_regs regs)
-{
- unsigned long clone_flags;
- unsigned long newsp;
-
- clone_flags = regs.ebx;
- newsp = regs.ecx;
- if (!newsp)
- newsp = regs.esp;
- return do_fork(clone_flags, newsp, &regs, 0);
-}
-
-/*
- * This is trivial, and on the face of it looks like it
- * could equally well be done in user mode.
- *
- * Not so, for quite unobvious reasons - register pressure.
- * In user mode vfork() cannot have a stack frame, and if
- * done by calling the "clone()" system call directly, you
- * do not have enough call-clobbered registers to hold all
- * the information you need.
- */
-asmlinkage int sys_vfork(struct pt_regs regs)
-{
- return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.esp, &regs, 0);
-}
-
-/*
- * sys_execve() executes a new program.
- */
-asmlinkage int sys_execve(struct pt_regs regs)
-{
- int error;
- char * filename;
-
- filename = getname((char *) regs.ebx);
- error = PTR_ERR(filename);
- if (IS_ERR(filename))
- goto out;
- error = do_execve(filename, (char **) regs.ecx, (char **) regs.edx, &regs);
- if (error == 0)
- current->ptrace &= ~PT_DTRACE;
- putname(filename);
- out:
- return error;
-}
-
-/*
- * These bracket the sleeping functions..
- */
-extern void scheduling_functions_start_here(void);
-extern void scheduling_functions_end_here(void);
-#define first_sched ((unsigned long) scheduling_functions_start_here)
-#define last_sched ((unsigned long) scheduling_functions_end_here)
-
-unsigned long get_wchan(struct task_struct *p)
-{
- unsigned long ebp, esp, eip;
- unsigned long stack_page;
- int count = 0;
- if (!p || p == current || p->state == TASK_RUNNING)
- return 0;
- stack_page = (unsigned long)p;
- esp = p->thread.esp;
- if (!stack_page || esp < stack_page || esp > 8188+stack_page)
- return 0;
- /* include/asm-i386/system.h:switch_to() pushes ebp last. */
- ebp = *(unsigned long *) esp;
- do {
- if (ebp < stack_page || ebp > 8184+stack_page)
- return 0;
- eip = *(unsigned long *) (ebp+4);
- if (eip < first_sched || eip >= last_sched)
- return eip;
- ebp = *(unsigned long *) ebp;
- } while (count++ < 16);
- return 0;
-}
-#undef last_sched
-#undef first_sched
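
get_wchan() above walks saved frame pointers; for reference, the standard i386 frame layout it reads is:

    /* With frame pointers enabled, each stack frame starts with:
     *     [ebp]     saved caller %ebp -> next link of the chain
     *     [ebp + 4] return address   -> candidate wchan EIP
     * The 8188/8184 bounds keep both loads inside the 8KB task stack. */
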
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/arch/xen/kernel/setup.c
--- a/linux-2.4.30-xen-sparse/arch/xen/kernel/setup.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,1222 +0,0 @@
-/*
- * linux/arch/i386/kernel/setup.c
- *
- * Copyright (C) 1995 Linus Torvalds
- */
-
-/*
- * This file handles the architecture-dependent parts of initialization
- */
-
-#define __KERNEL_SYSCALLS__
-static int errno;
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/stddef.h>
-#include <linux/unistd.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/user.h>
-#include <linux/a.out.h>
-#include <linux/tty.h>
-#include <linux/ioport.h>
-#include <linux/delay.h>
-#include <linux/config.h>
-#include <linux/init.h>
-#include <linux/apm_bios.h>
-#ifdef CONFIG_BLK_DEV_RAM
-#include <linux/blk.h>
-#endif
-#include <linux/highmem.h>
-#include <linux/bootmem.h>
-#include <linux/seq_file.h>
-#include <linux/reboot.h>
-#include <asm/processor.h>
-#include <linux/console.h>
-#include <linux/module.h>
-#include <asm/mtrr.h>
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/smp.h>
-#include <asm/msr.h>
-#include <asm/desc.h>
-#include <asm/dma.h>
-#include <asm/mpspec.h>
-#include <asm/mmu_context.h>
-#include <asm/ctrl_if.h>
-#include <asm/hypervisor.h>
-#include <asm-xen/xen-public/dom0_ops.h>
-#include <linux/netdevice.h>
-#include <linux/rtnetlink.h>
-#include <linux/tqueue.h>
-#include <net/pkt_sched.h> /* dev_(de)activate */
-
-/*
- * Point at the empty zero page to start with. We map the real shared_info
- * page as soon as fixmap is up and running.
- */
-shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
-
-unsigned long *phys_to_machine_mapping, *pfn_to_mfn_frame_list;
-
-multicall_entry_t multicall_list[8];
-int nr_multicall_ents = 0;
-
-/*
- * Machine setup..
- */
-
-char ignore_irq13; /* set if exception 16 works */
-struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
-
-unsigned long mmu_cr4_features;
-
-unsigned char * vgacon_mmap;
-
-/*
- * Bus types ..
- */
-#ifdef CONFIG_EISA
-int EISA_bus;
-#endif
-int MCA_bus;
-
-/* for MCA, but anyone else can use it if they want */
-unsigned int machine_id;
-unsigned int machine_submodel_id;
-unsigned int BIOS_revision;
-unsigned int mca_pentium_flag;
-
-/* For PCI or other memory-mapped resources */
-unsigned long pci_mem_start = 0x10000000;
-
-/*
- * Setup options
- */
-struct drive_info_struct { char dummy[32]; } drive_info;
-struct screen_info screen_info;
-struct apm_info apm_info;
-struct sys_desc_table_struct {
- unsigned short length;
- unsigned char table[0];
-};
-
-unsigned char aux_device_present;
-
-extern int root_mountflags;
-extern char _text, _etext, _edata, _end;
-
-extern int blk_nohighio;
-
-int enable_acpi_smp_table;
-
-/* Raw start-of-day parameters from the hypervisor. */
-union xen_start_info_union xen_start_info_union;
-
-#define COMMAND_LINE_SIZE 256
-static char command_line[COMMAND_LINE_SIZE];
-char saved_command_line[COMMAND_LINE_SIZE];
-
-/* parse_mem_cmdline()
- * returns the value of the mem= boot param converted to pages or 0
- */
-static int __init parse_mem_cmdline (char ** cmdline_p)
-{
- char c = ' ', *to = command_line, *from = saved_command_line;
- int len = 0;
- unsigned long long bytes;
- int mem_param = 0;
-
- /* Save unparsed command line copy for /proc/cmdline */
- memcpy(saved_command_line, xen_start_info.cmd_line, COMMAND_LINE_SIZE);
- saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
-
- for (;;) {
- /*
- * "mem=nopentium" disables the 4MB page tables.
- * "mem=XXX[kKmM]" defines a memory region from HIGH_MEM
- * to <mem>, overriding the bios size.
- * "mem=XXX[KkmM]@XXX[KkmM]" defines a memory region from
- * <start> to <start>+<mem>, overriding the bios size.
- */
- if (c == ' ' && !memcmp(from, "mem=", 4)) {
- if (to != command_line)
- to--;
- if (!memcmp(from+4, "nopentium", 9)) {
- from += 9+4;
- } else if (!memcmp(from+4, "exactmap", 8)) {
- from += 8+4;
- } else {
- bytes = memparse(from+4, &from);
- mem_param = bytes>>PAGE_SHIFT;
- if (*from == '@')
- (void)memparse(from+1, &from);
- }
- }
-
- c = *(from++);
- if (!c)
- break;
- if (COMMAND_LINE_SIZE <= ++len)
- break;
- *(to++) = c;
- }
- *to = '\0';
- *cmdline_p = command_line;
-
- return mem_param;
-}
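
memparse() consumes a number with an optional k/K/m/M size suffix and returns bytes, so the page count falls out of a shift. Worked through with an assumed 4KB page size (PAGE_SHIFT = 12):

    char *next;
    unsigned long long bytes = memparse("256M", &next); /* 256 << 20 = 268435456 */
    unsigned long pages = bytes >> PAGE_SHIFT;          /* 268435456 >> 12 = 65536 */
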
-
-/*
- * Every exception-fixup table is sorted (i.e., the kernel main table, and every
- * module table). Some elements may be out of order if they reference text.init,
- * for example.
- */
-static void sort_exception_table(struct exception_table_entry *start,
- struct exception_table_entry *end)
-{
- struct exception_table_entry *p, *q, tmp;
-
- for ( p = start; p < end; p++ )
- {
- for ( q = p-1; q > start; q-- )
- if ( p->insn > q->insn )
- break;
- if ( ++q != p )
- {
- tmp = *p;
- memmove(q+1, q, (p-q)*sizeof(*p));
- *q = tmp;
- }
- }
-}
-
-int xen_module_init(struct module *mod)
-{
- sort_exception_table(mod->ex_table_start, mod->ex_table_end);
- return 0;
-}
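
The sort matters because the fault path looks up the faulting EIP with a binary search over the insn addresses, which only works on an ordered table. A self-contained sketch of that lookup, using the same entry type as above (not the kernel's exact search_exception_table()):

    static const struct exception_table_entry *
    find_fixup(const struct exception_table_entry *start,
               const struct exception_table_entry *end,  /* one past the last */
               unsigned long faulting_eip)
    {
        while (start < end) {
            const struct exception_table_entry *mid = start + (end - start) / 2;
            if (mid->insn == faulting_eip)
                return mid;            /* fixup found */
            if (mid->insn < faulting_eip)
                start = mid + 1;
            else
                end = mid;
        }
        return 0;                      /* genuine fault, no fixup */
    }
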
-
-void __init setup_arch(char **cmdline_p)
-{
- int i,j;
- unsigned long bootmap_size, start_pfn, lmax_low_pfn;
- int mem_param; /* user specified memory size in pages */
- int boot_pfn; /* low pages available for bootmem */
-
- extern void hypervisor_callback(void);
- extern void failsafe_callback(void);
-
- extern unsigned long cpu0_pte_quicklist[];
- extern unsigned long cpu0_pgd_quicklist[];
-
- extern const struct exception_table_entry __start___ex_table[];
- extern const struct exception_table_entry __stop___ex_table[];
-
- extern char _stext;
-
- /* Force a quick death if the kernel panics. */
- extern int panic_timeout;
- if ( panic_timeout == 0 )
- panic_timeout = 1;
-
- /* Ensure that the kernel exception-fixup table is sorted. */
- sort_exception_table(__start___ex_table, __stop___ex_table);
-
-#ifndef CONFIG_HIGHIO
- blk_nohighio = 1;
-#endif
-
- HYPERVISOR_vm_assist(VMASST_CMD_enable,
- VMASST_TYPE_4gb_segments);
-
- HYPERVISOR_set_callbacks(
- __KERNEL_CS, (unsigned long)hypervisor_callback,
- __KERNEL_CS, (unsigned long)failsafe_callback);
-
- boot_cpu_data.pgd_quick = cpu0_pgd_quicklist;
- boot_cpu_data.pte_quick = cpu0_pte_quicklist;
-
- /* This must be initialized to UNNAMED_MAJOR for ipconfig to work
- properly. Setting ROOT_DEV to default to /dev/ram0 breaks initrd. */
- ROOT_DEV = MKDEV(UNNAMED_MAJOR,0);
- memset(&drive_info, 0, sizeof(drive_info));
- memset(&screen_info, 0, sizeof(screen_info));
-
- /* This is drawn from a dump from vgacon:startup in standard Linux. */
- screen_info.orig_video_mode = 3;
- screen_info.orig_video_isVGA = 1;
- screen_info.orig_video_lines = 25;
- screen_info.orig_video_cols = 80;
- screen_info.orig_video_ega_bx = 3;
- screen_info.orig_video_points = 16;
-
- memset(&apm_info.bios, 0, sizeof(apm_info.bios));
- aux_device_present = 0;
-#ifdef CONFIG_BLK_DEV_RAM
- rd_image_start = 0;
- rd_prompt = 0;
- rd_doload = 0;
-#endif
-
- root_mountflags &= ~MS_RDONLY;
- init_mm.start_code = (unsigned long) &_text;
- init_mm.end_code = (unsigned long) &_etext;
- init_mm.end_data = (unsigned long) &_edata;
- init_mm.brk = (unsigned long) &_end;
-
- /* The mem= kernel command line param overrides the detected amount
- * of memory. For xenolinux, if this override is larger than detected
- * memory, then boot using only detected memory and make provisions to
- * use all of the override value. The hypervisor can give this
- * domain more memory later on and it will be added to the free
- * lists at that time. See claim_new_pages() in
- * arch/xen/drivers/balloon/balloon.c
- */
- mem_param = parse_mem_cmdline(cmdline_p);
- if (mem_param < xen_start_info.nr_pages)
- mem_param = xen_start_info.nr_pages;
-
-#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
-#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
-#define PFN_PHYS(x) ((x) << PAGE_SHIFT)
-
-/*
- * 128MB for vmalloc(), iomap(), kmap(), and fixaddr mappings.
- */
-#define VMALLOC_RESERVE (unsigned long)(128 << 20)
-#define MAXMEM (unsigned long)(HYPERVISOR_VIRT_START-PAGE_OFFSET-VMALLOC_RESERVE)
-#define MAXMEM_PFN PFN_DOWN(MAXMEM)
-#define MAX_NONPAE_PFN (1 << 20)
-
- /*
- * Determine low and high memory ranges:
- */
- lmax_low_pfn = max_pfn = mem_param;
- if (lmax_low_pfn > MAXMEM_PFN) {
- lmax_low_pfn = MAXMEM_PFN;
-#ifndef CONFIG_HIGHMEM
- /* Maximum memory usable is what is directly addressable */
- printk(KERN_WARNING "Warning only %ldMB will be used.\n",
- MAXMEM>>20);
- if (max_pfn > MAX_NONPAE_PFN)
- printk(KERN_WARNING "Use a PAE enabled kernel.\n");
- else
- printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
- max_pfn = lmax_low_pfn;
-#else /* !CONFIG_HIGHMEM */
-#ifndef CONFIG_X86_PAE
- if (max_pfn > MAX_NONPAE_PFN) {
- max_pfn = MAX_NONPAE_PFN;
- printk(KERN_WARNING "Warning only 4GB will be used.\n");
- printk(KERN_WARNING "Use a PAE enabled kernel.\n");
- }
-#endif /* !CONFIG_X86_PAE */
-#endif /* !CONFIG_HIGHMEM */
- }
-
-#ifdef CONFIG_HIGHMEM
- highstart_pfn = highend_pfn = max_pfn;
- if (max_pfn > MAXMEM_PFN) {
- highstart_pfn = MAXMEM_PFN;
- printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
- pages_to_mb(highend_pfn - highstart_pfn));
- }
-#endif
-
- phys_to_machine_mapping = (unsigned long *)xen_start_info.mfn_list;
- cur_pgd = init_mm.pgd = (pgd_t *)xen_start_info.pt_base;
-
- start_pfn = (__pa(xen_start_info.pt_base) >> PAGE_SHIFT) +
- xen_start_info.nr_pt_frames;
-
- /*
- * Initialize the boot-time allocator, and free up all RAM. Then reserve
- * space for OS image, initrd, phys->machine table, bootstrap page table,
- * and the bootmem bitmap.
- * NB. There is definitely enough room for the bootmem bitmap in the
- * bootstrap page table. We are guaranteed to get >=512kB unused 'padding'
- * for our own use after all bootstrap elements
- * (see asm-xen/xen-public/xen.h).
- */
- boot_pfn = min((int)xen_start_info.nr_pages,lmax_low_pfn);
- bootmap_size = init_bootmem(start_pfn,boot_pfn);
- free_bootmem(0, PFN_PHYS(boot_pfn));
- reserve_bootmem(__pa(&_stext),
- PFN_PHYS(start_pfn) + bootmap_size + PAGE_SIZE-1 -
- __pa(&_stext));
-
- /* init_bootmem() set the global max_low_pfn to boot_pfn. Now max_low_pfn
- * can be set to the override value.
- */
- max_low_pfn = lmax_low_pfn;
-
-#ifdef CONFIG_BLK_DEV_INITRD
- if ( xen_start_info.mod_start != 0 )
- {
- if ( (__pa(xen_start_info.mod_start) + xen_start_info.mod_len) <=
- (max_low_pfn << PAGE_SHIFT) )
- {
- initrd_start = xen_start_info.mod_start;
- initrd_end = initrd_start + xen_start_info.mod_len;
- initrd_below_start_ok = 1;
- }
- else
- {
- printk(KERN_ERR "initrd extends beyond end of memory "
- "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
- __pa(xen_start_info.mod_start) + xen_start_info.mod_len,
- max_low_pfn << PAGE_SHIFT);
- initrd_start = 0;
- }
- }
-#endif
-
- paging_init();
-
- /* Make sure we have a correctly sized P->M table. */
- if ( max_pfn != xen_start_info.nr_pages )
- {
- phys_to_machine_mapping = alloc_bootmem_low_pages(
- max_pfn * sizeof(unsigned long));
- if ( max_pfn > xen_start_info.nr_pages )
- {
- memset(phys_to_machine_mapping, ~0,
- max_pfn * sizeof(unsigned long));
- memcpy(phys_to_machine_mapping,
- (unsigned long *)xen_start_info.mfn_list,
- xen_start_info.nr_pages * sizeof(unsigned long));
- }
- else
- {
- memcpy(phys_to_machine_mapping,
- (unsigned long *)xen_start_info.mfn_list,
- max_pfn * sizeof(unsigned long));
- if (HYPERVISOR_dom_mem_op(
- MEMOP_decrease_reservation,
- (unsigned long *)xen_start_info.mfn_list + max_pfn,
- xen_start_info.nr_pages - max_pfn, 0) !=
- (xen_start_info.nr_pages - max_pfn))
- BUG();
- }
- free_bootmem(__pa(xen_start_info.mfn_list),
- PFN_PHYS(PFN_UP(xen_start_info.nr_pages *
- sizeof(unsigned long))));
- }
-
- pfn_to_mfn_frame_list = alloc_bootmem_low_pages(PAGE_SIZE);
- for ( i=0, j=0; i < max_pfn; i+=(PAGE_SIZE/sizeof(unsigned long)), j++ )
- {
- pfn_to_mfn_frame_list[j] =
- virt_to_machine(&phys_to_machine_mapping[i]) >> PAGE_SHIFT;
- }
- HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list =
- virt_to_machine(pfn_to_mfn_frame_list) >> PAGE_SHIFT;
-
- /* If we are a privileged guest OS then we should request IO privileges. */
- if ( xen_start_info.flags & SIF_PRIVILEGED )
- {
- dom0_op_t op;
- op.cmd = DOM0_IOPL;
- op.u.iopl.domain = DOMID_SELF;
- op.u.iopl.iopl = 1;
- if( HYPERVISOR_dom0_op(&op) != 0 )
- panic("Unable to obtain IOPL, despite being SIF_PRIVILEGED");
- current->thread.io_pl = 1;
- }
-
- if (xen_start_info.flags & SIF_INITDOMAIN )
- {
- if( !(xen_start_info.flags & SIF_PRIVILEGED) )
- panic("Xen granted us console access but not privileged status");
-
-#if defined(CONFIG_VT)
-#if defined(CONFIG_VGA_CONSOLE)
- conswitchp = &vga_con;
-#elif defined(CONFIG_DUMMY_CONSOLE)
- conswitchp = &dummy_con;
-#endif
-#endif
- }
-}
-
-static int cachesize_override __initdata = -1;
-static int __init cachesize_setup(char *str)
-{
- get_option (&str, &cachesize_override);
- return 1;
-}
-__setup("cachesize=", cachesize_setup);
-
-static int __init highio_setup(char *str)
-{
- printk("i386: disabling HIGHMEM block I/O\n");
- blk_nohighio = 1;
- return 1;
-}
-__setup("nohighio", highio_setup);
-
-static int __init get_model_name(struct cpuinfo_x86 *c)
-{
- unsigned int *v;
- char *p, *q;
-
- if (cpuid_eax(0x80000000) < 0x80000004)
- return 0;
-
- v = (unsigned int *) c->x86_model_id;
- cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
- cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
- cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
- c->x86_model_id[48] = 0;
-
- /* Intel chips right-justify this string for some dumb reason;
- undo that brain damage */
- p = q = &c->x86_model_id[0];
- while ( *p == ' ' )
- p++;
- if ( p != q ) {
- while ( *p )
- *q++ = *p++;
- while ( q <= &c->x86_model_id[48] )
- *q++ = '\0'; /* Zero-pad the rest */
- }
-
- return 1;
-}
-
-
-static void __init display_cacheinfo(struct cpuinfo_x86 *c)
-{
- unsigned int n, dummy, ecx, edx, l2size;
-
- n = cpuid_eax(0x80000000);
-
- if (n >= 0x80000005) {
- cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
- printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK
(%d bytes/line)\n",
- edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
- c->x86_cache_size=(ecx>>24)+(edx>>24);
- }
-
- if (n < 0x80000006) /* Some chips just have a large L1. */
- return;
-
- ecx = cpuid_ecx(0x80000006);
- l2size = ecx >> 16;
-
- /* AMD errata T13 (order #21922) */
- if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {
- if (c->x86_model == 3 && c->x86_mask == 0) /* Duron Rev A0 */
- l2size = 64;
- if (c->x86_model == 4 &&
- (c->x86_mask==0 || c->x86_mask==1)) /* Tbird rev A1/A2 */
- l2size = 256;
- }
-
- /* Intel PIII Tualatin. This comes in two flavours.
- * One has 256kb of cache, the other 512. We have no way
- * to determine which, so we use a boottime override
- * for the 512kb model, and assume 256 otherwise.
- */
- if ((c->x86_vendor == X86_VENDOR_INTEL) && (c->x86 == 6) &&
- (c->x86_model == 11) && (l2size == 0))
- l2size = 256;
-
- if (c->x86_vendor == X86_VENDOR_CENTAUR) {
- /* VIA C3 CPUs (670-68F) need further shifting. */
- if ((c->x86 == 6) &&
- ((c->x86_model == 7) || (c->x86_model == 8))) {
- l2size >>= 8;
- }
-
- /* VIA also screwed up Nehemiah stepping 1, and made
- it return '65KB' instead of '64KB'
- - Note, it seems this may only be in engineering samples. */
- if ((c->x86==6) && (c->x86_model==9) &&
- (c->x86_mask==1) && (l2size==65))
- l2size -= 1;
- }
-
- /* Allow user to override all this if necessary. */
- if (cachesize_override != -1)
- l2size = cachesize_override;
-
- if ( l2size == 0 )
- return; /* Again, no L2 cache is possible */
-
- c->x86_cache_size = l2size;
-
- printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
- l2size, ecx & 0xFF);
-}
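
For reference, the CPUID leaf 0x80000006 decode above, worked through on a sample register value (illustrative, not read from real hardware):

    unsigned int ecx     = 0x02008140; /* sample 0x80000006 ECX value */
    unsigned int l2_kb   = ecx >> 16;  /* 0x0200 = 512 KB of L2       */
    unsigned int line_sz = ecx & 0xFF; /* 0x40   = 64 bytes per line  */
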
-
-static void __init init_c3(struct cpuinfo_x86 *c)
-{
- /* Test for Centaur Extended Feature Flags presence */
- if (cpuid_eax(0xC0000000) >= 0xC0000001) {
- /* store Centaur Extended Feature Flags as
- * word 5 of the CPU capability bit array
- */
- c->x86_capability[5] = cpuid_edx(0xC0000001);
- }
-
- switch (c->x86_model) {
- case 9: /* Nehemiah */
- default:
- get_model_name(c);
- display_cacheinfo(c);
- break;
- }
-}
-
-static void __init init_centaur(struct cpuinfo_x86 *c)
-{
- /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
- 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
- clear_bit(0*32+31, &c->x86_capability);
-
- switch (c->x86) {
- case 6:
- init_c3(c);
- break;
- default:
- panic("Unsupported Centaur CPU (%i)\n", c->x86);
- }
-}
-
-static int __init init_amd(struct cpuinfo_x86 *c)
-{
- int r;
-
- /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
- 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
- clear_bit(0*32+31, &c->x86_capability);
-
- r = get_model_name(c);
-
- switch(c->x86)
- {
- case 5: /* We don't like AMD K6 */
- panic("Unsupported AMD processor\n");
- case 6: /* An Athlon/Duron. We can trust the BIOS probably */
- break;
- }
-
- display_cacheinfo(c);
- return r;
-}
-
-
-static void __init init_intel(struct cpuinfo_x86 *c)
-{
- char *p = NULL;
- unsigned int l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
-
- if (c->cpuid_level > 1) {
- /* supports eax=2 call */
- int i, j, n;
- int regs[4];
- unsigned char *dp = (unsigned char *)regs;
-
- /* Number of times to iterate */
- n = cpuid_eax(2) & 0xFF;
-
- for ( i = 0 ; i < n ; i++ ) {
- cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
-
- /* If bit 31 is set, this is an unknown format */
- for ( j = 0 ; j < 3 ; j++ ) {
- if ( regs[j] < 0 ) regs[j] = 0;
- }
-
- /* Byte 0 is level count, not a descriptor */
- for ( j = 1 ; j < 16 ; j++ ) {
- unsigned char des = dp[j];
- unsigned char dl, dh;
- unsigned int cs;
-
- dh = des >> 4;
- dl = des & 0x0F;
-
- /* Black magic... */
-
- switch ( dh )
- {
- case 0:
- switch ( dl ) {
- case 6:
- /* L1 I cache */
- l1i += 8;
- break;
- case 8:
- /* L1 I cache */
- l1i += 16;
- break;
- case 10:
- /* L1 D cache */
- l1d += 8;
- break;
- case 12:
- /* L1 D cache */
- l1d += 16;
- break;
- default:;
- /* TLB, or unknown */
- }
- break;
- case 2:
- if ( dl ) {
- /* L3 cache */
- cs = (dl-1) << 9;
- l3 += cs;
- }
- break;
- case 4:
- if ( c->x86 > 6 && dl ) {
- /* P4 family */
- /* L3 cache */
- cs = 128 << (dl-1);
- l3 += cs;
- break;
- }
- /* else same as 8 - fall through */
- case 8:
- if ( dl ) {
- /* L2 cache */
- cs = 128 << (dl-1);
- l2 += cs;
- }
- break;
- case 6:
- if (dl > 5) {
- /* L1 D cache */
- cs = 8<<(dl-6);
- l1d += cs;
- }
- break;
- case 7:
- if ( dl >= 8 )
- {
- /* L2 cache */
- cs = 64<<(dl-8);
- l2 += cs;
- } else {
- /* L0 I cache, count as L1 */
- cs = dl ? (16 << (dl-1)) : 12;
- l1i += cs;
- }
- break;
- default:
- /* TLB, or something else we don't know about */
- break;
- }
- }
- }
- if ( l1i || l1d )
- printk(KERN_INFO "CPU: L1 I cache: %dK, L1 D cache: %dK\n",
- l1i, l1d);
- if ( l2 )
- printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
- if ( l3 )
- printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);
-
- /*
- * This assumes the L3 cache is shared; it typically lives in
- * the northbridge. The L1 caches are included by the L2
- * cache, and so should not be included for the purpose of
- * SMP switching weights.
- */
- c->x86_cache_size = l2 ? l2 : (l1i+l1d);
- }
-
- /* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it */
- if ( c->x86 == 6 && c->x86_model < 3 && c->x86_mask < 3 )
- clear_bit(X86_FEATURE_SEP, &c->x86_capability);
-
- /* Names for the Pentium II/Celeron processors
- detectable only by also checking the cache size.
- Dixon is NOT a Celeron. */
- if (c->x86 == 6) {
- switch (c->x86_model) {
- case 5:
- if (l2 == 0)
- p = "Celeron (Covington)";
- if (l2 == 256)
- p = "Mobile Pentium II (Dixon)";
- break;
-
- case 6:
- if (l2 == 128)
- p = "Celeron (Mendocino)";
- break;
-
- case 8:
- if (l2 == 128)
- p = "Celeron (Coppermine)";
- break;
- }
- }
-
- if ( p )
- strcpy(c->x86_model_id, p);
-}
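
To make the descriptor decode above concrete, here is one byte worked through; 0x43 is Intel's published descriptor for a 512KB L2, and the arithmetic agrees:

    unsigned char des = 0x43;
    unsigned char dh = des >> 4;        /* 4 */
    unsigned char dl = des & 0x0F;      /* 3 */
    /* On family 6 the "c->x86 > 6" test fails, so case 4 falls through
     * to the case 8 arm: */
    unsigned int l2 = 128 << (dl - 1);  /* 128 << 2 = 512 KB */
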
-
-void __init get_cpu_vendor(struct cpuinfo_x86 *c)
-{
- char *v = c->x86_vendor_id;
-
- if (!strcmp(v, "GenuineIntel"))
- c->x86_vendor = X86_VENDOR_INTEL;
- else if (!strcmp(v, "AuthenticAMD"))
- c->x86_vendor = X86_VENDOR_AMD;
- else if (!strcmp(v, "CentaurHauls"))
- c->x86_vendor = X86_VENDOR_CENTAUR;
- else
- c->x86_vendor = X86_VENDOR_UNKNOWN;
-}
-
-struct cpu_model_info {
- int vendor;
- int family;
- char *model_names[16];
-};
-
-/* Naming convention should be: <Name> [(<Codename>)] */
-/* This table is only used if init_<vendor>() below doesn't set the name; */
-/* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */
-static struct cpu_model_info cpu_models[] __initdata = {
- { X86_VENDOR_INTEL, 6,
- { "Pentium Pro A-step", "Pentium Pro", NULL, "Pentium II (Klamath)",
- NULL, "Pentium II (Deschutes)", "Mobile Pentium II",
- "Pentium III (Katmai)", "Pentium III (Coppermine)", NULL,
- "Pentium III (Cascades)", NULL, NULL, NULL, NULL }},
- { X86_VENDOR_AMD, 6, /* Is this really necessary?? */
- { "Athlon", "Athlon",
- "Athlon", NULL, "Athlon", NULL,
- NULL, NULL, NULL,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL }}
-};
-
-/* Look up CPU names by table lookup. */
-static char __init *table_lookup_model(struct cpuinfo_x86 *c)
-{
- struct cpu_model_info *info = cpu_models;
- int i;
-
- if ( c->x86_model >= 16 )
- return NULL; /* Range check */
-
- for ( i = 0 ; i < sizeof(cpu_models)/sizeof(struct cpu_model_info) ; i++ ) {
- if ( info->vendor == c->x86_vendor &&
- info->family == c->x86 ) {
- return info->model_names[c->x86_model];
- }
- info++;
- }
- return NULL; /* Not found */
-}
-
-
-
-/* Standard macro to see if a specific flag is changeable */
-static inline int flag_is_changeable_p(u32 flag)
-{
- u32 f1, f2;
-
- asm("pushfl\n\t"
- "pushfl\n\t"
- "popl %0\n\t"
- "movl %0,%1\n\t"
- "xorl %2,%0\n\t"
- "pushl %0\n\t"
- "popfl\n\t"
- "pushfl\n\t"
- "popl %0\n\t"
- "popfl\n\t"
- : "=&r" (f1), "=&r" (f2)
- : "ir" (flag));
-
- return ((f1^f2) & flag) != 0;
-}
-
-
-/* Probe for the CPUID instruction */
-static int __init have_cpuid_p(void)
-{
- return flag_is_changeable_p(X86_EFLAGS_ID);
-}
-
-
-
-#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
-unsigned char eddnr;
-struct edd_info edd[EDDMAXNR];
-unsigned int edd_disk80_sig;
-/**
- * copy_edd() - Copy the BIOS EDD information
- * from empty_zero_page into a safe place.
- *
- */
-static inline void copy_edd(void)
-{
- eddnr = EDD_NR;
- memcpy(edd, EDD_BUF, sizeof(edd));
- edd_disk80_sig = DISK80_SIGNATURE_BUFFER;
-}
-#else
-static inline void copy_edd(void) {}
-#endif
-
-/*
- * This does the hard work of actually picking apart the CPU stuff...
- */
-void __init identify_cpu(struct cpuinfo_x86 *c)
-{
- int junk, i;
- u32 xlvl, tfms;
-
- c->loops_per_jiffy = loops_per_jiffy;
- c->x86_cache_size = -1;
- c->x86_vendor = X86_VENDOR_UNKNOWN;
- c->cpuid_level = -1; /* CPUID not detected */
- c->x86_model = c->x86_mask = 0; /* So far unknown... */
- c->x86_vendor_id[0] = '\0'; /* Unset */
- c->x86_model_id[0] = '\0'; /* Unset */
- memset(&c->x86_capability, 0, sizeof c->x86_capability);
- c->hard_math = 1;
-
- if ( !have_cpuid_p() ) {
- panic("Processor must support CPUID\n");
- } else {
- /* CPU does have CPUID */
-
- /* Get vendor name */
- cpuid(0x00000000, &c->cpuid_level,
- (int *)&c->x86_vendor_id[0],
- (int *)&c->x86_vendor_id[8],
- (int *)&c->x86_vendor_id[4]);
-
- get_cpu_vendor(c);
- /* Initialize the standard set of capabilities */
- /* Note that the vendor-specific code below might override */
-
- /* Intel-defined flags: level 0x00000001 */
- if ( c->cpuid_level >= 0x00000001 ) {
- u32 capability, excap;
- cpuid(0x00000001, &tfms, &junk, &excap, &capability);
- c->x86_capability[0] = capability;
- c->x86_capability[4] = excap;
- c->x86 = (tfms >> 8) & 15;
- c->x86_model = (tfms >> 4) & 15;
- if (c->x86 == 0xf) {
- c->x86 += (tfms >> 20) & 0xff;
- c->x86_model += ((tfms >> 16) & 0xF) << 4;
- }
- c->x86_mask = tfms & 15;
- } else {
- /* Have CPUID level 0 only - unheard of */
- c->x86 = 4;
- }
-
- /* AMD-defined flags: level 0x80000001 */
- xlvl = cpuid_eax(0x80000000);
- if ( (xlvl & 0xffff0000) == 0x80000000 ) {
- if ( xlvl >= 0x80000001 )
- c->x86_capability[1] = cpuid_edx(0x80000001);
- if ( xlvl >= 0x80000004 )
- get_model_name(c); /* Default name */
- }
-
- /* Transmeta-defined flags: level 0x80860001 */
- xlvl = cpuid_eax(0x80860000);
- if ( (xlvl & 0xffff0000) == 0x80860000 ) {
- if ( xlvl >= 0x80860001 )
- c->x86_capability[2] = cpuid_edx(0x80860001);
- }
- }
-
- printk(KERN_DEBUG "CPU: Before vendor init, caps: %08x %08x %08x, vendor =
%d\n",
- c->x86_capability[0],
- c->x86_capability[1],
- c->x86_capability[2],
- c->x86_vendor);
-
- /*
- * Vendor-specific initialization. In this section we
- * canonicalize the feature flags, meaning if there are
- * features a certain CPU supports which CPUID doesn't
- * tell us, CPUID claiming incorrect flags, or other bugs,
- * we handle them here.
- *
- * At the end of this section, c->x86_capability better
- * indicate the features this CPU genuinely supports!
- */
- switch ( c->x86_vendor ) {
- case X86_VENDOR_AMD:
- init_amd(c);
- break;
-
- case X86_VENDOR_INTEL:
- init_intel(c);
- break;
-
- case X86_VENDOR_CENTAUR:
- init_centaur(c);
- break;
-
- default:
- printk("Unsupported CPU vendor (%d) -- please report!\n",
- c->x86_vendor);
- }
-
- printk(KERN_DEBUG "CPU: After vendor init, caps: %08x %08x %08x %08x\n",
- c->x86_capability[0],
- c->x86_capability[1],
- c->x86_capability[2],
- c->x86_capability[3]);
-
-
- /* If the model name is still unset, do table lookup. */
- if ( !c->x86_model_id[0] ) {
- char *p;
- p = table_lookup_model(c);
- if ( p )
- strcpy(c->x86_model_id, p);
- else
- /* Last resort... */
- sprintf(c->x86_model_id, "%02x/%02x",
- c->x86_vendor, c->x86_model);
- }
-
- /* Now the feature flags better reflect actual CPU features! */
-
- printk(KERN_DEBUG "CPU: After generic, caps: %08x %08x %08x %08x\n",
- c->x86_capability[0],
- c->x86_capability[1],
- c->x86_capability[2],
- c->x86_capability[3]);
-
- /*
- * On SMP, boot_cpu_data holds the common feature set between
- * all CPUs; so make sure that we indicate which features are
- * common between the CPUs. The first time this routine gets
- * executed, c == &boot_cpu_data.
- */
- if ( c != &boot_cpu_data ) {
- /* AND the already accumulated flags with these */
- for ( i = 0 ; i < NCAPINTS ; i++ )
- boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
- }
-
- printk(KERN_DEBUG "CPU: Common caps: %08x %08x %08x %08x\n",
- boot_cpu_data.x86_capability[0],
- boot_cpu_data.x86_capability[1],
- boot_cpu_data.x86_capability[2],
- boot_cpu_data.x86_capability[3]);
-}
-
-
-/* These need to match <asm/processor.h> */
-static char *cpu_vendor_names[] __initdata = {
- "Intel", "Cyrix", "AMD", "UMC", "NexGen", "Centaur", "Rise", "Transmeta" };
-
-
-void __init print_cpu_info(struct cpuinfo_x86 *c)
-{
- char *vendor = NULL;
-
- if (c->x86_vendor < sizeof(cpu_vendor_names)/sizeof(char *))
- vendor = cpu_vendor_names[c->x86_vendor];
- else if (c->cpuid_level >= 0)
- vendor = c->x86_vendor_id;
-
- if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
- printk("%s ", vendor);
-
- if (!c->x86_model_id[0])
- printk("%d86", c->x86);
- else
- printk("%s", c->x86_model_id);
-
- if (c->x86_mask || c->cpuid_level >= 0)
- printk(" stepping %02x\n", c->x86_mask);
- else
- printk("\n");
-}
-
-/*
- * Get CPU information for use by the procfs.
- */
-static int show_cpuinfo(struct seq_file *m, void *v)
-{
- /*
- * These flag bits must match the definitions in <asm/cpufeature.h>.
- * NULL means this bit is undefined or reserved; either way it doesn't
- * have meaning as far as Linux is concerned. Note that it's important
- * to realize there is a difference between this table and CPUID -- if
- * applications want to get the raw CPUID data, they should access
- * /dev/cpu/<cpu_nr>/cpuid instead.
- */
- static char *x86_cap_flags[] = {
- /* Intel-defined */
- "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
- "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
- "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
- "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe",
-
- /* AMD-defined */
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, "mp", NULL, NULL, "mmxext", NULL,
- NULL, NULL, NULL, NULL, NULL, "lm", "3dnowext", "3dnow",
-
- /* Transmeta-defined */
- "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-
- /* Other (Linux-defined) */
- "cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr",
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-
- /* Intel-defined (#2) */
- "pni", NULL, NULL, "monitor", "ds_cpl", NULL, NULL, "tm2",
- "est", NULL, "cid", NULL, NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-
- /* VIA/Cyrix/Centaur-defined */
- NULL, NULL, "xstore", NULL, NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-
- };
- struct cpuinfo_x86 *c = v;
- int i, n = c - cpu_data;
- int fpu_exception;
-
-#ifdef CONFIG_SMP
- if (!(cpu_online_map & (1<<n)))
- return 0;
-#endif
- seq_printf(m, "processor\t: %d\n"
- "vendor_id\t: %s\n"
- "cpu family\t: %d\n"
- "model\t\t: %d\n"
- "model name\t: %s\n",
- n,
- c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
- c->x86,
- c->x86_model,
- c->x86_model_id[0] ? c->x86_model_id : "unknown");
-
- if (c->x86_mask || c->cpuid_level >= 0)
- seq_printf(m, "stepping\t: %d\n", c->x86_mask);
- else
- seq_printf(m, "stepping\t: unknown\n");
-
- if ( test_bit(X86_FEATURE_TSC, &c->x86_capability) ) {
- seq_printf(m, "cpu MHz\t\t: %lu.%03lu\n",
- cpu_khz / 1000, (cpu_khz % 1000));
- }
-
- /* Cache size */
- if (c->x86_cache_size >= 0)
- seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
-
- /* We use exception 16 if we have hardware math and we've either seen it or the CPU claims it is internal */
- fpu_exception = c->hard_math && (ignore_irq13 || cpu_has_fpu);
- seq_printf(m, "fdiv_bug\t: %s\n"
- "hlt_bug\t\t: %s\n"
- "f00f_bug\t: %s\n"
- "coma_bug\t: %s\n"
- "fpu\t\t: %s\n"
- "fpu_exception\t: %s\n"
- "cpuid level\t: %d\n"
- "wp\t\t: %s\n"
- "flags\t\t:",
- c->fdiv_bug ? "yes" : "no",
- c->hlt_works_ok ? "no" : "yes",
- c->f00f_bug ? "yes" : "no",
- c->coma_bug ? "yes" : "no",
- c->hard_math ? "yes" : "no",
- fpu_exception ? "yes" : "no",
- c->cpuid_level,
- c->wp_works_ok ? "yes" : "no");
-
- for ( i = 0 ; i < 32*NCAPINTS ; i++ )
- if ( test_bit(i, &c->x86_capability) &&
- x86_cap_flags[i] != NULL )
- seq_printf(m, " %s", x86_cap_flags[i]);
-
- seq_printf(m, "\nbogomips\t: %lu.%02lu\n\n",
- c->loops_per_jiffy/(500000/HZ),
- (c->loops_per_jiffy/(5000/HZ)) % 100);
- return 0;
-}
-
-static void *c_start(struct seq_file *m, loff_t *pos)
-{
- return *pos < NR_CPUS ? cpu_data + *pos : NULL;
-}
-static void *c_next(struct seq_file *m, void *v, loff_t *pos)
-{
- ++*pos;
- return c_start(m, pos);
-}
-static void c_stop(struct seq_file *m, void *v)
-{
-}
-struct seq_operations cpuinfo_op = {
- start: c_start,
- next: c_next,
- stop: c_stop,
- show: show_cpuinfo,
-};
-
-unsigned long cpu_initialized __initdata = 0;
-
-/*
- * cpu_init() initializes state that is per-CPU. Some data is already
- * initialized (naturally) in the bootstrap process, such as the GDT
- * and IDT. We reload them nevertheless, this function acts as a
- * 'CPU state barrier', nothing should get across.
- */
-void __init cpu_init (void)
-{
- int nr = smp_processor_id();
-
- if (test_and_set_bit(nr, &cpu_initialized)) {
- printk(KERN_WARNING "CPU#%d already initialized!\n", nr);
- for (;;) __sti();
- }
- printk(KERN_INFO "Initializing CPU#%d\n", nr);
-
- /*
- * set up and load the per-CPU TSS and LDT
- */
- atomic_inc(&init_mm.mm_count);
- current->active_mm = &init_mm;
- if(current->mm)
- BUG();
- enter_lazy_tlb(&init_mm, current, nr);
-
- HYPERVISOR_stack_switch(__KERNEL_DS, current->thread.esp0);
-
- load_LDT(&init_mm.context);
- flush_page_update_queue();
-
- /* Force FPU initialization. */
- current->flags &= ~PF_USEDFPU;
- current->used_math = 0;
- stts();
-}
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/arch/xen/kernel/signal.c
--- a/linux-2.4.30-xen-sparse/arch/xen/kernel/signal.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,717 +0,0 @@
-/*
- * linux/arch/i386/kernel/signal.c
- *
- * Copyright (C) 1991, 1992 Linus Torvalds
- *
- * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
- * 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes
- */
-
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/kernel.h>
-#include <linux/signal.h>
-#include <linux/errno.h>
-#include <linux/wait.h>
-#include <linux/ptrace.h>
-#include <linux/unistd.h>
-#include <linux/stddef.h>
-#include <linux/tty.h>
-#include <linux/personality.h>
-#include <asm/ucontext.h>
-#include <asm/uaccess.h>
-#include <asm/i387.h>
-
-#define DEBUG_SIG 0
-
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
-int FASTCALL(do_signal(struct pt_regs *regs, sigset_t *oldset));
-
-int copy_siginfo_to_user(siginfo_t *to, siginfo_t *from)
-{
- if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
- return -EFAULT;
- if (from->si_code < 0)
- return __copy_to_user(to, from, sizeof(siginfo_t));
- else {
- int err;
-
- /* If you change siginfo_t structure, please be sure
- this code is fixed accordingly.
- It should never copy any pad contained in the structure
- to avoid security leaks, but must copy the generic
- 3 ints plus the relevant union member. */
- err = __put_user(from->si_signo, &to->si_signo);
- err |= __put_user(from->si_errno, &to->si_errno);
- err |= __put_user((short)from->si_code, &to->si_code);
- /* First 32bits of unions are always present. */
- err |= __put_user(from->si_pid, &to->si_pid);
- switch (from->si_code >> 16) {
- case __SI_FAULT >> 16:
- break;
- case __SI_CHLD >> 16:
- err |= __put_user(from->si_utime, &to->si_utime);
- err |= __put_user(from->si_stime, &to->si_stime);
- err |= __put_user(from->si_status, &to->si_status);
- default:
- err |= __put_user(from->si_uid, &to->si_uid);
- break;
- /* case __SI_RT: This is not generated by the kernel as of now. */
- }
- return err;
- }
-}
-
-/*
- * Atomically swap in the new signal mask, and wait for a signal.
- */
-asmlinkage int
-sys_sigsuspend(int history0, int history1, old_sigset_t mask)
-{
- struct pt_regs * regs = (struct pt_regs *) &history0;
- sigset_t saveset;
-
- mask &= _BLOCKABLE;
- spin_lock_irq(&current->sigmask_lock);
- saveset = current->blocked;
- siginitset(&current->blocked, mask);
- recalc_sigpending(current);
- spin_unlock_irq(&current->sigmask_lock);
-
- regs->eax = -EINTR;
- while (1) {
- current->state = TASK_INTERRUPTIBLE;
- schedule();
- if (do_signal(regs, &saveset))
- return -EINTR;
- }
-}
-
-asmlinkage int
-sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize)
-{
- struct pt_regs * regs = (struct pt_regs *) &unewset;
- sigset_t saveset, newset;
-
- /* XXX: Don't preclude handling different sized sigset_t's. */
- if (sigsetsize != sizeof(sigset_t))
- return -EINVAL;
-
- if (copy_from_user(&newset, unewset, sizeof(newset)))
- return -EFAULT;
- sigdelsetmask(&newset, ~_BLOCKABLE);
-
- spin_lock_irq(&current->sigmask_lock);
- saveset = current->blocked;
- current->blocked = newset;
- recalc_sigpending(current);
- spin_unlock_irq(&current->sigmask_lock);
-
- regs->eax = -EINTR;
- while (1) {
- current->state = TASK_INTERRUPTIBLE;
- schedule();
- if (do_signal(regs, &saveset))
- return -EINTR;
- }
-}
-
-asmlinkage int
-sys_sigaction(int sig, const struct old_sigaction *act,
- struct old_sigaction *oact)
-{
- struct k_sigaction new_ka, old_ka;
- int ret;
-
- if (act) {
- old_sigset_t mask;
- if (verify_area(VERIFY_READ, act, sizeof(*act)) ||
- __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
- __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
- return -EFAULT;
- __get_user(new_ka.sa.sa_flags, &act->sa_flags);
- __get_user(mask, &act->sa_mask);
- siginitset(&new_ka.sa.sa_mask, mask);
- }
-
- ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
-
- if (!ret && oact) {
- if (verify_area(VERIFY_WRITE, oact, sizeof(*oact)) ||
- __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
- __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
- return -EFAULT;
- __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
- __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
- }
-
- return ret;
-}
-
-asmlinkage int
-sys_sigaltstack(const stack_t *uss, stack_t *uoss)
-{
- struct pt_regs *regs = (struct pt_regs *) &uss;
- return do_sigaltstack(uss, uoss, regs->esp);
-}
-
-
-/*
- * Do a signal return; undo the signal stack.
- */
-
-struct sigframe
-{
- char *pretcode;
- int sig;
- struct sigcontext sc;
- struct _fpstate fpstate;
- unsigned long extramask[_NSIG_WORDS-1];
- char retcode[8];
-};
-
-struct rt_sigframe
-{
- char *pretcode;
- int sig;
- struct siginfo *pinfo;
- void *puc;
- struct siginfo info;
- struct ucontext uc;
- struct _fpstate fpstate;
- char retcode[8];
-};
-
-static int
-restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc, int *peax)
-{
- unsigned int err = 0;
-
-#define COPY(x) err |= __get_user(regs->x, &sc->x)
-
-#define COPY_SEG(seg) \
- { unsigned short tmp; \
- err |= __get_user(tmp, &sc->seg); \
- regs->x##seg = tmp; }
-
-#define COPY_SEG_STRICT(seg) \
- { unsigned short tmp; \
- err |= __get_user(tmp, &sc->seg); \
- regs->x##seg = tmp|3; }
-
-#define GET_SEG(seg) \
- { unsigned short tmp; \
- err |= __get_user(tmp, &sc->seg); \
- loadsegment(seg,tmp); }
-
- GET_SEG(gs);
- GET_SEG(fs);
- COPY_SEG(es);
- COPY_SEG(ds);
- COPY(edi);
- COPY(esi);
- COPY(ebp);
- COPY(esp);
- COPY(ebx);
- COPY(edx);
- COPY(ecx);
- COPY(eip);
- COPY_SEG_STRICT(cs);
- COPY_SEG_STRICT(ss);
-
- {
- unsigned int tmpflags;
- err |= __get_user(tmpflags, &sc->eflags);
- regs->eflags = (regs->eflags & ~0x40DD5) | (tmpflags & 0x40DD5);
- regs->orig_eax = -1; /* disable syscall checks */
- }
-
- {
- struct _fpstate * buf;
- err |= __get_user(buf, &sc->fpstate);
- if (buf) {
- if (verify_area(VERIFY_READ, buf, sizeof(*buf)))
- goto badframe;
- err |= restore_i387(buf);
- }
- }
-
- err |= __get_user(*peax, &sc->eax);
- return err;
-
-badframe:
- return 1;
-}
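
The 0x40DD5 mask above is what keeps a signal handler from smuggling privileged bits back into EFLAGS. Decoded, it covers exactly the user-settable flags:

    /* 0x40DD5 = bits 0,2,4,6,7,8,10,11,18 = CF PF AF ZF SF TF DF OF AC.
     * IOPL, IF, VM and the rest keep their kernel-side values: */
    eflags = (eflags & ~0x40DD5) | (user_flags & 0x40DD5);
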
-
-asmlinkage int sys_sigreturn(unsigned long __unused)
-{
- struct pt_regs *regs = (struct pt_regs *) &__unused;
- struct sigframe *frame = (struct sigframe *)(regs->esp - 8);
- sigset_t set;
- int eax;
-
- if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
- goto badframe;
- if (__get_user(set.sig[0], &frame->sc.oldmask)
- || (_NSIG_WORDS > 1
- && __copy_from_user(&set.sig[1], &frame->extramask,
- sizeof(frame->extramask))))
- goto badframe;
-
- sigdelsetmask(&set, ~_BLOCKABLE);
- spin_lock_irq(&current->sigmask_lock);
- current->blocked = set;
- recalc_sigpending(current);
- spin_unlock_irq(&current->sigmask_lock);
-
- if (restore_sigcontext(regs, &frame->sc, &eax))
- goto badframe;
- return eax;
-
-badframe:
- force_sig(SIGSEGV, current);
- return 0;
-}
-
-asmlinkage int sys_rt_sigreturn(unsigned long __unused)
-{
- struct pt_regs *regs = (struct pt_regs *) &__unused;
- struct rt_sigframe *frame = (struct rt_sigframe *)(regs->esp - 4);
- sigset_t set;
- stack_t st;
- int eax;
-
- if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
- goto badframe;
- if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
- goto badframe;
-
- sigdelsetmask(&set, ~_BLOCKABLE);
- spin_lock_irq(&current->sigmask_lock);
- current->blocked = set;
- recalc_sigpending(current);
- spin_unlock_irq(&current->sigmask_lock);
-
- if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &eax))
- goto badframe;
-
- if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
- goto badframe;
- /* It is more difficult to avoid calling this function than to
- call it and ignore errors. */
- do_sigaltstack(&st, NULL, regs->esp);
-
- return eax;
-
-badframe:
- force_sig(SIGSEGV, current);
- return 0;
-}
-
-/*
- * Set up a signal frame.
- */
-
-static int
-setup_sigcontext(struct sigcontext *sc, struct _fpstate *fpstate,
- struct pt_regs *regs, unsigned long mask)
-{
- int tmp, err = 0;
-
- tmp = 0;
- __asm__("movl %%gs,%0" : "=r"(tmp): "0"(tmp));
- err |= __put_user(tmp, (unsigned int *)&sc->gs);
- __asm__("movl %%fs,%0" : "=r"(tmp): "0"(tmp));
- err |= __put_user(tmp, (unsigned int *)&sc->fs);
-
- err |= __put_user(regs->xes, (unsigned int *)&sc->es);
- err |= __put_user(regs->xds, (unsigned int *)&sc->ds);
- err |= __put_user(regs->edi, &sc->edi);
- err |= __put_user(regs->esi, &sc->esi);
- err |= __put_user(regs->ebp, &sc->ebp);
- err |= __put_user(regs->esp, &sc->esp);
- err |= __put_user(regs->ebx, &sc->ebx);
- err |= __put_user(regs->edx, &sc->edx);
- err |= __put_user(regs->ecx, &sc->ecx);
- err |= __put_user(regs->eax, &sc->eax);
- err |= __put_user(current->thread.trap_no, &sc->trapno);
- err |= __put_user(current->thread.error_code, &sc->err);
- err |= __put_user(regs->eip, &sc->eip);
- err |= __put_user(regs->xcs, (unsigned int *)&sc->cs);
- err |= __put_user(regs->eflags, &sc->eflags);
- err |= __put_user(regs->esp, &sc->esp_at_signal);
- err |= __put_user(regs->xss, (unsigned int *)&sc->ss);
-
- tmp = save_i387(fpstate);
- if (tmp < 0)
- err = 1;
- else
- err |= __put_user(tmp ? fpstate : NULL, &sc->fpstate);
-
- /* non-iBCS2 extensions.. */
- err |= __put_user(mask, &sc->oldmask);
- err |= __put_user(current->thread.cr2, &sc->cr2);
-
- return err;
-}
-
-/*
- * Determine which stack to use..
- */
-static inline void *
-get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
-{
- unsigned long esp;
-
- /* Default to using normal stack */
- esp = regs->esp;
-
- /* This is the X/Open sanctioned signal stack switching. */
- if (ka->sa.sa_flags & SA_ONSTACK) {
- if (sas_ss_flags(esp) == 0)
- esp = current->sas_ss_sp + current->sas_ss_size;
- }
-
- /* This is the legacy signal stack switching. */
- else if ((regs->xss & 0xffff) != __USER_DS &&
- !(ka->sa.sa_flags & SA_RESTORER) &&
- ka->sa.sa_restorer) {
- esp = (unsigned long) ka->sa.sa_restorer;
- }
-
- return (void *)((esp - frame_size) & -8ul);
-}
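
The final "& -8ul" rounds the frame base down to an 8-byte boundary (-8ul is the same bit pattern as ~7ul). With illustrative numbers:

    unsigned long esp        = 0xbffff00dul;  /* sample, deliberately odd */
    unsigned long frame_size = 0x2e0;
    unsigned long frame      = (esp - frame_size) & -8ul;
    /* 0xbffff00d - 0x2e0 = 0xbfffed2d, rounded down to 0xbfffed28 */
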
-
-static void setup_frame(int sig, struct k_sigaction *ka,
- sigset_t *set, struct pt_regs * regs)
-{
- struct sigframe *frame;
- int err = 0;
-
- frame = get_sigframe(ka, regs, sizeof(*frame));
-
- if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
- goto give_sigsegv;
-
- err |= __put_user((current->exec_domain
- && current->exec_domain->signal_invmap
- && sig < 32
- ? current->exec_domain->signal_invmap[sig]
- : sig),
- &frame->sig);
- if (err)
- goto give_sigsegv;
-
- err |= setup_sigcontext(&frame->sc, &frame->fpstate, regs, set->sig[0]);
- if (err)
- goto give_sigsegv;
-
- if (_NSIG_WORDS > 1) {
- err |= __copy_to_user(frame->extramask, &set->sig[1],
- sizeof(frame->extramask));
- }
- if (err)
- goto give_sigsegv;
-
- /* Set up to return from userspace. If provided, use a stub
- already in userspace. */
- if (ka->sa.sa_flags & SA_RESTORER) {
- err |= __put_user(ka->sa.sa_restorer, &frame->pretcode);
- } else {
- err |= __put_user(frame->retcode, &frame->pretcode);
- /* This is popl %eax ; movl $,%eax ; int $0x80 */
- err |= __put_user(0xb858, (short *)(frame->retcode+0));
- err |= __put_user(__NR_sigreturn, (int *)(frame->retcode+2));
- err |= __put_user(0x80cd, (short *)(frame->retcode+6));
- }
-
- if (err)
- goto give_sigsegv;
-
- /* Set up registers for signal handler */
- regs->esp = (unsigned long) frame;
- regs->eip = (unsigned long) ka->sa.sa_handler;
-
- set_fs(USER_DS);
- regs->xds = __USER_DS;
- regs->xes = __USER_DS;
- regs->xss = __USER_DS;
- regs->xcs = __USER_CS;
- regs->eflags &= ~TF_MASK;
-
-#if DEBUG_SIG
- printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n",
- current->comm, current->pid, frame, regs->eip, frame->pretcode);
-#endif
-
- return;
-
-give_sigsegv:
- if (sig == SIGSEGV)
- ka->sa.sa_handler = SIG_DFL;
- force_sig(SIGSEGV, current);
-}
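
The three __put_user() stores above assemble the sigreturn trampoline byte by byte; because x86 is little-endian, the 16-bit store of 0xb858 lands in memory as 58 b8. The same eight bytes written out directly (illustrative, not how the kernel does it):

    unsigned char retcode[8];
    retcode[0] = 0x58;                      /* popl %eax                   */
    retcode[1] = 0xb8;                      /* movl $imm32, %eax ...       */
    *(int *)(retcode + 2) = __NR_sigreturn; /* ... with the syscall number */
    retcode[6] = 0xcd;                      /* int ...                     */
    retcode[7] = 0x80;                      /* ... $0x80                   */
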
-
-static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
- sigset_t *set, struct pt_regs * regs)
-{
- struct rt_sigframe *frame;
- int err = 0;
-
- frame = get_sigframe(ka, regs, sizeof(*frame));
-
- if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
- goto give_sigsegv;
-
- err |= __put_user((current->exec_domain
- && current->exec_domain->signal_invmap
- && sig < 32
- ? current->exec_domain->signal_invmap[sig]
- : sig),
- &frame->sig);
- err |= __put_user(&frame->info, &frame->pinfo);
- err |= __put_user(&frame->uc, &frame->puc);
- err |= copy_siginfo_to_user(&frame->info, info);
- if (err)
- goto give_sigsegv;
-
- /* Create the ucontext. */
- err |= __put_user(0, &frame->uc.uc_flags);
- err |= __put_user(0, &frame->uc.uc_link);
- err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
- err |= __put_user(sas_ss_flags(regs->esp),
- &frame->uc.uc_stack.ss_flags);
- err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
- err |= setup_sigcontext(&frame->uc.uc_mcontext, &frame->fpstate,
- regs, set->sig[0]);
- err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
- if (err)
- goto give_sigsegv;
-
- /* Set up to return from userspace. If provided, use a stub
- already in userspace. */
- if (ka->sa.sa_flags & SA_RESTORER) {
- err |= __put_user(ka->sa.sa_restorer, &frame->pretcode);
- } else {
- err |= __put_user(frame->retcode, &frame->pretcode);
- /* This is movl $,%eax ; int $0x80 */
- err |= __put_user(0xb8, (char *)(frame->retcode+0));
- err |= __put_user(__NR_rt_sigreturn, (int *)(frame->retcode+1));
- err |= __put_user(0x80cd, (short *)(frame->retcode+5));
- }
-
- if (err)
- goto give_sigsegv;
-
- /* Set up registers for signal handler */
- regs->esp = (unsigned long) frame;
- regs->eip = (unsigned long) ka->sa.sa_handler;
-
- set_fs(USER_DS);
- regs->xds = __USER_DS;
- regs->xes = __USER_DS;
- regs->xss = __USER_DS;
- regs->xcs = __USER_CS;
- regs->eflags &= ~TF_MASK;
-
-#if DEBUG_SIG
- printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n",
- current->comm, current->pid, frame, regs->eip, frame->pretcode);
-#endif
-
- return;
-
-give_sigsegv:
- if (sig == SIGSEGV)
- ka->sa.sa_handler = SIG_DFL;
- force_sig(SIGSEGV, current);
-}
-
-/*
- * OK, we're invoking a handler
- */
-
-static void
-handle_signal(unsigned long sig, struct k_sigaction *ka,
- siginfo_t *info, sigset_t *oldset, struct pt_regs * regs)
-{
- /* Are we from a system call? */
- if (regs->orig_eax >= 0) {
- /* If so, check system call restarting.. */
- switch (regs->eax) {
- case -ERESTARTNOHAND:
- regs->eax = -EINTR;
- break;
-
- case -ERESTARTSYS:
- if (!(ka->sa.sa_flags & SA_RESTART)) {
- regs->eax = -EINTR;
- break;
- }
- /* fallthrough */
- case -ERESTARTNOINTR:
- regs->eax = regs->orig_eax;
- regs->eip -= 2;
- }
- }
-
- /* Set up the stack frame */
- if (ka->sa.sa_flags & SA_SIGINFO)
- setup_rt_frame(sig, ka, info, oldset, regs);
- else
- setup_frame(sig, ka, oldset, regs);
-
- if (ka->sa.sa_flags & SA_ONESHOT)
- ka->sa.sa_handler = SIG_DFL;
-
- if (!(ka->sa.sa_flags & SA_NODEFER)) {
- spin_lock_irq(&current->sigmask_lock);
- sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
- sigaddset(&current->blocked,sig);
- recalc_sigpending(current);
- spin_unlock_irq(&current->sigmask_lock);
- }
-}
-
-/*
- * Note that 'init' is a special process: it doesn't get signals it doesn't
- * want to handle. Thus you cannot kill init even with a SIGKILL even by
- * mistake.
- */
-int fastcall do_signal(struct pt_regs *regs, sigset_t *oldset)
-{
- siginfo_t info;
- struct k_sigaction *ka;
-
- /*
- * We want the common case to go fast, which
- * is why we may in certain cases get here from
- * kernel mode. Just return without doing anything
- * if so.
- */
- if ((regs->xcs & 2) != 2)
- return 1;
-
- if (!oldset)
- oldset = &current->blocked;
-
- for (;;) {
- unsigned long signr;
-
- spin_lock_irq(&current->sigmask_lock);
- signr = dequeue_signal(&current->blocked, &info);
- spin_unlock_irq(&current->sigmask_lock);
-
- if (!signr)
- break;
-
- if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
- /* Let the debugger run. */
- current->exit_code = signr;
- current->state = TASK_STOPPED;
- notify_parent(current, SIGCHLD);
- schedule();
-
- /* We're back. Did the debugger cancel the sig? */
- if (!(signr = current->exit_code))
- continue;
- current->exit_code = 0;
-
- /* The debugger continued. Ignore SIGSTOP. */
- if (signr == SIGSTOP)
- continue;
-
- /* Update the siginfo structure. Is this good? */
- if (signr != info.si_signo) {
- info.si_signo = signr;
- info.si_errno = 0;
- info.si_code = SI_USER;
- info.si_pid = current->p_pptr->pid;
- info.si_uid = current->p_pptr->uid;
- }
-
- /* If the (new) signal is now blocked, requeue it. */
- if (sigismember(&current->blocked, signr)) {
- send_sig_info(signr, &info, current);
- continue;
- }
- }
-
- ka = &current->sig->action[signr-1];
- if (ka->sa.sa_handler == SIG_IGN) {
- if (signr != SIGCHLD)
- continue;
- /* Check for SIGCHLD: it's special. */
- while (sys_wait4(-1, NULL, WNOHANG, NULL) > 0)
- /* nothing */;
- continue;
- }
-
- if (ka->sa.sa_handler == SIG_DFL) {
- int exit_code = signr;
-
- /* Init gets no signals it doesn't want. */
- if (current->pid == 1)
- continue;
-
- switch (signr) {
- case SIGCONT: case SIGCHLD: case SIGWINCH: case SIGURG:
- continue;
-
- case SIGTSTP: case SIGTTIN: case SIGTTOU:
- if (is_orphaned_pgrp(current->pgrp))
- continue;
- /* FALLTHRU */
-
- case SIGSTOP: {
- struct signal_struct *sig;
- current->state = TASK_STOPPED;
- current->exit_code = signr;
- sig = current->p_pptr->sig;
- if (sig && !(sig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
- notify_parent(current, SIGCHLD);
- schedule();
- continue;
- }
-
- case SIGQUIT: case SIGILL: case SIGTRAP:
- case SIGABRT: case SIGFPE: case SIGSEGV:
- case SIGBUS: case SIGSYS: case SIGXCPU: case SIGXFSZ:
- if (do_coredump(signr, regs))
- exit_code |= 0x80;
- /* FALLTHRU */
-
- default:
- sig_exit(signr, exit_code, &info);
- /* NOTREACHED */
- }
- }
-
- /* Reenable any watchpoints before delivering the
- * signal to user space. The processor register will
- * have been cleared if the watchpoint triggered
- * inside the kernel.
- */
- if ( current->thread.debugreg[7] != 0 )
- HYPERVISOR_set_debugreg(7, current->thread.debugreg[7]);
-
- /* Whee! Actually deliver the signal. */
- handle_signal(signr, ka, &info, oldset, regs);
- return 1;
- }
-
- /* Did we come from a system call? */
- if (regs->orig_eax >= 0) {
- /* Restart the system call - no handlers present */
- if (regs->eax == -ERESTARTNOHAND ||
- regs->eax == -ERESTARTSYS ||
- regs->eax == -ERESTARTNOINTR) {
- regs->eax = regs->orig_eax;
- regs->eip -= 2;
- }
- }
- return 0;
-}
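The restart handling above (in both handle_signal() and the tail of do_signal()) relies on "int $0x80" being a two-byte instruction: restoring eax to orig_eax and rewinding eip by 2 makes the interrupted system call re-execute on return to userspace. A hedged sketch of the same decision table, with an illustrative helper name (the -ERESTART* and -EINTR codes are from <linux/errno.h>):

    /* Sketch of the i386 syscall-restart convention. Returns the value for
     * regs->eax; sets *rewind when eip must be backed up over "int $0x80".
     */
    static long restart_disposition(long eax, long orig_eax,
                                    int sa_restart, int *rewind)
    {
        *rewind = 0;
        switch (eax) {
        case -ERESTARTNOHAND:
            return -EINTR;        /* never restarted when a handler runs */
        case -ERESTARTSYS:
            if (!sa_restart)
                return -EINTR;    /* restart only if SA_RESTART was set */
            /* fall through */
        case -ERESTARTNOINTR:
            *rewind = 1;          /* caller does regs->eip -= 2 */
            return orig_eax;      /* re-issue the original syscall number */
        }
        return eax;               /* not a restart case */
    }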
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/arch/xen/kernel/time.c
--- a/linux-2.4.30-xen-sparse/arch/xen/kernel/time.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,721 +0,0 @@
-/* -*- Mode:C; c-basic-offset:4; tab-width:4 -*-
- ****************************************************************************
- * (C) 2002-2003 - Rolf Neugebauer - Intel Research Cambridge
- * (C) 2002-2003 - Keir Fraser - University of Cambridge
- ****************************************************************************
- *
- * File: arch/xen/kernel/time.c
- * Author: Rolf Neugebauer and Keir Fraser
- *
- * Description: Interface with Xen to get correct notion of time
- */
-
-/*
- * linux/arch/i386/kernel/time.c
- *
- * Copyright (C) 1991, 1992, 1995 Linus Torvalds
- *
- * This file contains the PC-specific time handling details:
- * reading the RTC at bootup, etc..
- * 1994-07-02 Alan Modra
- * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
- * 1995-03-26 Markus Kuhn
- * fixed 500 ms bug at call to set_rtc_mmss, fixed DS12887
- * precision CMOS clock update
- * 1996-05-03 Ingo Molnar
- * fixed time warps in do_[slow|fast]_gettimeoffset()
- * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
- * "A Kernel Model for Precision Timekeeping" by Dave Mills
- * 1998-09-05 (Various)
- * More robust do_fast_gettimeoffset() algorithm implemented
- * (works with APM, Cyrix 6x86MX and Centaur C6),
- * monotonic gettimeofday() with fast_get_timeoffset(),
- * drift-proof precision TSC calibration on boot
- * (C. Scott Ananian <cananian@xxxxxxxxxxxxxxxxxxxx>, Andrew D.
- * Balsa <andrebalsa@xxxxxxxxxx>, Philip Gladstone <philip@xxxxxxxxxx>;
- * ported from 2.0.35 Jumbo-9 by Michael Krause <m.krause@xxxxxxxxxxxxx>).
- * 1998-12-16 Andrea Arcangeli
- * Fixed Jumbo-9 code in 2.1.131: do_gettimeofday was missing 1 jiffy
- * because was not accounting lost_ticks.
- * 1998-12-24 Copyright (C) 1998 Andrea Arcangeli
- * Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
- * serialize accesses to xtime/lost_ticks).
- */
-
-#include <asm/smp.h>
-#include <asm/irq.h>
-#include <asm/msr.h>
-#include <asm/delay.h>
-#include <asm/mpspec.h>
-#include <asm/uaccess.h>
-#include <asm/processor.h>
-
-#include <asm/div64.h>
-#include <asm/hypervisor.h>
-#include <asm-xen/xen-public/dom0_ops.h>
-
-#include <linux/mc146818rtc.h>
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/time.h>
-#include <linux/init.h>
-#include <linux/smp.h>
-#include <linux/irq.h>
-#include <linux/sysctl.h>
-#include <linux/sysrq.h>
-
-spinlock_t rtc_lock = SPIN_LOCK_UNLOCKED;
-extern rwlock_t xtime_lock;
-extern unsigned long wall_jiffies;
-
-unsigned long cpu_khz; /* get this from Xen, used elsewhere */
-
-static unsigned int rdtsc_bitshift;
-static u32 st_scale_f; /* convert ticks -> usecs */
-static u32 st_scale_i; /* convert ticks -> usecs */
-
-/* These are periodically updated in shared_info, and then copied here. */
-static u32 shadow_tsc_stamp;
-static u64 shadow_system_time;
-static u32 shadow_time_version;
-static struct timeval shadow_tv;
-
-/*
- * We use this to ensure that gettimeofday() is monotonically increasing. We
- * only break this guarantee if the wall clock jumps backwards "a long way".
- */
-static struct timeval last_seen_tv = {0,0};
-
-#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-/* Periodically propagate synchronised time base to the RTC and to Xen. */
-static long last_update_to_rtc, last_update_to_xen;
-#endif
-
-/* Periodically take synchronised time base from Xen, if we need it. */
-static long last_update_from_xen; /* UTC seconds when last read Xen clock. */
-
-/* Keep track of last time we did processing/updating of jiffies and xtime. */
-static u64 processed_system_time; /* System time (ns) at last processing. */
-
-#define NS_PER_TICK (1000000000ULL/HZ)
-
-#ifndef NSEC_PER_SEC
-#define NSEC_PER_SEC (1000000000L)
-#endif
-
-#define HANDLE_USEC_UNDERFLOW(_tv) \
- do { \
- while ( (_tv).tv_usec < 0 ) \
- { \
- (_tv).tv_usec += 1000000; \
- (_tv).tv_sec--; \
- } \
- } while ( 0 )
-#define HANDLE_USEC_OVERFLOW(_tv) \
- do { \
- while ( (_tv).tv_usec >= 1000000 ) \
- { \
- (_tv).tv_usec -= 1000000; \
- (_tv).tv_sec++; \
- } \
- } while ( 0 )
-static inline void __normalize_time(time_t *sec, s64 *nsec)
-{
- while (*nsec >= NSEC_PER_SEC) {
- (*nsec) -= NSEC_PER_SEC;
- (*sec)++;
- }
- while (*nsec < 0) {
- (*nsec) += NSEC_PER_SEC;
- (*sec)--;
- }
-}
-
-/* Dynamically-mapped IRQs. */
-static int time_irq, debug_irq;
-
-/* Does this guest OS track Xen time, or set its wall clock independently? */
-static int independent_wallclock = 0;
-static int __init __independent_wallclock(char *str)
-{
- independent_wallclock = 1;
- return 1;
-}
-__setup("independent_wallclock", __independent_wallclock);
-#define INDEPENDENT_WALLCLOCK() \
- (independent_wallclock || (xen_start_info.flags & SIF_INITDOMAIN))
-
-#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-/*
- * In order to set the CMOS clock precisely, set_rtc_mmss has to be
- * called 500 ms after the second nowtime has started, because when
- * nowtime is written into the registers of the CMOS clock, it will
- * jump to the next second precisely 500 ms later. Check the Motorola
- * MC146818A or Dallas DS12887 data sheet for details.
- *
- * BUG: This routine does not handle hour overflow properly; it just
- * sets the minutes. Usually you'll only notice that after reboot!
- */
-static int set_rtc_mmss(unsigned long nowtime)
-{
- int retval = 0;
- int real_seconds, real_minutes, cmos_minutes;
- unsigned char save_control, save_freq_select;
-
- /* gets recalled with irq locally disabled */
- spin_lock(&rtc_lock);
- save_control = CMOS_READ(RTC_CONTROL);
- CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
-
- save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
- CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
-
- cmos_minutes = CMOS_READ(RTC_MINUTES);
- if ( !(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD )
- BCD_TO_BIN(cmos_minutes);
-
- /*
- * since we're only adjusting minutes and seconds, don't interfere with
- * hour overflow. This avoids messing with unknown time zones but requires
- * your RTC not to be off by more than 15 minutes
- */
- real_seconds = nowtime % 60;
- real_minutes = nowtime / 60;
- if ( ((abs(real_minutes - cmos_minutes) + 15)/30) & 1 )
- real_minutes += 30; /* correct for half hour time zone */
- real_minutes %= 60;
-
- if ( abs(real_minutes - cmos_minutes) < 30 )
- {
- if ( !(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD )
- {
- BIN_TO_BCD(real_seconds);
- BIN_TO_BCD(real_minutes);
- }
- CMOS_WRITE(real_seconds,RTC_SECONDS);
- CMOS_WRITE(real_minutes,RTC_MINUTES);
- }
- else
- {
- printk(KERN_WARNING
- "set_rtc_mmss: can't update from %d to %d\n",
- cmos_minutes, real_minutes);
- retval = -1;
- }
-
- /* The following flags have to be released exactly in this order,
- * otherwise the DS12887 (popular MC146818A clone with integrated
- * battery and quartz) will not reset the oscillator and will not
- * update precisely 500 ms later. You won't find this mentioned in
- * the Dallas Semiconductor data sheets, but who believes data
- * sheets anyway ... -- Markus Kuhn
- */
- CMOS_WRITE(save_control, RTC_CONTROL);
- CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
- spin_unlock(&rtc_lock);
-
- return retval;
-}
-#endif
-
-
-/*
- * Reads a consistent set of time-base values from Xen, into a shadow data
- * area. Must be called with the xtime_lock held for writing.
- */
-static void __get_time_values_from_xen(void)
-{
- do {
- shadow_time_version = HYPERVISOR_shared_info->time_version2;
- rmb();
- shadow_tv.tv_sec = HYPERVISOR_shared_info->wc_sec;
- shadow_tv.tv_usec = HYPERVISOR_shared_info->wc_usec;
- shadow_tsc_stamp =
- (u32)(HYPERVISOR_shared_info->tsc_timestamp >> rdtsc_bitshift);
- shadow_system_time = HYPERVISOR_shared_info->system_time;
- rmb();
- }
- while ( shadow_time_version != HYPERVISOR_shared_info->time_version1 );
-}
-
-#define TIME_VALUES_UP_TO_DATE \
- ({ rmb(); (shadow_time_version == HYPERVISOR_shared_info->time_version2); })
-
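__get_time_values_from_xen() and TIME_VALUES_UP_TO_DATE are the read side of a lockless version-counter protocol: Xen bumps time_version2 before updating the shared fields and time_version1 afterwards, so a snapshot is consistent only when the version sampled first matches version1 sampled last. A generic sketch of the reader (illustrative structure; a compiler barrier stands in for rmb()):

    /* Generic version-counter reader mirroring the retry loop above.
     * Writer protocol: bump version2, publish the payload, bump version1.
     */
    struct versioned { volatile unsigned version1, version2, data; };

    static unsigned read_consistent(const struct versioned *v)
    {
        unsigned ver, val;
        do {
            ver = v->version2;
            __asm__ __volatile__("" ::: "memory"); /* rmb() stand-in */
            val = v->data;
            __asm__ __volatile__("" ::: "memory"); /* rmb() stand-in */
        } while (ver != v->version1);
        return val;
    }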
-
-/*
- * Returns the system time elapsed, in usecs, since the current shadow
- * time values were read. Must be called with the xtime_lock held for reading.
- */
-static inline unsigned long __get_time_delta_usecs(void)
-{
- s32 delta_tsc;
- u32 low;
- u64 delta, tsc;
-
- rdtscll(tsc);
- low = (u32)(tsc >> rdtsc_bitshift);
- delta_tsc = (s32)(low - shadow_tsc_stamp);
- if ( unlikely(delta_tsc < 0) ) delta_tsc = 0;
- delta = ((u64)delta_tsc * st_scale_f);
- delta >>= 32;
- delta += ((u64)delta_tsc * st_scale_i);
-
- return (unsigned long)delta;
-}
-
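st_scale_i and st_scale_f above are the integer and fractional halves of a 32.32 fixed-point "usecs per shifted tick" factor (time_init() below computes scale = (10^6 << (32 + rdtsc_bitshift)) / cpu_freq), so the conversion costs two multiplies and a shift instead of a 64-bit divide. The same arithmetic restated with plain 64-bit math (a sketch; names are illustrative):

    #include <stdint.h>

    /* Equivalent of __get_time_delta_usecs()'s split multiply:
     * usecs = delta * (scale_i + scale_f / 2^32), in 32.32 fixed point.
     */
    static unsigned long ticks_to_usecs(uint32_t delta,
                                        uint32_t scale_i, uint32_t scale_f)
    {
        uint64_t usecs = ((uint64_t)delta * scale_f) >> 32; /* fraction */
        usecs += (uint64_t)delta * scale_i;                 /* integer  */
        return (unsigned long)usecs;
    }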
-
-/*
- * Returns the current time-of-day in UTC timeval format.
- */
-void do_gettimeofday(struct timeval *tv)
-{
- unsigned long flags, lost;
- struct timeval _tv;
- s64 nsec;
-
- again:
- read_lock_irqsave(&xtime_lock, flags);
-
- _tv.tv_usec = __get_time_delta_usecs();
- if ( (lost = (jiffies - wall_jiffies)) != 0 )
- _tv.tv_usec += lost * (1000000 / HZ);
- _tv.tv_sec = xtime.tv_sec;
- _tv.tv_usec += xtime.tv_usec;
-
- nsec = shadow_system_time - processed_system_time;
- __normalize_time(&_tv.tv_sec, &nsec);
- _tv.tv_usec += (long)nsec / 1000L;
-
- if ( unlikely(!TIME_VALUES_UP_TO_DATE) )
- {
- /*
- * We may have blocked for a long time, rendering our calculations
- * invalid (e.g. the time delta may have overflowed). Detect that
- * and recalculate with fresh values.
- */
- read_unlock_irqrestore(&xtime_lock, flags);
- write_lock_irqsave(&xtime_lock, flags);
- __get_time_values_from_xen();
- write_unlock_irqrestore(&xtime_lock, flags);
- goto again;
- }
-
- HANDLE_USEC_OVERFLOW(_tv);
-
- /* Ensure that time-of-day is monotonically increasing. */
- if ( (_tv.tv_sec < last_seen_tv.tv_sec) ||
- ((_tv.tv_sec == last_seen_tv.tv_sec) &&
- (_tv.tv_usec < last_seen_tv.tv_usec)) )
- _tv = last_seen_tv;
- last_seen_tv = _tv;
-
- read_unlock_irqrestore(&xtime_lock, flags);
-
- *tv = _tv;
-}
-
-
-/*
- * Sets the current time-of-day based on passed-in UTC timeval parameter.
- */
-void do_settimeofday(struct timeval *tv)
-{
- struct timeval newtv;
- s64 nsec;
- suseconds_t usec;
-
- if ( !INDEPENDENT_WALLCLOCK() )
- return;
-
- write_lock_irq(&xtime_lock);
-
- /*
- * If we block for a long time our time delta may overflow, leaving our
- * shadow time values stale. Detect that below (via TIME_VALUES_UP_TO_DATE)
- * and retry with fresh values from Xen.
- */
- again:
- usec = tv->tv_usec - __get_time_delta_usecs();
-
- nsec = shadow_system_time - processed_system_time;
- __normalize_time(&tv->tv_sec, &nsec);
- usec -= (long)nsec / 1000L;
-
- if ( unlikely(!TIME_VALUES_UP_TO_DATE) )
- {
- __get_time_values_from_xen();
- goto again;
- }
- tv->tv_usec = usec;
-
- HANDLE_USEC_UNDERFLOW(*tv);
-
- newtv = *tv;
-
- tv->tv_usec -= (jiffies - wall_jiffies) * (1000000 / HZ);
- HANDLE_USEC_UNDERFLOW(*tv);
-
- xtime = *tv;
- time_adjust = 0; /* stop active adjtime() */
- time_status |= STA_UNSYNC;
- time_maxerror = NTP_PHASE_LIMIT;
- time_esterror = NTP_PHASE_LIMIT;
-
- /* Reset all our running time counts. They make no sense now. */
- last_seen_tv.tv_sec = 0;
- last_update_from_xen = 0;
-
-#ifdef CONFIG_XEN_PRIVILEGED_GUEST
- if ( xen_start_info.flags & SIF_INITDOMAIN )
- {
- dom0_op_t op;
- last_update_to_rtc = last_update_to_xen = 0;
- op.cmd = DOM0_SETTIME;
- op.u.settime.secs = newtv.tv_sec;
- op.u.settime.usecs = newtv.tv_usec;
- op.u.settime.system_time = shadow_system_time;
- write_unlock_irq(&xtime_lock);
- HYPERVISOR_dom0_op(&op);
- }
- else
-#endif
- {
- write_unlock_irq(&xtime_lock);
- }
-}
-
-
-asmlinkage long sys_stime(int *tptr)
-{
- int value;
- struct timeval tv;
-
- if ( !capable(CAP_SYS_TIME) )
- return -EPERM;
-
- if ( get_user(value, tptr) )
- return -EFAULT;
-
- tv.tv_sec = value;
- tv.tv_usec = 0;
-
- do_settimeofday(&tv);
-
- return 0;
-}
-
-
-/* Convert jiffies to system time. Call with xtime_lock held for reading. */
-static inline u64 __jiffies_to_st(unsigned long j)
-{
- return processed_system_time + ((j - jiffies) * NS_PER_TICK);
-}
-
-
-static inline void do_timer_interrupt(int irq, void *dev_id,
- struct pt_regs *regs)
-{
- s64 delta;
- unsigned long ticks = 0;
- long sec_diff;
-
- do {
- __get_time_values_from_xen();
-
- delta = (s64)(shadow_system_time +
- ((s64)__get_time_delta_usecs() * 1000LL) -
- processed_system_time);
- }
- while ( !TIME_VALUES_UP_TO_DATE );
-
- if ( unlikely(delta < 0) )
- {
- printk("Timer ISR: Time went backwards: %lld\n", delta);
- return;
- }
-
- /* Process elapsed jiffies since last call. */
- while ( delta >= NS_PER_TICK )
- {
- ticks++;
- delta -= NS_PER_TICK;
- processed_system_time += NS_PER_TICK;
- }
-
- if ( ticks != 0 )
- {
- do_timer_ticks(ticks);
-
- if ( user_mode(regs) )
- update_process_times_us(ticks, 0);
- else
- update_process_times_us(0, ticks);
- }
-
- /*
- * Take synchronised time from Xen once a minute if we're not
- * synchronised ourselves, and we haven't chosen to keep an independent
- * time base.
- */
- if ( !INDEPENDENT_WALLCLOCK() &&
- ((time_status & STA_UNSYNC) != 0) &&
- (xtime.tv_sec > (last_update_from_xen + 60)) )
- {
- /* Adjust shadow timeval for jiffies that haven't updated xtime yet. */
- shadow_tv.tv_usec -= (jiffies - wall_jiffies) * (1000000/HZ);
- HANDLE_USEC_UNDERFLOW(shadow_tv);
-
- /*
- * Reset our running time counts if they are invalidated by a warp
- * backwards of more than 500ms.
- */
- sec_diff = xtime.tv_sec - shadow_tv.tv_sec;
- if ( unlikely(abs(sec_diff) > 1) ||
- unlikely(((sec_diff * 1000000) +
- xtime.tv_usec - shadow_tv.tv_usec) > 500000) )
- {
-#ifdef CONFIG_XEN_PRIVILEGED_GUEST
- last_update_to_rtc = last_update_to_xen = 0;
-#endif
- last_seen_tv.tv_sec = 0;
- }
-
- /* Update our unsynchronised xtime appropriately. */
- xtime = shadow_tv;
-
- last_update_from_xen = xtime.tv_sec;
- }
-
-#ifdef CONFIG_XEN_PRIVILEGED_GUEST
- if ( (xen_start_info.flags & SIF_INITDOMAIN) &&
- ((time_status & STA_UNSYNC) == 0) )
- {
- /* Send synchronised time to Xen approximately every minute. */
- if ( xtime.tv_sec > (last_update_to_xen + 60) )
- {
- dom0_op_t op;
- struct timeval tv = xtime;
-
- tv.tv_usec += (jiffies - wall_jiffies) * (1000000/HZ);
- HANDLE_USEC_OVERFLOW(tv);
-
- op.cmd = DOM0_SETTIME;
- op.u.settime.secs = tv.tv_sec;
- op.u.settime.usecs = tv.tv_usec;
- op.u.settime.system_time = shadow_system_time;
- HYPERVISOR_dom0_op(&op);
-
- last_update_to_xen = xtime.tv_sec;
- }
-
- /*
- * If we have an externally synchronized Linux clock, then update CMOS
- * clock accordingly every ~11 minutes. Set_rtc_mmss() has to be called
- * as close as possible to 500 ms before the new second starts.
- */
- if ( (xtime.tv_sec > (last_update_to_rtc + 660)) &&
- (xtime.tv_usec >= (500000 - ((unsigned) tick) / 2)) &&
- (xtime.tv_usec <= (500000 + ((unsigned) tick) / 2)) )
- {
- if ( set_rtc_mmss(xtime.tv_sec) == 0 )
- last_update_to_rtc = xtime.tv_sec;
- else
- last_update_to_rtc = xtime.tv_sec - 600;
- }
- }
-#endif
-}
-
-
-static void timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
-{
- write_lock(&xtime_lock);
- do_timer_interrupt(irq, NULL, regs);
- write_unlock(&xtime_lock);
-}
-
-static struct irqaction irq_timer = {
- timer_interrupt,
- SA_INTERRUPT,
- 0,
- "timer",
- NULL,
- NULL
-};
-
-
-/*
- * This function works out when the next timer function has to be
- * executed (by looking at the timer list) and sets the Xen one-shot
- * domain timer to the appropriate value. This is typically called in
- * cpu_idle() before the domain blocks.
- *
- * The function returns a non-0 value on error conditions.
- *
- * It must be called with interrupts disabled.
- */
-extern spinlock_t timerlist_lock;
-int set_timeout_timer(void)
-{
- struct timer_list *timer;
- u64 alarm = 0;
- int ret = 0;
-
- spin_lock(&timerlist_lock);
-
- /*
- * This is safe against long blocking (since calculations are not based on
- * TSC deltas). It is also safe against warped system time since
- * suspend-resume is cooperative and we would first get locked out. It is
- * safe against normal updates of jiffies since interrupts are off.
- */
- if ( (timer = next_timer_event()) != NULL )
- alarm = __jiffies_to_st(timer->expires);
-
- /* Tasks on the timer task queue expect to be executed on the next tick. */
- if ( TQ_ACTIVE(tq_timer) )
- alarm = __jiffies_to_st(jiffies + 1);
-
- /* Failure is pretty bad, but we'd best soldier on. */
- if ( HYPERVISOR_set_timer_op(alarm) != 0 )
- ret = -1;
-
- spin_unlock(&timerlist_lock);
-
- return ret;
-}
-
-
-/* Time debugging. */
-static void dbg_time_int(int irq, void *dev_id, struct pt_regs *ptregs)
-{
- unsigned long flags, j;
- u64 s_now, j_st;
- struct timeval s_tv, tv;
-
- struct timer_list *timer;
- u64 t_st;
-
- read_lock_irqsave(&xtime_lock, flags);
- s_tv.tv_sec = shadow_tv.tv_sec;
- s_tv.tv_usec = shadow_tv.tv_usec;
- s_now = shadow_system_time;
- read_unlock_irqrestore(&xtime_lock, flags);
-
- do_gettimeofday(&tv);
-
- j = jiffies;
- j_st = __jiffies_to_st(j);
-
- timer = next_timer_event();
- t_st = __jiffies_to_st(timer->expires);
-
- printk(KERN_ALERT "time: shadow_st=0x%X:%08X\n",
- (u32)(s_now>>32), (u32)s_now);
- printk(KERN_ALERT "time: wct=%lds %ldus shadow_wct=%lds %ldus\n",
- tv.tv_sec, tv.tv_usec, s_tv.tv_sec, s_tv.tv_usec);
- printk(KERN_ALERT "time: jiffies=%lu(0x%X:%08X) timeout=%lu(0x%X:%08X)\n",
- jiffies,(u32)(j_st>>32), (u32)j_st,
- timer->expires,(u32)(t_st>>32), (u32)t_st);
- printk(KERN_ALERT "time: processed_system_time=0x%X:%08X\n",
- (u32)(processed_system_time>>32), (u32)processed_system_time);
-
-#ifdef CONFIG_MAGIC_SYSRQ
- handle_sysrq('t',NULL,NULL,NULL);
-#endif
-}
-
-static struct irqaction dbg_time = {
- dbg_time_int,
- SA_SHIRQ,
- 0,
- "timer_dbg",
- &dbg_time_int,
- NULL
-};
-
-void __init time_init(void)
-{
- unsigned long long alarm;
- u64 __cpu_khz, __cpu_ghz, cpu_freq, scale, scale2;
- unsigned int cpu_ghz;
-
- __cpu_khz = __cpu_ghz = cpu_freq = HYPERVISOR_shared_info->cpu_freq;
- do_div(__cpu_khz, 1000UL);
- cpu_khz = (u32)__cpu_khz;
- do_div(__cpu_ghz, 1000000000UL);
- cpu_ghz = (unsigned int)__cpu_ghz;
-
- printk("Xen reported: %lu.%03lu MHz processor.\n",
- cpu_khz / 1000, cpu_khz % 1000);
-
- xtime.tv_sec = HYPERVISOR_shared_info->wc_sec;
- xtime.tv_usec = HYPERVISOR_shared_info->wc_usec;
- processed_system_time = shadow_system_time;
-
- for ( rdtsc_bitshift = 0; cpu_ghz != 0; rdtsc_bitshift++, cpu_ghz >>= 1 )
- continue;
-
- scale = 1000000LL << (32 + rdtsc_bitshift);
- do_div(scale, (u32)cpu_freq);
-
- if ( (cpu_freq >> 32) != 0 )
- {
- scale2 = 1000000LL << rdtsc_bitshift;
- do_div(scale2, (u32)(cpu_freq>>32));
- scale += scale2;
- }
-
- st_scale_f = scale & 0xffffffff;
- st_scale_i = scale >> 32;
-
- __get_time_values_from_xen();
- processed_system_time = shadow_system_time;
-
- time_irq = bind_virq_to_irq(VIRQ_TIMER);
- debug_irq = bind_virq_to_irq(VIRQ_DEBUG);
-
- (void)setup_irq(time_irq, &irq_timer);
- (void)setup_irq(debug_irq, &dbg_time);
-
- rdtscll(alarm);
-}
-
-void time_suspend(void)
-{
-}
-
-void time_resume(void)
-{
- unsigned long flags;
- write_lock_irqsave(&xtime_lock, flags);
- /* Get timebases for new environment. */
- __get_time_values_from_xen();
- /* Reset our own concept of passage of system time. */
- processed_system_time = shadow_system_time;
- /* Accept a warp in UTC (wall-clock) time. */
- last_seen_tv.tv_sec = 0;
- /* Make sure we resync UTC time with Xen on next timer interrupt. */
- last_update_from_xen = 0;
- write_unlock_irqrestore(&xtime_lock, flags);
-}
-
-/*
- * /proc/sys/xen: This really belongs in another file. It can stay here for
- * now however.
- */
-static ctl_table xen_subtable[] = {
- {1, "independent_wallclock", &independent_wallclock,
- sizeof(independent_wallclock), 0644, NULL, proc_dointvec},
- {0}
-};
-static ctl_table xen_table[] = {
- {123, "xen", NULL, 0, 0555, xen_subtable},
- {0}
-};
-static int __init xen_sysctl_init(void)
-{
- (void)register_sysctl_table(xen_table, 0);
- return 0;
-}
-__initcall(xen_sysctl_init);
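Once xen_sysctl_init() has run, the table registered above exposes the tunable to userspace as /proc/sys/xen/independent_wallclock; writing a non-zero value there at run time has the same effect as booting with the "independent_wallclock" parameter handled by the __setup() hook earlier in this file.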
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/arch/xen/kernel/traps.c
--- a/linux-2.4.30-xen-sparse/arch/xen/kernel/traps.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,628 +0,0 @@
-/*
- * linux/arch/i386/traps.c
- *
- * Copyright (C) 1991, 1992 Linus Torvalds
- *
- * Pentium III FXSR, SSE support
- * Gareth Hughes <gareth@xxxxxxxxxxx>, May 2000
- */
-
-/*
- * 'Traps.c' handles hardware traps and faults after we have saved some
- * state in 'asm.s'.
- */
-#include <linux/config.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/ptrace.h>
-#include <linux/timer.h>
-#include <linux/mm.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/spinlock.h>
-#include <linux/interrupt.h>
-#include <linux/highmem.h>
-
-#include <asm/system.h>
-#include <asm/uaccess.h>
-#include <asm/io.h>
-#include <asm/atomic.h>
-#include <asm/debugreg.h>
-#include <asm/desc.h>
-#include <asm/i387.h>
-
-#include <asm/smp.h>
-#include <asm/pgalloc.h>
-
-#include <asm/hypervisor.h>
-
-#include <linux/irq.h>
-#include <linux/module.h>
-
-asmlinkage int system_call(void);
-asmlinkage void lcall7(void);
-asmlinkage void lcall27(void);
-
-asmlinkage void divide_error(void);
-asmlinkage void debug(void);
-asmlinkage void int3(void);
-asmlinkage void overflow(void);
-asmlinkage void bounds(void);
-asmlinkage void invalid_op(void);
-asmlinkage void device_not_available(void);
-asmlinkage void double_fault(void);
-asmlinkage void coprocessor_segment_overrun(void);
-asmlinkage void invalid_TSS(void);
-asmlinkage void segment_not_present(void);
-asmlinkage void stack_segment(void);
-asmlinkage void general_protection(void);
-asmlinkage void page_fault(void);
-asmlinkage void coprocessor_error(void);
-asmlinkage void simd_coprocessor_error(void);
-asmlinkage void alignment_check(void);
-asmlinkage void fixup_4gb_segment(void);
-asmlinkage void machine_check(void);
-
-int kstack_depth_to_print = 24;
-
-
-/*
- * If the address is either in the .text section of the
- * kernel, or in the vmalloc'ed module regions, it *may*
- * be the address of a calling routine
- */
-
-#ifdef CONFIG_MODULES
-
-extern struct module *module_list;
-extern struct module kernel_module;
-
-static inline int kernel_text_address(unsigned long addr)
-{
- int retval = 0;
- struct module *mod;
-
- if (addr >= (unsigned long) &_stext &&
- addr <= (unsigned long) &_etext)
- return 1;
-
- for (mod = module_list; mod != &kernel_module; mod = mod->next) {
- /* mod_bound tests for addr being inside the vmalloc'ed
- * module area. Of course it'd be better to test only
- * for the .text subset... */
- if (mod_bound(addr, 0, mod)) {
- retval = 1;
- break;
- }
- }
-
- return retval;
-}
-
-#else
-
-static inline int kernel_text_address(unsigned long addr)
-{
- return (addr >= (unsigned long) &_stext &&
- addr <= (unsigned long) &_etext);
-}
-
-#endif
-
-void show_trace(unsigned long * stack)
-{
- int i;
- unsigned long addr;
-
- if (!stack)
- stack = (unsigned long*)&stack;
-
- printk("Call Trace: ");
- i = 1;
- while (((long) stack & (THREAD_SIZE-1)) != 0) {
- addr = *stack++;
- if (kernel_text_address(addr)) {
- if (i && ((i % 6) == 0))
- printk("\n ");
- printk("[<%08lx>] ", addr);
- i++;
- }
- }
- printk("\n");
-}
-
-void show_trace_task(struct task_struct *tsk)
-{
- unsigned long esp = tsk->thread.esp;
-
- /* User space on another CPU? */
- if ((esp ^ (unsigned long)tsk) & (PAGE_MASK<<1))
- return;
- show_trace((unsigned long *)esp);
-}
-
-void show_stack(unsigned long * esp)
-{
- unsigned long *stack;
- int i;
-
- // debugging aid: "show_stack(NULL);" prints the
- // back trace for this cpu.
-
- if(esp==NULL)
- esp=(unsigned long*)&esp;
-
- stack = esp;
- for(i=0; i < kstack_depth_to_print; i++) {
- if (((long) stack & (THREAD_SIZE-1)) == 0)
- break;
- if (i && ((i % 8) == 0))
- printk("\n ");
- printk("%08lx ", *stack++);
- }
- printk("\n");
- show_trace(esp);
-}
-
-void show_registers(struct pt_regs *regs)
-{
- int in_kernel = 1;
- unsigned long esp;
- unsigned short ss;
-
- esp = (unsigned long) (&regs->esp);
- ss = __KERNEL_DS;
- if (regs->xcs & 2) {
- in_kernel = 0;
- esp = regs->esp;
- ss = regs->xss & 0xffff;
- }
- printk(KERN_ALERT "CPU: %d\n", smp_processor_id() );
- printk(KERN_ALERT "EIP: %04x:[<%08lx>] %s\n",
- 0xffff & regs->xcs, regs->eip, print_tainted());
- printk(KERN_ALERT "EFLAGS: %08lx\n",regs->eflags);
- printk(KERN_ALERT "eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n",
- regs->eax, regs->ebx, regs->ecx, regs->edx);
- printk(KERN_ALERT "esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n",
- regs->esi, regs->edi, regs->ebp, esp);
- printk(KERN_ALERT "ds: %04x es: %04x ss: %04x\n",
- regs->xds & 0xffff, regs->xes & 0xffff, ss);
- printk(KERN_ALERT "Process %s (pid: %d, stackpage=%08lx)",
- current->comm, current->pid, 4096+(unsigned long)current);
- /*
- * When in-kernel, we also print out the stack and code at the
- * time of the fault..
- */
- if (in_kernel) {
-
- printk(KERN_ALERT "\nStack: ");
- show_stack((unsigned long*)esp);
-
-#if 0
- {
- int i;
- printk(KERN_ALERT "\nCode: ");
- if(regs->eip < PAGE_OFFSET)
- goto bad;
-
- for(i=0;i<20;i++)
- {
- unsigned char c;
- if(__get_user(c, &((unsigned char*)regs->eip)[i])) {
-bad:
- printk(KERN_ALERT " Bad EIP value.");
- break;
- }
- printk("%02x ", c);
- }
- }
-#endif
- }
- printk(KERN_ALERT "\n");
-}
-
-spinlock_t die_lock = SPIN_LOCK_UNLOCKED;
-
-void die(const char * str, struct pt_regs * regs, long err)
-{
- console_verbose();
- spin_lock_irq(&die_lock);
- bust_spinlocks(1);
- printk("%s: %04lx\n", str, err & 0xffff);
- show_registers(regs);
- bust_spinlocks(0);
- spin_unlock_irq(&die_lock);
- do_exit(SIGSEGV);
-}
-
-static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
-{
- if (!(2 & regs->xcs))
- die(str, regs, err);
-}
-
-
-static void inline do_trap(int trapnr, int signr, char *str,
- struct pt_regs * regs, long error_code,
- siginfo_t *info)
-{
- if (!(regs->xcs & 2))
- goto kernel_trap;
-
- /*trap_signal:*/ {
- struct task_struct *tsk = current;
- tsk->thread.error_code = error_code;
- tsk->thread.trap_no = trapnr;
- if (info)
- force_sig_info(signr, info, tsk);
- else
- force_sig(signr, tsk);
- return;
- }
-
- kernel_trap: {
- unsigned long fixup = search_exception_table(regs->eip);
- if (fixup)
- regs->eip = fixup;
- else
- die(str, regs, error_code);
- return;
- }
-}
-
-#define DO_ERROR(trapnr, signr, str, name) \
-asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
-{ \
- do_trap(trapnr, signr, str, regs, error_code, NULL); \
-}
-
-#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
-asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
-{ \
- siginfo_t info; \
- info.si_signo = signr; \
- info.si_errno = 0; \
- info.si_code = sicode; \
- info.si_addr = (void *)siaddr; \
- do_trap(trapnr, signr, str, regs, error_code, &info); \
-}
-
-DO_ERROR_INFO( 0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->eip)
-DO_ERROR( 3, SIGTRAP, "int3", int3)
-DO_ERROR( 4, SIGSEGV, "overflow", overflow)
-DO_ERROR( 5, SIGSEGV, "bounds", bounds)
-DO_ERROR_INFO( 6, SIGILL, "invalid operand", invalid_op, ILL_ILLOPN, regs->eip)
-DO_ERROR( 7, SIGSEGV, "device not available", device_not_available)
-DO_ERROR( 8, SIGSEGV, "double fault", double_fault)
-DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
-DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
-DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
-DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
-DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
-DO_ERROR(18, SIGBUS, "machine check", machine_check)
-
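The DO_ERROR()/DO_ERROR_INFO() generators above are straight textual expansions; for example, DO_ERROR(3, SIGTRAP, "int3", int3) expands to:

    asmlinkage void do_int3(struct pt_regs * regs, long error_code)
    {
        do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
    }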
-asmlinkage void do_general_protection(struct pt_regs * regs, long error_code)
-{
- /*
- * If we trapped on an LDT access then ensure that the default_ldt is
- * loaded, if nothing else. We load default_ldt lazily because LDT
- * switching costs time and many applications don't need it.
- */
- if ( unlikely((error_code & 6) == 4) )
- {
- unsigned long ldt;
- __asm__ __volatile__ ( "sldt %0" : "=r" (ldt) );
- if ( ldt == 0 )
- {
- mmu_update_t u;
- u.ptr = MMU_EXTENDED_COMMAND;
- u.ptr |= (unsigned long)&default_ldt[0];
- u.val = MMUEXT_SET_LDT | (5 << MMUEXT_CMD_SHIFT);
- if ( unlikely(HYPERVISOR_mmu_update(&u, 1, NULL) < 0) )
- {
- show_trace(NULL);
- panic("Failed to install default LDT");
- }
- return;
- }
- }
-
- if (!(regs->xcs & 2))
- goto gp_in_kernel;
-
- current->thread.error_code = error_code;
- current->thread.trap_no = 13;
- force_sig(SIGSEGV, current);
- return;
-
-gp_in_kernel:
- {
- unsigned long fixup;
- fixup = search_exception_table(regs->eip);
- if (fixup) {
- regs->eip = fixup;
- return;
- }
- die("general protection fault", regs, error_code);
- }
-}
-
-
-asmlinkage void do_debug(struct pt_regs * regs, long error_code)
-{
- unsigned int condition;
- struct task_struct *tsk = current;
- siginfo_t info;
-
- condition = HYPERVISOR_get_debugreg(6);
-
- /* Mask out spurious debug traps due to lazy DR7 setting */
- if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
- if (!tsk->thread.debugreg[7])
- goto clear_dr7;
- }
-
- /* Save debug status register where ptrace can see it */
- tsk->thread.debugreg[6] = condition;
-
- /* Mask out spurious TF errors due to lazy TF clearing */
- if (condition & DR_STEP) {
- /*
- * The TF error should be masked out only if the current
- * process is not traced and if the TRAP flag has been set
- * previously by a tracing process (condition detected by
- * the PT_DTRACE flag); remember that the i386 TRAP flag
- * can be modified by the process itself in user mode,
- * allowing programs to debug themselves without the ptrace()
- * interface.
- */
- if ((tsk->ptrace & (PT_DTRACE|PT_PTRACED)) == PT_DTRACE)
- goto clear_TF;
- }
-
- /* Ok, finally something we can handle */
- tsk->thread.trap_no = 1;
- tsk->thread.error_code = error_code;
- info.si_signo = SIGTRAP;
- info.si_errno = 0;
- info.si_code = TRAP_BRKPT;
-
- /* If this is a kernel mode trap, save the user PC on entry to
- * the kernel, that's what the debugger can make sense of.
- */
- info.si_addr = ((regs->xcs & 2) == 0) ? (void *)tsk->thread.eip :
- (void *)regs->eip;
- force_sig_info(SIGTRAP, &info, tsk);
-
- /* Disable additional traps. They'll be re-enabled when
- * the signal is delivered.
- */
- clear_dr7:
- HYPERVISOR_set_debugreg(7, 0);
- return;
-
- clear_TF:
- regs->eflags &= ~TF_MASK;
- return;
-}
-
-
-/*
- * Note that we play around with the 'TS' bit in an attempt to get
- * the correct behaviour even in the presence of the asynchronous
- * IRQ13 behaviour
- */
-void math_error(void *eip)
-{
- struct task_struct * task;
- siginfo_t info;
- unsigned short cwd, swd;
-
- /*
- * Save the info for the exception handler and clear the error.
- */
- task = current;
- save_init_fpu(task);
- task->thread.trap_no = 16;
- task->thread.error_code = 0;
- info.si_signo = SIGFPE;
- info.si_errno = 0;
- info.si_code = __SI_FAULT;
- info.si_addr = eip;
- /*
- * (~cwd & swd) will mask out exceptions that are not set to unmasked
- * status. 0x3f is the exception bits in these regs, 0x200 is the
- * C1 reg you need in case of a stack fault, 0x040 is the stack
- * fault bit. We should only be taking one exception at a time,
- * so if this combination doesn't produce any single exception,
- * then we have a bad program that isn't synchronizing its FPU usage
- * and it will suffer the consequences since we won't be able to
- * fully reproduce the context of the exception
- */
- cwd = get_fpu_cwd(task);
- swd = get_fpu_swd(task);
- switch (((~cwd) & swd & 0x3f) | (swd & 0x240)) {
- case 0x000:
- default:
- break;
- case 0x001: /* Invalid Op */
- case 0x041: /* Stack Fault */
- case 0x241: /* Stack Fault | Direction */
- info.si_code = FPE_FLTINV;
- break;
- case 0x002: /* Denormalize */
- case 0x010: /* Underflow */
- info.si_code = FPE_FLTUND;
- break;
- case 0x004: /* Zero Divide */
- info.si_code = FPE_FLTDIV;
- break;
- case 0x008: /* Overflow */
- info.si_code = FPE_FLTOVF;
- break;
- case 0x020: /* Precision */
- info.si_code = FPE_FLTRES;
- break;
- }
- force_sig_info(SIGFPE, &info, task);
-}
-
-asmlinkage void do_coprocessor_error(struct pt_regs * regs, long error_code)
-{
- ignore_irq13 = 1;
- math_error((void *)regs->eip);
-}
-
-void simd_math_error(void *eip)
-{
- struct task_struct * task;
- siginfo_t info;
- unsigned short mxcsr;
-
- /*
- * Save the info for the exception handler and clear the error.
- */
- task = current;
- save_init_fpu(task);
- task->thread.trap_no = 19;
- task->thread.error_code = 0;
- info.si_signo = SIGFPE;
- info.si_errno = 0;
- info.si_code = __SI_FAULT;
- info.si_addr = eip;
- /*
- * The SIMD FPU exceptions are handled a little differently, as there
- * is only a single status/control register. Thus, to determine which
- * unmasked exception was caught we must mask the exception mask bits
- * at 0x1f80, and then use these to mask the exception bits at 0x3f.
- */
- mxcsr = get_fpu_mxcsr(task);
- switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
- case 0x000:
- default:
- break;
- case 0x001: /* Invalid Op */
- info.si_code = FPE_FLTINV;
- break;
- case 0x002: /* Denormalize */
- case 0x010: /* Underflow */
- info.si_code = FPE_FLTUND;
- break;
- case 0x004: /* Zero Divide */
- info.si_code = FPE_FLTDIV;
- break;
- case 0x008: /* Overflow */
- info.si_code = FPE_FLTOVF;
- break;
- case 0x020: /* Precision */
- info.si_code = FPE_FLTRES;
- break;
- }
- force_sig_info(SIGFPE, &info, task);
-}
-
-asmlinkage void do_simd_coprocessor_error(struct pt_regs * regs,
- long error_code)
-{
- if (cpu_has_xmm) {
- /* Handle SIMD FPU exceptions on PIII+ processors. */
- ignore_irq13 = 1;
- simd_math_error((void *)regs->eip);
- } else {
- die_if_kernel("cache flush denied", regs, error_code);
- current->thread.trap_no = 19;
- current->thread.error_code = error_code;
- force_sig(SIGSEGV, current);
- }
-}
-
-/*
- * 'math_state_restore()' saves the current math information in the
- * old math state array, and gets the new ones from the current task
- *
- * Careful.. There are problems with IBM-designed IRQ13 behaviour.
- * Don't touch unless you *really* know how it works.
- */
-asmlinkage void math_state_restore(struct pt_regs regs)
-{
- /*
- * A trap in kernel mode can be ignored. It'll be the fast XOR or
- * copying libraries, which will correctly save/restore state and
- * reset the TS bit in CR0.
- */
- if ( (regs.xcs & 2) == 0 )
- return;
-
- if (current->used_math) {
- restore_fpu(current);
- } else {
- init_fpu();
- }
- current->flags |= PF_USEDFPU; /* So we fnsave on switch_to() */
-}
-
-
-#define _set_gate(gate_addr,type,dpl,addr) \
-do { \
- int __d0, __d1; \
- __asm__ __volatile__ ("movw %%dx,%%ax\n\t" \
- "movw %4,%%dx\n\t" \
- "movl %%eax,%0\n\t" \
- "movl %%edx,%1" \
- :"=m" (*((long *) (gate_addr))), \
- "=m" (*(1+(long *) (gate_addr))), "=&a" (__d0), "=&d" (__d1) \
- :"i" ((short) (0x8000+(dpl<<13)+(type<<8))), \
- "3" ((char *) (addr)),"2" (__KERNEL_CS << 16)); \
-} while (0)
-
-static void __init set_call_gate(void *a, void *addr)
-{
- _set_gate(a,12,3,addr);
-}
-
-
-/* NB. All these are "trap gates" (i.e. events_mask isn't cleared). */
-static trap_info_t trap_table[] = {
- { 0, 0, __KERNEL_CS, (unsigned long)divide_error },
- { 1, 0, __KERNEL_CS, (unsigned long)debug },
- { 3, 3, __KERNEL_CS, (unsigned long)int3 },
- { 4, 3, __KERNEL_CS, (unsigned long)overflow },
- { 5, 3, __KERNEL_CS, (unsigned long)bounds },
- { 6, 0, __KERNEL_CS, (unsigned long)invalid_op },
- { 7, 0, __KERNEL_CS, (unsigned long)device_not_available },
- { 8, 0, __KERNEL_CS, (unsigned long)double_fault },
- { 9, 0, __KERNEL_CS, (unsigned long)coprocessor_segment_overrun },
- { 10, 0, __KERNEL_CS, (unsigned long)invalid_TSS },
- { 11, 0, __KERNEL_CS, (unsigned long)segment_not_present },
- { 12, 0, __KERNEL_CS, (unsigned long)stack_segment },
- { 13, 0, __KERNEL_CS, (unsigned long)general_protection },
- { 14, 0, __KERNEL_CS, (unsigned long)page_fault },
- { 15, 0, __KERNEL_CS, (unsigned long)fixup_4gb_segment },
- { 16, 0, __KERNEL_CS, (unsigned long)coprocessor_error },
- { 17, 0, __KERNEL_CS, (unsigned long)alignment_check },
- { 18, 0, __KERNEL_CS, (unsigned long)machine_check },
- { 19, 0, __KERNEL_CS, (unsigned long)simd_coprocessor_error },
- { SYSCALL_VECTOR,
- 3, __KERNEL_CS, (unsigned long)system_call },
- { 0, 0, 0, 0 }
-};
-
-
-void __init trap_init(void)
-{
- HYPERVISOR_set_trap_table(trap_table);
- HYPERVISOR_set_fast_trap(SYSCALL_VECTOR);
-
- /*
- * The default LDT is a single-entry callgate to lcall7 for iBCS and a
- * callgate to lcall27 for Solaris/x86 binaries.
- */
- clear_page(&default_ldt[0]);
- set_call_gate(&default_ldt[0],lcall7);
- set_call_gate(&default_ldt[4],lcall27);
- __make_page_readonly(&default_ldt[0]);
-
- cpu_init();
-}
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/arch/xen/lib/Makefile
--- a/linux-2.4.30-xen-sparse/arch/xen/lib/Makefile Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,15 +0,0 @@
-
-.S.o:
- $(CC) $(AFLAGS) -c $< -o $*.o
-
-L_TARGET = lib.a
-
-obj-y = checksum.o old-checksum.o delay.o \
- usercopy.o getuser.o \
- memcpy.o strstr.o xen_proc.o
-
-obj-$(CONFIG_X86_USE_3DNOW) += mmx.o
-obj-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o
-obj-$(CONFIG_DEBUG_IOVIRT) += iodebug.o
-
-include $(TOPDIR)/Rules.make
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/arch/xen/lib/delay.c
--- a/linux-2.4.30-xen-sparse/arch/xen/lib/delay.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,52 +0,0 @@
-/*
- * Precise Delay Loops for i386
- *
- * Copyright (C) 1993 Linus Torvalds
- * Copyright (C) 1997 Martin Mares <mj@xxxxxxxxxxxxxxxxxxxxxxxx>
- *
- * The __delay function must _NOT_ be inlined as its execution time
- * depends wildly on alignment on many x86 processors. The additional
- * jump magic is needed to get the timing stable on all the CPU's
- * we have to worry about.
- */
-
-#include <linux/config.h>
-#include <linux/sched.h>
-#include <linux/delay.h>
-#include <asm/processor.h>
-#include <asm/delay.h>
-
-#ifdef CONFIG_SMP
-#include <asm/smp.h>
-#endif
-
-void __delay(unsigned long loops)
-{
- unsigned long bclock, now;
-
- rdtscl(bclock);
- do
- {
- rep_nop();
- rdtscl(now);
- } while ((now-bclock) < loops);
-}
-
-inline void __const_udelay(unsigned long xloops)
-{
- int d0;
- __asm__("mull %0"
- :"=d" (xloops), "=&a" (d0)
- :"1" (xloops),"0" (current_cpu_data.loops_per_jiffy));
- __delay(xloops * HZ);
-}
-
-void __udelay(unsigned long usecs)
-{
- __const_udelay(usecs * 0x000010c6); /* 2**32 / 1000000 */
-}
-
-void __ndelay(unsigned long nsecs)
-{
- __const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
-}
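The constant 0x000010c6 in __udelay() above is floor(2^32 / 10^6), so the inline "mull" in __const_udelay() replaces a divide by 10^6 with taking the high half of a 32x32-bit multiply: loops = usecs * (2^32/10^6) * loops_per_jiffy / 2^32 * HZ = usecs * loops_per_jiffy * HZ / 10^6. The same computation in explicit 64-bit arithmetic (a sketch; the original truncates usecs * 0x10c6 to 32 bits, which is safe for the short delays udelay() is meant for):

    #include <stdint.h>

    /* 64-bit restatement of __const_udelay()'s fixed-point divide. */
    static unsigned long usecs_to_loops(unsigned long usecs,
                                        unsigned long loops_per_jiffy,
                                        unsigned long hz)
    {
        uint64_t xloops = (uint64_t)usecs * 0x000010c6u;  /* 2^32 / 10^6 */
        return (unsigned long)((xloops * loops_per_jiffy) >> 32) * hz;
    }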
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/arch/xen/mm/Makefile
--- a/linux-2.4.30-xen-sparse/arch/xen/mm/Makefile Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,16 +0,0 @@
-#
-# Makefile for the linux i386-specific parts of the memory manager.
-#
-# Note! Dependencies are done automagically by 'make dep', which also
-# removes any old dependencies. DON'T put your own dependencies here
-# unless it's something special (ie not a .c file).
-#
-# Note 2! The CFLAGS definition is now in the main makefile...
-
-O_TARGET := mm.o
-
-obj-y := init.o fault.o extable.o pageattr.o hypervisor.o ioremap.o
-
-export-objs := pageattr.o
-
-include $(TOPDIR)/Rules.make
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/arch/xen/mm/fault.c
--- a/linux-2.4.30-xen-sparse/arch/xen/mm/fault.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,306 +0,0 @@
-/*
- * linux/arch/i386/mm/fault.c
- *
- * Copyright (C) 1995 Linus Torvalds
- */
-
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/tty.h>
-#include <linux/vt_kern.h> /* For unblank_screen() */
-
-#include <asm/system.h>
-#include <asm/uaccess.h>
-#include <asm/pgalloc.h>
-#include <asm/hardirq.h>
-
-extern void die(const char *,struct pt_regs *,long);
-
-pgd_t *cur_pgd;
-
-extern spinlock_t timerlist_lock;
-
-/*
- * Unlock any spinlocks which will prevent us from getting the
- * message out (timerlist_lock is acquired through the
- * console unblank code)
- */
-void bust_spinlocks(int yes)
-{
- spin_lock_init(&timerlist_lock);
- if (yes) {
- oops_in_progress = 1;
- } else {
- int loglevel_save = console_loglevel;
-#ifdef CONFIG_VT
- unblank_screen();
-#endif
- oops_in_progress = 0;
- /*
- * OK, the message is on the console. Now we call printk()
- * without oops_in_progress set so that printk will give klogd
- * a poke. Hold onto your hats...
- */
- console_loglevel = 15; /* NMI oopser may have shut the console up */
- printk(" ");
- console_loglevel = loglevel_save;
- }
-}
-
-/*
- * This routine handles page faults. It determines the address,
- * and the problem, and then passes it off to one of the appropriate
- * routines.
- *
- * error_code:
- * bit 0 == 0 means no page found, 1 means protection fault
- * bit 1 == 0 means read, 1 means write
- * bit 2 == 0 means kernel, 1 means user-mode
- */
-asmlinkage void do_page_fault(struct pt_regs *regs,
- unsigned long error_code,
- unsigned long address)
-{
- struct task_struct *tsk = current;
- struct mm_struct *mm;
- struct vm_area_struct * vma;
- unsigned long page;
- unsigned long fixup;
- int write;
- siginfo_t info;
-
- /* Set the "privileged fault" bit to something sane. */
- error_code &= 3;
- error_code |= (regs->xcs & 2) << 1;
-
- if ( flush_page_update_queue() != 0 )
- return;
-
- /*
- * We fault-in kernel-space virtual memory on-demand. The
- * 'reference' page table is init_mm.pgd.
- *
- * NOTE! We MUST NOT take any locks for this case. We may
- * be in an interrupt or a critical region, and should
- * only copy the information from the master page table,
- * nothing more.
- *
- * This verifies that the fault happens in kernel space
- * (error_code & 4) == 0, and that the fault was not a
- * protection error (error_code & 1) == 0.
- */
- if (address >= TASK_SIZE && !(error_code & 5))
- goto vmalloc_fault;
-
- mm = tsk->mm;
- info.si_code = SEGV_MAPERR;
-
- /*
- * If we're in an interrupt or have no user
- * context, we must not take the fault..
- */
- if (in_interrupt() || !mm)
- goto no_context;
-
- down_read(&mm->mmap_sem);
-
- vma = find_vma(mm, address);
- if (!vma)
- goto bad_area;
- if (vma->vm_start <= address)
- goto good_area;
- if (!(vma->vm_flags & VM_GROWSDOWN))
- goto bad_area;
- if (error_code & 4) {
- /*
- * accessing the stack below %esp is always a bug.
- * The "+ 32" is there due to some instructions (like
- * pusha) doing post-decrement on the stack and that
- * doesn't show up until later..
- */
- if (address + 32 < regs->esp)
- goto bad_area;
- }
- if (expand_stack(vma, address))
- goto bad_area;
-/*
- * Ok, we have a good vm_area for this memory access, so
- * we can handle it..
- */
-good_area:
- info.si_code = SEGV_ACCERR;
- write = 0;
- switch (error_code & 3) {
- default: /* 3: write, present */
- /* fall through */
- case 2: /* write, not present */
- if (!(vma->vm_flags & VM_WRITE))
- goto bad_area;
- write++;
- break;
- case 1: /* read, present */
- goto bad_area;
- case 0: /* read, not present */
- if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
- goto bad_area;
- }
-
- survive:
- /*
- * If for any reason at all we couldn't handle the fault,
- * make sure we exit gracefully rather than endlessly redo
- * the fault.
- */
- switch (handle_mm_fault(mm, vma, address, write)) {
- case 1:
- tsk->min_flt++;
- break;
- case 2:
- tsk->maj_flt++;
- break;
- case 0:
- goto do_sigbus;
- default:
- goto out_of_memory;
- }
-
- up_read(&mm->mmap_sem);
- return;
-
-/*
- * Something tried to access memory that isn't in our memory map..
- * Fix it, but check if it's kernel or user first..
- */
-bad_area:
- up_read(&mm->mmap_sem);
-
- /* User mode accesses just cause a SIGSEGV */
- if (error_code & 4) {
- tsk->thread.cr2 = address;
- /* Kernel addresses are always protection faults */
- tsk->thread.error_code = error_code | (address >= TASK_SIZE);
- tsk->thread.trap_no = 14;
- info.si_signo = SIGSEGV;
- info.si_errno = 0;
- /* info.si_code has been set above */
- info.si_addr = (void *)address;
- force_sig_info(SIGSEGV, &info, tsk);
- return;
- }
-
-no_context:
- /* Are we prepared to handle this kernel fault? */
- if ((fixup = search_exception_table(regs->eip)) != 0) {
- regs->eip = fixup;
- return;
- }
-
-/*
- * Oops. The kernel tried to access some bad page. We'll have to
- * terminate things with extreme prejudice.
- */
-
- bust_spinlocks(1);
-
- if (address < PAGE_SIZE)
- printk(KERN_ALERT "Unable to handle kernel NULL pointer
dereference");
- else
- printk(KERN_ALERT "Unable to handle kernel paging request");
- printk(" at virtual address %08lx\n",address);
- printk(" printing eip:\n");
- printk("%08lx\n", regs->eip);
- page = ((unsigned long *) cur_pgd)[address >> 22];
- printk(KERN_ALERT "*pde=%08lx(%08lx)\n", page, machine_to_phys(page));
- if (page & 1) {
- page &= PAGE_MASK;
- address &= 0x003ff000;
- page = machine_to_phys(page);
- page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
- printk(KERN_ALERT "*pte=%08lx(%08lx)\n", page,
- machine_to_phys(page));
- }
- die("Oops", regs, error_code);
- bust_spinlocks(0);
- do_exit(SIGKILL);
-
-/*
- * We ran out of memory, or some other thing happened to us that made
- * us unable to handle the page fault gracefully.
- */
-out_of_memory:
- if (tsk->pid == 1) {
- yield();
- goto survive;
- }
- up_read(&mm->mmap_sem);
- printk("VM: killing process %s\n", tsk->comm);
- if (error_code & 4)
- do_exit(SIGKILL);
- goto no_context;
-
-do_sigbus:
- up_read(&mm->mmap_sem);
-
- /*
- * Send a sigbus, regardless of whether we were in kernel
- * or user mode.
- */
- tsk->thread.cr2 = address;
- tsk->thread.error_code = error_code;
- tsk->thread.trap_no = 14;
- info.si_signo = SIGBUS;
- info.si_errno = 0;
- info.si_code = BUS_ADRERR;
- info.si_addr = (void *)address;
- force_sig_info(SIGBUS, &info, tsk);
-
- /* Kernel mode? Handle exceptions or die */
- if (!(error_code & 4))
- goto no_context;
- return;
-
-vmalloc_fault:
- {
- /*
- * Synchronize this task's top level page-table
- * with the 'reference' page table.
- *
- * Do _not_ use "tsk" here. We might be inside
- * an interrupt in the middle of a task switch..
- */
- int offset = __pgd_offset(address);
- pgd_t *pgd, *pgd_k;
- pmd_t *pmd, *pmd_k;
- pte_t *pte_k;
-
- pgd = offset + cur_pgd;
- pgd_k = init_mm.pgd + offset;
-
- if (!pgd_present(*pgd_k))
- goto no_context;
- set_pgd(pgd, *pgd_k);
-
- pmd = pmd_offset(pgd, address);
- pmd_k = pmd_offset(pgd_k, address);
- if (!pmd_present(*pmd_k))
- goto no_context;
- set_pmd(pmd, *pmd_k);
- XEN_flush_page_update_queue(); /* flush PMD update */
-
- pte_k = pte_offset(pmd_k, address);
- if (!pte_present(*pte_k))
- goto no_context;
- return;
- }
-}
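The normalisation at the top of do_page_fault() masks the hardware error code down to bits 0-1 and rebuilds bit 2 ("user mode") from the ring encoded in the saved CS, the same "xcs & 2" test used throughout these files. A small decoder for the bit layout documented in the comment above (a sketch):

    #include <stdio.h>

    /* Decode the i386 page-fault error code as used by do_page_fault():
     *   bit 0: 0 = page not present, 1 = protection fault
     *   bit 1: 0 = read access,      1 = write access
     *   bit 2: 0 = kernel mode,      1 = user mode
     */
    static void decode_fault_code(unsigned long error_code)
    {
        printf("%s %s fault in %s mode\n",
               (error_code & 1) ? "protection" : "not-present",
               (error_code & 2) ? "write" : "read",
               (error_code & 4) ? "user" : "kernel");
    }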
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/arch/xen/mm/init.c
--- a/linux-2.4.30-xen-sparse/arch/xen/mm/init.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,487 +0,0 @@
-/*
- * linux/arch/i386/mm/init.c
- *
- * Copyright (C) 1995 Linus Torvalds
- *
- * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
- */
-
-#include <linux/config.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/swap.h>
-#include <linux/smp.h>
-#include <linux/init.h>
-#ifdef CONFIG_BLK_DEV_INITRD
-#include <linux/blk.h>
-#endif
-#include <linux/highmem.h>
-#include <linux/pagemap.h>
-#include <linux/bootmem.h>
-#include <linux/slab.h>
-
-#include <asm/processor.h>
-#include <asm/system.h>
-#include <asm/uaccess.h>
-#include <asm/pgtable.h>
-#include <asm/pgalloc.h>
-#include <asm/dma.h>
-#include <asm/apic.h>
-#include <asm/tlb.h>
-
-/* XEN: We *cannot* use mmx_clear_page() this early. Force dumb memset(). */
-#undef clear_page
-#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
-
-mmu_gather_t mmu_gathers[NR_CPUS];
-unsigned long highstart_pfn, highend_pfn;
-static unsigned long totalram_pages;
-static unsigned long totalhigh_pages;
-
-int do_check_pgt_cache(int low, int high)
-{
- int freed = 0;
- if(pgtable_cache_size > high) {
- do {
- if (!QUICKLIST_EMPTY(pgd_quicklist)) {
- free_pgd_slow(get_pgd_fast());
- freed++;
- }
- if (!QUICKLIST_EMPTY(pte_quicklist)) {
- pte_free_slow(pte_alloc_one_fast(NULL, 0));
- freed++;
- }
- } while(pgtable_cache_size > low);
- }
- return freed;
-}
-
-/*
- * NOTE: pagetable_init allocates all the fixmap pagetables contiguously in
- * physical space, so we can cache the location of the first one and move
- * around without checking the pgd every time.
- */
-
-#if CONFIG_HIGHMEM
-pte_t *kmap_pte;
-pgprot_t kmap_prot;
-
-#define kmap_get_fixmap_pte(vaddr) \
- pte_offset(pmd_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr))
-
-void __init kmap_init(void)
-{
- unsigned long kmap_vstart;
-
- /* cache the first kmap pte */
- kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
- kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
-
- kmap_prot = PAGE_KERNEL;
-}
-#endif /* CONFIG_HIGHMEM */
-
-void show_mem(void)
-{
- int i, total = 0, reserved = 0;
- int shared = 0, cached = 0;
- int highmem = 0;
-
- printk("Mem-info:\n");
- show_free_areas();
- printk("Free swap: %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
- i = max_mapnr;
- while (i-- > 0) {
- total++;
- if (PageHighMem(mem_map+i))
- highmem++;
- if (PageReserved(mem_map+i))
- reserved++;
- else if (PageSwapCache(mem_map+i))
- cached++;
- else if (page_count(mem_map+i))
- shared += page_count(mem_map+i) - 1;
- }
- printk("%d pages of RAM\n", total);
- printk("%d pages of HIGHMEM\n",highmem);
- printk("%d reserved pages\n",reserved);
- printk("%d pages shared\n",shared);
- printk("%d pages swap cached\n",cached);
- printk("%ld pages in page table cache\n",pgtable_cache_size);
- show_buffers();
-}
-
-/* References to section boundaries */
-
-extern char _text, _etext, _edata, __bss_start, _end;
-extern char __init_begin, __init_end;
-
-static inline void set_pte_phys (unsigned long vaddr,
- unsigned long phys, pgprot_t prot)
-{
- pgd_t *pgd;
- pmd_t *pmd;
- pte_t *pte;
-
- pgd = init_mm.pgd + __pgd_offset(vaddr);
- if (pgd_none(*pgd)) {
- printk("PAE BUG #00!\n");
- return;
- }
- pmd = pmd_offset(pgd, vaddr);
- if (pmd_none(*pmd)) {
- printk("PAE BUG #01!\n");
- return;
- }
- pte = pte_offset(pmd, vaddr);
-
- queue_l1_entry_update(pte, phys | pgprot_val(prot));
-
- /*
- * It's enough to flush this one mapping.
- * (PGE mappings get flushed as well)
- */
- __flush_tlb_one(vaddr);
-}
-
-void __set_fixmap(enum fixed_addresses idx, unsigned long phys,
- pgprot_t flags)
-{
- unsigned long address = __fix_to_virt(idx);
-
- if (idx >= __end_of_fixed_addresses) {
- printk("Invalid __set_fixmap\n");
- return;
- }
- set_pte_phys(address, phys, flags);
-}
-
-void clear_fixmap(enum fixed_addresses idx)
-{
- set_pte_phys(__fix_to_virt(idx), 0, __pgprot(0));
-}
-
-static void __init fixrange_init (unsigned long start,
- unsigned long end, pgd_t *pgd_base)
-{
- pgd_t *pgd, *kpgd;
- pmd_t *pmd, *kpmd;
- pte_t *pte, *kpte;
- int i, j;
- unsigned long vaddr;
-
- vaddr = start;
- i = __pgd_offset(vaddr);
- j = __pmd_offset(vaddr);
- pgd = pgd_base + i;
-
- for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
-#if CONFIG_X86_PAE
- if (pgd_none(*pgd)) {
- pmd = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
- set_pgd(pgd, __pgd(__pa(pmd) + 0x1));
- if (pmd != pmd_offset(pgd, 0))
- printk("PAE BUG #02!\n");
- }
- pmd = pmd_offset(pgd, vaddr);
-#else
- pmd = (pmd_t *)pgd;
-#endif
- for (; (j < PTRS_PER_PMD) && (vaddr != end); pmd++, j++) {
- if (pmd_none(*pmd)) {
- pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
- clear_page(pte);
- kpgd = pgd_offset_k((unsigned long)pte);
- kpmd = pmd_offset(kpgd, (unsigned long)pte);
- kpte = pte_offset(kpmd, (unsigned long)pte);
- queue_l1_entry_update(kpte,
- (*(unsigned long *)kpte)&~_PAGE_RW);
-
- set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte)));
- }
- vaddr += PMD_SIZE;
- }
- j = 0;
- }
-
- XEN_flush_page_update_queue();
-}
-
-
-static void __init pagetable_init (void)
-{
- unsigned long vaddr, end, ram_end;
- pgd_t *kpgd, *pgd, *pgd_base;
- int i, j, k;
- pmd_t *kpmd, *pmd;
- pte_t *kpte, *pte, *pte_base;
-
- ram_end = end = (unsigned long)__va(max_low_pfn * PAGE_SIZE);
- if ( xen_start_info.nr_pages < max_low_pfn )
- ram_end = (unsigned long)__va(xen_start_info.nr_pages * PAGE_SIZE);
-
- pgd_base = init_mm.pgd;
- i = __pgd_offset(PAGE_OFFSET);
- pgd = pgd_base + i;
-
- for (; i < PTRS_PER_PGD; pgd++, i++) {
- vaddr = i*PGDIR_SIZE;
- if (vaddr >= end)
- break;
- pmd = (pmd_t *)pgd;
- for (j = 0; j < PTRS_PER_PMD; pmd++, j++) {
- vaddr = i*PGDIR_SIZE + j*PMD_SIZE;
- if (vaddr >= end)
- break;
-
- /* Filled in for us already? */
- if ( pmd_val(*pmd) & _PAGE_PRESENT )
- continue;
-
- pte_base = pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
- clear_page(pte_base);
-
- for (k = 0; k < PTRS_PER_PTE; pte++, k++) {
- vaddr = i*PGDIR_SIZE + j*PMD_SIZE + k*PAGE_SIZE;
- if (vaddr >= ram_end)
- break;
- *pte = mk_pte_phys(__pa(vaddr), PAGE_KERNEL);
- }
- kpgd = pgd_offset_k((unsigned long)pte_base);
- kpmd = pmd_offset(kpgd, (unsigned long)pte_base);
- kpte = pte_offset(kpmd, (unsigned long)pte_base);
- queue_l1_entry_update(kpte,
- (*(unsigned long *)kpte)&~_PAGE_RW);
- set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte_base)));
- XEN_flush_page_update_queue();
- }
- }
-
- /*
- * Fixed mappings, only the page table structure has to be
- * created - mappings will be set by set_fixmap():
- */
- vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
- fixrange_init(vaddr, HYPERVISOR_VIRT_START, init_mm.pgd);
-
-#if CONFIG_HIGHMEM
- /*
- * Permanent kmaps:
- */
- vaddr = PKMAP_BASE;
- fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, init_mm.pgd);
-
- pgd = init_mm.pgd + __pgd_offset(vaddr);
- pmd = pmd_offset(pgd, vaddr);
- pte = pte_offset(pmd, vaddr);
- pkmap_page_table = pte;
-#endif
-}
-
-static void __init zone_sizes_init(void)
-{
- unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
- unsigned int max_dma, high, low;
-
- max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
- low = max_low_pfn;
- high = highend_pfn;
-
- if (low < max_dma)
- zones_size[ZONE_DMA] = low;
- else {
- zones_size[ZONE_DMA] = max_dma;
- zones_size[ZONE_NORMAL] = low - max_dma;
-#ifdef CONFIG_HIGHMEM
- zones_size[ZONE_HIGHMEM] = high - low;
-#endif
- }
- free_area_init(zones_size);
-}
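
To make the zone split concrete (the memory size here is invented): on i386, MAX_DMA_ADDRESS corresponds to the 16MB ISA DMA limit, so max_dma is 4096 with 4KB pages. A domain with 512MB of low memory has max_low_pfn = 131072, giving ZONE_DMA = 4096 pages and ZONE_NORMAL = 131072 - 4096 = 126976 pages, with ZONE_HIGHMEM unused.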
-
-void __init paging_init(void)
-{
- pagetable_init();
-
- zone_sizes_init();
- /* Switch to the real shared_info page, and clear the dummy page. */
- set_fixmap(FIX_SHARED_INFO, xen_start_info.shared_info);
- HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
- memset(empty_zero_page, 0, sizeof(empty_zero_page));
-
-#ifdef CONFIG_HIGHMEM
- kmap_init();
-#endif
-}
-
-static inline int page_is_ram (unsigned long pagenr)
-{
- return 1;
-}
-
-#ifdef CONFIG_HIGHMEM
-void __init one_highpage_init(struct page *page, int free_page)
-{
- ClearPageReserved(page);
- set_bit(PG_highmem, &page->flags);
- atomic_set(&page->count, 1);
- if ( free_page )
- __free_page(page);
- totalhigh_pages++;
-}
-#endif /* CONFIG_HIGHMEM */
-
-static void __init set_max_mapnr_init(void)
-{
-#ifdef CONFIG_HIGHMEM
- highmem_start_page = mem_map + highstart_pfn;
- max_mapnr = num_physpages = highend_pfn;
- num_mappedpages = max_low_pfn;
-#else
- max_mapnr = num_mappedpages = num_physpages = max_low_pfn;
-#endif
-}
-
-static int __init free_pages_init(void)
-{
-#ifdef CONFIG_HIGHMEM
- int bad_ppro = 0;
-#endif
- int reservedpages, pfn;
-
- /* add only boot_pfn pages of low memory to free list.
- * max_low_pfn may be sized for
- * pages yet to be allocated from the hypervisor, or it may be set
- * to override the xen_start_info amount of memory
- */
- int boot_pfn = min(xen_start_info.nr_pages,max_low_pfn);
-
- /* this will put all low memory onto the freelists */
- totalram_pages += free_all_bootmem();
- /* XEN: init and count low-mem pages outside initial allocation. */
- for (pfn = boot_pfn; pfn < max_low_pfn; pfn++) {
- ClearPageReserved(&mem_map[pfn]);
- atomic_set(&mem_map[pfn].count, 1);
- totalram_pages++;
- }
-
- reservedpages = 0;
- for (pfn = 0; pfn < boot_pfn ; pfn++) {
- /*
- * Only count reserved RAM pages
- */
- if (page_is_ram(pfn) && PageReserved(mem_map+pfn))
- reservedpages++;
- }
-#ifdef CONFIG_HIGHMEM
- for (pfn = highend_pfn-1; pfn >= highstart_pfn; pfn--)
- one_highpage_init((struct page *) (mem_map + pfn),
- (pfn < xen_start_info.nr_pages));
- totalram_pages += totalhigh_pages;
-#endif
- return reservedpages;
-}
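
A worked example of the boot_pfn logic (figures invented): if the hypervisor grants 64MB at start of day (xen_start_info.nr_pages = 16384) but the domain is sized for 128MB (max_low_pfn = 32768), only pages 0..16383 are returned by free_all_bootmem(); the loop then initializes pages 16384..32767, which are expected to be populated by the hypervisor later, and counts them into totalram_pages.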
-
-void __init mem_init(void)
-{
- int codesize, reservedpages, datasize, initsize;
-
- if (!mem_map)
- BUG();
-
-#ifdef CONFIG_HIGHMEM
- /* check that fixmap and pkmap do not overlap */
- if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
- printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");
- printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
- PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, FIXADDR_START);
- BUG();
- }
-#endif
-
- set_max_mapnr_init();
-
- high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
-
- /* clear the zero-page */
- memset(empty_zero_page, 0, PAGE_SIZE);
-
- reservedpages = free_pages_init();
-
- codesize = (unsigned long) &_etext - (unsigned long) &_text;
- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
- initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
-
- printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk
reserved, %dk data, %dk init, %ldk highmem)\n",
- (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
- max_mapnr << (PAGE_SHIFT-10),
- codesize >> 10,
- reservedpages << (PAGE_SHIFT-10),
- datasize >> 10,
- initsize >> 10,
- (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
- );
-
- boot_cpu_data.wp_works_ok = 1;
-}
-
-void free_initmem(void)
-{
- unsigned long addr;
-
- addr = (unsigned long)(&__init_begin);
- for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(addr));
- set_page_count(virt_to_page(addr), 1);
- free_page(addr);
- totalram_pages++;
- }
- printk (KERN_INFO "Freeing unused kernel memory: %dk freed\n",
(&__init_end - &__init_begin) >> 10);
-}
-
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
- if (start < end)
- printk (KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start)
>> 10);
- for (; start < end; start += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(start));
- set_page_count(virt_to_page(start), 1);
- free_page(start);
- totalram_pages++;
- }
-}
-#endif
-
-void si_meminfo(struct sysinfo *val)
-{
- val->totalram = max_pfn;
- val->sharedram = 0;
- val->freeram = nr_free_pages();
- val->bufferram = atomic_read(&buffermem_pages);
- val->totalhigh = max_pfn-max_low_pfn;
- val->freehigh = nr_free_highpages();
- val->mem_unit = PAGE_SIZE;
- return;
-}
-
-#if defined(CONFIG_X86_PAE)
-struct kmem_cache_s *pae_pgd_cachep;
-void __init pgtable_cache_init(void)
-{
- /*
- * PAE pgds must be 16-byte aligned:
- */
- pae_pgd_cachep = kmem_cache_create("pae_pgd", 32, 0,
- SLAB_HWCACHE_ALIGN | SLAB_MUST_HWCACHE_ALIGN, NULL, NULL);
- if (!pae_pgd_cachep)
- panic("init_pae(): Cannot alloc pae_pgd SLAB cache");
-}
-#endif /* CONFIG_X86_PAE */
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/arch/xen/mm/ioremap.c
--- a/linux-2.4.30-xen-sparse/arch/xen/mm/ioremap.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,271 +0,0 @@
-/*
- * arch/xen/mm/ioremap.c
- *
- * Re-map IO memory to kernel address space so that we can access it.
- *
- * (C) Copyright 1995 1996 Linus Torvalds
- *
- * Modifications for Xenolinux (c) 2003-2004 Keir Fraser
- */
-
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/mman.h>
-#include <linux/vmalloc.h>
-#include <asm/io.h>
-#include <asm/pgalloc.h>
-#include <asm/uaccess.h>
-#include <asm/tlb.h>
-#include <asm/mmu.h>
-
-#if defined(CONFIG_XEN_PRIVILEGED_GUEST)
-
-/* These hacky macros avoid phys->machine translations. */
-#define __direct_pte(x) ((pte_t) { (x) } )
-#define __direct_mk_pte(page_nr,pgprot) \
- __direct_pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
-#define direct_mk_pte_phys(physpage, pgprot) \
- __direct_mk_pte((physpage) >> PAGE_SHIFT, pgprot)
-
-static inline void direct_remap_area_pte(pte_t *pte,
- unsigned long address,
- unsigned long size,
- mmu_update_t **v)
-{
- unsigned long end;
-
- address &= ~PMD_MASK;
- end = address + size;
- if (end > PMD_SIZE)
- end = PMD_SIZE;
- if (address >= end)
- BUG();
-
- do {
- (*v)->ptr = virt_to_machine(pte);
- (*v)++;
- address += PAGE_SIZE;
- pte++;
- } while (address && (address < end));
-}
-
-static inline int direct_remap_area_pmd(struct mm_struct *mm,
- pmd_t *pmd,
- unsigned long address,
- unsigned long size,
- mmu_update_t **v)
-{
- unsigned long end;
-
- address &= ~PGDIR_MASK;
- end = address + size;
- if (end > PGDIR_SIZE)
- end = PGDIR_SIZE;
- if (address >= end)
- BUG();
- do {
- pte_t *pte = pte_alloc(mm, pmd, address);
- if (!pte)
- return -ENOMEM;
- direct_remap_area_pte(pte, address, end - address, v);
-
- address = (address + PMD_SIZE) & PMD_MASK;
- pmd++;
- } while (address && (address < end));
- return 0;
-}
-
-int __direct_remap_area_pages(struct mm_struct *mm,
- unsigned long address,
- unsigned long size,
- mmu_update_t *v)
-{
- pgd_t * dir;
- unsigned long end = address + size;
-
- dir = pgd_offset(mm, address);
- flush_cache_all();
- if (address >= end)
- BUG();
- spin_lock(&mm->page_table_lock);
- do {
- pmd_t *pmd = pmd_alloc(mm, dir, address);
- if (!pmd)
- return -ENOMEM;
- direct_remap_area_pmd(mm, pmd, address, end - address, &v);
- address = (address + PGDIR_SIZE) & PGDIR_MASK;
- dir++;
-
- } while (address && (address < end));
- spin_unlock(&mm->page_table_lock);
- flush_tlb_all();
- return 0;
-}
-
-
-int direct_remap_area_pages(struct mm_struct *mm,
- unsigned long address,
- unsigned long machine_addr,
- unsigned long size,
- pgprot_t prot,
- domid_t domid)
-{
- int i;
- unsigned long start_address;
-#define MAX_DIRECTMAP_MMU_QUEUE 130
- mmu_update_t u[MAX_DIRECTMAP_MMU_QUEUE], *w, *v;
-
- u[0].ptr = MMU_EXTENDED_COMMAND;
- u[0].val = MMUEXT_SET_FOREIGNDOM;
- u[0].val |= (unsigned long)domid << 16;
- v = w = &u[1];
-
- start_address = address;
-
- for( i = 0; i < size; i += PAGE_SIZE )
- {
- if ( (v - u) == MAX_DIRECTMAP_MMU_QUEUE )
- {
- /* Fill in the PTE pointers. */
- __direct_remap_area_pages( mm,
- start_address,
- address-start_address,
- w);
-
- if ( HYPERVISOR_mmu_update(u, v - u, NULL) < 0 )
- return -EFAULT;
- v = w;
- start_address = address;
- }
-
- /*
- * Fill in the machine address: PTE ptr is done later by
- * __direct_remap_area_pages().
- */
- v->val = (machine_addr & PAGE_MASK) | pgprot_val(prot);
-
- machine_addr += PAGE_SIZE;
- address += PAGE_SIZE;
- v++;
- }
-
- if ( v != w )
- {
- /* get the ptep's filled in */
- __direct_remap_area_pages(mm,
- start_address,
- address-start_address,
- w);
- if ( unlikely(HYPERVISOR_mmu_update(u, v - u, NULL) < 0) )
- return -EFAULT;
- }
-
- return 0;
-}
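
direct_remap_area_pages() batches up to MAX_DIRECTMAP_MMU_QUEUE - 1 updates per hypercall: machine addresses are filled in first, the PTE pointers are back-filled by __direct_remap_area_pages(), and the batch is handed to HYPERVISOR_mmu_update(). A hypothetical caller in a privileged guest might look like this (function name, size, and protection chosen for illustration):

    /* Sketch: map 16 pages of domain dom, starting at machine
     * address maddr, into the kernel at vaddr. */
    static int map_foreign_pages(unsigned long vaddr, unsigned long maddr,
                                 domid_t dom)
    {
        return direct_remap_area_pages(&init_mm, vaddr, maddr,
                                       16 * PAGE_SIZE, PAGE_SHARED, dom);
    }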
-
-
-#endif /* CONFIG_XEN_PRIVILEGED_GUEST */
-
-
-/*
- * Remap an arbitrary machine address space into the kernel virtual
- * address space. Needed when a privileged instance of Xenolinux wants
- * to access space outside its world directly.
- *
- * NOTE! We need to allow non-page-aligned mappings too: we will obviously
- * have to convert them into an offset in a page-aligned mapping, but the
- * caller shouldn't need to know that small detail.
- */
-void * __ioremap(unsigned long machine_addr,
- unsigned long size,
- unsigned long flags)
-{
-#if defined(CONFIG_XEN_PRIVILEGED_GUEST)
- void * addr;
- struct vm_struct * area;
- unsigned long offset, last_addr;
- pgprot_t prot;
-
- /* Don't allow wraparound or zero size */
- last_addr = machine_addr + size - 1;
- if (!size || last_addr < machine_addr)
- return NULL;
-
- /* Mappings have to be page-aligned */
- offset = machine_addr & ~PAGE_MASK;
- machine_addr &= PAGE_MASK;
- size = PAGE_ALIGN(last_addr+1) - machine_addr;
-
- /* Ok, go for it */
- area = get_vm_area(size, VM_IOREMAP);
- if (!area)
- return NULL;
- addr = area->addr;
- prot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY |
- _PAGE_ACCESSED | flags);
- if (direct_remap_area_pages(&init_mm, VMALLOC_VMADDR(addr),
- machine_addr, size, prot, 0)) {
- vfree(addr);
- return NULL;
- }
- return (void *) (offset + (char *)addr);
-#else
- return NULL;
-#endif
-}
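
Usage follows the usual ioremap contract; a hypothetical example (the machine address and register offset are invented):

    /* Sketch: map one page of device registers and read a register. */
    void *regs = __ioremap(0xfebf0000UL, PAGE_SIZE, 0);
    if (regs != NULL) {
        unsigned int status = readl((char *)regs + 0x10);
        /* ... use status ... */
        iounmap(regs);
    }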
-
-void iounmap(void *addr)
-{
- vfree((void *)((unsigned long)addr & PAGE_MASK));
-}
-
-/* Implementation of boot-time ioremap for the purpose of providing access
-to the VGA console for privileged domains. Unlike boot-time ioremap on
-other architectures, ours is permanent and not reclaimed when the vmalloc
-infrastructure is started. */
-
-void __init *bt_ioremap(unsigned long machine_addr, unsigned long size)
-{
- unsigned long offset, last_addr;
- unsigned int nrpages;
- enum fixed_addresses idx;
-
- /* Don't allow wraparound or zero size */
- last_addr = machine_addr + size - 1;
- if (!size || last_addr < machine_addr)
- return NULL;
-
- /*
- * Mappings have to be page-aligned
- */
- offset = machine_addr & ~PAGE_MASK;
- machine_addr &= PAGE_MASK;
- size = PAGE_ALIGN(last_addr) - machine_addr;
-
- /*
- * Mappings have to fit in the FIX_BTMAP area.
- */
- nrpages = size >> PAGE_SHIFT;
- if (nrpages > NR_FIX_BTMAPS)
- return NULL;
-
- /*
- * Ok, go for it..
- */
- idx = FIX_BTMAP_BEGIN;
- while (nrpages > 0) {
- __set_fixmap(idx, machine_addr, PAGE_KERNEL);
- machine_addr += PAGE_SIZE;
- --idx;
- --nrpages;
- }
-
- flush_tlb_all();
-
- return (void*) (offset + fix_to_virt(FIX_BTMAP_BEGIN));
-}
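
The decrementing index is easy to misread: on i386, fix_to_virt(idx) = FIXADDR_TOP - (idx << PAGE_SHIFT), so larger indices map to lower virtual addresses. FIX_BTMAP_BEGIN is therefore the lowest virtual address of the window, and decrementing idx while machine_addr advances keeps both address sequences moving upward in step.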
-
-
-#if 0 /* We don't support these functions. They shouldn't be required. */
-void __init bt_iounmap(void *addr, unsigned long size) {}
-#endif
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/arch/xen/vmlinux.lds
--- a/linux-2.4.30-xen-sparse/arch/xen/vmlinux.lds Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,75 +0,0 @@
-/* ld script to make i386 Linux kernel
- * Written by Martin Mares <mj@xxxxxxxxxxxxxxxxxxxxxxxx>;
- */
-OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
-OUTPUT_ARCH(i386)
-ENTRY(_start)
-SECTIONS
-{
- . = 0xC0000000 + 0x100000;
- _text = .; /* Text and read-only data */
- .text : {
- *(.text)
- *(.fixup)
- *(.gnu.warning)
- } = 0x9090
-
- _etext = .; /* End of text section */
-
- .rodata : { *(.rodata) *(.rodata.*) }
- .kstrtab : { *(.kstrtab) }
-
- . = ALIGN(16); /* Exception table */
- __start___ex_table = .;
- __ex_table : { *(__ex_table) }
- __stop___ex_table = .;
-
- __start___ksymtab = .; /* Kernel symbol table */
- __ksymtab : { *(__ksymtab) }
- __stop___ksymtab = .;
-
- .data : { /* Data */
- *(.data)
- CONSTRUCTORS
- }
-
- _edata = .; /* End of data section */
-
- . = ALIGN(8192); /* init_task */
- .data.init_task : { *(.data.init_task) }
-
- . = ALIGN(4096); /* Init code and data */
- __init_begin = .;
- .text.init : { *(.text.init) }
- .data.init : { *(.data.init) }
- . = ALIGN(16);
- __setup_start = .;
- .setup.init : { *(.setup.init) }
- __setup_end = .;
- __initcall_start = .;
- .initcall.init : { *(.initcall.init) }
- __initcall_end = .;
- . = ALIGN(4096);
- __init_end = .;
-
- . = ALIGN(4096);
- .data.page_aligned : { *(.data.idt) }
-
- . = ALIGN(32);
- .data.cacheline_aligned : { *(.data.cacheline_aligned) }
-
- __bss_start = .; /* BSS */
- .bss : {
- *(.bss)
- }
- _end = . ;
-
- /* Stabs debugging sections. */
- .stab 0 : { *(.stab) }
- .stabstr 0 : { *(.stabstr) }
- .stab.excl 0 : { *(.stab.excl) }
- .stab.exclstr 0 : { *(.stab.exclstr) }
- .stab.index 0 : { *(.stab.index) }
- .stab.indexstr 0 : { *(.stab.indexstr) }
- .comment 0 : { *(.comment) }
-}
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/drivers/block/ll_rw_blk.c
--- a/linux-2.4.30-xen-sparse/drivers/block/ll_rw_blk.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,1663 +0,0 @@
-/*
- * linux/drivers/block/ll_rw_blk.c
- *
- * Copyright (C) 1991, 1992 Linus Torvalds
- * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
- * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@xxxxxxx> SuSE
- * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@xxxxxxx>
- * kernel-doc documentation started by NeilBrown <neilb@xxxxxxxxxxxxxxx> - July 2000
- */
-
-/*
- * This handles all read/write requests to block devices
- */
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/kernel_stat.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/config.h>
-#include <linux/locks.h>
-#include <linux/mm.h>
-#include <linux/swap.h>
-#include <linux/init.h>
-#include <linux/smp_lock.h>
-#include <linux/completion.h>
-#include <linux/bootmem.h>
-
-#include <asm/system.h>
-#include <asm/io.h>
-#include <linux/blk.h>
-#include <linux/highmem.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-
-/*
- * MAC Floppy IWM hooks
- */
-
-#ifdef CONFIG_MAC_FLOPPY_IWM
-extern int mac_floppy_init(void);
-#endif
-
-/*
- * For the allocated request tables
- */
-static kmem_cache_t *request_cachep;
-
-/*
- * The "disk" task queue is used to start the actual requests
- * after a plug
- */
-DECLARE_TASK_QUEUE(tq_disk);
-
-/*
- * Protect the request list against multiple users..
- *
- * With this spinlock the Linux block IO subsystem is 100% SMP threaded
- * from the IRQ event side, and almost 100% SMP threaded from the syscall
- * side (we still have to protect against block device array operations, and
- * the do_request() side is casually still unsafe. The kernel lock protects
- * this part currently.).
- *
- * there is a fair chance that things will work just OK if these functions
- * are called with no global kernel lock held ...
- */
-spinlock_t io_request_lock = SPIN_LOCK_UNLOCKED;
-
-/* This specifies how many sectors to read ahead on the disk. */
-
-int read_ahead[MAX_BLKDEV];
-
-/* blk_dev_struct is:
- * *request_fn
- * *current_request
- */
-struct blk_dev_struct blk_dev[MAX_BLKDEV]; /* initialized by blk_dev_init() */
-
-/*
- * blk_size contains the size of all block-devices in units of 1024 byte
- * sectors:
- *
- * blk_size[MAJOR][MINOR]
- *
- * if (!blk_size[MAJOR]) then no minor size checking is done.
- */
-int * blk_size[MAX_BLKDEV];
-
-/*
- * blksize_size contains the size of all block-devices:
- *
- * blksize_size[MAJOR][MINOR]
- *
- * if (!blksize_size[MAJOR]) then 1024 bytes is assumed.
- */
-int * blksize_size[MAX_BLKDEV];
-
-/*
- * hardsect_size contains the size of the hardware sector of a device.
- *
- * hardsect_size[MAJOR][MINOR]
- *
- * if (!hardsect_size[MAJOR])
- * then 512 bytes is assumed.
- * else
- * sector_size is hardsect_size[MAJOR][MINOR]
- * This is currently set by some scsi devices and read by the msdos fs driver.
- * Other uses may appear later.
- */
-int * hardsect_size[MAX_BLKDEV];
-
-/*
- * The following tunes the read-ahead algorithm in mm/filemap.c
- */
-int * max_readahead[MAX_BLKDEV];
-
-/*
- * Max number of sectors per request
- */
-int * max_sectors[MAX_BLKDEV];
-
-unsigned long blk_max_low_pfn, blk_max_pfn;
-int blk_nohighio = 0;
-
-int block_dump = 0;
-
-static struct timer_list writeback_timer;
-
-static inline int get_max_sectors(kdev_t dev)
-{
- if (!max_sectors[MAJOR(dev)])
- return MAX_SECTORS;
- return max_sectors[MAJOR(dev)][MINOR(dev)];
-}
-
-static inline request_queue_t *__blk_get_queue(kdev_t dev)
-{
- struct blk_dev_struct *bdev = blk_dev + MAJOR(dev);
-
- if (bdev->queue)
- return bdev->queue(dev);
- else
- return &blk_dev[MAJOR(dev)].request_queue;
-}
-
-request_queue_t *blk_get_queue(kdev_t dev)
-{
- return __blk_get_queue(dev);
-}
-
-static int __blk_cleanup_queue(struct request_list *list)
-{
- struct list_head *head = &list->free;
- struct request *rq;
- int i = 0;
-
- while (!list_empty(head)) {
- rq = list_entry(head->next, struct request, queue);
- list_del(&rq->queue);
- kmem_cache_free(request_cachep, rq);
- i++;
- };
-
- if (i != list->count)
- printk("request list leak!\n");
-
- list->count = 0;
- return i;
-}
-
-/**
- * blk_cleanup_queue: - release a &request_queue_t when it is no longer needed
- * @q: the request queue to be released
- *
- * Description:
- * blk_cleanup_queue is the pair to blk_init_queue(). It should
- * be called when a request queue is being released; typically
- * when a block device is being de-registered. Currently, its
- * primary task is to free all the &struct request structures that
- * were allocated to the queue.
- * Caveat:
- * Hopefully the low level driver will have finished any
- * outstanding requests first...
- **/
-void blk_cleanup_queue(request_queue_t * q)
-{
- int count = q->nr_requests;
-
- count -= __blk_cleanup_queue(&q->rq);
-
- if (count)
- printk("blk_cleanup_queue: leaked requests (%d)\n", count);
- if (atomic_read(&q->nr_sectors))
- printk("blk_cleanup_queue: leaked sectors (%d)\n",
atomic_read(&q->nr_sectors));
-
- memset(q, 0, sizeof(*q));
-}
-
-/**
- * blk_queue_headactive - indicate whether head of request queue may be active
- * @q: The queue which this applies to.
- * @active: A flag indicating whether the head of the queue is active.
- *
- * Description:
- * The driver for a block device may choose to leave the currently active
- * request on the request queue, removing it only when it has completed.
- * The queue handling routines assume this by default for safety reasons
- * and will not involve the head of the request queue in any merging or
- * reordering of requests when the queue is unplugged (and thus may be
- * working on this particular request).
- *
- * If a driver removes requests from the queue before processing them, then
- * it may indicate that it does so, thereby allowing the head of the queue
- * to be involved in merging and reordering. This is done by calling
- * blk_queue_headactive() with an @active flag of %0.
- *
- * If a driver processes several requests at once, it must remove them (or
- * at least all but one of them) from the request queue.
- *
- * When a queue is plugged the head will be assumed to be inactive.
- **/
-
-void blk_queue_headactive(request_queue_t * q, int active)
-{
- q->head_active = active;
-}
-
-/**
- * blk_queue_throttle_sectors - indicates you will call sector throttling funcs
- * @q: The queue which this applies to.
- * @active: A flag indicating whether you want sector throttling on
- *
- * Description:
- * The sector throttling code allows us to put a limit on the number of
- * sectors pending io to the disk at a given time, sending @active nonzero
- * indicates you will call blk_started_sectors and blk_finished_sectors in
- * addition to calling blk_started_io and blk_finished_io in order to
- * keep track of the number of sectors in flight.
- **/
-
-void blk_queue_throttle_sectors(request_queue_t * q, int active)
-{
- q->can_throttle = active;
-}
-
-/**
- * blk_queue_make_request - define an alternate make_request function for a device
- * @q: the request queue for the device to be affected
- * @mfn: the alternate make_request function
- *
- * Description:
- * The normal way for &struct buffer_heads to be passed to a device
- * driver is for them to be collected into requests on a request
- * queue, and then to allow the device driver to select requests
- * off that queue when it is ready. This works well for many block
- * devices. However some block devices (typically virtual devices
- * such as md or lvm) do not benefit from the processing on the
- * request queue, and are served best by having the requests passed
- * directly to them. This can be achieved by providing a function
- * to blk_queue_make_request().
- *
- * Caveat:
- * The driver that does this *must* be able to deal appropriately
- * with buffers in "highmemory", either by calling bh_kmap() to get
- * a kernel mapping, or by calling create_bounce() to create a
- * buffer in normal memory.
- **/
-
-void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
-{
- q->make_request_fn = mfn;
-}
-
-/**
- * blk_queue_bounce_limit - set bounce buffer limit for queue
- * @q: the request queue for the device
- * @dma_addr: bus address limit
- *
- * Description:
- * Different hardware can have different requirements as to what pages
- * it can do I/O directly to. A low level driver can call
- * blk_queue_bounce_limit to have lower memory pages allocated as bounce
- * buffers for doing I/O to pages residing above @dma_addr. By default
- * the block layer sets this to the highest numbered "low" memory page.
- **/
-void blk_queue_bounce_limit(request_queue_t *q, u64 dma_addr)
-{
- unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT;
- unsigned long mb = dma_addr >> 20;
- static request_queue_t *old_q;
-
- /*
- * keep this for debugging for now...
- */
- if (dma_addr != BLK_BOUNCE_HIGH && q != old_q) {
- old_q = q;
- printk("blk: queue %p, ", q);
- if (dma_addr == BLK_BOUNCE_ANY)
- printk("no I/O memory limit\n");
- else
- printk("I/O limit %luMb (mask 0x%Lx)\n", mb,
- (long long) dma_addr);
- }
-
- q->bounce_pfn = bounce_pfn;
-}
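
A hypothetical caller: a driver whose hardware can only DMA below the 16MB ISA boundary would request bounce buffers like this (queue name invented):

    /* Sketch: pages above pfn 0xfff (16MB with 4KB pages) get bounced. */
    blk_queue_bounce_limit(&my_dev_queue, 0x00ffffffULL);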
-
-
-/*
- * can we merge the two segments, or do we need to start a new one?
- */
-static inline int __blk_seg_merge_ok(struct buffer_head *bh, struct buffer_head *nxt)
-{
- /*
- * if bh and nxt are contiguous and don't cross a 4g boundary, it's ok
- */
- if (BH_CONTIG(bh, nxt) && BH_PHYS_4G(bh, nxt))
- return 1;
-
- return 0;
-}
-
-int blk_seg_merge_ok(struct buffer_head *bh, struct buffer_head *nxt)
-{
- return __blk_seg_merge_ok(bh, nxt);
-}
-
-static inline int ll_new_segment(request_queue_t *q, struct request *req, int max_segments)
-{
- if (req->nr_segments < max_segments) {
- req->nr_segments++;
- return 1;
- }
- return 0;
-}
-
-static int ll_back_merge_fn(request_queue_t *q, struct request *req,
- struct buffer_head *bh, int max_segments)
-{
- if (__blk_seg_merge_ok(req->bhtail, bh))
- return 1;
-
- return ll_new_segment(q, req, max_segments);
-}
-
-static int ll_front_merge_fn(request_queue_t *q, struct request *req,
- struct buffer_head *bh, int max_segments)
-{
- if (__blk_seg_merge_ok(bh, req->bh))
- return 1;
-
- return ll_new_segment(q, req, max_segments);
-}
-
-static int ll_merge_requests_fn(request_queue_t *q, struct request *req,
- struct request *next, int max_segments)
-{
- int total_segments = req->nr_segments + next->nr_segments;
-
- if (__blk_seg_merge_ok(req->bhtail, next->bh))
- total_segments--;
-
- if (total_segments > max_segments)
- return 0;
-
- req->nr_segments = total_segments;
- return 1;
-}
-
-/*
- * "plug" the device if there are no outstanding requests: this will
- * force the transfer to start only after we have put all the requests
- * on the list.
- *
- * This is called with interrupts off and no requests on the queue.
- * (and with the request spinlock acquired)
- */
-static void generic_plug_device(request_queue_t *q, kdev_t dev)
-{
- /*
- * no need to replug device
- */
- if (!list_empty(&q->queue_head) || q->plugged)
- return;
-
- q->plugged = 1;
- queue_task(&q->plug_tq, &tq_disk);
-}
-
-/*
- * remove the plug and let it rip..
- */
-static inline void __generic_unplug_device(request_queue_t *q)
-{
- if (q->plugged) {
- q->plugged = 0;
- if (!list_empty(&q->queue_head))
- q->request_fn(q);
- }
-}
-
-void generic_unplug_device(void *data)
-{
- request_queue_t *q = (request_queue_t *) data;
- unsigned long flags;
-
- spin_lock_irqsave(&io_request_lock, flags);
- __generic_unplug_device(q);
- spin_unlock_irqrestore(&io_request_lock, flags);
-}
-
-/** blk_grow_request_list
- * @q: The &request_queue_t
- * @nr_requests: how many requests are desired
- *
- * More free requests are added to the queue's free lists, bringing
- * the total number of requests to @nr_requests.
- *
- * The requests are added equally to the request queue's read
- * and write freelists.
- *
- * This function can sleep.
- *
- * Returns the (new) number of requests which the queue has available.
- */
-int blk_grow_request_list(request_queue_t *q, int nr_requests, int max_queue_sectors)
-{
- unsigned long flags;
- /* Several broken drivers assume that this function doesn't sleep;
- * this causes system hangs during boot.
- * As a temporary fix, make the function non-blocking.
- */
- spin_lock_irqsave(&io_request_lock, flags);
- while (q->nr_requests < nr_requests) {
- struct request *rq;
-
- rq = kmem_cache_alloc(request_cachep, SLAB_ATOMIC);
- if (rq == NULL)
- break;
- memset(rq, 0, sizeof(*rq));
- rq->rq_status = RQ_INACTIVE;
- list_add(&rq->queue, &q->rq.free);
- q->rq.count++;
-
- q->nr_requests++;
- }
-
- /*
- * Wakeup waiters after both one quarter of the
- * max-in-flight queue and one quarter of the requests
- * are available again.
- */
-
- q->batch_requests = q->nr_requests / 4;
- if (q->batch_requests > 32)
- q->batch_requests = 32;
- q->batch_sectors = max_queue_sectors / 4;
-
- q->max_queue_sectors = max_queue_sectors;
-
- BUG_ON(!q->batch_sectors);
- atomic_set(&q->nr_sectors, 0);
-
- spin_unlock_irqrestore(&io_request_lock, flags);
- return q->nr_requests;
-}
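
Worked numbers for the wakeup thresholds above (sizes illustrative): growing a queue to nr_requests = 128 gives batch_requests = min(128 / 4, 32) = 32, while a small-memory machine halved to 64 requests gets min(64 / 4, 32) = 16; batch_sectors is always max_queue_sectors / 4.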
-
-static void blk_init_free_list(request_queue_t *q)
-{
- struct sysinfo si;
- int megs; /* Total memory, in megabytes */
- int nr_requests, max_queue_sectors = MAX_QUEUE_SECTORS;
-
- INIT_LIST_HEAD(&q->rq.free);
- q->rq.count = 0;
- q->rq.pending[READ] = q->rq.pending[WRITE] = 0;
- q->nr_requests = 0;
-
- si_meminfo(&si);
- megs = si.totalram >> (20 - PAGE_SHIFT);
- nr_requests = MAX_NR_REQUESTS;
- if (megs < 30) {
- nr_requests /= 2;
- max_queue_sectors /= 2;
- }
- /* notice early if anybody screwed the defaults */
- BUG_ON(!nr_requests);
- BUG_ON(!max_queue_sectors);
-
- blk_grow_request_list(q, nr_requests, max_queue_sectors);
-
- init_waitqueue_head(&q->wait_for_requests);
-
- spin_lock_init(&q->queue_lock);
-}
-
-static int __make_request(request_queue_t * q, int rw, struct buffer_head * bh);
-
-/**
- * blk_init_queue - prepare a request queue for use with a block device
- * @q: The &request_queue_t to be initialised
- * @rfn: The function to be called to process requests that have been
- * placed on the queue.
- *
- * Description:
- * If a block device wishes to use the standard request handling procedures,
- * which sorts requests and coalesces adjacent requests, then it must
- * call blk_init_queue(). The function @rfn will be called when there
- * are requests on the queue that need to be processed. If the device
- * supports plugging, then @rfn may not be called immediately when requests
- * are available on the queue, but may be called at some time later instead.
- * Plugged queues are generally unplugged when a buffer belonging to one
- * of the requests on the queue is needed, or due to memory pressure.
- *
- * @rfn is not required, or even expected, to remove all requests off the
- * queue, but only as many as it can handle at a time. If it does leave
- * requests on the queue, it is responsible for arranging that the requests
- * get dealt with eventually.
- *
- * A global spin lock $io_request_lock must be held while manipulating the
- * requests on the request queue.
- *
- * The request on the head of the queue is by default assumed to be
- * potentially active, and it is not considered for re-ordering or merging
- * whenever the given queue is unplugged. This behaviour can be changed with
- * blk_queue_headactive().
- *
- * Note:
- * blk_init_queue() must be paired with a blk_cleanup_queue() call
- * when the block device is deactivated (such as at module unload).
- **/
-void blk_init_queue(request_queue_t * q, request_fn_proc * rfn)
-{
- INIT_LIST_HEAD(&q->queue_head);
- elevator_init(&q->elevator, ELEVATOR_LINUS);
- blk_init_free_list(q);
- q->request_fn = rfn;
- q->back_merge_fn = ll_back_merge_fn;
- q->front_merge_fn = ll_front_merge_fn;
- q->merge_requests_fn = ll_merge_requests_fn;
- q->make_request_fn = __make_request;
- q->plug_tq.sync = 0;
- q->plug_tq.routine = &generic_unplug_device;
- q->plug_tq.data = q;
- q->plugged = 0;
- q->can_throttle = 0;
-
- /*
- * These booleans describe the queue properties. We set the
- * default (and most common) values here. Other drivers can
- * use the appropriate functions to alter the queue properties.
- * as appropriate.
- */
- q->plug_device_fn = generic_plug_device;
- q->head_active = 1;
-
- blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
-}
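
Putting the pairing together, a minimal driver skeleton might read as follows (names invented; a sketch, not a complete driver):

    static request_queue_t my_queue;

    static void my_request_fn(request_queue_t *q)
    {
        /* Pop requests from q->queue_head here; io_request_lock is
         * already held when the queue is run. */
    }

    static int __init my_blk_init(void)
    {
        blk_init_queue(&my_queue, my_request_fn);
        blk_queue_headactive(&my_queue, 0); /* we dequeue before working */
        return 0;
    }

    static void __exit my_blk_exit(void)
    {
        blk_cleanup_queue(&my_queue); /* required pairing, see above */
    }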
-
-#define blkdev_free_rq(list) list_entry((list)->next, struct request, queue);
-/*
- * Get a free request. io_request_lock must be held and interrupts
- * disabled on the way in. Returns NULL if there are no free requests.
- */
-static struct request *get_request(request_queue_t *q, int rw)
-{
- struct request *rq = NULL;
- struct request_list *rl = &q->rq;
-
- if (blk_oversized_queue(q)) {
- int rlim = q->nr_requests >> 5;
-
- if (rlim < 4)
- rlim = 4;
-
- /*
- * if it's a write, or we have more than a handful of reads
- * pending, bail out
- */
- if ((rw == WRITE) || (rw == READ && rl->pending[READ] > rlim))
- return NULL;
- if (blk_oversized_queue_reads(q))
- return NULL;
- }
-
- if (!list_empty(&rl->free)) {
- rq = blkdev_free_rq(&rl->free);
- list_del(&rq->queue);
- rl->count--;
- rl->pending[rw]++;
- rq->rq_status = RQ_ACTIVE;
- rq->cmd = rw;
- rq->special = NULL;
- rq->q = q;
- }
-
- return rq;
-}
-
-/*
- * Here's the request allocation design, low latency version:
- *
- * 1: Blocking on request exhaustion is a key part of I/O throttling.
- *
- * 2: We want to be `fair' to all requesters. We must avoid starvation, and
- * attempt to ensure that all requesters sleep for a similar duration. Hence
- * no stealing requests when there are other processes waiting.
- *
- * There used to be more here, attempting to allow a process to send in a
- * number of requests once it has woken up. But, there's no way to
- * tell if a process has just been woken up, or if it is a new process
- * coming in to steal requests from the waiters. So, we give up and force
- * everyone to wait fairly.
- *
- * So here's what we do:
- *
- * a) A READA requester fails if free_requests < batch_requests
- *
- * We don't want READA requests to prevent sleepers from ever
- * waking. Note that READA is used extremely rarely - a few
- * filesystems use it for directory readahead.
- *
- * When a process wants a new request:
- *
- * b) If free_requests == 0, the requester sleeps in FIFO manner, and
- * the queue full condition is set. The full condition is not
- * cleared until there are no longer any waiters. Once the full
- * condition is set, all new io must wait, hopefully for a very
- * short period of time.
- *
- * When a request is released:
- *
- * c) If free_requests < batch_requests, do nothing.
- *
- * d) If free_requests >= batch_requests, wake up a single waiter.
- *
- * As each waiter gets a request, he wakes another waiter. We do this
- * to prevent a race where an unplug might get run before a request makes
- * its way onto the queue. The result is a cascade of wakeups, so delaying
- * the initial wakeup until we've got batch_requests available helps avoid
- * wakeups where there aren't any requests available yet.
- */
-
-static struct request *__get_request_wait(request_queue_t *q, int rw)
-{
- register struct request *rq;
- DECLARE_WAITQUEUE(wait, current);
-
- add_wait_queue_exclusive(&q->wait_for_requests, &wait);
-
- do {
- set_current_state(TASK_UNINTERRUPTIBLE);
- spin_lock_irq(&io_request_lock);
- if (blk_oversized_queue(q) || q->rq.count == 0) {
- __generic_unplug_device(q);
- spin_unlock_irq(&io_request_lock);
- schedule();
- spin_lock_irq(&io_request_lock);
- }
- rq = get_request(q, rw);
- spin_unlock_irq(&io_request_lock);
- } while (rq == NULL);
- remove_wait_queue(&q->wait_for_requests, &wait);
- current->state = TASK_RUNNING;
-
- return rq;
-}
-
-static void get_request_wait_wakeup(request_queue_t *q, int rw)
-{
- /*
- * avoid losing an unplug if a second __get_request_wait did the
- * generic_unplug_device while our __get_request_wait was running
- * w/o the queue_lock held and w/ our request out of the queue.
- */
- if (waitqueue_active(&q->wait_for_requests))
- wake_up(&q->wait_for_requests);
-}
-
-/* RO fail safe mechanism */
-
-static long ro_bits[MAX_BLKDEV][8];
-
-int is_read_only(kdev_t dev)
-{
- int minor,major;
-
- major = MAJOR(dev);
- minor = MINOR(dev);
- if (major < 0 || major >= MAX_BLKDEV) return 0;
- return ro_bits[major][minor >> 5] & (1 << (minor & 31));
-}
-
-void set_device_ro(kdev_t dev,int flag)
-{
- int minor,major;
-
- major = MAJOR(dev);
- minor = MINOR(dev);
- if (major < 0 || major >= MAX_BLKDEV) return;
- if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
- else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
-}
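
A worked example of the bit packing (device numbers invented): for minor 65, 65 >> 5 = 2 and 65 & 31 = 1, so the flag lives in bit 1 of ro_bits[major][2]; each of the eight longs per major covers 32 minors.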
-
-inline void drive_stat_acct (kdev_t dev, int rw,
- unsigned long nr_sectors, int new_io)
-{
- unsigned int major = MAJOR(dev);
- unsigned int index;
-
- index = disk_index(dev);
- if ((index >= DK_MAX_DISK) || (major >= DK_MAX_MAJOR))
- return;
-
- kstat.dk_drive[major][index] += new_io;
- if (rw == READ) {
- kstat.dk_drive_rio[major][index] += new_io;
- kstat.dk_drive_rblk[major][index] += nr_sectors;
- } else if (rw == WRITE) {
- kstat.dk_drive_wio[major][index] += new_io;
- kstat.dk_drive_wblk[major][index] += nr_sectors;
- } else
- printk(KERN_ERR "drive_stat_acct: cmd not R/W?\n");
-}
-
-#ifdef CONFIG_BLK_STATS
-/*
- * Return up to two hd_structs on which to do IO accounting for a given
- * request.
- *
- * On a partitioned device, we want to account both against the partition
- * and against the whole disk.
- */
-static void locate_hd_struct(struct request *req,
- struct hd_struct **hd1,
- struct hd_struct **hd2)
-{
- struct gendisk *gd;
-
- *hd1 = NULL;
- *hd2 = NULL;
-
- gd = get_gendisk(req->rq_dev);
- if (gd && gd->part) {
- /* Mask out the partition bits: account for the entire disk */
- int devnr = MINOR(req->rq_dev) >> gd->minor_shift;
- int whole_minor = devnr << gd->minor_shift;
-
- *hd1 = &gd->part[whole_minor];
- if (whole_minor != MINOR(req->rq_dev))
- *hd2= &gd->part[MINOR(req->rq_dev)];
- }
-}
-
-/*
- * Round off the performance stats on an hd_struct.
- *
- * The average IO queue length and utilisation statistics are maintained
- * by observing the current state of the queue length and the amount of
- * time it has been in this state for.
- * Normally, that accounting is done on IO completion, but that can result
- * in more than a second's worth of IO being accounted for within any one
- * second, leading to >100% utilisation. To deal with that, we do a
- * round-off before returning the results when reading /proc/partitions,
- * accounting immediately for all queue usage up to the current jiffies and
- * restarting the counters again.
- */
-void disk_round_stats(struct hd_struct *hd)
-{
- unsigned long now = jiffies;
-
- hd->aveq += (hd->ios_in_flight * (jiffies - hd->last_queue_change));
- hd->last_queue_change = now;
-
- if (hd->ios_in_flight)
- hd->io_ticks += (now - hd->last_idle_time);
- hd->last_idle_time = now;
-}
-
-static inline void down_ios(struct hd_struct *hd)
-{
- disk_round_stats(hd);
- --hd->ios_in_flight;
-}
-
-static inline void up_ios(struct hd_struct *hd)
-{
- disk_round_stats(hd);
- ++hd->ios_in_flight;
-}
-
-static void account_io_start(struct hd_struct *hd, struct request *req,
- int merge, int sectors)
-{
- switch (req->cmd) {
- case READ:
- if (merge)
- hd->rd_merges++;
- hd->rd_sectors += sectors;
- break;
- case WRITE:
- if (merge)
- hd->wr_merges++;
- hd->wr_sectors += sectors;
- break;
- }
- if (!merge)
- up_ios(hd);
-}
-
-static void account_io_end(struct hd_struct *hd, struct request *req)
-{
- unsigned long duration = jiffies - req->start_time;
- switch (req->cmd) {
- case READ:
- hd->rd_ticks += duration;
- hd->rd_ios++;
- break;
- case WRITE:
- hd->wr_ticks += duration;
- hd->wr_ios++;
- break;
- }
- down_ios(hd);
-}
-
-void req_new_io(struct request *req, int merge, int sectors)
-{
- struct hd_struct *hd1, *hd2;
-
- locate_hd_struct(req, &hd1, &hd2);
- if (hd1)
- account_io_start(hd1, req, merge, sectors);
- if (hd2)
- account_io_start(hd2, req, merge, sectors);
-}
-
-void req_merged_io(struct request *req)
-{
- struct hd_struct *hd1, *hd2;
-
- locate_hd_struct(req, &hd1, &hd2);
- if (hd1)
- down_ios(hd1);
- if (hd2)
- down_ios(hd2);
-}
-
-void req_finished_io(struct request *req)
-{
- struct hd_struct *hd1, *hd2;
-
- locate_hd_struct(req, &hd1, &hd2);
- if (hd1)
- account_io_end(hd1, req);
- if (hd2)
- account_io_end(hd2, req);
-}
-EXPORT_SYMBOL(req_finished_io);
-#endif /* CONFIG_BLK_STATS */
-
-/*
- * add-request adds a request to the linked list.
- * io_request_lock is held and interrupts disabled, as we muck with the
- * request queue list.
- *
- * By this point, req->cmd is always either READ/WRITE, never READA,
- * which is important for drive_stat_acct() above.
- */
-static inline void add_request(request_queue_t * q, struct request * req,
- struct list_head *insert_here)
-{
- drive_stat_acct(req->rq_dev, req->cmd, req->nr_sectors, 1);
-
- if (!q->plugged && q->head_active && insert_here == &q->queue_head) {
- spin_unlock_irq(&io_request_lock);
- BUG();
- }
-
- /*
- * elevator indicated where it wants this request to be
- * inserted at elevator_merge time
- */
- list_add(&req->queue, insert_here);
-}
-
-/*
- * Must be called with io_request_lock held and interrupts disabled
- */
-void blkdev_release_request(struct request *req)
-{
- request_queue_t *q = req->q;
-
- req->rq_status = RQ_INACTIVE;
- req->q = NULL;
-
- /*
- * Request may not have originated from ll_rw_blk. If not,
- * assume it has free buffers and check waiters
- */
- if (q) {
- struct request_list *rl = &q->rq;
- int oversized_batch = 0;
-
- if (q->can_throttle)
- oversized_batch = blk_oversized_queue_batch(q);
- rl->count++;
- /*
- * paranoia check
- */
- if (req->cmd == READ || req->cmd == WRITE)
- rl->pending[req->cmd]--;
- if (rl->pending[READ] > q->nr_requests)
- printk("blk: reads: %u\n", rl->pending[READ]);
- if (rl->pending[WRITE] > q->nr_requests)
- printk("blk: writes: %u\n", rl->pending[WRITE]);
- if (rl->pending[READ] + rl->pending[WRITE] > q->nr_requests)
- printk("blk: r/w: %u + %u > %u\n", rl->pending[READ],
rl->pending[WRITE], q->nr_requests);
- list_add(&req->queue, &rl->free);
- if (rl->count >= q->batch_requests && !oversized_batch) {
- smp_mb();
- if (waitqueue_active(&q->wait_for_requests))
- wake_up(&q->wait_for_requests);
- }
- }
-}
-
-/*
- * Has to be called with the request spinlock acquired
- */
-static void attempt_merge(request_queue_t * q,
- struct request *req,
- int max_sectors,
- int max_segments)
-{
- struct request *next;
-
- next = blkdev_next_request(req);
- if (req->sector + req->nr_sectors != next->sector)
- return;
- if (req->cmd != next->cmd
- || req->rq_dev != next->rq_dev
- || req->nr_sectors + next->nr_sectors > max_sectors
- || next->waiting)
- return;
- /*
- * If we are not allowed to merge these requests, then
- * return. If we are allowed to merge, then the count
- * will have been updated to the appropriate number,
- * and we shouldn't do it here too.
- */
- if (!q->merge_requests_fn(q, req, next, max_segments))
- return;
-
- q->elevator.elevator_merge_req_fn(req, next);
-
- /* At this point we have either done a back merge
- * or front merge. We need the smaller start_time of
- * the merged requests to be the current request
- * for accounting purposes.
- */
- if (time_after(req->start_time, next->start_time))
- req->start_time = next->start_time;
-
- req->bhtail->b_reqnext = next->bh;
- req->bhtail = next->bhtail;
- req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors;
- list_del(&next->queue);
-
- /* One last thing: we have removed a request, so we now have one
- less expected IO to complete for accounting purposes. */
- req_merged_io(req);
-
- blkdev_release_request(next);
-}
-
-static inline void attempt_back_merge(request_queue_t * q,
- struct request *req,
- int max_sectors,
- int max_segments)
-{
- if (&req->queue == q->queue_head.prev)
- return;
- attempt_merge(q, req, max_sectors, max_segments);
-}
-
-static inline void attempt_front_merge(request_queue_t * q,
- struct list_head * head,
- struct request *req,
- int max_sectors,
- int max_segments)
-{
- struct list_head * prev;
-
- prev = req->queue.prev;
- if (head == prev)
- return;
- attempt_merge(q, blkdev_entry_to_request(prev), max_sectors, max_segments);
-}
-
-static int __make_request(request_queue_t * q, int rw,
- struct buffer_head * bh)
-{
- unsigned int sector, count, sync;
- int max_segments = MAX_SEGMENTS;
- struct request * req, *freereq = NULL;
- int rw_ahead, max_sectors, el_ret;
- struct list_head *head, *insert_here;
- int latency;
- elevator_t *elevator = &q->elevator;
- int should_wake = 0;
-
- count = bh->b_size >> 9;
- sector = bh->b_rsector;
- sync = test_and_clear_bit(BH_Sync, &bh->b_state);
-
- rw_ahead = 0; /* normal case; gets changed below for READA */
- switch (rw) {
- case READA:
-#if 0 /* bread() misinterprets failed READA attempts as IO errors on SMP */
- rw_ahead = 1;
-#endif
- rw = READ; /* drop into READ */
- case READ:
- case WRITE:
- latency = elevator_request_latency(elevator, rw);
- break;
- default:
- BUG();
- goto end_io;
- }
-
- /* We'd better have a real physical mapping!
- Check this bit only if the buffer was dirty and just locked
- down by us so at this point flushpage will block and
- won't clear the mapped bit under us. */
- if (!buffer_mapped(bh))
- BUG();
-
- /*
- * Temporary solution - in 2.5 this will be done by the lowlevel
- * driver. Create a bounce buffer if the buffer data points into
- * high memory - keep the original buffer otherwise.
- */
- bh = blk_queue_bounce(q, rw, bh);
-
-/* look for a free request. */
- /*
- * Try to coalesce the new request with old requests
- */
- max_sectors = get_max_sectors(bh->b_rdev);
-
- req = NULL;
- head = &q->queue_head;
- /*
- * Now we acquire the request spinlock, we have to be mega careful
- * not to schedule or do something nonatomic
- */
- spin_lock_irq(&io_request_lock);
-
-again:
- insert_here = head->prev;
-
- if (list_empty(head)) {
- q->plug_device_fn(q, bh->b_rdev); /* is atomic */
- goto get_rq;
- } else if (q->head_active && !q->plugged)
- head = head->next;
-
- el_ret = elevator->elevator_merge_fn(q, &req, head, bh, rw,max_sectors);
- switch (el_ret) {
-
- case ELEVATOR_BACK_MERGE:
- if (!q->back_merge_fn(q, req, bh, max_segments)) {
- insert_here = &req->queue;
- break;
- }
- req->bhtail->b_reqnext = bh;
- req->bhtail = bh;
- req->nr_sectors = req->hard_nr_sectors += count;
- blk_started_io(count);
- blk_started_sectors(req, count);
- drive_stat_acct(req->rq_dev, req->cmd, count, 0);
- req_new_io(req, 1, count);
- attempt_back_merge(q, req, max_sectors, max_segments);
- goto out;
-
- case ELEVATOR_FRONT_MERGE:
- if (!q->front_merge_fn(q, req, bh, max_segments)) {
- insert_here = req->queue.prev;
- break;
- }
- bh->b_reqnext = req->bh;
- req->bh = bh;
- /*
- * may not be valid, but queues not having bounce
- * enabled for highmem pages must not look at
- * ->buffer anyway
- */
- req->buffer = bh->b_data;
- req->current_nr_sectors = req->hard_cur_sectors = count;
- req->sector = req->hard_sector = sector;
- req->nr_sectors = req->hard_nr_sectors += count;
- blk_started_io(count);
- blk_started_sectors(req, count);
- drive_stat_acct(req->rq_dev, req->cmd, count, 0);
- req_new_io(req, 1, count);
- attempt_front_merge(q, head, req, max_sectors, max_segments);
- goto out;
-
- /*
- * elevator says don't/can't merge. get new request
- */
- case ELEVATOR_NO_MERGE:
- /*
- * use elevator hints as to where to insert the
- * request. if no hints, just add it to the back
- * of the queue
- */
- if (req)
- insert_here = &req->queue;
- break;
-
- default:
- printk("elevator returned crap (%d)\n", el_ret);
- BUG();
- }
-
-get_rq:
- if (freereq) {
- req = freereq;
- freereq = NULL;
- } else {
- /*
- * See description above __get_request_wait()
- */
- if (rw_ahead) {
- if (q->rq.count < q->batch_requests || blk_oversized_queue_batch(q)) {
- spin_unlock_irq(&io_request_lock);
- goto end_io;
- }
- req = get_request(q, rw);
- if (req == NULL)
- BUG();
- } else {
- req = get_request(q, rw);
- if (req == NULL) {
- spin_unlock_irq(&io_request_lock);
- freereq = __get_request_wait(q, rw);
- head = &q->queue_head;
- spin_lock_irq(&io_request_lock);
- should_wake = 1;
- goto again;
- }
- }
- }
-
-/* fill up the request-info, and add it to the queue */
- req->elevator_sequence = latency;
- req->cmd = rw;
- req->errors = 0;
- req->hard_sector = req->sector = sector;
- req->hard_nr_sectors = req->nr_sectors = count;
- req->current_nr_sectors = req->hard_cur_sectors = count;
- req->nr_segments = 1; /* Always 1 for a new request. */
- req->nr_hw_segments = 1; /* Always 1 for a new request. */
- req->buffer = bh->b_data;
- req->waiting = NULL;
- req->bh = bh;
- req->bhtail = bh;
- req->rq_dev = bh->b_rdev;
- req->start_time = jiffies;
- req_new_io(req, 0, count);
- blk_started_io(count);
- blk_started_sectors(req, count);
- add_request(q, req, insert_here);
-out:
- if (freereq)
- blkdev_release_request(freereq);
- if (should_wake)
- get_request_wait_wakeup(q, rw);
- if (sync)
- __generic_unplug_device(q);
- spin_unlock_irq(&io_request_lock);
- return 0;
-end_io:
- bh->b_end_io(bh, test_bit(BH_Uptodate, &bh->b_state));
- return 0;
-}
-
-/**
- * generic_make_request: hand a buffer head to its device driver for I/O
- * @rw: READ, WRITE, or READA - what sort of I/O is desired.
- * @bh: The buffer head describing the location in memory and on the device.
- *
- * generic_make_request() is used to make I/O requests of block
- * devices. It is passed a &struct buffer_head and a &rw value. The
- * %READ and %WRITE options are (hopefully) obvious in meaning. The
- * %READA value means that a read is required, but that the driver is
- * free to fail the request if, for example, it cannot get needed
- * resources immediately.
- *
- * generic_make_request() does not return any status. The
- * success/failure status of the request, along with notification of
- * completion, is delivered asynchronously through the bh->b_end_io
- * function described (one day) else where.
- *
- * The caller of generic_make_request must make sure that b_page,
- * b_addr, b_size are set to describe the memory buffer, that b_rdev
- * and b_rsector are set to describe the device address, and the
- * b_end_io and optionally b_private are set to describe how
- * completion notification should be signaled. BH_Mapped should also
- * be set (to confirm that b_dev and b_blocknr are valid).
- *
- * generic_make_request and the drivers it calls may use b_reqnext,
- * and may change b_rdev and b_rsector. So the values of these fields
- * should NOT be depended on after the call to generic_make_request.
- * Because of this, the caller should record the device address
- * information in b_dev and b_blocknr.
- *
- * Apart from those fields mentioned above, no other fields, and in
- * particular, no other flags, are changed by generic_make_request or
- * any lower level drivers.
- * */
-void generic_make_request (int rw, struct buffer_head * bh)
-{
- int major = MAJOR(bh->b_rdev);
- int minorsize = 0;
- request_queue_t *q;
-
- if (!bh->b_end_io)
- BUG();
-
- /* Test device size, when known. */
- if (blk_size[major])
- minorsize = blk_size[major][MINOR(bh->b_rdev)];
- if (minorsize) {
- unsigned long maxsector = (minorsize << 1) + 1;
- unsigned long sector = bh->b_rsector;
- unsigned int count = bh->b_size >> 9;
-
- if (maxsector < count || maxsector - count < sector) {
- /* Yecch */
- bh->b_state &= ~(1 << BH_Dirty);
-
- /* This may well happen - the kernel calls bread()
- without checking the size of the device, e.g.,
- when mounting a device. */
- printk(KERN_INFO
- "attempt to access beyond end of device\n");
- printk(KERN_INFO "%s: rw=%d, want=%ld, limit=%d\n",
- kdevname(bh->b_rdev), rw,
- (sector + count)>>1, minorsize);
-
- bh->b_end_io(bh, 0);
- return;
- }
- }
-
- /*
- * Resolve the mapping until finished. (drivers are
- * still free to implement/resolve their own stacking
- * by explicitly returning 0)
- */
- /* NOTE: we don't repeat the blk_size check for each new device.
- * Stacking drivers are expected to know what they are doing.
- */
- do {
- q = __blk_get_queue(bh->b_rdev);
- if (!q) {
- printk(KERN_ERR
- "generic_make_request: Trying to access "
- "nonexistent block-device %s (%ld)\n",
- kdevname(bh->b_rdev), bh->b_rsector);
- buffer_IO_error(bh);
- break;
- }
- } while (q->make_request_fn(q, rw, bh));
-}
-
-
-/**
- * submit_bh: submit a buffer_head to the block device later for I/O
- * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
- * @bh: The &struct buffer_head which describes the I/O
- *
- * submit_bh() is very similar in purpose to generic_make_request(), and
- * uses that function to do most of the work.
- *
- * The extra functionality provided by submit_bh is to determine
- * b_rsector from b_blocknr and b_size, and to set b_rdev from b_dev.
- * This is appropriate for IO requests that come from the buffer
- * cache and page cache which (currently) always use aligned blocks.
- */
-void submit_bh(int rw, struct buffer_head * bh)
-{
- int count = bh->b_size >> 9;
-
- if (!test_bit(BH_Lock, &bh->b_state))
- BUG();
-
- set_bit(BH_Req, &bh->b_state);
- set_bit(BH_Launder, &bh->b_state);
-
- /*
- * First step, 'identity mapping' - RAID or LVM might
- * further remap this.
- */
- bh->b_rdev = bh->b_dev;
- bh->b_rsector = bh->b_blocknr * count;
-
- get_bh(bh);
- generic_make_request(rw, bh);
-
- /* fix race condition with wait_on_buffer() */
- smp_mb(); /* spin_unlock may have inclusive semantics */
- if (waitqueue_active(&bh->b_wait))
- wake_up(&bh->b_wait);
-
- if (block_dump)
- printk(KERN_DEBUG "%s: %s block %lu/%u on %s\n", current->comm,
rw == WRITE ? "WRITE" : "READ", bh->b_rsector, count, kdevname(bh->b_rdev));
-
- put_bh(bh);
- switch (rw) {
- case WRITE:
- kstat.pgpgout += count;
- break;
- default:
- kstat.pgpgin += count;
- break;
- }
-}
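
As a worked example of the identity mapping above (block size invented): with 4KB buffers, count = 4096 >> 9 = 8 sectors, so b_blocknr = 100 becomes b_rsector = 100 * 8 = 800 before RAID or LVM remaps it further.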
-
-/**
- * ll_rw_block: low-level access to block devices
- * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
- * @nr: number of &struct buffer_heads in the array
- * @bhs: array of pointers to &struct buffer_head
- *
- * ll_rw_block() takes an array of pointers to &struct buffer_heads,
- * and requests an I/O operation on them, either a %READ or a %WRITE.
- * The third %READA option is described in the documentation for
- * generic_make_request() which ll_rw_block() calls.
- *
- * This function provides extra functionality that is not in
- * generic_make_request() that is relevant to buffers in the buffer
- * cache or page cache. In particular it drops any buffer that it
- * cannot get a lock on (with the BH_Lock state bit), any buffer that
- * appears to be clean when doing a write request, and any buffer that
- * appears to be up-to-date when doing read request. Further it marks
- * as clean buffers that are processed for writing (the buffer cache
- * won't assume that they are actually clean until the buffer gets
- * unlocked).
- *
- * ll_rw_block sets b_end_io to simple completion handler that marks
- * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
- * any waiters. A client that needs a more interesting completion
- * routine should call submit_bh() (or generic_make_request())
- * directly.
- *
- * Caveat:
- * All of the buffers must be for the same device, and must also be
- * of the current approved size for the device. */
-
-void ll_rw_block(int rw, int nr, struct buffer_head * bhs[])
-{
- unsigned int major;
- int correct_size;
- int i;
-
- if (!nr)
- return;
-
- major = MAJOR(bhs[0]->b_dev);
-
- /* Determine correct block size for this device. */
- correct_size = get_hardsect_size(bhs[0]->b_dev);
-
- /* Verify requested block sizes. */
- for (i = 0; i < nr; i++) {
- struct buffer_head *bh = bhs[i];
- if (bh->b_size % correct_size) {
- printk(KERN_NOTICE "ll_rw_block: device %s: "
- "only %d-char blocks implemented (%u)\n",
- kdevname(bhs[0]->b_dev),
- correct_size, bh->b_size);
- goto sorry;
- }
- }
-
- if ((rw & WRITE) && is_read_only(bhs[0]->b_dev)) {
- printk(KERN_NOTICE "Can't write to read-only device %s\n",
- kdevname(bhs[0]->b_dev));
- goto sorry;
- }
-
- for (i = 0; i < nr; i++) {
- struct buffer_head *bh = bhs[i];
-
- lock_buffer(bh);
-
- /* We have the buffer lock */
- atomic_inc(&bh->b_count);
- bh->b_end_io = end_buffer_io_sync;
-
- switch(rw) {
- case WRITE:
- if (!atomic_set_buffer_clean(bh))
- /* Hmmph! Nothing to write */
- goto end_io;
- __mark_buffer_clean(bh);
- break;
-
- case READA:
- case READ:
- if (buffer_uptodate(bh))
- /* Hmmph! Already have it */
- goto end_io;
- break;
- default:
- BUG();
- end_io:
- bh->b_end_io(bh, test_bit(BH_Uptodate, &bh->b_state));
- continue;
- }
-
- submit_bh(rw, bh);
- }
- return;
-
-sorry:
- /* Make sure we don't get infinite dirty retries.. */
- for (i = 0; i < nr; i++)
- mark_buffer_clean(bhs[i]);
-}
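
Typical use mirrors bread(): grab a buffer, submit it, and sleep on the lock bit. A hypothetical sketch:

    /* Sketch: synchronously read one block (cf. bread()). */
    static struct buffer_head *read_one_block(kdev_t dev, int nr, int size)
    {
        struct buffer_head *bh = getblk(dev, nr, size);
        if (!buffer_uptodate(bh)) {
            ll_rw_block(READ, 1, &bh);
            wait_on_buffer(bh);
        }
        return bh; /* caller checks buffer_uptodate() and brelse()s */
    }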
-
-#ifdef CONFIG_STRAM_SWAP
-extern int stram_device_init (void);
-#endif
-
-static void blk_writeback_timer(unsigned long data)
-{
- wakeup_bdflush();
- wakeup_kupdate();
-}
-
-/**
- * end_that_request_first - end I/O on one buffer.
- * @req: the request being processed
- * @uptodate: 0 for I/O error
- * @name: the name printed for an I/O error
- *
- * Description:
- * Ends I/O on the first buffer attached to @req, and sets it up
- * for the next buffer_head (if any) in the cluster.
- *
- * Return:
- * 0 - we are done with this request, call end_that_request_last()
- * 1 - still buffers pending for this request
- *
- * Caveat:
- * Drivers implementing their own end_request handling must call
- * blk_finished_io() appropriately.
- **/
-
-int end_that_request_first (struct request *req, int uptodate, char *name)
-{
- struct buffer_head * bh;
- int nsect;
-
- req->errors = 0;
- if (!uptodate)
- printk("end_request: I/O error, dev %s (%s), sector %lu\n",
- kdevname(req->rq_dev), name, req->sector);
-
- if ((bh = req->bh) != NULL) {
- nsect = bh->b_size >> 9;
- blk_finished_io(nsect);
- blk_finished_sectors(req, nsect);
- req->bh = bh->b_reqnext;
- bh->b_reqnext = NULL;
- bh->b_end_io(bh, uptodate);
- if ((bh = req->bh) != NULL) {
- req->hard_sector += nsect;
- req->hard_nr_sectors -= nsect;
- req->sector = req->hard_sector;
- req->nr_sectors = req->hard_nr_sectors;
-
- req->current_nr_sectors = bh->b_size >> 9;
- req->hard_cur_sectors = req->current_nr_sectors;
- if (req->nr_sectors < req->current_nr_sectors) {
- req->nr_sectors = req->current_nr_sectors;
- printk("end_request: buffer-list destroyed\n");
- }
- req->buffer = bh->b_data;
- return 1;
- }
- }
- return 0;
-}
-
-extern int laptop_mode;
-
-void end_that_request_last(struct request *req)
-{
- struct completion *waiting = req->waiting;
-
- /*
- * schedule the writeout of pending dirty data when the disk is idle
- */
- if (laptop_mode && req->cmd == READ)
- mod_timer(&writeback_timer, jiffies + 5 * HZ);
-
- req_finished_io(req);
- blkdev_release_request(req);
- if (waiting)
- complete(waiting);
-}
-
-int __init blk_dev_init(void)
-{
- struct blk_dev_struct *dev;
-
- request_cachep = kmem_cache_create("blkdev_requests",
- sizeof(struct request),
- 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
-
- if (!request_cachep)
- panic("Can't create request pool slab cache\n");
-
- for (dev = blk_dev + MAX_BLKDEV; dev-- != blk_dev;)
- dev->queue = NULL;
-
- memset(ro_bits,0,sizeof(ro_bits));
- memset(max_readahead, 0, sizeof(max_readahead));
- memset(max_sectors, 0, sizeof(max_sectors));
-
- blk_max_low_pfn = max_low_pfn - 1;
- blk_max_pfn = max_pfn - 1;
-
- init_timer(&writeback_timer);
- writeback_timer.function = blk_writeback_timer;
-
-#ifdef CONFIG_AMIGA_Z2RAM
- z2_init();
-#endif
-#ifdef CONFIG_STRAM_SWAP
- stram_device_init();
-#endif
-#ifdef CONFIG_ISP16_CDI
- isp16_init();
-#endif
-#ifdef CONFIG_BLK_DEV_PS2
- ps2esdi_init();
-#endif
-#ifdef CONFIG_BLK_DEV_XD
- xd_init();
-#endif
-#ifdef CONFIG_BLK_DEV_MFM
- mfm_init();
-#endif
-#ifdef CONFIG_PARIDE
- { extern void paride_init(void); paride_init(); };
-#endif
-#ifdef CONFIG_MAC_FLOPPY
- swim3_init();
-#endif
-#ifdef CONFIG_BLK_DEV_SWIM_IOP
- swimiop_init();
-#endif
-#ifdef CONFIG_AMIGA_FLOPPY
- amiga_floppy_init();
-#endif
-#ifdef CONFIG_ATARI_FLOPPY
- atari_floppy_init();
-#endif
-#ifdef CONFIG_BLK_DEV_FD
- floppy_init();
-#else
-#if defined(__i386__) && !defined(CONFIG_XEN) /* Do we even need this? */
- outb_p(0xc, 0x3f2);
-#endif
-#endif
-#ifdef CONFIG_CDU31A
- cdu31a_init();
-#endif
-#ifdef CONFIG_ATARI_ACSI
- acsi_init();
-#endif
-#ifdef CONFIG_MCD
- mcd_init();
-#endif
-#ifdef CONFIG_MCDX
- mcdx_init();
-#endif
-#ifdef CONFIG_SBPCD
- sbpcd_init();
-#endif
-#ifdef CONFIG_AZTCD
- aztcd_init();
-#endif
-#ifdef CONFIG_CDU535
- sony535_init();
-#endif
-#ifdef CONFIG_GSCD
- gscd_init();
-#endif
-#ifdef CONFIG_CM206
- cm206_init();
-#endif
-#ifdef CONFIG_OPTCD
- optcd_init();
-#endif
-#ifdef CONFIG_SJCD
- sjcd_init();
-#endif
-#ifdef CONFIG_APBLOCK
- ap_init();
-#endif
-#ifdef CONFIG_DDV
- ddv_init();
-#endif
-#ifdef CONFIG_MDISK
- mdisk_init();
-#endif
-#ifdef CONFIG_DASD
- dasd_init();
-#endif
-#if defined(CONFIG_S390_TAPE) && defined(CONFIG_S390_TAPE_BLOCK)
- tapeblock_init();
-#endif
-#ifdef CONFIG_BLK_DEV_XPRAM
- xpram_init();
-#endif
-
-#ifdef CONFIG_SUN_JSFLASH
- jsfd_init();
-#endif
-
-#if defined(CONFIG_XEN_BLKDEV_FRONTEND)
- xlblk_init();
-#endif
-
- return 0;
-};
-
-EXPORT_SYMBOL(io_request_lock);
-EXPORT_SYMBOL(end_that_request_first);
-EXPORT_SYMBOL(end_that_request_last);
-EXPORT_SYMBOL(blk_grow_request_list);
-EXPORT_SYMBOL(blk_init_queue);
-EXPORT_SYMBOL(blk_get_queue);
-EXPORT_SYMBOL(blk_cleanup_queue);
-EXPORT_SYMBOL(blk_queue_headactive);
-EXPORT_SYMBOL(blk_queue_throttle_sectors);
-EXPORT_SYMBOL(blk_queue_make_request);
-EXPORT_SYMBOL(generic_make_request);
-EXPORT_SYMBOL(blkdev_release_request);
-EXPORT_SYMBOL(generic_unplug_device);
-EXPORT_SYMBOL(blk_queue_bounce_limit);
-EXPORT_SYMBOL(blk_max_low_pfn);
-EXPORT_SYMBOL(blk_max_pfn);
-EXPORT_SYMBOL(blk_seg_merge_ok);
-EXPORT_SYMBOL(blk_nohighio);
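
Context, not part of the patch: the export list above is the 2.4 block-driver API this file provided. A sketch of a minimal registration against it (MYDRV_MAJOR and mydrv_request are hypothetical names):

    static void mydrv_request(request_queue_t *q)
    {
            while (!list_empty(&q->queue_head)) {
                    struct request *req = blkdev_entry_next_request(&q->queue_head);
                    /* ... start hardware I/O for req ... */
                    break;  /* the completion handler retires it later */
            }
    }

    /* at init time: */
    blk_init_queue(BLK_DEFAULT_QUEUE(MYDRV_MAJOR), mydrv_request);
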
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/drivers/char/Makefile
--- a/linux-2.4.30-xen-sparse/drivers/char/Makefile Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,361 +0,0 @@
-#
-# Makefile for the kernel character device drivers.
-#
-# Note! Dependencies are done automagically by 'make dep', which also
-# removes any old dependencies. DON'T put your own dependencies here
-# unless it's something special (ie not a .c file).
-#
-# Note 2! The CFLAGS definitions are now inherited from the
-# parent makes..
-#
-
-#
-# This file contains the font map for the default (hardware) font
-#
-FONTMAPFILE = cp437.uni
-
-O_TARGET := char.o
-
-obj-y += mem.o tty_io.o n_tty.o tty_ioctl.o raw.o pty.o misc.o random.o
-
-# All of the (potential) objects that export symbols.
-# This list comes from 'grep -l EXPORT_SYMBOL *.[hc]'.
-
-export-objs := busmouse.o console.o keyboard.o sysrq.o \
- misc.o pty.o random.o selection.o serial.o \
- sonypi.o tty_io.o tty_ioctl.o generic_serial.o \
- au1000_gpio.o vac-serial.o hp_psaux.o nvram.o \
- scx200.o fetchop.o
-
-mod-subdirs := joystick ftape drm drm-4.0 pcmcia
-
-list-multi :=
-
-KEYMAP =defkeymap.o
-KEYBD =pc_keyb.o
-CONSOLE =console.o
-SERIAL =serial.o
-
-ifeq ($(ARCH),xen)
- ifneq ($(CONFIG_XEN_PHYSDEV_ACCESS),y)
- KEYBD =
- endif
-endif
-
-ifeq ($(ARCH),s390)
- KEYMAP =
- KEYBD =
- CONSOLE =
- SERIAL =
-endif
-
-ifeq ($(ARCH),mips)
- ifneq ($(CONFIG_PC_KEYB),y)
- KEYBD =
- endif
- ifeq ($(CONFIG_VR41XX_KIU),y)
- KEYMAP =
- KEYBD = vr41xx_keyb.o
- endif
-endif
-
-ifeq ($(ARCH),s390x)
- KEYMAP =
- KEYBD =
- CONSOLE =
- SERIAL =
-endif
-
-ifeq ($(ARCH),m68k)
- ifdef CONFIG_AMIGA
- KEYBD = amikeyb.o
- else
- ifndef CONFIG_MAC
- KEYBD =
- endif
- endif
- SERIAL =
-endif
-
-ifeq ($(ARCH),parisc)
- ifdef CONFIG_GSC_PS2
- KEYBD = hp_psaux.o hp_keyb.o
- else
- KEYBD =
- endif
- ifdef CONFIG_SERIAL_MUX
- CONSOLE += mux.o
- endif
- ifdef CONFIG_PDC_CONSOLE
- CONSOLE += pdc_console.o
- endif
-endif
-
-ifdef CONFIG_Q40
- KEYBD += q40_keyb.o
- SERIAL = serial.o
-endif
-
-ifdef CONFIG_APOLLO
- KEYBD += dn_keyb.o
-endif
-
-ifeq ($(ARCH),parisc)
- ifdef CONFIG_GSC_PS2
- KEYBD = hp_psaux.o hp_keyb.o
- else
- KEYBD =
- endif
- ifdef CONFIG_PDC_CONSOLE
- CONSOLE += pdc_console.o
- endif
-endif
-
-ifeq ($(ARCH),arm)
- ifneq ($(CONFIG_PC_KEYMAP),y)
- KEYMAP =
- endif
- ifneq ($(CONFIG_PC_KEYB),y)
- KEYBD =
- endif
-endif
-
-ifeq ($(ARCH),sh)
- KEYMAP =
- KEYBD =
- CONSOLE =
- ifeq ($(CONFIG_SH_HP600),y)
- KEYMAP = defkeymap.o
- KEYBD = scan_keyb.o hp600_keyb.o
- CONSOLE = console.o
- endif
- ifeq ($(CONFIG_SH_DMIDA),y)
- # DMIDA does not connect the HD64465 PS/2 keyboard port
- # but we allow for USB keyboards to be plugged in.
- KEYMAP = defkeymap.o
- KEYBD = # hd64465_keyb.o pc_keyb.o
- CONSOLE = console.o
- endif
- ifeq ($(CONFIG_SH_EC3104),y)
- KEYMAP = defkeymap.o
- KEYBD = ec3104_keyb.o
- CONSOLE = console.o
- endif
- ifeq ($(CONFIG_SH_DREAMCAST),y)
- KEYMAP = defkeymap.o
- KEYBD =
- CONSOLE = console.o
- endif
-endif
-
-ifeq ($(CONFIG_DECSTATION),y)
- KEYMAP =
- KEYBD =
-endif
-
-ifeq ($(CONFIG_BAGET_MIPS),y)
- KEYBD =
- SERIAL = vac-serial.o
-endif
-
-ifeq ($(CONFIG_NINO),y)
- SERIAL =
-endif
-
-ifneq ($(CONFIG_SUN_SERIAL),)
- SERIAL =
-endif
-
-ifeq ($(CONFIG_QTRONIX_KEYBOARD),y)
- KEYBD = qtronix.o
- KEYMAP = qtronixmap.o
-endif
-
-ifeq ($(CONFIG_DUMMY_KEYB),y)
- KEYBD = dummy_keyb.o
-endif
-
-obj-$(CONFIG_VT) += vt.o vc_screen.o consolemap.o consolemap_deftbl.o $(CONSOLE) selection.o
-obj-$(CONFIG_SERIAL) += $(SERIAL)
-obj-$(CONFIG_PARPORT_SERIAL) += parport_serial.o
-obj-$(CONFIG_SERIAL_HCDP) += hcdp_serial.o
-obj-$(CONFIG_SERIAL_21285) += serial_21285.o
-obj-$(CONFIG_SERIAL_SA1100) += serial_sa1100.o
-obj-$(CONFIG_SERIAL_AMBA) += serial_amba.o
-obj-$(CONFIG_TS_AU1X00_ADS7846) += au1000_ts.o
-obj-$(CONFIG_SERIAL_DEC) += decserial.o
-
-ifndef CONFIG_SUN_KEYBOARD
- obj-$(CONFIG_VT) += keyboard.o $(KEYMAP) $(KEYBD)
-else
- obj-$(CONFIG_PCI) += keyboard.o $(KEYMAP)
-endif
-
-obj-$(CONFIG_HIL) += hp_keyb.o
-obj-$(CONFIG_MAGIC_SYSRQ) += sysrq.o
-obj-$(CONFIG_ATARI_DSP56K) += dsp56k.o
-obj-$(CONFIG_ROCKETPORT) += rocket.o
-obj-$(CONFIG_MOXA_SMARTIO) += mxser.o
-obj-$(CONFIG_MOXA_INTELLIO) += moxa.o
-obj-$(CONFIG_DIGI) += pcxx.o
-obj-$(CONFIG_DIGIEPCA) += epca.o
-obj-$(CONFIG_CYCLADES) += cyclades.o
-obj-$(CONFIG_STALLION) += stallion.o
-obj-$(CONFIG_ISTALLION) += istallion.o
-obj-$(CONFIG_SIBYTE_SB1250_DUART) += sb1250_duart.o
-obj-$(CONFIG_COMPUTONE) += ip2.o ip2main.o
-obj-$(CONFIG_RISCOM8) += riscom8.o
-obj-$(CONFIG_ISI) += isicom.o
-obj-$(CONFIG_ESPSERIAL) += esp.o
-obj-$(CONFIG_SYNCLINK) += synclink.o
-obj-$(CONFIG_SYNCLINKMP) += synclinkmp.o
-obj-$(CONFIG_N_HDLC) += n_hdlc.o
-obj-$(CONFIG_SPECIALIX) += specialix.o
-obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o
-obj-$(CONFIG_A2232) += ser_a2232.o generic_serial.o
-obj-$(CONFIG_SX) += sx.o generic_serial.o
-obj-$(CONFIG_RIO) += rio/rio.o generic_serial.o
-obj-$(CONFIG_SH_SCI) += sh-sci.o generic_serial.o
-obj-$(CONFIG_SERIAL167) += serial167.o
-obj-$(CONFIG_MVME147_SCC) += generic_serial.o vme_scc.o
-obj-$(CONFIG_MVME162_SCC) += generic_serial.o vme_scc.o
-obj-$(CONFIG_BVME6000_SCC) += generic_serial.o vme_scc.o
-obj-$(CONFIG_HVC_CONSOLE) += hvc_console.o
-obj-$(CONFIG_SERIAL_TX3912) += generic_serial.o serial_tx3912.o
-obj-$(CONFIG_TXX927_SERIAL) += serial_txx927.o
-obj-$(CONFIG_SERIAL_TXX9) += generic_serial.o serial_txx9.o
-obj-$(CONFIG_IP22_SERIAL) += sgiserial.o
-obj-$(CONFIG_AU1X00_UART) += au1x00-serial.o
-obj-$(CONFIG_SGI_L1_SERIAL) += sn_serial.o
-
-subdir-$(CONFIG_RIO) += rio
-subdir-$(CONFIG_INPUT) += joystick
-
-obj-$(CONFIG_ATIXL_BUSMOUSE) += atixlmouse.o
-obj-$(CONFIG_LOGIBUSMOUSE) += logibusmouse.o
-obj-$(CONFIG_PRINTER) += lp.o
-obj-$(CONFIG_TIPAR) += tipar.o
-obj-$(CONFIG_OBMOUSE) += obmouse.o
-
-ifeq ($(CONFIG_INPUT),y)
-obj-y += joystick/js.o
-endif
-
-obj-$(CONFIG_FETCHOP) += fetchop.o
-obj-$(CONFIG_BUSMOUSE) += busmouse.o
-obj-$(CONFIG_DTLK) += dtlk.o
-obj-$(CONFIG_R3964) += n_r3964.o
-obj-$(CONFIG_APPLICOM) += applicom.o
-obj-$(CONFIG_SONYPI) += sonypi.o
-obj-$(CONFIG_MS_BUSMOUSE) += msbusmouse.o
-obj-$(CONFIG_82C710_MOUSE) += qpmouse.o
-obj-$(CONFIG_AMIGAMOUSE) += amigamouse.o
-obj-$(CONFIG_ATARIMOUSE) += atarimouse.o
-obj-$(CONFIG_ADBMOUSE) += adbmouse.o
-obj-$(CONFIG_PC110_PAD) += pc110pad.o
-obj-$(CONFIG_MK712_MOUSE) += mk712.o
-obj-$(CONFIG_RTC) += rtc.o
-obj-$(CONFIG_GEN_RTC) += genrtc.o
-obj-$(CONFIG_EFI_RTC) += efirtc.o
-obj-$(CONFIG_SGI_DS1286) += ds1286.o
-obj-$(CONFIG_MIPS_RTC) += mips_rtc.o
-obj-$(CONFIG_SGI_IP27_RTC) += ip27-rtc.o
-ifeq ($(CONFIG_PPC),)
- obj-$(CONFIG_NVRAM) += nvram.o
-endif
-obj-$(CONFIG_TOSHIBA) += toshiba.o
-obj-$(CONFIG_I8K) += i8k.o
-obj-$(CONFIG_DS1620) += ds1620.o
-obj-$(CONFIG_DS1742) += ds1742.o
-obj-$(CONFIG_INTEL_RNG) += i810_rng.o
-obj-$(CONFIG_AMD_RNG) += amd768_rng.o
-obj-$(CONFIG_HW_RANDOM) += hw_random.o
-obj-$(CONFIG_AMD_PM768) += amd76x_pm.o
-obj-$(CONFIG_BRIQ_PANEL) += briq_panel.o
-
-obj-$(CONFIG_ITE_GPIO) += ite_gpio.o
-obj-$(CONFIG_AU1X00_GPIO) += au1000_gpio.o
-obj-$(CONFIG_AU1X00_USB_TTY) += au1000_usbtty.o
-obj-$(CONFIG_AU1X00_USB_RAW) += au1000_usbraw.o
-obj-$(CONFIG_COBALT_LCD) += lcd.o
-
-obj-$(CONFIG_QIC02_TAPE) += tpqic02.o
-
-subdir-$(CONFIG_FTAPE) += ftape
-subdir-$(CONFIG_DRM_OLD) += drm-4.0
-subdir-$(CONFIG_DRM_NEW) += drm
-subdir-$(CONFIG_PCMCIA) += pcmcia
-subdir-$(CONFIG_AGP) += agp
-
-ifeq ($(CONFIG_FTAPE),y)
-obj-y += ftape/ftape.o
-endif
-
-obj-$(CONFIG_H8) += h8.o
-obj-$(CONFIG_PPDEV) += ppdev.o
-obj-$(CONFIG_DZ) += dz.o
-obj-$(CONFIG_NWBUTTON) += nwbutton.o
-obj-$(CONFIG_NWFLASH) += nwflash.o
-obj-$(CONFIG_SCx200) += scx200.o
-obj-$(CONFIG_SCx200_GPIO) += scx200_gpio.o
-
-# Only one watchdog can succeed. We probe the hardware watchdog
-# drivers first, then the softdog driver. This means if your hardware
-# watchdog dies or is 'borrowed' for some reason the software watchdog
-# still gives you some cover.
-
-obj-$(CONFIG_PCWATCHDOG) += pcwd.o
-obj-$(CONFIG_ACQUIRE_WDT) += acquirewdt.o
-obj-$(CONFIG_ADVANTECH_WDT) += advantechwdt.o
-obj-$(CONFIG_IB700_WDT) += ib700wdt.o
-obj-$(CONFIG_MIXCOMWD) += mixcomwd.o
-obj-$(CONFIG_60XX_WDT) += sbc60xxwdt.o
-obj-$(CONFIG_W83877F_WDT) += w83877f_wdt.o
-obj-$(CONFIG_SC520_WDT) += sc520_wdt.o
-obj-$(CONFIG_WDT) += wdt.o
-obj-$(CONFIG_WDTPCI) += wdt_pci.o
-obj-$(CONFIG_21285_WATCHDOG) += wdt285.o
-obj-$(CONFIG_977_WATCHDOG) += wdt977.o
-obj-$(CONFIG_I810_TCO) += i810-tco.o
-obj-$(CONFIG_MACHZ_WDT) += machzwd.o
-obj-$(CONFIG_SH_WDT) += shwdt.o
-obj-$(CONFIG_EUROTECH_WDT) += eurotechwdt.o
-obj-$(CONFIG_ALIM7101_WDT) += alim7101_wdt.o
-obj-$(CONFIG_ALIM1535_WDT) += alim1535d_wdt.o
-obj-$(CONFIG_INDYDOG) += indydog.o
-obj-$(CONFIG_SC1200_WDT) += sc1200wdt.o
-obj-$(CONFIG_SCx200_WDT) += scx200_wdt.o
-obj-$(CONFIG_WAFER_WDT) += wafer5823wdt.o
-obj-$(CONFIG_SOFT_WATCHDOG) += softdog.o
-obj-$(CONFIG_INDYDOG) += indydog.o
-obj-$(CONFIG_8xx_WDT) += mpc8xx_wdt.o
-
-subdir-$(CONFIG_MWAVE) += mwave
-ifeq ($(CONFIG_MWAVE),y)
- obj-y += mwave/mwave.o
-endif
-
-subdir-$(CONFIG_IPMI_HANDLER) += ipmi
-ifeq ($(CONFIG_IPMI_HANDLER),y)
- obj-y += ipmi/ipmi.o
-endif
-
-include $(TOPDIR)/Rules.make
-
-fastdep:
-
-conmakehash: conmakehash.c
- $(HOSTCC) $(HOSTCFLAGS) -o conmakehash conmakehash.c
-
-consolemap_deftbl.c: $(FONTMAPFILE) conmakehash
- ./conmakehash $(FONTMAPFILE) > consolemap_deftbl.c
-
-consolemap_deftbl.o: consolemap_deftbl.c $(TOPDIR)/include/linux/types.h
-
-.DELETE_ON_ERROR:
-
-defkeymap.c: defkeymap.map
- set -e ; loadkeys --mktable $< | sed -e 's/^static *//' > $@
-
-qtronixmap.c: qtronixmap.map
- set -e ; loadkeys --mktable $< | sed -e 's/^static *//' > $@
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/drivers/char/mem.c
--- a/linux-2.4.30-xen-sparse/drivers/char/mem.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,812 +0,0 @@
-/*
- * linux/drivers/char/mem.c
- *
- * Copyright (C) 1991, 1992 Linus Torvalds
- *
- * Added devfs support.
- * Jan-11-1998, C. Scott Ananian <cananian@xxxxxxxxxxxxxxxxxxxx>
- * Shared /dev/zero mmaping support, Feb 2000, Kanoj Sarcar <kanoj@xxxxxxx>
- *
- * MODIFIED FOR XEN by Keir Fraser, 10th July 2003.
- * Linux running on Xen has strange semantics for /dev/mem and /dev/kmem!!
- * 1. mmap will not work on /dev/kmem
- * 2. mmap on /dev/mem interprets the 'file offset' as a machine address
- * rather than a physical address.
- * I don't believe anyone sane mmaps /dev/kmem, but /dev/mem is mmapped
- * to get at memory-mapped I/O spaces (eg. the VESA X server does this).
- * For this to work at all we need to expect machine addresses.
- * Reading/writing of /dev/kmem expects kernel virtual addresses, as usual.
- * Reading/writing of /dev/mem expects 'physical addresses' as usual -- this
- * is because /dev/mem can only read/write existing kernel mappings, which
- * will be normal RAM, and we should present pseudo-physical layout for all
- * except I/O (which is the sticky case that mmap is hacked to deal with).
- */
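
Context, not part of the patch: the comment above is the heart of the Xen variant of this file. User space that maps /dev/mem for memory-mapped I/O must supply a machine address as the file offset. A hedged user-space illustration (the function name and error handling are mine):

    #include <fcntl.h>
    #include <sys/mman.h>
    #include <unistd.h>

    /* Map len bytes of an I/O region; under Xen, machine_addr is a
     * machine address, not a pseudo-physical one. */
    void *map_io(off_t machine_addr, size_t len)
    {
            int fd = open("/dev/mem", O_RDWR | O_SYNC);
            void *p;

            if (fd < 0)
                    return MAP_FAILED;
            p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
                     fd, machine_addr);
            close(fd);      /* an established mapping outlives the fd */
            return p;       /* MAP_FAILED on error */
    }
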
-
-#include <linux/config.h>
-#include <linux/mm.h>
-#include <linux/miscdevice.h>
-#include <linux/tpqic02.h>
-#include <linux/ftape.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/mman.h>
-#include <linux/random.h>
-#include <linux/init.h>
-#include <linux/raw.h>
-#include <linux/tty.h>
-#include <linux/capability.h>
-#include <linux/ptrace.h>
-
-#include <asm/uaccess.h>
-#include <asm/io.h>
-#include <asm/pgalloc.h>
-
-#ifdef CONFIG_I2C
-extern int i2c_init_all(void);
-#endif
-#ifdef CONFIG_FB
-extern void fbmem_init(void);
-#endif
-#ifdef CONFIG_PROM_CONSOLE
-extern void prom_con_init(void);
-#endif
-#ifdef CONFIG_MDA_CONSOLE
-extern void mda_console_init(void);
-#endif
-#if defined(CONFIG_S390_TAPE) && defined(CONFIG_S390_TAPE_CHAR)
-extern void tapechar_init(void);
-#endif
-
-static ssize_t do_write_mem(struct file * file, void *p, unsigned long realp,
- const char * buf, size_t count, loff_t *ppos)
-{
- ssize_t written;
-
- written = 0;
-#if defined(__sparc__) || defined(__mc68000__)
- /* we don't have page 0 mapped on sparc and m68k.. */
- if (realp < PAGE_SIZE) {
- unsigned long sz = PAGE_SIZE-realp;
- if (sz > count) sz = count;
- /* Hmm. Do something? */
- buf+=sz;
- p+=sz;
- count-=sz;
- written+=sz;
- }
-#endif
- if (copy_from_user(p, buf, count))
- return -EFAULT;
- written += count;
- *ppos = realp + written;
- return written;
-}
-
-
-/*
- * This function reads the *physical* memory. The f_pos points directly to the
- * memory location.
- */
-static ssize_t read_mem(struct file * file, char * buf,
- size_t count, loff_t *ppos)
-{
- unsigned long p = *ppos;
- unsigned long end_mem;
- ssize_t read;
-
- end_mem = __pa(high_memory);
- if (p >= end_mem)
- return 0;
- if (count > end_mem - p)
- count = end_mem - p;
- read = 0;
-#if defined(__sparc__) || defined(__mc68000__)
- /* we don't have page 0 mapped on sparc and m68k.. */
- if (p < PAGE_SIZE) {
- unsigned long sz = PAGE_SIZE-p;
- if (sz > count)
- sz = count;
- if (sz > 0) {
- if (clear_user(buf, sz))
- return -EFAULT;
- buf += sz;
- p += sz;
- count -= sz;
- read += sz;
- }
- }
-#endif
- if (copy_to_user(buf, __va(p), count))
- return -EFAULT;
- read += count;
- *ppos = p + read;
- return read;
-}
-
-static ssize_t write_mem(struct file * file, const char * buf,
- size_t count, loff_t *ppos)
-{
- unsigned long p = *ppos;
- unsigned long end_mem;
-
- end_mem = __pa(high_memory);
- if (p >= end_mem)
- return 0;
- if (count > end_mem - p)
- count = end_mem - p;
- return do_write_mem(file, __va(p), p, buf, count, ppos);
-}
-
-#ifndef pgprot_noncached
-
-/*
- * This should probably be per-architecture in <asm/pgtable.h>
- */
-static inline pgprot_t pgprot_noncached(pgprot_t _prot)
-{
- unsigned long prot = pgprot_val(_prot);
-
-#if defined(__i386__) || defined(__x86_64__)
- /* On PPro and successors, PCD alone doesn't always mean
- uncached because of interactions with the MTRRs. PCD | PWT
- means definitely uncached. */
- if (boot_cpu_data.x86 > 3)
- prot |= _PAGE_PCD | _PAGE_PWT;
-#elif defined(__powerpc__)
- prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;
-#elif defined(__mc68000__)
-#ifdef SUN3_PAGE_NOCACHE
- if (MMU_IS_SUN3)
- prot |= SUN3_PAGE_NOCACHE;
- else
-#endif
- if (MMU_IS_851 || MMU_IS_030)
- prot |= _PAGE_NOCACHE030;
- /* Use no-cache mode, serialized */
- else if (MMU_IS_040 || MMU_IS_060)
- prot = (prot & _CACHEMASK040) | _PAGE_NOCACHE_S;
-#endif
-
- return __pgprot(prot);
-}
-
-#endif /* !pgprot_noncached */
-
-/*
- * Architectures vary in how they handle caching for addresses
- * outside of main memory.
- */
-static inline int noncached_address(unsigned long addr)
-{
-#if defined(__i386__)
- /*
- * On the PPro and successors, the MTRRs are used to set
- * memory types for physical addresses outside main memory,
- * so blindly setting PCD or PWT on those pages is wrong.
- * For Pentiums and earlier, the surround logic should disable
- * caching for the high addresses through the KEN pin, but
- * we maintain the tradition of paranoia in this code.
- */
- return !( test_bit(X86_FEATURE_MTRR, &boot_cpu_data.x86_capability) ||
- test_bit(X86_FEATURE_K6_MTRR, &boot_cpu_data.x86_capability) ||
- test_bit(X86_FEATURE_CYRIX_ARR, &boot_cpu_data.x86_capability) ||
- test_bit(X86_FEATURE_CENTAUR_MCR, &boot_cpu_data.x86_capability) )
- && addr >= __pa(high_memory);
-#else
- return addr >= __pa(high_memory);
-#endif
-}
-
-#if !defined(CONFIG_XEN)
-static int mmap_mem(struct file * file, struct vm_area_struct * vma)
-{
- unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
-
- /*
- * Accessing memory above the top the kernel knows about or
- * through a file pointer that was marked O_SYNC will be
- * done non-cached.
- */
- if (noncached_address(offset) || (file->f_flags & O_SYNC))
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
- /* Don't try to swap out physical pages.. */
- vma->vm_flags |= VM_RESERVED;
-
- /*
- * Don't dump addresses that are not real memory to a core file.
- */
- if (offset >= __pa(high_memory) || (file->f_flags & O_SYNC))
- vma->vm_flags |= VM_IO;
-
- if (remap_page_range(vma->vm_start, offset, vma->vm_end-vma->vm_start,
- vma->vm_page_prot))
- return -EAGAIN;
- return 0;
-}
-#elif !defined(CONFIG_XEN_PRIVILEGED_GUEST)
-static int mmap_mem(struct file * file, struct vm_area_struct * vma)
-{
- return -ENXIO;
-}
-#else
-static int mmap_mem(struct file * file, struct vm_area_struct * vma)
-{
- unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
-
- if (!(xen_start_info.flags & SIF_PRIVILEGED))
- return -ENXIO;
-
- /* DONTCOPY is essential for Xen as copy_page_range is broken. */
- vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- if (direct_remap_area_pages(vma->vm_mm, vma->vm_start, offset,
- vma->vm_end-vma->vm_start, vma->vm_page_prot,
- DOMID_IO))
- return -EAGAIN;
- return 0;
-}
-#endif /* CONFIG_XEN */
-
-/*
- * This function reads the *virtual* memory as seen by the kernel.
- */
-static ssize_t read_kmem(struct file *file, char *buf,
- size_t count, loff_t *ppos)
-{
- unsigned long p = *ppos;
- ssize_t read = 0;
- ssize_t virtr = 0;
- char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
-
- if (p < (unsigned long) high_memory) {
- read = count;
- if (count > (unsigned long) high_memory - p)
- read = (unsigned long) high_memory - p;
-
-#if defined(__sparc__) || defined(__mc68000__)
- /* we don't have page 0 mapped on sparc and m68k.. */
- if (p < PAGE_SIZE && read > 0) {
- size_t tmp = PAGE_SIZE - p;
- if (tmp > read) tmp = read;
- if (clear_user(buf, tmp))
- return -EFAULT;
- buf += tmp;
- p += tmp;
- read -= tmp;
- count -= tmp;
- }
-#endif
- if (copy_to_user(buf, (char *)p, read))
- return -EFAULT;
- p += read;
- buf += read;
- count -= read;
- }
-
- if (count > 0) {
- kbuf = (char *)__get_free_page(GFP_KERNEL);
- if (!kbuf)
- return -ENOMEM;
- while (count > 0) {
- int len = count;
-
- if (len > PAGE_SIZE)
- len = PAGE_SIZE;
- len = vread(kbuf, (char *)p, len);
- if (!len)
- break;
- if (copy_to_user(buf, kbuf, len)) {
- free_page((unsigned long)kbuf);
- return -EFAULT;
- }
- count -= len;
- buf += len;
- virtr += len;
- p += len;
- }
- free_page((unsigned long)kbuf);
- }
- *ppos = p;
- return virtr + read;
-}
-
-extern long vwrite(char *buf, char *addr, unsigned long count);
-
-/*
- * This function writes to the *virtual* memory as seen by the kernel.
- */
-static ssize_t write_kmem(struct file * file, const char * buf,
- size_t count, loff_t *ppos)
-{
- unsigned long p = *ppos;
- ssize_t wrote = 0;
- ssize_t virtr = 0;
- char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
-
- if (p < (unsigned long) high_memory) {
- wrote = count;
- if (count > (unsigned long) high_memory - p)
- wrote = (unsigned long) high_memory - p;
-
- wrote = do_write_mem(file, (void*)p, p, buf, wrote, ppos);
-
- p += wrote;
- buf += wrote;
- count -= wrote;
- }
-
- if (count > 0) {
- kbuf = (char *)__get_free_page(GFP_KERNEL);
- if (!kbuf)
- return -ENOMEM;
- while (count > 0) {
- int len = count;
-
- if (len > PAGE_SIZE)
- len = PAGE_SIZE;
- if (len && copy_from_user(kbuf, buf, len)) {
- free_page((unsigned long)kbuf);
- return -EFAULT;
- }
- len = vwrite(kbuf, (char *)p, len);
- count -= len;
- buf += len;
- virtr += len;
- p += len;
- }
- free_page((unsigned long)kbuf);
- }
-
- *ppos = p;
- return virtr + wrote;
-}
-
-#if defined(CONFIG_ISA) || !defined(__mc68000__)
-static ssize_t read_port(struct file * file, char * buf,
- size_t count, loff_t *ppos)
-{
- unsigned long i = *ppos;
- char *tmp = buf;
-
- if (verify_area(VERIFY_WRITE,buf,count))
- return -EFAULT;
- while (count-- > 0 && i < 65536) {
- if (__put_user(inb(i),tmp) < 0)
- return -EFAULT;
- i++;
- tmp++;
- }
- *ppos = i;
- return tmp-buf;
-}
-
-static ssize_t write_port(struct file * file, const char * buf,
- size_t count, loff_t *ppos)
-{
- unsigned long i = *ppos;
- const char * tmp = buf;
-
- if (verify_area(VERIFY_READ,buf,count))
- return -EFAULT;
- while (count-- > 0 && i < 65536) {
- char c;
- if (__get_user(c, tmp))
- return -EFAULT;
- outb(c,i);
- i++;
- tmp++;
- }
- *ppos = i;
- return tmp-buf;
-}
-#endif
-
-static ssize_t read_null(struct file * file, char * buf,
- size_t count, loff_t *ppos)
-{
- return 0;
-}
-
-static ssize_t write_null(struct file * file, const char * buf,
- size_t count, loff_t *ppos)
-{
- return count;
-}
-
-/*
- * For fun, we are using the MMU for this.
- */
-static inline size_t read_zero_pagealigned(char * buf, size_t size)
-{
- struct mm_struct *mm;
- struct vm_area_struct * vma;
- unsigned long addr=(unsigned long)buf;
-
- mm = current->mm;
- /* Oops, this was forgotten before. -ben */
- down_read(&mm->mmap_sem);
-
- /* For private mappings, just map in zero pages. */
- for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
- unsigned long count;
-
- if (vma->vm_start > addr || (vma->vm_flags & VM_WRITE) == 0)
- goto out_up;
- if (vma->vm_flags & VM_SHARED)
- break;
- count = vma->vm_end - addr;
- if (count > size)
- count = size;
-
- zap_page_range(mm, addr, count);
- zeromap_page_range(addr, count, PAGE_COPY);
-
- size -= count;
- buf += count;
- addr += count;
- if (size == 0)
- goto out_up;
- }
-
- up_read(&mm->mmap_sem);
-
- /* The shared case is hard. Let's do the conventional zeroing. */
- do {
- unsigned long unwritten = clear_user(buf, PAGE_SIZE);
- if (unwritten)
- return size + unwritten - PAGE_SIZE;
- if (current->need_resched)
- schedule();
- buf += PAGE_SIZE;
- size -= PAGE_SIZE;
- } while (size);
-
- return size;
-out_up:
- up_read(&mm->mmap_sem);
- return size;
-}
-
-static ssize_t read_zero(struct file * file, char * buf,
- size_t count, loff_t *ppos)
-{
- unsigned long left, unwritten, written = 0;
-
- if (!count)
- return 0;
-
- if (!access_ok(VERIFY_WRITE, buf, count))
- return -EFAULT;
-
- left = count;
-
- /* do we want to be clever? Arbitrary cut-off */
- if (count >= PAGE_SIZE*4) {
- unsigned long partial;
-
- /* How much left of the page? */
- partial = (PAGE_SIZE-1) & -(unsigned long) buf;
- unwritten = clear_user(buf, partial);
- written = partial - unwritten;
- if (unwritten)
- goto out;
- left -= partial;
- buf += partial;
- unwritten = read_zero_pagealigned(buf, left & PAGE_MASK);
- written += (left & PAGE_MASK) - unwritten;
- if (unwritten)
- goto out;
- buf += left & PAGE_MASK;
- left &= ~PAGE_MASK;
- }
- unwritten = clear_user(buf, left);
- written += left - unwritten;
-out:
- return written ? written : -EFAULT;
-}
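
Illustration, not from the patch: the PAGE_SIZE*4 cut-off above means small reads are plain clear_user() copies while bulk reads take the zero-mapping path. For example (sizes arbitrary):

    char *buf = malloc(64 * 1024);
    int fd = open("/dev/zero", O_RDONLY);

    /* count >= PAGE_SIZE*4, so read_zero_pagealigned() maps zero pages
     * over most of buf instead of copying zero bytes one page at a time */
    ssize_t n = read(fd, buf, 64 * 1024);
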
-
-static int mmap_zero(struct file * file, struct vm_area_struct * vma)
-{
- if (vma->vm_flags & VM_SHARED)
- return shmem_zero_setup(vma);
- if (zeromap_page_range(vma->vm_start, vma->vm_end - vma->vm_start, vma->vm_page_prot))
- return -EAGAIN;
- return 0;
-}
-
-static ssize_t write_full(struct file * file, const char * buf,
- size_t count, loff_t *ppos)
-{
- return -ENOSPC;
-}
-
-/*
- * Special lseek() function for /dev/null and /dev/zero. Most notably, you
- * can fopen() both devices with "a" now. This was previously impossible.
- * -- SRB.
- */
-
-static loff_t null_lseek(struct file * file, loff_t offset, int orig)
-{
- return file->f_pos = 0;
-}
-
-/*
- * The memory devices use the full 32/64 bits of the offset, and so we cannot
- * check against negative addresses: they are ok. The return value is weird,
- * though, in that case (0).
- *
- * also note that seeking relative to the "end of file" isn't supported:
- * it has no meaning, so it returns -EINVAL.
- */
-static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
-{
- loff_t ret;
-
- switch (orig) {
- case 0:
- file->f_pos = offset;
- ret = file->f_pos;
- force_successful_syscall_return();
- break;
- case 1:
- file->f_pos += offset;
- ret = file->f_pos;
- force_successful_syscall_return();
- break;
- default:
- ret = -EINVAL;
- }
- return ret;
-}
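
Illustration, not from the patch: because SEEK_END returns -EINVAL here, callers of /dev/mem position absolutely. For instance (the VGA window offset is only an example):

    int fd = open("/dev/mem", O_RDONLY);
    char buf[256];

    if (lseek(fd, 0xA0000, SEEK_SET) != (off_t)-1)
            read(fd, buf, sizeof(buf));     /* served by read_mem() above */
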
-
-static int open_port(struct inode * inode, struct file * filp)
-{
- return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
-}
-
-struct page *kmem_vm_nopage(struct vm_area_struct *vma, unsigned long address, int write)
-{
- unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
- unsigned long kaddr;
- pgd_t *pgd;
- pmd_t *pmd;
- pte_t *ptep, pte;
- struct page *page = NULL;
-
- /* address is user VA; convert to kernel VA of desired page */
- kaddr = (address - vma->vm_start) + offset;
- kaddr = VMALLOC_VMADDR(kaddr);
-
- spin_lock(&init_mm.page_table_lock);
-
- /* Lookup page structure for kernel VA */
- pgd = pgd_offset(&init_mm, kaddr);
- if (pgd_none(*pgd) || pgd_bad(*pgd))
- goto out;
- pmd = pmd_offset(pgd, kaddr);
- if (pmd_none(*pmd) || pmd_bad(*pmd))
- goto out;
- ptep = pte_offset(pmd, kaddr);
- if (!ptep)
- goto out;
- pte = *ptep;
- if (!pte_present(pte))
- goto out;
- if (write && !pte_write(pte))
- goto out;
- page = pte_page(pte);
- if (!VALID_PAGE(page)) {
- page = NULL;
- goto out;
- }
-
- /* Increment reference count on page */
- get_page(page);
-
-out:
- spin_unlock(&init_mm.page_table_lock);
-
- return page;
-}
-
-struct vm_operations_struct kmem_vm_ops = {
- nopage: kmem_vm_nopage,
-};
-
-static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
-{
- unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
- unsigned long size = vma->vm_end - vma->vm_start;
-
- /*
- * If the user is not attempting to mmap a high memory address then
- * the standard mmap_mem mechanism will work. High memory addresses
- * need special handling, as remap_page_range expects a physically-
- * contiguous range of kernel addresses (such as obtained in kmalloc).
- */
- if ((offset + size) < (unsigned long) high_memory)
- return mmap_mem(file, vma);
-
- /*
- * Accessing memory above the top the kernel knows about or
- * through a file pointer that was marked O_SYNC will be
- * done non-cached.
- */
- if (noncached_address(offset) || (file->f_flags & O_SYNC))
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
- /* Don't do anything here; "nopage" will fill the holes */
- vma->vm_ops = &kmem_vm_ops;
-
- /* Don't try to swap out physical pages.. */
- vma->vm_flags |= VM_RESERVED;
-
- /*
- * Don't dump addresses that are not real memory to a core file.
- */
- vma->vm_flags |= VM_IO;
-
- return 0;
-}
-
-#define zero_lseek null_lseek
-#define full_lseek null_lseek
-#define write_zero write_null
-#define read_full read_zero
-#define open_mem open_port
-#define open_kmem open_mem
-
-static struct file_operations mem_fops = {
- llseek: memory_lseek,
- read: read_mem,
- write: write_mem,
- mmap: mmap_mem,
- open: open_mem,
-};
-
-static struct file_operations kmem_fops = {
- llseek: memory_lseek,
- read: read_kmem,
- write: write_kmem,
-#if !defined(CONFIG_XEN)
- mmap: mmap_kmem,
-#endif
- open: open_kmem,
-};
-
-static struct file_operations null_fops = {
- llseek: null_lseek,
- read: read_null,
- write: write_null,
-};
-
-#if defined(CONFIG_ISA) || !defined(__mc68000__)
-static struct file_operations port_fops = {
- llseek: memory_lseek,
- read: read_port,
- write: write_port,
- open: open_port,
-};
-#endif
-
-static struct file_operations zero_fops = {
- llseek: zero_lseek,
- read: read_zero,
- write: write_zero,
- mmap: mmap_zero,
-};
-
-static struct file_operations full_fops = {
- llseek: full_lseek,
- read: read_full,
- write: write_full,
-};
-
-static int memory_open(struct inode * inode, struct file * filp)
-{
- switch (MINOR(inode->i_rdev)) {
- case 1:
- filp->f_op = &mem_fops;
- break;
- case 2:
- filp->f_op = &kmem_fops;
- break;
- case 3:
- filp->f_op = &null_fops;
- break;
-#if defined(CONFIG_ISA) || !defined(__mc68000__)
- case 4:
- filp->f_op = &port_fops;
- break;
-#endif
- case 5:
- filp->f_op = &zero_fops;
- break;
- case 7:
- filp->f_op = &full_fops;
- break;
- case 8:
- filp->f_op = &random_fops;
- break;
- case 9:
- filp->f_op = &urandom_fops;
- break;
- default:
- return -ENXIO;
- }
- if (filp->f_op && filp->f_op->open)
- return filp->f_op->open(inode,filp);
- return 0;
-}
-
-void __init memory_devfs_register (void)
-{
- /* These are never unregistered */
- static const struct {
- unsigned short minor;
- char *name;
- umode_t mode;
- struct file_operations *fops;
- } list[] = { /* list of minor devices */
- {1, "mem", S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
- {2, "kmem", S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops},
- {3, "null", S_IRUGO | S_IWUGO, &null_fops},
-#if defined(CONFIG_ISA) || !defined(__mc68000__)
- {4, "port", S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},
-#endif
- {5, "zero", S_IRUGO | S_IWUGO, &zero_fops},
- {7, "full", S_IRUGO | S_IWUGO, &full_fops},
- {8, "random", S_IRUGO | S_IWUSR, &random_fops},
- {9, "urandom", S_IRUGO | S_IWUSR, &urandom_fops}
- };
- int i;
-
- for (i=0; i<(sizeof(list)/sizeof(*list)); i++)
- devfs_register (NULL, list[i].name, DEVFS_FL_NONE,
- MEM_MAJOR, list[i].minor,
- list[i].mode | S_IFCHR,
- list[i].fops, NULL);
-}
-
-static struct file_operations memory_fops = {
- open: memory_open, /* just a selector for the real open */
-};
-
-int __init chr_dev_init(void)
-{
- if (devfs_register_chrdev(MEM_MAJOR,"mem",&memory_fops))
- printk("unable to get major %d for memory devs\n", MEM_MAJOR);
- memory_devfs_register();
- rand_initialize();
-#ifdef CONFIG_I2C
- i2c_init_all();
-#endif
-#if defined (CONFIG_FB)
- fbmem_init();
-#endif
-#if defined (CONFIG_PROM_CONSOLE)
- prom_con_init();
-#endif
-#if defined (CONFIG_MDA_CONSOLE)
- mda_console_init();
-#endif
- tty_init();
-#ifdef CONFIG_M68K_PRINTER
- lp_m68k_init();
-#endif
- misc_init();
-#if CONFIG_QIC02_TAPE
- qic02_tape_init();
-#endif
-#ifdef CONFIG_FTAPE
- ftape_init();
-#endif
-#if defined(CONFIG_S390_TAPE) && defined(CONFIG_S390_TAPE_CHAR)
- tapechar_init();
-#endif
- return 0;
-}
-
-__initcall(chr_dev_init);
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/drivers/char/tty_io.c
--- a/linux-2.4.30-xen-sparse/drivers/char/tty_io.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,2891 +0,0 @@
-/*
- * linux/drivers/char/tty_io.c
- *
- * Copyright (C) 1991, 1992 Linus Torvalds
- */
-
-/*
- * 'tty_io.c' gives an orthogonal feeling to tty's, be they consoles
- * or rs-channels. It also implements echoing, cooked mode etc.
- *
- * Kill-line thanks to John T Kohl, who also corrected VMIN = VTIME = 0.
- *
- * Modified by Theodore Ts'o, 9/14/92, to dynamically allocate the
- * tty_struct and tty_queue structures. Previously there was an array
- * of 256 tty_struct's which was statically allocated, and the
- * tty_queue structures were allocated at boot time. Both are now
- * dynamically allocated only when the tty is open.
- *
- * Also restructured routines so that there is more of a separation
- * between the high-level tty routines (tty_io.c and tty_ioctl.c) and
- * the low-level tty routines (serial.c, pty.c, console.c). This
- * makes for cleaner and more compact code. -TYT, 9/17/92
- *
- * Modified by Fred N. van Kempen, 01/29/93, to add line disciplines
- * which can be dynamically activated and de-activated by the line
- * discipline handling modules (like SLIP).
- *
- * NOTE: pay no attention to the line discipline code (yet); its
- * interface is still subject to change in this version...
- * -- TYT, 1/31/92
- *
- * Added functionality to the OPOST tty handling. No delays, but all
- * other bits should be there.
- * -- Nick Holloway <alfie@xxxxxxxxxxxxxxxxx>, 27th May 1993.
- *
- * Rewrote canonical mode and added more termios flags.
- * -- julian@xxxxxxxxxxxxxxxxxxxxxx (J. Cowley), 13Jan94
- *
- * Reorganized FASYNC support so mouse code can share it.
- * -- ctm@xxxxxxxx, 9Sep95
- *
- * New TIOCLINUX variants added.
- * -- mj@xxxxxxxxxxxxxxxxx, 19-Nov-95
- *
- * Restrict vt switching via ioctl()
- * -- grif@xxxxxxxxxx, 5-Dec-95
- *
- * Move console and virtual terminal code to more appropriate files,
- * implement CONFIG_VT and generalize console device interface.
- * -- Marko Kohtala <Marko.Kohtala@xxxxxx>, March 97
- *
- * Rewrote init_dev and release_dev to eliminate races.
- * -- Bill Hawes <whawes@xxxxxxxx>, June 97
- *
- * Added devfs support.
- * -- C. Scott Ananian <cananian@xxxxxxxxxxxxxxxxxxxx>, 13-Jan-1998
- *
- * Added support for a Unix98-style ptmx device.
- * -- C. Scott Ananian <cananian@xxxxxxxxxxxxxxxxxxxx>, 14-Jan-1998
- *
- * Reduced memory usage for older ARM systems
- * -- Russell King <rmk@xxxxxxxxxxxxxxxx>
- *
- * Move do_SAK() into process context. Less stack use in devfs functions.
- * alloc_tty_struct() always uses kmalloc() -- Andrew Morton <andrewm@xxxxxxxxxx> 17Mar01
- */
-
-#include <linux/config.h>
-#include <linux/types.h>
-#include <linux/major.h>
-#include <linux/errno.h>
-#include <linux/signal.h>
-#include <linux/fcntl.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/tty.h>
-#include <linux/tty_driver.h>
-#include <linux/tty_flip.h>
-#include <linux/devpts_fs.h>
-#include <linux/file.h>
-#include <linux/console.h>
-#include <linux/timer.h>
-#include <linux/ctype.h>
-#include <linux/kd.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
-#include <linux/proc_fs.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/smp_lock.h>
-
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <asm/bitops.h>
-
-#include <linux/kbd_kern.h>
-#include <linux/vt_kern.h>
-#include <linux/selection.h>
-#include <linux/devfs_fs_kernel.h>
-
-#include <linux/kmod.h>
-
-#ifdef CONFIG_XEN_CONSOLE
-extern void xen_console_init(void);
-#endif
-
-#ifdef CONFIG_VT
-extern void con_init_devfs (void);
-#endif
-
-extern void disable_early_printk(void);
-
-#define CONSOLE_DEV MKDEV(TTY_MAJOR,0)
-#define TTY_DEV MKDEV(TTYAUX_MAJOR,0)
-#define SYSCONS_DEV MKDEV(TTYAUX_MAJOR,1)
-#define PTMX_DEV MKDEV(TTYAUX_MAJOR,2)
-
-#undef TTY_DEBUG_HANGUP
-
-#define TTY_PARANOIA_CHECK 1
-#define CHECK_TTY_COUNT 1
-
-struct termios tty_std_termios; /* for the benefit of tty drivers */
-struct tty_driver *tty_drivers; /* linked list of tty drivers */
-
-#ifdef CONFIG_UNIX98_PTYS
-extern struct tty_driver ptm_driver[]; /* Unix98 pty masters; for /dev/ptmx */
-extern struct tty_driver pts_driver[]; /* Unix98 pty slaves; for /dev/ptmx */
-#endif
-
-static void initialize_tty_struct(struct tty_struct *tty);
-
-static ssize_t tty_read(struct file *, char *, size_t, loff_t *);
-static ssize_t tty_write(struct file *, const char *, size_t, loff_t *);
-static unsigned int tty_poll(struct file *, poll_table *);
-static int tty_open(struct inode *, struct file *);
-static int tty_release(struct inode *, struct file *);
-int tty_ioctl(struct inode * inode, struct file * file,
- unsigned int cmd, unsigned long arg);
-static int tty_fasync(int fd, struct file * filp, int on);
-extern int vme_scc_init (void);
-extern long vme_scc_console_init(void);
-extern int serial167_init(void);
-extern long serial167_console_init(void);
-extern void console_8xx_init(void);
-extern void au1x00_serial_console_init(void);
-extern int rs_8xx_init(void);
-extern void mac_scc_console_init(void);
-extern void hwc_console_init(void);
-extern void hwc_tty_init(void);
-extern void con3215_init(void);
-extern void tty3215_init(void);
-extern void tub3270_con_init(void);
-extern void tub3270_init(void);
-extern void rs285_console_init(void);
-extern void sa1100_rs_console_init(void);
-extern void sgi_serial_console_init(void);
-extern void sn_sal_serial_console_init(void);
-extern void sci_console_init(void);
-extern void dec_serial_console_init(void);
-extern void tx3912_console_init(void);
-extern void tx3912_rs_init(void);
-extern void txx927_console_init(void);
-extern void txx9_rs_init(void);
-extern void txx9_serial_console_init(void);
-extern void sb1250_serial_console_init(void);
-extern void arc_console_init(void);
-extern int hvc_console_init(void);
-
-#ifndef MIN
-#define MIN(a,b) ((a) < (b) ? (a) : (b))
-#endif
-#ifndef MAX
-#define MAX(a,b) ((a) < (b) ? (b) : (a))
-#endif
-
-static struct tty_struct *alloc_tty_struct(void)
-{
- struct tty_struct *tty;
-
- tty = kmalloc(sizeof(struct tty_struct), GFP_KERNEL);
- if (tty)
- memset(tty, 0, sizeof(struct tty_struct));
- return tty;
-}
-
-static inline void free_tty_struct(struct tty_struct *tty)
-{
- kfree(tty);
-}
-
-/*
- * This routine returns the name of tty.
- */
-static char *
-_tty_make_name(struct tty_struct *tty, const char *name, char *buf)
-{
- int idx = (tty)?MINOR(tty->device) - tty->driver.minor_start:0;
-
- if (!tty) /* Hmm. NULL pointer. That's fun. */
- strcpy(buf, "NULL tty");
- else
- sprintf(buf, name,
- idx + tty->driver.name_base);
-
- return buf;
-}
-
-#define TTY_NUMBER(tty) (MINOR((tty)->device) - (tty)->driver.minor_start + \
- (tty)->driver.name_base)
-
-char *tty_name(struct tty_struct *tty, char *buf)
-{
- return _tty_make_name(tty, (tty)?tty->driver.name:NULL, buf);
-}
-
-inline int tty_paranoia_check(struct tty_struct *tty, kdev_t device,
- const char *routine)
-{
-#ifdef TTY_PARANOIA_CHECK
- static const char badmagic[] = KERN_WARNING
- "Warning: bad magic number for tty struct (%s) in %s\n";
- static const char badtty[] = KERN_WARNING
- "Warning: null TTY for (%s) in %s\n";
-
- if (!tty) {
- printk(badtty, kdevname(device), routine);
- return 1;
- }
- if (tty->magic != TTY_MAGIC) {
- printk(badmagic, kdevname(device), routine);
- return 1;
- }
-#endif
- return 0;
-}
-
-static int check_tty_count(struct tty_struct *tty, const char *routine)
-{
-#ifdef CHECK_TTY_COUNT
- struct list_head *p;
- int count = 0;
-
- file_list_lock();
- for(p = tty->tty_files.next; p != &tty->tty_files; p = p->next) {
- if(list_entry(p, struct file, f_list)->private_data == tty)
- count++;
- }
- file_list_unlock();
- if (tty->driver.type == TTY_DRIVER_TYPE_PTY &&
- tty->driver.subtype == PTY_TYPE_SLAVE &&
- tty->link && tty->link->count)
- count++;
- if (tty->count != count) {
- printk(KERN_WARNING "Warning: dev (%s) tty->count(%d) "
- "!= #fd's(%d) in %s\n",
- kdevname(tty->device), tty->count, count, routine);
- return count;
- }
-#endif
- return 0;
-}
-
-/*
- * This is probably overkill for real world processors but
- * they are not on hot paths so a little discipline won't do
- * any harm.
- */
-
-static void tty_set_termios_ldisc(struct tty_struct *tty, int num)
-{
- down(&tty->termios_sem);
- tty->termios->c_line = num;
- up(&tty->termios_sem);
-}
-
-/*
- * This guards the refcounted line discipline lists. The lock
- * must be taken with irqs off because there are hangup path
- * callers who will do ldisc lookups and cannot sleep.
- */
-
-spinlock_t tty_ldisc_lock = SPIN_LOCK_UNLOCKED;
-DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_wait);
-struct tty_ldisc tty_ldiscs[NR_LDISCS]; /* line disc dispatch table */
-
-int tty_register_ldisc(int disc, struct tty_ldisc *new_ldisc)
-{
-
- unsigned long flags;
- int ret = 0;
-
- if (disc < N_TTY || disc >= NR_LDISCS)
- return -EINVAL;
-
- spin_lock_irqsave(&tty_ldisc_lock, flags);
- if (new_ldisc) {
- tty_ldiscs[disc] = *new_ldisc;
- tty_ldiscs[disc].num = disc;
- tty_ldiscs[disc].flags |= LDISC_FLAG_DEFINED;
- tty_ldiscs[disc].refcount = 0;
- } else {
- if(tty_ldiscs[disc].refcount)
- ret = -EBUSY;
- else
- tty_ldiscs[disc].flags &= ~LDISC_FLAG_DEFINED;
- }
- spin_unlock_irqrestore(&tty_ldisc_lock, flags);
-
- return ret;
-
-}
-
-
-EXPORT_SYMBOL(tty_register_ldisc);
-
-struct tty_ldisc *tty_ldisc_get(int disc)
-{
- unsigned long flags;
- struct tty_ldisc *ld;
-
- if (disc < N_TTY || disc >= NR_LDISCS)
- return NULL;
-
- spin_lock_irqsave(&tty_ldisc_lock, flags);
-
- ld = &tty_ldiscs[disc];
- /* Check the entry is defined */
- if(ld->flags & LDISC_FLAG_DEFINED)
- ld->refcount++;
- else
- ld = NULL;
- spin_unlock_irqrestore(&tty_ldisc_lock, flags);
- return ld;
-}
-
-EXPORT_SYMBOL_GPL(tty_ldisc_get);
-
-void tty_ldisc_put(int disc)
-{
- struct tty_ldisc *ld;
- unsigned long flags;
-
- if (disc < N_TTY || disc >= NR_LDISCS)
- BUG();
-
- spin_lock_irqsave(&tty_ldisc_lock, flags);
- ld = &tty_ldiscs[disc];
- if(ld->refcount <= 0)
- BUG();
- ld->refcount--;
- spin_unlock_irqrestore(&tty_ldisc_lock, flags);
-}
-
-EXPORT_SYMBOL_GPL(tty_ldisc_put);
-
-void tty_ldisc_assign(struct tty_struct *tty, struct tty_ldisc *ld)
-{
- tty->ldisc = *ld;
- tty->ldisc.refcount = 0;
-}
-
-/**
- * tty_ldisc_try - internal helper
- * @tty: the tty
- *
- * Make a single attempt to grab and bump the refcount on
- * the tty ldisc. Return 0 on failure or 1 on success. This is
- * used to implement both the waiting and non waiting versions
- * of tty_ldisc_ref
- */
-
-static int tty_ldisc_try(struct tty_struct *tty)
-{
- unsigned long flags;
- struct tty_ldisc *ld;
- int ret = 0;
-
- spin_lock_irqsave(&tty_ldisc_lock, flags);
- ld = &tty->ldisc;
- if(test_bit(TTY_LDISC, &tty->flags))
- {
- ld->refcount++;
- ret = 1;
- }
- spin_unlock_irqrestore(&tty_ldisc_lock, flags);
- return ret;
-}
-
-/**
- * tty_ldisc_ref_wait - wait for the tty ldisc
- * @tty: tty device
- *
- * Dereference the line discipline for the terminal and take a
- * reference to it. If the line discipline is in flux then
- * wait patiently until it changes.
- *
- * Note: Must not be called from an IRQ/timer context. The caller
- * must also be careful not to hold other locks that will deadlock
- * against a discipline change, such as an existing ldisc reference
- * (which we check for)
- */
-
-struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *tty)
-{
- /* wait_event is a macro */
- wait_event(tty_ldisc_wait, tty_ldisc_try(tty));
- return &tty->ldisc;
-}
-
-EXPORT_SYMBOL_GPL(tty_ldisc_ref_wait);
-
-/**
- * tty_ldisc_ref - get the tty ldisc
- * @tty: tty device
- *
- * Dereference the line discipline for the terminal and take a
- * reference to it. If the line discipline is in flux then
- * return NULL. Can be called from IRQ and timer functions.
- */
-
-struct tty_ldisc *tty_ldisc_ref(struct tty_struct *tty)
-{
- if(tty_ldisc_try(tty))
- return &tty->ldisc;
- return NULL;
-}
-
-EXPORT_SYMBOL_GPL(tty_ldisc_ref);
-
-
-void tty_ldisc_deref(struct tty_ldisc *ld)
-{
-
- unsigned long flags;
-
- if(ld == NULL)
- BUG();
-
- spin_lock_irqsave(&tty_ldisc_lock, flags);
- if(ld->refcount == 0)
- printk(KERN_EMERG "tty_ldisc_deref: no references.\n");
- else
- ld->refcount--;
- if(ld->refcount == 0)
- wake_up(&tty_ldisc_wait);
- spin_unlock_irqrestore(&tty_ldisc_lock, flags);
-}
-
-EXPORT_SYMBOL_GPL(tty_ldisc_deref);
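
Context, not part of the patch: these helpers bracket every use of the discipline from IRQ-safe paths. The expected calling pattern in a driver's receive side looks roughly like this (mydrv_rx is an illustrative name):

    static void mydrv_rx(struct tty_struct *tty, const unsigned char *cp, int n)
    {
            struct tty_ldisc *ld = tty_ldisc_ref(tty); /* NULL while in flux */

            if (ld) {
                    if (ld->receive_buf)
                            ld->receive_buf(tty, cp, NULL, n); /* no flag bytes */
                    tty_ldisc_deref(ld); /* may wake a pending discipline change */
            }
    }
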
-
-/**
- * tty_ldisc_enable - allow ldisc use
- * @tty: terminal to activate ldisc on
- *
- * Set the TTY_LDISC flag when the line discipline can be called
- * again. Do necessary wakeups for existing sleepers.
- *
- * Note: nobody should set this bit except via this function. Clearing
- * directly is allowed.
- */
-
-static void tty_ldisc_enable(struct tty_struct *tty)
-{
- set_bit(TTY_LDISC, &tty->flags);
- wake_up(&tty_ldisc_wait);
-}
-
-/**
- * tty_set_ldisc - set line discipline
- * @tty: the terminal to set
- * @ldisc: the line discipline
- *
- * Set the discipline of a tty line. Must be called from a process
- * context.
- */
-
-static int tty_set_ldisc(struct tty_struct *tty, int ldisc)
-{
- int retval = 0;
- struct tty_ldisc o_ldisc;
- char buf[64];
- unsigned long flags;
- struct tty_ldisc *ld;
-
- if ((ldisc < N_TTY) || (ldisc >= NR_LDISCS))
- return -EINVAL;
-
-restart:
-
- if (tty->ldisc.num == ldisc)
- return 0; /* We are already in the desired discipline */
-
- ld = tty_ldisc_get(ldisc);
- /* Eduardo Blanco <ejbs@xxxxxxxxxxxx> */
- /* Cyrus Durgin <cider@xxxxxxxxxxxxx> */
- if (ld == NULL)
- {
- char modname [20];
- sprintf(modname, "tty-ldisc-%d", ldisc);
- request_module (modname);
- ld = tty_ldisc_get(ldisc);
- }
-
- if (ld == NULL)
- return -EINVAL;
-
-
- o_ldisc = tty->ldisc;
- tty_wait_until_sent(tty, 0);
-
- /*
- * Make sure we don't change while someone holds a
- * reference to the line discipline. The TTY_LDISC bit
- * prevents anyone taking a reference once it is clear.
- * We need the lock to avoid racing reference takers.
- */
-
- spin_lock_irqsave(&tty_ldisc_lock, flags);
- if(tty->ldisc.refcount)
- {
- /* Free the new ldisc we grabbed. Must drop the lock
- first. */
- spin_unlock_irqrestore(&tty_ldisc_lock, flags);
- tty_ldisc_put(ldisc);
- /*
- * There are several reasons we may be busy, including
- * random momentary I/O traffic. We must therefore
- * retry. We could distinguish between blocking ops
- * and retries if we made tty_ldisc_wait() smarter. That
- * is up for discussion.
- */
- if(wait_event_interruptible(tty_ldisc_wait, tty->ldisc.refcount == 0) < 0)
- return -ERESTARTSYS;
- goto restart;
- }
- clear_bit(TTY_LDISC, &tty->flags);
- clear_bit(TTY_DONT_FLIP, &tty->flags);
- spin_unlock_irqrestore(&tty_ldisc_lock, flags);
-
- /*
- * From this point on we know nobody has an ldisc
- * usage reference, nor can they obtain one until
- * we say so later on.
- */
-
- /*
- * Wait for ->hangup_work and ->flip.work handlers to terminate
- */
- run_task_queue(&tq_timer);
- flush_scheduled_tasks();
-
- /* Shutdown the current discipline. */
- if (tty->ldisc.close)
- (tty->ldisc.close)(tty);
-
- /* Now set up the new line discipline. */
- tty_ldisc_assign(tty, ld);
- tty_set_termios_ldisc(tty, ldisc);
- if (tty->ldisc.open)
- retval = (tty->ldisc.open)(tty);
- if (retval < 0) {
- tty_ldisc_put(ldisc);
- /* There is an outstanding reference here so this is safe */
- tty_ldisc_assign(tty, tty_ldisc_get(o_ldisc.num));
- tty_set_termios_ldisc(tty, tty->ldisc.num);
- if (tty->ldisc.open && (tty->ldisc.open(tty) < 0)) {
- tty_ldisc_put(o_ldisc.num);
- /* This driver is always present */
- tty_ldisc_assign(tty, tty_ldisc_get(N_TTY));
- tty_set_termios_ldisc(tty, N_TTY);
- if (tty->ldisc.open) {
- int r = tty->ldisc.open(tty);
-
- if (r < 0)
- panic("Couldn't open N_TTY ldisc for "
- "%s --- error %d.",
- tty_name(tty, buf), r);
- }
- }
- }
- /* At this point we hold a reference to the new ldisc and a
- reference to the old ldisc. If we ended up flipping back
- to the existing ldisc we have two references to it */
-
- if (tty->ldisc.num != o_ldisc.num && tty->driver.set_ldisc)
- tty->driver.set_ldisc(tty);
-
- tty_ldisc_put(o_ldisc.num);
-
- /*
- * Allow ldisc referencing to occur as soon as the driver
- * ldisc callback completes.
- */
- tty_ldisc_enable(tty);
-
- return retval;
-}
-
-/*
- * This routine returns a tty driver structure, given a device number
- */
-struct tty_driver *get_tty_driver(kdev_t device)
-{
- int major, minor;
- struct tty_driver *p;
-
- minor = MINOR(device);
- major = MAJOR(device);
-
- for (p = tty_drivers; p; p = p->next) {
- if (p->major != major)
- continue;
- if (minor < p->minor_start)
- continue;
- if (minor >= p->minor_start + p->num)
- continue;
- return p;
- }
- return NULL;
-}
-
-/*
- * If we try to write to, or set the state of, a terminal and we're
- * not in the foreground, send a SIGTTOU. If the signal is blocked or
- * ignored, go ahead and perform the operation. (POSIX 7.2)
- */
-int tty_check_change(struct tty_struct * tty)
-{
- if (current->tty != tty)
- return 0;
- if (tty->pgrp <= 0) {
- printk(KERN_WARNING "tty_check_change: tty->pgrp <= 0!\n");
- return 0;
- }
- if (current->pgrp == tty->pgrp)
- return 0;
- if (is_ignored(SIGTTOU))
- return 0;
- if (is_orphaned_pgrp(current->pgrp))
- return -EIO;
- (void) kill_pg(current->pgrp,SIGTTOU,1);
- return -ERESTARTSYS;
-}
-
-static ssize_t hung_up_tty_read(struct file * file, char * buf,
- size_t count, loff_t *ppos)
-{
- /* Can't seek (pread) on ttys. */
- if (ppos != &file->f_pos)
- return -ESPIPE;
- return 0;
-}
-
-static ssize_t hung_up_tty_write(struct file * file, const char * buf,
- size_t count, loff_t *ppos)
-{
- /* Can't seek (pwrite) on ttys. */
- if (ppos != &file->f_pos)
- return -ESPIPE;
- return -EIO;
-}
-
-/* No kernel lock held - none needed ;) */
-static unsigned int hung_up_tty_poll(struct file * filp, poll_table * wait)
-{
- return POLLIN | POLLOUT | POLLERR | POLLHUP | POLLRDNORM | POLLWRNORM;
-}
-
-static int hung_up_tty_ioctl(struct inode * inode, struct file * file,
- unsigned int cmd, unsigned long arg)
-{
- return cmd == TIOCSPGRP ? -ENOTTY : -EIO;
-}
-
-static struct file_operations tty_fops = {
- llseek: no_llseek,
- read: tty_read,
- write: tty_write,
- poll: tty_poll,
- ioctl: tty_ioctl,
- open: tty_open,
- release: tty_release,
- fasync: tty_fasync,
-};
-
-static struct file_operations hung_up_tty_fops = {
- llseek: no_llseek,
- read: hung_up_tty_read,
- write: hung_up_tty_write,
- poll: hung_up_tty_poll,
- ioctl: hung_up_tty_ioctl,
- release: tty_release,
-};
-
-static spinlock_t redirect_lock = SPIN_LOCK_UNLOCKED;
-static struct file *redirect;
-
-/**
- * tty_wakeup - request more data
- * @tty: terminal
- *
- * Internal and external helper for wakeups of tty. This function
- * informs the line discipline if present that the driver is ready
- * to receive more output data.
- */
-
-void tty_wakeup(struct tty_struct *tty)
-{
- struct tty_ldisc *ld;
-
- if (test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags)) {
- ld = tty_ldisc_ref(tty);
- if(ld) {
- if(ld->write_wakeup)
- ld->write_wakeup(tty);
- tty_ldisc_deref(ld);
- }
- }
- wake_up_interruptible(&tty->write_wait);
-}
-
-/*
- * tty_wakeup/tty_ldisc_flush are actually _GPL exports but we can't do
- * that in 2.4 for modutils compat reasons.
- */
-EXPORT_SYMBOL(tty_wakeup);
-
-
-void tty_ldisc_flush(struct tty_struct *tty)
-{
- struct tty_ldisc *ld = tty_ldisc_ref(tty);
- if(ld) {
- if(ld->flush_buffer)
- ld->flush_buffer(tty);
- tty_ldisc_deref(ld);
- }
-}
-
-
-/*
- * tty_wakeup/tty_ldisc_flush are actually _GPL exports but we can't do
- * that in 2.4 for modutils compat reasons.
- */
-EXPORT_SYMBOL(tty_ldisc_flush);
-
-void do_tty_hangup(void *data)
-{
- struct tty_struct *tty = (struct tty_struct *) data;
- struct file * cons_filp = NULL;
- struct file *f = NULL;
- struct task_struct *p;
- struct list_head *l;
- struct tty_ldisc *ld;
- int closecount = 0, n;
-
- if (!tty)
- return;
-
- /* inuse_filps is protected by the single kernel lock */
- lock_kernel();
-
- spin_lock(&redirect_lock);
- if (redirect && redirect->private_data == tty) {
- f = redirect;
- redirect = NULL;
- }
- spin_unlock(&redirect_lock);
-
- check_tty_count(tty, "do_tty_hangup");
- file_list_lock();
- for (l = tty->tty_files.next; l != &tty->tty_files; l = l->next) {
- struct file * filp = list_entry(l, struct file, f_list);
- if (filp->f_dentry->d_inode->i_rdev == CONSOLE_DEV ||
- filp->f_dentry->d_inode->i_rdev == SYSCONS_DEV) {
- cons_filp = filp;
- continue;
- }
- if (filp->f_op != &tty_fops)
- continue;
- closecount++;
- tty_fasync(-1, filp, 0); /* can't block */
- filp->f_op = &hung_up_tty_fops;
- }
- file_list_unlock();
-
- /* FIXME! What are the locking issues here? This may be overdoing things.. */
- ld = tty_ldisc_ref(tty);
- if(ld != NULL)
- {
- if (ld->flush_buffer)
- ld->flush_buffer(tty);
- if (tty->driver.flush_buffer)
- tty->driver.flush_buffer(tty);
- if ((test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags)) && ld->write_wakeup)
- ld->write_wakeup(tty);
- if (ld->hangup)
- ld->hangup(tty);
- }
-
- /* FIXME: Once we trust the LDISC code better we can wait here for
- ldisc completion and fix the driver call race */
-
- wake_up_interruptible(&tty->write_wait);
- wake_up_interruptible(&tty->read_wait);
-
- /*
- * Shutdown the current line discipline, and reset it to
- * N_TTY.
- */
-
- if (tty->driver.flags & TTY_DRIVER_RESET_TERMIOS)
- {
- down(&tty->termios_sem);
- *tty->termios = tty->driver.init_termios;
- up(&tty->termios_sem);
- }
-
- /* Defer ldisc switch */
- /* tty_deferred_ldisc_switch(N_TTY)
- This should get done automatically when the port closes and
- tty_release is called */
-
- read_lock(&tasklist_lock);
- for_each_task(p) {
- if ((tty->session > 0) && (p->session == tty->session) &&
- p->leader) {
- send_sig(SIGHUP,p,1);
- send_sig(SIGCONT,p,1);
- if (tty->pgrp > 0)
- p->tty_old_pgrp = tty->pgrp;
- }
- if (p->tty == tty)
- p->tty = NULL;
- }
- read_unlock(&tasklist_lock);
-
- tty->flags = 0;
- tty->session = 0;
- tty->pgrp = -1;
- tty->ctrl_status = 0;
- /*
- * If one of the devices matches a console pointer, we
- * cannot just call hangup() because that will cause
- * tty->count and state->count to go out of sync.
- * So we just call close() the right number of times.
- */
- if (cons_filp) {
- if (tty->driver.close)
- for (n = 0; n < closecount; n++)
- tty->driver.close(tty, cons_filp);
- } else if (tty->driver.hangup)
- (tty->driver.hangup)(tty);
-
- /* We don't want to have driver/ldisc interactions beyond
- the ones we did here. The driver layer expects no
- calls after ->hangup() from the ldisc side. However we
- can't yet guarantee all that */
-
- set_bit(TTY_HUPPED, &tty->flags);
- if(ld) {
- tty_ldisc_enable(tty);
- tty_ldisc_deref(ld);
- }
- unlock_kernel();
- if (f)
- fput(f);
-}
-
-void tty_hangup(struct tty_struct * tty)
-{
-#ifdef TTY_DEBUG_HANGUP
- char buf[64];
-
- printk(KERN_DEBUG "%s hangup...\n", tty_name(tty, buf));
-#endif
- schedule_task(&tty->tq_hangup);
-}
-
-void tty_vhangup(struct tty_struct * tty)
-{
-#ifdef TTY_DEBUG_HANGUP
- char buf[64];
-
- printk(KERN_DEBUG "%s vhangup...\n", tty_name(tty, buf));
-#endif
- do_tty_hangup((void *) tty);
-}
-
-int tty_hung_up_p(struct file * filp)
-{
- return (filp->f_op == &hung_up_tty_fops);
-}
-
-/*
- * This function is typically called only by the session leader, when
- * it wants to disassociate itself from its controlling tty.
- *
- * It performs the following functions:
- * (1) Sends a SIGHUP and SIGCONT to the foreground process group
- * (2) Clears the tty from being controlling the session
- * (3) Clears the controlling tty for all processes in the
- * session group.
- *
- * The argument on_exit is set to 1 if called when a process is
- * exiting; it is 0 if called by the ioctl TIOCNOTTY.
- */
-void disassociate_ctty(int on_exit)
-{
- struct tty_struct *tty = current->tty;
- struct task_struct *p;
- int tty_pgrp = -1;
-
- if (tty) {
- tty_pgrp = tty->pgrp;
- if (on_exit && tty->driver.type != TTY_DRIVER_TYPE_PTY)
- tty_vhangup(tty);
- } else {
- if (current->tty_old_pgrp) {
- kill_pg(current->tty_old_pgrp, SIGHUP, on_exit);
- kill_pg(current->tty_old_pgrp, SIGCONT, on_exit);
- }
- return;
- }
- if (tty_pgrp > 0) {
- kill_pg(tty_pgrp, SIGHUP, on_exit);
- if (!on_exit)
- kill_pg(tty_pgrp, SIGCONT, on_exit);
- }
-
- current->tty_old_pgrp = 0;
- tty->session = 0;
- tty->pgrp = -1;
-
- read_lock(&tasklist_lock);
- for_each_task(p)
- if (p->session == current->session)
- p->tty = NULL;
- read_unlock(&tasklist_lock);
-}
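
From userspace the ioctl path into this function looks like the following sketch; TIOCNOTTY is handled further down in tty_ioctl(), which calls disassociate_ctty(0) when the caller is a session leader:

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	/* Detach from the controlling terminal, e.g. while daemonizing. */
	int detach_from_tty(void)
	{
		int fd = open("/dev/tty", O_RDWR | O_NOCTTY);

		if (fd < 0)
			return -1;      /* no controlling tty to give up */
		if (ioctl(fd, TIOCNOTTY) < 0) {
			close(fd);
			return -1;
		}
		close(fd);
		return 0;
	}
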
-
-void stop_tty(struct tty_struct *tty)
-{
- if (tty->stopped)
- return;
- tty->stopped = 1;
- if (tty->link && tty->link->packet) {
- tty->ctrl_status &= ~TIOCPKT_START;
- tty->ctrl_status |= TIOCPKT_STOP;
- wake_up_interruptible(&tty->link->read_wait);
- }
- if (tty->driver.stop)
- (tty->driver.stop)(tty);
-}
-
-void start_tty(struct tty_struct *tty)
-{
- if (!tty->stopped || tty->flow_stopped)
- return;
- tty->stopped = 0;
- if (tty->link && tty->link->packet) {
- tty->ctrl_status &= ~TIOCPKT_STOP;
- tty->ctrl_status |= TIOCPKT_START;
- wake_up_interruptible(&tty->link->read_wait);
- }
- if (tty->driver.start)
- (tty->driver.start)(tty);
- /* If we have a running line discipline it may need kicking */
- tty_wakeup(tty);
-}
-
-static ssize_t tty_read(struct file * file, char * buf, size_t count,
- loff_t *ppos)
-{
- int i;
- struct tty_struct * tty;
- struct inode *inode;
- struct tty_ldisc *ld;
-
- /* Can't seek (pread) on ttys. */
- if (ppos != &file->f_pos)
- return -ESPIPE;
-
- tty = (struct tty_struct *)file->private_data;
- inode = file->f_dentry->d_inode;
- if (tty_paranoia_check(tty, inode->i_rdev, "tty_read"))
- return -EIO;
- if (!tty || (test_bit(TTY_IO_ERROR, &tty->flags)))
- return -EIO;
-
- /* This check not only needs to be done before reading, but also
- whenever read_chan() gets woken up after sleeping, so I've
- moved it to there. This should only be done for the N_TTY
- line discipline, anyway. Same goes for write_chan(). -- jlc. */
-#if 0
- if ((inode->i_rdev != CONSOLE_DEV) && /* don't stop on /dev/console */
- (tty->pgrp > 0) &&
- (current->tty == tty) &&
- (tty->pgrp != current->pgrp))
- if (is_ignored(SIGTTIN) || is_orphaned_pgrp(current->pgrp))
- return -EIO;
- else {
- (void) kill_pg(current->pgrp, SIGTTIN, 1);
- return -ERESTARTSYS;
- }
-#endif
- /* We want to wait for the line discipline to sort out in this
- situation */
- ld = tty_ldisc_ref_wait(tty);
- lock_kernel();
- if (ld->read)
- i = (ld->read)(tty,file,buf,count);
- else
- i = -EIO;
- tty_ldisc_deref(ld);
- unlock_kernel();
- if (i > 0)
- inode->i_atime = CURRENT_TIME;
- return i;
-}
-
-/*
- * Split writes up in sane blocksizes to avoid
- * denial-of-service type attacks
- */
-static inline ssize_t do_tty_write(
-	ssize_t (*write)(struct tty_struct *, struct file *, const unsigned char *, size_t),
- struct tty_struct *tty,
- struct file *file,
- const unsigned char *buf,
- size_t count)
-{
- ssize_t ret = 0, written = 0;
-
- if (file->f_flags & O_NONBLOCK) {
- if (down_trylock(&tty->atomic_write))
- return -EAGAIN;
- }
- else {
- if (down_interruptible(&tty->atomic_write))
- return -ERESTARTSYS;
- }
- if ( test_bit(TTY_NO_WRITE_SPLIT, &tty->flags) ) {
- lock_kernel();
- written = write(tty, file, buf, count);
- unlock_kernel();
- } else {
- for (;;) {
- unsigned long size = MAX(PAGE_SIZE*2,16384);
- if (size > count)
- size = count;
- lock_kernel();
- ret = write(tty, file, buf, size);
- unlock_kernel();
- if (ret <= 0)
- break;
- written += ret;
- buf += ret;
- count -= ret;
- if (!count)
- break;
- ret = -ERESTARTSYS;
- if (signal_pending(current))
- break;
- if (current->need_resched)
- schedule();
- }
- }
- if (written) {
- file->f_dentry->d_inode->i_mtime = CURRENT_TIME;
- ret = written;
- }
- up(&tty->atomic_write);
- return ret;
-}
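
Because the loop above gives up with -ERESTARTSYS when a signal arrives between chunks, callers of write() on a tty can see short writes. A hedged userspace sketch of the conventional counterpart loop that absorbs short writes and EINTR:

	#include <errno.h>
	#include <unistd.h>

	static ssize_t write_all(int fd, const char *buf, size_t count)
	{
		size_t done = 0;

		while (done < count) {
			ssize_t n = write(fd, buf + done, count - done);

			if (n < 0) {
				if (errno == EINTR)
					continue;   /* interrupted, retry */
				return done ? (ssize_t)done : -1;
			}
			done += n;
		}
		return done;
	}
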
-
-
-static ssize_t tty_write(struct file * file, const char * buf, size_t count,
- loff_t *ppos)
-{
- int is_console;
- struct tty_struct * tty;
- struct inode *inode = file->f_dentry->d_inode;
- ssize_t ret;
- struct tty_ldisc *ld;
-
- /* Can't seek (pwrite) on ttys. */
- if (ppos != &file->f_pos)
- return -ESPIPE;
-
- /*
- * For now, we redirect writes from /dev/console as
- * well as /dev/tty0.
- */
- inode = file->f_dentry->d_inode;
- is_console = (inode->i_rdev == SYSCONS_DEV ||
- inode->i_rdev == CONSOLE_DEV);
-
- if (is_console) {
- struct file *p = NULL;
-
- spin_lock(&redirect_lock);
- if (redirect) {
- get_file(redirect);
- p = redirect;
- }
- spin_unlock(&redirect_lock);
-
- if (p) {
- ssize_t res = p->f_op->write(p, buf, count, &p->f_pos);
- fput(p);
- return res;
- }
- }
-
- tty = (struct tty_struct *)file->private_data;
- if (tty_paranoia_check(tty, inode->i_rdev, "tty_write"))
- return -EIO;
- if (!tty || !tty->driver.write || (test_bit(TTY_IO_ERROR, &tty->flags)))
- return -EIO;
-#if 0
- if (!is_console && L_TOSTOP(tty) && (tty->pgrp > 0) &&
- (current->tty == tty) && (tty->pgrp != current->pgrp)) {
- if (is_orphaned_pgrp(current->pgrp))
- return -EIO;
- if (!is_ignored(SIGTTOU)) {
- (void) kill_pg(current->pgrp, SIGTTOU, 1);
- return -ERESTARTSYS;
- }
- }
-#endif
-
- ld = tty_ldisc_ref_wait(tty);
- if (!ld->write)
- ret = -EIO;
- else
- ret = do_tty_write(ld->write, tty, file,
- (const unsigned char __user *)buf, count);
- tty_ldisc_deref(ld);
- return ret;
-}
-
-/* Semaphore to protect creating and releasing a tty. This is shared with
- vt.c for deeply disgusting hack reasons */
-static DECLARE_MUTEX(tty_sem);
-
-static void down_tty_sem(int index)
-{
- down(&tty_sem);
-}
-
-static void up_tty_sem(int index)
-{
- up(&tty_sem);
-}
-
-static void release_mem(struct tty_struct *tty, int idx);
-
-/*
- * WSH 06/09/97: Rewritten to remove races and properly clean up after a
- * failed open. The new code protects the open with a semaphore, so it's
- * really quite straightforward. The semaphore locking can probably be
- * relaxed for the (most common) case of reopening a tty.
- */
-static int init_dev(kdev_t device, struct tty_struct **ret_tty)
-{
- struct tty_struct *tty, *o_tty;
- struct termios *tp, **tp_loc, *o_tp, **o_tp_loc;
- struct termios *ltp, **ltp_loc, *o_ltp, **o_ltp_loc;
- struct tty_driver *driver;
- int retval=0;
- int idx;
-
- driver = get_tty_driver(device);
- if (!driver)
- return -ENODEV;
-
- idx = MINOR(device) - driver->minor_start;
-
- /*
- * Check whether we need to acquire the tty semaphore to avoid
- * race conditions. For now, play it safe.
- */
- down_tty_sem(idx);
-
- /* check whether we're reopening an existing tty */
- tty = driver->table[idx];
- if (tty) goto fast_track;
-
- /*
- * First time open is complex, especially for PTY devices.
- * This code guarantees that either everything succeeds and the
- * TTY is ready for operation, or else the table slots are vacated
- * and the allocated memory released. (Except that the termios
- * and locked termios may be retained.)
- */
-
- o_tty = NULL;
- tp = o_tp = NULL;
- ltp = o_ltp = NULL;
-
- tty = alloc_tty_struct();
- if(!tty)
- goto fail_no_mem;
- initialize_tty_struct(tty);
- tty->device = device;
- tty->driver = *driver;
-
- tp_loc = &driver->termios[idx];
- if (!*tp_loc) {
- tp = (struct termios *) kmalloc(sizeof(struct termios),
- GFP_KERNEL);
- if (!tp)
- goto free_mem_out;
- *tp = driver->init_termios;
- }
-
- ltp_loc = &driver->termios_locked[idx];
- if (!*ltp_loc) {
- ltp = (struct termios *) kmalloc(sizeof(struct termios),
- GFP_KERNEL);
- if (!ltp)
- goto free_mem_out;
- memset(ltp, 0, sizeof(struct termios));
- }
-
- if (driver->type == TTY_DRIVER_TYPE_PTY) {
- o_tty = alloc_tty_struct();
- if (!o_tty)
- goto free_mem_out;
- initialize_tty_struct(o_tty);
- o_tty->device = (kdev_t) MKDEV(driver->other->major,
- driver->other->minor_start + idx);
- o_tty->driver = *driver->other;
-
- o_tp_loc = &driver->other->termios[idx];
- if (!*o_tp_loc) {
- o_tp = (struct termios *)
- kmalloc(sizeof(struct termios), GFP_KERNEL);
- if (!o_tp)
- goto free_mem_out;
- *o_tp = driver->other->init_termios;
- }
-
- o_ltp_loc = &driver->other->termios_locked[idx];
- if (!*o_ltp_loc) {
- o_ltp = (struct termios *)
- kmalloc(sizeof(struct termios), GFP_KERNEL);
- if (!o_ltp)
- goto free_mem_out;
- memset(o_ltp, 0, sizeof(struct termios));
- }
-
- /*
- * Everything allocated ... set up the o_tty structure.
- */
- driver->other->table[idx] = o_tty;
- if (!*o_tp_loc)
- *o_tp_loc = o_tp;
- if (!*o_ltp_loc)
- *o_ltp_loc = o_ltp;
- o_tty->termios = *o_tp_loc;
- o_tty->termios_locked = *o_ltp_loc;
- (*driver->other->refcount)++;
- if (driver->subtype == PTY_TYPE_MASTER)
- o_tty->count++;
-
- /* Establish the links in both directions */
- tty->link = o_tty;
- o_tty->link = tty;
- }
-
- /*
- * All structures have been allocated, so now we install them.
- * Failures after this point use release_mem to clean up, so
- * there's no need to null out the local pointers.
- */
- driver->table[idx] = tty;
-
- if (!*tp_loc)
- *tp_loc = tp;
- if (!*ltp_loc)
- *ltp_loc = ltp;
- tty->termios = *tp_loc;
- tty->termios_locked = *ltp_loc;
- (*driver->refcount)++;
- tty->count++;
-
- /*
- * Structures all installed ... call the ldisc open routines.
- * If we fail here just call release_mem to clean up. No need
- * to decrement the use counts, as release_mem doesn't care.
- */
- if (tty->ldisc.open) {
- retval = (tty->ldisc.open)(tty);
- if (retval)
- goto release_mem_out;
- }
- if (o_tty && o_tty->ldisc.open) {
- retval = (o_tty->ldisc.open)(o_tty);
- if (retval) {
- if (tty->ldisc.close)
- (tty->ldisc.close)(tty);
- goto release_mem_out;
- }
- set_bit(TTY_LDISC, &o_tty->flags);
- tty_ldisc_enable(o_tty);
- }
- tty_ldisc_enable(tty);
- goto success;
-
- /*
- * This fast open can be used if the tty is already open.
- * No memory is allocated, and the only failures are from
- * attempting to open a closing tty or attempting multiple
- * opens on a pty master.
- */
-fast_track:
- if (test_bit(TTY_CLOSING, &tty->flags)) {
- retval = -EIO;
- goto end_init;
- }
- if (driver->type == TTY_DRIVER_TYPE_PTY &&
- driver->subtype == PTY_TYPE_MASTER) {
- /*
- * special case for PTY masters: only one open permitted,
- * and the slave side open count is incremented as well.
- */
- if (tty->count) {
- retval = -EIO;
- goto end_init;
- }
- tty->link->count++;
- }
- tty->count++;
- tty->driver = *driver; /* N.B. why do this every time?? */
- /* FIXME */
- if(!test_bit(TTY_LDISC, &tty->flags))
- printk(KERN_ERR "init_dev but no ldisc\n");
-success:
- *ret_tty = tty;
-
- /* All paths come through here to release the semaphore */
-end_init:
- up_tty_sem(idx);
- return retval;
-
- /* Release locally allocated memory ... nothing placed in slots */
-free_mem_out:
- if (o_tp)
- kfree(o_tp);
- if (o_tty)
- free_tty_struct(o_tty);
- if (ltp)
- kfree(ltp);
- if (tp)
- kfree(tp);
- free_tty_struct(tty);
-
-fail_no_mem:
- retval = -ENOMEM;
- goto end_init;
-
- /* call the tty release_mem routine to clean out this slot */
-release_mem_out:
- printk(KERN_INFO "init_dev: ldisc open failed, "
- "clearing slot %d\n", idx);
- release_mem(tty, idx);
- goto end_init;
-}
-
-/*
- * Releases memory associated with a tty structure, and clears out the
- * driver table slots.
- */
-static void release_mem(struct tty_struct *tty, int idx)
-{
- struct tty_struct *o_tty;
- struct termios *tp;
-
- if ((o_tty = tty->link) != NULL) {
- o_tty->driver.table[idx] = NULL;
- if (o_tty->driver.flags & TTY_DRIVER_RESET_TERMIOS) {
- tp = o_tty->driver.termios[idx];
- o_tty->driver.termios[idx] = NULL;
- kfree(tp);
- }
- o_tty->magic = 0;
- (*o_tty->driver.refcount)--;
- list_del_init(&o_tty->tty_files);
- free_tty_struct(o_tty);
- }
-
- tty->driver.table[idx] = NULL;
- if (tty->driver.flags & TTY_DRIVER_RESET_TERMIOS) {
- tp = tty->driver.termios[idx];
- tty->driver.termios[idx] = NULL;
- kfree(tp);
- }
- tty->magic = 0;
- (*tty->driver.refcount)--;
- list_del_init(&tty->tty_files);
- free_tty_struct(tty);
-}
-
-/*
- * Even releasing the tty structures is a tricky business.. We have
- * to be very careful that the structures are all released at the
- * same time, as interrupts might otherwise get the wrong pointers.
- *
- * WSH 09/09/97: rewritten to avoid some nasty race conditions that could
- * lead to double frees or releasing memory still in use.
- */
-static void release_dev(struct file * filp)
-{
- struct tty_struct *tty, *o_tty;
- int pty_master, tty_closing, o_tty_closing, do_sleep;
- int idx;
- char buf[64];
- unsigned long flags;
-
- tty = (struct tty_struct *)filp->private_data;
-	if (tty_paranoia_check(tty, filp->f_dentry->d_inode->i_rdev, "release_dev"))
- return;
-
- check_tty_count(tty, "release_dev");
-
- tty_fasync(-1, filp, 0);
-
- idx = MINOR(tty->device) - tty->driver.minor_start;
- pty_master = (tty->driver.type == TTY_DRIVER_TYPE_PTY &&
- tty->driver.subtype == PTY_TYPE_MASTER);
- o_tty = tty->link;
-
-#ifdef TTY_PARANOIA_CHECK
- if (idx < 0 || idx >= tty->driver.num) {
- printk(KERN_DEBUG "release_dev: bad idx when trying to "
- "free (%s)\n", kdevname(tty->device));
- return;
- }
- if (tty != tty->driver.table[idx]) {
- printk(KERN_DEBUG "release_dev: driver.table[%d] not tty "
- "for (%s)\n", idx, kdevname(tty->device));
- return;
- }
- if (tty->termios != tty->driver.termios[idx]) {
- printk(KERN_DEBUG "release_dev: driver.termios[%d] not termios "
- "for (%s)\n",
- idx, kdevname(tty->device));
- return;
- }
- if (tty->termios_locked != tty->driver.termios_locked[idx]) {
- printk(KERN_DEBUG "release_dev: driver.termios_locked[%d] not "
- "termios_locked for (%s)\n",
- idx, kdevname(tty->device));
- return;
- }
-#endif
-
-#ifdef TTY_DEBUG_HANGUP
- printk(KERN_DEBUG "release_dev of %s (tty count=%d)...",
- tty_name(tty, buf), tty->count);
-#endif
-
-#ifdef TTY_PARANOIA_CHECK
- if (tty->driver.other) {
- if (o_tty != tty->driver.other->table[idx]) {
- printk(KERN_DEBUG "release_dev: other->table[%d] "
- "not o_tty for (%s)\n",
- idx, kdevname(tty->device));
- return;
- }
- if (o_tty->termios != tty->driver.other->termios[idx]) {
- printk(KERN_DEBUG "release_dev: other->termios[%d] "
- "not o_termios for (%s)\n",
- idx, kdevname(tty->device));
- return;
- }
- if (o_tty->termios_locked !=
- tty->driver.other->termios_locked[idx]) {
- printk(KERN_DEBUG "release_dev: other->termios_locked["
- "%d] not o_termios_locked for (%s)\n",
- idx, kdevname(tty->device));
- return;
- }
- if (o_tty->link != tty) {
- printk(KERN_DEBUG "release_dev: bad pty pointers\n");
- return;
- }
- }
-#endif
-
- if (tty->driver.close)
- tty->driver.close(tty, filp);
-
- /*
- * Sanity check: if tty->count is going to zero, there shouldn't be
- * any waiters on tty->read_wait or tty->write_wait. We test the
- * wait queues and kick everyone out _before_ actually starting to
- * close. This ensures that we won't block while releasing the tty
- * structure.
- *
- * The test for the o_tty closing is necessary, since the master and
- * slave sides may close in any order. If the slave side closes out
- * first, its count will be one, since the master side holds an open.
- * Thus this test wouldn't be triggered at the time the slave closes,
- * so we do it now.
- *
- * Note that it's possible for the tty to be opened again while we're
- * flushing out waiters. By recalculating the closing flags before
- * each iteration we avoid any problems.
- */
- while (1) {
- tty_closing = tty->count <= 1;
- o_tty_closing = o_tty &&
- (o_tty->count <= (pty_master ? 1 : 0));
- do_sleep = 0;
-
- if (tty_closing) {
- if (waitqueue_active(&tty->read_wait)) {
- wake_up(&tty->read_wait);
- do_sleep++;
- }
- if (waitqueue_active(&tty->write_wait)) {
- wake_up(&tty->write_wait);
- do_sleep++;
- }
- }
- if (o_tty_closing) {
- if (waitqueue_active(&o_tty->read_wait)) {
- wake_up(&o_tty->read_wait);
- do_sleep++;
- }
- if (waitqueue_active(&o_tty->write_wait)) {
- wake_up(&o_tty->write_wait);
- do_sleep++;
- }
- }
- if (!do_sleep)
- break;
-
- printk(KERN_WARNING "release_dev: %s: read/write wait queue "
- "active!\n", tty_name(tty, buf));
- schedule();
- }
-
- /*
- * The closing flags are now consistent with the open counts on
- * both sides, and we've completed the last operation that could
- * block, so it's safe to proceed with closing.
- */
- if (pty_master) {
- if (--o_tty->count < 0) {
- printk(KERN_WARNING "release_dev: bad pty slave count "
- "(%d) for %s\n",
- o_tty->count, tty_name(o_tty, buf));
- o_tty->count = 0;
- }
- }
- if (--tty->count < 0) {
- printk(KERN_WARNING "release_dev: bad tty->count (%d) for %s\n",
- tty->count, tty_name(tty, buf));
- tty->count = 0;
- }
-
- /*
- * We've decremented tty->count, so we should zero out
- * filp->private_data, to break the link between the tty and
- * the file descriptor. Otherwise if filp_close() blocks before
- * the file descriptor is removed from the inuse_filp
- * list, check_tty_count() could observe a discrepancy and
- * printk a warning message to the user.
- */
- filp->private_data = 0;
-
- /*
- * Perform some housekeeping before deciding whether to return.
- *
- * Set the TTY_CLOSING flag if this was the last open. In the
- * case of a pty we may have to wait around for the other side
- * to close, and TTY_CLOSING makes sure we can't be reopened.
- */
- if(tty_closing)
- set_bit(TTY_CLOSING, &tty->flags);
- if(o_tty_closing)
- set_bit(TTY_CLOSING, &o_tty->flags);
-
- /*
- * If _either_ side is closing, make sure there aren't any
- * processes that still think tty or o_tty is their controlling
- * tty.
- */
- if (tty_closing || o_tty_closing) {
- struct task_struct *p;
-
- read_lock(&tasklist_lock);
- for_each_task(p) {
- if (p->tty == tty || (o_tty && p->tty == o_tty))
- p->tty = NULL;
- }
- read_unlock(&tasklist_lock);
- }
-
- /* check whether both sides are closing ... */
- if (!tty_closing || (o_tty && !o_tty_closing))
- return;
-
-#ifdef TTY_DEBUG_HANGUP
- printk(KERN_DEBUG "freeing tty structure...");
-#endif
-
- /*
- * Prevent flush_to_ldisc() from rescheduling the work for later. Then
- * kill any delayed work. As this is the final close it does not
- * race with the set_ldisc code path.
- */
- clear_bit(TTY_LDISC, &tty->flags);
- clear_bit(TTY_DONT_FLIP, &tty->flags);
-
- /*
- * Wait for ->hangup_work and ->flip.work handlers to terminate
- */
-
- run_task_queue(&tq_timer);
- flush_scheduled_tasks();
-
- /*
- * Wait for any short term users (we know they are just driver
- * side waiters as the file is closing so user count on the file
- * side is zero.
- */
-
- spin_lock_irqsave(&tty_ldisc_lock, flags);
- while(tty->ldisc.refcount)
- {
- spin_unlock_irqrestore(&tty_ldisc_lock, flags);
- wait_event(tty_ldisc_wait, tty->ldisc.refcount == 0);
- spin_lock_irqsave(&tty_ldisc_lock, flags);
- }
- spin_unlock_irqrestore(&tty_ldisc_lock, flags);
-
- /*
- * Shutdown the current line discipline, and reset it to N_TTY.
- * N.B. why reset ldisc when we're releasing the memory??
- * FIXME: this MUST get fixed for the new reflocking
- */
- if (tty->ldisc.close)
- (tty->ldisc.close)(tty);
- tty_ldisc_put(tty->ldisc.num);
-
- /*
- * Switch the line discipline back
- */
- tty_ldisc_assign(tty, tty_ldisc_get(N_TTY));
- tty_set_termios_ldisc(tty,N_TTY);
-
- if (o_tty) {
- /* FIXME: could o_tty be in setldisc here ? */
- clear_bit(TTY_LDISC, &o_tty->flags);
- if (o_tty->ldisc.close)
- (o_tty->ldisc.close)(o_tty);
- tty_ldisc_put(o_tty->ldisc.num);
- tty_ldisc_assign(o_tty, tty_ldisc_get(N_TTY));
- tty_set_termios_ldisc(o_tty,N_TTY);
- }
-
- /*
- * The release_mem function takes care of the details of clearing
- * the slots and preserving the termios structure.
- */
- release_mem(tty, idx);
-}
-
-/*
- * tty_open and tty_release keep up the tty count that contains the
- * number of opens done on a tty. We cannot use the inode-count, as
- * different inodes might point to the same tty.
- *
- * Open-counting is needed for pty masters, as well as for keeping
- * track of serial lines: DTR is dropped when the last close happens.
- * (This is not done solely through tty->count, now. - Ted 1/27/92)
- *
- * The termios state of a pty is reset on first open so that
- * settings don't persist across reuse.
- */
-static int tty_open(struct inode * inode, struct file * filp)
-{
- struct tty_struct *tty;
- int noctty, retval;
- kdev_t device;
- unsigned short saved_flags;
- char buf[64];
-
- saved_flags = filp->f_flags;
-retry_open:
- noctty = filp->f_flags & O_NOCTTY;
- device = inode->i_rdev;
- if (device == TTY_DEV) {
- if (!current->tty)
- return -ENXIO;
- device = current->tty->device;
- filp->f_flags |= O_NONBLOCK; /* Don't let /dev/tty block */
- /* noctty = 1; */
- }
-#ifdef CONFIG_VT
- if (device == CONSOLE_DEV) {
- extern int fg_console;
- device = MKDEV(TTY_MAJOR, fg_console + 1);
- noctty = 1;
- }
-#endif
- if (device == SYSCONS_DEV) {
- struct console *c = console_drivers;
- while(c && !c->device)
- c = c->next;
- if (!c)
- return -ENODEV;
- device = c->device(c);
- filp->f_flags |= O_NONBLOCK; /* Don't let /dev/console block */
- noctty = 1;
- }
-
- if (device == PTMX_DEV) {
-#ifdef CONFIG_UNIX98_PTYS
-
- /* find a free pty. */
- int major, minor;
- struct tty_driver *driver;
-
- /* find a device that is not in use. */
- retval = -1;
- for ( major = 0 ; major < UNIX98_NR_MAJORS ; major++ ) {
- driver = &ptm_driver[major];
- for (minor = driver->minor_start ;
- minor < driver->minor_start + driver->num ;
- minor++) {
- device = MKDEV(driver->major, minor);
-			if (!init_dev(device, &tty)) goto ptmx_found; /* ok! */
- }
- }
- return -EIO; /* no free ptys */
- ptmx_found:
- set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
- minor -= driver->minor_start;
-	devpts_pty_new(driver->other->name_base + minor, MKDEV(driver->other->major, minor + driver->other->minor_start));
- tty_register_devfs(&pts_driver[major], DEVFS_FL_DEFAULT,
- pts_driver[major].minor_start + minor);
- noctty = 1;
- goto init_dev_done;
-
-#else /* CONFIG_UNIX98_PTYS */
-
- return -ENODEV;
-
-#endif /* CONFIG_UNIX98_PTYS */
- }
-
- retval = init_dev(device, &tty);
- if (retval)
- return retval;
-
-#ifdef CONFIG_UNIX98_PTYS
-init_dev_done:
-#endif
- filp->private_data = tty;
- file_move(filp, &tty->tty_files);
- check_tty_count(tty, "tty_open");
- if (tty->driver.type == TTY_DRIVER_TYPE_PTY &&
- tty->driver.subtype == PTY_TYPE_MASTER)
- noctty = 1;
-#ifdef TTY_DEBUG_HANGUP
- printk(KERN_DEBUG "opening %s...", tty_name(tty, buf));
-#endif
- if (tty->driver.open)
- retval = tty->driver.open(tty, filp);
- else
- retval = -ENODEV;
- filp->f_flags = saved_flags;
-
- if (!retval && test_bit(TTY_EXCLUSIVE, &tty->flags) && !suser())
- retval = -EBUSY;
-
- if (retval) {
-#ifdef TTY_DEBUG_HANGUP
- printk(KERN_DEBUG "error %d in opening %s...", retval,
- tty_name(tty, buf));
-#endif
-
- release_dev(filp);
- if (retval != -ERESTARTSYS)
- return retval;
- if (signal_pending(current))
- return retval;
- schedule();
- /*
- * Need to reset f_op in case a hangup happened.
- */
- filp->f_op = &tty_fops;
- goto retry_open;
- }
- if (!noctty &&
- current->leader &&
- !current->tty &&
- tty->session == 0) {
- task_lock(current);
- current->tty = tty;
- task_unlock(current);
- current->tty_old_pgrp = 0;
- tty->session = current->session;
- tty->pgrp = current->pgrp;
- }
- if ((tty->driver.type == TTY_DRIVER_TYPE_SERIAL) &&
- (tty->driver.subtype == SERIAL_TYPE_CALLOUT) &&
- (tty->count == 1)) {
- static int nr_warns;
- if (nr_warns < 5) {
- printk(KERN_WARNING "tty_io.c: "
- "process %d (%s) used obsolete /dev/%s - "
- "update software to use /dev/ttyS%d\n",
- current->pid, current->comm,
- tty_name(tty, buf), TTY_NUMBER(tty));
- nr_warns++;
- }
- }
- return 0;
-}
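
A process that opened its terminal with O_NOCTTY (or that was not yet a session leader at open time) can still acquire it later through tiocsctty() below. A minimal userspace sketch, assuming the caller may legitimately become a session leader:

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	int adopt_ctty(const char *path)
	{
		int fd;

		if (setsid() < 0)               /* must be a session leader */
			return -1;
		fd = open(path, O_RDWR | O_NOCTTY);
		if (fd < 0)
			return -1;
		/* arg 0: fail rather than steal another session's tty */
		if (ioctl(fd, TIOCSCTTY, 0) < 0) {
			close(fd);
			return -1;
		}
		return fd;
	}
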
-
-static int tty_release(struct inode * inode, struct file * filp)
-{
- lock_kernel();
- release_dev(filp);
- unlock_kernel();
- return 0;
-}
-
-/* No kernel lock held - fine */
-static unsigned int tty_poll(struct file * filp, poll_table * wait)
-{
- struct tty_struct * tty;
- struct tty_ldisc *ld;
- int ret = 0;
-
- tty = (struct tty_struct *)filp->private_data;
-	if (tty_paranoia_check(tty, filp->f_dentry->d_inode->i_rdev, "tty_poll"))
- return 0;
-
- ld = tty_ldisc_ref_wait(tty);
- if (ld->poll)
- ret = (ld->poll)(tty, filp, wait);
- tty_ldisc_deref(ld);
- return ret;
-}
-
-static int tty_fasync(int fd, struct file * filp, int on)
-{
- struct tty_struct * tty;
- int retval;
-
- tty = (struct tty_struct *)filp->private_data;
-	if (tty_paranoia_check(tty, filp->f_dentry->d_inode->i_rdev, "tty_fasync"))
- return 0;
-
- retval = fasync_helper(fd, filp, on, &tty->fasync);
- if (retval <= 0)
- return retval;
-
- if (on) {
- if (!waitqueue_active(&tty->read_wait))
- tty->minimum_to_wake = 1;
- if (filp->f_owner.pid == 0) {
- filp->f_owner.pid = (-tty->pgrp) ? : current->pid;
- filp->f_owner.uid = current->uid;
- filp->f_owner.euid = current->euid;
- }
- } else {
- if (!tty->fasync && !waitqueue_active(&tty->read_wait))
- tty->minimum_to_wake = N_TTY_BUF_SIZE;
- }
- return 0;
-}
-
-static int tiocsti(struct tty_struct *tty, char * arg)
-{
- char ch, mbz = 0;
- struct tty_ldisc *ld;
-
- if ((current->tty != tty) && !suser())
- return -EPERM;
- if (get_user(ch, arg))
- return -EFAULT;
- ld = tty_ldisc_ref_wait(tty);
- ld->receive_buf(tty, &ch, &mbz, 1);
- tty_ldisc_deref(ld);
- return 0;
-}
-
-static int tiocgwinsz(struct tty_struct *tty, struct winsize * arg)
-{
- if (copy_to_user(arg, &tty->winsize, sizeof(*arg)))
- return -EFAULT;
- return 0;
-}
-
-static int tiocswinsz(struct tty_struct *tty, struct tty_struct *real_tty,
- struct winsize * arg)
-{
- struct winsize tmp_ws;
-
- if (copy_from_user(&tmp_ws, arg, sizeof(*arg)))
- return -EFAULT;
- if (!memcmp(&tmp_ws, &tty->winsize, sizeof(*arg)))
- return 0;
- if (tty->pgrp > 0)
- kill_pg(tty->pgrp, SIGWINCH, 1);
- if ((real_tty->pgrp != tty->pgrp) && (real_tty->pgrp > 0))
- kill_pg(real_tty->pgrp, SIGWINCH, 1);
- tty->winsize = tmp_ws;
- real_tty->winsize = tmp_ws;
- return 0;
-}
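
A typical caller is a terminal emulator pushing a resize at the pty master; a short sketch:

	#include <sys/ioctl.h>
	#include <termios.h>

	/* tiocswinsz() above copies the struct into both pty ends and
	 * raises SIGWINCH in the foreground process group(s) only when
	 * the size actually changed. */
	int push_resize(int master_fd, unsigned short rows, unsigned short cols)
	{
		struct winsize ws;

		ws.ws_row = rows;
		ws.ws_col = cols;
		ws.ws_xpixel = ws.ws_ypixel = 0;
		return ioctl(master_fd, TIOCSWINSZ, &ws);
	}
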
-
-static int tioccons(struct inode *inode, struct file *file)
-{
- if (inode->i_rdev == SYSCONS_DEV ||
- inode->i_rdev == CONSOLE_DEV) {
- struct file *f;
- if (!suser())
- return -EPERM;
- spin_lock(&redirect_lock);
- f = redirect;
- redirect = NULL;
- spin_unlock(&redirect_lock);
- if (f)
- fput(f);
- return 0;
- }
- spin_lock(&redirect_lock);
- if (redirect) {
- spin_unlock(&redirect_lock);
- return -EBUSY;
- }
- get_file(file);
- redirect = file;
- spin_unlock(&redirect_lock);
- return 0;
-}
-
-
-static int fionbio(struct file *file, int *arg)
-{
- int nonblock;
-
- if (get_user(nonblock, arg))
- return -EFAULT;
-
- if (nonblock)
- file->f_flags |= O_NONBLOCK;
- else
- file->f_flags &= ~O_NONBLOCK;
- return 0;
-}
-
-static int tiocsctty(struct tty_struct *tty, int arg)
-{
- if (current->leader &&
- (current->session == tty->session))
- return 0;
- /*
- * The process must be a session leader and
- * not have a controlling tty already.
- */
- if (!current->leader || current->tty)
- return -EPERM;
- if (tty->session > 0) {
- /*
- * This tty is already the controlling
- * tty for another session group!
- */
- if ((arg == 1) && suser()) {
- /*
- * Steal it away
- */
- struct task_struct *p;
-
- read_lock(&tasklist_lock);
- for_each_task(p)
- if (p->tty == tty)
- p->tty = NULL;
- read_unlock(&tasklist_lock);
- } else
- return -EPERM;
- }
- task_lock(current);
- current->tty = tty;
- task_unlock(current);
- current->tty_old_pgrp = 0;
- tty->session = current->session;
- tty->pgrp = current->pgrp;
- return 0;
-}
-
-static int tiocgpgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t *arg)
-{
- /*
- * (tty == real_tty) is a cheap way of
- * testing if the tty is NOT a master pty.
- */
- if (tty == real_tty && current->tty != real_tty)
- return -ENOTTY;
- return put_user(real_tty->pgrp, arg);
-}
-
-static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t *arg)
-{
- pid_t pgrp;
- int retval = tty_check_change(real_tty);
-
- if (retval == -EIO)
- return -ENOTTY;
- if (retval)
- return retval;
- if (!current->tty ||
- (current->tty != real_tty) ||
- (real_tty->session != current->session))
- return -ENOTTY;
- if (get_user(pgrp, (pid_t *) arg))
- return -EFAULT;
- if (pgrp < 0)
- return -EINVAL;
- if (session_of_pgrp(pgrp) != current->session)
- return -EPERM;
- real_tty->pgrp = pgrp;
- return 0;
-}
-
-static int tiocgsid(struct tty_struct *tty, struct tty_struct *real_tty, pid_t *arg)
-{
- /*
- * (tty == real_tty) is a cheap way of
- * testing if the tty is NOT a master pty.
- */
- if (tty == real_tty && current->tty != real_tty)
- return -ENOTTY;
- if (real_tty->session <= 0)
- return -ENOTTY;
- return put_user(real_tty->session, arg);
-}
-
-static int tiocttygstruct(struct tty_struct *tty, struct tty_struct *arg)
-{
- if (copy_to_user(arg, tty, sizeof(*arg)))
- return -EFAULT;
- return 0;
-}
-
-static int tiocsetd(struct tty_struct *tty, int *arg)
-{
- int ldisc;
-
- if (get_user(ldisc, arg))
- return -EFAULT;
- return tty_set_ldisc(tty, ldisc);
-}
-
-static int send_break(struct tty_struct *tty, int duration)
-{
- tty->driver.break_ctl(tty, -1);
- if (!signal_pending(current)) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(duration);
- }
- tty->driver.break_ctl(tty, 0);
- if (signal_pending(current))
- return -EINTR;
- return 0;
-}
-
-static int tty_generic_brk(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg)
-{
- if (cmd == TCSBRK && arg)
- {
- /* tcdrain case */
- int retval = tty_check_change(tty);
- if (retval)
- return retval;
- tty_wait_until_sent(tty, 0);
- if (signal_pending(current))
- return -EINTR;
- }
- return 0;
-}
-
-/*
- * Split this up, as gcc can choke on it otherwise..
- */
-int tty_ioctl(struct inode * inode, struct file * file,
- unsigned int cmd, unsigned long arg)
-{
- struct tty_struct *tty, *real_tty;
- int retval;
- struct tty_ldisc *ld;
-
- tty = (struct tty_struct *)file->private_data;
- if (tty_paranoia_check(tty, inode->i_rdev, "tty_ioctl"))
- return -EINVAL;
-
- real_tty = tty;
- if (tty->driver.type == TTY_DRIVER_TYPE_PTY &&
- tty->driver.subtype == PTY_TYPE_MASTER)
- real_tty = tty->link;
-
- /*
- * Break handling by driver
- */
- if (!tty->driver.break_ctl) {
- switch(cmd) {
- case TIOCSBRK:
- case TIOCCBRK:
- if (tty->driver.ioctl)
- return tty->driver.ioctl(tty, file, cmd, arg);
- return -EINVAL;
-
- /* These two ioctl's always return success; even if */
- /* the driver doesn't support them. */
- case TCSBRK:
- case TCSBRKP:
- retval = -ENOIOCTLCMD;
- if (tty->driver.ioctl)
- retval = tty->driver.ioctl(tty, file, cmd, arg);
- /* Not driver handled */
- if (retval == -ENOIOCTLCMD)
- retval = tty_generic_brk(tty, file, cmd, arg);
- return retval;
- }
- }
-
- /*
- * Factor out some common prep work
- */
- switch (cmd) {
- case TIOCSETD:
- case TIOCSBRK:
- case TIOCCBRK:
- case TCSBRK:
- case TCSBRKP:
- retval = tty_check_change(tty);
- if (retval)
- return retval;
- if (cmd != TIOCCBRK) {
- tty_wait_until_sent(tty, 0);
- if (signal_pending(current))
- return -EINTR;
- }
- break;
- }
-
- switch (cmd) {
- case TIOCSTI:
- return tiocsti(tty, (char *)arg);
- case TIOCGWINSZ:
- return tiocgwinsz(tty, (struct winsize *) arg);
- case TIOCSWINSZ:
-			return tiocswinsz(tty, real_tty, (struct winsize *) arg);
- case TIOCCONS:
- return real_tty!=tty ? -EINVAL : tioccons(inode, file);
- case FIONBIO:
- return fionbio(file, (int *) arg);
- case TIOCEXCL:
- set_bit(TTY_EXCLUSIVE, &tty->flags);
- return 0;
- case TIOCNXCL:
- clear_bit(TTY_EXCLUSIVE, &tty->flags);
- return 0;
- case TIOCNOTTY:
- if (current->tty != tty)
- return -ENOTTY;
- if (current->leader)
- disassociate_ctty(0);
- task_lock(current);
- current->tty = NULL;
- task_unlock(current);
- return 0;
- case TIOCSCTTY:
- return tiocsctty(tty, arg);
- case TIOCGPGRP:
- return tiocgpgrp(tty, real_tty, (pid_t *) arg);
- case TIOCSPGRP:
- return tiocspgrp(tty, real_tty, (pid_t *) arg);
- case TIOCGSID:
- return tiocgsid(tty, real_tty, (pid_t *) arg);
- case TIOCGETD:
- /* FIXME: check this is ok */
- return put_user(tty->ldisc.num, (int *) arg);
- case TIOCSETD:
- return tiocsetd(tty, (int *) arg);
-#ifdef CONFIG_VT
- case TIOCLINUX:
- return tioclinux(tty, arg);
-#endif
- case TIOCTTYGSTRUCT:
- return tiocttygstruct(tty, (struct tty_struct *) arg);
-
- /*
- * Break handling
- */
- case TIOCSBRK: /* Turn break on, unconditionally */
- tty->driver.break_ctl(tty, -1);
- return 0;
-
- case TIOCCBRK: /* Turn break off, unconditionally */
- tty->driver.break_ctl(tty, 0);
- return 0;
- case TCSBRK: /* SVID version: non-zero arg --> no break */
- /*
- * XXX is the above comment correct, or the
- * code below correct? Is this ioctl used at
- * all by anyone?
- */
- if (!arg)
- return send_break(tty, HZ/4);
- return 0;
- case TCSBRKP: /* support for POSIX tcsendbreak() */
- return send_break(tty, arg ? arg*(HZ/10) : HZ/4);
- }
- if (tty->driver.ioctl) {
- retval = (tty->driver.ioctl)(tty, file, cmd, arg);
- if (retval != -ENOIOCTLCMD)
- return retval;
- }
- ld = tty_ldisc_ref_wait(tty);
- retval = -EINVAL;
- if (ld->ioctl) {
- retval = ld->ioctl(tty, file, cmd, arg);
- if (retval == -ENOIOCTLCMD)
- retval = -EINVAL;
- }
- tty_ldisc_deref(ld);
- return retval;
-}
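
The TCSBRK/TCSBRKP cases are what POSIX tcsendbreak() lands on. A sketch, relying on glibc's mapping of a non-positive duration to TCSBRK with arg 0:

	#include <termios.h>

	/* Sends a break of implementation-defined length; with the code
	 * above this becomes send_break(tty, HZ/4), i.e. about 0.25 s. */
	int quarter_second_break(int fd)
	{
		return tcsendbreak(fd, 0);
	}
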
-
-
-/*
- * This implements the "Secure Attention Key" --- the idea is to
- * prevent trojan horses by killing all processes associated with this
- * tty when the user hits the "Secure Attention Key". Required for
- * super-paranoid applications --- see the Orange Book for more details.
- *
- * This code could be nicer; ideally it should send a HUP, wait a few
- * seconds, then send a INT, and then a KILL signal. But you then
- * have to coordinate with the init process, since all processes associated
- * with the current tty must be dead before the new getty is allowed
- * to spawn.
- *
- * Now, if it would be correct ;-/ The current code has a nasty hole -
- * it doesn't catch files in flight. We may send the descriptor to ourselves
- * via AF_UNIX socket, close it and later fetch from socket. FIXME.
- *
- * Nasty bug: do_SAK is being called in interrupt context. This can
- * deadlock. We punt it up to process context. AKPM - 16Mar2001
- */
-static void __do_SAK(void *arg)
-{
-#ifdef TTY_SOFT_SAK
- tty_hangup(tty);
-#else
- struct tty_struct *tty = arg;
- struct task_struct *p;
- int session;
- int i;
- struct file *filp;
- struct tty_ldisc *disc;
-
- if (!tty)
- return;
- session = tty->session;
- /* We don't want an ldisc switch during this */
- disc = tty_ldisc_ref(tty);
- if (disc && disc->flush_buffer)
- disc->flush_buffer(tty);
- tty_ldisc_deref(disc);
-
- if (tty->driver.flush_buffer)
- tty->driver.flush_buffer(tty);
-
- read_lock(&tasklist_lock);
- for_each_task(p) {
- if ((p->tty == tty) ||
- ((session > 0) && (p->session == session))) {
- send_sig(SIGKILL, p, 1);
- continue;
- }
- task_lock(p);
- if (p->files) {
- read_lock(&p->files->file_lock);
- for (i=0; i < p->files->max_fds; i++) {
- filp = fcheck_files(p->files, i);
- if (filp && (filp->f_op == &tty_fops) &&
- (filp->private_data == tty)) {
- send_sig(SIGKILL, p, 1);
- break;
- }
- }
- read_unlock(&p->files->file_lock);
- }
- task_unlock(p);
- }
- read_unlock(&tasklist_lock);
-#endif
-}
-
-/*
- * The tq handling here is a little racy - tty->SAK_tq may already be queued.
- * But there's no mechanism to fix that without futzing with tqueue_lock.
- * Fortunately we don't need to worry, because if ->SAK_tq is already queued,
- * the values which we write to it will be identical to the values which it
- * already has. --akpm
- */
-void do_SAK(struct tty_struct *tty)
-{
- if (!tty)
- return;
- PREPARE_TQUEUE(&tty->SAK_tq, __do_SAK, tty);
- schedule_task(&tty->SAK_tq);
-}
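
The deferral idiom here is standard 2.4 practice: package the work in a tq_struct and hand it to keventd with schedule_task() instead of doing it in interrupt context. A stripped-down sketch of the same pattern, with illustrative names (my_worker, my_tq, my_irq_event are not from this file):

	#include <linux/tqueue.h>

	static void my_worker(void *data);  /* runs later in process context */
	static struct tq_struct my_tq;

	static void my_irq_event(void *data)
	{
		/* safe from interrupt context: this only queues the work */
		PREPARE_TQUEUE(&my_tq, my_worker, data);
		schedule_task(&my_tq);
	}
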
-
-/*
- * This routine is called out of the software interrupt to flush data
- * from the flip buffer to the line discipline.
- */
-static void flush_to_ldisc(void *private_)
-{
- struct tty_struct *tty = (struct tty_struct *) private_;
- unsigned char *cp;
- char *fp;
- int count;
- unsigned long flags;
- struct tty_ldisc *disc;
-
- disc = tty_ldisc_ref(tty);
- if (disc == NULL) /* !TTY_LDISC */
- return;
-
- if (test_bit(TTY_DONT_FLIP, &tty->flags)) {
- queue_task(&tty->flip.tqueue, &tq_timer);
- goto out;
- }
- if (tty->flip.buf_num) {
- cp = tty->flip.char_buf + TTY_FLIPBUF_SIZE;
- fp = tty->flip.flag_buf + TTY_FLIPBUF_SIZE;
- tty->flip.buf_num = 0;
-
- save_flags(flags); cli();
- tty->flip.char_buf_ptr = tty->flip.char_buf;
- tty->flip.flag_buf_ptr = tty->flip.flag_buf;
- } else {
- cp = tty->flip.char_buf;
- fp = tty->flip.flag_buf;
- tty->flip.buf_num = 1;
-
- save_flags(flags); cli();
- tty->flip.char_buf_ptr = tty->flip.char_buf + TTY_FLIPBUF_SIZE;
- tty->flip.flag_buf_ptr = tty->flip.flag_buf + TTY_FLIPBUF_SIZE;
- }
- count = tty->flip.count;
- tty->flip.count = 0;
- restore_flags(flags);
-
- disc->receive_buf(tty, cp, fp, count);
-out:
- tty_ldisc_deref(disc);
-}
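
The flip buffer is a plain double buffer: the interrupt side fills the half selected by buf_num while this routine hands the other half to the line discipline. The same idea in isolation, as a sketch (HALF and the process callback are illustrative):

	#define HALF 512

	struct flip {
		unsigned char buf[2 * HALF];
		int buf_num;                /* half the producer is filling */
		int count;
	};

	/* Consumer side: claim the half being filled, retarget the
	 * producer at the other half, then process the claimed data
	 * outside the critical section (the kernel code brackets the
	 * swap with save_flags()/cli()). */
	static void flip_and_process(struct flip *f,
				     void (*process)(unsigned char *, int))
	{
		unsigned char *cp = f->buf + (f->buf_num ? HALF : 0);
		int n;

		f->buf_num = !f->buf_num;
		n = f->count;
		f->count = 0;
		process(cp, n);
	}
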
-
-/*
- * Call the ldisc flush directly from a driver. This function may
- * return an error and need retrying by the user.
- */
-
-int tty_push_data(struct tty_struct *tty, unsigned char *cp, unsigned char *fp, int count)
-{
- int ret = 0;
- struct tty_ldisc *disc;
-
- disc = tty_ldisc_ref(tty);
- if(test_bit(TTY_DONT_FLIP, &tty->flags))
- ret = -EAGAIN;
- else if(disc == NULL)
- ret = -EIO;
- else
- disc->receive_buf(tty, cp, fp, count);
- tty_ldisc_deref(disc);
- return ret;
-
-}
-
-/*
- * Routine which returns the baud rate of the tty
- *
- * Note that the baud_table needs to be kept in sync with the
- * include/asm/termbits.h file.
- */
-static int baud_table[] = {
- 0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
- 9600, 19200, 38400, 57600, 115200, 230400, 460800,
-#ifdef __sparc__
- 76800, 153600, 307200, 614400, 921600
-#else
- 500000, 576000, 921600, 1000000, 1152000, 1500000, 2000000,
- 2500000, 3000000, 3500000, 4000000
-#endif
-};
-
-static int n_baud_table = sizeof(baud_table)/sizeof(int);
-
-int tty_get_baud_rate(struct tty_struct *tty)
-{
- unsigned int cflag, i;
-
- cflag = tty->termios->c_cflag;
-
- i = cflag & CBAUD;
- if (i & CBAUDEX) {
- i &= ~CBAUDEX;
- if (i < 1 || i+15 >= n_baud_table)
- tty->termios->c_cflag &= ~CBAUDEX;
- else
- i += 15;
- }
- if (i==15 && tty->alt_speed) {
- if (!tty->warned) {
- printk(KERN_WARNING "Use of setserial/setrocket to "
- "set SPD_* flags is deprecated\n");
- tty->warned = 1;
- }
- return(tty->alt_speed);
- }
-
- return baud_table[i];
-}
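
A worked example of the lookup above, assuming the i386 termbits.h encodings (octal CBAUD = 0010017, CBAUDEX = 0010000):

	B38400 = 0000017  ->  i = 15                       ->  baud_table[15] = 38400
	B57600 = 0010001  ->  i &= ~CBAUDEX -> 1, i += 15  ->  baud_table[16] = 57600
	B38400 with tty->alt_speed set (setserial SPD_* flags): i == 15, so alt_speed is returned instead
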
-
-void tty_flip_buffer_push(struct tty_struct *tty)
-{
- if (tty->low_latency)
- flush_to_ldisc((void *) tty);
- else
- queue_task(&tty->flip.tqueue, &tq_timer);
-}
-
-/*
- * This subroutine initializes a tty structure.
- */
-static void initialize_tty_struct(struct tty_struct *tty)
-{
- memset(tty, 0, sizeof(struct tty_struct));
- tty->magic = TTY_MAGIC;
- tty_ldisc_assign(tty, tty_ldisc_get(N_TTY));
- tty->pgrp = -1;
- tty->flip.char_buf_ptr = tty->flip.char_buf;
- tty->flip.flag_buf_ptr = tty->flip.flag_buf;
- tty->flip.tqueue.routine = flush_to_ldisc;
- tty->flip.tqueue.data = tty;
- init_MUTEX(&tty->flip.pty_sem);
- init_MUTEX(&tty->termios_sem);
- init_waitqueue_head(&tty->write_wait);
- init_waitqueue_head(&tty->read_wait);
- tty->tq_hangup.routine = do_tty_hangup;
- tty->tq_hangup.data = tty;
- sema_init(&tty->atomic_read, 1);
- sema_init(&tty->atomic_write, 1);
- spin_lock_init(&tty->read_lock);
- INIT_LIST_HEAD(&tty->tty_files);
- INIT_TQUEUE(&tty->SAK_tq, 0, 0);
-}
-
-/*
- * The default put_char routine if the driver did not define one.
- */
-void tty_default_put_char(struct tty_struct *tty, unsigned char ch)
-{
- tty->driver.write(tty, 0, &ch, 1);
-}
-
-/*
- * Register a tty device described by <driver>, with minor number <minor>.
- */
-void tty_register_devfs (struct tty_driver *driver, unsigned int flags, unsigned minor)
-{
-#ifdef CONFIG_DEVFS_FS
- umode_t mode = S_IFCHR | S_IRUSR | S_IWUSR;
- kdev_t device = MKDEV (driver->major, minor);
- int idx = minor - driver->minor_start;
- char buf[32];
-
- switch (device) {
- case TTY_DEV:
- case PTMX_DEV:
- mode |= S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH;
- break;
- default:
- if (driver->major == PTY_MASTER_MAJOR)
- mode |= S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH;
- break;
- }
- if ( (minor < driver->minor_start) ||
- (minor >= driver->minor_start + driver->num) ) {
- printk(KERN_ERR "Attempt to register invalid minor number "
- "with devfs (%d:%d).\n", (int)driver->major,(int)minor);
- return;
- }
-# ifdef CONFIG_UNIX98_PTYS
- if ( (driver->major >= UNIX98_PTY_SLAVE_MAJOR) &&
- (driver->major < UNIX98_PTY_SLAVE_MAJOR + UNIX98_NR_MAJORS) )
- flags |= DEVFS_FL_CURRENT_OWNER;
-# endif
- sprintf(buf, driver->name, idx + driver->name_base);
- devfs_register (NULL, buf, flags | DEVFS_FL_DEFAULT,
- driver->major, minor, mode, &tty_fops, NULL);
-#endif /* CONFIG_DEVFS_FS */
-}
-
-void tty_unregister_devfs (struct tty_driver *driver, unsigned minor)
-{
-#ifdef CONFIG_DEVFS_FS
- void * handle;
- int idx = minor - driver->minor_start;
- char buf[32];
-
- sprintf(buf, driver->name, idx + driver->name_base);
- handle = devfs_find_handle (NULL, buf, driver->major, minor,
- DEVFS_SPECIAL_CHR, 0);
- devfs_unregister (handle);
-#endif /* CONFIG_DEVFS_FS */
-}
-
-EXPORT_SYMBOL(tty_register_devfs);
-EXPORT_SYMBOL(tty_unregister_devfs);
-
-/*
- * Called by a tty driver to register itself.
- */
-int tty_register_driver(struct tty_driver *driver)
-{
- int error;
- int i;
-
- if (driver->flags & TTY_DRIVER_INSTALLED)
- return 0;
-
- error = devfs_register_chrdev(driver->major, driver->name, &tty_fops);
- if (error < 0)
- return error;
- else if(driver->major == 0)
- driver->major = error;
-
- if (!driver->put_char)
- driver->put_char = tty_default_put_char;
-
- driver->prev = 0;
- driver->next = tty_drivers;
- if (tty_drivers) tty_drivers->prev = driver;
- tty_drivers = driver;
-
- if ( !(driver->flags & TTY_DRIVER_NO_DEVFS) ) {
- for(i = 0; i < driver->num; i++)
- tty_register_devfs(driver, 0, driver->minor_start + i);
- }
- proc_tty_register_driver(driver);
- return error;
-}
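
For comparison, a minimal registration in the style of dev_tty_driver from tty_init() below. This is a sketch with illustrative names; a real driver would also supply open/close/write methods:

	#include <linux/init.h>
	#include <linux/string.h>
	#include <linux/tty.h>

	static struct tty_struct *my_table[1];
	static struct termios *my_termios[1];
	static struct termios *my_termios_locked[1];
	static int my_refcount;
	static struct tty_driver my_driver;

	static int __init my_tty_module_init(void)
	{
		memset(&my_driver, 0, sizeof(my_driver));
		my_driver.magic = TTY_DRIVER_MAGIC;
		my_driver.driver_name = "mytty";
		my_driver.name = "ttyM";
		my_driver.major = 0;    /* 0: devfs_register_chrdev picks one */
		my_driver.num = 1;
		my_driver.type = TTY_DRIVER_TYPE_SERIAL;
		my_driver.subtype = SERIAL_TYPE_NORMAL;
		my_driver.init_termios = tty_std_termios;
		my_driver.refcount = &my_refcount;
		my_driver.table = my_table;
		my_driver.termios = my_termios;
		my_driver.termios_locked = my_termios_locked;
		/* my_driver.open / .close / .write = ... */
		return tty_register_driver(&my_driver);
	}
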
-
-/*
- * Called by a tty driver to unregister itself.
- */
-int tty_unregister_driver(struct tty_driver *driver)
-{
- int retval;
- struct tty_driver *p;
- int i, found = 0;
- struct termios *tp;
- const char *othername = NULL;
-
- if (*driver->refcount)
- return -EBUSY;
-
- for (p = tty_drivers; p; p = p->next) {
- if (p == driver)
- found++;
- else if (p->major == driver->major)
- othername = p->name;
- }
-
- if (!found)
- return -ENOENT;
-
- if (othername == NULL) {
- retval = devfs_unregister_chrdev(driver->major, driver->name);
- if (retval)
- return retval;
- } else
- devfs_register_chrdev(driver->major, othername, &tty_fops);
-
- if (driver->prev)
- driver->prev->next = driver->next;
- else
- tty_drivers = driver->next;
-
- if (driver->next)
- driver->next->prev = driver->prev;
-
- /*
- * Free the termios and termios_locked structures because
- * we don't want to get memory leaks when modular tty
- * drivers are removed from the kernel.
- */
- for (i = 0; i < driver->num; i++) {
- tp = driver->termios[i];
- if (tp) {
- driver->termios[i] = NULL;
- kfree(tp);
- }
- tp = driver->termios_locked[i];
- if (tp) {
- driver->termios_locked[i] = NULL;
- kfree(tp);
- }
- tty_unregister_devfs(driver, driver->minor_start + i);
- }
- proc_tty_unregister_driver(driver);
- return 0;
-}
-
-
-/*
- * Initialize the console device. This is called *early*, so
- * we can't necessarily depend on lots of kernel help here.
- * Just do some early initializations, and do the complex setup
- * later.
- */
-void __init console_init(void)
-{
- /* Setup the default TTY line discipline. */
- memset(tty_ldiscs, 0, NR_LDISCS*sizeof(struct tty_ldisc));
- (void) tty_register_ldisc(N_TTY, &tty_ldisc_N_TTY);
-
- /*
- * Set up the standard termios. Individual tty drivers may
- * deviate from this; this is used as a template.
- */
- memset(&tty_std_termios, 0, sizeof(struct termios));
- memcpy(tty_std_termios.c_cc, INIT_C_CC, NCCS);
- tty_std_termios.c_iflag = ICRNL | IXON;
- tty_std_termios.c_oflag = OPOST | ONLCR;
- tty_std_termios.c_cflag = B38400 | CS8 | CREAD | HUPCL;
- tty_std_termios.c_lflag = ISIG | ICANON | ECHO | ECHOE | ECHOK |
- ECHOCTL | ECHOKE | IEXTEN;
-
- /*
- * set up the console device so that later boot sequences can
- * inform about problems etc..
- */
-#ifdef CONFIG_EARLY_PRINTK
- disable_early_printk();
-#endif
-
-#ifdef CONFIG_XEN_CONSOLE
- xen_console_init();
-#endif
-
-#ifdef CONFIG_VT
- con_init();
-#endif
-#ifdef CONFIG_AU1X00_SERIAL_CONSOLE
- au1x00_serial_console_init();
-#endif
-#ifdef CONFIG_SERIAL_CONSOLE
-#if (defined(CONFIG_8xx) || defined(CONFIG_CPM2))
- console_8xx_init();
-#elif defined(CONFIG_MAC_SERIAL) && defined(CONFIG_SERIAL)
- if (_machine == _MACH_Pmac)
- mac_scc_console_init();
- else
- serial_console_init();
-#elif defined(CONFIG_MAC_SERIAL)
- mac_scc_console_init();
-#elif defined(CONFIG_PARISC)
- pdc_console_init();
-#elif defined(CONFIG_SERIAL)
- serial_console_init();
-#endif /* CONFIG_8xx */
-#if defined(CONFIG_MVME162_SCC) || defined(CONFIG_BVME6000_SCC) || defined(CONFIG_MVME147_SCC)
- vme_scc_console_init();
-#endif
-#if defined(CONFIG_SERIAL167)
- serial167_console_init();
-#endif
-#if defined(CONFIG_SH_SCI)
- sci_console_init();
-#endif
-#endif
-#ifdef CONFIG_SERIAL_DEC_CONSOLE
- dec_serial_console_init();
-#endif
-#ifdef CONFIG_TN3270_CONSOLE
- tub3270_con_init();
-#endif
-#ifdef CONFIG_TN3215
- con3215_init();
-#endif
-#ifdef CONFIG_HWC
- hwc_console_init();
-#endif
-#ifdef CONFIG_STDIO_CONSOLE
- stdio_console_init();
-#endif
-#ifdef CONFIG_SERIAL_21285_CONSOLE
- rs285_console_init();
-#endif
-#ifdef CONFIG_SERIAL_SA1100_CONSOLE
- sa1100_rs_console_init();
-#endif
-#ifdef CONFIG_ARC_CONSOLE
- arc_console_init();
-#endif
-#ifdef CONFIG_SERIAL_AMBA_CONSOLE
- ambauart_console_init();
-#endif
-#ifdef CONFIG_SERIAL_TX3912_CONSOLE
- tx3912_console_init();
-#endif
-#ifdef CONFIG_TXX927_SERIAL_CONSOLE
- txx927_console_init();
-#endif
-#ifdef CONFIG_SERIAL_TXX9_CONSOLE
- txx9_serial_console_init();
-#endif
-#ifdef CONFIG_SIBYTE_SB1250_DUART_CONSOLE
- sb1250_serial_console_init();
-#endif
-#ifdef CONFIG_IP22_SERIAL
- sgi_serial_console_init();
-#endif
-}
-
-static struct tty_driver dev_tty_driver, dev_syscons_driver;
-#ifdef CONFIG_UNIX98_PTYS
-static struct tty_driver dev_ptmx_driver;
-#endif
-#ifdef CONFIG_HVC_CONSOLE
- hvc_console_init();
-#endif
-#ifdef CONFIG_VT
-static struct tty_driver dev_console_driver;
-#endif
-
-/*
- * Ok, now we can initialize the rest of the tty devices and can count
- * on memory allocations, interrupts etc..
- */
-void __init tty_init(void)
-{
- /*
- * dev_tty_driver and dev_console_driver are actually magic
- * devices which get redirected at open time. Nevertheless,
- * we register them so that register_chrdev is called
- * appropriately.
- */
- memset(&dev_tty_driver, 0, sizeof(struct tty_driver));
- dev_tty_driver.magic = TTY_DRIVER_MAGIC;
- dev_tty_driver.driver_name = "/dev/tty";
- dev_tty_driver.name = dev_tty_driver.driver_name + 5;
- dev_tty_driver.name_base = 0;
- dev_tty_driver.major = TTYAUX_MAJOR;
- dev_tty_driver.minor_start = 0;
- dev_tty_driver.num = 1;
- dev_tty_driver.type = TTY_DRIVER_TYPE_SYSTEM;
- dev_tty_driver.subtype = SYSTEM_TYPE_TTY;
-
- if (tty_register_driver(&dev_tty_driver))
- panic("Couldn't register /dev/tty driver\n");
-
- dev_syscons_driver = dev_tty_driver;
- dev_syscons_driver.driver_name = "/dev/console";
- dev_syscons_driver.name = dev_syscons_driver.driver_name + 5;
- dev_syscons_driver.major = TTYAUX_MAJOR;
- dev_syscons_driver.minor_start = 1;
- dev_syscons_driver.type = TTY_DRIVER_TYPE_SYSTEM;
- dev_syscons_driver.subtype = SYSTEM_TYPE_SYSCONS;
-
- if (tty_register_driver(&dev_syscons_driver))
- panic("Couldn't register /dev/console driver\n");
-
- /* console calls tty_register_driver() before kmalloc() works.
- * Thus, we can't devfs_register() then. Do so now, instead.
- */
-#ifdef CONFIG_VT
- con_init_devfs();
-#endif
-
-#ifdef CONFIG_UNIX98_PTYS
- dev_ptmx_driver = dev_tty_driver;
- dev_ptmx_driver.driver_name = "/dev/ptmx";
- dev_ptmx_driver.name = dev_ptmx_driver.driver_name + 5;
- dev_ptmx_driver.major= MAJOR(PTMX_DEV);
- dev_ptmx_driver.minor_start = MINOR(PTMX_DEV);
- dev_ptmx_driver.type = TTY_DRIVER_TYPE_SYSTEM;
- dev_ptmx_driver.subtype = SYSTEM_TYPE_SYSPTMX;
-
- if (tty_register_driver(&dev_ptmx_driver))
- panic("Couldn't register /dev/ptmx driver\n");
-#endif
-
-#ifdef CONFIG_VT
- dev_console_driver = dev_tty_driver;
- dev_console_driver.driver_name = "/dev/vc/0";
- dev_console_driver.name = dev_console_driver.driver_name + 5;
- dev_console_driver.major = TTY_MAJOR;
- dev_console_driver.type = TTY_DRIVER_TYPE_SYSTEM;
- dev_console_driver.subtype = SYSTEM_TYPE_CONSOLE;
-
- if (tty_register_driver(&dev_console_driver))
- panic("Couldn't register /dev/tty0 driver\n");
-
- kbd_init();
-#endif
-
-#ifdef CONFIG_SGI_L1_SERIAL_CONSOLE
- if (ia64_platform_is("sn2")) {
- sn_sal_serial_console_init();
- return; /* only one console right now for SN2 */
- }
-#endif
-#ifdef CONFIG_ESPSERIAL /* init ESP before rs, so rs doesn't see the port */
- espserial_init();
-#endif
-#if defined(CONFIG_MVME162_SCC) || defined(CONFIG_BVME6000_SCC) || defined(CONFIG_MVME147_SCC)
- vme_scc_init();
-#endif
-#ifdef CONFIG_SERIAL_TX3912
- tx3912_rs_init();
-#endif
-#ifdef CONFIG_ROCKETPORT
- rp_init();
-#endif
-#ifdef CONFIG_SERIAL167
- serial167_init();
-#endif
-#ifdef CONFIG_CYCLADES
- cy_init();
-#endif
-#ifdef CONFIG_STALLION
- stl_init();
-#endif
-#ifdef CONFIG_ISTALLION
- stli_init();
-#endif
-#ifdef CONFIG_DIGI
- pcxe_init();
-#endif
-#ifdef CONFIG_DIGIEPCA
- pc_init();
-#endif
-#ifdef CONFIG_SPECIALIX
- specialix_init();
-#endif
-#if (defined(CONFIG_8xx) || defined(CONFIG_CPM2))
- rs_8xx_init();
-#endif /* CONFIG_8xx */
- pty_init();
-#ifdef CONFIG_MOXA_SMARTIO
- mxser_init();
-#endif
-#ifdef CONFIG_MOXA_INTELLIO
- moxa_init();
-#endif
-#ifdef CONFIG_VT
- vcs_init();
-#endif
-#ifdef CONFIG_TN3270
- tub3270_init();
-#endif
-#ifdef CONFIG_TN3215
- tty3215_init();
-#endif
-#ifdef CONFIG_HWC
- hwc_tty_init();
-#endif
-#ifdef CONFIG_A2232
- a2232board_init();
-#endif
-}
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/drivers/scsi/aic7xxx/Makefile
--- a/linux-2.4.30-xen-sparse/drivers/scsi/aic7xxx/Makefile Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,97 +0,0 @@
-#
-# drivers/scsi/aic7xxx/Makefile
-#
-# Makefile for the Linux aic7xxx SCSI driver.
-#
-
-O_TARGET := aic7xxx_drv.o
-
-list-multi := aic7xxx.o aic79xx.o
-
-obj-$(CONFIG_SCSI_AIC7XXX) += aic7xxx.o
-ifeq ($(CONFIG_PCI),y)
-obj-$(CONFIG_SCSI_AIC79XX) += aic79xx.o
-endif
-
-EXTRA_CFLAGS += -I$(TOPDIR)/drivers/scsi -Werror
-#EXTRA_CFLAGS += -g
-
-# Platform Specific Files
-obj-aic7xxx = aic7xxx_osm.o aic7xxx_proc.o
-
-# Core Files
-obj-aic7xxx += aic7xxx_core.o aic7xxx_93cx6.o
-ifeq ($(CONFIG_AIC7XXX_REG_PRETTY_PRINT),y)
-obj-aic7xxx += aic7xxx_reg_print.o
-endif
-
-#EISA Specific Files
-AIC7XXX_EISA_ARCH = $(filter i386 alpha xen,$(ARCH))
-ifneq ($(AIC7XXX_EISA_ARCH),)
-obj-aic7xxx += aic7770.o
-# Platform Specific EISA Files
-obj-aic7xxx += aic7770_osm.o
-endif
-
-#PCI Specific Files
-ifeq ($(CONFIG_PCI),y)
-obj-aic7xxx += aic7xxx_pci.o
-# Platform Specific PCI Files
-obj-aic7xxx += aic7xxx_osm_pci.o
-endif
-
-# Platform Specific U320 Files
-obj-aic79xx = aic79xx_osm.o aic79xx_proc.o aic79xx_osm_pci.o
-# Core Files
-obj-aic79xx += aic79xx_core.o aic79xx_pci.o
-ifeq ($(CONFIG_AIC79XX_REG_PRETTY_PRINT),y)
-obj-aic79xx += aic79xx_reg_print.o
-endif
-
-# Override our module destination
-MOD_DESTDIR = $(shell cd .. && $(CONFIG_SHELL) $(TOPDIR)/scripts/pathdown.sh)
-
-include $(TOPDIR)/Rules.make
-
-aic7xxx_core.o: aic7xxx_seq.h
-$(obj-aic7xxx): aic7xxx_reg.h
-aic7xxx.o: aic7xxx_seq.h aic7xxx_reg.h $(obj-aic7xxx)
- $(LD) $(LD_RFLAG) -r -o $@ $(obj-aic7xxx)
-
-aic79xx_core.o: aic79xx_seq.h
-$(obj-aic79xx): aic79xx_reg.h
-aic79xx.o: aic79xx_seq.h aic79xx_reg.h $(obj-aic79xx)
- $(LD) $(LD_RFLAG) -r -o $@ $(obj-aic79xx)
-
-ifeq ($(CONFIG_AIC7XXX_BUILD_FIRMWARE),y)
-aic7xxx_gen = aic7xxx_seq.h aic7xxx_reg.h
-ifeq ($(CONFIG_AIC7XXX_REG_PRETTY_PRINT),y)
-aic7xxx_gen += aic7xxx_reg_print.c
-aic7xxx_asm_cmd = aicasm/aicasm -I. -r aic7xxx_reg.h \
- -p aic7xxx_reg_print.c -i aic7xxx_osm.h \
- -o aic7xxx_seq.h aic7xxx.seq
-else
-aic7xxx_asm_cmd = aicasm/aicasm -I. -r aic7xxx_reg.h \
- -o aic7xxx_seq.h aic7xxx.seq
-endif
-$(aic7xxx_gen): aic7xxx.seq aic7xxx.reg aicasm/aicasm
- $(aic7xxx_asm_cmd)
-endif
-
-ifeq ($(CONFIG_AIC79XX_BUILD_FIRMWARE),y)
-aic79xx_gen = aic79xx_seq.h aic79xx_reg.h
-ifeq ($(CONFIG_AIC79XX_REG_PRETTY_PRINT),y)
-aic79xx_gen += aic79xx_reg_print.c
-aic79xx_asm_cmd = aicasm/aicasm -I. -r aic79xx_reg.h \
- -p aic79xx_reg_print.c -i aic79xx_osm.h \
- -o aic79xx_seq.h aic79xx.seq
-else
-aic79xx_asm_cmd = aicasm/aicasm -I. -r aic79xx_reg.h \
- -o aic79xx_seq.h aic79xx.seq
-endif
-$(aic79xx_gen): aic79xx.seq aic79xx.reg aicasm/aicasm
- $(aic79xx_asm_cmd)
-endif
-
-aicasm/aicasm: aicasm/*.[chyl]
- $(MAKE) -C aicasm
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/fs/exec.c
--- a/linux-2.4.30-xen-sparse/fs/exec.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,1179 +0,0 @@
-/*
- * linux/fs/exec.c
- *
- * Copyright (C) 1991, 1992 Linus Torvalds
- */
-
-/*
- * #!-checking implemented by tytso.
- */
-/*
- * Demand-loading implemented 01.12.91 - no need to read anything but
- * the header into memory. The inode of the executable is put into
- * "current->executable", and page faults do the actual loading. Clean.
- *
- * Once more I can proudly say that linux stood up to being changed: it
- * was less than 2 hours work to get demand-loading completely implemented.
- *
- * Demand loading changed July 1993 by Eric Youngdale. Use mmap instead,
- * current->executable is only used by the procfs. This allows a dispatch
- * table to check for several different types of binary formats. We keep
- * trying until we recognize the file or we run out of supported binary
- * formats.
- */
-
-#include <linux/config.h>
-#include <linux/slab.h>
-#include <linux/file.h>
-#include <linux/mman.h>
-#include <linux/a.out.h>
-#include <linux/stat.h>
-#include <linux/fcntl.h>
-#include <linux/smp_lock.h>
-#include <linux/init.h>
-#include <linux/pagemap.h>
-#include <linux/highmem.h>
-#include <linux/spinlock.h>
-#include <linux/personality.h>
-#include <linux/swap.h>
-#include <linux/utsname.h>
-#define __NO_VERSION__
-#include <linux/module.h>
-
-#include <asm/uaccess.h>
-#include <asm/pgalloc.h>
-#include <asm/mmu_context.h>
-
-#ifdef CONFIG_KMOD
-#include <linux/kmod.h>
-#endif
-
-int core_uses_pid;
-char core_pattern[65] = "core";
-int core_setuid_ok = 0;
-/* The maximal length of core_pattern is also specified in sysctl.c */
-
-static struct linux_binfmt *formats;
-static rwlock_t binfmt_lock = RW_LOCK_UNLOCKED;
-
-int register_binfmt(struct linux_binfmt * fmt)
-{
- struct linux_binfmt ** tmp = &formats;
-
- if (!fmt)
- return -EINVAL;
- if (fmt->next)
- return -EBUSY;
- write_lock(&binfmt_lock);
- while (*tmp) {
- if (fmt == *tmp) {
- write_unlock(&binfmt_lock);
- return -EBUSY;
- }
- tmp = &(*tmp)->next;
- }
- fmt->next = formats;
- formats = fmt;
- write_unlock(&binfmt_lock);
- return 0;
-}
-
-int unregister_binfmt(struct linux_binfmt * fmt)
-{
- struct linux_binfmt ** tmp = &formats;
-
- write_lock(&binfmt_lock);
- while (*tmp) {
- if (fmt == *tmp) {
- *tmp = fmt->next;
- write_unlock(&binfmt_lock);
- return 0;
- }
- tmp = &(*tmp)->next;
- }
- write_unlock(&binfmt_lock);
- return -EINVAL;
-}
-
-static inline void put_binfmt(struct linux_binfmt * fmt)
-{
- if (fmt->module)
- __MOD_DEC_USE_COUNT(fmt->module);
-}
-
-/*
- * Note that a shared library must be both readable and executable due to
- * security reasons.
- *
- * Also note that we take the address to load from from the file itself.
- */
-asmlinkage long sys_uselib(const char * library)
-{
- struct file * file;
- struct nameidata nd;
- int error;
-
- error = user_path_walk(library, &nd);
- if (error)
- goto out;
-
- error = -EINVAL;
- if (!S_ISREG(nd.dentry->d_inode->i_mode))
- goto exit;
-
- error = permission(nd.dentry->d_inode, MAY_READ | MAY_EXEC);
- if (error)
- goto exit;
-
- file = dentry_open(nd.dentry, nd.mnt, O_RDONLY);
- error = PTR_ERR(file);
- if (IS_ERR(file))
- goto out;
-
- error = -ENOEXEC;
- if(file->f_op && file->f_op->read) {
- struct linux_binfmt * fmt;
-
- read_lock(&binfmt_lock);
- for (fmt = formats ; fmt ; fmt = fmt->next) {
- if (!fmt->load_shlib)
- continue;
- if (!try_inc_mod_count(fmt->module))
- continue;
- read_unlock(&binfmt_lock);
- error = fmt->load_shlib(file);
- read_lock(&binfmt_lock);
- put_binfmt(fmt);
- if (error != -ENOEXEC)
- break;
- }
- read_unlock(&binfmt_lock);
- }
- fput(file);
-out:
- return error;
-exit:
- path_release(&nd);
- goto out;
-}
-
-/*
- * count() counts the number of arguments/envelopes
- */
-static int count(char ** argv, int max)
-{
- int i = 0;
-
- if (argv != NULL) {
- for (;;) {
- char * p;
-
- if (get_user(p, argv))
- return -EFAULT;
- if (!p)
- break;
- argv++;
- if(++i > max)
- return -E2BIG;
- }
- }
- return i;
-}
-
-/*
- * 'copy_strings()' copies argument/envelope strings from user
- * memory to free pages in kernel mem. These are in a format ready
- * to be put directly into the top of new user memory.
- */
-int copy_strings(int argc,char ** argv, struct linux_binprm *bprm)
-{
- struct page *kmapped_page = NULL;
- char *kaddr = NULL;
- int ret;
-
- while (argc-- > 0) {
- char *str;
- int len;
- unsigned long pos;
-
- if (get_user(str, argv+argc) ||
- !(len = strnlen_user(str, bprm->p))) {
- ret = -EFAULT;
- goto out;
- }
-
- if (bprm->p < len) {
- ret = -E2BIG;
- goto out;
- }
-
- bprm->p -= len;
- /* XXX: add architecture specific overflow check here. */
- pos = bprm->p;
-
- while (len > 0) {
- int i, new, err;
- int offset, bytes_to_copy;
- struct page *page;
-
- offset = pos % PAGE_SIZE;
- i = pos/PAGE_SIZE;
- page = bprm->page[i];
- new = 0;
- if (!page) {
- page = alloc_page(GFP_HIGHUSER);
- bprm->page[i] = page;
- if (!page) {
- ret = -ENOMEM;
- goto out;
- }
- new = 1;
- }
-
- if (page != kmapped_page) {
- if (kmapped_page)
- kunmap(kmapped_page);
- kmapped_page = page;
- kaddr = kmap(kmapped_page);
- }
- if (new && offset)
- memset(kaddr, 0, offset);
- bytes_to_copy = PAGE_SIZE - offset;
- if (bytes_to_copy > len) {
- bytes_to_copy = len;
- if (new)
- memset(kaddr+offset+len, 0,
- PAGE_SIZE-offset-len);
- }
- err = copy_from_user(kaddr+offset, str, bytes_to_copy);
- if (err) {
- ret = -EFAULT;
- goto out;
- }
-
- pos += bytes_to_copy;
- str += bytes_to_copy;
- len -= bytes_to_copy;
- }
- }
- ret = 0;
-out:
- if (kmapped_page)
- kunmap(kmapped_page);
- return ret;
-}
-
-/*
- * Like copy_strings, but get argv and its values from kernel memory.
- */
-int copy_strings_kernel(int argc,char ** argv, struct linux_binprm *bprm)
-{
- int r;
- mm_segment_t oldfs = get_fs();
- set_fs(KERNEL_DS);
- r = copy_strings(argc, argv, bprm);
- set_fs(oldfs);
- return r;
-}
-
-/*
- * This routine is used to map in a page into an address space: needed by
- * execve() for the initial stack and environment pages.
- *
- * tsk->mmap_sem is held for writing.
- */
-void put_dirty_page(struct task_struct * tsk, struct page *page, unsigned long address)
-{
- pgd_t * pgd;
- pmd_t * pmd;
- pte_t * pte;
- struct vm_area_struct *vma;
- pgprot_t prot = PAGE_COPY;
-
- if (page_count(page) != 1)
- printk(KERN_ERR "mem_map disagrees with %p at %08lx\n", page, address);
- pgd = pgd_offset(tsk->mm, address);
-
- spin_lock(&tsk->mm->page_table_lock);
- pmd = pmd_alloc(tsk->mm, pgd, address);
- if (!pmd)
- goto out;
- pte = pte_alloc(tsk->mm, pmd, address);
- if (!pte)
- goto out;
- if (!pte_none(*pte))
- goto out;
- lru_cache_add(page);
- flush_dcache_page(page);
- flush_page_to_ram(page);
- /* lookup is cheap because there is only a single entry in the list */
- vma = find_vma(tsk->mm, address);
- if (vma)
- prot = vma->vm_page_prot;
- set_pte(pte, pte_mkdirty(pte_mkwrite(mk_pte(page, prot))));
- XEN_flush_page_update_queue();
- tsk->mm->rss++;
- spin_unlock(&tsk->mm->page_table_lock);
-
- /* no need for flush_tlb */
- return;
-out:
- spin_unlock(&tsk->mm->page_table_lock);
- __free_page(page);
- force_sig(SIGKILL, tsk);
- return;
-}
-
-int setup_arg_pages(struct linux_binprm *bprm)
-{
- unsigned long stack_base;
- struct vm_area_struct *mpnt;
- int i, ret;
-
- stack_base = STACK_TOP - MAX_ARG_PAGES*PAGE_SIZE;
-
- bprm->p += stack_base;
- if (bprm->loader)
- bprm->loader += stack_base;
- bprm->exec += stack_base;
-
- mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
- if (!mpnt)
- return -ENOMEM;
-
- down_write(&current->mm->mmap_sem);
- {
- mpnt->vm_mm = current->mm;
- mpnt->vm_start = PAGE_MASK & (unsigned long) bprm->p;
- mpnt->vm_end = STACK_TOP;
- mpnt->vm_flags = VM_STACK_FLAGS;
- mpnt->vm_page_prot = protection_map[VM_STACK_FLAGS & 0x7];
- mpnt->vm_ops = NULL;
- mpnt->vm_pgoff = 0;
- mpnt->vm_file = NULL;
- mpnt->vm_private_data = (void *) 0;
- if ((ret = insert_vm_struct(current->mm, mpnt))) {
- up_write(&current->mm->mmap_sem);
- kmem_cache_free(vm_area_cachep, mpnt);
- return ret;
- }
- current->mm->total_vm = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
- }
-
- for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
- struct page *page = bprm->page[i];
- if (page) {
- bprm->page[i] = NULL;
- put_dirty_page(current,page,stack_base);
- }
- stack_base += PAGE_SIZE;
- }
- up_write(&current->mm->mmap_sem);
-
- return 0;
-}
-
-struct file *open_exec(const char *name)
-{
- struct nameidata nd;
- struct inode *inode;
- struct file *file;
- int err = 0;
-
- err = path_lookup(name, LOOKUP_FOLLOW|LOOKUP_POSITIVE, &nd);
- file = ERR_PTR(err);
- if (!err) {
- inode = nd.dentry->d_inode;
- file = ERR_PTR(-EACCES);
- if (!(nd.mnt->mnt_flags & MNT_NOEXEC) &&
- S_ISREG(inode->i_mode)) {
- int err = permission(inode, MAY_EXEC);
- if (!err && !(inode->i_mode & 0111))
- err = -EACCES;
- file = ERR_PTR(err);
- if (!err) {
- file = dentry_open(nd.dentry, nd.mnt, O_RDONLY);
- if (!IS_ERR(file)) {
- err = deny_write_access(file);
- if (err) {
- fput(file);
- file = ERR_PTR(err);
- }
- }
-out:
- return file;
- }
- }
- path_release(&nd);
- }
- goto out;
-}
-
-int kernel_read(struct file *file, unsigned long offset,
- char * addr, unsigned long count)
-{
- mm_segment_t old_fs;
- loff_t pos = offset;
- int result = -ENOSYS;
-
- if (!file->f_op->read)
- goto fail;
- old_fs = get_fs();
- set_fs(get_ds());
- result = file->f_op->read(file, addr, count, &pos);
- set_fs(old_fs);
-fail:
- return result;
-}
-
-static int exec_mmap(void)
-{
- struct mm_struct * mm, * old_mm;
-
- old_mm = current->mm;
-
- if (old_mm && atomic_read(&old_mm->mm_users) == 1) {
- mm_release();
- down_write(&old_mm->mmap_sem);
- exit_mmap(old_mm);
- up_write(&old_mm->mmap_sem);
- return 0;
- }
-
-
- mm = mm_alloc();
- if (mm) {
- struct mm_struct *active_mm;
-
- if (init_new_context(current, mm)) {
- mmdrop(mm);
- return -ENOMEM;
- }
-
- /* Add it to the list of mm's */
- spin_lock(&mmlist_lock);
- list_add(&mm->mmlist, &init_mm.mmlist);
- mmlist_nr++;
- spin_unlock(&mmlist_lock);
-
- task_lock(current);
- active_mm = current->active_mm;
- current->mm = mm;
- current->active_mm = mm;
- task_unlock(current);
- activate_mm(active_mm, mm);
- mm_release();
- if (old_mm) {
- if (active_mm != old_mm) BUG();
- mmput(old_mm);
- return 0;
- }
- mmdrop(active_mm);
- return 0;
- }
- return -ENOMEM;
-}
-
-/*
- * This function makes sure the current process has its own signal table,
- * so that flush_signal_handlers can later reset the handlers without
- * disturbing other processes. (Other processes might share the signal
- * table via the CLONE_SIGNAL option to clone().)
- */
-
-static inline int make_private_signals(void)
-{
- struct signal_struct * newsig;
-
- if (atomic_read(&current->sig->count) <= 1)
- return 0;
- newsig = kmem_cache_alloc(sigact_cachep, GFP_KERNEL);
- if (newsig == NULL)
- return -ENOMEM;
- spin_lock_init(&newsig->siglock);
- atomic_set(&newsig->count, 1);
- memcpy(newsig->action, current->sig->action, sizeof(newsig->action));
- spin_lock_irq(&current->sigmask_lock);
- current->sig = newsig;
- spin_unlock_irq(&current->sigmask_lock);
- return 0;
-}
-
-/*
- * If make_private_signals() made a copy of the signal table, decrement the
- * refcount of the original table, and free it if necessary.
- * We don't do that in make_private_signals() so that we can back off
- * in flush_old_exec() if an error occurs after calling make_private_signals().
- */
-
-static inline void release_old_signals(struct signal_struct * oldsig)
-{
- if (current->sig == oldsig)
- return;
- if (atomic_dec_and_test(&oldsig->count))
- kmem_cache_free(sigact_cachep, oldsig);
-}
-
-/*
- * These functions flush out all traces of the currently running executable
- * so that a new one can be started
- */
-
-static inline void flush_old_files(struct files_struct * files)
-{
- long j = -1;
-
- write_lock(&files->file_lock);
- for (;;) {
- unsigned long set, i;
-
- j++;
- i = j * __NFDBITS;
- if (i >= files->max_fds || i >= files->max_fdset)
- break;
- set = files->close_on_exec->fds_bits[j];
- if (!set)
- continue;
- files->close_on_exec->fds_bits[j] = 0;
- write_unlock(&files->file_lock);
- for ( ; set ; i++,set >>= 1) {
- if (set & 1) {
- sys_close(i);
- }
- }
- write_lock(&files->file_lock);
-
- }
- write_unlock(&files->file_lock);
-}
-
-/*
- * An execve() will automatically "de-thread" the process.
- * Note: we don't have to hold the tasklist_lock to test
- * whether we might need to do this. If we're not part of
- * a thread group, there is no way we can become one
- * dynamically. And if we are, we only need to protect the
- * unlink - even if we race with the last other thread exit,
- * at worst the list_del_init() might end up being a no-op.
- */
-static inline void de_thread(struct task_struct *tsk)
-{
- if (!list_empty(&tsk->thread_group)) {
- write_lock_irq(&tasklist_lock);
- list_del_init(&tsk->thread_group);
- write_unlock_irq(&tasklist_lock);
- }
-
- /* Minor oddity: this might stay the same. */
- tsk->tgid = tsk->pid;
-}
-
-void get_task_comm(char *buf, struct task_struct *tsk)
-{
- /* buf must be at least sizeof(tsk->comm) in size */
- task_lock(tsk);
- memcpy(buf, tsk->comm, sizeof(tsk->comm));
- task_unlock(tsk);
-}
-
-void set_task_comm(struct task_struct *tsk, char *buf)
-{
- task_lock(tsk);
- strncpy(tsk->comm, buf, sizeof(tsk->comm));
- tsk->comm[sizeof(tsk->comm)-1]='\0';
- task_unlock(tsk);
-}
-
-int flush_old_exec(struct linux_binprm * bprm)
-{
- char * name;
- int i, ch, retval;
- struct signal_struct * oldsig;
- struct files_struct * files;
- char tcomm[sizeof(current->comm)];
-
- /*
- * Make sure we have a private signal table
- */
- oldsig = current->sig;
- retval = make_private_signals();
- if (retval) goto flush_failed;
-
- /*
- * Make sure we have private file handles. Ask the
- * fork helper to do the work for us and the exit
- * helper to do the cleanup of the old one.
- */
-
- files = current->files; /* refcounted so safe to hold */
- retval = unshare_files();
- if(retval)
- goto flush_failed;
-
- /*
- * Release all of the old mmap stuff
- */
- retval = exec_mmap();
- if (retval) goto mmap_failed;
-
- /* This is the point of no return */
- steal_locks(files);
- put_files_struct(files);
- release_old_signals(oldsig);
-
- current->sas_ss_sp = current->sas_ss_size = 0;
-
- if (current->euid == current->uid && current->egid == current->gid) {
- current->mm->dumpable = 1;
- current->task_dumpable = 1;
- }
- name = bprm->filename;
- for (i=0; (ch = *(name++)) != '\0';) {
- if (ch == '/')
- i = 0;
- else
- if (i < (sizeof(tcomm) - 1))
- tcomm[i++] = ch;
- }
- tcomm[i] = '\0';
- set_task_comm(current, tcomm);
-
- flush_thread();
-
- de_thread(current);
-
- if (bprm->e_uid != current->euid || bprm->e_gid != current->egid ||
- permission(bprm->file->f_dentry->d_inode,MAY_READ))
- current->mm->dumpable = 0;
-
- /* An exec changes our domain. We are no longer part of the thread
- group */
-
- current->self_exec_id++;
-
- flush_signal_handlers(current);
- flush_old_files(current->files);
-
- return 0;
-
-mmap_failed:
- put_files_struct(current->files);
- current->files = files;
-flush_failed:
- spin_lock_irq(&current->sigmask_lock);
- if (current->sig != oldsig) {
- kmem_cache_free(sigact_cachep, current->sig);
- current->sig = oldsig;
- }
- spin_unlock_irq(&current->sigmask_lock);
- return retval;
-}
-
-/*
- * We mustn't allow tracing of suid binaries, unless
- * the tracer has the capability to trace anything..
- */
-static inline int must_not_trace_exec(struct task_struct * p)
-{
- return (p->ptrace & PT_PTRACED) && !(p->ptrace & PT_PTRACE_CAP);
-}
-
-/*
- * Fill the binprm structure from the inode.
- * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
- */
-int prepare_binprm(struct linux_binprm *bprm)
-{
- int mode;
- struct inode * inode = bprm->file->f_dentry->d_inode;
-
- mode = inode->i_mode;
- /*
- * Check execute perms again - if the caller has CAP_DAC_OVERRIDE,
- * vfs_permission lets a non-executable through
- */
- if (!(mode & 0111)) /* with at least _one_ execute bit set */
- return -EACCES;
- if (bprm->file->f_op == NULL)
- return -EACCES;
-
- bprm->e_uid = current->euid;
- bprm->e_gid = current->egid;
-
- if(!(bprm->file->f_vfsmnt->mnt_flags & MNT_NOSUID)) {
- /* Set-uid? */
- if (mode & S_ISUID)
- bprm->e_uid = inode->i_uid;
-
- /* Set-gid? */
- /*
- * If setgid is set but no group execute bit then this
- * is a candidate for mandatory locking, not a setgid
- * executable.
- */
- if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))
- bprm->e_gid = inode->i_gid;
- }
-
- /* We don't have VFS support for capabilities yet */
- cap_clear(bprm->cap_inheritable);
- cap_clear(bprm->cap_permitted);
- cap_clear(bprm->cap_effective);
-
- /* To support inheritance of root-permissions and suid-root
- * executables under compatibility mode, we raise all three
- * capability sets for the file.
- *
- * If only the real uid is 0, we only raise the inheritable
- * and permitted sets of the executable file.
- */
-
- if (!issecure(SECURE_NOROOT)) {
- if (bprm->e_uid == 0 || current->uid == 0) {
- cap_set_full(bprm->cap_inheritable);
- cap_set_full(bprm->cap_permitted);
- }
- if (bprm->e_uid == 0)
- cap_set_full(bprm->cap_effective);
- }
-
- memset(bprm->buf,0,BINPRM_BUF_SIZE);
- return kernel_read(bprm->file,0,bprm->buf,BINPRM_BUF_SIZE);
-}
-
-/*
- * This function is used to produce the new IDs and capabilities
- * from the old ones and the file's capabilities.
- *
- * The formula used for evolving capabilities is:
- *
- * pI' = pI
- * (***) pP' = (fP & X) | (fI & pI)
- * pE' = pP' & fE [NB. fE is 0 or ~0]
- *
- * I=Inheritable, P=Permitted, E=Effective // p=process, f=file
- * ' indicates post-exec(), and X is the global 'cap_bset'.
- *
- */
-
-void compute_creds(struct linux_binprm *bprm)
-{
- kernel_cap_t new_permitted, working;
- int do_unlock = 0;
-
- new_permitted = cap_intersect(bprm->cap_permitted, cap_bset);
- working = cap_intersect(bprm->cap_inheritable,
- current->cap_inheritable);
- new_permitted = cap_combine(new_permitted, working);
-
- if (bprm->e_uid != current->uid || bprm->e_gid != current->gid ||
- !cap_issubset(new_permitted, current->cap_permitted)) {
- current->mm->dumpable = 0;
-
- lock_kernel();
- if (must_not_trace_exec(current)
- || atomic_read(&current->fs->count) > 1
- || atomic_read(&current->files->count) > 1
- || atomic_read(&current->sig->count) > 1) {
- if(!capable(CAP_SETUID)) {
- bprm->e_uid = current->uid;
- bprm->e_gid = current->gid;
- }
- if(!capable(CAP_SETPCAP)) {
- new_permitted = cap_intersect(new_permitted,
- current->cap_permitted);
- }
- }
- do_unlock = 1;
- }
-
-
- /* For init, we want to retain the capabilities set
- * in the init_task struct. Thus we skip the usual
- * capability rules */
- if (current->pid != 1) {
- current->cap_permitted = new_permitted;
- current->cap_effective =
- cap_intersect(new_permitted, bprm->cap_effective);
- }
-
- /* AUD: Audit candidate if current->cap_effective is set */
-
- current->suid = current->euid = current->fsuid = bprm->e_uid;
- current->sgid = current->egid = current->fsgid = bprm->e_gid;
-
- if(do_unlock)
- unlock_kernel();
- current->keep_capabilities = 0;
-}
-
-
-void remove_arg_zero(struct linux_binprm *bprm)
-{
- if (bprm->argc) {
- unsigned long offset;
- char * kaddr;
- struct page *page;
-
- offset = bprm->p % PAGE_SIZE;
- goto inside;
-
- while (bprm->p++, *(kaddr+offset++)) {
- if (offset != PAGE_SIZE)
- continue;
- offset = 0;
- kunmap(page);
-inside:
- page = bprm->page[bprm->p/PAGE_SIZE];
- kaddr = kmap(page);
- }
- kunmap(page);
- bprm->argc--;
- }
-}
-
-/*
- * cycle the list of binary format handlers, until one recognizes the image
- */
-int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
-{
- int try,retval=0;
- struct linux_binfmt *fmt;
-#ifdef __alpha__
- /* handle /sbin/loader.. */
- {
- struct exec * eh = (struct exec *) bprm->buf;
-
- if (!bprm->loader && eh->fh.f_magic == 0x183 &&
- (eh->fh.f_flags & 0x3000) == 0x3000)
- {
- struct file * file;
- unsigned long loader;
-
- allow_write_access(bprm->file);
- fput(bprm->file);
- bprm->file = NULL;
-
- loader = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);
-
- file = open_exec("/sbin/loader");
- retval = PTR_ERR(file);
- if (IS_ERR(file))
- return retval;
-
- /* Remember if the application is TASO. */
- bprm->sh_bang = eh->ah.entry < 0x100000000;
-
- bprm->file = file;
- bprm->loader = loader;
- retval = prepare_binprm(bprm);
- if (retval<0)
- return retval;
- /* should call search_binary_handler recursively here,
- but it does not matter */
- }
- }
-#endif
- /* kernel module loader fixup */
- /* so we don't try to run modprobe in kernel space. */
- set_fs(USER_DS);
- for (try=0; try<2; try++) {
- read_lock(&binfmt_lock);
- for (fmt = formats ; fmt ; fmt = fmt->next) {
- int (*fn)(struct linux_binprm *, struct pt_regs *) = fmt->load_binary;
- if (!fn)
- continue;
- if (!try_inc_mod_count(fmt->module))
- continue;
- read_unlock(&binfmt_lock);
- retval = fn(bprm, regs);
- if (retval >= 0) {
- put_binfmt(fmt);
- allow_write_access(bprm->file);
- if (bprm->file)
- fput(bprm->file);
- bprm->file = NULL;
- current->did_exec = 1;
- return retval;
- }
- read_lock(&binfmt_lock);
- put_binfmt(fmt);
- if (retval != -ENOEXEC)
- break;
- if (!bprm->file) {
- read_unlock(&binfmt_lock);
- return retval;
- }
- }
- read_unlock(&binfmt_lock);
- if (retval != -ENOEXEC) {
- break;
-#ifdef CONFIG_KMOD
- }else{
-#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
- char modname[20];
- if (printable(bprm->buf[0]) &&
- printable(bprm->buf[1]) &&
- printable(bprm->buf[2]) &&
- printable(bprm->buf[3]))
- break; /* -ENOEXEC */
- sprintf(modname, "binfmt-%04x", *(unsigned short *)(&bprm->buf[2]));
- request_module(modname);
-#endif
- }
- }
- return retval;
-}
-
-
-/*
- * sys_execve() executes a new program.
- */
-int do_execve(char * filename, char ** argv, char ** envp, struct pt_regs * regs)
-{
- struct linux_binprm bprm;
- struct file *file;
- int retval;
- int i;
-
- file = open_exec(filename);
-
- retval = PTR_ERR(file);
- if (IS_ERR(file))
- return retval;
-
- bprm.p = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);
- memset(bprm.page, 0, MAX_ARG_PAGES*sizeof(bprm.page[0]));
-
- bprm.file = file;
- bprm.filename = filename;
- bprm.sh_bang = 0;
- bprm.loader = 0;
- bprm.exec = 0;
- if ((bprm.argc = count(argv, bprm.p / sizeof(void *))) < 0) {
- allow_write_access(file);
- fput(file);
- return bprm.argc;
- }
-
- if ((bprm.envc = count(envp, bprm.p / sizeof(void *))) < 0) {
- allow_write_access(file);
- fput(file);
- return bprm.envc;
- }
-
- retval = prepare_binprm(&bprm);
- if (retval < 0)
- goto out;
-
- retval = copy_strings_kernel(1, &bprm.filename, &bprm);
- if (retval < 0)
- goto out;
-
- bprm.exec = bprm.p;
- retval = copy_strings(bprm.envc, envp, &bprm);
- if (retval < 0)
- goto out;
-
- retval = copy_strings(bprm.argc, argv, &bprm);
- if (retval < 0)
- goto out;
-
- retval = search_binary_handler(&bprm,regs);
- if (retval >= 0)
- /* execve success */
- return retval;
-
-out:
- /* Something went wrong, return the inode and free the argument pages*/
- allow_write_access(bprm.file);
- if (bprm.file)
- fput(bprm.file);
-
- for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
- struct page * page = bprm.page[i];
- if (page)
- __free_page(page);
- }
-
- return retval;
-}
-
-void set_binfmt(struct linux_binfmt *new)
-{
- struct linux_binfmt *old = current->binfmt;
- if (new && new->module)
- __MOD_INC_USE_COUNT(new->module);
- current->binfmt = new;
- if (old && old->module)
- __MOD_DEC_USE_COUNT(old->module);
-}
-
-#define CORENAME_MAX_SIZE 64
-
-/* format_corename will inspect the pattern parameter, and output a
- * name into corename, which must have space for at least
- * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
- */
-void format_corename(char *corename, const char *pattern, long signr)
-{
- const char *pat_ptr = pattern;
- char *out_ptr = corename;
- char *const out_end = corename + CORENAME_MAX_SIZE;
- int rc;
- int pid_in_pattern = 0;
-
- /* Repeat as long as we have more pattern to process and more output
- space */
- while (*pat_ptr) {
- if (*pat_ptr != '%') {
- if (out_ptr == out_end)
- goto out;
- *out_ptr++ = *pat_ptr++;
- } else {
- switch (*++pat_ptr) {
- case 0:
- goto out;
- /* Double percent, output one percent */
- case '%':
- if (out_ptr == out_end)
- goto out;
- *out_ptr++ = '%';
- break;
- /* pid */
- case 'p':
- pid_in_pattern = 1;
- rc = snprintf(out_ptr, out_end - out_ptr,
- "%d", current->pid);
- if (rc > out_end - out_ptr)
- goto out;
- out_ptr += rc;
- break;
- /* uid */
- case 'u':
- rc = snprintf(out_ptr, out_end - out_ptr,
- "%d", current->uid);
- if (rc > out_end - out_ptr)
- goto out;
- out_ptr += rc;
- break;
- /* gid */
- case 'g':
- rc = snprintf(out_ptr, out_end - out_ptr,
- "%d", current->gid);
- if (rc > out_end - out_ptr)
- goto out;
- out_ptr += rc;
- break;
- /* signal that caused the coredump */
- case 's':
- rc = snprintf(out_ptr, out_end - out_ptr,
- "%ld", signr);
- if (rc > out_end - out_ptr)
- goto out;
- out_ptr += rc;
- break;
- /* UNIX time of coredump */
- case 't': {
- struct timeval tv;
- do_gettimeofday(&tv);
- rc = snprintf(out_ptr, out_end - out_ptr,
- "%ld", tv.tv_sec);
- if (rc > out_end - out_ptr)
- goto out;
- out_ptr += rc;
- break;
- }
- /* hostname */
- case 'h':
- down_read(&uts_sem);
- rc = snprintf(out_ptr, out_end - out_ptr,
- "%s", system_utsname.nodename);
- up_read(&uts_sem);
- if (rc > out_end - out_ptr)
- goto out;
- out_ptr += rc;
- break;
- /* executable */
- case 'e':
- rc = snprintf(out_ptr, out_end - out_ptr,
- "%s", current->comm);
- if (rc > out_end - out_ptr)
- goto out;
- out_ptr += rc;
- break;
- default:
- break;
- }
- ++pat_ptr;
- }
- }
- /* Backward compatibility with core_uses_pid:
- *
- * If core_pattern does not include a %p (as is the default)
- * and core_uses_pid is set, then .%pid will be appended to
- * the filename */
- if (!pid_in_pattern
- && (core_uses_pid || atomic_read(&current->mm->mm_users) != 1)) {
- rc = snprintf(out_ptr, out_end - out_ptr,
- ".%d", current->pid);
- if (rc > out_end - out_ptr)
- goto out;
- out_ptr += rc;
- }
- out:
- *out_ptr = 0;
-}
-
-int do_coredump(long signr, struct pt_regs * regs)
-{
- struct linux_binfmt * binfmt;
- char corename[CORENAME_MAX_SIZE + 1];
- struct file * file;
- struct inode * inode;
- int retval = 0;
- int fsuid = current->fsuid;
-
- lock_kernel();
- binfmt = current->binfmt;
- if (!binfmt || !binfmt->core_dump)
- goto fail;
- if (!is_dumpable(current))
- {
- if(!core_setuid_ok || !current->task_dumpable)
- goto fail;
- current->fsuid = 0;
- }
- current->mm->dumpable = 0;
- if (current->rlim[RLIMIT_CORE].rlim_cur < binfmt->min_coredump)
- goto fail;
-
- format_corename(corename, core_pattern, signr);
- file = filp_open(corename, O_CREAT | 2 | O_NOFOLLOW, 0600);
- if (IS_ERR(file))
- goto fail;
- inode = file->f_dentry->d_inode;
- if (inode->i_nlink > 1)
- goto close_fail; /* multiple links - don't dump */
- if (d_unhashed(file->f_dentry))
- goto close_fail;
-
- if (!S_ISREG(inode->i_mode))
- goto close_fail;
- if (!file->f_op)
- goto close_fail;
- if (!file->f_op->write)
- goto close_fail;
- if (do_truncate(file->f_dentry, 0) != 0)
- goto close_fail;
-
- retval = binfmt->core_dump(signr, regs, file);
-
-close_fail:
- filp_close(file, NULL);
-fail:
- if (fsuid != current->fsuid)
- current->fsuid = fsuid;
- unlock_kernel();
- return retval;
-}
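
For reference, the capability transition implemented by compute_creds() in the
file above follows the documented rule pI' = pI, pP' = (fP & X) | (fI & pI),
pE' = pP' & fE. The following minimal userspace sketch works the rule through
once; the mask values are invented for the example and are not code from this
tree:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t pI = 0x0f;        /* process inheritable set */
	uint32_t fP = 0xf0;        /* file permitted set */
	uint32_t fI = 0x03;        /* file inheritable set */
	uint32_t fE = ~0u;         /* file effective: 0 or ~0 */
	uint32_t X  = ~0u;         /* global 'cap_bset' */

	uint32_t pP_new = (fP & X) | (fI & pI);   /* 0xf0 | 0x03 = 0xf3 */
	uint32_t pE_new = pP_new & fE;            /* 0xf3 */

	printf("pP' = %#x, pE' = %#x, pI' = %#x\n",
	       (unsigned)pP_new, (unsigned)pE_new, (unsigned)pI);
	return 0;
}
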
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/include/asm-xen/bugs.h
--- a/linux-2.4.30-xen-sparse/include/asm-xen/bugs.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,53 +0,0 @@
-/*
- * include/asm-i386/bugs.h
- *
- * Copyright (C) 1994 Linus Torvalds
- *
- * Cyrix stuff, June 1998 by:
- * - Rafael R. Reilova (moved everything from head.S),
- * <rreilova@xxxxxxxxxxxx>
- * - Channing Corn (tests & fixes),
- * - Andrew D. Balsa (code cleanup).
- *
- * Pentium III FXSR, SSE support
- * Gareth Hughes <gareth@xxxxxxxxxxx>, May 2000
- */
-
-/*
- * This is included by init/main.c to check for architecture-dependent bugs.
- *
- * Needs:
- * void check_bugs(void);
- */
-
-#include <linux/config.h>
-#include <asm/processor.h>
-#include <asm/i387.h>
-#include <asm/msr.h>
-
-
-static void __init check_fpu(void)
-{
- boot_cpu_data.fdiv_bug = 0;
-}
-
-static void __init check_hlt(void)
-{
- boot_cpu_data.hlt_works_ok = 1;
-}
-
-static void __init check_bugs(void)
-{
- extern void __init boot_init_fpu(void);
-
- identify_cpu(&boot_cpu_data);
- boot_init_fpu();
-#ifndef CONFIG_SMP
- printk("CPU: ");
- print_cpu_info(&boot_cpu_data);
-#endif
- check_fpu();
- check_hlt();
- system_utsname.machine[1] = '0' +
- (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
-}
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/include/asm-xen/desc.h
--- a/linux-2.4.30-xen-sparse/include/asm-xen/desc.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,41 +0,0 @@
-#ifndef __ARCH_DESC_H
-#define __ARCH_DESC_H
-
-#include <asm/ldt.h>
-
-#ifndef __ASSEMBLY__
-
-struct desc_struct {
- unsigned long a,b;
-};
-
-struct Xgt_desc_struct {
- unsigned short size;
- unsigned long address __attribute__((packed));
-};
-
-extern struct desc_struct default_ldt[];
-
-static inline void clear_LDT(void)
-{
- /*
- * NB. We load the default_ldt for lcall7/27 handling on demand, as
- * it slows down context switching. No one uses it anyway.
- */
- queue_set_ldt(0, 0);
-}
-
-static inline void load_LDT(mm_context_t *pc)
-{
- void *segments = pc->ldt;
- int count = pc->size;
-
- if ( count == 0 )
- segments = NULL;
-
- queue_set_ldt((unsigned long)segments, count);
-}
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* __ARCH_DESC_H */
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/include/asm-xen/fixmap.h
--- a/linux-2.4.30-xen-sparse/include/asm-xen/fixmap.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,105 +0,0 @@
-/*
- * fixmap.h: compile-time virtual memory allocation
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1998 Ingo Molnar
- *
- * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
- */
-
-#ifndef _ASM_FIXMAP_H
-#define _ASM_FIXMAP_H
-
-#include <linux/config.h>
-#include <linux/kernel.h>
-#include <asm/apicdef.h>
-#include <asm/page.h>
-#ifdef CONFIG_HIGHMEM
-#include <linux/threads.h>
-#include <asm/kmap_types.h>
-#endif
-
-/*
- * Here we define all the compile-time 'special' virtual
- * addresses. The point is to have a constant address at
- * compile time, but to set the physical address only
- * in the boot process. We allocate these special addresses
- * from the end of virtual memory (0xfffff000) backwards.
- * Also this lets us do fail-safe vmalloc(), we
- * can guarantee that these special addresses and
- * vmalloc()-ed addresses never overlap.
- *
- * these 'compile-time allocated' memory buffers are
- * fixed-size 4k pages. (or larger if used with an increment
- * higher than 1) use fixmap_set(idx,phys) to associate
- * physical memory with fixmap indices.
- *
- * TLB entries of such buffers will not be flushed across
- * task switches.
- */
-
-enum fixed_addresses {
-#ifdef CONFIG_HIGHMEM
- FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
- FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
-#endif
- FIX_BLKRING_BASE,
- FIX_NETRING0_BASE,
- FIX_NETRING1_BASE,
- FIX_NETRING2_BASE,
- FIX_NETRING3_BASE,
- FIX_SHARED_INFO,
- FIX_GNTTAB,
-#ifdef CONFIG_VGA_CONSOLE
-#define NR_FIX_BTMAPS 32 /* 128KB For the Dom0 VGA Console A0000-C0000 */
-#else
-#define NR_FIX_BTMAPS 1 /* in case anyone wants it in future... */
-#endif
- FIX_BTMAP_END,
- FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS - 1,
- /* our bt_ioremap is permanent, unlike other architectures */
-
- __end_of_permanent_fixed_addresses,
- __end_of_fixed_addresses = __end_of_permanent_fixed_addresses
-};
-
-extern void __set_fixmap (enum fixed_addresses idx,
- unsigned long phys, pgprot_t flags);
-
-#define set_fixmap(idx, phys) \
- __set_fixmap(idx, phys, PAGE_KERNEL)
-/*
- * Some hardware wants to get fixmapped without caching.
- */
-#define set_fixmap_nocache(idx, phys) \
- __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
-
-extern void clear_fixmap(enum fixed_addresses idx);
-
-/*
- * used by vmalloc.c.
- *
- * Leave one empty page between vmalloc'ed areas and
- * the start of the fixmap, and leave one page empty
- * at the top of mem..
- */
-#define FIXADDR_TOP (HYPERVISOR_VIRT_START - 2*PAGE_SIZE)
-#define __FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
-#define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE)
-
-#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
-
-/*
- * 'index to address' translation. If anyone tries to use the idx
- * directly without translation, we catch the bug with a NULL-dereference
- * kernel oops. Illegal ranges of incoming indices are caught too.
- */
-static inline unsigned long fix_to_virt(unsigned int idx)
-{
- return __fix_to_virt(idx);
-}
-
-#endif
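
The fixmap indices above translate to addresses by counting 4k pages down from
FIXADDR_TOP, as __fix_to_virt() shows. A small standalone sketch of the same
arithmetic; the FIXADDR_TOP value here is an assumed stand-in, since the real
one derives from HYPERVISOR_VIRT_START:

#include <stdio.h>

#define EX_PAGE_SHIFT   12
#define EX_FIXADDR_TOP  0xfc000000UL   /* assumed; not this tree's value */
#define ex_fix_to_virt(idx) \
	(EX_FIXADDR_TOP - ((unsigned long)(idx) << EX_PAGE_SHIFT))

int main(void)
{
	/* Successive indices map to successively lower 4k pages. */
	for (unsigned int idx = 0; idx < 4; idx++)
		printf("fixmap idx %u -> %#lx\n", idx, ex_fix_to_virt(idx));
	return 0;
}
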
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/include/asm-xen/highmem.h
--- a/linux-2.4.30-xen-sparse/include/asm-xen/highmem.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,132 +0,0 @@
-/*
- * highmem.h: virtual kernel memory mappings for high memory
- *
- * Used in CONFIG_HIGHMEM systems for memory pages which
- * are not addressable by direct kernel virtual addresses.
- *
- * Copyright (C) 1999 Gerhard Wichert, Siemens AG
- * Gerhard.Wichert@xxxxxxxxxxxxxx
- *
- *
- * Redesigned the x86 32-bit VM architecture to deal with
- * up to 16 Terabyte physical memory. With current x86 CPUs
- * we now support up to 64 Gigabytes physical RAM.
- *
- * Copyright (C) 1999 Ingo Molnar <mingo@xxxxxxxxxx>
- */
-
-#ifndef _ASM_HIGHMEM_H
-#define _ASM_HIGHMEM_H
-
-#ifdef __KERNEL__
-
-#include <linux/config.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <asm/kmap_types.h>
-#include <asm/pgtable.h>
-
-#ifdef CONFIG_DEBUG_HIGHMEM
-#define HIGHMEM_DEBUG 1
-#else
-#define HIGHMEM_DEBUG 0
-#endif
-
-/* declarations for highmem.c */
-extern unsigned long highstart_pfn, highend_pfn;
-
-extern pte_t *kmap_pte;
-extern pgprot_t kmap_prot;
-extern pte_t *pkmap_page_table;
-
-extern void kmap_init(void) __init;
-
-/*
- * Right now we initialize only a single pte table. It can be extended
- * easily, subsequent pte tables have to be allocated in one physical
- * chunk of RAM.
- */
-#define PKMAP_BASE (HYPERVISOR_VIRT_START - (1<<23))
-#ifdef CONFIG_X86_PAE
-#define LAST_PKMAP 512
-#else
-#define LAST_PKMAP 1024
-#endif
-#define LAST_PKMAP_MASK (LAST_PKMAP-1)
-#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
-#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
-
-extern void * FASTCALL(kmap_high(struct page *page, int nonblocking));
-extern void FASTCALL(kunmap_high(struct page *page));
-
-#define kmap(page) __kmap(page, 0)
-#define kmap_nonblock(page) __kmap(page, 1)
-
-static inline void *__kmap(struct page *page, int nonblocking)
-{
- if (in_interrupt())
- out_of_line_bug();
- if (page < highmem_start_page)
- return page_address(page);
- return kmap_high(page, nonblocking);
-}
-
-static inline void kunmap(struct page *page)
-{
- if (in_interrupt())
- out_of_line_bug();
- if (page < highmem_start_page)
- return;
- kunmap_high(page);
-}
-
-/*
- * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
- * gives a more generic (and caching) interface. But kmap_atomic can
- * be used in IRQ contexts, so in some (very limited) cases we need
- * it.
- */
-static inline void *kmap_atomic(struct page *page, enum km_type type)
-{
- enum fixed_addresses idx;
- unsigned long vaddr;
-
- if (page < highmem_start_page)
- return page_address(page);
-
- idx = type + KM_TYPE_NR*smp_processor_id();
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-#if HIGHMEM_DEBUG
- if (!pte_none(*(kmap_pte-idx)))
- out_of_line_bug();
-#endif
- set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
- __flush_tlb_one(vaddr);
-
- return (void*) vaddr;
-}
-
-static inline void kunmap_atomic(void *kvaddr, enum km_type type)
-{
-#if HIGHMEM_DEBUG
- unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
- enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
-
- if (vaddr < FIXADDR_START) // FIXME
- return;
-
- if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
- out_of_line_bug();
-
- /*
- * force other mappings to Oops if they'll try to access
- * this pte without first remapping it
- */
- pte_clear(kmap_pte-idx);
- __flush_tlb_one(vaddr);
-#endif
-}
-
-#endif /* __KERNEL__ */
-
-#endif /* _ASM_HIGHMEM_H */
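
kmap_atomic() above gives each (cpu, km_type) pair its own fixmap slot via
idx = type + KM_TYPE_NR*smp_processor_id(), so atomic mappings on different
CPUs can never collide. A runnable sketch of just that slot arithmetic;
EX_KM_TYPE_NR and EX_FIX_KMAP_BEGIN are invented stand-ins for the real
constants:

#include <stdio.h>

#define EX_KM_TYPE_NR      6   /* assumed number of km_type slots */
#define EX_FIX_KMAP_BEGIN  8   /* assumed first kmap fixmap index */

static unsigned int kmap_slot(unsigned int type, unsigned int cpu)
{
	return EX_FIX_KMAP_BEGIN + type + EX_KM_TYPE_NR * cpu;
}

int main(void)
{
	/* CPU 0 and CPU 1 get disjoint slot ranges for the same type. */
	for (unsigned int cpu = 0; cpu < 2; cpu++)
		for (unsigned int type = 0; type < 3; type++)
			printf("cpu %u type %u -> slot %u\n",
			       cpu, type, kmap_slot(type, cpu));
	return 0;
}
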
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/include/asm-xen/hw_irq.h
--- a/linux-2.4.30-xen-sparse/include/asm-xen/hw_irq.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,61 +0,0 @@
-#ifndef _ASM_HW_IRQ_H
-#define _ASM_HW_IRQ_H
-
-/*
- * linux/include/asm/hw_irq.h
- *
- * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
- */
-
-#include <linux/config.h>
-#include <linux/smp.h>
-#include <asm/atomic.h>
-#include <asm/irq.h>
-
-#define SYSCALL_VECTOR 0x80
-
-extern int irq_vector[NR_IRQS];
-
-extern atomic_t irq_err_count;
-extern atomic_t irq_mis_count;
-
-extern char _stext, _etext;
-
-extern unsigned long prof_cpu_mask;
-extern unsigned int * prof_buffer;
-extern unsigned long prof_len;
-extern unsigned long prof_shift;
-
-/*
- * x86 profiling function, SMP safe. We might want to do this in
- * assembly totally?
- */
-static inline void x86_do_profile (unsigned long eip)
-{
- if (!prof_buffer)
- return;
-
- /*
- * Only measure the CPUs specified by /proc/irq/prof_cpu_mask.
- * (default is all CPUs.)
- */
- if (!((1<<smp_processor_id()) & prof_cpu_mask))
- return;
-
- eip -= (unsigned long) &_stext;
- eip >>= prof_shift;
- /*
- * Don't ignore out-of-bounds EIP values silently,
- * put them into the last histogram slot, so if
- * present, they will show up as a sharp peak.
- */
- if (eip > prof_len-1)
- eip = prof_len-1;
- atomic_inc((atomic_t *)&prof_buffer[eip]);
-}
-
-static inline void hw_resend_irq(struct hw_interrupt_type *h,
- unsigned int i)
-{}
-
-#endif /* _ASM_HW_IRQ_H */
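
x86_do_profile() above reduces an EIP to a histogram bucket: subtract the text
start, shift right by prof_shift, and clamp out-of-range values into the last
slot so they show up as a sharp peak rather than vanishing. The same
arithmetic as a standalone sketch, with all input values invented:

#include <stdio.h>

static unsigned long prof_bucket(unsigned long eip, unsigned long stext,
                                 unsigned long shift, unsigned long len)
{
	eip -= stext;
	eip >>= shift;
	if (eip > len - 1)
		eip = len - 1;   /* out-of-bounds EIPs land in the last slot */
	return eip;
}

int main(void)
{
	printf("%lu\n", prof_bucket(0xc0100400UL, 0xc0100000UL, 4, 1024)); /* 64 */
	printf("%lu\n", prof_bucket(0xc1000000UL, 0xc0100000UL, 4, 1024)); /* 1023 */
	return 0;
}
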
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/include/asm-xen/io.h
--- a/linux-2.4.30-xen-sparse/include/asm-xen/io.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,457 +0,0 @@
-#ifndef _ASM_IO_H
-#define _ASM_IO_H
-
-#include <linux/config.h>
-
-/*
- * This file contains the definitions for the x86 IO instructions
- * inb/inw/inl/outb/outw/outl and the "string versions" of the same
- * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
- * versions of the single-IO instructions (inb_p/inw_p/..).
- *
- * This file is not meant to be obfuscating: it's just complicated
- * to (a) handle it all in a way that makes gcc able to optimize it
- * as well as possible and (b) trying to avoid writing the same thing
- * over and over again with slight variations and possibly making a
- * mistake somewhere.
- */
-
-/*
- * Thanks to James van Artsdalen for a better timing-fix than
- * the two short jumps: using outb's to a nonexistent port seems
- * to guarantee better timings even on fast machines.
- *
- * On the other hand, I'd like to be sure of a non-existent port:
- * I feel a bit unsafe about using 0x80 (should be safe, though)
- *
- * Linus
- */
-
- /*
- * Bit simplified and optimized by Jan Hubicka
- * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
- *
- * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
- * isa_read[wl] and isa_write[wl] fixed
- * - Arnaldo Carvalho de Melo <acme@xxxxxxxxxxxxxxxx>
- */
-
-#define IO_SPACE_LIMIT 0xffff
-
-#define XQUAD_PORTIO_BASE 0xfe400000
-#define XQUAD_PORTIO_QUAD 0x40000 /* 256k per quad. */
-#define XQUAD_PORTIO_LEN 0x80000 /* Only remapping first 2 quads */
-
-#ifdef __KERNEL__
-
-#include <linux/vmalloc.h>
-
-/*
- * Temporary debugging check to catch old code using
- * unmapped ISA addresses. Will be removed in 2.4.
- */
-#if CONFIG_DEBUG_IOVIRT
- extern void *__io_virt_debug(unsigned long x, const char *file, int line);
- extern unsigned long __io_phys_debug(unsigned long x, const char *file, int line);
- #define __io_virt(x) __io_virt_debug((unsigned long)(x), __FILE__, __LINE__)
-//#define __io_phys(x) __io_phys_debug((unsigned long)(x), __FILE__, __LINE__)
-#else
- #define __io_virt(x) ((void *)(x))
-//#define __io_phys(x) __pa(x)
-#endif
-
-/**
- * virt_to_phys - map virtual addresses to physical
- * @address: address to remap
- *
- * The returned physical address is the physical (CPU) mapping for
- * the memory address given. It is only valid to use this function on
- * addresses directly mapped or allocated via kmalloc.
- *
- * This function does not give bus mappings for DMA transfers. In
- * almost all conceivable cases a device driver should not be using
- * this function
- */
-
-static inline unsigned long virt_to_phys(volatile void * address)
-{
- return __pa(address);
-}
-
-/**
- * phys_to_virt - map physical address to virtual
- * @address: address to remap
- *
- * The returned virtual address is a current CPU mapping for
- * the memory address given. It is only valid to use this function on
- * addresses that have a kernel mapping
- *
- * This function does not handle bus mappings for DMA transfers. In
- * almost all conceivable cases a device driver should not be using
- * this function
- */
-
-static inline void * phys_to_virt(unsigned long address)
-{
- return __va(address);
-}
-
-/*
- * We define page_to_phys 'incorrectly' because it is used when merging blkdev
- * requests, and the correct thing to do there is to use machine addresses.
- */
-#define page_to_phys(_x) phys_to_machine(((_x) - mem_map) << PAGE_SHIFT)
-
-extern void * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
-
-/**
- * ioremap - map bus memory into CPU space
- * @offset: bus address of the memory
- * @size: size of the resource to map
- *
- * ioremap performs a platform specific sequence of operations to
- * make bus memory CPU accessible via the readb/readw/readl/writeb/
- * writew/writel functions and the other mmio helpers. The returned
- * address is not guaranteed to be usable directly as a virtual
- * address.
- */
-
-static inline void * ioremap (unsigned long offset, unsigned long size)
-{
- return __ioremap(offset, size, 0);
-}
-
-/**
- * ioremap_nocache - map bus memory into CPU space
- * @offset: bus address of the memory
- * @size: size of the resource to map
- *
- * ioremap_nocache performs a platform specific sequence of operations to
- * make bus memory CPU accessible via the readb/readw/readl/writeb/
- * writew/writel functions and the other mmio helpers. The returned
- * address is not guaranteed to be usable directly as a virtual
- * address.
- *
- * This version of ioremap ensures that the memory is marked uncachable
- * on the CPU as well as honouring existing caching rules from things like
- * the PCI bus. Note that there are other caches and buffers on many
- * busses. In particular driver authors should read up on PCI writes
- *
- * It's useful if some control registers are in such an area and
- * write combining or read caching is not desirable:
- */
-
-static inline void * ioremap_nocache (unsigned long offset, unsigned long size)
-{
- return __ioremap(offset, size, _PAGE_PCD);
-}
-
-extern void iounmap(void *addr);
-
-/*
- * bt_ioremap() and bt_iounmap() are for temporary early boot-time
- * mappings, before the real ioremap() is functional.
- * A boot-time mapping is currently limited to at most 16 pages.
- */
-extern void *bt_ioremap(unsigned long offset, unsigned long size);
-extern void bt_iounmap(void *addr, unsigned long size);
-
-#define virt_to_bus(_x) phys_to_machine(virt_to_phys(_x))
-#define bus_to_virt(_x) phys_to_virt(machine_to_phys(_x))
-#define page_to_bus(_x) phys_to_machine(((_x) - mem_map) << PAGE_SHIFT)
-#define bus_to_phys(_x) machine_to_phys(_x)
-#define bus_to_page(_x) (mem_map + (bus_to_phys(_x) >> PAGE_SHIFT))
-
-/*
- * readX/writeX() are used to access memory mapped devices. On some
- * architectures the memory mapped IO stuff needs to be accessed
- * differently. On the x86 architecture, we just read/write the
- * memory location directly.
- */
-
-#define readb(addr) (*(volatile unsigned char *) __io_virt(addr))
-#define readw(addr) (*(volatile unsigned short *) __io_virt(addr))
-#define readl(addr) (*(volatile unsigned int *) __io_virt(addr))
-#define __raw_readb readb
-#define __raw_readw readw
-#define __raw_readl readl
-
-#define writeb(b,addr) (*(volatile unsigned char *) __io_virt(addr) = (b))
-#define writew(b,addr) (*(volatile unsigned short *) __io_virt(addr) = (b))
-#define writel(b,addr) (*(volatile unsigned int *) __io_virt(addr) = (b))
-#define __raw_writeb writeb
-#define __raw_writew writew
-#define __raw_writel writel
-
-#define memset_io(a,b,c) __memset(__io_virt(a),(b),(c))
-#define memcpy_fromio(a,b,c) __memcpy((a),__io_virt(b),(c))
-#define memcpy_toio(a,b,c) __memcpy(__io_virt(a),(b),(c))
-
-/*
- * ISA space is 'always mapped' on a typical x86 system, no need to
- * explicitly ioremap() it. The fact that the ISA IO space is mapped
- * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
- * are physical addresses. The following constant pointer can be
- * used as the IO-area pointer (it can be iounmapped as well, so the
- * analogy with PCI is quite large):
- */
-#define __ISA_IO_base ((char *)(PAGE_OFFSET))
-
-#define isa_readb(a) readb(__ISA_IO_base + (a))
-#define isa_readw(a) readw(__ISA_IO_base + (a))
-#define isa_readl(a) readl(__ISA_IO_base + (a))
-#define isa_writeb(b,a) writeb(b,__ISA_IO_base + (a))
-#define isa_writew(w,a) writew(w,__ISA_IO_base + (a))
-#define isa_writel(l,a) writel(l,__ISA_IO_base + (a))
-#define isa_memset_io(a,b,c) memset_io(__ISA_IO_base + (a),(b),(c))
-#define isa_memcpy_fromio(a,b,c) memcpy_fromio((a),__ISA_IO_base + (b),(c))
-#define isa_memcpy_toio(a,b,c) memcpy_toio(__ISA_IO_base + (a),(b),(c))
-
-
-/*
- * Again, i386 does not require mem IO specific function.
- */
-
-#define eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),__io_virt(b),(c),(d))
-#define isa_eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),__io_virt(__ISA_IO_base + (b)),(c),(d))
-
-/**
- * check_signature - find BIOS signatures
- * @io_addr: mmio address to check
- * @signature: signature block
- * @length: length of signature
- *
- * Perform a signature comparison with the mmio address io_addr. This
- * address should have been obtained by ioremap.
- * Returns 1 on a match.
- */
-
-static inline int check_signature(unsigned long io_addr,
- const unsigned char *signature, int length)
-{
- int retval = 0;
- do {
- if (readb(io_addr) != *signature)
- goto out;
- io_addr++;
- signature++;
- length--;
- } while (length);
- retval = 1;
-out:
- return retval;
-}
-
-/**
- * isa_check_signature - find BIOS signatures
- * @io_addr: mmio address to check
- * @signature: signature block
- * @length: length of signature
- *
- * Perform a signature comparison with the ISA mmio address io_addr.
- * Returns 1 on a match.
- *
- * This function is deprecated. New drivers should use ioremap and
- * check_signature.
- */
-
-
-static inline int isa_check_signature(unsigned long io_addr,
- const unsigned char *signature, int length)
-{
- int retval = 0;
- do {
- if (isa_readb(io_addr) != *signature)
- goto out;
- io_addr++;
- signature++;
- length--;
- } while (length);
- retval = 1;
-out:
- return retval;
-}
-
-/*
- * Cache management
- *
- * This needed for two cases
- * 1. Out of order aware processors
- * 2. Accidentally out of order processors (PPro errata #51)
- */
-
-#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)
-
-static inline void flush_write_buffers(void)
-{
- __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory");
-}
-
-#define dma_cache_inv(_start,_size) flush_write_buffers()
-#define dma_cache_wback(_start,_size) flush_write_buffers()
-#define dma_cache_wback_inv(_start,_size) flush_write_buffers()
-
-#else
-
-/* Nothing to do */
-
-#define dma_cache_inv(_start,_size) do { } while (0)
-#define dma_cache_wback(_start,_size) do { } while (0)
-#define dma_cache_wback_inv(_start,_size) do { } while (0)
-#define flush_write_buffers()
-
-#endif
-
-#endif /* __KERNEL__ */
-
-#ifdef SLOW_IO_BY_JUMPING
-#define __SLOW_DOWN_IO "\njmp 1f\n1:\tjmp 1f\n1:"
-#elif defined(__UNSAFE_IO__)
-#define __SLOW_DOWN_IO "\noutb %%al,$0x80"
-#else
-#define __SLOW_DOWN_IO "\n1: outb %%al,$0x80\n" \
- "2:\n" \
- ".section __ex_table,\"a\"\n\t" \
- ".align 4\n\t" \
- ".long 1b,2b\n" \
- ".previous"
-#endif
-
-#ifdef REALLY_SLOW_IO
-#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO
-#else
-#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO
-#endif
-
-#ifdef CONFIG_MULTIQUAD
-extern void *xquad_portio; /* Where the IO area was mapped */
-#endif /* CONFIG_MULTIQUAD */
-
-/*
- * Talk about misusing macros..
- */
-#define __OUT1(s,x) \
-static inline void out##s(unsigned x value, unsigned short port) {
-
-#ifdef __UNSAFE_IO__
-#define __OUT2(s,s1,s2) \
-__asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1"
-#else
-#define __OUT2(s,s1,s2) \
-__asm__ __volatile__ ("1: out" #s " %" s1 "0,%" s2 "1\n" \
- "2:\n" \
- ".section __ex_table,\"a\"\n\t" \
- ".align 4\n\t" \
- ".long 1b,2b\n" \
- ".previous"
-#endif
-
-#if defined (CONFIG_MULTIQUAD) && !defined(STANDALONE)
-#define __OUTQ(s,ss,x) /* Do the equivalent of the portio op on quads */ \
-static inline void out##ss(unsigned x value, unsigned short port) { \
- if (xquad_portio) \
- write##s(value, (unsigned long) xquad_portio + port); \
- else /* We're still in early boot, running on quad 0 */ \
- out##ss##_local(value, port); \
-} \
-static inline void out##ss##_quad(unsigned x value, unsigned short port, int quad) { \
- if (xquad_portio) \
- write##s(value, (unsigned long) xquad_portio + (XQUAD_PORTIO_QUAD*quad)\
- + port); \
-}
-
-#define __INQ(s,ss) /* Do the equivalent of the portio op on quads */ \
-static inline RETURN_TYPE in##ss(unsigned short port) { \
- if (xquad_portio) \
- return read##s((unsigned long) xquad_portio + port); \
- else /* We're still in early boot, running on quad 0 */ \
- return in##ss##_local(port); \
-} \
-static inline RETURN_TYPE in##ss##_quad(unsigned short port, int quad) { \
- if (xquad_portio) \
- return read##s((unsigned long) xquad_portio + (XQUAD_PORTIO_QUAD*quad)\
- + port); \
- else\
- return 0;\
-}
-#endif /* CONFIG_MULTIQUAD && !STANDALONE */
-
-#if !defined(CONFIG_MULTIQUAD) || defined(STANDALONE)
-#define __OUT(s,s1,x) \
-__OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \
-__OUT1(s##_p,x) __OUT2(s,s1,"w") __FULL_SLOW_DOWN_IO : : "a" (value), "Nd" (port));}
-#else
-/* Make the default portio routines operate on quad 0 */
-#define __OUT(s,s1,x) \
-__OUT1(s##_local,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \
-__OUT1(s##_p_local,x) __OUT2(s,s1,"w") __FULL_SLOW_DOWN_IO : : "a" (value), "Nd" (port));} \
-__OUTQ(s,s,x) \
-__OUTQ(s,s##_p,x)
-#endif /* !CONFIG_MULTIQUAD || STANDALONE */
-
-#define __IN1(s) \
-static inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v;
-
-#ifdef __UNSAFE_IO__
-#define __IN2(s,s1,s2) \
-__asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0"
-#else
-#define __IN2(s,s1,s2) \
-__asm__ __volatile__ ("1: in" #s " %" s2 "1,%" s1 "0\n" \
- "2:\n" \
- ".section .fixup,\"ax\"\n" \
- "3: mov" #s " $~0,%" s1 "0\n\t" \
- "jmp 2b\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n\t" \
- ".align 4\n\t" \
- ".long 1b,3b\n" \
- ".previous"
-#endif
-
-#if !defined(CONFIG_MULTIQUAD) || defined(STANDALONE)
-#define __IN(s,s1,i...) \
-__IN1(s) __IN2(s,s1,"w") : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
-__IN1(s##_p) __IN2(s,s1,"w") __FULL_SLOW_DOWN_IO : "=a" (_v) : "Nd" (port) ,##i ); return _v; }
-#else
-/* Make the default portio routines operate on quad 0 */
-#define __IN(s,s1,i...) \
-__IN1(s##_local) __IN2(s,s1,"w") : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
-__IN1(s##_p_local) __IN2(s,s1,"w") __FULL_SLOW_DOWN_IO : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
-__INQ(s,s) \
-__INQ(s,s##_p)
-#endif /* !CONFIG_MULTIQUAD || STANDALONE */
-
-#define __INS(s) \
-static inline void ins##s(unsigned short port, void * addr, unsigned long count) \
-{ __asm__ __volatile__ ("rep ; ins" #s \
-: "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
-
-#define __OUTS(s) \
-static inline void outs##s(unsigned short port, const void * addr, unsigned long count) \
-{ __asm__ __volatile__ ("rep ; outs" #s \
-: "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
-
-#define RETURN_TYPE unsigned char
-__IN(b,"")
-#undef RETURN_TYPE
-#define RETURN_TYPE unsigned short
-__IN(w,"")
-#undef RETURN_TYPE
-#define RETURN_TYPE unsigned int
-__IN(l,"")
-#undef RETURN_TYPE
-
-__OUT(b,"b",char)
-__OUT(w,"w",short)
-__OUT(l,,int)
-
-__INS(b)
-__INS(w)
-__INS(l)
-
-__OUTS(b)
-__OUTS(w)
-__OUTS(l)
-
-#endif
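
check_signature() above is a byte-for-byte compare against a region obtained
from ioremap(). The loop itself is ordinary C; this userspace sketch runs the
identical logic over a plain buffer, whose contents are made up for the
example:

#include <stdio.h>

static int sig_match(const unsigned char *p,
                     const unsigned char *signature, int length)
{
	while (length--)
		if (*p++ != *signature++)
			return 0;
	return 1;
}

int main(void)
{
	const unsigned char region[] = { 'P', 'C', 'I', ' ' };
	printf("match: %d\n", sig_match(region, (const unsigned char *)"PCI", 3));
	return 0;
}
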
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/include/asm-xen/irq.h
--- a/linux-2.4.30-xen-sparse/include/asm-xen/irq.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,65 +0,0 @@
-#ifndef _ASM_IRQ_H
-#define _ASM_IRQ_H
-
-/*
- * linux/include/asm/irq.h
- *
- * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
- *
- * IRQ/IPI changes taken from work by Thomas Radke
- * <tomsoft@xxxxxxxxxxxxxxxxxxxxxxxxx>
- */
-
-#include <linux/config.h>
-#include <asm/hypervisor.h>
-#include <asm/ptrace.h>
-
-/*
- * The flat IRQ space is divided into two regions:
- * 1. A one-to-one mapping of real physical IRQs. This space is only used
- * if we have physical device-access privilege. This region is at the
- * start of the IRQ space so that existing device drivers do not need
- * to be modified to translate physical IRQ numbers into our IRQ space.
- * 2. A dynamic mapping of inter-domain and Xen-sourced virtual IRQs. These
- * are bound using the provided bind/unbind functions.
- */
-
-#define PIRQ_BASE 0
-#define NR_PIRQS 128
-
-#define DYNIRQ_BASE (PIRQ_BASE + NR_PIRQS)
-#define NR_DYNIRQS 128
-
-#define NR_IRQS (NR_PIRQS + NR_DYNIRQS)
-
-#define pirq_to_irq(_x) ((_x) + PIRQ_BASE)
-#define irq_to_pirq(_x) ((_x) - PIRQ_BASE)
-
-#define dynirq_to_irq(_x) ((_x) + DYNIRQ_BASE)
-#define irq_to_dynirq(_x) ((_x) - DYNIRQ_BASE)
-
-/* Dynamic binding of event channels and VIRQ sources to Linux IRQ space. */
-extern int bind_virq_to_irq(int virq);
-extern void unbind_virq_from_irq(int virq);
-extern int bind_evtchn_to_irq(int evtchn);
-extern void unbind_evtchn_from_irq(int evtchn);
-
-static __inline__ int irq_cannonicalize(int irq)
-{
- return (irq == 2) ? 9 : irq;
-}
-
-extern void disable_irq(unsigned int);
-extern void disable_irq_nosync(unsigned int);
-extern void enable_irq(unsigned int);
-
-extern void irq_suspend(void);
-extern void irq_resume(void);
-
-
-#define CPU_MASK_NONE 0
-
-/* XXX SMH: no-op for compat w/ 2.6 shared files */
-#define irq_ctx_init(cpu) do { ; } while (0)
-
-#endif /* _ASM_IRQ_H */
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/include/asm-xen/keyboard.h
--- a/linux-2.4.30-xen-sparse/include/asm-xen/keyboard.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,74 +0,0 @@
-/*
- * linux/include/asm-i386/keyboard.h
- *
- * Created 3 Nov 1996 by Geert Uytterhoeven
- */
-
-/*
- * This file contains the i386 architecture specific keyboard definitions
- */
-
-#ifndef _I386_KEYBOARD_H
-#define _I386_KEYBOARD_H
-
-#ifdef __KERNEL__
-
-#include <linux/kernel.h>
-#include <linux/ioport.h>
-#include <linux/kd.h>
-#include <linux/pm.h>
-#include <asm/io.h>
-
-#define KEYBOARD_IRQ 1
-#define DISABLE_KBD_DURING_INTERRUPTS 0
-
-extern int pckbd_setkeycode(unsigned int scancode, unsigned int keycode);
-extern int pckbd_getkeycode(unsigned int scancode);
-extern int pckbd_translate(unsigned char scancode, unsigned char *keycode,
- char raw_mode);
-extern char pckbd_unexpected_up(unsigned char keycode);
-extern void pckbd_leds(unsigned char leds);
-extern void pckbd_init_hw(void);
-extern int pckbd_pm_resume(struct pm_dev *, pm_request_t, void *);
-extern pm_callback pm_kbd_request_override;
-extern unsigned char pckbd_sysrq_xlate[128];
-
-#define kbd_setkeycode pckbd_setkeycode
-#define kbd_getkeycode pckbd_getkeycode
-#define kbd_translate pckbd_translate
-#define kbd_unexpected_up pckbd_unexpected_up
-#define kbd_leds pckbd_leds
-#define kbd_init_hw pckbd_init_hw
-#define kbd_sysrq_xlate pckbd_sysrq_xlate
-
-#define SYSRQ_KEY 0x54
-
-#define kbd_controller_present() (xen_start_info.flags & SIF_INITDOMAIN)
-
-/* resource allocation */
-#define kbd_request_region()
-#define kbd_request_irq(handler) request_irq(KEYBOARD_IRQ, handler, 0, \
- "keyboard", NULL)
-
-/* How to access the keyboard macros on this platform. */
-#define kbd_read_input() inb(KBD_DATA_REG)
-#define kbd_read_status() inb(KBD_STATUS_REG)
-#define kbd_write_output(val) outb(val, KBD_DATA_REG)
-#define kbd_write_command(val) outb(val, KBD_CNTL_REG)
-
-/* Some stoneage hardware needs delays after some operations. */
-#define kbd_pause() do { } while(0)
-
-/*
- * Machine specific bits for the PS/2 driver
- */
-
-#define AUX_IRQ 12
-
-#define aux_request_irq(hand, dev_id) \
- request_irq(AUX_IRQ, hand, SA_SHIRQ, "PS/2 Mouse", dev_id)
-
-#define aux_free_irq(dev_id) free_irq(AUX_IRQ, dev_id)
-
-#endif /* __KERNEL__ */
-#endif /* _I386_KEYBOARD_H */
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/include/asm-xen/mmu_context.h
--- a/linux-2.4.30-xen-sparse/include/asm-xen/mmu_context.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,74 +0,0 @@
-#ifndef __I386_MMU_CONTEXT_H
-#define __I386_MMU_CONTEXT_H
-
-#include <linux/config.h>
-#include <asm/desc.h>
-#include <asm/atomic.h>
-#include <asm/pgalloc.h>
-
-/*
- * hooks to add arch specific data into the mm struct.
- * Note that destroy_context is called even if init_new_context
- * fails.
- */
-int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
-void destroy_context(struct mm_struct *mm);
-
-#ifdef CONFIG_SMP
-
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
-{
- if(cpu_tlbstate[cpu].state == TLBSTATE_OK)
- cpu_tlbstate[cpu].state = TLBSTATE_LAZY;
-}
-#else
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
-{
-}
-#endif
-
-extern pgd_t *cur_pgd;
-
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk, unsigned cpu)
-{
- if (prev != next) {
- /* stop flush ipis for the previous mm */
- clear_bit(cpu, &prev->cpu_vm_mask);
-#ifdef CONFIG_SMP
- cpu_tlbstate[cpu].state = TLBSTATE_OK;
- cpu_tlbstate[cpu].active_mm = next;
-#endif
-
- /* Re-load page tables */
- cur_pgd = next->pgd;
- queue_pt_switch(__pa(cur_pgd));
- /* load_LDT, if either the previous or next thread
- * has a non-default LDT.
- */
- if (next->context.size+prev->context.size)
- load_LDT(&next->context);
- }
-#ifdef CONFIG_SMP
- else {
- cpu_tlbstate[cpu].state = TLBSTATE_OK;
- if(cpu_tlbstate[cpu].active_mm != next)
- out_of_line_bug();
- if(!test_and_set_bit(cpu, &next->cpu_vm_mask)) {
- /* We were in lazy tlb mode and leave_mm disabled
- * tlb flush IPI delivery. We must reload %cr3.
- */
- cur_pgd = next->pgd;
- queue_pt_switch(__pa(cur_pgd));
- load_LDT(next);
- }
- }
-#endif
-}
-
-#define activate_mm(prev, next) \
-do { \
- switch_mm((prev),(next),NULL,smp_processor_id()); \
- flush_page_update_queue(); \
-} while ( 0 )
-
-#endif
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/include/asm-xen/module.h
--- a/linux-2.4.30-xen-sparse/include/asm-xen/module.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,14 +0,0 @@
-#ifndef _ASM_I386_MODULE_H
-#define _ASM_I386_MODULE_H
-/*
- * This file contains the i386 architecture specific module code.
- */
-
-extern int xen_module_init(struct module *mod);
-
-#define module_map(x) vmalloc(x)
-#define module_unmap(x) vfree(x)
-#define module_arch_init(x) xen_module_init(x)
-#define arch_init_modules(x) do { } while (0)
-
-#endif /* _ASM_I386_MODULE_H */
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/include/asm-xen/msr.h
--- a/linux-2.4.30-xen-sparse/include/asm-xen/msr.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,138 +0,0 @@
-#ifndef __ASM_MSR_H
-#define __ASM_MSR_H
-
-/*
- * Access to machine-specific registers (available on 586 and better only)
- * Note: the rd* operations modify the parameters directly (without using
- * pointer indirection), this allows gcc to optimize better
- */
-
-#define rdmsr(msr,val1,val2) \
-{ \
- dom0_op_t op; \
- op.cmd = DOM0_MSR; \
- op.u.msr.write = 0; \
- op.u.msr.msr = msr; \
- op.u.msr.cpu_mask = (1 << current->processor); \
- HYPERVISOR_dom0_op(&op); \
- val1 = op.u.msr.out1; \
- val2 = op.u.msr.out2; \
-}
-
-#define wrmsr(msr,val1,val2) \
-{ \
- dom0_op_t op; \
- op.cmd = DOM0_MSR; \
- op.u.msr.write = 1; \
- op.u.msr.cpu_mask = (1 << current->processor); \
- op.u.msr.msr = msr; \
- op.u.msr.in1 = val1; \
- op.u.msr.in2 = val2; \
- HYPERVISOR_dom0_op(&op); \
-}
-
-#define rdtsc(low,high) \
- __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
-
-#define rdtscl(low) \
- __asm__ __volatile__("rdtsc" : "=a" (low) : : "edx")
-
-#define rdtscll(val) \
- __asm__ __volatile__("rdtsc" : "=A" (val))
-
-#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
-
-#define rdpmc(counter,low,high) \
- __asm__ __volatile__("rdpmc" \
- : "=a" (low), "=d" (high) \
- : "c" (counter))
-
-/* symbolic names for some interesting MSRs */
-/* Intel defined MSRs. */
-#define MSR_IA32_P5_MC_ADDR 0
-#define MSR_IA32_P5_MC_TYPE 1
-#define MSR_IA32_PLATFORM_ID 0x17
-#define MSR_IA32_EBL_CR_POWERON 0x2a
-
-#define MSR_IA32_APICBASE 0x1b
-#define MSR_IA32_APICBASE_BSP (1<<8)
-#define MSR_IA32_APICBASE_ENABLE (1<<11)
-#define MSR_IA32_APICBASE_BASE (0xfffff<<12)
-
-#define MSR_IA32_UCODE_WRITE 0x79
-#define MSR_IA32_UCODE_REV 0x8b
-
-#define MSR_IA32_BBL_CR_CTL 0x119
-
-#define MSR_IA32_MCG_CAP 0x179
-#define MSR_IA32_MCG_STATUS 0x17a
-#define MSR_IA32_MCG_CTL 0x17b
-
-#define MSR_IA32_THERM_CONTROL 0x19a
-#define MSR_IA32_THERM_INTERRUPT 0x19b
-#define MSR_IA32_THERM_STATUS 0x19c
-#define MSR_IA32_MISC_ENABLE 0x1a0
-
-#define MSR_IA32_DEBUGCTLMSR 0x1d9
-#define MSR_IA32_LASTBRANCHFROMIP 0x1db
-#define MSR_IA32_LASTBRANCHTOIP 0x1dc
-#define MSR_IA32_LASTINTFROMIP 0x1dd
-#define MSR_IA32_LASTINTTOIP 0x1de
-
-#define MSR_IA32_MC0_CTL 0x400
-#define MSR_IA32_MC0_STATUS 0x401
-#define MSR_IA32_MC0_ADDR 0x402
-#define MSR_IA32_MC0_MISC 0x403
-
-#define MSR_P6_PERFCTR0 0xc1
-#define MSR_P6_PERFCTR1 0xc2
-#define MSR_P6_EVNTSEL0 0x186
-#define MSR_P6_EVNTSEL1 0x187
-
-#define MSR_IA32_PERF_STATUS 0x198
-#define MSR_IA32_PERF_CTL 0x199
-
-/* AMD Defined MSRs */
-#define MSR_K6_EFER 0xC0000080
-#define MSR_K6_STAR 0xC0000081
-#define MSR_K6_WHCR 0xC0000082
-#define MSR_K6_UWCCR 0xC0000085
-#define MSR_K6_EPMR 0xC0000086
-#define MSR_K6_PSOR 0xC0000087
-#define MSR_K6_PFIR 0xC0000088
-
-#define MSR_K7_EVNTSEL0 0xC0010000
-#define MSR_K7_PERFCTR0 0xC0010004
-#define MSR_K7_HWCR 0xC0010015
-#define MSR_K7_CLK_CTL 0xC001001b
-#define MSR_K7_FID_VID_CTL 0xC0010041
-#define MSR_K7_VID_STATUS 0xC0010042
-
-/* Centaur-Hauls/IDT defined MSRs. */
-#define MSR_IDT_FCR1 0x107
-#define MSR_IDT_FCR2 0x108
-#define MSR_IDT_FCR3 0x109
-#define MSR_IDT_FCR4 0x10a
-
-#define MSR_IDT_MCR0 0x110
-#define MSR_IDT_MCR1 0x111
-#define MSR_IDT_MCR2 0x112
-#define MSR_IDT_MCR3 0x113
-#define MSR_IDT_MCR4 0x114
-#define MSR_IDT_MCR5 0x115
-#define MSR_IDT_MCR6 0x116
-#define MSR_IDT_MCR7 0x117
-#define MSR_IDT_MCR_CTRL 0x120
-
-/* VIA Cyrix defined MSRs*/
-#define MSR_VIA_FCR 0x1107
-#define MSR_VIA_LONGHAUL 0x110a
-#define MSR_VIA_BCR2 0x1147
-
-/* Transmeta defined MSRs */
-#define MSR_TMTA_LONGRUN_CTRL 0x80868010
-#define MSR_TMTA_LONGRUN_FLAGS 0x80868011
-#define MSR_TMTA_LRTI_READOUT 0x80868018
-#define MSR_TMTA_LRTI_VOLT_MHZ 0x8086801a
-
-#endif /* __ASM_MSR_H */
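
Note the reversal above: because a paravirtualised guest cannot execute rdmsr/wrmsr itself, the two macros marshal the request into a DOM0_MSR dom0_op and let the hypervisor perform the access. The symbolic MSR constants are then decoded bitwise exactly as on native hardware; a small userspace sketch doing that for a hard-coded (made-up) MSR_IA32_APICBASE value:

    #include <stdio.h>
    #include <stdint.h>

    #define MSR_IA32_APICBASE_BSP    (1 << 8)
    #define MSR_IA32_APICBASE_ENABLE (1 << 11)
    #define MSR_IA32_APICBASE_BASE   (0xfffffUL << 12)

    int main(void)
    {
        uint32_t low = 0xfee00900; /* sample only: BSP, enabled, base 0xfee00000 */

        printf("BSP:     %d\n",   !!(low & MSR_IA32_APICBASE_BSP));
        printf("enabled: %d\n",   !!(low & MSR_IA32_APICBASE_ENABLE));
        printf("base:    %#lx\n", (unsigned long)(low & MSR_IA32_APICBASE_BASE));
        return 0;
    }
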
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/include/asm-xen/page.h
--- a/linux-2.4.30-xen-sparse/include/asm-xen/page.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,182 +0,0 @@
-#ifndef _I386_PAGE_H
-#define _I386_PAGE_H
-
-/* PAGE_SHIFT determines the page size */
-#define PAGE_SHIFT 12
-#define PAGE_SIZE (1UL << PAGE_SHIFT)
-#define PAGE_MASK (~(PAGE_SIZE-1))
-
-#ifdef __KERNEL__
-#ifndef __ASSEMBLY__
-
-#include <linux/config.h>
-#include <linux/string.h>
-#include <asm/types.h>
-#include <asm-xen/xen-public/xen.h>
-
-#ifdef CONFIG_XEN_SCRUB_PAGES
-#define scrub_pages(_p,_n) memset((void *)(_p), 0, (_n) << PAGE_SHIFT)
-#else
-#define scrub_pages(_p,_n) ((void)0)
-#endif
-
-#ifdef CONFIG_X86_USE_3DNOW
-
-#include <asm/mmx.h>
-
-#define clear_page(page) mmx_clear_page((void *)(page))
-#define copy_page(to,from) mmx_copy_page(to,from)
-
-#else
-
-/*
- * On older X86 processors its not a win to use MMX here it seems.
- * Maybe the K6-III ?
- */
-
-#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
-#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE)
-
-#endif
-
-#define clear_user_page(page, vaddr) clear_page(page)
-#define copy_user_page(to, from, vaddr) copy_page(to, from)
-
-/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
-extern unsigned long *phys_to_machine_mapping;
-#define pfn_to_mfn(_pfn) (phys_to_machine_mapping[(_pfn)])
-#define mfn_to_pfn(_mfn) (machine_to_phys_mapping[(_mfn)])
-static inline unsigned long phys_to_machine(unsigned long phys)
-{
- unsigned long machine = pfn_to_mfn(phys >> PAGE_SHIFT);
- machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
- return machine;
-}
-static inline unsigned long machine_to_phys(unsigned long machine)
-{
- unsigned long phys = mfn_to_pfn(machine >> PAGE_SHIFT);
- phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
- return phys;
-}
-
-/*
- * These are used to make use of C type-checking..
- */
-#if CONFIG_X86_PAE
-typedef struct { unsigned long pte_low, pte_high; } pte_t;
-typedef struct { unsigned long long pmd; } pmd_t;
-typedef struct { unsigned long long pgd; } pgd_t;
-#define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
-#else
-typedef struct { unsigned long pte_low; } pte_t;
-typedef struct { unsigned long pmd; } pmd_t;
-typedef struct { unsigned long pgd; } pgd_t;
-static inline unsigned long pte_val(pte_t x)
-{
- unsigned long ret = x.pte_low;
- if ( (ret & 1) ) ret = machine_to_phys(ret);
- return ret;
-}
-#define pte_val_ma(x) ((x).pte_low)
-#endif
-#define PTE_MASK PAGE_MASK
-
-typedef struct { unsigned long pgprot; } pgprot_t;
-
-static inline unsigned long pmd_val(pmd_t x)
-{
- unsigned long ret = x.pmd;
- if ( (ret & 1) ) ret = machine_to_phys(ret);
- return ret;
-}
-#define pgd_val(x) ({ BUG(); (unsigned long)0; })
-#define pgprot_val(x) ((x).pgprot)
-
-static inline pte_t __pte(unsigned long x)
-{
- if ( (x & 1) ) x = phys_to_machine(x);
- return ((pte_t) { (x) });
-}
-static inline pmd_t __pmd(unsigned long x)
-{
- if ( (x & 1) ) x = phys_to_machine(x);
- return ((pmd_t) { (x) });
-}
-#define __pgd(x) ({ BUG(); (pgprot_t) { 0 }; })
-#define __pgprot(x) ((pgprot_t) { (x) } )
-
-#endif /* !__ASSEMBLY__ */
-
-/* to align the pointer to the (next) page boundary */
-#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
-
-/*
- * This handles the memory map.. We could make this a config
- * option, but too many people screw it up, and too few need
- * it.
- *
- * A __PAGE_OFFSET of 0xC0000000 means that the kernel has
- * a virtual address space of one gigabyte, which limits the
- * amount of physical memory you can use to about 950MB.
- *
- * If you want more physical memory than this then see the CONFIG_HIGHMEM4G
- * and CONFIG_HIGHMEM64G options in the kernel configuration.
- */
-
-#define __PAGE_OFFSET (0xC0000000)
-
-#ifndef __ASSEMBLY__
-
-/*
- * Tell the user there is some problem. Beep too, so we can
- * see^H^H^Hhear bugs in early bootup as well!
- * The offending file and line are encoded after the "officially
- * undefined" opcode for parsing in the trap handler.
- */
-
-#if 1 /* Set to zero for a slightly smaller kernel */
-#define BUG() \
- __asm__ __volatile__( "ud2\n" \
- "\t.word %c0\n" \
- "\t.long %c1\n" \
- : : "i" (__LINE__), "i" (__FILE__))
-#else
-#define BUG() __asm__ __volatile__("ud2\n")
-#endif
-
-#define PAGE_BUG(page) do { \
- BUG(); \
-} while (0)
-
-/* Pure 2^n version of get_order */
-static __inline__ int get_order(unsigned long size)
-{
- int order;
-
- size = (size-1) >> (PAGE_SHIFT-1);
- order = -1;
- do {
- size >>= 1;
- order++;
- } while (size);
- return order;
-}
-
-#endif /* __ASSEMBLY__ */
-
-#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
-#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
-#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
-#define virt_to_page(kaddr) (mem_map + (__pa(kaddr) >> PAGE_SHIFT))
-#define VALID_PAGE(page) ((page - mem_map) < max_mapnr)
-
-#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
-/* VIRT <-> MACHINE conversion */
-#define virt_to_machine(_a) (phys_to_machine(__pa(_a)))
-#define machine_to_virt(_m) (__va(machine_to_phys(_m)))
-
-#endif /* __KERNEL__ */
-
-#endif /* _I386_PAGE_H */
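
The machine/physical split above is the heart of the paravirtualised memory model: the guest's "physical" frame numbers are an illusion kept in phys_to_machine_mapping, and every page-table write must translate through it. A self-contained model of the round trip with a toy four-entry table (values invented for illustration):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    static unsigned long p2m[4] = { 7, 3, 9, 2 };   /* pfn -> mfn */
    static unsigned long m2p[16];                   /* mfn -> pfn */

    static unsigned long phys_to_machine(unsigned long phys)
    {
        unsigned long machine = p2m[phys >> PAGE_SHIFT];
        return (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
    }

    static unsigned long machine_to_phys(unsigned long machine)
    {
        unsigned long phys = m2p[machine >> PAGE_SHIFT];
        return (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
    }

    int main(void)
    {
        unsigned long pfn, pa, ma;

        for (pfn = 0; pfn < 4; pfn++)               /* build the inverse map */
            m2p[p2m[pfn]] = pfn;

        pa = (2UL << PAGE_SHIFT) | 0x123;           /* pfn 2, offset 0x123 */
        ma = phys_to_machine(pa);
        printf("pa=%#lx -> ma=%#lx -> pa=%#lx\n", pa, ma, machine_to_phys(ma));
        return 0;
    }

The page offset passes through untouched in both directions, which is why pte contents only need their frame-number bits rewritten.
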
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/include/asm-xen/pci.h
--- a/linux-2.4.30-xen-sparse/include/asm-xen/pci.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,283 +0,0 @@
-#ifndef __i386_PCI_H
-#define __i386_PCI_H
-
-#include <linux/config.h>
-
-#ifdef __KERNEL__
-
-/* Can be used to override the logic in pci_scan_bus for skipping
- already-configured bus numbers - to be used for buggy BIOSes
- or architectures with incomplete PCI setup by the loader */
-
-#ifdef CONFIG_PCI
-extern unsigned int pcibios_assign_all_busses(void);
-#else
-#define pcibios_assign_all_busses() 0
-#endif
-#define pcibios_scan_all_fns() 0
-
-extern unsigned long pci_mem_start;
-#define PCIBIOS_MIN_IO 0x1000
-#define PCIBIOS_MIN_MEM (pci_mem_start)
-
-void pcibios_config_init(void);
-struct pci_bus * pcibios_scan_root(int bus);
-extern int (*pci_config_read)(int seg, int bus, int dev, int fn, int reg, int len, u32 *value);
-extern int (*pci_config_write)(int seg, int bus, int dev, int fn, int reg, int len, u32 value);
-
-void pcibios_set_master(struct pci_dev *dev);
-void pcibios_penalize_isa_irq(int irq);
-struct irq_routing_table *pcibios_get_irq_routing_table(void);
-int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
-
-/* Dynamic DMA mapping stuff.
- * i386 has everything mapped statically.
- */
-
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <asm/scatterlist.h>
-#include <linux/string.h>
-#include <asm/io.h>
-
-struct pci_dev;
-
-/* The networking and block device layers use this boolean for bounce
- * buffer decisions.
- */
-#define PCI_DMA_BUS_IS_PHYS (0)
-
-/* Allocate and map kernel buffer using consistent mode DMA for a device.
- * hwdev should be valid struct pci_dev pointer for PCI devices,
- * NULL for PCI-like buses (ISA, EISA).
- * Returns non-NULL cpu-view pointer to the buffer if successful and
- * sets *dma_addrp to the pci side dma address as well, else *dma_addrp
- * is undefined.
- */
-extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
- dma_addr_t *dma_handle);
-
-/* Free and unmap a consistent DMA buffer.
- * cpu_addr is what was returned from pci_alloc_consistent,
- * size must be the same as what as passed into pci_alloc_consistent,
- * and likewise dma_addr must be the same as what *dma_addrp was set to.
- *
- * References to the memory and mappings associated with cpu_addr/dma_addr
- * past this call are illegal.
- */
-extern void pci_free_consistent(struct pci_dev *hwdev, size_t size,
- void *vaddr, dma_addr_t dma_handle);
-
-/* Map a single buffer of the indicated size for DMA in streaming mode.
- * The 32-bit bus address to use is returned.
- *
- * Once the device is given the dma address, the device owns this memory
- * until either pci_unmap_single or pci_dma_sync_single is performed.
- */
-static inline dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr,
- size_t size, int direction)
-{
- if (direction == PCI_DMA_NONE)
- out_of_line_bug();
- flush_write_buffers();
- return virt_to_bus(ptr);
-}
-
-/* Unmap a single streaming mode DMA translation. The dma_addr and size
- * must match what was provided for in a previous pci_map_single call. All
- * other usages are undefined.
- *
- * After this call, reads by the cpu to the buffer are guaranteed to see
- * whatever the device wrote there.
- */
-static inline void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
- size_t size, int direction)
-{
- if (direction == PCI_DMA_NONE)
- out_of_line_bug();
- /* Nothing to do */
-}
-
-/*
- * pci_{map,unmap}_single_page maps a kernel page to a dma_addr_t. identical
- * to pci_map_single, but takes a struct page instead of a virtual address
- */
-static inline dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
- unsigned long offset, size_t size, int direction)
-{
- if (direction == PCI_DMA_NONE)
- out_of_line_bug();
-
- return page_to_bus(page) + offset;
-}
-
-static inline void pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address,
- size_t size, int direction)
-{
- if (direction == PCI_DMA_NONE)
- out_of_line_bug();
- /* Nothing to do */
-}
-
-/* pci_unmap_{page,single} is a nop so... */
-#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
-#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
-#define pci_unmap_addr(PTR, ADDR_NAME) (0)
-#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
-#define pci_unmap_len(PTR, LEN_NAME) (0)
-#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
-
-/* Map a set of buffers described by scatterlist in streaming
- * mode for DMA. This is the scatter-gather version of the
- * above pci_map_single interface. Here the scatter gather list
- * elements are each tagged with the appropriate dma address
- * and length. They are obtained via sg_dma_{address,length}(SG).
- *
- * NOTE: An implementation may be able to use a smaller number of
- * DMA address/length pairs than there are SG table elements.
- * (for example via virtual mapping capabilities)
- * The routine returns the number of addr/length pairs actually
- * used, at most nents.
- *
- * Device ownership issues as mentioned above for pci_map_single are
- * the same here.
- */
-static inline int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
- int nents, int direction)
-{
- int i;
-
- if (direction == PCI_DMA_NONE)
- out_of_line_bug();
-
- /*
- * temporary 2.4 hack
- */
- for (i = 0; i < nents; i++ ) {
- if (sg[i].address && sg[i].page)
- out_of_line_bug();
- else if (!sg[i].address && !sg[i].page)
- out_of_line_bug();
-
- if (sg[i].address)
- sg[i].dma_address = virt_to_bus(sg[i].address);
- else
- sg[i].dma_address = page_to_bus(sg[i].page) + sg[i].offset;
- }
-
- flush_write_buffers();
- return nents;
-}
-
-/* Unmap a set of streaming mode DMA translations.
- * Again, cpu read rules concerning calls here are the same as for
- * pci_unmap_single() above.
- */
-static inline void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
- int nents, int direction)
-{
- if (direction == PCI_DMA_NONE)
- out_of_line_bug();
- /* Nothing to do */
-}
-
-/* Make physical memory consistent for a single
- * streaming mode DMA translation after a transfer.
- *
- * If you perform a pci_map_single() but wish to interrogate the
- * buffer using the cpu, yet do not wish to teardown the PCI dma
- * mapping, you must call this function before doing so. At the
- * next point you give the PCI dma address back to the card, the
- * device again owns the buffer.
- */
-static inline void pci_dma_sync_single(struct pci_dev *hwdev,
- dma_addr_t dma_handle,
- size_t size, int direction)
-{
- if (direction == PCI_DMA_NONE)
- out_of_line_bug();
- flush_write_buffers();
-}
-
-/* Make physical memory consistent for a set of streaming
- * mode DMA translations after a transfer.
- *
- * The same as pci_dma_sync_single but for a scatter-gather list,
- * same rules and usage.
- */
-static inline void pci_dma_sync_sg(struct pci_dev *hwdev,
- struct scatterlist *sg,
- int nelems, int direction)
-{
- if (direction == PCI_DMA_NONE)
- out_of_line_bug();
- flush_write_buffers();
-}
-
-/* Return whether the given PCI device DMA address mask can
- * be supported properly. For example, if your device can
- * only drive the low 24-bits during PCI bus mastering, then
- * you would pass 0x00ffffff as the mask to this function.
- */
-static inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask)
-{
- /*
- * we fall back to GFP_DMA when the mask isn't all 1s,
- * so we can't guarantee allocations that must be
- * within a tighter range than GFP_DMA..
- */
- if(mask < 0x00ffffff)
- return 0;
-
- return 1;
-}
-
-/* This is always fine. */
-#define pci_dac_dma_supported(pci_dev, mask) (1)
-
-static __inline__ dma64_addr_t
-pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page, unsigned long offset, int direction)
-{
- return ((dma64_addr_t) page_to_bus(page) +
- (dma64_addr_t) offset);
-}
-
-static __inline__ struct page *
-pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr)
-{
- return bus_to_page(dma_addr);
-}
-
-static __inline__ unsigned long
-pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr)
-{
- return (dma_addr & ~PAGE_MASK);
-}
-
-static __inline__ void
-pci_dac_dma_sync_single(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
-{
- flush_write_buffers();
-}
-
-/* These macros should be used after a pci_map_sg call has been done
- * to get bus addresses of each of the SG entries and their lengths.
- * You should only work with the number of sg entries pci_map_sg
- * returns.
- */
-#define sg_dma_address(sg) ((sg)->dma_address)
-#define sg_dma_len(sg) ((sg)->length)
-
-/* Return the index of the PCI controller for device. */
-static inline int pci_controller_num(struct pci_dev *dev)
-{
- return 0;
-}
-
-#define HAVE_PCI_MMAP
-extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine);
-
-#endif /* __KERNEL__ */
-
-#endif /* __i386_PCI_H */
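
Most of this file is identity mapping (virt_to_bus plus a write-buffer flush), so the only real policy is the mask check in pci_dma_supported(): anything narrower than 24 bits cannot be honoured because the allocator can only fall back to the 16MB GFP_DMA zone. A trivial sketch of that cut-off:

    #include <stdio.h>
    #include <stdint.h>

    /* mirrors the check in pci_dma_supported() above */
    static int dma_supported(uint64_t mask)
    {
        return mask < 0x00ffffff ? 0 : 1;
    }

    int main(void)
    {
        printf("16-bit mask: %d\n", dma_supported(0x0000ffffULL)); /* rejected */
        printf("24-bit mask: %d\n", dma_supported(0x00ffffffULL)); /* ok */
        printf("32-bit mask: %d\n", dma_supported(0xffffffffULL)); /* ok */
        return 0;
    }
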
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/include/asm-xen/pgalloc.h
--- a/linux-2.4.30-xen-sparse/include/asm-xen/pgalloc.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,285 +0,0 @@
-#ifndef _I386_PGALLOC_H
-#define _I386_PGALLOC_H
-
-#include <linux/config.h>
-#include <asm/processor.h>
-#include <asm/fixmap.h>
-#include <asm/hypervisor.h>
-#include <linux/threads.h>
-
-/*
- * Quick lists are aligned so that least significant bits of array pointer
- * are all zero when list is empty, and all one when list is full.
- */
-#define QUICKLIST_ENTRIES 256
-#define QUICKLIST_EMPTY(_l) !((unsigned long)(_l) & ((QUICKLIST_ENTRIES*4)-1))
-#define QUICKLIST_FULL(_l) QUICKLIST_EMPTY((_l)+1)
-#define pgd_quicklist (current_cpu_data.pgd_quick)
-#define pmd_quicklist (current_cpu_data.pmd_quick)
-#define pte_quicklist (current_cpu_data.pte_quick)
-#define pgtable_cache_size (current_cpu_data.pgtable_cache_sz)
-
-#define pmd_populate(mm, pmd, pte) \
- do { \
- set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte))); \
- XEN_flush_page_update_queue(); \
- } while ( 0 )
-
-/*
- * Allocate and free page tables.
- */
-
-#if defined (CONFIG_X86_PAE)
-
-#error "no PAE support as yet"
-
-/*
- * We can't include <linux/slab.h> here, thus these uglinesses.
- */
-struct kmem_cache_s;
-
-extern struct kmem_cache_s *pae_pgd_cachep;
-extern void *kmem_cache_alloc(struct kmem_cache_s *, int);
-extern void kmem_cache_free(struct kmem_cache_s *, void *);
-
-
-static inline pgd_t *get_pgd_slow(void)
-{
- int i;
- pgd_t *pgd = kmem_cache_alloc(pae_pgd_cachep, GFP_KERNEL);
-
- if (pgd) {
- for (i = 0; i < USER_PTRS_PER_PGD; i++) {
- unsigned long pmd = __get_free_page(GFP_KERNEL);
- if (!pmd)
- goto out_oom;
- clear_page(pmd);
- set_pgd(pgd + i, __pgd(1 + __pa(pmd)));
- }
- memcpy(pgd + USER_PTRS_PER_PGD,
- init_mm.pgd + USER_PTRS_PER_PGD,
- (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
- }
- return pgd;
-out_oom:
- for (i--; i >= 0; i--)
- free_page((unsigned long)__va(pgd_val(pgd[i])-1));
- kmem_cache_free(pae_pgd_cachep, pgd);
- return NULL;
-}
-
-#else
-
-static inline pgd_t *get_pgd_slow(void)
-{
- pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
-
- if (pgd) {
- memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
- memcpy(pgd + USER_PTRS_PER_PGD,
- init_mm.pgd + USER_PTRS_PER_PGD,
- (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
- __make_page_readonly(pgd);
- queue_pgd_pin(__pa(pgd));
- }
- return pgd;
-}
-
-#endif /* CONFIG_X86_PAE */
-
-static inline pgd_t *get_pgd_fast(void)
-{
- unsigned long ret;
-
- if ( !QUICKLIST_EMPTY(pgd_quicklist) ) {
- ret = *(--pgd_quicklist);
- pgtable_cache_size--;
-
- } else
- ret = (unsigned long)get_pgd_slow();
- return (pgd_t *)ret;
-}
-
-static inline void free_pgd_slow(pgd_t *pgd)
-{
-#if defined(CONFIG_X86_PAE)
-#error
- int i;
-
- for (i = 0; i < USER_PTRS_PER_PGD; i++)
- free_page((unsigned long)__va(pgd_val(pgd[i])-1));
- kmem_cache_free(pae_pgd_cachep, pgd);
-#else
- queue_pgd_unpin(__pa(pgd));
- __make_page_writable(pgd);
- free_page((unsigned long)pgd);
-#endif
-}
-
-static inline void free_pgd_fast(pgd_t *pgd)
-{
- if ( !QUICKLIST_FULL(pgd_quicklist) ) {
- *(pgd_quicklist++) = (unsigned long)pgd;
- pgtable_cache_size++;
- } else
- free_pgd_slow(pgd);
-}
-
-static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
-{
- pte_t *pte;
-
- pte = (pte_t *) __get_free_page(GFP_KERNEL);
- if (pte)
- {
- clear_page(pte);
- __make_page_readonly(pte);
- queue_pte_pin(__pa(pte));
- }
- return pte;
-
-}
-
-static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm,
- unsigned long address)
-{
- unsigned long ret = 0;
- if ( !QUICKLIST_EMPTY(pte_quicklist) ) {
- ret = *(--pte_quicklist);
- pgtable_cache_size--;
- }
- return (pte_t *)ret;
-}
-
-static __inline__ void pte_free_slow(pte_t *pte)
-{
- queue_pte_unpin(__pa(pte));
- __make_page_writable(pte);
- free_page((unsigned long)pte);
-}
-
-static inline void pte_free_fast(pte_t *pte)
-{
- if ( !QUICKLIST_FULL(pte_quicklist) ) {
- *(pte_quicklist++) = (unsigned long)pte;
- pgtable_cache_size++;
- } else
- pte_free_slow(pte);
-}
-
-#define pte_free(pte) pte_free_fast(pte)
-#define pgd_free(pgd) free_pgd_fast(pgd)
-#define pgd_alloc(mm) get_pgd_fast()
-
-/*
- * allocating and freeing a pmd is trivial: the 1-entry pmd is
- * inside the pgd, so has no extra memory associated with it.
- * (In the PAE case we free the pmds as part of the pgd.)
- */
-
-#define pmd_alloc_one_fast(mm, addr) ({ BUG(); ((pmd_t *)1); })
-#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
-#define pmd_free_slow(x) do { } while (0)
-#define pmd_free_fast(x) do { } while (0)
-#define pmd_free(x) do { } while (0)
-#define pgd_populate(mm, pmd, pte) BUG()
-
-extern int do_check_pgt_cache(int, int);
-
-/*
- * TLB flushing:
- *
- * - flush_tlb() flushes the current mm struct TLBs
- * - flush_tlb_all() flushes all processes TLBs
- * - flush_tlb_mm(mm) flushes the specified mm context TLB's
- * - flush_tlb_page(vma, vmaddr) flushes one page
- * - flush_tlb_range(mm, start, end) flushes a range of pages
- * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
- *
- * ..but the i386 has somewhat limited tlb flushing capabilities,
- * and page-granular flushes are available only on i486 and up.
- */
-
-#ifndef CONFIG_SMP
-
-#define flush_tlb() __flush_tlb()
-#define flush_tlb_all() __flush_tlb_all()
-#define local_flush_tlb() __flush_tlb()
-
-static inline void flush_tlb_mm(struct mm_struct *mm)
-{
- if (mm == current->active_mm) queue_tlb_flush();
- XEN_flush_page_update_queue();
-}
-
-static inline void flush_tlb_page(struct vm_area_struct *vma,
- unsigned long addr)
-{
- if (vma->vm_mm == current->active_mm) queue_invlpg(addr);
- XEN_flush_page_update_queue();
-}
-
-static inline void flush_tlb_range(struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
- if (mm == current->active_mm) queue_tlb_flush();
- XEN_flush_page_update_queue();
-}
-
-#else
-#error no guestos SMP support yet...
-#include <asm/smp.h>
-
-#define local_flush_tlb() \
- __flush_tlb()
-
-extern void flush_tlb_all(void);
-extern void flush_tlb_current_task(void);
-extern void flush_tlb_mm(struct mm_struct *);
-extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
-
-#define flush_tlb() flush_tlb_current_task()
-
-static inline void flush_tlb_range(struct mm_struct * mm, unsigned long start, unsigned long end)
-{
- flush_tlb_mm(mm);
-}
-
-#define TLBSTATE_OK 1
-#define TLBSTATE_LAZY 2
-
-struct tlb_state
-{
- struct mm_struct *active_mm;
- int state;
-} ____cacheline_aligned;
-extern struct tlb_state cpu_tlbstate[NR_CPUS];
-
-#endif /* CONFIG_SMP */
-
-static inline void flush_tlb_pgtables(struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
- /* i386 does not keep any page table caches in TLB */
- XEN_flush_page_update_queue();
-}
-
-/*
- * NB. The 'domid' field should be zero if mapping I/O space (non RAM).
- * Otherwise it identifies the owner of the memory that is being mapped.
- */
-extern int direct_remap_area_pages(struct mm_struct *mm,
- unsigned long address,
- unsigned long machine_addr,
- unsigned long size,
- pgprot_t prot,
- domid_t domid);
-
-extern int __direct_remap_area_pages(struct mm_struct *mm,
- unsigned long address,
- unsigned long size,
- mmu_update_t *v);
-
-
-
-#endif /* _I386_PGALLOC_H */
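
The quicklist macros at the top of this file rely on the free-list array being aligned to its own size, so the low bits of the stack pointer alone say whether the list is empty or full. A runnable demonstration of the trick; the original hard-codes *4 for a 32-bit unsigned long, generalised here with sizeof, which is the only adaptation:

    #include <stdio.h>
    #include <stdlib.h>

    #define QUICKLIST_ENTRIES 256
    #define QUICKLIST_BYTES   (QUICKLIST_ENTRIES * sizeof(unsigned long))
    #define QUICKLIST_EMPTY(_l) (!((unsigned long)(_l) & (QUICKLIST_BYTES - 1)))
    #define QUICKLIST_FULL(_l)  QUICKLIST_EMPTY((_l) + 1)

    int main(void)
    {
        /* the array must be aligned to its own size for the bit trick */
        unsigned long *base = aligned_alloc(QUICKLIST_BYTES, QUICKLIST_BYTES);
        unsigned long *sp = base;

        if (!base)
            return 1;
        printf("empty at base:     %d\n", QUICKLIST_EMPTY(sp));  /* 1 */
        *(sp++) = 0xdeadbeef;                   /* push one free page */
        printf("empty after push:  %d\n", QUICKLIST_EMPTY(sp));  /* 0 */
        sp = base + QUICKLIST_ENTRIES - 1;      /* 255 entries pushed */
        printf("full at last slot: %d\n", QUICKLIST_FULL(sp));   /* 1 */
        free(base);
        return 0;
    }
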
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/include/asm-xen/pgtable-2level.h
--- a/linux-2.4.30-xen-sparse/include/asm-xen/pgtable-2level.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,103 +0,0 @@
-#ifndef _I386_PGTABLE_2LEVEL_H
-#define _I386_PGTABLE_2LEVEL_H
-
-/*
- * traditional i386 two-level paging structure:
- */
-
-#define PGDIR_SHIFT 22
-#define PTRS_PER_PGD 1024
-
-/*
- * the i386 is two-level, so we don't really have any
- * PMD directory physically.
- */
-#define PMD_SHIFT 22
-#define PTRS_PER_PMD 1
-
-#define PTRS_PER_PTE 1024
-
-#define pte_ERROR(e) \
- printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
-#define pmd_ERROR(e) \
- printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
-#define pgd_ERROR(e) \
- printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
-
-/*
- * The "pgd_xxx()" functions here are trivial for a folded two-level
- * setup: the pgd is never bad, and a pmd always exists (as it's folded
- * into the pgd entry)
- */
-static inline int pgd_none(pgd_t pgd) { return 0; }
-static inline int pgd_bad(pgd_t pgd) { return 0; }
-static inline int pgd_present(pgd_t pgd) { return 1; }
-#define pgd_clear(xp) do { } while (0)
-
-#define set_pte(pteptr, pteval) queue_l1_entry_update(pteptr, (pteval).pte_low)
-#define set_pte_atomic(pteptr, pteval) queue_l1_entry_update(pteptr, (pteval).pte_low)
-#define set_pmd(pmdptr, pmdval) queue_l2_entry_update((pmdptr), (pmdval))
-#define set_pgd(pgdptr, pgdval) ((void)0)
-
-#define pgd_page(pgd) \
-((unsigned long) __va(pgd_val(pgd) & PAGE_MASK))
-
-static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
-{
- return (pmd_t *) dir;
-}
-
-#define pte_same(a, b) ((a).pte_low == (b).pte_low)
-
-/*
- * We detect special mappings in one of two ways:
- * 1. If the MFN is an I/O page then Xen will set the m2p entry
- * to be outside our maximum possible pseudophys range.
- * 2. If the MFN belongs to a different domain then we will certainly
- * not have MFN in our p2m table. Conversely, if the page is ours,
- * then we'll have p2m(m2p(MFN))==MFN.
- * If we detect a special mapping then it doesn't have a 'struct page'.
- * We force !VALID_PAGE() by returning an out-of-range pointer.
- *
- * NB. These checks require that, for any MFN that is not in our reservation,
- * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
- * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
- * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
- *
- * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
- * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
- * require. In all the cases we care about, the high bit gets shifted out
- * (e.g., phys_to_machine()) so behaviour there is correct.
- */
-#define INVALID_P2M_ENTRY (~0UL)
-#define FOREIGN_FRAME(_m) ((_m) | (1UL<<((sizeof(unsigned long)*8)-1)))
-#define pte_page(_pte) \
-({ \
- unsigned long mfn = (_pte).pte_low >> PAGE_SHIFT; \
- unsigned long pfn = mfn_to_pfn(mfn); \
- if ( (pfn >= max_mapnr) || (pfn_to_mfn(pfn) != mfn) ) \
- pfn = max_mapnr; /* special: force !VALID_PAGE() */ \
- &mem_map[pfn]; \
-})
-
-#define pte_none(x) (!(x).pte_low)
-#define __mk_pte(page_nr,pgprot) __pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
-
-/*
- * A note on implementation of this atomic 'get-and-clear' operation.
- * This is actually very simple because XenoLinux can only run on a single
- * processor. Therefore, we cannot race other processors setting the 'accessed'
- * or 'dirty' bits on a page-table entry.
- * Even if pages are shared between domains, that is not a problem because
- * each domain will have separate page tables, with their own versions of
- * accessed & dirty state.
- */
-static inline pte_t ptep_get_and_clear(pte_t *xp)
-{
- pte_t pte = *xp;
- if ( !pte_none(pte) )
- queue_l1_entry_update(xp, 0);
- return pte;
-}
-
-#endif /* _I386_PGTABLE_2LEVEL_H */
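
The ownership test described in the big comment above, p2m(m2p(MFN)) == MFN, is easy to model in isolation. Toy tables with invented values; a foreign mapping and an I/O frame both fail the round trip, exactly as pte_page() requires:

    #include <stdio.h>

    #define MAX_PFN 4
    #define INVALID_P2M_ENTRY (~0UL)

    static unsigned long p2m[MAX_PFN] = { 7, 3, 9, 2 };  /* pfn -> mfn */
    static unsigned long m2p[16];                        /* mfn -> pfn */

    static int mfn_is_ours(unsigned long mfn)
    {
        unsigned long pfn = m2p[mfn];
        return pfn < MAX_PFN && p2m[pfn] == mfn;
    }

    int main(void)
    {
        unsigned long pfn, mfn;

        for (mfn = 0; mfn < 16; mfn++)
            m2p[mfn] = INVALID_P2M_ENTRY;
        for (pfn = 0; pfn < MAX_PFN; pfn++)
            m2p[p2m[pfn]] = pfn;
        m2p[5] = 1;                 /* foreign MFN aliasing our PFN 1 */

        printf("mfn 9 (ours):    %d\n", mfn_is_ours(9)); /* 1 */
        printf("mfn 5 (foreign): %d\n", mfn_is_ours(5)); /* 0: p2m[1] == 3 */
        printf("mfn 8 (I/O):     %d\n", mfn_is_ours(8)); /* 0: out of range */
        return 0;
    }
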
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/include/asm-xen/pgtable.h
--- a/linux-2.4.30-xen-sparse/include/asm-xen/pgtable.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,370 +0,0 @@
-#ifndef _I386_PGTABLE_H
-#define _I386_PGTABLE_H
-
-#include <linux/config.h>
-
-/*
- * The Linux memory management assumes a three-level page table setup. On
- * the i386, we use that, but "fold" the mid level into the top-level page
- * table, so that we physically have the same two-level page table as the
- * i386 mmu expects.
- *
- * This file contains the functions and defines necessary to modify and use
- * the i386 page table tree.
- */
-#ifndef __ASSEMBLY__
-#include <asm/processor.h>
-#include <asm/hypervisor.h>
-#include <linux/threads.h>
-#include <asm/fixmap.h>
-
-#ifndef _I386_BITOPS_H
-#include <asm/bitops.h>
-#endif
-
-#define swapper_pg_dir 0
-extern void paging_init(void);
-
-/* Caches aren't brain-dead on the intel. */
-#define flush_cache_all() do { } while (0)
-#define flush_cache_mm(mm) do { } while (0)
-#define flush_cache_range(mm, start, end) do { } while (0)
-#define flush_cache_page(vma, vmaddr) do { } while (0)
-#define flush_page_to_ram(page) do { } while (0)
-#define flush_dcache_page(page) do { } while (0)
-#define flush_icache_range(start, end) do { } while (0)
-#define flush_icache_page(vma,pg) do { } while (0)
-#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
-
-extern unsigned long pgkern_mask;
-
-#define __flush_tlb() ({ queue_tlb_flush(); XEN_flush_page_update_queue(); })
-#define __flush_tlb_global() __flush_tlb()
-#define __flush_tlb_all() __flush_tlb_global()
-#define __flush_tlb_one(addr) ({ queue_invlpg(addr); XEN_flush_page_update_queue(); })
-#define __flush_tlb_single(addr) ({ queue_invlpg(addr); XEN_flush_page_update_queue(); })
-
-/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-extern unsigned long empty_zero_page[1024];
-#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
-
-#endif /* !__ASSEMBLY__ */
-
-/*
- * The Linux x86 paging architecture is 'compile-time dual-mode', it
- * implements both the traditional 2-level x86 page tables and the
- * newer 3-level PAE-mode page tables.
- */
-#ifndef __ASSEMBLY__
-#if CONFIG_X86_PAE
-# include <asm/pgtable-3level.h>
-
-/*
- * Need to initialise the X86 PAE caches
- */
-extern void pgtable_cache_init(void);
-
-#else
-# include <asm/pgtable-2level.h>
-
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init() do { } while (0)
-
-#endif
-#endif
-
-#define PMD_SIZE (1UL << PMD_SHIFT)
-#define PMD_MASK (~(PMD_SIZE-1))
-#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
-#define PGDIR_MASK (~(PGDIR_SIZE-1))
-
-#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
-#define FIRST_USER_PGD_NR 0
-
-#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
-#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
-
-#define TWOLEVEL_PGDIR_SHIFT 22
-#define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT)
-#define BOOT_KERNEL_PGD_PTRS (1024-BOOT_USER_PGD_PTRS)
-
-
-#ifndef __ASSEMBLY__
-/* 4MB is just a nice "safety zone". Also, we align to a fresh pde. */
-#define VMALLOC_OFFSET (4*1024*1024)
-extern void * high_memory;
-#define VMALLOC_START (((unsigned long) high_memory + 2*VMALLOC_OFFSET-1) & \
- ~(VMALLOC_OFFSET-1))
-#define VMALLOC_VMADDR(x) ((unsigned long)(x))
-#if CONFIG_HIGHMEM
-# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
-#else
-# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
-#endif
-
-#define _PAGE_BIT_PRESENT 0
-#define _PAGE_BIT_RW 1
-#define _PAGE_BIT_USER 2
-#define _PAGE_BIT_PWT 3
-#define _PAGE_BIT_PCD 4
-#define _PAGE_BIT_ACCESSED 5
-#define _PAGE_BIT_DIRTY 6
-#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page, Pentium+, if present.. */
-#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
-
-#define _PAGE_PRESENT 0x001
-#define _PAGE_RW 0x002
-#define _PAGE_USER 0x004
-#define _PAGE_PWT 0x008
-#define _PAGE_PCD 0x010
-#define _PAGE_ACCESSED 0x020
-#define _PAGE_DIRTY 0x040
-#define _PAGE_PSE 0x080 /* 4 MB (or 2MB) page, Pentium+, if present.. */
-#define _PAGE_GLOBAL 0x100 /* Global TLB entry PPro+ */
-
-#define _PAGE_PROTNONE 0x080 /* If not present */
-
-#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
-
-#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
-#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
-#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
-#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
-
-#define __PAGE_KERNEL \
- (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
-#define __PAGE_KERNEL_NOCACHE \
- (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED)
-#define __PAGE_KERNEL_RO \
- (_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED)
-
-#if 0
-#define MAKE_GLOBAL(x) __pgprot((x) | _PAGE_GLOBAL)
-#else
-#define MAKE_GLOBAL(x) __pgprot(x)
-#endif
-
-#define PAGE_KERNEL MAKE_GLOBAL(__PAGE_KERNEL)
-#define PAGE_KERNEL_RO MAKE_GLOBAL(__PAGE_KERNEL_RO)
-#define PAGE_KERNEL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)
-
-/*
- * The i386 can't do page protection for execute, and considers
- * execute the same as read. Also, write permissions imply read permissions.
- * This is the closest we can get..
- */
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY
-#define __P100 PAGE_READONLY
-#define __P101 PAGE_READONLY
-#define __P110 PAGE_COPY
-#define __P111 PAGE_COPY
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED
-#define __S100 PAGE_READONLY
-#define __S101 PAGE_READONLY
-#define __S110 PAGE_SHARED
-#define __S111 PAGE_SHARED
-
-#define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
-#define pte_clear(xp) queue_l1_entry_update(xp, 0)
-
-#define pmd_none(x) (!(x).pmd)
-#define pmd_present(x) ((x).pmd & _PAGE_PRESENT)
-#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
-#define pmd_bad(x) (((x).pmd & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
-
-
-#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
-
-/*
- * The following only work if pte_present() is true.
- * Undefined behaviour if not..
- */
-static inline int pte_read(pte_t pte) { return (pte).pte_low & _PAGE_USER; }
-static inline int pte_exec(pte_t pte) { return (pte).pte_low & _PAGE_USER; }
-static inline int pte_dirty(pte_t pte) { return (pte).pte_low & _PAGE_DIRTY; }
-static inline int pte_young(pte_t pte) { return (pte).pte_low & _PAGE_ACCESSED; }
-static inline int pte_write(pte_t pte) { return (pte).pte_low & _PAGE_RW; }
-
-static inline pte_t pte_rdprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_USER; return pte; }
-static inline pte_t pte_exprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_USER; return pte; }
-static inline pte_t pte_mkclean(pte_t pte) { (pte).pte_low &= ~_PAGE_DIRTY; return pte; }
-static inline pte_t pte_mkold(pte_t pte) { (pte).pte_low &= ~_PAGE_ACCESSED; return pte; }
-static inline pte_t pte_wrprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_RW; return pte; }
-static inline pte_t pte_mkread(pte_t pte) { (pte).pte_low |= _PAGE_USER; return pte; }
-static inline pte_t pte_mkexec(pte_t pte) { (pte).pte_low |= _PAGE_USER; return pte; }
-static inline pte_t pte_mkdirty(pte_t pte) { (pte).pte_low |= _PAGE_DIRTY; return pte; }
-static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte_low |= _PAGE_ACCESSED; return pte; }
-static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte_low |= _PAGE_RW; return pte; }
-
-static inline int ptep_test_and_clear_dirty(pte_t *ptep)
-{
- unsigned long pteval = *(unsigned long *)ptep;
- int ret = pteval & _PAGE_DIRTY;
- if ( ret ) queue_l1_entry_update(ptep, pteval & ~_PAGE_DIRTY);
- return ret;
-}
-static inline int ptep_test_and_clear_young(pte_t *ptep)
-{
- unsigned long pteval = *(unsigned long *)ptep;
- int ret = pteval & _PAGE_ACCESSED;
- if ( ret ) queue_l1_entry_update(ptep, pteval & ~_PAGE_ACCESSED);
- return ret;
-}
-static inline void ptep_set_wrprotect(pte_t *ptep)
-{
- unsigned long pteval = *(unsigned long *)ptep;
- if ( (pteval & _PAGE_RW) )
- queue_l1_entry_update(ptep, pteval & ~_PAGE_RW);
-}
-static inline void ptep_mkdirty(pte_t *ptep)
-{
- unsigned long pteval = *(unsigned long *)ptep;
- if ( !(pteval & _PAGE_DIRTY) )
- queue_l1_entry_update(ptep, pteval | _PAGE_DIRTY);
-}
-
-/*
- * Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- */
-
-#define mk_pte(page, pgprot) __mk_pte((page) - mem_map, (pgprot))
-
-/* This takes a physical page address that is used by the remapping functions */
-#define mk_pte_phys(physpage, pgprot) __mk_pte((physpage) >> PAGE_SHIFT, pgprot)
-
-static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
-{
- pte.pte_low &= _PAGE_CHG_MASK;
- pte.pte_low |= pgprot_val(newprot);
- return pte;
-}
-
-#define page_pte(page) page_pte_prot(page, __pgprot(0))
-
-#define pmd_page(pmd) \
-((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
-
-/* to find an entry in a page-table-directory. */
-#define pgd_index(address) ((address >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-
-#define __pgd_offset(address) pgd_index(address)
-
-#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
-
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-#define __pmd_offset(address) \
- (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
-
-/* Find an entry in the third-level page table.. */
-#define __pte_offset(address) \
- ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset(dir, address) ((pte_t *) pmd_page(*(dir)) + \
- __pte_offset(address))
-
-/*
- * The i386 doesn't have any external MMU info: the kernel page
- * tables contain all the necessary information.
- */
-#define update_mmu_cache(vma,address,pte) do { } while (0)
-
-/* Encode and de-code a swap entry */
-#define SWP_TYPE(x) (((x).val >> 1) & 0x3f)
-#define SWP_OFFSET(x) ((x).val >> 8)
-#define SWP_ENTRY(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
-#define pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low })
-#define swp_entry_to_pte(x) ((pte_t) { (x).val })
-
-struct page;
-int change_page_attr(struct page *, int, pgprot_t prot);
-
-static inline void __make_page_readonly(void *va)
-{
- pgd_t *pgd = pgd_offset_k((unsigned long)va);
- pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
- pte_t *pte = pte_offset(pmd, (unsigned long)va);
- queue_l1_entry_update(pte, (*(unsigned long *)pte)&~_PAGE_RW);
-}
-
-static inline void __make_page_writable(void *va)
-{
- pgd_t *pgd = pgd_offset_k((unsigned long)va);
- pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
- pte_t *pte = pte_offset(pmd, (unsigned long)va);
- queue_l1_entry_update(pte, (*(unsigned long *)pte)|_PAGE_RW);
-}
-
-static inline void make_page_readonly(void *va)
-{
- pgd_t *pgd = pgd_offset_k((unsigned long)va);
- pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
- pte_t *pte = pte_offset(pmd, (unsigned long)va);
- queue_l1_entry_update(pte, (*(unsigned long *)pte)&~_PAGE_RW);
- if ( (unsigned long)va >= VMALLOC_START )
- __make_page_readonly(machine_to_virt(
- *(unsigned long *)pte&PAGE_MASK));
-}
-
-static inline void make_page_writable(void *va)
-{
- pgd_t *pgd = pgd_offset_k((unsigned long)va);
- pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
- pte_t *pte = pte_offset(pmd, (unsigned long)va);
- queue_l1_entry_update(pte, (*(unsigned long *)pte)|_PAGE_RW);
- if ( (unsigned long)va >= VMALLOC_START )
- __make_page_writable(machine_to_virt(
- *(unsigned long *)pte&PAGE_MASK));
-}
-
-static inline void make_pages_readonly(void *va, unsigned int nr)
-{
- while ( nr-- != 0 )
- {
- make_page_readonly(va);
- va = (void *)((unsigned long)va + PAGE_SIZE);
- }
-}
-
-static inline void make_pages_writable(void *va, unsigned int nr)
-{
- while ( nr-- != 0 )
- {
- make_page_writable(va);
- va = (void *)((unsigned long)va + PAGE_SIZE);
- }
-}
-
-static inline unsigned long arbitrary_virt_to_machine(void *va)
-{
- pgd_t *pgd = pgd_offset_k((unsigned long)va);
- pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
- pte_t *pte = pte_offset(pmd, (unsigned long)va);
- unsigned long pa = (*(unsigned long *)pte) & PAGE_MASK;
- return pa | ((unsigned long)va & (PAGE_SIZE-1));
-}
-
-#endif /* !__ASSEMBLY__ */
-
-/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
-#define PageSkip(page) (0)
-#define kern_addr_valid(addr) (1)
-
-#define io_remap_page_range remap_page_range
-
-#endif /* _I386_PGTABLE_H */
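
All the pgd_index()/__pte_offset() arithmetic above is just a fixed 10/10/12 split of a 32-bit virtual address in the two-level case. A quick standalone check with a sample kernel address:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT   12
    #define PGDIR_SHIFT  22
    #define PTRS_PER_PGD 1024
    #define PTRS_PER_PTE 1024

    int main(void)
    {
        uint32_t va = 0xC0123456;   /* sample address above PAGE_OFFSET */

        printf("pgd index: %u\n", (va >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)); /* 768 */
        printf("pte index: %u\n", (va >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));  /* 291 */
        printf("offset:    %#x\n", va & ((1 << PAGE_SHIFT) - 1));            /* 0x456 */
        return 0;
    }

Index 768 is where PAGE_OFFSET (0xC0000000) lands, which is why the make_page_readonly() helpers can walk pgd_offset_k() for any kernel virtual address.
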
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/include/asm-xen/processor.h
--- a/linux-2.4.30-xen-sparse/include/asm-xen/processor.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,483 +0,0 @@
-/*
- * include/asm-i386/processor.h
- *
- * Copyright (C) 1994 Linus Torvalds
- */
-
-#ifndef __ASM_I386_PROCESSOR_H
-#define __ASM_I386_PROCESSOR_H
-
-#include <asm/vm86.h>
-#include <asm/math_emu.h>
-#include <asm/segment.h>
-#include <asm/page.h>
-#include <asm/types.h>
-#include <asm/sigcontext.h>
-#include <asm/cpufeature.h>
-#include <linux/cache.h>
-#include <linux/config.h>
-#include <linux/threads.h>
-
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#define current_text_addr() ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })
-
-/*
- * CPU type and hardware bug flags. Kept separately for each CPU.
- * Members of this structure are referenced in head.S, so think twice
- * before touching them. [mj]
- */
-
-struct cpuinfo_x86 {
- __u8 x86; /* CPU family */
- __u8 x86_vendor; /* CPU vendor */
- __u8 x86_model;
- __u8 x86_mask;
- char wp_works_ok; /* It doesn't on 386's */
- char hlt_works_ok; /* Problems on some 486Dx4's and old 386's */
- char hard_math;
- char rfu;
- int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */
- __u32 x86_capability[NCAPINTS];
- char x86_vendor_id[16];
- char x86_model_id[64];
- int x86_cache_size; /* in KB - valid for CPUS which support this
- call */
- int fdiv_bug;
- int f00f_bug;
- int coma_bug;
- unsigned long loops_per_jiffy;
- unsigned long *pgd_quick;
- unsigned long *pmd_quick;
- unsigned long *pte_quick;
- unsigned long pgtable_cache_sz;
-} __attribute__((__aligned__(SMP_CACHE_BYTES)));
-
-#define X86_VENDOR_INTEL 0
-#define X86_VENDOR_CYRIX 1
-#define X86_VENDOR_AMD 2
-#define X86_VENDOR_UMC 3
-#define X86_VENDOR_NEXGEN 4
-#define X86_VENDOR_CENTAUR 5
-#define X86_VENDOR_RISE 6
-#define X86_VENDOR_TRANSMETA 7
-#define X86_VENDOR_NSC 8
-#define X86_VENDOR_SIS 9
-#define X86_VENDOR_UNKNOWN 0xff
-
-/*
- * capabilities of CPUs
- */
-
-extern struct cpuinfo_x86 boot_cpu_data;
-extern struct tss_struct init_tss[NR_CPUS];
-
-#ifdef CONFIG_SMP
-extern struct cpuinfo_x86 cpu_data[];
-#define current_cpu_data cpu_data[smp_processor_id()]
-#else
-#define cpu_data (&boot_cpu_data)
-#define current_cpu_data boot_cpu_data
-#endif
-
-extern char ignore_irq13;
-
-extern void identify_cpu(struct cpuinfo_x86 *);
-extern void print_cpu_info(struct cpuinfo_x86 *);
-
-/*
- * EFLAGS bits
- */
-#define X86_EFLAGS_CF 0x00000001 /* Carry Flag */
-#define X86_EFLAGS_PF 0x00000004 /* Parity Flag */
-#define X86_EFLAGS_AF 0x00000010 /* Auxiliary carry Flag */
-#define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */
-#define X86_EFLAGS_SF 0x00000080 /* Sign Flag */
-#define X86_EFLAGS_TF 0x00000100 /* Trap Flag */
-#define X86_EFLAGS_IF 0x00000200 /* Interrupt Flag */
-#define X86_EFLAGS_DF 0x00000400 /* Direction Flag */
-#define X86_EFLAGS_OF 0x00000800 /* Overflow Flag */
-#define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */
-#define X86_EFLAGS_NT 0x00004000 /* Nested Task */
-#define X86_EFLAGS_RF 0x00010000 /* Resume Flag */
-#define X86_EFLAGS_VM 0x00020000 /* Virtual Mode */
-#define X86_EFLAGS_AC 0x00040000 /* Alignment Check */
-#define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */
-#define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */
-#define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */
-
-/*
- * Generic CPUID function
- */
-static inline void cpuid(int op, int *eax, int *ebx, int *ecx, int *edx)
-{
- __asm__("cpuid"
- : "=a" (*eax),
- "=b" (*ebx),
- "=c" (*ecx),
- "=d" (*edx)
- : "0" (op));
-}
-
-/*
- * CPUID functions returning a single datum
- */
-static inline unsigned int cpuid_eax(unsigned int op)
-{
- unsigned int eax;
-
- __asm__("cpuid"
- : "=a" (eax)
- : "0" (op)
- : "bx", "cx", "dx");
- return eax;
-}
-static inline unsigned int cpuid_ebx(unsigned int op)
-{
- unsigned int eax, ebx;
-
- __asm__("cpuid"
- : "=a" (eax), "=b" (ebx)
- : "0" (op)
- : "cx", "dx" );
- return ebx;
-}
-static inline unsigned int cpuid_ecx(unsigned int op)
-{
- unsigned int eax, ecx;
-
- __asm__("cpuid"
- : "=a" (eax), "=c" (ecx)
- : "0" (op)
- : "bx", "dx" );
- return ecx;
-}
-static inline unsigned int cpuid_edx(unsigned int op)
-{
- unsigned int eax, edx;
-
- __asm__("cpuid"
- : "=a" (eax), "=d" (edx)
- : "0" (op)
- : "bx", "cx");
- return edx;
-}
-
-/*
- * Intel CPU features in CR4
- */
-#define X86_CR4_VME 0x0001 /* enable vm86 extensions */
-#define X86_CR4_PVI 0x0002 /* virtual interrupts flag enable */
-#define X86_CR4_TSD 0x0004 /* disable time stamp at ipl 3 */
-#define X86_CR4_DE 0x0008 /* enable debugging extensions */
-#define X86_CR4_PSE 0x0010 /* enable page size extensions */
-#define X86_CR4_PAE 0x0020 /* enable physical address extensions */
-#define X86_CR4_MCE 0x0040 /* Machine check enable */
-#define X86_CR4_PGE 0x0080 /* enable global pages */
-#define X86_CR4_PCE 0x0100 /* enable performance counters at ipl 3 */
-#define X86_CR4_OSFXSR 0x0200 /* enable fast FPU save and restore */
-#define X86_CR4_OSXMMEXCPT 0x0400 /* enable unmasked SSE exceptions */
-
-#define load_cr3(pgdir) \
- asm volatile("movl %0,%%cr3": :"r" (__pa(pgdir)));
-
-extern unsigned long mmu_cr4_features;
-
-#include <asm/hypervisor.h>
-
-static inline void set_in_cr4 (unsigned long mask)
-{
- BUG();
-}
-
-static inline void clear_in_cr4 (unsigned long mask)
-{
- BUG();
-}
-
-/*
- * Cyrix CPU configuration register indexes
- */
-#define CX86_CCR0 0xc0
-#define CX86_CCR1 0xc1
-#define CX86_CCR2 0xc2
-#define CX86_CCR3 0xc3
-#define CX86_CCR4 0xe8
-#define CX86_CCR5 0xe9
-#define CX86_CCR6 0xea
-#define CX86_CCR7 0xeb
-#define CX86_DIR0 0xfe
-#define CX86_DIR1 0xff
-#define CX86_ARR_BASE 0xc4
-#define CX86_RCR_BASE 0xdc
-
-/*
- * Cyrix CPU indexed register access macros
- */
-
-#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })
-
-#define setCx86(reg, data) do { \
- outb((reg), 0x22); \
- outb((data), 0x23); \
-} while (0)
-
-/*
- * Bus types (default is ISA, but people can check others with these..)
- */
-#ifdef CONFIG_EISA
-extern int EISA_bus;
-#else
-#define EISA_bus (0)
-#endif
-extern int MCA_bus;
-
-/* from system description table in BIOS. Mostly for MCA use, but
-others may find it useful. */
-extern unsigned int machine_id;
-extern unsigned int machine_submodel_id;
-extern unsigned int BIOS_revision;
-extern unsigned int mca_pentium_flag;
-
-/*
- * User space process size: 3GB (default).
- */
-#define TASK_SIZE (PAGE_OFFSET)
-
-/* This decides where the kernel will search for a free chunk of vm
- * space during mmap's.
- */
-#define TASK_UNMAPPED_BASE (TASK_SIZE / 3)
-
-/*
- * Size of io_bitmap in longwords: 32 is ports 0-0x3ff.
- */
-#define IO_BITMAP_SIZE 32
-#define IO_BITMAP_BYTES (IO_BITMAP_SIZE * 4)
-#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
-#define INVALID_IO_BITMAP_OFFSET 0x8000
-
-struct i387_fsave_struct {
- long cwd;
- long swd;
- long twd;
- long fip;
- long fcs;
- long foo;
- long fos;
- long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */
- long status; /* software status information */
-};
-
-struct i387_fxsave_struct {
- unsigned short cwd;
- unsigned short swd;
- unsigned short twd;
- unsigned short fop;
- long fip;
- long fcs;
- long foo;
- long fos;
- long mxcsr;
- long reserved;
- long st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
- long xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */
- long padding[56];
-} __attribute__ ((aligned (16)));
-
-struct i387_soft_struct {
- long cwd;
- long swd;
- long twd;
- long fip;
- long fcs;
- long foo;
- long fos;
- long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */
- unsigned char ftop, changed, lookahead, no_update, rm, alimit;
- struct info *info;
- unsigned long entry_eip;
-};
-
-union i387_union {
- struct i387_fsave_struct fsave;
- struct i387_fxsave_struct fxsave;
- struct i387_soft_struct soft;
-};
-
-typedef struct {
- unsigned long seg;
-} mm_segment_t;
-
-struct tss_struct {
- unsigned short back_link,__blh;
- unsigned long esp0;
- unsigned short ss0,__ss0h;
- unsigned long esp1;
- unsigned short ss1,__ss1h;
- unsigned long esp2;
- unsigned short ss2,__ss2h;
- unsigned long __cr3;
- unsigned long eip;
- unsigned long eflags;
- unsigned long eax,ecx,edx,ebx;
- unsigned long esp;
- unsigned long ebp;
- unsigned long esi;
- unsigned long edi;
- unsigned short es, __esh;
- unsigned short cs, __csh;
- unsigned short ss, __ssh;
- unsigned short ds, __dsh;
- unsigned short fs, __fsh;
- unsigned short gs, __gsh;
- unsigned short ldt, __ldth;
- unsigned short trace, bitmap;
- unsigned long io_bitmap[IO_BITMAP_SIZE+1];
- /*
- * pads the TSS to be cacheline-aligned (size is 0x100)
- */
- unsigned long __cacheline_filler[5];
-};
-
-struct thread_struct {
- unsigned long esp0;
- unsigned long eip;
- unsigned long esp;
- unsigned long fs;
- unsigned long gs;
- unsigned int io_pl;
-/* Hardware debugging registers */
- unsigned long debugreg[8]; /* %%db0-7 debug registers */
-/* fault info */
- unsigned long cr2, trap_no, error_code;
-/* floating point info */
- union i387_union i387;
-/* virtual 86 mode info */
- struct vm86_struct * vm86_info;
- unsigned long screen_bitmap;
- unsigned long v86flags, v86mask, saved_esp0;
-};
-
-#define INIT_THREAD { sizeof(init_stack) + (long) &init_stack, \
- 0, 0, 0, 0, 0, 0, {0}, 0, 0, 0, {{0}}, 0, 0, 0, 0, 0 }
-
-#define INIT_TSS { \
- 0,0, /* back_link, __blh */ \
- sizeof(init_stack) + (long) &init_stack, /* esp0 */ \
- __KERNEL_DS, 0, /* ss0 */ \
- 0,0,0,0,0,0, /* stack1, stack2 */ \
- 0, /* cr3 */ \
- 0,0, /* eip,eflags */ \
- 0,0,0,0, /* eax,ecx,edx,ebx */ \
- 0,0,0,0, /* esp,ebp,esi,edi */ \
- 0,0,0,0,0,0, /* es,cs,ss */ \
- 0,0,0,0,0,0, /* ds,fs,gs */ \
- 0,0, /* ldt */ \
- 0, INVALID_IO_BITMAP_OFFSET, /* trace, bitmap */ \
- {~0, } /* ioperm */ \
-}
-
-#define start_thread(regs, new_eip, new_esp) do { \
- __asm__("movl %0,%%fs ; movl %0,%%gs": :"r" (0)); \
- set_fs(USER_DS); \
- regs->xds = __USER_DS; \
- regs->xes = __USER_DS; \
- regs->xss = __USER_DS; \
- regs->xcs = __USER_CS; \
- regs->eip = new_eip; \
- regs->esp = new_esp; \
-} while (0)
-
-/* Forward declaration, a strange C thing */
-struct task_struct;
-struct mm_struct;
-
-/* Free all resources held by a thread. */
-extern void release_thread(struct task_struct *);
-/*
- * create a kernel thread without removing it from tasklists
- */
-extern int arch_kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
-
-/* Copy and release all segment info associated with a VM
- * Unusable due to lack of error handling, use {init_new,destroy}_context
- * instead.
- */
-static inline void copy_segments(struct task_struct *p, struct mm_struct * mm) { }
-static inline void release_segments(struct mm_struct * mm) { }
-
-/*
- * Return saved PC of a blocked thread.
- */
-static inline unsigned long thread_saved_pc(struct thread_struct *t)
-{
- return ((unsigned long *)t->esp)[3];
-}
-
-unsigned long get_wchan(struct task_struct *p);
-#define KSTK_EIP(tsk) (((unsigned long *)(4096+(unsigned long)(tsk)))[1019])
-#define KSTK_ESP(tsk) (((unsigned long *)(4096+(unsigned long)(tsk)))[1022])
-
-#define THREAD_SIZE (2*PAGE_SIZE)
-#define alloc_task_struct() ((struct task_struct *) __get_free_pages(GFP_KERNEL,1))
-#define free_task_struct(p) free_pages((unsigned long) (p), 1)
-#define get_task_struct(tsk) atomic_inc(&virt_to_page(tsk)->count)
-
-#define init_task (init_task_union.task)
-#define init_stack (init_task_union.stack)
-
-struct microcode {
- unsigned int hdrver;
- unsigned int rev;
- unsigned int date;
- unsigned int sig;
- unsigned int cksum;
- unsigned int ldrver;
- unsigned int pf;
- unsigned int reserved[5];
- unsigned int bits[500];
-};
-
-/* '6' because it used to be for P6 only (but now covers Pentium 4 as well) */
-#define MICROCODE_IOCFREE _IO('6',0)
-
-/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
-static inline void rep_nop(void)
-{
- __asm__ __volatile__("rep;nop" ::: "memory");
-}
-
-#define cpu_relax() rep_nop()
-
-/* Prefetch instructions for Pentium III and AMD Athlon */
-#if defined(CONFIG_MPENTIUMIII) || defined (CONFIG_MPENTIUM4)
-
-#define ARCH_HAS_PREFETCH
-extern inline void prefetch(const void *x)
-{
- __asm__ __volatile__ ("prefetchnta (%0)" : : "r"(x));
-}
-
-#elif CONFIG_X86_USE_3DNOW
-
-#define ARCH_HAS_PREFETCH
-#define ARCH_HAS_PREFETCHW
-#define ARCH_HAS_SPINLOCK_PREFETCH
-
-extern inline void prefetch(const void *x)
-{
- __asm__ __volatile__ ("prefetch (%0)" : : "r"(x));
-}
-
-extern inline void prefetchw(const void *x)
-{
- __asm__ __volatile__ ("prefetchw (%0)" : : "r"(x));
-}
-#define spin_lock_prefetch(x) prefetchw(x)
-
-#endif
-
-#endif /* __ASM_I386_PROCESSOR_H */
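
The cpuid() helper above survives unchanged under Xen, since CPUID is unprivileged. It still compiles and runs on any x86 host today; for instance, leaf 0 returns the maximum leaf in EAX and the vendor string split across EBX, EDX, ECX in that order:

    #include <stdio.h>
    #include <string.h>

    static inline void cpuid(int op, int *eax, int *ebx, int *ecx, int *edx)
    {
        __asm__("cpuid"
                : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
                : "0" (op));
    }

    int main(void)
    {
        int eax, ebx, ecx, edx;
        char vendor[13];

        cpuid(0, &eax, &ebx, &ecx, &edx);
        memcpy(vendor + 0, &ebx, 4);        /* note the EBX, EDX, ECX order */
        memcpy(vendor + 4, &edx, 4);
        memcpy(vendor + 8, &ecx, 4);
        vendor[12] = '\0';
        printf("max leaf %d, vendor \"%s\"\n", eax, vendor);
        return 0;
    }
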
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/include/asm-xen/queues.h
--- a/linux-2.4.30-xen-sparse/include/asm-xen/queues.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,20 +0,0 @@
-
-/* Work-queue emulation over task queues. Pretty simple. */
-
-#ifndef __QUEUES_H__
-#define __QUEUES_H__
-
-#include <linux/version.h>
-#include <linux/list.h>
-#include <linux/tqueue.h>
-
-#define DECLARE_TQUEUE(_name, _fn, _arg) \
- struct tq_struct _name = { LIST_HEAD_INIT((_name).list), 0, _fn, _arg }
-#define DECLARE_WORK(_name, _fn, _arg) DECLARE_TQUEUE(_name, _fn, _arg)
-
-#define work_struct tq_struct
-#define INIT_WORK(_work, _fn, _arg) INIT_TQUEUE(_work, _fn, _arg)
-
-#define schedule_work(_w) schedule_task(_w)
-
-#endif /* __QUEUES_H__ */
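
This shim lets 2.6-style driver code (INIT_WORK/schedule_work) build on a 2.4 kernel whose deferral primitive is the task queue. The shape of the emulation is easy to show with a userspace stand-in where a fixed array plays the part of the scheduler's task queue (all names below are the model's own, not 2.4 API):

    #include <stdio.h>

    struct work_struct { void (*fn)(void *); void *arg; };
    #define INIT_WORK(_w, _fn, _arg) \
        do { (_w)->fn = (_fn); (_w)->arg = (_arg); } while (0)

    static struct work_struct *queue[8];
    static int nqueued;

    static void schedule_work(struct work_struct *w) { queue[nqueued++] = w; }

    static void run_queued_work(void)   /* stands in for the 2.4 scheduler */
    {
        int i;
        for (i = 0; i < nqueued; i++)
            queue[i]->fn(queue[i]->arg);
        nqueued = 0;
    }

    static void hello(void *arg) { printf("work ran: %s\n", (char *)arg); }

    int main(void)
    {
        struct work_struct w;

        INIT_WORK(&w, hello, (void *)"deferred");
        schedule_work(&w);
        run_queued_work();
        return 0;
    }
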
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/include/asm-xen/segment.h
--- a/linux-2.4.30-xen-sparse/include/asm-xen/segment.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,15 +0,0 @@
-#ifndef _ASM_SEGMENT_H
-#define _ASM_SEGMENT_H
-
-#ifndef __ASSEMBLY__
-#include <linux/types.h>
-#endif
-#include <asm-xen/xen-public/xen.h>
-
-#define __KERNEL_CS FLAT_RING1_CS
-#define __KERNEL_DS FLAT_RING1_DS
-
-#define __USER_CS FLAT_RING3_CS
-#define __USER_DS FLAT_RING3_DS
-
-#endif
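
These definitions exist because a Xen guest kernel runs in ring 1 rather than ring 0, so __KERNEL_CS/__KERNEL_DS must name ring-1 selectors supplied by the hypervisor. The requested privilege level is simply the low two bits of any x86 selector; a one-line decoder (the sample value is an assumption, not taken from the xen-public header):

    #include <stdio.h>

    int main(void)
    {
        unsigned int sel = 0x0819;  /* hypothetical FLAT_RING1_CS-style value */

        printf("GDT index: %u\n", sel >> 3);
        printf("table:     %s\n", (sel & 4) ? "LDT" : "GDT");
        printf("RPL:       %u\n", sel & 3);   /* 1 => ring 1 */
        return 0;
    }
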
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/include/asm-xen/smp.h
--- a/linux-2.4.30-xen-sparse/include/asm-xen/smp.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,102 +0,0 @@
-#ifndef __ASM_SMP_H
-#define __ASM_SMP_H
-
-/*
- * We need the APIC definitions automatically as part of 'smp.h'
- */
-#ifndef __ASSEMBLY__
-#include <linux/config.h>
-#include <linux/threads.h>
-#include <linux/ptrace.h>
-#endif
-
-#ifdef CONFIG_X86_LOCAL_APIC
-#ifndef __ASSEMBLY__
-#include <asm/bitops.h>
-#include <asm/mpspec.h>
-#ifdef CONFIG_X86_IO_APIC
-#include <asm/io_apic.h>
-#endif
-#include <asm/apic.h>
-#endif
-#endif
-
-#ifdef CONFIG_SMP
-#ifndef __ASSEMBLY__
-
-/*
- * Private routines/data
- */
-
-extern void smp_alloc_memory(void);
-extern unsigned long phys_cpu_present_map;
-extern unsigned long cpu_online_map;
-extern volatile unsigned long smp_invalidate_needed;
-extern int pic_mode;
-extern int smp_num_siblings;
-extern int cpu_sibling_map[];
-
-extern void smp_flush_tlb(void);
-extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
-extern void fastcall smp_send_reschedule(int cpu);
-extern void smp_invalidate_rcv(void); /* Process an NMI */
-extern void (*mtrr_hook) (void);
-extern void zap_low_mappings (void);
-
-/*
- * On x86 all CPUs are mapped 1:1 to the APIC space.
- * This simplifies scheduling and IPI sending and
- * compresses data structures.
- */
-static inline int cpu_logical_map(int cpu)
-{
- return cpu;
-}
-static inline int cpu_number_map(int cpu)
-{
- return cpu;
-}
-
-/*
- * Some lowlevel functions might want to know about
- * the real APIC ID <-> CPU # mapping.
- */
-#define MAX_APICID 256
-extern volatile int cpu_to_physical_apicid[NR_CPUS];
-extern volatile int physical_apicid_to_cpu[MAX_APICID];
-extern volatile int cpu_to_logical_apicid[NR_CPUS];
-extern volatile int logical_apicid_to_cpu[MAX_APICID];
-
-/*
- * General functions that each host system must provide.
- */
-
-extern void smp_boot_cpus(void);
-extern void smp_store_cpu_info(int id); /* Store per CPU info (like the initial udelay numbers) */
-
-/*
- * This function is needed by all SMP systems. It must _always_ be valid
- * from the initial startup. We map APIC_BASE very early in page_setup(),
- * so this is correct in the x86 case.
- */
-
-#define smp_processor_id() (current->processor)
-
-#endif /* !__ASSEMBLY__ */
-
-#define NO_PROC_ID 0xFF /* No processor magic marker */
-
-/*
- * This magic constant controls our willingness to transfer
- * a process across CPUs. Such a transfer incurs misses on the L1
- * cache, and on a P6 or P5 with multiple L2 caches L2 hits. My
- * gut feeling is this will vary by board in value. For a board
- * with separate L2 cache it probably depends also on the RSS, and
- * for a board with shared L2 cache it ought to decay fast as other
- * processes are run.
- */
-
-#define PROC_CHANGE_PENALTY 15 /* Schedule penalty */
-
-#endif
-#endif
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/include/asm-xen/system.h
--- a/linux-2.4.30-xen-sparse/include/asm-xen/system.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,424 +0,0 @@
-#ifndef __ASM_SYSTEM_H
-#define __ASM_SYSTEM_H
-
-#include <linux/config.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/bitops.h>
-#include <asm/synch_bitops.h>
-#include <asm/segment.h>
-#include <asm/hypervisor.h>
-#include <asm/evtchn.h>
-
-#ifdef __KERNEL__
-
-struct task_struct;
-extern void FASTCALL(__switch_to(struct task_struct *prev,
- struct task_struct *next));
-
-#define prepare_to_switch() \
-do { \
- struct thread_struct *__t = &current->thread; \
- __asm__ __volatile__ ( "movl %%fs,%0" : "=m" (*(int *)&__t->fs) ); \
- __asm__ __volatile__ ( "movl %%gs,%0" : "=m" (*(int *)&__t->gs) ); \
-} while (0)
-#define switch_to(prev,next,last) do { \
- asm volatile("pushl %%esi\n\t" \
- "pushl %%edi\n\t" \
- "pushl %%ebp\n\t" \
- "movl %%esp,%0\n\t" /* save ESP */ \
- "movl %3,%%esp\n\t" /* restore ESP */ \
- "movl $1f,%1\n\t" /* save EIP */ \
- "pushl %4\n\t" /* restore EIP */ \
- "jmp __switch_to\n" \
- "1:\t" \
- "popl %%ebp\n\t" \
- "popl %%edi\n\t" \
- "popl %%esi\n\t" \
- :"=m" (prev->thread.esp),"=m" (prev->thread.eip), \
- "=b" (last) \
- :"m" (next->thread.esp),"m" (next->thread.eip), \
- "a" (prev), "d" (next), \
- "b" (prev)); \
-} while (0)
-
-#define _set_base(addr,base) do { unsigned long __pr; \
-__asm__ __volatile__ ("movw %%dx,%1\n\t" \
- "rorl $16,%%edx\n\t" \
- "movb %%dl,%2\n\t" \
- "movb %%dh,%3" \
- :"=&d" (__pr) \
- :"m" (*((addr)+2)), \
- "m" (*((addr)+4)), \
- "m" (*((addr)+7)), \
- "0" (base) \
- ); } while(0)
-
-#define _set_limit(addr,limit) do { unsigned long __lr; \
-__asm__ __volatile__ ("movw %%dx,%1\n\t" \
- "rorl $16,%%edx\n\t" \
- "movb %2,%%dh\n\t" \
- "andb $0xf0,%%dh\n\t" \
- "orb %%dh,%%dl\n\t" \
- "movb %%dl,%2" \
- :"=&d" (__lr) \
- :"m" (*(addr)), \
- "m" (*((addr)+6)), \
- "0" (limit) \
- ); } while(0)
-
-#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
-#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1)>>12 )
-
-static inline unsigned long _get_base(char * addr)
-{
- unsigned long __base;
- __asm__("movb %3,%%dh\n\t"
- "movb %2,%%dl\n\t"
- "shll $16,%%edx\n\t"
- "movw %1,%%dx"
- :"=&d" (__base)
- :"m" (*((addr)+2)),
- "m" (*((addr)+4)),
- "m" (*((addr)+7)));
- return __base;
-}
-
-#define get_base(ldt) _get_base( ((char *)&(ldt)) )
-
-/*
- * Load a segment. Fall back on loading the zero
- * segment if something goes wrong..
- */
-#define loadsegment(seg,value) \
- asm volatile("\n" \
- "1:\t" \
- "movl %0,%%" #seg "\n" \
- "2:\n" \
- ".section .fixup,\"ax\"\n" \
- "3:\t" \
- "pushl $0\n\t" \
- "popl %%" #seg "\n\t" \
- "jmp 2b\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n\t" \
- ".align 4\n\t" \
- ".long 1b,3b\n" \
- ".previous" \
- : :"m" (*(unsigned int *)&(value)))
-
-/* NB. 'clts' is done for us by Xen during virtual trap. */
-#define clts() ((void)0)
-#define stts() (HYPERVISOR_fpu_taskswitch())
-
-#endif /* __KERNEL__ */
-
-/**
- * __ffs - find first bit in word.
- * @word: The word to search
- *
- * Undefined if no bit exists, so code should check against 0 first.
- *
- * Taken from 2.6 for Xen.
- */
-static inline unsigned long __ffs(unsigned long word)
-{
- __asm__("bsfl %1,%0"
- :"=r" (word)
- :"rm" (word));
- return word;
-}
-
-static inline unsigned long get_limit(unsigned long segment)
-{
- unsigned long __limit;
- __asm__("lsll %1,%0"
- :"=r" (__limit):"r" (segment));
- return __limit+1;
-}
-
-#define nop() __asm__ __volatile__ ("nop")
-
-#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
-
-#define tas(ptr) (xchg((ptr),1))
-
-struct __xchg_dummy { unsigned long a[100]; };
-#define __xg(x) ((struct __xchg_dummy *)(x))
-
-
-/*
- * The semantics of XCHGCMP8B are a bit strange, this is why
- * there is a loop and the loading of %%eax and %%edx has to
- * be inside. This inlines well in most cases, the cached
- * cost is around ~38 cycles. (in the future we might want
- * to do an SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
- * might have an implicit FPU-save as a cost, so it's not
- * clear which path to go.)
- *
- * cmpxchg8b must be used with the lock prefix here to allow
- * the instruction to be executed atomically, see page 3-102
- * of the instruction set reference 24319102.pdf. We need
- * the reader side to see the coherent 64bit value.
- */
-static inline void __set_64bit (unsigned long long * ptr,
- unsigned int low, unsigned int high)
-{
- __asm__ __volatile__ (
- "\n1:\t"
- "movl (%0), %%eax\n\t"
- "movl 4(%0), %%edx\n\t"
- "lock cmpxchg8b (%0)\n\t"
- "jnz 1b"
- : /* no outputs */
- : "D"(ptr),
- "b"(low),
- "c"(high)
- : "ax","dx","memory");
-}
-
-static inline void __set_64bit_constant (unsigned long long *ptr,
- unsigned long long value)
-{
- __set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
-}
-#define ll_low(x) *(((unsigned int*)&(x))+0)
-#define ll_high(x) *(((unsigned int*)&(x))+1)
-
-static inline void __set_64bit_var (unsigned long long *ptr,
- unsigned long long value)
-{
- __set_64bit(ptr,ll_low(value), ll_high(value));
-}
-
-#define set_64bit(ptr,value) \
-(__builtin_constant_p(value) ? \
- __set_64bit_constant(ptr, value) : \
- __set_64bit_var(ptr, value) )
-
-#define _set_64bit(ptr,value) \
-(__builtin_constant_p(value) ? \
- __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
- __set_64bit(ptr, ll_low(value), ll_high(value)) )
-
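
A hedged usage sketch of the helpers above: on 32-bit x86 a plain 64-bit assignment is two separate 32-bit stores, so a concurrent reader may observe a torn value; set_64bit() publishes all eight bytes with one atomic cmpxchg8b loop. The variable and function names are hypothetical.

/* Illustrative sketch only. */
static unsigned long long shared_stamp; /* read concurrently elsewhere */

static void publish_stamp(unsigned long long v)
{
        /* one atomic 8-byte store instead of two torn 4-byte stores */
        set_64bit(&shared_stamp, v);
}
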
-/*
- * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
- * Note 2: xchg has side effect, so that attribute volatile is necessary,
- * but generally the primitive is invalid, *ptr is output argument. --ANK
- */
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
-{
- switch (size) {
- case 1:
- __asm__ __volatile__("xchgb %b0,%1"
- :"=q" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- case 2:
- __asm__ __volatile__("xchgw %w0,%1"
- :"=r" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- case 4:
- __asm__ __volatile__("xchgl %0,%1"
- :"=r" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- }
- return x;
-}
-
-/*
- * Atomic compare and exchange. Compare OLD with MEM, if identical,
- * store NEW in MEM. Return the initial value in MEM. Success is
- * indicated by comparing RETURN with OLD.
- */
-
-#ifdef CONFIG_X86_CMPXCHG
-#define __HAVE_ARCH_CMPXCHG 1
-
-static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
- unsigned long new, int size)
-{
- unsigned long prev;
- switch (size) {
- case 1:
- __asm__ __volatile__("lock cmpxchgb %b1,%2"
- : "=a"(prev)
- : "q"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- case 2:
- __asm__ __volatile__("lock cmpxchgw %w1,%2"
- : "=a"(prev)
- : "q"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- case 4:
- __asm__ __volatile__("lock cmpxchgl %1,%2"
- : "=a"(prev)
- : "q"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- }
- return old;
-}
-
-#define cmpxchg(ptr,o,n)\
- ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
- (unsigned long)(n),sizeof(*(ptr))))
-
-#else
-/* Compiling for a 386 proper. Is it worth implementing via cli/sti? */
-#endif
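
As a hedged illustration of the contract stated above (success is detected by comparing the returned value with OLD), here is the classic lock-free retry loop built on cmpxchg(); the function name is hypothetical.

/* Illustrative sketch only. */
static void add_ulong_lockfree(volatile unsigned long *p, unsigned long n)
{
        unsigned long old;

        do {
                old = *p;                          /* snapshot current value */
        } while (cmpxchg(p, old, old + n) != old); /* retry if it changed */
}
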
-
-/*
- * Force strict CPU ordering.
- * And yes, this is required on UP too when we're talking
- * to devices.
- *
- * For now, "wmb()" doesn't actually do anything, as all
- * Intel CPU's follow what Intel calls a *Processor Order*,
- * in which all writes are seen in the program order even
- * outside the CPU.
- *
- * I expect future Intel CPU's to have a weaker ordering,
- * but I'd also expect them to finally get their act together
- * and add some real memory barriers if so.
- *
- * Some non intel clones support out of order store. wmb() ceases to be a
- * nop for these.
- */
-
-#define mb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
-#define rmb() mb()
-
-#ifdef CONFIG_X86_OOSTORE
-#define wmb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
-#else
-#define wmb() __asm__ __volatile__ ("": : :"memory")
-#endif
-
-#ifdef CONFIG_SMP
-#define smp_mb() mb()
-#define smp_rmb() rmb()
-#define smp_wmb() wmb()
-#define set_mb(var, value) do { xchg(&var, value); } while (0)
-#else
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define set_mb(var, value) do { var = value; barrier(); } while (0)
-#endif
-
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
-
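
A hedged sketch of the publish/consume pattern these barriers exist for; the variables are hypothetical. On UP builds the smp_*() variants collapse to plain compiler barriers, which is all that is required there.

/* Illustrative sketch only. */
static int payload, ready;

static void producer(void)              /* runs on one CPU */
{
        payload = 42;
        smp_wmb();              /* payload store ordered before flag store */
        ready = 1;
}

static int consumer(void)               /* runs on another CPU */
{
        if (!ready)
                return -1;      /* nothing published yet */
        smp_rmb();              /* flag load ordered before payload load */
        return payload;
}
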
-#define safe_halt() ((void)0)
-
-/*
- * The use of 'barrier' in the following reflects their use as local-lock
- * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following
- * critical operations are executed. All critical operations must complete
- * /before/ reentrancy is permitted (e.g., __sti()). Alpha architecture also
- * includes these barriers, for example.
- */
-
-#define __cli() \
-do { \
- HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask = 1; \
- barrier(); \
-} while (0)
-
-#define __sti() \
-do { \
- shared_info_t *_shared = HYPERVISOR_shared_info; \
- barrier(); \
- _shared->vcpu_data[0].evtchn_upcall_mask = 0; \
- barrier(); /* unmask then check (avoid races) */ \
- if ( unlikely(_shared->vcpu_data[0].evtchn_upcall_pending) ) \
- force_evtchn_callback(); \
-} while (0)
-
-#define __save_flags(x) \
-do { \
- (x) = HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask; \
-} while (0)
-
-#define __restore_flags(x) \
-do { \
- shared_info_t *_shared = HYPERVISOR_shared_info; \
- barrier(); \
- if ( (_shared->vcpu_data[0].evtchn_upcall_mask = x) == 0 ) { \
- barrier(); /* unmask then check (avoid races) */ \
- if ( unlikely(_shared->vcpu_data[0].evtchn_upcall_pending) ) \
- force_evtchn_callback(); \
- } \
-} while (0)
-
-#define __save_and_cli(x) \
-do { \
- (x) = HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask; \
- HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask = 1; \
- barrier(); \
-} while (0)
-
-#define __save_and_sti(x) \
-do { \
- shared_info_t *_shared = HYPERVISOR_shared_info; \
- barrier(); \
- (x) = _shared->vcpu_data[0].evtchn_upcall_mask; \
- _shared->vcpu_data[0].evtchn_upcall_mask = 0; \
- barrier(); /* unmask then check (avoid races) */ \
- if ( unlikely(_shared->vcpu_data[0].evtchn_upcall_pending) ) \
- force_evtchn_callback(); \
-} while (0)
-
-#define local_irq_save(x) __save_and_cli(x)
-#define local_irq_set(x) __save_and_sti(x)
-#define local_irq_restore(x) __restore_flags(x)
-#define local_irq_disable() __cli()
-#define local_irq_enable() __sti()
-
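
A hedged sketch of a critical section under the definitions above; the function is hypothetical. Note that no cli/sti instruction is executed: only the per-VCPU evtchn_upcall_mask byte in the shared info page is toggled, and force_evtchn_callback() replays anything that arrived while events were masked.

/* Illustrative sketch only. */
static void touch_evtchn_state(void)
{
        unsigned long flags;

        local_irq_save(flags);          /* set upcall mask, no cli */
        /* ... modify state shared with the event-channel upcall ... */
        local_irq_restore(flags);       /* unmask, replay pending events */
}
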
-
-#ifdef CONFIG_SMP
-#error no SMP
-extern void __global_cli(void);
-extern void __global_sti(void);
-extern unsigned long __global_save_flags(void);
-extern void __global_restore_flags(unsigned long);
-#define cli() __global_cli()
-#define sti() __global_sti()
-#define save_flags(x) ((x)=__global_save_flags())
-#define restore_flags(x) __global_restore_flags(x)
-#define save_and_cli(x) do { save_flags(x); cli(); } while(0);
-#define save_and_sti(x) do { save_flags(x); sti(); } while(0);
-
-#else
-
-#define cli() __cli()
-#define sti() __sti()
-#define save_flags(x) __save_flags(x)
-#define restore_flags(x) __restore_flags(x)
-#define save_and_cli(x) __save_and_cli(x)
-#define save_and_sti(x) __save_and_sti(x)
-
-#endif
-
-/*
- * disable hlt during certain critical i/o operations
- */
-#define HAVE_DISABLE_HLT
-void disable_hlt(void);
-void enable_hlt(void);
-
-extern unsigned long dmi_broken;
-extern int is_sony_vaio_laptop;
-
-#define BROKEN_ACPI_Sx 0x0001
-#define BROKEN_INIT_AFTER_S1 0x0002
-#define BROKEN_PNP_BIOS 0x0004
-
-#endif
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/include/asm-xen/vga.h
--- a/linux-2.4.30-xen-sparse/include/asm-xen/vga.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,42 +0,0 @@
-/*
- * Access to VGA videoram
- *
- * (c) 1998 Martin Mares <mj@xxxxxx>
- */
-
-#ifndef _LINUX_ASM_VGA_H_
-#define _LINUX_ASM_VGA_H_
-
-#include <asm/io.h>
-
-extern unsigned char *vgacon_mmap;
-
-static unsigned long VGA_MAP_MEM(unsigned long x)
-{
- if( vgacon_mmap == NULL )
- {
- /* This is our first time in this function. This whole thing
- is a rather grim hack. We know we're going to get asked
- to map a 32KB region between 0xb0000 and 0xb8000 because
- that's what VGAs are. We use the boot time permanent
- fixed map region, and map it to machine pages.
- */
- if( x != 0xb8000 )
- panic("Argghh! VGA Console is weird. 1:%08lx\n",x);
-
- vgacon_mmap = (unsigned char*) bt_ioremap( 0xa0000, 128*1024 );
- return (unsigned long) (vgacon_mmap+x-0xa0000);
- }
- else
- {
- if( x != 0xc0000 && x != 0xa0000 ) /* vidmem_end or charmap fonts */
- panic("Argghh! VGA Console is weird. 2:%08lx\n",x);
- return (unsigned long) (vgacon_mmap+x-0xa0000);
- }
- return 0;
-}
-
-static inline unsigned char vga_readb(unsigned char * x) { return (*(x)); }
-static inline void vga_writeb(unsigned char x, unsigned char *y) { *(y) = (x); }
-
-#endif
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/include/asm-xen/xor.h
--- a/linux-2.4.30-xen-sparse/include/asm-xen/xor.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,879 +0,0 @@
-/*
- * include/asm-i386/xor.h
- *
- * Optimized RAID-5 checksumming functions for MMX and SSE.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * You should have received a copy of the GNU General Public License
- * (for example /usr/src/linux/COPYING); if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-/*
- * High-speed RAID5 checksumming functions utilizing MMX instructions.
- * Copyright (C) 1998 Ingo Molnar.
- */
-
-#define FPU_SAVE \
- do { \
- if (!(current->flags & PF_USEDFPU)) \
- clts(); \
- __asm__ __volatile__ ("fsave %0; fwait": "=m"(fpu_save[0])); \
- } while (0)
-
-#define FPU_RESTORE \
- do { \
- __asm__ __volatile__ ("frstor %0": : "m"(fpu_save[0])); \
- if (!(current->flags & PF_USEDFPU)) \
- stts(); \
- } while (0)
-
-#define LD(x,y) " movq 8*("#x")(%1), %%mm"#y" ;\n"
-#define ST(x,y) " movq %%mm"#y", 8*("#x")(%1) ;\n"
-#define XO1(x,y) " pxor 8*("#x")(%2), %%mm"#y" ;\n"
-#define XO2(x,y) " pxor 8*("#x")(%3), %%mm"#y" ;\n"
-#define XO3(x,y) " pxor 8*("#x")(%4), %%mm"#y" ;\n"
-#define XO4(x,y) " pxor 8*("#x")(%5), %%mm"#y" ;\n"
-
-
-static void
-xor_pII_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
-{
- unsigned long lines = bytes >> 7;
- char fpu_save[108];
-
- FPU_SAVE;
-
- __asm__ __volatile__ (
-#undef BLOCK
-#define BLOCK(i) \
- LD(i,0) \
- LD(i+1,1) \
- LD(i+2,2) \
- LD(i+3,3) \
- XO1(i,0) \
- ST(i,0) \
- XO1(i+1,1) \
- ST(i+1,1) \
- XO1(i+2,2) \
- ST(i+2,2) \
- XO1(i+3,3) \
- ST(i+3,3)
-
- " .align 32 ;\n"
- " 1: ;\n"
-
- BLOCK(0)
- BLOCK(4)
- BLOCK(8)
- BLOCK(12)
-
- " addl $128, %1 ;\n"
- " addl $128, %2 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- : "+r" (lines),
- "+r" (p1), "+r" (p2)
- :
- : "memory");
-
- FPU_RESTORE;
-}
-
-static void
-xor_pII_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3)
-{
- unsigned long lines = bytes >> 7;
- char fpu_save[108];
-
- FPU_SAVE;
-
- __asm__ __volatile__ (
-#undef BLOCK
-#define BLOCK(i) \
- LD(i,0) \
- LD(i+1,1) \
- LD(i+2,2) \
- LD(i+3,3) \
- XO1(i,0) \
- XO1(i+1,1) \
- XO1(i+2,2) \
- XO1(i+3,3) \
- XO2(i,0) \
- ST(i,0) \
- XO2(i+1,1) \
- ST(i+1,1) \
- XO2(i+2,2) \
- ST(i+2,2) \
- XO2(i+3,3) \
- ST(i+3,3)
-
- " .align 32 ;\n"
- " 1: ;\n"
-
- BLOCK(0)
- BLOCK(4)
- BLOCK(8)
- BLOCK(12)
-
- " addl $128, %1 ;\n"
- " addl $128, %2 ;\n"
- " addl $128, %3 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- : "+r" (lines),
- "+r" (p1), "+r" (p2), "+r" (p3)
- :
- : "memory");
-
- FPU_RESTORE;
-}
-
-static void
-xor_pII_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4)
-{
- unsigned long lines = bytes >> 7;
- char fpu_save[108];
-
- FPU_SAVE;
-
- __asm__ __volatile__ (
-#undef BLOCK
-#define BLOCK(i) \
- LD(i,0) \
- LD(i+1,1) \
- LD(i+2,2) \
- LD(i+3,3) \
- XO1(i,0) \
- XO1(i+1,1) \
- XO1(i+2,2) \
- XO1(i+3,3) \
- XO2(i,0) \
- XO2(i+1,1) \
- XO2(i+2,2) \
- XO2(i+3,3) \
- XO3(i,0) \
- ST(i,0) \
- XO3(i+1,1) \
- ST(i+1,1) \
- XO3(i+2,2) \
- ST(i+2,2) \
- XO3(i+3,3) \
- ST(i+3,3)
-
- " .align 32 ;\n"
- " 1: ;\n"
-
- BLOCK(0)
- BLOCK(4)
- BLOCK(8)
- BLOCK(12)
-
- " addl $128, %1 ;\n"
- " addl $128, %2 ;\n"
- " addl $128, %3 ;\n"
- " addl $128, %4 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- : "+r" (lines),
- "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
- :
- : "memory");
-
- FPU_RESTORE;
-}
-
-
-static void
-xor_pII_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4, unsigned long *p5)
-{
- unsigned long lines = bytes >> 7;
- char fpu_save[108];
-
- FPU_SAVE;
-
- /* need to save/restore p4/p5 manually otherwise gcc's 10 argument
- limit gets exceeded (+ counts as two arguments) */
- __asm__ __volatile__ (
- " pushl %4\n"
- " pushl %5\n"
-#undef BLOCK
-#define BLOCK(i) \
- LD(i,0) \
- LD(i+1,1) \
- LD(i+2,2) \
- LD(i+3,3) \
- XO1(i,0) \
- XO1(i+1,1) \
- XO1(i+2,2) \
- XO1(i+3,3) \
- XO2(i,0) \
- XO2(i+1,1) \
- XO2(i+2,2) \
- XO2(i+3,3) \
- XO3(i,0) \
- XO3(i+1,1) \
- XO3(i+2,2) \
- XO3(i+3,3) \
- XO4(i,0) \
- ST(i,0) \
- XO4(i+1,1) \
- ST(i+1,1) \
- XO4(i+2,2) \
- ST(i+2,2) \
- XO4(i+3,3) \
- ST(i+3,3)
-
- " .align 32 ;\n"
- " 1: ;\n"
-
- BLOCK(0)
- BLOCK(4)
- BLOCK(8)
- BLOCK(12)
-
- " addl $128, %1 ;\n"
- " addl $128, %2 ;\n"
- " addl $128, %3 ;\n"
- " addl $128, %4 ;\n"
- " addl $128, %5 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- " popl %5\n"
- " popl %4\n"
- : "+r" (lines),
- "+r" (p1), "+r" (p2), "+r" (p3)
- : "r" (p4), "r" (p5)
- : "memory");
-
- FPU_RESTORE;
-}
-
-#undef LD
-#undef XO1
-#undef XO2
-#undef XO3
-#undef XO4
-#undef ST
-#undef BLOCK
-
-static void
-xor_p5_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
-{
- unsigned long lines = bytes >> 6;
- char fpu_save[108];
-
- FPU_SAVE;
-
- __asm__ __volatile__ (
- " .align 32 ;\n"
- " 1: ;\n"
- " movq (%1), %%mm0 ;\n"
- " movq 8(%1), %%mm1 ;\n"
- " pxor (%2), %%mm0 ;\n"
- " movq 16(%1), %%mm2 ;\n"
- " movq %%mm0, (%1) ;\n"
- " pxor 8(%2), %%mm1 ;\n"
- " movq 24(%1), %%mm3 ;\n"
- " movq %%mm1, 8(%1) ;\n"
- " pxor 16(%2), %%mm2 ;\n"
- " movq 32(%1), %%mm4 ;\n"
- " movq %%mm2, 16(%1) ;\n"
- " pxor 24(%2), %%mm3 ;\n"
- " movq 40(%1), %%mm5 ;\n"
- " movq %%mm3, 24(%1) ;\n"
- " pxor 32(%2), %%mm4 ;\n"
- " movq 48(%1), %%mm6 ;\n"
- " movq %%mm4, 32(%1) ;\n"
- " pxor 40(%2), %%mm5 ;\n"
- " movq 56(%1), %%mm7 ;\n"
- " movq %%mm5, 40(%1) ;\n"
- " pxor 48(%2), %%mm6 ;\n"
- " pxor 56(%2), %%mm7 ;\n"
- " movq %%mm6, 48(%1) ;\n"
- " movq %%mm7, 56(%1) ;\n"
-
- " addl $64, %1 ;\n"
- " addl $64, %2 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- : "+r" (lines),
- "+r" (p1), "+r" (p2)
- :
- : "memory");
-
- FPU_RESTORE;
-}
-
-static void
-xor_p5_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3)
-{
- unsigned long lines = bytes >> 6;
- char fpu_save[108];
-
- FPU_SAVE;
-
- __asm__ __volatile__ (
- " .align 32,0x90 ;\n"
- " 1: ;\n"
- " movq (%1), %%mm0 ;\n"
- " movq 8(%1), %%mm1 ;\n"
- " pxor (%2), %%mm0 ;\n"
- " movq 16(%1), %%mm2 ;\n"
- " pxor 8(%2), %%mm1 ;\n"
- " pxor (%3), %%mm0 ;\n"
- " pxor 16(%2), %%mm2 ;\n"
- " movq %%mm0, (%1) ;\n"
- " pxor 8(%3), %%mm1 ;\n"
- " pxor 16(%3), %%mm2 ;\n"
- " movq 24(%1), %%mm3 ;\n"
- " movq %%mm1, 8(%1) ;\n"
- " movq 32(%1), %%mm4 ;\n"
- " movq 40(%1), %%mm5 ;\n"
- " pxor 24(%2), %%mm3 ;\n"
- " movq %%mm2, 16(%1) ;\n"
- " pxor 32(%2), %%mm4 ;\n"
- " pxor 24(%3), %%mm3 ;\n"
- " pxor 40(%2), %%mm5 ;\n"
- " movq %%mm3, 24(%1) ;\n"
- " pxor 32(%3), %%mm4 ;\n"
- " pxor 40(%3), %%mm5 ;\n"
- " movq 48(%1), %%mm6 ;\n"
- " movq %%mm4, 32(%1) ;\n"
- " movq 56(%1), %%mm7 ;\n"
- " pxor 48(%2), %%mm6 ;\n"
- " movq %%mm5, 40(%1) ;\n"
- " pxor 56(%2), %%mm7 ;\n"
- " pxor 48(%3), %%mm6 ;\n"
- " pxor 56(%3), %%mm7 ;\n"
- " movq %%mm6, 48(%1) ;\n"
- " movq %%mm7, 56(%1) ;\n"
-
- " addl $64, %1 ;\n"
- " addl $64, %2 ;\n"
- " addl $64, %3 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- : "+r" (lines),
- "+r" (p1), "+r" (p2), "+r" (p3)
- :
- : "memory" );
-
- FPU_RESTORE;
-}
-
-static void
-xor_p5_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4)
-{
- unsigned long lines = bytes >> 6;
- char fpu_save[108];
-
- FPU_SAVE;
-
- __asm__ __volatile__ (
- " .align 32,0x90 ;\n"
- " 1: ;\n"
- " movq (%1), %%mm0 ;\n"
- " movq 8(%1), %%mm1 ;\n"
- " pxor (%2), %%mm0 ;\n"
- " movq 16(%1), %%mm2 ;\n"
- " pxor 8(%2), %%mm1 ;\n"
- " pxor (%3), %%mm0 ;\n"
- " pxor 16(%2), %%mm2 ;\n"
- " pxor 8(%3), %%mm1 ;\n"
- " pxor (%4), %%mm0 ;\n"
- " movq 24(%1), %%mm3 ;\n"
- " pxor 16(%3), %%mm2 ;\n"
- " pxor 8(%4), %%mm1 ;\n"
- " movq %%mm0, (%1) ;\n"
- " movq 32(%1), %%mm4 ;\n"
- " pxor 24(%2), %%mm3 ;\n"
- " pxor 16(%4), %%mm2 ;\n"
- " movq %%mm1, 8(%1) ;\n"
- " movq 40(%1), %%mm5 ;\n"
- " pxor 32(%2), %%mm4 ;\n"
- " pxor 24(%3), %%mm3 ;\n"
- " movq %%mm2, 16(%1) ;\n"
- " pxor 40(%2), %%mm5 ;\n"
- " pxor 32(%3), %%mm4 ;\n"
- " pxor 24(%4), %%mm3 ;\n"
- " movq %%mm3, 24(%1) ;\n"
- " movq 56(%1), %%mm7 ;\n"
- " movq 48(%1), %%mm6 ;\n"
- " pxor 40(%3), %%mm5 ;\n"
- " pxor 32(%4), %%mm4 ;\n"
- " pxor 48(%2), %%mm6 ;\n"
- " movq %%mm4, 32(%1) ;\n"
- " pxor 56(%2), %%mm7 ;\n"
- " pxor 40(%4), %%mm5 ;\n"
- " pxor 48(%3), %%mm6 ;\n"
- " pxor 56(%3), %%mm7 ;\n"
- " movq %%mm5, 40(%1) ;\n"
- " pxor 48(%4), %%mm6 ;\n"
- " pxor 56(%4), %%mm7 ;\n"
- " movq %%mm6, 48(%1) ;\n"
- " movq %%mm7, 56(%1) ;\n"
-
- " addl $64, %1 ;\n"
- " addl $64, %2 ;\n"
- " addl $64, %3 ;\n"
- " addl $64, %4 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- : "+r" (lines),
- "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
- :
- : "memory");
-
- FPU_RESTORE;
-}
-
-static void
-xor_p5_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4, unsigned long *p5)
-{
- unsigned long lines = bytes >> 6;
- char fpu_save[108];
-
- FPU_SAVE;
-
- /* need to save p4/p5 manually to not exceed gcc's 10 argument limit */
- __asm__ __volatile__ (
- " pushl %4\n"
- " pushl %5\n"
- " .align 32,0x90 ;\n"
- " 1: ;\n"
- " movq (%1), %%mm0 ;\n"
- " movq 8(%1), %%mm1 ;\n"
- " pxor (%2), %%mm0 ;\n"
- " pxor 8(%2), %%mm1 ;\n"
- " movq 16(%1), %%mm2 ;\n"
- " pxor (%3), %%mm0 ;\n"
- " pxor 8(%3), %%mm1 ;\n"
- " pxor 16(%2), %%mm2 ;\n"
- " pxor (%4), %%mm0 ;\n"
- " pxor 8(%4), %%mm1 ;\n"
- " pxor 16(%3), %%mm2 ;\n"
- " movq 24(%1), %%mm3 ;\n"
- " pxor (%5), %%mm0 ;\n"
- " pxor 8(%5), %%mm1 ;\n"
- " movq %%mm0, (%1) ;\n"
- " pxor 16(%4), %%mm2 ;\n"
- " pxor 24(%2), %%mm3 ;\n"
- " movq %%mm1, 8(%1) ;\n"
- " pxor 16(%5), %%mm2 ;\n"
- " pxor 24(%3), %%mm3 ;\n"
- " movq 32(%1), %%mm4 ;\n"
- " movq %%mm2, 16(%1) ;\n"
- " pxor 24(%4), %%mm3 ;\n"
- " pxor 32(%2), %%mm4 ;\n"
- " movq 40(%1), %%mm5 ;\n"
- " pxor 24(%5), %%mm3 ;\n"
- " pxor 32(%3), %%mm4 ;\n"
- " pxor 40(%2), %%mm5 ;\n"
- " movq %%mm3, 24(%1) ;\n"
- " pxor 32(%4), %%mm4 ;\n"
- " pxor 40(%3), %%mm5 ;\n"
- " movq 48(%1), %%mm6 ;\n"
- " movq 56(%1), %%mm7 ;\n"
- " pxor 32(%5), %%mm4 ;\n"
- " pxor 40(%4), %%mm5 ;\n"
- " pxor 48(%2), %%mm6 ;\n"
- " pxor 56(%2), %%mm7 ;\n"
- " movq %%mm4, 32(%1) ;\n"
- " pxor 48(%3), %%mm6 ;\n"
- " pxor 56(%3), %%mm7 ;\n"
- " pxor 40(%5), %%mm5 ;\n"
- " pxor 48(%4), %%mm6 ;\n"
- " pxor 56(%4), %%mm7 ;\n"
- " movq %%mm5, 40(%1) ;\n"
- " pxor 48(%5), %%mm6 ;\n"
- " pxor 56(%5), %%mm7 ;\n"
- " movq %%mm6, 48(%1) ;\n"
- " movq %%mm7, 56(%1) ;\n"
-
- " addl $64, %1 ;\n"
- " addl $64, %2 ;\n"
- " addl $64, %3 ;\n"
- " addl $64, %4 ;\n"
- " addl $64, %5 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- " popl %5\n"
- " popl %4\n"
- : "+g" (lines),
- "+r" (p1), "+r" (p2), "+r" (p3)
- : "r" (p4), "r" (p5)
- : "memory");
-
- FPU_RESTORE;
-}
-
-static struct xor_block_template xor_block_pII_mmx = {
- name: "pII_mmx",
- do_2: xor_pII_mmx_2,
- do_3: xor_pII_mmx_3,
- do_4: xor_pII_mmx_4,
- do_5: xor_pII_mmx_5,
-};
-
-static struct xor_block_template xor_block_p5_mmx = {
- name: "p5_mmx",
- do_2: xor_p5_mmx_2,
- do_3: xor_p5_mmx_3,
- do_4: xor_p5_mmx_4,
- do_5: xor_p5_mmx_5,
-};
-
-#undef FPU_SAVE
-#undef FPU_RESTORE
-
-/*
- * Cache avoiding checksumming functions utilizing KNI instructions
- * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo)
- */
-
-#define XMMS_SAVE \
- if (!(current->flags & PF_USEDFPU)) \
- clts(); \
- __asm__ __volatile__ ( \
- "movups %%xmm0,(%1) ;\n\t" \
- "movups %%xmm1,0x10(%1) ;\n\t" \
- "movups %%xmm2,0x20(%1) ;\n\t" \
- "movups %%xmm3,0x30(%1) ;\n\t" \
- : "=&r" (cr0) \
- : "r" (xmm_save) \
- : "memory")
-
-#define XMMS_RESTORE \
- __asm__ __volatile__ ( \
- "sfence ;\n\t" \
- "movups (%1),%%xmm0 ;\n\t" \
- "movups 0x10(%1),%%xmm1 ;\n\t" \
- "movups 0x20(%1),%%xmm2 ;\n\t" \
- "movups 0x30(%1),%%xmm3 ;\n\t" \
- : \
- : "r" (cr0), "r" (xmm_save) \
- : "memory"); \
- if (!(current->flags & PF_USEDFPU)) \
- stts()
-
-#define ALIGN16 __attribute__((aligned(16)))
-
-#define OFFS(x) "16*("#x")"
-#define PF_OFFS(x) "256+16*("#x")"
-#define PF0(x) " prefetchnta "PF_OFFS(x)"(%1) ;\n"
-#define LD(x,y) " movaps "OFFS(x)"(%1), %%xmm"#y" ;\n"
-#define ST(x,y) " movaps %%xmm"#y", "OFFS(x)"(%1) ;\n"
-#define PF1(x) " prefetchnta "PF_OFFS(x)"(%2) ;\n"
-#define PF2(x) " prefetchnta "PF_OFFS(x)"(%3) ;\n"
-#define PF3(x) " prefetchnta "PF_OFFS(x)"(%4) ;\n"
-#define PF4(x) " prefetchnta "PF_OFFS(x)"(%5) ;\n"
-#define PF5(x) " prefetchnta "PF_OFFS(x)"(%6) ;\n"
-#define XO1(x,y) " xorps "OFFS(x)"(%2), %%xmm"#y" ;\n"
-#define XO2(x,y) " xorps "OFFS(x)"(%3), %%xmm"#y" ;\n"
-#define XO3(x,y) " xorps "OFFS(x)"(%4), %%xmm"#y" ;\n"
-#define XO4(x,y) " xorps "OFFS(x)"(%5), %%xmm"#y" ;\n"
-#define XO5(x,y) " xorps "OFFS(x)"(%6), %%xmm"#y" ;\n"
-
-
-static void
-xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
-{
- unsigned long lines = bytes >> 8;
- char xmm_save[16*4] ALIGN16;
- int cr0;
-
- XMMS_SAVE;
-
- __asm__ __volatile__ (
-#undef BLOCK
-#define BLOCK(i) \
- LD(i,0) \
- LD(i+1,1) \
- PF1(i) \
- PF1(i+2) \
- LD(i+2,2) \
- LD(i+3,3) \
- PF0(i+4) \
- PF0(i+6) \
- XO1(i,0) \
- XO1(i+1,1) \
- XO1(i+2,2) \
- XO1(i+3,3) \
- ST(i,0) \
- ST(i+1,1) \
- ST(i+2,2) \
- ST(i+3,3) \
-
-
- PF0(0)
- PF0(2)
-
- " .align 32 ;\n"
- " 1: ;\n"
-
- BLOCK(0)
- BLOCK(4)
- BLOCK(8)
- BLOCK(12)
-
- " addl $256, %1 ;\n"
- " addl $256, %2 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- : "+r" (lines),
- "+r" (p1), "+r" (p2)
- :
- : "memory");
-
- XMMS_RESTORE;
-}
-
-static void
-xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3)
-{
- unsigned long lines = bytes >> 8;
- char xmm_save[16*4] ALIGN16;
- int cr0;
-
- XMMS_SAVE;
-
- __asm__ __volatile__ (
-#undef BLOCK
-#define BLOCK(i) \
- PF1(i) \
- PF1(i+2) \
- LD(i,0) \
- LD(i+1,1) \
- LD(i+2,2) \
- LD(i+3,3) \
- PF2(i) \
- PF2(i+2) \
- PF0(i+4) \
- PF0(i+6) \
- XO1(i,0) \
- XO1(i+1,1) \
- XO1(i+2,2) \
- XO1(i+3,3) \
- XO2(i,0) \
- XO2(i+1,1) \
- XO2(i+2,2) \
- XO2(i+3,3) \
- ST(i,0) \
- ST(i+1,1) \
- ST(i+2,2) \
- ST(i+3,3) \
-
-
- PF0(0)
- PF0(2)
-
- " .align 32 ;\n"
- " 1: ;\n"
-
- BLOCK(0)
- BLOCK(4)
- BLOCK(8)
- BLOCK(12)
-
- " addl $256, %1 ;\n"
- " addl $256, %2 ;\n"
- " addl $256, %3 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- : "+r" (lines),
- "+r" (p1), "+r"(p2), "+r"(p3)
- :
- : "memory" );
-
- XMMS_RESTORE;
-}
-
-static void
-xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4)
-{
- unsigned long lines = bytes >> 8;
- char xmm_save[16*4] ALIGN16;
- int cr0;
-
- XMMS_SAVE;
-
- __asm__ __volatile__ (
-#undef BLOCK
-#define BLOCK(i) \
- PF1(i) \
- PF1(i+2) \
- LD(i,0) \
- LD(i+1,1) \
- LD(i+2,2) \
- LD(i+3,3) \
- PF2(i) \
- PF2(i+2) \
- XO1(i,0) \
- XO1(i+1,1) \
- XO1(i+2,2) \
- XO1(i+3,3) \
- PF3(i) \
- PF3(i+2) \
- PF0(i+4) \
- PF0(i+6) \
- XO2(i,0) \
- XO2(i+1,1) \
- XO2(i+2,2) \
- XO2(i+3,3) \
- XO3(i,0) \
- XO3(i+1,1) \
- XO3(i+2,2) \
- XO3(i+3,3) \
- ST(i,0) \
- ST(i+1,1) \
- ST(i+2,2) \
- ST(i+3,3) \
-
-
- PF0(0)
- PF0(2)
-
- " .align 32 ;\n"
- " 1: ;\n"
-
- BLOCK(0)
- BLOCK(4)
- BLOCK(8)
- BLOCK(12)
-
- " addl $256, %1 ;\n"
- " addl $256, %2 ;\n"
- " addl $256, %3 ;\n"
- " addl $256, %4 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- : "+r" (lines),
- "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
- :
- : "memory" );
-
- XMMS_RESTORE;
-}
-
-static void
-xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4, unsigned long *p5)
-{
- unsigned long lines = bytes >> 8;
- char xmm_save[16*4] ALIGN16;
- int cr0;
-
- XMMS_SAVE;
-
- /* need to save p4/p5 manually to not exceed gcc's 10 argument limit */
- __asm__ __volatile__ (
- " pushl %4\n"
- " pushl %5\n"
-#undef BLOCK
-#define BLOCK(i) \
- PF1(i) \
- PF1(i+2) \
- LD(i,0) \
- LD(i+1,1) \
- LD(i+2,2) \
- LD(i+3,3) \
- PF2(i) \
- PF2(i+2) \
- XO1(i,0) \
- XO1(i+1,1) \
- XO1(i+2,2) \
- XO1(i+3,3) \
- PF3(i) \
- PF3(i+2) \
- XO2(i,0) \
- XO2(i+1,1) \
- XO2(i+2,2) \
- XO2(i+3,3) \
- PF4(i) \
- PF4(i+2) \
- PF0(i+4) \
- PF0(i+6) \
- XO3(i,0) \
- XO3(i+1,1) \
- XO3(i+2,2) \
- XO3(i+3,3) \
- XO4(i,0) \
- XO4(i+1,1) \
- XO4(i+2,2) \
- XO4(i+3,3) \
- ST(i,0) \
- ST(i+1,1) \
- ST(i+2,2) \
- ST(i+3,3) \
-
-
- PF0(0)
- PF0(2)
-
- " .align 32 ;\n"
- " 1: ;\n"
-
- BLOCK(0)
- BLOCK(4)
- BLOCK(8)
- BLOCK(12)
-
- " addl $256, %1 ;\n"
- " addl $256, %2 ;\n"
- " addl $256, %3 ;\n"
- " addl $256, %4 ;\n"
- " addl $256, %5 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- " popl %5\n"
- " popl %4\n"
- : "+r" (lines),
- "+r" (p1), "+r" (p2), "+r" (p3)
- : "r" (p4), "r" (p5)
- : "memory");
-
- XMMS_RESTORE;
-}
-
-static struct xor_block_template xor_block_pIII_sse = {
- name: "pIII_sse",
- do_2: xor_sse_2,
- do_3: xor_sse_3,
- do_4: xor_sse_4,
- do_5: xor_sse_5,
-};
-
-/* Also try the generic routines. */
-#include <asm-generic/xor.h>
-
-#undef XOR_TRY_TEMPLATES
-#define XOR_TRY_TEMPLATES \
- do { \
- xor_speed(&xor_block_8regs); \
- xor_speed(&xor_block_32regs); \
- if (cpu_has_xmm) \
- xor_speed(&xor_block_pIII_sse); \
- if (md_cpu_has_mmx()) { \
- xor_speed(&xor_block_pII_mmx); \
- xor_speed(&xor_block_p5_mmx); \
- } \
- } while (0)
-
-/* We force the use of the SSE xor block because it can write around L2.
- We may also be able to load into the L1 only depending on how the cpu
- deals with a load to a line that is being prefetched. */
-#define XOR_SELECT_TEMPLATE(FASTEST) \
- (cpu_has_xmm ? &xor_block_pIII_sse : FASTEST)
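
For orientation, a hedged sketch of the shape xor_speed() expects: an additional template with a portable reference implementation. The names are hypothetical, and a real template would also provide do_3 through do_5.

/* Illustrative sketch only. */
static void
xor_c_ref_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
{
        unsigned long i, words = bytes / sizeof(unsigned long);

        for (i = 0; i < words; i++)
                p1[i] ^= p2[i];         /* destination is always p1 */
}

static struct xor_block_template xor_block_c_ref = {
        name: "c_reference",
        do_2: xor_c_ref_2,
        /* do_3, do_4 and do_5 omitted from this sketch */
};
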
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/include/linux/blk.h
--- a/linux-2.4.30-xen-sparse/include/linux/blk.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,409 +0,0 @@
-#ifndef _BLK_H
-#define _BLK_H
-
-#include <linux/blkdev.h>
-#include <linux/locks.h>
-#include <linux/config.h>
-#include <linux/spinlock.h>
-
-/*
- * Spinlock for protecting the request queue which
- * is mucked around with in interrupts on potentially
- * multiple CPU's..
- */
-extern spinlock_t io_request_lock;
-
-/*
- * Initialization functions.
- */
-extern int isp16_init(void);
-extern int cdu31a_init(void);
-extern int acsi_init(void);
-extern int mcd_init(void);
-extern int mcdx_init(void);
-extern int sbpcd_init(void);
-extern int aztcd_init(void);
-extern int sony535_init(void);
-extern int gscd_init(void);
-extern int cm206_init(void);
-extern int optcd_init(void);
-extern int sjcd_init(void);
-extern int cdi_init(void);
-extern int hd_init(void);
-extern int ide_init(void);
-extern int xd_init(void);
-extern int mfm_init(void);
-extern int loop_init(void);
-extern int md_init(void);
-extern int ap_init(void);
-extern int ddv_init(void);
-extern int z2_init(void);
-extern int swim3_init(void);
-extern int swimiop_init(void);
-extern int amiga_floppy_init(void);
-extern int atari_floppy_init(void);
-extern int ez_init(void);
-extern int bpcd_init(void);
-extern int ps2esdi_init(void);
-extern int jsfd_init(void);
-extern int viodasd_init(void);
-extern int viocd_init(void);
-
-#if defined(CONFIG_ARCH_S390)
-extern int dasd_init(void);
-extern int xpram_init(void);
-extern int tapeblock_init(void);
-#endif /* CONFIG_ARCH_S390 */
-
-#if defined(CONFIG_XEN)
-extern int xlblk_init(void);
-#endif /* CONFIG_XEN */
-
-extern void set_device_ro(kdev_t dev,int flag);
-void add_blkdev_randomness(int major);
-
-extern int floppy_init(void);
-extern int rd_doload; /* 1 = load ramdisk, 0 = don't load */
-extern int rd_prompt; /* 1 = prompt for ramdisk, 0 = don't prompt */
-extern int rd_image_start; /* starting block # of image */
-
-#ifdef CONFIG_BLK_DEV_INITRD
-
-#define INITRD_MINOR 250 /* shouldn't collide with /dev/ram* too soon ... */
-
-extern unsigned long initrd_start,initrd_end;
-extern int initrd_below_start_ok; /* 1 if it is not an error if initrd_start < memory_start */
-void initrd_init(void);
-
-#endif
-
-
-/*
- * end_request() and friends. Must be called with the request queue spinlock
- * acquired. All functions called within end_request() _must_be_ atomic.
- *
- * Several drivers define their own end_request and call
- * end_that_request_first() and end_that_request_last()
- * for parts of the original function. This prevents
- * code duplication in drivers.
- */
-
-static inline void blkdev_dequeue_request(struct request * req)
-{
- list_del(&req->queue);
-}
-
-int end_that_request_first(struct request *req, int uptodate, char *name);
-void end_that_request_last(struct request *req);
-
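
A hedged sketch of how a driver defining its own end_request splits completion across these two halves; the device name is hypothetical, and the caller must hold io_request_lock as required above.

/* Illustrative sketch only. */
static void my_end_request(struct request *req, int uptodate)
{
        if (end_that_request_first(req, uptodate, "mydev"))
                return;                 /* more buffers remain in request */
        blkdev_dequeue_request(req);    /* unlink from the queue... */
        end_that_request_last(req);     /* ...then complete and free it */
}
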
-#if defined(MAJOR_NR) || defined(IDE_DRIVER)
-
-#undef DEVICE_ON
-#undef DEVICE_OFF
-
-/*
- * Add entries as needed.
- */
-
-#ifdef IDE_DRIVER
-
-#define DEVICE_NR(device) (MINOR(device) >> PARTN_BITS)
-#define DEVICE_NAME "ide"
-
-#elif (MAJOR_NR == RAMDISK_MAJOR)
-
-/* ram disk */
-#define DEVICE_NAME "ramdisk"
-#define DEVICE_NR(device) (MINOR(device))
-#define DEVICE_NO_RANDOM
-
-#elif (MAJOR_NR == Z2RAM_MAJOR)
-
-/* Zorro II Ram */
-#define DEVICE_NAME "Z2RAM"
-#define DEVICE_REQUEST do_z2_request
-#define DEVICE_NR(device) (MINOR(device))
-
-#elif (MAJOR_NR == FLOPPY_MAJOR)
-
-static void floppy_off(unsigned int nr);
-
-#define DEVICE_NAME "floppy"
-#define DEVICE_INTR do_floppy
-#define DEVICE_REQUEST do_fd_request
-#define DEVICE_NR(device) ( (MINOR(device) & 3) | ((MINOR(device) & 0x80 ) >> 5 ))
-#define DEVICE_OFF(device) floppy_off(DEVICE_NR(device))
-
-#elif (MAJOR_NR == HD_MAJOR)
-
-/* Hard disk: timeout is 6 seconds. */
-#define DEVICE_NAME "hard disk"
-#define DEVICE_INTR do_hd
-#define TIMEOUT_VALUE (6*HZ)
-#define DEVICE_REQUEST do_hd_request
-#define DEVICE_NR(device) (MINOR(device)>>6)
-
-#elif (SCSI_DISK_MAJOR(MAJOR_NR))
-
-#define DEVICE_NAME "scsidisk"
-#define TIMEOUT_VALUE (2*HZ)
-#define DEVICE_NR(device) (((MAJOR(device) & SD_MAJOR_MASK) << (8 - 4)) + (MINOR(device) >> 4))
-
-/* Kludge to use the same number for both char and block major numbers */
-#elif (MAJOR_NR == MD_MAJOR) && defined(MD_DRIVER)
-
-#define DEVICE_NAME "Multiple devices driver"
-#define DEVICE_REQUEST do_md_request
-#define DEVICE_NR(device) (MINOR(device))
-
-#elif (MAJOR_NR == SCSI_TAPE_MAJOR)
-
-#define DEVICE_NAME "scsitape"
-#define DEVICE_INTR do_st
-#define DEVICE_NR(device) (MINOR(device) & 0x7f)
-
-#elif (MAJOR_NR == OSST_MAJOR)
-
-#define DEVICE_NAME "onstream"
-#define DEVICE_INTR do_osst
-#define DEVICE_NR(device) (MINOR(device) & 0x7f)
-#define DEVICE_ON(device)
-#define DEVICE_OFF(device)
-
-#elif (MAJOR_NR == SCSI_CDROM_MAJOR)
-
-#define DEVICE_NAME "CD-ROM"
-#define DEVICE_NR(device) (MINOR(device))
-
-#elif (MAJOR_NR == XT_DISK_MAJOR)
-
-#define DEVICE_NAME "xt disk"
-#define DEVICE_REQUEST do_xd_request
-#define DEVICE_NR(device) (MINOR(device) >> 6)
-
-#elif (MAJOR_NR == PS2ESDI_MAJOR)
-
-#define DEVICE_NAME "PS/2 ESDI"
-#define DEVICE_REQUEST do_ps2esdi_request
-#define DEVICE_NR(device) (MINOR(device) >> 6)
-
-#elif (MAJOR_NR == CDU31A_CDROM_MAJOR)
-
-#define DEVICE_NAME "CDU31A"
-#define DEVICE_REQUEST do_cdu31a_request
-#define DEVICE_NR(device) (MINOR(device))
-
-#elif (MAJOR_NR == ACSI_MAJOR) && (defined(CONFIG_ATARI_ACSI) || defined(CONFIG_ATARI_ACSI_MODULE))
-
-#define DEVICE_NAME "ACSI"
-#define DEVICE_INTR do_acsi
-#define DEVICE_REQUEST do_acsi_request
-#define DEVICE_NR(device) (MINOR(device) >> 4)
-
-#elif (MAJOR_NR == MITSUMI_CDROM_MAJOR)
-
-#define DEVICE_NAME "Mitsumi CD-ROM"
-/* #define DEVICE_INTR do_mcd */
-#define DEVICE_REQUEST do_mcd_request
-#define DEVICE_NR(device) (MINOR(device))
-
-#elif (MAJOR_NR == MITSUMI_X_CDROM_MAJOR)
-
-#define DEVICE_NAME "Mitsumi CD-ROM"
-/* #define DEVICE_INTR do_mcdx */
-#define DEVICE_REQUEST do_mcdx_request
-#define DEVICE_NR(device) (MINOR(device))
-
-#elif (MAJOR_NR == MATSUSHITA_CDROM_MAJOR)
-
-#define DEVICE_NAME "Matsushita CD-ROM controller #1"
-#define DEVICE_REQUEST do_sbpcd_request
-#define DEVICE_NR(device) (MINOR(device))
-
-#elif (MAJOR_NR == MATSUSHITA_CDROM2_MAJOR)
-
-#define DEVICE_NAME "Matsushita CD-ROM controller #2"
-#define DEVICE_REQUEST do_sbpcd2_request
-#define DEVICE_NR(device) (MINOR(device))
-
-#elif (MAJOR_NR == MATSUSHITA_CDROM3_MAJOR)
-
-#define DEVICE_NAME "Matsushita CD-ROM controller #3"
-#define DEVICE_REQUEST do_sbpcd3_request
-#define DEVICE_NR(device) (MINOR(device))
-
-#elif (MAJOR_NR == MATSUSHITA_CDROM4_MAJOR)
-
-#define DEVICE_NAME "Matsushita CD-ROM controller #4"
-#define DEVICE_REQUEST do_sbpcd4_request
-#define DEVICE_NR(device) (MINOR(device))
-
-#elif (MAJOR_NR == AZTECH_CDROM_MAJOR)
-
-#define DEVICE_NAME "Aztech CD-ROM"
-#define DEVICE_REQUEST do_aztcd_request
-#define DEVICE_NR(device) (MINOR(device))
-
-#elif (MAJOR_NR == CDU535_CDROM_MAJOR)
-
-#define DEVICE_NAME "SONY-CDU535"
-#define DEVICE_INTR do_cdu535
-#define DEVICE_REQUEST do_cdu535_request
-#define DEVICE_NR(device) (MINOR(device))
-
-#elif (MAJOR_NR == GOLDSTAR_CDROM_MAJOR)
-
-#define DEVICE_NAME "Goldstar R420"
-#define DEVICE_REQUEST do_gscd_request
-#define DEVICE_NR(device) (MINOR(device))
-
-#elif (MAJOR_NR == CM206_CDROM_MAJOR)
-#define DEVICE_NAME "Philips/LMS CD-ROM cm206"
-#define DEVICE_REQUEST do_cm206_request
-#define DEVICE_NR(device) (MINOR(device))
-
-#elif (MAJOR_NR == OPTICS_CDROM_MAJOR)
-
-#define DEVICE_NAME "DOLPHIN 8000AT CD-ROM"
-#define DEVICE_REQUEST do_optcd_request
-#define DEVICE_NR(device) (MINOR(device))
-
-#elif (MAJOR_NR == SANYO_CDROM_MAJOR)
-
-#define DEVICE_NAME "Sanyo H94A CD-ROM"
-#define DEVICE_REQUEST do_sjcd_request
-#define DEVICE_NR(device) (MINOR(device))
-
-#elif (MAJOR_NR == APBLOCK_MAJOR)
-
-#define DEVICE_NAME "apblock"
-#define DEVICE_REQUEST ap_request
-#define DEVICE_NR(device) (MINOR(device))
-
-#elif (MAJOR_NR == DDV_MAJOR)
-
-#define DEVICE_NAME "ddv"
-#define DEVICE_REQUEST ddv_request
-#define DEVICE_NR(device) (MINOR(device)>>PARTN_BITS)
-
-#elif (MAJOR_NR == MFM_ACORN_MAJOR)
-
-#define DEVICE_NAME "mfm disk"
-#define DEVICE_INTR do_mfm
-#define DEVICE_REQUEST do_mfm_request
-#define DEVICE_NR(device) (MINOR(device) >> 6)
-
-#elif (MAJOR_NR == NBD_MAJOR)
-
-#define DEVICE_NAME "nbd"
-#define DEVICE_REQUEST do_nbd_request
-#define DEVICE_NR(device) (MINOR(device))
-
-#elif (MAJOR_NR == MDISK_MAJOR)
-
-#define DEVICE_NAME "mdisk"
-#define DEVICE_REQUEST mdisk_request
-#define DEVICE_NR(device) (MINOR(device))
-
-#elif (MAJOR_NR == DASD_MAJOR)
-
-#define DEVICE_NAME "dasd"
-#define DEVICE_REQUEST do_dasd_request
-#define DEVICE_NR(device) (MINOR(device) >> PARTN_BITS)
-
-#elif (MAJOR_NR == I2O_MAJOR)
-
-#define DEVICE_NAME "I2O block"
-#define DEVICE_REQUEST i2ob_request
-#define DEVICE_NR(device) (MINOR(device)>>4)
-
-#elif (MAJOR_NR == COMPAQ_SMART2_MAJOR)
-
-#define DEVICE_NAME "ida"
-#define TIMEOUT_VALUE (25*HZ)
-#define DEVICE_REQUEST do_ida_request
-#define DEVICE_NR(device) (MINOR(device) >> 4)
-
-#endif /* MAJOR_NR == whatever */
-
-/* provide DEVICE_xxx defaults, if not explicitly defined
- * above in the MAJOR_NR==xxx if-elif tree */
-#ifndef DEVICE_ON
-#define DEVICE_ON(device) do {} while (0)
-#endif
-#ifndef DEVICE_OFF
-#define DEVICE_OFF(device) do {} while (0)
-#endif
-
-#if (MAJOR_NR != SCSI_TAPE_MAJOR) && (MAJOR_NR != OSST_MAJOR)
-#if !defined(IDE_DRIVER)
-
-#ifndef CURRENT
-#define CURRENT blkdev_entry_next_request(&blk_dev[MAJOR_NR].request_queue.queue_head)
-#endif
-#ifndef QUEUE_EMPTY
-#define QUEUE_EMPTY list_empty(&blk_dev[MAJOR_NR].request_queue.queue_head)
-#endif
-
-#ifndef DEVICE_NAME
-#define DEVICE_NAME "unknown"
-#endif
-
-#define CURRENT_DEV DEVICE_NR(CURRENT->rq_dev)
-
-#ifdef DEVICE_INTR
-static void (*DEVICE_INTR)(void) = NULL;
-#endif
-
-#define SET_INTR(x) (DEVICE_INTR = (x))
-
-#ifdef DEVICE_REQUEST
-static void (DEVICE_REQUEST)(request_queue_t *);
-#endif
-
-#ifdef DEVICE_INTR
-#define CLEAR_INTR SET_INTR(NULL)
-#else
-#define CLEAR_INTR
-#endif
-
-#define INIT_REQUEST \
- if (QUEUE_EMPTY) {\
- CLEAR_INTR; \
- return; \
- } \
- if (MAJOR(CURRENT->rq_dev) != MAJOR_NR) \
- panic(DEVICE_NAME ": request list destroyed"); \
- if (CURRENT->bh) { \
- if (!buffer_locked(CURRENT->bh)) \
- panic(DEVICE_NAME ": block not locked"); \
- }
-
-#endif /* !defined(IDE_DRIVER) */
-
-
-#ifndef LOCAL_END_REQUEST /* If we have our own end_request, we do not want to include this mess */
-
-#if ! SCSI_BLK_MAJOR(MAJOR_NR) && (MAJOR_NR != COMPAQ_SMART2_MAJOR)
-
-static inline void end_request(int uptodate) {
- struct request *req = CURRENT;
-
- if (end_that_request_first(req, uptodate, DEVICE_NAME))
- return;
-
-#ifndef DEVICE_NO_RANDOM
- add_blkdev_randomness(MAJOR(req->rq_dev));
-#endif
- DEVICE_OFF(req->rq_dev);
- blkdev_dequeue_request(req);
- end_that_request_last(req);
-}
-
-#endif /* ! SCSI_BLK_MAJOR(MAJOR_NR) */
-#endif /* LOCAL_END_REQUEST */
-
-#endif /* (MAJOR_NR != SCSI_TAPE_MAJOR) */
-#endif /* defined(MAJOR_NR) || defined(IDE_DRIVER) */
-
-#endif /* _BLK_H */
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/include/linux/highmem.h
--- a/linux-2.4.30-xen-sparse/include/linux/highmem.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,137 +0,0 @@
-#ifndef _LINUX_HIGHMEM_H
-#define _LINUX_HIGHMEM_H
-
-#include <linux/config.h>
-#include <asm/pgalloc.h>
-
-#ifdef CONFIG_HIGHMEM
-
-extern struct page *highmem_start_page;
-
-#include <asm/highmem.h>
-
-/* declarations for linux/mm/highmem.c */
-unsigned int nr_free_highpages(void);
-void kmap_flush_unused(void);
-
-extern struct buffer_head *create_bounce(int rw, struct buffer_head * bh_orig);
-
-static inline char *bh_kmap(struct buffer_head *bh)
-{
- return kmap(bh->b_page) + bh_offset(bh);
-}
-
-static inline void bh_kunmap(struct buffer_head *bh)
-{
- kunmap(bh->b_page);
-}
-
-/*
- * remember to add offset! and never ever reenable interrupts between a
- * bh_kmap_irq and bh_kunmap_irq!!
- */
-static inline char *bh_kmap_irq(struct buffer_head *bh, unsigned long *flags)
-{
- unsigned long addr;
-
- __save_flags(*flags);
-
- /*
- * could be low
- */
- if (!PageHighMem(bh->b_page))
- return bh->b_data;
-
- /*
- * it's a highmem page
- */
- __cli();
- addr = (unsigned long) kmap_atomic(bh->b_page, KM_BH_IRQ);
-
- if (addr & ~PAGE_MASK)
- BUG();
-
- return (char *) addr + bh_offset(bh);
-}
-
-static inline void bh_kunmap_irq(char *buffer, unsigned long *flags)
-{
- unsigned long ptr = (unsigned long) buffer & PAGE_MASK;
-
- kunmap_atomic((void *) ptr, KM_BH_IRQ);
- __restore_flags(*flags);
-}
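
A hedged sketch of the irq-safe pair in use; the function is hypothetical. Interrupts stay disabled between map and unmap, exactly as the warning above demands.

/* Illustrative sketch only. */
static void copy_bh_to_buffer(struct buffer_head *bh, char *dst)
{
        unsigned long flags;
        char *src = bh_kmap_irq(bh, &flags);    /* may disable interrupts */

        memcpy(dst, src, bh->b_size);
        bh_kunmap_irq(src, &flags);             /* restores interrupt state */
}
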
-
-#else /* CONFIG_HIGHMEM */
-
-static inline unsigned int nr_free_highpages(void) { return 0; }
-static inline void kmap_flush_unused(void) { }
-
-static inline void *kmap(struct page *page) { return page_address(page); }
-
-#define kunmap(page) do { } while (0)
-
-#define kmap_atomic(page,idx) kmap(page)
-#define kunmap_atomic(page,idx) kunmap(page)
-
-#define bh_kmap(bh) ((bh)->b_data)
-#define bh_kunmap(bh) do { } while (0)
-#define kmap_nonblock(page) kmap(page)
-#define bh_kmap_irq(bh, flags) ((bh)->b_data)
-#define bh_kunmap_irq(bh, flags) do { *(flags) = 0; } while (0)
-
-#endif /* CONFIG_HIGHMEM */
-
-/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
-static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
-{
- void *addr = kmap_atomic(page, KM_USER0);
- clear_user_page(addr, vaddr);
- kunmap_atomic(addr, KM_USER0);
-}
-
-static inline void clear_highpage(struct page *page)
-{
- clear_page(kmap(page));
- kunmap(page);
-}
-
-/*
- * Same but also flushes aliased cache contents to RAM.
- */
-static inline void memclear_highpage_flush(struct page *page, unsigned int offset, unsigned int size)
-{
- char *kaddr;
-
- if (offset + size > PAGE_SIZE)
- out_of_line_bug();
- kaddr = kmap(page);
- memset(kaddr + offset, 0, size);
- flush_dcache_page(page);
- flush_page_to_ram(page);
- kunmap(page);
-}
-
-static inline void copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr)
-{
- char *vfrom, *vto;
-
- vfrom = kmap_atomic(from, KM_USER0);
- vto = kmap_atomic(to, KM_USER1);
- copy_user_page(vto, vfrom, vaddr);
- kunmap_atomic(vfrom, KM_USER0);
- kunmap_atomic(vto, KM_USER1);
-}
-
-static inline void copy_highpage(struct page *to, struct page *from)
-{
- char *vfrom, *vto;
-
- vfrom = kmap_atomic(from, KM_USER0);
- vto = kmap_atomic(to, KM_USER1);
- copy_page(vto, vfrom);
- kunmap_atomic(vfrom, KM_USER0);
- kunmap_atomic(vto, KM_USER1);
-}
-
-#endif /* _LINUX_HIGHMEM_H */
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/include/linux/irq.h
--- a/linux-2.4.30-xen-sparse/include/linux/irq.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,80 +0,0 @@
-#ifndef __irq_h
-#define __irq_h
-
-/*
- * Please do not include this file in generic code. There is currently
- * no requirement for any architecture to implement anything held
- * within this file.
- *
- * Thanks. --rmk
- */
-
-#include <linux/config.h>
-
-#if !defined(CONFIG_ARCH_S390)
-
-#include <linux/cache.h>
-#include <linux/spinlock.h>
-
-#include <asm/irq.h>
-#include <asm/ptrace.h>
-
-/*
- * IRQ line status.
- */
-#define IRQ_INPROGRESS 1 /* IRQ handler active - do not enter! */
-#define IRQ_DISABLED 2 /* IRQ disabled - do not enter! */
-#define IRQ_PENDING 4 /* IRQ pending - replay on enable */
-#define IRQ_REPLAY 8 /* IRQ has been replayed but not acked yet */
-#define IRQ_AUTODETECT 16 /* IRQ is being autodetected */
-#define IRQ_WAITING 32 /* IRQ not yet seen - for autodetection */
-#define IRQ_LEVEL 64 /* IRQ level triggered */
-#define IRQ_MASKED 128 /* IRQ masked - shouldn't be seen again */
-#define IRQ_PER_CPU 256 /* IRQ is per CPU */
-
-/*
- * Interrupt controller descriptor. This is all we need
- * to describe about the low-level hardware.
- */
-struct hw_interrupt_type {
- const char * typename;
- unsigned int (*startup)(unsigned int irq);
- void (*shutdown)(unsigned int irq);
- void (*enable)(unsigned int irq);
- void (*disable)(unsigned int irq);
- void (*ack)(unsigned int irq);
- void (*end)(unsigned int irq);
- void (*set_affinity)(unsigned int irq, unsigned long mask);
-};
-
-typedef struct hw_interrupt_type hw_irq_controller;
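
A hedged sketch of how a platform fills one of these descriptors for its controller; every my_*() function here is hypothetical.

/* Illustrative sketch only. */
static void my_enable(unsigned int irq);
static void my_disable(unsigned int irq);
static void my_ack(unsigned int irq);
static void my_end(unsigned int irq);

static unsigned int my_startup(unsigned int irq)
{
        my_enable(irq);
        return 0;                       /* no interrupt already pending */
}

static struct hw_interrupt_type my_pic = {
        typename:       "MY-PIC",
        startup:        my_startup,
        shutdown:       my_disable,
        enable:         my_enable,
        disable:        my_disable,
        ack:            my_ack,
        end:            my_end,
};
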
-
-/*
- * This is the "IRQ descriptor", which contains various information
- * about the irq, including what kind of hardware handling it has,
- * whether it is disabled etc etc.
- *
- * Pad this out to 32 bytes for cache and indexing reasons.
- */
-typedef struct {
- unsigned int status; /* IRQ status */
- hw_irq_controller *handler;
- struct irqaction *action; /* IRQ action list */
- unsigned int depth; /* nested irq disables */
- spinlock_t lock;
-} ____cacheline_aligned irq_desc_t;
-
-extern irq_desc_t irq_desc [NR_IRQS];
-
-#include <asm/hw_irq.h> /* the arch dependent stuff */
-
-extern int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
-extern int setup_irq(unsigned int , struct irqaction * );
-extern int teardown_irq(unsigned int , struct irqaction * );
-
-extern hw_irq_controller no_irq_type; /* needed in every arch ? */
-extern void no_action(int cpl, void *dev_id, struct pt_regs *regs);
-
-#endif
-
-#endif /* __irq_h */
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/include/linux/mm.h
--- a/linux-2.4.30-xen-sparse/include/linux/mm.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,727 +0,0 @@
-#ifndef _LINUX_MM_H
-#define _LINUX_MM_H
-
-#include <linux/sched.h>
-#include <linux/errno.h>
-
-#ifdef __KERNEL__
-
-#include <linux/config.h>
-#include <linux/string.h>
-#include <linux/list.h>
-#include <linux/mmzone.h>
-#include <linux/swap.h>
-#include <linux/rbtree.h>
-
-extern unsigned long max_mapnr;
-extern unsigned long num_physpages;
-extern unsigned long num_mappedpages;
-extern void * high_memory;
-extern int page_cluster;
-/* The inactive_clean lists are per zone. */
-extern struct list_head active_list;
-extern struct list_head inactive_list;
-
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/atomic.h>
-
-/*
- * Linux kernel virtual memory manager primitives.
- * The idea being to have a "virtual" mm in the same way
- * we have a virtual fs - giving a cleaner interface to the
- * mm details, and allowing different kinds of memory mappings
- * (from shared memory to executable loading to arbitrary
- * mmap() functions).
- */
-
-/*
- * This struct defines a memory VMM memory area. There is one of these
- * per VM-area/task. A VM area is any part of the process virtual memory
- * space that has a special rule for the page-fault handlers (ie a shared
- * library, the executable area etc).
- */
-struct vm_area_struct {
- struct mm_struct * vm_mm; /* The address space we belong to. */
- unsigned long vm_start; /* Our start address within vm_mm. */
- unsigned long vm_end; /* The first byte after our end address
- within vm_mm. */
-
- /* linked list of VM areas per task, sorted by address */
- struct vm_area_struct *vm_next;
-
- pgprot_t vm_page_prot; /* Access permissions of this VMA. */
- unsigned long vm_flags; /* Flags, listed below. */
-
- rb_node_t vm_rb;
-
- /*
- * For areas with an address space and backing store,
- * one of the address_space->i_mmap{,shared} lists,
- * for shm areas, the list of attaches, otherwise unused.
- */
- struct vm_area_struct *vm_next_share;
- struct vm_area_struct **vm_pprev_share;
-
- /* Function pointers to deal with this struct. */
- struct vm_operations_struct * vm_ops;
-
- /* Information about our backing store: */
- unsigned long vm_pgoff; /* Offset (within vm_file) in PAGE_SIZE
- units, *not* PAGE_CACHE_SIZE */
- struct file * vm_file; /* File we map to (can be NULL). */
- unsigned long vm_raend; /* XXX: put full readahead info here. */
- void * vm_private_data; /* was vm_pte (shared mem) */
-};
-
-/*
- * vm_flags..
- */
-#define VM_READ 0x00000001 /* currently active flags */
-#define VM_WRITE 0x00000002
-#define VM_EXEC 0x00000004
-#define VM_SHARED 0x00000008
-
-#define VM_MAYREAD 0x00000010 /* limits for mprotect() etc */
-#define VM_MAYWRITE 0x00000020
-#define VM_MAYEXEC 0x00000040
-#define VM_MAYSHARE 0x00000080
-
-#define VM_GROWSDOWN 0x00000100 /* general info on the segment */
-#define VM_GROWSUP 0x00000200
-#define VM_SHM 0x00000400 /* shared memory area, don't swap out */
-#define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */
-
-#define VM_EXECUTABLE 0x00001000
-#define VM_LOCKED 0x00002000
-#define VM_IO 0x00004000 /* Memory mapped I/O or similar */
-
- /* Used by sys_madvise() */
-#define VM_SEQ_READ 0x00008000 /* App will access data sequentially */
-#define VM_RAND_READ 0x00010000 /* App will not benefit from clustered reads */
-
-#define VM_DONTCOPY 0x00020000 /* Do not copy this vma on fork */
-#define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */
-#define VM_RESERVED 0x00080000 /* Don't unmap it from swap_out */
-
-#ifndef VM_STACK_FLAGS
-#define VM_STACK_FLAGS 0x00000177
-#endif
-
-#define VM_READHINTMASK (VM_SEQ_READ | VM_RAND_READ)
-#define VM_ClearReadHint(v) (v)->vm_flags &= ~VM_READHINTMASK
-#define VM_NormalReadHint(v) (!((v)->vm_flags & VM_READHINTMASK))
-#define VM_SequentialReadHint(v) ((v)->vm_flags & VM_SEQ_READ)
-#define VM_RandomReadHint(v) ((v)->vm_flags & VM_RAND_READ)
-
-/* read ahead limits */
-extern int vm_min_readahead;
-extern int vm_max_readahead;
-
-/*
- * mapping from the currently active vm_flags protection bits (the
- * low four bits) to a page protection mask..
- */
-extern pgprot_t protection_map[16];
-
-
-/*
- * These are the virtual MM functions - opening of an area, closing and
- * unmapping it (needed to keep files on disk up-to-date etc), pointer
- * to the functions called when a no-page or a wp-page exception occurs.
- */
-struct vm_operations_struct {
- void (*open)(struct vm_area_struct * area);
- void (*close)(struct vm_area_struct * area);
- struct page * (*nopage)(struct vm_area_struct * area, unsigned long address, int unused);
-};
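
A hedged sketch of a minimal nopage method of the kind drivers hang off this struct; my_dev_page() is a hypothetical helper returning the struct page behind a given offset of the mapping.

/* Illustrative sketch only. */
static struct page *my_dev_page(void *priv, unsigned long pgoff);

static struct page *my_nopage(struct vm_area_struct *area,
                              unsigned long address, int unused)
{
        unsigned long pgoff = (address - area->vm_start) >> PAGE_SHIFT;
        struct page *page = my_dev_page(area->vm_private_data, pgoff);

        if (page)
                get_page(page);         /* fault path consumes a reference */
        return page;                    /* NULL means send SIGBUS */
}

static struct vm_operations_struct my_vm_ops = {
        nopage: my_nopage,
};
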
-
-/*
- * Each physical page in the system has a struct page associated with
- * it to keep track of whatever it is we are using the page for at the
- * moment. Note that we have no way to track which tasks are using
- * a page.
- *
- * Try to keep the most commonly accessed fields in single cache lines
- * here (16 bytes or greater). This ordering should be particularly
- * beneficial on 32-bit processors.
- *
- * The first line is data used in page cache lookup, the second line
- * is used for linear searches (eg. clock algorithm scans).
- *
- * TODO: make this structure smaller, it could be as small as 32 bytes.
- */
-typedef struct page {
- struct list_head list; /* ->mapping has some page lists. */
- struct address_space *mapping; /* The inode (or ...) we belong to. */
- unsigned long index; /* Our offset within mapping. */
- struct page *next_hash; /* Next page sharing our hash bucket in
- the pagecache hash table. */
- atomic_t count; /* Usage count, see below. */
- unsigned long flags; /* atomic flags, some possibly
- updated asynchronously */
- struct list_head lru; /* Pageout list, eg. active_list;
- protected by pagemap_lru_lock !! */
- struct page **pprev_hash; /* Complement to *next_hash. */
- struct buffer_head * buffers; /* Buffer maps us to a disk block. */
-
- /*
- * On machines where all RAM is mapped into kernel address space,
- * we can simply calculate the virtual address. On machines with
- * highmem some memory is mapped into kernel virtual memory
- * dynamically, so we need a place to store that address.
- * Note that this field could be 16 bits on x86 ... ;)
- *
- * Architectures with slow multiplication can define
- * WANT_PAGE_VIRTUAL in asm/page.h
- */
-#if defined(CONFIG_HIGHMEM) || defined(WANT_PAGE_VIRTUAL)
- void *virtual; /* Kernel virtual address (NULL if
- not kmapped, ie. highmem) */
-#endif /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
-} mem_map_t;
-
-/*
- * Methods to modify the page usage count.
- *
- * What counts for a page usage:
- * - cache mapping (page->mapping)
- * - disk mapping (page->buffers)
- * - page mapped in a task's page tables, each mapping
- * is counted separately
- *
- * Also, many kernel routines increase the page count before a critical
- * routine so they can be sure the page doesn't go away from under them.
- */
-#define get_page(p) atomic_inc(&(p)->count)
-#define put_page(p) __free_page(p)
-#define put_page_testzero(p) atomic_dec_and_test(&(p)->count)
-#define page_count(p) atomic_read(&(p)->count)
-#define set_page_count(p,v) atomic_set(&(p)->count, v)
-
-static inline struct page *nth_page(struct page *page, int n)
-{
- return page + n;
-}
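
The counting rules above reduce to a plain reference count; here is a minimal userspace sketch of the get_page()/put_page_testzero() lifecycle, with the atomics collapsed to an int (my_page and my_put are illustrative names, not kernel API):

  #include <stdio.h>
  #include <stdlib.h>

  /* Toy stand-in for struct page: only the usage count matters here. */
  struct my_page {
      int count;
  };

  static void my_get(struct my_page *p) { p->count++; }

  /* Mirrors put_page_testzero(): returns nonzero when the count hits 0. */
  static int my_put(struct my_page *p) { return --p->count == 0; }

  int main(void)
  {
      struct my_page *p = malloc(sizeof *p);
      p->count = 1;               /* allocator hands the page out with count 1 */
      my_get(p);                  /* page cache takes a second reference       */
      if (!my_put(p))             /* one user drops out; page must survive     */
          printf("page still live, count=%d\n", p->count);
      if (my_put(p)) {            /* last reference gone -> really free it     */
          printf("count hit zero, freeing\n");
          free(p);
      }
      return 0;
  }

The real macros differ only in using atomic_t, so the count can be manipulated without a lock.
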
-
-/*
- * Various page->flags bits:
- *
- * PG_reserved is set for special pages, which can never be swapped
- * out. Some of them might not even exist (eg empty_bad_page)...
- *
- * Multiple processes may "see" the same page. E.g. for untouched
- * mappings of /dev/null, all processes see the same page full of
- * zeroes, and text pages of executables and shared libraries have
- * only one copy in memory, at most, normally.
- *
- * For the non-reserved pages, page->count denotes a reference count.
- * page->count == 0 means the page is free.
- * page->count == 1 means the page is used for exactly one purpose
- * (e.g. a private data page of one process).
- *
- * A page may be used for kmalloc() or anyone else who does a
- * __get_free_page(). In this case the page->count is at least 1, and
- * all other fields are unused but should be 0 or NULL. The
- * management of this page is the responsibility of the one who uses
- * it.
- *
- * The other pages (we may call them "process pages") are completely
- * managed by the Linux memory manager: I/O, buffers, swapping etc.
- * The following discussion applies only to them.
- *
- * A page may belong to an inode's memory mapping. In this case,
- * page->mapping is the pointer to the inode, and page->index is the
- * file offset of the page, in units of PAGE_CACHE_SIZE.
- *
- * A page may have buffers allocated to it. In this case,
- * page->buffers is a circular list of these buffer heads. Else,
- * page->buffers == NULL.
- *
- * For pages belonging to inodes, the page->count is the number of
- * attaches, plus 1 if buffers are allocated to the page, plus one
- * for the page cache itself.
- *
- * All pages belonging to an inode are in these doubly linked lists:
- * mapping->clean_pages, mapping->dirty_pages and mapping->locked_pages;
- * using the page->list list_head. These fields are also used for
- * freelist management (when page->count==0).
- *
- * There is also a hash table mapping (mapping,index) to the page
- * in memory if present. The lists for this hash table use the fields
- * page->next_hash and page->pprev_hash.
- *
- * All process pages can do I/O:
- * - inode pages may need to be read from disk,
- * - inode pages which have been modified and are MAP_SHARED may need
- * to be written to disk,
- * - private pages which have been modified may need to be swapped out
- * to swap space and (later) to be read back into memory.
- * During disk I/O, PG_locked is used. This bit is set before I/O
- * and reset when I/O completes. page_waitqueue(page) is a wait queue of all
- * tasks waiting for the I/O on this page to complete.
- * PG_uptodate tells whether the page's contents is valid.
- * When a read completes, the page becomes uptodate, unless a disk I/O
- * error happened.
- *
- * For choosing which pages to swap out, inode pages carry a
- * PG_referenced bit, which is set any time the system accesses
- * that page through the (mapping,index) hash table. This referenced
- * bit, together with the referenced bit in the page tables, is used
- * to manipulate page->age and move the page across the active,
- * inactive_dirty and inactive_clean lists.
- *
- * Note that the referenced bit, the page->lru list_head and the
- * active, inactive_dirty and inactive_clean lists are protected by
- * the pagemap_lru_lock, and *NOT* by the usual PG_locked bit!
- *
- * PG_skip is used on sparc/sparc64 architectures to "skip" certain
- * parts of the address space.
- *
- * PG_error is set to indicate that an I/O error occurred on this page.
- *
- * PG_arch_1 is an architecture specific page state bit. The generic
- * code guarantees that this bit is cleared for a page when it first
- * is entered into the page cache.
- *
- * PG_highmem pages are not permanently mapped into the kernel virtual
- * address space, they need to be kmapped separately for doing IO on
- * the pages. The struct page (these bits with information) are always
- * mapped into kernel address space...
- */
-#define PG_locked 0 /* Page is locked. Don't touch. */
-#define PG_error 1
-#define PG_referenced 2
-#define PG_uptodate 3
-#define PG_dirty 4
-#define PG_unused 5
-#define PG_lru 6
-#define PG_active 7
-#define PG_slab 8
-#define PG_skip 10
-#define PG_highmem 11
-#define PG_checked 12 /* kill me in 2.5.<early>. */
-#define PG_arch_1 13
-#define PG_reserved 14
-#define PG_launder 15 /* written out by VM pressure.. */
-#define PG_fs_1 16 /* Filesystem specific */
-#define PG_foreign 21 /* Page belongs to foreign allocator */
-
-#ifndef arch_set_page_uptodate
-#define arch_set_page_uptodate(page)
-#endif
-
-/* Make it prettier to test the above... */
-#define UnlockPage(page) unlock_page(page)
-#define Page_Uptodate(page) test_bit(PG_uptodate, &(page)->flags)
-#ifndef SetPageUptodate
-#define SetPageUptodate(page) set_bit(PG_uptodate, &(page)->flags)
-#endif
-#define ClearPageUptodate(page) clear_bit(PG_uptodate, &(page)->flags)
-#define PageDirty(page) test_bit(PG_dirty, &(page)->flags)
-#define SetPageDirty(page) set_bit(PG_dirty, &(page)->flags)
-#define ClearPageDirty(page) clear_bit(PG_dirty, &(page)->flags)
-#define PageLocked(page) test_bit(PG_locked, &(page)->flags)
-#define LockPage(page) set_bit(PG_locked, &(page)->flags)
-#define TryLockPage(page) test_and_set_bit(PG_locked, &(page)->flags)
-#define PageChecked(page) test_bit(PG_checked, &(page)->flags)
-#define SetPageChecked(page) set_bit(PG_checked, &(page)->flags)
-#define ClearPageChecked(page) clear_bit(PG_checked, &(page)->flags)
-#define PageLaunder(page) test_bit(PG_launder, &(page)->flags)
-#define SetPageLaunder(page) set_bit(PG_launder, &(page)->flags)
-#define ClearPageLaunder(page) clear_bit(PG_launder, &(page)->flags)
-#define ClearPageArch1(page) clear_bit(PG_arch_1, &(page)->flags)
-
-/* A foreign page uses a custom destructor rather than the buddy allocator. */
-#ifdef CONFIG_FOREIGN_PAGES
-#define PageForeign(page) test_bit(PG_foreign, &(page)->flags)
-#define SetPageForeign(page, dtor) do { \
- set_bit(PG_foreign, &(page)->flags); \
- (page)->mapping = (void *)dtor; \
-} while (0)
-#define ClearPageForeign(page) do { \
- clear_bit(PG_foreign, &(page)->flags); \
- (page)->mapping = NULL; \
-} while (0)
-#define PageForeignDestructor(page) \
- ( (void (*) (struct page *)) (page)->mapping )
-#else
-#define PageForeign(page) 0
-#define PageForeignDestructor(page) void
-#endif
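
A foreign page's destructor lives in the otherwise-unused ->mapping field, as the macros above show; a small standalone sketch of that trick (toy_page and netif_release are hypothetical, and only the flag bit value is taken from PG_foreign above):

  #include <stdio.h>

  struct toy_page {
      void *mapping;              /* reused to stash the destructor, as above */
      unsigned long flags;
  };

  #define TOY_PG_FOREIGN 21

  static void netif_release(struct toy_page *pg)   /* illustrative destructor */
  {
      printf("custom destructor ran for %p\n", (void *)pg);
  }

  int main(void)
  {
      struct toy_page pg = { 0, 0 };

      /* SetPageForeign(): flag the page and smuggle the dtor into ->mapping. */
      pg.flags |= 1UL << TOY_PG_FOREIGN;
      pg.mapping = (void *)netif_release;

      /* Free path: foreign pages bypass the normal allocator entirely. */
      if (pg.flags & (1UL << TOY_PG_FOREIGN))
          ((void (*)(struct toy_page *))pg.mapping)(&pg);
      return 0;
  }
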
-
-/*
- * The zone field is never updated after free_area_init_core()
- * sets it, so none of the operations on it need to be atomic.
- */
-#define NODE_SHIFT 4
-#define ZONE_SHIFT (BITS_PER_LONG - 8)
-
-struct zone_struct;
-extern struct zone_struct *zone_table[];
-
-static inline zone_t *page_zone(struct page *page)
-{
- return zone_table[page->flags >> ZONE_SHIFT];
-}
-
-static inline void set_page_zone(struct page *page, unsigned long zone_num)
-{
- page->flags &= ~(~0UL << ZONE_SHIFT);
- page->flags |= zone_num << ZONE_SHIFT;
-}
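
The zone number lives in the top bits of page->flags while the status bits stay at the bottom; a standalone model of the set_page_zone()/page_zone() packing (BITS_PER_LONG fixed at 64 here purely for illustration):

  #include <stdio.h>

  #define BITS_PER_LONG 64                 /* assumption for this sketch */
  #define ZONE_SHIFT (BITS_PER_LONG - 8)

  static unsigned long set_zone(unsigned long flags, unsigned long zone)
  {
      flags &= ~(~0UL << ZONE_SHIFT);      /* clear the old zone bits    */
      return flags | (zone << ZONE_SHIFT); /* install the zone number    */
  }

  int main(void)
  {
      unsigned long flags = 0x5UL;         /* some low status bits set   */
      flags = set_zone(flags, 2);          /* e.g. a highmem zone index  */
      printf("zone = %lu, low bits = 0x%lx\n",
             flags >> ZONE_SHIFT, flags & ((1UL << ZONE_SHIFT) - 1));
      return 0;
  }
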
-
-/*
- * In order to avoid #ifdefs within C code itself, we define
- * set_page_address to a noop for non-highmem machines, where
- * the field isn't useful.
- * The same is true for page_address() in arch-dependent code.
- */
-#if defined(CONFIG_HIGHMEM) || defined(WANT_PAGE_VIRTUAL)
-
-#define set_page_address(page, address) \
- do { \
- (page)->virtual = (address); \
- } while(0)
-
-#else /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
-#define set_page_address(page, address) do { } while(0)
-#endif /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
-
-/*
- * Permanent address of a page. Obviously must never be
- * called on a highmem page.
- */
-#if defined(CONFIG_HIGHMEM) || defined(WANT_PAGE_VIRTUAL)
-
-#define page_address(page) ((page)->virtual)
-
-#else /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
-
-#define page_address(page) \
- __va( (((page) - page_zone(page)->zone_mem_map) << PAGE_SHIFT) \
- + page_zone(page)->zone_start_paddr)
-
-#endif /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
-
-extern void FASTCALL(set_page_dirty(struct page *));
-
-/*
- * The first mb is necessary to safely close the critical section opened by the
- * TryLockPage(), the second mb is necessary to enforce ordering between
- * the clear_bit and the read of the waitqueue (to avoid SMP races with a
- * parallel wait_on_page).
- */
-#define PageError(page) test_bit(PG_error, &(page)->flags)
-#define SetPageError(page) set_bit(PG_error, &(page)->flags)
-#define ClearPageError(page) clear_bit(PG_error, &(page)->flags)
-#define PageReferenced(page) test_bit(PG_referenced, &(page)->flags)
-#define SetPageReferenced(page) set_bit(PG_referenced, &(page)->flags)
-#define ClearPageReferenced(page) clear_bit(PG_referenced, &(page)->flags)
-#define PageTestandClearReferenced(page)	test_and_clear_bit(PG_referenced, &(page)->flags)
-#define PageSlab(page) test_bit(PG_slab, &(page)->flags)
-#define PageSetSlab(page) set_bit(PG_slab, &(page)->flags)
-#define PageClearSlab(page) clear_bit(PG_slab, &(page)->flags)
-#define PageReserved(page) test_bit(PG_reserved, &(page)->flags)
-
-#define PageActive(page) test_bit(PG_active, &(page)->flags)
-#define SetPageActive(page) set_bit(PG_active, &(page)->flags)
-#define ClearPageActive(page) clear_bit(PG_active, &(page)->flags)
-
-#define PageLRU(page) test_bit(PG_lru, &(page)->flags)
-#define TestSetPageLRU(page) test_and_set_bit(PG_lru, &(page)->flags)
-#define TestClearPageLRU(page) test_and_clear_bit(PG_lru, &(page)->flags)
-
-#ifdef CONFIG_HIGHMEM
-#define PageHighMem(page) test_bit(PG_highmem, &(page)->flags)
-#else
-#define PageHighMem(page)		0 /* needed to optimize away at compile time */
-#endif
-
-#define SetPageReserved(page) set_bit(PG_reserved, &(page)->flags)
-#define ClearPageReserved(page)		clear_bit(PG_reserved, &(page)->flags)
-
-/*
- * Error return values for the *_nopage functions
- */
-#define NOPAGE_SIGBUS (NULL)
-#define NOPAGE_OOM ((struct page *) (-1))
-
-/* The array of struct pages */
-extern mem_map_t * mem_map;
-
-/*
- * There is only one page-allocator function, and two main namespaces to
- * it. The alloc_page*() variants return 'struct page *' and as such
- * can allocate highmem pages, the *get*page*() variants return
- * virtual kernel addresses to the allocated page(s).
- */
-extern struct page * FASTCALL(_alloc_pages(unsigned int gfp_mask, unsigned int order));
-extern struct page * FASTCALL(__alloc_pages(unsigned int gfp_mask, unsigned int order, zonelist_t *zonelist));
-extern struct page * alloc_pages_node(int nid, unsigned int gfp_mask, unsigned int order);
-
-static inline struct page * alloc_pages(unsigned int gfp_mask, unsigned int order)
-{
- /*
- * Gets optimized away by the compiler.
- */
- if (order >= MAX_ORDER)
- return NULL;
- return _alloc_pages(gfp_mask, order);
-}
-
-#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
-
-extern unsigned long FASTCALL(__get_free_pages(unsigned int gfp_mask, unsigned int order));
-extern unsigned long FASTCALL(get_zeroed_page(unsigned int gfp_mask));
-
-#define __get_free_page(gfp_mask) \
- __get_free_pages((gfp_mask),0)
-
-#define __get_dma_pages(gfp_mask, order) \
- __get_free_pages((gfp_mask) | GFP_DMA,(order))
-
-/*
- * The old interface name will be removed in 2.5:
- */
-#define get_free_page get_zeroed_page
-
-/*
- * There is only one 'core' page-freeing function.
- */
-extern void FASTCALL(__free_pages(struct page *page, unsigned int order));
-extern void FASTCALL(free_pages(unsigned long addr, unsigned int order));
-
-#define __free_page(page) __free_pages((page), 0)
-#define free_page(addr) free_pages((addr),0)
-
-extern void show_free_areas(void);
-extern void show_free_areas_node(pg_data_t *pgdat);
-
-extern void clear_page_tables(struct mm_struct *, unsigned long, int);
-
-extern int fail_writepage(struct page *);
-struct page * shmem_nopage(struct vm_area_struct * vma, unsigned long address, int unused);
-struct file *shmem_file_setup(char * name, loff_t size);
-extern void shmem_lock(struct file * file, int lock);
-extern int shmem_zero_setup(struct vm_area_struct *);
-
-extern void zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size);
-extern int copy_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma);
-extern int remap_page_range(unsigned long from, unsigned long to, unsigned long size, pgprot_t prot);
-extern int zeromap_page_range(unsigned long from, unsigned long size, pgprot_t prot);
-
-extern int vmtruncate(struct inode * inode, loff_t offset);
-extern pmd_t *FASTCALL(__pmd_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address));
-extern pte_t *FASTCALL(pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address));
-extern int handle_mm_fault(struct mm_struct *mm,struct vm_area_struct *vma, unsigned long address, int write_access);
-extern int make_pages_present(unsigned long addr, unsigned long end);
-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
-extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char *dst, int len);
-extern int ptrace_writedata(struct task_struct *tsk, char * src, unsigned long dst, int len);
-extern int ptrace_attach(struct task_struct *tsk);
-extern int ptrace_detach(struct task_struct *, unsigned int);
-extern void ptrace_disable(struct task_struct *);
-extern int ptrace_check_attach(struct task_struct *task, int kill);
-
-int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start,
-		int len, int write, int force, struct page **pages, struct vm_area_struct **vmas);
-
-/*
- * On a two-level page table, this ends up being trivial. Thus the
- * inlining and the symmetry break with pte_alloc() that does all
- * of this out-of-line.
- */
-static inline pmd_t *pmd_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
-{
- if (pgd_none(*pgd))
- return __pmd_alloc(mm, pgd, address);
- return pmd_offset(pgd, address);
-}
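
The on-demand allocation that pmd_alloc() performs for the middle level can be modelled with an ordinary two-level array; a self-contained sketch (the index widths are arbitrary, not the real page-table geometry):

  #include <stdio.h>
  #include <stdlib.h>

  /* Two-level lookup: 4 bits of "directory" index, 4 bits of "table" index. */
  #define DIR_BITS  4
  #define TBL_BITS  4
  #define TBL_SIZE  (1 << TBL_BITS)

  int main(void)
  {
      int *dir[1 << DIR_BITS] = { 0 };     /* top level, all empty        */
      unsigned addr = 0x37;                /* dir index 3, table index 7  */
      unsigned di = addr >> TBL_BITS, ti = addr & (TBL_SIZE - 1);

      /* pmd_alloc() analogue: allocate the second level only on demand. */
      if (!dir[di])
          dir[di] = calloc(TBL_SIZE, sizeof(int));

      dir[di][ti] = 42;                    /* "map" the address           */
      printf("addr 0x%x -> %d\n", addr, dir[di][ti]);
      free(dir[di]);
      return 0;
  }
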
-
-extern int pgt_cache_water[2];
-extern int check_pgt_cache(void);
-
-extern void free_area_init(unsigned long * zones_size);
-extern void free_area_init_node(int nid, pg_data_t *pgdat, struct page *pmap,
- unsigned long * zones_size, unsigned long zone_start_paddr,
- unsigned long *zholes_size);
-extern void mem_init(void);
-extern void show_mem(void);
-extern void si_meminfo(struct sysinfo * val);
-extern void swapin_readahead(swp_entry_t);
-
-extern struct address_space swapper_space;
-#define PageSwapCache(page) ((page)->mapping == &swapper_space)
-
-static inline int is_page_cache_freeable(struct page * page)
-{
- return page_count(page) - !!page->buffers == 1;
-}
-
-extern int FASTCALL(can_share_swap_page(struct page *));
-extern int FASTCALL(remove_exclusive_swap_page(struct page *));
-
-extern void __free_pte(pte_t);
-
-/* mmap.c */
-extern void lock_vma_mappings(struct vm_area_struct *);
-extern void unlock_vma_mappings(struct vm_area_struct *);
-extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
-extern void __insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
-extern void build_mmap_rb(struct mm_struct *);
-extern void exit_mmap(struct mm_struct *);
-
-extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
-
-extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
- unsigned long len, unsigned long prot,
- unsigned long flag, unsigned long pgoff);
-
-static inline unsigned long do_mmap(struct file *file, unsigned long addr,
- unsigned long len, unsigned long prot,
- unsigned long flag, unsigned long offset)
-{
- unsigned long ret = -EINVAL;
- if ((offset + PAGE_ALIGN(len)) < offset)
- goto out;
- if (!(offset & ~PAGE_MASK))
-		ret = do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
-out:
- return ret;
-}
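
The guard in do_mmap() is a standard unsigned-overflow test: if offset plus the page-aligned length wraps around, the sum compares smaller than offset. A standalone demonstration:

  #include <stdio.h>

  #define PAGE_SIZE 4096UL
  #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

  int main(void)
  {
      unsigned long offset = ~0UL - 1000;  /* near the top of the space  */
      unsigned long len = 8192;

      /* Same test as do_mmap(): a sum smaller than one operand means the
       * unsigned addition wrapped around, so the request is bogus. */
      if (offset + PAGE_ALIGN(len) < offset)
          printf("rejected: offset+len wraps past 0\n");
      return 0;
  }
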
-
-extern int do_munmap(struct mm_struct *, unsigned long, size_t);
-
-extern unsigned long do_brk(unsigned long, unsigned long);
-
-static inline void __vma_unlink(struct mm_struct * mm, struct vm_area_struct * vma, struct vm_area_struct * prev)
-{
- prev->vm_next = vma->vm_next;
- rb_erase(&vma->vm_rb, &mm->mm_rb);
- if (mm->mmap_cache == vma)
- mm->mmap_cache = prev;
-}
-
-static inline int can_vma_merge(struct vm_area_struct * vma, unsigned long vm_flags)
-{
- if (!vma->vm_file && vma->vm_flags == vm_flags)
- return 1;
- else
- return 0;
-}
-
-struct zone_t;
-/* filemap.c */
-extern void remove_inode_page(struct page *);
-extern unsigned long page_unuse(struct page *);
-extern void truncate_inode_pages(struct address_space *, loff_t);
-
-/* generic vm_area_ops exported for stackable file systems */
-extern int filemap_sync(struct vm_area_struct *, unsigned long, size_t, unsigned int);
-extern struct page *filemap_nopage(struct vm_area_struct *, unsigned long, int);
-
-/*
- * GFP bitmasks..
- */
-/* Zone modifiers in GFP_ZONEMASK (see linux/mmzone.h - low four bits) */
-#define __GFP_DMA 0x01
-#define __GFP_HIGHMEM 0x02
-
-/* Action modifiers - doesn't change the zoning */
-#define __GFP_WAIT 0x10 /* Can wait and reschedule? */
-#define __GFP_HIGH 0x20 /* Should access emergency pools? */
-#define __GFP_IO 0x40 /* Can start low memory physical IO? */
-#define __GFP_HIGHIO 0x80 /* Can start high mem physical IO? */
-#define __GFP_FS 0x100 /* Can call down to low-level FS? */
-
-#define GFP_NOHIGHIO (__GFP_HIGH | __GFP_WAIT | __GFP_IO)
-#define GFP_NOIO (__GFP_HIGH | __GFP_WAIT)
-#define GFP_NOFS (__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO)
-#define GFP_ATOMIC (__GFP_HIGH)
-#define GFP_USER	(             __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
-#define GFP_HIGHUSER	(             __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS | __GFP_HIGHMEM)
-#define GFP_KERNEL	(__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
-#define GFP_NFS		(__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
-#define GFP_KSWAPD	(             __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
-
-/* Flag - indicates that the buffer will be suitable for DMA.  Ignored on some
-   platforms, used as appropriate on others */
-
-#define GFP_DMA __GFP_DMA
-
-static inline unsigned int pf_gfp_mask(unsigned int gfp_mask)
-{
-	/* avoid all memory balancing I/O methods if this task cannot block on I/O */
- if (current->flags & PF_NOIO)
- gfp_mask &= ~(__GFP_IO | __GFP_HIGHIO | __GFP_FS);
-
- return gfp_mask;
-}
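
pf_gfp_mask() simply strips the I/O-capable bits from the caller's mask when the task is flagged PF_NOIO; a self-contained sketch with the relevant constants copied from above:

  #include <stdio.h>

  #define __GFP_WAIT   0x10
  #define __GFP_HIGH   0x20
  #define __GFP_IO     0x40
  #define __GFP_HIGHIO 0x80
  #define __GFP_FS     0x100
  #define GFP_KERNEL   (__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
  #define PF_NOIO      0x00004000

  int main(void)
  {
      unsigned int task_flags = PF_NOIO;   /* pretend current->flags     */
      unsigned int mask = GFP_KERNEL;

      /* pf_gfp_mask() analogue: a task that must not issue I/O gets the
       * I/O-capable bits stripped from whatever mask it passed in. */
      if (task_flags & PF_NOIO)
          mask &= ~(__GFP_IO | __GFP_HIGHIO | __GFP_FS);

      printf("effective mask: 0x%x\n", mask);   /* prints 0x30 */
      return 0;
  }
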
-
-/* vma is the first one with address < vma->vm_end,
- * and even address < vma->vm_start. Have to extend vma. */
-static inline int expand_stack(struct vm_area_struct * vma, unsigned long address)
-{
- unsigned long grow;
-
- /*
- * vma->vm_start/vm_end cannot change under us because the caller
- * is required to hold the mmap_sem in read mode. We need the
- * page_table_lock lock to serialize against concurrent expand_stacks.
- */
- address &= PAGE_MASK;
- spin_lock(&vma->vm_mm->page_table_lock);
-
- /* already expanded while we were spinning? */
- if (vma->vm_start <= address) {
- spin_unlock(&vma->vm_mm->page_table_lock);
- return 0;
- }
-
- grow = (vma->vm_start - address) >> PAGE_SHIFT;
- if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur ||
-	    ((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) > current->rlim[RLIMIT_AS].rlim_cur) {
- spin_unlock(&vma->vm_mm->page_table_lock);
- return -ENOMEM;
- }
-
- if ((vma->vm_flags & VM_LOCKED) &&
-	    ((vma->vm_mm->locked_vm + grow) << PAGE_SHIFT) > current->rlim[RLIMIT_MEMLOCK].rlim_cur) {
- spin_unlock(&vma->vm_mm->page_table_lock);
- return -ENOMEM;
- }
-
-
- vma->vm_start = address;
- vma->vm_pgoff -= grow;
- vma->vm_mm->total_vm += grow;
- if (vma->vm_flags & VM_LOCKED)
- vma->vm_mm->locked_vm += grow;
- spin_unlock(&vma->vm_mm->page_table_lock);
- return 0;
-}
-
-/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
-extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
-extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
-					     struct vm_area_struct **pprev);
-
-/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
-   NULL if none.  Assume start_addr < end_addr. */
-static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
-{
- struct vm_area_struct * vma = find_vma(mm,start_addr);
-
- if (vma && end_addr <= vma->vm_start)
- vma = NULL;
- return vma;
-}
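
find_vma_intersection() leans on find_vma()'s postcondition (first area with addr < vm_end) and only needs one extra comparison; a userspace model over a sorted array (struct vma here is a stand-in, not the kernel type):

  #include <stdio.h>

  struct vma { unsigned long start, end; };   /* covers [start, end)     */

  /* find_vma() analogue over a sorted array: first area with addr < end. */
  static struct vma *find(struct vma *v, int n, unsigned long addr)
  {
      for (int i = 0; i < n; i++)
          if (addr < v[i].end)
              return &v[i];
      return 0;
  }

  int main(void)
  {
      struct vma map[] = { { 0x1000, 0x2000 }, { 0x8000, 0x9000 } };
      unsigned long lo = 0x3000, hi = 0x5000;

      struct vma *v = find(map, 2, lo);
      /* Intersection test from above: candidate must start before hi. */
      if (v && hi <= v->start)
          v = 0;
      printf(v ? "overlaps a mapping\n" : "range is free\n");
      return 0;
  }
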
-
-extern struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr);
-
-extern struct page * vmalloc_to_page(void *addr);
-
-#endif /* __KERNEL__ */
-
-#endif
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/include/linux/sched.h
--- a/linux-2.4.30-xen-sparse/include/linux/sched.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,971 +0,0 @@
-#ifndef _LINUX_SCHED_H
-#define _LINUX_SCHED_H
-
-#include <asm/param.h> /* for HZ */
-
-extern unsigned long event;
-
-#include <linux/config.h>
-#include <linux/binfmts.h>
-#include <linux/threads.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/times.h>
-#include <linux/timex.h>
-#include <linux/rbtree.h>
-
-#include <asm/system.h>
-#include <asm/semaphore.h>
-#include <asm/page.h>
-#include <asm/ptrace.h>
-#include <asm/mmu.h>
-
-#include <linux/smp.h>
-#include <linux/tty.h>
-#include <linux/sem.h>
-#include <linux/signal.h>
-#include <linux/securebits.h>
-#include <linux/fs_struct.h>
-
-struct exec_domain;
-
-/*
- * cloning flags:
- */
-#define CSIGNAL		0x000000ff	/* signal mask to be sent at exit */
-#define CLONE_VM	0x00000100	/* set if VM shared between processes */
-#define CLONE_FS	0x00000200	/* set if fs info shared between processes */
-#define CLONE_FILES	0x00000400	/* set if open files shared between processes */
-#define CLONE_SIGHAND	0x00000800	/* set if signal handlers and blocked signals shared */
-#define CLONE_PID	0x00001000	/* set if pid shared */
-#define CLONE_PTRACE	0x00002000	/* set if we want to let tracing continue on the child too */
-#define CLONE_VFORK	0x00004000	/* set if the parent wants the child to wake it up on mm_release */
-#define CLONE_PARENT	0x00008000	/* set if we want to have the same parent as the cloner */
-#define CLONE_THREAD 0x00010000 /* Same thread group? */
-#define CLONE_NEWNS 0x00020000 /* New namespace group? */
-
-#define CLONE_SIGNAL (CLONE_SIGHAND | CLONE_THREAD)
-
-/*
- * These are the constants used to fake the fixed-point load-average
- * counting. Some notes:
- * - 11 bit fractions expand to 22 bits by the multiplies: this gives
- * a load-average precision of 10 bits integer + 11 bits fractional
- * - if you want to count load-averages more often, you need more
- * precision, or rounding will get you. With 2-second counting freq,
- * the EXP_n values would be 1981, 2034 and 2043 if still using only
- * 11 bit fractions.
- */
-extern unsigned long avenrun[]; /* Load averages */
-
-#define FSHIFT 11 /* nr of bits of precision */
-#define FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-point */
-#define LOAD_FREQ (5*HZ) /* 5 sec intervals */
-#define EXP_1 1884 /* 1/exp(5sec/1min) as fixed-point */
-#define EXP_5 2014 /* 1/exp(5sec/5min) */
-#define EXP_15 2037 /* 1/exp(5sec/15min) */
-
-#define CALC_LOAD(load,exp,n) \
- load *= exp; \
- load += n*(FIXED_1-exp); \
- load >>= FSHIFT;
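
CALC_LOAD is a fixed-point exponential moving average: multiply by exp, blend in the new sample scaled by FIXED_1-exp, shift back down. A runnable demonstration that feeds a constant three runnable tasks through twelve 5-second ticks (roughly one minute):

  #include <stdio.h>

  #define FSHIFT  11
  #define FIXED_1 (1 << FSHIFT)
  #define EXP_1   1884                     /* 1/exp(5s/1min) fixed-point */

  #define CALC_LOAD(load, exp, n) \
      load *= (exp);              \
      load += (n) * (FIXED_1 - (exp)); \
      load >>= FSHIFT;

  int main(void)
  {
      unsigned long avenrun = 0;               /* 1-minute average       */
      unsigned long runnable = 3 * FIXED_1;    /* 3 tasks, fixed-point   */

      /* One decayed sample every LOAD_FREQ (5 s); twelve of them ~1 min. */
      for (int i = 0; i < 12; i++) {
          CALC_LOAD(avenrun, EXP_1, runnable);
      }

      /* Same presentation the kernel uses for /proc/loadavg. */
      printf("load: %lu.%02lu\n", avenrun >> FSHIFT,
             (avenrun & (FIXED_1 - 1)) * 100 / FIXED_1);
      return 0;
  }

After a minute the average has climbed to roughly 1.9, still converging toward 3.0, which is exactly the lag the comment's precision notes are about.
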
-
-#define CT_TO_SECS(x) ((x) / HZ)
-#define CT_TO_USECS(x) (((x) % HZ) * 1000000/HZ)
-
-extern int nr_running, nr_threads;
-extern int last_pid;
-
-#include <linux/fs.h>
-#include <linux/time.h>
-#include <linux/param.h>
-#include <linux/resource.h>
-#ifdef __KERNEL__
-#include <linux/timer.h>
-#endif
-
-#include <asm/processor.h>
-
-#define TASK_RUNNING 0
-#define TASK_INTERRUPTIBLE 1
-#define TASK_UNINTERRUPTIBLE 2
-#define TASK_ZOMBIE 4
-#define TASK_STOPPED 8
-
-#define __set_task_state(tsk, state_value) \
- do { (tsk)->state = (state_value); } while (0)
-#define set_task_state(tsk, state_value) \
- set_mb((tsk)->state, (state_value))
-
-#define __set_current_state(state_value) \
- do { current->state = (state_value); } while (0)
-#define set_current_state(state_value) \
- set_mb(current->state, (state_value))
-
-/*
- * Scheduling policies
- */
-#define SCHED_OTHER 0
-#define SCHED_FIFO 1
-#define SCHED_RR 2
-
-/*
- * This is an additional bit set when we want to
- * yield the CPU for one re-schedule..
- */
-#define SCHED_YIELD 0x10
-
-struct sched_param {
- int sched_priority;
-};
-
-struct completion;
-
-#ifdef __KERNEL__
-
-#include <linux/spinlock.h>
-
-/*
- * This serializes "schedule()" and also protects
- * the run-queue from deletions/modifications (but
- * _adding_ to the beginning of the run-queue has
- * a separate lock).
- */
-extern rwlock_t tasklist_lock;
-extern spinlock_t runqueue_lock;
-extern spinlock_t mmlist_lock;
-
-extern void sched_init(void);
-extern void init_idle(void);
-extern void show_state(void);
-extern void cpu_init (void);
-extern void trap_init(void);
-extern void update_process_times(int user);
-#ifdef CONFIG_NO_IDLE_HZ
-extern void update_process_times_us(int user, int system);
-#endif
-extern void update_one_process(struct task_struct *p, unsigned long user,
- unsigned long system, int cpu);
-
-#define MAX_SCHEDULE_TIMEOUT LONG_MAX
-extern signed long FASTCALL(schedule_timeout(signed long timeout));
-asmlinkage void schedule(void);
-
-extern int schedule_task(struct tq_struct *task);
-extern void flush_scheduled_tasks(void);
-extern int start_context_thread(void);
-extern int current_is_keventd(void);
-
-#if CONFIG_SMP
-extern void set_cpus_allowed(struct task_struct *p, unsigned long new_mask);
-#else
-# define set_cpus_allowed(p, new_mask) do { } while (0)
-#endif
-
-/*
- * The default fd array needs to be at least BITS_PER_LONG,
- * as this is the granularity returned by copy_fdset().
- */
-#define NR_OPEN_DEFAULT BITS_PER_LONG
-
-struct namespace;
-/*
- * Open file table structure
- */
-struct files_struct {
- atomic_t count;
-	rwlock_t file_lock;	/* Protects all the below members. Nests inside tsk->alloc_lock */
- int max_fds;
- int max_fdset;
- int next_fd;
- struct file ** fd; /* current fd array */
- fd_set *close_on_exec;
- fd_set *open_fds;
- fd_set close_on_exec_init;
- fd_set open_fds_init;
- struct file * fd_array[NR_OPEN_DEFAULT];
-};
-
-#define INIT_FILES \
-{ \
- count: ATOMIC_INIT(1), \
- file_lock: RW_LOCK_UNLOCKED, \
- max_fds: NR_OPEN_DEFAULT, \
- max_fdset: __FD_SETSIZE, \
- next_fd: 0, \
- fd: &init_files.fd_array[0], \
- close_on_exec: &init_files.close_on_exec_init, \
- open_fds: &init_files.open_fds_init, \
- close_on_exec_init: { { 0, } }, \
- open_fds_init: { { 0, } }, \
- fd_array: { NULL, } \
-}
-
-/* Maximum number of active map areas.. This is a random (large) number */
-#define DEFAULT_MAX_MAP_COUNT (65536)
-
-extern int max_map_count;
-
-struct mm_struct {
- struct vm_area_struct * mmap; /* list of VMAs */
- rb_root_t mm_rb;
- struct vm_area_struct * mmap_cache; /* last find_vma result */
- pgd_t * pgd;
-	atomic_t mm_users;			/* How many users with user space? */
-	atomic_t mm_count;			/* How many references to "struct mm_struct" (users count as 1) */
-	int map_count;				/* number of VMAs */
-	struct rw_semaphore mmap_sem;
-	spinlock_t page_table_lock;		/* Protects task page tables and mm->rss */
-
-	struct list_head mmlist;		/* List of all active mm's.  These are globally strung
-						 * together off init_mm.mmlist, and are protected
- * by mmlist_lock
- */
-
- unsigned long start_code, end_code, start_data, end_data;
- unsigned long start_brk, brk, start_stack;
- unsigned long arg_start, arg_end, env_start, env_end;
- unsigned long rss, total_vm, locked_vm;
- unsigned long def_flags;
- unsigned long cpu_vm_mask;
- unsigned long swap_address;
-
- unsigned dumpable:1;
-
- /* Architecture-specific MM context */
- mm_context_t context;
-};
-
-extern int mmlist_nr;
-
-#define INIT_MM(name) \
-{ \
- mm_rb: RB_ROOT, \
- pgd: swapper_pg_dir, \
- mm_users: ATOMIC_INIT(2), \
- mm_count: ATOMIC_INIT(1), \
- mmap_sem: __RWSEM_INITIALIZER(name.mmap_sem), \
- page_table_lock: SPIN_LOCK_UNLOCKED, \
- mmlist: LIST_HEAD_INIT(name.mmlist), \
-}
-
-struct signal_struct {
- atomic_t count;
- struct k_sigaction action[_NSIG];
- spinlock_t siglock;
-};
-
-
-#define INIT_SIGNALS { \
- count: ATOMIC_INIT(1), \
- action: { {{0,}}, }, \
- siglock: SPIN_LOCK_UNLOCKED \
-}
-
-/*
- * Some day this will be a full-fledged user tracking system..
- */
-struct user_struct {
- atomic_t __count; /* reference count */
- atomic_t processes; /* How many processes does this user have? */
- atomic_t files; /* How many open files does this user have? */
-
- /* Hash table maintenance information */
- struct user_struct *next, **pprev;
- uid_t uid;
-};
-
-#define get_current_user() ({ \
- struct user_struct *__tmp_user = current->user; \
- atomic_inc(&__tmp_user->__count); \
- __tmp_user; })
-
-extern struct user_struct root_user;
-#define INIT_USER (&root_user)
-
-struct task_struct {
- /*
- * offsets of these are hardcoded elsewhere - touch with care
- */
- volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
- unsigned long flags; /* per process flags, defined below */
- int sigpending;
- mm_segment_t addr_limit; /* thread address space:
-					0-0xBFFFFFFF for user-thread
- 0-0xFFFFFFFF for kernel-thread
- */
- struct exec_domain *exec_domain;
- volatile long need_resched;
- unsigned long ptrace;
-
- int lock_depth; /* Lock depth */
-
-/*
- * offset 32 begins here on 32-bit platforms. We keep
- * all fields in a single cacheline that are needed for
- * the goodness() loop in schedule().
- */
- long counter;
- long nice;
- unsigned long policy;
- struct mm_struct *mm;
- int processor;
- /*
- * cpus_runnable is ~0 if the process is not running on any
- * CPU. It's (1 << cpu) if it's running on a CPU. This mask
- * is updated under the runqueue lock.
- *
- * To determine whether a process might run on a CPU, this
- * mask is AND-ed with cpus_allowed.
- */
- unsigned long cpus_runnable, cpus_allowed;
- /*
- * (only the 'next' pointer fits into the cacheline, but
- * that's just fine.)
- */
- struct list_head run_list;
- unsigned long sleep_time;
-
- struct task_struct *next_task, *prev_task;
- struct mm_struct *active_mm;
- struct list_head local_pages;
- unsigned int allocation_order, nr_local_pages;
-
-/* task state */
- struct linux_binfmt *binfmt;
- int exit_code, exit_signal;
- int pdeath_signal; /* The signal sent when the parent dies */
- /* ??? */
- unsigned long personality;
- int did_exec:1;
- unsigned task_dumpable:1;
- pid_t pid;
- pid_t pgrp;
- pid_t tty_old_pgrp;
- pid_t session;
- pid_t tgid;
- /* boolean value for session group leader */
- int leader;
- /*
-	 * pointers to (original) parent process, youngest child, younger sibling,
- * older sibling, respectively. (p->father can be replaced with
- * p->p_pptr->pid)
- */
- struct task_struct *p_opptr, *p_pptr, *p_cptr, *p_ysptr, *p_osptr;
- struct list_head thread_group;
-
- /* PID hash table linkage. */
- struct task_struct *pidhash_next;
- struct task_struct **pidhash_pprev;
-
- wait_queue_head_t wait_chldexit; /* for wait4() */
- struct completion *vfork_done; /* for vfork() */
- unsigned long rt_priority;
- unsigned long it_real_value, it_prof_value, it_virt_value;
- unsigned long it_real_incr, it_prof_incr, it_virt_incr;
- struct timer_list real_timer;
- struct tms times;
- unsigned long start_time;
- long per_cpu_utime[NR_CPUS], per_cpu_stime[NR_CPUS];
-/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
- unsigned long min_flt, maj_flt, nswap, cmin_flt, cmaj_flt, cnswap;
- int swappable:1;
-/* process credentials */
- uid_t uid,euid,suid,fsuid;
- gid_t gid,egid,sgid,fsgid;
- int ngroups;
- gid_t groups[NGROUPS];
- kernel_cap_t cap_effective, cap_inheritable, cap_permitted;
- int keep_capabilities:1;
- struct user_struct *user;
-/* limits */
- struct rlimit rlim[RLIM_NLIMITS];
- unsigned short used_math;
- char comm[16];
-/* file system info */
- int link_count, total_link_count;
- struct tty_struct *tty; /* NULL if no tty */
- unsigned int locks; /* How many file locks are being held */
-/* ipc stuff */
- struct sem_undo *semundo;
- struct sem_queue *semsleeping;
-/* CPU-specific state of this task */
- struct thread_struct thread;
-/* filesystem information */
- struct fs_struct *fs;
-/* open file information */
- struct files_struct *files;
-/* namespace */
- struct namespace *namespace;
-/* signal handlers */
- spinlock_t sigmask_lock; /* Protects signal and blocked */
- struct signal_struct *sig;
-
- sigset_t blocked;
- struct sigpending pending;
-
- unsigned long sas_ss_sp;
- size_t sas_ss_size;
- int (*notifier)(void *priv);
- void *notifier_data;
- sigset_t *notifier_mask;
-
-/* Thread group tracking */
- u32 parent_exec_id;
- u32 self_exec_id;
-/* Protection of (de-)allocation: mm, files, fs, tty */
- spinlock_t alloc_lock;
-
-/* journalling filesystem info */
- void *journal_info;
-};
-
-/*
- * Per process flags
- */
-#define PF_ALIGNWARN 0x00000001 /* Print alignment warning msgs */
- /* Not implemented yet, only for 486*/
-#define PF_STARTING 0x00000002 /* being created */
-#define PF_EXITING 0x00000004 /* getting shut down */
-#define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */
-#define PF_SUPERPRIV 0x00000100 /* used super-user privileges */
-#define PF_DUMPCORE 0x00000200 /* dumped core */
-#define PF_SIGNALED 0x00000400 /* killed by a signal */
-#define PF_MEMALLOC 0x00000800 /* Allocating memory */
-#define PF_MEMDIE 0x00001000 /* Killed for out-of-memory */
-#define PF_FREE_PAGES 0x00002000 /* per process page freeing */
-#define PF_NOIO		0x00004000	/* avoid generating further I/O */
-#define PF_FSTRANS 0x00008000 /* inside a filesystem transaction */
-
-#define PF_USEDFPU 0x00100000 /* task used FPU this quantum (SMP) */
-
-/*
- * Ptrace flags
- */
-
-#define PT_PTRACED 0x00000001
-#define PT_TRACESYS 0x00000002
-#define PT_DTRACE 0x00000004 /* delayed trace (used on m68k, i386) */
-#define PT_TRACESYSGOOD 0x00000008
-#define PT_PTRACE_CAP 0x00000010 /* ptracer can follow suid-exec */
-
-#define is_dumpable(tsk)    ((tsk)->task_dumpable && (tsk)->mm && (tsk)->mm->dumpable)
-
-/*
- * Limit the stack to some sane default: root can always
- * increase this limit if needed.. 8MB seems reasonable.
- */
-#define _STK_LIM (8*1024*1024)
-
-#define DEF_COUNTER (10*HZ/100) /* 100 ms time slice */
-#define MAX_COUNTER (20*HZ/100)
-#define DEF_NICE (0)
-
-extern void yield(void);
-
-/*
- * The default (Linux) execution domain.
- */
-extern struct exec_domain default_exec_domain;
-
-/*
- * INIT_TASK is used to set up the first task table, touch at
- * your own risk! Base=0, limit=0x1fffff (=2MB)
- */
-#define INIT_TASK(tsk) \
-{ \
- state: 0, \
- flags: 0, \
-	sigpending:	0,						\
-	addr_limit:	KERNEL_DS,					\
- exec_domain: &default_exec_domain, \
-	lock_depth:	-1,						\
- counter: DEF_COUNTER, \
- nice: DEF_NICE, \
- policy: SCHED_OTHER, \
-	mm:		NULL,						\
- active_mm: &init_mm, \
- cpus_runnable: ~0UL, \
- cpus_allowed: ~0UL, \
- run_list: LIST_HEAD_INIT(tsk.run_list), \
- next_task: &tsk, \
- prev_task: &tsk, \
- p_opptr: &tsk, \
- p_pptr: &tsk, \
- thread_group: LIST_HEAD_INIT(tsk.thread_group), \
- wait_chldexit: __WAIT_QUEUE_HEAD_INITIALIZER(tsk.wait_chldexit),\
-	real_timer:	{						\
- function: it_real_fn \
- }, \
- cap_effective: CAP_INIT_EFF_SET, \
- cap_inheritable: CAP_INIT_INH_SET, \
- cap_permitted: CAP_FULL_SET, \
- keep_capabilities: 0, \
- rlim: INIT_RLIMITS, \
- user: INIT_USER, \
- comm: "swapper", \
- thread: INIT_THREAD, \
-	fs:		&init_fs,					\
- files: &init_files, \
- sigmask_lock: SPIN_LOCK_UNLOCKED, \
- sig: &init_signals, \
- pending: { NULL, &tsk.pending.head, {{0}}}, \
- blocked: {{0}}, \
-	alloc_lock:	SPIN_LOCK_UNLOCKED,				\
- journal_info: NULL, \
-}
-
-
-#ifndef INIT_TASK_SIZE
-# define INIT_TASK_SIZE 2048*sizeof(long)
-#endif
-
-union task_union {
- struct task_struct task;
- unsigned long stack[INIT_TASK_SIZE/sizeof(long)];
-};
-
-extern union task_union init_task_union;
-
-extern struct mm_struct init_mm;
-extern struct task_struct *init_tasks[NR_CPUS];
-
-/* PID hashing. (shouldn't this be dynamic?) */
-#define PIDHASH_SZ (4096 >> 2)
-extern struct task_struct *pidhash[PIDHASH_SZ];
-
-#define pid_hashfn(x) ((((x) >> 8) ^ (x)) & (PIDHASH_SZ - 1))
-
-static inline void hash_pid(struct task_struct *p)
-{
- struct task_struct **htable = &pidhash[pid_hashfn(p->pid)];
-
- if((p->pidhash_next = *htable) != NULL)
- (*htable)->pidhash_pprev = &p->pidhash_next;
- *htable = p;
- p->pidhash_pprev = htable;
-}
-
-static inline void unhash_pid(struct task_struct *p)
-{
- if(p->pidhash_next)
- p->pidhash_next->pidhash_pprev = p->pidhash_pprev;
- *p->pidhash_pprev = p->pidhash_next;
-}
-
-static inline struct task_struct *find_task_by_pid(int pid)
-{
- struct task_struct *p, **htable = &pidhash[pid_hashfn(pid)];
-
- for(p = *htable; p && p->pid != pid; p = p->pidhash_next)
- ;
-
- return p;
-}
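
The hash_pid()/find_task_by_pid() pair above is open hashing keyed by pid_hashfn(); a simplified standalone version using singly linked chains (the kernel's pprev links exist only to make unhashing O(1)):

  #include <stdio.h>

  #define PIDHASH_SZ (4096 >> 2)
  #define pid_hashfn(x) ((((x) >> 8) ^ (x)) & (PIDHASH_SZ - 1))

  struct task { int pid; struct task *next; };

  static struct task *hash[PIDHASH_SZ];

  static void insert(struct task *t)            /* hash_pid() analogue  */
  {
      t->next = hash[pid_hashfn(t->pid)];
      hash[pid_hashfn(t->pid)] = t;
  }

  static struct task *lookup(int pid)           /* find_task_by_pid()   */
  {
      struct task *t = hash[pid_hashfn(pid)];
      while (t && t->pid != pid)
          t = t->next;
      return t;
  }

  int main(void)
  {
      struct task a = { 1, 0 }, b = { 1025, 0 };
      insert(&a);
      insert(&b);
      printf("found pid %d\n", lookup(1025)->pid);
      return 0;
  }
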
-
-#define task_has_cpu(tsk) ((tsk)->cpus_runnable != ~0UL)
-
-static inline void task_set_cpu(struct task_struct *tsk, unsigned int cpu)
-{
- tsk->processor = cpu;
- tsk->cpus_runnable = 1UL << cpu;
-}
-
-static inline void task_release_cpu(struct task_struct *tsk)
-{
- tsk->cpus_runnable = ~0UL;
-}
-
-/* per-UID process charging. */
-extern struct user_struct * alloc_uid(uid_t);
-extern void free_uid(struct user_struct *);
-extern void switch_uid(struct user_struct *);
-
-#include <asm/current.h>
-
-extern unsigned long volatile jiffies;
-extern unsigned long itimer_ticks;
-extern unsigned long itimer_next;
-extern struct timeval xtime;
-extern void do_timer(struct pt_regs *);
-#ifdef CONFIG_NO_IDLE_HZ
-extern void do_timer_ticks(int ticks);
-#endif
-
-extern unsigned int * prof_buffer;
-extern unsigned long prof_len;
-extern unsigned long prof_shift;
-
-#define CURRENT_TIME (xtime.tv_sec)
-
-extern void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode, int nr));
-extern void FASTCALL(__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr));
-extern void FASTCALL(sleep_on(wait_queue_head_t *q));
-extern long FASTCALL(sleep_on_timeout(wait_queue_head_t *q,
- signed long timeout));
-extern void FASTCALL(interruptible_sleep_on(wait_queue_head_t *q));
-extern long FASTCALL(interruptible_sleep_on_timeout(wait_queue_head_t *q,
- signed long timeout));
-extern int FASTCALL(wake_up_process(struct task_struct * tsk));
-
-#define wake_up(x)			__wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1)
-#define wake_up_nr(x, nr)		__wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr)
-#define wake_up_all(x)			__wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0)
-#define wake_up_sync(x)			__wake_up_sync((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1)
-#define wake_up_sync_nr(x, nr)		__wake_up_sync((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr)
-#define wake_up_interruptible(x)	__wake_up((x),TASK_INTERRUPTIBLE, 1)
-#define wake_up_interruptible_nr(x, nr)	__wake_up((x),TASK_INTERRUPTIBLE, nr)
-#define wake_up_interruptible_all(x)	__wake_up((x),TASK_INTERRUPTIBLE, 0)
-#define wake_up_interruptible_sync(x)	__wake_up_sync((x),TASK_INTERRUPTIBLE, 1)
-#define wake_up_interruptible_sync_nr(x, nr) __wake_up_sync((x),TASK_INTERRUPTIBLE, nr)
-asmlinkage long sys_wait4(pid_t pid,unsigned int * stat_addr, int options, struct rusage * ru);
-
-extern int in_group_p(gid_t);
-extern int in_egroup_p(gid_t);
-
-extern void proc_caches_init(void);
-extern void flush_signals(struct task_struct *);
-extern void flush_signal_handlers(struct task_struct *);
-extern void sig_exit(int, int, struct siginfo *);
-extern int dequeue_signal(sigset_t *, siginfo_t *);
-extern void block_all_signals(int (*notifier)(void *priv), void *priv,
- sigset_t *mask);
-extern void unblock_all_signals(void);
-extern int send_sig_info(int, struct siginfo *, struct task_struct *);
-extern int force_sig_info(int, struct siginfo *, struct task_struct *);
-extern int kill_pg_info(int, struct siginfo *, pid_t);
-extern int kill_sl_info(int, struct siginfo *, pid_t);
-extern int kill_proc_info(int, struct siginfo *, pid_t);
-extern void notify_parent(struct task_struct *, int);
-extern void do_notify_parent(struct task_struct *, int);
-extern void force_sig(int, struct task_struct *);
-extern int send_sig(int, struct task_struct *, int);
-extern int kill_pg(pid_t, int, int);
-extern int kill_sl(pid_t, int, int);
-extern int kill_proc(pid_t, int, int);
-extern int do_sigaction(int, const struct k_sigaction *, struct k_sigaction *);
-extern int do_sigaltstack(const stack_t *, stack_t *, unsigned long);
-
-static inline int signal_pending(struct task_struct *p)
-{
- return (p->sigpending != 0);
-}
-
-/*
- * Re-calculate pending state from the set of locally pending
- * signals, globally pending signals, and blocked signals.
- */
-static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
-{
- unsigned long ready;
- long i;
-
- switch (_NSIG_WORDS) {
- default:
- for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
- ready |= signal->sig[i] &~ blocked->sig[i];
- break;
-
- case 4: ready = signal->sig[3] &~ blocked->sig[3];
- ready |= signal->sig[2] &~ blocked->sig[2];
- ready |= signal->sig[1] &~ blocked->sig[1];
- ready |= signal->sig[0] &~ blocked->sig[0];
- break;
-
- case 2: ready = signal->sig[1] &~ blocked->sig[1];
- ready |= signal->sig[0] &~ blocked->sig[0];
- break;
-
- case 1: ready = signal->sig[0] &~ blocked->sig[0];
- }
- return ready != 0;
-}
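
has_pending_signals() is just a word-by-word signal & ~blocked scan, with the switch unrolling the loop for common _NSIG_WORDS values; a compact model (NSIG_WORDS chosen arbitrarily here):

  #include <stdio.h>

  #define NSIG_WORDS 2

  /* has_pending_signals() analogue: any bit set in signal & ~blocked?  */
  static int pending(unsigned long *sig, unsigned long *blocked)
  {
      unsigned long ready = 0;
      for (int i = 0; i < NSIG_WORDS; i++)
          ready |= sig[i] & ~blocked[i];
      return ready != 0;
  }

  int main(void)
  {
      unsigned long sig[NSIG_WORDS]     = { 0x100, 0 };  /* one signal  */
      unsigned long blocked[NSIG_WORDS] = { 0x100, 0 };  /* ...blocked  */
      printf("deliverable: %s\n", pending(sig, blocked) ? "yes" : "no");
      return 0;
  }
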
-
-/* Reevaluate whether the task has signals pending delivery.
- This is required every time the blocked sigset_t changes.
- All callers should have t->sigmask_lock. */
-
-static inline void recalc_sigpending(struct task_struct *t)
-{
- t->sigpending = has_pending_signals(&t->pending.signal, &t->blocked);
-}
-
-/* True if we are on the alternate signal stack. */
-
-static inline int on_sig_stack(unsigned long sp)
-{
- return (sp - current->sas_ss_sp < current->sas_ss_size);
-}
-
-static inline int sas_ss_flags(unsigned long sp)
-{
- return (current->sas_ss_size == 0 ? SS_DISABLE
- : on_sig_stack(sp) ? SS_ONSTACK : 0);
-}
-
-extern int request_irq(unsigned int,
- void (*handler)(int, void *, struct pt_regs *),
- unsigned long, const char *, void *);
-extern void free_irq(unsigned int, void *);
-
-/*
- * This has now become a routine instead of a macro, it sets a flag if
- * it returns true (to do BSD-style accounting where the process is flagged
- * if it uses root privs). The implication of this is that you should do
- * normal permissions checks first, and check suser() last.
- *
- * [Dec 1997 -- Chris Evans]
- * For correctness, the above considerations need to be extended to
- * fsuser(). This is done, along with moving fsuser() checks to be
- * last.
- *
- * These will be removed, but in the mean time, when the SECURE_NOROOT
- * flag is set, uids don't grant privilege.
- */
-static inline int suser(void)
-{
- if (!issecure(SECURE_NOROOT) && current->euid == 0) {
- current->flags |= PF_SUPERPRIV;
- return 1;
- }
- return 0;
-}
-
-static inline int fsuser(void)
-{
- if (!issecure(SECURE_NOROOT) && current->fsuid == 0) {
- current->flags |= PF_SUPERPRIV;
- return 1;
- }
- return 0;
-}
-
-/*
- * capable() checks for a particular capability.
- * New privilege checks should use this interface, rather than suser() or
- * fsuser(). See include/linux/capability.h for defined capabilities.
- */
-
-static inline int capable(int cap)
-{
-#if 1 /* ok now */
- if (cap_raised(current->cap_effective, cap))
-#else
- if (cap_is_fs_cap(cap) ? current->fsuid == 0 : current->euid == 0)
-#endif
- {
- current->flags |= PF_SUPERPRIV;
- return 1;
- }
- return 0;
-}
-
-/*
- * Routines for handling mm_structs
- */
-extern struct mm_struct * mm_alloc(void);
-
-extern struct mm_struct * start_lazy_tlb(void);
-extern void end_lazy_tlb(struct mm_struct *mm);
-
-/* mmdrop drops the mm and the page tables */
-extern void FASTCALL(__mmdrop(struct mm_struct *));
-static inline void mmdrop(struct mm_struct * mm)
-{
- if (atomic_dec_and_test(&mm->mm_count))
- __mmdrop(mm);
-}
-
-/* mmput gets rid of the mappings and all user-space */
-extern void mmput(struct mm_struct *);
-/* Remove the current task's stale references to the old mm_struct */
-extern void mm_release(void);
-
-/*
- * Routines for handling the fd arrays
- */
-extern struct file ** alloc_fd_array(int);
-extern int expand_fd_array(struct files_struct *, int nr);
-extern void free_fd_array(struct file **, int);
-
-extern fd_set *alloc_fdset(int);
-extern int expand_fdset(struct files_struct *, int nr);
-extern void free_fdset(fd_set *, int);
-
-extern int copy_thread(int, unsigned long, unsigned long, unsigned long, struct task_struct *, struct pt_regs *);
-extern void flush_thread(void);
-extern void exit_thread(void);
-
-extern void exit_mm(struct task_struct *);
-extern void exit_files(struct task_struct *);
-extern void exit_sighand(struct task_struct *);
-
-extern void reparent_to_init(void);
-extern void daemonize(void);
-
-extern int do_execve(char *, char **, char **, struct pt_regs *);
-extern int do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long);
-
-extern void set_task_comm(struct task_struct *tsk, char *from);
-extern void get_task_comm(char *to, struct task_struct *tsk);
-
-extern void FASTCALL(add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait));
-extern void FASTCALL(add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait));
-extern void FASTCALL(remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait));
-
-extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
-
-#define __wait_event(wq, condition) \
-do { \
- wait_queue_t __wait; \
- init_waitqueue_entry(&__wait, current); \
- \
- add_wait_queue(&wq, &__wait); \
- for (;;) { \
- set_current_state(TASK_UNINTERRUPTIBLE); \
- if (condition) \
- break; \
- schedule(); \
- } \
- current->state = TASK_RUNNING; \
- remove_wait_queue(&wq, &__wait); \
-} while (0)
-
-#define wait_event(wq, condition) \
-do { \
- if (condition) \
- break; \
- __wait_event(wq, condition); \
-} while (0)
-
-#define __wait_event_interruptible(wq, condition, ret) \
-do { \
- wait_queue_t __wait; \
- init_waitqueue_entry(&__wait, current); \
- \
- add_wait_queue(&wq, &__wait); \
- for (;;) { \
- set_current_state(TASK_INTERRUPTIBLE); \
- if (condition) \
- break; \
- if (!signal_pending(current)) { \
- schedule(); \
- continue; \
- } \
- ret = -ERESTARTSYS; \
- break; \
- } \
- current->state = TASK_RUNNING; \
- remove_wait_queue(&wq, &__wait); \
-} while (0)
-
-#define wait_event_interruptible(wq, condition)				\
-({ \
- int __ret = 0; \
- if (!(condition)) \
- __wait_event_interruptible(wq, condition, __ret); \
- __ret; \
-})
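
The wait_event loop discipline above - enqueue, re-test the condition after every wakeup, dequeue - is the same pattern a POSIX condition variable enforces; a userspace analogue built on pthreads (not kernel code; build with -lpthread):

  #include <pthread.h>
  #include <stdio.h>
  #include <unistd.h>

  static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
  static pthread_cond_t  wq   = PTHREAD_COND_INITIALIZER;
  static int condition;

  static void *producer(void *arg)
  {
      (void)arg;
      sleep(1);
      pthread_mutex_lock(&lock);
      condition = 1;                  /* make the condition true...     */
      pthread_cond_broadcast(&wq);    /* ...then wake the sleepers      */
      pthread_mutex_unlock(&lock);
      return 0;
  }

  int main(void)
  {
      pthread_t t;
      pthread_create(&t, 0, producer, 0);

      pthread_mutex_lock(&lock);
      while (!condition)                  /* re-test after every wakeup, */
          pthread_cond_wait(&wq, &lock);  /* exactly like the for (;;)   */
      pthread_mutex_unlock(&lock);

      printf("condition met, continuing\n");
      pthread_join(t, 0);
      return 0;
  }
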
-
-#define REMOVE_LINKS(p) do { \
- (p)->next_task->prev_task = (p)->prev_task; \
- (p)->prev_task->next_task = (p)->next_task; \
- if ((p)->p_osptr) \
- (p)->p_osptr->p_ysptr = (p)->p_ysptr; \
- if ((p)->p_ysptr) \
- (p)->p_ysptr->p_osptr = (p)->p_osptr; \
- else \
- (p)->p_pptr->p_cptr = (p)->p_osptr; \
- } while (0)
-
-#define SET_LINKS(p) do { \
- (p)->next_task = &init_task; \
- (p)->prev_task = init_task.prev_task; \
- init_task.prev_task->next_task = (p); \
- init_task.prev_task = (p); \
- (p)->p_ysptr = NULL; \
- if (((p)->p_osptr = (p)->p_pptr->p_cptr) != NULL) \
- (p)->p_osptr->p_ysptr = p; \
- (p)->p_pptr->p_cptr = p; \
- } while (0)
-
-#define for_each_task(p) \
- for (p = &init_task ; (p = p->next_task) != &init_task ; )
-
-#define for_each_thread(task) \
-	for (task = next_thread(current) ; task != current ; task = next_thread(task))
-
-#define next_thread(p) \
- list_entry((p)->thread_group.next, struct task_struct, thread_group)
-
-#define thread_group_leader(p) (p->pid == p->tgid)
-
-static inline void del_from_runqueue(struct task_struct * p)
-{
- nr_running--;
- p->sleep_time = jiffies;
- list_del(&p->run_list);
- p->run_list.next = NULL;
-}
-
-static inline int task_on_runqueue(struct task_struct *p)
-{
- return (p->run_list.next != NULL);
-}
-
-static inline void unhash_process(struct task_struct *p)
-{
- if (task_on_runqueue(p))
- out_of_line_bug();
- write_lock_irq(&tasklist_lock);
- nr_threads--;
- unhash_pid(p);
- REMOVE_LINKS(p);
- list_del(&p->thread_group);
- write_unlock_irq(&tasklist_lock);
-}
-
-/* Protects ->fs, ->files, ->mm, and synchronises with wait4(). Nests inside tasklist_lock */
-static inline void task_lock(struct task_struct *p)
-{
- spin_lock(&p->alloc_lock);
-}
-
-static inline void task_unlock(struct task_struct *p)
-{
- spin_unlock(&p->alloc_lock);
-}
-
-/* write full pathname into buffer and return start of pathname */
-static inline char * d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
- char *buf, int buflen)
-{
- char *res;
- struct vfsmount *rootmnt;
- struct dentry *root;
-	read_lock(&current->fs->lock);
- rootmnt = mntget(current->fs->rootmnt);
- root = dget(current->fs->root);
-	read_unlock(&current->fs->lock);
- spin_lock(&dcache_lock);
- res = __d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
- spin_unlock(&dcache_lock);
- dput(root);
- mntput(rootmnt);
- return res;
-}
-
-static inline int need_resched(void)
-{
- return (unlikely(current->need_resched));
-}
-
-extern void __cond_resched(void);
-static inline void cond_resched(void)
-{
- if (need_resched())
- __cond_resched();
-}
-
-#endif /* __KERNEL__ */
-#endif
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/include/linux/skbuff.h
--- a/linux-2.4.30-xen-sparse/include/linux/skbuff.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,1181 +0,0 @@
-/*
- * Definitions for the 'struct sk_buff' memory handlers.
- *
- * Authors:
- * Alan Cox, <gw4pts@xxxxxxxxxxxxxxx>
- * Florian La Roche, <rzsfl@xxxxxxxxxxxx>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef _LINUX_SKBUFF_H
-#define _LINUX_SKBUFF_H
-
-#include <linux/config.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/time.h>
-#include <linux/cache.h>
-
-#include <asm/atomic.h>
-#include <asm/types.h>
-#include <linux/spinlock.h>
-#include <linux/mm.h>
-#include <linux/highmem.h>
-
-#define HAVE_ALLOC_SKB /* For the drivers to know */
-#define HAVE_ALIGNABLE_SKB /* Ditto 8) */
-#define SLAB_SKB /* Slabified skbuffs */
-
-#define CHECKSUM_NONE 0
-#define CHECKSUM_HW 1
-#define CHECKSUM_UNNECESSARY 2
-
-#define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES-1)) & ~(SMP_CACHE_BYTES-1))
-#define SKB_MAX_ORDER(X,ORDER)	(((PAGE_SIZE<<(ORDER)) - (X) - sizeof(struct skb_shared_info))&~(SMP_CACHE_BYTES-1))
-#define SKB_MAX_HEAD(X) (SKB_MAX_ORDER((X),0))
-#define SKB_MAX_ALLOC (SKB_MAX_ORDER(0,2))
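
SKB_DATA_ALIGN rounds a buffer size up to the next cache-line multiple so the skb_shared_info appended at skb->end starts cache-aligned; a quick standalone check (SMP_CACHE_BYTES assumed 64 here):

  #include <stdio.h>

  #define SMP_CACHE_BYTES 64          /* assumption for this sketch     */
  #define SKB_DATA_ALIGN(x) (((x) + (SMP_CACHE_BYTES - 1)) & ~(SMP_CACHE_BYTES - 1))

  int main(void)
  {
      for (unsigned n = 60; n <= 68; n += 4)
          printf("%u -> %lu\n", n, (unsigned long)SKB_DATA_ALIGN(n));
      /* 60 -> 64, 64 -> 64, 68 -> 128: sizes are rounded up so the
       * shared info area begins on its own cache line. */
      return 0;
  }
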
-
-/* A. Checksumming of received packets by device.
- *
- * NONE: device failed to checksum this packet.
- * skb->csum is undefined.
- *
- * UNNECESSARY: device parsed the packet and believes it verified the checksum.
- * skb->csum is undefined.
- * It is a bad option, but, unfortunately, many vendors do this.
- * Apparently with the secret goal of selling you a new device when you
- * add a new protocol to your host. F.e. IPv6. 8)
- *
- * HW: the most generic way. Device supplied checksum of _all_
- * the packet as seen by netif_rx in skb->csum.
- * NOTE: Even if device supports only some protocols, but
- * is able to produce some skb->csum, it MUST use HW,
- * not UNNECESSARY.
- *
- * B. Checksumming on output.
- *
- * NONE: skb is checksummed by protocol or csum is not required.
- *
- * HW: device is required to csum packet as seen by hard_start_xmit
- * from skb->h.raw to the end and to record the checksum
- * at skb->h.raw+skb->csum.
- *
- * Device must show its capabilities in dev->features, set
- * at device setup time.
- * NETIF_F_HW_CSUM - it is clever device, it is able to checksum
- * everything.
- * NETIF_F_NO_CSUM - loopback or reliable single hop media.
- * NETIF_F_IP_CSUM - device is dumb. It is able to csum only
- * TCP/UDP over IPv4. Sigh. Vendors like this
- * way by an unknown reason. Though, see comment above
- * about CHECKSUM_UNNECESSARY. 8)
- *
- * Any questions? No questions, good. --ANK
- */
-
-#ifdef __i386__
-#define NET_CALLER(arg) (*(((void**)&arg)-1))
-#else
-#define NET_CALLER(arg) __builtin_return_address(0)
-#endif
-
-#ifdef CONFIG_NETFILTER
-struct nf_conntrack {
- atomic_t use;
- void (*destroy)(struct nf_conntrack *);
-};
-
-struct nf_ct_info {
- struct nf_conntrack *master;
-};
-#endif
-
-struct sk_buff_head {
- /* These two members must be first. */
- struct sk_buff * next;
- struct sk_buff * prev;
-
- __u32 qlen;
- spinlock_t lock;
-};
-
-struct sk_buff;
-
-#define MAX_SKB_FRAGS 6
-
-typedef struct skb_frag_struct skb_frag_t;
-
-struct skb_frag_struct
-{
- struct page *page;
- __u16 page_offset;
- __u16 size;
-};
-
-/* This data is invariant across clones and lives at
- * the end of the header data, ie. at skb->end.
- */
-struct skb_shared_info {
- atomic_t dataref;
- unsigned int nr_frags;
- struct sk_buff *frag_list;
- skb_frag_t frags[MAX_SKB_FRAGS];
-};
-
-struct sk_buff {
- /* These two members must be first. */
-	struct sk_buff	* next;			/* Next buffer in list				*/
-	struct sk_buff	* prev;			/* Previous buffer in list			*/
-
-	struct sk_buff_head * list;		/* List we are on				*/
-	struct sock	*sk;			/* Socket we are owned by			*/
-	struct timeval	stamp;			/* Time we arrived				*/
-	struct net_device	*dev;		/* Device we arrived on/are leaving by		*/
-	struct net_device	*real_dev;	/* For support of point to point protocols
-						   (e.g. 802.3ad) over bonding, we must save the
-						   physical device that got the packet before
-						   replacing skb->dev with the virtual device.	*/
-
- /* Transport layer header */
- union
- {
- struct tcphdr *th;
- struct udphdr *uh;
- struct icmphdr *icmph;
- struct igmphdr *igmph;
- struct iphdr *ipiph;
- struct spxhdr *spxh;
- unsigned char *raw;
- } h;
-
- /* Network layer header */
- union
- {
- struct iphdr *iph;
- struct ipv6hdr *ipv6h;
- struct arphdr *arph;
- struct ipxhdr *ipxh;
- unsigned char *raw;
- } nh;
-
- /* Link layer header */
- union
- {
- struct ethhdr *ethernet;
- unsigned char *raw;
- } mac;
-
- struct dst_entry *dst;
-
- /*
- * This is the control buffer. It is free to use for every
- * layer. Please put your private variables there. If you
- * want to keep them across layers you have to do a skb_clone()
- * first. This is owned by whoever has the skb queued ATM.
- */
- char cb[48];
-
-	unsigned int	len;			/* Length of actual data		*/
-	unsigned int	data_len;
-	unsigned int	csum;			/* Checksum				*/
-	unsigned char	__unused,		/* Dead field, may be reused		*/
-			cloned,			/* head may be cloned (check refcnt to be sure). */
-			pkt_type,		/* Packet class				*/
-			ip_summed;		/* Driver fed us an IP checksum		*/
-	__u32		priority;		/* Packet queueing priority		*/
-	atomic_t	users;			/* User count - see datagram.c,tcp.c	*/
-	unsigned short	protocol;		/* Packet protocol from driver.		*/
-	unsigned short	security;		/* Security level of packet		*/
-	unsigned int	truesize;		/* Buffer size				*/
-
-	unsigned char	*head;			/* Head of buffer			*/
-	unsigned char	*data;			/* Data head pointer			*/
-	unsigned char	*tail;			/* Tail pointer				*/
-	unsigned char	*end;			/* End pointer				*/
-
-	void		(*destructor)(struct sk_buff *);	/* Destruct function	*/
-#ifdef CONFIG_NETFILTER
- /* Can be used for communication between hooks. */
- unsigned long nfmark;
- /* Cache info */
- __u32 nfcache;
- /* Associated connection, if any */
- struct nf_ct_info *nfct;
-#ifdef CONFIG_NETFILTER_DEBUG
- unsigned int nf_debug;
-#endif
-#endif /*CONFIG_NETFILTER*/
-
-#if defined(CONFIG_HIPPI)
- union{
- __u32 ifield;
- } private;
-#endif
-
-#ifdef CONFIG_NET_SCHED
- __u32 tc_index; /* traffic control index */
-#endif
-};
-
-#ifdef __KERNEL__
-/*
- * Handling routines are only of interest to the kernel
- */
-#include <linux/slab.h>
-
-#include <asm/system.h>
-
-extern void __kfree_skb(struct sk_buff *skb);
-extern struct sk_buff *	alloc_skb(unsigned int size, int priority);
-extern struct sk_buff *	alloc_skb_from_cache(kmem_cache_t *cp, unsigned int size, int priority);
-extern void			kfree_skbmem(struct sk_buff *skb);
-extern struct sk_buff *	skb_clone(struct sk_buff *skb, int priority);
-extern struct sk_buff *	skb_copy(const struct sk_buff *skb, int priority);
-extern struct sk_buff *	pskb_copy(struct sk_buff *skb, int gfp_mask);
-extern int			pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, int gfp_mask);
-extern struct sk_buff *	skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom);
-extern struct sk_buff *	skb_copy_expand(const struct sk_buff *skb,
-						int newheadroom,
-						int newtailroom,
-						int priority);
-extern struct sk_buff * skb_pad(struct sk_buff *skb, int pad);
-#define dev_kfree_skb(a) kfree_skb(a)
-extern void skb_over_panic(struct sk_buff *skb, int len, void *here);
-extern void skb_under_panic(struct sk_buff *skb, int len, void *here);
-
-/* Internal */
-#define skb_shinfo(SKB) ((struct skb_shared_info *)((SKB)->end))
-
-/**
- * skb_queue_empty - check if a queue is empty
- * @list: queue head
- *
- * Returns true if the queue is empty, false otherwise.
- */
-
-static inline int skb_queue_empty(struct sk_buff_head *list)
-{
- return (list->next == (struct sk_buff *) list);
-}
-
-/**
- * skb_get - reference buffer
- * @skb: buffer to reference
- *
- * Makes another reference to a socket buffer and returns a pointer
- * to the buffer.
- */
-
-static inline struct sk_buff *skb_get(struct sk_buff *skb)
-{
- atomic_inc(&skb->users);
- return skb;
-}
-
-/*
- * If users==1, we are the only owner and can avoid redundant
- * atomic change.
- */
-
-/**
- * kfree_skb - free an sk_buff
- * @skb: buffer to free
- *
- * Drop a reference to the buffer and free it if the usage count has
- * hit zero.
- */
-
-static inline void kfree_skb(struct sk_buff *skb)
-{
- if (likely(atomic_read(&skb->users) == 1))
- smp_rmb();
- else if (likely(!atomic_dec_and_test(&skb->users)))
- return;
- __kfree_skb(skb);
-}
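
For illustration, a minimal sketch of the reference-counting contract that
skb_get() and kfree_skb() define (the function name here is hypothetical,
not part of the header):

    /* Each holder owns exactly one reference and drops its own. */
    void example_hold_and_release(struct sk_buff *skb)
    {
        struct sk_buff *held = skb_get(skb);  /* users: 1 -> 2 */

        kfree_skb(skb);   /* drop the caller's reference */
        kfree_skb(held);  /* last reference gone: __kfree_skb() frees */
    }
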
-
-/**
- * skb_cloned - is the buffer a clone
- * @skb: buffer to check
- *
- * Returns true if the buffer was generated with skb_clone() and is
- * one of multiple shared copies of the buffer. Cloned buffers are
- * shared data so must not be written to under normal circumstances.
- */
-
-static inline int skb_cloned(struct sk_buff *skb)
-{
- return skb->cloned && atomic_read(&skb_shinfo(skb)->dataref) != 1;
-}
-
-/**
- * skb_shared - is the buffer shared
- * @skb: buffer to check
- *
- * Returns true if more than one person has a reference to this
- * buffer.
- */
-
-static inline int skb_shared(struct sk_buff *skb)
-{
- return (atomic_read(&skb->users) != 1);
-}
-
-/**
- * skb_share_check - check if buffer is shared and if so clone it
- * @skb: buffer to check
- * @pri: priority for memory allocation
- *
- * If the buffer is shared the buffer is cloned and the old copy
- * drops a reference. A new clone with a single reference is returned.
- * If the buffer is not shared the original buffer is returned. When
- * being called from interrupt context or with spinlocks held @pri must
- * be GFP_ATOMIC.
- *
- * NULL is returned on a memory allocation failure.
- */
-
-static inline struct sk_buff *skb_share_check(struct sk_buff *skb, int pri)
-{
- if (skb_shared(skb)) {
- struct sk_buff *nskb;
- nskb = skb_clone(skb, pri);
- kfree_skb(skb);
- return nskb;
- }
- return skb;
-}
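
As a sketch of the intended call pattern (a receive-path fragment with a
hypothetical function name), note that on clone failure the original
reference has already been dropped, so the caller must not touch skb again:

    static int example_rx(struct sk_buff *skb)
    {
        skb = skb_share_check(skb, GFP_ATOMIC);
        if (skb == NULL)
            return -ENOMEM;  /* old reference already released */
        /* skb is now unshared and safe to modify */
        return 0;
    }
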
-
-
-/*
- * Copy shared buffers into a new sk_buff. We effectively do COW on
- * packets to handle cases where we have a local reader and a forwarding
- * path, and a couple of other messy ones. The common case is tcpdumping
- * a packet that's being forwarded.
- */
-
-/**
- * skb_unshare - make a copy of a shared buffer
- * @skb: buffer to check
- * @pri: priority for memory allocation
- *
- * If the socket buffer is a clone then this function creates a new
- * copy of the data, drops a reference count on the old copy and returns
- * the new copy with the reference count at 1. If the buffer is not a clone
- * the original buffer is returned. When called with a spinlock held or
- * from interrupt context @pri must be %GFP_ATOMIC.
- *
- * %NULL is returned on a memory allocation failure.
- */
-
-static inline struct sk_buff *skb_unshare(struct sk_buff *skb, int pri)
-{
- struct sk_buff *nskb;
- if(!skb_cloned(skb))
- return skb;
- nskb=skb_copy(skb, pri);
- kfree_skb(skb); /* Free our shared copy */
- return nskb;
-}
-
-/**
- * skb_peek
- * @list_: list to peek at
- *
- * Peek an &sk_buff. Unlike most other operations you _MUST_
- * be careful with this one. A peek leaves the buffer on the
- * list and someone else may run off with it. You must hold
- * the appropriate locks or have a private queue to do this.
- *
- * Returns %NULL for an empty list or a pointer to the head element.
- * The reference count is not incremented and the reference is therefore
- * volatile. Use with caution.
- */
-
-static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
-{
- struct sk_buff *list = ((struct sk_buff *)list_)->next;
- if (list == (struct sk_buff *)list_)
- list = NULL;
- return list;
-}
-
-/**
- * skb_peek_tail
- * @list_: list to peek at
- *
- * Peek an &sk_buff. Unlike most other operations you _MUST_
- * be careful with this one. A peek leaves the buffer on the
- * list and someone else may run off with it. You must hold
- * the appropriate locks or have a private queue to do this.
- *
- * Returns %NULL for an empty list or a pointer to the tail element.
- * The reference count is not incremented and the reference is therefore
- * volatile. Use with caution.
- */
-
-static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
-{
- struct sk_buff *list = ((struct sk_buff *)list_)->prev;
- if (list == (struct sk_buff *)list_)
- list = NULL;
- return list;
-}
-
-/**
- * skb_queue_len - get queue length
- * @list_: list to measure
- *
- * Return the length of an &sk_buff queue.
- */
-
-static inline __u32 skb_queue_len(struct sk_buff_head *list_)
-{
- return(list_->qlen);
-}
-
-static inline void skb_queue_head_init(struct sk_buff_head *list)
-{
- spin_lock_init(&list->lock);
- list->prev = (struct sk_buff *)list;
- list->next = (struct sk_buff *)list;
- list->qlen = 0;
-}
-
-/*
- * Insert an sk_buff at the start of a list.
- *
- * The "__skb_xxxx()" functions are the non-atomic ones that
- * can only be called with interrupts disabled.
- */
-
-/**
- * __skb_queue_head - queue a buffer at the list head
- * @list: list to use
- * @newsk: buffer to queue
- *
- * Queue a buffer at the start of a list. This function takes no locks
- * and you must therefore hold required locks before calling it.
- *
- * A buffer cannot be placed on two lists at the same time.
- */
-
-static inline void __skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
-{
- struct sk_buff *prev, *next;
-
- newsk->list = list;
- list->qlen++;
- prev = (struct sk_buff *)list;
- next = prev->next;
- newsk->next = next;
- newsk->prev = prev;
- next->prev = newsk;
- prev->next = newsk;
-}
-
-
-/**
- * skb_queue_head - queue a buffer at the list head
- * @list: list to use
- * @newsk: buffer to queue
- *
- * Queue a buffer at the start of the list. This function takes the
- * list lock and can be used safely with other locking &sk_buff functions.
- *
- * A buffer cannot be placed on two lists at the same time.
- */
-
-static inline void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&list->lock, flags);
- __skb_queue_head(list, newsk);
- spin_unlock_irqrestore(&list->lock, flags);
-}
-
-/**
- * __skb_queue_tail - queue a buffer at the list tail
- * @list: list to use
- * @newsk: buffer to queue
- *
- * Queue a buffer at the end of a list. This function takes no locks
- * and you must therefore hold required locks before calling it.
- *
- * A buffer cannot be placed on two lists at the same time.
- */
-
-
-static inline void __skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
-{
- struct sk_buff *prev, *next;
-
- newsk->list = list;
- list->qlen++;
- next = (struct sk_buff *)list;
- prev = next->prev;
- newsk->next = next;
- newsk->prev = prev;
- next->prev = newsk;
- prev->next = newsk;
-}
-
-/**
- * skb_queue_tail - queue a buffer at the list tail
- * @list: list to use
- * @newsk: buffer to queue
- *
- * Queue a buffer at the tail of the list. This function takes the
- * list lock and can be used safely with other locking &sk_buff functions.
- *
- * A buffer cannot be placed on two lists at the same time.
- */
-
-static inline void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&list->lock, flags);
- __skb_queue_tail(list, newsk);
- spin_unlock_irqrestore(&list->lock, flags);
-}
-
-/**
- * __skb_dequeue - remove from the head of the queue
- * @list: list to dequeue from
- *
- * Remove the head of the list. This function does not take any locks
- * so must be used with appropriate locks held only. The head item is
- * returned or %NULL if the list is empty.
- */
-
-static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
-{
- struct sk_buff *next, *prev, *result;
-
- prev = (struct sk_buff *) list;
- next = prev->next;
- result = NULL;
- if (next != prev) {
- result = next;
- next = next->next;
- list->qlen--;
- next->prev = prev;
- prev->next = next;
- result->next = NULL;
- result->prev = NULL;
- result->list = NULL;
- }
- return result;
-}
-
-/**
- * skb_dequeue - remove from the head of the queue
- * @list: list to dequeue from
- *
- * Remove the head of the list. The list lock is taken so the function
- * may be used safely with other locking list functions. The head item is
- * returned or %NULL if the list is empty.
- */
-
-static inline struct sk_buff *skb_dequeue(struct sk_buff_head *list)
-{
- unsigned long flags;
- struct sk_buff *result;
-
- spin_lock_irqsave(&list->lock, flags);
- result = __skb_dequeue(list);
- spin_unlock_irqrestore(&list->lock, flags);
- return result;
-}
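
A minimal producer/consumer sketch of the locked queue API above (queue and
function names hypothetical); the locked variants may be mixed freely across
contexts because each takes the queue's spinlock with interrupts disabled:

    static struct sk_buff_head example_q;

    static void example_setup(void)
    {
        skb_queue_head_init(&example_q);
    }

    static void example_produce(struct sk_buff *skb)
    {
        skb_queue_tail(&example_q, skb);  /* FIFO order */
    }

    static struct sk_buff *example_consume(void)
    {
        return skb_dequeue(&example_q);   /* NULL when empty */
    }
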
-
-/*
- * Insert a packet on a list.
- */
-
-static inline void __skb_insert(struct sk_buff *newsk,
- struct sk_buff * prev, struct sk_buff *next,
- struct sk_buff_head * list)
-{
- newsk->next = next;
- newsk->prev = prev;
- next->prev = newsk;
- prev->next = newsk;
- newsk->list = list;
- list->qlen++;
-}
-
-/**
- * skb_insert - insert a buffer
- * @old: buffer to insert before
- * @newsk: buffer to insert
- *
- * Place a packet before a given packet in a list. The list locks are taken
- * and this function is atomic with respect to other list locked calls
- * A buffer cannot be placed on two lists at the same time.
- */
-
-static inline void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&old->list->lock, flags);
- __skb_insert(newsk, old->prev, old, old->list);
- spin_unlock_irqrestore(&old->list->lock, flags);
-}
-
-/*
- * Place a packet after a given packet in a list.
- */
-
-static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk)
-{
- __skb_insert(newsk, old, old->next, old->list);
-}
-
-/**
- * skb_append - append a buffer
- * @old: buffer to insert after
- * @newsk: buffer to insert
- *
- * Place a packet after a given packet in a list. The list locks are taken
- * and this function is atomic with respect to other list locked calls.
- * A buffer cannot be placed on two lists at the same time.
- */
-
-
-static inline void skb_append(struct sk_buff *old, struct sk_buff *newsk)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&old->list->lock, flags);
- __skb_append(old, newsk);
- spin_unlock_irqrestore(&old->list->lock, flags);
-}
-
-/*
- * remove sk_buff from list. _Must_ be called atomically, and with
- * the list known..
- */
-
-static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
-{
- struct sk_buff * next, * prev;
-
- list->qlen--;
- next = skb->next;
- prev = skb->prev;
- skb->next = NULL;
- skb->prev = NULL;
- skb->list = NULL;
- next->prev = prev;
- prev->next = next;
-}
-
-/**
- * skb_unlink - remove a buffer from a list
- * @skb: buffer to remove
- *
- * Remove a packet from a list. The list locks are taken and this
- * function is atomic with respect to other list locked calls.
- *
- * Works even without knowing the list it is sitting on, which can be
- * handy at times. It also means that THE LIST MUST EXIST when you
- * unlink. Thus a list must have its contents unlinked before it is
- * destroyed.
- */
-
-static inline void skb_unlink(struct sk_buff *skb)
-{
- struct sk_buff_head *list = skb->list;
-
- if(list) {
- unsigned long flags;
-
- spin_lock_irqsave(&list->lock, flags);
- if(skb->list == list)
- __skb_unlink(skb, skb->list);
- spin_unlock_irqrestore(&list->lock, flags);
- }
-}
-
-/* XXX: more streamlined implementation */
-
-/**
- * __skb_dequeue_tail - remove from the tail of the queue
- * @list: list to dequeue from
- *
- * Remove the tail of the list. This function does not take any locks
- * so must be used with appropriate locks held only. The tail item is
- * returned or %NULL if the list is empty.
- */
-
-static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
-{
- struct sk_buff *skb = skb_peek_tail(list);
- if (skb)
- __skb_unlink(skb, list);
- return skb;
-}
-
-/**
- * skb_dequeue_tail - remove from the tail of the queue
- * @list: list to dequeue from
- *
- * Remove the tail of the list. The list lock is taken so the function
- * may be used safely with other locking list functions. The tail item is
- * returned or %NULL if the list is empty.
- */
-
-static inline struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
-{
- unsigned long flags;
- struct sk_buff *result;
-
- spin_lock_irqsave(&list->lock, flags);
- result = __skb_dequeue_tail(list);
- spin_unlock_irqrestore(&list->lock, flags);
- return result;
-}
-
-static inline int skb_is_nonlinear(const struct sk_buff *skb)
-{
- return skb->data_len;
-}
-
-static inline unsigned int skb_headlen(const struct sk_buff *skb)
-{
- return skb->len - skb->data_len;
-}
-
-#define SKB_PAGE_ASSERT(skb) do { if (skb_shinfo(skb)->nr_frags) out_of_line_bug(); } while (0)
-#define SKB_FRAG_ASSERT(skb) do { if (skb_shinfo(skb)->frag_list) out_of_line_bug(); } while (0)
-#define SKB_LINEAR_ASSERT(skb) do { if (skb_is_nonlinear(skb)) out_of_line_bug(); } while (0)
-
-/*
- * Add data to an sk_buff
- */
-
-static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
-{
- unsigned char *tmp=skb->tail;
- SKB_LINEAR_ASSERT(skb);
- skb->tail+=len;
- skb->len+=len;
- return tmp;
-}
-
-/**
- * skb_put - add data to a buffer
- * @skb: buffer to use
- * @len: amount of data to add
- *
- * This function extends the used data area of the buffer. If this would
- * exceed the total buffer size the kernel will panic. A pointer to the
- * first byte of the extra data is returned.
- */
-
-static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
-{
- unsigned char *tmp=skb->tail;
- SKB_LINEAR_ASSERT(skb);
- skb->tail+=len;
- skb->len+=len;
- if(skb->tail>skb->end) {
- skb_over_panic(skb, len, current_text_addr());
- }
- return tmp;
-}
-
-static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
-{
- skb->data-=len;
- skb->len+=len;
- return skb->data;
-}
-
-/**
- * skb_push - add data to the start of a buffer
- * @skb: buffer to use
- * @len: amount of data to add
- *
- * This function extends the used data area of the buffer at the buffer
- * start. If this would exceed the total buffer headroom the kernel will
- * panic. A pointer to the first byte of the extra data is returned.
- */
-
-static inline unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
-{
- skb->data-=len;
- skb->len+=len;
- if(skb->data<skb->head) {
- skb_under_panic(skb, len, current_text_addr());
- }
- return skb->data;
-}
-
-static inline char *__skb_pull(struct sk_buff *skb, unsigned int len)
-{
- skb->len-=len;
- if (skb->len < skb->data_len)
- out_of_line_bug();
- return skb->data+=len;
-}
-
-/**
- * skb_pull - remove data from the start of a buffer
- * @skb: buffer to use
- * @len: amount of data to remove
- *
- * This function removes data from the start of a buffer, returning
- * the memory to the headroom. A pointer to the next data in the buffer
- * is returned. Once the data has been pulled future pushes will overwrite
- * the old data.
- */
-
-static inline unsigned char * skb_pull(struct sk_buff *skb, unsigned int len)
-{
- if (len > skb->len)
- return NULL;
- return __skb_pull(skb,len);
-}
-
-extern unsigned char * __pskb_pull_tail(struct sk_buff *skb, int delta);
-
-static inline char *__pskb_pull(struct sk_buff *skb, unsigned int len)
-{
- if (len > skb_headlen(skb) &&
- __pskb_pull_tail(skb, len-skb_headlen(skb)) == NULL)
- return NULL;
- skb->len -= len;
- return skb->data += len;
-}
-
-static inline unsigned char * pskb_pull(struct sk_buff *skb, unsigned int len)
-{
- if (len > skb->len)
- return NULL;
- return __pskb_pull(skb,len);
-}
-
-static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
-{
- if (len <= skb_headlen(skb))
- return 1;
- if (len > skb->len)
- return 0;
- return (__pskb_pull_tail(skb, len-skb_headlen(skb)) != NULL);
-}
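
The usual parsing idiom built on pskb_may_pull(): guarantee the header bytes
are linear before dereferencing them. A sketch (the function name is
hypothetical):

    static int example_parse_ip(struct sk_buff *skb)
    {
        struct iphdr *iph;

        if (!pskb_may_pull(skb, sizeof(struct iphdr)))
            return -1;                    /* truncated packet */
        iph = (struct iphdr *)skb->data;  /* now known to be linear */
        return iph->protocol;
    }
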
-
-/**
- * skb_headroom - bytes at buffer head
- * @skb: buffer to check
- *
- * Return the number of bytes of free space at the head of an &sk_buff.
- */
-
-static inline int skb_headroom(const struct sk_buff *skb)
-{
- return skb->data-skb->head;
-}
-
-/**
- * skb_tailroom - bytes at buffer end
- * @skb: buffer to check
- *
- * Return the number of bytes of free space at the tail of an sk_buff
- */
-
-static inline int skb_tailroom(const struct sk_buff *skb)
-{
- return skb_is_nonlinear(skb) ? 0 : skb->end-skb->tail;
-}
-
-/**
- * skb_reserve - adjust headroom
- * @skb: buffer to alter
- * @len: bytes to move
- *
- * Increase the headroom of an empty &sk_buff by reducing the tail
- * room. This is only allowed for an empty buffer.
- */
-
-static inline void skb_reserve(struct sk_buff *skb, unsigned int len)
-{
- skb->data+=len;
- skb->tail+=len;
-}
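
How reserve/put/push cooperate when composing a frame, as a sketch under
assumed sizes (16 bytes of headroom, a zeroed payload; names hypothetical):

    static struct sk_buff *example_build(unsigned int payload_len)
    {
        struct sk_buff *skb = alloc_skb(payload_len + 16, GFP_ATOMIC);

        if (skb == NULL)
            return NULL;
        skb_reserve(skb, 16);                 /* leave headroom */
        memset(skb_put(skb, payload_len), 0, payload_len);
        skb_push(skb, sizeof(struct ethhdr)); /* prepend link header */
        return skb;
    }
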
-
-extern int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc);
-
-static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
-{
- if (!skb->data_len) {
- skb->len = len;
- skb->tail = skb->data+len;
- } else {
- ___pskb_trim(skb, len, 0);
- }
-}
-
-/**
- * skb_trim - remove end from a buffer
- * @skb: buffer to alter
- * @len: new length
- *
- * Cut the length of a buffer down by removing data from the tail. If
- * the buffer is already under the length specified it is not modified.
- */
-
-static inline void skb_trim(struct sk_buff *skb, unsigned int len)
-{
- if (skb->len > len) {
- __skb_trim(skb, len);
- }
-}
-
-
-static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
-{
- if (!skb->data_len) {
- skb->len = len;
- skb->tail = skb->data+len;
- return 0;
- } else {
- return ___pskb_trim(skb, len, 1);
- }
-}
-
-static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
-{
- if (len < skb->len)
- return __pskb_trim(skb, len);
- return 0;
-}
-
-/**
- * skb_orphan - orphan a buffer
- * @skb: buffer to orphan
- *
- * If a buffer currently has an owner then we call the owner's
- * destructor function and make the @skb unowned. The buffer continues
- * to exist but is no longer charged to its former owner.
- */
-
-
-static inline void skb_orphan(struct sk_buff *skb)
-{
- if (skb->destructor)
- skb->destructor(skb);
- skb->destructor = NULL;
- skb->sk = NULL;
-}
-
-/**
- * skb_queue_purge - empty a list
- * @list: list to empty
- *
- * Delete all buffers on an &sk_buff list. Each buffer is removed from
- * the list and one reference dropped. This function takes the list
- * lock and is atomic with respect to other list locking functions.
- */
-
-
-static inline void skb_queue_purge(struct sk_buff_head *list)
-{
- struct sk_buff *skb;
- while ((skb=skb_dequeue(list))!=NULL)
- kfree_skb(skb);
-}
-
-/**
- * __skb_queue_purge - empty a list
- * @list: list to empty
- *
- * Delete all buffers on an &sk_buff list. Each buffer is removed from
- * the list and one reference dropped. This function does not take the
- * list lock and the caller must hold the relevant locks to use it.
- */
-
-
-static inline void __skb_queue_purge(struct sk_buff_head *list)
-{
- struct sk_buff *skb;
- while ((skb=__skb_dequeue(list))!=NULL)
- kfree_skb(skb);
-}
-
-/**
- * __dev_alloc_skb - allocate an skbuff for sending
- * @length: length to allocate
- * @gfp_mask: get_free_pages mask, passed to alloc_skb
- *
- * Allocate a new &sk_buff and assign it a usage count of one. The
- * buffer has unspecified headroom built in. Users should allocate
- * the headroom they think they need without accounting for the
- * built in space. The built in space is used for optimisations.
- *
- * %NULL is returned if there is no free memory.
- */
-#ifndef CONFIG_XEN
-static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
- int gfp_mask)
-{
- struct sk_buff *skb = alloc_skb(length+16, gfp_mask);
- if (skb)
- skb_reserve(skb,16);
- return skb;
-}
-#else
-extern struct sk_buff *__dev_alloc_skb(unsigned int length, int gfp_mask);
-#endif
-
-/**
- * dev_alloc_skb - allocate an skbuff for sending
- * @length: length to allocate
- *
- * Allocate a new &sk_buff and assign it a usage count of one. The
- * buffer has unspecified headroom built in. Users should allocate
- * the headroom they think they need without accounting for the
- * built in space. The built in space is used for optimisations.
- *
- * %NULL is returned if there is no free memory. Although this function
- * allocates memory it can be called from an interrupt.
- */
-
-static inline struct sk_buff *dev_alloc_skb(unsigned int length)
-{
- return __dev_alloc_skb(length, GFP_ATOMIC);
-}
-
-/**
- * skb_cow - copy header of skb when it is required
- * @skb: buffer to cow
- * @headroom: needed headroom
- *
- * If the skb passed lacks sufficient headroom or its data part
- * is shared, data is reallocated. If reallocation fails, an error
- * is returned and original skb is not changed.
- *
- * The result is skb with writable area skb->head...skb->tail
- * and at least @headroom of space at head.
- */
-
-static inline int
-skb_cow(struct sk_buff *skb, unsigned int headroom)
-{
- int delta = (headroom > 16 ? headroom : 16) - skb_headroom(skb);
-
- if (delta < 0)
- delta = 0;
-
- if (delta || skb_cloned(skb))
- return pskb_expand_head(skb, (delta+15)&~15, 0, GFP_ATOMIC);
- return 0;
-}
-
-/**
- * skb_padto - pad an skbuff up to a minimal size
- * @skb: buffer to pad
- * @len: minimal length
- *
- * Pads up a buffer to ensure the trailing bytes exist and are
- * blanked. If the buffer already contains sufficient data it
- * is untouched. Returns the buffer, which may be a replacement
- * for the original, or NULL for out of memory - in which case
- * the original buffer is still freed.
- */
-
-static inline struct sk_buff *skb_padto(struct sk_buff *skb, unsigned int len)
-{
- unsigned int size = skb->len;
- if(likely(size >= len))
- return skb;
- return skb_pad(skb, len-size);
-}
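
A transmit-path sketch of the padding rule above (60 bytes, the minimum
Ethernet frame size, is assumed here; names hypothetical). On failure the
original buffer has already been freed, so the driver simply returns:

    static int example_xmit(struct sk_buff *skb)
    {
        skb = skb_padto(skb, 60);
        if (skb == NULL)
            return 0;  /* original skb already freed by skb_pad() */
        /* ... hand skb to the hardware ... */
        return 0;
    }
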
-
-/**
- * skb_linearize - convert paged skb to linear one
- * @skb: buffer to linearize
- * @gfp: allocation mode
- *
- * If there is no free memory -ENOMEM is returned, otherwise zero
- * is returned and the old skb data released. */
-int skb_linearize(struct sk_buff *skb, int gfp);
-
-static inline void *kmap_skb_frag(const skb_frag_t *frag)
-{
-#ifdef CONFIG_HIGHMEM
- if (in_irq())
- out_of_line_bug();
-
- local_bh_disable();
-#endif
- return kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ);
-}
-
-static inline void kunmap_skb_frag(void *vaddr)
-{
- kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
-#ifdef CONFIG_HIGHMEM
- local_bh_enable();
-#endif
-}
-
-#define skb_queue_walk(queue, skb) \
- for (skb = (queue)->next; \
- (skb != (struct sk_buff *)(queue)); \
- skb=skb->next)
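
skb_queue_walk() takes no references and no locks, so a traversal must hold
the queue lock for its whole duration. A sketch (function name hypothetical):

    static unsigned int example_queued_bytes(struct sk_buff_head *q)
    {
        struct sk_buff *skb;
        unsigned int bytes = 0;
        unsigned long flags;

        spin_lock_irqsave(&q->lock, flags);
        skb_queue_walk(q, skb)
            bytes += skb->len;
        spin_unlock_irqrestore(&q->lock, flags);
        return bytes;
    }
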
-
-
-extern struct sk_buff * skb_recv_datagram(struct sock *sk,unsigned flags,int noblock, int *err);
-extern unsigned int datagram_poll(struct file *file, struct socket *sock, struct poll_table_struct *wait);
-extern int skb_copy_datagram(const struct sk_buff *from, int offset, char *to,int size);
-extern int skb_copy_datagram_iovec(const struct sk_buff *from, int offset, struct iovec *to,int size);
-extern int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset, u8 *to, int len, unsigned int *csump);
-extern int skb_copy_and_csum_datagram_iovec(const struct sk_buff *skb, int hlen, struct iovec *iov);
-extern void skb_free_datagram(struct sock * sk, struct sk_buff *skb);
-
-extern unsigned int skb_checksum(const struct sk_buff *skb, int offset, int len, unsigned int csum);
-extern int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
-extern unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to, int len, unsigned int csum);
-extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
-
-extern void skb_init(void);
-extern void skb_add_mtu(int mtu);
-
-#ifdef CONFIG_NETFILTER
-static inline void
-nf_conntrack_put(struct nf_ct_info *nfct)
-{
- if (nfct && atomic_dec_and_test(&nfct->master->use))
- nfct->master->destroy(nfct->master);
-}
-static inline void
-nf_conntrack_get(struct nf_ct_info *nfct)
-{
- if (nfct)
- atomic_inc(&nfct->master->use);
-}
-static inline void
-nf_reset(struct sk_buff *skb)
-{
- nf_conntrack_put(skb->nfct);
- skb->nfct = NULL;
-#ifdef CONFIG_NETFILTER_DEBUG
- skb->nf_debug = 0;
-#endif
-}
-#else /* CONFIG_NETFILTER */
-static inline void nf_reset(struct sk_buff *skb) {}
-#endif /* CONFIG_NETFILTER */
-
-#endif /* __KERNEL__ */
-#endif /* _LINUX_SKBUFF_H */
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/include/linux/timer.h
--- a/linux-2.4.30-xen-sparse/include/linux/timer.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,77 +0,0 @@
-#ifndef _LINUX_TIMER_H
-#define _LINUX_TIMER_H
-
-#include <linux/config.h>
-#include <linux/list.h>
-
-/*
- * In Linux 2.4, static timers have been removed from the kernel.
- * Timers may be dynamically created and destroyed, and should be initialized
- * by a call to init_timer() upon creation.
- *
- * The "data" field enables use of a common timeout function for several
- * timeouts. You can use this field to distinguish between the different
- * invocations.
- */
-struct timer_list {
- struct list_head list;
- unsigned long expires;
- unsigned long data;
- void (*function)(unsigned long);
-};
-
-extern void add_timer(struct timer_list * timer);
-extern int del_timer(struct timer_list * timer);
-#ifdef CONFIG_NO_IDLE_HZ
-extern struct timer_list *next_timer_event(void);
-#endif
-
-#ifdef CONFIG_SMP
-extern int del_timer_sync(struct timer_list * timer);
-extern void sync_timers(void);
-#else
-#define del_timer_sync(t) del_timer(t)
-#define sync_timers() do { } while (0)
-#endif
-
-/*
- * mod_timer is a more efficient way to update the expire field of an
- * active timer (if the timer is inactive it will be activated)
- * mod_timer(a,b) is equivalent to del_timer(a); a->expires = b; add_timer(a).
- * If the timer is known to be not pending (ie, in the handler), mod_timer
- * is less efficient than a->expires = b; add_timer(a).
- */
-int mod_timer(struct timer_list *timer, unsigned long expires);
-
-extern void it_real_fn(unsigned long);
-
-static inline void init_timer(struct timer_list * timer)
-{
- timer->list.next = timer->list.prev = NULL;
-}
-
-static inline int timer_pending (const struct timer_list * timer)
-{
- return timer->list.next != NULL;
-}
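
The dynamic-timer lifecycle described above, as a sketch (the callback, data
value, and one-second interval are all hypothetical):

    static struct timer_list example_timer;

    static void example_timeout(unsigned long data)
    {
        /* runs from the timer bottom half about HZ ticks after arming */
    }

    static void example_arm(void)
    {
        init_timer(&example_timer);
        example_timer.function = example_timeout;
        example_timer.data = 0;
        example_timer.expires = jiffies + HZ;  /* ~1 second from now */
        add_timer(&example_timer);
    }
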
-
-/*
- * These inlines deal with timer wrapping correctly. You are
- * strongly encouraged to use them
- * 1. Because people otherwise forget
- * 2. Because if the timer wrap changes in future you won't have to
- * alter your driver code.
- *
- * time_after(a,b) returns true if the time a is after time b.
- *
- * Do this with "<0" and ">=0" to only test the sign of the result. A
- * good compiler would generate better code (and a really good compiler
- * wouldn't care). Gcc is currently neither.
- */
-#define time_after(a,b) ((long)(b) - (long)(a) < 0)
-#define time_before(a,b) time_after(b,a)
-
-#define time_after_eq(a,b) ((long)(a) - (long)(b) >= 0)
-#define time_before_eq(a,b) time_after_eq(b,a)
-
-#endif
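
The wrap-safe comparison these macros provide, as a one-line sketch: a
deadline computed as "jiffies + HZ" is tested with time_after() rather than
a plain ">" so the result stays correct when jiffies wraps around:

    static int example_expired(unsigned long deadline)
    {
        return time_after(jiffies, deadline);
    }
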
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/kernel/time.c
--- a/linux-2.4.30-xen-sparse/kernel/time.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,415 +0,0 @@
-/*
- * linux/kernel/time.c
- *
- * Copyright (C) 1991, 1992 Linus Torvalds
- *
- * This file contains the interface functions for the various
- * time related system calls: time, stime, gettimeofday, settimeofday,
- * adjtime
- */
-/*
- * Modification history kernel/time.c
- *
- * 1993-09-02 Philip Gladstone
- * Created file with time related functions from sched.c and adjtimex()
- * 1993-10-08 Torsten Duwe
- * adjtime interface update and CMOS clock write code
- * 1995-08-13 Torsten Duwe
- * kernel PLL updated to 1994-12-13 specs (rfc-1589)
- * 1999-01-16 Ulrich Windl
- * Introduced error checking for many cases in adjtimex().
- * Updated NTP code according to technical memorandum Jan '96
- * "A Kernel Model for Precision Timekeeping" by Dave Mills
- * Allow time_constant larger than MAXTC(6) for NTP v4 (MAXTC == 10)
- * (Even though the technical memorandum forbids it)
- */
-
-#include <linux/mm.h>
-#include <linux/timex.h>
-#include <linux/smp_lock.h>
-
-#include <asm/uaccess.h>
-
-/*
- * The timezone where the local system is located. Used as a default by some
- * programs who obtain this value by using gettimeofday.
- */
-struct timezone sys_tz;
-
-/* The xtime_lock is not only serializing the xtime read/writes but it's also
- serializing all accesses to the global NTP variables now. */
-extern rwlock_t xtime_lock;
-
-#if !defined(__alpha__) && !defined(__ia64__)
-
-/*
- * sys_time() can be implemented in user-level using
- * sys_gettimeofday(). Is this for backwards compatibility? If so,
- * why not move it into the appropriate arch directory (for those
- * architectures that need it).
- *
- * XXX This function is NOT 64-bit clean!
- */
-asmlinkage long sys_time(int * tloc)
-{
- struct timeval now;
- int i;
-
- do_gettimeofday(&now);
- i = now.tv_sec;
- if (tloc) {
- if (put_user(i,tloc))
- i = -EFAULT;
- }
- return i;
-}
-
-#if !defined(CONFIG_XEN)
-
-/*
- * sys_stime() can be implemented in user-level using
- * sys_settimeofday(). Is this for backwards compatibility? If so,
- * why not move it into the appropriate arch directory (for those
- * architectures that need it).
- */
-
-asmlinkage long sys_stime(int * tptr)
-{
- int value;
-
- if (!capable(CAP_SYS_TIME))
- return -EPERM;
- if (get_user(value, tptr))
- return -EFAULT;
- write_lock_irq(&xtime_lock);
- vxtime_lock();
- xtime.tv_sec = value;
- xtime.tv_usec = 0;
- vxtime_unlock();
- time_adjust = 0; /* stop active adjtime() */
- time_status |= STA_UNSYNC;
- time_maxerror = NTP_PHASE_LIMIT;
- time_esterror = NTP_PHASE_LIMIT;
- write_unlock_irq(&xtime_lock);
- return 0;
-}
-
-#endif
-
-#endif
-
-asmlinkage long sys_gettimeofday(struct timeval *tv, struct timezone *tz)
-{
- if (tv) {
- struct timeval ktv;
- do_gettimeofday(&ktv);
- if (copy_to_user(tv, &ktv, sizeof(ktv)))
- return -EFAULT;
- }
- if (tz) {
- if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
- return -EFAULT;
- }
- return 0;
-}
-
-/*
- * Adjust the time obtained from the CMOS to be UTC time instead of
- * local time.
- *
- * This is ugly, but preferable to the alternatives. Otherwise we
- * would either need to write a program to do it in /etc/rc (and risk
- * confusion if the program gets run more than once; it would also be
- * hard to make the program warp the clock precisely n hours) or
- * compile in the timezone information into the kernel. Bad, bad....
- *
- * - TYT, 1992-01-01
- *
- * The best thing to do is to keep the CMOS clock in universal time (UTC)
- * as real UNIX machines always do it. This avoids all headaches about
- * daylight saving times and warping kernel clocks.
- */
-inline static void warp_clock(void)
-{
- write_lock_irq(&xtime_lock);
- vxtime_lock();
- xtime.tv_sec += sys_tz.tz_minuteswest * 60;
- vxtime_unlock();
- write_unlock_irq(&xtime_lock);
-}
-
-/*
- * In case for some reason the CMOS clock has not already been running
- * in UTC, but in some local time: The first time we set the timezone,
- * we will warp the clock so that it is ticking UTC time instead of
- * local time. Presumably, if someone is setting the timezone then we
- * are running in an environment where the programs understand about
- * timezones. This should be done at boot time in the /etc/rc script,
- * as soon as possible, so that the clock can be set right. Otherwise,
- * various programs will get confused when the clock gets warped.
- */
-
-int do_sys_settimeofday(struct timeval *tv, struct timezone *tz)
-{
- static int firsttime = 1;
-
- if (!capable(CAP_SYS_TIME))
- return -EPERM;
-
- if (tz) {
- /* SMP safe, global irq locking makes it work. */
- sys_tz = *tz;
- if (firsttime) {
- firsttime = 0;
- if (!tv)
- warp_clock();
- }
- }
- if (tv)
- {
- /* SMP safe, again the code in arch/foo/time.c should
- * globally block out interrupts when it runs.
- */
- do_settimeofday(tv);
- }
- return 0;
-}
-
-asmlinkage long sys_settimeofday(struct timeval *tv, struct timezone *tz)
-{
- struct timeval new_tv;
- struct timezone new_tz;
-
- if (tv) {
- if (copy_from_user(&new_tv, tv, sizeof(*tv)))
- return -EFAULT;
- }
- if (tz) {
- if (copy_from_user(&new_tz, tz, sizeof(*tz)))
- return -EFAULT;
- }
-
- return do_sys_settimeofday(tv ? &new_tv : NULL, tz ? &new_tz : NULL);
-}
-
-long pps_offset; /* pps time offset (us) */
-long pps_jitter = MAXTIME; /* time dispersion (jitter) (us) */
-
-long pps_freq; /* frequency offset (scaled ppm) */
-long pps_stabil = MAXFREQ; /* frequency dispersion (scaled ppm) */
-
-long pps_valid = PPS_VALID; /* pps signal watchdog counter */
-
-int pps_shift = PPS_SHIFT; /* interval duration (s) (shift) */
-
-long pps_jitcnt; /* jitter limit exceeded */
-long pps_calcnt; /* calibration intervals */
-long pps_errcnt; /* calibration errors */
-long pps_stbcnt; /* stability limit exceeded */
-
-/* hook for a loadable hardpps kernel module */
-void (*hardpps_ptr)(struct timeval *);
-
-/* adjtimex mainly allows reading (and writing, if superuser) of
- * kernel time-keeping variables. Used by xntpd.
- */
-int do_adjtimex(struct timex *txc)
-{
- long ltemp, mtemp, save_adjust;
- int result;
-
- /* In order to modify anything, you gotta be super-user! */
- if (txc->modes && !capable(CAP_SYS_TIME))
- return -EPERM;
-
- /* Now we validate the data before disabling interrupts */
-
- if ((txc->modes & ADJ_OFFSET_SINGLESHOT) == ADJ_OFFSET_SINGLESHOT)
- /* singleshot must not be used with any other mode bits */
- if (txc->modes != ADJ_OFFSET_SINGLESHOT)
- return -EINVAL;
-
- if (txc->modes != ADJ_OFFSET_SINGLESHOT && (txc->modes & ADJ_OFFSET))
- /* adjustment Offset limited to +- .512 seconds */
- if (txc->offset <= - MAXPHASE || txc->offset >= MAXPHASE )
- return -EINVAL;
-
- /* if the quartz is off by more than 10% something is VERY wrong ! */
- if (txc->modes & ADJ_TICK)
- if (txc->tick < 900000/HZ || txc->tick > 1100000/HZ)
- return -EINVAL;
-
- write_lock_irq(&xtime_lock);
- result = time_state; /* mostly `TIME_OK' */
-
- /* Save for later - semantics of adjtime is to return old value */
- save_adjust = time_adjust;
-
-#if 0 /* STA_CLOCKERR is never set yet */
- time_status &= ~STA_CLOCKERR; /* reset STA_CLOCKERR */
-#endif
- /* If there are input parameters, then process them */
- if (txc->modes)
- {
- if (txc->modes & ADJ_STATUS) /* only set allowed bits */
- time_status = (txc->status & ~STA_RONLY) |
- (time_status & STA_RONLY);
-
- if (txc->modes & ADJ_FREQUENCY) { /* p. 22 */
- if (txc->freq > MAXFREQ || txc->freq < -MAXFREQ) {
- result = -EINVAL;
- goto leave;
- }
- time_freq = txc->freq - pps_freq;
- }
-
- if (txc->modes & ADJ_MAXERROR) {
- if (txc->maxerror < 0 || txc->maxerror >= NTP_PHASE_LIMIT) {
- result = -EINVAL;
- goto leave;
- }
- time_maxerror = txc->maxerror;
- }
-
- if (txc->modes & ADJ_ESTERROR) {
- if (txc->esterror < 0 || txc->esterror >= NTP_PHASE_LIMIT) {
- result = -EINVAL;
- goto leave;
- }
- time_esterror = txc->esterror;
- }
-
- if (txc->modes & ADJ_TIMECONST) { /* p. 24 */
- if (txc->constant < 0) { /* NTP v4 uses values > 6 */
- result = -EINVAL;
- goto leave;
- }
- time_constant = txc->constant;
- }
-
- if (txc->modes & ADJ_OFFSET) { /* values checked earlier */
- if (txc->modes == ADJ_OFFSET_SINGLESHOT) {
- /* adjtime() is independent from ntp_adjtime() */
- time_adjust = txc->offset;
- }
- else if ( time_status & (STA_PLL | STA_PPSTIME) ) {
- ltemp = (time_status & (STA_PPSTIME | STA_PPSSIGNAL)) ==
- (STA_PPSTIME | STA_PPSSIGNAL) ?
- pps_offset : txc->offset;
-
- /*
- * Scale the phase adjustment and
- * clamp to the operating range.
- */
- if (ltemp > MAXPHASE)
- time_offset = MAXPHASE << SHIFT_UPDATE;
- else if (ltemp < -MAXPHASE)
- time_offset = -(MAXPHASE << SHIFT_UPDATE);
- else
- time_offset = ltemp << SHIFT_UPDATE;
-
- /*
- * Select whether the frequency is to be controlled
- * and in which mode (PLL or FLL). Clamp to the operating
- * range. Ugly multiply/divide should be replaced someday.
- */
-
- if (time_status & STA_FREQHOLD || time_reftime == 0)
- time_reftime = xtime.tv_sec;
- mtemp = xtime.tv_sec - time_reftime;
- time_reftime = xtime.tv_sec;
- if (time_status & STA_FLL) {
- if (mtemp >= MINSEC) {
- ltemp = (time_offset / mtemp) << (SHIFT_USEC -
- SHIFT_UPDATE);
- if (ltemp < 0)
- time_freq -= -ltemp >> SHIFT_KH;
- else
- time_freq += ltemp >> SHIFT_KH;
- } else /* calibration interval too short (p. 12) */
- result = TIME_ERROR;
- } else { /* PLL mode */
- if (mtemp < MAXSEC) {
- ltemp *= mtemp;
- if (ltemp < 0)
- time_freq -= -ltemp >> (time_constant +
- time_constant +
- SHIFT_KF - SHIFT_USEC);
- else
- time_freq += ltemp >> (time_constant +
- time_constant +
- SHIFT_KF - SHIFT_USEC);
- } else /* calibration interval too long (p. 12) */
- result = TIME_ERROR;
- }
- if (time_freq > time_tolerance)
- time_freq = time_tolerance;
- else if (time_freq < -time_tolerance)
- time_freq = -time_tolerance;
- } /* STA_PLL || STA_PPSTIME */
- } /* txc->modes & ADJ_OFFSET */
- if (txc->modes & ADJ_TICK) {
- /* if the quartz is off by more than 10% something is
- VERY wrong ! */
- if (txc->tick < 900000/HZ || txc->tick > 1100000/HZ) {
- result = -EINVAL;
- goto leave;
- }
- tick = txc->tick;
- }
- } /* txc->modes */
-leave: if ((time_status & (STA_UNSYNC|STA_CLOCKERR)) != 0
- || ((time_status & (STA_PPSFREQ|STA_PPSTIME)) != 0
- && (time_status & STA_PPSSIGNAL) == 0)
- /* p. 24, (b) */
- || ((time_status & (STA_PPSTIME|STA_PPSJITTER))
- == (STA_PPSTIME|STA_PPSJITTER))
- /* p. 24, (c) */
- || ((time_status & STA_PPSFREQ) != 0
- && (time_status & (STA_PPSWANDER|STA_PPSERROR)) != 0))
- /* p. 24, (d) */
- result = TIME_ERROR;
-
- if ((txc->modes & ADJ_OFFSET_SINGLESHOT) == ADJ_OFFSET_SINGLESHOT)
- txc->offset = save_adjust;
- else {
- if (time_offset < 0)
- txc->offset = -(-time_offset >> SHIFT_UPDATE);
- else
- txc->offset = time_offset >> SHIFT_UPDATE;
- }
- txc->freq = time_freq + pps_freq;
- txc->maxerror = time_maxerror;
- txc->esterror = time_esterror;
- txc->status = time_status;
- txc->constant = time_constant;
- txc->precision = time_precision;
- txc->tolerance = time_tolerance;
- txc->tick = tick;
- txc->ppsfreq = pps_freq;
- txc->jitter = pps_jitter >> PPS_AVG;
- txc->shift = pps_shift;
- txc->stabil = pps_stabil;
- txc->jitcnt = pps_jitcnt;
- txc->calcnt = pps_calcnt;
- txc->errcnt = pps_errcnt;
- txc->stbcnt = pps_stbcnt;
- write_unlock_irq(&xtime_lock);
- do_gettimeofday(&txc->time);
- return(result);
-}
-
-asmlinkage long sys_adjtimex(struct timex *txc_p)
-{
- struct timex txc; /* Local copy of parameter */
- int ret;
-
- /* Copy the user data space into the kernel copy
- * structure. But bear in mind that the structures
- * may change
- */
- if(copy_from_user(&txc, txc_p, sizeof(struct timex)))
- return -EFAULT;
- ret = do_adjtimex(&txc);
- return copy_to_user(txc_p, &txc, sizeof(struct timex)) ? -EFAULT : ret;
-}
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/kernel/timer.c
--- a/linux-2.4.30-xen-sparse/kernel/timer.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,968 +0,0 @@
-/*
- * linux/kernel/timer.c
- *
- * Kernel internal timers, kernel timekeeping, basic process system calls
- *
- * Copyright (C) 1991, 1992 Linus Torvalds
- *
- * 1997-01-28 Modified by Finn Arne Gangstad to make timers scale better.
- *
- * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
- * "A Kernel Model for Precision Timekeeping" by Dave Mills
- * 1998-12-24 Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
- * serialize accesses to xtime/lost_ticks).
- * Copyright (C) 1998 Andrea Arcangeli
- * 1999-03-10 Improved NTP compatibility by Ulrich Windl
- */
-
-#include <linux/config.h>
-#include <linux/mm.h>
-#include <linux/timex.h>
-#include <linux/delay.h>
-#include <linux/smp_lock.h>
-#include <linux/interrupt.h>
-#include <linux/kernel_stat.h>
-
-#include <asm/uaccess.h>
-
-/*
- * Timekeeping variables
- */
-
-long tick = (1000000 + HZ/2) / HZ; /* timer interrupt period */
-
-/* The current time */
-struct timeval xtime __attribute__ ((aligned (16)));
-
-/* Don't completely fail for HZ > 500. */
-int tickadj = 500/HZ ? : 1; /* microsecs */
-
-DECLARE_TASK_QUEUE(tq_timer);
-DECLARE_TASK_QUEUE(tq_immediate);
-
-/*
- * phase-lock loop variables
- */
-/* TIME_ERROR prevents overwriting the CMOS clock */
-int time_state = TIME_OK; /* clock synchronization status */
-int time_status = STA_UNSYNC; /* clock status bits */
-long time_offset; /* time adjustment (us) */
-long time_constant = 2; /* pll time constant */
-long time_tolerance = MAXFREQ; /* frequency tolerance (ppm) */
-long time_precision = 1; /* clock precision (us) */
-long time_maxerror = NTP_PHASE_LIMIT; /* maximum error (us) */
-long time_esterror = NTP_PHASE_LIMIT; /* estimated error (us) */
-long time_phase; /* phase offset (scaled us) */
-long time_freq = ((1000000 + HZ/2) % HZ - HZ/2) << SHIFT_USEC;
- /* frequency offset (scaled ppm)*/
-long time_adj; /* tick adjust (scaled 1 / HZ) */
-long time_reftime; /* time at last adjustment (s) */
-
-long time_adjust;
-long time_adjust_step;
-
-unsigned long event;
-
-extern int do_setitimer(int, struct itimerval *, struct itimerval *);
-
-unsigned long volatile jiffies;
-
-unsigned int * prof_buffer;
-unsigned long prof_len;
-unsigned long prof_shift;
-
-/*
- * Event timer code
- */
-#define TVN_BITS 6
-#define TVR_BITS 8
-#define TVN_SIZE (1 << TVN_BITS)
-#define TVR_SIZE (1 << TVR_BITS)
-#define TVN_MASK (TVN_SIZE - 1)
-#define TVR_MASK (TVR_SIZE - 1)
-
-struct timer_vec {
- int index;
- struct list_head vec[TVN_SIZE];
-};
-
-struct timer_vec_root {
- int index;
- struct list_head vec[TVR_SIZE];
-};
-
-static struct timer_vec tv5;
-static struct timer_vec tv4;
-static struct timer_vec tv3;
-static struct timer_vec tv2;
-static struct timer_vec_root tv1;
-
-static struct timer_vec * const tvecs[] = {
- (struct timer_vec *)&tv1, &tv2, &tv3, &tv4, &tv5
-};
-
-static struct list_head * run_timer_list_running;
-
-#define NOOF_TVECS (sizeof(tvecs) / sizeof(tvecs[0]))
-
-void init_timervecs (void)
-{
- int i;
-
- for (i = 0; i < TVN_SIZE; i++) {
- INIT_LIST_HEAD(tv5.vec + i);
- INIT_LIST_HEAD(tv4.vec + i);
- INIT_LIST_HEAD(tv3.vec + i);
- INIT_LIST_HEAD(tv2.vec + i);
- }
- for (i = 0; i < TVR_SIZE; i++)
- INIT_LIST_HEAD(tv1.vec + i);
-}
-
-static unsigned long timer_jiffies;
-
-static inline void internal_add_timer(struct timer_list *timer)
-{
- /*
- * must be cli-ed when calling this
- */
- unsigned long expires = timer->expires;
- unsigned long idx = expires - timer_jiffies;
- struct list_head * vec;
-
- if (run_timer_list_running)
- vec = run_timer_list_running;
- else if (idx < TVR_SIZE) {
- int i = expires & TVR_MASK;
- vec = tv1.vec + i;
- } else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
- int i = (expires >> TVR_BITS) & TVN_MASK;
- vec = tv2.vec + i;
- } else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
- int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
- vec = tv3.vec + i;
- } else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
- int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
- vec = tv4.vec + i;
- } else if ((signed long) idx < 0) {
- /* can happen if you add a timer with expires == jiffies,
- * or you set a timer to go off in the past
- */
- vec = tv1.vec + tv1.index;
- } else if (idx <= 0xffffffffUL) {
- int i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
- vec = tv5.vec + i;
- } else {
- /* Can only get here on architectures with 64-bit jiffies */
- INIT_LIST_HEAD(&timer->list);
- return;
- }
- /*
- * Timers are FIFO!
- */
- list_add(&timer->list, vec->prev);
-}
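
A worked sketch of the bucketing above, assuming TVR_BITS == 8 and
TVN_BITS == 6 as defined earlier: a timer due 300 ticks out misses tv1
(idx >= 256) and lands in tv2 at slot (expires >> 8) & 63. Reduced to the
first two wheel levels (helper name hypothetical):

    static struct list_head *example_bucket(unsigned long expires,
                                            unsigned long now)
    {
        unsigned long idx = expires - now;

        if (idx < TVR_SIZE)                      /* 0..255 ticks out */
            return tv1.vec + (expires & TVR_MASK);
        if (idx < 1UL << (TVR_BITS + TVN_BITS))  /* up to 16383 ticks */
            return tv2.vec + ((expires >> TVR_BITS) & TVN_MASK);
        return NULL;  /* deeper levels follow the same pattern */
    }
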
-
-/* Initialize both explicitly - let's try to have them in the same cache line */
-spinlock_t timerlist_lock = SPIN_LOCK_UNLOCKED;
-
-#ifdef CONFIG_SMP
-volatile struct timer_list * volatile running_timer;
-#define timer_enter(t) do { running_timer = t; mb(); } while (0)
-#define timer_exit() do { running_timer = NULL; } while (0)
-#define timer_is_running(t) (running_timer == t)
-#define timer_synchronize(t) while (timer_is_running(t)) barrier()
-#else
-#define timer_enter(t) do { } while (0)
-#define timer_exit() do { } while (0)
-#endif
-
-void add_timer(struct timer_list *timer)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&timerlist_lock, flags);
- if (timer_pending(timer))
- goto bug;
- internal_add_timer(timer);
- spin_unlock_irqrestore(&timerlist_lock, flags);
- return;
-bug:
- spin_unlock_irqrestore(&timerlist_lock, flags);
- printk("bug: kernel timer added twice at %p.\n",
- __builtin_return_address(0));
-}
-
-static inline int detach_timer (struct timer_list *timer)
-{
- if (!timer_pending(timer))
- return 0;
- list_del(&timer->list);
- return 1;
-}
-
-int mod_timer(struct timer_list *timer, unsigned long expires)
-{
- int ret;
- unsigned long flags;
-
- spin_lock_irqsave(&timerlist_lock, flags);
- timer->expires = expires;
- ret = detach_timer(timer);
- internal_add_timer(timer);
- spin_unlock_irqrestore(&timerlist_lock, flags);
- return ret;
-}
-
-int del_timer(struct timer_list * timer)
-{
- int ret;
- unsigned long flags;
-
- spin_lock_irqsave(&timerlist_lock, flags);
- ret = detach_timer(timer);
- timer->list.next = timer->list.prev = NULL;
- spin_unlock_irqrestore(&timerlist_lock, flags);
- return ret;
-}
-
-#ifdef CONFIG_SMP
-void sync_timers(void)
-{
- spin_unlock_wait(&global_bh_lock);
-}
-
-/*
- * SMP-specific function to delete a periodic timer.
- * The caller must ensure by some means that the timer cannot be
- * restarted. Upon exit the timer is not queued and the handler is not
- * running on any CPU. It returns the number of times the timer was
- * deleted (for reference counting).
- */
-
-int del_timer_sync(struct timer_list * timer)
-{
- int ret = 0;
-
- for (;;) {
- unsigned long flags;
- int running;
-
- spin_lock_irqsave(&timerlist_lock, flags);
- ret += detach_timer(timer);
- timer->list.next = timer->list.prev = 0;
- running = timer_is_running(timer);
- spin_unlock_irqrestore(&timerlist_lock, flags);
-
- if (!running)
- break;
-
- timer_synchronize(timer);
- }
-
- return ret;
-}
-#endif
-
-
-static inline void cascade_timers(struct timer_vec *tv)
-{
- /* cascade all the timers from tv up one level */
- struct list_head *head, *curr, *next;
-
- head = tv->vec + tv->index;
- curr = head->next;
- /*
- * We are removing _all_ timers from the list, so we don't have to
- * detach them individually, just clear the list afterwards.
- */
- while (curr != head) {
- struct timer_list *tmp;
-
- tmp = list_entry(curr, struct timer_list, list);
- next = curr->next;
- list_del(curr); // not needed
- internal_add_timer(tmp);
- curr = next;
- }
- INIT_LIST_HEAD(head);
- tv->index = (tv->index + 1) & TVN_MASK;
-}
-
-static inline void run_timer_list(void)
-{
- spin_lock_irq(&timerlist_lock);
- while ((long)(jiffies - timer_jiffies) >= 0) {
- LIST_HEAD(queued);
- struct list_head *head, *curr;
- if (!tv1.index) {
- int n = 1;
- do {
- cascade_timers(tvecs[n]);
- } while (tvecs[n]->index == 1 && ++n < NOOF_TVECS);
- }
- run_timer_list_running = &queued;
-repeat:
- head = tv1.vec + tv1.index;
- curr = head->next;
- if (curr != head) {
- struct timer_list *timer;
- void (*fn)(unsigned long);
- unsigned long data;
-
- timer = list_entry(curr, struct timer_list, list);
- fn = timer->function;
- data= timer->data;
-
- detach_timer(timer);
- timer->list.next = timer->list.prev = NULL;
- timer_enter(timer);
- spin_unlock_irq(&timerlist_lock);
- fn(data);
- spin_lock_irq(&timerlist_lock);
- timer_exit();
- goto repeat;
- }
- run_timer_list_running = NULL;
- ++timer_jiffies;
- tv1.index = (tv1.index + 1) & TVR_MASK;
-
- curr = queued.next;
- while (curr != &queued) {
- struct timer_list *timer;
-
- timer = list_entry(curr, struct timer_list, list);
- curr = curr->next;
- internal_add_timer(timer);
- }
- }
- spin_unlock_irq(&timerlist_lock);
-}
-
-#ifdef CONFIG_NO_IDLE_HZ
-/*
- * Find out when the next timer event is due to happen. This
- * is used on S/390 to stop all activity when all cpus are idle.
- * And in XenoLinux to achieve the same.
- * The timerlist_lock must be acquired before calling this function.
- */
-struct timer_list *next_timer_event(void)
-{
- struct timer_list *nte, *tmp;
- struct list_head *lst;
- int i, j;
-
- /* Look for the next timer event in tv1. */
- i = 0;
- j = tvecs[0]->index;
- do {
- struct list_head *head = tvecs[0]->vec + j;
- if (!list_empty(head)) {
- nte = list_entry(head->next, struct timer_list, list);
- goto found;
- }
- j = (j + 1) & TVR_MASK;
- } while (j != tv1.index);
-
- /* No event found in tv1. Check tv2-tv5. */
- for (i = 1; i < NOOF_TVECS; i++) {
- j = tvecs[i]->index;
- do {
- nte = NULL;
- list_for_each(lst, tvecs[i]->vec + j) {
- tmp = list_entry(lst, struct timer_list, list);
- if (nte == NULL ||
- time_before(tmp->expires, nte->expires))
- nte = tmp;
- }
- if (nte)
- goto found;
- j = (j + 1) & TVN_MASK;
- } while (j != tvecs[i]->index);
- }
- return NULL;
-found:
- /* Found timer event in tvecs[i]->vec[j] */
- if (j < tvecs[i]->index && i < NOOF_TVECS-1) {
- /*
- * The search wrapped. We need to look at the next list
- * from tvecs[i+1] that would cascade into tvecs[i].
- */
- list_for_each(lst, tvecs[i+1]->vec+tvecs[i+1]->index) {
- tmp = list_entry(lst, struct timer_list, list);
- if (time_before(tmp->expires, nte->expires))
- nte = tmp;
- }
- }
- return nte;
-}
-#endif
-
-spinlock_t tqueue_lock = SPIN_LOCK_UNLOCKED;
-
-void tqueue_bh(void)
-{
- run_task_queue(&tq_timer);
-}
-
-void immediate_bh(void)
-{
- run_task_queue(&tq_immediate);
-}
-
-/*
- * this routine handles the overflow of the microsecond field
- *
- * The tricky bits of code to handle the accurate clock support
- * were provided by Dave Mills (Mills@xxxxxxxx) of NTP fame.
- * They were originally developed for SUN and DEC kernels.
- * All the kudos should go to Dave for this stuff.
- *
- */
-static void second_overflow(void)
-{
- long ltemp;
-
- /* Bump the maxerror field */
- time_maxerror += time_tolerance >> SHIFT_USEC;
- if ( time_maxerror > NTP_PHASE_LIMIT ) {
- time_maxerror = NTP_PHASE_LIMIT;
- time_status |= STA_UNSYNC;
- }
-
- /*
- * Leap second processing. If in leap-insert state at
- * the end of the day, the system clock is set back one
- * second; if in leap-delete state, the system clock is
- * set ahead one second. The microtime() routine or
- * external clock driver will insure that reported time
- * is always monotonic. The ugly divides should be
- * replaced.
- */
- switch (time_state) {
-
- case TIME_OK:
- if (time_status & STA_INS)
- time_state = TIME_INS;
- else if (time_status & STA_DEL)
- time_state = TIME_DEL;
- break;
-
- case TIME_INS:
- if (xtime.tv_sec % 86400 == 0) {
- xtime.tv_sec--;
- time_state = TIME_OOP;
- printk(KERN_NOTICE "Clock: inserting leap second 23:59:60 UTC\n");
- }
- break;
-
- case TIME_DEL:
- if ((xtime.tv_sec + 1) % 86400 == 0) {
- xtime.tv_sec++;
- time_state = TIME_WAIT;
- printk(KERN_NOTICE "Clock: deleting leap second 23:59:59 UTC\n");
- }
- break;
-
- case TIME_OOP:
- time_state = TIME_WAIT;
- break;
-
- case TIME_WAIT:
- if (!(time_status & (STA_INS | STA_DEL)))
- time_state = TIME_OK;
- }
-
- /*
- * Compute the phase adjustment for the next second. In
- * PLL mode, the offset is reduced by a fixed factor
- * times the time constant. In FLL mode the offset is
- * used directly. In either mode, the maximum phase
- * adjustment for each second is clamped so as to spread
- * the adjustment over not more than the number of
- * seconds between updates.
- */
- if (time_offset < 0) {
- ltemp = -time_offset;
- if (!(time_status & STA_FLL))
- ltemp >>= SHIFT_KG + time_constant;
- if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
- ltemp = (MAXPHASE / MINSEC) << SHIFT_UPDATE;
- time_offset += ltemp;
- time_adj = -ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
- } else {
- ltemp = time_offset;
- if (!(time_status & STA_FLL))
- ltemp >>= SHIFT_KG + time_constant;
- if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
- ltemp = (MAXPHASE / MINSEC) << SHIFT_UPDATE;
- time_offset -= ltemp;
- time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
- }
-
- /*
- * Compute the frequency estimate and additional phase
- * adjustment due to frequency error for the next
- * second. When the PPS signal is engaged, gnaw on the
- * watchdog counter and update the frequency computed by
- * the pll and the PPS signal.
- */
- pps_valid++;
- if (pps_valid == PPS_VALID) { /* PPS signal lost */
- pps_jitter = MAXTIME;
- pps_stabil = MAXFREQ;
- time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
- STA_PPSWANDER | STA_PPSERROR);
- }
- ltemp = time_freq + pps_freq;
- if (ltemp < 0)
- time_adj -= -ltemp >>
- (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
- else
- time_adj += ltemp >>
- (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
-
-#if HZ == 100
- /* Compensate for (HZ==100) != (1 << SHIFT_HZ).
- * Add 25% and 3.125% to get 128.125; => only 0.125% error (p. 14)
- */
- if (time_adj < 0)
- time_adj -= (-time_adj >> 2) + (-time_adj >> 5);
- else
- time_adj += (time_adj >> 2) + (time_adj >> 5);
-#endif
-}
-
-/* in the NTP reference this is called "hardclock()" */
-static void update_wall_time_one_tick(void)
-{
- if ( (time_adjust_step = time_adjust) != 0 ) {
- /* We are doing an adjtime thing.
- *
- * Prepare time_adjust_step to be within bounds.
- * Note that a positive time_adjust means we want the clock
- * to run faster.
- *
- * Limit the amount of the step to be in the range
- * -tickadj .. +tickadj
- */
- if (time_adjust > tickadj)
- time_adjust_step = tickadj;
- else if (time_adjust < -tickadj)
- time_adjust_step = -tickadj;
-
- /* Reduce by this step the amount of time left */
- time_adjust -= time_adjust_step;
- }
- xtime.tv_usec += tick + time_adjust_step;
- /*
- * Advance the phase, once it gets to one microsecond, then
- * advance the tick more.
- */
- time_phase += time_adj;
- if (time_phase <= -FINEUSEC) {
- long ltemp = -time_phase >> SHIFT_SCALE;
- time_phase += ltemp << SHIFT_SCALE;
- xtime.tv_usec -= ltemp;
- }
- else if (time_phase >= FINEUSEC) {
- long ltemp = time_phase >> SHIFT_SCALE;
- time_phase -= ltemp << SHIFT_SCALE;
- xtime.tv_usec += ltemp;
- }
-}
-
-/*
- * Using a loop looks inefficient, but "ticks" is
- * usually just one (we shouldn't be losing ticks,
- * we're doing it this way mainly for interrupt
- * latency reasons, not because we think we'll
- * have lots of lost timer ticks).
- */
-static void update_wall_time(unsigned long ticks)
-{
- do {
- ticks--;
- update_wall_time_one_tick();
- } while (ticks);
-
- while (xtime.tv_usec >= 1000000) {
- xtime.tv_usec -= 1000000;
- xtime.tv_sec++;
- second_overflow();
- }
-}
-
-static inline void do_process_times(struct task_struct *p,
- unsigned long user, unsigned long system)
-{
- unsigned long psecs;
-
- psecs = (p->times.tms_utime += user);
- psecs += (p->times.tms_stime += system);
- if (psecs / HZ > p->rlim[RLIMIT_CPU].rlim_cur) {
- /* Send SIGXCPU every second.. */
- if (!(psecs % HZ))
- send_sig(SIGXCPU, p, 1);
- /* and SIGKILL when we go over max.. */
- if (psecs / HZ > p->rlim[RLIMIT_CPU].rlim_max)
- send_sig(SIGKILL, p, 1);
- }
-}
-
-static inline void do_it_virt(struct task_struct * p, unsigned long ticks)
-{
- unsigned long it_virt = p->it_virt_value;
-
- if (it_virt) {
- it_virt -= ticks;
- if (!it_virt) {
- it_virt = p->it_virt_incr;
- send_sig(SIGVTALRM, p, 1);
- }
- p->it_virt_value = it_virt;
- }
-}
-
-static inline void do_it_prof(struct task_struct *p)
-{
- unsigned long it_prof = p->it_prof_value;
-
- if (it_prof) {
- if (--it_prof == 0) {
- it_prof = p->it_prof_incr;
- send_sig(SIGPROF, p, 1);
- }
- p->it_prof_value = it_prof;
- }
-}
-
-void update_one_process(struct task_struct *p, unsigned long user,
- unsigned long system, int cpu)
-{
- p->per_cpu_utime[cpu] += user;
- p->per_cpu_stime[cpu] += system;
- do_process_times(p, user, system);
- do_it_virt(p, user);
- do_it_prof(p);
-}
-
-/*
- * Called from the timer interrupt handler to charge one tick to the current
- * process. user_tick is 1 if the tick is user time, 0 for system.
- */
-void update_process_times(int user_tick)
-{
- struct task_struct *p = current;
- int cpu = smp_processor_id(), system = user_tick ^ 1;
-
- update_one_process(p, user_tick, system, cpu);
- if (p->pid) {
- if (--p->counter <= 0) {
- p->counter = 0;
- /*
- * SCHED_FIFO is priority preemption, so this is
- * not the place to decide whether to reschedule a
- * SCHED_FIFO task or not - Bhavesh Davda
- */
- if (p->policy != SCHED_FIFO) {
- p->need_resched = 1;
- }
- }
- if (p->nice > 0)
- kstat.per_cpu_nice[cpu] += user_tick;
- else
- kstat.per_cpu_user[cpu] += user_tick;
- kstat.per_cpu_system[cpu] += system;
- } else if (local_bh_count(cpu) || local_irq_count(cpu) > 1)
- kstat.per_cpu_system[cpu] += system;
-}
-
-/*
- * Called from the timer interrupt handler to charge a couple of ticks
- * to the current process.
- */
-void update_process_times_us(int user_ticks, int system_ticks)
-{
- struct task_struct *p = current;
- int cpu = smp_processor_id();
-
- update_one_process(p, user_ticks, system_ticks, cpu);
- if (p->pid) {
- p->counter -= user_ticks + system_ticks;
- if (p->counter <= 0) {
- p->counter = 0;
- p->need_resched = 1;
- }
- if (p->nice > 0)
- kstat.per_cpu_nice[cpu] += user_ticks;
- else
- kstat.per_cpu_user[cpu] += user_ticks;
- kstat.per_cpu_system[cpu] += system_ticks;
- } else if (local_bh_count(cpu) || local_irq_count(cpu) > 1)
- kstat.per_cpu_system[cpu] += system_ticks;
-}
-
-/*
- * Nr of active tasks - counted in fixed-point numbers
- */
-static unsigned long count_active_tasks(void)
-{
- struct task_struct *p;
- unsigned long nr = 0;
-
- read_lock(&tasklist_lock);
- for_each_task(p) {
- if ((p->state == TASK_RUNNING ||
- (p->state & TASK_UNINTERRUPTIBLE)))
- nr += FIXED_1;
- }
- read_unlock(&tasklist_lock);
- return nr;
-}
-
-/*
- * Hmm.. Changed this, as the GNU make sources (load.c) seem to
- * imply that avenrun[] is the standard name for this kind of thing.
- * Nothing else seems to be standardized: the fractional size etc
- * all seem to differ on different machines.
- */
-unsigned long avenrun[3];
-
-static inline void calc_load(unsigned long ticks)
-{
- unsigned long active_tasks; /* fixed-point */
- static int count = LOAD_FREQ;
-
- count -= ticks;
- while (count < 0) {
- count += LOAD_FREQ;
- active_tasks = count_active_tasks();
- CALC_LOAD(avenrun[0], EXP_1, active_tasks);
- CALC_LOAD(avenrun[1], EXP_5, active_tasks);
- CALC_LOAD(avenrun[2], EXP_15, active_tasks);
- }
-}
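
/*
 * A stand-alone worked example of the fixed-point decay performed by
 * CALC_LOAD() above. FSHIFT, FIXED_1 and EXP_1 mirror the 2.4
 * <linux/sched.h> values; with 4 tasks permanently runnable, avenrun[0]
 * climbs toward 4.00 over successive 5-second samples.
 */
#include <stdio.h>

#define FSHIFT	11
#define FIXED_1	(1 << FSHIFT)		/* 1.0 in fixed point */
#define EXP_1	1884			/* exp(-5s/1min) in fixed point */

#define CALC_LOAD(load, exp, n) \
	(load) *= (exp); (load) += (n) * (FIXED_1 - (exp)); (load) >>= FSHIFT;

int main(void)
{
	unsigned long load = 0;
	unsigned long active = 4 * FIXED_1;	/* 4 runnable tasks */
	int i;

	for (i = 1; i <= 12; i++) {		/* one minute of samples */
		CALC_LOAD(load, EXP_1, active);
		printf("sample %2d: load = %lu.%02lu\n", i,
		       load >> FSHIFT,
		       ((load & (FIXED_1 - 1)) * 100) >> FSHIFT);
	}
	return 0;
}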
-
-/* jiffies at the most recent update of wall time */
-unsigned long wall_jiffies;
-
-/*
- * This spinlock protects us from races in SMP while playing with xtime. -arca
- */
-rwlock_t xtime_lock = RW_LOCK_UNLOCKED;
-
-static inline void update_times(void)
-{
- unsigned long ticks;
-
- /*
- * update_times() is run from the raw timer_bh handler so we
- * just know that the irqs are locally enabled and so we don't
- * need to save/restore the flags of the local CPU here. -arca
- */
- write_lock_irq(&xtime_lock);
- vxtime_lock();
-
- ticks = jiffies - wall_jiffies;
- if (ticks) {
- wall_jiffies += ticks;
- update_wall_time(ticks);
- }
- vxtime_unlock();
- write_unlock_irq(&xtime_lock);
- calc_load(ticks);
-}
-
-void timer_bh(void)
-{
- update_times();
- run_timer_list();
-}
-
-void do_timer(struct pt_regs *regs)
-{
- (*(unsigned long *)&jiffies)++;
-#ifndef CONFIG_SMP
- /* SMP process accounting uses the local APIC timer */
-
- update_process_times(user_mode(regs));
-#endif
- mark_bh(TIMER_BH);
- if (TQ_ACTIVE(tq_timer))
- mark_bh(TQUEUE_BH);
-}
-
-void do_timer_ticks(int ticks)
-{
- (*(unsigned long *)&jiffies) += ticks;
- mark_bh(TIMER_BH);
- if (TQ_ACTIVE(tq_timer))
- mark_bh(TQUEUE_BH);
-}
-
-#if !defined(__alpha__) && !defined(__ia64__)
-
-/*
- * For backwards compatibility? This can be done in libc so Alpha
- * and all newer ports shouldn't need it.
- */
-asmlinkage unsigned long sys_alarm(unsigned int seconds)
-{
- struct itimerval it_new, it_old;
- unsigned int oldalarm;
-
- it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
- it_new.it_value.tv_sec = seconds;
- it_new.it_value.tv_usec = 0;
- do_setitimer(ITIMER_REAL, &it_new, &it_old);
- oldalarm = it_old.it_value.tv_sec;
- /* ehhh.. We can't return 0 if we have an alarm pending.. */
- /* And we'd better return too much than too little anyway */
- if (it_old.it_value.tv_usec)
- oldalarm++;
- return oldalarm;
-}
-
-#endif
-
-#ifndef __alpha__
-
-/*
- * The Alpha uses getxpid, getxuid, and getxgid instead. Maybe this
- * should be moved into arch/i386 instead?
- */
-
-/**
- * sys_getpid - return the thread group id of the current process
- *
- * Note, despite the name, this returns the tgid not the pid. The tgid and
- * the pid are identical unless CLONE_THREAD was specified on clone() in
- * which case the tgid is the same in all threads of the same group.
- *
- * This is SMP safe as current->tgid does not change.
- */
-asmlinkage long sys_getpid(void)
-{
- return current->tgid;
-}
-
-/*
- * This is not strictly SMP safe: p_opptr could change
- * from under us. However, rather than getting any lock
- * we can use an optimistic algorithm: get the parent
- * pid, and go back and check that the parent is still
- * the same. If it has changed (which is extremely unlikely
- * indeed), we just try again..
- *
- * NOTE! This depends on the fact that even if we _do_
- * get an old value of "parent", we can happily dereference
- * the pointer: we just can't necessarily trust the result
- * until we know that the parent pointer is valid.
- *
- * The "mb()" macro is a memory barrier - a synchronizing
- * event. It also makes sure that gcc doesn't optimize
- * away the necessary memory references.. The barrier doesn't
- * have to have all that strong semantics: on x86 we don't
- * really require a synchronizing instruction, for example.
- * The barrier is more important for code generation than
- * for any real memory ordering semantics (even if there is
- * a small window for a race, using the old pointer is
- * harmless for a while).
- */
-asmlinkage long sys_getppid(void)
-{
- int pid;
- struct task_struct * me = current;
- struct task_struct * parent;
-
- parent = me->p_opptr;
- for (;;) {
- pid = parent->pid;
-#if CONFIG_SMP
-{
- struct task_struct *old = parent;
- mb();
- parent = me->p_opptr;
- if (old != parent)
- continue;
-}
-#endif
- break;
- }
- return pid;
-}
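
/*
 * The optimistic read in sys_getppid(), restated in user space: read a
 * field through a pointer, re-read the pointer after a barrier, and retry
 * if it moved. demo_task and __sync_synchronize() are stand-ins for
 * task_struct and the kernel's mb(); this is a sketch of the pattern, not
 * of the kernel API.
 */
#include <stdio.h>

struct demo_task { int pid; struct demo_task *parent; };

static int demo_getppid(struct demo_task *me)
{
	struct demo_task *parent = me->parent;
	int pid;

	for (;;) {
		struct demo_task *old = parent;
		pid = parent->pid;
		__sync_synchronize();	/* mb(): order the two reads */
		parent = me->parent;
		if (old == parent)
			break;	/* pointer was stable, pid can be trusted */
		/* else: reparented while we looked - try again */
	}
	return pid;
}

int main(void)
{
	struct demo_task init_task = { 1, &init_task };
	struct demo_task me = { 42, &init_task };
	printf("ppid = %d\n", demo_getppid(&me));	/* 1 */
	return 0;
}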
-
-asmlinkage long sys_getuid(void)
-{
- /* Only we change this so SMP safe */
- return current->uid;
-}
-
-asmlinkage long sys_geteuid(void)
-{
- /* Only we change this so SMP safe */
- return current->euid;
-}
-
-asmlinkage long sys_getgid(void)
-{
- /* Only we change this so SMP safe */
- return current->gid;
-}
-
-asmlinkage long sys_getegid(void)
-{
- /* Only we change this so SMP safe */
- return current->egid;
-}
-
-#endif
-
-/* Thread ID - the internal kernel "pid" */
-asmlinkage long sys_gettid(void)
-{
- return current->pid;
-}
-
-asmlinkage long sys_nanosleep(struct timespec *rqtp, struct timespec *rmtp)
-{
- struct timespec t;
- unsigned long expire;
-
- if(copy_from_user(&t, rqtp, sizeof(struct timespec)))
- return -EFAULT;
-
- if (t.tv_nsec >= 1000000000L || t.tv_nsec < 0 || t.tv_sec < 0)
- return -EINVAL;
-
-
- if (t.tv_sec == 0 && t.tv_nsec <= 2000000L &&
- current->policy != SCHED_OTHER)
- {
- /*
- * Short delay requests up to 2 ms will be handled with
- * high precision by a busy wait for all real-time processes.
- *
- * It's important on SMP not to do this holding locks.
- */
- udelay((t.tv_nsec + 999) / 1000);
- return 0;
- }
-
- expire = timespec_to_jiffies(&t) + (t.tv_sec || t.tv_nsec);
-
- current->state = TASK_INTERRUPTIBLE;
- expire = schedule_timeout(expire);
-
- if (expire) {
- if (rmtp) {
- jiffies_to_timespec(expire, &t);
- if (copy_to_user(rmtp, &t, sizeof(struct timespec)))
- return -EFAULT;
- }
- return -EINTR;
- }
- return 0;
-}
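
/*
 * A sketch of the timeout rounding used above, assuming HZ = 100 (the
 * i386 default of this era). The conversion rounds the nanosecond part up
 * to whole jiffies, and "+ (t.tv_sec || t.tv_nsec)" adds one more jiffy
 * for any non-zero request, so the sleep can end late but never early.
 * demo_timespec_to_jiffies() is an illustrative stand-in.
 */
#include <stdio.h>

#define DEMO_HZ 100
#define DEMO_NSEC_PER_JIFFY (1000000000L / DEMO_HZ)

static unsigned long demo_timespec_to_jiffies(long sec, long nsec)
{
	return sec * DEMO_HZ
	       + (nsec + DEMO_NSEC_PER_JIFFY - 1) / DEMO_NSEC_PER_JIFFY;
}

int main(void)
{
	long sec = 0, nsec = 15000000L;		/* ask for 15 ms */
	unsigned long expire = demo_timespec_to_jiffies(sec, nsec)
			       + (sec || nsec);
	printf("sleep >= %lu jiffies\n", expire);	/* 2 + 1 = 3 */
	return 0;
}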
-
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/mkbuildtree
--- a/linux-2.4.30-xen-sparse/mkbuildtree Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,283 +0,0 @@
-#!/bin/bash
-
-# mkbuildtree <build tree>
-#
-# Creates symbolic links in <build tree> for the sparse tree
-# in the current directory.
-
-# Script to determine the relative path between two directories.
-# Copyright (c) D. J. Hawkey Jr. 2002
-# Fixed for Xen project by K. Fraser in 2003.
-abs_to_rel ()
-{
- local CWD SRCPATH
-
- if [ "$1" != "/" -a "${1##*[^/]}" = "/" ]; then
- SRCPATH=${1%?}
- else
- SRCPATH=$1
- fi
- if [ "$2" != "/" -a "${2##*[^/]}" = "/" ]; then
- DESTPATH=${2%?}
- else
- DESTPATH=$2
- fi
-
- CWD=$PWD
- [ "${1%%[^/]*}" != "/" ] && cd $1 && SRCPATH=$PWD
- [ "${2%%[^/]*}" != "/" ] && cd $2 && DESTPATH=$PWD
- [ "$CWD" != "$PWD" ] && cd $CWD
-
- BASEPATH=$SRCPATH
-
- [ "$SRCPATH" = "$DESTPATH" ] && DESTPATH="." && return
- [ "$SRCPATH" = "/" ] && DESTPATH=${DESTPATH#?} && return
-
- while [ "$BASEPATH/" != "${DESTPATH%${DESTPATH#$BASEPATH/}}" ]; do
- BASEPATH=${BASEPATH%/*}
- done
-
- SRCPATH=${SRCPATH#$BASEPATH}
- DESTPATH=${DESTPATH#$BASEPATH}
- DESTPATH=${DESTPATH#?}
- while [ -n "$SRCPATH" ]; do
- SRCPATH=${SRCPATH%/*}
- DESTPATH="../$DESTPATH"
- done
-
- [ -z "$BASEPATH" ] && BASEPATH="/"
- [ "${DESTPATH##*[^/]}" = "/" ] && DESTPATH=${DESTPATH%?}
-}
-
-# relative_lndir <target_dir>
-# Creates a tree of symlinks in the current working directory that mirror
-# real files in <target_dir>. <target_dir> should be relative to the current
-# working directory. Symlinks in <target_dir> are ignored. Source-control files
-# are ignored.
-relative_lndir ()
-{
- local SYMLINK_DIR REAL_DIR pref i j
- SYMLINK_DIR=$PWD
- REAL_DIR=$1
- (
- cd $REAL_DIR
- for i in `find . -type d | grep -v SCCS`; do
- [ -d $SYMLINK_DIR/$i ] || mkdir -p $SYMLINK_DIR/$i
- (
- cd $i
- pref=`echo $i | sed -e 's#/[^/]*#../#g' -e 's#^\.##'`
- for j in `find . -maxdepth 1 -type f -o -type l`; do
- ln -sf ${pref}${REAL_DIR}/$i/$j ${SYMLINK_DIR}/$i/$j
- done
- )
- done
- )
-}
-
-[ "$1" == "" ] && { echo "Syntax: $0 <linux tree to xenify>"; exit 1; }
-
-# Get absolute path to the destination directory
-pushd . >/dev/null
-cd ${1} || { echo "cannot cd to ${1}"; exit 1; }
-AD=$PWD
-popd >/dev/null
-
-# Get absolute path to the source directory
-AS=`pwd`
-
-# Get path to source, relative to destination
-abs_to_rel ${AD} ${AS}
-RS=$DESTPATH
-
-# Remove old copies of files and directories at the destination
-for i in `find . -type f -o -type l` ; do rm -f ${AD}/${i#./} ; done
-
-# We now work from the destination directory
-cd ${AD} || { echo "cannot cd to ${AD}"; exit 1; }
-
-# Remove old symlinks
-for i in `find . -type l`; do rm -f $i; done
-
-# Create symlinks of files and directories which exist in the sparse source
-relative_lndir ${RS}
-rm -f mkbuildtree
-
-set ${RS}/../linux-2.6.*-xen-sparse
-[ "$1" == "${RS}/../linux-2.6.*-xen-parse" ] && { echo "no Linux 2.6 sparse
tree at ${RS}/../linux-2.6.*-xen-sparse"; exit 1; }
-LINUX_26="$1"
-
-
-# Create links to the shared definitions of the Xen interfaces.
-rm -rf ${AD}/include/asm-xen/xen-public
-mkdir ${AD}/include/asm-xen/xen-public
-cd ${AD}/include/asm-xen/xen-public
-relative_lndir ../../../${RS}/../xen/include/public
-
-# Create a link to the shared definitions for the control interface
-cd ${AD}/include/asm-xen
-
-## Symlinks for files:
-## - which are identical in the i386 and xen-i386 architecture-dependent
-## subdirectories.
-## - which are identical in the Linux 2.6 and Linux 2.4 ports.
-
-cd ${AD}/include/asm-xen
-ln -sf ../asm-i386/a.out.h
-ln -sf ../asm-i386/apicdef.h
-ln -sf ../asm-i386/apic.h
-ln -sf ../asm-i386/atomic.h
-ln -sf ../asm-i386/bitops.h
-ln -sf ../asm-i386/boot.h
-ln -sf ../asm-i386/byteorder.h
-ln -sf ../asm-i386/cache.h
-ln -sf ../asm-i386/checksum.h
-ln -sf ../asm-i386/cpufeature.h
-ln -sf ../asm-i386/current.h
-ln -sf ../asm-i386/debugreg.h
-ln -sf ../asm-i386/delay.h
-ln -sf ../asm-i386/div64.h
-ln -sf ../asm-i386/dma.h
-ln -sf ../asm-i386/elf.h
-ln -sf ../asm-i386/errno.h
-ln -sf ../asm-i386/fcntl.h
-ln -sf ../asm-i386/floppy.h
-ln -sf ../asm-i386/hardirq.h
-ln -sf ../asm-i386/hdreg.h
-ln -sf ../asm-i386/i387.h
-ln -sf ../asm-i386/ide.h
-ln -sf ../asm-i386/init.h
-ln -sf ../asm-i386/io_apic.h
-ln -sf ../asm-i386/ioctl.h
-ln -sf ../asm-i386/ioctls.h
-ln -sf ../asm-i386/ipcbuf.h
-ln -sf ../asm-i386/ipc.h
-ln -sf ../asm-i386/kmap_types.h
-ln -sf ../asm-i386/ldt.h
-ln -sf ../asm-i386/linux_logo.h
-ln -sf ../asm-i386/locks.h
-ln -sf ../asm-i386/math_emu.h
-ln -sf ../asm-i386/mc146818rtc.h
-ln -sf ../asm-i386/mca_dma.h
-ln -sf ../asm-i386/mman.h
-ln -sf ../asm-i386/mmu.h
-ln -sf ../asm-i386/mmx.h
-ln -sf ../asm-i386/mpspec.h
-ln -sf ../asm-i386/msgbuf.h
-ln -sf ../asm-i386/mtrr.h
-ln -sf ../asm-i386/namei.h
-ln -sf ../asm-i386/param.h
-ln -sf ../asm-i386/parport.h
-ln -sf ../asm-i386/pgtable-3level.h
-ln -sf ../asm-i386/poll.h
-ln -sf ../asm-i386/posix_types.h
-ln -sf ../asm-i386/ptrace.h
-ln -sf ../asm-i386/resource.h
-ln -sf ../asm-i386/rwlock.h
-ln -sf ../asm-i386/rwsem.h
-ln -sf ../asm-i386/scatterlist.h
-ln -sf ../asm-i386/semaphore.h
-ln -sf ../asm-i386/sembuf.h
-ln -sf ../asm-i386/serial.h
-ln -sf ../asm-i386/setup.h
-ln -sf ../asm-i386/shmbuf.h
-ln -sf ../asm-i386/shmparam.h
-ln -sf ../asm-i386/sigcontext.h
-ln -sf ../asm-i386/siginfo.h
-ln -sf ../asm-i386/signal.h
-ln -sf ../asm-i386/smplock.h
-ln -sf ../asm-i386/socket.h
-ln -sf ../asm-i386/sockios.h
-ln -sf ../asm-i386/softirq.h
-ln -sf ../asm-i386/spinlock.h
-ln -sf ../asm-i386/statfs.h
-ln -sf ../asm-i386/stat.h
-ln -sf ../asm-i386/string-486.h
-ln -sf ../asm-i386/string.h
-ln -sf ../asm-i386/termbits.h
-ln -sf ../asm-i386/termios.h
-ln -sf ../asm-i386/timex.h
-ln -sf ../asm-i386/tlb.h
-ln -sf ../asm-i386/types.h
-ln -sf ../asm-i386/uaccess.h
-ln -sf ../asm-i386/ucontext.h
-ln -sf ../asm-i386/unaligned.h
-ln -sf ../asm-i386/unistd.h
-ln -sf ../asm-i386/user.h
-ln -sf ../asm-i386/vm86.h
-ln -sf ../../${LINUX_26}/include/asm-xen/balloon.h
-ln -sf ../../${LINUX_26}/include/asm-xen/ctrl_if.h
-ln -sf ../../${LINUX_26}/include/asm-xen/evtchn.h
-ln -sf ../../${LINUX_26}/include/asm-xen/hypervisor.h
-ln -sf ../../${LINUX_26}/include/asm-xen/multicall.h
-ln -sf ../../${LINUX_26}/include/asm-xen/xen_proc.h
-ln -sf ../../${LINUX_26}/include/asm-xen/asm-i386/synch_bitops.h
-
-mkdir -p linux-public && cd linux-public
-ln -sf ../../../${LINUX_26}/include/asm-xen/linux-public/privcmd.h
-ln -sf ../../../${LINUX_26}/include/asm-xen/linux-public/suspend.h
-
-cd ${AD}/arch/xen/kernel
-ln -sf ../../i386/kernel/i387.c
-ln -sf ../../i386/kernel/init_task.c
-ln -sf ../../i386/kernel/pci-i386.c
-ln -sf ../../i386/kernel/pci-i386.h
-ln -sf ../../i386/kernel/ptrace.c
-ln -sf ../../i386/kernel/semaphore.c
-ln -sf ../../i386/kernel/sys_i386.c
-ln -sf ../../../${LINUX_26}/arch/xen/kernel/ctrl_if.c
-ln -sf ../../../${LINUX_26}/arch/xen/kernel/evtchn.c
-ln -sf ../../../${LINUX_26}/arch/xen/kernel/fixup.c
-ln -sf ../../../${LINUX_26}/arch/xen/kernel/reboot.c
-ln -sf ../../../${LINUX_26}/arch/xen/kernel/skbuff.c
-ln -sf ../../../${LINUX_26}/arch/xen/i386/kernel/ioport.c
-ln -sf ../../../${LINUX_26}/arch/xen/i386/kernel/pci-dma.c
-
-cd ${AD}/arch/xen/lib
-ln -sf ../../i386/lib/checksum.S
-ln -sf ../../i386/lib/dec_and_lock.c
-ln -sf ../../i386/lib/getuser.S
-ln -sf ../../i386/lib/iodebug.c
-ln -sf ../../i386/lib/memcpy.c
-ln -sf ../../i386/lib/mmx.c
-ln -sf ../../i386/lib/old-checksum.c
-ln -sf ../../i386/lib/strstr.c
-ln -sf ../../i386/lib/usercopy.c
-ln -sf ../../../${LINUX_26}/arch/xen/kernel/xen_proc.c
-
-cd ${AD}/arch/xen/mm
-ln -sf ../../i386/mm/extable.c
-ln -sf ../../i386/mm/pageattr.c
-ln -sf ../../../${LINUX_26}/arch/xen/i386/mm/hypervisor.c
-
-cd ${AD}/arch/xen/drivers/balloon
-ln -sf ../../../../${LINUX_26}/drivers/xen/balloon/balloon.c
-
-cd ${AD}/arch/xen/drivers/console
-ln -sf ../../../../${LINUX_26}/drivers/xen/console/console.c
-
-cd ${AD}/arch/xen/drivers/dom0
-ln -sf ../../../../${LINUX_26}/drivers/xen/privcmd/privcmd.c core.c
-
-cd ${AD}/arch/xen/drivers/evtchn
-ln -sf ../../../../${LINUX_26}/drivers/xen/evtchn/evtchn.c
-
-cd ${AD}/arch/xen/drivers/netif/frontend
-ln -sf ../../../../../${LINUX_26}/drivers/xen/netfront/netfront.c main.c
-
-cd ${AD}/arch/xen/drivers/netif/backend
-ln -sf ../../../../../${LINUX_26}/drivers/xen/netback/common.h
-ln -sf ../../../../../${LINUX_26}/drivers/xen/netback/control.c
-ln -sf ../../../../../${LINUX_26}/drivers/xen/netback/interface.c
-ln -sf ../../../../../${LINUX_26}/drivers/xen/netback/netback.c main.c
-
-cd ${AD}/arch/xen/drivers/blkif/backend
-ln -sf ../../../../../${LINUX_26}/drivers/xen/blkback/common.h
-ln -sf ../../../../../${LINUX_26}/drivers/xen/blkback/blkback.c main.c
-ln -sf ../../../../../${LINUX_26}/drivers/xen/blkback/control.c
-ln -sf ../../../../../${LINUX_26}/drivers/xen/blkback/interface.c
-ln -sf ../../../../../${LINUX_26}/drivers/xen/blkback/vbd.c
-
-cd ${AD}/arch/xen/drivers/blkif/frontend
-ln -sf ../../../../../${LINUX_26}/drivers/xen/blkfront/blkfront.c
-
-
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/mm/highmem.c
--- a/linux-2.4.30-xen-sparse/mm/highmem.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,462 +0,0 @@
-/*
- * High memory handling common code and variables.
- *
- * (C) 1999 Andrea Arcangeli, SuSE GmbH, andrea@xxxxxxx
- * Gerhard Wichert, Siemens AG, Gerhard.Wichert@xxxxxxxxxxxxxx
- *
- *
- * Redesigned the x86 32-bit VM architecture to deal with
- * 64-bit physical space. With current x86 CPUs this
- * means up to 64 Gigabytes physical RAM.
- *
- * Rewrote high memory support to move the page cache into
- * high memory. Implemented permanent (schedulable) kmaps
- * based on Linus' idea.
- *
- * Copyright (C) 1999 Ingo Molnar <mingo@xxxxxxxxxx>
- */
-
-#include <linux/mm.h>
-#include <linux/pagemap.h>
-#include <linux/highmem.h>
-#include <linux/swap.h>
-#include <linux/slab.h>
-
-/*
- * Virtual_count is not a pure "count".
- * 0 means that it is not mapped, and has not been mapped
- * since a TLB flush - it is usable.
- * 1 means that there are no users, but it has been mapped
- * since the last TLB flush - so we can't use it.
- * n means that there are (n-1) current users of it.
- */
-static int pkmap_count[LAST_PKMAP];
-static unsigned int last_pkmap_nr;
-static spinlock_cacheline_t kmap_lock_cacheline = {SPIN_LOCK_UNLOCKED};
-#define kmap_lock kmap_lock_cacheline.lock
-
-pte_t * pkmap_page_table;
-
-static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);
-
-static void flush_all_zero_pkmaps(void)
-{
- int i;
-
- flush_cache_all();
-
- for (i = 0; i < LAST_PKMAP; i++) {
- struct page *page;
-
- /*
- * zero means we don't have anything to do,
- * >1 means that it is still in use. Only
- * a count of 1 means that it is free but
- * needs to be unmapped
- */
- if (pkmap_count[i] != 1)
- continue;
- pkmap_count[i] = 0;
-
- /* sanity check */
- if (pte_none(pkmap_page_table[i]))
- BUG();
-
- /*
- * Don't need an atomic fetch-and-clear op here;
- * no-one has the page mapped, and cannot get at
- * its virtual address (and hence PTE) without first
- * getting the kmap_lock (which is held here).
- * So no dangers, even with speculative execution.
- */
- page = pte_page(pkmap_page_table[i]);
- pte_clear(&pkmap_page_table[i]);
-
- page->virtual = NULL;
- }
- flush_tlb_all();
-}
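
/*
 * A toy model of the pkmap_count[] states described above: 0 = free and
 * flushed, 1 = free but possibly still cached in some TLB (unusable until
 * a global flush), n = (n-1) active users. The demo_* names are stand-ins
 * and the array is shrunk for the example.
 */
#include <stdio.h>

#define DEMO_LAST_PKMAP 4
static int demo_pkmap_count[DEMO_LAST_PKMAP];

static void demo_flush_all_zero_pkmaps(void)
{
	int i;
	for (i = 0; i < DEMO_LAST_PKMAP; i++)
		if (demo_pkmap_count[i] == 1)
			demo_pkmap_count[i] = 0;   /* TLB flushed: reusable */
}

int main(void)
{
	int slot = 0;

	demo_pkmap_count[slot] = 1;	/* map_new_virtual(): mapped, no users */
	demo_pkmap_count[slot]++;	/* kmap_high(): first user */
	printf("users: %d\n", demo_pkmap_count[slot] - 1);	/* 1 */

	demo_pkmap_count[slot]--;	/* kunmap_high(): no users, still 1 */
	printf("after kunmap: %d\n", demo_pkmap_count[slot]);	/* 1 */

	demo_flush_all_zero_pkmaps();	/* only a flush makes the slot free */
	printf("after flush: %d\n", demo_pkmap_count[slot]);	/* 0 */
	return 0;
}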
-
-static inline unsigned long map_new_virtual(struct page *page, int nonblocking)
-{
- unsigned long vaddr;
- int count;
-
-start:
- count = LAST_PKMAP;
- /* Find an empty entry */
- for (;;) {
- last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK;
- if (!last_pkmap_nr) {
- flush_all_zero_pkmaps();
- count = LAST_PKMAP;
- }
- if (!pkmap_count[last_pkmap_nr])
- break; /* Found a usable entry */
- if (--count)
- continue;
-
- if (nonblocking)
- return 0;
-
- /*
- * Sleep for somebody else to unmap their entries
- */
- {
- DECLARE_WAITQUEUE(wait, current);
-
- current->state = TASK_UNINTERRUPTIBLE;
- add_wait_queue(&pkmap_map_wait, &wait);
- spin_unlock(&kmap_lock);
- schedule();
- remove_wait_queue(&pkmap_map_wait, &wait);
- spin_lock(&kmap_lock);
-
- /* Somebody else might have mapped it while we slept */
- if (page->virtual)
- return (unsigned long) page->virtual;
-
- /* Re-start */
- goto start;
- }
- }
- vaddr = PKMAP_ADDR(last_pkmap_nr);
- set_pte(&(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
- XEN_flush_page_update_queue();
-
- pkmap_count[last_pkmap_nr] = 1;
- page->virtual = (void *) vaddr;
-
- return vaddr;
-}
-
-void kmap_flush_unused(void)
-{
- spin_lock(&kmap_lock);
- flush_all_zero_pkmaps();
- spin_unlock(&kmap_lock);
-}
-
-void fastcall *kmap_high(struct page *page, int nonblocking)
-{
- unsigned long vaddr;
-
- /*
- * For highmem pages, we can't trust "virtual" until
- * after we have the lock.
- *
- * We cannot call this from interrupts, as it may block
- */
- spin_lock(&kmap_lock);
- vaddr = (unsigned long) page->virtual;
- if (!vaddr) {
- vaddr = map_new_virtual(page, nonblocking);
- if (!vaddr)
- goto out;
- }
- pkmap_count[PKMAP_NR(vaddr)]++;
- if (pkmap_count[PKMAP_NR(vaddr)] < 2)
- BUG();
- out:
- spin_unlock(&kmap_lock);
- return (void*) vaddr;
-}
-
-void fastcall kunmap_high(struct page *page)
-{
- unsigned long vaddr;
- unsigned long nr;
- int need_wakeup;
-
- spin_lock(&kmap_lock);
- vaddr = (unsigned long) page->virtual;
- if (!vaddr)
- BUG();
- nr = PKMAP_NR(vaddr);
-
- /*
- * A count must never go down to zero
- * without a TLB flush!
- */
- need_wakeup = 0;
- switch (--pkmap_count[nr]) {
- case 0:
- BUG();
- case 1:
- /*
- * Avoid an unnecessary wake_up() function call.
- * The common case is pkmap_count[] == 1, but
- * no waiters.
- * The tasks queued in the wait-queue are guarded
- * by both the lock in the wait-queue-head and by
- * the kmap_lock. As the kmap_lock is held here,
- * no need for the wait-queue-head's lock. Simply
- * test if the queue is empty.
- */
- need_wakeup = waitqueue_active(&pkmap_map_wait);
- }
- spin_unlock(&kmap_lock);
-
- /* do wake-up, if needed, race-free outside of the spin lock */
- if (need_wakeup)
- wake_up(&pkmap_map_wait);
-}
-
-#define POOL_SIZE 32
-
-/*
- * This lock gets no contention at all, normally.
- */
-static spinlock_t emergency_lock = SPIN_LOCK_UNLOCKED;
-
-int nr_emergency_pages;
-static LIST_HEAD(emergency_pages);
-
-int nr_emergency_bhs;
-static LIST_HEAD(emergency_bhs);
-
-/*
- * Simple bounce buffer support for highmem pages.
- * This will be moved to the block layer in 2.5.
- */
-
-static inline void copy_from_high_bh (struct buffer_head *to,
- struct buffer_head *from)
-{
- struct page *p_from;
- char *vfrom;
-
- p_from = from->b_page;
-
- vfrom = kmap_atomic(p_from, KM_USER0);
- memcpy(to->b_data, vfrom + bh_offset(from), to->b_size);
- kunmap_atomic(vfrom, KM_USER0);
-}
-
-static inline void copy_to_high_bh_irq (struct buffer_head *to,
- struct buffer_head *from)
-{
- struct page *p_to;
- char *vto;
- unsigned long flags;
-
- p_to = to->b_page;
- __save_flags(flags);
- __cli();
- vto = kmap_atomic(p_to, KM_BOUNCE_READ);
- memcpy(vto + bh_offset(to), from->b_data, to->b_size);
- kunmap_atomic(vto, KM_BOUNCE_READ);
- __restore_flags(flags);
-}
-
-static inline void bounce_end_io (struct buffer_head *bh, int uptodate)
-{
- struct page *page;
- struct buffer_head *bh_orig = (struct buffer_head *)(bh->b_private);
- unsigned long flags;
-
- bh_orig->b_end_io(bh_orig, uptodate);
-
- page = bh->b_page;
-
- spin_lock_irqsave(&emergency_lock, flags);
- if (nr_emergency_pages >= POOL_SIZE)
- __free_page(page);
- else {
- /*
- * We are abusing page->list to manage
- * the highmem emergency pool:
- */
- list_add(&page->list, &emergency_pages);
- nr_emergency_pages++;
- }
-
- if (nr_emergency_bhs >= POOL_SIZE) {
-#ifdef HIGHMEM_DEBUG
- /* Don't clobber the constructed slab cache */
- init_waitqueue_head(&bh->b_wait);
-#endif
- kmem_cache_free(bh_cachep, bh);
- } else {
- /*
- * Ditto in the bh case, here we abuse b_inode_buffers:
- */
- list_add(&bh->b_inode_buffers, &emergency_bhs);
- nr_emergency_bhs++;
- }
- spin_unlock_irqrestore(&emergency_lock, flags);
-}
-
-static __init int init_emergency_pool(void)
-{
- struct sysinfo i;
- si_meminfo(&i);
- si_swapinfo(&i);
-
- if (!i.totalhigh)
- return 0;
-
- spin_lock_irq(&emergency_lock);
- while (nr_emergency_pages < POOL_SIZE) {
- struct page * page = alloc_page(GFP_ATOMIC);
- if (!page) {
- printk("couldn't refill highmem emergency pages");
- break;
- }
- list_add(&page->list, &emergency_pages);
- nr_emergency_pages++;
- }
- while (nr_emergency_bhs < POOL_SIZE) {
-		struct buffer_head * bh = kmem_cache_alloc(bh_cachep, SLAB_ATOMIC);
- if (!bh) {
- printk("couldn't refill highmem emergency bhs");
- break;
- }
- list_add(&bh->b_inode_buffers, &emergency_bhs);
- nr_emergency_bhs++;
- }
- spin_unlock_irq(&emergency_lock);
- printk("allocated %d pages and %d bhs reserved for the highmem
bounces\n",
- nr_emergency_pages, nr_emergency_bhs);
-
- return 0;
-}
-
-__initcall(init_emergency_pool);
-
-static void bounce_end_io_write (struct buffer_head *bh, int uptodate)
-{
- bounce_end_io(bh, uptodate);
-}
-
-static void bounce_end_io_read (struct buffer_head *bh, int uptodate)
-{
- struct buffer_head *bh_orig = (struct buffer_head *)(bh->b_private);
-
- if (uptodate)
- copy_to_high_bh_irq(bh_orig, bh);
- bounce_end_io(bh, uptodate);
-}
-
-struct page *alloc_bounce_page (void)
-{
- struct list_head *tmp;
- struct page *page;
-
- page = alloc_page(GFP_NOHIGHIO);
- if (page)
- return page;
- /*
- * No luck. First, kick the VM so it doesn't idle around while
- * we are using up our emergency rations.
- */
- wakeup_bdflush();
-
-repeat_alloc:
- /*
- * Try to allocate from the emergency pool.
- */
- tmp = &emergency_pages;
- spin_lock_irq(&emergency_lock);
- if (!list_empty(tmp)) {
- page = list_entry(tmp->next, struct page, list);
- list_del(tmp->next);
- nr_emergency_pages--;
- }
- spin_unlock_irq(&emergency_lock);
- if (page)
- return page;
-
-	/* we need to wait for I/O completion */
- run_task_queue(&tq_disk);
-
- yield();
- goto repeat_alloc;
-}
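
/*
 * The allocation pattern of alloc_bounce_page()/alloc_bounce_bh() above,
 * reduced to user space: try the normal allocator first, then dip into a
 * small pre-reserved pool so forward progress is possible even when the
 * allocator fails. The demo_* names are stand-ins; the pool size matches
 * the kernel's POOL_SIZE of 32.
 */
#include <stdio.h>
#include <stdlib.h>

#define DEMO_POOL_SIZE 32

static void *demo_pool[DEMO_POOL_SIZE];
static int demo_pool_count;

static void demo_pool_fill(void)
{
	while (demo_pool_count < DEMO_POOL_SIZE)
		demo_pool[demo_pool_count++] = malloc(64);
}

static void *demo_alloc(int allocator_works)
{
	void *p = allocator_works ? malloc(64) : NULL;
	if (p)
		return p;
	if (demo_pool_count > 0)	/* "no luck": use the reserve */
		return demo_pool[--demo_pool_count];
	return NULL;	/* the kernel would kick I/O, yield and retry here */
}

int main(void)
{
	demo_pool_fill();
	printf("reserve alloc: %p\n", demo_alloc(0));
	printf("left in pool:  %d\n", demo_pool_count);
	return 0;
}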
-
-struct buffer_head *alloc_bounce_bh (void)
-{
- struct list_head *tmp;
- struct buffer_head *bh;
-
- bh = kmem_cache_alloc(bh_cachep, SLAB_NOHIGHIO);
- if (bh)
- return bh;
- /*
- * No luck. First, kick the VM so it doesn't idle around while
- * we are using up our emergency rations.
- */
- wakeup_bdflush();
-
-repeat_alloc:
- /*
- * Try to allocate from the emergency pool.
- */
- tmp = &emergency_bhs;
- spin_lock_irq(&emergency_lock);
- if (!list_empty(tmp)) {
- bh = list_entry(tmp->next, struct buffer_head, b_inode_buffers);
- list_del(tmp->next);
- nr_emergency_bhs--;
- }
- spin_unlock_irq(&emergency_lock);
- if (bh)
- return bh;
-
-	/* we need to wait for I/O completion */
- run_task_queue(&tq_disk);
-
- yield();
- goto repeat_alloc;
-}
-
-struct buffer_head * create_bounce(int rw, struct buffer_head * bh_orig)
-{
- struct page *page;
- struct buffer_head *bh;
-
- if (!PageHighMem(bh_orig->b_page))
- return bh_orig;
-
- bh = alloc_bounce_bh();
- /*
- * This is wasteful for 1k buffers, but this is a stopgap measure
- * and we are being inefficient anyway. This approach simplifies
- * things immensely. On boxes with more than 4GB RAM this should
- * not be an issue anyway.
- */
- page = alloc_bounce_page();
-
- set_bh_page(bh, page, 0);
-
- bh->b_next = NULL;
- bh->b_blocknr = bh_orig->b_blocknr;
- bh->b_size = bh_orig->b_size;
- bh->b_list = -1;
- bh->b_dev = bh_orig->b_dev;
- bh->b_count = bh_orig->b_count;
- bh->b_rdev = bh_orig->b_rdev;
- bh->b_state = bh_orig->b_state;
-#ifdef HIGHMEM_DEBUG
- bh->b_flushtime = jiffies;
- bh->b_next_free = NULL;
- bh->b_prev_free = NULL;
- /* bh->b_this_page */
- bh->b_reqnext = NULL;
- bh->b_pprev = NULL;
-#endif
- /* bh->b_page */
- if (rw == WRITE) {
- bh->b_end_io = bounce_end_io_write;
- copy_from_high_bh(bh, bh_orig);
- } else
- bh->b_end_io = bounce_end_io_read;
- bh->b_private = (void *)bh_orig;
- bh->b_rsector = bh_orig->b_rsector;
-#ifdef HIGHMEM_DEBUG
- memset(&bh->b_wait, -1, sizeof(bh->b_wait));
-#endif
-
- return bh;
-}
-
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/mm/memory.c
--- a/linux-2.4.30-xen-sparse/mm/memory.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,1548 +0,0 @@
-/*
- * linux/mm/memory.c
- *
- * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
- */
-
-/*
- * demand-loading started 01.12.91 - seems it is high on the list of
- * things wanted, and it should be easy to implement. - Linus
- */
-
-/*
- * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
- * pages started 02.12.91, seems to work. - Linus.
- *
- * Tested sharing by executing about 30 /bin/sh: under the old kernel it
- * would have taken more than the 6M I have free, but it worked well as
- * far as I could see.
- *
- * Also corrected some "invalidate()"s - I wasn't doing enough of them.
- */
-
-/*
- * Real VM (paging to/from disk) started 18.12.91. Much more work and
- * thought has to go into this. Oh, well..
- * 19.12.91 - works, somewhat. Sometimes I get faults, don't know why.
- * Found it. Everything seems to work now.
- * 20.12.91 - Ok, making the swap-device changeable like the root.
- */
-
-/*
- * 05.04.94 - Multi-page memory management added for v1.1.
- * Idea by Alex Bligh (alex@xxxxxxxxxxxxxxx)
- *
- * 16.07.99 - Support of BIGMEM added by Gerhard Wichert, Siemens AG
- * (Gerhard.Wichert@xxxxxxxxxxxxxx)
- */
-
-#include <linux/mm.h>
-#include <linux/mman.h>
-#include <linux/swap.h>
-#include <linux/smp_lock.h>
-#include <linux/swapctl.h>
-#include <linux/iobuf.h>
-#include <linux/highmem.h>
-#include <linux/pagemap.h>
-#include <linux/module.h>
-
-#include <asm/pgalloc.h>
-#include <asm/uaccess.h>
-#include <asm/tlb.h>
-
-unsigned long max_mapnr;
-unsigned long num_physpages;
-unsigned long num_mappedpages;
-void * high_memory;
-struct page *highmem_start_page;
-
-/*
- * We special-case the C-O-W ZERO_PAGE, because it's such
- * a common occurrence (no need to read the page to know
- * that it's zero - better for the cache and memory subsystem).
- */
-static inline void copy_cow_page(struct page * from, struct page * to, unsigned long address)
-{
- if (from == ZERO_PAGE(address)) {
- clear_user_highpage(to, address);
- return;
- }
- copy_user_highpage(to, from, address);
-}
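
/*
 * A user-space analogue of the special case above: when the source is
 * known to be the shared zero page, clearing the destination is cheaper
 * than copying a page of zeroes through the cache. The demo_* names and
 * the 4 KB page size are illustrative.
 */
#include <stdio.h>
#include <string.h>

#define DEMO_PAGE_SIZE 4096

static const char demo_zero_page[DEMO_PAGE_SIZE];	/* all zeroes */

static void demo_copy_cow_page(const char *from, char *to)
{
	if (from == demo_zero_page) {
		memset(to, 0, DEMO_PAGE_SIZE);	/* no source reads at all */
		return;
	}
	memcpy(to, from, DEMO_PAGE_SIZE);
}

int main(void)
{
	static char dst[DEMO_PAGE_SIZE];
	demo_copy_cow_page(demo_zero_page, dst);
	printf("dst[0] = %d\n", dst[0]);	/* 0 */
	return 0;
}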
-
-mem_map_t * mem_map;
-
-/*
- * Called by TLB shootdown
- */
-void __free_pte(pte_t pte)
-{
- struct page *page = pte_page(pte);
- if ((!VALID_PAGE(page)) || PageReserved(page))
- return;
- if (pte_dirty(pte))
- set_page_dirty(page);
- free_page_and_swap_cache(page);
-}
-
-
-/*
- * Note: this doesn't free the actual pages themselves. That
- * has been handled earlier when unmapping all the memory regions.
- */
-static inline void free_one_pmd(pmd_t * dir)
-{
- pte_t * pte;
-
- if (pmd_none(*dir))
- return;
- if (pmd_bad(*dir)) {
- pmd_ERROR(*dir);
- pmd_clear(dir);
- return;
- }
- pte = pte_offset(dir, 0);
- pmd_clear(dir);
- pte_free(pte);
-}
-
-static inline void free_one_pgd(pgd_t * dir)
-{
- int j;
- pmd_t * pmd;
-
- if (pgd_none(*dir))
- return;
- if (pgd_bad(*dir)) {
- pgd_ERROR(*dir);
- pgd_clear(dir);
- return;
- }
- pmd = pmd_offset(dir, 0);
- pgd_clear(dir);
- for (j = 0; j < PTRS_PER_PMD ; j++) {
- prefetchw(pmd+j+(PREFETCH_STRIDE/16));
- free_one_pmd(pmd+j);
- }
- pmd_free(pmd);
-}
-
-/* Low and high watermarks for page table cache.
- The system should try to have pgt_water[0] <= cache elements <= pgt_water[1]
- */
-int pgt_cache_water[2] = { 25, 50 };
-
-/* Returns the number of pages freed */
-int check_pgt_cache(void)
-{
- return do_check_pgt_cache(pgt_cache_water[0], pgt_cache_water[1]);
-}
-
-
-/*
- * This function clears all user-level page tables of a process - this
- * is needed by execve(), so that old pages aren't in the way.
- */
-void clear_page_tables(struct mm_struct *mm, unsigned long first, int nr)
-{
- pgd_t * page_dir = mm->pgd;
-
- spin_lock(&mm->page_table_lock);
- page_dir += first;
- do {
- free_one_pgd(page_dir);
- page_dir++;
- } while (--nr);
- XEN_flush_page_update_queue();
- spin_unlock(&mm->page_table_lock);
-
- /* keep the page table cache within bounds */
- check_pgt_cache();
-}
-
-#define PTE_TABLE_MASK ((PTRS_PER_PTE-1) * sizeof(pte_t))
-#define PMD_TABLE_MASK ((PTRS_PER_PMD-1) * sizeof(pmd_t))
-
-/*
- * copy one vm_area from one task to the other. Assumes the page tables
- * already present in the new task to be cleared in the whole range
- * covered by this vma.
- *
- * 08Jan98 Merged into one routine from several inline routines to reduce
- * variable count and make things faster. -jj
- *
- * dst->page_table_lock is held on entry and exit,
- * but may be dropped within pmd_alloc() and pte_alloc().
- */
-int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
- struct vm_area_struct *vma)
-{
- pgd_t * src_pgd, * dst_pgd;
- unsigned long address = vma->vm_start;
- unsigned long end = vma->vm_end;
-	unsigned long cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
-
- src_pgd = pgd_offset(src, address)-1;
- dst_pgd = pgd_offset(dst, address)-1;
-
- for (;;) {
- pmd_t * src_pmd, * dst_pmd;
-
- src_pgd++; dst_pgd++;
-
- /* copy_pmd_range */
-
- if (pgd_none(*src_pgd))
- goto skip_copy_pmd_range;
- if (pgd_bad(*src_pgd)) {
- pgd_ERROR(*src_pgd);
- pgd_clear(src_pgd);
-skip_copy_pmd_range: address = (address + PGDIR_SIZE) & PGDIR_MASK;
- if (!address || (address >= end))
- goto out;
- continue;
- }
-
- src_pmd = pmd_offset(src_pgd, address);
- dst_pmd = pmd_alloc(dst, dst_pgd, address);
- if (!dst_pmd)
- goto nomem;
-
- do {
- pte_t * src_pte, * dst_pte;
-
- /* copy_pte_range */
-
- if (pmd_none(*src_pmd))
- goto skip_copy_pte_range;
- if (pmd_bad(*src_pmd)) {
- pmd_ERROR(*src_pmd);
- pmd_clear(src_pmd);
-skip_copy_pte_range: address = (address + PMD_SIZE) & PMD_MASK;
- if (address >= end)
- goto out;
- goto cont_copy_pmd_range;
- }
-
- src_pte = pte_offset(src_pmd, address);
- dst_pte = pte_alloc(dst, dst_pmd, address);
- if (!dst_pte)
- goto nomem;
-
- spin_lock(&src->page_table_lock);
- do {
- pte_t pte = *src_pte;
- struct page *ptepage;
-
- /* copy_one_pte */
-
- if (pte_none(pte))
- goto cont_copy_pte_range_noset;
- if (!pte_present(pte)) {
- swap_duplicate(pte_to_swp_entry(pte));
- goto cont_copy_pte_range;
- }
- ptepage = pte_page(pte);
- if ((!VALID_PAGE(ptepage)) ||
- PageReserved(ptepage))
- goto cont_copy_pte_range;
-
-				/* If it's a COW mapping, write protect it both in the parent and the child */
- if (cow && pte_write(pte)) {
-					/* XEN modification: modified ordering here to avoid RaW hazard. */
- pte = *src_pte;
- pte = pte_wrprotect(pte);
- ptep_set_wrprotect(src_pte);
- }
-
-				/* If it's a shared mapping, mark it clean in the child */
- if (vma->vm_flags & VM_SHARED)
- pte = pte_mkclean(pte);
- pte = pte_mkold(pte);
- get_page(ptepage);
- dst->rss++;
-
-cont_copy_pte_range: set_pte(dst_pte, pte);
-cont_copy_pte_range_noset: address += PAGE_SIZE;
- if (address >= end)
- goto out_unlock;
- src_pte++;
- dst_pte++;
- } while ((unsigned long)src_pte & PTE_TABLE_MASK);
- spin_unlock(&src->page_table_lock);
-
-cont_copy_pmd_range: src_pmd++;
- dst_pmd++;
- } while ((unsigned long)src_pmd & PMD_TABLE_MASK);
- }
-out_unlock:
- spin_unlock(&src->page_table_lock);
-out:
- return 0;
-nomem:
- return -ENOMEM;
-}
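
/*
 * The write-protect-both-sides trick above is what makes fork() cheap,
 * and its effect is visible from user space: after fork(), parent and
 * child share private pages until one of them writes. A small demo,
 * assuming a system that provides MAP_ANONYMOUS:
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	char *page = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (page == MAP_FAILED)
		return 1;
	strcpy(page, "parent data");

	if (fork() == 0) {
		strcpy(page, "child data");  /* faults into a private copy */
		_exit(0);
	}
	wait(NULL);
	printf("parent still sees: %s\n", page);	/* "parent data" */
	return 0;
}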
-
-/*
- * Return indicates whether a page was freed so caller can adjust rss
- */
-static inline void forget_pte(pte_t page)
-{
- if (!pte_none(page)) {
- printk("forget_pte: old mapping existed!\n");
- BUG();
- }
-}
-
-static inline int zap_pte_range(mmu_gather_t *tlb, pmd_t * pmd, unsigned long address, unsigned long size)
-{
- unsigned long offset;
- pte_t * ptep;
- int freed = 0;
-
- if (pmd_none(*pmd))
- return 0;
- if (pmd_bad(*pmd)) {
- pmd_ERROR(*pmd);
- pmd_clear(pmd);
- return 0;
- }
- ptep = pte_offset(pmd, address);
- offset = address & ~PMD_MASK;
- if (offset + size > PMD_SIZE)
- size = PMD_SIZE - offset;
- size &= PAGE_MASK;
- for (offset=0; offset < size; ptep++, offset += PAGE_SIZE) {
- pte_t pte = *ptep;
- if (pte_none(pte))
- continue;
- if (pte_present(pte)) {
- struct page *page = pte_page(pte);
- if (VALID_PAGE(page) && !PageReserved(page))
- freed ++;
- /* This will eventually call __free_pte on the pte. */
- tlb_remove_page(tlb, ptep, address + offset);
- } else {
- free_swap_and_cache(pte_to_swp_entry(pte));
- pte_clear(ptep);
- }
- }
-
- return freed;
-}
-
-static inline int zap_pmd_range(mmu_gather_t *tlb, pgd_t * dir, unsigned long address, unsigned long size)
-{
- pmd_t * pmd;
- unsigned long end;
- int freed;
-
- if (pgd_none(*dir))
- return 0;
- if (pgd_bad(*dir)) {
- pgd_ERROR(*dir);
- pgd_clear(dir);
- return 0;
- }
- pmd = pmd_offset(dir, address);
- end = address + size;
- if (end > ((address + PGDIR_SIZE) & PGDIR_MASK))
- end = ((address + PGDIR_SIZE) & PGDIR_MASK);
- freed = 0;
- do {
- freed += zap_pte_range(tlb, pmd, address, end - address);
- address = (address + PMD_SIZE) & PMD_MASK;
- pmd++;
- } while (address < end);
- return freed;
-}
-
-/*
- * remove user pages in a given range.
- */
-void zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size)
-{
- mmu_gather_t *tlb;
- pgd_t * dir;
- unsigned long start = address, end = address + size;
- int freed = 0;
-
- dir = pgd_offset(mm, address);
-
- /*
- * This is a long-lived spinlock. That's fine.
- * There's no contention, because the page table
- * lock only protects against kswapd anyway, and
- * even if kswapd happened to be looking at this
- * process we _want_ it to get stuck.
- */
- if (address >= end)
- BUG();
- spin_lock(&mm->page_table_lock);
- flush_cache_range(mm, address, end);
- tlb = tlb_gather_mmu(mm);
-
- do {
- freed += zap_pmd_range(tlb, dir, address, end - address);
- address = (address + PGDIR_SIZE) & PGDIR_MASK;
- dir++;
- } while (address && (address < end));
-
- /* this will flush any remaining tlb entries */
- tlb_finish_mmu(tlb, start, end);
-
- /*
- * Update rss for the mm_struct (not necessarily current->mm)
- * Notice that rss is an unsigned long.
- */
- if (mm->rss > freed)
- mm->rss -= freed;
- else
- mm->rss = 0;
- spin_unlock(&mm->page_table_lock);
-}
-
-/*
- * Do a quick page-table lookup for a single page.
- */
-static struct page * follow_page(struct mm_struct *mm, unsigned long address, int write)
-{
- pgd_t *pgd;
- pmd_t *pmd;
- pte_t *ptep, pte;
-
- pgd = pgd_offset(mm, address);
- if (pgd_none(*pgd) || pgd_bad(*pgd))
- goto out;
-
- pmd = pmd_offset(pgd, address);
- if (pmd_none(*pmd) || pmd_bad(*pmd))
- goto out;
-
- ptep = pte_offset(pmd, address);
- if (!ptep)
- goto out;
-
- pte = *ptep;
- if (pte_present(pte)) {
- if (!write ||
- (pte_write(pte) && pte_dirty(pte)))
- return pte_page(pte);
- }
-
-out:
- return 0;
-}
-
-/*
- * Given a physical address, is there a useful struct page pointing to
- * it? This may become more complex in the future if we start dealing
- * with IO-aperture pages in kiobufs.
- */
-
-static inline struct page * get_page_map(struct page *page)
-{
- if (!VALID_PAGE(page))
- return 0;
- return page;
-}
-
-/*
- * Please read Documentation/cachetlb.txt before using this function,
- * accessing foreign memory spaces can cause cache coherency problems.
- *
- * Accessing a VM_IO area is even more dangerous, therefore the function
- * fails if pages is != NULL and a VM_IO area is found.
- */
-int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start,
-		int len, int write, int force, struct page **pages, struct vm_area_struct **vmas)
-{
- int i;
- unsigned int flags;
-
- /*
- * Require read or write permissions.
- * If 'force' is set, we only require the "MAY" flags.
- */
- flags = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
- flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
- i = 0;
-
- do {
- struct vm_area_struct * vma;
-
- vma = find_extend_vma(mm, start);
-
-		if ( !vma || (pages && vma->vm_flags & VM_IO) || !(flags & vma->vm_flags) )
- return i ? : -EFAULT;
-
- spin_lock(&mm->page_table_lock);
- do {
- struct page *map;
- while (!(map = follow_page(mm, start, write))) {
- spin_unlock(&mm->page_table_lock);
-				switch (handle_mm_fault(mm, vma, start, write)) {
- case 1:
- tsk->min_flt++;
- break;
- case 2:
- tsk->maj_flt++;
- break;
- case 0:
- if (i) return i;
- return -EFAULT;
- default:
- if (i) return i;
- return -ENOMEM;
- }
- spin_lock(&mm->page_table_lock);
- }
- if (pages) {
- pages[i] = get_page_map(map);
- /* FIXME: call the correct function,
- * depending on the type of the found page
- */
- if (!pages[i] || PageReserved(pages[i])) {
- if (pages[i] != ZERO_PAGE(start))
- goto bad_page;
- } else
- page_cache_get(pages[i]);
- }
- if (vmas)
- vmas[i] = vma;
- i++;
- start += PAGE_SIZE;
- len--;
- } while(len && start < vma->vm_end);
- spin_unlock(&mm->page_table_lock);
- } while(len);
-out:
- return i;
-
- /*
- * We found an invalid page in the VMA. Release all we have
- * so far and fail.
- */
-bad_page:
- spin_unlock(&mm->page_table_lock);
- while (i--)
- page_cache_release(pages[i]);
- i = -EFAULT;
- goto out;
-}
-
-EXPORT_SYMBOL(get_user_pages);
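
/*
 * The permission mask computed at the top of get_user_pages(), evaluated
 * for all four (write, force) combinations. The VM_* values below are
 * local stand-ins, not the kernel's definitions; only their distinctness
 * matters. Note how force downgrades the requirement to the MAY bits.
 */
#include <stdio.h>

#define VM_READ     0x01
#define VM_WRITE    0x02
#define VM_MAYREAD  0x10
#define VM_MAYWRITE 0x20

int main(void)
{
	int write, force;

	for (write = 0; write <= 1; write++)
		for (force = 0; force <= 1; force++) {
			unsigned flags = write ? (VM_WRITE | VM_MAYWRITE)
					       : (VM_READ | VM_MAYREAD);
			flags &= force ? (VM_MAYREAD | VM_MAYWRITE)
				       : (VM_READ | VM_WRITE);
			printf("write=%d force=%d -> required bit 0x%02x\n",
			       write, force, flags);
		}
	return 0;
}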
-
-/*
- * Force in an entire range of pages from the current process's user VA,
- * and pin them in physical memory.
- */
-#define dprintk(x...)
-
-int map_user_kiobuf(int rw, struct kiobuf *iobuf, unsigned long va, size_t len)
-{
- int pgcount, err;
- struct mm_struct * mm;
-
- /* Make sure the iobuf is not already mapped somewhere. */
- if (iobuf->nr_pages)
- return -EINVAL;
-
- mm = current->mm;
- dprintk ("map_user_kiobuf: begin\n");
-
- pgcount = (va + len + PAGE_SIZE - 1)/PAGE_SIZE - va/PAGE_SIZE;
- /* mapping 0 bytes is not permitted */
- if (!pgcount) BUG();
- err = expand_kiobuf(iobuf, pgcount);
- if (err)
- return err;
-
- iobuf->locked = 0;
- iobuf->offset = va & (PAGE_SIZE-1);
- iobuf->length = len;
-
- /* Try to fault in all of the necessary pages */
- down_read(&mm->mmap_sem);
- /* rw==READ means read from disk, write into memory area */
- err = get_user_pages(current, mm, va, pgcount,
- (rw==READ), 0, iobuf->maplist, NULL);
- up_read(&mm->mmap_sem);
- if (err < 0) {
- unmap_kiobuf(iobuf);
- dprintk ("map_user_kiobuf: end %d\n", err);
- return err;
- }
- iobuf->nr_pages = err;
- while (pgcount--) {
-		/* FIXME: flush superfluous for rw==READ,
- * probably wrong function for rw==WRITE
- */
- flush_dcache_page(iobuf->maplist[pgcount]);
- }
- dprintk ("map_user_kiobuf: end OK\n");
- return 0;
-}
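
/*
 * The page-count arithmetic from map_user_kiobuf() above, as a worked
 * example: the number of pages touched by the byte range [va, va+len),
 * counting partial pages at both ends. PAGE_SIZE = 4096 as on i386.
 */
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096UL

static unsigned long demo_pgcount(unsigned long va, unsigned long len)
{
	return (va + len + DEMO_PAGE_SIZE - 1) / DEMO_PAGE_SIZE
	       - va / DEMO_PAGE_SIZE;
}

int main(void)
{
	/* 100 bytes straddling a page boundary touch two pages... */
	printf("%lu\n", demo_pgcount(4096 - 50, 100));	/* 2 */
	/* ...the same 100 bytes within one page touch only one. */
	printf("%lu\n", demo_pgcount(4096 + 50, 100));	/* 1 */
	return 0;
}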
-
-/*
- * Mark all of the pages in a kiobuf as dirty
- *
- * We need to be able to deal with short reads from disk: if an IO error
- * occurs, the number of bytes read into memory may be less than the
- * size of the kiobuf, so we have to stop marking pages dirty once the
- * requested byte count has been reached.
- *
- * Must be called from process context - set_page_dirty() takes VFS locks.
- */
-
-void mark_dirty_kiobuf(struct kiobuf *iobuf, int bytes)
-{
- int index, offset, remaining;
- struct page *page;
-
- index = iobuf->offset >> PAGE_SHIFT;
- offset = iobuf->offset & ~PAGE_MASK;
- remaining = bytes;
- if (remaining > iobuf->length)
- remaining = iobuf->length;
-
- while (remaining > 0 && index < iobuf->nr_pages) {
- page = iobuf->maplist[index];
-
- if (!PageReserved(page))
- set_page_dirty(page);
-
- remaining -= (PAGE_SIZE - offset);
- offset = 0;
- index++;
- }
-}
-
-/*
- * Unmap all of the pages referenced by a kiobuf. We release the pages,
- * and unlock them if they were locked.
- */
-
-void unmap_kiobuf (struct kiobuf *iobuf)
-{
- int i;
- struct page *map;
-
- for (i = 0; i < iobuf->nr_pages; i++) {
- map = iobuf->maplist[i];
- if (map) {
- if (iobuf->locked)
- UnlockPage(map);
- /* FIXME: cache flush missing for rw==READ
- * FIXME: call the correct reference counting function
- */
- page_cache_release(map);
- }
- }
-
- iobuf->nr_pages = 0;
- iobuf->locked = 0;
-}
-
-
-/*
- * Lock down all of the pages of a kiovec for IO.
- *
- * If any page is mapped twice in the kiovec, we return the error -EINVAL.
- *
- * The optional wait parameter causes the lock call to block until all
- * pages can be locked if set. If wait==0, the lock operation is
- * aborted if any locked pages are found and -EAGAIN is returned.
- */
-
-int lock_kiovec(int nr, struct kiobuf *iovec[], int wait)
-{
- struct kiobuf *iobuf;
- int i, j;
- struct page *page, **ppage;
- int doublepage = 0;
- int repeat = 0;
-
- repeat:
-
- for (i = 0; i < nr; i++) {
- iobuf = iovec[i];
-
- if (iobuf->locked)
- continue;
-
- ppage = iobuf->maplist;
- for (j = 0; j < iobuf->nr_pages; ppage++, j++) {
- page = *ppage;
- if (!page)
- continue;
-
- if (TryLockPage(page)) {
- while (j--) {
- struct page *tmp = *--ppage;
- if (tmp)
- UnlockPage(tmp);
- }
- goto retry;
- }
- }
- iobuf->locked = 1;
- }
-
- return 0;
-
- retry:
-
- /*
- * We couldn't lock one of the pages. Undo the locking so far,
- * wait on the page we got to, and try again.
- */
-
- unlock_kiovec(nr, iovec);
- if (!wait)
- return -EAGAIN;
-
- /*
- * Did the release also unlock the page we got stuck on?
- */
- if (!PageLocked(page)) {
- /*
- * If so, we may well have the page mapped twice
- * in the IO address range. Bad news. Of
- * course, it _might_ just be a coincidence,
- * but if it happens more than once, chances
- * are we have a double-mapped page.
- */
- if (++doublepage >= 3)
- return -EINVAL;
-
- /* Try again... */
- wait_on_page(page);
- }
-
- if (++repeat < 16)
- goto repeat;
- return -EAGAIN;
-}
-
-/*
- * Unlock all of the pages of a kiovec after IO.
- */
-
-int unlock_kiovec(int nr, struct kiobuf *iovec[])
-{
- struct kiobuf *iobuf;
- int i, j;
- struct page *page, **ppage;
-
- for (i = 0; i < nr; i++) {
- iobuf = iovec[i];
-
- if (!iobuf->locked)
- continue;
- iobuf->locked = 0;
-
- ppage = iobuf->maplist;
- for (j = 0; j < iobuf->nr_pages; ppage++, j++) {
- page = *ppage;
- if (!page)
- continue;
- UnlockPage(page);
- }
- }
- return 0;
-}
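
/*
 * The all-or-nothing discipline of lock_kiovec() above, restated with
 * pthread mutexes: try-lock each lock in order and, on the first failure,
 * release everything taken so far before the caller waits and retries.
 * This mirrors the TryLockPage()/rollback loop; the demo_* names are
 * stand-ins.
 */
#include <pthread.h>
#include <stdio.h>

#define NLOCKS 3
static pthread_mutex_t demo_locks[NLOCKS] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER
};

static int demo_lock_all(void)
{
	int j;

	for (j = 0; j < NLOCKS; j++) {
		if (pthread_mutex_trylock(&demo_locks[j]) != 0) {
			while (j--)		/* roll back what we hold */
				pthread_mutex_unlock(&demo_locks[j]);
			return -1;		/* caller may wait and retry */
		}
	}
	return 0;
}

int main(void)
{
	printf("lock_all: %s\n", demo_lock_all() ? "EAGAIN" : "ok");
	return 0;
}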
-
-static inline void zeromap_pte_range(pte_t * pte, unsigned long address,
- unsigned long size, pgprot_t prot)
-{
- unsigned long end;
-
- address &= ~PMD_MASK;
- end = address + size;
- if (end > PMD_SIZE)
- end = PMD_SIZE;
- do {
-		pte_t zero_pte = pte_wrprotect(mk_pte(ZERO_PAGE(address), prot));
- pte_t oldpage = ptep_get_and_clear(pte);
- set_pte(pte, zero_pte);
- forget_pte(oldpage);
- address += PAGE_SIZE;
- pte++;
- } while (address && (address < end));
-}
-
-static inline int zeromap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address,
- unsigned long size, pgprot_t prot)
-{
- unsigned long end;
-
- address &= ~PGDIR_MASK;
- end = address + size;
- if (end > PGDIR_SIZE)
- end = PGDIR_SIZE;
- do {
- pte_t * pte = pte_alloc(mm, pmd, address);
- if (!pte)
- return -ENOMEM;
- zeromap_pte_range(pte, address, end - address, prot);
- address = (address + PMD_SIZE) & PMD_MASK;
- pmd++;
- } while (address && (address < end));
- return 0;
-}
-
-int zeromap_page_range(unsigned long address, unsigned long size, pgprot_t prot)
-{
- int error = 0;
- pgd_t * dir;
- unsigned long beg = address;
- unsigned long end = address + size;
- struct mm_struct *mm = current->mm;
-
- dir = pgd_offset(mm, address);
- flush_cache_range(mm, beg, end);
- if (address >= end)
- BUG();
-
- spin_lock(&mm->page_table_lock);
- do {
- pmd_t *pmd = pmd_alloc(mm, dir, address);
- error = -ENOMEM;
- if (!pmd)
- break;
-		error = zeromap_pmd_range(mm, pmd, address, end - address, prot);
- if (error)
- break;
- address = (address + PGDIR_SIZE) & PGDIR_MASK;
- dir++;
- } while (address && (address < end));
- spin_unlock(&mm->page_table_lock);
- flush_tlb_range(mm, beg, end);
- return error;
-}
-
-/*
- * maps a range of physical memory into the requested pages. the old
- * mappings are removed. any references to nonexistent pages results
- * in null mappings (currently treated as "copy-on-access")
- */
-static inline void remap_pte_range(pte_t * pte, unsigned long address, unsigned long size,
- unsigned long phys_addr, pgprot_t prot)
-{
- unsigned long end;
-
- address &= ~PMD_MASK;
- end = address + size;
- if (end > PMD_SIZE)
- end = PMD_SIZE;
- do {
- struct page *page;
- pte_t oldpage;
- oldpage = ptep_get_and_clear(pte);
-
- page = virt_to_page(__va(phys_addr));
- if ((!VALID_PAGE(page)) || PageReserved(page))
- set_pte(pte, mk_pte_phys(phys_addr, prot));
- forget_pte(oldpage);
- address += PAGE_SIZE;
- phys_addr += PAGE_SIZE;
- pte++;
- } while (address && (address < end));
-}
-
-static inline int remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
- unsigned long phys_addr, pgprot_t prot)
-{
- unsigned long end;
-
- address &= ~PGDIR_MASK;
- end = address + size;
- if (end > PGDIR_SIZE)
- end = PGDIR_SIZE;
- phys_addr -= address;
- do {
- pte_t * pte = pte_alloc(mm, pmd, address);
- if (!pte)
- return -ENOMEM;
-		remap_pte_range(pte, address, end - address, address + phys_addr, prot);
- address = (address + PMD_SIZE) & PMD_MASK;
- pmd++;
- } while (address && (address < end));
- return 0;
-}
-
-/* Note: this is only safe if the mm semaphore is held when called. */
-int remap_page_range(unsigned long from, unsigned long phys_addr, unsigned long size, pgprot_t prot)
-{
- int error = 0;
- pgd_t * dir;
- unsigned long beg = from;
- unsigned long end = from + size;
- struct mm_struct *mm = current->mm;
-
- phys_addr -= from;
- dir = pgd_offset(mm, from);
- flush_cache_range(mm, beg, end);
- if (from >= end)
- BUG();
-
- spin_lock(&mm->page_table_lock);
- do {
- pmd_t *pmd = pmd_alloc(mm, dir, from);
- error = -ENOMEM;
- if (!pmd)
- break;
-		error = remap_pmd_range(mm, pmd, from, end - from, phys_addr + from, prot);
- if (error)
- break;
- from = (from + PGDIR_SIZE) & PGDIR_MASK;
- dir++;
- } while (from && (from < end));
- spin_unlock(&mm->page_table_lock);
- flush_tlb_range(mm, beg, end);
- return error;
-}
-
-/*
- * Establish a new mapping:
- * - flush the old one
- * - update the page tables
- * - inform the TLB about the new one
- *
- * We hold the mm semaphore for reading and vma->vm_mm->page_table_lock
- */
-static inline void establish_pte(struct vm_area_struct * vma, unsigned long address, pte_t *page_table, pte_t entry)
-{
-#ifdef CONFIG_XEN
- if ( likely(vma->vm_mm == current->mm) ) {
- XEN_flush_page_update_queue();
-		HYPERVISOR_update_va_mapping(address>>PAGE_SHIFT, entry, UVMF_INVLPG);
- } else {
- set_pte(page_table, entry);
- flush_tlb_page(vma, address);
- }
-#else
- set_pte(page_table, entry);
- flush_tlb_page(vma, address);
-#endif
- update_mmu_cache(vma, address, entry);
-}
-
-/*
- * We hold the mm semaphore for reading and vma->vm_mm->page_table_lock
- */
-static inline void break_cow(struct vm_area_struct * vma, struct page * new_page, unsigned long address,
- pte_t *page_table)
-{
- flush_page_to_ram(new_page);
- flush_cache_page(vma, address);
-	establish_pte(vma, address, page_table, pte_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot))));
-}
-
-/*
- * This routine handles present pages, when users try to write
- * to a shared page. It is done by copying the page to a new address
- * and decrementing the shared-page counter for the old page.
- *
- * Goto-purists beware: the only reason for goto's here is that it results
- * in better assembly code.. The "default" path will see no jumps at all.
- *
- * Note that this routine assumes that the protection checks have been
- * done by the caller (the low-level page fault routine in most cases).
- * Thus we can safely just mark it writable once we've done any necessary
- * COW.
- *
- * We also mark the page dirty at this point even though the page will
- * change only once the write actually happens. This avoids a few races,
- * and potentially makes it more efficient.
- *
- * We hold the mm semaphore and the page_table_lock on entry and exit
- * with the page_table_lock released.
- */
-static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
- unsigned long address, pte_t *page_table, pte_t pte)
-{
- struct page *old_page, *new_page;
-
- old_page = pte_page(pte);
- if (!VALID_PAGE(old_page))
- goto bad_wp_page;
-
- if (!TryLockPage(old_page)) {
- int reuse = can_share_swap_page(old_page);
- unlock_page(old_page);
- if (reuse) {
- flush_cache_page(vma, address);
-			establish_pte(vma, address, page_table, pte_mkyoung(pte_mkdirty(pte_mkwrite(pte))));
- spin_unlock(&mm->page_table_lock);
- return 1; /* Minor fault */
- }
- }
-
- /*
- * Ok, we need to copy. Oh, well..
- */
- page_cache_get(old_page);
- spin_unlock(&mm->page_table_lock);
-
- new_page = alloc_page(GFP_HIGHUSER);
- if (!new_page)
- goto no_mem;
- copy_cow_page(old_page,new_page,address);
-
- /*
- * Re-check the pte - we dropped the lock
- */
- spin_lock(&mm->page_table_lock);
- if (pte_same(*page_table, pte)) {
- if (PageReserved(old_page))
- ++mm->rss;
- break_cow(vma, new_page, address, page_table);
- if (vm_anon_lru)
- lru_cache_add(new_page);
-
- /* Free the old page.. */
- new_page = old_page;
- }
- spin_unlock(&mm->page_table_lock);
- page_cache_release(new_page);
- page_cache_release(old_page);
- return 1; /* Minor fault */
-
-bad_wp_page:
- spin_unlock(&mm->page_table_lock);
- printk("do_wp_page: bogus page at address %08lx (page
0x%lx)\n",address,(unsigned long)old_page);
- return -1;
-no_mem:
- page_cache_release(old_page);
- return -1;
-}
-
-static void vmtruncate_list(struct vm_area_struct *mpnt, unsigned long pgoff)
-{
- do {
- struct mm_struct *mm = mpnt->vm_mm;
- unsigned long start = mpnt->vm_start;
- unsigned long end = mpnt->vm_end;
- unsigned long len = end - start;
- unsigned long diff;
-
- /* mapping wholly truncated? */
- if (mpnt->vm_pgoff >= pgoff) {
- zap_page_range(mm, start, len);
- continue;
- }
-
- /* mapping wholly unaffected? */
- len = len >> PAGE_SHIFT;
- diff = pgoff - mpnt->vm_pgoff;
- if (diff >= len)
- continue;
-
- /* Ok, partially affected.. */
- start += diff << PAGE_SHIFT;
- len = (len - diff) << PAGE_SHIFT;
- zap_page_range(mm, start, len);
- } while ((mpnt = mpnt->vm_next_share) != NULL);
-}
-
-/*
- * Handle all mappings that got truncated by a "truncate()"
- * system call.
- *
- * NOTE! We have to be ready to update the memory sharing
- * between the file and the memory map for a potential last
- * incomplete page. Ugly, but necessary.
- */
-int vmtruncate(struct inode * inode, loff_t offset)
-{
- unsigned long pgoff;
- struct address_space *mapping = inode->i_mapping;
- unsigned long limit;
-
- if (inode->i_size < offset)
- goto do_expand;
- inode->i_size = offset;
- spin_lock(&mapping->i_shared_lock);
- if (!mapping->i_mmap && !mapping->i_mmap_shared)
- goto out_unlock;
-
- pgoff = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
- if (mapping->i_mmap != NULL)
- vmtruncate_list(mapping->i_mmap, pgoff);
- if (mapping->i_mmap_shared != NULL)
- vmtruncate_list(mapping->i_mmap_shared, pgoff);
-
-out_unlock:
- spin_unlock(&mapping->i_shared_lock);
- truncate_inode_pages(mapping, offset);
- goto out_truncate;
-
-do_expand:
- limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
- if (limit != RLIM_INFINITY && offset > limit)
- goto out_sig;
- if (offset > inode->i_sb->s_maxbytes)
- goto out;
- inode->i_size = offset;
-
-out_truncate:
- if (inode->i_op && inode->i_op->truncate) {
- lock_kernel();
- inode->i_op->truncate(inode);
- unlock_kernel();
- }
- return 0;
-out_sig:
- send_sig(SIGXFSZ, current, 0);
-out:
- return -EFBIG;
-}
-
-/*
- * Primitive swap readahead code. We simply read an aligned block of
- * (1 << page_cluster) entries in the swap area. This method is chosen
- * because it doesn't cost us any seek time. We also make sure to queue
- * the 'original' request together with the readahead ones...
- */
-void swapin_readahead(swp_entry_t entry)
-{
- int i, num;
- struct page *new_page;
- unsigned long offset;
-
- /*
- * Get the number of handles we should do readahead io to.
- */
- num = valid_swaphandles(entry, &offset);
- for (i = 0; i < num; offset++, i++) {
- /* Ok, do the async read-ahead now */
-		new_page = read_swap_cache_async(SWP_ENTRY(SWP_TYPE(entry), offset));
- if (!new_page)
- break;
- page_cache_release(new_page);
- }
- return;
-}
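
/*
 * The aligned-cluster arithmetic behind the readahead above: with
 * page_cluster = 3 (a common default), the 8-entry block containing the
 * faulting swap offset is read in one batch, so the extra entries cost no
 * additional seeks. The values are illustrative.
 */
#include <stdio.h>

#define DEMO_PAGE_CLUSTER 3

int main(void)
{
	unsigned long target = 21;	/* the offset that faulted */
	unsigned long mask = (1UL << DEMO_PAGE_CLUSTER) - 1;
	unsigned long start = target & ~mask;	/* aligned block start */
	unsigned long i;

	for (i = start; i < start + (1UL << DEMO_PAGE_CLUSTER); i++)
		printf("read swap offset %lu%s\n", i,
		       i == target ? "  <- original request" : "");
	return 0;
}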
-
-/*
- * We hold the mm semaphore and the page_table_lock on entry and
- * should release the pagetable lock on exit..
- */
-static int do_swap_page(struct mm_struct * mm,
- struct vm_area_struct * vma, unsigned long address,
- pte_t * page_table, pte_t orig_pte, int write_access)
-{
- struct page *page;
- swp_entry_t entry = pte_to_swp_entry(orig_pte);
- pte_t pte;
- int ret = 1;
-
- spin_unlock(&mm->page_table_lock);
- page = lookup_swap_cache(entry);
- if (!page) {
- swapin_readahead(entry);
- page = read_swap_cache_async(entry);
- if (!page) {
- /*
- * Back out if somebody else faulted in this pte while
- * we released the page table lock.
- */
- int retval;
- spin_lock(&mm->page_table_lock);
- retval = pte_same(*page_table, orig_pte) ? -1 : 1;
- spin_unlock(&mm->page_table_lock);
- return retval;
- }
-
- /* Had to read the page from swap area: Major fault */
- ret = 2;
- }
-
- mark_page_accessed(page);
-
- lock_page(page);
-
- /*
- * Back out if somebody else faulted in this pte while we
- * released the page table lock.
- */
- spin_lock(&mm->page_table_lock);
- if (!pte_same(*page_table, orig_pte)) {
- spin_unlock(&mm->page_table_lock);
- unlock_page(page);
- page_cache_release(page);
- return 1;
- }
-
- /* The page isn't present yet, go ahead with the fault. */
-
- swap_free(entry);
- if (vm_swap_full())
- remove_exclusive_swap_page(page);
-
- mm->rss++;
- pte = mk_pte(page, vma->vm_page_prot);
- if (write_access && can_share_swap_page(page))
- pte = pte_mkdirty(pte_mkwrite(pte));
- unlock_page(page);
-
- flush_page_to_ram(page);
- flush_icache_page(vma, page);
-#ifdef CONFIG_XEN
- if ( likely(vma->vm_mm == current->mm) ) {
- XEN_flush_page_update_queue();
- HYPERVISOR_update_va_mapping(address>>PAGE_SHIFT, pte, 0);
- } else {
- set_pte(page_table, pte);
- XEN_flush_page_update_queue();
- }
-#else
- set_pte(page_table, pte);
-#endif
-
- /* No need to invalidate - it was non-present before */
- update_mmu_cache(vma, address, pte);
- spin_unlock(&mm->page_table_lock);
- return ret;
-}
-
-/*
- * We are called with the MM semaphore and page_table_lock
- * spinlock held to protect against concurrent faults in
- * multithreaded programs.
- */
-static int do_anonymous_page(struct mm_struct * mm, struct vm_area_struct * vma, pte_t *page_table, int write_access, unsigned long addr)
-{
- pte_t entry;
-
- /* Read-only mapping of ZERO_PAGE. */
- entry = pte_wrprotect(mk_pte(ZERO_PAGE(addr), vma->vm_page_prot));
-
- /* ..except if it's a write access */
- if (write_access) {
- struct page *page;
-
- /* Allocate our own private page. */
- spin_unlock(&mm->page_table_lock);
-
- page = alloc_page(GFP_HIGHUSER);
- if (!page)
- goto no_mem;
- clear_user_highpage(page, addr);
-
- spin_lock(&mm->page_table_lock);
- if (!pte_none(*page_table)) {
- page_cache_release(page);
- spin_unlock(&mm->page_table_lock);
- return 1;
- }
- mm->rss++;
- flush_page_to_ram(page);
- entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
- if (vm_anon_lru)
- lru_cache_add(page);
- mark_page_accessed(page);
- }
-
-#ifdef CONFIG_XEN
- if ( likely(vma->vm_mm == current->mm) ) {
- XEN_flush_page_update_queue();
- HYPERVISOR_update_va_mapping(addr>>PAGE_SHIFT, entry, 0);
- } else {
- set_pte(page_table, entry);
- XEN_flush_page_update_queue();
- }
-#else
- set_pte(page_table, entry);
-#endif
-
- /* No need to invalidate - it was non-present before */
- update_mmu_cache(vma, addr, entry);
- spin_unlock(&mm->page_table_lock);
- return 1; /* Minor fault */
-
-no_mem:
- return -1;
-}
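
Because the read-only case maps ZERO_PAGE, freshly mapped anonymous memory reads back as zeroes without allocating a page per address; only the first write fault reaches the alloc_page() path above. A small user-space demonstration on Linux (assumes MAP_ANONYMOUS):

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
    size_t len = 1UL << 20;
    /* Anonymous mapping: every page starts out as a shared zero page. */
    char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED)
        return 1;
    printf("before write: %d\n", p[0]);  /* 0, read from the zero page */
    p[0] = 42;                           /* write fault: private page now */
    printf("after write:  %d\n", p[0]);  /* 42 */
    munmap(p, len);
    return 0;
}
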
-
-/*
- * do_no_page() tries to create a new page mapping. It aggressively
- * tries to share with existing pages, but makes a separate copy if
- * the "write_access" parameter is true in order to avoid the next
- * page fault.
- *
- * As this is called only for pages that do not currently exist, we
- * do not need to flush old virtual caches or the TLB.
- *
- * This is called with the MM semaphore held and the page table
- * spinlock held. Exit with the spinlock released.
- */
-static int do_no_page(struct mm_struct * mm, struct vm_area_struct * vma,
- unsigned long address, int write_access, pte_t *page_table)
-{
- struct page * new_page;
- pte_t entry;
-
- if (!vma->vm_ops || !vma->vm_ops->nopage)
- return do_anonymous_page(mm, vma, page_table, write_access, address);
- spin_unlock(&mm->page_table_lock);
-
- new_page = vma->vm_ops->nopage(vma, address & PAGE_MASK, 0);
-
- if (new_page == NULL) /* no page was available -- SIGBUS */
- return 0;
- if (new_page == NOPAGE_OOM)
- return -1;
-
- /*
- * Should we do an early C-O-W break?
- */
- if (write_access && !(vma->vm_flags & VM_SHARED)) {
- struct page * page = alloc_page(GFP_HIGHUSER);
- if (!page) {
- page_cache_release(new_page);
- return -1;
- }
- copy_user_highpage(page, new_page, address);
- page_cache_release(new_page);
- if (vm_anon_lru)
- lru_cache_add(page);
- new_page = page;
- }
-
- spin_lock(&mm->page_table_lock);
- /*
- * This silly early PAGE_DIRTY setting removes a race
- * due to the bad i386 page protection. But it's valid
- * for other architectures too.
- *
- * Note that if write_access is true, we either now have
- * an exclusive copy of the page, or this is a shared mapping,
- * so we can make it writable and dirty to avoid having to
- * handle that later.
- */
- /* Only go through if we didn't race with anybody else... */
- if (pte_none(*page_table)) {
- if (!PageReserved(new_page))
- ++mm->rss;
- flush_page_to_ram(new_page);
- flush_icache_page(vma, new_page);
- entry = mk_pte(new_page, vma->vm_page_prot);
- if (write_access)
- entry = pte_mkwrite(pte_mkdirty(entry));
-#ifdef CONFIG_XEN
- if ( likely(vma->vm_mm == current->mm) ) {
- XEN_flush_page_update_queue();
- HYPERVISOR_update_va_mapping(address>>PAGE_SHIFT, entry, 0);
- } else {
- set_pte(page_table, entry);
- XEN_flush_page_update_queue();
- }
-#else
- set_pte(page_table, entry);
-#endif
- } else {
- /* One of our sibling threads was faster, back out. */
- page_cache_release(new_page);
- spin_unlock(&mm->page_table_lock);
- return 1;
- }
-
- /* no need to invalidate: a not-present page shouldn't be cached */
- update_mmu_cache(vma, address, entry);
- spin_unlock(&mm->page_table_lock);
- return 2; /* Major fault */
-}
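
The early copy-on-write break above is what lets a MAP_PRIVATE file mapping take writes without dirtying the backing file: the fault copies the page, and the pagecache copy stays clean. A hedged user-space sketch (the scratch file name is illustrative and error handling is trimmed):

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    int fd = open("scratch.bin", O_RDWR | O_CREAT | O_TRUNC, 0600);
    if (fd < 0 || write(fd, "hello", 5) != 5)
        return 1;
    /* Private mapping: the write below copies the file page first. */
    char *p = mmap(NULL, 5, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
    if (p == MAP_FAILED)
        return 1;
    p[0] = 'H';
    char buf[5];
    if (pread(fd, buf, 5, 0) != 5)
        return 1;
    /* The file still holds "hello"; only our private copy says "Hello". */
    printf("mapping: %.5s  file: %.5s\n", p, buf);
    munmap(p, 5);
    close(fd);
    return 0;
}
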
-
-/*
- * These routines also need to handle stuff like marking pages dirty
- * and/or accessed for architectures that don't do it in hardware (most
- * RISC architectures). The early dirtying is also good on the i386.
- *
- * There is also a hook called "update_mmu_cache()" that architectures
- * with external mmu caches can use to update those (ie the Sparc or
- * PowerPC hashed page tables that act as extended TLBs).
- *
- * Note the "page_table_lock". It is to protect against kswapd removing
- * pages from under us. Note that kswapd only ever _removes_ pages, never
- * adds them. As such, once we have noticed that the page is not present,
- * we can drop the lock early.
- *
- * The adding of pages is protected by the MM semaphore (which we hold),
- * so we don't need to worry about a page suddenly being added into
- * our VM.
- *
- * We enter with the pagetable spinlock held, we are supposed to
- * release it when done.
- */
-static inline int handle_pte_fault(struct mm_struct *mm,
- struct vm_area_struct * vma, unsigned long address,
- int write_access, pte_t * pte)
-{
- pte_t entry;
-
- entry = *pte;
- if (!pte_present(entry)) {
- /*
- * If it truly wasn't present, we know that kswapd
- * and the PTE updates will not touch it later. So
- * drop the lock.
- */
- if (pte_none(entry))
- return do_no_page(mm, vma, address, write_access, pte);
- return do_swap_page(mm, vma, address, pte, entry, write_access);
- }
-
- if (write_access) {
- if (!pte_write(entry))
- return do_wp_page(mm, vma, address, pte, entry);
-
- entry = pte_mkdirty(entry);
- }
- entry = pte_mkyoung(entry);
- establish_pte(vma, address, pte, entry);
- spin_unlock(&mm->page_table_lock);
- return 1;
-}
-
-/*
- * By the time we get here, we already hold the mm semaphore
- */
-int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct * vma,
- unsigned long address, int write_access)
-{
- pgd_t *pgd;
- pmd_t *pmd;
-
- current->state = TASK_RUNNING;
- pgd = pgd_offset(mm, address);
-
- /*
- * We need the page table lock to synchronize with kswapd
- * and the SMP-safe atomic PTE updates.
- */
- spin_lock(&mm->page_table_lock);
- pmd = pmd_alloc(mm, pgd, address);
-
- if (pmd) {
- pte_t * pte = pte_alloc(mm, pmd, address);
- if (pte)
- return handle_pte_fault(mm, vma, address, write_access, pte);
- }
- spin_unlock(&mm->page_table_lock);
- return -1;
-}
-
-/*
- * Allocate page middle directory.
- *
- * We've already handled the fast-path in-line, and we own the
- * page table lock.
- *
- * On a two-level page table, this ends up actually being entirely
- * optimized away.
- */
-pmd_t fastcall *__pmd_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
-{
- pmd_t *new;
-
- /* "fast" allocation can happen without dropping the lock.. */
- new = pmd_alloc_one_fast(mm, address);
- if (!new) {
- spin_unlock(&mm->page_table_lock);
- new = pmd_alloc_one(mm, address);
- spin_lock(&mm->page_table_lock);
- if (!new)
- return NULL;
-
- /*
- * Because we dropped the lock, we should re-check the
- * entry, as somebody else could have populated it..
- */
- if (!pgd_none(*pgd)) {
- pmd_free(new);
- check_pgt_cache();
- goto out;
- }
- }
- pgd_populate(mm, pgd, new);
-out:
- return pmd_offset(pgd, address);
-}
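
This allocator and pte_alloc() below share one discipline: try a non-blocking allocation under the lock, otherwise drop the lock, allocate, retake the lock, and re-check whether a racing thread populated the entry first. The shape of that pattern in a stand-alone pthreads sketch (every name here is illustrative, none come from this tree):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *slot;  /* entry that may be populated concurrently */

/* Called with `lock` held; returns with `lock` held. */
static void *get_or_alloc_slot(void)
{
    if (slot)
        return slot;              /* fast path, no lock dance */

    pthread_mutex_unlock(&lock);  /* blocking work: drop the lock */
    void *fresh = malloc(64);
    pthread_mutex_lock(&lock);
    if (!fresh)
        return NULL;

    if (slot) {                   /* lost the race: back out */
        free(fresh);
        return slot;
    }
    slot = fresh;                 /* won the race: publish */
    return slot;
}

int main(void)
{
    pthread_mutex_lock(&lock);
    void *s = get_or_alloc_slot();
    pthread_mutex_unlock(&lock);
    return s ? 0 : 1;
}
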
-
-/*
- * Allocate the page table directory.
- *
- * We've already handled the fast-path in-line, and we own the
- * page table lock.
- */
-pte_t fastcall *pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
-{
- if (pmd_none(*pmd)) {
- pte_t *new;
-
- /* "fast" allocation can happen without dropping the lock.. */
- new = pte_alloc_one_fast(mm, address);
- if (!new) {
- XEN_flush_page_update_queue();
- spin_unlock(&mm->page_table_lock);
- new = pte_alloc_one(mm, address);
- spin_lock(&mm->page_table_lock);
- if (!new)
- return NULL;
-
- /*
- * Because we dropped the lock, we should re-check the
- * entry, as somebody else could have populated it..
- */
- if (!pmd_none(*pmd)) {
- pte_free(new);
- check_pgt_cache();
- goto out;
- }
- }
- pmd_populate(mm, pmd, new);
- }
-out:
- return pte_offset(pmd, address);
-}
-
-int make_pages_present(unsigned long addr, unsigned long end)
-{
- int ret, len, write;
- struct vm_area_struct * vma;
-
- vma = find_vma(current->mm, addr);
- write = (vma->vm_flags & VM_WRITE) != 0;
- if (addr >= end)
- BUG();
- if (end > vma->vm_end)
- BUG();
- len = (end+PAGE_SIZE-1)/PAGE_SIZE-addr/PAGE_SIZE;
- ret = get_user_pages(current, current->mm, addr,
- len, write, 0, NULL, NULL);
- return ret == len ? 0 : -1;
-}
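
The len computation counts pages spanned rather than bytes: end is rounded up and addr rounded down to page boundaries before subtracting, so a range that merely crosses a boundary still counts both pages. In isolation (4 KiB pages assumed):

#include <stdio.h>

#define PAGE_SIZE 4096UL  /* assumed */

static unsigned long pages_spanned(unsigned long addr, unsigned long end)
{
    return (end + PAGE_SIZE - 1) / PAGE_SIZE - addr / PAGE_SIZE;
}

int main(void)
{
    /* Two bytes straddling a page boundary span two pages. */
    printf("%lu\n", pages_spanned(4095, 4097));  /* 2 */
    printf("%lu\n", pages_spanned(0, 4096));     /* 1 */
    return 0;
}
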
-
-struct page * vmalloc_to_page(void * vmalloc_addr)
-{
- unsigned long addr = (unsigned long) vmalloc_addr;
- struct page *page = NULL;
- pmd_t *pmd;
- pte_t *pte;
- pgd_t *pgd;
-
- pgd = pgd_offset_k(addr);
- if (!pgd_none(*pgd)) {
- pmd = pmd_offset(pgd, addr);
- if (!pmd_none(*pmd)) {
- pte = pte_offset(pmd, addr);
- if (pte_present(*pte)) {
- page = pte_page(*pte);
- }
- }
- }
- return page;
-}
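
vmalloc_to_page() is a plain top-down walk, pgd to pmd to pte, bailing out at the first absent level. The same control shape reduced to a toy two-level software table (the types are purely illustrative, nothing kernel-specific):

#include <stdio.h>
#include <stdlib.h>

#define ENTRIES  16
#define L2_SHIFT 4

struct l2 { int present[ENTRIES]; int value[ENTRIES]; };
struct l1 { struct l2 *tables[ENTRIES]; };

/* Walk two levels; return -1 as soon as a level is missing. */
static int lookup(struct l1 *top, unsigned int addr)
{
    struct l2 *t = top->tables[(addr >> L2_SHIFT) % ENTRIES];
    if (!t)
        return -1;
    unsigned int i = addr % ENTRIES;
    return t->present[i] ? t->value[i] : -1;
}

int main(void)
{
    struct l1 top = {0};
    struct l2 *t = calloc(1, sizeof(*t));
    if (!t)
        return 1;
    top.tables[1] = t;
    t->present[3] = 1;
    t->value[3] = 42;
    printf("%d %d\n", lookup(&top, (1 << L2_SHIFT) | 3),  /* 42 */
           lookup(&top, 5));                              /* -1 */
    free(t);
    return 0;
}
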
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/mm/mprotect.c
--- a/linux-2.4.30-xen-sparse/mm/mprotect.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,344 +0,0 @@
-/*
- * linux/mm/mprotect.c
- *
- * (C) Copyright 1994 Linus Torvalds
- */
-#include <linux/slab.h>
-#include <linux/smp_lock.h>
-#include <linux/shm.h>
-#include <linux/mman.h>
-
-#include <asm/uaccess.h>
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
-
-static inline void change_pte_range(pmd_t * pmd, unsigned long address,
- unsigned long size, pgprot_t newprot)
-{
- pte_t * pte;
- unsigned long end;
-
- if (pmd_none(*pmd))
- return;
- if (pmd_bad(*pmd)) {
- pmd_ERROR(*pmd);
- pmd_clear(pmd);
- return;
- }
- pte = pte_offset(pmd, address);
- address &= ~PMD_MASK;
- end = address + size;
- if (end > PMD_SIZE)
- end = PMD_SIZE;
- do {
- if (pte_present(*pte)) {
- pte_t entry;
-
- /* Avoid an SMP race with hardware updated dirty/clean
- * bits by wiping the pte and then setting the new pte
- * into place.
- */
- entry = ptep_get_and_clear(pte);
- set_pte(pte, pte_modify(entry, newprot));
- }
- address += PAGE_SIZE;
- pte++;
- } while (address && (address < end));
-}
-
-static inline void change_pmd_range(pgd_t * pgd, unsigned long address,
- unsigned long size, pgprot_t newprot)
-{
- pmd_t * pmd;
- unsigned long end;
-
- if (pgd_none(*pgd))
- return;
- if (pgd_bad(*pgd)) {
- pgd_ERROR(*pgd);
- pgd_clear(pgd);
- return;
- }
- pmd = pmd_offset(pgd, address);
- address &= ~PGDIR_MASK;
- end = address + size;
- if (end > PGDIR_SIZE)
- end = PGDIR_SIZE;
- do {
- change_pte_range(pmd, address, end - address, newprot);
- address = (address + PMD_SIZE) & PMD_MASK;
- pmd++;
- } while (address && (address < end));
-}
-
-static void change_protection(unsigned long start, unsigned long end, pgprot_t newprot)
-{
- pgd_t *dir;
- unsigned long beg = start;
-
- dir = pgd_offset(current->mm, start);
- flush_cache_range(current->mm, beg, end);
- if (start >= end)
- BUG();
- spin_lock(&current->mm->page_table_lock);
- do {
- change_pmd_range(dir, start, end - start, newprot);
- start = (start + PGDIR_SIZE) & PGDIR_MASK;
- dir++;
- } while (start && (start < end));
- spin_unlock(&current->mm->page_table_lock);
- flush_tlb_range(current->mm, beg, end);
- return;
-}
-
-static inline int mprotect_fixup_all(struct vm_area_struct * vma, struct vm_area_struct ** pprev,
- int newflags, pgprot_t prot)
-{
- struct vm_area_struct * prev = *pprev;
- struct mm_struct * mm = vma->vm_mm;
-
- if (prev && prev->vm_end == vma->vm_start && can_vma_merge(prev, newflags) &&
- !vma->vm_file && !(vma->vm_flags & VM_SHARED)) {
- spin_lock(&mm->page_table_lock);
- prev->vm_end = vma->vm_end;
- __vma_unlink(mm, vma, prev);
- spin_unlock(&mm->page_table_lock);
-
- kmem_cache_free(vm_area_cachep, vma);
- mm->map_count--;
-
- return 0;
- }
-
- spin_lock(&mm->page_table_lock);
- vma->vm_flags = newflags;
- vma->vm_page_prot = prot;
- spin_unlock(&mm->page_table_lock);
-
- *pprev = vma;
-
- return 0;
-}
-
-static inline int mprotect_fixup_start(struct vm_area_struct * vma, struct vm_area_struct ** pprev,
- unsigned long end,
- int newflags, pgprot_t prot)
-{
- struct vm_area_struct * n, * prev = *pprev;
-
- *pprev = vma;
-
- if (prev && prev->vm_end == vma->vm_start && can_vma_merge(prev, newflags) &&
- !vma->vm_file && !(vma->vm_flags & VM_SHARED)) {
- spin_lock(&vma->vm_mm->page_table_lock);
- prev->vm_end = end;
- vma->vm_start = end;
- spin_unlock(&vma->vm_mm->page_table_lock);
-
- return 0;
- }
- n = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
- if (!n)
- return -ENOMEM;
- *n = *vma;
- n->vm_end = end;
- n->vm_flags = newflags;
- n->vm_raend = 0;
- n->vm_page_prot = prot;
- if (n->vm_file)
- get_file(n->vm_file);
- if (n->vm_ops && n->vm_ops->open)
- n->vm_ops->open(n);
- vma->vm_pgoff += (end - vma->vm_start) >> PAGE_SHIFT;
- lock_vma_mappings(vma);
- spin_lock(&vma->vm_mm->page_table_lock);
- vma->vm_start = end;
- __insert_vm_struct(current->mm, n);
- spin_unlock(&vma->vm_mm->page_table_lock);
- unlock_vma_mappings(vma);
-
- return 0;
-}
-
-static inline int mprotect_fixup_end(struct vm_area_struct * vma, struct vm_area_struct ** pprev,
- unsigned long start,
- int newflags, pgprot_t prot)
-{
- struct vm_area_struct * n;
-
- n = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
- if (!n)
- return -ENOMEM;
- *n = *vma;
- n->vm_start = start;
- n->vm_pgoff += (n->vm_start - vma->vm_start) >> PAGE_SHIFT;
- n->vm_flags = newflags;
- n->vm_raend = 0;
- n->vm_page_prot = prot;
- if (n->vm_file)
- get_file(n->vm_file);
- if (n->vm_ops && n->vm_ops->open)
- n->vm_ops->open(n);
- lock_vma_mappings(vma);
- spin_lock(&vma->vm_mm->page_table_lock);
- vma->vm_end = start;
- __insert_vm_struct(current->mm, n);
- spin_unlock(&vma->vm_mm->page_table_lock);
- unlock_vma_mappings(vma);
-
- *pprev = n;
-
- return 0;
-}
-
-static inline int mprotect_fixup_middle(struct vm_area_struct * vma, struct vm_area_struct ** pprev,
- unsigned long start, unsigned long end,
- int newflags, pgprot_t prot)
-{
- struct vm_area_struct * left, * right;
-
- left = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
- if (!left)
- return -ENOMEM;
- right = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
- if (!right) {
- kmem_cache_free(vm_area_cachep, left);
- return -ENOMEM;
- }
- *left = *vma;
- *right = *vma;
- left->vm_end = start;
- right->vm_start = end;
- right->vm_pgoff += (right->vm_start - left->vm_start) >> PAGE_SHIFT;
- left->vm_raend = 0;
- right->vm_raend = 0;
- if (vma->vm_file)
- atomic_add(2,&vma->vm_file->f_count);
- if (vma->vm_ops && vma->vm_ops->open) {
- vma->vm_ops->open(left);
- vma->vm_ops->open(right);
- }
- vma->vm_pgoff += (start - vma->vm_start) >> PAGE_SHIFT;
- vma->vm_raend = 0;
- vma->vm_page_prot = prot;
- lock_vma_mappings(vma);
- spin_lock(&vma->vm_mm->page_table_lock);
- vma->vm_start = start;
- vma->vm_end = end;
- vma->vm_flags = newflags;
- __insert_vm_struct(current->mm, left);
- __insert_vm_struct(current->mm, right);
- spin_unlock(&vma->vm_mm->page_table_lock);
- unlock_vma_mappings(vma);
-
- *pprev = right;
-
- return 0;
-}
-
-static int mprotect_fixup(struct vm_area_struct * vma, struct vm_area_struct ** pprev,
- unsigned long start, unsigned long end, unsigned int newflags)
-{
- pgprot_t newprot;
- int error;
-
- if (newflags == vma->vm_flags) {
- *pprev = vma;
- return 0;
- }
- newprot = protection_map[newflags & 0xf];
- if (start == vma->vm_start) {
- if (end == vma->vm_end)
- error = mprotect_fixup_all(vma, pprev, newflags, newprot);
- else
- error = mprotect_fixup_start(vma, pprev, end, newflags, newprot);
- } else if (end == vma->vm_end)
- error = mprotect_fixup_end(vma, pprev, start, newflags, newprot);
- else
- error = mprotect_fixup_middle(vma, pprev, start, end, newflags, newprot);
-
- if (error)
- return error;
-
- change_protection(start, end, newprot);
- return 0;
-}
-
-asmlinkage long sys_mprotect(unsigned long start, size_t len, unsigned long prot)
-{
- unsigned long nstart, end, tmp;
- struct vm_area_struct * vma, * next, * prev;
- int error = -EINVAL;
-
- if (start & ~PAGE_MASK)
- return -EINVAL;
- len = PAGE_ALIGN(len);
- end = start + len;
- if (end < start)
- return -ENOMEM;
- if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC))
- return -EINVAL;
- if (end == start)
- return 0;
-
- down_write(&current->mm->mmap_sem);
-
- vma = find_vma_prev(current->mm, start, &prev);
- error = -ENOMEM;
- if (!vma || vma->vm_start > start)
- goto out;
-
-#if defined(CONFIG_XEN_PRIVILEGED_GUEST)
- /* mprotect() unsupported for I/O mappings in Xenolinux. */
- error = -EINVAL;
- if (vma->vm_flags & VM_IO)
- goto out;
-#endif
-
- for (nstart = start ; ; ) {
- unsigned int newflags;
- int last = 0;
-
- /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
-
- newflags = prot | (vma->vm_flags & ~(PROT_READ | PROT_WRITE | PROT_EXEC));
- if ((newflags & ~(newflags >> 4)) & 0xf) {
- error = -EACCES;
- goto out;
- }
-
- if (vma->vm_end > end) {
- error = mprotect_fixup(vma, &prev, nstart, end, newflags);
- goto out;
- }
- if (vma->vm_end == end)
- last = 1;
-
- tmp = vma->vm_end;
- next = vma->vm_next;
- error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
- if (error)
- goto out;
- if (last)
- break;
- nstart = tmp;
- vma = next;
- if (!vma || vma->vm_start != nstart) {
- error = -ENOMEM;
- goto out;
- }
- }
- if (next && prev->vm_end == next->vm_start && can_vma_merge(next, prev->vm_flags) &&
- !prev->vm_file && !(prev->vm_flags & VM_SHARED)) {
- spin_lock(&prev->vm_mm->page_table_lock);
- prev->vm_end = next->vm_end;
- __vma_unlink(prev->vm_mm, next, prev);
- spin_unlock(&prev->vm_mm->page_table_lock);
-
- kmem_cache_free(vm_area_cachep, next);
- prev->vm_mm->map_count--;
- }
-out:
- up_write(&current->mm->mmap_sem);
- return error;
-}
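
From user space this path is reached through mprotect(2); the EINVAL check at the top makes page alignment of start the caller's problem. A minimal well-formed call:

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    long page = sysconf(_SC_PAGESIZE);
    char *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED)
        return 1;
    p[0] = 1;
    /* Drop write permission on the page-aligned region. */
    if (mprotect(p, page, PROT_READ) != 0) {
        perror("mprotect");
        return 1;
    }
    printf("read still works: %d\n", p[0]);  /* a write would SIGSEGV */
    munmap(p, page);
    return 0;
}
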
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/mm/mremap.c
--- a/linux-2.4.30-xen-sparse/mm/mremap.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,392 +0,0 @@
-/*
- * linux/mm/remap.c
- *
- * (C) Copyright 1996 Linus Torvalds
- */
-
-#include <linux/slab.h>
-#include <linux/smp_lock.h>
-#include <linux/shm.h>
-#include <linux/mman.h>
-#include <linux/swap.h>
-
-#include <asm/uaccess.h>
-#include <asm/pgalloc.h>
-
-extern int vm_enough_memory(long pages);
-
-static inline pte_t *get_one_pte(struct mm_struct *mm, unsigned long addr)
-{
- pgd_t * pgd;
- pmd_t * pmd;
- pte_t * pte = NULL;
-
- pgd = pgd_offset(mm, addr);
- if (pgd_none(*pgd))
- goto end;
- if (pgd_bad(*pgd)) {
- pgd_ERROR(*pgd);
- pgd_clear(pgd);
- goto end;
- }
-
- pmd = pmd_offset(pgd, addr);
- if (pmd_none(*pmd))
- goto end;
- if (pmd_bad(*pmd)) {
- pmd_ERROR(*pmd);
- pmd_clear(pmd);
- goto end;
- }
-
- pte = pte_offset(pmd, addr);
- if (pte_none(*pte))
- pte = NULL;
-end:
- return pte;
-}
-
-static inline pte_t *alloc_one_pte(struct mm_struct *mm, unsigned long addr)
-{
- pmd_t * pmd;
- pte_t * pte = NULL;
-
- pmd = pmd_alloc(mm, pgd_offset(mm, addr), addr);
- if (pmd)
- pte = pte_alloc(mm, pmd, addr);
- return pte;
-}
-
-static inline int copy_one_pte(struct mm_struct *mm, pte_t * src, pte_t * dst)
-{
- int error = 0;
- pte_t pte;
-
- if (!pte_none(*src)) {
- pte = ptep_get_and_clear(src);
- if (!dst) {
- /* No dest? We must put it back. */
- dst = src;
- error++;
- }
- set_pte(dst, pte);
- }
- return error;
-}
-
-static int move_one_page(struct mm_struct *mm, unsigned long old_addr, unsigned long new_addr)
-{
- int error = 0;
- pte_t * src, * dst;
-
- spin_lock(&mm->page_table_lock);
- src = get_one_pte(mm, old_addr);
- if (src) {
- dst = alloc_one_pte(mm, new_addr);
- src = get_one_pte(mm, old_addr);
- if (src)
- error = copy_one_pte(mm, src, dst);
- }
- spin_unlock(&mm->page_table_lock);
- return error;
-}
-
-static int move_page_tables(struct mm_struct * mm,
- unsigned long new_addr, unsigned long old_addr, unsigned long len)
-{
- unsigned long offset = len;
-
- flush_cache_range(mm, old_addr, old_addr + len);
-
- /*
- * This is not the clever way to do this, but we're taking the
- * easy way out on the assumption that most remappings will be
- * only a few pages.. This also makes error recovery easier.
- */
- while (offset) {
- offset -= PAGE_SIZE;
- if (move_one_page(mm, old_addr + offset, new_addr + offset))
- goto oops_we_failed;
- }
- flush_tlb_range(mm, old_addr, old_addr + len);
- return 0;
-
- /*
- * Ok, the move failed because we didn't have enough pages for
- * the new page table tree. This is unlikely, but we have to
- * take the possibility into account. In that case we just move
- * all the pages back (this will work, because we still have
- * the old page tables)
- */
-oops_we_failed:
- XEN_flush_page_update_queue();
- flush_cache_range(mm, new_addr, new_addr + len);
- while ((offset += PAGE_SIZE) < len)
- move_one_page(mm, new_addr + offset, old_addr + offset);
- XEN_flush_page_update_queue();
- zap_page_range(mm, new_addr, len);
- return -1;
-}
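
The recovery path works because each step is individually reversible: on failure, every page moved so far is moved back through the old page tables, which still exist. The same transactional loop shape in a stand-alone sketch (an illustrative move/rollback over an array, not the kernel structures):

#include <stdio.h>

#define N 8
static int src[N], dst[N];

/* A move that can fail; here it fails at slot `fail_at`. */
static int move_one(int i, int fail_at)
{
    if (i == fail_at)
        return -1;
    dst[i] = src[i];
    src[i] = 0;
    return 0;
}

/* Move every slot; on failure, roll the completed ones back. */
static int move_all(int fail_at)
{
    int i;
    for (i = 0; i < N; i++)
        if (move_one(i, fail_at) < 0)
            goto rollback;
    return 0;
rollback:
    while (--i >= 0) {  /* undo in reverse order */
        src[i] = dst[i];
        dst[i] = 0;
    }
    return -1;
}

int main(void)
{
    for (int i = 0; i < N; i++)
        src[i] = i + 1;
    printf("%d %d\n", move_all(5), src[0]);  /* -1 1: fully rolled back */
    return 0;
}
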
-
-static inline unsigned long move_vma(struct vm_area_struct * vma,
- unsigned long addr, unsigned long old_len, unsigned long new_len,
- unsigned long new_addr)
-{
- struct mm_struct * mm = vma->vm_mm;
- struct vm_area_struct * new_vma, * next, * prev;
- int allocated_vma;
-
- new_vma = NULL;
- next = find_vma_prev(mm, new_addr, &prev);
- if (next) {
- if (prev && prev->vm_end == new_addr &&
- can_vma_merge(prev, vma->vm_flags) && !vma->vm_file && !(vma->vm_flags & VM_SHARED)) {
- spin_lock(&mm->page_table_lock);
- prev->vm_end = new_addr + new_len;
- spin_unlock(&mm->page_table_lock);
- new_vma = prev;
- if (next != prev->vm_next)
- BUG();
- if (prev->vm_end == next->vm_start && can_vma_merge(next, prev->vm_flags)) {
- spin_lock(&mm->page_table_lock);
- prev->vm_end = next->vm_end;
- __vma_unlink(mm, next, prev);
- spin_unlock(&mm->page_table_lock);
-
- mm->map_count--;
- kmem_cache_free(vm_area_cachep, next);
- }
- } else if (next->vm_start == new_addr + new_len &&
- can_vma_merge(next, vma->vm_flags) && !vma->vm_file && !(vma->vm_flags & VM_SHARED)) {
- spin_lock(&mm->page_table_lock);
- next->vm_start = new_addr;
- spin_unlock(&mm->page_table_lock);
- new_vma = next;
- }
- } else {
- prev = find_vma(mm, new_addr-1);
- if (prev && prev->vm_end == new_addr &&
- can_vma_merge(prev, vma->vm_flags) && !vma->vm_file && !(vma->vm_flags & VM_SHARED)) {
- spin_lock(&mm->page_table_lock);
- prev->vm_end = new_addr + new_len;
- spin_unlock(&mm->page_table_lock);
- new_vma = prev;
- }
- }
-
- allocated_vma = 0;
- if (!new_vma) {
- new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
- if (!new_vma)
- goto out;
- allocated_vma = 1;
- }
-
- if (!move_page_tables(current->mm, new_addr, addr, old_len)) {
- unsigned long vm_locked = vma->vm_flags & VM_LOCKED;
-
- if (allocated_vma) {
- *new_vma = *vma;
- new_vma->vm_start = new_addr;
- new_vma->vm_end = new_addr+new_len;
- new_vma->vm_pgoff += (addr-vma->vm_start) >> PAGE_SHIFT;
- new_vma->vm_raend = 0;
- if (new_vma->vm_file)
- get_file(new_vma->vm_file);
- if (new_vma->vm_ops && new_vma->vm_ops->open)
- new_vma->vm_ops->open(new_vma);
- insert_vm_struct(current->mm, new_vma);
- }
-
- /* XXX: possible errors masked, mapping might remain */
- do_munmap(current->mm, addr, old_len);
-
- current->mm->total_vm += new_len >> PAGE_SHIFT;
- if (vm_locked) {
- current->mm->locked_vm += new_len >> PAGE_SHIFT;
- if (new_len > old_len)
- make_pages_present(new_addr + old_len,
- new_addr + new_len);
- }
- return new_addr;
- }
- if (allocated_vma)
- kmem_cache_free(vm_area_cachep, new_vma);
- out:
- return -ENOMEM;
-}
-
-/*
- * Expand (or shrink) an existing mapping, potentially moving it at the
- * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
- *
- * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
- * This option implies MREMAP_MAYMOVE.
- */
-unsigned long do_mremap(unsigned long addr,
- unsigned long old_len, unsigned long new_len,
- unsigned long flags, unsigned long new_addr)
-{
- struct vm_area_struct *vma;
- unsigned long ret = -EINVAL;
-
- if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
- goto out;
-
- if (addr & ~PAGE_MASK)
- goto out;
-
- old_len = PAGE_ALIGN(old_len);
- new_len = PAGE_ALIGN(new_len);
-
- if (old_len > TASK_SIZE || addr > TASK_SIZE - old_len)
- goto out;
-
- if (addr >= TASK_SIZE)
- goto out;
-
- /* new_addr is only valid if MREMAP_FIXED is specified */
- if (flags & MREMAP_FIXED) {
- if (new_addr & ~PAGE_MASK)
- goto out;
- if (!(flags & MREMAP_MAYMOVE))
- goto out;
-
- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
- goto out;
-
- if (new_addr >= TASK_SIZE)
- goto out;
-
- /*
- * Allow new_len == 0 only if new_addr == addr
- * to preserve truncation in place (that was working
- * safely, and some apps may depend on it).
- */
- if (unlikely(!new_len && new_addr != addr))
- goto out;
-
- /* Check if the location we're moving into overlaps the
- * old location at all, and fail if it does.
- */
- if ((new_addr <= addr) && (new_addr+new_len) > addr)
- goto out;
-
- if ((addr <= new_addr) && (addr+old_len) > new_addr)
- goto out;
-
- ret = do_munmap(current->mm, new_addr, new_len);
- if (ret && new_len)
- goto out;
- }
-
- /*
- * Always allow a shrinking remap: that just unmaps
- * the unnecessary pages..
- */
- if (old_len >= new_len) {
- ret = do_munmap(current->mm, addr+new_len, old_len - new_len);
- if (ret && old_len != new_len)
- goto out;
- ret = addr;
- if (!(flags & MREMAP_FIXED) || (new_addr == addr))
- goto out;
- }
-
- /*
- * Ok, we need to grow.. or relocate.
- */
- ret = -EFAULT;
- vma = find_vma(current->mm, addr);
- if (!vma || vma->vm_start > addr)
- goto out;
- /* We can't remap across vm area boundaries */
- if (old_len > vma->vm_end - addr)
- goto out;
- if (vma->vm_flags & VM_DONTEXPAND) {
- if (new_len > old_len)
- goto out;
- }
- if (vma->vm_flags & VM_LOCKED) {
- unsigned long locked = current->mm->locked_vm << PAGE_SHIFT;
- locked += new_len - old_len;
- ret = -EAGAIN;
- if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
- goto out;
- }
- ret = -ENOMEM;
- if ((current->mm->total_vm << PAGE_SHIFT) + (new_len - old_len)
- > current->rlim[RLIMIT_AS].rlim_cur)
- goto out;
- /* Private writable mapping? Check memory availability.. */
- if ((vma->vm_flags & (VM_SHARED | VM_WRITE)) == VM_WRITE &&
- !(flags & MAP_NORESERVE) &&
- !vm_enough_memory((new_len - old_len) >> PAGE_SHIFT))
- goto out;
-
-#if defined(CONFIG_XEN_PRIVILEGED_GUEST)
- /* mremap() unsupported for I/O mappings in Xenolinux. */
- ret = -EINVAL;
- if (vma->vm_flags & VM_IO)
- goto out;
-#endif
-
- /* old_len exactly to the end of the area..
- * And we're not relocating the area.
- */
- if (old_len == vma->vm_end - addr &&
- !((flags & MREMAP_FIXED) && (addr != new_addr)) &&
- (old_len != new_len || !(flags & MREMAP_MAYMOVE))) {
- unsigned long max_addr = TASK_SIZE;
- if (vma->vm_next)
- max_addr = vma->vm_next->vm_start;
- /* can we just expand the current mapping? */
- if (max_addr - addr >= new_len) {
- int pages = (new_len - old_len) >> PAGE_SHIFT;
- spin_lock(&vma->vm_mm->page_table_lock);
- vma->vm_end = addr + new_len;
- spin_unlock(&vma->vm_mm->page_table_lock);
- current->mm->total_vm += pages;
- if (vma->vm_flags & VM_LOCKED) {
- current->mm->locked_vm += pages;
- make_pages_present(addr + old_len,
- addr + new_len);
- }
- ret = addr;
- goto out;
- }
- }
-
- /*
- * We weren't able to just expand or shrink the area,
- * we need to create a new one and move it..
- */
- ret = -ENOMEM;
- if (flags & MREMAP_MAYMOVE) {
- if (!(flags & MREMAP_FIXED)) {
- unsigned long map_flags = 0;
- if (vma->vm_flags & VM_SHARED)
- map_flags |= MAP_SHARED;
-
- new_addr = get_unmapped_area(vma->vm_file, 0, new_len, vma->vm_pgoff, map_flags);
- ret = new_addr;
- if (new_addr & ~PAGE_MASK)
- goto out;
- }
- ret = move_vma(vma, addr, old_len, new_len, new_addr);
- }
-out:
- return ret;
-}
-
-asmlinkage unsigned long sys_mremap(unsigned long addr,
- unsigned long old_len, unsigned long new_len,
- unsigned long flags, unsigned long new_addr)
-{
- unsigned long ret;
-
- down_write(&current->mm->mmap_sem);
- ret = do_mremap(addr, old_len, new_len, flags, new_addr);
- up_write(&current->mm->mmap_sem);
- return ret;
-}
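
The usual way into do_mremap() from user space is mremap(2) with MREMAP_MAYMOVE, which permits the move_vma() relocation above whenever a neighbouring mapping blocks in-place growth:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    size_t old_len = 1UL << 12, new_len = 1UL << 16;
    char *p = mmap(NULL, old_len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED)
        return 1;
    strcpy(p, "payload");
    /* Grow; the kernel may move the mapping, contents move with it. */
    char *q = mremap(p, old_len, new_len, MREMAP_MAYMOVE);
    if (q == MAP_FAILED)
        return 1;
    printf("%s (moved: %s)\n", q, q == p ? "no" : "yes");
    munmap(q, new_len);
    return 0;
}
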
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/mm/page_alloc.c
--- a/linux-2.4.30-xen-sparse/mm/page_alloc.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,972 +0,0 @@
-/*
- * linux/mm/page_alloc.c
- *
- * Manages the free list, the system allocates free pages here.
- * Note that kmalloc() lives in slab.c
- *
- * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
- * Swap reorganised 29.12.95, Stephen Tweedie
- * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
- * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
- * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
- * Zone balancing, Kanoj Sarcar, SGI, Jan 2000
- */
-
-#include <linux/config.h>
-#include <linux/mm.h>
-#include <linux/swap.h>
-#include <linux/swapctl.h>
-#include <linux/interrupt.h>
-#include <linux/pagemap.h>
-#include <linux/bootmem.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-
-int nr_swap_pages;
-int nr_active_pages;
-int nr_inactive_pages;
-LIST_HEAD(inactive_list);
-LIST_HEAD(active_list);
-pg_data_t *pgdat_list;
-
-/*
- *
- * The zone_table array is used to look up the address of the
- * struct zone corresponding to a given zone number (ZONE_DMA,
- * ZONE_NORMAL, or ZONE_HIGHMEM).
- */
-zone_t *zone_table[MAX_NR_ZONES*MAX_NR_NODES];
-EXPORT_SYMBOL(zone_table);
-
-static char *zone_names[MAX_NR_ZONES] = { "DMA", "Normal", "HighMem" };
-static int zone_balance_ratio[MAX_NR_ZONES] __initdata = { 128, 128, 128, };
-static int zone_balance_min[MAX_NR_ZONES] __initdata = { 20 , 20, 20, };
-static int zone_balance_max[MAX_NR_ZONES] __initdata = { 255 , 255, 255, };
-static int lower_zone_reserve_ratio[MAX_NR_ZONES-1] = { 256, 32 };
-
-int vm_gfp_debug = 0;
-
-static void FASTCALL(__free_pages_ok (struct page *page, unsigned int order));
-
-static spinlock_t free_pages_ok_no_irq_lock = SPIN_LOCK_UNLOCKED;
-struct page * free_pages_ok_no_irq_head;
-
-static void do_free_pages_ok_no_irq(void * arg)
-{
- struct page * page, * __page;
-
- spin_lock_irq(&free_pages_ok_no_irq_lock);
-
- page = free_pages_ok_no_irq_head;
- free_pages_ok_no_irq_head = NULL;
-
- spin_unlock_irq(&free_pages_ok_no_irq_lock);
-
- while (page) {
- __page = page;
- page = page->next_hash;
- __free_pages_ok(__page, __page->index);
- }
-}
-
-static struct tq_struct free_pages_ok_no_irq_task = {
- .routine = do_free_pages_ok_no_irq,
-};
-
-
-/*
- * Temporary debugging check.
- */
-#define BAD_RANGE(zone, page) \
-( \
- (((page) - mem_map) >= ((zone)->zone_start_mapnr+(zone)->size)) \
- || (((page) - mem_map) < (zone)->zone_start_mapnr) \
- || ((zone) != page_zone(page)) \
-)
-
-/*
- * Freeing function for a buddy system allocator.
- * Contrary to prior comments, this is *NOT* hairy, and there
- * is no reason for anyone not to understand it.
- *
- * The concept of a buddy system is to maintain direct-mapped tables
- * (containing bit values) for memory blocks of various "orders".
- * The bottom level table contains the map for the smallest allocatable
- * units of memory (here, pages), and each level above it describes
- * pairs of units from the levels below, hence, "buddies".
- * At a high level, all that happens here is marking the table entry
- * at the bottom level available, and propagating the changes upward
- * as necessary, plus some accounting needed to play nicely with other
- * parts of the VM system.
- * At each level, we keep one bit for each pair of blocks, which
- * is set to 1 iff only one of the pair is allocated. So when we
- * are allocating or freeing one, we can derive the state of the
- * other. That is, if we allocate a small block, and both were
- * free, the remainder of the region must be split into blocks.
- * If a block is freed, and its buddy is also free, then this
- * triggers coalescing into a block of larger size.
- *
- * -- wli
- */
-
-static void fastcall __free_pages_ok (struct page *page, unsigned int order)
-{
- unsigned long index, page_idx, mask, flags;
- free_area_t *area;
- struct page *base;
- zone_t *zone;
-
- if (PageForeign(page))
- return (PageForeignDestructor(page))(page);
-
- /*
- * Yes, think what happens when other parts of the kernel take
- * a reference to a page in order to pin it for io. -ben
- */
- if (PageLRU(page)) {
- if (unlikely(in_interrupt())) {
- unsigned long flags;
-
- spin_lock_irqsave(&free_pages_ok_no_irq_lock, flags);
- page->next_hash = free_pages_ok_no_irq_head;
- free_pages_ok_no_irq_head = page;
- page->index = order;
-
- spin_unlock_irqrestore(&free_pages_ok_no_irq_lock, flags);
-
- schedule_task(&free_pages_ok_no_irq_task);
- return;
- }
-
- lru_cache_del(page);
- }
-
- if (page->buffers)
- BUG();
- if (page->mapping)
- BUG();
- if (!VALID_PAGE(page))
- BUG();
- if (PageLocked(page))
- BUG();
- if (PageActive(page))
- BUG();
- ClearPageReferenced(page);
- ClearPageDirty(page);
-
- if (current->flags & PF_FREE_PAGES)
- goto local_freelist;
- back_local_freelist:
-
- zone = page_zone(page);
-
- mask = (~0UL) << order;
- base = zone->zone_mem_map;
- page_idx = page - base;
- if (page_idx & ~mask)
- BUG();
- index = page_idx >> (1 + order);
-
- area = zone->free_area + order;
-
- spin_lock_irqsave(&zone->lock, flags);
-
- zone->free_pages -= mask;
-
- while (mask + (1 << (MAX_ORDER-1))) {
- struct page *buddy1, *buddy2;
-
- if (area >= zone->free_area + MAX_ORDER)
- BUG();
- if (!__test_and_change_bit(index, area->map))
- /*
- * the buddy page is still allocated.
- */
- break;
- /*
- * Move the buddy up one level.
- * This code is taking advantage of the identity:
- * -mask = 1+~mask
- */
- buddy1 = base + (page_idx ^ -mask);
- buddy2 = base + page_idx;
- if (BAD_RANGE(zone,buddy1))
- BUG();
- if (BAD_RANGE(zone,buddy2))
- BUG();
-
- list_del(&buddy1->list);
- mask <<= 1;
- area++;
- index >>= 1;
- page_idx &= mask;
- }
- list_add(&(base + page_idx)->list, &area->free_list);
-
- spin_unlock_irqrestore(&zone->lock, flags);
- return;
-
- local_freelist:
- if (current->nr_local_pages)
- goto back_local_freelist;
- if (in_interrupt())
- goto back_local_freelist;
-
- list_add(&page->list, &current->local_pages);
- page->index = order;
- current->nr_local_pages++;
-}
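
The coalescing loop leans on the identity -mask = 1 + ~mask noted in the comment: with mask = ~0UL << order, XOR-ing page_idx with -mask flips exactly the order-th bit, which names the buddy block. Checked in isolation:

#include <stdio.h>

/* Index of the buddy of the block starting at page_idx, at `order`. */
static unsigned long buddy_idx(unsigned long page_idx, unsigned int order)
{
    unsigned long mask = (~0UL) << order;
    return page_idx ^ -mask;  /* -mask == 1UL << order */
}

int main(void)
{
    /* Order 0: 4 <-> 5; order 1: 4 <-> 6; order 2: 4 <-> 0. */
    for (unsigned int order = 0; order < 3; order++)
        printf("order %u: buddy of 4 is %lu\n", order, buddy_idx(4, order));
    return 0;
}
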
-
-#define MARK_USED(index, order, area) \
- __change_bit((index) >> (1+(order)), (area)->map)
-
-static inline struct page * expand (zone_t *zone, struct page *page,
- unsigned long index, int low, int high, free_area_t * area)
-{
- unsigned long size = 1 << high;
-
- while (high > low) {
- if (BAD_RANGE(zone,page))
- BUG();
- area--;
- high--;
- size >>= 1;
- list_add(&(page)->list, &(area)->free_list);
- MARK_USED(index, high, area);
- index += size;
- page += size;
- }
- if (BAD_RANGE(zone,page))
- BUG();
- return page;
-}
-
-static FASTCALL(struct page * rmqueue(zone_t *zone, unsigned int order));
-static struct page * fastcall rmqueue(zone_t *zone, unsigned int order)
-{
- free_area_t * area = zone->free_area + order;
- unsigned int curr_order = order;
- struct list_head *head, *curr;
- unsigned long flags;
- struct page *page;
-
- spin_lock_irqsave(&zone->lock, flags);
- do {
- head = &area->free_list;
- curr = head->next;
-
- if (curr != head) {
- unsigned int index;
-
- page = list_entry(curr, struct page, list);
- if (BAD_RANGE(zone,page))
- BUG();
- list_del(curr);
- index = page - zone->zone_mem_map;
- if (curr_order != MAX_ORDER-1)
- MARK_USED(index, curr_order, area);
- zone->free_pages -= 1UL << order;
-
- page = expand(zone, page, index, order, curr_order, area);
- spin_unlock_irqrestore(&zone->lock, flags);
-
- set_page_count(page, 1);
- if (BAD_RANGE(zone,page))
- BUG();
- if (PageLRU(page))
- BUG();
- if (PageActive(page))
- BUG();
- return page;
- }
- curr_order++;
- area++;
- } while (curr_order < MAX_ORDER);
- spin_unlock_irqrestore(&zone->lock, flags);
-
- return NULL;
-}
-
-#ifndef CONFIG_DISCONTIGMEM
-struct page * fastcall _alloc_pages(unsigned int gfp_mask, unsigned int order)
-{
- return __alloc_pages(gfp_mask, order,
- contig_page_data.node_zonelists+(gfp_mask & GFP_ZONEMASK));
-}
-#endif
-
-static struct page * FASTCALL(balance_classzone(zone_t *, unsigned int, unsigned int, int *));
-static struct page * fastcall balance_classzone(zone_t * classzone, unsigned int gfp_mask, unsigned int order, int * freed)
-{
- struct page * page = NULL;
- int __freed;
-
- if (in_interrupt())
- BUG();
-
- current->allocation_order = order;
- current->flags |= PF_MEMALLOC | PF_FREE_PAGES;
-
- __freed = try_to_free_pages_zone(classzone, gfp_mask);
-
- current->flags &= ~(PF_MEMALLOC | PF_FREE_PAGES);
-
- if (current->nr_local_pages) {
- struct list_head * entry, * local_pages;
- struct page * tmp;
- int nr_pages;
-
- local_pages = &current->local_pages;
-
- if (likely(__freed)) {
- /* pick from the last inserted so we're lifo */
- entry = local_pages->next;
- do {
- tmp = list_entry(entry, struct page, list);
- if (tmp->index == order && memclass(page_zone(tmp), classzone)) {
- list_del(entry);
- current->nr_local_pages--;
- set_page_count(tmp, 1);
- page = tmp;
-
- if (page->buffers)
- BUG();
- if (page->mapping)
- BUG();
- if (!VALID_PAGE(page))
- BUG();
- if (PageLocked(page))
- BUG();
- if (PageLRU(page))
- BUG();
- if (PageActive(page))
- BUG();
- if (PageDirty(page))
- BUG();
-
- break;
- }
- } while ((entry = entry->next) != local_pages);
- }
-
- nr_pages = current->nr_local_pages;
- /* free in reverse order so that the global order will be lifo */
- while ((entry = local_pages->prev) != local_pages) {
- list_del(entry);
- tmp = list_entry(entry, struct page, list);
- __free_pages_ok(tmp, tmp->index);
- if (!nr_pages--)
- BUG();
- }
- current->nr_local_pages = 0;
- }
-
- *freed = __freed;
- return page;
-}
-
-static inline unsigned long zone_free_pages(zone_t * zone, unsigned int order)
-{
- long free = zone->free_pages - (1UL << order);
- return free >= 0 ? free : 0;
-}
-
-/*
- * This is the 'heart' of the zoned buddy allocator:
- */
-struct page * fastcall __alloc_pages(unsigned int gfp_mask, unsigned int order, zonelist_t *zonelist)
-{
- zone_t **zone, * classzone;
- struct page * page;
- int freed, class_idx;
-
- zone = zonelist->zones;
- classzone = *zone;
- class_idx = zone_idx(classzone);
-
- for (;;) {
- zone_t *z = *(zone++);
- if (!z)
- break;
-
- if (zone_free_pages(z, order) > z->watermarks[class_idx].low) {
- page = rmqueue(z, order);
- if (page)
- return page;
- }
- }
-
- classzone->need_balance = 1;
- mb();
- if (waitqueue_active(&kswapd_wait))
- wake_up_interruptible(&kswapd_wait);
-
- zone = zonelist->zones;
- for (;;) {
- unsigned long min;
- zone_t *z = *(zone++);
- if (!z)
- break;
-
- min = z->watermarks[class_idx].min;
- if (!(gfp_mask & __GFP_WAIT))
- min >>= 2;
- if (zone_free_pages(z, order) > min) {
- page = rmqueue(z, order);
- if (page)
- return page;
- }
- }
-
- /* here we're in the low on memory slow path */
-
- if ((current->flags & PF_MEMALLOC) &&
- (!in_interrupt() || (current->flags & PF_MEMDIE))) {
- zone = zonelist->zones;
- for (;;) {
- zone_t *z = *(zone++);
- if (!z)
- break;
-
- page = rmqueue(z, order);
- if (page)
- return page;
- }
- return NULL;
- }
-
- /* Atomic allocations - we can't balance anything */
- if (!(gfp_mask & __GFP_WAIT))
- goto out;
-
- rebalance:
- page = balance_classzone(classzone, gfp_mask, order, &freed);
- if (page)
- return page;
-
- zone = zonelist->zones;
- if (likely(freed)) {
- for (;;) {
- zone_t *z = *(zone++);
- if (!z)
- break;
-
- if (zone_free_pages(z, order) > z->watermarks[class_idx].min) {
- page = rmqueue(z, order);
- if (page)
- return page;
- }
- }
- goto rebalance;
- } else {
- /*
- * Check that no other task has been killed meanwhile,
- * in such a case we can succeed the allocation.
- */
- for (;;) {
- zone_t *z = *(zone++);
- if (!z)
- break;
-
- if (zone_free_pages(z, order) > z->watermarks[class_idx].high) {
- page = rmqueue(z, order);
- if (page)
- return page;
- }
- }
- }
-
- out:
- printk(KERN_NOTICE "__alloc_pages: %u-order allocation failed
(gfp=0x%x/%i)\n",
- order, gfp_mask, !!(current->flags & PF_MEMALLOC));
- if (unlikely(vm_gfp_debug))
- dump_stack();
- return NULL;
-}
-
-/*
- * Common helper functions.
- */
-fastcall unsigned long __get_free_pages(unsigned int gfp_mask, unsigned int order)
-{
- struct page * page;
-
- page = alloc_pages(gfp_mask, order);
- if (!page)
- return 0;
- return (unsigned long) page_address(page);
-}
-
-fastcall unsigned long get_zeroed_page(unsigned int gfp_mask)
-{
- struct page * page;
-
- page = alloc_pages(gfp_mask, 0);
- if (page) {
- void *address = page_address(page);
- clear_page(address);
- return (unsigned long) address;
- }
- return 0;
-}
-
-fastcall void __free_pages(struct page *page, unsigned int order)
-{
- if (!PageReserved(page) && put_page_testzero(page))
- __free_pages_ok(page, order);
-}
-
-fastcall void free_pages(unsigned long addr, unsigned int order)
-{
- if (addr != 0)
- __free_pages(virt_to_page(addr), order);
-}
-
-/*
- * Total amount of free (allocatable) RAM:
- */
-unsigned int nr_free_pages (void)
-{
- unsigned int sum = 0;
- zone_t *zone;
-
- for_each_zone(zone)
- sum += zone->free_pages;
-
- return sum;
-}
-
-/*
- * Amount of free RAM allocatable as buffer memory:
- */
-unsigned int nr_free_buffer_pages (void)
-{
- pg_data_t *pgdat;
- unsigned int sum = 0;
- zonelist_t *zonelist;
- zone_t **zonep, *zone;
-
- for_each_pgdat(pgdat) {
- int class_idx;
- zonelist = pgdat->node_zonelists + (GFP_USER & GFP_ZONEMASK);
- zonep = zonelist->zones;
- zone = *zonep;
- class_idx = zone_idx(zone);
-
- sum += zone->nr_cache_pages;
- for (; zone; zone = *zonep++) {
- int free = zone->free_pages - zone->watermarks[class_idx].high;
- if (free <= 0)
- continue;
- sum += free;
- }
- }
-
- return sum;
-}
-
-#if CONFIG_HIGHMEM
-unsigned int nr_free_highpages (void)
-{
- pg_data_t *pgdat;
- unsigned int pages = 0;
-
- for_each_pgdat(pgdat)
- pages += pgdat->node_zones[ZONE_HIGHMEM].free_pages;
-
- return pages;
-}
-
-unsigned int freeable_lowmem(void)
-{
- unsigned int pages = 0;
- pg_data_t *pgdat;
-
- for_each_pgdat(pgdat) {
- pages += pgdat->node_zones[ZONE_DMA].free_pages;
- pages += pgdat->node_zones[ZONE_DMA].nr_active_pages;
- pages += pgdat->node_zones[ZONE_DMA].nr_inactive_pages;
- pages += pgdat->node_zones[ZONE_NORMAL].free_pages;
- pages += pgdat->node_zones[ZONE_NORMAL].nr_active_pages;
- pages += pgdat->node_zones[ZONE_NORMAL].nr_inactive_pages;
- }
-
- return pages;
-}
-#endif
-
-#define K(x) ((x) << (PAGE_SHIFT-10))
-
-/*
- * Show free area list (used inside shift_scroll-lock stuff)
- * We also calculate the percentage fragmentation. We do this by counting the
- * memory on each free list with the exception of the first item on the list.
- */
-void show_free_areas_core(pg_data_t *pgdat)
-{
- unsigned int order;
- unsigned type;
- pg_data_t *tmpdat = pgdat;
-
- printk("Free pages: %6dkB (%6dkB HighMem)\n",
- K(nr_free_pages()),
- K(nr_free_highpages()));
-
- while (tmpdat) {
- zone_t *zone;
- for (zone = tmpdat->node_zones;
- zone < tmpdat->node_zones + MAX_NR_ZONES; zone++)
- printk("Zone:%s freepages:%6lukB\n",
- zone->name,
- K(zone->free_pages));
-
- tmpdat = tmpdat->node_next;
- }
-
- printk("( Active: %d, inactive: %d, free: %d )\n",
- nr_active_pages,
- nr_inactive_pages,
- nr_free_pages());
-
- for (type = 0; type < MAX_NR_ZONES; type++) {
- struct list_head *head, *curr;
- zone_t *zone = pgdat->node_zones + type;
- unsigned long nr, total, flags;
-
- total = 0;
- if (zone->size) {
- spin_lock_irqsave(&zone->lock, flags);
- for (order = 0; order < MAX_ORDER; order++) {
- head = &(zone->free_area + order)->free_list;
- curr = head;
- nr = 0;
- for (;;) {
- if ((curr = curr->next) == head)
- break;
- nr++;
- }
- total += nr * (1 << order);
- printk("%lu*%lukB ", nr, K(1UL) << order);
- }
- spin_unlock_irqrestore(&zone->lock, flags);
- }
- printk("= %lukB)\n", K(total));
- }
-
-#ifdef SWAP_CACHE_INFO
- show_swap_cache_info();
-#endif
-}
-
-void show_free_areas(void)
-{
- show_free_areas_core(pgdat_list);
-}
-
-/*
- * Builds allocation fallback zone lists.
- */
-static inline void build_zonelists(pg_data_t *pgdat)
-{
- int i, j, k;
-
- for (i = 0; i <= GFP_ZONEMASK; i++) {
- zonelist_t *zonelist;
- zone_t *zone;
-
- zonelist = pgdat->node_zonelists + i;
- memset(zonelist, 0, sizeof(*zonelist));
-
- j = 0;
- k = ZONE_NORMAL;
- if (i & __GFP_HIGHMEM)
- k = ZONE_HIGHMEM;
- if (i & __GFP_DMA)
- k = ZONE_DMA;
-
- switch (k) {
- default:
- BUG();
- /*
- * fallthrough:
- */
- case ZONE_HIGHMEM:
- zone = pgdat->node_zones + ZONE_HIGHMEM;
- if (zone->size) {
-#ifndef CONFIG_HIGHMEM
- BUG();
-#endif
- zonelist->zones[j++] = zone;
- }
- case ZONE_NORMAL:
- zone = pgdat->node_zones + ZONE_NORMAL;
- if (zone->size)
- zonelist->zones[j++] = zone;
- case ZONE_DMA:
- zone = pgdat->node_zones + ZONE_DMA;
- if (zone->size)
- zonelist->zones[j++] = zone;
- }
- zonelist->zones[j++] = NULL;
- }
-}
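
The switch above falls through on purpose: a __GFP_HIGHMEM request builds the fallback list HighMem, Normal, DMA; a plain request builds Normal, DMA; a __GFP_DMA request gets DMA alone. A sketch of the resulting construction without the kernel types:

#include <stdio.h>

enum { ZONE_DMA, ZONE_NORMAL, ZONE_HIGHMEM, MAX_NR_ZONES };
static const char *names[MAX_NR_ZONES] = { "DMA", "Normal", "HighMem" };

/* Fill `list` with zones from `k` downward, mirroring the fallthrough. */
static int build_list(int k, int *list)
{
    int j = 0;
    for (int z = k; z >= ZONE_DMA; z--)
        list[j++] = z;
    return j;
}

int main(void)
{
    int list[MAX_NR_ZONES];
    for (int k = ZONE_DMA; k < MAX_NR_ZONES; k++) {
        int n = build_list(k, list);
        printf("request %s:", names[k]);
        for (int i = 0; i < n; i++)
            printf(" %s", names[list[i]]);
        printf("\n");
    }
    return 0;
}
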
-
-/*
- * Helper functions to size the waitqueue hash table.
- * Essentially these want to choose hash table sizes sufficiently
- * large so that collisions trying to wait on pages are rare.
- * But in fact, the number of active page waitqueues on typical
- * systems is ridiculously low, less than 200. So this is even
- * conservative, even though it seems large.
- *
- * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
- * waitqueues, i.e. the size of the waitq table given the number of pages.
- */
-#define PAGES_PER_WAITQUEUE 256
-
-static inline unsigned long wait_table_size(unsigned long pages)
-{
- unsigned long size = 1;
-
- pages /= PAGES_PER_WAITQUEUE;
-
- while (size < pages)
- size <<= 1;
-
- /*
- * Once we have dozens or even hundreds of threads sleeping
- * on IO we've got bigger problems than wait queue collision.
- * Limit the size of the wait table to a reasonable size.
- */
- size = min(size, 4096UL);
-
- return size;
-}
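
So the table size is pages/PAGES_PER_WAITQUEUE rounded up to the next power of two and capped at 4096 entries; wait_table_bits() below then recovers log2 of that size. Both steps in a stand-alone sketch (the shift loop stands in for the architecture-specific ffz):

#include <stdio.h>

#define PAGES_PER_WAITQUEUE 256

static unsigned long wait_table_size(unsigned long pages)
{
    unsigned long size = 1;

    pages /= PAGES_PER_WAITQUEUE;
    while (size < pages)
        size <<= 1;                  /* round up to a power of two */
    return size < 4096UL ? size : 4096UL;
}

static unsigned long wait_table_bits(unsigned long size)
{
    unsigned long bits = 0;
    while ((size >>= 1) != 0)        /* log2 of a power of two */
        bits++;
    return bits;
}

int main(void)
{
    unsigned long s = wait_table_size(100000);  /* ~391 queues -> 512 */
    printf("size=%lu bits=%lu\n", s, wait_table_bits(s));
    return 0;
}
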
-
-/*
- * This is an integer logarithm so that shifts can be used later
- * to extract the more random high bits from the multiplicative
- * hash function before the remainder is taken.
- */
-static inline unsigned long wait_table_bits(unsigned long size)
-{
- return ffz(~size);
-}
-
-#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
-
-/*
- * Set up the zone data structures:
- * - mark all pages reserved
- * - mark all memory queues empty
- * - clear the memory bitmaps
- */
-void __init free_area_init_core(int nid, pg_data_t *pgdat, struct page **gmap,
- unsigned long *zones_size, unsigned long zone_start_paddr,
- unsigned long *zholes_size, struct page *lmem_map)
-{
- unsigned long i, j;
- unsigned long map_size;
- unsigned long totalpages, offset, realtotalpages;
- const unsigned long zone_required_alignment = 1UL << (MAX_ORDER-1);
-
- if (zone_start_paddr & ~PAGE_MASK)
- BUG();
-
- totalpages = 0;
- for (i = 0; i < MAX_NR_ZONES; i++) {
- unsigned long size = zones_size[i];
- totalpages += size;
- }
- realtotalpages = totalpages;
- if (zholes_size)
- for (i = 0; i < MAX_NR_ZONES; i++)
- realtotalpages -= zholes_size[i];
-
- printk("On node %d totalpages: %lu\n", nid, realtotalpages);
-
- /*
- * Some architectures (with lots of mem and discontinuous memory
- * maps) have to search for a good mem_map area:
- * For discontigmem, the conceptual mem map array starts from
- * PAGE_OFFSET, we need to align the actual array onto a mem map
- * boundary, so that MAP_NR works.
- */
- map_size = (totalpages + 1)*sizeof(struct page);
- if (lmem_map == (struct page *)0) {
- lmem_map = (struct page *) alloc_bootmem_node(pgdat, map_size);
- lmem_map = (struct page *)(PAGE_OFFSET +
- MAP_ALIGN((unsigned long)lmem_map - PAGE_OFFSET));
- }
- *gmap = pgdat->node_mem_map = lmem_map;
- pgdat->node_size = totalpages;
- pgdat->node_start_paddr = zone_start_paddr;
- pgdat->node_start_mapnr = (lmem_map - mem_map);
- pgdat->nr_zones = 0;
-
- offset = lmem_map - mem_map;
- for (j = 0; j < MAX_NR_ZONES; j++) {
- zone_t *zone = pgdat->node_zones + j;
- unsigned long mask;
- unsigned long size, realsize;
- int idx;
-
- zone_table[nid * MAX_NR_ZONES + j] = zone;
- realsize = size = zones_size[j];
- if (zholes_size)
- realsize -= zholes_size[j];
-
- printk("zone(%lu): %lu pages.\n", j, size);
- zone->size = size;
- zone->realsize = realsize;
- zone->name = zone_names[j];
- zone->lock = SPIN_LOCK_UNLOCKED;
- zone->zone_pgdat = pgdat;
- zone->free_pages = 0;
- zone->need_balance = 0;
- zone->nr_active_pages = zone->nr_inactive_pages = 0;
-
-
- if (!size)
- continue;
-
- /*
- * The per-page waitqueue mechanism uses hashed waitqueues
- * per zone.
- */
- zone->wait_table_size = wait_table_size(size);
- zone->wait_table_shift =
- BITS_PER_LONG - wait_table_bits(zone->wait_table_size);
- zone->wait_table = (wait_queue_head_t *)
- alloc_bootmem_node(pgdat, zone->wait_table_size
- * sizeof(wait_queue_head_t));
-
- for(i = 0; i < zone->wait_table_size; ++i)
- init_waitqueue_head(zone->wait_table + i);
-
- pgdat->nr_zones = j+1;
-
- mask = (realsize / zone_balance_ratio[j]);
- if (mask < zone_balance_min[j])
- mask = zone_balance_min[j];
- else if (mask > zone_balance_max[j])
- mask = zone_balance_max[j];
- zone->watermarks[j].min = mask;
- zone->watermarks[j].low = mask*2;
- zone->watermarks[j].high = mask*3;
- /* now set the watermarks of the lower zones in the "j"
classzone */
- for (idx = j-1; idx >= 0; idx--) {
- zone_t * lower_zone = pgdat->node_zones + idx;
- unsigned long lower_zone_reserve;
- if (!lower_zone->size)
- continue;
-
- mask = lower_zone->watermarks[idx].min;
- lower_zone->watermarks[j].min = mask;
- lower_zone->watermarks[j].low = mask*2;
- lower_zone->watermarks[j].high = mask*3;
-
- /* now the brainer part */
- lower_zone_reserve = realsize / lower_zone_reserve_ratio[idx];
- lower_zone->watermarks[j].min += lower_zone_reserve;
- lower_zone->watermarks[j].low += lower_zone_reserve;
- lower_zone->watermarks[j].high += lower_zone_reserve;
-
- realsize += lower_zone->realsize;
- }
-
- zone->zone_mem_map = mem_map + offset;
- zone->zone_start_mapnr = offset;
- zone->zone_start_paddr = zone_start_paddr;
-
- if ((zone_start_paddr >> PAGE_SHIFT) & (zone_required_alignment-1))
- printk("BUG: wrong zone alignment, it will crash\n");
-
- /*
- * Initially all pages are reserved - free ones are freed
- * up by free_all_bootmem() once the early boot process is
- * done. Non-atomic initialization, single-pass.
- */
- for (i = 0; i < size; i++) {
- struct page *page = mem_map + offset + i;
- set_page_zone(page, nid * MAX_NR_ZONES + j);
- set_page_count(page, 0);
- SetPageReserved(page);
- INIT_LIST_HEAD(&page->list);
- if (j != ZONE_HIGHMEM)
- set_page_address(page, __va(zone_start_paddr));
- zone_start_paddr += PAGE_SIZE;
- }
-
- offset += size;
- for (i = 0; ; i++) {
- unsigned long bitmap_size;
-
- INIT_LIST_HEAD(&zone->free_area[i].free_list);
- if (i == MAX_ORDER-1) {
- zone->free_area[i].map = NULL;
- break;
- }
-
- /*
- * Page buddy system uses "index >> (i+1)",
- * where "index" is at most "size-1".
- *
- * The extra "+3" is to round down to byte
- * size (8 bits per byte assumption). Thus
- * we get "(size-1) >> (i+4)" as the last byte
- * we can access.
- *
- * The "+1" is because we want to round the
- * byte allocation up rather than down. So
- * we should have had a "+7" before we shifted
- * down by three. Also, we have to add one as
- * we actually _use_ the last bit (it's [0,n]
- * inclusive, not [0,n[).
- *
- * So we actually had +7+1 before we shift
- * down by 3. But (n+8) >> 3 == (n >> 3) + 1
- * (modulo overflows, which we do not have).
- *
- * Finally, we LONG_ALIGN because all bitmap
- * operations are on longs.
- */
- bitmap_size = (size-1) >> (i+4);
- bitmap_size = LONG_ALIGN(bitmap_size+1);
- zone->free_area[i].map =
- (unsigned long *) alloc_bootmem_node(pgdat, bitmap_size);
- }
- }
- build_zonelists(pgdat);
-}
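
The long bitmap comment inside the loop boils down to this: the order-i map needs one bit per pair of order-i blocks, rounded up to whole bytes and then to whole longs. The arithmetic on its own:

#include <stdio.h>

#define LONG_ALIGN(x) (((x) + sizeof(long) - 1) & ~(sizeof(long) - 1))

/* Bytes of buddy bitmap needed at `order` for a zone of `size` pages. */
static unsigned long bitmap_bytes(unsigned long size, unsigned int order)
{
    unsigned long bytes = (size - 1) >> (order + 4);
    return LONG_ALIGN(bytes + 1);
}

int main(void)
{
    /* 1 GiB of 4 KiB pages = 262144 pages; order 0 needs 16 KiB of map. */
    for (unsigned int order = 0; order < 4; order++)
        printf("order %u: %lu bytes\n", order, bitmap_bytes(262144, order));
    return 0;
}
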
-
-void __init free_area_init(unsigned long *zones_size)
-{
- free_area_init_core(0, &contig_page_data, &mem_map, zones_size, 0, 0, 0);
-}
-
-static int __init setup_mem_frac(char *str)
-{
- int j = 0;
-
- while (get_option(&str, &zone_balance_ratio[j++]) == 2);
- printk("setup_mem_frac: ");
- for (j = 0; j < MAX_NR_ZONES; j++) printk("%d ",
zone_balance_ratio[j]);
- printk("\n");
- return 1;
-}
-
-__setup("memfrac=", setup_mem_frac);
-
-static int __init setup_lower_zone_reserve(char *str)
-{
- int j = 0;
-
- while (get_option(&str, &lower_zone_reserve_ratio[j++]) == 2);
- printk("setup_lower_zone_reserve: ");
- for (j = 0; j < MAX_NR_ZONES-1; j++) printk("%d ",
lower_zone_reserve_ratio[j]);
- printk("\n");
- return 1;
-}
-
-__setup("lower_zone_reserve=", setup_lower_zone_reserve);
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/mm/swapfile.c
--- a/linux-2.4.30-xen-sparse/mm/swapfile.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,1269 +0,0 @@
-/*
- * linux/mm/swapfile.c
- *
- * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
- * Swap reorganised 29.12.95, Stephen Tweedie
- */
-
-#include <linux/slab.h>
-#include <linux/smp_lock.h>
-#include <linux/kernel_stat.h>
-#include <linux/swap.h>
-#include <linux/swapctl.h>
-#include <linux/blkdev.h> /* for blk_size */
-#include <linux/vmalloc.h>
-#include <linux/pagemap.h>
-#include <linux/shm.h>
-
-#include <asm/pgtable.h>
-
-spinlock_t swaplock = SPIN_LOCK_UNLOCKED;
-unsigned int nr_swapfiles;
-int total_swap_pages;
-static int swap_overflow;
-
-static const char Bad_file[] = "Bad swap file entry ";
-static const char Unused_file[] = "Unused swap file entry ";
-static const char Bad_offset[] = "Bad swap offset entry ";
-static const char Unused_offset[] = "Unused swap offset entry ";
-
-struct swap_list_t swap_list = {-1, -1};
-
-struct swap_info_struct swap_info[MAX_SWAPFILES];
-
-#define SWAPFILE_CLUSTER 256
-
-static inline int scan_swap_map(struct swap_info_struct *si)
-{
- unsigned long offset;
- /*
- * We try to cluster swap pages by allocating them
- * sequentially in swap. Once we've allocated
- * SWAPFILE_CLUSTER pages this way, however, we resort to
- * first-free allocation, starting a new cluster. This
- * prevents us from scattering swap pages all over the entire
- * swap partition, so that we reduce overall disk seek times
- * between swap pages. -- sct */
- if (si->cluster_nr) {
- while (si->cluster_next <= si->highest_bit) {
- offset = si->cluster_next++;
- if (si->swap_map[offset])
- continue;
- si->cluster_nr--;
- goto got_page;
- }
- }
- si->cluster_nr = SWAPFILE_CLUSTER;
-
- /* try to find an empty (even not aligned) cluster. */
- offset = si->lowest_bit;
- check_next_cluster:
- if (offset+SWAPFILE_CLUSTER-1 <= si->highest_bit)
- {
- int nr;
- for (nr = offset; nr < offset+SWAPFILE_CLUSTER; nr++)
- if (si->swap_map[nr])
- {
- offset = nr+1;
- goto check_next_cluster;
- }
- /* We found a completely empty cluster, so start
- * using it.
- */
- goto got_page;
- }
- /* No luck, so now go fine-grained as usual. -Andrea */
- for (offset = si->lowest_bit; offset <= si->highest_bit ; offset++) {
- if (si->swap_map[offset])
- continue;
- si->lowest_bit = offset+1;
- got_page:
- if (offset == si->lowest_bit)
- si->lowest_bit++;
- if (offset == si->highest_bit)
- si->highest_bit--;
- if (si->lowest_bit > si->highest_bit) {
- si->lowest_bit = si->max;
- si->highest_bit = 0;
- }
- si->swap_map[offset] = 1;
- nr_swap_pages--;
- si->cluster_next = offset+1;
- return offset;
- }
- si->lowest_bit = si->max;
- si->highest_bit = 0;
- return 0;
-}
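
The policy above is three-tiered: keep allocating sequentially inside the current cluster; when it runs out, hunt for a fully free run of SWAPFILE_CLUSTER slots; only then fall back to first-free scanning. The middle step alone, over a plain byte map (an illustrative stand-in for the kernel's swap_map):

#include <stdio.h>
#include <string.h>

#define CLUSTER 8  /* stand-in for SWAPFILE_CLUSTER */

/* Start of the first run of CLUSTER free (zero) slots, or -1. */
static int find_free_cluster(const unsigned char *map, int n)
{
    for (int start = 0; start + CLUSTER <= n; ) {
        int i;
        for (i = start; i < start + CLUSTER; i++)
            if (map[i])
                break;
        if (i == start + CLUSTER)
            return start;
        start = i + 1;  /* restart just past the used slot */
    }
    return -1;
}

int main(void)
{
    unsigned char map[32] = {0};
    memset(map, 1, 14);  /* slots 0..13 in use */
    map[20] = 1;         /* a hole splits the free tail */
    printf("%d\n", find_free_cluster(map, 32));  /* 21 */
    return 0;
}
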
-
-swp_entry_t get_swap_page(void)
-{
- struct swap_info_struct * p;
- unsigned long offset;
- swp_entry_t entry;
- int type, wrapped = 0;
-
- entry.val = 0; /* Out of memory */
- swap_list_lock();
- type = swap_list.next;
- if (type < 0)
- goto out;
- if (nr_swap_pages <= 0)
- goto out;
-
- while (1) {
- p = &swap_info[type];
- if ((p->flags & SWP_WRITEOK) == SWP_WRITEOK) {
- swap_device_lock(p);
- offset = scan_swap_map(p);
- swap_device_unlock(p);
- if (offset) {
- entry = SWP_ENTRY(type,offset);
- type = swap_info[type].next;
- if (type < 0 ||
- p->prio != swap_info[type].prio) {
- swap_list.next = swap_list.head;
- } else {
- swap_list.next = type;
- }
- goto out;
- }
- }
- type = p->next;
- if (!wrapped) {
- if (type < 0 || p->prio != swap_info[type].prio) {
- type = swap_list.head;
- wrapped = 1;
- }
- } else
- if (type < 0)
- goto out; /* out of swap space */
- }
-out:
- swap_list_unlock();
- return entry;
-}
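A side effect of the bookkeeping above is that equal-priority swap areas fill round-robin: each successful allocation advances swap_list.next to the next area of the same priority (or back to the head of the tier). A minimal userspace model of just that rotation, with hypothetical toy_* names; the priority-tier fallback is omitted:

#include <stdio.h>

struct toy_dev { int free_slots; };

/* Pick the next device with free space, starting at the rotating
 * cursor, and advance the cursor past the device just used. */
static int toy_pick(struct toy_dev *dev, int n, int *next)
{
	for (int tried = 0; tried < n; tried++) {
		int i = (*next + tried) % n;
		if (dev[i].free_slots > 0) {
			dev[i].free_slots--;
			*next = (i + 1) % n;
			return i;
		}
	}
	return -1;                            /* out of swap space */
}

int main(void)
{
	struct toy_dev dev[2] = { { 2 }, { 2 } };
	int next = 0;
	for (int k = 0; k < 4; k++)
		printf("picked device %d\n", toy_pick(dev, 2, &next));
	return 0;
}

With two equal devices the picks alternate 0, 1, 0, 1, which is exactly the striping the priority rotation is meant to achieve.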
-
-static struct swap_info_struct * swap_info_get(swp_entry_t entry)
-{
- struct swap_info_struct * p;
- unsigned long offset, type;
-
- if (!entry.val)
- goto out;
- type = SWP_TYPE(entry);
- if (type >= nr_swapfiles)
- goto bad_nofile;
- p = & swap_info[type];
- if (!(p->flags & SWP_USED))
- goto bad_device;
- offset = SWP_OFFSET(entry);
- if (offset >= p->max)
- goto bad_offset;
- if (!p->swap_map[offset])
- goto bad_free;
- swap_list_lock();
- if (p->prio > swap_info[swap_list.next].prio)
- swap_list.next = type;
- swap_device_lock(p);
- return p;
-
-bad_free:
- printk(KERN_ERR "swap_free: %s%08lx\n", Unused_offset, entry.val);
- goto out;
-bad_offset:
- printk(KERN_ERR "swap_free: %s%08lx\n", Bad_offset, entry.val);
- goto out;
-bad_device:
- printk(KERN_ERR "swap_free: %s%08lx\n", Unused_file, entry.val);
- goto out;
-bad_nofile:
- printk(KERN_ERR "swap_free: %s%08lx\n", Bad_file, entry.val);
-out:
- return NULL;
-}
-
-static void swap_info_put(struct swap_info_struct * p)
-{
- swap_device_unlock(p);
- swap_list_unlock();
-}
-
-static int swap_entry_free(struct swap_info_struct *p, unsigned long offset)
-{
- int count = p->swap_map[offset];
-
- if (count < SWAP_MAP_MAX) {
- count--;
- p->swap_map[offset] = count;
- if (!count) {
- if (offset < p->lowest_bit)
- p->lowest_bit = offset;
- if (offset > p->highest_bit)
- p->highest_bit = offset;
- nr_swap_pages++;
- }
- }
- return count;
-}
-
-/*
- * Caller has made sure that the swapdevice corresponding to entry
- * is still around or has not been recycled.
- */
-void swap_free(swp_entry_t entry)
-{
- struct swap_info_struct * p;
-
- p = swap_info_get(entry);
- if (p) {
- swap_entry_free(p, SWP_OFFSET(entry));
- swap_info_put(p);
- }
-}
-
-/*
- * Check if we're the only user of a swap page,
- * when the page is locked.
- */
-static int exclusive_swap_page(struct page *page)
-{
- int retval = 0;
- struct swap_info_struct * p;
- swp_entry_t entry;
-
- entry.val = page->index;
- p = swap_info_get(entry);
- if (p) {
- /* Is the only swap cache user the cache itself? */
- if (p->swap_map[SWP_OFFSET(entry)] == 1) {
- /* Recheck the page count with the pagecache lock held.. */
- spin_lock(&pagecache_lock);
- if (page_count(page) - !!page->buffers == 2)
- retval = 1;
- spin_unlock(&pagecache_lock);
- }
- swap_info_put(p);
- }
- return retval;
-}
-
-/*
- * We can use this swap cache entry directly
- * if there are no other references to it.
- *
- * Here "exclusive_swap_page()" does the real
- * work, but we opportunistically check whether
- * we need to get all the locks first..
- */
-int fastcall can_share_swap_page(struct page *page)
-{
- int retval = 0;
-
- if (!PageLocked(page))
- BUG();
- switch (page_count(page)) {
- case 3:
- if (!page->buffers)
- break;
- /* Fallthrough */
- case 2:
- if (!PageSwapCache(page))
- break;
- retval = exclusive_swap_page(page);
- break;
- case 1:
- if (PageReserved(page))
- break;
- retval = 1;
- }
- return retval;
-}
-
-/*
- * Work out if there are any other processes sharing this
- * swap cache page. Free it if you can. Return success.
- */
-int fastcall remove_exclusive_swap_page(struct page *page)
-{
- int retval;
- struct swap_info_struct * p;
- swp_entry_t entry;
-
- if (!PageLocked(page))
- BUG();
- if (!PageSwapCache(page))
- return 0;
- if (page_count(page) - !!page->buffers != 2) /* 2: us + cache */
- return 0;
-
- entry.val = page->index;
- p = swap_info_get(entry);
- if (!p)
- return 0;
-
- /* Is the only swap cache user the cache itself? */
- retval = 0;
- if (p->swap_map[SWP_OFFSET(entry)] == 1) {
- /* Recheck the page count with the pagecache lock held.. */
- spin_lock(&pagecache_lock);
- if (page_count(page) - !!page->buffers == 2) {
- __delete_from_swap_cache(page);
- SetPageDirty(page);
- retval = 1;
- }
- spin_unlock(&pagecache_lock);
- }
- swap_info_put(p);
-
- if (retval) {
- block_flushpage(page, 0);
- swap_free(entry);
- page_cache_release(page);
- }
-
- return retval;
-}
-
-/*
- * Free the swap entry like above, but also try to
- * free the page cache entry if it is the last user.
- */
-void free_swap_and_cache(swp_entry_t entry)
-{
- struct swap_info_struct * p;
- struct page *page = NULL;
-
- p = swap_info_get(entry);
- if (p) {
- if (swap_entry_free(p, SWP_OFFSET(entry)) == 1)
- page = find_trylock_page(&swapper_space, entry.val);
- swap_info_put(p);
- }
- if (page) {
- page_cache_get(page);
- /* Only cache user (+us), or swap space full? Free it! */
- if (page_count(page) - !!page->buffers == 2 || vm_swap_full()) {
- delete_from_swap_cache(page);
- SetPageDirty(page);
- }
- UnlockPage(page);
- page_cache_release(page);
- }
-}
-
-/*
- * The swap entry has been read in advance, and we return 1 to indicate
- * that the page has been used or is no longer needed.
- *
- * Always set the resulting pte to be nowrite (the same as COW pages
- * after one process has exited). We don't know just how many PTEs will
- * share this swap entry, so be cautious and let do_wp_page work out
- * what to do if a write is requested later.
- */
-/* mmlist_lock and vma->vm_mm->page_table_lock are held */
-static inline void unuse_pte(struct vm_area_struct * vma, unsigned long address,
- pte_t *dir, swp_entry_t entry, struct page* page)
-{
- pte_t pte = *dir;
-
- if (likely(pte_to_swp_entry(pte).val != entry.val))
- return;
- if (unlikely(pte_none(pte) || pte_present(pte)))
- return;
- get_page(page);
- set_pte(dir, pte_mkold(mk_pte(page, vma->vm_page_prot)));
- swap_free(entry);
- ++vma->vm_mm->rss;
-}
-
-/* mmlist_lock and vma->vm_mm->page_table_lock are held */
-static inline void unuse_pmd(struct vm_area_struct * vma, pmd_t *dir,
- unsigned long address, unsigned long size, unsigned long offset,
- swp_entry_t entry, struct page* page)
-{
- pte_t * pte;
- unsigned long end;
-
- if (pmd_none(*dir))
- return;
- if (pmd_bad(*dir)) {
- pmd_ERROR(*dir);
- pmd_clear(dir);
- return;
- }
- pte = pte_offset(dir, address);
- offset += address & PMD_MASK;
- address &= ~PMD_MASK;
- end = address + size;
- if (end > PMD_SIZE)
- end = PMD_SIZE;
- do {
- unuse_pte(vma, offset+address-vma->vm_start, pte, entry, page);
- address += PAGE_SIZE;
- pte++;
- } while (address && (address < end));
-}
-
-/* mmlist_lock and vma->vm_mm->page_table_lock are held */
-static inline void unuse_pgd(struct vm_area_struct * vma, pgd_t *dir,
- unsigned long address, unsigned long size,
- swp_entry_t entry, struct page* page)
-{
- pmd_t * pmd;
- unsigned long offset, end;
-
- if (pgd_none(*dir))
- return;
- if (pgd_bad(*dir)) {
- pgd_ERROR(*dir);
- pgd_clear(dir);
- return;
- }
- pmd = pmd_offset(dir, address);
- offset = address & PGDIR_MASK;
- address &= ~PGDIR_MASK;
- end = address + size;
- if (end > PGDIR_SIZE)
- end = PGDIR_SIZE;
- if (address >= end)
- BUG();
- do {
- unuse_pmd(vma, pmd, address, end - address, offset, entry,
- page);
- address = (address + PMD_SIZE) & PMD_MASK;
- pmd++;
- } while (address && (address < end));
-}
-
-/* mmlist_lock and vma->vm_mm->page_table_lock are held */
-static void unuse_vma(struct vm_area_struct * vma, pgd_t *pgdir,
- swp_entry_t entry, struct page* page)
-{
- unsigned long start = vma->vm_start, end = vma->vm_end;
-
- if (start >= end)
- BUG();
- do {
- unuse_pgd(vma, pgdir, start, end - start, entry, page);
- start = (start + PGDIR_SIZE) & PGDIR_MASK;
- pgdir++;
- } while (start && (start < end));
-}
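unuse_pgd()/unuse_pmd()/unuse_pte() above follow the standard 2.4 three-level walk: each level clamps its sub-range to the boundary of the covering entry and steps by that entry's span. A compact userspace sketch of the same loop structure, using illustrative (not arch-accurate) TOY_* sizes:

#include <stdio.h>

#define TOY_PAGE_SIZE  0x1000UL
#define TOY_PMD_SIZE   0x200000UL
#define TOY_PGDIR_SIZE 0x40000000UL

static unsigned long npages;

static void visit(unsigned long addr)
{
	(void)addr;   /* the kernel would inspect/update the pte here */
	npages++;
}

/* Walk [start, end): the outer loop steps pgd entries, the middle
 * pmd entries, the inner individual pages, each clamped to its parent. */
static void walk(unsigned long start, unsigned long end)
{
	for (unsigned long g = start; g < end;
	     g = (g + TOY_PGDIR_SIZE) & ~(TOY_PGDIR_SIZE - 1)) {
		unsigned long gend = (g + TOY_PGDIR_SIZE) & ~(TOY_PGDIR_SIZE - 1);
		if (gend > end)
			gend = end;
		for (unsigned long m = g; m < gend;
		     m = (m + TOY_PMD_SIZE) & ~(TOY_PMD_SIZE - 1)) {
			unsigned long mend = (m + TOY_PMD_SIZE) & ~(TOY_PMD_SIZE - 1);
			if (mend > gend)
				mend = gend;
			for (unsigned long a = m; a < mend; a += TOY_PAGE_SIZE)
				visit(a);
		}
	}
}

int main(void)
{
	walk(0x1ff000, 0x202000);              /* spans a pmd boundary */
	printf("visited %lu pages\n", npages); /* prints 3 */
	return 0;
}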
-
-static void unuse_process(struct mm_struct * mm,
- swp_entry_t entry, struct page* page)
-{
- struct vm_area_struct* vma;
-
- /*
- * Go through process' page directory.
- */
- spin_lock(&mm->page_table_lock);
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
- pgd_t * pgd = pgd_offset(mm, vma->vm_start);
- unuse_vma(vma, pgd, entry, page);
- }
- XEN_flush_page_update_queue();
- spin_unlock(&mm->page_table_lock);
- return;
-}
-
-/*
- * Scan swap_map from current position to next entry still in use.
- * Recycle to start on reaching the end, returning 0 when empty.
- */
-static int find_next_to_unuse(struct swap_info_struct *si, int prev)
-{
- int max = si->max;
- int i = prev;
- int count;
-
- /*
- * No need for swap_device_lock(si) here: we're just looking
- * for whether an entry is in use, not modifying it; false
- * hits are okay, and sys_swapoff() has already prevented new
- * allocations from this area (while holding swap_list_lock()).
- */
- for (;;) {
- if (++i >= max) {
- if (!prev) {
- i = 0;
- break;
- }
- /*
- * No entries in use at top of swap_map,
- * loop back to start and recheck there.
- */
- max = prev + 1;
- prev = 0;
- i = 1;
- }
- count = si->swap_map[i];
- if (count && count != SWAP_MAP_BAD)
- break;
- }
- return i;
-}
-
-/*
- * We completely avoid races by reading each swap page in advance,
- * and then search for the process using it. All the necessary
- * page table adjustments can then be made atomically.
- */
-static int try_to_unuse(unsigned int type)
-{
- struct swap_info_struct * si = &swap_info[type];
- struct mm_struct *start_mm;
- unsigned short *swap_map;
- unsigned short swcount;
- struct page *page;
- swp_entry_t entry;
- int i = 0;
- int retval = 0;
- int reset_overflow = 0;
- int shmem;
-
- /*
- * When searching mms for an entry, a good strategy is to
- * start at the first mm we freed the previous entry from
- * (though actually we don't notice whether we or coincidence
- * freed the entry). Initialize this start_mm with a hold.
- *
- * A simpler strategy would be to start at the last mm we
- * freed the previous entry from; but that would take less
- * advantage of mmlist ordering (now preserved by swap_out()),
- * which clusters forked address spaces together, most recent
- * child immediately after parent. If we race with dup_mmap(),
- * we very much want to resolve parent before child, otherwise
- * we may miss some entries: using last mm would invert that.
- */
- start_mm = &init_mm;
- atomic_inc(&init_mm.mm_users);
-
- /*
- * Keep on scanning until all entries have gone. Usually,
- * one pass through swap_map is enough, but not necessarily:
- * mmput() removes mm from mmlist before exit_mmap() and its
- * zap_page_range(). That's not too bad, those entries are
- * on their way out, and handled faster there than here.
- * do_munmap() behaves similarly, taking the range out of mm's
- * vma list before zap_page_range(). But unfortunately, when
- * unmapping a part of a vma, it takes the whole out first,
- * then reinserts what's left after (might even reschedule if
- * open() method called) - so swap entries may be invisible
- * to swapoff for a while, then reappear - but that is rare.
- */
- while ((i = find_next_to_unuse(si, i))) {
- /*
- * Get a page for the entry, using the existing swap
- * cache page if there is one. Otherwise, get a clean
- * page and read the swap into it.
- */
- swap_map = &si->swap_map[i];
- entry = SWP_ENTRY(type, i);
- page = read_swap_cache_async(entry);
- if (!page) {
- /*
- * Either swap_duplicate() failed because entry
- * has been freed independently, and will not be
- * reused since sys_swapoff() already disabled
- * allocation from here, or alloc_page() failed.
- */
- if (!*swap_map)
- continue;
- retval = -ENOMEM;
- break;
- }
-
- /*
- * Don't hold on to start_mm if it looks like exiting.
- */
- if (atomic_read(&start_mm->mm_users) == 1) {
- mmput(start_mm);
- start_mm = &init_mm;
- atomic_inc(&init_mm.mm_users);
- }
-
- /*
- * Wait for and lock page. When do_swap_page races with
- * try_to_unuse, do_swap_page can handle the fault much
- * faster than try_to_unuse can locate the entry. This
- * apparently redundant "wait_on_page" lets try_to_unuse
- * defer to do_swap_page in such a case - in some tests,
- * do_swap_page and try_to_unuse repeatedly compete.
- */
- wait_on_page(page);
- lock_page(page);
-
- /*
- * Remove all references to entry, without blocking.
- * Whenever we reach init_mm, there's no address space
- * to search, but use it as a reminder to search shmem.
- */
- shmem = 0;
- swcount = *swap_map;
- if (swcount > 1) {
- flush_page_to_ram(page);
- if (start_mm == &init_mm)
- shmem = shmem_unuse(entry, page);
- else
- unuse_process(start_mm, entry, page);
- }
- if (*swap_map > 1) {
- int set_start_mm = (*swap_map >= swcount);
- struct list_head *p = &start_mm->mmlist;
- struct mm_struct *new_start_mm = start_mm;
- struct mm_struct *mm;
-
- spin_lock(&mmlist_lock);
- while (*swap_map > 1 &&
- (p = p->next) != &start_mm->mmlist) {
- mm = list_entry(p, struct mm_struct, mmlist);
- swcount = *swap_map;
- if (mm == &init_mm) {
- set_start_mm = 1;
- spin_unlock(&mmlist_lock);
- shmem = shmem_unuse(entry, page);
- spin_lock(&mmlist_lock);
- } else
- unuse_process(mm, entry, page);
- if (set_start_mm && *swap_map < swcount) {
- new_start_mm = mm;
- set_start_mm = 0;
- }
- }
- atomic_inc(&new_start_mm->mm_users);
- spin_unlock(&mmlist_lock);
- mmput(start_mm);
- start_mm = new_start_mm;
- }
-
- /*
- * How could swap count reach 0x7fff when the maximum
- * pid is 0x7fff, and there's no way to repeat a swap
- * page within an mm (except in shmem, where it's the
- * shared object which takes the reference count)?
- * We believe SWAP_MAP_MAX cannot occur in Linux 2.4.
- *
- * If that's wrong, then we should worry more about
- * exit_mmap() and do_munmap() cases described above:
- * we might be resetting SWAP_MAP_MAX too early here.
- * We know "Undead"s can happen, they're okay, so don't
- * report them; but do report if we reset SWAP_MAP_MAX.
- */
- if (*swap_map == SWAP_MAP_MAX) {
- swap_list_lock();
- swap_device_lock(si);
- nr_swap_pages++;
- *swap_map = 1;
- swap_device_unlock(si);
- swap_list_unlock();
- reset_overflow = 1;
- }
-
- /*
- * If a reference remains (rare), we would like to leave
- * the page in the swap cache; but try_to_swap_out could
- * then re-duplicate the entry once we drop page lock,
- * so we might loop indefinitely; also, that page could
- * not be swapped out to other storage meanwhile. So:
- * delete from cache even if there's another reference,
- * after ensuring that the data has been saved to disk -
- * since if the reference remains (rarer), it will be
- * read from disk into another page. Splitting into two
- * pages would be incorrect if swap supported "shared
- * private" pages, but they are handled by tmpfs files.
- *
- * Note shmem_unuse already deleted swappage from cache,
- * unless corresponding filepage found already in cache:
- * in which case it left swappage in cache, lowered its
- * swap count to pass quickly through the loops above,
- * and now we must reincrement count to try again later.
- */
- if ((*swap_map > 1) && PageDirty(page) && PageSwapCache(page)) {
- rw_swap_page(WRITE, page);
- lock_page(page);
- }
- if (PageSwapCache(page)) {
- if (shmem)
- swap_duplicate(entry);
- else
- delete_from_swap_cache(page);
- }
-
- /*
- * So we could skip searching mms once swap count went
- * to 1, we did not mark any present ptes as dirty: must
- * mark page dirty so try_to_swap_out will preserve it.
- */
- SetPageDirty(page);
- UnlockPage(page);
- page_cache_release(page);
-
- /*
- * Make sure that we aren't completely killing
- * interactive performance. Interruptible check on
- * signal_pending() would be nice, but changes the spec?
- */
- if (current->need_resched)
- schedule();
- }
-
- mmput(start_mm);
- if (reset_overflow) {
- printk(KERN_WARNING "swapoff: cleared swap entry overflow\n");
- swap_overflow = 0;
- }
- return retval;
-}
-
-asmlinkage long sys_swapoff(const char * specialfile)
-{
- struct swap_info_struct * p = NULL;
- unsigned short *swap_map;
- struct nameidata nd;
- int i, type, prev;
- int err;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- err = user_path_walk(specialfile, &nd);
- if (err)
- goto out;
-
- lock_kernel();
- prev = -1;
- swap_list_lock();
- for (type = swap_list.head; type >= 0; type = swap_info[type].next) {
- p = swap_info + type;
- if ((p->flags & SWP_WRITEOK) == SWP_WRITEOK) {
- if (p->swap_file == nd.dentry ||
- (S_ISBLK(nd.dentry->d_inode->i_mode) &&
- p->swap_device == nd.dentry->d_inode->i_rdev))
- break;
- }
- prev = type;
- }
- err = -EINVAL;
- if (type < 0) {
- swap_list_unlock();
- goto out_dput;
- }
-
- if (prev < 0) {
- swap_list.head = p->next;
- } else {
- swap_info[prev].next = p->next;
- }
- if (type == swap_list.next) {
- /* just pick something that's safe... */
- swap_list.next = swap_list.head;
- }
- nr_swap_pages -= p->pages;
- total_swap_pages -= p->pages;
- p->flags = SWP_USED;
- swap_list_unlock();
- unlock_kernel();
- err = try_to_unuse(type);
- lock_kernel();
- if (err) {
- /* re-insert swap space back into swap_list */
- swap_list_lock();
- for (prev = -1, i = swap_list.head; i >= 0; prev = i, i = swap_info[i].next)
- if (p->prio >= swap_info[i].prio)
- break;
- p->next = i;
- if (prev < 0)
- swap_list.head = swap_list.next = p - swap_info;
- else
- swap_info[prev].next = p - swap_info;
- nr_swap_pages += p->pages;
- total_swap_pages += p->pages;
- p->flags = SWP_WRITEOK;
- swap_list_unlock();
- goto out_dput;
- }
- if (p->swap_device)
- blkdev_put(p->swap_file->d_inode->i_bdev, BDEV_SWAP);
- path_release(&nd);
-
- swap_list_lock();
- swap_device_lock(p);
- nd.mnt = p->swap_vfsmnt;
- nd.dentry = p->swap_file;
- p->swap_vfsmnt = NULL;
- p->swap_file = NULL;
- p->swap_device = 0;
- p->max = 0;
- swap_map = p->swap_map;
- p->swap_map = NULL;
- p->flags = 0;
- swap_device_unlock(p);
- swap_list_unlock();
- vfree(swap_map);
- err = 0;
-
-out_dput:
- unlock_kernel();
- path_release(&nd);
-out:
- return err;
-}
-
-int get_swaparea_info(char *buf)
-{
- char * page = (char *) __get_free_page(GFP_KERNEL);
- struct swap_info_struct *ptr = swap_info;
- int i, j, len = 0, usedswap;
-
- if (!page)
- return -ENOMEM;
-
- len += sprintf(buf, "Filename\t\t\tType\t\tSize\tUsed\tPriority\n");
- for (i = 0 ; i < nr_swapfiles ; i++, ptr++) {
- if ((ptr->flags & SWP_USED) && ptr->swap_map) {
- char * path = d_path(ptr->swap_file, ptr->swap_vfsmnt,
- page, PAGE_SIZE);
-
- len += sprintf(buf + len, "%-31s ", path);
-
- if (!ptr->swap_device)
- len += sprintf(buf + len, "file\t\t");
- else
- len += sprintf(buf + len, "partition\t");
-
- usedswap = 0;
- for (j = 0; j < ptr->max; ++j)
- switch (ptr->swap_map[j]) {
- case SWAP_MAP_BAD:
- case 0:
- continue;
- default:
- usedswap++;
- }
- len += sprintf(buf + len, "%d\t%d\t%d\n", ptr->pages << (PAGE_SHIFT - 10),
- usedswap << (PAGE_SHIFT - 10), ptr->prio);
- }
- }
- free_page((unsigned long) page);
- return len;
-}
-
-int is_swap_partition(kdev_t dev) {
- struct swap_info_struct *ptr = swap_info;
- int i;
-
- for (i = 0 ; i < nr_swapfiles ; i++, ptr++) {
- if (ptr->flags & SWP_USED)
- if (ptr->swap_device == dev)
- return 1;
- }
- return 0;
-}
-
-/*
- * Written 01/25/92 by Simmule Turner, heavily changed by Linus.
- *
- * The swapon system call
- */
-asmlinkage long sys_swapon(const char * specialfile, int swap_flags)
-{
- struct swap_info_struct * p;
- struct nameidata nd;
- struct inode * swap_inode;
- unsigned int type;
- int i, j, prev;
- int error;
- static int least_priority = 0;
- union swap_header *swap_header = 0;
- int swap_header_version;
- int nr_good_pages = 0;
- unsigned long maxpages = 1;
- int swapfilesize;
- struct block_device *bdev = NULL;
- unsigned short *swap_map;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- lock_kernel();
- swap_list_lock();
- p = swap_info;
- for (type = 0 ; type < nr_swapfiles ; type++,p++)
- if (!(p->flags & SWP_USED))
- break;
- error = -EPERM;
- if (type >= MAX_SWAPFILES) {
- swap_list_unlock();
- goto out;
- }
- if (type >= nr_swapfiles)
- nr_swapfiles = type+1;
- p->flags = SWP_USED;
- p->swap_file = NULL;
- p->swap_vfsmnt = NULL;
- p->swap_device = 0;
- p->swap_map = NULL;
- p->lowest_bit = 0;
- p->highest_bit = 0;
- p->cluster_nr = 0;
- p->sdev_lock = SPIN_LOCK_UNLOCKED;
- p->next = -1;
- if (swap_flags & SWAP_FLAG_PREFER) {
- p->prio =
- (swap_flags & SWAP_FLAG_PRIO_MASK)>>SWAP_FLAG_PRIO_SHIFT;
- } else {
- p->prio = --least_priority;
- }
- swap_list_unlock();
- error = user_path_walk(specialfile, &nd);
- if (error)
- goto bad_swap_2;
-
- p->swap_file = nd.dentry;
- p->swap_vfsmnt = nd.mnt;
- swap_inode = nd.dentry->d_inode;
- error = -EINVAL;
-
- if (S_ISBLK(swap_inode->i_mode)) {
- kdev_t dev = swap_inode->i_rdev;
- struct block_device_operations *bdops;
- devfs_handle_t de;
-
- if (is_mounted(dev)) {
- error = -EBUSY;
- goto bad_swap_2;
- }
-
- p->swap_device = dev;
- set_blocksize(dev, PAGE_SIZE);
-
- bd_acquire(swap_inode);
- bdev = swap_inode->i_bdev;
- de = devfs_get_handle_from_inode(swap_inode);
- bdops = devfs_get_ops(de); /* Increments module use count */
- if (bdops) bdev->bd_op = bdops;
-
- error = blkdev_get(bdev, FMODE_READ|FMODE_WRITE, 0, BDEV_SWAP);
- devfs_put_ops(de);/*Decrement module use count now we're safe*/
- if (error)
- goto bad_swap_2;
- set_blocksize(dev, PAGE_SIZE);
- error = -ENODEV;
- if (!dev || (blk_size[MAJOR(dev)] &&
- !blk_size[MAJOR(dev)][MINOR(dev)]))
- goto bad_swap;
- swapfilesize = 0;
- if (blk_size[MAJOR(dev)])
- swapfilesize = blk_size[MAJOR(dev)][MINOR(dev)]
- >> (PAGE_SHIFT - 10);
- } else if (S_ISREG(swap_inode->i_mode))
- swapfilesize = swap_inode->i_size >> PAGE_SHIFT;
- else
- goto bad_swap;
-
- error = -EBUSY;
- for (i = 0 ; i < nr_swapfiles ; i++) {
- struct swap_info_struct *q = &swap_info[i];
- if (i == type || !q->swap_file)
- continue;
- if (swap_inode->i_mapping == q->swap_file->d_inode->i_mapping)
- goto bad_swap;
- }
-
- swap_header = (void *) __get_free_page(GFP_USER);
- if (!swap_header) {
- printk("Unable to start swapping: out of memory :-)\n");
- error = -ENOMEM;
- goto bad_swap;
- }
-
- lock_page(virt_to_page(swap_header));
- rw_swap_page_nolock(READ, SWP_ENTRY(type,0), (char *) swap_header);
-
- if (!memcmp("SWAP-SPACE",swap_header->magic.magic,10))
- swap_header_version = 1;
- else if (!memcmp("SWAPSPACE2",swap_header->magic.magic,10))
- swap_header_version = 2;
- else {
- printk("Unable to find swap-space signature\n");
- error = -EINVAL;
- goto bad_swap;
- }
-
- switch (swap_header_version) {
- case 1:
- memset(((char *) swap_header)+PAGE_SIZE-10,0,10);
- j = 0;
- p->lowest_bit = 0;
- p->highest_bit = 0;
- for (i = 1 ; i < 8*PAGE_SIZE ; i++) {
- if (test_bit(i,(char *) swap_header)) {
- if (!p->lowest_bit)
- p->lowest_bit = i;
- p->highest_bit = i;
- maxpages = i+1;
- j++;
- }
- }
- nr_good_pages = j;
- p->swap_map = vmalloc(maxpages * sizeof(short));
- if (!p->swap_map) {
- error = -ENOMEM;
- goto bad_swap;
- }
- for (i = 1 ; i < maxpages ; i++) {
- if (test_bit(i,(char *) swap_header))
- p->swap_map[i] = 0;
- else
- p->swap_map[i] = SWAP_MAP_BAD;
- }
- break;
-
- case 2:
- /* Check the swap header's sub-version and the size of
- the swap file and bad block lists */
- if (swap_header->info.version != 1) {
- printk(KERN_WARNING
- "Unable to handle swap header version %d\n",
- swap_header->info.version);
- error = -EINVAL;
- goto bad_swap;
- }
-
- p->lowest_bit = 1;
- maxpages = SWP_OFFSET(SWP_ENTRY(0,~0UL)) - 1;
- if (maxpages > swap_header->info.last_page)
- maxpages = swap_header->info.last_page;
- p->highest_bit = maxpages - 1;
-
- error = -EINVAL;
- if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
- goto bad_swap;
-
- /* OK, set up the swap map and apply the bad block list */
- if (!(p->swap_map = vmalloc(maxpages * sizeof(short)))) {
- error = -ENOMEM;
- goto bad_swap;
- }
-
- error = 0;
- memset(p->swap_map, 0, maxpages * sizeof(short));
- for (i=0; i<swap_header->info.nr_badpages; i++) {
- int page = swap_header->info.badpages[i];
- if (page <= 0 || page >= swap_header->info.last_page)
- error = -EINVAL;
- else
- p->swap_map[page] = SWAP_MAP_BAD;
- }
- nr_good_pages = swap_header->info.last_page -
- swap_header->info.nr_badpages -
- 1 /* header page */;
- if (error)
- goto bad_swap;
- }
-
- if (swapfilesize && maxpages > swapfilesize) {
- printk(KERN_WARNING
- "Swap area shorter than signature indicates\n");
- error = -EINVAL;
- goto bad_swap;
- }
- if (!nr_good_pages) {
- printk(KERN_WARNING "Empty swap-file\n");
- error = -EINVAL;
- goto bad_swap;
- }
- p->swap_map[0] = SWAP_MAP_BAD;
- swap_list_lock();
- swap_device_lock(p);
- p->max = maxpages;
- p->flags = SWP_WRITEOK;
- p->pages = nr_good_pages;
- nr_swap_pages += nr_good_pages;
- total_swap_pages += nr_good_pages;
- printk(KERN_INFO "Adding Swap: %dk swap-space (priority %d)\n",
- nr_good_pages<<(PAGE_SHIFT-10), p->prio);
-
- /* insert swap space into swap_list: */
- prev = -1;
- for (i = swap_list.head; i >= 0; i = swap_info[i].next) {
- if (p->prio >= swap_info[i].prio) {
- break;
- }
- prev = i;
- }
- p->next = i;
- if (prev < 0) {
- swap_list.head = swap_list.next = p - swap_info;
- } else {
- swap_info[prev].next = p - swap_info;
- }
- swap_device_unlock(p);
- swap_list_unlock();
- error = 0;
- goto out;
-bad_swap:
- if (bdev)
- blkdev_put(bdev, BDEV_SWAP);
-bad_swap_2:
- swap_list_lock();
- swap_map = p->swap_map;
- nd.mnt = p->swap_vfsmnt;
- nd.dentry = p->swap_file;
- p->swap_device = 0;
- p->swap_file = NULL;
- p->swap_vfsmnt = NULL;
- p->swap_map = NULL;
- p->flags = 0;
- if (!(swap_flags & SWAP_FLAG_PREFER))
- ++least_priority;
- swap_list_unlock();
- if (swap_map)
- vfree(swap_map);
- path_release(&nd);
-out:
- if (swap_header)
- free_page((long) swap_header);
- unlock_kernel();
- return error;
-}
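For a version-1 header the signature page doubles as a usability bitmap: bit i set means page i of the area is good. The case-1 scan above derives lowest_bit, highest_bit, maxpages and the good-page count in a single pass; a minimal model of that pass, with a hypothetical scan_bitmap helper and plain byte-order bit testing:

#include <stdio.h>

/* One pass over a usability bitmap: record the first and last usable
 * page and count them, skipping page 0 which holds the header. */
static void scan_bitmap(const unsigned char *map, int nbits,
                        int *lowest, int *highest, int *good)
{
	*lowest = 0; *highest = 0; *good = 0;
	for (int i = 1; i < nbits; i++) {
		if (map[i / 8] & (1 << (i % 8))) {
			if (!*lowest)
				*lowest = i;
			*highest = i;
			(*good)++;
		}
	}
}

int main(void)
{
	unsigned char map[4] = { 0xf6, 0x01, 0, 0 };  /* bits 1,2,4,5,6,7,8 set */
	int lo, hi, good;
	scan_bitmap(map, 32, &lo, &hi, &good);
	printf("lowest=%d highest=%d good=%d\n", lo, hi, good); /* 1 8 7 */
	return 0;
}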
-
-void si_swapinfo(struct sysinfo *val)
-{
- unsigned int i;
- unsigned long nr_to_be_unused = 0;
-
- swap_list_lock();
- for (i = 0; i < nr_swapfiles; i++) {
- unsigned int j;
- if (swap_info[i].flags != SWP_USED)
- continue;
- for (j = 0; j < swap_info[i].max; ++j) {
- switch (swap_info[i].swap_map[j]) {
- case 0:
- case SWAP_MAP_BAD:
- continue;
- default:
- nr_to_be_unused++;
- }
- }
- }
- val->freeswap = nr_swap_pages + nr_to_be_unused;
- val->totalswap = total_swap_pages + nr_to_be_unused;
- swap_list_unlock();
-}
-
-/*
- * Verify that a swap entry is valid and increment its swap map count.
- *
- * Note: if swap_map[] reaches SWAP_MAP_MAX the entries are treated as
- * "permanent", but will be reclaimed by the next swapoff.
- */
-int swap_duplicate(swp_entry_t entry)
-{
- struct swap_info_struct * p;
- unsigned long offset, type;
- int result = 0;
-
- type = SWP_TYPE(entry);
- if (type >= nr_swapfiles)
- goto bad_file;
- p = type + swap_info;
- offset = SWP_OFFSET(entry);
-
- swap_device_lock(p);
- if (offset < p->max && p->swap_map[offset]) {
- if (p->swap_map[offset] < SWAP_MAP_MAX - 1) {
- p->swap_map[offset]++;
- result = 1;
- } else if (p->swap_map[offset] <= SWAP_MAP_MAX) {
- if (swap_overflow++ < 5)
- printk(KERN_WARNING "swap_dup: swap entry
overflow\n");
- p->swap_map[offset] = SWAP_MAP_MAX;
- result = 1;
- }
- }
- swap_device_unlock(p);
-out:
- return result;
-
-bad_file:
- printk(KERN_ERR "swap_dup: %s%08lx\n", Bad_file, entry.val);
- goto out;
-}
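swap_duplicate() and swap_entry_free() together implement a saturating per-slot reference count: once a slot reaches SWAP_MAP_MAX it sticks there ("permanent") until swapoff resets it, as try_to_unuse() notes above. A minimal sketch of just the saturation behaviour, with hypothetical toy_* names:

#include <stdio.h>

#define TOY_MAP_MAX 0x7fff   /* mirrors SWAP_MAP_MAX in 2.4 */

/* Increment a slot's count, saturating at TOY_MAP_MAX. */
static int toy_dup(unsigned short *count)
{
	if (*count == 0)
		return 0;                 /* unused slot: invalid */
	if (*count < TOY_MAP_MAX - 1)
		(*count)++;
	else
		*count = TOY_MAP_MAX;     /* saturate: now "permanent" */
	return 1;
}

/* Decrement, but never touch a saturated count. */
static unsigned short toy_free(unsigned short *count)
{
	if (*count < TOY_MAP_MAX)
		(*count)--;
	return *count;
}

int main(void)
{
	unsigned short c = TOY_MAP_MAX - 1;
	toy_dup(&c);                  /* saturates */
	toy_free(&c);                 /* no effect once saturated */
	printf("count = %#x\n", c);   /* still 0x7fff */
	return 0;
}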
-
-/*
- * Prior swap_duplicate protects against swap device deletion.
- */
-void get_swaphandle_info(swp_entry_t entry, unsigned long *offset,
- kdev_t *dev, struct inode **swapf)
-{
- unsigned long type;
- struct swap_info_struct *p;
-
- type = SWP_TYPE(entry);
- if (type >= nr_swapfiles) {
- printk(KERN_ERR "rw_swap_page: %s%08lx\n", Bad_file, entry.val);
- return;
- }
-
- p = &swap_info[type];
- *offset = SWP_OFFSET(entry);
- if (*offset >= p->max && *offset != 0) {
- printk(KERN_ERR "rw_swap_page: %s%08lx\n", Bad_offset,
entry.val);
- return;
- }
- if (p->swap_map && !p->swap_map[*offset]) {
- printk(KERN_ERR "rw_swap_page: %s%08lx\n", Unused_offset,
entry.val);
- return;
- }
- if (!(p->flags & SWP_USED)) {
- printk(KERN_ERR "rw_swap_page: %s%08lx\n", Unused_file,
entry.val);
- return;
- }
-
- if (p->swap_device) {
- *dev = p->swap_device;
- } else if (p->swap_file) {
- *swapf = p->swap_file->d_inode;
- } else {
- printk(KERN_ERR "rw_swap_page: no swap file or device\n");
- }
- return;
-}
-
-/*
- * swap_device_lock prevents swap_map being freed. Don't grab an extra
- * reference on the swaphandle, it doesn't matter if it becomes unused.
- */
-int valid_swaphandles(swp_entry_t entry, unsigned long *offset)
-{
- int ret = 0, i = 1 << page_cluster;
- unsigned long toff;
- struct swap_info_struct *swapdev = SWP_TYPE(entry) + swap_info;
-
- if (!page_cluster) /* no readahead */
- return 0;
- toff = (SWP_OFFSET(entry) >> page_cluster) << page_cluster;
- if (!toff) /* first page is swap header */
- toff++, i--;
- *offset = toff;
-
- swap_device_lock(swapdev);
- do {
- /* Don't read-ahead past the end of the swap area */
- if (toff >= swapdev->max)
- break;
- /* Don't read in free or bad pages */
- if (!swapdev->swap_map[toff])
- break;
- if (swapdev->swap_map[toff] == SWAP_MAP_BAD)
- break;
- toff++;
- ret++;
- } while (--i);
- swap_device_unlock(swapdev);
- return ret;
-}
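The window arithmetic above rounds the faulting offset down to the start of its readahead window of (1 << page_cluster) slots, so e.g. with page_cluster = 3 (an 8-page window) offset 21 maps to a window starting at 16. A minimal check with a hypothetical helper name:

#include <stdio.h>

/* Round offset down to the start of its readahead window of
 * (1 << page_cluster) slots, as valid_swaphandles() does. */
static unsigned long window_start(unsigned long offset, int page_cluster)
{
	return (offset >> page_cluster) << page_cluster;
}

int main(void)
{
	printf("%lu\n", window_start(21, 3)); /* prints 16 */
	printf("%lu\n", window_start(7, 3));  /* prints 0; the kernel then
	                                         bumps it past the header page */
	return 0;
}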
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/mm/vmalloc.c
--- a/linux-2.4.30-xen-sparse/mm/vmalloc.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,385 +0,0 @@
-/*
- * linux/mm/vmalloc.c
- *
- * Copyright (C) 1993 Linus Torvalds
- * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
- * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@xxxxxxxxxxx>, May 2000
- */
-
-#include <linux/config.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/spinlock.h>
-#include <linux/highmem.h>
-#include <linux/smp_lock.h>
-
-#include <asm/uaccess.h>
-#include <asm/pgalloc.h>
-
-rwlock_t vmlist_lock = RW_LOCK_UNLOCKED;
-struct vm_struct * vmlist;
-
-static inline void free_area_pte(pmd_t * pmd, unsigned long address, unsigned long size)
-{
- pte_t * pte;
- unsigned long end;
-
- if (pmd_none(*pmd))
- return;
- if (pmd_bad(*pmd)) {
- pmd_ERROR(*pmd);
- pmd_clear(pmd);
- return;
- }
- pte = pte_offset(pmd, address);
- address &= ~PMD_MASK;
- end = address + size;
- if (end > PMD_SIZE)
- end = PMD_SIZE;
- do {
- pte_t page;
- page = ptep_get_and_clear(pte);
- address += PAGE_SIZE;
- pte++;
- if (pte_none(page))
- continue;
- if (pte_present(page)) {
- struct page *ptpage = pte_page(page);
- if (VALID_PAGE(ptpage) && (!PageReserved(ptpage)))
- __free_page(ptpage);
- continue;
- }
- printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
- } while (address < end);
-}
-
-static inline void free_area_pmd(pgd_t * dir, unsigned long address, unsigned long size)
-{
- pmd_t * pmd;
- unsigned long end;
-
- if (pgd_none(*dir))
- return;
- if (pgd_bad(*dir)) {
- pgd_ERROR(*dir);
- pgd_clear(dir);
- return;
- }
- pmd = pmd_offset(dir, address);
- address &= ~PGDIR_MASK;
- end = address + size;
- if (end > PGDIR_SIZE)
- end = PGDIR_SIZE;
- do {
- free_area_pte(pmd, address, end - address);
- address = (address + PMD_SIZE) & PMD_MASK;
- pmd++;
- } while (address < end);
-}
-
-void vmfree_area_pages(unsigned long address, unsigned long size)
-{
- pgd_t * dir;
- unsigned long end = address + size;
-
- dir = pgd_offset_k(address);
- flush_cache_all();
- do {
- free_area_pmd(dir, address, end - address);
- address = (address + PGDIR_SIZE) & PGDIR_MASK;
- dir++;
- } while (address && (address < end));
- flush_tlb_all();
-}
-
-static inline int alloc_area_pte (pte_t * pte, unsigned long address,
- unsigned long size, int gfp_mask,
- pgprot_t prot, struct page ***pages)
-{
- unsigned long end;
-
- address &= ~PMD_MASK;
- end = address + size;
- if (end > PMD_SIZE)
- end = PMD_SIZE;
- do {
- struct page * page;
-
- if (!pages) {
- spin_unlock(&init_mm.page_table_lock);
- page = alloc_page(gfp_mask);
- spin_lock(&init_mm.page_table_lock);
- } else {
- page = (**pages);
- (*pages)++;
-
- /* Add a reference to the page so we can free later */
- if (page)
- atomic_inc(&page->count);
-
- }
- if (!pte_none(*pte))
- printk(KERN_ERR "alloc_area_pte: page already
exists\n");
- if (!page)
- return -ENOMEM;
- set_pte(pte, mk_pte(page, prot));
- address += PAGE_SIZE;
- pte++;
- } while (address < end);
- return 0;
-}
-
-static inline int alloc_area_pmd(pmd_t * pmd, unsigned long address,
- unsigned long size, int gfp_mask,
- pgprot_t prot, struct page ***pages)
-{
- unsigned long end;
-
- address &= ~PGDIR_MASK;
- end = address + size;
- if (end > PGDIR_SIZE)
- end = PGDIR_SIZE;
- do {
- pte_t * pte = pte_alloc(&init_mm, pmd, address);
- if (!pte)
- return -ENOMEM;
- if (alloc_area_pte(pte, address, end - address,
- gfp_mask, prot, pages))
- return -ENOMEM;
- address = (address + PMD_SIZE) & PMD_MASK;
- pmd++;
- } while (address < end);
- return 0;
-}
-
-/*static inline*/ int __vmalloc_area_pages (unsigned long address,
- unsigned long size,
- int gfp_mask,
- pgprot_t prot,
- struct page ***pages)
-{
- pgd_t * dir;
- unsigned long start = address;
- unsigned long end = address + size;
-
- dir = pgd_offset_k(address);
- spin_lock(&init_mm.page_table_lock);
- do {
- pmd_t *pmd;
-
- pmd = pmd_alloc(&init_mm, dir, address);
- if (!pmd)
- goto err;
-
- if (alloc_area_pmd(pmd, address, end - address, gfp_mask, prot, pages))
- goto err; // The kernel NEVER reclaims pmds, so no need to undo pmd_alloc() here
-
- address = (address + PGDIR_SIZE) & PGDIR_MASK;
- dir++;
- } while (address && (address < end));
- spin_unlock(&init_mm.page_table_lock);
- flush_cache_all();
- XEN_flush_page_update_queue();
- return 0;
-err:
- spin_unlock(&init_mm.page_table_lock);
- flush_cache_all();
- if (address > start)
- vmfree_area_pages(start, address - start);
- return -ENOMEM;
-}
-
-int vmalloc_area_pages(unsigned long address, unsigned long size,
- int gfp_mask, pgprot_t prot)
-{
- return __vmalloc_area_pages(address, size, gfp_mask, prot, NULL);
-}
-
-struct vm_struct * get_vm_area(unsigned long size, unsigned long flags)
-{
- unsigned long addr, next;
- struct vm_struct **p, *tmp, *area;
-
- area = (struct vm_struct *) kmalloc(sizeof(*area), GFP_KERNEL);
- if (!area)
- return NULL;
-
- size += PAGE_SIZE;
- if (!size) {
- kfree (area);
- return NULL;
- }
-
- addr = VMALLOC_START;
- write_lock(&vmlist_lock);
- for (p = &vmlist; (tmp = *p) ; p = &tmp->next) {
- if ((size + addr) < addr)
- goto out;
- if (size + addr <= (unsigned long) tmp->addr)
- break;
- next = tmp->size + (unsigned long) tmp->addr;
- if (next > addr)
- addr = next;
- if (addr > VMALLOC_END-size)
- goto out;
- }
- area->flags = flags;
- area->addr = (void *)addr;
- area->size = size;
- area->next = *p;
- *p = area;
- write_unlock(&vmlist_lock);
- return area;
-
-out:
- write_unlock(&vmlist_lock);
- kfree(area);
- return NULL;
-}
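Note the `size += PAGE_SIZE` above: every area carries one unmapped guard page at its end, so a stray access past a vmalloc'ed region faults instead of silently hitting the next area. The list walk itself is a first-fit search over address-ordered areas; a minimal sketch of the hole-finding loop, with toy types and the overflow checks of the original omitted:

#include <stddef.h>
#include <stdio.h>

struct toy_area {
	unsigned long addr, size;
	struct toy_area *next;
};

/* First-fit: walk address-ordered areas and return the first gap of
 * `size` bytes at or after `start`, or 0 if none fits below `end`. */
static unsigned long find_hole(struct toy_area *list, unsigned long start,
                               unsigned long size, unsigned long end)
{
	unsigned long addr = start;
	for (struct toy_area *t = list; t; t = t->next) {
		if (addr + size <= t->addr)
			break;                    /* gap before this area fits */
		if (t->addr + t->size > addr)
			addr = t->addr + t->size; /* skip past this area */
		if (addr > end - size)
			return 0;
	}
	return addr;
}

int main(void)
{
	struct toy_area a = { 0x1000, 0x2000, NULL };
	printf("%#lx\n", find_hole(&a, 0x1000, 0x1000, 0x10000)); /* 0x3000 */
	return 0;
}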
-
-void __vfree(void * addr, int free_area_pages)
-{
- struct vm_struct **p, *tmp;
-
- if (!addr)
- return;
- if ((PAGE_SIZE-1) & (unsigned long) addr) {
- printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
- return;
- }
- write_lock(&vmlist_lock);
- for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
- if (tmp->addr == addr) {
- *p = tmp->next;
- if (free_area_pages)
- vmfree_area_pages(VMALLOC_VMADDR(tmp->addr), tmp->size);
- write_unlock(&vmlist_lock);
- kfree(tmp);
- return;
- }
- }
- write_unlock(&vmlist_lock);
- printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", addr);
-}
-
-void vfree(void * addr)
-{
- __vfree(addr,1);
-}
-
-void * __vmalloc (unsigned long size, int gfp_mask, pgprot_t prot)
-{
- void * addr;
- struct vm_struct *area;
-
- size = PAGE_ALIGN(size);
- if (!size || (size >> PAGE_SHIFT) > num_physpages)
- return NULL;
- area = get_vm_area(size, VM_ALLOC);
- if (!area)
- return NULL;
- addr = area->addr;
- if (__vmalloc_area_pages(VMALLOC_VMADDR(addr), size, gfp_mask,
- prot, NULL)) {
- __vfree(addr, 0);
- return NULL;
- }
- return addr;
-}
-
-void * vmap(struct page **pages, int count,
- unsigned long flags, pgprot_t prot)
-{
- void * addr;
- struct vm_struct *area;
- unsigned long size = count << PAGE_SHIFT;
-
- if (!size || size > (max_mapnr << PAGE_SHIFT))
- return NULL;
- area = get_vm_area(size, flags);
- if (!area) {
- return NULL;
- }
- addr = area->addr;
- if (__vmalloc_area_pages(VMALLOC_VMADDR(addr), size, 0,
- prot, &pages)) {
- __vfree(addr, 0);
- return NULL;
- }
- return addr;
-}
-
-long vread(char *buf, char *addr, unsigned long count)
-{
- struct vm_struct *tmp;
- char *vaddr, *buf_start = buf;
- unsigned long n;
-
- /* Don't allow overflow */
- if ((unsigned long) addr + count < count)
- count = -(unsigned long) addr;
-
- read_lock(&vmlist_lock);
- for (tmp = vmlist; tmp; tmp = tmp->next) {
- vaddr = (char *) tmp->addr;
- if (addr >= vaddr + tmp->size - PAGE_SIZE)
- continue;
- while (addr < vaddr) {
- if (count == 0)
- goto finished;
- *buf = '\0';
- buf++;
- addr++;
- count--;
- }
- n = vaddr + tmp->size - PAGE_SIZE - addr;
- do {
- if (count == 0)
- goto finished;
- *buf = *addr;
- buf++;
- addr++;
- count--;
- } while (--n > 0);
- }
-finished:
- read_unlock(&vmlist_lock);
- return buf - buf_start;
-}
-
-long vwrite(char *buf, char *addr, unsigned long count)
-{
- struct vm_struct *tmp;
- char *vaddr, *buf_start = buf;
- unsigned long n;
-
- /* Don't allow overflow */
- if ((unsigned long) addr + count < count)
- count = -(unsigned long) addr;
-
- read_lock(&vmlist_lock);
- for (tmp = vmlist; tmp; tmp = tmp->next) {
- vaddr = (char *) tmp->addr;
- if (addr >= vaddr + tmp->size - PAGE_SIZE)
- continue;
- while (addr < vaddr) {
- if (count == 0)
- goto finished;
- buf++;
- addr++;
- count--;
- }
- n = vaddr + tmp->size - PAGE_SIZE - addr;
- do {
- if (count == 0)
- goto finished;
- *addr = *buf;
- buf++;
- addr++;
- count--;
- } while (--n > 0);
- }
-finished:
- read_unlock(&vmlist_lock);
- return buf - buf_start;
-}
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.4.30-xen-sparse/net/core/skbuff.c
--- a/linux-2.4.30-xen-sparse/net/core/skbuff.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,1309 +0,0 @@
-/*
- * Routines having to do with the 'struct sk_buff' memory handlers.
- *
- * Authors: Alan Cox <iiitac@xxxxxxxxxxxxxx>
- * Florian La Roche <rzsfl@xxxxxxxxxxxx>
- *
- * Version: $Id: skbuff.c,v 1.90 2001/11/07 05:56:19 davem Exp $
- *
- * Fixes:
- * Alan Cox : Fixed the worst of the load balancer bugs.
- * Dave Platt : Interrupt stacking fix.
- * Richard Kooijman : Timestamp fixes.
- * Alan Cox : Changed buffer format.
- * Alan Cox : destructor hook for AF_UNIX etc.
- * Linus Torvalds : Better skb_clone.
- * Alan Cox : Added skb_copy.
- * Alan Cox : Added all the changed routines Linus
- * only put in the headers
- * Ray VanTassle : Fixed --skb->lock in free
- * Alan Cox : skb_copy copy arp field
- * Andi Kleen : slabified it.
- *
- * NOTE:
- * The __skb_ routines should be called with interrupts
- * disabled, or you better be *real* sure that the operation is atomic
- * with respect to whatever list is being frobbed (e.g. via lock_sock()
- * or via disabling bottom half handlers, etc).
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-/*
- * The functions in this file will not compile correctly with gcc 2.4.x
- */
-
-#include <linux/config.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/in.h>
-#include <linux/inet.h>
-#include <linux/slab.h>
-#include <linux/netdevice.h>
-#include <linux/string.h>
-#include <linux/skbuff.h>
-#include <linux/cache.h>
-#include <linux/rtnetlink.h>
-#include <linux/init.h>
-#include <linux/highmem.h>
-
-#include <net/protocol.h>
-#include <net/dst.h>
-#include <net/sock.h>
-#include <net/checksum.h>
-
-#include <asm/uaccess.h>
-#include <asm/system.h>
-
-int sysctl_hot_list_len = 128;
-
-static kmem_cache_t *skbuff_head_cache;
-
-static union {
- struct sk_buff_head list;
- char pad[SMP_CACHE_BYTES];
-} skb_head_pool[NR_CPUS];
-
-/*
- * Keep out-of-line to prevent kernel bloat.
- * __builtin_return_address is not used because it is not always
- * reliable.
- */
-
-/**
- * skb_over_panic - private function
- * @skb: buffer
- * @sz: size
- * @here: address
- *
- * Out of line support code for skb_put(). Not user callable.
- */
-
-void skb_over_panic(struct sk_buff *skb, int sz, void *here)
-{
- printk("skput:over: %p:%d put:%d dev:%s",
- here, skb->len, sz, skb->dev ? skb->dev->name : "<NULL>");
- BUG();
-}
-
-/**
- * skb_under_panic - private function
- * @skb: buffer
- * @sz: size
- * @here: address
- *
- * Out of line support code for skb_push(). Not user callable.
- */
-
-
-void skb_under_panic(struct sk_buff *skb, int sz, void *here)
-{
- printk("skput:under: %p:%d put:%d dev:%s",
- here, skb->len, sz, skb->dev ? skb->dev->name : "<NULL>");
- BUG();
-}
-
-static __inline__ struct sk_buff *skb_head_from_pool(void)
-{
- struct sk_buff_head *list = &skb_head_pool[smp_processor_id()].list;
-
- if (skb_queue_len(list)) {
- struct sk_buff *skb;
- unsigned long flags;
-
- local_irq_save(flags);
- skb = __skb_dequeue(list);
- local_irq_restore(flags);
- return skb;
- }
- return NULL;
-}
-
-static __inline__ void skb_head_to_pool(struct sk_buff *skb)
-{
- struct sk_buff_head *list = &skb_head_pool[smp_processor_id()].list;
-
- if (skb_queue_len(list) < sysctl_hot_list_len) {
- unsigned long flags;
-
- local_irq_save(flags);
- __skb_queue_head(list, skb);
- local_irq_restore(flags);
-
- return;
- }
- kmem_cache_free(skbuff_head_cache, skb);
-}
-
-
-/* Allocate a new skbuff. We do this ourselves so we can fill in a few
- * 'private' fields and also do memory statistics to find all the
- * [BEEP] leaks.
- *
- */
-
-/**
- * alloc_skb - allocate a network buffer
- * @size: size to allocate
- * @gfp_mask: allocation mask
- *
- * Allocate a new &sk_buff. The returned buffer has no headroom and a
- * tail room of size bytes. The object has a reference count of one.
- * The return is the buffer. On a failure the return is %NULL.
- *
- * Buffers may only be allocated from interrupts using a @gfp_mask of
- * %GFP_ATOMIC.
- */
-
-struct sk_buff *alloc_skb(unsigned int size,int gfp_mask)
-{
- struct sk_buff *skb;
- u8 *data;
-
- if (in_interrupt() && (gfp_mask & __GFP_WAIT)) {
- static int count = 0;
- if (++count < 5) {
- printk(KERN_ERR "alloc_skb called nonatomically "
- "from interrupt %p\n", NET_CALLER(size));
- BUG();
- }
- gfp_mask &= ~__GFP_WAIT;
- }
-
- /* Get the HEAD */
- skb = skb_head_from_pool();
- if (skb == NULL) {
- skb = kmem_cache_alloc(skbuff_head_cache, gfp_mask & ~__GFP_DMA);
- if (skb == NULL)
- goto nohead;
- }
-
- /* Get the DATA. Size must match skb_add_mtu(). */
- size = SKB_DATA_ALIGN(size);
- data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
- if (data == NULL)
- goto nodata;
-
- /* XXX: does not include slab overhead */
- skb->truesize = size + sizeof(struct sk_buff);
-
- /* Load the data pointers. */
- skb->head = data;
- skb->data = data;
- skb->tail = data;
- skb->end = data + size;
-
- /* Set up other state */
- skb->len = 0;
- skb->cloned = 0;
- skb->data_len = 0;
-
- atomic_set(&skb->users, 1);
- atomic_set(&(skb_shinfo(skb)->dataref), 1);
- skb_shinfo(skb)->nr_frags = 0;
- skb_shinfo(skb)->frag_list = NULL;
- return skb;
-
-nodata:
- skb_head_to_pool(skb);
-nohead:
- return NULL;
-}
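The four pointers loaded above establish the classic sk_buff invariant head <= data <= tail <= end: skb_reserve() opens headroom by moving data and tail forward, and skb_put() grows the payload by advancing tail. A minimal userspace model of that pointer discipline, with hypothetical toy_* names and no overflow checks:

#include <stdio.h>

struct toy_skb {
	unsigned char *head, *data, *tail, *end;
	unsigned int len;
};

/* Reserve headroom: move data and tail forward together (len stays 0). */
static void toy_reserve(struct toy_skb *skb, int n)
{
	skb->data += n;
	skb->tail += n;
}

/* Append n bytes: advance tail and grow len, returning the old tail. */
static unsigned char *toy_put(struct toy_skb *skb, int n)
{
	unsigned char *old_tail = skb->tail;
	skb->tail += n;
	skb->len += n;
	return old_tail;
}

int main(void)
{
	unsigned char buf[256];
	struct toy_skb skb = { buf, buf, buf, buf + sizeof buf, 0 };

	toy_reserve(&skb, 16);   /* headroom for protocol headers */
	toy_put(&skb, 100);      /* payload */
	printf("headroom=%ld tailroom=%ld len=%u\n",
	       (long)(skb.data - skb.head), (long)(skb.end - skb.tail), skb.len);
	return 0;                /* prints headroom=16 tailroom=140 len=100 */
}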
-
-/**
- * alloc_skb_from_cache - allocate a network buffer
- * @cp: kmem_cache from which to allocate the data area
- * (object size must be big enough for @size bytes + skb overheads)
- * @size: size to allocate
- * @gfp_mask: allocation mask
- *
- * Allocate a new &sk_buff. The returned buffer has no headroom and a
- * tail room of size bytes. The object has a reference count of one.
- * The return is the buffer. On a failure the return is %NULL.
- *
- * Buffers may only be allocated from interrupts using a @gfp_mask of
- * %GFP_ATOMIC.
- */
-
-struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
- unsigned int size, int gfp_mask)
-{
- struct sk_buff *skb;
- u8 *data;
-
- if (in_interrupt() && (gfp_mask & __GFP_WAIT)) {
- static int count = 0;
- if (++count < 5) {
- printk(KERN_ERR "alloc_skb called nonatomically "
- "from interrupt %p\n", NET_CALLER(size));
- BUG();
- }
- gfp_mask &= ~__GFP_WAIT;
- }
-
- /* Get the HEAD */
- skb = skb_head_from_pool();
- if (skb == NULL) {
- skb = kmem_cache_alloc(skbuff_head_cache, gfp_mask & ~__GFP_DMA);
- if (skb == NULL)
- goto nohead;
- }
-
- /* Get the DATA. */
- size = SKB_DATA_ALIGN(size);
- data = kmem_cache_alloc(cp, gfp_mask);
- if (data == NULL)
- goto nodata;
-
- /* XXX: does not include slab overhead */
- skb->truesize = size + sizeof(struct sk_buff);
-
- /* Load the data pointers. */
- skb->head = data;
- skb->data = data;
- skb->tail = data;
- skb->end = data + size;
-
- /* Set up other state */
- skb->len = 0;
- skb->cloned = 0;
- skb->data_len = 0;
-
- atomic_set(&skb->users, 1);
- atomic_set(&(skb_shinfo(skb)->dataref), 1);
- skb_shinfo(skb)->nr_frags = 0;
- skb_shinfo(skb)->frag_list = NULL;
- return skb;
-
-nodata:
- skb_head_to_pool(skb);
-nohead:
- return NULL;
-}
-
-
-/*
- * Slab constructor for a skb head.
- */
-static inline void skb_headerinit(void *p, kmem_cache_t *cache,
- unsigned long flags)
-{
- struct sk_buff *skb = p;
-
- skb->next = NULL;
- skb->prev = NULL;
- skb->list = NULL;
- skb->sk = NULL;
- skb->stamp.tv_sec=0; /* No idea about time */
- skb->dev = NULL;
- skb->real_dev = NULL;
- skb->dst = NULL;
- memset(skb->cb, 0, sizeof(skb->cb));
- skb->pkt_type = PACKET_HOST; /* Default type */
- skb->ip_summed = 0;
- skb->priority = 0;
- skb->security = 0; /* By default packets are insecure */
- skb->destructor = NULL;
-
-#ifdef CONFIG_NETFILTER
- skb->nfmark = skb->nfcache = 0;
- skb->nfct = NULL;
-#ifdef CONFIG_NETFILTER_DEBUG
- skb->nf_debug = 0;
-#endif
-#endif
-#ifdef CONFIG_NET_SCHED
- skb->tc_index = 0;
-#endif
-}
-
-static void skb_drop_fraglist(struct sk_buff *skb)
-{
- struct sk_buff *list = skb_shinfo(skb)->frag_list;
-
- skb_shinfo(skb)->frag_list = NULL;
-
- do {
- struct sk_buff *this = list;
- list = list->next;
- kfree_skb(this);
- } while (list);
-}
-
-static void skb_clone_fraglist(struct sk_buff *skb)
-{
- struct sk_buff *list;
-
- for (list = skb_shinfo(skb)->frag_list; list; list=list->next)
- skb_get(list);
-}
-
-static void skb_release_data(struct sk_buff *skb)
-{
- if (!skb->cloned ||
- atomic_dec_and_test(&(skb_shinfo(skb)->dataref))) {
- if (skb_shinfo(skb)->nr_frags) {
- int i;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
- put_page(skb_shinfo(skb)->frags[i].page);
- }
-
- if (skb_shinfo(skb)->frag_list)
- skb_drop_fraglist(skb);
-
- kfree(skb->head);
- }
-}
-
-/*
- * Free an skbuff by memory without cleaning the state.
- */
-void kfree_skbmem(struct sk_buff *skb)
-{
- skb_release_data(skb);
- skb_head_to_pool(skb);
-}
-
-/**
- * __kfree_skb - private function
- * @skb: buffer
- *
- * Free an sk_buff. Release anything attached to the buffer.
- * Clean the state. This is an internal helper function. Users should
- * always call kfree_skb
- */
-
-void __kfree_skb(struct sk_buff *skb)
-{
- if (skb->list) {
- printk(KERN_WARNING "Warning: kfree_skb passed an skb still "
- "on a list (from %p).\n", NET_CALLER(skb));
- BUG();
- }
-
- dst_release(skb->dst);
- if(skb->destructor) {
- if (in_irq()) {
- printk(KERN_WARNING "Warning: kfree_skb on hard IRQ
%p\n",
- NET_CALLER(skb));
- }
- skb->destructor(skb);
- }
-#ifdef CONFIG_NETFILTER
- nf_conntrack_put(skb->nfct);
-#endif
- skb_headerinit(skb, NULL, 0); /* clean state */
- kfree_skbmem(skb);
-}
-
-/**
- * skb_clone - duplicate an sk_buff
- * @skb: buffer to clone
- * @gfp_mask: allocation priority
- *
- * Duplicate an &sk_buff. The new one is not owned by a socket. Both
- * copies share the same packet data but not structure. The new
- * buffer has a reference count of 1. If the allocation fails the
- * function returns %NULL otherwise the new buffer is returned.
- *
- * If this function is called from an interrupt gfp_mask() must be
- * %GFP_ATOMIC.
- */
-
-struct sk_buff *skb_clone(struct sk_buff *skb, int gfp_mask)
-{
- struct sk_buff *n;
-
- n = skb_head_from_pool();
- if (!n) {
- n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
- if (!n)
- return NULL;
- }
-
-#define C(x) n->x = skb->x
-
- n->next = n->prev = NULL;
- n->list = NULL;
- n->sk = NULL;
- C(stamp);
- C(dev);
- C(real_dev);
- C(h);
- C(nh);
- C(mac);
- C(dst);
- dst_clone(n->dst);
- memcpy(n->cb, skb->cb, sizeof(skb->cb));
- C(len);
- C(data_len);
- C(csum);
- n->cloned = 1;
- C(pkt_type);
- C(ip_summed);
- C(priority);
- atomic_set(&n->users, 1);
- C(protocol);
- C(security);
- C(truesize);
- C(head);
- C(data);
- C(tail);
- C(end);
- n->destructor = NULL;
-#ifdef CONFIG_NETFILTER
- C(nfmark);
- C(nfcache);
- C(nfct);
-#ifdef CONFIG_NETFILTER_DEBUG
- C(nf_debug);
-#endif
-#endif /*CONFIG_NETFILTER*/
-#if defined(CONFIG_HIPPI)
- C(private);
-#endif
-#ifdef CONFIG_NET_SCHED
- C(tc_index);
-#endif
-
- atomic_inc(&(skb_shinfo(skb)->dataref));
- skb->cloned = 1;
-#ifdef CONFIG_NETFILTER
- nf_conntrack_get(skb->nfct);
-#endif
- return n;
-}
-
-static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
-{
- /*
- * Shift between the two data areas in bytes
- */
- unsigned long offset = new->data - old->data;
-
- new->list=NULL;
- new->sk=NULL;
- new->dev=old->dev;
- new->real_dev=old->real_dev;
- new->priority=old->priority;
- new->protocol=old->protocol;
- new->dst=dst_clone(old->dst);
- new->h.raw=old->h.raw+offset;
- new->nh.raw=old->nh.raw+offset;
- new->mac.raw=old->mac.raw+offset;
- memcpy(new->cb, old->cb, sizeof(old->cb));
- atomic_set(&new->users, 1);
- new->pkt_type=old->pkt_type;
- new->stamp=old->stamp;
- new->destructor = NULL;
- new->security=old->security;
-#ifdef CONFIG_NETFILTER
- new->nfmark=old->nfmark;
- new->nfcache=old->nfcache;
- new->nfct=old->nfct;
- nf_conntrack_get(new->nfct);
-#ifdef CONFIG_NETFILTER_DEBUG
- new->nf_debug=old->nf_debug;
-#endif
-#endif
-#ifdef CONFIG_NET_SCHED
- new->tc_index = old->tc_index;
-#endif
-}
-
-/**
- * skb_copy - create private copy of an sk_buff
- * @skb: buffer to copy
- * @gfp_mask: allocation priority
- *
- * Make a copy of both an &sk_buff and its data. This is used when the
- * caller wishes to modify the data and needs a private copy of the
- * data to alter. Returns %NULL on failure or the pointer to the buffer
- * on success. The returned buffer has a reference count of 1.
- *
- * As by-product this function converts non-linear &sk_buff to linear
- * one, so that &sk_buff becomes completely private and caller is allowed
- * to modify all the data of returned buffer. This means that this
- * function is not recommended for use in circumstances when only
- * header is going to be modified. Use pskb_copy() instead.
- */
-
-struct sk_buff *skb_copy(const struct sk_buff *skb, int gfp_mask)
-{
- struct sk_buff *n;
- int headerlen = skb->data-skb->head;
-
- /*
- * Allocate the copy buffer
- */
- n=alloc_skb(skb->end - skb->head + skb->data_len, gfp_mask);
- if(n==NULL)
- return NULL;
-
- /* Set the data pointer */
- skb_reserve(n,headerlen);
- /* Set the tail pointer and length */
- skb_put(n,skb->len);
- n->csum = skb->csum;
- n->ip_summed = skb->ip_summed;
-
- if (skb_copy_bits(skb, -headerlen, n->head, headerlen+skb->len))
- BUG();
-
- copy_skb_header(n, skb);
-
- return n;
-}
-
-/* Keep head the same: replace data */
-int skb_linearize(struct sk_buff *skb, int gfp_mask)
-{
- unsigned int size;
- u8 *data;
- long offset;
- int headerlen = skb->data - skb->head;
- int expand = (skb->tail+skb->data_len) - skb->end;
-
- if (skb_shared(skb))
- BUG();
-
- if (expand <= 0)
- expand = 0;
-
- size = (skb->end - skb->head + expand);
- size = SKB_DATA_ALIGN(size);
- data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
- if (data == NULL)
- return -ENOMEM;
-
- /* Copy entire thing */
- if (skb_copy_bits(skb, -headerlen, data, headerlen+skb->len))
- BUG();
-
- /* Offset between the two in bytes */
- offset = data - skb->head;
-
- /* Free old data. */
- skb_release_data(skb);
-
- skb->head = data;
- skb->end = data + size;
-
- /* Set up new pointers */
- skb->h.raw += offset;
- skb->nh.raw += offset;
- skb->mac.raw += offset;
- skb->tail += offset;
- skb->data += offset;
-
- /* Set up shinfo */
- atomic_set(&(skb_shinfo(skb)->dataref), 1);
- skb_shinfo(skb)->nr_frags = 0;
- skb_shinfo(skb)->frag_list = NULL;
-
- /* We are no longer a clone, even if we were. */
- skb->cloned = 0;
-
- skb->tail += skb->data_len;
- skb->data_len = 0;
- return 0;
-}
-
-
-/**
- * pskb_copy - create copy of an sk_buff with private head.
- * @skb: buffer to copy
- * @gfp_mask: allocation priority
- *
- * Make a copy of both an &sk_buff and part of its data, located
- * in header. Fragmented data remain shared. This is used when
- * the caller wishes to modify only header of &sk_buff and needs
- * private copy of the header to alter. Returns %NULL on failure
- * or the pointer to the buffer on success.
- * The returned buffer has a reference count of 1.
- */
-
-struct sk_buff *pskb_copy(struct sk_buff *skb, int gfp_mask)
-{
- struct sk_buff *n;
-
- /*
- * Allocate the copy buffer
- */
- n=alloc_skb(skb->end - skb->head, gfp_mask);
- if(n==NULL)
- return NULL;
-
- /* Set the data pointer */
- skb_reserve(n,skb->data-skb->head);
- /* Set the tail pointer and length */
- skb_put(n,skb_headlen(skb));
- /* Copy the bytes */
- memcpy(n->data, skb->data, n->len);
- n->csum = skb->csum;
- n->ip_summed = skb->ip_summed;
-
- n->data_len = skb->data_len;
- n->len = skb->len;
-
- if (skb_shinfo(skb)->nr_frags) {
- int i;
-
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
- get_page(skb_shinfo(n)->frags[i].page);
- }
- skb_shinfo(n)->nr_frags = i;
- }
-
- if (skb_shinfo(skb)->frag_list) {
- skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
- skb_clone_fraglist(n);
- }
-
- copy_skb_header(n, skb);
-
- return n;
-}
-
-/**
- * pskb_expand_head - reallocate header of &sk_buff
- * @skb: buffer to reallocate
- * @nhead: room to add at head
- * @ntail: room to add at tail
- * @gfp_mask: allocation priority
- *
- * Expands (or creates identical copy, if &nhead and &ntail are zero)
- * header of skb. &sk_buff itself is not changed. &sk_buff MUST have
- * reference count of 1. Returns zero in the case of success or error,
- * if expansion failed. In the last case, &sk_buff is not changed.
- *
- * All the pointers pointing into skb header may change and must be
- * reloaded after call to this function.
- */
-
-int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, int gfp_mask)
-{
- int i;
- u8 *data;
- int size = nhead + (skb->end - skb->head) + ntail;
- long off;
-
- if (skb_shared(skb))
- BUG();
-
- size = SKB_DATA_ALIGN(size);
-
- data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
- if (data == NULL)
- goto nodata;
-
- /* Copy only real data... and, alas, header. This should be
- * optimized for the cases when header is void. */
- memcpy(data+nhead, skb->head, skb->tail-skb->head);
- memcpy(data+size, skb->end, sizeof(struct skb_shared_info));
-
- for (i=0; i<skb_shinfo(skb)->nr_frags; i++)
- get_page(skb_shinfo(skb)->frags[i].page);
-
- if (skb_shinfo(skb)->frag_list)
- skb_clone_fraglist(skb);
-
- skb_release_data(skb);
-
- off = (data+nhead) - skb->head;
-
- skb->head = data;
- skb->end = data+size;
-
- skb->data += off;
- skb->tail += off;
- skb->mac.raw += off;
- skb->h.raw += off;
- skb->nh.raw += off;
- skb->cloned = 0;
- atomic_set(&skb_shinfo(skb)->dataref, 1);
- return 0;
-
-nodata:
- return -ENOMEM;
-}
-
-/* Make private copy of skb with writable head and some headroom */
-
-struct sk_buff *
-skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
-{
- struct sk_buff *skb2;
- int delta = headroom - skb_headroom(skb);
-
- if (delta <= 0)
- return pskb_copy(skb, GFP_ATOMIC);
-
- skb2 = skb_clone(skb, GFP_ATOMIC);
- if (skb2 == NULL ||
- !pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC))
- return skb2;
-
- kfree_skb(skb2);
- return NULL;
-}
-
-
-/**
- * skb_copy_expand - copy and expand sk_buff
- * @skb: buffer to copy
- * @newheadroom: new free bytes at head
- * @newtailroom: new free bytes at tail
- * @gfp_mask: allocation priority
- *
- * Make a copy of both an &sk_buff and its data and while doing so
- * allocate additional space.
- *
- * This is used when the caller wishes to modify the data and needs a
- * private copy of the data to alter as well as more space for new fields.
- * Returns %NULL on failure or the pointer to the buffer
- * on success. The returned buffer has a reference count of 1.
- *
- * You must pass %GFP_ATOMIC as the allocation priority if this function
- * is called from an interrupt.
- */
-
-
-struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
- int newheadroom,
- int newtailroom,
- int gfp_mask)
-{
- struct sk_buff *n;
-
- /*
- * Allocate the copy buffer
- */
-
- n=alloc_skb(newheadroom + skb->len + newtailroom,
- gfp_mask);
- if(n==NULL)
- return NULL;
-
- skb_reserve(n,newheadroom);
-
- /* Set the tail pointer and length */
- skb_put(n,skb->len);
-
- /* Copy the data only. */
- if (skb_copy_bits(skb, 0, n->data, skb->len))
- BUG();
-
- copy_skb_header(n, skb);
- return n;
-}
-
-/**
- * skb_pad - zero pad the tail of an skb
- * @skb: buffer to pad
- * @pad: space to pad
- *
- * Ensure that a buffer is followed by a padding area that is zero
- * filled. Used by network drivers which may DMA or transfer data
- * beyond the buffer end onto the wire.
- *
- * May return NULL in out of memory cases.
- */
-
-struct sk_buff *skb_pad(struct sk_buff *skb, int pad)
-{
- struct sk_buff *nskb;
-
- /* If the skbuff is non linear tailroom is always zero.. */
- if(skb_tailroom(skb) >= pad)
- {
- memset(skb->data+skb->len, 0, pad);
- return skb;
- }
-
- nskb = skb_copy_expand(skb, skb_headroom(skb), skb_tailroom(skb) + pad, GFP_ATOMIC);
- kfree_skb(skb);
- if(nskb)
- memset(nskb->data+nskb->len, 0, pad);
- return nskb;
-}
-
-/* Trims skb to length len. It can change skb pointers, if "realloc" is 1.
- * If realloc==0 and trimming is impossible without change of data,
- * it is BUG().
- */
-
-int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc)
-{
- int offset = skb_headlen(skb);
- int nfrags = skb_shinfo(skb)->nr_frags;
- int i;
-
- for (i=0; i<nfrags; i++) {
- int end = offset + skb_shinfo(skb)->frags[i].size;
- if (end > len) {
- if (skb_cloned(skb)) {
- if (!realloc)
- BUG();
- if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
- return -ENOMEM;
- }
- if (len <= offset) {
- put_page(skb_shinfo(skb)->frags[i].page);
- skb_shinfo(skb)->nr_frags--;
- } else {
- skb_shinfo(skb)->frags[i].size = len-offset;
- }
- }
- offset = end;
- }
-
- if (offset < len) {
- skb->data_len -= skb->len - len;
- skb->len = len;
- } else {
- if (len <= skb_headlen(skb)) {
- skb->len = len;
- skb->data_len = 0;
- skb->tail = skb->data + len;
- if (skb_shinfo(skb)->frag_list && !skb_cloned(skb))
- skb_drop_fraglist(skb);
- } else {
- skb->data_len -= skb->len - len;
- skb->len = len;
- }
- }
-
- return 0;
-}
-
-/**
- * __pskb_pull_tail - advance tail of skb header
- * @skb: buffer to reallocate
- * @delta: number of bytes to advance tail
- *
- * The function makes a sense only on a fragmented &sk_buff,
- * it expands header moving its tail forward and copying necessary
- * data from fragmented part.
- *
- * &sk_buff MUST have reference count of 1.
- *
- * Returns %NULL (and &sk_buff does not change) if pull failed
- * or value of new tail of skb in the case of success.
- *
- * All the pointers pointing into skb header may change and must be
- * reloaded after call to this function.
- */
-
-/* Moves tail of skb head forward, copying data from fragmented part,
- * when it is necessary.
- * 1. It may fail due to malloc failure.
- * 2. It may change skb pointers.
- *
- * It is pretty complicated. Luckily, it is called only in exceptional cases.
- */
-unsigned char * __pskb_pull_tail(struct sk_buff *skb, int delta)
-{
- int i, k, eat;
-
- /* If skb has not enough free space at tail, get new one
- * plus 128 bytes for future expansions. If we have enough
- * room at tail, reallocate without expansion only if skb is cloned.
- */
- eat = (skb->tail+delta) - skb->end;
-
- if (eat > 0 || skb_cloned(skb)) {
- if (pskb_expand_head(skb, 0, eat>0 ? eat+128 : 0, GFP_ATOMIC))
- return NULL;
- }
-
- if (skb_copy_bits(skb, skb_headlen(skb), skb->tail, delta))
- BUG();
-
- /* Optimization: no fragments, no reasons to preestimate
- * size of pulled pages. Superb.
- */
- if (skb_shinfo(skb)->frag_list == NULL)
- goto pull_pages;
-
- /* Estimate size of pulled pages. */
- eat = delta;
- for (i=0; i<skb_shinfo(skb)->nr_frags; i++) {
- if (skb_shinfo(skb)->frags[i].size >= eat)
- goto pull_pages;
- eat -= skb_shinfo(skb)->frags[i].size;
- }
-
- /* If we need update frag list, we are in troubles.
- * Certainly, it possible to add an offset to skb data,
- * but taking into account that pulling is expected to
- * be very rare operation, it is worth to fight against
- * further bloating skb head and crucify ourselves here instead.
- * Pure masohism, indeed. 8)8)
- */
- if (eat) {
- struct sk_buff *list = skb_shinfo(skb)->frag_list;
- struct sk_buff *clone = NULL;
- struct sk_buff *insp = NULL;
-
- do {
- if (list == NULL)
- BUG();
-
- if (list->len <= eat) {
- /* Eaten as whole. */
- eat -= list->len;
- list = list->next;
- insp = list;
- } else {
- /* Eaten partially. */
-
- if (skb_shared(list)) {
- /* Sucks! We need to fork list. :-( */
- clone = skb_clone(list, GFP_ATOMIC);
- if (clone == NULL)
- return NULL;
- insp = list->next;
- list = clone;
- } else {
- /* This may be pulled without
- * problems. */
- insp = list;
- }
- if (pskb_pull(list, eat) == NULL) {
- if (clone)
- kfree_skb(clone);
- return NULL;
- }
- break;
- }
- } while (eat);
-
- /* Free pulled out fragments. */
- while ((list = skb_shinfo(skb)->frag_list) != insp) {
- skb_shinfo(skb)->frag_list = list->next;
- kfree_skb(list);
- }
- /* And insert new clone at head. */
- if (clone) {
- clone->next = list;
- skb_shinfo(skb)->frag_list = clone;
- }
- }
- /* Success! Now we may commit changes to skb data. */
-
-pull_pages:
- eat = delta;
- k = 0;
- for (i=0; i<skb_shinfo(skb)->nr_frags; i++) {
- if (skb_shinfo(skb)->frags[i].size <= eat) {
- put_page(skb_shinfo(skb)->frags[i].page);
- eat -= skb_shinfo(skb)->frags[i].size;
- } else {
- skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
- if (eat) {
- skb_shinfo(skb)->frags[k].page_offset += eat;
- skb_shinfo(skb)->frags[k].size -= eat;
- eat = 0;
- }
- k++;
- }
- }
- skb_shinfo(skb)->nr_frags = k;
-
- skb->tail += delta;
- skb->data_len -= delta;
-
- return skb->tail;
-}
-
-/* Copy some data bits from skb to kernel buffer. */
-
-int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
-{
- int i, copy;
- int start = skb->len - skb->data_len;
-
- if (offset > (int)skb->len-len)
- goto fault;
-
- /* Copy header. */
- if ((copy = start-offset) > 0) {
- if (copy > len)
- copy = len;
- memcpy(to, skb->data + offset, copy);
- if ((len -= copy) == 0)
- return 0;
- offset += copy;
- to += copy;
- }
-
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- int end;
-
- BUG_TRAP(start <= offset+len);
-
- end = start + skb_shinfo(skb)->frags[i].size;
- if ((copy = end-offset) > 0) {
- u8 *vaddr;
-
- if (copy > len)
- copy = len;
-
- vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
- memcpy(to, vaddr+skb_shinfo(skb)->frags[i].page_offset+
- offset-start, copy);
- kunmap_skb_frag(vaddr);
-
- if ((len -= copy) == 0)
- return 0;
- offset += copy;
- to += copy;
- }
- start = end;
- }
-
- if (skb_shinfo(skb)->frag_list) {
- struct sk_buff *list;
-
- for (list = skb_shinfo(skb)->frag_list; list; list=list->next) {
- int end;
-
- BUG_TRAP(start <= offset+len);
-
- end = start + list->len;
- if ((copy = end-offset) > 0) {
- if (copy > len)
- copy = len;
- if (skb_copy_bits(list, offset-start, to, copy))
- goto fault;
- if ((len -= copy) == 0)
- return 0;
- offset += copy;
- to += copy;
- }
- start = end;
- }
- }
- if (len == 0)
- return 0;
-
-fault:
- return -EFAULT;
-}
-
-/* Checksum skb data. */
-
-unsigned int skb_checksum(const struct sk_buff *skb, int offset, int len, unsigned int csum)
-{
- int i, copy;
- int start = skb->len - skb->data_len;
- int pos = 0;
-
- /* Checksum header. */
- if ((copy = start-offset) > 0) {
- if (copy > len)
- copy = len;
- csum = csum_partial(skb->data+offset, copy, csum);
- if ((len -= copy) == 0)
- return csum;
- offset += copy;
- pos = copy;
- }
-
- for (i=0; i<skb_shinfo(skb)->nr_frags; i++) {
- int end;
-
- BUG_TRAP(start <= offset+len);
-
- end = start + skb_shinfo(skb)->frags[i].size;
- if ((copy = end-offset) > 0) {
- unsigned int csum2;
- u8 *vaddr;
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-
- if (copy > len)
- copy = len;
- vaddr = kmap_skb_frag(frag);
- csum2 = csum_partial(vaddr + frag->page_offset +
- offset-start, copy, 0);
- kunmap_skb_frag(vaddr);
- csum = csum_block_add(csum, csum2, pos);
- if (!(len -= copy))
- return csum;
- offset += copy;
- pos += copy;
- }
- start = end;
- }
-
- if (skb_shinfo(skb)->frag_list) {
- struct sk_buff *list;
-
- for (list = skb_shinfo(skb)->frag_list; list; list=list->next) {
- int end;
-
- BUG_TRAP(start <= offset+len);
-
- end = start + list->len;
- if ((copy = end-offset) > 0) {
- unsigned int csum2;
- if (copy > len)
- copy = len;
- csum2 = skb_checksum(list, offset-start, copy, 0);
- csum = csum_block_add(csum, csum2, pos);
- if ((len -= copy) == 0)
- return csum;
- offset += copy;
- pos += copy;
- }
- start = end;
- }
- }
- if (len == 0)
- return csum;
-
- BUG();
- return csum;
-}
-
-/* Both of above in one bottle. */
-
-unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to, int len, unsigned int csum)
-{
- int i, copy;
- int start = skb->len - skb->data_len;
- int pos = 0;
-
- /* Copy header. */
- if ((copy = start-offset) > 0) {
- if (copy > len)
- copy = len;
- csum = csum_partial_copy_nocheck(skb->data+offset, to, copy, csum);
- if ((len -= copy) == 0)
- return csum;
- offset += copy;
- to += copy;
- pos = copy;
- }
-
- for (i=0; i<skb_shinfo(skb)->nr_frags; i++) {
- int end;
-
- BUG_TRAP(start <= offset+len);
-
- end = start + skb_shinfo(skb)->frags[i].size;
- if ((copy = end-offset) > 0) {
- unsigned int csum2;
- u8 *vaddr;
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-
- if (copy > len)
- copy = len;
- vaddr = kmap_skb_frag(frag);
- csum2 = csum_partial_copy_nocheck(vaddr + frag->page_offset +
- offset-start, to, copy, 0);
- kunmap_skb_frag(vaddr);
- csum = csum_block_add(csum, csum2, pos);
- if (!(len -= copy))
- return csum;
- offset += copy;
- to += copy;
- pos += copy;
- }
- start = end;
- }
-
- if (skb_shinfo(skb)->frag_list) {
- struct sk_buff *list;
-
- for (list = skb_shinfo(skb)->frag_list; list; list=list->next) {
- unsigned int csum2;
- int end;
-
- BUG_TRAP(start <= offset+len);
-
- end = start + list->len;
- if ((copy = end-offset) > 0) {
- if (copy > len)
- copy = len;
- csum2 = skb_copy_and_csum_bits(list, offset-start, to, copy, 0);
- csum = csum_block_add(csum, csum2, pos);
- if ((len -= copy) == 0)
- return csum;
- offset += copy;
- to += copy;
- pos += copy;
- }
- start = end;
- }
- }
- if (len == 0)
- return csum;
-
- BUG();
- return csum;
-}
-
-void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
-{
- unsigned int csum;
- long csstart;
-
- if (skb->ip_summed == CHECKSUM_HW)
- csstart = skb->h.raw - skb->data;
- else
- csstart = skb->len - skb->data_len;
-
- if (csstart > skb->len - skb->data_len)
- BUG();
-
- memcpy(to, skb->data, csstart);
-
- csum = 0;
- if (csstart != skb->len)
- csum = skb_copy_and_csum_bits(skb, csstart, to+csstart,
- skb->len-csstart, 0);
-
- if (skb->ip_summed == CHECKSUM_HW) {
- long csstuff = csstart + skb->csum;
-
- *((unsigned short *)(to + csstuff)) = csum_fold(csum);
- }
-}
-
-#if 0
-/*
- * Tune the memory allocator for a new MTU size.
- */
-void skb_add_mtu(int mtu)
-{
- /* Must match allocation in alloc_skb */
- mtu = SKB_DATA_ALIGN(mtu) + sizeof(struct skb_shared_info);
-
- kmem_add_cache_size(mtu);
-}
-#endif
-
-void __init skb_init(void)
-{
- int i;
-
- skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
- sizeof(struct sk_buff),
- 0,
- SLAB_HWCACHE_ALIGN,
- skb_headerinit, NULL);
- if (!skbuff_head_cache)
- panic("cannot create skbuff cache");
-
- for (i=0; i<NR_CPUS; i++)
- skb_queue_head_init(&skb_head_pool[i].list);
-}
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/arch/xen/Kconfig
--- a/linux-2.6.11-xen-sparse/arch/xen/Kconfig Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,152 +0,0 @@
-#
-# For a description of the syntax of this configuration file,
-# see Documentation/kbuild/kconfig-language.txt.
-#
-
-mainmenu "Linux Kernel Configuration"
-
-config XEN
- bool
- default y
- help
- This is the Linux Xen port.
-
-config ARCH_XEN
- bool
- default y
-
-
-config NO_IDLE_HZ
- bool
- default y
-
-
-menu "XEN"
-
-config XEN_PRIVILEGED_GUEST
- bool "Privileged Guest (domain 0)"
- default n
- select XEN_PHYSDEV_ACCESS
- help
- Support for privileged operation (domain 0)
-
-config XEN_PHYSDEV_ACCESS
- bool "Physical device access"
- default XEN_PRIVILEGED_GUEST
- help
- Assume access is available to physical hardware devices
- (e.g., hard drives, network cards). This allows you to configure
- such devices and also includes some low-level support that is
- otherwise not compiled into the kernel.
-
-config XEN_BLKDEV_BACKEND
- bool "Block-device backend driver"
- depends on XEN_PHYSDEV_ACCESS
- default y
- help
- The block-device backend driver allows the kernel to export its
- block devices to other guests via a high-performance shared-memory
- interface.
-
-config XEN_NETDEV_BACKEND
- bool "Network-device backend driver"
- depends on XEN_PHYSDEV_ACCESS
- default y
- help
- The network-device backend driver allows the kernel to export its
- network devices to other guests via a high-performance shared-memory
- interface.
-
-config XEN_BLKDEV_FRONTEND
- bool "Block-device frontend driver"
- default y
- help
- The block-device frontend driver allows the kernel to access block
- devices mounted within another guest OS. Unless you are building a
- dedicated device-driver domain, or your master control domain
- (domain 0), then you almost certainly want to say Y here.
-
-config XEN_NETDEV_FRONTEND
- bool "Network-device frontend driver"
- default y
- help
- The network-device frontend driver allows the kernel to access
- network interfaces within another guest OS. Unless you are building a
- dedicated device-driver domain, or your master control domain
- (domain 0), then you almost certainly want to say Y here.
-
-config XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER
- bool "Pipelined transmitter (DANGEROUS)"
- depends on XEN_NETDEV_FRONTEND
- default n
- help
- The driver will assume that the backend is pipelining packets for
- transmission: whenever packets are pending in the remote backend,
- the driver will not send asynchronous notifications when it queues
- additional packets for transmission.
- If the backend is a dumb domain, such as a transparent Ethernet
- bridge with no local IP interface, it is safe to say Y here to get
- slightly lower network overhead.
- If the backend has a local IP interface; or may be doing smart things
- like reassembling packets to perform firewall filtering; or if you
- are unsure; or if you experience network hangs when this option is
- enabled; then you must say N here.
-
-config XEN_WRITABLE_PAGETABLES
- bool
- default y
-
-config XEN_SCRUB_PAGES
- bool "Scrub memory before freeing it to Xen"
- default y
- help
- Erase memory contents before freeing it back to Xen's global
- pool. This ensures that any secrets contained within that
- memory (e.g., private keys) cannot be found by other guests that
- may be running on the machine. Most people will want to say Y here.
- If security is not a concern then you may increase performance by
- saying N.
-
-choice
- prompt "Processor Type"
- default X86
-
-config X86
- bool "X86"
- help
- Choose this option if your computer is a X86 architecture.
-
-config X86_64
- bool "X86_64"
- help
- Choose this option if your computer is a X86 architecture.
-
-endchoice
-
-endmenu
-
-config HAVE_ARCH_DEV_ALLOC_SKB
- bool
- default y
-
-source "init/Kconfig"
-
-if X86
-source "arch/xen/i386/Kconfig"
-endif
-
-menu "Executable file formats"
-
-source "fs/Kconfig.binfmt"
-
-endmenu
-
-source "arch/xen/Kconfig.drivers"
-
-source "fs/Kconfig"
-
-source "security/Kconfig"
-
-source "crypto/Kconfig"
-
-source "lib/Kconfig"
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/arch/xen/Kconfig.drivers
--- a/linux-2.6.11-xen-sparse/arch/xen/Kconfig.drivers Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,94 +0,0 @@
-# arch/xen/Kconfig.drivers
-
-menu "Device Drivers"
-
-source "drivers/base/Kconfig"
-
-if XEN_PHYSDEV_ACCESS
-source "drivers/mtd/Kconfig"
-source "drivers/parport/Kconfig"
-source "drivers/pnp/Kconfig"
-endif
-
-source "drivers/block/Kconfig"
-
-if XEN_PHYSDEV_ACCESS
-source "drivers/ide/Kconfig"
-endif
-
-source "drivers/scsi/Kconfig"
-
-if XEN_PHYSDEV_ACCESS
-source "drivers/cdrom/Kconfig"
-endif
-
-source "drivers/md/Kconfig"
-
-if XEN_PHYSDEV_ACCESS
-source "drivers/message/fusion/Kconfig"
-source "drivers/ieee1394/Kconfig"
-source "drivers/message/i2o/Kconfig"
-endif
-
-source "net/Kconfig"
-
-if XEN_PHYSDEV_ACCESS
-source "drivers/isdn/Kconfig"
-source "drivers/telephony/Kconfig"
-source "drivers/input/Kconfig"
-source "drivers/char/Kconfig"
-source "drivers/i2c/Kconfig"
-source "drivers/w1/Kconfig"
-source "drivers/misc/Kconfig"
-source "drivers/media/Kconfig"
-source "drivers/video/Kconfig"
-source "sound/Kconfig"
-source "drivers/usb/Kconfig"
-source "drivers/mmc/Kconfig"
-source "drivers/infiniband/Kconfig"
-endif
-
-if !XEN_PHYSDEV_ACCESS
-
-menu "Character devices"
-
-config UNIX98_PTYS
- bool
- default y
-
-config LEGACY_PTYS
- bool "Legacy (BSD) PTY support"
- default y
- ---help---
- A pseudo terminal (PTY) is a software device consisting of two
- halves: a master and a slave. The slave device behaves identical to
- a physical terminal; the master device is used by a process to
- read data from and write data to the slave, thereby emulating a
- terminal. Typical programs for the master side are telnet servers
- and xterms.
-
- Linux has traditionally used the BSD-like names /dev/ptyxx
- for masters and /dev/ttyxx for slaves of pseudo
- terminals. This scheme has a number of problems, including
- security. This option enables these legacy devices; on most
- systems, it is safe to say N.
-
-
-config LEGACY_PTY_COUNT
- int "Maximum number of legacy PTY in use"
- depends on LEGACY_PTYS
- range 1 256
- default "256"
- ---help---
- The maximum number of legacy PTYs that can be used at any one time.
- The default is 256, and should be more than enough. Embedded
- systems may want to reduce this to save memory.
-
- When not in use, each legacy PTY occupies 12 bytes on 32-bit
- architectures and 24 bytes on 64-bit architectures.
-
-endmenu
-
-endif
-
-endmenu
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/arch/xen/Makefile
--- a/linux-2.6.11-xen-sparse/arch/xen/Makefile Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,81 +0,0 @@
-#
-# xen/Makefile
-#
-# This file is included by the global makefile so that you can add your own
-# architecture-specific flags and dependencies. Remember to do have actions
-# for "archclean" cleaning up for this architecture.
-#
-# This file is subject to the terms and conditions of the GNU General Public
-# License. See the file "COPYING" in the main directory of this archive
-# for more details.
-#
-# Copyright (C) 2004 by Christian Limpach
-#
-
-XENARCH := $(subst ",,$(CONFIG_XENARCH))
-
-# pick up headers from include/asm-xen/asm in preference over include/asm
-NOSTDINC_FLAGS = -nostdinc -iwithprefix include/asm-xen -Iinclude/asm-xen -iwithprefix include
-
-# make uname return the processor arch
-UTS_MACHINE := $(XENARCH)
-
-core-y += arch/xen/kernel/
-
-include/.asm-ignore: include/asm
- @rm -f include/.asm-ignore
- @mv include/asm include/.asm-ignore
- @echo ' SYMLINK include/asm -> include/asm-$(XENARCH)'
- $(Q)if [ ! -d include ]; then mkdir -p include; fi;
- @ln -fsn $(srctree)/include/asm-$(XENARCH) include/asm
-
-include/asm-xen/asm:
- @echo ' SYMLINK $@ -> include/asm-xen/asm-$(XENARCH)'
- @mkdir -p include/asm-xen
- @ln -fsn $(srctree)/include/asm-xen/asm-$(XENARCH) $@
-
-arch/xen/arch:
- @rm -f $@
- @mkdir -p arch/xen
- @ln -fsn $(srctree)/arch/xen/$(XENARCH) $@
-
-arch/$(XENARCH)/kernel/asm-offsets.s: include/asm include/linux/version.h \
- include/config/MARKER
-
-include/asm-$(ARCH)/asm_offsets.h: arch/$(XENARCH)/kernel/asm-offsets.s
- $(call filechk,gen-asm-offsets)
-
-prepare: include/.asm-ignore include/asm-xen/asm \
- arch/xen/arch include/asm-$(ARCH)/asm_offsets.h ;
-
-all: vmlinuz
-
-vmlinuz: vmlinux
- $(Q)$(MAKE) $(build)=arch/xen/boot vmlinuz
-
-XINSTALL_NAME ?= $(KERNELRELEASE)
-install: vmlinuz
- mkdir -p $(INSTALL_PATH)/boot
- ln -f -s vmlinuz-$(XINSTALL_NAME)$(INSTALL_SUFFIX) $(INSTALL_PATH)/boot/vmlinuz-$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(XENGUEST)$(INSTALL_SUFFIX)
- rm -f $(INSTALL_PATH)/boot/vmlinuz-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
- install -m0644 vmlinuz $(INSTALL_PATH)/boot/vmlinuz-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
- install -m0644 vmlinux $(INSTALL_PATH)/boot/vmlinux-syms-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
- install -m0664 .config $(INSTALL_PATH)/boot/config-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
- install -m0664 System.map $(INSTALL_PATH)/boot/System.map-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
- ln -f -s vmlinuz-$(XINSTALL_NAME)$(INSTALL_SUFFIX) $(INSTALL_PATH)/boot/vmlinuz-$(VERSION).$(PATCHLEVEL)$(XENGUEST)$(INSTALL_SUFFIX)
- mkdir -p $(INSTALL_PATH)/usr/include/xen/linux
- install -m0644 include/asm-xen/linux-public/*.h $(INSTALL_PATH)/usr/include/xen/linux
-
-archclean:
- @if [ -e arch/xen/arch ]; then $(MAKE) $(clean)=arch/xen/arch; fi;
- @rm -f arch/xen/arch include/.asm-ignore include/asm-xen/asm
- @rm -f vmlinux-stripped vmlinuz
-
-define archhelp
- echo '* vmlinuz - Compressed kernel image'
- echo ' install - Install kernel image and config file'
-endef
-
-ifneq ($(XENARCH),)
-include $(srctree)/arch/xen/$(XENARCH)/Makefile
-endif
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/arch/xen/boot/Makefile
--- a/linux-2.6.11-xen-sparse/arch/xen/boot/Makefile Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,8 +0,0 @@
-
-OBJCOPYFLAGS := -g --strip-unneeded
-
-vmlinuz: vmlinux-stripped FORCE
- $(call if_changed,gzip)
-
-vmlinux-stripped: vmlinux FORCE
- $(call if_changed,objcopy)
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/arch/xen/configs/xen0_defconfig
--- a/linux-2.6.11-xen-sparse/arch/xen/configs/xen0_defconfig Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,1204 +0,0 @@
-#
-# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.11-xen0
-# Tue May 3 13:22:55 2005
-#
-CONFIG_XEN=y
-CONFIG_ARCH_XEN=y
-CONFIG_NO_IDLE_HZ=y
-
-#
-# XEN
-#
-CONFIG_XEN_PRIVILEGED_GUEST=y
-CONFIG_XEN_PHYSDEV_ACCESS=y
-CONFIG_XEN_BLKDEV_BACKEND=y
-CONFIG_XEN_NETDEV_BACKEND=y
-CONFIG_XEN_BLKDEV_FRONTEND=y
-CONFIG_XEN_NETDEV_FRONTEND=y
-# CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
-CONFIG_XEN_WRITABLE_PAGETABLES=y
-CONFIG_XEN_SCRUB_PAGES=y
-CONFIG_X86=y
-# CONFIG_X86_64 is not set
-CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
-
-#
-# Code maturity level options
-#
-CONFIG_EXPERIMENTAL=y
-# CONFIG_CLEAN_COMPILE is not set
-CONFIG_BROKEN=y
-CONFIG_BROKEN_ON_SMP=y
-CONFIG_LOCK_KERNEL=y
-
-#
-# General setup
-#
-CONFIG_LOCALVERSION=""
-CONFIG_SWAP=y
-CONFIG_SYSVIPC=y
-# CONFIG_POSIX_MQUEUE is not set
-# CONFIG_BSD_PROCESS_ACCT is not set
-CONFIG_SYSCTL=y
-# CONFIG_AUDIT is not set
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_HOTPLUG=y
-CONFIG_KOBJECT_UEVENT=y
-# CONFIG_IKCONFIG is not set
-# CONFIG_EMBEDDED is not set
-CONFIG_KALLSYMS=y
-# CONFIG_KALLSYMS_ALL is not set
-# CONFIG_KALLSYMS_EXTRA_PASS is not set
-CONFIG_FUTEX=y
-CONFIG_EPOLL=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_SHMEM=y
-CONFIG_CC_ALIGN_FUNCTIONS=0
-CONFIG_CC_ALIGN_LABELS=0
-CONFIG_CC_ALIGN_LOOPS=0
-CONFIG_CC_ALIGN_JUMPS=0
-# CONFIG_TINY_SHMEM is not set
-
-#
-# Loadable module support
-#
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_MODULE_FORCE_UNLOAD is not set
-CONFIG_OBSOLETE_MODPARM=y
-# CONFIG_MODVERSIONS is not set
-# CONFIG_MODULE_SRCVERSION_ALL is not set
-CONFIG_KMOD=y
-
-#
-# X86 Processor Configuration
-#
-CONFIG_XENARCH="i386"
-CONFIG_MMU=y
-CONFIG_UID16=y
-CONFIG_GENERIC_ISA_DMA=y
-CONFIG_GENERIC_IOMAP=y
-# CONFIG_M386 is not set
-# CONFIG_M486 is not set
-# CONFIG_M586 is not set
-# CONFIG_M586TSC is not set
-# CONFIG_M586MMX is not set
-# CONFIG_M686 is not set
-# CONFIG_MPENTIUMII is not set
-# CONFIG_MPENTIUMIII is not set
-# CONFIG_MPENTIUMM is not set
-CONFIG_MPENTIUM4=y
-# CONFIG_MK6 is not set
-# CONFIG_MK7 is not set
-# CONFIG_MK8 is not set
-# CONFIG_MCRUSOE is not set
-# CONFIG_MEFFICEON is not set
-# CONFIG_MWINCHIPC6 is not set
-# CONFIG_MWINCHIP2 is not set
-# CONFIG_MWINCHIP3D is not set
-# CONFIG_MCYRIXIII is not set
-# CONFIG_MVIAC3_2 is not set
-# CONFIG_X86_GENERIC is not set
-CONFIG_X86_CMPXCHG=y
-CONFIG_X86_XADD=y
-CONFIG_X86_L1_CACHE_SHIFT=7
-CONFIG_RWSEM_XCHGADD_ALGORITHM=y
-CONFIG_GENERIC_CALIBRATE_DELAY=y
-CONFIG_X86_WP_WORKS_OK=y
-CONFIG_X86_INVLPG=y
-CONFIG_X86_BSWAP=y
-CONFIG_X86_POPAD_OK=y
-CONFIG_X86_GOOD_APIC=y
-CONFIG_X86_INTEL_USERCOPY=y
-CONFIG_X86_USE_PPRO_CHECKSUM=y
-# CONFIG_HPET_TIMER is not set
-# CONFIG_HPET_EMULATE_RTC is not set
-# CONFIG_SMP is not set
-CONFIG_PREEMPT=y
-CONFIG_PREEMPT_BKL=y
-CONFIG_MICROCODE=y
-CONFIG_X86_CPUID=y
-
-#
-# Firmware Drivers
-#
-# CONFIG_EDD is not set
-CONFIG_NOHIGHMEM=y
-# CONFIG_HIGHMEM4G is not set
-CONFIG_MTRR=y
-CONFIG_HAVE_DEC_LOCK=y
-# CONFIG_REGPARM is not set
-
-#
-# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
-#
-CONFIG_PCI=y
-CONFIG_PCI_DIRECT=y
-CONFIG_PCI_LEGACY_PROC=y
-# CONFIG_PCI_NAMES is not set
-CONFIG_ISA=y
-# CONFIG_EISA is not set
-# CONFIG_MCA is not set
-# CONFIG_SCx200 is not set
-
-#
-# PCCARD (PCMCIA/CardBus) support
-#
-# CONFIG_PCCARD is not set
-
-#
-# PC-card bridges
-#
-CONFIG_PCMCIA_PROBE=y
-
-#
-# PCI Hotplug Support
-#
-# CONFIG_HOTPLUG_PCI is not set
-
-#
-# Kernel hacking
-#
-CONFIG_DEBUG_KERNEL=y
-CONFIG_EARLY_PRINTK=y
-# CONFIG_DEBUG_STACKOVERFLOW is not set
-# CONFIG_DEBUG_STACK_USAGE is not set
-# CONFIG_DEBUG_SLAB is not set
-CONFIG_MAGIC_SYSRQ=y
-# CONFIG_DEBUG_SPINLOCK is not set
-# CONFIG_DEBUG_PAGEALLOC is not set
-# CONFIG_DEBUG_INFO is not set
-# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
-# CONFIG_FRAME_POINTER is not set
-# CONFIG_4KSTACKS is not set
-CONFIG_GENERIC_HARDIRQS=y
-CONFIG_GENERIC_IRQ_PROBE=y
-CONFIG_X86_BIOS_REBOOT=y
-CONFIG_PC=y
-
-#
-# Executable file formats
-#
-CONFIG_BINFMT_ELF=y
-# CONFIG_BINFMT_AOUT is not set
-# CONFIG_BINFMT_MISC is not set
-
-#
-# Device Drivers
-#
-
-#
-# Generic Driver Options
-#
-# CONFIG_STANDALONE is not set
-CONFIG_PREVENT_FIRMWARE_BUILD=y
-# CONFIG_FW_LOADER is not set
-# CONFIG_DEBUG_DRIVER is not set
-
-#
-# Memory Technology Devices (MTD)
-#
-# CONFIG_MTD is not set
-
-#
-# Parallel port support
-#
-# CONFIG_PARPORT is not set
-
-#
-# Plug and Play support
-#
-# CONFIG_PNP is not set
-
-#
-# Block devices
-#
-CONFIG_BLK_DEV_FD=y
-# CONFIG_BLK_DEV_XD is not set
-# CONFIG_BLK_CPQ_DA is not set
-CONFIG_BLK_CPQ_CISS_DA=y
-# CONFIG_CISS_SCSI_TAPE is not set
-# CONFIG_BLK_DEV_DAC960 is not set
-# CONFIG_BLK_DEV_UMEM is not set
-# CONFIG_BLK_DEV_COW_COMMON is not set
-CONFIG_BLK_DEV_LOOP=y
-# CONFIG_BLK_DEV_CRYPTOLOOP is not set
-# CONFIG_BLK_DEV_NBD is not set
-# CONFIG_BLK_DEV_SX8 is not set
-# CONFIG_BLK_DEV_UB is not set
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_COUNT=16
-CONFIG_BLK_DEV_RAM_SIZE=4096
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE=""
-# CONFIG_LBD is not set
-# CONFIG_CDROM_PKTCDVD is not set
-
-#
-# IO Schedulers
-#
-CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
-CONFIG_IOSCHED_DEADLINE=y
-CONFIG_IOSCHED_CFQ=y
-# CONFIG_ATA_OVER_ETH is not set
-
-#
-# ATA/ATAPI/MFM/RLL support
-#
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDE=y
-
-#
-# Please see Documentation/ide.txt for help/info on IDE drives
-#
-# CONFIG_BLK_DEV_IDE_SATA is not set
-# CONFIG_BLK_DEV_HD_IDE is not set
-CONFIG_BLK_DEV_IDEDISK=y
-# CONFIG_IDEDISK_MULTI_MODE is not set
-CONFIG_BLK_DEV_IDECD=y
-# CONFIG_BLK_DEV_IDETAPE is not set
-# CONFIG_BLK_DEV_IDEFLOPPY is not set
-# CONFIG_BLK_DEV_IDESCSI is not set
-# CONFIG_IDE_TASK_IOCTL is not set
-
-#
-# IDE chipset support/bugfixes
-#
-CONFIG_IDE_GENERIC=y
-# CONFIG_BLK_DEV_CMD640 is not set
-CONFIG_BLK_DEV_IDEPCI=y
-# CONFIG_IDEPCI_SHARE_IRQ is not set
-# CONFIG_BLK_DEV_OFFBOARD is not set
-CONFIG_BLK_DEV_GENERIC=y
-# CONFIG_BLK_DEV_OPTI621 is not set
-# CONFIG_BLK_DEV_RZ1000 is not set
-CONFIG_BLK_DEV_IDEDMA_PCI=y
-# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
-CONFIG_IDEDMA_PCI_AUTO=y
-# CONFIG_IDEDMA_ONLYDISK is not set
-# CONFIG_BLK_DEV_AEC62XX is not set
-# CONFIG_BLK_DEV_ALI15X3 is not set
-# CONFIG_BLK_DEV_AMD74XX is not set
-# CONFIG_BLK_DEV_ATIIXP is not set
-# CONFIG_BLK_DEV_CMD64X is not set
-# CONFIG_BLK_DEV_TRIFLEX is not set
-# CONFIG_BLK_DEV_CY82C693 is not set
-# CONFIG_BLK_DEV_CS5520 is not set
-# CONFIG_BLK_DEV_CS5530 is not set
-# CONFIG_BLK_DEV_HPT34X is not set
-# CONFIG_BLK_DEV_HPT366 is not set
-# CONFIG_BLK_DEV_SC1200 is not set
-CONFIG_BLK_DEV_PIIX=y
-# CONFIG_BLK_DEV_NS87415 is not set
-# CONFIG_BLK_DEV_PDC202XX_OLD is not set
-# CONFIG_BLK_DEV_PDC202XX_NEW is not set
-CONFIG_BLK_DEV_SVWKS=y
-# CONFIG_BLK_DEV_SIIMAGE is not set
-# CONFIG_BLK_DEV_SIS5513 is not set
-# CONFIG_BLK_DEV_SLC90E66 is not set
-# CONFIG_BLK_DEV_TRM290 is not set
-# CONFIG_BLK_DEV_VIA82CXXX is not set
-# CONFIG_IDE_ARM is not set
-# CONFIG_IDE_CHIPSETS is not set
-CONFIG_BLK_DEV_IDEDMA=y
-# CONFIG_IDEDMA_IVB is not set
-CONFIG_IDEDMA_AUTO=y
-# CONFIG_BLK_DEV_HD is not set
-
-#
-# SCSI device support
-#
-CONFIG_SCSI=y
-CONFIG_SCSI_PROC_FS=y
-
-#
-# SCSI support type (disk, tape, CD-ROM)
-#
-CONFIG_BLK_DEV_SD=y
-# CONFIG_CHR_DEV_ST is not set
-# CONFIG_CHR_DEV_OSST is not set
-# CONFIG_BLK_DEV_SR is not set
-# CONFIG_CHR_DEV_SG is not set
-
-#
-# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-#
-# CONFIG_SCSI_MULTI_LUN is not set
-# CONFIG_SCSI_CONSTANTS is not set
-# CONFIG_SCSI_LOGGING is not set
-
-#
-# SCSI Transport Attributes
-#
-# CONFIG_SCSI_SPI_ATTRS is not set
-# CONFIG_SCSI_FC_ATTRS is not set
-# CONFIG_SCSI_ISCSI_ATTRS is not set
-
-#
-# SCSI low-level drivers
-#
-CONFIG_BLK_DEV_3W_XXXX_RAID=y
-# CONFIG_SCSI_3W_9XXX is not set
-# CONFIG_SCSI_7000FASST is not set
-# CONFIG_SCSI_ACARD is not set
-# CONFIG_SCSI_AHA152X is not set
-# CONFIG_SCSI_AHA1542 is not set
-CONFIG_SCSI_AACRAID=y
-CONFIG_SCSI_AIC7XXX=y
-CONFIG_AIC7XXX_CMDS_PER_DEVICE=32
-CONFIG_AIC7XXX_RESET_DELAY_MS=15000
-CONFIG_AIC7XXX_DEBUG_ENABLE=y
-CONFIG_AIC7XXX_DEBUG_MASK=0
-CONFIG_AIC7XXX_REG_PRETTY_PRINT=y
-# CONFIG_SCSI_AIC7XXX_OLD is not set
-CONFIG_SCSI_AIC79XX=y
-CONFIG_AIC79XX_CMDS_PER_DEVICE=32
-CONFIG_AIC79XX_RESET_DELAY_MS=15000
-# CONFIG_AIC79XX_ENABLE_RD_STRM is not set
-CONFIG_AIC79XX_DEBUG_ENABLE=y
-CONFIG_AIC79XX_DEBUG_MASK=0
-CONFIG_AIC79XX_REG_PRETTY_PRINT=y
-# CONFIG_SCSI_DPT_I2O is not set
-# CONFIG_SCSI_ADVANSYS is not set
-# CONFIG_SCSI_IN2000 is not set
-CONFIG_MEGARAID_NEWGEN=y
-# CONFIG_MEGARAID_MM is not set
-CONFIG_SCSI_SATA=y
-# CONFIG_SCSI_SATA_AHCI is not set
-# CONFIG_SCSI_SATA_SVW is not set
-CONFIG_SCSI_ATA_PIIX=y
-# CONFIG_SCSI_SATA_NV is not set
-CONFIG_SCSI_SATA_PROMISE=y
-# CONFIG_SCSI_SATA_QSTOR is not set
-CONFIG_SCSI_SATA_SX4=y
-CONFIG_SCSI_SATA_SIL=y
-# CONFIG_SCSI_SATA_SIS is not set
-# CONFIG_SCSI_SATA_ULI is not set
-# CONFIG_SCSI_SATA_VIA is not set
-# CONFIG_SCSI_SATA_VITESSE is not set
-CONFIG_SCSI_BUSLOGIC=y
-# CONFIG_SCSI_OMIT_FLASHPOINT is not set
-# CONFIG_SCSI_CPQFCTS is not set
-# CONFIG_SCSI_DMX3191D is not set
-# CONFIG_SCSI_DTC3280 is not set
-# CONFIG_SCSI_EATA is not set
-# CONFIG_SCSI_EATA_PIO is not set
-# CONFIG_SCSI_FUTURE_DOMAIN is not set
-# CONFIG_SCSI_GDTH is not set
-# CONFIG_SCSI_GENERIC_NCR5380 is not set
-# CONFIG_SCSI_GENERIC_NCR5380_MMIO is not set
-# CONFIG_SCSI_IPS is not set
-# CONFIG_SCSI_INITIO is not set
-# CONFIG_SCSI_INIA100 is not set
-# CONFIG_SCSI_NCR53C406A is not set
-# CONFIG_SCSI_SYM53C8XX_2 is not set
-# CONFIG_SCSI_IPR is not set
-# CONFIG_SCSI_PAS16 is not set
-# CONFIG_SCSI_PCI2000 is not set
-# CONFIG_SCSI_PCI2220I is not set
-# CONFIG_SCSI_PSI240I is not set
-# CONFIG_SCSI_QLOGIC_FAS is not set
-# CONFIG_SCSI_QLOGIC_ISP is not set
-# CONFIG_SCSI_QLOGIC_FC is not set
-# CONFIG_SCSI_QLOGIC_1280 is not set
-CONFIG_SCSI_QLA2XXX=y
-# CONFIG_SCSI_QLA21XX is not set
-# CONFIG_SCSI_QLA22XX is not set
-# CONFIG_SCSI_QLA2300 is not set
-# CONFIG_SCSI_QLA2322 is not set
-# CONFIG_SCSI_QLA6312 is not set
-# CONFIG_SCSI_SEAGATE is not set
-# CONFIG_SCSI_SYM53C416 is not set
-# CONFIG_SCSI_DC395x is not set
-# CONFIG_SCSI_DC390T is not set
-# CONFIG_SCSI_T128 is not set
-# CONFIG_SCSI_U14_34F is not set
-# CONFIG_SCSI_ULTRASTOR is not set
-# CONFIG_SCSI_NSP32 is not set
-# CONFIG_SCSI_DEBUG is not set
-
-#
-# Old CD-ROM drivers (not SCSI, not IDE)
-#
-# CONFIG_CD_NO_IDESCSI is not set
-
-#
-# Multi-device support (RAID and LVM)
-#
-CONFIG_MD=y
-CONFIG_BLK_DEV_MD=y
-# CONFIG_MD_LINEAR is not set
-CONFIG_MD_RAID0=y
-CONFIG_MD_RAID1=y
-# CONFIG_MD_RAID10 is not set
-CONFIG_MD_RAID5=y
-# CONFIG_MD_RAID6 is not set
-# CONFIG_MD_MULTIPATH is not set
-# CONFIG_MD_FAULTY is not set
-CONFIG_BLK_DEV_DM=y
-# CONFIG_DM_CRYPT is not set
-CONFIG_DM_SNAPSHOT=y
-CONFIG_DM_MIRROR=y
-# CONFIG_DM_ZERO is not set
-
-#
-# Fusion MPT device support
-#
-CONFIG_FUSION=y
-CONFIG_FUSION_MAX_SGE=40
-# CONFIG_FUSION_CTL is not set
-
-#
-# IEEE 1394 (FireWire) support
-#
-# CONFIG_IEEE1394 is not set
-
-#
-# I2O device support
-#
-# CONFIG_I2O is not set
-
-#
-# Networking support
-#
-CONFIG_NET=y
-
-#
-# Networking options
-#
-CONFIG_PACKET=y
-# CONFIG_PACKET_MMAP is not set
-# CONFIG_NETLINK_DEV is not set
-CONFIG_UNIX=y
-# CONFIG_NET_KEY is not set
-CONFIG_INET=y
-# CONFIG_IP_MULTICAST is not set
-# CONFIG_IP_ADVANCED_ROUTER is not set
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-# CONFIG_IP_PNP_BOOTP is not set
-# CONFIG_IP_PNP_RARP is not set
-# CONFIG_NET_IPIP is not set
-# CONFIG_NET_IPGRE is not set
-# CONFIG_ARPD is not set
-# CONFIG_SYN_COOKIES is not set
-# CONFIG_INET_AH is not set
-# CONFIG_INET_ESP is not set
-# CONFIG_INET_IPCOMP is not set
-# CONFIG_INET_TUNNEL is not set
-CONFIG_IP_TCPDIAG=y
-# CONFIG_IP_TCPDIAG_IPV6 is not set
-
-#
-# IP: Virtual Server Configuration
-#
-# CONFIG_IP_VS is not set
-# CONFIG_IPV6 is not set
-CONFIG_NETFILTER=y
-# CONFIG_NETFILTER_DEBUG is not set
-CONFIG_BRIDGE_NETFILTER=y
-
-#
-# IP: Netfilter Configuration
-#
-CONFIG_IP_NF_CONNTRACK=m
-CONFIG_IP_NF_CT_ACCT=y
-# CONFIG_IP_NF_CONNTRACK_MARK is not set
-# CONFIG_IP_NF_CT_PROTO_SCTP is not set
-CONFIG_IP_NF_FTP=m
-# CONFIG_IP_NF_IRC is not set
-# CONFIG_IP_NF_TFTP is not set
-# CONFIG_IP_NF_AMANDA is not set
-# CONFIG_IP_NF_QUEUE is not set
-CONFIG_IP_NF_IPTABLES=m
-# CONFIG_IP_NF_MATCH_LIMIT is not set
-CONFIG_IP_NF_MATCH_IPRANGE=m
-# CONFIG_IP_NF_MATCH_MAC is not set
-# CONFIG_IP_NF_MATCH_PKTTYPE is not set
-# CONFIG_IP_NF_MATCH_MARK is not set
-# CONFIG_IP_NF_MATCH_MULTIPORT is not set
-# CONFIG_IP_NF_MATCH_TOS is not set
-# CONFIG_IP_NF_MATCH_RECENT is not set
-# CONFIG_IP_NF_MATCH_ECN is not set
-# CONFIG_IP_NF_MATCH_DSCP is not set
-# CONFIG_IP_NF_MATCH_AH_ESP is not set
-# CONFIG_IP_NF_MATCH_LENGTH is not set
-# CONFIG_IP_NF_MATCH_TTL is not set
-# CONFIG_IP_NF_MATCH_TCPMSS is not set
-# CONFIG_IP_NF_MATCH_HELPER is not set
-# CONFIG_IP_NF_MATCH_STATE is not set
-# CONFIG_IP_NF_MATCH_CONNTRACK is not set
-# CONFIG_IP_NF_MATCH_OWNER is not set
-# CONFIG_IP_NF_MATCH_PHYSDEV is not set
-# CONFIG_IP_NF_MATCH_ADDRTYPE is not set
-# CONFIG_IP_NF_MATCH_REALM is not set
-# CONFIG_IP_NF_MATCH_SCTP is not set
-# CONFIG_IP_NF_MATCH_COMMENT is not set
-# CONFIG_IP_NF_MATCH_HASHLIMIT is not set
-CONFIG_IP_NF_FILTER=m
-CONFIG_IP_NF_TARGET_REJECT=m
-# CONFIG_IP_NF_TARGET_LOG is not set
-# CONFIG_IP_NF_TARGET_ULOG is not set
-# CONFIG_IP_NF_TARGET_TCPMSS is not set
-CONFIG_IP_NF_NAT=m
-CONFIG_IP_NF_NAT_NEEDED=y
-CONFIG_IP_NF_TARGET_MASQUERADE=m
-# CONFIG_IP_NF_TARGET_REDIRECT is not set
-# CONFIG_IP_NF_TARGET_NETMAP is not set
-# CONFIG_IP_NF_TARGET_SAME is not set
-# CONFIG_IP_NF_NAT_SNMP_BASIC is not set
-CONFIG_IP_NF_NAT_FTP=m
-# CONFIG_IP_NF_MANGLE is not set
-# CONFIG_IP_NF_RAW is not set
-# CONFIG_IP_NF_ARPTABLES is not set
-
-#
-# Bridge: Netfilter Configuration
-#
-# CONFIG_BRIDGE_NF_EBTABLES is not set
-
-#
-# SCTP Configuration (EXPERIMENTAL)
-#
-# CONFIG_IP_SCTP is not set
-# CONFIG_ATM is not set
-CONFIG_BRIDGE=y
-# CONFIG_VLAN_8021Q is not set
-# CONFIG_DECNET is not set
-# CONFIG_LLC2 is not set
-# CONFIG_IPX is not set
-# CONFIG_ATALK is not set
-# CONFIG_X25 is not set
-# CONFIG_LAPB is not set
-# CONFIG_NET_DIVERT is not set
-# CONFIG_ECONET is not set
-# CONFIG_WAN_ROUTER is not set
-
-#
-# QoS and/or fair queueing
-#
-# CONFIG_NET_SCHED is not set
-# CONFIG_NET_CLS_ROUTE is not set
-
-#
-# Network testing
-#
-# CONFIG_NET_PKTGEN is not set
-# CONFIG_NETPOLL is not set
-# CONFIG_NET_POLL_CONTROLLER is not set
-# CONFIG_HAMRADIO is not set
-# CONFIG_IRDA is not set
-# CONFIG_BT is not set
-CONFIG_NETDEVICES=y
-# CONFIG_DUMMY is not set
-# CONFIG_BONDING is not set
-# CONFIG_EQUALIZER is not set
-# CONFIG_TUN is not set
-
-#
-# ARCnet devices
-#
-# CONFIG_ARCNET is not set
-
-#
-# Ethernet (10 or 100Mbit)
-#
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
-# CONFIG_HAPPYMEAL is not set
-# CONFIG_SUNGEM is not set
-CONFIG_NET_VENDOR_3COM=y
-# CONFIG_EL1 is not set
-# CONFIG_EL2 is not set
-# CONFIG_ELPLUS is not set
-# CONFIG_EL16 is not set
-# CONFIG_EL3 is not set
-# CONFIG_3C515 is not set
-CONFIG_VORTEX=y
-# CONFIG_TYPHOON is not set
-# CONFIG_LANCE is not set
-# CONFIG_NET_VENDOR_SMC is not set
-# CONFIG_NET_VENDOR_RACAL is not set
-
-#
-# Tulip family network device support
-#
-CONFIG_NET_TULIP=y
-# CONFIG_DE2104X is not set
-CONFIG_TULIP=y
-# CONFIG_TULIP_MWI is not set
-# CONFIG_TULIP_MMIO is not set
-# CONFIG_TULIP_NAPI is not set
-# CONFIG_DE4X5 is not set
-# CONFIG_WINBOND_840 is not set
-# CONFIG_DM9102 is not set
-# CONFIG_AT1700 is not set
-# CONFIG_DEPCA is not set
-# CONFIG_HP100 is not set
-# CONFIG_NET_ISA is not set
-CONFIG_NET_PCI=y
-CONFIG_PCNET32=y
-# CONFIG_AMD8111_ETH is not set
-# CONFIG_ADAPTEC_STARFIRE is not set
-# CONFIG_AC3200 is not set
-# CONFIG_APRICOT is not set
-# CONFIG_B44 is not set
-# CONFIG_FORCEDETH is not set
-# CONFIG_CS89x0 is not set
-# CONFIG_DGRS is not set
-# CONFIG_EEPRO100 is not set
-CONFIG_E100=y
-# CONFIG_E100_NAPI is not set
-# CONFIG_FEALNX is not set
-# CONFIG_NATSEMI is not set
-CONFIG_NE2K_PCI=y
-# CONFIG_8139CP is not set
-CONFIG_8139TOO=y
-CONFIG_8139TOO_PIO=y
-# CONFIG_8139TOO_TUNE_TWISTER is not set
-# CONFIG_8139TOO_8129 is not set
-# CONFIG_8139_OLD_RX_RESET is not set
-# CONFIG_SIS900 is not set
-# CONFIG_EPIC100 is not set
-# CONFIG_SUNDANCE is not set
-# CONFIG_TLAN is not set
-CONFIG_VIA_RHINE=y
-# CONFIG_VIA_RHINE_MMIO is not set
-# CONFIG_NET_POCKET is not set
-
-#
-# Ethernet (1000 Mbit)
-#
-CONFIG_ACENIC=y
-# CONFIG_ACENIC_OMIT_TIGON_I is not set
-# CONFIG_DL2K is not set
-CONFIG_E1000=y
-# CONFIG_E1000_NAPI is not set
-# CONFIG_NS83820 is not set
-# CONFIG_HAMACHI is not set
-# CONFIG_YELLOWFIN is not set
-# CONFIG_R8169 is not set
-# CONFIG_SK98LIN is not set
-# CONFIG_VIA_VELOCITY is not set
-CONFIG_TIGON3=y
-
-#
-# Ethernet (10000 Mbit)
-#
-# CONFIG_IXGB is not set
-# CONFIG_S2IO is not set
-
-#
-# Token Ring devices
-#
-# CONFIG_TR is not set
-
-#
-# Wireless LAN (non-hamradio)
-#
-# CONFIG_NET_RADIO is not set
-
-#
-# Wan interfaces
-#
-# CONFIG_WAN is not set
-# CONFIG_FDDI is not set
-# CONFIG_HIPPI is not set
-# CONFIG_PPP is not set
-# CONFIG_SLIP is not set
-# CONFIG_NET_FC is not set
-# CONFIG_SHAPER is not set
-# CONFIG_NETCONSOLE is not set
-
-#
-# ISDN subsystem
-#
-# CONFIG_ISDN is not set
-
-#
-# Telephony Support
-#
-# CONFIG_PHONE is not set
-
-#
-# Input device support
-#
-CONFIG_INPUT=y
-
-#
-# Userland interfaces
-#
-CONFIG_INPUT_MOUSEDEV=y
-CONFIG_INPUT_MOUSEDEV_PSAUX=y
-CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
-CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
-# CONFIG_INPUT_JOYDEV is not set
-# CONFIG_INPUT_TSDEV is not set
-# CONFIG_INPUT_EVDEV is not set
-# CONFIG_INPUT_EVBUG is not set
-
-#
-# Input I/O drivers
-#
-# CONFIG_GAMEPORT is not set
-CONFIG_SOUND_GAMEPORT=y
-CONFIG_SERIO=y
-CONFIG_SERIO_I8042=y
-CONFIG_SERIO_SERPORT=y
-# CONFIG_SERIO_CT82C710 is not set
-# CONFIG_SERIO_PCIPS2 is not set
-CONFIG_SERIO_LIBPS2=y
-# CONFIG_SERIO_RAW is not set
-
-#
-# Input Device Drivers
-#
-CONFIG_INPUT_KEYBOARD=y
-CONFIG_KEYBOARD_ATKBD=y
-# CONFIG_KEYBOARD_SUNKBD is not set
-# CONFIG_KEYBOARD_LKKBD is not set
-# CONFIG_KEYBOARD_XTKBD is not set
-# CONFIG_KEYBOARD_NEWTON is not set
-CONFIG_INPUT_MOUSE=y
-CONFIG_MOUSE_PS2=y
-# CONFIG_MOUSE_SERIAL is not set
-# CONFIG_MOUSE_INPORT is not set
-# CONFIG_MOUSE_LOGIBM is not set
-# CONFIG_MOUSE_PC110PAD is not set
-# CONFIG_MOUSE_VSXXXAA is not set
-# CONFIG_INPUT_JOYSTICK is not set
-# CONFIG_INPUT_TOUCHSCREEN is not set
-# CONFIG_INPUT_MISC is not set
-
-#
-# Character devices
-#
-CONFIG_VT=y
-CONFIG_VT_CONSOLE=y
-CONFIG_HW_CONSOLE=y
-# CONFIG_SERIAL_NONSTANDARD is not set
-
-#
-# Serial drivers
-#
-# CONFIG_SERIAL_8250 is not set
-
-#
-# Non-8250 serial port support
-#
-CONFIG_UNIX98_PTYS=y
-CONFIG_LEGACY_PTYS=y
-CONFIG_LEGACY_PTY_COUNT=256
-
-#
-# IPMI
-#
-# CONFIG_IPMI_HANDLER is not set
-
-#
-# Watchdog Cards
-#
-# CONFIG_WATCHDOG is not set
-# CONFIG_HW_RANDOM is not set
-# CONFIG_NVRAM is not set
-# CONFIG_RTC is not set
-# CONFIG_GEN_RTC is not set
-# CONFIG_DTLK is not set
-# CONFIG_R3964 is not set
-# CONFIG_APPLICOM is not set
-# CONFIG_SONYPI is not set
-
-#
-# Ftape, the floppy tape device driver
-#
-# CONFIG_FTAPE is not set
-CONFIG_AGP=m
-CONFIG_AGP_ALI=m
-CONFIG_AGP_ATI=m
-CONFIG_AGP_AMD=m
-CONFIG_AGP_AMD64=m
-CONFIG_AGP_INTEL=m
-CONFIG_AGP_INTEL_MCH=m
-CONFIG_AGP_NVIDIA=m
-CONFIG_AGP_SIS=m
-CONFIG_AGP_SWORKS=m
-CONFIG_AGP_VIA=m
-# CONFIG_AGP_EFFICEON is not set
-CONFIG_DRM=m
-CONFIG_DRM_TDFX=m
-# CONFIG_DRM_GAMMA is not set
-CONFIG_DRM_R128=m
-CONFIG_DRM_RADEON=m
-CONFIG_DRM_I810=m
-CONFIG_DRM_I830=m
-CONFIG_DRM_I915=m
-CONFIG_DRM_MGA=m
-CONFIG_DRM_SIS=m
-# CONFIG_MWAVE is not set
-# CONFIG_RAW_DRIVER is not set
-# CONFIG_HANGCHECK_TIMER is not set
-
-#
-# I2C support
-#
-# CONFIG_I2C is not set
-
-#
-# Dallas's 1-wire bus
-#
-# CONFIG_W1 is not set
-
-#
-# Misc devices
-#
-# CONFIG_IBM_ASM is not set
-
-#
-# Multimedia devices
-#
-# CONFIG_VIDEO_DEV is not set
-
-#
-# Digital Video Broadcasting Devices
-#
-# CONFIG_DVB is not set
-
-#
-# Graphics support
-#
-# CONFIG_FB is not set
-# CONFIG_VIDEO_SELECT is not set
-
-#
-# Console display driver support
-#
-CONFIG_VGA_CONSOLE=y
-# CONFIG_MDA_CONSOLE is not set
-CONFIG_DUMMY_CONSOLE=y
-
-#
-# Sound
-#
-# CONFIG_SOUND is not set
-
-#
-# USB support
-#
-CONFIG_USB=y
-# CONFIG_USB_DEBUG is not set
-
-#
-# Miscellaneous USB options
-#
-# CONFIG_USB_DEVICEFS is not set
-# CONFIG_USB_BANDWIDTH is not set
-# CONFIG_USB_DYNAMIC_MINORS is not set
-# CONFIG_USB_OTG is not set
-CONFIG_USB_ARCH_HAS_HCD=y
-CONFIG_USB_ARCH_HAS_OHCI=y
-
-#
-# USB Host Controller Drivers
-#
-# CONFIG_USB_EHCI_HCD is not set
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_UHCI_HCD=y
-# CONFIG_USB_SL811_HCD is not set
-
-#
-# USB Device Class drivers
-#
-# CONFIG_USB_BLUETOOTH_TTY is not set
-# CONFIG_USB_ACM is not set
-# CONFIG_USB_PRINTER is not set
-
-#
-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
-#
-# CONFIG_USB_STORAGE is not set
-
-#
-# USB Input Devices
-#
-CONFIG_USB_HID=y
-CONFIG_USB_HIDINPUT=y
-# CONFIG_HID_FF is not set
-# CONFIG_USB_HIDDEV is not set
-# CONFIG_USB_AIPTEK is not set
-# CONFIG_USB_WACOM is not set
-# CONFIG_USB_KBTAB is not set
-# CONFIG_USB_POWERMATE is not set
-# CONFIG_USB_MTOUCH is not set
-# CONFIG_USB_EGALAX is not set
-# CONFIG_USB_XPAD is not set
-# CONFIG_USB_ATI_REMOTE is not set
-
-#
-# USB Imaging devices
-#
-# CONFIG_USB_MDC800 is not set
-# CONFIG_USB_MICROTEK is not set
-# CONFIG_USB_HPUSBSCSI is not set
-
-#
-# USB Multimedia devices
-#
-# CONFIG_USB_DABUSB is not set
-
-#
-# Video4Linux support is needed for USB Multimedia device support
-#
-
-#
-# USB Network Adapters
-#
-# CONFIG_USB_CATC is not set
-# CONFIG_USB_KAWETH is not set
-# CONFIG_USB_PEGASUS is not set
-# CONFIG_USB_RTL8150 is not set
-# CONFIG_USB_USBNET is not set
-
-#
-# USB port drivers
-#
-
-#
-# USB Serial Converter support
-#
-# CONFIG_USB_SERIAL is not set
-
-#
-# USB Miscellaneous drivers
-#
-# CONFIG_USB_EMI62 is not set
-# CONFIG_USB_EMI26 is not set
-# CONFIG_USB_AUERSWALD is not set
-# CONFIG_USB_RIO500 is not set
-# CONFIG_USB_LEGOTOWER is not set
-# CONFIG_USB_LCD is not set
-# CONFIG_USB_LED is not set
-# CONFIG_USB_CYTHERM is not set
-# CONFIG_USB_PHIDGETKIT is not set
-# CONFIG_USB_PHIDGETSERVO is not set
-# CONFIG_USB_IDMOUSE is not set
-
-#
-# USB ATM/DSL drivers
-#
-
-#
-# USB Gadget Support
-#
-# CONFIG_USB_GADGET is not set
-
-#
-# MMC/SD Card support
-#
-# CONFIG_MMC is not set
-
-#
-# InfiniBand support
-#
-# CONFIG_INFINIBAND is not set
-
-#
-# File systems
-#
-CONFIG_EXT2_FS=y
-# CONFIG_EXT2_FS_XATTR is not set
-CONFIG_EXT3_FS=y
-CONFIG_EXT3_FS_XATTR=y
-# CONFIG_EXT3_FS_POSIX_ACL is not set
-# CONFIG_EXT3_FS_SECURITY is not set
-CONFIG_JBD=y
-# CONFIG_JBD_DEBUG is not set
-CONFIG_FS_MBCACHE=y
-CONFIG_REISERFS_FS=y
-# CONFIG_REISERFS_CHECK is not set
-# CONFIG_REISERFS_PROC_INFO is not set
-# CONFIG_REISERFS_FS_XATTR is not set
-# CONFIG_JFS_FS is not set
-
-#
-# XFS support
-#
-# CONFIG_XFS_FS is not set
-# CONFIG_MINIX_FS is not set
-# CONFIG_ROMFS_FS is not set
-# CONFIG_QUOTA is not set
-CONFIG_DNOTIFY=y
-CONFIG_AUTOFS_FS=y
-# CONFIG_AUTOFS4_FS is not set
-
-#
-# CD-ROM/DVD Filesystems
-#
-CONFIG_ISO9660_FS=y
-CONFIG_JOLIET=y
-CONFIG_ZISOFS=y
-CONFIG_ZISOFS_FS=y
-# CONFIG_UDF_FS is not set
-
-#
-# DOS/FAT/NT Filesystems
-#
-CONFIG_FAT_FS=m
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=m
-CONFIG_FAT_DEFAULT_CODEPAGE=437
-CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
-# CONFIG_NTFS_FS is not set
-
-#
-# Pseudo filesystems
-#
-CONFIG_PROC_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_SYSFS=y
-# CONFIG_DEVFS_FS is not set
-# CONFIG_DEVPTS_FS_XATTR is not set
-CONFIG_TMPFS=y
-# CONFIG_TMPFS_XATTR is not set
-# CONFIG_HUGETLBFS is not set
-# CONFIG_HUGETLB_PAGE is not set
-CONFIG_RAMFS=y
-
-#
-# Miscellaneous filesystems
-#
-# CONFIG_ADFS_FS is not set
-# CONFIG_AFFS_FS is not set
-# CONFIG_HFS_FS is not set
-# CONFIG_HFSPLUS_FS is not set
-# CONFIG_BEFS_FS is not set
-# CONFIG_BFS_FS is not set
-# CONFIG_EFS_FS is not set
-# CONFIG_CRAMFS is not set
-# CONFIG_VXFS_FS is not set
-# CONFIG_HPFS_FS is not set
-# CONFIG_QNX4FS_FS is not set
-# CONFIG_SYSV_FS is not set
-# CONFIG_UFS_FS is not set
-
-#
-# Network File Systems
-#
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-# CONFIG_NFS_V4 is not set
-# CONFIG_NFS_DIRECTIO is not set
-CONFIG_NFSD=m
-CONFIG_NFSD_V3=y
-# CONFIG_NFSD_V4 is not set
-CONFIG_NFSD_TCP=y
-CONFIG_ROOT_NFS=y
-CONFIG_LOCKD=y
-CONFIG_LOCKD_V4=y
-CONFIG_EXPORTFS=m
-CONFIG_SUNRPC=y
-# CONFIG_RPCSEC_GSS_KRB5 is not set
-# CONFIG_RPCSEC_GSS_SPKM3 is not set
-# CONFIG_SMB_FS is not set
-# CONFIG_CIFS is not set
-# CONFIG_NCP_FS is not set
-# CONFIG_CODA_FS is not set
-# CONFIG_AFS_FS is not set
-
-#
-# Partition Types
-#
-# CONFIG_PARTITION_ADVANCED is not set
-CONFIG_MSDOS_PARTITION=y
-
-#
-# Native Language Support
-#
-CONFIG_NLS=y
-CONFIG_NLS_DEFAULT="iso8859-1"
-CONFIG_NLS_CODEPAGE_437=y
-# CONFIG_NLS_CODEPAGE_737 is not set
-# CONFIG_NLS_CODEPAGE_775 is not set
-# CONFIG_NLS_CODEPAGE_850 is not set
-# CONFIG_NLS_CODEPAGE_852 is not set
-# CONFIG_NLS_CODEPAGE_855 is not set
-# CONFIG_NLS_CODEPAGE_857 is not set
-# CONFIG_NLS_CODEPAGE_860 is not set
-# CONFIG_NLS_CODEPAGE_861 is not set
-# CONFIG_NLS_CODEPAGE_862 is not set
-# CONFIG_NLS_CODEPAGE_863 is not set
-# CONFIG_NLS_CODEPAGE_864 is not set
-# CONFIG_NLS_CODEPAGE_865 is not set
-# CONFIG_NLS_CODEPAGE_866 is not set
-# CONFIG_NLS_CODEPAGE_869 is not set
-# CONFIG_NLS_CODEPAGE_936 is not set
-# CONFIG_NLS_CODEPAGE_950 is not set
-# CONFIG_NLS_CODEPAGE_932 is not set
-# CONFIG_NLS_CODEPAGE_949 is not set
-# CONFIG_NLS_CODEPAGE_874 is not set
-# CONFIG_NLS_ISO8859_8 is not set
-# CONFIG_NLS_CODEPAGE_1250 is not set
-# CONFIG_NLS_CODEPAGE_1251 is not set
-# CONFIG_NLS_ASCII is not set
-CONFIG_NLS_ISO8859_1=y
-# CONFIG_NLS_ISO8859_2 is not set
-# CONFIG_NLS_ISO8859_3 is not set
-# CONFIG_NLS_ISO8859_4 is not set
-# CONFIG_NLS_ISO8859_5 is not set
-# CONFIG_NLS_ISO8859_6 is not set
-# CONFIG_NLS_ISO8859_7 is not set
-# CONFIG_NLS_ISO8859_9 is not set
-# CONFIG_NLS_ISO8859_13 is not set
-# CONFIG_NLS_ISO8859_14 is not set
-# CONFIG_NLS_ISO8859_15 is not set
-# CONFIG_NLS_KOI8_R is not set
-# CONFIG_NLS_KOI8_U is not set
-# CONFIG_NLS_UTF8 is not set
-
-#
-# Security options
-#
-# CONFIG_KEYS is not set
-# CONFIG_SECURITY is not set
-
-#
-# Cryptographic options
-#
-CONFIG_CRYPTO=y
-CONFIG_CRYPTO_HMAC=y
-# CONFIG_CRYPTO_NULL is not set
-# CONFIG_CRYPTO_MD4 is not set
-CONFIG_CRYPTO_MD5=m
-CONFIG_CRYPTO_SHA1=m
-# CONFIG_CRYPTO_SHA256 is not set
-# CONFIG_CRYPTO_SHA512 is not set
-# CONFIG_CRYPTO_WP512 is not set
-CONFIG_CRYPTO_DES=m
-# CONFIG_CRYPTO_BLOWFISH is not set
-# CONFIG_CRYPTO_TWOFISH is not set
-# CONFIG_CRYPTO_SERPENT is not set
-# CONFIG_CRYPTO_AES_586 is not set
-# CONFIG_CRYPTO_CAST5 is not set
-# CONFIG_CRYPTO_CAST6 is not set
-# CONFIG_CRYPTO_TEA is not set
-# CONFIG_CRYPTO_ARC4 is not set
-# CONFIG_CRYPTO_KHAZAD is not set
-# CONFIG_CRYPTO_ANUBIS is not set
-# CONFIG_CRYPTO_DEFLATE is not set
-# CONFIG_CRYPTO_MICHAEL_MIC is not set
-CONFIG_CRYPTO_CRC32C=m
-# CONFIG_CRYPTO_TEST is not set
-
-#
-# Hardware crypto devices
-#
-# CONFIG_CRYPTO_DEV_PADLOCK is not set
-
-#
-# Library routines
-#
-# CONFIG_CRC_CCITT is not set
-CONFIG_CRC32=y
-CONFIG_LIBCRC32C=y
-CONFIG_ZLIB_INFLATE=y
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/arch/xen/configs/xenU_defconfig
--- a/linux-2.6.11-xen-sparse/arch/xen/configs/xenU_defconfig Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,536 +0,0 @@
-#
-# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.11-xenU
-# Wed Apr 13 23:18:37 2005
-#
-CONFIG_XEN=y
-CONFIG_ARCH_XEN=y
-CONFIG_NO_IDLE_HZ=y
-
-#
-# XEN
-#
-# CONFIG_XEN_PRIVILEGED_GUEST is not set
-# CONFIG_XEN_PHYSDEV_ACCESS is not set
-CONFIG_XEN_BLKDEV_FRONTEND=y
-CONFIG_XEN_NETDEV_FRONTEND=y
-# CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
-CONFIG_XEN_WRITABLE_PAGETABLES=y
-CONFIG_XEN_SCRUB_PAGES=y
-CONFIG_X86=y
-# CONFIG_X86_64 is not set
-CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
-
-#
-# Code maturity level options
-#
-CONFIG_EXPERIMENTAL=y
-CONFIG_CLEAN_COMPILE=y
-CONFIG_BROKEN_ON_SMP=y
-CONFIG_LOCK_KERNEL=y
-
-#
-# General setup
-#
-CONFIG_LOCALVERSION=""
-CONFIG_SWAP=y
-CONFIG_SYSVIPC=y
-# CONFIG_POSIX_MQUEUE is not set
-# CONFIG_BSD_PROCESS_ACCT is not set
-CONFIG_SYSCTL=y
-# CONFIG_AUDIT is not set
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_HOTPLUG=y
-CONFIG_KOBJECT_UEVENT=y
-# CONFIG_IKCONFIG is not set
-# CONFIG_EMBEDDED is not set
-CONFIG_KALLSYMS=y
-# CONFIG_KALLSYMS_ALL is not set
-# CONFIG_KALLSYMS_EXTRA_PASS is not set
-CONFIG_FUTEX=y
-CONFIG_EPOLL=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_SHMEM=y
-CONFIG_CC_ALIGN_FUNCTIONS=0
-CONFIG_CC_ALIGN_LABELS=0
-CONFIG_CC_ALIGN_LOOPS=0
-CONFIG_CC_ALIGN_JUMPS=0
-# CONFIG_TINY_SHMEM is not set
-
-#
-# Loadable module support
-#
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_MODULE_FORCE_UNLOAD is not set
-CONFIG_OBSOLETE_MODPARM=y
-# CONFIG_MODVERSIONS is not set
-# CONFIG_MODULE_SRCVERSION_ALL is not set
-CONFIG_KMOD=y
-
-#
-# X86 Processor Configuration
-#
-CONFIG_XENARCH="i386"
-CONFIG_MMU=y
-CONFIG_UID16=y
-CONFIG_GENERIC_ISA_DMA=y
-CONFIG_GENERIC_IOMAP=y
-# CONFIG_M386 is not set
-# CONFIG_M486 is not set
-# CONFIG_M586 is not set
-# CONFIG_M586TSC is not set
-# CONFIG_M586MMX is not set
-# CONFIG_M686 is not set
-# CONFIG_MPENTIUMII is not set
-# CONFIG_MPENTIUMIII is not set
-# CONFIG_MPENTIUMM is not set
-CONFIG_MPENTIUM4=y
-# CONFIG_MK6 is not set
-# CONFIG_MK7 is not set
-# CONFIG_MK8 is not set
-# CONFIG_MCRUSOE is not set
-# CONFIG_MEFFICEON is not set
-# CONFIG_MWINCHIPC6 is not set
-# CONFIG_MWINCHIP2 is not set
-# CONFIG_MWINCHIP3D is not set
-# CONFIG_MCYRIXIII is not set
-# CONFIG_MVIAC3_2 is not set
-# CONFIG_X86_GENERIC is not set
-CONFIG_X86_CMPXCHG=y
-CONFIG_X86_XADD=y
-CONFIG_X86_L1_CACHE_SHIFT=7
-CONFIG_RWSEM_XCHGADD_ALGORITHM=y
-CONFIG_GENERIC_CALIBRATE_DELAY=y
-CONFIG_X86_WP_WORKS_OK=y
-CONFIG_X86_INVLPG=y
-CONFIG_X86_BSWAP=y
-CONFIG_X86_POPAD_OK=y
-CONFIG_X86_GOOD_APIC=y
-CONFIG_X86_INTEL_USERCOPY=y
-CONFIG_X86_USE_PPRO_CHECKSUM=y
-# CONFIG_HPET_TIMER is not set
-# CONFIG_HPET_EMULATE_RTC is not set
-# CONFIG_SMP is not set
-CONFIG_PREEMPT=y
-CONFIG_PREEMPT_BKL=y
-CONFIG_X86_CPUID=y
-
-#
-# Firmware Drivers
-#
-# CONFIG_EDD is not set
-CONFIG_NOHIGHMEM=y
-# CONFIG_HIGHMEM4G is not set
-CONFIG_HAVE_DEC_LOCK=y
-# CONFIG_REGPARM is not set
-
-#
-# Kernel hacking
-#
-CONFIG_DEBUG_KERNEL=y
-CONFIG_EARLY_PRINTK=y
-# CONFIG_DEBUG_STACKOVERFLOW is not set
-# CONFIG_DEBUG_STACK_USAGE is not set
-# CONFIG_DEBUG_SLAB is not set
-CONFIG_MAGIC_SYSRQ=y
-# CONFIG_DEBUG_SPINLOCK is not set
-# CONFIG_DEBUG_PAGEALLOC is not set
-# CONFIG_DEBUG_INFO is not set
-# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
-# CONFIG_FRAME_POINTER is not set
-# CONFIG_4KSTACKS is not set
-CONFIG_GENERIC_HARDIRQS=y
-CONFIG_GENERIC_IRQ_PROBE=y
-CONFIG_X86_BIOS_REBOOT=y
-CONFIG_PC=y
-
-#
-# Executable file formats
-#
-CONFIG_BINFMT_ELF=y
-# CONFIG_BINFMT_AOUT is not set
-# CONFIG_BINFMT_MISC is not set
-
-#
-# Device Drivers
-#
-
-#
-# Generic Driver Options
-#
-CONFIG_STANDALONE=y
-CONFIG_PREVENT_FIRMWARE_BUILD=y
-# CONFIG_FW_LOADER is not set
-# CONFIG_DEBUG_DRIVER is not set
-
-#
-# Block devices
-#
-# CONFIG_BLK_DEV_FD is not set
-# CONFIG_BLK_DEV_COW_COMMON is not set
-CONFIG_BLK_DEV_LOOP=m
-# CONFIG_BLK_DEV_CRYPTOLOOP is not set
-CONFIG_BLK_DEV_NBD=m
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_COUNT=16
-CONFIG_BLK_DEV_RAM_SIZE=4096
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE=""
-# CONFIG_LBD is not set
-# CONFIG_CDROM_PKTCDVD is not set
-
-#
-# IO Schedulers
-#
-CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
-CONFIG_IOSCHED_DEADLINE=y
-CONFIG_IOSCHED_CFQ=y
-# CONFIG_ATA_OVER_ETH is not set
-
-#
-# SCSI device support
-#
-CONFIG_SCSI=m
-CONFIG_SCSI_PROC_FS=y
-
-#
-# SCSI support type (disk, tape, CD-ROM)
-#
-CONFIG_BLK_DEV_SD=m
-# CONFIG_CHR_DEV_ST is not set
-# CONFIG_CHR_DEV_OSST is not set
-# CONFIG_BLK_DEV_SR is not set
-# CONFIG_CHR_DEV_SG is not set
-
-#
-# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-#
-# CONFIG_SCSI_MULTI_LUN is not set
-# CONFIG_SCSI_CONSTANTS is not set
-# CONFIG_SCSI_LOGGING is not set
-
-#
-# SCSI Transport Attributes
-#
-# CONFIG_SCSI_SPI_ATTRS is not set
-# CONFIG_SCSI_FC_ATTRS is not set
-# CONFIG_SCSI_ISCSI_ATTRS is not set
-
-#
-# SCSI low-level drivers
-#
-# CONFIG_SCSI_SATA is not set
-# CONFIG_SCSI_DEBUG is not set
-
-#
-# Multi-device support (RAID and LVM)
-#
-# CONFIG_MD is not set
-
-#
-# Networking support
-#
-CONFIG_NET=y
-
-#
-# Networking options
-#
-CONFIG_PACKET=y
-# CONFIG_PACKET_MMAP is not set
-# CONFIG_NETLINK_DEV is not set
-CONFIG_UNIX=y
-# CONFIG_NET_KEY is not set
-CONFIG_INET=y
-# CONFIG_IP_MULTICAST is not set
-# CONFIG_IP_ADVANCED_ROUTER is not set
-CONFIG_IP_PNP=y
-# CONFIG_IP_PNP_DHCP is not set
-# CONFIG_IP_PNP_BOOTP is not set
-# CONFIG_IP_PNP_RARP is not set
-# CONFIG_NET_IPIP is not set
-# CONFIG_NET_IPGRE is not set
-# CONFIG_ARPD is not set
-# CONFIG_SYN_COOKIES is not set
-# CONFIG_INET_AH is not set
-# CONFIG_INET_ESP is not set
-# CONFIG_INET_IPCOMP is not set
-# CONFIG_INET_TUNNEL is not set
-CONFIG_IP_TCPDIAG=y
-# CONFIG_IP_TCPDIAG_IPV6 is not set
-# CONFIG_IPV6 is not set
-# CONFIG_NETFILTER is not set
-
-#
-# SCTP Configuration (EXPERIMENTAL)
-#
-# CONFIG_IP_SCTP is not set
-# CONFIG_ATM is not set
-# CONFIG_BRIDGE is not set
-# CONFIG_VLAN_8021Q is not set
-# CONFIG_DECNET is not set
-# CONFIG_LLC2 is not set
-# CONFIG_IPX is not set
-# CONFIG_ATALK is not set
-# CONFIG_X25 is not set
-# CONFIG_LAPB is not set
-# CONFIG_NET_DIVERT is not set
-# CONFIG_ECONET is not set
-# CONFIG_WAN_ROUTER is not set
-
-#
-# QoS and/or fair queueing
-#
-# CONFIG_NET_SCHED is not set
-# CONFIG_NET_CLS_ROUTE is not set
-
-#
-# Network testing
-#
-# CONFIG_NET_PKTGEN is not set
-# CONFIG_NETPOLL is not set
-# CONFIG_NET_POLL_CONTROLLER is not set
-# CONFIG_HAMRADIO is not set
-# CONFIG_IRDA is not set
-# CONFIG_BT is not set
-CONFIG_NETDEVICES=y
-# CONFIG_DUMMY is not set
-# CONFIG_BONDING is not set
-# CONFIG_EQUALIZER is not set
-# CONFIG_TUN is not set
-
-#
-# Ethernet (10 or 100Mbit)
-#
-# CONFIG_NET_ETHERNET is not set
-
-#
-# Ethernet (1000 Mbit)
-#
-
-#
-# Ethernet (10000 Mbit)
-#
-
-#
-# Token Ring devices
-#
-
-#
-# Wireless LAN (non-hamradio)
-#
-# CONFIG_NET_RADIO is not set
-
-#
-# Wan interfaces
-#
-# CONFIG_WAN is not set
-# CONFIG_PPP is not set
-# CONFIG_SLIP is not set
-# CONFIG_SHAPER is not set
-# CONFIG_NETCONSOLE is not set
-CONFIG_UNIX98_PTYS=y
-CONFIG_LEGACY_PTYS=y
-CONFIG_LEGACY_PTY_COUNT=256
-
-#
-# Character devices
-#
-
-#
-# File systems
-#
-CONFIG_EXT2_FS=y
-# CONFIG_EXT2_FS_XATTR is not set
-CONFIG_EXT3_FS=y
-CONFIG_EXT3_FS_XATTR=y
-# CONFIG_EXT3_FS_POSIX_ACL is not set
-# CONFIG_EXT3_FS_SECURITY is not set
-CONFIG_JBD=y
-# CONFIG_JBD_DEBUG is not set
-CONFIG_FS_MBCACHE=y
-CONFIG_REISERFS_FS=y
-# CONFIG_REISERFS_CHECK is not set
-# CONFIG_REISERFS_PROC_INFO is not set
-# CONFIG_REISERFS_FS_XATTR is not set
-# CONFIG_JFS_FS is not set
-
-#
-# XFS support
-#
-# CONFIG_XFS_FS is not set
-# CONFIG_MINIX_FS is not set
-# CONFIG_ROMFS_FS is not set
-# CONFIG_QUOTA is not set
-CONFIG_DNOTIFY=y
-CONFIG_AUTOFS_FS=y
-CONFIG_AUTOFS4_FS=y
-
-#
-# CD-ROM/DVD Filesystems
-#
-CONFIG_ISO9660_FS=y
-CONFIG_JOLIET=y
-CONFIG_ZISOFS=y
-CONFIG_ZISOFS_FS=y
-# CONFIG_UDF_FS is not set
-
-#
-# DOS/FAT/NT Filesystems
-#
-CONFIG_FAT_FS=m
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=m
-CONFIG_FAT_DEFAULT_CODEPAGE=437
-CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
-# CONFIG_NTFS_FS is not set
-
-#
-# Pseudo filesystems
-#
-CONFIG_PROC_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_SYSFS=y
-# CONFIG_DEVFS_FS is not set
-CONFIG_DEVPTS_FS_XATTR=y
-# CONFIG_DEVPTS_FS_SECURITY is not set
-CONFIG_TMPFS=y
-# CONFIG_TMPFS_XATTR is not set
-# CONFIG_HUGETLBFS is not set
-# CONFIG_HUGETLB_PAGE is not set
-CONFIG_RAMFS=y
-
-#
-# Miscellaneous filesystems
-#
-# CONFIG_ADFS_FS is not set
-# CONFIG_AFFS_FS is not set
-# CONFIG_HFS_FS is not set
-# CONFIG_HFSPLUS_FS is not set
-# CONFIG_BEFS_FS is not set
-# CONFIG_BFS_FS is not set
-# CONFIG_EFS_FS is not set
-# CONFIG_CRAMFS is not set
-# CONFIG_VXFS_FS is not set
-# CONFIG_HPFS_FS is not set
-# CONFIG_QNX4FS_FS is not set
-# CONFIG_SYSV_FS is not set
-# CONFIG_UFS_FS is not set
-
-#
-# Network File Systems
-#
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-# CONFIG_NFS_V4 is not set
-# CONFIG_NFS_DIRECTIO is not set
-# CONFIG_NFSD is not set
-CONFIG_ROOT_NFS=y
-CONFIG_LOCKD=y
-CONFIG_LOCKD_V4=y
-CONFIG_SUNRPC=y
-# CONFIG_RPCSEC_GSS_KRB5 is not set
-# CONFIG_RPCSEC_GSS_SPKM3 is not set
-# CONFIG_SMB_FS is not set
-# CONFIG_CIFS is not set
-# CONFIG_NCP_FS is not set
-# CONFIG_CODA_FS is not set
-# CONFIG_AFS_FS is not set
-
-#
-# Partition Types
-#
-# CONFIG_PARTITION_ADVANCED is not set
-CONFIG_MSDOS_PARTITION=y
-
-#
-# Native Language Support
-#
-CONFIG_NLS=y
-CONFIG_NLS_DEFAULT="iso8859-1"
-CONFIG_NLS_CODEPAGE_437=y
-# CONFIG_NLS_CODEPAGE_737 is not set
-# CONFIG_NLS_CODEPAGE_775 is not set
-# CONFIG_NLS_CODEPAGE_850 is not set
-# CONFIG_NLS_CODEPAGE_852 is not set
-# CONFIG_NLS_CODEPAGE_855 is not set
-# CONFIG_NLS_CODEPAGE_857 is not set
-# CONFIG_NLS_CODEPAGE_860 is not set
-# CONFIG_NLS_CODEPAGE_861 is not set
-# CONFIG_NLS_CODEPAGE_862 is not set
-# CONFIG_NLS_CODEPAGE_863 is not set
-# CONFIG_NLS_CODEPAGE_864 is not set
-# CONFIG_NLS_CODEPAGE_865 is not set
-# CONFIG_NLS_CODEPAGE_866 is not set
-# CONFIG_NLS_CODEPAGE_869 is not set
-# CONFIG_NLS_CODEPAGE_936 is not set
-# CONFIG_NLS_CODEPAGE_950 is not set
-# CONFIG_NLS_CODEPAGE_932 is not set
-# CONFIG_NLS_CODEPAGE_949 is not set
-# CONFIG_NLS_CODEPAGE_874 is not set
-# CONFIG_NLS_ISO8859_8 is not set
-# CONFIG_NLS_CODEPAGE_1250 is not set
-# CONFIG_NLS_CODEPAGE_1251 is not set
-# CONFIG_NLS_ASCII is not set
-CONFIG_NLS_ISO8859_1=y
-# CONFIG_NLS_ISO8859_2 is not set
-# CONFIG_NLS_ISO8859_3 is not set
-# CONFIG_NLS_ISO8859_4 is not set
-# CONFIG_NLS_ISO8859_5 is not set
-# CONFIG_NLS_ISO8859_6 is not set
-# CONFIG_NLS_ISO8859_7 is not set
-# CONFIG_NLS_ISO8859_9 is not set
-# CONFIG_NLS_ISO8859_13 is not set
-# CONFIG_NLS_ISO8859_14 is not set
-# CONFIG_NLS_ISO8859_15 is not set
-# CONFIG_NLS_KOI8_R is not set
-# CONFIG_NLS_KOI8_U is not set
-# CONFIG_NLS_UTF8 is not set
-
-#
-# Security options
-#
-# CONFIG_KEYS is not set
-# CONFIG_SECURITY is not set
-
-#
-# Cryptographic options
-#
-CONFIG_CRYPTO=y
-# CONFIG_CRYPTO_HMAC is not set
-# CONFIG_CRYPTO_NULL is not set
-# CONFIG_CRYPTO_MD4 is not set
-CONFIG_CRYPTO_MD5=m
-# CONFIG_CRYPTO_SHA1 is not set
-# CONFIG_CRYPTO_SHA256 is not set
-# CONFIG_CRYPTO_SHA512 is not set
-# CONFIG_CRYPTO_WP512 is not set
-# CONFIG_CRYPTO_DES is not set
-# CONFIG_CRYPTO_BLOWFISH is not set
-# CONFIG_CRYPTO_TWOFISH is not set
-# CONFIG_CRYPTO_SERPENT is not set
-# CONFIG_CRYPTO_AES_586 is not set
-# CONFIG_CRYPTO_CAST5 is not set
-# CONFIG_CRYPTO_CAST6 is not set
-# CONFIG_CRYPTO_TEA is not set
-# CONFIG_CRYPTO_ARC4 is not set
-# CONFIG_CRYPTO_KHAZAD is not set
-# CONFIG_CRYPTO_ANUBIS is not set
-# CONFIG_CRYPTO_DEFLATE is not set
-# CONFIG_CRYPTO_MICHAEL_MIC is not set
-CONFIG_CRYPTO_CRC32C=m
-# CONFIG_CRYPTO_TEST is not set
-
-#
-# Hardware crypto devices
-#
-# CONFIG_CRYPTO_DEV_PADLOCK is not set
-
-#
-# Library routines
-#
-# CONFIG_CRC_CCITT is not set
-# CONFIG_CRC32 is not set
-CONFIG_LIBCRC32C=m
-CONFIG_ZLIB_INFLATE=y
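
Each option in the defconfig above reaches C code as a CONFIG_* preprocessor macro once kbuild generates autoconf.h: =y options are built in, =m options become modules, and integer/string options expand to literals. A minimal sketch of how a driver might consume the ramdisk settings chosen above (the variable names are illustrative, not the actual driver source):

    #include <linux/config.h>   /* pulls in the generated autoconf.h */

    #ifdef CONFIG_BLK_DEV_RAM
    /* the integers chosen in the config expand directly: */
    static int rd_count  = CONFIG_BLK_DEV_RAM_COUNT;  /* 16 ramdisks  */
    static int rd_kbytes = CONFIG_BLK_DEV_RAM_SIZE;   /* 4096 KB each */
    #endif
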
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/arch/xen/i386/Kconfig
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/Kconfig Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,974 +0,0 @@
-#
-# For a description of the syntax of this configuration file,
-# see Documentation/kbuild/kconfig-language.txt.
-#
-
-menu "X86 Processor Configuration"
-
-config XENARCH
- string
- default i386
-
-config MMU
- bool
- default y
-
-config SBUS
- bool
-
-config UID16
- bool
- default y
-
-config GENERIC_ISA_DMA
- bool
- default y
-
-config GENERIC_IOMAP
- bool
- default y
-
-choice
- prompt "Processor family"
- default M686
-
-config M386
- bool "386"
- ---help---
- This is the processor type of your CPU. This information is used for
- optimizing purposes. In order to compile a kernel that can run on
- all x86 CPU types (albeit not optimally fast), you can specify
- "386" here.
-
- The kernel will not necessarily run on earlier architectures than
- the one you have chosen, e.g. a Pentium optimized kernel will run on
- a PPro, but not necessarily on a i486.
-
- Here are the settings recommended for greatest speed:
- - "386" for the AMD/Cyrix/Intel 386DX/DXL/SL/SLC/SX, Cyrix/TI
- 486DLC/DLC2, UMC 486SX-S and NexGen Nx586. Only "386" kernels
- will run on a 386 class machine.
- - "486" for the AMD/Cyrix/IBM/Intel 486DX/DX2/DX4 or
- SL/SLC/SLC2/SLC3/SX/SX2 and UMC U5D or U5S.
- - "586" for generic Pentium CPUs lacking the TSC
- (time stamp counter) register.
- - "Pentium-Classic" for the Intel Pentium.
- - "Pentium-MMX" for the Intel Pentium MMX.
- - "Pentium-Pro" for the Intel Pentium Pro.
- - "Pentium-II" for the Intel Pentium II or pre-Coppermine Celeron.
- - "Pentium-III" for the Intel Pentium III or Coppermine Celeron.
- - "Pentium-4" for the Intel Pentium 4 or P4-based Celeron.
- - "K6" for the AMD K6, K6-II and K6-III (aka K6-3D).
- - "Athlon" for the AMD K7 family (Athlon/Duron/Thunderbird).
- - "Crusoe" for the Transmeta Crusoe series.
- - "Efficeon" for the Transmeta Efficeon series.
- - "Winchip-C6" for original IDT Winchip.
- - "Winchip-2" for IDT Winchip 2.
- - "Winchip-2A" for IDT Winchips with 3dNow! capabilities.
- - "CyrixIII/VIA C3" for VIA Cyrix III or VIA C3.
- - "VIA C3-2 for VIA C3-2 "Nehemiah" (model 9 and above).
-
- If you don't know what to do, choose "386".
-
-config M486
- bool "486"
- help
- Select this for a 486 series processor, either Intel or one of the
- compatible processors from AMD, Cyrix, IBM, or Intel. Includes DX,
- DX2, and DX4 variants; also SL/SLC/SLC2/SLC3/SX/SX2 and UMC U5D or
- U5S.
-
-config M586
- bool "586/K5/5x86/6x86/6x86MX"
- help
- Select this for an 586 or 686 series processor such as the AMD K5,
- the Cyrix 5x86, 6x86 and 6x86MX. This choice does not
- assume the RDTSC (Read Time Stamp Counter) instruction.
-
-config M586TSC
- bool "Pentium-Classic"
- help
- Select this for a Pentium Classic processor with the RDTSC (Read
- Time Stamp Counter) instruction for benchmarking.
-
-config M586MMX
- bool "Pentium-MMX"
- help
- Select this for a Pentium with the MMX graphics/multimedia
- extended instructions.
-
-config M686
- bool "Pentium-Pro"
- help
- Select this for Intel Pentium Pro chips. This enables the use of
- Pentium Pro extended instructions, and disables the init-time guard
- against the f00f bug found in earlier Pentiums.
-
-config MPENTIUMII
- bool "Pentium-II/Celeron(pre-Coppermine)"
- help
- Select this for Intel chips based on the Pentium-II and
- pre-Coppermine Celeron core. This option enables an unaligned
- copy optimization, compiles the kernel with optimization flags
- tailored for the chip, and applies any applicable Pentium Pro
- optimizations.
-
-config MPENTIUMIII
- bool "Pentium-III/Celeron(Coppermine)/Pentium-III Xeon"
- help
- Select this for Intel chips based on the Pentium-III and
- Celeron-Coppermine core. This option enables use of some
- extended prefetch instructions in addition to the Pentium II
- extensions.
-
-config MPENTIUMM
- bool "Pentium M"
- help
- Select this for Intel Pentium M (not Pentium-4 M)
- notebook chips.
-
-config MPENTIUM4
- bool "Pentium-4/Celeron(P4-based)/Pentium-4 M/Xeon"
- help
- Select this for Intel Pentium 4 chips. This includes the
- Pentium 4, P4-based Celeron and Xeon, and Pentium-4 M
- (not Pentium M) chips. This option enables compile flags
- optimized for the chip, uses the correct cache shift, and
- applies any applicable Pentium III optimizations.
-
-config MK6
- bool "K6/K6-II/K6-III"
- help
- Select this for an AMD K6-family processor. Enables use of
- some extended instructions, and passes appropriate optimization
- flags to GCC.
-
-config MK7
- bool "Athlon/Duron/K7"
- help
- Select this for an AMD Athlon K7-family processor. Enables use of
- some extended instructions, and passes appropriate optimization
- flags to GCC.
-
-config MK8
- bool "Opteron/Athlon64/Hammer/K8"
- help
- Select this for an AMD Opteron or Athlon64 Hammer-family processor. Enables
- use of some extended instructions, and passes appropriate optimization
- flags to GCC.
-
-config MCRUSOE
- bool "Crusoe"
- help
- Select this for a Transmeta Crusoe processor. Treats the processor
- like a 586 with TSC, and sets some GCC optimization flags (like a
- Pentium Pro with no alignment requirements).
-
-config MEFFICEON
- bool "Efficeon"
- help
- Select this for a Transmeta Efficeon processor.
-
-config MWINCHIPC6
- bool "Winchip-C6"
- help
- Select this for an IDT Winchip C6 chip. Linux and GCC
- treat this chip as a 586TSC with some extended instructions
- and alignment requirements.
-
-config MWINCHIP2
- bool "Winchip-2"
- help
- Select this for an IDT Winchip-2. Linux and GCC
- treat this chip as a 586TSC with some extended instructions
- and alignment requirements.
-
-config MWINCHIP3D
- bool "Winchip-2A/Winchip-3"
- help
- Select this for an IDT Winchip-2A or 3. Linux and GCC
- treat this chip as a 586TSC with some extended instructions
- and alignment requirements. Also enable out of order memory
- stores for this CPU, which can increase performance of some
- operations.
-
-config MCYRIXIII
- bool "CyrixIII/VIA-C3"
- help
- Select this for a Cyrix III or C3 chip. Presently Linux and GCC
- treat this chip as a generic 586. Whilst the CPU is 686 class,
- it lacks the cmov extension which gcc assumes is present when
- generating 686 code.
- Note that Nehemiah (Model 9) and above will not boot with this
- kernel due to them lacking the 3DNow! instructions used in earlier
- incarnations of the CPU.
-
-config MVIAC3_2
- bool "VIA C3-2 (Nehemiah)"
- help
- Select this for a VIA C3 "Nehemiah". Selecting this enables usage
- of SSE and tells gcc to treat the CPU as a 686.
- Note, this kernel will not boot on older (pre model 9) C3s.
-
-endchoice
-
-config X86_GENERIC
- bool "Generic x86 support"
- help
- Instead of just including optimizations for the selected
- x86 variant (e.g. PII, Crusoe or Athlon), include some more
- generic optimizations as well. This will make the kernel
- perform better on x86 CPUs other than that selected.
-
- This is really intended for distributors who need more
- generic optimizations.
-
-#
-# Define implied options from the CPU selection here
-#
-config X86_CMPXCHG
- bool
- depends on !M386
- default y
-
-config X86_XADD
- bool
- depends on !M386
- default y
-
-config X86_L1_CACHE_SHIFT
- int
- default "7" if MPENTIUM4 || X86_GENERIC
- default "4" if X86_ELAN || M486 || M386
- default "5" if MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCRUSOE ||
MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX
|| M586TSC || M586 || MVIAC3_2
- default "6" if MK7 || MK8 || MPENTIUMM
-
-config RWSEM_GENERIC_SPINLOCK
- bool
- depends on M386
- default y
-
-config RWSEM_XCHGADD_ALGORITHM
- bool
- depends on !M386
- default y
-
-config GENERIC_CALIBRATE_DELAY
- bool
- default y
-
-config X86_PPRO_FENCE
- bool
- depends on M686 || M586MMX || M586TSC || M586 || M486 || M386
- default y
-
-config X86_F00F_BUG
- bool
- depends on M586MMX || M586TSC || M586 || M486 || M386
- default y
-
-config X86_WP_WORKS_OK
- bool
- depends on !M386
- default y
-
-config X86_INVLPG
- bool
- depends on !M386
- default y
-
-config X86_BSWAP
- bool
- depends on !M386
- default y
-
-config X86_POPAD_OK
- bool
- depends on !M386
- default y
-
-config X86_ALIGNMENT_16
- bool
- depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2
- default y
-
-config X86_GOOD_APIC
- bool
- depends on MK7 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || MK8 || MEFFICEON
- default y
-
-config X86_INTEL_USERCOPY
- bool
- depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON
- default y
-
-config X86_USE_PPRO_CHECKSUM
- bool
- depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MEFFICEON
- default y
-
-config X86_USE_3DNOW
- bool
- depends on MCYRIXIII || MK7
- default y
-
-config X86_OOSTORE
- bool
- depends on (MWINCHIP3D || MWINCHIP2 || MWINCHIPC6) && MTRR
- default y
-
-config HPET_TIMER
- bool
- default n
-#config HPET_TIMER
-# bool "HPET Timer Support"
-# help
-# This enables the use of the HPET for the kernel's internal timer.
-# HPET is the next generation timer replacing legacy 8254s.
-# You can safely choose Y here. However, HPET will only be
-# activated if the platform and the BIOS support this feature.
-# Otherwise the 8254 will be used for timing services.
-#
-# Choose N to continue using the legacy 8254 timer.
-
-config HPET_EMULATE_RTC
- def_bool HPET_TIMER && RTC=y
-
-config SMP
- bool
- default n
-#config SMP
-# bool "Symmetric multi-processing support"
-# ---help---
-# This enables support for systems with more than one CPU. If you have
-# a system with only one CPU, like most personal computers, say N. If
-# you have a system with more than one CPU, say Y.
-#
-# If you say N here, the kernel will run on single and multiprocessor
-# machines, but will use only one CPU of a multiprocessor machine. If
-# you say Y here, the kernel will run on many, but not all,
-# singleprocessor machines. On a singleprocessor machine, the kernel
-# will run faster if you say N here.
-#
-# Note that if you say Y here and choose architecture "586" or
-# "Pentium" under "Processor family", the kernel will not work on 486
-# architectures. Similarly, multiprocessor kernels for the "PPro"
-# architecture may not work on all Pentium based boards.
-#
-# People using multiprocessor machines who say Y here should also say
-# Y to "Enhanced Real Time Clock Support", below. The "Advanced Power
-# Management" code will be disabled if you say Y here.
-#
-# See also the <file:Documentation/smp.txt>,
-# <file:Documentation/i386/IO-APIC.txt>,
-# <file:Documentation/nmi_watchdog.txt> and the SMP-HOWTO available at
-# <http://www.tldp.org/docs.html#howto>.
-#
-# If you don't know what to do here, say N.
-
-config NR_CPUS
- int "Maximum number of CPUs (2-255)"
- range 2 255
- depends on SMP
- default "32" if X86_NUMAQ || X86_SUMMIT || X86_BIGSMP || X86_ES7000
- default "8"
- help
- This allows you to specify the maximum number of CPUs which this
- kernel will support. The maximum supported value is 255 and the
- minimum value which makes sense is 2.
-
- This is purely to save memory - each supported CPU adds
- approximately eight kilobytes to the kernel image.
-
-config SCHED_SMT
- bool "SMT (Hyperthreading) scheduler support"
- depends on SMP
- default off
- help
- SMT scheduler support improves the CPU scheduler's decision making
- when dealing with Intel Pentium 4 chips with HyperThreading at a
- cost of slightly increased overhead in some places. If unsure say
- N here.
-
-config PREEMPT
- bool "Preemptible Kernel"
- help
- This option reduces the latency of the kernel when reacting to
- real-time or interactive events by allowing a low priority process to
- be preempted even if it is in kernel mode executing a system call.
- This allows applications to run more reliably even when the system is
- under load.
-
- Say Y here if you are building a kernel for a desktop, embedded
- or real-time system. Say N if you are unsure.
-
-config PREEMPT_BKL
- bool "Preempt The Big Kernel Lock"
- depends on PREEMPT
- default y
- help
- This option reduces the latency of the kernel by making the
- big kernel lock preemptible.
-
- Say Y here if you are building a kernel for a desktop system.
- Say N if you are unsure.
-
-#config X86_TSC
-# bool
-# depends on (MWINCHIP3D || MWINCHIP2 || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2) && !X86_NUMAQ
-# default y
-
-#config X86_MCE
-# bool "Machine Check Exception"
-# depends on !X86_VOYAGER
-# ---help---
-# Machine Check Exception support allows the processor to notify the
-# kernel if it detects a problem (e.g. overheating, component failure).
-# The action the kernel takes depends on the severity of the problem,
-# ranging from a warning message on the console, to halting the machine.
-# Your processor must be a Pentium or newer to support this - check the
-# flags in /proc/cpuinfo for mce. Note that some older Pentium systems
-# have a design flaw which leads to false MCE events - hence MCE is
-# disabled on all P5 processors, unless explicitly enabled with "mce"
-# as a boot argument. Similarly, if MCE is built in and creates a
-# problem on some new non-standard machine, you can boot with "nomce"
-# to disable it. MCE support simply ignores non-MCE processors like
-# the 386 and 486, so nearly everyone can say Y here.
-
-#config X86_MCE_NONFATAL
-# tristate "Check for non-fatal errors on AMD Athlon/Duron / Intel
Pentium 4"
-# depends on X86_MCE
-# help
-# Enabling this feature starts a timer that triggers every 5 seconds which
-# will look at the machine check registers to see if anything happened.
-# Non-fatal problems automatically get corrected (but still logged).
-# Disable this if you don't want to see these messages.
-# Seeing the messages this option prints out may be indicative of dying hardware,
-# or out-of-spec (ie, overclocked) hardware.
-# This option only does something on certain CPUs.
-# (AMD Athlon/Duron and Intel Pentium 4)
-
-#config X86_MCE_P4THERMAL
-# bool "check for P4 thermal throttling interrupt."
-# depends on X86_MCE && (X86_UP_APIC || SMP)
-# help
-# Enabling this feature will cause a message to be printed when the P4
-# enters thermal throttling.
-
-config MICROCODE
- tristate "/dev/cpu/microcode - Intel IA32 CPU microcode support"
- depends on XEN_PRIVILEGED_GUEST
- ---help---
- If you say Y here and also to "/dev file system support" in the
- 'File systems' section, you will be able to update the microcode on
- Intel processors in the IA32 family, e.g. Pentium Pro, Pentium II,
- Pentium III, Pentium 4, Xeon etc. You will obviously need the
- actual microcode binary data itself which is not shipped with the
- Linux kernel.
-
- For latest news and information on obtaining all the required
- ingredients for this driver, check:
- <http://www.urbanmyth.org/microcode/>.
-
- To compile this driver as a module, choose M here: the
- module will be called microcode.
-
-#config X86_MSR
-# tristate "/dev/cpu/*/msr - Model-specific register support"
-# help
-# This device gives privileged processes access to the x86
-# Model-Specific Registers (MSRs). It is a character device with
-# major 202 and minors 0 to 31 for /dev/cpu/0/msr to /dev/cpu/31/msr.
-# MSR accesses are directed to a specific CPU on multi-processor
-# systems.
-
-config X86_CPUID
- tristate "/dev/cpu/*/cpuid - CPU information support"
- help
- This device gives processes access to the x86 CPUID instruction to
- be executed on a specific processor. It is a character device
- with major 203 and minors 0 to 31 for /dev/cpu/0/cpuid to
- /dev/cpu/31/cpuid.
-
-source "drivers/firmware/Kconfig"
-
-choice
- prompt "High Memory Support"
- default NOHIGHMEM
-
-config NOHIGHMEM
- bool "off"
- ---help---
- Linux can use up to 64 Gigabytes of physical memory on x86 systems.
- However, the address space of 32-bit x86 processors is only 4
- Gigabytes large. That means that, if you have a large amount of
- physical memory, not all of it can be "permanently mapped" by the
- kernel. The physical memory that's not permanently mapped is called
- "high memory".
-
- If you are compiling a kernel which will never run on a machine with
- more than 1 Gigabyte total physical RAM, answer "off" here (default
- choice and suitable for most users). This will result in a "3GB/1GB"
- split: 3GB are mapped so that each process sees a 3GB virtual memory
- space and the remaining part of the 4GB virtual memory space is used
- by the kernel to permanently map as much physical memory as
- possible.
-
- If the machine has between 1 and 4 Gigabytes physical RAM, then
- answer "4GB" here.
-
- If more than 4 Gigabytes is used then answer "64GB" here. This
- selection turns Intel PAE (Physical Address Extension) mode on.
- PAE implements 3-level paging on IA32 processors. PAE is fully
- supported by Linux, PAE mode is implemented on all recent Intel
- processors (Pentium Pro and better). NOTE: If you say "64GB" here,
- then the kernel will not boot on CPUs that don't support PAE!
-
- The actual amount of total physical memory will either be
- auto detected or can be forced by using a kernel command line option
- such as "mem=256M". (Try "man bootparam" or see the documentation of
- your boot loader (lilo or loadlin) about how to pass options to the
- kernel at boot time.)
-
- If unsure, say "off".
-
-config HIGHMEM4G
- bool "4GB"
- help
- Select this if you have a 32-bit processor and between 1 and 4
- gigabytes of physical RAM.
-
-#config HIGHMEM64G
-# bool "64GB"
-# help
-# Select this if you have a 32-bit processor and more than 4
-# gigabytes of physical RAM.
-
-endchoice
-
-config HIGHMEM
- bool
- depends on HIGHMEM64G || HIGHMEM4G
- default y
-
-config X86_PAE
- bool
- depends on HIGHMEM64G
- default y
-
-# Common NUMA Features
-config NUMA
- bool "Numa Memory Allocation and Scheduler Support"
- depends on SMP && HIGHMEM64G && (X86_NUMAQ || X86_GENERICARCH ||
(X86_SUMMIT && ACPI))
- default n if X86_PC
- default y if (X86_NUMAQ || X86_SUMMIT)
-
-# Need comments to help the hapless user trying to turn on NUMA support
-comment "NUMA (NUMA-Q) requires SMP, 64GB highmem support"
- depends on X86_NUMAQ && (!HIGHMEM64G || !SMP)
-
-comment "NUMA (Summit) requires SMP, 64GB highmem support, ACPI"
- depends on X86_SUMMIT && (!HIGHMEM64G || !ACPI)
-
-config DISCONTIGMEM
- bool
- depends on NUMA
- default y
-
-config HAVE_ARCH_BOOTMEM_NODE
- bool
- depends on NUMA
- default y
-
-#config HIGHPTE
-# bool "Allocate 3rd-level pagetables from highmem"
-# depends on HIGHMEM4G || HIGHMEM64G
-# help
-# The VM uses one page table entry for each page of physical memory.
-# For systems with a lot of RAM, this can be wasteful of precious
-# low memory. Setting this option will put user-space page table
-# entries in high memory.
-
-config MTRR
- bool
- depends on XEN_PRIVILEGED_GUEST
- default y
-
-#config MTRR
-# bool "MTRR (Memory Type Range Register) support"
-# ---help---
-# On Intel P6 family processors (Pentium Pro, Pentium II and later)
-# the Memory Type Range Registers (MTRRs) may be used to control
-# processor access to memory ranges. This is most useful if you have
-# a video (VGA) card on a PCI or AGP bus. Enabling write-combining
-# allows bus write transfers to be combined into a larger transfer
-# before bursting over the PCI/AGP bus. This can increase performance
-# of image write operations 2.5 times or more. Saying Y here creates a
-# /proc/mtrr file which may be used to manipulate your processor's
-# MTRRs. Typically the X server should use this.
-#
-# This code has a reasonably generic interface so that similar
-# control registers on other processors can be easily supported
-# as well:
-#
-# The Cyrix 6x86, 6x86MX and M II processors have Address Range
-# Registers (ARRs) which provide a similar functionality to MTRRs. For
-# these, the ARRs are used to emulate the MTRRs.
-# The AMD K6-2 (stepping 8 and above) and K6-3 processors have two
-# MTRRs. The Centaur C6 (WinChip) has 8 MCRs, allowing
-# write-combining. All of these processors are supported by this code
-# and it makes sense to say Y here if you have one of them.
-#
-# Saying Y here also fixes a problem with buggy SMP BIOSes which only
-# set the MTRRs for the boot CPU and not for the secondary CPUs. This
-# can lead to all sorts of problems, so it's good to say Y here.
-#
-# You can safely say Y even if your machine doesn't have MTRRs, you'll
-# just add about 9 KB to your kernel.
-#
-# See <file:Documentation/mtrr.txt> for more information.
-
-config IRQBALANCE
- bool "Enable kernel irq balancing"
- depends on SMP && X86_IO_APIC
- default y
- help
- The default yes will allow the kernel to do irq load balancing.
- Saying no will keep the kernel from doing irq load balancing.
-
-config HAVE_DEC_LOCK
- bool
- depends on (SMP || PREEMPT) && X86_CMPXCHG
- default y
-
-# turning this on wastes a bunch of space.
-# Summit needs it only when NUMA is on
-config BOOT_IOREMAP
- bool
- depends on (((X86_SUMMIT || X86_GENERICARCH) && NUMA) || (X86 && EFI))
- default y
-
-config REGPARM
- bool "Use register arguments (EXPERIMENTAL)"
- depends on EXPERIMENTAL
- default n
- help
- Compile the kernel with -mregparm=3. This uses a different ABI
- and passes the first three arguments of a function call in registers.
- This will probably break binary only modules.
-
- This feature is only enabled for gcc-3.0 and later - earlier compilers
- generate incorrect output with certain kernel constructs when
- -mregparm=3 is used.
-
-
-if XEN_PHYSDEV_ACCESS
-
-menu "Bus options (PCI, PCMCIA, EISA, MCA, ISA)"
-
-config X86_VISWS_APIC
- bool
- depends on X86_VISWS
- default y
-
-config X86_LOCAL_APIC
- bool
- depends on (X86_VISWS || SMP) && !X86_VOYAGER
- default y
-
-config X86_IO_APIC
- bool
- depends on SMP && !(X86_VISWS || X86_VOYAGER)
- default y
-
-config PCI
- bool "PCI support" if !X86_VISWS
- depends on !X86_VOYAGER
- default y if X86_VISWS
- help
- Find out whether you have a PCI motherboard. PCI is the name of a
- bus system, i.e. the way the CPU talks to the other stuff inside
- your box. Other bus systems are ISA, EISA, MicroChannel (MCA) or
- VESA. If you have PCI, say Y, otherwise N.
-
- The PCI-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>, contains valuable
- information about which PCI hardware does work under Linux and which
- doesn't.
-
-#choice
-# prompt "PCI access mode"
-# depends on PCI && !X86_VISWS
-# default PCI_GOANY
-# ---help---
-# On PCI systems, the BIOS can be used to detect the PCI devices and
-# determine their configuration. However, some old PCI motherboards
-# have BIOS bugs and may crash if this is done. Also, some embedded
-# PCI-based systems don't have any BIOS at all. Linux can also try to
-# detect the PCI hardware directly without using the BIOS.
-#
-# With this option, you can specify how Linux should detect the
-# PCI devices. If you choose "BIOS", the BIOS will be used,
-# if you choose "Direct", the BIOS won't be used, and if you
-# choose "MMConfig", then PCI Express MMCONFIG will be used.
-# If you choose "Any", the kernel will try MMCONFIG, then the
-# direct access method and falls back to the BIOS if that doesn't
-# work. If unsure, go with the default, which is "Any".
-#
-#config PCI_GOBIOS
-# bool "BIOS"
-#
-#config PCI_GOMMCONFIG
-# bool "MMConfig"
-#
-#config PCI_GODIRECT
-# bool "Direct"
-#
-#config PCI_GOANY
-# bool "Any"
-#
-#endchoice
-#
-#config PCI_BIOS
-# bool
-# depends on !X86_VISWS && PCI && (PCI_GOBIOS || PCI_GOANY)
-# default y
-#
-#config PCI_DIRECT
-# bool
-# depends on PCI && ((PCI_GODIRECT || PCI_GOANY) || X86_VISWS)
-# default y
-
-config PCI_DIRECT
- bool
- depends on PCI
- default y
-
-source "drivers/pci/pcie/Kconfig"
-
-source "drivers/pci/Kconfig"
-
-config ISA
- bool "ISA support"
- depends on !(X86_VOYAGER || X86_VISWS)
- help
- Find out whether you have ISA slots on your motherboard. ISA is the
- name of a bus system, i.e. the way the CPU talks to the other stuff
- inside your box. Other bus systems are PCI, EISA, MicroChannel
- (MCA) or VESA. ISA is an older system, now being displaced by PCI;
- newer boards don't support it. If you have ISA, say Y, otherwise N.
-
-config EISA
- bool "EISA support"
- depends on ISA
- ---help---
- The Extended Industry Standard Architecture (EISA) bus was
- developed as an open alternative to the IBM MicroChannel bus.
-
- The EISA bus provided some of the features of the IBM MicroChannel
- bus while maintaining backward compatibility with cards made for
- the older ISA bus. The EISA bus saw limited use between 1988 and
- 1995 when it was made obsolete by the PCI bus.
-
- Say Y here if you are building a kernel for an EISA-based machine.
-
- Otherwise, say N.
-
-source "drivers/eisa/Kconfig"
-
-config MCA
- bool "MCA support"
- depends on !(X86_VISWS || X86_VOYAGER)
- help
- MicroChannel Architecture is found in some IBM PS/2 machines and
- laptops. It is a bus system similar to PCI or ISA. See
- <file:Documentation/mca.txt> (and especially the web page given
- there) before attempting to build an MCA bus kernel.
-
-config MCA
- depends on X86_VOYAGER
- default y if X86_VOYAGER
-
-source "drivers/mca/Kconfig"
-
-config SCx200
- tristate "NatSemi SCx200 support"
- depends on !X86_VOYAGER
- help
- This provides basic support for the National Semiconductor SCx200
- processor. Right now this is just a driver for the GPIO pins.
-
- If you don't know what to do here, say N.
-
- This support is also available as a module. If compiled as a
- module, it will be called scx200.
-
-source "drivers/pcmcia/Kconfig"
-
-source "drivers/pci/hotplug/Kconfig"
-
-endmenu
-
-endif
-
-menu "Kernel hacking"
-
-config DEBUG_KERNEL
- bool "Kernel debugging"
- help
- Say Y here if you are developing drivers or trying to debug and
- identify kernel problems.
-
-config EARLY_PRINTK
- bool "Early printk" if EMBEDDED
- default y
- help
- Write kernel log output directly into the VGA buffer or to a serial
- port.
-
- This is useful for kernel debugging when your machine crashes very
- early before the console code is initialized. For normal operation
- it is not recommended because it looks ugly and doesn't cooperate
- with klogd/syslogd or the X server. You should normally say N here,
- unless you want to debug such a crash.
-
-config DEBUG_STACKOVERFLOW
- bool "Check for stack overflows"
- depends on DEBUG_KERNEL
-
-config DEBUG_STACK_USAGE
- bool "Stack utilization instrumentation"
- depends on DEBUG_KERNEL
- help
- Enables the display of the minimum amount of free stack which each
- task has ever had available in the sysrq-T and sysrq-P debug output.
-
- This option will slow down process creation somewhat.
-
-config DEBUG_SLAB
- bool "Debug memory allocations"
- depends on DEBUG_KERNEL
- help
- Say Y here to have the kernel do limited verification on memory
- allocation as well as poisoning memory on free to catch use of freed
- memory.
-
-config MAGIC_SYSRQ
- bool "Magic SysRq key"
- depends on DEBUG_KERNEL
- help
- If you say Y here, you will have some control over the system even
- if the system crashes for example during kernel debugging (e.g., you
- will be able to flush the buffer cache to disk, reboot the system
- immediately or dump some status information). This is accomplished
- by pressing various keys while holding SysRq (Alt+PrintScreen). It
- also works on a serial console (on PC hardware at least), if you
- send a BREAK and then within 5 seconds a command keypress. The
- keys are documented in <file:Documentation/sysrq.txt>. Don't say Y
- unless you really know what this hack does.
-
-config DEBUG_SPINLOCK
- bool "Spinlock debugging"
- depends on DEBUG_KERNEL
- help
- Say Y here and build SMP to catch missing spinlock initialization
- and certain other kinds of spinlock errors commonly made. This is
- best used in conjunction with the NMI watchdog so that spinlock
- deadlocks are also debuggable.
-
-config DEBUG_PAGEALLOC
- bool "Page alloc debugging"
- depends on DEBUG_KERNEL
- help
- Unmap pages from the kernel linear mapping after free_pages().
- This results in a large slowdown, but helps to find certain types
- of memory corruptions.
-
-config DEBUG_HIGHMEM
- bool "Highmem debugging"
- depends on DEBUG_KERNEL && HIGHMEM
- help
- This option enables additional error checking for high memory systems.
- Disable for production systems.
-
-config DEBUG_INFO
- bool "Compile the kernel with debug info"
- depends on DEBUG_KERNEL
- help
- If you say Y here the resulting kernel image will include
- debugging info resulting in a larger kernel image.
- Say Y here only if you plan to use gdb to debug the kernel.
- If you don't debug the kernel, you can say N.
-
-config DEBUG_SPINLOCK_SLEEP
- bool "Sleep-inside-spinlock checking"
- help
- If you say Y here, various routines which may sleep will become very
- noisy if they are called with a spinlock held.
-
-config FRAME_POINTER
- bool "Compile the kernel with frame pointers"
- help
- If you say Y here the resulting kernel image will be slightly larger
- and slower, but it will give very useful debugging information.
- If you don't debug the kernel, you can say N, but we may not be able
- to solve problems without frame pointers.
-
-config 4KSTACKS
- bool "Use 4Kb for kernel stacks instead of 8Kb"
- help
- If you say Y here the kernel will use a 4Kb stacksize for the
- kernel stack attached to each process/thread. This facilitates
- running more threads on a system and also reduces the pressure
- on the VM subsystem for higher order allocations. This option
- will also use IRQ stacks to compensate for the reduced stackspace.
-
-config X86_FIND_SMP_CONFIG
- bool
- depends on X86_LOCAL_APIC || X86_VOYAGER
- default y
-
-config X86_MPPARSE
- bool
- depends on X86_LOCAL_APIC && !X86_VISWS
- default y
-
-endmenu
-
-#
-# Use the generic interrupt handling code in kernel/irq/:
-#
-config GENERIC_HARDIRQS
- bool
- default y
-
-config GENERIC_IRQ_PROBE
- bool
- default y
-
-config X86_SMP
- bool
- depends on SMP && !X86_VOYAGER
- default y
-
-config X86_HT
- bool
- depends on SMP && !(X86_VISWS || X86_VOYAGER)
- default y
-
-config X86_BIOS_REBOOT
- bool
- depends on !(X86_VISWS || X86_VOYAGER)
- default y
-
-config X86_TRAMPOLINE
- bool
- depends on X86_SMP || (X86_VOYAGER && SMP)
- default y
-
-config PC
- bool
- depends on X86 && !EMBEDDED
- default y
-
-endmenu
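
Derived options such as X86_L1_CACHE_SHIFT above are consumed as compile-time constants. A sketch modeled on what include/asm-i386/cache.h does with the value; treat it as illustrative rather than the exact header contents:

    #include <linux/config.h>

    /* cache geometry from the processor-family choice above */
    #define L1_CACHE_SHIFT  (CONFIG_X86_L1_CACHE_SHIFT)
    #define L1_CACHE_BYTES  (1 << L1_CACHE_SHIFT)

    /* e.g. keep a hot counter on its own cache line to avoid
     * false sharing between CPUs */
    struct hot_counter {
        unsigned long value;
    } __attribute__((__aligned__(L1_CACHE_BYTES)));
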
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/arch/xen/i386/Makefile
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/Makefile Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,102 +0,0 @@
-#
-# i386/Makefile
-#
-# This file is included by the global makefile so that you can add your own
-# architecture-specific flags and dependencies. Remember to have actions
-# for "archclean" cleaning up for this architecture.
-#
-# This file is subject to the terms and conditions of the GNU General Public
-# License. See the file "COPYING" in the main directory of this archive
-# for more details.
-#
-# Copyright (C) 1994 by Linus Torvalds
-#
-# 19990713 Artur Skawina <skawina@xxxxxxxxxxxxx>
-# Added '-march' and '-mpreferred-stack-boundary' support
-#
-
-XENARCH := $(subst ",,$(CONFIG_XENARCH))
-
-LDFLAGS := -m elf_i386
-LDFLAGS_vmlinux :=
-CHECK := $(CHECK) -D__i386__=1
-
-CFLAGS += -m32
-AFLAGS += -m32
-
-CFLAGS += -pipe -msoft-float
-
-# prevent gcc from keeping the stack 16 byte aligned
-CFLAGS += $(call cc-option,-mpreferred-stack-boundary=2,)
-
-align := $(cc-option-align)
-cflags-$(CONFIG_M386) += -march=i386
-cflags-$(CONFIG_M486) += -march=i486
-cflags-$(CONFIG_M586) += -march=i586
-cflags-$(CONFIG_M586TSC) += -march=i586
-cflags-$(CONFIG_M586MMX) += $(call cc-option,-march=pentium-mmx,-march=i586)
-cflags-$(CONFIG_M686) += -march=i686
-cflags-$(CONFIG_MPENTIUMII) += -march=i686 $(call cc-option,-mtune=pentium2)
-cflags-$(CONFIG_MPENTIUMIII) += -march=i686 $(call cc-option,-mtune=pentium3)
-cflags-$(CONFIG_MPENTIUMM) += -march=i686 $(call cc-option,-mtune=pentium3)
-cflags-$(CONFIG_MPENTIUM4) += -march=i686 $(call cc-option,-mtune=pentium4)
-cflags-$(CONFIG_MK6) += -march=k6
-# Please note that patches that add -march=athlon-xp and friends are pointless.
-# They make zero difference whatsoever to performance at this time.
-cflags-$(CONFIG_MK7) += $(call cc-option,-march=athlon,-march=i686 $(align)-functions=4)
-cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8,$(call cc-option,-march=athlon,-march=i686 $(align)-functions=4))
-cflags-$(CONFIG_MCRUSOE) += -march=i686 $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
-cflags-$(CONFIG_MEFFICEON) += -march=i686 $(call cc-option,-mtune=pentium3) $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
-cflags-$(CONFIG_MWINCHIPC6) += $(call cc-option,-march=winchip-c6,-march=i586)
-cflags-$(CONFIG_MWINCHIP2) += $(call cc-option,-march=winchip2,-march=i586)
-cflags-$(CONFIG_MWINCHIP3D) += $(call cc-option,-march=winchip2,-march=i586)
-cflags-$(CONFIG_MCYRIXIII) += $(call cc-option,-march=c3,-march=i486) $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
-cflags-$(CONFIG_MVIAC3_2) += $(call cc-option,-march=c3-2,-march=i686)
-
-# AMD Elan support
-cflags-$(CONFIG_X86_ELAN) += -march=i486
-
-# -mregparm=3 works ok on gcc-3.0 and later
-#
-GCC_VERSION := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-version.sh $(CC))
-cflags-$(CONFIG_REGPARM) += $(shell if [ $(GCC_VERSION) -ge 0300 ] ; then echo "-mregparm=3"; fi ;)
-
-# Disable unit-at-a-time mode, it makes gcc use a lot more stack
-# due to the lack of sharing of stacklots.
-CFLAGS += $(call cc-option,-fno-unit-at-a-time,)
-
-CFLAGS += $(cflags-y)
-
-head-y := arch/xen/i386/kernel/head.o arch/xen/i386/kernel/init_task.o
-
-libs-y += arch/i386/lib/
-core-y += arch/xen/i386/kernel/ \
- arch/xen/i386/mm/ \
- arch/i386/crypto/
-# \
-# arch/xen/$(mcore-y)/
-drivers-$(CONFIG_MATH_EMULATION) += arch/i386/math-emu/
-drivers-$(CONFIG_PCI) += arch/xen/i386/pci/
-# must be linked after kernel/
-drivers-$(CONFIG_OPROFILE) += arch/i386/oprofile/
-drivers-$(CONFIG_PM) += arch/i386/power/
-
-# for clean
-obj- += kernel/ mm/ pci/
-#obj- += ../../i386/lib/ ../../i386/mm/
-#../../i386/$(mcore-y)/
-#obj- += ../../i386/pci/ ../../i386/oprofile/ ../../i386/power/
-
-xenflags-y += -Iinclude/asm-xen/asm-i386/mach-xen \
- -Iinclude/asm-i386/mach-default
-CFLAGS += $(xenflags-y)
-AFLAGS += $(xenflags-y)
-
-prepare: include/asm-$(XENARCH)/asm_offsets.h
-CLEAN_FILES += include/asm-$(XENARCH)/asm_offsets.h
-
-arch/$(XENARCH)/kernel/asm-offsets.s: include/asm include/.asm-ignore \
- include/linux/version.h include/config/MARKER
-
-include/asm-$(XENARCH)/asm_offsets.h: arch/$(XENARCH)/kernel/asm-offsets.s
- $(call filechk,gen-asm-offsets)
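
The asm_offsets.h rule above relies on compiling a C file to assembly and scraping structure offsets out of the generated .s file. A condensed sketch of the pattern used by arch/i386/kernel/asm-offsets.c (the symbol and function names here are illustrative):

    /* compiled with -S only; the .s output is scraped, never linked */
    #include <linux/stddef.h>
    #include <linux/sched.h>

    #define DEFINE(sym, val) \
            asm volatile("\n->" #sym " %0 " #val : : "i" (val))

    void foo(void)
    {
        /* leaves a "->TSK_state $0 offsetof(...)" marker in the .s
         * file, which the filechk rule above turns into
         * "#define TSK_state 0" in asm_offsets.h */
        DEFINE(TSK_state, offsetof(struct task_struct, state));
    }
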
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/arch/xen/i386/kernel/Makefile
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/Makefile Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,93 +0,0 @@
-#
-# Makefile for the linux kernel.
-#
-
-XENARCH := $(subst ",,$(CONFIG_XENARCH))
-
-CFLAGS += -Iarch/$(XENARCH)/kernel
-
-extra-y := head.o init_task.o
-
-obj-y := process.o signal.o entry.o traps.o \
- time.o ioport.o ldt.o setup.o \
- pci-dma.o i386_ksyms.o
-
-c-obj-y := semaphore.o irq.o vm86.o \
- ptrace.o sys_i386.o \
- i387.o dmi_scan.o bootflag.o \
- doublefault.o quirks.o
-s-obj-y :=
-
-obj-y += cpu/
-obj-y += timers/
-c-obj-$(CONFIG_ACPI_BOOT) += acpi/
-#c-obj-$(CONFIG_X86_BIOS_REBOOT) += reboot.o
-c-obj-$(CONFIG_MCA) += mca.o
-c-obj-$(CONFIG_X86_MSR) += msr.o
-c-obj-$(CONFIG_X86_CPUID) += cpuid.o
-obj-$(CONFIG_MICROCODE) += microcode.o
-c-obj-$(CONFIG_APM) += apm.o
-c-obj-$(CONFIG_X86_SMP) += smp.o smpboot.o
-c-obj-$(CONFIG_X86_TRAMPOLINE) += trampoline.o
-c-obj-$(CONFIG_X86_MPPARSE) += mpparse.o
-c-obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o
-c-obj-$(CONFIG_X86_IO_APIC) += io_apic.o
-c-obj-$(CONFIG_X86_NUMAQ) += numaq.o
-c-obj-$(CONFIG_X86_SUMMIT_NUMA) += summit.o
-c-obj-$(CONFIG_MODULES) += module.o
-c-obj-y += sysenter.o
-obj-y += vsyscall.o
-c-obj-$(CONFIG_ACPI_SRAT) += srat.o
-c-obj-$(CONFIG_HPET_TIMER) += time_hpet.o
-c-obj-$(CONFIG_EFI) += efi.o efi_stub.o
-c-obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
-
-EXTRA_AFLAGS := -traditional
-
-c-obj-$(CONFIG_SCx200) += scx200.o
-
-# vsyscall.o contains the vsyscall DSO images as __initdata.
-# We must build both images before we can assemble it.
-# Note: kbuild does not track this dependency due to usage of .incbin
-$(obj)/vsyscall.o: $(obj)/vsyscall-int80.so $(obj)/vsyscall-sysenter.so
-targets += $(foreach F,int80 sysenter,vsyscall-$F.o vsyscall-$F.so)
-targets += vsyscall.lds
-
-# The DSO images are built using a special linker script.
-quiet_cmd_syscall = SYSCALL $@
- cmd_syscall = $(CC) -nostdlib -m32 $(SYSCFLAGS_$(@F)) \
- -Wl,-T,$(filter-out FORCE,$^) -o $@
-
-export CPPFLAGS_vsyscall.lds += -P -C -U$(ARCH)
-
-vsyscall-flags = -shared -s -Wl,-soname=linux-gate.so.1
-SYSCFLAGS_vsyscall-sysenter.so = $(vsyscall-flags)
-SYSCFLAGS_vsyscall-int80.so = $(vsyscall-flags)
-
-$(obj)/vsyscall-int80.so $(obj)/vsyscall-sysenter.so: \
-$(obj)/vsyscall-%.so: $(src)/vsyscall.lds $(obj)/vsyscall-%.o FORCE
- $(call if_changed,syscall)
-
-# We also create a special relocatable object that should mirror the symbol
-# table and layout of the linked DSO. With ld -R we can then refer to
-# these symbols in the kernel code rather than hand-coded addresses.
-extra-y += vsyscall-syms.o
-$(obj)/built-in.o: $(obj)/vsyscall-syms.o
-$(obj)/built-in.o: ld_flags += -R $(obj)/vsyscall-syms.o
-
-SYSCFLAGS_vsyscall-syms.o = -r
-$(obj)/vsyscall-syms.o: $(src)/vsyscall.lds $(obj)/vsyscall-sysenter.o FORCE
- $(call if_changed,syscall)
-
-c-link := init_task.o
-s-link := vsyscall-int80.o vsyscall-sysenter.o vsyscall-sigreturn.o vsyscall.lds.o
-
-$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-link)) $(patsubst %.o,$(obj)/%.S,$(s-obj-y) $(s-link)):
- @ln -fsn $(srctree)/arch/i386/kernel/$(notdir $@) $@
-
-$(obj)/vsyscall-int80.S: $(obj)/vsyscall-sigreturn.S
-
-obj-y += $(c-obj-y) $(s-obj-y)
-
-clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-) $(c-link))
-clean-files += $(patsubst %.o,%.S,$(s-obj-y) $(s-obj-) $(s-link))
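
The vsyscall DSO assembled above is mapped into every process as linux-gate.so.1, and the kernel advertises its base address through the ELF auxiliary vector. A small present-day illustration using the glibc getauxval() helper (which did not exist in 2005-era libc):

    #include <stdio.h>
    #include <sys/auxv.h>

    int main(void)
    {
        /* AT_SYSINFO_EHDR is the base of the vsyscall/vDSO ELF image
         * the kernel mapped into this process */
        unsigned long vdso = getauxval(AT_SYSINFO_EHDR);
        printf("vDSO mapped at %#lx\n", vdso);
        return 0;
    }
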
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/arch/xen/i386/kernel/cpu/Makefile
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/cpu/Makefile Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,31 +0,0 @@
-#
-# Makefile for x86-compatible CPU details and quirks
-#
-
-CFLAGS += -Iarch/i386/kernel/cpu
-
-obj-y := common.o
-c-obj-y += proc.o
-
-c-obj-y += amd.o
-c-obj-y += cyrix.o
-c-obj-y += centaur.o
-c-obj-y += transmeta.o
-c-obj-y += intel.o intel_cacheinfo.o
-c-obj-y += rise.o
-c-obj-y += nexgen.o
-c-obj-y += umc.o
-
-#obj-$(CONFIG_X86_MCE) += ../../../../i386/kernel/cpu/mcheck/
-
-obj-$(CONFIG_MTRR) += mtrr/
-#obj-$(CONFIG_CPU_FREQ) += ../../../../i386/kernel/cpu/cpufreq/
-
-c-link :=
-
-$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-link)):
- @ln -fsn $(srctree)/arch/i386/kernel/cpu/$(notdir $@) $@
-
-obj-y += $(c-obj-y)
-
-clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-) $(c-link))
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/arch/xen/i386/kernel/cpu/common.c
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/cpu/common.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,643 +0,0 @@
-#include <linux/init.h>
-#include <linux/string.h>
-#include <linux/delay.h>
-#include <linux/smp.h>
-#include <linux/module.h>
-#include <linux/percpu.h>
-#include <asm/semaphore.h>
-#include <asm/processor.h>
-#include <asm/i387.h>
-#include <asm/msr.h>
-#include <asm/io.h>
-#include <asm/mmu_context.h>
-#ifdef CONFIG_X86_LOCAL_APIC
-#include <asm/mpspec.h>
-#include <asm/apic.h>
-#include <mach_apic.h>
-#endif
-#include <asm-xen/hypervisor.h>
-
-#include "cpu.h"
-
-DEFINE_PER_CPU(struct desc_struct, cpu_gdt_table[GDT_ENTRIES]);
-EXPORT_PER_CPU_SYMBOL(cpu_gdt_table);
-
-static int cachesize_override __initdata = -1;
-static int disable_x86_fxsr __initdata = 0;
-static int disable_x86_serial_nr __initdata = 1;
-
-struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
-
-extern void mcheck_init(struct cpuinfo_x86 *c);
-
-extern void machine_specific_modify_cpu_capabilities(struct cpuinfo_x86 *c);
-
-extern int disable_pse;
-
-static void default_init(struct cpuinfo_x86 * c)
-{
- /* Not much we can do here... */
- /* Check if at least it has cpuid */
- if (c->cpuid_level == -1) {
- /* No cpuid. It must be an ancient CPU */
- if (c->x86 == 4)
- strcpy(c->x86_model_id, "486");
- else if (c->x86 == 3)
- strcpy(c->x86_model_id, "386");
- }
-}
-
-static struct cpu_dev default_cpu = {
- .c_init = default_init,
-};
-static struct cpu_dev * this_cpu = &default_cpu;
-
-static int __init cachesize_setup(char *str)
-{
- get_option (&str, &cachesize_override);
- return 1;
-}
-__setup("cachesize=", cachesize_setup);
-
-int __init get_model_name(struct cpuinfo_x86 *c)
-{
- unsigned int *v;
- char *p, *q;
-
- if (cpuid_eax(0x80000000) < 0x80000004)
- return 0;
-
- v = (unsigned int *) c->x86_model_id;
- cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
- cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
- cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
- c->x86_model_id[48] = 0;
-
- /* Intel chips right-justify this string for some dumb reason;
- undo that brain damage */
- p = q = &c->x86_model_id[0];
- while ( *p == ' ' )
- p++;
- if ( p != q ) {
- while ( *p )
- *q++ = *p++;
- while ( q <= &c->x86_model_id[48] )
- *q++ = '\0'; /* Zero-pad the rest */
- }
-
- return 1;
-}
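
The same 0x80000002..0x80000004 brand-string leaves can be read from userland; a self-contained sketch of the technique get_model_name() uses (illustrative only, gcc inline asm):

    #include <stdio.h>
    #include <string.h>

    static void cpuid(unsigned int op, unsigned int r[4])
    {
        __asm__ volatile("cpuid"
                         : "=a"(r[0]), "=b"(r[1]), "=c"(r[2]), "=d"(r[3])
                         : "a"(op));
    }

    int main(void)
    {
        unsigned int r[4];
        char model[49] = "";   /* 48 chars + NUL, zero-filled */
        int i;

        cpuid(0x80000000, r);
        if (r[0] < 0x80000004)
            return 1;          /* brand string not supported */
        for (i = 0; i < 3; i++) {
            cpuid(0x80000002 + i, r);
            memcpy(model + 16 * i, r, 16);  /* 16 bytes per leaf */
        }
        printf("%s\n", model);
        return 0;
    }
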
-
-
-void __init display_cacheinfo(struct cpuinfo_x86 *c)
-{
- unsigned int n, dummy, ecx, edx, l2size;
-
- n = cpuid_eax(0x80000000);
-
- if (n >= 0x80000005) {
- cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
- printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache
%dK (%d bytes/line)\n",
- edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
- c->x86_cache_size=(ecx>>24)+(edx>>24);
- }
-
- if (n < 0x80000006) /* Some chips just have a large L1. */
- return;
-
- ecx = cpuid_ecx(0x80000006);
- l2size = ecx >> 16;
-
- /* do processor-specific cache resizing */
- if (this_cpu->c_size_cache)
- l2size = this_cpu->c_size_cache(c,l2size);
-
- /* Allow user to override all this if necessary. */
- if (cachesize_override != -1)
- l2size = cachesize_override;
-
- if ( l2size == 0 )
- return; /* Again, no L2 cache is possible */
-
- c->x86_cache_size = l2size;
-
- printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
- l2size, ecx & 0xFF);
-}
-
-/* Naming convention should be: <Name> [(<Codename>)] */
-/* This table is only used if init_<vendor>() below doesn't set it; */
-/* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */
-
-/* Look up CPU names by table lookup. */
-static char __init *table_lookup_model(struct cpuinfo_x86 *c)
-{
- struct cpu_model_info *info;
-
- if ( c->x86_model >= 16 )
- return NULL; /* Range check */
-
- if (!this_cpu)
- return NULL;
-
- info = this_cpu->c_models;
-
- while (info && info->family) {
- if (info->family == c->x86)
- return info->model_names[c->x86_model];
- info++;
- }
- return NULL; /* Not found */
-}
-
-
-void __init get_cpu_vendor(struct cpuinfo_x86 *c, int early)
-{
- char *v = c->x86_vendor_id;
- int i;
-
- for (i = 0; i < X86_VENDOR_NUM; i++) {
- if (cpu_devs[i]) {
- if (!strcmp(v,cpu_devs[i]->c_ident[0]) ||
- (cpu_devs[i]->c_ident[1] &&
- !strcmp(v,cpu_devs[i]->c_ident[1]))) {
- c->x86_vendor = i;
- if (!early)
- this_cpu = cpu_devs[i];
- break;
- }
- }
- }
-}
-
-
-static int __init x86_fxsr_setup(char * s)
-{
- disable_x86_fxsr = 1;
- return 1;
-}
-__setup("nofxsr", x86_fxsr_setup);
-
-
-/* Standard macro to see if a specific flag is changeable */
-static inline int flag_is_changeable_p(u32 flag)
-{
- u32 f1, f2;
-
- asm("pushfl\n\t"
- "pushfl\n\t"
- "popl %0\n\t"
- "movl %0,%1\n\t"
- "xorl %2,%0\n\t"
- "pushl %0\n\t"
- "popfl\n\t"
- "pushfl\n\t"
- "popl %0\n\t"
- "popfl\n\t"
- : "=&r" (f1), "=&r" (f2)
- : "ir" (flag));
-
- return ((f1^f2) & flag) != 0;
-}
-
-
-/* Probe for the CPUID instruction */
-int __init have_cpuid_p(void)
-{
- return flag_is_changeable_p(X86_EFLAGS_ID);
-}
-
-/* Do minimum CPU detection early.
- Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
- The others are not touched to avoid unwanted side effects. */
-void __init early_cpu_detect(void)
-{
- struct cpuinfo_x86 *c = &boot_cpu_data;
-
- c->x86_cache_alignment = 32;
-
- if (!have_cpuid_p())
- return;
-
- /* Get vendor name */
- cpuid(0x00000000, &c->cpuid_level,
- (int *)&c->x86_vendor_id[0],
- (int *)&c->x86_vendor_id[8],
- (int *)&c->x86_vendor_id[4]);
-
- get_cpu_vendor(c, 1);
-
- c->x86 = 4;
- if (c->cpuid_level >= 0x00000001) {
- u32 junk, tfms, cap0, misc;
- cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
- c->x86 = (tfms >> 8) & 15;
- c->x86_model = (tfms >> 4) & 15;
- if (c->x86 == 0xf) {
- c->x86 += (tfms >> 20) & 0xff;
- c->x86_model += ((tfms >> 16) & 0xF) << 4;
- }
- c->x86_mask = tfms & 15;
- if (cap0 & (1<<19))
- c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
- }
-
- early_intel_workaround(c);
-}
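
A worked decode of the leaf-1 EAX ("tfms") layout the function above picks apart, using an example signature value (0x00000f29, roughly a Pentium 4; the value is illustrative):

    #include <stdio.h>

    int main(void)
    {
        unsigned int tfms   = 0x00000f29;        /* example only */
        unsigned int family = (tfms >> 8) & 15;  /* 0xf */
        unsigned int model  = (tfms >> 4) & 15;  /* 2   */
        unsigned int mask   = tfms & 15;         /* stepping 9 */

        if (family == 0xf) {                     /* extended fields apply */
            family += (tfms >> 20) & 0xff;       /* ext family is 0 here  */
            model  += ((tfms >> 16) & 0xf) << 4; /* ext model is 0 here   */
        }
        printf("family %#x model %#x stepping %#x\n", family, model, mask);
        return 0;
    }
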
-
-void __init generic_identify(struct cpuinfo_x86 * c)
-{
- u32 tfms, xlvl;
- int junk;
-
- if (have_cpuid_p()) {
- /* Get vendor name */
- cpuid(0x00000000, &c->cpuid_level,
- (int *)&c->x86_vendor_id[0],
- (int *)&c->x86_vendor_id[8],
- (int *)&c->x86_vendor_id[4]);
-
- get_cpu_vendor(c, 0);
- /* Initialize the standard set of capabilities */
- /* Note that the vendor-specific code below might override */
-
- /* Intel-defined flags: level 0x00000001 */
- if ( c->cpuid_level >= 0x00000001 ) {
- u32 capability, excap;
- cpuid(0x00000001, &tfms, &junk, &excap, &capability);
- c->x86_capability[0] = capability;
- c->x86_capability[4] = excap;
- c->x86 = (tfms >> 8) & 15;
- c->x86_model = (tfms >> 4) & 15;
- if (c->x86 == 0xf) {
- c->x86 += (tfms >> 20) & 0xff;
- c->x86_model += ((tfms >> 16) & 0xF) << 4;
- }
- c->x86_mask = tfms & 15;
- } else {
- /* Have CPUID level 0 only - unheard of */
- c->x86 = 4;
- }
-
- /* AMD-defined flags: level 0x80000001 */
- xlvl = cpuid_eax(0x80000000);
- if ( (xlvl & 0xffff0000) == 0x80000000 ) {
- if ( xlvl >= 0x80000001 ) {
- c->x86_capability[1] = cpuid_edx(0x80000001);
- c->x86_capability[6] = cpuid_ecx(0x80000001);
- }
- if ( xlvl >= 0x80000004 )
- get_model_name(c); /* Default name */
- }
- }
-}
-
-static void __init squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
-{
- if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) {
- /* Disable processor serial number */
- unsigned long lo,hi;
- rdmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
- lo |= 0x200000;
- wrmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
- printk(KERN_NOTICE "CPU serial number disabled.\n");
- clear_bit(X86_FEATURE_PN, c->x86_capability);
-
- /* Disabling the serial number may affect the cpuid level */
- c->cpuid_level = cpuid_eax(0);
- }
-}
-
-static int __init x86_serial_nr_setup(char *s)
-{
- disable_x86_serial_nr = 0;
- return 1;
-}
-__setup("serialnumber", x86_serial_nr_setup);
-
-
-
-/*
- * This does the hard work of actually picking apart the CPU stuff...
- */
-void __init identify_cpu(struct cpuinfo_x86 *c)
-{
- int i;
-
- c->loops_per_jiffy = loops_per_jiffy;
- c->x86_cache_size = -1;
- c->x86_vendor = X86_VENDOR_UNKNOWN;
- c->cpuid_level = -1; /* CPUID not detected */
- c->x86_model = c->x86_mask = 0; /* So far unknown... */
- c->x86_vendor_id[0] = '\0'; /* Unset */
- c->x86_model_id[0] = '\0'; /* Unset */
- c->x86_num_cores = 1;
- memset(&c->x86_capability, 0, sizeof c->x86_capability);
-
- if (!have_cpuid_p()) {
- /* First of all, decide if this is a 486 or higher */
- /* It's a 486 if we can modify the AC flag */
- if ( flag_is_changeable_p(X86_EFLAGS_AC) )
- c->x86 = 4;
- else
- c->x86 = 3;
- }
-
- generic_identify(c);
-
- printk(KERN_DEBUG "CPU: After generic identify, caps:");
- for (i = 0; i < NCAPINTS; i++)
- printk(" %08lx", c->x86_capability[i]);
- printk("\n");
-
- if (this_cpu->c_identify) {
- this_cpu->c_identify(c);
-
- printk(KERN_DEBUG "CPU: After vendor identify, caps:");
- for (i = 0; i < NCAPINTS; i++)
- printk(" %08lx", c->x86_capability[i]);
- printk("\n");
- }
-
- /*
- * Vendor-specific initialization. In this section we
- * canonicalize the feature flags, meaning if there are
- * features a certain CPU supports which CPUID doesn't
- * tell us, CPUID claiming incorrect flags, or other bugs,
- * we handle them here.
- *
- * At the end of this section, c->x86_capability better
- * indicate the features this CPU genuinely supports!
- */
- if (this_cpu->c_init)
- this_cpu->c_init(c);
-
- /* Disable the PN if appropriate */
- squash_the_stupid_serial_number(c);
-
- /*
- * The vendor-specific functions might have changed features. Now
- * we do "generic changes."
- */
-
- /* TSC disabled? */
- if ( tsc_disable )
- clear_bit(X86_FEATURE_TSC, c->x86_capability);
-
- /* FXSR disabled? */
- if (disable_x86_fxsr) {
- clear_bit(X86_FEATURE_FXSR, c->x86_capability);
- clear_bit(X86_FEATURE_XMM, c->x86_capability);
- }
-
- if (disable_pse)
- clear_bit(X86_FEATURE_PSE, c->x86_capability);
-
- /* If the model name is still unset, do table lookup. */
- if ( !c->x86_model_id[0] ) {
- char *p;
- p = table_lookup_model(c);
- if ( p )
- strcpy(c->x86_model_id, p);
- else
- /* Last resort... */
- sprintf(c->x86_model_id, "%02x/%02x",
- c->x86_vendor, c->x86_model);
- }
-
- machine_specific_modify_cpu_capabilities(c);
-
- /* Now the feature flags better reflect actual CPU features! */
-
- printk(KERN_DEBUG "CPU: After all inits, caps:");
- for (i = 0; i < NCAPINTS; i++)
- printk(" %08lx", c->x86_capability[i]);
- printk("\n");
-
- /*
- * On SMP, boot_cpu_data holds the common feature set between
- * all CPUs; so make sure that we indicate which features are
- * common between the CPUs. The first time this routine gets
- * executed, c == &boot_cpu_data.
- */
- if ( c != &boot_cpu_data ) {
- /* AND the already accumulated flags with these */
- for ( i = 0 ; i < NCAPINTS ; i++ )
- boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
- }
-
- /* Init Machine Check Exception if available. */
-#ifdef CONFIG_X86_MCE
- mcheck_init(c);
-#endif
-}
-/*
- * Perform early boot up checks for a valid TSC. See arch/i386/kernel/time.c
- */
-
-void __init dodgy_tsc(void)
-{
- if (( boot_cpu_data.x86_vendor == X86_VENDOR_CYRIX ) ||
- ( boot_cpu_data.x86_vendor == X86_VENDOR_NSC ))
- cpu_devs[X86_VENDOR_CYRIX]->c_init(&boot_cpu_data);
-}
-
-#ifdef CONFIG_X86_HT
-void __init detect_ht(struct cpuinfo_x86 *c)
-{
- u32 eax, ebx, ecx, edx;
- int index_lsb, index_msb, tmp;
- int cpu = smp_processor_id();
-
- if (!cpu_has(c, X86_FEATURE_HT))
- return;
-
- cpuid(1, &eax, &ebx, &ecx, &edx);
- smp_num_siblings = (ebx & 0xff0000) >> 16;
-
- if (smp_num_siblings == 1) {
- printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
- } else if (smp_num_siblings > 1 ) {
- index_lsb = 0;
- index_msb = 31;
-
- if (smp_num_siblings > NR_CPUS) {
- printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings);
- smp_num_siblings = 1;
- return;
- }
- tmp = smp_num_siblings;
- while ((tmp & 1) == 0) {
- tmp >>=1 ;
- index_lsb++;
- }
- tmp = smp_num_siblings;
- while ((tmp & 0x80000000 ) == 0) {
- tmp <<=1 ;
- index_msb--;
- }
- if (index_lsb != index_msb )
- index_msb++;
- phys_proc_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
-
- printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
- phys_proc_id[cpu]);
- }
-}
-#endif
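
The lsb/msb scan in detect_ht() above computes how many low APIC-ID bits encode the sibling index, rounding up when the sibling count is not a power of two. A standalone sketch of that computation (the helper name is hypothetical; assumes siblings >= 1):

    #include <stdio.h>

    /* bits needed to number 'siblings' IDs: 2 -> 1, 3 -> 2, 4 -> 2 */
    static int sibling_id_bits(unsigned int siblings)
    {
        int index_lsb = 0, index_msb = 31;
        unsigned int tmp;

        for (tmp = siblings; (tmp & 1) == 0; tmp >>= 1)
            index_lsb++;
        for (tmp = siblings; (tmp & 0x80000000u) == 0; tmp <<= 1)
            index_msb--;
        if (index_lsb != index_msb)  /* not a power of two: round up */
            index_msb++;
        return index_msb;
    }

    int main(void)
    {
        printf("%d %d %d\n", sibling_id_bits(2), sibling_id_bits(3),
               sibling_id_bits(4));  /* prints "1 2 2" */
        return 0;
    }
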
-
-void __init print_cpu_info(struct cpuinfo_x86 *c)
-{
- char *vendor = NULL;
-
- if (c->x86_vendor < X86_VENDOR_NUM)
- vendor = this_cpu->c_vendor;
- else if (c->cpuid_level >= 0)
- vendor = c->x86_vendor_id;
-
- if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
- printk("%s ", vendor);
-
- if (!c->x86_model_id[0])
- printk("%d86", c->x86);
- else
- printk("%s", c->x86_model_id);
-
- if (c->x86_mask || c->cpuid_level >= 0)
- printk(" stepping %02x\n", c->x86_mask);
- else
- printk("\n");
-}
-
-cpumask_t cpu_initialized __initdata = CPU_MASK_NONE;
-
-/* This is hacky. :)
- * We're emulating future behavior.
- * In the future, the cpu-specific init functions will be called implicitly
- * via the magic of initcalls.
- * They will insert themselves into the cpu_devs structure.
- * Then, when cpu_init() is called, we can just iterate over that array.
- */
-
-extern int intel_cpu_init(void);
-extern int cyrix_init_cpu(void);
-extern int nsc_init_cpu(void);
-extern int amd_init_cpu(void);
-extern int centaur_init_cpu(void);
-extern int transmeta_init_cpu(void);
-extern int rise_init_cpu(void);
-extern int nexgen_init_cpu(void);
-extern int umc_init_cpu(void);
-void early_cpu_detect(void);
-
-void __init early_cpu_init(void)
-{
- intel_cpu_init();
- cyrix_init_cpu();
- nsc_init_cpu();
- amd_init_cpu();
- centaur_init_cpu();
- transmeta_init_cpu();
- rise_init_cpu();
- nexgen_init_cpu();
- umc_init_cpu();
- early_cpu_detect();
-
-#ifdef CONFIG_DEBUG_PAGEALLOC
- /* pse is not compatible with on-the-fly unmapping,
- * disable it even if the cpus claim to support it.
- */
- clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
- disable_pse = 1;
-#endif
-}
-
-void __init cpu_gdt_init(struct Xgt_desc_struct *gdt_descr)
-{
- unsigned long frames[gdt_descr->size >> PAGE_SHIFT];
- unsigned long va;
- int f;
-
- for (va = gdt_descr->address, f = 0;
- va < gdt_descr->address + gdt_descr->size;
- va += PAGE_SIZE, f++) {
- frames[f] = virt_to_machine(va) >> PAGE_SHIFT;
- make_page_readonly((void *)va);
- }
- flush_page_update_queue();
- if (HYPERVISOR_set_gdt(frames, gdt_descr->size / 8))
- BUG();
- lgdt_finish();
-}
-
-/*
- * cpu_init() initializes state that is per-CPU. Some data is already
- * initialized (naturally) in the bootstrap process, such as the GDT
- * and IDT. We reload them nevertheless, this function acts as a
- * 'CPU state barrier', nothing should get across.
- */
-void __init cpu_init (void)
-{
- int cpu = smp_processor_id();
- struct tss_struct * t = &per_cpu(init_tss, cpu);
- struct thread_struct *thread = &current->thread;
-
- if (cpu_test_and_set(cpu, cpu_initialized)) {
- printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
- for (;;) local_irq_enable();
- }
- printk(KERN_INFO "Initializing CPU#%d\n", cpu);
-
- if (cpu_has_vme || cpu_has_de)
- clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
- if (tsc_disable && cpu_has_tsc) {
- printk(KERN_NOTICE "Disabling TSC...\n");
- /**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
- clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
- set_in_cr4(X86_CR4_TSD);
- }
-
- /*
- * Set up the per-thread TLS descriptor cache:
- */
- memcpy(thread->tls_array, &get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN],
- GDT_ENTRY_TLS_ENTRIES * 8);
-
- cpu_gdt_init(&cpu_gdt_descr[cpu]);
-
- /*
- * Delete NT
- */
- __asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl");
-
- /*
- * Set up and load the per-CPU TSS and LDT
- */
- atomic_inc(&init_mm.mm_count);
- current->active_mm = &init_mm;
- if (current->mm)
- BUG();
- enter_lazy_tlb(&init_mm, current);
-
- load_esp0(t, thread);
-
- load_LDT(&init_mm.context);
-
- /* Clear %fs and %gs. */
- asm volatile ("xorl %eax, %eax; movl %eax, %fs; movl %eax, %gs");
-
- /* Clear all 6 debug registers: */
-
-#define CD(register) HYPERVISOR_set_debugreg(register, 0)
-
- CD(0); CD(1); CD(2); CD(3); /* no db4 and db5 */; CD(6); CD(7);
-
-#undef CD
-
- /*
- * Force FPU initialization:
- */
- current_thread_info()->status = 0;
- clear_used_math();
- mxcsr_feature_mask_init();
-}
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/arch/xen/i386/kernel/cpu/mtrr/Makefile
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/cpu/mtrr/Makefile Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,16 +0,0 @@
-obj-y := main.o
-c-obj-y := if.o
-
-c-link :=
-
-$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-link)): $(obj)/mtrr.h
- @ln -fsn $(srctree)/arch/i386/kernel/cpu/mtrr/$(notdir $@) $@
-
-$(patsubst %.o,$(obj)/%.c,$(obj-y)): $(obj)/mtrr.h
-
-$(obj)/mtrr.h:
- @ln -fsn $(srctree)/arch/i386/kernel/cpu/mtrr/mtrr.h $@
-
-obj-y += $(c-obj-y)
-
-clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-) $(c-link))
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/arch/xen/i386/kernel/cpu/mtrr/main.c
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/cpu/mtrr/main.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,165 +0,0 @@
-#include <linux/init.h>
-#include <linux/proc_fs.h>
-#include <linux/ctype.h>
-#include <linux/module.h>
-#include <linux/seq_file.h>
-#include <asm/uaccess.h>
-
-#include <asm/mtrr.h>
-#include "mtrr.h"
-
-void generic_get_mtrr(unsigned int reg, unsigned long *base,
- unsigned int *size, mtrr_type * type)
-{
- dom0_op_t op;
-
- op.cmd = DOM0_READ_MEMTYPE;
- op.u.read_memtype.reg = reg;
- (void)HYPERVISOR_dom0_op(&op);
-
- *size = op.u.read_memtype.nr_pfns;
- *base = op.u.read_memtype.pfn;
- *type = op.u.read_memtype.type;
-}
-
-struct mtrr_ops generic_mtrr_ops = {
- .use_intel_if = 1,
- .get = generic_get_mtrr,
-};
-
-struct mtrr_ops *mtrr_if = &generic_mtrr_ops;
-unsigned int num_var_ranges;
-unsigned int *usage_table;
-
-void __init set_num_var_ranges(void)
-{
- dom0_op_t op;
-
- for (num_var_ranges = 0; ; num_var_ranges++) {
- op.cmd = DOM0_READ_MEMTYPE;
- op.u.read_memtype.reg = num_var_ranges;
- if (HYPERVISOR_dom0_op(&op) != 0)
- break;
- }
-}
-
-static void __init init_table(void)
-{
- int i, max;
-
- max = num_var_ranges;
- if ((usage_table = kmalloc(max * sizeof *usage_table, GFP_KERNEL))
- == NULL) {
- printk(KERN_ERR "mtrr: could not allocate\n");
- return;
- }
- for (i = 0; i < max; i++)
- usage_table[i] = 0;
-}
-
-int mtrr_add_page(unsigned long base, unsigned long size,
- unsigned int type, char increment)
-{
- int error;
- dom0_op_t op;
-
- op.cmd = DOM0_ADD_MEMTYPE;
- op.u.add_memtype.pfn = base;
- op.u.add_memtype.nr_pfns = size;
- op.u.add_memtype.type = type;
- if ((error = HYPERVISOR_dom0_op(&op)))
- return error;
-
- if (increment)
- ++usage_table[op.u.add_memtype.reg];
-
- return op.u.add_memtype.reg;
-}
-
-int
-mtrr_add(unsigned long base, unsigned long size, unsigned int type,
- char increment)
-{
- if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
- printk(KERN_WARNING "mtrr: size and base must be multiples of 4 kiB\n");
- printk(KERN_DEBUG "mtrr: size: 0x%lx base: 0x%lx\n", size, base);
- return -EINVAL;
- }
- return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
- increment);
-}
-
-int mtrr_del_page(int reg, unsigned long base, unsigned long size)
-{
- int i, max;
- mtrr_type ltype;
- unsigned long lbase;
- unsigned int lsize;
- int error = -EINVAL;
- dom0_op_t op;
-
- max = num_var_ranges;
- if (reg < 0) {
- /* Search for existing MTRR */
- for (i = 0; i < max; ++i) {
- mtrr_if->get(i, &lbase, &lsize, &ltype);
- if (lbase == base && lsize == size) {
- reg = i;
- break;
- }
- }
- if (reg < 0) {
- printk(KERN_DEBUG "mtrr: no MTRR for %lx000,%lx000 found\n", base,
- size);
- goto out;
- }
- }
- if (usage_table[reg] < 1) {
- printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg);
- goto out;
- }
- if (--usage_table[reg] < 1) {
- op.cmd = DOM0_DEL_MEMTYPE;
- op.u.del_memtype.handle = 0;
- op.u.add_memtype.reg = reg;
- (void)HYPERVISOR_dom0_op(&op);
- }
- error = reg;
- out:
- return error;
-}
-
-int
-mtrr_del(int reg, unsigned long base, unsigned long size)
-{
- if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
- printk(KERN_INFO "mtrr: size and base must be multiples of 4 kiB\n");
- printk(KERN_DEBUG "mtrr: size: 0x%lx base: 0x%lx\n", size, base);
- return -EINVAL;
- }
- return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
-}
-
-EXPORT_SYMBOL(mtrr_add);
-EXPORT_SYMBOL(mtrr_del);
-
-static int __init mtrr_init(void)
-{
- struct cpuinfo_x86 *c = &boot_cpu_data;
-
- if (!(xen_start_info.flags & SIF_PRIVILEGED))
- return -ENODEV;
-
- if ((!cpu_has(c, X86_FEATURE_MTRR)) &&
- (!cpu_has(c, X86_FEATURE_K6_MTRR)) &&
- (!cpu_has(c, X86_FEATURE_CYRIX_ARR)) &&
- (!cpu_has(c, X86_FEATURE_CENTAUR_MCR)))
- return -ENODEV;
-
- set_num_var_ranges();
- init_table();
-
- return 0;
-}
-
-subsys_initcall(mtrr_init);
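
A point worth noting about the file above: it performs no wrmsr at all. Reads go through the DOM0_READ_MEMTYPE dom0 op and updates through DOM0_ADD_MEMTYPE/DOM0_DEL_MEMTYPE, so the hypervisor remains the sole owner of the physical MTRRs. A hedged sketch of a kernel-side helper (hypothetical name, reusing only the calls shown above) that enumerates every variable range the same way set_num_var_ranges() does:

    /* Sketch: dump all variable ranges via DOM0_READ_MEMTYPE, stopping
     * when the hypervisor rejects the register index. Kernel context. */
    static void __init dump_memtypes(void)
    {
        dom0_op_t op;
        unsigned int reg;

        for (reg = 0; ; reg++) {
            op.cmd = DOM0_READ_MEMTYPE;
            op.u.read_memtype.reg = reg;
            if (HYPERVISOR_dom0_op(&op) != 0)
                break;  /* past the last implemented register */
            printk(KERN_INFO "mtrr: reg %u: base pfn %lx, %lx pfns, type %u\n",
                   reg, (unsigned long)op.u.read_memtype.pfn,
                   (unsigned long)op.u.read_memtype.nr_pfns,
                   (unsigned int)op.u.read_memtype.type);
        }
    }
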
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/arch/xen/i386/kernel/entry.S
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/entry.S Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,1031 +0,0 @@
-/*
- * linux/arch/i386/entry.S
- *
- * Copyright (C) 1991, 1992 Linus Torvalds
- */
-
-/*
- * entry.S contains the system-call and fault low-level handling routines.
- * This also contains the timer-interrupt handler, as well as all interrupts
- * and faults that can result in a task-switch.
- *
- * NOTE: This code handles signal-recognition, which happens every time
- * after a timer-interrupt and after each system call.
- *
- * I changed all the .align's to 4 (16 byte alignment), as that's faster
- * on a 486.
- *
- * Stack layout in 'ret_from_system_call':
- * ptrace needs to have all regs on the stack.
- * if the order here is changed, it needs to be
- * updated in fork.c:copy_process, signal.c:do_signal,
- * ptrace.c and ptrace.h
- *
- * 0(%esp) - %ebx
- * 4(%esp) - %ecx
- * 8(%esp) - %edx
- * C(%esp) - %esi
- * 10(%esp) - %edi
- * 14(%esp) - %ebp
- * 18(%esp) - %eax
- * 1C(%esp) - %ds
- * 20(%esp) - %es
- * 24(%esp) - orig_eax
- * 28(%esp) - %eip
- * 2C(%esp) - %cs
- * 30(%esp) - %eflags
- * 34(%esp) - %oldesp
- * 38(%esp) - %oldss
- *
- * "current" is in register %ebx during any slow entries.
- */
-
-#include <linux/config.h>
-#include <linux/linkage.h>
-#include <asm/thread_info.h>
-#include <asm/errno.h>
-#include <asm/segment.h>
-#include <asm/smp.h>
-#include <asm/page.h>
-#include "irq_vectors.h"
-#include <asm-xen/xen-public/xen.h>
-
-#define nr_syscalls ((syscall_table_size)/4)
-
-EBX = 0x00
-ECX = 0x04
-EDX = 0x08
-ESI = 0x0C
-EDI = 0x10
-EBP = 0x14
-EAX = 0x18
-DS = 0x1C
-ES = 0x20
-ORIG_EAX = 0x24
-EIP = 0x28
-CS = 0x2C
-EVENT_MASK = 0x2E
-EFLAGS = 0x30
-OLDESP = 0x34
-OLDSS = 0x38
-
-CF_MASK = 0x00000001
-TF_MASK = 0x00000100
-IF_MASK = 0x00000200
-DF_MASK = 0x00000400
-NT_MASK = 0x00004000
-VM_MASK = 0x00020000
-
-/* Offsets into shared_info_t. */
-#define evtchn_upcall_pending /* 0 */
-#define evtchn_upcall_mask 1
-
-#define XEN_GET_VCPU_INFO(reg) movl HYPERVISOR_shared_info,reg
-#define XEN_BLOCK_EVENTS(reg) movb $1,evtchn_upcall_mask(reg)
-#define XEN_UNBLOCK_EVENTS(reg) movb $0,evtchn_upcall_mask(reg)
-#define XEN_SAVE_UPCALL_MASK(reg,tmp,off) \
- movb evtchn_upcall_mask(reg), tmp; \
- movb tmp, off(%esp)
-
-#define XEN_TEST_PENDING(reg) testb $0xFF,evtchn_upcall_pending(reg)
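
In C terms these macros poke two per-VCPU bytes in the shared info page, at the offsets defined just above (a conceptual sketch; the real shared_info layout carries more state):

    /* Sketch of the XEN_* macros above as C. */
    struct vcpu_shared {                      /* abbreviated */
        unsigned char evtchn_upcall_pending;  /* offset 0: event waiting */
        unsigned char evtchn_upcall_mask;     /* offset 1: delivery blocked */
    };

    static inline void block_events(struct vcpu_shared *sh)
    {
        sh->evtchn_upcall_mask = 1;           /* paravirtual analogue of cli */
    }

    static inline void unblock_events(struct vcpu_shared *sh)
    {
        sh->evtchn_upcall_mask = 0;           /* paravirtual analogue of sti */
    }

    static inline int events_pending(struct vcpu_shared *sh)
    {
        return sh->evtchn_upcall_pending != 0;
    }

A plain byte write suffices because the hypervisor is the only other party that inspects the mask.
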
-
-#ifdef CONFIG_PREEMPT
-#define preempt_stop XEN_BLOCK_EVENTS(%esi)
-#else
-#define preempt_stop
-#define resume_kernel restore_all
-#endif
-
-#define SAVE_ALL_NO_EVENTMASK \
- cld; \
- pushl %es; \
- pushl %ds; \
- pushl %eax; \
- pushl %ebp; \
- pushl %edi; \
- pushl %esi; \
- pushl %edx; \
- pushl %ecx; \
- pushl %ebx; \
- movl $(__USER_DS), %edx; \
- movl %edx, %ds; \
- movl %edx, %es;
-
-#define SAVE_ALL \
- SAVE_ALL_NO_EVENTMASK; \
- XEN_GET_VCPU_INFO(%esi); \
- XEN_SAVE_UPCALL_MASK(%esi,%dl,EVENT_MASK)
-
-#define RESTORE_INT_REGS \
- popl %ebx; \
- popl %ecx; \
- popl %edx; \
- popl %esi; \
- popl %edi; \
- popl %ebp; \
- popl %eax
-
-#define RESTORE_REGS \
- RESTORE_INT_REGS; \
-1: popl %ds; \
-2: popl %es; \
-.section .fixup,"ax"; \
-3: movl $0,(%esp); \
- jmp 1b; \
-4: movl $0,(%esp); \
- jmp 2b; \
-.previous; \
-.section __ex_table,"a";\
- .align 4; \
- .long 1b,3b; \
- .long 2b,4b; \
-.previous
-
-
-#define RESTORE_ALL \
- RESTORE_REGS \
- addl $4, %esp; \
-1: iret; \
-.section .fixup,"ax"; \
-2: movl $(__USER_DS), %edx; \
- movl %edx, %ds; \
- movl %edx, %es; \
- movl $11,%eax; \
- call do_exit; \
-.previous; \
-.section __ex_table,"a";\
- .align 4; \
- .long 1b,2b; \
-.previous
-
-
-ENTRY(ret_from_fork)
- pushl %eax
- call schedule_tail
- GET_THREAD_INFO(%ebp)
- popl %eax
- XEN_GET_VCPU_INFO(%esi)
- jmp syscall_exit
-
-/*
- * Return to user mode is not as complex as all this looks,
- * but we want the default path for a system call return to
- * go as quickly as possible which is why some of this is
- * less clear than it otherwise should be.
- */
-
- # userspace resumption stub bypassing syscall exit tracing
- ALIGN
-ret_from_exception:
- preempt_stop
-ret_from_intr:
- GET_THREAD_INFO(%ebp)
- movl EFLAGS(%esp), %eax # mix EFLAGS and CS
- movb CS(%esp), %al
- testl $(VM_MASK | 2), %eax
- jz resume_kernel # returning to kernel or vm86-space
-ENTRY(resume_userspace)
- XEN_GET_VCPU_INFO(%esi)
- XEN_BLOCK_EVENTS(%esi) # make sure we don't miss an interrupt
- # setting need_resched or sigpending
- # between sampling and the iret
- movl TI_flags(%ebp), %ecx
- andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
- # int/exception return?
- jne work_pending
- jmp restore_all
-
-#ifdef CONFIG_PREEMPT
-ENTRY(resume_kernel)
- XEN_GET_VCPU_INFO(%esi)
- XEN_BLOCK_EVENTS(%esi)
- cmpl $0,TI_preempt_count(%ebp) # non-zero preempt_count ?
- jnz restore_all
-need_resched:
- movl TI_flags(%ebp), %ecx # need_resched set ?
- testb $_TIF_NEED_RESCHED, %cl
- jz restore_all
- testb $0xFF,EVENT_MASK(%esp) # interrupts off (exception path) ?
- jnz restore_all
- call preempt_schedule_irq
- jmp need_resched
-#endif
-
-/* SYSENTER_RETURN points to after the "sysenter" instruction in
- the vsyscall page. See vsyscall-sysentry.S, which defines the symbol. */
-
- # sysenter call handler stub
-ENTRY(sysenter_entry)
- movl TSS_sysenter_esp0(%esp),%esp
-sysenter_past_esp:
- sti
- pushl $(__USER_DS)
- pushl %ebp
- pushfl
- pushl $(__USER_CS)
- pushl $SYSENTER_RETURN
-
-/*
- * Load the potential sixth argument from user stack.
- * Careful about security.
- */
- cmpl $__PAGE_OFFSET-3,%ebp
- jae syscall_fault
-1: movl (%ebp),%ebp
-.section __ex_table,"a"
- .align 4
- .long 1b,syscall_fault
-.previous
-
- pushl %eax
- SAVE_ALL
- GET_THREAD_INFO(%ebp)
-
- testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
- jnz syscall_trace_entry
- cmpl $(nr_syscalls), %eax
- jae syscall_badsys
- call *sys_call_table(,%eax,4)
- movl %eax,EAX(%esp)
- cli
- movl TI_flags(%ebp), %ecx
- testw $_TIF_ALLWORK_MASK, %cx
- jne syscall_exit_work
-/* if something modifies registers it must also disable sysexit */
- movl EIP(%esp), %edx
- movl OLDESP(%esp), %ecx
- xorl %ebp,%ebp
- sti
- sysexit
-
-
- # system call handler stub
-ENTRY(system_call)
- pushl %eax # save orig_eax
- SAVE_ALL
- GET_THREAD_INFO(%ebp)
- # system call tracing in operation
- testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
- jnz syscall_trace_entry
- cmpl $(nr_syscalls), %eax
- jae syscall_badsys
-syscall_call:
- call *sys_call_table(,%eax,4)
- movl %eax,EAX(%esp) # store the return value
-syscall_exit:
- XEN_BLOCK_EVENTS(%esi) # make sure we don't miss an interrupt
- # setting need_resched or sigpending
- # between sampling and the iret
- movl TI_flags(%ebp), %ecx
- testw $_TIF_ALLWORK_MASK, %cx # current->work
- jne syscall_exit_work
-restore_all:
- testl $VM_MASK, EFLAGS(%esp)
- jnz resume_vm86
- movb EVENT_MASK(%esp), %al
- notb %al # %al == ~saved_mask
- andb evtchn_upcall_mask(%esi),%al
- andb $1,%al # %al == mask & ~saved_mask
- jnz restore_all_enable_events # != 0 => reenable event delivery
- RESTORE_ALL
-
-resume_vm86:
- XEN_UNBLOCK_EVENTS(%esi)
- RESTORE_REGS
- movl %eax,(%esp)
- movl $__HYPERVISOR_switch_vm86,%eax
- int $0x82
- ud2
-
- # perform work that needs to be done immediately before resumption
- ALIGN
-work_pending:
- testb $_TIF_NEED_RESCHED, %cl
- jz work_notifysig
-work_resched:
- call schedule
- XEN_BLOCK_EVENTS(%esi) # make sure we don't miss an interrupt
- # setting need_resched or sigpending
- # between sampling and the iret
- movl TI_flags(%ebp), %ecx
- andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
- # than syscall tracing?
- jz restore_all
- testb $_TIF_NEED_RESCHED, %cl
- jnz work_resched
-
-work_notifysig: # deal with pending signals and
- # notify-resume requests
- testl $VM_MASK, EFLAGS(%esp)
- movl %esp, %eax
- jne work_notifysig_v86 # returning to kernel-space or
- # vm86-space
- xorl %edx, %edx
- call do_notify_resume
- jmp restore_all
-
- ALIGN
-work_notifysig_v86:
- pushl %ecx # save ti_flags for do_notify_resume
- call save_v86_state # %eax contains pt_regs pointer
- popl %ecx
- movl %eax, %esp
- xorl %edx, %edx
- call do_notify_resume
- jmp restore_all
-
- # perform syscall exit tracing
- ALIGN
-syscall_trace_entry:
- movl $-ENOSYS,EAX(%esp)
- movl %esp, %eax
- xorl %edx,%edx
- call do_syscall_trace
- movl ORIG_EAX(%esp), %eax
- cmpl $(nr_syscalls), %eax
- jnae syscall_call
- jmp syscall_exit
-
- # perform syscall exit tracing
- ALIGN
-syscall_exit_work:
- testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
- jz work_pending
- XEN_UNBLOCK_EVENTS(%esi) # could let do_syscall_trace() call
- # schedule() instead
- movl %esp, %eax
- movl $1, %edx
- call do_syscall_trace
- jmp resume_userspace
-
- ALIGN
-syscall_fault:
- pushl %eax # save orig_eax
- SAVE_ALL
- GET_THREAD_INFO(%ebp)
- movl $-EFAULT,EAX(%esp)
- jmp resume_userspace
-
- ALIGN
-syscall_badsys:
- movl $-ENOSYS,EAX(%esp)
- jmp resume_userspace
-
-#if 0 /* XEN */
-/*
- * Build the entry stubs and pointer table with
- * some assembler magic.
- */
-.data
-ENTRY(interrupt)
-.text
-
-vector=0
-ENTRY(irq_entries_start)
-.rept NR_IRQS
- ALIGN
-1: pushl $vector-256
- jmp common_interrupt
-.data
- .long 1b
-.text
-vector=vector+1
-.endr
-
- ALIGN
-common_interrupt:
- SAVE_ALL
- movl %esp,%eax
- call do_IRQ
- jmp ret_from_intr
-
-#define BUILD_INTERRUPT(name, nr) \
-ENTRY(name) \
- pushl $nr-256; \
- SAVE_ALL \
- movl %esp,%eax; \
- call smp_/**/name; \
- jmp ret_from_intr;
-
-/* The include is where all of the SMP etc. interrupts come from */
-#include "entry_arch.h"
-#endif /* XEN */
-
-ENTRY(divide_error)
- pushl $0 # no error code
- pushl $do_divide_error
- ALIGN
-error_code:
- pushl %ds
- pushl %eax
- xorl %eax, %eax
- pushl %ebp
- pushl %edi
- pushl %esi
- pushl %edx
- decl %eax # eax = -1
- pushl %ecx
- pushl %ebx
- cld
- movl %es, %ecx
- movl ES(%esp), %edi # get the function address
- movl ORIG_EAX(%esp), %edx # get the error code
- movl %eax, ORIG_EAX(%esp)
- movl %ecx, ES(%esp)
- movl $(__USER_DS), %ecx
- movl %ecx, %ds
- movl %ecx, %es
- movl %esp,%eax # pt_regs pointer
- XEN_GET_VCPU_INFO(%esi)
- XEN_SAVE_UPCALL_MASK(%esi,%bl,EVENT_MASK)
- call *%edi
- jmp ret_from_exception
-
-# A note on the "critical region" in our callback handler.
-# We want to avoid stacking callback handlers due to events occurring
-# during handling of the last event. To do this, we keep events disabled
-# until we've done all processing. HOWEVER, we must enable events before
-# popping the stack frame (can't be done atomically) and so it would still
-# be possible to get enough handler activations to overflow the stack.
-# Although unlikely, bugs of that kind are hard to track down, so we'd
-# like to avoid the possibility.
-# So, on entry to the handler we detect whether we interrupted an
-# existing activation in its critical region -- if so, we pop the current
-# activation and restart the handler using the previous one.
-ENTRY(hypervisor_callback)
- pushl %eax
- SAVE_ALL_NO_EVENTMASK
- movl EIP(%esp),%eax
- cmpl $scrit,%eax
- jb 11f
- cmpl $ecrit,%eax
- jb critical_region_fixup
-11: XEN_GET_VCPU_INFO(%esi)
- movb $0, EVENT_MASK(%esp)
- push %esp
- call evtchn_do_upcall
- add $4,%esp
- jmp ret_from_intr
-
- ALIGN
-restore_all_enable_events:
- XEN_UNBLOCK_EVENTS(%esi)
-scrit: /**** START OF CRITICAL REGION ****/
- XEN_TEST_PENDING(%esi)
- jnz 14f # process more events if necessary...
- RESTORE_ALL
-14: XEN_BLOCK_EVENTS(%esi)
- jmp 11b
-ecrit: /**** END OF CRITICAL REGION ****/
-# [How we do the fixup]. We want to merge the current stack frame with the
-# just-interrupted frame. How we do this depends on where in the critical
-# region the interrupted handler was executing, and so how many saved
-# registers are in each frame. We do this quickly using the lookup table
-# 'critical_fixup_table'. For each byte offset in the critical region, it
-# provides the number of bytes which have already been popped from the
-# interrupted stack frame.
-critical_region_fixup:
- addl $critical_fixup_table-scrit,%eax
- movzbl (%eax),%eax # %eax contains num bytes popped
- mov %esp,%esi
- add %eax,%esi # %esi points at end of src region
- mov %esp,%edi
- add $0x34,%edi # %edi points at end of dst region
- mov %eax,%ecx
- shr $2,%ecx # convert bytes to words
- je 16f # skip loop if nothing to copy
-15: subl $4,%esi # pre-decrementing copy loop
- subl $4,%edi
- movl (%esi),%eax
- movl %eax,(%edi)
- loop 15b
-16: movl %edi,%esp # final %edi is top of merged stack
- jmp 11b
-
-critical_fixup_table:
- .byte 0x00,0x00,0x00 # testb $0xff,(%esi) = XEN_TEST_PENDING
- .byte 0x00,0x00 # jnz 14f
- .byte 0x00 # pop %ebx
- .byte 0x04 # pop %ecx
- .byte 0x08 # pop %edx
- .byte 0x0c # pop %esi
- .byte 0x10 # pop %edi
- .byte 0x14 # pop %ebp
- .byte 0x18 # pop %eax
- .byte 0x1c # pop %ds
- .byte 0x20 # pop %es
- .byte 0x24,0x24,0x24 # add $4,%esp
- .byte 0x28 # iret
- .byte 0x00,0x00,0x00,0x00 # movb $1,1(%esi)
- .byte 0x00,0x00 # jmp 11b
-
-# Hypervisor uses this for application faults while it executes.
-ENTRY(failsafe_callback)
-1: popl %ds
-2: popl %es
-3: popl %fs
-4: popl %gs
- subl $4,%esp
- SAVE_ALL
- jmp ret_from_exception
-.section .fixup,"ax"; \
-6: movl $0,(%esp); \
- jmp 1b; \
-7: movl $0,(%esp); \
- jmp 2b; \
-8: movl $0,(%esp); \
- jmp 3b; \
-9: movl $0,(%esp); \
- jmp 4b; \
-.previous; \
-.section __ex_table,"a";\
- .align 4; \
- .long 1b,6b; \
- .long 2b,7b; \
- .long 3b,8b; \
- .long 4b,9b; \
-.previous
-
-ENTRY(coprocessor_error)
- pushl $0
- pushl $do_coprocessor_error
- jmp error_code
-
-ENTRY(simd_coprocessor_error)
- pushl $0
- pushl $do_simd_coprocessor_error
- jmp error_code
-
-ENTRY(device_not_available)
- pushl $-1 # mark this as an int
- SAVE_ALL
- preempt_stop
- call math_state_restore
- jmp ret_from_exception
-
-/*
- * Debug traps and NMI can happen at the one SYSENTER instruction
- * that sets up the real kernel stack. Check here, since we can't
- * allow the wrong stack to be used.
- *
- * "TSS_sysenter_esp0+12" is because the NMI/debug handler will have
- * already pushed 3 words if it hits on the sysenter instruction:
- * eflags, cs and eip.
- *
- * We just load the right stack, and push the three (known) values
- * by hand onto the new stack - while updating the return eip past
- * the instruction that would have done it for sysenter.
- */
-#define FIX_STACK(offset, ok, label) \
- cmpw $__KERNEL_CS,4(%esp); \
- jne ok; \
-label: \
- movl TSS_sysenter_esp0+offset(%esp),%esp; \
- pushfl; \
- pushl $__KERNEL_CS; \
- pushl $sysenter_past_esp
-
-ENTRY(debug)
- cmpl $sysenter_entry,(%esp)
- jne debug_stack_correct
- FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
-debug_stack_correct:
- pushl $-1 # mark this as an int
- SAVE_ALL
- xorl %edx,%edx # error code 0
- movl %esp,%eax # pt_regs pointer
- call do_debug
- testl %eax,%eax
- jnz restore_all
- jmp ret_from_exception
-
-#if 0 /* XEN */
-/*
- * NMI is doubly nasty. It can happen _while_ we're handling
- * a debug fault, and the debug fault hasn't yet been able to
- * clear up the stack. So we first check whether we got an
- * NMI on the sysenter entry path, but after that we need to
- * check whether we got an NMI on the debug path where the debug
- * fault happened on the sysenter path.
- */
-ENTRY(nmi)
- cmpl $sysenter_entry,(%esp)
- je nmi_stack_fixup
- pushl %eax
- movl %esp,%eax
- /* Do not access memory above the end of our stack page,
- * it might not exist.
- */
- andl $(THREAD_SIZE-1),%eax
- cmpl $(THREAD_SIZE-20),%eax
- popl %eax
- jae nmi_stack_correct
- cmpl $sysenter_entry,12(%esp)
- je nmi_debug_stack_check
-nmi_stack_correct:
- pushl %eax
- SAVE_ALL
- xorl %edx,%edx # zero error code
- movl %esp,%eax # pt_regs pointer
- call do_nmi
- RESTORE_ALL
-
-nmi_stack_fixup:
- FIX_STACK(12,nmi_stack_correct, 1)
- jmp nmi_stack_correct
-nmi_debug_stack_check:
- cmpw $__KERNEL_CS,16(%esp)
- jne nmi_stack_correct
- cmpl $debug - 1,(%esp)
- jle nmi_stack_correct
- cmpl $debug_esp_fix_insn,(%esp)
- jle nmi_debug_stack_fixup
-nmi_debug_stack_fixup:
- FIX_STACK(24,nmi_stack_correct, 1)
- jmp nmi_stack_correct
-#endif /* XEN */
-
-ENTRY(int3)
- pushl $-1 # mark this as an int
- SAVE_ALL
- xorl %edx,%edx # zero error code
- movl %esp,%eax # pt_regs pointer
- call do_int3
- testl %eax,%eax
- jnz restore_all
- jmp ret_from_exception
-
-ENTRY(overflow)
- pushl $0
- pushl $do_overflow
- jmp error_code
-
-ENTRY(bounds)
- pushl $0
- pushl $do_bounds
- jmp error_code
-
-ENTRY(invalid_op)
- pushl $0
- pushl $do_invalid_op
- jmp error_code
-
-ENTRY(coprocessor_segment_overrun)
- pushl $0
- pushl $do_coprocessor_segment_overrun
- jmp error_code
-
-ENTRY(invalid_TSS)
- pushl $do_invalid_TSS
- jmp error_code
-
-ENTRY(segment_not_present)
- pushl $do_segment_not_present
- jmp error_code
-
-ENTRY(stack_segment)
- pushl $do_stack_segment
- jmp error_code
-
-ENTRY(general_protection)
- pushl $do_general_protection
- jmp error_code
-
-ENTRY(alignment_check)
- pushl $do_alignment_check
- jmp error_code
-
-# This handler is special, because it gets an extra value on its stack,
-# which is the linear faulting address.
-# fastcall register usage: %eax = pt_regs, %edx = error code,
-# %ecx = fault address
-ENTRY(page_fault)
- pushl %ds
- pushl %eax
- xorl %eax, %eax
- pushl %ebp
- pushl %edi
- pushl %esi
- pushl %edx
- decl %eax /* eax = -1 */
- pushl %ecx
- pushl %ebx
- cld
- movl %es,%edi
- movl ES(%esp), %ecx /* get the faulting address */
- movl ORIG_EAX(%esp), %edx /* get the error code */
- movl %eax, ORIG_EAX(%esp)
- movl %edi, ES(%esp)
- movl $(__KERNEL_DS),%eax
- movl %eax, %ds
- movl %eax, %es
- movl %esp,%eax /* pt_regs pointer */
- XEN_GET_VCPU_INFO(%esi)
- XEN_SAVE_UPCALL_MASK(%esi,%bl,EVENT_MASK)
- call do_page_fault
- jmp ret_from_exception
-
-#ifdef CONFIG_X86_MCE
-ENTRY(machine_check)
- pushl $0
- pushl machine_check_vector
- jmp error_code
-#endif
-
-ENTRY(fixup_4gb_segment)
- pushl $do_fixup_4gb_segment
- jmp error_code
-
-.data
-ENTRY(sys_call_table)
- .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
- .long sys_exit
- .long sys_fork
- .long sys_read
- .long sys_write
- .long sys_open /* 5 */
- .long sys_close
- .long sys_waitpid
- .long sys_creat
- .long sys_link
- .long sys_unlink /* 10 */
- .long sys_execve
- .long sys_chdir
- .long sys_time
- .long sys_mknod
- .long sys_chmod /* 15 */
- .long sys_lchown16
- .long sys_ni_syscall /* old break syscall holder */
- .long sys_stat
- .long sys_lseek
- .long sys_getpid /* 20 */
- .long sys_mount
- .long sys_oldumount
- .long sys_setuid16
- .long sys_getuid16
- .long sys_stime /* 25 */
- .long sys_ptrace
- .long sys_alarm
- .long sys_fstat
- .long sys_pause
- .long sys_utime /* 30 */
- .long sys_ni_syscall /* old stty syscall holder */
- .long sys_ni_syscall /* old gtty syscall holder */
- .long sys_access
- .long sys_nice
- .long sys_ni_syscall /* 35 - old ftime syscall holder */
- .long sys_sync
- .long sys_kill
- .long sys_rename
- .long sys_mkdir
- .long sys_rmdir /* 40 */
- .long sys_dup
- .long sys_pipe
- .long sys_times
- .long sys_ni_syscall /* old prof syscall holder */
- .long sys_brk /* 45 */
- .long sys_setgid16
- .long sys_getgid16
- .long sys_signal
- .long sys_geteuid16
- .long sys_getegid16 /* 50 */
- .long sys_acct
- .long sys_umount /* recycled never used phys() */
- .long sys_ni_syscall /* old lock syscall holder */
- .long sys_ioctl
- .long sys_fcntl /* 55 */
- .long sys_ni_syscall /* old mpx syscall holder */
- .long sys_setpgid
- .long sys_ni_syscall /* old ulimit syscall holder */
- .long sys_olduname
- .long sys_umask /* 60 */
- .long sys_chroot
- .long sys_ustat
- .long sys_dup2
- .long sys_getppid
- .long sys_getpgrp /* 65 */
- .long sys_setsid
- .long sys_sigaction
- .long sys_sgetmask
- .long sys_ssetmask
- .long sys_setreuid16 /* 70 */
- .long sys_setregid16
- .long sys_sigsuspend
- .long sys_sigpending
- .long sys_sethostname
- .long sys_setrlimit /* 75 */
- .long sys_old_getrlimit
- .long sys_getrusage
- .long sys_gettimeofday
- .long sys_settimeofday
- .long sys_getgroups16 /* 80 */
- .long sys_setgroups16
- .long old_select
- .long sys_symlink
- .long sys_lstat
- .long sys_readlink /* 85 */
- .long sys_uselib
- .long sys_swapon
- .long sys_reboot
- .long old_readdir
- .long old_mmap /* 90 */
- .long sys_munmap
- .long sys_truncate
- .long sys_ftruncate
- .long sys_fchmod
- .long sys_fchown16 /* 95 */
- .long sys_getpriority
- .long sys_setpriority
- .long sys_ni_syscall /* old profil syscall holder */
- .long sys_statfs
- .long sys_fstatfs /* 100 */
- .long sys_ioperm
- .long sys_socketcall
- .long sys_syslog
- .long sys_setitimer
- .long sys_getitimer /* 105 */
- .long sys_newstat
- .long sys_newlstat
- .long sys_newfstat
- .long sys_uname
- .long sys_iopl /* 110 */
- .long sys_vhangup
- .long sys_ni_syscall /* old "idle" system call */
- .long sys_vm86old
- .long sys_wait4
- .long sys_swapoff /* 115 */
- .long sys_sysinfo
- .long sys_ipc
- .long sys_fsync
- .long sys_sigreturn
- .long sys_clone /* 120 */
- .long sys_setdomainname
- .long sys_newuname
- .long sys_modify_ldt
- .long sys_adjtimex
- .long sys_mprotect /* 125 */
- .long sys_sigprocmask
- .long sys_ni_syscall /* old "create_module" */
- .long sys_init_module
- .long sys_delete_module
- .long sys_ni_syscall /* 130: old "get_kernel_syms" */
- .long sys_quotactl
- .long sys_getpgid
- .long sys_fchdir
- .long sys_bdflush
- .long sys_sysfs /* 135 */
- .long sys_personality
- .long sys_ni_syscall /* reserved for afs_syscall */
- .long sys_setfsuid16
- .long sys_setfsgid16
- .long sys_llseek /* 140 */
- .long sys_getdents
- .long sys_select
- .long sys_flock
- .long sys_msync
- .long sys_readv /* 145 */
- .long sys_writev
- .long sys_getsid
- .long sys_fdatasync
- .long sys_sysctl
- .long sys_mlock /* 150 */
- .long sys_munlock
- .long sys_mlockall
- .long sys_munlockall
- .long sys_sched_setparam
- .long sys_sched_getparam /* 155 */
- .long sys_sched_setscheduler
- .long sys_sched_getscheduler
- .long sys_sched_yield
- .long sys_sched_get_priority_max
- .long sys_sched_get_priority_min /* 160 */
- .long sys_sched_rr_get_interval
- .long sys_nanosleep
- .long sys_mremap
- .long sys_setresuid16
- .long sys_getresuid16 /* 165 */
- .long sys_vm86
- .long sys_ni_syscall /* Old sys_query_module */
- .long sys_poll
- .long sys_nfsservctl
- .long sys_setresgid16 /* 170 */
- .long sys_getresgid16
- .long sys_prctl
- .long sys_rt_sigreturn
- .long sys_rt_sigaction
- .long sys_rt_sigprocmask /* 175 */
- .long sys_rt_sigpending
- .long sys_rt_sigtimedwait
- .long sys_rt_sigqueueinfo
- .long sys_rt_sigsuspend
- .long sys_pread64 /* 180 */
- .long sys_pwrite64
- .long sys_chown16
- .long sys_getcwd
- .long sys_capget
- .long sys_capset /* 185 */
- .long sys_sigaltstack
- .long sys_sendfile
- .long sys_ni_syscall /* reserved for streams1 */
- .long sys_ni_syscall /* reserved for streams2 */
- .long sys_vfork /* 190 */
- .long sys_getrlimit
- .long sys_mmap2
- .long sys_truncate64
- .long sys_ftruncate64
- .long sys_stat64 /* 195 */
- .long sys_lstat64
- .long sys_fstat64
- .long sys_lchown
- .long sys_getuid
- .long sys_getgid /* 200 */
- .long sys_geteuid
- .long sys_getegid
- .long sys_setreuid
- .long sys_setregid
- .long sys_getgroups /* 205 */
- .long sys_setgroups
- .long sys_fchown
- .long sys_setresuid
- .long sys_getresuid
- .long sys_setresgid /* 210 */
- .long sys_getresgid
- .long sys_chown
- .long sys_setuid
- .long sys_setgid
- .long sys_setfsuid /* 215 */
- .long sys_setfsgid
- .long sys_pivot_root
- .long sys_mincore
- .long sys_madvise
- .long sys_getdents64 /* 220 */
- .long sys_fcntl64
- .long sys_ni_syscall /* reserved for TUX */
- .long sys_ni_syscall
- .long sys_gettid
- .long sys_readahead /* 225 */
- .long sys_setxattr
- .long sys_lsetxattr
- .long sys_fsetxattr
- .long sys_getxattr
- .long sys_lgetxattr /* 230 */
- .long sys_fgetxattr
- .long sys_listxattr
- .long sys_llistxattr
- .long sys_flistxattr
- .long sys_removexattr /* 235 */
- .long sys_lremovexattr
- .long sys_fremovexattr
- .long sys_tkill
- .long sys_sendfile64
- .long sys_futex /* 240 */
- .long sys_sched_setaffinity
- .long sys_sched_getaffinity
- .long sys_set_thread_area
- .long sys_get_thread_area
- .long sys_io_setup /* 245 */
- .long sys_io_destroy
- .long sys_io_getevents
- .long sys_io_submit
- .long sys_io_cancel
- .long sys_fadvise64 /* 250 */
- .long sys_ni_syscall
- .long sys_exit_group
- .long sys_lookup_dcookie
- .long sys_epoll_create
- .long sys_epoll_ctl /* 255 */
- .long sys_epoll_wait
- .long sys_remap_file_pages
- .long sys_set_tid_address
- .long sys_timer_create
- .long sys_timer_settime /* 260 */
- .long sys_timer_gettime
- .long sys_timer_getoverrun
- .long sys_timer_delete
- .long sys_clock_settime
- .long sys_clock_gettime /* 265 */
- .long sys_clock_getres
- .long sys_clock_nanosleep
- .long sys_statfs64
- .long sys_fstatfs64
- .long sys_tgkill /* 270 */
- .long sys_utimes
- .long sys_fadvise64_64
- .long sys_ni_syscall /* sys_vserver */
- .long sys_mbind
- .long sys_get_mempolicy
- .long sys_set_mempolicy
- .long sys_mq_open
- .long sys_mq_unlink
- .long sys_mq_timedsend
- .long sys_mq_timedreceive /* 280 */
- .long sys_mq_notify
- .long sys_mq_getsetattr
- .long sys_ni_syscall /* reserved for kexec */
- .long sys_waitid
- .long sys_ni_syscall /* 285 */ /* available */
- .long sys_add_key
- .long sys_request_key
- .long sys_keyctl
-
-syscall_table_size=(.-sys_call_table)
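
One subtlety worth restating from restore_all above: the not/and/and sequence decides whether event delivery must be re-enabled before the frame is popped. As a conceptual C model (not code from the file):

    /* saved_mask is the EVENT_MASK byte captured when the frame was built;
     * live_mask is evtchn_upcall_mask in the shared info page right now. */
    static int must_reenable_events(unsigned char live_mask,
                                    unsigned char saved_mask)
    {
        /* true only if events are blocked now but were enabled in the
         * context we are returning to */
        return (live_mask & ~saved_mask) & 1;
    }

When this is true, control diverts through restore_all_enable_events, whose unblock-then-test-pending sequence forms exactly the critical region that critical_fixup_table repairs if an upcall interrupts it.
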
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/arch/xen/i386/kernel/head.S
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/head.S Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,187 +0,0 @@
-
-#include <linux/config.h>
-
-.section __xen_guest
- .ascii "GUEST_OS=linux,GUEST_VER=2.6"
- .ascii ",XEN_VER=2.0"
- .ascii ",VIRT_BASE=0xC0000000"
- .ascii ",LOADER=generic"
- .ascii ",PT_MODE_WRITABLE"
- .byte 0
-
-.text
-#include <linux/threads.h>
-#include <linux/linkage.h>
-#include <asm/segment.h>
-#include <asm/thread_info.h>
-#include <asm/asm_offsets.h>
-#include <asm-xen/xen-public/arch-x86_32.h>
-
-/*
- * References to members of the new_cpu_data structure.
- */
-
-#define X86 new_cpu_data+CPUINFO_x86
-#define X86_VENDOR new_cpu_data+CPUINFO_x86_vendor
-#define X86_MODEL new_cpu_data+CPUINFO_x86_model
-#define X86_MASK new_cpu_data+CPUINFO_x86_mask
-#define X86_HARD_MATH new_cpu_data+CPUINFO_hard_math
-#define X86_CPUID new_cpu_data+CPUINFO_cpuid_level
-#define X86_CAPABILITY new_cpu_data+CPUINFO_x86_capability
-#define X86_VENDOR_ID new_cpu_data+CPUINFO_x86_vendor_id
-
-ENTRY(startup_32)
- cld
-
- /* Copy the necessary stuff from xen_start_info structure. */
- mov $xen_start_info_union,%edi
- mov $128,%ecx
- rep movsl
-
-#ifdef CONFIG_SMP
-ENTRY(startup_32_smp)
- cld
-#endif /* CONFIG_SMP */
-
- /* Set up the stack pointer */
- lss stack_start,%esp
-
-checkCPUtype:
-
- /* get vendor info */
- xorl %eax,%eax # call CPUID with 0 -> return vendor ID
- cpuid
- movl %eax,X86_CPUID # save CPUID level
- movl %ebx,X86_VENDOR_ID # lo 4 chars
- movl %edx,X86_VENDOR_ID+4 # next 4 chars
- movl %ecx,X86_VENDOR_ID+8 # last 4 chars
-
- movl $1,%eax # Use the CPUID instruction to get CPU type
- cpuid
- movb %al,%cl # save reg for future use
- andb $0x0f,%ah # mask processor family
- movb %ah,X86
- andb $0xf0,%al # mask model
- shrb $4,%al
- movb %al,X86_MODEL
- andb $0x0f,%cl # mask mask revision
- movb %cl,X86_MASK
- movl %edx,X86_CAPABILITY
-
- incb ready
-
- xorl %eax,%eax # Clear FS/GS and LDT
- movl %eax,%fs
- movl %eax,%gs
- cld # gcc2 wants the direction flag cleared at all times
-
-#ifdef CONFIG_SMP
- movb ready, %cl
- cmpb $1,%cl
- je 1f # the first CPU calls start_kernel
- # all other CPUs call initialize_secondary
- call initialize_secondary
- jmp L6
-1:
-#endif /* CONFIG_SMP */
- call start_kernel
-L6:
- jmp L6 # main should never return here, but
- # just in case, we know what happens.
-
-ENTRY(lgdt_finish)
- movl $(__KERNEL_DS),%eax # reload all the segment registers
- movw %ax,%ss # after changing gdt.
-
- movl $(__USER_DS),%eax # DS/ES contains default USER segment
- movw %ax,%ds
- movw %ax,%es
-
- popl %eax # reload CS by intersegment return
- pushl $(__KERNEL_CS)
- pushl %eax
- lret
-
-ENTRY(stack_start)
- .long init_thread_union+THREAD_SIZE
- .long __BOOT_DS
-
-ready: .byte 0
-
-.globl idt_descr
-.globl cpu_gdt_descr
-
- ALIGN
- .word 0 # 32-bit align idt_desc.address
-idt_descr:
- .word IDT_ENTRIES*8-1 # idt contains 256 entries
- .long idt_table
-
-# boot GDT descriptor (later on used by CPU#0):
- .word 0 # 32 bit align gdt_desc.address
-cpu_gdt_descr:
- .word GDT_SIZE
- .long cpu_gdt_table
-
- .fill NR_CPUS-1,8,0 # space for the other GDT descriptors
-
-.org 0x1000
-ENTRY(empty_zero_page)
-
-.org 0x2000
-ENTRY(swapper_pg_dir)
-
-.org 0x3000
-ENTRY(cpu_gdt_table)
- .quad 0x0000000000000000 /* NULL descriptor */
- .quad 0x0000000000000000 /* 0x0b reserved */
- .quad 0x0000000000000000 /* 0x13 reserved */
- .quad 0x0000000000000000 /* 0x1b reserved */
- .quad 0x0000000000000000 /* 0x20 unused */
- .quad 0x0000000000000000 /* 0x28 unused */
- .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
- .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
- .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
- .quad 0x0000000000000000 /* 0x4b reserved */
- .quad 0x0000000000000000 /* 0x53 reserved */
- .quad 0x0000000000000000 /* 0x5b reserved */
-
- .quad 0x00cfbb000000c3ff /* 0x60 kernel 4GB code at 0x00000000 */
- .quad 0x00cfb3000000c3ff /* 0x68 kernel 4GB data at 0x00000000 */
- .quad 0x00cffb000000c3ff /* 0x73 user 4GB code at 0x00000000 */
- .quad 0x00cff3000000c3ff /* 0x7b user 4GB data at 0x00000000 */
-
- .quad 0x0000000000000000 /* 0x80 TSS descriptor */
- .quad 0x0000000000000000 /* 0x88 LDT descriptor */
-
- /* Segments used for calling PnP BIOS */
- .quad 0x0000000000000000 /* 0x90 32-bit code */
- .quad 0x0000000000000000 /* 0x98 16-bit code */
- .quad 0x0000000000000000 /* 0xa0 16-bit data */
- .quad 0x0000000000000000 /* 0xa8 16-bit data */
- .quad 0x0000000000000000 /* 0xb0 16-bit data */
- /*
- * The APM segments have byte granularity and their bases
- * and limits are set at run time.
- */
- .quad 0x0000000000000000 /* 0xb8 APM CS code */
- .quad 0x0000000000000000 /* 0xc0 APM CS 16 code (16 bit) */
- .quad 0x0000000000000000 /* 0xc8 APM DS data */
-
- .quad 0x0000000000000000 /* 0xd0 - unused */
- .quad 0x0000000000000000 /* 0xd8 - unused */
- .quad 0x0000000000000000 /* 0xe0 - unused */
- .quad 0x0000000000000000 /* 0xe8 - unused */
- .quad 0x0000000000000000 /* 0xf0 - unused */
- .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
- .fill GDT_ENTRIES-32,8,0
-
-.org 0x4000
-ENTRY(default_ldt)
-
-.org 0x5000
-/*
- * Real beginning of normal "text" segment
- */
-ENTRY(stext)
-ENTRY(_stext)
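
The checkCPUtype sequence above unpacks CPUID leaf 1 with byte masks and shifts; the same decode in user-space C (a sketch -- GCC inline asm, assuming a non-PIC 32-bit or a 64-bit build so %ebx may be written):

    #include <stdio.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        /* CPUID leaf 1: processor signature returned in EAX */
        __asm__ volatile("cpuid"
                         : "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
                         : "a"(1));

        printf("family %u model %u stepping %u\n",
               (eax >> 8) & 0xf,   /* andb $0x0f,%ah          */
               (eax >> 4) & 0xf,   /* andb $0xf0,%al; shrb $4 */
               eax & 0xf);         /* andb $0x0f,%cl          */
        return 0;
    }
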
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/arch/xen/i386/kernel/i386_ksyms.c
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/i386_ksyms.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,194 +0,0 @@
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/smp.h>
-#include <linux/user.h>
-#include <linux/elfcore.h>
-#include <linux/mca.h>
-#include <linux/sched.h>
-#include <linux/in6.h>
-#include <linux/interrupt.h>
-#include <linux/smp_lock.h>
-#include <linux/pm.h>
-#include <linux/pci.h>
-#include <linux/apm_bios.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/tty.h>
-#include <linux/highmem.h>
-#include <linux/time.h>
-
-#include <asm/semaphore.h>
-#include <asm/processor.h>
-#include <asm/i387.h>
-#include <asm/uaccess.h>
-#include <asm/checksum.h>
-#include <asm/io.h>
-#include <asm/delay.h>
-#include <asm/irq.h>
-#include <asm/mmx.h>
-#include <asm/desc.h>
-#include <asm/pgtable.h>
-#include <asm/tlbflush.h>
-#include <asm/nmi.h>
-#include <asm/ist.h>
-#include <asm/kdebug.h>
-
-extern void dump_thread(struct pt_regs *, struct user *);
-extern spinlock_t rtc_lock;
-
-/* This is definitely a GPL-only symbol */
-EXPORT_SYMBOL_GPL(cpu_gdt_table);
-
-#if defined(CONFIG_APM_MODULE)
-extern void machine_real_restart(unsigned char *, int);
-EXPORT_SYMBOL(machine_real_restart);
-extern void default_idle(void);
-EXPORT_SYMBOL(default_idle);
-#endif
-
-#ifdef CONFIG_SMP
-extern void FASTCALL( __write_lock_failed(rwlock_t *rw));
-extern void FASTCALL( __read_lock_failed(rwlock_t *rw));
-#endif
-
-#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_HD) || defined(CONFIG_BLK_DEV_IDE_MODULE) || defined(CONFIG_BLK_DEV_HD_MODULE)
-extern struct drive_info_struct drive_info;
-EXPORT_SYMBOL(drive_info);
-#endif
-
-extern unsigned long cpu_khz;
-extern unsigned long get_cmos_time(void);
-
-/* platform dependent support */
-EXPORT_SYMBOL(boot_cpu_data);
-#ifdef CONFIG_DISCONTIGMEM
-EXPORT_SYMBOL(node_data);
-EXPORT_SYMBOL(physnode_map);
-#endif
-#ifdef CONFIG_X86_NUMAQ
-EXPORT_SYMBOL(xquad_portio);
-#endif
-EXPORT_SYMBOL(dump_thread);
-EXPORT_SYMBOL(dump_fpu);
-EXPORT_SYMBOL_GPL(kernel_fpu_begin);
-EXPORT_SYMBOL(__ioremap);
-EXPORT_SYMBOL(ioremap_nocache);
-EXPORT_SYMBOL(iounmap);
-EXPORT_SYMBOL(kernel_thread);
-EXPORT_SYMBOL(pm_idle);
-#ifdef CONFIG_APM
-EXPORT_SYMBOL(pm_power_off);
-#endif
-EXPORT_SYMBOL(get_cmos_time);
-EXPORT_SYMBOL(cpu_khz);
-EXPORT_SYMBOL(apm_info);
-
-EXPORT_SYMBOL(__down_failed);
-EXPORT_SYMBOL(__down_failed_interruptible);
-EXPORT_SYMBOL(__down_failed_trylock);
-EXPORT_SYMBOL(__up_wakeup);
-/* Networking helper routines. */
-EXPORT_SYMBOL(csum_partial_copy_generic);
-/* Delay loops */
-EXPORT_SYMBOL(__ndelay);
-EXPORT_SYMBOL(__udelay);
-EXPORT_SYMBOL(__delay);
-EXPORT_SYMBOL(__const_udelay);
-
-EXPORT_SYMBOL(__get_user_1);
-EXPORT_SYMBOL(__get_user_2);
-EXPORT_SYMBOL(__get_user_4);
-
-EXPORT_SYMBOL(strpbrk);
-EXPORT_SYMBOL(strstr);
-
-EXPORT_SYMBOL(strncpy_from_user);
-EXPORT_SYMBOL(__strncpy_from_user);
-EXPORT_SYMBOL(clear_user);
-EXPORT_SYMBOL(__clear_user);
-EXPORT_SYMBOL(__copy_from_user_ll);
-EXPORT_SYMBOL(__copy_to_user_ll);
-EXPORT_SYMBOL(strnlen_user);
-
-EXPORT_SYMBOL(dma_alloc_coherent);
-EXPORT_SYMBOL(dma_free_coherent);
-
-#ifdef CONFIG_PCI
-EXPORT_SYMBOL(pcibios_penalize_isa_irq);
-EXPORT_SYMBOL(pci_mem_start);
-#endif
-
-#ifdef CONFIG_PCI_BIOS
-EXPORT_SYMBOL(pcibios_set_irq_routing);
-EXPORT_SYMBOL(pcibios_get_irq_routing_table);
-#endif
-
-#ifdef CONFIG_X86_USE_3DNOW
-EXPORT_SYMBOL(_mmx_memcpy);
-EXPORT_SYMBOL(mmx_clear_page);
-EXPORT_SYMBOL(mmx_copy_page);
-#endif
-
-#ifdef CONFIG_X86_HT
-EXPORT_SYMBOL(smp_num_siblings);
-EXPORT_SYMBOL(cpu_sibling_map);
-#endif
-
-#ifdef CONFIG_SMP
-EXPORT_SYMBOL(cpu_data);
-EXPORT_SYMBOL(cpu_online_map);
-EXPORT_SYMBOL(cpu_callout_map);
-EXPORT_SYMBOL(__write_lock_failed);
-EXPORT_SYMBOL(__read_lock_failed);
-
-/* Global SMP stuff */
-EXPORT_SYMBOL(smp_call_function);
-
-/* TLB flushing */
-EXPORT_SYMBOL(flush_tlb_page);
-EXPORT_SYMBOL_GPL(flush_tlb_all);
-#endif
-
-#ifdef CONFIG_X86_IO_APIC
-EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
-#endif
-
-#ifdef CONFIG_MCA
-EXPORT_SYMBOL(machine_id);
-#endif
-
-#ifdef CONFIG_VT
-EXPORT_SYMBOL(screen_info);
-#endif
-
-EXPORT_SYMBOL(get_wchan);
-
-EXPORT_SYMBOL(rtc_lock);
-
-EXPORT_SYMBOL_GPL(set_nmi_callback);
-EXPORT_SYMBOL_GPL(unset_nmi_callback);
-
-#undef memcmp
-extern int memcmp(const void *,const void *,__kernel_size_t);
-EXPORT_SYMBOL(memcmp);
-
-EXPORT_SYMBOL(register_die_notifier);
-#ifdef CONFIG_HAVE_DEC_LOCK
-EXPORT_SYMBOL(_atomic_dec_and_lock);
-#endif
-
-EXPORT_SYMBOL(__PAGE_KERNEL);
-
-#ifdef CONFIG_HIGHMEM
-EXPORT_SYMBOL(kmap);
-EXPORT_SYMBOL(kunmap);
-EXPORT_SYMBOL(kmap_atomic);
-EXPORT_SYMBOL(kunmap_atomic);
-EXPORT_SYMBOL(kmap_atomic_to_page);
-#endif
-
-#if defined(CONFIG_X86_SPEEDSTEP_SMI) || defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE)
-EXPORT_SYMBOL(ist_info);
-#endif
-
-EXPORT_SYMBOL(csum_partial);
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/arch/xen/i386/kernel/ioport.c
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/ioport.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,51 +0,0 @@
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/types.h>
-#include <linux/ioport.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/stddef.h>
-#include <linux/slab.h>
-#include <asm-xen/xen-public/dom0_ops.h>
-
-asmlinkage long sys_iopl(unsigned int new_io_pl)
-{
- unsigned int old_io_pl = current->thread.io_pl;
- dom0_op_t op;
-
- if (new_io_pl > 3)
- return -EINVAL;
-
- /* Need "raw I/O" privileges for direct port access. */
- if ((new_io_pl > old_io_pl) && !capable(CAP_SYS_RAWIO))
- return -EPERM;
-
- if (!(xen_start_info.flags & SIF_PRIVILEGED))
- return -EPERM;
-
- /* Maintain OS privileges even if user attempts to relinquish them. */
- if (new_io_pl == 0)
- new_io_pl = 1;
-
- /* Change our version of the privilege levels. */
- current->thread.io_pl = new_io_pl;
-
- /* Force the change at ring 0. */
- op.cmd = DOM0_IOPL;
- op.u.iopl.domain = DOMID_SELF;
- op.u.iopl.iopl = new_io_pl;
- HYPERVISOR_dom0_op(&op);
-
- return 0;
-}
-
-asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
-{
-#if 0
- printk(KERN_INFO "ioperm not fully supported - %s\n",
- turn_on ? "set iopl to 3" : "ignore resource release");
-#endif
- return turn_on ? sys_iopl(3) : 0;
-}
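
User space drives this exactly as on native Linux: in a privileged (dom0) guest, a CAP_SYS_RAWIO process requests port access the usual way, and the kernel mirrors the change to ring 0 through the DOM0_IOPL op above. A minimal sketch (port 0x80 is the conventionally harmless POST port):

    #include <stdio.h>
    #include <sys/io.h>     /* iopl(), outb() - glibc, x86 only */

    int main(void)
    {
        if (iopl(3) != 0) {          /* ends up in sys_iopl() above */
            perror("iopl");
            return 1;
        }
        outb(0x00, 0x80);            /* direct port I/O now permitted */
        puts("port write issued");
        return 0;
    }
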
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/arch/xen/i386/kernel/ldt.c
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/ldt.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,267 +0,0 @@
-/*
- * linux/kernel/ldt.c
- *
- * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
- * Copyright (C) 1999 Ingo Molnar <mingo@xxxxxxxxxx>
- */
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
-
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <asm/ldt.h>
-#include <asm/desc.h>
-
-#ifdef CONFIG_SMP /* avoids "defined but not used" warning */
-static void flush_ldt(void *null)
-{
- if (current->active_mm)
- load_LDT(&current->active_mm->context);
-}
-#endif
-
-static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
-{
- void *oldldt;
- void *newldt;
- int oldsize;
-
- if (mincount <= pc->size)
- return 0;
- oldsize = pc->size;
- mincount = (mincount+511)&(~511);
- if (mincount*LDT_ENTRY_SIZE > PAGE_SIZE)
- newldt = vmalloc(mincount*LDT_ENTRY_SIZE);
- else
- newldt = kmalloc(mincount*LDT_ENTRY_SIZE, GFP_KERNEL);
-
- if (!newldt)
- return -ENOMEM;
-
- if (oldsize)
- memcpy(newldt, pc->ldt, oldsize*LDT_ENTRY_SIZE);
- oldldt = pc->ldt;
- memset(newldt+oldsize*LDT_ENTRY_SIZE, 0, (mincount-oldsize)*LDT_ENTRY_SIZE);
- pc->ldt = newldt;
- wmb();
- pc->size = mincount;
- wmb();
-
- if (reload) {
-#ifdef CONFIG_SMP
- cpumask_t mask;
- preempt_disable();
-#endif
- make_pages_readonly(pc->ldt, (pc->size * LDT_ENTRY_SIZE) /
- PAGE_SIZE);
- load_LDT(pc);
-#ifdef CONFIG_SMP
- mask = cpumask_of_cpu(smp_processor_id());
- if (!cpus_equal(current->mm->cpu_vm_mask, mask))
- smp_call_function(flush_ldt, NULL, 1, 1);
- preempt_enable();
-#endif
- }
- if (oldsize) {
- make_pages_writable(oldldt, (oldsize * LDT_ENTRY_SIZE) /
- PAGE_SIZE);
- flush_page_update_queue();
- if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
- vfree(oldldt);
- else
- kfree(oldldt);
- }
- return 0;
-}
-
-static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
-{
- int err = alloc_ldt(new, old->size, 0);
- if (err < 0)
- return err;
- memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
- make_pages_readonly(new->ldt, (new->size * LDT_ENTRY_SIZE) /
- PAGE_SIZE);
- flush_page_update_queue();
- return 0;
-}
-
-/*
- * we do not have to muck with descriptors here, that is
- * done in switch_mm() as needed.
- */
-int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
-{
- struct mm_struct * old_mm;
- int retval = 0;
-
- init_MUTEX(&mm->context.sem);
- mm->context.size = 0;
- old_mm = current->mm;
- if (old_mm && old_mm->context.size > 0) {
- down(&old_mm->context.sem);
- retval = copy_ldt(&mm->context, &old_mm->context);
- up(&old_mm->context.sem);
- }
- return retval;
-}
-
-/*
- * No need to lock the MM as we are the last user
- */
-void destroy_context(struct mm_struct *mm)
-{
- if (mm->context.size) {
- if (mm == current->active_mm)
- clear_LDT();
- make_pages_writable(mm->context.ldt,
- (mm->context.size * LDT_ENTRY_SIZE) /
- PAGE_SIZE);
- flush_page_update_queue();
- if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
- vfree(mm->context.ldt);
- else
- kfree(mm->context.ldt);
- mm->context.size = 0;
- }
-}
-
-static int read_ldt(void __user * ptr, unsigned long bytecount)
-{
- int err;
- unsigned long size;
- struct mm_struct * mm = current->mm;
-
- if (!mm->context.size)
- return 0;
- if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
- bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
-
- down(&mm->context.sem);
- size = mm->context.size*LDT_ENTRY_SIZE;
- if (size > bytecount)
- size = bytecount;
-
- err = 0;
- if (copy_to_user(ptr, mm->context.ldt, size))
- err = -EFAULT;
- up(&mm->context.sem);
- if (err < 0)
- goto error_return;
- if (size != bytecount) {
- /* zero-fill the rest */
- if (clear_user(ptr+size, bytecount-size) != 0) {
- err = -EFAULT;
- goto error_return;
- }
- }
- return bytecount;
-error_return:
- return err;
-}
-
-static int read_default_ldt(void __user * ptr, unsigned long bytecount)
-{
- int err;
- unsigned long size;
- void *address;
-
- err = 0;
- address = &default_ldt[0];
- size = 5*sizeof(struct desc_struct);
- if (size > bytecount)
- size = bytecount;
-
- err = size;
- if (copy_to_user(ptr, address, size))
- err = -EFAULT;
-
- return err;
-}
-
-static int write_ldt(void __user * ptr, unsigned long bytecount, int oldmode)
-{
- struct mm_struct * mm = current->mm;
- __u32 entry_1, entry_2, *lp;
- unsigned long mach_lp;
- int error;
- struct user_desc ldt_info;
-
- error = -EINVAL;
- if (bytecount != sizeof(ldt_info))
- goto out;
- error = -EFAULT;
- if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
- goto out;
-
- error = -EINVAL;
- if (ldt_info.entry_number >= LDT_ENTRIES)
- goto out;
- if (ldt_info.contents == 3) {
- if (oldmode)
- goto out;
- if (ldt_info.seg_not_present == 0)
- goto out;
- }
-
- down(&mm->context.sem);
- if (ldt_info.entry_number >= mm->context.size) {
- error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1);
- if (error < 0)
- goto out_unlock;
- }
-
- lp = (__u32 *) ((ldt_info.entry_number << 3) + (char *) mm->context.ldt);
- mach_lp = arbitrary_virt_to_machine(lp);
-
- /* Allow LDTs to be cleared by the user. */
- if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
- if (oldmode || LDT_empty(&ldt_info)) {
- entry_1 = 0;
- entry_2 = 0;
- goto install;
- }
- }
-
- entry_1 = LDT_entry_a(&ldt_info);
- entry_2 = LDT_entry_b(&ldt_info);
- if (oldmode)
- entry_2 &= ~(1 << 20);
-
- /* Install the new entry ... */
-install:
- error = HYPERVISOR_update_descriptor(mach_lp, entry_1, entry_2);
-
-out_unlock:
- up(&mm->context.sem);
-out:
- return error;
-}
-
-asmlinkage int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
-{
- int ret = -ENOSYS;
-
- switch (func) {
- case 0:
- ret = read_ldt(ptr, bytecount);
- break;
- case 1:
- ret = write_ldt(ptr, bytecount, 1);
- break;
- case 2:
- ret = read_default_ldt(ptr, bytecount);
- break;
- case 0x11:
- ret = write_ldt(ptr, bytecount, 0);
- break;
- }
- return ret;
-}
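
The modify_ldt ABI is likewise unchanged, so stock user code keeps working; the only difference is that the final descriptor write becomes a HYPERVISOR_update_descriptor call, and the hypervisor may veto descriptors it considers unsafe. A sketch installing one small 32-bit data segment via the raw syscall (entry number, base and limit are illustrative):

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <asm/ldt.h>        /* struct user_desc */
    #include <sys/syscall.h>

    int main(void)
    {
        struct user_desc d;

        memset(&d, 0, sizeof(d));
        d.entry_number = 0;      /* first LDT slot (illustrative) */
        d.base_addr    = 0;
        d.limit        = 0xfff;  /* small byte-granular limit */
        d.seg_32bit    = 1;

        /* func 1 == write_ldt(..., oldmode=1) in the file above */
        if (syscall(SYS_modify_ldt, 1, &d, sizeof(d)) != 0) {
            perror("modify_ldt");
            return 1;
        }
        puts("LDT entry installed");
        return 0;
    }
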
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/arch/xen/i386/kernel/microcode.c
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/microcode.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,163 +0,0 @@
-/*
- * Intel CPU Microcode Update Driver for Linux
- *
- * Copyright (C) 2000-2004 Tigran Aivazian
- *
- * This driver allows to upgrade microcode on Intel processors
- * belonging to IA-32 family - PentiumPro, Pentium II,
- * Pentium III, Xeon, Pentium 4, etc.
- *
- * Reference: Section 8.10 of Volume III, Intel Pentium 4 Manual,
- * Order Number 245472 or free download from:
- *
- * http://developer.intel.com/design/pentium4/manuals/245472.htm
- *
- * For more information, go to http://www.urbanmyth.org/microcode
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-//#define DEBUG /* pr_debug */
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/miscdevice.h>
-#include <linux/spinlock.h>
-#include <linux/mm.h>
-#include <linux/syscalls.h>
-
-#include <asm/msr.h>
-#include <asm/uaccess.h>
-#include <asm/processor.h>
-
-MODULE_DESCRIPTION("Intel CPU (IA-32) Microcode Update Driver");
-MODULE_AUTHOR("Tigran Aivazian <tigran@xxxxxxxxxxx>");
-MODULE_LICENSE("GPL");
-
-#define MICROCODE_VERSION "1.14-xen"
-
-#define DEFAULT_UCODE_DATASIZE (2000) /* 2000 bytes */
-#define MC_HEADER_SIZE (sizeof (microcode_header_t)) /* 48 bytes */
-#define DEFAULT_UCODE_TOTALSIZE (DEFAULT_UCODE_DATASIZE + MC_HEADER_SIZE) /* 2048 bytes */
-
-/* no concurrent ->write()s are allowed on /dev/cpu/microcode */
-static DECLARE_MUTEX(microcode_sem);
-
-static void __user *user_buffer; /* user area microcode data buffer */
-static unsigned int user_buffer_size; /* its size */
-
-static int microcode_open (struct inode *unused1, struct file *unused2)
-{
- return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
-}
-
-
-static int do_microcode_update (void)
-{
- int err;
- dom0_op_t op;
-
- err = sys_mlock((unsigned long)user_buffer, user_buffer_size);
- if (err != 0)
- return err;
-
- op.cmd = DOM0_MICROCODE;
- op.u.microcode.data = user_buffer;
- op.u.microcode.length = user_buffer_size;
- err = HYPERVISOR_dom0_op(&op);
-
- (void)sys_munlock((unsigned long)user_buffer, user_buffer_size);
-
- return err;
-}
-
-static ssize_t microcode_write (struct file *file, const char __user *buf, size_t len, loff_t *ppos)
-{
- ssize_t ret;
-
- if (len < DEFAULT_UCODE_TOTALSIZE) {
- printk(KERN_ERR "microcode: not enough data\n");
- return -EINVAL;
- }
-
- if ((len >> PAGE_SHIFT) > num_physpages) {
- printk(KERN_ERR "microcode: too much data (max %ld pages)\n", num_physpages);
- return -EINVAL;
- }
-
- down(&microcode_sem);
-
- user_buffer = (void __user *) buf;
- user_buffer_size = (int) len;
-
- ret = do_microcode_update();
- if (!ret)
- ret = (ssize_t)len;
-
- up(&microcode_sem);
-
- return ret;
-}
-
-static int microcode_ioctl (struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- switch (cmd) {
- /*
- * XXX: will be removed after microcode_ctl
- * is updated to ignore failure of this ioctl()
- */
- case MICROCODE_IOCFREE:
- return 0;
- default:
- return -EINVAL;
- }
- return -EINVAL;
-}
-
-static struct file_operations microcode_fops = {
- .owner = THIS_MODULE,
- .write = microcode_write,
- .ioctl = microcode_ioctl,
- .open = microcode_open,
-};
-
-static struct miscdevice microcode_dev = {
- .minor = MICROCODE_MINOR,
- .name = "microcode",
- .devfs_name = "cpu/microcode",
- .fops = &microcode_fops,
-};
-
-static int __init microcode_init (void)
-{
- int error;
-
- error = misc_register(&microcode_dev);
- if (error) {
- printk(KERN_ERR
- "microcode: can't misc_register on minor=%d\n",
- MICROCODE_MINOR);
- return error;
- }
-
- printk(KERN_INFO
- "IA-32 Microcode Update Driver: v" MICROCODE_VERSION "
<tigran@xxxxxxxxxxx>\n");
- return 0;
-}
-
-static void __exit microcode_exit (void)
-{
- misc_deregister(&microcode_dev);
- printk(KERN_INFO "IA-32 Microcode Update Driver v" MICROCODE_VERSION " unregistered\n");
-}
-
-module_init(microcode_init)
-module_exit(microcode_exit)
-MODULE_ALIAS_MISCDEV(MICROCODE_MINOR);
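
The update interface stays the standard one: privileged user space feeds a vendor microcode image to the misc device in a single write(), which the driver mlocks and hands to Xen via DOM0_MICROCODE. A sketch (the image path is hypothetical; real setups use microcode_ctl's data file):

    #include <stdio.h>
    #include <stdlib.h>
    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
        static const char image[] = "/tmp/microcode.dat";  /* hypothetical */
        FILE *f = fopen(image, "rb");
        char *buf;
        long len;
        int dev;

        if (!f) { perror("open image"); return 1; }
        fseek(f, 0, SEEK_END);
        len = ftell(f);
        rewind(f);
        buf = malloc(len);
        if (!buf || fread(buf, 1, len, f) != (size_t)len) return 1;
        fclose(f);

        dev = open("/dev/cpu/microcode", O_WRONLY);
        if (dev < 0) { perror("open device"); return 1; }
        /* one write() of the whole image; it must hold at least one
         * complete update or the driver returns -EINVAL */
        if (write(dev, buf, len) != len) { perror("write"); return 1; }
        close(dev);
        free(buf);
        return 0;
    }
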
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/arch/xen/i386/kernel/pci-dma.c
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/pci-dma.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,363 +0,0 @@
-/*
- * Dynamic DMA mapping support.
- *
- * On i386 there is no hardware dynamic DMA address translation,
- * so consistent alloc/free are merely page allocation/freeing.
- * The rest of the dynamic DMA mapping interface is implemented
- * in asm/pci.h.
- */
-
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/pci.h>
-#include <linux/version.h>
-#include <asm/io.h>
-#include <asm-xen/balloon.h>
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
-#define pte_offset_kernel pte_offset
-#define pud_t pgd_t
-#define pud_offset(d, va) d
-#endif
-
-struct dma_coherent_mem {
- void *virt_base;
- u32 device_base;
- int size;
- int flags;
- unsigned long *bitmap;
-};
-
-static void
-xen_contig_memory(unsigned long vstart, unsigned int order)
-{
- /*
- * Ensure multi-page extents are contiguous in machine memory.
- * This code could be cleaned up some, and the number of
- * hypercalls reduced.
- */
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
- unsigned long pfn, i, flags;
-
- scrub_pages(vstart, 1 << order);
-
- balloon_lock(flags);
-
- /* 1. Zap current PTEs, giving away the underlying pages. */
- for (i = 0; i < (1<<order); i++) {
- pgd = pgd_offset_k(vstart + (i*PAGE_SIZE));
- pud = pud_offset(pgd, vstart + (i*PAGE_SIZE));
- pmd = pmd_offset(pud, vstart + (i*PAGE_SIZE));
- pte = pte_offset_kernel(pmd, vstart + (i*PAGE_SIZE));
- pfn = pte_val_ma(*pte) >> PAGE_SHIFT;
- queue_l1_entry_update(pte, 0);
- phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
- INVALID_P2M_ENTRY;
- flush_page_update_queue();
- if (HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation,
- &pfn, 1, 0) != 1) BUG();
- }
- /* 2. Get a new contiguous memory extent. */
- if (HYPERVISOR_dom_mem_op(MEMOP_increase_reservation,
- &pfn, 1, order) != 1) BUG();
- /* 3. Map the new extent in place of old pages. */
- for (i = 0; i < (1<<order); i++) {
- pgd = pgd_offset_k(vstart + (i*PAGE_SIZE));
- pud = pud_offset(pgd, vstart + (i*PAGE_SIZE));
- pmd = pmd_offset(pud, vstart + (i*PAGE_SIZE));
- pte = pte_offset_kernel(pmd, vstart + (i*PAGE_SIZE));
- queue_l1_entry_update(pte,
- ((pfn+i)<<PAGE_SHIFT)|__PAGE_KERNEL);
- queue_machphys_update(pfn+i, (__pa(vstart)>>PAGE_SHIFT)+i);
- phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] = pfn+i;
- }
- /* Flush updates through and flush the TLB. */
- xen_tlb_flush();
-
- balloon_unlock(flags);
-}
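A small sketch of the postcondition the three steps above establish, using
only symbols from this file (the helper name is illustrative, not part of
the original source):

/* After xen_contig_memory() the machine frames backing the extent are
 * consecutive, although __get_free_pages() under Xen only guarantees
 * pseudo-physical contiguity. */
static int extent_is_machine_contiguous(unsigned long vstart,
					unsigned int order)
{
	unsigned long i;
	unsigned long base = phys_to_machine_mapping[__pa(vstart) >> PAGE_SHIFT];

	for (i = 1; i < (1UL << order); i++)
		if (phys_to_machine_mapping[(__pa(vstart) >> PAGE_SHIFT) + i]
		    != base + i)
			return 0;	/* still discontiguous in machine memory */
	return 1;
}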
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
-void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
- dma_addr_t *dma_handle)
-#else
-void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, int gfp)
-#endif
-{
- void *ret;
- unsigned int order = get_order(size);
- unsigned long vstart;
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
- int gfp = GFP_ATOMIC;
-
- if (hwdev == NULL || ((u32)hwdev->dma_mask < 0xffffffff))
- gfp |= GFP_DMA;
-#else
- struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
-
- /* ignore region specifiers */
- gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
-
- if (mem) {
- int page = bitmap_find_free_region(mem->bitmap, mem->size,
- order);
- if (page >= 0) {
- *dma_handle = mem->device_base + (page << PAGE_SHIFT);
- ret = mem->virt_base + (page << PAGE_SHIFT);
- memset(ret, 0, size);
- return ret;
- }
- if (mem->flags & DMA_MEMORY_EXCLUSIVE)
- return NULL;
- }
-
- if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
- gfp |= GFP_DMA;
-#endif
-
- vstart = __get_free_pages(gfp, order);
- ret = (void *)vstart;
-
- if (ret != NULL) {
- xen_contig_memory(vstart, order);
-
- memset(ret, 0, size);
- *dma_handle = virt_to_bus(ret);
- }
- return ret;
-}
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
-void pci_free_consistent(struct pci_dev *hwdev, size_t size,
- void *vaddr, dma_addr_t dma_handle)
-{
- free_pages((unsigned long)vaddr, get_order(size));
-}
-#else
-
-void dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle)
-{
- struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
- int order = get_order(size);
-
- if (mem && vaddr >= mem->virt_base && vaddr < (mem->virt_base +
(mem->size << PAGE_SHIFT))) {
- int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
-
- bitmap_release_region(mem->bitmap, page, order);
- } else
- free_pages((unsigned long)vaddr, order);
-}
-
-int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
- dma_addr_t device_addr, size_t size, int flags)
-{
- void __iomem *mem_base;
- int pages = size >> PAGE_SHIFT;
- int bitmap_size = (pages + 31)/32;
-
- if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
- goto out;
- if (!size)
- goto out;
- if (dev->dma_mem)
- goto out;
-
- /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
-
- mem_base = ioremap(bus_addr, size);
- if (!mem_base)
- goto out;
-
- dev->dma_mem = kmalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
- if (!dev->dma_mem)
- goto out;
- memset(dev->dma_mem, 0, sizeof(struct dma_coherent_mem));
- dev->dma_mem->bitmap = kmalloc(bitmap_size, GFP_KERNEL);
- if (!dev->dma_mem->bitmap)
- goto free1_out;
- memset(dev->dma_mem->bitmap, 0, bitmap_size);
-
- dev->dma_mem->virt_base = mem_base;
- dev->dma_mem->device_base = device_addr;
- dev->dma_mem->size = pages;
- dev->dma_mem->flags = flags;
-
- if (flags & DMA_MEMORY_MAP)
- return DMA_MEMORY_MAP;
-
- return DMA_MEMORY_IO;
-
- free1_out:
- kfree(dev->dma_mem->bitmap);
- out:
- return 0;
-}
-EXPORT_SYMBOL(dma_declare_coherent_memory);
-
-void dma_release_declared_memory(struct device *dev)
-{
- struct dma_coherent_mem *mem = dev->dma_mem;
-
- if(!mem)
- return;
- dev->dma_mem = NULL;
- iounmap(mem->virt_base);
- kfree(mem->bitmap);
- kfree(mem);
-}
-EXPORT_SYMBOL(dma_release_declared_memory);
-
-void *dma_mark_declared_memory_occupied(struct device *dev,
- dma_addr_t device_addr, size_t size)
-{
- struct dma_coherent_mem *mem = dev->dma_mem;
- int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
- int pos, err;
-
- if (!mem)
- return ERR_PTR(-EINVAL);
-
- pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
- err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
- if (err != 0)
- return ERR_PTR(err);
- return mem->virt_base + (pos << PAGE_SHIFT);
-}
-EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
-
-static LIST_HEAD(dma_map_head);
-static DEFINE_SPINLOCK(dma_map_lock);
-struct dma_map_entry {
- struct list_head list;
- dma_addr_t dma;
- char *bounce, *host;
- size_t size;
-};
-#define DMA_MAP_MATCHES(e,d) (((e)->dma<=(d)) && (((e)->dma+(e)->size)>(d)))
-
-dma_addr_t
-dma_map_single(struct device *dev, void *ptr, size_t size,
- enum dma_data_direction direction)
-{
- struct dma_map_entry *ent;
- void *bnc;
- dma_addr_t dma;
- unsigned long flags;
-
- BUG_ON(direction == DMA_NONE);
-
- /*
- * Even if size is sub-page, the buffer may still straddle a page
- * boundary. Take into account buffer start offset. All other calls are
- * conservative and always search the dma_map list if it's non-empty.
- */
- if ((((unsigned int)ptr & ~PAGE_MASK) + size) <= PAGE_SIZE) {
- dma = virt_to_bus(ptr);
- } else {
- BUG_ON((bnc = dma_alloc_coherent(dev, size, &dma, 0)) == NULL);
- BUG_ON((ent = kmalloc(sizeof(*ent), GFP_KERNEL)) == NULL);
- if (direction != DMA_FROM_DEVICE)
- memcpy(bnc, ptr, size);
- ent->dma = dma;
- ent->bounce = bnc;
- ent->host = ptr;
- ent->size = size;
- spin_lock_irqsave(&dma_map_lock, flags);
- list_add(&ent->list, &dma_map_head);
- spin_unlock_irqrestore(&dma_map_lock, flags);
- }
-
- flush_write_buffers();
- return dma;
-}
-EXPORT_SYMBOL(dma_map_single);
-
-void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction direction)
-{
- struct dma_map_entry *ent;
- unsigned long flags;
-
- BUG_ON(direction == DMA_NONE);
-
- /* Fast-path check: are there any multi-page DMA mappings? */
- if (!list_empty(&dma_map_head)) {
- spin_lock_irqsave(&dma_map_lock, flags);
- list_for_each_entry ( ent, &dma_map_head, list ) {
- if (DMA_MAP_MATCHES(ent, dma_addr)) {
- list_del(&ent->list);
- break;
- }
- }
- spin_unlock_irqrestore(&dma_map_lock, flags);
- if (&ent->list != &dma_map_head) {
- BUG_ON(dma_addr != ent->dma);
- BUG_ON(size != ent->size);
- if (direction != DMA_TO_DEVICE)
- memcpy(ent->host, ent->bounce, size);
- dma_free_coherent(dev, size, ent->bounce, ent->dma);
- kfree(ent);
- }
- }
-}
-EXPORT_SYMBOL(dma_unmap_single);
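The pair above transparently bounces any buffer that straddles a page
boundary; callers use it like the stock i386 streaming-DMA API. A minimal
usage sketch (the device, buffer, and helper names are illustrative):

/* Sketch: one transmit using the bounce-aware map/unmap above. */
static void example_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* ... hand 'handle' to the device, start the transfer, and
	 * wait for its completion interrupt ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
}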
-
-void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
-{
- struct dma_map_entry *ent;
- unsigned long flags, off;
-
- /* Fast-path check: are there any multi-page DMA mappings? */
- if (!list_empty(&dma_map_head)) {
- spin_lock_irqsave(&dma_map_lock, flags);
- list_for_each_entry ( ent, &dma_map_head, list )
- if (DMA_MAP_MATCHES(ent, dma_handle))
- break;
- spin_unlock_irqrestore(&dma_map_lock, flags);
- if (&ent->list != &dma_map_head) {
- off = dma_handle - ent->dma;
- BUG_ON((off + size) > ent->size);
- /*if (direction != DMA_TO_DEVICE)*/
- memcpy(ent->host+off, ent->bounce+off, size);
- }
- }
-}
-EXPORT_SYMBOL(dma_sync_single_for_cpu);
-
-void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
-{
- struct dma_map_entry *ent;
- unsigned long flags, off;
-
- /* Fast-path check: are there any multi-page DMA mappings? */
- if (!list_empty(&dma_map_head)) {
- spin_lock_irqsave(&dma_map_lock, flags);
- list_for_each_entry ( ent, &dma_map_head, list )
- if (DMA_MAP_MATCHES(ent, dma_handle))
- break;
- spin_unlock_irqrestore(&dma_map_lock, flags);
- if (&ent->list != &dma_map_head) {
- off = dma_handle - ent->dma;
- BUG_ON((off + size) > ent->size);
- /*if (direction != DMA_FROM_DEVICE)*/
- memcpy(ent->bounce+off, ent->host+off, size);
- }
- }
-
- flush_write_buffers();
-}
-EXPORT_SYMBOL(dma_sync_single_for_device);
-
-#endif
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/arch/xen/i386/kernel/process.c
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/process.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,779 +0,0 @@
-/*
- * linux/arch/i386/kernel/process.c
- *
- * Copyright (C) 1995 Linus Torvalds
- *
- * Pentium III FXSR, SSE support
- * Gareth Hughes <gareth@xxxxxxxxxxx>, May 2000
- */
-
-/*
- * This file handles the architecture-dependent parts of process handling..
- */
-
-#include <stdarg.h>
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/fs.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/elfcore.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/stddef.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/user.h>
-#include <linux/a.out.h>
-#include <linux/interrupt.h>
-#include <linux/config.h>
-#include <linux/utsname.h>
-#include <linux/delay.h>
-#include <linux/reboot.h>
-#include <linux/init.h>
-#include <linux/mc146818rtc.h>
-#include <linux/module.h>
-#include <linux/kallsyms.h>
-#include <linux/ptrace.h>
-
-#include <asm/uaccess.h>
-#include <asm/pgtable.h>
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/ldt.h>
-#include <asm/processor.h>
-#include <asm/i387.h>
-#include <asm/irq.h>
-#include <asm/desc.h>
-#include <asm-xen/multicall.h>
-#include <asm-xen/xen-public/dom0_ops.h>
-#ifdef CONFIG_MATH_EMULATION
-#include <asm/math_emu.h>
-#endif
-
-#include <linux/irq.h>
-#include <linux/err.h>
-
-asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
-
-int hlt_counter;
-
-unsigned long boot_option_idle_override = 0;
-EXPORT_SYMBOL(boot_option_idle_override);
-
-/*
- * Return saved PC of a blocked thread.
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
- return ((unsigned long *)tsk->thread.esp)[3];
-}
-
-/*
- * Powermanagement idle function, if any..
- */
-void (*pm_idle)(void);
-static cpumask_t cpu_idle_map;
-
-void disable_hlt(void)
-{
- hlt_counter++;
-}
-
-EXPORT_SYMBOL(disable_hlt);
-
-void enable_hlt(void)
-{
- hlt_counter--;
-}
-
-EXPORT_SYMBOL(enable_hlt);
-
-/* XXX XEN doesn't use default_idle(), poll_idle(). Use xen_idle() instead. */
-extern void stop_hz_timer(void);
-extern void start_hz_timer(void);
-void xen_idle(void)
-{
- local_irq_disable();
-
- if (need_resched()) {
- local_irq_enable();
- } else {
- stop_hz_timer();
- HYPERVISOR_block(); /* implicit local_irq_enable() */
- start_hz_timer();
- }
-}
-
-/*
- * The idle thread. There's no useful work to be
- * done, so just try to conserve power and have a
- * low exit latency (ie sit in a loop waiting for
- * somebody to say that they'd like to reschedule)
- */
-void cpu_idle (void)
-{
- int cpu = _smp_processor_id();
-
- /* endless idle loop with no priority at all */
- while (1) {
- while (!need_resched()) {
-
- if (cpu_isset(cpu, cpu_idle_map))
- cpu_clear(cpu, cpu_idle_map);
- rmb();
-
- irq_stat[cpu].idle_timestamp = jiffies;
- xen_idle();
- }
- schedule();
- }
-}
-
-void cpu_idle_wait(void)
-{
- int cpu;
- cpumask_t map;
-
- for_each_online_cpu(cpu)
- cpu_set(cpu, cpu_idle_map);
-
- wmb();
- do {
- ssleep(1);
- cpus_and(map, cpu_idle_map, cpu_online_map);
- } while (!cpus_empty(map));
-}
-EXPORT_SYMBOL_GPL(cpu_idle_wait);
-
-/* XXX XEN doesn't use mwait_idle(), select_idle_routine(), idle_setup(). */
-/* Always use xen_idle() instead. */
-void __init select_idle_routine(const struct cpuinfo_x86 *c) {}
-
-void show_regs(struct pt_regs * regs)
-{
- printk("\n");
- printk("Pid: %d, comm: %20s\n", current->pid, current->comm);
- printk("EIP: %04x:[<%08lx>] CPU: %d\n",0xffff & regs->xcs,regs->eip,
smp_processor_id());
- print_symbol("EIP is at %s\n", regs->eip);
-
- if (regs->xcs & 2)
- printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp);
- printk(" EFLAGS: %08lx %s (%s)\n",
- regs->eflags, print_tainted(), system_utsname.release);
- printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
- regs->eax,regs->ebx,regs->ecx,regs->edx);
- printk("ESI: %08lx EDI: %08lx EBP: %08lx",
- regs->esi, regs->edi, regs->ebp);
- printk(" DS: %04x ES: %04x\n",
- 0xffff & regs->xds,0xffff & regs->xes);
-
- show_trace(NULL, &regs->esp);
-}
-
-/*
- * This gets run with %ebx containing the
- * function to call, and %edx containing
- * the "args".
- */
-extern void kernel_thread_helper(void);
-__asm__(".section .text\n"
- ".align 4\n"
- "kernel_thread_helper:\n\t"
- "movl %edx,%eax\n\t"
- "pushl %edx\n\t"
- "call *%ebx\n\t"
- "pushl %eax\n\t"
- "call do_exit\n"
- ".previous");
-
-/*
- * Create a kernel thread
- */
-int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
-{
- struct pt_regs regs;
-
- memset(&regs, 0, sizeof(regs));
-
- regs.ebx = (unsigned long) fn;
- regs.edx = (unsigned long) arg;
-
- regs.xds = __USER_DS;
- regs.xes = __USER_DS;
- regs.orig_eax = -1;
- regs.eip = (unsigned long) kernel_thread_helper;
- regs.xcs = __KERNEL_CS;
- regs.eflags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2;
-
- /* Ok, create the new process.. */
- return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
-}
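A short usage sketch: callers pass only the function, its argument, and the
clone flags; the register image above arranges entry via kernel_thread_helper
(the worker function is illustrative):

/* Sketch: spawning a kernel thread with the helper above. */
static int worker(void *arg)
{
	/* ... do work; returning falls into do_exit() via the helper ... */
	return 0;
}

static void start_worker(void)
{
	kernel_thread(worker, NULL, CLONE_FS | CLONE_FILES | SIGCHLD);
}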
-
-/*
- * Free current thread data structures etc..
- */
-void exit_thread(void)
-{
- struct task_struct *tsk = current;
- struct thread_struct *t = &tsk->thread;
-
- /* The process may have allocated an io port bitmap... nuke it. */
- if (unlikely(NULL != t->io_bitmap_ptr)) {
- int cpu = get_cpu();
- struct tss_struct *tss = &per_cpu(init_tss, cpu);
-
- kfree(t->io_bitmap_ptr);
- t->io_bitmap_ptr = NULL;
- /*
- * Careful, clear this in the TSS too:
- */
- memset(tss->io_bitmap, 0xff, tss->io_bitmap_max);
- t->io_bitmap_max = 0;
- tss->io_bitmap_owner = NULL;
- tss->io_bitmap_max = 0;
- tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
- put_cpu();
- }
-}
-
-void flush_thread(void)
-{
- struct task_struct *tsk = current;
-
- memset(tsk->thread.debugreg, 0, sizeof(unsigned long)*8);
- memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
- /*
- * Forget coprocessor state..
- */
- clear_fpu(tsk);
- clear_used_math();
-}
-
-void release_thread(struct task_struct *dead_task)
-{
- if (dead_task->mm) {
- // temporary debugging check
- if (dead_task->mm->context.size) {
- printk("WARNING: dead process %8s still has LDT?
<%p/%d>\n",
- dead_task->comm,
- dead_task->mm->context.ldt,
- dead_task->mm->context.size);
- BUG();
- }
- }
-
- release_vm86_irqs(dead_task);
-}
-
-/*
- * This gets called before we allocate a new thread and copy
- * the current task into it.
- */
-void prepare_to_copy(struct task_struct *tsk)
-{
- unlazy_fpu(tsk);
-}
-
-int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
- unsigned long unused,
- struct task_struct * p, struct pt_regs * regs)
-{
- struct pt_regs * childregs;
- struct task_struct *tsk;
- int err;
- unsigned long eflags;
-
- childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p->thread_info)) - 1;
- *childregs = *regs;
- childregs->eax = 0;
- childregs->esp = esp;
-
- p->thread.esp = (unsigned long) childregs;
- p->thread.esp0 = (unsigned long) (childregs+1);
-
- p->thread.eip = (unsigned long) ret_from_fork;
-
- savesegment(fs,p->thread.fs);
- savesegment(gs,p->thread.gs);
-
- tsk = current;
- if (unlikely(NULL != tsk->thread.io_bitmap_ptr)) {
- p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
- if (!p->thread.io_bitmap_ptr) {
- p->thread.io_bitmap_max = 0;
- return -ENOMEM;
- }
- memcpy(p->thread.io_bitmap_ptr, tsk->thread.io_bitmap_ptr,
- IO_BITMAP_BYTES);
- }
-
- /*
- * Set a new TLS for the child thread?
- */
- if (clone_flags & CLONE_SETTLS) {
- struct desc_struct *desc;
- struct user_desc info;
- int idx;
-
- err = -EFAULT;
- if (copy_from_user(&info, (void __user *)childregs->esi,
sizeof(info)))
- goto out;
- err = -EINVAL;
- if (LDT_empty(&info))
- goto out;
-
- idx = info.entry_number;
- if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
- goto out;
-
- desc = p->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
- desc->a = LDT_entry_a(&info);
- desc->b = LDT_entry_b(&info);
- }
-
-
- __asm__ __volatile__ ( "pushfl; popl %0" : "=r" (eflags) : );
- p->thread.io_pl = (eflags >> 12) & 3;
-
- err = 0;
- out:
- if (err && p->thread.io_bitmap_ptr) {
- kfree(p->thread.io_bitmap_ptr);
- p->thread.io_bitmap_max = 0;
- }
- return err;
-}
-
-/*
- * fill in the user structure for a core dump..
- */
-void dump_thread(struct pt_regs * regs, struct user * dump)
-{
- int i;
-
-/* changed the size calculations - should hopefully work better. lbt */
- dump->magic = CMAGIC;
- dump->start_code = 0;
- dump->start_stack = regs->esp & ~(PAGE_SIZE - 1);
- dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
- dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >>
PAGE_SHIFT;
- dump->u_dsize -= dump->u_tsize;
- dump->u_ssize = 0;
- for (i = 0; i < 8; i++)
- dump->u_debugreg[i] = current->thread.debugreg[i];
-
- if (dump->start_stack < TASK_SIZE)
- dump->u_ssize = ((unsigned long) (TASK_SIZE -
dump->start_stack)) >> PAGE_SHIFT;
-
- dump->regs.ebx = regs->ebx;
- dump->regs.ecx = regs->ecx;
- dump->regs.edx = regs->edx;
- dump->regs.esi = regs->esi;
- dump->regs.edi = regs->edi;
- dump->regs.ebp = regs->ebp;
- dump->regs.eax = regs->eax;
- dump->regs.ds = regs->xds;
- dump->regs.es = regs->xes;
- savesegment(fs,dump->regs.fs);
- savesegment(gs,dump->regs.gs);
- dump->regs.orig_eax = regs->orig_eax;
- dump->regs.eip = regs->eip;
- dump->regs.cs = regs->xcs;
- dump->regs.eflags = regs->eflags;
- dump->regs.esp = regs->esp;
- dump->regs.ss = regs->xss;
-
- dump->u_fpvalid = dump_fpu (regs, &dump->i387);
-}
-
-/*
- * Capture the user space registers if the task is not running (in user space)
- */
-int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
-{
- struct pt_regs ptregs;
-
- ptregs = *(struct pt_regs *)
- ((unsigned long)tsk->thread_info+THREAD_SIZE - sizeof(ptregs));
- ptregs.xcs &= 0xffff;
- ptregs.xds &= 0xffff;
- ptregs.xes &= 0xffff;
- ptregs.xss &= 0xffff;
-
- elf_core_copy_regs(regs, &ptregs);
-
- boot_option_idle_override = 1;
- return 1;
-}
-
-static inline void
-handle_io_bitmap(struct thread_struct *next, struct tss_struct *tss)
-{
- if (!next->io_bitmap_ptr) {
- /*
- * Disable the bitmap via an invalid offset. We still cache
- * the previous bitmap owner and the IO bitmap contents:
- */
- tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
- return;
- }
- if (likely(next == tss->io_bitmap_owner)) {
- /*
- * Previous owner of the bitmap (hence the bitmap content)
- * matches the next task, we don't have to do anything but
- * to set a valid offset in the TSS:
- */
- tss->io_bitmap_base = IO_BITMAP_OFFSET;
- return;
- }
- /*
- * Lazy TSS's I/O bitmap copy. We set an invalid offset here
- * and we let the task get a GPF in case an I/O instruction
- * is performed. The handler of the GPF will verify that the
- * faulting task has a valid I/O bitmap and, if true, does the
- * real copy and restarts the instruction. This will save us
- * redundant copies when the currently switched task does not
- * perform any I/O during its timeslice.
- */
- tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
-}
-/*
- * This special macro can be used to load a debugging register
- */
-#define loaddebug(thread,register) \
- HYPERVISOR_set_debugreg((register), \
- (thread->debugreg[register]))
-
-/*
- * switch_to(x,y) should switch tasks from x to y.
- *
- * We fsave/fwait so that an exception goes off at the right time
- * (as a call from the fsave or fwait in effect) rather than to
- * the wrong process. Lazy FP saving no longer makes any sense
- * with modern CPU's, and this simplifies a lot of things (SMP
- * and UP become the same).
- *
- * NOTE! We used to use the x86 hardware context switching. The
- * reason for not using it any more becomes apparent when you
- * try to recover gracefully from saved state that is no longer
- * valid (stale segment register values in particular). With the
- * hardware task-switch, there is no way to fix up bad state in
- * a reasonable manner.
- *
- * The fact that Intel documents the hardware task-switching to
- * be slow is a fairly red herring - this code is not noticeably
- * faster. However, there _is_ some room for improvement here,
- * so the performance issues may eventually be a valid point.
- * More important, however, is the fact that this allows us much
- * more flexibility.
- *
- * The return value (in %eax) will be the "prev" task after
- * the task-switch, and shows up in ret_from_fork in entry.S,
- * for example.
- */
-struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
-{
- struct thread_struct *prev = &prev_p->thread,
- *next = &next_p->thread;
- int cpu = smp_processor_id();
- struct tss_struct *tss = &per_cpu(init_tss, cpu);
- dom0_op_t op;
-
- /* NB. No need to disable interrupts as already done in sched.c */
- /* __cli(); */
-
- /*
- * Save away %fs and %gs. No need to save %es and %ds, as
- * those are always kernel segments while inside the kernel.
- */
- asm volatile("movl %%fs,%0":"=m" (*(int *)&prev->fs));
- asm volatile("movl %%gs,%0":"=m" (*(int *)&prev->gs));
-
- /*
- * We clobber FS and GS here so that we avoid a GPF when restoring
- * previous task's FS/GS values in Xen when the LDT is switched.
- */
- __asm__ __volatile__ (
- "xorl %%eax,%%eax; movl %%eax,%%fs; movl %%eax,%%gs" : : :
- "eax" );
-
- MULTICALL_flush_page_update_queue();
-
- /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
-
- /*
- * This is basically '__unlazy_fpu', except that we queue a
- * multicall to indicate FPU task switch, rather than
- * synchronously trapping to Xen.
- */
- if (prev_p->thread_info->status & TS_USEDFPU) {
- __save_init_fpu(prev_p); /* _not_ save_init_fpu() */
- queue_multicall0(__HYPERVISOR_fpu_taskswitch);
- }
-
- /*
- * Reload esp0, LDT and the page table pointer:
- * This is load_esp0(tss, next) with a multicall.
- */
- tss->esp0 = next->esp0;
- queue_multicall2(__HYPERVISOR_stack_switch, tss->ss0, tss->esp0);
-
- /*
- * Load the per-thread Thread-Local Storage descriptor.
- * This is load_TLS(next, cpu) with multicalls.
- */
-#define C(i) do { \
- if (unlikely(next->tls_array[i].a != prev->tls_array[i].a || \
- next->tls_array[i].b != prev->tls_array[i].b)) \
- queue_multicall3(__HYPERVISOR_update_descriptor, \
- virt_to_machine(&get_cpu_gdt_table(cpu) \
- [GDT_ENTRY_TLS_MIN + i]), \
- ((u32 *)&next->tls_array[i])[0], \
- ((u32 *)&next->tls_array[i])[1]); \
-} while (0)
- C(0); C(1); C(2);
-#undef C
-
- if (xen_start_info.flags & SIF_PRIVILEGED) {
- op.cmd = DOM0_IOPL;
- op.u.iopl.domain = DOMID_SELF;
- op.u.iopl.iopl = next->io_pl;
- op.interface_version = DOM0_INTERFACE_VERSION;
- queue_multicall1(__HYPERVISOR_dom0_op, (unsigned long)&op);
- }
-
- /* EXECUTE ALL TASK SWITCH XEN SYSCALLS AT THIS POINT. */
- execute_multicall_list();
- /* __sti(); */
-
- /*
- * Restore %fs and %gs if needed.
- */
- if (unlikely(next->fs | next->gs)) {
- loadsegment(fs, next->fs);
- loadsegment(gs, next->gs);
- }
-
- /*
- * Now maybe reload the debug registers
- */
- if (unlikely(next->debugreg[7])) {
- loaddebug(next, 0);
- loaddebug(next, 1);
- loaddebug(next, 2);
- loaddebug(next, 3);
- /* no 4 and 5 */
- loaddebug(next, 6);
- loaddebug(next, 7);
- }
-
- if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr))
- handle_io_bitmap(next, tss);
-
- return prev_p;
-}
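For contrast, a minimal sketch of the FPU handoff without multicall batching,
assuming a synchronous HYPERVISOR_fpu_taskswitch wrapper exists in this tree;
each such call is an immediate trap to Xen, which is exactly the cost the
multicall queue above amortises:

	if (prev_p->thread_info->status & TS_USEDFPU) {
		__save_init_fpu(prev_p);
		HYPERVISOR_fpu_taskswitch();	/* one hypercall, right now */
	}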
-
-asmlinkage int sys_fork(struct pt_regs regs)
-{
- return do_fork(SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
-}
-
-asmlinkage int sys_clone(struct pt_regs regs)
-{
- unsigned long clone_flags;
- unsigned long newsp;
- int __user *parent_tidptr, *child_tidptr;
-
- clone_flags = regs.ebx;
- newsp = regs.ecx;
- parent_tidptr = (int __user *)regs.edx;
- child_tidptr = (int __user *)regs.edi;
- if (!newsp)
- newsp = regs.esp;
- return do_fork(clone_flags, newsp, &regs, 0, parent_tidptr, child_tidptr);
-}
-
-/*
- * This is trivial, and on the face of it looks like it
- * could equally well be done in user mode.
- *
- * Not so, for quite unobvious reasons - register pressure.
- * In user mode vfork() cannot have a stack frame, and if
- * done by calling the "clone()" system call directly, you
- * do not have enough call-clobbered registers to hold all
- * the information you need.
- */
-asmlinkage int sys_vfork(struct pt_regs regs)
-{
- return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
-}
-
-/*
- * sys_execve() executes a new program.
- */
-asmlinkage int sys_execve(struct pt_regs regs)
-{
- int error;
- char * filename;
-
- filename = getname((char __user *) regs.ebx);
- error = PTR_ERR(filename);
- if (IS_ERR(filename))
- goto out;
- error = do_execve(filename,
- (char __user * __user *) regs.ecx,
- (char __user * __user *) regs.edx,
- &regs);
- if (error == 0) {
- task_lock(current);
- current->ptrace &= ~PT_DTRACE;
- task_unlock(current);
- /* Make sure we don't return using sysenter.. */
- set_thread_flag(TIF_IRET);
- }
- putname(filename);
-out:
- return error;
-}
-
-#define top_esp (THREAD_SIZE - sizeof(unsigned long))
-#define top_ebp (THREAD_SIZE - 2*sizeof(unsigned long))
-
-unsigned long get_wchan(struct task_struct *p)
-{
- unsigned long ebp, esp, eip;
- unsigned long stack_page;
- int count = 0;
- if (!p || p == current || p->state == TASK_RUNNING)
- return 0;
- stack_page = (unsigned long)p->thread_info;
- esp = p->thread.esp;
- if (!stack_page || esp < stack_page || esp > top_esp+stack_page)
- return 0;
- /* include/asm-i386/system.h:switch_to() pushes ebp last. */
- ebp = *(unsigned long *) esp;
- do {
- if (ebp < stack_page || ebp > top_ebp+stack_page)
- return 0;
- eip = *(unsigned long *) (ebp+4);
- if (!in_sched_functions(eip))
- return eip;
- ebp = *(unsigned long *) ebp;
- } while (count++ < 16);
- return 0;
-}
-
-/*
- * sys_alloc_thread_area: get a yet unused TLS descriptor index.
- */
-static int get_free_idx(void)
-{
- struct thread_struct *t = &current->thread;
- int idx;
-
- for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
- if (desc_empty(t->tls_array + idx))
- return idx + GDT_ENTRY_TLS_MIN;
- return -ESRCH;
-}
-
-/*
- * Set a given TLS descriptor:
- */
-asmlinkage int sys_set_thread_area(struct user_desc __user *u_info)
-{
- struct thread_struct *t = &current->thread;
- struct user_desc info;
- struct desc_struct *desc;
- int cpu, idx;
-
- if (copy_from_user(&info, u_info, sizeof(info)))
- return -EFAULT;
- idx = info.entry_number;
-
- /*
- * index -1 means the kernel should try to find and
- * allocate an empty descriptor:
- */
- if (idx == -1) {
- idx = get_free_idx();
- if (idx < 0)
- return idx;
- if (put_user(idx, &u_info->entry_number))
- return -EFAULT;
- }
-
- if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
- return -EINVAL;
-
- desc = t->tls_array + idx - GDT_ENTRY_TLS_MIN;
-
- /*
- * We must not get preempted while modifying the TLS.
- */
- cpu = get_cpu();
-
- if (LDT_empty(&info)) {
- desc->a = 0;
- desc->b = 0;
- } else {
- desc->a = LDT_entry_a(&info);
- desc->b = LDT_entry_b(&info);
- }
- load_TLS(t, cpu);
-
- put_cpu();
-
- return 0;
-}
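From userspace this is reached through the set_thread_area syscall;
entry_number == -1 asks the kernel to pick a free slot, as handled above.
A hedged sketch (the field values are illustrative):

/* Sketch: installing a TLS segment from userspace on i386. */
#include <asm/ldt.h>		/* struct user_desc */
#include <sys/syscall.h>
#include <unistd.h>

static int install_tls(void *base, unsigned int pages)
{
	struct user_desc desc = {
		.entry_number	= -1,	/* kernel picks a slot */
		.base_addr	= (unsigned long)base,
		.limit		= pages,
		.seg_32bit	= 1,
		.limit_in_pages	= 1,
		.useable	= 1,
	};

	if (syscall(SYS_set_thread_area, &desc) != 0)
		return -1;
	return desc.entry_number;	/* slot chosen by the kernel */
}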
-
-/*
- * Get the current Thread-Local Storage area:
- */
-
-#define GET_BASE(desc) ( \
- (((desc)->a >> 16) & 0x0000ffff) | \
- (((desc)->b << 16) & 0x00ff0000) | \
- ( (desc)->b & 0xff000000) )
-
-#define GET_LIMIT(desc) ( \
- ((desc)->a & 0x0ffff) | \
- ((desc)->b & 0xf0000) )
-
-#define GET_32BIT(desc) (((desc)->b >> 22) & 1)
-#define GET_CONTENTS(desc) (((desc)->b >> 10) & 3)
-#define GET_WRITABLE(desc) (((desc)->b >> 9) & 1)
-#define GET_LIMIT_PAGES(desc) (((desc)->b >> 23) & 1)
-#define GET_PRESENT(desc) (((desc)->b >> 15) & 1)
-#define GET_USEABLE(desc) (((desc)->b >> 20) & 1)
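A worked example of the unpacking (values illustrative): a descriptor with
base 0x12345678 stores a[31:16] = 0x5678 (base bits 15..0), b[7:0] = 0x34
(base bits 23..16), and b[31:24] = 0x12 (base bits 31..24), so GET_BASE()
reassembles 0x00005678 | 0x00340000 | 0x12000000 = 0x12345678.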
-
-asmlinkage int sys_get_thread_area(struct user_desc __user *u_info)
-{
- struct user_desc info;
- struct desc_struct *desc;
- int idx;
-
- if (get_user(idx, &u_info->entry_number))
- return -EFAULT;
- if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
- return -EINVAL;
-
- desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
-
- info.entry_number = idx;
- info.base_addr = GET_BASE(desc);
- info.limit = GET_LIMIT(desc);
- info.seg_32bit = GET_32BIT(desc);
- info.contents = GET_CONTENTS(desc);
- info.read_exec_only = !GET_WRITABLE(desc);
- info.limit_in_pages = GET_LIMIT_PAGES(desc);
- info.seg_not_present = !GET_PRESENT(desc);
- info.useable = GET_USEABLE(desc);
-
- if (copy_to_user(u_info, &info, sizeof(info)))
- return -EFAULT;
- return 0;
-}
-
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/arch/xen/i386/kernel/setup.c
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/setup.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,1638 +0,0 @@
-/*
- * linux/arch/i386/kernel/setup.c
- *
- * Copyright (C) 1995 Linus Torvalds
- *
- * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
- *
- * Memory region support
- * David Parsons <orc@xxxxxxxxxxxxxx>, July-August 1999
- *
- * Added E820 sanitization routine (removes overlapping memory regions);
- * Brian Moyle <bmoyle@xxxxxxxxxx>, February 2001
- *
- * Moved CPU detection code to cpu/${cpu}.c
- * Patrick Mochel <mochel@xxxxxxxx>, March 2002
- *
- * Provisions for empty E820 memory regions (reported by certain BIOSes).
- * Alex Achenbach <xela@xxxxxxx>, December 2002.
- *
- */
-
-/*
- * This file handles the architecture-dependent parts of initialization
- */
-
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/tty.h>
-#include <linux/ioport.h>
-#include <linux/acpi.h>
-#include <linux/apm_bios.h>
-#include <linux/initrd.h>
-#include <linux/bootmem.h>
-#include <linux/seq_file.h>
-#include <linux/console.h>
-#include <linux/mca.h>
-#include <linux/root_dev.h>
-#include <linux/highmem.h>
-#include <linux/module.h>
-#include <linux/efi.h>
-#include <linux/init.h>
-#include <linux/edd.h>
-#include <linux/kernel.h>
-#include <linux/notifier.h>
-#include <video/edid.h>
-#include <asm/e820.h>
-#include <asm/mpspec.h>
-#include <asm/setup.h>
-#include <asm/arch_hooks.h>
-#include <asm/sections.h>
-#include <asm/io_apic.h>
-#include <asm/ist.h>
-#include <asm/io.h>
-#include <asm-xen/hypervisor.h>
-#include "setup_arch_pre.h"
-#include <bios_ebda.h>
-
-/* Allows setting of maximum possible memory size */
-static unsigned long xen_override_max_pfn;
-
-static int xen_panic_event(struct notifier_block *, unsigned long, void *);
-static struct notifier_block xen_panic_block = {
- xen_panic_event, NULL, 0 /* try to go last */
-};
-
-int disable_pse __initdata = 0;
-
-/*
- * Machine setup..
- */
-
-#ifdef CONFIG_EFI
-int efi_enabled = 0;
-EXPORT_SYMBOL(efi_enabled);
-#endif
-
-/* cpu data as detected by the assembly code in head.S */
-struct cpuinfo_x86 new_cpu_data __initdata = { 0, 0, 0, 0, -1, 0, 1, 0, -1 };
-/* common cpu data for all cpus */
-struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1, 0, 1, 0, -1 };
-
-unsigned long mmu_cr4_features;
-EXPORT_SYMBOL_GPL(mmu_cr4_features);
-
-#ifdef CONFIG_ACPI_INTERPRETER
- int acpi_disabled = 0;
-#else
- int acpi_disabled = 1;
-#endif
-EXPORT_SYMBOL(acpi_disabled);
-
-#ifdef CONFIG_ACPI_BOOT
-int __initdata acpi_force = 0;
-extern acpi_interrupt_flags acpi_sci_flags;
-#endif
-
-/* for MCA, but anyone else can use it if they want */
-unsigned int machine_id;
-unsigned int machine_submodel_id;
-unsigned int BIOS_revision;
-unsigned int mca_pentium_flag;
-
-/* For PCI or other memory-mapped resources */
-unsigned long pci_mem_start = 0x10000000;
-
-/* Boot loader ID as an integer, for the benefit of proc_dointvec */
-int bootloader_type;
-
-/* user-defined highmem size */
-static unsigned int highmem_pages = -1;
-
-/*
- * Setup options
- */
-struct drive_info_struct { char dummy[32]; } drive_info;
-struct screen_info screen_info;
-struct apm_info apm_info;
-struct sys_desc_table_struct {
- unsigned short length;
- unsigned char table[0];
-};
-struct edid_info edid_info;
-struct ist_info ist_info;
-struct e820map e820;
-
-unsigned char aux_device_present;
-
-extern void early_cpu_init(void);
-extern void dmi_scan_machine(void);
-extern void generic_apic_probe(char *);
-extern int root_mountflags;
-
-unsigned long saved_videomode;
-
-#define RAMDISK_IMAGE_START_MASK 0x07FF
-#define RAMDISK_PROMPT_FLAG 0x8000
-#define RAMDISK_LOAD_FLAG 0x4000
-
-static char command_line[COMMAND_LINE_SIZE];
-
-unsigned char __initdata boot_params[PARAM_SIZE];
-
-static struct resource data_resource = {
- .name = "Kernel data",
- .start = 0,
- .end = 0,
- .flags = IORESOURCE_BUSY | IORESOURCE_MEM
-};
-
-static struct resource code_resource = {
- .name = "Kernel code",
- .start = 0,
- .end = 0,
- .flags = IORESOURCE_BUSY | IORESOURCE_MEM
-};
-
-#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-static struct resource system_rom_resource = {
- .name = "System ROM",
- .start = 0xf0000,
- .end = 0xfffff,
- .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-};
-
-static struct resource extension_rom_resource = {
- .name = "Extension ROM",
- .start = 0xe0000,
- .end = 0xeffff,
- .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-};
-
-static struct resource adapter_rom_resources[] = { {
- .name = "Adapter ROM",
- .start = 0xc8000,
- .end = 0,
- .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-}, {
- .name = "Adapter ROM",
- .start = 0,
- .end = 0,
- .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-}, {
- .name = "Adapter ROM",
- .start = 0,
- .end = 0,
- .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-}, {
- .name = "Adapter ROM",
- .start = 0,
- .end = 0,
- .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-}, {
- .name = "Adapter ROM",
- .start = 0,
- .end = 0,
- .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-}, {
- .name = "Adapter ROM",
- .start = 0,
- .end = 0,
- .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-} };
-
-#define ADAPTER_ROM_RESOURCES \
- (sizeof adapter_rom_resources / sizeof adapter_rom_resources[0])
-
-static struct resource video_rom_resource = {
- .name = "Video ROM",
- .start = 0xc0000,
- .end = 0xc7fff,
- .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-};
-#endif
-
-static struct resource video_ram_resource = {
- .name = "Video RAM area",
- .start = 0xa0000,
- .end = 0xbffff,
- .flags = IORESOURCE_BUSY | IORESOURCE_MEM
-};
-
-static struct resource standard_io_resources[] = { {
- .name = "dma1",
- .start = 0x0000,
- .end = 0x001f,
- .flags = IORESOURCE_BUSY | IORESOURCE_IO
-}, {
- .name = "pic1",
- .start = 0x0020,
- .end = 0x0021,
- .flags = IORESOURCE_BUSY | IORESOURCE_IO
-}, {
- .name = "timer0",
- .start = 0x0040,
- .end = 0x0043,
- .flags = IORESOURCE_BUSY | IORESOURCE_IO
-}, {
- .name = "timer1",
- .start = 0x0050,
- .end = 0x0053,
- .flags = IORESOURCE_BUSY | IORESOURCE_IO
-}, {
- .name = "keyboard",
- .start = 0x0060,
- .end = 0x006f,
- .flags = IORESOURCE_BUSY | IORESOURCE_IO
-}, {
- .name = "dma page reg",
- .start = 0x0080,
- .end = 0x008f,
- .flags = IORESOURCE_BUSY | IORESOURCE_IO
-}, {
- .name = "pic2",
- .start = 0x00a0,
- .end = 0x00a1,
- .flags = IORESOURCE_BUSY | IORESOURCE_IO
-}, {
- .name = "dma2",
- .start = 0x00c0,
- .end = 0x00df,
- .flags = IORESOURCE_BUSY | IORESOURCE_IO
-}, {
- .name = "fpu",
- .start = 0x00f0,
- .end = 0x00ff,
- .flags = IORESOURCE_BUSY | IORESOURCE_IO
-} };
-
-#define STANDARD_IO_RESOURCES \
- (sizeof standard_io_resources / sizeof standard_io_resources[0])
-
-#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-#define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
-
-static int __init romchecksum(unsigned char *rom, unsigned long length)
-{
- unsigned char *p, sum = 0;
-
- for (p = rom; p < rom + length; p++)
- sum += *p;
- return sum == 0;
-}
-
-static void __init probe_roms(void)
-{
- unsigned long start, length, upper;
- unsigned char *rom;
- int i;
-
- /* video rom */
- upper = adapter_rom_resources[0].start;
- for (start = video_rom_resource.start; start < upper; start += 2048) {
- rom = isa_bus_to_virt(start);
- if (!romsignature(rom))
- continue;
-
- video_rom_resource.start = start;
-
- /* 0 < length <= 0x7f * 512, historically */
- length = rom[2] * 512;
-
- /* if checksum okay, trust length byte */
- if (length && romchecksum(rom, length))
- video_rom_resource.end = start + length - 1;
-
- request_resource(&iomem_resource, &video_rom_resource);
- break;
- }
-
- start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
- if (start < upper)
- start = upper;
-
- /* system rom */
- request_resource(&iomem_resource, &system_rom_resource);
- upper = system_rom_resource.start;
-
- /* check for extension rom (ignore length byte!) */
- rom = isa_bus_to_virt(extension_rom_resource.start);
- if (romsignature(rom)) {
- length = extension_rom_resource.end - extension_rom_resource.start + 1;
- if (romchecksum(rom, length)) {
- request_resource(&iomem_resource,
&extension_rom_resource);
- upper = extension_rom_resource.start;
- }
- }
-
- /* check for adapter roms on 2k boundaries */
- for (i = 0; i < ADAPTER_ROM_RESOURCES && start < upper; start += 2048) {
- rom = isa_bus_to_virt(start);
- if (!romsignature(rom))
- continue;
-
- /* 0 < length <= 0x7f * 512, historically */
- length = rom[2] * 512;
-
- /* but accept any length that fits if checksum okay */
- if (!length || start + length > upper || !romchecksum(rom,
length))
- continue;
-
- adapter_rom_resources[i].start = start;
- adapter_rom_resources[i].end = start + length - 1;
- request_resource(&iomem_resource, &adapter_rom_resources[i]);
-
- start = adapter_rom_resources[i++].end & ~2047UL;
- }
-}
-#endif
-
-/*
- * Point at the empty zero page to start with. We map the real shared_info
- * page as soon as fixmap is up and running.
- */
-shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
-EXPORT_SYMBOL(HYPERVISOR_shared_info);
-
-unsigned int *phys_to_machine_mapping, *pfn_to_mfn_frame_list;
-EXPORT_SYMBOL(phys_to_machine_mapping);
-
-multicall_entry_t multicall_list[8];
-int nr_multicall_ents = 0;
-
-/* Raw start-of-day parameters from the hypervisor. */
-union xen_start_info_union xen_start_info_union;
-
-static void __init limit_regions(unsigned long long size)
-{
- unsigned long long current_addr = 0;
- int i;
-
- if (efi_enabled) {
- for (i = 0; i < memmap.nr_map; i++) {
- current_addr = memmap.map[i].phys_addr +
- (memmap.map[i].num_pages << 12);
- if (memmap.map[i].type == EFI_CONVENTIONAL_MEMORY) {
- if (current_addr >= size) {
- memmap.map[i].num_pages -=
- (((current_addr-size) + PAGE_SIZE-1) >> PAGE_SHIFT);
- memmap.nr_map = i + 1;
- return;
- }
- }
- }
- }
- for (i = 0; i < e820.nr_map; i++) {
- if (e820.map[i].type == E820_RAM) {
- current_addr = e820.map[i].addr + e820.map[i].size;
- if (current_addr >= size) {
- e820.map[i].size -= current_addr-size;
- e820.nr_map = i + 1;
- return;
- }
- }
- }
-}
-
-static void __init add_memory_region(unsigned long long start,
- unsigned long long size, int type)
-{
- int x;
-
- if (!efi_enabled) {
- x = e820.nr_map;
-
- if (x == E820MAX) {
- printk(KERN_ERR "Ooops! Too many entries in the memory
map!\n");
- return;
- }
-
- e820.map[x].addr = start;
- e820.map[x].size = size;
- e820.map[x].type = type;
- e820.nr_map++;
- }
-} /* add_memory_region */
-
-#define E820_DEBUG 1
-
-static void __init print_memory_map(char *who)
-{
- int i;
-
- for (i = 0; i < e820.nr_map; i++) {
- printk(" %s: %016Lx - %016Lx ", who,
- e820.map[i].addr,
- e820.map[i].addr + e820.map[i].size);
- switch (e820.map[i].type) {
- case E820_RAM: printk("(usable)\n");
- break;
- case E820_RESERVED:
- printk("(reserved)\n");
- break;
- case E820_ACPI:
- printk("(ACPI data)\n");
- break;
- case E820_NVS:
- printk("(ACPI NVS)\n");
- break;
- default: printk("type %lu\n", e820.map[i].type);
- break;
- }
- }
-}
-
-#if 0
-/*
- * Sanitize the BIOS e820 map.
- *
- * Some e820 responses include overlapping entries. The following
- * replaces the original e820 map with a new one, removing overlaps.
- *
- */
-struct change_member {
- struct e820entry *pbios; /* pointer to original bios entry */
- unsigned long long addr; /* address for this change point */
-};
-struct change_member change_point_list[2*E820MAX] __initdata;
-struct change_member *change_point[2*E820MAX] __initdata;
-struct e820entry *overlap_list[E820MAX] __initdata;
-struct e820entry new_bios[E820MAX] __initdata;
-
-static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
-{
- struct change_member *change_tmp;
- unsigned long current_type, last_type;
- unsigned long long last_addr;
- int chgidx, still_changing;
- int overlap_entries;
- int new_bios_entry;
- int old_nr, new_nr, chg_nr;
- int i;
-
- /*
- Visually we're performing the following (1,2,3,4 = memory types)...
-
- Sample memory map (w/overlaps):
- ____22__________________
- ______________________4_
- ____1111________________
- _44_____________________
- 11111111________________
- ____________________33__
- ___________44___________
- __________33333_________
- ______________22________
- ___________________2222_
- _________111111111______
- _____________________11_
- _________________4______
-
- Sanitized equivalent (no overlap):
- 1_______________________
- _44_____________________
- ___1____________________
- ____22__________________
- ______11________________
- _________1______________
- __________3_____________
- ___________44___________
- _____________33_________
- _______________2________
- ________________1_______
- _________________4______
- ___________________2____
- ____________________33__
- ______________________4_
- */
-
- /* if there's only one memory region, don't bother */
- if (*pnr_map < 2)
- return -1;
-
- old_nr = *pnr_map;
-
- /* bail out if we find any unreasonable addresses in bios map */
- for (i=0; i<old_nr; i++)
- if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
- return -1;
-
- /* create pointers for initial change-point information (for sorting) */
- for (i=0; i < 2*old_nr; i++)
- change_point[i] = &change_point_list[i];
-
- /* record all known change-points (starting and ending addresses),
- omitting those that are for empty memory regions */
- chgidx = 0;
- for (i=0; i < old_nr; i++) {
- if (biosmap[i].size != 0) {
- change_point[chgidx]->addr = biosmap[i].addr;
- change_point[chgidx++]->pbios = &biosmap[i];
- change_point[chgidx]->addr = biosmap[i].addr +
biosmap[i].size;
- change_point[chgidx++]->pbios = &biosmap[i];
- }
- }
- chg_nr = chgidx; /* true number of change-points */
-
- /* sort change-point list by memory addresses (low -> high) */
- still_changing = 1;
- while (still_changing) {
- still_changing = 0;
- for (i=1; i < chg_nr; i++) {
- /* if <current_addr> > <last_addr>, swap */
- /* or, if current=<start_addr> & last=<end_addr>, swap */
- if ((change_point[i]->addr < change_point[i-1]->addr) ||
- ((change_point[i]->addr ==
change_point[i-1]->addr) &&
- (change_point[i]->addr ==
change_point[i]->pbios->addr) &&
- (change_point[i-1]->addr !=
change_point[i-1]->pbios->addr))
- )
- {
- change_tmp = change_point[i];
- change_point[i] = change_point[i-1];
- change_point[i-1] = change_tmp;
- still_changing=1;
- }
- }
- }
-
- /* create a new bios memory map, removing overlaps */
- overlap_entries=0; /* number of entries in the overlap table */
- new_bios_entry=0; /* index for creating new bios map entries */
- last_type = 0; /* start with undefined memory type */
- last_addr = 0; /* start with 0 as last starting address */
- /* loop through change-points, determining effect on the new bios map */
- for (chgidx=0; chgidx < chg_nr; chgidx++)
- {
- /* keep track of all overlapping bios entries */
- if (change_point[chgidx]->addr ==
change_point[chgidx]->pbios->addr)
- {
- /* add map entry to overlap list (> 1 entry implies an overlap) */
- overlap_list[overlap_entries++]=change_point[chgidx]->pbios;
- }
- else
- {
- /* remove entry from list (order independent, so swap with last) */
- for (i=0; i<overlap_entries; i++)
- {
- if (overlap_list[i] == change_point[chgidx]->pbios)
- overlap_list[i] = overlap_list[overlap_entries-1];
- }
- overlap_entries--;
- }
- /* if there are overlapping entries, decide which "type" to use */
- /* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
- current_type = 0;
- for (i=0; i<overlap_entries; i++)
- if (overlap_list[i]->type > current_type)
- current_type = overlap_list[i]->type;
- /* continue building up new bios map based on this information */
- if (current_type != last_type) {
- if (last_type != 0) {
- new_bios[new_bios_entry].size =
- change_point[chgidx]->addr - last_addr;
- /* move forward only if the new size was non-zero */
- if (new_bios[new_bios_entry].size != 0)
- if (++new_bios_entry >= E820MAX)
- break; /* no more space left for new bios entries */
- }
- if (current_type != 0) {
- new_bios[new_bios_entry].addr = change_point[chgidx]->addr;
- new_bios[new_bios_entry].type = current_type;
- last_addr=change_point[chgidx]->addr;
- }
- last_type = current_type;
- }
- }
- new_nr = new_bios_entry; /* retain count for new bios entries */
-
- /* copy new bios mapping into original location */
- memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry));
- *pnr_map = new_nr;
-
- return 0;
-}
-
-/*
- * Copy the BIOS e820 map into a safe place.
- *
- * Sanity-check it while we're at it..
- *
- * If we're lucky and live on a modern system, the setup code
- * will have given us a memory map that we can use to properly
- * set up memory. If we aren't, we'll fake a memory map.
- *
- * We check to see that the memory map contains at least 2 elements
- * before we'll use it, because the detection code in setup.S may
- * not be perfect and most every PC known to man has two memory
- * regions: one from 0 to 640k, and one from 1mb up. (The IBM
- * thinkpad 560x, for example, does not cooperate with the memory
- * detection code.)
- */
-static int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
-{
- /* Only one memory region (or negative)? Ignore it */
- if (nr_map < 2)
- return -1;
-
- do {
- unsigned long long start = biosmap->addr;
- unsigned long long size = biosmap->size;
- unsigned long long end = start + size;
- unsigned long type = biosmap->type;
-
- /* Overflow in 64 bits? Ignore the memory map. */
- if (start > end)
- return -1;
-
- /*
- * Some BIOSes claim RAM in the 640k - 1M region.
- * Not right. Fix it up.
- */
- if (type == E820_RAM) {
- if (start < 0x100000ULL && end > 0xA0000ULL) {
- if (start < 0xA0000ULL)
- add_memory_region(start, 0xA0000ULL-start, type);
- if (end <= 0x100000ULL)
- continue;
- start = 0x100000ULL;
- size = end - start;
- }
- }
- add_memory_region(start, size, type);
- } while (biosmap++,--nr_map);
- return 0;
-}
-#endif
-
-#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
-struct edd edd;
-#ifdef CONFIG_EDD_MODULE
-EXPORT_SYMBOL(edd);
-#endif
-/**
- * copy_edd() - Copy the BIOS EDD information
- * from boot_params into a safe place.
- *
- */
-static inline void copy_edd(void)
-{
- memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
- memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
- edd.mbr_signature_nr = EDD_MBR_SIG_NR;
- edd.edd_info_nr = EDD_NR;
-}
-#else
-static inline void copy_edd(void)
-{
-}
-#endif
-
-/*
- * Do NOT EVER look at the BIOS memory size location.
- * It does not work on many machines.
- */
-#define LOWMEMSIZE() (0x9f000)
-
-static void __init parse_cmdline_early (char ** cmdline_p)
-{
- char c = ' ', *to = command_line, *from = saved_command_line;
- int len = 0;
- int userdef = 0;
-
- memcpy(saved_command_line, xen_start_info.cmd_line, MAX_CMDLINE);
- /* Save unparsed command line copy for /proc/cmdline */
- saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
-
- for (;;) {
- if (c != ' ')
- goto next_char;
- /*
- * "mem=nopentium" disables the 4MB page tables.
- * "mem=XXX[kKmM]" defines a memory region from HIGH_MEM
- * to <mem>, overriding the bios size.
- * "memmap=XXX[KkmM]@XXX[KkmM]" defines a memory region from
- * <start> to <start>+<mem>, overriding the bios size.
- *
- * HPA tells me bootloaders need to parse mem=, so no new
- * option should be mem= [also see Documentation/i386/boot.txt]
- */
- if (!memcmp(from, "mem=", 4)) {
- if (to != command_line)
- to--;
- if (!memcmp(from+4, "nopentium", 9)) {
- from += 9+4;
- clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
- disable_pse = 1;
- } else {
- /* If the user specifies memory size, we
- * limit the BIOS-provided memory map to
- * that size. exactmap can be used to specify
- * the exact map. mem=number can be used to
- * trim the existing memory map.
- */
- unsigned long long mem_size;
-
- mem_size = memparse(from+4, &from);
-#if 0
- limit_regions(mem_size);
- userdef=1;
-#else
- xen_override_max_pfn =
- (unsigned long)(mem_size>>PAGE_SHIFT);
-#endif
- }
- }
-
- else if (!memcmp(from, "memmap=", 7)) {
- if (to != command_line)
- to--;
- if (!memcmp(from+7, "exactmap", 8)) {
- from += 8+7;
- e820.nr_map = 0;
- userdef = 1;
- } else {
- /* If the user specifies memory size, we
- * limit the BIOS-provided memory map to
- * that size. exactmap can be used to specify
- * the exact map. mem=number can be used to
- * trim the existing memory map.
- */
- unsigned long long start_at, mem_size;
-
- mem_size = memparse(from+7, &from);
- if (*from == '@') {
- start_at = memparse(from+1, &from);
- add_memory_region(start_at, mem_size, E820_RAM);
- } else if (*from == '#') {
- start_at = memparse(from+1, &from);
- add_memory_region(start_at, mem_size, E820_ACPI);
- } else if (*from == '$') {
- start_at = memparse(from+1, &from);
- add_memory_region(start_at, mem_size, E820_RESERVED);
- } else {
- limit_regions(mem_size);
- userdef=1;
- }
- }
- }
-
- else if (!memcmp(from, "noexec=", 7))
- noexec_setup(from + 7);
-
-
-#ifdef CONFIG_X86_SMP
- /*
- * If the BIOS enumerates physical processors before logical,
- * maxcpus=N at enumeration-time can be used to disable HT.
- */
- else if (!memcmp(from, "maxcpus=", 8)) {
- extern unsigned int maxcpus;
-
- maxcpus = simple_strtoul(from + 8, NULL, 0);
- }
-#endif
-
-#ifdef CONFIG_ACPI_BOOT
- /* "acpi=off" disables both ACPI table parsing and interpreter
*/
- else if (!memcmp(from, "acpi=off", 8)) {
- disable_acpi();
- }
-
- /* acpi=force to over-ride black-list */
- else if (!memcmp(from, "acpi=force", 10)) {
- acpi_force = 1;
- acpi_ht = 1;
- acpi_disabled = 0;
- }
-
- /* acpi=strict disables out-of-spec workarounds */
- else if (!memcmp(from, "acpi=strict", 11)) {
- acpi_strict = 1;
- }
-
- /* Limit ACPI just to boot-time to enable HT */
- else if (!memcmp(from, "acpi=ht", 7)) {
- if (!acpi_force)
- disable_acpi();
- acpi_ht = 1;
- }
-
- /* "pci=noacpi" disable ACPI IRQ routing and PCI scan */
- else if (!memcmp(from, "pci=noacpi", 10)) {
- acpi_disable_pci();
- }
- /* "acpi=noirq" disables ACPI interrupt routing */
- else if (!memcmp(from, "acpi=noirq", 10)) {
- acpi_noirq_set();
- }
-
- else if (!memcmp(from, "acpi_sci=edge", 13))
- acpi_sci_flags.trigger = 1;
-
- else if (!memcmp(from, "acpi_sci=level", 14))
- acpi_sci_flags.trigger = 3;
-
- else if (!memcmp(from, "acpi_sci=high", 13))
- acpi_sci_flags.polarity = 1;
-
- else if (!memcmp(from, "acpi_sci=low", 12))
- acpi_sci_flags.polarity = 3;
-
-#ifdef CONFIG_X86_IO_APIC
- else if (!memcmp(from, "acpi_skip_timer_override", 24))
- acpi_skip_timer_override = 1;
-#endif
-
-#ifdef CONFIG_X86_LOCAL_APIC
- /* disable IO-APIC */
- else if (!memcmp(from, "noapic", 6))
- disable_ioapic_setup();
-#endif /* CONFIG_X86_LOCAL_APIC */
-#endif /* CONFIG_ACPI_BOOT */
-
- /*
- * highmem=size forces highmem to be exactly 'size' bytes.
- * This works even on boxes that have no highmem otherwise.
- * This also works to reduce highmem size on bigger boxes.
- */
- else if (!memcmp(from, "highmem=", 8))
- highmem_pages = memparse(from+8, &from) >> PAGE_SHIFT;
-
- /*
- * vmalloc=size forces the vmalloc area to be exactly 'size'
- * bytes. This can be used to increase (or decrease) the
- * vmalloc area - the default is 128m.
- */
- else if (!memcmp(from, "vmalloc=", 8))
- __VMALLOC_RESERVE = memparse(from+8, &from);
-
- next_char:
- c = *(from++);
- if (!c)
- break;
- if (COMMAND_LINE_SIZE <= ++len)
- break;
- *(to++) = c;
- }
- *to = '\0';
- *cmdline_p = command_line;
- if (userdef) {
- printk(KERN_INFO "user-defined physical RAM map:\n");
- print_memory_map("user");
- }
-}
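A worked example of the options parsed above (values illustrative): on this
Xen build "mem=512M" caps memory via xen_override_max_pfn rather than
limit_regions(), while "memmap=exactmap memmap=640K@0 memmap=511M@1M" first
empties the e820 map and then rebuilds it from the given regions;
"memmap=64M#16M" would instead add an ACPI-data region and "memmap=64M$16M"
a reserved one.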
-
-#if 0 /* !XEN */
-/*
- * Callback for efi_memory_walk.
- */
-static int __init
-efi_find_max_pfn(unsigned long start, unsigned long end, void *arg)
-{
- unsigned long *max_pfn = arg, pfn;
-
- if (start < end) {
- pfn = PFN_UP(end -1);
- if (pfn > *max_pfn)
- *max_pfn = pfn;
- }
- return 0;
-}
-
-
-/*
- * Find the highest page frame number we have available
- */
-void __init find_max_pfn(void)
-{
- int i;
-
- max_pfn = 0;
- if (efi_enabled) {
- efi_memmap_walk(efi_find_max_pfn, &max_pfn);
- return;
- }
-
- for (i = 0; i < e820.nr_map; i++) {
- unsigned long start, end;
- /* RAM? */
- if (e820.map[i].type != E820_RAM)
- continue;
- start = PFN_UP(e820.map[i].addr);
- end = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
- if (start >= end)
- continue;
- if (end > max_pfn)
- max_pfn = end;
- }
-}
-#else
-/* We don't use the fake e820 because we need to respond to user override. */
-void __init find_max_pfn(void)
-{
- if ( xen_override_max_pfn < xen_start_info.nr_pages )
- xen_override_max_pfn = xen_start_info.nr_pages;
- max_pfn = xen_override_max_pfn;
-}
-#endif /* XEN */
-
-/*
- * Determine low and high memory ranges:
- */
-unsigned long __init find_max_low_pfn(void)
-{
- unsigned long max_low_pfn;
-
- max_low_pfn = max_pfn;
- if (max_low_pfn > MAXMEM_PFN) {
- if (highmem_pages == -1)
- highmem_pages = max_pfn - MAXMEM_PFN;
- if (highmem_pages + MAXMEM_PFN < max_pfn)
- max_pfn = MAXMEM_PFN + highmem_pages;
- if (highmem_pages + MAXMEM_PFN > max_pfn) {
- printk("only %luMB highmem pages available, ignoring
highmem size of %uMB.\n", pages_to_mb(max_pfn - MAXMEM_PFN),
pages_to_mb(highmem_pages));
- highmem_pages = 0;
- }
- max_low_pfn = MAXMEM_PFN;
-#ifndef CONFIG_HIGHMEM
- /* Maximum memory usable is what is directly addressable */
- printk(KERN_WARNING "Warning only %ldMB will be used.\n",
- MAXMEM>>20);
- if (max_pfn > MAX_NONPAE_PFN)
- printk(KERN_WARNING "Use a PAE enabled kernel.\n");
- else
- printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
- max_pfn = MAXMEM_PFN;
-#else /* !CONFIG_HIGHMEM */
-#ifndef CONFIG_X86_PAE
- if (max_pfn > MAX_NONPAE_PFN) {
- max_pfn = MAX_NONPAE_PFN;
- printk(KERN_WARNING "Warning only 4GB will be used.\n");
- printk(KERN_WARNING "Use a PAE enabled kernel.\n");
- }
-#endif /* !CONFIG_X86_PAE */
-#endif /* !CONFIG_HIGHMEM */
- } else {
- if (highmem_pages == -1)
- highmem_pages = 0;
-#ifdef CONFIG_HIGHMEM
- if (highmem_pages >= max_pfn) {
- printk(KERN_ERR "highmem size specified (%uMB) is
bigger than pages available (%luMB)!.\n", pages_to_mb(highmem_pages),
pages_to_mb(max_pfn));
- highmem_pages = 0;
- }
- if (highmem_pages) {
- if (max_low_pfn-highmem_pages < 64*1024*1024/PAGE_SIZE){
- printk(KERN_ERR "highmem size %uMB results in
smaller than 64MB lowmem, ignoring it.\n", pages_to_mb(highmem_pages));
- highmem_pages = 0;
- }
- max_low_pfn -= highmem_pages;
- }
-#else
- if (highmem_pages)
- printk(KERN_ERR "ignoring highmem size on non-highmem
kernel!\n");
-#endif
- }
- return max_low_pfn;
-}
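A worked example of the split above, assuming the usual i386 layout where
MAXMEM_PFN corresponds to 896MB of lowmem: for 1GB of RAM, max_pfn = 262144
pages, so highmem_pages defaults to 262144 - 229376 = 32768 pages, i.e.
128MB of highmem on a CONFIG_HIGHMEM kernel, with max_low_pfn left at 229376.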
-
-#ifndef CONFIG_DISCONTIGMEM
-
-/*
- * Free all available memory for boot time allocation. Used
- * as a callback function by efi_memory_walk()
- */
-
-static int __init
-free_available_memory(unsigned long start, unsigned long end, void *arg)
-{
- /* check max_low_pfn */
- if (start >= ((max_low_pfn + 1) << PAGE_SHIFT))
- return 0;
- if (end >= ((max_low_pfn + 1) << PAGE_SHIFT))
- end = (max_low_pfn + 1) << PAGE_SHIFT;
- if (start < end)
- free_bootmem(start, end - start);
-
- return 0;
-}
-/*
- * Register fully available low RAM pages with the bootmem allocator.
- */
-static void __init register_bootmem_low_pages(unsigned long max_low_pfn)
-{
- int i;
-
- if (efi_enabled) {
- efi_memmap_walk(free_available_memory, NULL);
- return;
- }
- for (i = 0; i < e820.nr_map; i++) {
- unsigned long curr_pfn, last_pfn, size;
- /*
- * Reserve usable low memory
- */
- if (e820.map[i].type != E820_RAM)
- continue;
- /*
- * We are rounding up the start address of usable memory:
- */
- curr_pfn = PFN_UP(e820.map[i].addr);
- if (curr_pfn >= max_low_pfn)
- continue;
- /*
- * ... and at the end of the usable range downwards:
- */
- last_pfn = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
-
- if (last_pfn > max_low_pfn)
- last_pfn = max_low_pfn;
-
- /*
- * .. finally, did all the rounding and playing
- * around just make the area go away?
- */
- if (last_pfn <= curr_pfn)
- continue;
-
- size = last_pfn - curr_pfn;
- free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
- }
-}
-
-/*
- * workaround for Dell systems that neglect to reserve EBDA
- */
-static void __init reserve_ebda_region(void)
-{
- unsigned int addr;
- addr = get_bios_ebda();
- if (addr)
- reserve_bootmem(addr, PAGE_SIZE);
-}
-
-static unsigned long __init setup_memory(void)
-{
- unsigned long bootmap_size, start_pfn, max_low_pfn;
-
- /*
- * partially used pages are not usable - thus
- * we are rounding upwards:
- */
- start_pfn = PFN_UP(__pa(xen_start_info.pt_base)) + xen_start_info.nr_pt_frames;
-
- find_max_pfn();
-
- max_low_pfn = find_max_low_pfn();
-
-#ifdef CONFIG_HIGHMEM
- highstart_pfn = highend_pfn = max_pfn;
- if (max_pfn > max_low_pfn) {
- highstart_pfn = max_low_pfn;
- }
- printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
- pages_to_mb(highend_pfn - highstart_pfn));
-#endif
- printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
- pages_to_mb(max_low_pfn));
- /*
- * Initialize the boot-time allocator (with low memory only):
- */
- bootmap_size = init_bootmem(start_pfn, max_low_pfn);
-
- register_bootmem_low_pages(max_low_pfn);
-
- /*
- * Reserve the bootmem bitmap itself as well. We do this in two
- * steps (first step was init_bootmem()) because this catches
- * the (very unlikely) case of us accidentally initializing the
- * bootmem allocator with an invalid RAM area.
- */
- reserve_bootmem(HIGH_MEMORY, (PFN_PHYS(start_pfn) +
- bootmap_size + PAGE_SIZE-1) - (HIGH_MEMORY));
-
- /* reserve EBDA region, it's a 4K region */
- reserve_ebda_region();
-
- /* could be an AMD 768MPX chipset. Reserve a page before VGA to prevent
- PCI prefetch into it (errata #56). Usually the page is reserved anyways,
- unless you have no PS/2 mouse plugged in. */
- if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
- boot_cpu_data.x86 == 6)
- reserve_bootmem(0xa0000 - 4096, 4096);
-
-#ifdef CONFIG_SMP
- /*
- * But first pinch a few for the stack/trampoline stuff
- * FIXME: Don't need the extra page at 4K, but need to fix
- * trampoline before removing it. (see the GDT stuff)
- */
- reserve_bootmem(PAGE_SIZE, PAGE_SIZE);
-#endif
-#ifdef CONFIG_ACPI_SLEEP
- /*
- * Reserve low memory region for sleep support.
- */
- acpi_reserve_bootmem();
-#endif
-#ifdef CONFIG_X86_FIND_SMP_CONFIG
- /*
- * Find and reserve possible boot-time SMP configuration:
- */
- find_smp_config();
-#endif
-
-#ifdef CONFIG_BLK_DEV_INITRD
- if (xen_start_info.mod_start) {
- if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
- /*reserve_bootmem(INITRD_START, INITRD_SIZE);*/
- initrd_start = INITRD_START + PAGE_OFFSET;
- initrd_end = initrd_start+INITRD_SIZE;
- initrd_below_start_ok = 1;
- }
- else {
- printk(KERN_ERR "initrd extends beyond end of memory "
- "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
- INITRD_START + INITRD_SIZE,
- max_low_pfn << PAGE_SHIFT);
- initrd_start = 0;
- }
- }
-#endif
-
- phys_to_machine_mapping = (unsigned int *)xen_start_info.mfn_list;
-
- return max_low_pfn;
-}
-#else
-extern unsigned long setup_memory(void);
-#endif /* !CONFIG_DISCONTIGMEM */
-
-/*
- * Request address space for all standard RAM and ROM resources
- * and also for regions reported as reserved by the e820.
- */
-static void __init
-legacy_init_iomem_resources(struct resource *code_resource, struct resource *data_resource)
-{
- int i;
-
-#ifdef CONFIG_XEN_PRIVILEGED_GUEST
- probe_roms();
-#endif
- for (i = 0; i < e820.nr_map; i++) {
- struct resource *res;
- if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL)
- continue;
- res = alloc_bootmem_low(sizeof(struct resource));
- switch (e820.map[i].type) {
- case E820_RAM: res->name = "System RAM"; break;
- case E820_ACPI: res->name = "ACPI Tables"; break;
- case E820_NVS: res->name = "ACPI Non-volatile Storage"; break;
- default: res->name = "reserved";
- }
- res->start = e820.map[i].addr;
- res->end = res->start + e820.map[i].size - 1;
- res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
- request_resource(&iomem_resource, res);
- if (e820.map[i].type == E820_RAM) {
- /*
- * We don't know which RAM region contains kernel data,
- * so we try it repeatedly and let the resource manager
- * test it.
- */
- request_resource(res, code_resource);
- request_resource(res, data_resource);
- }
- }
-}
-
-/*
- * Request address space for all standard resources
- */
-static void __init register_memory(void)
-{
- unsigned long gapstart, gapsize;
- unsigned long long last;
- int i;
-
- if (efi_enabled)
- efi_initialize_iomem_resources(&code_resource, &data_resource);
- else
- legacy_init_iomem_resources(&code_resource, &data_resource);
-
- /* EFI systems may still have VGA */
- request_resource(&iomem_resource, &video_ram_resource);
-
- /* request I/O space for devices used on all i[345]86 PCs */
- for (i = 0; i < STANDARD_IO_RESOURCES; i++)
- request_resource(&ioport_resource, &standard_io_resources[i]);
-
- /*
- * Search for the biggest gap in the low 32 bits of the e820
- * memory space.
- */
- last = 0x100000000ull;
- gapstart = 0x10000000;
- gapsize = 0x400000;
- i = e820.nr_map;
- while (--i >= 0) {
- unsigned long long start = e820.map[i].addr;
- unsigned long long end = start + e820.map[i].size;
-
- /*
- * Since "last" is at most 4GB, we know we'll
- * fit in 32 bits if this condition is true
- */
- if (last > end) {
- unsigned long gap = last - end;
-
- if (gap > gapsize) {
- gapsize = gap;
- gapstart = end;
- }
- }
- if (start < last)
- last = start;
- }
-
- /*
- * Start allocating dynamic PCI memory a bit into the gap,
- * aligned up to the nearest megabyte.
- *
- * Question: should we try to pad it up a bit (do something
- * like " + (gapsize >> 3)" in there too?). We now have the
- * technology.
- */
- pci_mem_start = (gapstart + 0xfffff) & ~0xfffff;
-
- printk("Allocating PCI resources starting at %08lx (gap:
%08lx:%08lx)\n",
- pci_mem_start, gapstart, gapsize);
-}
-
-/* Use inline assembly to define this because the nops are defined
- as inline assembly strings in the include files and we cannot
- get them easily into strings. */
-asm("\t.data\nintelnops: "
- GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
- GENERIC_NOP7 GENERIC_NOP8);
-asm("\t.data\nk8nops: "
- K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
- K8_NOP7 K8_NOP8);
-asm("\t.data\nk7nops: "
- K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
- K7_NOP7 K7_NOP8);
-
-extern unsigned char intelnops[], k8nops[], k7nops[];
-static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
- NULL,
- intelnops,
- intelnops + 1,
- intelnops + 1 + 2,
- intelnops + 1 + 2 + 3,
- intelnops + 1 + 2 + 3 + 4,
- intelnops + 1 + 2 + 3 + 4 + 5,
- intelnops + 1 + 2 + 3 + 4 + 5 + 6,
- intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
-};
-static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
- NULL,
- k8nops,
- k8nops + 1,
- k8nops + 1 + 2,
- k8nops + 1 + 2 + 3,
- k8nops + 1 + 2 + 3 + 4,
- k8nops + 1 + 2 + 3 + 4 + 5,
- k8nops + 1 + 2 + 3 + 4 + 5 + 6,
- k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
-};
-static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
- NULL,
- k7nops,
- k7nops + 1,
- k7nops + 1 + 2,
- k7nops + 1 + 2 + 3,
- k7nops + 1 + 2 + 3 + 4,
- k7nops + 1 + 2 + 3 + 4 + 5,
- k7nops + 1 + 2 + 3 + 4 + 5 + 6,
- k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
-};
-static struct nop {
- int cpuid;
- unsigned char **noptable;
-} noptypes[] = {
- { X86_FEATURE_K8, k8_nops },
- { X86_FEATURE_K7, k7_nops },
- { -1, NULL }
-};
-
-/* Replace instructions with better alternatives for this CPU type.
-
- This runs before SMP is initialized to avoid SMP problems with
- self modifying code. This implies that asymmetric systems where
- APs have fewer capabilities than the boot processor are not handled.
- In this case boot with "noreplacement". */
-void apply_alternatives(void *start, void *end)
-{
- struct alt_instr *a;
- int diff, i, k;
- unsigned char **noptable = intel_nops;
- for (i = 0; noptypes[i].cpuid >= 0; i++) {
- if (boot_cpu_has(noptypes[i].cpuid)) {
- noptable = noptypes[i].noptable;
- break;
- }
- }
- for (a = start; (void *)a < end; a++) {
- if (!boot_cpu_has(a->cpuid))
- continue;
- BUG_ON(a->replacementlen > a->instrlen);
- memcpy(a->instr, a->replacement, a->replacementlen);
- diff = a->instrlen - a->replacementlen;
- /* Pad the rest with nops */
- for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
- k = diff;
- if (k > ASM_NOP_MAX)
- k = ASM_NOP_MAX;
- memcpy(a->instr + i, noptable[k], k);
- }
- }
-}
-
-static int no_replacement __initdata = 0;
-
-void __init alternative_instructions(void)
-{
- extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
- if (no_replacement)
- return;
- apply_alternatives(__alt_instructions, __alt_instructions_end);
-}
-
-static int __init noreplacement_setup(char *s)
-{
- no_replacement = 1;
- return 0;
-}
-
-__setup("noreplacement", noreplacement_setup);
-
-static char * __init machine_specific_memory_setup(void);
-
-#ifdef CONFIG_MCA
-static void set_mca_bus(int x)
-{
- MCA_bus = x;
-}
-#else
-static void set_mca_bus(int x) { }
-#endif
-
-/*
- * Determine if we were loaded by an EFI loader. If so, then we have also been
- * passed the efi memmap, systab, etc., so we should use these data structures
- * for initialization. Note, the efi init code path is determined by the
- * global efi_enabled. This allows the same kernel image to be used on existing
- * systems (with a traditional BIOS) as well as on EFI systems.
- */
-void __init setup_arch(char **cmdline_p)
-{
- int i, j;
- unsigned long max_low_pfn;
-
- /* Force a quick death if the kernel panics. */
- extern int panic_timeout;
- if (panic_timeout == 0)
- panic_timeout = 1;
-
- /* Register a call for panic conditions. */
- notifier_chain_register(&panic_notifier_list, &xen_panic_block);
-
- HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
-
- memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
- early_cpu_init();
-
- /*
- * FIXME: This isn't an official loader_type right
- * now but does currently work with elilo.
- * If we were configured as an EFI kernel, check to make
- * sure that we were loaded correctly from elilo and that
- * the system table is valid. If not, then initialize normally.
- */
-#ifdef CONFIG_EFI
- if ((LOADER_TYPE == 0x50) && EFI_SYSTAB)
- efi_enabled = 1;
-#endif
-
- /* This must be initialized to UNNAMED_MAJOR for ipconfig to work
- properly. Setting ROOT_DEV to default to /dev/ram0 breaks initrd.
- */
- ROOT_DEV = MKDEV(UNNAMED_MAJOR,0);
- drive_info = DRIVE_INFO;
- screen_info = SCREEN_INFO;
- edid_info = EDID_INFO;
- apm_info.bios = APM_BIOS_INFO;
- ist_info = IST_INFO;
- saved_videomode = VIDEO_MODE;
- if( SYS_DESC_TABLE.length != 0 ) {
- set_mca_bus(SYS_DESC_TABLE.table[3] & 0x2);
- machine_id = SYS_DESC_TABLE.table[0];
- machine_submodel_id = SYS_DESC_TABLE.table[1];
- BIOS_revision = SYS_DESC_TABLE.table[2];
- }
- aux_device_present = AUX_DEVICE_INFO;
- bootloader_type = LOADER_TYPE;
-
-#ifdef CONFIG_XEN_PHYSDEV_ACCESS
- /* This is drawn from a dump from vgacon:startup in standard Linux. */
- screen_info.orig_video_mode = 3;
- screen_info.orig_video_isVGA = 1;
- screen_info.orig_video_lines = 25;
- screen_info.orig_video_cols = 80;
- screen_info.orig_video_ega_bx = 3;
- screen_info.orig_video_points = 16;
-#endif
-
-#ifdef CONFIG_BLK_DEV_RAM
- rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
- rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
- rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
-#endif
- ARCH_SETUP
- if (efi_enabled)
- efi_init();
- else {
- printk(KERN_INFO "BIOS-provided physical RAM map:\n");
- print_memory_map(machine_specific_memory_setup());
- }
-
- copy_edd();
-
- if (!MOUNT_ROOT_RDONLY)
- root_mountflags &= ~MS_RDONLY;
- init_mm.start_code = (unsigned long) _text;
- init_mm.end_code = (unsigned long) _etext;
- init_mm.end_data = (unsigned long) _edata;
- init_mm.brk = (PFN_UP(__pa(xen_start_info.pt_base)) +
- xen_start_info.nr_pt_frames) << PAGE_SHIFT;
-
- /* XEN: This is nonsense: kernel may not even be contiguous in RAM. */
- /*code_resource.start = virt_to_phys(_text);*/
- /*code_resource.end = virt_to_phys(_etext)-1;*/
- /*data_resource.start = virt_to_phys(_etext);*/
- /*data_resource.end = virt_to_phys(_edata)-1;*/
-
- parse_cmdline_early(cmdline_p);
-
- max_low_pfn = setup_memory();
-
- /*
- * NOTE: before this point _nobody_ is allowed to allocate
- * any memory using the bootmem allocator. Although the
- * allocator is now initialised, only the first 8Mb of the kernel
- * virtual address space has been mapped. All allocations before
- * paging_init() has completed must use the alloc_bootmem_low_pages()
- * variant (which allocates DMA'able memory) and care must be taken
- * not to exceed the 8Mb limit.
- */
-
-#ifdef CONFIG_SMP
- smp_alloc_memory(); /* AP processor realmode stacks in low memory*/
-#endif
- paging_init();
-
- /* Make sure we have a correctly sized P->M table. */
- if (max_pfn != xen_start_info.nr_pages) {
- phys_to_machine_mapping = alloc_bootmem_low_pages(
- max_pfn * sizeof(unsigned long));
-
- if (max_pfn > xen_start_info.nr_pages) {
- /* set to INVALID_P2M_ENTRY */
- memset(phys_to_machine_mapping, ~0,
- max_pfn * sizeof(unsigned long));
- memcpy(phys_to_machine_mapping,
- (unsigned long *)xen_start_info.mfn_list,
- xen_start_info.nr_pages * sizeof(unsigned long));
- } else {
- memcpy(phys_to_machine_mapping,
- (unsigned long *)xen_start_info.mfn_list,
- max_pfn * sizeof(unsigned long));
- if (HYPERVISOR_dom_mem_op(
- MEMOP_decrease_reservation,
- (unsigned long *)xen_start_info.mfn_list + max_pfn,
- xen_start_info.nr_pages - max_pfn, 0) !=
- (xen_start_info.nr_pages - max_pfn)) BUG();
- }
- free_bootmem(
- __pa(xen_start_info.mfn_list),
- PFN_PHYS(PFN_UP(xen_start_info.nr_pages *
- sizeof(unsigned long))));
- }
-
- pfn_to_mfn_frame_list = alloc_bootmem_low_pages(PAGE_SIZE);
- for ( i=0, j=0; i < max_pfn; i+=(PAGE_SIZE/sizeof(unsigned long)), j++ )
- {
- pfn_to_mfn_frame_list[j] =
- virt_to_machine(&phys_to_machine_mapping[i]) >> PAGE_SHIFT;
- }
- HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list =
- virt_to_machine(pfn_to_mfn_frame_list) >> PAGE_SHIFT;
-
-
- /*
- * NOTE: at this point the bootmem allocator is fully available.
- */
-
-#ifdef CONFIG_EARLY_PRINTK
- {
- char *s = strstr(*cmdline_p, "earlyprintk=");
- if (s) {
- extern void setup_early_printk(char *);
-
- setup_early_printk(s);
- printk("early console enabled\n");
- }
- }
-#endif
-
-
- dmi_scan_machine();
-
-#ifdef CONFIG_X86_GENERICARCH
- generic_apic_probe(*cmdline_p);
-#endif
- if (efi_enabled)
- efi_map_memmap();
-
- /*
- * Parse the ACPI tables for possible boot-time SMP configuration.
- */
- acpi_boot_table_init();
- acpi_boot_init();
-
-#ifdef CONFIG_X86_LOCAL_APIC
- if (smp_found_config)
- get_smp_config();
-#endif
-
- /* XXX Disable irqdebug until we have a way to avoid interrupt
- * conflicts. */
- noirqdebug_setup("");
-
- register_memory();
-
- /* If we are a privileged guest OS then we should request IO privs. */
- if (xen_start_info.flags & SIF_PRIVILEGED) {
- dom0_op_t op;
- op.cmd = DOM0_IOPL;
- op.u.iopl.domain = DOMID_SELF;
- op.u.iopl.iopl = 1;
- if (HYPERVISOR_dom0_op(&op) != 0)
- panic("Unable to obtain IOPL, despite SIF_PRIVILEGED");
- current->thread.io_pl = 1;
- }
-
- if (xen_start_info.flags & SIF_INITDOMAIN) {
- if (!(xen_start_info.flags & SIF_PRIVILEGED))
- panic("Xen granted us console access "
- "but not privileged status");
-
-#ifdef CONFIG_VT
-#if defined(CONFIG_VGA_CONSOLE)
- if (!efi_enabled ||
- (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
- conswitchp = &vga_con;
-#elif defined(CONFIG_DUMMY_CONSOLE)
- conswitchp = &dummy_con;
-#endif
-#endif
- } else {
-#ifdef CONFIG_XEN_PRIVILEGED_GUEST
- extern const struct consw xennull_con;
- extern int console_use_vt;
-#if defined(CONFIG_VGA_CONSOLE)
- /* disable VGA driver */
- ORIG_VIDEO_ISVGA = VIDEO_TYPE_VLFB;
-#endif
- conswitchp = &xennull_con;
- console_use_vt = 0;
-#endif
- }
-}
-
-static int
-xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
-{
- HYPERVISOR_crash();
- /* we're never actually going to get here... */
- return NOTIFY_DONE;
-}
-
-#include "setup_arch_post.h"
-/*
- * Local Variables:
- * mode:c
- * c-file-style:"k&r"
- * c-basic-offset:8
- * End:
- */
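
The P->M resizing block in setup_arch() above ends by publishing, one entry per page of the phys-to-machine table, the machine frame backing that page. A simplified sketch of that frame-list construction; virt_to_mfn() here is a hypothetical stand-in for virt_to_machine(...) >> PAGE_SHIFT, and PAGE_SIZE is assumed:

#define PAGE_SIZE 4096UL /* assumed */
#define ULONGS_PER_PAGE (PAGE_SIZE / sizeof(unsigned long))

/* Hypothetical stand-in for virt_to_machine(addr) >> PAGE_SHIFT. */
static unsigned long virt_to_mfn(const void *addr)
{
	return (unsigned long)addr / PAGE_SIZE;
}

/* One frame-list entry per page's worth of P->M entries, mirroring the
   i += PAGE_SIZE/sizeof(unsigned long) stride in the loop above. */
static void build_frame_list(unsigned long *frame_list,
                             unsigned long *p2m, unsigned long max_pfn)
{
	unsigned long i, j;

	for (i = 0, j = 0; i < max_pfn; i += ULONGS_PER_PAGE, j++)
		frame_list[j] = virt_to_mfn(&p2m[i]);
}
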
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/arch/xen/i386/kernel/signal.c
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/signal.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,656 +0,0 @@
-/*
- * linux/arch/i386/kernel/signal.c
- *
- * Copyright (C) 1991, 1992 Linus Torvalds
- *
- * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
- * 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes
- */
-
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/kernel.h>
-#include <linux/signal.h>
-#include <linux/errno.h>
-#include <linux/wait.h>
-#include <linux/unistd.h>
-#include <linux/stddef.h>
-#include <linux/personality.h>
-#include <linux/suspend.h>
-#include <linux/ptrace.h>
-#include <linux/elf.h>
-#include <asm/processor.h>
-#include <asm/ucontext.h>
-#include <asm/uaccess.h>
-#include <asm/i387.h>
-#include "sigframe.h"
-
-#define DEBUG_SIG 0
-
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
-/*
- * Atomically swap in the new signal mask, and wait for a signal.
- */
-asmlinkage int
-sys_sigsuspend(int history0, int history1, old_sigset_t mask)
-{
- struct pt_regs * regs = (struct pt_regs *) &history0;
- sigset_t saveset;
-
- mask &= _BLOCKABLE;
- spin_lock_irq(&current->sighand->siglock);
- saveset = current->blocked;
- siginitset(&current->blocked, mask);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
-
- regs->eax = -EINTR;
- while (1) {
- current->state = TASK_INTERRUPTIBLE;
- schedule();
- if (do_signal(regs, &saveset))
- return -EINTR;
- }
-}
-
-asmlinkage int
-sys_rt_sigsuspend(struct pt_regs regs)
-{
- sigset_t saveset, newset;
-
- /* XXX: Don't preclude handling different sized sigset_t's. */
- if (regs.ecx != sizeof(sigset_t))
- return -EINVAL;
-
- if (copy_from_user(&newset, (sigset_t __user *)regs.ebx, sizeof(newset)))
- return -EFAULT;
- sigdelsetmask(&newset, ~_BLOCKABLE);
-
- spin_lock_irq(&current->sighand->siglock);
- saveset = current->blocked;
- current->blocked = newset;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
-
- regs.eax = -EINTR;
- while (1) {
- current->state = TASK_INTERRUPTIBLE;
- schedule();
- if (do_signal(&regs, &saveset))
- return -EINTR;
- }
-}
-
-asmlinkage int
-sys_sigaction(int sig, const struct old_sigaction __user *act,
- struct old_sigaction __user *oact)
-{
- struct k_sigaction new_ka, old_ka;
- int ret;
-
- if (act) {
- old_sigset_t mask;
- if (verify_area(VERIFY_READ, act, sizeof(*act)) ||
- __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
- __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
- return -EFAULT;
- __get_user(new_ka.sa.sa_flags, &act->sa_flags);
- __get_user(mask, &act->sa_mask);
- siginitset(&new_ka.sa.sa_mask, mask);
- }
-
- ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
-
- if (!ret && oact) {
- if (verify_area(VERIFY_WRITE, oact, sizeof(*oact)) ||
- __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
- __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
- return -EFAULT;
- __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
- __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
- }
-
- return ret;
-}
-
-asmlinkage int
-sys_sigaltstack(unsigned long ebx)
-{
- /* This is needed to make gcc realize it doesn't own the "struct pt_regs" */
- struct pt_regs *regs = (struct pt_regs *)&ebx;
- const stack_t __user *uss = (const stack_t __user *)ebx;
- stack_t __user *uoss = (stack_t __user *)regs->ecx;
-
- return do_sigaltstack(uss, uoss, regs->esp);
-}
-
-
-/*
- * Do a signal return; undo the signal stack.
- */
-
-static int
-restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *peax)
-{
- unsigned int err = 0;
-
- /* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
-
-#define COPY(x) err |= __get_user(regs->x, &sc->x)
-
-#define COPY_SEG(seg) \
- { unsigned short tmp; \
- err |= __get_user(tmp, &sc->seg); \
- regs->x##seg = tmp; }
-
-#define COPY_SEG_STRICT(seg) \
- { unsigned short tmp; \
- err |= __get_user(tmp, &sc->seg); \
- regs->x##seg = tmp|3; }
-
-#define GET_SEG(seg) \
- { unsigned short tmp; \
- err |= __get_user(tmp, &sc->seg); \
- loadsegment(seg,tmp); }
-
-#define FIX_EFLAGS (X86_EFLAGS_AC | X86_EFLAGS_OF | X86_EFLAGS_DF | \
- X86_EFLAGS_TF | X86_EFLAGS_SF | X86_EFLAGS_ZF | \
- X86_EFLAGS_AF | X86_EFLAGS_PF | X86_EFLAGS_CF)
-
- GET_SEG(gs);
- GET_SEG(fs);
- COPY_SEG(es);
- COPY_SEG(ds);
- COPY(edi);
- COPY(esi);
- COPY(ebp);
- COPY(esp);
- COPY(ebx);
- COPY(edx);
- COPY(ecx);
- COPY(eip);
- COPY_SEG_STRICT(cs);
- COPY_SEG_STRICT(ss);
-
- {
- unsigned int tmpflags;
- err |= __get_user(tmpflags, &sc->eflags);
- regs->eflags = (regs->eflags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
- regs->orig_eax = -1; /* disable syscall checks */
- }
-
- {
- struct _fpstate __user * buf;
- err |= __get_user(buf, &sc->fpstate);
- if (buf) {
- if (verify_area(VERIFY_READ, buf, sizeof(*buf)))
- goto badframe;
- err |= restore_i387(buf);
- } else {
- struct task_struct *me = current;
- if (used_math()) {
- clear_fpu(me);
- clear_used_math();
- }
- }
- }
-
- err |= __get_user(*peax, &sc->eax);
- return err;
-
-badframe:
- return 1;
-}
-
-asmlinkage int sys_sigreturn(unsigned long __unused)
-{
- struct pt_regs *regs = (struct pt_regs *) &__unused;
- struct sigframe __user *frame = (struct sigframe __user *)(regs->esp - 8);
- sigset_t set;
- int eax;
-
- if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
- goto badframe;
- if (__get_user(set.sig[0], &frame->sc.oldmask)
- || (_NSIG_WORDS > 1
- && __copy_from_user(&set.sig[1], &frame->extramask,
- sizeof(frame->extramask))))
- goto badframe;
-
- sigdelsetmask(&set, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = set;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
-
- if (restore_sigcontext(regs, &frame->sc, &eax))
- goto badframe;
- return eax;
-
-badframe:
- force_sig(SIGSEGV, current);
- return 0;
-}
-
-asmlinkage int sys_rt_sigreturn(unsigned long __unused)
-{
- struct pt_regs *regs = (struct pt_regs *) &__unused;
- struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(regs->esp - 4);
- sigset_t set;
- int eax;
-
- if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
- goto badframe;
- if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
- goto badframe;
-
- sigdelsetmask(&set, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = set;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
-
- if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &eax))
- goto badframe;
-
- if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->esp) == -EFAULT)
- goto badframe;
-
- return eax;
-
-badframe:
- force_sig(SIGSEGV, current);
- return 0;
-}
-
-/*
- * Set up a signal frame.
- */
-
-static int
-setup_sigcontext(struct sigcontext __user *sc, struct _fpstate __user *fpstate,
- struct pt_regs *regs, unsigned long mask)
-{
- int tmp, err = 0;
-
- tmp = 0;
- __asm__("movl %%gs,%0" : "=r"(tmp): "0"(tmp));
- err |= __put_user(tmp, (unsigned int __user *)&sc->gs);
- __asm__("movl %%fs,%0" : "=r"(tmp): "0"(tmp));
- err |= __put_user(tmp, (unsigned int __user *)&sc->fs);
-
- err |= __put_user(regs->xes, (unsigned int __user *)&sc->es);
- err |= __put_user(regs->xds, (unsigned int __user *)&sc->ds);
- err |= __put_user(regs->edi, &sc->edi);
- err |= __put_user(regs->esi, &sc->esi);
- err |= __put_user(regs->ebp, &sc->ebp);
- err |= __put_user(regs->esp, &sc->esp);
- err |= __put_user(regs->ebx, &sc->ebx);
- err |= __put_user(regs->edx, &sc->edx);
- err |= __put_user(regs->ecx, &sc->ecx);
- err |= __put_user(regs->eax, &sc->eax);
- err |= __put_user(current->thread.trap_no, &sc->trapno);
- err |= __put_user(current->thread.error_code, &sc->err);
- err |= __put_user(regs->eip, &sc->eip);
- err |= __put_user(regs->xcs, (unsigned int __user *)&sc->cs);
- err |= __put_user(regs->eflags, &sc->eflags);
- err |= __put_user(regs->esp, &sc->esp_at_signal);
- err |= __put_user(regs->xss, (unsigned int __user *)&sc->ss);
-
- tmp = save_i387(fpstate);
- if (tmp < 0)
- err = 1;
- else
- err |= __put_user(tmp ? fpstate : NULL, &sc->fpstate);
-
- /* non-iBCS2 extensions.. */
- err |= __put_user(mask, &sc->oldmask);
- err |= __put_user(current->thread.cr2, &sc->cr2);
-
- return err;
-}
-
-/*
- * Determine which stack to use..
- */
-static inline void __user *
-get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
-{
- unsigned long esp;
-
- /* Default to using normal stack */
- esp = regs->esp;
-
- /* This is the X/Open sanctioned signal stack switching. */
- if (ka->sa.sa_flags & SA_ONSTACK) {
- if (sas_ss_flags(esp) == 0)
- esp = current->sas_ss_sp + current->sas_ss_size;
- }
-
- /* This is the legacy signal stack switching. */
- else if ((regs->xss & 0xffff) != __USER_DS &&
- !(ka->sa.sa_flags & SA_RESTORER) &&
- ka->sa.sa_restorer) {
- esp = (unsigned long) ka->sa.sa_restorer;
- }
-
- return (void __user *)((esp - frame_size) & -8ul);
-}
-
-/* These symbols are defined with the addresses in the vsyscall page.
- See vsyscall-sigreturn.S. */
-extern void __user __kernel_sigreturn;
-extern void __user __kernel_rt_sigreturn;
-
-static void setup_frame(int sig, struct k_sigaction *ka,
- sigset_t *set, struct pt_regs * regs)
-{
- void __user *restorer;
- struct sigframe __user *frame;
- int err = 0;
- int usig;
-
- frame = get_sigframe(ka, regs, sizeof(*frame));
-
- if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
- goto give_sigsegv;
-
- usig = current_thread_info()->exec_domain
- && current_thread_info()->exec_domain->signal_invmap
- && sig < 32
- ? current_thread_info()->exec_domain->signal_invmap[sig]
- : sig;
-
- err = __put_user(usig, &frame->sig);
- if (err)
- goto give_sigsegv;
-
- err = setup_sigcontext(&frame->sc, &frame->fpstate, regs, set->sig[0]);
- if (err)
- goto give_sigsegv;
-
- if (_NSIG_WORDS > 1) {
- err = __copy_to_user(&frame->extramask, &set->sig[1],
- sizeof(frame->extramask));
- if (err)
- goto give_sigsegv;
- }
-
- restorer = &__kernel_sigreturn;
- if (ka->sa.sa_flags & SA_RESTORER)
- restorer = ka->sa.sa_restorer;
-
- /* Set up to return from userspace. */
- err |= __put_user(restorer, &frame->pretcode);
-
- /*
- * This is popl %eax ; movl $,%eax ; int $0x80
- *
- * WE DO NOT USE IT ANY MORE! It's only left here for historical
- * reasons and because gdb uses it as a signature to notice
- * signal handler stack frames.
- */
- err |= __put_user(0xb858, (short __user *)(frame->retcode+0));
- err |= __put_user(__NR_sigreturn, (int __user *)(frame->retcode+2));
- err |= __put_user(0x80cd, (short __user *)(frame->retcode+6));
-
- if (err)
- goto give_sigsegv;
-
- /* Set up registers for signal handler */
- regs->esp = (unsigned long) frame;
- regs->eip = (unsigned long) ka->sa.sa_handler;
- regs->eax = (unsigned long) sig;
- regs->edx = (unsigned long) 0;
- regs->ecx = (unsigned long) 0;
-
- set_fs(USER_DS);
- regs->xds = __USER_DS;
- regs->xes = __USER_DS;
- regs->xss = __USER_DS;
- regs->xcs = __USER_CS;
-
- /*
- * Clear TF when entering the signal handler, but
- * notify any tracer that was single-stepping it.
- * The tracer may want to single-step inside the
- * handler too.
- */
- regs->eflags &= ~TF_MASK;
- if (test_thread_flag(TIF_SINGLESTEP))
- ptrace_notify(SIGTRAP);
-
-#if DEBUG_SIG
- printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n",
- current->comm, current->pid, frame, regs->eip, frame->pretcode);
-#endif
-
- return;
-
-give_sigsegv:
- force_sigsegv(sig, current);
-}
-
-static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
- sigset_t *set, struct pt_regs * regs)
-{
- void __user *restorer;
- struct rt_sigframe __user *frame;
- int err = 0;
- int usig;
-
- frame = get_sigframe(ka, regs, sizeof(*frame));
-
- if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
- goto give_sigsegv;
-
- usig = current_thread_info()->exec_domain
- && current_thread_info()->exec_domain->signal_invmap
- && sig < 32
- ? current_thread_info()->exec_domain->signal_invmap[sig]
- : sig;
-
- err |= __put_user(usig, &frame->sig);
- err |= __put_user(&frame->info, &frame->pinfo);
- err |= __put_user(&frame->uc, &frame->puc);
- err |= copy_siginfo_to_user(&frame->info, info);
- if (err)
- goto give_sigsegv;
-
- /* Create the ucontext. */
- err |= __put_user(0, &frame->uc.uc_flags);
- err |= __put_user(0, &frame->uc.uc_link);
- err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
- err |= __put_user(sas_ss_flags(regs->esp),
- &frame->uc.uc_stack.ss_flags);
- err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
- err |= setup_sigcontext(&frame->uc.uc_mcontext, &frame->fpstate,
- regs, set->sig[0]);
- err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
- if (err)
- goto give_sigsegv;
-
- /* Set up to return from userspace. */
- restorer = &__kernel_rt_sigreturn;
- if (ka->sa.sa_flags & SA_RESTORER)
- restorer = ka->sa.sa_restorer;
- err |= __put_user(restorer, &frame->pretcode);
-
- /*
- * This is movl $,%eax ; int $0x80
- *
- * WE DO NOT USE IT ANY MORE! It's only left here for historical
- * reasons and because gdb uses it as a signature to notice
- * signal handler stack frames.
- */
- err |= __put_user(0xb8, (char __user *)(frame->retcode+0));
- err |= __put_user(__NR_rt_sigreturn, (int __user *)(frame->retcode+1));
- err |= __put_user(0x80cd, (short __user *)(frame->retcode+5));
-
- if (err)
- goto give_sigsegv;
-
- /* Set up registers for signal handler */
- regs->esp = (unsigned long) frame;
- regs->eip = (unsigned long) ka->sa.sa_handler;
- regs->eax = (unsigned long) usig;
- regs->edx = (unsigned long) &frame->info;
- regs->ecx = (unsigned long) &frame->uc;
-
- set_fs(USER_DS);
- regs->xds = __USER_DS;
- regs->xes = __USER_DS;
- regs->xss = __USER_DS;
- regs->xcs = __USER_CS;
-
- /*
- * Clear TF when entering the signal handler, but
- * notify any tracer that was single-stepping it.
- * The tracer may want to single-step inside the
- * handler too.
- */
- regs->eflags &= ~TF_MASK;
- if (test_thread_flag(TIF_SINGLESTEP))
- ptrace_notify(SIGTRAP);
-
-#if DEBUG_SIG
- printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n",
- current->comm, current->pid, frame, regs->eip, frame->pretcode);
-#endif
-
- return;
-
-give_sigsegv:
- force_sigsegv(sig, current);
-}
-
-/*
- * OK, we're invoking a handler
- */
-
-static void
-handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
- sigset_t *oldset, struct pt_regs * regs)
-{
- /* Are we from a system call? */
- if (regs->orig_eax >= 0) {
- /* If so, check system call restarting.. */
- switch (regs->eax) {
- case -ERESTART_RESTARTBLOCK:
- case -ERESTARTNOHAND:
- regs->eax = -EINTR;
- break;
-
- case -ERESTARTSYS:
- if (!(ka->sa.sa_flags & SA_RESTART)) {
- regs->eax = -EINTR;
- break;
- }
- /* fallthrough */
- case -ERESTARTNOINTR:
- regs->eax = regs->orig_eax;
- regs->eip -= 2;
- }
- }
-
- /* Set up the stack frame */
- if (ka->sa.sa_flags & SA_SIGINFO)
- setup_rt_frame(sig, ka, info, oldset, regs);
- else
- setup_frame(sig, ka, oldset, regs);
-
- if (!(ka->sa.sa_flags & SA_NODEFER)) {
- spin_lock_irq(&current->sighand->siglock);
- sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
- sigaddset(&current->blocked,sig);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
- }
-}
-
-/*
- * Note that 'init' is a special process: it doesn't get signals it doesn't
- * want to handle. Thus you cannot kill init even with a SIGKILL even by
- * mistake.
- */
-int fastcall do_signal(struct pt_regs *regs, sigset_t *oldset)
-{
- siginfo_t info;
- int signr;
- struct k_sigaction ka;
-
- /*
- * We want the common case to go fast, which
- * is why we may in certain cases get here from
- * kernel mode. Just return without doing anything
- * if so.
- */
- if ((regs->xcs & 2) != 2)
- return 1;
-
- if (current->flags & PF_FREEZE) {
- refrigerator(0);
- goto no_signal;
- }
-
- if (!oldset)
- oldset = &current->blocked;
-
- signr = get_signal_to_deliver(&info, &ka, regs, NULL);
- if (signr > 0) {
- /* Reenable any watchpoints before delivering the
- * signal to user space. The processor register will
- * have been cleared if the watchpoint triggered
- * inside the kernel.
- */
- if (unlikely(current->thread.debugreg[7])) {
- HYPERVISOR_set_debugreg(7,
- current->thread.debugreg[7]);
- }
-
- /* Whee! Actually deliver the signal. */
- handle_signal(signr, &info, &ka, oldset, regs);
- return 1;
- }
-
- no_signal:
- /* Did we come from a system call? */
- if (regs->orig_eax >= 0) {
- /* Restart the system call - no handlers present */
- if (regs->eax == -ERESTARTNOHAND ||
- regs->eax == -ERESTARTSYS ||
- regs->eax == -ERESTARTNOINTR) {
- regs->eax = regs->orig_eax;
- regs->eip -= 2;
- }
- if (regs->eax == -ERESTART_RESTARTBLOCK){
- regs->eax = __NR_restart_syscall;
- regs->eip -= 2;
- }
- }
- return 0;
-}
-
-/*
- * notification of userspace execution resumption
- * - triggered by current->work.notify_resume
- */
-__attribute__((regparm(3)))
-void do_notify_resume(struct pt_regs *regs, sigset_t *oldset,
- __u32 thread_info_flags)
-{
- /* Pending single-step? */
- if (thread_info_flags & _TIF_SINGLESTEP) {
- regs->eflags |= TF_MASK;
- clear_thread_flag(TIF_SINGLESTEP);
- }
- /* deal with pending signal delivery */
- if (thread_info_flags & _TIF_SIGPENDING)
- do_signal(regs,oldset);
-
- clear_thread_flag(TIF_IRET);
-}
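
For reference, the three __put_user() calls that fill frame->retcode in setup_frame() above assemble the historical 8-byte trampoline popl %eax; movl $__NR_sigreturn,%eax; int $0x80 (kept, per the comment, only as a gdb signature). A hedged sketch of the same byte layout with plain stores in place of __put_user(), assuming a little-endian target:

#include <stdint.h>
#include <string.h>

static void fill_retcode(uint8_t code[8], uint32_t nr_sigreturn)
{
	code[0] = 0x58;                     /* popl %eax                */
	code[1] = 0xb8;                     /* movl $imm32,%eax opcode  */
	memcpy(&code[2], &nr_sigreturn, 4); /* imm32 = __NR_sigreturn   */
	code[6] = 0xcd;                     /* int                      */
	code[7] = 0x80;                     /* $0x80                    */
}

The 16-bit store of 0xb858 at retcode+0 in the original produces exactly the bytes 0x58, 0xb8 above on little-endian x86.
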
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/arch/xen/i386/kernel/time.c
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/time.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,737 +0,0 @@
-/*
- * linux/arch/i386/kernel/time.c
- *
- * Copyright (C) 1991, 1992, 1995 Linus Torvalds
- *
- * This file contains the PC-specific time handling details:
- * reading the RTC at bootup, etc..
- * 1994-07-02 Alan Modra
- * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
- * 1995-03-26 Markus Kuhn
- * fixed 500 ms bug at call to set_rtc_mmss, fixed DS12887
- * precision CMOS clock update
- * 1996-05-03 Ingo Molnar
- * fixed time warps in do_[slow|fast]_gettimeoffset()
- * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
- * "A Kernel Model for Precision Timekeeping" by Dave Mills
- * 1998-09-05 (Various)
- * More robust do_fast_gettimeoffset() algorithm implemented
- * (works with APM, Cyrix 6x86MX and Centaur C6),
- * monotonic gettimeofday() with fast_get_timeoffset(),
- * drift-proof precision TSC calibration on boot
- * (C. Scott Ananian <cananian@xxxxxxxxxxxxxxxxxxxx>, Andrew D.
- * Balsa <andrebalsa@xxxxxxxxxx>, Philip Gladstone <philip@xxxxxxxxxx>;
- * ported from 2.0.35 Jumbo-9 by Michael Krause <m.krause@xxxxxxxxxxxxx>).
- * 1998-12-16 Andrea Arcangeli
- * Fixed Jumbo-9 code in 2.1.131: do_gettimeofday was missing 1 jiffy
- * because was not accounting lost_ticks.
- * 1998-12-24 Copyright (C) 1998 Andrea Arcangeli
- * Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
- * serialize accesses to xtime/lost_ticks).
- */
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/param.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/time.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/smp.h>
-#include <linux/module.h>
-#include <linux/sysdev.h>
-#include <linux/bcd.h>
-#include <linux/efi.h>
-#include <linux/mca.h>
-#include <linux/sysctl.h>
-
-#include <asm/io.h>
-#include <asm/smp.h>
-#include <asm/irq.h>
-#include <asm/msr.h>
-#include <asm/delay.h>
-#include <asm/mpspec.h>
-#include <asm/uaccess.h>
-#include <asm/processor.h>
-#include <asm/timer.h>
-
-#include "mach_time.h"
-
-#include <linux/timex.h>
-#include <linux/config.h>
-
-#include <asm/hpet.h>
-
-#include <asm/arch_hooks.h>
-
-#include "io_ports.h"
-
-extern spinlock_t i8259A_lock;
-int pit_latch_buggy; /* extern */
-
-u64 jiffies_64 = INITIAL_JIFFIES;
-
-EXPORT_SYMBOL(jiffies_64);
-
-unsigned long cpu_khz; /* Detected as we calibrate the TSC */
-
-extern unsigned long wall_jiffies;
-
-DEFINE_SPINLOCK(rtc_lock);
-
-DEFINE_SPINLOCK(i8253_lock);
-EXPORT_SYMBOL(i8253_lock);
-
-extern struct init_timer_opts timer_tsc_init;
-extern struct timer_opts timer_tsc;
-struct timer_opts *cur_timer = &timer_tsc;
-
-/* These are periodically updated in shared_info, and then copied here. */
-u32 shadow_tsc_stamp;
-u64 shadow_system_time;
-static u32 shadow_time_version;
-static struct timeval shadow_tv;
-extern u64 processed_system_time;
-
-/*
- * We use this to ensure that gettimeofday() is monotonically increasing. We
- * only break this guarantee if the wall clock jumps backwards "a long way".
- */
-static struct timeval last_seen_tv = {0,0};
-
-#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-/* Periodically propagate synchronised time base to the RTC and to Xen. */
-static long last_rtc_update, last_update_to_xen;
-#endif
-
-/* Periodically take synchronised time base from Xen, if we need it. */
-static long last_update_from_xen; /* UTC seconds when last read Xen clock. */
-
-/* Keep track of last time we did processing/updating of jiffies and xtime. */
-u64 processed_system_time; /* System time (ns) at last processing. */
-
-#define NS_PER_TICK (1000000000ULL/HZ)
-
-#define HANDLE_USEC_UNDERFLOW(_tv) do { \
- while ((_tv).tv_usec < 0) { \
- (_tv).tv_usec += USEC_PER_SEC; \
- (_tv).tv_sec--; \
- } \
-} while (0)
-#define HANDLE_USEC_OVERFLOW(_tv) do { \
- while ((_tv).tv_usec >= USEC_PER_SEC) { \
- (_tv).tv_usec -= USEC_PER_SEC; \
- (_tv).tv_sec++; \
- } \
-} while (0)
-static inline void __normalize_time(time_t *sec, s64 *nsec)
-{
- while (*nsec >= NSEC_PER_SEC) {
- (*nsec) -= NSEC_PER_SEC;
- (*sec)++;
- }
- while (*nsec < 0) {
- (*nsec) += NSEC_PER_SEC;
- (*sec)--;
- }
-}
-
-/* Does this guest OS track Xen time, or set its wall clock independently? */
-static int independent_wallclock = 0;
-static int __init __independent_wallclock(char *str)
-{
- independent_wallclock = 1;
- return 1;
-}
-__setup("independent_wallclock", __independent_wallclock);
-#define INDEPENDENT_WALLCLOCK() \
- (independent_wallclock || (xen_start_info.flags & SIF_INITDOMAIN))
-
-/*
- * Reads a consistent set of time-base values from Xen, into a shadow data
- * area. Must be called with the xtime_lock held for writing.
- */
-static void __get_time_values_from_xen(void)
-{
- shared_info_t *s = HYPERVISOR_shared_info;
-
- do {
- shadow_time_version = s->time_version2;
- rmb();
- shadow_tv.tv_sec = s->wc_sec;
- shadow_tv.tv_usec = s->wc_usec;
- shadow_tsc_stamp = (u32)s->tsc_timestamp;
- shadow_system_time = s->system_time;
- rmb();
- }
- while (shadow_time_version != s->time_version1);
-
- cur_timer->mark_offset();
-}
-
-#define TIME_VALUES_UP_TO_DATE \
- ({ rmb(); (shadow_time_version == HYPERVISOR_shared_info->time_version2); })
-
-/*
- * This version of gettimeofday has microsecond resolution
- * and better than microsecond precision on fast x86 machines with TSC.
- */
-void do_gettimeofday(struct timeval *tv)
-{
- unsigned long seq;
- unsigned long usec, sec;
- unsigned long max_ntp_tick;
- unsigned long flags;
- s64 nsec;
-
- do {
- unsigned long lost;
-
- seq = read_seqbegin(&xtime_lock);
-
- usec = cur_timer->get_offset();
- lost = jiffies - wall_jiffies;
-
- /*
- * If time_adjust is negative then NTP is slowing the clock
- * so make sure not to go into next possible interval.
- * Better to lose some accuracy than have time go backwards..
- */
- if (unlikely(time_adjust < 0)) {
- max_ntp_tick = (USEC_PER_SEC / HZ) - tickadj;
- usec = min(usec, max_ntp_tick);
-
- if (lost)
- usec += lost * max_ntp_tick;
- }
- else if (unlikely(lost))
- usec += lost * (USEC_PER_SEC / HZ);
-
- sec = xtime.tv_sec;
- usec += (xtime.tv_nsec / NSEC_PER_USEC);
-
- nsec = shadow_system_time - processed_system_time;
- __normalize_time(&sec, &nsec);
- usec += (long)nsec / NSEC_PER_USEC;
-
- if (unlikely(!TIME_VALUES_UP_TO_DATE)) {
- /*
- * We may have blocked for a long time,
- * rendering our calculations invalid
- * (e.g. the time delta may have
- * overflowed). Detect that and recalculate
- * with fresh values.
- */
- write_seqlock_irqsave(&xtime_lock, flags);
- __get_time_values_from_xen();
- write_sequnlock_irqrestore(&xtime_lock, flags);
- continue;
- }
- } while (read_seqretry(&xtime_lock, seq));
-
- while (usec >= USEC_PER_SEC) {
- usec -= USEC_PER_SEC;
- sec++;
- }
-
- /* Ensure that time-of-day is monotonically increasing. */
- if ((sec < last_seen_tv.tv_sec) ||
- ((sec == last_seen_tv.tv_sec) && (usec < last_seen_tv.tv_usec))) {
- sec = last_seen_tv.tv_sec;
- usec = last_seen_tv.tv_usec;
- } else {
- last_seen_tv.tv_sec = sec;
- last_seen_tv.tv_usec = usec;
- }
-
- tv->tv_sec = sec;
- tv->tv_usec = usec;
-}
-
-EXPORT_SYMBOL(do_gettimeofday);
-
-int do_settimeofday(struct timespec *tv)
-{
- time_t wtm_sec, sec = tv->tv_sec;
- long wtm_nsec;
- s64 nsec;
- struct timespec xentime;
-
- if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
- return -EINVAL;
-
- if (!INDEPENDENT_WALLCLOCK())
- return 0; /* Silent failure? */
-
- write_seqlock_irq(&xtime_lock);
-
- /*
- * Ensure we don't get blocked for a long time so that our time delta
- * overflows. If that were to happen then our shadow time values would
- * be stale, so we can retry with fresh ones.
- */
- again:
- nsec = (s64)tv->tv_nsec -
- ((s64)cur_timer->get_offset() * (s64)NSEC_PER_USEC);
- if (unlikely(!TIME_VALUES_UP_TO_DATE)) {
- __get_time_values_from_xen();
- goto again;
- }
-
- __normalize_time(&sec, &nsec);
- set_normalized_timespec(&xentime, sec, nsec);
-
- /*
- * This is revolting. We need to set "xtime" correctly. However, the
- * value in this location is the value at the most recent update of
- * wall time. Discover what correction gettimeofday() would have
- * made, and then undo it!
- */
- nsec -= (jiffies - wall_jiffies) * TICK_NSEC;
-
- nsec -= (shadow_system_time - processed_system_time);
-
- __normalize_time(&sec, &nsec);
- wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
- wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
-
- set_normalized_timespec(&xtime, sec, nsec);
- set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
-
- time_adjust = 0; /* stop active adjtime() */
- time_status |= STA_UNSYNC;
- time_maxerror = NTP_PHASE_LIMIT;
- time_esterror = NTP_PHASE_LIMIT;
-
- /* Reset all our running time counts. They make no sense now. */
- last_seen_tv.tv_sec = 0;
- last_update_from_xen = 0;
-
-#ifdef CONFIG_XEN_PRIVILEGED_GUEST
- if (xen_start_info.flags & SIF_INITDOMAIN) {
- dom0_op_t op;
- last_rtc_update = last_update_to_xen = 0;
- op.cmd = DOM0_SETTIME;
- op.u.settime.secs = xentime.tv_sec;
- op.u.settime.usecs = xentime.tv_nsec / NSEC_PER_USEC;
- op.u.settime.system_time = shadow_system_time;
- write_sequnlock_irq(&xtime_lock);
- HYPERVISOR_dom0_op(&op);
- } else
-#endif
- write_sequnlock_irq(&xtime_lock);
-
- clock_was_set();
- return 0;
-}
-
-EXPORT_SYMBOL(do_settimeofday);
-
-#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-static int set_rtc_mmss(unsigned long nowtime)
-{
- int retval;
-
- /* gets recalled with irq locally disabled */
- spin_lock(&rtc_lock);
- if (efi_enabled)
- retval = efi_set_rtc_mmss(nowtime);
- else
- retval = mach_set_rtc_mmss(nowtime);
- spin_unlock(&rtc_lock);
-
- return retval;
-}
-#endif
-
-/* monotonic_clock(): returns # of nanoseconds passed since time_init()
- * Note: This function is required to return accurate
- * time even in the absence of multiple timer ticks.
- */
-unsigned long long monotonic_clock(void)
-{
- return cur_timer->monotonic_clock();
-}
-EXPORT_SYMBOL(monotonic_clock);
-
-#if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER)
-unsigned long profile_pc(struct pt_regs *regs)
-{
- unsigned long pc = instruction_pointer(regs);
-
- if (in_lock_functions(pc))
- return *(unsigned long *)(regs->ebp + 4);
-
- return pc;
-}
-EXPORT_SYMBOL(profile_pc);
-#endif
-
-/*
- * timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "do_timer()" routine every clocktick
- */
-static inline void do_timer_interrupt(int irq, void *dev_id,
- struct pt_regs *regs)
-{
- time_t wtm_sec, sec;
- s64 delta, nsec;
- long sec_diff, wtm_nsec;
-
- do {
- __get_time_values_from_xen();
-
- delta = (s64)(shadow_system_time +
- ((s64)cur_timer->get_offset() *
- (s64)NSEC_PER_USEC) -
- processed_system_time);
- }
- while (!TIME_VALUES_UP_TO_DATE);
-
- if (unlikely(delta < 0)) {
- printk("Timer ISR: Time went backwards: %lld %lld %lld %lld\n",
- delta, shadow_system_time,
- ((s64)cur_timer->get_offset() * (s64)NSEC_PER_USEC),
- processed_system_time);
- return;
- }
-
- /* Process elapsed jiffies since last call. */
- while (delta >= NS_PER_TICK) {
- delta -= NS_PER_TICK;
- processed_system_time += NS_PER_TICK;
- do_timer(regs);
-#ifndef CONFIG_SMP
- update_process_times(user_mode(regs));
-#endif
- if (regs)
- profile_tick(CPU_PROFILING, regs);
- }
-
- /*
- * Take synchronised time from Xen once a minute if we're not
- * synchronised ourselves, and we haven't chosen to keep an independent
- * time base.
- */
- if (!INDEPENDENT_WALLCLOCK() &&
- ((time_status & STA_UNSYNC) != 0) &&
- (xtime.tv_sec > (last_update_from_xen + 60))) {
- /* Adjust shadow for jiffies that haven't updated xtime yet. */
- shadow_tv.tv_usec -=
- (jiffies - wall_jiffies) * (USEC_PER_SEC / HZ);
- HANDLE_USEC_UNDERFLOW(shadow_tv);
-
- /*
- * Reset our running time counts if they are invalidated by
- * a warp backwards of more than 500ms.
- */
- sec_diff = xtime.tv_sec - shadow_tv.tv_sec;
- if (unlikely(abs(sec_diff) > 1) ||
- unlikely(((sec_diff * USEC_PER_SEC) +
- (xtime.tv_nsec / NSEC_PER_USEC) -
- shadow_tv.tv_usec) > 500000)) {
-#ifdef CONFIG_XEN_PRIVILEGED_GUEST
- last_rtc_update = last_update_to_xen = 0;
-#endif
- last_seen_tv.tv_sec = 0;
- }
-
- /* Update our unsynchronised xtime appropriately. */
- sec = shadow_tv.tv_sec;
- nsec = shadow_tv.tv_usec * NSEC_PER_USEC;
-
- __normalize_time(&sec, &nsec);
- wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
- wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
-
- set_normalized_timespec(&xtime, sec, nsec);
- set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
-
- last_update_from_xen = sec;
- }
-
-#ifdef CONFIG_XEN_PRIVILEGED_GUEST
- if (!(xen_start_info.flags & SIF_INITDOMAIN))
- return;
-
- /* Send synchronised time to Xen approximately every minute. */
- if (((time_status & STA_UNSYNC) == 0) &&
- (xtime.tv_sec > (last_update_to_xen + 60))) {
- dom0_op_t op;
- struct timeval tv;
-
- tv.tv_sec = xtime.tv_sec;
- tv.tv_usec = xtime.tv_nsec / NSEC_PER_USEC;
- tv.tv_usec += (jiffies - wall_jiffies) * (USEC_PER_SEC/HZ);
- HANDLE_USEC_OVERFLOW(tv);
-
- op.cmd = DOM0_SETTIME;
- op.u.settime.secs = tv.tv_sec;
- op.u.settime.usecs = tv.tv_usec;
- op.u.settime.system_time = shadow_system_time;
- HYPERVISOR_dom0_op(&op);
-
- last_update_to_xen = xtime.tv_sec;
- }
-
- /*
- * If we have an externally synchronized Linux clock, then update
- * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
- * called as close as possible to 500 ms before the new second starts.
- */
- if ((time_status & STA_UNSYNC) == 0 &&
- xtime.tv_sec > last_rtc_update + 660 &&
- (xtime.tv_nsec / 1000)
- >= USEC_AFTER - ((unsigned) TICK_SIZE) / 2 &&
- (xtime.tv_nsec / 1000)
- <= USEC_BEFORE + ((unsigned) TICK_SIZE) / 2) {
- /* horrible...FIXME */
- if (efi_enabled) {
- if (efi_set_rtc_mmss(xtime.tv_sec) == 0)
- last_rtc_update = xtime.tv_sec;
- else
- last_rtc_update = xtime.tv_sec - 600;
- } else if (set_rtc_mmss(xtime.tv_sec) == 0)
- last_rtc_update = xtime.tv_sec;
- else
- last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
- }
-#endif
-}
-
-/*
- * This is the same as the above, except we _also_ save the current
- * Time Stamp Counter value at the time of the timer interrupt, so that
- * we later on can estimate the time of day more exactly.
- */
-irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
-{
- /*
- * Here we are in the timer irq handler. We just have irqs locally
- * disabled but we don't know if the timer_bh is running on the other
- * CPU. We need to avoid an SMP race with it. NOTE: we don't need
- * the irq version of write_lock because as just said we have irq
- * locally disabled. -arca
- */
- write_seqlock(&xtime_lock);
- do_timer_interrupt(irq, NULL, regs);
- write_sequnlock(&xtime_lock);
- return IRQ_HANDLED;
-}
-
-/* not static: needed by APM */
-unsigned long get_cmos_time(void)
-{
- unsigned long retval;
-
- spin_lock(&rtc_lock);
-
- if (efi_enabled)
- retval = efi_get_time();
- else
- retval = mach_get_cmos_time();
-
- spin_unlock(&rtc_lock);
-
- return retval;
-}
-
-static long clock_cmos_diff, sleep_start;
-
-static int timer_suspend(struct sys_device *dev, u32 state)
-{
- /*
- * Estimate time zone so that set_time can update the clock
- */
- clock_cmos_diff = -get_cmos_time();
- clock_cmos_diff += get_seconds();
- sleep_start = get_cmos_time();
- return 0;
-}
-
-static int timer_resume(struct sys_device *dev)
-{
- unsigned long flags;
- unsigned long sec;
- unsigned long sleep_length;
-
-#ifdef CONFIG_HPET_TIMER
- if (is_hpet_enabled())
- hpet_reenable();
-#endif
- sec = get_cmos_time() + clock_cmos_diff;
- sleep_length = (get_cmos_time() - sleep_start) * HZ;
- write_seqlock_irqsave(&xtime_lock, flags);
- xtime.tv_sec = sec;
- xtime.tv_nsec = 0;
- write_sequnlock_irqrestore(&xtime_lock, flags);
- jiffies += sleep_length;
- wall_jiffies += sleep_length;
- return 0;
-}
-
-static struct sysdev_class timer_sysclass = {
- .resume = timer_resume,
- .suspend = timer_suspend,
- set_kset_name("timer"),
-};
-
-
-/* XXX this driverfs stuff should probably go elsewhere later -john */
-static struct sys_device device_timer = {
- .id = 0,
- .cls = &timer_sysclass,
-};
-
-static int time_init_device(void)
-{
- int error = sysdev_class_register(&timer_sysclass);
- if (!error)
- error = sysdev_register(&device_timer);
- return error;
-}
-
-device_initcall(time_init_device);
-
-#ifdef CONFIG_HPET_TIMER
-extern void (*late_time_init)(void);
-/* Duplicate of time_init() below, with hpet_enable part added */
-void __init hpet_time_init(void)
-{
- xtime.tv_sec = get_cmos_time();
- xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
- set_normalized_timespec(&wall_to_monotonic,
- -xtime.tv_sec, -xtime.tv_nsec);
-
- if (hpet_enable() >= 0) {
- printk("Using HPET for base-timer\n");
- }
-
- cur_timer = select_timer();
- printk(KERN_INFO "Using %s for high-res timesource\n",cur_timer->name);
-
- time_init_hook();
-}
-#endif
-
-/* Dynamically-mapped IRQ. */
-static int TIMER_IRQ;
-
-static struct irqaction irq_timer = {
- timer_interrupt, SA_INTERRUPT, CPU_MASK_NONE, "timer",
- NULL, NULL
-};
-
-void __init time_init(void)
-{
-#ifdef CONFIG_HPET_TIMER
- if (is_hpet_capable()) {
- /*
- * HPET initialization needs to do memory-mapped io. So, let
- * us do a late initialization after mem_init().
- */
- late_time_init = hpet_time_init;
- return;
- }
-#endif
- __get_time_values_from_xen();
- xtime.tv_sec = shadow_tv.tv_sec;
- xtime.tv_nsec = shadow_tv.tv_usec * NSEC_PER_USEC;
- set_normalized_timespec(&wall_to_monotonic,
- -xtime.tv_sec, -xtime.tv_nsec);
- processed_system_time = shadow_system_time;
-
- if (timer_tsc_init.init(NULL) != 0)
- BUG();
- printk(KERN_INFO "Using %s for high-res timesource\n",cur_timer->name);
-
- TIMER_IRQ = bind_virq_to_irq(VIRQ_TIMER);
-
- (void)setup_irq(TIMER_IRQ, &irq_timer);
-}
-
-/* Convert jiffies to system time. Call with xtime_lock held for reading. */
-static inline u64 __jiffies_to_st(unsigned long j)
-{
- long delta = j - jiffies;
- /* NB. The next check can trigger in some wrap-around cases, but
- * that's ok -- we'll just end up with a shorter timeout. */
- if (delta < 1)
- delta = 1;
- return processed_system_time + (delta * NS_PER_TICK);
-}
-
-/*
- * stop_hz_timer / start_hz_timer - enter/exit 'tickless mode' on an idle cpu
- * These functions are based on implementations from arch/s390/kernel/time.c
- */
-void stop_hz_timer(void)
-{
- unsigned int cpu = smp_processor_id();
- unsigned long j;
-
- /* s390 does this /before/ checking rcu_pending(). We copy them. */
- cpu_set(cpu, nohz_cpu_mask);
-
- /* Leave ourselves in 'tick mode' if rcu or softirq pending. */
- if (rcu_pending(cpu) || local_softirq_pending()) {
- cpu_clear(cpu, nohz_cpu_mask);
- j = jiffies + 1;
- } else {
- j = next_timer_interrupt();
- }
-
- BUG_ON(HYPERVISOR_set_timer_op(__jiffies_to_st(j)) != 0);
-}
-
-void start_hz_timer(void)
-{
- cpu_clear(smp_processor_id(), nohz_cpu_mask);
-}
-
-void time_suspend(void)
-{
- /* nothing */
-}
-
-/* No locking required. We are only CPU running, and interrupts are off. */
-void time_resume(void)
-{
- if (timer_tsc_init.init(NULL) != 0)
- BUG();
-
- /* Get timebases for new environment. */
- __get_time_values_from_xen();
-
- /* Reset our own concept of passage of system time. */
- processed_system_time = shadow_system_time;
-
- /* Accept a warp in UTC (wall-clock) time. */
- last_seen_tv.tv_sec = 0;
-
- /* Make sure we resync UTC time with Xen on next timer interrupt. */
- last_update_from_xen = 0;
-}
-
-/*
- * /proc/sys/xen: This really belongs in another file. It can stay here for
- * now however.
- */
-static ctl_table xen_subtable[] = {
- {1, "independent_wallclock", &independent_wallclock,
- sizeof(independent_wallclock), 0644, NULL, proc_dointvec},
- {0}
-};
-static ctl_table xen_table[] = {
- {123, "xen", NULL, 0, 0555, xen_subtable},
- {0}
-};
-static int __init xen_sysctl_init(void)
-{
- (void)register_sysctl_table(xen_table, 0);
- return 0;
-}
-__initcall(xen_sysctl_init);
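
The version-check loop in __get_time_values_from_xen() above is a lock-free snapshot: the writer (Xen) bumps one version field before updating the shared time values and the other after, so a reader that sees the two fields disagree knows it raced with an update and retries. A generic sketch of the reader side (field names are illustrative, and the rmb() barriers are elided to comments):

struct shared_time {
	volatile unsigned int version1, version2;
	volatile unsigned long sec, usec;
};

/* Retry until both version fields agree: the writer updates version1
   before its data and version2 after, so v2 == v1 means no update was
   in flight while we copied. */
static void snapshot_time(const struct shared_time *s,
                          unsigned long *sec, unsigned long *usec)
{
	unsigned int v;

	do {
		v = s->version2;
		/* rmb() here on real hardware */
		*sec = s->sec;
		*usec = s->usec;
		/* rmb() here on real hardware */
	} while (v != s->version1);
}
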
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/arch/xen/i386/kernel/timers/Makefile
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/timers/Makefile Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,17 +0,0 @@
-#
-# Makefile for x86 timers
-#
-
-XENARCH := $(subst ",,$(CONFIG_XENARCH))
-
-obj-y := timer_tsc.o
-c-obj-y :=
-
-c-link :=
-
-$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-link)):
- @ln -fsn $(srctree)/arch/i386/kernel/timers/$(notdir $@) $@
-
-obj-y += $(c-obj-y)
-
-clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-) $(c-link))
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/arch/xen/i386/kernel/timers/timer_tsc.c
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/timers/timer_tsc.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,379 +0,0 @@
-/*
- * This code largely moved from arch/i386/kernel/time.c.
- * See comments there for proper credits.
- */
-
-#include <linux/spinlock.h>
-#include <linux/init.h>
-#include <linux/timex.h>
-#include <linux/errno.h>
-#include <linux/cpufreq.h>
-#include <linux/string.h>
-#include <linux/jiffies.h>
-
-#include <asm/timer.h>
-#include <asm/io.h>
-/* processor.h for tsc_disable flag */
-#include <asm/processor.h>
-
-#include "io_ports.h"
-#include "mach_timer.h"
-
-#include <asm/hpet.h>
-
-#ifdef CONFIG_HPET_TIMER
-static unsigned long hpet_usec_quotient;
-static unsigned long hpet_last;
-static struct timer_opts timer_tsc;
-#endif
-
-static inline void cpufreq_delayed_get(void);
-
-int tsc_disable __initdata = 0;
-
-extern spinlock_t i8253_lock;
-
-static int use_tsc;
-
-static unsigned long long monotonic_base;
-static u32 monotonic_offset;
-static seqlock_t monotonic_lock = SEQLOCK_UNLOCKED;
-
-/* convert from cycles(64bits) => nanoseconds (64bits)
- * basic equation:
- * ns = cycles / (freq / ns_per_sec)
- * ns = cycles * (ns_per_sec / freq)
- * ns = cycles * (10^9 / (cpu_mhz * 10^6))
- * ns = cycles * (10^3 / cpu_mhz)
- *
- * Then we use scaling math (suggested by george@xxxxxxxxxx) to get:
- * ns = cycles * (10^3 * SC / cpu_mhz) / SC
- * ns = cycles * cyc2ns_scale / SC
- *
- * And since SC is a constant power of two, we can convert the div
- * into a shift.
- * -johnstul@xxxxxxxxxx "math is hard, lets go shopping!"
- */
-static unsigned long cyc2ns_scale;
-#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
-
-static inline void set_cyc2ns_scale(unsigned long cpu_mhz)
-{
- cyc2ns_scale = (1000 << CYC2NS_SCALE_FACTOR)/cpu_mhz;
-}
-
-static inline unsigned long long cycles_2_ns(unsigned long long cyc)
-{
- return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
-}
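The scaled-math comment above can be sanity-checked in isolation. A minimal stand-alone sketch, assuming an illustrative 2000 MHz CPU (not a value from the patch):

    #include <stdio.h>

    #define CYC2NS_SCALE_FACTOR 10 /* 2^10, as in the code above */

    int main(void)
    {
        unsigned long cpu_mhz = 2000;           /* assumed example */
        unsigned long cyc2ns_scale = (1000 << CYC2NS_SCALE_FACTOR) / cpu_mhz;
        unsigned long long cycles = 2000000ULL; /* 1 ms worth at 2 GHz */

        /* ns = cycles * (10^3 * 2^10 / cpu_mhz) / 2^10 */
        printf("scale=%lu, %llu cycles -> %llu ns\n", cyc2ns_scale,
               cycles, (cycles * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR);
        return 0;
    }

With these numbers the scale is 512 and 2,000,000 cycles map to exactly 1,000,000 ns.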
-
-/* Cached *multiplier* to convert TSC counts to microseconds.
- * (see the equation below).
- * Equal to 2^32 * (1 / (clocks per usec) ).
- * Initialized in time_init.
- */
-static unsigned long fast_gettimeoffset_quotient;
-
-extern u32 shadow_tsc_stamp;
-extern u64 shadow_system_time;
-
-static unsigned long get_offset_tsc(void)
-{
- register unsigned long eax, edx;
-
- /* Read the Time Stamp Counter */
-
- rdtsc(eax,edx);
-
- /* .. relative to previous jiffy (32 bits is enough) */
- eax -= shadow_tsc_stamp;
-
- /*
- * Time offset = (tsc_low delta) * fast_gettimeoffset_quotient
- * = (tsc_low delta) * (usecs_per_clock)
- * = (tsc_low delta) * (usecs_per_jiffy / clocks_per_jiffy)
- *
- * Using a mull instead of a divl saves up to 31 clock cycles
- * in the critical path.
- */
-
- __asm__("mull %2"
- :"=a" (eax), "=d" (edx)
- :"rm" (fast_gettimeoffset_quotient),
- "0" (eax));
-
- /* our adjusted time offset in microseconds */
- return edx;
-}
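The mull above keeps only %edx, the high 32 bits of the 64-bit product; since the quotient is 2^32 / (clocks per usec), that high word is exactly the delta in microseconds. A portable sketch of the same computation (function name is illustrative):

    #include <stdint.h>

    /* High word of a 32x32->64 multiply, equivalent to the inline mull:
     * quotient == 2^32 / clocks_per_usec, so the product's top 32 bits
     * are tsc_delta / clocks_per_usec, i.e. microseconds. */
    static uint32_t tsc_delta_to_usec(uint32_t tsc_delta, uint32_t quotient)
    {
        return (uint32_t)(((uint64_t)tsc_delta * quotient) >> 32);
    }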
-
-static unsigned long long monotonic_clock_tsc(void)
-{
- unsigned long long last_offset, this_offset, base;
- unsigned seq;
-
- /* atomically read monotonic base & last_offset */
- do {
- seq = read_seqbegin(&monotonic_lock);
- last_offset = monotonic_offset;
- base = monotonic_base;
- } while (read_seqretry(&monotonic_lock, seq));
-
- /* Read the Time Stamp Counter */
- rdtscll(this_offset);
-
- /* return the value in ns */
- return base + cycles_2_ns(this_offset - last_offset);
-}
-
-/*
- * Scheduler clock - returns current time in nanosec units.
- */
-unsigned long long sched_clock(void)
-{
- unsigned long long this_offset;
-
- /*
- * In the NUMA case we dont use the TSC as they are not
- * synchronized across all CPUs.
- */
-#ifndef CONFIG_NUMA
- if (!use_tsc)
-#endif
- /* no locking but a rare wrong value is not a big deal */
- return jiffies_64 * (1000000000 / HZ);
-
- /* Read the Time Stamp Counter */
- rdtscll(this_offset);
-
- /* return the value in ns */
- return cycles_2_ns(this_offset);
-}
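When the TSC is unusable, the fallback above has only jiffy resolution. A small sketch of that arithmetic (the HZ value is an assumed example):

    #include <stdio.h>

    int main(void)
    {
        unsigned long hz = 100;                 /* assumed HZ */
        unsigned long long jiffies_64 = 123456; /* example tick count */

        /* Same arithmetic as the fallback path in sched_clock(). */
        printf("%llu jiffies -> %llu ns\n", jiffies_64,
               jiffies_64 * (1000000000ULL / hz));
        return 0;
    }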
-
-
-static void mark_offset_tsc(void)
-{
-
- /* update the monotonic base value */
- write_seqlock(&monotonic_lock);
- monotonic_base = shadow_system_time;
- monotonic_offset = shadow_tsc_stamp;
- write_sequnlock(&monotonic_lock);
-}
-
-static void delay_tsc(unsigned long loops)
-{
- unsigned long bclock, now;
-
- rdtscl(bclock);
- do
- {
- rep_nop();
- rdtscl(now);
- } while ((now-bclock) < loops);
-}
-
-#ifdef CONFIG_HPET_TIMER
-static void mark_offset_tsc_hpet(void)
-{
- unsigned long long this_offset, last_offset;
- unsigned long offset, temp, hpet_current;
-
- write_seqlock(&monotonic_lock);
- last_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
- /*
- * It is important that these two operations happen almost at
- * the same time. We do the RDTSC stuff first, since it's
- * faster. To avoid any inconsistencies, we need interrupts
- * disabled locally.
- */
- /*
- * Interrupts are just disabled locally since the timer irq
- * has the SA_INTERRUPT flag set. -arca
- */
- /* read Pentium cycle counter */
-
- hpet_current = hpet_readl(HPET_COUNTER);
- rdtsc(last_tsc_low, last_tsc_high);
-
- /* lost tick compensation */
- offset = hpet_readl(HPET_T0_CMP) - hpet_tick;
- if (unlikely(((offset - hpet_last) > hpet_tick) && (hpet_last != 0))) {
- int lost_ticks = (offset - hpet_last) / hpet_tick;
- jiffies_64 += lost_ticks;
- }
- hpet_last = hpet_current;
-
- /* update the monotonic base value */
- this_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
- monotonic_base += cycles_2_ns(this_offset - last_offset);
- write_sequnlock(&monotonic_lock);
-
- /* calculate delay_at_last_interrupt */
- /*
- * Time offset = (hpet delta) * ( usecs per HPET clock )
- * = (hpet delta) * ( usecs per tick / HPET clocks per tick)
- * = (hpet delta) * ( hpet_usec_quotient ) / (2^32)
- * Where,
- * hpet_usec_quotient = (2^32 * usecs per tick)/HPET clocks per tick
- */
- delay_at_last_interrupt = hpet_current - offset;
- ASM_MUL64_REG(temp, delay_at_last_interrupt,
- hpet_usec_quotient, delay_at_last_interrupt);
-}
-#endif
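The lost-tick compensation above credits whole jiffies whenever more than one HPET tick elapsed between samples. A stand-alone sketch with illustrative numbers (hpet_tick here is an assumed clocks-per-jiffy value, not one from the patch):

    #include <stdio.h>

    int main(void)
    {
        unsigned long hpet_tick = 14318;            /* assumed clocks/jiffy */
        unsigned long hpet_last = 100000;           /* previous sample */
        unsigned long offset = 100000 + 10 * 14318; /* current sample */

        /* Same test as mark_offset_tsc_hpet() above. */
        if (hpet_last != 0 && (offset - hpet_last) > hpet_tick)
            printf("compensating %lu lost ticks\n",
                   (offset - hpet_last) / hpet_tick);
        return 0;
    }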
-
-
-#ifdef CONFIG_CPU_FREQ
-#include <linux/workqueue.h>
-
-static unsigned int cpufreq_delayed_issched = 0;
-static unsigned int cpufreq_init = 0;
-static struct work_struct cpufreq_delayed_get_work;
-
-static void handle_cpufreq_delayed_get(void *v)
-{
- unsigned int cpu;
- for_each_online_cpu(cpu) {
- cpufreq_get(cpu);
- }
- cpufreq_delayed_issched = 0;
-}
-
-/* if we notice lost ticks, schedule a call to cpufreq_get() as it tries
- * to verify the CPU frequency the timing core thinks the CPU is running
- * at is still correct.
- */
-static inline void cpufreq_delayed_get(void)
-{
- if (cpufreq_init && !cpufreq_delayed_issched) {
- cpufreq_delayed_issched = 1;
-		printk(KERN_DEBUG "Losing some ticks... checking if CPU frequency changed.\n");
- schedule_work(&cpufreq_delayed_get_work);
- }
-}
-
-/* If the CPU frequency is scaled, TSC-based delays will need a different
- * loops_per_jiffy value to function properly.
- */
-
-static unsigned int ref_freq = 0;
-static unsigned long loops_per_jiffy_ref = 0;
-
-#ifndef CONFIG_SMP
-static unsigned long fast_gettimeoffset_ref = 0;
-static unsigned long cpu_khz_ref = 0;
-#endif
-
-static int
-time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
- void *data)
-{
- struct cpufreq_freqs *freq = data;
-
- if (val != CPUFREQ_RESUMECHANGE)
- write_seqlock_irq(&xtime_lock);
- if (!ref_freq) {
- ref_freq = freq->old;
- loops_per_jiffy_ref = cpu_data[freq->cpu].loops_per_jiffy;
-#ifndef CONFIG_SMP
- fast_gettimeoffset_ref = fast_gettimeoffset_quotient;
- cpu_khz_ref = cpu_khz;
-#endif
- }
-
- if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
- (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
- (val == CPUFREQ_RESUMECHANGE)) {
- if (!(freq->flags & CPUFREQ_CONST_LOOPS))
-			cpu_data[freq->cpu].loops_per_jiffy = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);
-#ifndef CONFIG_SMP
- if (cpu_khz)
-			cpu_khz = cpufreq_scale(cpu_khz_ref, ref_freq, freq->new);
- if (use_tsc) {
- if (!(freq->flags & CPUFREQ_CONST_LOOPS)) {
-				fast_gettimeoffset_quotient = cpufreq_scale(fast_gettimeoffset_ref, freq->new, ref_freq);
- set_cyc2ns_scale(cpu_khz/1000);
- }
- }
-#endif
- }
-
- if (val != CPUFREQ_RESUMECHANGE)
- write_sequnlock_irq(&xtime_lock);
-
- return 0;
-}
-
-static struct notifier_block time_cpufreq_notifier_block = {
- .notifier_call = time_cpufreq_notifier
-};
-
-
-static int __init cpufreq_tsc(void)
-{
- int ret;
- INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
- ret = cpufreq_register_notifier(&time_cpufreq_notifier_block,
- CPUFREQ_TRANSITION_NOTIFIER);
- if (!ret)
- cpufreq_init = 1;
- return ret;
-}
-core_initcall(cpufreq_tsc);
-
-#else /* CONFIG_CPU_FREQ */
-static inline void cpufreq_delayed_get(void) { return; }
-#endif
-
-
-static int init_tsc(char* override)
-{
- u64 __cpu_khz;
-
- __cpu_khz = HYPERVISOR_shared_info->cpu_freq;
- do_div(__cpu_khz, 1000);
- cpu_khz = (u32)__cpu_khz;
- printk(KERN_INFO "Xen reported: %lu.%03lu MHz processor.\n",
- cpu_khz / 1000, cpu_khz % 1000);
-
- /* (10^6 * 2^32) / cpu_hz = (10^3 * 2^32) / cpu_khz =
- (2^32 * 1 / (clocks/us)) */
- {
- unsigned long eax=0, edx=1000;
- __asm__("divl %2"
- :"=a" (fast_gettimeoffset_quotient), "=d" (edx)
- :"r" (cpu_khz),
- "0" (eax), "1" (edx));
- }
-
- set_cyc2ns_scale(cpu_khz/1000);
-
- use_tsc = 1;
-
- return 0;
-}
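The divl above divides the 64-bit value %edx:%eax = 1000 * 2^32 by cpu_khz, yielding 2^32 / (clocks per usec). The same quotient in portable C, with an assumed 2 GHz frequency:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t cpu_khz = 2000000; /* assumed 2 GHz CPU */

        /* (10^3 * 2^32) / cpu_khz == 2^32 / (clocks per microsecond) */
        uint32_t quotient = (uint32_t)((1000ULL << 32) / cpu_khz);
        printf("fast_gettimeoffset_quotient = %u\n", quotient);
        return 0;
    }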
-
-static int __init tsc_setup(char *str)
-{
- printk(KERN_WARNING "notsc: cannot disable TSC in Xen/Linux.\n");
- return 1;
-}
-__setup("notsc", tsc_setup);
-
-
-
-/************************************************************/
-
-/* tsc timer_opts struct */
-struct timer_opts timer_tsc = {
- .name = "tsc",
- .mark_offset = mark_offset_tsc,
- .get_offset = get_offset_tsc,
- .monotonic_clock = monotonic_clock_tsc,
- .delay = delay_tsc,
-};
-
-struct init_timer_opts timer_tsc_init = {
- .init = init_tsc,
- .opts = &timer_tsc,
-};
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/arch/xen/i386/kernel/traps.c
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/traps.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,980 +0,0 @@
-/*
- * linux/arch/i386/traps.c
- *
- * Copyright (C) 1991, 1992 Linus Torvalds
- *
- * Pentium III FXSR, SSE support
- * Gareth Hughes <gareth@xxxxxxxxxxx>, May 2000
- */
-
-/*
- * 'Traps.c' handles hardware traps and faults after we have saved some
- * state in 'asm.s'.
- */
-#include <linux/config.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/timer.h>
-#include <linux/mm.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/spinlock.h>
-#include <linux/interrupt.h>
-#include <linux/highmem.h>
-#include <linux/kallsyms.h>
-#include <linux/ptrace.h>
-#include <linux/utsname.h>
-#include <linux/kprobes.h>
-
-#ifdef CONFIG_EISA
-#include <linux/ioport.h>
-#include <linux/eisa.h>
-#endif
-
-#ifdef CONFIG_MCA
-#include <linux/mca.h>
-#endif
-
-#include <asm/processor.h>
-#include <asm/system.h>
-#include <asm/uaccess.h>
-#include <asm/io.h>
-#include <asm/atomic.h>
-#include <asm/debugreg.h>
-#include <asm/desc.h>
-#include <asm/i387.h>
-#include <asm/nmi.h>
-
-#include <asm/smp.h>
-#include <asm/arch_hooks.h>
-#include <asm/kdebug.h>
-
-#include <linux/irq.h>
-#include <linux/module.h>
-
-#include "mach_traps.h"
-
-asmlinkage int system_call(void);
-
-/* Do we ignore FPU interrupts ? */
-char ignore_fpu_irq = 0;
-
-/*
- * The IDT has to be page-aligned to simplify the Pentium
- * F0 0F bug workaround.. We have a special link segment
- * for this.
- */
-struct desc_struct idt_table[256] __attribute__((__section__(".data.idt"))) = { {0, 0}, };
-
-asmlinkage void divide_error(void);
-asmlinkage void debug(void);
-asmlinkage void nmi(void);
-asmlinkage void int3(void);
-asmlinkage void overflow(void);
-asmlinkage void bounds(void);
-asmlinkage void invalid_op(void);
-asmlinkage void device_not_available(void);
-asmlinkage void coprocessor_segment_overrun(void);
-asmlinkage void invalid_TSS(void);
-asmlinkage void segment_not_present(void);
-asmlinkage void stack_segment(void);
-asmlinkage void general_protection(void);
-asmlinkage void page_fault(void);
-asmlinkage void coprocessor_error(void);
-asmlinkage void simd_coprocessor_error(void);
-asmlinkage void alignment_check(void);
-asmlinkage void fixup_4gb_segment(void);
-asmlinkage void machine_check(void);
-
-static int kstack_depth_to_print = 24;
-struct notifier_block *i386die_chain;
-static DEFINE_SPINLOCK(die_notifier_lock);
-
-int register_die_notifier(struct notifier_block *nb)
-{
- int err = 0;
- unsigned long flags;
- spin_lock_irqsave(&die_notifier_lock, flags);
- err = notifier_chain_register(&i386die_chain, nb);
- spin_unlock_irqrestore(&die_notifier_lock, flags);
- return err;
-}
-
-static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
-{
- return p > (void *)tinfo &&
- p < (void *)tinfo + THREAD_SIZE - 3;
-}
-
-static inline unsigned long print_context_stack(struct thread_info *tinfo,
- unsigned long *stack, unsigned long ebp)
-{
- unsigned long addr;
-
-#ifdef CONFIG_FRAME_POINTER
- while (valid_stack_ptr(tinfo, (void *)ebp)) {
- addr = *(unsigned long *)(ebp + 4);
- printk(" [<%08lx>] ", addr);
- print_symbol("%s", addr);
- printk("\n");
- ebp = *(unsigned long *)ebp;
- }
-#else
- while (valid_stack_ptr(tinfo, stack)) {
- addr = *stack++;
- if (__kernel_text_address(addr)) {
- printk(" [<%08lx>]", addr);
- print_symbol(" %s", addr);
- printk("\n");
- }
- }
-#endif
- return ebp;
-}
-
-void show_trace(struct task_struct *task, unsigned long * stack)
-{
- unsigned long ebp;
-
- if (!task)
- task = current;
-
- if (task == current) {
- /* Grab ebp right from our regs */
- asm ("movl %%ebp, %0" : "=r" (ebp) : );
- } else {
- /* ebp is the last reg pushed by switch_to */
- ebp = *(unsigned long *) task->thread.esp;
- }
-
- while (1) {
- struct thread_info *context;
- context = (struct thread_info *)
- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
- ebp = print_context_stack(context, stack, ebp);
- stack = (unsigned long*)context->previous_esp;
- if (!stack)
- break;
- printk(" =======================\n");
- }
-}
-
-void show_stack(struct task_struct *task, unsigned long *esp)
-{
- unsigned long *stack;
- int i;
-
- if (esp == NULL) {
- if (task)
- esp = (unsigned long*)task->thread.esp;
- else
- esp = (unsigned long *)&esp;
- }
-
- stack = esp;
- for(i = 0; i < kstack_depth_to_print; i++) {
- if (kstack_end(stack))
- break;
- if (i && ((i % 8) == 0))
- printk("\n ");
- printk("%08lx ", *stack++);
- }
- printk("\nCall Trace:\n");
- show_trace(task, esp);
-}
-
-/*
- * The architecture-independent dump_stack generator
- */
-void dump_stack(void)
-{
- unsigned long stack;
-
- show_trace(current, &stack);
-}
-
-EXPORT_SYMBOL(dump_stack);
-
-void show_registers(struct pt_regs *regs)
-{
- int i;
- int in_kernel = 1;
- unsigned long esp;
- unsigned short ss;
-
-	esp = (unsigned long) (&regs->esp);
- ss = __KERNEL_DS;
- if (regs->xcs & 2) {
- in_kernel = 0;
- esp = regs->esp;
- ss = regs->xss & 0xffff;
- }
- print_modules();
- printk("CPU: %d\nEIP: %04x:[<%08lx>] %s VLI\nEFLAGS: %08lx"
- " (%s) \n",
- smp_processor_id(), 0xffff & regs->xcs, regs->eip,
- print_tainted(), regs->eflags, system_utsname.release);
- print_symbol("EIP is at %s\n", regs->eip);
- printk("eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n",
- regs->eax, regs->ebx, regs->ecx, regs->edx);
- printk("esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n",
- regs->esi, regs->edi, regs->ebp, esp);
- printk("ds: %04x es: %04x ss: %04x\n",
- regs->xds & 0xffff, regs->xes & 0xffff, ss);
- printk("Process %s (pid: %d, threadinfo=%p task=%p)",
- current->comm, current->pid, current_thread_info(), current);
- /*
- * When in-kernel, we also print out the stack and code at the
- * time of the fault..
- */
- if (in_kernel) {
- u8 *eip;
-
- printk("\nStack: ");
- show_stack(NULL, (unsigned long*)esp);
-
- printk("Code: ");
-
- eip = (u8 *)regs->eip - 43;
- for (i = 0; i < 64; i++, eip++) {
- unsigned char c;
-
- if (eip < (u8 *)PAGE_OFFSET || __get_user(c, eip)) {
- printk(" Bad EIP value.");
- break;
- }
- if (eip == (u8 *)regs->eip)
- printk("<%02x> ", c);
- else
- printk("%02x ", c);
- }
- }
- printk("\n");
-}
-
-static void handle_BUG(struct pt_regs *regs)
-{
- unsigned short ud2;
- unsigned short line;
- char *file;
- char c;
- unsigned long eip;
-
- if (regs->xcs & 2)
- goto no_bug; /* Not in kernel */
-
- eip = regs->eip;
-
- if (eip < PAGE_OFFSET)
- goto no_bug;
- if (__get_user(ud2, (unsigned short *)eip))
- goto no_bug;
- if (ud2 != 0x0b0f)
- goto no_bug;
- if (__get_user(line, (unsigned short *)(eip + 2)))
- goto bug;
- if (__get_user(file, (char **)(eip + 4)) ||
- (unsigned long)file < PAGE_OFFSET || __get_user(c, file))
- file = "<bad filename>";
-
- printk("------------[ cut here ]------------\n");
- printk(KERN_ALERT "kernel BUG at %s:%d!\n", file, line);
-
-no_bug:
- return;
-
- /* Here we know it was a BUG but file-n-line is unavailable */
-bug:
- printk("Kernel BUG\n");
-}
-
-void die(const char * str, struct pt_regs * regs, long err)
-{
- static struct {
- spinlock_t lock;
- u32 lock_owner;
- int lock_owner_depth;
- } die = {
- .lock = SPIN_LOCK_UNLOCKED,
- .lock_owner = -1,
- .lock_owner_depth = 0
- };
- static int die_counter;
-
- if (die.lock_owner != _smp_processor_id()) {
- console_verbose();
- spin_lock_irq(&die.lock);
- die.lock_owner = smp_processor_id();
- die.lock_owner_depth = 0;
- bust_spinlocks(1);
- }
-
- if (++die.lock_owner_depth < 3) {
- int nl = 0;
- handle_BUG(regs);
-		printk(KERN_ALERT "%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
-#ifdef CONFIG_PREEMPT
- printk("PREEMPT ");
- nl = 1;
-#endif
-#ifdef CONFIG_SMP
- printk("SMP ");
- nl = 1;
-#endif
-#ifdef CONFIG_DEBUG_PAGEALLOC
- printk("DEBUG_PAGEALLOC");
- nl = 1;
-#endif
- if (nl)
- printk("\n");
- notify_die(DIE_OOPS, (char *)str, regs, err, 255, SIGSEGV);
- show_registers(regs);
- } else
- printk(KERN_ERR "Recursive die() failure, output suppressed\n");
-
- bust_spinlocks(0);
- die.lock_owner = -1;
- spin_unlock_irq(&die.lock);
- if (in_interrupt())
- panic("Fatal exception in interrupt");
-
- if (panic_on_oops) {
- printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(5 * HZ);
- panic("Fatal exception");
- }
- do_exit(SIGSEGV);
-}
-
-static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
-{
- if (!(regs->eflags & VM_MASK) && !(2 & regs->xcs))
- die(str, regs, err);
-}
-
-static void do_trap(int trapnr, int signr, char *str, int vm86,
-			struct pt_regs * regs, long error_code, siginfo_t *info)
-{
- if (regs->eflags & VM_MASK) {
- if (vm86)
- goto vm86_trap;
- goto trap_signal;
- }
-
- if (!(regs->xcs & 2))
- goto kernel_trap;
-
- trap_signal: {
- struct task_struct *tsk = current;
- tsk->thread.error_code = error_code;
- tsk->thread.trap_no = trapnr;
- if (info)
- force_sig_info(signr, info, tsk);
- else
- force_sig(signr, tsk);
- return;
- }
-
- kernel_trap: {
- if (!fixup_exception(regs))
- die(str, regs, error_code);
- return;
- }
-
- vm86_trap: {
-		int ret = handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, trapnr);
- if (ret) goto trap_signal;
- return;
- }
-}
-
-#define DO_ERROR(trapnr, signr, str, name) \
-fastcall void do_##name(struct pt_regs * regs, long error_code) \
-{ \
- if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
- == NOTIFY_STOP) \
- return; \
- do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \
-}
-
-#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
-fastcall void do_##name(struct pt_regs * regs, long error_code) \
-{ \
- siginfo_t info; \
- info.si_signo = signr; \
- info.si_errno = 0; \
- info.si_code = sicode; \
- info.si_addr = (void __user *)siaddr; \
- if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
- == NOTIFY_STOP) \
- return; \
- do_trap(trapnr, signr, str, 0, regs, error_code, &info); \
-}
-
-#define DO_VM86_ERROR(trapnr, signr, str, name) \
-fastcall void do_##name(struct pt_regs * regs, long error_code) \
-{ \
- if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
- == NOTIFY_STOP) \
- return; \
- do_trap(trapnr, signr, str, 1, regs, error_code, NULL); \
-}
-
-#define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
-fastcall void do_##name(struct pt_regs * regs, long error_code) \
-{ \
- siginfo_t info; \
- info.si_signo = signr; \
- info.si_errno = 0; \
- info.si_code = sicode; \
- info.si_addr = (void __user *)siaddr; \
- if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
- == NOTIFY_STOP) \
- return; \
- do_trap(trapnr, signr, str, 1, regs, error_code, &info); \
-}
-
-DO_VM86_ERROR_INFO( 0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->eip)
-#ifndef CONFIG_KPROBES
-DO_VM86_ERROR( 3, SIGTRAP, "int3", int3)
-#endif
-DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
-DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
-DO_ERROR_INFO( 6, SIGILL, "invalid operand", invalid_op, ILL_ILLOPN, regs->eip)
-DO_VM86_ERROR( 7, SIGSEGV, "device not available", device_not_available)
-DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
-DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
-DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
-DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
-DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
-#ifdef CONFIG_X86_MCE
-DO_ERROR(18, SIGBUS, "machine check", machine_check)
-#endif
-
-fastcall void do_general_protection(struct pt_regs * regs, long error_code)
-{
- /*
- * If we trapped on an LDT access then ensure that the default_ldt is
- * loaded, if nothing else. We load default_ldt lazily because LDT
- * switching costs time and many applications don't need it.
- */
- if (unlikely((error_code & 6) == 4)) {
- unsigned long ldt;
- __asm__ __volatile__ ("sldt %0" : "=r" (ldt));
- if (ldt == 0) {
- mmu_update_t u;
- u.ptr = MMU_EXTENDED_COMMAND;
- u.ptr |= (unsigned long)&default_ldt[0];
- u.val = MMUEXT_SET_LDT | (5 << MMUEXT_CMD_SHIFT);
- if (unlikely(HYPERVISOR_mmu_update(&u, 1, NULL) < 0)) {
- show_trace(NULL, (unsigned long *)&u);
- panic("Failed to install default LDT");
- }
- return;
- }
- }
-
- if (regs->eflags & VM_MASK)
- goto gp_in_vm86;
-
- if (!(regs->xcs & 2))
- goto gp_in_kernel;
-
- current->thread.error_code = error_code;
- current->thread.trap_no = 13;
- force_sig(SIGSEGV, current);
- return;
-
-gp_in_vm86:
- local_irq_enable();
- handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
- return;
-
-gp_in_kernel:
- if (!fixup_exception(regs)) {
- if (notify_die(DIE_GPF, "general protection fault", regs,
- error_code, 13, SIGSEGV) == NOTIFY_STOP)
- return;
- die("general protection fault", regs, error_code);
- }
-}
-
-static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
-{
-	printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
- printk("You probably have a hardware problem with your RAM chips\n");
-
- /* Clear and disable the memory parity error line. */
- clear_mem_error(reason);
-}
-
-static void io_check_error(unsigned char reason, struct pt_regs * regs)
-{
- unsigned long i;
-
- printk("NMI: IOCK error (debug interrupt?)\n");
- show_registers(regs);
-
- /* Re-enable the IOCK line, wait for a few seconds */
- reason = (reason & 0xf) | 8;
- outb(reason, 0x61);
- i = 2000;
- while (--i) udelay(1000);
- reason &= ~8;
- outb(reason, 0x61);
-}
-
-static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
-{
-#ifdef CONFIG_MCA
- /* Might actually be able to figure out what the guilty party
- * is. */
- if( MCA_bus ) {
- mca_handle_nmi();
- return;
- }
-#endif
- printk("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
- reason, smp_processor_id());
- printk("Dazed and confused, but trying to continue\n");
- printk("Do you have a strange power saving mode enabled?\n");
-}
-
-static DEFINE_SPINLOCK(nmi_print_lock);
-
-void die_nmi (struct pt_regs *regs, const char *msg)
-{
- spin_lock(&nmi_print_lock);
- /*
- * We are in trouble anyway, lets at least try
- * to get a message out.
- */
- bust_spinlocks(1);
- printk(msg);
- printk(" on CPU%d, eip %08lx, registers:\n",
- smp_processor_id(), regs->eip);
- show_registers(regs);
- printk("console shuts up ...\n");
- console_silent();
- spin_unlock(&nmi_print_lock);
- bust_spinlocks(0);
- do_exit(SIGSEGV);
-}
-
-static void default_do_nmi(struct pt_regs * regs)
-{
- unsigned char reason = 0;
-
- /* Only the BSP gets external NMIs from the system. */
- if (!smp_processor_id())
- reason = get_nmi_reason();
-
- if (!(reason & 0xc0)) {
- if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 0, SIGINT)
- == NOTIFY_STOP)
- return;
-#ifdef CONFIG_X86_LOCAL_APIC
- /*
- * Ok, so this is none of the documented NMI sources,
- * so it must be the NMI watchdog.
- */
- if (nmi_watchdog) {
- nmi_watchdog_tick(regs);
- return;
- }
-#endif
- unknown_nmi_error(reason, regs);
- return;
- }
- if (notify_die(DIE_NMI, "nmi", regs, reason, 0, SIGINT) == NOTIFY_STOP)
- return;
- if (reason & 0x80)
- mem_parity_error(reason, regs);
- if (reason & 0x40)
- io_check_error(reason, regs);
- /*
- * Reassert NMI in case it became active meanwhile
- * as it's edge-triggered.
- */
- reassert_nmi();
-}
-
-static int dummy_nmi_callback(struct pt_regs * regs, int cpu)
-{
- return 0;
-}
-
-static nmi_callback_t nmi_callback = dummy_nmi_callback;
-
-fastcall void do_nmi(struct pt_regs * regs, long error_code)
-{
- int cpu;
-
- nmi_enter();
-
- cpu = smp_processor_id();
- ++nmi_count(cpu);
-
- if (!nmi_callback(regs, cpu))
- default_do_nmi(regs);
-
- nmi_exit();
-}
-
-void set_nmi_callback(nmi_callback_t callback)
-{
- nmi_callback = callback;
-}
-
-void unset_nmi_callback(void)
-{
- nmi_callback = dummy_nmi_callback;
-}
-
-#ifdef CONFIG_KPROBES
-fastcall int do_int3(struct pt_regs *regs, long error_code)
-{
- if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
- == NOTIFY_STOP)
- return 1;
- /* This is an interrupt gate, because kprobes wants interrupts
- disabled. Normal trap handlers don't. */
- restore_interrupts(regs);
- do_trap(3, SIGTRAP, "int3", 1, regs, error_code, NULL);
- return 0;
-}
-#endif
-
-/*
- * Our handling of the processor debug registers is non-trivial.
- * We do not clear them on entry and exit from the kernel. Therefore
- * it is possible to get a watchpoint trap here from inside the kernel.
- * However, the code in ./ptrace.c has ensured that the user can
- * only set watchpoints on userspace addresses. Therefore the in-kernel
- * watchpoint trap can only occur in code which is reading/writing
- * from user space. Such code must not hold kernel locks (since it
- * can equally take a page fault), therefore it is safe to call
- * force_sig_info even though that claims and releases locks.
- *
- * Code in ./signal.c ensures that the debug control register
- * is restored before we deliver any signal, and therefore that
- * user code runs with the correct debug control register even though
- * we clear it here.
- *
- * Being careful here means that we don't have to be as careful in a
- * lot of more complicated places (task switching can be a bit lazy
- * about restoring all the debug state, and ptrace doesn't have to
- * find every occurrence of the TF bit that could be saved away even
- * by user code)
- */
-fastcall void do_debug(struct pt_regs * regs, long error_code)
-{
- unsigned int condition;
- struct task_struct *tsk = current;
-
- condition = HYPERVISOR_get_debugreg(6);
-
- if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
- SIGTRAP) == NOTIFY_STOP)
- return;
-#if 0
- /* It's safe to allow irq's after DR6 has been saved */
- if (regs->eflags & X86_EFLAGS_IF)
- local_irq_enable();
-#endif
-
- /* Mask out spurious debug traps due to lazy DR7 setting */
- if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
- if (!tsk->thread.debugreg[7])
- goto clear_dr7;
- }
-
- if (regs->eflags & VM_MASK)
- goto debug_vm86;
-
- /* Save debug status register where ptrace can see it */
- tsk->thread.debugreg[6] = condition;
-
- /*
- * Single-stepping through TF: make sure we ignore any events in
- * kernel space (but re-enable TF when returning to user mode).
- * And if the event was due to a debugger (PT_DTRACE), clear the
- * TF flag so that register information is correct.
- */
- if (condition & DR_STEP) {
- /*
- * We already checked v86 mode above, so we can
- * check for kernel mode by just checking the CPL
- * of CS.
- */
- if ((regs->xcs & 2) == 0)
- goto clear_TF_reenable;
-
- if (likely(tsk->ptrace & PT_DTRACE)) {
- tsk->ptrace &= ~PT_DTRACE;
- regs->eflags &= ~TF_MASK;
- }
- }
-
- /* Ok, finally something we can handle */
- send_sigtrap(tsk, regs, error_code);
-
- /* Disable additional traps. They'll be re-enabled when
- * the signal is delivered.
- */
-clear_dr7:
- HYPERVISOR_set_debugreg(7, 0);
- return;
-
-debug_vm86:
- handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
- return;
-
-clear_TF_reenable:
- set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
- regs->eflags &= ~TF_MASK;
- return;
-}
-
-/*
- * Note that we play around with the 'TS' bit in an attempt to get
- * the correct behaviour even in the presence of the asynchronous
- * IRQ13 behaviour
- */
-void math_error(void __user *eip)
-{
- struct task_struct * task;
- siginfo_t info;
- unsigned short cwd, swd;
-
- /*
- * Save the info for the exception handler and clear the error.
- */
- task = current;
- save_init_fpu(task);
- task->thread.trap_no = 16;
- task->thread.error_code = 0;
- info.si_signo = SIGFPE;
- info.si_errno = 0;
- info.si_code = __SI_FAULT;
- info.si_addr = eip;
- /*
- * (~cwd & swd) will mask out exceptions that are not set to unmasked
- * status. 0x3f is the exception bits in these regs, 0x200 is the
- * C1 reg you need in case of a stack fault, 0x040 is the stack
- * fault bit. We should only be taking one exception at a time,
- * so if this combination doesn't produce any single exception,
-	 * then we have a bad program that isn't synchronizing its FPU usage
- * and it will suffer the consequences since we won't be able to
- * fully reproduce the context of the exception
- */
- cwd = get_fpu_cwd(task);
- swd = get_fpu_swd(task);
- switch (((~cwd) & swd & 0x3f) | (swd & 0x240)) {
- case 0x000:
- default:
- break;
- case 0x001: /* Invalid Op */
- case 0x041: /* Stack Fault */
- case 0x241: /* Stack Fault | Direction */
- info.si_code = FPE_FLTINV;
-			/* Should we clear the SF or let user space do it ???? */
- break;
- case 0x002: /* Denormalize */
- case 0x010: /* Underflow */
- info.si_code = FPE_FLTUND;
- break;
- case 0x004: /* Zero Divide */
- info.si_code = FPE_FLTDIV;
- break;
- case 0x008: /* Overflow */
- info.si_code = FPE_FLTOVF;
- break;
- case 0x020: /* Precision */
- info.si_code = FPE_FLTRES;
- break;
- }
- force_sig_info(SIGFPE, &info, task);
-}
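The switch above keys on (~cwd & swd & 0x3f) | (swd & 0x240), i.e. only exceptions that are both signalled and unmasked. A stand-alone check with illustrative register values (a control word with the zero-divide mask bit cleared, and a status word with zero-divide signalled):

    #include <stdio.h>

    int main(void)
    {
        unsigned short cwd = 0x037b; /* default cwd with ZM bit cleared */
        unsigned short swd = 0x0004; /* zero-divide signalled */

        switch (((~cwd) & swd & 0x3f) | (swd & 0x240)) {
        case 0x004: /* Zero Divide */
            printf("would raise SIGFPE / FPE_FLTDIV\n");
            break;
        default:
            printf("no single unmasked exception\n");
            break;
        }
        return 0;
    }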
-
-fastcall void do_coprocessor_error(struct pt_regs * regs, long error_code)
-{
- ignore_fpu_irq = 1;
- math_error((void __user *)regs->eip);
-}
-
-void simd_math_error(void __user *eip)
-{
- struct task_struct * task;
- siginfo_t info;
- unsigned short mxcsr;
-
- /*
- * Save the info for the exception handler and clear the error.
- */
- task = current;
- save_init_fpu(task);
- task->thread.trap_no = 19;
- task->thread.error_code = 0;
- info.si_signo = SIGFPE;
- info.si_errno = 0;
- info.si_code = __SI_FAULT;
- info.si_addr = eip;
- /*
- * The SIMD FPU exceptions are handled a little differently, as there
- * is only a single status/control register. Thus, to determine which
- * unmasked exception was caught we must mask the exception mask bits
- * at 0x1f80, and then use these to mask the exception bits at 0x3f.
- */
- mxcsr = get_fpu_mxcsr(task);
- switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
- case 0x000:
- default:
- break;
- case 0x001: /* Invalid Op */
- info.si_code = FPE_FLTINV;
- break;
- case 0x002: /* Denormalize */
- case 0x010: /* Underflow */
- info.si_code = FPE_FLTUND;
- break;
- case 0x004: /* Zero Divide */
- info.si_code = FPE_FLTDIV;
- break;
- case 0x008: /* Overflow */
- info.si_code = FPE_FLTOVF;
- break;
- case 0x020: /* Precision */
- info.si_code = FPE_FLTRES;
- break;
- }
- force_sig_info(SIGFPE, &info, task);
-}
-
-fastcall void do_simd_coprocessor_error(struct pt_regs * regs,
- long error_code)
-{
- if (cpu_has_xmm) {
- /* Handle SIMD FPU exceptions on PIII+ processors. */
- ignore_fpu_irq = 1;
- simd_math_error((void __user *)regs->eip);
- } else {
- /*
- * Handle strange cache flush from user space exception
- * in all other cases. This is undocumented behaviour.
- */
- if (regs->eflags & VM_MASK) {
- handle_vm86_fault((struct kernel_vm86_regs *)regs,
- error_code);
- return;
- }
- die_if_kernel("cache flush denied", regs, error_code);
- current->thread.trap_no = 19;
- current->thread.error_code = error_code;
- force_sig(SIGSEGV, current);
- }
-}
-
-/*
- * 'math_state_restore()' saves the current math information in the
- * old math state array, and gets the new ones from the current task
- *
- * Careful.. There are problems with IBM-designed IRQ13 behaviour.
- * Don't touch unless you *really* know how it works.
- *
- * Must be called with kernel preemption disabled (in this case,
- * local interrupts are disabled at the call-site in entry.S).
- */
-asmlinkage void math_state_restore(struct pt_regs regs)
-{
- struct thread_info *thread = current_thread_info();
- struct task_struct *tsk = thread->task;
-
- /*
- * A trap in kernel mode can be ignored. It'll be the fast XOR or
- * copying libraries, which will correctly save/restore state and
- * reset the TS bit in CR0.
- */
- if ((regs.xcs & 2) == 0)
- return;
-
- clts(); /* Allow maths ops (or we recurse) */
- if (!tsk_used_math(tsk))
- init_fpu(tsk);
- restore_fpu(tsk);
- thread->status |= TS_USEDFPU; /* So we fnsave on switch_to() */
-}
-
-#ifndef CONFIG_MATH_EMULATION
-
-asmlinkage void math_emulate(long arg)
-{
- printk("math-emulation not enabled and no coprocessor found.\n");
- printk("killing %s.\n",current->comm);
- force_sig(SIGFPE,current);
- schedule();
-}
-
-#endif /* CONFIG_MATH_EMULATION */
-
-#ifdef CONFIG_X86_F00F_BUG
-void __init trap_init_f00f_bug(void)
-{
- __set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);
-
- /*
- * Update the IDT descriptor and reload the IDT so that
- * it uses the read-only mapped virtual address.
- */
- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
- __asm__ __volatile__("lidt %0" : : "m" (idt_descr));
-}
-#endif
-
-
-/* NB. All these are "trap gates" (i.e. events_mask isn't cleared). */
-static trap_info_t trap_table[] = {
- { 0, 0, __KERNEL_CS, (unsigned long)divide_error },
- { 1, 0, __KERNEL_CS, (unsigned long)debug },
- { 3, 3, __KERNEL_CS, (unsigned long)int3 },
- { 4, 3, __KERNEL_CS, (unsigned long)overflow },
- { 5, 3, __KERNEL_CS, (unsigned long)bounds },
- { 6, 0, __KERNEL_CS, (unsigned long)invalid_op },
- { 7, 0, __KERNEL_CS, (unsigned long)device_not_available },
- { 9, 0, __KERNEL_CS, (unsigned long)coprocessor_segment_overrun },
- { 10, 0, __KERNEL_CS, (unsigned long)invalid_TSS },
- { 11, 0, __KERNEL_CS, (unsigned long)segment_not_present },
- { 12, 0, __KERNEL_CS, (unsigned long)stack_segment },
- { 13, 0, __KERNEL_CS, (unsigned long)general_protection },
- { 14, 0, __KERNEL_CS, (unsigned long)page_fault },
- { 15, 0, __KERNEL_CS, (unsigned long)fixup_4gb_segment },
- { 16, 0, __KERNEL_CS, (unsigned long)coprocessor_error },
- { 17, 0, __KERNEL_CS, (unsigned long)alignment_check },
-#ifdef CONFIG_X86_MCE
- { 18, 0, __KERNEL_CS, (unsigned long)machine_check },
-#endif
- { 19, 0, __KERNEL_CS, (unsigned long)simd_coprocessor_error },
- { SYSCALL_VECTOR, 3, __KERNEL_CS, (unsigned long)system_call },
- { 0, 0, 0, 0 }
-};
-
-void __init trap_init(void)
-{
- HYPERVISOR_set_trap_table(trap_table);
- HYPERVISOR_set_fast_trap(SYSCALL_VECTOR);
-
- /*
- * default LDT is a single-entry callgate to lcall7 for iBCS
- * and a callgate to lcall27 for Solaris/x86 binaries
- */
- make_lowmem_page_readonly(&default_ldt[0]);
- xen_flush_page_update_queue();
-
- /*
- * Should be a barrier for any external CPU state.
- */
- cpu_init();
-}
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/arch/xen/i386/kernel/vsyscall.S
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/vsyscall.S Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,15 +0,0 @@
-#include <linux/init.h>
-
-__INITDATA
-
- .globl vsyscall_int80_start, vsyscall_int80_end
-vsyscall_int80_start:
- .incbin "arch/xen/i386/kernel/vsyscall-int80.so"
-vsyscall_int80_end:
-
- .globl vsyscall_sysenter_start, vsyscall_sysenter_end
-vsyscall_sysenter_start:
- .incbin "arch/xen/i386/kernel/vsyscall-sysenter.so"
-vsyscall_sysenter_end:
-
-__FINIT
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/arch/xen/i386/mm/Makefile
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/mm/Makefile Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,24 +0,0 @@
-#
-# Makefile for the linux i386-specific parts of the memory manager.
-#
-
-XENARCH := $(subst ",,$(CONFIG_XENARCH))
-
-CFLAGS += -Iarch/$(XENARCH)/mm
-
-obj-y := init.o pgtable.o fault.o ioremap.o pageattr.o hypervisor.o
-c-obj-y := extable.o mmap.o
-
-c-obj-$(CONFIG_DISCONTIGMEM) += discontig.o
-c-obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
-obj-$(CONFIG_HIGHMEM) += highmem.o
-c-obj-$(CONFIG_BOOT_IOREMAP) += boot_ioremap.o
-
-c-link :=
-
-$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-link)):
- @ln -fsn $(srctree)/arch/i386/mm/$(notdir $@) $@
-
-obj-y += $(c-obj-y)
-
-clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-) $(c-link))
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/arch/xen/i386/mm/fault.c
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/mm/fault.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,561 +0,0 @@
-/*
- * linux/arch/i386/mm/fault.c
- *
- * Copyright (C) 1995 Linus Torvalds
- */
-
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/tty.h>
-#include <linux/vt_kern.h> /* For unblank_screen() */
-#include <linux/highmem.h>
-#include <linux/module.h>
-
-#include <asm/system.h>
-#include <asm/uaccess.h>
-#include <asm/desc.h>
-#include <asm/kdebug.h>
-
-extern void die(const char *,struct pt_regs *,long);
-
-pgd_t *cur_pgd; /* XXXsmp */
-
-/*
- * Unlock any spinlocks which will prevent us from getting the
- * message out
- */
-void bust_spinlocks(int yes)
-{
- int loglevel_save = console_loglevel;
-
- if (yes) {
- oops_in_progress = 1;
- return;
- }
-#ifdef CONFIG_VT
- unblank_screen();
-#endif
- oops_in_progress = 0;
- /*
- * OK, the message is on the console. Now we call printk()
- * without oops_in_progress set so that printk will give klogd
- * a poke. Hold onto your hats...
- */
-	console_loglevel = 15;		/* NMI oopser may have shut the console up */
- printk(" ");
- console_loglevel = loglevel_save;
-}
-
-/*
- * Return EIP plus the CS segment base. The segment limit is also
- * adjusted, clamped to the kernel/user address space (whichever is
- * appropriate), and returned in *eip_limit.
- *
- * The segment is checked, because it might have been changed by another
- * task between the original faulting instruction and here.
- *
- * If CS is no longer a valid code segment, or if EIP is beyond the
- * limit, or if it is a kernel address when CS is not a kernel segment,
- * then the returned value will be greater than *eip_limit.
- *
- * This is slow, but is very rarely executed.
- */
-static inline unsigned long get_segment_eip(struct pt_regs *regs,
- unsigned long *eip_limit)
-{
- unsigned long eip = regs->eip;
- unsigned seg = regs->xcs & 0xffff;
- u32 seg_ar, seg_limit, base, *desc;
-
- /* The standard kernel/user address space limit. */
- *eip_limit = (seg & 2) ? USER_DS.seg : KERNEL_DS.seg;
-
- /* Unlikely, but must come before segment checks. */
- if (unlikely((regs->eflags & VM_MASK) != 0))
- return eip + (seg << 4);
-
- /* By far the most common cases. */
- if (likely(seg == __USER_CS || seg == __KERNEL_CS))
- return eip;
-
- /* Check the segment exists, is within the current LDT/GDT size,
- that kernel/user (ring 0..3) has the appropriate privilege,
- that it's a code segment, and get the limit. */
- __asm__ ("larl %3,%0; lsll %3,%1"
- : "=&r" (seg_ar), "=r" (seg_limit) : "0" (0), "rm" (seg));
- if ((~seg_ar & 0x9800) || eip > seg_limit) {
- *eip_limit = 0;
- return 1; /* So that returned eip > *eip_limit. */
- }
-
- /* Get the GDT/LDT descriptor base.
- When you look for races in this code remember that
- LDT and other horrors are only used in user space. */
- if (seg & (1<<2)) {
- /* Must lock the LDT while reading it. */
-		down(&current->mm->context.sem);
- desc = current->mm->context.ldt;
- desc = (void *)desc + (seg & ~7);
- } else {
- /* Must disable preemption while reading the GDT. */
- desc = (u32 *)get_cpu_gdt_table(get_cpu());
- desc = (void *)desc + (seg & ~7);
- }
-
- /* Decode the code segment base from the descriptor */
- base = get_desc_base((unsigned long *)desc);
-
- if (seg & (1<<2)) {
-		up(&current->mm->context.sem);
- } else
- put_cpu();
-
- /* Adjust EIP and segment limit, and clamp at the kernel limit.
- It's legitimate for segments to wrap at 0xffffffff. */
- seg_limit += base;
- if (seg_limit < *eip_limit && seg_limit >= base)
- *eip_limit = seg_limit;
- return eip + base;
-}
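For the vm86 early-out above, the linear address is simply segment * 16 plus offset. A tiny sketch with illustrative values:

    #include <stdio.h>

    int main(void)
    {
        unsigned long seg = 0xb800, eip = 0x0010; /* example vm86 CS:EIP */

        /* Real-mode style translation used by the vm86 early-out. */
        printf("linear address = %#lx\n", eip + (seg << 4)); /* 0xb8010 */
        return 0;
    }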
-
-/*
- * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
- * Check that here and ignore it.
- */
-static int __is_prefetch(struct pt_regs *regs, unsigned long addr)
-{
- unsigned long limit;
- unsigned long instr = get_segment_eip (regs, &limit);
- int scan_more = 1;
- int prefetch = 0;
- int i;
-
- for (i = 0; scan_more && i < 15; i++) {
- unsigned char opcode;
- unsigned char instr_hi;
- unsigned char instr_lo;
-
- if (instr > limit)
- break;
- if (__get_user(opcode, (unsigned char *) instr))
- break;
-
- instr_hi = opcode & 0xf0;
- instr_lo = opcode & 0x0f;
- instr++;
-
- switch (instr_hi) {
- case 0x20:
- case 0x30:
- /* Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes. */
- scan_more = ((instr_lo & 7) == 0x6);
- break;
-
- case 0x60:
- /* 0x64 thru 0x67 are valid prefixes in all modes. */
- scan_more = (instr_lo & 0xC) == 0x4;
- break;
- case 0xF0:
- /* 0xF0, 0xF2, and 0xF3 are valid prefixes */
- scan_more = !instr_lo || (instr_lo>>1) == 1;
- break;
- case 0x00:
- /* Prefetch instruction is 0x0F0D or 0x0F18 */
- scan_more = 0;
- if (instr > limit)
- break;
- if (__get_user(opcode, (unsigned char *) instr))
- break;
- prefetch = (instr_lo == 0xF) &&
- (opcode == 0x0D || opcode == 0x18);
- break;
- default:
- scan_more = 0;
- break;
- }
- }
- return prefetch;
-}
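The scan above is looking for the two-byte prefetch opcodes 0x0F 0x0D and 0x0F 0x18 behind any valid prefixes. A minimal check against a static byte sequence (the instruction bytes are an illustrative prefetchnta encoding):

    #include <stdio.h>

    int main(void)
    {
        unsigned char insn[] = { 0x0f, 0x18, 0x06 }; /* prefetchnta (%esi) */

        /* Same final test as __is_prefetch(): 0x0F 0x0D or 0x0F 0x18. */
        int prefetch = insn[0] == 0x0f &&
                       (insn[1] == 0x0d || insn[1] == 0x18);
        printf("prefetch: %s\n", prefetch ? "yes" : "no");
        return 0;
    }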
-
-static inline int is_prefetch(struct pt_regs *regs, unsigned long addr,
- unsigned long error_code)
-{
- if (unlikely(boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
- boot_cpu_data.x86 >= 6)) {
- /* Catch an obscure case of prefetch inside an NX page. */
- if (nx_enabled && (error_code & 16))
- return 0;
- return __is_prefetch(regs, addr);
- }
- return 0;
-}
-
-fastcall void do_invalid_op(struct pt_regs *, unsigned long);
-
-/*
- * This routine handles page faults. It determines the address,
- * and the problem, and then passes it off to one of the appropriate
- * routines.
- *
- * error_code:
- * bit 0 == 0 means no page found, 1 means protection fault
- * bit 1 == 0 means read, 1 means write
- * bit 2 == 0 means kernel, 1 means user-mode
- */
-fastcall void do_page_fault(struct pt_regs *regs, unsigned long error_code,
- unsigned long address)
-{
- struct task_struct *tsk;
- struct mm_struct *mm;
- struct vm_area_struct * vma;
- unsigned long page;
- int write;
- siginfo_t info;
-
- /* Set the "privileged fault" bit to something sane. */
- error_code &= 3;
- error_code |= (regs->xcs & 2) << 1;
- if (regs->eflags & X86_EFLAGS_VM)
- error_code |= 4;
-
- if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
- SIGSEGV) == NOTIFY_STOP)
- return;
-#if 0
- /* It's safe to allow irq's after cr2 has been saved */
- if (regs->eflags & (X86_EFLAGS_IF|VM_MASK))
- local_irq_enable();
-#endif
-
- tsk = current;
-
- info.si_code = SEGV_MAPERR;
-
- /*
- * We fault-in kernel-space virtual memory on-demand. The
- * 'reference' page table is init_mm.pgd.
- *
- * NOTE! We MUST NOT take any locks for this case. We may
- * be in an interrupt or a critical region, and should
- * only copy the information from the master page table,
- * nothing more.
- *
- * This verifies that the fault happens in kernel space
- * (error_code & 4) == 0, and that the fault was not a
- * protection error (error_code & 1) == 0.
- */
- if (unlikely(address >= TASK_SIZE)) {
- if (!(error_code & 5))
- goto vmalloc_fault;
- /*
- * Don't take the mm semaphore here. If we fixup a prefetch
- * fault we could otherwise deadlock.
- */
- goto bad_area_nosemaphore;
- }
-
- mm = tsk->mm;
-
- /*
- * If we're in an interrupt, have no user context or are running in an
- * atomic region then we must not take the fault..
- */
- if (in_atomic() || !mm)
- goto bad_area_nosemaphore;
-
- /* When running in the kernel we expect faults to occur only to
- * addresses in user space. All other faults represent errors in the
-	 * kernel and should generate an OOPS. Unfortunately, in the case of an
-	 * erroneous fault occurring in a code path which already holds mmap_sem
- * we will deadlock attempting to validate the fault against the
- * address space. Luckily the kernel only validly references user
- * space from well defined areas of code, which are listed in the
- * exceptions table.
- *
- * As the vast majority of faults will be valid we will only perform
-	 * the source reference check when there is a possibility of a deadlock.
- * Attempt to lock the address space, if we cannot we then validate the
- * source. If this is invalid we can skip the address space check,
- * thus avoiding the deadlock.
- */
- if (!down_read_trylock(&mm->mmap_sem)) {
- if ((error_code & 4) == 0 &&
- !search_exception_tables(regs->eip))
- goto bad_area_nosemaphore;
- down_read(&mm->mmap_sem);
- }
-
- vma = find_vma(mm, address);
- if (!vma)
- goto bad_area;
- if (vma->vm_start <= address)
- goto good_area;
- if (!(vma->vm_flags & VM_GROWSDOWN))
- goto bad_area;
- if (error_code & 4) {
- /*
- * accessing the stack below %esp is always a bug.
- * The "+ 32" is there due to some instructions (like
- * pusha) doing post-decrement on the stack and that
- * doesn't show up until later..
- */
- if (address + 32 < regs->esp)
- goto bad_area;
- }
- if (expand_stack(vma, address))
- goto bad_area;
-/*
- * Ok, we have a good vm_area for this memory access, so
- * we can handle it..
- */
-good_area:
- info.si_code = SEGV_ACCERR;
- write = 0;
- switch (error_code & 3) {
- default: /* 3: write, present */
-#ifdef TEST_VERIFY_AREA
- if (regs->cs == KERNEL_CS)
- printk("WP fault at %08lx\n", regs->eip);
-#endif
- /* fall through */
- case 2: /* write, not present */
- if (!(vma->vm_flags & VM_WRITE))
- goto bad_area;
- write++;
- break;
- case 1: /* read, present */
- goto bad_area;
- case 0: /* read, not present */
- if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
- goto bad_area;
- }
-
- survive:
- /*
- * If for any reason at all we couldn't handle the fault,
- * make sure we exit gracefully rather than endlessly redo
- * the fault.
- */
- switch (handle_mm_fault(mm, vma, address, write)) {
- case VM_FAULT_MINOR:
- tsk->min_flt++;
- break;
- case VM_FAULT_MAJOR:
- tsk->maj_flt++;
- break;
- case VM_FAULT_SIGBUS:
- goto do_sigbus;
- case VM_FAULT_OOM:
- goto out_of_memory;
- default:
- BUG();
- }
-
- /*
- * Did it hit the DOS screen memory VA from vm86 mode?
- */
- if (regs->eflags & VM_MASK) {
- unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
- if (bit < 32)
- tsk->thread.screen_bitmap |= 1 << bit;
- }
- up_read(&mm->mmap_sem);
- return;
-
-/*
- * Something tried to access memory that isn't in our memory map..
- * Fix it, but check if it's kernel or user first..
- */
-bad_area:
- up_read(&mm->mmap_sem);
-
-bad_area_nosemaphore:
- /* User mode accesses just cause a SIGSEGV */
- if (error_code & 4) {
- /*
- * Valid to do another page fault here because this one came
- * from user space.
- */
- if (is_prefetch(regs, address, error_code))
- return;
-
- tsk->thread.cr2 = address;
- /* Kernel addresses are always protection faults */
- tsk->thread.error_code = error_code | (address >= TASK_SIZE);
- tsk->thread.trap_no = 14;
- info.si_signo = SIGSEGV;
- info.si_errno = 0;
- /* info.si_code has been set above */
- info.si_addr = (void __user *)address;
- force_sig_info(SIGSEGV, &info, tsk);
- return;
- }
-
-#ifdef CONFIG_X86_F00F_BUG
- /*
- * Pentium F0 0F C7 C8 bug workaround.
- */
- if (boot_cpu_data.f00f_bug) {
- unsigned long nr;
-
- nr = (address - idt_descr.address) >> 3;
-
- if (nr == 6) {
- do_invalid_op(regs, 0);
- return;
- }
- }
-#endif
-
-no_context:
- /* Are we prepared to handle this kernel fault? */
- if (fixup_exception(regs))
- return;
-
- /*
- * Valid to do another page fault here, because if this fault
- * had been triggered by is_prefetch fixup_exception would have
- * handled it.
- */
- if (is_prefetch(regs, address, error_code))
- return;
-
-/*
- * Oops. The kernel tried to access some bad page. We'll have to
- * terminate things with extreme prejudice.
- */
-
- bust_spinlocks(1);
-
-#ifdef CONFIG_X86_PAE
- if (error_code & 16) {
- pte_t *pte = lookup_address(address);
-
- if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
-			printk(KERN_CRIT "kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n", current->uid);
- }
-#endif
- if (address < PAGE_SIZE)
-		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
- else
- printk(KERN_ALERT "Unable to handle kernel paging request");
- printk(" at virtual address %08lx\n",address);
- printk(KERN_ALERT " printing eip:\n");
- printk("%08lx\n", regs->eip);
- page = ((unsigned long *) cur_pgd)[address >> 22];
- printk(KERN_ALERT "*pde = ma %08lx pa %08lx\n", page,
- machine_to_phys(page));
- /*
- * We must not directly access the pte in the highpte
- * case, the page table might be allocated in highmem.
- * And lets rather not kmap-atomic the pte, just in case
- * it's allocated already.
- */
-#ifndef CONFIG_HIGHPTE
- if (page & 1) {
- page &= PAGE_MASK;
- address &= 0x003ff000;
- page = machine_to_phys(page);
- page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
- printk(KERN_ALERT "*pte = ma %08lx pa %08lx\n", page,
- machine_to_phys(page));
- }
-#endif
-	show_trace(NULL, (unsigned long *)&regs[1]);
- die("Oops", regs, error_code);
- bust_spinlocks(0);
- do_exit(SIGKILL);
-
-/*
- * We ran out of memory, or some other thing happened to us that made
- * us unable to handle the page fault gracefully.
- */
-out_of_memory:
- up_read(&mm->mmap_sem);
- if (tsk->pid == 1) {
- yield();
- down_read(&mm->mmap_sem);
- goto survive;
- }
- printk("VM: killing process %s\n", tsk->comm);
- if (error_code & 4)
- do_exit(SIGKILL);
- goto no_context;
-
-do_sigbus:
- up_read(&mm->mmap_sem);
-
- /* Kernel mode? Handle exceptions or die */
- if (!(error_code & 4))
- goto no_context;
-
- /* User space => ok to do another page fault */
- if (is_prefetch(regs, address, error_code))
- return;
-
- tsk->thread.cr2 = address;
- tsk->thread.error_code = error_code;
- tsk->thread.trap_no = 14;
- info.si_signo = SIGBUS;
- info.si_errno = 0;
- info.si_code = BUS_ADRERR;
- info.si_addr = (void __user *)address;
- force_sig_info(SIGBUS, &info, tsk);
- return;
-
-vmalloc_fault:
- {
- /*
- * Synchronize this task's top level page-table
- * with the 'reference' page table.
- *
- * Do _not_ use "tsk" here. We might be inside
- * an interrupt in the middle of a task switch..
- */
- int index = pgd_index(address);
- pgd_t *pgd, *pgd_k;
- pud_t *pud, *pud_k;
- pmd_t *pmd, *pmd_k;
- pte_t *pte_k;
-
- pgd = index + cur_pgd;
- pgd_k = init_mm.pgd + index;
-
- if (!pgd_present(*pgd_k))
- goto no_context;
-
- /*
- * set_pgd(pgd, *pgd_k); here would be useless on PAE
- * and redundant with the set_pmd() on non-PAE. As would
- * set_pud.
- */
-
- pud = pud_offset(pgd, address);
- pud_k = pud_offset(pgd_k, address);
- if (!pud_present(*pud_k))
- goto no_context;
-
- pmd = pmd_offset(pud, address);
- pmd_k = pmd_offset(pud_k, address);
- if (!pmd_present(*pmd_k))
- goto no_context;
- set_pmd(pmd, *pmd_k);
- xen_flush_page_update_queue(); /* flush PMD update */
-
- pte_k = pte_offset_kernel(pmd_k, address);
- if (!pte_present(*pte_k))
- goto no_context;
- return;
- }
-}
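The error_code normalisation at the top of do_page_fault() packs three facts into the low bits, as the comment before the function spells out: bit 0 protection fault, bit 1 write, bit 2 user mode. A stand-alone decoder for an example value:

    #include <stdio.h>

    int main(void)
    {
        unsigned long error_code = 6; /* example: user-mode write to a
                                         not-present page */

        printf("%s %s access, %s fault\n",
               (error_code & 4) ? "user" : "kernel",
               (error_code & 2) ? "write" : "read",
               (error_code & 1) ? "protection" : "not-present");
        return 0;
    }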
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/arch/xen/i386/mm/highmem.c
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/mm/highmem.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,100 +0,0 @@
-#include <linux/highmem.h>
-
-void *kmap(struct page *page)
-{
- might_sleep();
- if (!PageHighMem(page))
- return page_address(page);
- return kmap_high(page);
-}
-
-void kunmap(struct page *page)
-{
- if (in_interrupt())
- BUG();
- if (!PageHighMem(page))
- return;
- kunmap_high(page);
-}
-
-/*
- * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
- * no global lock is needed and because the kmap code must perform a global TLB
- * invalidation when the kmap pool wraps.
- *
- * However when holding an atomic kmap it is not legal to sleep, so atomic
- * kmaps are appropriate for short, tight code paths only.
- */
-static void *__kmap_atomic(struct page *page, enum km_type type, pgprot_t prot)
-{
- enum fixed_addresses idx;
- unsigned long vaddr;
-
- /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
- inc_preempt_count();
- if (!PageHighMem(page))
- return page_address(page);
-
- idx = type + KM_TYPE_NR*smp_processor_id();
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-#ifdef CONFIG_DEBUG_HIGHMEM
- if (!pte_none(*(kmap_pte-idx)))
- BUG();
-#endif
- set_pte(kmap_pte-idx, mk_pte(page, prot));
- __flush_tlb_one(vaddr);
-
- return (void*) vaddr;
-}
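Each CPU owns a private window of KM_TYPE_NR fixmap slots, so the slot index computed above never collides across CPUs. Illustrative arithmetic (the KM_TYPE_NR value is an assumed example):

    #include <stdio.h>

    #define KM_TYPE_NR 14 /* assumed slot count per CPU */

    int main(void)
    {
        int type = 2, cpu = 1; /* example kmap type and CPU number */

        /* Same slot selection as __kmap_atomic() above. */
        printf("fixmap slot = %d\n", type + KM_TYPE_NR * cpu);
        return 0;
    }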
-
-void *kmap_atomic(struct page *page, enum km_type type)
-{
- return __kmap_atomic(page, type, kmap_prot);
-}
-
-/* Same as kmap_atomic but with PAGE_KERNEL_RO page protection. */
-void *kmap_atomic_pte(struct page *page, enum km_type type)
-{
- return __kmap_atomic(page, type, PAGE_KERNEL_RO);
-}
-
-void kunmap_atomic(void *kvaddr, enum km_type type)
-{
-#ifdef CONFIG_DEBUG_HIGHMEM
- unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
- enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
-
- if (vaddr < FIXADDR_START) { // FIXME
- dec_preempt_count();
- preempt_check_resched();
- return;
- }
-
- if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
- BUG();
-
- /*
- * force other mappings to Oops if they'll try to access
-	 * this pte without first remapping it
- */
- pte_clear(kmap_pte-idx);
- __flush_tlb_one(vaddr);
-#endif
-
- dec_preempt_count();
- preempt_check_resched();
-}
-
-struct page *kmap_atomic_to_page(void *ptr)
-{
- unsigned long idx, vaddr = (unsigned long)ptr;
- pte_t *pte;
-
- if (vaddr < FIXADDR_START)
- return virt_to_page(ptr);
-
- idx = virt_to_fix(vaddr);
- pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
- return pte_page(*pte);
-}
-
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/arch/xen/i386/mm/hypervisor.c
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/mm/hypervisor.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,395 +0,0 @@
-/******************************************************************************
- * mm/hypervisor.c
- *
- * Update page tables via the hypervisor.
- *
- * Copyright (c) 2002-2004, K A Fraser
- *
- * This file may be distributed separately from the Linux kernel, or
- * incorporated into other software packages, subject to the following license:
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#include <linux/config.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/vmalloc.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm-xen/hypervisor.h>
-#include <asm-xen/multicall.h>
-#include <asm-xen/balloon.h>
-
-/*
- * This suffices to protect us if we ever move to SMP domains.
- * Further, it protects us against interrupts. At the very least, this is
- * required for the network driver which flushes the update queue before
- * pushing new receive buffers.
- */
-static spinlock_t update_lock = SPIN_LOCK_UNLOCKED;
-
-/* Linux 2.6 isn't using the traditional batched interface. */
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
-#define QUEUE_SIZE 2048
-#define pte_offset_kernel pte_offset
-#define pmd_val_ma(v) (v).pmd;
-#define pud_t pgd_t
-#define pud_offset(d, va) d
-#else
-#define QUEUE_SIZE 128
-#define pmd_val_ma(v) (v).pud.pgd.pgd;
-#endif
-
-static mmu_update_t update_queue[QUEUE_SIZE];
-unsigned int mmu_update_queue_idx = 0;
-#define idx mmu_update_queue_idx
-
-/*
- * MULTICALL_flush_page_update_queue:
- * This is a version of the flush which queues as part of a multicall.
- */
-void MULTICALL_flush_page_update_queue(void)
-{
- unsigned long flags;
- unsigned int _idx;
- spin_lock_irqsave(&update_lock, flags);
- if ( (_idx = idx) != 0 )
- {
- idx = 0;
- wmb(); /* Make sure index is cleared first to avoid double updates. */
- queue_multicall3(__HYPERVISOR_mmu_update,
- (unsigned long)update_queue,
- (unsigned long)_idx,
- (unsigned long)NULL);
- }
- spin_unlock_irqrestore(&update_lock, flags);
-}
-
-static inline void __flush_page_update_queue(void)
-{
- unsigned int _idx = idx;
- idx = 0;
- wmb(); /* Make sure index is cleared first to avoid double updates. */
- if ( unlikely(HYPERVISOR_mmu_update(update_queue, _idx, NULL) < 0) )
- {
- printk(KERN_ALERT "Failed to execute MMU updates.\n");
- BUG();
- }
-}
-
-void _flush_page_update_queue(void)
-{
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- if ( idx != 0 ) __flush_page_update_queue();
- spin_unlock_irqrestore(&update_lock, flags);
-}
-
-static inline void increment_index(void)
-{
- idx++;
- if ( unlikely(idx == QUEUE_SIZE) ) __flush_page_update_queue();
-}
-
-static inline void increment_index_and_flush(void)
-{
- idx++;
- __flush_page_update_queue();
-}
-
-void queue_l1_entry_update(pte_t *ptr, unsigned long val)
-{
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = virt_to_machine(ptr);
- update_queue[idx].val = val;
- increment_index();
- spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void queue_l2_entry_update(pmd_t *ptr, pmd_t val)
-{
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = virt_to_machine(ptr);
- update_queue[idx].val = pmd_val_ma(val);
- increment_index();
- spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void queue_pt_switch(unsigned long ptr)
-{
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = phys_to_machine(ptr);
- update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
- update_queue[idx].val = MMUEXT_NEW_BASEPTR;
- increment_index();
- spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void queue_tlb_flush(void)
-{
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = MMU_EXTENDED_COMMAND;
- update_queue[idx].val = MMUEXT_TLB_FLUSH;
- increment_index();
- spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void queue_invlpg(unsigned long ptr)
-{
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = MMU_EXTENDED_COMMAND;
- update_queue[idx].ptr |= ptr & PAGE_MASK;
- update_queue[idx].val = MMUEXT_INVLPG;
- increment_index();
- spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void queue_pgd_pin(unsigned long ptr)
-{
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = phys_to_machine(ptr);
- update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
- update_queue[idx].val = MMUEXT_PIN_L2_TABLE;
- increment_index();
- spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void queue_pgd_unpin(unsigned long ptr)
-{
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = phys_to_machine(ptr);
- update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
- update_queue[idx].val = MMUEXT_UNPIN_TABLE;
- increment_index();
- spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void queue_pte_pin(unsigned long ptr)
-{
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = phys_to_machine(ptr);
- update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
- update_queue[idx].val = MMUEXT_PIN_L1_TABLE;
- increment_index();
- spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void queue_pte_unpin(unsigned long ptr)
-{
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = phys_to_machine(ptr);
- update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
- update_queue[idx].val = MMUEXT_UNPIN_TABLE;
- increment_index();
- spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void queue_set_ldt(unsigned long ptr, unsigned long len)
-{
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = MMU_EXTENDED_COMMAND | ptr;
- update_queue[idx].val = MMUEXT_SET_LDT | (len << MMUEXT_CMD_SHIFT);
- increment_index();
- spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void queue_machphys_update(unsigned long mfn, unsigned long pfn)
-{
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = (mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
- update_queue[idx].val = pfn;
- increment_index();
- spin_unlock_irqrestore(&update_lock, flags);
-}
-
-/* queue and flush versions of the above */
-void xen_l1_entry_update(pte_t *ptr, unsigned long val)
-{
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = virt_to_machine(ptr);
- update_queue[idx].val = val;
- increment_index_and_flush();
- spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void xen_l2_entry_update(pmd_t *ptr, pmd_t val)
-{
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = virt_to_machine(ptr);
- update_queue[idx].val = pmd_val_ma(val);
- increment_index_and_flush();
- spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void xen_pt_switch(unsigned long ptr)
-{
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = phys_to_machine(ptr);
- update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
- update_queue[idx].val = MMUEXT_NEW_BASEPTR;
- increment_index_and_flush();
- spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void xen_tlb_flush(void)
-{
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = MMU_EXTENDED_COMMAND;
- update_queue[idx].val = MMUEXT_TLB_FLUSH;
- increment_index_and_flush();
- spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void xen_invlpg(unsigned long ptr)
-{
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = MMU_EXTENDED_COMMAND;
- update_queue[idx].ptr |= ptr & PAGE_MASK;
- update_queue[idx].val = MMUEXT_INVLPG;
- increment_index_and_flush();
- spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void xen_pgd_pin(unsigned long ptr)
-{
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = phys_to_machine(ptr);
- update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
- update_queue[idx].val = MMUEXT_PIN_L2_TABLE;
- increment_index_and_flush();
- spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void xen_pgd_unpin(unsigned long ptr)
-{
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = phys_to_machine(ptr);
- update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
- update_queue[idx].val = MMUEXT_UNPIN_TABLE;
- increment_index_and_flush();
- spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void xen_pte_pin(unsigned long ptr)
-{
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = phys_to_machine(ptr);
- update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
- update_queue[idx].val = MMUEXT_PIN_L1_TABLE;
- increment_index_and_flush();
- spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void xen_pte_unpin(unsigned long ptr)
-{
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = phys_to_machine(ptr);
- update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
- update_queue[idx].val = MMUEXT_UNPIN_TABLE;
- increment_index_and_flush();
- spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void xen_set_ldt(unsigned long ptr, unsigned long len)
-{
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = MMU_EXTENDED_COMMAND | ptr;
- update_queue[idx].val = MMUEXT_SET_LDT | (len << MMUEXT_CMD_SHIFT);
- increment_index_and_flush();
- spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void xen_machphys_update(unsigned long mfn, unsigned long pfn)
-{
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = (mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
- update_queue[idx].val = pfn;
- increment_index_and_flush();
- spin_unlock_irqrestore(&update_lock, flags);
-}
-
-#ifdef CONFIG_XEN_PHYSDEV_ACCESS
-
-unsigned long allocate_empty_lowmem_region(unsigned long pages)
-{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
- unsigned long *pfn_array;
- unsigned long vstart;
- unsigned long i;
- unsigned int order = get_order(pages*PAGE_SIZE);
-
- vstart = __get_free_pages(GFP_KERNEL, order);
- if ( vstart == 0 )
- return 0UL;
-
- scrub_pages(vstart, 1 << order);
-
- pfn_array = vmalloc((1<<order) * sizeof(*pfn_array));
- if ( pfn_array == NULL )
- BUG();
-
- for ( i = 0; i < (1<<order); i++ )
- {
- pgd = pgd_offset_k( (vstart + (i*PAGE_SIZE)));
- pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
- pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
- pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
- pfn_array[i] = pte->pte_low >> PAGE_SHIFT;
- queue_l1_entry_update(pte, 0);
- phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
- INVALID_P2M_ENTRY;
- }
-
- /* Flush updates through and flush the TLB. */
- xen_tlb_flush();
-
- balloon_put_pages(pfn_array, 1 << order);
-
- vfree(pfn_array);
-
- return vstart;
-}
-
-#endif /* CONFIG_XEN_PHYSDEV_ACCESS */
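The file removed above implements Xen's batched page-table-update interface: writers append mmu_update_t entries to update_queue and the queue is drained by a single HYPERVISOR_mmu_update hypercall. A minimal usage sketch, not part of the patch; the arrays 'ptes', 'vals' and the count 'n' are hypothetical caller state:

    /* Batch several L1 updates, then issue one hypercall for the lot. */
    static void example_batched_update(pte_t **ptes, unsigned long *vals, int n)
    {
        int i;
        for (i = 0; i < n; i++)
            queue_l1_entry_update(ptes[i], vals[i]); /* auto-flushes if the queue fills */
        _flush_page_update_queue(); /* defined above; one mmu_update for the whole batch */
    }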
diff -r 30ecae4339d5 -r fa660d79f695
linux-2.6.11-xen-sparse/arch/xen/i386/mm/init.c
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/mm/init.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,804 +0,0 @@
-/*
- * linux/arch/i386/mm/init.c
- *
- * Copyright (C) 1995 Linus Torvalds
- *
- * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
- */
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/hugetlb.h>
-#include <linux/swap.h>
-#include <linux/smp.h>
-#include <linux/init.h>
-#include <linux/highmem.h>
-#include <linux/pagemap.h>
-#include <linux/bootmem.h>
-#include <linux/slab.h>
-#include <linux/proc_fs.h>
-#include <linux/efi.h>
-
-#include <asm/processor.h>
-#include <asm/system.h>
-#include <asm/uaccess.h>
-#include <asm/pgtable.h>
-#include <asm/dma.h>
-#include <asm/fixmap.h>
-#include <asm/e820.h>
-#include <asm/apic.h>
-#include <asm/tlb.h>
-#include <asm/tlbflush.h>
-#include <asm/sections.h>
-#include <asm-xen/hypervisor.h>
-
-unsigned int __VMALLOC_RESERVE = 128 << 20;
-
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-unsigned long highstart_pfn, highend_pfn;
-
-static int noinline do_test_wp_bit(void);
-
-/*
- * Creates a middle page table and puts a pointer to it in the
- * given global directory entry. This only returns the gd entry
- * in non-PAE compilation mode, since the middle layer is folded.
- */
-static pmd_t * __init one_md_table_init(pgd_t *pgd)
-{
- pud_t *pud;
- pmd_t *pmd_table;
-
-#ifdef CONFIG_X86_PAE
- pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
- pud = pud_offset(pgd, 0);
- if (pmd_table != pmd_offset(pud, 0))
- BUG();
-#else
- pud = pud_offset(pgd, 0);
- pmd_table = pmd_offset(pud, 0);
-#endif
-
- return pmd_table;
-}
-
-/*
- * Create a page table and place a pointer to it in a middle page
- * directory entry.
- */
-static pte_t * __init one_page_table_init(pmd_t *pmd)
-{
- if (pmd_none(*pmd)) {
- pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
- make_page_readonly(page_table);
- set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
- if (page_table != pte_offset_kernel(pmd, 0))
- BUG();
-
- return page_table;
- }
-
- return pte_offset_kernel(pmd, 0);
-}
-
-/*
- * This function initializes a certain range of kernel virtual memory
- * with new bootmem page tables, everywhere page tables are missing in
- * the given range.
- */
-
-/*
- * NOTE: The pagetables are allocated contiguous on the physical space
- * so we can cache the place of the first one and move around without
- * checking the pgd every time.
- */
-static void __init page_table_range_init (unsigned long start, unsigned long end, pgd_t *pgd_base)
-{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- int pgd_idx, pmd_idx;
- unsigned long vaddr;
-
- vaddr = start;
- pgd_idx = pgd_index(vaddr);
- pmd_idx = pmd_index(vaddr);
- pgd = pgd_base + pgd_idx;
-
- for ( ; (pgd_idx < PTRS_PER_PGD_NO_HV) && (vaddr != end); pgd++, pgd_idx++) {
- if (pgd_none(*pgd))
- one_md_table_init(pgd);
- pud = pud_offset(pgd, vaddr);
- pmd = pmd_offset(pud, vaddr);
- for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) {
- if (pmd_none(*pmd))
- one_page_table_init(pmd);
-
- vaddr += PMD_SIZE;
- }
- pmd_idx = 0;
- }
-}
-
-static inline int is_kernel_text(unsigned long addr)
-{
- if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
- return 1;
- return 0;
-}
-
-/*
- * This maps the physical memory to kernel virtual address space, a total
- * of max_low_pfn pages, by creating page tables starting from address
- * PAGE_OFFSET.
- */
-static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
-{
- unsigned long pfn;
- pgd_t *pgd;
- pmd_t *pmd;
- pte_t *pte;
- int pgd_idx, pmd_idx, pte_ofs;
-
- unsigned long max_ram_pfn = xen_start_info.nr_pages;
- if (max_ram_pfn > max_low_pfn)
- max_ram_pfn = max_low_pfn;
-
- pgd_idx = pgd_index(PAGE_OFFSET);
- pgd = pgd_base + pgd_idx;
- pfn = 0;
- pmd_idx = pmd_index(PAGE_OFFSET);
- pte_ofs = pte_index(PAGE_OFFSET);
-
- for (; pgd_idx < PTRS_PER_PGD_NO_HV; pgd++, pgd_idx++) {
- pmd = one_md_table_init(pgd);
- if (pfn >= max_low_pfn)
- continue;
- pmd += pmd_idx;
- for (; pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn; pmd++, pmd_idx++) {
- unsigned int address = pfn * PAGE_SIZE + PAGE_OFFSET;
-
- /* Map with big pages if possible, otherwise create normal page tables. */
- if (cpu_has_pse) {
- unsigned int address2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE + PAGE_OFFSET + PAGE_SIZE-1;
-
- if (is_kernel_text(address) || is_kernel_text(address2))
- set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
- else
- set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE));
- pfn += PTRS_PER_PTE;
- } else {
- pte = one_page_table_init(pmd);
-
- pte += pte_ofs;
- for (; pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn; pte++, pfn++, pte_ofs++) {
- /* XEN: Only map initial RAM allocation. */
- if ((pfn >= max_ram_pfn) || pte_present(*pte))
- continue;
- if (is_kernel_text(address))
- set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
- else
- set_pte(pte, pfn_pte(pfn, PAGE_KERNEL));
- }
- pte_ofs = 0;
- }
- flush_page_update_queue();
- }
- pmd_idx = 0;
- }
-}
-
-static inline int page_kills_ppro(unsigned long pagenr)
-{
- if (pagenr >= 0x70000 && pagenr <= 0x7003F)
- return 1;
- return 0;
-}
-
-extern int is_available_memory(efi_memory_desc_t *);
-
-static inline int page_is_ram(unsigned long pagenr)
-{
- int i;
- unsigned long addr, end;
-
- if (efi_enabled) {
- efi_memory_desc_t *md;
-
- for (i = 0; i < memmap.nr_map; i++) {
- md = &memmap.map[i];
- if (!is_available_memory(md))
- continue;
- addr = (md->phys_addr+PAGE_SIZE-1) >> PAGE_SHIFT;
- end = (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >> PAGE_SHIFT;
-
- if ((pagenr >= addr) && (pagenr < end))
- return 1;
- }
- return 0;
- }
-
- for (i = 0; i < e820.nr_map; i++) {
-
- if (e820.map[i].type != E820_RAM) /* not usable memory */
- continue;
- /*
- * !!!FIXME!!! Some BIOSen report areas as RAM that
- * are not. Notably the 640->1Mb area. We need a sanity
- * check here.
- */
- addr = (e820.map[i].addr+PAGE_SIZE-1) >> PAGE_SHIFT;
- end = (e820.map[i].addr+e820.map[i].size) >> PAGE_SHIFT;
- if ((pagenr >= addr) && (pagenr < end))
- return 1;
- }
- return 0;
-}
-
-#ifdef CONFIG_HIGHMEM
-pte_t *kmap_pte;
-pgprot_t kmap_prot;
-
-EXPORT_SYMBOL(kmap_prot);
-EXPORT_SYMBOL(kmap_pte);
-
-#define kmap_get_fixmap_pte(vaddr) \
- pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), (vaddr)), (vaddr))
-
-void __init kmap_init(void)
-{
- unsigned long kmap_vstart;
-
- /* cache the first kmap pte */
- kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
- kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
-
- kmap_prot = PAGE_KERNEL;
-}
-
-void __init permanent_kmaps_init(pgd_t *pgd_base)
-{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
- unsigned long vaddr;
-
- vaddr = PKMAP_BASE;
- page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
-
- pgd = swapper_pg_dir + pgd_index(vaddr);
- pud = pud_offset(pgd, vaddr);
- pmd = pmd_offset(pud, vaddr);
- pte = pte_offset_kernel(pmd, vaddr);
- pkmap_page_table = pte;
-}
-
-void __init one_highpage_init(struct page *page, int pfn, int bad_ppro)
-{
- if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
- ClearPageReserved(page);
- set_bit(PG_highmem, &page->flags);
- set_page_count(page, 1);
- if (pfn < xen_start_info.nr_pages)
- __free_page(page);
- totalhigh_pages++;
- } else
- SetPageReserved(page);
-}
-
-#ifndef CONFIG_DISCONTIGMEM
-void __init set_highmem_pages_init(int bad_ppro)
-{
- int pfn;
- for (pfn = highstart_pfn; pfn < highend_pfn; pfn++)
- one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
- totalram_pages += totalhigh_pages;
-}
-#else
-extern void set_highmem_pages_init(int);
-#endif /* !CONFIG_DISCONTIGMEM */
-
-#else
-#define kmap_init() do { } while (0)
-#define permanent_kmaps_init(pgd_base) do { } while (0)
-#define set_highmem_pages_init(bad_ppro) do { } while (0)
-#endif /* CONFIG_HIGHMEM */
-
-unsigned long long __PAGE_KERNEL = _PAGE_KERNEL;
-unsigned long long __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;
-
-#ifndef CONFIG_DISCONTIGMEM
-#define remap_numa_kva() do {} while (0)
-#else
-extern void __init remap_numa_kva(void);
-#endif
-
-static void __init pagetable_init (void)
-{
- unsigned long vaddr;
- pgd_t *pgd_base = swapper_pg_dir;
- pgd_t *old_pgd = (pgd_t *)xen_start_info.pt_base;
-
-#ifdef CONFIG_X86_PAE
- int i;
- /* Init entries of the first-level page table to the zero page */
- for (i = 0; i < PTRS_PER_PGD; i++)
- set_pgd(pgd_base + i, __pgd(__pa(empty_zero_page) | _PAGE_PRESENT));
-#endif
-
- /* Enable PSE if available */
- if (cpu_has_pse) {
- set_in_cr4(X86_CR4_PSE);
- }
-
- /* Enable PGE if available */
- if (cpu_has_pge) {
- set_in_cr4(X86_CR4_PGE);
- __PAGE_KERNEL |= _PAGE_GLOBAL;
- __PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
- }
-
- /*
- * Switch to proper mm_init page directory. Initialise from the current
- * page directory, write-protect the new page directory, then switch to
- * it. We clean up by write-enabling and then freeing the old page dir.
- */
- memcpy(pgd_base, old_pgd, PTRS_PER_PGD_NO_HV*sizeof(pgd_t));
- make_page_readonly(pgd_base);
- queue_pgd_pin(__pa(pgd_base));
- load_cr3(pgd_base);
- queue_pgd_unpin(__pa(old_pgd));
- make_page_writable(old_pgd);
- __flush_tlb_all();
- free_bootmem(__pa(old_pgd), PAGE_SIZE);
-
- kernel_physical_mapping_init(pgd_base);
- remap_numa_kva();
-
- /*
- * Fixed mappings, only the page table structure has to be
- * created - mappings will be set by set_fixmap():
- */
- vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
- page_table_range_init(vaddr, 0, pgd_base);
-
- permanent_kmaps_init(pgd_base);
-
-#ifdef CONFIG_X86_PAE
- /*
- * Add low memory identity-mappings - SMP needs it when
- * starting up on an AP from real-mode. In the non-PAE
- * case we already have these mappings through head.S.
- * All user-space mappings are explicitly cleared after
- * SMP startup.
- */
- pgd_base[0] = pgd_base[USER_PTRS_PER_PGD];
-#endif
-}
-
-#if defined(CONFIG_PM_DISK) || defined(CONFIG_SOFTWARE_SUSPEND)
-/*
- * Swap suspend & friends need this for resume because things like the intel-agp
- * driver might have split up a kernel 4MB mapping.
- */
-char __nosavedata swsusp_pg_dir[PAGE_SIZE]
- __attribute__ ((aligned (PAGE_SIZE)));
-
-static inline void save_pg_dir(void)
-{
- memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
-}
-#else
-static inline void save_pg_dir(void)
-{
-}
-#endif
-
-void zap_low_mappings (void)
-{
- int i;
-
- save_pg_dir();
-
- /*
- * Zap initial low-memory mappings.
- *
- * Note that "pgd_clear()" doesn't do it for
- * us, because pgd_clear() is a no-op on i386.
- */
- for (i = 0; i < USER_PTRS_PER_PGD; i++)
-#ifdef CONFIG_X86_PAE
- set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
-#else
- set_pgd(swapper_pg_dir+i, __pgd(0));
-#endif
- flush_tlb_all();
-}
-
-#ifndef CONFIG_DISCONTIGMEM
-void __init zone_sizes_init(void)
-{
- unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
- unsigned int /*max_dma,*/ high, low;
-
- /*
- * XEN: Our notion of "DMA memory" is fake when running over Xen.
- * We simply put all RAM in the DMA zone so that those drivers which
- * needlessly specify GFP_DMA do not get starved of RAM unnecessarily.
- * Those drivers that *do* require lowmem are screwed anyway when
- * running over Xen!
- */
- /*max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;*/
- low = max_low_pfn;
- high = highend_pfn;
-
- /*if (low < max_dma)*/
- zones_size[ZONE_DMA] = low;
- /*else*/ {
- /*zones_size[ZONE_DMA] = max_dma;*/
- /*zones_size[ZONE_NORMAL] = low - max_dma;*/
-#ifdef CONFIG_HIGHMEM
- zones_size[ZONE_HIGHMEM] = high - low;
-#endif
- }
- free_area_init(zones_size);
-}
-#else
-extern void zone_sizes_init(void);
-#endif /* !CONFIG_DISCONTIGMEM */
-
-static int disable_nx __initdata = 0;
-u64 __supported_pte_mask = ~_PAGE_NX;
-
-/*
- * noexec = on|off
- *
- * Control non executable mappings.
- *
- * on Enable
- * off Disable
- */
-void __init noexec_setup(const char *str)
-{
- if (!strncmp(str, "on",2) && cpu_has_nx) {
- __supported_pte_mask |= _PAGE_NX;
- disable_nx = 0;
- } else if (!strncmp(str,"off",3)) {
- disable_nx = 1;
- __supported_pte_mask &= ~_PAGE_NX;
- }
-}
-
-int nx_enabled = 0;
-#ifdef CONFIG_X86_PAE
-
-static void __init set_nx(void)
-{
- unsigned int v[4], l, h;
-
- if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
- cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
- if ((v[3] & (1 << 20)) && !disable_nx) {
- rdmsr(MSR_EFER, l, h);
- l |= EFER_NX;
- wrmsr(MSR_EFER, l, h);
- nx_enabled = 1;
- __supported_pte_mask |= _PAGE_NX;
- }
- }
-}
-
-/*
- * Enables/disables executability of a given kernel page and
- * returns the previous setting.
- */
-int __init set_kernel_exec(unsigned long vaddr, int enable)
-{
- pte_t *pte;
- int ret = 1;
-
- if (!nx_enabled)
- goto out;
-
- pte = lookup_address(vaddr);
- BUG_ON(!pte);
-
- if (!pte_exec_kernel(*pte))
- ret = 0;
-
- if (enable)
- pte->pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
- else
- pte->pte_high |= 1 << (_PAGE_BIT_NX - 32);
- __flush_tlb_all();
-out:
- return ret;
-}
-
-#endif
-
-/*
- * paging_init() sets up the page tables - note that the first 8MB are
- * already mapped by head.S.
- *
- * This routines also unmaps the page at virtual kernel address 0, so
- * that we can trap those pesky NULL-reference errors in the kernel.
- */
-void __init paging_init(void)
-{
-#ifdef CONFIG_XEN_PHYSDEV_ACCESS
- int i;
-#endif
-
-#ifdef CONFIG_X86_PAE
- set_nx();
- if (nx_enabled)
- printk("NX (Execute Disable) protection: active\n");
-#endif
-
- pagetable_init();
-
-#ifdef CONFIG_X86_PAE
- /*
- * We will bail out later - printk doesn't work right now so
- * the user would just see a hanging kernel.
- */
- if (cpu_has_pae)
- set_in_cr4(X86_CR4_PAE);
-#endif
- __flush_tlb_all();
-
- kmap_init();
- zone_sizes_init();
-
- /* Switch to the real shared_info page, and clear the dummy page. */
- flush_page_update_queue();
- set_fixmap_ma(FIX_SHARED_INFO, xen_start_info.shared_info);
- HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
- memset(empty_zero_page, 0, sizeof(empty_zero_page));
-
-#ifdef CONFIG_XEN_PHYSDEV_ACCESS
- /* Setup mapping of lower 1st MB */
- for (i = 0; i < NR_FIX_ISAMAPS; i++)
- if (xen_start_info.flags & SIF_PRIVILEGED)
- set_fixmap_ma(FIX_ISAMAP_BEGIN - i, i * PAGE_SIZE);
- else
- set_fixmap_ma_ro(FIX_ISAMAP_BEGIN - i,
- virt_to_machine(empty_zero_page));
-#endif
-}
-
-/*
- * Test if the WP bit works in supervisor mode. It isn't supported on 386's
- * and also on some strange 486's (NexGen etc.). All 586+'s are OK. This
- * used to involve black magic jumps to work around some nasty CPU bugs,
- * but fortunately the switch to using exceptions got rid of all that.
- */
-
-void __init test_wp_bit(void)
-{
- printk("Checking if this processor honours the WP bit even in
supervisor mode... ");
-
- /* Any page-aligned address will do, the test is non-destructive */
- __set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
- boot_cpu_data.wp_works_ok = do_test_wp_bit();
- clear_fixmap(FIX_WP_TEST);
-
- if (!boot_cpu_data.wp_works_ok) {
- printk("No.\n");
-#ifdef CONFIG_X86_WP_WORKS_OK
- panic("This kernel doesn't support CPU's with broken WP.
Recompile it for a 386!");
-#endif
- } else {
- printk("Ok.\n");
- }
-}
-
-#ifndef CONFIG_DISCONTIGMEM
-static void __init set_max_mapnr_init(void)
-{
-#ifdef CONFIG_HIGHMEM
- max_mapnr = num_physpages = highend_pfn;
-#else
- max_mapnr = num_physpages = max_low_pfn;
-#endif
-}
-#define __free_all_bootmem() free_all_bootmem()
-#else
-#define __free_all_bootmem() free_all_bootmem_node(NODE_DATA(0))
-extern void set_max_mapnr_init(void);
-#endif /* !CONFIG_DISCONTIGMEM */
-
-static struct kcore_list kcore_mem, kcore_vmalloc;
-
-void __init mem_init(void)
-{
- extern int ppro_with_ram_bug(void);
- int codesize, reservedpages, datasize, initsize;
- int tmp;
- int bad_ppro;
- unsigned long pfn;
-
-#ifndef CONFIG_DISCONTIGMEM
- if (!mem_map)
- BUG();
-#endif
-
- bad_ppro = ppro_with_ram_bug();
-
-#ifdef CONFIG_HIGHMEM
- /* check that fixmap and pkmap do not overlap */
- if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
- printk(KERN_ERR "fixmap and kmap areas overlap - this will
crash\n");
- printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
- PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
FIXADDR_START);
- BUG();
- }
-#endif
-
- set_max_mapnr_init();
-
-#ifdef CONFIG_HIGHMEM
- high_memory = (void *) __va(highstart_pfn * PAGE_SIZE);
-#else
- high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
-#endif
- printk("vmalloc area: %lx-%lx, maxmem %lx\n",
- VMALLOC_START,VMALLOC_END,MAXMEM);
- BUG_ON(VMALLOC_START > VMALLOC_END);
-
- /* this will put all low memory onto the freelists */
- totalram_pages += __free_all_bootmem();
- /* XEN: init and count low-mem pages outside initial allocation. */
- for (pfn = xen_start_info.nr_pages; pfn < max_low_pfn; pfn++) {
- ClearPageReserved(&mem_map[pfn]);
- set_page_count(&mem_map[pfn], 1);
- totalram_pages++;
- }
-
- reservedpages = 0;
- for (tmp = 0; tmp < max_low_pfn; tmp++)
- /*
- * Only count reserved RAM pages
- */
- if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
- reservedpages++;
-
- set_highmem_pages_init(bad_ppro);
-
- codesize = (unsigned long) &_etext - (unsigned long) &_text;
- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
- initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
-
- kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
- kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
- VMALLOC_END-VMALLOC_START);
-
- printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk
reserved, %dk data, %dk init, %ldk highmem)\n",
- (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
- num_physpages << (PAGE_SHIFT-10),
- codesize >> 10,
- reservedpages << (PAGE_SHIFT-10),
- datasize >> 10,
- initsize >> 10,
- (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
- );
-
-#ifdef CONFIG_X86_PAE
- if (!cpu_has_pae)
- panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!");
-#endif
- if (boot_cpu_data.wp_works_ok < 0)
- test_wp_bit();
-
- /*
- * Subtle. SMP is doing it's boot stuff late (because it has to
- * fork idle threads) - but it also needs low mappings for the
- * protected-mode entry to work. We zap these entries only after
- * the WP-bit has been tested.
- */
-#ifndef CONFIG_SMP
- zap_low_mappings();
-#endif
-}
-
-kmem_cache_t *pgd_cache;
-kmem_cache_t *pmd_cache;
-kmem_cache_t *pte_cache;
-
-void __init pgtable_cache_init(void)
-{
- pte_cache = kmem_cache_create("pte",
- PTRS_PER_PTE*sizeof(pte_t),
- PTRS_PER_PTE*sizeof(pte_t),
- 0,
- pte_ctor,
- pte_dtor);
- if (!pte_cache)
- panic("pgtable_cache_init(): Cannot create pte cache");
- if (PTRS_PER_PMD > 1) {
- pmd_cache = kmem_cache_create("pmd",
- PTRS_PER_PMD*sizeof(pmd_t),
- PTRS_PER_PMD*sizeof(pmd_t),
- 0,
- pmd_ctor,
- NULL);
- if (!pmd_cache)
- panic("pgtable_cache_init(): cannot create pmd cache");
- }
- pgd_cache = kmem_cache_create("pgd",
- PTRS_PER_PGD*sizeof(pgd_t),
- PTRS_PER_PGD*sizeof(pgd_t),
- 0,
- pgd_ctor,
- pgd_dtor);
- if (!pgd_cache)
- panic("pgtable_cache_init(): Cannot create pgd cache");
-}
-
-/*
- * This function cannot be __init, since exceptions don't work in that
- * section. Put this after the callers, so that it cannot be inlined.
- */
-static int noinline do_test_wp_bit(void)
-{
- char tmp_reg;
- int flag;
-
- __asm__ __volatile__(
- " movb %0,%1 \n"
- "1: movb %1,%0 \n"
- " xorl %2,%2 \n"
- "2: \n"
- ".section __ex_table,\"a\"\n"
- " .align 4 \n"
- " .long 1b,2b \n"
- ".previous \n"
- :"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
- "=q" (tmp_reg),
- "=r" (flag)
- :"2" (1)
- :"memory");
-
- return flag;
-}
-
-void free_initmem(void)
-{
- unsigned long addr;
-
- addr = (unsigned long)(&__init_begin);
- for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(addr));
- set_page_count(virt_to_page(addr), 1);
- memset((void *)addr, 0xcc, PAGE_SIZE);
- free_page(addr);
- totalram_pages++;
- }
- printk (KERN_INFO "Freeing unused kernel memory: %dk freed\n",
(__init_end - __init_begin) >> 10);
-}
-
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
- if (start < end)
- printk (KERN_INFO "Freeing initrd memory: %ldk freed\n", (end -
start) >> 10);
- for (; start < end; start += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(start));
- set_page_count(virt_to_page(start), 1);
- free_page(start);
- totalram_pages++;
- }
-}
-#endif
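For reference, set_kernel_exec() above flips the NX bit in a kernel PTE's high word and returns the previous setting, so a caller can restore it afterwards. A hedged sketch under the PAE/NX configuration; 'trampoline_va' is a hypothetical page-aligned kernel virtual address:

    /* Temporarily make one kernel page executable, then restore it. */
    int was_exec = set_kernel_exec(trampoline_va, 1);
    /* ... run the code placed at trampoline_va ... */
    set_kernel_exec(trampoline_va, was_exec);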
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/arch/xen/i386/mm/ioremap.c
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/mm/ioremap.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,450 +0,0 @@
-/*
- * arch/i386/mm/ioremap.c
- *
- * Re-map IO memory to kernel address space so that we can access it.
- * This is needed for high PCI addresses that aren't mapped in the
- * 640k-1MB IO memory area on PC's
- *
- * (C) Copyright 1995 1996 Linus Torvalds
- */
-
-#include <linux/vmalloc.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <asm/io.h>
-#include <asm/fixmap.h>
-#include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
-#include <asm/pgtable.h>
-#include <asm/pgalloc.h>
-
-#ifndef CONFIG_XEN_PHYSDEV_ACCESS
-
-void * __ioremap(unsigned long phys_addr, unsigned long size,
- unsigned long flags)
-{
- return NULL;
-}
-
-void *ioremap_nocache (unsigned long phys_addr, unsigned long size)
-{
- return NULL;
-}
-
-void iounmap(volatile void __iomem *addr)
-{
-}
-
-void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
-{
- return NULL;
-}
-
-void __init bt_iounmap(void *addr, unsigned long size)
-{
-}
-
-#else
-
-/*
- * Does @address reside within a non-highmem page that is local to this virtual
- * machine (i.e., not an I/O page, nor a memory page belonging to another VM).
- * See the comment that accompanies pte_pfn() in pgtable-2level.h to understand
- * why this works.
- */
-static inline int is_local_lowmem(unsigned long address)
-{
- extern unsigned long max_low_pfn;
- unsigned long mfn = address >> PAGE_SHIFT;
- unsigned long pfn = mfn_to_pfn(mfn);
- return ((pfn < max_low_pfn) && (pfn_to_mfn(pfn) == mfn));
-}
-
-/*
- * Generic mapping function (not visible outside):
- */
-
-/*
- * Remap an arbitrary physical address space into the kernel virtual
- * address space. Needed when the kernel wants to access high addresses
- * directly.
- *
- * NOTE! We need to allow non-page-aligned mappings too: we will obviously
- * have to convert them into an offset in a page-aligned mapping, but the
- * caller shouldn't need to know that small detail.
- */
-void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
-{
- void __iomem * addr;
- struct vm_struct * area;
- unsigned long offset, last_addr;
- domid_t domid = DOMID_IO;
-
- /* Don't allow wraparound or zero size */
- last_addr = phys_addr + size - 1;
- if (!size || last_addr < phys_addr)
- return NULL;
-
-#ifdef CONFIG_XEN_PRIVILEGED_GUEST
- /*
- * Don't remap the low PCI/ISA area, it's always mapped..
- */
- if (phys_addr >= 0x0 && last_addr < 0x100000)
- return isa_bus_to_virt(phys_addr);
-#endif
-
- /*
- * Don't allow anybody to remap normal RAM that we're using..
- */
- if (is_local_lowmem(phys_addr)) {
- char *t_addr, *t_end;
- struct page *page;
-
- t_addr = bus_to_virt(phys_addr);
- t_end = t_addr + (size - 1);
-
- for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
- if(!PageReserved(page))
- return NULL;
-
- domid = DOMID_LOCAL;
- }
-
- /*
- * Mappings have to be page-aligned
- */
- offset = phys_addr & ~PAGE_MASK;
- phys_addr &= PAGE_MASK;
- size = PAGE_ALIGN(last_addr+1) - phys_addr;
-
- /*
- * Ok, go for it..
- */
- area = get_vm_area(size, VM_IOREMAP | (flags << 20));
- if (!area)
- return NULL;
- area->phys_addr = phys_addr;
- addr = (void __iomem *) area->addr;
- if (direct_remap_area_pages(&init_mm, (unsigned long) addr, phys_addr,
- size, __pgprot(_PAGE_PRESENT | _PAGE_RW |
- _PAGE_DIRTY | _PAGE_ACCESSED
- | flags), domid)) {
- vunmap((void __force *) addr);
- return NULL;
- }
- return (void __iomem *) (offset + (char __iomem *)addr);
-}
-
-
-/**
- * ioremap_nocache - map bus memory into CPU space
- * @offset: bus address of the memory
- * @size: size of the resource to map
- *
- * ioremap_nocache performs a platform specific sequence of operations to
- * make bus memory CPU accessible via the readb/readw/readl/writeb/
- * writew/writel functions and the other mmio helpers. The returned
- * address is not guaranteed to be usable directly as a virtual
- * address.
- *
- * This version of ioremap ensures that the memory is marked uncachable
- * on the CPU as well as honouring existing caching rules from things like
- * the PCI bus. Note that there are other caches and buffers on many
- * busses. In particular driver authors should read up on PCI writes
- *
- * It's useful if some control registers are in such an area and
- * write combining or read caching is not desirable:
- *
- * Must be freed with iounmap.
- */
-
-void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
-{
- unsigned long last_addr;
- void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD);
- if (!p)
- return p;
-
- /* Guaranteed to be > phys_addr, as per __ioremap() */
- last_addr = phys_addr + size - 1;
-
- if (is_local_lowmem(last_addr)) {
- struct page *ppage = virt_to_page(bus_to_virt(phys_addr));
- unsigned long npages;
-
- phys_addr &= PAGE_MASK;
-
- /* This might overflow and become zero.. */
- last_addr = PAGE_ALIGN(last_addr);
-
- /* .. but that's ok, because modulo-2**n arithmetic will make
- * the page-aligned "last - first" come out right.
- */
- npages = (last_addr - phys_addr) >> PAGE_SHIFT;
-
- if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) {
- iounmap(p);
- p = NULL;
- }
- global_flush_tlb();
- }
-
- return p;
-}
-
-void iounmap(volatile void __iomem *addr)
-{
- struct vm_struct *p;
- if ((void __force *) addr <= high_memory)
- return;
-#ifdef CONFIG_XEN_PRIVILEGED_GUEST
- if ((unsigned long) addr >= fix_to_virt(FIX_ISAMAP_BEGIN))
- return;
-#endif
- p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
- if (!p) {
- printk("__iounmap: bad address %p\n", addr);
- return;
- }
-
- if ((p->flags >> 20) && is_local_lowmem(p->phys_addr)) {
- /* p->size includes the guard page, but cpa doesn't like that */
- change_page_attr(virt_to_page(bus_to_virt(p->phys_addr)),
- (p->size - PAGE_SIZE) >> PAGE_SHIFT,
- PAGE_KERNEL);
- global_flush_tlb();
- }
- kfree(p);
-}
-
-void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
-{
- unsigned long offset, last_addr;
- unsigned int nrpages;
- enum fixed_addresses idx;
-
- /* Don't allow wraparound or zero size */
- last_addr = phys_addr + size - 1;
- if (!size || last_addr < phys_addr)
- return NULL;
-
-#ifdef CONFIG_XEN_PRIVILEGED_GUEST
- /*
- * Don't remap the low PCI/ISA area, it's always mapped..
- */
- if (phys_addr >= 0x0 && last_addr < 0x100000)
- return isa_bus_to_virt(phys_addr);
-#endif
-
- /*
- * Mappings have to be page-aligned
- */
- offset = phys_addr & ~PAGE_MASK;
- phys_addr &= PAGE_MASK;
- size = PAGE_ALIGN(last_addr) - phys_addr;
-
- /*
- * Mappings have to fit in the FIX_BTMAP area.
- */
- nrpages = size >> PAGE_SHIFT;
- if (nrpages > NR_FIX_BTMAPS)
- return NULL;
-
- /*
- * Ok, go for it..
- */
- idx = FIX_BTMAP_BEGIN;
- while (nrpages > 0) {
- set_fixmap_ma(idx, phys_addr);
- phys_addr += PAGE_SIZE;
- --idx;
- --nrpages;
- }
- return (void*) (offset + fix_to_virt(FIX_BTMAP_BEGIN));
-}
-
-void __init bt_iounmap(void *addr, unsigned long size)
-{
- unsigned long virt_addr;
- unsigned long offset;
- unsigned int nrpages;
- enum fixed_addresses idx;
-
- virt_addr = (unsigned long)addr;
- if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN))
- return;
-#ifdef CONFIG_XEN_PRIVILEGED_GUEST
- if (virt_addr >= fix_to_virt(FIX_ISAMAP_BEGIN))
- return;
-#endif
- offset = virt_addr & ~PAGE_MASK;
- nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;
-
- idx = FIX_BTMAP_BEGIN;
- while (nrpages > 0) {
- clear_fixmap(idx);
- --idx;
- --nrpages;
- }
-}
-
-#endif /* CONFIG_XEN_PHYSDEV_ACCESS */
-
-/* These hacky macros avoid phys->machine translations. */
-#define __direct_pte(x) ((pte_t) { (x) } )
-#define __direct_mk_pte(page_nr,pgprot) \
- __direct_pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
-#define direct_mk_pte_phys(physpage, pgprot) \
- __direct_mk_pte((physpage) >> PAGE_SHIFT, pgprot)
-
-static inline void direct_remap_area_pte(pte_t *pte,
- unsigned long address,
- unsigned long size,
- mmu_update_t **v)
-{
- unsigned long end;
-
- address &= ~PMD_MASK;
- end = address + size;
- if (end > PMD_SIZE)
- end = PMD_SIZE;
- if (address >= end)
- BUG();
-
- do {
- (*v)->ptr = virt_to_machine(pte);
- (*v)++;
- address += PAGE_SIZE;
- pte++;
- } while (address && (address < end));
-}
-
-static inline int direct_remap_area_pmd(struct mm_struct *mm,
- pmd_t *pmd,
- unsigned long address,
- unsigned long size,
- mmu_update_t **v)
-{
- unsigned long end;
-
- address &= ~PGDIR_MASK;
- end = address + size;
- if (end > PGDIR_SIZE)
- end = PGDIR_SIZE;
- if (address >= end)
- BUG();
- do {
- pte_t *pte = (mm == &init_mm) ?
- pte_alloc_kernel(mm, pmd, address) :
- pte_alloc_map(mm, pmd, address);
- if (!pte)
- return -ENOMEM;
- direct_remap_area_pte(pte, address, end - address, v);
- pte_unmap(pte);
- address = (address + PMD_SIZE) & PMD_MASK;
- pmd++;
- } while (address && (address < end));
- return 0;
-}
-
-int __direct_remap_area_pages(struct mm_struct *mm,
- unsigned long address,
- unsigned long size,
- mmu_update_t *v)
-{
- pgd_t * dir;
- unsigned long end = address + size;
- int error;
-
- dir = pgd_offset(mm, address);
- if (address >= end)
- BUG();
- spin_lock(&mm->page_table_lock);
- do {
- pud_t *pud;
- pmd_t *pmd;
-
- error = -ENOMEM;
- pud = pud_alloc(mm, dir, address);
- if (!pud)
- break;
- pmd = pmd_alloc(mm, pud, address);
- if (!pmd)
- break;
- error = 0;
- direct_remap_area_pmd(mm, pmd, address, end - address, &v);
- address = (address + PGDIR_SIZE) & PGDIR_MASK;
- dir++;
-
- } while (address && (address < end));
- spin_unlock(&mm->page_table_lock);
- return error;
-}
-
-
-int direct_remap_area_pages(struct mm_struct *mm,
- unsigned long address,
- unsigned long machine_addr,
- unsigned long size,
- pgprot_t prot,
- domid_t domid)
-{
- int i;
- unsigned long start_address;
-#define MAX_DIRECTMAP_MMU_QUEUE 130
- mmu_update_t u[MAX_DIRECTMAP_MMU_QUEUE], *w, *v;
-
- v = w = &u[0];
- if (domid != DOMID_LOCAL) {
- u[0].ptr = MMU_EXTENDED_COMMAND;
- u[0].val = MMUEXT_SET_FOREIGNDOM;
- u[0].val |= (unsigned long)domid << 16;
- v = w = &u[1];
- }
-
- start_address = address;
-
- flush_cache_all();
-
- for (i = 0; i < size; i += PAGE_SIZE) {
- if ((v - u) == MAX_DIRECTMAP_MMU_QUEUE) {
- /* Fill in the PTE pointers. */
- __direct_remap_area_pages(mm,
- start_address,
- address-start_address,
- w);
-
- if (HYPERVISOR_mmu_update(u, v - u, NULL) < 0)
- return -EFAULT;
- v = w;
- start_address = address;
- }
-
- /*
- * Fill in the machine address: PTE ptr is done later by
- * __direct_remap_area_pages().
- */
- v->val = (machine_addr & PAGE_MASK) | pgprot_val(prot);
-
- machine_addr += PAGE_SIZE;
- address += PAGE_SIZE;
- v++;
- }
-
- if (v != w) {
- /* get the ptep's filled in */
- __direct_remap_area_pages(mm,
- start_address,
- address-start_address,
- w);
- if (unlikely(HYPERVISOR_mmu_update(u, v - u, NULL) < 0))
- return -EFAULT;
- }
-
- flush_tlb_all();
-
- return 0;
-}
-
-EXPORT_SYMBOL(direct_remap_area_pages);
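direct_remap_area_pages(), removed above, is the exported primitive behind __ioremap(): it fills PTEs with caller-supplied machine addresses, batching up to MAX_DIRECTMAP_MMU_QUEUE updates per HYPERVISOR_mmu_update call. A hedged caller sketch; 'vaddr', 'maddr' and 'len' are hypothetical:

    /* Map 'len' bytes of machine address space at kernel address 'vaddr',
     * read/write, attributed to the I/O domain. */
    int rc = direct_remap_area_pages(&init_mm, vaddr, maddr, len,
                                     __pgprot(_PAGE_PRESENT | _PAGE_RW |
                                              _PAGE_DIRTY | _PAGE_ACCESSED),
                                     DOMID_IO);
    if (rc)
        printk(KERN_WARNING "direct remap failed: %d\n", rc);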
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/arch/xen/i386/mm/pageattr.c
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/mm/pageattr.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,226 +0,0 @@
-/*
- * Copyright 2002 Andi Kleen, SuSE Labs.
- * Thanks to Ben LaHaise for precious feedback.
- */
-
-#include <linux/config.h>
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/highmem.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <asm/uaccess.h>
-#include <asm/processor.h>
-#include <asm/tlbflush.h>
-
-static DEFINE_SPINLOCK(cpa_lock);
-static struct list_head df_list = LIST_HEAD_INIT(df_list);
-
-
-pte_t *lookup_address(unsigned long address)
-{
- pgd_t *pgd = pgd_offset_k(address);
- pud_t *pud;
- pmd_t *pmd;
- if (pgd_none(*pgd))
- return NULL;
- pud = pud_offset(pgd, address);
- if (pud_none(*pud))
- return NULL;
- pmd = pmd_offset(pud, address);
- if (pmd_none(*pmd))
- return NULL;
- if (pmd_large(*pmd))
- return (pte_t *)pmd;
- return pte_offset_kernel(pmd, address);
-}
-
-static struct page *split_large_page(unsigned long address, pgprot_t prot)
-{
- int i;
- unsigned long addr;
- struct page *base;
- pte_t *pbase;
-
- spin_unlock_irq(&cpa_lock);
- base = alloc_pages(GFP_KERNEL, 0);
- spin_lock_irq(&cpa_lock);
- if (!base)
- return NULL;
-
- address = __pa(address);
- addr = address & LARGE_PAGE_MASK;
- pbase = (pte_t *)page_address(base);
- for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
- pbase[i] = pfn_pte(addr >> PAGE_SHIFT,
- addr == address ? prot : PAGE_KERNEL);
- }
- return base;
-}
-
-static void flush_kernel_map(void *dummy)
-{
- /* Could use CLFLUSH here if the CPU supports it (Hammer,P4) */
- if (boot_cpu_data.x86_model >= 4)
- wbinvd();
- /* Flush all to work around Errata in early athlons regarding
- * large page flushing.
- */
- __flush_tlb_all();
-}
-
-static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
-{
- struct page *page;
- unsigned long flags;
-
- set_pte_atomic(kpte, pte); /* change init_mm */
- if (PTRS_PER_PMD > 1)
- return;
-
- spin_lock_irqsave(&pgd_lock, flags);
- for (page = pgd_list; page; page = (struct page *)page->index) {
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pgd = (pgd_t *)page_address(page) + pgd_index(address);
- pud = pud_offset(pgd, address);
- pmd = pmd_offset(pud, address);
- set_pte_atomic((pte_t *)pmd, pte);
- }
- spin_unlock_irqrestore(&pgd_lock, flags);
-}
-
-/*
- * No more special protections in this 2/4MB area - revert to a
- * large page again.
- */
-static inline void revert_page(struct page *kpte_page, unsigned long address)
-{
- pte_t *linear = (pte_t *)
- pmd_offset(pud_offset(pgd_offset_k(address), address), address);
- set_pmd_pte(linear, address,
- pfn_pte((__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT,
- PAGE_KERNEL_LARGE));
-}
-
-static int
-__change_page_attr(struct page *page, pgprot_t prot)
-{
- pte_t *kpte;
- unsigned long address;
- struct page *kpte_page;
-
- BUG_ON(PageHighMem(page));
- address = (unsigned long)page_address(page);
-
- kpte = lookup_address(address);
- if (!kpte)
- return -EINVAL;
- kpte_page = virt_to_page(kpte);
- if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) {
- if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
- set_pte_batched(kpte, mk_pte(page, prot));
- } else {
- struct page *split = split_large_page(address, prot);
- if (!split)
- return -ENOMEM;
- set_pmd_pte(kpte,address,mk_pte(split, PAGE_KERNEL));
- kpte_page = split;
- }
- get_page(kpte_page);
- } else if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
- set_pte_batched(kpte, mk_pte(page, PAGE_KERNEL));
- __put_page(kpte_page);
- } else
- BUG();
-
- /*
- * If the pte was reserved, it means it was created at boot
- * time (not via split_large_page) and in turn we must not
- * replace it with a largepage.
- */
- if (!PageReserved(kpte_page)) {
- /* memleak and potential failed 2M page regeneration */
- BUG_ON(!page_count(kpte_page));
-
- if (cpu_has_pse && (page_count(kpte_page) == 1)) {
- list_add(&kpte_page->lru, &df_list);
- revert_page(kpte_page, address);
- }
- }
- return 0;
-}
-
-static inline void flush_map(void)
-{
- on_each_cpu(flush_kernel_map, NULL, 1, 1);
-}
-
-/*
- * Change the page attributes of an page in the linear mapping.
- *
- * This should be used when a page is mapped with a different caching policy
- * than write-back somewhere - some CPUs do not like it when mappings with
- * different caching policies exist. This changes the page attributes of the
- * in kernel linear mapping too.
- *
- * The caller needs to ensure that there are no conflicting mappings elsewhere.
- * This function only deals with the kernel linear map.
- *
- * Caller must call global_flush_tlb() after this.
- */
-int change_page_attr(struct page *page, int numpages, pgprot_t prot)
-{
- int err = 0;
- int i;
- unsigned long flags;
-
- spin_lock_irqsave(&cpa_lock, flags);
- for (i = 0; i < numpages; i++, page++) {
- err = __change_page_attr(page, prot);
- if (err)
- break;
- }
- flush_page_update_queue();
- spin_unlock_irqrestore(&cpa_lock, flags);
- return err;
-}
-
-void global_flush_tlb(void)
-{
- LIST_HEAD(l);
- struct list_head* n;
-
- BUG_ON(irqs_disabled());
-
- spin_lock_irq(&cpa_lock);
- list_splice_init(&df_list, &l);
- spin_unlock_irq(&cpa_lock);
- flush_map();
- n = l.next;
- while (n != &l) {
- struct page *pg = list_entry(n, struct page, lru);
- n = n->next;
- __free_page(pg);
- }
-}
-
-#ifdef CONFIG_DEBUG_PAGEALLOC
-void kernel_map_pages(struct page *page, int numpages, int enable)
-{
- if (PageHighMem(page))
- return;
- /* the return value is ignored - the calls cannot fail,
- * large pages are disabled at boot time.
- */
- change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
- /* we should perform an IPI and flush all tlbs,
- * but that can deadlock->flush only current cpu.
- */
- __flush_tlb_all();
-}
-#endif
-
-EXPORT_SYMBOL(change_page_attr);
-EXPORT_SYMBOL(global_flush_tlb);
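As the comment above change_page_attr() states, callers must follow it with global_flush_tlb(). A hedged usage sketch; 'pg' is a hypothetical struct page in the kernel linear map:

    /* Make one page uncacheable for device access, then revert it. */
    if (change_page_attr(pg, 1, PAGE_KERNEL_NOCACHE) == 0)
        global_flush_tlb();          /* required after change_page_attr() */
    /* ... uncached accesses ... */
    change_page_attr(pg, 1, PAGE_KERNEL);
    global_flush_tlb();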
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/arch/xen/i386/mm/pgtable.c
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/mm/pgtable.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,433 +0,0 @@
-/*
- * linux/arch/i386/mm/pgtable.c
- */
-
-#include <linux/config.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/mm.h>
-#include <linux/swap.h>
-#include <linux/smp.h>
-#include <linux/highmem.h>
-#include <linux/slab.h>
-#include <linux/pagemap.h>
-#include <linux/spinlock.h>
-
-#include <asm/system.h>
-#include <asm/pgtable.h>
-#include <asm/pgalloc.h>
-#include <asm/fixmap.h>
-#include <asm/e820.h>
-#include <asm/tlb.h>
-#include <asm/tlbflush.h>
-#include <asm/io.h>
-
-#include <asm-xen/foreign_page.h>
-
-void show_mem(void)
-{
- int total = 0, reserved = 0;
- int shared = 0, cached = 0;
- int highmem = 0;
- struct page *page;
- pg_data_t *pgdat;
- unsigned long i;
-
- printk("Mem-info:\n");
- show_free_areas();
- printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
- for_each_pgdat(pgdat) {
- for (i = 0; i < pgdat->node_spanned_pages; ++i) {
- page = pgdat->node_mem_map + i;
- total++;
- if (PageHighMem(page))
- highmem++;
- if (PageReserved(page))
- reserved++;
- else if (PageSwapCache(page))
- cached++;
- else if (page_count(page))
- shared += page_count(page) - 1;
- }
- }
- printk("%d pages of RAM\n", total);
- printk("%d pages of HIGHMEM\n",highmem);
- printk("%d reserved pages\n",reserved);
- printk("%d pages shared\n",shared);
- printk("%d pages swap cached\n",cached);
-}
-
-/*
- * Associate a virtual page frame with a given physical page frame
- * and protection flags for that frame.
- */
-static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
-{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
-
- pgd = swapper_pg_dir + pgd_index(vaddr);
- if (pgd_none(*pgd)) {
- BUG();
- return;
- }
- pud = pud_offset(pgd, vaddr);
- if (pud_none(*pud)) {
- BUG();
- return;
- }
- pmd = pmd_offset(pud, vaddr);
- if (pmd_none(*pmd)) {
- BUG();
- return;
- }
- pte = pte_offset_kernel(pmd, vaddr);
- /* <pfn,flags> stored as-is, to permit clearing entries */
- set_pte(pte, pfn_pte(pfn, flags));
-
- /*
- * It's enough to flush this one mapping.
- * (PGE mappings get flushed as well)
- */
- __flush_tlb_one(vaddr);
-}
-
-/*
- * Associate a virtual page frame with a given physical page frame
- * and protection flags for that frame.
- */
-static void set_pte_pfn_ma(unsigned long vaddr, unsigned long pfn,
- pgprot_t flags)
-{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
-
- pgd = swapper_pg_dir + pgd_index(vaddr);
- if (pgd_none(*pgd)) {
- BUG();
- return;
- }
- pud = pud_offset(pgd, vaddr);
- if (pud_none(*pud)) {
- BUG();
- return;
- }
- pmd = pmd_offset(pud, vaddr);
- if (pmd_none(*pmd)) {
- BUG();
- return;
- }
- pte = pte_offset_kernel(pmd, vaddr);
- /* <pfn,flags> stored as-is, to permit clearing entries */
- set_pte(pte, pfn_pte_ma(pfn, flags));
-
- /*
- * It's enough to flush this one mapping.
- * (PGE mappings get flushed as well)
- */
- __flush_tlb_one(vaddr);
-}
-
-/*
- * Associate a large virtual page frame with a given physical page frame
- * and protection flags for that frame. pfn is for the base of the page,
- * vaddr is what the page gets mapped to - both must be properly aligned.
- * The pmd must already be instantiated. Assumes PAE mode.
- */
-void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
-{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
-
- if (vaddr & (PMD_SIZE-1)) { /* vaddr is misaligned */
- printk ("set_pmd_pfn: vaddr misaligned\n");
- return; /* BUG(); */
- }
- if (pfn & (PTRS_PER_PTE-1)) { /* pfn is misaligned */
- printk ("set_pmd_pfn: pfn misaligned\n");
- return; /* BUG(); */
- }
- pgd = swapper_pg_dir + pgd_index(vaddr);
- if (pgd_none(*pgd)) {
- printk ("set_pmd_pfn: pgd_none\n");
- return; /* BUG(); */
- }
- pud = pud_offset(pgd, vaddr);
- pmd = pmd_offset(pud, vaddr);
- set_pmd(pmd, pfn_pmd(pfn, flags));
- /*
- * It's enough to flush this one mapping.
- * (PGE mappings get flushed as well)
- */
- __flush_tlb_one(vaddr);
-}
-
-void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
-{
- unsigned long address = __fix_to_virt(idx);
-
- if (idx >= __end_of_fixed_addresses) {
- BUG();
- return;
- }
- set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
-}
-
-void __set_fixmap_ma (enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
-{
- unsigned long address = __fix_to_virt(idx);
-
- if (idx >= __end_of_fixed_addresses) {
- BUG();
- return;
- }
- set_pte_pfn_ma(address, phys >> PAGE_SHIFT, flags);
-}
-
-pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
-{
- pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
- if (pte) {
- make_page_readonly(pte);
- xen_flush_page_update_queue();
- }
- return pte;
-}
-
-void pte_ctor(void *pte, kmem_cache_t *cache, unsigned long unused)
-{
- struct page *page = virt_to_page(pte);
- SetPageForeign(page, pte_free);
- set_page_count(page, 1);
-
- clear_page(pte);
- make_page_readonly(pte);
- queue_pte_pin(__pa(pte));
- flush_page_update_queue();
-}
-
-void pte_dtor(void *pte, kmem_cache_t *cache, unsigned long unused)
-{
- struct page *page = virt_to_page(pte);
- ClearPageForeign(page);
-
- queue_pte_unpin(__pa(pte));
- make_page_writable(pte);
- flush_page_update_queue();
-}
-
-struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
-{
- pte_t *ptep;
-
-#ifdef CONFIG_HIGHPTE
- struct page *pte;
-
- pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
- if (pte == NULL)
- return pte;
- if (PageHighMem(pte))
- return pte;
- /* not a highmem page -- free page and grab one from the cache */
- __free_page(pte);
-#endif
- ptep = kmem_cache_alloc(pte_cache, GFP_KERNEL);
- if (ptep)
- return virt_to_page(ptep);
- return NULL;
-}
-
-void pte_free(struct page *pte)
-{
- set_page_count(pte, 1);
-#ifdef CONFIG_HIGHPTE
- if (!PageHighMem(pte))
-#endif
- kmem_cache_free(pte_cache,
- phys_to_virt(page_to_pseudophys(pte)));
-#ifdef CONFIG_HIGHPTE
- else
- __free_page(pte);
-#endif
-}
-
-void pmd_ctor(void *pmd, kmem_cache_t *cache, unsigned long flags)
-{
- memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
-}
-
-/*
- * List of all pgd's needed for non-PAE so it can invalidate entries
- * in both cached and uncached pgd's; not needed for PAE since the
- * kernel pmd is shared. If PAE were not to share the pmd a similar
- * tactic would be needed. This is essentially codepath-based locking
- * against pageattr.c; it is the unique case in which a valid change
- * of kernel pagetables can't be lazily synchronized by vmalloc faults.
- * vmalloc faults work because attached pagetables are never freed.
- * The locking scheme was chosen on the basis of manfred's
- * recommendations and having no core impact whatsoever.
- * -- wli
- */
-DEFINE_SPINLOCK(pgd_lock);
-struct page *pgd_list;
-
-static inline void pgd_list_add(pgd_t *pgd)
-{
- struct page *page = virt_to_page(pgd);
- page->index = (unsigned long)pgd_list;
- if (pgd_list)
- pgd_list->private = (unsigned long)&page->index;
- pgd_list = page;
- page->private = (unsigned long)&pgd_list;
-}
-
-static inline void pgd_list_del(pgd_t *pgd)
-{
- struct page *next, **pprev, *page = virt_to_page(pgd);
- next = (struct page *)page->index;
- pprev = (struct page **)page->private;
- *pprev = next;
- if (next)
- next->private = (unsigned long)pprev;
-}
-
-void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
-{
- unsigned long flags;
-
- if (PTRS_PER_PMD == 1)
- spin_lock_irqsave(&pgd_lock, flags);
-
- memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
- swapper_pg_dir + USER_PTRS_PER_PGD,
- (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
-
- if (PTRS_PER_PMD > 1)
- goto out;
-
- pgd_list_add(pgd);
- spin_unlock_irqrestore(&pgd_lock, flags);
- memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
- out:
- make_page_readonly(pgd);
- queue_pgd_pin(__pa(pgd));
- flush_page_update_queue();
-}
-
-/* never called when PTRS_PER_PMD > 1 */
-void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused)
-{
- unsigned long flags; /* can be called from interrupt context */
-
- queue_pgd_unpin(__pa(pgd));
- make_page_writable(pgd);
- flush_page_update_queue();
-
- if (PTRS_PER_PMD > 1)
- return;
-
- spin_lock_irqsave(&pgd_lock, flags);
- pgd_list_del(pgd);
- spin_unlock_irqrestore(&pgd_lock, flags);
-}
-
-pgd_t *pgd_alloc(struct mm_struct *mm)
-{
- int i;
- pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);
-
- if (PTRS_PER_PMD == 1 || !pgd)
- return pgd;
-
- for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
- pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
- if (!pmd)
- goto out_oom;
- set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
- }
- return pgd;
-
-out_oom:
- for (i--; i >= 0; i--)
- kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
- kmem_cache_free(pgd_cache, pgd);
- return NULL;
-}
-
-void pgd_free(pgd_t *pgd)
-{
- int i;
-
- /* in the PAE case user pgd entries are overwritten before usage */
- if (PTRS_PER_PMD > 1)
- for (i = 0; i < USER_PTRS_PER_PGD; ++i)
- kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
- /* in the non-PAE case, clear_page_range() clears user pgd entries */
- kmem_cache_free(pgd_cache, pgd);
-}
-
-void make_lowmem_page_readonly(void *va)
-{
- pte_t *pte = virt_to_ptep(va);
- queue_l1_entry_update(pte, (*(unsigned long *)pte)&~_PAGE_RW);
-}
-
-void make_lowmem_page_writable(void *va)
-{
- pte_t *pte = virt_to_ptep(va);
- queue_l1_entry_update(pte, (*(unsigned long *)pte)|_PAGE_RW);
-}
-
-void make_page_readonly(void *va)
-{
- pte_t *pte = virt_to_ptep(va);
- queue_l1_entry_update(pte, (*(unsigned long *)pte)&~_PAGE_RW);
- if ( (unsigned long)va >= (unsigned long)high_memory )
- {
- unsigned long phys;
- phys = machine_to_phys(*(unsigned long *)pte & PAGE_MASK);
-#ifdef CONFIG_HIGHMEM
- if ( (phys >> PAGE_SHIFT) < highstart_pfn )
-#endif
- make_lowmem_page_readonly(phys_to_virt(phys));
- }
-}
-
-void make_page_writable(void *va)
-{
- pte_t *pte = virt_to_ptep(va);
- queue_l1_entry_update(pte, (*(unsigned long *)pte)|_PAGE_RW);
- if ( (unsigned long)va >= (unsigned long)high_memory )
- {
- unsigned long phys;
- phys = machine_to_phys(*(unsigned long *)pte & PAGE_MASK);
-#ifdef CONFIG_HIGHMEM
- if ( (phys >> PAGE_SHIFT) < highstart_pfn )
-#endif
- make_lowmem_page_writable(phys_to_virt(phys));
- }
-}
-
-void make_pages_readonly(void *va, unsigned int nr)
-{
- while ( nr-- != 0 )
- {
- make_page_readonly(va);
- va = (void *)((unsigned long)va + PAGE_SIZE);
- }
-}
-
-void make_pages_writable(void *va, unsigned int nr)
-{
- while ( nr-- != 0 )
- {
- make_page_writable(va);
- va = (void *)((unsigned long)va + PAGE_SIZE);
- }
-}
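
The make_page_* helpers above toggle write permission by clearing or setting the RW bit directly in the raw PTE word before queueing the update. A tiny stand-alone sketch of that bit arithmetic, assuming the x86 _PAGE_RW value (bit 1) and a plain integer standing in for pte_t:

/* Sketch of the PTE read-only/writable flag arithmetic used by
 * make_page_{readonly,writable}(): clear or set RW in the raw word. */
#include <assert.h>
#include <stdint.h>

#define _PAGE_RW 0x002UL

static uint64_t pte_wrprotect(uint64_t pte) { return pte & ~_PAGE_RW; }
static uint64_t pte_mkwrite(uint64_t pte)   { return pte |  _PAGE_RW; }

int main(void)
{
    uint64_t pte = 0x1000 | _PAGE_RW | 0x1;        /* frame | RW | present */
    assert(!(pte_wrprotect(pte) & _PAGE_RW));      /* RW cleared */
    assert(pte_mkwrite(pte_wrprotect(pte)) == pte);/* round-trips */
    return 0;
}
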
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/arch/xen/i386/pci/Makefile
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/pci/Makefile Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,31 +0,0 @@
-XENARCH := $(subst ",,$(CONFIG_XENARCH))
-
-CFLAGS += -Iarch/$(XENARCH)/pci
-
-c-obj-y := i386.o
-
-c-obj-$(CONFIG_PCI_BIOS) += pcbios.o
-c-obj-$(CONFIG_PCI_MMCONFIG) += mmconfig.o
-obj-$(CONFIG_PCI_DIRECT) += direct.o
-
-c-pci-y := fixup.o
-c-pci-$(CONFIG_ACPI_PCI) += acpi.o
-c-pci-y += legacy.o
-pci-y += irq.o
-
-c-pci-$(CONFIG_X86_VISWS) := visws.o fixup.o
-pci-$(CONFIG_X86_VISWS) :=
-c-pci-$(CONFIG_X86_NUMAQ) := numa.o
-pci-$(CONFIG_X86_NUMAQ) := irq.o
-
-obj-y += $(pci-y)
-c-obj-y += $(c-pci-y) common.o
-
-c-link :=
-
-$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-link)):
- @ln -fsn $(srctree)/arch/i386/pci/$(notdir $@) $@
-
-obj-y += $(c-obj-y)
-
-clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-) $(c-link))
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/arch/xen/i386/pci/direct.c
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/pci/direct.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,81 +0,0 @@
-/*
- * direct.c - Low-level direct PCI config space access
- */
-
-#include <linux/pci.h>
-#include <linux/init.h>
-#include "pci.h"
-
-#include <asm-xen/xen-public/xen.h>
-#include <asm-xen/xen-public/physdev.h>
-
-/*
- * Functions for accessing PCI configuration space with type xen accesses
- */
-
-static int pci_conf_read (int seg, int bus, int devfn, int reg, int len, u32 *value)
-{
- unsigned long flags;
- physdev_op_t op;
- int ret;
-
- if (!value || (bus > 255) || (devfn > 255) || (reg > 255))
- return -EINVAL;
-
- spin_lock_irqsave(&pci_config_lock, flags);
-
- op.cmd = PHYSDEVOP_PCI_CFGREG_READ;
- op.u.pci_cfgreg_read.bus = bus;
- op.u.pci_cfgreg_read.dev = (devfn & ~0x7) >> 3;
- op.u.pci_cfgreg_read.func = devfn & 0x7;
- op.u.pci_cfgreg_read.reg = reg;
- op.u.pci_cfgreg_read.len = len;
-
- ret = HYPERVISOR_physdev_op(&op);
- if (ret == 0)
- *value = op.u.pci_cfgreg_read.value;
-
- spin_unlock_irqrestore(&pci_config_lock, flags);
-
- return ret;
-}
-
-static int pci_conf_write (int seg, int bus, int devfn, int reg, int len, u32 value)
-{
- unsigned long flags;
- physdev_op_t op;
- int ret;
-
- if ((bus > 255) || (devfn > 255) || (reg > 255))
- return -EINVAL;
-
- spin_lock_irqsave(&pci_config_lock, flags);
-
- op.cmd = PHYSDEVOP_PCI_CFGREG_WRITE;
- op.u.pci_cfgreg_write.bus = bus;
- op.u.pci_cfgreg_write.dev = (devfn & ~0x7) >> 3;
- op.u.pci_cfgreg_write.func = devfn & 0x7;
- op.u.pci_cfgreg_write.reg = reg;
- op.u.pci_cfgreg_write.len = len;
- op.u.pci_cfgreg_write.value = value;
-
- ret = HYPERVISOR_physdev_op(&op);
-
- spin_unlock_irqrestore(&pci_config_lock, flags);
-
- return ret;
-}
-
-struct pci_raw_ops pci_direct_xen = {
- .read = pci_conf_read,
- .write = pci_conf_write,
-};
-
-static int __init pci_direct_init(void)
-{
- printk(KERN_INFO "PCI: Using configuration type Xen\n");
- raw_pci_ops = &pci_direct_xen;
- return 0;
-}
-
-arch_initcall(pci_direct_init);
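
pci_conf_read() above unpacks devfn by hand: (devfn & ~0x7) >> 3 is the 5-bit slot and devfn & 0x7 the 3-bit function, exactly what the hypercall fields carry. A small user-space check of the packing and its equivalence to the usual PCI_SLOT/PCI_FUNC macros, spelled out here so the demo is self-contained:

/* devfn packs a 5-bit slot and a 3-bit function into one byte. */
#include <assert.h>

#define PCI_DEVFN(slot, func) ((((slot) & 0x1f) << 3) | ((func) & 0x07))
#define PCI_SLOT(devfn)       (((devfn) >> 3) & 0x1f)
#define PCI_FUNC(devfn)       ((devfn) & 0x07)

int main(void)
{
    int devfn = PCI_DEVFN(3, 2);
    assert(PCI_SLOT(devfn) == 3 && PCI_FUNC(devfn) == 2);
    /* (devfn & ~0x7) >> 3, as used above, equals PCI_SLOT(devfn) */
    assert(((devfn & ~0x7) >> 3) == PCI_SLOT(devfn));
    return 0;
}
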
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/arch/xen/i386/pci/irq.c
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/pci/irq.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,155 +0,0 @@
-/*
- * Low-Level PCI Support for PC -- Routing of Interrupts
- *
- * (c) 1999--2000 Martin Mares <mj@xxxxxx>
- */
-
-#include <linux/config.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <asm/io.h>
-#include <asm/smp.h>
-#include <asm/io_apic.h>
-#include <asm/hw_irq.h>
-#include <linux/acpi.h>
-
-#include "pci.h"
-
-#include <asm-xen/xen-public/xen.h>
-#include <asm-xen/xen-public/physdev.h>
-
-static int pirq_enable_irq(struct pci_dev *dev);
-
-/*
- * Never use: 0, 1, 2 (timer, keyboard, and cascade)
- * Avoid using: 13, 14 and 15 (FP error and IDE).
- * Penalize: 3, 4, 6, 7, 12 (known ISA uses: serial, floppy, parallel and mouse)
- */
-unsigned int pcibios_irq_mask = 0xfff8;
-
-static int pirq_penalty[16] = {
- 1000000, 1000000, 1000000, 1000, 1000, 0, 1000, 1000,
- 0, 0, 0, 0, 1000, 100000, 100000, 100000
-};
-
-int (*pcibios_enable_irq)(struct pci_dev *dev) = NULL;
-
-
-static int __init pcibios_irq_init(void)
-{
- int bus;
- physdev_op_t op;
-
- DBG("PCI: IRQ init\n");
-
- if (pcibios_enable_irq || raw_pci_ops == NULL)
- return 0;
-
- op.cmd = PHYSDEVOP_PCI_PROBE_ROOT_BUSES;
- if (HYPERVISOR_physdev_op(&op) != 0) {
- printk(KERN_WARNING "PCI: System does not support PCI\n");
- return 0;
- }
-
- printk(KERN_INFO "PCI: Probing PCI hardware\n");
- for (bus = 0; bus < 256; bus++)
- if (test_bit(bus, (unsigned long *)
- &op.u.pci_probe_root_buses.busmask[0]))
- (void)pcibios_scan_root(bus);
-
- pcibios_enable_irq = pirq_enable_irq;
-
- return 0;
-}
-
-subsys_initcall(pcibios_irq_init);
-
-
-static void pirq_penalize_isa_irq(int irq)
-{
- /*
- * If any ISAPnP device reports an IRQ in its list of possible
- * IRQ's, we try to avoid assigning it to PCI devices.
- */
- if (irq < 16)
- pirq_penalty[irq] += 100;
-}
-
-void pcibios_penalize_isa_irq(int irq)
-{
-#ifdef CONFIG_ACPI_PCI
- if (!acpi_noirq)
- acpi_penalize_isa_irq(irq);
- else
-#endif
- pirq_penalize_isa_irq(irq);
-}
-
-static int pirq_enable_irq(struct pci_dev *dev)
-{
- int err;
- u8 pin;
- physdev_op_t op;
-
- /* Inform Xen that we are going to use this device. */
- op.cmd = PHYSDEVOP_PCI_INITIALISE_DEVICE;
- op.u.pci_initialise_device.bus = dev->bus->number;
- op.u.pci_initialise_device.dev = PCI_SLOT(dev->devfn);
- op.u.pci_initialise_device.func = PCI_FUNC(dev->devfn);
- if ( (err = HYPERVISOR_physdev_op(&op)) != 0 )
- return err;
-
- /* Now we can bind to the very final IRQ line. */
- pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &pin);
- dev->irq = pin;
-
- /* Sanity-check that an interrupt-producing device is routed
- * to an IRQ. */
- pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
- if (pin != 0) {
- if (dev->irq != 0)
- printk(KERN_INFO "PCI: Obtained IRQ %d for device %s\n",
- dev->irq, dev->slot_name);
- else
- printk(KERN_WARNING "PCI: No IRQ known for interrupt "
- "pin %c of device %s.\n", 'A' + pin - 1,
- dev->slot_name);
- }
-
- return 0;
-}
-
-int pci_vector_resources(int last, int nr_released)
-{
- int count = nr_released;
-
- int next = last;
- int offset = (last % 8);
-
- while (next < FIRST_SYSTEM_VECTOR) {
- next += 8;
-#ifdef CONFIG_X86_64
- if (next == IA32_SYSCALL_VECTOR)
- continue;
-#else
- if (next == SYSCALL_VECTOR)
- continue;
-#endif
- count++;
- if (next >= FIRST_SYSTEM_VECTOR) {
- if (offset%8) {
- next = FIRST_DEVICE_VECTOR + offset;
- offset++;
- continue;
- }
- count--;
- }
- }
-
- return count;
-}
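
The pirq_penalty[] table above biases IRQ selection away from lines with known ISA users. This hunk never scans the table itself, so the loop below is purely illustrative of how such a table is typically consumed: pick the usable IRQ with the lowest accumulated penalty.

/* Illustrative selection over the penalty table reproduced from above. */
#include <stdio.h>

static int pirq_penalty[16] = {
    1000000, 1000000, 1000000, 1000, 1000, 0, 1000, 1000,
    0, 0, 0, 0, 1000, 100000, 100000, 100000
};

int main(void)
{
    unsigned int mask = 0xfff8;   /* pcibios_irq_mask: IRQs 0-2 never used */
    int irq, best = -1;

    for (irq = 0; irq < 16; irq++) {
        if (!(mask & (1u << irq)))
            continue;             /* not usable at all */
        if (best < 0 || pirq_penalty[irq] < pirq_penalty[best])
            best = irq;
    }
    printf("least-penalized IRQ: %d\n", best);   /* prints 5 */
    return 0;
}
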
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/arch/xen/kernel/Makefile
--- a/linux-2.6.11-xen-sparse/arch/xen/kernel/Makefile Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,14 +0,0 @@
-#
-# Makefile for the linux kernel.
-#
-
-XENARCH := $(subst ",,$(CONFIG_XENARCH))
-
-CPPFLAGS_vmlinux.lds += -U$(XENARCH)
-
-$(obj)/vmlinux.lds.S:
- @ln -fsn $(srctree)/arch/$(XENARCH)/kernel/vmlinux.lds.S $@
-
-extra-y += vmlinux.lds
-
-obj-y := ctrl_if.o evtchn.o fixup.o reboot.o xen_proc.o skbuff.o devmem.o
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/arch/xen/kernel/ctrl_if.c
--- a/linux-2.6.11-xen-sparse/arch/xen/kernel/ctrl_if.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,549 +0,0 @@
-/******************************************************************************
- * ctrl_if.c
- *
- * Management functions for special interface to the domain controller.
- *
- * Copyright (c) 2004, K A Fraser
- *
- * This file may be distributed separately from the Linux kernel, or
- * incorporated into other software packages, subject to the following license:
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#include <linux/config.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/irq.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <asm-xen/ctrl_if.h>
-#include <asm-xen/evtchn.h>
-
-#if 0
-#define DPRINTK(_f, _a...) printk(KERN_ALERT "(file=%s, line=%d) " _f, \
- __FILE__ , __LINE__ , ## _a )
-#else
-#define DPRINTK(_f, _a...) ((void)0)
-#endif
-
-/*
- * Only used by initial domain which must create its own control-interface
- * event channel. This value is picked up by the user-space domain controller
- * via an ioctl.
- */
-int initdom_ctrlif_domcontroller_port = -1;
-
-static int ctrl_if_evtchn;
-static int ctrl_if_irq;
-static spinlock_t ctrl_if_lock;
-
-static struct irqaction ctrl_if_irq_action;
-
-static CONTROL_RING_IDX ctrl_if_tx_resp_cons;
-static CONTROL_RING_IDX ctrl_if_rx_req_cons;
-
-/* Incoming message requests. */
- /* Primary message type -> message handler. */
-static ctrl_msg_handler_t ctrl_if_rxmsg_handler[256];
- /* Primary message type -> callback in process context? */
-static unsigned long ctrl_if_rxmsg_blocking_context[256/sizeof(unsigned long)];
- /* Is it late enough during bootstrap to use schedule_task()? */
-static int safe_to_schedule_task;
- /* Queue up messages to be handled in process context. */
-static ctrl_msg_t ctrl_if_rxmsg_deferred[CONTROL_RING_SIZE];
-static CONTROL_RING_IDX ctrl_if_rxmsg_deferred_prod;
-static CONTROL_RING_IDX ctrl_if_rxmsg_deferred_cons;
-
-/* Incoming message responses: message identifier -> message handler/id. */
-static struct {
- ctrl_msg_handler_t fn;
- unsigned long id;
-} ctrl_if_txmsg_id_mapping[CONTROL_RING_SIZE];
-
-/* For received messages that must be deferred to process context. */
-static void __ctrl_if_rxmsg_deferred(void *unused);
-static DECLARE_WORK(ctrl_if_rxmsg_deferred_work,
- __ctrl_if_rxmsg_deferred,
- NULL);
-
-/* Deferred callbacks for people waiting for space in the transmit ring. */
-static DECLARE_TASK_QUEUE(ctrl_if_tx_tq);
-
-static DECLARE_WAIT_QUEUE_HEAD(ctrl_if_tx_wait);
-static void __ctrl_if_tx_tasklet(unsigned long data);
-static DECLARE_TASKLET(ctrl_if_tx_tasklet, __ctrl_if_tx_tasklet, 0);
-
-static void __ctrl_if_rx_tasklet(unsigned long data);
-static DECLARE_TASKLET(ctrl_if_rx_tasklet, __ctrl_if_rx_tasklet, 0);
-
-#define get_ctrl_if() ((control_if_t *)((char *)HYPERVISOR_shared_info + 2048))
-#define TX_FULL(_c) \
- (((_c)->tx_req_prod - ctrl_if_tx_resp_cons) == CONTROL_RING_SIZE)
-
-static void ctrl_if_notify_controller(void)
-{
- notify_via_evtchn(ctrl_if_evtchn);
-}
-
-static void ctrl_if_rxmsg_default_handler(ctrl_msg_t *msg, unsigned long id)
-{
- msg->length = 0;
- ctrl_if_send_response(msg);
-}
-
-static void __ctrl_if_tx_tasklet(unsigned long data)
-{
- control_if_t *ctrl_if = get_ctrl_if();
- ctrl_msg_t *msg;
- int was_full = TX_FULL(ctrl_if);
- CONTROL_RING_IDX rp;
-
- rp = ctrl_if->tx_resp_prod;
- rmb(); /* Ensure we see all requests up to 'rp'. */
-
- while ( ctrl_if_tx_resp_cons != rp )
- {
- msg = &ctrl_if->tx_ring[MASK_CONTROL_IDX(ctrl_if_tx_resp_cons)];
-
- DPRINTK("Rx-Rsp %u/%u :: %d/%d\n",
- ctrl_if_tx_resp_cons,
- ctrl_if->tx_resp_prod,
- msg->type, msg->subtype);
-
- /* Execute the callback handler, if one was specified. */
- if ( msg->id != 0xFF )
- {
- (*ctrl_if_txmsg_id_mapping[msg->id].fn)(
- msg, ctrl_if_txmsg_id_mapping[msg->id].id);
- smp_mb(); /* Execute, /then/ free. */
- ctrl_if_txmsg_id_mapping[msg->id].fn = NULL;
- }
-
- /*
- * Step over the message in the ring /after/ finishing reading it. As
- * soon as the index is updated then the message may get blown away.
- */
- smp_mb();
- ctrl_if_tx_resp_cons++;
- }
-
- if ( was_full && !TX_FULL(ctrl_if) )
- {
- wake_up(&ctrl_if_tx_wait);
- run_task_queue(&ctrl_if_tx_tq);
- }
-}
-
-static void __ctrl_if_rxmsg_deferred(void *unused)
-{
- ctrl_msg_t *msg;
- CONTROL_RING_IDX dp;
-
- dp = ctrl_if_rxmsg_deferred_prod;
- rmb(); /* Ensure we see all deferred requests up to 'dp'. */
-
- while ( ctrl_if_rxmsg_deferred_cons != dp )
- {
- msg = &ctrl_if_rxmsg_deferred[MASK_CONTROL_IDX(
- ctrl_if_rxmsg_deferred_cons++)];
- (*ctrl_if_rxmsg_handler[msg->type])(msg, 0);
- }
-}
-
-static void __ctrl_if_rx_tasklet(unsigned long data)
-{
- control_if_t *ctrl_if = get_ctrl_if();
- ctrl_msg_t msg, *pmsg;
- CONTROL_RING_IDX rp, dp;
-
- dp = ctrl_if_rxmsg_deferred_prod;
- rp = ctrl_if->rx_req_prod;
- rmb(); /* Ensure we see all requests up to 'rp'. */
-
- while ( ctrl_if_rx_req_cons != rp )
- {
- pmsg = &ctrl_if->rx_ring[MASK_CONTROL_IDX(ctrl_if_rx_req_cons++)];
- memcpy(&msg, pmsg, offsetof(ctrl_msg_t, msg));
-
- DPRINTK("Rx-Req %u/%u :: %d/%d\n",
- ctrl_if_rx_req_cons-1,
- ctrl_if->rx_req_prod,
- msg.type, msg.subtype);
-
- if ( msg.length != 0 )
- memcpy(msg.msg, pmsg->msg, msg.length);
-
- if ( test_bit(msg.type,
- (unsigned long *)&ctrl_if_rxmsg_blocking_context) )
- memcpy(&ctrl_if_rxmsg_deferred[MASK_CONTROL_IDX(dp++)],
- &msg, offsetof(ctrl_msg_t, msg) + msg.length);
- else
- (*ctrl_if_rxmsg_handler[msg.type])(&msg, 0);
- }
-
- if ( dp != ctrl_if_rxmsg_deferred_prod )
- {
- wmb();
- ctrl_if_rxmsg_deferred_prod = dp;
- schedule_work(&ctrl_if_rxmsg_deferred_work);
- }
-}
-
-static irqreturn_t ctrl_if_interrupt(int irq, void *dev_id,
- struct pt_regs *regs)
-{
- control_if_t *ctrl_if = get_ctrl_if();
-
- if ( ctrl_if_tx_resp_cons != ctrl_if->tx_resp_prod )
- tasklet_schedule(&ctrl_if_tx_tasklet);
-
- if ( ctrl_if_rx_req_cons != ctrl_if->rx_req_prod )
- tasklet_schedule(&ctrl_if_rx_tasklet);
-
- return IRQ_HANDLED;
-}
-
-int
-ctrl_if_send_message_noblock(
- ctrl_msg_t *msg,
- ctrl_msg_handler_t hnd,
- unsigned long id)
-{
- control_if_t *ctrl_if = get_ctrl_if();
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&ctrl_if_lock, flags);
-
- if ( TX_FULL(ctrl_if) )
- {
- spin_unlock_irqrestore(&ctrl_if_lock, flags);
- return -EAGAIN;
- }
-
- msg->id = 0xFF;
- if ( hnd != NULL )
- {
- for ( i = 0; ctrl_if_txmsg_id_mapping[i].fn != NULL; i++ )
- continue;
- ctrl_if_txmsg_id_mapping[i].fn = hnd;
- ctrl_if_txmsg_id_mapping[i].id = id;
- msg->id = i;
- }
-
- DPRINTK("Tx-Req %u/%u :: %d/%d\n",
- ctrl_if->tx_req_prod,
- ctrl_if_tx_resp_cons,
- msg->type, msg->subtype);
-
- memcpy(&ctrl_if->tx_ring[MASK_CONTROL_IDX(ctrl_if->tx_req_prod)],
- msg, sizeof(*msg));
- wmb(); /* Write the message before letting the controller peek at it. */
- ctrl_if->tx_req_prod++;
-
- spin_unlock_irqrestore(&ctrl_if_lock, flags);
-
- ctrl_if_notify_controller();
-
- return 0;
-}
-
-int
-ctrl_if_send_message_block(
- ctrl_msg_t *msg,
- ctrl_msg_handler_t hnd,
- unsigned long id,
- long wait_state)
-{
- DECLARE_WAITQUEUE(wait, current);
- int rc;
-
- /* Fast path. */
- if ( (rc = ctrl_if_send_message_noblock(msg, hnd, id)) != -EAGAIN )
- return rc;
-
- add_wait_queue(&ctrl_if_tx_wait, &wait);
-
- for ( ; ; )
- {
- set_current_state(wait_state);
-
- if ( (rc = ctrl_if_send_message_noblock(msg, hnd, id)) != -EAGAIN )
- break;
-
- rc = -ERESTARTSYS;
- if ( signal_pending(current) && (wait_state == TASK_INTERRUPTIBLE) )
- break;
-
- schedule();
- }
-
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&ctrl_if_tx_wait, &wait);
-
- return rc;
-}
-
-/* Allow a response-callback handler to find context of a blocked requester. */
-struct rsp_wait {
- ctrl_msg_t *msg; /* Buffer for the response message. */
- struct task_struct *task; /* The task that is blocked on the response. */
- int done; /* Indicate to 'task' that response is rcv'ed. */
-};
-
-static void __ctrl_if_get_response(ctrl_msg_t *msg, unsigned long id)
-{
- struct rsp_wait *wait = (struct rsp_wait *)id;
- struct task_struct *task = wait->task;
-
- memcpy(wait->msg, msg, sizeof(*msg));
- wmb();
- wait->done = 1;
-
- wake_up_process(task);
-}
-
-int
-ctrl_if_send_message_and_get_response(
- ctrl_msg_t *msg,
- ctrl_msg_t *rmsg,
- long wait_state)
-{
- struct rsp_wait wait;
- int rc;
-
- wait.msg = rmsg;
- wait.done = 0;
- wait.task = current;
-
- if ( (rc = ctrl_if_send_message_block(msg, __ctrl_if_get_response,
- (unsigned long)&wait,
- wait_state)) != 0 )
- return rc;
-
- for ( ; ; )
- {
- /* NB. Can't easily support TASK_INTERRUPTIBLE here. */
- set_current_state(TASK_UNINTERRUPTIBLE);
- if ( wait.done )
- break;
- schedule();
- }
-
- set_current_state(TASK_RUNNING);
- return 0;
-}
-
-int
-ctrl_if_enqueue_space_callback(
- struct tq_struct *task)
-{
- control_if_t *ctrl_if = get_ctrl_if();
-
- /* Fast path. */
- if ( !TX_FULL(ctrl_if) )
- return 0;
-
- (void)queue_task(task, &ctrl_if_tx_tq);
-
- /*
- * We may race execution of the task queue, so return re-checked status. If
- * the task is not executed despite the ring being non-full then we will
- * certainly return 'not full'.
- */
- smp_mb();
- return TX_FULL(ctrl_if);
-}
-
-void
-ctrl_if_send_response(
- ctrl_msg_t *msg)
-{
- control_if_t *ctrl_if = get_ctrl_if();
- unsigned long flags;
- ctrl_msg_t *dmsg;
-
- /*
- * NB. The response may be the original request message, modified in-place.
- * In this situation we may have src==dst, so no copying is required.
- */
- spin_lock_irqsave(&ctrl_if_lock, flags);
-
- DPRINTK("Tx-Rsp %u :: %d/%d\n",
- ctrl_if->rx_resp_prod,
- msg->type, msg->subtype);
-
- dmsg = &ctrl_if->rx_ring[MASK_CONTROL_IDX(ctrl_if->rx_resp_prod)];
- if ( dmsg != msg )
- memcpy(dmsg, msg, sizeof(*msg));
-
- wmb(); /* Write the message before letting the controller peek at it. */
- ctrl_if->rx_resp_prod++;
-
- spin_unlock_irqrestore(&ctrl_if_lock, flags);
-
- ctrl_if_notify_controller();
-}
-
-int
-ctrl_if_register_receiver(
- u8 type,
- ctrl_msg_handler_t hnd,
- unsigned int flags)
-{
- unsigned long _flags;
- int inuse;
-
- spin_lock_irqsave(&ctrl_if_lock, _flags);
-
- inuse = (ctrl_if_rxmsg_handler[type] != ctrl_if_rxmsg_default_handler);
-
- if ( inuse )
- {
- printk(KERN_INFO "Receiver %p already established for control "
- "messages of type %d.\n", ctrl_if_rxmsg_handler[type], type);
- }
- else
- {
- ctrl_if_rxmsg_handler[type] = hnd;
- clear_bit(type, (unsigned long *)&ctrl_if_rxmsg_blocking_context);
- if ( flags == CALLBACK_IN_BLOCKING_CONTEXT )
- {
- set_bit(type, (unsigned long *)&ctrl_if_rxmsg_blocking_context);
- if ( !safe_to_schedule_task )
- BUG();
- }
- }
-
- spin_unlock_irqrestore(&ctrl_if_lock, _flags);
-
- return !inuse;
-}
-
-void
-ctrl_if_unregister_receiver(
- u8 type,
- ctrl_msg_handler_t hnd)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ctrl_if_lock, flags);
-
- if ( ctrl_if_rxmsg_handler[type] != hnd )
- printk(KERN_INFO "Receiver %p is not registered for control "
- "messages of type %d.\n", hnd, type);
- else
- ctrl_if_rxmsg_handler[type] = ctrl_if_rxmsg_default_handler;
-
- spin_unlock_irqrestore(&ctrl_if_lock, flags);
-
- /* Ensure that @hnd will not be executed after this function returns. */
- tasklet_unlock_wait(&ctrl_if_rx_tasklet);
-}
-
-void ctrl_if_suspend(void)
-{
- teardown_irq(ctrl_if_irq, &ctrl_if_irq_action);
- unbind_evtchn_from_irq(ctrl_if_evtchn);
-}
-
-void ctrl_if_resume(void)
-{
- control_if_t *ctrl_if = get_ctrl_if();
-
- if ( xen_start_info.flags & SIF_INITDOMAIN )
- {
- /*
- * The initial domain must create its own domain-controller link.
- * The controller is probably not running at this point, but will
- * pick up its end of the event channel from
- */
- evtchn_op_t op;
- op.cmd = EVTCHNOP_bind_interdomain;
- op.u.bind_interdomain.dom1 = DOMID_SELF;
- op.u.bind_interdomain.dom2 = DOMID_SELF;
- op.u.bind_interdomain.port1 = 0;
- op.u.bind_interdomain.port2 = 0;
- if ( HYPERVISOR_event_channel_op(&op) != 0 )
- BUG();
- xen_start_info.domain_controller_evtchn = op.u.bind_interdomain.port1;
- initdom_ctrlif_domcontroller_port = op.u.bind_interdomain.port2;
- }
-
- /* Sync up with shared indexes. */
- ctrl_if_tx_resp_cons = ctrl_if->tx_resp_prod;
- ctrl_if_rx_req_cons = ctrl_if->rx_resp_prod;
-
- ctrl_if_evtchn = xen_start_info.domain_controller_evtchn;
- ctrl_if_irq = bind_evtchn_to_irq(ctrl_if_evtchn);
-
- memset(&ctrl_if_irq_action, 0, sizeof(ctrl_if_irq_action));
- ctrl_if_irq_action.handler = ctrl_if_interrupt;
- ctrl_if_irq_action.name = "ctrl-if";
- (void)setup_irq(ctrl_if_irq, &ctrl_if_irq_action);
-}
-
-void __init ctrl_if_init(void)
-{
- int i;
-
- for ( i = 0; i < 256; i++ )
- ctrl_if_rxmsg_handler[i] = ctrl_if_rxmsg_default_handler;
-
- spin_lock_init(&ctrl_if_lock);
-
- ctrl_if_resume();
-}
-
-
-/* This is called after it is safe to call schedule_task(). */
-static int __init ctrl_if_late_setup(void)
-{
- safe_to_schedule_task = 1;
- return 0;
-}
-__initcall(ctrl_if_late_setup);
-
-
-/*
- * !! The following are DANGEROUS FUNCTIONS !!
- * Use with care [for example, see xencons_force_flush()].
- */
-
-int ctrl_if_transmitter_empty(void)
-{
- return (get_ctrl_if()->tx_req_prod == ctrl_if_tx_resp_cons);
-}
-
-void ctrl_if_discard_responses(void)
-{
- ctrl_if_tx_resp_cons = get_ctrl_if()->tx_resp_prod;
-}
-
-EXPORT_SYMBOL(ctrl_if_send_message_noblock);
-EXPORT_SYMBOL(ctrl_if_send_message_block);
-EXPORT_SYMBOL(ctrl_if_send_message_and_get_response);
-EXPORT_SYMBOL(ctrl_if_enqueue_space_callback);
-EXPORT_SYMBOL(ctrl_if_send_response);
-EXPORT_SYMBOL(ctrl_if_register_receiver);
-EXPORT_SYMBOL(ctrl_if_unregister_receiver);
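
ctrl_if.c above relies throughout on free-running ring indices: producer and consumer counters increment forever, slots are addressed modulo a power-of-two ring size (cf. MASK_CONTROL_IDX), and the ring is full exactly when prod - cons == SIZE (the TX_FULL() test). A single-threaded user-space sketch of the idiom, without the barriers and locking the kernel version needs:

/* Free-running ring indices over a power-of-two ring. */
#include <assert.h>

#define RING_SIZE 8                       /* must be a power of two */
#define MASK_IDX(i) ((i) & (RING_SIZE - 1))

static int ring[RING_SIZE];
static unsigned int prod, cons;           /* free-running indices */

static int ring_put(int v)
{
    if (prod - cons == RING_SIZE)         /* the TX_FULL() test */
        return -1;
    ring[MASK_IDX(prod++)] = v;
    return 0;
}

static int ring_get(int *v)
{
    if (cons == prod)                     /* empty */
        return -1;
    *v = ring[MASK_IDX(cons++)];
    return 0;
}

int main(void)
{
    int v, i;
    for (i = 0; i < RING_SIZE; i++)
        assert(ring_put(i) == 0);
    assert(ring_put(99) == -1);           /* full */
    assert(ring_get(&v) == 0 && v == 0);  /* FIFO order survives wrap */
    assert(ring_put(99) == 0);
    return 0;
}
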
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/arch/xen/kernel/devmem.c
--- a/linux-2.6.11-xen-sparse/arch/xen/kernel/devmem.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,158 +0,0 @@
-/*
- * Originally from linux/drivers/char/mem.c
- *
- * Copyright (C) 1991, 1992 Linus Torvalds
- *
- * Added devfs support.
- * Jan-11-1998, C. Scott Ananian <cananian@xxxxxxxxxxxxxxxxxxxx>
- * Shared /dev/zero mmaping support, Feb 2000, Kanoj Sarcar <kanoj@xxxxxxx>
- */
-
-#include <linux/config.h>
-#include <linux/mm.h>
-#include <linux/miscdevice.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/mman.h>
-#include <linux/random.h>
-#include <linux/init.h>
-#include <linux/raw.h>
-#include <linux/tty.h>
-#include <linux/capability.h>
-#include <linux/smp_lock.h>
-#include <linux/devfs_fs_kernel.h>
-#include <linux/ptrace.h>
-#include <linux/device.h>
-#include <asm/pgalloc.h>
-#include <asm/uaccess.h>
-#include <asm/io.h>
-
-static inline int uncached_access(struct file *file, unsigned long addr)
-{
- if (file->f_flags & O_SYNC)
- return 1;
- /* Xen sets correct MTRR type on non-RAM for us. */
- return 0;
-}
-
-/*
- * This function reads the *physical* memory. The f_pos points directly to the
- * memory location.
- */
-static ssize_t read_mem(struct file * file, char __user * buf,
- size_t count, loff_t *ppos)
-{
- unsigned long i, p = *ppos;
- ssize_t read = -EFAULT;
- void *v;
-
- if ((v = ioremap(p, count)) == NULL) {
- /*
- * Some programs (e.g., dmidecode) groove off into weird RAM
- * areas where no tables can possibly exist (because Xen will
- * have stomped on them!). These programs get rather upset if
- * we let them know that Xen failed their access, so we fake
- * out a read of all zeroes. :-)
- */
- for (i = 0; i < count; i++)
- if (put_user(0, buf+i))
- return -EFAULT;
- return count;
- }
- if (copy_to_user(buf, v, count))
- goto out;
-
- read = count;
- *ppos += read;
-out:
- iounmap(v);
- return read;
-}
-
-static ssize_t write_mem(struct file * file, const char __user * buf,
- size_t count, loff_t *ppos)
-{
- unsigned long p = *ppos;
- ssize_t written = -EFAULT;
- void *v;
-
- if ((v = ioremap(p, count)) == NULL)
- return -EFAULT;
- if (copy_from_user(v, buf, count))
- goto out;
-
- written = count;
- *ppos += written;
-out:
- iounmap(v);
- return written;
-}
-
-static int mmap_mem(struct file * file, struct vm_area_struct * vma)
-{
- unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
- int uncached;
-
- uncached = uncached_access(file, offset);
- if (uncached)
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
- /* Don't try to swap out physical pages.. */
- vma->vm_flags |= VM_RESERVED;
-
- /*
- * Don't dump addresses that are not real memory to a core file.
- */
- if (uncached)
- vma->vm_flags |= VM_IO;
-
- if (io_remap_page_range(vma, vma->vm_start, offset,
- vma->vm_end-vma->vm_start, vma->vm_page_prot))
- return -EAGAIN;
-
- return 0;
-}
-
-/*
- * The memory devices use the full 32/64 bits of the offset, and so we cannot
- * check against negative addresses: they are ok. The return value is weird,
- * though, in that case (0).
- *
- * also note that seeking relative to the "end of file" isn't supported:
- * it has no meaning, so it returns -EINVAL.
- */
-static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
-{
- loff_t ret;
-
- down(&file->f_dentry->d_inode->i_sem);
- switch (orig) {
- case 0:
- file->f_pos = offset;
- ret = file->f_pos;
- force_successful_syscall_return();
- break;
- case 1:
- file->f_pos += offset;
- ret = file->f_pos;
- force_successful_syscall_return();
- break;
- default:
- ret = -EINVAL;
- }
- up(&file->f_dentry->d_inode->i_sem);
- return ret;
-}
-
-static int open_mem(struct inode * inode, struct file * filp)
-{
- return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
-}
-
-struct file_operations mem_fops = {
- .llseek = memory_lseek,
- .read = read_mem,
- .write = write_mem,
- .mmap = mmap_mem,
- .open = open_mem,
-};
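
memory_lseek() above permits absolute and relative positioning but rejects seeking from the end, which has no meaning for a physical-memory device. A compact user-space model of that whence switch, with a plain variable standing in for file->f_pos:

/* Model of memory_lseek()'s whence handling. */
#include <assert.h>
#include <errno.h>

static long long f_pos;

static long long mem_lseek(long long offset, int whence)
{
    switch (whence) {
    case 0: /* SEEK_SET */ f_pos  = offset; return f_pos;
    case 1: /* SEEK_CUR */ f_pos += offset; return f_pos;
    default:               return -EINVAL;  /* no end-of-file to seek from */
    }
}

int main(void)
{
    assert(mem_lseek(0x1000, 0) == 0x1000);
    assert(mem_lseek(0x10, 1)  == 0x1010);
    assert(mem_lseek(0, 2)     == -EINVAL);
    return 0;
}
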
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/arch/xen/kernel/evtchn.c
--- a/linux-2.6.11-xen-sparse/arch/xen/kernel/evtchn.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,535 +0,0 @@
-/******************************************************************************
- * evtchn.c
- *
- * Communication via Xen event channels.
- *
- * Copyright (c) 2002-2004, K A Fraser
- *
- * This file may be distributed separately from the Linux kernel, or
- * incorporated into other software packages, subject to the following license:
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/irq.h>
-#include <linux/interrupt.h>
-#include <linux/sched.h>
-#include <linux/kernel_stat.h>
-#include <linux/version.h>
-#include <asm/atomic.h>
-#include <asm/system.h>
-#include <asm/ptrace.h>
-#include <asm/synch_bitops.h>
-#include <asm-xen/xen-public/event_channel.h>
-#include <asm-xen/xen-public/physdev.h>
-#include <asm-xen/ctrl_if.h>
-#include <asm-xen/hypervisor.h>
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-EXPORT_SYMBOL(force_evtchn_callback);
-EXPORT_SYMBOL(evtchn_do_upcall);
-#endif
-
-/*
- * This lock protects updates to the following mapping and reference-count
- * arrays. The lock does not need to be acquired to read the mapping tables.
- */
-static spinlock_t irq_mapping_update_lock;
-
-/* IRQ <-> event-channel mappings. */
-static int evtchn_to_irq[NR_EVENT_CHANNELS];
-static int irq_to_evtchn[NR_IRQS];
-
-/* IRQ <-> VIRQ mapping. */
-static int virq_to_irq[NR_VIRQS];
-
-/* Reference counts for bindings to IRQs. */
-static int irq_bindcount[NR_IRQS];
-
-/* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
-static unsigned long pirq_needs_unmask_notify[NR_PIRQS/sizeof(unsigned long)];
-
-/* Upcall to generic IRQ layer. */
-#ifdef CONFIG_X86
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9)
-extern fastcall unsigned int do_IRQ(struct pt_regs *regs);
-#else
-extern asmlinkage unsigned int do_IRQ(struct pt_regs *regs);
-#endif
-#define do_IRQ(irq, regs) do { \
- (regs)->orig_eax = (irq); \
- do_IRQ((regs)); \
-} while (0)
-#endif
-
-#define VALID_EVTCHN(_chn) ((_chn) >= 0)
-
-/*
- * Force a proper event-channel callback from Xen after clearing the
- * callback mask. We do this in a very simple manner, by making a call
- * down into Xen. The pending flag will be checked by Xen on return.
- */
-void force_evtchn_callback(void)
-{
- (void)HYPERVISOR_xen_version(0);
-}
-
-/* NB. Interrupts are disabled on entry. */
-asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
-{
- unsigned long l1, l2;
- unsigned int l1i, l2i, port;
- int irq;
- shared_info_t *s = HYPERVISOR_shared_info;
-
- s->vcpu_data[0].evtchn_upcall_pending = 0;
-
- /* NB. No need for a barrier here -- XCHG is a barrier on x86. */
- l1 = xchg(&s->evtchn_pending_sel, 0);
- while ( l1 != 0 )
- {
- l1i = __ffs(l1);
- l1 &= ~(1 << l1i);
-
- l2 = s->evtchn_pending[l1i] & ~s->evtchn_mask[l1i];
- while ( l2 != 0 )
- {
- l2i = __ffs(l2);
- l2 &= ~(1 << l2i);
-
- port = (l1i << 5) + l2i;
- if ( (irq = evtchn_to_irq[port]) != -1 )
- do_IRQ(irq, regs);
- else
- evtchn_device_upcall(port);
- }
- }
-}
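
evtchn_do_upcall() above walks a two-level pending bitmap: a selector word names the 32-bit groups with work, then each group is drained bit by bit, with port = group*32 + bit (the (l1i << 5) + l2i above). A stand-alone sketch of the scan, using __builtin_ctzl in place of the kernel's __ffs; names and sizes are illustrative:

/* Two-level pending-bit scan, modelled in user space. */
#include <stdio.h>

#define GROUPS 4

static unsigned long sel;                 /* level-1 selector */
static unsigned long pending[GROUPS];     /* level-2 bitmaps  */

static void mark(unsigned int port)
{
    pending[port / 32] |= 1UL << (port % 32);
    sel |= 1UL << (port / 32);
}

int main(void)
{
    unsigned long l1, l2;
    unsigned int l1i, l2i;

    mark(3); mark(37); mark(100);

    l1 = sel; sel = 0;                    /* the kernel uses xchg() here */
    while (l1 != 0) {
        l1i = __builtin_ctzl(l1);
        l1 &= ~(1UL << l1i);
        l2 = pending[l1i]; pending[l1i] = 0;
        while (l2 != 0) {
            l2i = __builtin_ctzl(l2);
            l2 &= ~(1UL << l2i);
            printf("port %u pending\n", l1i * 32 + l2i);
        }
    }
    return 0;
}
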
-
-static int find_unbound_irq(void)
-{
- int irq;
-
- for ( irq = 0; irq < NR_IRQS; irq++ )
- if ( irq_bindcount[irq] == 0 )
- break;
-
- if ( irq == NR_IRQS )
- panic("No available IRQ to bind to: increase NR_IRQS!\n");
-
- return irq;
-}
-
-int bind_virq_to_irq(int virq)
-{
- evtchn_op_t op;
- int evtchn, irq;
-
- spin_lock(&irq_mapping_update_lock);
-
- if ( (irq = virq_to_irq[virq]) == -1 )
- {
- op.cmd = EVTCHNOP_bind_virq;
- op.u.bind_virq.virq = virq;
- if ( HYPERVISOR_event_channel_op(&op) != 0 )
- panic("Failed to bind virtual IRQ %d\n", virq);
- evtchn = op.u.bind_virq.port;
-
- irq = find_unbound_irq();
- evtchn_to_irq[evtchn] = irq;
- irq_to_evtchn[irq] = evtchn;
-
- virq_to_irq[virq] = irq;
- }
-
- irq_bindcount[irq]++;
-
- spin_unlock(&irq_mapping_update_lock);
-
- return irq;
-}
-
-void unbind_virq_from_irq(int virq)
-{
- evtchn_op_t op;
- int irq = virq_to_irq[virq];
- int evtchn = irq_to_evtchn[irq];
-
- spin_lock(&irq_mapping_update_lock);
-
- if ( --irq_bindcount[irq] == 0 )
- {
- op.cmd = EVTCHNOP_close;
- op.u.close.dom = DOMID_SELF;
- op.u.close.port = evtchn;
- if ( HYPERVISOR_event_channel_op(&op) != 0 )
- panic("Failed to unbind virtual IRQ %d\n", virq);
-
- evtchn_to_irq[evtchn] = -1;
- irq_to_evtchn[irq] = -1;
- virq_to_irq[virq] = -1;
- }
-
- spin_unlock(&irq_mapping_update_lock);
-}
-
-int bind_evtchn_to_irq(int evtchn)
-{
- int irq;
-
- spin_lock(&irq_mapping_update_lock);
-
- if ( (irq = evtchn_to_irq[evtchn]) == -1 )
- {
- irq = find_unbound_irq();
- evtchn_to_irq[evtchn] = irq;
- irq_to_evtchn[irq] = evtchn;
- }
-
- irq_bindcount[irq]++;
-
- spin_unlock(&irq_mapping_update_lock);
-
- return irq;
-}
-
-void unbind_evtchn_from_irq(int evtchn)
-{
- int irq = evtchn_to_irq[evtchn];
-
- spin_lock(&irq_mapping_update_lock);
-
- if ( --irq_bindcount[irq] == 0 )
- {
- evtchn_to_irq[evtchn] = -1;
- irq_to_evtchn[irq] = -1;
- }
-
- spin_unlock(&irq_mapping_update_lock);
-}
-
-
-/*
- * Interface to generic handling in irq.c
- */
-
-static unsigned int startup_dynirq(unsigned int irq)
-{
- int evtchn = irq_to_evtchn[irq];
-
- if ( !VALID_EVTCHN(evtchn) )
- return 0;
- unmask_evtchn(evtchn);
- return 0;
-}
-
-static void shutdown_dynirq(unsigned int irq)
-{
- int evtchn = irq_to_evtchn[irq];
-
- if ( !VALID_EVTCHN(evtchn) )
- return;
- mask_evtchn(evtchn);
-}
-
-static void enable_dynirq(unsigned int irq)
-{
- int evtchn = irq_to_evtchn[irq];
-
- unmask_evtchn(evtchn);
-}
-
-static void disable_dynirq(unsigned int irq)
-{
- int evtchn = irq_to_evtchn[irq];
-
- mask_evtchn(evtchn);
-}
-
-static void ack_dynirq(unsigned int irq)
-{
- int evtchn = irq_to_evtchn[irq];
-
- mask_evtchn(evtchn);
- clear_evtchn(evtchn);
-}
-
-static void end_dynirq(unsigned int irq)
-{
- int evtchn = irq_to_evtchn[irq];
-
- if ( !(irq_desc[irq].status & IRQ_DISABLED) )
- unmask_evtchn(evtchn);
-}
-
-static struct hw_interrupt_type dynirq_type = {
- "Dynamic-irq",
- startup_dynirq,
- shutdown_dynirq,
- enable_dynirq,
- disable_dynirq,
- ack_dynirq,
- end_dynirq,
- NULL
-};
-
-static inline void pirq_unmask_notify(int pirq)
-{
- physdev_op_t op;
- if ( unlikely(test_bit(pirq, &pirq_needs_unmask_notify[0])) )
- {
- op.cmd = PHYSDEVOP_IRQ_UNMASK_NOTIFY;
- (void)HYPERVISOR_physdev_op(&op);
- }
-}
-
-static inline void pirq_query_unmask(int pirq)
-{
- physdev_op_t op;
- op.cmd = PHYSDEVOP_IRQ_STATUS_QUERY;
- op.u.irq_status_query.irq = pirq;
- (void)HYPERVISOR_physdev_op(&op);
- clear_bit(pirq, &pirq_needs_unmask_notify[0]);
- if ( op.u.irq_status_query.flags & PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY )
- set_bit(pirq, &pirq_needs_unmask_notify[0]);
-}
-
-/*
- * On startup, if there is no action associated with the IRQ then we are
- * probing. In this case we should not share with others as it will confuse us.
- */
-#define probing_irq(_irq) (irq_desc[(_irq)].action == NULL)
-
-static unsigned int startup_pirq(unsigned int irq)
-{
- evtchn_op_t op;
- int evtchn;
-
- op.cmd = EVTCHNOP_bind_pirq;
- op.u.bind_pirq.pirq = irq;
- /* NB. We are happy to share unless we are probing. */
- op.u.bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
- if ( HYPERVISOR_event_channel_op(&op) != 0 )
- {
- if ( !probing_irq(irq) ) /* Some failures are expected when probing. */
- printk(KERN_INFO "Failed to obtain physical IRQ %d\n", irq);
- return 0;
- }
- evtchn = op.u.bind_pirq.port;
-
- pirq_query_unmask(irq_to_pirq(irq));
-
- evtchn_to_irq[evtchn] = irq;
- irq_to_evtchn[irq] = evtchn;
-
- unmask_evtchn(evtchn);
- pirq_unmask_notify(irq_to_pirq(irq));
-
- return 0;
-}
-
-static void shutdown_pirq(unsigned int irq)
-{
- evtchn_op_t op;
- int evtchn = irq_to_evtchn[irq];
-
- if ( !VALID_EVTCHN(evtchn) )
- return;
-
- mask_evtchn(evtchn);
-
- op.cmd = EVTCHNOP_close;
- op.u.close.dom = DOMID_SELF;
- op.u.close.port = evtchn;
- if ( HYPERVISOR_event_channel_op(&op) != 0 )
- panic("Failed to unbind physical IRQ %d\n", irq);
-
- evtchn_to_irq[evtchn] = -1;
- irq_to_evtchn[irq] = -1;
-}
-
-static void enable_pirq(unsigned int irq)
-{
- int evtchn = irq_to_evtchn[irq];
- if ( !VALID_EVTCHN(evtchn) )
- return;
- unmask_evtchn(evtchn);
- pirq_unmask_notify(irq_to_pirq(irq));
-}
-
-static void disable_pirq(unsigned int irq)
-{
- int evtchn = irq_to_evtchn[irq];
- if ( !VALID_EVTCHN(evtchn) )
- return;
- mask_evtchn(evtchn);
-}
-
-static void ack_pirq(unsigned int irq)
-{
- int evtchn = irq_to_evtchn[irq];
- if ( !VALID_EVTCHN(evtchn) )
- return;
- mask_evtchn(evtchn);
- clear_evtchn(evtchn);
-}
-
-static void end_pirq(unsigned int irq)
-{
- int evtchn = irq_to_evtchn[irq];
- if ( !VALID_EVTCHN(evtchn) )
- return;
- if ( !(irq_desc[irq].status & IRQ_DISABLED) )
- {
- unmask_evtchn(evtchn);
- pirq_unmask_notify(irq_to_pirq(irq));
- }
-}
-
-static struct hw_interrupt_type pirq_type = {
- "Phys-irq",
- startup_pirq,
- shutdown_pirq,
- enable_pirq,
- disable_pirq,
- ack_pirq,
- end_pirq,
- NULL
-};
-
-static irqreturn_t misdirect_interrupt(int irq, void *dev_id,
- struct pt_regs *regs)
-{
- /* nothing */
- return IRQ_HANDLED;
-}
-
-static struct irqaction misdirect_action = {
- misdirect_interrupt,
- SA_INTERRUPT,
- CPU_MASK_NONE,
- "misdirect",
- NULL,
- NULL
-};
-
-void irq_suspend(void)
-{
- int pirq, virq, irq, evtchn;
-
- /* Unbind VIRQs from event channels. */
- for ( virq = 0; virq < NR_VIRQS; virq++ )
- {
- if ( (irq = virq_to_irq[virq]) == -1 )
- continue;
- evtchn = irq_to_evtchn[irq];
-
- /* Mark the event channel as unused in our table. */
- evtchn_to_irq[evtchn] = -1;
- irq_to_evtchn[irq] = -1;
- }
-
- /* Check that no PIRQs are still bound. */
- for ( pirq = 0; pirq < NR_PIRQS; pirq++ )
- if ( (evtchn = irq_to_evtchn[pirq_to_irq(pirq)]) != -1 )
- panic("Suspend attempted while PIRQ %d bound to evtchn %d.\n",
- pirq, evtchn);
-}
-
-void irq_resume(void)
-{
- evtchn_op_t op;
- int virq, irq, evtchn;
-
- for ( evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++ )
- mask_evtchn(evtchn); /* New event-channel space is not 'live' yet. */
-
- for ( virq = 0; virq < NR_VIRQS; virq++ )
- {
- if ( (irq = virq_to_irq[virq]) == -1 )
- continue;
-
- /* Get a new binding from Xen. */
- op.cmd = EVTCHNOP_bind_virq;
- op.u.bind_virq.virq = virq;
- if ( HYPERVISOR_event_channel_op(&op) != 0 )
- panic("Failed to bind virtual IRQ %d\n", virq);
- evtchn = op.u.bind_virq.port;
-
- /* Record the new mapping. */
- evtchn_to_irq[evtchn] = irq;
- irq_to_evtchn[irq] = evtchn;
-
- /* Ready for use. */
- unmask_evtchn(evtchn);
- }
-}
-
-void __init init_IRQ(void)
-{
- int i;
-
- irq_ctx_init(0);
-
- spin_lock_init(&irq_mapping_update_lock);
-
- /* No VIRQ -> IRQ mappings. */
- for ( i = 0; i < NR_VIRQS; i++ )
- virq_to_irq[i] = -1;
-
- /* No event-channel -> IRQ mappings. */
- for ( i = 0; i < NR_EVENT_CHANNELS; i++ )
- {
- evtchn_to_irq[i] = -1;
- mask_evtchn(i); /* No event channels are 'live' right now. */
- }
-
- /* No IRQ -> event-channel mappings. */
- for ( i = 0; i < NR_IRQS; i++ )
- irq_to_evtchn[i] = -1;
-
- for ( i = 0; i < NR_DYNIRQS; i++ )
- {
- /* Dynamic IRQ space is currently unbound. Zero the refcnts. */
- irq_bindcount[dynirq_to_irq(i)] = 0;
-
- irq_desc[dynirq_to_irq(i)].status = IRQ_DISABLED;
- irq_desc[dynirq_to_irq(i)].action = 0;
- irq_desc[dynirq_to_irq(i)].depth = 1;
- irq_desc[dynirq_to_irq(i)].handler = &dynirq_type;
- }
-
- for ( i = 0; i < NR_PIRQS; i++ )
- {
- /* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
- irq_bindcount[pirq_to_irq(i)] = 1;
-
- irq_desc[pirq_to_irq(i)].status = IRQ_DISABLED;
- irq_desc[pirq_to_irq(i)].action = 0;
- irq_desc[pirq_to_irq(i)].depth = 1;
- irq_desc[pirq_to_irq(i)].handler = &pirq_type;
- }
-
- (void)setup_irq(bind_virq_to_irq(VIRQ_MISDIRECT), &misdirect_action);
-
- /* This needs to be done early, but after the IRQ subsystem is alive. */
- ctrl_if_init();
-}
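
The bind/unbind routines above keep three arrays consistent: evtchn->irq, irq->evtchn, and a per-IRQ reference count, so a mapping is installed on first use and torn down only by the last user. A user-space model of that bookkeeping, with small illustrative array sizes and no locking:

/* Paired mapping tables with refcounted bind/unbind. */
#include <assert.h>

#define NR_EVTCHN 16
#define NR_IRQ    16

static int evtchn_to_irq[NR_EVTCHN];
static int irq_to_evtchn[NR_IRQ];
static int bindcount[NR_IRQ];

static int bind(int evtchn)
{
    int irq = evtchn_to_irq[evtchn];

    if (irq == -1) {                       /* first binding: find a free IRQ */
        for (irq = 0; bindcount[irq] != 0; irq++)
            ;
        evtchn_to_irq[evtchn] = irq;
        irq_to_evtchn[irq] = evtchn;
    }
    bindcount[irq]++;
    return irq;
}

static void unbind(int evtchn)
{
    int irq = evtchn_to_irq[evtchn];

    if (--bindcount[irq] == 0) {           /* last user: drop the mapping */
        evtchn_to_irq[evtchn] = -1;
        irq_to_evtchn[irq] = -1;
    }
}

int main(void)
{
    int i, irq;
    for (i = 0; i < NR_EVTCHN; i++) evtchn_to_irq[i] = -1;
    for (i = 0; i < NR_IRQ; i++)    irq_to_evtchn[i] = -1;

    irq = bind(5);
    assert(bind(5) == irq);                /* second bind reuses the IRQ */
    unbind(5);
    assert(evtchn_to_irq[5] == irq);       /* still held by first binding */
    unbind(5);
    assert(evtchn_to_irq[5] == -1);
    return 0;
}
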
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/arch/xen/kernel/fixup.c
--- a/linux-2.6.11-xen-sparse/arch/xen/kernel/fixup.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,83 +0,0 @@
-/******************************************************************************
- * fixup.c
- *
- * Binary-rewriting of certain IA32 instructions, on notification by Xen.
- * Used to avoid repeated slow emulation of common instructions used by the
- * user-space TLS (Thread-Local Storage) libraries.
- *
- * **** NOTE ****
- * Issues with the binary rewriting have caused it to be removed. Instead
- * we rely on Xen's emulator to boot the kernel, and then print a banner
- * message recommending that the user disables /lib/tls.
- *
- * Copyright (c) 2004, K A Fraser
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/config.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/version.h>
-
-#define DP(_f) printk(KERN_ALERT " " _f "\n")
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-#define __LINKAGE fastcall
-#else
-#define __LINKAGE asmlinkage
-#endif
-
-__LINKAGE void do_fixup_4gb_segment(struct pt_regs *regs, long error_code)
-{
- static unsigned long printed = 0;
- int i;
-
- if ( !test_and_set_bit(0, &printed) )
- {
- HYPERVISOR_vm_assist(VMASST_CMD_disable,
- VMASST_TYPE_4gb_segments_notify);
-
- DP("");
- DP("***************************************************************");
- DP("***************************************************************");
- DP("** WARNING: Currently emulating unsupported memory accesses **");
- DP("** in /lib/tls libraries. The emulation is very **");
- DP("** slow. To ensure full performance you should **");
- DP("** execute the following as root: **");
- DP("** mv /lib/tls /lib/tls.disabled **");
- DP("***************************************************************");
- DP("***************************************************************");
- DP("");
-
- for ( i = 5; i > 0; i-- )
- {
- printk("Pausing... %d", i);
- mdelay(1000);
- printk("\b\b\b\b\b\b\b\b\b\b\b\b");
- }
- printk("Continuing...\n\n");
- }
-}
-
-static int __init fixup_init(void)
-{
- HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments_notify);
- return 0;
-}
-__initcall(fixup_init);
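
do_fixup_4gb_segment() above uses test_and_set_bit() on a static word to print its banner exactly once, no matter how many faults arrive. The same one-shot idiom in portable C11, modelled with an atomic flag so it stays race-free in user space too:

/* "Print once" via an atomic test-and-set flag. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_flag printed = ATOMIC_FLAG_INIT;

static void warn_once(void)
{
    /* First caller gets 'false' back and prints; everyone else skips. */
    if (!atomic_flag_test_and_set(&printed))
        fprintf(stderr, "** WARNING: slow emulated TLS accesses **\n");
}

int main(void)
{
    warn_once();
    warn_once();   /* silent: the banner appears exactly once */
    return 0;
}
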
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/arch/xen/kernel/reboot.c
--- a/linux-2.6.11-xen-sparse/arch/xen/kernel/reboot.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,256 +0,0 @@
-
-#define __KERNEL_SYSCALLS__
-static int errno;
-#include <linux/errno.h>
-#include <linux/version.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/unistd.h>
-#include <linux/module.h>
-#include <linux/reboot.h>
-#include <linux/sysrq.h>
-#include <asm/irq.h>
-#include <asm/mmu_context.h>
-#include <asm-xen/ctrl_if.h>
-#include <asm-xen/hypervisor.h>
-#include <asm-xen/xen-public/dom0_ops.h>
-#include <asm-xen/linux-public/suspend.h>
-#include <asm-xen/queues.h>
-
-void machine_restart(char * __unused)
-{
- /* We really want to get pending console data out before we die. */
- extern void xencons_force_flush(void);
- xencons_force_flush();
- HYPERVISOR_reboot();
-}
-
-void machine_halt(void)
-{
- machine_power_off();
-}
-
-void machine_power_off(void)
-{
- /* We really want to get pending console data out before we die. */
- extern void xencons_force_flush(void);
- xencons_force_flush();
- HYPERVISOR_shutdown();
-}
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-int reboot_thru_bios = 0; /* for dmi_scan.c */
-EXPORT_SYMBOL(machine_restart);
-EXPORT_SYMBOL(machine_halt);
-EXPORT_SYMBOL(machine_power_off);
-#endif
-
-
-/******************************************************************************
- * Stop/pickle callback handling.
- */
-
-/* Ignore multiple shutdown requests. */
-static int shutting_down = -1;
-
-static void __do_suspend(void)
-{
- int i, j;
- suspend_record_t *suspend_record;
-
- /* Hmmm... a cleaner interface to suspend/resume blkdevs would be nice. */
- /* XXX SMH: yes it would :-( */
-#ifdef CONFIG_XEN_BLKDEV_FRONTEND
- extern void blkdev_suspend(void);
- extern void blkdev_resume(void);
-#else
-#define blkdev_suspend() do{}while(0)
-#define blkdev_resume() do{}while(0)
-#endif
-
-#ifdef CONFIG_XEN_NETDEV_FRONTEND
- extern void netif_suspend(void);
- extern void netif_resume(void);
-#else
-#define netif_suspend() do{}while(0)
-#define netif_resume() do{}while(0)
-#endif
-
- extern void time_suspend(void);
- extern void time_resume(void);
- extern unsigned long max_pfn;
- extern unsigned int *pfn_to_mfn_frame_list;
-
- suspend_record = (suspend_record_t *)__get_free_page(GFP_KERNEL);
- if ( suspend_record == NULL )
- goto out;
-
- suspend_record->nr_pfns = max_pfn; /* final number of pfns */
-
- __cli();
-
- netif_suspend();
-
- blkdev_suspend();
-
- time_suspend();
-
- ctrl_if_suspend();
-
- irq_suspend();
-
- HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
- clear_fixmap(FIX_SHARED_INFO);
-
- memcpy(&suspend_record->resume_info, &xen_start_info,
- sizeof(xen_start_info));
-
- HYPERVISOR_suspend(virt_to_machine(suspend_record) >> PAGE_SHIFT);
-
- HYPERVISOR_vm_assist(VMASST_CMD_enable,
- VMASST_TYPE_4gb_segments);
-#ifdef CONFIG_XEN_WRITABLE_PAGETABLES
- HYPERVISOR_vm_assist(VMASST_CMD_enable,
- VMASST_TYPE_writable_pagetables);
-#endif
-
- shutting_down = -1;
-
- memcpy(&xen_start_info, &suspend_record->resume_info,
- sizeof(xen_start_info));
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
- set_fixmap_ma(FIX_SHARED_INFO, xen_start_info.shared_info);
-#else
- set_fixmap(FIX_SHARED_INFO, xen_start_info.shared_info);
-#endif
-
- HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
-
- memset(empty_zero_page, 0, PAGE_SIZE);
-
- for ( i=0, j=0; i < max_pfn; i+=(PAGE_SIZE/sizeof(unsigned long)), j++ )
- {
- pfn_to_mfn_frame_list[j] =
- virt_to_machine(&phys_to_machine_mapping[i]) >> PAGE_SHIFT;
- }
- HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list =
- virt_to_machine(pfn_to_mfn_frame_list) >> PAGE_SHIFT;
-
-
- irq_resume();
-
- ctrl_if_resume();
-
- time_resume();
-
- blkdev_resume();
-
- netif_resume();
-
- __sti();
-
- out:
- if ( suspend_record != NULL )
- free_page((unsigned long)suspend_record);
-}
-
-static int shutdown_process(void *__unused)
-{
- static char *envp[] = { "HOME=/", "TERM=linux",
- "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
- static char *restart_argv[] = { "/sbin/shutdown", "-r", "now", NULL };
- static char *poweroff_argv[] = { "/sbin/halt", "-p", NULL };
-
- extern asmlinkage long sys_reboot(int magic1, int magic2,
- unsigned int cmd, void *arg);
-
- daemonize(
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
- "shutdown"
-#endif
- );
-
- switch ( shutting_down )
- {
- case CMSG_SHUTDOWN_POWEROFF:
- if ( execve("/sbin/halt", poweroff_argv, envp) < 0 )
- {
- sys_reboot(LINUX_REBOOT_MAGIC1,
- LINUX_REBOOT_MAGIC2,
- LINUX_REBOOT_CMD_POWER_OFF,
- NULL);
- }
- break;
-
- case CMSG_SHUTDOWN_REBOOT:
- if ( execve("/sbin/shutdown", restart_argv, envp) < 0 )
- {
- sys_reboot(LINUX_REBOOT_MAGIC1,
- LINUX_REBOOT_MAGIC2,
- LINUX_REBOOT_CMD_RESTART,
- NULL);
- }
- break;
- }
-
- shutting_down = -1; /* could try again */
-
- return 0;
-}
-
-static void __shutdown_handler(void *unused)
-{
- int err;
-
- if ( shutting_down != CMSG_SHUTDOWN_SUSPEND )
- {
- err = kernel_thread(shutdown_process, NULL, CLONE_FS | CLONE_FILES);
- if ( err < 0 )
- printk(KERN_ALERT "Error creating shutdown process!\n");
- }
- else
- {
- __do_suspend();
- }
-}
-
-static void shutdown_handler(ctrl_msg_t *msg, unsigned long id)
-{
- static DECLARE_WORK(shutdown_work, __shutdown_handler, NULL);
-
- if ( msg->subtype == CMSG_SHUTDOWN_SYSRQ )
- {
- int sysrq = ((shutdown_sysrq_t *)&msg->msg[0])->key;
-
-#ifdef CONFIG_MAGIC_SYSRQ
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
- handle_sysrq(sysrq, NULL, NULL);
-#else
- handle_sysrq(sysrq, NULL, NULL, NULL);
-#endif
-#endif
- }
- else if ( (shutting_down == -1) &&
- ((msg->subtype == CMSG_SHUTDOWN_POWEROFF) ||
- (msg->subtype == CMSG_SHUTDOWN_REBOOT) ||
- (msg->subtype == CMSG_SHUTDOWN_SUSPEND)) )
- {
- shutting_down = msg->subtype;
- schedule_work(&shutdown_work);
- }
- else
- {
- printk("Ignore spurious shutdown request\n");
- }
-
- ctrl_if_send_response(msg);
-}
-
-static int __init setup_shutdown_event(void)
-{
- ctrl_if_register_receiver(CMSG_SHUTDOWN, shutdown_handler, 0);
- return 0;
-}
-
-__initcall(setup_shutdown_event);
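
shutdown_process() above tries the userland helper first and falls back to the direct sys_reboot() call only if execve() fails, relying on the fact that execve() returns only on error. A harmless user-space sketch of the pattern, execing /bin/echo instead of /sbin/shutdown so the demo does not actually reboot anything:

/* Exec a helper; fall back when it is missing. */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    char *argv[] = { "/bin/echo", "helper ran", NULL };
    char *envp[] = { "HOME=/", "TERM=linux",
                     "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };

    execve(argv[0], argv, envp);          /* does not return on success */
    perror("execve");                     /* helper missing: fall back  */
    return 1;
}
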
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/arch/xen/kernel/skbuff.c
--- a/linux-2.6.11-xen-sparse/arch/xen/kernel/skbuff.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,47 +0,0 @@
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/version.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/netdevice.h>
-#include <linux/inetdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/init.h>
-#include <asm/io.h>
-#include <asm/page.h>
-
-EXPORT_SYMBOL(__dev_alloc_skb);
-
-/* Referenced in netback.c. */
-/*static*/ kmem_cache_t *skbuff_cachep;
-
-/* Size must be cacheline-aligned (alloc_skb uses SKB_DATA_ALIGN). */
-#define XEN_SKB_SIZE \
- ((PAGE_SIZE - sizeof(struct skb_shared_info)) & ~(SMP_CACHE_BYTES - 1))
-
-struct sk_buff *__dev_alloc_skb(unsigned int length, int gfp_mask)
-{
- struct sk_buff *skb;
- skb = alloc_skb_from_cache(skbuff_cachep, length + 16, gfp_mask);
- if ( likely(skb != NULL) )
- skb_reserve(skb, 16);
- return skb;
-}
-
-static void skbuff_ctor(void *buf, kmem_cache_t *cachep, unsigned long unused)
-{
- scrub_pages(buf, 1);
-}
-
-static int __init skbuff_init(void)
-{
- skbuff_cachep = kmem_cache_create(
- "xen-skb", PAGE_SIZE, PAGE_SIZE, 0, skbuff_ctor, NULL);
- return 0;
-}
-__initcall(skbuff_init);
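
XEN_SKB_SIZE above rounds the per-page payload down to a cacheline multiple with "& ~(SMP_CACHE_BYTES - 1)". A self-contained check of that arithmetic, with assumed x86-style constants standing in for PAGE_SIZE and the real sizeof(struct skb_shared_info):

/* Round down to an alignment boundary: (x) & ~(align - 1). */
#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE       4096UL
#define SHINFO_SIZE     192UL    /* stand-in for sizeof(struct skb_shared_info) */
#define SMP_CACHE_BYTES 64UL

#define XEN_SKB_SIZE \
    ((PAGE_SIZE - SHINFO_SIZE) & ~(SMP_CACHE_BYTES - 1))

int main(void)
{
    assert(XEN_SKB_SIZE % SMP_CACHE_BYTES == 0);
    assert(XEN_SKB_SIZE + SHINFO_SIZE <= PAGE_SIZE);
    printf("usable skb bytes per page: %lu\n", XEN_SKB_SIZE);  /* 3904 */
    return 0;
}
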
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/arch/xen/kernel/xen_proc.c
--- a/linux-2.6.11-xen-sparse/arch/xen/kernel/xen_proc.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,18 +0,0 @@
-
-#include <linux/config.h>
-#include <linux/proc_fs.h>
-
-static struct proc_dir_entry *xen_base;
-
-struct proc_dir_entry *create_xen_proc_entry(const char *name, mode_t mode)
-{
- if ( xen_base == NULL )
- if ( (xen_base = proc_mkdir("xen", &proc_root)) == NULL )
- panic("Couldn't create /proc/xen");
- return create_proc_entry(name, mode, xen_base);
-}
-
-void remove_xen_proc_entry(const char *name)
-{
- remove_proc_entry(name, xen_base);
-}
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/early_printk.c
--- a/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/early_printk.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,242 +0,0 @@
-#include <linux/config.h>
-#include <linux/console.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/string.h>
-#include <asm/io.h>
-#include <asm/processor.h>
-
-/* Simple VGA output */
-
-#ifdef __i386__
-#define VGABASE (__ISA_IO_base + 0xb8000)
-#else
-#define VGABASE ((void __iomem *)0xffffffff800b8000UL)
-#endif
-
-#define MAX_YPOS 25
-#define MAX_XPOS 80
-
-static int current_ypos = 1, current_xpos = 0;
-
-static void early_vga_write(struct console *con, const char *str, unsigned n)
-{
- char c;
- int i, k, j;
-
- while ((c = *str++) != '\0' && n-- > 0) {
- if (current_ypos >= MAX_YPOS) {
- /* scroll 1 line up */
- for (k = 1, j = 0; k < MAX_YPOS; k++, j++) {
- for (i = 0; i < MAX_XPOS; i++) {
- writew(readw(VGABASE + 2*(MAX_XPOS*k + i)),
- VGABASE + 2*(MAX_XPOS*j + i));
- }
- }
- for (i = 0; i < MAX_XPOS; i++)
- writew(0x720, VGABASE + 2*(MAX_XPOS*j + i));
- current_ypos = MAX_YPOS-1;
- }
- if (c == '\n') {
- current_xpos = 0;
- current_ypos++;
- } else if (c != '\r') {
- writew(((0x7 << 8) | (unsigned short) c),
- VGABASE + 2*(MAX_XPOS*current_ypos +
- current_xpos++));
- if (current_xpos >= MAX_XPOS) {
- current_xpos = 0;
- current_ypos++;
- }
- }
- }
-}
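
The scroll branch of early_vga_write() above copies each row over the one above it and blanks the bottom row with 0x720 (grey space on black). The same operation modelled in user space over a flat array, using one memmove where the kernel loops over readw()/writew():

/* Text-buffer scroll-up, one line. */
#include <assert.h>
#include <stdint.h>
#include <string.h>

#define MAX_YPOS 25
#define MAX_XPOS 80

static uint16_t vga[MAX_YPOS * MAX_XPOS];   /* stand-in for the VGA buffer */

static void scroll_one_line(void)
{
    /* equivalent to the nested readw()/writew() loops above */
    memmove(vga, vga + MAX_XPOS, (MAX_YPOS - 1) * MAX_XPOS * sizeof(uint16_t));
    for (int i = 0; i < MAX_XPOS; i++)
        vga[(MAX_YPOS - 1) * MAX_XPOS + i] = 0x0720;   /* blank bottom row */
}

int main(void)
{
    vga[1 * MAX_XPOS + 0] = (0x7 << 8) | 'A';    /* character on row 1 */
    scroll_one_line();
    assert(vga[0] == ((0x7 << 8) | 'A'));        /* now on row 0 */
    assert(vga[(MAX_YPOS - 1) * MAX_XPOS] == 0x0720);
    return 0;
}
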
-
-static struct console early_vga_console = {
- .name = "earlyvga",
- .write = early_vga_write,
- .flags = CON_PRINTBUFFER,
- .index = -1,
-};
-
-#ifndef CONFIG_XEN
-/* Serial functions loosely based on a similar package from Klaus P. Gerlicher */
-
-int early_serial_base = 0x3f8; /* ttyS0 */
-
-#define XMTRDY 0x20
-
-#define DLAB 0x80
-
-#define TXR 0 /* Transmit register (WRITE) */
-#define RXR 0 /* Receive register (READ) */
-#define IER 1 /* Interrupt Enable */
-#define IIR 2 /* Interrupt ID */
-#define FCR 2 /* FIFO control */
-#define LCR 3 /* Line control */
-#define MCR 4 /* Modem control */
-#define LSR 5 /* Line Status */
-#define MSR 6 /* Modem Status */
-#define DLL 0 /* Divisor Latch Low */
-#define DLH 1 /* Divisor latch High */
-
-static int early_serial_putc(unsigned char ch)
-{
- unsigned timeout = 0xffff;
- while ((inb(early_serial_base + LSR) & XMTRDY) == 0 && --timeout)
- cpu_relax();
- outb(ch, early_serial_base + TXR);
- return timeout ? 0 : -1;
-}
-
-static void early_serial_write(struct console *con, const char *s, unsigned n)
-{
- while (*s && n-- > 0) {
- early_serial_putc(*s);
- if (*s == '\n')
- early_serial_putc('\r');
- s++;
- }
-}
-
-#define DEFAULT_BAUD 9600
-
-static __init void early_serial_init(char *s)
-{
- unsigned char c;
- unsigned divisor;
- unsigned baud = DEFAULT_BAUD;
- char *e;
-
- if (*s == ',')
- ++s;
-
- if (*s) {
- unsigned port;
- if (!strncmp(s,"0x",2)) {
- early_serial_base = simple_strtoul(s, &e, 16);
- } else {
- static int bases[] = { 0x3f8, 0x2f8 };
-
- if (!strncmp(s,"ttyS",4))
- s += 4;
- port = simple_strtoul(s, &e, 10);
- if (port > 1 || s == e)
- port = 0;
- early_serial_base = bases[port];
- }
- s += strcspn(s, ",");
- if (*s == ',')
- s++;
- }
-
- outb(0x3, early_serial_base + LCR); /* 8n1 */
- outb(0, early_serial_base + IER); /* no interrupt */
- outb(0, early_serial_base + FCR); /* no fifo */
- outb(0x3, early_serial_base + MCR); /* DTR + RTS */
-
- if (*s) {
- baud = simple_strtoul(s, &e, 0);
- if (baud == 0 || s == e)
- baud = DEFAULT_BAUD;
- }
-
- divisor = 115200 / baud;
- c = inb(early_serial_base + LCR);
- outb(c | DLAB, early_serial_base + LCR);
- outb(divisor & 0xff, early_serial_base + DLL);
- outb((divisor >> 8) & 0xff, early_serial_base + DLH);
- outb(c & ~DLAB, early_serial_base + LCR);
-}
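
early_serial_init() above programs the 16550 divisor latch: the baud-rate generator divides a 115200 Hz reference, and the resulting 16-bit divisor is written as separate low/high latch bytes (DLL/DLH) with DLAB set. A quick check of that arithmetic:

/* 16550 divisor computation for the DLL/DLH registers. */
#include <assert.h>
#include <stdio.h>

int main(void)
{
    unsigned baud    = 9600;               /* DEFAULT_BAUD above */
    unsigned divisor = 115200 / baud;      /* 12 for 9600 baud   */
    unsigned char dll = divisor & 0xff;        /* -> DLL register */
    unsigned char dlh = (divisor >> 8) & 0xff; /* -> DLH register */

    assert(divisor == 12 && dll == 12 && dlh == 0);
    printf("divisor=%u DLL=0x%02x DLH=0x%02x\n", divisor, dll, dlh);
    return 0;
}
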
-#else
-
-static void
-early_serial_write(struct console *con, const char *s, unsigned count)
-{
- int n;
-
- while (count > 0) {
- n = HYPERVISOR_console_io(CONSOLEIO_write, count, (char *)s);
- if (n <= 0)
- break;
- count -= n;
- s += n;
- }
-}
-
-static __init void early_serial_init(char *s)
-{
-}
-#endif
-
-static struct console early_serial_console = {
- .name = "earlyser",
- .write = early_serial_write,
- .flags = CON_PRINTBUFFER,
- .index = -1,
-};
-
-/* Direct interface for emergencies */
-struct console *early_console = &early_vga_console;
-static int early_console_initialized = 0;
-
-void early_printk(const char *fmt, ...)
-{
- char buf[512];
- int n;
- va_list ap;
-
- va_start(ap,fmt);
- n = vscnprintf(buf,512,fmt,ap);
- early_console->write(early_console,buf,n);
- va_end(ap);
-}
-
-static int keep_early;
-
-int __init setup_early_printk(char *opt)
-{
- char *space;
- char buf[256];
-
- if (early_console_initialized)
- return -1;
-
- opt = strchr(opt, '=') + 1;
-
- strlcpy(buf,opt,sizeof(buf));
- space = strchr(buf, ' ');
- if (space)
- *space = 0;
-
- if (strstr(buf,"keep"))
- keep_early = 1;
-
- if (!strncmp(buf, "serial", 6)) {
- early_serial_init(buf + 6);
- early_console = &early_serial_console;
- } else if (!strncmp(buf, "ttyS", 4)) {
- early_serial_init(buf);
- early_console = &early_serial_console;
- } else if (!strncmp(buf, "vga", 3)) {
- early_console = &early_vga_console;
- }
- early_console_initialized = 1;
- register_console(early_console);
- return 0;
-}
-
-void __init disable_early_printk(void)
-{
- if (!early_console_initialized || !early_console)
- return;
- if (!keep_early) {
- printk("disabling early console\n");
- unregister_console(early_console);
- early_console_initialized = 0;
- } else {
- printk("keeping early console\n");
- }
-}
-
-__setup("earlyprintk=", setup_early_printk);
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/drivers/Makefile
--- a/linux-2.6.11-xen-sparse/drivers/Makefile Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,66 +0,0 @@
-#
-# Makefile for the Linux kernel device drivers.
-#
-# 15 Sep 2000, Christoph Hellwig <hch@xxxxxxxxxxxxx>
-# Rewritten to use lists instead of if-statements.
-#
-
-obj-$(CONFIG_PCI) += pci/
-obj-$(CONFIG_PARISC) += parisc/
-obj-y += video/
-obj-$(CONFIG_ACPI_BOOT) += acpi/
-# PnP must come after ACPI since it will eventually need to check if acpi
-# was used and do nothing if so
-obj-$(CONFIG_PNP) += pnp/
-
-# char/ comes before serial/ etc so that the VT console is the boot-time
-# default.
-obj-y += char/
-
-# i810fb and intelfb depend on char/agp/
-obj-$(CONFIG_FB_I810) += video/i810/
-obj-$(CONFIG_FB_INTEL) += video/intelfb/
-
-# we also need input/serio early so serio bus is initialized by the time
-# serial drivers start registering their serio ports
-obj-$(CONFIG_SERIO) += input/serio/
-obj-y += serial/
-obj-$(CONFIG_PARPORT) += parport/
-obj-y += base/ block/ misc/ net/ media/
-obj-$(CONFIG_NUBUS) += nubus/
-obj-$(CONFIG_ATM) += atm/
-obj-$(CONFIG_PPC_PMAC) += macintosh/
-obj-$(CONFIG_ARCH_XEN) += xen/
-obj-$(CONFIG_IDE) += ide/
-obj-$(CONFIG_FC4) += fc4/
-obj-$(CONFIG_SCSI) += scsi/
-obj-$(CONFIG_FUSION) += message/
-obj-$(CONFIG_IEEE1394) += ieee1394/
-obj-y += cdrom/
-obj-$(CONFIG_MTD) += mtd/
-obj-$(CONFIG_PCCARD) += pcmcia/
-obj-$(CONFIG_DIO) += dio/
-obj-$(CONFIG_SBUS) += sbus/
-obj-$(CONFIG_ZORRO) += zorro/
-obj-$(CONFIG_MAC) += macintosh/
-obj-$(CONFIG_ATA_OVER_ETH) += block/aoe/
-obj-$(CONFIG_PARIDE) += block/paride/
-obj-$(CONFIG_TC) += tc/
-obj-$(CONFIG_USB) += usb/
-obj-$(CONFIG_USB_GADGET) += usb/gadget/
-obj-$(CONFIG_INPUT) += input/
-obj-$(CONFIG_GAMEPORT) += input/gameport/
-obj-$(CONFIG_I2O) += message/
-obj-$(CONFIG_I2C) += i2c/
-obj-$(CONFIG_W1) += w1/
-obj-$(CONFIG_PHONE) += telephony/
-obj-$(CONFIG_MD) += md/
-obj-$(CONFIG_BT) += bluetooth/
-obj-$(CONFIG_ISDN) += isdn/
-obj-$(CONFIG_MCA) += mca/
-obj-$(CONFIG_EISA) += eisa/
-obj-$(CONFIG_CPU_FREQ) += cpufreq/
-obj-$(CONFIG_MMC) += mmc/
-obj-$(CONFIG_INFINIBAND) += infiniband/
-obj-y += firmware/
-obj-$(CONFIG_CRYPTO) += crypto/
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/drivers/char/mem.c
--- a/linux-2.6.11-xen-sparse/drivers/char/mem.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,733 +0,0 @@
-/*
- * linux/drivers/char/mem.c
- *
- * Copyright (C) 1991, 1992 Linus Torvalds
- *
- * Added devfs support.
- * Jan-11-1998, C. Scott Ananian <cananian@xxxxxxxxxxxxxxxxxxxx>
- * Shared /dev/zero mmaping support, Feb 2000, Kanoj Sarcar <kanoj@xxxxxxx>
- */
-
-#include <linux/config.h>
-#include <linux/mm.h>
-#include <linux/miscdevice.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/mman.h>
-#include <linux/random.h>
-#include <linux/init.h>
-#include <linux/raw.h>
-#include <linux/tty.h>
-#include <linux/capability.h>
-#include <linux/smp_lock.h>
-#include <linux/devfs_fs_kernel.h>
-#include <linux/ptrace.h>
-#include <linux/device.h>
-
-#include <asm/uaccess.h>
-#include <asm/io.h>
-
-#ifdef CONFIG_IA64
-# include <linux/efi.h>
-#endif
-
-#if defined(CONFIG_S390_TAPE) && defined(CONFIG_S390_TAPE_CHAR)
-extern void tapechar_init(void);
-#endif
-
-/*
- * Architectures vary in how they handle caching for addresses
- * outside of main memory.
- *
- */
-static inline int uncached_access(struct file *file, unsigned long addr)
-{
-#if defined(__i386__)
- /*
- * On the PPro and successors, the MTRRs are used to set
- * memory types for physical addresses outside main memory,
- * so blindly setting PCD or PWT on those pages is wrong.
- * For Pentiums and earlier, the surround logic should disable
- * caching for the high addresses through the KEN pin, but
- * we maintain the tradition of paranoia in this code.
- */
- if (file->f_flags & O_SYNC)
- return 1;
- return !( test_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability) ||
- test_bit(X86_FEATURE_K6_MTRR, boot_cpu_data.x86_capability) ||
- test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) ||
- test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability) )
- && addr >= __pa(high_memory);
-#elif defined(__x86_64__)
- /*
- * This is broken because it can generate memory type aliases,
- * which can cause cache corruptions
- * But it is only available for root and we have to be bug-to-bug
- * compatible with i386.
- */
- if (file->f_flags & O_SYNC)
- return 1;
- /* same behaviour as i386. PAT always set to cached and MTRRs control the
- caching behaviour.
- Hopefully a full PAT implementation will fix that soon. */
- return 0;
-#elif defined(CONFIG_IA64)
- /*
- * On ia64, we ignore O_SYNC because we cannot tolerate memory attribute aliases.
- */
- return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
-#elif defined(CONFIG_PPC64)
- /* On PPC64, we always do non-cacheable access to the IO hole and
- * cacheable elsewhere. Cache paradox can checkstop the CPU and
- * the high_memory heuristic below is wrong on machines with memory
- * above the IO hole... Ah, and of course, XFree86 doesn't pass
- * O_SYNC when mapping us to tap IO space. Surprised ?
- */
- return !page_is_ram(addr >> PAGE_SHIFT);
-#else
- /*
- * Accessing memory above the top the kernel knows about or through a file pointer
- * that was marked O_SYNC will be done non-cached.
- */
- if (file->f_flags & O_SYNC)
- return 1;
- return addr >= __pa(high_memory);
-#endif
-}
-
-#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
-static inline int valid_phys_addr_range(unsigned long addr, size_t *count)
-{
- unsigned long end_mem;
-
- end_mem = __pa(high_memory);
- if (addr >= end_mem)
- return 0;
-
- if (*count > end_mem - addr)
- *count = end_mem - addr;
-
- return 1;
-}
-#endif
-
-static ssize_t do_write_mem(void *p, unsigned long realp,
- const char __user * buf, size_t count, loff_t *ppos)
-{
- ssize_t written;
- unsigned long copied;
-
- written = 0;
-#if defined(__sparc__) || (defined(__mc68000__) && defined(CONFIG_MMU))
- /* we don't have page 0 mapped on sparc and m68k.. */
- if (realp < PAGE_SIZE) {
- unsigned long sz = PAGE_SIZE-realp;
- if (sz > count) sz = count;
- /* Hmm. Do something? */
- buf+=sz;
- p+=sz;
- count-=sz;
- written+=sz;
- }
-#endif
- copied = copy_from_user(p, buf, count);
- if (copied) {
- ssize_t ret = written + (count - copied);
-
- if (ret)
- return ret;
- return -EFAULT;
- }
- written += count;
- *ppos += written;
- return written;
-}
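
The tail of do_write_mem() above encodes the usual copy_from_user() convention: the return value is the number of bytes *not* copied, a partial transfer reports the bytes that did land, and only a total failure becomes -EFAULT. A standalone model of that accounting (the literal -14 standing in for -EFAULT is an assumption of this sketch):

    #include <stdio.h>

    /* Standalone model of do_write_mem()'s return convention. 'uncopied'
     * plays the role of copy_from_user()'s return value (bytes NOT
     * transferred). A partial transfer reports the bytes that made it;
     * only a total failure becomes -EFAULT (-14 here, an assumption). */
    static long write_result(long written, long count, long uncopied)
    {
        if (uncopied) {
            long ret = written + (count - uncopied);
            return ret ? ret : -14; /* -EFAULT */
        }
        return written + count;
    }

    int main(void)
    {
        printf("%ld\n", write_result(0, 100, 0));   /* full copy: 100 */
        printf("%ld\n", write_result(0, 100, 40));  /* partial: 60 */
        printf("%ld\n", write_result(0, 100, 100)); /* nothing: -14 */
        return 0;
    }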
-
-#ifndef ARCH_HAS_DEV_MEM
-/*
- * This funcion reads the *physical* memory. The f_pos points directly to the
- * memory location.
- */
-static ssize_t read_mem(struct file * file, char __user * buf,
- size_t count, loff_t *ppos)
-{
- unsigned long p = *ppos;
- ssize_t read;
-
- if (!valid_phys_addr_range(p, &count))
- return -EFAULT;
- read = 0;
-#if defined(__sparc__) || (defined(__mc68000__) && defined(CONFIG_MMU))
- /* we don't have page 0 mapped on sparc and m68k.. */
- if (p < PAGE_SIZE) {
- unsigned long sz = PAGE_SIZE-p;
- if (sz > count)
- sz = count;
- if (sz > 0) {
- if (clear_user(buf, sz))
- return -EFAULT;
- buf += sz;
- p += sz;
- count -= sz;
- read += sz;
- }
- }
-#endif
- if (copy_to_user(buf, __va(p), count))
- return -EFAULT;
- read += count;
- *ppos += read;
- return read;
-}
-
-static ssize_t write_mem(struct file * file, const char __user * buf,
- size_t count, loff_t *ppos)
-{
- unsigned long p = *ppos;
-
- if (!valid_phys_addr_range(p, &count))
- return -EFAULT;
- return do_write_mem(__va(p), p, buf, count, ppos);
-}
-#endif
-
-static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
-{
-#ifdef pgprot_noncached
- unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
- int uncached;
-
- uncached = uncached_access(file, offset);
- if (uncached)
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-#endif
-
- /* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
- if (remap_pfn_range(vma,
- vma->vm_start,
- vma->vm_pgoff,
- vma->vm_end-vma->vm_start,
- vma->vm_page_prot))
- return -EAGAIN;
- return 0;
-}
-
-extern long vread(char *buf, char *addr, unsigned long count);
-extern long vwrite(char *buf, char *addr, unsigned long count);
-
-/*
- * This function reads the *virtual* memory as seen by the kernel.
- */
-static ssize_t read_kmem(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- unsigned long p = *ppos;
- ssize_t read = 0;
- ssize_t virtr = 0;
- char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
-
- if (p < (unsigned long) high_memory) {
- read = count;
- if (count > (unsigned long) high_memory - p)
- read = (unsigned long) high_memory - p;
-
-#if defined(__sparc__) || (defined(__mc68000__) && defined(CONFIG_MMU))
- /* we don't have page 0 mapped on sparc and m68k.. */
- if (p < PAGE_SIZE && read > 0) {
- size_t tmp = PAGE_SIZE - p;
- if (tmp > read) tmp = read;
- if (clear_user(buf, tmp))
- return -EFAULT;
- buf += tmp;
- p += tmp;
- read -= tmp;
- count -= tmp;
- }
-#endif
- if (copy_to_user(buf, (char *)p, read))
- return -EFAULT;
- p += read;
- buf += read;
- count -= read;
- }
-
- if (count > 0) {
- kbuf = (char *)__get_free_page(GFP_KERNEL);
- if (!kbuf)
- return -ENOMEM;
- while (count > 0) {
- int len = count;
-
- if (len > PAGE_SIZE)
- len = PAGE_SIZE;
- len = vread(kbuf, (char *)p, len);
- if (!len)
- break;
- if (copy_to_user(buf, kbuf, len)) {
- free_page((unsigned long)kbuf);
- return -EFAULT;
- }
- count -= len;
- buf += len;
- virtr += len;
- p += len;
- }
- free_page((unsigned long)kbuf);
- }
- *ppos = p;
- return virtr + read;
-}
-
-/*
- * This function writes to the *virtual* memory as seen by the kernel.
- */
-static ssize_t write_kmem(struct file * file, const char __user * buf,
- size_t count, loff_t *ppos)
-{
- unsigned long p = *ppos;
- ssize_t wrote = 0;
- ssize_t virtr = 0;
- ssize_t written;
- char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
-
- if (p < (unsigned long) high_memory) {
-
- wrote = count;
- if (count > (unsigned long) high_memory - p)
- wrote = (unsigned long) high_memory - p;
-
- written = do_write_mem((void*)p, p, buf, wrote, ppos);
- if (written != wrote)
- return written;
- wrote = written;
- p += wrote;
- buf += wrote;
- count -= wrote;
- }
-
- if (count > 0) {
- kbuf = (char *)__get_free_page(GFP_KERNEL);
- if (!kbuf)
- return wrote ? wrote : -ENOMEM;
- while (count > 0) {
- int len = count;
-
- if (len > PAGE_SIZE)
- len = PAGE_SIZE;
- if (len) {
- written = copy_from_user(kbuf, buf, len);
- if (written) {
- ssize_t ret;
-
- free_page((unsigned long)kbuf);
- ret = wrote + virtr + (len - written);
- return ret ? ret : -EFAULT;
- }
- }
- len = vwrite(kbuf, (char *)p, len);
- count -= len;
- buf += len;
- virtr += len;
- p += len;
- }
- free_page((unsigned long)kbuf);
- }
-
- *ppos = p;
- return virtr + wrote;
-}
-
-#if defined(CONFIG_ISA) || !defined(__mc68000__)
-static ssize_t read_port(struct file * file, char __user * buf,
- size_t count, loff_t *ppos)
-{
- unsigned long i = *ppos;
- char __user *tmp = buf;
-
- if (verify_area(VERIFY_WRITE,buf,count))
- return -EFAULT;
- while (count-- > 0 && i < 65536) {
- if (__put_user(inb(i),tmp) < 0)
- return -EFAULT;
- i++;
- tmp++;
- }
- *ppos = i;
- return tmp-buf;
-}
-
-static ssize_t write_port(struct file * file, const char __user * buf,
- size_t count, loff_t *ppos)
-{
- unsigned long i = *ppos;
- const char __user * tmp = buf;
-
- if (verify_area(VERIFY_READ,buf,count))
- return -EFAULT;
- while (count-- > 0 && i < 65536) {
- char c;
- if (__get_user(c, tmp))
- return -EFAULT;
- outb(c,i);
- i++;
- tmp++;
- }
- *ppos = i;
- return tmp-buf;
-}
-#endif
-
-static ssize_t read_null(struct file * file, char __user * buf,
- size_t count, loff_t *ppos)
-{
- return 0;
-}
-
-static ssize_t write_null(struct file * file, const char __user * buf,
- size_t count, loff_t *ppos)
-{
- return count;
-}
-
-#ifdef CONFIG_MMU
-/*
- * For fun, we are using the MMU for this.
- */
-static inline size_t read_zero_pagealigned(char __user * buf, size_t size)
-{
- struct mm_struct *mm;
- struct vm_area_struct * vma;
- unsigned long addr=(unsigned long)buf;
-
- mm = current->mm;
- /* Oops, this was forgotten before. -ben */
- down_read(&mm->mmap_sem);
-
- /* For private mappings, just map in zero pages. */
- for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
- unsigned long count;
-
- if (vma->vm_start > addr || (vma->vm_flags & VM_WRITE) == 0)
- goto out_up;
- if (vma->vm_flags & (VM_SHARED | VM_HUGETLB))
- break;
- count = vma->vm_end - addr;
- if (count > size)
- count = size;
-
- zap_page_range(vma, addr, count, NULL);
- zeromap_page_range(vma, addr, count, PAGE_COPY);
-
- size -= count;
- buf += count;
- addr += count;
- if (size == 0)
- goto out_up;
- }
-
- up_read(&mm->mmap_sem);
-
- /* The shared case is hard. Let's do the conventional zeroing. */
- do {
- unsigned long unwritten = clear_user(buf, PAGE_SIZE);
- if (unwritten)
- return size + unwritten - PAGE_SIZE;
- cond_resched();
- buf += PAGE_SIZE;
- size -= PAGE_SIZE;
- } while (size);
-
- return size;
-out_up:
- up_read(&mm->mmap_sem);
- return size;
-}
-
-static ssize_t read_zero(struct file * file, char __user * buf,
- size_t count, loff_t *ppos)
-{
- unsigned long left, unwritten, written = 0;
-
- if (!count)
- return 0;
-
- if (!access_ok(VERIFY_WRITE, buf, count))
- return -EFAULT;
-
- left = count;
-
- /* do we want to be clever? Arbitrary cut-off */
- if (count >= PAGE_SIZE*4) {
- unsigned long partial;
-
- /* How much left of the page? */
- partial = (PAGE_SIZE-1) & -(unsigned long) buf;
- unwritten = clear_user(buf, partial);
- written = partial - unwritten;
- if (unwritten)
- goto out;
- left -= partial;
- buf += partial;
- unwritten = read_zero_pagealigned(buf, left & PAGE_MASK);
- written += (left & PAGE_MASK) - unwritten;
- if (unwritten)
- goto out;
- buf += left & PAGE_MASK;
- left &= ~PAGE_MASK;
- }
- unwritten = clear_user(buf, left);
- written += left - unwritten;
-out:
- return written ? written : -EFAULT;
-}
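
read_zero() above first clears just enough bytes to reach a page boundary before switching to the page-aligned fast path; the expression (PAGE_SIZE-1) & -(unsigned long)buf computes that remainder. A runnable illustration of the trick (PAGE_SIZE assumed to be 4096 here):

    #include <stdio.h>

    /* (PAGE_SIZE-1) & -p is the byte count from address p up to the next
     * page boundary (0 when already aligned). PAGE_SIZE assumed 4096. */
    #define PAGE_SIZE 4096UL

    int main(void)
    {
        static const unsigned long addrs[] = { 0x1000, 0x1001, 0x1ff0, 0x2fff };
        unsigned i;

        for (i = 0; i < sizeof(addrs) / sizeof(addrs[0]); i++) {
            unsigned long partial = (PAGE_SIZE - 1) & -addrs[i];
            printf("addr 0x%lx -> %lu bytes to the next page\n",
                   addrs[i], partial);
        }
        return 0;
    }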
-
-static int mmap_zero(struct file * file, struct vm_area_struct * vma)
-{
- if (vma->vm_flags & VM_SHARED)
- return shmem_zero_setup(vma);
- if (zeromap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start, vma->vm_page_prot))
- return -EAGAIN;
- return 0;
-}
-#else /* CONFIG_MMU */
-static ssize_t read_zero(struct file * file, char * buf,
- size_t count, loff_t *ppos)
-{
- size_t todo = count;
-
- while (todo) {
- size_t chunk = todo;
-
- if (chunk > 4096)
- chunk = 4096; /* Just for latency reasons */
- if (clear_user(buf, chunk))
- return -EFAULT;
- buf += chunk;
- todo -= chunk;
- cond_resched();
- }
- return count;
-}
-
-static int mmap_zero(struct file * file, struct vm_area_struct * vma)
-{
- return -ENOSYS;
-}
-#endif /* CONFIG_MMU */
-
-static ssize_t write_full(struct file * file, const char __user * buf,
- size_t count, loff_t *ppos)
-{
- return -ENOSPC;
-}
-
-/*
- * Special lseek() function for /dev/null and /dev/zero. Most notably, you
- * can fopen() both devices with "a" now. This was previously impossible.
- * -- SRB.
- */
-
-static loff_t null_lseek(struct file * file, loff_t offset, int orig)
-{
- return file->f_pos = 0;
-}
-
-/*
- * The memory devices use the full 32/64 bits of the offset, and so we cannot
- * check against negative addresses: they are ok. The return value is weird,
- * though, in that case (0).
- *
- * also note that seeking relative to the "end of file" isn't supported:
- * it has no meaning, so it returns -EINVAL.
- */
-static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
-{
- loff_t ret;
-
- down(&file->f_dentry->d_inode->i_sem);
- switch (orig) {
- case 0:
- file->f_pos = offset;
- ret = file->f_pos;
- force_successful_syscall_return();
- break;
- case 1:
- file->f_pos += offset;
- ret = file->f_pos;
- force_successful_syscall_return();
- break;
- default:
- ret = -EINVAL;
- }
- up(&file->f_dentry->d_inode->i_sem);
- return ret;
-}
-
-static int open_port(struct inode * inode, struct file * filp)
-{
- return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
-}
-
-#define mmap_mem mmap_kmem
-#define zero_lseek null_lseek
-#define full_lseek null_lseek
-#define write_zero write_null
-#define read_full read_zero
-#define open_mem open_port
-#define open_kmem open_mem
-
-#ifndef ARCH_HAS_DEV_MEM
-static struct file_operations mem_fops = {
- .llseek = memory_lseek,
- .read = read_mem,
- .write = write_mem,
- .mmap = mmap_mem,
- .open = open_mem,
-};
-#else
-extern struct file_operations mem_fops;
-#endif
-
-static struct file_operations kmem_fops = {
- .llseek = memory_lseek,
- .read = read_kmem,
- .write = write_kmem,
- .mmap = mmap_kmem,
- .open = open_kmem,
-};
-
-static struct file_operations null_fops = {
- .llseek = null_lseek,
- .read = read_null,
- .write = write_null,
-};
-
-#if defined(CONFIG_ISA) || !defined(__mc68000__)
-static struct file_operations port_fops = {
- .llseek = memory_lseek,
- .read = read_port,
- .write = write_port,
- .open = open_port,
-};
-#endif
-
-static struct file_operations zero_fops = {
- .llseek = zero_lseek,
- .read = read_zero,
- .write = write_zero,
- .mmap = mmap_zero,
-};
-
-static struct file_operations full_fops = {
- .llseek = full_lseek,
- .read = read_full,
- .write = write_full,
-};
-
-static ssize_t kmsg_write(struct file * file, const char __user * buf,
- size_t count, loff_t *ppos)
-{
- char *tmp;
- int ret;
-
- tmp = kmalloc(count + 1, GFP_KERNEL);
- if (tmp == NULL)
- return -ENOMEM;
- ret = -EFAULT;
- if (!copy_from_user(tmp, buf, count)) {
- tmp[count] = 0;
- ret = printk("%s", tmp);
- }
- kfree(tmp);
- return ret;
-}
-
-static struct file_operations kmsg_fops = {
- .write = kmsg_write,
-};
-
-static int memory_open(struct inode * inode, struct file * filp)
-{
- switch (iminor(inode)) {
- case 1:
- filp->f_op = &mem_fops;
- break;
- case 2:
- filp->f_op = &kmem_fops;
- break;
- case 3:
- filp->f_op = &null_fops;
- break;
-#if defined(CONFIG_ISA) || !defined(__mc68000__)
- case 4:
- filp->f_op = &port_fops;
- break;
-#endif
- case 5:
- filp->f_op = &zero_fops;
- break;
- case 7:
- filp->f_op = &full_fops;
- break;
- case 8:
- filp->f_op = &random_fops;
- break;
- case 9:
- filp->f_op = &urandom_fops;
- break;
- case 11:
- filp->f_op = &kmsg_fops;
- break;
- default:
- return -ENXIO;
- }
- if (filp->f_op && filp->f_op->open)
- return filp->f_op->open(inode,filp);
- return 0;
-}
-
-static struct file_operations memory_fops = {
- .open = memory_open, /* just a selector for the real open */
-};
-
-static const struct {
- unsigned int minor;
- char *name;
- umode_t mode;
- struct file_operations *fops;
-} devlist[] = { /* list of minor devices */
- {1, "mem", S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
- {2, "kmem", S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops},
- {3, "null", S_IRUGO | S_IWUGO, &null_fops},
-#if defined(CONFIG_ISA) || !defined(__mc68000__)
- {4, "port", S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},
-#endif
- {5, "zero", S_IRUGO | S_IWUGO, &zero_fops},
- {7, "full", S_IRUGO | S_IWUGO, &full_fops},
- {8, "random", S_IRUGO | S_IWUSR, &random_fops},
- {9, "urandom", S_IRUGO | S_IWUSR, &urandom_fops},
- {11,"kmsg", S_IRUGO | S_IWUSR, &kmsg_fops},
-};
-
-static struct class_simple *mem_class;
-
-static int __init chr_dev_init(void)
-{
- int i;
-
- if (register_chrdev(MEM_MAJOR,"mem",&memory_fops))
- printk("unable to get major %d for memory devs\n", MEM_MAJOR);
-
- mem_class = class_simple_create(THIS_MODULE, "mem");
- for (i = 0; i < ARRAY_SIZE(devlist); i++) {
- class_simple_device_add(mem_class,
- MKDEV(MEM_MAJOR, devlist[i].minor),
- NULL, devlist[i].name);
- devfs_mk_cdev(MKDEV(MEM_MAJOR, devlist[i].minor),
- S_IFCHR | devlist[i].mode, devlist[i].name);
- }
-
- return 0;
-}
-
-fs_initcall(chr_dev_init);
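
memory_open() and devlist[] above form a table-driven minor-number dispatch: one character-device major, with the minor selecting the device personality. A reduced, runnable sketch of the same lookup, keeping only the minor-to-name mapping (minors and names copied from devlist[]; the real file_operations wiring is omitted):

    #include <stdio.h>

    /* Minor-number dispatch mirroring devlist[] in the deleted mem.c;
     * only the minor-to-name mapping is kept, handlers are omitted. */
    struct minor_dev {
        unsigned int minor;
        const char *name;
    };

    static const struct minor_dev devlist[] = {
        { 1, "mem" },  { 2, "kmem" }, { 3, "null" },   { 4, "port" },
        { 5, "zero" }, { 7, "full" }, { 8, "random" }, { 9, "urandom" },
        { 11, "kmsg" },
    };

    static const char *lookup(unsigned int minor)
    {
        size_t i;

        for (i = 0; i < sizeof(devlist) / sizeof(devlist[0]); i++)
            if (devlist[i].minor == minor)
                return devlist[i].name;
        return NULL; /* memory_open() returns -ENXIO for these */
    }

    int main(void)
    {
        unsigned int m;

        for (m = 0; m <= 12; m++) {
            const char *name = lookup(m);
            printf("minor %2u -> %s\n", m, name ? name : "(ENXIO)");
        }
        return 0;
    }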
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/drivers/char/tty_io.c
--- a/linux-2.6.11-xen-sparse/drivers/char/tty_io.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,2988 +0,0 @@
-/*
- * linux/drivers/char/tty_io.c
- *
- * Copyright (C) 1991, 1992 Linus Torvalds
- */
-
-/*
- * 'tty_io.c' gives an orthogonal feeling to tty's, be they consoles
- * or rs-channels. It also implements echoing, cooked mode etc.
- *
- * Kill-line thanks to John T Kohl, who also corrected VMIN = VTIME = 0.
- *
- * Modified by Theodore Ts'o, 9/14/92, to dynamically allocate the
- * tty_struct and tty_queue structures. Previously there was an array
- * of 256 tty_struct's which was statically allocated, and the
- * tty_queue structures were allocated at boot time. Both are now
- * dynamically allocated only when the tty is open.
- *
- * Also restructured routines so that there is more of a separation
- * between the high-level tty routines (tty_io.c and tty_ioctl.c) and
- * the low-level tty routines (serial.c, pty.c, console.c). This
- * makes for cleaner and more compact code. -TYT, 9/17/92
- *
- * Modified by Fred N. van Kempen, 01/29/93, to add line disciplines
- * which can be dynamically activated and de-activated by the line
- * discipline handling modules (like SLIP).
- *
- * NOTE: pay no attention to the line discipline code (yet); its
- * interface is still subject to change in this version...
- * -- TYT, 1/31/92
- *
- * Added functionality to the OPOST tty handling. No delays, but all
- * other bits should be there.
- * -- Nick Holloway <alfie@xxxxxxxxxxxxxxxxx>, 27th May 1993.
- *
- * Rewrote canonical mode and added more termios flags.
- * -- julian@xxxxxxxxxxxxxxxxxxxxxx (J. Cowley), 13Jan94
- *
- * Reorganized FASYNC support so mouse code can share it.
- * -- ctm@xxxxxxxx, 9Sep95
- *
- * New TIOCLINUX variants added.
- * -- mj@xxxxxxxxxxxxxxxxx, 19-Nov-95
- *
- * Restrict vt switching via ioctl()
- * -- grif@xxxxxxxxxx, 5-Dec-95
- *
- * Move console and virtual terminal code to more appropriate files,
- * implement CONFIG_VT and generalize console device interface.
- * -- Marko Kohtala <Marko.Kohtala@xxxxxx>, March 97
- *
- * Rewrote init_dev and release_dev to eliminate races.
- * -- Bill Hawes <whawes@xxxxxxxx>, June 97
- *
- * Added devfs support.
- * -- C. Scott Ananian <cananian@xxxxxxxxxxxxxxxxxxxx>, 13-Jan-1998
- *
- * Added support for a Unix98-style ptmx device.
- * -- C. Scott Ananian <cananian@xxxxxxxxxxxxxxxxxxxx>, 14-Jan-1998
- *
- * Reduced memory usage for older ARM systems
- * -- Russell King <rmk@xxxxxxxxxxxxxxxx>
- *
- * Move do_SAK() into process context. Less stack use in devfs functions.
- * alloc_tty_struct() always uses kmalloc() -- Andrew Morton <andrewm@xxxxxxxxxx> 17Mar01
- */
-
-#include <linux/config.h>
-#include <linux/types.h>
-#include <linux/major.h>
-#include <linux/errno.h>
-#include <linux/signal.h>
-#include <linux/fcntl.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/tty.h>
-#include <linux/tty_driver.h>
-#include <linux/tty_flip.h>
-#include <linux/devpts_fs.h>
-#include <linux/file.h>
-#include <linux/console.h>
-#include <linux/timer.h>
-#include <linux/ctype.h>
-#include <linux/kd.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
-#include <linux/proc_fs.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/smp_lock.h>
-#include <linux/device.h>
-#include <linux/idr.h>
-#include <linux/wait.h>
-#include <linux/bitops.h>
-
-#include <asm/uaccess.h>
-#include <asm/system.h>
-
-#include <linux/kbd_kern.h>
-#include <linux/vt_kern.h>
-#include <linux/selection.h>
-#include <linux/devfs_fs_kernel.h>
-
-#include <linux/kmod.h>
-
-#undef TTY_DEBUG_HANGUP
-
-#define TTY_PARANOIA_CHECK 1
-#define CHECK_TTY_COUNT 1
-
-struct termios tty_std_termios = { /* for the benefit of tty drivers */
- .c_iflag = ICRNL | IXON,
- .c_oflag = OPOST | ONLCR,
- .c_cflag = B38400 | CS8 | CREAD | HUPCL,
- .c_lflag = ISIG | ICANON | ECHO | ECHOE | ECHOK |
- ECHOCTL | ECHOKE | IEXTEN,
- .c_cc = INIT_C_CC
-};
-
-EXPORT_SYMBOL(tty_std_termios);
-
-/* This list gets poked at by procfs and various bits of boot up code. This
- could do with some rationalisation such as pulling the tty proc function
- into this file */
-
-LIST_HEAD(tty_drivers); /* linked list of tty drivers */
-
-/* Semaphore to protect creating and releasing a tty. This is shared with
- vt.c for deeply disgusting hack reasons */
-DECLARE_MUTEX(tty_sem);
-
-int console_use_vt = 1;
-
-#ifdef CONFIG_UNIX98_PTYS
-extern struct tty_driver *ptm_driver; /* Unix98 pty masters; for /dev/ptmx */
-extern int pty_limit; /* Config limit on Unix98 ptys */
-static DEFINE_IDR(allocated_ptys);
-static DECLARE_MUTEX(allocated_ptys_lock);
-static int ptmx_open(struct inode *, struct file *);
-#endif
-
-extern void disable_early_printk(void);
-
-static void initialize_tty_struct(struct tty_struct *tty);
-
-static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
-static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
-ssize_t redirected_tty_write(struct file *, const char __user *, size_t, loff_t *);
-static unsigned int tty_poll(struct file *, poll_table *);
-static int tty_open(struct inode *, struct file *);
-static int tty_release(struct inode *, struct file *);
-int tty_ioctl(struct inode * inode, struct file * file,
- unsigned int cmd, unsigned long arg);
-static int tty_fasync(int fd, struct file * filp, int on);
-extern void rs_360_init(void);
-static void release_mem(struct tty_struct *tty, int idx);
-
-
-static struct tty_struct *alloc_tty_struct(void)
-{
- struct tty_struct *tty;
-
- tty = kmalloc(sizeof(struct tty_struct), GFP_KERNEL);
- if (tty)
- memset(tty, 0, sizeof(struct tty_struct));
- return tty;
-}
-
-static inline void free_tty_struct(struct tty_struct *tty)
-{
- kfree(tty->write_buf);
- kfree(tty);
-}
-
-#define TTY_NUMBER(tty) ((tty)->index + (tty)->driver->name_base)
-
-char *tty_name(struct tty_struct *tty, char *buf)
-{
- if (!tty) /* Hmm. NULL pointer. That's fun. */
- strcpy(buf, "NULL tty");
- else
- strcpy(buf, tty->name);
- return buf;
-}
-
-EXPORT_SYMBOL(tty_name);
-
-inline int tty_paranoia_check(struct tty_struct *tty, struct inode *inode,
- const char *routine)
-{
-#ifdef TTY_PARANOIA_CHECK
- if (!tty) {
- printk(KERN_WARNING
- "null TTY for (%d:%d) in %s\n",
- imajor(inode), iminor(inode), routine);
- return 1;
- }
- if (tty->magic != TTY_MAGIC) {
- printk(KERN_WARNING
- "bad magic number for tty struct (%d:%d) in %s\n",
- imajor(inode), iminor(inode), routine);
- return 1;
- }
-#endif
- return 0;
-}
-
-static int check_tty_count(struct tty_struct *tty, const char *routine)
-{
-#ifdef CHECK_TTY_COUNT
- struct list_head *p;
- int count = 0;
-
- file_list_lock();
- list_for_each(p, &tty->tty_files) {
- count++;
- }
- file_list_unlock();
- if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
- tty->driver->subtype == PTY_TYPE_SLAVE &&
- tty->link && tty->link->count)
- count++;
- if (tty->count != count) {
- printk(KERN_WARNING "Warning: dev (%s) tty->count(%d) "
- "!= #fd's(%d) in %s\n",
- tty->name, tty->count, count, routine);
- return count;
- }
-#endif
- return 0;
-}
-
-/*
- * This is probably overkill for real world processors but
- * they are not on hot paths so a little discipline won't do
- * any harm.
- */
-
-static void tty_set_termios_ldisc(struct tty_struct *tty, int num)
-{
- down(&tty->termios_sem);
- tty->termios->c_line = num;
- up(&tty->termios_sem);
-}
-
-/*
- * This guards the refcounted line discipline lists. The lock
- * must be taken with irqs off because there are hangup path
- * callers who will do ldisc lookups and cannot sleep.
- */
-
-static DEFINE_SPINLOCK(tty_ldisc_lock);
-static DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_wait);
-static struct tty_ldisc tty_ldiscs[NR_LDISCS]; /* line disc dispatch table */
-
-int tty_register_ldisc(int disc, struct tty_ldisc *new_ldisc)
-{
- unsigned long flags;
- int ret = 0;
-
- if (disc < N_TTY || disc >= NR_LDISCS)
- return -EINVAL;
-
- spin_lock_irqsave(&tty_ldisc_lock, flags);
- if (new_ldisc) {
- tty_ldiscs[disc] = *new_ldisc;
- tty_ldiscs[disc].num = disc;
- tty_ldiscs[disc].flags |= LDISC_FLAG_DEFINED;
- tty_ldiscs[disc].refcount = 0;
- } else {
- if(tty_ldiscs[disc].refcount)
- ret = -EBUSY;
- else
- tty_ldiscs[disc].flags &= ~LDISC_FLAG_DEFINED;
- }
- spin_unlock_irqrestore(&tty_ldisc_lock, flags);
-
- return ret;
-}
-
-EXPORT_SYMBOL(tty_register_ldisc);
-
-struct tty_ldisc *tty_ldisc_get(int disc)
-{
- unsigned long flags;
- struct tty_ldisc *ld;
-
- if (disc < N_TTY || disc >= NR_LDISCS)
- return NULL;
-
- spin_lock_irqsave(&tty_ldisc_lock, flags);
-
- ld = &tty_ldiscs[disc];
- /* Check the entry is defined */
- if(ld->flags & LDISC_FLAG_DEFINED)
- {
- /* If the module is being unloaded we can't use it */
- if (!try_module_get(ld->owner))
- ld = NULL;
- else /* lock it */
- ld->refcount++;
- }
- else
- ld = NULL;
- spin_unlock_irqrestore(&tty_ldisc_lock, flags);
- return ld;
-}
-
-EXPORT_SYMBOL_GPL(tty_ldisc_get);
-
-void tty_ldisc_put(int disc)
-{
- struct tty_ldisc *ld;
- unsigned long flags;
-
- if (disc < N_TTY || disc >= NR_LDISCS)
- BUG();
-
- spin_lock_irqsave(&tty_ldisc_lock, flags);
- ld = &tty_ldiscs[disc];
- if(ld->refcount == 0)
- BUG();
- ld->refcount --;
- module_put(ld->owner);
- spin_unlock_irqrestore(&tty_ldisc_lock, flags);
-}
-
-EXPORT_SYMBOL_GPL(tty_ldisc_put);
-
-static void tty_ldisc_assign(struct tty_struct *tty, struct tty_ldisc *ld)
-{
- tty->ldisc = *ld;
- tty->ldisc.refcount = 0;
-}
-
-/**
- * tty_ldisc_try - internal helper
- * @tty: the tty
- *
- * Make a single attempt to grab and bump the refcount on
- * the tty ldisc. Return 0 on failure or 1 on success. This is
- * used to implement both the waiting and non waiting versions
- * of tty_ldisc_ref
- */
-
-static int tty_ldisc_try(struct tty_struct *tty)
-{
- unsigned long flags;
- struct tty_ldisc *ld;
- int ret = 0;
-
- spin_lock_irqsave(&tty_ldisc_lock, flags);
- ld = &tty->ldisc;
- if(test_bit(TTY_LDISC, &tty->flags))
- {
- ld->refcount++;
- ret = 1;
- }
- spin_unlock_irqrestore(&tty_ldisc_lock, flags);
- return ret;
-}
-
-/**
- * tty_ldisc_ref_wait - wait for the tty ldisc
- * @tty: tty device
- *
- * Dereference the line discipline for the terminal and take a
- * reference to it. If the line discipline is in flux then
- * wait patiently until it changes.
- *
- * Note: Must not be called from an IRQ/timer context. The caller
- * must also be careful not to hold other locks that will deadlock
- * against a discipline change, such as an existing ldisc reference
- * (which we check for)
- */
-
-struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *tty)
-{
- /* wait_event is a macro */
- wait_event(tty_ldisc_wait, tty_ldisc_try(tty));
- if(tty->ldisc.refcount == 0)
- printk(KERN_ERR "tty_ldisc_ref_wait\n");
- return &tty->ldisc;
-}
-
-EXPORT_SYMBOL_GPL(tty_ldisc_ref_wait);
-
-/**
- * tty_ldisc_ref - get the tty ldisc
- * @tty: tty device
- *
- * Dereference the line discipline for the terminal and take a
- * reference to it. If the line discipline is in flux then
- * return NULL. Can be called from IRQ and timer functions.
- */
-
-struct tty_ldisc *tty_ldisc_ref(struct tty_struct *tty)
-{
- if(tty_ldisc_try(tty))
- return &tty->ldisc;
- return NULL;
-}
-
-EXPORT_SYMBOL_GPL(tty_ldisc_ref);
-
-/**
- * tty_ldisc_deref - free a tty ldisc reference
- * @ld: reference to free up
- *
- * Undoes the effect of tty_ldisc_ref or tty_ldisc_ref_wait. May
- * be called in IRQ context.
- */
-
-void tty_ldisc_deref(struct tty_ldisc *ld)
-{
- unsigned long flags;
-
- if(ld == NULL)
- BUG();
-
- spin_lock_irqsave(&tty_ldisc_lock, flags);
- if(ld->refcount == 0)
- printk(KERN_ERR "tty_ldisc_deref: no references.\n");
- else
- ld->refcount--;
- if(ld->refcount == 0)
- wake_up(&tty_ldisc_wait);
- spin_unlock_irqrestore(&tty_ldisc_lock, flags);
-}
-
-EXPORT_SYMBOL_GPL(tty_ldisc_deref);
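
The tty_ldisc_try()/tty_ldisc_ref()/tty_ldisc_deref() trio above implements a refcount gated by the TTY_LDISC flag: references can only be taken while the flag is set, and dropping the last one wakes anyone waiting to change the discipline. A single-threaded model of just that counting protocol (locking, the wait queue, and the flag's bit operations are elided; the struct and the printf stand-ins for wake-ups are inventions of this sketch):

    #include <stdio.h>

    /* Single-threaded model of the ldisc reference gate: a reference can
     * only be taken while 'enabled' (the TTY_LDISC flag) is set, and the
     * last deref is where waiters would be woken. */
    struct ldisc_gate {
        int enabled;
        int refcount;
    };

    static int gate_try(struct ldisc_gate *g)
    {
        if (!g->enabled)
            return 0; /* tty_ldisc_ref() returns NULL here */
        g->refcount++;
        return 1;
    }

    static void gate_deref(struct ldisc_gate *g)
    {
        if (g->refcount == 0) {
            printf("no references\n"); /* the KERN_ERR case above */
            return;
        }
        if (--g->refcount == 0)
            printf("last ref dropped: wake waiters\n");
    }

    int main(void)
    {
        struct ldisc_gate g = { 1, 0 };

        printf("try: %d\n", gate_try(&g)); /* 1: reference taken */
        g.enabled = 0;                     /* discipline change in flux */
        printf("try: %d\n", gate_try(&g)); /* 0: refused */
        gate_deref(&g);                    /* drop, would wake the changer */
        return 0;
    }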
-
-/**
- * tty_ldisc_enable - allow ldisc use
- * @tty: terminal to activate ldisc on
- *
- * Set the TTY_LDISC flag when the line discipline can be called
- * again. Do neccessary wakeups for existing sleepers.
- *
- * Note: nobody should set this bit except via this function. Clearing
- * directly is allowed.
- */
-
-static void tty_ldisc_enable(struct tty_struct *tty)
-{
- set_bit(TTY_LDISC, &tty->flags);
- wake_up(&tty_ldisc_wait);
-}
-
-/**
- * tty_set_ldisc - set line discipline
- * @tty: the terminal to set
- * @ldisc: the line discipline
- *
- * Set the discipline of a tty line. Must be called from a process
- * context.
- */
-
-static int tty_set_ldisc(struct tty_struct *tty, int ldisc)
-{
- int retval = 0;
- struct tty_ldisc o_ldisc;
- char buf[64];
- int work;
- unsigned long flags;
- struct tty_ldisc *ld;
-
- if ((ldisc < N_TTY) || (ldisc >= NR_LDISCS))
- return -EINVAL;
-
-restart:
-
- if (tty->ldisc.num == ldisc)
- return 0; /* We are already in the desired discipline */
-
- ld = tty_ldisc_get(ldisc);
- /* Eduardo Blanco <ejbs@xxxxxxxxxxxx> */
- /* Cyrus Durgin <cider@xxxxxxxxxxxxx> */
- if (ld == NULL) {
- request_module("tty-ldisc-%d", ldisc);
- ld = tty_ldisc_get(ldisc);
- }
- if (ld == NULL)
- return -EINVAL;
-
- o_ldisc = tty->ldisc;
-
- tty_wait_until_sent(tty, 0);
-
- /*
- * Make sure we don't change while someone holds a
- * reference to the line discipline. The TTY_LDISC bit
- * prevents anyone taking a reference once it is clear.
- * We need the lock to avoid racing reference takers.
- */
-
- spin_lock_irqsave(&tty_ldisc_lock, flags);
- if(tty->ldisc.refcount)
- {
- /* Free the new ldisc we grabbed. Must drop the lock first. */
- spin_unlock_irqrestore(&tty_ldisc_lock, flags);
- tty_ldisc_put(ldisc);
- /*
- * There are several reasons we may be busy, including
- * random momentary I/O traffic. We must therefore
- * retry. We could distinguish between blocking ops
- * and retries if we made tty_ldisc_wait() smarter. That
- * is up for discussion.
- */
- if(wait_event_interruptible(tty_ldisc_wait, tty->ldisc.refcount == 0) < 0)
- return -ERESTARTSYS;
- goto restart;
- }
- clear_bit(TTY_LDISC, &tty->flags);
- clear_bit(TTY_DONT_FLIP, &tty->flags);
- spin_unlock_irqrestore(&tty_ldisc_lock, flags);
-
- /*
- * From this point on we know nobody has an ldisc
- * usage reference, nor can they obtain one until
- * we say so later on.
- */
-
- work = cancel_delayed_work(&tty->flip.work);
- /*
- * Wait for ->hangup_work and ->flip.work handlers to terminate
- */
-
- flush_scheduled_work();
- /* Shutdown the current discipline. */
- if (tty->ldisc.close)
- (tty->ldisc.close)(tty);
-
- /* Now set up the new line discipline. */
- tty_ldisc_assign(tty, ld);
- tty_set_termios_ldisc(tty, ldisc);
- if (tty->ldisc.open)
- retval = (tty->ldisc.open)(tty);
- if (retval < 0) {
- tty_ldisc_put(ldisc);
- /* There is an outstanding reference here so this is safe */
- tty_ldisc_assign(tty, tty_ldisc_get(o_ldisc.num));
- tty_set_termios_ldisc(tty, tty->ldisc.num);
- if (tty->ldisc.open && (tty->ldisc.open(tty) < 0)) {
- tty_ldisc_put(o_ldisc.num);
- /* This driver is always present */
- tty_ldisc_assign(tty, tty_ldisc_get(N_TTY));
- tty_set_termios_ldisc(tty, N_TTY);
- if (tty->ldisc.open) {
- int r = tty->ldisc.open(tty);
-
- if (r < 0)
- panic("Couldn't open N_TTY ldisc for "
- "%s --- error %d.",
- tty_name(tty, buf), r);
- }
- }
- }
- /* At this point we hold a reference to the new ldisc and a
- a reference to the old ldisc. If we ended up flipping back
- to the existing ldisc we have two references to it */
-
- if (tty->ldisc.num != o_ldisc.num && tty->driver->set_ldisc)
- tty->driver->set_ldisc(tty);
-
- tty_ldisc_put(o_ldisc.num);
-
- /*
- * Allow ldisc referencing to occur as soon as the driver
- * ldisc callback completes.
- */
-
- tty_ldisc_enable(tty);
-
- /* Restart it in case no characters kick it off. Safe if
- already running */
- if(work)
- schedule_delayed_work(&tty->flip.work, 1);
- return retval;
-}
-
-/*
- * This routine returns a tty driver structure, given a device number
- */
-static struct tty_driver *get_tty_driver(dev_t device, int *index)
-{
- struct tty_driver *p;
-
- list_for_each_entry(p, &tty_drivers, tty_drivers) {
- dev_t base = MKDEV(p->major, p->minor_start);
- if (device < base || device >= base + p->num)
- continue;
- *index = device - base;
- return p;
- }
- return NULL;
-}
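
get_tty_driver() above resolves a dev_t by checking whether it falls in the range [MKDEV(major, minor_start), base + num) for each registered driver, and the tty index is simply the offset from base. A standalone model with a hand-rolled 8-bit-minor MKDEV and made-up driver numbers, both assumptions of this sketch:

    #include <stdio.h>

    /* Standalone model of get_tty_driver()'s range check. MKDEV is
     * reimplemented with the conventional 8-bit minor encoding purely
     * for illustration, and the driver table is made up. */
    #define MKDEV(ma, mi) (((ma) << 8) | (mi))

    struct drv {
        const char *name;
        unsigned major, minor_start, num;
    };

    static const struct drv drivers[] = {
        { "ttyS", 4, 64, 4 },  /* ttyS0..ttyS3 */
        { "ptm", 128, 0, 16 },
    };

    static const struct drv *find(unsigned dev, unsigned *index)
    {
        size_t i;

        for (i = 0; i < sizeof(drivers) / sizeof(drivers[0]); i++) {
            unsigned base = MKDEV(drivers[i].major, drivers[i].minor_start);

            if (dev < base || dev >= base + drivers[i].num)
                continue;
            *index = dev - base;
            return &drivers[i];
        }
        return NULL;
    }

    int main(void)
    {
        unsigned idx;
        const struct drv *d = find(MKDEV(4, 66), &idx);

        if (d)
            printf("%s, index %u\n", d->name, idx); /* ttyS, index 2 */
        return 0;
    }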
-
-/*
- * If we try to write to, or set the state of, a terminal and we're
- * not in the foreground, send a SIGTTOU. If the signal is blocked or
- * ignored, go ahead and perform the operation. (POSIX 7.2)
- */
-int tty_check_change(struct tty_struct * tty)
-{
- if (current->signal->tty != tty)
- return 0;
- if (tty->pgrp <= 0) {
- printk(KERN_WARNING "tty_check_change: tty->pgrp <= 0!\n");
- return 0;
- }
- if (process_group(current) == tty->pgrp)
- return 0;
- if (is_ignored(SIGTTOU))
- return 0;
- if (is_orphaned_pgrp(process_group(current)))
- return -EIO;
- (void) kill_pg(process_group(current), SIGTTOU, 1);
- return -ERESTARTSYS;
-}
-
-EXPORT_SYMBOL(tty_check_change);
-
-static ssize_t hung_up_tty_read(struct file * file, char __user * buf,
- size_t count, loff_t *ppos)
-{
- return 0;
-}
-
-static ssize_t hung_up_tty_write(struct file * file, const char __user * buf,
- size_t count, loff_t *ppos)
-{
- return -EIO;
-}
-
-/* No kernel lock held - none needed ;) */
-static unsigned int hung_up_tty_poll(struct file * filp, poll_table * wait)
-{
- return POLLIN | POLLOUT | POLLERR | POLLHUP | POLLRDNORM | POLLWRNORM;
-}
-
-static int hung_up_tty_ioctl(struct inode * inode, struct file * file,
- unsigned int cmd, unsigned long arg)
-{
- return cmd == TIOCSPGRP ? -ENOTTY : -EIO;
-}
-
-static struct file_operations tty_fops = {
- .llseek = no_llseek,
- .read = tty_read,
- .write = tty_write,
- .poll = tty_poll,
- .ioctl = tty_ioctl,
- .open = tty_open,
- .release = tty_release,
- .fasync = tty_fasync,
-};
-
-#ifdef CONFIG_UNIX98_PTYS
-static struct file_operations ptmx_fops = {
- .llseek = no_llseek,
- .read = tty_read,
- .write = tty_write,
- .poll = tty_poll,
- .ioctl = tty_ioctl,
- .open = ptmx_open,
- .release = tty_release,
- .fasync = tty_fasync,
-};
-#endif
-
-static struct file_operations console_fops = {
- .llseek = no_llseek,
- .read = tty_read,
- .write = redirected_tty_write,
- .poll = tty_poll,
- .ioctl = tty_ioctl,
- .open = tty_open,
- .release = tty_release,
- .fasync = tty_fasync,
-};
-
-static struct file_operations hung_up_tty_fops = {
- .llseek = no_llseek,
- .read = hung_up_tty_read,
- .write = hung_up_tty_write,
- .poll = hung_up_tty_poll,
- .ioctl = hung_up_tty_ioctl,
- .release = tty_release,
-};
-
-static DEFINE_SPINLOCK(redirect_lock);
-static struct file *redirect;
-
-/**
- * tty_wakeup - request more data
- * @tty: terminal
- *
- * Internal and external helper for wakeups of tty. This function
- * informs the line discipline if present that the driver is ready
- * to receive more output data.
- */
-
-void tty_wakeup(struct tty_struct *tty)
-{
- struct tty_ldisc *ld;
-
- if (test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags)) {
- ld = tty_ldisc_ref(tty);
- if(ld) {
- if(ld->write_wakeup)
- ld->write_wakeup(tty);
- tty_ldisc_deref(ld);
- }
- }
- wake_up_interruptible(&tty->write_wait);
-}
-
-EXPORT_SYMBOL_GPL(tty_wakeup);
-
-/**
- * tty_ldisc_flush - flush line discipline queue
- * @tty: tty
- *
- * Flush the line discipline queue (if any) for this tty. If there
- * is no line discipline active this is a no-op.
- */
-
-void tty_ldisc_flush(struct tty_struct *tty)
-{
- struct tty_ldisc *ld = tty_ldisc_ref(tty);
- if(ld) {
- if(ld->flush_buffer)
- ld->flush_buffer(tty);
- tty_ldisc_deref(ld);
- }
-}
-
-EXPORT_SYMBOL_GPL(tty_ldisc_flush);
-
-/*
- * This can be called by the "eventd" kernel thread. That is process synchronous,
- * but doesn't hold any locks, so we need to make sure we have the appropriate
- * locks for what we're doing..
- */
-static void do_tty_hangup(void *data)
-{
- struct tty_struct *tty = (struct tty_struct *) data;
- struct file * cons_filp = NULL;
- struct file *filp, *f = NULL;
- struct task_struct *p;
- struct tty_ldisc *ld;
- int closecount = 0, n;
-
- if (!tty)
- return;
-
- /* inuse_filps is protected by the single kernel lock */
- lock_kernel();
-
- spin_lock(&redirect_lock);
- if (redirect && redirect->private_data == tty) {
- f = redirect;
- redirect = NULL;
- }
- spin_unlock(&redirect_lock);
-
- check_tty_count(tty, "do_tty_hangup");
- file_list_lock();
- /* This breaks for file handles being sent over AF_UNIX sockets ? */
- list_for_each_entry(filp, &tty->tty_files, f_list) {
- if (filp->f_op->write == redirected_tty_write)
- cons_filp = filp;
- if (filp->f_op->write != tty_write)
- continue;
- closecount++;
- tty_fasync(-1, filp, 0); /* can't block */
- filp->f_op = &hung_up_tty_fops;
- }
- file_list_unlock();
-
- /* FIXME! What are the locking issues here? This may me overdoing things..
- * this question is especially important now that we've removed the irqlock. */
-
- ld = tty_ldisc_ref(tty);
- if(ld != NULL) /* We may have no line discipline at this point */
- {
- if (ld->flush_buffer)
- ld->flush_buffer(tty);
- if (tty->driver->flush_buffer)
- tty->driver->flush_buffer(tty);
- if ((test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags)) &&
- ld->write_wakeup)
- ld->write_wakeup(tty);
- if (ld->hangup)
- ld->hangup(tty);
- }
-
- /* FIXME: Once we trust the LDISC code better we can wait here for
- ldisc completion and fix the driver call race */
-
- wake_up_interruptible(&tty->write_wait);
- wake_up_interruptible(&tty->read_wait);
-
- /*
- * Shutdown the current line discipline, and reset it to
- * N_TTY.
- */
- if (tty->driver->flags & TTY_DRIVER_RESET_TERMIOS)
- {
- down(&tty->termios_sem);
- *tty->termios = tty->driver->init_termios;
- up(&tty->termios_sem);
- }
-
- /* Defer ldisc switch */
- /* tty_deferred_ldisc_switch(N_TTY);
-
- This should get done automatically when the port closes and
- tty_release is called */
-
- read_lock(&tasklist_lock);
- if (tty->session > 0) {
- do_each_task_pid(tty->session, PIDTYPE_SID, p) {
- if (p->signal->tty == tty)
- p->signal->tty = NULL;
- if (!p->signal->leader)
- continue;
- send_group_sig_info(SIGHUP, SEND_SIG_PRIV, p);
- send_group_sig_info(SIGCONT, SEND_SIG_PRIV, p);
- if (tty->pgrp > 0)
- p->signal->tty_old_pgrp = tty->pgrp;
- } while_each_task_pid(tty->session, PIDTYPE_SID, p);
- }
- read_unlock(&tasklist_lock);
-
- tty->flags = 0;
- tty->session = 0;
- tty->pgrp = -1;
- tty->ctrl_status = 0;
- /*
- * If one of the devices matches a console pointer, we
- * cannot just call hangup() because that will cause
- * tty->count and state->count to go out of sync.
- * So we just call close() the right number of times.
- */
- if (cons_filp) {
- if (tty->driver->close)
- for (n = 0; n < closecount; n++)
- tty->driver->close(tty, cons_filp);
- } else if (tty->driver->hangup)
- (tty->driver->hangup)(tty);
-
- /* We don't want to have driver/ldisc interactions beyond
- the ones we did here. The driver layer expects no
- calls after ->hangup() from the ldisc side. However we
- can't yet guarantee all that */
-
- set_bit(TTY_HUPPED, &tty->flags);
- if (ld) {
- tty_ldisc_enable(tty);
- tty_ldisc_deref(ld);
- }
- unlock_kernel();
- if (f)
- fput(f);
-}
-
-void tty_hangup(struct tty_struct * tty)
-{
-#ifdef TTY_DEBUG_HANGUP
- char buf[64];
-
- printk(KERN_DEBUG "%s hangup...\n", tty_name(tty, buf));
-#endif
- schedule_work(&tty->hangup_work);
-}
-
-EXPORT_SYMBOL(tty_hangup);
-
-void tty_vhangup(struct tty_struct * tty)
-{
-#ifdef TTY_DEBUG_HANGUP
- char buf[64];
-
- printk(KERN_DEBUG "%s vhangup...\n", tty_name(tty, buf));
-#endif
- do_tty_hangup((void *) tty);
-}
-EXPORT_SYMBOL(tty_vhangup);
-
-int tty_hung_up_p(struct file * filp)
-{
- return (filp->f_op == &hung_up_tty_fops);
-}
-
-EXPORT_SYMBOL(tty_hung_up_p);
-
-/*
- * This function is typically called only by the session leader, when
- * it wants to disassociate itself from its controlling tty.
- *
- * It performs the following functions:
- * (1) Sends a SIGHUP and SIGCONT to the foreground process group
- * (2) Clears the tty from being controlling the session
- * (3) Clears the controlling tty for all processes in the
- * session group.
- *
- * The argument on_exit is set to 1 if called when a process is
- * exiting; it is 0 if called by the ioctl TIOCNOTTY.
- */
-void disassociate_ctty(int on_exit)
-{
- struct tty_struct *tty;
- struct task_struct *p;
- int tty_pgrp = -1;
-
- lock_kernel();
-
- down(&tty_sem);
- tty = current->signal->tty;
- if (tty) {
- tty_pgrp = tty->pgrp;
- up(&tty_sem);
- if (on_exit && tty->driver->type != TTY_DRIVER_TYPE_PTY)
- tty_vhangup(tty);
- } else {
- if (current->signal->tty_old_pgrp) {
- kill_pg(current->signal->tty_old_pgrp, SIGHUP, on_exit);
- kill_pg(current->signal->tty_old_pgrp, SIGCONT, on_exit);
- }
- up(&tty_sem);
- unlock_kernel();
- return;
- }
- if (tty_pgrp > 0) {
- kill_pg(tty_pgrp, SIGHUP, on_exit);
- if (!on_exit)
- kill_pg(tty_pgrp, SIGCONT, on_exit);
- }
-
- /* Must lock changes to tty_old_pgrp */
- down(&tty_sem);
- current->signal->tty_old_pgrp = 0;
- tty->session = 0;
- tty->pgrp = -1;
-
- /* Now clear signal->tty under the lock */
- read_lock(&tasklist_lock);
- do_each_task_pid(current->signal->session, PIDTYPE_SID, p) {
- p->signal->tty = NULL;
- } while_each_task_pid(current->signal->session, PIDTYPE_SID, p);
- read_unlock(&tasklist_lock);
- up(&tty_sem);
- unlock_kernel();
-}
-
-void stop_tty(struct tty_struct *tty)
-{
- if (tty->stopped)
- return;
- tty->stopped = 1;
- if (tty->link && tty->link->packet) {
- tty->ctrl_status &= ~TIOCPKT_START;
- tty->ctrl_status |= TIOCPKT_STOP;
- wake_up_interruptible(&tty->link->read_wait);
- }
- if (tty->driver->stop)
- (tty->driver->stop)(tty);
-}
-
-EXPORT_SYMBOL(stop_tty);
-
-void start_tty(struct tty_struct *tty)
-{
- if (!tty->stopped || tty->flow_stopped)
- return;
- tty->stopped = 0;
- if (tty->link && tty->link->packet) {
- tty->ctrl_status &= ~TIOCPKT_STOP;
- tty->ctrl_status |= TIOCPKT_START;
- wake_up_interruptible(&tty->link->read_wait);
- }
- if (tty->driver->start)
- (tty->driver->start)(tty);
-
- /* If we have a running line discipline it may need kicking */
- tty_wakeup(tty);
- wake_up_interruptible(&tty->write_wait);
-}
-
-EXPORT_SYMBOL(start_tty);
-
-static ssize_t tty_read(struct file * file, char __user * buf, size_t count,
- loff_t *ppos)
-{
- int i;
- struct tty_struct * tty;
- struct inode *inode;
- struct tty_ldisc *ld;
-
- tty = (struct tty_struct *)file->private_data;
- inode = file->f_dentry->d_inode;
- if (tty_paranoia_check(tty, inode, "tty_read"))
- return -EIO;
- if (!tty || (test_bit(TTY_IO_ERROR, &tty->flags)))
- return -EIO;
-
- /* We want to wait for the line discipline to sort out in this situation */
- ld = tty_ldisc_ref_wait(tty);
- lock_kernel();
- if (ld->read)
- i = (ld->read)(tty,file,buf,count);
- else
- i = -EIO;
- tty_ldisc_deref(ld);
- unlock_kernel();
- if (i > 0)
- inode->i_atime = current_fs_time(inode->i_sb);
- return i;
-}
-
-/*
- * Split writes up in sane blocksizes to avoid
- * denial-of-service type attacks
- */
-static inline ssize_t do_tty_write(
- ssize_t (*write)(struct tty_struct *, struct file *, const unsigned char *, size_t),
- struct tty_struct *tty,
- struct file *file,
- const char __user *buf,
- size_t count)
-{
- ssize_t ret = 0, written = 0;
- unsigned int chunk;
-
- if (down_interruptible(&tty->atomic_write)) {
- return -ERESTARTSYS;
- }
-
- /*
- * We chunk up writes into a temporary buffer. This
- * simplifies low-level drivers immensely, since they
- * don't have locking issues and user mode accesses.
- *
- * But if TTY_NO_WRITE_SPLIT is set, we should use a
- * big chunk-size..
- *
- * The default chunk-size is 2kB, because the NTTY
- * layer has problems with bigger chunks. It will
- * claim to be able to handle more characters than
- * it actually does.
- */
- chunk = 2048;
- if (test_bit(TTY_NO_WRITE_SPLIT, &tty->flags))
- chunk = 65536;
- if (count < chunk)
- chunk = count;
-
- /* write_buf/write_cnt is protected by the atomic_write semaphore */
- if (tty->write_cnt < chunk) {
- unsigned char *buf;
-
- if (chunk < 1024)
- chunk = 1024;
-
- buf = kmalloc(chunk, GFP_KERNEL);
- if (!buf) {
- up(&tty->atomic_write);
- return -ENOMEM;
- }
- kfree(tty->write_buf);
- tty->write_cnt = chunk;
- tty->write_buf = buf;
- }
-
- /* Do the write .. */
- for (;;) {
- size_t size = count;
- if (size > chunk)
- size = chunk;
- ret = -EFAULT;
- if (copy_from_user(tty->write_buf, buf, size))
- break;
- lock_kernel();
- ret = write(tty, file, tty->write_buf, size);
- unlock_kernel();
- if (ret <= 0)
- break;
- written += ret;
- buf += ret;
- count -= ret;
- if (!count)
- break;
- ret = -ERESTARTSYS;
- if (signal_pending(current))
- break;
- cond_resched();
- }
- if (written) {
- struct inode *inode = file->f_dentry->d_inode;
- inode->i_mtime = current_fs_time(inode->i_sb);
- ret = written;
- }
- up(&tty->atomic_write);
- return ret;
-}
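
do_tty_write() above bounds each driver call to a chunk (2kB by default, 64kB with TTY_NO_WRITE_SPLIT) and loops until the count is consumed or the driver returns a non-positive result. A minimal model of that loop (the pretend driver that accepts at most 200 bytes per call is an assumption of this sketch; signal checks and the copy_from_user() step are omitted):

    #include <stdio.h>

    /* Minimal model of do_tty_write()'s chunking loop: feed the driver
     * bounded slices until the count is consumed or a write fails. */
    static long driver_write(long size)
    {
        return size > 200 ? 200 : size; /* pretend driver, always succeeds */
    }

    static long chunked_write(long count, long chunk)
    {
        long written = 0;

        while (count > 0) {
            long size = count < chunk ? count : chunk;
            long ret = driver_write(size);

            if (ret <= 0)
                break;
            written += ret;
            count -= ret;
        }
        return written;
    }

    int main(void)
    {
        /* 2048 mirrors the default chunk; 65536 the TTY_NO_WRITE_SPLIT case */
        printf("%ld\n", chunked_write(5000, 2048)); /* 5000 */
        return 0;
    }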
-
-
-static ssize_t tty_write(struct file * file, const char __user * buf, size_t count,
- loff_t *ppos)
-{
- struct tty_struct * tty;
- struct inode *inode = file->f_dentry->d_inode;
- ssize_t ret;
- struct tty_ldisc *ld;
-
- tty = (struct tty_struct *)file->private_data;
- if (tty_paranoia_check(tty, inode, "tty_write"))
- return -EIO;
- if (!tty || !tty->driver->write || (test_bit(TTY_IO_ERROR,
&tty->flags)))
- return -EIO;
-
- ld = tty_ldisc_ref_wait(tty);
- if (!ld->write)
- ret = -EIO;
- else
- ret = do_tty_write(ld->write, tty, file, buf, count);
- tty_ldisc_deref(ld);
- return ret;
-}
-
-ssize_t redirected_tty_write(struct file * file, const char __user * buf, size_t count,
- loff_t *ppos)
-{
- struct file *p = NULL;
-
- spin_lock(&redirect_lock);
- if (redirect) {
- get_file(redirect);
- p = redirect;
- }
- spin_unlock(&redirect_lock);
-
- if (p) {
- ssize_t res;
- res = vfs_write(p, buf, count, &p->f_pos);
- fput(p);
- return res;
- }
-
- return tty_write(file, buf, count, ppos);
-}
-
-static char ptychar[] = "pqrstuvwxyzabcde";
-
-static inline void pty_line_name(struct tty_driver *driver, int index, char *p)
-{
- int i = index + driver->name_base;
- /* ->name is initialized to "ttyp", but "tty" is expected */
- sprintf(p, "%s%c%x",
- driver->subtype == PTY_TYPE_SLAVE ? "tty" : driver->name,
- ptychar[i >> 4 & 0xf], i & 0xf);
-}
-
-static inline void tty_line_name(struct tty_driver *driver, int index, char *p)
-{
- sprintf(p, "%s%d", driver->name, index + driver->name_base);
-}
-
-/*
- * WSH 06/09/97: Rewritten to remove races and properly clean up after a
- * failed open. The new code protects the open with a semaphore, so it's
- * really quite straightforward. The semaphore locking can probably be
- * relaxed for the (most common) case of reopening a tty.
- */
-static int init_dev(struct tty_driver *driver, int idx,
- struct tty_struct **ret_tty)
-{
- struct tty_struct *tty, *o_tty;
- struct termios *tp, **tp_loc, *o_tp, **o_tp_loc;
- struct termios *ltp, **ltp_loc, *o_ltp, **o_ltp_loc;
- int retval=0;
-
- /* check whether we're reopening an existing tty */
- if (driver->flags & TTY_DRIVER_DEVPTS_MEM) {
- tty = devpts_get_tty(idx);
- if (tty && driver->subtype == PTY_TYPE_MASTER)
- tty = tty->link;
- } else {
- tty = driver->ttys[idx];
- }
- if (tty) goto fast_track;
-
- /*
- * First time open is complex, especially for PTY devices.
- * This code guarantees that either everything succeeds and the
- * TTY is ready for operation, or else the table slots are vacated
- * and the allocated memory released. (Except that the termios
- * and locked termios may be retained.)
- */
-
- if (!try_module_get(driver->owner)) {
- retval = -ENODEV;
- goto end_init;
- }
-
- o_tty = NULL;
- tp = o_tp = NULL;
- ltp = o_ltp = NULL;
-
- tty = alloc_tty_struct();
- if(!tty)
- goto fail_no_mem;
- initialize_tty_struct(tty);
- tty->driver = driver;
- tty->index = idx;
- tty_line_name(driver, idx, tty->name);
-
- if (driver->flags & TTY_DRIVER_DEVPTS_MEM) {
- tp_loc = &tty->termios;
- ltp_loc = &tty->termios_locked;
- } else {
- tp_loc = &driver->termios[idx];
- ltp_loc = &driver->termios_locked[idx];
- }
-
- if (!*tp_loc) {
- tp = (struct termios *) kmalloc(sizeof(struct termios),
- GFP_KERNEL);
- if (!tp)
- goto free_mem_out;
- *tp = driver->init_termios;
- }
-
- if (!*ltp_loc) {
- ltp = (struct termios *) kmalloc(sizeof(struct termios),
- GFP_KERNEL);
- if (!ltp)
- goto free_mem_out;
- memset(ltp, 0, sizeof(struct termios));
- }
-
- if (driver->type == TTY_DRIVER_TYPE_PTY) {
- o_tty = alloc_tty_struct();
- if (!o_tty)
- goto free_mem_out;
- initialize_tty_struct(o_tty);
- o_tty->driver = driver->other;
- o_tty->index = idx;
- tty_line_name(driver->other, idx, o_tty->name);
-
- if (driver->flags & TTY_DRIVER_DEVPTS_MEM) {
- o_tp_loc = &o_tty->termios;
- o_ltp_loc = &o_tty->termios_locked;
- } else {
- o_tp_loc = &driver->other->termios[idx];
- o_ltp_loc = &driver->other->termios_locked[idx];
- }
-
- if (!*o_tp_loc) {
- o_tp = (struct termios *)
- kmalloc(sizeof(struct termios), GFP_KERNEL);
- if (!o_tp)
- goto free_mem_out;
- *o_tp = driver->other->init_termios;
- }
-
- if (!*o_ltp_loc) {
- o_ltp = (struct termios *)
- kmalloc(sizeof(struct termios), GFP_KERNEL);
- if (!o_ltp)
- goto free_mem_out;
- memset(o_ltp, 0, sizeof(struct termios));
- }
-
- /*
- * Everything allocated ... set up the o_tty structure.
- */
- if (!(driver->other->flags & TTY_DRIVER_DEVPTS_MEM)) {
- driver->other->ttys[idx] = o_tty;
- }
- if (!*o_tp_loc)
- *o_tp_loc = o_tp;
- if (!*o_ltp_loc)
- *o_ltp_loc = o_ltp;
- o_tty->termios = *o_tp_loc;
- o_tty->termios_locked = *o_ltp_loc;
- driver->other->refcount++;
- if (driver->subtype == PTY_TYPE_MASTER)
- o_tty->count++;
-
- /* Establish the links in both directions */
- tty->link = o_tty;
- o_tty->link = tty;
- }
-
- /*
- * All structures have been allocated, so now we install them.
- * Failures after this point use release_mem to clean up, so
- * there's no need to null out the local pointers.
- */
- if (!(driver->flags & TTY_DRIVER_DEVPTS_MEM)) {
- driver->ttys[idx] = tty;
- }
-
- if (!*tp_loc)
- *tp_loc = tp;
- if (!*ltp_loc)
- *ltp_loc = ltp;
- tty->termios = *tp_loc;
- tty->termios_locked = *ltp_loc;
- driver->refcount++;
- tty->count++;
-
- /*
- * Structures all installed ... call the ldisc open routines.
- * If we fail here just call release_mem to clean up. No need
- * to decrement the use counts, as release_mem doesn't care.
- */
-
- if (tty->ldisc.open) {
- retval = (tty->ldisc.open)(tty);
- if (retval)
- goto release_mem_out;
- }
- if (o_tty && o_tty->ldisc.open) {
- retval = (o_tty->ldisc.open)(o_tty);
- if (retval) {
- if (tty->ldisc.close)
- (tty->ldisc.close)(tty);
- goto release_mem_out;
- }
- tty_ldisc_enable(o_tty);
- }
- tty_ldisc_enable(tty);
- goto success;
-
- /*
- * This fast open can be used if the tty is already open.
- * No memory is allocated, and the only failures are from
- * attempting to open a closing tty or attempting multiple
- * opens on a pty master.
- */
-fast_track:
- if (test_bit(TTY_CLOSING, &tty->flags)) {
- retval = -EIO;
- goto end_init;
- }
- if (driver->type == TTY_DRIVER_TYPE_PTY &&
- driver->subtype == PTY_TYPE_MASTER) {
- /*
- * special case for PTY masters: only one open permitted,
- * and the slave side open count is incremented as well.
- */
- if (tty->count) {
- retval = -EIO;
- goto end_init;
- }
- tty->link->count++;
- }
- tty->count++;
- tty->driver = driver; /* N.B. why do this every time?? */
-
- /* FIXME */
- if(!test_bit(TTY_LDISC, &tty->flags))
- printk(KERN_ERR "init_dev but no ldisc\n");
-success:
- *ret_tty = tty;
-
- /* All paths come through here to release the semaphore */
-end_init:
- return retval;
-
- /* Release locally allocated memory ... nothing placed in slots */
-free_mem_out:
- if (o_tp)
- kfree(o_tp);
- if (o_tty)
- free_tty_struct(o_tty);
- if (ltp)
- kfree(ltp);
- if (tp)
- kfree(tp);
- free_tty_struct(tty);
-
-fail_no_mem:
- module_put(driver->owner);
- retval = -ENOMEM;
- goto end_init;
-
- /* call the tty release_mem routine to clean out this slot */
-release_mem_out:
- printk(KERN_INFO "init_dev: ldisc open failed, "
- "clearing slot %d\n", idx);
- release_mem(tty, idx);
- goto end_init;
-}
-
-/*
- * Releases memory associated with a tty structure, and clears out the
- * driver table slots.
- */
-static void release_mem(struct tty_struct *tty, int idx)
-{
- struct tty_struct *o_tty;
- struct termios *tp;
- int devpts = tty->driver->flags & TTY_DRIVER_DEVPTS_MEM;
-
- if ((o_tty = tty->link) != NULL) {
- if (!devpts)
- o_tty->driver->ttys[idx] = NULL;
- if (o_tty->driver->flags & TTY_DRIVER_RESET_TERMIOS) {
- tp = o_tty->termios;
- if (!devpts)
- o_tty->driver->termios[idx] = NULL;
- kfree(tp);
-
- tp = o_tty->termios_locked;
- if (!devpts)
- o_tty->driver->termios_locked[idx] = NULL;
- kfree(tp);
- }
- o_tty->magic = 0;
- o_tty->driver->refcount--;
- file_list_lock();
- list_del_init(&o_tty->tty_files);
- file_list_unlock();
- free_tty_struct(o_tty);
- }
-
- if (!devpts)
- tty->driver->ttys[idx] = NULL;
- if (tty->driver->flags & TTY_DRIVER_RESET_TERMIOS) {
- tp = tty->termios;
- if (!devpts)
- tty->driver->termios[idx] = NULL;
- kfree(tp);
-
- tp = tty->termios_locked;
- if (!devpts)
- tty->driver->termios_locked[idx] = NULL;
- kfree(tp);
- }
-
- tty->magic = 0;
- tty->driver->refcount--;
- file_list_lock();
- list_del_init(&tty->tty_files);
- file_list_unlock();
- module_put(tty->driver->owner);
- free_tty_struct(tty);
-}
-
-/*
- * Even releasing the tty structures is a tricky business.. We have
- * to be very careful that the structures are all released at the
- * same time, as interrupts might otherwise get the wrong pointers.
- *
- * WSH 09/09/97: rewritten to avoid some nasty race conditions that could
- * lead to double frees or releasing memory still in use.
- */
-static void release_dev(struct file * filp)
-{
- struct tty_struct *tty, *o_tty;
- int pty_master, tty_closing, o_tty_closing, do_sleep;
- int devpts_master, devpts;
- int idx;
- char buf[64];
- unsigned long flags;
-
- tty = (struct tty_struct *)filp->private_data;
- if (tty_paranoia_check(tty, filp->f_dentry->d_inode, "release_dev"))
- return;
-
- check_tty_count(tty, "release_dev");
-
- tty_fasync(-1, filp, 0);
-
- idx = tty->index;
- pty_master = (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
- tty->driver->subtype == PTY_TYPE_MASTER);
- devpts = (tty->driver->flags & TTY_DRIVER_DEVPTS_MEM) != 0;
- devpts_master = pty_master && devpts;
- o_tty = tty->link;
-
-#ifdef TTY_PARANOIA_CHECK
- if (idx < 0 || idx >= tty->driver->num) {
- printk(KERN_DEBUG "release_dev: bad idx when trying to "
- "free (%s)\n", tty->name);
- return;
- }
- if (!(tty->driver->flags & TTY_DRIVER_DEVPTS_MEM)) {
- if (tty != tty->driver->ttys[idx]) {
- printk(KERN_DEBUG "release_dev: driver.table[%d] not
tty "
- "for (%s)\n", idx, tty->name);
- return;
- }
- if (tty->termios != tty->driver->termios[idx]) {
- printk(KERN_DEBUG "release_dev: driver.termios[%d] not
termios "
- "for (%s)\n",
- idx, tty->name);
- return;
- }
- if (tty->termios_locked != tty->driver->termios_locked[idx]) {
- printk(KERN_DEBUG "release_dev:
driver.termios_locked[%d] not "
- "termios_locked for (%s)\n",
- idx, tty->name);
- return;
- }
- }
-#endif
-
-#ifdef TTY_DEBUG_HANGUP
- printk(KERN_DEBUG "release_dev of %s (tty count=%d)...",
- tty_name(tty, buf), tty->count);
-#endif
-
-#ifdef TTY_PARANOIA_CHECK
- if (tty->driver->other &&
- !(tty->driver->flags & TTY_DRIVER_DEVPTS_MEM)) {
- if (o_tty != tty->driver->other->ttys[idx]) {
- printk(KERN_DEBUG "release_dev: other->table[%d] "
- "not o_tty for (%s)\n",
- idx, tty->name);
- return;
- }
- if (o_tty->termios != tty->driver->other->termios[idx]) {
- printk(KERN_DEBUG "release_dev: other->termios[%d] "
- "not o_termios for (%s)\n",
- idx, tty->name);
- return;
- }
- if (o_tty->termios_locked !=
- tty->driver->other->termios_locked[idx]) {
- printk(KERN_DEBUG "release_dev: other->termios_locked["
- "%d] not o_termios_locked for (%s)\n",
- idx, tty->name);
- return;
- }
- if (o_tty->link != tty) {
- printk(KERN_DEBUG "release_dev: bad pty pointers\n");
- return;
- }
- }
-#endif
- if (tty->driver->close)
- tty->driver->close(tty, filp);
-
- /*
- * Sanity check: if tty->count is going to zero, there shouldn't be
- * any waiters on tty->read_wait or tty->write_wait. We test the
- * wait queues and kick everyone out _before_ actually starting to
- * close. This ensures that we won't block while releasing the tty
- * structure.
- *
- * The test for the o_tty closing is necessary, since the master and
- * slave sides may close in any order. If the slave side closes out
- * first, its count will be one, since the master side holds an open.
- * Thus this test wouldn't be triggered at the time the slave closes,
- * so we do it now.
- *
- * Note that it's possible for the tty to be opened again while we're
- * flushing out waiters. By recalculating the closing flags before
- * each iteration we avoid any problems.
- */
- while (1) {
- /* Guard against races with tty->count changes elsewhere and
- opens on /dev/tty */
-
- down(&tty_sem);
- tty_closing = tty->count <= 1;
- o_tty_closing = o_tty &&
- (o_tty->count <= (pty_master ? 1 : 0));
- up(&tty_sem);
- do_sleep = 0;
-
- if (tty_closing) {
- if (waitqueue_active(&tty->read_wait)) {
- wake_up(&tty->read_wait);
- do_sleep++;
- }
- if (waitqueue_active(&tty->write_wait)) {
- wake_up(&tty->write_wait);
- do_sleep++;
- }
- }
- if (o_tty_closing) {
- if (waitqueue_active(&o_tty->read_wait)) {
- wake_up(&o_tty->read_wait);
- do_sleep++;
- }
- if (waitqueue_active(&o_tty->write_wait)) {
- wake_up(&o_tty->write_wait);
- do_sleep++;
- }
- }
- if (!do_sleep)
- break;
-
- printk(KERN_WARNING "release_dev: %s: read/write wait queue "
- "active!\n", tty_name(tty, buf));
- schedule();
- }
-
- /*
- * The closing flags are now consistent with the open counts on
- * both sides, and we've completed the last operation that could
- * block, so it's safe to proceed with closing.
- */
-
- down(&tty_sem);
- if (pty_master) {
- if (--o_tty->count < 0) {
- printk(KERN_WARNING "release_dev: bad pty slave count "
- "(%d) for %s\n",
- o_tty->count, tty_name(o_tty, buf));
- o_tty->count = 0;
- }
- }
- if (--tty->count < 0) {
- printk(KERN_WARNING "release_dev: bad tty->count (%d) for %s\n",
- tty->count, tty_name(tty, buf));
- tty->count = 0;
- }
- up(&tty_sem);
-
- /*
- * We've decremented tty->count, so we need to remove this file
- * descriptor off the tty->tty_files list; this serves two
- * purposes:
- * - check_tty_count sees the correct number of file descriptors
- * associated with this tty.
- * - do_tty_hangup no longer sees this file descriptor as
- * something that needs to be handled for hangups.
- */
- file_kill(filp);
- filp->private_data = NULL;
-
- /*
- * Perform some housekeeping before deciding whether to return.
- *
- * Set the TTY_CLOSING flag if this was the last open. In the
- * case of a pty we may have to wait around for the other side
- * to close, and TTY_CLOSING makes sure we can't be reopened.
- */
- if(tty_closing)
- set_bit(TTY_CLOSING, &tty->flags);
- if(o_tty_closing)
- set_bit(TTY_CLOSING, &o_tty->flags);
-
- /*
- * If _either_ side is closing, make sure there aren't any
- * processes that still think tty or o_tty is their controlling
- * tty.
- */
- if (tty_closing || o_tty_closing) {
- struct task_struct *p;
-
- read_lock(&tasklist_lock);
- do_each_task_pid(tty->session, PIDTYPE_SID, p) {
- p->signal->tty = NULL;
- } while_each_task_pid(tty->session, PIDTYPE_SID, p);
- if (o_tty)
- do_each_task_pid(o_tty->session, PIDTYPE_SID, p) {
- p->signal->tty = NULL;
- } while_each_task_pid(o_tty->session, PIDTYPE_SID, p);
- read_unlock(&tasklist_lock);
- }
-
- /* check whether both sides are closing ... */
- if (!tty_closing || (o_tty && !o_tty_closing))
- return;
-
-#ifdef TTY_DEBUG_HANGUP
- printk(KERN_DEBUG "freeing tty structure...");
-#endif
- /*
- * Prevent flush_to_ldisc() from rescheduling the work for later. Then
- * kill any delayed work. As this is the final close it does not
- * race with the set_ldisc code path.
- */
- clear_bit(TTY_LDISC, &tty->flags);
- clear_bit(TTY_DONT_FLIP, &tty->flags);
- cancel_delayed_work(&tty->flip.work);
-
- /*
- * Wait for ->hangup_work and ->flip.work handlers to terminate
- */
-
- flush_scheduled_work();
-
- /*
- * Wait for any short term users (we know they are just driver
- * side waiters as the file is closing so user count on the file
- * side is zero.
- */
- spin_lock_irqsave(&tty_ldisc_lock, flags);
- while(tty->ldisc.refcount)
- {
- spin_unlock_irqrestore(&tty_ldisc_lock, flags);
- wait_event(tty_ldisc_wait, tty->ldisc.refcount == 0);
- spin_lock_irqsave(&tty_ldisc_lock, flags);
- }
- spin_unlock_irqrestore(&tty_ldisc_lock, flags);
- /*
- * Shutdown the current line discipline, and reset it to N_TTY.
- * N.B. why reset ldisc when we're releasing the memory??
- *
- * FIXME: this MUST get fixed for the new reflocking
- */
- if (tty->ldisc.close)
- (tty->ldisc.close)(tty);
- tty_ldisc_put(tty->ldisc.num);
-
- /*
- * Switch the line discipline back
- */
- tty_ldisc_assign(tty, tty_ldisc_get(N_TTY));
- tty_set_termios_ldisc(tty,N_TTY);
- if (o_tty) {
- /* FIXME: could o_tty be in setldisc here ? */
- clear_bit(TTY_LDISC, &o_tty->flags);
- if (o_tty->ldisc.close)
- (o_tty->ldisc.close)(o_tty);
- tty_ldisc_put(o_tty->ldisc.num);
- tty_ldisc_assign(o_tty, tty_ldisc_get(N_TTY));
- tty_set_termios_ldisc(o_tty,N_TTY);
- }
- /*
- * The release_mem function takes care of the details of clearing
- * the slots and preserving the termios structure.
- */
- release_mem(tty, idx);
-
-#ifdef CONFIG_UNIX98_PTYS
- /* Make this pty number available for reallocation */
- if (devpts) {
- down(&allocated_ptys_lock);
- idr_remove(&allocated_ptys, idx);
- up(&allocated_ptys_lock);
- }
-#endif
-
-}
-
-/*
- * tty_open and tty_release keep up the tty count that contains the
- * number of opens done on a tty. We cannot use the inode-count, as
- * different inodes might point to the same tty.
- *
- * Open-counting is needed for pty masters, as well as for keeping
- * track of serial lines: DTR is dropped when the last close happens.
- * (This is not done solely through tty->count, now. - Ted 1/27/92)
- *
- * The termios state of a pty is reset on first open so that
- * settings don't persist across reuse.
- */
-static int tty_open(struct inode * inode, struct file * filp)
-{
- struct tty_struct *tty;
- int noctty, retval;
- struct tty_driver *driver;
- int index;
- dev_t device = inode->i_rdev;
- unsigned short saved_flags = filp->f_flags;
-
- nonseekable_open(inode, filp);
-
-retry_open:
- noctty = filp->f_flags & O_NOCTTY;
- index = -1;
- retval = 0;
-
- down(&tty_sem);
-
- if (device == MKDEV(TTYAUX_MAJOR,0)) {
- if (!current->signal->tty) {
- up(&tty_sem);
- return -ENXIO;
- }
- driver = current->signal->tty->driver;
- index = current->signal->tty->index;
- filp->f_flags |= O_NONBLOCK; /* Don't let /dev/tty block */
- /* noctty = 1; */
- goto got_driver;
- }
-#ifdef CONFIG_VT
- if (console_use_vt && (device == MKDEV(TTY_MAJOR,0))) {
- extern int fg_console;
- extern struct tty_driver *console_driver;
- driver = console_driver;
- index = fg_console;
- noctty = 1;
- goto got_driver;
- }
-#endif
- if (device == MKDEV(TTYAUX_MAJOR,1)) {
- driver = console_device(&index);
- if (driver) {
- /* Don't let /dev/console block */
- filp->f_flags |= O_NONBLOCK;
- noctty = 1;
- goto got_driver;
- }
- up(&tty_sem);
- return -ENODEV;
- }
-
- driver = get_tty_driver(device, &index);
- if (!driver) {
- up(&tty_sem);
- return -ENODEV;
- }
-got_driver:
- retval = init_dev(driver, index, &tty);
- up(&tty_sem);
- if (retval)
- return retval;
-
- filp->private_data = tty;
- file_move(filp, &tty->tty_files);
- check_tty_count(tty, "tty_open");
- if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
- tty->driver->subtype == PTY_TYPE_MASTER)
- noctty = 1;
-#ifdef TTY_DEBUG_HANGUP
- printk(KERN_DEBUG "opening %s...", tty->name);
-#endif
- if (!retval) {
- if (tty->driver->open)
- retval = tty->driver->open(tty, filp);
- else
- retval = -ENODEV;
- }
- filp->f_flags = saved_flags;
-
- if (!retval && test_bit(TTY_EXCLUSIVE, &tty->flags) &&
!capable(CAP_SYS_ADMIN))
- retval = -EBUSY;
-
- if (retval) {
-#ifdef TTY_DEBUG_HANGUP
- printk(KERN_DEBUG "error %d in opening %s...", retval,
- tty->name);
-#endif
- release_dev(filp);
- if (retval != -ERESTARTSYS)
- return retval;
- if (signal_pending(current))
- return retval;
- schedule();
- /*
- * Need to reset f_op in case a hangup happened.
- */
- if (filp->f_op == &hung_up_tty_fops)
- filp->f_op = &tty_fops;
- goto retry_open;
- }
- if (!noctty &&
- current->signal->leader &&
- !current->signal->tty &&
- tty->session == 0) {
- task_lock(current);
- current->signal->tty = tty;
- task_unlock(current);
- current->signal->tty_old_pgrp = 0;
- tty->session = current->signal->session;
- tty->pgrp = process_group(current);
- }
- return 0;
-}
-
-#ifdef CONFIG_UNIX98_PTYS
-static int ptmx_open(struct inode * inode, struct file * filp)
-{
- struct tty_struct *tty;
- int retval;
- int index;
- int idr_ret;
-
- nonseekable_open(inode, filp);
-
- /* find a device that is not in use. */
- down(&allocated_ptys_lock);
- if (!idr_pre_get(&allocated_ptys, GFP_KERNEL)) {
- up(&allocated_ptys_lock);
- return -ENOMEM;
- }
- idr_ret = idr_get_new(&allocated_ptys, NULL, &index);
- if (idr_ret < 0) {
- up(&allocated_ptys_lock);
- if (idr_ret == -EAGAIN)
- return -ENOMEM;
- return -EIO;
- }
- if (index >= pty_limit) {
- idr_remove(&allocated_ptys, index);
- up(&allocated_ptys_lock);
- return -EIO;
- }
- up(&allocated_ptys_lock);
-
- down(&tty_sem);
- retval = init_dev(ptm_driver, index, &tty);
- up(&tty_sem);
-
- if (retval)
- goto out;
-
- set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
- filp->private_data = tty;
- file_move(filp, &tty->tty_files);
-
- retval = -ENOMEM;
- if (devpts_pty_new(tty->link))
- goto out1;
-
- check_tty_count(tty, "tty_open");
- retval = ptm_driver->open(tty, filp);
- if (!retval)
- return 0;
-out1:
- release_dev(filp);
-out:
- down(&allocated_ptys_lock);
- idr_remove(&allocated_ptys, index);
- up(&allocated_ptys_lock);
- return retval;
-}
-#endif
-
-static int tty_release(struct inode * inode, struct file * filp)
-{
- lock_kernel();
- release_dev(filp);
- unlock_kernel();
- return 0;
-}
-
-/* No kernel lock held - fine */
-static unsigned int tty_poll(struct file * filp, poll_table * wait)
-{
- struct tty_struct * tty;
- struct tty_ldisc *ld;
- int ret = 0;
-
- tty = (struct tty_struct *)filp->private_data;
- if (tty_paranoia_check(tty, filp->f_dentry->d_inode, "tty_poll"))
- return 0;
-
- ld = tty_ldisc_ref_wait(tty);
- if (ld->poll)
- ret = (ld->poll)(tty, filp, wait);
- tty_ldisc_deref(ld);
- return ret;
-}
-
-static int tty_fasync(int fd, struct file * filp, int on)
-{
- struct tty_struct * tty;
- int retval;
-
- tty = (struct tty_struct *)filp->private_data;
- if (tty_paranoia_check(tty, filp->f_dentry->d_inode, "tty_fasync"))
- return 0;
-
- retval = fasync_helper(fd, filp, on, &tty->fasync);
- if (retval <= 0)
- return retval;
-
- if (on) {
- if (!waitqueue_active(&tty->read_wait))
- tty->minimum_to_wake = 1;
- retval = f_setown(filp, (-tty->pgrp) ? : current->pid, 0);
- if (retval)
- return retval;
- } else {
- if (!tty->fasync && !waitqueue_active(&tty->read_wait))
- tty->minimum_to_wake = N_TTY_BUF_SIZE;
- }
- return 0;
-}
-
-static int tiocsti(struct tty_struct *tty, char __user *p)
-{
- char ch, mbz = 0;
- struct tty_ldisc *ld;
-
- if ((current->signal->tty != tty) && !capable(CAP_SYS_ADMIN))
- return -EPERM;
- if (get_user(ch, p))
- return -EFAULT;
- ld = tty_ldisc_ref_wait(tty);
- ld->receive_buf(tty, &ch, &mbz, 1);
- tty_ldisc_deref(ld);
- return 0;
-}
-
-static int tiocgwinsz(struct tty_struct *tty, struct winsize __user * arg)
-{
- if (copy_to_user(arg, &tty->winsize, sizeof(*arg)))
- return -EFAULT;
- return 0;
-}
-
-static int tiocswinsz(struct tty_struct *tty, struct tty_struct *real_tty,
- struct winsize __user * arg)
-{
- struct winsize tmp_ws;
-
- if (copy_from_user(&tmp_ws, arg, sizeof(*arg)))
- return -EFAULT;
- if (!memcmp(&tmp_ws, &tty->winsize, sizeof(*arg)))
- return 0;
-#ifdef CONFIG_VT
- if (tty->driver->type == TTY_DRIVER_TYPE_CONSOLE) {
- unsigned int currcons = tty->index;
- int rc;
-
- acquire_console_sem();
- rc = vc_resize(currcons, tmp_ws.ws_col, tmp_ws.ws_row);
- release_console_sem();
- if (rc)
- return -ENXIO;
- }
-#endif
- if (tty->pgrp > 0)
- kill_pg(tty->pgrp, SIGWINCH, 1);
- if ((real_tty->pgrp != tty->pgrp) && (real_tty->pgrp > 0))
- kill_pg(real_tty->pgrp, SIGWINCH, 1);
- tty->winsize = tmp_ws;
- real_tty->winsize = tmp_ws;
- return 0;
-}
-
-static int tioccons(struct file *file)
-{
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- if (file->f_op->write == redirected_tty_write) {
- struct file *f;
- spin_lock(&redirect_lock);
- f = redirect;
- redirect = NULL;
- spin_unlock(&redirect_lock);
- if (f)
- fput(f);
- return 0;
- }
- spin_lock(&redirect_lock);
- if (redirect) {
- spin_unlock(&redirect_lock);
- return -EBUSY;
- }
- get_file(file);
- redirect = file;
- spin_unlock(&redirect_lock);
- return 0;
-}
-
-
-static int fionbio(struct file *file, int __user *p)
-{
- int nonblock;
-
- if (get_user(nonblock, p))
- return -EFAULT;
-
- if (nonblock)
- file->f_flags |= O_NONBLOCK;
- else
- file->f_flags &= ~O_NONBLOCK;
- return 0;
-}
-
-static int tiocsctty(struct tty_struct *tty, int arg)
-{
- task_t *p;
-
- if (current->signal->leader &&
- (current->signal->session == tty->session))
- return 0;
- /*
- * The process must be a session leader and
- * not have a controlling tty already.
- */
- if (!current->signal->leader || current->signal->tty)
- return -EPERM;
- if (tty->session > 0) {
- /*
- * This tty is already the controlling
- * tty for another session group!
- */
- if ((arg == 1) && capable(CAP_SYS_ADMIN)) {
- /*
- * Steal it away
- */
-
- read_lock(&tasklist_lock);
- do_each_task_pid(tty->session, PIDTYPE_SID, p) {
- p->signal->tty = NULL;
- } while_each_task_pid(tty->session, PIDTYPE_SID, p);
- read_unlock(&tasklist_lock);
- } else
- return -EPERM;
- }
- task_lock(current);
- current->signal->tty = tty;
- task_unlock(current);
- current->signal->tty_old_pgrp = 0;
- tty->session = current->signal->session;
- tty->pgrp = process_group(current);
- return 0;
-}
-
-static int tiocgpgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p)
-{
- /*
- * (tty == real_tty) is a cheap way of
- * testing if the tty is NOT a master pty.
- */
- if (tty == real_tty && current->signal->tty != real_tty)
- return -ENOTTY;
- return put_user(real_tty->pgrp, p);
-}
-
-static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p)
-{
- pid_t pgrp;
- int retval = tty_check_change(real_tty);
-
- if (retval == -EIO)
- return -ENOTTY;
- if (retval)
- return retval;
- if (!current->signal->tty ||
- (current->signal->tty != real_tty) ||
- (real_tty->session != current->signal->session))
- return -ENOTTY;
- if (get_user(pgrp, p))
- return -EFAULT;
- if (pgrp < 0)
- return -EINVAL;
- if (session_of_pgrp(pgrp) != current->signal->session)
- return -EPERM;
- real_tty->pgrp = pgrp;
- return 0;
-}
-
-static int tiocgsid(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p)
-{
- /*
- * (tty == real_tty) is a cheap way of
- * testing if the tty is NOT a master pty.
- */
- if (tty == real_tty && current->signal->tty != real_tty)
- return -ENOTTY;
- if (real_tty->session <= 0)
- return -ENOTTY;
- return put_user(real_tty->session, p);
-}
-
-static int tiocsetd(struct tty_struct *tty, int __user *p)
-{
- int ldisc;
-
- if (get_user(ldisc, p))
- return -EFAULT;
- return tty_set_ldisc(tty, ldisc);
-}
-
-static int send_break(struct tty_struct *tty, int duration)
-{
- tty->driver->break_ctl(tty, -1);
- if (!signal_pending(current)) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(duration);
- }
- tty->driver->break_ctl(tty, 0);
- if (signal_pending(current))
- return -EINTR;
- return 0;
-}
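
A hedged worked example of the durations involved, matching the TCSBRK/TCSBRKP cases handled in tty_ioctl() below (HZ is a build-time choice; 100 was the common i386 default in this era, so the jiffy counts are illustrative only):

/*
 * TCSBRK,  arg == 0: send_break(tty, HZ/4)        -> 25 jiffies, ~0.25s
 * TCSBRKP, arg == 5: send_break(tty, 5 * (HZ/10)) -> 50 jiffies, ~0.5s
 *
 * i.e. the POSIX tcsendbreak() argument is taken in tenths of a second,
 * with 0 falling back to the SVID quarter-second default.
 */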
-
-static int
-tty_tiocmget(struct tty_struct *tty, struct file *file, int __user *p)
-{
- int retval = -EINVAL;
-
- if (tty->driver->tiocmget) {
- retval = tty->driver->tiocmget(tty, file);
-
- if (retval >= 0)
- retval = put_user(retval, p);
- }
- return retval;
-}
-
-static int
-tty_tiocmset(struct tty_struct *tty, struct file *file, unsigned int cmd,
- unsigned __user *p)
-{
- int retval = -EINVAL;
-
- if (tty->driver->tiocmset) {
- unsigned int set, clear, val;
-
- retval = get_user(val, p);
- if (retval)
- return retval;
-
- set = clear = 0;
- switch (cmd) {
- case TIOCMBIS:
- set = val;
- break;
- case TIOCMBIC:
- clear = val;
- break;
- case TIOCMSET:
- set = val;
- clear = ~val;
- break;
- }
-
- set &= TIOCM_DTR|TIOCM_RTS|TIOCM_OUT1|TIOCM_OUT2|TIOCM_LOOP;
- clear &= TIOCM_DTR|TIOCM_RTS|TIOCM_OUT1|TIOCM_OUT2|TIOCM_LOOP;
-
- retval = tty->driver->tiocmset(tty, file, set, clear);
- }
- return retval;
-}
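
To make the set/clear derivation concrete, an illustrative expansion of the switch above (TIOCM_* are the standard modem-control bits):

/*
 * TIOCMBIS, val = TIOCM_RTS -> set = RTS, clear = 0
 * TIOCMBIC, val = TIOCM_RTS -> set = 0,   clear = RTS
 * TIOCMSET, val = TIOCM_DTR -> set = DTR, clear = ~DTR
 *
 * After masking, TIOCMSET asserts DTR and deasserts RTS/OUT1/OUT2/LOOP;
 * bits outside the mask (e.g. the TIOCM_CD input) are never touched on
 * this path.
 */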
-
-/*
- * Split this up, as gcc can choke on it otherwise..
- */
-int tty_ioctl(struct inode * inode, struct file * file,
- unsigned int cmd, unsigned long arg)
-{
- struct tty_struct *tty, *real_tty;
- void __user *p = (void __user *)arg;
- int retval;
- struct tty_ldisc *ld;
-
- tty = (struct tty_struct *)file->private_data;
- if (tty_paranoia_check(tty, inode, "tty_ioctl"))
- return -EINVAL;
-
- real_tty = tty;
- if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
- tty->driver->subtype == PTY_TYPE_MASTER)
- real_tty = tty->link;
-
- /*
- * Break handling by driver
- */
- if (!tty->driver->break_ctl) {
- switch(cmd) {
- case TIOCSBRK:
- case TIOCCBRK:
- if (tty->driver->ioctl)
- return tty->driver->ioctl(tty, file, cmd, arg);
- return -EINVAL;
-
- /* These two ioctl's always return success; even if */
- /* the driver doesn't support them. */
- case TCSBRK:
- case TCSBRKP:
- if (!tty->driver->ioctl)
- return 0;
- retval = tty->driver->ioctl(tty, file, cmd, arg);
- if (retval == -ENOIOCTLCMD)
- retval = 0;
- return retval;
- }
- }
-
- /*
- * Factor out some common prep work
- */
- switch (cmd) {
- case TIOCSETD:
- case TIOCSBRK:
- case TIOCCBRK:
- case TCSBRK:
- case TCSBRKP:
- retval = tty_check_change(tty);
- if (retval)
- return retval;
- if (cmd != TIOCCBRK) {
- tty_wait_until_sent(tty, 0);
- if (signal_pending(current))
- return -EINTR;
- }
- break;
- }
-
- switch (cmd) {
- case TIOCSTI:
- return tiocsti(tty, p);
- case TIOCGWINSZ:
- return tiocgwinsz(tty, p);
- case TIOCSWINSZ:
- return tiocswinsz(tty, real_tty, p);
- case TIOCCONS:
- return real_tty!=tty ? -EINVAL : tioccons(file);
- case FIONBIO:
- return fionbio(file, p);
- case TIOCEXCL:
- set_bit(TTY_EXCLUSIVE, &tty->flags);
- return 0;
- case TIOCNXCL:
- clear_bit(TTY_EXCLUSIVE, &tty->flags);
- return 0;
- case TIOCNOTTY:
- if (current->signal->tty != tty)
- return -ENOTTY;
- if (current->signal->leader)
- disassociate_ctty(0);
- task_lock(current);
- current->signal->tty = NULL;
- task_unlock(current);
- return 0;
- case TIOCSCTTY:
- return tiocsctty(tty, arg);
- case TIOCGPGRP:
- return tiocgpgrp(tty, real_tty, p);
- case TIOCSPGRP:
- return tiocspgrp(tty, real_tty, p);
- case TIOCGSID:
- return tiocgsid(tty, real_tty, p);
- case TIOCGETD:
- /* FIXME: check this is ok */
- return put_user(tty->ldisc.num, (int __user *)p);
- case TIOCSETD:
- return tiocsetd(tty, p);
-#ifdef CONFIG_VT
- case TIOCLINUX:
- return tioclinux(tty, arg);
-#endif
- /*
- * Break handling
- */
- case TIOCSBRK: /* Turn break on, unconditionally */
- tty->driver->break_ctl(tty, -1);
- return 0;
-
- case TIOCCBRK: /* Turn break off, unconditionally */
- tty->driver->break_ctl(tty, 0);
- return 0;
- case TCSBRK: /* SVID version: non-zero arg --> no break */
- /*
- * XXX is the above comment correct, or the
- * code below correct? Is this ioctl used at
- * all by anyone?
- */
- if (!arg)
- return send_break(tty, HZ/4);
- return 0;
- case TCSBRKP: /* support for POSIX tcsendbreak() */
- return send_break(tty, arg ? arg*(HZ/10) : HZ/4);
-
- case TIOCMGET:
- return tty_tiocmget(tty, file, p);
-
- case TIOCMSET:
- case TIOCMBIC:
- case TIOCMBIS:
- return tty_tiocmset(tty, file, cmd, p);
- }
- if (tty->driver->ioctl) {
- retval = (tty->driver->ioctl)(tty, file, cmd, arg);
- if (retval != -ENOIOCTLCMD)
- return retval;
- }
- ld = tty_ldisc_ref_wait(tty);
- retval = -EINVAL;
- if (ld->ioctl) {
- retval = ld->ioctl(tty, file, cmd, arg);
- if (retval == -ENOIOCTLCMD)
- retval = -EINVAL;
- }
- tty_ldisc_deref(ld);
- return retval;
-}
-
-
-/*
- * This implements the "Secure Attention Key" --- the idea is to
- * prevent trojan horses by killing all processes associated with this
- * tty when the user hits the "Secure Attention Key". Required for
- * super-paranoid applications --- see the Orange Book for more details.
- *
- * This code could be nicer; ideally it should send a HUP, wait a few
- * seconds, then send a INT, and then a KILL signal. But you then
- * have to coordinate with the init process, since all processes associated
- * with the current tty must be dead before the new getty is allowed
- * to spawn.
- *
- * Now, if it would be correct ;-/ The current code has a nasty hole -
- * it doesn't catch files in flight. We may send the descriptor to ourselves
- * via AF_UNIX socket, close it and later fetch from socket. FIXME.
- *
- * Nasty bug: do_SAK is being called in interrupt context. This can
- * deadlock. We punt it up to process context. AKPM - 16Mar2001
- */
-static void __do_SAK(void *arg)
-{
-#ifdef TTY_SOFT_SAK
- tty_hangup(tty);
-#else
- struct tty_struct *tty = arg;
- struct task_struct *p;
- int session;
- int i;
- struct file *filp;
- struct tty_ldisc *disc;
-
- if (!tty)
- return;
- session = tty->session;
-
- /* We don't want an ldisc switch during this */
- disc = tty_ldisc_ref(tty);
- if (disc && disc->flush_buffer)
- disc->flush_buffer(tty);
- tty_ldisc_deref(disc);
-
- if (tty->driver->flush_buffer)
- tty->driver->flush_buffer(tty);
-
- read_lock(&tasklist_lock);
- do_each_task_pid(session, PIDTYPE_SID, p) {
- if (p->signal->tty == tty || session > 0) {
- printk(KERN_NOTICE "SAK: killed process %d"
- " (%s): p->signal->session==tty->session\n",
- p->pid, p->comm);
- send_sig(SIGKILL, p, 1);
- continue;
- }
- task_lock(p);
- if (p->files) {
- spin_lock(&p->files->file_lock);
- for (i=0; i < p->files->max_fds; i++) {
- filp = fcheck_files(p->files, i);
- if (!filp)
- continue;
- if (filp->f_op->read == tty_read &&
- filp->private_data == tty) {
- printk(KERN_NOTICE "SAK: killed process
%d"
- " (%s): fd#%d opened to the tty\n",
- p->pid, p->comm, i);
- send_sig(SIGKILL, p, 1);
- break;
- }
- }
- spin_unlock(&p->files->file_lock);
- }
- task_unlock(p);
- } while_each_task_pid(session, PIDTYPE_SID, p);
- read_unlock(&tasklist_lock);
-#endif
-}
-
-/*
- * The tq handling here is a little racy - tty->SAK_work may already be queued.
- * Fortunately we don't need to worry, because if ->SAK_work is already queued,
- * the values which we write to it will be identical to the values which it
- * already has. --akpm
- */
-void do_SAK(struct tty_struct *tty)
-{
- if (!tty)
- return;
- PREPARE_WORK(&tty->SAK_work, __do_SAK, tty);
- schedule_work(&tty->SAK_work);
-}
-
-EXPORT_SYMBOL(do_SAK);
-
-/*
- * This routine is called out of the software interrupt to flush data
- * from the flip buffer to the line discipline.
- */
-
-static void flush_to_ldisc(void *private_)
-{
- struct tty_struct *tty = (struct tty_struct *) private_;
- unsigned char *cp;
- char *fp;
- int count;
- unsigned long flags;
- struct tty_ldisc *disc;
-
- disc = tty_ldisc_ref(tty);
- if (disc == NULL) /* !TTY_LDISC */
- return;
-
- if (test_bit(TTY_DONT_FLIP, &tty->flags)) {
- /*
- * Do it after the next timer tick:
- */
- schedule_delayed_work(&tty->flip.work, 1);
- goto out;
- }
- spin_lock_irqsave(&tty->read_lock, flags);
- if (tty->flip.buf_num) {
- cp = tty->flip.char_buf + TTY_FLIPBUF_SIZE;
- fp = tty->flip.flag_buf + TTY_FLIPBUF_SIZE;
- tty->flip.buf_num = 0;
- tty->flip.char_buf_ptr = tty->flip.char_buf;
- tty->flip.flag_buf_ptr = tty->flip.flag_buf;
- } else {
- cp = tty->flip.char_buf;
- fp = tty->flip.flag_buf;
- tty->flip.buf_num = 1;
- tty->flip.char_buf_ptr = tty->flip.char_buf + TTY_FLIPBUF_SIZE;
- tty->flip.flag_buf_ptr = tty->flip.flag_buf + TTY_FLIPBUF_SIZE;
- }
- count = tty->flip.count;
- tty->flip.count = 0;
- spin_unlock_irqrestore(&tty->read_lock, flags);
-
- disc->receive_buf(tty, cp, fp, count);
-out:
- tty_ldisc_deref(disc);
-}
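
An illustrative note on the ping-pong scheme above (per the struct tty_flip_buffer layout of this era, where char_buf and flag_buf are each 2 * TTY_FLIPBUF_SIZE bytes):

/*
 * buf_num selects which half interrupt-side producers fill while
 * flush_to_ldisc() drains the other; only the pointer/counter swap runs
 * under read_lock, so receive_buf() operates lock-free on a half that
 * producers are no longer writing.
 */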
-
-/*
- * Routine which returns the baud rate of the tty
- *
- * Note that the baud_table needs to be kept in sync with the
- * include/asm/termbits.h file.
- */
-static int baud_table[] = {
- 0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
- 9600, 19200, 38400, 57600, 115200, 230400, 460800,
-#ifdef __sparc__
- 76800, 153600, 307200, 614400, 921600
-#else
- 500000, 576000, 921600, 1000000, 1152000, 1500000, 2000000,
- 2500000, 3000000, 3500000, 4000000
-#endif
-};
-
-static int n_baud_table = ARRAY_SIZE(baud_table);
-
-/**
- * tty_termios_baud_rate
- * @termios: termios structure
- *
- * Convert termios baud rate data into a speed. This should be called
- * with the termios lock held if this termios is a terminal termios
- * structure. May change the termios data.
- */
-
-int tty_termios_baud_rate(struct termios *termios)
-{
- unsigned int cbaud;
-
- cbaud = termios->c_cflag & CBAUD;
-
- if (cbaud & CBAUDEX) {
- cbaud &= ~CBAUDEX;
-
- if (cbaud < 1 || cbaud + 15 > n_baud_table)
- termios->c_cflag &= ~CBAUDEX;
- else
- cbaud += 15;
- }
- return baud_table[cbaud];
-}
-
-EXPORT_SYMBOL(tty_termios_baud_rate);
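
A worked example of the index arithmetic (illustrative; B* values per the i386 termbits.h of this era):

/*
 * B9600  = 0x0d           -> cbaud = 13     -> baud_table[13] = 9600
 * B57600 = CBAUDEX | 0x01 -> cbaud = 1 + 15 -> baud_table[16] = 57600
 *
 * The +15 offset maps extended code 1 onto index 16, just past the
 * sixteen pre-CBAUDEX entries B0..B38400, which is why baud_table must
 * stay in sync with include/asm/termbits.h.
 */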
-
-/**
- * tty_get_baud_rate - get tty bit rates
- * @tty: tty to query
- *
- * Returns the baud rate as an integer for this terminal. The
- * termios lock must be held by the caller and the terminal bit
- * flags may be updated.
- */
-
-int tty_get_baud_rate(struct tty_struct *tty)
-{
- int baud = tty_termios_baud_rate(tty->termios);
-
- if (baud == 38400 && tty->alt_speed) {
- if (!tty->warned) {
- printk(KERN_WARNING "Use of setserial/setrocket to "
- "set SPD_* flags is deprecated\n");
- tty->warned = 1;
- }
- baud = tty->alt_speed;
- }
-
- return baud;
-}
-
-EXPORT_SYMBOL(tty_get_baud_rate);
-
-/**
- * tty_flip_buffer_push - terminal
- * @tty: tty to push
- *
- * Queue a push of the terminal flip buffers to the line discipline. This
- * function must not be called from IRQ context if tty->low_latency is set.
- *
- * In the event of the queue being busy for flipping the work will be
- * held off and retried later.
- */
-
-void tty_flip_buffer_push(struct tty_struct *tty)
-{
- if (tty->low_latency)
- flush_to_ldisc((void *) tty);
- else
- schedule_delayed_work(&tty->flip.work, 1);
-}
-
-EXPORT_SYMBOL(tty_flip_buffer_push);
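
As a usage sketch (not part of this file; my_uart_rx_char() is a hypothetical driver hook and overflow handling is elided), a low-level driver's receive path stages bytes through the flip buffer and then queues a push:

#include <linux/tty.h>
#include <linux/tty_flip.h>

/* Hypothetical receive hook: stage one byte, then hand it to the ldisc. */
static void my_uart_rx_char(struct tty_struct *tty, unsigned char ch)
{
	tty_insert_flip_char(tty, ch, TTY_NORMAL); /* fills flip.char_buf */
	tty_flip_buffer_push(tty); /* immediate if low_latency, else delayed */
}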
-
-/*
- * This subroutine initializes a tty structure.
- */
-static void initialize_tty_struct(struct tty_struct *tty)
-{
- memset(tty, 0, sizeof(struct tty_struct));
- tty->magic = TTY_MAGIC;
- tty_ldisc_assign(tty, tty_ldisc_get(N_TTY));
- tty->pgrp = -1;
- tty->flip.char_buf_ptr = tty->flip.char_buf;
- tty->flip.flag_buf_ptr = tty->flip.flag_buf;
- INIT_WORK(&tty->flip.work, flush_to_ldisc, tty);
- init_MUTEX(&tty->flip.pty_sem);
- init_MUTEX(&tty->termios_sem);
- init_waitqueue_head(&tty->write_wait);
- init_waitqueue_head(&tty->read_wait);
- INIT_WORK(&tty->hangup_work, do_tty_hangup, tty);
- sema_init(&tty->atomic_read, 1);
- sema_init(&tty->atomic_write, 1);
- spin_lock_init(&tty->read_lock);
- INIT_LIST_HEAD(&tty->tty_files);
- INIT_WORK(&tty->SAK_work, NULL, NULL);
-}
-
-/*
- * The default put_char routine if the driver did not define one.
- */
-static void tty_default_put_char(struct tty_struct *tty, unsigned char ch)
-{
- tty->driver->write(tty, &ch, 1);
-}
-
-static struct class_simple *tty_class;
-
-/**
- * tty_register_device - register a tty device
- * @driver: the tty driver that describes the tty device
- * @index: the index in the tty driver for this tty device
- * @device: a struct device that is associated with this tty device.
- * This field is optional, if there is no known struct device for this
- * tty device it can be set to NULL safely.
- *
- * This call is required to be made to register an individual tty device if
- * the tty driver's flags have the TTY_DRIVER_NO_DEVFS bit set. If that
- * bit is not set, this function should not be called.
- */
-void tty_register_device(struct tty_driver *driver, unsigned index,
- struct device *device)
-{
- char name[64];
- dev_t dev = MKDEV(driver->major, driver->minor_start) + index;
-
- if (index >= driver->num) {
- printk(KERN_ERR "Attempt to register invalid tty line number "
- " (%d).\n", index);
- return;
- }
-
- devfs_mk_cdev(dev, S_IFCHR | S_IRUSR | S_IWUSR,
- "%s%d", driver->devfs_name, index + driver->name_base);
-
- if (driver->type == TTY_DRIVER_TYPE_PTY)
- pty_line_name(driver, index, name);
- else
- tty_line_name(driver, index, name);
- class_simple_device_add(tty_class, dev, device, name);
-}
-
-/**
- * tty_unregister_device - unregister a tty device
- * @driver: the tty driver that describes the tty device
- * @index: the index in the tty driver for this tty device
- *
- * If a tty device is registered with a call to tty_register_device() then
- * this function must be made when the tty device is gone.
- */
-void tty_unregister_device(struct tty_driver *driver, unsigned index)
-{
- devfs_remove("%s%d", driver->devfs_name, index + driver->name_base);
- class_simple_device_remove(MKDEV(driver->major, driver->minor_start) + index);
-}
-
-EXPORT_SYMBOL(tty_register_device);
-EXPORT_SYMBOL(tty_unregister_device);
-
-struct tty_driver *alloc_tty_driver(int lines)
-{
- struct tty_driver *driver;
-
- driver = kmalloc(sizeof(struct tty_driver), GFP_KERNEL);
- if (driver) {
- memset(driver, 0, sizeof(struct tty_driver));
- driver->magic = TTY_DRIVER_MAGIC;
- driver->num = lines;
- /* later we'll move allocation of tables here */
- }
- return driver;
-}
-
-void put_tty_driver(struct tty_driver *driver)
-{
- kfree(driver);
-}
-
-void tty_set_operations(struct tty_driver *driver, struct tty_operations *op)
-{
- driver->open = op->open;
- driver->close = op->close;
- driver->write = op->write;
- driver->put_char = op->put_char;
- driver->flush_chars = op->flush_chars;
- driver->write_room = op->write_room;
- driver->chars_in_buffer = op->chars_in_buffer;
- driver->ioctl = op->ioctl;
- driver->set_termios = op->set_termios;
- driver->throttle = op->throttle;
- driver->unthrottle = op->unthrottle;
- driver->stop = op->stop;
- driver->start = op->start;
- driver->hangup = op->hangup;
- driver->break_ctl = op->break_ctl;
- driver->flush_buffer = op->flush_buffer;
- driver->set_ldisc = op->set_ldisc;
- driver->wait_until_sent = op->wait_until_sent;
- driver->send_xchar = op->send_xchar;
- driver->read_proc = op->read_proc;
- driver->write_proc = op->write_proc;
- driver->tiocmget = op->tiocmget;
- driver->tiocmset = op->tiocmset;
-}
-
-
-EXPORT_SYMBOL(alloc_tty_driver);
-EXPORT_SYMBOL(put_tty_driver);
-EXPORT_SYMBOL(tty_set_operations);
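
For context, a minimal registration sketch built on the three helpers above (illustrative only: the names and line count are placeholders, my_ops is a driver-supplied struct tty_operations, and unregister/unwind paths are elided):

#include <linux/module.h>
#include <linux/init.h>
#include <linux/tty.h>

static struct tty_driver *my_driver;
extern struct tty_operations my_ops;	/* open/close/write etc. */

static int __init my_serial_init(void)
{
	my_driver = alloc_tty_driver(4);	/* four lines */
	if (!my_driver)
		return -ENOMEM;
	my_driver->owner = THIS_MODULE;
	my_driver->driver_name = "my_serial";
	my_driver->name = "ttyMY";
	my_driver->major = 0;			/* 0 => allocate dynamically */
	my_driver->type = TTY_DRIVER_TYPE_SERIAL;
	my_driver->subtype = SERIAL_TYPE_NORMAL;
	my_driver->init_termios = tty_std_termios;
	tty_set_operations(my_driver, &my_ops);	/* copies the op table */
	return tty_register_driver(my_driver);	/* tables, cdev, devfs */
}
module_init(my_serial_init);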
-
-/*
- * Called by a tty driver to register itself.
- */
-int tty_register_driver(struct tty_driver *driver)
-{
- int error;
- int i;
- dev_t dev;
- void **p = NULL;
-
- if (driver->flags & TTY_DRIVER_INSTALLED)
- return 0;
-
- if (!(driver->flags & TTY_DRIVER_DEVPTS_MEM)) {
- p = kmalloc(driver->num * 3 * sizeof(void *), GFP_KERNEL);
- if (!p)
- return -ENOMEM;
- memset(p, 0, driver->num * 3 * sizeof(void *));
- }
-
- if (!driver->major) {
- error = alloc_chrdev_region(&dev, driver->minor_start, driver->num,
- (char*)driver->name);
- if (!error) {
- driver->major = MAJOR(dev);
- driver->minor_start = MINOR(dev);
- }
- } else {
- dev = MKDEV(driver->major, driver->minor_start);
- error = register_chrdev_region(dev, driver->num,
- (char*)driver->name);
- }
- if (error < 0) {
- kfree(p);
- return error;
- }
-
- if (p) {
- driver->ttys = (struct tty_struct **)p;
- driver->termios = (struct termios **)(p + driver->num);
- driver->termios_locked = (struct termios **)(p + driver->num * 2);
- } else {
- driver->ttys = NULL;
- driver->termios = NULL;
- driver->termios_locked = NULL;
- }
-
- cdev_init(&driver->cdev, &tty_fops);
- driver->cdev.owner = driver->owner;
- error = cdev_add(&driver->cdev, dev, driver->num);
- if (error) {
- cdev_del(&driver->cdev);
- unregister_chrdev_region(dev, driver->num);
- driver->ttys = NULL;
- driver->termios = driver->termios_locked = NULL;
- kfree(p);
- return error;
- }
-
- if (!driver->put_char)
- driver->put_char = tty_default_put_char;
-
- list_add(&driver->tty_drivers, &tty_drivers);
-
- if ( !(driver->flags & TTY_DRIVER_NO_DEVFS) ) {
- for(i = 0; i < driver->num; i++)
- tty_register_device(driver, i, NULL);
- }
- proc_tty_register_driver(driver);
- return 0;
-}
-
-EXPORT_SYMBOL(tty_register_driver);
-
-/*
- * Called by a tty driver to unregister itself.
- */
-int tty_unregister_driver(struct tty_driver *driver)
-{
- int i;
- struct termios *tp;
- void *p;
-
- if (driver->refcount)
- return -EBUSY;
-
- unregister_chrdev_region(MKDEV(driver->major, driver->minor_start),
- driver->num);
-
- list_del(&driver->tty_drivers);
-
- /*
- * Free the termios and termios_locked structures because
- * we don't want to get memory leaks when modular tty
- * drivers are removed from the kernel.
- */
- for (i = 0; i < driver->num; i++) {
- tp = driver->termios[i];
- if (tp) {
- driver->termios[i] = NULL;
- kfree(tp);
- }
- tp = driver->termios_locked[i];
- if (tp) {
- driver->termios_locked[i] = NULL;
- kfree(tp);
- }
- if (!(driver->flags & TTY_DRIVER_NO_DEVFS))
- tty_unregister_device(driver, i);
- }
- p = driver->ttys;
- proc_tty_unregister_driver(driver);
- driver->ttys = NULL;
- driver->termios = driver->termios_locked = NULL;
- kfree(p);
- cdev_del(&driver->cdev);
- return 0;
-}
-
-EXPORT_SYMBOL(tty_unregister_driver);
-
-
-/*
- * Initialize the console device. This is called *early*, so
- * we can't necessarily depend on lots of kernel help here.
- * Just do some early initializations, and do the complex setup
- * later.
- */
-void __init console_init(void)
-{
- initcall_t *call;
-
- /* Setup the default TTY line discipline. */
- (void) tty_register_ldisc(N_TTY, &tty_ldisc_N_TTY);
-
- /*
- * set up the console device so that later boot sequences can
- * inform about problems etc..
- */
-#ifdef CONFIG_EARLY_PRINTK
- disable_early_printk();
-#endif
-#ifdef CONFIG_SERIAL_68360
- /* This is not a console initcall. I know not what it's doing here.
- So I haven't moved it. dwmw2 */
- rs_360_init();
-#endif
- call = __con_initcall_start;
- while (call < __con_initcall_end) {
- (*call)();
- call++;
- }
-}
-
-#ifdef CONFIG_VT
-extern int vty_init(void);
-#endif
-
-static int __init tty_class_init(void)
-{
- tty_class = class_simple_create(THIS_MODULE, "tty");
- if (IS_ERR(tty_class))
- return PTR_ERR(tty_class);
- return 0;
-}
-
-postcore_initcall(tty_class_init);
-
-/* 3/2004 jmc: why do these devices exist? */
-
-static struct cdev tty_cdev, console_cdev;
-#ifdef CONFIG_UNIX98_PTYS
-static struct cdev ptmx_cdev;
-#endif
-#ifdef CONFIG_VT
-static struct cdev vc0_cdev;
-#endif
-
-/*
- * Ok, now we can initialize the rest of the tty devices and can count
- * on memory allocations, interrupts etc..
- */
-static int __init tty_init(void)
-{
- cdev_init(&tty_cdev, &tty_fops);
- if (cdev_add(&tty_cdev, MKDEV(TTYAUX_MAJOR, 0), 1) ||
- register_chrdev_region(MKDEV(TTYAUX_MAJOR, 0), 1, "/dev/tty") < 0)
- panic("Couldn't register /dev/tty driver\n");
- devfs_mk_cdev(MKDEV(TTYAUX_MAJOR, 0), S_IFCHR|S_IRUGO|S_IWUGO, "tty");
- class_simple_device_add(tty_class, MKDEV(TTYAUX_MAJOR, 0), NULL, "tty");
-
- cdev_init(&console_cdev, &console_fops);
- if (cdev_add(&console_cdev, MKDEV(TTYAUX_MAJOR, 1), 1) ||
- register_chrdev_region(MKDEV(TTYAUX_MAJOR, 1), 1, "/dev/console") < 0)
- panic("Couldn't register /dev/console driver\n");
- devfs_mk_cdev(MKDEV(TTYAUX_MAJOR, 1), S_IFCHR|S_IRUSR|S_IWUSR, "console");
- class_simple_device_add(tty_class, MKDEV(TTYAUX_MAJOR, 1), NULL, "console");
-
-#ifdef CONFIG_UNIX98_PTYS
- cdev_init(&ptmx_cdev, &ptmx_fops);
- if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
- register_chrdev_region(MKDEV(TTYAUX_MAJOR, 2), 1, "/dev/ptmx") < 0)
- panic("Couldn't register /dev/ptmx driver\n");
- devfs_mk_cdev(MKDEV(TTYAUX_MAJOR, 2), S_IFCHR|S_IRUGO|S_IWUGO, "ptmx");
- class_simple_device_add(tty_class, MKDEV(TTYAUX_MAJOR, 2), NULL, "ptmx");
-#endif
-
-#ifdef CONFIG_VT
- if (console_use_vt) {
- cdev_init(&vc0_cdev, &console_fops);
- if (cdev_add(&vc0_cdev, MKDEV(TTY_MAJOR, 0), 1) ||
- register_chrdev_region(MKDEV(TTY_MAJOR, 0), 1,
- "/dev/vc/0") < 0)
- panic("Couldn't register /dev/tty0 driver\n");
- devfs_mk_cdev(MKDEV(TTY_MAJOR, 0), S_IFCHR|S_IRUSR|S_IWUSR,
- "vc/0");
- class_simple_device_add(tty_class, MKDEV(TTY_MAJOR, 0), NULL,
- "tty0");
-
- vty_init();
- }
-#endif
- return 0;
-}
-module_init(tty_init);
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/drivers/xen/Makefile
--- a/linux-2.6.11-xen-sparse/drivers/xen/Makefile Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,12 +0,0 @@
-
-
-obj-y += console/
-obj-y += evtchn/
-obj-y += balloon/
-
-obj-$(CONFIG_XEN_PRIVILEGED_GUEST) += privcmd/
-obj-$(CONFIG_XEN_BLKDEV_BACKEND) += blkback/
-obj-$(CONFIG_XEN_NETDEV_BACKEND) += netback/
-obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += blkfront/
-obj-$(CONFIG_XEN_NETDEV_FRONTEND) += netfront/
-
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/drivers/xen/balloon/Makefile
--- a/linux-2.6.11-xen-sparse/drivers/xen/balloon/Makefile Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,2 +0,0 @@
-
-obj-y += balloon.o
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/drivers/xen/balloon/balloon.c
--- a/linux-2.6.11-xen-sparse/drivers/xen/balloon/balloon.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,458 +0,0 @@
-/******************************************************************************
- * balloon.c
- *
- * Xen balloon driver - enables returning/claiming memory to/from Xen.
- *
- * Copyright (c) 2003, B Dragovic
- * Copyright (c) 2003-2004, M Williamson, K Fraser
- *
- * This file may be distributed separately from the Linux kernel, or
- * incorporated into other software packages, subject to the following license:
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#include <linux/config.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/mm.h>
-#include <linux/mman.h>
-#include <linux/smp_lock.h>
-#include <linux/pagemap.h>
-#include <linux/bootmem.h>
-#include <linux/highmem.h>
-#include <linux/vmalloc.h>
-#include <asm-xen/xen_proc.h>
-#include <asm-xen/hypervisor.h>
-#include <asm-xen/ctrl_if.h>
-#include <asm-xen/balloon.h>
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
-#include <asm/uaccess.h>
-#include <asm/tlb.h>
-#include <linux/list.h>
-
-static struct proc_dir_entry *balloon_pde;
-
-static DECLARE_MUTEX(balloon_mutex);
-spinlock_t balloon_lock = SPIN_LOCK_UNLOCKED;
-
-/* We aim for 'current allocation' == 'target allocation'. */
-static unsigned long current_pages;
-static unsigned long target_pages;
-
-/* We may hit the hard limit in Xen. If we do then we remember it. */
-static unsigned long hard_limit;
-
-/*
- * Drivers may alter the memory reservation independently, but they must
- * inform the balloon driver so that we can avoid hitting the hard limit.
- */
-static unsigned long driver_pages;
-
-/* List of ballooned pages, threaded through the mem_map array. */
-static LIST_HEAD(ballooned_pages);
-static unsigned long balloon_low, balloon_high;
-
-/* Main work function, always executed in process context. */
-static void balloon_process(void *unused);
-static DECLARE_WORK(balloon_worker, balloon_process, NULL);
-static struct timer_list balloon_timer;
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-/* Use the private and mapping fields of struct page as a list. */
-#define PAGE_TO_LIST(p) ( (struct list_head *)&p->private )
-#define LIST_TO_PAGE(l) ( list_entry( ((unsigned long *)l), \
- struct page, private ) )
-#define UNLIST_PAGE(p) do { list_del(PAGE_TO_LIST(p)); \
- p->mapping = NULL; \
- p->private = 0; } while(0)
-#else
-/* There's a dedicated list field in struct page we can use. */
-#define PAGE_TO_LIST(p) ( &p->list )
-#define LIST_TO_PAGE(l) ( list_entry(l, struct page, list) )
-#define UNLIST_PAGE(p) ( list_del(&p->list) )
-#define pte_offset_kernel pte_offset
-#define pud_t pgd_t
-#define pud_offset(d, va) d
-#define pud_none(d) 0
-#define pud_bad(d) 0
-#define subsys_initcall(_fn) __initcall(_fn)
-#define pfn_to_page(_pfn) (mem_map + (_pfn))
-#endif
-
-#define IPRINTK(fmt, args...) \
- printk(KERN_INFO "xen_mem: " fmt, ##args)
-#define WPRINTK(fmt, args...) \
- printk(KERN_WARNING "xen_mem: " fmt, ##args)
-
-/* balloon_append: add the given page to the balloon. */
-static void balloon_append(struct page *page)
-{
- /* Low memory is re-populated first, so highmem pages go at list tail. */
- if ( PageHighMem(page) )
- {
- list_add_tail(PAGE_TO_LIST(page), &ballooned_pages);
- balloon_high++;
- }
- else
- {
- list_add(PAGE_TO_LIST(page), &ballooned_pages);
- balloon_low++;
- }
-}
-
-/* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
-static struct page *balloon_retrieve(void)
-{
- struct page *page;
-
- if ( list_empty(&ballooned_pages) )
- return NULL;
-
- page = LIST_TO_PAGE(ballooned_pages.next);
- UNLIST_PAGE(page);
-
- if ( PageHighMem(page) )
- balloon_high--;
- else
- balloon_low--;
-
- return page;
-}
-
-static inline pte_t *get_ptep(unsigned long addr)
-{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
-
- pgd = pgd_offset_k(addr);
- if ( pgd_none(*pgd) || pgd_bad(*pgd) ) BUG();
-
- pud = pud_offset(pgd, addr);
- if ( pud_none(*pud) || pud_bad(*pud) ) BUG();
-
- pmd = pmd_offset(pud, addr);
- if ( pmd_none(*pmd) || pmd_bad(*pmd) ) BUG();
-
- return pte_offset_kernel(pmd, addr);
-}
-
-static void balloon_alarm(unsigned long unused)
-{
- schedule_work(&balloon_worker);
-}
-
-static unsigned long current_target(void)
-{
- unsigned long target = min(target_pages, hard_limit);
- if ( target > (current_pages + balloon_low + balloon_high) )
- target = current_pages + balloon_low + balloon_high;
- return target;
-}
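
A worked example of the clamping (illustrative numbers):

/*
 * With current_pages = 1000 and balloon_low + balloon_high = 200, a
 * request of target_pages = 5000 is clamped to 1200, since the balloon
 * can only re-populate pages it previously absorbed; a hard_limit of
 * 1100 would instead cap the target at 1100.
 */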
-
-/*
- * We avoid multiple worker processes conflicting via the balloon mutex.
- * We may of course race updates of the target counts (which are protected
- * by the balloon lock), or with changes to the Xen hard limit, but we will
- * recover from these in time.
- */
-static void balloon_process(void *unused)
-{
- unsigned long *mfn_list, pfn, i, flags;
- struct page *page;
- long credit, debt, rc;
- void *v;
-
- down(&balloon_mutex);
-
- retry:
- mfn_list = NULL;
-
- if ( (credit = current_target() - current_pages) > 0 )
- {
- mfn_list = (unsigned long *)vmalloc(credit * sizeof(*mfn_list));
- if ( mfn_list == NULL )
- goto out;
-
- balloon_lock(flags);
- rc = HYPERVISOR_dom_mem_op(
- MEMOP_increase_reservation, mfn_list, credit, 0);
- balloon_unlock(flags);
- if ( rc < credit )
- {
- /* We hit the Xen hard limit: reprobe. */
- if ( HYPERVISOR_dom_mem_op(
- MEMOP_decrease_reservation, mfn_list, rc, 0) != rc )
- BUG();
- hard_limit = current_pages + rc - driver_pages;
- vfree(mfn_list);
- goto retry;
- }
-
- for ( i = 0; i < credit; i++ )
- {
- if ( (page = balloon_retrieve()) == NULL )
- BUG();
-
- pfn = page - mem_map;
- if ( phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY )
- BUG();
-
- /* Update P->M and M->P tables. */
- phys_to_machine_mapping[pfn] = mfn_list[i];
- queue_machphys_update(mfn_list[i], pfn);
-
- /* Link back into the page tables if it's not a highmem page. */
- if ( pfn < max_low_pfn )
- queue_l1_entry_update(
- get_ptep((unsigned long)__va(pfn << PAGE_SHIFT)),
- (mfn_list[i] << PAGE_SHIFT) | pgprot_val(PAGE_KERNEL));
-
- /* Finally, relinquish the memory back to the system allocator. */
- ClearPageReserved(page);
- set_page_count(page, 1);
- __free_page(page);
- }
-
- current_pages += credit;
- }
- else if ( credit < 0 )
- {
- debt = -credit;
-
- mfn_list = (unsigned long *)vmalloc(debt * sizeof(*mfn_list));
- if ( mfn_list == NULL )
- goto out;
-
- for ( i = 0; i < debt; i++ )
- {
- if ( (page = alloc_page(GFP_HIGHUSER)) == NULL )
- {
- debt = i;
- break;
- }
-
- pfn = page - mem_map;
- mfn_list[i] = phys_to_machine_mapping[pfn];
-
- if ( !PageHighMem(page) )
- {
- v = phys_to_virt(pfn << PAGE_SHIFT);
- scrub_pages(v, 1);
- queue_l1_entry_update(get_ptep((unsigned long)v), 0);
- }
-#ifdef CONFIG_XEN_SCRUB_PAGES
- else
- {
- v = kmap(page);
- scrub_pages(v, 1);
- kunmap(page);
- }
-#endif
- }
-
- /* Ensure that ballooned highmem pages don't have cached mappings. */
- kmap_flush_unused();
-
- /* Flush updates through and flush the TLB. */
- xen_tlb_flush();
-
- /* No more mappings: invalidate pages in P2M and add to balloon. */
- for ( i = 0; i < debt; i++ )
- {
- pfn = mfn_to_pfn(mfn_list[i]);
- phys_to_machine_mapping[pfn] = INVALID_P2M_ENTRY;
- balloon_append(pfn_to_page(pfn));
- }
-
- if ( HYPERVISOR_dom_mem_op(
- MEMOP_decrease_reservation, mfn_list, debt, 0) != debt )
- BUG();
-
- current_pages -= debt;
- }
-
- out:
- if ( mfn_list != NULL )
- vfree(mfn_list);
-
- /* Schedule more work if there is some still to be done. */
- if ( current_target() != current_pages )
- mod_timer(&balloon_timer, jiffies + HZ);
-
- up(&balloon_mutex);
-}
-
-/* Resets the Xen limit, sets new target, and kicks off processing. */
-static void set_new_target(unsigned long target)
-{
- /* No need for lock. Not read-modify-write updates. */
- hard_limit = ~0UL;
- target_pages = target;
- schedule_work(&balloon_worker);
-}
-
-static void balloon_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
-{
- switch ( msg->subtype )
- {
- case CMSG_MEM_REQUEST_SET:
- {
- mem_request_t *req = (mem_request_t *)&msg->msg[0];
- if ( msg->length != sizeof(mem_request_t) )
- goto parse_error;
- set_new_target(req->target);
- req->status = 0;
- }
- break;
- default:
- goto parse_error;
- }
-
- ctrl_if_send_response(msg);
- return;
-
- parse_error:
- msg->length = 0;
- ctrl_if_send_response(msg);
-}
-
-static int balloon_write(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
-{
- char memstring[64], *endchar;
- unsigned long long target_bytes;
-
- if ( !capable(CAP_SYS_ADMIN) )
- return -EPERM;
-
- if ( count <= 1 )
- return -EBADMSG; /* runt */
- if ( count > sizeof(memstring) )
- return -EFBIG; /* too long */
-
- if ( copy_from_user(memstring, buffer, count) )
- return -EFAULT;
- memstring[sizeof(memstring)-1] = '\0';
-
- target_bytes = memparse(memstring, &endchar);
- set_new_target(target_bytes >> PAGE_SHIFT);
-
- return count;
-}
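
A usage sketch for the proc interface (path per create_xen_proc_entry() in balloon_init() below):

/*
 * # echo 512M > /proc/xen/balloon
 *
 * memparse() accepts the usual K/M/G suffixes; with 4kB pages,
 * 512M >> PAGE_SHIFT yields a target of 131072 pages.
 */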
-
-static int balloon_read(char *page, char **start, off_t off,
- int count, int *eof, void *data)
-{
- int len;
-
-#define K(_p) ((_p)<<(PAGE_SHIFT-10))
- len = sprintf(
- page,
- "Current allocation: %8lu kB\n"
- "Requested target: %8lu kB\n"
- "Low-mem balloon: %8lu kB\n"
- "High-mem balloon: %8lu kB\n"
- "Xen hard limit: ",
- K(current_pages), K(target_pages), K(balloon_low), K(balloon_high));
-
- if ( hard_limit != ~0UL )
- len += sprintf(
- page + len,
- "%8lu kB (inc. %8lu kB driver headroom)\n",
- K(hard_limit), K(driver_pages));
- else
- len += sprintf(
- page + len,
- " ??? kB\n");
-
- *eof = 1;
- return len;
-}
-
-static int __init balloon_init(void)
-{
- unsigned long pfn;
- struct page *page;
-
- IPRINTK("Initialising balloon driver.\n");
-
- current_pages = min(xen_start_info.nr_pages, max_pfn);
- target_pages = current_pages;
- balloon_low = 0;
- balloon_high = 0;
- driver_pages = 0UL;
- hard_limit = ~0UL;
-
- init_timer(&balloon_timer);
- balloon_timer.data = 0;
- balloon_timer.function = balloon_alarm;
-
- if ( (balloon_pde = create_xen_proc_entry("balloon", 0644)) == NULL )
- {
- WPRINTK("Unable to create /proc/xen/balloon.\n");
- return -1;
- }
-
- balloon_pde->read_proc = balloon_read;
- balloon_pde->write_proc = balloon_write;
-
- (void)ctrl_if_register_receiver(CMSG_MEM_REQUEST, balloon_ctrlif_rx, 0);
-
- /* Initialise the balloon with excess memory space. */
- for ( pfn = xen_start_info.nr_pages; pfn < max_pfn; pfn++ )
- {
- page = &mem_map[pfn];
- if ( !PageReserved(page) )
- balloon_append(page);
- }
-
- return 0;
-}
-
-subsys_initcall(balloon_init);
-
-void balloon_update_driver_allowance(long delta)
-{
- unsigned long flags;
- balloon_lock(flags);
- driver_pages += delta; /* non-atomic update */
- balloon_unlock(flags);
-}
-
-void balloon_put_pages(unsigned long *mfn_list, unsigned long nr_mfns)
-{
- unsigned long flags;
-
- balloon_lock(flags);
- if ( HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation,
- mfn_list, nr_mfns, 0) != nr_mfns )
- BUG();
- current_pages -= nr_mfns; /* non-atomic update */
- balloon_unlock(flags);
-
- schedule_work(&balloon_worker);
-}
-
-EXPORT_SYMBOL(balloon_update_driver_allowance);
-EXPORT_SYMBOL(balloon_put_pages);
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/drivers/xen/blkback/Makefile
--- a/linux-2.6.11-xen-sparse/drivers/xen/blkback/Makefile Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,2 +0,0 @@
-
-obj-y := blkback.o control.o interface.o vbd.o
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/drivers/xen/blkback/blkback.c
--- a/linux-2.6.11-xen-sparse/drivers/xen/blkback/blkback.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,645 +0,0 @@
-/******************************************************************************
- * arch/xen/drivers/blkif/backend/main.c
- *
- * Back-end of the driver for virtual block devices. This portion of the
- * driver exports a 'unified' block-device interface that can be accessed
- * by any operating system that implements a compatible front end. A
- * reference front-end implementation can be found in:
- * arch/xen/drivers/blkif/frontend
- *
- * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
- */
-
-#include "common.h"
-
-/*
- * These are rather arbitrary. They are fairly large because adjacent requests
- * pulled from a communication ring are quite likely to end up being part of
- * the same scatter/gather request at the disc.
- *
- * ** TRY INCREASING 'MAX_PENDING_REQS' IF WRITE SPEEDS SEEM TOO LOW **
- * This will increase the chances of being able to write whole tracks.
- * 64 should be enough to keep us competitive with Linux.
- */
-#define MAX_PENDING_REQS 64
-#define BATCH_PER_DOMAIN 16
-
-static unsigned long mmap_vstart;
-#define MMAP_PAGES_PER_REQUEST \
- (BLKIF_MAX_SEGMENTS_PER_REQUEST + 1)
-#define MMAP_PAGES \
- (MAX_PENDING_REQS * MMAP_PAGES_PER_REQUEST)
-#define MMAP_VADDR(_req,_seg) \
- (mmap_vstart + \
- ((_req) * MMAP_PAGES_PER_REQUEST * PAGE_SIZE) + \
- ((_seg) * PAGE_SIZE))
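
A worked example of the address arithmetic (assuming, for illustration, 4kB pages and BLKIF_MAX_SEGMENTS_PER_REQUEST == 11, i.e. 12 mapping pages per request):

/*
 * MMAP_VADDR(2, 3) = mmap_vstart + (2 * 12 + 3) * PAGE_SIZE
 *
 * Each pending request owns a contiguous 12-page window, so segment
 * mappings of distinct in-flight requests can never alias.
 */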
-
-/*
- * Each outstanding request that we've passed to the lower device layers has a
- * 'pending_req' allocated to it. Each buffer_head that completes decrements
- * the pendcnt towards zero. When it hits zero, the specified domain has a
- * response queued for it, with the saved 'id' passed back.
- */
-typedef struct {
- blkif_t *blkif;
- unsigned long id;
- int nr_pages;
- atomic_t pendcnt;
- unsigned short operation;
- int status;
- void *bounce_page;
- unsigned int bounce_off, bounce_len;
-} pending_req_t;
-
-/*
- * We can't allocate pending_req's in order, since they may complete out of
- * order. We therefore maintain an allocation ring. This ring also indicates
- * when enough work has been passed down -- at that point the allocation ring
- * will be empty.
- */
-static pending_req_t pending_reqs[MAX_PENDING_REQS];
-static unsigned char pending_ring[MAX_PENDING_REQS];
-static spinlock_t pend_prod_lock = SPIN_LOCK_UNLOCKED;
-/* NB. We use a different index type to differentiate from shared blk rings. */
-typedef unsigned int PEND_RING_IDX;
-#define MASK_PEND_IDX(_i) ((_i)&(MAX_PENDING_REQS-1))
-static PEND_RING_IDX pending_prod, pending_cons;
-#define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)
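
Illustrative ring accounting (reading pending_ring[] as a free list of request-slot indices):

/*
 * The indices only ever increase and are masked on use. With
 * MAX_PENDING_REQS = 64, pending_prod = 70 and pending_cons = 10,
 * pending_ring[] holds prod - cons = 60 free slots and
 * NR_PENDING_REQS = 64 - 70 + 10 = 4 requests are outstanding.
 */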
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
-static kmem_cache_t *buffer_head_cachep;
-#else
-static request_queue_t *plugged_queue;
-static inline void flush_plugged_queue(void)
-{
- request_queue_t *q = plugged_queue;
- if ( q != NULL )
- {
- if ( q->unplug_fn != NULL )
- q->unplug_fn(q);
- blk_put_queue(q);
- plugged_queue = NULL;
- }
-}
-#endif
-
-static int do_block_io_op(blkif_t *blkif, int max_to_do);
-static void dispatch_probe(blkif_t *blkif, blkif_request_t *req);
-static void dispatch_rw_block_io(blkif_t *blkif, blkif_request_t *req);
-static void make_response(blkif_t *blkif, unsigned long id,
- unsigned short op, int st);
-
-static void fast_flush_area(int idx, int nr_pages)
-{
- multicall_entry_t mcl[MMAP_PAGES_PER_REQUEST];
- int i;
-
- for ( i = 0; i < nr_pages; i++ )
- {
- mcl[i].op = __HYPERVISOR_update_va_mapping;
- mcl[i].args[0] = MMAP_VADDR(idx, i) >> PAGE_SHIFT;
- mcl[i].args[1] = 0;
- mcl[i].args[2] = 0;
- }
-
- mcl[nr_pages-1].args[2] = UVMF_FLUSH_TLB;
- if ( unlikely(HYPERVISOR_multicall(mcl, nr_pages) != 0) )
- BUG();
-}
-
-
-/******************************************************************
- * BLOCK-DEVICE SCHEDULER LIST MAINTENANCE
- */
-
-static struct list_head blkio_schedule_list;
-static spinlock_t blkio_schedule_list_lock;
-
-static int __on_blkdev_list(blkif_t *blkif)
-{
- return blkif->blkdev_list.next != NULL;
-}
-
-static void remove_from_blkdev_list(blkif_t *blkif)
-{
- unsigned long flags;
- if ( !__on_blkdev_list(blkif) ) return;
- spin_lock_irqsave(&blkio_schedule_list_lock, flags);
- if ( __on_blkdev_list(blkif) )
- {
- list_del(&blkif->blkdev_list);
- blkif->blkdev_list.next = NULL;
- blkif_put(blkif);
- }
- spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
-}
-
-static void add_to_blkdev_list_tail(blkif_t *blkif)
-{
- unsigned long flags;
- if ( __on_blkdev_list(blkif) ) return;
- spin_lock_irqsave(&blkio_schedule_list_lock, flags);
- if ( !__on_blkdev_list(blkif) && (blkif->status == CONNECTED) )
- {
- list_add_tail(&blkif->blkdev_list, &blkio_schedule_list);
- blkif_get(blkif);
- }
- spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
-}
-
-
-/******************************************************************
- * SCHEDULER FUNCTIONS
- */
-
-static DECLARE_WAIT_QUEUE_HEAD(blkio_schedule_wait);
-
-static int blkio_schedule(void *arg)
-{
- DECLARE_WAITQUEUE(wq, current);
-
- blkif_t *blkif;
- struct list_head *ent;
-
- daemonize(
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
- "xenblkd"
-#endif
- );
-
- for ( ; ; )
- {
- /* Wait for work to do. */
- add_wait_queue(&blkio_schedule_wait, &wq);
- set_current_state(TASK_INTERRUPTIBLE);
- if ( (NR_PENDING_REQS == MAX_PENDING_REQS) ||
- list_empty(&blkio_schedule_list) )
- schedule();
- __set_current_state(TASK_RUNNING);
- remove_wait_queue(&blkio_schedule_wait, &wq);
-
- /* Queue up a batch of requests. */
- while ( (NR_PENDING_REQS < MAX_PENDING_REQS) &&
- !list_empty(&blkio_schedule_list) )
- {
- ent = blkio_schedule_list.next;
- blkif = list_entry(ent, blkif_t, blkdev_list);
- blkif_get(blkif);
- remove_from_blkdev_list(blkif);
- if ( do_block_io_op(blkif, BATCH_PER_DOMAIN) )
- add_to_blkdev_list_tail(blkif);
- blkif_put(blkif);
- }
-
- /* Push the batch through to disc. */
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
- run_task_queue(&tq_disk);
-#else
- flush_plugged_queue();
-#endif
- }
-}
-
-static void maybe_trigger_blkio_schedule(void)
-{
- /*
-     * Needed so that two processes, which together make the following predicate
-     * true, don't both read stale values and evaluate the predicate
-     * incorrectly. Incredibly unlikely to stall the scheduler on x86, but...
- */
- smp_mb();
-
- if ( (NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
- !list_empty(&blkio_schedule_list) )
- wake_up(&blkio_schedule_wait);
-}
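
The barrier comment above is the classic lost-wakeup race between two
writers. A reduced model in C11 atomics (illustrative names, not kernel
code): each side publishes its half of the predicate, fences, then reads
the other half, so at least one side observes both halves and wakes the
scheduler.

#include <stdatomic.h>

static atomic_int pending_low;    /* NR_PENDING_REQS < MAX_PENDING_REQS/2 */
static atomic_int work_queued;    /* !list_empty(&blkio_schedule_list)    */

int completer_should_wake(void)   /* CPU that just finished a request */
{
    atomic_store(&pending_low, 1);
    atomic_thread_fence(memory_order_seq_cst);   /* the smp_mb() */
    return atomic_load(&work_queued);
}

int queuer_should_wake(void)      /* CPU that just queued an interface */
{
    atomic_store(&work_queued, 1);
    atomic_thread_fence(memory_order_seq_cst);   /* the smp_mb() */
    return atomic_load(&pending_low);
}
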
-
-
-
-/******************************************************************
- * COMPLETION CALLBACK -- Called as bh->b_end_io()
- */
-
-static void __end_block_io_op(pending_req_t *pending_req, int uptodate)
-{
- unsigned long flags;
-
- /* An error fails the entire request. */
- if ( !uptodate )
- {
- DPRINTK("Buffer not up-to-date at end of operation\n");
- pending_req->status = BLKIF_RSP_ERROR;
- }
-
- if ( atomic_dec_and_test(&pending_req->pendcnt) )
- {
- int pending_idx = pending_req - pending_reqs;
- if ( unlikely(pending_req->bounce_page != NULL) )
- {
- memcpy((void *)(MMAP_VADDR(pending_idx, 0) +
- pending_req->bounce_off),
- (void *)((unsigned long)pending_req->bounce_page +
- pending_req->bounce_off),
- pending_req->bounce_len);
- free_page((unsigned long)pending_req->bounce_page);
- }
- fast_flush_area(pending_idx, pending_req->nr_pages);
- make_response(pending_req->blkif, pending_req->id,
- pending_req->operation, pending_req->status);
- blkif_put(pending_req->blkif);
- spin_lock_irqsave(&pend_prod_lock, flags);
- pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
- spin_unlock_irqrestore(&pend_prod_lock, flags);
- maybe_trigger_blkio_schedule();
- }
-}
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
-static void end_block_io_op(struct buffer_head *bh, int uptodate)
-{
- __end_block_io_op(bh->b_private, uptodate);
- kmem_cache_free(buffer_head_cachep, bh);
-}
-#else
-static int end_block_io_op(struct bio *bio, unsigned int done, int error)
-{
- if ( bio->bi_size != 0 )
- return 1;
- __end_block_io_op(bio->bi_private, !error);
- bio_put(bio);
- return error;
-}
-#endif
-
-
-/******************************************************************************
- * NOTIFICATION FROM GUEST OS.
- */
-
-irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs)
-{
- blkif_t *blkif = dev_id;
- add_to_blkdev_list_tail(blkif);
- maybe_trigger_blkio_schedule();
- return IRQ_HANDLED;
-}
-
-
-
-/******************************************************************
- * DOWNWARD CALLS -- These interface with the block-device layer proper.
- */
-
-static int do_block_io_op(blkif_t *blkif, int max_to_do)
-{
- blkif_ring_t *blk_ring = blkif->blk_ring_base;
- blkif_request_t *req;
- BLKIF_RING_IDX i, rp;
- int more_to_do = 0;
-
- rp = blk_ring->req_prod;
- rmb(); /* Ensure we see queued requests up to 'rp'. */
-
- /* Take items off the comms ring, taking care not to overflow. */
- for ( i = blkif->blk_req_cons;
- (i != rp) && ((i-blkif->blk_resp_prod) != BLKIF_RING_SIZE);
- i++ )
- {
- if ( (max_to_do-- == 0) || (NR_PENDING_REQS == MAX_PENDING_REQS) )
- {
- more_to_do = 1;
- break;
- }
-
- req = &blk_ring->ring[MASK_BLKIF_IDX(i)].req;
- switch ( req->operation )
- {
- case BLKIF_OP_READ:
- case BLKIF_OP_WRITE:
- dispatch_rw_block_io(blkif, req);
- break;
-
- case BLKIF_OP_PROBE:
- dispatch_probe(blkif, req);
- break;
-
- default:
- DPRINTK("error: unknown block io operation [%d]\n",
- blk_ring->ring[i].req.operation);
- make_response(blkif, blk_ring->ring[i].req.id,
- blk_ring->ring[i].req.operation, BLKIF_RSP_ERROR);
- break;
- }
- }
-
- blkif->blk_req_cons = i;
- return more_to_do;
-}
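
do_block_io_op() above follows the usual shared-ring consumer discipline:
snapshot the remote producer index, order the reads, then walk entries
while bounding the number of unanswered requests. The same skeleton,
stand-alone and with illustrative types:

#include <stdatomic.h>

#define RING_SZ     64                /* power of two, illustrative */
#define RING_MSK(i) ((i) & (RING_SZ - 1))

struct req_ring {
    _Atomic unsigned int req_prod;    /* advanced by the frontend */
    int entries[RING_SZ];
};

static unsigned int req_cons, resp_prod;   /* backend-private indices */

void consume(struct req_ring *r)
{
    unsigned int i, rp;

    /* acquire load stands in for the rmb() after reading req_prod */
    rp = atomic_load_explicit(&r->req_prod, memory_order_acquire);

    for (i = req_cons;
         i != rp && (i - resp_prod) != RING_SZ;  /* don't outrun responses */
         i++) {
        int req = r->entries[RING_MSK(i)];
        (void)req;                    /* dispatch would happen here */
    }
    req_cons = i;
}
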
-
-static void dispatch_probe(blkif_t *blkif, blkif_request_t *req)
-{
- int rsp = BLKIF_RSP_ERROR;
- int pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
-
- /* We expect one buffer only. */
- if ( unlikely(req->nr_segments != 1) )
- goto out;
-
- /* Make sure the buffer is page-sized. */
- if ( (blkif_first_sect(req->frame_and_sects[0]) != 0) ||
- (blkif_last_sect(req->frame_and_sects[0]) != 7) )
- goto out;
-
- if ( HYPERVISOR_update_va_mapping_otherdomain(
- MMAP_VADDR(pending_idx, 0) >> PAGE_SHIFT,
- (pte_t) { (req->frame_and_sects[0] & PAGE_MASK) | __PAGE_KERNEL },
- 0, blkif->domid) )
- goto out;
-
- rsp = vbd_probe(blkif, (vdisk_t *)MMAP_VADDR(pending_idx, 0),
- PAGE_SIZE / sizeof(vdisk_t));
-
- out:
- fast_flush_area(pending_idx, 1);
- make_response(blkif, req->id, req->operation, rsp);
-}
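
The "page-sized" check above relies only on the 512-byte sector size:
sectors 0 through 7 of a page are 8 * 512 = 4096 bytes, exactly one page,
which is what vbd_probe needs for its response buffer. In code:

#include <assert.h>

int main(void)
{
    int first_sect = 0, last_sect = 7;
    assert((last_sect - first_sect + 1) * 512 == 4096);
    return 0;
}
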
-
-static void dispatch_rw_block_io(blkif_t *blkif, blkif_request_t *req)
-{
- extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]);
- int operation = (req->operation == BLKIF_OP_WRITE) ? WRITE : READ;
- short nr_sects;
- unsigned long buffer, fas;
- int i, tot_sects, pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
- pending_req_t *pending_req;
- unsigned long remap_prot;
- multicall_entry_t mcl[MMAP_PAGES_PER_REQUEST];
-
- /* We map virtual scatter/gather segments to physical segments. */
- int new_segs, nr_psegs = 0;
- phys_seg_t phys_seg[BLKIF_MAX_SEGMENTS_PER_REQUEST + 1];
-
- /* Check that number of segments is sane. */
- if ( unlikely(req->nr_segments == 0) ||
- unlikely(req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST) )
- {
- DPRINTK("Bad number of segments in request (%d)\n", req->nr_segments);
- goto bad_descriptor;
- }
-
- /*
- * Check each address/size pair is sane, and convert into a
- * physical device and block offset. Note that if the offset and size
- * crosses a virtual extent boundary, we may end up with more
- * physical scatter/gather segments than virtual segments.
- */
- for ( i = tot_sects = 0; i < req->nr_segments; i++, tot_sects += nr_sects )
- {
- fas = req->frame_and_sects[i];
- buffer = (fas & PAGE_MASK) | (blkif_first_sect(fas) << 9);
- nr_sects = blkif_last_sect(fas) - blkif_first_sect(fas) + 1;
-
- if ( nr_sects <= 0 )
- goto bad_descriptor;
-
- phys_seg[nr_psegs].dev = req->device;
- phys_seg[nr_psegs].sector_number = req->sector_number + tot_sects;
- phys_seg[nr_psegs].buffer = buffer;
- phys_seg[nr_psegs].nr_sects = nr_sects;
-
- /* Translate the request into the relevant 'physical device' */
- new_segs = vbd_translate(&phys_seg[nr_psegs], blkif, operation);
- if ( new_segs < 0 )
- {
- DPRINTK("access denied: %s of [%llu,%llu] on dev=%04x\n",
- operation == READ ? "read" : "write",
- req->sector_number + tot_sects,
- req->sector_number + tot_sects + nr_sects,
- req->device);
- goto bad_descriptor;
- }
-
- nr_psegs += new_segs;
- ASSERT(nr_psegs <= (BLKIF_MAX_SEGMENTS_PER_REQUEST+1));
- }
-
- /* Nonsensical zero-sized request? */
- if ( unlikely(nr_psegs == 0) )
- goto bad_descriptor;
-
- if ( operation == READ )
- remap_prot = _PAGE_PRESENT|_PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_RW;
- else
- remap_prot = _PAGE_PRESENT|_PAGE_DIRTY|_PAGE_ACCESSED;
-
- for ( i = 0; i < nr_psegs; i++ )
- {
- mcl[i].op = __HYPERVISOR_update_va_mapping_otherdomain;
- mcl[i].args[0] = MMAP_VADDR(pending_idx, i) >> PAGE_SHIFT;
- mcl[i].args[1] = (phys_seg[i].buffer & PAGE_MASK) | remap_prot;
- mcl[i].args[2] = 0;
- mcl[i].args[3] = blkif->domid;
-
- phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx, i))>>PAGE_SHIFT] =
- FOREIGN_FRAME(phys_seg[i].buffer >> PAGE_SHIFT);
- }
-
- if ( unlikely(HYPERVISOR_multicall(mcl, nr_psegs) != 0) )
- BUG();
-
- for ( i = 0; i < nr_psegs; i++ )
- {
- if ( unlikely(mcl[i].args[5] != 0) )
- {
- DPRINTK("invalid buffer -- could not remap it\n");
- fast_flush_area(pending_idx, nr_psegs);
- goto bad_descriptor;
- }
- }
-
- pending_req = &pending_reqs[pending_idx];
- pending_req->blkif = blkif;
- pending_req->id = req->id;
- pending_req->operation = operation;
- pending_req->status = BLKIF_RSP_OKAY;
- pending_req->nr_pages = nr_psegs;
- pending_req->bounce_page = NULL;
- atomic_set(&pending_req->pendcnt, nr_psegs);
- pending_cons++;
-
- blkif_get(blkif);
-
- /* Now we pass each segment down to the real blkdev layer. */
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
- for ( i = 0; i < nr_psegs; i++ )
- {
- struct buffer_head *bh;
-
- bh = kmem_cache_alloc(buffer_head_cachep, GFP_ATOMIC);
- if ( unlikely(bh == NULL) )
- {
- __end_block_io_op(pending_req, 0);
- continue;
- }
-
- memset(bh, 0, sizeof (struct buffer_head));
-
- init_waitqueue_head(&bh->b_wait);
- bh->b_size = phys_seg[i].nr_sects << 9;
- bh->b_dev = phys_seg[i].dev;
- bh->b_rdev = phys_seg[i].dev;
- bh->b_rsector = (unsigned long)phys_seg[i].sector_number;
- bh->b_data = (char *)MMAP_VADDR(pending_idx, i) +
- (phys_seg[i].buffer & ~PAGE_MASK);
- bh->b_page = virt_to_page(MMAP_VADDR(pending_idx, i));
- bh->b_end_io = end_block_io_op;
- bh->b_private = pending_req;
-
- bh->b_state = (1 << BH_Mapped) | (1 << BH_Lock) |
- (1 << BH_Req) | (1 << BH_Launder);
- if ( operation == WRITE )
- bh->b_state |= (1 << BH_JBD) | (1 << BH_Req) | (1 << BH_Uptodate);
-
- atomic_set(&bh->b_count, 1);
-
- /* Dispatch a single request. We'll flush it to disc later. */
- generic_make_request(operation, bh);
- }
-#else
- for ( i = 0; i < nr_psegs; i++ )
- {
- struct bio *bio;
- request_queue_t *q;
-
- bio = bio_alloc(GFP_ATOMIC, 1);
- if ( unlikely(bio == NULL) )
- {
- __end_block_io_op(pending_req, 0);
- continue;
- }
-
- bio->bi_bdev = phys_seg[i].bdev;
- bio->bi_private = pending_req;
- bio->bi_end_io = end_block_io_op;
- bio->bi_sector = phys_seg[i].sector_number;
-
- /* Is the request misaligned with respect to hardware sector size? */
- if ( ((bio->bi_sector | phys_seg[i].nr_sects) &
- ((bdev_hardsect_size(bio->bi_bdev) >> 9) - 1)) )
- {
- /* We can't bounce scatter-gather requests. */
- if ( (nr_psegs != 1) ||
- ((pending_req->bounce_page = (void *)
- __get_free_page(GFP_KERNEL)) == NULL) )
- {
- printk("xen_blk: Unaligned scatter-gather request!\n");
- bio_put(bio);
- __end_block_io_op(pending_req, 0);
- continue;
- }
-
- /* Record offset and length within a bounce page. */
- pending_req->bounce_off = (bio->bi_sector << 9) & ~PAGE_MASK;
- pending_req->bounce_len = phys_seg[i].nr_sects << 9;
-
- /* Submit a page-aligned I/O. */
- bio->bi_sector &= ~((PAGE_SIZE >> 9) - 1);
- bio_add_page(
- bio, virt_to_page(pending_req->bounce_page), PAGE_SIZE, 0);
- }
- else
- {
- bio_add_page(
- bio,
- virt_to_page(MMAP_VADDR(pending_idx, i)),
- phys_seg[i].nr_sects << 9,
- phys_seg[i].buffer & ~PAGE_MASK);
- }
-
- if ( (q = bdev_get_queue(bio->bi_bdev)) != plugged_queue )
- {
- flush_plugged_queue();
- blk_get_queue(q);
- plugged_queue = q;
- }
-
- submit_bio(operation, bio);
- }
-#endif
-
- return;
-
- bad_descriptor:
- make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
-}
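
The misalignment test in the 2.6 path above checks the start sector and
the length in a single expression: OR them together and mask with the
hardware sector size (in 512-byte units) minus one; any non-zero result
means one of the two is misaligned and the request must be bounced. A
worked example with 2048-byte hardware sectors:

#include <stdio.h>

int main(void)
{
    unsigned int hs_units = 2048 >> 9;   /* 4 x 512-byte sectors */
    unsigned int mask = hs_units - 1;    /* 3 */

    printf("%u\n", (8u | 4u) & mask);    /* 0: sector 8, 4 sects -> aligned */
    printf("%u\n", (9u | 4u) & mask);    /* 1: sector 9 -> needs the bounce */
    return 0;
}
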
-
-
-
-/******************************************************************
- * MISCELLANEOUS SETUP / TEARDOWN / DEBUGGING
- */
-
-
-static void make_response(blkif_t *blkif, unsigned long id,
- unsigned short op, int st)
-{
- blkif_response_t *resp;
- unsigned long flags;
-
- /* Place on the response ring for the relevant domain. */
- spin_lock_irqsave(&blkif->blk_ring_lock, flags);
- resp = &blkif->blk_ring_base->
- ring[MASK_BLKIF_IDX(blkif->blk_resp_prod)].resp;
- resp->id = id;
- resp->operation = op;
- resp->status = st;
- wmb(); /* Ensure other side can see the response fields. */
- blkif->blk_ring_base->resp_prod = ++blkif->blk_resp_prod;
- spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
-
- /* Kick the relevant domain. */
- notify_via_evtchn(blkif->evtchn);
-}
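
make_response() is the producer half of the ring discipline sketched after
do_block_io_op(): fill the slot first, then publish the new producer index,
with the barrier keeping those stores ordered. Stand-alone, using a C11
release store in place of wmb():

#include <stdatomic.h>

#define RSP_SZ     64
#define RSP_MSK(i) ((i) & (RSP_SZ - 1))

struct rsp_ring {
    _Atomic unsigned int resp_prod;   /* read by the frontend */
    int entries[RSP_SZ];
};

static unsigned int resp_prod_pvt;    /* private copy, as in blkif_t */

void push_response(struct rsp_ring *r, int status)
{
    r->entries[RSP_MSK(resp_prod_pvt)] = status;   /* payload first... */
    atomic_store_explicit(&r->resp_prod, ++resp_prod_pvt,
                          memory_order_release);   /* ...then publish */
}
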
-
-void blkif_deschedule(blkif_t *blkif)
-{
- remove_from_blkdev_list(blkif);
-}
-
-static int __init blkif_init(void)
-{
- int i;
-
- if ( !(xen_start_info.flags & SIF_INITDOMAIN) &&
- !(xen_start_info.flags & SIF_BLK_BE_DOMAIN) )
- return 0;
-
- blkif_interface_init();
-
- if ( (mmap_vstart = allocate_empty_lowmem_region(MMAP_PAGES)) == 0 )
- BUG();
-
- pending_cons = 0;
- pending_prod = MAX_PENDING_REQS;
- memset(pending_reqs, 0, sizeof(pending_reqs));
- for ( i = 0; i < MAX_PENDING_REQS; i++ )
- pending_ring[i] = i;
-
- spin_lock_init(&blkio_schedule_list_lock);
- INIT_LIST_HEAD(&blkio_schedule_list);
-
- if ( kernel_thread(blkio_schedule, 0, CLONE_FS | CLONE_FILES) < 0 )
- BUG();
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
- buffer_head_cachep = kmem_cache_create(
- "buffer_head_cache", sizeof(struct buffer_head),
- 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
-#endif
-
- blkif_ctrlif_init();
-
- return 0;
-}
-
-__initcall(blkif_init);
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/drivers/xen/blkback/common.h
--- a/linux-2.6.11-xen-sparse/drivers/xen/blkback/common.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,120 +0,0 @@
-
-#ifndef __BLKIF__BACKEND__COMMON_H__
-#define __BLKIF__BACKEND__COMMON_H__
-
-#include <linux/config.h>
-#include <linux/version.h>
-#include <linux/module.h>
-#include <linux/rbtree.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include <linux/blkdev.h>
-#include <asm/io.h>
-#include <asm/setup.h>
-#include <asm/pgalloc.h>
-#include <asm-xen/ctrl_if.h>
-#include <asm-xen/hypervisor.h>
-#include <asm-xen/xen-public/io/blkif.h>
-
-#if 0
-#define ASSERT(_p) \
- if ( !(_p) ) { printk("Assertion '%s' failed, line %d, file %s", #_p , \
- __LINE__, __FILE__); *(int*)0=0; }
-#define DPRINTK(_f, _a...) printk(KERN_ALERT "(file=%s, line=%d) " _f, \
- __FILE__ , __LINE__ , ## _a )
-#else
-#define ASSERT(_p) ((void)0)
-#define DPRINTK(_f, _a...) ((void)0)
-#endif
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-typedef struct rb_root rb_root_t;
-typedef struct rb_node rb_node_t;
-#else
-struct block_device;
-#endif
-
-typedef struct blkif_st {
- /* Unique identifier for this interface. */
- domid_t domid;
- unsigned int handle;
- /* Physical parameters of the comms window. */
- unsigned long shmem_frame;
- unsigned int evtchn;
- int irq;
- /* Comms information. */
- blkif_ring_t *blk_ring_base; /* ioremap()'ed ptr to shmem_frame. */
- BLKIF_RING_IDX blk_req_cons; /* Request consumer. */
- BLKIF_RING_IDX blk_resp_prod; /* Private version of resp. producer. */
- /* VBDs attached to this interface. */
- rb_root_t vbd_rb; /* Mapping from 16-bit vdevices to VBDs. */
- spinlock_t vbd_lock; /* Protects VBD mapping. */
- /* Private fields. */
- enum { DISCONNECTED, DISCONNECTING, CONNECTED } status;
- /*
- * DISCONNECT response is deferred until pending requests are ack'ed.
- * We therefore need to store the id from the original request.
- */
- u8 disconnect_rspid;
- struct blkif_st *hash_next;
- struct list_head blkdev_list;
- spinlock_t blk_ring_lock;
- atomic_t refcnt;
-
- struct work_struct work;
-} blkif_t;
-
-void blkif_create(blkif_be_create_t *create);
-void blkif_destroy(blkif_be_destroy_t *destroy);
-void blkif_connect(blkif_be_connect_t *connect);
-int blkif_disconnect(blkif_be_disconnect_t *disconnect, u8 rsp_id);
-void blkif_disconnect_complete(blkif_t *blkif);
-blkif_t *blkif_find_by_handle(domid_t domid, unsigned int handle);
-#define blkif_get(_b) (atomic_inc(&(_b)->refcnt))
-#define blkif_put(_b) \
- do { \
- if ( atomic_dec_and_test(&(_b)->refcnt) ) \
- blkif_disconnect_complete(_b); \
- } while (0)
-
-/* An entry in a list of xen_extents. */
-typedef struct _blkif_extent_le {
- blkif_extent_t extent; /* an individual extent */
- struct _blkif_extent_le *next; /* and a pointer to the next */
- struct block_device *bdev;
-} blkif_extent_le_t;
-
-typedef struct _vbd {
- blkif_vdev_t vdevice; /* what the domain refers to this vbd as */
- unsigned char readonly; /* Non-zero -> read-only */
- unsigned char type; /* VDISK_TYPE_xxx */
- blkif_extent_le_t *extents; /* list of xen_extents making up this vbd */
- rb_node_t rb; /* for linking into R-B tree lookup struct */
-} vbd_t;
-
-void vbd_create(blkif_be_vbd_create_t *create);
-void vbd_grow(blkif_be_vbd_grow_t *grow);
-void vbd_shrink(blkif_be_vbd_shrink_t *shrink);
-void vbd_destroy(blkif_be_vbd_destroy_t *delete);
-int vbd_probe(blkif_t *blkif, vdisk_t *vbd_info, int max_vbds);
-void destroy_all_vbds(blkif_t *blkif);
-
-/* Describes a [partial] disk extent (part of a block io request) */
-typedef struct {
- unsigned short dev;
- unsigned short nr_sects;
- struct block_device *bdev;
- unsigned long buffer;
- blkif_sector_t sector_number;
-} phys_seg_t;
-
-int vbd_translate(phys_seg_t *pseg, blkif_t *blkif, int operation);
-
-void blkif_interface_init(void);
-void blkif_ctrlif_init(void);
-
-void blkif_deschedule(blkif_t *blkif);
-
-irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs);
-
-#endif /* __BLKIF__BACKEND__COMMON_H__ */
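
The blkif_get/blkif_put pair declared above ties teardown to the last
reference: whichever context drops the count to zero runs the deferred
disconnect. A minimal stand-alone model of that discipline (hypothetical
names):

#include <stdio.h>
#include <stdatomic.h>

struct iface { atomic_int refcnt; };

static void disconnect_complete(struct iface *b)
{
    printf("iface %p torn down\n", (void *)b);
}

#define iface_get(b) atomic_fetch_add(&(b)->refcnt, 1)
#define iface_put(b)                                    \
    do {                                                \
        if (atomic_fetch_sub(&(b)->refcnt, 1) == 1)     \
            disconnect_complete(b);                     \
    } while (0)

int main(void)
{
    struct iface b = { .refcnt = 1 };   /* reference taken at connect */
    iface_get(&b);                      /* an in-flight request */
    iface_put(&b);                      /* request completes */
    iface_put(&b);                      /* disconnect drops the last ref */
    return 0;
}
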
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/drivers/xen/blkback/control.c
--- a/linux-2.6.11-xen-sparse/drivers/xen/blkback/control.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,87 +0,0 @@
-/******************************************************************************
- * arch/xen/drivers/blkif/backend/control.c
- *
- * Routines for interfacing with the control plane.
- *
- * Copyright (c) 2004, Keir Fraser
- */
-
-#include "common.h"
-
-static void blkif_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
-{
- DPRINTK("Received blkif backend message, subtype=%d\n", msg->subtype);
-
- switch ( msg->subtype )
- {
- case CMSG_BLKIF_BE_CREATE:
- if ( msg->length != sizeof(blkif_be_create_t) )
- goto parse_error;
- blkif_create((blkif_be_create_t *)&msg->msg[0]);
- break;
- case CMSG_BLKIF_BE_DESTROY:
- if ( msg->length != sizeof(blkif_be_destroy_t) )
- goto parse_error;
- blkif_destroy((blkif_be_destroy_t *)&msg->msg[0]);
- break;
- case CMSG_BLKIF_BE_CONNECT:
- if ( msg->length != sizeof(blkif_be_connect_t) )
- goto parse_error;
- blkif_connect((blkif_be_connect_t *)&msg->msg[0]);
- break;
- case CMSG_BLKIF_BE_DISCONNECT:
- if ( msg->length != sizeof(blkif_be_disconnect_t) )
- goto parse_error;
- if ( !blkif_disconnect((blkif_be_disconnect_t *)&msg->msg[0],msg->id) )
- return; /* Sending the response is deferred until later. */
- break;
- case CMSG_BLKIF_BE_VBD_CREATE:
- if ( msg->length != sizeof(blkif_be_vbd_create_t) )
- goto parse_error;
- vbd_create((blkif_be_vbd_create_t *)&msg->msg[0]);
- break;
- case CMSG_BLKIF_BE_VBD_DESTROY:
- if ( msg->length != sizeof(blkif_be_vbd_destroy_t) )
- goto parse_error;
- vbd_destroy((blkif_be_vbd_destroy_t *)&msg->msg[0]);
- break;
- case CMSG_BLKIF_BE_VBD_GROW:
- if ( msg->length != sizeof(blkif_be_vbd_grow_t) )
- goto parse_error;
- vbd_grow((blkif_be_vbd_grow_t *)&msg->msg[0]);
- break;
- case CMSG_BLKIF_BE_VBD_SHRINK:
- if ( msg->length != sizeof(blkif_be_vbd_shrink_t) )
- goto parse_error;
- vbd_shrink((blkif_be_vbd_shrink_t *)&msg->msg[0]);
- break;
- default:
- goto parse_error;
- }
-
- ctrl_if_send_response(msg);
- return;
-
- parse_error:
- DPRINTK("Parse error while reading message subtype %d, len %d\n",
- msg->subtype, msg->length);
- msg->length = 0;
- ctrl_if_send_response(msg);
-}
-
-void blkif_ctrlif_init(void)
-{
- ctrl_msg_t cmsg;
- blkif_be_driver_status_t st;
-
- (void)ctrl_if_register_receiver(CMSG_BLKIF_BE, blkif_ctrlif_rx,
- CALLBACK_IN_BLOCKING_CONTEXT);
-
- /* Send a driver-UP notification to the domain controller. */
- cmsg.type = CMSG_BLKIF_BE;
- cmsg.subtype = CMSG_BLKIF_BE_DRIVER_STATUS;
- cmsg.length = sizeof(blkif_be_driver_status_t);
- st.status = BLKIF_DRIVER_STATUS_UP;
- memcpy(cmsg.msg, &st, sizeof(st));
- ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
-}
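
Every arm of the switch above validates msg->length against the expected
structure before casting the payload, and falls through to a common parse
error path otherwise. The same pattern reduced to a stand-alone sketch
(hypothetical message layout):

#include <stdio.h>
#include <string.h>

struct msg { int subtype; unsigned int length; unsigned char body[64]; };
struct create_req { unsigned int domid, handle; };

enum { MSG_CREATE = 1 };

int dispatch(struct msg *m)
{
    switch (m->subtype) {
    case MSG_CREATE: {
        struct create_req req;
        if (m->length != sizeof(req))
            return -1;                          /* the parse_error path */
        memcpy(&req, m->body, sizeof(req));     /* size-checked copy */
        printf("create dom=%u handle=%u\n", req.domid, req.handle);
        return 0;
    }
    default:
        return -1;
    }
}
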
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/drivers/xen/blkback/interface.c
--- a/linux-2.6.11-xen-sparse/drivers/xen/blkback/interface.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,246 +0,0 @@
-/******************************************************************************
- * arch/xen/drivers/blkif/backend/interface.c
- *
- * Block-device interface management.
- *
- * Copyright (c) 2004, Keir Fraser
- */
-
-#include "common.h"
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-#define VMALLOC_VMADDR(x) ((unsigned long)(x))
-#endif
-
-#define BLKIF_HASHSZ 1024
-#define BLKIF_HASH(_d,_h) (((int)(_d)^(int)(_h))&(BLKIF_HASHSZ-1))
-
-static kmem_cache_t *blkif_cachep;
-static blkif_t *blkif_hash[BLKIF_HASHSZ];
-
-blkif_t *blkif_find_by_handle(domid_t domid, unsigned int handle)
-{
- blkif_t *blkif = blkif_hash[BLKIF_HASH(domid, handle)];
- while ( (blkif != NULL) &&
- ((blkif->domid != domid) || (blkif->handle != handle)) )
- blkif = blkif->hash_next;
- return blkif;
-}
-
-static void __blkif_disconnect_complete(void *arg)
-{
- blkif_t *blkif = (blkif_t *)arg;
- ctrl_msg_t cmsg;
- blkif_be_disconnect_t disc;
-
- /*
- * These can't be done in blkif_disconnect() because at that point there
- * may be outstanding requests at the disc whose asynchronous responses
- * must still be notified to the remote driver.
- */
- unbind_evtchn_from_irq(blkif->evtchn);
- vfree(blkif->blk_ring_base);
-
- /* Construct the deferred response message. */
- cmsg.type = CMSG_BLKIF_BE;
- cmsg.subtype = CMSG_BLKIF_BE_DISCONNECT;
- cmsg.id = blkif->disconnect_rspid;
- cmsg.length = sizeof(blkif_be_disconnect_t);
- disc.domid = blkif->domid;
- disc.blkif_handle = blkif->handle;
- disc.status = BLKIF_BE_STATUS_OKAY;
- memcpy(cmsg.msg, &disc, sizeof(disc));
-
- /*
- * Make sure message is constructed /before/ status change, because
- * after the status change the 'blkif' structure could be deallocated at
- * any time. Also make sure we send the response /after/ status change,
- * as otherwise a subsequent CONNECT request could spuriously fail if
- * another CPU doesn't see the status change yet.
- */
- mb();
- if ( blkif->status != DISCONNECTING )
- BUG();
- blkif->status = DISCONNECTED;
- mb();
-
- /* Send the successful response. */
- ctrl_if_send_response(&cmsg);
-}
-
-void blkif_disconnect_complete(blkif_t *blkif)
-{
- INIT_WORK(&blkif->work, __blkif_disconnect_complete, (void *)blkif);
- schedule_work(&blkif->work);
-}
-
-void blkif_create(blkif_be_create_t *create)
-{
- domid_t domid = create->domid;
- unsigned int handle = create->blkif_handle;
- blkif_t **pblkif, *blkif;
-
- if ( (blkif = kmem_cache_alloc(blkif_cachep, GFP_KERNEL)) == NULL )
- {
- DPRINTK("Could not create blkif: out of memory\n");
- create->status = BLKIF_BE_STATUS_OUT_OF_MEMORY;
- return;
- }
-
- memset(blkif, 0, sizeof(*blkif));
- blkif->domid = domid;
- blkif->handle = handle;
- blkif->status = DISCONNECTED;
- spin_lock_init(&blkif->vbd_lock);
- spin_lock_init(&blkif->blk_ring_lock);
- atomic_set(&blkif->refcnt, 0);
-
- pblkif = &blkif_hash[BLKIF_HASH(domid, handle)];
- while ( *pblkif != NULL )
- {
- if ( ((*pblkif)->domid == domid) && ((*pblkif)->handle == handle) )
- {
- DPRINTK("Could not create blkif: already exists\n");
- create->status = BLKIF_BE_STATUS_INTERFACE_EXISTS;
- kmem_cache_free(blkif_cachep, blkif);
- return;
- }
- pblkif = &(*pblkif)->hash_next;
- }
-
- blkif->hash_next = *pblkif;
- *pblkif = blkif;
-
- DPRINTK("Successfully created blkif\n");
- create->status = BLKIF_BE_STATUS_OKAY;
-}
-
-void blkif_destroy(blkif_be_destroy_t *destroy)
-{
- domid_t domid = destroy->domid;
- unsigned int handle = destroy->blkif_handle;
- blkif_t **pblkif, *blkif;
-
- pblkif = &blkif_hash[BLKIF_HASH(domid, handle)];
- while ( (blkif = *pblkif) != NULL )
- {
- if ( (blkif->domid == domid) && (blkif->handle == handle) )
- {
- if ( blkif->status != DISCONNECTED )
- goto still_connected;
- goto destroy;
- }
- pblkif = &blkif->hash_next;
- }
-
- destroy->status = BLKIF_BE_STATUS_INTERFACE_NOT_FOUND;
- return;
-
- still_connected:
- destroy->status = BLKIF_BE_STATUS_INTERFACE_CONNECTED;
- return;
-
- destroy:
- *pblkif = blkif->hash_next;
- destroy_all_vbds(blkif);
- kmem_cache_free(blkif_cachep, blkif);
- destroy->status = BLKIF_BE_STATUS_OKAY;
-}
-
-void blkif_connect(blkif_be_connect_t *connect)
-{
- domid_t domid = connect->domid;
- unsigned int handle = connect->blkif_handle;
- unsigned int evtchn = connect->evtchn;
- unsigned long shmem_frame = connect->shmem_frame;
- struct vm_struct *vma;
- pgprot_t prot;
- int error;
- blkif_t *blkif;
-
- blkif = blkif_find_by_handle(domid, handle);
- if ( unlikely(blkif == NULL) )
- {
- DPRINTK("blkif_connect attempted for non-existent blkif (%u,%u)\n",
- connect->domid, connect->blkif_handle);
- connect->status = BLKIF_BE_STATUS_INTERFACE_NOT_FOUND;
- return;
- }
-
- if ( (vma = get_vm_area(PAGE_SIZE, VM_IOREMAP)) == NULL )
- {
- connect->status = BLKIF_BE_STATUS_OUT_OF_MEMORY;
- return;
- }
-
- prot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED);
- error = direct_remap_area_pages(&init_mm, VMALLOC_VMADDR(vma->addr),
- shmem_frame<<PAGE_SHIFT, PAGE_SIZE,
- prot, domid);
- if ( error != 0 )
- {
- if ( error == -ENOMEM )
- connect->status = BLKIF_BE_STATUS_OUT_OF_MEMORY;
- else if ( error == -EFAULT )
- connect->status = BLKIF_BE_STATUS_MAPPING_ERROR;
- else
- connect->status = BLKIF_BE_STATUS_ERROR;
- vfree(vma->addr);
- return;
- }
-
- if ( blkif->status != DISCONNECTED )
- {
- connect->status = BLKIF_BE_STATUS_INTERFACE_CONNECTED;
- vfree(vma->addr);
- return;
- }
-
- blkif->evtchn = evtchn;
- blkif->irq = bind_evtchn_to_irq(evtchn);
- blkif->shmem_frame = shmem_frame;
- blkif->blk_ring_base = (blkif_ring_t *)vma->addr;
- blkif->status = CONNECTED;
- blkif_get(blkif);
-
- request_irq(blkif->irq, blkif_be_int, 0, "blkif-backend", blkif);
-
- connect->status = BLKIF_BE_STATUS_OKAY;
-}
-
-int blkif_disconnect(blkif_be_disconnect_t *disconnect, u8 rsp_id)
-{
- domid_t domid = disconnect->domid;
- unsigned int handle = disconnect->blkif_handle;
- blkif_t *blkif;
-
- blkif = blkif_find_by_handle(domid, handle);
- if ( unlikely(blkif == NULL) )
- {
- DPRINTK("blkif_disconnect attempted for non-existent blkif"
- " (%u,%u)\n", disconnect->domid, disconnect->blkif_handle);
- disconnect->status = BLKIF_BE_STATUS_INTERFACE_NOT_FOUND;
- return 1; /* Caller will send response error message. */
- }
-
- if ( blkif->status == CONNECTED )
- {
- blkif->status = DISCONNECTING;
- blkif->disconnect_rspid = rsp_id;
- wmb(); /* Let other CPUs see the status change. */
- free_irq(blkif->irq, blkif);
- blkif_deschedule(blkif);
- blkif_put(blkif);
- return 0; /* Caller should not send response message. */
- }
-
- disconnect->status = BLKIF_BE_STATUS_OKAY;
- return 1;
-}
-
-void __init blkif_interface_init(void)
-{
- blkif_cachep = kmem_cache_create("blkif_cache", sizeof(blkif_t),
- 0, 0, NULL, NULL);
- memset(blkif_hash, 0, sizeof(blkif_hash));
-}
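
Lookup above is a chained hash keyed by XORing the domain id with the
handle, masked to a power-of-two table. The same shape, stand-alone and
with illustrative types:

#include <stddef.h>

#define HSZ 1024
#define HKEY(d, h) (((unsigned)(d) ^ (unsigned)(h)) & (HSZ - 1))

struct iface {
    unsigned int domid, handle;
    struct iface *hash_next;          /* collision chain */
};

static struct iface *table[HSZ];

struct iface *iface_find(unsigned int domid, unsigned int handle)
{
    struct iface *p = table[HKEY(domid, handle)];
    while (p != NULL && (p->domid != domid || p->handle != handle))
        p = p->hash_next;
    return p;                         /* NULL when absent */
}
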
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/drivers/xen/blkback/vbd.c
--- a/linux-2.6.11-xen-sparse/drivers/xen/blkback/vbd.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,583 +0,0 @@
-/******************************************************************************
- * blkback/vbd.c
- *
- * Routines for managing virtual block devices (VBDs).
- *
- * NOTE: vbd_lock protects updates to the rb_tree against concurrent lookups
- * in vbd_translate. All other lookups are implicitly protected because the
- * only caller (the control message dispatch routine) serializes the calls.
- *
- * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
- */
-
-#include "common.h"
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-static dev_t vbd_map_devnum(blkif_pdev_t);
-#define bdev_put(_b) blkdev_put(_b)
-#else
-#define bdev_put(_b) ((void)0)
-#endif
-
-void vbd_create(blkif_be_vbd_create_t *create)
-{
- vbd_t *vbd;
- rb_node_t **rb_p, *rb_parent = NULL;
- blkif_t *blkif;
- blkif_vdev_t vdevice = create->vdevice;
-
- blkif = blkif_find_by_handle(create->domid, create->blkif_handle);
- if ( unlikely(blkif == NULL) )
- {
- DPRINTK("vbd_create attempted for non-existent blkif (%u,%u)\n",
- create->domid, create->blkif_handle);
- create->status = BLKIF_BE_STATUS_INTERFACE_NOT_FOUND;
- return;
- }
-
- rb_p = &blkif->vbd_rb.rb_node;
- while ( *rb_p != NULL )
- {
- rb_parent = *rb_p;
- vbd = rb_entry(rb_parent, vbd_t, rb);
- if ( vdevice < vbd->vdevice )
- {
- rb_p = &rb_parent->rb_left;
- }
- else if ( vdevice > vbd->vdevice )
- {
- rb_p = &rb_parent->rb_right;
- }
- else
- {
- DPRINTK("vbd_create attempted for already existing vbd\n");
- create->status = BLKIF_BE_STATUS_VBD_EXISTS;
- return;
- }
- }
-
- if ( unlikely((vbd = kmalloc(sizeof(vbd_t), GFP_KERNEL)) == NULL) )
- {
- DPRINTK("vbd_create: out of memory\n");
- create->status = BLKIF_BE_STATUS_OUT_OF_MEMORY;
- return;
- }
-
- vbd->vdevice = vdevice;
- vbd->readonly = create->readonly;
- vbd->type = VDISK_TYPE_DISK;
- vbd->extents = NULL;
-
- spin_lock(&blkif->vbd_lock);
- rb_link_node(&vbd->rb, rb_parent, rb_p);
- rb_insert_color(&vbd->rb, &blkif->vbd_rb);
- spin_unlock(&blkif->vbd_lock);
-
- DPRINTK("Successful creation of vdev=%04x (dom=%u)\n",
- vdevice, create->domid);
- create->status = BLKIF_BE_STATUS_OKAY;
-}
-
-
-/* Grow a VBD by appending a new extent. Fails if the VBD doesn't exist. */
-void vbd_grow(blkif_be_vbd_grow_t *grow)
-{
- blkif_t *blkif;
- blkif_extent_le_t **px, *x;
- vbd_t *vbd = NULL;
- rb_node_t *rb;
- blkif_vdev_t vdevice = grow->vdevice;
- unsigned long sz;
-
- blkif = blkif_find_by_handle(grow->domid, grow->blkif_handle);
- if ( unlikely(blkif == NULL) )
- {
- DPRINTK("vbd_grow attempted for non-existent blkif (%u,%u)\n",
- grow->domid, grow->blkif_handle);
- grow->status = BLKIF_BE_STATUS_INTERFACE_NOT_FOUND;
- return;
- }
-
- rb = blkif->vbd_rb.rb_node;
- while ( rb != NULL )
- {
- vbd = rb_entry(rb, vbd_t, rb);
- if ( vdevice < vbd->vdevice )
- rb = rb->rb_left;
- else if ( vdevice > vbd->vdevice )
- rb = rb->rb_right;
- else
- break;
- }
-
- if ( unlikely(vbd == NULL) || unlikely(vbd->vdevice != vdevice) )
- {
- DPRINTK("vbd_grow: attempted to append extent to non-existent VBD.\n");
- grow->status = BLKIF_BE_STATUS_VBD_NOT_FOUND;
- return;
- }
-
- if ( grow->extent.sector_start > 0 )
- {
- DPRINTK("vbd_grow: dev %08x start not zero.\n", grow->extent.device);
- grow->status = BLKIF_BE_STATUS_EXTENT_NOT_FOUND;
- return;
- }
-
- if ( unlikely((x = kmalloc(sizeof(blkif_extent_le_t),
- GFP_KERNEL)) == NULL) )
- {
- DPRINTK("vbd_grow: out of memory\n");
- grow->status = BLKIF_BE_STATUS_OUT_OF_MEMORY;
- return;
- }
-
- /* Mask to 16-bit for compatibility with old tools */
- x->extent.device = grow->extent.device & 0xffff;
- x->extent.sector_start = grow->extent.sector_start;
- x->extent.sector_length = grow->extent.sector_length;
- x->next = (blkif_extent_le_t *)NULL;
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
- x->bdev = open_by_devnum(vbd_map_devnum(x->extent.device),
- vbd->readonly ? FMODE_READ : FMODE_WRITE);
- if ( IS_ERR(x->bdev) )
- {
- DPRINTK("vbd_grow: device %08x doesn't exist.\n", x->extent.device);
- grow->status = BLKIF_BE_STATUS_EXTENT_NOT_FOUND;
- goto out;
- }
- /* XXXcl maybe bd_claim? */
-
- if ( (x->bdev->bd_disk == NULL) )
- {
- DPRINTK("vbd_grow: device %08x doesn't exist.\n", x->extent.device);
- grow->status = BLKIF_BE_STATUS_EXTENT_NOT_FOUND;
- bdev_put(x->bdev);
- goto out;
- }
-
- /* get size in sectors */
- if ( x->bdev->bd_part )
- sz = x->bdev->bd_part->nr_sects;
- else
- sz = x->bdev->bd_disk->capacity;
-
- vbd->type = (x->bdev->bd_disk->flags & GENHD_FL_CD) ?
- VDISK_TYPE_CDROM : VDISK_TYPE_DISK;
-
-#else
- if( !blk_size[MAJOR(x->extent.device)] )
- {
- DPRINTK("vbd_grow: device %08x doesn't exist.\n", x->extent.device);
- grow->status = BLKIF_BE_STATUS_EXTENT_NOT_FOUND;
- goto out;
- }
-
- /* convert blocks (1KB) to sectors */
- sz = blk_size[MAJOR(x->extent.device)][MINOR(x->extent.device)] * 2;
-
- if ( sz == 0 )
- {
- DPRINTK("vbd_grow: device %08x zero size!\n", x->extent.device);
- grow->status = BLKIF_BE_STATUS_EXTENT_NOT_FOUND;
- goto out;
- }
-#endif
-
- /*
- * NB. This test assumes sector_start == 0, which is always the case
- * in Xen 1.3. In fact the whole grow/shrink interface could do with
- * some simplification.
- */
- if ( x->extent.sector_length > sz )
- x->extent.sector_length = sz;
-
- DPRINTK("vbd_grow: requested_len %llu actual_len %lu\n",
- x->extent.sector_length, sz);
-
- for ( px = &vbd->extents; *px != NULL; px = &(*px)->next )
- continue;
-
- *px = x; /* ATOMIC: no need for vbd_lock. */
-
- DPRINTK("Successful grow of vdev=%04x (dom=%u)\n",
- vdevice, grow->domid);
-
- grow->status = BLKIF_BE_STATUS_OKAY;
- return;
-
- out:
- kfree(x);
-}
-
-
-void vbd_shrink(blkif_be_vbd_shrink_t *shrink)
-{
- blkif_t *blkif;
- blkif_extent_le_t **px, *x;
- vbd_t *vbd = NULL;
- rb_node_t *rb;
- blkif_vdev_t vdevice = shrink->vdevice;
-
- blkif = blkif_find_by_handle(shrink->domid, shrink->blkif_handle);
- if ( unlikely(blkif == NULL) )
- {
- DPRINTK("vbd_shrink attempted for non-existent blkif (%u,%u)\n",
- shrink->domid, shrink->blkif_handle);
- shrink->status = BLKIF_BE_STATUS_INTERFACE_NOT_FOUND;
- return;
- }
-
- rb = blkif->vbd_rb.rb_node;
- while ( rb != NULL )
- {
- vbd = rb_entry(rb, vbd_t, rb);
- if ( vdevice < vbd->vdevice )
- rb = rb->rb_left;
- else if ( vdevice > vbd->vdevice )
- rb = rb->rb_right;
- else
- break;
- }
-
- if ( unlikely(vbd == NULL) || unlikely(vbd->vdevice != vdevice) )
- {
- shrink->status = BLKIF_BE_STATUS_VBD_NOT_FOUND;
- return;
- }
-
- if ( unlikely(vbd->extents == NULL) )
- {
- shrink->status = BLKIF_BE_STATUS_EXTENT_NOT_FOUND;
- return;
- }
-
- /* Find the last extent. We now know that there is at least one. */
- for ( px = &vbd->extents; (*px)->next != NULL; px = &(*px)->next )
- continue;
-
- x = *px;
- *px = x->next; /* ATOMIC: no need for vbd_lock. */
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
- bdev_put(x->bdev);
-#endif
- kfree(x);
-
- shrink->status = BLKIF_BE_STATUS_OKAY;
-}
-
-
-void vbd_destroy(blkif_be_vbd_destroy_t *destroy)
-{
- blkif_t *blkif;
- vbd_t *vbd;
- rb_node_t *rb;
- blkif_extent_le_t *x, *t;
- blkif_vdev_t vdevice = destroy->vdevice;
-
- blkif = blkif_find_by_handle(destroy->domid, destroy->blkif_handle);
- if ( unlikely(blkif == NULL) )
- {
- DPRINTK("vbd_destroy attempted for non-existent blkif (%u,%u)\n",
- destroy->domid, destroy->blkif_handle);
- destroy->status = BLKIF_BE_STATUS_INTERFACE_NOT_FOUND;
- return;
- }
-
- rb = blkif->vbd_rb.rb_node;
- while ( rb != NULL )
- {
- vbd = rb_entry(rb, vbd_t, rb);
- if ( vdevice < vbd->vdevice )
- rb = rb->rb_left;
- else if ( vdevice > vbd->vdevice )
- rb = rb->rb_right;
- else
- goto found;
- }
-
- destroy->status = BLKIF_BE_STATUS_VBD_NOT_FOUND;
- return;
-
- found:
- spin_lock(&blkif->vbd_lock);
- rb_erase(rb, &blkif->vbd_rb);
- spin_unlock(&blkif->vbd_lock);
-
- x = vbd->extents;
- kfree(vbd);
-
- while ( x != NULL )
- {
- t = x->next;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
- bdev_put(x->bdev);
-#endif
- kfree(x);
- x = t;
- }
-}
-
-
-void destroy_all_vbds(blkif_t *blkif)
-{
- vbd_t *vbd;
- rb_node_t *rb;
- blkif_extent_le_t *x, *t;
-
- spin_lock(&blkif->vbd_lock);
-
- while ( (rb = blkif->vbd_rb.rb_node) != NULL )
- {
- vbd = rb_entry(rb, vbd_t, rb);
-
- rb_erase(rb, &blkif->vbd_rb);
- x = vbd->extents;
- kfree(vbd);
-
- while ( x != NULL )
- {
- t = x->next;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
- bdev_put(x->bdev);
-#endif
- kfree(x);
- x = t;
- }
- }
-
- spin_unlock(&blkif->vbd_lock);
-}
-
-
-static int vbd_probe_single(blkif_t *blkif, vdisk_t *vbd_info, vbd_t *vbd)
-{
- blkif_extent_le_t *x;
-
- vbd_info->device = vbd->vdevice;
- vbd_info->info = vbd->type;
- if ( vbd->readonly )
- vbd_info->info |= VDISK_FLAG_RO;
- vbd_info->capacity = 0ULL;
- for ( x = vbd->extents; x != NULL; x = x->next )
- vbd_info->capacity += x->extent.sector_length;
-
- return 0;
-}
-
-
-int vbd_probe(blkif_t *blkif, vdisk_t *vbd_info, int max_vbds)
-{
- int rc = 0, nr_vbds = 0;
- rb_node_t *rb;
-
- spin_lock(&blkif->vbd_lock);
-
- if ( (rb = blkif->vbd_rb.rb_node) == NULL )
- goto out;
-
- new_subtree:
- /* STEP 1. Find least node (it'll be left-most). */
- while ( rb->rb_left != NULL )
- rb = rb->rb_left;
-
- for ( ; ; )
- {
- /* STEP 2. Dealt with left subtree. Now process current node. */
- if ( (rc = vbd_probe_single(blkif, &vbd_info[nr_vbds],
- rb_entry(rb, vbd_t, rb))) != 0 )
- goto out;
- if ( ++nr_vbds == max_vbds )
- goto out;
-
- /* STEP 3. Process right subtree, if any. */
- if ( rb->rb_right != NULL )
- {
- rb = rb->rb_right;
- goto new_subtree;
- }
-
-        /* STEP 4. Done both subtrees. Head back through ancestors. */
- for ( ; ; )
- {
- /* We're done when we get back to the root node. */
- if ( rb->rb_parent == NULL )
- goto out;
- /* If we are left of parent, then parent is next to process. */
- if ( rb->rb_parent->rb_left == rb )
- break;
- /* If we are right of parent, then we climb to grandparent. */
- rb = rb->rb_parent;
- }
-
- rb = rb->rb_parent;
- }
-
- out:
- spin_unlock(&blkif->vbd_lock);
- return (rc == 0) ? nr_vbds : rc;
-}
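
vbd_probe() above visits the tree in key order without recursion, using
only parent pointers: go all the way left, visit, take the right subtree
if there is one, otherwise climb until arriving from a left child. The
same traversal on a bare node structure (stand-alone sketch):

#include <stdio.h>

struct node { int key; struct node *left, *right, *parent; };

void inorder(struct node *n)
{
    if (n == NULL)
        return;
    while (n->left != NULL)           /* STEP 1: least key is left-most */
        n = n->left;
    for (;;) {
        printf("%d\n", n->key);       /* STEP 2: visit current node */
        if (n->right != NULL) {       /* STEP 3: right subtree, if any */
            n = n->right;
            while (n->left != NULL)
                n = n->left;
            continue;
        }
        for (;;) {                    /* STEP 4: climb back up */
            if (n->parent == NULL)
                return;               /* back at the root: done */
            if (n->parent->left == n)
                break;                /* parent is the next to visit */
            n = n->parent;
        }
        n = n->parent;
    }
}
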
-
-
-int vbd_translate(phys_seg_t *pseg, blkif_t *blkif, int operation)
-{
- blkif_extent_le_t *x;
- vbd_t *vbd;
- rb_node_t *rb;
- blkif_sector_t sec_off;
- unsigned long nr_secs;
-
- /* Take the vbd_lock because another thread could be updating the tree. */
- spin_lock(&blkif->vbd_lock);
-
- rb = blkif->vbd_rb.rb_node;
- while ( rb != NULL )
- {
- vbd = rb_entry(rb, vbd_t, rb);
- if ( pseg->dev < vbd->vdevice )
- rb = rb->rb_left;
- else if ( pseg->dev > vbd->vdevice )
- rb = rb->rb_right;
- else
- goto found;
- }
-
-    DPRINTK("vbd_translate: domain %u attempted to access "
- "non-existent VBD.\n", blkif->domid);
-
- spin_unlock(&blkif->vbd_lock);
- return -ENODEV;
-
- found:
-
- if ( (operation == WRITE) && vbd->readonly )
- {
- spin_unlock(&blkif->vbd_lock);
- return -EACCES;
- }
-
- /*
- * Now iterate through the list of blkif_extents, working out which should
- * be used to perform the translation.
- */
- sec_off = pseg->sector_number;
- nr_secs = pseg->nr_sects;
- for ( x = vbd->extents; x != NULL; x = x->next )
- {
- if ( sec_off < x->extent.sector_length )
- {
- pseg->dev = x->extent.device;
- pseg->bdev = x->bdev;
- pseg->sector_number = x->extent.sector_start + sec_off;
- if ( unlikely((sec_off + nr_secs) > x->extent.sector_length) )
- goto overrun;
- spin_unlock(&blkif->vbd_lock);
- return 1;
- }
- sec_off -= x->extent.sector_length;
- }
-
- DPRINTK("vbd_translate: end of vbd.\n");
- spin_unlock(&blkif->vbd_lock);
- return -EACCES;
-
- /*
- * Here we deal with overrun onto the following extent. We don't deal with
- * overrun of more than one boundary since each request is restricted to
- * 2^9 512-byte sectors, so it should be trivial for control software to
- * ensure that extents are large enough to prevent excessive overrun.
- */
- overrun:
-
- /* Adjust length of first chunk to run to end of first extent. */
- pseg[0].nr_sects = x->extent.sector_length - sec_off;
-
- /* Set second chunk buffer and length to start where first chunk ended. */
- pseg[1].buffer = pseg[0].buffer + (pseg[0].nr_sects << 9);
- pseg[1].nr_sects = nr_secs - pseg[0].nr_sects;
-
- /* Now move to the next extent. Check it exists and is long enough! */
- if ( unlikely((x = x->next) == NULL) ||
- unlikely(x->extent.sector_length < pseg[1].nr_sects) )
- {
- DPRINTK("vbd_translate: multiple overruns or end of vbd.\n");
- spin_unlock(&blkif->vbd_lock);
- return -EACCES;
- }
-
- /* Store the real device and start sector for the second chunk. */
- pseg[1].dev = x->extent.device;
- pseg[1].bdev = x->bdev;
- pseg[1].sector_number = x->extent.sector_start;
-
- spin_unlock(&blkif->vbd_lock);
- return 2;
-}
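
Concretely, the overrun path above turns one virtual segment into two
physical ones. A worked example with made-up extents: extent A holds
virtual sectors 0-99 starting at device sector 1000, extent B follows it
starting at device sector 5000; a 10-sector access at virtual sector 95
splits into 5 sectors at the tail of A and 5 at the head of B.

#include <stdio.h>

struct seg { unsigned long sector, nr_sects, buffer; };

int main(void)
{
    unsigned long a_start = 1000, a_len = 100, b_start = 5000;
    unsigned long sec_off = 95, nr_secs = 10, buffer = 0x10000;
    struct seg s[2];

    s[0].sector   = a_start + sec_off;        /* 1095 */
    s[0].nr_sects = a_len - sec_off;          /* 5: run to the end of A */
    s[0].buffer   = buffer;
    s[1].sector   = b_start;                  /* 5000: start of B */
    s[1].nr_sects = nr_secs - s[0].nr_sects;  /* the remaining 5 */
    s[1].buffer   = buffer + (s[0].nr_sects << 9);

    for (int i = 0; i < 2; i++)
        printf("seg %d: sector %lu, %lu sects, buffer %#lx\n",
               i, s[i].sector, s[i].nr_sects, s[i].buffer);
    return 0;
}
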
-
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-
-#define MAJOR_XEN(dev) ((dev)>>8)
-#define MINOR_XEN(dev) ((dev) & 0xff)
-
-#ifndef FANCY_REMAPPING
-static dev_t vbd_map_devnum(blkif_pdev_t cookie)
-{
- int major = MAJOR_XEN(cookie);
- int minor = MINOR_XEN(cookie);
-
- return MKDEV(major, minor);
-}
-#else
-#define XEN_IDE0_MAJOR IDE0_MAJOR
-#define XEN_IDE1_MAJOR IDE1_MAJOR
-#define XEN_IDE2_MAJOR IDE2_MAJOR
-#define XEN_IDE3_MAJOR IDE3_MAJOR
-#define XEN_IDE4_MAJOR IDE4_MAJOR
-#define XEN_IDE5_MAJOR IDE5_MAJOR
-#define XEN_IDE6_MAJOR IDE6_MAJOR
-#define XEN_IDE7_MAJOR IDE7_MAJOR
-#define XEN_IDE8_MAJOR IDE8_MAJOR
-#define XEN_IDE9_MAJOR IDE9_MAJOR
-#define XEN_SCSI_DISK0_MAJOR SCSI_DISK0_MAJOR
-#define XEN_SCSI_DISK1_MAJOR SCSI_DISK1_MAJOR
-#define XEN_SCSI_DISK2_MAJOR SCSI_DISK2_MAJOR
-#define XEN_SCSI_DISK3_MAJOR SCSI_DISK3_MAJOR
-#define XEN_SCSI_DISK4_MAJOR SCSI_DISK4_MAJOR
-#define XEN_SCSI_DISK5_MAJOR SCSI_DISK5_MAJOR
-#define XEN_SCSI_DISK6_MAJOR SCSI_DISK6_MAJOR
-#define XEN_SCSI_DISK7_MAJOR SCSI_DISK7_MAJOR
-#define XEN_SCSI_CDROM_MAJOR SCSI_CDROM_MAJOR
-
-static dev_t vbd_map_devnum(blkif_pdev_t cookie)
-{
- int new_major;
- int major = MAJOR_XEN(cookie);
- int minor = MINOR_XEN(cookie);
-
- switch (major) {
- case XEN_IDE0_MAJOR: new_major = IDE0_MAJOR; break;
- case XEN_IDE1_MAJOR: new_major = IDE1_MAJOR; break;
- case XEN_IDE2_MAJOR: new_major = IDE2_MAJOR; break;
- case XEN_IDE3_MAJOR: new_major = IDE3_MAJOR; break;
- case XEN_IDE4_MAJOR: new_major = IDE4_MAJOR; break;
- case XEN_IDE5_MAJOR: new_major = IDE5_MAJOR; break;
- case XEN_IDE6_MAJOR: new_major = IDE6_MAJOR; break;
- case XEN_IDE7_MAJOR: new_major = IDE7_MAJOR; break;
- case XEN_IDE8_MAJOR: new_major = IDE8_MAJOR; break;
- case XEN_IDE9_MAJOR: new_major = IDE9_MAJOR; break;
- case XEN_SCSI_DISK0_MAJOR: new_major = SCSI_DISK0_MAJOR; break;
- case XEN_SCSI_DISK1_MAJOR ... XEN_SCSI_DISK7_MAJOR:
- new_major = SCSI_DISK1_MAJOR + major - XEN_SCSI_DISK1_MAJOR;
- break;
- case XEN_SCSI_CDROM_MAJOR: new_major = SCSI_CDROM_MAJOR; break;
- default: new_major = 0; break;
- }
-
- return MKDEV(new_major, minor);
-}
-#endif
-
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) */
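
MAJOR_XEN/MINOR_XEN above assume the 16-bit device cookie carries the
major in the high byte and the minor in the low byte. A quick round-trip
check of that packing:

#include <assert.h>

#define MAJOR_XEN(dev) ((dev) >> 8)
#define MINOR_XEN(dev) ((dev) & 0xff)

int main(void)
{
    unsigned int cookie = (8u << 8) | 1u;   /* major 8, minor 1 */
    assert(MAJOR_XEN(cookie) == 8);
    assert(MINOR_XEN(cookie) == 1);
    return 0;
}
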
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/drivers/xen/blkfront/Kconfig
--- a/linux-2.6.11-xen-sparse/drivers/xen/blkfront/Kconfig Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,6 +0,0 @@
-
-config XENBLOCK
- tristate "Block device driver"
- depends on ARCH_XEN
- help
- Block device driver for Xen
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/drivers/xen/blkfront/Makefile
--- a/linux-2.6.11-xen-sparse/drivers/xen/blkfront/Makefile Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,3 +0,0 @@
-
-obj-y := blkfront.o vbd.o
-
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/drivers/xen/blkfront/blkfront.c
--- a/linux-2.6.11-xen-sparse/drivers/xen/blkfront/blkfront.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,1410 +0,0 @@
-/******************************************************************************
- * blkfront.c
- *
- * XenLinux virtual block-device driver.
- *
- * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
- * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
- * Copyright (c) 2004, Christian Limpach
- *
- * This file may be distributed separately from the Linux kernel, or
- * incorporated into other software packages, subject to the following license:
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#include <linux/version.h>
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-#include "block.h"
-#else
-#include "common.h"
-#include <linux/blk.h>
-#include <linux/tqueue.h>
-#endif
-
-#include <linux/cdrom.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <scsi/scsi.h>
-#include <asm-xen/ctrl_if.h>
-
-typedef unsigned char byte; /* from linux/ide.h */
-
-/* Control whether runtime update of vbds is enabled. */
-#define ENABLE_VBD_UPDATE 1
-
-#if ENABLE_VBD_UPDATE
-static void vbd_update(void);
-#else
-static void vbd_update(void){};
-#endif
-
-#define BLKIF_STATE_CLOSED 0
-#define BLKIF_STATE_DISCONNECTED 1
-#define BLKIF_STATE_CONNECTED 2
-
-static int blkif_handle = 0;
-static unsigned int blkif_state = BLKIF_STATE_CLOSED;
-static unsigned int blkif_evtchn = 0;
-static unsigned int blkif_irq = 0;
-
-static int blkif_control_rsp_valid;
-static blkif_response_t blkif_control_rsp;
-
-static blkif_ring_t *blk_ring = NULL;
-static BLKIF_RING_IDX resp_cons; /* Response consumer for comms ring. */
-static BLKIF_RING_IDX req_prod; /* Private request producer. */
-
-unsigned long rec_ring_free;
-blkif_request_t rec_ring[BLKIF_RING_SIZE];
-
-static int recovery = 0; /* "Recovery in progress" flag. Protected
- * by the blkif_io_lock */
-
-/* We plug the I/O ring if the driver is suspended or if the ring is full. */
-#define BLKIF_RING_FULL (((req_prod - resp_cons) == BLKIF_RING_SIZE) || \
- (blkif_state != BLKIF_STATE_CONNECTED))
-
-static void kick_pending_request_queues(void);
-
-int __init xlblk_init(void);
-
-void blkif_completion( blkif_request_t *req );
-
-static inline int GET_ID_FROM_FREELIST( void )
-{
- unsigned long free = rec_ring_free;
-
- if ( free > BLKIF_RING_SIZE )
- BUG();
-
- rec_ring_free = rec_ring[free].id;
-
- rec_ring[free].id = 0x0fffffee; /* debug */
-
- return free;
-}
-
-static inline void ADD_ID_TO_FREELIST( unsigned long id )
-{
- rec_ring[id].id = rec_ring_free;
- rec_ring_free = id;
-}
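
The two helpers above thread the free list through the ring itself: a free
slot's id field stores the index of the next free slot, so no extra
storage is needed. A stand-alone sketch of the same trick (initialisation
shown here for completeness; it lives elsewhere in the driver):

#include <assert.h>

#define N 8

static struct { unsigned long id; } ring[N];   /* id doubles as a link */
static unsigned long free_head;

void freelist_init(void)
{
    for (unsigned long i = 0; i < N; i++)
        ring[i].id = i + 1;           /* slot i links to slot i+1 */
    free_head = 0;                    /* the value N means "empty" */
}

unsigned long get_id(void)
{
    unsigned long id = free_head;
    assert(id < N);                   /* mirrors the BUG() check */
    free_head = ring[id].id;          /* unlink from the free list */
    return id;                        /* ring[id] now holds live state */
}

void put_id(unsigned long id)
{
    ring[id].id = free_head;          /* relink at the head */
    free_head = id;
}
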
-
-
-/************************ COMMON CODE (inlined) ************************/
-
-/* Kernel-specific definitions used in the common code */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-#define DISABLE_SCATTERGATHER()
-#else
-static int sg_operation = -1;
-#define DISABLE_SCATTERGATHER() (sg_operation = -1)
-#endif
-
-static inline void translate_req_to_pfn(blkif_request_t *xreq,
- blkif_request_t *req)
-{
- int i;
-
- xreq->operation = req->operation;
- xreq->nr_segments = req->nr_segments;
- xreq->device = req->device;
- /* preserve id */
- xreq->sector_number = req->sector_number;
-
- for ( i = 0; i < req->nr_segments; i++ )
- xreq->frame_and_sects[i] = machine_to_phys(req->frame_and_sects[i]);
-}
-
-static inline void translate_req_to_mfn(blkif_request_t *xreq,
- blkif_request_t *req)
-{
- int i;
-
- xreq->operation = req->operation;
- xreq->nr_segments = req->nr_segments;
- xreq->device = req->device;
- xreq->id = req->id; /* copy id (unlike above) */
- xreq->sector_number = req->sector_number;
-
- for ( i = 0; i < req->nr_segments; i++ )
- xreq->frame_and_sects[i] = phys_to_machine(req->frame_and_sects[i]);
-}
-
-
-static inline void flush_requests(void)
-{
- DISABLE_SCATTERGATHER();
- wmb(); /* Ensure that the frontend can see the requests. */
- blk_ring->req_prod = req_prod;
- notify_via_evtchn(blkif_evtchn);
-}
-
-
-
-
-/************************** KERNEL VERSION 2.6 **************************/
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-
-module_init(xlblk_init);
-
-#if ENABLE_VBD_UPDATE
-static void update_vbds_task(void *unused)
-{
- xlvbd_update_vbds();
-}
-
-static void vbd_update(void)
-{
- static DECLARE_WORK(update_tq, update_vbds_task, NULL);
- schedule_work(&update_tq);
-}
-#endif /* ENABLE_VBD_UPDATE */
-
-static void kick_pending_request_queues(void)
-{
-
- if ( (xlbd_blk_queue != NULL) &&
- test_bit(QUEUE_FLAG_STOPPED, &xlbd_blk_queue->queue_flags) )
- {
- blk_start_queue(xlbd_blk_queue);
- /* XXXcl call to request_fn should not be needed but
- * we get stuck without... needs investigating
- */
- xlbd_blk_queue->request_fn(xlbd_blk_queue);
- }
-
-}
-
-
-int blkif_open(struct inode *inode, struct file *filep)
-{
- struct gendisk *gd = inode->i_bdev->bd_disk;
- struct xlbd_disk_info *di = (struct xlbd_disk_info *)gd->private_data;
-
- /* Update of usage count is protected by per-device semaphore. */
- di->mi->usage++;
-
- return 0;
-}
-
-
-int blkif_release(struct inode *inode, struct file *filep)
-{
- struct gendisk *gd = inode->i_bdev->bd_disk;
- struct xlbd_disk_info *di = (struct xlbd_disk_info *)gd->private_data;
-
- /*
- * When usage drops to zero it may allow more VBD updates to occur.
- * Update of usage count is protected by a per-device semaphore.
- */
- if (--di->mi->usage == 0) {
- vbd_update();
- }
-
- return 0;
-}
-
-
-int blkif_ioctl(struct inode *inode, struct file *filep,
- unsigned command, unsigned long argument)
-{
- int i;
- /* struct gendisk *gd = inode->i_bdev->bd_disk; */
-
- DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, dev: 0x%04x\n",
- command, (long)argument, inode->i_rdev);
-
- switch (command) {
-
- case HDIO_GETGEO:
- /* return ENOSYS to use defaults */
- return -ENOSYS;
-
- case CDROMMULTISESSION:
- DPRINTK("FIXME: support multisession CDs later\n");
- for ( i = 0; i < sizeof(struct cdrom_multisession); i++ )
- if ( put_user(0, (byte *)(argument + i)) ) return -EFAULT;
- return 0;
-
- default:
- /*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n",
- command);*/
- return -EINVAL; /* same return as native Linux */
- }
-
- return 0;
-}
-
-#if 0
-/* check media change: should probably do something here in some cases :-) */
-int blkif_check(kdev_t dev)
-{
- DPRINTK("blkif_check\n");
- return 0;
-}
-
-int blkif_revalidate(kdev_t dev)
-{
- struct block_device *bd;
- struct gendisk *gd;
- xen_block_t *disk;
- unsigned long capacity;
- int i, rc = 0;
-
- if ( (bd = bdget(dev)) == NULL )
- return -EINVAL;
-
- /*
- * Update of partition info, and check of usage count, is protected
- * by the per-block-device semaphore.
- */
- down(&bd->bd_sem);
-
- if ( ((gd = get_gendisk(dev)) == NULL) ||
- ((disk = xldev_to_xldisk(dev)) == NULL) ||
- ((capacity = gd->part[MINOR(dev)].nr_sects) == 0) )
- {
- rc = -EINVAL;
- goto out;
- }
-
- if ( disk->usage > 1 )
- {
- rc = -EBUSY;
- goto out;
- }
-
- /* Only reread partition table if VBDs aren't mapped to partitions. */
- if ( !(gd->flags[MINOR(dev) >> gd->minor_shift] & GENHD_FL_VIRT_PARTNS) )
- {
- for ( i = gd->max_p - 1; i >= 0; i-- )
- {
- invalidate_device(dev+i, 1);
- gd->part[MINOR(dev+i)].start_sect = 0;
- gd->part[MINOR(dev+i)].nr_sects = 0;
- gd->sizes[MINOR(dev+i)] = 0;
- }
-
- grok_partitions(gd, MINOR(dev)>>gd->minor_shift, gd->max_p, capacity);
- }
-
- out:
- up(&bd->bd_sem);
- bdput(bd);
- return rc;
-}
-#endif
-
-/*
- * blkif_queue_request
- *
- * request block io
- *
- * id: for guest use only.
- * operation: BLKIF_OP_{READ,WRITE,PROBE}
- * buffer: buffer to read/write into. this should be a
- * virtual address in the guest os.
- */
-static int blkif_queue_request(struct request *req)
-{
- struct xlbd_disk_info *di =
- (struct xlbd_disk_info *)req->rq_disk->private_data;
- unsigned long buffer_ma;
- blkif_request_t *ring_req;
- struct bio *bio;
- struct bio_vec *bvec;
- int idx;
- unsigned long id;
- unsigned int fsect, lsect;
-
- if ( unlikely(blkif_state != BLKIF_STATE_CONNECTED) )
- return 1;
-
- /* Fill out a communications ring structure. */
- ring_req = &blk_ring->ring[MASK_BLKIF_IDX(req_prod)].req;
- id = GET_ID_FROM_FREELIST();
- rec_ring[id].id = (unsigned long) req;
-
- ring_req->id = id;
- ring_req->operation = rq_data_dir(req) ? BLKIF_OP_WRITE :
- BLKIF_OP_READ;
- ring_req->sector_number = (blkif_sector_t)req->sector;
- ring_req->device = di->xd_device;
-
- ring_req->nr_segments = 0;
- rq_for_each_bio(bio, req)
- {
- bio_for_each_segment(bvec, bio, idx)
- {
- if ( ring_req->nr_segments == BLKIF_MAX_SEGMENTS_PER_REQUEST )
- BUG();
- buffer_ma = page_to_phys(bvec->bv_page);
- fsect = bvec->bv_offset >> 9;
- lsect = fsect + (bvec->bv_len >> 9) - 1;
- ring_req->frame_and_sects[ring_req->nr_segments++] =
- buffer_ma | (fsect << 3) | lsect;
- }
- }
-
- req_prod++;
-
- /* Keep a private copy so we can reissue requests when recovering. */
- translate_req_to_pfn(&rec_ring[id], ring_req);
-
- return 0;
-}
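
The segment encoding built above packs everything into one word: the
page-aligned machine address, the first sector in bits 3-5 and the last
sector in bits 0-2, which fits because both sector numbers are 0..7. A
round-trip of that encoding:

#include <assert.h>

int main(void)
{
    unsigned long buffer_ma = 0x2a000UL;     /* page-aligned machine addr */
    unsigned long fsect = 2, lsect = 5;      /* sectors 2..5 of the page */

    unsigned long fas = buffer_ma | (fsect << 3) | lsect;

    assert((fas & ~0xfffUL) == buffer_ma);   /* the frame survives */
    assert(((fas >> 3) & 7) == fsect);       /* first sector */
    assert((fas & 7) == lsect);              /* last sector */
    return 0;
}
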
-
-
-/*
- * do_blkif_request
- * read a block; request is in a request queue
- */
-void do_blkif_request(request_queue_t *rq)
-{
- struct request *req;
- int queued;
-
- DPRINTK("Entered do_blkif_request\n");
-
- queued = 0;
-
- while ((req = elv_next_request(rq)) != NULL) {
- if (!blk_fs_request(req)) {
- end_request(req, 0);
- continue;
- }
-
- if ( BLKIF_RING_FULL )
- {
- blk_stop_queue(rq);
- break;
- }
- DPRINTK("do_blk_req %p: cmd %p, sec %lx, (%u/%li) buffer:%p [%s]\n",
- req, req->cmd, req->sector, req->current_nr_sectors,
- req->nr_sectors, req->buffer,
- rq_data_dir(req) ? "write" : "read");
- blkdev_dequeue_request(req);
- if (blkif_queue_request(req)) {
- blk_stop_queue(rq);
- break;
- }
- queued++;
- }
-
- if (queued != 0)
- flush_requests();
-}
-
-
-static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs)
-{
- struct request *req;
- blkif_response_t *bret;
- BLKIF_RING_IDX i, rp;
- unsigned long flags;
-
- spin_lock_irqsave(&blkif_io_lock, flags);
-
- if ( unlikely(blkif_state == BLKIF_STATE_CLOSED) ||
- unlikely(recovery) )
- {
- spin_unlock_irqrestore(&blkif_io_lock, flags);
- return IRQ_HANDLED;
- }
-
- rp = blk_ring->resp_prod;
- rmb(); /* Ensure we see queued responses up to 'rp'. */
-
- for ( i = resp_cons; i != rp; i++ )
- {
- unsigned long id;
- bret = &blk_ring->ring[MASK_BLKIF_IDX(i)].resp;
-
- id = bret->id;
- req = (struct request *)rec_ring[id].id;
-
- blkif_completion( &rec_ring[id] );
-
- ADD_ID_TO_FREELIST(id); /* overwrites req */
-
- switch ( bret->operation )
- {
- case BLKIF_OP_READ:
- case BLKIF_OP_WRITE:
- if ( unlikely(bret->status != BLKIF_RSP_OKAY) )
- DPRINTK("Bad return from blkdev data request: %x\n",
- bret->status);
-
- if ( unlikely(end_that_request_first
- (req,
- (bret->status == BLKIF_RSP_OKAY),
- req->hard_nr_sectors)) )
- BUG();
- end_that_request_last(req);
-
- break;
- case BLKIF_OP_PROBE:
- memcpy(&blkif_control_rsp, bret, sizeof(*bret));
- blkif_control_rsp_valid = 1;
- break;
- default:
- BUG();
- }
- }
-
- resp_cons = i;
-
- kick_pending_request_queues();
-
- spin_unlock_irqrestore(&blkif_io_lock, flags);
-
- return IRQ_HANDLED;
-}
-
-#else
-/************************** KERNEL VERSION 2.4 **************************/
-
-static kdev_t sg_dev;
-static unsigned long sg_next_sect;
-
-/*
- * Request queues with outstanding work, but ring is currently full.
- * We need no special lock here, as we always access this with the
- * blkif_io_lock held. We only need a small maximum list.
- */
-#define MAX_PENDING 8
-static request_queue_t *pending_queues[MAX_PENDING];
-static int nr_pending;
-
-
-#define blkif_io_lock io_request_lock
-
-/*============================================================================*/
-#if ENABLE_VBD_UPDATE
-
-/*
- * blkif_update_int/update-vbds_task - handle VBD update events.
- * Schedule a task for keventd to run, which will update the VBDs and perform
- * the corresponding updates to our view of VBD state.
- */
-static void update_vbds_task(void *unused)
-{
- xlvbd_update_vbds();
-}
-
-static void vbd_update(void)
-{
- static struct tq_struct update_tq;
- update_tq.routine = update_vbds_task;
- schedule_task(&update_tq);
-}
-
-#endif /* ENABLE_VBD_UPDATE */
-/*============================================================================*/
-
-
-static void kick_pending_request_queues(void)
-{
- /* We kick pending request queues if the ring is reasonably empty. */
- if ( (nr_pending != 0) &&
- ((req_prod - resp_cons) < (BLKIF_RING_SIZE >> 1)) )
- {
- /* Attempt to drain the queue, but bail if the ring becomes full. */
- while ( (nr_pending != 0) && !BLKIF_RING_FULL )
- do_blkif_request(pending_queues[--nr_pending]);
- }
-}
-
-int blkif_open(struct inode *inode, struct file *filep)
-{
- short xldev = inode->i_rdev;
- struct gendisk *gd = get_gendisk(xldev);
- xl_disk_t *disk = xldev_to_xldisk(inode->i_rdev);
- short minor = MINOR(xldev);
-
- if ( gd->part[minor].nr_sects == 0 )
- {
- /*
- * Device either doesn't exist, or has zero capacity; we use a few
- * cheesy heuristics to return the relevant error code
- */
- if ( (gd->sizes[minor >> gd->minor_shift] != 0) ||
- ((minor & (gd->max_p - 1)) != 0) )
- {
- /*
- * We have a real device but no such partition, or we were given only
- * a partition number, so guess that this is the problem.
- */
- return -ENXIO; /* no such device or address */
- }
- else if ( gd->flags[minor >> gd->minor_shift] & GENHD_FL_REMOVABLE )
- {
- /* This is a removable device => assume that media is missing. */
- return -ENOMEDIUM; /* media not present (this is a guess) */
- }
- else
- {
- /* Just go for the general 'no such device' error. */
- return -ENODEV; /* no such device */
- }
- }
-
- /* Update of usage count is protected by per-device semaphore. */
- disk->usage++;
-
- return 0;
-}
-
-
-int blkif_release(struct inode *inode, struct file *filep)
-{
- xl_disk_t *disk = xldev_to_xldisk(inode->i_rdev);
-
- /*
- * When usage drops to zero it may allow more VBD updates to occur.
- * Update of usage count is protected by a per-device semaphore.
- */
- if ( --disk->usage == 0 ) {
- vbd_update();
- }
-
- return 0;
-}
-
-
-int blkif_ioctl(struct inode *inode, struct file *filep,
- unsigned command, unsigned long argument)
-{
- kdev_t dev = inode->i_rdev;
- struct hd_geometry *geo = (struct hd_geometry *)argument;
- struct gendisk *gd;
- struct hd_struct *part;
- int i;
- unsigned short cylinders;
- byte heads, sectors;
-
- /* NB. No need to check permissions. That is done for us. */
-
- DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, dev: 0x%04x\n",
- command, (long) argument, dev);
-
- gd = get_gendisk(dev);
- part = &gd->part[MINOR(dev)];
-
- switch ( command )
- {
- case BLKGETSIZE:
- DPRINTK_IOCTL(" BLKGETSIZE: %x %lx\n", BLKGETSIZE, part->nr_sects);
- return put_user(part->nr_sects, (unsigned long *) argument);
-
- case BLKGETSIZE64:
- DPRINTK_IOCTL(" BLKGETSIZE64: %x %llx\n", BLKGETSIZE64,
- (u64)part->nr_sects * 512);
- return put_user((u64)part->nr_sects * 512, (u64 *) argument);
-
- case BLKRRPART: /* re-read partition table */
- DPRINTK_IOCTL(" BLKRRPART: %x\n", BLKRRPART);
- return blkif_revalidate(dev);
-
- case BLKSSZGET:
- return hardsect_size[MAJOR(dev)][MINOR(dev)];
-
- case BLKBSZGET: /* get block size */
- DPRINTK_IOCTL(" BLKBSZGET: %x\n", BLKBSZGET);
- break;
-
- case BLKBSZSET: /* set block size */
- DPRINTK_IOCTL(" BLKBSZSET: %x\n", BLKBSZSET);
- break;
-
- case BLKRASET: /* set read-ahead */
- DPRINTK_IOCTL(" BLKRASET: %x\n", BLKRASET);
- break;
-
- case BLKRAGET: /* get read-ahead */
- DPRINTK_IOCTL(" BLKRAFET: %x\n", BLKRAGET);
- break;
-
- case HDIO_GETGEO:
- DPRINTK_IOCTL(" HDIO_GETGEO: %x\n", HDIO_GETGEO);
- if (!argument) return -EINVAL;
-
- /* We don't have real geometry info, but let's at least return
- values consistent with the size of the device */
-
- heads = 0xff;
- sectors = 0x3f;
- cylinders = part->nr_sects / (heads * sectors);
-
- if (put_user(0x00, (unsigned long *) &geo->start)) return -EFAULT;
- if (put_user(heads, (byte *)&geo->heads)) return -EFAULT;
- if (put_user(sectors, (byte *)&geo->sectors)) return -EFAULT;
- if (put_user(cylinders, (unsigned short *)&geo->cylinders)) return -EFAULT;
-
- return 0;
-
- case HDIO_GETGEO_BIG:
- DPRINTK_IOCTL(" HDIO_GETGEO_BIG: %x\n", HDIO_GETGEO_BIG);
- if (!argument) return -EINVAL;
-
- /* We don't have real geometry info, but let's at least return
- values consistent with the size of the device */
-
- heads = 0xff;
- sectors = 0x3f;
- cylinders = part->nr_sects / (heads * sectors);
-
- if (put_user(0x00, (unsigned long *) &geo->start)) return -EFAULT;
- if (put_user(heads, (byte *)&geo->heads)) return -EFAULT;
- if (put_user(sectors, (byte *)&geo->sectors)) return -EFAULT;
- if (put_user(cylinders, (unsigned int *) &geo->cylinders)) return -EFAULT;
-
- return 0;
-
- case CDROMMULTISESSION:
- DPRINTK("FIXME: support multisession CDs later\n");
- for ( i = 0; i < sizeof(struct cdrom_multisession); i++ )
- if ( put_user(0, (byte *)(argument + i)) ) return -EFAULT;
- return 0;
-
- case SCSI_IOCTL_GET_BUS_NUMBER:
- DPRINTK("FIXME: SCSI_IOCTL_GET_BUS_NUMBER ioctl in XL blkif");
- return -ENOSYS;
-
- default:
- WPRINTK("ioctl %08x not supported by XL blkif\n", command);
- return -ENOSYS;
- }
-
- return 0;
-}
-
-
-
-/* check media change: should probably do something here in some cases :-) */
-int blkif_check(kdev_t dev)
-{
- DPRINTK("blkif_check\n");
- return 0;
-}
-
-int blkif_revalidate(kdev_t dev)
-{
- struct block_device *bd;
- struct gendisk *gd;
- xl_disk_t *disk;
- unsigned long capacity;
- int i, rc = 0;
-
- if ( (bd = bdget(dev)) == NULL )
- return -EINVAL;
-
- /*
- * Update of partition info, and check of usage count, is protected
- * by the per-block-device semaphore.
- */
- down(&bd->bd_sem);
-
- if ( ((gd = get_gendisk(dev)) == NULL) ||
- ((disk = xldev_to_xldisk(dev)) == NULL) ||
- ((capacity = gd->part[MINOR(dev)].nr_sects) == 0) )
- {
- rc = -EINVAL;
- goto out;
- }
-
- if ( disk->usage > 1 )
- {
- rc = -EBUSY;
- goto out;
- }
-
- /* Only reread partition table if VBDs aren't mapped to partitions. */
- if ( !(gd->flags[MINOR(dev) >> gd->minor_shift] & GENHD_FL_VIRT_PARTNS) )
- {
- for ( i = gd->max_p - 1; i >= 0; i-- )
- {
- invalidate_device(dev+i, 1);
- gd->part[MINOR(dev+i)].start_sect = 0;
- gd->part[MINOR(dev+i)].nr_sects = 0;
- gd->sizes[MINOR(dev+i)] = 0;
- }
-
- grok_partitions(gd, MINOR(dev)>>gd->minor_shift, gd->max_p, capacity);
- }
-
- out:
- up(&bd->bd_sem);
- bdput(bd);
- return rc;
-}
-
-
-/*
- * blkif_queue_request
- *
- * request block io
- *
- * id: for guest use only.
- * operation: BLKIF_OP_{READ,WRITE,PROBE}
- * buffer: buffer to read/write into. this should be a
- * virtual address in the guest os.
- */
-static int blkif_queue_request(unsigned long id,
- int operation,
- char * buffer,
- unsigned long sector_number,
- unsigned short nr_sectors,
- kdev_t device)
-{
- unsigned long buffer_ma = virt_to_bus(buffer);
- unsigned long xid;
- struct gendisk *gd;
- blkif_request_t *req;
- struct buffer_head *bh;
- unsigned int fsect, lsect;
-
- fsect = (buffer_ma & ~PAGE_MASK) >> 9;
- lsect = fsect + nr_sectors - 1;
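- /* First/last 512-byte sector of this extent within its page; a page holds 8 sectors, so lsect must not exceed 7. */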
-
- /* Buffer must be sector-aligned. Extent mustn't cross a page boundary. */
- if ( unlikely((buffer_ma & ((1<<9)-1)) != 0) )
- BUG();
- if ( lsect > 7 )
- BUG();
-
- buffer_ma &= PAGE_MASK;
-
- if ( unlikely(blkif_state != BLKIF_STATE_CONNECTED) )
- return 1;
-
- switch ( operation )
- {
-
- case BLKIF_OP_READ:
- case BLKIF_OP_WRITE:
- gd = get_gendisk(device);
-
- /*
- * Update the sector_number we'll pass down as appropriate; note that
- * we could sanity-check that the resulting sector lies within this
- * partition, but the backend driver will do that anyhow.
- */
- sector_number += gd->part[MINOR(device)].start_sect;
-
- /*
- * If this unit doesn't consist of virtual partitions then we clear
- * the partn bits from the device number.
- */
- if ( !(gd->flags[MINOR(device)>>gd->minor_shift] &
- GENHD_FL_VIRT_PARTNS) )
- device &= ~(gd->max_p - 1);
-
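- /* Try to merge into the previous ring request: same operation, same (virtual) device, and sectors contiguous with the last run. */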
- if ( (sg_operation == operation) &&
- (sg_dev == device) &&
- (sg_next_sect == sector_number) )
- {
-
- req = &blk_ring->ring[MASK_BLKIF_IDX(req_prod-1)].req;
- bh = (struct buffer_head *)id;
-
- bh->b_reqnext = (struct buffer_head *)rec_ring[req->id].id;
-
-
- rec_ring[req->id].id = id;
-
- req->frame_and_sects[req->nr_segments] =
- buffer_ma | (fsect<<3) | lsect;
- if ( ++req->nr_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST )
- sg_next_sect += nr_sectors;
- else
- DISABLE_SCATTERGATHER();
-
- /* Update the copy of the request in the recovery ring. */
- translate_req_to_pfn(&rec_ring[req->id], req );
-
- return 0;
- }
- else if ( BLKIF_RING_FULL )
- {
- return 1;
- }
- else
- {
- sg_operation = operation;
- sg_dev = device;
- sg_next_sect = sector_number + nr_sectors;
- }
- break;
-
- default:
- panic("unknown op %d\n", operation);
- }
-
- /* Fill out a communications ring structure. */
- req = &blk_ring->ring[MASK_BLKIF_IDX(req_prod)].req;
-
- xid = GET_ID_FROM_FREELIST();
- rec_ring[xid].id = id;
-
- req->id = xid;
- req->operation = operation;
- req->sector_number = (blkif_sector_t)sector_number;
- req->device = device;
- req->nr_segments = 1;
- req->frame_and_sects[0] = buffer_ma | (fsect<<3) | lsect;
-
- req_prod++;
-
- /* Keep a private copy so we can reissue requests when recovering. */
- translate_req_to_pfn(&rec_ring[xid], req );
-
- return 0;
-}
-
-
-/*
- * do_blkif_request
- * read a block; request is in a request queue
- */
-void do_blkif_request(request_queue_t *rq)
-{
- struct request *req;
- struct buffer_head *bh, *next_bh;
- int rw, nsect, full, queued = 0;
-
- DPRINTK("Entered do_blkif_request\n");
-
- while ( !rq->plugged && !list_empty(&rq->queue_head))
- {
- if ( (req = blkdev_entry_next_request(&rq->queue_head)) == NULL )
- goto out;
-
- DPRINTK("do_blkif_request %p: cmd %i, sec %lx, (%li/%li) bh:%p\n",
- req, req->cmd, req->sector,
- req->current_nr_sectors, req->nr_sectors, req->bh);
-
- rw = req->cmd;
- if ( rw == READA )
- rw = READ;
- if ( unlikely((rw != READ) && (rw != WRITE)) )
- panic("XenoLinux Virtual Block Device: bad cmd: %d\n", rw);
-
- req->errors = 0;
-
- bh = req->bh;
- while ( bh != NULL )
- {
- next_bh = bh->b_reqnext;
- bh->b_reqnext = NULL;
-
- full = blkif_queue_request(
- (unsigned long)bh,
- (rw == READ) ? BLKIF_OP_READ : BLKIF_OP_WRITE,
- bh->b_data, bh->b_rsector, bh->b_size>>9, bh->b_rdev);
-
- if ( full )
- {
- bh->b_reqnext = next_bh;
- pending_queues[nr_pending++] = rq;
- if ( unlikely(nr_pending >= MAX_PENDING) )
- BUG();
- goto out;
- }
-
- queued++;
-
- /* Dequeue the buffer head from the request. */
- nsect = bh->b_size >> 9;
- bh = req->bh = next_bh;
-
- if ( bh != NULL )
- {
- /* There's another buffer head to do. Update the request. */
- req->hard_sector += nsect;
- req->hard_nr_sectors -= nsect;
- req->sector = req->hard_sector;
- req->nr_sectors = req->hard_nr_sectors;
- req->current_nr_sectors = bh->b_size >> 9;
- req->buffer = bh->b_data;
- }
- else
- {
- /* That was the last buffer head. Finalise the request. */
- if ( unlikely(end_that_request_first(req, 1, "XenBlk")) )
- BUG();
- blkdev_dequeue_request(req);
- end_that_request_last(req);
- }
- }
- }
-
- out:
- if ( queued != 0 )
- flush_requests();
-}
-
-
-static void blkif_int(int irq, void *dev_id, struct pt_regs *ptregs)
-{
- BLKIF_RING_IDX i, rp;
- unsigned long flags;
- struct buffer_head *bh, *next_bh;
-
- spin_lock_irqsave(&io_request_lock, flags);
-
- if ( unlikely(blkif_state == BLKIF_STATE_CLOSED || recovery) )
- {
- spin_unlock_irqrestore(&io_request_lock, flags);
- return;
- }
-
- rp = blk_ring->resp_prod;
- rmb(); /* Ensure we see queued responses up to 'rp'. */
-
- for ( i = resp_cons; i != rp; i++ )
- {
- unsigned long id;
- blkif_response_t *bret = &blk_ring->ring[MASK_BLKIF_IDX(i)].resp;
-
- id = bret->id;
- bh = (struct buffer_head *)rec_ring[id].id;
-
- blkif_completion( &rec_ring[id] );
-
- ADD_ID_TO_FREELIST(id);
-
- switch ( bret->operation )
- {
- case BLKIF_OP_READ:
- case BLKIF_OP_WRITE:
- if ( unlikely(bret->status != BLKIF_RSP_OKAY) )
- DPRINTK("Bad return from blkdev data request: %lx\n",
- bret->status);
- for ( ; bh != NULL; bh = next_bh )
- {
- next_bh = bh->b_reqnext;
- bh->b_reqnext = NULL;
- bh->b_end_io(bh, bret->status == BLKIF_RSP_OKAY);
- }
-
- break;
- case BLKIF_OP_PROBE:
- memcpy(&blkif_control_rsp, bret, sizeof(*bret));
- blkif_control_rsp_valid = 1;
- break;
- default:
- BUG();
- }
- }
-
- resp_cons = i;
-
- kick_pending_request_queues();
-
- spin_unlock_irqrestore(&io_request_lock, flags);
-}
-
-#endif
-
-/***************************** COMMON CODE *******************************/
-
-
-void blkif_control_send(blkif_request_t *req, blkif_response_t *rsp)
-{
- unsigned long flags, id;
-
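- /* Wait for ring space without the lock, then re-check with it held, since another CPU may have claimed the free slot meanwhile. */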
- retry:
- while ( (req_prod - resp_cons) == BLKIF_RING_SIZE )
- {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(1);
- }
-
- spin_lock_irqsave(&blkif_io_lock, flags);
- if ( (req_prod - resp_cons) == BLKIF_RING_SIZE )
- {
- spin_unlock_irqrestore(&blkif_io_lock, flags);
- goto retry;
- }
-
- DISABLE_SCATTERGATHER();
- blk_ring->ring[MASK_BLKIF_IDX(req_prod)].req = *req;
-
- id = GET_ID_FROM_FREELIST();
- blk_ring->ring[MASK_BLKIF_IDX(req_prod)].req.id = id;
- rec_ring[id].id = (unsigned long) req;
-
- translate_req_to_pfn( &rec_ring[id], req );
-
- req_prod++;
- flush_requests();
-
- spin_unlock_irqrestore(&blkif_io_lock, flags);
-
- while ( !blkif_control_rsp_valid )
- {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(1);
- }
-
- memcpy(rsp, &blkif_control_rsp, sizeof(*rsp));
- blkif_control_rsp_valid = 0;
-}
-
-
-/* Send a driver status notification to the domain controller. */
-static void send_driver_status(int ok)
-{
- ctrl_msg_t cmsg = {
- .type = CMSG_BLKIF_FE,
- .subtype = CMSG_BLKIF_FE_DRIVER_STATUS,
- .length = sizeof(blkif_fe_driver_status_t),
- };
- blkif_fe_driver_status_t *msg = (void*)cmsg.msg;
-
- msg->status = (ok ? BLKIF_DRIVER_STATUS_UP : BLKIF_DRIVER_STATUS_DOWN);
-
- ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
-}
-
-/* Tell the controller to bring up the interface. */
-static void blkif_send_interface_connect(void)
-{
- ctrl_msg_t cmsg = {
- .type = CMSG_BLKIF_FE,
- .subtype = CMSG_BLKIF_FE_INTERFACE_CONNECT,
- .length = sizeof(blkif_fe_interface_connect_t),
- };
- blkif_fe_interface_connect_t *msg = (void*)cmsg.msg;
-
- msg->handle = 0;
- msg->shmem_frame = (virt_to_machine(blk_ring) >> PAGE_SHIFT);
-
- ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
-}
-
-static void blkif_free(void)
-{
- /* Prevent new requests being issued until we fix things up. */
- spin_lock_irq(&blkif_io_lock);
- recovery = 1;
- blkif_state = BLKIF_STATE_DISCONNECTED;
- spin_unlock_irq(&blkif_io_lock);
-
- /* Free resources associated with old device channel. */
- if ( blk_ring != NULL )
- {
- free_page((unsigned long)blk_ring);
- blk_ring = NULL;
- }
- free_irq(blkif_irq, NULL);
- blkif_irq = 0;
-
- unbind_evtchn_from_irq(blkif_evtchn);
- blkif_evtchn = 0;
-}
-
-static void blkif_close(void)
-{
-}
-
-/* Move from CLOSED to DISCONNECTED state. */
-static void blkif_disconnect(void)
-{
- if ( blk_ring != NULL )
- free_page((unsigned long)blk_ring);
- blk_ring = (blkif_ring_t *)__get_free_page(GFP_KERNEL);
- blk_ring->req_prod = blk_ring->resp_prod = resp_cons = req_prod = 0;
- blkif_state = BLKIF_STATE_DISCONNECTED;
- blkif_send_interface_connect();
-}
-
-static void blkif_reset(void)
-{
- blkif_free();
- blkif_disconnect();
-}
-
-static void blkif_recover(void)
-{
- int i;
-
- /* Hmm, requests might be re-ordered when we re-issue them.
- * This will need to be fixed once we have barriers. */
-
- /* Stage 1 : Find active and move to safety. */
- for ( i = 0; i < BLKIF_RING_SIZE; i++ )
- {
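- /* Entries still owned by a request hold a kernel pointer in 'id' (>= PAGE_OFFSET); free entries hold small free-list indices. */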
- if ( rec_ring[i].id >= PAGE_OFFSET )
- {
- translate_req_to_mfn(
- &blk_ring->ring[req_prod].req, &rec_ring[i]);
- req_prod++;
- }
- }
-
- /* Stage 2 : Set up shadow list. */
- for ( i = 0; i < req_prod; i++ )
- {
- rec_ring[i].id = blk_ring->ring[i].req.id;
- blk_ring->ring[i].req.id = i;
- translate_req_to_pfn(&rec_ring[i], &blk_ring->ring[i].req);
- }
-
- /* Stage 3 : Set up free list. */
- for ( ; i < BLKIF_RING_SIZE; i++ )
- rec_ring[i].id = i+1;
- rec_ring_free = req_prod;
- rec_ring[BLKIF_RING_SIZE-1].id = 0x0fffffff;
-
- /* blk_ring->req_prod will be set when we flush_requests(). */
- wmb();
-
- /* Switch off recovery mode, using a memory barrier to ensure that
- * it's seen before we flush requests - we don't want to miss any
- * interrupts. */
- recovery = 0;
- wmb();
-
- /* Kicks things back into life. */
- flush_requests();
-
- /* Now it is safe to let others use the interface. */
- blkif_state = BLKIF_STATE_CONNECTED;
-}
-
-static void blkif_connect(blkif_fe_interface_status_t *status)
-{
- int err = 0;
-
- blkif_evtchn = status->evtchn;
- blkif_irq = bind_evtchn_to_irq(blkif_evtchn);
-
- err = request_irq(blkif_irq, blkif_int, SA_SAMPLE_RANDOM, "blkif", NULL);
- if ( err )
- {
- WPRINTK("request_irq failed (err=%d)\n", err);
- return;
- }
-
- if ( recovery )
- {
- blkif_recover();
- }
- else
- {
- /* Transition to connected in case we need to do
- * a partition probe on a whole disk. */
- blkif_state = BLKIF_STATE_CONNECTED;
-
- /* Probe for discs attached to the interface. */
- xlvbd_init();
- }
-
- /* Kick pending requests. */
- spin_lock_irq(&blkif_io_lock);
- kick_pending_request_queues();
- spin_unlock_irq(&blkif_io_lock);
-}
-
-static void unexpected(blkif_fe_interface_status_t *status)
-{
- DPRINTK(" Unexpected blkif status %u in state %u\n",
- status->status, blkif_state);
-}
-
-static void blkif_status(blkif_fe_interface_status_t *status)
-{
- if ( status->handle != blkif_handle )
- {
- WPRINTK(" Invalid blkif: handle=%u", status->handle);
- return;
- }
-
- switch ( status->status )
- {
- case BLKIF_INTERFACE_STATUS_CLOSED:
- switch ( blkif_state )
- {
- case BLKIF_STATE_CLOSED:
- unexpected(status);
- break;
- case BLKIF_STATE_DISCONNECTED:
- case BLKIF_STATE_CONNECTED:
- unexpected(status);
- blkif_close();
- break;
- }
- break;
-
- case BLKIF_INTERFACE_STATUS_DISCONNECTED:
- switch ( blkif_state )
- {
- case BLKIF_STATE_CLOSED:
- blkif_disconnect();
- break;
- case BLKIF_STATE_DISCONNECTED:
- case BLKIF_STATE_CONNECTED:
- /* unexpected(status); */ /* occurs during suspend/resume */
- blkif_reset();
- break;
- }
- break;
-
- case BLKIF_INTERFACE_STATUS_CONNECTED:
- switch ( blkif_state )
- {
- case BLKIF_STATE_CLOSED:
- unexpected(status);
- blkif_disconnect();
- blkif_connect(status);
- break;
- case BLKIF_STATE_DISCONNECTED:
- blkif_connect(status);
- break;
- case BLKIF_STATE_CONNECTED:
- unexpected(status);
- blkif_connect(status);
- break;
- }
- break;
-
- case BLKIF_INTERFACE_STATUS_CHANGED:
- switch ( blkif_state )
- {
- case BLKIF_STATE_CLOSED:
- case BLKIF_STATE_DISCONNECTED:
- unexpected(status);
- break;
- case BLKIF_STATE_CONNECTED:
- vbd_update();
- break;
- }
- break;
-
- default:
- WPRINTK(" Invalid blkif status: %d\n", status->status);
- break;
- }
-}
-
-
-static void blkif_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
-{
- switch ( msg->subtype )
- {
- case CMSG_BLKIF_FE_INTERFACE_STATUS:
- if ( msg->length != sizeof(blkif_fe_interface_status_t) )
- goto parse_error;
- blkif_status((blkif_fe_interface_status_t *)
- &msg->msg[0]);
- break;
- default:
- goto parse_error;
- }
-
- ctrl_if_send_response(msg);
- return;
-
- parse_error:
- msg->length = 0;
- ctrl_if_send_response(msg);
-}
-
-int wait_for_blkif(void)
-{
- int err = 0;
- int i;
- send_driver_status(1);
-
- /*
- * We should read 'nr_interfaces' from response message and wait
- * for notifications before proceeding. For now we assume that we
- * will be notified of exactly one interface.
- */
- for ( i=0; (blkif_state != BLKIF_STATE_CONNECTED) && (i < 10*HZ); i++ )
- {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(1);
- }
-
- if ( blkif_state != BLKIF_STATE_CONNECTED )
- {
- WPRINTK("Timeout connecting to device!\n");
- err = -ENOSYS;
- }
- return err;
-}
-
-int __init xlblk_init(void)
-{
- int i;
-
- if ( (xen_start_info.flags & SIF_INITDOMAIN) ||
- (xen_start_info.flags & SIF_BLK_BE_DOMAIN) )
- return 0;
-
- IPRINTK("Initialising virtual block device driver\n");
-
- rec_ring_free = 0;
- for ( i = 0; i < BLKIF_RING_SIZE; i++ )
- rec_ring[i].id = i+1;
- rec_ring[BLKIF_RING_SIZE-1].id = 0x0fffffff;
-
- (void)ctrl_if_register_receiver(CMSG_BLKIF_FE, blkif_ctrlif_rx,
- CALLBACK_IN_BLOCKING_CONTEXT);
-
- wait_for_blkif();
-
- return 0;
-}
-
-void blkdev_suspend(void)
-{
-}
-
-void blkdev_resume(void)
-{
- send_driver_status(1);
-}
-
-/* XXXXX THIS IS A TEMPORARY FUNCTION UNTIL WE GET GRANT TABLES */
-
-void blkif_completion(blkif_request_t *req)
-{
- int i;
-
- switch ( req->operation )
- {
- case BLKIF_OP_READ:
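- /* Pre-grant-table hack: re-assert the machine-to-physical (M2P) mapping for each page touched by a completed read. */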
- for ( i = 0; i < req->nr_segments; i++ )
- {
- unsigned long pfn = req->frame_and_sects[i] >> PAGE_SHIFT;
- unsigned long mfn = phys_to_machine_mapping[pfn];
- xen_machphys_update(mfn, pfn);
- }
- break;
- }
-
-}
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/drivers/xen/blkfront/block.h
--- a/linux-2.6.11-xen-sparse/drivers/xen/blkfront/block.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,125 +0,0 @@
-/******************************************************************************
- * block.h
- *
- * Shared definitions between all levels of XenLinux Virtual block devices.
- *
- * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
- * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
- * Copyright (c) 2004-2005, Christian Limpach
- *
- * This file may be distributed separately from the Linux kernel, or
- * incorporated into other software packages, subject to the following license:
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#ifndef __XEN_DRIVERS_BLOCK_H__
-#define __XEN_DRIVERS_BLOCK_H__
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/hdreg.h>
-#include <linux/blkdev.h>
-#include <linux/major.h>
-#include <linux/devfs_fs_kernel.h>
-#include <asm-xen/xen-public/xen.h>
-#include <asm-xen/xen-public/io/blkif.h>
-#include <asm/io.h>
-#include <asm/atomic.h>
-#include <asm/uaccess.h>
-
-#if 1
-#define IPRINTK(fmt, args...) \
- printk(KERN_INFO "xen_blk: " fmt, ##args)
-#else
-#define IPRINTK(fmt, args...) ((void)0)
-#endif
-
-#if 1
-#define WPRINTK(fmt, args...) \
- printk(KERN_WARNING "xen_blk: " fmt, ##args)
-#else
-#define WPRINTK(fmt, args...) ((void)0)
-#endif
-
-#if 0
-#define DPRINTK(_f, _a...) printk ( KERN_ALERT _f , ## _a )
-#else
-#define DPRINTK(_f, _a...) ((void)0)
-#endif
-
-#if 0
-#define DPRINTK_IOCTL(_f, _a...) printk ( KERN_ALERT _f , ## _a )
-#else
-#define DPRINTK_IOCTL(_f, _a...) ((void)0)
-#endif
-
-struct xlbd_type_info {
- int partn_shift;
- int disks_per_major;
- char *devname;
- char *diskname;
-};
-
-/*
- * We have one of these per vbd, whether ide, scsi or 'other'. They
- * hang in private_data off the gendisk structure. We may end up
- * putting all kinds of interesting stuff here :-)
- */
-struct xlbd_major_info {
- int major;
- int index;
- int usage;
- struct xlbd_type_info *type;
-};
-
-struct xlbd_disk_info {
- int xd_device;
- struct xlbd_major_info *mi;
-};
-
-typedef struct xen_block {
- int usage;
-} xen_block_t;
-
-extern struct request_queue *xlbd_blk_queue;
-extern spinlock_t blkif_io_lock;
-
-extern int blkif_open(struct inode *inode, struct file *filep);
-extern int blkif_release(struct inode *inode, struct file *filep);
-extern int blkif_ioctl(struct inode *inode, struct file *filep,
- unsigned command, unsigned long argument);
-extern int blkif_check(dev_t dev);
-extern int blkif_revalidate(dev_t dev);
-extern void blkif_control_send(blkif_request_t *req, blkif_response_t *rsp);
-extern void do_blkif_request (request_queue_t *rq);
-
-extern void xlvbd_update_vbds(void);
-
-/* Virtual block-device subsystem. */
-extern int xlvbd_init(void);
-extern void xlvbd_cleanup(void);
-
-#endif /* __XEN_DRIVERS_BLOCK_H__ */
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/drivers/xen/blkfront/vbd.c
--- a/linux-2.6.11-xen-sparse/drivers/xen/blkfront/vbd.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,494 +0,0 @@
-/******************************************************************************
- * vbd.c
- *
- * XenLinux virtual block-device driver (xvd).
- *
- * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
- * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
- * Copyright (c) 2004-2005, Christian Limpach
- *
- * This file may be distributed separately from the Linux kernel, or
- * incorporated into other software packages, subject to the following license:
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#include "block.h"
-#include <linux/blkdev.h>
-#include <linux/list.h>
-
-/*
- * For convenience we distinguish between ide, scsi and 'other' (i.e.,
- * potentially combinations of the two) in the naming scheme and in a few other
- * places.
- */
-
-#define NUM_IDE_MAJORS 10
-#define NUM_SCSI_MAJORS 9
-#define NUM_VBD_MAJORS 1
-
-struct lvdisk
-{
- blkif_sector_t capacity; /* 0: Size in terms of 512-byte sectors. */
- blkif_vdev_t device; /* 8: Device number (opaque 16 bit value). */
- u16 info;
- struct list_head list;
-};
-
-static struct xlbd_type_info xlbd_ide_type = {
- .partn_shift = 6,
- .disks_per_major = 2,
- .devname = "ide",
- .diskname = "hd",
-};
-
-static struct xlbd_type_info xlbd_scsi_type = {
- .partn_shift = 4,
- .disks_per_major = 16,
- .devname = "sd",
- .diskname = "sd",
-};
-
-static struct xlbd_type_info xlbd_vbd_type = {
- .partn_shift = 4,
- .disks_per_major = 16,
- .devname = "xvd",
- .diskname = "xvd",
-};
-
-static struct xlbd_major_info *major_info[NUM_IDE_MAJORS + NUM_SCSI_MAJORS +
- NUM_VBD_MAJORS];
-
-#define XLBD_MAJOR_IDE_START 0
-#define XLBD_MAJOR_SCSI_START (NUM_IDE_MAJORS)
-#define XLBD_MAJOR_VBD_START (NUM_IDE_MAJORS + NUM_SCSI_MAJORS)
-
-#define XLBD_MAJOR_IDE_RANGE XLBD_MAJOR_IDE_START ... XLBD_MAJOR_SCSI_START - 1
-#define XLBD_MAJOR_SCSI_RANGE XLBD_MAJOR_SCSI_START ... XLBD_MAJOR_VBD_START - 1
-#define XLBD_MAJOR_VBD_RANGE XLBD_MAJOR_VBD_START ... XLBD_MAJOR_VBD_START + NUM_VBD_MAJORS - 1
-
-/* Information about our VBDs. */
-#define MAX_VBDS 64
-struct list_head vbds_list;
-
-struct request_queue *xlbd_blk_queue = NULL;
-
-#define MAJOR_XEN(dev) ((dev)>>8)
-#define MINOR_XEN(dev) ((dev) & 0xff)
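-/* A vdisk's opaque 16-bit device number encodes the major in the top byte and the minor in the bottom byte. */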
-
-static struct block_device_operations xlvbd_block_fops =
-{
- .owner = THIS_MODULE,
- .open = blkif_open,
- .release = blkif_release,
- .ioctl = blkif_ioctl,
-};
-
-spinlock_t blkif_io_lock = SPIN_LOCK_UNLOCKED;
-
-static struct lvdisk *xlvbd_device_alloc(void)
-{
- struct lvdisk *disk;
-
- disk = kmalloc(sizeof(*disk), GFP_KERNEL);
- if (disk != NULL) {
- memset(disk, 0, sizeof(*disk));
- INIT_LIST_HEAD(&disk->list);
- }
- return disk;
-}
-
-static void xlvbd_device_free(struct lvdisk *disk)
-{
- list_del(&disk->list);
- kfree(disk);
-}
-
-static vdisk_t *xlvbd_probe(int *ret)
-{
- blkif_response_t rsp;
- blkif_request_t req;
- vdisk_t *disk_info = NULL;
- unsigned long buf;
- int nr;
-
- buf = __get_free_page(GFP_KERNEL);
- if ((void *)buf == NULL)
- goto out;
-
- memset(&req, 0, sizeof(req));
- req.operation = BLKIF_OP_PROBE;
- req.nr_segments = 1;
-#ifdef CONFIG_XEN_BLKDEV_GRANT
- blkif_control_probe_send(&req, &rsp,
- (unsigned long)(virt_to_machine(buf)));
-#else
- req.frame_and_sects[0] = virt_to_machine(buf) | 7;
-
- blkif_control_send(&req, &rsp);
-#endif
- if ( rsp.status <= 0 ) {
- WPRINTK("Could not probe disks (%d)\n", rsp.status);
- goto out;
- }
- nr = rsp.status;
- if ( nr > MAX_VBDS )
- nr = MAX_VBDS;
-
- disk_info = kmalloc(nr * sizeof(vdisk_t), GFP_KERNEL);
- if (disk_info != NULL)
- memcpy(disk_info, (void *) buf, nr * sizeof(vdisk_t));
-
- if (ret != NULL)
- *ret = nr;
-
-out:
- free_page(buf);
- return disk_info;
-}
-
-static struct xlbd_major_info *xlbd_alloc_major_info(
- int major, int minor, int index)
-{
- struct xlbd_major_info *ptr;
-
- ptr = kmalloc(sizeof(struct xlbd_major_info), GFP_KERNEL);
- if (ptr == NULL)
- return NULL;
-
- memset(ptr, 0, sizeof(struct xlbd_major_info));
-
- ptr->major = major;
-
- switch (index) {
- case XLBD_MAJOR_IDE_RANGE:
- ptr->type = &xlbd_ide_type;
- ptr->index = index - XLBD_MAJOR_IDE_START;
- break;
- case XLBD_MAJOR_SCSI_RANGE:
- ptr->type = &xlbd_scsi_type;
- ptr->index = index - XLBD_MAJOR_SCSI_START;
- break;
- case XLBD_MAJOR_VBD_RANGE:
- ptr->type = &xlbd_vbd_type;
- ptr->index = index - XLBD_MAJOR_VBD_START;
- break;
- }
-
- if (register_blkdev(ptr->major, ptr->type->devname)) {
- WPRINTK("can't get major %d with name %s\n",
- ptr->major, ptr->type->devname);
- kfree(ptr);
- return NULL;
- }
-
- devfs_mk_dir(ptr->type->devname);
- major_info[index] = ptr;
- return ptr;
-}
-
-static struct xlbd_major_info *xlbd_get_major_info(int device)
-{
- int major, minor, index;
-
- major = MAJOR_XEN(device);
- minor = MINOR_XEN(device);
-
- switch (major) {
- case IDE0_MAJOR: index = 0; break;
- case IDE1_MAJOR: index = 1; break;
- case IDE2_MAJOR: index = 2; break;
- case IDE3_MAJOR: index = 3; break;
- case IDE4_MAJOR: index = 4; break;
- case IDE5_MAJOR: index = 5; break;
- case IDE6_MAJOR: index = 6; break;
- case IDE7_MAJOR: index = 7; break;
- case IDE8_MAJOR: index = 8; break;
- case IDE9_MAJOR: index = 9; break;
- case SCSI_DISK0_MAJOR: index = 10; break;
- case SCSI_DISK1_MAJOR ... SCSI_DISK7_MAJOR:
- index = 11 + major - SCSI_DISK1_MAJOR;
- break;
- case SCSI_CDROM_MAJOR: index = 18; break;
- default: index = 19; break;
- }
-
- return ((major_info[index] != NULL) ? major_info[index] :
- xlbd_alloc_major_info(major, minor, index));
-}
-
-static int xlvbd_blk_queue_alloc(struct xlbd_type_info *type)
-{
- xlbd_blk_queue = blk_init_queue(do_blkif_request, &blkif_io_lock);
- if (xlbd_blk_queue == NULL)
- return -1;
-
- elevator_init(xlbd_blk_queue, "noop");
-
- /*
- * Turn off barking 'headactive' mode. We dequeue
- * buffer heads as soon as we pass them to the back-end
- * driver.
- */
- blk_queue_headactive(xlbd_blk_queue, 0);
-
- /* Hard sector size and max sectors impersonate the equiv. hardware. */
- blk_queue_hardsect_size(xlbd_blk_queue, 512);
- blk_queue_max_sectors(xlbd_blk_queue, 512);
-
- /* Each segment in a request is up to an aligned page in size. */
- blk_queue_segment_boundary(xlbd_blk_queue, PAGE_SIZE - 1);
- blk_queue_max_segment_size(xlbd_blk_queue, PAGE_SIZE);
-
- /* Ensure a merged request will fit in a single I/O ring slot. */
- blk_queue_max_phys_segments(xlbd_blk_queue, BLKIF_MAX_SEGMENTS_PER_REQUEST);
- blk_queue_max_hw_segments(xlbd_blk_queue, BLKIF_MAX_SEGMENTS_PER_REQUEST);
-
- /* Make sure buffer addresses are sector-aligned. */
- blk_queue_dma_alignment(xlbd_blk_queue, 511);
- return 0;
-}
-
-struct gendisk *xlvbd_alloc_gendisk(
- struct xlbd_major_info *mi, int minor, vdisk_t *disk)
-{
- struct gendisk *gd;
- struct xlbd_disk_info *di;
- int nr_minors = 1;
-
- di = kmalloc(sizeof(struct xlbd_disk_info), GFP_KERNEL);
- if (di == NULL)
- goto out;
- di->mi = mi;
- di->xd_device = disk->device;
-
- if ((minor & ((1 << mi->type->partn_shift) - 1)) == 0)
- nr_minors = 1 << mi->type->partn_shift;
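- /* A whole-disk node (all partition bits clear) claims the full range of partition minors beneath it. */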
-
- gd = alloc_disk(nr_minors);
- if ( !gd )
- goto out;
-
- if (nr_minors > 1)
- sprintf(gd->disk_name, "%s%c", mi->type->diskname,
- 'a' + mi->index * mi->type->disks_per_major +
- (minor >> mi->type->partn_shift));
- else
- sprintf(gd->disk_name, "%s%c%d", mi->type->diskname,
- 'a' + mi->index * mi->type->disks_per_major +
- (minor >> mi->type->partn_shift),
- minor & ((1 << mi->type->partn_shift) - 1));
-
- gd->major = mi->major;
- gd->first_minor = minor;
- gd->fops = &xlvbd_block_fops;
- gd->private_data = di;
- set_capacity(gd, disk->capacity);
-
- if ((xlbd_blk_queue == NULL) && xlvbd_blk_queue_alloc(mi->type))
- goto out_gendisk;
-
- if (VDISK_READONLY(disk->info))
- set_disk_ro(gd, 1);
-
- if (VDISK_TYPE(disk->info) == VDISK_TYPE_CDROM)
- gd->flags |= GENHD_FL_REMOVABLE | GENHD_FL_CD;
-
- gd->queue = xlbd_blk_queue;
- add_disk(gd);
- return gd;
-
-out_gendisk:
- del_gendisk(gd);
-out:
- kfree(di);
- return NULL;
-}
-
-static int xlvbd_device_add(struct list_head *list, vdisk_t *disk)
-{
- struct lvdisk *new;
- int minor;
- dev_t device;
- struct block_device *bd;
- struct gendisk *gd;
- struct xlbd_major_info *mi;
-
- mi = xlbd_get_major_info(disk->device);
- if (mi == NULL)
- return -EPERM;
-
- new = xlvbd_device_alloc();
- if (new == NULL)
- return -1;
- new->capacity = disk->capacity;
- new->device = disk->device;
- new->info = disk->info;
-
- minor = MINOR_XEN(disk->device);
- device = MKDEV(mi->major, minor);
-
- bd = bdget(device);
- if (bd == NULL)
- goto out;
-
- gd = xlvbd_alloc_gendisk(mi, minor, disk);
- if (gd == NULL)
- goto out_bd;
-
- list_add(&new->list, list);
-out_bd:
- bdput(bd);
-out:
- return 0;
-}
-
-static int xlvbd_device_del(struct lvdisk *disk)
-{
- dev_t device;
- struct block_device *bd;
- struct gendisk *gd;
- struct xlbd_disk_info *di;
- int ret = 0, unused;
-
- device = MKDEV(MAJOR_XEN(disk->device), MINOR_XEN(disk->device));
-
- bd = bdget(device);
- if (bd == NULL)
- return -1;
-
- gd = get_gendisk(device, &unused);
- di = gd->private_data;
-
- if (di->mi->usage != 0) {
- WPRINTK("disk removal failed: used [dev=%x]\n", device);
- ret = -1;
- goto out;
- }
-
- del_gendisk(gd);
-
- xlvbd_device_free(disk);
-out:
- bdput(bd);
- return ret;
-}
-
-static int xlvbd_device_update(struct lvdisk *ldisk, vdisk_t *disk)
-{
- dev_t device;
- struct block_device *bd;
- struct gendisk *gd;
- int unused;
-
- if ((ldisk->capacity == disk->capacity) && (ldisk->info == disk->info))
- return 0;
-
- device = MKDEV(MAJOR_XEN(ldisk->device), MINOR_XEN(ldisk->device));
-
- bd = bdget(device);
- if (bd == NULL)
- return -1;
-
- gd = get_gendisk(device, &unused);
- set_capacity(gd, disk->capacity);
- ldisk->capacity = disk->capacity;
-
- bdput(bd);
-
- return 0;
-}
-
-void xlvbd_refresh(void)
-{
- vdisk_t *newdisks;
- struct list_head *tmp, *tmp2;
- struct lvdisk *disk;
- int i, nr;
-
- newdisks = xlvbd_probe(&nr);
- if (newdisks == NULL) {
- WPRINTK("failed to probe\n");
- return;
- }
-
- i = 0;
- list_for_each_safe(tmp, tmp2, &vbds_list) {
- disk = list_entry(tmp, struct lvdisk, list);
-
- for (i = 0; i < nr; i++) {
- if ( !newdisks[i].device )
- continue;
- if ( disk->device == newdisks[i].device ) {
- xlvbd_device_update(disk, &newdisks[i]);
- newdisks[i].device = 0;
- break;
- }
- }
- if (i == nr) {
- /* No longer reported by the backend: tear the device down. */
- xlvbd_device_del(disk);
- }
- }
- for (i = 0; i < nr; i++)
- if ( newdisks[i].device )
- xlvbd_device_add(&vbds_list, &newdisks[i]);
- kfree(newdisks);
-}
-
-/*
- * xlvbd_update_vbds - reprobe the VBDs and apply the corresponding updates
- * to driver state. The VBDs need to be updated in this way when the domain
- * is initialised and also each time we receive an XLBLK_UPDATE event.
- */
-void xlvbd_update_vbds(void)
-{
- xlvbd_refresh();
-}
-
-/*
- * Set up all the linux device goop for the virtual block devices
- * (vbd's) that we know about. Note that although from the backend
- * driver's p.o.v. VBDs are addressed simply by an opaque 16-bit device
- * number, the domain creation tools conventionally allocate these
- * numbers to correspond to those used by 'real' linux -- this is just
- * for convenience as it means e.g. that the same /etc/fstab can be
- * used when booting with or without Xen.
- */
-int xlvbd_init(void)
-{
- int i, nr;
- vdisk_t *disks;
-
- INIT_LIST_HEAD(&vbds_list);
-
- memset(major_info, 0, sizeof(major_info));
-
- disks = xlvbd_probe(&nr);
- if (disks == NULL) {
- WPRINTK("failed to probe\n");
- return -1;
- }
-
- for (i = 0; i < nr; i++)
- xlvbd_device_add(&vbds_list, &disks[i]);
-
- kfree(disks);
- return 0;
-}
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/drivers/xen/console/Makefile
--- a/linux-2.6.11-xen-sparse/drivers/xen/console/Makefile Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,2 +0,0 @@
-
-obj-y := console.o
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/drivers/xen/console/console.c
--- a/linux-2.6.11-xen-sparse/drivers/xen/console/console.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,781 +0,0 @@
-/******************************************************************************
- * console.c
- *
- * Virtual console driver.
- *
- * Copyright (c) 2002-2004, K A Fraser.
- *
- * This file may be distributed separately from the Linux kernel, or
- * incorporated into other software packages, subject to the following license:
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#include <linux/config.h>
-#include <linux/version.h>
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <linux/serial.h>
-#include <linux/major.h>
-#include <linux/ptrace.h>
-#include <linux/ioport.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/console.h>
-#include <linux/bootmem.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/uaccess.h>
-#include <asm-xen/xen-public/event_channel.h>
-#include <asm-xen/hypervisor.h>
-#include <asm-xen/evtchn.h>
-#include <asm-xen/ctrl_if.h>
-
-/*
- * Modes:
- * 'xencons=off' [XC_OFF]: Console is disabled.
- * 'xencons=tty' [XC_TTY]: Console attached to '/dev/tty[0-9]+'.
- * 'xencons=ttyS' [XC_SERIAL]: Console attached to '/dev/ttyS[0-9]+'.
- * [XC_DEFAULT]: DOM0 -> XC_SERIAL ; all others -> XC_TTY.
- *
- * NB. In mode XC_TTY, we create dummy consoles for tty2-63. This suppresses
- * warnings from standard distro startup scripts.
- */
-static enum { XC_OFF, XC_DEFAULT, XC_TTY, XC_SERIAL } xc_mode = XC_DEFAULT;
-
-static int __init xencons_setup(char *str)
-{
- if ( !strcmp(str, "tty") )
- xc_mode = XC_TTY;
- else if ( !strcmp(str, "ttyS") )
- xc_mode = XC_SERIAL;
- else if ( !strcmp(str, "off") )
- xc_mode = XC_OFF;
- return 1;
-}
-__setup("xencons=", xencons_setup);
-
-/* The kernel and user-land drivers share a common transmit buffer. */
-static unsigned int wbuf_size = 4096;
-#define WBUF_MASK(_i) ((_i)&(wbuf_size-1))
-static char *wbuf;
-static unsigned int wc, wp; /* write_cons, write_prod */
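-/* NB. wbuf_size must remain a power of two for WBUF_MASK() to be a valid ring mask; xencons_bufsz_setup() below only ever doubles it. */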
-
-static int __init xencons_bufsz_setup(char *str)
-{
- unsigned int goal;
- goal = simple_strtoul(str, NULL, 0);
- while ( wbuf_size < goal )
- wbuf_size <<= 1;
- return 1;
-}
-__setup("xencons_bufsz=", xencons_bufsz_setup);
-
-/* This lock protects accesses to the common transmit buffer. */
-static spinlock_t xencons_lock = SPIN_LOCK_UNLOCKED;
-
-/* Common transmit-kick routine. */
-static void __xencons_tx_flush(void);
-
-/* This task is used to defer sending console data until there is space. */
-static void xencons_tx_flush_task_routine(void *data);
-
-static DECLARE_TQUEUE(xencons_tx_flush_task,
- xencons_tx_flush_task_routine,
- NULL);
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-static struct tty_driver *xencons_driver;
-#else
-static struct tty_driver xencons_driver;
-#endif
-
-
-/******************** Kernel console driver ********************************/
-
-static void kcons_write(
- struct console *c, const char *s, unsigned int count)
-{
- int i;
- unsigned long flags;
-
- spin_lock_irqsave(&xencons_lock, flags);
-
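- /* Copy into the transmit ring, appending CR after each LF; anything that does not fit in the (nearly full) ring is dropped. */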
- for ( i = 0; i < count; i++ )
- {
- if ( (wp - wc) >= (wbuf_size - 1) )
- break;
- if ( (wbuf[WBUF_MASK(wp++)] = s[i]) == '\n' )
- wbuf[WBUF_MASK(wp++)] = '\r';
- }
-
- __xencons_tx_flush();
-
- spin_unlock_irqrestore(&xencons_lock, flags);
-}
-
-static void kcons_write_dom0(
- struct console *c, const char *s, unsigned int count)
-{
- int rc;
-
- while ( count > 0 )
- {
- if ( (rc = HYPERVISOR_console_io(CONSOLEIO_write,
- count, (char *)s)) > 0 )
- {
- count -= rc;
- s += rc;
- }
- else
- break;
- }
-}
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-static struct tty_driver *kcons_device(struct console *c, int *index)
-{
- *index = c->index;
- return xencons_driver;
-}
-#else
-static kdev_t kcons_device(struct console *c)
-{
- return MKDEV(TTY_MAJOR, (xc_mode == XC_SERIAL) ? 64 : 1);
-}
-#endif
-
-static struct console kcons_info = {
- device: kcons_device,
- flags: CON_PRINTBUFFER,
- index: -1
-};
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-#define __RETCODE 0
-static int __init xen_console_init(void)
-#else
-#define __RETCODE
-void xen_console_init(void)
-#endif
-{
- if ( xen_start_info.flags & SIF_INITDOMAIN )
- {
- if ( xc_mode == XC_DEFAULT )
- xc_mode = XC_SERIAL;
- kcons_info.write = kcons_write_dom0;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
- if ( xc_mode == XC_SERIAL )
- kcons_info.flags |= CON_ENABLED;
-#endif
- }
- else
- {
- if ( xc_mode == XC_DEFAULT )
- xc_mode = XC_TTY;
- kcons_info.write = kcons_write;
- }
-
- if ( xc_mode == XC_OFF )
- return __RETCODE;
-
- if ( xc_mode == XC_SERIAL )
- strcpy(kcons_info.name, "ttyS");
- else
- strcpy(kcons_info.name, "tty");
-
- wbuf = alloc_bootmem(wbuf_size);
-
- register_console(&kcons_info);
- return __RETCODE;
-}
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-console_initcall(xen_console_init);
-#endif
-
-/*** Useful function for console debugging -- goes straight to Xen. ***/
-asmlinkage int xprintk(const char *fmt, ...)
-{
- va_list args;
- int printk_len;
- static char printk_buf[1024];
-
- /* Emit the output into the temporary buffer */
- va_start(args, fmt);
- printk_len = vsnprintf(printk_buf, sizeof(printk_buf), fmt, args);
- va_end(args);
-
- /* Send the processed output directly to Xen. */
- kcons_write_dom0(NULL, printk_buf, printk_len);
-
- return 0;
-}
-
-/*** Forcibly flush console data before dying. ***/
-void xencons_force_flush(void)
-{
- ctrl_msg_t msg;
- int sz;
-
- /* Emergency console is synchronous, so there's nothing to flush. */
- if ( xen_start_info.flags & SIF_INITDOMAIN )
- return;
-
- /*
- * We use dangerous control-interface functions that require a quiescent
- * system and no interrupts. Try to ensure this with a global cli().
- */
- local_irq_disable(); /* XXXsmp */
-
- /* Spin until console data is flushed through to the domain controller. */
- while ( (wc != wp) && !ctrl_if_transmitter_empty() )
- {
- /* Interrupts are disabled -- we must manually reap responses. */
- ctrl_if_discard_responses();
-
- if ( (sz = wp - wc) == 0 )
- continue;
- if ( sz > sizeof(msg.msg) )
- sz = sizeof(msg.msg);
- if ( sz > (wbuf_size - WBUF_MASK(wc)) )
- sz = wbuf_size - WBUF_MASK(wc);
-
- msg.type = CMSG_CONSOLE;
- msg.subtype = CMSG_CONSOLE_DATA;
- msg.length = sz;
- memcpy(msg.msg, &wbuf[WBUF_MASK(wc)], sz);
-
- if ( ctrl_if_send_message_noblock(&msg, NULL, 0) == 0 )
- wc += sz;
- }
-}
-
-
-/******************** User-space console driver (/dev/console) ************/
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-#define DRV(_d) (_d)
-#define TTY_INDEX(_tty) ((_tty)->index)
-#else
-static int xencons_refcount;
-static struct tty_struct *xencons_table[MAX_NR_CONSOLES];
-#define DRV(_d) (&(_d))
-#define TTY_INDEX(_tty) (MINOR((_tty)->device) - xencons_driver.minor_start)
-#endif
-
-static struct termios *xencons_termios[MAX_NR_CONSOLES];
-static struct termios *xencons_termios_locked[MAX_NR_CONSOLES];
-static struct tty_struct *xencons_tty;
-static int xencons_priv_irq;
-static char x_char;
-
-/* Non-privileged receive callback. */
-static void xencons_rx(ctrl_msg_t *msg, unsigned long id)
-{
- int i;
- unsigned long flags;
-
- spin_lock_irqsave(&xencons_lock, flags);
- if ( xencons_tty != NULL )
- {
- for ( i = 0; i < msg->length; i++ )
- tty_insert_flip_char(xencons_tty, msg->msg[i], 0);
- tty_flip_buffer_push(xencons_tty);
- }
- spin_unlock_irqrestore(&xencons_lock, flags);
-
- msg->length = 0;
- ctrl_if_send_response(msg);
-}
-
-/* Privileged and non-privileged transmit worker. */
-static void __xencons_tx_flush(void)
-{
- int sz, work_done = 0;
- ctrl_msg_t msg;
-
- if ( xen_start_info.flags & SIF_INITDOMAIN )
- {
- if ( x_char )
- {
- kcons_write_dom0(NULL, &x_char, 1);
- x_char = 0;
- work_done = 1;
- }
-
- while ( wc != wp )
- {
- sz = wp - wc;
- if ( sz > (wbuf_size - WBUF_MASK(wc)) )
- sz = wbuf_size - WBUF_MASK(wc);
- kcons_write_dom0(NULL, &wbuf[WBUF_MASK(wc)], sz);
- wc += sz;
- work_done = 1;
- }
- }
- else
- {
- while ( x_char )
- {
- msg.type = CMSG_CONSOLE;
- msg.subtype = CMSG_CONSOLE_DATA;
- msg.length = 1;
- msg.msg[0] = x_char;
-
- if ( ctrl_if_send_message_noblock(&msg, NULL, 0) == 0 )
- x_char = 0;
- else if ( ctrl_if_enqueue_space_callback(&xencons_tx_flush_task) )
- break;
-
- work_done = 1;
- }
-
- while ( wc != wp )
- {
- sz = wp - wc;
- if ( sz > sizeof(msg.msg) )
- sz = sizeof(msg.msg);
- if ( sz > (wbuf_size - WBUF_MASK(wc)) )
- sz = wbuf_size - WBUF_MASK(wc);
-
- msg.type = CMSG_CONSOLE;
- msg.subtype = CMSG_CONSOLE_DATA;
- msg.length = sz;
- memcpy(msg.msg, &wbuf[WBUF_MASK(wc)], sz);
-
- if ( ctrl_if_send_message_noblock(&msg, NULL, 0) == 0 )
- wc += sz;
- else if ( ctrl_if_enqueue_space_callback(&xencons_tx_flush_task) )
- break;
-
- work_done = 1;
- }
- }
-
- if ( work_done && (xencons_tty != NULL) )
- {
- wake_up_interruptible(&xencons_tty->write_wait);
- if ( (xencons_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
- (xencons_tty->ldisc.write_wakeup != NULL) )
- (xencons_tty->ldisc.write_wakeup)(xencons_tty);
- }
-}
-
-/* Non-privileged transmit kicker. */
-static void xencons_tx_flush_task_routine(void *data)
-{
- unsigned long flags;
- spin_lock_irqsave(&xencons_lock, flags);
- __xencons_tx_flush();
- spin_unlock_irqrestore(&xencons_lock, flags);
-}
-
-/* Privileged receive callback and transmit kicker. */
-static irqreturn_t xencons_priv_interrupt(int irq, void *dev_id,
- struct pt_regs *regs)
-{
- static char rbuf[16];
- int i, l;
- unsigned long flags;
-
- spin_lock_irqsave(&xencons_lock, flags);
-
- if ( xencons_tty != NULL )
- {
- /* Receive work. */
- while ( (l = HYPERVISOR_console_io(CONSOLEIO_read, 16, rbuf)) > 0 )
- for ( i = 0; i < l; i++ )
- tty_insert_flip_char(xencons_tty, rbuf[i], 0);
- if ( xencons_tty->flip.count != 0 )
- tty_flip_buffer_push(xencons_tty);
- }
-
- /* Transmit work. */
- __xencons_tx_flush();
-
- spin_unlock_irqrestore(&xencons_lock, flags);
-
- return IRQ_HANDLED;
-}
-
-static int xencons_write_room(struct tty_struct *tty)
-{
- return wbuf_size - (wp - wc);
-}
-
-static int xencons_chars_in_buffer(struct tty_struct *tty)
-{
- return wp - wc;
-}
-
-static void xencons_send_xchar(struct tty_struct *tty, char ch)
-{
- unsigned long flags;
-
- if ( TTY_INDEX(tty) != 0 )
- return;
-
- spin_lock_irqsave(&xencons_lock, flags);
- x_char = ch;
- __xencons_tx_flush();
- spin_unlock_irqrestore(&xencons_lock, flags);
-}
-
-static void xencons_throttle(struct tty_struct *tty)
-{
- if ( TTY_INDEX(tty) != 0 )
- return;
-
- if ( I_IXOFF(tty) )
- xencons_send_xchar(tty, STOP_CHAR(tty));
-}
-
-static void xencons_unthrottle(struct tty_struct *tty)
-{
- if ( TTY_INDEX(tty) != 0 )
- return;
-
- if ( I_IXOFF(tty) )
- {
- if ( x_char != 0 )
- x_char = 0;
- else
- xencons_send_xchar(tty, START_CHAR(tty));
- }
-}
-
-static void xencons_flush_buffer(struct tty_struct *tty)
-{
- unsigned long flags;
-
- if ( TTY_INDEX(tty) != 0 )
- return;
-
- spin_lock_irqsave(&xencons_lock, flags);
- wc = wp = 0;
- spin_unlock_irqrestore(&xencons_lock, flags);
-}
-
-static inline int __xencons_put_char(int ch)
-{
- char _ch = (char)ch;
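- /* Fail, rather than overwrite unsent data, if the ring is full. */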
- if ( (wp - wc) == wbuf_size )
- return 0;
- wbuf[WBUF_MASK(wp++)] = _ch;
- return 1;
-}
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-static int xencons_write(struct tty_struct *tty, const unsigned char *buf,
- int count)
-{
- int i;
- unsigned long flags;
-
- if ( TTY_INDEX(tty) != 0 )
- return count;
-
- spin_lock_irqsave(&xencons_lock, flags);
-
- for ( i = 0; i < count; i++ )
- if ( !__xencons_put_char(buf[i]) )
- break;
-
- if ( i != 0 )
- __xencons_tx_flush();
-
- spin_unlock_irqrestore(&xencons_lock, flags);
-
- return i;
-}
-#else
-static int xencons_write(struct tty_struct *tty, int from_user,
- const u_char *buf, int count)
-{
- int i;
- unsigned long flags;
-
- if ( from_user && verify_area(VERIFY_READ, buf, count) )
- return -EINVAL;
-
- if ( TTY_INDEX(tty) != 0 )
- return count;
-
- spin_lock_irqsave(&xencons_lock, flags);
-
- for ( i = 0; i < count; i++ )
- {
- char ch;
- if ( from_user )
- __get_user(ch, buf + i);
- else
- ch = buf[i];
- if ( !__xencons_put_char(ch) )
- break;
- }
-
- if ( i != 0 )
- __xencons_tx_flush();
-
- spin_unlock_irqrestore(&xencons_lock, flags);
-
- return i;
-}
-#endif
-
-static void xencons_put_char(struct tty_struct *tty, u_char ch)
-{
- unsigned long flags;
-
- if ( TTY_INDEX(tty) != 0 )
- return;
-
- spin_lock_irqsave(&xencons_lock, flags);
- (void)__xencons_put_char(ch);
- spin_unlock_irqrestore(&xencons_lock, flags);
-}
-
-static void xencons_flush_chars(struct tty_struct *tty)
-{
- unsigned long flags;
-
- if ( TTY_INDEX(tty) != 0 )
- return;
-
- spin_lock_irqsave(&xencons_lock, flags);
- __xencons_tx_flush();
- spin_unlock_irqrestore(&xencons_lock, flags);
-}
-
-static void xencons_wait_until_sent(struct tty_struct *tty, int timeout)
-{
- unsigned long orig_jiffies = jiffies;
-
- if ( TTY_INDEX(tty) != 0 )
- return;
-
- while ( DRV(tty->driver)->chars_in_buffer(tty) )
- {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(1);
- if ( signal_pending(current) )
- break;
- if ( (timeout != 0) && time_after(jiffies, orig_jiffies + timeout) )
- break;
- }
-
- set_current_state(TASK_RUNNING);
-}
-
-static int xencons_open(struct tty_struct *tty, struct file *filp)
-{
- unsigned long flags;
-
- if ( TTY_INDEX(tty) != 0 )
- return 0;
-
- spin_lock_irqsave(&xencons_lock, flags);
- tty->driver_data = NULL;
- if ( xencons_tty == NULL )
- xencons_tty = tty;
- __xencons_tx_flush();
- spin_unlock_irqrestore(&xencons_lock, flags);
-
- return 0;
-}
-
-static void xencons_close(struct tty_struct *tty, struct file *filp)
-{
- unsigned long flags;
-
- if ( TTY_INDEX(tty) != 0 )
- return;
-
- if ( tty->count == 1 )
- {
- tty->closing = 1;
- tty_wait_until_sent(tty, 0);
- if ( DRV(tty->driver)->flush_buffer != NULL )
- DRV(tty->driver)->flush_buffer(tty);
- if ( tty->ldisc.flush_buffer != NULL )
- tty->ldisc.flush_buffer(tty);
- tty->closing = 0;
- spin_lock_irqsave(&xencons_lock, flags);
- xencons_tty = NULL;
- spin_unlock_irqrestore(&xencons_lock, flags);
- }
-}
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-static struct tty_operations xencons_ops = {
- .open = xencons_open,
- .close = xencons_close,
- .write = xencons_write,
- .write_room = xencons_write_room,
- .put_char = xencons_put_char,
- .flush_chars = xencons_flush_chars,
- .chars_in_buffer = xencons_chars_in_buffer,
- .send_xchar = xencons_send_xchar,
- .flush_buffer = xencons_flush_buffer,
- .throttle = xencons_throttle,
- .unthrottle = xencons_unthrottle,
- .wait_until_sent = xencons_wait_until_sent,
-};
-
-#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-static const char *xennullcon_startup(void)
-{
- return NULL;
-}
-
-static int xennullcon_dummy(void)
-{
- return 0;
-}
-
-#define DUMMY (void *)xennullcon_dummy
-
-/*
- * The console `switch' structure for the dummy console
- *
- * Most of the operations are dummies.
- */
-
-const struct consw xennull_con = {
- .owner = THIS_MODULE,
- .con_startup = xennullcon_startup,
- .con_init = DUMMY,
- .con_deinit = DUMMY,
- .con_clear = DUMMY,
- .con_putc = DUMMY,
- .con_putcs = DUMMY,
- .con_cursor = DUMMY,
- .con_scroll = DUMMY,
- .con_bmove = DUMMY,
- .con_switch = DUMMY,
- .con_blank = DUMMY,
- .con_font_set = DUMMY,
- .con_font_get = DUMMY,
- .con_font_default = DUMMY,
- .con_font_copy = DUMMY,
- .con_set_palette = DUMMY,
- .con_scrolldelta = DUMMY,
-};
-#endif
-#endif
-
-static int __init xencons_init(void)
-{
- int rc;
-
- if ( xc_mode == XC_OFF )
- return 0;
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
- xencons_driver = alloc_tty_driver((xc_mode == XC_SERIAL) ?
- 1 : MAX_NR_CONSOLES);
- if ( xencons_driver == NULL )
- return -ENOMEM;
-#else
- memset(&xencons_driver, 0, sizeof(struct tty_driver));
- xencons_driver.magic = TTY_DRIVER_MAGIC;
- xencons_driver.refcount = &xencons_refcount;
- xencons_driver.table = xencons_table;
- xencons_driver.num = (xc_mode == XC_SERIAL) ? 1 : MAX_NR_CONSOLES;
-#endif
-
- DRV(xencons_driver)->major = TTY_MAJOR;
- DRV(xencons_driver)->type = TTY_DRIVER_TYPE_SERIAL;
- DRV(xencons_driver)->subtype = SERIAL_TYPE_NORMAL;
- DRV(xencons_driver)->init_termios = tty_std_termios;
- DRV(xencons_driver)->flags =
- TTY_DRIVER_REAL_RAW | TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_NO_DEVFS;
- DRV(xencons_driver)->termios = xencons_termios;
- DRV(xencons_driver)->termios_locked = xencons_termios_locked;
-
- if ( xc_mode == XC_SERIAL )
- {
- DRV(xencons_driver)->name = "ttyS";
- DRV(xencons_driver)->minor_start = 64;
- DRV(xencons_driver)->name_base = 0;
- }
- else
- {
- DRV(xencons_driver)->name = "tty";
- DRV(xencons_driver)->minor_start = 1;
- DRV(xencons_driver)->name_base = 1;
- }
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
- tty_set_operations(xencons_driver, &xencons_ops);
-#else
- xencons_driver.open = xencons_open;
- xencons_driver.close = xencons_close;
- xencons_driver.write = xencons_write;
- xencons_driver.write_room = xencons_write_room;
- xencons_driver.put_char = xencons_put_char;
- xencons_driver.flush_chars = xencons_flush_chars;
- xencons_driver.chars_in_buffer = xencons_chars_in_buffer;
- xencons_driver.send_xchar = xencons_send_xchar;
- xencons_driver.flush_buffer = xencons_flush_buffer;
- xencons_driver.throttle = xencons_throttle;
- xencons_driver.unthrottle = xencons_unthrottle;
- xencons_driver.wait_until_sent = xencons_wait_until_sent;
-#endif
-
- if ( (rc = tty_register_driver(DRV(xencons_driver))) != 0 )
- {
- printk("WARNING: Failed to register Xen virtual "
- "console driver as '%s%d'\n",
- DRV(xencons_driver)->name, DRV(xencons_driver)->name_base);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
- put_tty_driver(xencons_driver);
- xencons_driver = NULL;
-#endif
- return rc;
- }
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
- tty_register_device(xencons_driver, 0, NULL);
-#endif
-
- if ( xen_start_info.flags & SIF_INITDOMAIN )
- {
- xencons_priv_irq = bind_virq_to_irq(VIRQ_CONSOLE);
- (void)request_irq(xencons_priv_irq,
- xencons_priv_interrupt, 0, "console", NULL);
- }
- else
- {
- (void)ctrl_if_register_receiver(CMSG_CONSOLE, xencons_rx, 0);
- }
-
- printk("Xen virtual console successfully installed as %s\n",
- DRV(xencons_driver)->name);
-
- return 0;
-}
-
-module_init(xencons_init);
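
The console code deleted above drives one registration path on both kernel series through a DRV() accessor: on 2.6, xencons_driver is a pointer obtained from alloc_tty_driver(), while on 2.4 it is a static object, and DRV() evidently yields a pointer in either case. Below is a minimal standalone sketch of that compatibility pattern; my_driver, my_drv and USE_POINTER_STYLE are illustrative stand-ins, not kernel names.

/* Sketch of the 2.4-vs-2.6 DRV() accessor pattern used above. */
#include <stdio.h>
#include <stdlib.h>

struct my_driver { int major; };

#ifdef USE_POINTER_STYLE              /* stands in for the 2.6 branch */
static struct my_driver *my_drv;      /* allocated at init time */
#define DRV(_d) (_d)
#else                                 /* stands in for the 2.4 branch */
static struct my_driver my_drv;       /* a static object */
#define DRV(_d) (&(_d))
#endif

int main(void)
{
#ifdef USE_POINTER_STYLE
    my_drv = malloc(sizeof(*my_drv));
    if (my_drv == NULL)
        return 1;
#endif
    /* Shared code writes through DRV() whichever representation is live. */
    DRV(my_drv)->major = 4;
    printf("major=%d\n", DRV(my_drv)->major);
    return 0;
}
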
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/drivers/xen/evtchn/Makefile
--- a/linux-2.6.11-xen-sparse/drivers/xen/evtchn/Makefile Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,2 +0,0 @@
-
-obj-y := evtchn.o
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/drivers/xen/evtchn/evtchn.c
--- a/linux-2.6.11-xen-sparse/drivers/xen/evtchn/evtchn.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,429 +0,0 @@
-/******************************************************************************
- * evtchn.c
- *
- * Xenolinux driver for receiving and demuxing event-channel signals.
- *
- * Copyright (c) 2004, K A Fraser
- * Multi-process extensions Copyright (c) 2004, Steven Smith
- *
- * This file may be distributed separately from the Linux kernel, or
- * incorporated into other software packages, subject to the following license:
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/miscdevice.h>
-#include <linux/major.h>
-#include <linux/proc_fs.h>
-#include <linux/stat.h>
-#include <linux/poll.h>
-#include <linux/irq.h>
-#include <linux/init.h>
-#include <asm-xen/evtchn.h>
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
-#include <linux/devfs_fs_kernel.h>
-#define OLD_DEVFS
-#else
-#include <linux/gfp.h>
-#endif
-
-#ifdef OLD_DEVFS
-/* NB. This must be shared amongst drivers if more things go in /dev/xen */
-static devfs_handle_t xen_dev_dir;
-#endif
-
-struct per_user_data {
- /* Notification ring, accessed via /dev/xen/evtchn. */
-# define RING_SIZE 2048 /* 2048 16-bit entries */
-# define RING_MASK(_i) ((_i)&(RING_SIZE-1))
- u16 *ring;
- unsigned int ring_cons, ring_prod, ring_overflow;
-
- /* Processes wait on this queue when ring is empty. */
- wait_queue_head_t evtchn_wait;
- struct fasync_struct *evtchn_async_queue;
-};
-
-/* Who's bound to each port? */
-static struct per_user_data *port_user[NR_EVENT_CHANNELS];
-static spinlock_t port_user_lock;
-
-void evtchn_device_upcall(int port)
-{
- struct per_user_data *u;
-
- spin_lock(&port_user_lock);
-
- mask_evtchn(port);
- clear_evtchn(port);
-
- if ( (u = port_user[port]) != NULL )
- {
- if ( (u->ring_prod - u->ring_cons) < RING_SIZE )
- {
- u->ring[RING_MASK(u->ring_prod)] = (u16)port;
- if ( u->ring_cons == u->ring_prod++ )
- {
- wake_up_interruptible(&u->evtchn_wait);
- kill_fasync(&u->evtchn_async_queue, SIGIO, POLL_IN);
- }
- }
- else
- {
- u->ring_overflow = 1;
- }
- }
-
- spin_unlock(&port_user_lock);
-}
-
-static ssize_t evtchn_read(struct file *file, char *buf,
- size_t count, loff_t *ppos)
-{
- int rc;
- unsigned int c, p, bytes1 = 0, bytes2 = 0;
- DECLARE_WAITQUEUE(wait, current);
- struct per_user_data *u = file->private_data;
-
- add_wait_queue(&u->evtchn_wait, &wait);
-
- count &= ~1; /* even number of bytes */
-
- if ( count == 0 )
- {
- rc = 0;
- goto out;
- }
-
- if ( count > PAGE_SIZE )
- count = PAGE_SIZE;
-
- for ( ; ; )
- {
- set_current_state(TASK_INTERRUPTIBLE);
-
- if ( (c = u->ring_cons) != (p = u->ring_prod) )
- break;
-
- if ( u->ring_overflow )
- {
- rc = -EFBIG;
- goto out;
- }
-
- if ( file->f_flags & O_NONBLOCK )
- {
- rc = -EAGAIN;
- goto out;
- }
-
- if ( signal_pending(current) )
- {
- rc = -ERESTARTSYS;
- goto out;
- }
-
- schedule();
- }
-
- /* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
- if ( ((c ^ p) & RING_SIZE) != 0 )
- {
- bytes1 = (RING_SIZE - RING_MASK(c)) * sizeof(u16);
- bytes2 = RING_MASK(p) * sizeof(u16);
- }
- else
- {
- bytes1 = (p - c) * sizeof(u16);
- bytes2 = 0;
- }
-
- /* Truncate chunks according to caller's maximum byte count. */
- if ( bytes1 > count )
- {
- bytes1 = count;
- bytes2 = 0;
- }
- else if ( (bytes1 + bytes2) > count )
- {
- bytes2 = count - bytes1;
- }
-
- if ( copy_to_user(buf, &u->ring[RING_MASK(c)], bytes1) ||
- ((bytes2 != 0) && copy_to_user(&buf[bytes1], &u->ring[0], bytes2)) )
- {
- rc = -EFAULT;
- goto out;
- }
-
- u->ring_cons += (bytes1 + bytes2) / sizeof(u16);
-
- rc = bytes1 + bytes2;
-
- out:
- __set_current_state(TASK_RUNNING);
- remove_wait_queue(&u->evtchn_wait, &wait);
- return rc;
-}
-
-static ssize_t evtchn_write(struct file *file, const char *buf,
- size_t count, loff_t *ppos)
-{
- int rc, i;
- u16 *kbuf = (u16 *)__get_free_page(GFP_KERNEL);
- struct per_user_data *u = file->private_data;
-
- if ( kbuf == NULL )
- return -ENOMEM;
-
- count &= ~1; /* even number of bytes */
-
- if ( count == 0 )
- {
- rc = 0;
- goto out;
- }
-
- if ( count > PAGE_SIZE )
- count = PAGE_SIZE;
-
- if ( copy_from_user(kbuf, buf, count) != 0 )
- {
- rc = -EFAULT;
- goto out;
- }
-
- spin_lock_irq(&port_user_lock);
- for ( i = 0; i < (count/2); i++ )
- if ( (kbuf[i] < NR_EVENT_CHANNELS) && (port_user[kbuf[i]] == u) )
- unmask_evtchn(kbuf[i]);
- spin_unlock_irq(&port_user_lock);
-
- rc = count;
-
- out:
- free_page((unsigned long)kbuf);
- return rc;
-}
-
-static int evtchn_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- int rc = 0;
- struct per_user_data *u = file->private_data;
-
- spin_lock_irq(&port_user_lock);
-
- switch ( cmd )
- {
- case EVTCHN_RESET:
- /* Initialise the ring to empty. Clear errors. */
- u->ring_cons = u->ring_prod = u->ring_overflow = 0;
- break;
-
- case EVTCHN_BIND:
- if ( arg >= NR_EVENT_CHANNELS )
- {
- rc = -EINVAL;
- }
- else if ( port_user[arg] != NULL )
- {
- rc = -EISCONN;
- }
- else
- {
- port_user[arg] = u;
- unmask_evtchn(arg);
- }
- break;
-
- case EVTCHN_UNBIND:
- if ( arg >= NR_EVENT_CHANNELS )
- {
- rc = -EINVAL;
- }
- else if ( port_user[arg] != u )
- {
- rc = -ENOTCONN;
- }
- else
- {
- port_user[arg] = NULL;
- mask_evtchn(arg);
- }
- break;
-
- default:
- rc = -ENOSYS;
- break;
- }
-
- spin_unlock_irq(&port_user_lock);
-
- return rc;
-}
-
-static unsigned int evtchn_poll(struct file *file, poll_table *wait)
-{
- unsigned int mask = POLLOUT | POLLWRNORM;
- struct per_user_data *u = file->private_data;
-
- poll_wait(file, &u->evtchn_wait, wait);
- if ( u->ring_cons != u->ring_prod )
- mask |= POLLIN | POLLRDNORM;
- if ( u->ring_overflow )
- mask = POLLERR;
- return mask;
-}
-
-static int evtchn_fasync(int fd, struct file *filp, int on)
-{
- struct per_user_data *u = filp->private_data;
- return fasync_helper(fd, filp, on, &u->evtchn_async_queue);
-}
-
-static int evtchn_open(struct inode *inode, struct file *filp)
-{
- struct per_user_data *u;
-
- if ( (u = kmalloc(sizeof(*u), GFP_KERNEL)) == NULL )
- return -ENOMEM;
-
- memset(u, 0, sizeof(*u));
- init_waitqueue_head(&u->evtchn_wait);
-
- if ( (u->ring = (u16 *)__get_free_page(GFP_KERNEL)) == NULL )
- {
- kfree(u);
- return -ENOMEM;
- }
-
- filp->private_data = u;
-
- return 0;
-}
-
-static int evtchn_release(struct inode *inode, struct file *filp)
-{
- int i;
- struct per_user_data *u = filp->private_data;
-
- spin_lock_irq(&port_user_lock);
-
- free_page((unsigned long)u->ring);
-
- for ( i = 0; i < NR_EVENT_CHANNELS; i++ )
- {
- if ( port_user[i] == u )
- {
- port_user[i] = NULL;
- mask_evtchn(i);
- }
- }
-
- spin_unlock_irq(&port_user_lock);
-
- return 0;
-}
-
-static struct file_operations evtchn_fops = {
-    .owner   = THIS_MODULE,
-    .read    = evtchn_read,
-    .write   = evtchn_write,
-    .ioctl   = evtchn_ioctl,
-    .poll    = evtchn_poll,
-    .fasync  = evtchn_fasync,
-    .open    = evtchn_open,
-    .release = evtchn_release
-};
-
-static struct miscdevice evtchn_miscdev = {
- .minor = EVTCHN_MINOR,
- .name = "evtchn",
- .fops = &evtchn_fops,
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
- .devfs_name = "misc/evtchn",
-#endif
-};
-
-static int __init evtchn_init(void)
-{
-#ifdef OLD_DEVFS
- devfs_handle_t symlink_handle;
- int pos;
- char link_dest[64];
-#endif
- int err;
-
- spin_lock_init(&port_user_lock);
- memset(port_user, 0, sizeof(port_user));
-
- /* (DEVFS) create '/dev/misc/evtchn'. */
- err = misc_register(&evtchn_miscdev);
- if ( err != 0 )
- {
- printk(KERN_ALERT "Could not register /dev/misc/evtchn\n");
- return err;
- }
-
-#ifdef OLD_DEVFS
- /* (DEVFS) create directory '/dev/xen'. */
- xen_dev_dir = devfs_mk_dir(NULL, "xen", NULL);
-
- /* (DEVFS) &link_dest[pos] == '../misc/evtchn'. */
- pos = devfs_generate_path(evtchn_miscdev.devfs_handle,
- &link_dest[3],
- sizeof(link_dest) - 3);
- if ( pos >= 0 )
- strncpy(&link_dest[pos], "../", 3);
-
- /* (DEVFS) symlink '/dev/xen/evtchn' -> '../misc/evtchn'. */
- (void)devfs_mk_symlink(xen_dev_dir,
- "evtchn",
- DEVFS_FL_DEFAULT,
- &link_dest[pos],
- &symlink_handle,
- NULL);
-
- /* (DEVFS) automatically destroy the symlink with its destination. */
- devfs_auto_unregister(evtchn_miscdev.devfs_handle, symlink_handle);
-#endif
-
- printk("Event-channel device installed.\n");
-
- return 0;
-}
-
-static void evtchn_cleanup(void)
-{
- misc_deregister(&evtchn_miscdev);
-}
-
-module_init(evtchn_init);
-module_exit(evtchn_cleanup);
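
The /dev/xen/evtchn device deleted above keeps free-running 16-bit producer/consumer indices over a power-of-two ring, so a read that straddles the wrap point must be split into two copy_to_user() chunks. Below is a standalone sketch of that split computation using the same RING_MASK arithmetic; the example indices are made up.

/* Sketch of the split-at-wrap read in evtchn_read() above. */
#include <stdio.h>
#include <stdint.h>

#define RING_SIZE 2048 /* must be a power of two */
#define RING_MASK(i) ((i) & (RING_SIZE - 1))

int main(void)
{
    /* Example: consumer just before the wrap, producer just after it. */
    unsigned int c = RING_SIZE - 3, p = RING_SIZE + 5;
    unsigned int bytes1, bytes2;

    if (((c ^ p) & RING_SIZE) != 0) {
        /* Indices differ in the RING_SIZE bit: data wraps, two chunks. */
        bytes1 = (RING_SIZE - RING_MASK(c)) * sizeof(uint16_t);
        bytes2 = RING_MASK(p) * sizeof(uint16_t);
    } else {
        bytes1 = (p - c) * sizeof(uint16_t);
        bytes2 = 0;
    }

    /* Prints 6 and 10: 3 entries before the wrap, 5 after it. */
    printf("chunk1=%u bytes, chunk2=%u bytes\n", bytes1, bytes2);
    return 0;
}
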
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/drivers/xen/netback/Makefile
--- a/linux-2.6.11-xen-sparse/drivers/xen/netback/Makefile Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,2 +0,0 @@
-
-obj-y := netback.o control.o interface.o
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/drivers/xen/netback/common.h
--- a/linux-2.6.11-xen-sparse/drivers/xen/netback/common.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,100 +0,0 @@
-/******************************************************************************
- * arch/xen/drivers/netif/backend/common.h
- */
-
-#ifndef __NETIF__BACKEND__COMMON_H__
-#define __NETIF__BACKEND__COMMON_H__
-
-#include <linux/config.h>
-#include <linux/version.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include <linux/ip.h>
-#include <linux/in.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <asm-xen/ctrl_if.h>
-#include <asm-xen/xen-public/io/netif.h>
-#include <asm/io.h>
-#include <asm/pgalloc.h>
-
-#if 0
-#define ASSERT(_p) \
- if ( !(_p) ) { printk("Assertion '%s' failed, line %d, file %s", #_p , \
- __LINE__, __FILE__); *(int*)0=0; }
-#define DPRINTK(_f, _a...) printk(KERN_ALERT "(file=%s, line=%d) " _f, \
- __FILE__ , __LINE__ , ## _a )
-#else
-#define ASSERT(_p) ((void)0)
-#define DPRINTK(_f, _a...) ((void)0)
-#endif
-
-typedef struct netif_st {
- /* Unique identifier for this interface. */
- domid_t domid;
- unsigned int handle;
-
- /* Physical parameters of the comms window. */
- unsigned long tx_shmem_frame;
- unsigned long rx_shmem_frame;
- unsigned int evtchn;
- int irq;
-
- /* The shared rings and indexes. */
- netif_tx_interface_t *tx;
- netif_rx_interface_t *rx;
-
- /* Private indexes into shared ring. */
- NETIF_RING_IDX rx_req_cons;
- NETIF_RING_IDX rx_resp_prod; /* private version of shared variable */
- NETIF_RING_IDX tx_req_cons;
- NETIF_RING_IDX tx_resp_prod; /* private version of shared variable */
-
- /* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
- unsigned long credit_bytes;
- unsigned long credit_usec;
- unsigned long remaining_credit;
- struct timer_list credit_timeout;
-
- /* Miscellaneous private stuff. */
- enum { DISCONNECTED, DISCONNECTING, CONNECTED } status;
- int active;
- /*
- * DISCONNECT response is deferred until pending requests are ack'ed.
- * We therefore need to store the id from the original request.
- */
- u8 disconnect_rspid;
- struct netif_st *hash_next;
- struct list_head list; /* scheduling list */
- atomic_t refcnt;
- struct net_device *dev;
- struct net_device_stats stats;
-
- struct work_struct work;
-} netif_t;
-
-void netif_create(netif_be_create_t *create);
-void netif_destroy(netif_be_destroy_t *destroy);
-void netif_connect(netif_be_connect_t *connect);
-int netif_disconnect(netif_be_disconnect_t *disconnect, u8 rsp_id);
-void netif_disconnect_complete(netif_t *netif);
-netif_t *netif_find_by_handle(domid_t domid, unsigned int handle);
-#define netif_get(_b) (atomic_inc(&(_b)->refcnt))
-#define netif_put(_b) \
- do { \
- if ( atomic_dec_and_test(&(_b)->refcnt) ) \
- netif_disconnect_complete(_b); \
- } while (0)
-
-void netif_interface_init(void);
-void netif_ctrlif_init(void);
-
-void netif_schedule_work(netif_t *netif);
-void netif_deschedule_work(netif_t *netif);
-
-int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev);
-struct net_device_stats *netif_be_get_stats(struct net_device *dev);
-irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs);
-
-#endif /* __NETIF__BACKEND__COMMON_H__ */
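
The netif_get()/netif_put() macros above implement deferred teardown by reference count: the DISCONNECT response is only completed once the last outstanding reference drops. Below is a single-threaded sketch of the idiom; the real code uses atomic_t and atomic_dec_and_test(), and the names here are illustrative.

/* Sketch of the get/put deferred-teardown idiom in common.h above. */
#include <stdio.h>

struct obj { int refcnt; };

static void disconnect_complete(struct obj *o)
{
    printf("refcnt of %p hit zero; completing disconnect\n", (void *)o);
}

#define obj_get(_o)  ((_o)->refcnt++)
#define obj_put(_o)                             \
    do {                                        \
        if (--(_o)->refcnt == 0)                \
            disconnect_complete(_o);            \
    } while (0)

int main(void)
{
    struct obj o = { .refcnt = 1 }; /* creator holds one reference */
    obj_get(&o);  /* e.g. a queued packet takes a reference */
    obj_put(&o);  /* packet completed */
    obj_put(&o);  /* creator drops its reference: the hook fires */
    return 0;
}
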
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/drivers/xen/netback/control.c
--- a/linux-2.6.11-xen-sparse/drivers/xen/netback/control.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,65 +0,0 @@
-/******************************************************************************
- * arch/xen/drivers/netif/backend/control.c
- *
- * Routines for interfacing with the control plane.
- *
- * Copyright (c) 2004, Keir Fraser
- */
-
-#include "common.h"
-
-static void netif_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
-{
- switch ( msg->subtype )
- {
- case CMSG_NETIF_BE_CREATE:
- if ( msg->length != sizeof(netif_be_create_t) )
- goto parse_error;
- netif_create((netif_be_create_t *)&msg->msg[0]);
- break;
- case CMSG_NETIF_BE_DESTROY:
- if ( msg->length != sizeof(netif_be_destroy_t) )
- goto parse_error;
- netif_destroy((netif_be_destroy_t *)&msg->msg[0]);
- break;
- case CMSG_NETIF_BE_CONNECT:
- if ( msg->length != sizeof(netif_be_connect_t) )
- goto parse_error;
- netif_connect((netif_be_connect_t *)&msg->msg[0]);
- break;
- case CMSG_NETIF_BE_DISCONNECT:
- if ( msg->length != sizeof(netif_be_disconnect_t) )
- goto parse_error;
- if ( !netif_disconnect((netif_be_disconnect_t *)&msg->msg[0],msg->id) )
- return; /* Sending the response is deferred until later. */
- break;
- default:
- goto parse_error;
- }
-
- ctrl_if_send_response(msg);
- return;
-
- parse_error:
- DPRINTK("Parse error while reading message subtype %d, len %d\n",
- msg->subtype, msg->length);
- msg->length = 0;
- ctrl_if_send_response(msg);
-}
-
-void netif_ctrlif_init(void)
-{
- ctrl_msg_t cmsg;
- netif_be_driver_status_t st;
-
- (void)ctrl_if_register_receiver(CMSG_NETIF_BE, netif_ctrlif_rx,
- CALLBACK_IN_BLOCKING_CONTEXT);
-
- /* Send a driver-UP notification to the domain controller. */
- cmsg.type = CMSG_NETIF_BE;
- cmsg.subtype = CMSG_NETIF_BE_DRIVER_STATUS;
- cmsg.length = sizeof(netif_be_driver_status_t);
- st.status = NETIF_DRIVER_STATUS_UP;
- memcpy(cmsg.msg, &st, sizeof(st));
- ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
-}
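
netif_ctrlif_rx() above follows a common control-plane pattern: validate each subtype's payload length before casting, respond immediately for most messages, and let one handler (DISCONNECT) defer its response until outstanding work drains. Below is a standalone sketch under those assumptions; msg, create_req, disc_req and the subtypes are made-up stand-ins, not the Xen ctrl_if API.

/* Sketch of the length-checked dispatch with one deferrable response. */
#include <stdio.h>

struct msg { int subtype; size_t length; unsigned char payload[64]; };
struct create_req { int handle; };
struct disc_req   { int handle; };

enum { SUBTYPE_CREATE = 1, SUBTYPE_DISCONNECT = 2 };

static void send_response(struct msg *m)
{
    printf("response sent, subtype %d, length %zu\n", m->subtype, m->length);
}

static void do_create(struct create_req *r)
{
    printf("created interface %d\n", r->handle);
}

/* Returns 0 if the response will be sent later, nonzero to respond now. */
static int do_disconnect(struct disc_req *r)
{
    printf("disconnect of %d deferred\n", r->handle);
    return 0;
}

static void ctrl_rx(struct msg *m)
{
    switch (m->subtype) {
    case SUBTYPE_CREATE:
        if (m->length != sizeof(struct create_req))
            goto parse_error;
        do_create((struct create_req *)m->payload);
        break;
    case SUBTYPE_DISCONNECT:
        if (m->length != sizeof(struct disc_req))
            goto parse_error;
        if (!do_disconnect((struct disc_req *)m->payload))
            return; /* response deferred until teardown completes */
        break;
    default:
        goto parse_error;
    }
    send_response(m);
    return;

 parse_error:
    m->length = 0; /* a zero-length reply signals a parse error */
    send_response(m);
}

int main(void)
{
    struct msg m = { SUBTYPE_CREATE, sizeof(struct create_req), { 0 } };
    ctrl_rx(&m);
    m.subtype = SUBTYPE_DISCONNECT;
    m.length = sizeof(struct disc_req);
    ctrl_rx(&m); /* prints no "response sent" line: it was deferred */
    return 0;
}
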
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/drivers/xen/netback/interface.c
--- a/linux-2.6.11-xen-sparse/drivers/xen/netback/interface.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,340 +0,0 @@
-/******************************************************************************
- * arch/xen/drivers/netif/backend/interface.c
- *
- * Network-device interface management.
- *
- * Copyright (c) 2004, Keir Fraser
- */
-
-#include "common.h"
-#include <linux/rtnetlink.h>
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-#define VMALLOC_VMADDR(x) ((unsigned long)(x))
-#endif
-
-#define NETIF_HASHSZ 1024
-#define NETIF_HASH(_d,_h) (((int)(_d)^(int)(_h))&(NETIF_HASHSZ-1))
-
-static netif_t *netif_hash[NETIF_HASHSZ];
-
-netif_t *netif_find_by_handle(domid_t domid, unsigned int handle)
-{
- netif_t *netif = netif_hash[NETIF_HASH(domid, handle)];
- while ( (netif != NULL) &&
- ((netif->domid != domid) || (netif->handle != handle)) )
- netif = netif->hash_next;
- return netif;
-}
-
-static void __netif_up(netif_t *netif)
-{
- struct net_device *dev = netif->dev;
- spin_lock_bh(&dev->xmit_lock);
- netif->active = 1;
- spin_unlock_bh(&dev->xmit_lock);
- (void)request_irq(netif->irq, netif_be_int, 0, dev->name, netif);
- netif_schedule_work(netif);
-}
-
-static void __netif_down(netif_t *netif)
-{
- struct net_device *dev = netif->dev;
- spin_lock_bh(&dev->xmit_lock);
- netif->active = 0;
- spin_unlock_bh(&dev->xmit_lock);
- free_irq(netif->irq, netif);
- netif_deschedule_work(netif);
-}
-
-static int net_open(struct net_device *dev)
-{
- netif_t *netif = netdev_priv(dev);
- if ( netif->status == CONNECTED )
- __netif_up(netif);
- netif_start_queue(dev);
- return 0;
-}
-
-static int net_close(struct net_device *dev)
-{
- netif_t *netif = netdev_priv(dev);
- netif_stop_queue(dev);
- if ( netif->status == CONNECTED )
- __netif_down(netif);
- return 0;
-}
-
-static void __netif_disconnect_complete(void *arg)
-{
- netif_t *netif = (netif_t *)arg;
- ctrl_msg_t cmsg;
- netif_be_disconnect_t disc;
-
- /*
- * These can't be done in netif_disconnect() because at that point there
- * may be outstanding requests in the network stack whose asynchronous
- * responses must still be notified to the remote driver.
- */
- unbind_evtchn_from_irq(netif->evtchn);
- vfree(netif->tx); /* Frees netif->rx as well. */
-
- /* Construct the deferred response message. */
- cmsg.type = CMSG_NETIF_BE;
- cmsg.subtype = CMSG_NETIF_BE_DISCONNECT;
- cmsg.id = netif->disconnect_rspid;
- cmsg.length = sizeof(netif_be_disconnect_t);
- disc.domid = netif->domid;
- disc.netif_handle = netif->handle;
- disc.status = NETIF_BE_STATUS_OKAY;
- memcpy(cmsg.msg, &disc, sizeof(disc));
-
- /*
- * Make sure message is constructed /before/ status change, because
- * after the status change the 'netif' structure could be deallocated at
- * any time. Also make sure we send the response /after/ status change,
- * as otherwise a subsequent CONNECT request could spuriously fail if
- * another CPU doesn't see the status change yet.
- */
- mb();
- if ( netif->status != DISCONNECTING )
- BUG();
- netif->status = DISCONNECTED;
- mb();
-
- /* Send the successful response. */
- ctrl_if_send_response(&cmsg);
-}
-
-void netif_disconnect_complete(netif_t *netif)
-{
- INIT_WORK(&netif->work, __netif_disconnect_complete, (void *)netif);
- schedule_work(&netif->work);
-}
-
-void netif_create(netif_be_create_t *create)
-{
- int err = 0;
- domid_t domid = create->domid;
- unsigned int handle = create->netif_handle;
- struct net_device *dev;
- netif_t **pnetif, *netif;
- char name[IFNAMSIZ] = {};
-
- snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
- dev = alloc_netdev(sizeof(netif_t), name, ether_setup);
- if ( dev == NULL )
- {
- DPRINTK("Could not create netif: out of memory\n");
- create->status = NETIF_BE_STATUS_OUT_OF_MEMORY;
- return;
- }
-
- netif = netdev_priv(dev);
- memset(netif, 0, sizeof(*netif));
- netif->domid = domid;
- netif->handle = handle;
- netif->status = DISCONNECTED;
- atomic_set(&netif->refcnt, 0);
- netif->dev = dev;
-
- netif->credit_bytes = netif->remaining_credit = ~0UL;
- netif->credit_usec = 0UL;
- /*init_ac_timer(&new_vif->credit_timeout);*/
-
- pnetif = &netif_hash[NETIF_HASH(domid, handle)];
- while ( *pnetif != NULL )
- {
- if ( ((*pnetif)->domid == domid) && ((*pnetif)->handle == handle) )
- {
- DPRINTK("Could not create netif: already exists\n");
- create->status = NETIF_BE_STATUS_INTERFACE_EXISTS;
- free_netdev(dev);
- return;
- }
- pnetif = &(*pnetif)->hash_next;
- }
-
- dev->hard_start_xmit = netif_be_start_xmit;
- dev->get_stats = netif_be_get_stats;
- dev->open = net_open;
- dev->stop = net_close;
-
- /* Disable queuing. */
- dev->tx_queue_len = 0;
-
- /*
- * Initialise a dummy MAC address. We choose the numerically largest
- * non-broadcast address to prevent the address getting stolen by an
- * Ethernet bridge for STP purposes. (FE:FF:FF:FF:FF:FF)
- */
- memset(dev->dev_addr, 0xFF, ETH_ALEN);
- dev->dev_addr[0] &= ~0x01;
-
- rtnl_lock();
- err = register_netdevice(dev);
- rtnl_unlock();
-
- if ( err != 0 )
- {
- DPRINTK("Could not register new net device %s: err=%d\n",
- dev->name, err);
- create->status = NETIF_BE_STATUS_OUT_OF_MEMORY;
- free_netdev(dev);
- return;
- }
-
- netif->hash_next = *pnetif;
- *pnetif = netif;
-
- DPRINTK("Successfully created netif\n");
- create->status = NETIF_BE_STATUS_OKAY;
-}
-
-void netif_destroy(netif_be_destroy_t *destroy)
-{
- domid_t domid = destroy->domid;
- unsigned int handle = destroy->netif_handle;
- netif_t **pnetif, *netif;
-
- pnetif = &netif_hash[NETIF_HASH(domid, handle)];
- while ( (netif = *pnetif) != NULL )
- {
- if ( (netif->domid == domid) && (netif->handle == handle) )
- {
- if ( netif->status != DISCONNECTED )
- goto still_connected;
- goto destroy;
- }
- pnetif = &netif->hash_next;
- }
-
- destroy->status = NETIF_BE_STATUS_INTERFACE_NOT_FOUND;
- return;
-
- still_connected:
- destroy->status = NETIF_BE_STATUS_INTERFACE_CONNECTED;
- return;
-
- destroy:
- *pnetif = netif->hash_next;
- unregister_netdev(netif->dev);
- free_netdev(netif->dev);
- destroy->status = NETIF_BE_STATUS_OKAY;
-}
-
-void netif_connect(netif_be_connect_t *connect)
-{
- domid_t domid = connect->domid;
- unsigned int handle = connect->netif_handle;
- unsigned int evtchn = connect->evtchn;
- unsigned long tx_shmem_frame = connect->tx_shmem_frame;
- unsigned long rx_shmem_frame = connect->rx_shmem_frame;
- struct vm_struct *vma;
- pgprot_t prot;
- int error;
- netif_t *netif;
-#if 0
- struct net_device *eth0_dev;
-#endif
-
- netif = netif_find_by_handle(domid, handle);
- if ( unlikely(netif == NULL) )
- {
- DPRINTK("netif_connect attempted for non-existent netif (%u,%u)\n",
- connect->domid, connect->netif_handle);
- connect->status = NETIF_BE_STATUS_INTERFACE_NOT_FOUND;
- return;
- }
-
- if ( netif->status != DISCONNECTED )
- {
- connect->status = NETIF_BE_STATUS_INTERFACE_CONNECTED;
- return;
- }
-
- if ( (vma = get_vm_area(2*PAGE_SIZE, VM_IOREMAP)) == NULL )
- {
- connect->status = NETIF_BE_STATUS_OUT_OF_MEMORY;
- return;
- }
-
- prot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED);
- error = direct_remap_area_pages(&init_mm,
- VMALLOC_VMADDR(vma->addr),
- tx_shmem_frame<<PAGE_SHIFT, PAGE_SIZE,
- prot, domid);
- error |= direct_remap_area_pages(&init_mm,
- VMALLOC_VMADDR(vma->addr) + PAGE_SIZE,
- rx_shmem_frame<<PAGE_SHIFT, PAGE_SIZE,
- prot, domid);
- if ( error != 0 )
- {
- if ( error == -ENOMEM )
- connect->status = NETIF_BE_STATUS_OUT_OF_MEMORY;
- else if ( error == -EFAULT )
- connect->status = NETIF_BE_STATUS_MAPPING_ERROR;
- else
- connect->status = NETIF_BE_STATUS_ERROR;
- vfree(vma->addr);
- return;
- }
-
- netif->evtchn = evtchn;
- netif->irq = bind_evtchn_to_irq(evtchn);
- netif->tx_shmem_frame = tx_shmem_frame;
- netif->rx_shmem_frame = rx_shmem_frame;
- netif->tx =
- (netif_tx_interface_t *)vma->addr;
- netif->rx =
- (netif_rx_interface_t *)((char *)vma->addr + PAGE_SIZE);
- netif->tx->resp_prod = netif->rx->resp_prod = 0;
- netif_get(netif);
- wmb(); /* Other CPUs see new state before interface is started. */
-
- rtnl_lock();
- netif->status = CONNECTED;
- wmb();
- if ( netif_running(netif->dev) )
- __netif_up(netif);
- rtnl_unlock();
-
- connect->status = NETIF_BE_STATUS_OKAY;
-}
-
-int netif_disconnect(netif_be_disconnect_t *disconnect, u8 rsp_id)
-{
- domid_t domid = disconnect->domid;
- unsigned int handle = disconnect->netif_handle;
- netif_t *netif;
-
- netif = netif_find_by_handle(domid, handle);
- if ( unlikely(netif == NULL) )
- {
- DPRINTK("netif_disconnect attempted for non-existent netif"
- " (%u,%u)\n", disconnect->domid, disconnect->netif_handle);
- disconnect->status = NETIF_BE_STATUS_INTERFACE_NOT_FOUND;
- return 1; /* Caller will send response error message. */
- }
-
- if ( netif->status == CONNECTED )
- {
- rtnl_lock();
- netif->status = DISCONNECTING;
- netif->disconnect_rspid = rsp_id;
- wmb();
- if ( netif_running(netif->dev) )
- __netif_down(netif);
- rtnl_unlock();
- netif_put(netif);
- return 0; /* Caller should not send response message. */
- }
-
- disconnect->status = NETIF_BE_STATUS_OKAY;
- return 1;
-}
-
-void netif_interface_init(void)
-{
- memset(netif_hash, 0, sizeof(netif_hash));
-}
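
interface.c above keys its interfaces by (domid, handle) in a chained hash table, and netif_destroy() unlinks entries by walking a pointer-to-pointer so no separate "previous" node needs tracking. Below is a standalone sketch of that scheme; the bucket count and node type are illustrative.

/* Sketch of the chained hash table and pointer-to-pointer unlink. */
#include <stdio.h>

#define HASHSZ 16 /* power of two, as NETIF_HASHSZ is */
#define HASH(_d, _h) (((unsigned)(_d) ^ (unsigned)(_h)) & (HASHSZ - 1))

struct node { unsigned domid, handle; struct node *hash_next; };
static struct node *table[HASHSZ];

static void insert_node(struct node *n)
{
    struct node **pn = &table[HASH(n->domid, n->handle)];
    n->hash_next = *pn; /* push onto the bucket's chain */
    *pn = n;
}

static struct node *find_node(unsigned domid, unsigned handle)
{
    struct node *n = table[HASH(domid, handle)];
    while (n != NULL && (n->domid != domid || n->handle != handle))
        n = n->hash_next;
    return n;
}

static void remove_node(unsigned domid, unsigned handle)
{
    /* Walk a pointer-to-pointer so unlinking needs no 'prev' node. */
    struct node **pn = &table[HASH(domid, handle)];
    while (*pn != NULL) {
        if ((*pn)->domid == domid && (*pn)->handle == handle) {
            *pn = (*pn)->hash_next;
            return;
        }
        pn = &(*pn)->hash_next;
    }
}

int main(void)
{
    struct node a = { 1, 0, NULL }, b = { 1, 1, NULL };
    insert_node(&a);
    insert_node(&b);
    printf("(1,1) present: %s\n", find_node(1, 1) ? "yes" : "no");
    remove_node(1, 1);
    printf("(1,1) present: %s\n", find_node(1, 1) ? "yes" : "no");
    return 0;
}
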
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/drivers/xen/netback/loopback.c
--- a/linux-2.6.11-xen-sparse/drivers/xen/netback/loopback.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,148 +0,0 @@
-/******************************************************************************
- * netback/loopback.c
- *
- * A two-interface loopback device to emulate a local netfront-netback
- * connection. This ensures that local packet delivery looks identical
- * to inter-domain delivery. Most importantly, packets delivered locally
- * originating from other domains will get *copied* when they traverse this
- * driver. This prevents unbounded delays in socket-buffer queues from
- * causing the netback driver to "seize up".
- *
- * This driver creates a symmetric pair of loopback interfaces with names
- * vif0.0 and veth0. The intention is that 'vif0.0' is bound to an Ethernet
- * bridge, just like a proper netback interface, while a local IP interface
- * is configured on 'veth0'.
- *
- * As with a real netback interface, vif0.0 is configured with a suitable
- * dummy MAC address. No default is provided for veth0: a reasonable strategy
- * is to transfer eth0's MAC address to veth0, and give eth0 a dummy address
- * (to avoid confusing the Etherbridge).
- *
- * Copyright (c) 2005 K A Fraser
- */
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/inetdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <net/dst.h>
-
-struct net_private {
- struct net_device *loopback_dev;
- struct net_device_stats stats;
-};
-
-static int loopback_open(struct net_device *dev)
-{
- struct net_private *np = netdev_priv(dev);
- memset(&np->stats, 0, sizeof(np->stats));
- netif_start_queue(dev);
- return 0;
-}
-
-static int loopback_close(struct net_device *dev)
-{
- netif_stop_queue(dev);
- return 0;
-}
-
-static int loopback_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- struct net_private *np = netdev_priv(dev);
-
- dst_release(skb->dst);
- skb->dst = NULL;
-
- skb_orphan(skb);
-
- np->stats.tx_bytes += skb->len;
- np->stats.tx_packets++;
-
- /* Switch to loopback context. */
- dev = np->loopback_dev;
- np = netdev_priv(dev);
-
- np->stats.rx_bytes += skb->len;
- np->stats.rx_packets++;
-
- skb->pkt_type = PACKET_HOST; /* overridden by eth_type_trans() */
- skb->protocol = eth_type_trans(skb, dev);
- skb->dev = dev;
- dev->last_rx = jiffies;
- netif_rx(skb);
-
- return 0;
-}
-
-static struct net_device_stats *loopback_get_stats(struct net_device *dev)
-{
- struct net_private *np = netdev_priv(dev);
- return &np->stats;
-}
-
-static void loopback_construct(struct net_device *dev, struct net_device *lo)
-{
- struct net_private *np = netdev_priv(dev);
-
- np->loopback_dev = lo;
-
- dev->open = loopback_open;
- dev->stop = loopback_close;
- dev->hard_start_xmit = loopback_start_xmit;
- dev->get_stats = loopback_get_stats;
-
- dev->tx_queue_len = 0;
-
- /*
- * We do not set a jumbo MTU on the interface. Otherwise the network
- * stack will try to send large packets that will get dropped by the
- * Ethernet bridge (unless the physical Ethernet interface is configured
- * to transfer jumbo packets). If a larger MTU is desired then the system
- * administrator can specify it using the 'ifconfig' command.
- */
- /*dev->mtu = 16*1024;*/
-}
-
-static int __init loopback_init(void)
-{
- struct net_device *dev1, *dev2;
- int err = -ENOMEM;
-
- dev1 = alloc_netdev(sizeof(struct net_private), "vif0.0", ether_setup);
- dev2 = alloc_netdev(sizeof(struct net_private), "veth0", ether_setup);
- if ( (dev1 == NULL) || (dev2 == NULL) )
- goto fail;
-
- loopback_construct(dev1, dev2);
- loopback_construct(dev2, dev1);
-
- /*
- * Initialise a dummy MAC address for the 'dummy backend' interface. We
- * choose the numerically largest non-broadcast address to prevent the
- * address getting stolen by an Ethernet bridge for STP purposes.
- */
- memset(dev1->dev_addr, 0xFF, ETH_ALEN);
- dev1->dev_addr[0] &= ~0x01;
-
- if ( (err = register_netdev(dev1)) != 0 )
- goto fail;
-
- if ( (err = register_netdev(dev2)) != 0 )
- {
- unregister_netdev(dev1);
- goto fail;
- }
-
- return 0;
-
- fail:
-    if ( dev1 != NULL )
-        free_netdev(dev1);
-    if ( dev2 != NULL )
-        free_netdev(dev2);
- return err;
-}
-
-module_init(loopback_init);
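
loopback_start_xmit() above implements the vif0.0/veth0 pairing by accounting the packet as a transmit on the sending interface, switching to the partner's private data, and injecting it as a receive there. Below is a standalone sketch of that handoff, reduced to statistics with no skbs; the type and names are illustrative.

/* Sketch of the paired-device handoff in loopback_start_xmit() above. */
#include <stdio.h>

struct stats { unsigned long tx_packets, rx_packets; };
struct pair_dev { const char *name; struct pair_dev *peer; struct stats stats; };

static void pair_xmit(struct pair_dev *dev, unsigned int len)
{
    dev->stats.tx_packets++;      /* counted as tx on the sending side */
    dev = dev->peer;              /* switch to the partner interface */
    dev->stats.rx_packets++;      /* and as rx on the receiving side */
    printf("%s received a %u-byte packet\n", dev->name, len);
}

int main(void)
{
    struct pair_dev a = { "vif0.0", NULL, { 0, 0 } };
    struct pair_dev b = { "veth0",  &a,   { 0, 0 } };
    a.peer = &b;

    pair_xmit(&b, 60); /* a send on veth0 appears as a receive on vif0.0 */
    printf("veth0 tx=%lu, vif0.0 rx=%lu\n",
           b.stats.tx_packets, a.stats.rx_packets);
    return 0;
}
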
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/drivers/xen/netback/netback.c
--- a/linux-2.6.11-xen-sparse/drivers/xen/netback/netback.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,794 +0,0 @@
-/******************************************************************************
- * drivers/xen/netback/netback.c
- *
- * Back-end of the driver for virtual network devices. This portion of the
- * driver exports a 'unified' network-device interface that can be accessed
- * by any operating system that implements a compatible front end. A
- * reference front-end implementation can be found in:
- * drivers/xen/netfront/netfront.c
- *
- * Copyright (c) 2002-2004, K A Fraser
- */
-
-#include "common.h"
-#include <asm-xen/balloon.h>
-
-static void netif_idx_release(u16 pending_idx);
-static void netif_page_release(struct page *page);
-static void make_tx_response(netif_t *netif,
- u16 id,
- s8 st);
-static int make_rx_response(netif_t *netif,
- u16 id,
- s8 st,
- memory_t addr,
- u16 size);
-
-static void net_tx_action(unsigned long unused);
-static DECLARE_TASKLET(net_tx_tasklet, net_tx_action, 0);
-
-static void net_rx_action(unsigned long unused);
-static DECLARE_TASKLET(net_rx_tasklet, net_rx_action, 0);
-
-static struct timer_list net_timer;
-
-static struct sk_buff_head rx_queue;
-static multicall_entry_t rx_mcl[NETIF_RX_RING_SIZE*2];
-static mmu_update_t rx_mmu[NETIF_RX_RING_SIZE*3];
-static unsigned char rx_notify[NR_EVENT_CHANNELS];
-
-/* Don't currently gate addition of an interface to the tx scheduling list. */
-#define tx_work_exists(_if) (1)
-
-#define MAX_PENDING_REQS 256
-static unsigned long mmap_vstart;
-#define MMAP_VADDR(_req) (mmap_vstart + ((_req) * PAGE_SIZE))
-
-#define PKT_PROT_LEN 64
-
-static struct {
- netif_tx_request_t req;
- netif_t *netif;
-} pending_tx_info[MAX_PENDING_REQS];
-static u16 pending_ring[MAX_PENDING_REQS];
-typedef unsigned int PEND_RING_IDX;
-#define MASK_PEND_IDX(_i) ((_i)&(MAX_PENDING_REQS-1))
-static PEND_RING_IDX pending_prod, pending_cons;
-#define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)
-
-/* Freed TX SKBs get batched on this ring before return to pending_ring. */
-static u16 dealloc_ring[MAX_PENDING_REQS];
-static PEND_RING_IDX dealloc_prod, dealloc_cons;
-
-static struct sk_buff_head tx_queue;
-static multicall_entry_t tx_mcl[MAX_PENDING_REQS];
-
-static struct list_head net_schedule_list;
-static spinlock_t net_schedule_list_lock;
-
-#define MAX_MFN_ALLOC 64
-static unsigned long mfn_list[MAX_MFN_ALLOC];
-static unsigned int alloc_index = 0;
-static spinlock_t mfn_lock = SPIN_LOCK_UNLOCKED;
-
-static unsigned long alloc_mfn(void)
-{
- unsigned long mfn = 0, flags;
- spin_lock_irqsave(&mfn_lock, flags);
- if ( unlikely(alloc_index == 0) )
- alloc_index = HYPERVISOR_dom_mem_op(
- MEMOP_increase_reservation, mfn_list, MAX_MFN_ALLOC, 0);
- if ( alloc_index != 0 )
- mfn = mfn_list[--alloc_index];
- spin_unlock_irqrestore(&mfn_lock, flags);
- return mfn;
-}
-
-static void free_mfn(unsigned long mfn)
-{
- unsigned long flags;
- spin_lock_irqsave(&mfn_lock, flags);
- if ( alloc_index != MAX_MFN_ALLOC )
- mfn_list[alloc_index++] = mfn;
- else if ( HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation,
- &mfn, 1, 0) != 1 )
- BUG();
- spin_unlock_irqrestore(&mfn_lock, flags);
-}
-
-static inline void maybe_schedule_tx_action(void)
-{
- smp_mb();
- if ( (NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
- !list_empty(&net_schedule_list) )
- tasklet_schedule(&net_tx_tasklet);
-}
-
-/*
- * A gross way of confirming the origin of an skb data page. The slab
- * allocator abuses a field in the page struct to cache the kmem_cache_t ptr.
- */
-static inline int is_xen_skb(struct sk_buff *skb)
-{
- extern kmem_cache_t *skbuff_cachep;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
- kmem_cache_t *cp = (kmem_cache_t *)virt_to_page(skb->head)->lru.next;
-#else
- kmem_cache_t *cp = (kmem_cache_t *)virt_to_page(skb->head)->list.next;
-#endif
- return (cp == skbuff_cachep);
-}
-
-int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- netif_t *netif = netdev_priv(dev);
-
- ASSERT(skb->dev == dev);
-
- /* Drop the packet if the target domain has no receive buffers. */
- if ( !netif->active ||
- (netif->rx_req_cons == netif->rx->req_prod) ||
- ((netif->rx_req_cons-netif->rx_resp_prod) == NETIF_RX_RING_SIZE) )
- goto drop;
-
- /*
- * We do not copy the packet unless:
- * 1. The data is shared; or
- * 2. The data is not allocated from our special cache.
- * NB. We also couldn't cope with fragmented packets, but we won't get
-     * any because we do not advertise the NETIF_F_SG feature.
- */
- if ( skb_shared(skb) || skb_cloned(skb) || !is_xen_skb(skb) )
- {
- int hlen = skb->data - skb->head;
- struct sk_buff *nskb = dev_alloc_skb(hlen + skb->len);
- if ( unlikely(nskb == NULL) )
- goto drop;
- skb_reserve(nskb, hlen);
- __skb_put(nskb, skb->len);
- (void)skb_copy_bits(skb, -hlen, nskb->data - hlen, skb->len + hlen);
- nskb->dev = skb->dev;
- dev_kfree_skb(skb);
- skb = nskb;
- }
-
- netif->rx_req_cons++;
- netif_get(netif);
-
- skb_queue_tail(&rx_queue, skb);
- tasklet_schedule(&net_rx_tasklet);
-
- return 0;
-
- drop:
- netif->stats.tx_dropped++;
- dev_kfree_skb(skb);
- return 0;
-}
-
-#if 0
-static void xen_network_done_notify(void)
-{
- static struct net_device *eth0_dev = NULL;
- if ( unlikely(eth0_dev == NULL) )
- eth0_dev = __dev_get_by_name("eth0");
- netif_rx_schedule(eth0_dev);
-}
-/*
- * Add following to poll() function in NAPI driver (Tigon3 is example):
- * if ( xen_network_done() )
- * tg3_enable_ints(tp);
- */
-int xen_network_done(void)
-{
- return skb_queue_empty(&rx_queue);
-}
-#endif
-
-static void net_rx_action(unsigned long unused)
-{
- netif_t *netif;
- s8 status;
- u16 size, id, evtchn;
- mmu_update_t *mmu;
- multicall_entry_t *mcl;
- unsigned long vdata, mdata, new_mfn;
- struct sk_buff_head rxq;
- struct sk_buff *skb;
- u16 notify_list[NETIF_RX_RING_SIZE];
- int notify_nr = 0;
-
- skb_queue_head_init(&rxq);
-
- mcl = rx_mcl;
- mmu = rx_mmu;
- while ( (skb = skb_dequeue(&rx_queue)) != NULL )
- {
- netif = netdev_priv(skb->dev);
- vdata = (unsigned long)skb->data;
- mdata = virt_to_machine(vdata);
-
- /* Memory squeeze? Back off for an arbitrary while. */
- if ( (new_mfn = alloc_mfn()) == 0 )
- {
- if ( net_ratelimit() )
- printk(KERN_WARNING "Memory squeeze in netback driver.\n");
- mod_timer(&net_timer, jiffies + HZ);
- skb_queue_head(&rx_queue, skb);
- break;
- }
-
- /*
- * Set the new P2M table entry before reassigning the old data page.
- * Heed the comment in pgtable-2level.h:pte_page(). :-)
- */
- phys_to_machine_mapping[__pa(skb->data) >> PAGE_SHIFT] = new_mfn;
-
- mmu[0].ptr = (new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
- mmu[0].val = __pa(vdata) >> PAGE_SHIFT;
- mmu[1].ptr = MMU_EXTENDED_COMMAND;
- mmu[1].val = MMUEXT_SET_FOREIGNDOM;
- mmu[1].val |= (unsigned long)netif->domid << 16;
- mmu[2].ptr = (mdata & PAGE_MASK) | MMU_EXTENDED_COMMAND;
- mmu[2].val = MMUEXT_REASSIGN_PAGE;
-
- mcl[0].op = __HYPERVISOR_update_va_mapping;
- mcl[0].args[0] = vdata >> PAGE_SHIFT;
- mcl[0].args[1] = (new_mfn << PAGE_SHIFT) | __PAGE_KERNEL;
- mcl[0].args[2] = 0;
- mcl[1].op = __HYPERVISOR_mmu_update;
- mcl[1].args[0] = (unsigned long)mmu;
- mcl[1].args[1] = 3;
- mcl[1].args[2] = 0;
-
- mcl += 2;
- mmu += 3;
-
- __skb_queue_tail(&rxq, skb);
-
- /* Filled the batch queue? */
- if ( (mcl - rx_mcl) == ARRAY_SIZE(rx_mcl) )
- break;
- }
-
- if ( mcl == rx_mcl )
- return;
-
- mcl[-2].args[2] = UVMF_FLUSH_TLB;
- if ( unlikely(HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl) != 0) )
- BUG();
-
- mcl = rx_mcl;
- mmu = rx_mmu;
- while ( (skb = __skb_dequeue(&rxq)) != NULL )
- {
- netif = netdev_priv(skb->dev);
- size = skb->tail - skb->data;
-
- /* Rederive the machine addresses. */
- new_mfn = mcl[0].args[1] >> PAGE_SHIFT;
- mdata = ((mmu[2].ptr & PAGE_MASK) |
- ((unsigned long)skb->data & ~PAGE_MASK));
-
- atomic_set(&(skb_shinfo(skb)->dataref), 1);
- skb_shinfo(skb)->nr_frags = 0;
- skb_shinfo(skb)->frag_list = NULL;
-
- netif->stats.tx_bytes += size;
- netif->stats.tx_packets++;
-
- /* The update_va_mapping() must not fail. */
- if ( unlikely(mcl[0].args[5] != 0) )
- BUG();
-
- /* Check the reassignment error code. */
- status = NETIF_RSP_OKAY;
- if ( unlikely(mcl[1].args[5] != 0) )
- {
- DPRINTK("Failed MMU update transferring to DOM%u\n", netif->domid);
- free_mfn(mdata >> PAGE_SHIFT);
- status = NETIF_RSP_ERROR;
- }
-
- evtchn = netif->evtchn;
- id = netif->rx->ring[MASK_NETIF_RX_IDX(netif->rx_resp_prod)].req.id;
- if ( make_rx_response(netif, id, status, mdata, size) &&
- (rx_notify[evtchn] == 0) )
- {
- rx_notify[evtchn] = 1;
- notify_list[notify_nr++] = evtchn;
- }
-
- netif_put(netif);
- dev_kfree_skb(skb);
-
- mcl += 2;
- mmu += 3;
- }
-
- while ( notify_nr != 0 )
- {
- evtchn = notify_list[--notify_nr];
- rx_notify[evtchn] = 0;
- notify_via_evtchn(evtchn);
- }
-
- /* More work to do? */
- if ( !skb_queue_empty(&rx_queue) && !timer_pending(&net_timer) )
- tasklet_schedule(&net_rx_tasklet);
-#if 0
- else
- xen_network_done_notify();
-#endif
-}
-
-static void net_alarm(unsigned long unused)
-{
- tasklet_schedule(&net_rx_tasklet);
-}
-
-struct net_device_stats *netif_be_get_stats(struct net_device *dev)
-{
- netif_t *netif = netdev_priv(dev);
- return &netif->stats;
-}
-
-static int __on_net_schedule_list(netif_t *netif)
-{
- return netif->list.next != NULL;
-}
-
-static void remove_from_net_schedule_list(netif_t *netif)
-{
- spin_lock_irq(&net_schedule_list_lock);
- if ( likely(__on_net_schedule_list(netif)) )
- {
- list_del(&netif->list);
- netif->list.next = NULL;
- netif_put(netif);
- }
- spin_unlock_irq(&net_schedule_list_lock);
-}
-
-static void add_to_net_schedule_list_tail(netif_t *netif)
-{
- if ( __on_net_schedule_list(netif) )
- return;
-
- spin_lock_irq(&net_schedule_list_lock);
- if ( !__on_net_schedule_list(netif) && netif->active )
- {
- list_add_tail(&netif->list, &net_schedule_list);
- netif_get(netif);
- }
- spin_unlock_irq(&net_schedule_list_lock);
-}
-
-void netif_schedule_work(netif_t *netif)
-{
- if ( (netif->tx_req_cons != netif->tx->req_prod) &&
- ((netif->tx_req_cons-netif->tx_resp_prod) != NETIF_TX_RING_SIZE) )
- {
- add_to_net_schedule_list_tail(netif);
- maybe_schedule_tx_action();
- }
-}
-
-void netif_deschedule_work(netif_t *netif)
-{
- remove_from_net_schedule_list(netif);
-}
-
-#if 0
-static void tx_credit_callback(unsigned long data)
-{
- netif_t *netif = (netif_t *)data;
- netif->remaining_credit = netif->credit_bytes;
- netif_schedule_work(netif);
-}
-#endif
-
-static void net_tx_action(unsigned long unused)
-{
- struct list_head *ent;
- struct sk_buff *skb;
- netif_t *netif;
- netif_tx_request_t txreq;
- u16 pending_idx;
- NETIF_RING_IDX i;
- multicall_entry_t *mcl;
- PEND_RING_IDX dc, dp;
- unsigned int data_len;
-
- if ( (dc = dealloc_cons) == (dp = dealloc_prod) )
- goto skip_dealloc;
-
- mcl = tx_mcl;
- while ( dc != dp )
- {
- pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)];
- mcl[0].op = __HYPERVISOR_update_va_mapping;
- mcl[0].args[0] = MMAP_VADDR(pending_idx) >> PAGE_SHIFT;
- mcl[0].args[1] = 0;
- mcl[0].args[2] = 0;
- mcl++;
- }
-
- mcl[-1].args[2] = UVMF_FLUSH_TLB;
- if ( unlikely(HYPERVISOR_multicall(tx_mcl, mcl - tx_mcl) != 0) )
- BUG();
-
- mcl = tx_mcl;
- while ( dealloc_cons != dp )
- {
- /* The update_va_mapping() must not fail. */
- if ( unlikely(mcl[0].args[5] != 0) )
- BUG();
-
- pending_idx = dealloc_ring[MASK_PEND_IDX(dealloc_cons++)];
-
- netif = pending_tx_info[pending_idx].netif;
-
- make_tx_response(netif, pending_tx_info[pending_idx].req.id,
- NETIF_RSP_OKAY);
-
- pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
-
- /*
- * Scheduling checks must happen after the above response is posted.
- * This avoids a possible race with a guest OS on another CPU if that
- * guest is testing against 'resp_prod' when deciding whether to notify
- * us when it queues additional packets.
- */
- mb();
- if ( (netif->tx_req_cons != netif->tx->req_prod) &&
- ((netif->tx_req_cons-netif->tx_resp_prod) != NETIF_TX_RING_SIZE) )
- add_to_net_schedule_list_tail(netif);
-
- netif_put(netif);
-
- mcl++;
- }
-
- skip_dealloc:
- mcl = tx_mcl;
- while ( (NR_PENDING_REQS < MAX_PENDING_REQS) &&
- !list_empty(&net_schedule_list) )
- {
- /* Get a netif from the list with work to do. */
- ent = net_schedule_list.next;
- netif = list_entry(ent, netif_t, list);
- netif_get(netif);
- remove_from_net_schedule_list(netif);
-
- /* Work to do? */
- i = netif->tx_req_cons;
- if ( (i == netif->tx->req_prod) ||
- ((i-netif->tx_resp_prod) == NETIF_TX_RING_SIZE) )
- {
- netif_put(netif);
- continue;
- }
-
- netif->tx->req_cons = ++netif->tx_req_cons;
-
- /*
- * 1. Ensure that we see the request when we copy it.
- * 2. Ensure that frontend sees updated req_cons before we check
- * for more work to schedule.
- */
- mb();
-
- memcpy(&txreq, &netif->tx->ring[MASK_NETIF_TX_IDX(i)].req,
- sizeof(txreq));
-
-#if 0
- /* Credit-based scheduling. */
- if ( tx.size > netif->remaining_credit )
- {
- s_time_t now = NOW(), next_credit =
- netif->credit_timeout.expires + MICROSECS(netif->credit_usec);
- if ( next_credit <= now )
- {
- netif->credit_timeout.expires = now;
- netif->remaining_credit = netif->credit_bytes;
- }
- else
- {
- netif->remaining_credit = 0;
- netif->credit_timeout.expires = next_credit;
- netif->credit_timeout.data = (unsigned long)netif;
- netif->credit_timeout.function = tx_credit_callback;
- netif->credit_timeout.cpu = smp_processor_id();
- add_ac_timer(&netif->credit_timeout);
- break;
- }
- }
- netif->remaining_credit -= tx.size;
-#endif
-
- netif_schedule_work(netif);
-
- if ( unlikely(txreq.size < ETH_HLEN) ||
- unlikely(txreq.size > ETH_FRAME_LEN) )
- {
- DPRINTK("Bad packet size: %d\n", txreq.size);
- make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
- netif_put(netif);
- continue;
- }
-
-    /* The payload must not cross a page boundary, as it must not fragment. */
- if ( unlikely(((txreq.addr & ~PAGE_MASK) + txreq.size) >= PAGE_SIZE) )
- {
- DPRINTK("txreq.addr: %lx, size: %u, end: %lu\n",
- txreq.addr, txreq.size,
- (txreq.addr &~PAGE_MASK) + txreq.size);
- make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
- netif_put(netif);
- continue;
- }
-
- pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
-
- data_len = (txreq.size > PKT_PROT_LEN) ? PKT_PROT_LEN : txreq.size;
-
- if ( unlikely((skb = alloc_skb(data_len+16, GFP_ATOMIC)) == NULL) )
- {
- DPRINTK("Can't allocate a skb in start_xmit.\n");
- make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
- netif_put(netif);
- break;
- }
-
- /* Packets passed to netif_rx() must have some headroom. */
- skb_reserve(skb, 16);
-
- mcl[0].op = __HYPERVISOR_update_va_mapping_otherdomain;
- mcl[0].args[0] = MMAP_VADDR(pending_idx) >> PAGE_SHIFT;
- mcl[0].args[1] = (txreq.addr & PAGE_MASK) | __PAGE_KERNEL;
- mcl[0].args[2] = 0;
- mcl[0].args[3] = netif->domid;
- mcl++;
-
- memcpy(&pending_tx_info[pending_idx].req, &txreq, sizeof(txreq));
- pending_tx_info[pending_idx].netif = netif;
- *((u16 *)skb->data) = pending_idx;
-
- __skb_queue_tail(&tx_queue, skb);
-
- pending_cons++;
-
- /* Filled the batch queue? */
- if ( (mcl - tx_mcl) == ARRAY_SIZE(tx_mcl) )
- break;
- }
-
- if ( mcl == tx_mcl )
- return;
-
- if ( unlikely(HYPERVISOR_multicall(tx_mcl, mcl - tx_mcl) != 0) )
- BUG();
-
- mcl = tx_mcl;
- while ( (skb = __skb_dequeue(&tx_queue)) != NULL )
- {
- pending_idx = *((u16 *)skb->data);
- netif = pending_tx_info[pending_idx].netif;
- memcpy(&txreq, &pending_tx_info[pending_idx].req, sizeof(txreq));
-
- /* Check the remap error code. */
- if ( unlikely(mcl[0].args[5] != 0) )
- {
- DPRINTK("Bad page frame\n");
- make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
- netif_put(netif);
- kfree_skb(skb);
- mcl++;
- pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
- continue;
- }
-
- phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx)) >> PAGE_SHIFT] =
- FOREIGN_FRAME(txreq.addr >> PAGE_SHIFT);
-
- data_len = (txreq.size > PKT_PROT_LEN) ? PKT_PROT_LEN : txreq.size;
-
- __skb_put(skb, data_len);
- memcpy(skb->data,
- (void *)(MMAP_VADDR(pending_idx)|(txreq.addr&~PAGE_MASK)),
- data_len);
-
- if ( data_len < txreq.size )
- {
- /* Append the packet payload as a fragment. */
- skb_shinfo(skb)->frags[0].page =
- virt_to_page(MMAP_VADDR(pending_idx));
- skb_shinfo(skb)->frags[0].size = txreq.size - data_len;
- skb_shinfo(skb)->frags[0].page_offset =
- (txreq.addr + data_len) & ~PAGE_MASK;
- skb_shinfo(skb)->nr_frags = 1;
- }
- else
- {
- /* Schedule a response immediately. */
- netif_idx_release(pending_idx);
- }
-
- skb->data_len = txreq.size - data_len;
- skb->len += skb->data_len;
-
- skb->dev = netif->dev;
- skb->protocol = eth_type_trans(skb, skb->dev);
-
- netif->stats.rx_bytes += txreq.size;
- netif->stats.rx_packets++;
-
- netif_rx(skb);
- netif->dev->last_rx = jiffies;
-
- mcl++;
- }
-}
-
-static void netif_idx_release(u16 pending_idx)
-{
- static spinlock_t _lock = SPIN_LOCK_UNLOCKED;
- unsigned long flags;
-
- spin_lock_irqsave(&_lock, flags);
- dealloc_ring[MASK_PEND_IDX(dealloc_prod++)] = pending_idx;
- spin_unlock_irqrestore(&_lock, flags);
-
- tasklet_schedule(&net_tx_tasklet);
-}
-
-static void netif_page_release(struct page *page)
-{
- u16 pending_idx = page - virt_to_page(mmap_vstart);
-
- /* Ready for next use. */
- set_page_count(page, 1);
-
- netif_idx_release(pending_idx);
-}
-
-irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs)
-{
- netif_t *netif = dev_id;
- if ( tx_work_exists(netif) )
- {
- add_to_net_schedule_list_tail(netif);
- maybe_schedule_tx_action();
- }
- return IRQ_HANDLED;
-}
-
-static void make_tx_response(netif_t *netif,
- u16 id,
- s8 st)
-{
- NETIF_RING_IDX i = netif->tx_resp_prod;
- netif_tx_response_t *resp;
-
- resp = &netif->tx->ring[MASK_NETIF_TX_IDX(i)].resp;
- resp->id = id;
- resp->status = st;
- wmb();
- netif->tx->resp_prod = netif->tx_resp_prod = ++i;
-
- mb(); /* Update producer before checking event threshold. */
- if ( i == netif->tx->event )
- notify_via_evtchn(netif->evtchn);
-}
-
-static int make_rx_response(netif_t *netif,
- u16 id,
- s8 st,
- memory_t addr,
- u16 size)
-{
- NETIF_RING_IDX i = netif->rx_resp_prod;
- netif_rx_response_t *resp;
-
- resp = &netif->rx->ring[MASK_NETIF_RX_IDX(i)].resp;
- resp->addr = addr;
- resp->id = id;
- resp->status = (s16)size;
- if ( st < 0 )
- resp->status = (s16)st;
- wmb();
- netif->rx->resp_prod = netif->rx_resp_prod = ++i;
-
- mb(); /* Update producer before checking event threshold. */
- return (i == netif->rx->event);
-}
-
-static irqreturn_t netif_be_dbg(int irq, void *dev_id, struct pt_regs *regs)
-{
- struct list_head *ent;
- netif_t *netif;
- int i = 0;
-
- printk(KERN_ALERT "netif_schedule_list:\n");
- spin_lock_irq(&net_schedule_list_lock);
-
- list_for_each ( ent, &net_schedule_list )
- {
- netif = list_entry(ent, netif_t, list);
- printk(KERN_ALERT " %d: private(rx_req_cons=%08x rx_resp_prod=%08x\n",
- i, netif->rx_req_cons, netif->rx_resp_prod);
- printk(KERN_ALERT " tx_req_cons=%08x tx_resp_prod=%08x)\n",
- netif->tx_req_cons, netif->tx_resp_prod);
- printk(KERN_ALERT " shared(rx_req_prod=%08x rx_resp_prod=%08x\n",
- netif->rx->req_prod, netif->rx->resp_prod);
- printk(KERN_ALERT " rx_event=%08x tx_req_prod=%08x\n",
- netif->rx->event, netif->tx->req_prod);
- printk(KERN_ALERT " tx_resp_prod=%08x, tx_event=%08x)\n",
- netif->tx->resp_prod, netif->tx->event);
- i++;
- }
-
- spin_unlock_irq(&net_schedule_list_lock);
- printk(KERN_ALERT " ** End of netif_schedule_list **\n");
-
- return IRQ_HANDLED;
-}
-
-static int __init netback_init(void)
-{
- int i;
- struct page *page;
-
- if ( !(xen_start_info.flags & SIF_NET_BE_DOMAIN) &&
- !(xen_start_info.flags & SIF_INITDOMAIN) )
- return 0;
-
- printk("Initialising Xen netif backend\n");
-
- /* We can increase reservation by this much in net_rx_action(). */
- balloon_update_driver_allowance(NETIF_RX_RING_SIZE);
-
- skb_queue_head_init(&rx_queue);
- skb_queue_head_init(&tx_queue);
-
- init_timer(&net_timer);
- net_timer.data = 0;
- net_timer.function = net_alarm;
-
- netif_interface_init();
-
- if ( (mmap_vstart = allocate_empty_lowmem_region(MAX_PENDING_REQS)) == 0 )
- BUG();
-
- for ( i = 0; i < MAX_PENDING_REQS; i++ )
- {
- page = virt_to_page(MMAP_VADDR(i));
- set_page_count(page, 1);
- SetPageForeign(page, netif_page_release);
- }
-
- pending_cons = 0;
- pending_prod = MAX_PENDING_REQS;
- for ( i = 0; i < MAX_PENDING_REQS; i++ )
- pending_ring[i] = i;
-
- spin_lock_init(&net_schedule_list_lock);
- INIT_LIST_HEAD(&net_schedule_list);
-
- netif_ctrlif_init();
-
- (void)request_irq(bind_virq_to_irq(VIRQ_DEBUG),
- netif_be_dbg, SA_SHIRQ,
- "net-be-dbg", &netif_be_dbg);
-
- return 0;
-}
-
-static void netback_cleanup(void)
-{
- BUG();
-}
-
-module_init(netback_init);
-module_exit(netback_cleanup);
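
netback.c above tracks in-flight transmit slots with a free-running pending ring: pending_ring starts full of free slot indices, taking a slot advances pending_cons, returning one advances pending_prod, and NR_PENDING_REQS falls out of the index difference. Below is a standalone sketch with the same arithmetic, minus the locking and hypercalls.

/* Sketch of the free-running pending ring in netback.c above. */
#include <stdio.h>
#include <stdint.h>

#define MAX_PENDING_REQS 256 /* power of two */
#define MASK_PEND_IDX(_i) ((_i) & (MAX_PENDING_REQS - 1))

static uint16_t pending_ring[MAX_PENDING_REQS];
static unsigned int pending_prod, pending_cons;

/* In-flight requests = slots handed out minus slots returned. */
#define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)

int main(void)
{
    unsigned int i;
    uint16_t idx;

    /* Initially every slot index is free, as netback_init() arranges. */
    pending_cons = 0;
    pending_prod = MAX_PENDING_REQS;
    for (i = 0; i < MAX_PENDING_REQS; i++)
        pending_ring[i] = i;

    idx = pending_ring[MASK_PEND_IDX(pending_cons++)]; /* take a slot */
    printf("took slot %u, in flight: %u\n", idx, NR_PENDING_REQS);

    pending_ring[MASK_PEND_IDX(pending_prod++)] = idx; /* return it */
    printf("returned slot, in flight: %u\n", NR_PENDING_REQS);
    return 0;
}
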
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/drivers/xen/netfront/Kconfig
--- a/linux-2.6.11-xen-sparse/drivers/xen/netfront/Kconfig Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,6 +0,0 @@
-
-config XENNET
- tristate "Xen network driver"
- depends on NETDEVICES && ARCH_XEN
- help
- Network driver for Xen
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/drivers/xen/netfront/Makefile
--- a/linux-2.6.11-xen-sparse/drivers/xen/netfront/Makefile Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,2 +0,0 @@
-
-obj-y := netfront.o
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/drivers/xen/netfront/netfront.c
--- a/linux-2.6.11-xen-sparse/drivers/xen/netfront/netfront.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,1480 +0,0 @@
-/******************************************************************************
- * Virtual network driver for conversing with remote driver backends.
- *
- * Copyright (c) 2002-2004, K A Fraser
- *
- * This file may be distributed separately from the Linux kernel, or
- * incorporated into other software packages, subject to the following license:
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/version.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/netdevice.h>
-#include <linux/inetdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/init.h>
-#include <linux/bitops.h>
-#include <linux/proc_fs.h>
-#include <net/sock.h>
-#include <net/pkt_sched.h>
-#include <net/arp.h>
-#include <net/route.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <asm-xen/evtchn.h>
-#include <asm-xen/ctrl_if.h>
-#include <asm-xen/xen-public/io/netif.h>
-#include <asm-xen/balloon.h>
-#include <asm/page.h>
-
-#ifndef __GFP_NOWARN
-#define __GFP_NOWARN 0
-#endif
-#define alloc_xen_skb(_l) __dev_alloc_skb((_l), GFP_ATOMIC|__GFP_NOWARN)
-
-#define init_skb_shinfo(_skb) \
- do { \
- atomic_set(&(skb_shinfo(_skb)->dataref), 1); \
- skb_shinfo(_skb)->nr_frags = 0; \
- skb_shinfo(_skb)->frag_list = NULL; \
- } while (0)
-
-/* Allow headroom on each rx pkt for Ethernet header, alignment padding, ... */
-#define RX_HEADROOM 200
-
-/*
- * If the backend driver is pipelining transmit requests then we can be very
- * aggressive in avoiding new-packet notifications -- only need to send a
- * notification if there are no outstanding unreceived responses.
- * If the backend may be buffering our transmit buffers for any reason then we
- * are rather more conservative.
- */
-#ifdef CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER
-#define TX_TEST_IDX resp_prod /* aggressive: any outstanding responses? */
-#else
-#define TX_TEST_IDX req_cons /* conservative: not seen all our requests? */
-#endif
-
-static void network_tx_buf_gc(struct net_device *dev);
-static void network_alloc_rx_buffers(struct net_device *dev);
-
-static unsigned long rx_pfn_array[NETIF_RX_RING_SIZE];
-static multicall_entry_t rx_mcl[NETIF_RX_RING_SIZE+1];
-static mmu_update_t rx_mmu[NETIF_RX_RING_SIZE];
-
-#ifdef CONFIG_PROC_FS
-static int xennet_proc_init(void);
-static int xennet_proc_addif(struct net_device *dev);
-static void xennet_proc_delif(struct net_device *dev);
-#else
-#define xennet_proc_init() (0)
-#define xennet_proc_addif(d) (0)
-#define xennet_proc_delif(d) ((void)0)
-#endif
-
-static struct list_head dev_list;
-
-struct net_private
-{
- struct list_head list;
- struct net_device *dev;
-
- struct net_device_stats stats;
- NETIF_RING_IDX rx_resp_cons, tx_resp_cons;
- unsigned int tx_full;
-
- netif_tx_interface_t *tx;
- netif_rx_interface_t *rx;
-
- spinlock_t tx_lock;
- spinlock_t rx_lock;
-
- unsigned int handle;
- unsigned int evtchn;
- unsigned int irq;
-
- /* What is the status of our connection to the remote backend? */
-#define BEST_CLOSED 0
-#define BEST_DISCONNECTED 1
-#define BEST_CONNECTED 2
- unsigned int backend_state;
-
- /* Is this interface open or closed (down or up)? */
-#define UST_CLOSED 0
-#define UST_OPEN 1
- unsigned int user_state;
-
- /* Receive-ring batched refills. */
-#define RX_MIN_TARGET 8
-#define RX_MAX_TARGET NETIF_RX_RING_SIZE
- int rx_min_target, rx_max_target, rx_target;
- struct sk_buff_head rx_batch;
-
- /*
- * {tx,rx}_skbs store outstanding skbuffs. The first entry in each
- * array is an index into a chain of free entries.
- */
- struct sk_buff *tx_skbs[NETIF_TX_RING_SIZE+1];
- struct sk_buff *rx_skbs[NETIF_RX_RING_SIZE+1];
-};
-
-/* Access macros for acquiring freeing slots in {tx,rx}_skbs[]. */
-#define ADD_ID_TO_FREELIST(_list, _id) \
- (_list)[(_id)] = (_list)[0]; \
- (_list)[0] = (void *)(unsigned long)(_id);
-#define GET_ID_FROM_FREELIST(_list) \
- ({ unsigned long _id = (unsigned long)(_list)[0]; \
- (_list)[0] = (_list)[_id]; \
- (unsigned short)_id; })
-
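A minimal usage sketch of the freelist macros above (illustrative; the demo
function is hypothetical). Slot 0 heads a chain of free indices stored as
small integers, while in-use slots hold real skb pointers:

    static void freelist_demo(struct sk_buff *skb)
    {
        static struct sk_buff *skbs[NETIF_TX_RING_SIZE+1];
        unsigned short id;
        int i;

        for (i = 0; i <= NETIF_TX_RING_SIZE; i++) /* chain 0 -> 1 -> ... */
            skbs[i] = (struct sk_buff *)(unsigned long)(i+1);

        id = GET_ID_FROM_FREELIST(skbs); /* pop a free slot index */
        skbs[id] = skb;                  /* slot now holds a real pointer */
        ADD_ID_TO_FREELIST(skbs, id);    /* push the index back */
    }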
-static char *status_name[] = {
- [NETIF_INTERFACE_STATUS_CLOSED] = "closed",
- [NETIF_INTERFACE_STATUS_DISCONNECTED] = "disconnected",
- [NETIF_INTERFACE_STATUS_CONNECTED] = "connected",
- [NETIF_INTERFACE_STATUS_CHANGED] = "changed",
-};
-
-static char *be_state_name[] = {
- [BEST_CLOSED] = "closed",
- [BEST_DISCONNECTED] = "disconnected",
- [BEST_CONNECTED] = "connected",
-};
-
-#if DEBUG
-#define DPRINTK(fmt, args...) \
- printk(KERN_ALERT "xen_net (%s:%d) " fmt, __FUNCTION__, __LINE__, ##args)
-#else
-#define DPRINTK(fmt, args...) ((void)0)
-#endif
-#define IPRINTK(fmt, args...) \
- printk(KERN_INFO "xen_net: " fmt, ##args)
-#define WPRINTK(fmt, args...) \
- printk(KERN_WARNING "xen_net: " fmt, ##args)
-
-static struct net_device *find_dev_by_handle(unsigned int handle)
-{
- struct list_head *ent;
- struct net_private *np;
- list_for_each (ent, &dev_list) {
- np = list_entry(ent, struct net_private, list);
- if (np->handle == handle)
- return np->dev;
- }
- return NULL;
-}
-
-/** Network interface info. */
-struct netif_ctrl {
- /** Number of interfaces. */
- int interface_n;
- /** Number of connected interfaces. */
- int connected_n;
- /** Error code. */
- int err;
- int up;
-};
-
-static struct netif_ctrl netctrl;
-
-static void netctrl_init(void)
-{
- memset(&netctrl, 0, sizeof(netctrl));
- netctrl.up = NETIF_DRIVER_STATUS_DOWN;
-}
-
-/** Get or set a network interface error.
- */
-static int netctrl_err(int err)
-{
- if ((err < 0) && !netctrl.err)
- netctrl.err = err;
- return netctrl.err;
-}
-
-/** Test if all network interfaces are connected.
- *
- * @return 1 if all connected, 0 if not, negative error code otherwise
- */
-static int netctrl_connected(void)
-{
- int ok;
-
- if (netctrl.err)
- ok = netctrl.err;
- else if (netctrl.up == NETIF_DRIVER_STATUS_UP)
- ok = (netctrl.connected_n == netctrl.interface_n);
- else
- ok = 0;
-
- return ok;
-}
-
-/** Count the connected network interfaces.
- *
- * @return connected count
- */
-static int netctrl_connected_count(void)
-{
-
- struct list_head *ent;
- struct net_private *np;
- unsigned int connected;
-
- connected = 0;
-
- list_for_each(ent, &dev_list) {
- np = list_entry(ent, struct net_private, list);
- if (np->backend_state == BEST_CONNECTED)
- connected++;
- }
-
- netctrl.connected_n = connected;
- DPRINTK("> connected_n=%d interface_n=%d\n",
- netctrl.connected_n, netctrl.interface_n);
- return connected;
-}
-
-/** Send a packet on a net device to encourage switches to learn the
- * MAC. We send a fake ARP reply.
- *
- * @param dev device
- * @return 0 on success, error code otherwise
- */
-static int send_fake_arp(struct net_device *dev)
-{
- struct sk_buff *skb;
- u32 src_ip, dst_ip;
-
- dst_ip = INADDR_BROADCAST;
- src_ip = inet_select_addr(dev, dst_ip, RT_SCOPE_LINK);
-
- /* No IP? Then nothing to do. */
- if (src_ip == 0)
- return 0;
-
- skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
- dst_ip, dev, src_ip,
- /*dst_hw*/ NULL, /*src_hw*/ NULL,
- /*target_hw*/ dev->dev_addr);
- if (skb == NULL)
- return -ENOMEM;
-
- return dev_queue_xmit(skb);
-}
-
-static int network_open(struct net_device *dev)
-{
- struct net_private *np = netdev_priv(dev);
-
- memset(&np->stats, 0, sizeof(np->stats));
-
- np->user_state = UST_OPEN;
-
- network_alloc_rx_buffers(dev);
- np->rx->event = np->rx_resp_cons + 1;
-
- netif_start_queue(dev);
-
- return 0;
-}
-
-static void network_tx_buf_gc(struct net_device *dev)
-{
- NETIF_RING_IDX i, prod;
- unsigned short id;
- struct net_private *np = netdev_priv(dev);
- struct sk_buff *skb;
-
- if (np->backend_state != BEST_CONNECTED)
- return;
-
- do {
- prod = np->tx->resp_prod;
- rmb(); /* Ensure we see responses up to 'rp'. */
-
- for (i = np->tx_resp_cons; i != prod; i++) {
- id = np->tx->ring[MASK_NETIF_TX_IDX(i)].resp.id;
- skb = np->tx_skbs[id];
- ADD_ID_TO_FREELIST(np->tx_skbs, id);
- dev_kfree_skb_irq(skb);
- }
-
- np->tx_resp_cons = prod;
-
- /*
- * Set a new event, then check for race with update of tx_cons. Note
- * that it is essential to schedule a callback, no matter how few
- * buffers are pending. Even if there is space in the transmit ring,
- * higher layers may be blocked because too much data is outstanding:
- * in such cases notification from Xen is likely to be the only kick
- * that we'll get.
- */
- np->tx->event =
- prod + ((np->tx->req_prod - prod) >> 1) + 1;
- mb();
- } while (prod != np->tx->resp_prod);
-
- if (np->tx_full && ((np->tx->req_prod - prod) < NETIF_TX_RING_SIZE)) {
- np->tx_full = 0;
- if (np->user_state == UST_OPEN)
- netif_wake_queue(dev);
- }
-}
-
-
-static void network_alloc_rx_buffers(struct net_device *dev)
-{
- unsigned short id;
- struct net_private *np = netdev_priv(dev);
- struct sk_buff *skb;
- int i, batch_target;
- NETIF_RING_IDX req_prod = np->rx->req_prod;
-
- if (unlikely(np->backend_state != BEST_CONNECTED))
- return;
-
- /*
- * Allocate skbuffs greedily, even though we batch updates to the
- * receive ring. This creates a less bursty demand on the memory allocator,
- * so should reduce the chance of failed allocation requests both for
-     * ourselves and for other kernel subsystems.
- */
- batch_target = np->rx_target - (req_prod - np->rx_resp_cons);
- for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
- if (unlikely((skb = alloc_xen_skb(dev->mtu + RX_HEADROOM)) == NULL))
- break;
- __skb_queue_tail(&np->rx_batch, skb);
- }
-
- /* Is the batch large enough to be worthwhile? */
- if (i < (np->rx_target/2))
- return;
-
- for (i = 0; ; i++) {
- if ((skb = __skb_dequeue(&np->rx_batch)) == NULL)
- break;
-
- skb->dev = dev;
-
- id = GET_ID_FROM_FREELIST(np->rx_skbs);
-
- np->rx_skbs[id] = skb;
-
- np->rx->ring[MASK_NETIF_RX_IDX(req_prod + i)].req.id = id;
-
- rx_pfn_array[i] = virt_to_machine(skb->head) >> PAGE_SHIFT;
-
- /* Remove this page from pseudo phys map before passing back to Xen. */
- phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT]
- = INVALID_P2M_ENTRY;
-
- rx_mcl[i].op = __HYPERVISOR_update_va_mapping;
- rx_mcl[i].args[0] = (unsigned long)skb->head >> PAGE_SHIFT;
- rx_mcl[i].args[1] = 0;
- rx_mcl[i].args[2] = 0;
- }
-
- /*
- * We may have allocated buffers which have entries outstanding in the page
- * update queue -- make sure we flush those first!
- */
- flush_page_update_queue();
-
- /* After all PTEs have been zapped we blow away stale TLB entries. */
- rx_mcl[i-1].args[2] = UVMF_FLUSH_TLB;
-
- /* Give away a batch of pages. */
- rx_mcl[i].op = __HYPERVISOR_dom_mem_op;
- rx_mcl[i].args[0] = MEMOP_decrease_reservation;
- rx_mcl[i].args[1] = (unsigned long)rx_pfn_array;
- rx_mcl[i].args[2] = (unsigned long)i;
- rx_mcl[i].args[3] = 0;
- rx_mcl[i].args[4] = DOMID_SELF;
-
-    /* Tell the balloon driver what is going on. */
- balloon_update_driver_allowance(i);
-
- /* Zap PTEs and give away pages in one big multicall. */
- (void)HYPERVISOR_multicall(rx_mcl, i+1);
-
- /* Check return status of HYPERVISOR_dom_mem_op(). */
- if (unlikely(rx_mcl[i].args[5] != i))
- panic("Unable to reduce memory reservation\n");
-
- /* Above is a suitable barrier to ensure backend will see requests. */
- np->rx->req_prod = req_prod + i;
-
- /* Adjust our floating fill target if we risked running out of buffers. */
- if (((req_prod - np->rx->resp_prod) < (np->rx_target / 4)) &&
- ((np->rx_target *= 2) > np->rx_max_target))
- np->rx_target = np->rx_max_target;
-}
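Note the asymmetry in tuning the floating fill target: it doubles above when
the ring nearly ran dry, and decays by one in netif_poll() below when there
is ample headroom. The two adjustments side by side (copied from this file
for comparison):

    /* Grow: refill found less than a quarter of the target outstanding. */
    if (((req_prod - np->rx->resp_prod) < (np->rx_target / 4)) &&
        ((np->rx_target *= 2) > np->rx_max_target))
        np->rx_target = np->rx_max_target;

    /* Shrink: more than three quarters of the target still outstanding. */
    if (((np->rx->req_prod - np->rx->resp_prod) > ((3*np->rx_target) / 4)) &&
        (--np->rx_target < np->rx_min_target))
        np->rx_target = np->rx_min_target;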
-
-
-static int network_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- unsigned short id;
- struct net_private *np = netdev_priv(dev);
- netif_tx_request_t *tx;
- NETIF_RING_IDX i;
-
- if (unlikely(np->tx_full)) {
- printk(KERN_ALERT "%s: full queue wasn't stopped!\n", dev->name);
- netif_stop_queue(dev);
- goto drop;
- }
-
- if (unlikely((((unsigned long)skb->data & ~PAGE_MASK) + skb->len) >=
- PAGE_SIZE)) {
- struct sk_buff *nskb;
- if (unlikely((nskb = alloc_xen_skb(skb->len)) == NULL))
- goto drop;
- skb_put(nskb, skb->len);
- memcpy(nskb->data, skb->data, skb->len);
- nskb->dev = skb->dev;
- dev_kfree_skb(skb);
- skb = nskb;
- }
-
- spin_lock_irq(&np->tx_lock);
-
- if (np->backend_state != BEST_CONNECTED) {
- spin_unlock_irq(&np->tx_lock);
- goto drop;
- }
-
- i = np->tx->req_prod;
-
- id = GET_ID_FROM_FREELIST(np->tx_skbs);
- np->tx_skbs[id] = skb;
-
- tx = &np->tx->ring[MASK_NETIF_TX_IDX(i)].req;
-
- tx->id = id;
- tx->addr = virt_to_machine(skb->data);
- tx->size = skb->len;
-
- wmb(); /* Ensure that backend will see the request. */
- np->tx->req_prod = i + 1;
-
- network_tx_buf_gc(dev);
-
- if ((i - np->tx_resp_cons) == (NETIF_TX_RING_SIZE - 1)) {
- np->tx_full = 1;
- netif_stop_queue(dev);
- }
-
- spin_unlock_irq(&np->tx_lock);
-
- np->stats.tx_bytes += skb->len;
- np->stats.tx_packets++;
-
- /* Only notify Xen if we really have to. */
- mb();
- if (np->tx->TX_TEST_IDX == i)
- notify_via_evtchn(np->evtchn);
-
- return 0;
-
- drop:
- np->stats.tx_dropped++;
- dev_kfree_skb(skb);
- return 0;
-}
-
-static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs)
-{
- struct net_device *dev = dev_id;
- struct net_private *np = netdev_priv(dev);
- unsigned long flags;
-
- spin_lock_irqsave(&np->tx_lock, flags);
- network_tx_buf_gc(dev);
- spin_unlock_irqrestore(&np->tx_lock, flags);
-
-    if ((np->rx_resp_cons != np->rx->resp_prod) && (np->user_state == UST_OPEN))
- netif_rx_schedule(dev);
-
- return IRQ_HANDLED;
-}
-
-
-static int netif_poll(struct net_device *dev, int *pbudget)
-{
- struct net_private *np = netdev_priv(dev);
- struct sk_buff *skb, *nskb;
- netif_rx_response_t *rx;
- NETIF_RING_IDX i, rp;
- mmu_update_t *mmu = rx_mmu;
- multicall_entry_t *mcl = rx_mcl;
- int work_done, budget, more_to_do = 1;
- struct sk_buff_head rxq;
- unsigned long flags;
-
- spin_lock(&np->rx_lock);
-
- if (np->backend_state != BEST_CONNECTED) {
- spin_unlock(&np->rx_lock);
- return 0;
- }
-
- skb_queue_head_init(&rxq);
-
- if ((budget = *pbudget) > dev->quota)
- budget = dev->quota;
-
- rp = np->rx->resp_prod;
- rmb(); /* Ensure we see queued responses up to 'rp'. */
-
- for (i = np->rx_resp_cons, work_done = 0;
- (i != rp) && (work_done < budget);
- i++, work_done++) {
- rx = &np->rx->ring[MASK_NETIF_RX_IDX(i)].resp;
-
- /*
- * An error here is very odd. Usually indicates a backend bug,
- * low-memory condition, or that we didn't have reservation headroom.
- */
- if (unlikely(rx->status <= 0)) {
- if (net_ratelimit())
- printk(KERN_WARNING "Bad rx buffer (memory squeeze?).\n");
- np->rx->ring[MASK_NETIF_RX_IDX(np->rx->req_prod)].req.id = rx->id;
- wmb();
- np->rx->req_prod++;
- work_done--;
- continue;
- }
-
- skb = np->rx_skbs[rx->id];
- ADD_ID_TO_FREELIST(np->rx_skbs, rx->id);
-
- /* NB. We handle skb overflow later. */
- skb->data = skb->head + (rx->addr & ~PAGE_MASK);
- skb->len = rx->status;
- skb->tail = skb->data + skb->len;
-
- np->stats.rx_packets++;
- np->stats.rx_bytes += rx->status;
-
- /* Remap the page. */
- mmu->ptr = (rx->addr & PAGE_MASK) | MMU_MACHPHYS_UPDATE;
- mmu->val = __pa(skb->head) >> PAGE_SHIFT;
- mmu++;
- mcl->op = __HYPERVISOR_update_va_mapping;
- mcl->args[0] = (unsigned long)skb->head >> PAGE_SHIFT;
- mcl->args[1] = (rx->addr & PAGE_MASK) | __PAGE_KERNEL;
- mcl->args[2] = 0;
- mcl++;
-
- phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] =
- rx->addr >> PAGE_SHIFT;
-
- __skb_queue_tail(&rxq, skb);
- }
-
- /* Some pages are no longer absent... */
- balloon_update_driver_allowance(-work_done);
-
- /* Do all the remapping work, and M->P updates, in one big hypercall. */
- if (likely((mcl - rx_mcl) != 0)) {
- mcl->op = __HYPERVISOR_mmu_update;
- mcl->args[0] = (unsigned long)rx_mmu;
- mcl->args[1] = mmu - rx_mmu;
- mcl->args[2] = 0;
- mcl++;
- (void)HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl);
- }
-
- while ((skb = __skb_dequeue(&rxq)) != NULL) {
- /*
- * Enough room in skbuff for the data we were passed? Also, Linux
- * expects at least 16 bytes headroom in each receive buffer.
- */
- if (unlikely(skb->tail > skb->end) ||
- unlikely((skb->data - skb->head) < 16)) {
- nskb = NULL;
-
- /* Only copy the packet if it fits in the current MTU. */
- if (skb->len <= (dev->mtu + ETH_HLEN)) {
- if ((skb->tail > skb->end) && net_ratelimit())
- printk(KERN_INFO "Received packet needs %d bytes more "
- "headroom.\n", skb->tail - skb->end);
-
- if ((nskb = alloc_xen_skb(skb->len + 2)) != NULL) {
- skb_reserve(nskb, 2);
- skb_put(nskb, skb->len);
- memcpy(nskb->data, skb->data, skb->len);
- nskb->dev = skb->dev;
- }
- }
- else if (net_ratelimit())
- printk(KERN_INFO "Received packet too big for MTU "
- "(%d > %d)\n", skb->len - ETH_HLEN, dev->mtu);
-
- /* Reinitialise and then destroy the old skbuff. */
- skb->len = 0;
- skb->tail = skb->data;
- init_skb_shinfo(skb);
- dev_kfree_skb(skb);
-
- /* Switch old for new, if we copied the buffer. */
- if ((skb = nskb) == NULL)
- continue;
- }
-
- /* Set the shared-info area, which is hidden behind the real data. */
- init_skb_shinfo(skb);
-
- /* Ethernet-specific work. Delayed to here as it peeks the header. */
- skb->protocol = eth_type_trans(skb, dev);
-
- /* Pass it up. */
- netif_receive_skb(skb);
- dev->last_rx = jiffies;
- }
-
- np->rx_resp_cons = i;
-
- /* If we get a callback with very few responses, reduce fill target. */
- /* NB. Note exponential increase, linear decrease. */
- if (((np->rx->req_prod - np->rx->resp_prod) > ((3*np->rx_target) / 4)) &&
- (--np->rx_target < np->rx_min_target))
- np->rx_target = np->rx_min_target;
-
- network_alloc_rx_buffers(dev);
-
- *pbudget -= work_done;
- dev->quota -= work_done;
-
- if (work_done < budget) {
- local_irq_save(flags);
-
- np->rx->event = i + 1;
-
- /* Deal with hypervisor racing our resetting of rx_event. */
- mb();
- if (np->rx->resp_prod == i) {
- __netif_rx_complete(dev);
- more_to_do = 0;
- }
-
- local_irq_restore(flags);
- }
-
- spin_unlock(&np->rx_lock);
-
- return more_to_do;
-}
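netif_poll() above follows the 2.6 NAPI contract: consume at most the
smaller of *pbudget and dev->quota, charge the work done against both, and
complete the poll only once the ring is drained. A stripped-down sketch of
that contract (illustrative; process_responses() is a hypothetical stand-in
for the response loop, and the race check against the ring is omitted):

    extern int process_responses(struct net_device *dev, int budget);

    static int poll_sketch(struct net_device *dev, int *pbudget)
    {
        int budget = (*pbudget > dev->quota) ? dev->quota : *pbudget;
        int work_done = process_responses(dev, budget);

        *pbudget   -= work_done;
        dev->quota -= work_done;

        if (work_done < budget) {
            __netif_rx_complete(dev); /* re-enable receive interrupts */
            return 0;                 /* nothing left to do */
        }
        return 1;                     /* ask to be polled again */
    }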
-
-
-static int network_close(struct net_device *dev)
-{
- struct net_private *np = netdev_priv(dev);
- np->user_state = UST_CLOSED;
- netif_stop_queue(np->dev);
- return 0;
-}
-
-
-static struct net_device_stats *network_get_stats(struct net_device *dev)
-{
- struct net_private *np = netdev_priv(dev);
- return &np->stats;
-}
-
-
-static void network_connect(struct net_device *dev,
- netif_fe_interface_status_t *status)
-{
- struct net_private *np;
- int i, requeue_idx;
- netif_tx_request_t *tx;
-
- np = netdev_priv(dev);
- spin_lock_irq(&np->tx_lock);
- spin_lock(&np->rx_lock);
-
- /* Recovery procedure: */
-
- /* Step 1: Reinitialise variables. */
- np->rx_resp_cons = np->tx_resp_cons = np->tx_full = 0;
- np->rx->event = np->tx->event = 1;
-
- /* Step 2: Rebuild the RX and TX ring contents.
- * NB. We could just free the queued TX packets now but we hope
- * that sending them out might do some good. We have to rebuild
- * the RX ring because some of our pages are currently flipped out
- * so we can't just free the RX skbs.
- * NB2. Freelist index entries are always going to be less than
-     * __PAGE_OFFSET, whereas pointers to skbs will always be equal to or
-     * greater than __PAGE_OFFSET: we use this property to distinguish
- * them.
- */
-
- /* Rebuild the TX buffer freelist and the TX ring itself.
- * NB. This reorders packets. We could keep more private state
- * to avoid this but maybe it doesn't matter so much given the
- * interface has been down.
- */
- for (requeue_idx = 0, i = 1; i <= NETIF_TX_RING_SIZE; i++) {
- if ((unsigned long)np->tx_skbs[i] >= __PAGE_OFFSET) {
- struct sk_buff *skb = np->tx_skbs[i];
-
- tx = &np->tx->ring[requeue_idx++].req;
-
- tx->id = i;
- tx->addr = virt_to_machine(skb->data);
- tx->size = skb->len;
-
- np->stats.tx_bytes += skb->len;
- np->stats.tx_packets++;
- }
- }
- wmb();
- np->tx->req_prod = requeue_idx;
-
- /* Rebuild the RX buffer freelist and the RX ring itself. */
- for (requeue_idx = 0, i = 1; i <= NETIF_RX_RING_SIZE; i++)
- if ((unsigned long)np->rx_skbs[i] >= __PAGE_OFFSET)
- np->rx->ring[requeue_idx++].req.id = i;
- wmb();
- np->rx->req_prod = requeue_idx;
-
- /* Step 3: All public and private state should now be sane. Get
- * ready to start sending and receiving packets and give the driver
- * domain a kick because we've probably just requeued some
- * packets.
- */
- np->backend_state = BEST_CONNECTED;
- wmb();
- notify_via_evtchn(status->evtchn);
- network_tx_buf_gc(dev);
-
- if (np->user_state == UST_OPEN)
- netif_start_queue(dev);
-
- spin_unlock(&np->rx_lock);
- spin_unlock_irq(&np->tx_lock);
-}
-
-static void vif_show(struct net_private *np)
-{
-#if DEBUG
- if (np) {
- IPRINTK("<vif handle=%u %s(%s) evtchn=%u irq=%u tx=%p rx=%p>\n",
- np->handle,
- be_state_name[np->backend_state],
- np->user_state ? "open" : "closed",
- np->evtchn,
- np->irq,
- np->tx,
- np->rx);
- } else {
- IPRINTK("<vif NULL>\n");
- }
-#endif
-}
-
-/* Send a connect message to xend to tell it to bring up the interface. */
-static void send_interface_connect(struct net_private *np)
-{
- ctrl_msg_t cmsg = {
- .type = CMSG_NETIF_FE,
- .subtype = CMSG_NETIF_FE_INTERFACE_CONNECT,
- .length = sizeof(netif_fe_interface_connect_t),
- };
- netif_fe_interface_connect_t *msg = (void*)cmsg.msg;
-
- msg->handle = np->handle;
- msg->tx_shmem_frame = (virt_to_machine(np->tx) >> PAGE_SHIFT);
- msg->rx_shmem_frame = (virt_to_machine(np->rx) >> PAGE_SHIFT);
-
- ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
-}
-
-/* Send a driver status notification to the domain controller. */
-static int send_driver_status(int ok)
-{
- int err = 0;
- ctrl_msg_t cmsg = {
- .type = CMSG_NETIF_FE,
- .subtype = CMSG_NETIF_FE_DRIVER_STATUS,
- .length = sizeof(netif_fe_driver_status_t),
- };
- netif_fe_driver_status_t *msg = (void*)cmsg.msg;
-
- msg->status = (ok ? NETIF_DRIVER_STATUS_UP : NETIF_DRIVER_STATUS_DOWN);
- err = ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
- return err;
-}
-
-/* Stop network device and free tx/rx queues and irq.
- */
-static void vif_release(struct net_private *np)
-{
- /* Stop old i/f to prevent errors whilst we rebuild the state. */
- spin_lock_irq(&np->tx_lock);
- spin_lock(&np->rx_lock);
- netif_stop_queue(np->dev);
- /* np->backend_state = BEST_DISCONNECTED; */
- spin_unlock(&np->rx_lock);
- spin_unlock_irq(&np->tx_lock);
-
- /* Free resources. */
- if(np->tx != NULL){
- free_irq(np->irq, np->dev);
- unbind_evtchn_from_irq(np->evtchn);
- free_page((unsigned long)np->tx);
- free_page((unsigned long)np->rx);
- np->irq = 0;
- np->evtchn = 0;
- np->tx = NULL;
- np->rx = NULL;
- }
-}
-
-/* Release vif resources and close it down completely.
- */
-static void vif_close(struct net_private *np)
-{
- WPRINTK("Unexpected netif-CLOSED message in state %s\n",
- be_state_name[np->backend_state]);
- vif_release(np);
- np->backend_state = BEST_CLOSED;
- /* todo: take dev down and free. */
- vif_show(np);
-}
-
-/* Move the vif into disconnected state.
- * Allocates tx/rx pages.
- * Sends connect message to xend.
- */
-static void vif_disconnect(struct net_private *np)
-{
- if(np->tx) free_page((unsigned long)np->tx);
- if(np->rx) free_page((unsigned long)np->rx);
-    /* Before this, np->tx and np->rx had better be NULL. */
- np->tx = (netif_tx_interface_t *)__get_free_page(GFP_KERNEL);
- np->rx = (netif_rx_interface_t *)__get_free_page(GFP_KERNEL);
- memset(np->tx, 0, PAGE_SIZE);
- memset(np->rx, 0, PAGE_SIZE);
- np->backend_state = BEST_DISCONNECTED;
- send_interface_connect(np);
- vif_show(np);
-}
-
-/* Begin interface recovery.
- *
- * NB. Whilst we're recovering, we turn the carrier state off. We
- * take measures to ensure that this device isn't used for
- * anything. We also stop the queue for this device. Various
- * different approaches (e.g. continuing to buffer packets) have
- * been tested but don't appear to improve the overall impact on
- * TCP connections.
- *
- * TODO: (MAW) Change the Xend<->Guest protocol so that a recovery
- * is initiated by a special "RESET" message - disconnect could
- * just mean we're not allowed to use this interface any more.
- */
-static void vif_reset(struct net_private *np)
-{
- IPRINTK("Attempting to reconnect network interface: handle=%u\n",
- np->handle);
- vif_release(np);
- vif_disconnect(np);
- vif_show(np);
-}
-
-/* Move the vif into connected state.
- * Sets the mac and event channel from the message.
- * Binds the irq to the event channel.
- */
-static void
-vif_connect(struct net_private *np, netif_fe_interface_status_t *status)
-{
- struct net_device *dev = np->dev;
- memcpy(dev->dev_addr, status->mac, ETH_ALEN);
- network_connect(dev, status);
- np->evtchn = status->evtchn;
- np->irq = bind_evtchn_to_irq(np->evtchn);
- (void)request_irq(np->irq, netif_int, SA_SAMPLE_RANDOM, dev->name, dev);
- netctrl_connected_count();
- (void)send_fake_arp(dev);
- vif_show(np);
-}
-
-
-/** Create a network device.
- * @param handle device handle
- * @param val return parameter for created device
- * @return 0 on success, error code otherwise
- */
-static int create_netdev(int handle, struct net_device **val)
-{
- int i, err = 0;
- struct net_device *dev = NULL;
- struct net_private *np = NULL;
-
- if ((dev = alloc_etherdev(sizeof(struct net_private))) == NULL) {
- printk(KERN_WARNING "%s> alloc_etherdev failed.\n", __FUNCTION__);
- err = -ENOMEM;
- goto exit;
- }
-
- np = netdev_priv(dev);
- np->backend_state = BEST_CLOSED;
- np->user_state = UST_CLOSED;
- np->handle = handle;
-
- spin_lock_init(&np->tx_lock);
- spin_lock_init(&np->rx_lock);
-
- skb_queue_head_init(&np->rx_batch);
- np->rx_target = RX_MIN_TARGET;
- np->rx_min_target = RX_MIN_TARGET;
- np->rx_max_target = RX_MAX_TARGET;
-
- /* Initialise {tx,rx}_skbs to be a free chain containing every entry. */
- for (i = 0; i <= NETIF_TX_RING_SIZE; i++)
- np->tx_skbs[i] = (void *)(i+1);
- for (i = 0; i <= NETIF_RX_RING_SIZE; i++)
- np->rx_skbs[i] = (void *)(i+1);
-
- dev->open = network_open;
- dev->hard_start_xmit = network_start_xmit;
- dev->stop = network_close;
- dev->get_stats = network_get_stats;
- dev->poll = netif_poll;
- dev->weight = 64;
-
- if ((err = register_netdev(dev)) != 0) {
- printk(KERN_WARNING "%s> register_netdev err=%d\n", __FUNCTION__, err);
- goto exit;
- }
-
- if ((err = xennet_proc_addif(dev)) != 0) {
- unregister_netdev(dev);
- goto exit;
- }
-
- np->dev = dev;
- list_add(&np->list, &dev_list);
-
- exit:
- if ((err != 0) && (dev != NULL))
- kfree(dev);
- else if (val != NULL)
- *val = dev;
- return err;
-}
-
-/* Get the target interface for a status message.
- * Creates the interface when it makes sense.
- * The returned interface may be null when there is no error.
- *
- * @param status status message
- * @param np return parameter for interface state
- * @return 0 on success, error code otherwise
- */
-static int
-target_vif(netif_fe_interface_status_t *status, struct net_private **np)
-{
- int err = 0;
- struct net_device *dev;
-
- DPRINTK("> handle=%d\n", status->handle);
- if (status->handle < 0) {
- err = -EINVAL;
- goto exit;
- }
-
- if ((dev = find_dev_by_handle(status->handle)) != NULL)
- goto exit;
-
- if (status->status == NETIF_INTERFACE_STATUS_CLOSED)
- goto exit;
- if (status->status == NETIF_INTERFACE_STATUS_CHANGED)
- goto exit;
-
- /* It's a new interface in a good state - create it. */
- DPRINTK("> create device...\n");
- if ((err = create_netdev(status->handle, &dev)) != 0)
- goto exit;
-
- netctrl.interface_n++;
-
- exit:
- if (np != NULL)
- *np = ((dev && !err) ? netdev_priv(dev) : NULL);
- DPRINTK("< err=%d\n", err);
- return err;
-}
-
-/* Handle an interface status message. */
-static void netif_interface_status(netif_fe_interface_status_t *status)
-{
- int err = 0;
- struct net_private *np = NULL;
-
- DPRINTK("> status=%s handle=%d\n",
- status_name[status->status], status->handle);
-
- if ((err = target_vif(status, &np)) != 0) {
- WPRINTK("Invalid netif: handle=%u\n", status->handle);
- return;
- }
-
- if (np == NULL) {
- DPRINTK("> no vif\n");
- return;
- }
-
- switch (status->status) {
- case NETIF_INTERFACE_STATUS_CLOSED:
- switch (np->backend_state) {
- case BEST_CLOSED:
- case BEST_DISCONNECTED:
- case BEST_CONNECTED:
- vif_close(np);
- break;
- }
- break;
-
- case NETIF_INTERFACE_STATUS_DISCONNECTED:
- switch (np->backend_state) {
- case BEST_CLOSED:
- vif_disconnect(np);
- break;
- case BEST_DISCONNECTED:
- case BEST_CONNECTED:
- vif_reset(np);
- break;
- }
- break;
-
- case NETIF_INTERFACE_STATUS_CONNECTED:
- switch (np->backend_state) {
- case BEST_CLOSED:
- WPRINTK("Unexpected netif status %s in state %s\n",
- status_name[status->status],
- be_state_name[np->backend_state]);
- vif_disconnect(np);
- vif_connect(np, status);
- break;
- case BEST_DISCONNECTED:
- vif_connect(np, status);
- break;
- }
- break;
-
- case NETIF_INTERFACE_STATUS_CHANGED:
- /*
- * The domain controller is notifying us that a device has been
- * added or removed.
- */
- break;
-
- default:
- WPRINTK("Invalid netif status code %d\n", status->status);
- break;
- }
-
- vif_show(np);
-}
-
-/*
- * Handle a driver status message from the domain controller.
- */
-static void netif_driver_status(netif_fe_driver_status_t *status)
-{
- netctrl.up = status->status;
- netctrl_connected_count();
-}
-
-/* Receive handler for control messages. */
-static void netif_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
-{
-
- switch (msg->subtype) {
- case CMSG_NETIF_FE_INTERFACE_STATUS:
- if (msg->length != sizeof(netif_fe_interface_status_t))
- goto error;
- netif_interface_status((netif_fe_interface_status_t *) &msg->msg[0]);
- break;
-
- case CMSG_NETIF_FE_DRIVER_STATUS:
- if (msg->length != sizeof(netif_fe_driver_status_t))
- goto error;
- netif_driver_status((netif_fe_driver_status_t *) &msg->msg[0]);
- break;
-
- error:
- default:
- msg->length = 0;
- break;
- }
-
- ctrl_if_send_response(msg);
-}
-
-
-#if 1
-/* Wait for all interfaces to be connected.
- *
- * This works OK, but we'd like to use the probing mode (see below).
- */
-static int probe_interfaces(void)
-{
- int err = 0, conn = 0;
- int wait_i, wait_n = 100;
-
- DPRINTK(">\n");
-
- for (wait_i = 0; wait_i < wait_n; wait_i++) {
- DPRINTK("> wait_i=%d\n", wait_i);
- conn = netctrl_connected();
- if(conn) break;
- DPRINTK("> schedule_timeout...\n");
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(10);
- }
-
- DPRINTK("> wait finished...\n");
- if (conn <= 0) {
- err = netctrl_err(-ENETDOWN);
- WPRINTK("Failed to connect all virtual interfaces: err=%d\n", err);
- }
-
- DPRINTK("< err=%d\n", err);
-
- return err;
-}
-#else
-/* Probe for interfaces until no more are found.
- *
- * This is the mode we'd like to use, but at the moment it panics the kernel.
-*/
-static int probe_interfaces(void)
-{
- int err = 0;
- int wait_i, wait_n = 100;
- ctrl_msg_t cmsg = {
- .type = CMSG_NETIF_FE,
- .subtype = CMSG_NETIF_FE_INTERFACE_STATUS,
- .length = sizeof(netif_fe_interface_status_t),
- };
- netif_fe_interface_status_t msg = {};
- ctrl_msg_t rmsg = {};
- netif_fe_interface_status_t *reply = (void*)rmsg.msg;
- int state = TASK_UNINTERRUPTIBLE;
- u32 query = -1;
-
- DPRINTK(">\n");
-
- netctrl.interface_n = 0;
- for (wait_i = 0; wait_i < wait_n; wait_i++) {
- DPRINTK("> wait_i=%d query=%d\n", wait_i, query);
- msg.handle = query;
- memcpy(cmsg.msg, &msg, sizeof(msg));
- DPRINTK("> set_current_state...\n");
- set_current_state(state);
- DPRINTK("> rmsg=%p msg=%p, reply=%p\n", &rmsg, rmsg.msg, reply);
- DPRINTK("> sending...\n");
- err = ctrl_if_send_message_and_get_response(&cmsg, &rmsg, state);
- DPRINTK("> err=%d\n", err);
- if(err) goto exit;
- DPRINTK("> rmsg=%p msg=%p, reply=%p\n", &rmsg, rmsg.msg, reply);
- if((int)reply->handle < 0) {
- // No more interfaces.
- break;
- }
- query = -reply->handle - 2;
- DPRINTK(">netif_interface_status ...\n");
- netif_interface_status(reply);
- }
-
- exit:
- if (err) {
- err = netctrl_err(-ENETDOWN);
- WPRINTK("Connecting virtual network interfaces failed: err=%d\n", err);
- }
-
- DPRINTK("< err=%d\n", err);
- return err;
-}
-
-#endif
-
-/*
- * We use this notifier to send out a fake ARP reply to reset switches and
- * router ARP caches when an IP interface is brought up on a VIF.
- */
-static int
-inetdev_notify(struct notifier_block *this, unsigned long event, void *ptr)
-{
- struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
- struct net_device *dev = ifa->ifa_dev->dev;
- struct list_head *ent;
- struct net_private *np;
-
- if (event != NETDEV_UP)
- goto out;
-
- list_for_each (ent, &dev_list) {
- np = list_entry(ent, struct net_private, list);
- if (np->dev == dev)
- (void)send_fake_arp(dev);
- }
-
- out:
- return NOTIFY_DONE;
-}
-
-static struct notifier_block notifier_inetdev = {
- .notifier_call = inetdev_notify,
- .next = NULL,
- .priority = 0
-};
-
-static int __init netif_init(void)
-{
- int err = 0;
-
- if (xen_start_info.flags & SIF_INITDOMAIN)
- return 0;
-
- if ((err = xennet_proc_init()) != 0)
- return err;
-
- IPRINTK("Initialising virtual ethernet driver.\n");
- INIT_LIST_HEAD(&dev_list);
- (void)register_inetaddr_notifier(¬ifier_inetdev);
- netctrl_init();
- (void)ctrl_if_register_receiver(CMSG_NETIF_FE, netif_ctrlif_rx,
- CALLBACK_IN_BLOCKING_CONTEXT);
- send_driver_status(1);
- err = probe_interfaces();
- if (err)
- ctrl_if_unregister_receiver(CMSG_NETIF_FE, netif_ctrlif_rx);
-
- DPRINTK("< err=%d\n", err);
- return err;
-}
-
-static void vif_suspend(struct net_private *np)
-{
- /* Avoid having tx/rx stuff happen until we're ready. */
- free_irq(np->irq, np->dev);
- unbind_evtchn_from_irq(np->evtchn);
-}
-
-static void vif_resume(struct net_private *np)
-{
- /*
-     * Connect regardless of whether the IFF_UP flag is set.
- * Stop bad things from happening until we're back up.
- */
- np->backend_state = BEST_DISCONNECTED;
- memset(np->tx, 0, PAGE_SIZE);
- memset(np->rx, 0, PAGE_SIZE);
-
- send_interface_connect(np);
-}
-
-void netif_suspend(void)
-{
- struct list_head *ent;
- struct net_private *np;
-
- list_for_each (ent, &dev_list) {
- np = list_entry(ent, struct net_private, list);
- vif_suspend(np);
- }
-}
-
-void netif_resume(void)
-{
- struct list_head *ent;
- struct net_private *np;
-
- list_for_each (ent, &dev_list) {
- np = list_entry(ent, struct net_private, list);
- vif_resume(np);
- }
-}
-
-#ifdef CONFIG_PROC_FS
-
-#define TARGET_MIN 0UL
-#define TARGET_MAX 1UL
-#define TARGET_CUR 2UL
-
-static int xennet_proc_read(
- char *page, char **start, off_t off, int count, int *eof, void *data)
-{
- struct net_device *dev = (struct net_device *)((unsigned long)data & ~3UL);
- struct net_private *np = netdev_priv(dev);
- int len = 0, which_target = (int)data & 3;
-
- switch (which_target)
- {
- case TARGET_MIN:
- len = sprintf(page, "%d\n", np->rx_min_target);
- break;
- case TARGET_MAX:
- len = sprintf(page, "%d\n", np->rx_max_target);
- break;
- case TARGET_CUR:
- len = sprintf(page, "%d\n", np->rx_target);
- break;
- }
-
- *eof = 1;
- return len;
-}
-
-static int xennet_proc_write(
- struct file *file, const char __user *buffer,
- unsigned long count, void *data)
-{
- struct net_device *dev = (struct net_device *)((unsigned long)data & ~3UL);
- struct net_private *np = netdev_priv(dev);
- int which_target = (int)data & 3;
- char string[64];
- long target;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if (count <= 1)
- return -EBADMSG; /* runt */
- if (count > sizeof(string))
- return -EFBIG; /* too long */
-
- if (copy_from_user(string, buffer, count))
- return -EFAULT;
- string[sizeof(string)-1] = '\0';
-
- target = simple_strtol(string, NULL, 10);
- if (target < RX_MIN_TARGET)
- target = RX_MIN_TARGET;
- if (target > RX_MAX_TARGET)
- target = RX_MAX_TARGET;
-
- spin_lock(&np->rx_lock);
-
- switch (which_target)
- {
- case TARGET_MIN:
- if (target > np->rx_max_target)
- np->rx_max_target = target;
- np->rx_min_target = target;
- if (target > np->rx_target)
- np->rx_target = target;
- break;
- case TARGET_MAX:
- if (target < np->rx_min_target)
- np->rx_min_target = target;
- np->rx_max_target = target;
- if (target < np->rx_target)
- np->rx_target = target;
- break;
- case TARGET_CUR:
- break;
- }
-
- network_alloc_rx_buffers(dev);
-
- spin_unlock(&np->rx_lock);
-
- return count;
-}
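The proc handlers above smuggle a selector in the two low-order bits of the
entry's data pointer, relying on net_device allocations being at least
4-byte aligned. The tag/untag pair in isolation (illustrative; the demo
function is hypothetical):

    static void tag_demo(struct net_device *dev)
    {
        /* Tag: as stored by xennet_proc_addif() below. */
        void *data = (void *)((unsigned long)dev | TARGET_MAX);

        /* Untag: as recovered by the read/write handlers above. */
        struct net_device *d =
            (struct net_device *)((unsigned long)data & ~3UL);
        int which_target = (int)((unsigned long)data & 3);

        (void)d; (void)which_target; /* silence unused warnings */
    }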
-
-static int xennet_proc_init(void)
-{
- if (proc_mkdir("xen/net", NULL) == NULL)
- return -ENOMEM;
- return 0;
-}
-
-static int xennet_proc_addif(struct net_device *dev)
-{
- struct proc_dir_entry *dir, *min, *max, *cur;
- char name[30];
-
- sprintf(name, "xen/net/%s", dev->name);
-
- dir = proc_mkdir(name, NULL);
- if (!dir)
- goto nomem;
-
- min = create_proc_entry("rxbuf_min", 0644, dir);
- max = create_proc_entry("rxbuf_max", 0644, dir);
- cur = create_proc_entry("rxbuf_cur", 0444, dir);
- if (!min || !max || !cur)
- goto nomem;
-
- min->read_proc = xennet_proc_read;
- min->write_proc = xennet_proc_write;
- min->data = (void *)((unsigned long)dev | TARGET_MIN);
-
- max->read_proc = xennet_proc_read;
- max->write_proc = xennet_proc_write;
- max->data = (void *)((unsigned long)dev | TARGET_MAX);
-
- cur->read_proc = xennet_proc_read;
- cur->write_proc = xennet_proc_write;
- cur->data = (void *)((unsigned long)dev | TARGET_CUR);
-
- return 0;
-
- nomem:
- xennet_proc_delif(dev);
- return -ENOMEM;
-}
-
-static void xennet_proc_delif(struct net_device *dev)
-{
- char name[30];
-
- sprintf(name, "xen/net/%s/rxbuf_min", dev->name);
- remove_proc_entry(name, NULL);
-
- sprintf(name, "xen/net/%s/rxbuf_max", dev->name);
- remove_proc_entry(name, NULL);
-
- sprintf(name, "xen/net/%s/rxbuf_cur", dev->name);
- remove_proc_entry(name, NULL);
-
- sprintf(name, "xen/net/%s", dev->name);
- remove_proc_entry(name, NULL);
-}
-
-#endif
-
-module_init(netif_init);
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/drivers/xen/privcmd/Makefile
--- a/linux-2.6.11-xen-sparse/drivers/xen/privcmd/Makefile Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,2 +0,0 @@
-
-obj-y := privcmd.o
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/drivers/xen/privcmd/privcmd.c
--- a/linux-2.6.11-xen-sparse/drivers/xen/privcmd/privcmd.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,220 +0,0 @@
-/******************************************************************************
- * privcmd.c
- *
- * Interface to privileged domain-0 commands.
- *
- * Copyright (c) 2002-2004, K A Fraser, B Dragovic
- */
-
-#include <linux/config.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/mm.h>
-#include <linux/mman.h>
-#include <linux/swap.h>
-#include <linux/smp_lock.h>
-#include <linux/highmem.h>
-#include <linux/pagemap.h>
-#include <linux/seq_file.h>
-
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
-#include <asm/uaccess.h>
-#include <asm/tlb.h>
-#include <asm-xen/linux-public/privcmd.h>
-#include <asm-xen/xen-public/dom0_ops.h>
-#include <asm-xen/xen_proc.h>
-
-static struct proc_dir_entry *privcmd_intf;
-
-static int privcmd_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long data)
-{
- int ret = -ENOSYS;
-
- switch ( cmd )
- {
- case IOCTL_PRIVCMD_HYPERCALL:
- {
- privcmd_hypercall_t hypercall;
-
- if ( copy_from_user(&hypercall, (void *)data, sizeof(hypercall)) )
- return -EFAULT;
-
- __asm__ __volatile__ (
- "pushl %%ebx; pushl %%ecx; pushl %%edx; pushl %%esi; pushl %%edi; "
- "movl 4(%%eax),%%ebx ;"
- "movl 8(%%eax),%%ecx ;"
- "movl 12(%%eax),%%edx ;"
- "movl 16(%%eax),%%esi ;"
- "movl 20(%%eax),%%edi ;"
- "movl (%%eax),%%eax ;"
- TRAP_INSTR "; "
- "popl %%edi; popl %%esi; popl %%edx; popl %%ecx; popl %%ebx"
- : "=a" (ret) : "0" (&hypercall) : "memory" );
-
- }
- break;
-
- case IOCTL_PRIVCMD_INITDOMAIN_EVTCHN:
- {
- extern int initdom_ctrlif_domcontroller_port;
- ret = initdom_ctrlif_domcontroller_port;
- }
- break;
-
-#if defined(CONFIG_XEN_PRIVILEGED_GUEST)
- case IOCTL_PRIVCMD_MMAP:
- {
-#define PRIVCMD_MMAP_SZ 32
- privcmd_mmap_t mmapcmd;
- privcmd_mmap_entry_t msg[PRIVCMD_MMAP_SZ], *p;
- int i, rc;
-
- if ( copy_from_user(&mmapcmd, (void *)data, sizeof(mmapcmd)) )
- return -EFAULT;
-
- p = mmapcmd.entry;
-
- for (i=0; i<mmapcmd.num; i+=PRIVCMD_MMAP_SZ, p+=PRIVCMD_MMAP_SZ)
- {
- int j, n = ((mmapcmd.num-i)>PRIVCMD_MMAP_SZ)?
- PRIVCMD_MMAP_SZ:(mmapcmd.num-i);
- if ( copy_from_user(&msg, p, n*sizeof(privcmd_mmap_entry_t)) )
- return -EFAULT;
-
- for ( j = 0; j < n; j++ )
- {
- struct vm_area_struct *vma =
- find_vma( current->mm, msg[j].va );
-
- if ( !vma )
- return -EINVAL;
-
- if ( msg[j].va > PAGE_OFFSET )
- return -EINVAL;
-
- if ( (msg[j].va + (msg[j].npages<<PAGE_SHIFT)) > vma->vm_end )
- return -EINVAL;
-
- if ( (rc = direct_remap_area_pages(vma->vm_mm,
- msg[j].va&PAGE_MASK,
- msg[j].mfn<<PAGE_SHIFT,
- msg[j].npages<<PAGE_SHIFT,
- vma->vm_page_prot,
- mmapcmd.dom)) < 0 )
- return rc;
- }
- }
- ret = 0;
- }
- break;
-
- case IOCTL_PRIVCMD_MMAPBATCH:
- {
-#define MAX_DIRECTMAP_MMU_QUEUE 130
- mmu_update_t u[MAX_DIRECTMAP_MMU_QUEUE], *w, *v;
- privcmd_mmapbatch_t m;
- struct vm_area_struct *vma = NULL;
- unsigned long *p, addr;
- unsigned long mfn;
- int i;
-
- if ( copy_from_user(&m, (void *)data, sizeof(m)) )
- { ret = -EFAULT; goto batch_err; }
-
- vma = find_vma( current->mm, m.addr );
-
- if ( !vma )
- { ret = -EINVAL; goto batch_err; }
-
- if ( m.addr > PAGE_OFFSET )
- { ret = -EFAULT; goto batch_err; }
-
- if ( (m.addr + (m.num<<PAGE_SHIFT)) > vma->vm_end )
- { ret = -EFAULT; goto batch_err; }
-
- u[0].ptr = MMU_EXTENDED_COMMAND;
- u[0].val = MMUEXT_SET_FOREIGNDOM;
- u[0].val |= (unsigned long)m.dom << 16;
- v = w = &u[1];
-
- p = m.arr;
- addr = m.addr;
- for ( i = 0; i < m.num; i++, addr += PAGE_SIZE, p++ )
- {
- if ( get_user(mfn, p) )
- return -EFAULT;
-
- v->val = (mfn << PAGE_SHIFT) | pgprot_val(vma->vm_page_prot);
-
- __direct_remap_area_pages(vma->vm_mm,
- addr,
- PAGE_SIZE,
- v);
-
- if ( unlikely(HYPERVISOR_mmu_update(u, v - u + 1, NULL) < 0) )
- put_user( 0xF0000000 | mfn, p );
-
- v = w;
- }
- ret = 0;
- break;
-
- batch_err:
- printk("batch_err ret=%d vma=%p addr=%lx num=%d arr=%p %lx-%lx\n",
- ret, vma, m.addr, m.num, m.arr, vma->vm_start, vma->vm_end);
- break;
- }
- break;
-#endif
-
- case IOCTL_PRIVCMD_GET_MACH2PHYS_START_MFN:
- {
- unsigned long m2p_start_mfn =
- HYPERVISOR_shared_info->arch.mfn_to_pfn_start;
-
- if( put_user( m2p_start_mfn, (unsigned long *) data ) )
- ret = -EFAULT;
- else
- ret = 0;
- }
- break;
-
- default:
- ret = -EINVAL;
- break;
- }
- return ret;
-}
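For context, user space reaches this ioctl through the /proc/xen/privcmd
node created in privcmd_init() below. A hedged sketch of a caller (header
path and type names as in this tree; the helper itself is illustrative):

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <asm-xen/linux-public/privcmd.h>

    /* Issue a raw hypercall through the privileged command device. */
    int do_privcmd_hypercall(privcmd_hypercall_t *call)
    {
        int ret = -1;
        int fd = open("/proc/xen/privcmd", O_RDWR);

        if (fd >= 0) {
            ret = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, (unsigned long)call);
            close(fd);
        }
        return ret;
    }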
-
-static int privcmd_mmap(struct file * file, struct vm_area_struct * vma)
-{
- /* DONTCOPY is essential for Xen as copy_page_range is broken. */
- vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
-
- return 0;
-}
-
-static struct file_operations privcmd_file_ops = {
- ioctl : privcmd_ioctl,
- mmap: privcmd_mmap
-};
-
-
-static int __init privcmd_init(void)
-{
- if ( !(xen_start_info.flags & SIF_PRIVILEGED) )
- return 0;
-
- privcmd_intf = create_xen_proc_entry("privcmd", 0400);
- if ( privcmd_intf != NULL )
- privcmd_intf->proc_fops = &privcmd_file_ops;
-
- return 0;
-}
-
-__initcall(privcmd_init);
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/include/asm-generic/pgtable.h
--- a/linux-2.6.11-xen-sparse/include/asm-generic/pgtable.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,147 +0,0 @@
-#ifndef _ASM_GENERIC_PGTABLE_H
-#define _ASM_GENERIC_PGTABLE_H
-
-#ifndef __HAVE_ARCH_PTEP_ESTABLISH
-/*
- * Establish a new mapping:
- * - flush the old one
- * - update the page tables
- * - inform the TLB about the new one
- *
- * We hold the mm semaphore for reading and vma->vm_mm->page_table_lock.
- *
- * Note: the old pte is known to not be writable, so we don't need to
- * worry about dirty bits etc getting lost.
- */
-#ifndef __HAVE_ARCH_SET_PTE_ATOMIC
-#define ptep_establish(__vma, __address, __ptep, __entry) \
-do { \
- set_pte(__ptep, __entry); \
- flush_tlb_page(__vma, __address); \
-} while (0)
-#else /* __HAVE_ARCH_SET_PTE_ATOMIC */
-#define ptep_establish(__vma, __address, __ptep, __entry) \
-do { \
- set_pte_atomic(__ptep, __entry); \
- flush_tlb_page(__vma, __address); \
-} while (0)
-#endif /* __HAVE_ARCH_SET_PTE_ATOMIC */
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-/*
- * Largely same as above, but only sets the access flags (dirty,
- * accessed, and writable). Furthermore, we know it always gets set
- * to a "more permissive" setting, which allows most architectures
- * to optimize this.
- */
-#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-do { \
- set_pte(__ptep, __entry); \
- flush_tlb_page(__vma, __address); \
-} while (0)
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_ESTABLISH_NEW
-/*
- * Establish a mapping where none previously existed
- */
-#define ptep_establish_new(__vma, __address, __ptep, __entry) \
-do { \
- set_pte(__ptep, __entry); \
-} while (0)
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-static inline int ptep_test_and_clear_young(pte_t *ptep)
-{
- pte_t pte = *ptep;
- if (!pte_young(pte))
- return 0;
- set_pte(ptep, pte_mkold(pte));
- return 1;
-}
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
-#define ptep_clear_flush_young(__vma, __address, __ptep) \
-({ \
- int __young = ptep_test_and_clear_young(__ptep); \
- if (__young) \
- flush_tlb_page(__vma, __address); \
- __young; \
-})
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
-static inline int ptep_test_and_clear_dirty(pte_t *ptep)
-{
- pte_t pte = *ptep;
- if (!pte_dirty(pte))
- return 0;
- set_pte(ptep, pte_mkclean(pte));
- return 1;
-}
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
-#define ptep_clear_flush_dirty(__vma, __address, __ptep) \
-({ \
- int __dirty = ptep_test_and_clear_dirty(__ptep); \
- if (__dirty) \
- flush_tlb_page(__vma, __address); \
- __dirty; \
-})
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
-static inline pte_t ptep_get_and_clear(pte_t *ptep)
-{
- pte_t pte = *ptep;
- pte_clear(ptep);
- return pte;
-}
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
-#define ptep_clear_flush(__vma, __address, __ptep) \
-({ \
- pte_t __pte = ptep_get_and_clear(__ptep); \
- flush_tlb_page(__vma, __address); \
- __pte; \
-})
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
-static inline void ptep_set_wrprotect(pte_t *ptep)
-{
- pte_t old_pte = *ptep;
- set_pte(ptep, pte_wrprotect(old_pte));
-}
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_MKDIRTY
-static inline void ptep_mkdirty(pte_t *ptep)
-{
- pte_t old_pte = *ptep;
- set_pte(ptep, pte_mkdirty(old_pte));
-}
-#endif
-
-#ifndef __HAVE_ARCH_PTE_SAME
-#define pte_same(A,B) (pte_val(A) == pte_val(B))
-#endif
-
-#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
-#define page_test_and_clear_dirty(page) (0)
-#endif
-
-#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
-#define page_test_and_clear_young(page) (0)
-#endif
-
-#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
-#define pgd_offset_gate(mm, addr) pgd_offset(mm, addr)
-#endif
-
-#endif /* _ASM_GENERIC_PGTABLE_H */
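The header removed above is purely a set of fallbacks: an architecture with
a better primitive defines the matching __HAVE_ARCH_* symbol and supplies
its own version before this file is included. A minimal sketch of the
override pattern (illustrative; the body simply mirrors the generic
fallback):

    /* In an arch pgtable.h, before pulling in <asm-generic/pgtable.h>: */
    #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
    static inline pte_t ptep_get_and_clear(pte_t *ptep)
    {
        /* A real arch would use an atomic xchg-style variant here. */
        pte_t pte = *ptep;
        pte_clear(ptep);
        return pte;
    }
    /* ...the generic definition above is then compiled out. */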
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/agp.h
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/agp.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,37 +0,0 @@
-#ifndef AGP_H
-#define AGP_H 1
-
-#include <asm/pgtable.h>
-#include <asm/cacheflush.h>
-#include <asm/system.h>
-
-/*
- * Functions to keep the agpgart mappings coherent with the MMU.
- * The GART gives the CPU a physical alias of pages in memory. The alias region is
- * mapped uncacheable. Make sure there are no conflicting mappings
- * with different cacheability attributes for the same page. This avoids
- * data corruption on some CPUs.
- */
-
-int map_page_into_agp(struct page *page);
-int unmap_page_from_agp(struct page *page);
-#define flush_agp_mappings() global_flush_tlb()
-
-/* Could use CLFLUSH here if the cpu supports it. But then it would
- need to be called for each cacheline of the whole page so it may not be
- worth it. Would need a page for it. */
-#define flush_agp_cache() wbinvd()
-
-/* Convert a physical address to an address suitable for the GART. */
-#define phys_to_gart(x) phys_to_machine(x)
-#define gart_to_phys(x) machine_to_phys(x)
-
-/* GATT allocation. Returns/accepts GATT kernel virtual address. */
-#define alloc_gatt_pages(order) ({ \
- char *_t; dma_addr_t _d; \
- _t = dma_alloc_coherent(NULL,PAGE_SIZE<<(order),&_d,GFP_KERNEL); \
- _t; })
-#define free_gatt_pages(table, order) \
- dma_free_coherent(NULL,PAGE_SIZE<<(order),(table),virt_to_bus(table))
-
-#endif
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/bugs.h
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/bugs.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,214 +0,0 @@
-/*
- * include/asm-i386/bugs.h
- *
- * Copyright (C) 1994 Linus Torvalds
- *
- * Cyrix stuff, June 1998 by:
- * - Rafael R. Reilova (moved everything from head.S),
- * <rreilova@xxxxxxxxxxxx>
- * - Channing Corn (tests & fixes),
- * - Andrew D. Balsa (code cleanup).
- *
- * Pentium III FXSR, SSE support
- * Gareth Hughes <gareth@xxxxxxxxxxx>, May 2000
- */
-
-/*
- * This is included by init/main.c to check for architecture-dependent bugs.
- *
- * Needs:
- * void check_bugs(void);
- */
-
-#include <linux/config.h>
-#include <linux/init.h>
-#include <asm/processor.h>
-#include <asm/i387.h>
-#include <asm/msr.h>
-
-static int __init no_halt(char *s)
-{
- boot_cpu_data.hlt_works_ok = 0;
- return 1;
-}
-
-__setup("no-hlt", no_halt);
-
-static int __init mca_pentium(char *s)
-{
- mca_pentium_flag = 1;
- return 1;
-}
-
-__setup("mca-pentium", mca_pentium);
-
-static int __init no_387(char *s)
-{
- boot_cpu_data.hard_math = 0;
- write_cr0(0xE | read_cr0());
- return 1;
-}
-
-__setup("no387", no_387);
-
-static double __initdata x = 4195835.0;
-static double __initdata y = 3145727.0;
-
-/*
- * This used to check for exceptions..
- * However, it turns out that to support that,
- * the XMM trap handlers basically had to
- * be buggy. So let's have a correct XMM trap
- * handler, and forget about printing out
- * some status at boot.
- *
- * We should really only care about bugs here
- * anyway. Not features.
- */
-static void __init check_fpu(void)
-{
- if (!boot_cpu_data.hard_math) {
-#ifndef CONFIG_MATH_EMULATION
-        printk(KERN_EMERG "No coprocessor found and no math emulation present.\n");
- printk(KERN_EMERG "Giving up.\n");
- for (;;) ;
-#endif
- return;
- }
-
-/* Enable FXSR and company _before_ testing for FP problems. */
- /*
- * Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned.
- */
- if (offsetof(struct task_struct, thread.i387.fxsave) & 15) {
- extern void __buggy_fxsr_alignment(void);
- __buggy_fxsr_alignment();
- }
- if (cpu_has_fxsr) {
- printk(KERN_INFO "Enabling fast FPU save and restore... ");
- set_in_cr4(X86_CR4_OSFXSR);
- printk("done.\n");
- }
- if (cpu_has_xmm) {
-        printk(KERN_INFO "Enabling unmasked SIMD FPU exception support... ");
- set_in_cr4(X86_CR4_OSXMMEXCPT);
- printk("done.\n");
- }
-
- /* Test for the divl bug.. */
- __asm__("fninit\n\t"
- "fldl %1\n\t"
- "fdivl %2\n\t"
- "fmull %2\n\t"
- "fldl %1\n\t"
- "fsubp %%st,%%st(1)\n\t"
- "fistpl %0\n\t"
- "fwait\n\t"
- "fninit"
- : "=m" (*&boot_cpu_data.fdiv_bug)
- : "m" (*&x), "m" (*&y));
- stts();
- if (boot_cpu_data.fdiv_bug)
- printk("Hmm, FPU with FDIV bug.\n");
-}
-
-static void __init check_hlt(void)
-{
- printk(KERN_INFO "Checking 'hlt' instruction... ");
- if (!boot_cpu_data.hlt_works_ok) {
- printk("disabled\n");
- return;
- }
- __asm__ __volatile__("hlt ; hlt ; hlt ; hlt");
- printk("OK.\n");
-}
-
-/*
- * Most 386 processors have a bug where a POPAD can lock the
- * machine even from user space.
- */
-
-static void __init check_popad(void)
-{
-#ifndef CONFIG_X86_POPAD_OK
- int res, inp = (int) &res;
-
- printk(KERN_INFO "Checking for popad bug... ");
- __asm__ __volatile__(
- "movl $12345678,%%eax; movl $0,%%edi; pusha; popa; movl
(%%edx,%%edi),%%ecx "
- : "=&a" (res)
- : "d" (inp)
- : "ecx", "edi" );
-    /* If this fails, it means that any user program may lock the CPU hard. Too bad. */
- if (res != 12345678) printk( "Buggy.\n" );
- else printk( "OK.\n" );
-#endif
-}
-
-/*
- * Check whether we are able to run this kernel safely on SMP.
- *
- * - In order to run on an i386, we need to be compiled for i386
- *   (due to lack of "invlpg" and working WP on an i386)
- * - In order to run on anything without a TSC, we need to be
- *   compiled for an i486.
- * - In order to support the local APIC on a buggy Pentium machine,
- * we need to be compiled with CONFIG_X86_GOOD_APIC disabled,
- * which happens implicitly if compiled for a Pentium or lower
- * (unless an advanced selection of CPU features is used) as an
- * otherwise config implies a properly working local APIC without
- * the need to do extra reads from the APIC.
-*/
-
-static void __init check_config(void)
-{
-/*
- * We'd better not be an i386 if we're configured to use some
- * i486+ only features! (WP works in supervisor mode and the
- * new "invlpg" and "bswap" instructions)
- */
-#if defined(CONFIG_X86_WP_WORKS_OK) || defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_BSWAP)
- if (boot_cpu_data.x86 == 3)
- panic("Kernel requires i486+ for 'invlpg' and other features");
-#endif
-
-/*
- * If we configured ourselves for a TSC, we'd better have one!
- */
-#ifdef CONFIG_X86_TSC
- if (!cpu_has_tsc)
- panic("Kernel compiled for Pentium+, requires TSC feature!");
-#endif
-
-/*
- * If we were told we had a good local APIC, check for buggy Pentia,
- * i.e. all B steppings and the C2 stepping of P54C when using their
- * integrated APIC (see 11AP erratum in "Pentium Processor
- * Specification Update").
- */
-#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_GOOD_APIC)
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL
- && cpu_has_apic
- && boot_cpu_data.x86 == 5
- && boot_cpu_data.x86_model == 2
- && (boot_cpu_data.x86_mask < 6 || boot_cpu_data.x86_mask == 11))
- panic("Kernel compiled for PMMX+, assumes a local APIC without
the read-before-write bug!");
-#endif
-}
-
-extern void alternative_instructions(void);
-
-static void __init check_bugs(void)
-{
- identify_cpu(&boot_cpu_data);
-#ifndef CONFIG_SMP
- printk("CPU: ");
- print_cpu_info(&boot_cpu_data);
-#endif
- check_config();
- check_fpu();
- check_hlt();
- check_popad();
-    system_utsname.machine[1] = '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
- alternative_instructions();
-}
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/desc.h
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/desc.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,143 +0,0 @@
-#ifndef __ARCH_DESC_H
-#define __ARCH_DESC_H
-
-#include <asm/ldt.h>
-#include <asm/segment.h>
-
-#ifndef __ASSEMBLY__
-
-#include <linux/preempt.h>
-#include <linux/smp.h>
-
-#include <asm/mmu.h>
-
-extern struct desc_struct cpu_gdt_table[NR_CPUS][GDT_ENTRIES];
-
-struct Xgt_desc_struct {
- unsigned short size;
- unsigned long address __attribute__((packed));
- unsigned short pad;
-} __attribute__ ((packed));
-
-extern struct Xgt_desc_struct idt_descr, cpu_gdt_descr[NR_CPUS];
-
-#define load_TR_desc() __asm__ __volatile__("ltr %%ax"::"a" (GDT_ENTRY_TSS*8))
-#define load_LDT_desc() __asm__ __volatile__("lldt %%ax"::"a" (GDT_ENTRY_LDT*8))
-
-#define get_cpu_gdt_table(_cpu) ((struct desc_struct *)cpu_gdt_descr[(_cpu)].address)
-
-/*
- * This is the ldt that every process will get unless we need
- * something other than this.
- */
-extern struct desc_struct default_ldt[];
-extern void set_intr_gate(unsigned int irq, void * addr);
-
-#define _set_tssldt_desc(n,addr,limit,type) \
-__asm__ __volatile__ ("movw %w3,0(%2)\n\t" \
- "movw %%ax,2(%2)\n\t" \
- "rorl $16,%%eax\n\t" \
- "movb %%al,4(%2)\n\t" \
- "movb %4,5(%2)\n\t" \
- "movb $0,6(%2)\n\t" \
- "movb %%ah,7(%2)\n\t" \
- "rorl $16,%%eax" \
- : "=m"(*(n)) : "a" (addr), "r"(n), "ir"(limit), "i"(type))
-
-static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, void *addr)
-{
- _set_tssldt_desc(&get_cpu_gdt_table(cpu)[entry], (int)addr,
- offsetof(struct tss_struct, __cacheline_filler) - 1, 0x89);
-}
-
-#define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
-
-static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int size)
-{
- _set_tssldt_desc(&get_cpu_gdt_table(cpu)[GDT_ENTRY_LDT],
- (int)addr, ((size << 3)-1), 0x82);
-}
-
-#define LDT_entry_a(info) \
- ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
-
-#define LDT_entry_b(info) \
- (((info)->base_addr & 0xff000000) | \
- (((info)->base_addr & 0x00ff0000) >> 16) | \
- ((info)->limit & 0xf0000) | \
- (((info)->read_exec_only ^ 1) << 9) | \
- ((info)->contents << 10) | \
- (((info)->seg_not_present ^ 1) << 15) | \
- ((info)->seg_32bit << 22) | \
- ((info)->limit_in_pages << 23) | \
- ((info)->useable << 20) | \
- 0x7000)
-
-#define LDT_empty(info) (\
- (info)->base_addr == 0 && \
- (info)->limit == 0 && \
- (info)->contents == 0 && \
- (info)->read_exec_only == 1 && \
- (info)->seg_32bit == 0 && \
- (info)->limit_in_pages == 0 && \
- (info)->seg_not_present == 1 && \
- (info)->useable == 0 )
-
-#if TLS_SIZE != 24
-# error update this code.
-#endif
-
-static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
-{
-#define C(i) HYPERVISOR_update_descriptor(virt_to_machine(&get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]), ((u32 *)&t->tls_array[i])[0], ((u32 *)&t->tls_array[i])[1])
- C(0); C(1); C(2);
-#undef C
-}
-
-static inline void clear_LDT(void)
-{
- int cpu = get_cpu();
-
- /*
- * NB. We load the default_ldt for lcall7/27 handling on demand, as
-     * it slows down context switching. No one uses it anyway.
- */
- cpu = cpu; /* XXX avoid compiler warning */
- queue_set_ldt(0UL, 0);
- put_cpu();
-}
-
-/*
- * load one particular LDT into the current CPU
- */
-static inline void load_LDT_nolock(mm_context_t *pc, int cpu)
-{
- void *segments = pc->ldt;
- int count = pc->size;
-
- if (likely(!count))
- segments = NULL;
-
- queue_set_ldt((unsigned long)segments, count);
-}
-
-static inline void load_LDT(mm_context_t *pc)
-{
- int cpu = get_cpu();
- load_LDT_nolock(pc, cpu);
- flush_page_update_queue();
- put_cpu();
-}
-
-static inline unsigned long get_desc_base(unsigned long *desc)
-{
- unsigned long base;
- base = ((desc[0] >> 16) & 0x0000ffff) |
- ((desc[1] << 16) & 0x00ff0000) |
- (desc[1] & 0xff000000);
- return base;
-}
-
-#endif /* !__ASSEMBLY__ */
-
-#endif
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/dma-mapping.h
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/dma-mapping.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,165 +0,0 @@
-#ifndef _ASM_I386_DMA_MAPPING_H
-#define _ASM_I386_DMA_MAPPING_H
-
-#include <linux/mm.h>
-
-#include <asm/cache.h>
-#include <asm/io.h>
-#include <asm/scatterlist.h>
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
-void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, int flag);
-
-void dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle);
-
-extern dma_addr_t
-dma_map_single(struct device *dev, void *ptr, size_t size,
- enum dma_data_direction direction);
-
-extern void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction direction);
-
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction direction)
-{
- int i;
-
- BUG_ON(direction == DMA_NONE);
-
- for (i = 0; i < nents; i++ ) {
- BUG_ON(!sg[i].page);
-
- sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
- }
-
- flush_write_buffers();
- return nents;
-}
-
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page, unsigned long offset,
- size_t size, enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
- return page_to_phys(page) + offset;
-}
-
-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
-}
-
-
-static inline void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
-}
-
-extern void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction);
-
-extern void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction);
-
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction);
-}
-
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- dma_sync_single_for_device(dev, dma_handle+offset, size, direction);
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
- enum dma_data_direction direction)
-{
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
- enum dma_data_direction direction)
-{
- flush_write_buffers();
-}
-
-static inline int
-dma_mapping_error(dma_addr_t dma_addr)
-{
- return 0;
-}
-
-static inline int
-dma_supported(struct device *dev, u64 mask)
-{
- /*
- * we fall back to GFP_DMA when the mask isn't all 1s,
- * so we can't guarantee allocations that must be
- * within a tighter range than GFP_DMA..
- */
- if(mask < 0x00ffffff)
- return 0;
-
- return 1;
-}
-
-static inline int
-dma_set_mask(struct device *dev, u64 mask)
-{
- if(!dev->dma_mask || !dma_supported(dev, mask))
- return -EIO;
-
- *dev->dma_mask = mask;
-
- return 0;
-}
-
-static inline int
-dma_get_cache_alignment(void)
-{
- /* no easy way to get cache size on all x86, so return the
- * maximum possible, to be safe */
- return (1 << L1_CACHE_SHIFT_MAX);
-}
-
-#define dma_is_consistent(d) (1)
-
-static inline void
-dma_cache_sync(void *vaddr, size_t size,
- enum dma_data_direction direction)
-{
- flush_write_buffers();
-}
-
-#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
-extern int
-dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
- dma_addr_t device_addr, size_t size, int flags);
-
-extern void
-dma_release_declared_memory(struct device *dev);
-
-extern void *
-dma_mark_declared_memory_occupied(struct device *dev,
- dma_addr_t device_addr, size_t size);
-
-#endif
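
The dma_supported()/dma_set_mask() pair above reduces to a single range check: any mask too narrow to cover the 16MB ISA DMA window is rejected, because GFP_DMA is the only fallback pool. A minimal user-space sketch of that logic (illustrative only; the sketch_* names are not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Mirrors the comparison in dma_supported() above; illustrative only. */
static int sketch_dma_supported(uint64_t mask)
{
    /* masks narrower than the 16MB ISA window cannot be honoured */
    return mask < 0x00ffffff ? 0 : 1;
}

int main(void)
{
    printf("32-bit mask supported: %d\n", sketch_dma_supported(0xffffffffULL));
    printf("16-bit mask supported: %d\n", sketch_dma_supported(0x0000ffffULL));
    return 0;
}
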
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/fixmap.h
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/fixmap.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,170 +0,0 @@
-/*
- * fixmap.h: compile-time virtual memory allocation
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1998 Ingo Molnar
- *
- * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
- */
-
-#ifndef _ASM_FIXMAP_H
-#define _ASM_FIXMAP_H
-
-#include <linux/config.h>
-
-/* used by vmalloc.c, vsyscall.lds.S.
- *
- * Leave one empty page between vmalloc'ed areas and
- * the start of the fixmap.
- */
-#define __FIXADDR_TOP (HYPERVISOR_VIRT_START - 2 * PAGE_SIZE)
-
-#ifndef __ASSEMBLY__
-#include <linux/kernel.h>
-#include <asm/acpi.h>
-#include <asm/apicdef.h>
-#include <asm/page.h>
-#ifdef CONFIG_HIGHMEM
-#include <linux/threads.h>
-#include <asm/kmap_types.h>
-#endif
-
-/*
- * Here we define all the compile-time 'special' virtual
- * addresses. The point is to have a constant address at
- * compile time, but to set the physical address only
- * in the boot process. We allocate these special addresses
- * from the end of virtual memory (0xfffff000) backwards.
- * Also this lets us do fail-safe vmalloc(): we
- * can guarantee that these special addresses and
- * vmalloc()-ed addresses never overlap.
- *
- * these 'compile-time allocated' memory buffers are
- * fixed-size 4k pages. (or larger if used with an increment
- * higher than 1) use fixmap_set(idx,phys) to associate
- * physical memory with fixmap indices.
- *
- * TLB entries of such buffers will not be flushed across
- * task switches.
- */
-enum fixed_addresses {
- FIX_HOLE,
- FIX_VSYSCALL,
-#ifdef CONFIG_X86_LOCAL_APIC
- FIX_APIC_BASE, /* local (CPU) APIC -- required for SMP or not */
-#endif
-#ifdef CONFIG_X86_IO_APIC
- FIX_IO_APIC_BASE_0,
- FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
-#endif
-#ifdef CONFIG_X86_VISWS_APIC
- FIX_CO_CPU, /* Cobalt timer */
- FIX_CO_APIC, /* Cobalt APIC Redirection Table */
- FIX_LI_PCIA, /* Lithium PCI Bridge A */
- FIX_LI_PCIB, /* Lithium PCI Bridge B */
-#endif
-#ifdef CONFIG_X86_F00F_BUG
- FIX_F00F_IDT, /* Virtual mapping for IDT */
-#endif
-#ifdef CONFIG_X86_CYCLONE_TIMER
- FIX_CYCLONE_TIMER, /*cyclone timer register*/
-#endif
-#ifdef CONFIG_HIGHMEM
- FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
- FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
-#endif
-#ifdef CONFIG_ACPI_BOOT
- FIX_ACPI_BEGIN,
- FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
-#endif
-#ifdef CONFIG_PCI_MMCONFIG
- FIX_PCIE_MCFG,
-#endif
- FIX_SHARED_INFO,
-#ifdef CONFIG_XEN_PHYSDEV_ACCESS
-#define NR_FIX_ISAMAPS 256
- FIX_ISAMAP_END,
- FIX_ISAMAP_BEGIN = FIX_ISAMAP_END + NR_FIX_ISAMAPS - 1,
-#endif
- __end_of_permanent_fixed_addresses,
- /* temporary boot-time mappings, used before ioremap() is functional */
-#define NR_FIX_BTMAPS 16
- FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
- FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS - 1,
- FIX_WP_TEST,
- __end_of_fixed_addresses
-};
-
-extern void __set_fixmap (enum fixed_addresses idx,
- unsigned long phys, pgprot_t flags);
-extern void __set_fixmap_ma (enum fixed_addresses idx,
- unsigned long mach, pgprot_t flags);
-
-#define set_fixmap(idx, phys) \
- __set_fixmap(idx, phys, PAGE_KERNEL)
-#define set_fixmap_ma(idx, phys) \
- __set_fixmap_ma(idx, phys, PAGE_KERNEL)
-#define set_fixmap_ma_ro(idx, phys) \
- __set_fixmap_ma(idx, phys, PAGE_KERNEL_RO)
-/*
- * Some hardware wants to get fixmapped without caching.
- */
-#define set_fixmap_nocache(idx, phys) \
- __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
-
-#define clear_fixmap(idx) \
- __set_fixmap(idx, 0, __pgprot(0))
-
-#define FIXADDR_TOP ((unsigned long)__FIXADDR_TOP)
-
-#define __FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
-#define __FIXADDR_BOOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
-#define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE)
-#define FIXADDR_BOOT_START (FIXADDR_TOP - __FIXADDR_BOOT_SIZE)
-
-#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
-#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
-
-/*
- * This is the range that is readable by user mode, and things
- * acting like user mode such as get_user_pages.
- */
-#define FIXADDR_USER_START (__fix_to_virt(FIX_VSYSCALL))
-#define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE)
-
-
-extern void __this_fixmap_does_not_exist(void);
-
-/*
- * 'index to address' translation. If anyone tries to use the idx
- * directly without translation, we catch the bug with a NULL-dereference
- * kernel oops. Illegal ranges of incoming indices are caught too.
- */
-static __always_inline unsigned long fix_to_virt(const unsigned int idx)
-{
- /*
- * this branch gets completely eliminated after inlining,
- * except when someone tries to use fixaddr indices in an
- * illegal way. (such as mixing up address types or using
- * out-of-range indices).
- *
- * If it doesn't get removed, the linker will complain
- * loudly with a reasonably clear error message..
- */
- if (idx >= __end_of_fixed_addresses)
- __this_fixmap_does_not_exist();
-
- return __fix_to_virt(idx);
-}
-
-static inline unsigned long virt_to_fix(const unsigned long vaddr)
-{
- BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
- return __virt_to_fix(vaddr);
-}
-
-#endif /* !__ASSEMBLY__ */
-#endif
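
The fix_to_virt()/virt_to_fix() pair above is pure arithmetic: index N maps to FIXADDR_TOP - (N << PAGE_SHIFT) and back. An illustrative user-space sketch, with a made-up FIXADDR_TOP since the real value depends on HYPERVISOR_VIRT_START:

#include <stdio.h>

#define SK_PAGE_SHIFT  12
#define SK_FIXADDR_TOP 0xfec00000UL   /* made-up example value */

static unsigned long sk_fix_to_virt(unsigned int idx)
{
    return SK_FIXADDR_TOP - ((unsigned long)idx << SK_PAGE_SHIFT);
}

static unsigned int sk_virt_to_fix(unsigned long vaddr)
{
    return (SK_FIXADDR_TOP - (vaddr & ~0xfffUL)) >> SK_PAGE_SHIFT;
}

int main(void)
{
    unsigned long va = sk_fix_to_virt(3);
    printf("index 3 -> %#lx -> index %u\n", va, sk_virt_to_fix(va));
    return 0;
}
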
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/floppy.h
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/floppy.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,147 +0,0 @@
-/*
- * Architecture specific parts of the Floppy driver
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1995
- *
- * Modifications for Xen are Copyright (c) 2004, Keir Fraser.
- */
-#ifndef __ASM_XEN_I386_FLOPPY_H
-#define __ASM_XEN_I386_FLOPPY_H
-
-#include <linux/vmalloc.h>
-
-/* XEN: Hit DMA paths on the head. This trick from asm-m68k/floppy.h. */
-#include <asm/dma.h>
-#undef MAX_DMA_ADDRESS
-#define MAX_DMA_ADDRESS 0
-#define CROSS_64KB(a,s) (0)
-
-#define fd_inb(port) inb_p(port)
-#define fd_outb(value,port) outb_p(value,port)
-
-#define fd_request_dma() (0)
-#define fd_free_dma() ((void)0)
-#define fd_enable_irq() enable_irq(FLOPPY_IRQ)
-#define fd_disable_irq() disable_irq(FLOPPY_IRQ)
-#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL)
-#define fd_get_dma_residue() (virtual_dma_count + virtual_dma_residue)
-#define fd_dma_setup(addr, size, mode, io) vdma_dma_setup(addr, size, mode, io)
-/*
- * Do not use vmalloc/vfree: floppy_release_irq_and_dma() gets called from
- * softirq context via motor_off_callback. A generic bug we happen to trigger.
- */
-#define fd_dma_mem_alloc(size) __get_free_pages(GFP_KERNEL, get_order(size))
-#define fd_dma_mem_free(addr, size) free_pages(addr, get_order(size))
-
-static int virtual_dma_count;
-static int virtual_dma_residue;
-static char *virtual_dma_addr;
-static int virtual_dma_mode;
-static int doing_pdma;
-
-static irqreturn_t floppy_hardint(int irq, void *dev_id, struct pt_regs * regs)
-{
- register unsigned char st;
- register int lcount;
- register char *lptr;
-
- if (!doing_pdma)
- return floppy_interrupt(irq, dev_id, regs);
-
- st = 1;
- for(lcount=virtual_dma_count, lptr=virtual_dma_addr;
- lcount; lcount--, lptr++) {
- st=inb(virtual_dma_port+4) & 0xa0 ;
- if(st != 0xa0)
- break;
- if(virtual_dma_mode)
- outb_p(*lptr, virtual_dma_port+5);
- else
- *lptr = inb_p(virtual_dma_port+5);
- }
- virtual_dma_count = lcount;
- virtual_dma_addr = lptr;
- st = inb(virtual_dma_port+4);
-
- if(st == 0x20)
- return IRQ_HANDLED;
- if(!(st & 0x20)) {
- virtual_dma_residue += virtual_dma_count;
- virtual_dma_count=0;
- doing_pdma = 0;
- floppy_interrupt(irq, dev_id, regs);
- return IRQ_HANDLED;
- }
- return IRQ_HANDLED;
-}
-
-static void fd_disable_dma(void)
-{
- doing_pdma = 0;
- virtual_dma_residue += virtual_dma_count;
- virtual_dma_count=0;
-}
-
-static int fd_request_irq(void)
-{
- return request_irq(FLOPPY_IRQ, floppy_hardint,SA_INTERRUPT,
- "floppy", NULL);
-}
-
-static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io)
-{
- doing_pdma = 1;
- virtual_dma_port = io;
- virtual_dma_mode = (mode == DMA_MODE_WRITE);
- virtual_dma_addr = addr;
- virtual_dma_count = size;
- virtual_dma_residue = 0;
- return 0;
-}
-
-/* XEN: This trick to force 'virtual DMA' is from include/asm-m68k/floppy.h. */
-#define FDC1 xen_floppy_init()
-static int FDC2 = -1;
-
-static int xen_floppy_init(void)
-{
- use_virtual_dma = 1;
- can_use_virtual_dma = 1;
- return 0x3f0;
-}
-
-/*
- * Floppy types are stored in the rtc's CMOS RAM and so rtc_lock
- * is needed to prevent corrupted CMOS RAM in case "insmod floppy"
- * coincides with another rtc CMOS user. Paul G.
- */
-#define FLOPPY0_TYPE ({ \
- unsigned long flags; \
- unsigned char val; \
- spin_lock_irqsave(&rtc_lock, flags); \
- val = (CMOS_READ(0x10) >> 4) & 15; \
- spin_unlock_irqrestore(&rtc_lock, flags); \
- val; \
-})
-
-#define FLOPPY1_TYPE ({ \
- unsigned long flags; \
- unsigned char val; \
- spin_lock_irqsave(&rtc_lock, flags); \
- val = CMOS_READ(0x10) & 15; \
- spin_unlock_irqrestore(&rtc_lock, flags); \
- val; \
-})
-
-#define N_FDC 2
-#define N_DRIVE 8
-
-#define FLOPPY_MOTOR_MASK 0xf0
-
-#define EXTRA_FLOPPY_PARAMS
-
-#endif /* __ASM_XEN_I386_FLOPPY_H */
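
The FLOPPY0_TYPE/FLOPPY1_TYPE macros above decode CMOS byte 0x10, which packs drive 0's type in the high nibble and drive 1's in the low nibble. A small sketch of just the nibble decoding (the CMOS value here is made up, and no rtc_lock is needed outside the kernel):

#include <stdio.h>

int main(void)
{
    unsigned int cmos_0x10 = 0x45;   /* hypothetical CMOS contents */
    unsigned int drive0 = (cmos_0x10 >> 4) & 15;
    unsigned int drive1 = cmos_0x10 & 15;
    printf("drive 0 type %u, drive 1 type %u\n", drive0, drive1);
    return 0;
}
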
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/highmem.h
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/highmem.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,82 +0,0 @@
-/*
- * highmem.h: virtual kernel memory mappings for high memory
- *
- * Used in CONFIG_HIGHMEM systems for memory pages which
- * are not addressable by direct kernel virtual addresses.
- *
- * Copyright (C) 1999 Gerhard Wichert, Siemens AG
- * Gerhard.Wichert@xxxxxxxxxxxxxx
- *
- *
- * Redesigned the x86 32-bit VM architecture to deal with
- * up to 16 Terabyte physical memory. With current x86 CPUs
- * we now support up to 64 Gigabytes physical RAM.
- *
- * Copyright (C) 1999 Ingo Molnar <mingo@xxxxxxxxxx>
- */
-
-#ifndef _ASM_HIGHMEM_H
-#define _ASM_HIGHMEM_H
-
-#ifdef __KERNEL__
-
-#include <linux/config.h>
-#include <linux/interrupt.h>
-#include <linux/threads.h>
-#include <asm/kmap_types.h>
-#include <asm/tlbflush.h>
-
-/* declarations for highmem.c */
-extern unsigned long highstart_pfn, highend_pfn;
-
-extern pte_t *kmap_pte;
-extern pgprot_t kmap_prot;
-extern pte_t *pkmap_page_table;
-
-extern void kmap_init(void);
-
-/*
- * Right now we initialize only a single pte table. It can be extended
- * easily, subsequent pte tables have to be allocated in one physical
- * chunk of RAM.
- */
-#ifdef CONFIG_X86_PAE
-#define LAST_PKMAP 512
-#else
-#define LAST_PKMAP 1024
-#endif
-/*
- * Ordering is:
- *
- * FIXADDR_TOP
- * fixed_addresses
- * FIXADDR_START
- * temp fixed addresses
- * FIXADDR_BOOT_START
- * Persistent kmap area
- * PKMAP_BASE
- * VMALLOC_END
- * Vmalloc area
- * VMALLOC_START
- * high_memory
- */
-#define PKMAP_BASE ( (FIXADDR_BOOT_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK )
-#define LAST_PKMAP_MASK (LAST_PKMAP-1)
-#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
-#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
-
-extern void * FASTCALL(kmap_high(struct page *page));
-extern void FASTCALL(kunmap_high(struct page *page));
-
-void *kmap(struct page *page);
-void kunmap(struct page *page);
-void *kmap_atomic(struct page *page, enum km_type type);
-void *kmap_atomic_pte(struct page *page, enum km_type type);
-void kunmap_atomic(void *kvaddr, enum km_type type);
-struct page *kmap_atomic_to_page(void *ptr);
-
-#define flush_cache_kmaps() do { } while (0)
-
-#endif /* __KERNEL__ */
-
-#endif /* _ASM_HIGHMEM_H */
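
The PKMAP_NR()/PKMAP_ADDR() pair above converts between a persistent-kmap slot number and its virtual address by shifting relative to PKMAP_BASE. An illustrative round trip, with an arbitrary example base:

#include <stdio.h>

#define SK_PAGE_SHIFT 12
#define SK_PKMAP_BASE 0xff400000UL   /* arbitrary example base */

#define SK_PKMAP_NR(virt)  (((virt) - SK_PKMAP_BASE) >> SK_PAGE_SHIFT)
#define SK_PKMAP_ADDR(nr)  (SK_PKMAP_BASE + ((nr) << SK_PAGE_SHIFT))

int main(void)
{
    unsigned long va = SK_PKMAP_ADDR(7UL);
    printf("slot 7 -> %#lx -> slot %lu\n", va, SK_PKMAP_NR(va));
    return 0;
}
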
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/io.h
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/io.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,425 +0,0 @@
-#ifndef _ASM_IO_H
-#define _ASM_IO_H
-
-#include <linux/config.h>
-#include <linux/string.h>
-#include <linux/compiler.h>
-
-/*
- * This file contains the definitions for the x86 IO instructions
- * inb/inw/inl/outb/outw/outl and the "string versions" of the same
- * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
- * versions of the single-IO instructions (inb_p/inw_p/..).
- *
- * This file is not meant to be obfuscating: it's just complicated
- * to (a) handle it all in a way that makes gcc able to optimize it
- * as well as possible and (b) trying to avoid writing the same thing
- * over and over again with slight variations and possibly making a
- * mistake somewhere.
- */
-
-/*
- * Thanks to James van Artsdalen for a better timing-fix than
- * the two short jumps: using outb's to a nonexistent port seems
- * to guarantee better timings even on fast machines.
- *
- * On the other hand, I'd like to be sure of a non-existent port:
- * I feel a bit unsafe about using 0x80 (should be safe, though)
- *
- * Linus
- */
-
- /*
- * Bit simplified and optimized by Jan Hubicka
- * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
- *
- * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
- * isa_read[wl] and isa_write[wl] fixed
- * - Arnaldo Carvalho de Melo <acme@xxxxxxxxxxxxxxxx>
- */
-
-#define IO_SPACE_LIMIT 0xffff
-
-#define XQUAD_PORTIO_BASE 0xfe400000
-#define XQUAD_PORTIO_QUAD 0x40000 /* 256k per quad. */
-
-#ifdef __KERNEL__
-
-#include <asm-generic/iomap.h>
-
-#include <linux/vmalloc.h>
-#include <asm/fixmap.h>
-
-/**
- * virt_to_phys - map virtual addresses to physical
- * @address: address to remap
- *
- * The returned physical address is the physical (CPU) mapping for
- * the memory address given. It is only valid to use this function on
- * addresses directly mapped or allocated via kmalloc.
- *
- * This function does not give bus mappings for DMA transfers. In
- * almost all conceivable cases a device driver should not be using
- * this function
- */
-
-static inline unsigned long virt_to_phys(volatile void * address)
-{
- return __pa(address);
-}
-
-/**
- * phys_to_virt - map physical address to virtual
- * @address: address to remap
- *
- * The returned virtual address is a current CPU mapping for
- * the memory address given. It is only valid to use this function on
- * addresses that have a kernel mapping
- *
- * This function does not handle bus mappings for DMA transfers. In
- * almost all conceivable cases a device driver should not be using
- * this function
- */
-
-static inline void * phys_to_virt(unsigned long address)
-{
- return __va(address);
-}
-
-/*
- * Change "struct page" to physical address.
- */
-#define page_to_pseudophys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
-#define page_to_phys(page) (phys_to_machine(page_to_pseudophys(page)))
-
-#define bio_to_pseudophys(bio) (page_to_pseudophys(bio_page((bio))) + \
- (unsigned long) bio_offset((bio)))
-#define bvec_to_pseudophys(bv) (page_to_pseudophys((bv)->bv_page) + \
- (unsigned long) (bv)->bv_offset)
-
-#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
- (((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) && \
- ((bvec_to_pseudophys((vec1)) + (vec1)->bv_len) == \
- bvec_to_pseudophys((vec2))))
-
-extern void __iomem * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
-
-/**
- * ioremap - map bus memory into CPU space
- * @offset: bus address of the memory
- * @size: size of the resource to map
- *
- * ioremap performs a platform specific sequence of operations to
- * make bus memory CPU accessible via the readb/readw/readl/writeb/
- * writew/writel functions and the other mmio helpers. The returned
- * address is not guaranteed to be usable directly as a virtual
- * address.
- */
-
-static inline void __iomem * ioremap(unsigned long offset, unsigned long size)
-{
- return __ioremap(offset, size, 0);
-}
-
-extern void __iomem * ioremap_nocache(unsigned long offset, unsigned long size);
-extern void iounmap(volatile void __iomem *addr);
-
-/*
- * bt_ioremap() and bt_iounmap() are for temporary early boot-time
- * mappings, before the real ioremap() is functional.
- * A boot-time mapping is currently limited to at most 16 pages.
- */
-extern void *bt_ioremap(unsigned long offset, unsigned long size);
-extern void bt_iounmap(void *addr, unsigned long size);
-
-/*
- * ISA I/O bus memory addresses are 1:1 with the physical address.
- */
-#define isa_virt_to_bus(_x) isa_virt_to_bus_is_UNSUPPORTED->x
-#define isa_page_to_bus(_x) isa_page_to_bus_is_UNSUPPORTED->x
-#ifdef CONFIG_XEN_PHYSDEV_ACCESS
-#define isa_bus_to_virt(_x) (void *)(__fix_to_virt(FIX_ISAMAP_BEGIN) + (_x))
-#else
-#define isa_bus_to_virt(_x) isa_bus_to_virt_needs_PRIVILEGED_BUILD
-#endif
-
-/*
- * However PCI ones are not necessarily 1:1 and therefore these interfaces
- * are forbidden in portable PCI drivers.
- *
- * Allow them on x86 for legacy drivers, though.
- */
-#define virt_to_bus(_x) phys_to_machine(__pa(_x))
-#define bus_to_virt(_x) __va(machine_to_phys(_x))
-
-/*
- * readX/writeX() are used to access memory mapped devices. On some
- * architectures the memory mapped IO stuff needs to be accessed
- * differently. On the x86 architecture, we just read/write the
- * memory location directly.
- */
-
-static inline unsigned char readb(const volatile void __iomem *addr)
-{
- return *(volatile unsigned char __force *) addr;
-}
-static inline unsigned short readw(const volatile void __iomem *addr)
-{
- return *(volatile unsigned short __force *) addr;
-}
-static inline unsigned int readl(const volatile void __iomem *addr)
-{
- return *(volatile unsigned int __force *) addr;
-}
-#define readb_relaxed(addr) readb(addr)
-#define readw_relaxed(addr) readw(addr)
-#define readl_relaxed(addr) readl(addr)
-#define __raw_readb readb
-#define __raw_readw readw
-#define __raw_readl readl
-
-static inline void writeb(unsigned char b, volatile void __iomem *addr)
-{
- *(volatile unsigned char __force *) addr = b;
-}
-static inline void writew(unsigned short b, volatile void __iomem *addr)
-{
- *(volatile unsigned short __force *) addr = b;
-}
-static inline void writel(unsigned int b, volatile void __iomem *addr)
-{
- *(volatile unsigned int __force *) addr = b;
-}
-#define __raw_writeb writeb
-#define __raw_writew writew
-#define __raw_writel writel
-
-#define mmiowb()
-
-static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
-{
- memset((void __force *) addr, val, count);
-}
-static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
-{
- __memcpy(dst, (void __force *) src, count);
-}
-static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
-{
- __memcpy((void __force *) dst, src, count);
-}
-
-/*
- * ISA space is 'always mapped' on a typical x86 system, no need to
- * explicitly ioremap() it. The fact that the ISA IO space is mapped
- * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
- * are physical addresses. The following constant pointer can be
- * used as the IO-area pointer (it can be iounmapped as well, so the
- * analogy with PCI is quite large):
- */
-#define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET))
-
-#define isa_readb(a) readb(__ISA_IO_base + (a))
-#define isa_readw(a) readw(__ISA_IO_base + (a))
-#define isa_readl(a) readl(__ISA_IO_base + (a))
-#define isa_writeb(b,a) writeb(b,__ISA_IO_base + (a))
-#define isa_writew(w,a) writew(w,__ISA_IO_base + (a))
-#define isa_writel(l,a) writel(l,__ISA_IO_base + (a))
-#define isa_memset_io(a,b,c) memset_io(__ISA_IO_base + (a),(b),(c))
-#define isa_memcpy_fromio(a,b,c) memcpy_fromio((a),__ISA_IO_base + (b),(c))
-#define isa_memcpy_toio(a,b,c) memcpy_toio(__ISA_IO_base + (a),(b),(c))
-
-
-/*
- * Again, i386 does not require mem IO specific function.
- */
-
-#define eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(void __force *)(b),(c),(d))
-#define isa_eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(void __force *)(__ISA_IO_base + (b)),(c),(d))
-
-/**
- * check_signature - find BIOS signatures
- * @io_addr: mmio address to check
- * @signature: signature block
- * @length: length of signature
- *
- * Perform a signature comparison with the mmio address io_addr. This
- * address should have been obtained by ioremap.
- * Returns 1 on a match.
- */
-
-static inline int check_signature(volatile void __iomem * io_addr,
- const unsigned char *signature, int length)
-{
- int retval = 0;
- do {
- if (readb(io_addr) != *signature)
- goto out;
- io_addr++;
- signature++;
- length--;
- } while (length);
- retval = 1;
-out:
- return retval;
-}
-
-/*
- * Cache management
- *
- * This needed for two cases
- * 1. Out of order aware processors
- * 2. Accidentally out of order processors (PPro errata #51)
- */
-
-#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)
-
-static inline void flush_write_buffers(void)
-{
- __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory");
-}
-
-#define dma_cache_inv(_start,_size) flush_write_buffers()
-#define dma_cache_wback(_start,_size) flush_write_buffers()
-#define dma_cache_wback_inv(_start,_size) flush_write_buffers()
-
-#else
-
-/* Nothing to do */
-
-#define dma_cache_inv(_start,_size) do { } while (0)
-#define dma_cache_wback(_start,_size) do { } while (0)
-#define dma_cache_wback_inv(_start,_size) do { } while (0)
-#define flush_write_buffers()
-
-#endif
-
-#endif /* __KERNEL__ */
-
-#ifdef SLOW_IO_BY_JUMPING
-#define __SLOW_DOWN_IO "jmp 1f; 1: jmp 1f; 1:"
-#elif defined(__UNSAFE_IO__)
-#define __SLOW_DOWN_IO "outb %%al,$0x80;"
-#else
-#define __SLOW_DOWN_IO "\n1: outb %%al,$0x80\n" \
- "2:\n" \
- ".section __ex_table,\"a\"\n\t" \
- ".align 4\n\t" \
- ".long 1b,2b\n" \
- ".previous"
-#endif
-
-static inline void slow_down_io(void) {
- __asm__ __volatile__(
- __SLOW_DOWN_IO
-#ifdef REALLY_SLOW_IO
- __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO
-#endif
- : : );
-}
-
-#ifdef CONFIG_X86_NUMAQ
-extern void *xquad_portio; /* Where the IO area was mapped */
-#define XQUAD_PORT_ADDR(port, quad) (xquad_portio + (XQUAD_PORTIO_QUAD*quad) + port)
-#define __BUILDIO(bwl,bw,type) \
-static inline void out##bwl##_quad(unsigned type value, int port, int quad) { \
- if (xquad_portio) \
- write##bwl(value, XQUAD_PORT_ADDR(port, quad)); \
- else \
- out##bwl##_local(value, port); \
-} \
-static inline void out##bwl(unsigned type value, int port) { \
- out##bwl##_quad(value, port, 0); \
-} \
-static inline unsigned type in##bwl##_quad(int port, int quad) { \
- if (xquad_portio) \
- return read##bwl(XQUAD_PORT_ADDR(port, quad)); \
- else \
- return in##bwl##_local(port); \
-} \
-static inline unsigned type in##bwl(int port) { \
- return in##bwl##_quad(port, 0); \
-}
-#else
-#define __BUILDIO(bwl,bw,type) \
-static inline void out##bwl(unsigned type value, int port) { \
- out##bwl##_local(value, port); \
-} \
-static inline unsigned type in##bwl(int port) { \
- return in##bwl##_local(port); \
-}
-#endif
-
-
-#if __UNSAFE_IO__
-#define ____BUILDIO(bwl,bw,type) \
-static inline void out##bwl##_local(unsigned type value, int port) { \
- __asm__ __volatile__("out" #bwl " %" #bw "0, %w1" : : "a"(value),
"Nd"(port)); \
-} \
-static inline unsigned type in##bwl##_local(int port) { \
- unsigned type value; \
- __asm__ __volatile__("in" #bwl " %w1, %" #bw "0" : "=a"(value) :
"Nd"(port)); \
- return value; \
-}
-#else
-#define ____BUILDIO(bwl,bw,type) \
-static inline void out##bwl##_local(unsigned type value, int port) { \
- __asm__ __volatile__("1: out" #bwl " %" #bw "0, %w1\n" \
- "2:\n" \
- ".section __ex_table,\"a\"\n\t" \
- ".align 4\n\t" \
- ".long 1b,2b\n" \
- ".previous" : : "a"(value), "Nd"(port)); \
-} \
-static inline unsigned type in##bwl##_local(int port) { \
- unsigned type value; \
- __asm__ __volatile__("1:in" #bwl " %w1, %" #bw "0\n" \
- "2:\n" \
- ".section .fixup,\"ax\"\n" \
- "3: mov" #bwl " $~0,%" #bw "0\n\t" \
- "jmp 2b\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n\t" \
- ".align 4\n\t" \
- ".long 1b,3b\n" \
- ".previous" : "=a"(value) : "Nd"(port)); \
- return value; \
-}
-#endif
-
-#define BUILDIO(bwl,bw,type) \
-____BUILDIO(bwl,bw,type) \
-static inline void out##bwl##_local_p(unsigned type value, int port) { \
- out##bwl##_local(value, port); \
- slow_down_io(); \
-} \
-static inline unsigned type in##bwl##_local_p(int port) { \
- unsigned type value = in##bwl##_local(port); \
- slow_down_io(); \
- return value; \
-} \
-__BUILDIO(bwl,bw,type) \
-static inline void out##bwl##_p(unsigned type value, int port) { \
- out##bwl(value, port); \
- slow_down_io(); \
-} \
-static inline unsigned type in##bwl##_p(int port) { \
- unsigned type value = in##bwl(port); \
- slow_down_io(); \
- return value; \
-} \
-static inline void outs##bwl(int port, const void *addr, unsigned long count) { \
- __asm__ __volatile__("rep; outs" #bwl : "+S"(addr), "+c"(count) : "d"(port)); \
-} \
-static inline void ins##bwl(int port, void *addr, unsigned long count) { \
- __asm__ __volatile__("rep; ins" #bwl : "+D"(addr), "+c"(count) :
"d"(port)); \
-}
-
-BUILDIO(b,b,char)
-BUILDIO(w,w,short)
-BUILDIO(l,,int)
-
-/* We will be supplying our own /dev/mem implementation */
-#define ARCH_HAS_DEV_MEM
-
-#endif
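
check_signature() above is a plain byte-by-byte comparison performed through readb(); the same logic over an ordinary buffer looks like this (user-space sketch, not the kernel routine):

#include <stdio.h>

static int sk_check_signature(const unsigned char *io,
                              const unsigned char *sig, int length)
{
    while (length--) {
        if (*io++ != *sig++)
            return 0;   /* mismatch */
    }
    return 1;           /* all bytes matched */
}

int main(void)
{
    unsigned char rom[] = { 0x55, 0xaa, 0x12 };
    unsigned char sig[] = { 0x55, 0xaa };
    printf("match: %d\n", sk_check_signature(rom, sig, 2));
    return 0;
}
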
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/mach-xen/irq_vectors.h
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/mach-xen/irq_vectors.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,131 +0,0 @@
-/*
- * This file should contain #defines for all of the interrupt vector
- * numbers used by this architecture.
- *
- * In addition, there are some standard defines:
- *
- * FIRST_EXTERNAL_VECTOR:
- * The first free place for external interrupts
- *
- * SYSCALL_VECTOR:
- * The IRQ vector a syscall makes the user to kernel transition
- * under.
- *
- * TIMER_IRQ:
- * The IRQ number the timer interrupt comes in at.
- *
- * NR_IRQS:
- * The total number of interrupt vectors (including all the
- * architecture specific interrupts) needed.
- *
- */
-#ifndef _ASM_IRQ_VECTORS_H
-#define _ASM_IRQ_VECTORS_H
-
-/*
- * IDT vectors usable for external interrupt sources start
- * at 0x20:
- */
-#define FIRST_EXTERNAL_VECTOR 0x20
-
-#define SYSCALL_VECTOR 0x80
-
-/*
- * Vectors 0x20-0x2f are used for ISA interrupts.
- */
-
-/*
- * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
- *
- * Some of the following vectors are 'rare'; they are merged
- * into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
- * TLB, reschedule and local APIC vectors are performance-critical.
- *
- * Vectors 0xf0-0xfa are free (reserved for future Linux use).
- */
-#define SPURIOUS_APIC_VECTOR 0xff
-#define ERROR_APIC_VECTOR 0xfe
-#define INVALIDATE_TLB_VECTOR 0xfd
-#define RESCHEDULE_VECTOR 0xfc
-#define CALL_FUNCTION_VECTOR 0xfb
-
-#define THERMAL_APIC_VECTOR 0xf0
-/*
- * Local APIC timer IRQ vector is on a different priority level,
- * to work around the 'lost local interrupt if more than 2 IRQ
- * sources per level' errata.
- */
-#define LOCAL_TIMER_VECTOR 0xef
-
-/*
- * First APIC vector available to drivers: (vectors 0x30-0xee)
- * we start at 0x31 to spread out vectors evenly between priority
- * levels. (0x80 is the syscall vector)
- */
-#define FIRST_DEVICE_VECTOR 0x31
-#define FIRST_SYSTEM_VECTOR 0xef
-
-#define TIMER_IRQ timer_irq
-
-/*
- * 16 8259A IRQs, 208 potential APIC interrupt sources.
- * Right now the APIC is mostly only used for SMP.
- * 256 vectors is an architectural limit. (we can have
- * more than 256 devices theoretically, but they will
- * have to use shared interrupts)
- * Since vectors 0x00-0x1f are used/reserved for the CPU,
- * the usable vector space is 0x20-0xff (224 vectors)
- */
-
-#if 0
-/*
- * The maximum number of vectors supported by i386 processors
- * is limited to 256. For processors other than i386, NR_VECTORS
- * should be changed accordingly.
- */
-#define NR_VECTORS 256
-#endif
-
-#define FPU_IRQ 13
-
-#define FIRST_VM86_IRQ 3
-#define LAST_VM86_IRQ 15
-#define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15)
-
-/*
- * The flat IRQ space is divided into two regions:
- * 1. A one-to-one mapping of real physical IRQs. This space is only used
- * if we have physical device-access privilege. This region is at the
- * start of the IRQ space so that existing device drivers do not need
- * to be modified to translate physical IRQ numbers into our IRQ space.
- * 2. A dynamic mapping of inter-domain and Xen-sourced virtual IRQs. These
- * are bound using the provided bind/unbind functions.
- */
-
-#define PIRQ_BASE 0
-#define NR_PIRQS 128
-
-#define DYNIRQ_BASE (PIRQ_BASE + NR_PIRQS)
-#define NR_DYNIRQS 128
-
-#define NR_IRQS (NR_PIRQS + NR_DYNIRQS)
-#define NR_IRQ_VECTORS NR_IRQS
-
-#define pirq_to_irq(_x) ((_x) + PIRQ_BASE)
-#define irq_to_pirq(_x) ((_x) - PIRQ_BASE)
-
-#define dynirq_to_irq(_x) ((_x) + DYNIRQ_BASE)
-#define irq_to_dynirq(_x) ((_x) - DYNIRQ_BASE)
-
-#ifndef __ASSEMBLY__
-/* Dynamic binding of event channels and VIRQ sources to Linux IRQ space. */
-extern int bind_virq_to_irq(int virq);
-extern void unbind_virq_from_irq(int virq);
-extern int bind_evtchn_to_irq(int evtchn);
-extern void unbind_evtchn_from_irq(int evtchn);
-
-extern void irq_suspend(void);
-extern void irq_resume(void);
-#endif /* __ASSEMBLY__ */
-
-#endif /* _ASM_IRQ_VECTORS_H */
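
The flat IRQ space above is two contiguous windows: physical IRQs at [PIRQ_BASE, PIRQ_BASE+NR_PIRQS) and dynamic event-channel IRQs following at DYNIRQ_BASE, so the pirq_to_irq()/dynirq_to_irq() macros are simple offsets. A sketch of the arithmetic:

#include <stdio.h>

#define SK_PIRQ_BASE   0
#define SK_NR_PIRQS    128
#define SK_DYNIRQ_BASE (SK_PIRQ_BASE + SK_NR_PIRQS)

int main(void)
{
    int pirq = 14, dynirq = 3;
    printf("pirq %d   -> irq %d\n", pirq, pirq + SK_PIRQ_BASE);
    printf("dynirq %d -> irq %d\n", dynirq, dynirq + SK_DYNIRQ_BASE);
    return 0;
}
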
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/mach-xen/setup_arch_post.h
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/mach-xen/setup_arch_post.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,46 +0,0 @@
-/**
- * machine_specific_memory_setup - Hook for machine specific memory setup.
- *
- * Description:
- * This is included late in kernel/setup.c so that it can make
- * use of all of the static functions.
- **/
-
-static char * __init machine_specific_memory_setup(void)
-{
- char *who;
- unsigned long start_pfn, max_pfn;
-
- who = "Xen";
-
- start_pfn = 0;
- max_pfn = xen_start_info.nr_pages;
-
- e820.nr_map = 0;
- add_memory_region(PFN_PHYS(start_pfn), PFN_PHYS(max_pfn) - PFN_PHYS(start_pfn), E820_RAM);
-
- return who;
-}
-
-void __init machine_specific_modify_cpu_capabilities(struct cpuinfo_x86 *c)
-{
- clear_bit(X86_FEATURE_VME, c->x86_capability);
- clear_bit(X86_FEATURE_DE, c->x86_capability);
- clear_bit(X86_FEATURE_PSE, c->x86_capability);
- clear_bit(X86_FEATURE_PGE, c->x86_capability);
- clear_bit(X86_FEATURE_SEP, c->x86_capability);
- if (!(xen_start_info.flags & SIF_PRIVILEGED))
- clear_bit(X86_FEATURE_MTRR, c->x86_capability);
-}
-
-extern void hypervisor_callback(void);
-extern void failsafe_callback(void);
-
-static void __init machine_specific_arch_setup(void)
-{
- HYPERVISOR_set_callbacks(
- __KERNEL_CS, (unsigned long)hypervisor_callback,
- __KERNEL_CS, (unsigned long)failsafe_callback);
-
- machine_specific_modify_cpu_capabilities(&boot_cpu_data);
-}
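
machine_specific_memory_setup() above registers a single E820_RAM region spanning pages 0..nr_pages, where PFN_PHYS(pfn) is just pfn << PAGE_SHIFT. An illustrative conversion with a made-up page count:

#include <stdio.h>

#define SK_PAGE_SHIFT 12
#define SK_PFN_PHYS(pfn) ((unsigned long long)(pfn) << SK_PAGE_SHIFT)

int main(void)
{
    unsigned long nr_pages = 65536;   /* hypothetical xen_start_info.nr_pages */
    printf("E820_RAM region: 0 .. %#llx bytes\n", SK_PFN_PHYS(nr_pages));
    return 0;
}
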
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/mach-xen/setup_arch_pre.h
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/mach-xen/setup_arch_pre.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,5 +0,0 @@
-/* Hook to call BIOS initialisation function */
-
-#define ARCH_SETUP machine_specific_arch_setup();
-
-static void __init machine_specific_arch_setup(void);
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/mmu_context.h
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/mmu_context.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,74 +0,0 @@
-#ifndef __I386_SCHED_H
-#define __I386_SCHED_H
-
-#include <linux/config.h>
-#include <asm/desc.h>
-#include <asm/atomic.h>
-#include <asm/pgalloc.h>
-#include <asm/tlbflush.h>
-
-/*
- * Used for LDT copy/destruction.
- */
-int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
-void destroy_context(struct mm_struct *mm);
-
-
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-#ifdef CONFIG_SMP
- unsigned cpu = smp_processor_id();
- if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
- per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_LAZY;
-#endif
-}
-
-static inline void switch_mm(struct mm_struct *prev,
- struct mm_struct *next,
- struct task_struct *tsk)
-{
- int cpu = smp_processor_id();
-
- if (likely(prev != next)) {
- /* stop flush ipis for the previous mm */
- cpu_clear(cpu, prev->cpu_vm_mask);
-#ifdef CONFIG_SMP
- per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
- per_cpu(cpu_tlbstate, cpu).active_mm = next;
-#endif
- cpu_set(cpu, next->cpu_vm_mask);
-
- /* Re-load page tables */
- load_cr3(next->pgd);
-
- /*
- * load the LDT, if the LDT is different:
- */
- if (unlikely(prev->context.ldt != next->context.ldt))
- load_LDT_nolock(&next->context, cpu);
- }
-#ifdef CONFIG_SMP
- else {
- per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
- BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next);
-
- if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
- /* We were in lazy tlb mode and leave_mm disabled
- * tlb flush IPI delivery. We must reload %cr3.
- */
- load_cr3(next->pgd);
- load_LDT_nolock(&next->context, cpu);
- }
- }
-#endif
-}
-
-#define deactivate_mm(tsk, mm) \
- asm("movl %0,%%fs ; movl %0,%%gs": :"r" (0))
-
-#define activate_mm(prev, next) do { \
- switch_mm((prev),(next),NULL); \
- flush_page_update_queue(); \
-} while (0)
-
-#endif
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/msr.h
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/msr.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,272 +0,0 @@
-#ifndef __ASM_MSR_H
-#define __ASM_MSR_H
-
-#include <linux/smp.h>
-#include <asm-xen/hypervisor.h>
-
-/*
- * Access to machine-specific registers (available on 586 and better only)
- * Note: the rd* operations modify the parameters directly (without using
- * pointer indirection), which allows gcc to optimize better.
- */
-
-#define rdmsr(_msr,_val1,_val2) do { \
- dom0_op_t op; \
- op.cmd = DOM0_MSR; \
- op.u.msr.write = 0; \
- op.u.msr.msr = (_msr); \
- op.u.msr.cpu_mask = (1 << smp_processor_id()); \
- HYPERVISOR_dom0_op(&op); \
- (_val1) = op.u.msr.out1; \
- (_val2) = op.u.msr.out2; \
-} while(0)
-
-#define wrmsr(_msr,_val1,_val2) do { \
- dom0_op_t op; \
- op.cmd = DOM0_MSR; \
- op.u.msr.write = 1; \
- op.u.msr.cpu_mask = (1 << smp_processor_id()); \
- op.u.msr.msr = (_msr); \
- op.u.msr.in1 = (_val1); \
- op.u.msr.in2 = (_val2); \
- HYPERVISOR_dom0_op(&op); \
-} while(0)
-
-#define rdmsrl(msr,val) do { \
- unsigned long l__,h__; \
- rdmsr (msr, l__, h__); \
- val = l__; \
- val |= ((u64)h__<<32); \
-} while(0)
-
-static inline void wrmsrl (unsigned long msr, unsigned long long val)
-{
- unsigned long lo, hi;
- lo = (unsigned long) val;
- hi = val >> 32;
- wrmsr (msr, lo, hi);
-}
-
-#define rdtsc(low,high) \
- __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
-
-#define rdtscl(low) \
- __asm__ __volatile__("rdtsc" : "=a" (low) : : "edx")
-
-#define rdtscll(val) \
- __asm__ __volatile__("rdtsc" : "=A" (val))
-
-#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
-
-#define rdpmc(counter,low,high) \
- __asm__ __volatile__("rdpmc" \
- : "=a" (low), "=d" (high) \
- : "c" (counter))
-
-/* symbolic names for some interesting MSRs */
-/* Intel defined MSRs. */
-#define MSR_IA32_P5_MC_ADDR 0
-#define MSR_IA32_P5_MC_TYPE 1
-#define MSR_IA32_PLATFORM_ID 0x17
-#define MSR_IA32_EBL_CR_POWERON 0x2a
-
-#define MSR_IA32_APICBASE 0x1b
-#define MSR_IA32_APICBASE_BSP (1<<8)
-#define MSR_IA32_APICBASE_ENABLE (1<<11)
-#define MSR_IA32_APICBASE_BASE (0xfffff<<12)
-
-#define MSR_IA32_UCODE_WRITE 0x79
-#define MSR_IA32_UCODE_REV 0x8b
-
-#define MSR_P6_PERFCTR0 0xc1
-#define MSR_P6_PERFCTR1 0xc2
-
-#define MSR_IA32_BBL_CR_CTL 0x119
-
-#define MSR_IA32_SYSENTER_CS 0x174
-#define MSR_IA32_SYSENTER_ESP 0x175
-#define MSR_IA32_SYSENTER_EIP 0x176
-
-#define MSR_IA32_MCG_CAP 0x179
-#define MSR_IA32_MCG_STATUS 0x17a
-#define MSR_IA32_MCG_CTL 0x17b
-
-/* P4/Xeon+ specific */
-#define MSR_IA32_MCG_EAX 0x180
-#define MSR_IA32_MCG_EBX 0x181
-#define MSR_IA32_MCG_ECX 0x182
-#define MSR_IA32_MCG_EDX 0x183
-#define MSR_IA32_MCG_ESI 0x184
-#define MSR_IA32_MCG_EDI 0x185
-#define MSR_IA32_MCG_EBP 0x186
-#define MSR_IA32_MCG_ESP 0x187
-#define MSR_IA32_MCG_EFLAGS 0x188
-#define MSR_IA32_MCG_EIP 0x189
-#define MSR_IA32_MCG_RESERVED 0x18A
-
-#define MSR_P6_EVNTSEL0 0x186
-#define MSR_P6_EVNTSEL1 0x187
-
-#define MSR_IA32_PERF_STATUS 0x198
-#define MSR_IA32_PERF_CTL 0x199
-
-#define MSR_IA32_THERM_CONTROL 0x19a
-#define MSR_IA32_THERM_INTERRUPT 0x19b
-#define MSR_IA32_THERM_STATUS 0x19c
-#define MSR_IA32_MISC_ENABLE 0x1a0
-
-#define MSR_IA32_DEBUGCTLMSR 0x1d9
-#define MSR_IA32_LASTBRANCHFROMIP 0x1db
-#define MSR_IA32_LASTBRANCHTOIP 0x1dc
-#define MSR_IA32_LASTINTFROMIP 0x1dd
-#define MSR_IA32_LASTINTTOIP 0x1de
-
-#define MSR_IA32_MC0_CTL 0x400
-#define MSR_IA32_MC0_STATUS 0x401
-#define MSR_IA32_MC0_ADDR 0x402
-#define MSR_IA32_MC0_MISC 0x403
-
-/* Pentium IV performance counter MSRs */
-#define MSR_P4_BPU_PERFCTR0 0x300
-#define MSR_P4_BPU_PERFCTR1 0x301
-#define MSR_P4_BPU_PERFCTR2 0x302
-#define MSR_P4_BPU_PERFCTR3 0x303
-#define MSR_P4_MS_PERFCTR0 0x304
-#define MSR_P4_MS_PERFCTR1 0x305
-#define MSR_P4_MS_PERFCTR2 0x306
-#define MSR_P4_MS_PERFCTR3 0x307
-#define MSR_P4_FLAME_PERFCTR0 0x308
-#define MSR_P4_FLAME_PERFCTR1 0x309
-#define MSR_P4_FLAME_PERFCTR2 0x30a
-#define MSR_P4_FLAME_PERFCTR3 0x30b
-#define MSR_P4_IQ_PERFCTR0 0x30c
-#define MSR_P4_IQ_PERFCTR1 0x30d
-#define MSR_P4_IQ_PERFCTR2 0x30e
-#define MSR_P4_IQ_PERFCTR3 0x30f
-#define MSR_P4_IQ_PERFCTR4 0x310
-#define MSR_P4_IQ_PERFCTR5 0x311
-#define MSR_P4_BPU_CCCR0 0x360
-#define MSR_P4_BPU_CCCR1 0x361
-#define MSR_P4_BPU_CCCR2 0x362
-#define MSR_P4_BPU_CCCR3 0x363
-#define MSR_P4_MS_CCCR0 0x364
-#define MSR_P4_MS_CCCR1 0x365
-#define MSR_P4_MS_CCCR2 0x366
-#define MSR_P4_MS_CCCR3 0x367
-#define MSR_P4_FLAME_CCCR0 0x368
-#define MSR_P4_FLAME_CCCR1 0x369
-#define MSR_P4_FLAME_CCCR2 0x36a
-#define MSR_P4_FLAME_CCCR3 0x36b
-#define MSR_P4_IQ_CCCR0 0x36c
-#define MSR_P4_IQ_CCCR1 0x36d
-#define MSR_P4_IQ_CCCR2 0x36e
-#define MSR_P4_IQ_CCCR3 0x36f
-#define MSR_P4_IQ_CCCR4 0x370
-#define MSR_P4_IQ_CCCR5 0x371
-#define MSR_P4_ALF_ESCR0 0x3ca
-#define MSR_P4_ALF_ESCR1 0x3cb
-#define MSR_P4_BPU_ESCR0 0x3b2
-#define MSR_P4_BPU_ESCR1 0x3b3
-#define MSR_P4_BSU_ESCR0 0x3a0
-#define MSR_P4_BSU_ESCR1 0x3a1
-#define MSR_P4_CRU_ESCR0 0x3b8
-#define MSR_P4_CRU_ESCR1 0x3b9
-#define MSR_P4_CRU_ESCR2 0x3cc
-#define MSR_P4_CRU_ESCR3 0x3cd
-#define MSR_P4_CRU_ESCR4 0x3e0
-#define MSR_P4_CRU_ESCR5 0x3e1
-#define MSR_P4_DAC_ESCR0 0x3a8
-#define MSR_P4_DAC_ESCR1 0x3a9
-#define MSR_P4_FIRM_ESCR0 0x3a4
-#define MSR_P4_FIRM_ESCR1 0x3a5
-#define MSR_P4_FLAME_ESCR0 0x3a6
-#define MSR_P4_FLAME_ESCR1 0x3a7
-#define MSR_P4_FSB_ESCR0 0x3a2
-#define MSR_P4_FSB_ESCR1 0x3a3
-#define MSR_P4_IQ_ESCR0 0x3ba
-#define MSR_P4_IQ_ESCR1 0x3bb
-#define MSR_P4_IS_ESCR0 0x3b4
-#define MSR_P4_IS_ESCR1 0x3b5
-#define MSR_P4_ITLB_ESCR0 0x3b6
-#define MSR_P4_ITLB_ESCR1 0x3b7
-#define MSR_P4_IX_ESCR0 0x3c8
-#define MSR_P4_IX_ESCR1 0x3c9
-#define MSR_P4_MOB_ESCR0 0x3aa
-#define MSR_P4_MOB_ESCR1 0x3ab
-#define MSR_P4_MS_ESCR0 0x3c0
-#define MSR_P4_MS_ESCR1 0x3c1
-#define MSR_P4_PMH_ESCR0 0x3ac
-#define MSR_P4_PMH_ESCR1 0x3ad
-#define MSR_P4_RAT_ESCR0 0x3bc
-#define MSR_P4_RAT_ESCR1 0x3bd
-#define MSR_P4_SAAT_ESCR0 0x3ae
-#define MSR_P4_SAAT_ESCR1 0x3af
-#define MSR_P4_SSU_ESCR0 0x3be
-#define MSR_P4_SSU_ESCR1 0x3bf /* guess: not defined in manual */
-#define MSR_P4_TBPU_ESCR0 0x3c2
-#define MSR_P4_TBPU_ESCR1 0x3c3
-#define MSR_P4_TC_ESCR0 0x3c4
-#define MSR_P4_TC_ESCR1 0x3c5
-#define MSR_P4_U2L_ESCR0 0x3b0
-#define MSR_P4_U2L_ESCR1 0x3b1
-
-/* AMD Defined MSRs */
-#define MSR_K6_EFER 0xC0000080
-#define MSR_K6_STAR 0xC0000081
-#define MSR_K6_WHCR 0xC0000082
-#define MSR_K6_UWCCR 0xC0000085
-#define MSR_K6_EPMR 0xC0000086
-#define MSR_K6_PSOR 0xC0000087
-#define MSR_K6_PFIR 0xC0000088
-
-#define MSR_K7_EVNTSEL0 0xC0010000
-#define MSR_K7_EVNTSEL1 0xC0010001
-#define MSR_K7_EVNTSEL2 0xC0010002
-#define MSR_K7_EVNTSEL3 0xC0010003
-#define MSR_K7_PERFCTR0 0xC0010004
-#define MSR_K7_PERFCTR1 0xC0010005
-#define MSR_K7_PERFCTR2 0xC0010006
-#define MSR_K7_PERFCTR3 0xC0010007
-#define MSR_K7_HWCR 0xC0010015
-#define MSR_K7_CLK_CTL 0xC001001b
-#define MSR_K7_FID_VID_CTL 0xC0010041
-#define MSR_K7_FID_VID_STATUS 0xC0010042
-
-/* extended feature register */
-#define MSR_EFER 0xc0000080
-
-/* EFER bits: */
-
-/* Execute Disable enable */
-#define _EFER_NX 11
-#define EFER_NX (1<<_EFER_NX)
-
-/* Centaur-Hauls/IDT defined MSRs. */
-#define MSR_IDT_FCR1 0x107
-#define MSR_IDT_FCR2 0x108
-#define MSR_IDT_FCR3 0x109
-#define MSR_IDT_FCR4 0x10a
-
-#define MSR_IDT_MCR0 0x110
-#define MSR_IDT_MCR1 0x111
-#define MSR_IDT_MCR2 0x112
-#define MSR_IDT_MCR3 0x113
-#define MSR_IDT_MCR4 0x114
-#define MSR_IDT_MCR5 0x115
-#define MSR_IDT_MCR6 0x116
-#define MSR_IDT_MCR7 0x117
-#define MSR_IDT_MCR_CTRL 0x120
-
-/* VIA Cyrix defined MSRs*/
-#define MSR_VIA_FCR 0x1107
-#define MSR_VIA_LONGHAUL 0x110a
-#define MSR_VIA_RNG 0x110b
-#define MSR_VIA_BCR2 0x1147
-
-/* Transmeta defined MSRs */
-#define MSR_TMTA_LONGRUN_CTRL 0x80868010
-#define MSR_TMTA_LONGRUN_FLAGS 0x80868011
-#define MSR_TMTA_LRTI_READOUT 0x80868018
-#define MSR_TMTA_LRTI_VOLT_MHZ 0x8086801a
-
-#endif /* __ASM_MSR_H */
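
rdmsrl()/wrmsrl() above move a 64-bit MSR value as two 32-bit halves through the dom0 MSR hypercall, recombining as lo | ((u64)hi << 32). A self-contained sketch of the split and recombination:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t val = 0x1234567890abcdefULL;
    uint32_t lo = (uint32_t)val;
    uint32_t hi = (uint32_t)(val >> 32);
    uint64_t back = lo | ((uint64_t)hi << 32);   /* as in rdmsrl() */
    printf("split hi=%#x lo=%#x -> %#llx\n", hi, lo, (unsigned long long)back);
    return 0;
}
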
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/page.h
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/page.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,203 +0,0 @@
-#ifndef _I386_PAGE_H
-#define _I386_PAGE_H
-
-/* PAGE_SHIFT determines the page size */
-#define PAGE_SHIFT 12
-#define PAGE_SIZE (1UL << PAGE_SHIFT)
-#define PAGE_MASK (~(PAGE_SIZE-1))
-
-#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
-#define LARGE_PAGE_SIZE (1UL << PMD_SHIFT)
-
-#ifdef __KERNEL__
-#ifndef __ASSEMBLY__
-
-#include <linux/config.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <asm-xen/xen-public/xen.h>
-#include <asm-xen/foreign_page.h>
-
-#define arch_free_page(_page,_order) \
-({ int foreign = PageForeign(_page); \
- if (foreign) \
- (PageForeignDestructor(_page))(_page); \
- foreign; \
-})
-#define HAVE_ARCH_FREE_PAGE
-
-#ifdef CONFIG_XEN_SCRUB_PAGES
-#define scrub_pages(_p,_n) memset((void *)(_p), 0, (_n) << PAGE_SHIFT)
-#else
-#define scrub_pages(_p,_n) ((void)0)
-#endif
-
-#ifdef CONFIG_X86_USE_3DNOW
-
-#include <asm/mmx.h>
-
-#define clear_page(page) mmx_clear_page((void *)(page))
-#define copy_page(to,from) mmx_copy_page(to,from)
-
-#else
-
-#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
-
-/*
- * On older X86 processors it's not a win to use MMX here, it seems.
- * Maybe the K6-III ?
- */
-
-#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
-#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE)
-
-#endif
-
-#define clear_user_page(page, vaddr, pg) clear_page(page)
-#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
-
-/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
-extern unsigned int *phys_to_machine_mapping;
-#define pfn_to_mfn(_pfn) ((unsigned long)(phys_to_machine_mapping[(_pfn)]))
-#define mfn_to_pfn(_mfn) ((unsigned long)(machine_to_phys_mapping[(_mfn)]))
-static inline unsigned long phys_to_machine(unsigned long phys)
-{
- unsigned long machine = pfn_to_mfn(phys >> PAGE_SHIFT);
- machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
- return machine;
-}
-static inline unsigned long machine_to_phys(unsigned long machine)
-{
- unsigned long phys = mfn_to_pfn(machine >> PAGE_SHIFT);
- phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
- return phys;
-}
-
-/*
- * These are used to make use of C type-checking..
- */
-extern int nx_enabled;
-#ifdef CONFIG_X86_PAE
-extern unsigned long long __supported_pte_mask;
-typedef struct { unsigned long pte_low, pte_high; } pte_t;
-typedef struct { unsigned long long pmd; } pmd_t;
-typedef struct { unsigned long long pgd; } pgd_t;
-typedef struct { unsigned long long pgprot; } pgprot_t;
-#define pmd_val(x) ((x).pmd)
-#define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
-#define __pmd(x) ((pmd_t) { (x) } )
-#define HPAGE_SHIFT 21
-#else
-typedef struct { unsigned long pte_low; } pte_t;
-typedef struct { unsigned long pgd; } pgd_t;
-typedef struct { unsigned long pgprot; } pgprot_t;
-#define boot_pte_t pte_t /* or would you rather have a typedef */
-#define pte_val(x) (((x).pte_low & 1) ? machine_to_phys((x).pte_low) : \
- (x).pte_low)
-#define pte_val_ma(x) ((x).pte_low)
-#define HPAGE_SHIFT 22
-#endif
-#define PTE_MASK PAGE_MASK
-
-#ifdef CONFIG_HUGETLB_PAGE
-#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
-#define HPAGE_MASK (~(HPAGE_SIZE - 1))
-#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
-#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
-#endif
-
-
-static inline unsigned long pgd_val(pgd_t x)
-{
- unsigned long ret = x.pgd;
- if (ret) ret = machine_to_phys(ret);
- return ret;
-}
-#define pgprot_val(x) ((x).pgprot)
-
-#define __pte(x) ({ unsigned long _x = (x); \
- (((_x)&1) ? ((pte_t) {phys_to_machine(_x)}) : ((pte_t) {(_x)})); })
-#define __pte_ma(x) ((pte_t) { (x) } )
-#define __pgd(x) ({ unsigned long _x = (x); \
- (((_x)&1) ? ((pgd_t) {phys_to_machine(_x)}) : ((pgd_t) {(_x)})); })
-#define __pgprot(x) ((pgprot_t) { (x) } )
-
-#endif /* !__ASSEMBLY__ */
-
-/* to align the pointer to the (next) page boundary */
-#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
-
-/*
- * This handles the memory map. We could make this a config
- * option, but too many people screw it up, and too few need
- * it.
- *
- * A __PAGE_OFFSET of 0xC0000000 means that the kernel has
- * a virtual address space of one gigabyte, which limits the
- * amount of physical memory you can use to about 950MB.
- *
- * If you want more physical memory than this then see the CONFIG_HIGHMEM4G
- * and CONFIG_HIGHMEM64G options in the kernel configuration.
- */
-
-#ifndef __ASSEMBLY__
-
-/*
- * This much address space is reserved for vmalloc() and iomap()
- * as well as fixmap mappings.
- */
-extern unsigned int __VMALLOC_RESERVE;
-
-/* Pure 2^n version of get_order */
-static __inline__ int get_order(unsigned long size)
-{
- int order;
-
- size = (size-1) >> (PAGE_SHIFT-1);
- order = -1;
- do {
- size >>= 1;
- order++;
- } while (size);
- return order;
-}
-
-extern int sysctl_legacy_va_layout;
-
-#endif /* __ASSEMBLY__ */
-
-#ifdef __ASSEMBLY__
-#define __PAGE_OFFSET (0xC0000000)
-#else
-#define __PAGE_OFFSET (0xC0000000UL)
-#endif
-
-
-#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
-#define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE)
-#define MAXMEM (HYPERVISOR_VIRT_START-__PAGE_OFFSET-__VMALLOC_RESERVE)
-#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
-#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
-#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
-#ifndef CONFIG_DISCONTIGMEM
-#define pfn_to_page(pfn) (mem_map + (pfn))
-#define page_to_pfn(page) ((unsigned long)((page) - mem_map))
-#define pfn_valid(pfn) ((pfn) < max_mapnr)
-#endif /* !CONFIG_DISCONTIGMEM */
-#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-
-#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
-
-#define VM_DATA_DEFAULT_FLAGS \
- (VM_READ | VM_WRITE | \
- ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
-/* VIRT <-> MACHINE conversion */
-#define virt_to_machine(_a) (phys_to_machine(__pa(_a)))
-#define machine_to_virt(_m) (__va(machine_to_phys(_m)))
-
-#endif /* __KERNEL__ */
-
-#endif /* _I386_PAGE_H */
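
phys_to_machine()/machine_to_phys() above translate only the frame number through the p2m/m2p tables and carry the in-page offset through unchanged. An illustrative user-space round trip with tiny made-up tables standing in for the real shared ones:

#include <stdio.h>

#define SK_PAGE_SHIFT 12
#define SK_PAGE_MASK  (~((1UL << SK_PAGE_SHIFT) - 1))

static unsigned long p2m[4] = { 9, 2, 7, 5 };   /* pfn -> mfn, made up */
static unsigned long m2p[16];                   /* mfn -> pfn           */

int main(void)
{
    unsigned long pfn, phys, mach, back;

    for (pfn = 0; pfn < 4; pfn++)
        m2p[p2m[pfn]] = pfn;            /* build the inverse table */

    phys = (2UL << SK_PAGE_SHIFT) | 0x123;   /* offset survives both ways */
    mach = (p2m[phys >> SK_PAGE_SHIFT] << SK_PAGE_SHIFT) | (phys & ~SK_PAGE_MASK);
    back = (m2p[mach >> SK_PAGE_SHIFT] << SK_PAGE_SHIFT) | (mach & ~SK_PAGE_MASK);
    printf("phys %#lx -> mach %#lx -> phys %#lx\n", phys, mach, back);
    return 0;
}
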
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/param.h
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/param.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,23 +0,0 @@
-#ifndef _ASMi386_PARAM_H
-#define _ASMi386_PARAM_H
-
-#ifdef __KERNEL__
-# define HZ 100 /* Internal kernel timer frequency */
-# define USER_HZ 100 /* .. some user interfaces are in "ticks" */
-# define CLOCKS_PER_SEC (USER_HZ) /* like times() */
-#endif
-
-#ifndef HZ
-#define HZ 100
-#endif
-
-#define EXEC_PAGESIZE 4096
-
-#ifndef NOGROUP
-#define NOGROUP (-1)
-#endif
-
-#define MAXHOSTNAMELEN 64 /* max length of hostname */
-#define COMMAND_LINE_SIZE 256
-
-#endif
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/pci.h
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/pci.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,117 +0,0 @@
-#ifndef __i386_PCI_H
-#define __i386_PCI_H
-
-#include <linux/config.h>
-
-#ifdef __KERNEL__
-#include <linux/mm.h> /* for struct page */
-
-/* Can be used to override the logic in pci_scan_bus for skipping
- already-configured bus numbers - to be used for buggy BIOSes
- or architectures with incomplete PCI setup by the loader */
-
-#ifdef CONFIG_PCI
-extern unsigned int pcibios_assign_all_busses(void);
-#else
-#define pcibios_assign_all_busses() 0
-#endif
-#define pcibios_scan_all_fns(a, b) 0
-
-extern unsigned long pci_mem_start;
-#define PCIBIOS_MIN_IO 0x1000
-#define PCIBIOS_MIN_MEM (pci_mem_start)
-
-#define PCIBIOS_MIN_CARDBUS_IO 0x4000
-
-void pcibios_config_init(void);
-struct pci_bus * pcibios_scan_root(int bus);
-
-void pcibios_set_master(struct pci_dev *dev);
-void pcibios_penalize_isa_irq(int irq);
-struct irq_routing_table *pcibios_get_irq_routing_table(void);
-int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
-
-/* Dynamic DMA mapping stuff.
- * i386 has everything mapped statically.
- */
-
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <asm/scatterlist.h>
-#include <linux/string.h>
-#include <asm/io.h>
-
-struct pci_dev;
-
-/* The PCI address space does equal the physical memory
- * address space. The networking and block device layers use
- * this boolean for bounce buffer decisions.
- */
-#define PCI_DMA_BUS_IS_PHYS (1)
-
-/* pci_unmap_{page,single} is a nop so... */
-#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
-#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
-#define pci_unmap_addr(PTR, ADDR_NAME) (0)
-#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
-#define pci_unmap_len(PTR, LEN_NAME) (0)
-#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
-
-/* This is always fine. */
-#define pci_dac_dma_supported(pci_dev, mask) (1)
-
-static inline dma64_addr_t
-pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page, unsigned long offset, int direction)
-{
- return ((dma64_addr_t) page_to_phys(page) +
- (dma64_addr_t) offset);
-}
-
-static inline struct page *
-pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr)
-{
- return pfn_to_page(dma_addr >> PAGE_SHIFT);
-}
-
-static inline unsigned long
-pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr)
-{
- return (dma_addr & ~PAGE_MASK);
-}
-
-static inline void
-pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev, dma64_addr_t dma_addr,
size_t len, int direction)
-{
-}
-
-static inline void
-pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
-{
- flush_write_buffers();
-}
-
-#define HAVE_PCI_MMAP
-extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine);
-
-
-static inline void pcibios_add_platform_entries(struct pci_dev *dev)
-{
-}
-
-#endif /* __KERNEL__ */
-
-/* implement the pci_ DMA API in terms of the generic device dma_ one */
-#include <asm-generic/pci-dma-compat.h>
-
-/* generic pci stuff */
-#include <asm-generic/pci.h>
-
-/* On Xen we have to scan all functions since Xen hides bridges from
- * us. If a bridge is at fn=0 and that slot has a multifunction
- * device, we won't find the additional devices without scanning all
- * functions. */
-#undef pcibios_scan_all_fns
-#define pcibios_scan_all_fns(a, b) 1
-
-#endif /* __i386_PCI_H */
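
pci_dac_dma_to_page()/pci_dac_dma_to_offset() above simply split a DMA address into a frame number in the high bits and an in-page offset in the low PAGE_SHIFT bits. A sketch of that split:

#include <stdio.h>

#define SK_PAGE_SHIFT 12
#define SK_PAGE_MASK  (~((1UL << SK_PAGE_SHIFT) - 1))

int main(void)
{
    unsigned long dma_addr = 0x12345678UL;   /* example address */
    printf("page frame %#lx, offset %#lx\n",
           dma_addr >> SK_PAGE_SHIFT, dma_addr & ~SK_PAGE_MASK);
    return 0;
}
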
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/pgalloc.h
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/pgalloc.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,52 +0,0 @@
-#ifndef _I386_PGALLOC_H
-#define _I386_PGALLOC_H
-
-#include <linux/config.h>
-#include <asm/processor.h>
-#include <asm/fixmap.h>
-#include <linux/threads.h>
-#include <linux/mm.h> /* for struct page */
-#include <asm/io.h> /* for phys_to_virt and page_to_pseudophys */
-
-#define pmd_populate_kernel(mm, pmd, pte) \
- set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)))
-
-#define pmd_populate(mm, pmd, pte) do { \
- set_pmd(pmd, __pmd(_PAGE_TABLE + \
- ((unsigned long long)page_to_pfn(pte) << \
- (unsigned long long) PAGE_SHIFT))); \
- flush_page_update_queue(); \
-} while (0)
-/*
- * Allocate and free page tables.
- */
-extern pgd_t *pgd_alloc(struct mm_struct *);
-extern void pgd_free(pgd_t *pgd);
-
-extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
-extern struct page *pte_alloc_one(struct mm_struct *, unsigned long);
-
-static inline void pte_free_kernel(pte_t *pte)
-{
- free_page((unsigned long)pte);
- make_page_writable(pte);
- flush_page_update_queue();
-}
-
-extern void pte_free(struct page *pte);
-
-#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
-
-#ifdef CONFIG_X86_PAE
-/*
- * In the PAE case we free the pmds as part of the pgd.
- */
-#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
-#define pmd_free(x) do { } while (0)
-#define __pmd_free_tlb(tlb,x) do { } while (0)
-#define pud_populate(mm, pmd, pte) BUG()
-#endif
-
-#define check_pgt_cache() do { } while (0)
-
-#endif /* _I386_PGALLOC_H */
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/pgtable-2level-defs.h
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/pgtable-2level-defs.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,19 +0,0 @@
-#ifndef _I386_PGTABLE_2LEVEL_DEFS_H
-#define _I386_PGTABLE_2LEVEL_DEFS_H
-
-/*
- * traditional i386 two-level paging structure:
- */
-
-#define PGDIR_SHIFT 22
-#define PTRS_PER_PGD 1024
-#define PTRS_PER_PGD_NO_HV (HYPERVISOR_VIRT_START >> PGDIR_SHIFT)
-
-/*
- * the i386 is two-level, so we don't really have any
- * PMD directory physically.
- */
-
-#define PTRS_PER_PTE 1024
-
-#endif /* _I386_PGTABLE_2LEVEL_DEFS_H */
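
With PGDIR_SHIFT=22 and PTRS_PER_PTE=1024 as above, a 32-bit virtual address decomposes into 10 bits of pgd index, 10 bits of pte index and a 12-bit page offset. An illustrative decomposition:

#include <stdio.h>

#define SK_PGDIR_SHIFT  22
#define SK_PAGE_SHIFT   12
#define SK_PTRS_PER_PTE 1024

int main(void)
{
    unsigned long addr = 0xc0123456UL;   /* example kernel address */
    printf("pgd index %lu, pte index %lu, offset %#lx\n",
           addr >> SK_PGDIR_SHIFT,
           (addr >> SK_PAGE_SHIFT) & (SK_PTRS_PER_PTE - 1),
           addr & ((1UL << SK_PAGE_SHIFT) - 1));
    return 0;
}
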
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/pgtable-2level.h
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/pgtable-2level.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,118 +0,0 @@
-#ifndef _I386_PGTABLE_2LEVEL_H
-#define _I386_PGTABLE_2LEVEL_H
-
-#include <asm-generic/pgtable-nopmd.h>
-
-#define pte_ERROR(e) \
- printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
-#define pgd_ERROR(e) \
- printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
-
-/*
- * Certain architectures need to do special things when PTEs
- * within a page table are directly modified. Thus, the following
- * hook is made available.
- */
-#define set_pte_batched(pteptr, pteval) \
- queue_l1_entry_update(pteptr, (pteval).pte_low)
-#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
-#define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval)
-#define set_pmd(pmdptr, pmdval) xen_l2_entry_update((pmdptr), (pmdval))
-
-/*
- * A note on implementation of this atomic 'get-and-clear' operation.
- * This is actually very simple because Xen Linux can only run on a single
- * processor. Therefore, we cannot race other processors setting the 'accessed'
- * or 'dirty' bits on a page-table entry.
- * Even if pages are shared between domains, that is not a problem because
- * each domain will have separate page tables, with their own versions of
- * accessed & dirty state.
- */
-static inline pte_t ptep_get_and_clear(pte_t *xp)
-{
- pte_t pte = *xp;
- if (pte.pte_low)
- set_pte(xp, __pte_ma(0));
- return pte;
-}
-
-#define pte_same(a, b) ((a).pte_low == (b).pte_low)
-/*
- * We detect special mappings in one of two ways:
- * 1. If the MFN is an I/O page then Xen will set the m2p entry
- * to be outside our maximum possible pseudophys range.
- * 2. If the MFN belongs to a different domain then we will certainly
- * not have MFN in our p2m table. Conversely, if the page is ours,
- * then we'll have p2m(m2p(MFN))==MFN.
- * If we detect a special mapping then it doesn't have a 'struct page'.
- * We force !pfn_valid() by returning an out-of-range pointer.
- *
- * NB. These checks require that, for any MFN that is not in our reservation,
- * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
- * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
- * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
- *
- * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
- * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
- * require. In all the cases we care about, the high bit gets shifted out
- * (e.g., phys_to_machine()) so behaviour there is correct.
- */
-#define INVALID_P2M_ENTRY (~0U)
-#define FOREIGN_FRAME(_m) ((_m) | (1UL<<((sizeof(unsigned long)*8)-1)))
-#define pte_pfn(_pte) \
-({ \
- unsigned long mfn = (_pte).pte_low >> PAGE_SHIFT; \
- unsigned long pfn = mfn_to_pfn(mfn); \
- if ((pfn >= max_mapnr) || (pfn_to_mfn(pfn) != mfn)) \
- pfn = max_mapnr; /* special: force !pfn_valid() */ \
- pfn; \
-})
-
-#define pte_page(_pte) pfn_to_page(pte_pfn(_pte))
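/* A restatement of the round-trip test pte_pfn() performs above, pulled out
 * as a hypothetical helper for clarity (not part of the original header;
 * mfn_to_pfn, pfn_to_mfn and max_mapnr come from the surrounding tree):
 *
 *	static inline int mfn_is_local(unsigned long mfn)
 *	{
 *		unsigned long pfn = mfn_to_pfn(mfn);
 *		return (pfn < max_mapnr) && (pfn_to_mfn(pfn) == mfn);
 *	}
 *
 * A FOREIGN_FRAME()-tagged MFN fails this test by construction: the high bit
 * places it outside any value the p2m table can hold. */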
-
-#define pte_none(x) (!(x).pte_low)
-#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
-#define pfn_pte_ma(pfn, prot)	__pte_ma(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
-#define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
-
-#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
-
-#define pmd_page_kernel(pmd) \
-((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
-
-/*
- * All present user pages are user-executable:
- */
-static inline int pte_exec(pte_t pte)
-{
- return pte_user(pte);
-}
-
-/*
- * All present pages are kernel-executable:
- */
-static inline int pte_exec_kernel(pte_t pte)
-{
- return 1;
-}
-
-/*
- * Bits 0, 6 and 7 are taken, split up the 29 bits of offset
- * into this range:
- */
-#define PTE_FILE_MAX_BITS 29
-
-#define pte_to_pgoff(pte) \
- ((((pte).pte_low >> 1) & 0x1f ) + (((pte).pte_low >> 8) << 5 ))
-
-#define pgoff_to_pte(off) \
- ((pte_t) { (((off) & 0x1f) << 1) + (((off) >> 5) << 8) + _PAGE_FILE })
-
-/* Encode and de-code a swap entry */
-#define __swp_type(x) (((x).val >> 1) & 0x1f)
-#define __swp_offset(x) ((x).val >> 8)
-#define __swp_entry(type, offset)	((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
-#define __pte_to_swp_entry(pte)		((swp_entry_t) { (pte).pte_low })
-#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-
-#endif /* _I386_PGTABLE_2LEVEL_H */
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/pgtable.h
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/pgtable.h	Thu Aug  4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,506 +0,0 @@
-#ifndef _I386_PGTABLE_H
-#define _I386_PGTABLE_H
-
-#include <linux/config.h>
-#include <asm-xen/hypervisor.h>
-
-/*
- * The Linux memory management assumes a three-level page table setup. On
- * the i386, we use that, but "fold" the mid level into the top-level page
- * table, so that we physically have the same two-level page table as the
- * i386 mmu expects.
- *
- * This file contains the functions and defines necessary to modify and use
- * the i386 page table tree.
- */
-#ifndef __ASSEMBLY__
-#include <asm/processor.h>
-#include <asm/fixmap.h>
-#include <linux/threads.h>
-
-#ifndef _I386_BITOPS_H
-#include <asm/bitops.h>
-#endif
-
-#include <linux/slab.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
-
-/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
-extern unsigned long empty_zero_page[1024];
-extern pgd_t swapper_pg_dir[1024];
-extern kmem_cache_t *pgd_cache;
-extern kmem_cache_t *pmd_cache;
-extern kmem_cache_t *pte_cache;
-extern spinlock_t pgd_lock;
-extern struct page *pgd_list;
-
-void pte_ctor(void *, kmem_cache_t *, unsigned long);
-void pte_dtor(void *, kmem_cache_t *, unsigned long);
-void pmd_ctor(void *, kmem_cache_t *, unsigned long);
-void pgd_ctor(void *, kmem_cache_t *, unsigned long);
-void pgd_dtor(void *, kmem_cache_t *, unsigned long);
-void pgtable_cache_init(void);
-void paging_init(void);
-
-/*
- * The Linux x86 paging architecture is 'compile-time dual-mode', it
- * implements both the traditional 2-level x86 page tables and the
- * newer 3-level PAE-mode page tables.
- */
-#ifdef CONFIG_X86_PAE
-# include <asm/pgtable-3level-defs.h>
-# define PMD_SIZE (1UL << PMD_SHIFT)
-# define PMD_MASK (~(PMD_SIZE-1))
-#else
-# include <asm/pgtable-2level-defs.h>
-#endif
-
-#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
-#define PGDIR_MASK (~(PGDIR_SIZE-1))
-
-#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
-#define FIRST_USER_PGD_NR 0
-
-#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
-#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
-
-#define TWOLEVEL_PGDIR_SHIFT 22
-#define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT)
-#define BOOT_KERNEL_PGD_PTRS (1024-BOOT_USER_PGD_PTRS)
-
-/* Just any arbitrary offset to the start of the vmalloc VM area: the
- * current 8MB value just means that there will be an 8MB "hole" after the
- * physical memory until the kernel virtual memory starts. That means that
- * any out-of-bounds memory accesses will hopefully be caught.
- * The vmalloc() routines leave a hole of 4kB between each vmalloced
- * area for the same reason. ;)
- */
-#define VMALLOC_OFFSET (8*1024*1024)
-#define VMALLOC_START (((unsigned long) high_memory + vmalloc_earlyreserve + \
- 2*VMALLOC_OFFSET-1) & ~(VMALLOC_OFFSET-1))
-#ifdef CONFIG_HIGHMEM
-# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
-#else
-# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
-#endif
-
-extern void *high_memory;
-extern unsigned long vmalloc_earlyreserve;
-
-/*
- * The 4MB page is guessing.. Detailed in the infamous "Chapter H"
- * of the Pentium details, but assuming intel did the straightforward
- * thing, this bit set in the page directory entry just means that
- * the page directory entry points directly to a 4MB-aligned block of
- * memory.
- */
-#define _PAGE_BIT_PRESENT 0
-#define _PAGE_BIT_RW 1
-#define _PAGE_BIT_USER 2
-#define _PAGE_BIT_PWT 3
-#define _PAGE_BIT_PCD 4
-#define _PAGE_BIT_ACCESSED 5
-#define _PAGE_BIT_DIRTY 6
-#define _PAGE_BIT_PSE		7	/* 4 MB (or 2MB) page, Pentium+, if present.. */
-#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
-#define _PAGE_BIT_UNUSED2 10
-#define _PAGE_BIT_UNUSED3 11
-#define _PAGE_BIT_NX 63
-
-#define _PAGE_PRESENT 0x001
-#define _PAGE_RW 0x002
-#define _PAGE_USER 0x004
-#define _PAGE_PWT 0x008
-#define _PAGE_PCD 0x010
-#define _PAGE_ACCESSED 0x020
-#define _PAGE_DIRTY 0x040
-#define _PAGE_PSE 0x080 /* 4 MB (or 2MB) page, Pentium+, if present.. */
-#define _PAGE_GLOBAL 0x100 /* Global TLB entry PPro+ */
-#define _PAGE_UNUSED1 0x200 /* available for programmer */
-#define _PAGE_UNUSED2 0x400
-#define _PAGE_UNUSED3 0x800
-
-#define _PAGE_FILE 0x040 /* set:pagecache unset:swap */
-#define _PAGE_PROTNONE 0x080 /* If not present */
-#ifdef CONFIG_X86_PAE
-#define _PAGE_NX (1ULL<<_PAGE_BIT_NX)
-#else
-#define _PAGE_NX 0
-#endif
-
-#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
-
-#define PAGE_NONE \
- __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
-#define PAGE_SHARED \
- __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
-
-#define PAGE_SHARED_EXEC \
- __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
-#define PAGE_COPY_NOEXEC \
- __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
-#define PAGE_COPY_EXEC \
- __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
-#define PAGE_COPY \
- PAGE_COPY_NOEXEC
-#define PAGE_READONLY \
- __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
-#define PAGE_READONLY_EXEC \
- __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
-
-#define _PAGE_KERNEL \
- (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
-#define _PAGE_KERNEL_EXEC \
- (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
-
-extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
-#define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW)
-#define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD)
-#define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
-#define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
-
-#define PAGE_KERNEL __pgprot(__PAGE_KERNEL)
-#define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO)
-#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
-#define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE)
-#define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE)
-#define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC)
-
-/*
- * The i386 can't do page protection for execute; it considers execute
- * permission the same as read. Also, write permissions imply read
- * permissions. This is the closest we can get..
- */
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY
-#define __P100 PAGE_READONLY_EXEC
-#define __P101 PAGE_READONLY_EXEC
-#define __P110 PAGE_COPY_EXEC
-#define __P111 PAGE_COPY_EXEC
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED
-#define __S100 PAGE_READONLY_EXEC
-#define __S101 PAGE_READONLY_EXEC
-#define __S110 PAGE_SHARED_EXEC
-#define __S111 PAGE_SHARED_EXEC
-
-/*
- * Define this if things work differently on an i386 and an i486:
- * it will (on an i486) warn about kernel memory accesses that are
- * done without a 'verify_area(VERIFY_WRITE,..)'
- */
-#undef TEST_VERIFY_AREA
-
-/* The boot page tables (all created as a single array) */
-extern unsigned long pg0[];
-
-#define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
-#define pte_clear(xp) do { set_pte(xp, __pte(0)); } while (0)
-
-#define pmd_none(x) (!pmd_val(x))
-/* pmd_present doesn't just test the _PAGE_PRESENT bit since wr.p.t.
- can temporarily clear it. */
-#define pmd_present(x) (pmd_val(x))
-/* pmd_clear below */
-#define pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER & ~_PAGE_PRESENT)) != (_KERNPG_TABLE & ~_PAGE_PRESENT))
-
-
-#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
-
-/*
- * The following only work if pte_present() is true.
- * Undefined behaviour if not..
- */
-static inline int pte_user(pte_t pte)		{ return (pte).pte_low & _PAGE_USER; }
-static inline int pte_read(pte_t pte)		{ return (pte).pte_low & _PAGE_USER; }
-static inline int pte_dirty(pte_t pte)		{ return (pte).pte_low & _PAGE_DIRTY; }
-static inline int pte_young(pte_t pte)		{ return (pte).pte_low & _PAGE_ACCESSED; }
-static inline int pte_write(pte_t pte)		{ return (pte).pte_low & _PAGE_RW; }
-
-/*
- * The following only works if pte_present() is not true.
- */
-static inline int pte_file(pte_t pte)		{ return (pte).pte_low & _PAGE_FILE; }
-
-static inline pte_t pte_rdprotect(pte_t pte)	{ (pte).pte_low &= ~_PAGE_USER; return pte; }
-static inline pte_t pte_exprotect(pte_t pte)	{ (pte).pte_low &= ~_PAGE_USER; return pte; }
-static inline pte_t pte_mkclean(pte_t pte)	{ (pte).pte_low &= ~_PAGE_DIRTY; return pte; }
-static inline pte_t pte_mkold(pte_t pte)	{ (pte).pte_low &= ~_PAGE_ACCESSED; return pte; }
-static inline pte_t pte_wrprotect(pte_t pte)	{ (pte).pte_low &= ~_PAGE_RW; return pte; }
-static inline pte_t pte_mkread(pte_t pte)	{ (pte).pte_low |= _PAGE_USER; return pte; }
-static inline pte_t pte_mkexec(pte_t pte)	{ (pte).pte_low |= _PAGE_USER; return pte; }
-static inline pte_t pte_mkdirty(pte_t pte)	{ (pte).pte_low |= _PAGE_DIRTY; return pte; }
-static inline pte_t pte_mkyoung(pte_t pte)	{ (pte).pte_low |= _PAGE_ACCESSED; return pte; }
-static inline pte_t pte_mkwrite(pte_t pte)	{ (pte).pte_low |= _PAGE_RW; return pte; }
-
-#ifdef CONFIG_X86_PAE
-# include <asm/pgtable-3level.h>
-#else
-# include <asm/pgtable-2level.h>
-#endif
-
-static inline int ptep_test_and_clear_dirty(pte_t *ptep)
-{
- pte_t pte = *ptep;
- int ret = pte_dirty(pte);
- if (ret)
- xen_l1_entry_update(ptep, pte_mkclean(pte).pte_low);
- return ret;
-}
-
-static inline int ptep_test_and_clear_young(pte_t *ptep)
-{
- pte_t pte = *ptep;
- int ret = pte_young(pte);
- if (ret)
- xen_l1_entry_update(ptep, pte_mkold(pte).pte_low);
- return ret;
-}
-
-static inline void ptep_set_wrprotect(pte_t *ptep)
-{
- pte_t pte = *ptep;
- if (pte_write(pte))
- set_pte(ptep, pte_wrprotect(pte));
-}
-
-static inline void ptep_mkdirty(pte_t *ptep)
-{
- pte_t pte = *ptep;
- if (!pte_dirty(pte))
- xen_l1_entry_update(ptep, pte_mkdirty(pte).pte_low);
-}
-
-/*
- * Macro to mark a page protection value as "uncacheable". On processors
- * which do not support it, this is a no-op.
- */
-#define pgprot_noncached(prot)	((boot_cpu_data.x86 > 3)				\
-				 ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) : (prot))
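/* Usage sketch (hypothetical driver code, not from this header): a
 * device-memory mmap() handler would typically pair this with the
 * io_remap_pfn_range() wrapper defined later in this file:
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	return io_remap_pfn_range(vma, vma->vm_start, pfn,
 *				  vma->vm_end - vma->vm_start,
 *				  vma->vm_page_prot);
 */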
-
-/*
- * Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- */
-
-#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
-#define mk_pte_huge(entry) ((entry).pte_low |= _PAGE_PRESENT | _PAGE_PSE)
-
-static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
-{
- pte.pte_low &= _PAGE_CHG_MASK;
- pte.pte_low |= pgprot_val(newprot);
-#ifdef CONFIG_X86_PAE
- /*
- * Chop off the NX bit (if present), and add the NX portion of
- * the newprot (if present):
- */
- pte.pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
- pte.pte_high |= (pgprot_val(newprot) >> 32) & \
- (__supported_pte_mask >> 32);
-#endif
- return pte;
-}
-
-#define page_pte(page) page_pte_prot(page, __pgprot(0))
-
-#define pmd_clear(xp) do { \
- set_pmd(xp, __pmd(0)); \
- xen_flush_page_update_queue(); \
-} while (0)
-
-#define pmd_large(pmd) \
-((pmd_val(pmd) & (_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT))
-
-/*
- * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
- *
- * this macro returns the index of the entry in the pgd page which would
- * control the given virtual address
- */
-#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-#define pgd_index_k(addr) pgd_index(addr)
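/* Worked example (non-PAE, so PGDIR_SHIFT = 22 and PTRS_PER_PGD = 1024):
 * pgd_index(0xC0100000) = (0xC0100000 >> 22) & 1023 = 768, i.e. the first
 * kernel-space slot when PAGE_OFFSET has its usual value of 0xC0000000. */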
-
-/*
- * pgd_offset() returns a (pgd_t *)
- * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
- */
-#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
-
-/*
- * a shortcut which implies the use of the kernel's pgd, instead
- * of a process's
- */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-/*
- * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
- *
- * this macro returns the index of the entry in the pmd page which would
- * control the given virtual address
- */
-#define pmd_index(address) \
- (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
-
-/*
- * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
- *
- * this macro returns the index of the entry in the pte page which would
- * control the given virtual address
- */
-#define pte_index(address) \
- (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir, address) \
- ((pte_t *) pmd_page_kernel(*(dir)) + pte_index(address))
-
-/*
- * Helper function that returns the kernel pagetable entry controlling
- * the virtual address 'address'. NULL means no pagetable entry present.
- * NOTE: the return type is pte_t but if the pmd is PSE then we return it
- * as a pte too.
- */
-extern pte_t *lookup_address(unsigned long address);
-
-/*
- * Make a given kernel text page executable/non-executable.
- * Returns the previous executability setting of that page (which
- * is used to restore the previous state). Used by the SMP bootup code.
- * NOTE: this is an __init function for security reasons.
- */
-#ifdef CONFIG_X86_PAE
- extern int set_kernel_exec(unsigned long vaddr, int enable);
-#else
-	static inline int set_kernel_exec(unsigned long vaddr, int enable) { return 0; }
-#endif
-
-extern void noexec_setup(const char *str);
-
-#if defined(CONFIG_HIGHPTE)
-#define pte_offset_map(dir, address) \
- ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + \
- pte_index(address))
-#define pte_offset_map_nested(dir, address) \
- ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE1) + \
- pte_index(address))
-#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
-#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
-#else
-#define pte_offset_map(dir, address) \
- ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
-#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
-#define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
-#endif
-
-/*
- * The i386 doesn't have any external MMU info: the kernel page
- * tables contain all the necessary information.
- *
- * Also, we only update the dirty/accessed state if we set
- * the dirty bit by hand in the kernel, since the hardware
- * will do the accessed bit for us, and we don't want to
- * race with other CPU's that might be updating the dirty
- * bit at the same time.
- */
-#define update_mmu_cache(vma,address,pte) do { } while (0)
-#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
- do { \
- if (__dirty) { \
- if ( likely((__vma)->vm_mm == current->mm) ) { \
- xen_flush_page_update_queue(); \
-			HYPERVISOR_update_va_mapping((__address)>>PAGE_SHIFT, (__entry), UVMF_INVLPG); \
- } else { \
- xen_l1_entry_update((__ptep), (__entry).pte_low); \
- flush_tlb_page((__vma), (__address)); \
- } \
- } \
- } while (0)
-
-#define __HAVE_ARCH_PTEP_ESTABLISH
-#define ptep_establish(__vma, __address, __ptep, __entry) \
-do { \
- ptep_set_access_flags(__vma, __address, __ptep, __entry, 1); \
-} while (0)
-
-#define __HAVE_ARCH_PTEP_ESTABLISH_NEW
-#define ptep_establish_new(__vma, __address, __ptep, __entry) \
-do { \
- if (likely((__vma)->vm_mm == current->mm)) { \
- xen_flush_page_update_queue(); \
- HYPERVISOR_update_va_mapping((__address)>>PAGE_SHIFT, \
- __entry, 0); \
- } else { \
- xen_l1_entry_update((__ptep), (__entry).pte_low); \
- } \
-} while (0)
-
-/* NOTE: make_page* callers must call flush_page_update_queue() */
-void make_lowmem_page_readonly(void *va);
-void make_lowmem_page_writable(void *va);
-void make_page_readonly(void *va);
-void make_page_writable(void *va);
-void make_pages_readonly(void *va, unsigned int nr);
-void make_pages_writable(void *va, unsigned int nr);
-
-#define virt_to_ptep(__va) \
-({ \
- pgd_t *__pgd = pgd_offset_k((unsigned long)(__va)); \
- pud_t *__pud = pud_offset(__pgd, (unsigned long)(__va)); \
- pmd_t *__pmd = pmd_offset(__pud, (unsigned long)(__va)); \
- pte_offset_kernel(__pmd, (unsigned long)(__va)); \
-})
-
-#define arbitrary_virt_to_machine(__va)					\
-({ \
- pte_t *__pte = virt_to_ptep(__va); \
- unsigned long __pa = (*(unsigned long *)__pte) & PAGE_MASK; \
- __pa | ((unsigned long)(__va) & (PAGE_SIZE-1)); \
-})
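/* Usage sketch: this macro yields the address form Xen expects in an
 * mmu_update_t request, i.e. the machine address of (say) a PTE. Same
 * calling pattern as the wbinvd() helper in system.h; ptep and new_pte_val
 * are assumed values for illustration:
 *
 *	mmu_update_t u;
 *	u.ptr = arbitrary_virt_to_machine(ptep);
 *	u.val = new_pte_val;
 *	(void)HYPERVISOR_mmu_update(&u, 1, NULL);
 */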
-
-#endif /* !__ASSEMBLY__ */
-
-#ifndef CONFIG_DISCONTIGMEM
-#define kern_addr_valid(addr) (1)
-#endif /* !CONFIG_DISCONTIGMEM */
-
-#define DOMID_LOCAL (0xFFFFU)
-int direct_remap_area_pages(struct mm_struct *mm,
- unsigned long address,
- unsigned long machine_addr,
- unsigned long size,
- pgprot_t prot,
- domid_t domid);
-int __direct_remap_area_pages(struct mm_struct *mm,
- unsigned long address,
- unsigned long size,
- mmu_update_t *v);
-
-#define io_remap_page_range(vma,from,phys,size,prot) \
-direct_remap_area_pages(vma->vm_mm,from,phys,size,prot,DOMID_IO)
-
-#define io_remap_pfn_range(vma,from,pfn,size,prot) \
-direct_remap_area_pages(vma->vm_mm,from,pfn<<PAGE_SHIFT,size,prot,DOMID_IO)
-
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-#define __HAVE_ARCH_PTEP_SET_WRPROTECT
-#define __HAVE_ARCH_PTEP_MKDIRTY
-#define __HAVE_ARCH_PTE_SAME
-#include <asm-generic/pgtable.h>
-
-#endif /* _I386_PGTABLE_H */
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/processor.h
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/processor.h	Thu Aug  4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,684 +0,0 @@
-/*
- * include/asm-i386/processor.h
- *
- * Copyright (C) 1994 Linus Torvalds
- */
-
-#ifndef __ASM_I386_PROCESSOR_H
-#define __ASM_I386_PROCESSOR_H
-
-#include <asm/vm86.h>
-#include <asm/math_emu.h>
-#include <asm/segment.h>
-#include <asm/page.h>
-#include <asm/types.h>
-#include <asm/sigcontext.h>
-#include <asm/cpufeature.h>
-#include <asm/msr.h>
-#include <asm/system.h>
-#include <linux/cache.h>
-#include <linux/config.h>
-#include <linux/threads.h>
-#include <asm/percpu.h>
-
-/* flag for disabling the tsc */
-extern int tsc_disable;
-
-struct desc_struct {
- unsigned long a,b;
-};
-
-#define desc_empty(desc) \
- (!((desc)->a + (desc)->b))
-
-#define desc_equal(desc1, desc2) \
- (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#define current_text_addr() ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })
-
-/*
- * CPU type and hardware bug flags. Kept separately for each CPU.
- * Members of this structure are referenced in head.S, so think twice
- * before touching them. [mj]
- */
-
-struct cpuinfo_x86 {
- __u8 x86; /* CPU family */
- __u8 x86_vendor; /* CPU vendor */
- __u8 x86_model;
- __u8 x86_mask;
- char wp_works_ok; /* It doesn't on 386's */
- char hlt_works_ok; /* Problems on some 486Dx4's and old 386's */
- char hard_math;
- char rfu;
-	int	cpuid_level;	/* Maximum supported CPUID level, -1=no CPUID */
- unsigned long x86_capability[NCAPINTS];
- char x86_vendor_id[16];
- char x86_model_id[64];
- int x86_cache_size; /* in KB - valid for CPUS which support this
- call */
- int x86_cache_alignment; /* In bytes */
- int fdiv_bug;
- int f00f_bug;
- int coma_bug;
- unsigned long loops_per_jiffy;
- unsigned char x86_num_cores;
-} __attribute__((__aligned__(SMP_CACHE_BYTES)));
-
-#define X86_VENDOR_INTEL 0
-#define X86_VENDOR_CYRIX 1
-#define X86_VENDOR_AMD 2
-#define X86_VENDOR_UMC 3
-#define X86_VENDOR_NEXGEN 4
-#define X86_VENDOR_CENTAUR 5
-#define X86_VENDOR_RISE 6
-#define X86_VENDOR_TRANSMETA 7
-#define X86_VENDOR_NSC 8
-#define X86_VENDOR_NUM 9
-#define X86_VENDOR_UNKNOWN 0xff
-
-/*
- * capabilities of CPUs
- */
-
-extern struct cpuinfo_x86 boot_cpu_data;
-extern struct cpuinfo_x86 new_cpu_data;
-extern struct tss_struct doublefault_tss;
-DECLARE_PER_CPU(struct tss_struct, init_tss);
-extern pgd_t *cur_pgd; /* XXXsmp */
-
-#ifdef CONFIG_SMP
-extern struct cpuinfo_x86 cpu_data[];
-#define current_cpu_data cpu_data[smp_processor_id()]
-#else
-#define cpu_data (&boot_cpu_data)
-#define current_cpu_data boot_cpu_data
-#endif
-
-extern int phys_proc_id[NR_CPUS];
-extern char ignore_fpu_irq;
-
-extern void identify_cpu(struct cpuinfo_x86 *);
-extern void print_cpu_info(struct cpuinfo_x86 *);
-extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
-extern void dodgy_tsc(void);
-
-#ifdef CONFIG_X86_HT
-extern void detect_ht(struct cpuinfo_x86 *c);
-#else
-static inline void detect_ht(struct cpuinfo_x86 *c) {}
-#endif
-
-/*
- * EFLAGS bits
- */
-#define X86_EFLAGS_CF 0x00000001 /* Carry Flag */
-#define X86_EFLAGS_PF 0x00000004 /* Parity Flag */
-#define X86_EFLAGS_AF	0x00000010 /* Auxiliary carry Flag */
-#define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */
-#define X86_EFLAGS_SF 0x00000080 /* Sign Flag */
-#define X86_EFLAGS_TF 0x00000100 /* Trap Flag */
-#define X86_EFLAGS_IF 0x00000200 /* Interrupt Flag */
-#define X86_EFLAGS_DF 0x00000400 /* Direction Flag */
-#define X86_EFLAGS_OF 0x00000800 /* Overflow Flag */
-#define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */
-#define X86_EFLAGS_NT 0x00004000 /* Nested Task */
-#define X86_EFLAGS_RF 0x00010000 /* Resume Flag */
-#define X86_EFLAGS_VM 0x00020000 /* Virtual Mode */
-#define X86_EFLAGS_AC 0x00040000 /* Alignment Check */
-#define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */
-#define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */
-#define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */
-
-/*
- * Generic CPUID function
- * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
- * resulting in stale register contents being returned.
- */
-static inline void cpuid(int op, int *eax, int *ebx, int *ecx, int *edx)
-{
- __asm__("cpuid"
- : "=a" (*eax),
- "=b" (*ebx),
- "=c" (*ecx),
- "=d" (*edx)
- : "0" (op), "c"(0));
-}
-
-/*
- * CPUID functions returning a single datum
- */
-static inline unsigned int cpuid_eax(unsigned int op)
-{
- unsigned int eax;
-
- __asm__("cpuid"
- : "=a" (eax)
- : "0" (op)
- : "bx", "cx", "dx");
- return eax;
-}
-static inline unsigned int cpuid_ebx(unsigned int op)
-{
- unsigned int eax, ebx;
-
- __asm__("cpuid"
- : "=a" (eax), "=b" (ebx)
- : "0" (op)
- : "cx", "dx" );
- return ebx;
-}
-static inline unsigned int cpuid_ecx(unsigned int op)
-{
- unsigned int eax, ecx;
-
- __asm__("cpuid"
- : "=a" (eax), "=c" (ecx)
- : "0" (op)
- : "bx", "dx" );
- return ecx;
-}
-static inline unsigned int cpuid_edx(unsigned int op)
-{
- unsigned int eax, edx;
-
- __asm__("cpuid"
- : "=a" (eax), "=d" (edx)
- : "0" (op)
- : "bx", "cx");
- return edx;
-}
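/* Usage sketch (illustrative only): cpuid(0, ...) returns the vendor
 * string split across EBX, EDX and ECX, in that order:
 *
 *	int eax, vendor[4] = { 0 };
 *	cpuid(0, &eax, &vendor[0], &vendor[2], &vendor[1]);
 *	printk("CPU vendor: %.12s\n", (char *)vendor);
 */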
-
-#define load_cr3(pgdir) do { \
- queue_pt_switch(__pa(pgdir)); \
- cur_pgd = pgdir; /* XXXsmp */ \
-} while (/* CONSTCOND */0)
-
-
-/*
- * Intel CPU features in CR4
- */
-#define X86_CR4_VME 0x0001 /* enable vm86 extensions */
-#define X86_CR4_PVI 0x0002 /* virtual interrupts flag enable */
-#define X86_CR4_TSD 0x0004 /* disable time stamp at ipl 3 */
-#define X86_CR4_DE 0x0008 /* enable debugging extensions */
-#define X86_CR4_PSE 0x0010 /* enable page size extensions */
-#define X86_CR4_PAE 0x0020 /* enable physical address extensions */
-#define X86_CR4_MCE 0x0040 /* Machine check enable */
-#define X86_CR4_PGE 0x0080 /* enable global pages */
-#define X86_CR4_PCE		0x0100	/* enable performance counters at ipl 3 */
-#define X86_CR4_OSFXSR 0x0200 /* enable fast FPU save and restore */
-#define X86_CR4_OSXMMEXCPT 0x0400 /* enable unmasked SSE exceptions */
-
-/*
- * Save the cr4 feature set we're using (ie
- * Pentium 4MB enable and PPro Global page
- * enable), so that any CPU's that boot up
- * after us can get the correct flags.
- */
-extern unsigned long mmu_cr4_features;
-
-static inline void set_in_cr4 (unsigned long mask)
-{
- mmu_cr4_features |= mask;
- switch (mask) {
- case X86_CR4_OSFXSR:
- case X86_CR4_OSXMMEXCPT:
- break;
- default:
- do {
- const char *msg = "Xen unsupported cr4 update\n";
- (void)HYPERVISOR_console_io(
- CONSOLEIO_write, __builtin_strlen(msg),
- (char *)msg);
- BUG();
- } while (0);
- }
-}
-
-static inline void clear_in_cr4 (unsigned long mask)
-{
- mmu_cr4_features &= ~mask;
- __asm__("movl %%cr4,%%eax\n\t"
- "andl %0,%%eax\n\t"
- "movl %%eax,%%cr4\n"
- : : "irg" (~mask)
- :"ax");
-}
-
-/*
- * NSC/Cyrix CPU configuration register indexes
- */
-
-#define CX86_PCR0 0x20
-#define CX86_GCR 0xb8
-#define CX86_CCR0 0xc0
-#define CX86_CCR1 0xc1
-#define CX86_CCR2 0xc2
-#define CX86_CCR3 0xc3
-#define CX86_CCR4 0xe8
-#define CX86_CCR5 0xe9
-#define CX86_CCR6 0xea
-#define CX86_CCR7 0xeb
-#define CX86_PCR1 0xf0
-#define CX86_DIR0 0xfe
-#define CX86_DIR1 0xff
-#define CX86_ARR_BASE 0xc4
-#define CX86_RCR_BASE 0xdc
-
-/*
- * NSC/Cyrix CPU indexed register access macros
- */
-
-#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })
-
-#define setCx86(reg, data) do { \
- outb((reg), 0x22); \
- outb((data), 0x23); \
-} while (0)
-
-static inline void __monitor(const void *eax, unsigned long ecx,
- unsigned long edx)
-{
- /* "monitor %eax,%ecx,%edx;" */
- asm volatile(
- ".byte 0x0f,0x01,0xc8;"
- : :"a" (eax), "c" (ecx), "d"(edx));
-}
-
-static inline void __mwait(unsigned long eax, unsigned long ecx)
-{
- /* "mwait %eax,%ecx;" */
- asm volatile(
- ".byte 0x0f,0x01,0xc9;"
- : :"a" (eax), "c" (ecx));
-}
-
-/* from system description table in BIOS. Mostly for MCA use, but
-others may find it useful. */
-extern unsigned int machine_id;
-extern unsigned int machine_submodel_id;
-extern unsigned int BIOS_revision;
-extern unsigned int mca_pentium_flag;
-
-/* Boot loader type from the setup header */
-extern int bootloader_type;
-
-/*
- * User space process size: 3GB (default).
- */
-#define TASK_SIZE (PAGE_OFFSET)
-
-/* This decides where the kernel will search for a free chunk of vm
- * space during mmap's.
- */
-#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
-
-#define HAVE_ARCH_PICK_MMAP_LAYOUT
-
-/*
- * Size of io_bitmap.
- */
-#define IO_BITMAP_BITS 65536
-#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
-#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
-#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
-#define INVALID_IO_BITMAP_OFFSET 0x8000
-#define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000
-
-struct i387_fsave_struct {
- long cwd;
- long swd;
- long twd;
- long fip;
- long fcs;
- long foo;
- long fos;
- long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */
- long status; /* software status information */
-};
-
-struct i387_fxsave_struct {
- unsigned short cwd;
- unsigned short swd;
- unsigned short twd;
- unsigned short fop;
- long fip;
- long fcs;
- long foo;
- long fos;
- long mxcsr;
- long mxcsr_mask;
- long st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
- long xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */
- long padding[56];
-} __attribute__ ((aligned (16)));
-
-struct i387_soft_struct {
- long cwd;
- long swd;
- long twd;
- long fip;
- long fcs;
- long foo;
- long fos;
- long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */
- unsigned char ftop, changed, lookahead, no_update, rm, alimit;
- struct info *info;
- unsigned long entry_eip;
-};
-
-union i387_union {
- struct i387_fsave_struct fsave;
- struct i387_fxsave_struct fxsave;
- struct i387_soft_struct soft;
-};
-
-typedef struct {
- unsigned long seg;
-} mm_segment_t;
-
-struct thread_struct;
-
-struct tss_struct {
- unsigned short back_link,__blh;
- unsigned long esp0;
- unsigned short ss0,__ss0h;
- unsigned long esp1;
-	unsigned short	ss1,__ss1h;	/* ss1 is used to cache MSR_IA32_SYSENTER_CS */
- unsigned long esp2;
- unsigned short ss2,__ss2h;
- unsigned long __cr3;
- unsigned long eip;
- unsigned long eflags;
- unsigned long eax,ecx,edx,ebx;
- unsigned long esp;
- unsigned long ebp;
- unsigned long esi;
- unsigned long edi;
- unsigned short es, __esh;
- unsigned short cs, __csh;
- unsigned short ss, __ssh;
- unsigned short ds, __dsh;
- unsigned short fs, __fsh;
- unsigned short gs, __gsh;
- unsigned short ldt, __ldth;
- unsigned short trace, io_bitmap_base;
- /*
- * The extra 1 is there because the CPU will access an
- * additional byte beyond the end of the IO permission
- * bitmap. The extra byte must be all 1 bits, and must
- * be within the limit.
- */
- unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
- /*
- * Cache the current maximum and the last task that used the bitmap:
- */
- unsigned long io_bitmap_max;
- struct thread_struct *io_bitmap_owner;
- /*
- * pads the TSS to be cacheline-aligned (size is 0x100)
- */
- unsigned long __cacheline_filler[35];
- /*
- * .. and then another 0x100 bytes for emergency kernel stack
- */
- unsigned long stack[64];
-} __attribute__((packed));
-
-#define ARCH_MIN_TASKALIGN 16
-
-struct thread_struct {
-/* cached TLS descriptors. */
- struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
- unsigned long esp0;
- unsigned long sysenter_cs;
- unsigned long eip;
- unsigned long esp;
- unsigned long fs;
- unsigned long gs;
- unsigned int io_pl;
-/* Hardware debugging registers */
- unsigned long debugreg[8]; /* %%db0-7 debug registers */
-/* fault info */
- unsigned long cr2, trap_no, error_code;
-/* floating point info */
- union i387_union i387;
-/* virtual 86 mode info */
- struct vm86_struct __user * vm86_info;
- unsigned long screen_bitmap;
- unsigned long v86flags, v86mask, saved_esp0;
- unsigned int saved_fs, saved_gs;
-/* IO permissions */
- unsigned long *io_bitmap_ptr;
-/* max allowed port in the bitmap, in bytes: */
- unsigned long io_bitmap_max;
-};
-
-#define INIT_THREAD { \
- .vm86_info = NULL, \
- .sysenter_cs = __KERNEL_CS, \
- .io_bitmap_ptr = NULL, \
-}
-
-/*
- * Note that the .io_bitmap member must be extra-big. This is because
- * the CPU will access an additional byte beyond the end of the IO
- * permission bitmap. The extra byte must be all 1 bits, and must
- * be within the limit.
- */
-#define INIT_TSS { \
- .esp0 = sizeof(init_stack) + (long)&init_stack, \
- .ss0 = __KERNEL_DS, \
- .ss1 = __KERNEL_CS, \
- .ldt = GDT_ENTRY_LDT, \
- .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
- .io_bitmap = { [ 0 ... IO_BITMAP_LONGS] = ~0 }, \
-}
-
-static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread)
-{
- tss->esp0 = thread->esp0;
-	/* This can only happen when SEP is enabled, no need to test "SEP"arately */
- if (unlikely(tss->ss1 != thread->sysenter_cs)) {
- tss->ss1 = thread->sysenter_cs;
- wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
- }
- HYPERVISOR_stack_switch(tss->ss0, tss->esp0);
-}
-
-#define start_thread(regs, new_eip, new_esp) do { \
- __asm__("movl %0,%%fs ; movl %0,%%gs": :"r" (0)); \
- set_fs(USER_DS); \
- regs->xds = __USER_DS; \
- regs->xes = __USER_DS; \
- regs->xss = __USER_DS; \
- regs->xcs = __USER_CS; \
- regs->eip = new_eip; \
- regs->esp = new_esp; \
-} while (0)
-
-/* Forward declaration, a strange C thing */
-struct task_struct;
-struct mm_struct;
-
-/* Free all resources held by a thread. */
-extern void release_thread(struct task_struct *);
-
-/* Prepare to copy thread state - unlazy all lazy status */
-extern void prepare_to_copy(struct task_struct *tsk);
-
-/*
- * create a kernel thread without removing it from tasklists
- */
-extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
-
-extern unsigned long thread_saved_pc(struct task_struct *tsk);
-void show_trace(struct task_struct *task, unsigned long *stack);
-
-unsigned long get_wchan(struct task_struct *p);
-
-#define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
-#define KSTK_TOP(info) \
-({ \
- unsigned long *__ptr = (unsigned long *)(info); \
- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
-})
-
-#define task_pt_regs(task) \
-({ \
- struct pt_regs *__regs__; \
- __regs__ = (struct pt_regs *)KSTK_TOP((task)->thread_info); \
- __regs__ - 1; \
-})
-
-#define KSTK_EIP(task) (task_pt_regs(task)->eip)
-#define KSTK_ESP(task) (task_pt_regs(task)->esp)
-
-
-struct microcode_header {
- unsigned int hdrver;
- unsigned int rev;
- unsigned int date;
- unsigned int sig;
- unsigned int cksum;
- unsigned int ldrver;
- unsigned int pf;
- unsigned int datasize;
- unsigned int totalsize;
- unsigned int reserved[3];
-};
-
-struct microcode {
- struct microcode_header hdr;
- unsigned int bits[0];
-};
-
-typedef struct microcode microcode_t;
-typedef struct microcode_header microcode_header_t;
-
-/* microcode format is extended from prescott processors */
-struct extended_signature {
- unsigned int sig;
- unsigned int pf;
- unsigned int cksum;
-};
-
-struct extended_sigtable {
- unsigned int count;
- unsigned int cksum;
- unsigned int reserved[3];
- struct extended_signature sigs[0];
-};
-/* '6' because it used to be for P6 only (but now covers Pentium 4 as well) */
-#define MICROCODE_IOCFREE _IO('6',0)
-
-/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
-static inline void rep_nop(void)
-{
- __asm__ __volatile__("rep;nop": : :"memory");
-}
-
-#define cpu_relax() rep_nop()
-
-/* generic versions from gas */
-#define GENERIC_NOP1 ".byte 0x90\n"
-#define GENERIC_NOP2 ".byte 0x89,0xf6\n"
-#define GENERIC_NOP3 ".byte 0x8d,0x76,0x00\n"
-#define GENERIC_NOP4 ".byte 0x8d,0x74,0x26,0x00\n"
-#define GENERIC_NOP5 GENERIC_NOP1 GENERIC_NOP4
-#define GENERIC_NOP6 ".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n"
-#define GENERIC_NOP7 ".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n"
-#define GENERIC_NOP8 GENERIC_NOP1 GENERIC_NOP7
-
-/* Opteron nops */
-#define K8_NOP1 GENERIC_NOP1
-#define K8_NOP2 ".byte 0x66,0x90\n"
-#define K8_NOP3 ".byte 0x66,0x66,0x90\n"
-#define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n"
-#define K8_NOP5 K8_NOP3 K8_NOP2
-#define K8_NOP6 K8_NOP3 K8_NOP3
-#define K8_NOP7 K8_NOP4 K8_NOP3
-#define K8_NOP8 K8_NOP4 K8_NOP4
-
-/* K7 nops */
-/* uses eax dependencies (arbitrary choice) */
-#define K7_NOP1 GENERIC_NOP1
-#define K7_NOP2 ".byte 0x8b,0xc0\n"
-#define K7_NOP3 ".byte 0x8d,0x04,0x20\n"
-#define K7_NOP4 ".byte 0x8d,0x44,0x20,0x00\n"
-#define K7_NOP5 K7_NOP4 ASM_NOP1
-#define K7_NOP6 ".byte 0x8d,0x80,0,0,0,0\n"
-#define K7_NOP7 ".byte 0x8D,0x04,0x05,0,0,0,0\n"
-#define K7_NOP8 K7_NOP7 ASM_NOP1
-
-#ifdef CONFIG_MK8
-#define ASM_NOP1 K8_NOP1
-#define ASM_NOP2 K8_NOP2
-#define ASM_NOP3 K8_NOP3
-#define ASM_NOP4 K8_NOP4
-#define ASM_NOP5 K8_NOP5
-#define ASM_NOP6 K8_NOP6
-#define ASM_NOP7 K8_NOP7
-#define ASM_NOP8 K8_NOP8
-#elif defined(CONFIG_MK7)
-#define ASM_NOP1 K7_NOP1
-#define ASM_NOP2 K7_NOP2
-#define ASM_NOP3 K7_NOP3
-#define ASM_NOP4 K7_NOP4
-#define ASM_NOP5 K7_NOP5
-#define ASM_NOP6 K7_NOP6
-#define ASM_NOP7 K7_NOP7
-#define ASM_NOP8 K7_NOP8
-#else
-#define ASM_NOP1 GENERIC_NOP1
-#define ASM_NOP2 GENERIC_NOP2
-#define ASM_NOP3 GENERIC_NOP3
-#define ASM_NOP4 GENERIC_NOP4
-#define ASM_NOP5 GENERIC_NOP5
-#define ASM_NOP6 GENERIC_NOP6
-#define ASM_NOP7 GENERIC_NOP7
-#define ASM_NOP8 GENERIC_NOP8
-#endif
-
-#define ASM_NOP_MAX 8
-
-/* Prefetch instructions for Pentium III and AMD Athlon */
-/* It's not worth caring about 3dnow! prefetches for the K6
-   because they are microcoded there and very slow.
-   However, we currently don't do prefetches for pre-XP Athlons;
-   that should be fixed. */
-#define ARCH_HAS_PREFETCH
-extern inline void prefetch(const void *x)
-{
- alternative_input(ASM_NOP4,
- "prefetchnta (%1)",
- X86_FEATURE_XMM,
- "r" (x));
-}
-
-#define ARCH_HAS_PREFETCH
-#define ARCH_HAS_PREFETCHW
-#define ARCH_HAS_SPINLOCK_PREFETCH
-
-/* 3dnow! prefetch to get an exclusive cache line. Useful for
- spinlocks to avoid one state transition in the cache coherency protocol. */
-extern inline void prefetchw(const void *x)
-{
- alternative_input(ASM_NOP4,
- "prefetchw (%1)",
- X86_FEATURE_3DNOW,
- "r" (x));
-}
-#define spin_lock_prefetch(x) prefetchw(x)
-
-extern void select_idle_routine(const struct cpuinfo_x86 *c);
-
-#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
-
-extern unsigned long boot_option_idle_override;
-
-#endif /* __ASM_I386_PROCESSOR_H */
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/ptrace.h
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/ptrace.h	Thu Aug  4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,69 +0,0 @@
-#ifndef _I386_PTRACE_H
-#define _I386_PTRACE_H
-
-#define EBX 0
-#define ECX 1
-#define EDX 2
-#define ESI 3
-#define EDI 4
-#define EBP 5
-#define EAX 6
-#define DS 7
-#define ES 8
-#define FS 9
-#define GS 10
-#define ORIG_EAX 11
-#define EIP 12
-#define CS 13
-#define EFL 14
-#define UESP 15
-#define SS 16
-#define FRAME_SIZE 17
-
-/* this struct defines the way the registers are stored on the
- stack during a system call. */
-
-struct pt_regs {
- long ebx;
- long ecx;
- long edx;
- long esi;
- long edi;
- long ebp;
- long eax;
- int xds;
- int xes;
- long orig_eax;
- long eip;
- int xcs;
- long eflags;
- long esp;
- int xss;
-};
-
-/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
-#define PTRACE_GETREGS 12
-#define PTRACE_SETREGS 13
-#define PTRACE_GETFPREGS 14
-#define PTRACE_SETFPREGS 15
-#define PTRACE_GETFPXREGS 18
-#define PTRACE_SETFPXREGS 19
-
-#define PTRACE_OLDSETOPTIONS 21
-
-#define PTRACE_GET_THREAD_AREA 25
-#define PTRACE_SET_THREAD_AREA 26
-
-#ifdef __KERNEL__
-struct task_struct;
-extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code);
-#define user_mode(regs) ((VM_MASK & (regs)->eflags) || (2 & (regs)->xcs))
-#define instruction_pointer(regs) ((regs)->eip)
-#if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER)
-extern unsigned long profile_pc(struct pt_regs *regs);
-#else
-#define profile_pc(regs) instruction_pointer(regs)
-#endif
-#endif
-
-#endif
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/segment.h
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/segment.h	Thu Aug  4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,96 +0,0 @@
-#ifndef _ASM_SEGMENT_H
-#define _ASM_SEGMENT_H
-
-/*
- * The layout of the per-CPU GDT under Linux:
- *
- * 0 - null
- * 1 - reserved
- * 2 - reserved
- * 3 - reserved
- *
- * 4 - unused <==== new cacheline
- * 5 - unused
- *
- * ------- start of TLS (Thread-Local Storage) segments:
- *
- * 6 - TLS segment #1 [ glibc's TLS segment ]
- * 7 - TLS segment #2 [ Wine's %fs Win32 segment ]
- * 8 - TLS segment #3
- * 9 - reserved
- * 10 - reserved
- * 11 - reserved
- *
- * ------- start of kernel segments:
- *
- * 12 - kernel code segment <==== new cacheline
- * 13 - kernel data segment
- * 14 - default user CS
- * 15 - default user DS
- * 16 - TSS
- * 17 - LDT
- * 18 - PNPBIOS support (16->32 gate)
- * 19 - PNPBIOS support
- * 20 - PNPBIOS support
- * 21 - PNPBIOS support
- * 22 - PNPBIOS support
- * 23 - APM BIOS support
- * 24 - APM BIOS support
- * 25 - APM BIOS support
- *
- * 26 - unused
- * 27 - unused
- * 28 - unused
- * 29 - unused
- * 30 - unused
- * 31 - TSS for double fault handler
- */
-#define GDT_ENTRY_TLS_ENTRIES 3
-#define GDT_ENTRY_TLS_MIN 6
-#define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
-
-#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
-
-#define GDT_ENTRY_DEFAULT_USER_CS 14
-#define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS * 8 + 3)
-
-#define GDT_ENTRY_DEFAULT_USER_DS 15
-#define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS * 8 + 3)
-
-#define GDT_ENTRY_KERNEL_BASE 12
-
-#define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
-#define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8 + 1)
-
-#define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
-#define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8 + 1)
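/* Worked example: a selector is (GDT index * 8) | RPL, so
 * __USER_CS = 14*8 + 3 = 0x73 (ring 3) while __KERNEL_CS = 12*8 + 1 = 0x61,
 * i.e. RPL 1, because a Xen guest kernel runs in ring 1 rather than ring 0. */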
-
-#define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
-#define GDT_ENTRY_LDT (GDT_ENTRY_KERNEL_BASE + 5)
-
-#define GDT_ENTRY_PNPBIOS_BASE (GDT_ENTRY_KERNEL_BASE + 6)
-#define GDT_ENTRY_APMBIOS_BASE (GDT_ENTRY_KERNEL_BASE + 11)
-
-#define GDT_ENTRY_DOUBLEFAULT_TSS 31
-
-/*
- * The GDT has LAST_RESERVED_GDT_ENTRY + 1 entries
- */
-#define GDT_ENTRIES (LAST_RESERVED_GDT_ENTRY + 1)
-
-#define GDT_SIZE (GDT_ENTRIES * 8)
-
-/* Simple and small GDT entries for booting only */
-
-#define __BOOT_CS FLAT_GUESTOS_CS
-
-#define __BOOT_DS FLAT_GUESTOS_DS
-
-/*
- * The interrupt descriptor table has room for 256 idt's,
- * the global descriptor table is dependent on the number
- * of tasks we can have..
- */
-#define IDT_ENTRIES 256
-
-#endif
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/setup.h
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/setup.h	Thu Aug  4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,66 +0,0 @@
-/*
- * Just a place holder. We don't want to have to test x86 before
- * we include stuff
- */
-
-#ifndef _i386_SETUP_H
-#define _i386_SETUP_H
-
-#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
-#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
-#define PFN_PHYS(x) ((x) << PAGE_SHIFT)
-
-/*
- * Reserved space for vmalloc and iomap - defined in asm/page.h
- */
-#define MAXMEM_PFN PFN_DOWN(MAXMEM)
-#define MAX_NONPAE_PFN (1 << 20)
-
-#define PARAM_SIZE 2048
-#define COMMAND_LINE_SIZE 256
-
-#define OLD_CL_MAGIC_ADDR 0x90020
-#define OLD_CL_MAGIC 0xA33F
-#define OLD_CL_BASE_ADDR 0x90000
-#define OLD_CL_OFFSET 0x90022
-#define NEW_CL_POINTER 0x228 /* Relative to real mode data */
-
-#ifndef __ASSEMBLY__
-/*
- * This is set up by the setup-routine at boot-time
- */
-extern unsigned char boot_params[PARAM_SIZE];
-
-#define PARAM (boot_params)
-#define SCREEN_INFO (*(struct screen_info *) (PARAM+0))
-#define EXT_MEM_K (*(unsigned short *) (PARAM+2))
-#define ALT_MEM_K (*(unsigned long *) (PARAM+0x1e0))
-#define E820_MAP_NR (*(char*) (PARAM+E820NR))
-#define E820_MAP ((struct e820entry *) (PARAM+E820MAP))
-#define APM_BIOS_INFO (*(struct apm_bios_info *) (PARAM+0x40))
-#define IST_INFO (*(struct ist_info *) (PARAM+0x60))
-#define DRIVE_INFO (*(struct drive_info_struct *) (PARAM+0x80))
-#define SYS_DESC_TABLE (*(struct sys_desc_table_struct*)(PARAM+0xa0))
-#define EFI_SYSTAB ((efi_system_table_t *) *((unsigned long *)(PARAM+0x1c4)))
-#define EFI_MEMDESC_SIZE (*((unsigned long *) (PARAM+0x1c8)))
-#define EFI_MEMDESC_VERSION (*((unsigned long *) (PARAM+0x1cc)))
-#define EFI_MEMMAP ((efi_memory_desc_t *) *((unsigned long *)(PARAM+0x1d0)))
-#define EFI_MEMMAP_SIZE (*((unsigned long *) (PARAM+0x1d4)))
-#define MOUNT_ROOT_RDONLY (*(unsigned short *) (PARAM+0x1F2))
-#define RAMDISK_FLAGS (*(unsigned short *) (PARAM+0x1F8))
-#define VIDEO_MODE (*(unsigned short *) (PARAM+0x1FA))
-#define ORIG_ROOT_DEV (*(unsigned short *) (PARAM+0x1FC))
-#define AUX_DEVICE_INFO (*(unsigned char *) (PARAM+0x1FF))
-#define LOADER_TYPE (*(unsigned char *) (PARAM+0x210))
-#define KERNEL_START (*(unsigned long *) (PARAM+0x214))
-#define INITRD_START (__pa(xen_start_info.mod_start))
-#define INITRD_SIZE (xen_start_info.mod_len)
-#define EDID_INFO (*(struct edid_info *) (PARAM+0x440))
-#define EDD_NR (*(unsigned char *) (PARAM+EDDNR))
-#define EDD_MBR_SIG_NR (*(unsigned char *) (PARAM+EDD_MBR_SIG_NR_BUF))
-#define EDD_MBR_SIGNATURE ((unsigned int *) (PARAM+EDD_MBR_SIG_BUF))
-#define EDD_BUF ((struct edd_info *) (PARAM+EDDBUF))
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* _i386_SETUP_H */
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/synch_bitops.h
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/synch_bitops.h	Thu Aug  4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,83 +0,0 @@
-#ifndef __XEN_SYNCH_BITOPS_H__
-#define __XEN_SYNCH_BITOPS_H__
-
-/*
- * Copyright 1992, Linus Torvalds.
- * Heavily modified to provide guaranteed strong synchronisation
- * when communicating with Xen or other guest OSes running on other CPUs.
- */
-
-#include <linux/config.h>
-
-#define ADDR (*(volatile long *) addr)
-
-static __inline__ void synch_set_bit(int nr, volatile void * addr)
-{
- __asm__ __volatile__ (
- "lock btsl %1,%0"
- : "=m" (ADDR) : "Ir" (nr) : "memory" );
-}
-
-static __inline__ void synch_clear_bit(int nr, volatile void * addr)
-{
- __asm__ __volatile__ (
- "lock btrl %1,%0"
- : "=m" (ADDR) : "Ir" (nr) : "memory" );
-}
-
-static __inline__ void synch_change_bit(int nr, volatile void * addr)
-{
- __asm__ __volatile__ (
- "lock btcl %1,%0"
- : "=m" (ADDR) : "Ir" (nr) : "memory" );
-}
-
-static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
-{
- int oldbit;
- __asm__ __volatile__ (
- "lock btsl %2,%1\n\tsbbl %0,%0"
- : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
- return oldbit;
-}
-
-static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr)
-{
- int oldbit;
- __asm__ __volatile__ (
- "lock btrl %2,%1\n\tsbbl %0,%0"
- : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
- return oldbit;
-}
-
-static __inline__ int synch_test_and_change_bit(int nr, volatile void * addr)
-{
- int oldbit;
-
- __asm__ __volatile__ (
- "lock btcl %2,%1\n\tsbbl %0,%0"
- : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
- return oldbit;
-}
-
-static __inline__ int synch_const_test_bit(int nr, const volatile void * addr)
-{
- return ((1UL << (nr & 31)) &
- (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
-}
-
-static __inline__ int synch_var_test_bit(int nr, volatile void * addr)
-{
- int oldbit;
- __asm__ __volatile__ (
- "btl %2,%1\n\tsbbl %0,%0"
- : "=r" (oldbit) : "m" (ADDR), "Ir" (nr) );
- return oldbit;
-}
-
-#define synch_test_bit(nr,addr) \
-(__builtin_constant_p(nr) ? \
- synch_const_test_bit((nr),(addr)) : \
- synch_var_test_bit((nr),(addr)))
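/* Usage sketch (hypothetical caller; the field name follows the Xen
 * shared_info layout of this era): a guest masks and unmasks its event
 * channels by flipping bits in a page the hypervisor writes concurrently,
 * which is why the locked variants above are required:
 *
 *	synch_set_bit(port, &HYPERVISOR_shared_info->evtchn_mask[0]);
 *	synch_clear_bit(port, &HYPERVISOR_shared_info->evtchn_mask[0]);
 */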
-
-#endif /* __XEN_SYNCH_BITOPS_H__ */
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/system.h
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/system.h	Thu Aug  4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,522 +0,0 @@
-#ifndef __ASM_SYSTEM_H
-#define __ASM_SYSTEM_H
-
-#include <linux/config.h>
-#include <linux/kernel.h>
-#include <linux/bitops.h>
-#include <asm/synch_bitops.h>
-#include <asm/segment.h>
-#include <asm/cpufeature.h>
-#include <asm-xen/hypervisor.h>
-#include <asm-xen/evtchn.h>
-
-#ifdef __KERNEL__
-
-struct task_struct;	/* one of the stranger aspects of C forward declarations.. */
-extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
-
-#define switch_to(prev,next,last) do { \
- unsigned long esi,edi; \
- asm volatile("pushfl\n\t" \
- "pushl %%ebp\n\t" \
- "movl %%esp,%0\n\t" /* save ESP */ \
- "movl %5,%%esp\n\t" /* restore ESP */ \
- "movl $1f,%1\n\t" /* save EIP */ \
- "pushl %6\n\t" /* restore EIP */ \
- "jmp __switch_to\n" \
- "1:\t" \
- "popl %%ebp\n\t" \
- "popfl" \
- :"=m" (prev->thread.esp),"=m" (prev->thread.eip), \
- "=a" (last),"=S" (esi),"=D" (edi) \
- :"m" (next->thread.esp),"m" (next->thread.eip), \
- "2" (prev), "d" (next)); \
-} while (0)
-
-#define _set_base(addr,base) do { unsigned long __pr; \
-__asm__ __volatile__ ("movw %%dx,%1\n\t" \
- "rorl $16,%%edx\n\t" \
- "movb %%dl,%2\n\t" \
- "movb %%dh,%3" \
- :"=&d" (__pr) \
- :"m" (*((addr)+2)), \
- "m" (*((addr)+4)), \
- "m" (*((addr)+7)), \
- "0" (base) \
- ); } while(0)
-
-#define _set_limit(addr,limit) do { unsigned long __lr; \
-__asm__ __volatile__ ("movw %%dx,%1\n\t" \
- "rorl $16,%%edx\n\t" \
- "movb %2,%%dh\n\t" \
- "andb $0xf0,%%dh\n\t" \
- "orb %%dh,%%dl\n\t" \
- "movb %%dl,%2" \
- :"=&d" (__lr) \
- :"m" (*(addr)), \
- "m" (*((addr)+6)), \
- "0" (limit) \
- ); } while(0)
-
-#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
-#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1)>>12 )
-
-static inline unsigned long _get_base(char * addr)
-{
- unsigned long __base;
- __asm__("movb %3,%%dh\n\t"
- "movb %2,%%dl\n\t"
- "shll $16,%%edx\n\t"
- "movw %1,%%dx"
- :"=&d" (__base)
- :"m" (*((addr)+2)),
- "m" (*((addr)+4)),
- "m" (*((addr)+7)));
- return __base;
-}
-
-#define get_base(ldt) _get_base( ((char *)&(ldt)) )
-
-/*
- * Load a segment. Fall back on loading the zero
- * segment if something goes wrong..
- */
-#define loadsegment(seg,value) \
- asm volatile("\n" \
- "1:\t" \
- "movl %0,%%" #seg "\n" \
- "2:\n" \
- ".section .fixup,\"ax\"\n" \
- "3:\t" \
- "pushl $0\n\t" \
- "popl %%" #seg "\n\t" \
- "jmp 2b\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n\t" \
- ".align 4\n\t" \
- ".long 1b,3b\n" \
- ".previous" \
- : :"m" (*(unsigned int *)&(value)))
-
-/*
- * Save a segment register away
- */
-#define savesegment(seg, value) \
- asm volatile("movl %%" #seg ",%0":"=m" (*(int *)&(value)))
-
-/*
- * Clear and set 'TS' bit respectively
- */
-/* NB. 'clts' is done for us by Xen during virtual trap. */
-#define clts() ((void)0)
-#define read_cr0() \
- BUG();
-#define write_cr0(x) \
- BUG();
-
-#define read_cr4() \
- BUG();
-#define write_cr4(x) \
- BUG();
-#define stts() (HYPERVISOR_fpu_taskswitch())
-
-#endif /* __KERNEL__ */
-
-static inline void wbinvd(void)
-{
- mmu_update_t u;
- u.ptr = MMU_EXTENDED_COMMAND;
- u.val = MMUEXT_FLUSH_CACHE;
- (void)HYPERVISOR_mmu_update(&u, 1, NULL);
-}
-
-static inline unsigned long get_limit(unsigned long segment)
-{
- unsigned long __limit;
- __asm__("lsll %1,%0"
- :"=r" (__limit):"r" (segment));
- return __limit+1;
-}
-
-#define nop() __asm__ __volatile__ ("nop")
-
-#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
-
-#define tas(ptr) (xchg((ptr),1))
-
-struct __xchg_dummy { unsigned long a[100]; };
-#define __xg(x) ((struct __xchg_dummy *)(x))
-
-
-/*
- * The semantics of CMPXCHG8B are a bit strange, this is why
- * there is a loop and the loading of %%eax and %%edx has to
- * be inside. This inlines well in most cases, the cached
- * cost is around ~38 cycles. (in the future we might want
- * to do an SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
- * might have an implicit FPU-save as a cost, so it's not
- * clear which path to go.)
- *
- * cmpxchg8b must be used with the lock prefix here to allow
- * the instruction to be executed atomically, see page 3-102
- * of the instruction set reference 24319102.pdf. We need
- * the reader side to see the coherent 64bit value.
- */
-static inline void __set_64bit (unsigned long long * ptr,
- unsigned int low, unsigned int high)
-{
- __asm__ __volatile__ (
- "\n1:\t"
- "movl (%0), %%eax\n\t"
- "movl 4(%0), %%edx\n\t"
- "lock cmpxchg8b (%0)\n\t"
- "jnz 1b"
- : /* no outputs */
- : "D"(ptr),
- "b"(low),
- "c"(high)
- : "ax","dx","memory");
-}
-
-static inline void __set_64bit_constant (unsigned long long *ptr,
- unsigned long long value)
-{
- __set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
-}
-#define ll_low(x) *(((unsigned int*)&(x))+0)
-#define ll_high(x) *(((unsigned int*)&(x))+1)
-
-static inline void __set_64bit_var (unsigned long long *ptr,
- unsigned long long value)
-{
- __set_64bit(ptr,ll_low(value), ll_high(value));
-}
-
-#define set_64bit(ptr,value) \
-(__builtin_constant_p(value) ? \
- __set_64bit_constant(ptr, value) : \
- __set_64bit_var(ptr, value) )
-
-#define _set_64bit(ptr,value) \
-(__builtin_constant_p(value) ? \
- __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
- __set_64bit(ptr, ll_low(value), ll_high(value)) )
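/* Usage sketch (some_gdt and new_desc are assumed names): publishing a
 * 64-bit descriptor through set_64bit() means a concurrent reader, or the
 * CPU walking the table, never observes a torn half-written entry:
 *
 *	set_64bit((unsigned long long *)&some_gdt[idx], new_desc);
 */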
-
-/*
- * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
- * Note 2: xchg has side effect, so that attribute volatile is necessary,
- * but generally the primitive is invalid, *ptr is output argument. --ANK
- */
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
-{
- switch (size) {
- case 1:
- __asm__ __volatile__("xchgb %b0,%1"
- :"=q" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- case 2:
- __asm__ __volatile__("xchgw %w0,%1"
- :"=r" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- case 4:
- __asm__ __volatile__("xchgl %0,%1"
- :"=r" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- }
- return x;
-}
-
-/*
- * Atomic compare and exchange. Compare OLD with MEM, if identical,
- * store NEW in MEM. Return the initial value in MEM. Success is
- * indicated by comparing RETURN with OLD.
- */
-
-#ifdef CONFIG_X86_CMPXCHG
-#define __HAVE_ARCH_CMPXCHG 1
-#endif
-
-static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
- unsigned long new, int size)
-{
- unsigned long prev;
- switch (size) {
- case 1:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
- : "=a"(prev)
- : "q"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- case 2:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
- : "=a"(prev)
- : "q"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- case 4:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
- : "=a"(prev)
- : "q"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- }
- return old;
-}
-
-#define cmpxchg(ptr,o,n)\
- ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
- (unsigned long)(n),sizeof(*(ptr))))
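/*
 * Sketch of the classic cmpxchg() retry loop; success is detected by
 * comparing the returned value with the expected old value, as the
 * comment above describes. Names are illustrative.
 */
static inline void atomic_add_ulong(volatile unsigned long *v,
                                    unsigned long inc)
{
	unsigned long old;
	do {
		old = *v;
	} while (cmpxchg(v, old, old + inc) != old);	/* lost a race: retry */
}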
-
-#ifdef __KERNEL__
-struct alt_instr {
- __u8 *instr; /* original instruction */
- __u8 *replacement;
- __u8 cpuid; /* cpuid bit set for replacement */
- __u8 instrlen; /* length of original instruction */
- __u8 replacementlen; /* length of new instruction, <= instrlen */
- __u8 pad;
-};
-#endif
-
-/*
- * Alternative instructions for different CPU types or capabilities.
- *
- * This allows optimized instructions to be used even on generic binary
- * kernels.
- *
- * The length of oldinstr must be greater than or equal to the length
- * of newinstr; it can be padded with nops as needed.
- *
- * For non-barrier-like inlines please define new variants
- * without the volatile and memory clobber.
- */
-#define alternative(oldinstr, newinstr, feature) \
- asm volatile ("661:\n\t" oldinstr "\n662:\n" \
- ".section .altinstructions,\"a\"\n" \
- " .align 4\n" \
- " .long 661b\n" /* label */ \
- " .long 663f\n" /* new instruction */
\
- " .byte %c0\n" /* feature bit */ \
- " .byte 662b-661b\n" /* sourcelen */ \
- " .byte 664f-663f\n" /* replacementlen */ \
- ".previous\n"
\
- ".section .altinstr_replacement,\"ax\"\n"
\
- "663:\n\t" newinstr "\n664:\n" /* replacement */ \
- ".previous" :: "i" (feature) : "memory")
-
-/*
- * Alternative inline assembly with input.
- *
- * Peculiarities:
- * No memory clobber here.
- * Argument numbers start with 1.
- * It is best to use constraints that are fixed size (like (%1) ... "r").
- * If you use variable-sized constraints like "m" or "g" in the
- * replacement, make sure to pad to the worst-case length.
- */
-#define alternative_input(oldinstr, newinstr, feature, input...) \
- asm volatile ("661:\n\t" oldinstr "\n662:\n" \
- ".section .altinstructions,\"a\"\n" \
- " .align 4\n" \
- " .long 661b\n" /* label */ \
- " .long 663f\n" /* new instruction */ \
- " .byte %c0\n" /* feature bit */ \
- " .byte 662b-661b\n" /* sourcelen */ \
- " .byte 664f-663f\n" /* replacementlen */ \
- ".previous\n" \
- ".section .altinstr_replacement,\"ax\"\n" \
- "663:\n\t" newinstr "\n664:\n" /* replacement */ \
- ".previous" :: "i" (feature), ##input)
-
-/*
- * Force strict CPU ordering.
- * And yes, this is required on UP too when we're talking
- * to devices.
- *
- * For now, "wmb()" doesn't actually do anything, as all
- * Intel CPUs follow what Intel calls a *Processor Order*,
- * in which all writes are seen in program order even
- * outside the CPU.
- *
- * I expect future Intel CPUs to have a weaker ordering,
- * but I'd also expect them to finally get their act together
- * and add some real memory barriers if so.
- *
- * Some non-Intel clones support out-of-order stores; wmb() ceases to be
- * a no-op for these.
- */
-
-
-/*
- * Actually only lfence would be needed for mb() because all stores done
- * by the kernel should be already ordered. But keep a full barrier for now.
- */
-
-#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
-#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
-
-/**
- * read_barrier_depends - Flush all pending reads that subsequent reads
- * depend on.
- *
- * No data-dependent reads from memory-like regions are ever reordered
- * over this barrier. All reads preceding this primitive are guaranteed
- * to access memory (but not necessarily other CPUs' caches) before any
- * reads following this primitive that depend on the data returned by
- * any of the preceding reads. This primitive is much lighter weight than
- * rmb() on most CPUs, and is never heavier weight than
- * rmb().
- *
- * These ordering constraints are respected by both the local CPU
- * and the compiler.
- *
- * Ordering is not guaranteed by anything other than these primitives,
- * not even by data dependencies. See the documentation for
- * memory_barrier() for examples and URLs to more information.
- *
- * For example, the following code would force ordering (the initial
- * value of "a" is zero, "b" is one, and "p" is "&a"):
- *
- * <programlisting>
- * CPU 0 CPU 1
- *
- * b = 2;
- * memory_barrier();
- * p = &b; q = p;
- * read_barrier_depends();
- * d = *q;
- * </programlisting>
- *
- * because the read of "*q" depends on the read of "p" and these
- * two reads are separated by a read_barrier_depends(). However,
- * the following code, with the same initial values for "a" and "b":
- *
- * <programlisting>
- * CPU 0 CPU 1
- *
- * a = 2;
- * memory_barrier();
- * b = 3; y = b;
- * read_barrier_depends();
- * x = a;
- * </programlisting>
- *
- * does not enforce ordering, since there is no data dependency between
- * the read of "a" and the read of "b". Therefore, on some CPUs, such
- * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
- * in cases like this, where there are no data dependencies.
- **/
-
-#define read_barrier_depends() do { } while(0)
-
-#ifdef CONFIG_X86_OOSTORE
-/* Actually there are no OOO-store-capable CPUs for now that do SSE,
- but allow for the possibility already. */
-#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
-#else
-#define wmb() __asm__ __volatile__ ("": : :"memory")
-#endif
-
-#ifdef CONFIG_SMP
-#define smp_mb() mb()
-#define smp_rmb() rmb()
-#define smp_wmb() wmb()
-#define smp_read_barrier_depends() read_barrier_depends()
-#define set_mb(var, value) do { xchg(&var, value); } while (0)
-#else
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define smp_read_barrier_depends() do { } while(0)
-#define set_mb(var, value) do { var = value; barrier(); } while (0)
-#endif
-
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
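/*
 * Minimal producer/consumer sketch showing how smp_wmb() and smp_rmb()
 * pair up; the data and ready variables are illustrative.
 */
static int sketch_data, sketch_ready;

static void sketch_producer(void)
{
	sketch_data = 42;
	smp_wmb();		/* publish data before the ready flag */
	sketch_ready = 1;
}

static int sketch_consumer(void)
{
	if (!sketch_ready)
		return -1;
	smp_rmb();		/* order the flag read before the data read */
	return sketch_data;
}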
-
-/* interrupt control.. */
-
-/*
- * The use of 'barrier' in the following reflects these macros' use as
- * local-lock operations. Reentrancy must be prevented (e.g., __cli())
- * /before/ the following critical operations are executed. All critical
- * operations must complete /before/ reentrancy is permitted (e.g.,
- * __sti()). The Alpha architecture also includes these barriers, for
- * example.
- */
-
-#define __cli() \
-do { \
- HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask = 1; \
- barrier(); \
-} while (0)
-
-#define __sti() \
-do { \
- shared_info_t *_shared = HYPERVISOR_shared_info; \
- barrier(); \
- _shared->vcpu_data[0].evtchn_upcall_mask = 0; \
- barrier(); /* unmask then check (avoid races) */ \
- if ( unlikely(_shared->vcpu_data[0].evtchn_upcall_pending) ) \
- force_evtchn_callback(); \
-} while (0)
-
-#define __save_flags(x) \
-do { \
- (x) = HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask; \
-} while (0)
-
-#define __restore_flags(x) \
-do { \
- shared_info_t *_shared = HYPERVISOR_shared_info; \
- barrier(); \
- if ( (_shared->vcpu_data[0].evtchn_upcall_mask = (x)) == 0 ) { \
- barrier(); /* unmask then check (avoid races) */ \
- if ( unlikely(_shared->vcpu_data[0].evtchn_upcall_pending) ) \
- force_evtchn_callback(); \
- } \
-} while (0)
-
-#define safe_halt() ((void)0)
-
-#define __save_and_cli(x) \
-do { \
- (x) = HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask; \
- HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask = 1; \
- barrier(); \
-} while (0)
-
-#define __save_and_sti(x) \
-do { \
- shared_info_t *_shared = HYPERVISOR_shared_info; \
- barrier(); \
- (x) = _shared->vcpu_data[0].evtchn_upcall_mask; \
- _shared->vcpu_data[0].evtchn_upcall_mask = 0; \
- barrier(); /* unmask then check (avoid races) */ \
- if ( unlikely(_shared->vcpu_data[0].evtchn_upcall_pending) ) \
- force_evtchn_callback(); \
-} while (0)
-
-#define local_irq_save(x) __save_and_cli(x)
-#define local_irq_restore(x) __restore_flags(x)
-#define local_save_flags(x) __save_flags(x)
-#define local_irq_disable() __cli()
-#define local_irq_enable() __sti()
-
-#define irqs_disabled() HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask
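/*
 * Typical critical-section shape for the macros above (a sketch; the
 * counter is illustrative). On Xen this masks the event-channel upcall
 * rather than executing cli/sti.
 */
static inline void guarded_increment(unsigned long *counter)
{
	unsigned long flags;
	local_irq_save(flags);	/* sets evtchn_upcall_mask */
	(*counter)++;		/* no event upcall can interleave here */
	local_irq_restore(flags);
}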
-
-/*
- * disable hlt during certain critical i/o operations
- */
-#define HAVE_DISABLE_HLT
-void disable_hlt(void);
-void enable_hlt(void);
-
-extern int es7000_plat;
-void cpu_idle_wait(void);
-
-#endif
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/tlbflush.h
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/tlbflush.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,109 +0,0 @@
-#ifndef _I386_TLBFLUSH_H
-#define _I386_TLBFLUSH_H
-
-#include <linux/config.h>
-#include <linux/mm.h>
-#include <asm/processor.h>
-
-#define __flush_tlb() xen_tlb_flush()
-#define __flush_tlb_global() xen_tlb_flush()
-
-extern unsigned long pgkern_mask;
-
-# define __flush_tlb_all() \
- do { \
- if (cpu_has_pge) \
- __flush_tlb_global(); \
- else \
- __flush_tlb(); \
- } while (0)
-
-#define cpu_has_invlpg (boot_cpu_data.x86 > 3)
-
-#define __flush_tlb_single(addr) xen_invlpg(addr)
-
-#define __flush_tlb_one(addr) __flush_tlb_single(addr)
-
-/*
- * TLB flushing:
- *
- * - flush_tlb() flushes the current mm struct TLBs
- * - flush_tlb_all() flushes all processes' TLBs
- * - flush_tlb_mm(mm) flushes the specified mm context's TLBs
- * - flush_tlb_page(vma, vmaddr) flushes one page
- * - flush_tlb_range(vma, start, end) flushes a range of pages
- * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
- * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
- *
- * ..but the i386 has somewhat limited tlb flushing capabilities,
- * and page-granular flushes are available only on i486 and up.
- */
-
-#ifndef CONFIG_SMP
-
-#define flush_tlb() __flush_tlb()
-#define flush_tlb_all() __flush_tlb_all()
-#define local_flush_tlb() __flush_tlb()
-
-static inline void flush_tlb_mm(struct mm_struct *mm)
-{
- if (mm == current->active_mm)
- __flush_tlb();
-}
-
-static inline void flush_tlb_page(struct vm_area_struct *vma,
- unsigned long addr)
-{
- if (vma->vm_mm == current->active_mm)
- __flush_tlb_one(addr);
-}
-
-static inline void flush_tlb_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
-{
- if (vma->vm_mm == current->active_mm)
- __flush_tlb();
-}
-
-#else
-
-#include <asm/smp.h>
-
-#define local_flush_tlb() \
- __flush_tlb()
-
-extern void flush_tlb_all(void);
-extern void flush_tlb_current_task(void);
-extern void flush_tlb_mm(struct mm_struct *);
-extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
-
-#define flush_tlb() flush_tlb_current_task()
-
-static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end)
-{
- flush_tlb_mm(vma->vm_mm);
-}
-
-#define TLBSTATE_OK 1
-#define TLBSTATE_LAZY 2
-
-struct tlb_state
-{
- struct mm_struct *active_mm;
- int state;
- char __cacheline_padding[L1_CACHE_BYTES-8];
-};
-DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
-
-
-#endif
-
-#define flush_tlb_kernel_range(start, end) flush_tlb_all()
-
-static inline void flush_tlb_pgtables(struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
- /* i386 does not keep any page table caches in TLB */
-}
-
-#endif /* _I386_TLBFLUSH_H */
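/*
 * Sketch of the single-page flush path described above (caller name is
 * illustrative): after modifying one PTE in the current mm, a
 * page-granular flush suffices and maps onto xen_invlpg().
 */
static inline void after_pte_update(struct vm_area_struct *vma,
                                    unsigned long addr)
{
	flush_tlb_page(vma, addr);
}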
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/vga.h
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/vga.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,20 +0,0 @@
-/*
- * Access to VGA videoram
- *
- * (c) 1998 Martin Mares <mj@xxxxxx>
- */
-
-#ifndef _LINUX_ASM_VGA_H_
-#define _LINUX_ASM_VGA_H_
-
-/*
- * On the PC, we can just recalculate addresses and then
- * access the videoram directly without any black magic.
- */
-
-#define VGA_MAP_MEM(x) (unsigned long)isa_bus_to_virt(x)
-
-#define vga_readb(x) (*(x))
-#define vga_writeb(x,y) (*(y) = (x))
-
-#endif
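/*
 * Sketch of the intended use (the address and character are
 * illustrative): map the legacy text-mode buffer and store one byte
 * through it.
 */
static inline void vga_poke(void)
{
	char *base = (char *)VGA_MAP_MEM(0xb8000);	/* VGA text buffer */
	vga_writeb('X', base);				/* expands to *base = 'X' */
}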
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/xor.h
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/xor.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,884 +0,0 @@
-/*
- * include/asm-i386/xor.h
- *
- * Optimized RAID-5 checksumming functions for MMX and SSE.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * You should have received a copy of the GNU General Public License
- * (for example /usr/src/linux/COPYING); if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-/*
- * High-speed RAID5 checksumming functions utilizing MMX instructions.
- * Copyright (C) 1998 Ingo Molnar.
- */
-
-#define LD(x,y) " movq 8*("#x")(%1), %%mm"#y" ;\n"
-#define ST(x,y) " movq %%mm"#y", 8*("#x")(%1) ;\n"
-#define XO1(x,y) " pxor 8*("#x")(%2), %%mm"#y" ;\n"
-#define XO2(x,y) " pxor 8*("#x")(%3), %%mm"#y" ;\n"
-#define XO3(x,y) " pxor 8*("#x")(%4), %%mm"#y" ;\n"
-#define XO4(x,y) " pxor 8*("#x")(%5), %%mm"#y" ;\n"
-
-#include <asm/i387.h>
-
-static void
-xor_pII_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
-{
- unsigned long lines = bytes >> 7;
-
- kernel_fpu_begin();
-
- __asm__ __volatile__ (
-#undef BLOCK
-#define BLOCK(i) \
- LD(i,0) \
- LD(i+1,1) \
- LD(i+2,2) \
- LD(i+3,3) \
- XO1(i,0) \
- ST(i,0) \
- XO1(i+1,1) \
- ST(i+1,1) \
- XO1(i+2,2) \
- ST(i+2,2) \
- XO1(i+3,3) \
- ST(i+3,3)
-
- " .align 32 ;\n"
- " 1: ;\n"
-
- BLOCK(0)
- BLOCK(4)
- BLOCK(8)
- BLOCK(12)
-
- " addl $128, %1 ;\n"
- " addl $128, %2 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- : "+r" (lines),
- "+r" (p1), "+r" (p2)
- :
- : "memory");
-
- kernel_fpu_end();
-}
-
-static void
-xor_pII_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3)
-{
- unsigned long lines = bytes >> 7;
-
- kernel_fpu_begin();
-
- __asm__ __volatile__ (
-#undef BLOCK
-#define BLOCK(i) \
- LD(i,0) \
- LD(i+1,1) \
- LD(i+2,2) \
- LD(i+3,3) \
- XO1(i,0) \
- XO1(i+1,1) \
- XO1(i+2,2) \
- XO1(i+3,3) \
- XO2(i,0) \
- ST(i,0) \
- XO2(i+1,1) \
- ST(i+1,1) \
- XO2(i+2,2) \
- ST(i+2,2) \
- XO2(i+3,3) \
- ST(i+3,3)
-
- " .align 32 ;\n"
- " 1: ;\n"
-
- BLOCK(0)
- BLOCK(4)
- BLOCK(8)
- BLOCK(12)
-
- " addl $128, %1 ;\n"
- " addl $128, %2 ;\n"
- " addl $128, %3 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- : "+r" (lines),
- "+r" (p1), "+r" (p2), "+r" (p3)
- :
- : "memory");
-
- kernel_fpu_end();
-}
-
-static void
-xor_pII_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4)
-{
- unsigned long lines = bytes >> 7;
-
- kernel_fpu_begin();
-
- __asm__ __volatile__ (
-#undef BLOCK
-#define BLOCK(i) \
- LD(i,0) \
- LD(i+1,1) \
- LD(i+2,2) \
- LD(i+3,3) \
- XO1(i,0) \
- XO1(i+1,1) \
- XO1(i+2,2) \
- XO1(i+3,3) \
- XO2(i,0) \
- XO2(i+1,1) \
- XO2(i+2,2) \
- XO2(i+3,3) \
- XO3(i,0) \
- ST(i,0) \
- XO3(i+1,1) \
- ST(i+1,1) \
- XO3(i+2,2) \
- ST(i+2,2) \
- XO3(i+3,3) \
- ST(i+3,3)
-
- " .align 32 ;\n"
- " 1: ;\n"
-
- BLOCK(0)
- BLOCK(4)
- BLOCK(8)
- BLOCK(12)
-
- " addl $128, %1 ;\n"
- " addl $128, %2 ;\n"
- " addl $128, %3 ;\n"
- " addl $128, %4 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- : "+r" (lines),
- "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
- :
- : "memory");
-
- kernel_fpu_end();
-}
-
-
-static void
-xor_pII_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4, unsigned long *p5)
-{
- unsigned long lines = bytes >> 7;
-
- kernel_fpu_begin();
-
- /* Make sure GCC forgets anything it knows about p4 or p5,
- such that it won't pass to the asm volatile below a
- register that is shared with any other variable. That's
- because we modify p4 and p5 there, but we can't mark them
- as read/write, otherwise we'd overflow the 10-asm-operands
- limit of GCC < 3.1. */
- __asm__ ("" : "+r" (p4), "+r" (p5));
-
- __asm__ __volatile__ (
-#undef BLOCK
-#define BLOCK(i) \
- LD(i,0) \
- LD(i+1,1) \
- LD(i+2,2) \
- LD(i+3,3) \
- XO1(i,0) \
- XO1(i+1,1) \
- XO1(i+2,2) \
- XO1(i+3,3) \
- XO2(i,0) \
- XO2(i+1,1) \
- XO2(i+2,2) \
- XO2(i+3,3) \
- XO3(i,0) \
- XO3(i+1,1) \
- XO3(i+2,2) \
- XO3(i+3,3) \
- XO4(i,0) \
- ST(i,0) \
- XO4(i+1,1) \
- ST(i+1,1) \
- XO4(i+2,2) \
- ST(i+2,2) \
- XO4(i+3,3) \
- ST(i+3,3)
-
- " .align 32 ;\n"
- " 1: ;\n"
-
- BLOCK(0)
- BLOCK(4)
- BLOCK(8)
- BLOCK(12)
-
- " addl $128, %1 ;\n"
- " addl $128, %2 ;\n"
- " addl $128, %3 ;\n"
- " addl $128, %4 ;\n"
- " addl $128, %5 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- : "+r" (lines),
- "+r" (p1), "+r" (p2), "+r" (p3)
- : "r" (p4), "r" (p5)
- : "memory");
-
- /* p4 and p5 were modified, and now the variables are dead.
- Clobber them just to be sure nobody does something stupid
- like assuming they have some legal value. */
- __asm__ ("" : "=r" (p4), "=r" (p5));
-
- kernel_fpu_end();
-}
-
-#undef LD
-#undef XO1
-#undef XO2
-#undef XO3
-#undef XO4
-#undef ST
-#undef BLOCK
-
-static void
-xor_p5_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
-{
- unsigned long lines = bytes >> 6;
-
- kernel_fpu_begin();
-
- __asm__ __volatile__ (
- " .align 32 ;\n"
- " 1: ;\n"
- " movq (%1), %%mm0 ;\n"
- " movq 8(%1), %%mm1 ;\n"
- " pxor (%2), %%mm0 ;\n"
- " movq 16(%1), %%mm2 ;\n"
- " movq %%mm0, (%1) ;\n"
- " pxor 8(%2), %%mm1 ;\n"
- " movq 24(%1), %%mm3 ;\n"
- " movq %%mm1, 8(%1) ;\n"
- " pxor 16(%2), %%mm2 ;\n"
- " movq 32(%1), %%mm4 ;\n"
- " movq %%mm2, 16(%1) ;\n"
- " pxor 24(%2), %%mm3 ;\n"
- " movq 40(%1), %%mm5 ;\n"
- " movq %%mm3, 24(%1) ;\n"
- " pxor 32(%2), %%mm4 ;\n"
- " movq 48(%1), %%mm6 ;\n"
- " movq %%mm4, 32(%1) ;\n"
- " pxor 40(%2), %%mm5 ;\n"
- " movq 56(%1), %%mm7 ;\n"
- " movq %%mm5, 40(%1) ;\n"
- " pxor 48(%2), %%mm6 ;\n"
- " pxor 56(%2), %%mm7 ;\n"
- " movq %%mm6, 48(%1) ;\n"
- " movq %%mm7, 56(%1) ;\n"
-
- " addl $64, %1 ;\n"
- " addl $64, %2 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- : "+r" (lines),
- "+r" (p1), "+r" (p2)
- :
- : "memory");
-
- kernel_fpu_end();
-}
-
-static void
-xor_p5_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3)
-{
- unsigned long lines = bytes >> 6;
-
- kernel_fpu_begin();
-
- __asm__ __volatile__ (
- " .align 32,0x90 ;\n"
- " 1: ;\n"
- " movq (%1), %%mm0 ;\n"
- " movq 8(%1), %%mm1 ;\n"
- " pxor (%2), %%mm0 ;\n"
- " movq 16(%1), %%mm2 ;\n"
- " pxor 8(%2), %%mm1 ;\n"
- " pxor (%3), %%mm0 ;\n"
- " pxor 16(%2), %%mm2 ;\n"
- " movq %%mm0, (%1) ;\n"
- " pxor 8(%3), %%mm1 ;\n"
- " pxor 16(%3), %%mm2 ;\n"
- " movq 24(%1), %%mm3 ;\n"
- " movq %%mm1, 8(%1) ;\n"
- " movq 32(%1), %%mm4 ;\n"
- " movq 40(%1), %%mm5 ;\n"
- " pxor 24(%2), %%mm3 ;\n"
- " movq %%mm2, 16(%1) ;\n"
- " pxor 32(%2), %%mm4 ;\n"
- " pxor 24(%3), %%mm3 ;\n"
- " pxor 40(%2), %%mm5 ;\n"
- " movq %%mm3, 24(%1) ;\n"
- " pxor 32(%3), %%mm4 ;\n"
- " pxor 40(%3), %%mm5 ;\n"
- " movq 48(%1), %%mm6 ;\n"
- " movq %%mm4, 32(%1) ;\n"
- " movq 56(%1), %%mm7 ;\n"
- " pxor 48(%2), %%mm6 ;\n"
- " movq %%mm5, 40(%1) ;\n"
- " pxor 56(%2), %%mm7 ;\n"
- " pxor 48(%3), %%mm6 ;\n"
- " pxor 56(%3), %%mm7 ;\n"
- " movq %%mm6, 48(%1) ;\n"
- " movq %%mm7, 56(%1) ;\n"
-
- " addl $64, %1 ;\n"
- " addl $64, %2 ;\n"
- " addl $64, %3 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- : "+r" (lines),
- "+r" (p1), "+r" (p2), "+r" (p3)
- :
- : "memory" );
-
- kernel_fpu_end();
-}
-
-static void
-xor_p5_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4)
-{
- unsigned long lines = bytes >> 6;
-
- kernel_fpu_begin();
-
- __asm__ __volatile__ (
- " .align 32,0x90 ;\n"
- " 1: ;\n"
- " movq (%1), %%mm0 ;\n"
- " movq 8(%1), %%mm1 ;\n"
- " pxor (%2), %%mm0 ;\n"
- " movq 16(%1), %%mm2 ;\n"
- " pxor 8(%2), %%mm1 ;\n"
- " pxor (%3), %%mm0 ;\n"
- " pxor 16(%2), %%mm2 ;\n"
- " pxor 8(%3), %%mm1 ;\n"
- " pxor (%4), %%mm0 ;\n"
- " movq 24(%1), %%mm3 ;\n"
- " pxor 16(%3), %%mm2 ;\n"
- " pxor 8(%4), %%mm1 ;\n"
- " movq %%mm0, (%1) ;\n"
- " movq 32(%1), %%mm4 ;\n"
- " pxor 24(%2), %%mm3 ;\n"
- " pxor 16(%4), %%mm2 ;\n"
- " movq %%mm1, 8(%1) ;\n"
- " movq 40(%1), %%mm5 ;\n"
- " pxor 32(%2), %%mm4 ;\n"
- " pxor 24(%3), %%mm3 ;\n"
- " movq %%mm2, 16(%1) ;\n"
- " pxor 40(%2), %%mm5 ;\n"
- " pxor 32(%3), %%mm4 ;\n"
- " pxor 24(%4), %%mm3 ;\n"
- " movq %%mm3, 24(%1) ;\n"
- " movq 56(%1), %%mm7 ;\n"
- " movq 48(%1), %%mm6 ;\n"
- " pxor 40(%3), %%mm5 ;\n"
- " pxor 32(%4), %%mm4 ;\n"
- " pxor 48(%2), %%mm6 ;\n"
- " movq %%mm4, 32(%1) ;\n"
- " pxor 56(%2), %%mm7 ;\n"
- " pxor 40(%4), %%mm5 ;\n"
- " pxor 48(%3), %%mm6 ;\n"
- " pxor 56(%3), %%mm7 ;\n"
- " movq %%mm5, 40(%1) ;\n"
- " pxor 48(%4), %%mm6 ;\n"
- " pxor 56(%4), %%mm7 ;\n"
- " movq %%mm6, 48(%1) ;\n"
- " movq %%mm7, 56(%1) ;\n"
-
- " addl $64, %1 ;\n"
- " addl $64, %2 ;\n"
- " addl $64, %3 ;\n"
- " addl $64, %4 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- : "+r" (lines),
- "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
- :
- : "memory");
-
- kernel_fpu_end();
-}
-
-static void
-xor_p5_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4, unsigned long *p5)
-{
- unsigned long lines = bytes >> 6;
-
- kernel_fpu_begin();
-
- /* Make sure GCC forgets anything it knows about p4 or p5,
- such that it won't pass to the asm volatile below a
- register that is shared with any other variable. That's
- because we modify p4 and p5 there, but we can't mark them
- as read/write, otherwise we'd overflow the 10-asm-operands
- limit of GCC < 3.1. */
- __asm__ ("" : "+r" (p4), "+r" (p5));
-
- __asm__ __volatile__ (
- " .align 32,0x90 ;\n"
- " 1: ;\n"
- " movq (%1), %%mm0 ;\n"
- " movq 8(%1), %%mm1 ;\n"
- " pxor (%2), %%mm0 ;\n"
- " pxor 8(%2), %%mm1 ;\n"
- " movq 16(%1), %%mm2 ;\n"
- " pxor (%3), %%mm0 ;\n"
- " pxor 8(%3), %%mm1 ;\n"
- " pxor 16(%2), %%mm2 ;\n"
- " pxor (%4), %%mm0 ;\n"
- " pxor 8(%4), %%mm1 ;\n"
- " pxor 16(%3), %%mm2 ;\n"
- " movq 24(%1), %%mm3 ;\n"
- " pxor (%5), %%mm0 ;\n"
- " pxor 8(%5), %%mm1 ;\n"
- " movq %%mm0, (%1) ;\n"
- " pxor 16(%4), %%mm2 ;\n"
- " pxor 24(%2), %%mm3 ;\n"
- " movq %%mm1, 8(%1) ;\n"
- " pxor 16(%5), %%mm2 ;\n"
- " pxor 24(%3), %%mm3 ;\n"
- " movq 32(%1), %%mm4 ;\n"
- " movq %%mm2, 16(%1) ;\n"
- " pxor 24(%4), %%mm3 ;\n"
- " pxor 32(%2), %%mm4 ;\n"
- " movq 40(%1), %%mm5 ;\n"
- " pxor 24(%5), %%mm3 ;\n"
- " pxor 32(%3), %%mm4 ;\n"
- " pxor 40(%2), %%mm5 ;\n"
- " movq %%mm3, 24(%1) ;\n"
- " pxor 32(%4), %%mm4 ;\n"
- " pxor 40(%3), %%mm5 ;\n"
- " movq 48(%1), %%mm6 ;\n"
- " movq 56(%1), %%mm7 ;\n"
- " pxor 32(%5), %%mm4 ;\n"
- " pxor 40(%4), %%mm5 ;\n"
- " pxor 48(%2), %%mm6 ;\n"
- " pxor 56(%2), %%mm7 ;\n"
- " movq %%mm4, 32(%1) ;\n"
- " pxor 48(%3), %%mm6 ;\n"
- " pxor 56(%3), %%mm7 ;\n"
- " pxor 40(%5), %%mm5 ;\n"
- " pxor 48(%4), %%mm6 ;\n"
- " pxor 56(%4), %%mm7 ;\n"
- " movq %%mm5, 40(%1) ;\n"
- " pxor 48(%5), %%mm6 ;\n"
- " pxor 56(%5), %%mm7 ;\n"
- " movq %%mm6, 48(%1) ;\n"
- " movq %%mm7, 56(%1) ;\n"
-
- " addl $64, %1 ;\n"
- " addl $64, %2 ;\n"
- " addl $64, %3 ;\n"
- " addl $64, %4 ;\n"
- " addl $64, %5 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- : "+r" (lines),
- "+r" (p1), "+r" (p2), "+r" (p3)
- : "r" (p4), "r" (p5)
- : "memory");
-
- /* p4 and p5 were modified, and now the variables are dead.
- Clobber them just to be sure nobody does something stupid
- like assuming they have some legal value. */
- __asm__ ("" : "=r" (p4), "=r" (p5));
-
- kernel_fpu_end();
-}
-
-static struct xor_block_template xor_block_pII_mmx = {
- .name = "pII_mmx",
- .do_2 = xor_pII_mmx_2,
- .do_3 = xor_pII_mmx_3,
- .do_4 = xor_pII_mmx_4,
- .do_5 = xor_pII_mmx_5,
-};
-
-static struct xor_block_template xor_block_p5_mmx = {
- .name = "p5_mmx",
- .do_2 = xor_p5_mmx_2,
- .do_3 = xor_p5_mmx_3,
- .do_4 = xor_p5_mmx_4,
- .do_5 = xor_p5_mmx_5,
-};
-
-/*
- * Cache avoiding checksumming functions utilizing KNI instructions
- * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo)
- */
-
-#define XMMS_SAVE do { \
- preempt_disable(); \
- if (!(current_thread_info()->status & TS_USEDFPU)) \
- clts(); \
- __asm__ __volatile__ ( \
- "movups %%xmm0,(%1) ;\n\t" \
- "movups %%xmm1,0x10(%1) ;\n\t" \
- "movups %%xmm2,0x20(%1) ;\n\t" \
- "movups %%xmm3,0x30(%1) ;\n\t" \
- : "=&r" (cr0) \
- : "r" (xmm_save) \
- : "memory"); \
-} while(0)
-
-#define XMMS_RESTORE do { \
- __asm__ __volatile__ ( \
- "sfence ;\n\t" \
- "movups (%1),%%xmm0 ;\n\t" \
- "movups 0x10(%1),%%xmm1 ;\n\t" \
- "movups 0x20(%1),%%xmm2 ;\n\t" \
- "movups 0x30(%1),%%xmm3 ;\n\t" \
- : \
- : "r" (cr0), "r" (xmm_save) \
- : "memory"); \
- if (!(current_thread_info()->status & TS_USEDFPU)) \
- stts(); \
- preempt_enable(); \
-} while(0)
-
-#define ALIGN16 __attribute__((aligned(16)))
-
-#define OFFS(x) "16*("#x")"
-#define PF_OFFS(x) "256+16*("#x")"
-#define PF0(x) " prefetchnta "PF_OFFS(x)"(%1) ;\n"
-#define LD(x,y) " movaps "OFFS(x)"(%1), %%xmm"#y" ;\n"
-#define ST(x,y) " movaps %%xmm"#y", "OFFS(x)"(%1) ;\n"
-#define PF1(x) " prefetchnta "PF_OFFS(x)"(%2) ;\n"
-#define PF2(x) " prefetchnta "PF_OFFS(x)"(%3) ;\n"
-#define PF3(x) " prefetchnta "PF_OFFS(x)"(%4) ;\n"
-#define PF4(x) " prefetchnta "PF_OFFS(x)"(%5) ;\n"
-#define PF5(x) " prefetchnta "PF_OFFS(x)"(%6) ;\n"
-#define XO1(x,y) " xorps "OFFS(x)"(%2), %%xmm"#y" ;\n"
-#define XO2(x,y) " xorps "OFFS(x)"(%3), %%xmm"#y" ;\n"
-#define XO3(x,y) " xorps "OFFS(x)"(%4), %%xmm"#y" ;\n"
-#define XO4(x,y) " xorps "OFFS(x)"(%5), %%xmm"#y" ;\n"
-#define XO5(x,y) " xorps "OFFS(x)"(%6), %%xmm"#y" ;\n"
-
-
-static void
-xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
-{
- unsigned long lines = bytes >> 8;
- char xmm_save[16*4] ALIGN16;
- int cr0;
-
- XMMS_SAVE;
-
- __asm__ __volatile__ (
-#undef BLOCK
-#define BLOCK(i) \
- LD(i,0) \
- LD(i+1,1) \
- PF1(i) \
- PF1(i+2) \
- LD(i+2,2) \
- LD(i+3,3) \
- PF0(i+4) \
- PF0(i+6) \
- XO1(i,0) \
- XO1(i+1,1) \
- XO1(i+2,2) \
- XO1(i+3,3) \
- ST(i,0) \
- ST(i+1,1) \
- ST(i+2,2) \
- ST(i+3,3) \
-
-
- PF0(0)
- PF0(2)
-
- " .align 32 ;\n"
- " 1: ;\n"
-
- BLOCK(0)
- BLOCK(4)
- BLOCK(8)
- BLOCK(12)
-
- " addl $256, %1 ;\n"
- " addl $256, %2 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- : "+r" (lines),
- "+r" (p1), "+r" (p2)
- :
- : "memory");
-
- XMMS_RESTORE;
-}
-
-static void
-xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3)
-{
- unsigned long lines = bytes >> 8;
- char xmm_save[16*4] ALIGN16;
- int cr0;
-
- XMMS_SAVE;
-
- __asm__ __volatile__ (
-#undef BLOCK
-#define BLOCK(i) \
- PF1(i) \
- PF1(i+2) \
- LD(i,0) \
- LD(i+1,1) \
- LD(i+2,2) \
- LD(i+3,3) \
- PF2(i) \
- PF2(i+2) \
- PF0(i+4) \
- PF0(i+6) \
- XO1(i,0) \
- XO1(i+1,1) \
- XO1(i+2,2) \
- XO1(i+3,3) \
- XO2(i,0) \
- XO2(i+1,1) \
- XO2(i+2,2) \
- XO2(i+3,3) \
- ST(i,0) \
- ST(i+1,1) \
- ST(i+2,2) \
- ST(i+3,3) \
-
-
- PF0(0)
- PF0(2)
-
- " .align 32 ;\n"
- " 1: ;\n"
-
- BLOCK(0)
- BLOCK(4)
- BLOCK(8)
- BLOCK(12)
-
- " addl $256, %1 ;\n"
- " addl $256, %2 ;\n"
- " addl $256, %3 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- : "+r" (lines),
- "+r" (p1), "+r"(p2), "+r"(p3)
- :
- : "memory" );
-
- XMMS_RESTORE;
-}
-
-static void
-xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4)
-{
- unsigned long lines = bytes >> 8;
- char xmm_save[16*4] ALIGN16;
- int cr0;
-
- XMMS_SAVE;
-
- __asm__ __volatile__ (
-#undef BLOCK
-#define BLOCK(i) \
- PF1(i) \
- PF1(i+2) \
- LD(i,0) \
- LD(i+1,1) \
- LD(i+2,2) \
- LD(i+3,3) \
- PF2(i) \
- PF2(i+2) \
- XO1(i,0) \
- XO1(i+1,1) \
- XO1(i+2,2) \
- XO1(i+3,3) \
- PF3(i) \
- PF3(i+2) \
- PF0(i+4) \
- PF0(i+6) \
- XO2(i,0) \
- XO2(i+1,1) \
- XO2(i+2,2) \
- XO2(i+3,3) \
- XO3(i,0) \
- XO3(i+1,1) \
- XO3(i+2,2) \
- XO3(i+3,3) \
- ST(i,0) \
- ST(i+1,1) \
- ST(i+2,2) \
- ST(i+3,3) \
-
-
- PF0(0)
- PF0(2)
-
- " .align 32 ;\n"
- " 1: ;\n"
-
- BLOCK(0)
- BLOCK(4)
- BLOCK(8)
- BLOCK(12)
-
- " addl $256, %1 ;\n"
- " addl $256, %2 ;\n"
- " addl $256, %3 ;\n"
- " addl $256, %4 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- : "+r" (lines),
- "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
- :
- : "memory" );
-
- XMMS_RESTORE;
-}
-
-static void
-xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4, unsigned long *p5)
-{
- unsigned long lines = bytes >> 8;
- char xmm_save[16*4] ALIGN16;
- int cr0;
-
- XMMS_SAVE;
-
- /* Make sure GCC forgets anything it knows about p4 or p5,
- such that it won't pass to the asm volatile below a
- register that is shared with any other variable. That's
- because we modify p4 and p5 there, but we can't mark them
- as read/write, otherwise we'd overflow the 10-asm-operands
- limit of GCC < 3.1. */
- __asm__ ("" : "+r" (p4), "+r" (p5));
-
- __asm__ __volatile__ (
-#undef BLOCK
-#define BLOCK(i) \
- PF1(i) \
- PF1(i+2) \
- LD(i,0) \
- LD(i+1,1) \
- LD(i+2,2) \
- LD(i+3,3) \
- PF2(i) \
- PF2(i+2) \
- XO1(i,0) \
- XO1(i+1,1) \
- XO1(i+2,2) \
- XO1(i+3,3) \
- PF3(i) \
- PF3(i+2) \
- XO2(i,0) \
- XO2(i+1,1) \
- XO2(i+2,2) \
- XO2(i+3,3) \
- PF4(i) \
- PF4(i+2) \
- PF0(i+4) \
- PF0(i+6) \
- XO3(i,0) \
- XO3(i+1,1) \
- XO3(i+2,2) \
- XO3(i+3,3) \
- XO4(i,0) \
- XO4(i+1,1) \
- XO4(i+2,2) \
- XO4(i+3,3) \
- ST(i,0) \
- ST(i+1,1) \
- ST(i+2,2) \
- ST(i+3,3) \
-
-
- PF0(0)
- PF0(2)
-
- " .align 32 ;\n"
- " 1: ;\n"
-
- BLOCK(0)
- BLOCK(4)
- BLOCK(8)
- BLOCK(12)
-
- " addl $256, %1 ;\n"
- " addl $256, %2 ;\n"
- " addl $256, %3 ;\n"
- " addl $256, %4 ;\n"
- " addl $256, %5 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- : "+r" (lines),
- "+r" (p1), "+r" (p2), "+r" (p3)
- : "r" (p4), "r" (p5)
- : "memory");
-
- /* p4 and p5 were modified, and now the variables are dead.
- Clobber them just to be sure nobody does something stupid
- like assuming they have some legal value. */
- __asm__ ("" : "=r" (p4), "=r" (p5));
-
- XMMS_RESTORE;
-}
-
-static struct xor_block_template xor_block_pIII_sse = {
- .name = "pIII_sse",
- .do_2 = xor_sse_2,
- .do_3 = xor_sse_3,
- .do_4 = xor_sse_4,
- .do_5 = xor_sse_5,
-};
-
-/* Also try the generic routines. */
-#include <asm-generic/xor.h>
-
-#undef XOR_TRY_TEMPLATES
-#define XOR_TRY_TEMPLATES \
- do { \
- xor_speed(&xor_block_8regs); \
- xor_speed(&xor_block_8regs_p); \
- xor_speed(&xor_block_32regs); \
- xor_speed(&xor_block_32regs_p); \
- if (cpu_has_xmm) \
- xor_speed(&xor_block_pIII_sse); \
- if (cpu_has_mmx) { \
- xor_speed(&xor_block_pII_mmx); \
- xor_speed(&xor_block_p5_mmx); \
- } \
- } while (0)
-
-/* We force the use of the SSE xor block because it can write around L2.
- We may also be able to load into L1 only, depending on how the CPU
- deals with a load to a line that is being prefetched. */
-#define XOR_SELECT_TEMPLATE(FASTEST) \
- (cpu_has_xmm ? &xor_block_pIII_sse : FASTEST)
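/*
 * Plain-C reference for what every do_2 template above computes (a
 * sketch for clarity only; the asm variants exist purely for speed):
 * p1 ^= p2, treated as arrays of machine words.
 */
static void xor_ref_2(unsigned long bytes, unsigned long *p1,
                      unsigned long *p2)
{
	unsigned long i, words = bytes / sizeof(unsigned long);
	for (i = 0; i < words; i++)
		p1[i] ^= p2[i];
}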
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/include/asm-xen/balloon.h
--- a/linux-2.6.11-xen-sparse/include/asm-xen/balloon.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,51 +0,0 @@
-/******************************************************************************
- * balloon.h
- *
- * Xen balloon driver - enables returning/claiming memory to/from Xen.
- *
- * Copyright (c) 2003, B Dragovic
- * Copyright (c) 2003-2004, M Williamson, K Fraser
- *
- * This file may be distributed separately from the Linux kernel, or
- * incorporated into other software packages, subject to the following license:
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#ifndef __ASM_BALLOON_H__
-#define __ASM_BALLOON_H__
-
-/*
- * Inform the balloon driver that it should allow some slop for device-driver
- * memory activities.
- */
-extern void balloon_update_driver_allowance(long delta);
-
-/* Give up unmapped pages to the balloon driver. */
-extern void balloon_put_pages(unsigned long *mfn_list, unsigned long nr_mfns);
-
-/*
- * Prevent the balloon driver from changing the memory reservation during
- * a driver critical region.
- */
-extern spinlock_t balloon_lock;
-#define balloon_lock(__flags) spin_lock_irqsave(&balloon_lock, __flags)
-#define balloon_unlock(__flags) spin_unlock_irqrestore(&balloon_lock, __flags)
-
-#endif /* __ASM_BALLOON_H__ */
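/*
 * Sketch of the driver-critical-region idiom the lock above supports
 * (the callback name is illustrative): the balloon driver cannot change
 * the memory reservation while the lock is held.
 */
static inline void with_reservation_frozen(void (*critical)(void))
{
	unsigned long flags;
	balloon_lock(flags);
	critical();		/* e.g. hand pages to a device driver */
	balloon_unlock(flags);
}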
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/include/asm-xen/ctrl_if.h
--- a/linux-2.6.11-xen-sparse/include/asm-xen/ctrl_if.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,160 +0,0 @@
-/******************************************************************************
- * ctrl_if.h
- *
- * Management functions for special interface to the domain controller.
- *
- * Copyright (c) 2004, K A Fraser
- *
- * This file may be distributed separately from the Linux kernel, or
- * incorporated into other software packages, subject to the following license:
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#ifndef __ASM_XEN__CTRL_IF_H__
-#define __ASM_XEN__CTRL_IF_H__
-
-#include <asm-xen/hypervisor.h>
-#include <asm-xen/queues.h>
-
-typedef control_msg_t ctrl_msg_t;
-
-/*
- * Callback function type. Called for asynchronous processing of received
- * request messages, and responses to previously-transmitted request messages.
- * The parameters are (@msg, @id).
- * @msg: Original request/response message (not a copy). The message can be
- * modified in-place by the handler (e.g., a response callback can
- * turn a request message into a response message in place). The message
- * is no longer accessible after the callback handler returns -- if the
- * message is required to persist for longer, it must be copied.
- * @id: (Response callbacks only) The 'id' that was specified when the
- * original request message was queued for transmission.
- */
-typedef void (*ctrl_msg_handler_t)(ctrl_msg_t *, unsigned long);
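/*
 * Shape of a handler matching ctrl_msg_handler_t (a sketch; the body is
 * an illustrative assumption). Per the comment above, @msg must be
 * copied if it is needed after the handler returns.
 */
static void example_rsp_handler(ctrl_msg_t *msg, unsigned long id)
{
	/* inspect msg in place; it is no longer accessible afterwards */
	(void)msg;
	(void)id;
}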
-
-/*
- * Send @msg to the domain controller. Execute @hnd when a response is
- * received, passing the response message and the specified @id. This
- * operation will not block: it will return -EAGAIN if there is no space.
- * Notes:
- * 1. The @msg is copied if it is transmitted and so can be freed after this
- * function returns.
- * 2. If @hnd is NULL then no callback is executed.
- */
-int
-ctrl_if_send_message_noblock(
- ctrl_msg_t *msg,
- ctrl_msg_handler_t hnd,
- unsigned long id);
-
-/*
- * Send @msg to the domain controller. Execute @hnd when a response is
- * received, passing the response message and the specified @id. This
- * operation will block until the message is sent, or a signal is received
- * for the calling process (unless @wait_state is TASK_UNINTERRUPTIBLE).
- * Notes:
- * 1. The @msg is copied if it is transmitted and so can be freed after this
- * function returns.
- * 2. If @hnd is NULL then no callback is executed.
- */
-int
-ctrl_if_send_message_block(
- ctrl_msg_t *msg,
- ctrl_msg_handler_t hnd,
- unsigned long id,
- long wait_state);
-
-/*
- * Send @msg to the domain controller. Block until the response is received,
- * and then copy it into the provided buffer, @rmsg.
- */
-int
-ctrl_if_send_message_and_get_response(
- ctrl_msg_t *msg,
- ctrl_msg_t *rmsg,
- long wait_state);
-
-/*
- * Request a callback when there is /possibly/ space to immediately send a
- * message to the domain controller. This function returns 0 if there is
- * already space to transmit a message --- in this case the callback task /may/
- * still be executed. If this function returns 1 then the callback /will/ be
- * executed when space becomes available.
- */
-int
-ctrl_if_enqueue_space_callback(
- struct tq_struct *task);
-
-/*
- * Send a response (@msg) to a message from the domain controller. This will
- * never block.
- * Notes:
- * 1. The @msg is copied and so can be freed after this function returns.
- * 2. The @msg may be the original request message, modified in-place.
- */
-void
-ctrl_if_send_response(
- ctrl_msg_t *msg);
-
-/*
- * Register a receiver for typed messages from the domain controller. The
- * handler (@hnd) is called for every received message of the specified @type.
- * Returns TRUE (non-zero) if the handler was successfully registered.
- * If CALLBACK_IN_BLOCKING_CONTEXT is specified in @flags then callbacks will
- * occur in a context in which it is safe to yield (i.e., process context).
- */
-#define CALLBACK_IN_BLOCKING_CONTEXT 1
-int ctrl_if_register_receiver(
- u8 type,
- ctrl_msg_handler_t hnd,
- unsigned int flags);
-
-/*
- * Unregister a receiver for typed messages from the domain controller. The
- * handler (@hnd) will not be executed after this function returns.
- */
-void
-ctrl_if_unregister_receiver(
- u8 type, ctrl_msg_handler_t hnd);
-
-/* Suspend/resume notifications. */
-void ctrl_if_suspend(void);
-void ctrl_if_resume(void);
-
-/* Start-of-day setup. */
-void ctrl_if_init(void);
-
-/*
- * Returns TRUE if there are no outstanding message requests at the domain
- * controller. This can be used to ensure that messages have really flushed
- * through when it is not possible to use the response-callback interface.
- * WARNING: If other subsystems are using the control interface then this
- * function might never return TRUE!
- */
-int ctrl_if_transmitter_empty(void); /* !! DANGEROUS FUNCTION !! */
-
-/*
- * Manually discard response messages from the domain controller.
- * WARNING: This is usually done automatically -- this function should only
- * be called when normal interrupt mechanisms are disabled!
- */
-void ctrl_if_discard_responses(void); /* !! DANGEROUS FUNCTION !! */
-
-#endif /* __ASM_XEN__CTRL_IF_H__ */
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/include/asm-xen/evtchn.h
--- a/linux-2.6.11-xen-sparse/include/asm-xen/evtchn.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,104 +0,0 @@
-/******************************************************************************
- * evtchn.h
- *
- * Communication via Xen event channels.
- * Also definitions for the device that demuxes notifications to userspace.
- *
- * Copyright (c) 2004, K A Fraser
- *
- * This file may be distributed separately from the Linux kernel, or
- * incorporated into other software packages, subject to the following license:
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#ifndef __ASM_EVTCHN_H__
-#define __ASM_EVTCHN_H__
-
-#include <linux/config.h>
-#include <asm-xen/hypervisor.h>
-#include <asm/ptrace.h>
-#include <asm/synch_bitops.h>
-#include <asm-xen/xen-public/event_channel.h>
-
-/*
- * LOW-LEVEL DEFINITIONS
- */
-
-/* Entry point for notifications into Linux subsystems. */
-asmlinkage void evtchn_do_upcall(struct pt_regs *regs);
-
-/* Entry point for notifications into the userland character device. */
-void evtchn_device_upcall(int port);
-
-static inline void mask_evtchn(int port)
-{
- shared_info_t *s = HYPERVISOR_shared_info;
- synch_set_bit(port, &s->evtchn_mask[0]);
-}
-
-static inline void unmask_evtchn(int port)
-{
- shared_info_t *s = HYPERVISOR_shared_info;
-
- synch_clear_bit(port, &s->evtchn_mask[0]);
-
- /*
- * The following is basically the equivalent of 'hw_resend_irq'. Just like
- * a real IO-APIC, we 'lose the interrupt edge' if the channel is masked.
- */
- if ( synch_test_bit (port, &s->evtchn_pending[0]) &&
- !synch_test_and_set_bit(port>>5, &s->evtchn_pending_sel) )
- {
- s->vcpu_data[0].evtchn_upcall_pending = 1;
- if ( !s->vcpu_data[0].evtchn_upcall_mask )
- force_evtchn_callback();
- }
-}
-
-static inline void clear_evtchn(int port)
-{
- shared_info_t *s = HYPERVISOR_shared_info;
- synch_clear_bit(port, &s->evtchn_pending[0]);
-}
-
-static inline int notify_via_evtchn(int port)
-{
- evtchn_op_t op;
- op.cmd = EVTCHNOP_send;
- op.u.send.local_port = port;
- return HYPERVISOR_event_channel_op(&op);
-}
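/*
 * Sketch of a mask/clear/unmask acknowledge sequence built from the
 * helpers above (the function name is illustrative); unmask_evtchn()
 * re-raises the upcall if the port became pending while masked.
 */
static inline void ack_evtchn(int port)
{
	mask_evtchn(port);
	clear_evtchn(port);
	unmask_evtchn(port);
}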
-
-/*
- * CHARACTER-DEVICE DEFINITIONS
- */
-
-/* /dev/xen/evtchn resides at device number major=10, minor=201 */
-#define EVTCHN_MINOR 201
-
-/* /dev/xen/evtchn ioctls: */
-/* EVTCHN_RESET: Clear and reinit the event buffer. Clear error condition. */
-#define EVTCHN_RESET _IO('E', 1)
-/* EVTCHN_BIND: Bind to the specified event-channel port. */
-#define EVTCHN_BIND _IO('E', 2)
-/* EVTCHN_UNBIND: Unbind from the specified event-channel port. */
-#define EVTCHN_UNBIND _IO('E', 3)
-
-#endif /* __ASM_EVTCHN_H__ */
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/include/asm-xen/foreign_page.h
--- a/linux-2.6.11-xen-sparse/include/asm-xen/foreign_page.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,30 +0,0 @@
-/******************************************************************************
- * foreign_page.h
- *
- * Provide a "foreign" page type, that is owned by a foreign allocator and
- * not the normal buddy allocator in page_alloc.c
- *
- * Copyright (c) 2004, K A Fraser
- */
-
-#ifndef __ASM_XEN_FOREIGN_PAGE_H__
-#define __ASM_XEN_FOREIGN_PAGE_H__
-
-#define PG_foreign PG_arch_1
-
-#define PageForeign(page) test_bit(PG_foreign, &(page)->flags)
-
-#define SetPageForeign(page, dtor) do { \
- set_bit(PG_foreign, &(page)->flags); \
- (page)->mapping = (void *)dtor; \
-} while (0)
-
-#define ClearPageForeign(page) do { \
- clear_bit(PG_foreign, &(page)->flags); \
- (page)->mapping = NULL; \
-} while (0)
-
-#define PageForeignDestructor(page) \
- ( (void (*) (struct page *)) (page)->mapping )
-
-#endif /* __ASM_XEN_FOREIGN_PAGE_H__ */
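/*
 * Sketch of the destructor round trip these macros provide; the
 * destructor body is an illustrative assumption.
 */
static void example_foreign_dtor(struct page *page)
{
	ClearPageForeign(page);		/* return the page to normal handling */
}

static inline void example_mark_foreign(struct page *page)
{
	SetPageForeign(page, example_foreign_dtor);
	/* the owner later invokes: PageForeignDestructor(page)(page); */
}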
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/include/asm-xen/hypervisor.h
--- a/linux-2.6.11-xen-sparse/include/asm-xen/hypervisor.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,582 +0,0 @@
-/******************************************************************************
- * hypervisor.h
- *
- * Linux-specific hypervisor handling.
- *
- * Copyright (c) 2002-2004, K A Fraser
- *
- * This file may be distributed separately from the Linux kernel, or
- * incorporated into other software packages, subject to the following license:
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#ifndef __HYPERVISOR_H__
-#define __HYPERVISOR_H__
-
-#include <linux/config.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/version.h>
-#include <asm-xen/xen-public/xen.h>
-#include <asm-xen/xen-public/dom0_ops.h>
-#include <asm-xen/xen-public/io/domain_controller.h>
-#include <asm/ptrace.h>
-#include <asm/page.h>
-#if defined(__i386__)
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-#include <asm-generic/pgtable-nopmd.h>
-#endif
-#endif
-
-/* arch/xen/i386/kernel/setup.c */
-union xen_start_info_union
-{
- start_info_t xen_start_info;
- char padding[512];
-};
-extern union xen_start_info_union xen_start_info_union;
-#define xen_start_info (xen_start_info_union.xen_start_info)
-
-/* arch/xen/kernel/evtchn.c */
-/* Force a proper event-channel callback from Xen. */
-void force_evtchn_callback(void);
-
-/* arch/xen/kernel/process.c */
-void xen_cpu_idle (void);
-
-/* arch/xen/i386/kernel/hypervisor.c */
-void do_hypervisor_callback(struct pt_regs *regs);
-
-/* arch/xen/i386/kernel/head.S */
-void lgdt_finish(void);
-
-/* arch/xen/i386/mm/hypervisor.c */
-/*
- * NB. ptr values should be PHYSICAL, not MACHINE. 'vals' should already
- * be MACHINE addresses.
- */
-
-extern unsigned int mmu_update_queue_idx;
-
-void queue_l1_entry_update(pte_t *ptr, unsigned long val);
-void queue_l2_entry_update(pmd_t *ptr, pmd_t val);
-void queue_pt_switch(unsigned long ptr);
-void queue_tlb_flush(void);
-void queue_invlpg(unsigned long ptr);
-void queue_pgd_pin(unsigned long ptr);
-void queue_pgd_unpin(unsigned long ptr);
-void queue_pte_pin(unsigned long ptr);
-void queue_pte_unpin(unsigned long ptr);
-void queue_set_ldt(unsigned long ptr, unsigned long bytes);
-void queue_machphys_update(unsigned long mfn, unsigned long pfn);
-void xen_l1_entry_update(pte_t *ptr, unsigned long val);
-void xen_l2_entry_update(pmd_t *ptr, pmd_t val);
-void xen_pt_switch(unsigned long ptr);
-void xen_tlb_flush(void);
-void xen_invlpg(unsigned long ptr);
-void xen_pgd_pin(unsigned long ptr);
-void xen_pgd_unpin(unsigned long ptr);
-void xen_pte_pin(unsigned long ptr);
-void xen_pte_unpin(unsigned long ptr);
-void xen_set_ldt(unsigned long ptr, unsigned long bytes);
-void xen_machphys_update(unsigned long mfn, unsigned long pfn);
-
-void _flush_page_update_queue(void);
-static inline int flush_page_update_queue(void)
-{
- unsigned int idx = mmu_update_queue_idx;
- if ( idx != 0 ) _flush_page_update_queue();
- return idx;
-}
-#define xen_flush_page_update_queue() (_flush_page_update_queue())
-#define XEN_flush_page_update_queue() (_flush_page_update_queue())
-void MULTICALL_flush_page_update_queue(void);
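/*
 * Sketch of the queue-then-flush idiom the declarations above support
 * (the pointer and value are illustrative): updates are batched and only
 * become visible to Xen once the queue is flushed.
 */
static inline void update_single_pte(pte_t *ptep, unsigned long val)
{
	queue_l1_entry_update(ptep, val);	/* enqueue only */
	flush_page_update_queue();		/* submit the batch */
}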
-
-#ifdef CONFIG_XEN_PHYSDEV_ACCESS
-/* Allocate a contiguous empty region of low memory. Return virtual start. */
-unsigned long allocate_empty_lowmem_region(unsigned long pages);
-#endif
-
-/*
- * Assembler stubs for hyper-calls.
- */
-
-static inline int
-HYPERVISOR_set_trap_table(
- trap_info_t *table)
-{
- int ret;
- unsigned long ignore;
-
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ignore)
- : "0" (__HYPERVISOR_set_trap_table), "1" (table)
- : "memory" );
-
- return ret;
-}
-
-static inline int
-HYPERVISOR_mmu_update(
- mmu_update_t *req, int count, int *success_count)
-{
- int ret;
- unsigned long ign1, ign2, ign3;
-
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3)
- : "0" (__HYPERVISOR_mmu_update), "1" (req), "2" (count),
- "3" (success_count)
- : "memory" );
-
- return ret;
-}
-
-static inline int
-HYPERVISOR_set_gdt(
- unsigned long *frame_list, int entries)
-{
- int ret;
- unsigned long ign1, ign2;
-
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ign1), "=c" (ign2)
- : "0" (__HYPERVISOR_set_gdt), "1" (frame_list), "2" (entries)
- : "memory" );
-
-
- return ret;
-}
-
-static inline int
-HYPERVISOR_stack_switch(
- unsigned long ss, unsigned long esp)
-{
- int ret;
- unsigned long ign1, ign2;
-
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ign1), "=c" (ign2)
- : "0" (__HYPERVISOR_stack_switch), "1" (ss), "2" (esp)
- : "memory" );
-
- return ret;
-}
-
-static inline int
-HYPERVISOR_set_callbacks(
- unsigned long event_selector, unsigned long event_address,
- unsigned long failsafe_selector, unsigned long failsafe_address)
-{
- int ret;
- unsigned long ign1, ign2, ign3, ign4;
-
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4)
- : "0" (__HYPERVISOR_set_callbacks), "1" (event_selector),
- "2" (event_address), "3" (failsafe_selector), "4" (failsafe_address)
- : "memory" );
-
- return ret;
-}
-
-static inline int
-HYPERVISOR_fpu_taskswitch(
- void)
-{
- int ret;
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret) : "0" (__HYPERVISOR_fpu_taskswitch) : "memory" );
-
- return ret;
-}
-
-static inline int
-HYPERVISOR_yield(
- void)
-{
- int ret;
- unsigned long ign;
-
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ign)
- : "0" (__HYPERVISOR_sched_op), "1" (SCHEDOP_yield)
- : "memory" );
-
- return ret;
-}
-
-static inline int
-HYPERVISOR_block(
- void)
-{
- int ret;
- unsigned long ign1;
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ign1)
- : "0" (__HYPERVISOR_sched_op), "1" (SCHEDOP_block)
- : "memory" );
-
- return ret;
-}
-
-static inline int
-HYPERVISOR_shutdown(
- void)
-{
- int ret;
- unsigned long ign1;
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ign1)
- : "0" (__HYPERVISOR_sched_op),
- "1" (SCHEDOP_shutdown | (SHUTDOWN_poweroff << SCHEDOP_reasonshift))
- : "memory" );
-
- return ret;
-}
-
-static inline int
-HYPERVISOR_reboot(
- void)
-{
- int ret;
- unsigned long ign1;
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ign1)
- : "0" (__HYPERVISOR_sched_op),
- "1" (SCHEDOP_shutdown | (SHUTDOWN_reboot << SCHEDOP_reasonshift))
- : "memory" );
-
- return ret;
-}
-
-static inline int
-HYPERVISOR_suspend(
- unsigned long srec)
-{
- int ret;
- unsigned long ign1, ign2;
-
- /* NB. On suspend, control software expects a suspend record in %esi. */
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ign1), "=S" (ign2)
- : "0" (__HYPERVISOR_sched_op),
- "b" (SCHEDOP_shutdown | (SHUTDOWN_suspend << SCHEDOP_reasonshift)),
- "S" (srec) : "memory");
-
- return ret;
-}
-
-static inline int
-HYPERVISOR_crash(
- void)
-{
- int ret;
- unsigned long ign1;
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ign1)
- : "0" (__HYPERVISOR_sched_op),
- "1" (SCHEDOP_shutdown | (SHUTDOWN_crash << SCHEDOP_reasonshift))
- : "memory" );
-
- return ret;
-}
-
-static inline long
-HYPERVISOR_set_timer_op(
- u64 timeout)
-{
- int ret;
- unsigned long timeout_hi = (unsigned long)(timeout>>32);
- unsigned long timeout_lo = (unsigned long)timeout;
- unsigned long ign1, ign2;
-
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ign1), "=c" (ign2)
- : "0" (__HYPERVISOR_set_timer_op), "b" (timeout_hi), "c" (timeout_lo)
- : "memory");
-
- return ret;
-}
-
-static inline int
-HYPERVISOR_dom0_op(
- dom0_op_t *dom0_op)
-{
- int ret;
- unsigned long ign1;
-
- dom0_op->interface_version = DOM0_INTERFACE_VERSION;
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ign1)
- : "0" (__HYPERVISOR_dom0_op), "1" (dom0_op)
- : "memory");
-
- return ret;
-}
-
-static inline int
-HYPERVISOR_set_debugreg(
- int reg, unsigned long value)
-{
- int ret;
- unsigned long ign1, ign2;
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ign1), "=c" (ign2)
- : "0" (__HYPERVISOR_set_debugreg), "1" (reg), "2" (value)
- : "memory" );
-
- return ret;
-}
-
-static inline unsigned long
-HYPERVISOR_get_debugreg(
- int reg)
-{
- unsigned long ret;
- unsigned long ign;
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ign)
- : "0" (__HYPERVISOR_get_debugreg), "1" (reg)
- : "memory" );
-
- return ret;
-}
-
-static inline int
-HYPERVISOR_update_descriptor(
- unsigned long ma, unsigned long word1, unsigned long word2)
-{
- int ret;
- unsigned long ign1, ign2, ign3;
-
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3)
- : "0" (__HYPERVISOR_update_descriptor), "1" (ma), "2" (word1),
- "3" (word2)
- : "memory" );
-
- return ret;
-}
-
-static inline int
-HYPERVISOR_set_fast_trap(
- int idx)
-{
- int ret;
- unsigned long ign;
-
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ign)
- : "0" (__HYPERVISOR_set_fast_trap), "1" (idx)
- : "memory" );
-
- return ret;
-}
-
-static inline int
-HYPERVISOR_dom_mem_op(
- unsigned int op, unsigned long *extent_list,
- unsigned long nr_extents, unsigned int extent_order)
-{
- int ret;
- unsigned long ign1, ign2, ign3, ign4, ign5;
-
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4),
- "=D" (ign5)
- : "0" (__HYPERVISOR_dom_mem_op), "1" (op), "2" (extent_list),
- "3" (nr_extents), "4" (extent_order), "5" (DOMID_SELF)
- : "memory" );
-
- return ret;
-}
-
-static inline int
-HYPERVISOR_multicall(
- void *call_list, int nr_calls)
-{
- int ret;
- unsigned long ign1, ign2;
-
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ign1), "=c" (ign2)
- : "0" (__HYPERVISOR_multicall), "1" (call_list), "2" (nr_calls)
- : "memory" );
-
- return ret;
-}
-
-static inline int
-HYPERVISOR_update_va_mapping(
- unsigned long page_nr, pte_t new_val, unsigned long flags)
-{
- int ret;
- unsigned long ign1, ign2, ign3;
-
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3)
- : "0" (__HYPERVISOR_update_va_mapping),
- "1" (page_nr), "2" ((new_val).pte_low), "3" (flags)
- : "memory" );
-
- if ( unlikely(ret < 0) )
- {
- printk(KERN_ALERT "Failed update VA mapping: %08lx, %08lx, %08lx\n",
- page_nr, (new_val).pte_low, flags);
- BUG();
- }
-
- return ret;
-}
-
-static inline int
-HYPERVISOR_event_channel_op(
- void *op)
-{
- int ret;
- unsigned long ignore;
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ignore)
- : "0" (__HYPERVISOR_event_channel_op), "1" (op)
- : "memory" );
-
- return ret;
-}
-
-static inline int
-HYPERVISOR_xen_version(
- int cmd)
-{
- int ret;
- unsigned long ignore;
-
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ignore)
- : "0" (__HYPERVISOR_xen_version), "1" (cmd)
- : "memory" );
-
- return ret;
-}
-
-static inline int
-HYPERVISOR_console_io(
- int cmd, int count, char *str)
-{
- int ret;
- unsigned long ign1, ign2, ign3;
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3)
- : "0" (__HYPERVISOR_console_io), "1" (cmd), "2" (count), "3" (str)
- : "memory" );
-
- return ret;
-}
-
-static inline int
-HYPERVISOR_physdev_op(
- void *physdev_op)
-{
- int ret;
- unsigned long ign;
-
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ign)
- : "0" (__HYPERVISOR_physdev_op), "1" (physdev_op)
- : "memory" );
-
- return ret;
-}
-
-static inline int
-HYPERVISOR_grant_table_op(
- unsigned int cmd, void *uop, unsigned int count)
-{
- int ret;
- unsigned long ign1, ign2, ign3;
-
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3)
- : "0" (__HYPERVISOR_grant_table_op), "1" (cmd), "2" (count), "3" (uop)
- : "memory" );
-
- return ret;
-}
-
-static inline int
-HYPERVISOR_update_va_mapping_otherdomain(
- unsigned long page_nr, pte_t new_val, unsigned long flags, domid_t domid)
-{
- int ret;
- unsigned long ign1, ign2, ign3, ign4;
-
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4)
- : "0" (__HYPERVISOR_update_va_mapping_otherdomain),
- "1" (page_nr), "2" ((new_val).pte_low), "3" (flags), "4" (domid) :
- "memory" );
-
- return ret;
-}
-
-static inline int
-HYPERVISOR_vm_assist(
- unsigned int cmd, unsigned int type)
-{
- int ret;
- unsigned long ign1, ign2;
-
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ign1), "=c" (ign2)
- : "0" (__HYPERVISOR_vm_assist), "1" (cmd), "2" (type)
- : "memory" );
-
- return ret;
-}
-
-#endif /* __HYPERVISOR_H__ */
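
For reference, every wrapper deleted above uses the same calling convention: the hypercall number is loaded into EAX, up to five arguments travel in EBX/ECX/EDX/ESI/EDI, TRAP_INSTR (the software interrupt into Xen on this era of x86) makes the transition, and the result comes back in EAX. The dummy "ign" outputs only tell GCC that Xen clobbers the argument registers. A minimal sketch of the pattern, using a made-up hypercall number rather than anything in the real interface:

    /* Illustration only: __HYPERVISOR_example is not a real hypercall. */
    static inline int
    HYPERVISOR_example(unsigned long arg1, unsigned long arg2)
    {
        int ret;
        unsigned long ign1, ign2;   /* argument regs are clobbered by Xen */

        __asm__ __volatile__ (
            TRAP_INSTR                 /* trap into the hypervisor */
            : "=a" (ret), "=b" (ign1), "=c" (ign2)
            : "0" (__HYPERVISOR_example), "1" (arg1), "2" (arg2)
            : "memory" );

        return ret;
    }
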
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/include/asm-xen/linux-public/privcmd.h
--- a/linux-2.6.11-xen-sparse/include/asm-xen/linux-public/privcmd.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,88 +0,0 @@
-/******************************************************************************
- * privcmd.h
- *
- * Interface to /proc/xen/privcmd.
- *
- * Copyright (c) 2003-2004, K A Fraser
- *
- * This file may be distributed separately from the Linux kernel, or
- * incorporated into other software packages, subject to the following license:
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#ifndef __PRIVCMD_H__
-#define __PRIVCMD_H__
-
-typedef struct privcmd_hypercall
-{
- unsigned long op;
- unsigned long arg[5];
-} privcmd_hypercall_t;
-
-typedef struct privcmd_mmap_entry {
- unsigned long va;
- unsigned long mfn;
- unsigned long npages;
-} privcmd_mmap_entry_t;
-
-typedef struct privcmd_mmap {
- int num;
- domid_t dom; /* target domain */
- privcmd_mmap_entry_t *entry;
-} privcmd_mmap_t;
-
-typedef struct privcmd_mmapbatch {
- int num; /* number of pages to populate */
- domid_t dom; /* target domain */
- unsigned long addr; /* virtual address */
- unsigned long *arr; /* array of mfns - top nibble set on err */
-} privcmd_mmapbatch_t;
-
-typedef struct privcmd_blkmsg
-{
- unsigned long op;
- void *buf;
- int buf_size;
-} privcmd_blkmsg_t;
-
-/*
- * @cmd: IOCTL_PRIVCMD_HYPERCALL
- * @arg: &privcmd_hypercall_t
- * Return: Value returned from execution of the specified hypercall.
- */
-#define IOCTL_PRIVCMD_HYPERCALL \
- _IOC(_IOC_NONE, 'P', 0, sizeof(privcmd_hypercall_t))
-
-/*
- * @cmd: IOCTL_PRIVCMD_INITDOMAIN_EVTCHN
- * @arg: n/a
- * Return: Port associated with domain-controller end of control event channel
- * for the initial domain.
- */
-#define IOCTL_PRIVCMD_INITDOMAIN_EVTCHN \
- _IOC(_IOC_NONE, 'P', 1, 0)
-#define IOCTL_PRIVCMD_MMAP \
- _IOC(_IOC_NONE, 'P', 2, sizeof(privcmd_mmap_t))
-#define IOCTL_PRIVCMD_MMAPBATCH \
- _IOC(_IOC_NONE, 'P', 3, sizeof(privcmd_mmapbatch_t))
-#define IOCTL_PRIVCMD_GET_MACH2PHYS_START_MFN \
- _IOC(_IOC_READ, 'P', 4, sizeof(unsigned long))
-
-#endif /* __PRIVCMD_H__ */
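
The header above is consumed by privileged userspace (the control tools), which opens /proc/xen/privcmd and drives it with these ioctls. A sketch of the hypercall path only; the include paths and the choice of __HYPERVISOR_xen_version are assumptions for illustration, not part of this patch:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <asm-xen/linux-public/privcmd.h> /* this header, once installed */
    #include <asm-xen/xen-public/xen.h>       /* hypercall numbers (assumed path) */

    int main(void)
    {
        privcmd_hypercall_t call = { .op = __HYPERVISOR_xen_version };
        int fd = open("/proc/xen/privcmd", O_RDWR);

        if (fd < 0)
            return 1;
        /* Per the comment above, the ioctl returns the hypercall's own
         * return value. */
        printf("xen_version -> %d\n",
               ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call));
        close(fd);
        return 0;
    }
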
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/include/asm-xen/linux-public/suspend.h
--- a/linux-2.6.11-xen-sparse/include/asm-xen/linux-public/suspend.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,43 +0,0 @@
-/******************************************************************************
- * suspend.h
- *
- * Copyright (c) 2003-2004, K A Fraser
- *
- * This file may be distributed separately from the Linux kernel, or
- * incorporated into other software packages, subject to the following license:
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#ifndef __ASM_XEN_SUSPEND_H__
-#define __ASM_XEN_SUSPEND_H__
-
-typedef struct suspend_record_st {
- /* To be filled in before resume. */
- start_info_t resume_info;
- /*
- * The number of a machine frame containing, in sequence, the number of
- * each machine frame that contains PFN -> MFN translation table data.
- */
- unsigned long pfn_to_mfn_frame_list;
- /* Number of entries in the PFN -> MFN translation table. */
- unsigned long nr_pfns;
-} suspend_record_t;
-
-#endif /* __ASM_XEN_SUSPEND_H__ */
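
The two fields after resume_info describe the PFN-to-MFN table compactly: pfn_to_mfn_frame_list names a single machine frame whose contents are the frame numbers of the frames holding the table itself, and nr_pfns is the table's entry count. Back-of-envelope arithmetic, assuming 4KiB pages and 4-byte entries as on 32-bit x86 (illustration, not from the patch):

    #include <stdio.h>

    int main(void)
    {
        unsigned long nr_pfns    = 32768;        /* e.g. a 128MiB guest */
        unsigned long per_frame  = 4096 / 4;     /* 1024 entries per frame */
        unsigned long tbl_frames = (nr_pfns + per_frame - 1) / per_frame;

        /* The resulting 32 frame numbers (128 bytes) fit easily in the
         * one frame named by pfn_to_mfn_frame_list. */
        printf("table occupies %lu frames\n", tbl_frames);
        return 0;
    }
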
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/include/asm-xen/multicall.h
--- a/linux-2.6.11-xen-sparse/include/asm-xen/multicall.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,107 +0,0 @@
-/******************************************************************************
- * multicall.h
- *
- * Copyright (c) 2003-2004, K A Fraser
- *
- * This file may be distributed separately from the Linux kernel, or
- * incorporated into other software packages, subject to the following license:
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#ifndef __MULTICALL_H__
-#define __MULTICALL_H__
-
-#include <asm-xen/hypervisor.h>
-
-extern multicall_entry_t multicall_list[];
-extern int nr_multicall_ents;
-
-static inline void queue_multicall0(unsigned long op)
-{
- int i = nr_multicall_ents;
- multicall_list[i].op = op;
- nr_multicall_ents = i+1;
-}
-
-static inline void queue_multicall1(unsigned long op, unsigned long arg1)
-{
- int i = nr_multicall_ents;
- multicall_list[i].op = op;
- multicall_list[i].args[0] = arg1;
- nr_multicall_ents = i+1;
-}
-
-static inline void queue_multicall2(
- unsigned long op, unsigned long arg1, unsigned long arg2)
-{
- int i = nr_multicall_ents;
- multicall_list[i].op = op;
- multicall_list[i].args[0] = arg1;
- multicall_list[i].args[1] = arg2;
- nr_multicall_ents = i+1;
-}
-
-static inline void queue_multicall3(
- unsigned long op, unsigned long arg1, unsigned long arg2,
- unsigned long arg3)
-{
- int i = nr_multicall_ents;
- multicall_list[i].op = op;
- multicall_list[i].args[0] = arg1;
- multicall_list[i].args[1] = arg2;
- multicall_list[i].args[2] = arg3;
- nr_multicall_ents = i+1;
-}
-
-static inline void queue_multicall4(
- unsigned long op, unsigned long arg1, unsigned long arg2,
- unsigned long arg3, unsigned long arg4)
-{
- int i = nr_multicall_ents;
- multicall_list[i].op = op;
- multicall_list[i].args[0] = arg1;
- multicall_list[i].args[1] = arg2;
- multicall_list[i].args[2] = arg3;
- multicall_list[i].args[3] = arg4;
- nr_multicall_ents = i+1;
-}
-
-static inline void queue_multicall5(
- unsigned long op, unsigned long arg1, unsigned long arg2,
- unsigned long arg3, unsigned long arg4, unsigned long arg5)
-{
- int i = nr_multicall_ents;
- multicall_list[i].op = op;
- multicall_list[i].args[0] = arg1;
- multicall_list[i].args[1] = arg2;
- multicall_list[i].args[2] = arg3;
- multicall_list[i].args[3] = arg4;
- multicall_list[i].args[4] = arg5;
- nr_multicall_ents = i+1;
-}
-
-static inline void execute_multicall_list(void)
-{
- if ( unlikely(nr_multicall_ents == 0) ) return;
- (void)HYPERVISOR_multicall(multicall_list, nr_multicall_ents);
- nr_multicall_ents = 0;
-}
-
-#endif /* __MULTICALL_H__ */
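
The queue_multicall*() helpers only append to a static list; nothing reaches Xen until execute_multicall_list() issues the whole batch as one __HYPERVISOR_multicall trap. A hypothetical call site, shown to illustrate the batching pattern (the call site is invented, though both hypercall numbers exist in the public interface):

    static void example_switch(unsigned long ss, unsigned long esp)
    {
        /* Queue two operations... */
        queue_multicall0(__HYPERVISOR_fpu_taskswitch);
        queue_multicall2(__HYPERVISOR_stack_switch, ss, esp);
        /* ...then cross into the hypervisor exactly once for both. */
        execute_multicall_list();
    }
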
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/include/asm-xen/queues.h
--- a/linux-2.6.11-xen-sparse/include/asm-xen/queues.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,81 +0,0 @@
-
-/*
- * Oh dear. Task queues were removed from Linux 2.6 and replaced by work
- * queues. Unfortunately the semantics is not the same. With task queues we
- * can defer work until a particular event occurs -- this is not
- * straightforwardly done with work queues (queued work is performed asap, or
- * after some fixed timeout). Conversely, work queues are a (slightly) neater
- * way of deferring work to a process context than using task queues in 2.4.
- *
- * This is a bit of a needless reimplementation -- should have just pulled
- * the code from 2.4, but I tried leveraging work queues to simplify things.
- * They didn't help. :-(
- */
-
-#ifndef __QUEUES_H__
-#define __QUEUES_H__
-
-#include <linux/version.h>
-#include <linux/list.h>
-#include <linux/workqueue.h>
-
-struct tq_struct {
- void (*fn)(void *);
- void *arg;
- struct list_head list;
- unsigned long pending;
-};
-#define INIT_TQUEUE(_name, _fn, _arg) \
- do { \
- INIT_LIST_HEAD(&(_name)->list); \
- (_name)->pending = 0; \
- (_name)->fn = (_fn); (_name)->arg = (_arg); \
- } while ( 0 )
-#define DECLARE_TQUEUE(_name, _fn, _arg) \
- struct tq_struct _name = { (_fn), (_arg), LIST_HEAD_INIT((_name).list), 0 }
-
-typedef struct {
- struct list_head list;
- spinlock_t lock;
-} task_queue;
-#define DECLARE_TASK_QUEUE(_name) \
- task_queue _name = { LIST_HEAD_INIT((_name).list), SPIN_LOCK_UNLOCKED }
-
-static inline int queue_task(struct tq_struct *tqe, task_queue *tql)
-{
- unsigned long flags;
- if ( test_and_set_bit(0, &tqe->pending) )
- return 0;
- spin_lock_irqsave(&tql->lock, flags);
- list_add_tail(&tqe->list, &tql->list);
- spin_unlock_irqrestore(&tql->lock, flags);
- return 1;
-}
-
-static inline void run_task_queue(task_queue *tql)
-{
- struct list_head head, *ent;
- struct tq_struct *tqe;
- unsigned long flags;
- void (*fn)(void *);
- void *arg;
-
- spin_lock_irqsave(&tql->lock, flags);
- list_add(&head, &tql->list);
- list_del_init(&tql->list);
- spin_unlock_irqrestore(&tql->lock, flags);
-
- while ( !list_empty(&head) )
- {
- ent = head.next;
- list_del_init(ent);
- tqe = list_entry(ent, struct tq_struct, list);
- fn = tqe->fn;
- arg = tqe->arg;
- wmb();
- tqe->pending = 0;
- fn(arg);
- }
-}
-
-#endif /* __QUEUES_H__ */
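
Usage mirrors the 2.4 API this shim reimplements (the names below are hypothetical): queue_task() marks the work pending, the test_and_set_bit() makes re-queueing a no-op while it stays pending, and nothing runs until some context drains the queue with run_task_queue():

    static void deferred_rx(void *unused)
    {
        /* do the work that was deferred from the notification */
    }

    static DECLARE_TASK_QUEUE(rx_tq);
    static DECLARE_TQUEUE(rx_work, deferred_rx, NULL);

    static void on_notification(void)    /* e.g. from an event channel */
    {
        queue_task(&rx_work, &rx_tq);    /* returns 0 if already queued */
    }

    static void event_occurred(void)     /* the awaited event */
    {
        run_task_queue(&rx_tq);          /* runs and clears each entry once */
    }
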
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/include/asm-xen/xen_proc.h
--- a/linux-2.6.11-xen-sparse/include/asm-xen/xen_proc.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,13 +0,0 @@
-
-#ifndef __ASM_XEN_PROC_H__
-#define __ASM_XEN_PROC_H__
-
-#include <linux/config.h>
-#include <linux/proc_fs.h>
-
-extern struct proc_dir_entry *create_xen_proc_entry(
- const char *name, mode_t mode);
-extern void remove_xen_proc_entry(
- const char *name);
-
-#endif /* __ASM_XEN_PROC_H__ */
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/include/linux/gfp.h
--- a/linux-2.6.11-xen-sparse/include/linux/gfp.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,138 +0,0 @@
-#ifndef __LINUX_GFP_H
-#define __LINUX_GFP_H
-
-#include <linux/mmzone.h>
-#include <linux/stddef.h>
-#include <linux/linkage.h>
-#include <linux/config.h>
-
-struct vm_area_struct;
-
-/*
- * GFP bitmasks..
- */
-/* Zone modifiers in GFP_ZONEMASK (see linux/mmzone.h - low two bits) */
-#define __GFP_DMA 0x01
-#define __GFP_HIGHMEM 0x02
-
-/*
- * Action modifiers - doesn't change the zoning
- *
- * __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt
- * _might_ fail. This depends upon the particular VM implementation.
- *
- * __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
- * cannot handle allocation failures.
- *
- * __GFP_NORETRY: The VM implementation must not retry indefinitely.
- */
-#define __GFP_WAIT 0x10 /* Can wait and reschedule? */
-#define __GFP_HIGH 0x20 /* Should access emergency pools? */
-#define __GFP_IO 0x40 /* Can start physical IO? */
-#define __GFP_FS 0x80 /* Can call down to low-level FS? */
-#define __GFP_COLD 0x100 /* Cache-cold page required */
-#define __GFP_NOWARN 0x200 /* Suppress page allocation failure warning */
-#define __GFP_REPEAT 0x400 /* Retry the allocation. Might fail */
-#define __GFP_NOFAIL 0x800 /* Retry for ever. Cannot fail */
-#define __GFP_NORETRY 0x1000 /* Do not retry. Might fail */
-#define __GFP_NO_GROW 0x2000 /* Slab internal usage */
-#define __GFP_COMP 0x4000 /* Add compound page metadata */
-#define __GFP_ZERO 0x8000 /* Return zeroed page on success */
-
-#define __GFP_BITS_SHIFT 16 /* Room for 16 __GFP_FOO bits */
-#define __GFP_BITS_MASK ((1 << __GFP_BITS_SHIFT) - 1)
-
-/* if you forget to add the bitmask here kernel will crash, period */
-#define GFP_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS| \
- __GFP_COLD|__GFP_NOWARN|__GFP_REPEAT| \
- __GFP_NOFAIL|__GFP_NORETRY|__GFP_NO_GROW|__GFP_COMP)
-
-#define GFP_ATOMIC (__GFP_HIGH)
-#define GFP_NOIO (__GFP_WAIT)
-#define GFP_NOFS (__GFP_WAIT | __GFP_IO)
-#define GFP_KERNEL (__GFP_WAIT | __GFP_IO | __GFP_FS)
-#define GFP_USER (__GFP_WAIT | __GFP_IO | __GFP_FS)
-#define GFP_HIGHUSER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HIGHMEM)
-
-/* Flag - indicates that the buffer will be suitable for DMA. Ignored on some
- platforms, used as appropriate on others */
-
-#define GFP_DMA __GFP_DMA
-
-
-/*
- * There is only one page-allocator function, and two main namespaces to
- * it. The alloc_page*() variants return 'struct page *' and as such
- * can allocate highmem pages, the *get*page*() variants return
- * virtual kernel addresses to the allocated page(s).
- */
-
-/*
- * We get the zone list from the current node and the gfp_mask.
- * This zone list contains a maximum of MAXNODES*MAX_NR_ZONES zones.
- *
- * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets
- * optimized to &contig_page_data at compile-time.
- */
-
-/*
- * If arch_free_page returns non-zero then the generic free_page code can
- * immediately bail: the arch-specific function has done all the work.
- */
-#ifndef HAVE_ARCH_FREE_PAGE
-#define arch_free_page(page, order) 0
-#endif
-
-extern struct page *
-FASTCALL(__alloc_pages(unsigned int, unsigned int, struct zonelist *));
-
-static inline struct page *alloc_pages_node(int nid, unsigned int gfp_mask,
- unsigned int order)
-{
- if (unlikely(order >= MAX_ORDER))
- return NULL;
-
- return __alloc_pages(gfp_mask, order,
- NODE_DATA(nid)->node_zonelists + (gfp_mask & GFP_ZONEMASK));
-}
-
-#ifdef CONFIG_NUMA
-extern struct page *alloc_pages_current(unsigned gfp_mask, unsigned order);
-
-static inline struct page *
-alloc_pages(unsigned int gfp_mask, unsigned int order)
-{
- if (unlikely(order >= MAX_ORDER))
- return NULL;
-
- return alloc_pages_current(gfp_mask, order);
-}
-extern struct page *alloc_page_vma(unsigned gfp_mask,
- struct vm_area_struct *vma, unsigned long addr);
-#else
-#define alloc_pages(gfp_mask, order) \
- alloc_pages_node(numa_node_id(), gfp_mask, order)
-#define alloc_page_vma(gfp_mask, vma, addr) alloc_pages(gfp_mask, 0)
-#endif
-#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
-
-extern unsigned long FASTCALL(__get_free_pages(unsigned int gfp_mask, unsigned int order));
-extern unsigned long FASTCALL(get_zeroed_page(unsigned int gfp_mask));
-
-#define __get_free_page(gfp_mask) \
- __get_free_pages((gfp_mask),0)
-
-#define __get_dma_pages(gfp_mask, order) \
- __get_free_pages((gfp_mask) | GFP_DMA,(order))
-
-extern void FASTCALL(__free_pages(struct page *page, unsigned int order));
-extern void FASTCALL(free_pages(unsigned long addr, unsigned int order));
-extern void FASTCALL(free_hot_page(struct page *page));
-extern void FASTCALL(free_cold_page(struct page *page));
-
-#define __free_page(page) __free_pages((page), 0)
-#define free_page(addr) free_pages((addr),0)
-
-void page_alloc_init(void);
-
-#endif /* __LINUX_GFP_H */
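
As the comment above notes, the allocator exposes two namespaces: alloc_page*() returns struct page pointers and may hand back highmem, while the *get*page*() variants return kernel virtual addresses and therefore cannot. A short illustrative pairing:

    static int example_alloc(void)
    {
        struct page *page = alloc_page(GFP_HIGHUSER);     /* may be highmem */
        unsigned long buf = __get_free_page(GFP_KERNEL);  /* always mapped */

        if (!page || !buf) {
            if (page)
                __free_page(page);
            if (buf)
                free_page(buf);
            return -ENOMEM;
        }
        /* ... use them ... */
        __free_page(page);
        free_page(buf);
        return 0;
    }
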
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/include/linux/highmem.h
--- a/linux-2.6.11-xen-sparse/include/linux/highmem.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,106 +0,0 @@
-#ifndef _LINUX_HIGHMEM_H
-#define _LINUX_HIGHMEM_H
-
-#include <linux/config.h>
-#include <linux/fs.h>
-#include <linux/mm.h>
-
-#include <asm/cacheflush.h>
-
-#ifdef CONFIG_HIGHMEM
-
-#include <asm/highmem.h>
-
-/* declarations for linux/mm/highmem.c */
-unsigned int nr_free_highpages(void);
-void kmap_flush_unused(void);
-
-#else /* CONFIG_HIGHMEM */
-
-static inline unsigned int nr_free_highpages(void) { return 0; }
-static inline void kmap_flush_unused(void) { }
-
-static inline void *kmap(struct page *page)
-{
- might_sleep();
- return page_address(page);
-}
-
-#define kunmap(page) do { (void) (page); } while (0)
-
-#define kmap_atomic(page, idx) page_address(page)
-#define kunmap_atomic(addr, idx) do { } while (0)
-#define kmap_atomic_to_page(ptr) virt_to_page(ptr)
-
-#endif /* CONFIG_HIGHMEM */
-
-/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
-static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
-{
- void *addr = kmap_atomic(page, KM_USER0);
- clear_user_page(addr, vaddr, page);
- kunmap_atomic(addr, KM_USER0);
- /* Make sure this page is cleared on other CPU's too before using it */
- smp_wmb();
-}
-
-#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
-static inline struct page *
-alloc_zeroed_user_highpage(struct vm_area_struct *vma, unsigned long vaddr)
-{
- struct page *page = alloc_page_vma(GFP_HIGHUSER, vma, vaddr);
-
- if (page)
- clear_user_highpage(page, vaddr);
-
- return page;
-}
-#endif
-
-static inline void clear_highpage(struct page *page)
-{
- void *kaddr = kmap_atomic(page, KM_USER0);
- clear_page(kaddr);
- kunmap_atomic(kaddr, KM_USER0);
-}
-
-/*
- * Same but also flushes aliased cache contents to RAM.
- */
-static inline void memclear_highpage_flush(struct page *page, unsigned int offset, unsigned int size)
-{
- void *kaddr;
-
- BUG_ON(offset + size > PAGE_SIZE);
-
- kaddr = kmap_atomic(page, KM_USER0);
- memset((char *)kaddr + offset, 0, size);
- flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_USER0);
-}
-
-static inline void copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr)
-{
- char *vfrom, *vto;
-
- vfrom = kmap_atomic(from, KM_USER0);
- vto = kmap_atomic(to, KM_USER1);
- copy_user_page(vto, vfrom, vaddr, to);
- kunmap_atomic(vfrom, KM_USER0);
- kunmap_atomic(vto, KM_USER1);
- /* Make sure this page is cleared on other CPU's too before using it */
- smp_wmb();
-}
-
-static inline void copy_highpage(struct page *to, struct page *from)
-{
- char *vfrom, *vto;
-
- vfrom = kmap_atomic(from, KM_USER0);
- vto = kmap_atomic(to, KM_USER1);
- copy_page(vto, vfrom);
- kunmap_atomic(vfrom, KM_USER0);
- kunmap_atomic(vto, KM_USER1);
-}
-
-#endif /* _LINUX_HIGHMEM_H */
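
The point of the !CONFIG_HIGHMEM stubs above is that callers are written once: kmap() may sleep and the mapping persists until kunmap(), while kmap_atomic() never sleeps but must be released before scheduling. Two hypothetical callers (the second is essentially clear_highpage() above):

    static void copy_from_page(struct page *page, void *dst, size_t len)
    {
        char *src = kmap(page);   /* may sleep; survives schedule() */
        memcpy(dst, src, len);
        kunmap(page);
    }

    static void zero_page_atomic(struct page *page)
    {
        void *kaddr = kmap_atomic(page, KM_USER0);  /* no sleeping here */
        memset(kaddr, 0, PAGE_SIZE);
        kunmap_atomic(kaddr, KM_USER0);             /* same slot, same CPU */
    }
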
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/include/linux/irq.h
--- a/linux-2.6.11-xen-sparse/include/linux/irq.h Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,98 +0,0 @@
-#ifndef __irq_h
-#define __irq_h
-
-/*
- * Please do not include this file in generic code. There is currently
- * no requirement for any architecture to implement anything held
- * within this file.
- *
- * Thanks. --rmk
- */
-
-#include <linux/config.h>
-
-#if !defined(CONFIG_ARCH_S390)
-
-#include <linux/linkage.h>
-#include <linux/cache.h>
-#include <linux/spinlock.h>
-#include <linux/cpumask.h>
-
-#include <asm/irq.h>
-#include <asm/ptrace.h>
-
-/*
- * IRQ line status.
- */
-#define IRQ_INPROGRESS 1 /* IRQ handler active - do not enter! */
-#define IRQ_DISABLED 2 /* IRQ disabled - do not enter! */
-#define IRQ_PENDING 4 /* IRQ pending - replay on enable */
-#define IRQ_REPLAY 8 /* IRQ has been replayed but not acked yet */
-#define IRQ_AUTODETECT 16 /* IRQ is being autodetected */
-#define IRQ_WAITING 32 /* IRQ not yet seen - for autodetection */
-#define IRQ_LEVEL 64 /* IRQ level triggered */
-#define IRQ_MASKED 128 /* IRQ masked - shouldn't be seen again */
-#define IRQ_PER_CPU 256 /* IRQ is per CPU */
-
-/*
- * Interrupt controller descriptor. This is all we need
- * to describe about the low-level hardware.
- */
-struct hw_interrupt_type {
- const char * typename;
- unsigned int (*startup)(unsigned int irq);
- void (*shutdown)(unsigned int irq);
- void (*enable)(unsigned int irq);
- void (*disable)(unsigned int irq);
- void (*ack)(unsigned int irq);
- void (*end)(unsigned int irq);
- void (*set_affinity)(unsigned int irq, cpumask_t dest);
-};
-
-typedef struct hw_interrupt_type hw_irq_controller;
-
-/*
- * This is the "IRQ descriptor", which contains various information
- * about the irq, including what kind of hardware handling it has,
- * whether it is disabled etc etc.
- *
- * Pad this out to 32 bytes for cache and indexing reasons.
- */
-typedef struct irq_desc {
- hw_irq_controller *handler;
- void *handler_data;
- struct irqaction *action; /* IRQ action list */
- unsigned int status; /* IRQ status */
- unsigned int depth; /* nested irq disables */
- unsigned int irq_count; /* For detecting broken interrupts */
- unsigned int irqs_unhandled;
- spinlock_t lock;
-} ____cacheline_aligned irq_desc_t;
-
-extern irq_desc_t irq_desc [NR_IRQS];
-
-#include <asm/hw_irq.h> /* the arch dependent stuff */
-
-extern int setup_irq(unsigned int irq, struct irqaction * new);
-extern int teardown_irq(unsigned int irq, struct irqaction * old);
-
-#ifdef CONFIG_GENERIC_HARDIRQS
-extern cpumask_t irq_affinity[NR_IRQS];
-extern int no_irq_affinity;
-extern int noirqdebug_setup(char *str);
-
-extern fastcall int handle_IRQ_event(unsigned int irq, struct pt_regs *regs,
- struct irqaction *action);
-extern fastcall unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs);
-extern void note_interrupt(unsigned int irq, irq_desc_t *desc, int action_ret);
-extern void report_bad_irq(unsigned int irq, irq_desc_t *desc, int action_ret);
-extern int can_request_irq(unsigned int irq, unsigned long irqflags);
-
-extern void init_irq_proc(void);
-#endif
-
-extern hw_irq_controller no_irq_type; /* needed in every arch ? */
-
-#endif
-
-#endif /* __irq_h */
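
A controller hooks into this by filling in a hw_interrupt_type and pointing irq_desc[irq].handler at it; the flow code calls ack() on entry to an interrupt and end() on exit. A skeletal, hypothetical controller with the mask/unmask bodies elided:

    static void example_unmask(unsigned int irq) { /* unmask the line */ }
    static void example_mask(unsigned int irq)   { /* mask the line */ }

    static unsigned int example_startup(unsigned int irq)
    {
        example_unmask(irq);
        return 0;                /* no interrupt pending at startup */
    }

    static void example_end(unsigned int irq)
    {
        if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
            example_unmask(irq);
    }

    static struct hw_interrupt_type example_irq_type = {
        .typename = "example",
        .startup  = example_startup,
        .shutdown = example_mask,
        .enable   = example_unmask,
        .disable  = example_mask,
        .ack      = example_mask,    /* masked until the handler finishes */
        .end      = example_end,
    };
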
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/kernel/irq/manage.c
--- a/linux-2.6.11-xen-sparse/kernel/irq/manage.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,392 +0,0 @@
-/*
- * linux/kernel/irq/manage.c
- *
- * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
- *
- * This file contains driver APIs to the irq subsystem.
- */
-
-#include <linux/irq.h>
-#include <linux/module.h>
-#include <linux/random.h>
-#include <linux/interrupt.h>
-
-#include "internals.h"
-
-#ifdef CONFIG_SMP
-
-cpumask_t irq_affinity[NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };
-
-/**
- * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
- *
- * This function waits for any pending IRQ handlers for this interrupt
- * to complete before returning. If you use this function while
- * holding a resource the IRQ handler may need you will deadlock.
- *
- * This function may be called - with care - from IRQ context.
- */
-void synchronize_irq(unsigned int irq)
-{
- struct irq_desc *desc = irq_desc + irq;
-
- while (desc->status & IRQ_INPROGRESS)
- cpu_relax();
-}
-
-EXPORT_SYMBOL(synchronize_irq);
-
-#endif
-
-/**
- * disable_irq_nosync - disable an irq without waiting
- * @irq: Interrupt to disable
- *
- * Disable the selected interrupt line. Disables and Enables are
- * nested.
- * Unlike disable_irq(), this function does not ensure existing
- * instances of the IRQ handler have completed before returning.
- *
- * This function may be called from IRQ context.
- */
-void disable_irq_nosync(unsigned int irq)
-{
- irq_desc_t *desc = irq_desc + irq;
- unsigned long flags;
-
- spin_lock_irqsave(&desc->lock, flags);
- if (!desc->depth++) {
- desc->status |= IRQ_DISABLED;
- desc->handler->disable(irq);
- }
- spin_unlock_irqrestore(&desc->lock, flags);
-}
-
-EXPORT_SYMBOL(disable_irq_nosync);
-
-/**
- * disable_irq - disable an irq and wait for completion
- * @irq: Interrupt to disable
- *
- * Disable the selected interrupt line. Enables and Disables are
- * nested.
- * This function waits for any pending IRQ handlers for this interrupt
- * to complete before returning. If you use this function while
- * holding a resource the IRQ handler may need you will deadlock.
- *
- * This function may be called - with care - from IRQ context.
- */
-void disable_irq(unsigned int irq)
-{
- irq_desc_t *desc = irq_desc + irq;
-
- disable_irq_nosync(irq);
- if (desc->action)
- synchronize_irq(irq);
-}
-
-EXPORT_SYMBOL(disable_irq);
-
-/**
- * enable_irq - enable handling of an irq
- * @irq: Interrupt to enable
- *
- * Undoes the effect of one call to disable_irq(). If this
- * matches the last disable, processing of interrupts on this
- * IRQ line is re-enabled.
- *
- * This function may be called from IRQ context.
- */
-void enable_irq(unsigned int irq)
-{
- irq_desc_t *desc = irq_desc + irq;
- unsigned long flags;
-
- spin_lock_irqsave(&desc->lock, flags);
- switch (desc->depth) {
- case 0:
- WARN_ON(1);
- break;
- case 1: {
- unsigned int status = desc->status & ~IRQ_DISABLED;
-
- desc->status = status;
- if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
- desc->status = status | IRQ_REPLAY;
- hw_resend_irq(desc->handler,irq);
- }
- desc->handler->enable(irq);
- /* fall-through */
- }
- default:
- desc->depth--;
- }
- spin_unlock_irqrestore(&desc->lock, flags);
-}
-
-EXPORT_SYMBOL(enable_irq);
-
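
The depth field makes this pair a counted nest rather than a toggle, which is also why the WARN_ON(1) above fires on an unbalanced enable. Concretely (hypothetical illustration):

    static void example_nesting(unsigned int irq)
    {
        disable_irq(irq);  /* depth 0 -> 1: masked, handlers drained   */
        disable_irq(irq);  /* depth 1 -> 2: already masked             */
        enable_irq(irq);   /* depth 2 -> 1: still masked               */
        enable_irq(irq);   /* depth 1 -> 0: unmasked, pending replayed */
    }
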
-/*
- * Internal function that tells the architecture code whether a
- * particular irq has been exclusively allocated or is available
- * for driver use.
- */
-int can_request_irq(unsigned int irq, unsigned long irqflags)
-{
- struct irqaction *action;
-
- if (irq >= NR_IRQS)
- return 0;
-
- action = irq_desc[irq].action;
- if (action)
- if (irqflags & action->flags & SA_SHIRQ)
- action = NULL;
-
- return !action;
-}
-
-/**
- * setup_irq - register an irqaction structure
- * @irq: Interrupt to register
- * @irqaction: The irqaction structure to be registered
- *
- * Normally called by request_irq, this function can be used
- * directly to allocate special interrupts that are part of the
- * architecture.
- */
-int setup_irq(unsigned int irq, struct irqaction * new)
-{
- struct irq_desc *desc = irq_desc + irq;
- struct irqaction *old, **p;
- unsigned long flags;
- int shared = 0;
-
- if (desc->handler == &no_irq_type)
- return -ENOSYS;
- /*
- * Some drivers like serial.c use request_irq() heavily,
- * so we have to be careful not to interfere with a
- * running system.
- */
- if (new->flags & SA_SAMPLE_RANDOM) {
- /*
- * This function might sleep, we want to call it first,
- * outside of the atomic block.
- * Yes, this might clear the entropy pool if the wrong
- * driver is attempted to be loaded, without actually
- * installing a new handler, but is this really a problem,
- * only the sysadmin is able to do this.
- */
- rand_initialize_irq(irq);
- }
-
- /*
- * The following block of code has to be executed atomically
- */
- spin_lock_irqsave(&desc->lock,flags);
- p = &desc->action;
- if ((old = *p) != NULL) {
- /* Can't share interrupts unless both agree to */
- if (!(old->flags & new->flags & SA_SHIRQ)) {
- spin_unlock_irqrestore(&desc->lock,flags);
- return -EBUSY;
- }
-
- /* add new interrupt at end of irq queue */
- do {
- p = &old->next;
- old = *p;
- } while (old);
- shared = 1;
- }
-
- *p = new;
-
- if (!shared) {
- desc->depth = 0;
- desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT |
- IRQ_WAITING | IRQ_INPROGRESS);
- if (desc->handler->startup)
- desc->handler->startup(irq);
- else
- desc->handler->enable(irq);
- }
- spin_unlock_irqrestore(&desc->lock,flags);
-
- new->irq = irq;
- register_irq_proc(irq);
- new->dir = NULL;
- register_handler_proc(irq, new);
-
- return 0;
-}
-
-/*
- * teardown_irq - unregister an irqaction
- * @irq: Interrupt line being freed
- * @old: Pointer to the irqaction that is to be unregistered
- *
- * This function is called by free_irq and does the actual
- * business of unregistering the handler. It exists as a
- * separate function to enable handlers to be unregistered
- * for irqactions that have been allocated statically at
- * boot time.
- *
- * This function must not be called from interrupt context.
- */
-int teardown_irq(unsigned int irq, struct irqaction * old)
-{
- struct irq_desc *desc;
- struct irqaction **p;
- unsigned long flags;
-
- if (irq >= NR_IRQS)
- return -ENOENT;
-
- desc = irq_desc + irq;
- spin_lock_irqsave(&desc->lock,flags);
- p = &desc->action;
- for (;;) {
- struct irqaction * action = *p;
-
- if (action) {
- struct irqaction **pp = p;
-
- p = &action->next;
- if (action != old)
- continue;
-
- /* Found it - now remove it from the list of entries */
- *pp = action->next;
- if (!desc->action) {
- desc->status |= IRQ_DISABLED;
- if (desc->handler->shutdown)
- desc->handler->shutdown(irq);
- else
- desc->handler->disable(irq);
- }
- spin_unlock_irqrestore(&desc->lock,flags);
- unregister_handler_proc(irq, action);
-
- /* Make sure it's not being used on another CPU */
- synchronize_irq(irq);
- return 0;
- }
- printk(KERN_ERR "Trying to teardown free IRQ%d\n",irq);
- spin_unlock_irqrestore(&desc->lock,flags);
- return -ENOENT;
- }
-}
-
-/**
- * free_irq - free an interrupt
- * @irq: Interrupt line to free
- * @dev_id: Device identity to free
- *
- * Remove an interrupt handler. The handler is removed and if the
- * interrupt line is no longer in use by any driver it is disabled.
- * On a shared IRQ the caller must ensure the interrupt is disabled
- * on the card it drives before calling this function. The function
- * does not return until any executing interrupts for this IRQ
- * have completed.
- *
- * This function must not be called from interrupt context.
- */
-void free_irq(unsigned int irq, void *dev_id)
-{
- struct irq_desc *desc;
- struct irqaction *action;
- unsigned long flags;
-
- if (irq >= NR_IRQS)
- return;
-
- desc = irq_desc + irq;
- spin_lock_irqsave(&desc->lock,flags);
- for (action = desc->action; action != NULL; action = action->next) {
- if (action->dev_id != dev_id)
- continue;
-
- spin_unlock_irqrestore(&desc->lock,flags);
-
- if (teardown_irq(irq, action) == 0)
- kfree(action);
- return;
- }
- printk(KERN_ERR "Trying to free free IRQ%d\n",irq);
- spin_unlock_irqrestore(&desc->lock,flags);
- return;
-}
-
-EXPORT_SYMBOL(free_irq);
-
-/**
- * request_irq - allocate an interrupt line
- * @irq: Interrupt line to allocate
- * @handler: Function to be called when the IRQ occurs
- * @irqflags: Interrupt type flags
- * @devname: An ascii name for the claiming device
- * @dev_id: A cookie passed back to the handler function
- *
- * This call allocates interrupt resources and enables the
- * interrupt line and IRQ handling. From the point this
- * call is made your handler function may be invoked. Since
- * your handler function must clear any interrupt the board
- * raises, you must take care both to initialise your hardware
- * and to set up the interrupt handler in the right order.
- *
- * Dev_id must be globally unique. Normally the address of the
- * device data structure is used as the cookie. Since the handler
- * receives this value it makes sense to use it.
- *
- * If your interrupt is shared you must pass a non NULL dev_id
- * as this is required when freeing the interrupt.
- *
- * Flags:
- *
- * SA_SHIRQ Interrupt is shared
- * SA_INTERRUPT Disable local interrupts while processing
- * SA_SAMPLE_RANDOM The interrupt can be used for entropy
- *
- */
-int request_irq(unsigned int irq,
- irqreturn_t (*handler)(int, void *, struct pt_regs *),
- unsigned long irqflags, const char * devname, void *dev_id)
-{
- struct irqaction * action;
- int retval;
-
- /*
- * Sanity-check: shared interrupts must pass in a real dev-ID,
- * otherwise we'll have trouble later trying to figure out
- * which interrupt is which (messes up the interrupt freeing
- * logic etc).
- */
- if ((irqflags & SA_SHIRQ) && !dev_id)
- return -EINVAL;
- if (irq >= NR_IRQS)
- return -EINVAL;
- if (!handler)
- return -EINVAL;
-
- action = kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
- if (!action)
- return -ENOMEM;
-
- action->handler = handler;
- action->flags = irqflags;
- cpus_clear(action->mask);
- action->name = devname;
- action->next = NULL;
- action->dev_id = dev_id;
-
- retval = setup_irq(irq, action);
- if (retval)
- kfree(action);
-
- return retval;
-}
-
-EXPORT_SYMBOL(request_irq);
-
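
For completeness, the canonical driver-side use of the API above; the device structure and handler below are hypothetical. The pairing rules the code enforces are visible here: a shared line needs a unique non-NULL dev_id, and the same dev_id must be handed back to free_irq():

    struct mydev { int irq; /* ... */ };

    static irqreturn_t mydev_interrupt(int irq, void *dev_id,
                                       struct pt_regs *regs)
    {
        struct mydev *dev = dev_id;
        /* quiesce the device via 'dev', then: */
        return IRQ_HANDLED;
    }

    static int mydev_attach(struct mydev *dev)
    {
        return request_irq(dev->irq, mydev_interrupt,
                           SA_SHIRQ, "mydev", dev);
    }

    static void mydev_detach(struct mydev *dev)
    {
        free_irq(dev->irq, dev);  /* waits for in-flight handlers */
    }
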
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/mkbuildtree
--- a/linux-2.6.11-xen-sparse/mkbuildtree Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,111 +0,0 @@
-#!/bin/bash
-
-# mkbuildtree <build tree>
-#
-# Creates symbolic links in <build tree> for the sparse tree
-# in the current directory.
-
-# Script to determine the relative path between two directories.
-# Copyright (c) D. J. Hawkey Jr. 2002
-# Fixed for Xen project by K. Fraser in 2003.
-abs_to_rel ()
-{
- local CWD SRCPATH
-
- if [ "$1" != "/" -a "${1##*[^/]}" = "/" ]; then
- SRCPATH=${1%?}
- else
- SRCPATH=$1
- fi
- if [ "$2" != "/" -a "${2##*[^/]}" = "/" ]; then
- DESTPATH=${2%?}
- else
- DESTPATH=$2
- fi
-
- CWD=$PWD
- [ "${1%%[^/]*}" != "/" ] && cd $1 && SRCPATH=$PWD
- [ "${2%%[^/]*}" != "/" ] && cd $2 && DESTPATH=$PWD
- [ "$CWD" != "$PWD" ] && cd $CWD
-
- BASEPATH=$SRCPATH
-
- [ "$SRCPATH" = "$DESTPATH" ] && DESTPATH="." && return
- [ "$SRCPATH" = "/" ] && DESTPATH=${DESTPATH#?} && return
-
- while [ "$BASEPATH/" != "${DESTPATH%${DESTPATH#$BASEPATH/}}" ]; do
- BASEPATH=${BASEPATH%/*}
- done
-
- SRCPATH=${SRCPATH#$BASEPATH}
- DESTPATH=${DESTPATH#$BASEPATH}
- DESTPATH=${DESTPATH#?}
- while [ -n "$SRCPATH" ]; do
- SRCPATH=${SRCPATH%/*}
- DESTPATH="../$DESTPATH"
- done
-
- [ -z "$BASEPATH" ] && BASEPATH="/"
- [ "${DESTPATH##*[^/]}" = "/" ] && DESTPATH=${DESTPATH%?}
-}
-
-# relative_lndir <target_dir>
-# Creates a tree of symlinks in the current working directory that mirror
-# real files in <target_dir>. <target_dir> should be relative to the current
-# working directory. Symlinks in <target_dir> are ignored. Source-control files
-# are ignored.
-relative_lndir ()
-{
- local SYMLINK_DIR REAL_DIR pref i j
- SYMLINK_DIR=$PWD
- REAL_DIR=$1
- (
- cd $REAL_DIR
- for i in `find . -type d | grep -v SCCS`; do
- [ -d $SYMLINK_DIR/$i ] || mkdir -p $SYMLINK_DIR/$i
- (
- cd $i
- pref=`echo $i | sed -e 's#/[^/]*#../#g' -e 's#^\.##'`
- for j in `find . -maxdepth 1 -type f -o -type l`; do
- ln -sf ${pref}${REAL_DIR}/$i/$j ${SYMLINK_DIR}/$i/$j
- done
- )
- done
- )
-}
-
-[ "$1" == "" ] && { echo "Syntax: $0 <linux tree to xenify>"; exit 1; }
-
-# Get absolute path to the destination directory
-pushd . >/dev/null
-cd ${1} || { echo "cannot cd to ${1}"; exit 1; }
-AD=$PWD
-popd >/dev/null
-
-# Get absolute path to the source directory
-AS=`pwd`
-
-# Get path to source, relative to destination
-abs_to_rel ${AD} ${AS}
-RS=$DESTPATH
-
-# Remove old copies of files and directories at the destination
-for i in `find . -type f -o -type l` ; do rm -f ${AD}/${i#./} ; done
-
-# We now work from the destination directory
-cd ${AD} || { echo "cannot cd to ${AD}"; exit 1; }
-
-# Remove old symlinks
-for i in `find . -type l`; do rm -f $i; done
-
-# Create symlinks of files and directories which exist in the sparse source
-relative_lndir ${RS}
-rm -f mkbuildtree
-
-
-# Create links to the shared definitions of the Xen interfaces.
-rm -rf ${AD}/include/asm-xen/xen-public
-mkdir ${AD}/include/asm-xen/xen-public
-cd ${AD}/include/asm-xen/xen-public
-relative_lndir ../../../${RS}/../xen/include/public
-
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/mm/highmem.c
--- a/linux-2.6.11-xen-sparse/mm/highmem.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,614 +0,0 @@
-/*
- * High memory handling common code and variables.
- *
- * (C) 1999 Andrea Arcangeli, SuSE GmbH, andrea@xxxxxxx
- * Gerhard Wichert, Siemens AG, Gerhard.Wichert@xxxxxxxxxxxxxx
- *
- *
- * Redesigned the x86 32-bit VM architecture to deal with
- * 64-bit physical space. With current x86 CPUs this
- * means up to 64 Gigabytes physical RAM.
- *
- * Rewrote high memory support to move the page cache into
- * high memory. Implemented permanent (schedulable) kmaps
- * based on Linus' idea.
- *
- * Copyright (C) 1999 Ingo Molnar <mingo@xxxxxxxxxx>
- */
-
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/swap.h>
-#include <linux/bio.h>
-#include <linux/pagemap.h>
-#include <linux/mempool.h>
-#include <linux/blkdev.h>
-#include <linux/init.h>
-#include <linux/hash.h>
-#include <linux/highmem.h>
-#include <asm/tlbflush.h>
-
-static mempool_t *page_pool, *isa_page_pool;
-
-static void *page_pool_alloc(int gfp_mask, void *data)
-{
- int gfp = gfp_mask | (int) (long) data;
-
- return alloc_page(gfp);
-}
-
-static void page_pool_free(void *page, void *data)
-{
- __free_page(page);
-}
-
-/*
- * Virtual_count is not a pure "count".
- * 0 means that it is not mapped, and has not been mapped
- * since a TLB flush - it is usable.
- * 1 means that there are no users, but it has been mapped
- * since the last TLB flush - so we can't use it.
- * n means that there are (n-1) current users of it.
- */
-#ifdef CONFIG_HIGHMEM
-static int pkmap_count[LAST_PKMAP];
-static unsigned int last_pkmap_nr;
-static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);
-
-pte_t * pkmap_page_table;
-
-static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);
-
-static void flush_all_zero_pkmaps(void)
-{
- int i;
-
- flush_cache_kmaps();
-
- for (i = 0; i < LAST_PKMAP; i++) {
- struct page *page;
-
- /*
- * zero means we don't have anything to do,
- * >1 means that it is still in use. Only
- * a count of 1 means that it is free but
- * needs to be unmapped
- */
- if (pkmap_count[i] != 1)
- continue;
- pkmap_count[i] = 0;
-
- /* sanity check */
- if (pte_none(pkmap_page_table[i]))
- BUG();
-
- /*
- * Don't need an atomic fetch-and-clear op here;
- * no-one has the page mapped, and cannot get at
- * its virtual address (and hence PTE) without first
- * getting the kmap_lock (which is held here).
- * So no dangers, even with speculative execution.
- */
- page = pte_page(pkmap_page_table[i]);
- pte_clear(&pkmap_page_table[i]);
-
- set_page_address(page, NULL);
- }
- flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
-}
-
-static inline unsigned long map_new_virtual(struct page *page)
-{
- unsigned long vaddr;
- int count;
-
-start:
- count = LAST_PKMAP;
- /* Find an empty entry */
- for (;;) {
- last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK;
- if (!last_pkmap_nr) {
- flush_all_zero_pkmaps();
- count = LAST_PKMAP;
- }
- if (!pkmap_count[last_pkmap_nr])
- break; /* Found a usable entry */
- if (--count)
- continue;
-
- /*
- * Sleep for somebody else to unmap their entries
- */
- {
- DECLARE_WAITQUEUE(wait, current);
-
- __set_current_state(TASK_UNINTERRUPTIBLE);
- add_wait_queue(&pkmap_map_wait, &wait);
- spin_unlock(&kmap_lock);
- schedule();
- remove_wait_queue(&pkmap_map_wait, &wait);
- spin_lock(&kmap_lock);
-
- /* Somebody else might have mapped it while we slept */
- if (page_address(page))
- return (unsigned long)page_address(page);
-
- /* Re-start */
- goto start;
- }
- }
- vaddr = PKMAP_ADDR(last_pkmap_nr);
- set_pte(&(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
-
- pkmap_count[last_pkmap_nr] = 1;
- set_page_address(page, (void *)vaddr);
-
- return vaddr;
-}
-
-void kmap_flush_unused(void)
-{
- spin_lock(&kmap_lock);
- flush_all_zero_pkmaps();
- spin_unlock(&kmap_lock);
-}
-
-EXPORT_SYMBOL(kmap_flush_unused);
-
-void fastcall *kmap_high(struct page *page)
-{
- unsigned long vaddr;
-
- /*
- * For highmem pages, we can't trust "virtual" until
- * after we have the lock.
- *
- * We cannot call this from interrupts, as it may block
- */
- spin_lock(&kmap_lock);
- vaddr = (unsigned long)page_address(page);
- if (!vaddr)
- vaddr = map_new_virtual(page);
- pkmap_count[PKMAP_NR(vaddr)]++;
- if (pkmap_count[PKMAP_NR(vaddr)] < 2)
- BUG();
- spin_unlock(&kmap_lock);
- return (void*) vaddr;
-}
-
-EXPORT_SYMBOL(kmap_high);
-
-void fastcall kunmap_high(struct page *page)
-{
- unsigned long vaddr;
- unsigned long nr;
- int need_wakeup;
-
- spin_lock(&kmap_lock);
- vaddr = (unsigned long)page_address(page);
- if (!vaddr)
- BUG();
- nr = PKMAP_NR(vaddr);
-
- /*
- * A count must never go down to zero
- * without a TLB flush!
- */
- need_wakeup = 0;
- switch (--pkmap_count[nr]) {
- case 0:
- BUG();
- case 1:
- /*
- * Avoid an unnecessary wake_up() function call.
- * The common case is pkmap_count[] == 1, but
- * no waiters.
- * The tasks queued in the wait-queue are guarded
- * by both the lock in the wait-queue-head and by
- * the kmap_lock. As the kmap_lock is held here,
- * no need for the wait-queue-head's lock. Simply
- * test if the queue is empty.
- */
- need_wakeup = waitqueue_active(&pkmap_map_wait);
- }
- spin_unlock(&kmap_lock);
-
- /* do wake-up, if needed, race-free outside of the spin lock */
- if (need_wakeup)
- wake_up(&pkmap_map_wait);
-}
-
-EXPORT_SYMBOL(kunmap_high);
-
-#define POOL_SIZE 64
-
-static __init int init_emergency_pool(void)
-{
- struct sysinfo i;
- si_meminfo(&i);
- si_swapinfo(&i);
-
- if (!i.totalhigh)
- return 0;
-
- page_pool = mempool_create(POOL_SIZE, page_pool_alloc, page_pool_free, NULL);
- if (!page_pool)
- BUG();
- printk("highmem bounce pool size: %d pages\n", POOL_SIZE);
-
- return 0;
-}
-
-__initcall(init_emergency_pool);
-
-/*
- * highmem version, map in to vec
- */
-static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
-{
- unsigned long flags;
- unsigned char *vto;
-
- local_irq_save(flags);
- vto = kmap_atomic(to->bv_page, KM_BOUNCE_READ);
- memcpy(vto + to->bv_offset, vfrom, to->bv_len);
- kunmap_atomic(vto, KM_BOUNCE_READ);
- local_irq_restore(flags);
-}
-
-#else /* CONFIG_HIGHMEM */
-
-#define bounce_copy_vec(to, vfrom) \
- memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len)
-
-#endif
-
-#define ISA_POOL_SIZE 16
-
-/*
- * gets called "every" time someone init's a queue with BLK_BOUNCE_ISA
- * as the max address, so check if the pool has already been created.
- */
-int init_emergency_isa_pool(void)
-{
- if (isa_page_pool)
- return 0;
-
- isa_page_pool = mempool_create(ISA_POOL_SIZE, page_pool_alloc, page_pool_free, (void *) __GFP_DMA);
- if (!isa_page_pool)
- BUG();
-
- printk("isa bounce pool size: %d pages\n", ISA_POOL_SIZE);
- return 0;
-}
-
-/*
- * Simple bounce buffer support for highmem pages. Depending on the
- * queue gfp mask set, *to may or may not be a highmem page. kmap it
- * always, it will do the Right Thing
- */
-static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
-{
- unsigned char *vfrom;
- struct bio_vec *tovec, *fromvec;
- int i;
-
- __bio_for_each_segment(tovec, to, i, 0) {
- fromvec = from->bi_io_vec + i;
-
- /*
- * not bounced
- */
- if (tovec->bv_page == fromvec->bv_page)
- continue;
-
- /*
- * fromvec->bv_offset and fromvec->bv_len might have been
- * modified by the block layer, so use the original copy,
- * bounce_copy_vec already uses tovec->bv_len
- */
- vfrom = page_address(fromvec->bv_page) + tovec->bv_offset;
-
- flush_dcache_page(tovec->bv_page);
- bounce_copy_vec(tovec, vfrom);
- }
-}
-
-static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
-{
- struct bio *bio_orig = bio->bi_private;
- struct bio_vec *bvec, *org_vec;
- int i;
-
- if (test_bit(BIO_EOPNOTSUPP, &bio->bi_flags))
- set_bit(BIO_EOPNOTSUPP, &bio_orig->bi_flags);
-
- /*
- * free up bounce indirect pages used
- */
- __bio_for_each_segment(bvec, bio, i, 0) {
- org_vec = bio_orig->bi_io_vec + i;
- if (bvec->bv_page == org_vec->bv_page)
- continue;
-
- mempool_free(bvec->bv_page, pool);
- }
-
- bio_endio(bio_orig, bio_orig->bi_size, err);
- bio_put(bio);
-}
-
-static int bounce_end_io_write(struct bio *bio, unsigned int bytes_done, int err)
-{
- if (bio->bi_size)
- return 1;
-
- bounce_end_io(bio, page_pool, err);
- return 0;
-}
-
-static int bounce_end_io_write_isa(struct bio *bio, unsigned int bytes_done, int err)
-{
- if (bio->bi_size)
- return 1;
-
- bounce_end_io(bio, isa_page_pool, err);
- return 0;
-}
-
-static void __bounce_end_io_read(struct bio *bio, mempool_t *pool, int err)
-{
- struct bio *bio_orig = bio->bi_private;
-
- if (test_bit(BIO_UPTODATE, &bio->bi_flags))
- copy_to_high_bio_irq(bio_orig, bio);
-
- bounce_end_io(bio, pool, err);
-}
-
-static int bounce_end_io_read(struct bio *bio, unsigned int bytes_done, int err)
-{
- if (bio->bi_size)
- return 1;
-
- __bounce_end_io_read(bio, page_pool, err);
- return 0;
-}
-
-static int bounce_end_io_read_isa(struct bio *bio, unsigned int bytes_done, int err)
-{
- if (bio->bi_size)
- return 1;
-
- __bounce_end_io_read(bio, isa_page_pool, err);
- return 0;
-}
-
-static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig,
- mempool_t *pool)
-{
- struct page *page;
- struct bio *bio = NULL;
- int i, rw = bio_data_dir(*bio_orig);
- struct bio_vec *to, *from;
-
- bio_for_each_segment(from, *bio_orig, i) {
- page = from->bv_page;
-
- /*
- * is destination page below bounce pfn?
- */
- if (page_to_pfn(page) < q->bounce_pfn)
- continue;
-
- /*
- * irk, bounce it
- */
- if (!bio)
- bio = bio_alloc(GFP_NOIO, (*bio_orig)->bi_vcnt);
-
- to = bio->bi_io_vec + i;
-
- to->bv_page = mempool_alloc(pool, q->bounce_gfp);
- to->bv_len = from->bv_len;
- to->bv_offset = from->bv_offset;
-
- if (rw == WRITE) {
- char *vto, *vfrom;
-
- flush_dcache_page(from->bv_page);
- vto = page_address(to->bv_page) + to->bv_offset;
- vfrom = kmap(from->bv_page) + from->bv_offset;
- memcpy(vto, vfrom, to->bv_len);
- kunmap(from->bv_page);
- }
- }
-
- /*
- * no pages bounced
- */
- if (!bio)
- return;
-
- /*
- * at least one page was bounced, fill in possible non-highmem
- * pages
- */
- __bio_for_each_segment(from, *bio_orig, i, 0) {
- to = bio_iovec_idx(bio, i);
- if (!to->bv_page) {
- to->bv_page = from->bv_page;
- to->bv_len = from->bv_len;
- to->bv_offset = from->bv_offset;
- }
- }
-
- bio->bi_bdev = (*bio_orig)->bi_bdev;
- bio->bi_flags |= (1 << BIO_BOUNCED);
- bio->bi_sector = (*bio_orig)->bi_sector;
- bio->bi_rw = (*bio_orig)->bi_rw;
-
- bio->bi_vcnt = (*bio_orig)->bi_vcnt;
- bio->bi_idx = (*bio_orig)->bi_idx;
- bio->bi_size = (*bio_orig)->bi_size;
-
- if (pool == page_pool) {
- bio->bi_end_io = bounce_end_io_write;
- if (rw == READ)
- bio->bi_end_io = bounce_end_io_read;
- } else {
- bio->bi_end_io = bounce_end_io_write_isa;
- if (rw == READ)
- bio->bi_end_io = bounce_end_io_read_isa;
- }
-
- bio->bi_private = *bio_orig;
- *bio_orig = bio;
-}
-
-void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
-{
- mempool_t *pool;
-
- /*
- * for non-isa bounce case, just check if the bounce pfn is equal
- * to or bigger than the highest pfn in the system -- in that case,
- * don't waste time iterating over bio segments
- */
- if (!(q->bounce_gfp & GFP_DMA)) {
- if (q->bounce_pfn >= blk_max_pfn)
- return;
- pool = page_pool;
- } else {
- BUG_ON(!isa_page_pool);
- pool = isa_page_pool;
- }
-
- /*
- * slow path
- */
- __blk_queue_bounce(q, bio_orig, pool);
-}
-
-EXPORT_SYMBOL(blk_queue_bounce);
-
-#if defined(HASHED_PAGE_VIRTUAL)
-
-#define PA_HASH_ORDER 7
-
-/*
- * Describes one page->virtual association
- */
-struct page_address_map {
- struct page *page;
- void *virtual;
- struct list_head list;
-};
-
-/*
- * page_address_map freelist, allocated from page_address_maps.
- */
-static struct list_head page_address_pool; /* freelist */
-static spinlock_t pool_lock; /* protects page_address_pool */
-
-/*
- * Hash table bucket
- */
-static struct page_address_slot {
- struct list_head lh; /* List of page_address_maps */
- spinlock_t lock; /* Protect this bucket's list */
-} ____cacheline_aligned_in_smp page_address_htable[1<<PA_HASH_ORDER];
-
-static struct page_address_slot *page_slot(struct page *page)
-{
- return &page_address_htable[hash_ptr(page, PA_HASH_ORDER)];
-}
-
-void *page_address(struct page *page)
-{
- unsigned long flags;
- void *ret;
- struct page_address_slot *pas;
-
- if (!PageHighMem(page))
- return lowmem_page_address(page);
-
- pas = page_slot(page);
- ret = NULL;
- spin_lock_irqsave(&pas->lock, flags);
- if (!list_empty(&pas->lh)) {
- struct page_address_map *pam;
-
- list_for_each_entry(pam, &pas->lh, list) {
- if (pam->page == page) {
- ret = pam->virtual;
- goto done;
- }
- }
- }
-done:
- spin_unlock_irqrestore(&pas->lock, flags);
- return ret;
-}
-
-EXPORT_SYMBOL(page_address);
-
-void set_page_address(struct page *page, void *virtual)
-{
- unsigned long flags;
- struct page_address_slot *pas;
- struct page_address_map *pam;
-
- BUG_ON(!PageHighMem(page));
-
- pas = page_slot(page);
- if (virtual) { /* Add */
- BUG_ON(list_empty(&page_address_pool));
-
- spin_lock_irqsave(&pool_lock, flags);
- pam = list_entry(page_address_pool.next,
- struct page_address_map, list);
- list_del(&pam->list);
- spin_unlock_irqrestore(&pool_lock, flags);
-
- pam->page = page;
- pam->virtual = virtual;
-
- spin_lock_irqsave(&pas->lock, flags);
- list_add_tail(&pam->list, &pas->lh);
- spin_unlock_irqrestore(&pas->lock, flags);
- } else { /* Remove */
- spin_lock_irqsave(&pas->lock, flags);
- list_for_each_entry(pam, &pas->lh, list) {
- if (pam->page == page) {
- list_del(&pam->list);
- spin_unlock_irqrestore(&pas->lock, flags);
- spin_lock_irqsave(&pool_lock, flags);
- list_add_tail(&pam->list, &page_address_pool);
- spin_unlock_irqrestore(&pool_lock, flags);
- goto done;
- }
- }
- spin_unlock_irqrestore(&pas->lock, flags);
- }
-done:
- return;
-}
-
-static struct page_address_map page_address_maps[LAST_PKMAP];
-
-void __init page_address_init(void)
-{
- int i;
-
- INIT_LIST_HEAD(&page_address_pool);
- for (i = 0; i < ARRAY_SIZE(page_address_maps); i++)
- list_add(&page_address_maps[i].list, &page_address_pool);
- for (i = 0; i < ARRAY_SIZE(page_address_htable); i++) {
- INIT_LIST_HEAD(&page_address_htable[i].lh);
- spin_lock_init(&page_address_htable[i].lock);
- }
- spin_lock_init(&pool_lock);
-}
-
-#endif /* defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL) */
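
The bounce machinery above is steered entirely by the queue's bounce_pfn and bounce_gfp, which a driver fixes once at initialisation; afterwards every bio passes through blk_queue_bounce() and any page above the limit is substituted transparently. A hypothetical init path (blk_queue_bounce_limit() is the stock 2.6 block-layer setter, not something this file defines):

    #include <linux/blkdev.h>

    static void example_init_queue(request_queue_t *q)
    {
        /* A device that can only DMA below 16MB: pages above the ISA
         * boundary will be bounced through isa_page_pool above. */
        blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);
    }
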
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/mm/memory.c
--- a/linux-2.6.11-xen-sparse/mm/memory.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,2303 +0,0 @@
-/*
- * linux/mm/memory.c
- *
- * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
- */
-
-/*
- * demand-loading started 01.12.91 - seems it is high on the list of
- * things wanted, and it should be easy to implement. - Linus
- */
-
-/*
- * Ok, demand-loading was easy, shared pages a little bit tricker. Shared
- * pages started 02.12.91, seems to work. - Linus.
- *
- * Tested sharing by executing about 30 /bin/sh: under the old kernel it
- * would have taken more than the 6M I have free, but it worked well as
- * far as I could see.
- *
- * Also corrected some "invalidate()"s - I wasn't doing enough of them.
- */
-
-/*
- * Real VM (paging to/from disk) started 18.12.91. Much more work and
- * thought has to go into this. Oh, well..
- * 19.12.91 - works, somewhat. Sometimes I get faults, don't know why.
- * Found it. Everything seems to work now.
- * 20.12.91 - Ok, making the swap-device changeable like the root.
- */
-
-/*
- * 05.04.94 - Multi-page memory management added for v1.1.
- * Idea by Alex Bligh (alex@xxxxxxxxxxxxxxx)
- *
- * 16.07.99 - Support of BIGMEM added by Gerhard Wichert, Siemens AG
- * (Gerhard.Wichert@xxxxxxxxxxxxxx)
- *
- * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
- */
-
-#include <linux/kernel_stat.h>
-#include <linux/mm.h>
-#include <linux/hugetlb.h>
-#include <linux/mman.h>
-#include <linux/swap.h>
-#include <linux/highmem.h>
-#include <linux/pagemap.h>
-#include <linux/rmap.h>
-#include <linux/acct.h>
-#include <linux/module.h>
-#include <linux/init.h>
-
-#include <asm/pgalloc.h>
-#include <asm/uaccess.h>
-#include <asm/tlb.h>
-#include <asm/tlbflush.h>
-#include <asm/pgtable.h>
-
-#include <linux/swapops.h>
-#include <linux/elf.h>
-
-#ifndef CONFIG_DISCONTIGMEM
-/* use the per-pgdat data instead for discontigmem - mbligh */
-unsigned long max_mapnr;
-struct page *mem_map;
-
-EXPORT_SYMBOL(max_mapnr);
-EXPORT_SYMBOL(mem_map);
-#endif
-
-unsigned long num_physpages;
-/*
- * A number of key systems in x86 including ioremap() rely on the assumption
- * that high_memory defines the upper bound on direct map memory, then end
- * of ZONE_NORMAL. Under CONFIG_DISCONTIG this means that max_low_pfn and
- * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
- * and ZONE_HIGHMEM.
- */
-void * high_memory;
-unsigned long vmalloc_earlyreserve;
-
-EXPORT_SYMBOL(num_physpages);
-EXPORT_SYMBOL(high_memory);
-EXPORT_SYMBOL(vmalloc_earlyreserve);
-
-/*
- * Note: this doesn't free the actual pages themselves. That
- * has been handled earlier when unmapping all the memory regions.
- */
-static inline void clear_pmd_range(struct mmu_gather *tlb, pmd_t *pmd, unsigned long start, unsigned long end)
-{
- struct page *page;
-
- if (pmd_none(*pmd))
- return;
- if (unlikely(pmd_bad(*pmd))) {
- pmd_ERROR(*pmd);
- pmd_clear(pmd);
- return;
- }
- if (!((start | end) & ~PMD_MASK)) {
- /* Only clear full, aligned ranges */
- page = pmd_page(*pmd);
- pmd_clear(pmd);
- dec_page_state(nr_page_table_pages);
- tlb->mm->nr_ptes--;
- pte_free_tlb(tlb, page);
- }
-}
-
-static inline void clear_pud_range(struct mmu_gather *tlb, pud_t *pud, unsigned long start, unsigned long end)
-{
- unsigned long addr = start, next;
- pmd_t *pmd, *__pmd;
-
- if (pud_none(*pud))
- return;
- if (unlikely(pud_bad(*pud))) {
- pud_ERROR(*pud);
- pud_clear(pud);
- return;
- }
-
- pmd = __pmd = pmd_offset(pud, start);
- do {
- next = (addr + PMD_SIZE) & PMD_MASK;
- if (next > end || next <= addr)
- next = end;
-
- clear_pmd_range(tlb, pmd, addr, next);
- pmd++;
- addr = next;
- } while (addr && (addr < end));
-
- if (!((start | end) & ~PUD_MASK)) {
- /* Only clear full, aligned ranges */
- pud_clear(pud);
- pmd_free_tlb(tlb, __pmd);
- }
-}
-
-
-static inline void clear_pgd_range(struct mmu_gather *tlb, pgd_t *pgd, unsigned long start, unsigned long end)
-{
- unsigned long addr = start, next;
- pud_t *pud, *__pud;
-
- if (pgd_none(*pgd))
- return;
- if (unlikely(pgd_bad(*pgd))) {
- pgd_ERROR(*pgd);
- pgd_clear(pgd);
- return;
- }
-
- pud = __pud = pud_offset(pgd, start);
- do {
- next = (addr + PUD_SIZE) & PUD_MASK;
- if (next > end || next <= addr)
- next = end;
-
- clear_pud_range(tlb, pud, addr, next);
- pud++;
- addr = next;
- } while (addr && (addr < end));
-
- if (!((start | end) & ~PGDIR_MASK)) {
- /* Only clear full, aligned ranges */
- pgd_clear(pgd);
- pud_free_tlb(tlb, __pud);
- }
-}
-
-/*
- * This function clears user-level page tables of a process.
- *
- * Must be called with pagetable lock held.
- */
-void clear_page_range(struct mmu_gather *tlb, unsigned long start, unsigned long end)
-{
- unsigned long addr = start, next;
- pgd_t * pgd = pgd_offset(tlb->mm, start);
- unsigned long i;
-
- for (i = pgd_index(start); i <= pgd_index(end-1); i++) {
- next = (addr + PGDIR_SIZE) & PGDIR_MASK;
- if (next > end || next <= addr)
- next = end;
-
- clear_pgd_range(tlb, pgd, addr, next);
- pgd++;
- addr = next;
- }
-}
-
-pte_t fastcall * pte_alloc_map(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
-{
- if (!pmd_present(*pmd)) {
- struct page *new;
-
- spin_unlock(&mm->page_table_lock);
- new = pte_alloc_one(mm, address);
- spin_lock(&mm->page_table_lock);
- if (!new)
- return NULL;
- /*
- * Because we dropped the lock, we should re-check the
- * entry, as somebody else could have populated it..
- */
- if (pmd_present(*pmd)) {
- pte_free(new);
- goto out;
- }
- mm->nr_ptes++;
- inc_page_state(nr_page_table_pages);
- pmd_populate(mm, pmd, new);
- }
-out:
- return pte_offset_map(pmd, address);
-}
-
-pte_t fastcall * pte_alloc_kernel(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
-{
- if (!pmd_present(*pmd)) {
- pte_t *new;
-
- spin_unlock(&mm->page_table_lock);
- new = pte_alloc_one_kernel(mm, address);
- spin_lock(&mm->page_table_lock);
- if (!new)
- return NULL;
-
- /*
- * Because we dropped the lock, we should re-check the
- * entry, as somebody else could have populated it..
- */
- if (pmd_present(*pmd)) {
- pte_free_kernel(new);
- goto out;
- }
- pmd_populate_kernel(mm, pmd, new);
- }
-out:
- return pte_offset_kernel(pmd, address);
-}
-
-/*
- * copy one vm_area from one task to the other. Assumes that the page
- * tables already present in the new task are cleared in the whole range
- * covered by this vma.
- *
- * dst->page_table_lock is held on entry and exit,
- * but may be dropped within p[mg]d_alloc() and pte_alloc_map().
- */
-
-static inline void
-copy_swap_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, pte_t pte)
-{
- if (pte_file(pte))
- return;
- swap_duplicate(pte_to_swp_entry(pte));
- if (list_empty(&dst_mm->mmlist)) {
- spin_lock(&mmlist_lock);
- list_add(&dst_mm->mmlist, &src_mm->mmlist);
- spin_unlock(&mmlist_lock);
- }
-}
-
-static inline void
-copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
- pte_t *dst_pte, pte_t *src_pte, unsigned long vm_flags,
- unsigned long addr)
-{
- pte_t pte = *src_pte;
- struct page *page;
- unsigned long pfn;
-
- /* pte contains position in swap, so copy. */
- if (!pte_present(pte)) {
- copy_swap_pte(dst_mm, src_mm, pte);
- set_pte(dst_pte, pte);
- return;
- }
- pfn = pte_pfn(pte);
- /* If the pte points outside of valid memory, the
- * mapping is assumed to be good, meaningful
- * and not mapped via rmap - duplicate the
- * mapping as is.
- */
- page = NULL;
- if (pfn_valid(pfn))
- page = pfn_to_page(pfn);
-
- if (!page || PageReserved(page)) {
- set_pte(dst_pte, pte);
- return;
- }
-
- /*
- * If it's a COW mapping, write protect it both
- * in the parent and the child
- */
- if ((vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE) {
- ptep_set_wrprotect(src_pte);
- pte = *src_pte;
- }
-
- /*
- * If it's a shared mapping, mark it clean in
- * the child
- */
- if (vm_flags & VM_SHARED)
- pte = pte_mkclean(pte);
- pte = pte_mkold(pte);
- get_page(page);
- dst_mm->rss++;
- if (PageAnon(page))
- dst_mm->anon_rss++;
- set_pte(dst_pte, pte);
- page_dup_rmap(page);
-}
-
-static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
- pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
- unsigned long addr, unsigned long end)
-{
- pte_t *src_pte, *dst_pte;
- pte_t *s, *d;
- unsigned long vm_flags = vma->vm_flags;
-
- d = dst_pte = pte_alloc_map(dst_mm, dst_pmd, addr);
- if (!dst_pte)
- return -ENOMEM;
-
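- /* The caller holds dst_mm->page_table_lock; take the source lock nested inside it */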
- spin_lock(&src_mm->page_table_lock);
- s = src_pte = pte_offset_map_nested(src_pmd, addr);
- for (; addr < end; addr += PAGE_SIZE, s++, d++) {
- if (pte_none(*s))
- continue;
- copy_one_pte(dst_mm, src_mm, d, s, vm_flags, addr);
- }
- pte_unmap_nested(src_pte);
- pte_unmap(dst_pte);
- spin_unlock(&src_mm->page_table_lock);
- cond_resched_lock(&dst_mm->page_table_lock);
- return 0;
-}
-
-static int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
- pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
- unsigned long addr, unsigned long end)
-{
- pmd_t *src_pmd, *dst_pmd;
- int err = 0;
- unsigned long next;
-
- src_pmd = pmd_offset(src_pud, addr);
- dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
- if (!dst_pmd)
- return -ENOMEM;
-
- for (; addr < end; addr = next, src_pmd++, dst_pmd++) {
- next = (addr + PMD_SIZE) & PMD_MASK;
- if (next > end || next <= addr)
- next = end;
- if (pmd_none(*src_pmd))
- continue;
- if (pmd_bad(*src_pmd)) {
- pmd_ERROR(*src_pmd);
- pmd_clear(src_pmd);
- continue;
- }
- err = copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
- vma, addr, next);
- if (err)
- break;
- }
- return err;
-}
-
-static int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
- pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
- unsigned long addr, unsigned long end)
-{
- pud_t *src_pud, *dst_pud;
- int err = 0;
- unsigned long next;
-
- src_pud = pud_offset(src_pgd, addr);
- dst_pud = pud_alloc(dst_mm, dst_pgd, addr);
- if (!dst_pud)
- return -ENOMEM;
-
- for (; addr < end; addr = next, src_pud++, dst_pud++) {
- next = (addr + PUD_SIZE) & PUD_MASK;
- if (next > end || next <= addr)
- next = end;
- if (pud_none(*src_pud))
- continue;
- if (pud_bad(*src_pud)) {
- pud_ERROR(*src_pud);
- pud_clear(src_pud);
- continue;
- }
- err = copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
- vma, addr, next);
- if (err)
- break;
- }
- return err;
-}
-
-int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
- struct vm_area_struct *vma)
-{
- pgd_t *src_pgd, *dst_pgd;
- unsigned long addr, start, end, next;
- int err = 0;
-
- if (is_vm_hugetlb_page(vma))
- return copy_hugetlb_page_range(dst, src, vma);
-
- start = vma->vm_start;
- src_pgd = pgd_offset(src, start);
- dst_pgd = pgd_offset(dst, start);
-
- end = vma->vm_end;
- addr = start;
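- /* addr can only become zero if "next" wrapped past the top of the address space, which ends the walk */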
- while (addr && (addr < end-1)) {
- next = (addr + PGDIR_SIZE) & PGDIR_MASK;
- if (next > end || next <= addr)
- next = end;
- if (pgd_none(*src_pgd))
- goto next_pgd;
- if (pgd_bad(*src_pgd)) {
- pgd_ERROR(*src_pgd);
- pgd_clear(src_pgd);
- goto next_pgd;
- }
- err = copy_pud_range(dst, src, dst_pgd, src_pgd,
- vma, addr, next);
- if (err)
- break;
-
-next_pgd:
- src_pgd++;
- dst_pgd++;
- addr = next;
- }
-
- return err;
-}
-
-static void zap_pte_range(struct mmu_gather *tlb,
- pmd_t *pmd, unsigned long address,
- unsigned long size, struct zap_details *details)
-{
- unsigned long offset;
- pte_t *ptep;
-
- if (pmd_none(*pmd))
- return;
- if (unlikely(pmd_bad(*pmd))) {
- pmd_ERROR(*pmd);
- pmd_clear(pmd);
- return;
- }
- ptep = pte_offset_map(pmd, address);
- offset = address & ~PMD_MASK;
- if (offset + size > PMD_SIZE)
- size = PMD_SIZE - offset;
- size &= PAGE_MASK;
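- /* A details block with neither a mapping check nor a nonlinear vma behaves like a plain zap, so drop it */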
- if (details && !details->check_mapping && !details->nonlinear_vma)
- details = NULL;
- for (offset=0; offset < size; ptep++, offset += PAGE_SIZE) {
- pte_t pte = *ptep;
- if (pte_none(pte))
- continue;
- if (pte_present(pte)) {
- struct page *page = NULL;
- unsigned long pfn = pte_pfn(pte);
- if (pfn_valid(pfn)) {
- page = pfn_to_page(pfn);
- if (PageReserved(page))
- page = NULL;
- }
- if (unlikely(details) && page) {
- /*
- * unmap_shared_mapping_pages() wants to
- * invalidate cache without truncating:
- * unmap shared but keep private pages.
- */
- if (details->check_mapping &&
- details->check_mapping != page->mapping)
- continue;
- /*
- * Each page->index must be checked when
- * invalidating or truncating nonlinear.
- */
- if (details->nonlinear_vma &&
- (page->index < details->first_index ||
- page->index > details->last_index))
- continue;
- }
- pte = ptep_get_and_clear(ptep);
- tlb_remove_tlb_entry(tlb, ptep, address+offset);
- if (unlikely(!page))
- continue;
- if (unlikely(details) && details->nonlinear_vma
- && linear_page_index(details->nonlinear_vma,
- address+offset) != page->index)
- set_pte(ptep, pgoff_to_pte(page->index));
- if (pte_dirty(pte))
- set_page_dirty(page);
- if (PageAnon(page))
- tlb->mm->anon_rss--;
- else if (pte_young(pte))
- mark_page_accessed(page);
- tlb->freed++;
- page_remove_rmap(page);
- tlb_remove_page(tlb, page);
- continue;
- }
- /*
- * If details->check_mapping, we leave swap entries;
- * if details->nonlinear_vma, we leave file entries.
- */
- if (unlikely(details))
- continue;
- if (!pte_file(pte))
- free_swap_and_cache(pte_to_swp_entry(pte));
- pte_clear(ptep);
- }
- pte_unmap(ptep-1);
-}
-
-static void zap_pmd_range(struct mmu_gather *tlb,
- pud_t *pud, unsigned long address,
- unsigned long size, struct zap_details *details)
-{
- pmd_t * pmd;
- unsigned long end;
-
- if (pud_none(*pud))
- return;
- if (unlikely(pud_bad(*pud))) {
- pud_ERROR(*pud);
- pud_clear(pud);
- return;
- }
- pmd = pmd_offset(pud, address);
- end = address + size;
- if (end > ((address + PUD_SIZE) & PUD_MASK))
- end = ((address + PUD_SIZE) & PUD_MASK);
- do {
- zap_pte_range(tlb, pmd, address, end - address, details);
- address = (address + PMD_SIZE) & PMD_MASK;
- pmd++;
- } while (address && (address < end));
-}
-
-static void zap_pud_range(struct mmu_gather *tlb,
- pgd_t * pgd, unsigned long address,
- unsigned long end, struct zap_details *details)
-{
- pud_t * pud;
-
- if (pgd_none(*pgd))
- return;
- if (unlikely(pgd_bad(*pgd))) {
- pgd_ERROR(*pgd);
- pgd_clear(pgd);
- return;
- }
- pud = pud_offset(pgd, address);
- do {
- zap_pmd_range(tlb, pud, address, end - address, details);
- address = (address + PUD_SIZE) & PUD_MASK;
- pud++;
- } while (address && (address < end));
-}
-
-static void unmap_page_range(struct mmu_gather *tlb,
- struct vm_area_struct *vma, unsigned long address,
- unsigned long end, struct zap_details *details)
-{
- unsigned long next;
- pgd_t *pgd;
- int i;
-
- BUG_ON(address >= end);
- pgd = pgd_offset(vma->vm_mm, address);
- tlb_start_vma(tlb, vma);
- for (i = pgd_index(address); i <= pgd_index(end-1); i++) {
- next = (address + PGDIR_SIZE) & PGDIR_MASK;
- if (next <= address || next > end)
- next = end;
- zap_pud_range(tlb, pgd, address, next, details);
- address = next;
- pgd++;
- }
- tlb_end_vma(tlb, vma);
-}
-
-#ifdef CONFIG_PREEMPT
-# define ZAP_BLOCK_SIZE (8 * PAGE_SIZE)
-#else
-/* No preempt: go for improved straight-line efficiency */
-# define ZAP_BLOCK_SIZE (1024 * PAGE_SIZE)
-#endif
-
-/**
- * unmap_vmas - unmap a range of memory covered by a list of vma's
- * @tlbp: address of the caller's struct mmu_gather
- * @mm: the controlling mm_struct
- * @vma: the starting vma
- * @start_addr: virtual address at which to start unmapping
- * @end_addr: virtual address at which to end unmapping
- * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
- * @details: details of nonlinear truncation or shared cache invalidation
- *
- * Returns the number of vma's which were covered by the unmapping.
- *
- * Unmap all pages in the vma list. Called under page_table_lock.
- *
- * We aim to not hold page_table_lock for too long (for scheduling latency
- * reasons). So zap pages in ZAP_BLOCK_SIZE bytecounts. This means we need to
- * return the ending mmu_gather to the caller.
- *
- * Only addresses between `start' and `end' will be unmapped.
- *
- * The VMA list must be sorted in ascending virtual address order.
- *
- * unmap_vmas() assumes that the caller will flush the whole unmapped address
- * range after unmap_vmas() returns. So the only responsibility here is to
- * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
- * drops the lock and schedules.
- */
-int unmap_vmas(struct mmu_gather **tlbp, struct mm_struct *mm,
- struct vm_area_struct *vma, unsigned long start_addr,
- unsigned long end_addr, unsigned long *nr_accounted,
- struct zap_details *details)
-{
- unsigned long zap_bytes = ZAP_BLOCK_SIZE;
- unsigned long tlb_start = 0; /* For tlb_finish_mmu */
- int tlb_start_valid = 0;
- int ret = 0;
- spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL;
- int fullmm = tlb_is_full_mm(*tlbp);
-
- for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
- unsigned long start;
- unsigned long end;
-
- start = max(vma->vm_start, start_addr);
- if (start >= vma->vm_end)
- continue;
- end = min(vma->vm_end, end_addr);
- if (end <= vma->vm_start)
- continue;
-
- if (vma->vm_flags & VM_ACCOUNT)
- *nr_accounted += (end - start) >> PAGE_SHIFT;
-
- ret++;
- while (start != end) {
- unsigned long block;
-
- if (!tlb_start_valid) {
- tlb_start = start;
- tlb_start_valid = 1;
- }
-
- if (is_vm_hugetlb_page(vma)) {
- block = end - start;
- unmap_hugepage_range(vma, start, end);
- } else {
- block = min(zap_bytes, end - start);
- unmap_page_range(*tlbp, vma, start,
- start + block, details);
- }
-
- start += block;
- zap_bytes -= block;
- if ((long)zap_bytes > 0)
- continue;
-
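- /* This batch reached ZAP_BLOCK_SIZE: flush what we have gathered, then maybe reschedule below */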
- tlb_finish_mmu(*tlbp, tlb_start, start);
-
- if (need_resched() ||
- need_lockbreak(&mm->page_table_lock) ||
- (i_mmap_lock && need_lockbreak(i_mmap_lock))) {
- if (i_mmap_lock) {
- /* must reset count of rss freed */
- *tlbp = tlb_gather_mmu(mm, fullmm);
- details->break_addr = start;
- goto out;
- }
- spin_unlock(&mm->page_table_lock);
- cond_resched();
- spin_lock(&mm->page_table_lock);
- }
-
- *tlbp = tlb_gather_mmu(mm, fullmm);
- tlb_start_valid = 0;
- zap_bytes = ZAP_BLOCK_SIZE;
- }
- }
-out:
- return ret;
-}
-
-/**
- * zap_page_range - remove user pages in a given range
- * @vma: vm_area_struct holding the applicable pages
- * @address: starting address of pages to zap
- * @size: number of bytes to zap
- * @details: details of nonlinear truncation or shared cache invalidation
- */
-void zap_page_range(struct vm_area_struct *vma, unsigned long address,
- unsigned long size, struct zap_details *details)
-{
- struct mm_struct *mm = vma->vm_mm;
- struct mmu_gather *tlb;
- unsigned long end = address + size;
- unsigned long nr_accounted = 0;
-
- if (is_vm_hugetlb_page(vma)) {
- zap_hugepage_range(vma, address, size);
- return;
- }
-
- lru_add_drain();
- spin_lock(&mm->page_table_lock);
- tlb = tlb_gather_mmu(mm, 0);
- unmap_vmas(&tlb, mm, vma, address, end, &nr_accounted, details);
- tlb_finish_mmu(tlb, address, end);
- acct_update_integrals();
- spin_unlock(&mm->page_table_lock);
-}
-
-/*
- * Do a quick page-table lookup for a single page.
- * mm->page_table_lock must be held.
- */
-static struct page *
-__follow_page(struct mm_struct *mm, unsigned long address, int read, int write)
-{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *ptep, pte;
- unsigned long pfn;
- struct page *page;
-
- page = follow_huge_addr(mm, address, write);
- if (! IS_ERR(page))
- return page;
-
- pgd = pgd_offset(mm, address);
- if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
- goto out;
-
- pud = pud_offset(pgd, address);
- if (pud_none(*pud) || unlikely(pud_bad(*pud)))
- goto out;
-
- pmd = pmd_offset(pud, address);
- if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
- goto out;
- if (pmd_huge(*pmd))
- return follow_huge_pmd(mm, address, pmd, write);
-
- ptep = pte_offset_map(pmd, address);
- if (!ptep)
- goto out;
-
- pte = *ptep;
- pte_unmap(ptep);
- if (pte_present(pte)) {
- if (write && !pte_write(pte))
- goto out;
- if (read && !pte_read(pte))
- goto out;
- pfn = pte_pfn(pte);
- if (pfn_valid(pfn)) {
- page = pfn_to_page(pfn);
- if (write && !pte_dirty(pte) && !PageDirty(page))
- set_page_dirty(page);
- mark_page_accessed(page);
- return page;
- }
- }
-
-out:
- return NULL;
-}
-
-struct page *
-follow_page(struct mm_struct *mm, unsigned long address, int write)
-{
- return __follow_page(mm, address, /*read*/0, write);
-}
-
-int
-check_user_page_readable(struct mm_struct *mm, unsigned long address)
-{
- return __follow_page(mm, address, /*read*/1, /*write*/0) != NULL;
-}
-
-EXPORT_SYMBOL(check_user_page_readable);
-
-/*
- * Given a physical address, is there a useful struct page pointing to
- * it? This may become more complex in the future if we start dealing
- * with IO-aperture pages for direct-IO.
- */
-
-static inline struct page *get_page_map(struct page *page)
-{
- if (!pfn_valid(page_to_pfn(page)))
- return NULL;
- return page;
-}
-
-
-static inline int
-untouched_anonymous_page(struct mm_struct* mm, struct vm_area_struct *vma,
- unsigned long address)
-{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
-
- /* Check if the vma is for an anonymous mapping. */
- if (vma->vm_ops && vma->vm_ops->nopage)
- return 0;
-
- /* Check if page directory entry exists. */
- pgd = pgd_offset(mm, address);
- if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
- return 1;
-
- pud = pud_offset(pgd, address);
- if (pud_none(*pud) || unlikely(pud_bad(*pud)))
- return 1;
-
- /* Check if page middle directory entry exists. */
- pmd = pmd_offset(pud, address);
- if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
- return 1;
-
- /* There is a pte slot for 'address' in 'mm'. */
- return 0;
-}
-
-
-int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, int len, int write, int force,
- struct page **pages, struct vm_area_struct **vmas)
-{
- int i;
- unsigned int flags;
-
- /*
- * Require read or write permissions.
- * If 'force' is set, we only require the "MAY" flags.
- */
- flags = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
- flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
- i = 0;
-
- do {
- struct vm_area_struct * vma;
-
- vma = find_extend_vma(mm, start);
- if (!vma && in_gate_area(tsk, start)) {
- unsigned long pg = start & PAGE_MASK;
- struct vm_area_struct *gate_vma = get_gate_vma(tsk);
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
- if (write) /* user gate pages are read-only */
- return i ? : -EFAULT;
- if (pg > TASK_SIZE)
- pgd = pgd_offset_k(pg);
- else
- pgd = pgd_offset_gate(mm, pg);
- BUG_ON(pgd_none(*pgd));
- pud = pud_offset(pgd, pg);
- BUG_ON(pud_none(*pud));
- pmd = pmd_offset(pud, pg);
- BUG_ON(pmd_none(*pmd));
- pte = pte_offset_map(pmd, pg);
- BUG_ON(pte_none(*pte));
- if (pages) {
- pages[i] = pte_page(*pte);
- get_page(pages[i]);
- }
- pte_unmap(pte);
- if (vmas)
- vmas[i] = gate_vma;
- i++;
- start += PAGE_SIZE;
- len--;
- continue;
- }
-
- if (!vma || (vma->vm_flags & VM_IO)
- || !(flags & vma->vm_flags))
- return i ? : -EFAULT;
-
- if (is_vm_hugetlb_page(vma)) {
- i = follow_hugetlb_page(mm, vma, pages, vmas,
- &start, &len, i);
- continue;
- }
- spin_lock(&mm->page_table_lock);
- do {
- struct page *map;
- int lookup_write = write;
-
- cond_resched_lock(&mm->page_table_lock);
- while (!(map = follow_page(mm, start, lookup_write))) {
- /*
- * Shortcut for anonymous pages. We don't want
- * to force the creation of page tables for
- * insanely big anonymously mapped areas that
- * nobody touched so far. This is important
- * for doing a core dump for these mappings.
- */
- if (!lookup_write &&
- untouched_anonymous_page(mm,vma,start)) {
- map = ZERO_PAGE(start);
- break;
- }
- spin_unlock(&mm->page_table_lock);
- switch (handle_mm_fault(mm,vma,start,write)) {
- case VM_FAULT_MINOR:
- tsk->min_flt++;
- break;
- case VM_FAULT_MAJOR:
- tsk->maj_flt++;
- break;
- case VM_FAULT_SIGBUS:
- return i ? i : -EFAULT;
- case VM_FAULT_OOM:
- return i ? i : -ENOMEM;
- default:
- BUG();
- }
- /*
- * Now that we have performed a write fault
- * and surely no longer have a shared page we
- * shouldn't write, we shouldn't ignore an
- * unwritable page in the page table if
- * we are forcing write access.
- */
- lookup_write = write && !force;
- spin_lock(&mm->page_table_lock);
- }
- if (pages) {
- pages[i] = get_page_map(map);
- if (!pages[i]) {
- spin_unlock(&mm->page_table_lock);
- while (i--)
- page_cache_release(pages[i]);
- i = -EFAULT;
- goto out;
- }
- flush_dcache_page(pages[i]);
- if (!PageReserved(pages[i]))
- page_cache_get(pages[i]);
- }
- if (vmas)
- vmas[i] = vma;
- i++;
- start += PAGE_SIZE;
- len--;
- } while(len && start < vma->vm_end);
- spin_unlock(&mm->page_table_lock);
- } while(len);
-out:
- return i;
-}
-
-EXPORT_SYMBOL(get_user_pages);
-
-static void zeromap_pte_range(pte_t * pte, unsigned long address,
- unsigned long size, pgprot_t prot)
-{
- unsigned long end;
-
- address &= ~PMD_MASK;
- end = address + size;
- if (end > PMD_SIZE)
- end = PMD_SIZE;
- do {
- pte_t zero_pte = pte_wrprotect(mk_pte(ZERO_PAGE(address), prot));
- BUG_ON(!pte_none(*pte));
- set_pte(pte, zero_pte);
- address += PAGE_SIZE;
- pte++;
- } while (address && (address < end));
-}
-
-static inline int zeromap_pmd_range(struct mm_struct *mm, pmd_t * pmd,
- unsigned long address, unsigned long size, pgprot_t prot)
-{
- unsigned long base, end;
-
- base = address & PUD_MASK;
- address &= ~PUD_MASK;
- end = address + size;
- if (end > PUD_SIZE)
- end = PUD_SIZE;
- do {
- pte_t * pte = pte_alloc_map(mm, pmd, base + address);
- if (!pte)
- return -ENOMEM;
- zeromap_pte_range(pte, base + address, end - address, prot);
- pte_unmap(pte);
- address = (address + PMD_SIZE) & PMD_MASK;
- pmd++;
- } while (address && (address < end));
- return 0;
-}
-
-static inline int zeromap_pud_range(struct mm_struct *mm, pud_t * pud,
- unsigned long address,
- unsigned long size, pgprot_t prot)
-{
- unsigned long base, end;
- int error = 0;
-
- base = address & PGDIR_MASK;
- address &= ~PGDIR_MASK;
- end = address + size;
- if (end > PGDIR_SIZE)
- end = PGDIR_SIZE;
- do {
- pmd_t * pmd = pmd_alloc(mm, pud, base + address);
- error = -ENOMEM;
- if (!pmd)
- break;
- error = zeromap_pmd_range(mm, pmd, base + address,
- end - address, prot);
- if (error)
- break;
- address = (address + PUD_SIZE) & PUD_MASK;
- pud++;
- } while (address && (address < end));
- return error;
-}
-
-int zeromap_page_range(struct vm_area_struct *vma, unsigned long address,
- unsigned long size, pgprot_t prot)
-{
- int i;
- int error = 0;
- pgd_t * pgd;
- unsigned long beg = address;
- unsigned long end = address + size;
- unsigned long next;
- struct mm_struct *mm = vma->vm_mm;
-
- pgd = pgd_offset(mm, address);
- flush_cache_range(vma, beg, end);
- BUG_ON(address >= end);
- BUG_ON(end > vma->vm_end);
-
- spin_lock(&mm->page_table_lock);
- for (i = pgd_index(address); i <= pgd_index(end-1); i++) {
- pud_t *pud = pud_alloc(mm, pgd, address);
- error = -ENOMEM;
- if (!pud)
- break;
- next = (address + PGDIR_SIZE) & PGDIR_MASK;
- if (next <= beg || next > end)
- next = end;
- error = zeromap_pud_range(mm, pud, address,
- next - address, prot);
- if (error)
- break;
- address = next;
- pgd++;
- }
- /*
- * Why flush? zeromap_pte_range has a BUG_ON for !pte_none()
- */
- flush_tlb_range(vma, beg, end);
- spin_unlock(&mm->page_table_lock);
- return error;
-}
-
-/*
- * maps a range of physical memory into the requested pages. the old
- * mappings are removed. Any references to nonexistent pages result
- * in null mappings (currently treated as "copy-on-access")
- */
-static inline void
-remap_pte_range(pte_t * pte, unsigned long address, unsigned long size,
- unsigned long pfn, pgprot_t prot)
-{
- unsigned long end;
-
- address &= ~PMD_MASK;
- end = address + size;
- if (end > PMD_SIZE)
- end = PMD_SIZE;
- do {
- BUG_ON(!pte_none(*pte));
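- /* Only install a pte when the pfn is not backed by a struct page the VM manages (invalid or reserved) */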
- if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
- set_pte(pte, pfn_pte(pfn, prot));
- address += PAGE_SIZE;
- pfn++;
- pte++;
- } while (address && (address < end));
-}
-
-static inline int
-remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address,
- unsigned long size, unsigned long pfn, pgprot_t prot)
-{
- unsigned long base, end;
-
- base = address & PUD_MASK;
- address &= ~PUD_MASK;
- end = address + size;
- if (end > PUD_SIZE)
- end = PUD_SIZE;
- pfn -= (address >> PAGE_SHIFT);
- do {
- pte_t * pte = pte_alloc_map(mm, pmd, base + address);
- if (!pte)
- return -ENOMEM;
- remap_pte_range(pte, base + address, end - address,
- (address >> PAGE_SHIFT) + pfn, prot);
- pte_unmap(pte);
- address = (address + PMD_SIZE) & PMD_MASK;
- pmd++;
- } while (address && (address < end));
- return 0;
-}
-
-static inline int remap_pud_range(struct mm_struct *mm, pud_t * pud,
- unsigned long address, unsigned long size,
- unsigned long pfn, pgprot_t prot)
-{
- unsigned long base, end;
- int error;
-
- base = address & PGDIR_MASK;
- address &= ~PGDIR_MASK;
- end = address + size;
- if (end > PGDIR_SIZE)
- end = PGDIR_SIZE;
- pfn -= address >> PAGE_SHIFT;
- do {
- pmd_t *pmd = pmd_alloc(mm, pud, base+address);
- error = -ENOMEM;
- if (!pmd)
- break;
- error = remap_pmd_range(mm, pmd, base + address, end - address,
- (address >> PAGE_SHIFT) + pfn, prot);
- if (error)
- break;
- address = (address + PUD_SIZE) & PUD_MASK;
- pud++;
- } while (address && (address < end));
- return error;
-}
-
-/* Note: this is only safe if the mm semaphore is held when called. */
-int remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
- unsigned long pfn, unsigned long size, pgprot_t prot)
-{
- int error = 0;
- pgd_t *pgd;
- unsigned long beg = from;
- unsigned long end = from + size;
- unsigned long next;
- struct mm_struct *mm = vma->vm_mm;
- int i;
-
- pfn -= from >> PAGE_SHIFT;
- pgd = pgd_offset(mm, from);
- flush_cache_range(vma, beg, end);
- BUG_ON(from >= end);
-
- /*
- * Physically remapped pages are special. Tell the
- * rest of the world about it:
- * VM_IO tells people not to look at these pages
- * (accesses can have side effects).
- * VM_RESERVED tells swapout not to try to touch
- * this region.
- */
- vma->vm_flags |= VM_IO | VM_RESERVED;
-
- spin_lock(&mm->page_table_lock);
- for (i = pgd_index(beg); i <= pgd_index(end-1); i++) {
- pud_t *pud = pud_alloc(mm, pgd, from);
- error = -ENOMEM;
- if (!pud)
- break;
- next = (from + PGDIR_SIZE) & PGDIR_MASK;
- if (next > end || next <= from)
- next = end;
- error = remap_pud_range(mm, pud, from, end - from,
- pfn + (from >> PAGE_SHIFT), prot);
- if (error)
- break;
- from = next;
- pgd++;
- }
- /*
- * Why flush? remap_pte_range has a BUG_ON for !pte_none()
- */
- flush_tlb_range(vma, beg, end);
- spin_unlock(&mm->page_table_lock);
-
- return error;
-}
-
-EXPORT_SYMBOL(remap_pfn_range);
-
-/*
- * Do pte_mkwrite, but only if the vma says VM_WRITE. We do this when
- * servicing faults for write access. In the normal case we always want
- * pte_mkwrite. But get_user_pages can cause write faults for mappings
- * that do not have writing enabled, when used by access_process_vm.
- */
-static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
-{
- if (likely(vma->vm_flags & VM_WRITE))
- pte = pte_mkwrite(pte);
- return pte;
-}
-
-/*
- * We hold the mm semaphore for reading and vma->vm_mm->page_table_lock
- */
-static inline void break_cow(struct vm_area_struct * vma, struct page * new_page, unsigned long address,
- pte_t *page_table)
-{
- pte_t entry;
-
- flush_cache_page(vma, address);
- entry = maybe_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot)),
- vma);
- ptep_establish(vma, address, page_table, entry);
- update_mmu_cache(vma, address, entry);
-}
-
-/*
- * This routine handles present pages, when users try to write
- * to a shared page. It is done by copying the page to a new address
- * and decrementing the shared-page counter for the old page.
- *
- * Goto-purists beware: the only reason for goto's here is that it results
- * in better assembly code.. The "default" path will see no jumps at all.
- *
- * Note that this routine assumes that the protection checks have been
- * done by the caller (the low-level page fault routine in most cases).
- * Thus we can safely just mark it writable once we've done any necessary
- * COW.
- *
- * We also mark the page dirty at this point even though the page will
- * change only once the write actually happens. This avoids a few races,
- * and potentially makes it more efficient.
- *
- * We hold the mm semaphore and the page_table_lock on entry and exit
- * with the page_table_lock released.
- */
-static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
- unsigned long address, pte_t *page_table, pmd_t *pmd, pte_t pte)
-{
- struct page *old_page, *new_page;
- unsigned long pfn = pte_pfn(pte);
- pte_t entry;
-
- if (unlikely(!pfn_valid(pfn))) {
- /*
- * This should really halt the system so it can be debugged or
- * at least the kernel stops what it's doing before it corrupts
- * data, but for the moment just pretend this is OOM.
- */
- pte_unmap(page_table);
- printk(KERN_ERR "do_wp_page: bogus page at address %08lx\n",
- address);
- spin_unlock(&mm->page_table_lock);
- return VM_FAULT_OOM;
- }
- old_page = pfn_to_page(pfn);
-
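- /* Try to reuse the page in place: if we get the page lock and are its only user, just make the pte writable */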
- if (!TestSetPageLocked(old_page)) {
- int reuse = can_share_swap_page(old_page);
- unlock_page(old_page);
- if (reuse) {
- flush_cache_page(vma, address);
- entry = maybe_mkwrite(pte_mkyoung(pte_mkdirty(pte)),
- vma);
- ptep_set_access_flags(vma, address, page_table, entry, 1);
- update_mmu_cache(vma, address, entry);
- pte_unmap(page_table);
- spin_unlock(&mm->page_table_lock);
- return VM_FAULT_MINOR;
- }
- }
- pte_unmap(page_table);
-
- /*
- * Ok, we need to copy. Oh, well..
- */
- if (!PageReserved(old_page))
- page_cache_get(old_page);
- spin_unlock(&mm->page_table_lock);
-
- if (unlikely(anon_vma_prepare(vma)))
- goto no_new_page;
- if (old_page == ZERO_PAGE(address)) {
- new_page = alloc_zeroed_user_highpage(vma, address);
- if (!new_page)
- goto no_new_page;
- } else {
- new_page = alloc_page_vma(GFP_HIGHUSER, vma, address);
- if (!new_page)
- goto no_new_page;
- copy_user_highpage(new_page, old_page, address);
- }
- /*
- * Re-check the pte - we dropped the lock
- */
- spin_lock(&mm->page_table_lock);
- page_table = pte_offset_map(pmd, address);
- if (likely(pte_same(*page_table, pte))) {
- if (PageAnon(old_page))
- mm->anon_rss--;
- if (PageReserved(old_page)) {
- ++mm->rss;
- acct_update_integrals();
- update_mem_hiwater();
- } else
- page_remove_rmap(old_page);
- break_cow(vma, new_page, address, page_table);
- lru_cache_add_active(new_page);
- page_add_anon_rmap(new_page, vma, address);
-
- /* Free the old page.. */
- new_page = old_page;
- }
- pte_unmap(page_table);
- page_cache_release(new_page);
- page_cache_release(old_page);
- spin_unlock(&mm->page_table_lock);
- return VM_FAULT_MINOR;
-
-no_new_page:
- page_cache_release(old_page);
- return VM_FAULT_OOM;
-}
-
-/*
- * Helper functions for unmap_mapping_range().
- *
- * __ Notes on dropping i_mmap_lock to reduce latency while unmapping __
- *
- * We have to restart searching the prio_tree whenever we drop the lock,
- * since the iterator is only valid while the lock is held, and anyway
- * a later vma might be split and reinserted earlier while lock dropped.
- *
- * The list of nonlinear vmas could be handled more efficiently, using
- * a placeholder, but handle it in the same way until a need is shown.
- * It is important to search the prio_tree before nonlinear list: a vma
- * may become nonlinear and be shifted from prio_tree to nonlinear list
- * while the lock is dropped; but never shifted from list to prio_tree.
- *
- * In order to make forward progress despite restarting the search,
- * vm_truncate_count is used to mark a vma as now dealt with, so we can
- * quickly skip it next time around. Since the prio_tree search only
- * shows us those vmas affected by unmapping the range in question, we
- * can't efficiently keep all vmas in step with mapping->truncate_count:
- * so instead reset them all whenever it wraps back to 0 (then go to 1).
- * mapping->truncate_count and vma->vm_truncate_count are protected by
- * i_mmap_lock.
- *
- * In order to make forward progress despite repeatedly restarting some
- * large vma, note the break_addr set by unmap_vmas when it breaks out:
- * and restart from that address when we reach that vma again. It might
- * have been split or merged, shrunk or extended, but never shifted: so
- * restart_addr remains valid so long as it remains in the vma's range.
- * unmap_mapping_range forces truncate_count to leap over page-aligned
- * values so we can save vma's restart_addr in its truncate_count field.
- */
-#define is_restart_addr(truncate_count) (!((truncate_count) & ~PAGE_MASK))
-
-static void reset_vma_truncate_counts(struct address_space *mapping)
-{
- struct vm_area_struct *vma;
- struct prio_tree_iter iter;
-
- vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, 0, ULONG_MAX)
- vma->vm_truncate_count = 0;
- list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
- vma->vm_truncate_count = 0;
-}
-
-static int unmap_mapping_range_vma(struct vm_area_struct *vma,
- unsigned long start_addr, unsigned long end_addr,
- struct zap_details *details)
-{
- unsigned long restart_addr;
- int need_break;
-
-again:
- restart_addr = vma->vm_truncate_count;
- if (is_restart_addr(restart_addr) && start_addr < restart_addr) {
- start_addr = restart_addr;
- if (start_addr >= end_addr) {
- /* Top of vma has been split off since last time */
- vma->vm_truncate_count = details->truncate_count;
- return 0;
- }
- }
-
- details->break_addr = end_addr;
- zap_page_range(vma, start_addr, end_addr - start_addr, details);
-
- /*
- * We cannot rely on the break test in unmap_vmas:
- * on the one hand, we don't want to restart our loop
- * just because that broke out for the page_table_lock;
- * on the other hand, it does no test when vma is small.
- */
- need_break = need_resched() ||
- need_lockbreak(details->i_mmap_lock);
-
- if (details->break_addr >= end_addr) {
- /* We have now completed this vma: mark it so */
- vma->vm_truncate_count = details->truncate_count;
- if (!need_break)
- return 0;
- } else {
- /* Note restart_addr in vma's truncate_count field */
- vma->vm_truncate_count = details->break_addr;
- if (!need_break)
- goto again;
- }
-
- spin_unlock(details->i_mmap_lock);
- cond_resched();
- spin_lock(details->i_mmap_lock);
- return -EINTR;
-}
-
-static inline void unmap_mapping_range_tree(struct prio_tree_root *root,
- struct zap_details *details)
-{
- struct vm_area_struct *vma;
- struct prio_tree_iter iter;
- pgoff_t vba, vea, zba, zea;
-
-restart:
- vma_prio_tree_foreach(vma, &iter, root,
- details->first_index, details->last_index) {
- /* Skip quickly over those we have already dealt with */
- if (vma->vm_truncate_count == details->truncate_count)
- continue;
-
- vba = vma->vm_pgoff;
- vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1;
- /* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */
- zba = details->first_index;
- if (zba < vba)
- zba = vba;
- zea = details->last_index;
- if (zea > vea)
- zea = vea;
-
- if (unmap_mapping_range_vma(vma,
- ((zba - vba) << PAGE_SHIFT) + vma->vm_start,
- ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
- details) < 0)
- goto restart;
- }
-}
-
-static inline void unmap_mapping_range_list(struct list_head *head,
- struct zap_details *details)
-{
- struct vm_area_struct *vma;
-
- /*
- * In nonlinear VMAs there is no correspondence between virtual address
- * offset and file offset. So we must perform an exhaustive search
- * across *all* the pages in each nonlinear VMA, not just the pages
- * whose virtual address lies outside the file truncation point.
- */
-restart:
- list_for_each_entry(vma, head, shared.vm_set.list) {
- /* Skip quickly over those we have already dealt with */
- if (vma->vm_truncate_count == details->truncate_count)
- continue;
- details->nonlinear_vma = vma;
- if (unmap_mapping_range_vma(vma, vma->vm_start,
- vma->vm_end, details) < 0)
- goto restart;
- }
-}
-
-/**
- * unmap_mapping_range - unmap the portion of all mmaps
- * in the specified address_space corresponding to the specified
- * page range in the underlying file.
- * @address_space: the address space containing mmaps to be unmapped.
- * @holebegin: byte in first page to unmap, relative to the start of
- * the underlying file. This will be rounded down to a PAGE_SIZE
- * boundary. Note that this is different from vmtruncate(), which
- * must keep the partial page. In contrast, we must get rid of
- * partial pages.
- * @holelen: size of prospective hole in bytes. This will be rounded
- * up to a PAGE_SIZE boundary. A holelen of zero truncates to the
- * end of the file.
- * @even_cows: 1 when truncating a file, unmap even private COWed pages;
- * but 0 when invalidating pagecache, don't throw away private data.
- */
-void unmap_mapping_range(struct address_space *mapping,
- loff_t const holebegin, loff_t const holelen, int even_cows)
-{
- struct zap_details details;
- pgoff_t hba = holebegin >> PAGE_SHIFT;
- pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
-
- /* Check for overflow. */
- if (sizeof(holelen) > sizeof(hlen)) {
- long long holeend =
- (holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
- if (holeend & ~(long long)ULONG_MAX)
- hlen = ULONG_MAX - hba + 1;
- }
-
- details.check_mapping = even_cows? NULL: mapping;
- details.nonlinear_vma = NULL;
- details.first_index = hba;
- details.last_index = hba + hlen - 1;
- if (details.last_index < details.first_index)
- details.last_index = ULONG_MAX;
- details.i_mmap_lock = &mapping->i_mmap_lock;
-
- spin_lock(&mapping->i_mmap_lock);
-
- /* serialize i_size write against truncate_count write */
- smp_wmb();
- /* Protect against page faults, and endless unmapping loops */
- mapping->truncate_count++;
- /*
- * For archs where spin_lock has inclusive semantics like ia64
- * this smp_mb() prevents pagetable contents from being read
- * before the truncate_count increment is visible to
- * other cpus.
- */
- smp_mb();
- if (unlikely(is_restart_addr(mapping->truncate_count))) {
- if (mapping->truncate_count == 0)
- reset_vma_truncate_counts(mapping);
- mapping->truncate_count++;
- }
- details.truncate_count = mapping->truncate_count;
-
- if (unlikely(!prio_tree_empty(&mapping->i_mmap)))
- unmap_mapping_range_tree(&mapping->i_mmap, &details);
- if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
- unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
- spin_unlock(&mapping->i_mmap_lock);
-}
-EXPORT_SYMBOL(unmap_mapping_range);
-
-/*
- * Handle all mappings that got truncated by a "truncate()"
- * system call.
- *
- * NOTE! We have to be ready to update the memory sharing
- * between the file and the memory map for a potential last
- * incomplete page. Ugly, but necessary.
- */
-int vmtruncate(struct inode * inode, loff_t offset)
-{
- struct address_space *mapping = inode->i_mapping;
- unsigned long limit;
-
- if (inode->i_size < offset)
- goto do_expand;
- /*
- * truncation of in-use swapfiles is disallowed - it would cause
- * subsequent swapout to scribble on the now-freed blocks.
- */
- if (IS_SWAPFILE(inode))
- goto out_busy;
- i_size_write(inode, offset);
- unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
- truncate_inode_pages(mapping, offset);
- goto out_truncate;
-
-do_expand:
- limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
- if (limit != RLIM_INFINITY && offset > limit)
- goto out_sig;
- if (offset > inode->i_sb->s_maxbytes)
- goto out_big;
- i_size_write(inode, offset);
-
-out_truncate:
- if (inode->i_op && inode->i_op->truncate)
- inode->i_op->truncate(inode);
- return 0;
-out_sig:
- send_sig(SIGXFSZ, current, 0);
-out_big:
- return -EFBIG;
-out_busy:
- return -ETXTBSY;
-}
-
-EXPORT_SYMBOL(vmtruncate);
-
-/*
- * Primitive swap readahead code. We simply read an aligned block of
- * (1 << page_cluster) entries in the swap area. This method is chosen
- * because it doesn't cost us any seek time. We also make sure to queue
- * the 'original' request together with the readahead ones...
- *
- * This has been extended to use the NUMA policies from the mm triggering
- * the readahead.
- *
- * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
- */
-void swapin_readahead(swp_entry_t entry, unsigned long addr, struct vm_area_struct *vma)
-{
-#ifdef CONFIG_NUMA
- struct vm_area_struct *next_vma = vma ? vma->vm_next : NULL;
-#endif
- int i, num;
- struct page *new_page;
- unsigned long offset;
-
- /*
- * Get the number of handles we should do readahead io to.
- */
- num = valid_swaphandles(entry, &offset);
- for (i = 0; i < num; offset++, i++) {
- /* Ok, do the async read-ahead now */
- new_page = read_swap_cache_async(swp_entry(swp_type(entry),
- offset), vma, addr);
- if (!new_page)
- break;
- page_cache_release(new_page);
-#ifdef CONFIG_NUMA
- /*
- * Find the next applicable VMA for the NUMA policy.
- */
- addr += PAGE_SIZE;
- if (addr == 0)
- vma = NULL;
- if (vma) {
- if (addr >= vma->vm_end) {
- vma = next_vma;
- next_vma = vma ? vma->vm_next : NULL;
- }
- if (vma && addr < vma->vm_start)
- vma = NULL;
- } else {
- if (next_vma && addr >= next_vma->vm_start) {
- vma = next_vma;
- next_vma = vma->vm_next;
- }
- }
-#endif
- }
- lru_add_drain(); /* Push any new pages onto the LRU now */
-}
-
-/*
- * We hold the mm semaphore and the page_table_lock on entry and
- * should release the pagetable lock on exit..
- */
-static int do_swap_page(struct mm_struct * mm,
- struct vm_area_struct * vma, unsigned long address,
- pte_t *page_table, pmd_t *pmd, pte_t orig_pte, int write_access)
-{
- struct page *page;
- swp_entry_t entry = pte_to_swp_entry(orig_pte);
- pte_t pte;
- int ret = VM_FAULT_MINOR;
-
- pte_unmap(page_table);
- spin_unlock(&mm->page_table_lock);
- page = lookup_swap_cache(entry);
- if (!page) {
- swapin_readahead(entry, address, vma);
- page = read_swap_cache_async(entry, vma, address);
- if (!page) {
- /*
- * Back out if somebody else faulted in this pte while
- * we released the page table lock.
- */
- spin_lock(&mm->page_table_lock);
- page_table = pte_offset_map(pmd, address);
- if (likely(pte_same(*page_table, orig_pte)))
- ret = VM_FAULT_OOM;
- else
- ret = VM_FAULT_MINOR;
- pte_unmap(page_table);
- spin_unlock(&mm->page_table_lock);
- goto out;
- }
-
- /* Had to read the page from swap area: Major fault */
- ret = VM_FAULT_MAJOR;
- inc_page_state(pgmajfault);
- grab_swap_token();
- }
-
- mark_page_accessed(page);
- lock_page(page);
-
- /*
- * Back out if somebody else faulted in this pte while we
- * released the page table lock.
- */
- spin_lock(&mm->page_table_lock);
- page_table = pte_offset_map(pmd, address);
- if (unlikely(!pte_same(*page_table, orig_pte))) {
- pte_unmap(page_table);
- spin_unlock(&mm->page_table_lock);
- unlock_page(page);
- page_cache_release(page);
- ret = VM_FAULT_MINOR;
- goto out;
- }
-
- /* The page isn't present yet, go ahead with the fault. */
-
- swap_free(entry);
- if (vm_swap_full())
- remove_exclusive_swap_page(page);
-
- mm->rss++;
- acct_update_integrals();
- update_mem_hiwater();
-
- pte = mk_pte(page, vma->vm_page_prot);
- if (write_access && can_share_swap_page(page)) {
- pte = maybe_mkwrite(pte_mkdirty(pte), vma);
- write_access = 0;
- }
- unlock_page(page);
-
- flush_icache_page(vma, page);
- set_pte(page_table, pte);
- page_add_anon_rmap(page, vma, address);
-
- if (write_access) {
- if (do_wp_page(mm, vma, address,
- page_table, pmd, pte) == VM_FAULT_OOM)
- ret = VM_FAULT_OOM;
- goto out;
- }
-
- /* No need to invalidate - it was non-present before */
- update_mmu_cache(vma, address, pte);
- pte_unmap(page_table);
- spin_unlock(&mm->page_table_lock);
-out:
- return ret;
-}
-
-/*
- * We are called with the MM semaphore and page_table_lock
- * spinlock held to protect against concurrent faults in
- * multithreaded programs.
- */
-static int
-do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
- pte_t *page_table, pmd_t *pmd, int write_access,
- unsigned long addr)
-{
- pte_t entry;
- struct page * page = ZERO_PAGE(addr);
-
- /* Read-only mapping of ZERO_PAGE. */
- entry = pte_wrprotect(mk_pte(ZERO_PAGE(addr), vma->vm_page_prot));
-
- /* ..except if it's a write access */
- if (write_access) {
- /* Allocate our own private page. */
- pte_unmap(page_table);
- spin_unlock(&mm->page_table_lock);
-
- if (unlikely(anon_vma_prepare(vma)))
- goto no_mem;
- page = alloc_zeroed_user_highpage(vma, addr);
- if (!page)
- goto no_mem;
-
- spin_lock(&mm->page_table_lock);
- page_table = pte_offset_map(pmd, addr);
-
- if (!pte_none(*page_table)) {
- pte_unmap(page_table);
- page_cache_release(page);
- spin_unlock(&mm->page_table_lock);
- goto out;
- }
- mm->rss++;
- acct_update_integrals();
- update_mem_hiwater();
- entry = maybe_mkwrite(pte_mkdirty(mk_pte(page,
- vma->vm_page_prot)),
- vma);
- lru_cache_add_active(page);
- SetPageReferenced(page);
- page_add_anon_rmap(page, vma, addr);
- }
-
- ptep_establish_new(vma, addr, page_table, entry);
- pte_unmap(page_table);
-
- /* No need to invalidate - it was non-present before */
- update_mmu_cache(vma, addr, entry);
- spin_unlock(&mm->page_table_lock);
-out:
- return VM_FAULT_MINOR;
-no_mem:
- return VM_FAULT_OOM;
-}
-
-/*
- * do_no_page() tries to create a new page mapping. It aggressively
- * tries to share with existing pages, but makes a separate copy if
- * the "write_access" parameter is true in order to avoid the next
- * page fault.
- *
- * As this is called only for pages that do not currently exist, we
- * do not need to flush old virtual caches or the TLB.
- *
- * This is called with the MM semaphore held and the page table
- * spinlock held. Exit with the spinlock released.
- */
-static int
-do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long address, int write_access, pte_t *page_table, pmd_t *pmd)
-{
- struct page * new_page;
- struct address_space *mapping = NULL;
- pte_t entry;
- unsigned int sequence = 0;
- int ret = VM_FAULT_MINOR;
- int anon = 0;
-
- if (!vma->vm_ops || !vma->vm_ops->nopage)
- return do_anonymous_page(mm, vma, page_table,
- pmd, write_access, address);
- pte_unmap(page_table);
- spin_unlock(&mm->page_table_lock);
-
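- /* Snapshot truncate_count so a truncate that races with ->nopage can be detected once the lock is retaken */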
- if (vma->vm_file) {
- mapping = vma->vm_file->f_mapping;
- sequence = mapping->truncate_count;
- smp_rmb(); /* serializes i_size against truncate_count */
- }
-retry:
- cond_resched();
- new_page = vma->vm_ops->nopage(vma, address & PAGE_MASK, &ret);
- /*
- * No smp_rmb is needed here as long as there's a full
- * spin_lock/unlock sequence inside the ->nopage callback
- * (for the pagecache lookup) that acts as an implicit
- * smp_mb() and prevents the i_size read to happen
- * after the next truncate_count read.
- */
-
- /* no page was available -- either SIGBUS or OOM */
- if (new_page == NOPAGE_SIGBUS)
- return VM_FAULT_SIGBUS;
- if (new_page == NOPAGE_OOM)
- return VM_FAULT_OOM;
-
- /*
- * Should we do an early C-O-W break?
- */
- if (write_access && !(vma->vm_flags & VM_SHARED)) {
- struct page *page;
-
- if (unlikely(anon_vma_prepare(vma)))
- goto oom;
- page = alloc_page_vma(GFP_HIGHUSER, vma, address);
- if (!page)
- goto oom;
- copy_user_highpage(page, new_page, address);
- page_cache_release(new_page);
- new_page = page;
- anon = 1;
- }
-
- spin_lock(&mm->page_table_lock);
- /*
- * For a file-backed vma, someone could have truncated or otherwise
- * invalidated this page. If unmap_mapping_range got called,
- * retry getting the page.
- */
- if (mapping && unlikely(sequence != mapping->truncate_count)) {
- sequence = mapping->truncate_count;
- spin_unlock(&mm->page_table_lock);
- page_cache_release(new_page);
- goto retry;
- }
- page_table = pte_offset_map(pmd, address);
-
- /*
- * This silly early PAGE_DIRTY setting removes a race
- * due to the bad i386 page protection. But it's valid
- * for other architectures too.
- *
- * Note that if write_access is true, we either now have
- * an exclusive copy of the page, or this is a shared mapping,
- * so we can make it writable and dirty to avoid having to
- * handle that later.
- */
- /* Only go through if we didn't race with anybody else... */
- if (pte_none(*page_table)) {
- if (!PageReserved(new_page))
- ++mm->rss;
- acct_update_integrals();
- update_mem_hiwater();
-
- flush_icache_page(vma, new_page);
- entry = mk_pte(new_page, vma->vm_page_prot);
- if (write_access)
- entry = maybe_mkwrite(pte_mkdirty(entry), vma);
- ptep_establish_new(vma, address, page_table, entry);
- if (anon) {
- lru_cache_add_active(new_page);
- page_add_anon_rmap(new_page, vma, address);
- } else
- page_add_file_rmap(new_page);
- pte_unmap(page_table);
- } else {
- /* One of our sibling threads was faster, back out. */
- pte_unmap(page_table);
- page_cache_release(new_page);
- spin_unlock(&mm->page_table_lock);
- goto out;
- }
-
- /* no need to invalidate: a not-present page shouldn't be cached */
- update_mmu_cache(vma, address, entry);
- spin_unlock(&mm->page_table_lock);
-out:
- return ret;
-oom:
- page_cache_release(new_page);
- ret = VM_FAULT_OOM;
- goto out;
-}
-
-/*
- * Fault of a previously existing named mapping. Repopulate the pte
- * from the encoded file_pte if possible. This enables swappable
- * nonlinear vmas.
- */
-static int do_file_page(struct mm_struct * mm, struct vm_area_struct * vma,
- unsigned long address, int write_access, pte_t *pte, pmd_t *pmd)
-{
- unsigned long pgoff;
- int err;
-
- BUG_ON(!vma->vm_ops || !vma->vm_ops->nopage);
- /*
- * Fall back to the linear mapping if the fs does not support
- * ->populate:
- */
- if (!vma->vm_ops || !vma->vm_ops->populate ||
- (write_access && !(vma->vm_flags & VM_SHARED))) {
- pte_clear(pte);
- return do_no_page(mm, vma, address, write_access, pte, pmd);
- }
-
- pgoff = pte_to_pgoff(*pte);
-
- pte_unmap(pte);
- spin_unlock(&mm->page_table_lock);
-
- err = vma->vm_ops->populate(vma, address & PAGE_MASK, PAGE_SIZE, vma->vm_page_prot, pgoff, 0);
- if (err == -ENOMEM)
- return VM_FAULT_OOM;
- if (err)
- return VM_FAULT_SIGBUS;
- return VM_FAULT_MAJOR;
-}
-
-/*
- * These routines also need to handle stuff like marking pages dirty
- * and/or accessed for architectures that don't do it in hardware (most
- * RISC architectures). The early dirtying is also good on the i386.
- *
- * There is also a hook called "update_mmu_cache()" that architectures
- * with external mmu caches can use to update those (ie the Sparc or
- * PowerPC hashed page tables that act as extended TLBs).
- *
- * Note the "page_table_lock". It is to protect against kswapd removing
- * pages from under us. Note that kswapd only ever _removes_ pages, never
- * adds them. As such, once we have noticed that the page is not present,
- * we can drop the lock early.
- *
- * The adding of pages is protected by the MM semaphore (which we hold),
- * so we don't need to worry about a page being suddenly been added into
- * our VM.
- *
- * We enter with the pagetable spinlock held, we are supposed to
- * release it when done.
- */
-static inline int handle_pte_fault(struct mm_struct *mm,
- struct vm_area_struct * vma, unsigned long address,
- int write_access, pte_t *pte, pmd_t *pmd)
-{
- pte_t entry;
-
- entry = *pte;
- if (!pte_present(entry)) {
- /*
- * If it truly wasn't present, we know that kswapd
- * and the PTE updates will not touch it later. So
- * drop the lock.
- */
- if (pte_none(entry))
- return do_no_page(mm, vma, address, write_access, pte, pmd);
- if (pte_file(entry))
- return do_file_page(mm, vma, address, write_access, pte, pmd);
- return do_swap_page(mm, vma, address, pte, pmd, entry, write_access);
- }
-
- if (write_access) {
- if (!pte_write(entry))
- return do_wp_page(mm, vma, address, pte, pmd, entry);
-
- entry = pte_mkdirty(entry);
- }
- entry = pte_mkyoung(entry);
- ptep_set_access_flags(vma, address, pte, entry, write_access);
- update_mmu_cache(vma, address, entry);
- pte_unmap(pte);
- spin_unlock(&mm->page_table_lock);
- return VM_FAULT_MINOR;
-}
-
-/*
- * By the time we get here, we already hold the mm semaphore
- */
-int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct * vma,
- unsigned long address, int write_access)
-{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
-
- __set_current_state(TASK_RUNNING);
-
- inc_page_state(pgfault);
-
- if (is_vm_hugetlb_page(vma))
- return VM_FAULT_SIGBUS; /* mapping truncation does this. */
-
- /*
- * We need the page table lock to synchronize with kswapd
- * and the SMP-safe atomic PTE updates.
- */
- pgd = pgd_offset(mm, address);
- spin_lock(&mm->page_table_lock);
-
- pud = pud_alloc(mm, pgd, address);
- if (!pud)
- goto oom;
-
- pmd = pmd_alloc(mm, pud, address);
- if (!pmd)
- goto oom;
-
- pte = pte_alloc_map(mm, pmd, address);
- if (!pte)
- goto oom;
-
- return handle_pte_fault(mm, vma, address, write_access, pte, pmd);
-
- oom:
- spin_unlock(&mm->page_table_lock);
- return VM_FAULT_OOM;
-}
-
-#ifndef __ARCH_HAS_4LEVEL_HACK
-/*
- * Allocate page upper directory.
- *
- * We've already handled the fast-path in-line, and we own the
- * page table lock.
- *
- * On a two-level or three-level page table, this ends up actually being
- * entirely optimized away.
- */
-pud_t fastcall *__pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
-{
- pud_t *new;
-
- spin_unlock(&mm->page_table_lock);
- new = pud_alloc_one(mm, address);
- spin_lock(&mm->page_table_lock);
- if (!new)
- return NULL;
-
- /*
- * Because we dropped the lock, we should re-check the
- * entry, as somebody else could have populated it..
- */
- if (pgd_present(*pgd)) {
- pud_free(new);
- goto out;
- }
- pgd_populate(mm, pgd, new);
- out:
- return pud_offset(pgd, address);
-}
-
-/*
- * Allocate page middle directory.
- *
- * We've already handled the fast-path in-line, and we own the
- * page table lock.
- *
- * On a two-level page table, this ends up actually being entirely
- * optimized away.
- */
-pmd_t fastcall *__pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
-{
- pmd_t *new;
-
- spin_unlock(&mm->page_table_lock);
- new = pmd_alloc_one(mm, address);
- spin_lock(&mm->page_table_lock);
- if (!new)
- return NULL;
-
- /*
- * Because we dropped the lock, we should re-check the
- * entry, as somebody else could have populated it..
- */
- if (pud_present(*pud)) {
- pmd_free(new);
- goto out;
- }
- pud_populate(mm, pud, new);
- out:
- return pmd_offset(pud, address);
-}
-#else
-pmd_t fastcall *__pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
-{
- pmd_t *new;
-
- spin_unlock(&mm->page_table_lock);
- new = pmd_alloc_one(mm, address);
- spin_lock(&mm->page_table_lock);
- if (!new)
- return NULL;
-
- /*
- * Because we dropped the lock, we should re-check the
- * entry, as somebody else could have populated it..
- */
- if (pgd_present(*pud)) {
- pmd_free(new);
- goto out;
- }
- pgd_populate(mm, pud, new);
-out:
- return pmd_offset(pud, address);
-}
-#endif
-
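-/* Fault in every page in [addr, end); returns 0 only if all of them could be made present */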
-int make_pages_present(unsigned long addr, unsigned long end)
-{
- int ret, len, write;
- struct vm_area_struct * vma;
-
- vma = find_vma(current->mm, addr);
- if (!vma)
- return -1;
- write = (vma->vm_flags & VM_WRITE) != 0;
- if (addr >= end)
- BUG();
- if (end > vma->vm_end)
- BUG();
- len = (end+PAGE_SIZE-1)/PAGE_SIZE-addr/PAGE_SIZE;
- ret = get_user_pages(current, current->mm, addr,
- len, write, 0, NULL, NULL);
- if (ret < 0)
- return ret;
- return ret == len ? 0 : -1;
-}
-
-/*
- * Map a vmalloc()-space virtual address to the physical page.
- */
-struct page * vmalloc_to_page(void * vmalloc_addr)
-{
- unsigned long addr = (unsigned long) vmalloc_addr;
- struct page *page = NULL;
- pgd_t *pgd = pgd_offset_k(addr);
- pud_t *pud;
- pmd_t *pmd;
- pte_t *ptep, pte;
-
- if (!pgd_none(*pgd)) {
- pud = pud_offset(pgd, addr);
- if (!pud_none(*pud)) {
- pmd = pmd_offset(pud, addr);
- if (!pmd_none(*pmd)) {
- ptep = pte_offset_map(pmd, addr);
- pte = *ptep;
- if (pte_present(pte))
- page = pte_page(pte);
- pte_unmap(ptep);
- }
- }
- }
- return page;
-}
-
-EXPORT_SYMBOL(vmalloc_to_page);
-
-/*
- * Map a vmalloc()-space virtual address to the physical page frame number.
- */
-unsigned long vmalloc_to_pfn(void * vmalloc_addr)
-{
- return page_to_pfn(vmalloc_to_page(vmalloc_addr));
-}
-
-EXPORT_SYMBOL(vmalloc_to_pfn);
-
-/*
- * update_mem_hiwater
- * - update per process rss and vm high water data
- */
-void update_mem_hiwater(void)
-{
- struct task_struct *tsk = current;
-
- if (tsk->mm) {
- if (tsk->mm->hiwater_rss < tsk->mm->rss)
- tsk->mm->hiwater_rss = tsk->mm->rss;
- if (tsk->mm->hiwater_vm < tsk->mm->total_vm)
- tsk->mm->hiwater_vm = tsk->mm->total_vm;
- }
-}
-
-#if !defined(__HAVE_ARCH_GATE_AREA)
-
-#if defined(AT_SYSINFO_EHDR)
-struct vm_area_struct gate_vma;
-
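-/* Synthetic vma describing the fixed-address gate page advertised to userspace via AT_SYSINFO_EHDR */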
-static int __init gate_vma_init(void)
-{
- gate_vma.vm_mm = NULL;
- gate_vma.vm_start = FIXADDR_USER_START;
- gate_vma.vm_end = FIXADDR_USER_END;
- gate_vma.vm_page_prot = PAGE_READONLY;
- gate_vma.vm_flags = 0;
- return 0;
-}
-__initcall(gate_vma_init);
-#endif
-
-struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
-{
-#ifdef AT_SYSINFO_EHDR
- return &gate_vma;
-#else
- return NULL;
-#endif
-}
-
-int in_gate_area_no_task(unsigned long addr)
-{
-#ifdef AT_SYSINFO_EHDR
- if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
- return 1;
-#endif
- return 0;
-}
-
-#endif /* __HAVE_ARCH_GATE_AREA */
diff -r 30ecae4339d5 -r fa660d79f695 linux-2.6.11-xen-sparse/mm/page_alloc.c
--- a/linux-2.6.11-xen-sparse/mm/page_alloc.c Thu Aug 4 01:13:46 2005
+++ /dev/null Tue Aug 9 15:17:45 2005
@@ -1,2157 +0,0 @@
-/*
- * linux/mm/page_alloc.c
- *
- * Manages the free list, the system allocates free pages here.
- * Note that kmalloc() lives in slab.c
- *
- * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
- * Swap reorganised 29.12.95, Stephen Tweedie
- * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
- * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
- * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
- * Zone balancing, Kanoj Sarcar, SGI, Jan 2000
- * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
- * (lots of bits borrowed from Ingo Molnar & Andrew Morton)
- */
-
-#include <linux/config.h>
-#include <linux/stddef.h>
-#include <linux/mm.h>
-#include <linux/swap.h>
-#include <linux/interrupt.h>
-#include <linux/pagemap.h>
-#include <linux/bootmem.h>
-#include <linux/compiler.h>
-#include <linux/module.h>
-#include <linux/suspend.h>
-#include <linux/pagevec.h>
-#include <linux/blkdev.h>
-#include <linux/slab.h>
-#include <linux/notifier.h>
-#include <linux/topology.h>
-#include <linux/sysctl.h>
-#include <linux/cpu.h>
-#include <linux/nodemask.h>
-#include <linux/vmalloc.h>
-
-#include <asm/tlbflush.h>
-#include "internal.h"
-
-/* MCD - HACK: Find somewhere to initialize this EARLY, or make this initializer cleaner */
-nodemask_t node_online_map = { { [0] = 1UL } };
-nodemask_t node_possible_map = NODE_MASK_ALL;
-struct pglist_data *pgdat_list;
-unsigned long totalram_pages;
-unsigned long totalhigh_pages;
-long nr_swap_pages;
-/*
- * results with 256, 32 in the lowmem_reserve sysctl:
- * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
- * 1G machine -> (16M dma, 784M normal, 224M high)
- * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
- * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
- * HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
- */
-int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = { 256, 32 };
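
As a sanity check of the figures in the comment above, the same arithmetic as a
standalone program (userspace C, illustrative only, not part of this changeset):

    #include <stdio.h>

    int main(void)
    {
        int normal_mb = 784, high_mb = 224;  /* the 1G split above */

        /* NORMAL allocations reserve lowmem in ZONE_DMA at ratio 256 */
        printf("NORMAL alloc: %dMB kept free in DMA\n", normal_mb / 256);
        /* HIGHMEM allocations reserve ZONE_NORMAL at ratio 32 ... */
        printf("HIGHMEM alloc: %dMB kept free in NORMAL\n", high_mb / 32);
        /* ... and ZONE_DMA at ratio 256, counting both upper zones */
        printf("HIGHMEM alloc: %dMB kept free in DMA\n",
               (high_mb + normal_mb) / 256);
        return 0;
    }
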
-
-EXPORT_SYMBOL(totalram_pages);
-EXPORT_SYMBOL(nr_swap_pages);
-
-/*
- * Used by page_zone() to look up the address of the struct zone whose
- * id is encoded in the upper bits of page->flags
- */
-struct zone *zone_table[1 << (ZONES_SHIFT + NODES_SHIFT)];
-EXPORT_SYMBOL(zone_table);
-
-static char *zone_names[MAX_NR_ZONES] = { "DMA", "Normal", "HighMem" };
-int min_free_kbytes = 1024;
-
-unsigned long __initdata nr_kernel_pages;
-unsigned long __initdata nr_all_pages;
-
-/*
- * Temporary debugging check for pages not lying within a given zone.
- */
-static int bad_range(struct zone *zone, struct page *page)
-{
- if (page_to_pfn(page) >= zone->zone_start_pfn + zone->spanned_pages)
- return 1;
- if (page_to_pfn(page) < zone->zone_start_pfn)
- return 1;
-#ifdef CONFIG_HOLES_IN_ZONE
- if (!pfn_valid(page_to_pfn(page)))
- return 1;
-#endif
- if (zone != page_zone(page))
- return 1;
- return 0;
-}
-
-static void bad_page(const char *function, struct page *page)
-{
- printk(KERN_EMERG "Bad page state at %s (in process '%s', page %p)\n",
- function, current->comm, page);
- printk(KERN_EMERG "flags:0x%0*lx mapping:%p mapcount:%d count:%d\n",
- (int)(2*sizeof(page_flags_t)), (unsigned long)page->flags,
- page->mapping, page_mapcount(page), page_count(page));
- printk(KERN_EMERG "Backtrace:\n");
- dump_stack();
- printk(KERN_EMERG "Trying to fix it up, but a reboot is needed\n");
- page->flags &= ~(1 << PG_private |
- 1 << PG_locked |
- 1 << PG_lru |
- 1 << PG_active |
- 1 << PG_dirty |
- 1 << PG_swapcache |
- 1 << PG_writeback);
- set_page_count(page, 0);
- reset_page_mapcount(page);
- page->mapping = NULL;
- tainted |= TAINT_BAD_PAGE;
-}
-
-#ifndef CONFIG_HUGETLB_PAGE
-#define prep_compound_page(page, order) do { } while (0)
-#define destroy_compound_page(page, order) do { } while (0)
-#else
-/*
- * Higher-order pages are called "compound pages". They are structured thusly:
- *
- * The first PAGE_SIZE page is called the "head page".
- *
- * The remaining PAGE_SIZE pages are called "tail pages".
- *
- * All pages have PG_compound set. All pages have their ->private pointing at
- * the head page (even the head page has this).
- *
- * The first tail page's ->mapping, if non-zero, holds the address of the
- * compound page's put_page() function.
- *
- * The order of the allocation is stored in the first tail page's ->index
- * This is only for debug at present. This usage means that zero-order pages
- * may not be compound.
- */
-static void prep_compound_page(struct page *page, unsigned long order)
-{
- int i;
- int nr_pages = 1 << order;
-
- page[1].mapping = NULL;
- page[1].index = order;
- for (i = 0; i < nr_pages; i++) {
- struct page *p = page + i;
-
- SetPageCompound(p);
- p->private = (unsigned long)page;
- }
-}
-
-static void destroy_compound_page(struct page *page, unsigned long order)
-{
- int i;
- int nr_pages = 1 << order;
-
- if (!PageCompound(page))
- return;
-
- if (page[1].index != order)
- bad_page(__FUNCTION__, page);
-
- for (i = 0; i < nr_pages; i++) {
- struct page *p = page + i;
-
- if (!PageCompound(p))
- bad_page(__FUNCTION__, page);
- if (p->private != (unsigned long)page)
- bad_page(__FUNCTION__, page);
- ClearPageCompound(p);
- }
-}
-#endif /* CONFIG_HUGETLB_PAGE */
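
To make the head/tail layout described above concrete, here is a minimal
userspace model of the metadata prep_compound_page() establishes. The
fake_page struct is a hypothetical stand-in for struct page, and the
PG_compound flag is omitted for brevity:

    #include <stdio.h>

    struct fake_page {
        unsigned long private;  /* -> head page, as described above */
        unsigned long index;    /* order, stored in the first tail page */
    };

    static void prep(struct fake_page *page, unsigned long order)
    {
        unsigned long i, nr = 1UL << order;

        page[1].index = order;
        for (i = 0; i < nr; i++)
            page[i].private = (unsigned long)page;  /* every page -> head */
    }

    int main(void)
    {
        struct fake_page pages[8];

        prep(pages, 3);                 /* one order-3 "allocation" */
        printf("order=%lu head=%p tail[1]->head=%p\n",
               pages[1].index, (void *)pages, (void *)pages[2].private);
        return 0;
    }
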
-
-/*
- * function for dealing with page's order in buddy system.
- * zone->lock is already acquired when we use these.
- * So, we don't need atomic page->flags operations here.
- */
-static inline unsigned long page_order(struct page *page) {
- return page->private;
-}
-
-static inline void set_page_order(struct page *page, int order) {
- page->private = order;
- __SetPagePrivate(page);
-}
-
-static inline void rmv_page_order(struct page *page)
-{
- __ClearPagePrivate(page);
- page->private = 0;
-}
-
-/*
- * This function checks whether a page is free and is the buddy
- * of another page. We can coalesce a page and its buddy if
- * (a) the buddy is free &&
- * (b) the buddy is on the buddy system &&
- * (c) a page and its buddy have the same order.
- * For recording a page's order, we use page->private and PG_private.
- *
- */
-static inline int page_is_buddy(struct page *page, int order)
-{
- if (PagePrivate(page) &&
- (page_order(page) == order) &&
- !PageReserved(page) &&
- page_count(page) == 0)
- return 1;
- return 0;
-}
-
-/*
- * Freeing function for a buddy system allocator.
- *
- * The concept of a buddy system is to maintain direct-mapped table
- * (containing bit values) for memory blocks of various "orders".
- * The bottom level table contains the map for the smallest allocatable
- * units of memory (here, pages), and each level above it describes
- * pairs of units from the levels below, hence, "buddies".
- * At a high level, all that happens here is marking the table entry
- * at the bottom level available, and propagating the changes upward
- * as necessary, plus some accounting needed to play nicely with other
- * parts of the VM system.
- * At each level, we keep a list of pages, which are heads of contiguous
- * free runs of (1 << order) pages, marked with PG_private. A page's
- * order is recorded in its page->private field.
- * So when we are allocating or freeing one, we can derive the state of the
- * other. That is, if we allocate a small block, and both were
- * free, the remainder of the region must be split into blocks.
- * If a block is freed, and its buddy is also free, then this
- * triggers coalescing into a block of larger size.
- *
- * -- wli
- */
-
-static inline void __free_pages_bulk (struct page *page, struct page *base,
- struct zone *zone, unsigned int order)
-{
- unsigned long page_idx;
- struct page *coalesced;
- int order_size = 1 << order;
-
- if (unlikely(order))
- destroy_compound_page(page, order);
-
- page_idx = page - base;
-
- BUG_ON(page_idx & (order_size - 1));
- BUG_ON(bad_range(zone, page));
-
- zone->free_pages += order_size;
- while (order < MAX_ORDER-1) {
- struct free_area *area;
- struct page *buddy;
- int buddy_idx;
-
- buddy_idx = (page_idx ^ (1 << order));
- buddy = base + buddy_idx;
- if (bad_range(zone, buddy))
- break;
- if (!page_is_buddy(buddy, order))
- break;
- /* Move the buddy up one level. */
- list_del(&buddy->lru);
- area = zone->free_area + order;
- area->nr_free--;
- rmv_page_order(buddy);
- page_idx &= buddy_idx;
- order++;
- }
- coalesced = base + page_idx;
- set_page_order(coalesced, order);
- list_add(&coalesced->lru, &zone->free_area[order].free_list);
- zone->free_area[order].nr_free++;
-}
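
The index arithmetic in the coalescing loop above (buddy_idx = page_idx ^
(1 << order), then page_idx &= buddy_idx on a merge) can be verified with a
small standalone program (illustrative only):

    #include <stdio.h>

    int main(void)
    {
        unsigned long page_idx = 12;  /* freeing an order-2 block at index 12 */
        unsigned int order = 2;

        /* the buddy's index differs only in bit 'order' */
        unsigned long buddy_idx = page_idx ^ (1UL << order);  /* -> 8 */
        /* after a merge, the coalesced block starts at the lower index */
        unsigned long merged = page_idx & buddy_idx;          /* -> 8 */

        printf("buddy=%lu merged=%lu new order=%u\n",
               buddy_idx, merged, order + 1);
        return 0;
    }
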
-
-static inline void free_pages_check(const char *function, struct page *page)
-{
- if ( page_mapped(page) ||
- page->mapping != NULL ||
- page_count(page) != 0 ||
- (page->flags & (
- 1 << PG_lru |
- 1 << PG_private |
- 1 << PG_locked |
- 1 << PG_active |
- 1 << PG_reclaim |
- 1 << PG_slab |
- 1 << PG_swapcache |
- 1 << PG_writeback )))
- bad_page(function, page);
- if (PageDirty(page))
- ClearPageDirty(page);
-}
-
-/*
- * Frees a list of pages.
- * Assumes all pages on list are in same zone, and of same order.
- * count is the number of pages to free, or 0 for all on the list.
- *
- * If the zone was previously in an "all pages pinned" state then look to
- * see if this freeing clears that state.
- *
- * And clear the zone's pages_scanned counter, to hold off the "all pages are
- * pinned" detection logic.
- */
-static int
-free_pages_bulk(struct zone *zone, int count,
- struct list_head *list, unsigned int order)
-{
- unsigned long flags;
- struct page *base, *page = NULL;
- int ret = 0;
-
- base = zone->zone_mem_map;
- spin_lock_irqsave(&zone->lock, flags);
- zone->all_unreclaimable = 0;
- zone->pages_scanned = 0;
- while (!list_empty(list) && count--) {
- page = list_entry(list->prev, struct page, lru);
- /* have to delete it, as __free_pages_bulk manipulates the list */
- list_del(&page->lru);
- __free_pages_bulk(page, base, zone, order);
- ret++;
- }
- spin_unlock_irqrestore(&zone->lock, flags);
- return ret;
-}
-
-void __free_pages_ok(struct page *page, unsigned int order)
-{
- LIST_HEAD(list);
- int i;
-
- if (arch_free_page(page, order))
- return;
-
- mod_page_state(pgfree, 1 << order);
-
-#ifndef CONFIG_MMU
- if (order > 0)
- for (i = 1 ; i < (1 << order) ; ++i)
- __put_page(page + i);
-#endif
-
- for (i = 0 ; i < (1 << order) ; ++i)
- free_pages_check(__FUNCTION__, page + i);
- list_add(&page->lru, &list);
- kernel_map_pages(page, 1<<order, 0);
- free_pages_bulk(page_zone(page), 1, &list, order);
-}
-
-
-/*
- * The order of subdivision here is critical for the IO subsystem.
- * Please do not alter this order without good reasons and regression
- * testing. Specifically, as large blocks of memory are subdivided,
- * the order in which smaller blocks are delivered depends on the order
- * they're subdivided in this function. This is the primary factor
- * influencing the order in which pages are delivered to the IO
- * subsystem according to empirical testing, and this is also justified
- * by considering the behavior of a buddy system containing a single
- * large block of memory acted on by a series of small allocations.
- * This behavior is a critical factor in sglist merging's success.
- *
- * -- wli
- */
-static inline struct page *
-expand(struct zone *zone, struct page *page,
- int low, int high, struct free_area *area)
-{
- unsigned long size = 1 << high;
-
- while (high > low) {
- area--;
- high--;
- size >>= 1;
- BUG_ON(bad_range(zone, &page[size]));
- list_add(&page[size].lru, &area->free_list);
- area->nr_free++;
- set_page_order(&page[size], high);
- }
- return page;
-}
-
-void set_page_refs(struct page *page, int order)
-{
-#ifdef CONFIG_MMU
- set_page_count(page, 1);
-#else
- int i;
-
- /*
- * We need to reference all the pages for this order, otherwise if
- * anyone accesses one of the pages with (get/put) it will be freed.
- * - eg: access_process_vm()
- */
- for (i = 0; i < (1 << order); i++)
- set_page_count(page + i, 1);
-#endif /* CONFIG_MMU */
-}
-
-/*
- * This page is about to be returned from the page allocator
- */
-static void prep_new_page(struct page *page, int order)
-{
- if (page->mapping || page_mapped(page) ||
- (page->flags & (
- 1 << PG_private |
- 1 << PG_locked |
- 1 << PG_lru |
- 1 << PG_active |
- 1 << PG_dirty |
- 1 << PG_reclaim |
- 1 << PG_swapcache |
- 1 << PG_writeback )))
- bad_page(__FUNCTION__, page);
-
- page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
- 1 << PG_referenced | 1 << PG_arch_1 |
- 1 << PG_checked | 1 << PG_mappedtodisk);
- page->private = 0;
- set_page_refs(page, order);
- kernel_map_pages(page, 1 << order, 1);
-}
-
-/*
- * Do the hard work of removing an element from the buddy allocator.
- * Call me with the zone->lock already held.
- */
-static struct page *__rmqueue(struct zone *zone, unsigned int order)
-{
- struct free_area * area;
- unsigned int current_order;
- struct page *page;
-
- for (current_order = order; current_order < MAX_ORDER; ++current_order) {
- area = zone->free_area + current_order;
- if (list_empty(&area->free_list))
- continue;
-
- page = list_entry(area->free_list.next, struct page, lru);
- list_del(&page->lru);
- rmv_page_order(page);
- area->nr_free--;
- zone->free_pages -= 1UL << order;
- return expand(zone, page, order, current_order, area);
- }
-
- return NULL;
-}
-
-/*
- * Obtain a specified number of elements from the buddy allocator, all under
- * a single hold of the lock, for efficiency. Add them to the supplied list.
- * Returns the number of new pages which were placed at *list.
- */
-static int rmqueue_bulk(struct zone *zone, unsigned int order,
- unsigned long count, struct list_head *list)
-{
- unsigned long flags;
- int i;
- int allocated = 0;
- struct page *page;
-
- spin_lock_irqsave(&zone->lock, flags);
- for (i = 0; i < count; ++i) {
- page = __rmqueue(zone, order);
- if (page == NULL)
- break;
- allocated++;
- list_add_tail(&page->lru, list);
- }
- spin_unlock_irqrestore(&zone->lock, flags);
- return allocated;
-}
-
-#if defined(CONFIG_PM) || defined(CONFIG_HOTPLUG_CPU)
-static void __drain_pages(unsigned int cpu)
-{
- struct zone *zone;
- int i;
-
- for_each_zone(zone) {
- struct per_cpu_pageset *pset;
-
- pset = &zone->pageset[cpu];
- for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
- struct per_cpu_pages *pcp;
-
- pcp = &pset->pcp[i];
- pcp->count -= free_pages_bulk(zone, pcp->count,
- &pcp->list, 0);
- }
- }
-}
-#endif /* CONFIG_PM || CONFIG_HOTPLUG_CPU */
-
-#ifdef CONFIG_PM
-
-void mark_free_pages(struct zone *zone)
-{
- unsigned long zone_pfn, flags;
- int order;
- struct list_head *curr;
-
- if (!zone->spanned_pages)
- return;
-
- spin_lock_irqsave(&zone->lock, flags);
- for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
- ClearPageNosaveFree(pfn_to_page(zone_pfn + zone->zone_start_pfn));
-
- for (order = MAX_ORDER - 1; order >= 0; --order)
- list_for_each(curr, &zone->free_area[order].free_list) {
- unsigned long start_pfn, i;
-
- start_pfn = page_to_pfn(list_entry(curr, struct page, lru));
-
- for (i=0; i < (1<<order); i++)
- SetPageNosaveFree(pfn_to_page(start_pfn+i));
- }
- spin_unlock_irqrestore(&zone->lock, flags);
-}
-
-/*
- * Spill all of this CPU's per-cpu pages back into the buddy allocator.
- */
-void drain_local_pages(void)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- __drain_pages(smp_processor_id());
- local_irq_restore(flags);
-}
-#endif /* CONFIG_PM */
-
-static void zone_statistics(struct zonelist *zonelist, struct zone *z)
-{
-#ifdef CONFIG_NUMA
- unsigned long flags;
- int cpu;
- pg_data_t *pg = z->zone_pgdat;
- pg_data_t *orig = zonelist->zones[0]->zone_pgdat;
- struct per_cpu_pageset *p;
-
- local_irq_save(flags);
- cpu = smp_processor_id();
- p = &z->pageset[cpu];
- if (pg == orig) {
- z->pageset[cpu].numa_hit++;
- } else {
- p->numa_miss++;
- zonelist->zones[0]->pageset[cpu].numa_foreign++;
- }
- if (pg == NODE_DATA(numa_node_id()))
- p->local_node++;
- else
- p->other_node++;
- local_irq_restore(flags);
-#endif
-}
-
-/*
- * Free a 0-order page
- */
-static void FASTCALL(free_hot_cold_page(struct page *page, int cold));
-static void fastcall free_hot_cold_page(struct page *page, int cold)
-{
- struct zone *zone = page_zone(page);
- struct per_cpu_pages *pcp;
- unsigned long flags;
-
- if (arch_free_page(page, 0))
- return;
-
- kernel_map_pages(page, 1, 0);
- inc_page_state(pgfree);
- if (PageAnon(page))
- page->mapping = NULL;
- free_pages_check(__FUNCTION__, page);
- pcp = &zone->pageset[get_cpu()].pcp[cold];
- local_irq_save(flags);
- if (pcp->count >= pcp->high)
- pcp->count -= free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
- list_add(&page->lru, &pcp->list);
- pcp->count++;
- local_irq_restore(flags);
- put_cpu();
-}
-
-void fastcall free_hot_page(struct page *page)
-{
- free_hot_cold_page(page, 0);
-}
-
-void fastcall free_cold_page(struct page *page)
-{
- free_hot_cold_page(page, 1);
-}
-
-static inline void prep_zero_page(struct page *page, int order, int gfp_flags)
-{
- int i;
-
- BUG_ON((gfp_flags & (__GFP_WAIT | __GFP_HIGHMEM)) == __GFP_HIGHMEM);
- for(i = 0; i < (1 << order); i++)
- clear_highpage(page + i);
-}
-
-/*
- * Really, prep_compound_page() should be called from __rmqueue_bulk(). But
- * we cheat by calling it from here, in the order > 0 path. Saves a branch
- * or two.
- */
-static struct page *
-buffered_rmqueue(struct zone *zone, int order, int gfp_flags)
-{
- unsigned long flags;
- struct page *page = NULL;
- int cold = !!(gfp_flags & __GFP_COLD);
-
- if (order == 0) {
- struct per_cpu_pages *pcp;
-
- pcp = &zone->pageset[get_cpu()].pcp[cold];
- local_irq_save(flags);
- if (pcp->count <= pcp->low)
- pcp->count += rmqueue_bulk(zone, 0,
- pcp->batch, &pcp->list);
- if (pcp->count) {
- page = list_entry(pcp->list.next, struct page, lru);
- list_del(&page->lru);
- pcp->count--;
- }
- local_irq_restore(flags);
- put_cpu();
- }
-
- if (page == NULL) {
- spin_lock_irqsave(&zone->lock, flags);
- page = __rmqueue(zone, order);
- spin_unlock_irqrestore(&zone->lock, flags);
- }
-
- if (page != NULL) {
- BUG_ON(bad_range(zone, page));
- mod_page_state_zone(zone, pgalloc, 1 << order);
- prep_new_page(page, order);
-
- if (gfp_flags & __GFP_ZERO)
- prep_zero_page(page, order, gfp_flags);
-
- if (order && (gfp_flags & __GFP_COMP))
- prep_compound_page(page, order);
- }
- return page;
-}
-
-/*
- * Return 1 if free pages are above 'mark'. This takes into account the order
- * of the allocation.
- */
-int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
- int classzone_idx, int can_try_harder, int gfp_high)
-{
- /* free_pages may go negative - that's OK */
- long min = mark, free_pages = z->free_pages - (1 << order) + 1;
- int o;
-
- if (gfp_high)
- min -= min / 2;
- if (can_try_harder)
- min -= min / 4;
-
- if (free_pages <= min + z->lowmem_reserve[classzone_idx])
- return 0;
- for (o = 0; o < order; o++) {
- /* At the next order, this order's pages become unavailable */
- free_pages -= z->free_area[o].nr_free << o;
-
- /* Require fewer higher order pages to be free */
- min >>= 1;
-
- if (free_pages <= min)
- return 0;
- }
- return 1;
-}
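
A simplified standalone model of the check above; the lowmem_reserve term is
folded into 'min' here, so this is a sketch of the logic rather than the exact
kernel code:

    #include <stdio.h>

    static int watermark_ok(long free_pages, long min, int order,
                            const long *nr_free)  /* free blocks per order */
    {
        int o;

        free_pages -= (1L << order) - 1;  /* free_pages may go negative */
        if (free_pages <= min)
            return 0;
        for (o = 0; o < order; o++) {
            free_pages -= nr_free[o] << o;  /* unusable at the next order */
            min >>= 1;                      /* need fewer high-order pages */
            if (free_pages <= min)
                return 0;
        }
        return 1;
    }

    int main(void)
    {
        long nr_free[3] = { 100, 20, 4 };   /* order-0/1/2 free blocks */
        long free = 100 + 20 * 2 + 4 * 4;   /* 156 pages in total */

        printf("order-2 alloc ok? %d\n", watermark_ok(free, 32, 2, nr_free));
        return 0;
    }
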
-
-/*
- * This is the 'heart' of the zoned buddy allocator.
- */
-struct page * fastcall
-__alloc_pages(unsigned int gfp_mask, unsigned int order,
- struct zonelist *zonelist)
-{
- const int wait = gfp_mask & __GFP_WAIT;
- struct zone **zones, *z;
- struct page *page;
- struct reclaim_state reclaim_state;
- struct task_struct *p = current;
- int i;
- int classzone_idx;
- int do_retry;
- int can_try_harder;
- int did_some_progress;
-
- might_sleep_if(wait);
-
- /*
- * The caller may dip into page reserves a bit more if the caller
- * cannot run direct reclaim, or if the caller has a realtime scheduling
- * policy.
- */
- can_try_harder = (unlikely(rt_task(p)) && !in_interrupt()) || !wait;
-
- zones = zonelist->zones; /* the list of zones suitable for gfp_mask */
-
- if (unlikely(zones[0] == NULL)) {
- /* Should this ever happen?? */
- return NULL;
- }
-
- classzone_idx = zone_idx(zones[0]);
-
- restart:
- /* Go through the zonelist once, looking for a zone with enough free */
- for (i = 0; (z = zones[i]) != NULL; i++) {
-
- if (!zone_watermark_ok(z, order, z->pages_low,
- classzone_idx, 0, 0))
- continue;
-
- page = buffered_rmqueue(z, order, gfp_mask);
- if (page)
- goto got_pg;
- }
-
- for (i = 0; (z = zones[i]) != NULL; i++)
- wakeup_kswapd(z, order);
-
- /*
- * Go through the zonelist again. Let __GFP_HIGH and allocations
- * coming from realtime tasks go deeper into reserves
- */
- for (i = 0; (z = zones[i]) != NULL; i++) {
- if (!zone_watermark_ok(z, order, z->pages_min,
- classzone_idx, can_try_harder,
- gfp_mask & __GFP_HIGH))
- continue;
-
- page = buffered_rmqueue(z, order, gfp_mask);
- if (page)
- goto got_pg;
- }
-
- /* This allocation should allow future memory freeing. */
- if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE))) && !in_interrupt()) {
- /* go through the zonelist yet again, ignoring mins */
- for (i = 0; (z = zones[i]) != NULL; i++) {
- page = buffered_rmqueue(z, order, gfp_mask);
- if (page)
- goto got_pg;
- }
- goto nopage;
- }
-
- /* Atomic allocations - we can't balance anything */
- if (!wait)
- goto nopage;
-
-rebalance:
- cond_resched();
-
- /* We now go into synchronous reclaim */
- p->flags |= PF_MEMALLOC;
- reclaim_state.reclaimed_slab = 0;
- p->reclaim_state = &reclaim_state;
-
- did_some_progress = try_to_free_pages(zones, gfp_mask, order);
-
- p->reclaim_state = NULL;
- p->flags &= ~PF_MEMALLOC;
-
- cond_resched();
-
- if (likely(did_some_progress)) {
- /*
- * Go through the zonelist yet one more time, keep
- * very high watermark here, this is only to catch
- * a parallel oom killing, we must fail if we're still
- * under heavy pressure.
- */
- for (i = 0; (z = zones[i]) != NULL; i++) {
- if (!zone_watermark_ok(z, order, z->pages_min,
- classzone_idx, can_try_harder,
- gfp_mask & __GFP_HIGH))
- continue;
-
- page = buffered_rmqueue(z, order, gfp_mask);
- if (page)
- goto got_pg;
- }
- } else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
- /*
- * Go through the zonelist yet one more time, keep
- * very high watermark here, this is only to catch
- * a parallel oom killing, we must fail if we're still
- * under heavy pressure.
- */
- for (i = 0; (z = zones[i]) != NULL; i++) {
- if (!zone_watermark_ok(z, order, z->pages_high,
- classzone_idx, 0, 0))
- continue;
-
- page = buffered_rmqueue(z, order, gfp_mask);
- if (page)
- goto got_pg;
- }
-
- out_of_memory(gfp_mask);
- goto restart;
- }
-
- /*
- * Don't let big-order allocations loop unless the caller explicitly
- * requests that. Wait for some write requests to complete then retry.
- *
- * In this implementation, __GFP_REPEAT means __GFP_NOFAIL for order
- * <= 3, but that may not be true in other implementations.
- */
- do_retry = 0;
- if (!(gfp_mask & __GFP_NORETRY)) {
- if ((order <= 3) || (gfp_mask & __GFP_REPEAT))
- do_retry = 1;
- if (gfp_mask & __GFP_NOFAIL)
- do_retry = 1;
- }
- if (do_retry) {
- blk_congestion_wait(WRITE, HZ/50);
- goto rebalance;
- }
-
-nopage:
- if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
- printk(KERN_WARNING "%s: page allocation failure."
- " order:%d, mode:0x%x\n",
- p->comm, order, gfp_mask);
- dump_stack();
- }
- return NULL;
-got_pg:
- zone_statistics(zonelist, z);
- return page;
-}
-
-EXPORT_SYMBOL(__alloc_pages);
-
-/*
- * Common helper functions.
- */
-fastcall unsigned long __get_free_pages(unsigned int gfp_mask, unsigned int order)
-{
- struct page * page;
- page = alloc_pages(gfp_mask, order);
- if (!page)
- return 0;
- return (unsigned long) page_address(page);
-}
-
-EXPORT_SYMBOL(__get_free_pages);
-
-fastcall unsigned long get_zeroed_page(unsigned int gfp_mask)
-{
- struct page * page;
-
- /*
- * get_zeroed_page() returns a 32-bit address, which cannot represent
- * a highmem page
- */
- BUG_ON(gfp_mask & __GFP_HIGHMEM);
-
- page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
- if (page)
- return (unsigned long) page_address(page);
- return 0;
-}
-
-EXPORT_SYMBOL(get_zeroed_page);
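
A kernel-context usage sketch for these helpers (illustrative only;
example_alloc is a hypothetical caller, not part of this changeset):

    /* allocate one zeroed lowmem page and release it again */
    static int example_alloc(void)
    {
        unsigned long addr = get_zeroed_page(GFP_KERNEL);  /* may sleep */

        if (!addr)
            return -ENOMEM;
        /* ... use the page through its kernel virtual address ... */
        free_page(addr);  /* equivalent to free_pages(addr, 0) */
        return 0;
    }
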
-
-void __pagevec_free(struct pagevec *pvec)
-{
- int i = pagevec_count(pvec);
-
- while (--i >= 0)
- free_hot_cold_page(pvec->pages[i], pvec->cold);
-}
-
-fastcall void __free_pages(struct page *page, unsigned int order)
-{
- if (!PageReserved(page) && put_page_testzero(page)) {
- if (order == 0)
- free_hot_page(page);
- else
- __free_pages_ok(page, order);
- }
-}
-
-EXPORT_SYMBOL(__free_pages);
-
-fastcall void free_pages(unsigned long addr, unsigned int order)
-{
- if (addr != 0) {
- BUG_ON(!virt_addr_valid((void *)addr));
- __free_pages(virt_to_page((void *)addr), order);
- }
-}
-
-EXPORT_SYMBOL(free_pages);
-
-/*
- * Total amount of free (allocatable) RAM:
- */
-unsigned int nr_free_pages(void)
-{
- unsigned int sum = 0;
- struct zone *zone;
-
- for_each_zone(zone)
- sum += zone->free_pages;
-
- return sum;
-}
-
-EXPORT_SYMBOL(nr_free_pages);
-
-#ifdef CONFIG_NUMA
-unsigned int nr_free_pages_pgdat(pg_data_t *pgdat)
-{
- unsigned int i, sum = 0;
-
- for (i = 0; i < MAX_NR_ZONES; i++)
- sum += pgdat->node_zones[i].free_pages;
-
- return sum;
-}
-#endif
-
-static unsigned int nr_free_zone_pages(int offset)
-{
- pg_data_t *pgdat;
- unsigned int sum = 0;
-
- for_each_pgdat(pgdat) {
- struct zonelist *zonelist = pgdat->node_zonelists + offset;
- struct zone **zonep = zonelist->zones;
- struct zone *zone;
-
- for (zone = *zonep++; zone; zone = *zonep++) {
- unsigned long size = zone->present_pages;
- unsigned long high = zone->pages_high;
- if (size > high)
- sum += size - high;
- }
- }
-
- return sum;
-}
-
-/*
- * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
- */
-unsigned int nr_free_buffer_pages(void)
-{
- return nr_free_zone_pages(GFP_USER & GFP_ZONEMASK);
-}
-
-/*
- * Amount of free RAM allocatable within all zones
- */
-unsigned int nr_free_pagecache_pages(void)
-{
- return nr_free_zone_pages(GFP_HIGHUSER & GFP_ZONEMASK);
-}
-
-#ifdef CONFIG_HIGHMEM
-unsigned int nr_free_highpages (void)
-{
- pg_data_t *pgdat;
- unsigned int pages = 0;
-
- for_each_pgdat(pgdat)
- pages += pgdat->node_zones[ZONE_HIGHMEM].free_pages;
-
- return pages;
-}
-#endif
-
-#ifdef CONFIG_NUMA
-static void show_node(struct zone *zone)
-{
- printk("Node %d ", zone->zone_pgdat->node_id);
-}
-#else
-#define show_node(zone) do { } while (0)
-#endif
-
-/*
- * Accumulate the page_state information across all CPUs.
- * The result is unavoidably approximate - it can change
- * during and after execution of this function.
- */
-static DEFINE_PER_CPU(struct page_state, page_states) = {0};
-
-atomic_t nr_pagecache = ATOMIC_INIT(0);
-EXPORT_SYMBOL(nr_pagecache);
-#ifdef CONFIG_SMP
-DEFINE_PER_CPU(long, nr_pagecache_local) = 0;
-#endif
-
-void __get_page_state(struct page_state *ret, int nr)
-{
- int cpu = 0;
-
- memset(ret, 0, sizeof(*ret));
-
- cpu = first_cpu(cpu_online_map);
- while (cpu < NR_CPUS) {
- unsigned long *in, *out, off;
-
- in = (unsigned long *)&per_cpu(page_states, cpu);
-
- cpu = next_cpu(cpu, cpu_online_map);
-
- if (cpu < NR_CPUS)
- prefetch(&per_cpu(page_states, cpu));
-
- out = (unsigned long *)ret;
- for (off = 0; off < nr; off++)
- *out++ += *in++;
- }
-}
-
-void get_page_state(struct page_state *ret)
-{
- int nr;
-
- nr = offsetof(struct page_state, GET_PAGE_STATE_LAST);
- nr /= sizeof(unsigned long);
-
- __get_page_state(ret, nr + 1);
-}
-
-void get_full_page_state(struct page_state *ret)
-{
- __get_page_state(ret, sizeof(*ret) / sizeof(unsigned long));
-}
-
-unsigned long __read_page_state(unsigned offset)
-{
- unsigned long ret = 0;
- int cpu;
-
- for_each_online_cpu(cpu) {
- unsigned long in;
-
- in = (unsigned long)&per_cpu(page_states, cpu) + offset;
- ret += *((unsigned long *)in);
- }
- return ret;
-}
-
-void __mod_page_state(unsigned offset, unsigned long delta)
-{
- unsigned long flags;
- void* ptr;
-
- local_irq_save(flags);
- ptr = &__get_cpu_var(page_states);
- *(unsigned long*)(ptr + offset) += delta;
- local_irq_restore(flags);
-}
-
-EXPORT_SYMBOL(__mod_page_state);
-
-void __get_zone_counts(unsigned long *active, unsigned long *inactive,
- unsigned long *free, struct pglist_data *pgdat)
-{
- struct zone *zones = pgdat->node_zones;
- int i;
-
- *active = 0;
- *inactive = 0;
- *free = 0;
- for (i = 0; i < MAX_NR_ZONES; i++) {
- *active += zones[i].nr_active;
- *inactive += zones[i].nr_inactive;
- *free += zones[i].free_pages;
- }
-}
-
-void get_zone_counts(unsigned long *active,
- unsigned long *inactive, unsigned long *free)
-{
- struct pglist_data *pgdat;
-
- *active = 0;
- *inactive = 0;
- *free = 0;
- for_each_pgdat(pgdat) {
- unsigned long l, m, n;
- __get_zone_counts(&l, &m, &n, pgdat);
- *active += l;
- *inactive += m;
- *free += n;
- }
-}
-
-void si_meminfo(struct sysinfo *val)
-{
- val->totalram = totalram_pages;
- val->sharedram = 0;
- val->freeram = nr_free_pages();
- val->bufferram = nr_blockdev_pages();
-#ifdef CONFIG_HIGHMEM
- val->totalhigh = totalhigh_pages;
- val->freehigh = nr_free_highpages();
-#else
- val->totalhigh = 0;
- val->freehigh = 0;
-#endif
- val->mem_unit = PAGE_SIZE;
-}
-
-EXPORT_SYMBOL(si_meminfo);
-
-#ifdef CONFIG_NUMA
-void si_meminfo_node(struct sysinfo *val, int nid)
-{
- pg_data_t *pgdat = NODE_DATA(nid);
-
- val->totalram = pgdat->node_present_pages;
- val->freeram = nr_free_pages_pgdat(pgdat);
- val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
- val->freehigh = pgdat->node_zones[ZONE_HIGHMEM].free_pages;
- val->mem_unit = PAGE_SIZE;
-}
-#endif
-
-#define K(x) ((x) << (PAGE_SHIFT-10))
-
-/*
- * Show free area list (used inside shift_scroll-lock stuff)
- * We also calculate the percentage fragmentation. We do this by counting the
- * memory on each free list with the exception of the first item on the list.
- */
-void show_free_areas(void)
-{
- struct page_state ps;
- int cpu, temperature;
- unsigned long active;
- unsigned long inactive;
- unsigned long free;
- struct zone *zone;
-
- for_each_zone(zone) {
- show_node(zone);
- printk("%s per-cpu:", zone->name);
-
- if (!zone->present_pages) {
- printk(" empty\n");
- continue;
- } else
- printk("\n");
-
- for (cpu = 0; cpu < NR_CPUS; ++cpu) {
- struct per_cpu_pageset *pageset;
-
- if (!cpu_possible(cpu))
- continue;
-
- pageset = zone->pageset + cpu;
-
- for (temperature = 0; temperature < 2; temperature++)
- printk("cpu %d %s: low %d, high %d, batch %d\n",
- cpu,
- temperature ? "cold" : "hot",
- pageset->pcp[temperature].low,
- pageset->pcp[temperature].high,
- pageset->pcp[temperature].batch);
- }
- }
-
- get_page_state(&ps);
- get_zone_counts(&active, &inactive, &free);
-
- printk("\nFree pages: %11ukB (%ukB HighMem)\n",
- K(nr_free_pages()),
- K(nr_free_highpages()));
-
- printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu "
- "unstable:%lu free:%u slab:%lu mapped:%lu pagetables:%lu\n",
- active,
- inactive,
- ps.nr_dirty,
- ps.nr_writeback,
- ps.nr_unstable,
- nr_free_pages(),
- ps.nr_slab,
- ps.nr_mapped,
- ps.nr_page_table_pages);
-
- for_each_zone(zone) {
- int i;
-
- show_node(zone);
- printk("%s"
- " free:%lukB"
- " min:%lukB"
- " low:%lukB"
- " high:%lukB"
- " active:%lukB"
- " inactive:%lukB"
- " present:%lukB"
- " pages_scanned:%lu"
- " all_unreclaimable? %s"
- "\n",
- zone->name,
- K(zone->free_pages),
- K(zone->pages_min),
- K(zone->pages_low),
- K(zone->pages_high),
- K(zone->nr_active),
- K(zone->nr_inactive),
- K(zone->present_pages),
- zone->pages_scanned,
- (zone->all_unreclaimable ? "yes" : "no")
- );
- printk("lowmem_reserve[]:");
- for (i = 0; i < MAX_NR_ZONES; i++)
- printk(" %lu", zone->lowmem_reserve[i]);
- printk("\n");
- }
-
- for_each_zone(zone) {
- unsigned long nr, flags, order, total = 0;
-
- show_node(zone);
- printk("%s: ", zone->name);
- if (!zone->present_pages) {
- printk("empty\n");
- continue;
- }
-
- spin_lock_irqsave(&zone->lock, flags);
- for (order = 0; order < MAX_ORDER; order++) {
- nr = zone->free_area[order].nr_free;
- total += nr << order;
- printk("%lu*%lukB ", nr, K(1UL) << order);
- }
- spin_unlock_irqrestore(&zone->lock, flags);
- printk("= %lukB\n", K(total));
- }
-
- show_swap_cache_info();
-}
-
-/*
- * Builds allocation fallback zone lists.
- */
-static int __init build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist, int j, int k)
-{
- switch (k) {
- struct zone *zone;
- default:
- BUG();
- case ZONE_HIGHMEM:
- zone = pgdat->node_zones + ZONE_HIGHMEM;
- if (zone->present_pages) {
-#ifndef CONFIG_HIGHMEM
- BUG();
-#endif
- zonelist->zones[j++] = zone;
- }
- case ZONE_NORMAL:
- zone = pgdat->node_zones + ZONE_NORMAL;
- if (zone->present_pages)
- zonelist->zones[j++] = zone;
- case ZONE_DMA:
- zone = pgdat->node_zones + ZONE_DMA;
- if (zone->present_pages)
- zonelist->zones[j++] = zone;
- }
-
- return j;
-}
-
-#ifdef CONFIG_NUMA
-#define MAX_NODE_LOAD (num_online_nodes())
-static int __initdata node_load[MAX_NUMNODES];
-/**
- * find_next_best_node - find the next node that should appear in a given
- * node's fallback list
- * @node: node whose fallback list we're appending
- * @used_node_mask: nodemask_t of already used nodes
- *
- * We use a number of factors to determine which is the next node that should
- * appear on a given node's fallback list. The node should not have appeared
- * already in @node's fallback list, and it should be the next closest node
- * according to the distance array (which contains arbitrary distance values
- * from each node to each node in the system), and should also prefer nodes
- * with no CPUs, since presumably they'll have very little allocation pressure
- * on them otherwise.
- * It returns -1 if no node is found.
- */
-static int __init find_next_best_node(int node, nodemask_t *used_node_mask)
-{
- int i, n, val;
- int min_val = INT_MAX;
- int best_node = -1;
-
- for_each_online_node(i) {
- cpumask_t tmp;
-
- /* Start from local node */
- n = (node+i) % num_online_nodes();
-
- /* Don't want a node to appear more than once */
- if (node_isset(n, *used_node_mask))
- continue;
-
- /* Use the local node if we haven't already */
- if (!node_isset(node, *used_node_mask)) {
- best_node = node;
- break;
- }
-
- /* Use the distance array to find the distance */
- val = node_distance(node, n);
-
- /* Give preference to headless and unused nodes */
- tmp = node_to_cpumask(n);
- if (!cpus_empty(tmp))
- val += PENALTY_FOR_NODE_WITH_CPUS;
-
- /* Slight preference for less loaded node */
- val *= (MAX_NODE_LOAD*MAX_NUMNODES);
- val += node_load[n];
-
- if (val < min_val) {
- min_val = val;
- best_node = n;
- }
- }
-
- if (best_node >= 0)
- node_set(best_node, *used_node_mask);
-
- return best_node;
-}
-
-static void __init build_zonelists(pg_data_t *pgdat)
-{
- int i, j, k, node, local_node;
- int prev_node, load;
- struct zonelist *zonelist;
- nodemask_t used_mask;
-
- /* initialize zonelists */
- for (i = 0; i < GFP_ZONETYPES; i++) {
- zonelist = pgdat->node_zonelists + i;
- memset(zonelist, 0, sizeof(*zonelist));
- zonelist->zones[0] = NULL;
- }
-
- /* NUMA-aware ordering of nodes */
- local_node = pgdat->node_id;
- load = num_online_nodes();
- prev_node = local_node;
- nodes_clear(used_mask);
- while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
- /*
- * We don't want to pressure a particular node.
- * So adding penalty to the first node in same
- * distance group to make it round-robin.
- */
- if (node_distance(local_node, node) !=
- node_distance(local_node, prev_node))
- node_load[node] += load;
- prev_node = node;
- load--;
- for (i = 0; i < GFP_ZONETYPES; i++) {
- zonelist = pgdat->node_zonelists + i;
- for (j = 0; zonelist->zones[j] != NULL; j++);
-
- k = ZONE_NORMAL;
- if (i & __GFP_HIGHMEM)
- k = ZONE_HIGHMEM;
- if (i & __GFP_DMA)
- k = ZONE_DMA;
-
- j = build_zonelists_node(NODE_DATA(node), zonelist, j, k);
- zonelist->zones[j] = NULL;
- }
- }
-}
-
-#else /* CONFIG_NUMA */
-
-static void __init build_zonelists(pg_data_t *pgdat)
-{
- int i, j, k, node, local_node;
-
- local_node = pgdat->node_id;
- for (i = 0; i < GFP_ZONETYPES; i++) {
- struct zonelist *zonelist;
-
- zonelist = pgdat->node_zonelists + i;
- memset(zonelist, 0, sizeof(*zonelist));
-
- j = 0;
- k = ZONE_NORMAL;
- if (i & __GFP_HIGHMEM)
- k = ZONE_HIGHMEM;
- if (i & __GFP_DMA)
- k = ZONE_DMA;
-
- j = build_zonelists_node(pgdat, zonelist, j, k);
- /*
- * Now we build the zonelist so that it contains the zones
- * of all the other nodes.
- * We don't want to pressure a particular node, so when
- * building the zones for node N, we make sure that the
- * zones coming right after the local ones are those from
- * node N+1 (modulo N)
- */
- for (node = local_node + 1; node < MAX_NUMNODES; node++) {
- if (!node_online(node))
- continue;
- j = build_zonelists_node(NODE_DATA(node), zonelist, j, k);
- }
- for (node = 0; node < local_node; node++) {
- if (!node_online(node))
- continue;
- j = build_zonelists_node(NODE_DATA(node), zonelist, j, k);
- }
-
- zonelist->zones[j] = NULL;
- }
-}
-
-#endif /* CONFIG_NUMA */
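
The fallback ordering the non-NUMA loop above produces can be illustrated with
a standalone model (four nodes and local node 2 are hypothetical numbers):

    #include <stdio.h>

    #define MAX_NODES 4

    int main(void)
    {
        int local = 2, node, n = 0;
        int order[MAX_NODES];

        order[n++] = local;                       /* local node first */
        for (node = local + 1; node < MAX_NODES; node++)
            order[n++] = node;                    /* then N+1 .. MAX-1 */
        for (node = 0; node < local; node++)
            order[n++] = node;                    /* then 0 .. N-1 */

        for (node = 0; node < n; node++)
            printf("%d ", order[node]);           /* prints: 2 3 0 1 */
        printf("\n");
        return 0;
    }
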
-
-void __init build_all_zonelists(void)
-{
- int i;
-
- for_each_online_node(i)
- build_zonelists(NODE_DATA(i));
- printk("Built %i zonelists\n", num_online_nodes());
-}
-
-/*
- * Helper functions to size the waitqueue hash table.
- * Essentially these want to choose hash table sizes sufficiently
- * large so that collisions trying to wait on pages are rare.
- * But in fact, the number of active page waitqueues on typical
- * systems is ridiculously low, less than 200. So this is even
- * conservative, even though it seems large.
- *
- * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
- * waitqueues, i.e. the size of the waitq table given the number of pages.
- */
-#define PAGES_PER_WAITQUEUE 256
-
-static inline unsigned long wait_table_size(unsigned long pages)
-{
- unsigned long size = 1;
-
- pages /= PAGES_PER_WAITQUEUE;
-
- while (size < pages)
- size <<= 1;
-
- /*
- * Once we have dozens or even hundreds of threads sleeping
- * on IO we've got bigger problems than wait queue collision.
- * Limit the size of the wait table to a reasonable size.
- */
- size = min(size, 4096UL);
-
- return max(size, 4UL);
-}
-
-/*
- * This is an integer logarithm so that shifts can be used later
- * to extract the more random high bits from the multiplicative
- * hash function before the remainder is taken.
- */
-static inline unsigned long wait_table_bits(unsigned long size)
-{
- return ffz(~size);
-}
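
A standalone model of the two helpers above, checked for a hypothetical 512MB
zone of 4K pages (illustrative only):

    #include <stdio.h>

    #define PAGES_PER_WAITQUEUE 256

    static unsigned long table_size(unsigned long pages)
    {
        unsigned long size = 1;

        pages /= PAGES_PER_WAITQUEUE;
        while (size < pages)
            size <<= 1;              /* round up to a power of two */
        if (size > 4096UL)
            size = 4096UL;           /* cap, as in wait_table_size() */
        return size < 4UL ? 4UL : size;
    }

    int main(void)
    {
        unsigned long size = table_size(131072);  /* 512MB of 4K pages */
        unsigned long bits = 0;

        while ((1UL << bits) < size)
            bits++;                  /* same result as ffz(~size) here */
        printf("size=%lu bits=%lu\n", size, bits);  /* 512, 9 */
        return 0;
    }
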
-
-#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
-
-static void __init calculate_zone_totalpages(struct pglist_data *pgdat,
- unsigned long *zones_size, unsigned long *zholes_size)
-{
- unsigned long realtotalpages, totalpages = 0;
- int i;
-
- for (i = 0; i < MAX_NR_ZONES; i++)
- totalpages += zones_size[i];
- pgdat->node_spanned_pages = totalpages;
-
- realtotalpages = totalpages;
- if (zholes_size)
- for (i = 0; i < MAX_NR_ZONES; i++)
- realtotalpages -= zholes_size[i];
- pgdat->node_present_pages = realtotalpages;
- printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
realtotalpages);
-}
-
-
-/*
- * Initially all pages are reserved - free ones are freed
- * up by free_all_bootmem() once the early boot process is
- * done. Non-atomic initialization, single-pass.
- */
-void __init memmap_init_zone(unsigned long size, int nid, unsigned long zone,
- unsigned long start_pfn)
-{
- struct page *start = pfn_to_page(start_pfn);
- struct page *page;
-
- for (page = start; page < (start + size); page++) {
- set_page_zone(page, NODEZONE(nid, zone));
- set_page_count(page, 0);
- reset_page_mapcount(page);
- SetPageReserved(page);
- INIT_LIST_HEAD(&page->lru);
-#ifdef WANT_PAGE_VIRTUAL
- /* The shift won't overflow because ZONE_NORMAL is below 4G. */
- if (!is_highmem_idx(zone))
- set_page_address(page, __va(start_pfn << PAGE_SHIFT));
-#endif
- start_pfn++;
- }
-}
-
-void zone_init_free_lists(struct pglist_data *pgdat, struct zone *zone,
- unsigned long size)
-{
- int order;
- for (order = 0; order < MAX_ORDER ; order++) {
- INIT_LIST_HEAD(&zone->free_area[order].free_list);
- zone->free_area[order].nr_free = 0;
- }
-}
-
-#ifndef __HAVE_ARCH_MEMMAP_INIT
-#define memmap_init(size, nid, zone, start_pfn) \
- memmap_init_zone((size), (nid), (zone), (start_pfn))
-#endif
-
-/*
- * Set up the zone data structures:
- * - mark all pages reserved
- * - mark all memory queues empty
- * - clear the memory bitmaps
- */
-static void __init free_area_init_core(struct pglist_data *pgdat,
- unsigned long *zones_size, unsigned long *zholes_size)
-{
- unsigned long i, j;
- const unsigned long zone_required_alignment = 1UL << (MAX_ORDER-1);
- int cpu, nid = pgdat->node_id;
- unsigned long zone_start_pfn = pgdat->node_start_pfn;
-
- pgdat->nr_zones = 0;
- init_waitqueue_head(&pgdat->kswapd_wait);
- pgdat->kswapd_max_order = 0;
-
- for (j = 0; j < MAX_NR_ZONES; j++) {
- struct zone *zone = pgdat->node_zones + j;
- unsigned long size, realsize;
- unsigned long batch;
-
- zone_table[NODEZONE(nid, j)] = zone;
- realsize = size = zones_size[j];
- if (zholes_size)
- realsize -= zholes_size[j];
-
- if (j == ZONE_DMA || j == ZONE_NORMAL)
- nr_kernel_pages += realsize;
- nr_all_pages += realsize;
-
- zone->spanned_pages = size;
- zone->present_pages = realsize;
- zone->name = zone_names[j];
- spin_lock_init(&zone->lock);
- spin_lock_init(&zone->lru_lock);
- zone->zone_pgdat = pgdat;
- zone->free_pages = 0;
-
- zone->temp_priority = zone->prev_priority = DEF_PRIORITY;
-
- /*
- * The per-cpu-pages pools are set to around 1000th of the
- * size of the zone. But no more than 1/4 of a meg - there's
- * no point in going beyond the size of L2 cache.
- *
- * OK, so we don't know how big the cache is. So guess.
- */
- batch = zone->present_pages / 1024;
- if (batch * PAGE_SIZE > 256 * 1024)
- batch = (256 * 1024) / PAGE_SIZE;
- batch /= 4; /* We effectively *= 4 below */
- if (batch < 1)
- batch = 1;
-
- for (cpu = 0; cpu < NR_CPUS; cpu++) {
- struct per_cpu_pages *pcp;
-
- pcp = &zone->pageset[cpu].pcp[0]; /* hot */
- pcp->count = 0;
- pcp->low = 2 * batch;
- pcp->high = 6 * batch;
- pcp->batch = 1 * batch;
- INIT_LIST_HEAD(&pcp->list);
-
- pcp = &zone->pageset[cpu].pcp[1]; /* cold */
- pcp->count = 0;
- pcp->low = 0;
- pcp->high = 2 * batch;
- pcp->batch = 1 * batch;
- INIT_LIST_HEAD(&pcp->list);
- }
- printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%lu\n",
- zone_names[j], realsize, batch);
- INIT_LIST_HEAD(&zone->active_list);
- INIT_LIST_HEAD(&zone->inactive_list);
- zone->nr_scan_active = 0;
- zone->nr_scan_inactive = 0;
- zone->nr_active = 0;
- zone->nr_inactive = 0;
- if (!size)
- continue;
-
- /*
- * The per-page waitqueue mechanism uses hashed waitqueues
- * per zone.
- */
- zone->wait_table_size = wait_table_size(size);
- zone->wait_table_bits =
- wait_table_bits(zone->wait_table_size);
- zone->wait_table = (wait_queue_head_t *)
- alloc_bootmem_node(pgdat, zone->wait_table_size
- * sizeof(wait_queue_head_t));
-
- for(i = 0; i < zone->wait_table_size; ++i)
- init_waitqueue_head(zone->wait_table + i);
-
- pgdat->nr_zones = j+1;
-
- zone->zone_mem_map = pfn_to_page(zone_start_pfn);
- zone->zone_start_pfn = zone_start_pfn;
-
- if ((zone_start_pfn) & (zone_required_alignment-1))
- printk(KERN_CRIT "BUG: wrong zone alignment, it will
crash\n");
-
- memmap_init(size, nid, j, zone_start_pfn);
-
- zone_start_pfn += size;
-
- zone_init_free_lists(pgdat, zone, zone->spanned_pages);
- }
-}
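
The per-cpu batch sizing inside free_area_init_core() above works out as
follows for a hypothetical 512MB zone (standalone model, illustrative only):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    int main(void)
    {
        unsigned long present = 131072;          /* 512MB zone of 4K pages */
        unsigned long batch = present / 1024;    /* ~1/1000th of the zone */

        if (batch * PAGE_SIZE > 256 * 1024)
            batch = (256 * 1024) / PAGE_SIZE;    /* cap at 1/4MB */
        batch /= 4;                              /* effectively *= 4 below */
        if (batch < 1)
            batch = 1;
        printf("hot pcp: low=%lu high=%lu batch=%lu\n",
               2 * batch, 6 * batch, batch);     /* 32, 96, 16 */
        return 0;
    }
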
-
-void __init node_alloc_mem_map(struct pglist_data *pgdat)
-{
- unsigned long size;
-
- size = (pgdat->node_spanned_pages + 1) * sizeof(struct page);
- pgdat->node_mem_map = alloc_bootmem_node(pgdat, size);
-#ifndef CONFIG_DISCONTIGMEM
- mem_map = contig_page_data.node_mem_map;
-#endif
-}
-
-void __init free_area_init_node(int nid, struct pglist_data *pgdat,
- unsigned long *zones_size, unsigned long node_start_pfn,
- unsigned long *zholes_size)
-{
- pgdat->node_id = nid;
- pgdat->node_start_pfn = node_start_pfn;
- calculate_zone_totalpages(pgdat, zones_size, zholes_size);
-
- if (!pfn_to_page(node_start_pfn))
- node_alloc_mem_map(pgdat);
-
- free_area_init_core(pgdat, zones_size, zholes_size);
-}
-
-#ifndef CONFIG_DISCONTIGMEM
-static bootmem_data_t contig_bootmem_data;
-struct pglist_data contig_page_data = { .bdata = &contig_bootmem_data };
-
-EXPORT_SYMBOL(contig_page_data);
-
-void __init free_area_init(unsigned long *zones_size)
-{
- free_area_init_node(0, &contig_page_data, zones_size,
- __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
-}
-#endif
-
-#ifdef CONFIG_PROC_FS
-
-#include <linux/seq_file.h>
-
-static void *frag_start(struct seq_file *m, loff_t *pos)
-{
- pg_data_t *pgdat;
- loff_t node = *pos;
-
- for (pgdat = pgdat_list; pgdat && node; pgdat = pgdat->pgdat_next)
- --node;
-
- return pgdat;
-}
-
-static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
-{
- pg_data_t *pgdat = (pg_data_t *)arg;
-
- (*pos)++;
- return pgdat->pgdat_next;
-}
-
-static void frag_stop(struct seq_file *m, void *arg)
-{
-}
-
-/*
- * This walks the free areas for each zone.
- */
-static int frag_show(struct seq_file *m, void *arg)
-{
- pg_data_t *pgdat = (pg_data_t *)arg;
- struct zone *zone;
- struct zone *node_zones = pgdat->node_zones;
- unsigned long flags;
- int order;
-
- for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
- if (!zone->present_pages)
- continue;
-
- spin_lock_irqsave(&zone->lock, flags);
- seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
- for (order = 0; order < MAX_ORDER; ++order)
- seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
- spin_unlock_irqrestore(&zone->lock, flags);
- seq_putc(m, '\n');
- }
- return 0;
-}
-
-struct seq_operations fragmentation_op = {
- .start = frag_start,
- .next = frag_next,
- .stop = frag_stop,
- .show = frag_show,
-};
-
-static char *vmstat_text[] = {
- "nr_dirty",
- "nr_writeback",
- "nr_unstable",
- "nr_page_table_pages",
- "nr_mapped",
- "nr_slab",
-
- "pgpgin",
- "pgpgout",
- "pswpin",
- "pswpout",
- "pgalloc_high",
-
- "pgalloc_normal",
- "pgalloc_dma",
- "pgfree",
- "pgactivate",
- "pgdeactivate",
-
- "pgfault",
- "pgmajfault",
- "pgrefill_high",
- "pgrefill_normal",
- "pgrefill_dma",
-
- "pgsteal_high",
- "pgsteal_normal",
- "pgsteal_dma",
- "pgscan_kswapd_high",
- "pgscan_kswapd_normal",
-
- "pgscan_kswapd_dma",
- "pgscan_direct_high",
- "pgscan_direct_normal",
- "pgscan_direct_dma",
- "pginodesteal",
-
- "slabs_scanned",
- "kswapd_steal",
- "kswapd_inodesteal",
- "pageoutrun",
- "allocstall",
-
- "pgrotated",
-};
-
-static void *vmstat_start(struct seq_file *m, loff_t *pos)
-{
- struct page_state *ps;
-
- if (*pos >= ARRAY_SIZE(vmstat_text))
- return NULL;
-
- ps = kmalloc(sizeof(*ps), GFP_KERNEL);
- m->private = ps;
- if (!ps)
- return ERR_PTR(-ENOMEM);
- get_full_page_state(ps);
- ps->pgpgin /= 2; /* sectors -> kbytes */
- ps->pgpgout /= 2;
- return (unsigned long *)ps + *pos;
-}
-
-static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
-{
- (*pos)++;
- if (*pos >= ARRAY_SIZE(vmstat_text))
- return NULL;
- return (unsigned long *)m->private + *pos;
-}
-
-static int vmstat_show(struct seq_file *m, void *arg)
-{
- unsigned long *l = arg;
- unsigned long off = l - (unsigned long *)m->private;
-
- seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
- return 0;
-}
-
-static void vmstat_stop(struct seq_file *m, void *arg)
-{
- kfree(m->private);
- m->private = NULL;
-}
-
-struct seq_operations vmstat_op = {
- .start = vmstat_start,
- .next = vmstat_next,
- .stop = vmstat_stop,
- .show = vmstat_show,
-};
-
-#endif /* CONFIG_PROC_FS */
-
-#ifdef CONFIG_HOTPLUG_CPU
-static int page_alloc_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
-{
- int cpu = (unsigned long)hcpu;
- long *count;
- unsigned long *src, *dest;
-
- if (action == CPU_DEAD) {
- int i;
-
- /* Drain local pagecache count. */
- count = &per_cpu(nr_pagecache_local, cpu);
- atomic_add(*count, &nr_pagecache);
- *count = 0;
- local_irq_disable();
- __drain_pages(cpu);
-
- /* Add dead cpu's page_states to our own. */
- dest = (unsigned long *)&__get_cpu_var(page_states);
- src = (unsigned long *)&per_cpu(page_states, cpu);
-
- for (i = 0; i < sizeof(struct page_state)/sizeof(unsigned long); i++) {
- dest[i] += src[i];
- src[i] = 0;
- }
-
- local_irq_enable();
- }
- return NOTIFY_OK;
-}
-#endif /* CONFIG_HOTPLUG_CPU */
-
-void __init page_alloc_init(void)
-{
- hotcpu_notifier(page_alloc_cpu_notify, 0);
-}
-
-/*
- * setup_per_zone_lowmem_reserve - called whenever
- * sysctl_lower_zone_reserve_ratio changes. Ensures that each zone
- * has a correct pages reserved value, so an adequate number of
- * pages are left in the zone after a successful __alloc_pages().
- */
-static void setup_per_zone_lowmem_reserve(void)
-{
- struct pglist_data *pgdat;
- int j, idx;
-
- for_each_pgdat(pgdat) {
- for (j = 0; j < MAX_NR_ZONES; j++) {
- struct zone * zone = pgdat->node_zones + j;
- unsigned long present_pages = zone->present_pages;
-
- zone->lowmem_reserve[j] = 0;
-
- for (idx = j-1; idx >= 0; idx--) {
- struct zone * lower_zone = pgdat->node_zones + idx;
-
- lower_zone->lowmem_reserve[j] = present_pages / sysctl_lowmem_reserve_ratio[idx];
- present_pages += lower_zone->present_pages;
- }
- }
- }
-}
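
Plugging the 1GB example from the top of this file into the loop above gives
the following (standalone model; the DMA/Normal/HighMem split is the
illustrative one from that comment):

    #include <stdio.h>

    int main(void)
    {
        long present[3] = { 4096, 200704, 57344 };  /* DMA/Normal/HighMem pages */
        long ratio[2] = { 256, 32 };                /* lowmem_reserve ratios */
        long reserve[3][3] = { { 0 } };
        int j, idx;

        for (j = 0; j < 3; j++) {
            long pages = present[j];
            for (idx = j - 1; idx >= 0; idx--) {
                reserve[idx][j] = pages / ratio[idx];
                pages += present[idx];              /* accumulate, as above */
            }
        }
        /* a HIGHMEM allocation must leave this many pages free below it */
        printf("NORMAL keeps %ld pages, DMA keeps %ld pages\n",
               reserve[1][2], reserve[0][2]);       /* 1792 and 1008 */
        return 0;
    }
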
-
-/*
- * setup_per_zone_pages_min - called when min_free_kbytes changes. Ensures
- * that the pages_{min,low,high} values for each zone are set correctly
- * with respect to min_free_kbytes.
- */
-static void setup_per_zone_pages_min(void)
-{
- unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
- unsigned long lowmem_pages = 0;
- struct zone *zone;
- unsigned long flags;
-
- /* Calculate total number of !ZONE_HIGHMEM pages */
- for_each_zone(zone) {
- if (!is_highmem(zone))
- lowmem_pages += zone->present_pages;
- }
-
- for_each_zone(zone) {
- spin_lock_irqsave(&zone->lru_lock, flags);
- if (is_highmem(zone)) {
- /*
- * Often, highmem doesn't need to reserve any pages.
- * But the pages_min/low/high values are also used for
- * batching up page reclaim activity so we need a
- * decent value here.
- */
- int min_pages;
-
- min_pages = zone->present_pages / 1024;
- if (min_pages < SWAP_CLUSTER_MAX)
- min_pages = SWAP_CLUSTER_MAX;
- if (min_pages > 128)
- min_pages = 128;
- zone->pages_min = min_pages;
- } else {
- /* if it's a lowmem zone, reserve a number of pages
- * proportionate to the zone's size.
- */
- zone->pages_min = (pages_min * zone->present_pages) /
- lowmem_pages;
- }
-
- /*
- * When interpreting these watermarks, just keep in mind that:
- * zone->pages_min == (zone->pages_min * 4) / 4;
- */
- zone->pages_low = (zone->pages_min * 5) / 4;
- zone->pages_high = (zone->pages_min * 6) / 4;
- spin_unlock_irqrestore(&zone->lru_lock, flags);
- }
-}
-
-/*
- * Initialise min_free_kbytes.
- *
- * For small machines we want it small (128k min). For large machines
- * we want it large (64MB max). But it is not linear, because network
- * bandwidth does not increase linearly with machine size. We use
- *
- * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
- * min_free_kbytes = sqrt(lowmem_kbytes * 16)
- *
- * which yields
- *
- * 16MB: 512k
- * 32MB: 724k
- * 64MB: 1024k
- * 128MB: 1448k
- * 256MB: 2048k
- * 512MB: 2896k
- * 1024MB: 4096k
- * 2048MB: 5792k
- * 4096MB: 8192k
- * 8192MB: 11584k
- * 16384MB: 16384k
- */
-static int __init init_per_zone_pages_min(void)
-{
- unsigned long lowmem_kbytes;
-
- lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
-
- min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
- if (min_free_kbytes < 128)
- min_free_kbytes = 128;
- if (min_free_kbytes > 65536)
- min_free_kbytes = 65536;
- setup_per_zone_pages_min();
- setup_per_zone_lowmem_reserve();
- return 0;
-}
-module_init(init_per_zone_pages_min)
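
The table in the comment above can be reproduced with the same formula and
clamping (standalone check, compile with -lm; illustrative only):

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned long lowmem_kbytes[] = { 16384, 1048576, 16777216 };
        int i;

        for (i = 0; i < 3; i++) {
            unsigned long v =
                (unsigned long)sqrt((double)lowmem_kbytes[i] * 16);

            if (v < 128)
                v = 128;
            if (v > 65536)
                v = 65536;
            printf("%6luMB lowmem -> min_free_kbytes = %luk\n",
                   lowmem_kbytes[i] >> 10, v);  /* 512k, 4096k, 16384k */
        }
        return 0;
    }
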
-
-/*
- * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
- * that we can call two helper functions whenever min_free_kbytes
- * changes.
- */
-int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
- struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
-{
- proc_dointvec(table, write, file, buffer, length, ppos);
- setup_per_zone_pages_min();
- return 0;
-}
-
-/*
- * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
- * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
- * whenever sysctl_lowmem_reserve_ratio changes.
- *
- * The reserve ratio obviously has absolutely no relation with the
- * pages_min watermarks. The lowmem reserve ratio only makes sense
- * as a function of the boot-time zone sizes.
- */
-int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
- struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
-{
- proc_dointvec_minmax(table, write, file, buffer, length, ppos);
- setup_per_zone_lowmem_reserve();
- return 0;
-}
-
-__initdata int hashdist = HASHDIST_DEFAULT;
-
-#ifdef CONFIG_NUMA
-static int __init set_hashdist(char *str)
-{
- if (!str)
- return 0;
- hashdist = simple_strtoul(str, &str, 0);
- return 1;
-}
-__setup("hashdist=", set_hashdist);
-#endif
-
-/*
- * allocate a large system hash table from bootmem
- * - it is assumed that the hash table must contain an exact power-of-2
- * quantity of entries
- * - limit is the number of hash buckets, not the total allocation size
- */
-void *__init alloc_large_system_hash(const char *tablename,
- unsigned long bucketsize,
- unsigned long numentries,
- int scale,
- int flags,
- unsigned int *_hash_shift,
- unsigned int *_hash_mask,
- unsigned long limit)
-{
- unsigned long long max = limit;
- unsigned long log2qty, size;
- void *table = NULL;
-
- /* allow the kernel cmdline to have a say */
- if (!numentries) {
- /* round applicable memory size up to nearest megabyte */
- numentries = (flags & HASH_HIGHMEM) ? nr_all_pages :
nr_kernel_pages;
- numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
- numentries >>= 20 - PAGE_SHIFT;
- numentries <<= 20 - PAGE_SHIFT;
-
- /* limit to 1 bucket per 2^scale bytes of low memory */
- if (scale > PAGE_SHIFT)
- numentries >>= (scale - PAGE_SHIFT);
- else
- numentries <<= (PAGE_SHIFT - scale);
- }
- /* rounded up to nearest power of 2 in size */
- numentries = 1UL << (long_log2(numentries) + 1);
-
- /* limit allocation size to 1/16 total memory by default */
- if (max == 0) {
- max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
- do_div(max, bucketsize);
- }
-
- if (numentries > max)
- numentries = max;
-
- log2qty = long_log2(numentries);
-
- do {
- size = bucketsize << log2qty;
- if (flags & HASH_EARLY)
- table = alloc_bootmem(size);
- else if (hashdist)
- table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
- else {
- unsigned long order;
- for (order = 0; ((1UL << order) << PAGE_SHIFT) < size; order++)
- ;
- table = (void*) __get_free_pages(GFP_ATOMIC, order);
- }
- } while (!table && size > PAGE_SIZE && --log2qty);
-
- if (!table)
- panic("Failed to allocate %s hash table\n", tablename);
-
- printk("%s hash table entries: %d (order: %d, %lu bytes)\n",
- tablename,
- (1U << log2qty),
- long_log2(size) - PAGE_SHIFT,
- size);
-
- if (_hash_shift)
- *_hash_shift = log2qty;
- if (_hash_mask)
- *_hash_mask = (1 << log2qty) - 1;
-
- return table;
-}
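
A boot-time usage sketch for this helper; the table and every name prefixed
example_ are hypothetical, only the alloc_large_system_hash() signature comes
from the code above:

    /* one bucket per 2^14 bytes of low memory, allocated from bootmem */
    static struct hlist_head *example_table;
    static unsigned int example_shift, example_mask;

    static void __init example_hash_init(void)
    {
        example_table = alloc_large_system_hash("example",
                                                sizeof(struct hlist_head),
                                                0,            /* autosize */
                                                14,           /* scale */
                                                HASH_EARLY,   /* use bootmem */
                                                &example_shift,
                                                &example_mask,
                                                0);           /* default limit */
    }
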
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog