# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Date 1175074721 -3600
# Node ID 0b2794d3320f2d618540faa403e5d03c617c7b7d
# Parent 14aeb7981e4ebbbdc2d7d171a2b55b79642bc0a1
# Parent ff6a1607c17b2e1f2e8b09e8722097f50e16c715
Merge with xen-ia64-unstable.hg
---
Config.mk | 14 -
docs/xen-api/xenapi-datamodel.tex | 140 ++++++++++
linux-2.6-xen-sparse/arch/x86_64/kernel/entry-xen.S | 16 -
linux-2.6-xen-sparse/drivers/xen/core/machine_reboot.c | 2
linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_dev.c | 18 -
tools/Rules.mk | 6
tools/examples/xend-config.sxp | 14 -
tools/ioemu/Makefile.target | 4
tools/ioemu/vnc.c | 13
tools/libxen/include/xen_network.h | 33 ++
tools/libxen/src/xen_network.c | 74 +++++
tools/pygrub/src/pygrub | 17 -
tools/python/xen/util/xmlrpcclient.py | 123 +++++++++
tools/python/xen/util/xmlrpclib2.py | 71 -----
tools/python/xen/xend/XendAPI.py | 203 +++++++--------
tools/python/xen/xend/XendClient.py | 2
tools/python/xen/xend/XendConfig.py | 11
tools/python/xen/xend/XendDomain.py | 20 +
tools/python/xen/xend/XendDomainInfo.py | 8
tools/python/xen/xend/XendLogging.py | 1
tools/python/xen/xend/XendMonitor.py | 16 -
tools/python/xen/xend/XendNetwork.py | 43 ++-
tools/python/xen/xend/XendNode.py | 14 -
tools/python/xen/xend/XendOptions.py | 8
tools/python/xen/xend/XendPIFMetrics.py | 8
tools/python/xen/xend/XendStateStore.py | 13
tools/python/xen/xend/XendVMMetrics.py | 11
tools/python/xen/xend/server/SSLXMLRPCServer.py | 103 +++++++
tools/python/xen/xend/server/SrvServer.py | 100 +++++--
tools/python/xen/xend/server/XMLRPCServer.py | 51 +++
tools/python/xen/xm/XenAPI.py | 16 -
tools/python/xen/xm/create.dtd | 2
tools/python/xen/xm/create.py | 68 +++--
tools/python/xen/xm/main.py | 35 +-
tools/python/xen/xm/opts.py | 19 +
tools/python/xen/xm/xenapi_create.py | 13
tools/xm-test/tests/destroy/06_destroy_dom0_neg.py | 2
xen/acm/acm_policy.c | 24 +
xen/arch/ia64/asm-offsets.c | 9
xen/arch/ia64/linux-xen/irq_ia64.c | 2
xen/arch/ia64/linux-xen/mca.c | 10
xen/arch/ia64/linux-xen/smp.c | 2
xen/arch/ia64/vmx/pal_emul.c | 2
xen/arch/ia64/vmx/vmx_process.c | 2
xen/arch/ia64/vmx/vmx_virt.c | 78 ++---
xen/arch/ia64/xen/dom0_ops.c | 4
xen/arch/ia64/xen/domain.c | 8
xen/arch/ia64/xen/faults.c | 2
xen/arch/ia64/xen/hypercall.c | 4
xen/arch/ia64/xen/hyperprivop.S | 13
xen/arch/ia64/xen/mm.c | 26 -
xen/arch/ia64/xen/privop.c | 30 +-
xen/arch/ia64/xen/privop_stat.c | 86 +++---
xen/arch/ia64/xen/tlb_track.c | 42 +--
xen/arch/ia64/xen/vcpu.c | 10
xen/arch/ia64/xen/vhpt.c | 30 +-
xen/arch/powerpc/backtrace.c | 15 -
xen/arch/powerpc/mm.c | 2
xen/arch/x86/Rules.mk | 4
xen/arch/x86/apic.c | 2
xen/arch/x86/extable.c | 2
xen/arch/x86/hvm/io.c | 28 +-
xen/arch/x86/hvm/svm/intr.c | 124 ++++-----
xen/arch/x86/hvm/svm/svm.c | 24 -
xen/arch/x86/hvm/svm/vmcb.c | 1
xen/arch/x86/hvm/vmx/intr.c | 35 --
xen/arch/x86/irq.c | 2
xen/arch/x86/mm.c | 34 +-
xen/arch/x86/mm/hap/hap.c | 18 -
xen/arch/x86/mm/shadow/common.c | 40 +--
xen/arch/x86/mm/shadow/multi.c | 56 ++--
xen/arch/x86/smp.c | 6
xen/arch/x86/time.c | 22 -
xen/arch/x86/traps.c | 26 +
xen/arch/x86/x86_32/asm-offsets.c | 14 -
xen/arch/x86/x86_32/domain_page.c | 6
xen/arch/x86/x86_32/entry.S | 4
xen/arch/x86/x86_32/seg_fixup.c | 2
xen/arch/x86/x86_64/asm-offsets.c | 26 -
xen/arch/x86/x86_64/compat/entry.S | 2
xen/arch/x86/x86_64/entry.S | 4
xen/arch/x86/x86_emulate.c | 12
xen/common/domain.c | 4
xen/common/multicall.c | 5
xen/common/page_alloc.c | 2
xen/common/perfc.c | 223 ++++++++---------
xen/common/schedule.c | 13
xen/drivers/char/console.c | 10
xen/include/asm-ia64/bug.h | 1
xen/include/asm-ia64/linux-xen/asm/asmmacro.h | 4
xen/include/asm-ia64/linux-xen/asm/iosapic.h | 7
xen/include/asm-ia64/perfc_defn.h | 214 ++++++++--------
xen/include/asm-ia64/privop_stat.h | 25 -
xen/include/asm-ia64/tlb_track.h | 4
xen/include/asm-powerpc/bug.h | 1
xen/include/asm-powerpc/debugger.h | 4
xen/include/asm-x86/bug.h | 6
xen/include/asm-x86/hvm/svm/vmcb.h | 1
xen/include/asm-x86/multicall.h | 104 ++++---
xen/include/asm-x86/perfc_defn.h | 126 ++++-----
xen/include/asm-x86/x86_32/asm_defns.h | 12
xen/include/asm-x86/x86_32/bug.h | 6
xen/include/asm-x86/x86_64/asm_defns.h | 19 -
xen/include/asm-x86/x86_64/bug.h | 6
xen/include/public/foreign/Makefile | 4
xen/include/xen/lib.h | 4
xen/include/xen/perfc.h | 120 +++------
xen/include/xen/perfc_defn.h | 15 -
108 files changed, 1869 insertions(+), 1278 deletions(-)
diff -r 14aeb7981e4e -r 0b2794d3320f Config.mk
--- a/Config.mk Tue Mar 27 12:21:48 2007 -0600
+++ b/Config.mk Wed Mar 28 10:38:41 2007 +0100
@@ -31,16 +31,26 @@ EXTRA_LIB += $(EXTRA_PREFIX)/$(LIBDIR)
EXTRA_LIB += $(EXTRA_PREFIX)/$(LIBDIR)
endif
-# cc-option
+# cc-option: Check if compiler supports first option, else fall back to second.
# Usage: cflags-y += $(call cc-option,$(CC),-march=winchip-c6,-march=i586)
cc-option = $(shell if test -z "`$(1) $(2) -S -o /dev/null -xc \
/dev/null 2>&1`"; then echo "$(2)"; else echo "$(3)"; fi ;)
-# cc-ver
+# cc-ver: Check compiler is at least specified version. Return boolean 'y'/'n'.
# Usage: ifeq ($(call cc-ver,$(CC),0x030400),y)
cc-ver = $(shell if [ $$((`$(1) -dumpversion | awk -F. \
'{ printf "0x%02x%02x%02x", $$1, $$2, $$3}'`)) -ge $$(($(2))) ]; \
then echo y; else echo n; fi ;)
+
+# cc-ver-check: Check compiler is at least specified version, else fail.
+# Usage: $(call cc-ver-check,CC,0x030400,"Require at least gcc-3.4")
+cc-ver-check = $(eval $(call cc-ver-check-closure,$(1),$(2),$(3)))
+define cc-ver-check-closure
+ ifeq ($$(call cc-ver,$$($(1)),$(2)),n)
+ override $(1) = echo "*** FATAL BUILD ERROR: "$(3) >&2; exit 1;
+ cc-option := n
+ endif
+endef
ifneq ($(debug),y)
CFLAGS += -DNDEBUG
diff -r 14aeb7981e4e -r 0b2794d3320f docs/xen-api/xenapi-datamodel.tex
--- a/docs/xen-api/xenapi-datamodel.tex Tue Mar 27 12:21:48 2007 -0600
+++ b/docs/xen-api/xenapi-datamodel.tex Wed Mar 28 10:38:41 2007 +0100
@@ -6549,6 +6549,7 @@ Quals & Field & Type & Description \\
$\mathit{RW}$ & {\tt name/description} & string & a notes field containg human-readable description \\
$\mathit{RO}_\mathit{run}$ & {\tt VIFs} & (VIF ref) Set & list of connected vifs \\
$\mathit{RO}_\mathit{run}$ & {\tt PIFs} & (PIF ref) Set & list of connected pifs \\
+$\mathit{RW}$ & {\tt other\_config} & (string $\rightarrow$ string) Map & additional configuration \\
\hline
\end{longtable}
\subsection{RPCs associated with class: network}
@@ -6798,6 +6799,145 @@ Get the PIFs field of the given network.
value of the field
+\vspace{0.3cm}
+\vspace{0.3cm}
+\vspace{0.3cm}
+\subsubsection{RPC name:~get\_other\_config}
+
+{\bf Overview:}
+Get the other\_config field of the given network.
+
+ \noindent {\bf Signature:}
+\begin{verbatim} ((string -> string) Map) get_other_config (session_id s, network ref self)\end{verbatim}
+
+
+\noindent{\bf Arguments:}
+
+
+\vspace{0.3cm}
+\begin{tabular}{|c|c|p{7cm}|}
+ \hline
+{\bf type} & {\bf name} & {\bf description} \\ \hline
+{\tt network ref } & self & reference to the object \\ \hline
+
+\end{tabular}
+
+\vspace{0.3cm}
+
+ \noindent {\bf Return Type:}
+{\tt
+(string $\rightarrow$ string) Map
+}
+
+
+value of the field
+\vspace{0.3cm}
+\vspace{0.3cm}
+\vspace{0.3cm}
+\subsubsection{RPC name:~set\_other\_config}
+
+{\bf Overview:}
+Set the other\_config field of the given network.
+
+ \noindent {\bf Signature:}
+\begin{verbatim} void set_other_config (session_id s, network ref self, (string -> string) Map value)\end{verbatim}
+
+
+\noindent{\bf Arguments:}
+
+
+\vspace{0.3cm}
+\begin{tabular}{|c|c|p{7cm}|}
+ \hline
+{\bf type} & {\bf name} & {\bf description} \\ \hline
+{\tt network ref } & self & reference to the object \\ \hline
+
+{\tt (string $\rightarrow$ string) Map } & value & New value to set \\ \hline
+
+\end{tabular}
+
+\vspace{0.3cm}
+
+ \noindent {\bf Return Type:}
+{\tt
+void
+}
+
+
+
+\vspace{0.3cm}
+\vspace{0.3cm}
+\vspace{0.3cm}
+\subsubsection{RPC name:~add\_to\_other\_config}
+
+{\bf Overview:}
+Add the given key-value pair to the other\_config field of the given
+network.
+
+ \noindent {\bf Signature:}
+\begin{verbatim} void add_to_other_config (session_id s, network ref self, string key, string value)\end{verbatim}
+
+
+\noindent{\bf Arguments:}
+
+
+\vspace{0.3cm}
+\begin{tabular}{|c|c|p{7cm}|}
+ \hline
+{\bf type} & {\bf name} & {\bf description} \\ \hline
+{\tt network ref } & self & reference to the object \\ \hline
+
+{\tt string } & key & Key to add \\ \hline
+
+{\tt string } & value & Value to add \\ \hline
+
+\end{tabular}
+
+\vspace{0.3cm}
+
+ \noindent {\bf Return Type:}
+{\tt
+void
+}
+
+
+
+\vspace{0.3cm}
+\vspace{0.3cm}
+\vspace{0.3cm}
+\subsubsection{RPC name:~remove\_from\_other\_config}
+
+{\bf Overview:}
+Remove the given key and its corresponding value from the other\_config
+field of the given network. If the key is not in that Map, then do
+nothing.
+
+ \noindent {\bf Signature:}
+\begin{verbatim} void remove_from_other_config (session_id s, network ref self, string key)\end{verbatim}
+
+
+\noindent{\bf Arguments:}
+
+
+\vspace{0.3cm}
+\begin{tabular}{|c|c|p{7cm}|}
+ \hline
+{\bf type} & {\bf name} & {\bf description} \\ \hline
+{\tt network ref } & self & reference to the object \\ \hline
+
+{\tt string } & key & Key to remove \\ \hline
+
+\end{tabular}
+
+\vspace{0.3cm}
+
+ \noindent {\bf Return Type:}
+{\tt
+void
+}
+
+
+
\vspace{0.3cm}
\vspace{0.3cm}
\vspace{0.3cm}
diff -r 14aeb7981e4e -r 0b2794d3320f linux-2.6-xen-sparse/arch/x86_64/kernel/entry-xen.S
--- a/linux-2.6-xen-sparse/arch/x86_64/kernel/entry-xen.S Tue Mar 27 12:21:48 2007 -0600
+++ b/linux-2.6-xen-sparse/arch/x86_64/kernel/entry-xen.S Wed Mar 28 10:38:41 2007 +0100
@@ -148,11 +148,11 @@ NMI_MASK = 0x80000000
.endm
/*
- * Must be consistent with the definition in arch-x86_64.h:
+ * Must be consistent with the definition in arch-x86/xen-x86_64.h:
* struct iret_context {
* u64 rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
* };
- * #define VGCF_IN_SYSCALL (1<<8)
+ * with rax, r11, and rcx being taken care of in the hypercall stub.
*/
.macro HYPERVISOR_IRET flag
testb $3,1*8(%rsp)
@@ -164,21 +164,15 @@ NMI_MASK = 0x80000000
jnz 1f
/* Direct iret to kernel space. Correct CS and SS. */
- orb $3,1*8(%rsp)
- orb $3,4*8(%rsp)
+ orl $3,1*8(%rsp)
+ orl $3,4*8(%rsp)
1: iretq
2: /* Slow iret via hypervisor. */
- andl $~NMI_MASK, 16(%rsp)
+ andl $~NMI_MASK, 2*8(%rsp)
pushq $\flag
jmp hypercall_page + (__HYPERVISOR_iret * 32)
.endm
-
- .macro SWITCH_TO_KERNEL ssoff,adjust=0
- jc 1f
- orb $1,\ssoff-\adjust+4(%rsp)
-1:
- .endm
/*
* A newly forked process directly context switches into this.
diff -r 14aeb7981e4e -r 0b2794d3320f linux-2.6-xen-sparse/drivers/xen/core/machine_reboot.c
--- a/linux-2.6-xen-sparse/drivers/xen/core/machine_reboot.c Tue Mar 27 12:21:48 2007 -0600
+++ b/linux-2.6-xen-sparse/drivers/xen/core/machine_reboot.c Wed Mar 28 10:38:41 2007 +0100
@@ -123,7 +123,7 @@ static int take_machine_down(void *p_fas
static int take_machine_down(void *p_fast_suspend)
{
int fast_suspend = *(int *)p_fast_suspend;
- int suspend_cancelled, err, cpu;
+ int suspend_cancelled, err;
extern void time_resume(void);
if (fast_suspend) {
diff -r 14aeb7981e4e -r 0b2794d3320f linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_dev.c
--- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_dev.c Tue Mar 27 12:21:48 2007 -0600
+++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_dev.c Wed Mar 28 10:38:41 2007 +0100
@@ -236,7 +236,10 @@ static ssize_t xenbus_dev_write(struct f
break;
case XS_WATCH:
- case XS_UNWATCH:
+ case XS_UNWATCH: {
+ static const char *XS_RESP = "OK";
+ struct xsd_sockmsg hdr;
+
path = u->u.buffer + sizeof(u->u.msg);
token = memchr(path, 0, u->u.msg.len);
if (token == NULL) {
@@ -246,9 +249,6 @@ static ssize_t xenbus_dev_write(struct f
token++;
if (msg_type == XS_WATCH) {
- static const char * XS_WATCH_RESP = "OK";
- struct xsd_sockmsg hdr;
-
watch = kmalloc(sizeof(*watch), GFP_KERNEL);
watch->watch.node = kmalloc(strlen(path)+1,
GFP_KERNEL);
@@ -266,11 +266,6 @@ static ssize_t xenbus_dev_write(struct f
}
list_add(&watch->list, &u->watches);
-
- hdr.type = XS_WATCH;
- hdr.len = strlen(XS_WATCH_RESP) + 1;
- queue_reply(u, (char *)&hdr, sizeof(hdr));
- queue_reply(u, (char *)XS_WATCH_RESP, hdr.len);
} else {
list_for_each_entry_safe(watch, tmp_watch,
&u->watches, list) {
@@ -285,7 +280,12 @@ static ssize_t xenbus_dev_write(struct f
}
}
+ hdr.type = msg_type;
+ hdr.len = strlen(XS_RESP) + 1;
+ queue_reply(u, (char *)&hdr, sizeof(hdr));
+ queue_reply(u, (char *)XS_RESP, hdr.len);
break;
+ }
default:
rc = -EINVAL;
diff -r 14aeb7981e4e -r 0b2794d3320f tools/Rules.mk
--- a/tools/Rules.mk Tue Mar 27 12:21:48 2007 -0600
+++ b/tools/Rules.mk Wed Mar 28 10:38:41 2007 +0100
@@ -24,9 +24,9 @@ CFLAGS += $(CFLAGS-y)
CFLAGS += $(CFLAGS-y)
# Require GCC v3.4+ (to avoid issues with alignment constraints in Xen headers)
-ifeq ($(CONFIG_X86)$(call cc-ver,$(CC),0x030400),yn)
-$(error Xen tools require at least gcc-3.4)
-endif
+check-$(CONFIG_X86) = $(call cc-ver-check,CC,0x030400,\
+ "Xen requires at least gcc-3.4")
+$(eval $(check-y))
%.opic: %.c
$(CC) $(CPPFLAGS) -DPIC $(CFLAGS) -fPIC -c -o $@ $<
diff -r 14aeb7981e4e -r 0b2794d3320f tools/examples/xend-config.sxp
--- a/tools/examples/xend-config.sxp Tue Mar 27 12:21:48 2007 -0600
+++ b/tools/examples/xend-config.sxp Wed Mar 28 10:38:41 2007 +0100
@@ -46,6 +46,11 @@
# (xen-api-server ((9363 pam '^localhost$ example\\.com$')
# (unix none)))
#
+# Optionally, the TCP Xen-API server can use SSL by specifying the private
+# key and certificate location:
+#
+# (9367 pam '' /etc/xen/xen-api.key /etc/xen/xen-api.crt)
+#
# Default:
# (xen-api-server ((unix)))
@@ -59,10 +64,17 @@
#(xend-unix-path /var/lib/xend/xend-socket)
-# Address and port xend should use for the TCP XMLRPC interface,
+
+# Address and port xend should use for the legacy TCP XMLRPC interface,
# if xen-tcp-xmlrpc-server is set.
#(xen-tcp-xmlrpc-server-address 'localhost')
#(xen-tcp-xmlrpc-server-port 8006)
+
+# SSL key and certificate to use for the legacy TCP XMLRPC interface.
+# Setting these will mean that this port serves only SSL connections as
+# opposed to plaintext ones.
+#(xend-tcp-xmlrpc-server-ssl-key-file /etc/xen/xmlrpc.key)
+#(xend-tcp-xmlrpc-server-ssl-cert-file /etc/xen/xmlrpc.crt)
# Port xend should use for the HTTP interface, if xend-http-server is set.
diff -r 14aeb7981e4e -r 0b2794d3320f tools/ioemu/Makefile.target
--- a/tools/ioemu/Makefile.target Tue Mar 27 12:21:48 2007 -0600
+++ b/tools/ioemu/Makefile.target Wed Mar 28 10:38:41 2007 +0100
@@ -193,6 +193,10 @@ LIBS+=-lsocket -lnsl -lresolv
LIBS+=-lsocket -lnsl -lresolv
endif
+ifeq ($(debug),y)
+CFLAGS += -DQEMU_VNC_MONITOR_EXPORT
+endif
+
# profiling code
ifdef TARGET_GPROF
LDFLAGS+=-p
diff -r 14aeb7981e4e -r 0b2794d3320f tools/ioemu/vnc.c
--- a/tools/ioemu/vnc.c Tue Mar 27 12:21:48 2007 -0600
+++ b/tools/ioemu/vnc.c Wed Mar 28 10:38:41 2007 +0100
@@ -113,8 +113,10 @@ struct VncState
int visible_w;
int visible_h;
+#ifdef QEMU_VNC_MONITOR_EXPORT
int ctl_keys; /* Ctrl+Alt starts calibration */
int shift_keys; /* Shift / CapsLock keys */
+#endif
int numlock;
};
@@ -895,6 +897,7 @@ static void do_key_event(VncState *vs, i
kbd_put_keycode(keycode & 0x7f);
else
kbd_put_keycode(keycode | 0x80);
+#ifdef QEMU_VNC_MONITOR_EXPORT
} else if (down) {
int qemu_keysym = 0;
@@ -922,8 +925,10 @@ static void do_key_event(VncState *vs, i
}
if (qemu_keysym != 0)
kbd_put_keysym(qemu_keysym);
- }
-
+#endif
+ }
+
+#ifdef QEMU_VNC_MONITOR_EXPORT
if (down) {
switch (sym) {
case XK_Control_L:
@@ -976,6 +981,10 @@ static void do_key_event(VncState *vs, i
break;
}
}
+#else
+ if (!down && sym == XK_Num_Lock)
+ vs->numlock = !vs->numlock;
+#endif
}
static void key_event(VncState *vs, int down, uint32_t sym)
diff -r 14aeb7981e4e -r 0b2794d3320f tools/libxen/include/xen_network.h
--- a/tools/libxen/include/xen_network.h Tue Mar 27 12:21:48 2007 -0600
+++ b/tools/libxen/include/xen_network.h Wed Mar 28 10:38:41 2007 +0100
@@ -22,6 +22,7 @@
#include "xen_common.h"
#include "xen_network_decl.h"
#include "xen_pif_decl.h"
+#include "xen_string_string_map.h"
#include "xen_vif_decl.h"
@@ -68,6 +69,7 @@ typedef struct xen_network_record
char *name_description;
struct xen_vif_record_opt_set *vifs;
struct xen_pif_record_opt_set *pifs;
+ xen_string_string_map *other_config;
} xen_network_record;
/**
@@ -220,6 +222,13 @@ xen_network_get_pifs(xen_session *sessio
/**
+ * Get the other_config field of the given network.
+ */
+extern bool
+xen_network_get_other_config(xen_session *session, xen_string_string_map **result, xen_network network);
+
+
+/**
* Set the name/label field of the given network.
*/
extern bool
@@ -234,6 +243,30 @@ xen_network_set_name_description(xen_ses
/**
+ * Set the other_config field of the given network.
+ */
+extern bool
+xen_network_set_other_config(xen_session *session, xen_network network, xen_string_string_map *other_config);
+
+
+/**
+ * Add the given key-value pair to the other_config field of the given
+ * network.
+ */
+extern bool
+xen_network_add_to_other_config(xen_session *session, xen_network network, char *key, char *value);
+
+
+/**
+ * Remove the given key and its corresponding value from the
+ * other_config field of the given network. If the key is not in that Map,
+ * then do nothing.
+ */
+extern bool
+xen_network_remove_from_other_config(xen_session *session, xen_network network, char *key);
+
+
+/**
* Return a list of all the networks known to the system.
*/
extern bool
diff -r 14aeb7981e4e -r 0b2794d3320f tools/libxen/src/xen_network.c
--- a/tools/libxen/src/xen_network.c Tue Mar 27 12:21:48 2007 -0600
+++ b/tools/libxen/src/xen_network.c Wed Mar 28 10:38:41 2007 +0100
@@ -24,6 +24,7 @@
#include "xen_internal.h"
#include "xen_network.h"
#include "xen_pif.h"
+#include "xen_string_string_map.h"
#include "xen_vif.h"
@@ -52,7 +53,10 @@ static const struct_member xen_network_r
.offset = offsetof(xen_network_record, vifs) },
{ .key = "PIFs",
.type = &abstract_type_ref_set,
- .offset = offsetof(xen_network_record, pifs) }
+ .offset = offsetof(xen_network_record, pifs) },
+ { .key = "other_config",
+ .type = &abstract_type_string_string_map,
+ .offset = offsetof(xen_network_record, other_config) }
};
const abstract_type xen_network_record_abstract_type_ =
@@ -78,6 +82,7 @@ xen_network_record_free(xen_network_reco
free(record->name_description);
xen_vif_record_opt_set_free(record->vifs);
xen_pif_record_opt_set_free(record->pifs);
+ xen_string_string_map_free(record->other_config);
free(record);
}
@@ -239,6 +244,23 @@ xen_network_get_pifs(xen_session *sessio
bool
+xen_network_get_other_config(xen_session *session, xen_string_string_map **result, xen_network network)
+{
+ abstract_value param_values[] =
+ {
+ { .type = &abstract_type_string,
+ .u.string_val = network }
+ };
+
+ abstract_type result_type = abstract_type_string_string_map;
+
+ *result = NULL;
+ XEN_CALL_("network.get_other_config");
+ return session->ok;
+}
+
+
+bool
xen_network_set_name_label(xen_session *session, xen_network network, char *label)
{
abstract_value param_values[] =
@@ -271,6 +293,56 @@ xen_network_set_name_description(xen_ses
bool
+xen_network_set_other_config(xen_session *session, xen_network network, xen_string_string_map *other_config)
+{
+ abstract_value param_values[] =
+ {
+ { .type = &abstract_type_string,
+ .u.string_val = network },
+ { .type = &abstract_type_string_string_map,
+ .u.set_val = (arbitrary_set *)other_config }
+ };
+
+ xen_call_(session, "network.set_other_config", param_values, 2, NULL,
NULL);
+ return session->ok;
+}
+
+
+bool
+xen_network_add_to_other_config(xen_session *session, xen_network network, char *key, char *value)
+{
+ abstract_value param_values[] =
+ {
+ { .type = &abstract_type_string,
+ .u.string_val = network },
+ { .type = &abstract_type_string,
+ .u.string_val = key },
+ { .type = &abstract_type_string,
+ .u.string_val = value }
+ };
+
+ xen_call_(session, "network.add_to_other_config", param_values, 3, NULL,
NULL);
+ return session->ok;
+}
+
+
+bool
+xen_network_remove_from_other_config(xen_session *session, xen_network network, char *key)
+{
+ abstract_value param_values[] =
+ {
+ { .type = &abstract_type_string,
+ .u.string_val = network },
+ { .type = &abstract_type_string,
+ .u.string_val = key }
+ };
+
+ xen_call_(session, "network.remove_from_other_config", param_values, 2,
NULL, NULL);
+ return session->ok;
+}
+
+
+bool
xen_network_get_all(xen_session *session, struct xen_network_set **result)
{
diff -r 14aeb7981e4e -r 0b2794d3320f tools/pygrub/src/pygrub
--- a/tools/pygrub/src/pygrub Tue Mar 27 12:21:48 2007 -0600
+++ b/tools/pygrub/src/pygrub Wed Mar 28 10:38:41 2007 +0100
@@ -125,16 +125,13 @@ class GrubLineEditor(curses.textpad.Text
is that we can handle lines longer than the window."""
self.win.clear()
- if self.pos > 70:
- if self.pos > 130:
- off = 120
- else:
- off = 55
- l = [ "<" ] + self.line[off:]
- p = self.pos - off
- else:
- l = self.line[:70]
- p = self.pos
+ p = self.pos
+ off = 0
+ while p > 70:
+ p -= 55
+ off += 55
+
+ l = self.line[off:off+70]
self.win.addstr(0, 0, string.join(l, ("")))
if self.pos > 70:
self.win.addch(0, 0, curses.ACS_LARROW)
diff -r 14aeb7981e4e -r 0b2794d3320f tools/python/xen/util/xmlrpcclient.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/python/xen/util/xmlrpcclient.py Wed Mar 28 10:38:41 2007 +0100
@@ -0,0 +1,123 @@
+#============================================================================
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of version 2.1 of the GNU Lesser General Public
+# License as published by the Free Software Foundation.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#============================================================================
+# Copyright (C) 2006 Anthony Liguori <aliguori@xxxxxxxxxx>
+# Copyright (C) 2007 XenSource Inc.
+#============================================================================
+
+
+from httplib import FakeSocket, HTTPConnection, HTTP
+import socket
+import string
+import xmlrpclib
+from types import StringTypes
+
+
+try:
+ import SSHTransport
+ ssh_enabled = True
+except ImportError:
+ # SSHTransport is disabled on Python <2.4, because it uses the subprocess
+ # package.
+ ssh_enabled = False
+
+
+# A new ServerProxy that also supports httpu urls. An http URL comes in the
+# form:
+#
+# httpu:///absolute/path/to/socket.sock
+#
+# It assumes that the RPC handler is /RPC2. This probably needs to be improved
+
+class HTTPUnixConnection(HTTPConnection):
+ def connect(self):
+ self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ self.sock.connect(self.host)
+
+class HTTPUnix(HTTP):
+ _connection_class = HTTPUnixConnection
+
+class UnixTransport(xmlrpclib.Transport):
+ def request(self, host, handler, request_body, verbose=0):
+ self.__handler = handler
+ return xmlrpclib.Transport.request(self, host, '/RPC2',
+ request_body, verbose)
+ def make_connection(self, host):
+ return HTTPUnix(self.__handler)
+
+
+# We need our own transport for HTTPS, because xmlrpclib.SafeTransport is
+# broken -- it does not handle ERROR_ZERO_RETURN properly.
+class HTTPSTransport(xmlrpclib.SafeTransport):
+ def _parse_response(self, file, sock):
+ p, u = self.getparser()
+ while 1:
+ try:
+ if sock:
+ response = sock.recv(1024)
+ else:
+ response = file.read(1024)
+ except socket.sslerror, exn:
+ if exn[0] == socket.SSL_ERROR_ZERO_RETURN:
+ break
+ raise
+
+ if not response:
+ break
+ if self.verbose:
+ print 'body:', repr(response)
+ p.feed(response)
+
+ file.close()
+ p.close()
+ return u.close()
+
+
+# See xmlrpclib2.TCPXMLRPCServer._marshalled_dispatch.
+def conv_string(x):
+ if isinstance(x, StringTypes):
+ s = string.replace(x, "'", r"\047")
+ exec "s = '" + s + "'"
+ return s
+ else:
+ return x
+
+
+class ServerProxy(xmlrpclib.ServerProxy):
+ def __init__(self, uri, transport=None, encoding=None, verbose=0,
+ allow_none=1):
+ if transport == None:
+ (protocol, rest) = uri.split(':', 1)
+ if protocol == 'httpu':
+ uri = 'http:' + rest
+ transport = UnixTransport()
+ elif protocol == 'https':
+ transport = HTTPSTransport()
+ elif protocol == 'ssh':
+ global ssh_enabled
+ if ssh_enabled:
+ (transport, uri) = SSHTransport.getHTTPURI(uri)
+ else:
+ raise ValueError(
+ "SSH transport not supported on Python <2.4.")
+ xmlrpclib.ServerProxy.__init__(self, uri, transport, encoding,
+ verbose, allow_none)
+
+ def __request(self, methodname, params):
+ response = xmlrpclib.ServerProxy.__request(self, methodname, params)
+
+ if isinstance(response, tuple):
+ return tuple([conv_string(x) for x in response])
+ else:
+ return conv_string(response)
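
The new xen.util.xmlrpcclient module above is what XendClient.py switches to later in this patch. A minimal usage sketch, assuming a xend listening on its unix socket; the socket path and the xend.domains call are illustrative assumptions, not taken from this patch:

    # Sketch only: the path and RPC method below are assumptions.
    from xen.util.xmlrpcclient import ServerProxy

    # 'httpu' URLs carry the absolute path of a unix-domain socket; the
    # handler is forced to /RPC2 by UnixTransport above.
    server = ServerProxy('httpu:///var/run/xend/xmlrpc.sock')

    # An 'https' URL would select HTTPSTransport instead, which works around
    # xmlrpclib.SafeTransport's SSL_ERROR_ZERO_RETURN handling.
    print server.xend.domains(False)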
diff -r 14aeb7981e4e -r 0b2794d3320f tools/python/xen/util/xmlrpclib2.py
--- a/tools/python/xen/util/xmlrpclib2.py Tue Mar 27 12:21:48 2007 -0600
+++ b/tools/python/xen/util/xmlrpclib2.py Wed Mar 28 10:38:41 2007 +0100
@@ -21,12 +21,10 @@ An enhanced XML-RPC client/server interf
"""
import re
-import string
import fcntl
from types import *
-from httplib import HTTPConnection, HTTP
from SimpleXMLRPCServer import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler
import SocketServer
import xmlrpclib, socket, os, stat
@@ -35,14 +33,6 @@ import mkdir
from xen.web import connection
from xen.xend.XendLogging import log
-
-try:
- import SSHTransport
- ssh_enabled = True
-except ImportError:
- # SSHTransport is disabled on Python <2.4, because it uses the subprocess
- # package.
- ssh_enabled = False
#
# Convert all integers to strings as described in the Xen API
@@ -64,13 +54,6 @@ def stringify(value):
return value
-# A new ServerProxy that also supports httpu urls. An http URL comes in the
-# form:
-#
-# httpu:///absolute/path/to/socket.sock
-#
-# It assumes that the RPC handler is /RPC2. This probably needs to be improved
-
# We're forced to subclass the RequestHandler class so that we can work around
# some bugs in Keep-Alive handling and also enabled it by default
class XMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
@@ -105,60 +88,6 @@ class XMLRPCRequestHandler(SimpleXMLRPCR
self.wfile.flush()
if self.close_connection == 1:
self.connection.shutdown(1)
-
-class HTTPUnixConnection(HTTPConnection):
- def connect(self):
- self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
- self.sock.connect(self.host)
-
-class HTTPUnix(HTTP):
- _connection_class = HTTPUnixConnection
-
-class UnixTransport(xmlrpclib.Transport):
- def request(self, host, handler, request_body, verbose=0):
- self.__handler = handler
- return xmlrpclib.Transport.request(self, host, '/RPC2',
- request_body, verbose)
- def make_connection(self, host):
- return HTTPUnix(self.__handler)
-
-
-# See _marshalled_dispatch below.
-def conv_string(x):
- if isinstance(x, StringTypes):
- s = string.replace(x, "'", r"\047")
- exec "s = '" + s + "'"
- return s
- else:
- return x
-
-
-class ServerProxy(xmlrpclib.ServerProxy):
- def __init__(self, uri, transport=None, encoding=None, verbose=0,
- allow_none=1):
- if transport == None:
- (protocol, rest) = uri.split(':', 1)
- if protocol == 'httpu':
- uri = 'http:' + rest
- transport = UnixTransport()
- elif protocol == 'ssh':
- global ssh_enabled
- if ssh_enabled:
- (transport, uri) = SSHTransport.getHTTPURI(uri)
- else:
- raise ValueError(
- "SSH transport not supported on Python <2.4.")
- xmlrpclib.ServerProxy.__init__(self, uri, transport, encoding,
- verbose, allow_none)
-
- def __request(self, methodname, params):
- response = xmlrpclib.ServerProxy.__request(self, methodname, params)
-
- if isinstance(response, tuple):
- return tuple([conv_string(x) for x in response])
- else:
- return conv_string(response)
-
# This is a base XML-RPC server for TCP. It sets allow_reuse_address to
# true, and has an improved marshaller that logs and serializes exceptions.
diff -r 14aeb7981e4e -r 0b2794d3320f tools/python/xen/xend/XendAPI.py
--- a/tools/python/xen/xend/XendAPI.py Tue Mar 27 12:21:48 2007 -0600
+++ b/tools/python/xen/xend/XendAPI.py Wed Mar 28 10:38:41 2007 +0100
@@ -32,7 +32,9 @@ from xen.xend.XendError import *
from xen.xend.XendError import *
from xen.xend.XendClient import ERROR_INVALID_DOMAIN
from xen.xend.XendLogging import log
+from xen.xend.XendNetwork import XendNetwork
from xen.xend.XendTask import XendTask
+from xen.xend.XendPIFMetrics import XendPIFMetrics
from xen.xend.XendVMMetrics import XendVMMetrics
from xen.xend.XendAPIConstants import *
@@ -436,6 +438,12 @@ class XendAPI(object):
'debug' : valid_debug,
}
+ autoplug_classes = {
+ 'network' : XendNetwork,
+ 'VM_metrics' : XendVMMetrics,
+ 'PIF_metrics' : XendPIFMetrics,
+ }
+
# Cheat methods
# -------------
# Methods that have a trivial implementation for all classes.
@@ -457,6 +465,40 @@ class XendAPI(object):
setattr(cls, get_by_uuid, _get_by_uuid)
setattr(cls, get_uuid, _get_uuid)
+
+ # Autoplugging classes
+ # --------------------
+ # These have all of their methods grabbed out from the implementation
+ # class, and wrapped up to be compatible with the Xen-API.
+
+ for api_cls, impl_cls in autoplug_classes.items():
+ def doit(n):
+ getter = getattr(cls, '_%s_get' % api_cls)
+ dot_n = '%s.%s' % (api_cls, n)
+ full_n = '%s_%s' % (api_cls, n)
+ if not hasattr(cls, full_n):
+ f = getattr(impl_cls, n)
+ argcounts[dot_n] = f.func_code.co_argcount + 1
+ setattr(cls, full_n,
+ lambda s, session, ref, *args: \
+ xen_api_success( \
+ f(getter(s, session, ref), *args)))
+
+ ro_attrs = getattr(cls, '%s_attr_ro' % api_cls, [])
+ rw_attrs = getattr(cls, '%s_attr_rw' % api_cls, [])
+ methods = getattr(cls, '%s_methods' % api_cls, [])
+ funcs = getattr(cls, '%s_funcs' % api_cls, [])
+
+ for attr_name in ro_attrs + rw_attrs:
+ doit('get_%s' % attr_name)
+ for attr_name in rw_attrs + cls.Base_attr_rw:
+ doit('set_%s' % attr_name)
+ for method_name, return_type in methods + cls.Base_methods:
+ doit('%s' % method_name)
+ for func_name, return_type in funcs + cls.Base_funcs:
+ doit('%s' % func_name)
+
+
# Wrapping validators around XMLRPC calls
# ---------------------------------------
@@ -466,7 +508,8 @@ class XendAPI(object):
n_ = n.replace('.', '_')
try:
f = getattr(cls, n_)
- argcounts[n] = f.func_code.co_argcount - 1
+ if n not in argcounts:
+ argcounts[n] = f.func_code.co_argcount - 1
validators = takes_instance and validator and \
[validator] or []
@@ -516,7 +559,7 @@ class XendAPI(object):
Base_attr_ro = ['uuid']
Base_attr_rw = []
- Base_methods = [('destroy', None), ('get_record', 'Struct')]
+ Base_methods = [('get_record', 'Struct')]
Base_funcs = [('get_all', 'Set'), ('get_by_uuid', None)]
# Xen API: Class Session
@@ -916,59 +959,40 @@ class XendAPI(object):
network_attr_ro = ['VIFs', 'PIFs']
network_attr_rw = ['name_label',
- 'name_description']
-
- network_funcs = [('create', 'network')]
-
- def network_create(self, _, name_label, name_description):
- return xen_api_success(
- XendNode.instance().network_create(name_label, name_description))
+ 'name_description',
+ 'other_config']
+ network_methods = [('add_to_other_config', None),
+ ('remove_from_other_config', None),
+ ('destroy', None)]
+ network_funcs = [('create', None)]
+
+ def _network_get(self, _, ref):
+ return XendNode.instance().get_network(ref)
+
+ def network_get_all(self, _):
+ return xen_api_success(XendNode.instance().get_network_refs())
+
+ def network_create(self, _, record):
+ return xen_api_success(XendNode.instance().network_create(record))
def network_destroy(self, _, ref):
return xen_api_success(XendNode.instance().network_destroy(ref))
- def _get_network(self, ref):
- return XendNode.instance().get_network(ref)
-
- def network_get_all(self, _):
- return xen_api_success(XendNode.instance().get_network_refs())
-
- def network_get_record(self, _, ref):
- return xen_api_success(
- XendNode.instance().get_network(ref).get_record())
-
- def network_get_name_label(self, _, ref):
- return xen_api_success(self._get_network(ref).name_label)
-
- def network_get_name_description(self, _, ref):
- return xen_api_success(self._get_network(ref).name_description)
-
- def network_get_VIFs(self, _, ref):
- return xen_api_success(self._get_network(ref).get_VIF_UUIDs())
-
- def network_get_PIFs(self, session, ref):
- return xen_api_success(self._get_network(ref).get_PIF_UUIDs())
-
- def network_set_name_label(self, _, ref, val):
- return xen_api_success(self._get_network(ref).set_name_label(val))
-
- def network_set_name_description(self, _, ref, val):
- return xen_api_success(self._get_network(ref).set_name_description(val))
# Xen API: Class PIF
# ----------------------------------------------------------------
- PIF_attr_ro = ['metrics']
+ PIF_attr_ro = ['network',
+ 'host',
+ 'metrics']
PIF_attr_rw = ['device',
- 'network',
- 'host',
'MAC',
'MTU',
'VLAN']
PIF_attr_inst = PIF_attr_rw
- PIF_methods = [('create_VLAN', 'int')]
+ PIF_methods = [('create_VLAN', 'int'), ('destroy', None)]
def _get_PIF(self, ref):
return XendNode.instance().pifs[ref]
@@ -1049,20 +1073,8 @@ class XendAPI(object):
def PIF_metrics_get_all(self, _):
return xen_api_success(XendNode.instance().pif_metrics.keys())
- def _PIF_metrics_get(self, ref):
+ def _PIF_metrics_get(self, _, ref):
return XendNode.instance().pif_metrics[ref]
-
- def PIF_metrics_get_record(self, _, ref):
- return xen_api_success(self._PIF_metrics_get(ref).get_record())
-
- def PIF_metrics_get_io_read_kbs(self, _, ref):
- return xen_api_success(self._PIF_metrics_get(ref).get_io_read_kbs())
-
- def PIF_metrics_get_io_write_kbs(self, _, ref):
- return xen_api_success(self._PIF_metrics_get(ref).get_io_write_kbs())
-
- def PIF_metrics_get_last_updated(self, _1, _2):
- return xen_api_success(now())
# Xen API: Class VM
@@ -1131,7 +1143,8 @@ class XendAPI(object):
('save', None),
('set_memory_dynamic_max_live', None),
('set_memory_dynamic_min_live', None),
- ('send_trigger', None)]
+ ('send_trigger', None),
+ ('destroy', None)]
VM_funcs = [('create', 'VM'),
('restore', None),
@@ -1390,7 +1403,8 @@ class XendAPI(object):
if key.startswith("cpumap"):
vcpu = int(key[6:])
try:
- xendom.domain_pincpu(xeninfo.getDomid(), vcpu, value)
+ cpus = map(int, value.split(","))
+ xendom.domain_pincpu(xeninfo.getDomid(), vcpu, cpus)
except Exception, ex:
log.exception(ex)
@@ -1633,14 +1647,15 @@ class XendAPI(object):
def VM_send_sysrq(self, _, vm_ref, req):
xeninfo = XendDomain.instance().get_vm_by_uuid(vm_ref)
- if xeninfo.state != XEN_API_VM_POWER_STATE_RUNNING:
+ if xeninfo.state == XEN_API_VM_POWER_STATE_RUNNING \
+ or xeninfo.state == XEN_API_VM_POWER_STATE_PAUSED:
+ xeninfo.send_sysrq(req)
+ return xen_api_success_void()
+ else:
return xen_api_error(
['VM_BAD_POWER_STATE', vm_ref,
XendDomain.POWER_STATE_NAMES[XEN_API_VM_POWER_STATE_RUNNING],
XendDomain.POWER_STATE_NAMES[xeninfo.state]])
- xeninfo.send_sysrq(req)
- return xen_api_success_void()
-
def VM_send_trigger(self, _, vm_ref, trigger, vcpu):
xendom = XendDomain.instance()
@@ -1675,58 +1690,31 @@ class XendAPI(object):
VM_metrics_attr_rw = []
VM_metrics_methods = []
- def _VM_metrics_get(self, ref):
+ def VIF_metrics_get_all(self, session):
+ return self.VIF_get_all(session)
+
+ def _VM_metrics_get(self, _, ref):
return XendVMMetrics.get_by_uuid(ref)
def VM_metrics_get_all(self, _):
return xen_api_success(XendVMMetrics.get_all())
- def VM_metrics_get_record(self, _, ref):
- return xen_api_success(self._VM_metrics_get(ref).get_record())
-
- def VM_metrics_get_memory_actual(self, _, ref):
- return xen_api_success(self._VM_metrics_get(ref).get_memory_actual())
-
- def VM_metrics_get_VCPUs_number(self, _, ref):
- return xen_api_success(self._VM_metrics_get(ref).get_VCPUs_number())
-
- def VM_metrics_get_VCPUs_utilisation(self, _, ref):
- return xen_api_success(self._VM_metrics_get(ref).get_VCPUs_utilisation())
-
- def VM_metrics_get_VCPUs_CPU(self, _, ref):
- return xen_api_success(self._VM_metrics_get(ref).get_VCPUs_CPU())
-
- def VM_metrics_get_VCPUs_flags(self, _, ref):
- return xen_api_success(self._VM_metrics_get(ref).get_VCPUs_flags())
-
- def VM_metrics_get_VCPUs_params(self, _, ref):
- return xen_api_success(self._VM_metrics_get(ref).get_VCPUs_params())
-
- def VM_metrics_get_start_time(self, _, ref):
- return xen_api_success(self._VM_metrics_get(ref).get_start_time())
-
- def VM_metrics_get_state(self, _, ref):
- return xen_api_success(self._VM_metrics_get(ref).get_state())
-
- def VM_metrics_get_last_updated(self, _1, _2):
- return xen_api_success(now())
-
# Xen API: Class VBD
# ----------------------------------------------------------------
- VBD_attr_ro = ['metrics',
+ VBD_attr_ro = ['VM',
+ 'VDI',
+ 'metrics',
'runtime_properties']
- VBD_attr_rw = ['VM',
- 'VDI',
- 'device',
+ VBD_attr_rw = ['device',
'bootable',
'mode',
'type']
VBD_attr_inst = VBD_attr_rw
- VBD_methods = [('media_change', None)]
+ VBD_methods = [('media_change', None), ('destroy', None)]
VBD_funcs = [('create', 'VBD')]
# object methods
@@ -1868,7 +1856,10 @@ class XendAPI(object):
'io_write_kbs',
'last_updated']
VBD_metrics_attr_rw = []
- VBD_methods = []
+ VBD_metrics_methods = []
+
+ def VBD_metrics_get_all(self, session):
+ return self.VBD_get_all(session)
def VBD_metrics_get_record(self, _, ref):
vm = XendDomain.instance().get_vm_with_dev_uuid('vbd', ref)
@@ -1893,16 +1884,17 @@ class XendAPI(object):
# Xen API: Class VIF
# ----------------------------------------------------------------
- VIF_attr_ro = ['metrics',
+ VIF_attr_ro = ['network',
+ 'VM',
+ 'metrics',
'runtime_properties']
VIF_attr_rw = ['device',
- 'network',
- 'VM',
'MAC',
'MTU']
VIF_attr_inst = VIF_attr_rw
+ VIF_methods = [('destroy', None)]
VIF_funcs = [('create', 'VIF')]
@@ -1960,10 +1952,10 @@ class XendAPI(object):
return xen_api_success(vif_ref)
def VIF_get_VM(self, session, vif_ref):
- xendom = XendDomain.instance()
- vm = xendom.get_vm_with_dev_uuid('vif', vif_ref)
+ xendom = XendDomain.instance()
+ vm = xendom.get_vm_with_dev_uuid('vif', vif_ref)
return xen_api_success(vm.get_uuid())
-
+
def VIF_get_MTU(self, session, vif_ref):
return self._VIF_get(vif_ref, 'MTU')
@@ -2008,7 +2000,7 @@ class XendAPI(object):
'io_write_kbs',
'last_updated']
VIF_metrics_attr_rw = []
- VIF_methods = []
+ VIF_metrics_methods = []
def VIF_metrics_get_record(self, _, ref):
vm = XendDomain.instance().get_vm_with_dev_uuid('vif', ref)
@@ -2044,7 +2036,7 @@ class XendAPI(object):
'other_config']
VDI_attr_inst = VDI_attr_ro + VDI_attr_rw
- VDI_methods = [('snapshot', 'VDI')]
+ VDI_methods = [('snapshot', 'VDI'), ('destroy', None)]
VDI_funcs = [('create', 'VDI'),
('get_by_name_label', 'Set(VDI)')]
@@ -2161,6 +2153,7 @@ class XendAPI(object):
VTPM_attr_inst = VTPM_attr_rw
+ VTPM_methods = [('destroy', None)]
VTPM_funcs = [('create', 'VTPM')]
# object methods
@@ -2319,7 +2312,7 @@ class XendAPI(object):
'name_label',
'name_description']
- SR_methods = [('clone', 'SR')]
+ SR_methods = [('clone', 'SR'), ('destroy', None)]
SR_funcs = [('get_by_name_label', 'Set(SR)'),
('get_by_uuid', 'SR')]
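
The autoplug_classes hook added above generates the network/VM_metrics/PIF_metrics accessors that this hunk removes: each get_*/set_* wrapper is built from the implementation class and its argument count registered before validation. A standalone sketch of that pattern, with hypothetical class and reference names, assuming Python 2 unbound-method semantics:

    def xen_api_success(value):
        return {'Status': 'Success', 'Value': value}

    class NetworkImpl:                       # stand-in implementation class
        def __init__(self, name_label):
            self.name_label = name_label
        def get_name_label(self):
            return self.name_label

    class API:
        _networks = {'net-ref-0': NetworkImpl('net0')}
        def _network_get(self, session, ref):
            return self._networks[ref]

    def autoplug(api_cls, prefix, impl_cls, attrs):
        # Generate <prefix>_get_<attr> wrappers that look the object up by
        # reference and wrap the result, as XendAPI's doit() does.
        for attr in attrs:
            def make(n):
                f = getattr(impl_cls, 'get_%s' % n)
                getter = getattr(api_cls, '_%s_get' % prefix)
                return lambda self, session, ref, *args: \
                    xen_api_success(f(getter(self, session, ref), *args))
            setattr(api_cls, '%s_get_%s' % (prefix, attr), make(attr))

    autoplug(API, 'network', NetworkImpl, ['name_label'])
    print API().network_get_name_label('session', 'net-ref-0')
    # -> {'Status': 'Success', 'Value': 'net0'}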
diff -r 14aeb7981e4e -r 0b2794d3320f tools/python/xen/xend/XendClient.py
--- a/tools/python/xen/xend/XendClient.py Tue Mar 27 12:21:48 2007 -0600
+++ b/tools/python/xen/xend/XendClient.py Wed Mar 28 10:38:41 2007 +0100
@@ -17,7 +17,7 @@
# Copyright (C) 2006 Anthony Liguori <aliguori@xxxxxxxxxx>
#============================================================================
-from xen.util.xmlrpclib2 import ServerProxy
+from xen.util.xmlrpcclient import ServerProxy
import os
import sys
diff -r 14aeb7981e4e -r 0b2794d3320f tools/python/xen/xend/XendConfig.py
--- a/tools/python/xen/xend/XendConfig.py Tue Mar 27 12:21:48 2007 -0600
+++ b/tools/python/xen/xend/XendConfig.py Wed Mar 28 10:38:41 2007 +0100
@@ -298,7 +298,7 @@ class XendConfig(dict):
'actions_after_reboot': 'restart',
'actions_after_crash': 'restart',
'actions_after_suspend': '',
- 'is_template': False,
+ 'is_a_template': False,
'is_control_domain': False,
'features': '',
'PV_bootloader': '',
@@ -452,7 +452,10 @@ class XendConfig(dict):
for key, typ in XENAPI_CFG_TYPES.items():
val = sxp.child_value(sxp_cfg, key)
if val is not None:
- cfg[key] = typ(val)
+ try:
+ cfg[key] = typ(val)
+ except (ValueError, TypeError), e:
+ log.warn('Unable to convert type value for key: %s' % key)
# Convert deprecated options to current equivalents.
@@ -845,6 +848,8 @@ class XendConfig(dict):
sxpr.append([name, s])
for xenapi, legacy in XENAPI_CFG_TO_LEGACY_CFG.items():
+ if legacy in ('cpus'): # skip this
+ continue
if self.has_key(xenapi) and self[xenapi] not in (None, []):
if type(self[xenapi]) == bool:
# convert booleans to ints before making an sxp item
@@ -858,7 +863,7 @@ class XendConfig(dict):
sxpr.append(["memory", int(self["memory_dynamic_max"])/MiB])
for legacy in LEGACY_UNSUPPORTED_BY_XENAPI_CFG:
- if legacy in ('domid', 'uuid'): # skip these
+ if legacy in ('domid', 'uuid', 'cpus'): # skip these
continue
if self.has_key(legacy) and self[legacy] not in (None, []):
sxpr.append([legacy, self[legacy]])
diff -r 14aeb7981e4e -r 0b2794d3320f tools/python/xen/xend/XendDomain.py
--- a/tools/python/xen/xend/XendDomain.py Tue Mar 27 12:21:48 2007 -0600
+++ b/tools/python/xen/xend/XendDomain.py Wed Mar 28 10:38:41 2007 +0100
@@ -569,6 +569,26 @@ class XendDomain:
finally:
self.domains_lock.release()
+ def autostart_domains(self):
+ """ Autostart managed domains that are marked as such. """
+
+ need_starting = []
+
+ self.domains_lock.acquire()
+ try:
+ for dom_uuid, dom in self.managed_domains.items():
+ if dom and dom.state == DOM_STATE_HALTED:
+ on_xend_start = dom.info.get('on_xend_start', 'ignore')
+ auto_power_on = dom.info.get('auto_power_on', False)
+ should_start = (on_xend_start == 'start') or auto_power_on
+ if should_start:
+ need_starting.append(dom_uuid)
+ finally:
+ self.domains_lock.release()
+
+ for dom_uuid in need_starting:
+ self.domain_start(dom_uuid, False)
+
def cleanup_domains(self):
"""Clean up domains that are marked as autostop.
Should be called when Xend goes down. This is currently
diff -r 14aeb7981e4e -r 0b2794d3320f tools/python/xen/xend/XendDomainInfo.py
--- a/tools/python/xen/xend/XendDomainInfo.py Tue Mar 27 12:21:48 2007 -0600
+++ b/tools/python/xen/xend/XendDomainInfo.py Wed Mar 28 10:38:41 2007 +0100
@@ -152,8 +152,9 @@ def recreate(info, priv):
try:
vmpath = xstransact.Read(dompath, "vm")
if not vmpath:
- log.warn('/local/domain/%d/vm is missing. recreate is '
- 'confused, trying our best to recover' % domid)
+ if not priv:
+ log.warn('/local/domain/%d/vm is missing. recreate is '
+ 'confused, trying our best to recover' % domid)
needs_reinitialising = True
raise XendError('reinit')
@@ -2354,7 +2355,8 @@ class XendDomainInfo:
if not dev_uuid:
raise XendError('Failed to create device')
- if self.state == XEN_API_VM_POWER_STATE_RUNNING:
+ if self.state == XEN_API_VM_POWER_STATE_RUNNING \
+ or self.state == XEN_API_VM_POWER_STATE_PAUSED:
_, config = self.info['devices'][dev_uuid]
dev_control = self.getDeviceController('vif')
diff -r 14aeb7981e4e -r 0b2794d3320f tools/python/xen/xend/XendLogging.py
--- a/tools/python/xen/xend/XendLogging.py Tue Mar 27 12:21:48 2007 -0600
+++ b/tools/python/xen/xend/XendLogging.py Wed Mar 28 10:38:41 2007 +0100
@@ -62,6 +62,7 @@ if 'TRACE' not in logging.__dict__:
# Work around a bug in Python's inspect module: findsource is supposed to
# raise IOError if it fails, with other functions in that module coping
# with that, but some people are seeing IndexError raised from there.
+ # This is Python bug 1628987. http://python.org/sf/1628987.
if hasattr(inspect, 'findsource'):
real_findsource = getattr(inspect, 'findsource')
def findsource(*args, **kwargs):
diff -r 14aeb7981e4e -r 0b2794d3320f tools/python/xen/xend/XendMonitor.py
--- a/tools/python/xen/xend/XendMonitor.py Tue Mar 27 12:21:48 2007 -0600
+++ b/tools/python/xen/xend/XendMonitor.py Wed Mar 28 10:38:41 2007 +0100
@@ -24,8 +24,8 @@ import re
"""Monitoring thread to keep track of Xend statistics. """
VBD_SYSFS_PATH = '/sys/devices/xen-backend/'
-VBD_WR_PATH = VBD_SYSFS_PATH + '%s/statistics/wr_req'
-VBD_RD_PATH = VBD_SYSFS_PATH + '%s/statistics/rd_req'
+VBD_WR_PATH = VBD_SYSFS_PATH + '%s/statistics/wr_sect'
+VBD_RD_PATH = VBD_SYSFS_PATH + '%s/statistics/rd_sect'
VBD_DOMAIN_RE = r'vbd-(?P<domid>\d+)-(?P<devid>\d+)$'
NET_PROCFS_PATH = '/proc/net/dev'
@@ -51,14 +51,9 @@ VIF_DOMAIN_RE = re.compile(r'vif(?P<domi
PROC_NET_DEV_RE)
PIF_RE = re.compile(r'^\s*(?P<iface>peth\d+):\s*' + PROC_NET_DEV_RE)
-# The VBD transfer figures are in "requests" where we don't
-# really know how many bytes per requests. For now we make
-# up a number roughly could be.
-VBD_ROUGH_BYTES_PER_REQUEST = 1024 * 8 * 4
-
# Interval to poll xc, sysfs and proc
POLL_INTERVAL = 2.0
-
+SECTOR_SIZE = 512
class XendMonitor(threading.Thread):
"""Monitors VCPU, VBD, VIF and PIF statistics for Xen API.
@@ -186,9 +181,8 @@ class XendMonitor(threading.Thread):
usage_at = time.time()
rd_stat = int(open(rd_stat_path).readline().strip())
wr_stat = int(open(wr_stat_path).readline().strip())
- rd_stat *= VBD_ROUGH_BYTES_PER_REQUEST
- wr_stat *= VBD_ROUGH_BYTES_PER_REQUEST
-
+ rd_stat *= SECTOR_SIZE
+ wr_stat *= SECTOR_SIZE
if domid not in stats:
stats[domid] = {}
diff -r 14aeb7981e4e -r 0b2794d3320f tools/python/xen/xend/XendNetwork.py
--- a/tools/python/xen/xend/XendNetwork.py Tue Mar 27 12:21:48 2007 -0600
+++ b/tools/python/xen/xend/XendNetwork.py Wed Mar 28 10:38:41 2007 +0100
@@ -28,10 +28,17 @@ IP_ROUTE_RE = r'^default via ([\d\.]+) d
IP_ROUTE_RE = r'^default via ([\d\.]+) dev (\w+)'
class XendNetwork:
- def __init__(self, uuid, name, description):
+ def __init__(self, uuid, record):
self.uuid = uuid
- self.name_label = name
- self.name_description = description
+ self.name_label = record.get('name_label', '')
+ self.name_description = record.get('name_description', '')
+ self.other_config = record.get('other_config', {})
+
+ def get_name_label(self):
+ return self.name_label
+
+ def get_name_description(self):
+ return self.name_description
def set_name_label(self, new_name):
self.name_label = new_name
@@ -41,7 +48,7 @@ class XendNetwork:
self.name_description = new_desc
XendNode.instance().save_networks()
- def get_VIF_UUIDs(self):
+ def get_VIFs(self):
result = []
vms = XendDomain.instance().get_all_vms()
for vm in vms:
@@ -52,17 +59,37 @@ class XendNetwork:
result.append(vif)
return result
- def get_PIF_UUIDs(self):
+ def get_PIFs(self):
return [x.uuid for x in XendNode.instance().pifs.values()
if x.network == self]
- def get_record(self, transient = True):
+ def get_other_config(self):
+ return self.other_config
+
+ def set_other_config(self, value):
+ self.other_config = value
+ XendNode.instance().save_networks()
+
+ def add_to_other_config(self, key, value):
+ self.other_config[key] = value
+ XendNode.instance().save_networks()
+
+ def remove_from_other_config(self, key):
+ if key in self.other_config:
+ del self.other_config[key]
+ XendNode.instance().save_networks()
+
+ def get_record(self):
+ return self.get_record_internal(True)
+
+ def get_record_internal(self, transient):
result = {
'uuid': self.uuid,
'name_label': self.name_label,
'name_description': self.name_description,
+ 'other_config' : self.other_config,
}
if transient:
- result['VIFs'] = self.get_VIF_UUIDs()
- result['PIFs'] = self.get_PIF_UUIDs()
+ result['VIFs'] = self.get_VIFs()
+ result['PIFs'] = self.get_PIFs()
return result
diff -r 14aeb7981e4e -r 0b2794d3320f tools/python/xen/xend/XendNode.py
--- a/tools/python/xen/xend/XendNode.py Tue Mar 27 12:21:48 2007 -0600
+++ b/tools/python/xen/xend/XendNode.py Wed Mar 28 10:38:41 2007 +0100
@@ -141,11 +141,9 @@ class XendNode:
saved_networks = self.state_store.load_state('network')
if saved_networks:
for net_uuid, network in saved_networks.items():
- self.network_create(network.get('name_label'),
- network.get('name_description', ''),
- False, net_uuid)
+ self.network_create(network, False, net_uuid)
else:
- self.network_create('net0', '', False)
+ self.network_create({'name_label' : 'net0' }, False)
# initialise PIFs
saved_pifs = self.state_store.load_state('pif')
@@ -199,12 +197,10 @@ class XendNode:
- def network_create(self, name_label, name_description, persist = True,
- net_uuid = None):
+ def network_create(self, record, persist = True, net_uuid = None):
if net_uuid is None:
net_uuid = uuid.createString()
- self.networks[net_uuid] = XendNetwork(net_uuid, name_label,
- name_description)
+ self.networks[net_uuid] = XendNetwork(net_uuid, record)
if persist:
self.save_networks()
return net_uuid
@@ -280,7 +276,7 @@ class XendNode:
self.state_store.save_state('pif', pif_records)
def save_networks(self):
- net_records = dict([(k, v.get_record(transient = False))
+ net_records = dict([(k, v.get_record_internal(False))
for k, v in self.networks.items()])
self.state_store.save_state('network', net_records)
diff -r 14aeb7981e4e -r 0b2794d3320f tools/python/xen/xend/XendOptions.py
--- a/tools/python/xen/xend/XendOptions.py Tue Mar 27 12:21:48 2007 -0600
+++ b/tools/python/xen/xend/XendOptions.py Wed Mar 28 10:38:41 2007 +0100
@@ -165,7 +165,13 @@ class XendOptions:
def get_xend_tcp_xmlrpc_server_address(self):
return self.get_config_string("xend-tcp-xmlrpc-server-address",
- self.xend_tcp_xmlrpc_server_address_default)
+ self.xend_tcp_xmlrpc_server_address_default)
+
+ def get_xend_tcp_xmlrpc_server_ssl_key_file(self):
+ return self.get_config_string("xend-tcp-xmlrpc-server-ssl-key-file")
+
+ def get_xend_tcp_xmlrpc_server_ssl_cert_file(self):
+ return self.get_config_string("xend-tcp-xmlrpc-server-ssl-cert-file")
def get_xend_unix_xmlrpc_server(self):
return self.get_config_bool("xend-unix-xmlrpc-server",
diff -r 14aeb7981e4e -r 0b2794d3320f tools/python/xen/xend/XendPIFMetrics.py
--- a/tools/python/xen/xend/XendPIFMetrics.py Tue Mar 27 12:21:48 2007 -0600
+++ b/tools/python/xen/xend/XendPIFMetrics.py Wed Mar 28 10:38:41 2007 +0100
@@ -39,11 +39,13 @@ class XendPIFMetrics:
return pifs_util[pifname][n]
return 0.0
+ def get_last_updated(self):
+ import xen.xend.XendAPI as XendAPI
+ return XendAPI.now()
+
def get_record(self):
- import xen.xend.XendAPI as XendAPI
return {'uuid' : self.uuid,
- 'PIF' : self.pif.uuid,
'io_read_kbs' : self.get_io_read_kbs(),
'io_write_kbs' : self.get_io_write_kbs(),
- 'last_updated' : XendAPI.now(),
+ 'last_updated' : self.get_last_updated(),
}
diff -r 14aeb7981e4e -r 0b2794d3320f tools/python/xen/xend/XendStateStore.py
--- a/tools/python/xen/xend/XendStateStore.py Tue Mar 27 12:21:48 2007 -0600
+++ b/tools/python/xen/xend/XendStateStore.py Wed Mar 28 10:38:41 2007 +0100
@@ -126,6 +126,13 @@ class XendStateStore:
if val_name not in cls_dict:
cls_dict[val_name] = {}
cls_dict[val_name][val_uuid] = None
+ elif val_type == '':
+ # dictionary
+ k = val_elem.getAttribute('key').encode('utf8')
+ v = val_elem.getAttribute('value').encode('utf8')
+ if val_name not in cls_dict:
+ cls_dict[val_name] = {}
+ cls_dict[val_name][k] = v
elif val_type == 'string':
cls_dict[val_name] = val_text.encode('utf8')
elif val_type == 'float':
@@ -197,7 +204,11 @@ class XendStateStore:
if type(val) == dict:
for val_uuid in val.keys():
val_node = doc.createElement(key)
- val_node.setAttribute('uuid', val_uuid)
+ if key == 'other_config':
+ val_node.setAttribute('key', str(val_uuid))
+ val_node.setAttribute('value', str(val[val_uuid]))
+ else:
+ val_node.setAttribute('uuid', val_uuid)
node.appendChild(val_node)
elif type(val) in (list, tuple):
for val_uuid in val:
diff -r 14aeb7981e4e -r 0b2794d3320f tools/python/xen/xend/XendVMMetrics.py
--- a/tools/python/xen/xend/XendVMMetrics.py Tue Mar 27 12:21:48 2007 -0600
+++ b/tools/python/xen/xend/XendVMMetrics.py Wed Mar 28 10:38:41 2007 +0100
@@ -92,7 +92,7 @@ class XendVMMetrics:
set_flag('blocked')
set_flag('online')
set_flag('running')
- vcpus_flags[i] = ",".join(flags)
+ vcpus_flags[i] = flags
return vcpus_flags
else:
return {}
@@ -115,7 +115,7 @@ class XendVMMetrics:
addState("dying")
addState("crashed")
addState("shutdown")
- return ",".join(states)
+ return states
except Exception, err:
# ignore missing domain
log.trace("domain_getinfo(%d) failed, ignoring: %s", domid,
str(err))
@@ -140,8 +140,11 @@ class XendVMMetrics:
def get_start_time(self):
return self.xend_domain_instance.info.get("start_time", -1)
+ def get_last_updated(self):
+ import xen.xend.XendAPI as XendAPI
+ return XendAPI.now()
+
def get_record(self):
- import xen.xend.XendAPI as XendAPI
return { 'uuid' : self.uuid,
'memory_actual' : self.get_memory_actual(),
'VCPUs_number' : self.get_VCPUs_number(),
@@ -151,5 +154,5 @@ class XendVMMetrics:
'VCPUs_params' : self.get_VCPUs_params(),
'start_time' : self.get_start_time(),
'state' : self.get_state(),
- 'last_updated' : XendAPI.now(),
+ 'last_updated' : self.get_last_updated(),
}
diff -r 14aeb7981e4e -r 0b2794d3320f tools/python/xen/xend/server/SSLXMLRPCServer.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/python/xen/xend/server/SSLXMLRPCServer.py Wed Mar 28 10:38:41 2007 +0100
@@ -0,0 +1,103 @@
+#============================================================================
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of version 2.1 of the GNU Lesser General Public
+# License as published by the Free Software Foundation.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#============================================================================
+# Copyright (C) 2007 XenSource Inc.
+#============================================================================
+
+
+"""
+HTTPS wrapper for an XML-RPC server interface. Requires PyOpenSSL (Debian
+package python-pyopenssl).
+"""
+
+import socket
+
+from OpenSSL import SSL
+
+from xen.util.xmlrpclib2 import XMLRPCRequestHandler, TCPXMLRPCServer
+
+
+class SSLXMLRPCRequestHandler(XMLRPCRequestHandler):
+ def setup(self):
+ self.connection = self.request
+ self.rfile = socket._fileobject(self.request, "rb", self.rbufsize)
+ self.wfile = socket._fileobject(self.request, "wb", self.wbufsize)
+
+#
+# Taken from pyOpenSSL-0.6 examples (public-domain)
+#
+
+class SSLWrapper:
+ """
+ """
+ def __init__(self, conn):
+ """
+ Connection is not yet a new-style class,
+ so I'm making a proxy instead of subclassing.
+ """
+ self.__dict__["conn"] = conn
+ def __getattr__(self, name):
+ return getattr(self.__dict__["conn"], name)
+ def __setattr__(self, name, value):
+ setattr(self.__dict__["conn"], name, value)
+
+ def close(self):
+ self.shutdown()
+ return self.__dict__["conn"].close()
+
+ def shutdown(self, how=1):
+ """
+ SimpleXMLRpcServer.doPOST calls shutdown(1),
+ and Connection.shutdown() doesn't take
+ an argument. So we just discard the argument.
+ """
+ # Block until the shutdown is complete
+ self.__dict__["conn"].shutdown()
+ self.__dict__["conn"].shutdown()
+
+ def accept(self):
+ """
+ This is the other part of the shutdown() workaround.
+ Since servers create new sockets, we have to infect
+ them with our magic. :)
+ """
+ c, a = self.__dict__["conn"].accept()
+ return (SSLWrapper(c), a)
+
+#
+# End of pyOpenSSL-0.6 example code.
+#
+
+class SSLXMLRPCServer(TCPXMLRPCServer):
+ def __init__(self, addr, allowed, xenapi, logRequests = 1,
+ ssl_key_file = None, ssl_cert_file = None):
+
+ TCPXMLRPCServer.__init__(self, addr, allowed, xenapi,
+ SSLXMLRPCRequestHandler, logRequests)
+
+ if not ssl_key_file or not ssl_cert_file:
+ raise ValueError("SSLXMLRPCServer requires ssl_key_file "
+ "and ssl_cert_file to be set.")
+
+ # make a SSL socket
+ ctx = SSL.Context(SSL.SSLv23_METHOD)
+ ctx.set_options(SSL.OP_NO_SSLv2)
+ ctx.use_privatekey_file (ssl_key_file)
+ ctx.use_certificate_file(ssl_cert_file)
+ self.socket = SSLWrapper(SSL.Connection(ctx,
+ socket.socket(self.address_family,
+ self.socket_type)))
+ self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ self.server_bind()
+ self.server_activate()
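
For reference, a minimal instantiation sketch for the class above; SrvServer.py below normally reaches it indirectly through XMLRPCServer, and the port and key/certificate paths here follow the examples in xend-config.sxp and are otherwise assumptions:

    from xen.xend.server.SSLXMLRPCServer import SSLXMLRPCServer

    server = SSLXMLRPCServer(
        ('localhost', 9367),                 # addr
        None,                                # allowed hosts (assumed: None = unrestricted)
        True,                                # xenapi flag (Xen-API dispatch)
        ssl_key_file  = '/etc/xen/xen-api.key',
        ssl_cert_file = '/etc/xen/xen-api.crt')
    server.serve_forever()                   # inherited from SocketServer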
diff -r 14aeb7981e4e -r 0b2794d3320f tools/python/xen/xend/server/SrvServer.py
--- a/tools/python/xen/xend/server/SrvServer.py Tue Mar 27 12:21:48 2007 -0600
+++ b/tools/python/xen/xend/server/SrvServer.py Wed Mar 28 10:38:41 2007 +0100
@@ -52,6 +52,7 @@ from xen.xend import Vifctl
from xen.xend import Vifctl
from xen.xend.XendLogging import log
from xen.xend.XendClient import XEN_API_SOCKET
+from xen.xend.XendDomain import instance as xenddomain
from xen.web.SrvDir import SrvDir
from SrvRoot import SrvRoot
@@ -72,7 +73,7 @@ class XendServers:
def add(self, server):
self.servers.append(server)
- def cleanup(self, signum = 0, frame = None):
+ def cleanup(self, signum = 0, frame = None, reloading = False):
log.debug("SrvServer.cleanup()")
self.cleaningUp = True
for server in self.servers:
@@ -80,12 +81,18 @@ class XendServers:
server.shutdown()
except:
pass
+
+        # Clean up domains that have an on_xend_stop action configured
+ if not reloading:
+ xenddomain().cleanup_domains()
+
self.running = False
+
def reloadConfig(self, signum = 0, frame = None):
log.debug("SrvServer.reloadConfig()")
self.reloadingConfig = True
- self.cleanup(signum, frame)
+ self.cleanup(signum, frame, reloading = True)
def start(self, status):
# Running the network script will spawn another process, which takes
@@ -144,6 +151,12 @@ class XendServers:
status.close()
status = None
+ # Reaching this point means we can auto start domains
+ try:
+ xenddomain().autostart_domains()
+ except Exception, e:
+ log.exception("Failed while autostarting domains")
+
# loop to keep main thread alive until it receives a SIGTERM
self.running = True
while self.running:
@@ -172,33 +185,49 @@ def _loadConfig(servers, root, reload):
api_cfg = xoptions.get_xen_api_server()
if api_cfg:
try:
- addrs = [(str(x[0]).split(':'),
- len(x) > 1 and x[1] or XendAPI.AUTH_PAM,
- len(x) > 2 and x[2] and map(re.compile, x[2].split(" "))
- or None)
- for x in api_cfg]
- for addrport, auth, allowed in addrs:
- if auth not in [XendAPI.AUTH_PAM, XendAPI.AUTH_NONE]:
- log.error('Xen-API server configuration %s is invalid, ' +
- 'as %s is not a valid authentication type.',
- api_cfg, auth)
- break
-
- if len(addrport) == 1:
- if addrport[0] == 'unix':
- servers.add(XMLRPCServer(auth, True,
- path = XEN_API_SOCKET,
- hosts_allowed = allowed))
- else:
- servers.add(
- XMLRPCServer(auth, True, True, '',
- int(addrport[0]),
- hosts_allowed = allowed))
- else:
- addr, port = addrport
- servers.add(XMLRPCServer(auth, True, True, addr,
- int(port),
- hosts_allowed = allowed))
+ for server_cfg in api_cfg:
+ # Parse the xen-api-server config
+
+ host = 'localhost'
+ port = 0
+ use_tcp = False
+ ssl_key_file = None
+ ssl_cert_file = None
+ auth_method = XendAPI.AUTH_NONE
+ hosts_allowed = None
+
+ host_addr = server_cfg[0].split(':', 1)
+ if len(host_addr) == 1 and host_addr[0].lower() == 'unix':
+ use_tcp = False
+ elif len(host_addr) == 1:
+ use_tcp = True
+ port = int(host_addr[0])
+ elif len(host_addr) == 2:
+ use_tcp = True
+ host = str(host_addr[0])
+ port = int(host_addr[1])
+
+ if len(server_cfg) > 1:
+ if server_cfg[1] in [XendAPI.AUTH_PAM, XendAPI.AUTH_NONE]:
+ auth_method = server_cfg[1]
+
+ if len(server_cfg) > 2:
+ hosts_allowed = server_cfg[2] or None
+
+
+ if len(server_cfg) > 4:
+ # SSL key and cert file
+ ssl_key_file = server_cfg[3]
+ ssl_cert_file = server_cfg[4]
+
+
+ servers.add(XMLRPCServer(auth_method, True, use_tcp = use_tcp,
+ ssl_key_file = ssl_key_file,
+ ssl_cert_file = ssl_cert_file,
+ host = host, port = port,
+ path = XEN_API_SOCKET,
+ hosts_allowed = hosts_allowed))
+
except (ValueError, TypeError), exn:
log.exception('Xen API Server init failed')
log.error('Xen-API server configuration %s is invalid.', api_cfg)
@@ -206,8 +235,17 @@ def _loadConfig(servers, root, reload):
if xoptions.get_xend_tcp_xmlrpc_server():
addr = xoptions.get_xend_tcp_xmlrpc_server_address()
port = xoptions.get_xend_tcp_xmlrpc_server_port()
- servers.add(XMLRPCServer(XendAPI.AUTH_PAM, False, use_tcp = True,
- host = addr, port = port))
+ ssl_key_file = xoptions.get_xend_tcp_xmlrpc_server_ssl_key_file()
+ ssl_cert_file = xoptions.get_xend_tcp_xmlrpc_server_ssl_cert_file()
+
+ if ssl_key_file and ssl_cert_file:
+ servers.add(XMLRPCServer(XendAPI.AUTH_PAM, False, use_tcp = True,
+ ssl_key_file = ssl_key_file,
+ ssl_cert_file = ssl_cert_file,
+ host = addr, port = port))
+ else:
+ servers.add(XMLRPCServer(XendAPI.AUTH_PAM, False, use_tcp = True,
+ host = addr, port = port))
if xoptions.get_xend_unix_xmlrpc_server():
servers.add(XMLRPCServer(XendAPI.AUTH_PAM, False))
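
For reference, the shape of the xen-api-server entries accepted by the loop
above can be summarised with a standalone sketch (the function name and the
sample values below are purely illustrative):

    def parse_api_server_entry(server_cfg):
        # Mirrors the per-entry parsing above and returns the connection
        # parameters that would be handed to XMLRPCServer().
        host, port, use_tcp = 'localhost', 0, False
        host_addr = server_cfg[0].split(':', 1)
        if len(host_addr) == 1 and host_addr[0].lower() == 'unix':
            pass                                   # Unix domain socket
        elif len(host_addr) == 1:
            use_tcp, port = True, int(host_addr[0])
        else:
            use_tcp, host, port = True, host_addr[0], int(host_addr[1])
        auth = len(server_cfg) > 1 and server_cfg[1] or 'none'
        ssl = len(server_cfg) > 4 and (server_cfg[3], server_cfg[4]) or None
        return host, port, use_tcp, auth, ssl

    # parse_api_server_entry(['9363', 'pam', '', 'srv.key', 'srv.crt'])
    #     -> ('localhost', 9363, True, 'pam', ('srv.key', 'srv.crt'))
    # parse_api_server_entry(['unix', 'none'])
    #     -> ('localhost', 0, False, 'none', None)
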
diff -r 14aeb7981e4e -r 0b2794d3320f tools/python/xen/xend/server/XMLRPCServer.py
--- a/tools/python/xen/xend/server/XMLRPCServer.py Tue Mar 27 12:21:48 2007 -0600
+++ b/tools/python/xen/xend/server/XMLRPCServer.py Wed Mar 28 10:38:41 2007 +0100
@@ -21,6 +21,11 @@ import types
import types
import xmlrpclib
from xen.util.xmlrpclib2 import UnixXMLRPCServer, TCPXMLRPCServer
+try:
+ from SSLXMLRPCServer import SSLXMLRPCServer
+ ssl_enabled = True
+except ImportError:
+ ssl_enabled = False
from xen.xend import XendAPI, XendDomain, XendDomainInfo, XendNode
from xen.xend import XendLogging, XendDmesg
@@ -87,14 +92,20 @@ exclude = ['domain_create', 'domain_rest
exclude = ['domain_create', 'domain_restore']
class XMLRPCServer:
- def __init__(self, auth, use_xenapi, use_tcp=False, host = "localhost",
- port = 8006, path = XML_RPC_SOCKET, hosts_allowed = None):
+ def __init__(self, auth, use_xenapi, use_tcp = False,
+ ssl_key_file = None, ssl_cert_file = None,
+ host = "localhost", port = 8006, path = XML_RPC_SOCKET,
+ hosts_allowed = None):
+
self.use_tcp = use_tcp
self.port = port
self.host = host
self.path = path
self.hosts_allowed = hosts_allowed
+ self.ssl_key_file = ssl_key_file
+ self.ssl_cert_file = ssl_cert_file
+
self.ready = False
self.running = True
self.auth = auth
@@ -107,14 +118,33 @@ class XMLRPCServer:
try:
if self.use_tcp:
- log.info("Opening TCP XML-RPC server on %s%d%s",
+ using_ssl = self.ssl_key_file and self.ssl_cert_file
+
+ log.info("Opening %s XML-RPC server on %s%d%s",
+ using_ssl and 'HTTPS' or 'TCP',
self.host and '%s:' % self.host or
'all interfaces, port ',
self.port, authmsg)
- self.server = TCPXMLRPCServer((self.host, self.port),
- self.hosts_allowed,
- self.xenapi is not None,
- logRequests = False)
+
+                if using_ssl and not ssl_enabled:
+ raise ValueError("pyOpenSSL not installed. "
+ "Unable to start HTTPS XML-RPC server")
+
+ if using_ssl:
+ self.server = SSLXMLRPCServer(
+ (self.host, self.port),
+ self.hosts_allowed,
+ self.xenapi is not None,
+ logRequests = False,
+ ssl_key_file = self.ssl_key_file,
+ ssl_cert_file = self.ssl_cert_file)
+ else:
+ self.server = TCPXMLRPCServer(
+ (self.host, self.port),
+ self.hosts_allowed,
+ self.xenapi is not None,
+ logRequests = False)
+
else:
log.info("Opening Unix domain socket XML-RPC server on %s%s",
self.path, authmsg)
@@ -126,7 +156,12 @@ class XMLRPCServer:
ready = True
running = False
return
-
+ except Exception, e:
+ log.exception('Cannot start server: %s!', e)
+ ready = True
+ running = False
+ return
+
# Register Xen API Functions
# -------------------------------------------------------------------
# exportable functions are ones that do not begin with '_'
diff -r 14aeb7981e4e -r 0b2794d3320f tools/python/xen/xm/XenAPI.py
--- a/tools/python/xen/xm/XenAPI.py Tue Mar 27 12:21:48 2007 -0600
+++ b/tools/python/xen/xm/XenAPI.py Wed Mar 28 10:38:41 2007 +0100
@@ -12,7 +12,7 @@
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
-# Copyright (C) 2006 XenSource Inc.
+# Copyright (C) 2006-2007 XenSource Inc.
#============================================================================
#
# Parts of this file are based upon xmlrpclib.py, the XML-RPC client
@@ -47,7 +47,7 @@ import gettext
import gettext
import xmlrpclib
-import xen.util.xmlrpclib2
+import xen.util.xmlrpcclient as xmlrpcclient
translation = gettext.translation('xen-xm', fallback = True)
@@ -85,7 +85,7 @@ _RECONNECT_AND_RETRY = (lambda _ : ())
_RECONNECT_AND_RETRY = (lambda _ : ())
-class Session(xen.util.xmlrpclib2.ServerProxy):
+class Session(xmlrpcclient.ServerProxy):
"""A server proxy and session manager for communicating with Xend using
the Xen-API.
@@ -104,13 +104,15 @@ class Session(xen.util.xmlrpclib2.Server
def __init__(self, uri, transport=None, encoding=None, verbose=0,
allow_none=1):
- xen.util.xmlrpclib2.ServerProxy.__init__(self, uri, transport,
- encoding, verbose,
- allow_none)
+ xmlrpcclient.ServerProxy.__init__(self, uri, transport, encoding,
+ verbose, allow_none)
self._session = None
self.last_login_method = None
self.last_login_params = None
+
+ def getSession(self):
+ return self._session
def xenapi_request(self, methodname, params):
if methodname.startswith('login'):
@@ -150,7 +152,7 @@ class Session(xen.util.xmlrpclib2.Server
elif name.startswith('login'):
return lambda *params: self._login(name, params)
else:
- return xen.util.xmlrpclib2.ServerProxy.__getattr__(self, name)
+ return xmlrpcclient.ServerProxy.__getattr__(self, name)
def _parse_result(result):
diff -r 14aeb7981e4e -r 0b2794d3320f tools/python/xen/xm/create.dtd
--- a/tools/python/xen/xm/create.dtd Tue Mar 27 12:21:48 2007 -0600
+++ b/tools/python/xen/xm/create.dtd Wed Mar 28 10:38:41 2007 +0100
@@ -111,7 +111,7 @@
<!ELEMENT other_config EMPTY>
<!ATTLIST other_config key CDATA #REQUIRED
- value CDATA #REQUIRED>
+ value CDATA #REQUIRED>
<!ELEMENT qos_algorithm_param EMPTY>
<!ATTLIST qos_algorithm_param key CDATA #REQUIRED
diff -r 14aeb7981e4e -r 0b2794d3320f tools/python/xen/xm/create.py
--- a/tools/python/xen/xm/create.py Tue Mar 27 12:21:48 2007 -0600
+++ b/tools/python/xen/xm/create.py Wed Mar 28 10:38:41 2007 +0100
@@ -103,6 +103,11 @@ gopts.opt('xmldryrun', short='x',
fn=set_true, default=0,
use="XML dry run - prints the resulting configuration in XML but "
"does not create the domain.")
+
+gopts.opt('skipdtd', short='s',
+ fn=set_true, default=0,
+          use="Skip DTD checking - skips checks on XML before creating. "
+              "Experimental. Can decrease create time.")
gopts.opt('paused', short='p',
fn=set_true, default=0,
@@ -1098,6 +1103,8 @@ def parseCommandLine(argv):
if not gopts.vals.xauthority:
gopts.vals.xauthority = get_xauthority()
+ gopts.is_xml = False
+
# Process remaining args as config variables.
for arg in args:
if '=' in arg:
@@ -1106,11 +1113,16 @@ def parseCommandLine(argv):
if gopts.vals.config:
config = gopts.vals.config
else:
- gopts.load_defconfig()
- preprocess(gopts.vals)
- if not gopts.getopt('name') and gopts.getopt('defconfig'):
- gopts.setopt('name', os.path.basename(gopts.getopt('defconfig')))
- config = make_config(gopts.vals)
+ try:
+ gopts.load_defconfig()
+ preprocess(gopts.vals)
+ if not gopts.getopt('name') and gopts.getopt('defconfig'):
+                gopts.setopt('name', os.path.basename(gopts.getopt('defconfig')))
+ config = make_config(gopts.vals)
+ except XMLFileError, ex:
+ XMLFile = ex.getFile()
+ gopts.is_xml = True
+ config = ex.getFile()
return (gopts, config)
@@ -1233,6 +1245,8 @@ def help():
return str(gopts)
def main(argv):
+ is_xml = False
+
try:
(opts, config) = parseCommandLine(argv)
except StandardError, ex:
@@ -1241,23 +1255,24 @@ def main(argv):
if not opts:
return
- if type(config) == str:
- try:
- config = sxp.parse(file(config))[0]
- except IOError, exn:
- raise OptionError("Cannot read file %s: %s" % (config, exn[1]))
-
- if serverType == SERVER_XEN_API:
- from xen.xm.xenapi_create import sxp2xml
- sxp2xml_inst = sxp2xml()
- doc = sxp2xml_inst.convert_sxp_to_xml(config, transient=True)
-
- if opts.vals.dryrun:
- SXPPrettyPrint.prettyprint(config)
-
- if opts.vals.xmldryrun and serverType == SERVER_XEN_API:
- from xml.dom.ext import PrettyPrint as XMLPrettyPrint
- XMLPrettyPrint(doc)
+ if not opts.is_xml:
+ if type(config) == str:
+ try:
+ config = sxp.parse(file(config))[0]
+ except IOError, exn:
+ raise OptionError("Cannot read file %s: %s" % (config, exn[1]))
+
+ if serverType == SERVER_XEN_API:
+ from xen.xm.xenapi_create import sxp2xml
+ sxp2xml_inst = sxp2xml()
+ doc = sxp2xml_inst.convert_sxp_to_xml(config, transient=True)
+
+ if opts.vals.dryrun and not opts.is_xml:
+ SXPPrettyPrint.prettyprint(config)
+
+ if opts.vals.xmldryrun and serverType == SERVER_XEN_API:
+ from xml.dom.ext import PrettyPrint as XMLPrettyPrint
+ XMLPrettyPrint(doc)
if opts.vals.dryrun or opts.vals.xmldryrun:
return
@@ -1268,10 +1283,15 @@ def main(argv):
if serverType == SERVER_XEN_API:
from xen.xm.xenapi_create import xenapi_create
xenapi_create_inst = xenapi_create()
- vm_refs = xenapi_create_inst.create(document = doc)
+ if opts.is_xml:
+ vm_refs = xenapi_create_inst.create(filename = config,
+ skipdtd = opts.vals.skipdtd)
+ else:
+ vm_refs = xenapi_create_inst.create(document = doc,
+ skipdtd = opts.vals.skipdtd)
map(lambda vm_ref: server.xenapi.VM.start(vm_ref, 0), vm_refs)
- else:
+ elif not opts.is_xml:
if not create_security_check(config):
raise security.ACMError(
'Security Configuration prevents domain from starting')
diff -r 14aeb7981e4e -r 0b2794d3320f tools/python/xen/xm/main.py
--- a/tools/python/xen/xm/main.py Tue Mar 27 12:21:48 2007 -0600
+++ b/tools/python/xen/xm/main.py Wed Mar 28 10:38:41 2007 +0100
@@ -49,7 +49,7 @@ from xen.xend.XendConstants import *
from xen.xm.opts import OptionError, Opts, wrap, set_true
from xen.xm import console
-from xen.util.xmlrpclib2 import ServerProxy
+from xen.util.xmlrpcclient import ServerProxy
import XenAPI
@@ -722,7 +722,7 @@ def getDomains(domain_names, state, full
states = ('running', 'blocked', 'paused', 'shutdown',
'crashed', 'dying')
def state_on_off(state):
- if dom_metrics['state'].find(state) > -1:
+ if state in dom_metrics['state']:
return state[0]
else:
return "-"
@@ -850,7 +850,8 @@ def parse_doms_info(info):
def check_sched_type(sched):
if serverType == SERVER_XEN_API:
-        current = server.xenapi.host.get_sched_policy(server.xenapi.session.get_this_host())
+ current = server.xenapi.host.get_sched_policy(
+ server.xenapi.session.get_this_host(server.getSession()))
else:
current = 'unknown'
for x in server.xend.node.info()[1:]:
@@ -952,12 +953,10 @@ def xm_vcpu_list(args):
['name', vm_records[vm_ref]['name_label']],
['vcpu_count', vm_records[vm_ref]['VCPUs_max']]]
-
-
for i in range(int(vm_records[vm_ref]['VCPUs_max'])):
def chk_flag(flag):
- return vm_metrics[vm_ref]['VCPUs_flags'][str(i)] \
- .find(flag) > -1 and 1 or 0
+ return flag in vm_metrics[vm_ref]['VCPUs_flags'][str(i)] \
+ and 1 or 0
vcpu_info = ['vcpu',
['number',
@@ -1044,7 +1043,7 @@ def xm_vcpu_list(args):
if serverType == SERVER_XEN_API:
nr_cpus = len(server.xenapi.host.get_host_CPUs(
- server.xenapi.session.get_this_host()))
+ server.xenapi.session.get_this_host(server.getSession())))
else:
for x in server.xend.node.info()[1:]:
if len(x) > 1 and x[0] == 'nr_cpus':
@@ -1260,8 +1259,9 @@ def xm_vcpu_pin(args):
cpumap = cpu_make_map(args[2])
if serverType == SERVER_XEN_API:
+ cpumap = map(str, cpumap)
server.xenapi.VM.add_to_VCPUs_params_live(
- get_single_vm(dom), "cpumap%i" % vcpu, ",".join(cpumap))
+ get_single_vm(dom), "cpumap%i" % int(vcpu), ",".join(cpumap))
else:
server.xend.domain.pincpu(dom, vcpu, cpumap)
@@ -1509,7 +1509,7 @@ def xm_info(args):
# Need to fake out old style xm info as people rely on parsing it
host_record = server.xenapi.host.get_record(
- server.xenapi.session.get_this_host())
+ server.xenapi.session.get_this_host(server.getSession()))
host_cpu_records = map(server.xenapi.host_cpu.get_record,
host_record["host_CPUs"])
@@ -1686,7 +1686,7 @@ def xm_debug_keys(args):
if serverType == SERVER_XEN_API:
server.xenapi.host.send_debug_keys(
- server.xenapi.session.get_this_host(),
+ server.xenapi.session.get_this_host(server.getSession()),
keys)
else:
server.xend.node.send_debug_keys(keys)
@@ -1715,7 +1715,7 @@ def xm_dmesg(args):
usage('dmesg')
if serverType == SERVER_XEN_API:
- host = server.xenapi.session.get_this_host()
+ host = server.xenapi.session.get_this_host(server.getSession())
if use_clear:
print server.xenapi.host.dmesg_clear(host),
else:
@@ -1731,7 +1731,7 @@ def xm_log(args):
if serverType == SERVER_XEN_API:
print server.xenapi.host.get_log(
- server.xenapi.session.get_this_host())
+ server.xenapi.session.get_this_host(server.getSession()))
else:
print server.xend.node.log()
@@ -2371,11 +2371,10 @@ def _run_cmd(cmd, cmd_name, args):
if isinstance(e, security.ACMError):
err(str(e))
return False, 1
- else:
- print "Unexpected error:", sys.exc_info()[0]
- print
- print "Please report to xen-devel@xxxxxxxxxxxxxxxxxxx"
- raise
+ print "Unexpected error:", sys.exc_info()[0]
+ print
+ print "Please report to xen-devel@xxxxxxxxxxxxxxxxxxx"
+ raise
return False, 1
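
Several call sites above now pass the session reference explicitly through the
new Session.getSession() accessor. A minimal client-side sketch (the URL and
credentials are invented for illustration and assume a Xen-API server is
listening):

    from xen.xm import XenAPI

    session = XenAPI.Session('http://localhost:9363/')
    session.login_with_password('me', 'secret')

    this_host = session.xenapi.session.get_this_host(session.getSession())
    print session.xenapi.host.get_record(this_host)['name_label']
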
diff -r 14aeb7981e4e -r 0b2794d3320f tools/python/xen/xm/opts.py
--- a/tools/python/xen/xm/opts.py Tue Mar 27 12:21:48 2007 -0600
+++ b/tools/python/xen/xm/opts.py Wed Mar 28 10:38:41 2007 +0100
@@ -24,6 +24,8 @@ import sys
import sys
import types
+
+
def _line_wrap(text, width = 70):
lines = []
current_line = ''
@@ -59,6 +61,15 @@ class OptionError(Exception):
self.usage = usage
def __str__(self):
return self.message
+
+class XMLFileError(Exception):
+    """Thrown if the input is an XML file"""
+ def __init__(self, XMLFile):
+ self.XMLFile = XMLFile
+ def __str__(self):
+ return "XMLFileError: %s" % self.XMLFile
+ def getFile(self):
+ return self.XMLFile
class Opt:
"""An individual option.
@@ -492,6 +503,14 @@ class Opts:
p = os.path.join(os.path.curdir, p)
if os.path.exists(p):
self.info('Using config file "%s".' % p)
+
+ f = open(p)
+ is_xml = (f.read(1) == '<')
+ f.close()
+
+ if is_xml:
+ raise XMLFileError(p)
+
self.load(p, help)
break
else:
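
The check added above decides between the classic Python/SXP config path and
the new XML path by sniffing the first byte of the file. A standalone
equivalent (the file name is hypothetical):

    def looks_like_xml(path):
        # An XML domain description starts with '<'; a Python/SXP config
        # file does not.
        f = open(path)
        try:
            return f.read(1) == '<'
        finally:
            f.close()

    # If looks_like_xml('/etc/xen/vm1.xml') is true, load_defconfig() now
    # raises XMLFileError(path) instead of exec'ing the file as Python.
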
diff -r 14aeb7981e4e -r 0b2794d3320f tools/python/xen/xm/xenapi_create.py
--- a/tools/python/xen/xm/xenapi_create.py Tue Mar 27 12:21:48 2007 -0600
+++ b/tools/python/xen/xm/xenapi_create.py Wed Mar 28 10:38:41 2007 +0100
@@ -75,15 +75,20 @@ class xenapi_create:
self.dtd = "/usr/lib/python/xen/xm/create.dtd"
- def create(self, filename=None, document=None):
+ def create(self, filename=None, document=None, skipdtd=False):
"""
Create a domain from an XML file or DOM tree
"""
+ if skipdtd:
+ print "Skipping DTD checks. Dangerous!"
+
if filename is not None:
- self.check_dtd(file)
- document = parse(file)
+ if not skipdtd:
+ self.check_dtd(filename)
+ document = parse(filename)
elif document is not None:
- self.check_dom_against_dtd(document)
+ if not skipdtd:
+ self.check_dom_against_dtd(document)
self.check_doc(document)
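
A sketch of how the reworked entry point might be called directly, as main()
in create.py now does, once a Xen-API connection is established (the file name
is hypothetical; skipdtd mirrors the new 'xm create -s' flag):

    from xen.xm.xenapi_create import xenapi_create

    creator = xenapi_create()
    # Parse and create (but do not start) the domains described in the XML
    # file, skipping DTD validation.
    vm_refs = creator.create(filename = '/etc/xen/vm1.xml', skipdtd = True)
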
diff -r 14aeb7981e4e -r 0b2794d3320f tools/xm-test/tests/destroy/06_destroy_dom0_neg.py
--- a/tools/xm-test/tests/destroy/06_destroy_dom0_neg.py Tue Mar 27 12:21:48 2007 -0600
+++ b/tools/xm-test/tests/destroy/06_destroy_dom0_neg.py Wed Mar 28 10:38:41 2007 +0100
@@ -10,5 +10,5 @@ status, output = traceCommand("xm destro
status, output = traceCommand("xm destroy 0")
if status == 0:
    FAIL("xm destroy returned bad status, expected non 0, status is: %i" % status)
-elif not re.search("Error", output):
+elif not re.search("Error", output, re.I):
    FAIL("xm destroy returned bad output, expected Error:, output is: %s" % output)
diff -r 14aeb7981e4e -r 0b2794d3320f xen/acm/acm_policy.c
--- a/xen/acm/acm_policy.c Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/acm/acm_policy.c Wed Mar 28 10:38:41 2007 +0100
@@ -62,6 +62,7 @@ do_acm_set_policy(void *buf, u32 buf_siz
do_acm_set_policy(void *buf, u32 buf_size)
{
struct acm_policy_buffer *pol = (struct acm_policy_buffer *)buf;
+ uint32_t offset, length;
/* some sanity checking */
if ((be32_to_cpu(pol->magic) != ACM_MAGIC) ||
(buf_size != be32_to_cpu(pol->len)) ||
@@ -92,22 +93,27 @@ do_acm_set_policy(void *buf, u32 buf_siz
/* get bin_policy lock and rewrite policy (release old one) */
write_lock(&acm_bin_pol_rwlock);
+ offset = be32_to_cpu(pol->policy_reference_offset);
+ length = be32_to_cpu(pol->primary_buffer_offset) - offset;
+
/* set label reference name */
-    if (acm_set_policy_reference(buf + be32_to_cpu(pol->policy_reference_offset),
- be32_to_cpu(pol->primary_buffer_offset) -
- be32_to_cpu(pol->policy_reference_offset)))
+ if ( (offset + length) > buf_size ||
+ acm_set_policy_reference(buf + offset, length))
goto error_lock_free;
/* set primary policy data */
-    if (acm_primary_ops->set_binary_policy(buf + be32_to_cpu(pol->primary_buffer_offset),
-                                            be32_to_cpu(pol->secondary_buffer_offset) -
-                                            be32_to_cpu(pol->primary_buffer_offset)))
+ offset = be32_to_cpu(pol->primary_buffer_offset);
+ length = be32_to_cpu(pol->secondary_buffer_offset) - offset;
+
+ if ( (offset + length) > buf_size ||
+ acm_primary_ops->set_binary_policy(buf + offset, length))
goto error_lock_free;
/* set secondary policy data */
-    if (acm_secondary_ops->set_binary_policy(buf + be32_to_cpu(pol->secondary_buffer_offset),
-                                              be32_to_cpu(pol->len) -
-                                              be32_to_cpu(pol->secondary_buffer_offset)))
+ offset = be32_to_cpu(pol->secondary_buffer_offset);
+ length = be32_to_cpu(pol->len) - offset;
+ if ( (offset + length) > buf_size ||
+ acm_secondary_ops->set_binary_policy(buf + offset, length))
goto error_lock_free;
write_unlock(&acm_bin_pol_rwlock);
diff -r 14aeb7981e4e -r 0b2794d3320f xen/arch/ia64/asm-offsets.c
--- a/xen/arch/ia64/asm-offsets.c Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/arch/ia64/asm-offsets.c Wed Mar 28 10:38:41 2007 +0100
@@ -223,10 +223,11 @@ void foo(void)
#ifdef PERF_COUNTERS
BLANK();
-    DEFINE(RECOVER_TO_PAGE_FAULT_PERFC_OFS, offsetof (struct perfcounter, recover_to_page_fault));
-    DEFINE(RECOVER_TO_BREAK_FAULT_PERFC_OFS, offsetof (struct perfcounter, recover_to_break_fault));
-    DEFINE(FAST_HYPERPRIVOP_PERFC_OFS, offsetof (struct perfcounter, fast_hyperprivop));
-    DEFINE(FAST_REFLECT_PERFC_OFS, offsetof (struct perfcounter, fast_reflect));
+ DEFINE(IA64_PERFC_recover_to_page_fault, PERFC_recover_to_page_fault);
+ DEFINE(IA64_PERFC_recover_to_break_fault, PERFC_recover_to_break_fault);
+ DEFINE(IA64_PERFC_fast_vhpt_translate, PERFC_fast_vhpt_translate);
+ DEFINE(IA64_PERFC_fast_hyperprivop, PERFC_fast_hyperprivop);
+ DEFINE(IA64_PERFC_fast_reflect, PERFC_fast_reflect);
#endif
BLANK();
diff -r 14aeb7981e4e -r 0b2794d3320f xen/arch/ia64/linux-xen/irq_ia64.c
--- a/xen/arch/ia64/linux-xen/irq_ia64.c Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/arch/ia64/linux-xen/irq_ia64.c Wed Mar 28 10:38:41 2007 +0100
@@ -113,7 +113,7 @@ ia64_handle_irq (ia64_vector vector, str
unsigned long saved_tpr;
#ifdef XEN
- perfc_incrc(irqs);
+ perfc_incr(irqs);
#endif
#if IRQ_DEBUG
#ifdef XEN
diff -r 14aeb7981e4e -r 0b2794d3320f xen/arch/ia64/linux-xen/mca.c
--- a/xen/arch/ia64/linux-xen/mca.c Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/arch/ia64/linux-xen/mca.c Wed Mar 28 10:38:41 2007 +0100
@@ -396,16 +396,6 @@ ia64_log_queue(int sal_info_type, int vi
#ifdef CONFIG_ACPI
#ifdef XEN
-/**
- * Copy from linux/include/asm-generic/bug.h
- */
-#define WARN_ON(condition) do { \
- if (unlikely((condition)!=0)) { \
-        printk("Badness in %s at %s:%d\n", __FUNCTION__, __FILE__, __LINE__); \
- dump_stack(); \
- } \
-} while (0)
-
/**
* Copy from linux/kernel/irq/manage.c
*
diff -r 14aeb7981e4e -r 0b2794d3320f xen/arch/ia64/linux-xen/smp.c
--- a/xen/arch/ia64/linux-xen/smp.c Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/arch/ia64/linux-xen/smp.c Wed Mar 28 10:38:41 2007 +0100
@@ -148,7 +148,7 @@ handle_IPI (int irq, void *dev_id, struc
unsigned long ops;
#ifdef XEN
- perfc_incrc(ipis);
+ perfc_incr(ipis);
#endif
mb(); /* Order interrupt and bit testing. */
while ((ops = xchg(pending_ipis, 0)) != 0) {
diff -r 14aeb7981e4e -r 0b2794d3320f xen/arch/ia64/vmx/pal_emul.c
--- a/xen/arch/ia64/vmx/pal_emul.c Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/arch/ia64/vmx/pal_emul.c Wed Mar 28 10:38:41 2007 +0100
@@ -37,7 +37,7 @@ pal_emul(struct vcpu *vcpu)
vcpu_get_gr_nat(vcpu, 30, &gr30);
vcpu_get_gr_nat(vcpu, 31, &gr31);
- perfc_incrc(vmx_pal_emul);
+ perfc_incr(vmx_pal_emul);
result = xen_pal_emulator(gr28, gr29, gr30, gr31);
vcpu_set_gr(vcpu, 8, result.status, 0);
diff -r 14aeb7981e4e -r 0b2794d3320f xen/arch/ia64/vmx/vmx_process.c
--- a/xen/arch/ia64/vmx/vmx_process.c Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/arch/ia64/vmx/vmx_process.c Wed Mar 28 10:38:41 2007 +0100
@@ -151,7 +151,7 @@ vmx_ia64_handle_break (unsigned long ifa
struct domain *d = current->domain;
struct vcpu *v = current;
- perfc_incrc(vmx_ia64_handle_break);
+ perfc_incr(vmx_ia64_handle_break);
#ifdef CRASH_DEBUG
if ((iim == 0 || iim == CDB_BREAK_NUM) && !user_mode(regs) &&
IS_VMM_ADDRESS(regs->cr_iip)) {
diff -r 14aeb7981e4e -r 0b2794d3320f xen/arch/ia64/vmx/vmx_virt.c
--- a/xen/arch/ia64/vmx/vmx_virt.c Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/arch/ia64/vmx/vmx_virt.c Wed Mar 28 10:38:41 2007 +0100
@@ -1401,159 +1401,159 @@ if ( (cause == 0xff && opcode == 0x1e000
switch(cause) {
case EVENT_RSM:
- perfc_incrc(vmx_rsm);
+ perfc_incr(vmx_rsm);
status=vmx_emul_rsm(vcpu, inst);
break;
case EVENT_SSM:
- perfc_incrc(vmx_ssm);
+ perfc_incr(vmx_ssm);
status=vmx_emul_ssm(vcpu, inst);
break;
case EVENT_MOV_TO_PSR:
- perfc_incrc(vmx_mov_to_psr);
+ perfc_incr(vmx_mov_to_psr);
status=vmx_emul_mov_to_psr(vcpu, inst);
break;
case EVENT_MOV_FROM_PSR:
- perfc_incrc(vmx_mov_from_psr);
+ perfc_incr(vmx_mov_from_psr);
status=vmx_emul_mov_from_psr(vcpu, inst);
break;
case EVENT_MOV_FROM_CR:
- perfc_incrc(vmx_mov_from_cr);
+ perfc_incr(vmx_mov_from_cr);
status=vmx_emul_mov_from_cr(vcpu, inst);
break;
case EVENT_MOV_TO_CR:
- perfc_incrc(vmx_mov_to_cr);
+ perfc_incr(vmx_mov_to_cr);
status=vmx_emul_mov_to_cr(vcpu, inst);
break;
case EVENT_BSW_0:
- perfc_incrc(vmx_bsw0);
+ perfc_incr(vmx_bsw0);
status=vmx_emul_bsw0(vcpu, inst);
break;
case EVENT_BSW_1:
- perfc_incrc(vmx_bsw1);
+ perfc_incr(vmx_bsw1);
status=vmx_emul_bsw1(vcpu, inst);
break;
case EVENT_COVER:
- perfc_incrc(vmx_cover);
+ perfc_incr(vmx_cover);
status=vmx_emul_cover(vcpu, inst);
break;
case EVENT_RFI:
- perfc_incrc(vmx_rfi);
+ perfc_incr(vmx_rfi);
status=vmx_emul_rfi(vcpu, inst);
break;
case EVENT_ITR_D:
- perfc_incrc(vmx_itr_d);
+ perfc_incr(vmx_itr_d);
status=vmx_emul_itr_d(vcpu, inst);
break;
case EVENT_ITR_I:
- perfc_incrc(vmx_itr_i);
+ perfc_incr(vmx_itr_i);
status=vmx_emul_itr_i(vcpu, inst);
break;
case EVENT_PTR_D:
- perfc_incrc(vmx_ptr_d);
+ perfc_incr(vmx_ptr_d);
status=vmx_emul_ptr_d(vcpu, inst);
break;
case EVENT_PTR_I:
- perfc_incrc(vmx_ptr_i);
+ perfc_incr(vmx_ptr_i);
status=vmx_emul_ptr_i(vcpu, inst);
break;
case EVENT_ITC_D:
- perfc_incrc(vmx_itc_d);
+ perfc_incr(vmx_itc_d);
status=vmx_emul_itc_d(vcpu, inst);
break;
case EVENT_ITC_I:
- perfc_incrc(vmx_itc_i);
+ perfc_incr(vmx_itc_i);
status=vmx_emul_itc_i(vcpu, inst);
break;
case EVENT_PTC_L:
- perfc_incrc(vmx_ptc_l);
+ perfc_incr(vmx_ptc_l);
status=vmx_emul_ptc_l(vcpu, inst);
break;
case EVENT_PTC_G:
- perfc_incrc(vmx_ptc_g);
+ perfc_incr(vmx_ptc_g);
status=vmx_emul_ptc_g(vcpu, inst);
break;
case EVENT_PTC_GA:
- perfc_incrc(vmx_ptc_ga);
+ perfc_incr(vmx_ptc_ga);
status=vmx_emul_ptc_ga(vcpu, inst);
break;
case EVENT_PTC_E:
- perfc_incrc(vmx_ptc_e);
+ perfc_incr(vmx_ptc_e);
status=vmx_emul_ptc_e(vcpu, inst);
break;
case EVENT_MOV_TO_RR:
- perfc_incrc(vmx_mov_to_rr);
+ perfc_incr(vmx_mov_to_rr);
status=vmx_emul_mov_to_rr(vcpu, inst);
break;
case EVENT_MOV_FROM_RR:
- perfc_incrc(vmx_mov_from_rr);
+ perfc_incr(vmx_mov_from_rr);
status=vmx_emul_mov_from_rr(vcpu, inst);
break;
case EVENT_THASH:
- perfc_incrc(vmx_thash);
+ perfc_incr(vmx_thash);
status=vmx_emul_thash(vcpu, inst);
break;
case EVENT_TTAG:
- perfc_incrc(vmx_ttag);
+ perfc_incr(vmx_ttag);
status=vmx_emul_ttag(vcpu, inst);
break;
case EVENT_TPA:
- perfc_incrc(vmx_tpa);
+ perfc_incr(vmx_tpa);
status=vmx_emul_tpa(vcpu, inst);
break;
case EVENT_TAK:
- perfc_incrc(vmx_tak);
+ perfc_incr(vmx_tak);
status=vmx_emul_tak(vcpu, inst);
break;
case EVENT_MOV_TO_AR_IMM:
- perfc_incrc(vmx_mov_to_ar_imm);
+ perfc_incr(vmx_mov_to_ar_imm);
status=vmx_emul_mov_to_ar_imm(vcpu, inst);
break;
case EVENT_MOV_TO_AR:
- perfc_incrc(vmx_mov_to_ar_reg);
+ perfc_incr(vmx_mov_to_ar_reg);
status=vmx_emul_mov_to_ar_reg(vcpu, inst);
break;
case EVENT_MOV_FROM_AR:
- perfc_incrc(vmx_mov_from_ar_reg);
+ perfc_incr(vmx_mov_from_ar_reg);
status=vmx_emul_mov_from_ar_reg(vcpu, inst);
break;
case EVENT_MOV_TO_DBR:
- perfc_incrc(vmx_mov_to_dbr);
+ perfc_incr(vmx_mov_to_dbr);
status=vmx_emul_mov_to_dbr(vcpu, inst);
break;
case EVENT_MOV_TO_IBR:
- perfc_incrc(vmx_mov_to_ibr);
+ perfc_incr(vmx_mov_to_ibr);
status=vmx_emul_mov_to_ibr(vcpu, inst);
break;
case EVENT_MOV_TO_PMC:
- perfc_incrc(vmx_mov_to_pmc);
+ perfc_incr(vmx_mov_to_pmc);
status=vmx_emul_mov_to_pmc(vcpu, inst);
break;
case EVENT_MOV_TO_PMD:
- perfc_incrc(vmx_mov_to_pmd);
+ perfc_incr(vmx_mov_to_pmd);
status=vmx_emul_mov_to_pmd(vcpu, inst);
break;
case EVENT_MOV_TO_PKR:
- perfc_incrc(vmx_mov_to_pkr);
+ perfc_incr(vmx_mov_to_pkr);
status=vmx_emul_mov_to_pkr(vcpu, inst);
break;
case EVENT_MOV_FROM_DBR:
- perfc_incrc(vmx_mov_from_dbr);
+ perfc_incr(vmx_mov_from_dbr);
status=vmx_emul_mov_from_dbr(vcpu, inst);
break;
case EVENT_MOV_FROM_IBR:
- perfc_incrc(vmx_mov_from_ibr);
+ perfc_incr(vmx_mov_from_ibr);
status=vmx_emul_mov_from_ibr(vcpu, inst);
break;
case EVENT_MOV_FROM_PMC:
- perfc_incrc(vmx_mov_from_pmc);
+ perfc_incr(vmx_mov_from_pmc);
status=vmx_emul_mov_from_pmc(vcpu, inst);
break;
case EVENT_MOV_FROM_PKR:
- perfc_incrc(vmx_mov_from_pkr);
+ perfc_incr(vmx_mov_from_pkr);
status=vmx_emul_mov_from_pkr(vcpu, inst);
break;
case EVENT_MOV_FROM_CPUID:
- perfc_incrc(vmx_mov_from_cpuid);
+ perfc_incr(vmx_mov_from_cpuid);
status=vmx_emul_mov_from_cpuid(vcpu, inst);
break;
case EVENT_VMSW:
diff -r 14aeb7981e4e -r 0b2794d3320f xen/arch/ia64/xen/dom0_ops.c
--- a/xen/arch/ia64/xen/dom0_ops.c Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/arch/ia64/xen/dom0_ops.c Wed Mar 28 10:38:41 2007 +0100
@@ -372,7 +372,7 @@ do_dom0vp_op(unsigned long cmd,
} else {
ret = (ret & _PFN_MASK) >> PAGE_SHIFT;//XXX pte_pfn()
}
- perfc_incrc(dom0vp_phystomach);
+ perfc_incr(dom0vp_phystomach);
break;
case IA64_DOM0VP_machtophys:
if (!mfn_valid(arg0)) {
@@ -380,7 +380,7 @@ do_dom0vp_op(unsigned long cmd,
break;
}
ret = get_gpfn_from_mfn(arg0);
- perfc_incrc(dom0vp_machtophys);
+ perfc_incr(dom0vp_machtophys);
break;
case IA64_DOM0VP_zap_physmap:
ret = dom0vp_zap_physmap(d, arg0, (unsigned int)arg1);
diff -r 14aeb7981e4e -r 0b2794d3320f xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/arch/ia64/xen/domain.c Wed Mar 28 10:38:41 2007 +0100
@@ -131,11 +131,11 @@ static void flush_vtlb_for_context_switc
if (vhpt_is_flushed || NEED_FLUSH(__get_cpu_var(tlbflush_time),
last_tlbflush_timestamp)) {
local_flush_tlb_all();
- perfc_incrc(tlbflush_clock_cswitch_purge);
+ perfc_incr(tlbflush_clock_cswitch_purge);
} else {
- perfc_incrc(tlbflush_clock_cswitch_skip);
- }
- perfc_incrc(flush_vtlb_for_context_switch);
+ perfc_incr(tlbflush_clock_cswitch_skip);
+ }
+ perfc_incr(flush_vtlb_for_context_switch);
}
}
diff -r 14aeb7981e4e -r 0b2794d3320f xen/arch/ia64/xen/faults.c
--- a/xen/arch/ia64/xen/faults.c Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/arch/ia64/xen/faults.c Wed Mar 28 10:38:41 2007 +0100
@@ -187,7 +187,7 @@ static int handle_lazy_cover(struct vcpu
if (!PSCB(v, interrupt_collection_enabled)) {
PSCB(v, ifs) = regs->cr_ifs;
regs->cr_ifs = 0;
- perfc_incrc(lazy_cover);
+ perfc_incr(lazy_cover);
return 1; // retry same instruction with cr.ifs off
}
return 0;
diff -r 14aeb7981e4e -r 0b2794d3320f xen/arch/ia64/xen/hypercall.c
--- a/xen/arch/ia64/xen/hypercall.c Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/arch/ia64/xen/hypercall.c Wed Mar 28 10:38:41 2007 +0100
@@ -161,7 +161,7 @@ ia64_hypercall(struct pt_regs *regs)
if (regs->r28 == PAL_HALT_LIGHT) {
if (vcpu_deliverable_interrupts(v) ||
event_pending(v)) {
- perfc_incrc(idle_when_pending);
+ perfc_incr(idle_when_pending);
vcpu_pend_unspecified_interrupt(v);
//printk("idle w/int#%d pending!\n",pi);
//this shouldn't happen, but it apparently does quite a bit! so don't
@@ -170,7 +170,7 @@ ia64_hypercall(struct pt_regs *regs)
//as deliver_pending_interrupt is called on the way out and will deliver it
}
else {
- perfc_incrc(pal_halt_light);
+ perfc_incr(pal_halt_light);
migrate_timer(&v->arch.hlt_timer,
v->processor);
set_timer(&v->arch.hlt_timer,
diff -r 14aeb7981e4e -r 0b2794d3320f xen/arch/ia64/xen/hyperprivop.S
--- a/xen/arch/ia64/xen/hyperprivop.S Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/arch/ia64/xen/hyperprivop.S Wed Mar 28 10:38:41 2007 +0100
@@ -26,8 +26,7 @@
# define FAST_HYPERPRIVOPS
# ifdef PERF_COUNTERS
# define FAST_HYPERPRIVOP_CNT
-# define FAST_HYPERPRIVOP_PERFC(N) \
- (perfcounters + FAST_HYPERPRIVOP_PERFC_OFS + (4 * N))
+# define FAST_HYPERPRIVOP_PERFC(N) PERFC(fast_hyperprivop + N)
# define FAST_REFLECT_CNT
# endif
@@ -364,7 +363,7 @@ GLOBAL_ENTRY(fast_tick_reflect)
mov rp=r29;;
mov cr.itm=r26;; // ensure next tick
#ifdef FAST_REFLECT_CNT
- movl r20=perfcounters+FAST_REFLECT_PERFC_OFS+((0x3000>>8)*4);;
+ movl r20=PERFC(fast_reflect + (0x3000>>8));;
ld4 r21=[r20];;
adds r21=1,r21;;
st4 [r20]=r21;;
@@ -597,7 +596,7 @@ END(fast_break_reflect)
// r31 == pr
ENTRY(fast_reflect)
#ifdef FAST_REFLECT_CNT
- movl r22=perfcounters+FAST_REFLECT_PERFC_OFS;
+ movl r22=PERFC(fast_reflect);
shr r23=r20,8-2;;
add r22=r22,r23;;
ld4 r21=[r22];;
@@ -938,7 +937,7 @@ 1: // check the guest VHPT
(p7) br.cond.spnt.few page_not_present;;
#ifdef FAST_REFLECT_CNT
- movl r21=perfcounter+FAST_VHPT_TRANSLATE_PERFC_OFS;;
+ movl r21=PERFC(fast_vhpt_translate);;
ld4 r22=[r21];;
adds r22=1,r22;;
st4 [r21]=r22;;
@@ -968,7 +967,7 @@ END(fast_tlb_miss_reflect)
// we get here if fast_insert fails (e.g. due to metaphysical lookup)
ENTRY(recover_and_page_fault)
#ifdef PERF_COUNTERS
- movl r21=perfcounters + RECOVER_TO_PAGE_FAULT_PERFC_OFS;;
+ movl r21=PERFC(recover_to_page_fault);;
ld4 r22=[r21];;
adds r22=1,r22;;
st4 [r21]=r22;;
@@ -1832,7 +1831,7 @@ END(hyper_ptc_ga)
// recovery block for hyper_itc metaphysical memory lookup
ENTRY(recover_and_dispatch_break_fault)
#ifdef PERF_COUNTERS
- movl r21=perfcounters + RECOVER_TO_BREAK_FAULT_PERFC_OFS;;
+ movl r21=PERFC(recover_to_break_fault);;
ld4 r22=[r21];;
adds r22=1,r22;;
st4 [r21]=r22;;
diff -r 14aeb7981e4e -r 0b2794d3320f xen/arch/ia64/xen/mm.c
--- a/xen/arch/ia64/xen/mm.c Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/arch/ia64/xen/mm.c Wed Mar 28 10:38:41 2007 +0100
@@ -1131,7 +1131,7 @@ assign_domain_page_replace(struct domain
domain_put_page(d, mpaddr, pte, old_pte, 1);
}
}
- perfc_incrc(assign_domain_page_replace);
+ perfc_incr(assign_domain_page_replace);
}
// caller must get_page(new_page) before
@@ -1207,7 +1207,7 @@ assign_domain_page_cmpxchg_rel(struct do
}
domain_page_flush_and_put(d, mpaddr, pte, old_pte, old_page);
- perfc_incrc(assign_domain_pge_cmpxchg_rel);
+ perfc_incr(assign_domain_pge_cmpxchg_rel);
return 0;
}
@@ -1266,7 +1266,7 @@ zap_domain_page_one(struct domain *d, un
BUG_ON(clear_PGC_allocate && (page_get_owner(page) == NULL));
domain_put_page(d, mpaddr, pte, old_pte, clear_PGC_allocate);
- perfc_incrc(zap_dcomain_page_one);
+ perfc_incr(zap_dcomain_page_one);
}
unsigned long
@@ -1279,7 +1279,7 @@ dom0vp_zap_physmap(struct domain *d, uns
}
zap_domain_page_one(d, gpfn << PAGE_SHIFT, 1, INVALID_MFN);
- perfc_incrc(dom0vp_zap_physmap);
+ perfc_incr(dom0vp_zap_physmap);
return 0;
}
@@ -1333,7 +1333,7 @@ __dom0vp_add_physmap(struct domain* d, u
get_gpfn_from_mfn(mfn) != INVALID_M2P_ENTRY);
assign_domain_page_replace(d, gpfn << PAGE_SHIFT, mfn, flags);
//don't update p2m table because this page belongs to rd, not d.
- perfc_incrc(dom0vp_add_physmap);
+ perfc_incr(dom0vp_add_physmap);
out1:
put_domain(rd);
return error;
@@ -1503,7 +1503,7 @@ create_grant_host_mapping(unsigned long
#endif
((flags & GNTMAP_readonly) ?
ASSIGN_readonly : ASSIGN_writable));
- perfc_incrc(create_grant_host_mapping);
+ perfc_incr(create_grant_host_mapping);
return GNTST_okay;
}
@@ -1568,7 +1568,7 @@ destroy_grant_host_mapping(unsigned long
BUG_ON(pte_pgc_allocated(old_pte));
domain_page_flush_and_put(d, gpaddr, pte, old_pte, page);
- perfc_incrc(destroy_grant_host_mapping);
+ perfc_incr(destroy_grant_host_mapping);
return GNTST_okay;
}
@@ -1629,7 +1629,7 @@ steal_page(struct domain *d, struct page
free_domheap_page(new);
return -1;
}
- perfc_incrc(steal_page_refcount);
+ perfc_incr(steal_page_refcount);
}
spin_lock(&d->page_alloc_lock);
@@ -1693,7 +1693,7 @@ steal_page(struct domain *d, struct page
list_del(&page->list);
spin_unlock(&d->page_alloc_lock);
- perfc_incrc(steal_page);
+ perfc_incr(steal_page);
return 0;
}
@@ -1710,7 +1710,7 @@ guest_physmap_add_page(struct domain *d,
    //BUG_ON(mfn != ((lookup_domain_mpa(d, gpfn << PAGE_SHIFT) & _PFN_MASK) >> PAGE_SHIFT));
- perfc_incrc(guest_physmap_add_page);
+ perfc_incr(guest_physmap_add_page);
}
void
@@ -1719,7 +1719,7 @@ guest_physmap_remove_page(struct domain
{
BUG_ON(mfn == 0);//XXX
zap_domain_page_one(d, gpfn << PAGE_SHIFT, 0, mfn);
- perfc_incrc(guest_physmap_remove_page);
+ perfc_incr(guest_physmap_remove_page);
}
static void
@@ -1799,7 +1799,7 @@ domain_page_flush_and_put(struct domain*
break;
}
#endif
- perfc_incrc(domain_page_flush_and_put);
+ perfc_incr(domain_page_flush_and_put);
}
int
@@ -1996,7 +1996,7 @@ int get_page_type(struct page_info *page
if ( unlikely(!cpus_empty(mask)) )
{
- perfc_incrc(need_flush_tlb_flush);
+ perfc_incr(need_flush_tlb_flush);
flush_tlb_mask(mask);
}
diff -r 14aeb7981e4e -r 0b2794d3320f xen/arch/ia64/xen/privop.c
--- a/xen/arch/ia64/xen/privop.c Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/arch/ia64/xen/privop.c Wed Mar 28 10:38:41 2007 +0100
@@ -641,15 +641,15 @@ static IA64FAULT priv_handle_op(VCPU * v
if (inst.M29.x3 != 0)
break;
if (inst.M30.x4 == 8 && inst.M30.x2 == 2) {
- perfc_incrc(mov_to_ar_imm);
+ perfc_incr(mov_to_ar_imm);
return priv_mov_to_ar_imm(vcpu, inst);
}
if (inst.M44.x4 == 6) {
- perfc_incrc(ssm);
+ perfc_incr(ssm);
return priv_ssm(vcpu, inst);
}
if (inst.M44.x4 == 7) {
- perfc_incrc(rsm);
+ perfc_incr(rsm);
return priv_rsm(vcpu, inst);
}
break;
@@ -658,9 +658,9 @@ static IA64FAULT priv_handle_op(VCPU * v
x6 = inst.M29.x6;
if (x6 == 0x2a) {
if (privify_en && inst.M29.r2 > 63 && inst.M29.ar3 < 8)
-                perfc_incrc(mov_from_ar);    // privified mov from kr
+                perfc_incr(mov_from_ar);    // privified mov from kr
else
- perfc_incrc(mov_to_ar_reg);
+ perfc_incr(mov_to_ar_reg);
return priv_mov_to_ar_reg(vcpu, inst);
}
if (inst.M29.x3 != 0)
@@ -676,9 +676,9 @@ static IA64FAULT priv_handle_op(VCPU * v
}
}
if (privify_en && x6 == 52 && inst.M28.r3 > 63)
- perfc_incrc(fc);
+ perfc_incr(fc);
else if (privify_en && x6 == 16 && inst.M43.r3 > 63)
- perfc_incrc(cpuid);
+ perfc_incr(cpuid);
else
perfc_incra(misc_privop, x6);
return (*pfunc) (vcpu, inst);
@@ -688,23 +688,23 @@ static IA64FAULT priv_handle_op(VCPU * v
break;
if (inst.B8.x6 == 0x08) {
IA64FAULT fault;
- perfc_incrc(rfi);
+ perfc_incr(rfi);
fault = priv_rfi(vcpu, inst);
if (fault == IA64_NO_FAULT)
fault = IA64_RFI_IN_PROGRESS;
return fault;
}
if (inst.B8.x6 == 0x0c) {
- perfc_incrc(bsw0);
+ perfc_incr(bsw0);
return priv_bsw0(vcpu, inst);
}
if (inst.B8.x6 == 0x0d) {
- perfc_incrc(bsw1);
+ perfc_incr(bsw1);
return priv_bsw1(vcpu, inst);
}
if (inst.B8.x6 == 0x0) {
// break instr for privified cover
- perfc_incrc(cover);
+ perfc_incr(cover);
return priv_cover(vcpu, inst);
}
break;
@@ -713,7 +713,7 @@ static IA64FAULT priv_handle_op(VCPU * v
break;
#if 0
if (inst.I26.x6 == 0 && inst.I26.x3 == 0) {
- perfc_incrc(cover);
+ perfc_incr(cover);
return priv_cover(vcpu, inst);
}
#endif
@@ -721,13 +721,13 @@ static IA64FAULT priv_handle_op(VCPU * v
break; // I26.x3 == I27.x3
if (inst.I26.x6 == 0x2a) {
if (privify_en && inst.I26.r2 > 63 && inst.I26.ar3 < 8)
-                perfc_incrc(mov_from_ar);    // privified mov from kr
+                perfc_incr(mov_from_ar);    // privified mov from kr
else
- perfc_incrc(mov_to_ar_reg);
+ perfc_incr(mov_to_ar_reg);
return priv_mov_to_ar_reg(vcpu, inst);
}
if (inst.I27.x6 == 0x0a) {
- perfc_incrc(mov_to_ar_imm);
+ perfc_incr(mov_to_ar_imm);
return priv_mov_to_ar_imm(vcpu, inst);
}
break;
diff -r 14aeb7981e4e -r 0b2794d3320f xen/arch/ia64/xen/privop_stat.c
--- a/xen/arch/ia64/xen/privop_stat.c Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/arch/ia64/xen/privop_stat.c Wed Mar 28 10:38:41 2007 +0100
@@ -10,48 +10,39 @@ struct privop_addr_count {
unsigned long addr[PRIVOP_COUNT_NADDRS];
unsigned int count[PRIVOP_COUNT_NADDRS];
unsigned int overflow;
- atomic_t *perfc_addr;
- atomic_t *perfc_count;
- atomic_t *perfc_overflow;
};
-#undef PERFCOUNTER
+struct privop_addr_info {
+ enum perfcounter perfc_addr;
+ enum perfcounter perfc_count;
+ enum perfcounter perfc_overflow;
+};
+
#define PERFCOUNTER(var, name)
-
-#undef PERFCOUNTER_CPU
-#define PERFCOUNTER_CPU(var, name)
-
-#undef PERFCOUNTER_ARRAY
#define PERFCOUNTER_ARRAY(var, name, size)
-#undef PERFSTATUS
#define PERFSTATUS(var, name)
-
-#undef PERFSTATUS_CPU
-#define PERFSTATUS_CPU(var, name)
-
-#undef PERFSTATUS_ARRAY
#define PERFSTATUS_ARRAY(var, name, size)
-#undef PERFPRIVOPADDR
#define PERFPRIVOPADDR(name) \
{ \
- { 0 }, { 0 }, 0, \
- perfcounters.privop_addr_##name##_addr, \
- perfcounters.privop_addr_##name##_count, \
- perfcounters.privop_addr_##name##_overflow \
+ PERFC_privop_addr_##name##_addr, \
+ PERFC_privop_addr_##name##_count, \
+ PERFC_privop_addr_##name##_overflow \
},
-static struct privop_addr_count privop_addr_counter[] = {
+static const struct privop_addr_info privop_addr_info[] = {
#include <asm/perfc_defn.h>
};
#define PRIVOP_COUNT_NINSTS \
- (sizeof(privop_addr_counter) / sizeof(privop_addr_counter[0]))
+ (sizeof(privop_addr_info) / sizeof(privop_addr_info[0]))
+
+static DEFINE_PER_CPU(struct privop_addr_count[PRIVOP_COUNT_NINSTS], privop_addr_counter);
void privop_count_addr(unsigned long iip, enum privop_inst inst)
{
- struct privop_addr_count *v = &privop_addr_counter[inst];
+ struct privop_addr_count *v = this_cpu(privop_addr_counter) + inst;
int i;
if (inst >= PRIVOP_COUNT_NINSTS)
@@ -72,31 +63,44 @@ void privop_count_addr(unsigned long iip
void gather_privop_addrs(void)
{
- int i, j;
- atomic_t *v;
- for (i = 0; i < PRIVOP_COUNT_NINSTS; i++) {
- /* Note: addresses are truncated! */
- v = privop_addr_counter[i].perfc_addr;
- for (j = 0; j < PRIVOP_COUNT_NADDRS; j++)
- atomic_set(&v[j], privop_addr_counter[i].addr[j]);
+ unsigned int cpu;
- v = privop_addr_counter[i].perfc_count;
- for (j = 0; j < PRIVOP_COUNT_NADDRS; j++)
- atomic_set(&v[j], privop_addr_counter[i].count[j]);
+ for_each_cpu ( cpu ) {
+ perfc_t *perfcounters = per_cpu(perfcounters, cpu);
+ struct privop_addr_count *s = per_cpu(privop_addr_counter, cpu);
+ int i, j;
+
+ for (i = 0; i < PRIVOP_COUNT_NINSTS; i++, s++) {
+ perfc_t *d;
+
+ /* Note: addresses are truncated! */
+ d = perfcounters + privop_addr_info[i].perfc_addr;
+ for (j = 0; j < PRIVOP_COUNT_NADDRS; j++)
+ d[j] = s->addr[j];
+
+ d = perfcounters + privop_addr_info[i].perfc_count;
+ for (j = 0; j < PRIVOP_COUNT_NADDRS; j++)
+ d[j] = s->count[j];
- atomic_set(privop_addr_counter[i].perfc_overflow,
- privop_addr_counter[i].overflow);
+ perfcounters[privop_addr_info[i].perfc_overflow] =
+ s->overflow;
+ }
}
}
void reset_privop_addrs(void)
{
- int i, j;
- for (i = 0; i < PRIVOP_COUNT_NINSTS; i++) {
- struct privop_addr_count *v = &privop_addr_counter[i];
- for (j = 0; j < PRIVOP_COUNT_NADDRS; j++)
- v->addr[j] = v->count[j] = 0;
- v->overflow = 0;
+ unsigned int cpu;
+
+ for_each_cpu ( cpu ) {
+ struct privop_addr_count *v = per_cpu(privop_addr_counter, cpu);
+ int i, j;
+
+ for (i = 0; i < PRIVOP_COUNT_NINSTS; i++, v++) {
+ for (j = 0; j < PRIVOP_COUNT_NADDRS; j++)
+ v->addr[j] = v->count[j] = 0;
+ v->overflow = 0;
+ }
}
}
#endif
diff -r 14aeb7981e4e -r 0b2794d3320f xen/arch/ia64/xen/tlb_track.c
--- a/xen/arch/ia64/xen/tlb_track.c Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/arch/ia64/xen/tlb_track.c Wed Mar 28 10:38:41 2007 +0100
@@ -216,14 +216,14 @@ tlb_track_insert_or_dirty(struct tlb_tra
TLB_TRACK_RET_T ret = TLB_TRACK_NOT_FOUND;
#if 0 /* this is done at vcpu_tlb_track_insert_or_dirty() */
- perfc_incrc(tlb_track_iod);
+ perfc_incr(tlb_track_iod);
if (!pte_tlb_tracking(old_pte)) {
- perfc_incrc(tlb_track_iod_not_tracked);
+ perfc_incr(tlb_track_iod_not_tracked);
return TLB_TRACK_NOT_TRACKED;
}
#endif
if (pte_tlb_inserted_many(old_pte)) {
- perfc_incrc(tlb_track_iod_tracked_many);
+ perfc_incr(tlb_track_iod_tracked_many);
return TLB_TRACK_MANY;
}
@@ -260,7 +260,7 @@ tlb_track_insert_or_dirty(struct tlb_tra
if (entry->vaddr == vaddr && entry->rid == rid) {
// tlb_track_printd("TLB_TRACK_FOUND\n");
ret = TLB_TRACK_FOUND;
- perfc_incrc(tlb_track_iod_found);
+ perfc_incr(tlb_track_iod_found);
#ifdef CONFIG_TLB_TRACK_CNT
entry->cnt++;
if (entry->cnt > TLB_TRACK_CNT_FORCE_MANY) {
@@ -276,7 +276,7 @@ tlb_track_insert_or_dirty(struct tlb_tra
*/
// tlb_track_entry_printf(entry);
// tlb_track_printd("cnt = %ld\n", entry->cnt);
- perfc_incrc(tlb_track_iod_force_many);
+ perfc_incr(tlb_track_iod_force_many);
goto force_many;
}
#endif
@@ -294,14 +294,14 @@ tlb_track_insert_or_dirty(struct tlb_tra
if (pte_val(ret_pte) != pte_val(old_pte)) {
// tlb_track_printd("TLB_TRACK_AGAIN\n");
ret = TLB_TRACK_AGAIN;
- perfc_incrc(tlb_track_iod_again);
+ perfc_incr(tlb_track_iod_again);
} else {
// tlb_track_printd("TLB_TRACK_MANY del entry 0x%p\n",
// entry);
ret = TLB_TRACK_MANY;
list_del(&entry->list);
// tlb_track_entry_printf(entry);
- perfc_incrc(tlb_track_iod_tracked_many_del);
+ perfc_incr(tlb_track_iod_tracked_many_del);
}
goto out;
}
@@ -314,7 +314,7 @@ tlb_track_insert_or_dirty(struct tlb_tra
*/
// tlb_track_printd("TLB_TRACK_AGAIN\n");
ret = TLB_TRACK_AGAIN;
- perfc_incrc(tlb_track_iod_again);
+ perfc_incr(tlb_track_iod_again);
goto out;
}
@@ -323,7 +323,7 @@ tlb_track_insert_or_dirty(struct tlb_tra
/* Other thread else removed the tlb_track_entry after we got old_pte
before we got spin lock. */
ret = TLB_TRACK_AGAIN;
- perfc_incrc(tlb_track_iod_again);
+ perfc_incr(tlb_track_iod_again);
goto out;
}
if (new_entry == NULL && bit_to_be_set == _PAGE_TLB_INSERTED) {
@@ -334,10 +334,10 @@ tlb_track_insert_or_dirty(struct tlb_tra
/* entry can't be allocated.
fall down into full flush mode. */
bit_to_be_set |= _PAGE_TLB_INSERTED_MANY;
- perfc_incrc(tlb_track_iod_new_failed);
+ perfc_incr(tlb_track_iod_new_failed);
}
// tlb_track_printd("new_entry 0x%p\n", new_entry);
- perfc_incrc(tlb_track_iod_new_entry);
+ perfc_incr(tlb_track_iod_new_entry);
goto again;
}
@@ -348,7 +348,7 @@ tlb_track_insert_or_dirty(struct tlb_tra
if (tlb_track_pte_zapped(old_pte, ret_pte)) {
// tlb_track_printd("zapped TLB_TRACK_AGAIN\n");
ret = TLB_TRACK_AGAIN;
- perfc_incrc(tlb_track_iod_again);
+ perfc_incr(tlb_track_iod_again);
goto out;
}
@@ -359,7 +359,7 @@ tlb_track_insert_or_dirty(struct tlb_tra
// tlb_track_printd("iserted TLB_TRACK_MANY\n");
BUG_ON(!pte_tlb_inserted(ret_pte));
ret = TLB_TRACK_MANY;
- perfc_incrc(tlb_track_iod_new_many);
+ perfc_incr(tlb_track_iod_new_many);
goto out;
}
BUG_ON(pte_tlb_inserted(ret_pte));
@@ -381,7 +381,7 @@ tlb_track_insert_or_dirty(struct tlb_tra
#ifdef CONFIG_TLB_TRACK_CNT
entry->cnt = 0;
#endif
- perfc_incrc(tlb_track_iod_insert);
+ perfc_incr(tlb_track_iod_insert);
// tlb_track_entry_printf(entry);
} else {
goto out;
@@ -392,7 +392,7 @@ tlb_track_insert_or_dirty(struct tlb_tra
cpu_set(v->processor, entry->pcpu_dirty_mask);
BUG_ON(v->vcpu_id >= NR_CPUS);
vcpu_set(v->vcpu_id, entry->vcpu_dirty_mask);
- perfc_incrc(tlb_track_iod_dirtied);
+ perfc_incr(tlb_track_iod_dirtied);
out:
spin_unlock(&tlb_track->hash_lock);
@@ -432,19 +432,19 @@ tlb_track_search_and_remove(struct tlb_t
struct list_head* head = tlb_track_hash_head(tlb_track, ptep);
struct tlb_track_entry* entry;
- perfc_incrc(tlb_track_sar);
+ perfc_incr(tlb_track_sar);
if (!pte_tlb_tracking(old_pte)) {
- perfc_incrc(tlb_track_sar_not_tracked);
+ perfc_incr(tlb_track_sar_not_tracked);
return TLB_TRACK_NOT_TRACKED;
}
if (!pte_tlb_inserted(old_pte)) {
BUG_ON(pte_tlb_inserted_many(old_pte));
- perfc_incrc(tlb_track_sar_not_found);
+ perfc_incr(tlb_track_sar_not_found);
return TLB_TRACK_NOT_FOUND;
}
if (pte_tlb_inserted_many(old_pte)) {
BUG_ON(!pte_tlb_inserted(old_pte));
- perfc_incrc(tlb_track_sar_many);
+ perfc_incr(tlb_track_sar_many);
return TLB_TRACK_MANY;
}
@@ -475,14 +475,14 @@ tlb_track_search_and_remove(struct tlb_t
pte_tlb_inserted(current_pte))) {
BUG_ON(pte_tlb_inserted_many(current_pte));
spin_unlock(&tlb_track->hash_lock);
- perfc_incrc(tlb_track_sar_many);
+ perfc_incr(tlb_track_sar_many);
return TLB_TRACK_MANY;
}
list_del(&entry->list);
spin_unlock(&tlb_track->hash_lock);
*entryp = entry;
- perfc_incrc(tlb_track_sar_found);
+ perfc_incr(tlb_track_sar_found);
// tlb_track_entry_printf(entry);
#ifdef CONFIG_TLB_TRACK_CNT
// tlb_track_printd("cnt = %ld\n", entry->cnt);
diff -r 14aeb7981e4e -r 0b2794d3320f xen/arch/ia64/xen/vcpu.c
--- a/xen/arch/ia64/xen/vcpu.c Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/arch/ia64/xen/vcpu.c Wed Mar 28 10:38:41 2007 +0100
@@ -1616,7 +1616,7 @@ IA64FAULT vcpu_translate(VCPU * vcpu, u6
*pteval = (address & _PAGE_PPN_MASK) |
__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX;
*itir = PAGE_SHIFT << 2;
- perfc_incrc(phys_translate);
+ perfc_incr(phys_translate);
return IA64_NO_FAULT;
}
} else if (!region && warn_region0_address) {
@@ -1637,7 +1637,7 @@ IA64FAULT vcpu_translate(VCPU * vcpu, u6
if (trp != NULL) {
*pteval = trp->pte.val;
*itir = trp->itir;
- perfc_incrc(tr_translate);
+ perfc_incr(tr_translate);
return IA64_NO_FAULT;
}
}
@@ -1647,7 +1647,7 @@ IA64FAULT vcpu_translate(VCPU * vcpu, u6
if (trp != NULL) {
*pteval = trp->pte.val;
*itir = trp->itir;
- perfc_incrc(tr_translate);
+ perfc_incr(tr_translate);
return IA64_NO_FAULT;
}
}
@@ -1660,7 +1660,7 @@ IA64FAULT vcpu_translate(VCPU * vcpu, u6
&& vcpu_match_tr_entry_no_p(trp, address, rid)) {
*pteval = pte.val;
*itir = trp->itir;
- perfc_incrc(dtlb_translate);
+ perfc_incr(dtlb_translate);
return IA64_USE_TLB;
}
@@ -1709,7 +1709,7 @@ out:
out:
*itir = rr & RR_PS_MASK;
*pteval = pte.val;
- perfc_incrc(vhpt_translate);
+ perfc_incr(vhpt_translate);
return IA64_NO_FAULT;
}
diff -r 14aeb7981e4e -r 0b2794d3320f xen/arch/ia64/xen/vhpt.c
--- a/xen/arch/ia64/xen/vhpt.c Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/arch/ia64/xen/vhpt.c Wed Mar 28 10:38:41 2007 +0100
@@ -48,14 +48,14 @@ local_vhpt_flush(void)
/* this must be after flush */
tlbflush_update_time(&__get_cpu_var(vhpt_tlbflush_timestamp),
flush_time);
- perfc_incrc(local_vhpt_flush);
+ perfc_incr(local_vhpt_flush);
}
void
vcpu_vhpt_flush(struct vcpu* v)
{
__vhpt_flush(vcpu_vhpt_maddr(v));
- perfc_incrc(vcpu_vhpt_flush);
+ perfc_incr(vcpu_vhpt_flush);
}
static void
@@ -248,7 +248,7 @@ void vcpu_flush_vtlb_all(struct vcpu *v)
not running on this processor. There is currently no easy way to
check this. */
- perfc_incrc(vcpu_flush_vtlb_all);
+ perfc_incr(vcpu_flush_vtlb_all);
}
static void __vcpu_flush_vtlb_all(void *vcpu)
@@ -280,7 +280,7 @@ void domain_flush_vtlb_all(struct domain
__vcpu_flush_vtlb_all,
v, 1, 1);
}
- perfc_incrc(domain_flush_vtlb_all);
+ perfc_incr(domain_flush_vtlb_all);
}
// Callers may need to call smp_mb() before/after calling this.
@@ -322,7 +322,7 @@ void vcpu_flush_tlb_vhpt_range (u64 vadr
vadr, 1UL << log_range);
ia64_ptcl(vadr, log_range << 2);
ia64_srlz_i();
- perfc_incrc(vcpu_flush_tlb_vhpt_range);
+ perfc_incr(vcpu_flush_tlb_vhpt_range);
}
void domain_flush_vtlb_range (struct domain *d, u64 vadr, u64 addr_range)
@@ -361,7 +361,7 @@ void domain_flush_vtlb_range (struct dom
/* ptc.ga */
platform_global_tlb_purge(vadr, vadr + addr_range, PAGE_SHIFT);
- perfc_incrc(domain_flush_vtlb_range);
+ perfc_incr(domain_flush_vtlb_range);
}
#ifdef CONFIG_XEN_IA64_TLB_TRACK
@@ -391,11 +391,11 @@ __domain_flush_vtlb_track_entry(struct d
*/
vcpu_get_rr(current, VRN7 << VRN_SHIFT, &rr7_rid);
if (likely(rr7_rid == entry->rid)) {
- perfc_incrc(tlb_track_use_rr7);
+ perfc_incr(tlb_track_use_rr7);
} else {
swap_rr0 = 1;
vaddr = (vaddr << 3) >> 3;// force vrn0
- perfc_incrc(tlb_track_swap_rr0);
+ perfc_incr(tlb_track_swap_rr0);
}
// tlb_track_entry_printf(entry);
@@ -435,18 +435,18 @@ __domain_flush_vtlb_track_entry(struct d
/* ptc.ga */
if (local_purge) {
ia64_ptcl(vaddr, PAGE_SHIFT << 2);
- perfc_incrc(domain_flush_vtlb_local);
+ perfc_incr(domain_flush_vtlb_local);
} else {
/* ptc.ga has release semantics. */
platform_global_tlb_purge(vaddr, vaddr + PAGE_SIZE,
PAGE_SHIFT);
- perfc_incrc(domain_flush_vtlb_global);
+ perfc_incr(domain_flush_vtlb_global);
}
if (swap_rr0) {
vcpu_set_rr(current, 0, old_rid);
}
- perfc_incrc(domain_flush_vtlb_track_entry);
+ perfc_incr(domain_flush_vtlb_track_entry);
}
void
@@ -512,7 +512,7 @@ void gather_vhpt_stats(void)
for (i = 0; i < VHPT_NUM_ENTRIES; i++, v++)
if (!(v->ti_tag & INVALID_TI_TAG))
vhpt_valid++;
- perfc_seta(vhpt_valid_entries, cpu, vhpt_valid);
- }
-}
-#endif
+        per_cpu(perfcounters, cpu)[PERFC_vhpt_valid_entries] = vhpt_valid;
+ }
+}
+#endif
diff -r 14aeb7981e4e -r 0b2794d3320f xen/arch/powerpc/backtrace.c
--- a/xen/arch/powerpc/backtrace.c Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/arch/powerpc/backtrace.c Wed Mar 28 10:38:41 2007 +0100
@@ -205,21 +205,6 @@ void show_backtrace_regs(struct cpu_user
console_end_sync();
}
-void __warn(char *file, int line)
-{
- ulong sp;
- ulong lr;
-
- console_start_sync();
- printk("WARN at %s:%d\n", file, line);
-
- sp = (ulong)__builtin_frame_address(0);
- lr = (ulong)__builtin_return_address(0);
- backtrace(sp, lr, lr);
-
- console_end_sync();
-}
-
void dump_execution_state(void)
{
struct cpu_user_regs *regs = guest_cpu_user_regs();
diff -r 14aeb7981e4e -r 0b2794d3320f xen/arch/powerpc/mm.c
--- a/xen/arch/powerpc/mm.c Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/arch/powerpc/mm.c Wed Mar 28 10:38:41 2007 +0100
@@ -261,7 +261,7 @@ int get_page_type(struct page_info *page
if ( unlikely(!cpus_empty(mask)) )
{
- perfc_incrc(need_flush_tlb_flush);
+ perfc_incr(need_flush_tlb_flush);
flush_tlb_mask(mask);
}
diff -r 14aeb7981e4e -r 0b2794d3320f xen/arch/x86/Rules.mk
--- a/xen/arch/x86/Rules.mk Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/arch/x86/Rules.mk Wed Mar 28 10:38:41 2007 +0100
@@ -59,6 +59,4 @@ HDRS += $(wildcard $(BASEDIR)/include/as
HDRS += $(wildcard $(BASEDIR)/include/asm-x86/hvm/vmx/*.h)
# Require GCC v3.4+ (to avoid issues with alignment constraints in Xen headers)
-ifneq ($(call cc-ver,$(CC),0x030400),y)
-$(error Xen requires at least gcc-3.4)
-endif
+$(call cc-ver-check,CC,0x030400,"Xen requires at least gcc-3.4")
diff -r 14aeb7981e4e -r 0b2794d3320f xen/arch/x86/apic.c
--- a/xen/arch/x86/apic.c Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/arch/x86/apic.c Wed Mar 28 10:38:41 2007 +0100
@@ -1076,7 +1076,7 @@ fastcall void smp_apic_timer_interrupt(s
fastcall void smp_apic_timer_interrupt(struct cpu_user_regs * regs)
{
ack_APIC_irq();
- perfc_incrc(apic_timer);
+ perfc_incr(apic_timer);
raise_softirq(TIMER_SOFTIRQ);
}
diff -r 14aeb7981e4e -r 0b2794d3320f xen/arch/x86/extable.c
--- a/xen/arch/x86/extable.c Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/arch/x86/extable.c Wed Mar 28 10:38:41 2007 +0100
@@ -72,7 +72,7 @@ search_pre_exception_table(struct cpu_us
if ( fixup )
{
dprintk(XENLOG_INFO, "Pre-exception: %p -> %p\n", _p(addr), _p(fixup));
- perfc_incrc(exception_fixed);
+ perfc_incr(exception_fixed);
}
return fixup;
}
diff -r 14aeb7981e4e -r 0b2794d3320f xen/arch/x86/hvm/io.c
--- a/xen/arch/x86/hvm/io.c Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/arch/x86/hvm/io.c Wed Mar 28 10:38:41 2007 +0100
@@ -292,7 +292,11 @@ static inline void set_eflags_CF(int siz
static inline void set_eflags_CF(int size, unsigned long v1,
unsigned long v2, struct cpu_user_regs *regs)
{
- unsigned long mask = (1 << (8 * size)) - 1;
+ unsigned long mask;
+
+ ASSERT((size <= sizeof(mask)) && (size > 0));
+
+ mask = ~0UL >> (8 * (sizeof(mask) - size));
if ((v1 & mask) > (v2 & mask))
regs->eflags |= X86_EFLAGS_CF;
@@ -303,7 +307,13 @@ static inline void set_eflags_OF(int siz
static inline void set_eflags_OF(int size, unsigned long v1,
unsigned long v2, unsigned long v3, struct
cpu_user_regs *regs)
{
- if ((v3 ^ v2) & (v3 ^ v1) & (1 << ((8 * size) - 1)))
+ unsigned long mask;
+
+ ASSERT((size <= sizeof(mask)) && (size > 0));
+
+ mask = ~0UL >> (8 * (sizeof(mask) - size));
+
+ if ((v3 ^ v2) & (v3 ^ v1) & mask)
regs->eflags |= X86_EFLAGS_OF;
}
@@ -317,7 +327,11 @@ static inline void set_eflags_ZF(int siz
static inline void set_eflags_ZF(int size, unsigned long v1,
struct cpu_user_regs *regs)
{
- unsigned long mask = (1 << (8 * size)) - 1;
+ unsigned long mask;
+
+ ASSERT((size <= sizeof(mask)) && (size > 0));
+
+ mask = ~0UL >> (8 * (sizeof(mask) - size));
if ((v1 & mask) == 0)
regs->eflags |= X86_EFLAGS_ZF;
@@ -326,7 +340,13 @@ static inline void set_eflags_SF(int siz
static inline void set_eflags_SF(int size, unsigned long v1,
struct cpu_user_regs *regs)
{
- if (v1 & (1 << ((8 * size) - 1)))
+ unsigned long mask;
+
+ ASSERT((size <= sizeof(mask)) && (size > 0));
+
+ mask = ~0UL >> (8 * (sizeof(mask) - size));
+
+ if (v1 & mask)
regs->eflags |= X86_EFLAGS_SF;
}
diff -r 14aeb7981e4e -r 0b2794d3320f xen/arch/x86/hvm/svm/intr.c
--- a/xen/arch/x86/hvm/svm/intr.c Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/arch/x86/hvm/svm/intr.c Wed Mar 28 10:38:41 2007 +0100
@@ -64,87 +64,75 @@ asmlinkage void svm_intr_assist(void)
{
struct vcpu *v = current;
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
- struct periodic_time *pt;
int intr_type = APIC_DM_EXTINT;
int intr_vector = -1;
- int re_injecting = 0;
- /* Check if an Injection is active */
- /* Previous Interrupt delivery caused this Intercept? */
+ /*
+ * Do not deliver a virtual interrupt (vintr) if an exception is pending.
+ * This is because the delivery of the exception can arbitrarily delay
+ * the injection of the vintr (for example, if the exception is handled
+ * via an interrupt gate, hence zeroing RFLAGS.IF). In the meantime the
+ * vTPR can be modified upwards and we can end up delivering the vintr
+ * when it is not in fact valid to do so (because we do not re-check the
+ * vTPR value). Moreover, the guest will be able to see the updated
+ * APIC/PIC state (as if the interrupt had been acknowledged) yet will not
+ * have actually received the interrupt. This could confuse the guest!
+ */
+ if ( vmcb->eventinj.fields.v )
+ return;
+
+ /*
+ * Previous Interrupt delivery caused this intercept?
+ * This will happen if the injection is latched by the processor (hence
+ * clearing vintr.fields.irq) but then subsequently a fault occurs (e.g.,
+ * due to lack of shadow mapping of guest IDT or guest-kernel stack).
+ *
+ * NB. Exceptions that fault during delivery are lost. This needs to be
+ * fixed but we'll usually get away with it since faults are usually
+ * idempotent. But this isn't the case for e.g. software interrupts!
+ */
if ( vmcb->exitintinfo.fields.v && (vmcb->exitintinfo.fields.type == 0) )
{
- v->arch.hvm_svm.saved_irq_vector = vmcb->exitintinfo.fields.vector;
+ intr_vector = vmcb->exitintinfo.fields.vector;
vmcb->exitintinfo.bytes = 0;
- re_injecting = 1;
+ HVMTRACE_1D(REINJ_VIRQ, v, intr_vector);
+ svm_inject_extint(v, intr_vector);
+ return;
}
- /* Previous interrupt still pending? */
+ /*
+ * Previous interrupt still pending? This occurs if we return from VMRUN
+ * very early in the entry-to-guest process. Usually this is because an
+ * external physical interrupt was pending when we executed VMRUN.
+ */
if ( vmcb->vintr.fields.irq )
+ return;
+
+    /* Crank the handle on interrupt state and check for new interrupts. */
+ pt_update_irq(v);
+ hvm_set_callback_irq_level();
+ if ( !cpu_has_pending_irq(v) )
+ return;
+
+ /*
+ * Create a 'fake' virtual interrupt on to intercept as soon as the
+ * guest _can_ take interrupts. Do not obtain the next interrupt from
+ * the vlapic/pic if unable to inject.
+ */
+ if ( irq_masked(vmcb->rflags) || vmcb->interrupt_shadow )
{
- intr_vector = vmcb->vintr.fields.vector;
- vmcb->vintr.bytes = 0;
- re_injecting = 1;
- }
- /* Pending IRQ saved at last VMExit? */
- else if ( v->arch.hvm_svm.saved_irq_vector >= 0 )
- {
- intr_vector = v->arch.hvm_svm.saved_irq_vector;
- v->arch.hvm_svm.saved_irq_vector = -1;
- re_injecting = 1;
- }
- /* Now let's check for newer interrrupts */
- else
- {
- pt_update_irq(v);
-
- hvm_set_callback_irq_level();
-
- if ( cpu_has_pending_irq(v) )
- {
- /*
- * Create a 'fake' virtual interrupt on to intercept as soon
- * as the guest _can_ take interrupts. Do not obtain the next
- * interrupt from the vlapic/pic if unable to inject.
- */
- if ( irq_masked(vmcb->rflags) || vmcb->interrupt_shadow )
- {
- vmcb->general1_intercepts |= GENERAL1_INTERCEPT_VINTR;
- HVMTRACE_2D(INJ_VIRQ, v, 0x0, /*fake=*/ 1);
-        svm_inject_extint(v, 0x0); /* actual vector doesn't really matter */
- return;
- }
- intr_vector = cpu_get_interrupt(v, &intr_type);
- }
+ vmcb->general1_intercepts |= GENERAL1_INTERCEPT_VINTR;
+ HVMTRACE_2D(INJ_VIRQ, v, 0x0, /*fake=*/ 1);
+ svm_inject_extint(v, 0x0); /* actual vector doesn't matter */
+ return;
}
- /* have we got an interrupt to inject? */
- if ( intr_vector < 0 )
- return;
+ /* Okay, we can deliver the interrupt: grab it and update PIC state. */
+ intr_vector = cpu_get_interrupt(v, &intr_type);
+ BUG_ON(intr_vector < 0);
- switch ( intr_type )
- {
- case APIC_DM_EXTINT:
- case APIC_DM_FIXED:
- case APIC_DM_LOWEST:
- /* Re-injecting a PIT interruptt? */
- if ( re_injecting && (pt = is_pt_irq(v, intr_vector, intr_type)) )
- ++pt->pending_intr_nr;
- /* let's inject this interrupt */
- if (re_injecting)
- HVMTRACE_1D(REINJ_VIRQ, v, intr_vector);
- else
- HVMTRACE_2D(INJ_VIRQ, v, intr_vector, /*fake=*/ 0);
- svm_inject_extint(v, intr_vector);
- break;
- case APIC_DM_SMI:
- case APIC_DM_NMI:
- case APIC_DM_INIT:
- case APIC_DM_STARTUP:
- default:
- printk("Unsupported interrupt type: %d\n", intr_type);
- BUG();
- break;
- }
+ HVMTRACE_2D(INJ_VIRQ, v, intr_vector, /*fake=*/ 0);
+ svm_inject_extint(v, intr_vector);
pt_intr_post(v, intr_vector, intr_type);
}
diff -r 14aeb7981e4e -r 0b2794d3320f xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/arch/x86/hvm/svm/svm.c Wed Mar 28 10:38:41 2007 +0100
@@ -64,8 +64,8 @@ extern int svm_dbg_on;
extern int svm_dbg_on;
void svm_dump_regs(const char *from, struct cpu_user_regs *regs);
-static int svm_do_vmmcall_reset_to_realmode(struct vcpu *v,
- struct cpu_user_regs *regs);
+static int svm_reset_to_realmode(struct vcpu *v,
+ struct cpu_user_regs *regs);
/* va of hardware host save area */
static void *hsa[NR_CPUS] __read_mostly;
@@ -749,19 +749,21 @@ static void svm_init_ap_context(
struct vcpu_guest_context *ctxt, int vcpuid, int trampoline_vector)
{
struct vcpu *v;
+ struct vmcb_struct *vmcb;
cpu_user_regs_t *regs;
u16 cs_sel;
/* We know this is safe because hvm_bringup_ap() does it */
v = current->domain->vcpu[vcpuid];
+ vmcb = v->arch.hvm_svm.vmcb;
regs = &v->arch.guest_context.user_regs;
memset(ctxt, 0, sizeof(*ctxt));
/*
* We execute the trampoline code in real mode. The trampoline vector
- * passed to us is page alligned and is the physicall frame number for
- * the code. We will execute this code in real mode.
+     * passed to us is page aligned and is the physical frame number for
+ * the code. We will execute this code in real mode.
*/
cs_sel = trampoline_vector << 8;
ctxt->user_regs.eip = 0x0;
@@ -771,11 +773,11 @@ static void svm_init_ap_context(
* This is the launch of an AP; set state so that we begin executing
* the trampoline code in real-mode.
*/
- svm_do_vmmcall_reset_to_realmode(v, regs);
+ svm_reset_to_realmode(v, regs);
/* Adjust the vmcb's hidden register state. */
- v->arch.hvm_svm.vmcb->rip = 0;
- v->arch.hvm_svm.vmcb->cs.sel = cs_sel;
- v->arch.hvm_svm.vmcb->cs.base = (cs_sel << 4);
+ vmcb->rip = 0;
+ vmcb->cs.sel = cs_sel;
+ vmcb->cs.base = (cs_sel << 4);
}
static void svm_init_hypercall_page(struct domain *d, void *hypercall_page)
@@ -960,8 +962,6 @@ static int svm_vcpu_initialise(struct vc
v->arch.schedule_tail = arch_svm_do_resume;
v->arch.ctxt_switch_from = svm_ctxt_switch_from;
v->arch.ctxt_switch_to = svm_ctxt_switch_to;
-
- v->arch.hvm_svm.saved_irq_vector = -1;
v->arch.hvm_svm.launch_core = -1;
@@ -2494,8 +2494,8 @@ void svm_handle_invlpg(const short invlp
*
* returns 0 on success, non-zero otherwise
*/
-static int svm_do_vmmcall_reset_to_realmode(struct vcpu *v,
- struct cpu_user_regs *regs)
+static int svm_reset_to_realmode(struct vcpu *v,
+ struct cpu_user_regs *regs)
{
struct vmcb_struct *vmcb;
diff -r 14aeb7981e4e -r 0b2794d3320f xen/arch/x86/hvm/svm/vmcb.c
--- a/xen/arch/x86/hvm/svm/vmcb.c Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/arch/x86/hvm/svm/vmcb.c Wed Mar 28 10:38:41 2007 +0100
@@ -203,6 +203,7 @@ static int construct_vmcb(struct vcpu *v
vmcb->g_pat = 0x0007040600070406ULL; /* guest PAT */
vmcb->exception_intercepts &= ~EXCEPTION_BITMAP_PG;
vmcb->h_cr3 = pagetable_get_paddr(v->domain->arch.phys_table);
+ vmcb->cr4 = arch_svm->cpu_shadow_cr4 = 0;
}
return 0;
diff -r 14aeb7981e4e -r 0b2794d3320f xen/arch/x86/hvm/vmx/intr.c
--- a/xen/arch/x86/hvm/vmx/intr.c Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/arch/x86/hvm/vmx/intr.c Wed Mar 28 10:38:41 2007 +0100
@@ -89,7 +89,7 @@ asmlinkage void vmx_intr_assist(void)
asmlinkage void vmx_intr_assist(void)
{
int intr_type = 0;
- int highest_vector;
+ int intr_vector;
unsigned long eflags;
struct vcpu *v = current;
unsigned int idtv_info_field;
@@ -106,8 +106,9 @@ asmlinkage void vmx_intr_assist(void)
if ( unlikely(v->arch.hvm_vmx.vector_injected) )
{
- v->arch.hvm_vmx.vector_injected=0;
- if (unlikely(has_ext_irq)) enable_irq_window(v);
+ v->arch.hvm_vmx.vector_injected = 0;
+ if ( unlikely(has_ext_irq) )
+ enable_irq_window(v);
return;
}
@@ -132,7 +133,6 @@ asmlinkage void vmx_intr_assist(void)
enable_irq_window(v);
HVM_DBG_LOG(DBG_LEVEL_1, "idtv_info_field=%x", idtv_info_field);
-
return;
}
@@ -154,30 +154,13 @@ asmlinkage void vmx_intr_assist(void)
return;
}
- highest_vector = cpu_get_interrupt(v, &intr_type);
- if ( highest_vector < 0 )
- return;
+ intr_vector = cpu_get_interrupt(v, &intr_type);
+ BUG_ON(intr_vector < 0);
- switch ( intr_type )
- {
- case APIC_DM_EXTINT:
- case APIC_DM_FIXED:
- case APIC_DM_LOWEST:
- HVMTRACE_2D(INJ_VIRQ, v, highest_vector, /*fake=*/ 0);
- vmx_inject_extint(v, highest_vector, VMX_DELIVER_NO_ERROR_CODE);
- break;
+ HVMTRACE_2D(INJ_VIRQ, v, intr_vector, /*fake=*/ 0);
+ vmx_inject_extint(v, intr_vector, VMX_DELIVER_NO_ERROR_CODE);
- case APIC_DM_SMI:
- case APIC_DM_NMI:
- case APIC_DM_INIT:
- case APIC_DM_STARTUP:
- default:
- printk("Unsupported interrupt type\n");
- BUG();
- break;
- }
-
- pt_intr_post(v, highest_vector, intr_type);
+ pt_intr_post(v, intr_vector, intr_type);
}
/*
diff -r 14aeb7981e4e -r 0b2794d3320f xen/arch/x86/irq.c
--- a/xen/arch/x86/irq.c Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/arch/x86/irq.c Wed Mar 28 10:38:41 2007 +0100
@@ -56,7 +56,7 @@ asmlinkage void do_IRQ(struct cpu_user_r
irq_desc_t *desc = &irq_desc[vector];
struct irqaction *action;
- perfc_incrc(irqs);
+ perfc_incr(irqs);
spin_lock(&desc->lock);
desc->handler->ack(vector);
diff -r 14aeb7981e4e -r 0b2794d3320f xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/arch/x86/mm.c Wed Mar 28 10:38:41 2007 +0100
@@ -1726,7 +1726,7 @@ int get_page_type(struct page_info *page
(!shadow_mode_enabled(page_get_owner(page)) ||
((nx & PGT_type_mask) == PGT_writable_page)) )
{
- perfc_incrc(need_flush_tlb_flush);
+ perfc_incr(need_flush_tlb_flush);
flush_tlb_mask(mask);
}
@@ -1969,6 +1969,8 @@ int do_mmuext_op(
if ( unlikely(!guest_handle_is_null(pdone)) )
(void)copy_from_guest(&done, pdone, 1);
}
+ else
+ perfc_incr(calls_to_mmuext_op);
if ( unlikely(!guest_handle_okay(uops, count)) )
{
@@ -2223,6 +2225,8 @@ int do_mmuext_op(
UNLOCK_BIGLOCK(d);
+ perfc_add(num_mmuext_ops, i);
+
out:
/* Add incremental work we have done to the @done output parameter. */
if ( unlikely(!guest_handle_is_null(pdone)) )
@@ -2257,6 +2261,8 @@ int do_mmu_update(
if ( unlikely(!guest_handle_is_null(pdone)) )
(void)copy_from_guest(&done, pdone, 1);
}
+ else
+ perfc_incr(calls_to_mmu_update);
if ( unlikely(!guest_handle_okay(ureqs, count)) )
{
@@ -2272,9 +2278,6 @@ int do_mmu_update(
domain_mmap_cache_init(&mapcache);
domain_mmap_cache_init(&sh_mapcache);
-
- perfc_incrc(calls_to_mmu_update);
- perfc_addc(num_page_updates, count);
LOCK_BIGLOCK(d);
@@ -2431,12 +2434,14 @@ int do_mmu_update(
guest_handle_add_offset(ureqs, 1);
}
+ process_deferred_ops();
+
+ UNLOCK_BIGLOCK(d);
+
domain_mmap_cache_destroy(&mapcache);
domain_mmap_cache_destroy(&sh_mapcache);
- process_deferred_ops();
-
- UNLOCK_BIGLOCK(d);
+ perfc_add(num_page_updates, i);
out:
/* Add incremental work we have done to the @done output parameter. */
@@ -2724,7 +2729,7 @@ int do_update_va_mapping(unsigned long v
cpumask_t pmask;
int rc = 0;
- perfc_incrc(calls_to_update_va);
+ perfc_incr(calls_to_update_va);
if ( unlikely(!__addr_ok(va) && !paging_mode_external(d)) )
return -EINVAL;
@@ -2739,6 +2744,10 @@ int do_update_va_mapping(unsigned long v
if ( pl1e )
guest_unmap_l1e(v, pl1e);
pl1e = NULL;
+
+ process_deferred_ops();
+
+ UNLOCK_BIGLOCK(d);
switch ( flags & UVMF_FLUSHTYPE_MASK )
{
@@ -2785,10 +2794,6 @@ int do_update_va_mapping(unsigned long v
break;
}
- process_deferred_ops();
-
- UNLOCK_BIGLOCK(d);
-
return rc;
}
@@ -2805,6 +2810,9 @@ int do_update_va_mapping_otherdomain(uns
return -ESRCH;
rc = do_update_va_mapping(va, val64, flags);
+
+ BUG_ON(this_cpu(percpu_mm_info).deferred_ops);
+ process_deferred_ops(); /* only to clear foreigndom */
return rc;
}
@@ -3378,7 +3386,7 @@ int ptwr_do_page_fault(struct vcpu *v, u
goto bail;
UNLOCK_BIGLOCK(d);
- perfc_incrc(ptwr_emulations);
+ perfc_incr(ptwr_emulations);
return EXCRET_fault_fixed;
bail:
diff -r 14aeb7981e4e -r 0b2794d3320f xen/arch/x86/mm/hap/hap.c
--- a/xen/arch/x86/mm/hap/hap.c Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/arch/x86/mm/hap/hap.c Wed Mar 28 10:38:41 2007 +0100
@@ -135,6 +135,7 @@ void hap_free_p2m_page(struct domain *d,
HAP_ERROR("Odd p2m page count c=%#x t=%"PRtype_info"\n",
pg->count_info, pg->u.inuse.type_info);
}
+ pg->count_info = 0;
/* Free should not decrement domain's total allocation, since
* these pages were allocated without an owner. */
page_set_owner(pg, NULL);
@@ -182,6 +183,7 @@ hap_set_allocation(struct domain *d, uns
list_del(&sp->list);
d->arch.paging.hap.free_pages -= 1;
d->arch.paging.hap.total_pages -= 1;
+ sp->count_info = 0;
free_domheap_pages(sp, 0);
}
@@ -367,17 +369,7 @@ void hap_destroy_monitor_table(struct vc
{
struct domain *d = v->domain;
-#if CONFIG_PAGING_LEVELS == 4
- /* Need to destroy the l3 monitor page in slot 0 too */
- {
- mfn_t m3mfn;
- l4_pgentry_t *l4e = hap_map_domain_page(mmfn);
- ASSERT(l4e_get_flags(l4e[0]) & _PAGE_PRESENT);
- m3mfn = _mfn(l4e_get_pfn(l4e[0]));
- hap_free(d, m3mfn);
- hap_unmap_domain_page(l4e);
- }
-#elif CONFIG_PAGING_LEVELS == 3
+#if CONFIG_PAGING_LEVELS == 3
/* Need to destroy the l2 monitor page in slot 4 too */
{
l3_pgentry_t *l3e = hap_map_domain_page(mmfn);
@@ -632,10 +624,6 @@ void hap_update_paging_modes(struct vcpu
v->arch.paging.translate_enabled = !!hvm_paging_enabled(v);
- /* use p2m map */
- v->arch.guest_table =
- pagetable_from_pfn(pagetable_get_pfn(d->arch.phys_table));
-
if ( pagetable_is_null(v->arch.monitor_table) ) {
mfn_t mmfn = hap_make_monitor_table(v);
v->arch.monitor_table = pagetable_from_mfn(mmfn);
diff -r 14aeb7981e4e -r 0b2794d3320f xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/arch/x86/mm/shadow/common.c Wed Mar 28 10:38:41 2007 +0100
@@ -276,7 +276,7 @@ hvm_emulate_write(enum x86_segment seg,
/* How many emulations could we save if we unshadowed on stack writes? */
if ( seg == x86_seg_ss )
- perfc_incrc(shadow_fault_emulate_stack);
+ perfc_incr(shadow_fault_emulate_stack);
rc = hvm_translate_linear_addr(
seg, offset, bytes, hvm_access_write, sh_ctxt, &addr);
@@ -804,7 +804,7 @@ void shadow_prealloc(struct domain *d, u
ASSERT(v != NULL); /* Shouldn't have enabled shadows if we've no vcpus */
/* Stage one: walk the list of pinned pages, unpinning them */
- perfc_incrc(shadow_prealloc_1);
+ perfc_incr(shadow_prealloc_1);
list_for_each_backwards_safe(l, t, &d->arch.paging.shadow.pinned_shadows)
{
sp = list_entry(l, struct shadow_page_info, list);
@@ -820,7 +820,7 @@ void shadow_prealloc(struct domain *d, u
/* Stage two: all shadow pages are in use in hierarchies that are
* loaded in cr3 on some vcpu. Walk them, unhooking the non-Xen
* mappings. */
- perfc_incrc(shadow_prealloc_2);
+ perfc_incr(shadow_prealloc_2);
for_each_vcpu(d, v2)
for ( i = 0 ; i < 4 ; i++ )
@@ -929,7 +929,7 @@ mfn_t shadow_alloc(struct domain *d,
ASSERT(shadow_locked_by_me(d));
ASSERT(order <= SHADOW_MAX_ORDER);
ASSERT(shadow_type != SH_type_none);
- perfc_incrc(shadow_alloc);
+ perfc_incr(shadow_alloc);
/* Find smallest order which can satisfy the request. */
for ( i = order; i <= SHADOW_MAX_ORDER; i++ )
@@ -967,7 +967,7 @@ mfn_t shadow_alloc(struct domain *d,
tlbflush_filter(mask, sp[i].tlbflush_timestamp);
if ( unlikely(!cpus_empty(mask)) )
{
- perfc_incrc(shadow_alloc_tlbflush);
+ perfc_incr(shadow_alloc_tlbflush);
flush_tlb_mask(mask);
}
/* Now safe to clear the page for reuse */
@@ -997,7 +997,7 @@ void shadow_free(struct domain *d, mfn_t
int i;
ASSERT(shadow_locked_by_me(d));
- perfc_incrc(shadow_free);
+ perfc_incr(shadow_free);
shadow_type = sp->type;
ASSERT(shadow_type != SH_type_none);
@@ -1406,7 +1406,7 @@ mfn_t shadow_hash_lookup(struct vcpu *v,
sh_hash_audit(d);
- perfc_incrc(shadow_hash_lookups);
+ perfc_incr(shadow_hash_lookups);
key = sh_hash(n, t);
sh_hash_audit_bucket(d, key);
@@ -1434,7 +1434,7 @@ mfn_t shadow_hash_lookup(struct vcpu *v,
}
else
{
- perfc_incrc(shadow_hash_lookup_head);
+ perfc_incr(shadow_hash_lookup_head);
}
return shadow_page_to_mfn(sp);
}
@@ -1442,7 +1442,7 @@ mfn_t shadow_hash_lookup(struct vcpu *v,
sp = sp->next_shadow;
}
- perfc_incrc(shadow_hash_lookup_miss);
+ perfc_incr(shadow_hash_lookup_miss);
return _mfn(INVALID_MFN);
}
@@ -1460,7 +1460,7 @@ void shadow_hash_insert(struct vcpu *v,
sh_hash_audit(d);
- perfc_incrc(shadow_hash_inserts);
+ perfc_incr(shadow_hash_inserts);
key = sh_hash(n, t);
sh_hash_audit_bucket(d, key);
@@ -1486,7 +1486,7 @@ void shadow_hash_delete(struct vcpu *v,
sh_hash_audit(d);
- perfc_incrc(shadow_hash_deletes);
+ perfc_incr(shadow_hash_deletes);
key = sh_hash(n, t);
sh_hash_audit_bucket(d, key);
@@ -1713,7 +1713,7 @@ int sh_remove_write_access(struct vcpu *
|| (pg->u.inuse.type_info & PGT_count_mask) == 0 )
return 0;
- perfc_incrc(shadow_writeable);
+ perfc_incr(shadow_writeable);
/* If this isn't a "normal" writeable page, the domain is trying to
* put pagetables in special memory of some kind. We can't allow that. */
@@ -1735,7 +1735,7 @@ int sh_remove_write_access(struct vcpu *
#define GUESS(_a, _h) do { \
if ( v->arch.paging.mode->shadow.guess_wrmap(v, (_a), gmfn) ) \
- perfc_incrc(shadow_writeable_h_ ## _h); \
+ perfc_incr(shadow_writeable_h_ ## _h); \
if ( (pg->u.inuse.type_info & PGT_count_mask) == 0 ) \
return 1; \
} while (0)
@@ -1808,7 +1808,7 @@ int sh_remove_write_access(struct vcpu *
callbacks[shtype](v, last_smfn, gmfn);
if ( (pg->u.inuse.type_info & PGT_count_mask) != old_count )
- perfc_incrc(shadow_writeable_h_5);
+ perfc_incr(shadow_writeable_h_5);
}
if ( (pg->u.inuse.type_info & PGT_count_mask) == 0 )
@@ -1817,7 +1817,7 @@ int sh_remove_write_access(struct vcpu *
#endif /* SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC */
/* Brute-force search of all the shadows, by walking the hash */
- perfc_incrc(shadow_writeable_bf);
+ perfc_incr(shadow_writeable_bf);
hash_foreach(v, callback_mask, callbacks, gmfn);
/* If that didn't catch the mapping, something is very wrong */
@@ -1888,7 +1888,7 @@ int sh_remove_all_mappings(struct vcpu *
| 1 << SH_type_fl1_64_shadow
;
- perfc_incrc(shadow_mappings);
+ perfc_incr(shadow_mappings);
if ( (page->count_info & PGC_count_mask) == 0 )
return 0;
@@ -1903,7 +1903,7 @@ int sh_remove_all_mappings(struct vcpu *
* Heuristics for finding the (probably) single mapping of this gmfn */
/* Brute-force search of all the shadows, by walking the hash */
- perfc_incrc(shadow_mappings_bf);
+ perfc_incr(shadow_mappings_bf);
hash_foreach(v, callback_mask, callbacks, gmfn);
/* If that didn't catch the mapping, something is very wrong */
@@ -1992,9 +1992,9 @@ static int sh_remove_shadow_via_pointer(
sh_unmap_domain_page(vaddr);
if ( rc )
- perfc_incrc(shadow_up_pointer);
+ perfc_incr(shadow_up_pointer);
else
- perfc_incrc(shadow_unshadow_bf);
+ perfc_incr(shadow_unshadow_bf);
return rc;
}
@@ -2093,7 +2093,7 @@ void sh_remove_shadows(struct vcpu *v, m
}
/* Search for this shadow in all appropriate shadows */
- perfc_incrc(shadow_unshadow);
+ perfc_incr(shadow_unshadow);
sh_flags = pg->shadow_flags;
/* Lower-level shadows need to be excised from upper-level shadows.
diff -r 14aeb7981e4e -r 0b2794d3320f xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/arch/x86/mm/shadow/multi.c Wed Mar 28 10:38:41 2007 +0100
@@ -109,7 +109,7 @@ get_shadow_status(struct vcpu *v, mfn_t
/* Look for shadows in the hash table */
{
mfn_t smfn = shadow_hash_lookup(v, mfn_x(gmfn), shadow_type);
- perfc_incrc(shadow_get_shadow_status);
+ perfc_incr(shadow_get_shadow_status);
return smfn;
}
@@ -209,7 +209,7 @@ guest_walk_tables(struct vcpu *v, unsign
{
ASSERT(!guest_op || shadow_locked_by_me(v->domain));
- perfc_incrc(shadow_guest_walk);
+ perfc_incr(shadow_guest_walk);
memset(gw, 0, sizeof(*gw));
gw->va = va;
@@ -448,14 +448,14 @@ static u32 guest_set_ad_bits(struct vcpu
== (_PAGE_DIRTY | _PAGE_ACCESSED) )
return flags; /* Guest already has A and D bits set */
flags |= _PAGE_DIRTY | _PAGE_ACCESSED;
- perfc_incrc(shadow_ad_update);
+ perfc_incr(shadow_ad_update);
}
else
{
if ( flags & _PAGE_ACCESSED )
return flags; /* Guest already has A bit set */
flags |= _PAGE_ACCESSED;
- perfc_incrc(shadow_a_update);
+ perfc_incr(shadow_a_update);
}
/* Set the bit(s) */
@@ -863,7 +863,7 @@ shadow_write_entries(void *d, void *s, i
* using map_domain_page() to get a writeable mapping if we need to. */
if ( __copy_to_user(d, d, sizeof (unsigned long)) != 0 )
{
- perfc_incrc(shadow_linear_map_failed);
+ perfc_incr(shadow_linear_map_failed);
map = sh_map_domain_page(mfn);
ASSERT(map != NULL);
dst = map + ((unsigned long)dst & (PAGE_SIZE - 1));
@@ -925,7 +925,7 @@ shadow_get_page_from_l1e(shadow_l1e_t sl
if ( unlikely(!res) )
{
- perfc_incrc(shadow_get_page_fail);
+ perfc_incr(shadow_get_page_fail);
SHADOW_PRINTK("failed: l1e=" SH_PRI_pte "\n");
}
@@ -2198,7 +2198,7 @@ static int validate_gl4e(struct vcpu *v,
mfn_t sl3mfn = _mfn(INVALID_MFN);
int result = 0;
- perfc_incrc(shadow_validate_gl4e_calls);
+ perfc_incr(shadow_validate_gl4e_calls);
if ( guest_l4e_get_flags(*new_gl4e) & _PAGE_PRESENT )
{
@@ -2250,7 +2250,7 @@ static int validate_gl3e(struct vcpu *v,
mfn_t sl2mfn = _mfn(INVALID_MFN);
int result = 0;
- perfc_incrc(shadow_validate_gl3e_calls);
+ perfc_incr(shadow_validate_gl3e_calls);
if ( guest_l3e_get_flags(*new_gl3e) & _PAGE_PRESENT )
{
@@ -2277,7 +2277,7 @@ static int validate_gl2e(struct vcpu *v,
mfn_t sl1mfn = _mfn(INVALID_MFN);
int result = 0;
- perfc_incrc(shadow_validate_gl2e_calls);
+ perfc_incr(shadow_validate_gl2e_calls);
if ( guest_l2e_get_flags(*new_gl2e) & _PAGE_PRESENT )
{
@@ -2363,7 +2363,7 @@ static int validate_gl1e(struct vcpu *v,
mfn_t gmfn;
int result = 0, mmio;
- perfc_incrc(shadow_validate_gl1e_calls);
+ perfc_incr(shadow_validate_gl1e_calls);
gfn = guest_l1e_get_gfn(*new_gl1e);
gmfn = vcpu_gfn_to_mfn(v, gfn);
@@ -2523,7 +2523,7 @@ static inline void check_for_early_unsha
u32 flags = mfn_to_page(gmfn)->shadow_flags;
if ( !(flags & (SHF_L2_32|SHF_L2_PAE|SHF_L2H_PAE|SHF_L4_64)) )
{
- perfc_incrc(shadow_early_unshadow);
+ perfc_incr(shadow_early_unshadow);
sh_remove_shadows(v, gmfn, 0, 0 /* Slow, can fail to unshadow */ );
}
}
@@ -2642,7 +2642,7 @@ static int sh_page_fault(struct vcpu *v,
SHADOW_PRINTK("d:v=%u:%u va=%#lx err=%u\n",
v->domain->domain_id, v->vcpu_id, va, regs->error_code);
- perfc_incrc(shadow_fault);
+ perfc_incr(shadow_fault);
//
// XXX: Need to think about eventually mapping superpages directly in the
// shadow (when possible), as opposed to splintering them into a
@@ -2670,7 +2670,7 @@ static int sh_page_fault(struct vcpu *v,
ASSERT(regs->error_code & PFEC_page_present);
regs->error_code ^= (PFEC_reserved_bit|PFEC_page_present);
reset_early_unshadow(v);
- perfc_incrc(shadow_fault_fast_gnp);
+ perfc_incr(shadow_fault_fast_gnp);
SHADOW_PRINTK("fast path not-present\n");
return 0;
}
@@ -2688,7 +2688,7 @@ static int sh_page_fault(struct vcpu *v,
<< PAGE_SHIFT)
| (va & ~PAGE_MASK);
}
- perfc_incrc(shadow_fault_fast_mmio);
+ perfc_incr(shadow_fault_fast_mmio);
SHADOW_PRINTK("fast path mmio %#"PRIpaddr"\n", gpa);
reset_early_unshadow(v);
handle_mmio(gpa);
@@ -2699,7 +2699,7 @@ static int sh_page_fault(struct vcpu *v,
/* This should be exceptionally rare: another vcpu has fixed
* the tables between the fault and our reading the l1e.
* Retry and let the hardware give us the right fault next time. */
- perfc_incrc(shadow_fault_fast_fail);
+ perfc_incr(shadow_fault_fast_fail);
SHADOW_PRINTK("fast path false alarm!\n");
return EXCRET_fault_fixed;
}
@@ -2746,7 +2746,7 @@ static int sh_page_fault(struct vcpu *v,
goto mmio;
}
- perfc_incrc(shadow_fault_bail_not_present);
+ perfc_incr(shadow_fault_bail_not_present);
goto not_a_shadow_fault;
}
@@ -2761,7 +2761,7 @@ static int sh_page_fault(struct vcpu *v,
!(accumulated_gflags & _PAGE_USER) )
{
/* illegal user-mode access to supervisor-only page */
- perfc_incrc(shadow_fault_bail_user_supervisor);
+ perfc_incr(shadow_fault_bail_user_supervisor);
goto not_a_shadow_fault;
}
@@ -2772,7 +2772,7 @@ static int sh_page_fault(struct vcpu *v,
{
if ( unlikely(!(accumulated_gflags & _PAGE_RW)) )
{
- perfc_incrc(shadow_fault_bail_ro_mapping);
+ perfc_incr(shadow_fault_bail_ro_mapping);
goto not_a_shadow_fault;
}
}
@@ -2787,7 +2787,7 @@ static int sh_page_fault(struct vcpu *v,
if ( accumulated_gflags & _PAGE_NX_BIT )
{
/* NX prevented this code fetch */
- perfc_incrc(shadow_fault_bail_nx);
+ perfc_incr(shadow_fault_bail_nx);
goto not_a_shadow_fault;
}
}
@@ -2802,7 +2802,7 @@ static int sh_page_fault(struct vcpu *v,
if ( !mmio && !mfn_valid(gmfn) )
{
- perfc_incrc(shadow_fault_bail_bad_gfn);
+ perfc_incr(shadow_fault_bail_bad_gfn);
SHADOW_PRINTK("BAD gfn=%"SH_PRI_gfn" gmfn=%"PRI_mfn"\n",
gfn_x(gfn), mfn_x(gmfn));
goto not_a_shadow_fault;
@@ -2844,12 +2844,12 @@ static int sh_page_fault(struct vcpu *v,
{
if ( ft == ft_demand_write )
{
- perfc_incrc(shadow_fault_emulate_write);
+ perfc_incr(shadow_fault_emulate_write);
goto emulate;
}
else if ( shadow_mode_trap_reads(d) && ft == ft_demand_read )
{
- perfc_incrc(shadow_fault_emulate_read);
+ perfc_incr(shadow_fault_emulate_read);
goto emulate;
}
}
@@ -2860,7 +2860,7 @@ static int sh_page_fault(struct vcpu *v,
goto mmio;
}
- perfc_incrc(shadow_fault_fixed);
+ perfc_incr(shadow_fault_fixed);
d->arch.paging.shadow.fault_count++;
reset_early_unshadow(v);
@@ -2920,7 +2920,7 @@ static int sh_page_fault(struct vcpu *v,
{
SHADOW_PRINTK("emulator failure, unshadowing mfn %#lx\n",
mfn_x(gmfn));
- perfc_incrc(shadow_fault_emulate_failed);
+ perfc_incr(shadow_fault_emulate_failed);
/* If this is actually a page table, then we have a bug, and need
* to support more operations in the emulator. More likely,
* though, this is a hint that this page should not be shadowed. */
@@ -2935,7 +2935,7 @@ static int sh_page_fault(struct vcpu *v,
mmio:
if ( !guest_mode(regs) )
goto not_a_shadow_fault;
- perfc_incrc(shadow_fault_mmio);
+ perfc_incr(shadow_fault_mmio);
sh_audit_gw(v, &gw);
unmap_walk(v, &gw);
SHADOW_PRINTK("mmio %#"PRIpaddr"\n", gpa);
@@ -2964,7 +2964,7 @@ sh_invlpg(struct vcpu *v, unsigned long
{
shadow_l2e_t sl2e;
- perfc_incrc(shadow_invlpg);
+ perfc_incr(shadow_invlpg);
/* First check that we can safely read the shadow l2e. SMP/PAE linux can
* run as high as 6% of invlpg calls where we haven't shadowed the l2
@@ -2983,7 +2983,7 @@ sh_invlpg(struct vcpu *v, unsigned long
+ shadow_l3_linear_offset(va)),
sizeof (sl3e)) != 0 )
{
- perfc_incrc(shadow_invlpg_fault);
+ perfc_incr(shadow_invlpg_fault);
return 0;
}
if ( (!shadow_l3e_get_flags(sl3e) & _PAGE_PRESENT) )
@@ -3002,7 +3002,7 @@ sh_invlpg(struct vcpu *v, unsigned long
sh_linear_l2_table(v) + shadow_l2_linear_offset(va),
sizeof (sl2e)) != 0 )
{
- perfc_incrc(shadow_invlpg_fault);
+ perfc_incr(shadow_invlpg_fault);
return 0;
}
diff -r 14aeb7981e4e -r 0b2794d3320f xen/arch/x86/smp.c
--- a/xen/arch/x86/smp.c Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/arch/x86/smp.c Wed Mar 28 10:38:41 2007 +0100
@@ -169,7 +169,7 @@ fastcall void smp_invalidate_interrupt(v
fastcall void smp_invalidate_interrupt(void)
{
ack_APIC_irq();
- perfc_incrc(ipis);
+ perfc_incr(ipis);
irq_enter();
if ( !__sync_lazy_execstate() )
{
@@ -329,7 +329,7 @@ fastcall void smp_event_check_interrupt(
fastcall void smp_event_check_interrupt(struct cpu_user_regs *regs)
{
ack_APIC_irq();
- perfc_incrc(ipis);
+ perfc_incr(ipis);
}
fastcall void smp_call_function_interrupt(struct cpu_user_regs *regs)
@@ -338,7 +338,7 @@ fastcall void smp_call_function_interrup
void *info = call_data->info;
ack_APIC_irq();
- perfc_incrc(ipis);
+ perfc_incr(ipis);
if ( !cpu_isset(smp_processor_id(), call_data->selected) )
return;
diff -r 14aeb7981e4e -r 0b2794d3320f xen/arch/x86/time.c
--- a/xen/arch/x86/time.c Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/arch/x86/time.c Wed Mar 28 10:38:41 2007 +0100
@@ -670,13 +670,19 @@ static inline void version_update_end(u3
(*version)++;
}
-static inline void __update_vcpu_system_time(struct vcpu *v)
+void update_vcpu_system_time(struct vcpu *v)
{
struct cpu_time *t;
struct vcpu_time_info *u;
+ if ( v->vcpu_info == NULL )
+ return;
+
t = &this_cpu(cpu_time);
u = &vcpu_info(v, time);
+
+ if ( u->tsc_timestamp == t->local_tsc_stamp )
+ return;
version_update_begin(&u->version);
@@ -686,13 +692,6 @@ static inline void __update_vcpu_system_
u->tsc_shift = (s8)t->tsc_scale.shift;
version_update_end(&u->version);
-}
-
-void update_vcpu_system_time(struct vcpu *v)
-{
- if ( vcpu_info(v, time.tsc_timestamp) !=
- this_cpu(cpu_time).local_tsc_stamp )
- __update_vcpu_system_time(v);
}
void update_domain_wallclock_time(struct domain *d)
@@ -771,9 +770,10 @@ static void local_time_calibration(void
local_irq_enable();
#if 0
- printk("PRE%d: tsc=%lld stime=%lld master=%lld\n",
+ printk("PRE%d: tsc=%"PRIu64" stime=%"PRIu64" master=%"PRIu64"\n",
smp_processor_id(), prev_tsc, prev_local_stime, prev_master_stime);
- printk("CUR%d: tsc=%lld stime=%lld master=%lld -> %lld\n",
+ printk("CUR%d: tsc=%"PRIu64" stime=%"PRIu64" master=%"PRIu64
+ " -> %"PRId64"\n",
smp_processor_id(), curr_tsc, curr_local_stime, curr_master_stime,
curr_master_stime - curr_local_stime);
#endif
@@ -854,6 +854,8 @@ static void local_time_calibration(void
t->local_tsc_stamp = curr_tsc;
t->stime_local_stamp = curr_local_stime;
t->stime_master_stamp = curr_master_stime;
+
+ update_vcpu_system_time(current);
out:
set_timer(&t->calibration_timer, NOW() + EPOCH);
diff -r 14aeb7981e4e -r 0b2794d3320f xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/arch/x86/traps.c Wed Mar 28 10:38:41 2007 +0100
@@ -637,28 +637,34 @@ asmlinkage int do_invalid_op(struct cpu_
memcmp(bug.ud2, "\xf\xb", sizeof(bug.ud2)) ||
(bug.ret != 0xc2) )
goto die;
+ eip += sizeof(bug);
id = bug.id & 3;
- if ( id == BUGFRAME_rsvd )
- goto die;
if ( id == BUGFRAME_dump )
{
show_execution_state(regs);
- regs->eip += sizeof(bug);
+ regs->eip = (unsigned long)eip;
return EXCRET_fault_fixed;
}
- /* BUG() or ASSERT(): decode the filename pointer and line number. */
- ASSERT((id == BUGFRAME_bug) || (id == BUGFRAME_assert));
- eip += sizeof(bug);
+ /* WARN, BUG or ASSERT: decode the filename pointer and line number. */
if ( !is_kernel(eip) ||
__copy_from_user(&bug_str, eip, sizeof(bug_str)) ||
memcmp(bug_str.mov, BUG_MOV_STR, sizeof(bug_str.mov)) )
goto die;
+ eip += sizeof(bug_str);
filename = is_kernel(bug_str.str) ? (char *)bug_str.str : "<unknown>";
lineno = bug.id >> 2;
+
+ if ( id == BUGFRAME_warn )
+ {
+ printk("Xen WARN at %.50s:%d\n", filename, lineno);
+ show_execution_state(regs);
+ regs->eip = (unsigned long)eip;
+ return EXCRET_fault_fixed;
+ }
if ( id == BUGFRAME_bug )
{
@@ -668,13 +674,13 @@ asmlinkage int do_invalid_op(struct cpu_
panic("Xen BUG at %.50s:%d\n", filename, lineno);
}
- /* ASSERT(): decode the predicate string pointer. */
+ /* ASSERT: decode the predicate string pointer. */
ASSERT(id == BUGFRAME_assert);
- eip += sizeof(bug_str);
if ( !is_kernel(eip) ||
__copy_from_user(&bug_str, eip, sizeof(bug_str)) ||
memcmp(bug_str.mov, BUG_MOV_STR, sizeof(bug_str.mov)) )
goto die;
+ eip += sizeof(bug_str);
predicate = is_kernel(bug_str.str) ? (char *)bug_str.str : "<unknown>";
printk("Assertion '%s' failed at %.50s:%d\n",
@@ -950,7 +956,7 @@ asmlinkage int do_page_fault(struct cpu_
DEBUGGER_trap_entry(TRAP_page_fault, regs);
- perfc_incrc(page_faults);
+ perfc_incr(page_faults);
if ( unlikely((rc = fixup_page_fault(addr, regs)) != 0) )
return rc;
@@ -962,7 +968,7 @@ asmlinkage int do_page_fault(struct cpu_
if ( likely((fixup = search_exception_table(regs->eip)) != 0) )
{
- perfc_incrc(copy_user_faults);
+ perfc_incr(copy_user_faults);
regs->eip = fixup;
return 0;
}
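
Note: the bug-frame decode above packs a 2-bit frame type (dump/warn/bug/assert) into the low bits of bug.id and the source line number into the remaining bits, which is why the handler recovers them with "bug.id & 3" and "bug.id >> 2". A small standalone illustration of that packing (plain C; the enum values are illustrative stand-ins, not the authoritative BUGFRAME_* definitions):

    #include <stdio.h>

    /* Illustrative stand-ins for the four frame types handled above. */
    enum { FRAME_dump, FRAME_warn, FRAME_bug, FRAME_assert };

    /* Pack a frame type and line number the way the bug frame's 'id' field
     * is laid out: type in bits 0-1, line number in bits 2 and up. */
    static unsigned int pack_id(unsigned int type, unsigned int lineno)
    {
        return (lineno << 2) | (type & 3);
    }

    int main(void)
    {
        unsigned int id = pack_id(FRAME_warn, 1234);

        printf("type=%u line=%u\n", id & 3, id >> 2); /* type=1 line=1234 */
        return 0;
    }
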
diff -r 14aeb7981e4e -r 0b2794d3320f xen/arch/x86/x86_32/asm-offsets.c
--- a/xen/arch/x86/x86_32/asm-offsets.c Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/arch/x86/x86_32/asm-offsets.c Wed Mar 28 10:38:41 2007 +0100
@@ -107,20 +107,10 @@ void __dummy__(void)
BLANK();
#if PERF_COUNTERS
- OFFSET(PERFC_hypercalls, struct perfcounter, hypercalls);
- OFFSET(PERFC_exceptions, struct perfcounter, exceptions);
+ DEFINE(PERFC_hypercalls, PERFC_hypercalls);
+ DEFINE(PERFC_exceptions, PERFC_exceptions);
BLANK();
#endif
-
- OFFSET(MULTICALL_op, struct multicall_entry, op);
- OFFSET(MULTICALL_arg0, struct multicall_entry, args[0]);
- OFFSET(MULTICALL_arg1, struct multicall_entry, args[1]);
- OFFSET(MULTICALL_arg2, struct multicall_entry, args[2]);
- OFFSET(MULTICALL_arg3, struct multicall_entry, args[3]);
- OFFSET(MULTICALL_arg4, struct multicall_entry, args[4]);
- OFFSET(MULTICALL_arg5, struct multicall_entry, args[5]);
- OFFSET(MULTICALL_result, struct multicall_entry, result);
- BLANK();
DEFINE(FIXMAP_apic_base, fix_to_virt(FIX_APIC_BASE));
BLANK();
diff -r 14aeb7981e4e -r 0b2794d3320f xen/arch/x86/x86_32/domain_page.c
--- a/xen/arch/x86/x86_32/domain_page.c Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/arch/x86/x86_32/domain_page.c Wed Mar 28 10:38:41 2007 +0100
@@ -50,7 +50,7 @@ void *map_domain_page(unsigned long mfn)
ASSERT(!in_irq());
- perfc_incrc(map_domain_page_count);
+ perfc_incr(map_domain_page_count);
v = mapcache_current_vcpu();
@@ -76,7 +76,7 @@ void *map_domain_page(unsigned long mfn)
cache->shadow_epoch[vcpu] = cache->epoch;
if ( NEED_FLUSH(this_cpu(tlbflush_time), cache->tlbflush_timestamp) )
{
- perfc_incrc(domain_page_tlb_flush);
+ perfc_incr(domain_page_tlb_flush);
local_flush_tlb();
}
}
@@ -92,7 +92,7 @@ void *map_domain_page(unsigned long mfn)
}
/* /Second/, flush TLBs. */
- perfc_incrc(domain_page_tlb_flush);
+ perfc_incr(domain_page_tlb_flush);
local_flush_tlb();
cache->shadow_epoch[vcpu] = ++cache->epoch;
cache->tlbflush_timestamp = tlbflush_current_time();
diff -r 14aeb7981e4e -r 0b2794d3320f xen/arch/x86/x86_32/entry.S
--- a/xen/arch/x86/x86_32/entry.S Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/arch/x86/x86_32/entry.S Wed Mar 28 10:38:41 2007 +0100
@@ -173,7 +173,7 @@ ENTRY(hypercall)
GET_CURRENT(%ebx)
cmpl $NR_hypercalls,%eax
jae bad_hypercall
- PERFC_INCR(PERFC_hypercalls, %eax)
+ PERFC_INCR(PERFC_hypercalls, %eax, %ebx)
#ifndef NDEBUG
/* Create shadow parameters and corrupt those not used by this call. */
pushl %eax
@@ -429,7 +429,7 @@ 1: xorl %eax,%eax
movl %esp,%edx
pushl %edx # push the cpu_user_regs pointer
GET_CURRENT(%ebx)
- PERFC_INCR(PERFC_exceptions, %eax)
+ PERFC_INCR(PERFC_exceptions, %eax, %ebx)
call *exception_table(,%eax,4)
addl $4,%esp
movl UREGS_eflags(%esp),%eax
diff -r 14aeb7981e4e -r 0b2794d3320f xen/arch/x86/x86_32/seg_fixup.c
--- a/xen/arch/x86/x86_32/seg_fixup.c Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/arch/x86/x86_32/seg_fixup.c Wed Mar 28 10:38:41 2007 +0100
@@ -434,7 +434,7 @@ int gpf_emulate_4gb(struct cpu_user_regs
goto fail;
/* Success! */
- perfc_incrc(seg_fixups);
+ perfc_incr(seg_fixups);
/* If requested, give a callback on otherwise unused vector 15. */
if ( VM_ASSIST(d->domain, VMASST_TYPE_4gb_segments_notify) )
diff -r 14aeb7981e4e -r 0b2794d3320f xen/arch/x86/x86_64/asm-offsets.c
--- a/xen/arch/x86/x86_64/asm-offsets.c Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/arch/x86/x86_64/asm-offsets.c Wed Mar 28 10:38:41 2007 +0100
@@ -121,30 +121,8 @@ void __dummy__(void)
BLANK();
#if PERF_COUNTERS
- OFFSET(PERFC_hypercalls, struct perfcounter, hypercalls);
- OFFSET(PERFC_exceptions, struct perfcounter, exceptions);
- BLANK();
-#endif
-
- OFFSET(MULTICALL_op, struct multicall_entry, op);
- OFFSET(MULTICALL_arg0, struct multicall_entry, args[0]);
- OFFSET(MULTICALL_arg1, struct multicall_entry, args[1]);
- OFFSET(MULTICALL_arg2, struct multicall_entry, args[2]);
- OFFSET(MULTICALL_arg3, struct multicall_entry, args[3]);
- OFFSET(MULTICALL_arg4, struct multicall_entry, args[4]);
- OFFSET(MULTICALL_arg5, struct multicall_entry, args[5]);
- OFFSET(MULTICALL_result, struct multicall_entry, result);
- BLANK();
-
-#ifdef CONFIG_COMPAT
- OFFSET(COMPAT_MULTICALL_op, struct compat_multicall_entry, op);
- OFFSET(COMPAT_MULTICALL_arg0, struct compat_multicall_entry, args[0]);
- OFFSET(COMPAT_MULTICALL_arg1, struct compat_multicall_entry, args[1]);
- OFFSET(COMPAT_MULTICALL_arg2, struct compat_multicall_entry, args[2]);
- OFFSET(COMPAT_MULTICALL_arg3, struct compat_multicall_entry, args[3]);
- OFFSET(COMPAT_MULTICALL_arg4, struct compat_multicall_entry, args[4]);
- OFFSET(COMPAT_MULTICALL_arg5, struct compat_multicall_entry, args[5]);
- OFFSET(COMPAT_MULTICALL_result, struct compat_multicall_entry, result);
+ DEFINE(PERFC_hypercalls, PERFC_hypercalls);
+ DEFINE(PERFC_exceptions, PERFC_exceptions);
BLANK();
#endif
diff -r 14aeb7981e4e -r 0b2794d3320f xen/arch/x86/x86_64/compat/entry.S
--- a/xen/arch/x86/x86_64/compat/entry.S Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/arch/x86/x86_64/compat/entry.S Wed Mar 28 10:38:41 2007 +0100
@@ -57,7 +57,7 @@ ENTRY(compat_hypercall)
movl UREGS_rbx(%rsp),%edi /* Arg 1 */
#endif
leaq compat_hypercall_table(%rip),%r10
- PERFC_INCR(PERFC_hypercalls, %rax)
+ PERFC_INCR(PERFC_hypercalls, %rax, %rbx)
callq *(%r10,%rax,8)
#ifndef NDEBUG
/* Deliberately corrupt parameter regs used by this hypercall. */
diff -r 14aeb7981e4e -r 0b2794d3320f xen/arch/x86/x86_64/entry.S
--- a/xen/arch/x86/x86_64/entry.S Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/arch/x86/x86_64/entry.S Wed Mar 28 10:38:41 2007 +0100
@@ -147,7 +147,7 @@ ENTRY(syscall_enter)
pushq UREGS_rip+8(%rsp)
#endif
leaq hypercall_table(%rip),%r10
- PERFC_INCR(PERFC_hypercalls, %rax)
+ PERFC_INCR(PERFC_hypercalls, %rax, %rbx)
callq *(%r10,%rax,8)
#ifndef NDEBUG
/* Deliberately corrupt parameter regs used by this hypercall. */
@@ -396,7 +396,7 @@ 1: movq %rsp,%rdi
movl UREGS_entry_vector(%rsp),%eax
leaq exception_table(%rip),%rdx
GET_CURRENT(%rbx)
- PERFC_INCR(PERFC_exceptions, %rax)
+ PERFC_INCR(PERFC_exceptions, %rax, %rbx)
callq *(%rdx,%rax,8)
testb $3,UREGS_cs(%rsp)
jz restore_all_xen
diff -r 14aeb7981e4e -r 0b2794d3320f xen/arch/x86/x86_emulate.c
--- a/xen/arch/x86/x86_emulate.c Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/arch/x86/x86_emulate.c Wed Mar 28 10:38:41 2007 +0100
@@ -1565,8 +1565,10 @@ x86_emulate(
if ( ((op_bytes = dst.bytes) != 8) && mode_64bit() )
{
dst.bytes = op_bytes = 8;
- if ( (rc = ops->read(dst.mem.seg, dst.mem.off,
- &dst.val, 8, ctxt)) != 0 )
+ if ( dst.type == OP_REG )
+ dst.val = *dst.reg;
+ else if ( (rc = ops->read(dst.mem.seg, dst.mem.off,
+ &dst.val, 8, ctxt)) != 0 )
goto done;
}
src.val = _regs.eip;
@@ -1579,8 +1581,10 @@ x86_emulate(
if ( mode_64bit() && (dst.bytes == 4) )
{
dst.bytes = 8;
- if ( (rc = ops->read(dst.mem.seg, dst.mem.off,
- &dst.val, 8, ctxt)) != 0 )
+ if ( dst.type == OP_REG )
+ dst.val = *dst.reg;
+ else if ( (rc = ops->read(dst.mem.seg, dst.mem.off,
+ &dst.val, 8, ctxt)) != 0 )
goto done;
}
if ( (rc = ops->write(x86_seg_ss, sp_pre_dec(dst.bytes),
diff -r 14aeb7981e4e -r 0b2794d3320f xen/common/domain.c
--- a/xen/common/domain.c Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/common/domain.c Wed Mar 28 10:38:41 2007 +0100
@@ -96,14 +96,16 @@ struct vcpu *alloc_vcpu(
v->domain = d;
v->vcpu_id = vcpu_id;
- v->vcpu_info = shared_info_addr(d, vcpu_info[vcpu_id]);
spin_lock_init(&v->pause_lock);
v->runstate.state = is_idle_vcpu(v) ? RUNSTATE_running : RUNSTATE_offline;
v->runstate.state_entry_time = NOW();
if ( !is_idle_domain(d) )
+ {
set_bit(_VCPUF_down, &v->vcpu_flags);
+ v->vcpu_info = shared_info_addr(d, vcpu_info[vcpu_id]);
+ }
if ( sched_init_vcpu(v, cpu_id) != 0 )
{
diff -r 14aeb7981e4e -r 0b2794d3320f xen/common/multicall.c
--- a/xen/common/multicall.c Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/common/multicall.c Wed Mar 28 10:38:41 2007 +0100
@@ -10,6 +10,7 @@
#include <xen/event.h>
#include <xen/multicall.h>
#include <xen/guest_access.h>
+#include <xen/perfc.h>
#include <asm/current.h>
#include <asm/hardirq.h>
@@ -69,14 +70,18 @@ do_multicall(
guest_handle_add_offset(call_list, 1);
}
+ perfc_incr(calls_to_multicall);
+ perfc_add(calls_from_multicall, nr_calls);
mcs->flags = 0;
return 0;
fault:
+ perfc_incr(calls_to_multicall);
mcs->flags = 0;
return -EFAULT;
preempted:
+ perfc_add(calls_from_multicall, i);
mcs->flags = 0;
return hypercall_create_continuation(
__HYPERVISOR_multicall, "hi", call_list, nr_calls-i);
diff -r 14aeb7981e4e -r 0b2794d3320f xen/common/page_alloc.c
--- a/xen/common/page_alloc.c Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/common/page_alloc.c Wed Mar 28 10:38:41 2007 +0100
@@ -423,7 +423,7 @@ static struct page_info *alloc_heap_page
if ( unlikely(!cpus_empty(mask)) )
{
- perfc_incrc(need_flush_tlb_flush);
+ perfc_incr(need_flush_tlb_flush);
flush_tlb_mask(mask);
}
diff -r 14aeb7981e4e -r 0b2794d3320f xen/common/perfc.c
--- a/xen/common/perfc.c Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/common/perfc.c Wed Mar 28 10:38:41 2007 +0100
@@ -10,81 +10,98 @@
#include <public/sysctl.h>
#include <asm/perfc.h>
-#undef PERFCOUNTER
-#undef PERFCOUNTER_CPU
-#undef PERFCOUNTER_ARRAY
-#undef PERFSTATUS
-#undef PERFSTATUS_CPU
-#undef PERFSTATUS_ARRAY
#define PERFCOUNTER( var, name ) { name, TYPE_SINGLE, 0 },
-#define PERFCOUNTER_CPU( var, name ) { name, TYPE_CPU, 0 },
#define PERFCOUNTER_ARRAY( var, name, size ) { name, TYPE_ARRAY, size },
#define PERFSTATUS( var, name ) { name, TYPE_S_SINGLE, 0 },
-#define PERFSTATUS_CPU( var, name ) { name, TYPE_S_CPU, 0 },
#define PERFSTATUS_ARRAY( var, name, size ) { name, TYPE_S_ARRAY, size },
-static struct {
- char *name;
- enum { TYPE_SINGLE, TYPE_CPU, TYPE_ARRAY,
- TYPE_S_SINGLE, TYPE_S_CPU, TYPE_S_ARRAY
+static const struct {
+ const char *name;
+ enum { TYPE_SINGLE, TYPE_ARRAY,
+ TYPE_S_SINGLE, TYPE_S_ARRAY
} type;
- int nr_elements;
+ unsigned int nr_elements;
} perfc_info[] = {
#include <xen/perfc_defn.h>
};
#define NR_PERFCTRS (sizeof(perfc_info) / sizeof(perfc_info[0]))
-struct perfcounter perfcounters;
+DEFINE_PER_CPU(perfc_t[NUM_PERFCOUNTERS], perfcounters);
void perfc_printall(unsigned char key)
{
- unsigned int i, j, sum;
+ unsigned int i, j;
s_time_t now = NOW();
- atomic_t *counters = (atomic_t *)&perfcounters;
printk("Xen performance counters SHOW (now = 0x%08X:%08X)\n",
(u32)(now>>32), (u32)now);
- for ( i = 0; i < NR_PERFCTRS; i++ )
- {
+ for ( i = j = 0; i < NR_PERFCTRS; i++ )
+ {
+ unsigned int k, cpu;
+ unsigned long long sum = 0;
+
printk("%-32s ", perfc_info[i].name);
switch ( perfc_info[i].type )
{
case TYPE_SINGLE:
case TYPE_S_SINGLE:
- printk("TOTAL[%10d]", atomic_read(&counters[0]));
- counters += 1;
- break;
- case TYPE_CPU:
- case TYPE_S_CPU:
- sum = 0;
- for_each_online_cpu ( j )
- sum += atomic_read(&counters[j]);
- printk("TOTAL[%10u]", sum);
+ for_each_online_cpu ( cpu )
+ sum += per_cpu(perfcounters, cpu)[j];
+ printk("TOTAL[%12Lu]", sum);
+ if ( sum )
+ {
+ k = 0;
+ for_each_online_cpu ( cpu )
+ {
+ if ( k > 0 && (k % 4) == 0 )
+ printk("\n%46s", "");
+                printk(" CPU%02u[%10"PRIperfc"u]", cpu, per_cpu(perfcounters, cpu)[j]);
+ ++k;
+ }
+ }
+ ++j;
+ break;
+ case TYPE_ARRAY:
+ case TYPE_S_ARRAY:
+ for_each_online_cpu ( cpu )
+ {
+ perfc_t *counters = per_cpu(perfcounters, cpu) + j;
+
+ for ( k = 0; k < perfc_info[i].nr_elements; k++ )
+ sum += counters[k];
+ }
+ printk("TOTAL[%12Lu]", sum);
if (sum)
{
- for_each_online_cpu ( j )
- printk(" CPU%02d[%10d]", j, atomic_read(&counters[j]));
- }
- counters += NR_CPUS;
- break;
- case TYPE_ARRAY:
- case TYPE_S_ARRAY:
- for ( j = sum = 0; j < perfc_info[i].nr_elements; j++ )
- sum += atomic_read(&counters[j]);
- printk("TOTAL[%10u]", sum);
#ifdef PERF_ARRAYS
- if (sum)
- {
- for ( j = 0; j < perfc_info[i].nr_elements; j++ )
- {
- if ( (j % 4) == 0 )
- printk("\n ");
- printk(" ARR%02d[%10d]", j, atomic_read(&counters[j]));
- }
- }
+ for ( k = 0; k < perfc_info[i].nr_elements; k++ )
+ {
+ sum = 0;
+ for_each_online_cpu ( cpu )
+ sum += per_cpu(perfcounters, cpu)[j + k];
+ if ( (k % 4) == 0 )
+ printk("\n%16s", "");
+ printk(" ARR%02u[%10Lu]", k, sum);
+ }
+#else
+ k = 0;
+ for_each_online_cpu ( cpu )
+ {
+ perfc_t *counters = per_cpu(perfcounters, cpu) + j;
+ unsigned int n;
+
+ sum = 0;
+ for ( n = 0; n < perfc_info[i].nr_elements; n++ )
+ sum += counters[n];
+ if ( k > 0 && (k % 4) == 0 )
+ printk("\n%46s", "");
+ printk(" CPU%02u[%10Lu]", cpu, sum);
+ ++k;
+ }
#endif
- counters += j;
+ }
+ j += perfc_info[i].nr_elements;
break;
}
printk("\n");
@@ -97,7 +114,6 @@ void perfc_reset(unsigned char key)
{
unsigned int i, j;
s_time_t now = NOW();
- atomic_t *counters = (atomic_t *)&perfcounters;
if ( key != '\0' )
printk("Xen performance counters RESET (now = 0x%08X:%08X)\n",
@@ -105,43 +121,39 @@ void perfc_reset(unsigned char key)
/* leave STATUS counters alone -- don't reset */
- for ( i = 0; i < NR_PERFCTRS; i++ )
- {
- switch ( perfc_info[i].type )
- {
- case TYPE_SINGLE:
- atomic_set(&counters[0],0);
- case TYPE_S_SINGLE:
- counters += 1;
- break;
- case TYPE_CPU:
- for ( j = 0; j < NR_CPUS; j++ )
- atomic_set(&counters[j],0);
- case TYPE_S_CPU:
- counters += NR_CPUS;
- break;
- case TYPE_ARRAY:
- for ( j = 0; j < perfc_info[i].nr_elements; j++ )
- atomic_set(&counters[j],0);
- case TYPE_S_ARRAY:
- counters += perfc_info[i].nr_elements;
- break;
- }
- }
-
- arch_perfc_reset ();
+ for ( i = j = 0; i < NR_PERFCTRS; i++ )
+ {
+ unsigned int cpu;
+
+ switch ( perfc_info[i].type )
+ {
+ case TYPE_SINGLE:
+ for_each_cpu ( cpu )
+ per_cpu(perfcounters, cpu)[j] = 0;
+ case TYPE_S_SINGLE:
+ ++j;
+ break;
+ case TYPE_ARRAY:
+ for_each_cpu ( cpu )
+ memset(per_cpu(perfcounters, cpu) + j, 0,
+ perfc_info[i].nr_elements * sizeof(perfc_t));
+ case TYPE_S_ARRAY:
+ j += perfc_info[i].nr_elements;
+ break;
+ }
+ }
+
+ arch_perfc_reset();
}
static xen_sysctl_perfc_desc_t perfc_d[NR_PERFCTRS];
static xen_sysctl_perfc_val_t *perfc_vals;
-static int perfc_nbr_vals;
+static unsigned int perfc_nbr_vals;
static int perfc_init = 0;
static int perfc_copy_info(XEN_GUEST_HANDLE_64(xen_sysctl_perfc_desc_t) desc,
XEN_GUEST_HANDLE_64(xen_sysctl_perfc_val_t) val)
{
- unsigned int i, j;
- unsigned int v = 0;
- atomic_t *counters = (atomic_t *)&perfcounters;
+ unsigned int i, j, v;
/* We only copy the name and array-size information once. */
if ( !perfc_init )
@@ -154,11 +166,7 @@ static int perfc_copy_info(XEN_GUEST_HAN
{
case TYPE_SINGLE:
case TYPE_S_SINGLE:
- perfc_d[i].nr_vals = 1;
- break;
- case TYPE_CPU:
- case TYPE_S_CPU:
- perfc_d[i].nr_vals = num_online_cpus();
+ perfc_d[i].nr_vals = num_possible_cpus();
break;
case TYPE_ARRAY:
case TYPE_S_ARRAY:
@@ -181,26 +189,31 @@ static int perfc_copy_info(XEN_GUEST_HAN
arch_perfc_gather();
/* We gather the counts together every time. */
- for ( i = 0; i < NR_PERFCTRS; i++ )
- {
- switch ( perfc_info[i].type )
- {
- case TYPE_SINGLE:
- case TYPE_S_SINGLE:
- perfc_vals[v++] = atomic_read(&counters[0]);
- counters += 1;
- break;
- case TYPE_CPU:
- case TYPE_S_CPU:
- for ( j = 0; j < perfc_d[i].nr_vals; j++ )
- perfc_vals[v++] = atomic_read(&counters[j]);
- counters += NR_CPUS;
- break;
- case TYPE_ARRAY:
- case TYPE_S_ARRAY:
- for ( j = 0; j < perfc_d[i].nr_vals; j++ )
- perfc_vals[v++] = atomic_read(&counters[j]);
- counters += perfc_info[i].nr_elements;
+ for ( i = j = v = 0; i < NR_PERFCTRS; i++ )
+ {
+ unsigned int cpu;
+
+ switch ( perfc_info[i].type )
+ {
+ case TYPE_SINGLE:
+ case TYPE_S_SINGLE:
+ for_each_cpu ( cpu )
+ perfc_vals[v++] = per_cpu(perfcounters, cpu)[j];
+ ++j;
+ break;
+ case TYPE_ARRAY:
+ case TYPE_S_ARRAY:
+            memset(perfc_vals + v, 0, perfc_d[i].nr_vals * sizeof(*perfc_vals));
+ for_each_cpu ( cpu )
+ {
+ perfc_t *counters = per_cpu(perfcounters, cpu) + j;
+ unsigned int k;
+
+ for ( k = 0; k < perfc_d[i].nr_vals; k++ )
+ perfc_vals[v + k] += counters[k];
+ }
+ v += perfc_d[i].nr_vals;
+ j += perfc_info[i].nr_elements;
break;
}
}
@@ -224,14 +237,12 @@ int perfc_control(xen_sysctl_perfc_op_t
switch ( pc->cmd )
{
case XEN_SYSCTL_PERFCOP_reset:
- perfc_copy_info(pc->desc, pc->val);
+ rc = perfc_copy_info(pc->desc, pc->val);
perfc_reset(0);
- rc = 0;
break;
case XEN_SYSCTL_PERFCOP_query:
- perfc_copy_info(pc->desc, pc->val);
- rc = 0;
+ rc = perfc_copy_info(pc->desc, pc->val);
break;
default:
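
Note: perfc.c above moves from a single global counter structure to one perfc_t array per CPU (DEFINE_PER_CPU(perfc_t[NUM_PERFCOUNTERS], perfcounters)); each CPU bumps only its own slots, and totals are summed across CPUs when printing or copying to the sysctl buffer. A minimal standalone model of that scheme (plain arrays standing in for the per-CPU machinery; none of the Xen macros are used):

    #include <stdio.h>

    #define NR_CPUS      4
    #define NR_COUNTERS  2

    /* Stand-in for the per-CPU counter arrays: one row per CPU. */
    static unsigned long perfcounters[NR_CPUS][NR_COUNTERS];

    /* A CPU increments only its own row, so no atomic ops are needed. */
    static void counter_incr(int cpu, int idx)
    {
        perfcounters[cpu][idx]++;
    }

    /* Totals are computed lazily, at reporting time, by summing the rows. */
    static unsigned long long counter_total(int idx)
    {
        unsigned long long sum = 0;
        int cpu;

        for ( cpu = 0; cpu < NR_CPUS; cpu++ )
            sum += perfcounters[cpu][idx];
        return sum;
    }

    int main(void)
    {
        counter_incr(0, 0);
        counter_incr(1, 0);
        counter_incr(1, 1);
        printf("counter 0 total = %llu\n", counter_total(0)); /* 2 */
        printf("counter 1 total = %llu\n", counter_total(1)); /* 1 */
        return 0;
    }
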
diff -r 14aeb7981e4e -r 0b2794d3320f xen/common/schedule.c
--- a/xen/common/schedule.c Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/common/schedule.c Wed Mar 28 10:38:41 2007 +0100
@@ -606,7 +606,7 @@ static void schedule(void)
ASSERT(!in_irq());
ASSERT(this_cpu(mc_state).flags == 0);
- perfc_incrc(sched_run);
+ perfc_incr(sched_run);
sd = &this_cpu(schedule_data);
@@ -654,16 +654,13 @@ static void schedule(void)
spin_unlock_irq(&sd->schedule_lock);
- perfc_incrc(sched_ctx);
+ perfc_incr(sched_ctx);
stop_timer(&prev->periodic_timer);
/* Ensure that the domain has an up-to-date time base. */
- if ( !is_idle_vcpu(next) )
- {
- update_vcpu_system_time(next);
- vcpu_periodic_timer_work(next);
- }
+ update_vcpu_system_time(next);
+ vcpu_periodic_timer_work(next);
TRACE_4D(TRC_SCHED_SWITCH,
prev->domain->domain_id, prev->vcpu_id,
@@ -684,7 +681,7 @@ static void s_timer_fn(void *unused)
static void s_timer_fn(void *unused)
{
raise_softirq(SCHEDULE_SOFTIRQ);
- perfc_incrc(sched_irq);
+ perfc_incr(sched_irq);
}
/* Per-VCPU periodic timer function: sends a virtual timer interrupt. */
diff -r 14aeb7981e4e -r 0b2794d3320f xen/drivers/char/console.c
--- a/xen/drivers/char/console.c Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/drivers/char/console.c Wed Mar 28 10:38:41 2007 +0100
@@ -900,10 +900,16 @@ void __bug(char *file, int line)
void __bug(char *file, int line)
{
console_start_sync();
- printk("BUG at %s:%d\n", file, line);
+ printk("Xen BUG at %s:%d\n", file, line);
dump_execution_state();
- panic("BUG at %s:%d\n", file, line);
+ panic("Xen BUG at %s:%d\n", file, line);
for ( ; ; ) ;
+}
+
+void __warn(char *file, int line)
+{
+ printk("Xen WARN at %s:%d\n", file, line);
+ dump_execution_state();
}
/*
diff -r 14aeb7981e4e -r 0b2794d3320f xen/include/asm-ia64/bug.h
--- a/xen/include/asm-ia64/bug.h Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/include/asm-ia64/bug.h Wed Mar 28 10:38:41 2007 +0100
@@ -2,5 +2,6 @@
#define __IA64_BUG_H__
#define BUG() __bug(__FILE__, __LINE__)
+#define WARN() __warn(__FILE__, __LINE__)
#endif /* __IA64_BUG_H__ */
diff -r 14aeb7981e4e -r 0b2794d3320f xen/include/asm-ia64/linux-xen/asm/asmmacro.h
--- a/xen/include/asm-ia64/linux-xen/asm/asmmacro.h Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/include/asm-ia64/linux-xen/asm/asmmacro.h Wed Mar 28 10:38:41 2007 +0100
@@ -116,4 +116,8 @@ 2:{ .mib; \
# define dv_serialize_instruction
#endif
+#ifdef PERF_COUNTERS
+#define PERFC(n) (THIS_CPU(perfcounters) + (IA64_PERFC_ ## n) * 4)
+#endif
+
#endif /* _ASM_IA64_ASMMACRO_H */
diff -r 14aeb7981e4e -r 0b2794d3320f xen/include/asm-ia64/linux-xen/asm/iosapic.h
--- a/xen/include/asm-ia64/linux-xen/asm/iosapic.h Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/include/asm-ia64/linux-xen/asm/iosapic.h Wed Mar 28 10:38:41 2007 +0100
@@ -123,13 +123,6 @@ static inline void list_move(struct list
#define move_irq(x)
-#define WARN_ON(condition) do { \
- if (unlikely((condition)!=0)) { \
-               printk("Badness in %s at %s:%d\n", __FUNCTION__, __FILE__, __LINE__); \
- dump_stack(); \
- } \
-} while (0)
-
#ifdef nop
#undef nop
#endif
diff -r 14aeb7981e4e -r 0b2794d3320f xen/include/asm-ia64/perfc_defn.h
--- a/xen/include/asm-ia64/perfc_defn.h Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/include/asm-ia64/perfc_defn.h Wed Mar 28 10:38:41 2007 +0100
@@ -1,34 +1,34 @@
/* This file is legitimately included multiple times. */
-PERFCOUNTER_CPU(dtlb_translate, "dtlb hit")
+PERFCOUNTER(dtlb_translate, "dtlb hit")
-PERFCOUNTER_CPU(tr_translate, "TR hit")
+PERFCOUNTER(tr_translate, "TR hit")
-PERFCOUNTER_CPU(vhpt_translate, "virtual vhpt translation")
-PERFCOUNTER_CPU(fast_vhpt_translate, "virtual vhpt fast translation")
+PERFCOUNTER(vhpt_translate, "virtual vhpt translation")
+PERFCOUNTER(fast_vhpt_translate, "virtual vhpt fast translation")
PERFCOUNTER(recover_to_page_fault, "recoveries to page fault")
PERFCOUNTER(recover_to_break_fault, "recoveries to break fault")
-PERFCOUNTER_CPU(phys_translate, "metaphysical translation")
+PERFCOUNTER(phys_translate, "metaphysical translation")
-PERFCOUNTER_CPU(idle_when_pending, "vcpu idle at event")
+PERFCOUNTER(idle_when_pending, "vcpu idle at event")
-PERFCOUNTER_CPU(pal_halt_light, "calls to pal_halt_light")
+PERFCOUNTER(pal_halt_light, "calls to pal_halt_light")
-PERFCOUNTER_CPU(lazy_cover, "lazy cover")
+PERFCOUNTER(lazy_cover, "lazy cover")
-PERFCOUNTER_CPU(mov_to_ar_imm, "privop mov_to_ar_imm")
-PERFCOUNTER_CPU(mov_to_ar_reg, "privop mov_to_ar_reg")
-PERFCOUNTER_CPU(mov_from_ar, "privop privified-mov_from_ar")
-PERFCOUNTER_CPU(ssm, "privop ssm")
-PERFCOUNTER_CPU(rsm, "privop rsm")
-PERFCOUNTER_CPU(rfi, "privop rfi")
-PERFCOUNTER_CPU(bsw0, "privop bsw0")
-PERFCOUNTER_CPU(bsw1, "privop bsw1")
-PERFCOUNTER_CPU(cover, "privop cover")
-PERFCOUNTER_CPU(fc, "privop privified-fc")
-PERFCOUNTER_CPU(cpuid, "privop privified-cpuid")
+PERFCOUNTER(mov_to_ar_imm, "privop mov_to_ar_imm")
+PERFCOUNTER(mov_to_ar_reg, "privop mov_to_ar_reg")
+PERFCOUNTER(mov_from_ar, "privop privified-mov_from_ar")
+PERFCOUNTER(ssm, "privop ssm")
+PERFCOUNTER(rsm, "privop rsm")
+PERFCOUNTER(rfi, "privop rfi")
+PERFCOUNTER(bsw0, "privop bsw0")
+PERFCOUNTER(bsw1, "privop bsw1")
+PERFCOUNTER(cover, "privop cover")
+PERFCOUNTER(fc, "privop privified-fc")
+PERFCOUNTER(cpuid, "privop privified-cpuid")
PERFCOUNTER_ARRAY(mov_to_cr, "privop mov to cr", 128)
PERFCOUNTER_ARRAY(mov_from_cr, "privop mov from cr", 128)
@@ -36,45 +36,45 @@ PERFCOUNTER_ARRAY(misc_privop, "p
PERFCOUNTER_ARRAY(misc_privop, "privop misc", 64)
// privileged instructions to fall into vmx_entry
-PERFCOUNTER_CPU(vmx_rsm, "vmx privop rsm")
-PERFCOUNTER_CPU(vmx_ssm, "vmx privop ssm")
-PERFCOUNTER_CPU(vmx_mov_to_psr, "vmx privop mov_to_psr")
-PERFCOUNTER_CPU(vmx_mov_from_psr, "vmx privop mov_from_psr")
-PERFCOUNTER_CPU(vmx_mov_from_cr, "vmx privop mov_from_cr")
-PERFCOUNTER_CPU(vmx_mov_to_cr, "vmx privop mov_to_cr")
-PERFCOUNTER_CPU(vmx_bsw0, "vmx privop bsw0")
-PERFCOUNTER_CPU(vmx_bsw1, "vmx privop bsw1")
-PERFCOUNTER_CPU(vmx_cover, "vmx privop cover")
-PERFCOUNTER_CPU(vmx_rfi, "vmx privop rfi")
-PERFCOUNTER_CPU(vmx_itr_d, "vmx privop itr_d")
-PERFCOUNTER_CPU(vmx_itr_i, "vmx privop itr_i")
-PERFCOUNTER_CPU(vmx_ptr_d, "vmx privop ptr_d")
-PERFCOUNTER_CPU(vmx_ptr_i, "vmx privop ptr_i")
-PERFCOUNTER_CPU(vmx_itc_d, "vmx privop itc_d")
-PERFCOUNTER_CPU(vmx_itc_i, "vmx privop itc_i")
-PERFCOUNTER_CPU(vmx_ptc_l, "vmx privop ptc_l")
-PERFCOUNTER_CPU(vmx_ptc_g, "vmx privop ptc_g")
-PERFCOUNTER_CPU(vmx_ptc_ga, "vmx privop ptc_ga")
-PERFCOUNTER_CPU(vmx_ptc_e, "vmx privop ptc_e")
-PERFCOUNTER_CPU(vmx_mov_to_rr, "vmx privop mov_to_rr")
-PERFCOUNTER_CPU(vmx_mov_from_rr, "vmx privop mov_from_rr")
-PERFCOUNTER_CPU(vmx_thash, "vmx privop thash")
-PERFCOUNTER_CPU(vmx_ttag, "vmx privop ttag")
-PERFCOUNTER_CPU(vmx_tpa, "vmx privop tpa")
-PERFCOUNTER_CPU(vmx_tak, "vmx privop tak")
-PERFCOUNTER_CPU(vmx_mov_to_ar_imm, "vmx privop mov_to_ar_imm")
-PERFCOUNTER_CPU(vmx_mov_to_ar_reg, "vmx privop mov_to_ar_reg")
-PERFCOUNTER_CPU(vmx_mov_from_ar_reg, "vmx privop mov_from_ar_reg")
-PERFCOUNTER_CPU(vmx_mov_to_dbr, "vmx privop mov_to_dbr")
-PERFCOUNTER_CPU(vmx_mov_to_ibr, "vmx privop mov_to_ibr")
-PERFCOUNTER_CPU(vmx_mov_to_pmc, "vmx privop mov_to_pmc")
-PERFCOUNTER_CPU(vmx_mov_to_pmd, "vmx privop mov_to_pmd")
-PERFCOUNTER_CPU(vmx_mov_to_pkr, "vmx privop mov_to_pkr")
-PERFCOUNTER_CPU(vmx_mov_from_dbr, "vmx privop mov_from_dbr")
-PERFCOUNTER_CPU(vmx_mov_from_ibr, "vmx privop mov_from_ibr")
-PERFCOUNTER_CPU(vmx_mov_from_pmc, "vmx privop mov_from_pmc")
-PERFCOUNTER_CPU(vmx_mov_from_pkr, "vmx privop mov_from_pkr")
-PERFCOUNTER_CPU(vmx_mov_from_cpuid, "vmx privop mov_from_cpuid")
+PERFCOUNTER(vmx_rsm, "vmx privop rsm")
+PERFCOUNTER(vmx_ssm, "vmx privop ssm")
+PERFCOUNTER(vmx_mov_to_psr, "vmx privop mov_to_psr")
+PERFCOUNTER(vmx_mov_from_psr, "vmx privop mov_from_psr")
+PERFCOUNTER(vmx_mov_from_cr, "vmx privop mov_from_cr")
+PERFCOUNTER(vmx_mov_to_cr, "vmx privop mov_to_cr")
+PERFCOUNTER(vmx_bsw0, "vmx privop bsw0")
+PERFCOUNTER(vmx_bsw1, "vmx privop bsw1")
+PERFCOUNTER(vmx_cover, "vmx privop cover")
+PERFCOUNTER(vmx_rfi, "vmx privop rfi")
+PERFCOUNTER(vmx_itr_d, "vmx privop itr_d")
+PERFCOUNTER(vmx_itr_i, "vmx privop itr_i")
+PERFCOUNTER(vmx_ptr_d, "vmx privop ptr_d")
+PERFCOUNTER(vmx_ptr_i, "vmx privop ptr_i")
+PERFCOUNTER(vmx_itc_d, "vmx privop itc_d")
+PERFCOUNTER(vmx_itc_i, "vmx privop itc_i")
+PERFCOUNTER(vmx_ptc_l, "vmx privop ptc_l")
+PERFCOUNTER(vmx_ptc_g, "vmx privop ptc_g")
+PERFCOUNTER(vmx_ptc_ga, "vmx privop ptc_ga")
+PERFCOUNTER(vmx_ptc_e, "vmx privop ptc_e")
+PERFCOUNTER(vmx_mov_to_rr, "vmx privop mov_to_rr")
+PERFCOUNTER(vmx_mov_from_rr, "vmx privop mov_from_rr")
+PERFCOUNTER(vmx_thash, "vmx privop thash")
+PERFCOUNTER(vmx_ttag, "vmx privop ttag")
+PERFCOUNTER(vmx_tpa, "vmx privop tpa")
+PERFCOUNTER(vmx_tak, "vmx privop tak")
+PERFCOUNTER(vmx_mov_to_ar_imm, "vmx privop mov_to_ar_imm")
+PERFCOUNTER(vmx_mov_to_ar_reg, "vmx privop mov_to_ar_reg")
+PERFCOUNTER(vmx_mov_from_ar_reg, "vmx privop mov_from_ar_reg")
+PERFCOUNTER(vmx_mov_to_dbr, "vmx privop mov_to_dbr")
+PERFCOUNTER(vmx_mov_to_ibr, "vmx privop mov_to_ibr")
+PERFCOUNTER(vmx_mov_to_pmc, "vmx privop mov_to_pmc")
+PERFCOUNTER(vmx_mov_to_pmd, "vmx privop mov_to_pmd")
+PERFCOUNTER(vmx_mov_to_pkr, "vmx privop mov_to_pkr")
+PERFCOUNTER(vmx_mov_from_dbr, "vmx privop mov_from_dbr")
+PERFCOUNTER(vmx_mov_from_ibr, "vmx privop mov_from_ibr")
+PERFCOUNTER(vmx_mov_from_pmc, "vmx privop mov_from_pmc")
+PERFCOUNTER(vmx_mov_from_pkr, "vmx privop mov_from_pkr")
+PERFCOUNTER(vmx_mov_from_cpuid, "vmx privop mov_from_cpuid")
PERFCOUNTER_ARRAY(slow_hyperprivop, "slow hyperprivops", HYPERPRIVOP_MAX + 1)
@@ -84,12 +84,12 @@ PERFCOUNTER_ARRAY(fast_reflect, "f
PERFCOUNTER_ARRAY(fast_reflect, "fast reflection", 0x80)
PERFSTATUS(vhpt_nbr_entries, "nbr of entries per VHPT")
-PERFSTATUS_CPU(vhpt_valid_entries, "nbr of valid entries in VHPT")
+PERFSTATUS(vhpt_valid_entries, "nbr of valid entries in VHPT")
PERFCOUNTER_ARRAY(vmx_mmio_access, "vmx_mmio_access", 8)
-PERFCOUNTER_CPU(vmx_pal_emul, "vmx_pal_emul")
+PERFCOUNTER(vmx_pal_emul, "vmx_pal_emul")
PERFCOUNTER_ARRAY(vmx_switch_mm_mode, "vmx_switch_mm_mode", 8)
-PERFCOUNTER_CPU(vmx_ia64_handle_break,"vmx_ia64_handle_break")
+PERFCOUNTER(vmx_ia64_handle_break,"vmx_ia64_handle_break")
PERFCOUNTER_ARRAY(vmx_inject_guest_interruption,
"vmx_inject_guest_interruption", 0x80)
PERFCOUNTER_ARRAY(fw_hypercall, "fw_hypercall", 0x20)
@@ -106,69 +106,71 @@ PERFSTATUS(privop_addr_##name##_overflow
PERFPRIVOPADDR(get_ifa)
PERFPRIVOPADDR(thash)
+
+#undef PERFPRIVOPADDR
#endif
// vhpt.c
-PERFCOUNTER_CPU(local_vhpt_flush, "local_vhpt_flush")
-PERFCOUNTER_CPU(vcpu_vhpt_flush, "vcpu_vhpt_flush")
-PERFCOUNTER_CPU(vcpu_flush_vtlb_all, "vcpu_flush_vtlb_all")
-PERFCOUNTER_CPU(domain_flush_vtlb_all, "domain_flush_vtlb_all")
-PERFCOUNTER_CPU(vcpu_flush_tlb_vhpt_range, "vcpu_flush_tlb_vhpt_range")
-PERFCOUNTER_CPU(domain_flush_vtlb_track_entry, "domain_flush_vtlb_track_entry")
-PERFCOUNTER_CPU(domain_flush_vtlb_local, "domain_flush_vtlb_local")
-PERFCOUNTER_CPU(domain_flush_vtlb_global, "domain_flush_vtlb_global")
-PERFCOUNTER_CPU(domain_flush_vtlb_range, "domain_flush_vtlb_range")
+PERFCOUNTER(local_vhpt_flush, "local_vhpt_flush")
+PERFCOUNTER(vcpu_vhpt_flush, "vcpu_vhpt_flush")
+PERFCOUNTER(vcpu_flush_vtlb_all, "vcpu_flush_vtlb_all")
+PERFCOUNTER(domain_flush_vtlb_all, "domain_flush_vtlb_all")
+PERFCOUNTER(vcpu_flush_tlb_vhpt_range, "vcpu_flush_tlb_vhpt_range")
+PERFCOUNTER(domain_flush_vtlb_track_entry, "domain_flush_vtlb_track_entry")
+PERFCOUNTER(domain_flush_vtlb_local, "domain_flush_vtlb_local")
+PERFCOUNTER(domain_flush_vtlb_global, "domain_flush_vtlb_global")
+PERFCOUNTER(domain_flush_vtlb_range, "domain_flush_vtlb_range")
// domain.c
-PERFCOUNTER_CPU(flush_vtlb_for_context_switch, "flush_vtlb_for_context_switch")
+PERFCOUNTER(flush_vtlb_for_context_switch, "flush_vtlb_for_context_switch")
// mm.c
-PERFCOUNTER_CPU(assign_domain_page_replace, "assign_domain_page_replace")
-PERFCOUNTER_CPU(assign_domain_pge_cmpxchg_rel, "assign_domain_pge_cmpxchg_rel")
-PERFCOUNTER_CPU(zap_dcomain_page_one, "zap_dcomain_page_one")
-PERFCOUNTER_CPU(dom0vp_zap_physmap, "dom0vp_zap_physmap")
-PERFCOUNTER_CPU(dom0vp_add_physmap, "dom0vp_add_physmap")
-PERFCOUNTER_CPU(create_grant_host_mapping, "create_grant_host_mapping")
-PERFCOUNTER_CPU(destroy_grant_host_mapping, "destroy_grant_host_mapping")
-PERFCOUNTER_CPU(steal_page_refcount, "steal_page_refcount")
-PERFCOUNTER_CPU(steal_page, "steal_page")
-PERFCOUNTER_CPU(guest_physmap_add_page, "guest_physmap_add_page")
-PERFCOUNTER_CPU(guest_physmap_remove_page, "guest_physmap_remove_page")
-PERFCOUNTER_CPU(domain_page_flush_and_put, "domain_page_flush_and_put")
+PERFCOUNTER(assign_domain_page_replace, "assign_domain_page_replace")
+PERFCOUNTER(assign_domain_pge_cmpxchg_rel, "assign_domain_pge_cmpxchg_rel")
+PERFCOUNTER(zap_dcomain_page_one, "zap_dcomain_page_one")
+PERFCOUNTER(dom0vp_zap_physmap, "dom0vp_zap_physmap")
+PERFCOUNTER(dom0vp_add_physmap, "dom0vp_add_physmap")
+PERFCOUNTER(create_grant_host_mapping, "create_grant_host_mapping")
+PERFCOUNTER(destroy_grant_host_mapping, "destroy_grant_host_mapping")
+PERFCOUNTER(steal_page_refcount, "steal_page_refcount")
+PERFCOUNTER(steal_page, "steal_page")
+PERFCOUNTER(guest_physmap_add_page, "guest_physmap_add_page")
+PERFCOUNTER(guest_physmap_remove_page, "guest_physmap_remove_page")
+PERFCOUNTER(domain_page_flush_and_put, "domain_page_flush_and_put")
// dom0vp
-PERFCOUNTER_CPU(dom0vp_phystomach, "dom0vp_phystomach")
-PERFCOUNTER_CPU(dom0vp_machtophys, "dom0vp_machtophys")
+PERFCOUNTER(dom0vp_phystomach, "dom0vp_phystomach")
+PERFCOUNTER(dom0vp_machtophys, "dom0vp_machtophys")
#ifdef CONFIG_XEN_IA64_TLB_TRACK
// insert or dirty
-PERFCOUNTER_CPU(tlb_track_iod, "tlb_track_iod")
-PERFCOUNTER_CPU(tlb_track_iod_again, "tlb_track_iod_again")
-PERFCOUNTER_CPU(tlb_track_iod_not_tracked, "tlb_track_iod_not_tracked")
-PERFCOUNTER_CPU(tlb_track_iod_force_many, "tlb_track_iod_force_many")
-PERFCOUNTER_CPU(tlb_track_iod_tracked_many, "tlb_track_iod_tracked_many")
-PERFCOUNTER_CPU(tlb_track_iod_tracked_many_del, "tlb_track_iod_tracked_many_del")
-PERFCOUNTER_CPU(tlb_track_iod_found, "tlb_track_iod_found")
-PERFCOUNTER_CPU(tlb_track_iod_new_entry, "tlb_track_iod_new_entry")
-PERFCOUNTER_CPU(tlb_track_iod_new_failed, "tlb_track_iod_new_failed")
-PERFCOUNTER_CPU(tlb_track_iod_new_many, "tlb_track_iod_new_many")
-PERFCOUNTER_CPU(tlb_track_iod_insert, "tlb_track_iod_insert")
-PERFCOUNTER_CPU(tlb_track_iod_dirtied, "tlb_track_iod_dirtied")
+PERFCOUNTER(tlb_track_iod, "tlb_track_iod")
+PERFCOUNTER(tlb_track_iod_again, "tlb_track_iod_again")
+PERFCOUNTER(tlb_track_iod_not_tracked, "tlb_track_iod_not_tracked")
+PERFCOUNTER(tlb_track_iod_force_many, "tlb_track_iod_force_many")
+PERFCOUNTER(tlb_track_iod_tracked_many, "tlb_track_iod_tracked_many")
+PERFCOUNTER(tlb_track_iod_tracked_many_del, "tlb_track_iod_tracked_many_del")
+PERFCOUNTER(tlb_track_iod_found, "tlb_track_iod_found")
+PERFCOUNTER(tlb_track_iod_new_entry, "tlb_track_iod_new_entry")
+PERFCOUNTER(tlb_track_iod_new_failed, "tlb_track_iod_new_failed")
+PERFCOUNTER(tlb_track_iod_new_many, "tlb_track_iod_new_many")
+PERFCOUNTER(tlb_track_iod_insert, "tlb_track_iod_insert")
+PERFCOUNTER(tlb_track_iod_dirtied, "tlb_track_iod_dirtied")
// search and remove
-PERFCOUNTER_CPU(tlb_track_sar, "tlb_track_sar")
-PERFCOUNTER_CPU(tlb_track_sar_not_tracked, "tlb_track_sar_not_tracked")
-PERFCOUNTER_CPU(tlb_track_sar_not_found, "tlb_track_sar_not_found")
-PERFCOUNTER_CPU(tlb_track_sar_found, "tlb_track_sar_found")
-PERFCOUNTER_CPU(tlb_track_sar_many, "tlb_track_sar_many")
+PERFCOUNTER(tlb_track_sar, "tlb_track_sar")
+PERFCOUNTER(tlb_track_sar_not_tracked, "tlb_track_sar_not_tracked")
+PERFCOUNTER(tlb_track_sar_not_found, "tlb_track_sar_not_found")
+PERFCOUNTER(tlb_track_sar_found, "tlb_track_sar_found")
+PERFCOUNTER(tlb_track_sar_many, "tlb_track_sar_many")
// flush
-PERFCOUNTER_CPU(tlb_track_use_rr7, "tlb_track_use_rr7")
-PERFCOUNTER_CPU(tlb_track_swap_rr0, "tlb_track_swap_rr0")
+PERFCOUNTER(tlb_track_use_rr7, "tlb_track_use_rr7")
+PERFCOUNTER(tlb_track_swap_rr0, "tlb_track_swap_rr0")
#endif
// tlb flush clock
#ifdef CONFIG_XEN_IA64_TLBFLUSH_CLOCK
-PERFCOUNTER_CPU(tlbflush_clock_cswitch_purge, "tlbflush_clock_cswitch_purge")
-PERFCOUNTER_CPU(tlbflush_clock_cswitch_skip, "tlbflush_clock_cswitch_skip")
+PERFCOUNTER(tlbflush_clock_cswitch_purge, "tlbflush_clock_cswitch_purge")
+PERFCOUNTER(tlbflush_clock_cswitch_skip, "tlbflush_clock_cswitch_skip")
#endif
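
The ia64 hunks above fold the old per-CPU counter flavour into plain
PERFCOUNTER()/PERFSTATUS(): with the rework later in this patch every counter
is stored per CPU and only summed when read, so a separate *_CPU variant no
longer buys anything. A toy C model of that idea (illustrative only; NR_CPUS,
the counter name and the summing reader are assumptions, not Xen code):

    /* Toy model: once every counter lives per CPU and is summed only on
     * readout, a separate PERFCOUNTER_CPU flavour is redundant. */
    #include <stdio.h>

    #define NR_CPUS 4
    enum { PERFC_vcpu_vhpt_flush, NUM_PERFCOUNTERS };

    static unsigned int perfcounters[NR_CPUS][NUM_PERFCOUNTERS];

    /* an increment touches only the local CPU's slot (no locking needed) */
    static void perfc_incr_on(int cpu, int ctr) { perfcounters[cpu][ctr]++; }

    /* the reader sums across CPUs, as the perf-counter dump would */
    static unsigned int perfc_total(int ctr)
    {
        unsigned int cpu, sum = 0;
        for (cpu = 0; cpu < NR_CPUS; cpu++)
            sum += perfcounters[cpu][ctr];
        return sum;
    }

    int main(void)
    {
        perfc_incr_on(0, PERFC_vcpu_vhpt_flush);
        perfc_incr_on(3, PERFC_vcpu_vhpt_flush);
        printf("vcpu_vhpt_flush = %u\n", perfc_total(PERFC_vcpu_vhpt_flush));
        return 0;
    }
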
diff -r 14aeb7981e4e -r 0b2794d3320f xen/include/asm-ia64/privop_stat.h
--- a/xen/include/asm-ia64/privop_stat.h Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/include/asm-ia64/privop_stat.h Wed Mar 28 10:38:41 2007 +0100
@@ -1,5 +1,5 @@
-#ifndef _XEN_UA64_PRIVOP_STAT_H
-#define _XEN_UA64_PRIVOP_STAT_H
+#ifndef _XEN_IA64_PRIVOP_STAT_H
+#define _XEN_IA64_PRIVOP_STAT_H
#include <asm/config.h>
#include <xen/types.h>
#include <public/xen.h>
@@ -9,30 +9,23 @@ extern void gather_privop_addrs(void);
extern void gather_privop_addrs(void);
extern void reset_privop_addrs(void);
-#undef PERFCOUNTER
#define PERFCOUNTER(var, name)
-
-#undef PERFCOUNTER_CPU
-#define PERFCOUNTER_CPU(var, name)
-
-#undef PERFCOUNTER_ARRAY
#define PERFCOUNTER_ARRAY(var, name, size)
-#undef PERFSTATUS
#define PERFSTATUS(var, name)
-
-#undef PERFSTATUS_CPU
-#define PERFSTATUS_CPU(var, name)
-
-#undef PERFSTATUS_ARRAY
#define PERFSTATUS_ARRAY(var, name, size)
-#undef PERFPRIVOPADDR
#define PERFPRIVOPADDR(name) privop_inst_##name,
enum privop_inst {
#include <asm/perfc_defn.h>
};
+
+#undef PERFCOUNTER
+#undef PERFCOUNTER_ARRAY
+
+#undef PERFSTATUS
+#undef PERFSTATUS_ARRAY
#undef PERFPRIVOPADDR
@@ -45,4 +38,4 @@ extern void privop_count_addr(unsigned l
#define reset_privop_addrs() do {} while (0)
#endif
-#endif /* _XEN_UA64_PRIVOP_STAT_H */
+#endif /* _XEN_IA64_PRIVOP_STAT_H */
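
privop_stat.h builds its privop_inst enum by defining every perfc_defn.h
macro to expand to nothing except PERFPRIVOPADDR, including the definition
list inside the enum, and #undef-ing everything afterwards. A generic sketch
of that X-macro pattern (LIST and ITEM are made-up stand-ins for
asm/perfc_defn.h and PERFPRIVOPADDR, not the real names):

    /* One definition list expanded twice with different macro bodies:
     * once to build an enum, once to build a matching name table. */
    #include <stdio.h>

    #define LIST \
        ITEM(get_ifa) \
        ITEM(thash)

    #define ITEM(n) privop_inst_##n,
    enum privop_inst { LIST PRIVOP_INST_MAX };
    #undef ITEM

    #define ITEM(n) #n,
    static const char *privop_inst_name[] = { LIST };
    #undef ITEM

    int main(void)
    {
        for (int i = 0; i < PRIVOP_INST_MAX; i++)
            printf("%d -> %s\n", i, privop_inst_name[i]);
        return 0;
    }
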
diff -r 14aeb7981e4e -r 0b2794d3320f xen/include/asm-ia64/tlb_track.h
--- a/xen/include/asm-ia64/tlb_track.h Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/include/asm-ia64/tlb_track.h Wed Mar 28 10:38:41 2007 +0100
@@ -97,9 +97,9 @@ vcpu_tlb_track_insert_or_dirty(struct vc
{
/* optimization.
non-tracking pte is most common. */
- perfc_incrc(tlb_track_iod);
+ perfc_incr(tlb_track_iod);
if (!pte_tlb_tracking(entry->used)) {
- perfc_incrc(tlb_track_iod_not_tracked);
+ perfc_incr(tlb_track_iod_not_tracked);
return;
}
diff -r 14aeb7981e4e -r 0b2794d3320f xen/include/asm-powerpc/bug.h
--- a/xen/include/asm-powerpc/bug.h Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/include/asm-powerpc/bug.h Wed Mar 28 10:38:41 2007 +0100
@@ -2,5 +2,6 @@
#define __POWERPC_BUG_H__
#define BUG() __bug(__FILE__, __LINE__)
+#define WARN() __warn(__FILE__, __LINE__)
#endif /* __POWERPC_BUG_H__ */
diff -r 14aeb7981e4e -r 0b2794d3320f xen/include/asm-powerpc/debugger.h
--- a/xen/include/asm-powerpc/debugger.h Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/include/asm-powerpc/debugger.h Wed Mar 28 10:38:41 2007 +0100
@@ -67,10 +67,6 @@ static inline void unimplemented(void)
#endif
}
-extern void __warn(char *file, int line);
-#define WARN() __warn(__FILE__, __LINE__)
-#define WARN_ON(_p) do { if (_p) WARN(); } while ( 0 )
-
extern void __attn(void);
#define ATTN() __attn();
diff -r 14aeb7981e4e -r 0b2794d3320f xen/include/asm-x86/bug.h
--- a/xen/include/asm-x86/bug.h Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/include/asm-x86/bug.h Wed Mar 28 10:38:41 2007 +0100
@@ -14,8 +14,8 @@ struct bug_frame {
} __attribute__((packed));
#define BUGFRAME_dump 0
-#define BUGFRAME_bug 1
-#define BUGFRAME_assert 2
-#define BUGFRAME_rsvd 3
+#define BUGFRAME_warn 1
+#define BUGFRAME_bug 2
+#define BUGFRAME_assert 3
#endif /* __X86_BUG_H__ */
diff -r 14aeb7981e4e -r 0b2794d3320f xen/include/asm-x86/hvm/svm/vmcb.h
--- a/xen/include/asm-x86/hvm/svm/vmcb.h Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/include/asm-x86/hvm/svm/vmcb.h Wed Mar 28 10:38:41 2007 +0100
@@ -446,7 +446,6 @@ struct arch_svm_struct {
u64 vmcb_pa;
u32 *msrpm;
u64 vmexit_tsc; /* tsc read at #VMEXIT. for TSC_OFFSET */
- int saved_irq_vector;
int launch_core;
unsigned long flags; /* VMCB flags */
diff -r 14aeb7981e4e -r 0b2794d3320f xen/include/asm-x86/multicall.h
--- a/xen/include/asm-x86/multicall.h Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/include/asm-x86/multicall.h Wed Mar 28 10:38:41 2007 +0100
@@ -6,84 +6,94 @@
#define __ASM_X86_MULTICALL_H__
#include <xen/errno.h>
-#include <asm/asm_defns.h>
#ifdef __x86_64__
#define do_multicall_call(_call) \
do { \
__asm__ __volatile__ ( \
- " movq "STR(MULTICALL_op)"(%0),%%rax; " \
+ " movq %c1(%0),%%rax; " \
+ " leaq hypercall_table(%%rip),%%rdi; " \
" cmpq $("STR(NR_hypercalls)"),%%rax; " \
" jae 2f; " \
- " leaq hypercall_table(%%rip),%%rdi; " \
- " leaq (%%rdi,%%rax,8),%%rax; " \
- " movq "STR(MULTICALL_arg0)"(%0),%%rdi; " \
- " movq "STR(MULTICALL_arg1)"(%0),%%rsi; " \
- " movq "STR(MULTICALL_arg2)"(%0),%%rdx; " \
- " movq "STR(MULTICALL_arg3)"(%0),%%rcx; " \
- " movq "STR(MULTICALL_arg4)"(%0),%%r8; " \
- " callq *(%%rax); " \
- "1: movq %%rax,"STR(MULTICALL_result)"(%0)\n" \
+ " movq (%%rdi,%%rax,8),%%rax; " \
+ " movq %c2+0*%c3(%0),%%rdi; " \
+ " movq %c2+1*%c3(%0),%%rsi; " \
+ " movq %c2+2*%c3(%0),%%rdx; " \
+ " movq %c2+3*%c3(%0),%%rcx; " \
+ " movq %c2+4*%c3(%0),%%r8; " \
+ " callq *%%rax; " \
+ "1: movq %%rax,%c4(%0)\n" \
".section .fixup,\"ax\"\n" \
"2: movq $-"STR(ENOSYS)",%%rax\n" \
" jmp 1b\n" \
".previous\n" \
- : : "b" (_call) \
+ : \
+ : "b" (_call), \
+ "i" (offsetof(__typeof__(*_call), op)), \
+ "i" (offsetof(__typeof__(*_call), args)), \
+ "i" (sizeof(*(_call)->args)), \
+ "i" (offsetof(__typeof__(*_call), result)) \
/* all the caller-saves registers */ \
: "rax", "rcx", "rdx", "rsi", "rdi", \
"r8", "r9", "r10", "r11" ); \
} while ( 0 )
-#define compat_multicall_call(_call) \
- do { \
- __asm__ __volatile__ ( \
- " movl "STR(COMPAT_MULTICALL_op)"(%0),%%eax; " \
- " leaq compat_hypercall_table(%%rip),%%rdi; " \
- " cmpl $("STR(NR_hypercalls)"),%%eax; " \
- " jae 2f; " \
- " movq (%%rdi,%%rax,8),%%rax; " \
- " movl "STR(COMPAT_MULTICALL_arg0)"(%0),%%edi; " \
- " movl "STR(COMPAT_MULTICALL_arg1)"(%0),%%esi; " \
- " movl "STR(COMPAT_MULTICALL_arg2)"(%0),%%edx; " \
- " movl "STR(COMPAT_MULTICALL_arg3)"(%0),%%ecx; " \
- " movl "STR(COMPAT_MULTICALL_arg4)"(%0),%%r8d; " \
- " callq *%%rax; " \
- "1: movl %%eax,"STR(COMPAT_MULTICALL_result)"(%0)\n"\
- ".section .fixup,\"ax\"\n" \
- "2: movl $-"STR(ENOSYS)",%%eax\n" \
- " jmp 1b\n" \
- ".previous\n" \
- : : "b" (_call) \
- /* all the caller-saves registers */ \
- : "rax", "rcx", "rdx", "rsi", "rdi", \
- "r8", "r9", "r10", "r11" ); \
- } while ( 0 )
+#define compat_multicall_call(_call) \
+ __asm__ __volatile__ ( \
+ " movl %c1(%0),%%eax; " \
+ " leaq compat_hypercall_table(%%rip),%%rdi; "\
+ " cmpl $("STR(NR_hypercalls)"),%%eax; " \
+ " jae 2f; " \
+ " movq (%%rdi,%%rax,8),%%rax; " \
+ " movl %c2+0*%c3(%0),%%edi; " \
+ " movl %c2+1*%c3(%0),%%esi; " \
+ " movl %c2+2*%c3(%0),%%edx; " \
+ " movl %c2+3*%c3(%0),%%ecx; " \
+ " movl %c2+4*%c3(%0),%%r8d; " \
+ " callq *%%rax; " \
+ "1: movl %%eax,%c4(%0)\n" \
+ ".section .fixup,\"ax\"\n" \
+ "2: movl $-"STR(ENOSYS)",%%eax\n" \
+ " jmp 1b\n" \
+ ".previous\n" \
+ : \
+ : "b" (_call), \
+ "i" (offsetof(__typeof__(*_call), op)), \
+ "i" (offsetof(__typeof__(*_call), args)), \
+ "i" (sizeof(*(_call)->args)), \
+ "i" (offsetof(__typeof__(*_call), result)) \
+ /* all the caller-saves registers */ \
+ : "rax", "rcx", "rdx", "rsi", "rdi", \
+ "r8", "r9", "r10", "r11" ) \
#else
#define do_multicall_call(_call) \
- do { \
__asm__ __volatile__ ( \
- " pushl "STR(MULTICALL_arg4)"(%0); " \
- " pushl "STR(MULTICALL_arg3)"(%0); " \
- " pushl "STR(MULTICALL_arg2)"(%0); " \
- " pushl "STR(MULTICALL_arg1)"(%0); " \
- " pushl "STR(MULTICALL_arg0)"(%0); " \
- " movl "STR(MULTICALL_op)"(%0),%%eax; " \
+ " movl %c1(%0),%%eax; " \
+ " pushl %c2+4*%c3(%0); " \
+ " pushl %c2+3*%c3(%0); " \
+ " pushl %c2+2*%c3(%0); " \
+ " pushl %c2+1*%c3(%0); " \
+ " pushl %c2+0*%c3(%0); " \
" cmpl $("STR(NR_hypercalls)"),%%eax; " \
" jae 2f; " \
" call *hypercall_table(,%%eax,4); " \
- "1: movl %%eax,"STR(MULTICALL_result)"(%0); " \
+ "1: movl %%eax,%c4(%0); " \
" addl $20,%%esp\n" \
".section .fixup,\"ax\"\n" \
"2: movl $-"STR(ENOSYS)",%%eax\n" \
" jmp 1b\n" \
".previous\n" \
- : : "b" (_call) \
+ : \
+ : "bSD" (_call), \
+ "i" (offsetof(__typeof__(*_call), op)), \
+ "i" (offsetof(__typeof__(*_call), args)), \
+ "i" (sizeof(*(_call)->args)), \
+ "i" (offsetof(__typeof__(*_call), result)) \
/* all the caller-saves registers */ \
- : "eax", "ecx", "edx" ); \
- } while ( 0 )
+ : "eax", "ecx", "edx" ) \
#endif
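
The rewritten multicall macros stop relying on asm-offsets string constants
(STR(MULTICALL_op) and friends) and instead pass offsetof()/sizeof() values
as "i" constraints, splicing them into the template with the %c modifier.
A minimal stand-alone illustration of that technique for x86-64 GCC (struct
call, read_op and the field layout are hypothetical, not Xen's
multicall_entry):

    #include <stddef.h>
    #include <stdio.h>

    struct call { unsigned long op; unsigned long args[5]; unsigned long result; };

    static unsigned long read_op(const struct call *c)
    {
        unsigned long v;
        /* %c2 emits the constant operand without the '$' prefix, so the
         * offset computed by the compiler lands in the addressing mode. */
        __asm__ ( "movq %c2(%1), %0"
                  : "=r" (v)
                  : "r" (c), "i" (offsetof(struct call, op))
                  : "memory" );
        return v;
    }

    int main(void)
    {
        struct call c = { .op = 42 };
        printf("%lu\n", read_op(&c));   /* prints 42 */
        return 0;
    }
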
diff -r 14aeb7981e4e -r 0b2794d3320f xen/include/asm-x86/perfc_defn.h
--- a/xen/include/asm-x86/perfc_defn.h Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/include/asm-x86/perfc_defn.h Wed Mar 28 10:38:41 2007 +0100
@@ -12,81 +12,83 @@ PERFCOUNTER_ARRAY(cause_vector,
#define SVM_PERF_EXIT_REASON_SIZE (1+136)
PERFCOUNTER_ARRAY(svmexits, "SVMexits", SVM_PERF_EXIT_REASON_SIZE)
-PERFCOUNTER_CPU(seg_fixups, "segmentation fixups")
+PERFCOUNTER(seg_fixups, "segmentation fixups")
-PERFCOUNTER_CPU(apic_timer, "apic timer interrupts")
+PERFCOUNTER(apic_timer, "apic timer interrupts")
-PERFCOUNTER_CPU(domain_page_tlb_flush, "domain page tlb flushes")
+PERFCOUNTER(domain_page_tlb_flush, "domain page tlb flushes")
-PERFCOUNTER_CPU(calls_to_mmu_update, "calls_to_mmu_update")
-PERFCOUNTER_CPU(num_page_updates, "num_page_updates")
-PERFCOUNTER_CPU(calls_to_update_va, "calls_to_update_va_map")
-PERFCOUNTER_CPU(page_faults, "page faults")
-PERFCOUNTER_CPU(copy_user_faults, "copy_user faults")
+PERFCOUNTER(calls_to_mmuext_op, "calls to mmuext_op")
+PERFCOUNTER(num_mmuext_ops, "mmuext ops")
+PERFCOUNTER(calls_to_mmu_update, "calls to mmu_update")
+PERFCOUNTER(num_page_updates, "page updates")
+PERFCOUNTER(calls_to_update_va, "calls to update_va_map")
+PERFCOUNTER(page_faults, "page faults")
+PERFCOUNTER(copy_user_faults, "copy_user faults")
-PERFCOUNTER_CPU(map_domain_page_count, "map_domain_page count")
-PERFCOUNTER_CPU(ptwr_emulations, "writable pt emulations")
+PERFCOUNTER(map_domain_page_count, "map_domain_page count")
+PERFCOUNTER(ptwr_emulations, "writable pt emulations")
-PERFCOUNTER_CPU(exception_fixed, "pre-exception fixed")
+PERFCOUNTER(exception_fixed, "pre-exception fixed")
/* Shadow counters */
-PERFCOUNTER_CPU(shadow_alloc, "calls to shadow_alloc")
-PERFCOUNTER_CPU(shadow_alloc_tlbflush, "shadow_alloc flushed TLBs")
+PERFCOUNTER(shadow_alloc, "calls to shadow_alloc")
+PERFCOUNTER(shadow_alloc_tlbflush, "shadow_alloc flushed TLBs")
/* STATUS counters do not reset when 'P' is hit */
PERFSTATUS(shadow_alloc_count, "number of shadow pages in use")
-PERFCOUNTER_CPU(shadow_free, "calls to shadow_free")
-PERFCOUNTER_CPU(shadow_prealloc_1, "shadow recycles old shadows")
-PERFCOUNTER_CPU(shadow_prealloc_2, "shadow recycles in-use shadows")
-PERFCOUNTER_CPU(shadow_linear_map_failed, "shadow hit read-only linear map")
-PERFCOUNTER_CPU(shadow_a_update, "shadow A bit update")
-PERFCOUNTER_CPU(shadow_ad_update, "shadow A&D bit update")
-PERFCOUNTER_CPU(shadow_fault, "calls to shadow_fault")
-PERFCOUNTER_CPU(shadow_fault_fast_gnp, "shadow_fault fast path n/p")
-PERFCOUNTER_CPU(shadow_fault_fast_mmio, "shadow_fault fast path mmio")
-PERFCOUNTER_CPU(shadow_fault_fast_fail, "shadow_fault fast path error")
-PERFCOUNTER_CPU(shadow_fault_bail_bad_gfn, "shadow_fault guest bad gfn")
-PERFCOUNTER_CPU(shadow_fault_bail_not_present,
+PERFCOUNTER(shadow_free, "calls to shadow_free")
+PERFCOUNTER(shadow_prealloc_1, "shadow recycles old shadows")
+PERFCOUNTER(shadow_prealloc_2, "shadow recycles in-use shadows")
+PERFCOUNTER(shadow_linear_map_failed, "shadow hit read-only linear map")
+PERFCOUNTER(shadow_a_update, "shadow A bit update")
+PERFCOUNTER(shadow_ad_update, "shadow A&D bit update")
+PERFCOUNTER(shadow_fault, "calls to shadow_fault")
+PERFCOUNTER(shadow_fault_fast_gnp, "shadow_fault fast path n/p")
+PERFCOUNTER(shadow_fault_fast_mmio, "shadow_fault fast path mmio")
+PERFCOUNTER(shadow_fault_fast_fail, "shadow_fault fast path error")
+PERFCOUNTER(shadow_fault_bail_bad_gfn, "shadow_fault guest bad gfn")
+PERFCOUNTER(shadow_fault_bail_not_present,
"shadow_fault guest not-present")
-PERFCOUNTER_CPU(shadow_fault_bail_nx, "shadow_fault guest NX fault")
-PERFCOUNTER_CPU(shadow_fault_bail_ro_mapping, "shadow_fault guest R/W fault")
-PERFCOUNTER_CPU(shadow_fault_bail_user_supervisor,
+PERFCOUNTER(shadow_fault_bail_nx, "shadow_fault guest NX fault")
+PERFCOUNTER(shadow_fault_bail_ro_mapping, "shadow_fault guest R/W fault")
+PERFCOUNTER(shadow_fault_bail_user_supervisor,
"shadow_fault guest U/S fault")
-PERFCOUNTER_CPU(shadow_fault_emulate_read, "shadow_fault emulates a read")
-PERFCOUNTER_CPU(shadow_fault_emulate_write, "shadow_fault emulates a write")
-PERFCOUNTER_CPU(shadow_fault_emulate_failed, "shadow_fault emulator fails")
-PERFCOUNTER_CPU(shadow_fault_emulate_stack, "shadow_fault emulate stack write")
-PERFCOUNTER_CPU(shadow_fault_mmio, "shadow_fault handled as mmio")
-PERFCOUNTER_CPU(shadow_fault_fixed, "shadow_fault fixed fault")
-PERFCOUNTER_CPU(shadow_ptwr_emulate, "shadow causes ptwr to emulate")
-PERFCOUNTER_CPU(shadow_validate_gl1e_calls, "calls to shadow_validate_gl1e")
-PERFCOUNTER_CPU(shadow_validate_gl2e_calls, "calls to shadow_validate_gl2e")
-PERFCOUNTER_CPU(shadow_validate_gl3e_calls, "calls to shadow_validate_gl3e")
-PERFCOUNTER_CPU(shadow_validate_gl4e_calls, "calls to shadow_validate_gl4e")
-PERFCOUNTER_CPU(shadow_hash_lookups, "calls to shadow_hash_lookup")
-PERFCOUNTER_CPU(shadow_hash_lookup_head, "shadow hash hit in bucket head")
-PERFCOUNTER_CPU(shadow_hash_lookup_miss, "shadow hash misses")
-PERFCOUNTER_CPU(shadow_get_shadow_status, "calls to get_shadow_status")
-PERFCOUNTER_CPU(shadow_hash_inserts, "calls to shadow_hash_insert")
-PERFCOUNTER_CPU(shadow_hash_deletes, "calls to shadow_hash_delete")
-PERFCOUNTER_CPU(shadow_writeable, "shadow removes write access")
-PERFCOUNTER_CPU(shadow_writeable_h_1, "shadow writeable: 32b w2k3")
-PERFCOUNTER_CPU(shadow_writeable_h_2, "shadow writeable: 32pae w2k3")
-PERFCOUNTER_CPU(shadow_writeable_h_3, "shadow writeable: 64b w2k3")
-PERFCOUNTER_CPU(shadow_writeable_h_4, "shadow writeable: 32b linux low")
-PERFCOUNTER_CPU(shadow_writeable_h_5, "shadow writeable: 32b linux high")
-PERFCOUNTER_CPU(shadow_writeable_bf, "shadow writeable brute-force")
-PERFCOUNTER_CPU(shadow_mappings, "shadow removes all mappings")
-PERFCOUNTER_CPU(shadow_mappings_bf, "shadow rm-mappings brute-force")
-PERFCOUNTER_CPU(shadow_early_unshadow, "shadow unshadows for fork/exit")
-PERFCOUNTER_CPU(shadow_unshadow, "shadow unshadows a page")
-PERFCOUNTER_CPU(shadow_up_pointer, "shadow unshadow by up-pointer")
-PERFCOUNTER_CPU(shadow_unshadow_bf, "shadow unshadow brute-force")
-PERFCOUNTER_CPU(shadow_get_page_fail, "shadow_get_page_from_l1e failed")
-PERFCOUNTER_CPU(shadow_guest_walk, "shadow walks guest tables")
-PERFCOUNTER_CPU(shadow_invlpg, "shadow emulates invlpg")
-PERFCOUNTER_CPU(shadow_invlpg_fault, "shadow invlpg faults")
+PERFCOUNTER(shadow_fault_emulate_read, "shadow_fault emulates a read")
+PERFCOUNTER(shadow_fault_emulate_write, "shadow_fault emulates a write")
+PERFCOUNTER(shadow_fault_emulate_failed, "shadow_fault emulator fails")
+PERFCOUNTER(shadow_fault_emulate_stack, "shadow_fault emulate stack write")
+PERFCOUNTER(shadow_fault_mmio, "shadow_fault handled as mmio")
+PERFCOUNTER(shadow_fault_fixed, "shadow_fault fixed fault")
+PERFCOUNTER(shadow_ptwr_emulate, "shadow causes ptwr to emulate")
+PERFCOUNTER(shadow_validate_gl1e_calls, "calls to shadow_validate_gl1e")
+PERFCOUNTER(shadow_validate_gl2e_calls, "calls to shadow_validate_gl2e")
+PERFCOUNTER(shadow_validate_gl3e_calls, "calls to shadow_validate_gl3e")
+PERFCOUNTER(shadow_validate_gl4e_calls, "calls to shadow_validate_gl4e")
+PERFCOUNTER(shadow_hash_lookups, "calls to shadow_hash_lookup")
+PERFCOUNTER(shadow_hash_lookup_head, "shadow hash hit in bucket head")
+PERFCOUNTER(shadow_hash_lookup_miss, "shadow hash misses")
+PERFCOUNTER(shadow_get_shadow_status, "calls to get_shadow_status")
+PERFCOUNTER(shadow_hash_inserts, "calls to shadow_hash_insert")
+PERFCOUNTER(shadow_hash_deletes, "calls to shadow_hash_delete")
+PERFCOUNTER(shadow_writeable, "shadow removes write access")
+PERFCOUNTER(shadow_writeable_h_1, "shadow writeable: 32b w2k3")
+PERFCOUNTER(shadow_writeable_h_2, "shadow writeable: 32pae w2k3")
+PERFCOUNTER(shadow_writeable_h_3, "shadow writeable: 64b w2k3")
+PERFCOUNTER(shadow_writeable_h_4, "shadow writeable: 32b linux low")
+PERFCOUNTER(shadow_writeable_h_5, "shadow writeable: 32b linux high")
+PERFCOUNTER(shadow_writeable_bf, "shadow writeable brute-force")
+PERFCOUNTER(shadow_mappings, "shadow removes all mappings")
+PERFCOUNTER(shadow_mappings_bf, "shadow rm-mappings brute-force")
+PERFCOUNTER(shadow_early_unshadow, "shadow unshadows for fork/exit")
+PERFCOUNTER(shadow_unshadow, "shadow unshadows a page")
+PERFCOUNTER(shadow_up_pointer, "shadow unshadow by up-pointer")
+PERFCOUNTER(shadow_unshadow_bf, "shadow unshadow brute-force")
+PERFCOUNTER(shadow_get_page_fail, "shadow_get_page_from_l1e failed")
+PERFCOUNTER(shadow_guest_walk, "shadow walks guest tables")
+PERFCOUNTER(shadow_invlpg, "shadow emulates invlpg")
+PERFCOUNTER(shadow_invlpg_fault, "shadow invlpg faults")
/*#endif*/ /* __XEN_PERFC_DEFN_H__ */
diff -r 14aeb7981e4e -r 0b2794d3320f xen/include/asm-x86/x86_32/asm_defns.h
--- a/xen/include/asm-x86/x86_32/asm_defns.h Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/include/asm-x86/x86_32/asm_defns.h Wed Mar 28 10:38:41 2007 +0100
@@ -1,5 +1,7 @@
#ifndef __X86_32_ASM_DEFNS_H__
#define __X86_32_ASM_DEFNS_H__
+
+#include <asm/percpu.h>
#ifndef NDEBUG
/* Indicate special exception stack frame by inverting the frame pointer. */
@@ -47,10 +49,14 @@
1:
#ifdef PERF_COUNTERS
-#define PERFC_INCR(_name,_idx) \
- lock incl perfcounters+_name(,_idx,4)
+#define PERFC_INCR(_name,_idx,_cur) \
+ pushl _cur; \
+ movl VCPU_processor(_cur),_cur; \
+ shll $PERCPU_SHIFT,_cur; \
+ incl per_cpu__perfcounters+_name*4(_cur,_idx,4);\
+ popl _cur
#else
-#define PERFC_INCR(_name,_idx)
+#define PERFC_INCR(_name,_idx,_cur)
#endif
#ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
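
With counters now living in each CPU's per-cpu data block, PERFC_INCR indexes
per_cpu__perfcounters by (processor << PERCPU_SHIFT) plus the counter offset,
and the old "lock incl" becomes a plain incl because only the owning CPU ever
touches its own slots. The byte-address computation, written out in C (a
sketch: the PERCPU_SHIFT value and the base address are made up, only the
arithmetic mirrors the macro above):

    #include <stdio.h>

    #define PERCPU_SHIFT 12   /* hypothetical: per-CPU block of 1 << PERCPU_SHIFT bytes */

    static unsigned long percpu_counter_addr(unsigned long per_cpu__perfcounters,
                                             unsigned int cpu,
                                             unsigned int name,  /* counter index */
                                             unsigned int idx)   /* array slot    */
    {
        /* incl per_cpu__perfcounters+_name*4(_cur,_idx,4), with
         * _cur = cpu << PERCPU_SHIFT, resolves to this byte address: */
        return per_cpu__perfcounters + ((unsigned long)cpu << PERCPU_SHIFT)
               + name * 4 + idx * 4;
    }

    int main(void)
    {
        printf("%#lx\n", percpu_counter_addr(0x100000, 2, 7, 3));
        return 0;
    }
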
diff -r 14aeb7981e4e -r 0b2794d3320f xen/include/asm-x86/x86_32/bug.h
--- a/xen/include/asm-x86/x86_32/bug.h Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/include/asm-x86/x86_32/bug.h Wed Mar 28 10:38:41 2007 +0100
@@ -11,6 +11,12 @@ struct bug_frame_str {
asm volatile ( \
"ud2 ; ret $%c0" \
: : "i" (BUGFRAME_dump) )
+
+#define WARN() \
+ asm volatile ( \
+ "ud2 ; ret $%c0 ; .byte 0xbc ; .long %c1" \
+ : : "i" (BUGFRAME_warn | (__LINE__<<2)), \
+ "i" (__FILE__) )
#define BUG() \
asm volatile ( \
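
The new WARN() frames reuse the BUGFRAME encoding from asm-x86/bug.h: the
frame type occupies the low two bits of the "ret" immediate and the source
line the remaining bits, with the file name emitted after the ud2. A small
sketch of that packing and unpacking (the encode/decode helpers are
hypothetical; the real decoding happens in the trap handler):

    #include <stdio.h>

    #define BUGFRAME_dump   0
    #define BUGFRAME_warn   1
    #define BUGFRAME_bug    2
    #define BUGFRAME_assert 3

    /* frame type in bits 0-1, source line in bits 2 and up */
    static unsigned int encode(unsigned int type, unsigned int line)
    {
        return type | (line << 2);
    }

    int main(void)
    {
        unsigned int id = encode(BUGFRAME_warn, 1234);
        printf("type=%u line=%u\n", id & 3, id >> 2);  /* -> type=1 line=1234 */
        return 0;
    }
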
diff -r 14aeb7981e4e -r 0b2794d3320f xen/include/asm-x86/x86_64/asm_defns.h
--- a/xen/include/asm-x86/x86_64/asm_defns.h Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/include/asm-x86/x86_64/asm_defns.h Wed Mar 28 10:38:41 2007 +0100
@@ -1,5 +1,7 @@
#ifndef __X86_64_ASM_DEFNS_H__
#define __X86_64_ASM_DEFNS_H__
+
+#include <asm/percpu.h>
#ifndef NDEBUG
/* Indicate special exception stack frame by inverting the frame pointer. */
@@ -47,13 +49,18 @@
popq %rdi;
#ifdef PERF_COUNTERS
-#define PERFC_INCR(_name,_idx) \
- pushq %rdx; \
- leaq perfcounters+_name(%rip),%rdx; \
- lock incl (%rdx,_idx,4); \
- popq %rdx;
+#define PERFC_INCR(_name,_idx,_cur) \
+ pushq _cur; \
+ movslq VCPU_processor(_cur),_cur; \
+ pushq %rdx; \
+ leaq per_cpu__perfcounters(%rip),%rdx; \
+ shlq $PERCPU_SHIFT,_cur; \
+ addq %rdx,_cur; \
+ popq %rdx; \
+ incl _name*4(_cur,_idx,4); \
+ popq _cur
#else
-#define PERFC_INCR(_name,_idx)
+#define PERFC_INCR(_name,_idx,_cur)
#endif
/* Work around AMD erratum #88 */
diff -r 14aeb7981e4e -r 0b2794d3320f xen/include/asm-x86/x86_64/bug.h
--- a/xen/include/asm-x86/x86_64/bug.h Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/include/asm-x86/x86_64/bug.h Wed Mar 28 10:38:41 2007 +0100
@@ -11,6 +11,12 @@ struct bug_frame_str {
asm volatile ( \
"ud2 ; ret $%c0" \
: : "i" (BUGFRAME_dump) )
+
+#define WARN() \
+ asm volatile ( \
+ "ud2 ; ret $%c0 ; .byte 0x48,0xbc ; .quad %c1" \
+ : : "i" (BUGFRAME_warn | (__LINE__<<2)), \
+ "i" (__FILE__) )
#define BUG() \
asm volatile ( \
diff -r 14aeb7981e4e -r 0b2794d3320f xen/include/public/foreign/Makefile
--- a/xen/include/public/foreign/Makefile Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/include/public/foreign/Makefile Wed Mar 28 10:38:41 2007 +0100
@@ -1,5 +1,5 @@ XEN_ROOT := ../../../..
-XEN_ROOT := ../../../..
-include $(XEN_ROOT)/tools/Rules.mk
+XEN_ROOT=../../../..
+include $(XEN_ROOT)/Config.mk
architectures := x86_32 x86_64 ia64
headers := $(patsubst %, %.h, $(architectures))
diff -r 14aeb7981e4e -r 0b2794d3320f xen/include/xen/lib.h
--- a/xen/include/xen/lib.h Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/include/xen/lib.h Wed Mar 28 10:38:41 2007 +0100
@@ -10,8 +10,10 @@
#include <asm/bug.h>
void __bug(char *file, int line) __attribute__((noreturn));
+void __warn(char *file, int line);
-#define BUG_ON(_p) do { if (_p) BUG(); } while ( 0 )
+#define BUG_ON(p) do { if (p) BUG(); } while (0)
+#define WARN_ON(p) do { if (p) WARN(); } while (0)
/* Force a compilation error if condition is true */
#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2 * !!(condition)]))
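
xen/lib.h now pairs BUG_ON() with a WARN_ON() built on the __warn() hook
declared above. A stand-alone usage sketch (the printf stubs for
__warn/__bug are placeholders, not the real Xen implementations):

    #include <stdio.h>
    #include <stdlib.h>

    void __warn(char *file, int line) { printf("WARNING at %s:%d\n", file, line); }
    void __bug(char *file, int line)  { printf("BUG at %s:%d\n", file, line); abort(); }

    #define WARN() __warn(__FILE__, __LINE__)
    #define BUG()  __bug(__FILE__, __LINE__)
    #define WARN_ON(p) do { if (p) WARN(); } while (0)
    #define BUG_ON(p)  do { if (p) BUG();  } while (0)

    int main(void)
    {
        int refcnt = 0;
        WARN_ON(refcnt < 0);   /* survivable: only logs a warning      */
        BUG_ON(refcnt != 0);   /* fatal if it ever fires: stops here   */
        return 0;
    }
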
diff -r 14aeb7981e4e -r 0b2794d3320f xen/include/xen/perfc.h
--- a/xen/include/xen/perfc.h Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/include/xen/perfc.h Wed Mar 28 10:38:41 2007 +0100
@@ -1,4 +1,3 @@
-
#ifndef __XEN_PERFC_H__
#define __XEN_PERFC_H__
@@ -6,102 +5,92 @@
#include <xen/lib.h>
#include <xen/smp.h>
-#include <asm/atomic.h>
+#include <xen/percpu.h>
-/*
+/*
* NOTE: new counters must be defined in perfc_defn.h
*
+ * Counter declarations:
* PERFCOUNTER (counter, string) define a new performance counter
- * PERFCOUNTER_CPU (counter, string, size) define a counter per CPU
- * PERFCOUNTER_ARRY (counter, string, size) define an array of counters
+ * PERFCOUNTER_ARRAY (counter, string, size) define an array of counters
*
- * unlike "COUNTERS", "STATUS" variables DO NOT RESET
+ * Unlike counters, status variables do not reset:
 * PERFSTATUS (counter, string) define a new performance status
- * PERFSTATUS_CPU (counter, string, size) define a status var per CPU
- * PERFSTATUS_ARRY (counter, string, size) define an array of status vars
+ * PERFSTATUS_ARRAY (counter, string, size) define an array of status vars
*
* unsigned long perfc_value (counter) get value of a counter
- * unsigned long perfc_valuec (counter) get value of a per CPU counter
* unsigned long perfc_valuea (counter, index) get value of an array counter
* unsigned long perfc_set (counter, val) set value of a counter
- * unsigned long perfc_setc (counter, val) set value of a per CPU counter
* unsigned long perfc_seta (counter, index, val) set value of an array counter
* void perfc_incr (counter) increment a counter
- * void perfc_incrc (counter, index) increment a per CPU counter
+ * void perfc_decr (counter) decrement a status
* void perfc_incra (counter, index) increment an array counter
* void perfc_add (counter, value) add a value to a counter
- * void perfc_addc (counter, value) add a value to a per CPU counter
* void perfc_adda (counter, index, value) add a value to array counter
* void perfc_print (counter) print out the counter
*/
-#define PERFCOUNTER( var, name ) \
- atomic_t var[1];
-#define PERFCOUNTER_CPU( var, name ) \
- atomic_t var[NR_CPUS];
-#define PERFCOUNTER_ARRAY( var, name, size ) \
- atomic_t var[size];
-#define PERFSTATUS( var, name ) \
- atomic_t var[1];
-#define PERFSTATUS_CPU( var, name ) \
- atomic_t var[NR_CPUS];
-#define PERFSTATUS_ARRAY( var, name, size ) \
- atomic_t var[size];
+#define PERFCOUNTER( name, descr ) \
+ PERFC_##name,
+#define PERFCOUNTER_ARRAY( name, descr, size ) \
+ PERFC_##name, \
+ PERFC_LAST_##name = PERFC_ ## name + (size) - sizeof(char[2 * !!(size) - 1]),
-struct perfcounter {
+#define PERFSTATUS PERFCOUNTER
+#define PERFSTATUS_ARRAY PERFCOUNTER_ARRAY
+
+enum perfcounter {
#include <xen/perfc_defn.h>
+ NUM_PERFCOUNTERS
};
-extern struct perfcounter perfcounters;
+#undef PERFCOUNTER
+#undef PERFCOUNTER_ARRAY
+#undef PERFSTATUS
+#undef PERFSTATUS_ARRAY
-#define perfc_value(x) atomic_read(&perfcounters.x[0])
-#define perfc_valuec(x) atomic_read(&perfcounters.x[smp_processor_id()])
+typedef unsigned perfc_t;
+#define PRIperfc ""
+
+DECLARE_PER_CPU(perfc_t[NUM_PERFCOUNTERS], perfcounters);
+
+#define perfc_value(x) this_cpu(perfcounters)[PERFC_ ## x]
#define perfc_valuea(x,y) \
- ( (y) < (sizeof(perfcounters.x) / sizeof(*perfcounters.x)) ? \
- atomic_read(&perfcounters.x[y]) : 0 )
-#define perfc_set(x,v) atomic_set(&perfcounters.x[0], v)
-#define perfc_setc(x,v) atomic_set(&perfcounters.x[smp_processor_id()], v)
+ ( (y) <= PERFC_LAST_ ## x - PERFC_ ## x ? \
+ this_cpu(perfcounters)[PERFC_ ## x + (y)] : 0 )
+#define perfc_set(x,v) (this_cpu(perfcounters)[PERFC_ ## x] = (v))
#define perfc_seta(x,y,v) \
- do { \
- if ( (y) < (sizeof(perfcounters.x) / sizeof(*perfcounters.x)) ) \
- atomic_set(&perfcounters.x[y], v); \
- } while ( 0 )
-#define perfc_incr(x) atomic_inc(&perfcounters.x[0])
-#define perfc_decr(x) atomic_dec(&perfcounters.x[0])
-#define perfc_incrc(x) atomic_inc(&perfcounters.x[smp_processor_id()])
-#define perfc_decrc(x) atomic_dec(&perfcounters.x[smp_processor_id()])
+ ( (y) <= PERFC_LAST_ ## x - PERFC_ ## x ? \
+ this_cpu(perfcounters)[PERFC_ ## x + (y)] = (v) : (v) )
+#define perfc_incr(x) (++this_cpu(perfcounters)[PERFC_ ## x])
+#define perfc_decr(x) (--this_cpu(perfcounters)[PERFC_ ## x])
#define perfc_incra(x,y) \
- do { \
- if ( (y) < (sizeof(perfcounters.x) / sizeof(*perfcounters.x)) ) \
- atomic_inc(&perfcounters.x[y]); \
- } while ( 0 )
-#define perfc_add(x,y) atomic_add((y), &perfcounters.x[0])
-#define perfc_addc(x,y) atomic_add((y), &perfcounters.x[smp_processor_id()])
-#define perfc_adda(x,y,z) \
- do { \
- if ( (y) < (sizeof(perfcounters.x) / sizeof(*perfcounters.x)) ) \
- atomic_add((z), &perfcounters.x[y]); \
- } while ( 0 )
+ ( (y) <= PERFC_LAST_ ## x - PERFC_ ## x ? \
+ ++this_cpu(perfcounters)[PERFC_ ## x + (y)] : 0 )
+#define perfc_add(x,v) (this_cpu(perfcounters)[PERFC_ ## x] += (v))
+#define perfc_adda(x,y,v) \
+ ( (y) <= PERFC_LAST_ ## x - PERFC_ ## x ? \
+ this_cpu(perfcounters)[PERFC_ ## x + (y)] = (v) : (v) )
/*
* Histogram: special treatment for 0 and 1 count. After that equally spaced
* with last bucket taking the rest.
*/
#ifdef PERF_ARRAYS
-#define perfc_incr_histo(_x,_v,_n) \
- do { \
- if ( (_v) == 0 ) \
- perfc_incra(_x, 0); \
- else if ( (_v) == 1 ) \
- perfc_incra(_x, 1); \
- else if ( (((_v)-2) / PERFC_ ## _n ## _BUCKET_SIZE) < \
- (PERFC_MAX_ ## _n - 3) ) \
- perfc_incra(_x, (((_v)-2) / PERFC_ ## _n ## _BUCKET_SIZE) + 2); \
- else \
- perfc_incra(_x, PERFC_MAX_ ## _n - 1); \
+#define perfc_incr_histo(x,v) \
+ do { \
+ if ( (v) == 0 ) \
+ perfc_incra(x, 0); \
+ else if ( (v) == 1 ) \
+ perfc_incra(x, 1); \
+ else if ( (((v) - 2) / PERFC_ ## x ## _BUCKET_SIZE) < \
+ (PERFC_LAST_ ## x - PERFC_ ## x - 2) ) \
+ perfc_incra(x, (((v) - 2) / PERFC_ ## x ## _BUCKET_SIZE) + 2); \
+ else \
+ perfc_incra(x, PERFC_LAST_ ## x - PERFC_ ## x); \
} while ( 0 )
#else
-#define perfc_incr_histo(_x,_v,_n) ((void)0)
+#define perfc_incr_histo(x,v) ((void)0)
#endif
struct xen_sysctl_perfc_op;
@@ -110,19 +99,14 @@ int perfc_control(struct xen_sysctl_perf
#else /* PERF_COUNTERS */
#define perfc_value(x) (0)
-#define perfc_valuec(x) (0)
#define perfc_valuea(x,y) (0)
#define perfc_set(x,v) ((void)0)
-#define perfc_setc(x,v) ((void)0)
#define perfc_seta(x,y,v) ((void)0)
#define perfc_incr(x) ((void)0)
#define perfc_decr(x) ((void)0)
-#define perfc_incrc(x) ((void)0)
-#define perfc_decrc(x) ((void)0)
#define perfc_incra(x,y) ((void)0)
#define perfc_decra(x,y) ((void)0)
#define perfc_add(x,y) ((void)0)
-#define perfc_addc(x,y) ((void)0)
#define perfc_adda(x,y,z) ((void)0)
#define perfc_incr_histo(x,y,z) ((void)0)
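
The perfc.h rewrite turns every counter name into an enumerator generated
straight from perfc_defn.h, backs them all with one per-CPU perfc_t array,
and bounds array counters with PERFC_LAST_<name>. A compact single-CPU model
of that layout (the counter names, the this_cpu() stand-in and the simplified
"size - 1" arithmetic are assumptions; the real macro uses a sizeof() trick
so that a zero size fails to compile):

    #include <stdio.h>

    #define PERFCOUNTER(name, descr)            PERFC_##name,
    #define PERFCOUNTER_ARRAY(name, descr, sz)  PERFC_##name, \
        PERFC_LAST_##name = PERFC_##name + (sz) - 1,

    enum perfcounter {
    #include "counters.h"           /* stands in for <xen/perfc_defn.h>:   */
        PERFCOUNTER(page_faults, "page faults")
        PERFCOUNTER_ARRAY(hypercalls, "hypercalls", 8)
        NUM_PERFCOUNTERS
    };

    #undef PERFCOUNTER
    #undef PERFCOUNTER_ARRAY

    typedef unsigned int perfc_t;
    static perfc_t perfcounters[NUM_PERFCOUNTERS];   /* per-CPU in Xen */

    #define this_cpu(var) (var)                      /* single-CPU stand-in */
    #define perfc_incr(x)    (++this_cpu(perfcounters)[PERFC_ ## x])
    #define perfc_incra(x,y) \
        ( (y) <= PERFC_LAST_ ## x - PERFC_ ## x ? \
          ++this_cpu(perfcounters)[PERFC_ ## x + (y)] : 0 )

    int main(void)
    {
        perfc_incr(page_faults);
        perfc_incra(hypercalls, 3);
        perfc_incra(hypercalls, 42);   /* out of range: silently ignored */
        printf("page_faults=%u hypercalls[3]=%u\n",
               perfcounters[PERFC_page_faults],
               perfcounters[PERFC_hypercalls + 3]);
        return 0;
    }
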
diff -r 14aeb7981e4e -r 0b2794d3320f xen/include/xen/perfc_defn.h
--- a/xen/include/xen/perfc_defn.h Tue Mar 27 12:21:48 2007 -0600
+++ b/xen/include/xen/perfc_defn.h Wed Mar 28 10:38:41 2007 +0100
@@ -6,13 +6,16 @@
PERFCOUNTER_ARRAY(hypercalls, "hypercalls", NR_hypercalls)
-PERFCOUNTER_CPU(irqs, "#interrupts")
-PERFCOUNTER_CPU(ipis, "#IPIs")
+PERFCOUNTER(calls_to_multicall, "calls to multicall")
+PERFCOUNTER(calls_from_multicall, "calls from multicall")
-PERFCOUNTER_CPU(sched_irq, "sched: timer")
-PERFCOUNTER_CPU(sched_run, "sched: runs through scheduler")
-PERFCOUNTER_CPU(sched_ctx, "sched: context switches")
+PERFCOUNTER(irqs, "#interrupts")
+PERFCOUNTER(ipis, "#IPIs")
-PERFCOUNTER_CPU(need_flush_tlb_flush, "PG_need_flush tlb flushes")
+PERFCOUNTER(sched_irq, "sched: timer")
+PERFCOUNTER(sched_run, "sched: runs through scheduler")
+PERFCOUNTER(sched_ctx, "sched: context switches")
+
+PERFCOUNTER(need_flush_tlb_flush, "PG_need_flush tlb flushes")
/*#endif*/ /* __XEN_PERFC_DEFN_H__ */
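
The final hunk adds calls_to_multicall/calls_from_multicall, matching the
common/multicall.c change listed in the diffstat. A plausible reading of the
pair, sketched as a simplified dispatcher (not the actual do_multicall()):
one tick per batch received, one per entry executed from the batch.

    #include <stdio.h>

    enum { PERFC_calls_to_multicall, PERFC_calls_from_multicall, NUM_PERFCOUNTERS };
    static unsigned int perfcounters[NUM_PERFCOUNTERS];
    #define perfc_incr(x) (++perfcounters[PERFC_##x])

    struct multicall_entry { int op; long result; };

    static void do_multicall(struct multicall_entry *calls, unsigned int nr)
    {
        perfc_incr(calls_to_multicall);            /* one per batch          */
        for (unsigned int i = 0; i < nr; i++) {
            perfc_incr(calls_from_multicall);      /* one per batched call   */
            calls[i].result = calls[i].op;         /* stand-in for dispatch  */
        }
    }

    int main(void)
    {
        struct multicall_entry batch[3] = { {1, 0}, {2, 0}, {3, 0} };
        do_multicall(batch, 3);
        printf("to=%u from=%u\n",
               perfcounters[PERFC_calls_to_multicall],
               perfcounters[PERFC_calls_from_multicall]);   /* to=1 from=3 */
        return 0;
    }
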
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog