To: xen-devel@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-devel] Re: [PATCHv2] valgrind support for Xen privcmd ioctls / hypercalls
From: hellokitty <zhuce5555@xxxxxxx>
Date: Thu, 7 Jul 2011 01:53:48 -0700 (PDT)
In-reply-to: <1282222316.3170.2740.camel@xxxxxxxxxxxxxxxxxxxxxx>
References: <1282222316.3170.2740.camel@xxxxxxxxxxxxxxxxxxxxxx>

Hi Ian,

I am from China, and I have applied the PATCHv2 to valgrind (version
valgrind-3.6.1), but when I run ./configure and make I get the error
message below:

make[3]: Entering directory `/home/ibm/develop/valgrind-3.6.1/memcheck'
../coregrind/link_tool_exe_linux 0x38000000 gcc  -Wno-long-long 
-Wno-pointer-sign -fno-stack-protector   -o memcheck-x86-linux -m32
-mpreferred-stack-boundary=2 -O2 -g -Wall -Wmissing-prototypes -Wshadow
-Wpointer-arith -Wstrict-prototypes -Wmissing-declarations
-Wno-format-zero-length -fno-strict-aliasing -O2 -static -nodefaultlibs
-nostartfiles -u _start -Wl,--build-id=none -m32
memcheck_x86_linux-mc_leakcheck.o memcheck_x86_linux-mc_malloc_wrappers.o
memcheck_x86_linux-mc_main.o memcheck_x86_linux-mc_translate.o
memcheck_x86_linux-mc_machine.o memcheck_x86_linux-mc_errors.o
../coregrind/libcoregrind-x86-linux.a ../VEX/libvex-x86-linux.a -lgcc 
../coregrind/libcoregrind-x86-linux.a(libcoregrind_x86_linux_a-syswrap-linux.o):
In function `vgSysWrap_linux_sys_ioctl_after':
/home/ibm/develop/valgrind-3.6.1/coregrind/m_syswrap/syswrap-linux.c:5700:
undefined reference to `vgSysWrap_xen_ioctl_privcmd_mmapbatch_after'
/home/ibm/develop/valgrind-3.6.1/coregrind/m_syswrap/syswrap-linux.c:5694:
undefined reference to `vgSysWrap_xen_ioctl_privcmd_hypercall_after'
/home/ibm/develop/valgrind-3.6.1/coregrind/m_syswrap/syswrap-linux.c:5697:
undefined reference to `vgSysWrap_xen_ioctl_privcmd_mmap_after'
/home/ibm/develop/valgrind-3.6.1/coregrind/m_syswrap/syswrap-linux.c:5703:
undefined reference to `vgSysWrap_xen_ioctl_privcmd_mmapbatch_v2_after'
../coregrind/libcoregrind-x86-linux.a(libcoregrind_x86_linux_a-syswrap-linux.o):
In function `vgSysWrap_linux_sys_ioctl_before':
/home/ibm/develop/valgrind-3.6.1/coregrind/m_syswrap/syswrap-linux.c:4878:
undefined reference to `vgSysWrap_xen_ioctl_privcmd_mmapbatch_v2_before'
/home/ibm/develop/valgrind-3.6.1/coregrind/m_syswrap/syswrap-linux.c:4869:
undefined reference to `vgSysWrap_xen_ioctl_privcmd_hypercall_before'
/home/ibm/develop/valgrind-3.6.1/coregrind/m_syswrap/syswrap-linux.c:4875:
undefined reference to `vgSysWrap_xen_ioctl_privcmd_mmapbatch_before'
/home/ibm/develop/valgrind-3.6.1/coregrind/m_syswrap/syswrap-linux.c:4872:
undefined reference to `vgSysWrap_xen_ioctl_privcmd_mmap_before'
collect2: ld returned 1 exit status
make[3]: *** [memcheck-x86-linux] Error 1
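
All of the undefined references are to the vgSysWrap_xen_* wrappers that
should come from the new syswrap-xen.c, so my guess (and it is only a guess)
is that the new file was never compiled into libcoregrind. I think this can
be checked against the archive named in the error, for example:

  nm coregrind/libcoregrind-x86-linux.a | grep vgSysWrap_xen
  ls coregrind/*syswrap-xen*.o

If the grep prints nothing and there is no syswrap-xen object file, then my
Makefile.am changes were not picked up by the build.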



Below is my patch for valgrind-3.6.1:
diff --git a/configure.in b/configure.in
index 62e1837..e71ecd6 100644
--- a/configure.in
+++ b/configure.in
@@ -1608,6 +1608,11 @@ elif test x$VGCONF_PLATFORM_SEC_CAPS = xPPC32_AIX5 ; then
   mflag_secondary=-q32
 fi

+AC_ARG_WITH(xen,
+   [  --with-xen=             Specify location of Xen headers],
+   XEN_CFLAGS=-I$withval
+)
+AC_SUBST(XEN_CFLAGS)

 AC_ARG_WITH(mpicc,
    [  --with-mpicc=           Specify name of MPI2-ised C compiler],
diff --git a/coregrind/Makefile.am b/coregrind/Makefile.am
index d9d1bca..d7216f9 100644
--- a/coregrind/Makefile.am
+++ b/coregrind/Makefile.am
@@ -211,6 +211,7 @@
        m_syswrap/priv_syswrap-aix5.h \
        m_syswrap/priv_syswrap-darwin.h \
        m_syswrap/priv_syswrap-main.h \
+       m_syswrap/priv_syswrap-xen.h \
        m_ume/priv_ume.h


#----------------------------------------------------------------------------
@@ -338,6 +339,7 @@
        m_syswrap/syswrap-ppc64-aix5.c \
        m_syswrap/syswrap-x86-darwin.c \
        m_syswrap/syswrap-amd64-darwin.c \
+       m_syswrap/syswrap-xen.c \
        m_ume/elf.c \
        m_ume/macho.c \
        m_ume/main.c \
@@ -350,7 +352,7 @@
 libcoregrind_@VGCONF_ARCH_PRI@_@VGCONF_OS@_a_CPPFLAGS = \
     $(AM_CPPFLAGS_@VGCONF_PLATFORM_PRI_CAPS@)
 libcoregrind_@VGCONF_ARCH_PRI@_@VGCONF_OS@_a_CFLAGS = \
-    $(AM_CFLAGS_@VGCONF_PLATFORM_PRI_CAPS@)
+    $(AM_CFLAGS_@VGCONF_PLATFORM_PRI_CAPS@) @XEN_CFLAGS@
 libcoregrind_@VGCONF_ARCH_PRI@_@VGCONF_OS@_a_CCASFLAGS = \
     $(AM_CCASFLAGS_@VGCONF_PLATFORM_PRI_CAPS@)
 if VGCONF_HAVE_PLATFORM_SEC
diff --git a/coregrind/m_debuginfo/debuginfo.c b/coregrind/m_debuginfo/debuginfo.c
index 08babd0..5272fae 100644
--- a/coregrind/m_debuginfo/debuginfo.c
+++ b/coregrind/m_debuginfo/debuginfo.c
@@ -637,6 +637,11 @@ ULong VG_(di_notify_mmap)( Addr a, Bool allow_SkFileV )
    if (!filename)
       return 0;

+   if (strncmp(filename, "/proc/xen/", 10) == 0) {
+      //VG_(printf)("ignoring mmap of %s\n", filename);
+      return 0;
+   }
+
    if (debug)
       VG_(printf)("di_notify_mmap-2: %s\n", filename);

diff --git a/coregrind/m_syswrap/priv_syswrap-xen.h b/coregrind/m_syswrap/priv_syswrap-xen.h
new file mode 100644
index 0000000..42505bb
--- /dev/null
+++ b/coregrind/m_syswrap/priv_syswrap-xen.h
@@ -0,0 +1,13 @@
+#ifndef __PRIV_SYSWRAP_XEN_H
+#define __PRIV_SYSWRAP_XEN_H
+
+DECL_TEMPLATE(xen, ioctl_privcmd_hypercall);
+DECL_TEMPLATE(xen, ioctl_privcmd_mmap);
+DECL_TEMPLATE(xen, ioctl_privcmd_mmapbatch);
+DECL_TEMPLATE(xen, ioctl_privcmd_mmapbatch_v2);
+
+#endif   // __PRIV_SYSWRAP_XEN_H
+
+/*--------------------------------------------------------------------*/
+/*--- end                                                          ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/m_syswrap/syswrap-linux.c b/coregrind/m_syswrap/syswrap-linux.c
index 247402d..baa33c2 100644
--- a/coregrind/m_syswrap/syswrap-linux.c
+++ b/coregrind/m_syswrap/syswrap-linux.c
@@ -57,7 +57,7 @@
 #include "priv_types_n_macros.h"
 #include "priv_syswrap-generic.h"
 #include "priv_syswrap-linux.h"
-
+#include "priv_syswrap-xen.h"

 // Run a thread from beginning to end and return the thread's
 // scheduler-return-code.
@@ -4864,6 +4864,20 @@ PRE(sys_ioctl)
       }
       break;

+
+   case VKI_XEN_IOCTL_PRIVCMD_HYPERCALL:
+      WRAPPER_PRE_NAME(xen, ioctl_privcmd_hypercall)(tid, layout, arrghs, status, flags);
+      break;
+   case VKI_XEN_IOCTL_PRIVCMD_MMAP:
+      WRAPPER_PRE_NAME(xen, ioctl_privcmd_mmap)(tid, layout, arrghs, status, flags);
+      break;
+   case VKI_XEN_IOCTL_PRIVCMD_MMAPBATCH:
+      WRAPPER_PRE_NAME(xen, ioctl_privcmd_mmapbatch)(tid, layout, arrghs, status, flags);
+      break;
+   case VKI_XEN_IOCTL_PRIVCMD_MMAPBATCH_V2:
+      WRAPPER_PRE_NAME(xen, ioctl_privcmd_mmapbatch_v2)(tid, layout, arrghs, status, flags);
+      break;
+
    default:
       /* EVIOC* are variable length and return size written on success */
       switch (ARG2 & ~(_VKI_IOC_SIZEMASK << _VKI_IOC_SIZESHIFT)) {
@@ -5676,6 +5676,19 @@ POST(sys_ioctl)
       }
       break;

+   case VKI_XEN_IOCTL_PRIVCMD_HYPERCALL:
+      WRAPPER_POST_NAME(xen, ioctl_privcmd_hypercall)(tid, arrghs, status);
+      break;
+   case VKI_XEN_IOCTL_PRIVCMD_MMAP:
+      WRAPPER_POST_NAME(xen, ioctl_privcmd_mmap)(tid, arrghs, status);
+      break;
+   case VKI_XEN_IOCTL_PRIVCMD_MMAPBATCH:
+      WRAPPER_POST_NAME(xen, ioctl_privcmd_mmapbatch)(tid, arrghs, status);
+      break;
+   case VKI_XEN_IOCTL_PRIVCMD_MMAPBATCH_V2:
+      WRAPPER_POST_NAME(xen, ioctl_privcmd_mmapbatch_v2)(tid, arrghs, status);
+      break;
+
    default:
       /* EVIOC* are variable length and return size written on success */
       switch (ARG2 & ~(_VKI_IOC_SIZEMASK << _VKI_IOC_SIZESHIFT)) {
diff --git a/coregrind/m_syswrap/syswrap-xen.c b/coregrind/m_syswrap/syswrap-xen.c
new file mode 100644
index 0000000..5e87f8e
--- /dev/null
+++ b/coregrind/m_syswrap/syswrap-xen.c
@@ -0,0 +1,751 @@
+#include "pub_core_basics.h"
+#include "pub_core_vki.h"
+#include "pub_core_vkiscnums.h"
+#include "pub_core_threadstate.h"
+#include "pub_core_aspacemgr.h"
+#include "pub_core_debuginfo.h"    // VG_(di_notify_*)
+#include "pub_core_transtab.h"     // VG_(discard_translations)
+#include "pub_core_xarray.h"
+#include "pub_core_clientstate.h"
+#include "pub_core_debuglog.h"
+#include "pub_core_libcbase.h"
+#include "pub_core_libcassert.h"
+#include "pub_core_libcfile.h"
+#include "pub_core_libcprint.h"
+#include "pub_core_libcproc.h"
+#include "pub_core_libcsignal.h"
+#include "pub_core_mallocfree.h"
+#include "pub_core_tooliface.h"
+#include "pub_core_options.h"
+#include "pub_core_scheduler.h"
+#include "pub_core_signals.h"
+#include "pub_core_syscall.h"
+#include "pub_core_syswrap.h"
+
+#include "priv_types_n_macros.h"
+#include "priv_syswrap-generic.h"
+#include "priv_syswrap-xen.h"
+
+#include <stdint.h>
+
+#define __XEN_TOOLS__
+
+#include <xen/xen.h>
+#include <xen/sysctl.h>
+#include <xen/domctl.h>
+#include <xen/memory.h>
+#include <xen/event_channel.h>
+#include <xen/version.h>
+
+#include <xen/hvm/hvm_op.h>
+
+#define PRE(name)       DEFN_PRE_TEMPLATE(xen, name)
+#define POST(name)      DEFN_POST_TEMPLATE(xen, name)
+
+PRE(ioctl_privcmd_hypercall)
+{
+   struct vki_xen_privcmd_hypercall *args = (struct vki_xen_privcmd_hypercall *)(ARG3);
+
+   if (!args)
+      return;
+
+
+   switch (args->op) {
+   case __HYPERVISOR_memory_op:
+      PRINT("__HYPERVISOR_memory_op ( %lld, %llx )", args->arg[0],
args->arg[1]);
+
+      switch (args->arg[0]) {
+      case XENMEM_set_memory_map: {
+        xen_foreign_memory_map_t *arg = (xen_foreign_memory_map_t *)(unsigned int)args->arg[1];
+        PRE_MEM_READ("XENMEM_set_memory_map", (Addr)&arg->domid, sizeof(arg->domid));
+        PRE_MEM_READ("XENMEM_set_memory_map", (Addr)&arg->map, sizeof(arg->map));
+        break;
+      }
+      case XENMEM_increase_reservation:
+      case XENMEM_decrease_reservation:
+      case XENMEM_populate_physmap: {
+        struct xen_memory_reservation *memory_reservation = (struct xen_memory_reservation *)(unsigned int)args->arg[1];
+        char *which;
+
+        switch (args->arg[0]) {
+        case XENMEM_increase_reservation:
+           which = "XENMEM_increase_reservation";
+           break;
+        case XENMEM_decrease_reservation:
+           which = "XENMEM_decrease_reservation";
+           PRE_MEM_READ(which, (Addr)memory_reservation->extent_start.p, sizeof(xen_pfn_t) * memory_reservation->nr_extents);
+           break;
+        case XENMEM_populate_physmap:
+           which = "XENMEM_populate_physmap";
+           PRE_MEM_READ(which, (Addr)memory_reservation->extent_start.p, sizeof(xen_pfn_t) * memory_reservation->nr_extents);
+           break;
+        default:
+           which = "XENMEM_unknown";
+           break;
+        }
+
+        PRE_MEM_READ(which, (Addr)&memory_reservation->extent_start, sizeof(memory_reservation->extent_start));
+        PRE_MEM_READ(which, (Addr)&memory_reservation->nr_extents, sizeof(memory_reservation->nr_extents));
+        PRE_MEM_READ(which, (Addr)&memory_reservation->extent_order, sizeof(memory_reservation->extent_order));
+        PRE_MEM_READ(which, (Addr)&memory_reservation->mem_flags, sizeof(memory_reservation->mem_flags));
+        PRE_MEM_READ(which, (Addr)&memory_reservation->domid, sizeof(memory_reservation->domid));
+
+        break;
+      }
+
+      default:
+        VG_(printf)("pre __HYPERVISOR_memory_op unknown command %lld\n",
args->arg[0]);
+        break;
+      }
+      break;
+
+   case __HYPERVISOR_mmuext_op: {
+          mmuext_op_t *ops = (void *)(unsigned int)args->arg[0];
+          unsigned int i, nr = args->arg[1];
+          //unsigned int *pdone = (void *)(unsigned int)args->arg[2];
+          //unsigned int foreigndom = args->arg[3];
+          //VG_(printf)("HYPERVISOR_mmuext_op %d ops at %p on dom%d done at
%p\n", nr, ops, foreigndom, pdone);
+          for (i=0; i&lt;nr; i++) {
+                  mmuext_op_t *op = ops + i;
+                  PRE_MEM_READ(&quot;__HYPERVISOR_MMUEXT_OP&quot;,
(Addr)&amp;op-&gt;cmd, sizeof(op->cmd));
+                  switch(op->cmd) {
+                  case MMUEXT_PIN_L1_TABLE:
+                  case MMUEXT_PIN_L2_TABLE:
+                  case MMUEXT_PIN_L3_TABLE:
+                  case MMUEXT_PIN_L4_TABLE:
+                  case MMUEXT_UNPIN_TABLE:
+                  case MMUEXT_NEW_BASEPTR:
+                  case MMUEXT_CLEAR_PAGE:
+                  case MMUEXT_COPY_PAGE:
+                  case MMUEXT_MARK_SUPER:
+                  case MMUEXT_UNMARK_SUPER:
+                          PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg1.mfn",
(Addr)&op->arg1.mfn, sizeof(op->arg1.mfn));
+                          break;
+
+                  case MMUEXT_INVLPG_LOCAL:
+                  case MMUEXT_INVLPG_ALL:
+                  case MMUEXT_SET_LDT:
+                          PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg1.mfn",
(Addr)&op->arg1.linear_addr, sizeof(op->arg1.linear_addr));
+                          break;
+
+                  case MMUEXT_TLB_FLUSH_LOCAL:
+                  case MMUEXT_TLB_FLUSH_MULTI:
+                  case MMUEXT_INVLPG_MULTI:
+                  case MMUEXT_TLB_FLUSH_ALL:
+                  case MMUEXT_FLUSH_CACHE:
+                  case MMUEXT_NEW_USER_BASEPTR:
+                  case MMUEXT_FLUSH_CACHE_GLOBAL:
+                          /* None */
+                          break;
+                  }
+
+                  switch(op->cmd) {
+                  case MMUEXT_SET_LDT:
+                          PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP
arg2.nr_ents", (Addr)&op->arg2.nr_ents, sizeof(op->arg2.nr_ents));
+                          break;
+
+                  case MMUEXT_TLB_FLUSH_MULTI:
+                  case MMUEXT_INVLPG_MULTI:
+                          /* How many??? */
+                          PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP
arg2.vcpumask", (Addr)&op->arg2.vcpumask, sizeof(op->arg2.vcpumask));
+                          break;
+
+                  case MMUEXT_COPY_PAGE:
+                          PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP
arg2.src_mfn", (Addr)&op->arg2.src_mfn, sizeof(op->arg2.src_mfn));
+                          break;
+
+                  case MMUEXT_PIN_L1_TABLE:
+                  case MMUEXT_PIN_L2_TABLE:
+                  case MMUEXT_PIN_L3_TABLE:
+                  case MMUEXT_PIN_L4_TABLE:
+                  case MMUEXT_UNPIN_TABLE:
+                  case MMUEXT_NEW_BASEPTR:
+                  case MMUEXT_TLB_FLUSH_LOCAL:
+                  case MMUEXT_INVLPG_LOCAL:
+                  case MMUEXT_TLB_FLUSH_ALL:
+                  case MMUEXT_INVLPG_ALL:
+                  case MMUEXT_FLUSH_CACHE:
+                  case MMUEXT_NEW_USER_BASEPTR:
+                  case MMUEXT_CLEAR_PAGE:
+                  case MMUEXT_FLUSH_CACHE_GLOBAL:
+                  case MMUEXT_MARK_SUPER:
+                  case MMUEXT_UNMARK_SUPER:
+                          /* None */
+                          break;
+                  }
+          }
+          break;
+   }
+
+   case __HYPERVISOR_event_channel_op:
+   case __HYPERVISOR_event_channel_op_compat: {
+      __vki_u32 cmd;
+      void *arg;
+      int compat = 0;
+
+      if (args->op == __HYPERVISOR_event_channel_op) {
+        cmd = args->arg[0];
+        arg = (void *)(unsigned int)args->arg[1];
+      } else {
+        struct evtchn_op *evtchn = (struct evtchn_op *)(unsigned int)args->arg[0];
+        cmd = evtchn->cmd;
+        arg = &evtchn->u;
+        compat = 1;
+      }
+      PRINT("__HYPERVISOR_event_channel_op ( %d, %p )%s", cmd, arg, compat
? " compat" : "");
+
+      switch (cmd) {
+      case EVTCHNOP_alloc_unbound: {
+        struct evtchn_alloc_unbound *alloc_unbound = arg;
+        PRE_MEM_READ("EVTCHNOP_alloc_unbound", (Addr)&alloc_unbound->dom,
sizeof(alloc_unbound->dom));
+        PRE_MEM_READ("EVTCHNOP_alloc_unbound",
(Addr)&alloc_unbound->remote_dom, sizeof(alloc_unbound->remote_dom));
+        break;
+      }
+      default:
+        VG_(printf)("pre __HYPERVISOR_event_channel_op unknown command
%d\n", cmd);
+        break;
+      }
+      break;
+   }
+
+   case __HYPERVISOR_xen_version:
+      PRINT("__HYPERVISOR_xen_version ( %lld, %llx )", args->arg[0],
args->arg[1]);
+
+      switch (args->arg[0]) {
+      case XENVER_version:
+      case XENVER_extraversion:
+      case XENVER_compile_info:
+      case XENVER_capabilities:
+      case XENVER_changeset:
+      case XENVER_platform_parameters:
+      case XENVER_get_features:
+      case XENVER_pagesize:
+      case XENVER_guest_handle:
+      case XENVER_commandline:
+        /* No inputs */
+        break;
+
+      default:
+        VG_(printf)("pre __HYPERVISOR_xen_version unknown command %lld\n",
args->arg[0]);
+        break;
+      }
+      break;
+   case __HYPERVISOR_sysctl: {
+      struct xen_sysctl *sysctl = (struct xen_sysctl *)(unsigned int)args->arg[0];
+
+      PRINT("__HYPERVISOR_sysctl ( %d )", sysctl->cmd);
+
+      /* Single argument hypercall */
+      PRE_MEM_READ("hypercall", ARG3, 8 + ( 8 * 1 ) );
+
+      /*
+       * Common part of xen_sysctl:
+       *    uint32_t cmd;
+       *    uint32_t interface_version;
+       */
+      PRE_MEM_READ("__HYPERVISOR_sysctl", args->arg[0], sizeof(uint32_t) +
sizeof(uint32_t));
+
+      if (!sysctl || sysctl->interface_version != XEN_SYSCTL_INTERFACE_VERSION)
+        /* BUG ? */
+        return;
+
+#define __PRE_XEN_SYSCTL_READ(_sysctl, _union, _field) PRE_MEM_READ("XEN_SYSCTL_" # _sysctl, \
+                                                        (Addr)&sysctl->u._union._field, \
+                                                        sizeof(sysctl->u._union._field))
+#define PRE_XEN_SYSCTL_READ(_sysctl, _field) __PRE_XEN_SYSCTL_READ(_sysctl, _sysctl, _field)
+      switch (sysctl->cmd) {
+      case XEN_SYSCTL_getdomaininfolist:
+        PRE_XEN_SYSCTL_READ(getdomaininfolist, first_domain);
+        PRE_XEN_SYSCTL_READ(getdomaininfolist, max_domains);
+        PRE_XEN_SYSCTL_READ(getdomaininfolist, buffer);
+        break;
+
+      case XEN_SYSCTL_cpupool_op:
+        /* yes the interface is this fucking barking */
+        PRE_XEN_SYSCTL_READ(cpupool_op, op);
+
+        switch(sysctl->u.cpupool_op.op) {
+        case XEN_SYSCTL_CPUPOOL_OP_CREATE:
+        case XEN_SYSCTL_CPUPOOL_OP_DESTROY:
+        case XEN_SYSCTL_CPUPOOL_OP_INFO:
+        case XEN_SYSCTL_CPUPOOL_OP_ADDCPU:
+        case XEN_SYSCTL_CPUPOOL_OP_RMCPU:
+        case XEN_SYSCTL_CPUPOOL_OP_MOVEDOMAIN:
+           PRE_XEN_SYSCTL_READ(cpupool_op, cpupool_id);
+        }
+
+        if (sysctl->u.cpupool_op.op == XEN_SYSCTL_CPUPOOL_OP_CREATE)
+           PRE_XEN_SYSCTL_READ(cpupool_op, sched_id);
+
+        if (sysctl->u.cpupool_op.op == XEN_SYSCTL_CPUPOOL_OP_MOVEDOMAIN)
+           PRE_XEN_SYSCTL_READ(cpupool_op, domid);
+
+        if (sysctl->u.cpupool_op.op == XEN_SYSCTL_CPUPOOL_OP_ADDCPU ||
+            sysctl->u.cpupool_op.op == XEN_SYSCTL_CPUPOOL_OP_RMCPU)
+           PRE_XEN_SYSCTL_READ(cpupool_op, cpu);
+
+        break;
+
+      case XEN_SYSCTL_physinfo:
+        /* No input params */
+        break;
+
+      default:
+        VG_(printf)("pre sysctl version %x unknown cmd %d\n",
+                    sysctl->interface_version, sysctl->cmd);
+        break;
+      }
+#undef PRE_XEN_SYSCTL_READ
+#undef __PRE_XEN_SYSCTL_READ
+   }
+      break;
+
+   case __HYPERVISOR_domctl: {
+      struct xen_domctl *domctl = (struct xen_domctl *)(unsigned int)args->arg[0];
+
+      PRINT("__HYPERVISOR_domctl ( %d )", domctl->cmd);
+
+      /* Single argument hypercall */
+      PRE_MEM_READ("hypercall", ARG3, 8 + ( 8 * 1 ) );
+
+      /*
+       * Common part of xen_domctl:
+       *    uint32_t cmd;
+       *    uint32_t interface_version;
+       *    domid_t  domain;
+       */
+      PRE_MEM_READ("__HYPERVISOR_domctl", args->arg[0], sizeof(uint32_t) +
sizeof(uint32_t) + sizeof(domid_t));
+
+      if (!domctl || domctl->interface_version != XEN_DOMCTL_INTERFACE_VERSION)
+        /* BUG ? */
+        return;
+
+      //PRE_REG_READ1(long, "__HYPERVISOR_domctl",);
+#define __PRE_XEN_DOMCTL_READ(_domctl, _union, _field) PRE_MEM_READ("XEN_DOMCTL_" # _domctl, \
+                                                        (Addr)&domctl->u._union._field, \
+                                                        sizeof(domctl->u._union._field))
+#define PRE_XEN_DOMCTL_READ(_domctl, _field) __PRE_XEN_DOMCTL_READ(_domctl, _domctl, _field)
+
+      switch (domctl->cmd) {
+      case XEN_DOMCTL_destroydomain:
+      case XEN_DOMCTL_pausedomain:
+      case XEN_DOMCTL_max_vcpus:
+      case XEN_DOMCTL_get_address_size:
+      case XEN_DOMCTL_gettscinfo:
+      case XEN_DOMCTL_getdomaininfo:
+      case XEN_DOMCTL_unpausedomain:
+        /* No input fields. */
+        break;
+
+      case XEN_DOMCTL_createdomain:
+        PRE_XEN_DOMCTL_READ(createdomain, ssidref);
+        PRE_XEN_DOMCTL_READ(createdomain, handle);
+        PRE_XEN_DOMCTL_READ(createdomain, flags);
+        break;
+
+      case XEN_DOMCTL_max_mem:
+        PRE_XEN_DOMCTL_READ(max_mem, max_memkb);
+        break;
+
+      case XEN_DOMCTL_set_address_size:
+        __PRE_XEN_DOMCTL_READ(set_address_size, address_size, size);
+        break;
+
+      case XEN_DOMCTL_settscinfo:
+        __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info, info.tsc_mode);
+        __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info, info.gtsc_khz);
+        __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info, info.incarnation);
+        __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info, info.elapsed_nsec);
+        break;
+
+      case XEN_DOMCTL_hypercall_init:
+        PRE_XEN_DOMCTL_READ(hypercall_init, gmfn);
+        break;
+
+      case XEN_DOMCTL_getvcpuinfo:
+        PRE_XEN_DOMCTL_READ(getvcpuinfo, vcpu);
+        break;
+
+      case XEN_DOMCTL_getvcpuaffinity:
+        __PRE_XEN_DOMCTL_READ(getvcpuaffinity, vcpuaffinity, vcpu);
+        break;
+
+      case XEN_DOMCTL_setvcpuaffinity:
+        __PRE_XEN_DOMCTL_READ(setvcpuaffinity, vcpuaffinity, vcpu);
+        PRE_MEM_READ("XEN_DOMCTL_setvcpuaffinity",
+                     (Addr)domctl->u.vcpuaffinity.cpumap.bitmap.p,
+                       domctl->u.vcpuaffinity.cpumap.nr_cpus / 8);
+        break;
+
+      case XEN_DOMCTL_getvcpucontext:
+        __PRE_XEN_DOMCTL_READ(getvcpucontext, vcpucontext, vcpu);
+        break;
+
+      case XEN_DOMCTL_setvcpucontext:
+        __PRE_XEN_DOMCTL_READ(setvcpucontext, vcpucontext, vcpu);
+        __PRE_XEN_DOMCTL_READ(setvcpucontext, vcpucontext, ctxt.p);
+        break;
+
+      case XEN_DOMCTL_set_cpuid:
+        PRE_MEM_READ("XEN_DOMCTL_set_cpuid", (Addr)&domctl->u.cpuid,
sizeof(domctl->u.cpuid));
+        break;
+      default:
+        VG_(printf)("pre domctl version %x unknown cmd %d on domain %d\n",
+                    domctl->interface_version, domctl->cmd, domctl->domain);
+        break;
+      }
+#undef PRE_XEN_DOMCTL_READ
+#undef __PRE_XEN_DOMCTL_READ
+   }
+      break;
+
+   case __HYPERVISOR_hvm_op: {
+      unsigned long op = args->arg[0];
+      void *arg = (void *)(unsigned long)args->arg[1];
+
+      PRINT("__HYPERVISOR_hvm_op ( %ld, %p )", op, arg);
+
+      //PRE_REG_READ1(long, "__HYPERVISOR_hvm_op",);
+#define __PRE_XEN_HVMOP_READ(_hvm_op, _type, _field) PRE_MEM_READ("XEN_HVMOP_" # _hvm_op, \
+                                                      (Addr)&((_type*)arg)->_field, \
+                                                      sizeof(((_type*)arg)->_field))
+#define PRE_XEN_HVMOP_READ(_hvm_op, _field) __PRE_XEN_HVMOP_READ(_hvm_op, xen_hvm_ ## _hvm_op ## _t, _field)
+
+      switch (op) {
+      case HVMOP_set_param:
+        __PRE_XEN_HVMOP_READ(set_param, xen_hvm_param_t, domid);
+        __PRE_XEN_HVMOP_READ(set_param, xen_hvm_param_t, index);
+        __PRE_XEN_HVMOP_READ(set_param, xen_hvm_param_t, value);
+        break;
+
+      case HVMOP_get_param:
+        __PRE_XEN_HVMOP_READ(get_param, xen_hvm_param_t, domid);
+        __PRE_XEN_HVMOP_READ(get_param, xen_hvm_param_t, index);
+        break;
+
+      default:
+        VG_(printf)("pre hvm_op unknown OP %ld\n", op);
+        break;
+#undef __PRE_XEN_HVMOP_READ
+#undef PRE_XEN_HVMOP_READ
+      }
+   }
+      break;
+
+   default:
+      VG_(printf)("pre unknown hypercall %lld ( %#llx, %#llx, %#llx, %#llx,
%#llx )\n",
+                 args->op, args->arg[0], args->arg[1], args->arg[2],
args->arg[3], args->arg[4]);
+   }
+}
+
+POST(ioctl_privcmd_hypercall)
+{
+   struct vki_xen_privcmd_hypercall *args = (struct vki_xen_privcmd_hypercall *)(ARG3);
+
+   if (!args)
+      return;
+
+   switch (args->op) {
+   case __HYPERVISOR_memory_op:
+      switch (args->arg[0]) {
+      case XENMEM_set_memory_map:
+      case XENMEM_decrease_reservation:
+        /* No outputs */
+        break;
+      case XENMEM_increase_reservation:
+      case XENMEM_populate_physmap: {
+        struct xen_memory_reservation *memory_reservation = (struct xen_memory_reservation *)(unsigned int)args->arg[1];
+
+        POST_MEM_WRITE((Addr)memory_reservation->extent_start.p, sizeof(xen_pfn_t) * ARG1);
+      }
+        break;
+
+      default:
+        VG_(printf)("post __HYPERVISOR_memory_op unknown command %lld\n",
args->arg[0]);
+        break;
+      }
+      break;
+
+   case __HYPERVISOR_mmuext_op: {
+          //mmuext_op_t *ops = (void *)(unsigned int)args->arg[0];
+          //unsigned int nr = args->arg[1];
+          unsigned int *pdone = (void *)(unsigned int)args->arg[2];
+          //unsigned int foreigndom = args->arg[3];
+          /* simplistic */
+          POST_MEM_WRITE((Addr)pdone, sizeof(*pdone));
+          break;
+   }
+
+   case __HYPERVISOR_event_channel_op:
+   case __HYPERVISOR_event_channel_op_compat: {
+      __vki_u32 cmd;
+      void *arg;
+
+      if (args->op == __HYPERVISOR_event_channel_op) {
+        cmd = args->arg[0];
+        arg = (void *)(unsigned int)args->arg[1];
+      } else {
+        struct evtchn_op *evtchn = (struct evtchn_op *)(unsigned int)args->arg[0];
+        cmd = evtchn->cmd;
+        arg = &evtchn->u;
+      }
+      switch (cmd) {
+      case EVTCHNOP_alloc_unbound: {
+        struct evtchn_alloc_unbound *alloc_unbound = arg;
+        POST_MEM_WRITE((Addr)&alloc_unbound->port, sizeof(alloc_unbound->port));
+        break;
+      }
+      default:
+        VG_(printf)("post __HYPERVISOR_event_channel_op unknown command
%d\n", cmd);
+        break;
+      }
+      break;
+
+   }
+
+   case __HYPERVISOR_xen_version:
+      switch (args->arg[0]) {
+      case XENVER_version:
+        /* No outputs */
+        break;
+      case XENVER_extraversion:
+        POST_MEM_WRITE((Addr)args->arg[1], sizeof(xen_extraversion_t));
+        break;
+      case XENVER_compile_info:
+        POST_MEM_WRITE((Addr)args->arg[1], sizeof(xen_compile_info_t));
+        break;
+      case XENVER_capabilities:
+        POST_MEM_WRITE((Addr)args->arg[1], sizeof(xen_capabilities_info_t));
+        break;
+      case XENVER_changeset:
+        POST_MEM_WRITE((Addr)args->arg[1], sizeof(xen_changeset_info_t));
+        break;
+      case XENVER_platform_parameters:
+        POST_MEM_WRITE((Addr)args->arg[1], sizeof(xen_platform_parameters_t));
+        break;
+      case XENVER_get_features:
+        POST_MEM_WRITE((Addr)args->arg[1], sizeof(xen_feature_info_t));
+        break;
+      case XENVER_pagesize:
+        /* No outputs */
+        break;
+      case XENVER_guest_handle:
+        POST_MEM_WRITE((Addr)args->arg[1], sizeof(xen_domain_handle_t));
+        break;
+      case XENVER_commandline:
+        POST_MEM_WRITE((Addr)args->arg[1], sizeof(xen_commandline_t));
+        break;
+      default:
+        VG_(printf)("post __HYPERVISOR_xen_version unknown command %lld\n",
args->arg[0]);
+        break;
+      }
+      break;
+
+   case __HYPERVISOR_sysctl: {
+      struct xen_sysctl *sysctl = (struct xen_sysctl *)(unsigned int)args->arg[0];
+
+      if (!sysctl || sysctl->interface_version != XEN_SYSCTL_INTERFACE_VERSION)
+        return;
+
+#define __POST_XEN_SYSCTL_WRITE(_sysctl, _union, _field) POST_MEM_WRITE((Addr)&sysctl->u._union._field, sizeof(sysctl->u._union._field));
+#define POST_XEN_SYSCTL_WRITE(_sysctl, _field) __POST_XEN_SYSCTL_WRITE(_sysctl, _sysctl, _field)
+      switch (sysctl->cmd) {
+      case XEN_SYSCTL_getdomaininfolist:
+        POST_XEN_SYSCTL_WRITE(getdomaininfolist, num_domains);
+        POST_MEM_WRITE((Addr)sysctl->u.getdomaininfolist.buffer.p,
+                       sizeof(xen_domctl_getdomaininfo_t) * sysctl->u.getdomaininfolist.num_domains);
+        break;
+
+      case XEN_SYSCTL_cpupool_op:
+        if (sysctl->u.cpupool_op.op == XEN_SYSCTL_CPUPOOL_OP_CREATE ||
+            sysctl->u.cpupool_op.op == XEN_SYSCTL_CPUPOOL_OP_INFO)
+           POST_XEN_SYSCTL_WRITE(cpupool_op, cpupool_id);
+        if (sysctl->u.cpupool_op.op == XEN_SYSCTL_CPUPOOL_OP_INFO) {
+           POST_XEN_SYSCTL_WRITE(cpupool_op, sched_id);
+           POST_XEN_SYSCTL_WRITE(cpupool_op, n_dom);
+        }
+        if (sysctl->u.cpupool_op.op == XEN_SYSCTL_CPUPOOL_OP_INFO ||
+            sysctl->u.cpupool_op.op == XEN_SYSCTL_CPUPOOL_OP_FREEINFO)
+           POST_XEN_SYSCTL_WRITE(cpupool_op, cpumap);
+        break;
+
+      case XEN_SYSCTL_physinfo:
+        POST_XEN_SYSCTL_WRITE(physinfo, threads_per_core);
+        POST_XEN_SYSCTL_WRITE(physinfo, cores_per_socket);
+        POST_XEN_SYSCTL_WRITE(physinfo, nr_cpus);
+        POST_XEN_SYSCTL_WRITE(physinfo, max_cpu_id);
+        POST_XEN_SYSCTL_WRITE(physinfo, nr_nodes);
+        POST_XEN_SYSCTL_WRITE(physinfo, max_node_id);
+        POST_XEN_SYSCTL_WRITE(physinfo, cpu_khz);
+        POST_XEN_SYSCTL_WRITE(physinfo, total_pages);
+        POST_XEN_SYSCTL_WRITE(physinfo, free_pages);
+        POST_XEN_SYSCTL_WRITE(physinfo, scrub_pages);
+        POST_XEN_SYSCTL_WRITE(physinfo, hw_cap[8]);
+        POST_XEN_SYSCTL_WRITE(physinfo, capabilities);
+        break;
+
+      default:
+        VG_(printf)("post sysctl version %x cmd %d\n",
+                    sysctl->interface_version, sysctl->cmd);
+        break;
+      }
+#undef POST_XEN_SYSCTL_WRITE
+#undef __POST_XEN_SYSCTL_WRITE
+      break;
+   }
+
+   case __HYPERVISOR_domctl: {
+      struct xen_domctl *domctl = (struct xen_domctl *)(unsigned int)args->arg[0];
+
+      if (!domctl || domctl->interface_version != XEN_DOMCTL_INTERFACE_VERSION)
+        return;
+
+#define __POST_XEN_DOMCTL_WRITE(_domctl, _union, _field) POST_MEM_WRITE((Addr)&domctl->u._union._field, sizeof(domctl->u._union._field));
+#define POST_XEN_DOMCTL_WRITE(_domctl, _field) __POST_XEN_DOMCTL_WRITE(_domctl, _domctl, _field)
+      switch (domctl->cmd) {
+      case XEN_DOMCTL_createdomain:
+      case XEN_DOMCTL_destroydomain:
+      case XEN_DOMCTL_pausedomain:
+      case XEN_DOMCTL_max_mem:
+      case XEN_DOMCTL_set_address_size:
+      case XEN_DOMCTL_settscinfo:
+      case XEN_DOMCTL_hypercall_init:
+      case XEN_DOMCTL_setvcpuaffinity:
+      case XEN_DOMCTL_setvcpucontext:
+      case XEN_DOMCTL_set_cpuid:
+      case XEN_DOMCTL_unpausedomain:
+        /* No output fields */
+        break;
+
+      case XEN_DOMCTL_max_vcpus:
+        POST_XEN_DOMCTL_WRITE(max_vcpus, max);
+        break;
+
+      case XEN_DOMCTL_get_address_size:
+        __POST_XEN_DOMCTL_WRITE(get_address_size, address_size, size);
+        break;
+
+      case XEN_DOMCTL_gettscinfo:
+        __POST_XEN_DOMCTL_WRITE(settscinfo, tsc_info, info.tsc_mode);
+        __POST_XEN_DOMCTL_WRITE(settscinfo, tsc_info, info.gtsc_khz);
+        __POST_XEN_DOMCTL_WRITE(settscinfo, tsc_info, info.incarnation);
+        __POST_XEN_DOMCTL_WRITE(settscinfo, tsc_info, info.elapsed_nsec);
+        break;
+
+      case XEN_DOMCTL_getvcpuinfo:
+        POST_XEN_DOMCTL_WRITE(getvcpuinfo, online);
+        POST_XEN_DOMCTL_WRITE(getvcpuinfo, blocked);
+        POST_XEN_DOMCTL_WRITE(getvcpuinfo, running);
+        POST_XEN_DOMCTL_WRITE(getvcpuinfo, cpu_time);
+        POST_XEN_DOMCTL_WRITE(getvcpuinfo, cpu);
+        break;
+
+      case XEN_DOMCTL_getvcpuaffinity:
+        POST_MEM_WRITE((Addr)domctl->u.vcpuaffinity.cpumap.bitmap.p,
+                       domctl->u.vcpuaffinity.cpumap.nr_cpus / 8);
+        break;
+
+      case XEN_DOMCTL_getdomaininfo:
+        POST_XEN_DOMCTL_WRITE(getdomaininfo, domain);
+        POST_XEN_DOMCTL_WRITE(getdomaininfo, flags);
+        POST_XEN_DOMCTL_WRITE(getdomaininfo, tot_pages);
+        POST_XEN_DOMCTL_WRITE(getdomaininfo, max_pages);
+        POST_XEN_DOMCTL_WRITE(getdomaininfo, shr_pages);
+        POST_XEN_DOMCTL_WRITE(getdomaininfo, shared_info_frame);
+        POST_XEN_DOMCTL_WRITE(getdomaininfo, cpu_time);
+        POST_XEN_DOMCTL_WRITE(getdomaininfo, nr_online_vcpus);
+        POST_XEN_DOMCTL_WRITE(getdomaininfo, max_vcpu_id);
+        POST_XEN_DOMCTL_WRITE(getdomaininfo, ssidref);
+        POST_XEN_DOMCTL_WRITE(getdomaininfo, handle);
+        POST_XEN_DOMCTL_WRITE(getdomaininfo, cpupool);
+        break;
+
+      case XEN_DOMCTL_getvcpucontext:
+        __POST_XEN_DOMCTL_WRITE(getvcpucontext, vcpucontext, ctxt.p);
+        break;
+
+      default:
+        VG_(printf)("post domctl version %x cmd %d on domain %d\n",
+                    domctl->interface_version, domctl->cmd, domctl->domain);
+        break;
+      }
+#undef POST_XEN_DOMCTL_WRITE
+#undef __POST_XEN_DOMCTL_WRITE
+      break;
+   }
+
+
+   case __HYPERVISOR_hvm_op: {
+      unsigned long op = args->arg[0];
+      void *arg = (void *)(unsigned long)args->arg[1];
+
+#define __POST_XEN_HVMOP_WRITE(_hvm_op, _type, _field) POST_MEM_WRITE((Addr)&((_type*)arg)->_field, \
+                                                        sizeof(((_type*)arg)->_field))
+#define POST_XEN_HVMOP_WRITE(_hvm_op, _field) __POST_XEN_HVMOP_WRITE(_hvm_op, xen_hvm_ ## _hvm_op ## _t, _field)
+      switch (op) {
+      case HVMOP_set_param:
+        /* No output parameters */
+        break;
+
+      case HVMOP_get_param:
+        __POST_XEN_HVMOP_WRITE(get_param, xen_hvm_param_t, value);
+        break;
+
+      default:
+        VG_(printf)("post hvm_op unknown OP %ld\n", op);
+        break;
+#undef __POST_XEN_HVMOP_WRITE
+#undef POST_XEN_HVMOP_WRITE
+      }
+   }
+      break;
+
+   default:
+      VG_(printf)("post unknown hypercall %lld ( %#llx, %#llx, %#llx,
%#llx, %#llx )\n",
+                 args->op, args->arg[0], args->arg[1], args->arg[2],
args->arg[3], args->arg[4]);
+      break;
+   }
+}
+
+
+PRE(ioctl_privcmd_mmap)
+{
+   struct vki_xen_privcmd_mmap *args = (struct vki_xen_privcmd_mmap *)(ARG3);
+   PRE_MEM_READ("VKI_XEN_IOCTL_PRIVCMD_MMAP", (Addr)&args->num, sizeof(args->num));
+   PRE_MEM_READ("VKI_XEN_IOCTL_PRIVCMD_MMAP", (Addr)&args->dom, sizeof(args->dom));
+   PRE_MEM_READ("VKI_XEN_IOCTL_PRIVCMD_MMAP", (Addr)args->entry, sizeof(*(args->entry)) * args->num);
+}
+
+PRE(ioctl_privcmd_mmapbatch)
+{
+   struct vki_xen_privcmd_mmapbatch *args = (struct vki_xen_privcmd_mmapbatch *)(ARG3);
+   PRE_MEM_READ("VKI_XEN_IOCTL_PRIVCMD_MMAPBATCH", (Addr)&args->num, sizeof(args->num));
+   PRE_MEM_READ("VKI_XEN_IOCTL_PRIVCMD_MMAPBATCH", (Addr)&args->dom, sizeof(args->dom));
+   PRE_MEM_READ("VKI_XEN_IOCTL_PRIVCMD_MMAPBATCH", (Addr)&args->addr, sizeof(args->addr));
+   PRE_MEM_READ("VKI_XEN_IOCTL_PRIVCMD_MMAPBATCH", (Addr)args->arr, sizeof(*(args->arr)) * args->num);
+}
+
+PRE(ioctl_privcmd_mmapbatch_v2)
+{
+   struct vki_xen_privcmd_mmapbatch_v2 *args = (struct vki_xen_privcmd_mmapbatch_v2 *)(ARG3);
+   PRE_MEM_READ("VKI_XEN_IOCTL_PRIVCMD_MMAPBATCH_V2", (Addr)&args->num, sizeof(args->num));
+   PRE_MEM_READ("VKI_XEN_IOCTL_PRIVCMD_MMAPBATCH_V2", (Addr)&args->dom, sizeof(args->dom));
+   PRE_MEM_READ("VKI_XEN_IOCTL_PRIVCMD_MMAPBATCH_V2", (Addr)&args->addr, sizeof(args->addr));
+   PRE_MEM_READ("VKI_XEN_IOCTL_PRIVCMD_MMAPBATCH_V2", (Addr)args->arr, sizeof(*(args->arr)) * args->num);
+}
+
+POST(ioctl_privcmd_mmap)
+{
+   //struct vki_xen_privcmd_mmap *args = (struct vki_xen_privcmd_mmap *)(ARG3);
+}
+
+POST(ioctl_privcmd_mmapbatch)
+{
+   struct vki_xen_privcmd_mmapbatch *args = (struct vki_xen_privcmd_mmapbatch *)(ARG3);
+   POST_MEM_WRITE((Addr)args->arr, sizeof(*(args->arr)) * args->num);
+}
+
+POST(ioctl_privcmd_mmapbatch_v2)
+{
+   struct vki_xen_privcmd_mmapbatch_v2 *args = (struct vki_xen_privcmd_mmapbatch_v2 *)(ARG3);
+   POST_MEM_WRITE((Addr)args->err, sizeof(*(args->err)) * args->num);
+}
diff --git a/include/Makefile.am b/include/Makefile.am
index 33d0857..22bffa7 100644
--- a/include/Makefile.am
+++ b/include/Makefile.am
@@ -54,7 +54,8 @@       vki/vki-scnums-ppc32-linux.h    \
        vki/vki-scnums-ppc64-linux.h    \
        vki/vki-scnums-x86-linux.h      \
        vki/vki-scnums-arm-linux.h      \
-       vki/vki-scnums-darwin.h
+       vki/vki-scnums-darwin.h        \
+       vki/vki-xen.h

 noinst_HEADERS = \
        vki/vki-ppc32-aix5.h            \
diff --git a/include/pub_tool_vki.h b/include/pub_tool_vki.h
index 73a4174..c4c117f 100644
--- a/include/pub_tool_vki.h
+++ b/include/pub_tool_vki.h
@@ -47,6 +47,7 @@

 #if defined(VGO_linux)
 #  include "vki/vki-linux.h"
+#  include "vki/vki-xen.h"
 #elif defined(VGP_ppc32_aix5)
 #  include "vki/vki-ppc32-aix5.h"
 #elif defined(VGP_ppc64_aix5)
diff --git a/include/vki/vki-linux.h b/include/vki/vki-linux.h
index beff378..1214300 100644
--- a/include/vki/vki-linux.h
+++ b/include/vki/vki-linux.h
@@ -2717,6 +2717,51 @@ struct vki_getcpu_cache {
 #define VKI_EV_MAX             0x1f
 #define VKI_EV_CNT             (VKI_EV_MAX+1)

+//----------------------------------------------------------------------
+// Xen privcmd IOCTL
+//----------------------------------------------------------------------
+
+typedef unsigned long __vki_xen_pfn_t;
+
+struct vki_xen_privcmd_hypercall {
+       __vki_u64 op;
+       __vki_u64 arg[5];
+};
+
+struct vki_xen_privcmd_mmap_entry {
+        __vki_u64 va;
+        __vki_u64 mfn;
+        __vki_u64 npages;
+};
+
+struct vki_xen_privcmd_mmap {
+        int num;
+        __vki_u16 dom; /* target domain */
+        struct vki_xen_privcmd_mmap_entry *entry;
+};
+
+struct vki_xen_privcmd_mmapbatch {
+        int num;     /* number of pages to populate */
+        __vki_u16 dom; /* target domain */
+        __vki_u64 addr;  /* virtual address */
+        __vki_xen_pfn_t *arr; /* array of mfns - top nibble set on err */
+};
+
+struct vki_xen_privcmd_mmapbatch_v2 {
+        unsigned int num; /* number of pages to populate */
+        __vki_u16 dom;      /* target domain */
+        __vki_u64 addr;       /* virtual address */
+        const __vki_xen_pfn_t *arr; /* array of mfns */
+        int *err;  /* array of error codes */
+};
+
+#define VKI_XEN_IOCTL_PRIVCMD_HYPERCALL    _VKI_IOC(_VKI_IOC_NONE, 'P', 0, sizeof(struct vki_xen_privcmd_hypercall))
+#define VKI_XEN_IOCTL_PRIVCMD_MMAP         _VKI_IOC(_VKI_IOC_NONE, 'P', 2, sizeof(struct vki_xen_privcmd_mmap))
+
+#define VKI_XEN_IOCTL_PRIVCMD_MMAPBATCH    _VKI_IOC(_VKI_IOC_NONE, 'P', 3, sizeof(struct vki_xen_privcmd_mmapbatch))
+#define VKI_XEN_IOCTL_PRIVCMD_MMAPBATCH_V2 _VKI_IOC(_VKI_IOC_NONE, 'P', 4, sizeof(struct vki_xen_privcmd_mmapbatch_v2))
+
+
 #endif // __VKI_LINUX_H

 /*--------------------------------------------------------------------*/
diff --git a/include/vki/vki-xen.h b/include/vki/vki-xen.h
new file mode 100644
index 0000000..7842cc9
--- /dev/null
+++ b/include/vki/vki-xen.h
@@ -0,0 +1,8 @@
+#ifndef __VKI_XEN_H
+#define __VKI_XEN_H
+
+#endif // __VKI_XEN_H
+
+/*--------------------------------------------------------------------*/
+/*--- end                                                          ---*/
+/*--------------------------------------------------------------------*/

After applying this patch to valgrind-3.6.1, I just ran ./configure and make,
but I got the error message above and I don't know how to fix it. Can you
help me? Thank you!
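
P.S. If I understand autotools correctly, since the patch modifies
configure.in and the Makefile.am files, the generated configure and
Makefile.in have to be regenerated before configuring again, so I suppose
the full sequence should be something like this (the --with-xen path is
just an example from my machine, not from the original patch posting):

  ./autogen.sh
  ./configure --with-xen=/usr/include
  make

Maybe that regeneration step is what I am missing?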

--
View this message in context: 
http://xen.1045712.n5.nabble.com/PATCHv2-valgrind-support-for-Xen-privcmd-ioctls-hypercalls-tp2640861p4560379.html
Sent from the Xen - Dev mailing list archive at Nabble.com.

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel