# HG changeset patch
# User Ian Campbell <ian.campbell@xxxxxxxxxx>
# Date 1286892401 -3600
# Node ID 29a5439889c36e72df0f0828aee8f2b002a545b9
# Parent 73a05c8f7c3ec924c7a334a8840b54fcba31c3c1
libxc: pass an xc_interface handle to page locking functions
Not actually used here but useful to confirm that a handle is passed
down to each location where it will be required once we switch to
hypercall buffers.
Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
diff -r 73a05c8f7c3e -r 29a5439889c3 tools/libxc/xc_acm.c
--- a/tools/libxc/xc_acm.c Tue Oct 12 15:06:41 2010 +0100
+++ b/tools/libxc/xc_acm.c Tue Oct 12 15:06:41 2010 +0100
@@ -92,7 +92,7 @@ int xc_acm_op(xc_interface *xch, int cmd
hypercall.op = __HYPERVISOR_xsm_op;
hypercall.arg[0] = (unsigned long)&acmctl;
- if ( lock_pages(&acmctl, sizeof(acmctl)) != 0)
+ if ( lock_pages(xch, &acmctl, sizeof(acmctl)) != 0)
{
PERROR("Could not lock memory for Xen hypercall");
return -EFAULT;
@@ -103,7 +103,7 @@ int xc_acm_op(xc_interface *xch, int cmd
DPRINTF("acmctl operation failed -- need to"
" rebuild the user-space tool set?\n");
}
- unlock_pages(&acmctl, sizeof(acmctl));
+ unlock_pages(xch, &acmctl, sizeof(acmctl));
switch (cmd) {
case ACMOP_getdecision: {
diff -r 73a05c8f7c3e -r 29a5439889c3 tools/libxc/xc_cpupool.c
--- a/tools/libxc/xc_cpupool.c Tue Oct 12 15:06:41 2010 +0100
+++ b/tools/libxc/xc_cpupool.c Tue Oct 12 15:06:41 2010 +0100
@@ -85,13 +85,13 @@ int xc_cpupool_getinfo(xc_interface *xch
set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
sysctl.u.cpupool_op.cpumap.nr_cpus = sizeof(info->cpumap) * 8;
- if ( (err = lock_pages(local, sizeof(local))) != 0 )
+ if ( (err = lock_pages(xch, local, sizeof(local))) != 0 )
{
PERROR("Could not lock memory for Xen hypercall");
break;
}
err = do_sysctl_save(xch, &sysctl);
- unlock_pages(local, sizeof (local));
+ unlock_pages(xch, local, sizeof (local));
if ( err < 0 )
break;
@@ -161,14 +161,14 @@ int xc_cpupool_freeinfo(xc_interface *xc
set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
sysctl.u.cpupool_op.cpumap.nr_cpus = sizeof(*cpumap) * 8;
- if ( (err = lock_pages(local, sizeof(local))) != 0 )
+ if ( (err = lock_pages(xch, local, sizeof(local))) != 0 )
{
PERROR("Could not lock memory for Xen hypercall");
return err;
}
err = do_sysctl_save(xch, &sysctl);
- unlock_pages(local, sizeof (local));
+ unlock_pages(xch, local, sizeof (local));
if (err < 0)
return err;
diff -r 73a05c8f7c3e -r 29a5439889c3 tools/libxc/xc_domain.c
--- a/tools/libxc/xc_domain.c Tue Oct 12 15:06:41 2010 +0100
+++ b/tools/libxc/xc_domain.c Tue Oct 12 15:06:41 2010 +0100
@@ -94,7 +94,7 @@ int xc_domain_shutdown(xc_interface *xch
arg.domain_id = domid;
arg.reason = reason;
- if ( lock_pages(&arg, sizeof(arg)) != 0 )
+ if ( lock_pages(xch, &arg, sizeof(arg)) != 0 )
{
PERROR("Could not lock memory for Xen hypercall");
goto out1;
@@ -102,7 +102,7 @@ int xc_domain_shutdown(xc_interface *xch
ret = do_xen_hypercall(xch, &hypercall);
- unlock_pages(&arg, sizeof(arg));
+ unlock_pages(xch, &arg, sizeof(arg));
out1:
return ret;
@@ -133,7 +133,7 @@ int xc_vcpu_setaffinity(xc_interface *xc
domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8;
- if ( lock_pages(local, cpusize) != 0 )
+ if ( lock_pages(xch, local, cpusize) != 0 )
{
PERROR("Could not lock memory for Xen hypercall");
goto out;
@@ -141,7 +141,7 @@ int xc_vcpu_setaffinity(xc_interface *xc
ret = do_domctl(xch, &domctl);
- unlock_pages(local, cpusize);
+ unlock_pages(xch, local, cpusize);
out:
free(local);
@@ -172,7 +172,7 @@ int xc_vcpu_getaffinity(xc_interface *xc
set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8;
- if ( lock_pages(local, sizeof(local)) != 0 )
+ if ( lock_pages(xch, local, sizeof(local)) != 0 )
{
PERROR("Could not lock memory for Xen hypercall");
goto out;
@@ -180,7 +180,7 @@ int xc_vcpu_getaffinity(xc_interface *xc
ret = do_domctl(xch, &domctl);
- unlock_pages(local, sizeof (local));
+ unlock_pages(xch, local, sizeof (local));
bitmap_byte_to_64(cpumap, local, cpusize * 8);
out:
free(local);
@@ -257,7 +257,7 @@ int xc_domain_getinfolist(xc_interface *
int ret = 0;
DECLARE_SYSCTL;
- if ( lock_pages(info, max_domains*sizeof(xc_domaininfo_t)) != 0 )
+ if ( lock_pages(xch, info, max_domains*sizeof(xc_domaininfo_t)) != 0 )
return -1;
sysctl.cmd = XEN_SYSCTL_getdomaininfolist;
@@ -270,7 +270,7 @@ int xc_domain_getinfolist(xc_interface *
else
ret = sysctl.u.getdomaininfolist.num_domains;
- unlock_pages(info, max_domains*sizeof(xc_domaininfo_t));
+ unlock_pages(xch, info, max_domains*sizeof(xc_domaininfo_t));
return ret;
}
@@ -290,13 +290,13 @@ int xc_domain_hvm_getcontext(xc_interfac
set_xen_guest_handle(domctl.u.hvmcontext.buffer, ctxt_buf);
if ( ctxt_buf )
- if ( (ret = lock_pages(ctxt_buf, size)) != 0 )
+ if ( (ret = lock_pages(xch, ctxt_buf, size)) != 0 )
return ret;
ret = do_domctl(xch, &domctl);
if ( ctxt_buf )
- unlock_pages(ctxt_buf, size);
+ unlock_pages(xch, ctxt_buf, size);
return (ret < 0 ? -1 : domctl.u.hvmcontext.size);
}
@@ -322,13 +322,13 @@ int xc_domain_hvm_getcontext_partial(xc_
domctl.u.hvmcontext_partial.instance = instance;
set_xen_guest_handle(domctl.u.hvmcontext_partial.buffer, ctxt_buf);
- if ( (ret = lock_pages(ctxt_buf, size)) != 0 )
+ if ( (ret = lock_pages(xch, ctxt_buf, size)) != 0 )
return ret;
ret = do_domctl(xch, &domctl);
if ( ctxt_buf )
- unlock_pages(ctxt_buf, size);
+ unlock_pages(xch, ctxt_buf, size);
return ret ? -1 : 0;
}
@@ -347,12 +347,12 @@ int xc_domain_hvm_setcontext(xc_interfac
domctl.u.hvmcontext.size = size;
set_xen_guest_handle(domctl.u.hvmcontext.buffer, ctxt_buf);
- if ( (ret = lock_pages(ctxt_buf, size)) != 0 )
+ if ( (ret = lock_pages(xch, ctxt_buf, size)) != 0 )
return ret;
ret = do_domctl(xch, &domctl);
- unlock_pages(ctxt_buf, size);
+ unlock_pages(xch, ctxt_buf, size);
return ret;
}
@@ -372,10 +372,10 @@ int xc_vcpu_getcontext(xc_interface *xch
set_xen_guest_handle(domctl.u.vcpucontext.ctxt, &ctxt->c);
- if ( (rc = lock_pages(ctxt, sz)) != 0 )
+ if ( (rc = lock_pages(xch, ctxt, sz)) != 0 )
return rc;
rc = do_domctl(xch, &domctl);
- unlock_pages(ctxt, sz);
+ unlock_pages(xch, ctxt, sz);
return rc;
}
@@ -394,7 +394,7 @@ int xc_watchdog(xc_interface *xch,
arg.id = id;
arg.timeout = timeout;
- if ( lock_pages(&arg, sizeof(arg)) != 0 )
+ if ( lock_pages(xch, &arg, sizeof(arg)) != 0 )
{
PERROR("Could not lock memory for Xen hypercall");
goto out1;
@@ -402,7 +402,7 @@ int xc_watchdog(xc_interface *xch,
ret = do_xen_hypercall(xch, &hypercall);
- unlock_pages(&arg, sizeof(arg));
+ unlock_pages(xch, &arg, sizeof(arg));
out1:
return ret;
@@ -488,7 +488,7 @@ int xc_domain_set_memmap_limit(xc_interf
set_xen_guest_handle(fmap.map.buffer, &e820);
- if ( lock_pages(&fmap, sizeof(fmap)) || lock_pages(&e820, sizeof(e820)) )
+ if ( lock_pages(xch, &fmap, sizeof(fmap)) || lock_pages(xch, &e820, sizeof(e820)) )
{
PERROR("Could not lock memory for Xen hypercall");
rc = -1;
@@ -498,8 +498,8 @@ int xc_domain_set_memmap_limit(xc_interf
rc = xc_memory_op(xch, XENMEM_set_memory_map, &fmap);
out:
- unlock_pages(&fmap, sizeof(fmap));
- unlock_pages(&e820, sizeof(e820));
+ unlock_pages(xch, &fmap, sizeof(fmap));
+ unlock_pages(xch, &e820, sizeof(e820));
return rc;
}
#else
@@ -564,7 +564,7 @@ int xc_domain_get_tsc_info(xc_interface
domctl.cmd = XEN_DOMCTL_gettscinfo;
domctl.domain = (domid_t)domid;
set_xen_guest_handle(domctl.u.tsc_info.out_info, &info);
- if ( (rc = lock_pages(&info, sizeof(info))) != 0 )
+ if ( (rc = lock_pages(xch, &info, sizeof(info))) != 0 )
return rc;
rc = do_domctl(xch, &domctl);
if ( rc == 0 )
@@ -574,7 +574,7 @@ int xc_domain_get_tsc_info(xc_interface
*gtsc_khz = info.gtsc_khz;
*incarnation = info.incarnation;
}
- unlock_pages(&info,sizeof(info));
+ unlock_pages(xch, &info,sizeof(info));
return rc;
}
@@ -849,11 +849,11 @@ int xc_vcpu_setcontext(xc_interface *xch
domctl.u.vcpucontext.vcpu = vcpu;
set_xen_guest_handle(domctl.u.vcpucontext.ctxt, &ctxt->c);
- if ( (rc = lock_pages(ctxt, sz)) != 0 )
+ if ( (rc = lock_pages(xch, ctxt, sz)) != 0 )
return rc;
rc = do_domctl(xch, &domctl);
- unlock_pages(ctxt, sz);
+ unlock_pages(xch, ctxt, sz);
return rc;
}
@@ -917,10 +917,10 @@ int xc_set_hvm_param(xc_interface *handl
arg.domid = dom;
arg.index = param;
arg.value = value;
- if ( lock_pages(&arg, sizeof(arg)) != 0 )
+ if ( lock_pages(handle, &arg, sizeof(arg)) != 0 )
return -1;
rc = do_xen_hypercall(handle, &hypercall);
- unlock_pages(&arg, sizeof(arg));
+ unlock_pages(handle, &arg, sizeof(arg));
return rc;
}
@@ -935,10 +935,10 @@ int xc_get_hvm_param(xc_interface *handl
hypercall.arg[1] = (unsigned long)&arg;
arg.domid = dom;
arg.index = param;
- if ( lock_pages(&arg, sizeof(arg)) != 0 )
+ if ( lock_pages(handle, &arg, sizeof(arg)) != 0 )
return -1;
rc = do_xen_hypercall(handle, &hypercall);
- unlock_pages(&arg, sizeof(arg));
+ unlock_pages(handle, &arg, sizeof(arg));
*value = arg.value;
return rc;
}
@@ -988,13 +988,13 @@ int xc_get_device_group(
set_xen_guest_handle(domctl.u.get_device_group.sdev_array, sdev_array);
- if ( lock_pages(sdev_array, max_sdevs * sizeof(*sdev_array)) != 0 )
+ if ( lock_pages(xch, sdev_array, max_sdevs * sizeof(*sdev_array)) != 0 )
{
PERROR("Could not lock memory for xc_get_device_group");
return -ENOMEM;
}
rc = do_domctl(xch, &domctl);
- unlock_pages(sdev_array, max_sdevs * sizeof(*sdev_array));
+ unlock_pages(xch, sdev_array, max_sdevs * sizeof(*sdev_array));
*num_sdevs = domctl.u.get_device_group.num_sdevs;
return rc;
diff -r 73a05c8f7c3e -r 29a5439889c3 tools/libxc/xc_domain_restore.c
--- a/tools/libxc/xc_domain_restore.c Tue Oct 12 15:06:41 2010 +0100
+++ b/tools/libxc/xc_domain_restore.c Tue Oct 12 15:06:41 2010 +0100
@@ -1181,13 +1181,13 @@ int xc_domain_restore(xc_interface *xch,
memset(ctx->p2m_batch, 0,
ROUNDUP(MAX_BATCH_SIZE * sizeof(xen_pfn_t), PAGE_SHIFT));
- if ( lock_pages(region_mfn, sizeof(xen_pfn_t) * MAX_BATCH_SIZE) )
+ if ( lock_pages(xch, region_mfn, sizeof(xen_pfn_t) * MAX_BATCH_SIZE) )
{
PERROR("Could not lock region_mfn");
goto out;
}
- if ( lock_pages(ctx->p2m_batch, sizeof(xen_pfn_t) * MAX_BATCH_SIZE) )
+ if ( lock_pages(xch, ctx->p2m_batch, sizeof(xen_pfn_t) * MAX_BATCH_SIZE) )
{
ERROR("Could not lock p2m_batch");
goto out;
@@ -1547,7 +1547,7 @@ int xc_domain_restore(xc_interface *xch,
}
}
- if ( lock_pages(&ctxt, sizeof(ctxt)) )
+ if ( lock_pages(xch, &ctxt, sizeof(ctxt)) )
{
PERROR("Unable to lock ctxt");
return 1;
diff -r 73a05c8f7c3e -r 29a5439889c3 tools/libxc/xc_domain_save.c
--- a/tools/libxc/xc_domain_save.c Tue Oct 12 15:06:41 2010 +0100
+++ b/tools/libxc/xc_domain_save.c Tue Oct 12 15:06:41 2010 +0100
@@ -1046,14 +1046,14 @@ int xc_domain_save(xc_interface *xch, in
memset(to_send, 0xff, BITMAP_SIZE);
- if ( lock_pages(to_send, BITMAP_SIZE) )
+ if ( lock_pages(xch, to_send, BITMAP_SIZE) )
{
PERROR("Unable to lock to_send");
return 1;
}
/* (to fix is local only) */
- if ( lock_pages(to_skip, BITMAP_SIZE) )
+ if ( lock_pages(xch, to_skip, BITMAP_SIZE) )
{
PERROR("Unable to lock to_skip");
return 1;
@@ -1091,7 +1091,7 @@ int xc_domain_save(xc_interface *xch, in
memset(pfn_type, 0,
ROUNDUP(MAX_BATCH_SIZE * sizeof(*pfn_type), PAGE_SHIFT));
- if ( lock_pages(pfn_type, MAX_BATCH_SIZE * sizeof(*pfn_type)) )
+ if ( lock_pages(xch, pfn_type, MAX_BATCH_SIZE * sizeof(*pfn_type)) )
{
PERROR("Unable to lock pfn_type array");
goto out;
diff -r 73a05c8f7c3e -r 29a5439889c3 tools/libxc/xc_evtchn.c
--- a/tools/libxc/xc_evtchn.c Tue Oct 12 15:06:41 2010 +0100
+++ b/tools/libxc/xc_evtchn.c Tue Oct 12 15:06:41 2010 +0100
@@ -33,7 +33,7 @@ static int do_evtchn_op(xc_interface *xc
hypercall.arg[0] = cmd;
hypercall.arg[1] = (unsigned long)arg;
- if ( lock_pages(arg, arg_size) != 0 )
+ if ( lock_pages(xch, arg, arg_size) != 0 )
{
PERROR("do_evtchn_op: arg lock failed");
goto out;
@@ -42,7 +42,7 @@ static int do_evtchn_op(xc_interface *xc
if ((ret = do_xen_hypercall(xch, &hypercall)) < 0 && !silently_fail)
ERROR("do_evtchn_op: HYPERVISOR_event_channel_op failed: %d", ret);
- unlock_pages(arg, arg_size);
+ unlock_pages(xch, arg, arg_size);
out:
return ret;
}
diff -r 73a05c8f7c3e -r 29a5439889c3 tools/libxc/xc_flask.c
--- a/tools/libxc/xc_flask.c Tue Oct 12 15:06:41 2010 +0100
+++ b/tools/libxc/xc_flask.c Tue Oct 12 15:06:41 2010 +0100
@@ -44,7 +44,7 @@ int xc_flask_op(xc_interface *xch, flask
hypercall.op = __HYPERVISOR_xsm_op;
hypercall.arg[0] = (unsigned long)op;
- if ( lock_pages(op, sizeof(*op)) != 0 )
+ if ( lock_pages(xch, op, sizeof(*op)) != 0 )
{
PERROR("Could not lock memory for Xen hypercall");
goto out;
@@ -56,7 +56,7 @@ int xc_flask_op(xc_interface *xch, flask
fprintf(stderr, "XSM operation failed!\n");
}
- unlock_pages(op, sizeof(*op));
+ unlock_pages(xch, op, sizeof(*op));
out:
return ret;
diff -r 73a05c8f7c3e -r 29a5439889c3 tools/libxc/xc_linux.c
--- a/tools/libxc/xc_linux.c Tue Oct 12 15:06:41 2010 +0100
+++ b/tools/libxc/xc_linux.c Tue Oct 12 15:06:41 2010 +0100
@@ -618,7 +618,7 @@ int xc_gnttab_op(xc_interface *xch, int
hypercall.arg[1] = (unsigned long)op;
hypercall.arg[2] = count;
- if ( lock_pages(op, count* op_size) != 0 )
+ if ( lock_pages(xch, op, count* op_size) != 0 )
{
PERROR("Could not lock memory for Xen hypercall");
goto out1;
@@ -626,7 +626,7 @@ int xc_gnttab_op(xc_interface *xch, int
ret = do_xen_hypercall(xch, &hypercall);
- unlock_pages(op, count * op_size);
+ unlock_pages(xch, op, count * op_size);
out1:
return ret;
@@ -670,7 +670,7 @@ static void *_gnttab_map_table(xc_interf
*gnt_num = query.nr_frames * (PAGE_SIZE / sizeof(grant_entry_v1_t) );
frame_list = malloc(query.nr_frames * sizeof(unsigned long));
- if ( !frame_list || lock_pages(frame_list,
+ if ( !frame_list || lock_pages(xch, frame_list,
query.nr_frames * sizeof(unsigned long)) )
{
ERROR("Alloc/lock frame_list in xc_gnttab_map_table\n");
@@ -714,7 +714,7 @@ err:
err:
if ( frame_list )
{
- unlock_pages(frame_list, query.nr_frames * sizeof(unsigned long));
+ unlock_pages(xch, frame_list, query.nr_frames * sizeof(unsigned long));
free(frame_list);
}
if ( pfn_list )
diff -r 73a05c8f7c3e -r 29a5439889c3 tools/libxc/xc_misc.c
--- a/tools/libxc/xc_misc.c Tue Oct 12 15:06:41 2010 +0100
+++ b/tools/libxc/xc_misc.c Tue Oct 12 15:06:41 2010 +0100
@@ -42,7 +42,7 @@ int xc_readconsolering(xc_interface *xch
sysctl.u.readconsole.incremental = incremental;
}
- if ( (ret = lock_pages(buffer, nr_chars)) != 0 )
+ if ( (ret = lock_pages(xch, buffer, nr_chars)) != 0 )
return ret;
if ( (ret = do_sysctl(xch, &sysctl)) == 0 )
@@ -52,7 +52,7 @@ int xc_readconsolering(xc_interface *xch
*pindex = sysctl.u.readconsole.index;
}
- unlock_pages(buffer, nr_chars);
+ unlock_pages(xch, buffer, nr_chars);
return ret;
}
@@ -66,12 +66,12 @@ int xc_send_debug_keys(xc_interface *xch
set_xen_guest_handle(sysctl.u.debug_keys.keys, keys);
sysctl.u.debug_keys.nr_keys = len;
- if ( (ret = lock_pages(keys, len)) != 0 )
+ if ( (ret = lock_pages(xch, keys, len)) != 0 )
return ret;
ret = do_sysctl(xch, &sysctl);
- unlock_pages(keys, len);
+ unlock_pages(xch, keys, len);
return ret;
}
@@ -154,7 +154,7 @@ int xc_mca_op(xc_interface *xch, struct
DECLARE_HYPERCALL;
mc->interface_version = XEN_MCA_INTERFACE_VERSION;
- if ( lock_pages(mc, sizeof(mc)) )
+ if ( lock_pages(xch, mc, sizeof(mc)) )
{
PERROR("Could not lock xen_mc memory");
return -EINVAL;
@@ -163,7 +163,7 @@ int xc_mca_op(xc_interface *xch, struct
hypercall.op = __HYPERVISOR_mca;
hypercall.arg[0] = (unsigned long)mc;
ret = do_xen_hypercall(xch, &hypercall);
- unlock_pages(mc, sizeof(mc));
+ unlock_pages(xch, mc, sizeof(mc));
return ret;
}
#endif
@@ -227,12 +227,12 @@ int xc_getcpuinfo(xc_interface *xch, int
sysctl.u.getcpuinfo.max_cpus = max_cpus;
set_xen_guest_handle(sysctl.u.getcpuinfo.info, info);
- if ( (rc = lock_pages(info, max_cpus*sizeof(*info))) != 0 )
+ if ( (rc = lock_pages(xch, info, max_cpus*sizeof(*info))) != 0 )
return rc;
rc = do_sysctl(xch, &sysctl);
- unlock_pages(info, max_cpus*sizeof(*info));
+ unlock_pages(xch, info, max_cpus*sizeof(*info));
if ( nr_cpus )
*nr_cpus = sysctl.u.getcpuinfo.nr_cpus;
@@ -250,7 +250,7 @@ int xc_hvm_set_pci_intx_level(
struct xen_hvm_set_pci_intx_level _arg, *arg = &_arg;
int rc;
- if ( (rc = hcall_buf_prep((void **)&arg, sizeof(*arg))) != 0 )
+ if ( (rc = hcall_buf_prep(xch, (void **)&arg, sizeof(*arg))) != 0 )
{
PERROR("Could not lock memory");
return rc;
@@ -269,7 +269,7 @@ int xc_hvm_set_pci_intx_level(
rc = do_xen_hypercall(xch, &hypercall);
- hcall_buf_release((void **)&arg, sizeof(*arg));
+ hcall_buf_release(xch, (void **)&arg, sizeof(*arg));
return rc;
}
@@ -283,7 +283,7 @@ int xc_hvm_set_isa_irq_level(
struct xen_hvm_set_isa_irq_level _arg, *arg = &_arg;
int rc;
- if ( (rc = hcall_buf_prep((void **)&arg, sizeof(*arg))) != 0 )
+ if ( (rc = hcall_buf_prep(xch, (void **)&arg, sizeof(*arg))) != 0 )
{
PERROR("Could not lock memory");
return rc;
@@ -299,7 +299,7 @@ int xc_hvm_set_isa_irq_level(
rc = do_xen_hypercall(xch, &hypercall);
- hcall_buf_release((void **)&arg, sizeof(*arg));
+ hcall_buf_release(xch, (void **)&arg, sizeof(*arg));
return rc;
}
@@ -319,7 +319,7 @@ int xc_hvm_set_pci_link_route(
arg.link = link;
arg.isa_irq = isa_irq;
- if ( (rc = lock_pages(&arg, sizeof(arg))) != 0 )
+ if ( (rc = lock_pages(xch, &arg, sizeof(arg))) != 0 )
{
PERROR("Could not lock memory");
return rc;
@@ -327,7 +327,7 @@ int xc_hvm_set_pci_link_route(
rc = do_xen_hypercall(xch, &hypercall);
- unlock_pages(&arg, sizeof(arg));
+ unlock_pages(xch, &arg, sizeof(arg));
return rc;
}
@@ -350,7 +350,7 @@ int xc_hvm_track_dirty_vram(
arg.nr = nr;
set_xen_guest_handle(arg.dirty_bitmap, (uint8_t *)dirty_bitmap);
- if ( (rc = lock_pages(&arg, sizeof(arg))) != 0 )
+ if ( (rc = lock_pages(xch, &arg, sizeof(arg))) != 0 )
{
PERROR("Could not lock memory");
return rc;
@@ -358,7 +358,7 @@ int xc_hvm_track_dirty_vram(
rc = do_xen_hypercall(xch, &hypercall);
- unlock_pages(&arg, sizeof(arg));
+ unlock_pages(xch, &arg, sizeof(arg));
return rc;
}
@@ -378,7 +378,7 @@ int xc_hvm_modified_memory(
arg.first_pfn = first_pfn;
arg.nr = nr;
- if ( (rc = lock_pages(&arg, sizeof(arg))) != 0 )
+ if ( (rc = lock_pages(xch, &arg, sizeof(arg))) != 0 )
{
PERROR("Could not lock memory");
return rc;
@@ -386,7 +386,7 @@ int xc_hvm_modified_memory(
rc = do_xen_hypercall(xch, &hypercall);
- unlock_pages(&arg, sizeof(arg));
+ unlock_pages(xch, &arg, sizeof(arg));
return rc;
}
@@ -407,7 +407,7 @@ int xc_hvm_set_mem_type(
arg.first_pfn = first_pfn;
arg.nr = nr;
- if ( (rc = lock_pages(&arg, sizeof(arg))) != 0 )
+ if ( (rc = lock_pages(xch, &arg, sizeof(arg))) != 0 )
{
PERROR("Could not lock memory");
return rc;
@@ -415,7 +415,7 @@ int xc_hvm_set_mem_type(
rc = do_xen_hypercall(xch, &hypercall);
- unlock_pages(&arg, sizeof(arg));
+ unlock_pages(xch, &arg, sizeof(arg));
return rc;
}
diff -r 73a05c8f7c3e -r 29a5439889c3 tools/libxc/xc_offline_page.c
--- a/tools/libxc/xc_offline_page.c Tue Oct 12 15:06:41 2010 +0100
+++ b/tools/libxc/xc_offline_page.c Tue Oct 12 15:06:41 2010 +0100
@@ -71,7 +71,7 @@ int xc_mark_page_online(xc_interface *xc
if ( !status || (end < start) )
return -EINVAL;
- if (lock_pages(status, sizeof(uint32_t)*(end - start + 1)))
+ if (lock_pages(xch, status, sizeof(uint32_t)*(end - start + 1)))
{
ERROR("Could not lock memory for xc_mark_page_online\n");
return -EINVAL;
@@ -84,7 +84,7 @@ int xc_mark_page_online(xc_interface *xc
set_xen_guest_handle(sysctl.u.page_offline.status, status);
ret = xc_sysctl(xch, &sysctl);
- unlock_pages(status, sizeof(uint32_t)*(end - start + 1));
+ unlock_pages(xch, status, sizeof(uint32_t)*(end - start + 1));
return ret;
}
@@ -98,7 +98,7 @@ int xc_mark_page_offline(xc_interface *x
if ( !status || (end < start) )
return -EINVAL;
- if (lock_pages(status, sizeof(uint32_t)*(end - start + 1)))
+ if (lock_pages(xch, status, sizeof(uint32_t)*(end - start + 1)))
{
ERROR("Could not lock memory for xc_mark_page_offline");
return -EINVAL;
@@ -111,7 +111,7 @@ int xc_mark_page_offline(xc_interface *x
set_xen_guest_handle(sysctl.u.page_offline.status, status);
ret = xc_sysctl(xch, &sysctl);
- unlock_pages(status, sizeof(uint32_t)*(end - start + 1));
+ unlock_pages(xch, status, sizeof(uint32_t)*(end - start + 1));
return ret;
}
@@ -125,7 +125,7 @@ int xc_query_page_offline_status(xc_inte
if ( !status || (end < start) )
return -EINVAL;
- if (lock_pages(status, sizeof(uint32_t)*(end - start + 1)))
+ if (lock_pages(xch, status, sizeof(uint32_t)*(end - start + 1)))
{
ERROR("Could not lock memory for xc_query_page_offline_status\n");
return -EINVAL;
@@ -138,7 +138,7 @@ int xc_query_page_offline_status(xc_inte
set_xen_guest_handle(sysctl.u.page_offline.status, status);
ret = xc_sysctl(xch, &sysctl);
- unlock_pages(status, sizeof(uint32_t)*(end - start + 1));
+ unlock_pages(xch, status, sizeof(uint32_t)*(end - start + 1));
return ret;
}
@@ -291,7 +291,7 @@ static int init_mem_info(xc_interface *x
minfo->pfn_type[i] = pfn_to_mfn(i, minfo->p2m_table,
minfo->guest_width);
- if ( lock_pages(minfo->pfn_type, minfo->p2m_size * sizeof(*minfo->pfn_type)) )
+ if ( lock_pages(xch, minfo->pfn_type, minfo->p2m_size * sizeof(*minfo->pfn_type)) )
{
ERROR("Unable to lock pfn_type array");
goto failed;
@@ -310,7 +310,7 @@ static int init_mem_info(xc_interface *x
return 0;
unlock:
- unlock_pages(minfo->pfn_type, minfo->p2m_size * sizeof(*minfo->pfn_type));
+ unlock_pages(xch, minfo->pfn_type, minfo->p2m_size * sizeof(*minfo->pfn_type));
failed:
if (minfo->pfn_type)
{
diff -r 73a05c8f7c3e -r 29a5439889c3 tools/libxc/xc_pm.c
--- a/tools/libxc/xc_pm.c Tue Oct 12 15:06:41 2010 +0100
+++ b/tools/libxc/xc_pm.c Tue Oct 12 15:06:41 2010 +0100
@@ -53,14 +53,14 @@ int xc_pm_get_pxstat(xc_interface *xch,
if ( (ret = xc_pm_get_max_px(xch, cpuid, &max_px)) != 0)
return ret;
- if ( (ret = lock_pages(pxpt->trans_pt,
+ if ( (ret = lock_pages(xch, pxpt->trans_pt,
max_px * max_px * sizeof(uint64_t))) != 0 )
return ret;
- if ( (ret = lock_pages(pxpt->pt,
+ if ( (ret = lock_pages(xch, pxpt->pt,
max_px * sizeof(struct xc_px_val))) != 0 )
{
- unlock_pages(pxpt->trans_pt, max_px * max_px * sizeof(uint64_t));
+ unlock_pages(xch, pxpt->trans_pt, max_px * max_px * sizeof(uint64_t));
return ret;
}
@@ -75,8 +75,8 @@ int xc_pm_get_pxstat(xc_interface *xch,
ret = xc_sysctl(xch, &sysctl);
if ( ret )
{
- unlock_pages(pxpt->trans_pt, max_px * max_px * sizeof(uint64_t));
- unlock_pages(pxpt->pt, max_px * sizeof(struct xc_px_val));
+ unlock_pages(xch, pxpt->trans_pt, max_px * max_px * sizeof(uint64_t));
+ unlock_pages(xch, pxpt->pt, max_px * sizeof(struct xc_px_val));
return ret;
}
@@ -85,8 +85,8 @@ int xc_pm_get_pxstat(xc_interface *xch,
pxpt->last = sysctl.u.get_pmstat.u.getpx.last;
pxpt->cur = sysctl.u.get_pmstat.u.getpx.cur;
- unlock_pages(pxpt->trans_pt, max_px * max_px * sizeof(uint64_t));
- unlock_pages(pxpt->pt, max_px * sizeof(struct xc_px_val));
+ unlock_pages(xch, pxpt->trans_pt, max_px * max_px * sizeof(uint64_t));
+ unlock_pages(xch, pxpt->pt, max_px * sizeof(struct xc_px_val));
return ret;
}
@@ -128,11 +128,11 @@ int xc_pm_get_cxstat(xc_interface *xch,
if ( (ret = xc_pm_get_max_cx(xch, cpuid, &max_cx)) )
goto unlock_0;
- if ( (ret = lock_pages(cxpt, sizeof(struct xc_cx_stat))) )
+ if ( (ret = lock_pages(xch, cxpt, sizeof(struct xc_cx_stat))) )
goto unlock_0;
- if ( (ret = lock_pages(cxpt->triggers, max_cx * sizeof(uint64_t))) )
+ if ( (ret = lock_pages(xch, cxpt->triggers, max_cx * sizeof(uint64_t))) )
goto unlock_1;
- if ( (ret = lock_pages(cxpt->residencies, max_cx * sizeof(uint64_t))) )
+ if ( (ret = lock_pages(xch, cxpt->residencies, max_cx * sizeof(uint64_t))) )
goto unlock_2;
sysctl.cmd = XEN_SYSCTL_get_pmstat;
@@ -155,11 +155,11 @@ int xc_pm_get_cxstat(xc_interface *xch,
cxpt->cc6 = sysctl.u.get_pmstat.u.getcx.cc6;
unlock_3:
- unlock_pages(cxpt->residencies, max_cx * sizeof(uint64_t));
+ unlock_pages(xch, cxpt->residencies, max_cx * sizeof(uint64_t));
unlock_2:
- unlock_pages(cxpt->triggers, max_cx * sizeof(uint64_t));
+ unlock_pages(xch, cxpt->triggers, max_cx * sizeof(uint64_t));
unlock_1:
- unlock_pages(cxpt, sizeof(struct xc_cx_stat));
+ unlock_pages(xch, cxpt, sizeof(struct xc_cx_stat));
unlock_0:
return ret;
}
@@ -200,13 +200,13 @@ int xc_get_cpufreq_para(xc_interface *xc
(!user_para->scaling_available_governors) )
return -EINVAL;
- if ( (ret = lock_pages(user_para->affected_cpus,
+ if ( (ret = lock_pages(xch, user_para->affected_cpus,
user_para->cpu_num * sizeof(uint32_t))) )
goto unlock_1;
- if ( (ret = lock_pages(user_para->scaling_available_frequencies,
+ if ( (ret = lock_pages(xch, user_para->scaling_available_frequencies,
user_para->freq_num * sizeof(uint32_t))) )
goto unlock_2;
- if ( (ret = lock_pages(user_para->scaling_available_governors,
+ if ( (ret = lock_pages(xch, user_para->scaling_available_governors,
user_para->gov_num * CPUFREQ_NAME_LEN * sizeof(char))) )
goto unlock_3;
@@ -263,13 +263,13 @@ int xc_get_cpufreq_para(xc_interface *xc
}
unlock_4:
- unlock_pages(user_para->scaling_available_governors,
+ unlock_pages(xch, user_para->scaling_available_governors,
user_para->gov_num * CPUFREQ_NAME_LEN * sizeof(char));
unlock_3:
- unlock_pages(user_para->scaling_available_frequencies,
+ unlock_pages(xch, user_para->scaling_available_frequencies,
user_para->freq_num * sizeof(uint32_t));
unlock_2:
- unlock_pages(user_para->affected_cpus,
+ unlock_pages(xch, user_para->affected_cpus,
user_para->cpu_num * sizeof(uint32_t));
unlock_1:
return ret;
diff -r 73a05c8f7c3e -r 29a5439889c3 tools/libxc/xc_private.c
--- a/tools/libxc/xc_private.c Tue Oct 12 15:06:41 2010 +0100
+++ b/tools/libxc/xc_private.c Tue Oct 12 15:06:41 2010 +0100
@@ -71,7 +71,7 @@ xc_interface *xc_interface_open(xentooll
return 0;
}
-static void xc_clean_hcall_buf(void);
+static void xc_clean_hcall_buf(xc_interface *xch);
int xc_interface_close(xc_interface *xch)
{
@@ -85,7 +85,7 @@ int xc_interface_close(xc_interface *xch
if (rc) PERROR("Could not close hypervisor interface");
}
- xc_clean_hcall_buf();
+ xc_clean_hcall_buf(xch);
free(xch);
return rc;
@@ -193,17 +193,17 @@ void xc_report_progress_step(xc_interfac
#ifdef __sun__
-int lock_pages(void *addr, size_t len) { return 0; }
-void unlock_pages(void *addr, size_t len) { }
+int lock_pages(xc_interface *xch, void *addr, size_t len) { return 0; }
+void unlock_pages(xc_interface *xch, void *addr, size_t len) { }
-int hcall_buf_prep(void **addr, size_t len) { return 0; }
-void hcall_buf_release(void **addr, size_t len) { }
+int hcall_buf_prep(xc_interface *xch, void **addr, size_t len) { return 0; }
+void hcall_buf_release(xc_interface *xch, void **addr, size_t len) { }
-static void xc_clean_hcall_buf(void) { }
+static void xc_clean_hcall_buf(xc_interface *xch) { }
#else /* !__sun__ */
-int lock_pages(void *addr, size_t len)
+int lock_pages(xc_interface *xch, void *addr, size_t len)
{
int e;
void *laddr = (void *)((unsigned long)addr & PAGE_MASK);
@@ -213,7 +213,7 @@ int lock_pages(void *addr, size_t len)
return e;
}
-void unlock_pages(void *addr, size_t len)
+void unlock_pages(xc_interface *xch, void *addr, size_t len)
{
void *laddr = (void *)((unsigned long)addr & PAGE_MASK);
size_t llen = (len + ((unsigned long)addr - (unsigned long)laddr) +
@@ -226,6 +226,7 @@ static pthread_key_t hcall_buf_pkey;
static pthread_key_t hcall_buf_pkey;
static pthread_once_t hcall_buf_pkey_once = PTHREAD_ONCE_INIT;
struct hcall_buf {
+ xc_interface *xch;
void *buf;
void *oldbuf;
};
@@ -238,7 +239,7 @@ static void _xc_clean_hcall_buf(void *m)
{
if ( hcall_buf->buf )
{
- unlock_pages(hcall_buf->buf, PAGE_SIZE);
+ unlock_pages(hcall_buf->xch, hcall_buf->buf, PAGE_SIZE);
free(hcall_buf->buf);
}
@@ -253,14 +254,14 @@ static void _xc_init_hcall_buf(void)
pthread_key_create(&hcall_buf_pkey, _xc_clean_hcall_buf);
}
-static void xc_clean_hcall_buf(void)
+static void xc_clean_hcall_buf(xc_interface *xch)
{
pthread_once(&hcall_buf_pkey_once, _xc_init_hcall_buf);
_xc_clean_hcall_buf(pthread_getspecific(hcall_buf_pkey));
}
-int hcall_buf_prep(void **addr, size_t len)
+int hcall_buf_prep(xc_interface *xch, void **addr, size_t len)
{
struct hcall_buf *hcall_buf;
@@ -272,13 +273,14 @@ int hcall_buf_prep(void **addr, size_t l
hcall_buf = calloc(1, sizeof(*hcall_buf));
if ( !hcall_buf )
goto out;
+ hcall_buf->xch = xch;
pthread_setspecific(hcall_buf_pkey, hcall_buf);
}
if ( !hcall_buf->buf )
{
hcall_buf->buf = xc_memalign(PAGE_SIZE, PAGE_SIZE);
- if ( !hcall_buf->buf || lock_pages(hcall_buf->buf, PAGE_SIZE) )
+ if ( !hcall_buf->buf || lock_pages(xch, hcall_buf->buf, PAGE_SIZE) )
{
free(hcall_buf->buf);
hcall_buf->buf = NULL;
@@ -295,10 +297,10 @@ int hcall_buf_prep(void **addr, size_t l
}
out:
- return lock_pages(*addr, len);
+ return lock_pages(xch, *addr, len);
}
-void hcall_buf_release(void **addr, size_t len)
+void hcall_buf_release(xc_interface *xch, void **addr, size_t len)
{
struct hcall_buf *hcall_buf = pthread_getspecific(hcall_buf_pkey);
@@ -310,7 +312,7 @@ void hcall_buf_release(void **addr, size
}
else
{
- unlock_pages(*addr, len);
+ unlock_pages(xch, *addr, len);
}
}
@@ -337,7 +339,7 @@ int xc_mmuext_op(
DECLARE_HYPERCALL;
long ret = -EINVAL;
- if ( hcall_buf_prep((void **)&op, nr_ops*sizeof(*op)) != 0 )
+ if ( hcall_buf_prep(xch, (void **)&op, nr_ops*sizeof(*op)) != 0 )
{
PERROR("Could not lock memory for Xen hypercall");
goto out1;
@@ -351,7 +353,7 @@ int xc_mmuext_op(
ret = do_xen_hypercall(xch, &hypercall);
- hcall_buf_release((void **)&op, nr_ops*sizeof(*op));
+ hcall_buf_release(xch, (void **)&op, nr_ops*sizeof(*op));
out1:
return ret;
@@ -371,7 +373,7 @@ static int flush_mmu_updates(xc_interfac
hypercall.arg[2] = 0;
hypercall.arg[3] = mmu->subject;
- if ( lock_pages(mmu->updates, sizeof(mmu->updates)) != 0 )
+ if ( lock_pages(xch, mmu->updates, sizeof(mmu->updates)) != 0 )
{
PERROR("flush_mmu_updates: mmu updates lock_pages failed");
err = 1;
@@ -386,7 +388,7 @@ static int flush_mmu_updates(xc_interfac
mmu->idx = 0;
- unlock_pages(mmu->updates, sizeof(mmu->updates));
+ unlock_pages(xch, mmu->updates, sizeof(mmu->updates));
out:
return err;
@@ -438,38 +440,38 @@ int xc_memory_op(xc_interface *xch,
case XENMEM_increase_reservation:
case XENMEM_decrease_reservation:
case XENMEM_populate_physmap:
- if ( lock_pages(reservation, sizeof(*reservation)) != 0 )
+ if ( lock_pages(xch, reservation, sizeof(*reservation)) != 0 )
{
PERROR("Could not lock");
goto out1;
}
get_xen_guest_handle(extent_start, reservation->extent_start);
if ( (extent_start != NULL) &&
- (lock_pages(extent_start,
+ (lock_pages(xch, extent_start,
reservation->nr_extents * sizeof(xen_pfn_t)) != 0) )
{
PERROR("Could not lock");
- unlock_pages(reservation, sizeof(*reservation));
+ unlock_pages(xch, reservation, sizeof(*reservation));
goto out1;
}
break;
case XENMEM_machphys_mfn_list:
- if ( lock_pages(xmml, sizeof(*xmml)) != 0 )
+ if ( lock_pages(xch, xmml, sizeof(*xmml)) != 0 )
{
PERROR("Could not lock");
goto out1;
}
get_xen_guest_handle(extent_start, xmml->extent_start);
- if ( lock_pages(extent_start,
+ if ( lock_pages(xch, extent_start,
xmml->max_extents * sizeof(xen_pfn_t)) != 0 )
{
PERROR("Could not lock");
- unlock_pages(xmml, sizeof(*xmml));
+ unlock_pages(xch, xmml, sizeof(*xmml));
goto out1;
}
break;
case XENMEM_add_to_physmap:
- if ( lock_pages(arg, sizeof(struct xen_add_to_physmap)) )
+ if ( lock_pages(xch, arg, sizeof(struct xen_add_to_physmap)) )
{
PERROR("Could not lock");
goto out1;
@@ -478,7 +480,7 @@ int xc_memory_op(xc_interface *xch,
case XENMEM_current_reservation:
case XENMEM_maximum_reservation:
case XENMEM_maximum_gpfn:
- if ( lock_pages(arg, sizeof(domid_t)) )
+ if ( lock_pages(xch, arg, sizeof(domid_t)) )
{
PERROR("Could not lock");
goto out1;
@@ -486,7 +488,7 @@ int xc_memory_op(xc_interface *xch,
break;
case XENMEM_set_pod_target:
case XENMEM_get_pod_target:
- if ( lock_pages(arg, sizeof(struct xen_pod_target)) )
+ if ( lock_pages(xch, arg, sizeof(struct xen_pod_target)) )
{
PERROR("Could not lock");
goto out1;
@@ -501,29 +503,29 @@ int xc_memory_op(xc_interface *xch,
case XENMEM_increase_reservation:
case XENMEM_decrease_reservation:
case XENMEM_populate_physmap:
- unlock_pages(reservation, sizeof(*reservation));
+ unlock_pages(xch, reservation, sizeof(*reservation));
get_xen_guest_handle(extent_start, reservation->extent_start);
if ( extent_start != NULL )
- unlock_pages(extent_start,
+ unlock_pages(xch, extent_start,
reservation->nr_extents * sizeof(xen_pfn_t));
break;
case XENMEM_machphys_mfn_list:
- unlock_pages(xmml, sizeof(*xmml));
+ unlock_pages(xch, xmml, sizeof(*xmml));
get_xen_guest_handle(extent_start, xmml->extent_start);
- unlock_pages(extent_start,
+ unlock_pages(xch, extent_start,
xmml->max_extents * sizeof(xen_pfn_t));
break;
case XENMEM_add_to_physmap:
- unlock_pages(arg, sizeof(struct xen_add_to_physmap));
+ unlock_pages(xch, arg, sizeof(struct xen_add_to_physmap));
break;
case XENMEM_current_reservation:
case XENMEM_maximum_reservation:
case XENMEM_maximum_gpfn:
- unlock_pages(arg, sizeof(domid_t));
+ unlock_pages(xch, arg, sizeof(domid_t));
break;
case XENMEM_set_pod_target:
case XENMEM_get_pod_target:
- unlock_pages(arg, sizeof(struct xen_pod_target));
+ unlock_pages(xch, arg, sizeof(struct xen_pod_target));
break;
}
@@ -565,7 +567,7 @@ int xc_get_pfn_list(xc_interface *xch,
memset(pfn_buf, 0, max_pfns * sizeof(*pfn_buf));
#endif
- if ( lock_pages(pfn_buf, max_pfns * sizeof(*pfn_buf)) != 0 )
+ if ( lock_pages(xch, pfn_buf, max_pfns * sizeof(*pfn_buf)) != 0 )
{
PERROR("xc_get_pfn_list: pfn_buf lock failed");
return -1;
@@ -573,7 +575,7 @@ int xc_get_pfn_list(xc_interface *xch,
ret = do_domctl(xch, &domctl);
- unlock_pages(pfn_buf, max_pfns * sizeof(*pfn_buf));
+ unlock_pages(xch, pfn_buf, max_pfns * sizeof(*pfn_buf));
return (ret < 0) ? -1 : domctl.u.getmemlist.num_pfns;
}
@@ -648,7 +650,7 @@ int xc_version(xc_interface *xch, int cm
break;
}
- if ( (argsize != 0) && (lock_pages(arg, argsize) != 0) )
+ if ( (argsize != 0) && (lock_pages(xch, arg, argsize) != 0) )
{
PERROR("Could not lock memory for version hypercall");
return -ENOMEM;
@@ -662,7 +664,7 @@ int xc_version(xc_interface *xch, int cm
rc = do_xen_version(xch, cmd, arg);
if ( argsize != 0 )
- unlock_pages(arg, argsize);
+ unlock_pages(xch, arg, argsize);
return rc;
}
diff -r 73a05c8f7c3e -r 29a5439889c3 tools/libxc/xc_private.h
--- a/tools/libxc/xc_private.h Tue Oct 12 15:06:41 2010 +0100
+++ b/tools/libxc/xc_private.h Tue Oct 12 15:06:41 2010 +0100
@@ -100,11 +100,11 @@ void xc_report_progress_step(xc_interfac
void *xc_memalign(size_t alignment, size_t size);
-int lock_pages(void *addr, size_t len);
-void unlock_pages(void *addr, size_t len);
+int lock_pages(xc_interface *xch, void *addr, size_t len);
+void unlock_pages(xc_interface *xch, void *addr, size_t len);
-int hcall_buf_prep(void **addr, size_t len);
-void hcall_buf_release(void **addr, size_t len);
+int hcall_buf_prep(xc_interface *xch, void **addr, size_t len);
+void hcall_buf_release(xc_interface *xch, void **addr, size_t len);
int do_xen_hypercall(xc_interface *xch, privcmd_hypercall_t *hypercall);
@@ -125,7 +125,7 @@ static inline int do_physdev_op(xc_inter
DECLARE_HYPERCALL;
- if ( hcall_buf_prep(&op, len) != 0 )
+ if ( hcall_buf_prep(xch, &op, len) != 0 )
{
PERROR("Could not lock memory for Xen hypercall");
goto out1;
@@ -142,7 +142,7 @@ static inline int do_physdev_op(xc_inter
" rebuild the user-space tool set?\n");
}
- hcall_buf_release(&op, len);
+ hcall_buf_release(xch, &op, len);
out1:
return ret;
@@ -153,7 +153,7 @@ static inline int do_domctl(xc_interface
int ret = -1;
DECLARE_HYPERCALL;
- if ( hcall_buf_prep((void **)&domctl, sizeof(*domctl)) != 0 )
+ if ( hcall_buf_prep(xch, (void **)&domctl, sizeof(*domctl)) != 0 )
{
PERROR("Could not lock memory for Xen hypercall");
goto out1;
@@ -171,7 +171,7 @@ static inline int do_domctl(xc_interface
" rebuild the user-space tool set?\n");
}
- hcall_buf_release((void **)&domctl, sizeof(*domctl));
+ hcall_buf_release(xch, (void **)&domctl, sizeof(*domctl));
out1:
return ret;
@@ -182,7 +182,7 @@ static inline int do_sysctl(xc_interface
int ret = -1;
DECLARE_HYPERCALL;
- if ( hcall_buf_prep((void **)&sysctl, sizeof(*sysctl)) != 0 )
+ if ( hcall_buf_prep(xch, (void **)&sysctl, sizeof(*sysctl)) != 0 )
{
PERROR("Could not lock memory for Xen hypercall");
goto out1;
@@ -200,7 +200,7 @@ static inline int do_sysctl(xc_interface
" rebuild the user-space tool set?\n");
}
- hcall_buf_release((void **)&sysctl, sizeof(*sysctl));
+ hcall_buf_release(xch, (void **)&sysctl, sizeof(*sysctl));
out1:
return ret;
diff -r 73a05c8f7c3e -r 29a5439889c3 tools/libxc/xc_resume.c
--- a/tools/libxc/xc_resume.c Tue Oct 12 15:06:41 2010 +0100
+++ b/tools/libxc/xc_resume.c Tue Oct 12 15:06:41 2010 +0100
@@ -196,7 +196,7 @@ static int xc_domain_resume_any(xc_inter
goto out;
}
- if ( lock_pages(&ctxt, sizeof(ctxt)) )
+ if ( lock_pages(xch, &ctxt, sizeof(ctxt)) )
{
ERROR("Unable to lock ctxt");
goto out;
@@ -235,7 +235,7 @@ static int xc_domain_resume_any(xc_inter
#if defined(__i386__) || defined(__x86_64__)
out:
- unlock_pages((void *)&ctxt, sizeof ctxt);
+ unlock_pages(xch, (void *)&ctxt, sizeof ctxt);
if (p2m)
munmap(p2m, P2M_FL_ENTRIES*PAGE_SIZE);
if (p2m_frame_list)
diff -r 73a05c8f7c3e -r 29a5439889c3 tools/libxc/xc_tbuf.c
--- a/tools/libxc/xc_tbuf.c Tue Oct 12 15:06:41 2010 +0100
+++ b/tools/libxc/xc_tbuf.c Tue Oct 12 15:06:41 2010 +0100
@@ -129,7 +129,7 @@ int xc_tbuf_set_cpu_mask(xc_interface *x
set_xen_guest_handle(sysctl.u.tbuf_op.cpu_mask.bitmap, bytemap);
sysctl.u.tbuf_op.cpu_mask.nr_cpus = sizeof(bytemap) * 8;
- if ( lock_pages(&bytemap, sizeof(bytemap)) != 0 )
+ if ( lock_pages(xch, &bytemap, sizeof(bytemap)) != 0 )
{
PERROR("Could not lock memory for Xen hypercall");
goto out;
@@ -137,7 +137,7 @@ int xc_tbuf_set_cpu_mask(xc_interface *x
ret = do_sysctl(xch, &sysctl);
- unlock_pages(&bytemap, sizeof(bytemap));
+ unlock_pages(xch, &bytemap, sizeof(bytemap));
out:
return ret;
diff -r 73a05c8f7c3e -r 29a5439889c3 tools/libxc/xc_tmem.c
--- a/tools/libxc/xc_tmem.c Tue Oct 12 15:06:41 2010 +0100
+++ b/tools/libxc/xc_tmem.c Tue Oct 12 15:06:41 2010 +0100
@@ -28,7 +28,7 @@ static int do_tmem_op(xc_interface *xch,
hypercall.op = __HYPERVISOR_tmem_op;
hypercall.arg[0] = (unsigned long)op;
- if (lock_pages(op, sizeof(*op)) != 0)
+ if (lock_pages(xch, op, sizeof(*op)) != 0)
{
PERROR("Could not lock memory for Xen hypercall");
return -EFAULT;
@@ -39,7 +39,7 @@ static int do_tmem_op(xc_interface *xch,
DPRINTF("tmem operation failed -- need to"
" rebuild the user-space tool set?\n");
}
- unlock_pages(op, sizeof(*op));
+ unlock_pages(xch, op, sizeof(*op));
return ret;
}
@@ -69,7 +69,7 @@ int xc_tmem_control(xc_interface *xch,
op.u.ctrl.oid[2] = 0;
if (subop == TMEMC_LIST) {
- if ((arg1 != 0) && (lock_pages(buf, arg1) != 0))
+ if ((arg1 != 0) && (lock_pages(xch, buf, arg1) != 0))
{
PERROR("Could not lock memory for Xen hypercall");
return -ENOMEM;
@@ -85,7 +85,7 @@ int xc_tmem_control(xc_interface *xch,
if (subop == TMEMC_LIST) {
if (arg1 != 0)
- unlock_pages(buf, arg1);
+ unlock_pages(xch, buf, arg1);
}
return rc;
@@ -115,7 +115,7 @@ int xc_tmem_control_oid(xc_interface *xc
op.u.ctrl.oid[2] = oid.oid[2];
if (subop == TMEMC_LIST) {
- if ((arg1 != 0) && (lock_pages(buf, arg1) != 0))
+ if ((arg1 != 0) && (lock_pages(xch, buf, arg1) != 0))
{
PERROR("Could not lock memory for Xen hypercall");
return -ENOMEM;
@@ -131,7 +131,7 @@ int xc_tmem_control_oid(xc_interface *xc
if (subop == TMEMC_LIST) {
if (arg1 != 0)
- unlock_pages(buf, arg1);
+ unlock_pages(xch, buf, arg1);
}
return rc;
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel