xen-ppc-devel

[XenPPC] [rfc][patch] privcmd from user and kernel

To: hollisb@xxxxxxxxxx
Subject: [XenPPC] [rfc][patch] privcmd from user and kernel
From: Jimi Xenidis <jimix@xxxxxxxxxxxxxx>
Date: Tue, 06 Jun 2006 16:08:16 -0400
Cc: xen-ppc-devel@xxxxxxxxxxxxxxxxxxx
Delivery-date: Tue, 06 Jun 2006 13:08:08 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-ppc-devel-request@lists.xensource.com?subject=help>
List-id: Xen PPC development <xen-ppc-devel.lists.xensource.com>
List-post: <mailto:xen-ppc-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-ppc-devel>, <mailto:xen-ppc-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-ppc-devel>, <mailto:xen-ppc-devel-request@lists.xensource.com?subject=unsubscribe>
Sender: xen-ppc-devel-bounces@xxxxxxxxxxxxxxxxxxx

Hollis,
I think I got this right; I need to make the priv hcall available to
the kernel as well. Does this look right to you?

Ignore the HYPERVISOR_multicall() for now.
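
For reference, here is roughly how an in-kernel user (say, a balloon-type
driver) could call the exported HYPERVISOR_memory_op() once this goes in.
This is only an illustration, not part of the patch; the function name is
made up and the return-value handling is simplified:

        /* illustrative only -- not part of the patch */
        static int give_back_one_page(unsigned long mfn)
        {
                unsigned long frame = mfn;
                xen_memory_reservation_t reservation = {
                        .nr_extents   = 1,
                        .extent_order = 0,
                        .domid        = DOMID_SELF,
                };
                int rc;

                set_xen_guest_handle(reservation.extent_start, &frame);

                /* everything here is kernel memory, so no copy_{from,to}_user;
                 * HYPERVISOR_memory_op() builds the xencomm descriptors itself */
                rc = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);

                /* memory_op returns the number of extents processed */
                return (rc == 1) ? 0 : -EBUSY;
        }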

---

diff -r 28208ae097cb arch/powerpc/platforms/xen/hcall.c
--- a/arch/powerpc/platforms/xen/hcall.c        Mon Jun 05 16:55:22 2006 -0500
+++ b/arch/powerpc/platforms/xen/hcall.c        Tue Jun 06 13:21:39 2006 -0400
@@ -179,6 +179,13 @@ int HYPERVISOR_grant_table_op(unsigned i
        return rc;
 }
 EXPORT_SYMBOL(HYPERVISOR_grant_table_op);
+
+int HYPERVISOR_multicall(void *call_list, int nr_calls)
+{
+       BUG_ON(1);
+       return -ENOSYS;
+}
+EXPORT_SYMBOL(HYPERVISOR_multicall);
 
 static int xenppc_privcmd_dom0_op(privcmd_hypercall_t *hypercall)
 {
@@ -314,56 +321,83 @@ out:
        return ret;
 }
 
-static int xenppc_privcmd_memory_op(privcmd_hypercall_t *hypercall)
-{
-       xen_memory_reservation_t kern_op;
-       xen_memory_reservation_t __user *user_op;
+int HYPERVISOR_memory_op(unsigned int cmd, void *arg)
+{
+       int ret;
        struct xencomm_desc *op_desc;
-       const unsigned long cmd = hypercall->arg[0];
-       int ret = 0;
-
-       user_op = (xen_memory_reservation_t __user *)hypercall->arg[1];
-       if (copy_from_user(&kern_op, user_op, sizeof(xen_memory_reservation_t)))
-               return -EFAULT;
-
-       ret = xencomm_create(&kern_op, sizeof(xen_memory_reservation_t),
+       xen_memory_reservation_t *mop;
+
+
+       mop = (xen_memory_reservation_t *)arg;
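+       /* wrap the reservation struct in a xencomm descriptor so the
+        * hypervisor can reach it by physical address */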
+       ret = xencomm_create(mop, sizeof(xen_memory_reservation_t),
                             &op_desc, GFP_KERNEL);
        if (ret)
                return ret;
 
        switch (cmd) {
        case XENMEM_increase_reservation:
-       case XENMEM_decrease_reservation: {
+       case XENMEM_decrease_reservation:
+       {
                struct xencomm_desc *desc = NULL;
-               if (xen_guest_handle(kern_op.extent_start)) {
+
+               if (xen_guest_handle(mop->extent_start)) {
                        ret = xencomm_create(
-                               xen_guest_handle(kern_op.extent_start),
-                               kern_op.nr_extents *
-                               sizeof(*xen_guest_handle(kern_op.extent_start)),
+                               xen_guest_handle(mop->extent_start),
+                               mop->nr_extents *
+                               sizeof(*xen_guest_handle(mop->extent_start)),
                                &desc, GFP_KERNEL);
                        if (ret)
-                               goto out;
-
-                       set_xen_guest_handle(kern_op.extent_start,
-                                                (void *)__pa(desc));
+                               return ret;
+
+                       set_xen_guest_handle(mop->extent_start,
+                                            (void *)__pa(desc));
                }
 
-               ret = plpar_hcall_norets(XEN_MARK(hypercall->op), cmd, __pa(op_desc));
+               ret = plpar_hcall_norets(XEN_MARK(__HYPERVISOR_memory_op),
+                                        cmd, __pa(op_desc));
 
                xencomm_free(desc);
-               }
-               break;
-       default:
-               printk("%s: unknown memory cmd %ld\n", __func__, cmd);
+       }
+               break;
+               
+       case XENMEM_maximum_ram_page:
+               /* arg is NULL so we can call thru here */
+               ret = plpar_hcall_norets(XEN_MARK(__HYPERVISOR_memory_op),
+                                       cmd, NULL);
+               break;
+       case XENMEM_populate_physmap:
+       case XENMEM_current_reservation:
+       case XENMEM_maximum_reservation:
+       case XENMEM_machphys_mfn_list:
+       case XENMEM_add_to_physmap:
+       case XENMEM_translate_gpfn_list:
+       default:
+               printk("%s: unknown memory op %u\n", __func__, cmd);
                ret = -ENOSYS;
-               goto out;
-       }
-
-       if (copy_to_user(user_op, &kern_op, sizeof(xen_memory_reservation_t)))
+       }
+       xencomm_free(op_desc);
+       return ret;
+
+}
+EXPORT_SYMBOL(HYPERVISOR_memory_op);
+
+static int xenppc_privcmd_memory_op(privcmd_hypercall_t *hypercall)
+{
+       xen_memory_reservation_t kern_op;
+       xen_memory_reservation_t __user *user_op;
+       const unsigned long cmd = hypercall->arg[0];
+       int ret = 0;
+
+       user_op = (xen_memory_reservation_t __user *)hypercall->arg[1];
+       if (copy_from_user(&kern_op, user_op,
+                          sizeof(xen_memory_reservation_t)))
                return -EFAULT;
 
-out:
-       xencomm_free(op_desc);
+       ret = HYPERVISOR_memory_op(cmd, &kern_op);
+       if (!ret) {
+               if (copy_to_user(user_op, &kern_op,
+                                sizeof(xen_memory_reservation_t)))
+                       return -EFAULT;
+       }
        return ret;
 }
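
For the user-space half, something like the sketch below would exercise the
xenppc_privcmd_memory_op() path above through the generic privcmd hypercall
ioctl. This is not part of the patch; the device path, header names, and the
increase_reservation() helper are only assumptions for illustration, following
the usual libxc pattern:

        #include <fcntl.h>
        #include <unistd.h>
        #include <sys/ioctl.h>
        #include <xen/memory.h>          /* XENMEM_*, xen_memory_reservation_t */
        #include <xen/sys/privcmd.h>     /* privcmd_hypercall_t, IOCTL_PRIVCMD_HYPERCALL */

        /* rough user-space sketch, not from this patch */
        static int increase_reservation(domid_t domid, unsigned long nr_pages)
        {
                xen_memory_reservation_t res = {
                        .nr_extents   = nr_pages,
                        .extent_order = 0,           /* order-0 extents */
                        .domid        = domid,       /* extent_start left NULL */
                };
                privcmd_hypercall_t hc = {
                        .op  = __HYPERVISOR_memory_op,
                        .arg = { XENMEM_increase_reservation, (unsigned long)&res },
                };
                int fd, rc;

                fd = open("/proc/xen/privcmd", O_RDWR);
                if (fd < 0)
                        return -1;

                /* the ioctl ends up in xenppc_privcmd_memory_op(), which copies
                 * 'res' into the kernel, calls HYPERVISOR_memory_op(), and copies
                 * the struct back out on success */
                rc = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &hc);
                close(fd);
                return rc;
        }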

_______________________________________________
Xen-ppc-devel mailing list
Xen-ppc-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-ppc-devel
