
[RFC] Re: [Xen-devel] [PATCH] fix xenctl_cpumap translation to handle bitops accessed like arrays



Is this what you were looking for? Let me know and I'll complete it.
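
To illustrate the motivation, here is a standalone sketch (not part of the
patch) showing why copying the long-based cpumask bytes straight to the
guest is endian-dependent, while the uint8_t-array form used by the
set_bit8()/test_bit8() helpers below is not; the main() harness is purely
for demonstration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static inline void set_bit8(unsigned long nr, uint8_t *addr)
{
    addr[nr >> 3] |= 1U << (nr & 7);
}

static inline int test_bit8(unsigned long nr, const uint8_t *addr)
{
    return 1U & (addr[nr >> 3] >> (nr & 7));
}

int main(void)
{
    unsigned long word_map = 1UL << 9;        /* "CPU 9" in a long-based bitmap */
    uint8_t byte_map[sizeof(unsigned long)];  /* same CPU, byte-indexed bitmap  */
    const uint8_t *raw = (const uint8_t *)&word_map;

    memset(byte_map, 0, sizeof(byte_map));
    set_bit8(9, byte_map);

    /*
     * On a little-endian host raw[1] == 0x02, so the old cast happened to
     * work; on a big-endian host the 0x02 sits at the other end of the
     * word and raw[1] == 0x00.  byte_map[1] is 0x02 either way.
     */
    printf("raw word bytes: %02x %02x\n", raw[0], raw[1]);
    printf("byte_map bytes: %02x %02x\n", byte_map[0], byte_map[1]);
    printf("test_bit8(9) = %d\n", test_bit8(9, byte_map));
    return 0;
}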

-JX


diff -r c145c6638187 tools/libxc/xc_domain.c
--- a/tools/libxc/xc_domain.c   Tue Dec 19 09:47:54 2006 -0500
+++ b/tools/libxc/xc_domain.c   Wed Dec 20 13:49:05 2006 -0500
@@ -96,16 +96,25 @@ int xc_vcpu_setaffinity(int xc_handle,
 {
     DECLARE_DOMCTL;
     int ret = -1;
+    uint8_t cpumap_array[sizeof (cpumap)];
+    int i;
 
     domctl.cmd = XEN_DOMCTL_setvcpuaffinity;
     domctl.domain = (domid_t)domid;
     domctl.u.vcpuaffinity.vcpu    = vcpu;
 
+    memset(cpumap_array, 0, sizeof(cpumap_array));
+
+    for (i = 0; i < sizeof (cpumap) * 8; i ++) {
+        if (cpumap & (1ULL << i))
+            set_bit8(i, cpumap_array);
+    }
+
     set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap,
-                         (uint8_t *)&cpumap);
-    domctl.u.vcpuaffinity.cpumap.nr_cpus = sizeof(cpumap) * 8;
+                         cpumap_array);
+    domctl.u.vcpuaffinity.cpumap.nr_cpus = sizeof(cpumap_array) * 8;
     
-    if ( lock_pages(&cpumap, sizeof(cpumap)) != 0 )
+    if ( lock_pages(cpumap_array, sizeof(cpumap_array)) != 0 )
     {
         PERROR("Could not lock memory for Xen hypercall");
         goto out;
@@ -113,7 +122,7 @@ int xc_vcpu_setaffinity(int xc_handle,
 
     ret = do_domctl(xc_handle, &domctl);
 
-    unlock_pages(&cpumap, sizeof(cpumap));
+    unlock_pages(cpumap_array, sizeof(cpumap_array));
 
  out:
     return ret;
@@ -127,16 +136,18 @@ int xc_vcpu_getaffinity(int xc_handle,
 {
     DECLARE_DOMCTL;
     int ret = -1;
+    uint8_t cpumap_array[sizeof (*cpumap)];
+    int i;
 
     domctl.cmd = XEN_DOMCTL_getvcpuaffinity;
     domctl.domain = (domid_t)domid;
     domctl.u.vcpuaffinity.vcpu = vcpu;
 
     set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap,
-                         (uint8_t *)cpumap);
-    domctl.u.vcpuaffinity.cpumap.nr_cpus = sizeof(*cpumap) * 8;
+                         (uint8_t *)cpumap_array);
+    domctl.u.vcpuaffinity.cpumap.nr_cpus = sizeof(cpumap_array) * 8;
     
-    if ( lock_pages(cpumap, sizeof(*cpumap)) != 0 )
+    if ( lock_pages(cpumap_array, sizeof(cpumap_array)) != 0 )
     {
         PERROR("Could not lock memory for Xen hypercall");
         goto out;
@@ -144,7 +155,13 @@ int xc_vcpu_getaffinity(int xc_handle,
 
     ret = do_domctl(xc_handle, &domctl);
 
-    unlock_pages(cpumap, sizeof(*cpumap));
+    unlock_pages(cpumap_array, sizeof(cpumap_array));
+
+    *cpumap = 0;
+    for (i = 0; i < sizeof (*cpumap) * 8; i ++) {
+        if (test_bit8(i, cpumap_array))
+            *cpumap |= 1ULL << i;
+    }
 
  out:
     return ret;
diff -r c145c6638187 xen/common/domctl.c
--- a/xen/common/domctl.c       Tue Dec 19 09:47:54 2006 -0500
+++ b/xen/common/domctl.c       Wed Dec 20 13:36:32 2006 -0500
@@ -28,20 +28,24 @@ extern void arch_getdomaininfo_ctxt(
     struct vcpu *, struct vcpu_guest_context *);
 
 void cpumask_to_xenctl_cpumap(
-    struct xenctl_cpumap *xenctl_cpumap, cpumask_t *cpumask)
+    struct xenctl_cpumap *xenctl_cpumap, const cpumask_t *cpumask)
 {
     unsigned int guest_bytes, copy_bytes, i;
     uint8_t zero = 0;
+    uint8_t local[sizeof (*cpumask)];
 
     if ( guest_handle_is_null(xenctl_cpumap->bitmap) )
         return;
 
+    memset(local, 0, sizeof (local));
+    for_each_cpu_mask(i, *cpumask) {
+        set_bit8(i, local);
+    }
+
     guest_bytes = (xenctl_cpumap->nr_cpus + 7) / 8;
     copy_bytes  = min_t(unsigned int, guest_bytes, (NR_CPUS + 7) / 8);
 
-    copy_to_guest(xenctl_cpumap->bitmap,
-                  (uint8_t *)cpus_addr(*cpumask),
-                  copy_bytes);
+    copy_to_guest(xenctl_cpumap->bitmap, &local[0], copy_bytes);
 
     for ( i = copy_bytes; i < guest_bytes; i++ )
         copy_to_guest_offset(xenctl_cpumap->bitmap, i, &zero, 1);
@@ -51,6 +55,8 @@ void xenctl_cpumap_to_cpumask(
     cpumask_t *cpumask, struct xenctl_cpumap *xenctl_cpumap)
 {
     unsigned int guest_bytes, copy_bytes;
+    unsigned int i;
+    uint8_t local[sizeof (*cpumask)];
 
     guest_bytes = (xenctl_cpumap->nr_cpus + 7) / 8;
     copy_bytes  = min_t(unsigned int, guest_bytes, (NR_CPUS + 7) / 8);
@@ -60,9 +66,14 @@ void xenctl_cpumap_to_cpumask(
     if ( guest_handle_is_null(xenctl_cpumap->bitmap) )
         return;
 
-    copy_from_guest((uint8_t *)cpus_addr(*cpumask),
-                    xenctl_cpumap->bitmap,
-                    copy_bytes);
+    memset(local, 0, sizeof(local));
+
+    copy_from_guest(&local[0], xenctl_cpumap->bitmap, copy_bytes);
+
+    for ( i = 0; i < NR_CPUS; i++) {
+        if (test_bit8(i, local))
+            cpu_set(i, *cpumask);
+    }
 }
 
 static inline int is_free_domid(domid_t dom)
diff -r c145c6638187 xen/include/public/domctl.h
--- a/xen/include/public/domctl.h       Tue Dec 19 09:47:54 2006 -0500
+++ b/xen/include/public/domctl.h       Wed Dec 20 13:35:54 2006 -0500
@@ -424,6 +424,22 @@ typedef struct xen_domctl xen_domctl_t;
 typedef struct xen_domctl xen_domctl_t;
 DEFINE_XEN_GUEST_HANDLE(xen_domctl_t);
 
+/*
+ * These routines implement bitmasks as uint8_t arrays for portability.
+ */
+static inline int test_bit8(ulong nr, const uint8_t *addr)
+{
+       return (1U & (addr[nr >> 3] >> (nr & 7)));
+}
+
+static inline void set_bit8(ulong nr, uint8_t *addr)
+{
+       uint8_t bit = 1U << (nr & 7);
+       uint8_t *p = addr + (nr >> 3);
+
+       *p |= bit;
+}
+
 #endif /* __XEN_PUBLIC_DOMCTL_H__ */
 
 /*
diff -r c145c6638187 xen/include/xen/cpumask.h
--- a/xen/include/xen/cpumask.h Tue Dec 19 09:47:54 2006 -0500
+++ b/xen/include/xen/cpumask.h Tue Dec 19 10:11:25 2006 -0500
@@ -382,7 +382,7 @@ extern cpumask_t cpu_present_map;
 /* Copy to/from cpumap provided by control tools. */
 struct xenctl_cpumap;
 void cpumask_to_xenctl_cpumap(
-    struct xenctl_cpumap *enctl_cpumap, cpumask_t *cpumask);
+    struct xenctl_cpumap *enctl_cpumap, const cpumask_t *cpumask);
 void xenctl_cpumap_to_cpumask(
     cpumask_t *cpumask, struct xenctl_cpumap *enctl_cpumap);
 

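For reference, a rough usage sketch from the tools side (not part of the
patch): the xc_handle/domid values are placeholders, and the cpumap
argument types (value for set, pointer for get) are inferred from the
loops added above.

#include <stdint.h>
#include <stdio.h>
#include <xenctrl.h>

/* Pin vcpu 0 of a domain to CPUs 0 and 1, then read the affinity back. */
static int pin_vcpu0(int xc_handle, uint32_t domid)
{
    uint64_t cpumap = (1ULL << 0) | (1ULL << 1);   /* allow CPUs 0 and 1 */
    uint64_t readback = 0;

    if ( xc_vcpu_setaffinity(xc_handle, domid, 0, cpumap) != 0 )
        return -1;

    if ( xc_vcpu_getaffinity(xc_handle, domid, 0, &readback) != 0 )
        return -1;

    printf("vcpu0 affinity: %#llx\n", (unsigned long long)readback);
    return 0;
}
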
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
