
[Xen-devel] [PATCH 02 of 22] tools: merge several bitop functions into xc_bitops.h


  • To: xen-devel@xxxxxxxxxxxxxxxxxxx
  • From: Olaf Hering <olaf@xxxxxxxxx>
  • Date: Fri, 10 Jun 2011 11:12:51 +0200
  • Delivery-date: Fri, 10 Jun 2011 02:23:29 -0700
  • List-id: Xen developer discussion <xen-devel.lists.xensource.com>

# HG changeset patch
# User Olaf Hering <olaf@xxxxxxxxx>
# Date 1307695623 -7200
# Node ID 9476d85932e5eb8f1e7ce0a6814b6c0634341e61
# Parent  5553bd24a3d950d7d1388a95fd9a69b215cd798a
tools: merge several bitop functions into xc_bitops.h

Bitmaps are used in save/restore, xenpaging and blktap2.  Merge the code into a
private xc_bitops.h file. All users are single-threaded, so locking is not an
issue. The array of bits is declared volatile because the x86 save/restore
code passes the bitmap to the hypervisor, which in turn modifies it.
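
To illustrate the pattern (a sketch only; send_dirty_pages() and its
arguments are invented for this example and are not actual libxc code):

    #include "xc_bitops.h"

    /* Hypothetical caller: walk a dirty bitmap that the hypervisor may
     * update at any time.  The volatile qualifier inside test_bit()
     * forces a fresh read of the bitmap word on every call instead of
     * letting the compiler cache it in a register. */
    static void send_dirty_pages(volatile unsigned long *to_send,
                                 unsigned long p2m_size)
    {
        unsigned long pfn;

        for ( pfn = 0; pfn < p2m_size; pfn++ )
            if ( test_bit(pfn, to_send) )
                ; /* transmit page 'pfn' here */
    }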

blktap2 uses a private bitmap. Its bitmap_size() function had a possible
buffer overflow: the remainder was discarded, so a bitmap whose size in bits
was not a multiple of 8 was allocated too small.
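
A minimal sketch of the truncation, contrasting the old blktap2 helper with
the rounding now done in xc_bitops.h (macros repeated so the snippet stands
alone):

    #include <stdint.h>

    #define BITS_PER_LONG (sizeof(unsigned long) * 8)
    #define ORDER_LONG (sizeof(unsigned long) == 4 ? 5 : 6)

    /* Old helper: sz >> 3 discards the remainder, so for sz = 9 bits it
     * returns 1 byte and bit 8 lands beyond the allocation. */
    static inline int old_bitmap_size(uint64_t sz)
    {
        return sz >> 3;
    }

    /* New helper: round up to whole unsigned longs so all bits fit. */
    static inline int new_bitmap_size(int nr_bits)
    {
        int nr_long = (nr_bits + BITS_PER_LONG - 1) >> ORDER_LONG;
        return nr_long * sizeof(unsigned long);
    }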

ia64 save/restore uses a bitmap to send the set of online vcpus to the host.

x86 save/restore uses a bitmap to track dirty pages; this bitmap is shared
with the hypervisor. The unused count_bits() function was removed along with
its hweight32() helper, and the new bitmap_size() function replaces the local
BITMAP_SIZE macro.
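
For example, a sketch of the new sizing (illustrative only; the real to_send
map lives in hypercall-safe memory obtained with
xc_hypercall_buffer_alloc_pages(), not bitmap_alloc()):

    #include <string.h>
    #include "xc_bitops.h"

    /* Allocate a dirty-page map sized from the guest's p2m size and
     * mark every page dirty for the first iteration, as
     * xc_domain_save() does with memset(to_send, 0xff, ...). */
    static unsigned long *alloc_to_send(unsigned long p2m_size)
    {
        unsigned long *map = bitmap_alloc(p2m_size);

        if ( map )
            memset(map, 0xff, bitmap_size(p2m_size));
        return map;
    }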

xenpaging uses three private bitmaps to track the gfns which are in paged-out
state.  It had a copy of some of the Linux bitops.h code, which is now
obsolete. Also, the BITS_PER_LONG macro was hardcoded to 64, which made it
impossible to run 32-bit tools on a 64-bit host. Whether this works at all
has yet to be tested.
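
A sketch of the breakage (illustrative, not code from the tree): with
BITS_PER_LONG pinned to 64 while unsigned long is only 32 bits wide, the
word index for any bit past 31 comes out wrong and the shift count can
exceed the word width:

    #define HARDCODED_BITS_PER_LONG 64
    #define REAL_BITS_PER_LONG (sizeof(unsigned long) * 8) /* 32 on x86-32 */

    /* Bit 40 lives in word 40/32 = 1 on a 32-bit build, but the old
     * hardcoded divisor computed word 40/64 = 0, so set_bit() and
     * test_bit() touched the wrong unsigned long. */
    unsigned long correct_word(int nr)
    {
        return nr / REAL_BITS_PER_LONG;
    }

    unsigned long broken_word(int nr)
    {
        return nr / HARDCODED_BITS_PER_LONG;
    }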

Signed-off-by: Olaf Hering <olaf@xxxxxxxxx>

diff -r 5553bd24a3d9 -r 9476d85932e5 tools/blktap2/drivers/block-log.c
--- a/tools/blktap2/drivers/block-log.c Fri Jun 10 10:47:02 2011 +0200
+++ b/tools/blktap2/drivers/block-log.c Fri Jun 10 10:47:03 2011 +0200
@@ -47,6 +47,7 @@
 #include <sys/socket.h>
 #include <sys/un.h>
 
+#include "xc_bitops.h"
 #include "log.h"
 #include "tapdisk.h"
 #include "tapdisk-server.h"
@@ -89,31 +90,6 @@ static void ctl_request(event_id_t, char
 
 /* large flat bitmaps don't scale particularly well either in size or scan
  * time, but they'll do for now */
-#define BITS_PER_LONG (sizeof(unsigned long) * 8)
-#define BITS_TO_LONGS(bits) (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG)
-
-#define BITMAP_ENTRY(_nr, _bmap) ((unsigned long*)(_bmap))[(_nr)/BITS_PER_LONG]
-#define BITMAP_SHIFT(_nr) ((_nr) % BITS_PER_LONG)
-
-static inline int test_bit(int nr, void* bmap)
-{
-  return (BITMAP_ENTRY(nr, bmap) >> BITMAP_SHIFT(nr)) & 1;
-}
-
-static inline void clear_bit(int nr, void* bmap)
-{
-  BITMAP_ENTRY(nr, bmap) &= ~(1UL << BITMAP_SHIFT(nr));
-}
-
-static inline void set_bit(int nr, void* bmap)
-{
-  BITMAP_ENTRY(nr, bmap) |= (1UL << BITMAP_SHIFT(nr));
-}
-
-static inline int bitmap_size(uint64_t sz)
-{
-  return sz >> 3;
-}
 
 static int writelog_create(struct tdlog_state *s)
 {
@@ -123,7 +99,8 @@ static int writelog_create(struct tdlog_
 
   BDPRINTF("allocating %"PRIu64" bytes for dirty bitmap", bmsize);
 
-  if (!(s->writelog = calloc(bmsize, 1))) {
+  s->writelog = bitmap_alloc(s->size);
+  if (!s->writelog) {
     BWPRINTF("could not allocate dirty bitmap of size %"PRIu64, bmsize);
     return -1;
   }
diff -r 5553bd24a3d9 -r 9476d85932e5 tools/libxc/ia64/xc_ia64_linux_restore.c
--- a/tools/libxc/ia64/xc_ia64_linux_restore.c  Fri Jun 10 10:47:02 2011 +0200
+++ b/tools/libxc/ia64/xc_ia64_linux_restore.c  Fri Jun 10 10:47:03 2011 +0200
@@ -218,14 +218,12 @@ xc_ia64_recv_vcpumap(xc_interface *xch,
               max_virt_cpus, info->max_vcpu_id);
         return -1;
     }
-    vcpumap_size = (max_virt_cpus + 1 + sizeof(vcpumap[0]) - 1) /
-        sizeof(vcpumap[0]);
-    vcpumap = malloc(vcpumap_size);
+    vcpumap_size = bitmap_size(max_virt_cpus);
+    vcpumap = (uint64_t *)bitmap_alloc(max_virt_cpus);
     if (vcpumap == NULL) {
         ERROR("memory alloc for vcpumap");
         return -1;
     }
-    memset(vcpumap, 0, vcpumap_size);
     if (read_exact(io_fd, vcpumap, vcpumap_size)) {
         ERROR("read vcpumap");
         free(vcpumap);
@@ -353,7 +351,7 @@ xc_ia64_pv_recv_context_ver_three(xc_int
 
     /* vcpu context */
     for (i = 0; i <= info.max_vcpu_id; i++) {
-        if (!__test_bit(i, vcpumap))
+        if (!test_bit(i, vcpumap))
             continue;
 
         rc = xc_ia64_pv_recv_vcpu_context(xch, io_fd, dom, i);
@@ -454,7 +452,7 @@ xc_ia64_hvm_recv_context(xc_interface *x
         /* A copy of the CPU context of the guest. */
         vcpu_guest_context_any_t ctxt_any;
 
-        if (!__test_bit(i, vcpumap))
+        if (!test_bit(i, vcpumap))
             continue;
 
         if (xc_ia64_recv_vcpu_context(xch, io_fd, dom, i, &ctxt_any))
diff -r 5553bd24a3d9 -r 9476d85932e5 tools/libxc/ia64/xc_ia64_linux_save.c
--- a/tools/libxc/ia64/xc_ia64_linux_save.c     Fri Jun 10 10:47:02 2011 +0200
+++ b/tools/libxc/ia64/xc_ia64_linux_save.c     Fri Jun 10 10:47:03 2011 +0200
@@ -32,6 +32,7 @@
 #include <sys/time.h>
 
 #include "xg_private.h"
+#include "xc_bitops.h"
 #include "xc_ia64.h"
 #include "xc_ia64_save_restore.h"
 #include "xc_efi.h"
@@ -51,20 +52,6 @@
 ** During (live) save/migrate, we maintain a number of bitmaps to track
 ** which pages we have to send, and to skip.
 */
-static inline int test_bit(int nr, volatile void * addr)
-{
-    return (BITMAP_ENTRY(nr, addr) >> BITMAP_SHIFT(nr)) & 1;
-}
-
-static inline void clear_bit(int nr, volatile void * addr)
-{
-    BITMAP_ENTRY(nr, addr) &= ~(1UL << BITMAP_SHIFT(nr));
-}
-
-static inline void set_bit(int nr, volatile void * addr)
-{
-    BITMAP_ENTRY(nr, addr) |= (1UL << BITMAP_SHIFT(nr));
-}
 
 static int
 suspend_and_state(int (*suspend)(void*), void* data,
@@ -207,19 +194,17 @@ xc_ia64_send_vcpumap(xc_interface *xch, 
     unsigned long vcpumap_size;
     uint64_t *vcpumap = NULL;
 
-    vcpumap_size = (max_virt_cpus + 1 + sizeof(vcpumap[0]) - 1) /
-        sizeof(vcpumap[0]);
-    vcpumap = malloc(vcpumap_size);
+    vcpumap_size = bitmap_size(max_virt_cpus);
+    vcpumap = (uint64_t *)bitmap_alloc(max_virt_cpus);
     if (vcpumap == NULL) {
         ERROR("memory alloc for vcpumap");
         goto out;
     }
-    memset(vcpumap, 0, vcpumap_size);
 
     for (i = 0; i <= info->max_vcpu_id; i++) {
         xc_vcpuinfo_t vinfo;
         if ((xc_vcpu_getinfo(xch, dom, i, &vinfo) == 0) && vinfo.online)
-            __set_bit(i, vcpumap);
+            set_bit(i, vcpumap);
     }
 
     if (write_exact(io_fd, &max_virt_cpus, sizeof(max_virt_cpus))) {
@@ -265,7 +250,7 @@ xc_ia64_pv_send_context(xc_interface *xc
 
         char *mem;
 
-        if (!__test_bit(i, vcpumap))
+        if (!test_bit(i, vcpumap))
             continue;
 
         if (xc_ia64_send_vcpu_context(xch, io_fd, dom, i, &ctxt_any))
@@ -332,7 +317,7 @@ xc_ia64_hvm_send_context(xc_interface *x
         /* A copy of the CPU context of the guest. */
         vcpu_guest_context_any_t ctxt_any;
 
-        if (!__test_bit(i, vcpumap))
+        if (!test_bit(i, vcpumap))
             continue;
 
         if (xc_ia64_send_vcpu_context(xch, io_fd, dom, i, &ctxt_any))
diff -r 5553bd24a3d9 -r 9476d85932e5 tools/libxc/ia64/xc_ia64_save_restore.h
--- a/tools/libxc/ia64/xc_ia64_save_restore.h   Fri Jun 10 10:47:02 2011 +0200
+++ b/tools/libxc/ia64/xc_ia64_save_restore.h   Fri Jun 10 10:47:03 2011 +0200
@@ -33,26 +33,6 @@
 
 #define XC_IA64_SR_FORMAT_VER_CURRENT   XC_IA64_SR_FORMAT_VER_THREE
 
-/*
-** During (live) save/migrate, we maintain a number of bitmaps to track
-** which pages we have to send, and to skip.
-*/
-#define BITS_PER_LONG (sizeof(unsigned long) * 8)
-
-#define BITMAP_ENTRY(_nr,_bmap) \
-   ((unsigned long *)(_bmap))[(_nr)/BITS_PER_LONG]
-
-#define BITMAP_SHIFT(_nr) ((_nr) % BITS_PER_LONG)
-
-static inline int __test_bit(int nr, void * addr)
-{
-    return (BITMAP_ENTRY(nr, addr) >> BITMAP_SHIFT(nr)) & 1;
-}
-
-static inline void __set_bit(int nr, void * addr)
-{
-    BITMAP_ENTRY(nr, addr) |= (1UL << BITMAP_SHIFT(nr));
-}
 
 #endif /* XC_IA64_SAVE_RESTORE_H */
 
diff -r 5553bd24a3d9 -r 9476d85932e5 tools/libxc/xc_bitops.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/libxc/xc_bitops.h   Fri Jun 10 10:47:03 2011 +0200
@@ -0,0 +1,57 @@
+#ifndef XC_BITOPS_H
+#define XC_BITOPS_H 1
+
+/* bitmap operations for single threaded access */
+
+#include <stdlib.h>
+
+#define BITS_PER_LONG (sizeof(unsigned long) * 8)
+#define ORDER_LONG (sizeof(unsigned long) == 4 ? 5 : 6)
+
+#define BITMAP_ENTRY(_nr,_bmap) ((_bmap))[(_nr)/BITS_PER_LONG]
+#define BITMAP_SHIFT(_nr) ((_nr) % BITS_PER_LONG)
+
+/* return the number of bytes needed to hold nr_bits, rounded up to whole longs */
+static inline int bitmap_size(int nr_bits)
+{
+    int nr_long, nr_bytes;
+    nr_long = (nr_bits + BITS_PER_LONG - 1) >> ORDER_LONG;
+    nr_bytes = nr_long * sizeof(unsigned long);
+    return nr_bytes;
+}
+
+static inline unsigned long *bitmap_alloc(int nr_bits)
+{
+    return calloc(1, bitmap_size(nr_bits));
+}
+
+static inline int test_bit(int nr, volatile unsigned long *addr)
+{
+    return (BITMAP_ENTRY(nr, addr) >> BITMAP_SHIFT(nr)) & 1;
+}
+
+static inline void clear_bit(int nr, volatile unsigned long *addr)
+{
+    BITMAP_ENTRY(nr, addr) &= ~(1UL << BITMAP_SHIFT(nr));
+}
+
+static inline void set_bit(int nr, volatile unsigned long *addr)
+{
+    BITMAP_ENTRY(nr, addr) |= (1UL << BITMAP_SHIFT(nr));
+}
+
+static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
+{
+    int oldbit = test_bit(nr, addr);
+    clear_bit(nr, addr);
+    return oldbit;
+}
+
+static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
+{
+    int oldbit = test_bit(nr, addr);
+    set_bit(nr, addr);
+    return oldbit;
+}
+
+#endif  /* XC_BITOPS_H */
diff -r 5553bd24a3d9 -r 9476d85932e5 tools/libxc/xc_domain_save.c
--- a/tools/libxc/xc_domain_save.c      Fri Jun 10 10:47:02 2011 +0200
+++ b/tools/libxc/xc_domain_save.c      Fri Jun 10 10:47:03 2011 +0200
@@ -27,6 +27,7 @@
 #include <sys/time.h>
 
 #include "xc_private.h"
+#include "xc_bitops.h"
 #include "xc_dom.h"
 #include "xg_private.h"
 #include "xg_save_restore.h"
@@ -88,57 +89,6 @@ struct outbuf {
 
 #define SUPER_PAGE_START(pfn)    (((pfn) & (SUPERPAGE_NR_PFNS-1)) == 0 )
 
-/*
-** During (live) save/migrate, we maintain a number of bitmaps to track
-** which pages we have to send, to fixup, and to skip.
-*/
-
-#define BITS_PER_LONG (sizeof(unsigned long) * 8)
-#define BITS_TO_LONGS(bits) (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG)
-#define BITMAP_SIZE   (BITS_TO_LONGS(dinfo->p2m_size) * sizeof(unsigned long))
-
-#define BITMAP_ENTRY(_nr,_bmap) \
-   ((volatile unsigned long *)(_bmap))[(_nr)/BITS_PER_LONG]
-
-#define BITMAP_SHIFT(_nr) ((_nr) % BITS_PER_LONG)
-
-#define ORDER_LONG (sizeof(unsigned long) == 4 ? 5 : 6)
-
-static inline int test_bit (int nr, volatile void * addr)
-{
-    return (BITMAP_ENTRY(nr, addr) >> BITMAP_SHIFT(nr)) & 1;
-}
-
-static inline void clear_bit (int nr, volatile void * addr)
-{
-    BITMAP_ENTRY(nr, addr) &= ~(1UL << BITMAP_SHIFT(nr));
-}
-
-static inline void set_bit ( int nr, volatile void * addr)
-{
-    BITMAP_ENTRY(nr, addr) |= (1UL << BITMAP_SHIFT(nr));
-}
-
-/* Returns the hamming weight (i.e. the number of bits set) in a N-bit word */
-static inline unsigned int hweight32(unsigned int w)
-{
-    unsigned int res = (w & 0x55555555) + ((w >> 1) & 0x55555555);
-    res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
-    res = (res & 0x0F0F0F0F) + ((res >> 4) & 0x0F0F0F0F);
-    res = (res & 0x00FF00FF) + ((res >> 8) & 0x00FF00FF);
-    return (res & 0x0000FFFF) + ((res >> 16) & 0x0000FFFF);
-}
-
-static inline int count_bits ( int nr, volatile void *addr)
-{
-    int i, count = 0;
-    volatile unsigned long *p = (volatile unsigned long *)addr;
-    /* We know that the array is padded to unsigned long. */
-    for ( i = 0; i < (nr / (sizeof(unsigned long)*8)); i++, p++ )
-        count += hweight32(*p);
-    return count;
-}
-
 static uint64_t tv_to_us(struct timeval *new)
 {
     return (new->tv_sec * 1000000) + new->tv_usec;
@@ -974,9 +924,9 @@ int xc_domain_save(xc_interface *xch, in
     sent_last_iter = dinfo->p2m_size;
 
     /* Setup to_send / to_fix and to_skip bitmaps */
-    to_send = xc_hypercall_buffer_alloc_pages(xch, to_send, NRPAGES(BITMAP_SIZE));
-    to_skip = xc_hypercall_buffer_alloc_pages(xch, to_skip, NRPAGES(BITMAP_SIZE));
-    to_fix  = calloc(1, BITMAP_SIZE);
+    to_send = xc_hypercall_buffer_alloc_pages(xch, to_send, NRPAGES(bitmap_size(dinfo->p2m_size)));
+    to_skip = xc_hypercall_buffer_alloc_pages(xch, to_skip, NRPAGES(bitmap_size(dinfo->p2m_size)));
+    to_fix  = calloc(1, bitmap_size(dinfo->p2m_size));
 
     if ( !to_send || !to_fix || !to_skip )
     {
@@ -984,7 +934,7 @@ int xc_domain_save(xc_interface *xch, in
         goto out;
     }
 
-    memset(to_send, 0xff, BITMAP_SIZE);
+    memset(to_send, 0xff, bitmap_size(dinfo->p2m_size));
 
     if ( hvm )
     {
@@ -1407,7 +1357,7 @@ int xc_domain_save(xc_interface *xch, in
         if ( last_iter && debug )
         {
             int id = XC_SAVE_ID_ENABLE_VERIFY_MODE;
-            memset(to_send, 0xff, BITMAP_SIZE);
+            memset(to_send, 0xff, bitmap_size(dinfo->p2m_size));
             debug = 0;
             DPRINTF("Entering debug resend-all mode\n");
 
@@ -1875,8 +1825,8 @@ int xc_domain_save(xc_interface *xch, in
     if ( ctx->live_m2p )
         munmap(ctx->live_m2p, M2P_SIZE(ctx->max_mfn));
 
-    xc_hypercall_buffer_free_pages(xch, to_send, NRPAGES(BITMAP_SIZE));
-    xc_hypercall_buffer_free_pages(xch, to_skip, NRPAGES(BITMAP_SIZE));
+    xc_hypercall_buffer_free_pages(xch, to_send, NRPAGES(bitmap_size(dinfo->p2m_size)));
+    xc_hypercall_buffer_free_pages(xch, to_skip, NRPAGES(bitmap_size(dinfo->p2m_size)));
 
     free(pfn_type);
     free(pfn_batch);
diff -r 5553bd24a3d9 -r 9476d85932e5 tools/xenpaging/bitops.h
--- a/tools/xenpaging/bitops.h  Fri Jun 10 10:47:02 2011 +0200
+++ /dev/null   Thu Jan 01 00:00:00 1970 +0000
@@ -1,448 +0,0 @@
-#ifndef _X86_BITOPS_H
-#define _X86_BITOPS_H
-
-/*
- * Copyright 1992, Linus Torvalds.
- */
-
-//#include <xen/config.h>
-
-#ifdef CONFIG_SMP
-#define LOCK_PREFIX "lock ; "
-#else
-#define LOCK_PREFIX ""
-#endif
-
-/*
- * We specify the memory operand as both input and output because the memory
- * operand is both read from and written to. Since the operand is in fact a
- * word array, we also specify "memory" in the clobbers list to indicate that
- * words other than the one directly addressed by the memory operand may be
- * modified. We don't use "+m" because the gcc manual says that it should be
- * used only when the constraint allows the operand to reside in a register.
- */
-
-#define ADDR (*(volatile long *) addr)
-#define CONST_ADDR (*(const volatile long *) addr)
-
-extern void __bitop_bad_size(void);
-#define bitop_bad_size(addr) (sizeof(*(addr)) < 4)
-
-/**
- * set_bit - Atomically set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * This function is atomic and may not be reordered.  See __set_bit()
- * if you do not require the atomic guarantees.
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-static inline void set_bit(int nr, volatile void *addr)
-{
-    asm volatile (
-        LOCK_PREFIX
-        "btsl %1,%0"
-        : "=m" (ADDR)
-        : "Ir" (nr), "m" (ADDR) : "memory");
-}
-#define set_bit(nr, addr) ({                            \
-    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
-    set_bit(nr, addr);                                  \
-})
-
-/**
- * __set_bit - Set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * Unlike set_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-static inline void __set_bit(int nr, volatile void *addr)
-{
-    asm volatile (
-        "btsl %1,%0"
-        : "=m" (ADDR)
-        : "Ir" (nr), "m" (ADDR) : "memory");
-}
-#define __set_bit(nr, addr) ({                          \
-    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
-    __set_bit(nr, addr);                                \
-})
-
-/**
- * clear_bit - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * clear_bit() is atomic and may not be reordered.  However, it does
- * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
- * in order to ensure changes are visible on other processors.
- */
-static inline void clear_bit(int nr, volatile void *addr)
-{
-    asm volatile (
-        LOCK_PREFIX
-        "btrl %1,%0"
-        : "=m" (ADDR)
-        : "Ir" (nr), "m" (ADDR) : "memory");
-}
-#define clear_bit(nr, addr) ({                          \
-    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
-    clear_bit(nr, addr);                                \
-})
-
-/**
- * __clear_bit - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * Unlike clear_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-static inline void __clear_bit(int nr, volatile void *addr)
-{
-    asm volatile (
-        "btrl %1,%0"
-        : "=m" (ADDR)
-        : "Ir" (nr), "m" (ADDR) : "memory");
-}
-#define __clear_bit(nr, addr) ({                        \
-    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
-    __clear_bit(nr, addr);                              \
-})
-
-#define smp_mb__before_clear_bit() ((void)0)
-#define smp_mb__after_clear_bit()  ((void)0)
-
-/**
- * __change_bit - Toggle a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * Unlike change_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-static inline void __change_bit(int nr, volatile void *addr)
-{
-    asm volatile (
-        "btcl %1,%0"
-        : "=m" (ADDR)
-        : "Ir" (nr), "m" (ADDR) : "memory");
-}
-#define __change_bit(nr, addr) ({                       \
-    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
-    __change_bit(nr, addr);                             \
-})
-
-/**
- * change_bit - Toggle a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * change_bit() is atomic and may not be reordered.
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-static inline void change_bit(int nr, volatile void *addr)
-{
-    asm volatile (
-        LOCK_PREFIX
-        "btcl %1,%0"
-        : "=m" (ADDR)
-        : "Ir" (nr), "m" (ADDR) : "memory");
-}
-#define change_bit(nr, addr) ({                         \
-    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
-    change_bit(nr, addr);                               \
-})
-
-/**
- * test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.  
- * It also implies a memory barrier.
- */
-static inline int test_and_set_bit(int nr, volatile void *addr)
-{
-    int oldbit;
-
-    asm volatile (
-        LOCK_PREFIX
-        "btsl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit), "=m" (ADDR)
-        : "Ir" (nr), "m" (ADDR) : "memory");
-    return oldbit;
-}
-#define test_and_set_bit(nr, addr) ({                   \
-    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
-    test_and_set_bit(nr, addr);                         \
-})
-
-/**
- * __test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.  
- * If two examples of this operation race, one can appear to succeed
- * but actually fail.  You must protect multiple accesses with a lock.
- */
-static inline int __test_and_set_bit(int nr, volatile void *addr)
-{
-    int oldbit;
-
-    asm volatile (
-        "btsl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit), "=m" (ADDR)
-        : "Ir" (nr), "m" (ADDR) : "memory");
-    return oldbit;
-}
-#define __test_and_set_bit(nr, addr) ({                 \
-    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
-    __test_and_set_bit(nr, addr);                       \
-})
-
-/**
- * test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.  
- * It also implies a memory barrier.
- */
-static inline int test_and_clear_bit(int nr, volatile void *addr)
-{
-    int oldbit;
-
-    asm volatile (
-        LOCK_PREFIX
-        "btrl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit), "=m" (ADDR)
-        : "Ir" (nr), "m" (ADDR) : "memory");
-    return oldbit;
-}
-#define test_and_clear_bit(nr, addr) ({                 \
-    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
-    test_and_clear_bit(nr, addr);                       \
-})
-
-/**
- * __test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.  
- * If two examples of this operation race, one can appear to succeed
- * but actually fail.  You must protect multiple accesses with a lock.
- */
-static inline int __test_and_clear_bit(int nr, volatile void *addr)
-{
-    int oldbit;
-
-    asm volatile (
-        "btrl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit), "=m" (ADDR)
-        : "Ir" (nr), "m" (ADDR) : "memory");
-    return oldbit;
-}
-#define __test_and_clear_bit(nr, addr) ({               \
-    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
-    __test_and_clear_bit(nr, addr);                     \
-})
-
-/* WARNING: non atomic and it can be reordered! */
-static inline int __test_and_change_bit(int nr, volatile void *addr)
-{
-    int oldbit;
-
-    asm volatile (
-        "btcl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit), "=m" (ADDR)
-        : "Ir" (nr), "m" (ADDR) : "memory");
-    return oldbit;
-}
-#define __test_and_change_bit(nr, addr) ({              \
-    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
-    __test_and_change_bit(nr, addr);                    \
-})
-
-/**
- * test_and_change_bit - Change a bit and return its new value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.  
- * It also implies a memory barrier.
- */
-static inline int test_and_change_bit(int nr, volatile void *addr)
-{
-    int oldbit;
-
-    asm volatile (
-        LOCK_PREFIX
-        "btcl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit), "=m" (ADDR)
-        : "Ir" (nr), "m" (ADDR) : "memory");
-    return oldbit;
-}
-#define test_and_change_bit(nr, addr) ({                \
-    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
-    test_and_change_bit(nr, addr);                      \
-})
-
-static inline int constant_test_bit(int nr, const volatile void *addr)
-{
-    return ((1U << (nr & 31)) &
-            (((const volatile unsigned int *)addr)[nr >> 5])) != 0;
-}
-
-static inline int variable_test_bit(int nr, const volatile void *addr)
-{
-    int oldbit;
-
-    asm volatile (
-        "btl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit)
-        : "m" (CONST_ADDR), "Ir" (nr) : "memory" );
-    return oldbit;
-}
-
-#define test_bit(nr, addr) ({                           \
-    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
-    (__builtin_constant_p(nr) ?                         \
-     constant_test_bit((nr),(addr)) :                   \
-     variable_test_bit((nr),(addr)));                   \
-})
-
-extern unsigned int __find_first_bit(
-    const unsigned long *addr, unsigned int size);
-extern unsigned int __find_next_bit(
-    const unsigned long *addr, unsigned int size, unsigned int offset);
-extern unsigned int __find_first_zero_bit(
-    const unsigned long *addr, unsigned int size);
-extern unsigned int __find_next_zero_bit(
-    const unsigned long *addr, unsigned int size, unsigned int offset);
-
-static inline unsigned int __scanbit(unsigned long val, unsigned long max)
-{
-    asm ( "bsf %1,%0 ; cmovz %2,%0" : "=&r" (val) : "r" (val), "r" (max) );
-    return (unsigned int)val;
-}
-
-/**
- * find_first_bit - find the first set bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum size to search
- *
- * Returns the bit-number of the first set bit, not the number of the byte
- * containing a bit.
- */
-#define find_first_bit(addr,size)                               \
-((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ?       \
-  (__scanbit(*(const unsigned long *)addr, size)) :             \
-  __find_first_bit(addr,size)))
-
-/**
- * find_next_bit - find the first set bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
- */
-#define find_next_bit(addr,size,off)                                     \
-((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ?                \
-  ((off) + (__scanbit((*(const unsigned long *)addr) >> (off), size))) : \
-  __find_next_bit(addr,size,off)))
-
-/**
- * find_first_zero_bit - find the first zero bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum size to search
- *
- * Returns the bit-number of the first zero bit, not the number of the byte
- * containing a bit.
- */
-#define find_first_zero_bit(addr,size)                          \
-((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ?       \
-  (__scanbit(~*(const unsigned long *)addr, size)) :            \
-  __find_first_zero_bit(addr,size)))
-
-/**
- * find_next_zero_bit - find the first zero bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
- */
-#define find_next_zero_bit(addr,size,off)                                   \
-((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ?                   \
-  ((off)+(__scanbit(~(((*(const unsigned long *)addr)) >> (off)), size))) : \
-  __find_next_zero_bit(addr,size,off)))
-
-
-/**
- * find_first_set_bit - find the first set bit in @word
- * @word: the word to search
- * 
- * Returns the bit-number of the first set bit. The input must *not* be zero.
- */
-static inline unsigned int find_first_set_bit(unsigned long word)
-{
-    asm ( "bsf %1,%0" : "=r" (word) : "r" (word) );
-    return (unsigned int)word;
-}
-
-/**
- * ffs - find first bit set
- * @x: the word to search
- *
- * This is defined the same way as the libc and compiler builtin ffs routines.
- */
-#if 0
-static inline int ffs(unsigned long x)
-{
-    long r;
-
-    asm ( "bsf %1,%0\n\t"
-          "jnz 1f\n\t"
-          "mov $-1,%0\n"
-          "1:" : "=r" (r) : "rm" (x));
-    return (int)r+1;
-}
-#endif
-
-/**
- * fls - find last bit set
- * @x: the word to search
- *
- * This is defined the same way as ffs.
- */
-static inline int fls(unsigned long x)
-{
-    long r;
-
-    asm ( "bsr %1,%0\n\t"
-          "jnz 1f\n\t"
-          "mov $-1,%0\n"
-          "1:" : "=r" (r) : "rm" (x));
-    return (int)r+1;
-}
-
-/**
- * hweightN - returns the hamming weight of a N-bit word
- * @x: the word to weigh
- *
- * The Hamming Weight of a number is the total number of bits set in it.
- */
-#define hweight64(x) generic_hweight64(x)
-#define hweight32(x) generic_hweight32(x)
-#define hweight16(x) generic_hweight16(x)
-#define hweight8(x) generic_hweight8(x)
-
-#endif /* _X86_BITOPS_H */
diff -r 5553bd24a3d9 -r 9476d85932e5 tools/xenpaging/policy_default.c
--- a/tools/xenpaging/policy_default.c  Fri Jun 10 10:47:02 2011 +0200
+++ b/tools/xenpaging/policy_default.c  Fri Jun 10 10:47:03 2011 +0200
@@ -21,8 +21,7 @@
  */
 
 
-#include "bitops.h"
-#include "xc.h"
+#include "xc_bitops.h"
 #include "policy.h"
 
 
@@ -35,26 +34,23 @@ static unsigned int mru_size;
 static unsigned long *bitmap;
 static unsigned long *unconsumed;
 static unsigned long current_gfn;
-static unsigned long bitmap_size;
 static unsigned long max_pages;
 
 
 int policy_init(xenpaging_t *paging)
 {
     int i;
-    int rc;
+    int rc = -ENOMEM;
 
     /* Allocate bitmap for pages not to page out */
-    rc = alloc_bitmap(&bitmap, paging->bitmap_size);
-    if ( rc != 0 )
+    bitmap = bitmap_alloc(paging->domain_info->max_pages);
+    if ( !bitmap )
         goto out;
     /* Allocate bitmap to track unusable pages */
-    rc = alloc_bitmap(&unconsumed, paging->bitmap_size);
-    if ( rc != 0 )
+    unconsumed = bitmap_alloc(paging->domain_info->max_pages);
+    if ( !unconsumed )
         goto out;
 
-    /* record bitmap_size */
-    bitmap_size = paging->bitmap_size;
     max_pages = paging->domain_info->max_pages;
 
     /* Initialise MRU list of paged in pages */
@@ -65,10 +61,7 @@ int policy_init(xenpaging_t *paging)
 
     mru = malloc(sizeof(*mru) * mru_size);
     if ( mru == NULL )
-    {
-        rc = -ENOMEM;
         goto out;
-    }
 
     for ( i = 0; i < mru_size; i++ )
         mru[i] = INVALID_MFN;
@@ -76,6 +69,7 @@ int policy_init(xenpaging_t *paging)
     /* Don't page out page 0 */
     set_bit(0, bitmap);
 
+    rc = 0;
  out:
     return rc;
 }
diff -r 5553bd24a3d9 -r 9476d85932e5 tools/xenpaging/xc.c
--- a/tools/xenpaging/xc.c      Fri Jun 10 10:47:02 2011 +0200
+++ b/tools/xenpaging/xc.c      Fri Jun 10 10:47:03 2011 +0200
@@ -31,20 +31,6 @@
 #include "xc.h"
 
 
-int alloc_bitmap(unsigned long **bitmap, unsigned long bitmap_size)
-{
-    if ( *bitmap == NULL )
-    {
-        *bitmap = calloc(bitmap_size / BITS_PER_LONG, sizeof(unsigned long));
-
-        if ( *bitmap == NULL )
-            return -ENOMEM;
-    }
-
-    memset(*bitmap, 0, bitmap_size / 8);
-
-    return 0;
-}
 
 int xc_mem_paging_flush_ioemu_cache(domid_t domain_id)
 {
diff -r 5553bd24a3d9 -r 9476d85932e5 tools/xenpaging/xc.h
--- a/tools/xenpaging/xc.h      Fri Jun 10 10:47:02 2011 +0200
+++ b/tools/xenpaging/xc.h      Fri Jun 10 10:47:03 2011 +0200
@@ -39,7 +39,6 @@
 #endif
 
 
-#define BITS_PER_LONG 64
 
 
 typedef struct xc_platform_info {
@@ -50,7 +49,6 @@ typedef struct xc_platform_info {
 } xc_platform_info_t;
 
 
-int alloc_bitmap(unsigned long **bitmap, unsigned long bitmap_size);
 
 int xc_mem_paging_flush_ioemu_cache(domid_t domain_id);
 int xc_wait_for_event(xc_interface *xch, xc_evtchn *xce);
diff -r 5553bd24a3d9 -r 9476d85932e5 tools/xenpaging/xenpaging.c
--- a/tools/xenpaging/xenpaging.c       Fri Jun 10 10:47:02 2011 +0200
+++ b/tools/xenpaging/xenpaging.c       Fri Jun 10 10:47:03 2011 +0200
@@ -31,7 +31,7 @@
 
 #include <xen/mem_event.h>
 
-#include "bitops.h"
+#include "xc_bitops.h"
 #include "file_ops.h"
 #include "xc.h"
 
@@ -200,11 +200,8 @@ static xenpaging_t *xenpaging_init(domid
     }
 
     /* Allocate bitmap for tracking pages that have been paged out */
-    paging->bitmap_size = (paging->domain_info->max_pages + BITS_PER_LONG) &
-                          ~(BITS_PER_LONG - 1);
-
-    rc = alloc_bitmap(&paging->bitmap, paging->bitmap_size);
-    if ( rc != 0 )
+    paging->bitmap = bitmap_alloc(paging->domain_info->max_pages);
+    if ( !paging->bitmap )
     {
         ERROR("Error allocating bitmap");
         goto err;
diff -r 5553bd24a3d9 -r 9476d85932e5 tools/xenpaging/xenpaging.h
--- a/tools/xenpaging/xenpaging.h       Fri Jun 10 10:47:02 2011 +0200
+++ b/tools/xenpaging/xenpaging.h       Fri Jun 10 10:47:03 2011 +0200
@@ -40,7 +40,6 @@ typedef struct xenpaging {
     xc_platform_info_t *platform_info;
     xc_domaininfo_t    *domain_info;
 
-    unsigned long  bitmap_size;
     unsigned long *bitmap;
 
     mem_event_t mem_event;

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


 

