[Xen-devel] [PATCH] linux/xenoprof: dynamic buffer array allocation

To: <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] [PATCH] linux/xenoprof: dynamic buffer array allocation
From: "Jan Beulich" <JBeulich@xxxxxxxxxx>
Date: Tue, 05 Jan 2010 12:54:38 +0000

The recent change to locally define MAX_VIRT_CPUS wasn't really
appropriate: since there is no longer a hard limit on the number of
vCPUs, these arrays should be allocated dynamically.

As usual, this was written against 2.6.32.2 and made to apply to the
2.6.18 tree without further testing.
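
The helpers added below choose the allocator by size: a pointer array
that fits in one page comes from kmalloc(), anything larger falls back
to vmalloc(), so large vCPU counts don't depend on a high-order
contiguous allocation succeeding. A minimal standalone sketch of that
idiom (generic names, not the patch's own code):

#include <linux/mm.h>      /* PAGE_SIZE */
#include <linux/slab.h>    /* kmalloc(), kfree() */
#include <linux/vmalloc.h> /* vmalloc(), vfree() */

/* Allocate an array of n pointers: physically contiguous memory for
 * the common small case, vmalloc() for the large one. */
static void **alloc_ptr_array(unsigned int n)
{
	size_t size = n * sizeof(void *);

	if (size <= PAGE_SIZE)
		return kmalloc(size, GFP_KERNEL);
	return vmalloc(size);
}

/* Free with the matching primitive; both accept NULL. */
static void free_ptr_array(void **arr, unsigned int n)
{
	if (n * sizeof(void *) <= PAGE_SIZE)
		kfree(arr);
	else
		vfree(arr);
}

Later kernels wrap exactly this fallback in kvmalloc_array()/kvfree(),
but those helpers postdate the 2.6.18 and 2.6.32 trees targeted here.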

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>

--- head-2010-01-04.orig/drivers/xen/xenoprof/xenoprofile.c	2010-01-04 13:31:29.000000000 +0100
+++ head-2010-01-04/drivers/xen/xenoprof/xenoprofile.c	2010-01-04 16:51:11.000000000 +0100
@@ -32,15 +32,14 @@
 #include "../../../drivers/oprofile/event_buffer.h"
 
 #define MAX_XENOPROF_SAMPLES 16
-#define MAX_VIRT_CPUS 128
 
 /* sample buffers shared with Xen */
-static xenoprof_buf_t *xenoprof_buf[MAX_VIRT_CPUS];
+static xenoprof_buf_t **__read_mostly xenoprof_buf;
 /* Shared buffer area */
 static struct xenoprof_shared_buffer shared_buffer;
 
 /* Passive sample buffers shared with Xen */
-static xenoprof_buf_t *p_xenoprof_buf[MAX_OPROF_DOMAINS][MAX_VIRT_CPUS];
+static xenoprof_buf_t **__read_mostly p_xenoprof_buf[MAX_OPROF_DOMAINS];
 /* Passive shared buffer area */
 static struct xenoprof_shared_buffer p_shared_buffer[MAX_OPROF_DOMAINS];
 
@@ -250,11 +249,32 @@ static int bind_virq(void)
 }
 
 
+static xenoprof_buf_t **get_buffer_array(unsigned int nbuf)
+{
+       size_t size = nbuf * sizeof(xenoprof_buf_t *);
+
+       if (size <= PAGE_SIZE)
+               return kmalloc(size, GFP_KERNEL);
+       return vmalloc(size);
+}
+
+static void release_buffer_array(xenoprof_buf_t **buf, unsigned int nbuf)
+{
+       if (nbuf * sizeof(xenoprof_buf_t *) <= PAGE_SIZE)
+               kfree(buf);
+       else
+               vfree(buf);
+}
+
+
 static void unmap_passive_list(void)
 {
        int i;
-       for (i = 0; i < pdomains; i++)
+       for (i = 0; i < pdomains; i++) {
                xenoprof_arch_unmap_shared_buffer(&p_shared_buffer[i]);
+               release_buffer_array(p_xenoprof_buf[i],
+                                    passive_domains[i].nbuf);
+       }
        pdomains = 0;
 }
 
@@ -274,10 +294,16 @@ static int map_xenoprof_buffer(int max_s
                return ret;
        nbuf = get_buffer.nbuf;
 
+       xenoprof_buf = get_buffer_array(nbuf);
+       if (!xenoprof_buf) {
+               xenoprof_arch_unmap_shared_buffer(&shared_buffer);
+               return -ENOMEM;
+       }
+
        for (i=0; i< nbuf; i++) {
                buf = (struct xenoprof_buf*) 
                        &shared_buffer.buffer[i * get_buffer.bufsize];
-               BUG_ON(buf->vcpu_id >= MAX_VIRT_CPUS);
+               BUG_ON(buf->vcpu_id >= nbuf);
                xenoprof_buf[buf->vcpu_id] = buf;
        }
 
@@ -292,8 +318,10 @@ static int xenoprof_setup(void)
        if ( (ret = map_xenoprof_buffer(MAX_XENOPROF_SAMPLES)) )
                return ret;
 
-       if ( (ret = bind_virq()) )
+       if ( (ret = bind_virq()) ) {
+               release_buffer_array(xenoprof_buf, nbuf);
                return ret;
+       }
 
        if (xenoprof_is_primary) {
                /* Define dom0 as an active domain if not done yet */
@@ -336,6 +364,7 @@ static int xenoprof_setup(void)
        return 0;
  err:
        unbind_virq();
+       release_buffer_array(xenoprof_buf, nbuf);
        return ret;
 }
 
@@ -357,6 +386,7 @@ static void xenoprof_shutdown(void)
        xenoprof_arch_unmap_shared_buffer(&shared_buffer);
        if (xenoprof_is_primary)
                unmap_passive_list();
+       release_buffer_array(xenoprof_buf, nbuf);
 }
 
 
@@ -449,11 +479,19 @@ static int xenoprof_set_passive(int * p_
                                                &p_shared_buffer[i]);
                if (ret)
                        goto out;
+
+               p_xenoprof_buf[i] = get_buffer_array(passive_domains[i].nbuf);
+               if (!p_xenoprof_buf[i]) {
+                       ++i;
+                       ret = -ENOMEM;
+                       goto out;
+               }
+
                for (j = 0; j < passive_domains[i].nbuf; j++) {
                        buf = (struct xenoprof_buf *)
                                &p_shared_buffer[i].buffer[
                                j * passive_domains[i].bufsize];
-                       BUG_ON(buf->vcpu_id >= MAX_VIRT_CPUS);
+                       BUG_ON(buf->vcpu_id >= passive_domains[i].nbuf);
                        p_xenoprof_buf[i][buf->vcpu_id] = buf;
                }
        }
@@ -462,8 +500,11 @@ static int xenoprof_set_passive(int * p_
        return 0;
 
 out:
-       for (j = 0; j < i; j++)
-               xenoprof_arch_unmap_shared_buffer(&p_shared_buffer[i]);
+       for (j = 0; j < i; j++) {
+               xenoprof_arch_unmap_shared_buffer(&p_shared_buffer[j]);
+               release_buffer_array(p_xenoprof_buf[j],
+                                    passive_domains[j].nbuf);
+       }
 
        return ret;
 }
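
One subtlety in xenoprof_set_passive()'s error handling is worth
spelling out: when get_buffer_array() fails for domain i, the ++i
before the goto widens the cleanup loop so the shared buffer that was
just mapped for that domain still gets unmapped, and handing that
domain's NULL pointer array to release_buffer_array() is harmless
since kfree() and vfree() both accept NULL. A sketch of that unwind
idiom, with hypothetical helpers and arrays standing in for the real
ones:

#include <linux/errno.h>   /* ENOMEM */

/* Hypothetical stand-ins for the real map/unmap calls and for the
 * pointer-array helpers sketched earlier in this mail. */
void map_shared(unsigned int dom);
void unmap_shared(unsigned int dom);
void **alloc_ptr_array(unsigned int n);
void free_ptr_array(void **arr, unsigned int n);

#define MAX_DOMS 32
static void **bufs[MAX_DOMS];        /* per-domain pointer arrays */
static unsigned int nbuf[MAX_DOMS];  /* per-domain buffer counts */

static int setup_domains(unsigned int ndoms)
{
	unsigned int i, j;
	int ret = 0;

	for (i = 0; i < ndoms; i++) {
		map_shared(i);
		bufs[i] = alloc_ptr_array(nbuf[i]);
		if (!bufs[i]) {
			++i;	/* domain i is mapped: include it below */
			ret = -ENOMEM;
			goto out;
		}
	}
	return 0;

 out:
	for (j = 0; j < i; j++) {
		unmap_shared(j);
		/* NULL for the domain whose allocation failed; that's fine */
		free_ptr_array(bufs[j], nbuf[j]);
	}
	return ret;
}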


Attachment: xenlinux-oprofile-buffer-alloc.patch
Description: Text document
