
To: xen-devel@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-devel] [PATCH 4 of 5] Update __insert_record() to copy the trace record to individual mfns
From: Olaf Hering <olaf@xxxxxxxxx>
Date: Fri, 06 May 2011 20:25:35 +0200
Cc: George Dunlap <george.dunlap@xxxxxxxxxx>
Delivery-date: Fri, 06 May 2011 11:30:56 -0700
In-reply-to: <patchbomb.1304706331@localhost>
References: <patchbomb.1304706331@localhost>
User-agent: Mercurial-patchbomb/1.8.1
# HG changeset patch
# User Olaf Hering <olaf@xxxxxxxxx>
# Date 1304700881 -7200
# Node ID 1c5da4d9e33c821b9e3276d7aefe7ee16ce7b162
# Parent  1631b61acaa8e88437d0f1861409ab1824de2721
Update __insert_record() to copy the trace record to individual mfns.
This is a prerequisite for changing the per-cpu trace buffer allocation
from contiguous to non-contiguous memory.

Signed-off-by: Olaf Hering <olaf@xxxxxxxxx>
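
For reference, here is a minimal userspace sketch (not the hypervisor code) of
the split-copy idea this patch introduces: a record that would straddle a page
boundary is assembled in a local bounce buffer and then copied in two pieces,
so each destination page can come from a separately allocated mfn. PAGE_SIZE,
struct rec and the two static "pages" below are illustrative assumptions, not
Xen's structures.

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096u

    struct rec {                 /* stand-in for struct t_rec */
        uint32_t event;
        uint32_t extra[6];
    };

    /* Copy one record at byte offset 'off' into two non-contiguous pages. */
    static void insert_split(unsigned char *this_page, unsigned char *next_page,
                             uint32_t off, const struct rec *r)
    {
        uint32_t remaining = PAGE_SIZE - off;

        if (sizeof(*r) <= remaining) {
            /* Record fits in the current page: single copy. */
            memcpy(this_page + off, r, sizeof(*r));
        } else {
            /* Record straddles the boundary: split it across both pages. */
            memcpy(this_page + off, r, remaining);
            memcpy(next_page, (const unsigned char *)r + remaining,
                   sizeof(*r) - remaining);
        }
    }

    int main(void)
    {
        static unsigned char page_a[PAGE_SIZE], page_b[PAGE_SIZE];
        struct rec r = { .event = 0x1234 };

        /* Offset chosen so the record crosses from page_a into page_b. */
        insert_split(page_a, page_b, PAGE_SIZE - 8, &r);
        printf("first 8 bytes land in page_a, the rest in page_b\n");
        return 0;
    }

The actual patch keeps the common case cheap in the same way as the fast path
above: when the record fits in the remaining space of the current page it is
written there directly, and the bounce buffer plus two-part copy is only used
for records that cross a page boundary.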

diff -r 1631b61acaa8 -r 1c5da4d9e33c xen/common/trace.c
--- a/xen/common/trace.c        Fri May 06 17:56:35 2011 +0200
+++ b/xen/common/trace.c        Fri May 06 18:54:41 2011 +0200
@@ -52,7 +52,6 @@ static struct t_info *t_info;
 static unsigned int t_info_pages;
 
 static DEFINE_PER_CPU_READ_MOSTLY(struct t_buf *, t_bufs);
-static DEFINE_PER_CPU_READ_MOSTLY(unsigned char *, t_data);
 static DEFINE_PER_CPU_READ_MOSTLY(spinlock_t, t_lock);
 static u32 data_size __read_mostly;
 
@@ -193,7 +192,6 @@ static int alloc_trace_bufs(unsigned int
 
         per_cpu(t_bufs, cpu) = buf = rawbuf;
         buf->cons = buf->prod = 0;
-        per_cpu(t_data, cpu) = (unsigned char *)(buf + 1);
     }
 
     offset = t_info_first_offset;
@@ -457,10 +455,16 @@ static inline u32 calc_bytes_avail(const
     return data_size - calc_unconsumed_bytes(buf);
 }
 
-static inline struct t_rec *next_record(const struct t_buf *buf,
-                                        uint32_t *next)
+static unsigned char *next_record(const struct t_buf *buf, uint32_t *next,
+                                 unsigned char **next_page,
+                                 uint32_t *offset_in_page)
 {
     u32 x = buf->prod, cons = buf->cons;
+    uint32_t per_cpu_mfn_offset;
+    uint32_t per_cpu_mfn_nr;
+    uint32_t *mfn_list;
+    uint32_t mfn;
+    unsigned char *this_page;
 
     barrier(); /* must read buf->prod and buf->cons only once */
     *next = x;
@@ -472,7 +476,27 @@ static inline struct t_rec *next_record(
 
     ASSERT(x < data_size);
 
-    return (struct t_rec *)&this_cpu(t_data)[x];
+    /* add leading header to get total offset of next record */
+    x += sizeof(struct t_buf);
+    *offset_in_page = x % PAGE_SIZE;
+
+    /* offset into array of mfns */
+    per_cpu_mfn_nr = x / PAGE_SIZE;
+    per_cpu_mfn_offset = t_info->mfn_offset[smp_processor_id()];
+    mfn_list = (uint32_t *)t_info;
+    mfn = mfn_list[per_cpu_mfn_offset + per_cpu_mfn_nr];
+    this_page = mfn_to_virt(mfn);
+    if (per_cpu_mfn_nr + 1 >= opt_tbuf_size)
+    {
+        /* reached end of buffer? */
+        *next_page = NULL;
+    }
+    else
+    {
+        mfn = mfn_list[per_cpu_mfn_offset + per_cpu_mfn_nr + 1];
+        *next_page = mfn_to_virt(mfn);
+    }
+    return this_page;
 }
 
 static inline void __insert_record(struct t_buf *buf,
@@ -482,28 +506,37 @@ static inline void __insert_record(struc
                                    unsigned int rec_size,
                                    const void *extra_data)
 {
-    struct t_rec *rec;
+    struct t_rec split_rec, *rec;
     uint32_t *dst;
+    unsigned char *this_page, *next_page;
     unsigned int extra_word = extra / sizeof(u32);
     unsigned int local_rec_size = calc_rec_size(cycles, extra);
     uint32_t next;
+    uint32_t offset;
+    uint32_t remaining;
 
     BUG_ON(local_rec_size != rec_size);
     BUG_ON(extra & 3);
 
-    rec = next_record(buf, &next);
-    if ( !rec )
+    this_page = next_record(buf, &next, &next_page, &offset);
+    if ( !this_page )
         return;
-    /* Double-check once more that we have enough space.
-     * Don't bugcheck here, in case the userland tool is doing
-     * something stupid. */
-    if ( (unsigned char *)rec + rec_size > this_cpu(t_data) + data_size )
+
+    remaining = PAGE_SIZE - offset;
+
+    if ( unlikely(rec_size > remaining) )
     {
-        if ( printk_ratelimit() )
+        if ( next_page == NULL )
+        {
+            /* access beyond end of buffer */
             printk(XENLOG_WARNING
-                   "%s: size=%08x prod=%08x cons=%08x rec=%u\n",
-                   __func__, data_size, next, buf->cons, rec_size);
-        return;
+                   "%s: size=%08x prod=%08x cons=%08x rec=%u remaining=%u\n",
+                   __func__, data_size, next, buf->cons, rec_size, remaining);
+            return;
+        }
+        rec = &split_rec;
+    } else {
+        rec = (struct t_rec*)(this_page + offset);
     }
 
     rec->event = event;
@@ -520,6 +553,12 @@ static inline void __insert_record(struc
     if ( extra_data && extra )
         memcpy(dst, extra_data, extra);
 
+    if ( unlikely(rec_size > remaining) )
+    {
+        memcpy(this_page + offset, rec, remaining);
+        memcpy(next_page, (char *)rec + remaining, rec_size - remaining);
+    }
+
     wmb();
 
     next += rec_size;

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel