[Xen-devel] [PATCH] Reduce overhead in find_domain_by_id() [1/2]

To: <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] [PATCH] Reduce overhead in find_domain_by_id() [1/2]
From: "Santos, Jose Renato G" <joserenato.santos@xxxxxx>
Date: Tue, 5 Dec 2006 19:35:47 -0600
Cc: Yoshio Turner <yoshiotu@xxxxxxxxxx>, Jose Renato Santos <jsantos@xxxxxxxxxx>, G John Janakiraman <john@xxxxxxxxxxxxxxxxxxx>
Delivery-date: Tue, 05 Dec 2006 17:36:40 -0800
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
Thread-index: AccY1tptRzO/fIFzQ6+yIQ8hZp+QPA==
Thread-topic: [PATCH] Reduce overhead in find_domain_by_id() [1/2]

This is a set of patches to improve the performance of find_domain_by_id().
find_domain_by_id() shows up high in profiles of network-I/O-intensive
workloads. Most of its cost comes from three calls of approximately equal
cost: 1) read_lock(), 2) read_unlock(), and 3) get_domain().

These patches replace the lock protecting domain_list and domain_hash
with a lock-free RCU scheme. Experiments confirm that this reduces the
cost of find_domain_by_id() by roughly two thirds.
The patches apply cleanly to changeset 12732.
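
To illustrate the intent, the reader side ends up looking roughly like
this once the lock is gone (a sketch only -- the actual conversion is in
patch 2/2; the hash-walk names follow xen/common/domain.c of this era and
should be treated as illustrative):

  struct domain *find_domain_by_id(domid_t dom)
  {
      struct domain *d;

      rcu_read_lock();   /* a no-op: just delimits the read-side section */
      d = rcu_dereference(domain_hash[DOMAIN_HASH(dom)]);
      while ( d != NULL )
      {
          if ( d->domain_id == dom )
          {
              if ( unlikely(!get_domain(d)) )
                  d = NULL;
              break;
          }
          d = rcu_dereference(d->next_in_hashbucket);
      }
      rcu_read_unlock();

      return d;
  }

The read_lock()/read_unlock() pair -- two of the three cost components
above -- disappears entirely; only the get_domain() reference count
remains.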

Renato

Patch:
  1/2 - Import Linux RCU code into Xen
        This is a simplified version of the Linux code, removing
        several components not needed in Xen (at least for now).
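
As a usage sketch (hypothetical, not part of this patch), the update side
would unlink the domain under the existing writers' lock and defer the
actual free via call_rcu(). Here free_domain_rcu(), unlink_domain(),
free_domain() and the rcu_head embedded in struct domain are placeholders:

  static void free_domain_rcu(struct rcu_head *head)
  {
      struct domain *d = container_of(head, struct domain, rcu);
      free_domain(d);   /* safe: all earlier readers have finished */
  }

  /* Writers still serialize with each other; RCU only removes the
   * lock from the read path. */
  write_lock(&domlist_lock);
  unlink_domain(d);
  write_unlock(&domlist_lock);
  call_rcu(&d->rcu, free_domain_rcu);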

================================================================
Signed-off-by: Jose Renato Santos <jsantos@xxxxxxxxxx>

# HG changeset patch
# User jsantos@xxxxxxxxxx
# Node ID 555277cafc3576a3a96bd7ec345c3871b18aa867
# Parent  7df4d8cfba3b2fbd12ac1f287ab854ff4b242721
Add RCU support to Xen

diff -r 7df4d8cfba3b -r 555277cafc35 xen/arch/x86/setup.c
--- a/xen/arch/x86/setup.c      Tue Dec  5 12:42:29 2006 +0000
+++ b/xen/arch/x86/setup.c      Tue Dec  5 14:47:26 2006 -0800
@@ -17,6 +17,7 @@
 #include <xen/hypercall.h>
 #include <xen/keyhandler.h>
 #include <xen/numa.h>
+#include <xen/rcupdate.h>
 #include <public/version.h>
 #include <asm/bitops.h>
 #include <asm/smp.h>
@@ -629,6 +630,8 @@ void __init __start_xen(multiboot_info_t
 
     trap_init();
 
+    rcu_init();
+    
     timer_init();
 
     early_time_init();
@@ -664,8 +667,10 @@ void __init __start_xen(multiboot_info_t
     {
         if ( num_online_cpus() >= max_cpus )
             break;
-        if ( !cpu_online(i) )
+        if ( !cpu_online(i) ) {
+            rcu_online_cpu(i);
             __cpu_up(i);
+        }
 
         /* Set up cpu_to_node[]. */
         srat_detect_node(i);
diff -r 7df4d8cfba3b -r 555277cafc35 xen/common/Makefile
--- a/xen/common/Makefile       Tue Dec  5 12:42:29 2006 +0000
+++ b/xen/common/Makefile       Tue Dec  5 14:47:26 2006 -0800
@@ -27,6 +27,7 @@ obj-y += version.o
 obj-y += version.o
 obj-y += vsprintf.o
 obj-y += xmalloc.o
+obj-y += rcupdate.o
 
 obj-$(perfc)       += perfc.o
 obj-$(crash_debug) += gdbstub.o
diff -r 7df4d8cfba3b -r 555277cafc35 xen/common/schedule.c
--- a/xen/common/schedule.c     Tue Dec  5 12:42:29 2006 +0000
+++ b/xen/common/schedule.c     Tue Dec  5 14:47:26 2006 -0800
@@ -29,6 +29,7 @@
 #include <xen/mm.h>
 #include <xen/errno.h>
 #include <xen/guest_access.h>
+#include <xen/rcupdate.h>
 #include <xen/multicall.h>
 #include <public/sched.h>
 
@@ -546,9 +547,15 @@ static void schedule(void)
     struct schedule_data *sd;
     struct task_slice     next_slice;
     s32                   r_time;     /* time for new dom to run */
+    int                   cpu = smp_processor_id();
 
     ASSERT(!in_irq());
     ASSERT(this_cpu(mc_state).flags == 0);
+
+    if (rcu_pending(cpu))
+    /* since schedule() is called from softirq context
+         * we know we are in a quiescent state */
+        rcu_check_callbacks(cpu,1);
 
     perfc_incrc(sched_run);
 
@@ -643,6 +650,12 @@ static void t_timer_fn(void *unused)
 static void t_timer_fn(void *unused)
 {
     struct vcpu *v   = current;
+    int cpu = smp_processor_id();
+
+    if (rcu_pending(cpu))
+        /* since t_timer_fn is called from softirq context 
+         * we know we are in a quiescent state */
+        rcu_check_callbacks(cpu,1);
 
     this_cpu(schedule_data).tick++;
 
diff -r 7df4d8cfba3b -r 555277cafc35 xen/include/xen/softirq.h
--- a/xen/include/xen/softirq.h Tue Dec  5 12:42:29 2006 +0000
+++ b/xen/include/xen/softirq.h Tue Dec  5 14:47:26 2006 -0800
@@ -9,8 +9,9 @@
 #define NMI_SOFTIRQ                       4
 #define PAGE_SCRUB_SOFTIRQ                5
 #define TRACE_SOFTIRQ                     6
+#define RCU_SOFTIRQ                       7
 
-#define NR_COMMON_SOFTIRQS                7
+#define NR_COMMON_SOFTIRQS                8
 
 #include <asm/softirq.h>
 
diff -r 7df4d8cfba3b -r 555277cafc35 xen/common/rcupdate.c
--- /dev/null   Thu Jan  1 00:00:00 1970 +0000
+++ b/xen/common/rcupdate.c     Tue Dec  5 14:47:26 2006 -0800
@@ -0,0 +1,381 @@
+/*
+ * Read-Copy Update mechanism for mutual exclusion
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2001
+ *
+ * Authors: Dipankar Sarma <dipankar@xxxxxxxxxx>
+ *         Manfred Spraul <manfred@xxxxxxxxxxxxxxxx>
+ * 
+ * Modifications for Xen: Jose Renato Santos
+ * Copyright (C) Hewlett-Packard, 2006
+ *
+ * Based on the original work by Paul McKenney <paulmck@xxxxxxxxxx>
+ * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
+ * Papers:
+ * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
+ * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
+ *
+ * For detailed explanation of Read-Copy Update mechanism see -
+ *             http://lse.sourceforge.net/locking/rcupdate.html
+ *
+ */
+#include <xen/types.h>
+#include <xen/kernel.h>
+#include <xen/init.h>
+#include <xen/spinlock.h>
+#include <xen/smp.h>
+#include <xen/rcupdate.h>
+#include <xen/sched.h>
+#include <asm/atomic.h>
+#include <xen/bitops.h>
+#include <xen/percpu.h>
+#include <xen/softirq.h>
+
+/* Definition for rcupdate control block. */
+struct rcu_ctrlblk rcu_ctrlblk = {
+       .cur = -300,
+       .completed = -300,
+       .lock = SPIN_LOCK_UNLOCKED,
+       .cpumask = CPU_MASK_NONE,
+};
+
+DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L };
+
+static int blimit = 10;
+static int qhimark = 10000;
+static int qlowmark = 100;
+#ifdef CONFIG_SMP
+static int rsinterval = 1000;
+#endif
+
+#ifdef CONFIG_SMP
+static void force_quiescent_state(struct rcu_data *rdp,
+                       struct rcu_ctrlblk *rcp)
+{
+       cpumask_t cpumask;
+       raise_softirq(SCHEDULE_SOFTIRQ);
+       if (unlikely(rdp->qlen - rdp->last_rs_qlen > rsinterval)) {
+               rdp->last_rs_qlen = rdp->qlen;
+               /*
+                * Don't send IPI to itself. With irqs disabled,
+                * rdp->cpu is the current cpu.
+                */
+               cpumask = rcp->cpumask;
+               cpu_clear(rdp->cpu, cpumask);
+               cpumask_raise_softirq(cpumask, SCHEDULE_SOFTIRQ);
+       }
+}
+#else
+static inline void force_quiescent_state(struct rcu_data *rdp,
+                       struct rcu_ctrlblk *rcp)
+{
+       raise_softirq(SCHEDULE_SOFTIRQ);
+}
+#endif
+
+/**
+ * call_rcu - Queue an RCU callback for invocation after a grace period.
+ * @head: structure to be used for queueing the RCU updates.
+ * @func: actual update function to be invoked after the grace period
+ *
+ * The update function will be invoked some time after a full grace
+ * period elapses, in other words after all currently executing RCU
+ * read-side critical sections have completed.  RCU read-side critical
+ * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
+ * and may be nested.
+ */
+void fastcall call_rcu(struct rcu_head *head,
+                               void (*func)(struct rcu_head *rcu))
+{
+       unsigned long flags;
+       struct rcu_data *rdp;
+
+       head->func = func;
+       head->next = NULL;
+       local_irq_save(flags);
+       rdp = &__get_cpu_var(rcu_data);
+       *rdp->nxttail = head;
+       rdp->nxttail = &head->next;
+       /* This should be very unlikely in Xen */
+       if (unlikely(++rdp->qlen > qhimark)) {
+               rdp->blimit = INT_MAX;
+               force_quiescent_state(rdp, &rcu_ctrlblk);
+       }
+       local_irq_restore(flags);
+}
+
+/*
+ * Return the number of RCU batches processed thus far.  Useful
+ * for debug and statistics.
+ */
+long rcu_batches_completed(void)
+{
+       return rcu_ctrlblk.completed;
+}
+
+/*
+ * Invoke the completed RCU callbacks. They are expected to be in
+ * a per-cpu list.
+ */
+static void rcu_do_batch(struct rcu_data *rdp)
+{
+       struct rcu_head *next, *list;
+       int count = 0;
+
+       list = rdp->donelist;
+       while (list) {
+               next = rdp->donelist = list->next;
+               list->func(list);
+               list = next;
+               rdp->qlen--;
+               if (++count >= rdp->blimit)
+                       break;
+       }
+       if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark)
+               rdp->blimit = blimit;
+       if (!rdp->donelist)
+               rdp->donetail = &rdp->donelist;
+       else
+               raise_softirq(RCU_SOFTIRQ);
+}
+
+/*
+ * Grace period handling:
+ * The grace period handling consists of two steps:
+ * - A new grace period is started.
+ *   This is done by rcu_start_batch. The start is not broadcasted to
+ *   all cpus, they must pick this up by comparing rcp->cur with
+ *   rdp->quiescbatch. All cpus are recorded  in the
+ *   rcu_ctrlblk.cpumask bitmap.
+ * - All cpus must go through a quiescent state.
+ *   Since the start of the grace period is not broadcasted, at least
+ *   two calls to rcu_check_quiescent_state are required:
+ *   The first call just notices that a new grace period is running.
+ *   The following calls check if there was a quiescent state since
+ *   the beginning of the grace period. If so, it updates
+ *   rcu_ctrlblk.cpumask. If the bitmap is empty, then the grace
+ *   period is completed. rcu_check_quiescent_state calls
+ *   rcu_start_batch(0) to start the next grace period (if necessary).
+ */
+/*
+ * Register a new batch of callbacks, and start it up if there is
+ * currently no active batch and the batch to be registered has not
+ * already occurred.
+ * Caller must hold rcu_ctrlblk.lock.
+ */
+static void rcu_start_batch(struct rcu_ctrlblk *rcp)
+{
+       if (rcp->next_pending &&
+                       rcp->completed == rcp->cur) {
+               rcp->next_pending = 0;
+               /*
+                * next_pending == 0 must be visible in
+                * __rcu_process_callbacks() before it can see the new
+                * value of cur.
+                */
+               smp_wmb();
+               rcp->cur++;
+
+               rcp->cpumask = cpu_online_map;
+       }
+}
+
+/*
+ * cpu went through a quiescent state since the beginning of the grace
+ * period. Clear it from the cpu mask and complete the grace period if
+ * it was the last cpu. Start another grace period if someone has
+ * further entries pending.
+ */
+static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp)
+{
+       cpu_clear(cpu, rcp->cpumask);
+       if (cpus_empty(rcp->cpumask)) {
+               /* batch completed ! */
+               rcp->completed = rcp->cur;
+               rcu_start_batch(rcp);
+       }
+}
+
+/*
+ * Check if the cpu has gone through a quiescent state (say context
+ * switch). If so, and if it hasn't already done so in this RCU
+ * quiescent cycle, then indicate that it has done so.
+ */
+static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
+                                       struct rcu_data *rdp)
+{
+       if (rdp->quiescbatch != rcp->cur) {
+               /* start new grace period: */
+               rdp->qs_pending = 1;
+               rdp->passed_quiesc = 0;
+               rdp->quiescbatch = rcp->cur;
+               return;
+       }
+
+       /* Grace period already completed for this cpu?
+        * qs_pending is checked instead of the actual bitmap to avoid
+        * cacheline thrashing.
+        */
+       if (!rdp->qs_pending)
+               return;
+
+       /* 
+        * Was there a quiescent state since the beginning of the grace
+        * period? If no, then exit and wait for the next call.
+        */
+       if (!rdp->passed_quiesc)
+               return;
+       rdp->qs_pending = 0;
+
+       spin_lock(&rcp->lock);
+       /*
+        * rdp->quiescbatch/rcp->cur and the cpu bitmap can come out of
+        * sync during cpu startup. Ignore the quiescent state.
+        */
+       if (likely(rdp->quiescbatch == rcp->cur))
+               cpu_quiet(rdp->cpu, rcp);
+
+       spin_unlock(&rcp->lock);
+}
+
+
+/*
+ * This does the RCU processing work from softirq context. 
+ */
+static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
+                                       struct rcu_data *rdp)
+{
+       if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch)) {
+               *rdp->donetail = rdp->curlist;
+               rdp->donetail = rdp->curtail;
+               rdp->curlist = NULL;
+               rdp->curtail = &rdp->curlist;
+       }
+
+       local_irq_disable();
+       if (rdp->nxtlist && !rdp->curlist) {
+               rdp->curlist = rdp->nxtlist;
+               rdp->curtail = rdp->nxttail;
+               rdp->nxtlist = NULL;
+               rdp->nxttail = &rdp->nxtlist;
+               local_irq_enable();
+
+               /*
+                * start the next batch of callbacks
+                */
+
+               /* determine batch number */
+               rdp->batch = rcp->cur + 1;
+               /* see the comment and corresponding wmb() in
+                * the rcu_start_batch()
+                */
+               smp_rmb();
+
+               if (!rcp->next_pending) {
+                       /* and start it/schedule start if it's a new batch */
+                       spin_lock(&rcp->lock);
+                       rcp->next_pending = 1;
+                       rcu_start_batch(rcp);
+                       spin_unlock(&rcp->lock);
+               }
+       } else {
+               local_irq_enable();
+       }
+       rcu_check_quiescent_state(rcp, rdp);
+       if (rdp->donelist)
+               rcu_do_batch(rdp);
+}
+
+static void rcu_process_callbacks(void)
+{
+       __rcu_process_callbacks(&rcu_ctrlblk, &__get_cpu_var(rcu_data));
+}
+
+static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
+{
+       /* This cpu has pending rcu entries and the grace period
+        * for them has completed.
+        */
+       if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch))
+               return 1;
+
+       /* This cpu has no pending entries, but there are new entries */
+       if (!rdp->curlist && rdp->nxtlist)
+               return 1;
+
+       /* This cpu has finished callbacks to invoke */
+       if (rdp->donelist)
+               return 1;
+
+       /* The rcu core waits for a quiescent state from the cpu */
+       if (rdp->quiescbatch != rcp->cur || rdp->qs_pending)
+               return 1;
+
+       /* nothing to do */
+       return 0;
+}
+
+int rcu_pending(int cpu)
+{
+       return __rcu_pending(&rcu_ctrlblk, &per_cpu(rcu_data, cpu));
+}
+
+/*
+ * Check to see if any future RCU-related work will need to be done
+ * by the current CPU, even if none need be done immediately, returning
+ * 1 if so.  This function is part of the RCU implementation; it is
+ * -not- an exported member of the RCU API.
+ */
+int rcu_needs_cpu(int cpu)
+{
+       struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+
+       return (!!rdp->curlist || rcu_pending(cpu));
+}
+
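+/*
+ * Called from a point known to be quiescent (e.g. softirq context in
+ * the scheduler): record the quiescent state if qstate is set, then
+ * raise the softirq to advance RCU callback processing.
+ */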
+void rcu_check_callbacks(int cpu, int qstate)
+{
+       if (qstate)  {
+               rcu_qsctr_inc(cpu);
+       }
+       raise_softirq(RCU_SOFTIRQ);
+}
+
+static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
+                                               struct rcu_data *rdp)
+{
+       memset(rdp, 0, sizeof(*rdp));
+       rdp->curtail = &rdp->curlist;
+       rdp->nxttail = &rdp->nxtlist;
+       rdp->donetail = &rdp->donelist;
+       rdp->quiescbatch = rcp->completed;
+       rdp->qs_pending = 0;
+       rdp->cpu = cpu;
+       rdp->blimit = blimit;
+}
+
+void __devinit rcu_online_cpu(int cpu)
+{
+       struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+
+       rcu_init_percpu_data(cpu, &rcu_ctrlblk, rdp);
+}
+
+void rcu_init(void)
+{
+       rcu_online_cpu(smp_processor_id());
+       open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
+}
+
+
diff -r 7df4d8cfba3b -r 555277cafc35 xen/include/xen/rcupdate.h
--- /dev/null   Thu Jan  1 00:00:00 1970 +0000
+++ b/xen/include/xen/rcupdate.h        Tue Dec  5 14:47:26 2006 -0800
@@ -0,0 +1,213 @@
+/*
+ * Read-Copy Update mechanism for mutual exclusion 
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2001
+ *
+ * Author: Dipankar Sarma <dipankar@xxxxxxxxxx>
+ * 
+ * Based on the original work by Paul McKenney <paul.mckenney@xxxxxxxxxx>
+ * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
+ * Papers:
+ * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
+ * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
+ *
+ * For detailed explanation of Read-Copy Update mechanism see -
+ *             http://lse.sourceforge.net/locking/rcupdate.html
+ *
+ */
+
+#ifndef __XEN_RCUPDATE_H
+#define __XEN_RCUPDATE_H
+
+#include <xen/cache.h>
+#include <xen/spinlock.h>
+#include <xen/percpu.h>
+#include <xen/cpumask.h>
+
+/**
+ * struct rcu_head - callback structure for use with RCU
+ * @next: next update requests in a list
+ * @func: actual update function to call after the grace period.
+ */
+struct rcu_head {
+       struct rcu_head *next;
+       void (*func)(struct rcu_head *head);
+};
+
+#define RCU_HEAD_INIT  { .next = NULL, .func = NULL }
+#define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT
+#define INIT_RCU_HEAD(ptr) do { \
+       (ptr)->next = NULL; (ptr)->func = NULL; \
+} while (0)
+
+
+
+/* Global control variables for rcupdate callback mechanism. */
+struct rcu_ctrlblk {
+       long    cur;            /* Current batch number.              */
+       long    completed;      /* Number of the last completed batch */
+       int     next_pending;   /* Is the next batch already waiting? */
+
+       spinlock_t      lock    __cacheline_aligned;
+       cpumask_t       cpumask; /* CPUs that need to switch in order  */
+                                /* for current batch to proceed.      */
+} __cacheline_aligned;
+
+/* Is batch a before batch b ? */
+static inline int rcu_batch_before(long a, long b)
+{
+        return (a - b) < 0;
+}
+
+/* Is batch a after batch b ? */
+static inline int rcu_batch_after(long a, long b)
+{
+        return (a - b) > 0;
+}
+
+/*
+ * Per-CPU data for Read-Copy Update.
+ * nxtlist - new callbacks are added here
+ * curlist - current batch for which quiescent cycle started if any
+ */
+struct rcu_data {
+       /* 1) quiescent state handling : */
+       long            quiescbatch;     /* Batch # for grace period */
+       int             passed_quiesc;   /* User-mode/idle loop etc. */
+       int             qs_pending;      /* core waits for quiesc state */
+
+       /* 2) batch handling */
+       long            batch;           /* Batch # for current RCU batch */
+       struct rcu_head *nxtlist;
+       struct rcu_head **nxttail;
+       long            qlen;            /* # of queued callbacks */
+       struct rcu_head *curlist;
+       struct rcu_head **curtail;
+       struct rcu_head *donelist;
+       struct rcu_head **donetail;
+       long            blimit;          /* Upper limit on a processed batch */
+       int cpu;
+       struct rcu_head barrier;
+#ifdef CONFIG_SMP
+       long            last_rs_qlen;    /* qlen during the last resched */
+#endif
+};
+
+DECLARE_PER_CPU(struct rcu_data, rcu_data);
+extern struct rcu_ctrlblk rcu_ctrlblk;
+
+/*
+ * Increment the quiescent state counter.
+ * The counter is a bit degenerated: We do not need to know
+ * how many quiescent states passed, just if there was at least
+ * one since the start of the grace period. Thus just a flag.
+ */
+static inline void rcu_qsctr_inc(int cpu)
+{
+       struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+       rdp->passed_quiesc = 1;
+}
+
+extern int rcu_pending(int cpu);
+extern int rcu_needs_cpu(int cpu);
+
+/**
+ * rcu_read_lock - mark the beginning of an RCU read-side critical section.
+ *
+ * When call_rcu() is invoked
+ * on one CPU while other CPUs are within RCU read-side critical
+ * sections, invocation of the corresponding RCU callback is deferred
+ * until after all the other CPUs exit their critical sections.
+ *
+ * Note, however, that RCU callbacks are permitted to run concurrently
+ * with RCU read-side critical sections.  One way that this can happen
+ * is via the following sequence of events: (1) CPU 0 enters an RCU
+ * read-side critical section, (2) CPU 1 invokes call_rcu() to register
+ * an RCU callback, (3) CPU 0 exits the RCU read-side critical section,
+ * (4) CPU 2 enters a RCU read-side critical section, (5) the RCU
+ * callback is invoked.  This is legal, because the RCU read-side critical
+ * section that was running concurrently with the call_rcu() (and which
+ * therefore might be referencing something that the corresponding RCU
+ * callback would free up) has completed before the corresponding
+ * RCU callback is invoked.
+ *
+ * RCU read-side critical sections may be nested.  Any deferred actions
+ * will be deferred until the outermost RCU read-side critical section
+ * completes.
+ *
+ * It is illegal to block while in an RCU read-side critical section.
+ */
+#define rcu_read_lock()                do { } while (0)
+
+/**
+ * rcu_read_unlock - marks the end of an RCU read-side critical section.
+ *
+ * See rcu_read_lock() for more information.
+ */
+#define rcu_read_unlock()      do { } while (0)
+
+/*
+ * So where is rcu_write_lock()?  It does not exist, as there is no
+ * way for writers to lock out RCU readers.  This is a feature, not
+ * a bug -- this property is what provides RCU's performance benefits.
+ * Of course, writers must coordinate with each other.  The normal
+ * spinlock primitives work well for this, but any other technique may
+ * be used as well.  RCU does not care how the writers keep out of each
+ * others' way, as long as they do so.
+ */
+
+/**
+ * rcu_dereference - fetch an RCU-protected pointer in an
+ * RCU read-side critical section.  This pointer may later
+ * be safely dereferenced.
+ *
+ * Inserts memory barriers on architectures that require them
+ * (currently only the Alpha), and, more importantly, documents
+ * exactly which pointers are protected by RCU.
+ */
+
+/* As Xen does not run on Alpha, this is a no-op. */
+#define rcu_dereference(p)     (p)
+
+/**
+ * rcu_assign_pointer - assign (publicize) a pointer to a newly
+ * initialized structure that will be dereferenced by RCU read-side
+ * critical sections.  Returns the value assigned.
+ *
+ * Inserts memory barriers on architectures that require them
+ * (pretty much all of them other than x86), and also prevents
+ * the compiler from reordering the code that initializes the
+ * structure after the pointer assignment.  More importantly, this
+ * call documents which pointers will be dereferenced by RCU read-side
+ * code.
+ */
+
+#define rcu_assign_pointer(p, v)       ({ \
+                                               smp_wmb(); \
+                                               (p) = (v); \
+                                       })
+
+extern void rcu_init(void);
+extern void __devinit rcu_online_cpu(int cpu);
+extern void rcu_check_callbacks(int cpu, int qstate);
+extern long rcu_batches_completed(void);
+
+/* Exported interfaces */
+extern void fastcall call_rcu(struct rcu_head *head, 
+                             void (*func)(struct rcu_head *head));
+
+#endif /* __XEN_RCUPDATE_H */

Attachment: rcu_domain_1.patch
Description: rcu_domain_1.patch

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel