
To: Linux Kernel Mailing List <linux-kernel@xxxxxxxxxxxxxxx>
Subject: [Xen-devel] [PATCH 01/12] x86/vsyscall: use common implementation for vgetcpu
From: Jeremy Fitzhardinge <jeremy.fitzhardinge@xxxxxxxxxx>
Date: Wed, 14 Oct 2009 12:28:25 -0700
Cc: Xen-devel <xen-devel@xxxxxxxxxxxxxxxxxxx>, kurt.hackel@xxxxxxxxxx, the arch/x86 maintainers <x86@xxxxxxxxxx>, Glauber de Oliveira Costa <gcosta@xxxxxxxxxx>, Andi Kleen <ak@xxxxxxxxxxxxxxx>, Jeremy Fitzhardinge <jeremy.fitzhardinge@xxxxxxxxxx>, Avi Kivity <avi@xxxxxxxxxx>, Ingo Molnar <mingo@xxxxxxx>, Chris Mason <chris.mason@xxxxxxxxxx>
Delivery-date: Wed, 14 Oct 2009 12:33:29 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
In-reply-to: <1255548516-15260-1-git-send-email-jeremy.fitzhardinge@xxxxxxxxxx>
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
References: <1255548516-15260-1-git-send-email-jeremy.fitzhardinge@xxxxxxxxxx>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
There were two implementations of vgetcpu: one for the vdso and one for
vsyscalls.  They are actually functionally different, as the vdso one
ignores the tcache parameter (due to change 4307d1e5ada595c8), whereas the
vsyscall implementation honours it.  This patch preserves that functional
difference; a subsequent patch will make both behave the same way.
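
To make the tcache distinction concrete, here is a minimal userspace sketch
(not part of this patch) that exercises getcpu with a per-thread cache.  It
goes through the raw syscall rather than the vdso/vsyscall fast paths, purely
to show the third argument, and it assumes the uapi <linux/getcpu.h> header
for struct getcpu_cache:

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>        /* syscall() */
#include <sys/syscall.h>   /* SYS_getcpu */
#include <linux/getcpu.h>  /* struct getcpu_cache */

int main(void)
{
	unsigned cpu, node;
	struct getcpu_cache cache = { 0 };

	/* Passing a cache lets an implementation that honours it reuse a
	 * recent result; passing NULL forces the RDTSCP/LSL lookup. */
	if (syscall(SYS_getcpu, &cpu, &node, &cache) == 0)
		printf("running on cpu %u, node %u\n", cpu, node);
	return 0;
}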

The actual compiled code is still duplicated, as the vdso and vsyscall
environments are quite different (variable vs fixed address, for example),
and the access to kernel variables differs as well.  The implementation is
made common as an inline function, so there's only one copy of the source.
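
For context (background only, not part of this change): the value read back
via RDTSCP or LSL is a single word that the kernel fills in at CPU bringup in
vsyscall_64.c, with the CPU number in the low 12 bits and the NUMA node in
the bits above.  A rough sketch of that packing, using hypothetical helper
names for illustration, which __vgetcpu() simply reverses:

/* Hypothetical helpers; the real setup writes this value into the TSC_AUX
 * MSR (read back with RDTSCP) and into the limit field of a per-CPU GDT
 * segment (read back with LSL). */
static inline unsigned long pack_cpu_node(unsigned cpu, unsigned node)
{
	return ((unsigned long)node << 12) | cpu;  /* node above, cpu in low 12 bits */
}

static inline void unpack_cpu_node(unsigned long p, unsigned *cpu, unsigned *node)
{
	*cpu  = p & 0xfff;  /* same mask __vgetcpu() applies */
	*node = p >> 12;
}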

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@xxxxxxxxxx>
Cc: Andi Kleen <ak@xxxxxxxxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxx>
---
 arch/x86/include/asm/vsyscall.h |   38 ++++++++++++++++++++++++++++++++++++++
 arch/x86/kernel/vsyscall_64.c   |   30 +-----------------------------
 arch/x86/vdso/vgetcpu.c         |   18 +++---------------
 3 files changed, 42 insertions(+), 44 deletions(-)

diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
index d0983d2..2fcd505 100644
--- a/arch/x86/include/asm/vsyscall.h
+++ b/arch/x86/include/asm/vsyscall.h
@@ -39,6 +39,44 @@ extern struct timezone sys_tz;
 
 extern void map_vsyscall(void);
 
+#include <linux/getcpu.h>
+
+static __always_inline
+int __vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache,
+             unsigned long jiffies, int vgetcpu_mode)
+{
+       unsigned int p;
+       unsigned long j = 0;
+
+       /* Fast cache - only recompute value once per jiffies and avoid
+          relatively costly rdtscp/cpuid otherwise.
+          This works because the scheduler usually keeps the process
+          on the same CPU and this syscall doesn't guarantee its
+          results anyways.
+          We do this here because otherwise user space would do it on
+          its own in a likely inferior way (no access to jiffies).
+          If you don't like it pass NULL. */
+       if (tcache && tcache->blob[0] == (j = jiffies)) {
+               p = tcache->blob[1];
+       } else if (vgetcpu_mode == VGETCPU_RDTSCP) {
+               /* Load per CPU data from RDTSCP */
+               native_read_tscp(&p);
+       } else {
+               /* Load per CPU data from GDT */
+               asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
+       }
+       if (tcache) {
+               tcache->blob[0] = j;
+               tcache->blob[1] = p;
+       }
+       if (cpu)
+               *cpu = p & 0xfff;
+       if (node)
+               *node = p >> 12;
+       return 0;
+}
+
+
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_X86_VSYSCALL_H */
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index 25ee06a..f71dda9 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -190,35 +190,7 @@ time_t __vsyscall(1) vtime(time_t *t)
 long __vsyscall(2)
 vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
 {
-       unsigned int p;
-       unsigned long j = 0;
-
-       /* Fast cache - only recompute value once per jiffies and avoid
-          relatively costly rdtscp/cpuid otherwise.
-          This works because the scheduler usually keeps the process
-          on the same CPU and this syscall doesn't guarantee its
-          results anyways.
-          We do this here because otherwise user space would do it on
-          its own in a likely inferior way (no access to jiffies).
-          If you don't like it pass NULL. */
-       if (tcache && tcache->blob[0] == (j = __jiffies)) {
-               p = tcache->blob[1];
-       } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
-               /* Load per CPU data from RDTSCP */
-               native_read_tscp(&p);
-       } else {
-               /* Load per CPU data from GDT */
-               asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
-       }
-       if (tcache) {
-               tcache->blob[0] = j;
-               tcache->blob[1] = p;
-       }
-       if (cpu)
-               *cpu = p & 0xfff;
-       if (node)
-               *node = p >> 12;
-       return 0;
+       return __vgetcpu(cpu, node, tcache, __jiffies, __vgetcpu_mode);
 }
 
 static long __vsyscall(3) venosys_1(void)
diff --git a/arch/x86/vdso/vgetcpu.c b/arch/x86/vdso/vgetcpu.c
index 9fbc6b2..1f19f74 100644
--- a/arch/x86/vdso/vgetcpu.c
+++ b/arch/x86/vdso/vgetcpu.c
@@ -14,22 +14,10 @@
 #include "vextern.h"
 
 notrace long
-__vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused)
+__vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
 {
-       unsigned int p;
-
-       if (*vdso_vgetcpu_mode == VGETCPU_RDTSCP) {
-               /* Load per CPU data from RDTSCP */
-               native_read_tscp(&p);
-       } else {
-               /* Load per CPU data from GDT */
-               asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
-       }
-       if (cpu)
-               *cpu = p & 0xfff;
-       if (node)
-               *node = p >> 12;
-       return 0;
+       /* Ignore tcache to preserve old behaviour */
+       return __vgetcpu(cpu, node, NULL, *vdso_jiffies, *vdso_vgetcpu_mode);
 }
 
 long getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
-- 
1.6.2.5

