# HG changeset patch
# User yamahata@xxxxxxxxxxxxx
# Date 1182319573 -32400
# Node ID e215072b79e6d1f969aa4c641c5ac69470d65f6e
# Parent  72a16c9a054eace7f3b3c5c103ab897f2965b440
inlinize xen hyperprivops to eliminate branch call.
PATCHNAME: inlinize_xen_hyperprivops

Signed-off-by: Isaku Yamahata

diff -r 72a16c9a054e -r e215072b79e6 arch/ia64/xen/hypercall.S
--- a/arch/ia64/xen/hypercall.S	Wed Jun 20 15:06:13 2007 +0900
+++ b/arch/ia64/xen/hypercall.S	Wed Jun 20 15:06:13 2007 +0900
@@ -7,6 +7,13 @@
 #include
 #include
 
+#ifdef __INTEL_COMPILER
+# undef ASM_SUPPORTED
+#else
+# define ASM_SUPPORTED
+#endif
+
+#ifndef ASM_SUPPORTED
 GLOBAL_ENTRY(xen_get_psr)
 	XEN_HYPER_GET_PSR
 	br.ret.sptk.many rp
@@ -113,7 +120,8 @@ GLOBAL_ENTRY(xen_set_eflag)
 	XEN_HYPER_SET_EFLAG
 	br.ret.sptk.many rp
 END(xen_set_eflag)
-#endif
+#endif /* CONFIG_IA32_SUPPORT */
+#endif /* ASM_SUPPORTED */
 
 GLOBAL_ENTRY(xen_send_ipi)
 	mov r14=r32
diff -r 72a16c9a054e -r e215072b79e6 include/asm-ia64/privop.h
--- a/include/asm-ia64/privop.h	Wed Jun 20 15:06:13 2007 +0900
+++ b/include/asm-ia64/privop.h	Wed Jun 20 15:06:13 2007 +0900
@@ -1,6 +1,9 @@
 #ifndef _ASM_IA64_PRIVOP_H
 #define _ASM_IA64_PRIVOP_H
 
+#ifndef _ASM_IA64_INTRINSICS_H
+#error "don't include privop.h directly. instead include intrinsics.h"
+#endif
 /*
  * Copyright (C) 2005 Hewlett-Packard Co
  *	Dan Magenheimer
diff -r 72a16c9a054e -r e215072b79e6 include/asm-ia64/xen/privop.h
--- a/include/asm-ia64/xen/privop.h	Wed Jun 20 15:06:13 2007 +0900
+++ b/include/asm-ia64/xen/privop.h	Wed Jun 20 15:06:13 2007 +0900
@@ -11,6 +11,7 @@
 #ifndef __ASSEMBLY__
 #include	/* arch-ia64.h requires uint64_t */
+#include
 #endif
 #include
 
@@ -77,10 +78,32 @@
  * may have different semantics depending on whether they are executed
  * at PL0 vs PL!=0. When paravirtualized, these instructions mustn't
  * be allowed to execute directly, lest incorrect semantics result.
*/ -extern unsigned long xen_fc(unsigned long addr); +#ifdef ASM_SUPPORTED +static inline void +xen_fc(unsigned long addr) +{ + register __u64 __addr asm ("r8") = addr; + asm volatile ("break %0":: "i"(HYPERPRIVOP_FC), "r"(__addr)); +} + +static inline unsigned long +xen_thash(unsigned long addr) +{ + register __u64 ia64_intri_res asm ("r8"); + register __u64 __addr asm ("r8") = addr; + asm volatile ("break %1": + "=r"(ia64_intri_res): + "i"(HYPERPRIVOP_THASH), "0"(__addr)); + return ia64_intri_res; +} +#else +extern void xen_fc(unsigned long addr); +extern unsigned long xen_thash(unsigned long addr); +#endif + #define ia64_fc(addr) xen_fc((unsigned long)(addr)) -extern unsigned long xen_thash(unsigned long addr); #define ia64_thash(addr) xen_thash((unsigned long)(addr)) + /* Note that "ttag" and "cover" are also privilege-sensitive; "ttag" * is not currently used (though it may be in a long-format VHPT system!) * and the semantics of cover only change if psr.ic is off which is very @@ -88,12 +111,57 @@ extern unsigned long xen_thash(unsigned /* There are also privilege-sensitive registers. These registers are * readable at any privilege level but only writable at PL0. 
*/ +#ifdef ASM_SUPPORTED +static inline unsigned long +xen_get_cpuid(int index) +{ + register __u64 ia64_intri_res asm ("r8"); + register __u64 __index asm ("r8") = index; + asm volatile ("break %1": + "=r"(ia64_intri_res): + "i"(HYPERPRIVOP_GET_CPUID), "0"(__index)); + return ia64_intri_res; +} + +static inline unsigned long +xen_get_pmd(int index) +{ + register __u64 ia64_intri_res asm ("r8"); + register __u64 __index asm ("r8") = index; + asm volatile ("break %1": + "=r"(ia64_intri_res): + "i"(HYPERPRIVOP_GET_PMD), "0O"(__index)); + return ia64_intri_res; +} +#else extern unsigned long xen_get_cpuid(int index); +extern unsigned long xen_get_pmd(int index); +#endif + #define ia64_get_cpuid(i) xen_get_cpuid(i) -extern unsigned long xen_get_pmd(int index); #define ia64_get_pmd(i) xen_get_pmd(i) + + +#ifdef ASM_SUPPORTED +static inline unsigned long +xen_get_eflag(void) +{ + register __u64 ia64_intri_res asm ("r8"); + asm volatile ("break %1": + "=r"(ia64_intri_res): "i"(HYPERPRIVOP_GET_EFLAG)); + return ia64_intri_res; +} + +static inline void +xen_set_eflag(unsigned long val) +{ + register __u64 __val asm ("r8") = val; + asm volatile ("break %0":: "i"(HYPERPRIVOP_SET_EFLAG), "r"(__val)); +} +#else extern unsigned long xen_get_eflag(void); /* see xen_ia64_getreg */ extern void xen_set_eflag(unsigned long); /* see xen_ia64_setreg */ +#endif /************************************************/ /* Instructions paravirtualized for performance */ @@ -185,6 +253,7 @@ extern void xen_set_eflag(unsigned long) * be properly handled by Xen, some are frequent enough that we use * hyperprivops for performance. 
*/ +#ifndef ASM_SUPPORTED extern unsigned long xen_get_psr(void); extern unsigned long xen_get_ivr(void); extern unsigned long xen_get_tpr(void); @@ -195,6 +264,95 @@ extern unsigned long xen_get_rr(unsigned extern unsigned long xen_get_rr(unsigned long index); extern void xen_set_kr(unsigned long index, unsigned long val); extern void xen_ptcga(unsigned long addr, unsigned long size); +#else +static inline unsigned long +xen_get_psr(void) +{ + register __u64 ia64_intri_res asm ("r8"); + asm volatile ("break %1": + "=r"(ia64_intri_res): "i"(HYPERPRIVOP_GET_PSR)); + return ia64_intri_res; +} + +static inline unsigned long +xen_get_ivr(void) +{ + register __u64 ia64_intri_res asm ("r8"); + asm volatile ("break %1": + "=r"(ia64_intri_res): "i"(HYPERPRIVOP_GET_IVR)); + return ia64_intri_res; +} + +static inline unsigned long +xen_get_tpr(void) +{ + register __u64 ia64_intri_res asm ("r8"); + asm volatile ("break %1": + "=r"(ia64_intri_res): "i"(HYPERPRIVOP_GET_TPR)); + return ia64_intri_res; +} + +static inline void +xen_set_tpr(unsigned long val) +{ + register __u64 __val asm ("r8") = val; + asm volatile ("break %0":: + "i"(HYPERPRIVOP_GET_TPR), "r"(__val)); +} + +static inline void +xen_eoi(unsigned long val) +{ + register __u64 __val asm ("r8") = val; + asm volatile ("break %0":: + "i"(HYPERPRIVOP_EOI), "r"(__val)); +} + +static inline void +xen_set_itm(unsigned long val) +{ + register __u64 __val asm ("r8") = val; + asm volatile ("break %0":: "i"(HYPERPRIVOP_SET_ITM), "r"(__val)); +} + +static inline void +xen_ptcga(unsigned long addr, unsigned long size) +{ + register __u64 __addr asm ("r8") = addr; + register __u64 __size asm ("r9") = size; + asm volatile ("break %0":: + "i"(HYPERPRIVOP_PTC_GA), "r"(__addr), "r"(__size)); +} + +static inline unsigned long +xen_get_rr(unsigned long index) +{ + register __u64 ia64_intri_res asm ("r8"); + register __u64 __index asm ("r8") = index; + asm volatile ("break %1": + "=r"(ia64_intri_res): + "i"(HYPERPRIVOP_GET_RR), 
"0"(__index)); + return ia64_intri_res; +} + +static inline void +xen_set_rr(unsigned long index, unsigned long val) +{ + register __u64 __index asm ("r8") = index; + register __u64 __val asm ("r9") = val; + asm volatile ("break %0":: + "i"(HYPERPRIVOP_SET_RR), "r"(__index), "r"(__val)); +} + +static inline void +xen_set_kr(unsigned long index, unsigned long val) +{ + register __u64 __index asm ("r8") = index; + register __u64 __val asm ("r9") = val; + asm volatile ("break %0":: + "i"(HYPERPRIVOP_SET_KR), "r"(__index), "r"(__val)); +} +#endif /* Note: It may look wrong to test for is_running_on_xen() in each case. * However regnum is always a constant so, as written, the compiler