# HG changeset patch
# User awilliam@xxxxxxxxxxx
# Node ID bf396988059eee110a8e7a26bce69ff751e57067
# Parent 0e5635d68de38807ed86611d7a2e3bed3b8d5433
[IA64] Allow guest to set the address of shared_info.
Add a new hypercall: SET_SHARED_INFO_VA.
Cleanup of asm-xsi-offsets: do not define absolute addresses, use a new macro.
Cleanup of linux asm-offsets: use a macro for xen mapped regs.
xensetup.S: set the shared_info address (disabled if using compatibility).
privop.h: may redefine XSI_BASE (not yet enabled, for compatibility).
Naming consistency: use the XMAPPEDREGS_ prefix.
Cleanup of xensystem.h
Signed-off-by: Tristan Gingold <tristan.gingold@xxxxxxxx>
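
For reference, a guest issues the new call through the firmware-hypercall break
interface, exactly as the xensetup.S hunk below does from assembly: the hypercall
index (FW_HYPERCALL_SET_SHARED_INFO_VA, 0x600) goes in r2, the new virtual address
in r28, and the status comes back in r8 after a break with the default breakimm
0x1000. The C sketch below is illustrative only; the wrapper name and the
inline-asm clobber list are assumptions, not part of this patch.

    /* Hypothetical C wrapper for the new hypercall; register bindings mirror
     * xensetup.S and fw_hypercall(): index in r2, argument in r28, status in r8. */
    static inline unsigned long
    xen_set_shared_info_va(unsigned long va)
    {
        register unsigned long index asm ("r2")  = 0x600UL; /* FW_HYPERCALL_SET_SHARED_INFO_VA */
        register unsigned long arg   asm ("r28") = va;      /* new shared_info virtual address */
        register unsigned long ret   asm ("r8");             /* status returned by Xen (0 on success) */

        /* 0x1000 is the default breakimm set up in arch_domain_create(). */
        asm volatile ("break 0x1000"
                      : "=r" (ret)
                      : "r" (index), "r" (arg)
                      : "memory");
        return ret;
    }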
---
linux-2.6-xen-sparse/arch/ia64/kernel/asm-offsets.c | 49 +++-----
linux-2.6-xen-sparse/arch/ia64/xen/xensetup.S | 21 ++-
linux-2.6-xen-sparse/include/asm-ia64/xen/privop.h | 19 ++-
xen/arch/ia64/asm-xsi-offsets.c | 86 ++++----------
xen/arch/ia64/vmx/vmx_entry.S | 33 -----
xen/arch/ia64/xen/domain.c | 43 +++++++
xen/arch/ia64/xen/faults.c | 4
xen/arch/ia64/xen/hypercall.c | 3
xen/arch/ia64/xen/hyperprivop.S | 117 ++++++++++++--------
xen/arch/ia64/xen/ivt.S | 7 -
xen/arch/ia64/xen/vcpu.c | 2
xen/arch/ia64/xen/xenasm.S | 30 +++--
xen/include/asm-ia64/dom_fw.h | 3
xen/include/asm-ia64/domain.h | 5
xen/include/asm-ia64/xenkregs.h | 2
xen/include/asm-ia64/xensystem.h | 18 +--
xen/include/public/arch-ia64.h | 14 +-
17 files changed, 241 insertions(+), 215 deletions(-)
diff -r 0e5635d68de3 -r bf396988059e linux-2.6-xen-sparse/arch/ia64/kernel/asm-offsets.c
--- a/linux-2.6-xen-sparse/arch/ia64/kernel/asm-offsets.c Wed Jun 21 11:17:08 2006 -0600
+++ b/linux-2.6-xen-sparse/arch/ia64/kernel/asm-offsets.c Fri Jun 23 09:46:39 2006 -0600
@@ -265,34 +265,25 @@ void foo(void)
#ifdef CONFIG_XEN
BLANK();
- DEFINE(XSI_PSR_I_ADDR_OFS, (XSI_OFS + offsetof(mapped_regs_t, interrupt_mask_addr)));
- DEFINE(XSI_IPSR_OFS, (XSI_OFS + offsetof(mapped_regs_t, ipsr)));
- DEFINE(XSI_IIP_OFS, (XSI_OFS + offsetof(mapped_regs_t, iip)));
- DEFINE(XSI_IFS_OFS, (XSI_OFS + offsetof(mapped_regs_t, ifs)));
- DEFINE(XSI_PRECOVER_IFS_OFS, (XSI_OFS + offsetof(mapped_regs_t, precover_ifs)));
- DEFINE(XSI_ISR_OFS, (XSI_OFS + offsetof(mapped_regs_t, isr)));
- DEFINE(XSI_IFA_OFS, (XSI_OFS + offsetof(mapped_regs_t, ifa)));
- DEFINE(XSI_IIPA_OFS, (XSI_OFS + offsetof(mapped_regs_t, iipa)));
- DEFINE(XSI_IIM_OFS, (XSI_OFS + offsetof(mapped_regs_t, iim)));
- DEFINE(XSI_TPR_OFS, (XSI_OFS + offsetof(mapped_regs_t, tpr)));
- DEFINE(XSI_IHA_OFS, (XSI_OFS + offsetof(mapped_regs_t, iha)));
- DEFINE(XSI_ITIR_OFS, (XSI_OFS + offsetof(mapped_regs_t, itir)));
- DEFINE(XSI_ITV_OFS, (XSI_OFS + offsetof(mapped_regs_t, itv)));
- DEFINE(XSI_PTA_OFS, (XSI_OFS + offsetof(mapped_regs_t, pta)));
- DEFINE(XSI_PSR_IC_OFS, (XSI_OFS + offsetof(mapped_regs_t, interrupt_collection_enabled)));
- DEFINE(XSI_PEND_OFS, (XSI_OFS + offsetof(mapped_regs_t, pending_interruption)));
- DEFINE(XSI_INCOMPL_REGFR_OFS, (XSI_OFS + offsetof(mapped_regs_t, incomplete_regframe)));
- DEFINE(XSI_METAPHYS_OFS, (XSI_OFS + offsetof(mapped_regs_t, metaphysical_mode)));
-
- DEFINE(XSI_BANKNUM_OFS, (XSI_OFS + offsetof(mapped_regs_t, banknum)));
-
- DEFINE(XSI_BANK0_R16_OFS, (XSI_OFS + offsetof(mapped_regs_t, bank0_regs[0])));
- DEFINE(XSI_BANK1_R16_OFS, (XSI_OFS + offsetof(mapped_regs_t, bank1_regs[0])));
- DEFINE(XSI_B0NATS_OFS, (XSI_OFS + offsetof(mapped_regs_t, vbnat)));
- DEFINE(XSI_B1NATS_OFS, (XSI_OFS + offsetof(mapped_regs_t, vnat)));
- DEFINE(XSI_RR0_OFS, (XSI_OFS + offsetof(mapped_regs_t, rrs[0])));
- DEFINE(XSI_KR0_OFS, (XSI_OFS + offsetof(mapped_regs_t, krs[0])));
- DEFINE(XSI_PKR0_OFS, (XSI_OFS + offsetof(mapped_regs_t, pkrs[0])));
- DEFINE(XSI_TMP0_OFS, (XSI_OFS + offsetof(mapped_regs_t, tmp[0])));
+#define DEFINE_MAPPED_REG_OFS(sym, field) \
+ DEFINE(sym, (XMAPPEDREGS_OFS + offsetof(mapped_regs_t, field)))
+
+ DEFINE_MAPPED_REG_OFS(XSI_PSR_I_ADDR_OFS, interrupt_mask_addr);
+ DEFINE_MAPPED_REG_OFS(XSI_IPSR_OFS, ipsr);
+ DEFINE_MAPPED_REG_OFS(XSI_IIP_OFS, iip);
+ DEFINE_MAPPED_REG_OFS(XSI_IFS_OFS, ifs);
+ DEFINE_MAPPED_REG_OFS(XSI_PRECOVER_IFS_OFS, precover_ifs);
+ DEFINE_MAPPED_REG_OFS(XSI_ISR_OFS, isr);
+ DEFINE_MAPPED_REG_OFS(XSI_IFA_OFS, ifa);
+ DEFINE_MAPPED_REG_OFS(XSI_IIPA_OFS, iipa);
+ DEFINE_MAPPED_REG_OFS(XSI_IIM_OFS, iim);
+ DEFINE_MAPPED_REG_OFS(XSI_IHA_OFS, iha);
+ DEFINE_MAPPED_REG_OFS(XSI_ITIR_OFS, itir);
+ DEFINE_MAPPED_REG_OFS(XSI_PSR_IC_OFS, interrupt_collection_enabled);
+ DEFINE_MAPPED_REG_OFS(XSI_PEND_OFS, pending_interruption);
+ DEFINE_MAPPED_REG_OFS(XSI_INCOMPL_REGFR_OFS, incomplete_regframe);
+ DEFINE_MAPPED_REG_OFS(XSI_BANKNUM_OFS, banknum);
+ DEFINE_MAPPED_REG_OFS(XSI_BANK0_R16_OFS, bank0_regs[0]);
+ DEFINE_MAPPED_REG_OFS(XSI_BANK1_R16_OFS, bank1_regs[0]);
#endif /* CONFIG_XEN */
}
diff -r 0e5635d68de3 -r bf396988059e linux-2.6-xen-sparse/arch/ia64/xen/xensetup.S
--- a/linux-2.6-xen-sparse/arch/ia64/xen/xensetup.S Wed Jun 21 11:17:08 2006 -0600
+++ b/linux-2.6-xen-sparse/arch/ia64/xen/xensetup.S Fri Jun 23 09:46:39 2006 -0600
@@ -15,10 +15,21 @@ GLOBAL_ENTRY(early_xen_setup)
mov r8=ar.rsc // Initialized in head.S
(isBP) movl r9=running_on_xen;;
extr.u r8=r8,2,2;; // Extract pl fields
- cmp.ne p7,p0=r8,r0;; // p7: running on xen
-(p7) mov r8=1 // booleanize.
-(p7) movl r10=xen_ivt;;
+ cmp.eq p7,p0=r8,r0 // p7: !running on xen
+ mov r8=1 // booleanize.
+(p7) br.ret.sptk.many rp;;
(isBP) st4 [r9]=r8
-(p7) mov cr.iva=r10
- br.ret.sptk.many rp;;
+ movl r10=xen_ivt;;
+
+ mov cr.iva=r10
+
+#if XSI_BASE != 0xf100000000000000UL
+ /* Backward compatibility. */
+(isBP) mov r2=0x600
+(isBP) movl r28=XSI_BASE;;
+(isBP) break 0x1000;;
+#endif
+
+ br.ret.sptk.many rp
+ ;;
END(early_xen_setup)
diff -r 0e5635d68de3 -r bf396988059e linux-2.6-xen-sparse/include/asm-ia64/xen/privop.h
--- a/linux-2.6-xen-sparse/include/asm-ia64/xen/privop.h Wed Jun 21 11:17:08 2006 -0600
+++ b/linux-2.6-xen-sparse/include/asm-ia64/xen/privop.h Fri Jun 23 09:46:39 2006 -0600
@@ -14,8 +14,15 @@
#define IA64_PARAVIRTUALIZED
-#define XSI_OFS XSI_SIZE
-#define XPRIVREG_BASE (XSI_BASE + XSI_SIZE)
+#if 0
+#undef XSI_BASE
+/* At 1 MB, before per-cpu space but still addressable using addl instead
+ of movl. */
+#define XSI_BASE 0xfffffffffff00000
+#endif
+
+/* Address of mapped regs. */
+#define XMAPPEDREGS_BASE (XSI_BASE + XSI_SIZE)
#ifdef __ASSEMBLY__
#define XEN_HYPER_RFI break HYPERPRIVOP_RFI
@@ -98,16 +105,16 @@ extern void xen_set_eflag(unsigned long)
* Others, like "pend", are abstractions based on privileged registers.
* "Pend" is guaranteed to be set if reading cr.ivr would return a
* (non-spurious) interrupt. */
-#define XEN_PRIVREGS ((struct mapped_regs *)XPRIVREG_BASE)
+#define XEN_MAPPEDREGS ((struct mapped_regs *)XMAPPEDREGS_BASE)
#define XSI_PSR_I \
- (*XEN_PRIVREGS->interrupt_mask_addr)
+ (*XEN_MAPPEDREGS->interrupt_mask_addr)
#define xen_get_virtual_psr_i() \
(!XSI_PSR_I)
#define xen_set_virtual_psr_i(_val) \
({ XSI_PSR_I = (uint8_t)(_val) ? 0 : 1; })
#define xen_set_virtual_psr_ic(_val) \
- ({ XEN_PRIVREGS->interrupt_collection_enabled = _val ? 1 : 0; })
-#define xen_get_virtual_pend() (XEN_PRIVREGS->pending_interruption)
+ ({ XEN_MAPPEDREGS->interrupt_collection_enabled = _val ? 1 : 0; })
+#define xen_get_virtual_pend() (XEN_MAPPEDREGS->pending_interruption)
/* Hyperprivops are "break" instructions with a well-defined API.
* In particular, the virtual psr.ic bit must be off; in this way
diff -r 0e5635d68de3 -r bf396988059e xen/arch/ia64/asm-xsi-offsets.c
--- a/xen/arch/ia64/asm-xsi-offsets.c Wed Jun 21 11:17:08 2006 -0600
+++ b/xen/arch/ia64/asm-xsi-offsets.c Fri Jun 23 09:46:39 2006 -0600
@@ -42,66 +42,34 @@
#define BLANK() asm volatile("\n->" : : )
-#define OFFSET(_sym, _str, _mem) \
- DEFINE(_sym, offsetof(_str, _mem));
+#define DEFINE_MAPPED_REG_OFS(sym, field) \
+ DEFINE(sym, (XMAPPEDREGS_OFS + offsetof(mapped_regs_t, field)))
void foo(void)
{
- /* First is shared info page, and then arch specific vcpu context */
- //DEFINE(XSI_BASE, SHAREDINFO_ADDR);
-
- DEFINE(XSI_PSR_I_ADDR_OFS, (XSI_OFS + offsetof(mapped_regs_t, interrupt_mask_addr)));
- DEFINE(XSI_PSR_I_ADDR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, interrupt_mask_addr)));
- DEFINE(XSI_IPSR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, ipsr)));
- DEFINE(XSI_IPSR_OFS, (XSI_OFS + offsetof(mapped_regs_t, ipsr)));
- DEFINE(XSI_IIP_OFS, (XSI_OFS + offsetof(mapped_regs_t, iip)));
- DEFINE(XSI_IIP, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, iip)));
- DEFINE(XSI_IFS_OFS, (XSI_OFS + offsetof(mapped_regs_t, ifs)));
- DEFINE(XSI_IFS, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, ifs)));
- DEFINE(XSI_PRECOVER_IFS_OFS, (XSI_OFS + offsetof(mapped_regs_t, precover_ifs)));
- DEFINE(XSI_PRECOVER_IFS, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, precover_ifs)));
- DEFINE(XSI_ISR_OFS, (XSI_OFS + offsetof(mapped_regs_t, isr)));
- DEFINE(XSI_ISR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, isr)));
- DEFINE(XSI_IFA_OFS, (XSI_OFS + offsetof(mapped_regs_t, ifa)));
- DEFINE(XSI_IFA, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, ifa)));
- DEFINE(XSI_IIPA_OFS, (XSI_OFS + offsetof(mapped_regs_t, iipa)));
- DEFINE(XSI_IIPA, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, iipa)));
- DEFINE(XSI_IIM_OFS, (XSI_OFS + offsetof(mapped_regs_t, iim)));
- DEFINE(XSI_IIM, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, iim)));
- DEFINE(XSI_TPR_OFS, (XSI_OFS + offsetof(mapped_regs_t, tpr)));
- DEFINE(XSI_TPR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, tpr)));
- DEFINE(XSI_IHA_OFS, (XSI_OFS + offsetof(mapped_regs_t, iha)));
- DEFINE(XSI_IHA, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, iha)));
- DEFINE(XSI_ITIR_OFS, (XSI_OFS + offsetof(mapped_regs_t, itir)));
- DEFINE(XSI_ITIR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, itir)));
- DEFINE(XSI_ITV_OFS, (XSI_OFS + offsetof(mapped_regs_t, itv)));
- DEFINE(XSI_ITV, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, itv)));
- DEFINE(XSI_PTA_OFS, (XSI_OFS + offsetof(mapped_regs_t, pta)));
- DEFINE(XSI_PTA, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, pta)));
- DEFINE(XSI_PSR_IC_OFS, (XSI_OFS + offsetof(mapped_regs_t, interrupt_collection_enabled)));
- DEFINE(XSI_PSR_IC, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, interrupt_collection_enabled)));
- DEFINE(XSI_PEND_OFS, (XSI_OFS + offsetof(mapped_regs_t, pending_interruption)));
- DEFINE(XSI_PEND, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, pending_interruption)));
- DEFINE(XSI_INCOMPL_REGFR_OFS, (XSI_OFS + offsetof(mapped_regs_t, incomplete_regframe)));
- DEFINE(XSI_INCOMPL_REGFR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, incomplete_regframe)));
- DEFINE(XSI_METAPHYS_OFS, (XSI_OFS + offsetof(mapped_regs_t, metaphysical_mode)));
- DEFINE(XSI_METAPHYS, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, metaphysical_mode)));
-
- DEFINE(XSI_BANKNUM_OFS, (XSI_OFS + offsetof(mapped_regs_t, banknum)));
- DEFINE(XSI_BANKNUM, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, banknum)));
-
- DEFINE(XSI_BANK0_R16_OFS, (XSI_OFS + offsetof(mapped_regs_t, bank0_regs[0])));
- DEFINE(XSI_BANK0_R16, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, bank0_regs[0])));
- DEFINE(XSI_BANK1_R16_OFS, (XSI_OFS + offsetof(mapped_regs_t, bank1_regs[0])));
- DEFINE(XSI_BANK1_R16, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, bank1_regs[0])));
- DEFINE(XSI_B0NATS_OFS, (XSI_OFS + offsetof(mapped_regs_t, vbnat)));
- DEFINE(XSI_B1NATS_OFS, (XSI_OFS + offsetof(mapped_regs_t, vnat)));
- DEFINE(XSI_RR0_OFS, (XSI_OFS + offsetof(mapped_regs_t, rrs[0])));
- DEFINE(XSI_RR0, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, rrs[0])));
- DEFINE(XSI_KR0_OFS, (XSI_OFS + offsetof(mapped_regs_t, krs[0])));
- DEFINE(XSI_KR0, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, krs[0])));
- DEFINE(XSI_PKR0_OFS, (XSI_OFS + offsetof(mapped_regs_t, pkrs[0])));
- DEFINE(XSI_PKR0, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, pkrs[0])));
- DEFINE(XSI_TMP0_OFS, (XSI_OFS + offsetof(mapped_regs_t, tmp[0])));
- DEFINE(XSI_TMP0, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, tmp[0])));
+ DEFINE_MAPPED_REG_OFS(XSI_PSR_I_ADDR_OFS, interrupt_mask_addr);
+ DEFINE_MAPPED_REG_OFS(XSI_IPSR_OFS, ipsr);
+ DEFINE_MAPPED_REG_OFS(XSI_IIP_OFS, iip);
+ DEFINE_MAPPED_REG_OFS(XSI_IFS_OFS, ifs);
+ DEFINE_MAPPED_REG_OFS(XSI_PRECOVER_IFS_OFS, precover_ifs);
+ DEFINE_MAPPED_REG_OFS(XSI_ISR_OFS, isr);
+ DEFINE_MAPPED_REG_OFS(XSI_IFA_OFS, ifa);
+ DEFINE_MAPPED_REG_OFS(XSI_IIPA_OFS, iipa);
+ DEFINE_MAPPED_REG_OFS(XSI_IIM_OFS, iim);
+ DEFINE_MAPPED_REG_OFS(XSI_TPR_OFS, tpr);
+ DEFINE_MAPPED_REG_OFS(XSI_IHA_OFS, iha);
+ DEFINE_MAPPED_REG_OFS(XSI_ITIR_OFS, itir);
+ DEFINE_MAPPED_REG_OFS(XSI_ITV_OFS, itv);
+ DEFINE_MAPPED_REG_OFS(XSI_PTA_OFS, pta);
+ DEFINE_MAPPED_REG_OFS(XSI_PSR_IC_OFS, interrupt_collection_enabled);
+ DEFINE_MAPPED_REG_OFS(XSI_PEND_OFS, pending_interruption);
+ DEFINE_MAPPED_REG_OFS(XSI_INCOMPL_REGFR_OFS, incomplete_regframe);
+ DEFINE_MAPPED_REG_OFS(XSI_METAPHYS_OFS, metaphysical_mode);
+ DEFINE_MAPPED_REG_OFS(XSI_BANKNUM_OFS, banknum);
+ DEFINE_MAPPED_REG_OFS(XSI_BANK0_R16_OFS, bank0_regs[0]);
+ DEFINE_MAPPED_REG_OFS(XSI_BANK1_R16_OFS, bank1_regs[0]);
+ DEFINE_MAPPED_REG_OFS(XSI_B0NATS_OFS, vbnat);
+ DEFINE_MAPPED_REG_OFS(XSI_B1NATS_OFS, vnat);
+ DEFINE_MAPPED_REG_OFS(XSI_RR0_OFS, rrs[0]);
+ DEFINE_MAPPED_REG_OFS(XSI_KR0_OFS, krs[0]);
}
diff -r 0e5635d68de3 -r bf396988059e xen/arch/ia64/vmx/vmx_entry.S
--- a/xen/arch/ia64/vmx/vmx_entry.S Wed Jun 21 11:17:08 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_entry.S Fri Jun 23 09:46:39 2006 -0600
@@ -675,39 +675,6 @@ 1:
itr.d dtr[r24]=loc2 // wire in new mapping...
;;
-
-#if 0
- // re-pin mappings for shared_info
-
- mov r24=IA64_TR_SHARED_INFO
- movl r25=__pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RW)
- ;;
- or loc3 = r25,loc3 // construct PA | page properties
- mov r23 = PAGE_SHIFT<<2
- ;;
- ptr.d in1,r23
- ;;
- mov cr.itir=r23
- mov cr.ifa=in1
- ;;
- itr.d dtr[r24]=loc3 // wire in new mapping...
- ;;
- // re-pin mappings for shared_arch_info
-
- mov r24=IA64_TR_ARCH_INFO
- or loc4 = r25,loc4 // construct PA | page properties
- mov r23 = PAGE_SHIFT<<2
- ;;
- ptr.d in2,r23
- ;;
- mov cr.itir=r23
- mov cr.ifa=in2
- ;;
- itr.d dtr[r24]=loc4 // wire in new mapping...
- ;;
-#endif
-
-
// re-pin mappings for guest_vhpt
mov r24=IA64_TR_PERVP_VHPT
diff -r 0e5635d68de3 -r bf396988059e xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c Wed Jun 21 11:17:08 2006 -0600
+++ b/xen/arch/ia64/xen/domain.c Fri Jun 23 09:46:39 2006 -0600
@@ -88,6 +88,7 @@ extern struct vcpu *ia64_switch_to (stru
/* Address of vpsr.i (in fact evtchn_upcall_mask) of current vcpu.
This is a Xen virtual address. */
DEFINE_PER_CPU(uint8_t *, current_psr_i_addr);
+DEFINE_PER_CPU(int *, current_psr_ic_addr);
#include <xen/sched-if.h>
@@ -106,6 +107,8 @@ void schedule_tail(struct vcpu *prev)
vcpu_load_kernel_regs(current);
__ia64_per_cpu_var(current_psr_i_addr) = &current->domain->
shared_info->vcpu_info[current->vcpu_id].evtchn_upcall_mask;
+ __ia64_per_cpu_var(current_psr_ic_addr) = (int *)
+ (current->domain->arch.shared_info_va + XSI_PSR_IC_OFS);
}
}
@@ -159,6 +162,8 @@ if (!i--) { i = 1000000; printk("+"); }
vcpu_pend_timer(current);
__ia64_per_cpu_var(current_psr_i_addr) = &nd->shared_info->
vcpu_info[current->vcpu_id].evtchn_upcall_mask;
+ __ia64_per_cpu_var(current_psr_ic_addr) =
+ (int *)(nd->arch.shared_info_va + XSI_PSR_IC_OFS);
} else {
/* When switching to idle domain, only need to disable vhpt
* walker. Then all accesses happen within idle context will
@@ -167,6 +172,7 @@ if (!i--) { i = 1000000; printk("+"); }
pta = ia64_get_pta();
ia64_set_pta(pta & ~VHPT_ENABLED);
__ia64_per_cpu_var(current_psr_i_addr) = NULL;
+ __ia64_per_cpu_var(current_psr_ic_addr) = NULL;
}
}
local_irq_restore(spsr);
@@ -304,7 +310,7 @@ int arch_domain_create(struct domain *d)
int arch_domain_create(struct domain *d)
{
// the following will eventually need to be negotiated dynamically
- d->arch.shared_info_va = SHAREDINFO_ADDR;
+ d->arch.shared_info_va = DEFAULT_SHAREDINFO_ADDR;
d->arch.breakimm = 0x1000;
if (is_idle_domain(d))
@@ -513,6 +519,41 @@ void build_physmap_table(struct domain *
}
d->arch.physmap_built = 1;
}
+
+unsigned long
+domain_set_shared_info_va (unsigned long va)
+{
+ struct vcpu *v = current;
+ struct domain *d = v->domain;
+ struct vcpu *v1;
+
+ /* Check virtual address:
+ must belong to region 7,
+ must be 64Kb aligned,
+ must not be within Xen virtual space. */
+ if ((va >> 61) != 7
+ || (va & 0xffffUL) != 0
+ || (va >= HYPERVISOR_VIRT_START && va < HYPERVISOR_VIRT_END))
+ panic_domain (NULL, "%s: bad va (0x%016lx)\n", __func__, va);
+
+ /* Note: this doesn't work well if other cpus are already running.
+ However this is part of the spec :-) */
+ printf ("Domain set shared_info_va to 0x%016lx\n", va);
+ d->arch.shared_info_va = va;
+
+ for_each_vcpu (d, v1) {
+ VCPU(v1, interrupt_mask_addr) =
+ (unsigned char *)va + INT_ENABLE_OFFSET(v1);
+ }
+
+ __ia64_per_cpu_var(current_psr_ic_addr) = (int *)(va + XSI_PSR_IC_OFS);
+
+ /* Remap the shared pages. */
+ set_one_rr (7UL << 61, PSCB(v,rrs[7]));
+
+ return 0;
+}
+
// remove following line if not privifying in memory
//#define HAVE_PRIVIFY_MEMORY
diff -r 0e5635d68de3 -r bf396988059e xen/arch/ia64/xen/faults.c
--- a/xen/arch/ia64/xen/faults.c Wed Jun 21 11:17:08 2006 -0600
+++ b/xen/arch/ia64/xen/faults.c Fri Jun 23 09:46:39 2006 -0600
@@ -118,7 +118,7 @@ void reflect_interruption(unsigned long
regs->cr_iip = ((unsigned long) PSCBX(v,iva) + vector) & ~0xffUL;
regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
- regs->r31 = XSI_IPSR;
+ regs->r31 = current->domain->arch.shared_info_va + XSI_IPSR_OFS;
v->vcpu_info->evtchn_upcall_mask = 1;
PSCB(v,interrupt_collection_enabled) = 0;
@@ -172,7 +172,7 @@ void reflect_event(struct pt_regs *regs)
regs->cr_iip = v->arch.event_callback_ip;
regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
- regs->r31 = XSI_IPSR;
+ regs->r31 = current->domain->arch.shared_info_va + XSI_IPSR_OFS;
v->vcpu_info->evtchn_upcall_mask = 1;
PSCB(v,interrupt_collection_enabled) = 0;
diff -r 0e5635d68de3 -r bf396988059e xen/arch/ia64/xen/hypercall.c
--- a/xen/arch/ia64/xen/hypercall.c Wed Jun 21 11:17:08 2006 -0600
+++ b/xen/arch/ia64/xen/hypercall.c Fri Jun 23 09:46:39 2006 -0600
@@ -267,6 +267,9 @@ fw_hypercall (struct pt_regs *regs)
case FW_HYPERCALL_IPI:
fw_hypercall_ipi (regs);
break;
+ case FW_HYPERCALL_SET_SHARED_INFO_VA:
+ regs->r8 = domain_set_shared_info_va (regs->r28);
+ break;
case FW_HYPERCALL_FPSWA:
fpswa_ret = fw_hypercall_fpswa (v);
regs->r8 = fpswa_ret.status;
diff -r 0e5635d68de3 -r bf396988059e xen/arch/ia64/xen/hyperprivop.S
--- a/xen/arch/ia64/xen/hyperprivop.S Wed Jun 21 11:17:08 2006 -0600
+++ b/xen/arch/ia64/xen/hyperprivop.S Fri Jun 23 09:46:39 2006 -0600
@@ -304,9 +304,13 @@ ENTRY(hyper_ssm_i)
add r24=r24,r23;;
mov cr.iip=r24;;
// OK, now all set to go except for switch to virtual bank0
- mov r30=r2; mov r29=r3;;
+ mov r30=r2
+ mov r29=r3
+ mov r28=r4
+ ;;
adds r2=XSI_BANK1_R16_OFS-XSI_PSR_IC_OFS,r18;
adds r3=(XSI_BANK1_R16_OFS+8)-XSI_PSR_IC_OFS,r18;;
+ adds r4=XSI_IPSR_OFS-XSI_PSR_IC_OFS,r18
bsw.1;;
// FIXME?: ar.unat is not really handled correctly,
// but may not matter if the OS is NaT-clean
@@ -326,9 +330,11 @@ ENTRY(hyper_ssm_i)
.mem.offset 8,0; st8.spill [r3]=r29,16 ;;
.mem.offset 0,0; st8.spill [r2]=r30,16;
.mem.offset 8,0; st8.spill [r3]=r31,16 ;;
- movl r31=XSI_IPSR;;
+ mov r31=r4
bsw.0 ;;
- mov r2=r30; mov r3=r29;;
+ mov r2=r30
+ mov r3=r29
+ mov r4=r28
adds r20=XSI_BANKNUM_OFS-XSI_PSR_IC_OFS,r18 ;;
st4 [r20]=r0 ;;
mov pr=r31,-1 ;;
@@ -372,7 +378,10 @@ GLOBAL_ENTRY(fast_tick_reflect)
st8 [r20]=r21;;
#endif
// vcpu_pend_timer(current)
- movl r18=XSI_PSR_IC;;
+ movl r18=THIS_CPU(current_psr_ic_addr)
+ ;;
+ ld8 r18=[r18]
+ ;;
adds r20=XSI_ITV_OFS-XSI_PSR_IC_OFS,r18 ;;
ld8 r20=[r20];;
cmp.eq p6,p0=r20,r0 // if cr.itv==0 done
@@ -481,12 +490,17 @@ GLOBAL_ENTRY(fast_tick_reflect)
add r24=r24,r23;;
mov cr.iip=r24;;
// OK, now all set to go except for switch to virtual bank0
- mov r30=r2; mov r29=r3;;
+ mov r30=r2
+ mov r29=r3
+ mov r27=r4
#ifdef HANDLE_AR_UNAT
mov r28=ar.unat;
#endif
- adds r2=XSI_BANK1_R16_OFS-XSI_PSR_IC_OFS,r18;
- adds r3=(XSI_BANK1_R16_OFS+8)-XSI_PSR_IC_OFS,r18;;
+ ;;
+ adds r2=XSI_BANK1_R16_OFS-XSI_PSR_IC_OFS,r18
+ adds r3=(XSI_BANK1_R16_OFS+8)-XSI_PSR_IC_OFS,r18
+ adds r4=XSI_IPSR_OFS-XSI_PSR_IC_OFS,r18
+ ;;
bsw.1;;
.mem.offset 0,0; st8.spill [r2]=r16,16;
.mem.offset 8,0; st8.spill [r3]=r17,16 ;;
@@ -506,28 +520,32 @@ GLOBAL_ENTRY(fast_tick_reflect)
.mem.offset 8,0; st8.spill [r3]=r31,16 ;;
#ifdef HANDLE_AR_UNAT
// r16~r23 are preserved regsin bank0 regs, we need to restore them,
- // r24~r31 are scratch regs, we don't need to handle NaT bit,
- // because OS handler must assign it before access it
- ld8 r16=[r2],16;
- ld8 r17=[r3],16;;
- ld8 r18=[r2],16;
- ld8 r19=[r3],16;;
- ld8 r20=[r2],16;
- ld8 r21=[r3],16;;
- ld8 r22=[r2],16;
- ld8 r23=[r3],16;;
-#endif
- movl r31=XSI_IPSR;;
- bsw.0 ;;
- mov r24=ar.unat;
- mov r2=r30; mov r3=r29;;
+ // r24~r31 are scratch regs, we don't need to handle NaT bit,
+ // because OS handler must assign it before access it
+ ld8 r16=[r2],16;
+ ld8 r17=[r3],16;;
+ ld8 r18=[r2],16;
+ ld8 r19=[r3],16;;
+ ld8 r20=[r2],16;
+ ld8 r21=[r3],16;;
+ ld8 r22=[r2],16;
+ ld8 r23=[r3],16;;
+#endif
+ mov r31=r4
+ ;;
+ bsw.0 ;;
+ mov r24=ar.unat;
+ mov r2=r30
+ mov r3=r29
+ mov r4=r27
#ifdef HANDLE_AR_UNAT
- mov ar.unat=r28;
-#endif
- adds r25=XSI_B1NATS_OFS-XSI_PSR_IC_OFS,r18 ;
- adds r20=XSI_BANKNUM_OFS-XSI_PSR_IC_OFS,r18 ;;
- st8 [r25]=r24;
- st4 [r20]=r0 ;;
+ mov ar.unat=r28;
+#endif
+ ;;
+ adds r25=XSI_B1NATS_OFS-XSI_PSR_IC_OFS,r18 ;
+ adds r20=XSI_BANKNUM_OFS-XSI_PSR_IC_OFS,r18 ;;
+ st8 [r25]=r24;
+ st4 [r20]=r0 ;;
fast_tick_reflect_done:
mov pr=r31,-1 ;;
rfi
@@ -659,12 +677,16 @@ ENTRY(fast_reflect)
add r20=r20,r23;;
mov cr.iip=r20;;
// OK, now all set to go except for switch to virtual bank0
- mov r30=r2; mov r29=r3;;
+ mov r30=r2
+ mov r29=r3
#ifdef HANDLE_AR_UNAT
mov r28=ar.unat;
#endif
+ mov r27=r4
adds r2=XSI_BANK1_R16_OFS-XSI_PSR_IC_OFS,r18;
- adds r3=(XSI_BANK1_R16_OFS+8)-XSI_PSR_IC_OFS,r18;;
+ adds r3=(XSI_BANK1_R16_OFS+8)-XSI_PSR_IC_OFS,r18
+ adds r4=XSI_IPSR_OFS-XSI_PSR_IC_OFS,r18
+ ;;
bsw.1;;
.mem.offset 0,0; st8.spill [r2]=r16,16;
.mem.offset 8,0; st8.spill [r3]=r17,16 ;;
@@ -687,24 +709,28 @@ ENTRY(fast_reflect)
// r24~r31 are scratch regs, we don't need to handle NaT bit,
// because OS handler must assign it before access it
ld8 r16=[r2],16;
- ld8 r17=[r3],16;;
- ld8 r18=[r2],16;
- ld8 r19=[r3],16;;
+ ld8 r17=[r3],16;;
+ ld8 r18=[r2],16;
+ ld8 r19=[r3],16;;
ld8 r20=[r2],16;
- ld8 r21=[r3],16;;
- ld8 r22=[r2],16;
- ld8 r23=[r3],16;;
-#endif
- movl r31=XSI_IPSR;;
+ ld8 r21=[r3],16;;
+ ld8 r22=[r2],16;
+ ld8 r23=[r3],16;;
+#endif
+ mov r31=r4
+ ;;
bsw.0 ;;
- mov r24=ar.unat;
- mov r2=r30; mov r3=r29;;
+ mov r24=ar.unat;
+ mov r2=r30
+ mov r3=r29
#ifdef HANDLE_AR_UNAT
mov ar.unat=r28;
#endif
- adds r25=XSI_B1NATS_OFS-XSI_PSR_IC_OFS,r18 ;
+ mov r4=r27
+ ;;
+ adds r25=XSI_B1NATS_OFS-XSI_PSR_IC_OFS,r18 ;
adds r20=XSI_BANKNUM_OFS-XSI_PSR_IC_OFS,r18 ;;
- st8 [r25]=r24;
+ st8 [r25]=r24;
st4 [r20]=r0 ;;
mov pr=r31,-1 ;;
rfi
@@ -732,7 +758,8 @@ GLOBAL_ENTRY(fast_access_reflect)
extr.u r21=r30,IA64_PSR_CPL0_BIT,2 ;;
cmp.eq p7,p0=r21,r0
(p7) br.spnt.few dispatch_reflection ;;
- movl r18=XSI_PSR_IC;;
+ movl r18=THIS_CPU(current_psr_ic_addr);;
+ ld8 r18=[r18];;
ld4 r21=[r18];;
cmp.eq p7,p0=r0,r21
(p7) br.spnt.few dispatch_reflection ;;
@@ -1043,8 +1070,8 @@ 1:
// validate vcr.iip, if in Xen range, do it the slow way
adds r20=XSI_IIP_OFS-XSI_PSR_IC_OFS,r18 ;;
ld8 r22=[r20];;
- movl r23=XEN_VIRT_SPACE_LOW
- movl r24=XEN_VIRT_SPACE_HIGH ;;
+ movl r23=HYPERVISOR_VIRT_START
+ movl r24=HYPERVISOR_VIRT_END;;
cmp.ltu p0,p7=r22,r23 ;; // if !(iip<low) &&
(p7) cmp.geu p0,p7=r22,r24 ;; // !(iip>=high)
(p7) br.spnt.few dispatch_break_fault ;;
diff -r 0e5635d68de3 -r bf396988059e xen/arch/ia64/xen/ivt.S
--- a/xen/arch/ia64/xen/ivt.S Wed Jun 21 11:17:08 2006 -0600
+++ b/xen/arch/ia64/xen/ivt.S Fri Jun 23 09:46:39 2006 -0600
@@ -508,10 +508,9 @@ late_alt_dtlb_miss:
movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
mov r21=cr.ipsr
;;
-#else
#endif
#ifdef CONFIG_DISABLE_VHPT
- shr.u r22=r16,61 // get the region number into r21
+ shr.u r22=r16,61 // get the region into r22
;;
cmp.gt p8,p0=6,r22 // access to region 0-5
;;
@@ -992,7 +991,9 @@ ENTRY(break_fault)
cmp.eq p7,p0=r17,r18 ;;
(p7) br.spnt.few dispatch_break_fault ;;
#endif
- movl r18=XSI_PSR_IC
+ movl r18=THIS_CPU(current_psr_ic_addr)
+ ;;
+ ld8 r18=[r18]
;;
ld4 r19=[r18]
;;
diff -r 0e5635d68de3 -r bf396988059e xen/arch/ia64/xen/vcpu.c
--- a/xen/arch/ia64/xen/vcpu.c Wed Jun 21 11:17:08 2006 -0600
+++ b/xen/arch/ia64/xen/vcpu.c Fri Jun 23 09:46:39 2006 -0600
@@ -1354,7 +1354,7 @@ check_xen_space_overlap (const char *fun
base &= ~(page_size - 1);
/* FIXME: ideally an MCA should be generated... */
- if (range_overlap (XEN_VIRT_SPACE_LOW, XEN_VIRT_SPACE_HIGH,
+ if (range_overlap (HYPERVISOR_VIRT_START, HYPERVISOR_VIRT_END,
base, base + page_size))
panic_domain (NULL, "%s on Xen virtual space (%lx)\n",
func, base);
diff -r 0e5635d68de3 -r bf396988059e xen/arch/ia64/xen/xenasm.S
--- a/xen/arch/ia64/xen/xenasm.S Wed Jun 21 11:17:08 2006 -0600
+++ b/xen/arch/ia64/xen/xenasm.S Fri Jun 23 09:46:39 2006 -0600
@@ -10,7 +10,8 @@
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/vhpt.h>
-
+#include <public/arch-ia64.h>
+
// Change rr7 to the passed value while ensuring
// Xen is mapped into the new region.
#define PSR_BITS_TO_CLEAR \
@@ -140,8 +141,8 @@ 1:
;;
itr.d dtr[r21]=r23 // wire in new mapping...
- // Map for arch_vcpu_info_t
- movl r22=XSI_OFS
+ // Map mapped_regs
+ mov r22=XMAPPEDREGS_OFS
mov r24=PAGE_SHIFT<<2
;;
add r22=r22,in3
@@ -150,7 +151,7 @@ 1:
or r23=loc7,r25 // construct PA | page properties
mov cr.itir=r24
mov cr.ifa=r22
- mov r21=IA64_TR_ARCH_INFO
+ mov r21=IA64_TR_MAPPED_REGS
;;
itr.d dtr[r21]=r23 // wire in new mapping...
@@ -239,19 +240,24 @@ END(__get_domain_bundle)
END(__get_domain_bundle)
GLOBAL_ENTRY(dorfirfi)
- movl r16 = XSI_IIP
- movl r17 = XSI_IPSR
- movl r18 = XSI_IFS
+ // Read current vcpu shared info
+ movl r16=THIS_CPU(current_psr_ic_addr)
+ ;;
+ ld8 r19 = [r16]
+ ;;
+ add r16 = XSI_IIP_OFS - XSI_PSR_IC_OFS, r19
+ add r17 = XSI_IPSR_OFS - XSI_PSR_IC_OFS, r19
+ add r18 = XSI_IFS_OFS - XSI_PSR_IC_OFS, r19
;;
ld8 r16 = [r16]
ld8 r17 = [r17]
ld8 r18 = [r18]
;;
- mov cr.iip=r16
- mov cr.ipsr=r17
- mov cr.ifs=r18
- ;;
- rfi
+ mov cr.iip=r16
+ mov cr.ipsr=r17
+ mov cr.ifs=r18
+ ;;
+ rfi
;;
END(dorfirfi)
diff -r 0e5635d68de3 -r bf396988059e xen/include/asm-ia64/dom_fw.h
--- a/xen/include/asm-ia64/dom_fw.h Wed Jun 21 11:17:08 2006 -0600
+++ b/xen/include/asm-ia64/dom_fw.h Fri Jun 23 09:46:39 2006 -0600
@@ -145,6 +145,9 @@
#define FW_HYPERCALL_FPSWA_PATCH_PADDR FW_HYPERCALL_PADDR(FW_HYPERCALL_FPSWA_PATCH_INDEX)
#define FW_HYPERCALL_FPSWA 0x500UL
+/* Set the shared_info base virtual address. */
+#define FW_HYPERCALL_SET_SHARED_INFO_VA 0x600UL
+
/* Hypercalls index bellow _FIRST_ARCH are reserved by Xen, while those above
are for the architecture.
Note: this limit was defined by Xen/ia64 (and not by Xen).
diff -r 0e5635d68de3 -r bf396988059e xen/include/asm-ia64/domain.h
--- a/xen/include/asm-ia64/domain.h Wed Jun 21 11:17:08 2006 -0600
+++ b/xen/include/asm-ia64/domain.h Fri Jun 23 09:46:39 2006 -0600
@@ -27,7 +27,7 @@ static inline int
static inline int
p2m_entry_retry(struct p2m_entry* entry)
{
- //XXX see lookup_domian_pte().
+ //XXX see lookup_domain_pte().
// NULL is set for invalid gpaddr for the time being.
if (entry->pte == NULL)
return 0;
@@ -40,6 +40,9 @@ extern void domain_relinquish_resources(
/* given a current domain metaphysical address, return the physical address */
extern unsigned long translate_domain_mpaddr(unsigned long mpaddr,
struct p2m_entry* entry);
+
+/* Set shared_info virtual address. */
+extern unsigned long domain_set_shared_info_va (unsigned long va);
/* Flush cache of domain d.
If sync_only is true, only synchronize I&D caches,
diff -r 0e5635d68de3 -r bf396988059e xen/include/asm-ia64/xenkregs.h
--- a/xen/include/asm-ia64/xenkregs.h Wed Jun 21 11:17:08 2006 -0600
+++ b/xen/include/asm-ia64/xenkregs.h Fri Jun 23 09:46:39 2006 -0600
@@ -6,7 +6,7 @@
*/
#define IA64_TR_SHARED_INFO 3 /* dtr3: page shared with domain */
#define IA64_TR_VHPT 4 /* dtr4: vhpt */
-#define IA64_TR_ARCH_INFO 5
+#define IA64_TR_MAPPED_REGS 5 /* dtr5: vcpu mapped regs */
#define IA64_TR_PERVP_VHPT 6
#define IA64_DTR_GUEST_KERNEL 7
#define IA64_ITR_GUEST_KERNEL 2
diff -r 0e5635d68de3 -r bf396988059e xen/include/asm-ia64/xensystem.h
--- a/xen/include/asm-ia64/xensystem.h Wed Jun 21 11:17:08 2006 -0600
+++ b/xen/include/asm-ia64/xensystem.h Fri Jun 23 09:46:39 2006 -0600
@@ -16,26 +16,20 @@
/* Define HV space hierarchy.
VMM memory space is protected by CPL for paravirtualized domains and
by VA for VTi domains. VTi imposes VA bit 60 != VA bit 59 for VMM. */
-#define XEN_VIRT_SPACE_LOW 0xe800000000000000
-#define XEN_VIRT_SPACE_HIGH 0xf800000000000000
-#define __IA64_UNCACHED_OFFSET 0xe800000000000000UL
-
-#define XEN_START_ADDR 0xf000000000000000
-#define HYPERVISOR_VIRT_START 0xf000000000000000
+#define HYPERVISOR_VIRT_START 0xe800000000000000
#define KERNEL_START 0xf000000004000000
-#define SHAREDINFO_ADDR 0xf100000000000000
-#define XSI_OFS PAGE_SIZE
-#define SHARED_ARCHINFO_ADDR (SHAREDINFO_ADDR + XSI_OFS)
-#define PERCPU_ADDR (SHAREDINFO_ADDR - PERCPU_PAGE_SIZE)
+#define DEFAULT_SHAREDINFO_ADDR 0xf100000000000000
+#define PERCPU_ADDR (DEFAULT_SHAREDINFO_ADDR - PERCPU_PAGE_SIZE)
#define VHPT_ADDR 0xf200000000000000
#ifdef CONFIG_VIRTUAL_FRAME_TABLE
#define VIRT_FRAME_TABLE_ADDR 0xf300000000000000
#define VIRT_FRAME_TABLE_END 0xf400000000000000
#endif
-#define XEN_END_ADDR 0xf400000000000000
+#define HYPERVISOR_VIRT_END 0xf800000000000000
-#define PAGE_OFFSET __IA64_UL_CONST(0xf000000000000000)
+#define PAGE_OFFSET __IA64_UL_CONST(0xf000000000000000)
+#define __IA64_UNCACHED_OFFSET 0xe800000000000000UL
#define IS_VMM_ADDRESS(addr) ((((addr) >> 60) ^ ((addr) >> 59)) & 1)
diff -r 0e5635d68de3 -r bf396988059e xen/include/public/arch-ia64.h
--- a/xen/include/public/arch-ia64.h Wed Jun 21 11:17:08 2006 -0600
+++ b/xen/include/public/arch-ia64.h Fri Jun 23 09:46:39 2006 -0600
@@ -380,13 +380,17 @@ DEFINE_XEN_GUEST_HANDLE(vcpu_guest_conte
#endif /* !__ASSEMBLY__ */
-/* Address of shared_info in domain virtual space. */
-#define XSI_BASE 0xf100000000000000
+/* Address of shared_info in domain virtual space.
+ This is the default address, for compatibility only. */
+#define XSI_BASE 0xf100000000000000
+
/* Size of the shared_info area (this is not related to page size). */
-#define XSI_LOG_SIZE 14
-#define XSI_SIZE (1 << XSI_LOG_SIZE)
+#define XSI_LOG_SIZE 14
+#define XSI_SIZE (1 << XSI_LOG_SIZE)
/* Log size of mapped_regs area (64 KB - only 4KB is used). */
-#define XASI_LOG_SIZE 16
+#define XMAPPEDREGS_LOG_SIZE 16
+/* Offset of XASI (Xen arch shared info) wrt XSI_BASE. */
+#define XMAPPEDREGS_OFS XSI_SIZE
/* Hyperprivops. */
#define HYPERPRIVOP_RFI 0x1
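
As background for the DEFINE_MAPPED_REG_OFS cleanup in asm-offsets.c and
asm-xsi-offsets.c above, the sketch below shows the usual asm-offsets mechanism
that macro relies on: each DEFINE() plants a "->SYM value" marker in the
compiler's assembly output, and a build-time script turns each marker into a
#define line. Everything here other than the DEFINE/DEFINE_MAPPED_REG_OFS
pattern itself (the struct, its fields, the concrete XMAPPEDREGS_OFS value) is a
simplified stand-in, not Xen's actual definitions.

    #include <stddef.h>

    /* Simplified stand-in for mapped_regs_t. */
    typedef struct { unsigned long ipsr; unsigned long iip; } fake_mapped_regs_t;

    /* Emits "->SYM value" into the generated .s file; a build-time script
     * later rewrites each marker as "#define SYM value". */
    #define DEFINE(sym, val) \
            asm volatile("\n->" #sym " %0 " #val : : "i" (val))

    /* Offset of the mapped-regs area from the shared_info base
     * (assumed equal to XSI_SIZE, i.e. 1 << 14). */
    #define XMAPPEDREGS_OFS (1 << 14)

    #define DEFINE_MAPPED_REG_OFS(sym, field) \
            DEFINE(sym, (XMAPPEDREGS_OFS + offsetof(fake_mapped_regs_t, field)))

    void foo(void)
    {
            DEFINE_MAPPED_REG_OFS(XSI_IPSR_OFS, ipsr);
            DEFINE_MAPPED_REG_OFS(XSI_IIP_OFS, iip);
    }

Compiling this file with "gcc -S" leaves one marker per DEFINE in the assembly
output, which is why only offsets (and no absolute addresses) need to be
generated once the shared_info base becomes settable at run time.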