# HG changeset patch
# User awilliam@xxxxxxxxxxx
# Node ID 685586518b2e8c5d52781ff3a4983df604cf43cb
# Parent 0c18c6009448284911dc6ec7c338051ed9471521
[IA64] Remove VHPT_ADDR
Remove VHPT_ADDR by mapping the VHPT into the Xen identity-mapping area,
plus some clean-ups.
Signed-off-by: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
---
xen/arch/ia64/linux-xen/entry.S  |  8 +++++---
xen/arch/ia64/xen/domain.c       |  6 +++---
xen/arch/ia64/xen/regionreg.c    |  4 ++--
xen/arch/ia64/xen/vhpt.c         | 10 +++++-----
xen/arch/ia64/xen/xenasm.S       | 32 ++++++++++++++++++++++++--------
xen/include/asm-ia64/vhpt.h      |  3 ++-
xen/include/asm-ia64/xensystem.h |  1 -
7 files changed, 41 insertions(+), 23 deletions(-)
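
To make the hunks below easier to follow: after this patch the VHPT is no longer reached through the fixed VHPT_ADDR window, but through the identity-mapped (__va) view of the table's machine address, so the PTA can be built directly from that address. A minimal C sketch of the resulting PTA value, assuming __va_ul() is the unsigned-long-returning variant of __va(); it mirrors the vhpt.h/vhpt.c hunks and is not a literal copy of vcpu_pta() or pervcpu_vhpt_alloc():

    /* Sketch only: PTA derived from the VHPT's machine address. */
    unsigned long pta_from_vhpt_maddr(unsigned long vhpt_maddr)
    {
        return __va_ul(vhpt_maddr)      /* base: identity-mapped VHPT VA */
               | (1UL << 8)             /* vf: long-format VHPT          */
               | (VHPT_SIZE_LOG2 << 2)  /* size                          */
               | VHPT_ENABLED;          /* ve                            */
    }
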
diff -r 0c18c6009448 -r 685586518b2e xen/arch/ia64/linux-xen/entry.S
--- a/xen/arch/ia64/linux-xen/entry.S Sat Oct 14 17:42:00 2006 -0600
+++ b/xen/arch/ia64/linux-xen/entry.S Sat Oct 14 17:52:09 2006 -0600
@@ -262,13 +262,15 @@ GLOBAL_ENTRY(ia64_switch_to)
#endif
rsm psr.ic // interrupts (psr.i) are already disabled here
movl r25=PAGE_KERNEL
+ movl r26 = IA64_GRANULE_SHIFT << 2
;;
srlz.d
or r23=r25,r20 // construct PA | page properties
- mov r25=IA64_GRANULE_SHIFT<<2
- ;;
- mov cr.itir=r25
+ ptr.d in0, r26 // to purge dtr[IA64_TR_VHPT]
+ ;;
+ mov cr.itir=r26
mov cr.ifa=in0 // VA of next task...
+ srlz.d
;;
mov r25=IA64_TR_CURRENT_STACK
#ifdef XEN
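
In the ia64_switch_to() hunk, r26 (IA64_GRANULE_SHIFT << 2) serves both as the ptr.d purge size and as the itir for the new stack mapping: because the VHPT now lives in the same identity-mapped area as task stacks, the incoming stack's granule may still hold a stale dtr[IA64_TR_VHPT] entry, so it is purged before IA64_TR_CURRENT_STACK is wired. A rough C rendering of that purge-then-insert sequence, using the ia64_ptrd()/ia64_srlz_d()/ia64_itr() helpers as they are used elsewhere in the ia64 tree (a sketch under those assumptions, not the actual switch path):

    static void wire_next_stack(unsigned long stack_va, unsigned long stack_pa)
    {
        /* drop whatever dtr (e.g. a stale IA64_TR_VHPT) covers this granule */
        ia64_ptrd(stack_va, IA64_GRANULE_SHIFT << 2);
        ia64_srlz_d();
        /* wire the granule-sized mapping for the new task's stack;
         * the pte mirrors "or r23=r25,r20" above (PA | PAGE_KERNEL) */
        ia64_itr(0x2 /* dtr */, IA64_TR_CURRENT_STACK, stack_va,
                 stack_pa | pgprot_val(PAGE_KERNEL), IA64_GRANULE_SHIFT);
    }
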
diff -r 0c18c6009448 -r 685586518b2e xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c Sat Oct 14 17:42:00 2006 -0600
+++ b/xen/arch/ia64/xen/domain.c Sat Oct 14 17:52:09 2006 -0600
@@ -118,13 +118,13 @@ void schedule_tail(struct vcpu *prev)
extern char ia64_ivt;
context_saved(prev);
+ ia64_disable_vhpt_walker();
if (VMX_DOMAIN(current)) {
vmx_do_launch(current);
migrate_timer(&current->arch.arch_vmx.vtm.vtm_timer,
current->processor);
} else {
ia64_set_iva(&ia64_ivt);
- ia64_disable_vhpt_walker();
load_region_regs(current);
ia64_set_pta(vcpu_pta(current));
vcpu_load_kernel_regs(current);
@@ -157,6 +157,8 @@ void context_switch(struct vcpu *prev, s
}
if (VMX_DOMAIN(next))
vmx_load_state(next);
+
+ ia64_disable_vhpt_walker();
/*ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next);*/
prev = ia64_switch_to(next);
@@ -176,7 +178,6 @@ void context_switch(struct vcpu *prev, s
nd = current->domain;
if (!is_idle_domain(nd)) {
- ia64_disable_vhpt_walker();
load_region_regs(current);
ia64_set_pta(vcpu_pta(current));
vcpu_load_kernel_regs(current);
@@ -192,7 +193,6 @@ void context_switch(struct vcpu *prev, s
* walker. Then all accesses happen within idle context will
* be handled by TR mapping and identity mapping.
*/
- ia64_disable_vhpt_walker();
__ia64_per_cpu_var(current_psr_i_addr) = NULL;
__ia64_per_cpu_var(current_psr_ic_addr) = NULL;
}
diff -r 0c18c6009448 -r 685586518b2e xen/arch/ia64/xen/regionreg.c
--- a/xen/arch/ia64/xen/regionreg.c Sat Oct 14 17:42:00 2006 -0600
+++ b/xen/arch/ia64/xen/regionreg.c Sat Oct 14 17:52:09 2006 -0600
@@ -17,7 +17,7 @@
#include <asm/vcpu.h>
/* Defined in xemasm.S */
-extern void ia64_new_rr7(unsigned long rid, void *shared_info, void *shared_arch_info, unsigned long shared_info_va, unsigned long p_vhpt);
+extern void ia64_new_rr7(unsigned long rid, void *shared_info, void *shared_arch_info, unsigned long shared_info_va, unsigned long va_vhpt);
/* RID virtualization mechanism is really simple: domains have less rid bits
than the host and the host rid space is shared among the domains. (Values
@@ -260,7 +260,7 @@ int set_one_rr(unsigned long rr, unsigne
} else if (rreg == 7) {
ia64_new_rr7(vmMangleRID(newrrv.rrval),v->domain->shared_info,
v->arch.privregs, v->domain->arch.shared_info_va,
- vcpu_vhpt_maddr(v));
+ __va_ul(vcpu_vhpt_maddr(v)));
} else {
set_rr(rr,newrrv.rrval);
}
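
ia64_new_rr7() now receives the identity-mapped virtual address of the VHPT rather than its machine address; inside xenasm.S the physical address is recovered simply by clearing the region bits again (the "dep r22=0,r15,60,4" in the hunk further down). A standalone illustration of that round trip; the 0xf000000000000000 region base and the sample machine address are assumptions for the demo, not values taken from this patch:

    #include <stdio.h>

    #define REGION_7_BASE  0xf000000000000000UL  /* assumed identity-map base */

    static unsigned long va_of_ma(unsigned long ma) { return ma | REGION_7_BASE; }  /* ~ __va_ul() */
    static unsigned long ma_of_va(unsigned long va) { return va & ~REGION_7_BASE; } /* ~ dep ...,60,4 */

    int main(void)
    {
        unsigned long vhpt_maddr = 0x0000000004008000UL;  /* example only */
        unsigned long va_vhpt = va_of_ma(vhpt_maddr);

        printf("va_vhpt   = 0x%016lx\n", va_vhpt);
        printf("recovered = 0x%016lx\n", ma_of_va(va_vhpt));
        return 0;
    }
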
diff -r 0c18c6009448 -r 685586518b2e xen/arch/ia64/xen/vhpt.c
--- a/xen/arch/ia64/xen/vhpt.c Sat Oct 14 17:42:00 2006 -0600
+++ b/xen/arch/ia64/xen/vhpt.c Sat Oct 14 17:52:09 2006 -0600
@@ -30,7 +30,7 @@ DEFINE_PER_CPU (unsigned long, vhpt_pend
DEFINE_PER_CPU (unsigned long, vhpt_pend);
static void
- __vhpt_flush(unsigned long vhpt_maddr)
+__vhpt_flush(unsigned long vhpt_maddr)
{
struct vhpt_lf_entry *v = (struct vhpt_lf_entry*)__va(vhpt_maddr);
int i;
@@ -158,8 +158,7 @@ pervcpu_vhpt_alloc(struct vcpu *v)
v->arch.pta.ve = 1; // enable vhpt
v->arch.pta.size = VHPT_SIZE_LOG2;
v->arch.pta.vf = 1; // long format
- //v->arch.pta.base = __va(v->arch.vhpt_maddr) >> 15;
- v->arch.pta.base = VHPT_ADDR >> 15;
+ v->arch.pta.base = __va_ul(v->arch.vhpt_maddr) >> 15;
vhpt_erase(v->arch.vhpt_maddr);
smp_mb(); // per vcpu vhpt may be used by another physical cpu.
@@ -284,7 +283,8 @@ __flush_vhpt_range(unsigned long vhpt_ma
while ((long)addr_range > 0) {
/* Get the VHPT entry. */
- unsigned int off = ia64_thash(vadr) - VHPT_ADDR;
+ unsigned int off = ia64_thash(vadr) -
+ __va_ul(vcpu_vhpt_maddr(current));
struct vhpt_lf_entry *v = vhpt_base + off;
v->ti_tag = INVALID_TI_TAG;
addr_range -= PAGE_SIZE;
@@ -444,7 +444,7 @@ void domain_flush_tlb_vhpt(struct domain
void domain_flush_tlb_vhpt(struct domain *d)
{
/* Very heavy... */
- if (HAS_PERVCPU_VHPT(d) /* || VMX_DOMAIN(v) */)
+ if (HAS_PERVCPU_VHPT(d) || d->arch.is_vti)
on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1, 1);
else
on_each_cpu((void (*)(void *))flush_tlb_vhpt_all, d, 1, 1);
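
domain_flush_tlb_vhpt() now purges the whole TLB on every CPU whenever the domain uses per-vcpu VHPTs or is a VTi domain, since there is no single shared per-CPU VHPT to scrub in those cases; otherwise each CPU just invalidates its own VHPT entries. For reference, invalidating a long-format VHPT amounts to the loop below, with field names as in the hunks above and VHPT_NUM_ENTRIES assumed to be the entry count from vhpt.h (a sketch, not the literal __vhpt_flush() body):

    static void vhpt_flush_sketch(unsigned long vhpt_maddr)
    {
        struct vhpt_lf_entry *v = (struct vhpt_lf_entry *)__va(vhpt_maddr);
        int i;

        for (i = 0; i < VHPT_NUM_ENTRIES; i++, v++)
            v->ti_tag = INVALID_TI_TAG;  /* tagged invalid: never matches */
    }
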
diff -r 0c18c6009448 -r 685586518b2e xen/arch/ia64/xen/xenasm.S
--- a/xen/arch/ia64/xen/xenasm.S Sat Oct 14 17:42:00 2006 -0600
+++ b/xen/arch/ia64/xen/xenasm.S Sat Oct 14 17:52:09 2006 -0600
@@ -26,10 +26,11 @@
// void *shared_info, /* in1 */
// void *shared_arch_info, /* in2 */
// unsigned long shared_info_va, /* in3 */
-// unsigned long p_vhpt) /* in4 */
+// unsigned long va_vhpt) /* in4 */
//Local usage:
// loc0=rp, loc1=ar.pfs, loc2=percpu_paddr, loc3=psr, loc4=ar.rse
// loc5=pal_vaddr, loc6=xen_paddr, loc7=shared_archinfo_paddr,
+// r16, r19, r20 are used by ia64_switch_mode_{phys, virt}()
GLOBAL_ENTRY(ia64_new_rr7)
// FIXME? not sure this unwind statement is correct...
.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(1)
@@ -118,16 +119,31 @@ 1:
// VHPT
#if VHPT_ENABLED
- mov r24=VHPT_SIZE_LOG2<<2
- movl r22=VHPT_ADDR
+#if IA64_GRANULE_SHIFT < VHPT_SIZE_LOG2
+#error "it must be that VHPT_SIZE_LOG2 <= IA64_GRANULE_SHIFT"
+#endif
+ // wire the VHPT TR unless it overlaps with KERNEL_TR or IA64_TR_CURRENT_STACK
+ dep r14=0,in4,0,KERNEL_TR_PAGE_SHIFT
+ dep r15=0,in4,0,IA64_GRANULE_SHIFT
+ dep r21=0,r13,0,IA64_GRANULE_SHIFT
+ ;;
+ cmp.eq p7,p0=r17,r14
+ cmp.eq p8,p0=r15,r21
+(p7) br.cond.sptk .vhpt_overlaps
+(p8) br.cond.sptk .vhpt_overlaps
mov r21=IA64_TR_VHPT
- ;;
- ptr.d r22,r24
- or r23=in4,r26 // construct PA | page properties
- mov cr.itir=r24
- mov cr.ifa=r22
+ dep r22=0,r15,60,4 // physical address of
+ // va_vhpt & ~(IA64_GRANULE_SIZE - 1)
+ mov r24=IA64_GRANULE_SHIFT<<2
+ ;;
+ ptr.d r15,r24
+ or r23=r22,r26 // construct PA | page properties
+ mov cr.itir=r24
+ mov cr.ifa=r15
+ srlz.d
;;
itr.d dtr[r21]=r23 // wire in new mapping...
+.vhpt_overlaps:
#endif
// Shared info
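
The new code in ia64_new_rr7() only wires dtr[IA64_TR_VHPT] when the VHPT's granule is not already covered by a pinned translation: r17 (set earlier in the routine, presumably the kernel-TR virtual address, not visible in this hunk) is compared against va_vhpt truncated to KERNEL_TR_PAGE_SHIFT, and the current stack pointer (r13) against va_vhpt, both truncated to IA64_GRANULE_SHIFT. A standalone C model of that test; the shift widths are parameters here because the real values come from the headers:

    #include <stdbool.h>
    #include <stdint.h>

    /* True when the VHPT already lies inside an existing pinned mapping
     * (kernel TR page or current-stack granule); in that case inserting
     * IA64_TR_VHPT is skipped, as in the branch to .vhpt_overlaps above. */
    bool vhpt_tr_overlaps(uint64_t va_vhpt, uint64_t kernel_tr_va,
                          uint64_t cur_stack_va,
                          unsigned kernel_tr_page_shift, unsigned granule_shift)
    {
        uint64_t vhpt_ktr  = va_vhpt      & ~((1UL << kernel_tr_page_shift) - 1);
        uint64_t vhpt_gran = va_vhpt      & ~((1UL << granule_shift) - 1);
        uint64_t stk_gran  = cur_stack_va & ~((1UL << granule_shift) - 1);

        return vhpt_ktr == kernel_tr_va || vhpt_gran == stk_gran;
    }
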
diff -r 0c18c6009448 -r 685586518b2e xen/include/asm-ia64/vhpt.h
--- a/xen/include/asm-ia64/vhpt.h Sat Oct 14 17:42:00 2006 -0600
+++ b/xen/include/asm-ia64/vhpt.h Sat Oct 14 17:52:09 2006 -0600
@@ -79,7 +79,8 @@ vcpu_pta(struct vcpu* v)
if (HAS_PERVCPU_VHPT(v->domain))
return v->arch.pta.val;
#endif
- return VHPT_ADDR | (1 << 8) | (VHPT_SIZE_LOG2 << 2) | VHPT_ENABLED;
+ return __va_ul(__get_cpu_var(vhpt_paddr)) | (1 << 8) |
+ (VHPT_SIZE_LOG2 << 2) | VHPT_ENABLED;
}
#endif /* !__ASSEMBLY */
diff -r 0c18c6009448 -r 685586518b2e xen/include/asm-ia64/xensystem.h
--- a/xen/include/asm-ia64/xensystem.h Sat Oct 14 17:42:00 2006 -0600
+++ b/xen/include/asm-ia64/xensystem.h Sat Oct 14 17:52:09 2006 -0600
@@ -22,7 +22,6 @@
#define GATE_ADDR KERNEL_START
#define DEFAULT_SHAREDINFO_ADDR 0xf100000000000000
#define PERCPU_ADDR (DEFAULT_SHAREDINFO_ADDR - PERCPU_PAGE_SIZE)
-#define VHPT_ADDR 0xf200000000000000
#ifdef CONFIG_VIRTUAL_FRAME_TABLE
#define VIRT_FRAME_TABLE_ADDR 0xf300000000000000
#define VIRT_FRAME_TABLE_END 0xf400000000000000