# HG changeset patch
# User awilliam@xxxxxxxxxxx
# Node ID 27ccf13dc3b7c37eadcc84f2682995ab3afc2543
# Parent 00dd5eb7adc123cbbb4aabfafaad191777e1f3c7
[IA64] boot windows server 2003: support 8k guest pagesize

Make the hash VTLB support the 8K page size used by Windows.

Signed-off-by: Anthony Xu <anthony.xu@xxxxxxxxx>

[whitespace and masking cleanups]
Signed-off-by: Alex Williamson <alex.williamson@xxxxxx>
---
xen/arch/ia64/vmx/vmmu.c | 19 +++-
xen/arch/ia64/vmx/vmx_phy_mode.c | 47 ++++++-----
xen/arch/ia64/vmx/vmx_process.c | 32 ++++----
xen/arch/ia64/vmx/vtlb.c | 106 +++++++++++++--------------
xen/include/asm-ia64/linux-xen/asm/pgtable.h | 3
xen/include/asm-ia64/vmx_vcpu.h | 3
6 files changed, 116 insertions(+), 94 deletions(-)
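
For context, a minimal sketch (not part of the patch) of the address arithmetic that the fetch_code() hunk below introduces: when the guest mapping uses its own page size (e.g. Windows' 8K pages), the machine address is rebuilt from the entry's ps field rather than from Xen's PAGE_SHIFT. Names are illustrative; ppn is assumed to be in 4K units.

    #include <stdint.h>

    #define PSIZE(ps)  (1UL << (ps))

    struct soft_tlb_entry {              /* illustrative subset of thash_data_t */
        uint64_t ppn;                    /* physical page number, 4K units */
        uint64_t ps;                     /* log2 page size, e.g. 13 for 8K */
    };

    /* Mirror of: maddr = (tlb->ppn >> (tlb->ps - 12) << tlb->ps) |
     *                    (gip & (PSIZE(tlb->ps) - 1));                */
    static uint64_t entry_maddr(const struct soft_tlb_entry *e, uint64_t va)
    {
        uint64_t page_base = (e->ppn >> (e->ps - 12)) << e->ps;  /* align to e->ps */
        return page_base | (va & (PSIZE(e->ps) - 1));            /* add page offset */
    }
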
diff -r 00dd5eb7adc1 -r 27ccf13dc3b7 xen/arch/ia64/vmx/vmmu.c
--- a/xen/arch/ia64/vmx/vmmu.c Mon Jul 31 15:14:47 2006 -0600
+++ b/xen/arch/ia64/vmx/vmmu.c Tue Aug 01 14:44:04 2006 -0600
@@ -316,7 +316,7 @@ fetch_code(VCPU *vcpu, u64 gip, u64 *cod
u64 gpip=0; // guest physical IP
u64 *vpa;
thash_data_t *tlb;
- u64 mfn;
+ u64 mfn, maddr;
struct page_info* page;
again:
@@ -333,11 +333,14 @@ fetch_code(VCPU *vcpu, u64 gip, u64 *cod
if( gpip){
mfn = gmfn_to_mfn(vcpu->domain, gpip >>PAGE_SHIFT);
if( mfn == INVALID_MFN ) panic_domain(vcpu_regs(vcpu),"fetch_code: invalid memory\n");
+ maddr = (mfn << PAGE_SHIFT) | (gpip & (PAGE_SIZE - 1));
}else{
tlb = vhpt_lookup(gip);
if( tlb == NULL)
panic_domain(vcpu_regs(vcpu),"No entry found in ITLB and DTLB\n");
mfn = tlb->ppn >> (PAGE_SHIFT - ARCH_PAGE_SHIFT);
+ maddr = (tlb->ppn >> (tlb->ps - 12) << tlb->ps) |
+ (gip & (PSIZE(tlb->ps) - 1));
}
page = mfn_to_page(mfn);
@@ -349,7 +352,7 @@ fetch_code(VCPU *vcpu, u64 gip, u64 *cod
}
goto again;
}
- vpa = (u64 *)__va((mfn << PAGE_SHIFT) | (gip & (PAGE_SIZE - 1)));
+ vpa = (u64 *)__va(maddr);
*code1 = *vpa++;
*code2 = *vpa;
@@ -371,6 +374,7 @@ IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UIN
return IA64_FAULT;
}
#endif //VTLB_DEBUG
+ pte &= ~PAGE_FLAGS_RV_MASK;
thash_purge_and_insert(vcpu, pte, itir, ifa);
return IA64_NO_FAULT;
}
@@ -390,6 +394,7 @@ IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, UIN
return IA64_FAULT;
}
#endif //VTLB_DEBUG
+ pte &= ~PAGE_FLAGS_RV_MASK;
gpfn = (pte & _PAGE_PPN_MASK)>> PAGE_SHIFT;
if (VMX_DOMAIN(vcpu) && __gpfn_is_io(vcpu->domain, gpfn))
pte |= VTLB_PTE_IO;
@@ -418,7 +423,8 @@ IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, u64
return IA64_FAULT;
}
thash_purge_entries(vcpu, va, ps);
-#endif
+#endif
+ pte &= ~PAGE_FLAGS_RV_MASK;
vcpu_get_rr(vcpu, va, &rid);
rid = rid& RR_RID_MASK;
p_itr = (thash_data_t *)&vcpu->arch.itrs[slot];
@@ -432,8 +438,8 @@ IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, u64
{
#ifdef VTLB_DEBUG
int index;
+#endif
u64 gpfn;
-#endif
u64 ps, va, rid;
thash_data_t * p_dtr;
ps = itir_ps(itir);
@@ -445,11 +451,12 @@ IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, u64
panic_domain(vcpu_regs(vcpu),"Tlb conflict!!");
return IA64_FAULT;
}
+#endif
+ pte &= ~PAGE_FLAGS_RV_MASK;
thash_purge_entries(vcpu, va, ps);
gpfn = (pte & _PAGE_PPN_MASK)>> PAGE_SHIFT;
- if(VMX_DOMAIN(vcpu) && _gpfn_is_io(vcpu->domain,gpfn))
+ if (VMX_DOMAIN(vcpu) && __gpfn_is_io(vcpu->domain, gpfn))
pte |= VTLB_PTE_IO;
-#endif
vcpu_get_rr(vcpu, va, &rid);
rid = rid& RR_RID_MASK;
p_dtr = (thash_data_t *)&vcpu->arch.dtrs[slot];
diff -r 00dd5eb7adc1 -r 27ccf13dc3b7 xen/arch/ia64/vmx/vmx_phy_mode.c
--- a/xen/arch/ia64/vmx/vmx_phy_mode.c Mon Jul 31 15:14:47 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_phy_mode.c Tue Aug 01 14:44:04 2006 -0600
@@ -110,9 +110,14 @@ physical_tlb_miss(VCPU *vcpu, u64 vadr)
physical_tlb_miss(VCPU *vcpu, u64 vadr)
{
u64 pte;
+ ia64_rr rr;
+ rr.rrval = ia64_get_rr(vadr);
pte = vadr& _PAGE_PPN_MASK;
- pte = pte | PHY_PAGE_WB;
- thash_purge_and_insert(vcpu, pte, (PAGE_SHIFT<<2), vadr);
+ if (vadr >> 63)
+ pte = pte | PHY_PAGE_UC;
+ else
+ pte = pte | PHY_PAGE_WB;
+ thash_vhpt_insert(vcpu, pte, (rr.ps << 2), vadr);
return;
}
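
A hedged sketch of the attribute choice made in physical_tlb_miss() above: on Itanium, a physical-mode access with address bit 63 set is treated as uncacheable, so the emulated mapping gets the UC attribute there and WB elsewhere. The encodings are taken from pgtable.h; PHY_PAGE_UC/PHY_PAGE_WB are assumed to wrap them.

    #include <stdint.h>

    #define _PAGE_MA_WB  (0x0 << 2)      /* write back (pgtable.h) */
    #define _PAGE_MA_UC  (0x4 << 2)      /* uncacheable (pgtable.h) */

    /* Pick the memory attribute for an emulated physical-mode mapping. */
    static uint64_t phy_mode_attr(uint64_t vadr)
    {
        return (vadr >> 63) ? _PAGE_MA_UC : _PAGE_MA_WB;
    }
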
@@ -120,19 +125,14 @@ void
void
vmx_init_all_rr(VCPU *vcpu)
{
- VMX(vcpu,vrr[VRN0]) = 0x38;
- VMX(vcpu,vrr[VRN1]) = 0x138;
- VMX(vcpu,vrr[VRN2]) = 0x238;
- VMX(vcpu,vrr[VRN3]) = 0x338;
- VMX(vcpu,vrr[VRN4]) = 0x438;
- VMX(vcpu,vrr[VRN5]) = 0x538;
- VMX(vcpu,vrr[VRN6]) = 0x660;
- VMX(vcpu,vrr[VRN7]) = 0x760;
-#if 0
- VMX(vcpu,mrr5) = vrrtomrr(vcpu, 0x38);
- VMX(vcpu,mrr6) = vrrtomrr(vcpu, 0x60);
- VMX(vcpu,mrr7) = vrrtomrr(vcpu, 0x60);
-#endif
+ VMX(vcpu, vrr[VRN0]) = 0x38;
+ VMX(vcpu, vrr[VRN1]) = 0x38;
+ VMX(vcpu, vrr[VRN2]) = 0x38;
+ VMX(vcpu, vrr[VRN3]) = 0x38;
+ VMX(vcpu, vrr[VRN4]) = 0x38;
+ VMX(vcpu, vrr[VRN5]) = 0x38;
+ VMX(vcpu, vrr[VRN6]) = 0x38;
+ VMX(vcpu, vrr[VRN7]) = 0x738;
}
extern void * pal_vaddr;
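
The new region-register values above can be read with the architectural layout (ve in bit 0, ps in bits 2..7, rid in bits 8..31): 0x38 is rid 0 with a 16K (ps = 14) preferred page size, and 0x738 is rid 7 with the same page size. A small sketch, for illustration only:

    #include <stdint.h>
    #include <stdio.h>

    static void decode_rr(uint64_t rrval)
    {
        uint64_t ve  = rrval & 1;                 /* VHPT walker enable */
        uint64_t ps  = (rrval >> 2) & 0x3f;       /* preferred page size (log2) */
        uint64_t rid = (rrval >> 8) & 0xffffff;   /* region id */
        printf("rr=%#llx: ve=%llu ps=%llu (%llu bytes) rid=%llu\n",
               (unsigned long long)rrval, (unsigned long long)ve,
               (unsigned long long)ps, 1ULL << ps, (unsigned long long)rid);
    }

    /* decode_rr(0x38);   -> ve=0 ps=14 (16384 bytes) rid=0
       decode_rr(0x738);  -> ve=0 ps=14 (16384 bytes) rid=7 */
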
@@ -208,18 +208,19 @@ switch_to_physical_rid(VCPU *vcpu)
switch_to_physical_rid(VCPU *vcpu)
{
UINT64 psr;
- ia64_rr phy_rr;
-
+ ia64_rr phy_rr, mrr;
/* Save original virtual mode rr[0] and rr[4] */
psr=ia64_clear_ic();
phy_rr.rrval = vcpu->domain->arch.metaphysical_rr0;
-// phy_rr.ps = EMUL_PHY_PAGE_SHIFT;
+ mrr.rrval = ia64_get_rr(VRN0 << VRN_SHIFT);
+ phy_rr.ps = mrr.ps;
phy_rr.ve = 1;
ia64_set_rr(VRN0<<VRN_SHIFT, phy_rr.rrval);
ia64_srlz_d();
phy_rr.rrval = vcpu->domain->arch.metaphysical_rr4;
-// phy_rr.ps = EMUL_PHY_PAGE_SHIFT;
+ mrr.rrval = ia64_get_rr(VRN4 << VRN_SHIFT);
+ phy_rr.ps = mrr.ps;
phy_rr.ve = 1;
ia64_set_rr(VRN4<<VRN_SHIFT, phy_rr.rrval);
ia64_srlz_d();
@@ -262,6 +263,8 @@ switch_mm_mode(VCPU *vcpu, IA64_PSR old_
act = mm_switch_action(old_psr, new_psr);
switch (act) {
case SW_V2P:
+// printf("V -> P mode transition: (0x%lx -> 0x%lx)\n",
+// old_psr.val, new_psr.val);
vcpu->arch.old_rsc = regs->ar_rsc;
switch_to_physical_rid(vcpu);
/*
@@ -272,6 +275,8 @@ switch_mm_mode(VCPU *vcpu, IA64_PSR old_
vcpu->arch.mode_flags |= GUEST_IN_PHY;
break;
case SW_P2V:
+// printf("P -> V mode transition: (0x%lx -> 0x%lx)\n",
+// old_psr.val, new_psr.val);
switch_to_virtual_rid(vcpu);
/*
* recover old mode which is saved when entering
@@ -285,8 +290,8 @@ switch_mm_mode(VCPU *vcpu, IA64_PSR old_
old_psr.val);
break;
case SW_NOP:
- printf("No action required for mode transition: (0x%lx -> 0x%lx)\n",
- old_psr.val, new_psr.val);
+// printf("No action required for mode transition: (0x%lx -> 0x%lx)\n",
+// old_psr.val, new_psr.val);
break;
default:
/* Sanity check */
diff -r 00dd5eb7adc1 -r 27ccf13dc3b7 xen/arch/ia64/vmx/vmx_process.c
--- a/xen/arch/ia64/vmx/vmx_process.c Mon Jul 31 15:14:47 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_process.c Tue Aug 01 14:44:04 2006 -0600
@@ -273,21 +273,24 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r
// prepare_if_physical_mode(v);
if((data=vtlb_lookup(v, vadr,type))!=0){
-// gppa = (vadr&((1UL<<data->ps)-1))+(data->ppn>>(data->ps-12)<<data->ps);
-// if(v->domain!=dom0&&type==DSIDE_TLB && __gpfn_is_io(v->domain,gppa>>PAGE_SHIFT)){
- if(v->domain!=dom0 && data->io && type==DSIDE_TLB ){
- if(data->pl >= ((regs->cr_ipsr>>IA64_PSR_CPL0_BIT)&3)){
- gppa = (vadr&((1UL<<data->ps)-1))+(data->ppn>>(data->ps-12)<<data->ps);
- emulate_io_inst(v, gppa, data->ma);
- }else{
- vcpu_set_isr(v,misr.val);
- data_access_rights(v, vadr);
- }
- return IA64_FAULT;
- }
-
+ if (v->domain != dom0 && type == DSIDE_TLB) {
+ gppa = (vadr & ((1UL << data->ps) - 1)) +
+ (data->ppn >> (data->ps - 12) << data->ps);
+ if (__gpfn_is_io(v->domain, gppa >> PAGE_SHIFT)) {
+ if (data->pl >= ((regs->cr_ipsr >> IA64_PSR_CPL0_BIT) & 3))
+ emulate_io_inst(v, gppa, data->ma);
+ else {
+ vcpu_set_isr(v, misr.val);
+ data_access_rights(v, vadr);
+ }
+ return IA64_FAULT;
+ }
+ }
thash_vhpt_insert(v,data->page_flags, data->itir ,vadr);
+
}else if(type == DSIDE_TLB){
+ if (misr.sp)
+ return vmx_handle_lds(regs);
if(!vhpt_enabled(v, vadr, misr.rs?RSE_REF:DATA_REF)){
if(vpsr.ic){
vcpu_set_isr(v, misr.val);
@@ -306,7 +309,8 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r
} else{
vmx_vcpu_thash(v, vadr, &vhpt_adr);
if(!guest_vhpt_lookup(vhpt_adr, &pteval)){
- if (pteval & _PAGE_P){
+ if ((pteval & _PAGE_P) &&
+ ((pteval & _PAGE_MA_MASK) != _PAGE_MA_ST)) {
vcpu_get_rr(v, vadr, &rr);
itir = rr&(RR_RID_MASK | RR_PS_MASK);
thash_purge_and_insert(v, pteval, itir , vadr);
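
A minimal sketch (illustrative, not from the patch) of the new present/attribute test in vmx_hpw_miss() above: a guest VHPT entry is only handed to thash_purge_and_insert() when it is present and its memory attribute is not the software-reserved encoding _PAGE_MA_ST added to pgtable.h further below.

    #include <stdint.h>

    #define _PAGE_P        (1 << 0)       /* present bit */
    #define _PAGE_MA_MASK  (0x7 << 2)     /* memory attribute field, bits 2..4 */
    #define _PAGE_MA_ST    (0x1 << 2)     /* reserved for software (this patch) */

    /* May this guest pteval be inserted for the hardware walker? */
    static int pte_insertable(uint64_t pteval)
    {
        return (pteval & _PAGE_P) &&
               ((pteval & _PAGE_MA_MASK) != _PAGE_MA_ST);
    }
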
diff -r 00dd5eb7adc1 -r 27ccf13dc3b7 xen/arch/ia64/vmx/vtlb.c
--- a/xen/arch/ia64/vmx/vtlb.c Mon Jul 31 15:14:47 2006 -0600
+++ b/xen/arch/ia64/vmx/vtlb.c Tue Aug 01 14:44:04 2006 -0600
@@ -141,14 +141,18 @@ static void thash_recycle_cch(thash_cb_t
static void vmx_vhpt_insert(thash_cb_t *hcb, u64 pte, u64 itir, u64 ifa)
{
- u64 tag;
+ u64 tag ,len;
+ ia64_rr rr;
thash_data_t *head, *cch;
pte = pte & ~PAGE_FLAGS_RV_MASK;
-
+ rr.rrval = ia64_get_rr(ifa);
head = (thash_data_t *)ia64_thash(ifa);
tag = ia64_ttag(ifa);
if( INVALID_VHPT(head) ) {
+ len = head->len;
head->page_flags = pte;
+ head->len = len;
+ head->itir = rr.ps << 2;
head->etag = tag;
return;
}
@@ -160,10 +164,9 @@ static void vmx_vhpt_insert(thash_cb_t *
else{
cch = __alloc_chain(hcb);
}
- cch->page_flags=head->page_flags;
- cch->etag=head->etag;
- cch->next=head->next;
+ *cch = *head;
head->page_flags=pte;
+ head->itir = rr.ps << 2;
head->etag=tag;
head->next = cch;
head->len = cch->len+1;
@@ -210,7 +213,13 @@ u64 guest_vhpt_lookup(u64 iha, u64 *pte)
u64 guest_vhpt_lookup(u64 iha, u64 *pte)
{
u64 ret;
- vhpt_lookup(iha);
+ thash_data_t * data;
+ data = vhpt_lookup(iha);
+ if (data == NULL) {
+ data = vtlb_lookup(current, iha, DSIDE_TLB);
+ if (data != NULL)
+ thash_vhpt_insert(current, data->page_flags, data->itir ,iha);
+ }
asm volatile ("rsm psr.ic|psr.i;;"
"srlz.d;;"
"ld8.s r9=[%1];;"
@@ -231,10 +240,10 @@ u64 guest_vhpt_lookup(u64 iha, u64 *pte)
* purge software guest tlb
*/
-static void vtlb_purge(VCPU *v, u64 va, u64 ps)
+void vtlb_purge(VCPU *v, u64 va, u64 ps)
{
thash_cb_t *hcb = &v->arch.vtlb;
- thash_data_t *hash_table, *prev, *next;
+ thash_data_t *cur;
u64 start, end, size, tag, rid, def_size;
ia64_rr vrr;
vcpu_get_rr(v, va, &vrr.rrval);
@@ -244,23 +253,11 @@ static void vtlb_purge(VCPU *v, u64 va,
end = start + size;
def_size = PSIZE(vrr.ps);
while(start < end){
- hash_table = vsa_thash(hcb->pta, start, vrr.rrval, &tag);
- if(!INVALID_TLB(hash_table)){
- if(hash_table->etag == tag){
- hash_table->etag = 1UL<<63;
- }
- else{
- prev=hash_table;
- next=prev->next;
- while(next){
- if(next->etag == tag){
- next->etag = 1UL<<63;
- break;
- }
- prev=next;
- next=next->next;
- }
- }
+ cur = vsa_thash(hcb->pta, start, vrr.rrval, &tag);
+ while (cur) {
+ if (cur->etag == tag)
+ cur->etag = 1UL << 63;
+ cur = cur->next;
}
start += def_size;
}
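
The rewritten purge loops (here and in vhpt_purge() below) now invalidate every entry on the collision chain whose tag matches, instead of stopping at the first hit. A minimal sketch of the pattern, with an illustrative entry type:

    #include <stdint.h>

    struct chain_entry {                     /* illustrative subset of thash_data_t */
        uint64_t etag;
        struct chain_entry *next;
    };

    /* Invalidate every chained entry carrying 'tag' (bit 63 marks it invalid). */
    static void purge_chain(struct chain_entry *cur, uint64_t tag)
    {
        while (cur) {
            if (cur->etag == tag)
                cur->etag = 1UL << 63;
            cur = cur->next;
        }
    }
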
@@ -274,30 +271,23 @@ static void vhpt_purge(VCPU *v, u64 va,
static void vhpt_purge(VCPU *v, u64 va, u64 ps)
{
//thash_cb_t *hcb = &v->arch.vhpt;
- thash_data_t *hash_table, *prev, *next;
+ thash_data_t *cur;
u64 start, end, size, tag;
+ ia64_rr rr;
size = PSIZE(ps);
start = va & (-size);
end = start + size;
+ rr.rrval = ia64_get_rr(va);
+ size = PSIZE(rr.ps);
while(start < end){
- hash_table = (thash_data_t *)ia64_thash(start);
+ cur = (thash_data_t *)ia64_thash(start);
tag = ia64_ttag(start);
- if(hash_table->etag == tag ){
- hash_table->etag = 1UL<<63;
- }
- else{
- prev=hash_table;
- next=prev->next;
- while(next){
- if(next->etag == tag){
- next->etag = 1UL<<63;
- break;
- }
- prev=next;
- next=next->next;
- }
- }
- start += PAGE_SIZE;
+ while (cur) {
+ if (cur->etag == tag)
+ cur->etag = 1UL << 63;
+ cur = cur->next;
+ }
+ start += size;
}
machine_tlb_purge(va, ps);
}
@@ -349,7 +339,7 @@ void vtlb_insert(thash_cb_t *hcb, u64 pt
/* int flag; */
ia64_rr vrr;
/* u64 gppn, ppns, ppne; */
- u64 tag;
+ u64 tag, len;
vcpu_get_rr(current, va, &vrr.rrval);
#ifdef VTLB_DEBUG
if (vrr.ps != itir_ps(itir)) {
@@ -361,7 +351,9 @@ void vtlb_insert(thash_cb_t *hcb, u64 pt
#endif
hash_table = vsa_thash(hcb->pta, va, vrr.rrval, &tag);
if( INVALID_TLB(hash_table) ) {
+ len = hash_table->len;
hash_table->page_flags = pte;
+ hash_table->len = len;
hash_table->itir=itir;
hash_table->etag=tag;
return;
@@ -425,18 +417,23 @@ void thash_purge_entries(VCPU *v, u64 va
u64 translate_phy_pte(VCPU *v, u64 *pte, u64 itir, u64 va)
{
- u64 ps, addr;
+ u64 ps, ps_mask, paddr, maddr;
+// ia64_rr rr;
union pte_flags phy_pte;
ps = itir_ps(itir);
+ ps_mask = ~((1UL << ps) - 1);
phy_pte.val = *pte;
- addr = *pte;
- addr = ((addr & _PAGE_PPN_MASK)>>ps<<ps)|(va&((1UL<<ps)-1));
- addr = lookup_domain_mpa(v->domain, addr, NULL);
- if(addr & GPFN_IO_MASK){
+ paddr = *pte;
+ paddr = ((paddr & _PAGE_PPN_MASK) & ps_mask) | (va & ~ps_mask);
+ maddr = lookup_domain_mpa(v->domain, paddr, NULL);
+ if (maddr & GPFN_IO_MASK) {
*pte |= VTLB_PTE_IO;
return -1;
}
- phy_pte.ppn = addr >> ARCH_PAGE_SHIFT;
+// rr.rrval = ia64_get_rr(va);
+// ps = rr.ps;
+ maddr = ((maddr & _PAGE_PPN_MASK) & PAGE_MASK) | (paddr & ~PAGE_MASK);
+ phy_pte.ppn = maddr >> ARCH_PAGE_SHIFT;
return phy_pte.val;
}
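
A sketch (assuming Xen/ia64's default 16K PAGE_SHIFT and a 4K-unit ppn field) of the two granularities handled in translate_phy_pte() above: the guest-physical address keeps the offset within the guest's page size, while the machine address keeps the offset within Xen's page.

    #include <stdint.h>

    #define XEN_PAGE_SHIFT   14                              /* assumed 16K Xen pages */
    #define XEN_PAGE_MASK    (~((1UL << XEN_PAGE_SHIFT) - 1))
    #define ARCH_PAGE_SHIFT  12                              /* ppn counts 4K frames */

    /* Guest-physical address: guest page base (at page size 'ps') plus offset. */
    static uint64_t guest_paddr(uint64_t pte_ppn_bits, uint64_t va, uint64_t ps)
    {
        uint64_t ps_mask = ~((1UL << ps) - 1);
        return (pte_ppn_bits & ps_mask) | (va & ~ps_mask);
    }

    /* Machine address: machine page base (Xen page size) plus the same offset. */
    static uint64_t machine_pte_ppn(uint64_t maddr_page, uint64_t paddr)
    {
        uint64_t maddr = (maddr_page & XEN_PAGE_MASK) | (paddr & ~XEN_PAGE_MASK);
        return maddr >> ARCH_PAGE_SHIFT;                     /* back to ppn units */
    }
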
@@ -449,8 +446,13 @@ void thash_purge_and_insert(VCPU *v, u64
{
u64 ps;//, va;
u64 phy_pte;
+ ia64_rr vrr;
ps = itir_ps(itir);
-
+ vcpu_get_rr(current, ifa, &vrr.rrval);
+// if (vrr.ps != itir_ps(itir)) {
+// printf("not preferred ps with va: 0x%lx vrr.ps=%d ps=%ld\n",
+// ifa, vrr.ps, itir_ps(itir));
+// }
if(VMX_DOMAIN(v)){
/* Ensure WB attribute if pte is related to a normal mem page,
* which is required by vga acceleration since qemu maps shared
@@ -460,7 +462,7 @@ void thash_purge_and_insert(VCPU *v, u64
pte &= ~_PAGE_MA_MASK;
phy_pte = translate_phy_pte(v, &pte, itir, ifa);
- if(ps==PAGE_SHIFT){
+ if (vrr.ps <= PAGE_SHIFT) {
if(!(pte&VTLB_PTE_IO)){
vhpt_purge(v, ifa, ps);
vmx_vhpt_insert(&v->arch.vhpt, phy_pte, itir, ifa);
diff -r 00dd5eb7adc1 -r 27ccf13dc3b7 xen/include/asm-ia64/linux-xen/asm/pgtable.h
--- a/xen/include/asm-ia64/linux-xen/asm/pgtable.h Mon Jul 31 15:14:47 2006 -0600
+++ b/xen/include/asm-ia64/linux-xen/asm/pgtable.h Tue Aug 01 14:44:04 2006 -0600
@@ -38,6 +38,9 @@
#define _PAGE_P (1 << _PAGE_P_BIT) /* page present bit */
#define _PAGE_MA_WB (0x0 << 2) /* write back memory attribute */
+#ifdef XEN
+#define _PAGE_MA_ST (0x1 << 2) /* is reserved for software use */
+#endif
#define _PAGE_MA_UC (0x4 << 2) /* uncacheable memory attribute */
#define _PAGE_MA_UCE (0x5 << 2) /* UC exported attribute */
#define _PAGE_MA_WC (0x6 << 2) /* write coalescing memory attribute */
diff -r 00dd5eb7adc1 -r 27ccf13dc3b7 xen/include/asm-ia64/vmx_vcpu.h
--- a/xen/include/asm-ia64/vmx_vcpu.h Mon Jul 31 15:14:47 2006 -0600
+++ b/xen/include/asm-ia64/vmx_vcpu.h Tue Aug 01 14:44:04 2006 -0600
@@ -467,7 +467,8 @@ vrrtomrr(VCPU *v, unsigned long val)
rr.rrval=val;
rr.rid = rr.rid + v->arch.starting_rid;
- rr.ps = PAGE_SHIFT;
+ if (rr.ps > PAGE_SHIFT)
+ rr.ps = PAGE_SHIFT;
rr.ve = 1;
return vmMangleRID(rr.rrval);
/* Disable this rid allocation algorithm for now */
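
Finally, a sketch of the new clamping rule in vrrtomrr(): the guest's preferred page size is kept when it is no larger than Xen's page (so Windows' 8K, ps = 13, survives), and only larger sizes are forced down to PAGE_SHIFT. PAGE_SHIFT = 14 is an assumption (Xen/ia64's 16K default).

    #include <stdint.h>

    #define PAGE_SHIFT 14          /* assumed Xen/ia64 default (16K) */

    static uint64_t machine_rr_ps(uint64_t guest_ps)
    {
        return guest_ps > PAGE_SHIFT ? (uint64_t)PAGE_SHIFT : guest_ps;
    }

    /* machine_rr_ps(13) == 13  (8K guest pages kept)
       machine_rr_ps(16) == 14  (clamped to Xen's page size) */
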