# HG changeset patch
# User djm@xxxxxxxxxxxxxxx
# Node ID 52d2d520857548ea216aae702ebbd57772b6d781
# Parent 6dadf4d93ee31ec2c6470176d4d617f132f1b7f9
Merge latest xen-unstable into xen-ia64-unstable
diff -r 6dadf4d93ee3 -r 52d2d5208575 xen/arch/ia64/asm-offsets.c
--- a/xen/arch/ia64/asm-offsets.c Tue Sep 13 20:20:02 2005
+++ b/xen/arch/ia64/asm-offsets.c Wed Sep 14 21:26:35 2005
@@ -147,13 +147,6 @@
DEFINE(IA64_PT_REGS_CR_ISR_OFFSET, offsetof (struct pt_regs, cr_isr));
DEFINE(IA64_PT_REGS_EML_UNAT_OFFSET, offsetof (struct pt_regs, eml_unat));
DEFINE(IA64_PT_REGS_RFI_PFS_OFFSET, offsetof (struct pt_regs, rfi_pfs));
- DEFINE(RFI_IIP_OFFSET, offsetof(struct vcpu, arch.arch_vmx.rfi_iip));
- DEFINE(RFI_IPSR_OFFSET, offsetof(struct vcpu, arch.arch_vmx.rfi_ipsr));
- DEFINE(RFI_IFS_OFFSET,offsetof(struct vcpu ,arch.arch_vmx.rfi_ifs));
- DEFINE(RFI_PFS_OFFSET,offsetof(struct vcpu ,arch.arch_vmx.rfi_pfs));
- DEFINE(SWITCH_MRR5_OFFSET,offsetof(struct vcpu ,arch.arch_vmx.mrr5));
- DEFINE(SWITCH_MRR6_OFFSET,offsetof(struct vcpu ,arch.arch_vmx.mrr6));
- DEFINE(SWITCH_MRR7_OFFSET,offsetof(struct vcpu ,arch.arch_vmx.mrr7));
DEFINE(SWITCH_MPTA_OFFSET,offsetof(struct vcpu ,arch.arch_vmx.mpta));
#endif //CONFIG_VTI
DEFINE(IA64_PT_REGS_R16_OFFSET, offsetof (struct pt_regs, r16));
@@ -228,8 +221,8 @@
BLANK();
#ifdef CONFIG_VTI
- DEFINE(IA64_VPD_BASE_OFFSET, offsetof (struct vcpu, arch.arch_vmx.vpd));
- DEFINE(IA64_VLSAPIC_INSVC_BASE_OFFSET, offsetof (struct vcpu, arch.arch_vmx.in_service[0]));
+ DEFINE(IA64_VPD_BASE_OFFSET, offsetof (struct vcpu, arch.privregs));
+ DEFINE(IA64_VLSAPIC_INSVC_BASE_OFFSET, offsetof (struct vcpu, arch.insvc[0]));
DEFINE(IA64_VPD_CR_VPTA_OFFSET, offsetof (cr_t, pta));
DEFINE(XXX_THASH_SIZE, sizeof (thash_data_t));
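The DEFINE() entries above use the standard Linux/Xen asm-offsets technique: this file is compiled to assembly only, and each DEFINE() plants a marker that a build-time script rewrites into a #define in the generated asm-offsets header, so hand-written assembly such as vmx_entry.S can use C structure offsets without duplicating layouts. A minimal self-contained sketch of the mechanism, assuming the conventional GCC-style DEFINE macro (the exact macro in this tree may differ):

    #include <stddef.h>

    /* Each DEFINE() emits a line like "->EXAMPLE_B_OFFSET $8 ..." into the
     * compiler's .s output; a sed/awk pass in the build turns each marker
     * into "#define EXAMPLE_B_OFFSET 8" in asm-offsets.h. */
    #define DEFINE(sym, val) \
            asm volatile("\n->" #sym " %0 " #val : : "i" (val))

    struct example { long a; long b; };

    void foo(void)
    {
            DEFINE(EXAMPLE_B_OFFSET, offsetof(struct example, b));
    }

The deleted RFI_* and SWITCH_MRR* offsets track the removal of the matching fields from struct arch_vmx_struct in the vmx_vpd.h hunk near the end of this patch.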
diff -r 6dadf4d93ee3 -r 52d2d5208575 xen/arch/ia64/vmx/mmio.c
--- a/xen/arch/ia64/vmx/mmio.c Tue Sep 13 20:20:02 2005
+++ b/xen/arch/ia64/vmx/mmio.c Wed Sep 14 21:26:35 2005
@@ -147,7 +147,7 @@
if(dir==IOREQ_WRITE) //write;
p->u.data = *val;
p->pdata_valid = 0;
- p->port_mm = 1;
+ p->type = 1;
p->df = 0;
set_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
@@ -180,7 +180,7 @@
if(dir==IOREQ_WRITE) //write;
p->u.data = *val;
p->pdata_valid = 0;
- p->port_mm = 0;
+ p->type = 0;
p->df = 0;
set_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
@@ -353,7 +353,7 @@
vcpu = d->vcpu[i];
if (!vcpu)
continue;
- lid.val = VPD_CR(vcpu, lid);
+ lid.val = VCPU(vcpu, lid);
if ( lid.id == id && lid.eid == eid ) {
return vcpu;
}
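Both mmio.c hunks rename the ioreq field port_mm to type, matching the common I/O request interface shared with the x86 VMX code. The raw constants kept here (1 on the MMIO path, 0 on the legacy port-I/O path) are consistent with the usual ioreq encoding of IOREQ_TYPE_COPY/IOREQ_TYPE_PIO; that mapping is an assumption from context, not something the hunks themselves show. A hedged symbolic restatement:

    /* Assumed encoding, matching Xen's common ioreq interface of this era: */
    #define IOREQ_TYPE_PIO  0   /* legacy port I/O   (second hunk above) */
    #define IOREQ_TYPE_COPY 1   /* memory-mapped I/O (first hunk above)  */

    struct ioreq_sketch { int type; };   /* stand-in for the tree's ioreq_t */

    static void set_ioreq_type(struct ioreq_sketch *p, int is_mmio)
    {
            p->type = is_mmio ? IOREQ_TYPE_COPY : IOREQ_TYPE_PIO;
    }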
diff -r 6dadf4d93ee3 -r 52d2d5208575 xen/arch/ia64/vmx/vlsapic.c
--- a/xen/arch/ia64/vmx/vlsapic.c Tue Sep 13 20:20:02 2005
+++ b/xen/arch/ia64/vmx/vlsapic.c Wed Sep 14 21:26:35 2005
@@ -89,8 +89,8 @@
vtm=&(vcpu->arch.arch_vmx.vtm);
vtm->vtm_offset = 0;
vtm->vtm_local_drift = 0;
- VPD_CR(vcpu, itm) = 0;
- VPD_CR(vcpu, itv) = 0x10000;
+ VCPU(vcpu, itm) = 0;
+ VCPU(vcpu, itv) = 0x10000;
cur_itc = ia64_get_itc();
vtm->last_itc = vtm->vtm_offset + cur_itc;
}
@@ -104,12 +104,12 @@
UINT64 vec;
- vec = VPD_CR(vcpu, itv) & 0xff;
+ vec = VCPU(vcpu, itv) & 0xff;
vmx_vcpu_pend_interrupt(vcpu, vec);
vtm=&(vcpu->arch.arch_vmx.vtm);
cur_itc = now_itc(vtm);
- vitm =VPD_CR(vcpu, itm);
+ vitm =VCPU(vcpu, itm);
//fire_itc2 = cur_itc;
//fire_itm2 = vitm;
update_last_itc(vtm,cur_itc); // pseudo read to update vITC
@@ -167,7 +167,7 @@
vtm=&(vcpu->arch.arch_vmx.vtm);
local_irq_save(spsr);
- itv = VPD_CR(vcpu, itv);
+ itv = VCPU(vcpu, itv);
if ( ITV_IRQ_MASK(itv) )
rem_ac_timer(&vtm->vtm_timer);
vtm_interruption_update(vcpu, vtm);
@@ -190,12 +190,12 @@
long diff_now, diff_last;
uint64_t spsr;
- vitv = VPD_CR(vcpu, itv);
+ vitv = VCPU(vcpu, itv);
if ( ITV_IRQ_MASK(vitv) ) {
return;
}
- vitm =VPD_CR(vcpu, itm);
+ vitm =VCPU(vcpu, itm);
local_irq_save(spsr);
cur_itc =now_itc(vtm);
diff_last = vtm->last_itc - vitm;
@@ -249,7 +249,6 @@
#define NMI_VECTOR 2
#define ExtINT_VECTOR 0
#define NULL_VECTOR -1
-#define VLSAPIC_INSVC(vcpu, i) ((vcpu)->arch.arch_vmx.in_service[i])
static void update_vhpi(VCPU *vcpu, int vec)
{
u64 vhpi;
@@ -265,11 +264,11 @@
vhpi = vec / 16;
}
- VMX_VPD(vcpu,vhpi) = vhpi;
+ VCPU(vcpu,vhpi) = vhpi;
// TODO: Add support for XENO
- if ( VMX_VPD(vcpu,vac).a_int ) {
+ if ( VCPU(vcpu,vac).a_int ) {
ia64_call_vsa ( PAL_VPS_SET_PENDING_INTERRUPT,
- (uint64_t) &(vcpu->arch.arch_vmx.vpd), 0, 0,0,0,0,0);
+ (uint64_t) &(vcpu->arch.privregs), 0, 0,0,0,0,0);
}
}
@@ -284,7 +283,7 @@
return;
ps = get_psapic(vcpu);
- ps->vl_lapic_id = ((VPD_CR(vcpu, lid) >> 16) & 0xffff) << 16;
+ ps->vl_lapic_id = ((VCPU(vcpu, lid) >> 16) & 0xffff) << 16;
printf("vl_lapic_id = %x\n", ps->vl_lapic_id);
ps->vl_apr = 0;
// skip ps->vl_logical_dest && ps->vl_dest_format
@@ -316,18 +315,18 @@
vl_apic_info *psapic; // shared lapic inf.
#endif
- VPD_CR(vcpu, lid) = ia64_getreg(_IA64_REG_CR_LID);
- VPD_CR(vcpu, ivr) = 0;
- VPD_CR(vcpu,tpr) = 0x10000;
- VPD_CR(vcpu, eoi) = 0;
- VPD_CR(vcpu, irr[0]) = 0;
- VPD_CR(vcpu, irr[1]) = 0;
- VPD_CR(vcpu, irr[2]) = 0;
- VPD_CR(vcpu, irr[3]) = 0;
- VPD_CR(vcpu, pmv) = 0x10000;
- VPD_CR(vcpu, cmcv) = 0x10000;
- VPD_CR(vcpu, lrr0) = 0x10000; // default reset value?
- VPD_CR(vcpu, lrr1) = 0x10000; // default reset value?
+ VCPU(vcpu, lid) = ia64_getreg(_IA64_REG_CR_LID);
+ VCPU(vcpu, ivr) = 0;
+ VCPU(vcpu,tpr) = 0x10000;
+ VCPU(vcpu, eoi) = 0;
+ VCPU(vcpu, irr[0]) = 0;
+ VCPU(vcpu, irr[1]) = 0;
+ VCPU(vcpu, irr[2]) = 0;
+ VCPU(vcpu, irr[3]) = 0;
+ VCPU(vcpu, pmv) = 0x10000;
+ VCPU(vcpu, cmcv) = 0x10000;
+ VCPU(vcpu, lrr0) = 0x10000; // default reset value?
+ VCPU(vcpu, lrr1) = 0x10000; // default reset value?
update_vhpi(vcpu, NULL_VECTOR);
for ( i=0; i<4; i++) {
VLSAPIC_INSVC(vcpu,i) = 0;
@@ -367,9 +366,9 @@
*/
static int highest_pending_irq(VCPU *vcpu)
{
- if ( VPD_CR(vcpu, irr[0]) & (1UL<<NMI_VECTOR) ) return NMI_VECTOR;
- if ( VPD_CR(vcpu, irr[0]) & (1UL<<ExtINT_VECTOR) ) return ExtINT_VECTOR;
- return highest_bits(&VPD_CR(vcpu, irr[0]));
+ if ( VCPU(vcpu, irr[0]) & (1UL<<NMI_VECTOR) ) return NMI_VECTOR;
+ if ( VCPU(vcpu, irr[0]) & (1UL<<ExtINT_VECTOR) ) return ExtINT_VECTOR;
+ return highest_bits(&VCPU(vcpu, irr[0]));
}
static int highest_inservice_irq(VCPU *vcpu)
@@ -410,7 +409,7 @@
tpr_t vtpr;
uint64_t mmi;
- vtpr.val = VPD_CR(vcpu, tpr);
+ vtpr.val = VCPU(vcpu, tpr);
if ( h_inservice == NMI_VECTOR ) {
return IRQ_MASKED_BY_INSVC;
@@ -468,7 +467,7 @@
return;
}
local_irq_save(spsr);
- VPD_CR(vcpu,irr[vector>>6]) |= 1UL<<(vector&63);
+ VCPU(vcpu,irr[vector>>6]) |= 1UL<<(vector&63);
//vlapic_update_shared_irr(vcpu);
local_irq_restore(spsr);
vcpu->arch.irq_new_pending = 1;
@@ -486,7 +485,7 @@
local_irq_save(spsr);
for (i=0 ; i<4; i++ ) {
- VPD_CR(vcpu,irr[i]) |= pend_irr[i];
+ VCPU(vcpu,irr[i]) |= pend_irr[i];
}
//vlapic_update_shared_irr(vcpu);
local_irq_restore(spsr);
@@ -554,7 +553,7 @@
local_irq_save(spsr);
VLSAPIC_INSVC(vcpu,vec>>6) &= ~(1UL <<(vec&63));
local_irq_restore(spsr);
- VPD_CR(vcpu, eoi)=0; // overwrite the data
+ VCPU(vcpu, eoi)=0; // overwrite the data
vmx_check_pending_irq(vcpu);
}
@@ -573,7 +572,7 @@
}
VLSAPIC_INSVC(vcpu,vec>>6) |= (1UL <<(vec&63));
- VPD_CR(vcpu, irr[vec>>6]) &= ~(1UL <<(vec&63));
+ VCPU(vcpu, irr[vec>>6]) &= ~(1UL <<(vec&63));
update_vhpi(vcpu, NULL_VECTOR); // clear VHPI till EOI or IRR write
//vlapic_update_shared_irr(vcpu);
local_irq_restore(spsr);
@@ -600,10 +599,10 @@
IA64_PSR vpsr;
vpsr.val = vmx_vcpu_get_psr(vcpu);
- vtpr.val = VPD_CR(vcpu, tpr);
+ vtpr.val = VCPU(vcpu, tpr);
threshold = ((!vpsr.i) << 5) | (vtpr.mmi << 4) | vtpr.mic;
- vhpi = VMX_VPD(vcpu,vhpi);
+ vhpi = VCPU(vcpu,vhpi);
if ( vhpi > threshold ) {
// interrupt activated
generate_exirq (vcpu);
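The final vlsapic.c hunk is the interrupt-masking check: the threshold packs psr.i (bit 5), tpr.mmi (bit 4) and the 4-bit tpr.mic class into one integer, while update_vhpi() above encodes pending vectors so a single unsigned compare decides delivery (NMI -> 32, ExtINT -> 16, ordinary vectors -> vec/16). A standalone sketch with a worked example, using the encodings taken from the hunks above:

    #include <stdint.h>

    /* Returns nonzero when the highest pending interrupt (already encoded
     * as a vhpi value) beats the current masking level. */
    static int vhpi_deliverable(int psr_i, unsigned mmi, unsigned mic,
                                uint64_t vhpi)
    {
            uint64_t threshold = ((uint64_t)(!psr_i) << 5) | (mmi << 4) | mic;
            return vhpi > threshold;
    }

    /* vhpi_deliverable(1, 0, 5, 6)  == 1: vector 0x60+ (class 6) beats tpr.mic 5.
     * vhpi_deliverable(1, 0, 5, 5)  == 0: vectors 0x50..0x5f stay pending.
     * vhpi_deliverable(0, 0, 0, 32) == 0: with psr.i clear, even the NMI
     *                                     encoding (32) fails the strict '>'. */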
diff -r 6dadf4d93ee3 -r 52d2d5208575 xen/arch/ia64/vmx/vmmu.c
--- a/xen/arch/ia64/vmx/vmmu.c Tue Sep 13 20:20:02 2005
+++ b/xen/arch/ia64/vmx/vmmu.c Wed Sep 14 21:26:35 2005
@@ -434,7 +434,7 @@
ia64_rr vrr;
u64 mfn;
- if ( !(VMX_VPD(vcpu, vpsr) & IA64_PSR_IT) ) { // I-side physical mode
+ if ( !(VCPU(vcpu, vpsr) & IA64_PSR_IT) ) { // I-side physical mode
gpip = gip;
}
else {
@@ -726,12 +726,12 @@
if(data){
if(data->p==0){
visr.na=1;
- vmx_vcpu_set_isr(vcpu,visr.val);
+ vcpu_set_isr(vcpu,visr.val);
page_not_present(vcpu, vadr);
return IA64_FAULT;
}else if(data->ma == VA_MATTR_NATPAGE){
visr.na = 1;
- vmx_vcpu_set_isr(vcpu, visr.val);
+ vcpu_set_isr(vcpu, visr.val);
dnat_page_consumption(vcpu, vadr);
return IA64_FAULT;
}else{
@@ -741,7 +741,7 @@
}else{
if(!vhpt_enabled(vcpu, vadr, NA_REF)){
if(vpsr.ic){
- vmx_vcpu_set_isr(vcpu, visr.val);
+ vcpu_set_isr(vcpu, visr.val);
alt_dtlb(vcpu, vadr);
return IA64_FAULT;
}
@@ -756,7 +756,7 @@
data = vtlb_lookup_ex(hcb, vrr.rid, vhpt_adr, DSIDE_TLB);
if(data){
if(vpsr.ic){
- vmx_vcpu_set_isr(vcpu, visr.val);
+ vcpu_set_isr(vcpu, visr.val);
dtlb_fault(vcpu, vadr);
return IA64_FAULT;
}
@@ -767,7 +767,7 @@
}
else{
if(vpsr.ic){
- vmx_vcpu_set_isr(vcpu, visr.val);
+ vcpu_set_isr(vcpu, visr.val);
dvhpt_fault(vcpu, vadr);
return IA64_FAULT;
}
diff -r 6dadf4d93ee3 -r 52d2d5208575 xen/arch/ia64/vmx/vmx_entry.S
--- a/xen/arch/ia64/vmx/vmx_entry.S Tue Sep 13 20:20:02 2005
+++ b/xen/arch/ia64/vmx/vmx_entry.S Wed Sep 14 21:26:35 2005
@@ -218,6 +218,7 @@
adds out0=16,r12
;;
br.call.sptk.many b0=leave_hypervisor_tail
+ ;;
mov ar.pfs=loc0
adds r8=IA64_VPD_BASE_OFFSET,r13
;;
diff -r 6dadf4d93ee3 -r 52d2d5208575 xen/arch/ia64/vmx/vmx_hypercall.c
--- a/xen/arch/ia64/vmx/vmx_hypercall.c Tue Sep 13 20:20:02 2005
+++ b/xen/arch/ia64/vmx/vmx_hypercall.c Wed Sep 14 21:26:35 2005
@@ -98,7 +98,8 @@
vmx_vcpu_get_gr(vcpu,18,&r34);
vmx_vcpu_get_gr(vcpu,19,&r35);
vmx_vcpu_get_gr(vcpu,20,&r36);
- ret=do_dom_mem_op(r32,(u64 *)r33,r34,r35,r36);
+// ret=do_dom_mem_op(r32,(u64 *)r33,r34,r35,r36);
+ ret = 0;
printf("do_dom_mem return value: %lx\n", ret);
vmx_vcpu_set_gr(vcpu, 8, ret, 0);
diff -r 6dadf4d93ee3 -r 52d2d5208575 xen/arch/ia64/vmx/vmx_init.c
--- a/xen/arch/ia64/vmx/vmx_init.c Tue Sep 13 20:20:02 2005
+++ b/xen/arch/ia64/vmx/vmx_init.c Wed Sep 14 21:26:35 2005
@@ -217,7 +217,7 @@
vmx_create_vp(struct vcpu *v)
{
u64 ret;
- vpd_t *vpd = v->arch.arch_vmx.vpd;
+ vpd_t *vpd = v->arch.privregs;
u64 ivt_base;
extern char vmx_ia64_ivt;
/* ia64_ivt is a function pointer, so we need this translation */
@@ -255,7 +255,7 @@
u64 old_rr0, dom_rr7, rr0_xen_start, rr0_vhpt;
/* FIXME: about setting of pal_proc_vector... time consuming */
- status = ia64_pal_vp_save(v->arch.arch_vmx.vpd, 0);
+ status = ia64_pal_vp_save(v->arch.privregs, 0);
if (status != PAL_STATUS_SUCCESS)
panic("Save vp status failed\n");
@@ -290,7 +290,7 @@
u64 pte_xen, pte_vhpt;
int i;
- status = ia64_pal_vp_restore(v->arch.arch_vmx.vpd, 0);
+ status = ia64_pal_vp_restore(v->arch.privregs, 0);
if (status != PAL_STATUS_SUCCESS)
panic("Restore vp status failed\n");
@@ -351,7 +351,8 @@
vpd = alloc_vpd();
ASSERT(vpd);
- v->arch.arch_vmx.vpd = vpd;
+// v->arch.arch_vmx.vpd = vpd;
+ v->arch.privregs = vpd;
vpd->virt_env_vaddr = vm_buffer;
#ifdef CONFIG_VTI
diff -r 6dadf4d93ee3 -r 52d2d5208575 xen/arch/ia64/vmx/vmx_interrupt.c
--- a/xen/arch/ia64/vmx/vmx_interrupt.c Tue Sep 13 20:20:02 2005
+++ b/xen/arch/ia64/vmx/vmx_interrupt.c Wed Sep 14 21:26:35 2005
@@ -51,24 +51,24 @@
ipsr = regs->cr_ipsr;
vpsr.val = vpsr.val | (ipsr & (IA64_PSR_ID | IA64_PSR_DA
| IA64_PSR_DD |IA64_PSR_SS |IA64_PSR_ED));
- vmx_vcpu_set_ipsr(vcpu, vpsr.val);
+ vcpu_set_ipsr(vcpu, vpsr.val);
/* Currently, for trap, we do not advance IIP to next
* instruction. That's because we assume caller already
* set up IIP correctly
*/
- vmx_vcpu_set_iip(vcpu , regs->cr_iip);
+ vcpu_set_iip(vcpu , regs->cr_iip);
/* set vifs.v to zero */
- vifs = VPD_CR(vcpu,ifs);
+ vifs = VCPU(vcpu,ifs);
vifs &= ~IA64_IFS_V;
- vmx_vcpu_set_ifs(vcpu, vifs);
-
- vmx_vcpu_set_iipa(vcpu, regs->cr_iipa);
+ vcpu_set_ifs(vcpu, vifs);
+
+ vcpu_set_iipa(vcpu, regs->cr_iipa);
}
- vdcr = VPD_CR(vcpu,dcr);
+ vdcr = VCPU(vcpu,dcr);
/* Set guest psr
* up/mfl/mfh/pk/dt/rt/mc/it keeps unchanged
@@ -119,16 +119,16 @@
/* Vol2, Table 8-1 */
if ( vpsr.ic ) {
if ( set_ifa){
- vmx_vcpu_set_ifa(vcpu, vadr);
+ vcpu_set_ifa(vcpu, vadr);
}
if ( set_itir) {
value = vmx_vcpu_get_itir_on_fault(vcpu, vadr);
- vmx_vcpu_set_itir(vcpu, value);
+ vcpu_set_itir(vcpu, value);
}
if ( set_iha) {
vmx_vcpu_thash(vcpu, vadr, &value);
- vmx_vcpu_set_iha(vcpu, value);
+ vcpu_set_iha(vcpu, value);
}
}
diff -r 6dadf4d93ee3 -r 52d2d5208575 xen/arch/ia64/vmx/vmx_phy_mode.c
--- a/xen/arch/ia64/vmx/vmx_phy_mode.c Tue Sep 13 20:20:02 2005
+++ b/xen/arch/ia64/vmx/vmx_phy_mode.c Wed Sep 14 21:26:35 2005
@@ -221,10 +221,11 @@
VMX(vcpu,vrr[VRN5]) = 0x538;
VMX(vcpu,vrr[VRN6]) = 0x660;
VMX(vcpu,vrr[VRN7]) = 0x760;
-
+#if 0
VMX(vcpu,mrr5) = vmx_vrrtomrr(vcpu, 0x38);
VMX(vcpu,mrr6) = vmx_vrrtomrr(vcpu, 0x60);
VMX(vcpu,mrr7) = vmx_vrrtomrr(vcpu, 0x60);
+#endif
}
void
@@ -275,7 +276,7 @@
ia64_set_rr((VRN6 << VRN_SHIFT),
vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN6])));
vmx_switch_rr7(vmx_vrrtomrr(vcpu,VMX(vcpu, vrr[VRN7])),(void *)vcpu->domain->shared_info,
- (void *)vcpu->vcpu_info->arch.privregs,
+ (void *)vcpu->arch.privregs,
( void *)vcpu->arch.vtlb->ts->vhpt->hash, pal_vaddr );
ia64_set_pta(vcpu->arch.arch_vmx.mpta);
#endif
diff -r 6dadf4d93ee3 -r 52d2d5208575 xen/arch/ia64/vmx/vmx_process.c
--- a/xen/arch/ia64/vmx/vmx_process.c Tue Sep 13 20:20:02 2005
+++ b/xen/arch/ia64/vmx/vmx_process.c Wed Sep 14 21:26:35 2005
@@ -82,13 +82,13 @@
case FW_HYPERCALL_PAL_CALL:
//printf("*** PAL hypercall: index=%d\n",regs->r28);
//FIXME: This should call a C routine
- x = pal_emulator_static(VMX_VPD(v, vgr[12]));
+ x = pal_emulator_static(VCPU(v, vgr[12]));
regs->r8 = x.status; regs->r9 = x.v0;
regs->r10 = x.v1; regs->r11 = x.v2;
#if 0
if (regs->r8)
printk("Failed vpal emulation, with
index:0x%lx\n",
- VMX_VPD(v, vgr[12]));
+ VCPU(v, vgr[12]));
#endif
break;
case FW_HYPERCALL_SAL_CALL:
@@ -178,11 +178,11 @@
if(!(vpsr&IA64_PSR_IC)&&(vector!=5)){
panic("Guest nested fault!");
}
- VPD_CR(vcpu,isr)=isr;
- VPD_CR(vcpu,iipa) = regs->cr_iip;
+ VCPU(vcpu,isr)=isr;
+ VCPU(vcpu,iipa) = regs->cr_iip;
vector=vec2off[vector];
if (vector == IA64_BREAK_VECTOR || vector == IA64_SPECULATION_VECTOR)
- VPD_CR(vcpu,iim) = iim;
+ VCPU(vcpu,iim) = iim;
else {
set_ifa_itir_iha(vcpu,ifa,1,1,1);
}
@@ -220,8 +220,8 @@
*
* Now hardcode the vector as 0x10 temporarily
*/
- if (event_pending(v)&&(!((v->arch.arch_vmx.in_service[0])&(1UL<<0x10)))) {
- VPD_CR(v, irr[0]) |= 1UL << 0x10;
+ if (event_pending(v)&&(!(VLSAPIC_INSVC(v,0)&(1UL<<0x10)))) {
+ VCPU(v, irr[0]) |= 1UL << 0x10;
v->arch.irq_new_pending = 1;
}
@@ -295,7 +295,7 @@
}else if(type == DSIDE_TLB){
if(!vhpt_enabled(vcpu, vadr, misr.rs?RSE_REF:DATA_REF)){
if(vpsr.ic){
- vmx_vcpu_set_isr(vcpu, misr.val);
+ vcpu_set_isr(vcpu, misr.val);
alt_dtlb(vcpu, vadr);
return IA64_FAULT;
} else{
@@ -313,7 +313,7 @@
data = vtlb_lookup_ex(vtlb, vrr.rid, vhpt_adr, DSIDE_TLB);
if(data){
if(vpsr.ic){
- vmx_vcpu_set_isr(vcpu, misr.val);
+ vcpu_set_isr(vcpu, misr.val);
dtlb_fault(vcpu, vadr);
return IA64_FAULT;
}else{
@@ -327,7 +327,7 @@
}
}else{
if(vpsr.ic){
- vmx_vcpu_set_isr(vcpu, misr.val);
+ vcpu_set_isr(vcpu, misr.val);
dvhpt_fault(vcpu, vadr);
return IA64_FAULT;
}else{
@@ -346,7 +346,7 @@
if(!vpsr.ic){
misr.ni=1;
}
- vmx_vcpu_set_isr(vcpu, misr.val);
+ vcpu_set_isr(vcpu, misr.val);
alt_itlb(vcpu, vadr);
return IA64_FAULT;
} else{
@@ -357,14 +357,14 @@
if(!vpsr.ic){
misr.ni=1;
}
- vmx_vcpu_set_isr(vcpu, misr.val);
+ vcpu_set_isr(vcpu, misr.val);
itlb_fault(vcpu, vadr);
return IA64_FAULT;
}else{
if(!vpsr.ic){
misr.ni=1;
}
- vmx_vcpu_set_isr(vcpu, misr.val);
+ vcpu_set_isr(vcpu, misr.val);
ivhpt_fault(vcpu, vadr);
return IA64_FAULT;
}
diff -r 6dadf4d93ee3 -r 52d2d5208575 xen/arch/ia64/vmx/vmx_utility.c
--- a/xen/arch/ia64/vmx/vmx_utility.c Tue Sep 13 20:20:02 2005
+++ b/xen/arch/ia64/vmx/vmx_utility.c Wed Sep 14 21:26:35 2005
@@ -455,7 +455,7 @@
value = set_isr_ei_ni (vcpu);
visr.val = visr.val | value;
- vmx_vcpu_set_isr (vcpu,visr.val);
+ vcpu_set_isr (vcpu,visr.val);
}
@@ -476,7 +476,7 @@
value = set_isr_ei_ni (vcpu);
visr.val = visr.val | value;
- vmx_vcpu_set_isr(vcpu, visr.val);
+ vcpu_set_isr(vcpu, visr.val);
}
@@ -508,7 +508,7 @@
value = set_isr_ei_ni (vcpu);
visr.val = visr.val | value;
- vmx_vcpu_set_isr (vcpu, visr.val);
+ vcpu_set_isr (vcpu, visr.val);
}
@@ -533,7 +533,7 @@
value = set_isr_ei_ni (vcpu);
visr.val = visr.val | value;
- vmx_vcpu_set_isr (vcpu, visr.val);
+ vcpu_set_isr (vcpu, visr.val);
}
@@ -559,7 +559,7 @@
value = set_isr_ei_ni (vcpu);
visr.val = visr.val | value;
- vmx_vcpu_set_isr (vcpu, visr.val);
+ vcpu_set_isr (vcpu, visr.val);
}
@@ -580,7 +580,7 @@
value = set_isr_ei_ni (vcpu);
visr.val = visr.val | value;
- vmx_vcpu_set_isr (vcpu, visr.val);
+ vcpu_set_isr (vcpu, visr.val);
}
@@ -594,7 +594,7 @@
isr.na = non_access;
isr.r = 1;
isr.w = 0;
- vmx_vcpu_set_isr(vcpu, isr.val);
+ vcpu_set_isr(vcpu, isr.val);
return;
}
@@ -606,7 +606,7 @@
isr.val = set_isr_ei_ni(vcpu);
isr.code = IA64_PRIV_OP_FAULT;
isr.na = non_access;
- vmx_vcpu_set_isr(vcpu, isr.val);
+ vcpu_set_isr(vcpu, isr.val);
return;
}
diff -r 6dadf4d93ee3 -r 52d2d5208575 xen/arch/ia64/vmx/vmx_vcpu.c
--- a/xen/arch/ia64/vmx/vmx_vcpu.c Tue Sep 13 20:20:02 2005
+++ b/xen/arch/ia64/vmx/vmx_vcpu.c Wed Sep 14 21:26:35 2005
@@ -100,7 +100,7 @@
* Since these bits will become 0, after successful execution of each
* instruction, we will set them to mIA64_PSR
*/
- VMX_VPD(vcpu,vpsr) = value &
+ VCPU(vcpu,vpsr) = value &
(~ (IA64_PSR_ID |IA64_PSR_DA | IA64_PSR_DD |
IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA
));
@@ -167,7 +167,7 @@
IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA
));
- VMX_VPD(vcpu, vpsr) = vpsr.val;
+ VCPU(vcpu, vpsr) = vpsr.val;
ipsr->val &=
(~ (IA64_PSR_ID |IA64_PSR_DA | IA64_PSR_DD |
@@ -185,7 +185,7 @@
vpsr.val = vmx_vcpu_get_psr(vcpu);
if(!vpsr.ic)
- VPD_CR(vcpu,ifs) = regs->cr_ifs;
+ VCPU(vcpu,ifs) = regs->cr_ifs;
regs->cr_ifs = IA64_IFS_V;
return (IA64_NO_FAULT);
}
@@ -244,7 +244,7 @@
#else
case VRN7:
vmx_switch_rr7(vmx_vrrtomrr(vcpu,val),vcpu->domain->shared_info,
- (void *)vcpu->vcpu_info->arch.privregs,
+ (void *)vcpu->arch.privregs,
( void *)vcpu->arch.vtlb->ts->vhpt->hash, pal_vaddr );
break;
#endif
@@ -307,15 +307,15 @@
// TODO: Only allowed for current vcpu
UINT64 ifs, psr;
REGS *regs = vcpu_regs(vcpu);
- psr = VPD_CR(vcpu,ipsr);
+ psr = VCPU(vcpu,ipsr);
vmx_vcpu_set_psr(vcpu,psr);
- ifs=VPD_CR(vcpu,ifs);
+ ifs=VCPU(vcpu,ifs);
if((ifs>>63)&&(ifs<<1)){
ifs=(regs->cr_ifs)&0x7f;
regs->rfi_pfs = (ifs<<7)|ifs;
- regs->cr_ifs = VPD_CR(vcpu,ifs);
- }
- regs->cr_iip = VPD_CR(vcpu,iip);
+ regs->cr_ifs = VCPU(vcpu,ifs);
+ }
+ regs->cr_iip = VCPU(vcpu,iip);
return (IA64_NO_FAULT);
}
@@ -323,7 +323,7 @@
UINT64
vmx_vcpu_get_psr(VCPU *vcpu)
{
- return VMX_VPD(vcpu,vpsr);
+ return VCPU(vcpu,vpsr);
}
@@ -334,9 +334,9 @@
vpsr.val = vmx_vcpu_get_psr(vcpu);
if ( vpsr.bn ) {
- *val=VMX_VPD(vcpu,vgr[reg-16]);
+ *val=VCPU(vcpu,vgr[reg-16]);
// Check NAT bit
- if ( VMX_VPD(vcpu,vnat) & (1UL<<(reg-16)) ) {
+ if ( VCPU(vcpu,vnat) & (1UL<<(reg-16)) ) {
// TODO
//panic ("NAT consumption fault\n");
return IA64_FAULT;
@@ -344,8 +344,8 @@
}
else {
- *val=VMX_VPD(vcpu,vbgr[reg-16]);
- if ( VMX_VPD(vcpu,vbnat) & (1UL<<reg) ) {
+ *val=VCPU(vcpu,vbgr[reg-16]);
+ if ( VCPU(vcpu,vbnat) & (1UL<<reg) ) {
//panic ("NAT consumption fault\n");
return IA64_FAULT;
}
@@ -360,19 +360,19 @@
IA64_PSR vpsr;
vpsr.val = vmx_vcpu_get_psr(vcpu);
if ( vpsr.bn ) {
- VMX_VPD(vcpu,vgr[reg-16]) = val;
+ VCPU(vcpu,vgr[reg-16]) = val;
if(nat){
- VMX_VPD(vcpu,vnat) |= ( 1UL<<(reg-16) );
+ VCPU(vcpu,vnat) |= ( 1UL<<(reg-16) );
}else{
- VMX_VPD(vcpu,vbnat) &= ~( 1UL<<(reg-16) );
+ VCPU(vcpu,vbnat) &= ~( 1UL<<(reg-16) );
}
}
else {
- VMX_VPD(vcpu,vbgr[reg-16]) = val;
+ VCPU(vcpu,vbgr[reg-16]) = val;
if(nat){
- VMX_VPD(vcpu,vnat) |= ( 1UL<<(reg) );
+ VCPU(vcpu,vnat) |= ( 1UL<<(reg) );
}else{
- VMX_VPD(vcpu,vbnat) &= ~( 1UL<<(reg) );
+ VCPU(vcpu,vbnat) &= ~( 1UL<<(reg) );
}
}
return IA64_NO_FAULT;
@@ -447,7 +447,7 @@
IA64FAULT
vmx_vcpu_set_tpr(VCPU *vcpu, u64 val)
{
- VPD_CR(vcpu,tpr)=val;
+ VCPU(vcpu,tpr)=val;
vcpu->arch.irq_new_condition = 1;
return IA64_NO_FAULT;
}
diff -r 6dadf4d93ee3 -r 52d2d5208575 xen/arch/ia64/vmx/vmx_virt.c
--- a/xen/arch/ia64/vmx/vmx_virt.c Tue Sep 13 20:20:02 2005
+++ b/xen/arch/ia64/vmx/vmx_virt.c Wed Sep 14 21:26:35 2005
@@ -180,7 +180,7 @@
if(vmx_vcpu_get_gr(vcpu, inst.M35.r2, &val) != IA64_NO_FAULT)
panic(" get_psr nat bit fault\n");
- val = (val & MASK(0, 32)) | (VMX_VPD(vcpu, vpsr) & MASK(32, 32));
+ val = (val & MASK(0, 32)) | (VCPU(vcpu, vpsr) & MASK(32, 32));
#if 0
if (last_mov_from_psr && (last_guest_psr != (val & MASK(0,32))))
while(1);
@@ -546,10 +546,10 @@
}
#endif // VMAL_NO_FAULT_CHECK
- if (vmx_vcpu_get_itir(vcpu,&itir)){
+ if (vcpu_get_itir(vcpu,&itir)){
return(IA64_FAULT);
}
- if (vmx_vcpu_get_ifa(vcpu,&ifa)){
+ if (vcpu_get_ifa(vcpu,&ifa)){
return(IA64_FAULT);
}
#ifdef VMAL_NO_FAULT_CHECK
@@ -603,10 +603,10 @@
}
#endif // VMAL_NO_FAULT_CHECK
- if (vmx_vcpu_get_itir(vcpu,&itir)){
+ if (vcpu_get_itir(vcpu,&itir)){
return(IA64_FAULT);
}
- if (vmx_vcpu_get_ifa(vcpu,&ifa)){
+ if (vcpu_get_ifa(vcpu,&ifa)){
return(IA64_FAULT);
}
#ifdef VMAL_NO_FAULT_CHECK
@@ -657,10 +657,10 @@
}
#endif // VMAL_NO_FAULT_CHECK
- if (vmx_vcpu_get_itir(vcpu,itir)){
+ if (vcpu_get_itir(vcpu,itir)){
return(IA64_FAULT);
}
- if (vmx_vcpu_get_ifa(vcpu,ifa)){
+ if (vcpu_get_ifa(vcpu,ifa)){
return(IA64_FAULT);
}
#ifdef VMAL_NO_FAULT_CHECK
@@ -1178,21 +1178,21 @@
#endif //CHECK_FAULT
extern u64 cr_igfld_mask(int index, u64 value);
r2 = cr_igfld_mask(inst.M32.cr3,r2);
- VMX_VPD(vcpu, vcr[inst.M32.cr3]) = r2;
+ VCPU(vcpu, vcr[inst.M32.cr3]) = r2;
switch (inst.M32.cr3) {
case 0: return vmx_vcpu_set_dcr(vcpu,r2);
case 1: return vmx_vcpu_set_itm(vcpu,r2);
case 2: return vmx_vcpu_set_iva(vcpu,r2);
case 8: return vmx_vcpu_set_pta(vcpu,r2);
- case 16:return vmx_vcpu_set_ipsr(vcpu,r2);
- case 17:return vmx_vcpu_set_isr(vcpu,r2);
- case 19:return vmx_vcpu_set_iip(vcpu,r2);
- case 20:return vmx_vcpu_set_ifa(vcpu,r2);
- case 21:return vmx_vcpu_set_itir(vcpu,r2);
- case 22:return vmx_vcpu_set_iipa(vcpu,r2);
- case 23:return vmx_vcpu_set_ifs(vcpu,r2);
- case 24:return vmx_vcpu_set_iim(vcpu,r2);
- case 25:return vmx_vcpu_set_iha(vcpu,r2);
+ case 16:return vcpu_set_ipsr(vcpu,r2);
+ case 17:return vcpu_set_isr(vcpu,r2);
+ case 19:return vcpu_set_iip(vcpu,r2);
+ case 20:return vcpu_set_ifa(vcpu,r2);
+ case 21:return vcpu_set_itir(vcpu,r2);
+ case 22:return vcpu_set_iipa(vcpu,r2);
+ case 23:return vcpu_set_ifs(vcpu,r2);
+ case 24:return vcpu_set_iim(vcpu,r2);
+ case 25:return vcpu_set_iha(vcpu,r2);
case 64:printk("SET LID to 0x%lx\n", r2);
return vmx_vcpu_set_lid(vcpu,r2);
case 65:return IA64_NO_FAULT;
@@ -1213,9 +1213,12 @@
#define cr_get(cr) \
+ ((fault=vcpu_get_##cr(vcpu,&val))==IA64_NO_FAULT)?\
+ vmx_vcpu_set_gr(vcpu, tgt, val,0):fault;
+
+#define vmx_cr_get(cr) \
((fault=vmx_vcpu_get_##cr(vcpu,&val))==IA64_NO_FAULT)?\
vmx_vcpu_set_gr(vcpu, tgt, val,0):fault;
-
IA64FAULT vmx_emul_mov_from_cr(VCPU *vcpu, INST64 inst)
{
@@ -1241,10 +1244,10 @@
// from_cr_cnt[inst.M33.cr3]++;
switch (inst.M33.cr3) {
- case 0: return cr_get(dcr);
- case 1: return cr_get(itm);
- case 2: return cr_get(iva);
- case 8: return cr_get(pta);
+ case 0: return vmx_cr_get(dcr);
+ case 1: return vmx_cr_get(itm);
+ case 2: return vmx_cr_get(iva);
+ case 8: return vmx_cr_get(pta);
case 16:return cr_get(ipsr);
case 17:return cr_get(isr);
case 19:return cr_get(iip);
@@ -1254,23 +1257,21 @@
case 23:return cr_get(ifs);
case 24:return cr_get(iim);
case 25:return cr_get(iha);
-// case 64:val = ia64_getreg(_IA64_REG_CR_LID);
-// return vmx_vcpu_set_gr(vcpu,tgt,val,0);
- case 64:return cr_get(lid);
+ case 64:return vmx_cr_get(lid);
case 65:
- vmx_vcpu_get_ivr(vcpu,&val);
- return vmx_vcpu_set_gr(vcpu,tgt,val,0);
- case 66:return cr_get(tpr);
+ vmx_vcpu_get_ivr(vcpu,&val);
+ return vmx_vcpu_set_gr(vcpu,tgt,val,0);
+ case 66:return vmx_cr_get(tpr);
case 67:return vmx_vcpu_set_gr(vcpu,tgt,0L,0);
- case 68:return cr_get(irr0);
- case 69:return cr_get(irr1);
- case 70:return cr_get(irr2);
- case 71:return cr_get(irr3);
- case 72:return cr_get(itv);
- case 73:return cr_get(pmv);
- case 74:return cr_get(cmcv);
- case 80:return cr_get(lrr0);
- case 81:return cr_get(lrr1);
+ case 68:return vmx_cr_get(irr0);
+ case 69:return vmx_cr_get(irr1);
+ case 70:return vmx_cr_get(irr2);
+ case 71:return vmx_cr_get(irr3);
+ case 72:return vmx_cr_get(itv);
+ case 73:return vmx_cr_get(pmv);
+ case 74:return vmx_cr_get(cmcv);
+ case 80:return vmx_cr_get(lrr0);
+ case 81:return vmx_cr_get(lrr1);
default:
panic("Read reserved cr register");
}
@@ -1355,7 +1356,7 @@
#else
inst.inst=opcode;
#endif /* BYPASS_VMAL_OPCODE */
-
+ vcpu_set_regs(vcpu, regs);
/*
* Switch to actual virtual rid in rr0 and rr4,
* which is required by some tlb related instructions.
diff -r 6dadf4d93ee3 -r 52d2d5208575 xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c Tue Sep 13 20:20:02 2005
+++ b/xen/arch/ia64/xen/domain.c Wed Sep 14 21:26:35 2005
@@ -194,10 +194,12 @@
while (1);
}
memset(d->shared_info, 0, PAGE_SIZE);
- d->shared_info->vcpu_data[0].arch.privregs =
+#if 0
+ d->vcpu[0].arch.privregs =
alloc_xenheap_pages(get_order(sizeof(mapped_regs_t)));
- printf("arch_vcpu_info=%p\n",
d->shared_info->vcpu_data[0].arch.privregs);
- memset(d->shared_info->vcpu_data[0].arch.privregs, 0, PAGE_SIZE);
+ printf("arch_vcpu_info=%p\n", d->vcpu[0].arch.privregs);
+ memset(d->vcpu.arch.privregs, 0, PAGE_SIZE);
+#endif
v->vcpu_info = &(d->shared_info->vcpu_data[0]);
d->max_pages = (128UL*1024*1024)/PAGE_SIZE; // 128MB default // FIXME
@@ -216,7 +218,7 @@
if (((d->arch.metaphysical_rr0 = allocate_metaphysical_rr()) == -1UL)
|| ((d->arch.metaphysical_rr4 = allocate_metaphysical_rr()) == -1UL))
BUG();
- VCPU(v, metaphysical_mode) = 1;
+// VCPU(v, metaphysical_mode) = 1;
v->arch.metaphysical_rr0 = d->arch.metaphysical_rr0;
v->arch.metaphysical_rr4 = d->arch.metaphysical_rr4;
v->arch.metaphysical_saved_rr0 = d->arch.metaphysical_rr0;
@@ -284,12 +286,17 @@
vmx_setup_platform(v, c);
}
-
+ else{
+ v->arch.privregs =
+ alloc_xenheap_pages(get_order(sizeof(mapped_regs_t)));
+ printf("arch_vcpu_info=%p\n", v->arch.privregs);
+ memset(v->arch.privregs, 0, PAGE_SIZE);
+ }
*regs = c->regs;
new_thread(v, regs->cr_iip, 0, 0);
v->vcpu_info->arch.evtchn_vector = c->vcpu.evtchn_vector;
- if ( c->vcpu.privregs && copy_from_user(v->vcpu_info->arch.privregs,
+ if ( c->vcpu.privregs && copy_from_user(v->arch.privregs,
c->vcpu.privregs, sizeof(mapped_regs_t))) {
printk("Bad ctxt address in arch_set_info_guest: 0x%lx\n",
c->vcpu.privregs);
return -EFAULT;
@@ -309,10 +316,10 @@
struct domain *d = v->domain;
printf("arch_do_boot_vcpu: not implemented\n");
- d->shared_info->vcpu_data[v->vcpu_id].arch.privregs =
+ d->vcpu[v->vcpu_id]->arch.privregs =
alloc_xenheap_pages(get_order(sizeof(mapped_regs_t)));
- printf("arch_vcpu_info=%p\n",
d->shared_info->vcpu_data[v->vcpu_id].arch.privregs);
- memset(d->shared_info->vcpu_data[v->vcpu_id].arch.privregs, 0,
PAGE_SIZE);
+ printf("arch_vcpu_info=%p\n", d->vcpu[v->vcpu_id]->arch.privregs);
+ memset(d->vcpu[v->vcpu_id]->arch.privregs, 0, PAGE_SIZE);
return;
}
@@ -357,10 +364,10 @@
#ifdef CONFIG_VTI
vmx_init_all_rr(v);
if (d == dom0)
- VMX_VPD(v,vgr[12]) = dom_fw_setup(d,saved_command_line,256L);
+ VCPU(v,vgr[12]) = dom_fw_setup(d,saved_command_line,256L);
/* Virtual processor context setup */
- VMX_VPD(v, vpsr) = IA64_PSR_BN;
- VPD_CR(v, dcr) = 0;
+ VCPU(v, vpsr) = IA64_PSR_BN;
+ VCPU(v, dcr) = 0;
#endif
} else {
init_all_rr(v);
@@ -995,6 +1002,12 @@
printk("Dom0: 0x%lx, domain: 0x%lx\n", (u64)dom0, (u64)d);
if (vmx_dom0)
vmx_final_setup_domain(dom0);
+ else{
+ d->vcpu[0]->arch.privregs =
+ alloc_xenheap_pages(get_order(sizeof(mapped_regs_t)));
+ printf("arch_vcpu_info=%p\n", d->vcpu[0]->arch.privregs);
+ memset(d->vcpu[0]->arch.privregs, 0, PAGE_SIZE);
+ }
set_bit(_DOMF_constructed, &d->domain_flags);
diff -r 6dadf4d93ee3 -r 52d2d5208575 xen/arch/ia64/xen/process.c
--- a/xen/arch/ia64/xen/process.c Tue Sep 13 20:20:02 2005
+++ b/xen/arch/ia64/xen/process.c Wed Sep 14 21:26:35 2005
@@ -173,7 +173,7 @@
struct vcpu *v = current;
if (vector == IA64_EXTINT_VECTOR) {
-
+
extern unsigned long vcpu_verbose, privop_trace;
static first_extint = 1;
if (first_extint) {
diff -r 6dadf4d93ee3 -r 52d2d5208575 xen/arch/ia64/xen/regionreg.c
--- a/xen/arch/ia64/xen/regionreg.c Tue Sep 13 20:20:02 2005
+++ b/xen/arch/ia64/xen/regionreg.c Wed Sep 14 21:26:35 2005
@@ -234,7 +234,7 @@
newrrv.ve = VHPT_ENABLED_REGION_7;
newrrv.ps = IA64_GRANULE_SHIFT;
ia64_new_rr7(vmMangleRID(newrrv.rrval),v->vcpu_info,
- v->vcpu_info->arch.privregs);
+ v->arch.privregs);
}
else {
newrrv.rid = newrid;
@@ -252,7 +252,7 @@
newrrv.ps = PAGE_SHIFT;
if (rreg == 0) v->arch.metaphysical_saved_rr0 = newrrv.rrval;
if (rreg == 7) ia64_new_rr7(vmMangleRID(newrrv.rrval),v->vcpu_info,
- v->vcpu_info->arch.privregs);
+ v->arch.privregs);
else set_rr(rr,newrrv.rrval);
#endif
return 1;
diff -r 6dadf4d93ee3 -r 52d2d5208575 xen/arch/ia64/xen/vcpu.c
--- a/xen/arch/ia64/xen/vcpu.c Tue Sep 13 20:20:02 2005
+++ b/xen/arch/ia64/xen/vcpu.c Wed Sep 14 21:26:35 2005
@@ -355,7 +355,11 @@
IA64FAULT vcpu_get_iva(VCPU *vcpu, UINT64 *pval)
{
- *pval = PSCBX(vcpu,iva) & ~0x7fffL;
+ if(VMX_DOMAIN(vcpu)){
+ *pval = PSCB(vcpu,iva) & ~0x7fffL;
+ }else{
+ *pval = PSCBX(vcpu,iva) & ~0x7fffL;
+ }
return (IA64_NO_FAULT);
}
@@ -435,7 +439,7 @@
UINT64 val = PSCB(vcpu,iipa);
// SP entry code does not save iipa yet nor does it get
// properly delivered in the pscb
- printf("*** vcpu_get_iipa: cr.iipa not fully implemented yet!!\n");
+// printf("*** vcpu_get_iipa: cr.iipa not fully implemented yet!!\n");
*pval = val;
return (IA64_NO_FAULT);
}
@@ -480,7 +484,11 @@
IA64FAULT vcpu_set_iva(VCPU *vcpu, UINT64 val)
{
- PSCBX(vcpu,iva) = val & ~0x7fffL;
+ if(VMX_DOMAIN(vcpu)){
+ PSCB(vcpu,iva) = val & ~0x7fffL;
+ }else{
+ PSCBX(vcpu,iva) = val & ~0x7fffL;
+ }
return (IA64_NO_FAULT);
}
@@ -539,7 +547,7 @@
{
// SP entry code does not save iipa yet nor does it get
// properly delivered in the pscb
- printf("*** vcpu_set_iipa: cr.iipa not fully implemented yet!!\n");
+// printf("*** vcpu_set_iipa: cr.iipa not fully implemented yet!!\n");
PSCB(vcpu,iipa) = val;
return IA64_NO_FAULT;
}
@@ -578,11 +586,11 @@
printf("vcpu_pend_interrupt: bad vector\n");
return;
}
-#ifdef CONFIG_VTI
+//#ifdef CONFIG_VTI
if ( VMX_DOMAIN(vcpu) ) {
- set_bit(vector,VPD_CR(vcpu,irr));
+ set_bit(vector,VCPU(vcpu,irr));
} else
-#endif // CONFIG_VTI
+//#endif // CONFIG_VTI
{
/* if (!test_bit(vector,PSCB(vcpu,delivery_mask))) return; */
if (test_bit(vector,PSCBX(vcpu,irr))) {
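The vcpu.c hunks all follow one pattern: compile-time #ifdef CONFIG_VTI guards become run-time VMX_DOMAIN(vcpu) tests, so a single hypervisor image can host VT-i and paravirtualized vcpus side by side, routing register state either to the shared privregs page (PSCB/VCPU) or to Xen-private per-vcpu state (PSCBX). A self-contained toy model of the dispatch (VMX_DOMAIN/VCPU/PSCBX are the tree's macros; the paravirtual arm is abbreviated here, since the hunk above is truncated):

    struct vcpu_sketch {
            int is_vmx_domain;            /* stand-in for VMX_DOMAIN(vcpu) */
            unsigned long shared_irr[4];  /* privregs page, as VCPU(vcpu,irr)  */
            unsigned long xen_irr[4];     /* Xen-private, as PSCBX(vcpu,irr)   */
    };

    static void pend_interrupt(struct vcpu_sketch *v, int vector)
    {
            unsigned long *irr = v->is_vmx_domain ? v->shared_irr : v->xen_irr;
            irr[vector >> 6] |= 1UL << (vector & 63);
    }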
diff -r 6dadf4d93ee3 -r 52d2d5208575 xen/include/asm-ia64/domain.h
--- a/xen/include/asm-ia64/domain.h Tue Sep 13 20:20:02 2005
+++ b/xen/include/asm-ia64/domain.h Wed Sep 14 21:26:35 2005
@@ -54,7 +54,7 @@
unsigned long dtlb_pte;
unsigned long irr[4];
unsigned long insvc[4];
- unsigned long iva;
+ unsigned long iva;
unsigned long dcr;
unsigned long itc;
unsigned long domain_itm;
@@ -63,6 +63,7 @@
unsigned long xen_timer_interval;
#endif
void *regs; /* temporary until find a better way to do privops */
+ mapped_regs_t *privregs; /* save the state of vcpu */
int metaphysical_rr0; // from arch_domain (so is pinned)
int metaphysical_rr4; // from arch_domain (so is pinned)
int metaphysical_saved_rr0; // from arch_domain (so is pinned)
diff -r 6dadf4d93ee3 -r 52d2d5208575 xen/include/asm-ia64/vcpu.h
--- a/xen/include/asm-ia64/vcpu.h Tue Sep 13 20:20:02 2005
+++ b/xen/include/asm-ia64/vcpu.h Wed Sep 14 21:26:35 2005
@@ -16,7 +16,7 @@
typedef cpu_user_regs_t REGS;
-#define VCPU(_v,_x) _v->vcpu_info->arch.privregs->_x
+#define VCPU(_v,_x) _v->arch.privregs->_x
#define PRIVOP_ADDR_COUNT
#ifdef PRIVOP_ADDR_COUNT
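This one-line change is the pivot of the whole patch: VCPU() now resolves through v->arch.privregs (the mapped_regs_t field added to the arch vcpu state in domain.h above) instead of through the shared_info page, and the VTI-only accessors VMX_VPD()/VPD_CR() are retired in vmx_vcpu.h below. Before/after, collected from the macro definitions touched in this changeset (note the old VPD_CR() indirected through vcr, so the new form implies mapped_regs_t exposes the control registers directly; its full layout is not shown in this patch):

    /* Before: separate roots for PV and VT-i state */
    #define VCPU(_v,_x)  _v->vcpu_info->arch.privregs->_x
    #define VMX_VPD(x,y) ((x)->arch.arch_vmx.vpd->y)
    #define VPD_CR(x,y)  (((cr_t*)VMX_VPD(x,vcr))->y)

    /* After: one root for both */
    #define VCPU(_v,_x)  _v->arch.privregs->_x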
diff -r 6dadf4d93ee3 -r 52d2d5208575 xen/include/asm-ia64/vmx_vcpu.h
--- a/xen/include/asm-ia64/vmx_vcpu.h Tue Sep 13 20:20:02 2005
+++ b/xen/include/asm-ia64/vmx_vcpu.h Wed Sep 14 21:26:35 2005
@@ -42,14 +42,14 @@
#define VRN5 0x5UL
#define VRN6 0x6UL
#define VRN7 0x7UL
-
+// for vlsapic
+#define VLSAPIC_INSVC(vcpu, i) ((vcpu)->arch.insvc[i])
// this def for vcpu_regs won't work if kernel stack is present
#define vcpu_regs(vcpu) (((struct pt_regs *) ((char *) (vcpu) + IA64_STK_OFFSET)) - 1)
-#define VMX_VPD(x,y) ((x)->arch.arch_vmx.vpd->y)
+//#define VMX_VPD(x,y) ((x)->arch.arch_vmx.vpd->y)
#define VMX(x,y) ((x)->arch.arch_vmx.y)
-#define VPD_CR(x,y) (((cr_t*)VMX_VPD(x,vcr))->y)
#define VMM_RR_SHIFT 20
#define VMM_RR_MASK ((1UL<<VMM_RR_SHIFT)-1)
@@ -129,89 +129,34 @@
static inline
IA64FAULT vmx_vcpu_get_dcr(VCPU *vcpu, UINT64 *pval)
{
- *pval = VPD_CR(vcpu,dcr);
+ *pval = VCPU(vcpu,dcr);
return (IA64_NO_FAULT);
}
static inline
IA64FAULT vmx_vcpu_get_itm(VCPU *vcpu, UINT64 *pval)
{
- *pval = VPD_CR(vcpu,itm);
+ *pval = VCPU(vcpu,itm);
return (IA64_NO_FAULT);
}
static inline
IA64FAULT vmx_vcpu_get_iva(VCPU *vcpu, UINT64 *pval)
{
- *pval = VPD_CR(vcpu,iva);
+ *pval = VCPU(vcpu,iva);
return (IA64_NO_FAULT);
}
static inline
IA64FAULT vmx_vcpu_get_pta(VCPU *vcpu, UINT64 *pval)
{
- *pval = VPD_CR(vcpu,pta);
- return (IA64_NO_FAULT);
-}
-static inline
-IA64FAULT vmx_vcpu_get_ipsr(VCPU *vcpu, UINT64 *pval)
-{
- *pval = VPD_CR(vcpu,ipsr);
- return (IA64_NO_FAULT);
-}
-
-static inline
-IA64FAULT vmx_vcpu_get_isr(VCPU *vcpu, UINT64 *pval)
-{
- *pval = VPD_CR(vcpu,isr);
- return (IA64_NO_FAULT);
-}
-static inline
-IA64FAULT vmx_vcpu_get_iip(VCPU *vcpu, UINT64 *pval)
-{
- *pval = VPD_CR(vcpu,iip);
- return (IA64_NO_FAULT);
-}
-static inline
-IA64FAULT vmx_vcpu_get_ifa(VCPU *vcpu, UINT64 *pval)
-{
- *pval = VPD_CR(vcpu,ifa);
- return (IA64_NO_FAULT);
-}
-
-static inline
-IA64FAULT vmx_vcpu_get_itir(VCPU *vcpu, UINT64 *pval)
-{
- *pval = VPD_CR(vcpu,itir);
- return (IA64_NO_FAULT);
-}
-static inline
-IA64FAULT vmx_vcpu_get_iipa(VCPU *vcpu, UINT64 *pval)
-{
- *pval = VPD_CR(vcpu,iipa);
- return (IA64_NO_FAULT);
-}
-static inline
-IA64FAULT vmx_vcpu_get_ifs(VCPU *vcpu, UINT64 *pval)
-{
- *pval = VPD_CR(vcpu,ifs);
- return (IA64_NO_FAULT);
-}
-static inline
-IA64FAULT vmx_vcpu_get_iim(VCPU *vcpu, UINT64 *pval)
-{
- *pval = VPD_CR(vcpu,iim);
- return (IA64_NO_FAULT);
-}
-static inline
-IA64FAULT vmx_vcpu_get_iha(VCPU *vcpu, UINT64 *pval)
-{
- *pval = VPD_CR(vcpu,iha);
- return (IA64_NO_FAULT);
-}
+ *pval = VCPU(vcpu,pta);
+ return (IA64_NO_FAULT);
+}
+
static inline
IA64FAULT vmx_vcpu_get_lid(VCPU *vcpu, UINT64 *pval)
{
- *pval = VPD_CR(vcpu,lid);
+ *pval = VCPU(vcpu,lid);
return (IA64_NO_FAULT);
}
static inline
@@ -223,7 +168,7 @@
static inline
IA64FAULT vmx_vcpu_get_tpr(VCPU *vcpu, UINT64 *pval)
{
- *pval = VPD_CR(vcpu,tpr);
+ *pval = VCPU(vcpu,tpr);
return (IA64_NO_FAULT);
}
static inline
@@ -235,54 +180,54 @@
static inline
IA64FAULT vmx_vcpu_get_irr0(VCPU *vcpu, UINT64 *pval)
{
- *pval = VPD_CR(vcpu,irr[0]);
+ *pval = VCPU(vcpu,irr[0]);
return (IA64_NO_FAULT);
}
static inline
IA64FAULT vmx_vcpu_get_irr1(VCPU *vcpu, UINT64 *pval)
{
- *pval = VPD_CR(vcpu,irr[1]);
+ *pval = VCPU(vcpu,irr[1]);
return (IA64_NO_FAULT);
}
static inline
IA64FAULT vmx_vcpu_get_irr2(VCPU *vcpu, UINT64 *pval)
{
- *pval = VPD_CR(vcpu,irr[2]);
+ *pval = VCPU(vcpu,irr[2]);
return (IA64_NO_FAULT);
}
static inline
IA64FAULT vmx_vcpu_get_irr3(VCPU *vcpu, UINT64 *pval)
{
- *pval = VPD_CR(vcpu,irr[3]);
+ *pval = VCPU(vcpu,irr[3]);
return (IA64_NO_FAULT);
}
static inline
IA64FAULT vmx_vcpu_get_itv(VCPU *vcpu, UINT64 *pval)
{
- *pval = VPD_CR(vcpu,itv);
+ *pval = VCPU(vcpu,itv);
return (IA64_NO_FAULT);
}
static inline
IA64FAULT vmx_vcpu_get_pmv(VCPU *vcpu, UINT64 *pval)
{
- *pval = VPD_CR(vcpu,pmv);
+ *pval = VCPU(vcpu,pmv);
return (IA64_NO_FAULT);
}
static inline
IA64FAULT vmx_vcpu_get_cmcv(VCPU *vcpu, UINT64 *pval)
{
- *pval = VPD_CR(vcpu,cmcv);
+ *pval = VCPU(vcpu,cmcv);
return (IA64_NO_FAULT);
}
static inline
IA64FAULT vmx_vcpu_get_lrr0(VCPU *vcpu, UINT64 *pval)
{
- *pval = VPD_CR(vcpu,lrr0);
+ *pval = VCPU(vcpu,lrr0);
return (IA64_NO_FAULT);
}
static inline
IA64FAULT vmx_vcpu_get_lrr1(VCPU *vcpu, UINT64 *pval)
-{ *pval = VPD_CR(vcpu,lrr1);
+{ *pval = VCPU(vcpu,lrr1);
return (IA64_NO_FAULT);
}
static inline
@@ -290,7 +235,7 @@
vmx_vcpu_set_dcr(VCPU *vcpu, u64 val)
{
u64 mdcr, mask;
- VPD_CR(vcpu,dcr)=val;
+ VCPU(vcpu,dcr)=val;
/* All vDCR bits will go to mDCR, except for be/pp bit */
mdcr = ia64_get_dcr();
mask = IA64_DCR_BE | IA64_DCR_PP;
@@ -307,7 +252,7 @@
vtime_t *vtm;
vtm=&(vcpu->arch.arch_vmx.vtm);
- VPD_CR(vcpu,itm)=val;
+ VCPU(vcpu,itm)=val;
#ifdef CONFIG_VTI
vtm_interruption_update(vcpu, vtm);
#endif
@@ -317,7 +262,7 @@
IA64FAULT
vmx_vcpu_set_iva(VCPU *vcpu, u64 val)
{
- VPD_CR(vcpu,iva)=val;
+ VCPU(vcpu,iva)=val;
return IA64_NO_FAULT;
}
@@ -325,78 +270,7 @@
IA64FAULT
vmx_vcpu_set_pta(VCPU *vcpu, u64 val)
{
- VPD_CR(vcpu,pta)=val;
- return IA64_NO_FAULT;
-}
-
-static inline
-IA64FAULT
-vmx_vcpu_set_ipsr(VCPU *vcpu, u64 val)
-{
- VPD_CR(vcpu,ipsr)=val;
- return IA64_NO_FAULT;
-}
-
-static inline
-IA64FAULT
-vmx_vcpu_set_isr(VCPU *vcpu, u64 val)
-{
- VPD_CR(vcpu,isr)=val;
- return IA64_NO_FAULT;
-}
-
-static inline
-IA64FAULT
-vmx_vcpu_set_iip(VCPU *vcpu, u64 val)
-{
- VPD_CR(vcpu,iip)=val;
- return IA64_NO_FAULT;
-}
-
-static inline
-IA64FAULT
-vmx_vcpu_set_ifa(VCPU *vcpu, u64 val)
-{
- VPD_CR(vcpu,ifa)=val;
- return IA64_NO_FAULT;
-}
-
-static inline
-IA64FAULT
-vmx_vcpu_set_itir(VCPU *vcpu, u64 val)
-{
- VPD_CR(vcpu,itir)=val;
- return IA64_NO_FAULT;
-}
-
-static inline
-IA64FAULT
-vmx_vcpu_set_iipa(VCPU *vcpu, u64 val)
-{
- VPD_CR(vcpu,iipa)=val;
- return IA64_NO_FAULT;
-}
-
-static inline
-IA64FAULT
-vmx_vcpu_set_ifs(VCPU *vcpu, u64 val)
-{
- VPD_CR(vcpu,ifs)=val;
- return IA64_NO_FAULT;
-}
-static inline
-IA64FAULT
-vmx_vcpu_set_iim(VCPU *vcpu, u64 val)
-{
- VPD_CR(vcpu,iim)=val;
- return IA64_NO_FAULT;
-}
-
-static inline
-IA64FAULT
-vmx_vcpu_set_iha(VCPU *vcpu, u64 val)
-{
- VPD_CR(vcpu,iha)=val;
+ VCPU(vcpu,pta)=val;
return IA64_NO_FAULT;
}
@@ -404,7 +278,7 @@
IA64FAULT
vmx_vcpu_set_lid(VCPU *vcpu, u64 val)
{
- VPD_CR(vcpu,lid)=val;
+ VCPU(vcpu,lid)=val;
#ifdef V_IOSAPIC_READY
vlapic_update_shared_info(vcpu);
#endif
@@ -427,7 +301,7 @@
vmx_vcpu_set_itv(VCPU *vcpu, u64 val)
{
- VPD_CR(vcpu,itv)=val;
+ VCPU(vcpu,itv)=val;
#ifdef CONFIG_VTI
vtm_set_itv(vcpu);
#endif
@@ -437,28 +311,28 @@
IA64FAULT
vmx_vcpu_set_pmv(VCPU *vcpu, u64 val)
{
- VPD_CR(vcpu,pmv)=val;
+ VCPU(vcpu,pmv)=val;
return IA64_NO_FAULT;
}
static inline
IA64FAULT
vmx_vcpu_set_cmcv(VCPU *vcpu, u64 val)
{
- VPD_CR(vcpu,cmcv)=val;
+ VCPU(vcpu,cmcv)=val;
return IA64_NO_FAULT;
}
static inline
IA64FAULT
vmx_vcpu_set_lrr0(VCPU *vcpu, u64 val)
{
- VPD_CR(vcpu,lrr0)=val;
+ VCPU(vcpu,lrr0)=val;
return IA64_NO_FAULT;
}
static inline
IA64FAULT
vmx_vcpu_set_lrr1(VCPU *vcpu, u64 val)
{
- VPD_CR(vcpu,lrr1)=val;
+ VCPU(vcpu,lrr1)=val;
return IA64_NO_FAULT;
}
@@ -502,7 +376,7 @@
if(reg > 4){
panic("there are only five cpuid registers");
}
- *pval=VMX_VPD(vcpu,vcpuid[reg]);
+ *pval=VCPU(vcpu,vcpuid[reg]);
return (IA64_NO_FAULT);
}
@@ -583,14 +457,14 @@
IA64FAULT vmx_vcpu_bsw0(VCPU *vcpu)
{
- VMX_VPD(vcpu,vpsr) &= ~IA64_PSR_BN;
+ VCPU(vcpu,vpsr) &= ~IA64_PSR_BN;
return (IA64_NO_FAULT);
}
static inline
IA64FAULT vmx_vcpu_bsw1(VCPU *vcpu)
{
- VMX_VPD(vcpu,vpsr) |= IA64_PSR_BN;
+ VCPU(vcpu,vpsr) |= IA64_PSR_BN;
return (IA64_NO_FAULT);
}
#if 0
diff -r 6dadf4d93ee3 -r 52d2d5208575 xen/include/asm-ia64/vmx_vpd.h
--- a/xen/include/asm-ia64/vmx_vpd.h Tue Sep 13 20:20:02 2005
+++ b/xen/include/asm-ia64/vmx_vpd.h Wed Sep 14 21:26:35 2005
@@ -64,19 +64,19 @@
struct arch_vmx_struct {
// struct virutal_platform_def vmx_platform;
- vpd_t *vpd;
+// vpd_t *vpd;
vtime_t vtm;
unsigned long vrr[8];
unsigned long vkr[8];
- unsigned long mrr5;
- unsigned long mrr6;
- unsigned long mrr7;
+// unsigned long mrr5;
+// unsigned long mrr6;
+// unsigned long mrr7;
unsigned long mpta;
- unsigned long rfi_pfs;
- unsigned long rfi_iip;
- unsigned long rfi_ipsr;
- unsigned long rfi_ifs;
- unsigned long in_service[4]; // vLsapic inservice IRQ bits
+// unsigned long rfi_pfs;
+// unsigned long rfi_iip;
+// unsigned long rfi_ipsr;
+// unsigned long rfi_ifs;
+// unsigned long in_service[4]; // vLsapic inservice IRQ bits
unsigned long flags;
};
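The fields commented out of struct arch_vmx_struct are relocations, not deletions; the rest of the changeset shows where each one landed. A summary map, reconstructed from the other hunks in this patch:

    /* vpd              -> v->arch.privregs       (domain.h, vcpu.h, vmx_init.c)
     * in_service[4]    -> v->arch.insvc[4]       (VLSAPIC_INSVC, vmx_vcpu.h)
     * rfi_pfs          -> struct pt_regs         (public/arch-ia64.h union below)
     * rfi_iip/ipsr/ifs -> removed together with their asm-offsets entries
     *                     (their replacement, if any, is outside this patch)
     * mrr5..mrr7       -> disabled under #if 0   (vmx_phy_mode.c)
     */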
diff -r 6dadf4d93ee3 -r 52d2d5208575 xen/include/public/arch-ia64.h
--- a/xen/include/public/arch-ia64.h Tue Sep 13 20:20:02 2005
+++ b/xen/include/public/arch-ia64.h Wed Sep 14 21:26:35 2005
@@ -95,35 +95,37 @@
unsigned long r2; /* scratch */
unsigned long r3; /* scratch */
-#ifdef CONFIG_VTI
- unsigned long r4; /* preserved */
- unsigned long r5; /* preserved */
- unsigned long r6; /* preserved */
- unsigned long r7; /* preserved */
- unsigned long cr_iipa; /* for emulation */
- unsigned long cr_isr; /* for emulation */
- unsigned long eml_unat; /* used for emulating instruction */
- unsigned long rfi_pfs; /* used for elulating rfi */
-#endif
-
- /* The following registers are saved by SAVE_REST: */
- unsigned long r16; /* scratch */
- unsigned long r17; /* scratch */
- unsigned long r18; /* scratch */
- unsigned long r19; /* scratch */
- unsigned long r20; /* scratch */
- unsigned long r21; /* scratch */
- unsigned long r22; /* scratch */
- unsigned long r23; /* scratch */
- unsigned long r24; /* scratch */
- unsigned long r25; /* scratch */
- unsigned long r26; /* scratch */
- unsigned long r27; /* scratch */
- unsigned long r28; /* scratch */
- unsigned long r29; /* scratch */
- unsigned long r30; /* scratch */
- unsigned long r31; /* scratch */
-
+ union {
+ struct {
+ /* The following registers are saved by SAVE_REST: */
+ unsigned long r16; /* scratch */
+ unsigned long r17; /* scratch */
+ unsigned long r18; /* scratch */
+ unsigned long r19; /* scratch */
+ unsigned long r20; /* scratch */
+ unsigned long r21; /* scratch */
+ unsigned long r22; /* scratch */
+ unsigned long r23; /* scratch */
+ unsigned long r24; /* scratch */
+ unsigned long r25; /* scratch */
+ unsigned long r26; /* scratch */
+ unsigned long r27; /* scratch */
+ unsigned long r28; /* scratch */
+ unsigned long r29; /* scratch */
+ unsigned long r30; /* scratch */
+ unsigned long r31; /* scratch */
+ };
+ struct {
+ unsigned long r4; /* preserved */
+ unsigned long r5; /* preserved */
+ unsigned long r6; /* preserved */
+ unsigned long r7; /* preserved */
+ unsigned long cr_iipa; /* for emulation */
+ unsigned long cr_isr; /* for emulation */
+ unsigned long eml_unat; /* used for emulating instruction */
+ unsigned long rfi_pfs; /* used for emulating rfi */
+ };
+ };
unsigned long ar_ccv; /* compare/exchange value (scratch) */
/*
@@ -238,10 +240,12 @@
unsigned long tmp[8]; // temp registers (e.g. for hyperprivops)
};
};
+#if 0
#ifdef CONFIG_VTI
unsigned long reserved6[3456];
unsigned long vmm_avail[128];
unsigned long reserved7[4096];
+#endif
#endif
} mapped_regs_t;
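The anonymous union above replaces the old #ifdef CONFIG_VTI block: the four preserved registers and the emulation scratch now overlay the first eight r16..r31 save slots, so the register frame has a single size and layout whichever way Xen is built. (Anonymous struct/union members were a GCC extension in 2005-era C; C11 standardized them later.) A toy model of the overlay, with named members for portability:

    #include <stdio.h>

    union regs_overlay {
            unsigned long scratch[16];      /* r16..r31, the SAVE_REST view */
            struct {
                    unsigned long r4, r5, r6, r7;   /* preserved */
                    unsigned long cr_iipa, cr_isr;  /* for emulation */
                    unsigned long eml_unat, rfi_pfs;
            } emul;                         /* the VT-i emulation view */
    };

    int main(void)
    {
            /* The union is as large as its larger member: 16 slots (128
             * bytes on LP64) whether or not the emulation view is used. */
            printf("%zu\n", sizeof(union regs_overlay));
            return 0;
    }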