# HG changeset patch
# User djm@xxxxxxxxxxxxxxx
# Node ID ca44d2dbb273b4a9388b067fc16a0a38fc463a92
# Parent 89d92ce1092462f1999221d2615a9976d78bd17b
Intel's pre-bk->hg transition patches
Signed-off-by: Eddie Dong <Eddie.dong@xxxxxxxxx>
Signed-off-by: Anthony Xu <Anthony.xu@xxxxxxxxx>
Signed-off-by: Kevin Tian <Kevin.tian@xxxxxxxxx>
diff -r 89d92ce10924 -r ca44d2dbb273 xen/arch/ia64/vmx_entry.S
--- a/xen/arch/ia64/vmx_entry.S Sat Jul 9 14:37:13 2005
+++ b/xen/arch/ia64/vmx_entry.S Sat Jul 9 14:58:56 2005
@@ -217,7 +217,7 @@
alloc loc0=ar.pfs,0,1,1,0
adds out0=16,r12
;;
- br.call.sptk.many b0=vmx_deliver_pending_interrupt
+ br.call.sptk.many b0=leave_hypervisor_tail
mov ar.pfs=loc0
adds r8=IA64_VPD_BASE_OFFSET,r13
;;
diff -r 89d92ce10924 -r ca44d2dbb273 xen/arch/ia64/mm.c
--- a/xen/arch/ia64/mm.c Sat Jul 9 14:37:13 2005
+++ b/xen/arch/ia64/mm.c Sat Jul 9 14:58:56 2005
@@ -95,7 +95,7 @@
#include <asm/vmx_vcpu.h>
#include <asm/vmmu.h>
#include <asm/regionreg.h>
-
+#include <asm/vmx_mm_def.h>
/*
uregs->ptr is virtual address
uregs->val is pte value
@@ -109,8 +109,9 @@
mmu_update_t req;
ia64_rr rr;
thash_cb_t *hcb;
- thash_data_t entry={0};
+ thash_data_t entry={0},*ovl;
vcpu = current;
+ search_section_t sections;
hcb = vmx_vcpu_get_vtlb(vcpu);
for ( i = 0; i < count; i++ )
{
@@ -124,8 +125,18 @@
entry.cl = DSIDE_TLB;
rr = vmx_vcpu_rr(vcpu, req.ptr);
entry.ps = rr.ps;
+ entry.key = redistribute_rid(rr.rid);
entry.rid = rr.rid;
- vtlb_insert(hcb, &entry, req.ptr);
+ entry.vadr = PAGEALIGN(req.ptr,entry.ps);
+ sections.tr = 1;
+ sections.tc = 0;
+ ovl = thash_find_overlap(hcb, &entry, sections);
+ if (ovl) {
+ // generate MCA.
+ panic("Tlb conflict!!");
+ return;
+ }
+ thash_purge_and_insert(hcb, &entry);
}else if(cmd == MMU_MACHPHYS_UPDATE){
mfn = req.ptr >>PAGE_SHIFT;
gpfn = req.val;
diff -r 89d92ce10924 -r ca44d2dbb273 xen/arch/ia64/vmx_init.c
--- a/xen/arch/ia64/vmx_init.c Sat Jul 9 14:37:13 2005
+++ b/xen/arch/ia64/vmx_init.c Sat Jul 9 14:58:56 2005
@@ -40,6 +40,7 @@
#include <asm/vmmu.h>
#include <public/arch-ia64.h>
#include <asm/vmx_phy_mode.h>
+#include <asm/processor.h>
#include <asm/vmx.h>
#include <xen/mm.h>
@@ -225,6 +226,17 @@
vmx_purge_double_mapping(dom_rr7, KERNEL_START,
(u64)v->arch.vtlb->ts->vhpt->hash);
+ /* Need to save KR across domain switches, though HV itself doesn't
+ * use them.
+ */
+ v->arch.arch_vmx.vkr[0] = ia64_get_kr(0);
+ v->arch.arch_vmx.vkr[1] = ia64_get_kr(1);
+ v->arch.arch_vmx.vkr[2] = ia64_get_kr(2);
+ v->arch.arch_vmx.vkr[3] = ia64_get_kr(3);
+ v->arch.arch_vmx.vkr[4] = ia64_get_kr(4);
+ v->arch.arch_vmx.vkr[5] = ia64_get_kr(5);
+ v->arch.arch_vmx.vkr[6] = ia64_get_kr(6);
+ v->arch.arch_vmx.vkr[7] = ia64_get_kr(7);
}
/* Even guest is in physical mode, we still need such double mapping */
@@ -234,6 +246,7 @@
u64 status, psr;
u64 old_rr0, dom_rr7, rr0_xen_start, rr0_vhpt;
u64 pte_xen, pte_vhpt;
+ int i;
status = ia64_pal_vp_restore(v->arch.arch_vmx.vpd, 0);
if (status != PAL_STATUS_SUCCESS)
@@ -246,6 +259,14 @@
(u64)v->arch.vtlb->ts->vhpt->hash,
pte_xen, pte_vhpt);
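+ /* Restore the guest kernel registers saved at domain switch-out */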
+ ia64_set_kr(0, v->arch.arch_vmx.vkr[0]);
+ ia64_set_kr(1, v->arch.arch_vmx.vkr[1]);
+ ia64_set_kr(2, v->arch.arch_vmx.vkr[2]);
+ ia64_set_kr(3, v->arch.arch_vmx.vkr[3]);
+ ia64_set_kr(4, v->arch.arch_vmx.vkr[4]);
+ ia64_set_kr(5, v->arch.arch_vmx.vkr[5]);
+ ia64_set_kr(6, v->arch.arch_vmx.vkr[6]);
+ ia64_set_kr(7, v->arch.arch_vmx.vkr[7]);
/* Guest vTLB is not required to be switched explicitly, since
* anchored in vcpu */
}
@@ -290,7 +311,7 @@
vmx_create_vp(v);
/* Set this ed to be vmx */
- v->arch.arch_vmx.flags = 1;
+ set_bit(ARCH_VMX_VMCS_LOADED, &v->arch.arch_vmx.flags);
/* Other vmx specific initialization work */
}
diff -r 89d92ce10924 -r ca44d2dbb273 xen/include/asm-ia64/vmx_vpd.h
--- a/xen/include/asm-ia64/vmx_vpd.h Sat Jul 9 14:37:13 2005
+++ b/xen/include/asm-ia64/vmx_vpd.h Sat Jul 9 14:58:56 2005
@@ -19,8 +19,8 @@
* Kun Tian (Kevin Tian) (kevin.tian@xxxxxxxxx)
*/
-#ifndef _VPD_H_
-#define _VPD_H_
+#ifndef _ASM_IA64_VMX_VPD_H_
+#define _ASM_IA64_VMX_VPD_H_
#ifndef __ASSEMBLY__
@@ -123,6 +123,7 @@
vpd_t *vpd;
vtime_t vtm;
unsigned long vrr[8];
+ unsigned long vkr[8];
unsigned long mrr5;
unsigned long mrr6;
unsigned long mrr7;
@@ -145,6 +146,7 @@
#define ARCH_VMX_VMCS_LAUNCH 1 /* Needs VMCS launch */
#define ARCH_VMX_VMCS_RESUME 2 /* Needs VMCS resume */
#define ARCH_VMX_IO_WAIT 3 /* Waiting for I/O completion */
+#define ARCH_VMX_INTR_ASSIST 4 /* Need DM's assist to issue intr */
#define VMX_DEBUG 1
@@ -191,4 +193,4 @@
#define VPD_VMM_VAIL_START_OFFSET 31744
-#endif /* _VPD_H_ */
+#endif /* _ASM_IA64_VMX_VPD_H_ */
diff -r 89d92ce10924 -r ca44d2dbb273 xen/include/asm-ia64/xenprocessor.h
--- a/xen/include/asm-ia64/xenprocessor.h Sat Jul 9 14:37:13 2005
+++ b/xen/include/asm-ia64/xenprocessor.h Sat Jul 9 14:58:56 2005
@@ -166,6 +166,16 @@
};
} ipi_d_t;
+typedef union {
+ __u64 val;
+ struct {
+ __u64 ig0 : 4;
+ __u64 mic : 4;
+ __u64 rsv : 8;
+ __u64 mmi : 1;
+ __u64 ig1 : 47;
+ };
+} tpr_t;
#define IA64_ISR_CODE_MASK0 0xf
#define IA64_UNIMPL_DADDR_FAULT 0x30
diff -r 89d92ce10924 -r ca44d2dbb273 xen/arch/ia64/vmx_hypercall.c
--- a/xen/arch/ia64/vmx_hypercall.c Sat Jul 9 14:37:13 2005
+++ b/xen/arch/ia64/vmx_hypercall.c Sat Jul 9 14:58:56 2005
@@ -141,21 +141,27 @@
static int do_set_shared_page(VCPU *vcpu, u64 gpa)
{
u64 shared_info, o_info;
+ struct domain *d = vcpu->domain;
+ struct vcpu *v;
if(vcpu->domain!=dom0)
return -EPERM;
shared_info = __gpa_to_mpa(vcpu->domain, gpa);
o_info = (u64)vcpu->domain->shared_info;
- vcpu->domain->shared_info= (shared_info_t *)__va(shared_info);
+ d->shared_info= (shared_info_t *)__va(shared_info);
/* Copy existing shared info into new page */
- if (!o_info) {
- memcpy((void*)vcpu->domain->shared_info, (void*)o_info, PAGE_SIZE);
- /* If original page belongs to xen heap, then relinguish back
- * to xen heap. Or else, leave to domain itself to decide.
- */
- if (likely(IS_XEN_HEAP_FRAME(virt_to_page(o_info))))
- free_xenheap_page(o_info);
- }
+ if (o_info) {
+ memcpy((void*)d->shared_info, (void*)o_info, PAGE_SIZE);
+ for_each_vcpu(d, v) {
+ v->vcpu_info = &d->shared_info->vcpu_data[v->vcpu_id];
+ }
+ /* If original page belongs to xen heap, then relinquish back
+ * to xen heap. Or else, leave to domain itself to decide.
+ */
+ if (likely(IS_XEN_HEAP_FRAME(virt_to_page(o_info))))
+ free_xenheap_page(o_info);
+ } else
+ memset(d->shared_info, 0, PAGE_SIZE);
return 0;
}
diff -r 89d92ce10924 -r ca44d2dbb273 xen/arch/ia64/Makefile
--- a/xen/arch/ia64/Makefile Sat Jul 9 14:37:13 2005
+++ b/xen/arch/ia64/Makefile Sat Jul 9 14:58:56 2005
@@ -15,7 +15,7 @@
ifeq ($(CONFIG_VTI),y)
OBJS += vmx_init.o vmx_virt.o vmx_vcpu.o vmx_process.o vmx_vsa.o vmx_ivt.o \
vmx_phy_mode.o vmx_utility.o vmx_interrupt.o vmx_entry.o vmmu.o \
- vtlb.o mmio.o vlsapic.o vmx_hypercall.o mm.o
+ vtlb.o mmio.o vlsapic.o vmx_hypercall.o mm.o vmx_support.o pal_emul.o
endif
# perfmon.o
# unwind.o needed for kernel unwinding (rare)
diff -r 89d92ce10924 -r ca44d2dbb273 xen/include/asm-ia64/privop.h
--- a/xen/include/asm-ia64/privop.h Sat Jul 9 14:37:13 2005
+++ b/xen/include/asm-ia64/privop.h Sat Jul 9 14:58:56 2005
@@ -138,6 +138,19 @@
IA64_INST inst;
struct { unsigned long qp:6, un14:14, r3:7, x6:6, x3:3, un1:1, major:4; };
} INST64_M47;
+typedef union U_INST64_M1{
+ IA64_INST inst;
+ struct { unsigned long qp:6, r1:7, un7:7, r3:7, x:1, hint:2, x6:6, m:1, major:4; };
+} INST64_M1;
+typedef union U_INST64_M4 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, un7:7, r2:7, r3:7, x:1, hint:2, x6:6, m:1, major:4; };
+} INST64_M4;
+typedef union U_INST64_M6 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, f1:7, un7:7, r3:7, x:1, hint:2, x6:6, m:1, major:4; };
+} INST64_M6;
+
#endif // CONFIG_VTI
typedef union U_INST64 {
@@ -151,6 +164,11 @@
INST64_I26 I26; // mov register to ar (I unit)
INST64_I27 I27; // mov immediate to ar (I unit)
INST64_I28 I28; // mov from ar (I unit)
+#ifdef CONFIG_VTI
+ INST64_M1 M1; // ld integer
+ INST64_M4 M4; // st integer
+ INST64_M6 M6; // ldfd floating point
+#endif // CONFIG_VTI
INST64_M28 M28; // purge translation cache entry
INST64_M29 M29; // mov register to ar (M unit)
INST64_M30 M30; // mov immediate to ar (M unit)
diff -r 89d92ce10924 -r ca44d2dbb273 xen/arch/ia64/vmx_virt.c
--- a/xen/arch/ia64/vmx_virt.c Sat Jul 9 14:37:13 2005
+++ b/xen/arch/ia64/vmx_virt.c Sat Jul 9 14:58:56 2005
@@ -1276,7 +1276,13 @@
}
-
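+/* After emulation completes, re-run VHPI detection if the guest changed vpsr.i or vtpr */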
+static void post_emulation_action(VCPU *vcpu)
+{
+ if ( vcpu->arch.irq_new_condition ) {
+ vcpu->arch.irq_new_condition = 0;
+ vhpi_detection(vcpu);
+ }
+}
//#define BYPASS_VMAL_OPCODE
extern IA64_SLOT_TYPE slot_types[0x20][3];
@@ -1336,7 +1342,7 @@
slot = ((struct ia64_psr *)&(regs->cr_ipsr))->ri;
if (!slot) inst.inst = bundle.slot0;
else if (slot == 1)
- inst.inst = bundle.slot1a + (bundle.slot1b<<23);
+ inst.inst = bundle.slot1a + (bundle.slot1b<<18);
else if (slot == 2) inst.inst = bundle.slot2;
else printf("priv_handle_op: illegal slot: %d\n", slot);
slot_type = slot_types[bundle.template][slot];
@@ -1478,9 +1484,11 @@
status=IA64_FAULT;
break;
default:
- printf("unknown cause %d:\n", cause);
+ printf("unknown cause %d, iip: %lx, ipsr: %lx\n", cause,regs->cr_iip,regs->cr_ipsr);
+ while(1);
/* For unknown cause, let hardware to re-execute */
status=IA64_RETRY;
+ break;
// panic("unknown cause in virtualization intercept");
};
@@ -1494,6 +1502,7 @@
}
recover_if_physical_mode(vcpu);
+ post_emulation_action (vcpu);
//TODO set_irq_check(v);
return;
diff -r 89d92ce10924 -r ca44d2dbb273 xen/arch/ia64/vmx_vcpu.c
--- a/xen/arch/ia64/vmx_vcpu.c Sat Jul 9 14:37:13 2005
+++ b/xen/arch/ia64/vmx_vcpu.c Sat Jul 9 14:58:56 2005
@@ -22,8 +22,6 @@
* Yaozu Dong (Eddie Dong) (Eddie.dong@xxxxxxxxx)
* Xuefei Xu (Anthony Xu) (Anthony.xu@xxxxxxxxx)
*/
-
-
#include <linux/sched.h>
#include <public/arch-ia64.h>
@@ -71,8 +69,8 @@
//unsigned long last_guest_rsm = 0x0;
struct guest_psr_bundle{
- unsigned long ip;
- unsigned long psr;
+ unsigned long ip;
+ unsigned long psr;
};
struct guest_psr_bundle guest_psr_buf[100];
@@ -107,20 +105,24 @@
IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA
));
+ if ( !old_psr.i && (value & IA64_PSR_I) ) {
+ // vpsr.i 0->1
+ vcpu->arch.irq_new_condition = 1;
+ }
new_psr.val=vmx_vcpu_get_psr(vcpu);
{
- struct xen_regs *regs = vcpu_regs(vcpu);
- guest_psr_buf[guest_psr_index].ip = regs->cr_iip;
- guest_psr_buf[guest_psr_index].psr = new_psr.val;
- if (++guest_psr_index >= 100)
- guest_psr_index = 0;
+ struct xen_regs *regs = vcpu_regs(vcpu);
+ guest_psr_buf[guest_psr_index].ip = regs->cr_iip;
+ guest_psr_buf[guest_psr_index].psr = new_psr.val;
+ if (++guest_psr_index >= 100)
+ guest_psr_index = 0;
}
#if 0
if (old_psr.i != new_psr.i) {
- if (old_psr.i)
- last_guest_rsm = vcpu_regs(vcpu)->cr_iip;
- else
- last_guest_rsm = 0;
+ if (old_psr.i)
+ last_guest_rsm = vcpu_regs(vcpu)->cr_iip;
+ else
+ last_guest_rsm = 0;
}
#endif
@@ -270,8 +272,8 @@
{
va &= ~ (PSIZE(ps)-1);
if ( va == 0x2000000002908000UL ||
- va == 0x600000000000C000UL ) {
- stop();
+ va == 0x600000000000C000UL ) {
+ stop();
}
if (tlb_debug) printf("%s at %lx %lx\n", str, va, 1UL<<ps);
}
@@ -433,4 +435,11 @@
return IA64_NO_FAULT;
}
-
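+/* Writing vtpr may unmask a pending interrupt, so flag a VHPI re-check after emulation */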
+IA64FAULT
+vmx_vcpu_set_tpr(VCPU *vcpu, u64 val)
+{
+ VPD_CR(vcpu,tpr)=val;
+ vcpu->arch.irq_new_condition = 1;
+ return IA64_NO_FAULT;
+}
+
diff -r 89d92ce10924 -r ca44d2dbb273 xen/include/asm-ia64/domain.h
--- a/xen/include/asm-ia64/domain.h Sat Jul 9 14:37:13 2005
+++ b/xen/include/asm-ia64/domain.h Sat Jul 9 14:58:56 2005
@@ -42,8 +42,6 @@
* max_pages in domain struct, which indicates maximum memory size
*/
unsigned long max_pfn;
- unsigned int section_nr;
- mm_section_t *sections; /* Describe memory hole except for Dom0 */
#endif //CONFIG_VTI
u64 xen_vastart;
u64 xen_vaend;
@@ -88,6 +86,8 @@
void (*schedule_tail) (struct vcpu *);
struct trap_bounce trap_bounce;
thash_cb_t *vtlb;
+ char irq_new_pending;
+ char irq_new_condition; // vpsr.i/vtpr change, check for pending VHPI
//for phycial emulation
unsigned long old_rsc;
int mode_flags;
diff -r 89d92ce10924 -r ca44d2dbb273 xen/arch/ia64/vmx_phy_mode.c
--- a/xen/arch/ia64/vmx_phy_mode.c Sat Jul 9 14:37:13 2005
+++ b/xen/arch/ia64/vmx_phy_mode.c Sat Jul 9 14:58:56 2005
@@ -119,30 +119,67 @@
vcpu->arch.old_rsc = 0;
vcpu->arch.mode_flags = GUEST_IN_PHY;
- psr = ia64_clear_ic();
-
- ia64_set_rr((VRN0<<VRN_SHIFT), vcpu->domain->arch.emul_phy_rr0.rrval);
- ia64_srlz_d();
- ia64_set_rr((VRN4<<VRN_SHIFT), vcpu->domain->arch.emul_phy_rr4.rrval);
- ia64_srlz_d();
+ return;
+}
+
+extern u64 get_mfn(domid_t domid, u64 gpfn, u64 pages);
#if 0
- /* FIXME: temp workaround to support guest physical mode */
-ia64_itr(0x1, IA64_TEMP_PHYSICAL, dom0_start,
- pte_val(pfn_pte((dom0_start >> PAGE_SHIFT), PAGE_KERNEL)),
- 28);
-ia64_itr(0x2, IA64_TEMP_PHYSICAL, dom0_start,
- pte_val(pfn_pte((dom0_start >> PAGE_SHIFT), PAGE_KERNEL)),
- 28);
-ia64_srlz_i();
+void
+physical_itlb_miss_domn(VCPU *vcpu, u64 vadr)
+{
+ u64 psr;
+ IA64_PSR vpsr;
+ u64 mppn,gppn,mpp1,gpp1;
+ struct domain *d;
+ static u64 test=0;
+ d=vcpu->domain;
+ if(test)
+ panic("domn physical itlb miss happen\n");
+ else
+ test=1;
+ vpsr.val=vmx_vcpu_get_psr(vcpu);
+ gppn=(vadr<<1)>>13;
+ mppn = get_mfn(DOMID_SELF,gppn,1);
+ mppn=(mppn<<12)|(vpsr.cpl<<7);
+ gpp1=0;
+ mpp1 = get_mfn(DOMID_SELF,gpp1,1);
+ mpp1=(mpp1<<12)|(vpsr.cpl<<7);
+// if(vadr>>63)
+// mppn |= PHY_PAGE_UC;
+// else
+// mppn |= PHY_PAGE_WB;
+ mpp1 |= PHY_PAGE_WB;
+ psr=ia64_clear_ic();
+ ia64_itr(0x1, IA64_TEMP_PHYSICAL, vadr&(~0xfff), (mppn|PHY_PAGE_WB), 24);
+ ia64_srlz_i();
+ ia64_itr(0x2, IA64_TEMP_PHYSICAL, vadr&(~0xfff), (mppn|PHY_PAGE_WB), 24);
+ ia64_stop();
+ ia64_srlz_i();
+ ia64_itr(0x1, IA64_TEMP_PHYSICAL+1, vadr&(~0x8000000000000fffUL), (mppn|PHY_PAGE_WB), 24);
+ ia64_srlz_i();
+ ia64_itr(0x2, IA64_TEMP_PHYSICAL+1, vadr&(~0x8000000000000fffUL), (mppn|PHY_PAGE_WB), 24);
+ ia64_stop();
+ ia64_srlz_i();
+ ia64_itr(0x1, IA64_TEMP_PHYSICAL+2, gpp1&(~0xfff), mpp1, 28);
+ ia64_srlz_i();
+ ia64_itr(0x2, IA64_TEMP_PHYSICAL+2, gpp1&(~0xfff), mpp1, 28);
+ ia64_stop();
+ ia64_srlz_i();
+ ia64_set_psr(psr);
+ ia64_srlz_i();
+ return;
+}
#endif
- ia64_set_psr(psr);
- ia64_srlz_i();
- return;
-}
-
-extern u64 get_mfn(domid_t domid, u64 gpfn, u64 pages);
+
void
physical_itlb_miss(VCPU *vcpu, u64 vadr)
+{
+ physical_itlb_miss_dom0(vcpu, vadr);
+}
+
+
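+/* Handle an ITLB miss taken while dom0 runs in guest physical mode by inserting the machine translation with itc */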
+void
+physical_itlb_miss_dom0(VCPU *vcpu, u64 vadr)
{
u64 psr;
IA64_PSR vpsr;
@@ -150,7 +187,11 @@
vpsr.val=vmx_vcpu_get_psr(vcpu);
gppn=(vadr<<1)>>13;
mppn = get_mfn(DOMID_SELF,gppn,1);
- mppn=(mppn<<12)|(vpsr.cpl<<7)|PHY_PAGE_WB;
+ mppn=(mppn<<12)|(vpsr.cpl<<7);
+// if(vadr>>63)
+// mppn |= PHY_PAGE_UC;
+// else
+ mppn |= PHY_PAGE_WB;
psr=ia64_clear_ic();
ia64_itc(1,vadr&(~0xfff),mppn,EMUL_PHY_PAGE_SHIFT);
@@ -159,12 +200,15 @@
return;
}
+
void
physical_dtlb_miss(VCPU *vcpu, u64 vadr)
{
u64 psr;
IA64_PSR vpsr;
u64 mppn,gppn;
+// if(vcpu->domain!=dom0)
+// panic("dom n physical dtlb miss happen\n");
vpsr.val=vmx_vcpu_get_psr(vcpu);
gppn=(vadr<<1)>>13;
mppn = get_mfn(DOMID_SELF,gppn,1);
@@ -209,6 +253,8 @@
* mode in same region
*/
if (is_physical_mode(vcpu)) {
+ if (vcpu->arch.mode_flags & GUEST_PHY_EMUL)
+ panic("Unexpected domain switch in phy emul\n");
ia64_set_rr((VRN0 << VRN_SHIFT),
vcpu->domain->arch.emul_phy_rr0.rrval);
ia64_set_rr((VRN4 << VRN_SHIFT),
@@ -262,15 +308,10 @@
psr=ia64_clear_ic();
mrr=vmx_vcpu_rr(vcpu,VRN0<<VRN_SHIFT);
- mrr.rid = VRID_2_MRID(vcpu,mrr.rid);
-//VRID_2_MRID(vcpu,mrr.rid);
- mrr.ve = 1;
- ia64_set_rr(VRN0<<VRN_SHIFT, mrr.rrval );
+ ia64_set_rr(VRN0<<VRN_SHIFT, vmx_vrrtomrr(vcpu, mrr.rrval));
ia64_srlz_d();
mrr=vmx_vcpu_rr(vcpu,VRN4<<VRN_SHIFT);
- mrr.rid = VRID_2_MRID(vcpu,mrr.rid);
- mrr.ve = 1;
- ia64_set_rr(VRN4<<VRN_SHIFT, mrr.rrval );
+ ia64_set_rr(VRN4<<VRN_SHIFT, vmx_vrrtomrr(vcpu, mrr.rrval));
ia64_srlz_d();
ia64_set_psr(psr);
ia64_srlz_i();
@@ -377,8 +418,10 @@
void
prepare_if_physical_mode(VCPU *vcpu)
{
- if (is_physical_mode(vcpu))
+ if (is_physical_mode(vcpu)) {
+ vcpu->arch.mode_flags |= GUEST_PHY_EMUL;
switch_to_virtual_rid(vcpu);
+ }
return;
}
@@ -386,8 +429,10 @@
void
recover_if_physical_mode(VCPU *vcpu)
{
- if (is_physical_mode(vcpu))
+ if (is_physical_mode(vcpu)) {
+ vcpu->arch.mode_flags &= ~GUEST_PHY_EMUL;
switch_to_physical_rid(vcpu);
- return;
-}
-
+ }
+ return;
+}
+
diff -r 89d92ce10924 -r ca44d2dbb273 xen/arch/ia64/domain.c
--- a/xen/arch/ia64/domain.c Sat Jul 9 14:37:13 2005
+++ b/xen/arch/ia64/domain.c Sat Jul 9 14:58:56 2005
@@ -37,10 +37,13 @@
#include <asm/asm-offsets.h> /* for IA64_THREAD_INFO_SIZE */
#include <asm/vcpu.h> /* for function declarations */
+#include <public/arch-ia64.h>
#ifdef CONFIG_VTI
#include <asm/vmx.h>
#include <asm/vmx_vcpu.h>
+#include <asm/vmx_vpd.h>
#include <asm/pal.h>
+#include <public/io/ioreq.h>
#endif // CONFIG_VTI
#define CONFIG_DOMAIN0_CONTIGUOUS
@@ -203,18 +206,20 @@
* after up.
*/
d->shared_info = (void *)alloc_xenheap_page();
-
- /* FIXME: Because full virtual cpu info is placed in this area,
- * it's unlikely to put it into one shareinfo page. Later
- * need split vcpu context from vcpu_info and conforms to
- * normal xen convention.
+ /* Now assume all vcpu info and event indicators can be
+ * held in one shared page. We will definitely need to
+ * revisit this later.
*/
- v->vcpu_info = (void *)alloc_xenheap_page();
- if (!v->vcpu_info) {
- printk("ERROR/HALTING: CAN'T ALLOC PAGE\n");
- while (1);
- }
- memset(v->vcpu_info, 0, PAGE_SIZE);
+
+ memset(d->shared_info, 0, PAGE_SIZE);
+ v->vcpu_info = &d->shared_info->vcpu_data[v->vcpu_id];
+ /* Mask all events; a specific port will be unmasked
+ * when a consumer subscribes to it.
+ */
+ if(v == d->vcpu[0]) {
+ memset(&d->shared_info->evtchn_mask[0], 0xff,
+ sizeof(d->shared_info->evtchn_mask));
+ }
/* Allocate per-domain vTLB and vhpt */
v->arch.vtlb = init_domain_tlb(v);
@@ -291,6 +296,7 @@
c->shared = v->domain->shared_info->arch;
}
+#ifndef CONFIG_VTI
int arch_set_info_guest(struct vcpu *v, struct vcpu_guest_context *c)
{
struct pt_regs *regs = (struct pt_regs *) ((unsigned long) v + IA64_STK_OFFSET) - 1;
@@ -312,6 +318,79 @@
v->domain->shared_info->arch = c->shared;
return 0;
}
+#else // CONFIG_VTI
+int arch_set_info_guest(
+ struct vcpu *v, struct vcpu_guest_context *c)
+{
+ struct domain *d = v->domain;
+ int i, rc, ret;
+ unsigned long progress = 0;
+
+ if ( test_bit(_VCPUF_initialised, &v->vcpu_flags) )
+ return 0;
+
+ /* Lazy FP not implemented yet */
+ clear_bit(_VCPUF_fpu_initialised, &v->vcpu_flags);
+ if ( c->flags & VGCF_FPU_VALID )
+ set_bit(_VCPUF_fpu_initialised, &v->vcpu_flags);
+
+ /* Sync d/i cache conservatively, after domain N is loaded */
+ ret = ia64_pal_cache_flush(3, 0, &progress, NULL);
+ if (ret != PAL_STATUS_SUCCESS)
+ panic("PAL CACHE FLUSH failed for dom[%d].\n",
+ v->domain->domain_id);
+ DPRINTK("Sync i/d cache for dom%d image SUCC\n",
+ v->domain->domain_id);
+
+ /* Physical mode emulation initialization, including
+ * emulation ID allocation and related memory request
+ */
+ physical_mode_init(v);
+
+ /* FIXME: only a contiguous PMT table is supported so far */
+ d->arch.pmt = __va(c->pt_base);
+ d->arch.max_pfn = c->pt_max_pfn;
+ v->arch.arch_vmx.vmx_platform.shared_page_va = __va(c->share_io_pg);
+ memset((char *)__va(c->share_io_pg),0,PAGE_SIZE);
+
+ if (c->flags & VGCF_VMX_GUEST) {
+ if (!vmx_enabled)
+ panic("No VMX hardware feature for vmx domain.\n");
+
+ vmx_final_setup_domain(d);
+
+ /* One more step to enable interrupt assist */
+ set_bit(ARCH_VMX_INTR_ASSIST, &v->arch.arch_vmx.flags);
+ }
+
+ vlsapic_reset(v);
+ vtm_init(v);
+
+ /* Only open one port for I/O and interrupt emulation */
+ if (v == d->vcpu[0]) {
+ memset(&d->shared_info->evtchn_mask[0], 0xff,
+ sizeof(d->shared_info->evtchn_mask));
+ clear_bit(IOPACKET_PORT, &d->shared_info->evtchn_mask[0]);
+ }
+ /* Setup domain context. Actually IA-64 is a bit different from
+ * x86, with almost all system resources better managed by HV
+ * directly. CP only needs to provide start IP of guest, which
+ * ideally is the load address of guest Firmware.
+ */
+ new_thread(v, c->guest_iip, 0, 0);
+
+
+ d->xen_vastart = 0xf000000000000000;
+ d->xen_vaend = 0xf300000000000000;
+ d->arch.breakimm = 0x1000 + d->domain_id;
+ v->arch._thread.on_ustack = 0;
+
+ /* Don't redo final setup */
+ set_bit(_VCPUF_initialised, &v->vcpu_flags);
+
+ return 0;
+}
+#endif // CONFIG_VTI
void arch_do_boot_vcpu(struct vcpu *v)
{
@@ -361,7 +440,10 @@
init_all_rr(v);
if (VMX_DOMAIN(v)) {
- VMX_VPD(v,vgr[12]) = dom_fw_setup(d,saved_command_line,256L);
+ if (d == dom0) {
+ VMX_VPD(v,vgr[12]) = dom_fw_setup(d,saved_command_line,256L);
+ printk("new_thread, done with dom_fw_setup\n");
+ }
/* Virtual processor context setup */
VMX_VPD(v, vpsr) = IA64_PSR_BN;
VPD_CR(v, dcr) = 0;
@@ -556,6 +638,7 @@
}
// FIXME: ONLY USE FOR DOMAIN PAGE_SIZE == PAGE_SIZE
+#ifndef CONFIG_VTI
unsigned long domain_mpa_to_imva(struct domain *d, unsigned long mpaddr)
{
unsigned long pte = lookup_domain_mpa(d,mpaddr);
@@ -566,6 +649,14 @@
imva |= mpaddr & ~PAGE_MASK;
return(imva);
}
+#else // CONFIG_VTI
+unsigned long domain_mpa_to_imva(struct domain *d, unsigned long mpaddr)
+{
+ unsigned long imva = __gpa_to_mpa(d, mpaddr);
+
+ return __va(imva);
+}
+#endif // CONFIG_VTI
// remove following line if not privifying in memory
//#define HAVE_PRIVIFY_MEMORY
@@ -812,6 +903,17 @@
/* ... */
}
+/*
+ * Domain 0 has direct access to all devices absolutely. However
+ * the major point of this stub here, is to allow alloc_dom_mem
+ * handled with order > 0 request. Dom0 requires that bit set to
+ * allocate memory for other domains.
+ */
+void physdev_init_dom0(struct domain *d)
+{
+ set_bit(_DOMF_physdev_access, &d->domain_flags);
+}
+
extern unsigned long running_on_sim;
unsigned int vmx_dom0 = 0;
int construct_dom0(struct domain *d,
@@ -963,6 +1065,7 @@
set_bit(_DOMF_constructed, &d->domain_flags);
new_thread(v, pkern_entry, 0, 0);
+ physdev_init_dom0(d);
// FIXME: Hack for keyboard input
#ifdef CLONE_DOMAIN0
if (d == dom0)
@@ -978,6 +1081,8 @@
return 0;
}
+
+
#else //CONFIG_VTI
int construct_dom0(struct domain *d,
diff -r 89d92ce10924 -r ca44d2dbb273 xen/arch/ia64/xensetup.c
--- a/xen/arch/ia64/xensetup.c Sat Jul 9 14:37:13 2005
+++ b/xen/arch/ia64/xensetup.c Sat Jul 9 14:58:56 2005
@@ -30,7 +30,6 @@
#ifdef CLONE_DOMAIN0
struct domain *clones[CLONE_DOMAIN0];
#endif
-extern struct domain *dom0;
extern unsigned long domain0_ready;
int find_max_pfn (unsigned long, unsigned long, void *);
diff -r 89d92ce10924 -r ca44d2dbb273 xen/include/public/arch-ia64.h
--- a/xen/include/public/arch-ia64.h Sat Jul 9 14:37:13 2005
+++ b/xen/include/public/arch-ia64.h Sat Jul 9 14:58:56 2005
@@ -181,6 +181,16 @@
} arch_shared_info_t; // DON'T PACK
typedef struct vcpu_guest_context {
+#define VGCF_FPU_VALID (1<<0)
+#define VGCF_VMX_GUEST (1<<1)
+#define VGCF_IN_KERNEL (1<<2)
+ unsigned long flags; /* VGCF_* flags */
+ unsigned long pt_base; /* PMT table base */
+ unsigned long pt_max_pfn; /* Max pfn including holes */
+ unsigned long share_io_pg; /* Shared page for I/O emulation */
+ unsigned long vm_assist; /* VMASST_TYPE_* bitmap, now none on IPF */
+ unsigned long guest_iip; /* Guest entry point */
+
struct pt_regs regs;
arch_vcpu_info_t vcpu;
arch_shared_info_t shared;
diff -r 89d92ce10924 -r ca44d2dbb273 xen/include/asm-ia64/config.h
--- a/xen/include/asm-ia64/config.h Sat Jul 9 14:37:13 2005
+++ b/xen/include/asm-ia64/config.h Sat Jul 9 14:58:56 2005
@@ -49,6 +49,7 @@
extern unsigned long xenheap_phys_end;
extern unsigned long xen_pstart;
extern unsigned long xenheap_size;
+extern struct domain *dom0;
extern unsigned long dom0_start;
extern unsigned long dom0_size;
diff -r 89d92ce10924 -r ca44d2dbb273 xen/include/asm-ia64/vmx_phy_mode.h
--- a/xen/include/asm-ia64/vmx_phy_mode.h Sat Jul 9 14:37:13 2005
+++ b/xen/include/asm-ia64/vmx_phy_mode.h Sat Jul 9 14:58:56 2005
@@ -83,6 +83,7 @@
#define IA64_RSC_MODE 0x0000000000000003
#define XEN_RR7_RID (0xf00010)
#define GUEST_IN_PHY 0x1
+#define GUEST_PHY_EMUL 0x2
extern int valid_mm_mode[];
extern int mm_switch_table[][8];
extern void physical_mode_init(VCPU *);
diff -r 89d92ce10924 -r ca44d2dbb273 xen/arch/ia64/asm-offsets.c
--- a/xen/arch/ia64/asm-offsets.c Sat Jul 9 14:37:13 2005
+++ b/xen/arch/ia64/asm-offsets.c Sat Jul 9 14:58:56 2005
@@ -224,6 +224,7 @@
#ifdef CONFIG_VTI
DEFINE(IA64_VPD_BASE_OFFSET, offsetof (struct vcpu, arch.arch_vmx.vpd));
+ DEFINE(IA64_VLSAPIC_INSVC_BASE_OFFSET, offsetof (struct vcpu, arch.arch_vmx.in_service[0]));
DEFINE(IA64_VPD_CR_VPTA_OFFSET, offsetof (cr_t, pta));
DEFINE(XXX_THASH_SIZE, sizeof (thash_data_t));
diff -r 89d92ce10924 -r ca44d2dbb273 xen/arch/ia64/vmx_process.c
--- a/xen/arch/ia64/vmx_process.c Sat Jul 9 14:37:13 2005
+++ b/xen/arch/ia64/vmx_process.c Sat Jul 9 14:58:56 2005
@@ -45,7 +45,9 @@
#include <asm/dom_fw.h>
#include <asm/vmx_vcpu.h>
#include <asm/kregs.h>
+#include <asm/vmx.h>
#include <asm/vmx_mm_def.h>
+#include <xen/mm.h>
/* reset all PSR field to 0, except up,mfl,mfh,pk,dt,rt,mc,it */
#define INITIAL_PSR_VALUE_AT_INTERRUPTION 0x0000001808028034
@@ -53,7 +55,7 @@
extern struct ia64_sal_retval pal_emulator_static(UINT64);
extern struct ia64_sal_retval sal_emulator(UINT64,UINT64,UINT64,UINT64,UINT64,UINT64,UINT64,UINT64);
extern void rnat_consumption (VCPU *vcpu);
-
+#define DOMN_PAL_REQUEST 0x110000
IA64FAULT
vmx_ia64_handle_break (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim)
{
@@ -148,7 +150,10 @@
regs->r2);
#endif
vmx_vcpu_increment_iip(current);
- } else
+ }else if(iim == DOMN_PAL_REQUEST){
+ pal_emul(current);
+ vmx_vcpu_increment_iip(current);
+ } else
vmx_reflect_interruption(ifa,isr,iim,11);
}
@@ -187,26 +192,43 @@
// ONLY gets called from ia64_leave_kernel
// ONLY call with interrupts disabled?? (else might miss one?)
// NEVER successful if already reflecting a trap/fault because psr.i==0
-void vmx_deliver_pending_interrupt(struct pt_regs *regs)
+void leave_hypervisor_tail(struct pt_regs *regs)
{
struct domain *d = current->domain;
struct vcpu *v = current;
// FIXME: Will this work properly if doing an RFI???
if (!is_idle_task(d) ) { // always comes from guest
- //vcpu_poke_timer(v);
- //if (vcpu_deliverable_interrupts(v)) {
- // unsigned long isr = regs->cr_ipsr & IA64_PSR_RI;
- // foodpi();
- // reflect_interruption(0,isr,0,regs,IA64_EXTINT_VECTOR);
- //}
extern void vmx_dorfirfi(void);
struct pt_regs *user_regs = vcpu_regs(current);
+ if (local_softirq_pending())
+ do_softirq();
+ local_irq_disable();
+
if (user_regs != regs)
printk("WARNING: checking pending interrupt in nested interrupt!!!\n");
- if (regs->cr_iip == *(unsigned long *)vmx_dorfirfi)
- return;
- vmx_check_pending_irq(v);
+
+ /* VMX Domain N has another interrupt source as well, namely the DM */
+ if (test_bit(ARCH_VMX_INTR_ASSIST, &v->arch.arch_vmx.flags))
+ vmx_intr_assist(v);
+
+ /* FIXME: Check event pending indicator, and set
+ * pending bit if necessary to inject back to guest.
+ * Should be careful about window between this check
+ * and above assist, since IOPACKET_PORT shouldn't be
+ * injected into vmx domain.
+ *
+ * Now hardcode the vector as 0x10 temporarily
+ */
+ if (event_pending(v)&&(!((v->arch.arch_vmx.in_service[0])&(1UL<<0x10)))) {
+ VPD_CR(v, irr[0]) |= 1UL << 0x10;
+ v->arch.irq_new_pending = 1;
+ }
+
+ if ( v->arch.irq_new_pending ) {
+ v->arch.irq_new_pending = 0;
+ vmx_check_pending_irq(v);
+ }
}
}
@@ -244,7 +266,11 @@
return;
}
if((vec==2)&&(!vpsr.dt)){
- physical_dtlb_miss(vcpu, vadr);
+ if(vcpu->domain!=dom0&&__gpfn_is_io(vcpu->domain,(vadr<<1)>>(PAGE_SHIFT+1))){
+ emulate_io_inst(vcpu,((vadr<<1)>>1),4); // UC
+ }else{
+ physical_dtlb_miss(vcpu, vadr);
+ }
return;
}
vrr = vmx_vcpu_rr(vcpu,vadr);
@@ -255,6 +281,11 @@
// prepare_if_physical_mode(vcpu);
if(data=vtlb_lookup_ex(vtlb, vrr.rid, vadr,type)){
+ if(vcpu->domain!=dom0&&type==DSIDE_TLB && __gpfn_is_io(vcpu->domain, data->ppn>>(PAGE_SHIFT-12))){
+ vadr=(vadr&((1UL<<data->ps)-1))+(data->ppn>>(data->ps-12)<<data->ps);
+ emulate_io_inst(vcpu, vadr, data->ma);
+ return IA64_FAULT;
+ }
if ( data->ps != vrr.ps ) {
machine_tlb_insert(vcpu, data);
}
diff -r 89d92ce10924 -r ca44d2dbb273 xen/arch/ia64/vtlb.c
--- a/xen/arch/ia64/vtlb.c Sat Jul 9 14:37:13 2005
+++ b/xen/arch/ia64/vtlb.c Sat Jul 9 14:58:56 2005
@@ -26,7 +26,7 @@
#include <asm/vmx_mm_def.h>
#include <asm/gcc_intrin.h>
#include <xen/interrupt.h>
-#include <asm/vcpu.h>
+#include <asm/vmx_vcpu.h>
#define MAX_CCH_LENGTH 40
@@ -401,6 +401,8 @@
panic("Can't convert to machine VHPT entry\n");
}
hash_table->next = cch;
+ if(hash_table->tag==hash_table->next->tag)
+ while(1);
}
return /*hash_table*/;
}
@@ -466,10 +468,11 @@
static thash_data_t *thash_rem_cch(thash_cb_t *hcb, thash_data_t *cch)
{
thash_data_t *next;
-
+
if ( ++cch_depth > MAX_CCH_LENGTH ) {
printf ("cch length > MAX_CCH_LENGTH, exceed the expected length\n");
- }
+ while(1);
+ }
if ( cch -> next ) {
next = thash_rem_cch(hcb, cch->next);
}
diff -r 89d92ce10924 -r ca44d2dbb273 xen/include/asm-ia64/vmx.h
--- a/xen/include/asm-ia64/vmx.h Sat Jul 9 14:37:13 2005
+++ b/xen/include/asm-ia64/vmx.h Sat Jul 9 14:58:56 2005
@@ -35,4 +35,6 @@
extern void vmx_purge_double_mapping(u64, u64, u64);
extern void vmx_change_double_mapping(struct vcpu *v, u64 oldrr7, u64 newrr7);
+extern void vmx_wait_io(void);
+extern void vmx_io_assist(struct vcpu *v);
#endif /* _ASM_IA64_VT_H */
diff -r 89d92ce10924 -r ca44d2dbb273 xen/arch/ia64/vlsapic.c
--- a/xen/arch/ia64/vlsapic.c Sat Jul 9 14:37:13 2005
+++ b/xen/arch/ia64/vlsapic.c Sat Jul 9 14:58:56 2005
@@ -133,7 +133,7 @@
// FIXME: should use local_irq_disable & local_irq_enable ??
local_irq_save(spsr);
guest_itc = now_itc(vtm);
- update_last_itc(vtm, guest_itc);
+// update_last_itc(vtm, guest_itc);
local_irq_restore(spsr);
return guest_itc;
@@ -174,12 +174,12 @@
/* Interrupt must be disabled at this point */
extern u64 tick_to_ns(u64 tick);
-#define TIMER_SLOP (50*1000) /* ns */ /* copy from ac_timer.c */
+#define TIMER_SLOP (50*1000) /* ns */ /* copy from ac_timer.c */
void vtm_interruption_update(VCPU *vcpu, vtime_t* vtm)
{
uint64_t cur_itc,vitm,vitv;
uint64_t expires;
- long diff_now, diff_last;
+ long diff_now, diff_last;
uint64_t spsr;
vitv = VPD_CR(vcpu, itv);
@@ -237,21 +237,30 @@
#define NMI_VECTOR 2
#define ExtINT_VECTOR 0
-
+#define NULL_VECTOR -1
#define VLSAPIC_INSVC(vcpu, i) ((vcpu)->arch.arch_vmx.in_service[i])
-/*
- * LID-CR64: Keep in vpd.
- * IVR-CR65: (RO) see guest_read_ivr().
- * TPR-CR66: Keep in vpd, acceleration enabled.
- * EOI-CR67: see guest_write_eoi().
- * IRR0-3 - CR68-71: (RO) Keep in vpd irq_pending[]
- * can move to vpd for optimization.
- * ITV: in time virtualization.
- * PMV: Keep in vpd initialized as 0x10000.
- * CMCV: Keep in vpd initialized as 0x10000.
- * LRR0-1: Keep in vpd, initialized as 0x10000.
- *
- */
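+/* Record the highest pending vector in VPD vhpi (0 = none, 32 = NMI, 16 = ExtINT, else vec/16) and notify the VSA when interrupt acceleration is on */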
+static void update_vhpi(VCPU *vcpu, int vec)
+{
+ u64 vhpi;
+ if ( vec == NULL_VECTOR ) {
+ vhpi = 0;
+ }
+ else if ( vec == NMI_VECTOR ) { // NMI
+ vhpi = 32;
+ } else if (vec == ExtINT_VECTOR) { //ExtINT
+ vhpi = 16;
+ }
+ else {
+ vhpi = vec / 16;
+ }
+
+ VMX_VPD(vcpu,vhpi) = vhpi;
+ // TODO: Add support for XENO
+ if ( VMX_VPD(vcpu,vac).a_int ) {
+ ia64_call_vsa ( PAL_VPS_SET_PENDING_INTERRUPT,
+ (uint64_t) &(vcpu->arch.arch_vmx.vpd), 0, 0,0,0,0,0);
+ }
+}
void vlsapic_reset(VCPU *vcpu)
{
@@ -268,9 +277,11 @@
VPD_CR(vcpu, cmcv) = 0x10000;
VPD_CR(vcpu, lrr0) = 0x10000; // default reset value?
VPD_CR(vcpu, lrr1) = 0x10000; // default reset value?
+ update_vhpi(vcpu, NULL_VECTOR);
for ( i=0; i<4; i++) {
VLSAPIC_INSVC(vcpu,i) = 0;
}
+ DPRINTK("VLSAPIC inservice base=%lp\n", &VLSAPIC_INSVC(vcpu,0) );
}
/*
@@ -281,7 +292,7 @@
*/
static __inline__ int highest_bits(uint64_t *dat)
{
- uint64_t bits, bitnum=-1;
+ uint64_t bits, bitnum;
int i;
/* loop for all 256 bits */
@@ -292,12 +303,12 @@
return i*64+bitnum;
}
}
- return -1;
+ return NULL_VECTOR;
}
/*
* Return 0-255 for pending irq.
- * -1 when no pending.
+ * NULL_VECTOR: when no pending.
*/
static int highest_pending_irq(VCPU *vcpu)
{
@@ -320,7 +331,7 @@
static int is_higher_irq(int pending, int inservice)
{
return ( (pending >> 4) > (inservice>>4) ||
- ((pending != -1) && (inservice == -1)) );
+ ((pending != NULL_VECTOR) && (inservice == NULL_VECTOR)) );
}
static int is_higher_class(int pending, int mic)
@@ -332,41 +343,97 @@
{
return (vec == 1 || ((vec <= 14 && vec >= 3)));
}
+
+#define IRQ_NO_MASKED 0
+#define IRQ_MASKED_BY_VTPR 1
+#define IRQ_MASKED_BY_INSVC 2 // masked by inservice IRQ
/* See Table 5-8 in SDM vol2 for the definition */
static int
-irq_masked(VCPU *vcpu, int h_pending, int h_inservice)
-{
- uint64_t vtpr;
-
- vtpr = VPD_CR(vcpu, tpr);
-
- if ( h_pending == NMI_VECTOR && h_inservice != NMI_VECTOR )
+_xirq_masked(VCPU *vcpu, int h_pending, int h_inservice)
+{
+ tpr_t vtpr;
+ uint64_t mmi;
+
+ vtpr.val = VPD_CR(vcpu, tpr);
+
+ if ( h_inservice == NMI_VECTOR ) {
+ return IRQ_MASKED_BY_INSVC;
+ }
+ if ( h_pending == NMI_VECTOR ) {
// Non Maskable Interrupt
- return 0;
-
- if ( h_pending == ExtINT_VECTOR && h_inservice >= 16)
- return (vtpr>>16)&1; // vtpr.mmi
-
- if ( !(vtpr&(1UL<<16)) &&
- is_higher_irq(h_pending, h_inservice) &&
- is_higher_class(h_pending, (vtpr>>4)&0xf) )
- return 0;
-
- return 1;
-}
-
+ return IRQ_NO_MASKED;
+ }
+ if ( h_inservice == ExtINT_VECTOR ) {
+ return IRQ_MASKED_BY_INSVC;
+ }
+ mmi = vtpr.mmi;
+ if ( h_pending == ExtINT_VECTOR ) {
+ if ( mmi ) {
+ // mask all external IRQ
+ return IRQ_MASKED_BY_VTPR;
+ }
+ else {
+ return IRQ_NO_MASKED;
+ }
+ }
+
+ if ( is_higher_irq(h_pending, h_inservice) ) {
+ if ( !mmi && is_higher_class(h_pending, vtpr.mic) ) {
+ return IRQ_NO_MASKED;
+ }
+ else {
+ return IRQ_MASKED_BY_VTPR;
+ }
+ }
+ else {
+ return IRQ_MASKED_BY_INSVC;
+ }
+}
+
+static int irq_masked(VCPU *vcpu, int h_pending, int h_inservice)
+{
+ int mask;
+
+ mask = _xirq_masked(vcpu, h_pending, h_inservice);
+ return mask;
+}
+
+
+/*
+ * May come from virtualization fault or
+ * nested host interrupt.
+ */
void vmx_vcpu_pend_interrupt(VCPU *vcpu, UINT64 vector)
{
uint64_t spsr;
if (vector & ~0xff) {
- printf("vmx_vcpu_pend_interrupt: bad vector\n");
+ DPRINTK("vmx_vcpu_pend_interrupt: bad vector\n");
return;
}
local_irq_save(spsr);
VPD_CR(vcpu,irr[vector>>6]) |= 1UL<<(vector&63);
local_irq_restore(spsr);
+ vcpu->arch.irq_new_pending = 1;
+}
+
+/*
+ * Add batch of pending interrupt.
+ * The interrupt source is contained in pend_irr[0-3] with
+ * each bits stand for one interrupt.
+ */
+void vmx_vcpu_pend_batch_interrupt(VCPU *vcpu, UINT64 *pend_irr)
+{
+ uint64_t spsr;
+ int i;
+
+ local_irq_save(spsr);
+ for (i=0 ; i<4; i++ ) {
+ VPD_CR(vcpu,irr[i]) |= pend_irr[i];
+ }
+ local_irq_restore(spsr);
+ vcpu->arch.irq_new_pending = 1;
}
/*
@@ -383,7 +450,7 @@
*/
int vmx_check_pending_irq(VCPU *vcpu)
{
- uint64_t spsr;
+ uint64_t spsr, mask;
int h_pending, h_inservice;
int injected=0;
uint64_t isr;
@@ -391,22 +458,25 @@
local_irq_save(spsr);
h_pending = highest_pending_irq(vcpu);
- if ( h_pending == -1 ) goto chk_irq_exit;
+ if ( h_pending == NULL_VECTOR ) goto chk_irq_exit;
h_inservice = highest_inservice_irq(vcpu);
vpsr.val = vmx_vcpu_get_psr(vcpu);
- if ( vpsr.i &&
- !irq_masked(vcpu, h_pending, h_inservice) ) {
- //inject_guest_irq(v);
+ mask = irq_masked(vcpu, h_pending, h_inservice);
+ if ( vpsr.i && IRQ_NO_MASKED == mask ) {
isr = vpsr.val & IA64_PSR_RI;
if ( !vpsr.ic )
panic("Interrupt when IC=0\n");
vmx_reflect_interruption(0,isr,0, 12 ); // EXT IRQ
injected = 1;
}
- else if ( VMX_VPD(vcpu,vac).a_int &&
- is_higher_irq(h_pending,h_inservice) ) {
- vmx_inject_vhpi(vcpu,h_pending);
+ else if ( mask == IRQ_MASKED_BY_INSVC ) {
+ // can't inject VHPI
+// DPRINTK("IRQ masked by higher inservice\n");
+ }
+ else {
+ // masked by vpsr.i or vtpr.
+ update_vhpi(vcpu,h_pending);
}
chk_irq_exit:
@@ -414,17 +484,21 @@
return injected;
}
+/*
+ * Only coming from virtualization fault.
+ */
void guest_write_eoi(VCPU *vcpu)
{
int vec;
uint64_t spsr;
vec = highest_inservice_irq(vcpu);
- if ( vec < 0 ) panic("Wrong vector to EOI\n");
+ if ( vec == NULL_VECTOR ) panic("Wrong vector to EOI\n");
local_irq_save(spsr);
VLSAPIC_INSVC(vcpu,vec>>6) &= ~(1UL <<(vec&63));
local_irq_restore(spsr);
VPD_CR(vcpu, eoi)=0; // overwrite the data
+ vmx_check_pending_irq(vcpu);
}
uint64_t guest_read_vivr(VCPU *vcpu)
@@ -435,37 +509,54 @@
local_irq_save(spsr);
vec = highest_pending_irq(vcpu);
h_inservice = highest_inservice_irq(vcpu);
- if ( vec < 0 || irq_masked(vcpu, vec, h_inservice) ) {
+ if ( vec == NULL_VECTOR ||
+ irq_masked(vcpu, vec, h_inservice) != IRQ_NO_MASKED ) {
local_irq_restore(spsr);
return IA64_SPURIOUS_INT_VECTOR;
}
VLSAPIC_INSVC(vcpu,vec>>6) |= (1UL <<(vec&63));
VPD_CR(vcpu, irr[vec>>6]) &= ~(1UL <<(vec&63));
-
- h_inservice = highest_inservice_irq(vcpu);
- next = highest_pending_irq(vcpu);
- if ( VMX_VPD(vcpu,vac).a_int &&
- (is_higher_irq(next, h_inservice) || (next == -1)) )
- vmx_inject_vhpi(vcpu, next);
+ update_vhpi(vcpu, NULL_VECTOR); // clear VHPI till EOI or IRR write
local_irq_restore(spsr);
return (uint64_t)vec;
}
-void vmx_inject_vhpi(VCPU *vcpu, u8 vec)
-{
- VMX_VPD(vcpu,vhpi) = vec / 16;
-
-
- // non-maskable
- if ( vec == NMI_VECTOR ) // NMI
- VMX_VPD(vcpu,vhpi) = 32;
- else if (vec == ExtINT_VECTOR) //ExtINT
- VMX_VPD(vcpu,vhpi) = 16;
- else if (vec == -1)
- VMX_VPD(vcpu,vhpi) = 0; /* Nothing pending */
-
- ia64_call_vsa ( PAL_VPS_SET_PENDING_INTERRUPT,
- (uint64_t) &(vcpu->arch.arch_vmx.vpd), 0, 0,0,0,0,0);
-}
-
+static void generate_exirq(VCPU *vcpu)
+{
+ IA64_PSR vpsr;
+ uint64_t isr;
+
+ vpsr.val = vmx_vcpu_get_psr(vcpu);
+ update_vhpi(vcpu, NULL_VECTOR);
+ isr = vpsr.val & IA64_PSR_RI;
+ if ( !vpsr.ic )
+ panic("Interrupt when IC=0\n");
+ vmx_reflect_interruption(0,isr,0, 12 ); // EXT IRQ
+}
+
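+/* Compare the recorded VHPI against the mask level built from vpsr.i and vtpr, and deliver an external interrupt once it becomes unmasked */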
+vhpi_detection(VCPU *vcpu)
+{
+ uint64_t threshold,vhpi;
+ tpr_t vtpr;
+ IA64_PSR vpsr;
+
+ vpsr.val = vmx_vcpu_get_psr(vcpu);
+ vtpr.val = VPD_CR(vcpu, tpr);
+
+ threshold = ((!vpsr.i) << 5) | (vtpr.mmi << 4) | vtpr.mic;
+ vhpi = VMX_VPD(vcpu,vhpi);
+ if ( vhpi > threshold ) {
+ // interrupt activated
+ generate_exirq (vcpu);
+ }
+}
+
+vmx_vexirq(VCPU *vcpu)
+{
+ static uint64_t vexirq_count=0;
+
+ vexirq_count ++;
+ printk("Virtual ex-irq %ld\n", vexirq_count);
+ generate_exirq (vcpu);
+}
diff -r 89d92ce10924 -r ca44d2dbb273 xen/include/asm-ia64/xensystem.h
--- a/xen/include/asm-ia64/xensystem.h Sat Jul 9 14:37:13 2005
+++ b/xen/include/asm-ia64/xensystem.h Sat Jul 9 14:58:56 2005
@@ -33,6 +33,8 @@
#ifdef CONFIG_VTI
extern struct task_struct *vmx_ia64_switch_to (void *next_task);
#define __switch_to(prev,next,last) do { \
+ ia64_save_fpu(prev->arch._thread.fph); \
+ ia64_load_fpu(next->arch._thread.fph); \
if (VMX_DOMAIN(prev)) \
vmx_save_state(prev); \
else { \
diff -r 89d92ce10924 -r ca44d2dbb273 xen/arch/ia64/mmio.c
--- a/xen/arch/ia64/mmio.c Sat Jul 9 14:37:13 2005
+++ b/xen/arch/ia64/mmio.c Sat Jul 9 14:58:56 2005
@@ -27,7 +27,13 @@
#include <asm/gcc_intrin.h>
#include <xen/interrupt.h>
#include <asm/vmx_vcpu.h>
-
+#include <asm/privop.h>
+#include <asm/types.h>
+#include <public/io/ioreq.h>
+#include <asm/mm.h>
+#include <asm/vmx.h>
+
+/*
struct mmio_list *lookup_mmio(u64 gpa, struct mmio_list *mio_base)
{
int i;
@@ -37,65 +43,194 @@
}
return NULL;
}
-
-
-extern void pib_write(VCPU *vcpu, void *src, uint64_t pib_off, size_t s, int ma);
-static inline void mmio_write(VCPU *vcpu, void *src, u64 dest_pa, size_t s, int ma)
+*/
+
+#define PIB_LOW_HALF(ofst) !(ofst&(1<<20))
+#define PIB_OFST_INTA 0x1E0000
+#define PIB_OFST_XTP 0x1E0008
+
+static void pib_write(VCPU *vcpu, void *src, uint64_t pib_off, size_t s, int ma)
+{
+ switch (pib_off) {
+ case PIB_OFST_INTA:
+ panic("Undefined write on PIB INTA\n");
+ break;
+ case PIB_OFST_XTP:
+ if ( s == 1 && ma == 4 /* UC */) {
+ vmx_vcpu_get_plat(vcpu)->xtp = *(uint8_t *)src;
+ }
+ else {
+ panic("Undefined write on PIB XTP\n");
+ }
+ break;
+ default:
+ if ( PIB_LOW_HALF(pib_off) ) { // lower half
+ if ( s != 8 || ma != 0x4 /* UC */ ) {
+ panic("Undefined IPI-LHF write!\n");
+ }
+ else {
+ write_ipi(vcpu, pib_off, *(uint64_t *)src);
+ // TODO for SM-VP
+ }
+ }
+ else { // upper half
+ printf("IPI-UHF write %lx\n",pib_off);
+ panic("Not support yet for SM-VP\n");
+ }
+ break;
+ }
+}
+
+static void pib_read(VCPU *vcpu, uint64_t pib_off, void *dest, size_t s, int ma)
+{
+ switch (pib_off) {
+ case PIB_OFST_INTA:
+ // todo --- emit on processor system bus.
+ if ( s == 1 && ma == 4) { // 1 byte load
+ // TODO: INTA read from IOSAPIC
+ }
+ else {
+ panic("Undefined read on PIB INTA\n");
+ }
+ break;
+ case PIB_OFST_XTP:
+ if ( s == 1 && ma == 4) {
+ *((uint8_t*)dest) = vmx_vcpu_get_plat(vcpu)->xtp;
+ }
+ else {
+ panic("Undefined read on PIB XTP\n");
+ }
+ break;
+ default:
+ if ( PIB_LOW_HALF(pib_off) ) { // lower half
+ if ( s != 8 || ma != 4 ) {
+ panic("Undefined IPI-LHF read!\n");
+ }
+ else {
+#ifdef IPI_DEBUG
+ printf("IPI-LHF read %lx\n",pib_off);
+#endif
+ *(uint64_t *)dest = 0; // TODO for SM-VP
+ }
+ }
+ else { // upper half
+ if ( s != 1 || ma != 4 ) {
+ panic("Undefined PIB-UHF read!\n");
+ }
+ else {
+#ifdef IPI_DEBUG
+ printf("IPI-UHF read %lx\n",pib_off);
+#endif
+ *(uint8_t *)dest = 0; // TODO for SM-VP
+ }
+ }
+ break;
+ }
+}
+
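+/* Build an ioreq on the shared page, notify the device model via IOPACKET_PORT and wait for completion */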
+static void low_mmio_access(VCPU *vcpu, u64 pa, u64 *val, size_t s, int dir)
+{
+ struct vcpu *v = current;
+ vcpu_iodata_t *vio;
+ ioreq_t *p;
+ unsigned long addr;
+
+ vio = (vcpu_iodata_t *) v->arch.arch_vmx.vmx_platform.shared_page_va;
+ if (vio == 0) {
+ panic("bad shared page: %lx", (unsigned long)vio);
+ }
+ p = &vio->vp_ioreq;
+ p->addr = pa;
+ p->size = 1<<s;
+ p->count = 1;
+ p->dir = dir;
+ if(dir==IOREQ_WRITE) //write;
+ p->u.data = *val;
+ p->pdata_valid = 0;
+ p->port_mm = 1;
+ p->df = 0;
+
+ set_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
+ p->state = STATE_IOREQ_READY;
+ evtchn_send(IOPACKET_PORT);
+ vmx_wait_io();
+ if(dir){ //read
+ *val=p->u.data;
+ }
+ return;
+}
+#define TO_LEGACY_IO(pa) (((pa)>>12<<2)|((pa)&0x3))
+
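+/* Same flow as low_mmio_access, but the guest physical address is first folded into a legacy I/O port number */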
+static void legacy_io_access(VCPU *vcpu, u64 pa, u64 *val, size_t s, int dir)
+{
+ struct vcpu *v = current;
+ vcpu_iodata_t *vio;
+ ioreq_t *p;
+ unsigned long addr;
+
+ vio = (vcpu_iodata_t *) v->arch.arch_vmx.vmx_platform.shared_page_va;
+ if (vio == 0) {
+ panic("bad shared page: %lx");
+ }
+ p = &vio->vp_ioreq;
+ p->addr = TO_LEGACY_IO(pa&0x3ffffffUL);
+ p->size = 1<<s;
+ p->count = 1;
+ p->dir = dir;
+ if(dir==IOREQ_WRITE) //write;
+ p->u.data = *val;
+ p->pdata_valid = 0;
+ p->port_mm = 0;
+ p->df = 0;
+
+ set_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
+ p->state = STATE_IOREQ_READY;
+ evtchn_send(IOPACKET_PORT);
+ vmx_wait_io();
+ if(dir){ //read
+ *val=p->u.data;
+ }
+ return;
+}
+
+static void mmio_access(VCPU *vcpu, u64 src_pa, u64 *dest, size_t s, int ma, int dir)
{
struct virutal_platform_def *v_plat;
- struct mmio_list *mio;
-
+ //mmio_type_t iot;
+ unsigned long iot;
+ iot=__gpfn_is_io(vcpu->domain, src_pa>>PAGE_SHIFT);
v_plat = vmx_vcpu_get_plat(vcpu);
- mio = lookup_mmio(dest_pa, v_plat->mmio);
- if ( mio == NULL )
- panic ("Wrong address for MMIO\n");
-
- switch (mio->iot) {
- case PIB_MMIO:
- pib_write(vcpu, src, dest_pa - v_plat->pib_base, s, ma);
- break;
- case VGA_BUFF:
- case CHIPSET_IO:
- case LOW_MMIO:
- case LEGACY_IO:
- case IO_SAPIC:
+
+ switch (iot) {
+ case GPFN_PIB:
+ if(!dir)
+ pib_write(vcpu, src_pa - v_plat->pib_base, dest, s, ma);
+ else
+ pib_read(vcpu, src_pa - v_plat->pib_base, dest, s, ma);
+ break;
+ case GPFN_GFW:
+ break;
+ case GPFN_FRAME_BUFFER:
+ case GPFN_LOW_MMIO:
+ low_mmio_access(vcpu, src_pa, dest, s, dir);
+ break;
+ case GPFN_LEGACY_IO:
+ legacy_io_access(vcpu, src_pa, dest, s, dir);
+ break;
+ case GPFN_IOSAPIC:
default:
+ panic("Bad I/O access\n");
break;
}
return;
}
-static inline void mmio_read(VCPU *vcpu, u64 src_pa, void *dest, size_t s, int ma)
-{
- struct virutal_platform_def *v_plat;
- struct mmio_list *mio;
-
- v_plat = vmx_vcpu_get_plat(vcpu);
- mio = lookup_mmio(src_pa, v_plat->mmio);
- if ( mio == NULL )
- panic ("Wrong address for MMIO\n");
-
- switch (mio->iot) {
- case PIB_MMIO:
- pib_read(vcpu, src_pa - v_plat->pib_base, dest, s, ma);
- break;
- case VGA_BUFF:
- case CHIPSET_IO:
- case LOW_MMIO:
- case LEGACY_IO:
- case IO_SAPIC:
- default:
- break;
- }
- return;
-}
-
/*
* Read or write data in guest virtual address mode.
*/
-
+/*
void
-memwrite_v(VCPU *vcpu, thash_data_t *vtlb, void *src, void *dest, size_t s)
+memwrite_v(VCPU *vcpu, thash_data_t *vtlb, u64 *src, u64 *dest, size_t s)
{
uint64_t pa;
@@ -108,7 +243,7 @@
void
-memwrite_p(VCPU *vcpu, void *src, void *dest, size_t s)
+memwrite_p(VCPU *vcpu, u64 *src, u64 *dest, size_t s)
{
uint64_t pa = (uint64_t)dest;
int ma;
@@ -116,9 +251,9 @@
if ( pa & (1UL <<63) ) {
// UC
ma = 4;
- pa <<=1;
+ pa <<=1;
pa >>=1;
- }
+ }
else {
// WBL
ma = 0; // using WB for WBL
@@ -127,7 +262,7 @@
}
void
-memread_v(VCPU *vcpu, thash_data_t *vtlb, void *src, void *dest, size_t s)
+memread_v(VCPU *vcpu, thash_data_t *vtlb, u64 *src, u64 *dest, size_t s)
{
uint64_t pa;
@@ -135,12 +270,12 @@
panic("Normal memory write shouldn't go to this point!");
pa = PPN_2_PA(vtlb->ppn);
pa += POFFSET((u64)src, vtlb->ps);
-
+
mmio_read(vcpu, pa, dest, s, vtlb->ma);
}
void
-memread_p(VCPU *vcpu, void *src, void *dest, size_t s)
+memread_p(VCPU *vcpu, u64 *src, u64 *dest, size_t s)
{
uint64_t pa = (uint64_t)src;
int ma;
@@ -148,19 +283,16 @@
if ( pa & (1UL <<63) ) {
// UC
ma = 4;
- pa <<=1;
+ pa <<=1;
pa >>=1;
- }
+ }
else {
// WBL
ma = 0; // using WB for WBL
}
mmio_read(vcpu, pa, dest, s, ma);
}
-
-#define PIB_LOW_HALF(ofst) !(ofst&(1<<20))
-#define PIB_OFST_INTA 0x1E0000
-#define PIB_OFST_XTP 0x1E0008
+*/
/*
@@ -182,23 +314,22 @@
panic ("Inject guest PMI!\n");
break;
case 4: // NMI
- vmx_vcpu_pend_interrupt (vcpu, 2);
+ vmx_vcpu_pend_interrupt (vcpu, 2);
break;
case 5: // INIT
// TODO -- inject guest INIT
panic ("Inject guest INIT!\n");
break;
case 7: // ExtINT
- vmx_vcpu_pend_interrupt (vcpu, 0);
- break;
-
+ vmx_vcpu_pend_interrupt (vcpu, 0);
+ break;
case 1:
case 3:
case 6:
default:
panic ("Deliver reserved IPI!\n");
break;
- }
+ }
}
/*
@@ -209,7 +340,6 @@
int i;
VCPU *vcpu;
LID lid;
-
for (i=0; i<MAX_VIRT_CPUS; i++) {
vcpu = d->vcpu[i];
lid.val = VPD_CR(vcpu, lid);
@@ -226,7 +356,7 @@
static int write_ipi (VCPU *vcpu, uint64_t addr, uint64_t value)
{
VCPU *target_cpu;
-
+
target_cpu = lid_2_vcpu(vcpu->domain,
((ipi_a_t)addr).id, ((ipi_a_t)addr).eid);
if ( target_cpu == NULL ) panic("Unknown IPI cpu\n");
@@ -243,83 +373,89 @@
}
}
-void pib_write(VCPU *vcpu, void *src, uint64_t pib_off, size_t s, int ma)
-{
-
- switch (pib_off) {
- case PIB_OFST_INTA:
- panic("Undefined write on PIB INTA\n");
- break;
- case PIB_OFST_XTP:
- if ( s == 1 && ma == 4 /* UC */) {
- vmx_vcpu_get_plat(vcpu)->xtp = *(uint8_t *)src;
- }
- else {
- panic("Undefined write on PIB XTP\n");
- }
- break;
- default:
- if ( PIB_LOW_HALF(pib_off) ) { // lower half
- if ( s != 8 || ma != 0x4 /* UC */ ) {
- panic("Undefined IPI-LHF write!\n");
- }
- else {
- write_ipi(vcpu, pib_off, *(uint64_t *)src);
- // TODO for SM-VP
- }
- }
- else { // upper half
- printf("IPI-UHF write %lx\n",pib_off);
- panic("Not support yet for SM-VP\n");
- }
- break;
- }
-}
-
-void pib_read(VCPU *vcpu, uint64_t pib_off, void *dest, size_t s, int ma)
-{
- switch (pib_off) {
- case PIB_OFST_INTA:
- // todo --- emit on processor system bus.
- if ( s == 1 && ma == 4) { // 1 byte load
- // TODO: INTA read from IOSAPIC
- }
- else {
- panic("Undefined read on PIB INTA\n");
- }
- break;
- case PIB_OFST_XTP:
- if ( s == 1 && ma == 4) {
- *((uint8_t*)dest) = vmx_vcpu_get_plat(vcpu)->xtp;
- }
- else {
- panic("Undefined read on PIB XTP\n");
- }
- break;
- default:
- if ( PIB_LOW_HALF(pib_off) ) { // lower half
- if ( s != 8 || ma != 4 ) {
- panic("Undefined IPI-LHF read!\n");
- }
- else {
-#ifdef IPI_DEBUG
- printf("IPI-LHF read %lx\n",pib_off);
-#endif
- *(uint64_t *)dest = 0; // TODO for SM-VP
- }
- }
- else { // upper half
- if ( s != 1 || ma != 4 ) {
- panic("Undefined PIB-UHF read!\n");
- }
- else {
-#ifdef IPI_DEBUG
- printf("IPI-UHF read %lx\n",pib_off);
-#endif
- *(uint8_t *)dest = 0; // TODO for SM-VP
- }
- }
- break;
- }
-}
-
+
+/*
+ dir 1: read 0:write
+ inst_type 0:integer 1:floating point
+ */
+extern IA64_BUNDLE __vmx_get_domain_bundle(u64 iip);
+
+
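+/* Decode the faulting load/store bundle and replay it as an MMIO access of the proper size and direction */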
+void emulate_io_inst(VCPU *vcpu, u64 padr, u64 ma)
+{
+ REGS *regs;
+ IA64_BUNDLE bundle;
+ int slot, dir, inst_type=0;
+ size_t size;
+ u64 data, value, slot1a, slot1b;
+ INST64 inst;
+ regs=vcpu_regs(vcpu);
+ bundle = __vmx_get_domain_bundle(regs->cr_iip);
+ slot = ((struct ia64_psr *)&(regs->cr_ipsr))->ri;
+ if (!slot) inst.inst = bundle.slot0;
+ else if (slot == 1){
+ slot1a=bundle.slot1a;
+ slot1b=bundle.slot1b;
+ inst.inst =slot1a + (slot1b<<18);
+ }
+ else if (slot == 2) inst.inst = bundle.slot2;
+
+ if(inst.M1.major==4&&inst.M1.m==0&&inst.M1.x==0){
+ inst_type=0; //fp
+ size=(inst.M1.x6&0x3);
+ if((inst.M1.x6>>2)>0xb){ // write
+ vmx_vcpu_get_gr(vcpu,inst.M4.r2,&data);
+ dir=IOREQ_WRITE; //write
+ }else if((inst.M1.x6>>2)<0xb){ // read
+ vmx_vcpu_get_gr(vcpu,inst.M1.r1,&value);
+ dir=IOREQ_READ;
+ }else{
+ printf("This memory access instruction can't be emulated one : %lx\n",inst.inst);
+ while(1);
+ }
+ }else if(inst.M6.major==6&&inst.M6.m==0&&inst.M6.x==0&&inst.M6.x6==3){
+ inst_type=1; //fp
+ dir=IOREQ_READ;
+ size=3; //ldfd
+ }else{
+ printf("This memory access instruction can't be emulated two: %lx\n",inst.inst);
+ while(1);
+ }
+
+ if(dir==IOREQ_WRITE){
+ mmio_access(vcpu, padr, &data, size, ma, dir);
+ }else{
+ mmio_access(vcpu, padr, &data, size, ma, dir);
+ if(size==0)
+ data = (value & 0xffffffffffffff00U) | (data & 0xffU);
+ else if(size==1)
+ data = (value & 0xffffffffffff0000U) | (data & 0xffffU);
+ else if(size==2)
+ data = (value & 0xffffffff00000000U) | (data & 0xffffffffU);
+
+ if(inst_type==0){ //gp
+ vmx_vcpu_set_gr(vcpu,inst.M1.r1,data,0);
+ }else{
+ panic("Don't support ldfd now !");
+/* switch(inst.M6.f1){
+
+ case 6:
+ regs->f6=(struct ia64_fpreg)data;
+ case 7:
+ regs->f7=(struct ia64_fpreg)data;
+ case 8:
+ regs->f8=(struct ia64_fpreg)data;
+ case 9:
+ regs->f9=(struct ia64_fpreg)data;
+ case 10:
+ regs->f10=(struct ia64_fpreg)data;
+ case 11:
+ regs->f11=(struct ia64_fpreg)data;
+ default :
+ ia64_ldfs(inst.M6.f1,&data);
+ }
+*/
+ }
+ }
+ vmx_vcpu_increment_iip(vcpu);
+}
diff -r 89d92ce10924 -r ca44d2dbb273 xen/arch/ia64/vmx_minstate.h
--- a/xen/arch/ia64/vmx_minstate.h Sat Jul 9 14:37:13 2005
+++ b/xen/arch/ia64/vmx_minstate.h Sat Jul 9 14:58:56 2005
@@ -148,10 +148,14 @@
mov r20=r1; /* A */ \
mov r26=ar.unat; /* M */ \
mov r29=cr.ipsr; /* M */ \
+ mov r18=cr.isr; \
COVER; /* B;; (or nothing) */ \
;; \
- tbit.z p6,p7=r29,IA64_PSR_VM_BIT; \
+ tbit.z p6,p0=r29,IA64_PSR_VM_BIT; \
+ tbit.nz.or p6,p0 = r18,39; \
+ ;; \
(p6) br.sptk.few vmx_panic; \
+ tbit.z p0,p15=r29,IA64_PSR_I_BIT; \
mov r1=r16; \
/* mov r21=r16; */ \
/* switch from user to kernel RBS: */ \
diff -r 89d92ce10924 -r ca44d2dbb273 xen/include/asm-ia64/vmx_vcpu.h
--- a/xen/include/asm-ia64/vmx_vcpu.h Sat Jul 9 14:37:13 2005
+++ b/xen/include/asm-ia64/vmx_vcpu.h Sat Jul 9 14:58:56 2005
@@ -53,7 +53,7 @@
#define VMM_RR_SHIFT 20
#define VMM_RR_MASK ((1UL<<VMM_RR_SHIFT)-1)
-#define VRID_2_MRID(vcpu,rid) ((rid) & VMM_RR_MASK) | \
+//#define VRID_2_MRID(vcpu,rid) ((rid) & VMM_RR_MASK) | \
((vcpu->domain->domain_id) << VMM_RR_SHIFT)
extern u64 indirect_reg_igfld_MASK ( int type, int index, u64 value);
extern u64 cr_igfld_mask (int index, u64 value);
@@ -69,7 +69,7 @@
extern IA64FAULT vmx_vcpu_cover(VCPU *vcpu);
extern thash_cb_t *vmx_vcpu_get_vtlb(VCPU *vcpu);
extern thash_cb_t *vmx_vcpu_get_vhpt(VCPU *vcpu);
-ia64_rr vmx_vcpu_rr(VCPU *vcpu,UINT64 vadr);
+extern ia64_rr vmx_vcpu_rr(VCPU *vcpu,UINT64 vadr);
extern IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val);
extern IA64FAULT vmx_vcpu_get_rr(VCPU *vcpu, UINT64 reg, UINT64 *pval);
extern IA64FAULT vmx_vcpu_get_pkr(VCPU *vcpu, UINT64 reg, UINT64 *pval);
@@ -112,10 +112,10 @@
extern void vmx_inject_vhpi(VCPU *vcpu, u8 vec);
extern void vmx_vcpu_pend_interrupt(VCPU *vcpu, UINT64 vector);
extern struct virutal_platform_def *vmx_vcpu_get_plat(VCPU *vcpu);
-extern void memread_p(VCPU *vcpu, void *src, void *dest, size_t s);
-extern void memread_v(VCPU *vcpu, thash_data_t *vtlb, void *src, void *dest, size_t s);
-extern void memwrite_v(VCPU *vcpu, thash_data_t *vtlb, void *src, void *dest, size_t s);
-extern void memwrite_p(VCPU *vcpu, void *src, void *dest, size_t s);
+extern void memread_p(VCPU *vcpu, u64 *src, u64 *dest, size_t s);
+extern void memread_v(VCPU *vcpu, thash_data_t *vtlb, u64 *src, u64 *dest, size_t s);
+extern void memwrite_v(VCPU *vcpu, thash_data_t *vtlb, u64 *src, u64 *dest, size_t s);
+extern void memwrite_p(VCPU *vcpu, u64 *src, u64 *dest, size_t s);
/**************************************************************************
@@ -401,14 +401,8 @@
VPD_CR(vcpu,lid)=val;
return IA64_NO_FAULT;
}
-static inline
-IA64FAULT
-vmx_vcpu_set_tpr(VCPU *vcpu, u64 val)
-{
- VPD_CR(vcpu,tpr)=val;
- //TODO
- return IA64_NO_FAULT;
-}
+extern IA64FAULT vmx_vcpu_set_tpr(VCPU *vcpu, u64 val);
+
static inline
IA64FAULT
vmx_vcpu_set_eoi(VCPU *vcpu, u64 val)
diff -r 89d92ce10924 -r ca44d2dbb273 xen/arch/ia64/vmx_ivt.S
--- a/xen/arch/ia64/vmx_ivt.S Sat Jul 9 14:37:13 2005
+++ b/xen/arch/ia64/vmx_ivt.S Sat Jul 9 14:58:56 2005
@@ -347,11 +347,16 @@
mov r31=pr
mov r19=11
mov r30=cr.iim
- mov r29=0x1100
- ;;
- cmp4.eq p6,p7=r29,r30
+ movl r29=0x1100
+ ;;
+ cmp.eq p6,p7=r30,r0
+ (p6) br.sptk vmx_fault_11
+ ;;
+ cmp.eq p6,p7=r29,r30
(p6) br.dptk.few vmx_hypercall_dispatch
(p7) br.sptk.many vmx_dispatch_break_fault
+ ;;
+ VMX_FAULT(11);
END(vmx_break_fault)
.org vmx_ia64_ivt+0x3000
@@ -363,6 +368,8 @@
mov r29=cr.ipsr
;;
tbit.z p6,p7=r29,IA64_PSR_VM_BIT
+ tbit.z p0,p15=r29,IA64_PSR_I_BIT
+ ;;
(p7) br.sptk vmx_dispatch_interrupt
;;
mov r27=ar.rsc /* M */
@@ -447,7 +454,7 @@
;;
srlz.i
;;
- ssm psr.i
+ (p15) ssm psr.i
adds r3=8,r2 // set up second base pointer for SAVE_REST
srlz.i // ensure everybody knows psr.ic is back on
;;
@@ -508,9 +515,12 @@
.org vmx_ia64_ivt+0x3400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3400 Entry 13 (size 64 bundles) Reserved
+ENTRY(vmx_virtual_exirq)
VMX_DBG_FAULT(13)
- VMX_FAULT(13)
-
+ mov r31=pr
+ mov r19=13
+ br.sptk vmx_dispatch_vexirq
+END(vmx_virtual_exirq)
.org vmx_ia64_ivt+0x3800
/////////////////////////////////////////////////////////////////////////////////////////
@@ -876,7 +886,7 @@
;;
srlz.i // guarantee that interruption collection is on
;;
- ssm psr.i // restore psr.i
+ (p15) ssm psr.i // restore psr.i
adds r3=16,r2 // set up second base pointer
;;
VMX_SAVE_REST
@@ -887,8 +897,6 @@
END(vmx_dispatch_reflection)
ENTRY(vmx_dispatch_virtualization_fault)
- cmp.eq pEml,pNonEml=r0,r0 /* force pEml =1, save r4 ~ r7 */
- ;;
VMX_SAVE_MIN_WITH_COVER_R19
;;
alloc r14=ar.pfs,0,0,3,0 // now it's safe (must be first in insn group!)
@@ -899,7 +907,7 @@
;;
srlz.i // guarantee that interruption collection is on
;;
- ssm psr.i // restore psr.i
+ (p15) ssm psr.i // restore psr.i
adds r3=16,r2 // set up second base pointer
;;
VMX_SAVE_REST
@@ -910,6 +918,24 @@
END(vmx_dispatch_virtualization_fault)
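+// Dispatch a virtual external interrupt: save minimal state, call vmx_vexirq and return through ia64_leave_hypervisor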
+ENTRY(vmx_dispatch_vexirq)
+ VMX_SAVE_MIN_WITH_COVER_R19
+ alloc r14=ar.pfs,0,0,1,0
+ mov out0=r13
+
+ ssm psr.ic
+ ;;
+ srlz.i // guarantee that interruption collection is on
+ ;;
+ (p15) ssm psr.i // restore psr.i
+ adds r3=16,r2 // set up second base pointer
+ ;;
+ VMX_SAVE_REST
+ movl r14=ia64_leave_hypervisor
+ ;;
+ mov rp=r14
+ br.call.sptk.many b6=vmx_vexirq
+END(vmx_dispatch_vexirq)
ENTRY(vmx_dispatch_tlb_miss)
VMX_SAVE_MIN_WITH_COVER_R19
@@ -922,7 +948,7 @@
;;
srlz.i // guarantee that interruption collection is on
;;
- ssm psr.i // restore psr.i
+ (p15) ssm psr.i // restore psr.i
adds r3=16,r2 // set up second base pointer
;;
VMX_SAVE_REST
@@ -947,7 +973,7 @@
;;
srlz.i // guarantee that interruption collection is on
;;
- ssm psr.i // restore psr.i
+ (p15)ssm psr.i // restore psr.i
adds r3=16,r2 // set up second base pointer
;;
VMX_SAVE_REST
@@ -965,7 +991,7 @@
;;
srlz.i // guarantee that interruption collection is on
;;
- ssm psr.i // restore psr.i
+ (p15) ssm psr.i // restore psr.i
adds r3=16,r2 // set up second base pointer
;;
VMX_SAVE_REST
@@ -987,8 +1013,6 @@
ENTRY(vmx_dispatch_interrupt)
- cmp.ne pEml,pNonEml=r0,r0 /* force pNonEml =1, don't save r4 ~ r7 */
- ;;
VMX_SAVE_MIN_WITH_COVER_R19 // uses r31; defines r2 and r3
;;
alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
@@ -999,7 +1023,7 @@
;;
srlz.i
;;
- ssm psr.i
+ (p15) ssm psr.i
adds r3=16,r2 // set up second base pointer for SAVE_REST
;;
VMX_SAVE_REST
diff -r 89d92ce10924 -r ca44d2dbb273 xen/arch/ia64/vmx_support.c
--- /dev/null Sat Jul 9 14:37:13 2005
+++ b/xen/arch/ia64/vmx_support.c Sat Jul 9 14:58:56 2005
@@ -0,0 +1,159 @@
+
+/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
+/*
+ * vmx_support.c: vmx specific support interface.
+ * Copyright (c) 2005, Intel Corporation.
+ * Kun Tian (Kevin Tian) (Kevin.tian@xxxxxxxxx)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ */
+#include <xen/config.h>
+#include <xen/sched.h>
+#include <public/io/ioreq.h>
+#include <asm/vmx.h>
+#include <asm/vmx_vcpu.h>
+
+/*
+ * I/O emulation should be atomic from the domain's point of view. However,
+ * while the emulation code is waiting for I/O completion via do_block,
+ * other events like a DM interrupt, VBD, etc. may come in and unblock the
+ * current execution flow. So we have to be prepared to re-block if unblocked
+ * by a non-I/O-completion event.
+ */
+void vmx_wait_io(void)
+{
+ struct vcpu *v = current;
+ struct domain *d = v->domain;
+ extern void do_block();
+
+ do {
+ if (!test_bit(IOPACKET_PORT,
+ &d->shared_info->evtchn_pending[0]))
+ do_block();
+
+ /* Unblocked because some event has arrived. Clear the pending indication
+ * immediately if we decide to go for I/O assist.
+ */
+ if (test_and_clear_bit(IOPACKET_PORT,
+ &d->shared_info->evtchn_pending[0])) {
+ clear_bit(IOPACKET_PORT>>5, &v->vcpu_info->evtchn_pending_sel);
+ clear_bit(0, &v->vcpu_info->evtchn_upcall_pending);
+ vmx_io_assist(v);
+ }
+
+
+ if (test_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags)) {
+ /*
+ * The latest event is not an I/O completion, so clear the corresponding
+ * selector and pending indication to allow the real event to come in.
+ */
+ clear_bit(0, &v->vcpu_info->evtchn_upcall_pending);
+
+ /* Actually a small window is left here before the selector is cleared.
+ * However, this window only delays the indication of an incoming event;
+ * nothing is lost. The next loop iteration will check the I/O channel
+ * to close this window.
+ */
+ clear_bit(IOPACKET_PORT>>5, &v->vcpu_info->evtchn_pending_sel);
+ }
+ else
+ break;
+ } while (test_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags));
+}
+
+/*
+ * The only place to call vmx_io_assist is mmio/legacy_io emulation.
+ * Since I/O emulation is synchronous, it shouldn't be called anywhere
+ * else. This is unlike x86, since IA-64 implements a per-vp stack
+ * without continuation.
+ */
+void vmx_io_assist(struct vcpu *v)
+{
+ vcpu_iodata_t *vio;
+ ioreq_t *p;
+
+ /*
+ * This shared page carries the I/O request exchanged between the
+ * emulation code and the device model.
+ */
+ vio = (vcpu_iodata_t *)v->arch.arch_vmx.vmx_platform.shared_page_va;
+ if (!vio)
+ panic("Corruption: bad shared page: %lx\n", (unsigned long)vio);
+
+ p = &vio->vp_ioreq;
+
+ if (p->state == STATE_IORESP_HOOK)
+ panic("Not supported: No hook available for DM request\n");
+
+ if (test_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags)) {
+ if (p->state != STATE_IORESP_READY) {
+ /* Can't do_block here, for the same reason as the other places that
+ * use vmx_wait_io. A simple return is safe since vmx_wait_io will
+ * try to block again.
+ */
+ return;
+ } else
+ p->state = STATE_INVALID;
+
+ clear_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
+ } else
+ return; /* Spurious event? */
+}
+
+/*
+ * A VMX domainN has two types of interrupt source: the lsapic model within
+ * the HV, and the device model within domain 0 (the service OS). There is
+ * also a pending array in the shared page, manipulated directly by the
+ * device model. To conform to the VT-i spec, we have to sync the pending
+ * bits in the shared page into the VPD. This has to be done before checking
+ * for pending interrupts when resuming to the guest. For domain 0, all
+ * interrupt sources come from the HV, which thus doesn't require this assist.
+ */
+void vmx_intr_assist(struct vcpu *v)
+{
+ vcpu_iodata_t *vio;
+ struct domain *d = v->domain;
+ extern void vmx_vcpu_pend_batch_interrupt(VCPU *vcpu,
+ unsigned long *pend_irr);
+
+ /* I/O emulation is atomic, so it's impossible for the execution flow to
+ * leave vmx_wait_io while the guest is still waiting for a response.
+ */
+ if (test_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags))
+ panic("!!!Bad resume to guest before I/O emulation is done.\n");
+
+ /* Clear indicator specific to interrupt delivered from DM */
+ if (test_and_clear_bit(IOPACKET_PORT,
+ &d->shared_info->evtchn_pending[0])) {
+ if (!d->shared_info->evtchn_pending[IOPACKET_PORT >> 5])
+ clear_bit(IOPACKET_PORT>>5, &v->vcpu_info->evtchn_pending_sel);
+
+ if (!v->vcpu_info->evtchn_pending_sel)
+ clear_bit(0, &v->vcpu_info->evtchn_upcall_pending);
+ }
+
+ /* Even without a pending event, we still need to sync the pending bits
+ * between the DM and the vlsapic. The reason is that interrupt delivery
+ * shares the same event channel as I/O emulation, so the corresponding
+ * indicator may already have been cleared in vmx_wait_io().
+ */
+ vio = (vcpu_iodata_t *)v->arch.arch_vmx.vmx_platform.shared_page_va;
+ if (!vio)
+ panic("Corruption: bad shared page: %lx\n", (unsigned long)vio);
+
+ vmx_vcpu_pend_batch_interrupt(v, &vio->vp_intr[0]);
+ memset(&vio->vp_intr[0], 0, sizeof(vio->vp_intr));
+ return;
+}
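
For readers following the bit manipulation in vmx_wait_io() and vmx_intr_assist():
the code walks a three-level pending hierarchy -- the per-port bit in
shared_info->evtchn_pending[], the per-word selector bit in
vcpu_info->evtchn_pending_sel, and the global evtchn_upcall_pending flag -- with
IOPACKET_PORT>>5 selecting the word that holds the port. The sketch below mirrors
the clear sequence used in vmx_intr_assist(); the struct layouts and the port value
are simplified stand-ins, not the real shared_info/vcpu_info/ioreq definitions.

/* Simplified sketch, not part of the patch: shown only to make the
 * >>5 / selector / upcall sequence above easier to follow. */
#include <stdio.h>

#define IOPACKET_PORT 2                       /* assumed value, illustration only */

struct toy_shared_info { unsigned int evtchn_pending[32]; };
struct toy_vcpu_info   { unsigned int  evtchn_pending_sel;
                         unsigned char evtchn_upcall_pending; };

static void clear_io_event(struct toy_shared_info *s, struct toy_vcpu_info *v)
{
    unsigned int word = IOPACKET_PORT >> 5;   /* which 32-bit word holds the port */
    unsigned int bit  = IOPACKET_PORT & 31;   /* bit within that word */

    s->evtchn_pending[word] &= ~(1u << bit);  /* per-port pending bit */
    if (!s->evtchn_pending[word])
        v->evtchn_pending_sel &= ~(1u << word);   /* per-word selector bit */
    if (!v->evtchn_pending_sel)
        v->evtchn_upcall_pending = 0;         /* global upcall flag */
}

int main(void)
{
    struct toy_shared_info s = { .evtchn_pending = { [0] = 1u << IOPACKET_PORT } };
    struct toy_vcpu_info   v = { .evtchn_pending_sel = 1u, .evtchn_upcall_pending = 1 };

    clear_io_event(&s, &v);
    printf("upcall_pending=%d\n", v.evtchn_upcall_pending);   /* prints 0 */
    return 0;
}
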
diff -r 89d92ce10924 -r ca44d2dbb273 xen/arch/ia64/pal_emul.c
--- /dev/null Sat Jul 9 14:37:13 2005
+++ b/xen/arch/ia64/pal_emul.c Sat Jul 9 14:58:56 2005
@@ -0,0 +1,283 @@
+/* PAL/SAL call delegation
+ *
+ * Copyright (c) 2004 Li Susie <susie.li@xxxxxxxxx>
+ * Copyright (c) 2005 Yu Ke <ke.yu@xxxxxxxxx>
+ */
+
+#include <asm/vmx_vcpu.h>
+
+static void
+get_pal_parameters (VCPU *vcpu, UINT64 *gr29,
+ UINT64 *gr30, UINT64 *gr31) {
+
+ vmx_vcpu_get_gr(vcpu,29,gr29);
+ vmx_vcpu_get_gr(vcpu,30,gr30);
+ vmx_vcpu_get_gr(vcpu,31,gr31);
+}
+
+static void
+set_pal_result (VCPU *vcpu,struct ia64_pal_retval result) {
+
+ vmx_vcpu_set_gr(vcpu,8, result.status,0);
+ vmx_vcpu_set_gr(vcpu,9, result.v0,0);
+ vmx_vcpu_set_gr(vcpu,10, result.v1,0);
+ vmx_vcpu_set_gr(vcpu,11, result.v2,0);
+}
+
+
+static struct ia64_pal_retval
+pal_cache_flush (VCPU *vcpu) {
+ UINT64 gr28,gr29, gr30, gr31;
+ struct ia64_pal_retval result;
+
+ get_pal_parameters (vcpu, &gr29, &gr30, &gr31);
+ vmx_vcpu_get_gr(vcpu,28,&gr28);
+
+ /* Always call the host PAL with int=1 */
+ gr30 = gr30 &(~(0x2UL));
+
+ /* Call the host PAL cache flush */
+ result = ia64_pal_call_static(gr28, gr29, gr30, gr31, 1); // Clear psr.ic when calling PAL_CACHE_FLUSH
+
+ /* If host PAL call is interrupted, then loop to complete it */
+// while (result.status == 1) {
+// ia64_pal_call_static(gr28 ,gr29, gr30,
+// result.v1,1LL);
+// }
+ if (result.status != 0)
+ panic("PAL_CACHE_FLUSH ERROR, status %ld", result.status);
+
+ return result;
+}
+
+static struct ia64_pal_retval
+pal_vm_tr_read (VCPU *vcpu ) {
+#warning pal_vm_tr_read: to be implemented
+ struct ia64_pal_retval result;
+
+ result.status= -1; //unimplemented
+
+ return result;
+}
+
+
+static struct ia64_pal_retval
+pal_prefetch_visibility (VCPU *vcpu) {
+ /* Due to the current MM virtualization algorithm,
+ * we do not allow the guest to change mapping attributes.
+ * Thus we do not support PAL_PREFETCH_VISIBILITY.
+ */
+ struct ia64_pal_retval result;
+
+ result.status= -1; //unimplemented
+
+ return result;
+}
+
+static struct ia64_pal_retval
+pal_platform_addr(VCPU *vcpu) {
+ struct ia64_pal_retval result;
+
+ result.status= 0; //success
+
+ return result;
+}
+
+static struct ia64_pal_retval
+pal_halt (VCPU *vcpu) {
+#warning pal_halt: to be implemented
+ //bugbug: to be implemented.
+ struct ia64_pal_retval result;
+
+ result.status= -1; //unimplemented
+
+ return result;
+}
+
+
+static struct ia64_pal_retval
+pal_halt_light (VCPU *vcpu) {
+#if 0
+ // GVMM will go back to HVMM and ask HVMM to call yield().
+ vmmdata.p_ctlblk->status = VM_OK;
+ vmmdata.p_ctlblk->ctlcode = ExitVM_YIELD;
+
+ vmm_transition((UINT64)&vmmdata.p_gsa->guest,
+ (UINT64)&vmmdata.p_gsa->host,
+ (UINT64) vmmdata.p_tramp,0,0);
+
+
+ result.status = 0;
+ result.pal_result[0]=0;
+ result.pal_result[1]=0;
+ result.pal_result[2]=0;
+
+ return result;
+#endif
+ struct ia64_pal_retval result;
+
+ result.status= -1; //unimplemented
+
+ return result;
+}
+
+static struct ia64_pal_retval
+pal_cache_read (VCPU *vcpu) {
+ struct ia64_pal_retval result;
+
+ result.status= -1; //unimplemented
+
+ return result;
+}
+
+static struct ia64_pal_retval
+pal_cache_write (VCPU *vcpu) {
+ struct ia64_pal_retval result;
+
+ result.status= -1; //unimplemented
+
+ return result;
+}
+
+static struct ia64_pal_retval
+pal_bus_get_features(VCPU *vcpu){
+
+}
+
+static struct ia64_pal_retval
+pal_cache_summary(VCPU *vcpu){
+
+}
+
+static struct ia64_pal_retval
+pal_cache_init(VCPU *vcpu){
+ struct ia64_pal_retval result;
+ result.status=0;
+ return result;
+}
+
+static struct ia64_pal_retval
+pal_cache_info(VCPU *vcpu){
+}
+
+static struct ia64_pal_retval
+pal_cache_prot_info(VCPU *vcpu){
+}
+
+static struct ia64_pal_retval
+pal_cache_shared_info(VCPU *vcpu){
+}
+
+static struct ia64_pal_retval
+pal_mem_attrib(VCPU *vcpu){
+}
+
+static struct ia64_pal_retval
+pal_debug_info(VCPU *vcpu){
+}
+
+static struct ia64_pal_retval
+pal_fixed_addr(VCPU *vcpu){
+}
+
+static struct ia64_pal_retval
+pal_freq_base(VCPU *vcpu){
+}
+
+static struct ia64_pal_retval
+pal_freq_ratios(VCPU *vcpu){
+}
+
+static struct ia64_pal_retval
+pal_halt_info(VCPU *vcpu){
+}
+
+static struct ia64_pal_retval
+pal_logical_to_physical(VCPU *vcpu){
+}
+
+static struct ia64_pal_retval
+pal_perf_mon_info(VCPU *vcpu){
+}
+
+static struct ia64_pal_retval
+pal_proc_get_features(VCPU *vcpu){
+}
+
+static struct ia64_pal_retval
+pal_ptce_info(VCPU *vcpu){
+}
+
+static struct ia64_pal_retval
+pal_register_info(VCPU *vcpu){
+}
+
+static struct ia64_pal_retval
+pal_rse_info(VCPU *vcpu){
+}
+
+static struct ia64_pal_retval
+pal_test_info(VCPU *vcpu){
+}
+
+static struct ia64_pal_retval
+pal_vm_summary(VCPU *vcpu){
+}
+
+static struct ia64_pal_retval
+pal_vm_info(VCPU *vcpu){
+}
+
+static struct ia64_pal_retval
+pal_vm_page_size(VCPU *vcpu){
+}
+
+void
+pal_emul( VCPU *vcpu) {
+ UINT64 gr28;
+ struct ia64_pal_retval result;
+
+
+ vmx_vcpu_get_gr(vcpu,28,&gr28); //bank1
+
+ switch (gr28) {
+ case PAL_CACHE_FLUSH:
+ result = pal_cache_flush (vcpu);
+ break;
+
+ case PAL_PREFETCH_VISIBILITY:
+ result = pal_prefetch_visibility (vcpu);
+ break;
+
+ case PAL_VM_TR_READ:
+ result = pal_vm_tr_read (vcpu);
+ break;
+
+ case PAL_HALT:
+ result = pal_halt (vcpu);
+ break;
+
+ case PAL_HALT_LIGHT:
+ result = pal_halt_light (vcpu);
+ break;
+
+ case PAL_CACHE_READ:
+ result = pal_cache_read (vcpu);
+ break;
+
+ case PAL_CACHE_WRITE:
+ result = pal_cache_write (vcpu);
+ break;
+
+ case PAL_PLATFORM_ADDR:
+ result = pal_platform_addr (vcpu);
+ break;
+
+ default:
+ panic("pal_emul(): guest call unsupported pal" );
+ }
+ set_pal_result (vcpu, result);
+}
+
+
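
pal_emul() above reads the requested PAL index from guest r28, dispatches to a
per-index handler, and writes the four-word result back to guest r8-r11 through
set_pal_result(); most handlers are still stubs that report status -1
(unimplemented). As an illustration of how one of the remaining stubs could be
filled in, the sketch below (written as it would appear in pal_emul.c) delegates a
call straight to host firmware, reusing the register conventions and the
ia64_pal_call_static() pattern already used by pal_cache_flush(). This is not part
of the patch: the handler name is hypothetical, the meaning of the trailing "1"
argument is simply copied from the pal_cache_flush() call, and wiring it up would
also need a matching case PAL_FREQ_BASE: in pal_emul()'s switch.

/* Hypothetical handler, not in the patch: pass PAL_FREQ_BASE through to the
 * host PAL using the same conventions as pal_cache_flush() above. */
static struct ia64_pal_retval
pal_freq_base_passthrough (VCPU *vcpu) {
	UINT64 gr28, gr29, gr30, gr31;
	struct ia64_pal_retval result;

	vmx_vcpu_get_gr(vcpu, 28, &gr28);              /* PAL index from guest r28 */
	get_pal_parameters(vcpu, &gr29, &gr30, &gr31); /* guest r29-r31 */

	/* Last argument copied from the pal_cache_flush() call above */
	result = ia64_pal_call_static(gr28, gr29, gr30, gr31, 1);

	return result;                                 /* pal_emul() copies it to r8-r11 */
}
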
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog