ChangeSet 1.1709.1.12, 2005/06/16 12:20:35-06:00, djm@xxxxxxxxxxxxxxx
adds hypercall support in HV for VTI
Signed-off-by: Anthony Xu <Anthony.xu@xxxxxxxxx>
Signed-off-by: Eddie Dong <Eddie.dong@xxxxxxxxx>
Signed-off-by: Kevin Tian <Kevin.tian@xxxxxxxxx>
arch/ia64/Makefile | 2
arch/ia64/domain.c | 18 +++----
arch/ia64/vmmu.c | 76 ++++++++++++++-----------------
arch/ia64/vmx_ivt.S | 76 ++++++++++++++++++++++++++++++-
arch/ia64/vmx_minstate.h | 8 +--
arch/ia64/vtlb.c | 96 ++++++++++++++++++++++++++++++++++------
include/asm-ia64/tlb.h | 10 ++--
include/asm-ia64/vmmu.h | 33 ++++++-------
include/asm-ia64/vmx_platform.h | 2
9 files changed, 224 insertions(+), 97 deletions(-)
diff -Nru a/xen/arch/ia64/Makefile b/xen/arch/ia64/Makefile
--- a/xen/arch/ia64/Makefile 2005-06-19 14:05:02 -04:00
+++ b/xen/arch/ia64/Makefile 2005-06-19 14:05:02 -04:00
@@ -15,7 +15,7 @@
ifeq ($(CONFIG_VTI),y)
OBJS += vmx_init.o vmx_virt.o vmx_vcpu.o vmx_process.o vmx_vsa.o vmx_ivt.o \
vmx_phy_mode.o vmx_utility.o vmx_interrupt.o vmx_entry.o vmmu.o \
- vtlb.o mmio.o vlsapic.o
+ vtlb.o mmio.o vlsapic.o vmx_hypercall.o mm.o
endif
# perfmon.o
# unwind.o needed for kernel unwinding (rare)
diff -Nru a/xen/arch/ia64/domain.c b/xen/arch/ia64/domain.c
--- a/xen/arch/ia64/domain.c 2005-06-19 14:05:02 -04:00
+++ b/xen/arch/ia64/domain.c 2005-06-19 14:05:02 -04:00
@@ -194,21 +194,21 @@
memset(ti, 0, sizeof(struct thread_info));
init_switch_stack(v);
- /* If domain is VMX domain, shared info area is created
- * by domain and then domain notifies HV by specific hypercall.
- * If domain is xenolinux, shared info area is created by
- * HV.
- * Since we have no idea about whether domain is VMX now,
- * (dom0 when parse and domN when build), postpone possible
- * allocation.
- */
+ /* Shared info area is required to be allocated at domain
+ * creation, since control panel will write some I/O info
+ * between front end and back end to that area. However for
+ * vmx domain, our design is to let domain itself to allocate
+ * shared info area, to keep machine page contiguous. So this
+ * page will be released later when domainN issues request
+ * after up.
+ */
+ d->shared_info = (void *)alloc_xenheap_page();
/* FIXME: Because full virtual cpu info is placed in this area,
* it's unlikely to put it into one shareinfo page. Later
* need split vcpu context from vcpu_info and conforms to
* normal xen convention.
*/
- d->shared_info = NULL;
v->vcpu_info = (void *)alloc_xenheap_page();
if (!v->vcpu_info) {
printk("ERROR/HALTING: CAN'T ALLOC PAGE\n");
diff -Nru a/xen/arch/ia64/vmmu.c b/xen/arch/ia64/vmmu.c
--- a/xen/arch/ia64/vmmu.c 2005-06-19 14:05:02 -04:00
+++ b/xen/arch/ia64/vmmu.c 2005-06-19 14:05:02 -04:00
@@ -454,12 +454,13 @@
data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
data.itir=itir;
data.vadr=PAGEALIGN(ifa,data.ps);
- data.section=THASH_TLB_TC;
+ data.tc = 1;
data.cl=ISIDE_TLB;
vmx_vcpu_get_rr(vcpu, ifa, &vrr);
data.rid = vrr.rid;
- sections.v = THASH_SECTION_TR;
+ sections.tr = 1;
+ sections.tc = 0;
ovl = thash_find_overlap(hcb, &data, sections);
while (ovl) {
@@ -467,9 +468,7 @@
panic("Tlb conflict!!");
return;
}
- sections.v = THASH_SECTION_TC;
- thash_purge_entries(hcb, &data, sections);
- thash_insert(hcb, &data, ifa);
+ thash_purge_and_insert(hcb, &data);
return IA64_NO_FAULT;
}
@@ -488,11 +487,12 @@
data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
data.itir=itir;
data.vadr=PAGEALIGN(ifa,data.ps);
- data.section=THASH_TLB_TC;
+ data.tc = 1;
data.cl=DSIDE_TLB;
vmx_vcpu_get_rr(vcpu, ifa, &vrr);
data.rid = vrr.rid;
- sections.v = THASH_SECTION_TR;
+ sections.tr = 1;
+ sections.tc = 0;
ovl = thash_find_overlap(hcb, &data, sections);
if (ovl) {
@@ -500,42 +500,27 @@
panic("Tlb conflict!!");
return;
}
- sections.v = THASH_SECTION_TC;
- thash_purge_entries(hcb, &data, sections);
- thash_insert(hcb, &data, ifa);
+ thash_purge_and_insert(hcb, &data);
return IA64_NO_FAULT;
}
-IA64FAULT insert_foreignmap(VCPU *vcpu, UINT64 pte, UINT64 ps, UINT64 va)
+/*
+ * Return TRUE/FALSE for success of lock operation
+ */
+int vmx_lock_guest_dtc (VCPU *vcpu, UINT64 va, int lock)
{
- thash_data_t data, *ovl;
thash_cb_t *hcb;
- search_section_t sections;
- rr_t vrr;
+ rr_t vrr;
+ u64 preferred_size;
- hcb = vmx_vcpu_get_vtlb(vcpu);
- data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
- data.itir=0;
- data.ps = ps;
- data.vadr=PAGEALIGN(va,ps);
- data.section=THASH_TLB_FM;
- data.cl=DSIDE_TLB;
vmx_vcpu_get_rr(vcpu, va, &vrr);
- data.rid = vrr.rid;
- sections.v = THASH_SECTION_TR|THASH_SECTION_TC|THASH_SECTION_FM;
-
- ovl = thash_find_overlap(hcb, &data, sections);
- if (ovl) {
- // generate MCA.
- panic("Foreignmap Tlb conflict!!");
- return;
- }
- thash_insert(hcb, &data, va);
- return IA64_NO_FAULT;
+ hcb = vmx_vcpu_get_vtlb(vcpu);
+ va = PAGEALIGN(va,vrr.ps);
+ preferred_size = PSIZE(vrr.ps);
+ return thash_lock_tc(hcb, va, preferred_size, vrr.rid, DSIDE_TLB, lock);
}
-
IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa,
UINT64 idx)
{
@@ -548,11 +533,12 @@
data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
data.itir=itir;
data.vadr=PAGEALIGN(ifa,data.ps);
- data.section=THASH_TLB_TR;
+ data.tc = 0;
data.cl=ISIDE_TLB;
vmx_vcpu_get_rr(vcpu, ifa, &vrr);
data.rid = vrr.rid;
- sections.v = THASH_SECTION_TR;
+ sections.tr = 1;
+ sections.tc = 0;
ovl = thash_find_overlap(hcb, &data, sections);
if (ovl) {
@@ -560,7 +546,8 @@
panic("Tlb conflict!!");
return;
}
- sections.v=THASH_SECTION_TC;
+ sections.tr = 0;
+ sections.tc = 1;
thash_purge_entries(hcb, &data, sections);
thash_tr_insert(hcb, &data, ifa, idx);
return IA64_NO_FAULT;
@@ -579,11 +566,12 @@
data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
data.itir=itir;
data.vadr=PAGEALIGN(ifa,data.ps);
- data.section=THASH_TLB_TR;
+ data.tc = 0;
data.cl=DSIDE_TLB;
vmx_vcpu_get_rr(vcpu, ifa, &vrr);
data.rid = vrr.rid;
- sections.v = THASH_SECTION_TR;
+ sections.tr = 1;
+ sections.tc = 0;
ovl = thash_find_overlap(hcb, &data, sections);
while (ovl) {
@@ -591,7 +579,8 @@
panic("Tlb conflict!!");
return;
}
- sections.v=THASH_SECTION_TC;
+ sections.tr = 0;
+ sections.tc = 1;
thash_purge_entries(hcb, &data, sections);
thash_tr_insert(hcb, &data, ifa, idx);
return IA64_NO_FAULT;
@@ -607,7 +596,8 @@
hcb = vmx_vcpu_get_vtlb(vcpu);
rr=vmx_vcpu_rr(vcpu,vadr);
- sections.v = THASH_SECTION_TR | THASH_SECTION_TC;
+ sections.tr = 1;
+ sections.tc = 1;
thash_purge_entries_ex(hcb,rr.rid,vadr,ps,sections,DSIDE_TLB);
return IA64_NO_FAULT;
}
@@ -619,7 +609,8 @@
search_section_t sections;
hcb = vmx_vcpu_get_vtlb(vcpu);
rr=vmx_vcpu_rr(vcpu,vadr);
- sections.v = THASH_SECTION_TR | THASH_SECTION_TC;
+ sections.tr = 1;
+ sections.tc = 1;
thash_purge_entries_ex(hcb,rr.rid,vadr,ps,sections,ISIDE_TLB);
return IA64_NO_FAULT;
}
@@ -632,7 +623,8 @@
thash_data_t data, *ovl;
hcb = vmx_vcpu_get_vtlb(vcpu);
vrr=vmx_vcpu_rr(vcpu,vadr);
- sections.v = THASH_SECTION_TC;
+ sections.tr = 0;
+ sections.tc = 1;
vadr = PAGEALIGN(vadr, ps);
thash_purge_entries_ex(hcb,vrr.rid,vadr,ps,sections,DSIDE_TLB);
diff -Nru a/xen/arch/ia64/vmx_ivt.S b/xen/arch/ia64/vmx_ivt.S
--- a/xen/arch/ia64/vmx_ivt.S 2005-06-19 14:05:02 -04:00
+++ b/xen/arch/ia64/vmx_ivt.S 2005-06-19 14:05:02 -04:00
@@ -180,7 +180,7 @@
mov r29=cr.ipsr;
;;
tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
-(p6)br.sptk vmx_fault_1
+(p6)br.sptk vmx_fault_2
mov r16 = cr.ifa
;;
thash r17 = r16
@@ -346,7 +346,12 @@
ENTRY(vmx_break_fault)
mov r31=pr
mov r19=11
- br.sptk.many vmx_dispatch_break_fault
+ mov r30=cr.iim
+ mov r29=0x1100
+ ;;
+ cmp4.eq p6,p7=r29,r30
+ (p6) br.dptk.few vmx_hypercall_dispatch
+ (p7) br.sptk.many vmx_dispatch_break_fault
END(vmx_break_fault)
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
|