# HG changeset patch
# User fred@xxxxxxxxxxxxxxxxxxxxx
# Node ID 7e74ac6fdea913097d1eb3456d9089e3312de002
# Parent b6803bdaa95ae98b0a5be63b28788f78b2b32072
Final set of HV changes to support multiple domains on VTI.
Signed-off-by: Anthony Xu <anthony.xu@xxxxxxxxx>
Signed-off-by: Eddie Dong <eddie.dong@xxxxxxxxx>
Signed-off-by: Fred Yang <fred.yang@xxxxxxxxx>
Signed-off-by: Kevin Tian <kevin.tian@xxxxxxxxx>
diff -r b6803bdaa95a -r 7e74ac6fdea9 xen/arch/ia64/domain.c
--- a/xen/arch/ia64/domain.c Tue Aug 2 10:09:24 2005
+++ b/xen/arch/ia64/domain.c Tue Aug 2 10:11:41 2005
@@ -348,6 +348,7 @@
struct domain *d = v->domain;
int i, rc, ret;
unsigned long progress = 0;
+ shared_iopage_t *sp;
if ( test_bit(_VCPUF_initialised, &v->vcpu_flags) )
return 0;
@@ -373,8 +374,17 @@
/* FIXME: only support PMT table continuously by far */
d->arch.pmt = __va(c->pt_base);
d->arch.max_pfn = c->pt_max_pfn;
- v->arch.arch_vmx.vmx_platform.shared_page_va = __va(c->share_io_pg);
- memset((char *)__va(c->share_io_pg),0,PAGE_SIZE);
+ d->arch.vmx_platform.shared_page_va = __va(c->share_io_pg);
+ sp = get_sp(d);
+ memset((char *)sp,0,PAGE_SIZE);
+ /* FIXME: temp due to old CP */
+ sp->sp_global.eport = 2;
+#ifdef V_IOSAPIC_READY
+ sp->vcpu_number = 1;
+#endif
+ /* TEMP */
+ d->arch.vmx_platform.pib_base = 0xfee00000UL;
+
if (c->flags & VGCF_VMX_GUEST) {
if (!vmx_enabled)
@@ -393,7 +403,7 @@
if (v == d->vcpu[0]) {
memset(&d->shared_info->evtchn_mask[0], 0xff,
sizeof(d->shared_info->evtchn_mask));
- clear_bit(IOPACKET_PORT, &d->shared_info->evtchn_mask[0]);
+ clear_bit(iopacket_port(d), &d->shared_info->evtchn_mask[0]);
}
/* Setup domain context. Actually IA-64 is a bit different with
* x86, with almost all system resources better managed by HV
diff -r b6803bdaa95a -r 7e74ac6fdea9 xen/arch/ia64/mmio.c
--- a/xen/arch/ia64/mmio.c Tue Aug 2 10:09:24 2005
+++ b/xen/arch/ia64/mmio.c Tue Aug 2 10:11:41 2005
@@ -66,7 +66,7 @@
default:
if ( PIB_LOW_HALF(pib_off) ) { // lower half
if ( s != 8 || ma != 0x4 /* UC */ ) {
- panic("Undefined IPI-LHF write!\n");
+ panic("Undefined IPI-LHF write with s %d, ma %d!\n", s, ma);
}
else {
write_ipi(vcpu, pib_off, *(uint64_t *)src);
@@ -135,13 +135,13 @@
ioreq_t *p;
unsigned long addr;
- vio = (vcpu_iodata_t *) v->arch.arch_vmx.vmx_platform.shared_page_va;
+ vio = get_vio(v->domain, v->vcpu_id);
if (vio == 0) {
panic("bad shared page: %lx", (unsigned long)vio);
}
p = &vio->vp_ioreq;
p->addr = pa;
- p->size = 1<<s;
+ p->size = s;
p->count = 1;
p->dir = dir;
if(dir==IOREQ_WRITE) //write;
@@ -152,9 +152,9 @@
set_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
p->state = STATE_IOREQ_READY;
- evtchn_send(IOPACKET_PORT);
+ evtchn_send(iopacket_port(v->domain));
vmx_wait_io();
- if(dir){ //read
+ if(dir==IOREQ_READ){ //read
*val=p->u.data;
}
return;
@@ -168,13 +168,13 @@
ioreq_t *p;
unsigned long addr;
- vio = (vcpu_iodata_t *) v->arch.arch_vmx.vmx_platform.shared_page_va;
+ vio = get_vio(v->domain, v->vcpu_id);
if (vio == 0) {
panic("bad shared page: %lx");
}
p = &vio->vp_ioreq;
p->addr = TO_LEGACY_IO(pa&0x3ffffffUL);
- p->size = 1<<s;
+ p->size = s;
p->count = 1;
p->dir = dir;
if(dir==IOREQ_WRITE) //write;
@@ -185,11 +185,20 @@
set_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
p->state = STATE_IOREQ_READY;
- evtchn_send(IOPACKET_PORT);
+ evtchn_send(iopacket_port(v->domain));
+
vmx_wait_io();
- if(dir){ //read
+ if(dir==IOREQ_READ){ //read
*val=p->u.data;
}
+#ifdef DEBUG_PCI
+ if(dir==IOREQ_WRITE)
+ if(p->addr == 0xcf8UL)
+ printk("Write 0xcf8, with val [0x%lx]\n", p->u.data);
+ else
+ if(p->addr == 0xcfcUL)
+ printk("Read 0xcfc, with val [0x%lx]\n", p->u.data);
+#endif //DEBUG_PCI
return;
}
@@ -204,12 +213,13 @@
switch (iot) {
case GPFN_PIB:
if(!dir)
- pib_write(vcpu, src_pa - v_plat->pib_base, dest, s, ma);
+ pib_write(vcpu, dest, src_pa - v_plat->pib_base, s, ma);
else
pib_read(vcpu, src_pa - v_plat->pib_base, dest, s, ma);
break;
case GPFN_GFW:
break;
+ case GPFN_IOSAPIC:
case GPFN_FRAME_BUFFER:
case GPFN_LOW_MMIO:
low_mmio_access(vcpu, src_pa, dest, s, dir);
@@ -217,7 +227,6 @@
case GPFN_LEGACY_IO:
legacy_io_access(vcpu, src_pa, dest, s, dir);
break;
- case GPFN_IOSAPIC:
default:
panic("Bad I/O access\n");
break;
@@ -342,6 +351,8 @@
LID lid;
for (i=0; i<MAX_VIRT_CPUS; i++) {
vcpu = d->vcpu[i];
+ if (!vcpu)
+ continue;
lid.val = VPD_CR(vcpu, lid);
if ( lid.id == id && lid.eid == eid ) {
return vcpu;
@@ -379,15 +390,16 @@
inst_type 0:integer 1:floating point
*/
extern IA64_BUNDLE __vmx_get_domain_bundle(u64 iip);
-
+#define SL_INTEGER 0 // store/load integer
+#define SL_FLOATING 1 // store/load floating point
void emulate_io_inst(VCPU *vcpu, u64 padr, u64 ma)
{
REGS *regs;
IA64_BUNDLE bundle;
- int slot, dir, inst_type=0;
+ int slot, dir, inst_type;
size_t size;
- u64 data, value, slot1a, slot1b;
+ u64 data, value,post_update, slot1a, slot1b, temp;
INST64 inst;
regs=vcpu_regs(vcpu);
bundle = __vmx_get_domain_bundle(regs->cr_iip);
@@ -400,28 +412,70 @@
}
else if (slot == 2) inst.inst = bundle.slot2;
+
+ // Integer Load/Store
if(inst.M1.major==4&&inst.M1.m==0&&inst.M1.x==0){
- inst_type=0; //fp
+ inst_type = SL_INTEGER; //
size=(inst.M1.x6&0x3);
if((inst.M1.x6>>2)>0xb){ // write
+ dir=IOREQ_WRITE; //write
vmx_vcpu_get_gr(vcpu,inst.M4.r2,&data);
+ }else if((inst.M1.x6>>2)<0xb){ // read
+ dir=IOREQ_READ;
+ vmx_vcpu_get_gr(vcpu,inst.M1.r1,&value);
+ }
+ }
+ // Integer Load + Reg update
+ else if(inst.M2.major==4&&inst.M2.m==1&&inst.M2.x==0){
+ inst_type = SL_INTEGER;
+ dir = IOREQ_READ; //read
+ size = (inst.M2.x6&0x3);
+ vmx_vcpu_get_gr(vcpu,inst.M2.r1,&value);
+ vmx_vcpu_get_gr(vcpu,inst.M2.r3,&temp);
+ vmx_vcpu_get_gr(vcpu,inst.M2.r2,&post_update);
+ temp += post_update;
+ vmx_vcpu_set_gr(vcpu,inst.M2.r3,temp,0);
+ }
+ // Integer Load/Store + Imm update
+ else if(inst.M3.major==5){
+ inst_type = SL_INTEGER; //
+ size=(inst.M3.x6&0x3);
+ if((inst.M5.x6>>2)>0xb){ // write
dir=IOREQ_WRITE; //write
- }else if((inst.M1.x6>>2)<0xb){ // read
- vmx_vcpu_get_gr(vcpu,inst.M1.r1,&value);
+ vmx_vcpu_get_gr(vcpu,inst.M5.r2,&data);
+ vmx_vcpu_get_gr(vcpu,inst.M5.r3,&temp);
+ post_update = (inst.M5.i<<7)+inst.M5.imm7;
+ if(inst.M5.s)
+ temp -= post_update;
+ else
+ temp += post_update;
+ vmx_vcpu_set_gr(vcpu,inst.M5.r3,temp,0);
+
+ }else if((inst.M3.x6>>2)<0xb){ // read
dir=IOREQ_READ;
- }else{
- printf("This memory access instruction can't be emulated one :
%lx\n",inst.inst);
- while(1);
- }
- }else if(inst.M6.major==6&&inst.M6.m==0&&inst.M6.x==0&&inst.M6.x6==3){
- inst_type=1; //fp
- dir=IOREQ_READ;
- size=3; //ldfd
- }else{
+ vmx_vcpu_get_gr(vcpu,inst.M3.r1,&value);
+ vmx_vcpu_get_gr(vcpu,inst.M3.r3,&temp);
+ post_update = (inst.M3.i<<7)+inst.M3.imm7;
+ if(inst.M3.s)
+ temp -= post_update;
+ else
+ temp += post_update;
+ vmx_vcpu_set_gr(vcpu,inst.M3.r3,temp,0);
+
+ }
+ }
+ // Floating-point Load/Store
+// else if(inst.M6.major==6&&inst.M6.m==0&&inst.M6.x==0&&inst.M6.x6==3){
+// inst_type=SL_FLOATING; //fp
+// dir=IOREQ_READ;
+// size=3; //ldfd
+// }
+ else{
printf("This memory access instruction can't be emulated two: %lx\n
",inst.inst);
while(1);
}
+ size = 1 << size;
if(dir==IOREQ_WRITE){
mmio_access(vcpu, padr, &data, size, ma, dir);
}else{
@@ -433,7 +487,7 @@
else if(size==2)
data = (value & 0xffffffff00000000U) | (data & 0xffffffffU);
- if(inst_type==0){ //gp
+ if(inst_type==SL_INTEGER){ //gp
vmx_vcpu_set_gr(vcpu,inst.M1.r1,data,0);
}else{
panic("Don't support ldfd now !");
diff -r b6803bdaa95a -r 7e74ac6fdea9 xen/arch/ia64/vlsapic.c
--- a/xen/arch/ia64/vlsapic.c Tue Aug 2 10:09:24 2005
+++ b/xen/arch/ia64/vlsapic.c Tue Aug 2 10:11:41 2005
@@ -38,6 +38,14 @@
#include <asm/vmx_pal_vsa.h>
#include <asm/kregs.h>
+#define SHARED_VLAPIC_INF
+#ifdef V_IOSAPIC_READY
+static inline vl_apic_info* get_psapic(VCPU *vcpu)
+{
+ shared_iopage_t *sp = get_sp(vcpu->domain);
+ return &(sp->vcpu_iodata[vcpu->vcpu_id].apic_intr);
+}
+#endif
//u64 fire_itc;
//u64 fire_itc2;
//u64 fire_itm;
@@ -216,7 +224,8 @@
*/
void vtm_domain_out(VCPU *vcpu)
{
- rem_ac_timer(&vcpu->arch.arch_vmx.vtm.vtm_timer);
+ if(!is_idle_task(vcpu->domain))
+ rem_ac_timer(&vcpu->arch.arch_vmx.vtm.vtm_timer);
}
/*
@@ -226,9 +235,11 @@
void vtm_domain_in(VCPU *vcpu)
{
vtime_t *vtm;
-
- vtm=&(vcpu->arch.arch_vmx.vtm);
- vtm_interruption_update(vcpu, vtm);
+
+ if(!is_idle_task(vcpu->domain)) {
+ vtm=&(vcpu->arch.arch_vmx.vtm);
+ vtm_interruption_update(vcpu, vtm);
+ }
}
/*
@@ -262,10 +273,50 @@
}
}
+#ifdef V_IOSAPIC_READY
+void vlapic_update_shared_info(VCPU *vcpu)
+{
+ //int i;
+
+ vl_apic_info *ps;
+
+ if (vcpu->domain == dom0)
+ return;
+
+ ps = get_psapic(vcpu);
+ ps->vl_lapic_id = ((VPD_CR(vcpu, lid) >> 16) & 0xffff) << 16;
+ printf("vl_lapic_id = %x\n", ps->vl_lapic_id);
+ ps->vl_apr = 0;
+ // skip ps->vl_logical_dest && ps->vl_dest_format
+ // IPF support physical destination mode only
+ ps->vl_arb_id = 0;
+ /*
+ for ( i=0; i<4; i++ ) {
+ ps->tmr[i] = 0; // edge trigger
+ }
+ */
+}
+
+void vlapic_update_ext_irq(VCPU *vcpu)
+{
+ int vec;
+
+ vl_apic_info *ps = get_psapic(vcpu);
+ while ( (vec = highest_bits(ps->irr)) != NULL_VECTOR ) {
+ clear_bit (vec, ps->irr);
+ vmx_vcpu_pend_interrupt(vcpu, vec);
+ }
+}
+#endif
+
void vlsapic_reset(VCPU *vcpu)
{
int i;
- VPD_CR(vcpu, lid) = 0;
+#ifdef V_IOSAPIC_READY
+ vl_apic_info *psapic; // shared lapic inf.
+#endif
+
+ VPD_CR(vcpu, lid) = ia64_getreg(_IA64_REG_CR_LID);
VPD_CR(vcpu, ivr) = 0;
VPD_CR(vcpu,tpr) = 0x10000;
VPD_CR(vcpu, eoi) = 0;
@@ -281,6 +332,10 @@
for ( i=0; i<4; i++) {
VLSAPIC_INSVC(vcpu,i) = 0;
}
+#ifdef V_IOSAPIC_READY
+ vlapic_update_shared_info(vcpu);
+ //vlapic_update_shared_irr(vcpu);
+#endif
DPRINTK("VLSAPIC inservice base=%lp\n", &VLSAPIC_INSVC(vcpu,0) );
}
@@ -414,6 +469,7 @@
}
local_irq_save(spsr);
VPD_CR(vcpu,irr[vector>>6]) |= 1UL<<(vector&63);
+ //vlapic_update_shared_irr(vcpu);
local_irq_restore(spsr);
vcpu->arch.irq_new_pending = 1;
}
@@ -432,6 +488,7 @@
for (i=0 ; i<4; i++ ) {
VPD_CR(vcpu,irr[i]) |= pend_irr[i];
}
+ //vlapic_update_shared_irr(vcpu);
local_irq_restore(spsr);
vcpu->arch.irq_new_pending = 1;
}
@@ -518,6 +575,7 @@
VLSAPIC_INSVC(vcpu,vec>>6) |= (1UL <<(vec&63));
VPD_CR(vcpu, irr[vec>>6]) &= ~(1UL <<(vec&63));
update_vhpi(vcpu, NULL_VECTOR); // clear VHPI till EOI or IRR write
+ //vlapic_update_shared_irr(vcpu);
local_irq_restore(spsr);
return (uint64_t)vec;
}
diff -r b6803bdaa95a -r 7e74ac6fdea9 xen/arch/ia64/vmx_ivt.S
--- a/xen/arch/ia64/vmx_ivt.S Tue Aug 2 10:09:24 2005
+++ b/xen/arch/ia64/vmx_ivt.S Tue Aug 2 10:11:41 2005
@@ -560,6 +560,21 @@
VMX_DBG_FAULT(19)
VMX_FAULT(19)
+ .org vmx_ia64_ivt+0x5000
+/////////////////////////////////////////////////////////////////////////////////////////
+// 0x5000 Entry 20 (size 16 bundles) Page Not Present
+ENTRY(vmx_page_not_present)
+ VMX_REFLECT(20)
+END(vmx_page_not_present)
+
+ .org vmx_ia64_ivt+0x5100
+/////////////////////////////////////////////////////////////////////////////////////////
+// 0x5100 Entry 21 (size 16 bundles) Key Permission vector
+ENTRY(vmx_key_permission)
+ VMX_REFLECT(21)
+END(vmx_key_permission)
+
+ .org vmx_ia64_ivt+0x5200
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
ENTRY(vmx_iaccess_rights)
diff -r b6803bdaa95a -r 7e74ac6fdea9 xen/arch/ia64/vmx_support.c
--- a/xen/arch/ia64/vmx_support.c Tue Aug 2 10:09:24 2005
+++ b/xen/arch/ia64/vmx_support.c Tue Aug 2 10:11:41 2005
@@ -37,18 +37,19 @@
struct vcpu *v = current;
struct domain *d = v->domain;
extern void do_block();
+ int port = iopacket_port(d);
do {
- if (!test_bit(IOPACKET_PORT,
+ if (!test_bit(port,
&d->shared_info->evtchn_pending[0]))
do_block();
/* Unblocked when some event is coming. Clear pending indication
* immediately if deciding to go for io assist
*/
- if (test_and_clear_bit(IOPACKET_PORT,
+ if (test_and_clear_bit(port,
&d->shared_info->evtchn_pending[0])) {
- clear_bit(IOPACKET_PORT>>5, &v->vcpu_info->evtchn_pending_sel);
+ clear_bit(port>>5, &v->vcpu_info->evtchn_pending_sel);
clear_bit(0, &v->vcpu_info->evtchn_upcall_pending);
vmx_io_assist(v);
}
@@ -66,7 +67,7 @@
* nothing losed. Next loop will check I/O channel to fix this
* window.
*/
- clear_bit(IOPACKET_PORT>>5, &v->vcpu_info->evtchn_pending_sel);
+ clear_bit(port>>5, &v->vcpu_info->evtchn_pending_sel);
}
else
break;
@@ -88,7 +89,7 @@
* This shared page contains I/O request between emulation code
* and device model.
*/
- vio = (vcpu_iodata_t *)v->arch.arch_vmx.vmx_platform.shared_page_va;
+ vio = get_vio(v->domain, v->vcpu_id);
if (!vio)
panic("Corruption: bad shared page: %lx\n", (unsigned long)vio);
@@ -127,6 +128,7 @@
struct domain *d = v->domain;
extern void vmx_vcpu_pend_batch_interrupt(VCPU *vcpu,
unsigned long *pend_irr);
+ int port = iopacket_port(d);
/* I/O emulation is atomic, so it's impossible to see execution flow
* out of vmx_wait_io, when guest is still waiting for response.
@@ -135,10 +137,10 @@
panic("!!!Bad resume to guest before I/O emulation is done.\n");
/* Clear indicator specific to interrupt delivered from DM */
- if (test_and_clear_bit(IOPACKET_PORT,
+ if (test_and_clear_bit(port,
&d->shared_info->evtchn_pending[0])) {
- if (!d->shared_info->evtchn_pending[IOPACKET_PORT >> 5])
- clear_bit(IOPACKET_PORT>>5, &v->vcpu_info->evtchn_pending_sel);
+ if (!d->shared_info->evtchn_pending[port >> 5])
+ clear_bit(port>>5, &v->vcpu_info->evtchn_pending_sel);
if (!v->vcpu_info->evtchn_pending_sel)
clear_bit(0, &v->vcpu_info->evtchn_upcall_pending);
@@ -149,11 +151,14 @@
* shares same event channel as I/O emulation, with corresponding
* indicator possibly cleared when vmx_wait_io().
*/
- vio = (vcpu_iodata_t *)v->arch.arch_vmx.vmx_platform.shared_page_va;
+ vio = get_vio(v->domain, v->vcpu_id);
if (!vio)
panic("Corruption: bad shared page: %lx\n", (unsigned long)vio);
- vmx_vcpu_pend_batch_interrupt(v, &vio->vp_intr[0]);
- memset(&vio->vp_intr[0], 0, sizeof(vio->vp_intr));
+#ifdef V_IOSAPIC_READY
+ vlapic_update_ext_irq(v);
+#else
+ panic("IOSAPIC model is missed in qemu\n");
+#endif
return;
}
diff -r b6803bdaa95a -r 7e74ac6fdea9 xen/arch/ia64/vmx_vcpu.c
--- a/xen/arch/ia64/vmx_vcpu.c Tue Aug 2 10:09:24 2005
+++ b/xen/arch/ia64/vmx_vcpu.c Tue Aug 2 10:11:41 2005
@@ -23,7 +23,7 @@
* Xuefei Xu (Anthony Xu) (Anthony.xu@xxxxxxxxx)
*/
-#include <linux/sched.h>
+#include <xen/sched.h>
#include <public/arch-ia64.h>
#include <asm/ia64_int.h>
#include <asm/vmx_vcpu.h>
@@ -201,7 +201,7 @@
struct virutal_platform_def *
vmx_vcpu_get_plat(VCPU *vcpu)
{
- return &(vcpu->arch.arch_vmx.vmx_platform);
+ return &(vcpu->domain->arch.vmx_platform);
}
@@ -375,7 +375,7 @@
vmx_vcpu_get_gr(VCPU *vcpu, unsigned reg, UINT64 * val)
{
REGS *regs=vcpu_regs(vcpu);
- u64 nat;
+ int nat;
//TODO, Eddie
if (!regs) return 0;
if (reg >= 16 && reg < 32) {
diff -r b6803bdaa95a -r 7e74ac6fdea9 xen/arch/ia64/vmx_virt.c
--- a/xen/arch/ia64/vmx_virt.c Tue Aug 2 10:09:24 2005
+++ b/xen/arch/ia64/vmx_virt.c Tue Aug 2 10:11:41 2005
@@ -1193,7 +1193,8 @@
case 23:return vmx_vcpu_set_ifs(vcpu,r2);
case 24:return vmx_vcpu_set_iim(vcpu,r2);
case 25:return vmx_vcpu_set_iha(vcpu,r2);
- case 64:return vmx_vcpu_set_lid(vcpu,r2);
+ case 64:printk("SET LID to 0x%lx\n", r2);
+ return vmx_vcpu_set_lid(vcpu,r2);
case 65:return IA64_NO_FAULT;
case 66:return vmx_vcpu_set_tpr(vcpu,r2);
case 67:return vmx_vcpu_set_eoi(vcpu,r2);
@@ -1253,9 +1254,9 @@
case 23:return cr_get(ifs);
case 24:return cr_get(iim);
case 25:return cr_get(iha);
- case 64:val = ia64_getreg(_IA64_REG_CR_LID);
- return vmx_vcpu_set_gr(vcpu,tgt,val,0);
-// case 64:return cr_get(lid);
+// case 64:val = ia64_getreg(_IA64_REG_CR_LID);
+// return vmx_vcpu_set_gr(vcpu,tgt,val,0);
+ case 64:return cr_get(lid);
case 65:
vmx_vcpu_get_ivr(vcpu,&val);
return vmx_vcpu_set_gr(vcpu,tgt,val,0);
diff -r b6803bdaa95a -r 7e74ac6fdea9 xen/arch/ia64/vtlb.c
--- a/xen/arch/ia64/vtlb.c Tue Aug 2 10:09:24 2005
+++ b/xen/arch/ia64/vtlb.c Tue Aug 2 10:11:41 2005
@@ -23,6 +23,7 @@
#include <linux/sched.h>
#include <asm/tlb.h>
+#include <asm/mm.h>
#include <asm/vmx_mm_def.h>
#include <asm/gcc_intrin.h>
#include <xen/interrupt.h>
@@ -359,7 +360,10 @@
void vtlb_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
{
thash_data_t *hash_table, *cch;
+ int flag;
rr_t vrr;
+ u64 gppn;
+ u64 ppns, ppne;
hash_table = (hcb->hash_func)(hcb->pta,
va, entry->rid, entry->ps);
@@ -375,7 +379,18 @@
*hash_table = *entry;
hash_table->next = cch;
}
- thash_insert (hcb->ts->vhpt, entry, va);
+ if(hcb->vcpu->domain->domain_id==0){
+ thash_insert(hcb->ts->vhpt, entry, va);
+ return;
+ }
+ flag = 1;
+ gppn =
(POFFSET(va,entry->ps)|PAGEALIGN((entry->ppn<<12),entry->ps))>>PAGE_SHIFT;
+ ppns = PAGEALIGN((entry->ppn<<12),entry->ps);
+ ppne = ppns + PSIZE(entry->ps);
+ if(((ppns<=0xa0000)&&(ppne>0xa0000))||((ppne>0xc0000)&&(ppns<=0xc0000)))
+ flag = 0;
+ if((__gpfn_is_mem(hcb->vcpu->domain, gppn)&&flag))
+ thash_insert(hcb->ts->vhpt, entry, va);
return ;
}
@@ -427,18 +442,22 @@
thash_data_t *hash_table, *p, *q;
thash_internal_t *priv = &hcb->priv;
int idx;
-
+
hash_table = priv->hash_base;
if ( hash_table == entry ) {
- __rem_hash_head (hcb, entry);
+// if ( PURGABLE_ENTRY(hcb, entry) ) {
+ __rem_hash_head (hcb, entry);
+// }
return ;
}
// remove from collision chain
p = hash_table;
for ( q=p->next; q; q = p->next ) {
- if ( q == entry ) {
- p->next = q->next;
- __rem_chain(hcb, entry);
+ if ( q == entry ){
+// if ( PURGABLE_ENTRY(hcb,q ) ) {
+ p->next = q->next;
+ __rem_chain(hcb, entry);
+// }
return ;
}
p = q;
diff -r b6803bdaa95a -r 7e74ac6fdea9 xen/include/asm-ia64/domain.h
--- a/xen/include/asm-ia64/domain.h Tue Aug 2 10:09:24 2005
+++ b/xen/include/asm-ia64/domain.h Tue Aug 2 10:11:41 2005
@@ -8,6 +8,7 @@
#include <asm/vmmu.h>
#include <asm/regionreg.h>
#include <public/arch-ia64.h>
+#include <asm/vmx_platform.h>
#endif // CONFIG_VTI
#include <xen/list.h>
@@ -42,6 +43,7 @@
* max_pages in domain struct, which indicates maximum memory size
*/
unsigned long max_pfn;
+ struct virutal_platform_def vmx_platform;
#endif //CONFIG_VTI
u64 xen_vastart;
u64 xen_vaend;
diff -r b6803bdaa95a -r 7e74ac6fdea9 xen/include/asm-ia64/ia64_int.h
--- a/xen/include/asm-ia64/ia64_int.h Tue Aug 2 10:09:24 2005
+++ b/xen/include/asm-ia64/ia64_int.h Tue Aug 2 10:11:41 2005
@@ -37,7 +37,9 @@
#define IA64_RFI_IN_PROGRESS 0x0002
#define IA64_RETRY 0x0003
#ifdef CONFIG_VTI
-#define IA64_FAULT 0x0002
+#undef IA64_NO_FAULT
+#define IA64_NO_FAULT 0x0000
+#define IA64_FAULT 0x0001
#endif //CONFIG_VTI
#define IA64_FORCED_IFA 0x0004
#define IA64_ILLOP_FAULT (IA64_GENEX_VECTOR | 0x00)
diff -r b6803bdaa95a -r 7e74ac6fdea9 xen/include/asm-ia64/privop.h
--- a/xen/include/asm-ia64/privop.h Tue Aug 2 10:09:24 2005
+++ b/xen/include/asm-ia64/privop.h Tue Aug 2 10:11:41 2005
@@ -138,14 +138,32 @@
IA64_INST inst;
struct { unsigned long qp:6, un14:14, r3:7, x6:6, x3:3, un1:1, major:4; };
} INST64_M47;
+
typedef union U_INST64_M1{
IA64_INST inst;
struct { unsigned long qp:6, r1:7, un7:7, r3:7, x:1, hint:2, x6:6, m:1,
major:4; };
} INST64_M1;
+
+typedef union U_INST64_M2{
+ IA64_INST inst;
+ struct { unsigned long qp:6, r1:7, r2:7, r3:7, x:1, hint:2, x6:6, m:1,
major:4; };
+} INST64_M2;
+
+typedef union U_INST64_M3{
+ IA64_INST inst;
+ struct { unsigned long qp:6, r1:7, imm7:7, r3:7, i:1, hint:2, x6:6, s:1,
major:4; };
+} INST64_M3;
+
typedef union U_INST64_M4 {
IA64_INST inst;
struct { unsigned long qp:6, un7:7, r2:7, r3:7, x:1, hint:2, x6:6, m:1,
major:4; };
} INST64_M4;
+
+typedef union U_INST64_M5 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, imm7:7, r2:7, r3:7, i:1, hint:2, x6:6, s:1,
major:4; };
+} INST64_M5;
+
typedef union U_INST64_M6 {
IA64_INST inst;
struct { unsigned long qp:6, f1:7, un7:7, r3:7, x:1, hint:2, x6:6, m:1,
major:4; };
@@ -166,7 +184,10 @@
INST64_I28 I28; // mov from ar (I unit)
#ifdef CONFIG_VTI
INST64_M1 M1; // ld integer
+ INST64_M2 M2;
+ INST64_M3 M3;
INST64_M4 M4; // st integer
+ INST64_M5 M5;
INST64_M6 M6; // ldfd floating pointer
#endif // CONFIG_VTI
INST64_M28 M28; // purge translation cache entry
diff -r b6803bdaa95a -r 7e74ac6fdea9 xen/include/asm-ia64/vmx.h
--- a/xen/include/asm-ia64/vmx.h Tue Aug 2 10:09:24 2005
+++ b/xen/include/asm-ia64/vmx.h Tue Aug 2 10:11:41 2005
@@ -23,6 +23,7 @@
#define _ASM_IA64_VT_H
#define RR7_SWITCH_SHIFT 12 /* 4k enough */
+#include <public/io/ioreq.h>
extern void identify_vmx_feature(void);
extern unsigned int vmx_enabled;
@@ -38,4 +39,19 @@
extern void vmx_wait_io(void);
extern void vmx_io_assist(struct vcpu *v);
+
+static inline vcpu_iodata_t *get_vio(struct domain *d, unsigned long cpu)
+{
+ return &((shared_iopage_t
*)d->arch.vmx_platform.shared_page_va)->vcpu_iodata[cpu];
+}
+
+static inline int iopacket_port(struct domain *d)
+{
+ return ((shared_iopage_t
*)d->arch.vmx_platform.shared_page_va)->sp_global.eport;
+}
+
+static inline shared_iopage_t *get_sp(struct domain *d)
+{
+ return (shared_iopage_t *)d->arch.vmx_platform.shared_page_va;
+}
#endif /* _ASM_IA64_VT_H */
diff -r b6803bdaa95a -r 7e74ac6fdea9 xen/include/asm-ia64/vmx_uaccess.h
--- a/xen/include/asm-ia64/vmx_uaccess.h Tue Aug 2 10:09:24 2005
+++ b/xen/include/asm-ia64/vmx_uaccess.h Tue Aug 2 10:11:41 2005
@@ -40,6 +40,8 @@
*/
asm (".section \"__ex_table\", \"a\"\n\t.previous");
+/* VT-i reserves bit 60 for the VMM; guest addresses have bit 60 = bit 59 */
+#define IS_VMM_ADDRESS(addr) ((((addr) >> 60) ^ ((addr) >> 59)) & 1)
/* For back compatibility */
#define __access_ok(addr, size, segment) 1
#define access_ok(addr, size, segment) __access_ok((addr), (size), (segment))
diff -r b6803bdaa95a -r 7e74ac6fdea9 xen/include/asm-ia64/vmx_vcpu.h
--- a/xen/include/asm-ia64/vmx_vcpu.h Tue Aug 2 10:09:24 2005
+++ b/xen/include/asm-ia64/vmx_vcpu.h Tue Aug 2 10:11:41 2005
@@ -105,6 +105,10 @@
extern void vtm_interruption_update(VCPU *vcpu, vtime_t* vtm);
extern void vtm_domain_out(VCPU *vcpu);
extern void vtm_domain_in(VCPU *vcpu);
+#ifdef V_IOSAPIC_READY
+extern void vlapic_update_ext_irq(VCPU *vcpu);
+extern void vlapic_update_shared_info(VCPU *vcpu);
+#endif
extern void vlsapic_reset(VCPU *vcpu);
extern int vmx_check_pending_irq(VCPU *vcpu);
extern void guest_write_eoi(VCPU *vcpu);
@@ -399,6 +403,9 @@
vmx_vcpu_set_lid(VCPU *vcpu, u64 val)
{
VPD_CR(vcpu,lid)=val;
+#ifdef V_IOSAPIC_READY
+ vlapic_update_shared_info(vcpu);
+#endif
return IA64_NO_FAULT;
}
extern IA64FAULT vmx_vcpu_set_tpr(VCPU *vcpu, u64 val);
diff -r b6803bdaa95a -r 7e74ac6fdea9 xen/include/asm-ia64/vmx_vpd.h
--- a/xen/include/asm-ia64/vmx_vpd.h Tue Aug 2 10:09:24 2005
+++ b/xen/include/asm-ia64/vmx_vpd.h Tue Aug 2 10:11:41 2005
@@ -25,7 +25,6 @@
#ifndef __ASSEMBLY__
#include <asm/vtm.h>
-#include <asm/vmx_platform.h>
#include <public/arch-ia64.h>
#define VPD_SHIFT 17 /* 128K requirement */
@@ -84,7 +83,6 @@
unsigned long rfi_ipsr;
unsigned long rfi_ifs;
unsigned long in_service[4]; // vLsapic inservice IRQ bits
- struct virutal_platform_def vmx_platform;
unsigned long flags;
};
@@ -126,7 +124,6 @@
#endif //__ASSEMBLY__
-
// VPD field offset
#define VPD_VAC_START_OFFSET 0
#define VPD_VDC_START_OFFSET 8
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
|