# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID f294acb258585f9f42e292cf37a3d3ddff578217
# Parent 63995acdd34a8fd4e6c7fc622642c90729b458c3
# Parent 04dfb5158f3aa8eaa1efd36b358a1f1722e9b801
Merge.
diff -r 63995acdd34a -r f294acb25858 xen/arch/ia64/Makefile
--- a/xen/arch/ia64/Makefile Wed Aug 3 09:35:16 2005
+++ b/xen/arch/ia64/Makefile Wed Aug 3 09:35:38 2005
@@ -34,8 +34,27 @@
> $(BASEDIR)/System.map
-asm-offsets.s: asm-offsets.c $(BASEDIR)/include/asm-ia64/.offsets.h.stamp
+asm-offsets.s: asm-offsets.c $(BASEDIR)/include/asm-ia64/.offsets.h.stamp
$(BASEDIR)/include/asm-ia64/asm-xsi-offsets.h
$(CC) $(CFLAGS) -S -o $@ $<
+
+asm-xsi-offsets.s: asm-xsi-offsets.c
+ $(CC) $(CFLAGS) -S -o $@ $<
+
+$(BASEDIR)/include/asm-ia64/asm-xsi-offsets.h: asm-xsi-offsets.s
+ @(set -e; \
+ echo "/*"; \
+ echo " * DO NOT MODIFY."; \
+ echo " *"; \
+ echo " * This file was auto-generated from $<"; \
+ echo " *"; \
+ echo " */"; \
+ echo ""; \
+ echo "#ifndef __ASM_XSI_OFFSETS_H__"; \
+ echo "#define __ASM_XSI_OFFSETS_H__"; \
+ echo ""; \
+ sed -ne "/^->/{s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2
/* \3 */:; s:->::; p;}"; \
+ echo ""; \
+ echo "#endif") <$< >$@
$(BASEDIR)/include/asm-ia64/.offsets.h.stamp:
# Need such symbol link to make linux headers available
@@ -60,6 +79,7 @@
clean:
rm -f *.o *~ core xen.lds.s
$(BASEDIR)/include/asm-ia64/.offsets.h.stamp asm-offsets.s
+ rm -f asm-xsi-offsets.s $(BASEDIR)/include/asm-ia64/asm-xsi-offsets.h
rm -f lib/*.o
.PHONY: default clean
diff -r 63995acdd34a -r f294acb25858 xen/arch/ia64/asm-offsets.c
--- a/xen/arch/ia64/asm-offsets.c Wed Aug 3 09:35:16 2005
+++ b/xen/arch/ia64/asm-offsets.c Wed Aug 3 09:35:38 2005
@@ -42,29 +42,34 @@
BLANK();
- DEFINE(XSI_PSR_IC_OFS, offsetof(vcpu_info_t,
arch.interrupt_collection_enabled));
- DEFINE(XSI_PSR_IC, (SHAREDINFO_ADDR+offsetof(vcpu_info_t,
arch.interrupt_collection_enabled)));
- DEFINE(XSI_PSR_I_OFS, offsetof(vcpu_info_t,
arch.interrupt_delivery_enabled));
- DEFINE(XSI_IIP_OFS, offsetof(vcpu_info_t, arch.iip));
- DEFINE(XSI_IFA_OFS, offsetof(vcpu_info_t, arch.ifa));
- DEFINE(XSI_ITIR_OFS, offsetof(vcpu_info_t, arch.itir));
- DEFINE(XSI_IPSR, (SHAREDINFO_ADDR+offsetof(vcpu_info_t, arch.ipsr)));
- DEFINE(XSI_IPSR_OFS, offsetof(vcpu_info_t, arch.ipsr));
- DEFINE(XSI_IFS_OFS, offsetof(vcpu_info_t, arch.ifs));
- DEFINE(XSI_ISR_OFS, offsetof(vcpu_info_t, arch.isr));
- DEFINE(XSI_IIM_OFS, offsetof(vcpu_info_t, arch.iim));
- DEFINE(XSI_BANKNUM_OFS, offsetof(vcpu_info_t, arch.banknum));
- DEFINE(XSI_BANK0_OFS, offsetof(vcpu_info_t, arch.bank0_regs[0]));
- DEFINE(XSI_BANK1_OFS, offsetof(vcpu_info_t, arch.bank1_regs[0]));
- DEFINE(XSI_RR0_OFS, offsetof(vcpu_info_t, arch.rrs[0]));
- DEFINE(XSI_METAPHYS_OFS, offsetof(vcpu_info_t, arch.metaphysical_mode));
- DEFINE(XSI_PRECOVER_IFS_OFS, offsetof(vcpu_info_t, arch.precover_ifs));
- DEFINE(XSI_INCOMPL_REG_OFS, offsetof(vcpu_info_t,
arch.incomplete_regframe));
- DEFINE(XSI_PEND_OFS, offsetof(vcpu_info_t, arch.pending_interruption));
- DEFINE(XSI_RR0_OFS, offsetof(vcpu_info_t, arch.rrs[0]));
- DEFINE(XSI_TPR_OFS, offsetof(vcpu_info_t, arch.tpr));
- DEFINE(XSI_PTA_OFS, offsetof (vcpu_info_t, arch.pta));
- DEFINE(XSI_ITV_OFS, offsetof(vcpu_info_t, arch.itv));
+ DEFINE(XSI_PSR_IC_OFS, offsetof(mapped_regs_t,
interrupt_collection_enabled));
+ DEFINE(XSI_PSR_IC, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t,
interrupt_collection_enabled)));
+ DEFINE(XSI_PSR_I_OFS, offsetof(mapped_regs_t,
interrupt_delivery_enabled));
+ DEFINE(XSI_IIP_OFS, offsetof(mapped_regs_t, iip));
+ DEFINE(XSI_IIP, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, iip)));
+ DEFINE(XSI_IFA_OFS, offsetof(mapped_regs_t, ifa));
+ DEFINE(XSI_IFA, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, ifa)));
+ DEFINE(XSI_ITIR_OFS, offsetof(mapped_regs_t, itir));
+ DEFINE(XSI_ITIR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, itir)));
+
+ DEFINE(XSI_IPSR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, ipsr)));
+ DEFINE(XSI_IPSR_OFS, offsetof(mapped_regs_t, ipsr));
+ DEFINE(XSI_IFS_OFS, offsetof(mapped_regs_t, ifs));
+ DEFINE(XSI_IFS, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, ifs)));
+ DEFINE(XSI_ISR_OFS, offsetof(mapped_regs_t, isr));
+ DEFINE(XSI_IIM_OFS, offsetof(mapped_regs_t, iim));
+ DEFINE(XSI_BANKNUM_OFS, offsetof(mapped_regs_t, banknum));
+ DEFINE(XSI_BANK0_OFS, offsetof(mapped_regs_t, bank0_regs[0]));
+ DEFINE(XSI_BANK1_OFS, offsetof(mapped_regs_t, bank1_regs[0]));
+ DEFINE(XSI_RR0_OFS, offsetof(mapped_regs_t, rrs[0]));
+ DEFINE(XSI_METAPHYS_OFS, offsetof(mapped_regs_t, metaphysical_mode));
+ DEFINE(XSI_PRECOVER_IFS_OFS, offsetof(mapped_regs_t, precover_ifs));
+ DEFINE(XSI_INCOMPL_REG_OFS, offsetof(mapped_regs_t,
incomplete_regframe));
+ DEFINE(XSI_PEND_OFS, offsetof(mapped_regs_t, pending_interruption));
+ DEFINE(XSI_RR0_OFS, offsetof(mapped_regs_t, rrs[0]));
+ DEFINE(XSI_TPR_OFS, offsetof(mapped_regs_t, tpr));
+ DEFINE(XSI_PTA_OFS, offsetof(mapped_regs_t, pta));
+ DEFINE(XSI_ITV_OFS, offsetof(mapped_regs_t, itv));
//DEFINE(IA64_TASK_BLOCKED_OFFSET,offsetof (struct task_struct,
blocked));
//DEFINE(IA64_TASK_CLEAR_CHILD_TID_OFFSET,offsetof (struct task_struct,
clear_child_tid));
//DEFINE(IA64_TASK_GROUP_LEADER_OFFSET, offsetof (struct task_struct,
group_leader));
diff -r 63995acdd34a -r f294acb25858 xen/arch/ia64/dom_fw.c
--- a/xen/arch/ia64/dom_fw.c Wed Aug 3 09:35:16 2005
+++ b/xen/arch/ia64/dom_fw.c Wed Aug 3 09:35:38 2005
@@ -291,6 +291,10 @@
long r11 = 0;
long status = -1;
+#define USE_PAL_EMULATOR
+#ifdef USE_PAL_EMULATOR
+ return pal_emulator_static(index);
+#endif
if (running_on_sim) return pal_emulator_static(index);
if (index >= PAL_COPY_PAL) {
printk("xen_pal_emulator: UNIMPLEMENTED PAL CALL %d!!!!\n",
@@ -314,12 +318,10 @@
break;
case PAL_PTCE_INFO:
{
- ia64_ptce_info_t ptce;
- status = ia64_get_ptce(&ptce);
- if (status != 0) break;
- r9 = ptce.base;
- r10 = (ptce.count[0]<<32)|(ptce.count[1]&0xffffffffL);
- r11 = (ptce.stride[0]<<32)|(ptce.stride[1]&0xffffffffL);
+ // return hard-coded xen-specific values because ptc.e
+ // is emulated on xen to always flush everything
+ // these values result in only one ptc.e instruction
+ status = 0; r9 = 0; r10 = (1L << 32) | 1L; r11 = 0;
}
break;
case PAL_VERSION:
@@ -335,7 +337,10 @@
status = ia64_pal_cache_summary(&r9,&r10);
break;
case PAL_VM_SUMMARY:
- status = ia64_pal_vm_summary(&r9,&r10);
+ // FIXME: what should xen return for these, figure out later
+ // For now, linux does the right thing if pal call fails
+ // In particular, rid_size must be set properly!
+ //status = ia64_pal_vm_summary(&r9,&r10);
break;
case PAL_RSE_INFO:
status = ia64_pal_rse_info(&r9,&r10);
diff -r 63995acdd34a -r f294acb25858 xen/arch/ia64/domain.c
--- a/xen/arch/ia64/domain.c Wed Aug 3 09:35:16 2005
+++ b/xen/arch/ia64/domain.c Wed Aug 3 09:35:38 2005
@@ -212,6 +212,10 @@
*/
memset(d->shared_info, 0, PAGE_SIZE);
+ d->shared_info->vcpu_data[v->vcpu_id].arch.privregs =
+ alloc_xenheap_pages(get_order(sizeof(mapped_regs_t)));
+ printf("arch_vcpu_info=%p\n",
d->shared_info->vcpu_data[0].arch.privregs);
+ memset(d->shared_info->vcpu_data[v->vcpu_id].arch.privregs, 0,
PAGE_SIZE);
v->vcpu_info = &d->shared_info->vcpu_data[v->vcpu_id];
/* Mask all events, and specific port will be unmasked
* when customer subscribes to it.
@@ -232,8 +236,8 @@
/* FIXME: This is identity mapped address for xenheap.
* Do we need it at all?
*/
- d->xen_vastart = 0xf000000000000000;
- d->xen_vaend = 0xf300000000000000;
+ d->xen_vastart = XEN_START_ADDR;
+ d->xen_vaend = XEN_END_ADDR;
d->arch.breakimm = 0x1000;
}
#else // CONFIG_VTI
@@ -252,12 +256,16 @@
while (1);
}
memset(d->shared_info, 0, PAGE_SIZE);
+ d->shared_info->vcpu_data[0].arch.privregs =
+ alloc_xenheap_pages(get_order(sizeof(mapped_regs_t)));
+ printf("arch_vcpu_info=%p\n",
d->shared_info->vcpu_data[0].arch.privregs);
+ memset(d->shared_info->vcpu_data[0].arch.privregs, 0, PAGE_SIZE);
v->vcpu_info = &(d->shared_info->vcpu_data[0]);
- d->max_pages = (128*1024*1024)/PAGE_SIZE; // 128MB default // FIXME
+ d->max_pages = (128UL*1024*1024)/PAGE_SIZE; // 128MB default // FIXME
if ((d->arch.metaphysical_rr0 = allocate_metaphysical_rr0()) == -1UL)
BUG();
- v->vcpu_info->arch.metaphysical_mode = 1;
+ VCPU(v, metaphysical_mode) = 1;
v->arch.metaphysical_rr0 = d->arch.metaphysical_rr0;
v->arch.metaphysical_saved_rr0 = d->arch.metaphysical_rr0;
#define DOMAIN_RID_BITS_DEFAULT 18
@@ -266,9 +274,9 @@
v->arch.starting_rid = d->arch.starting_rid;
v->arch.ending_rid = d->arch.ending_rid;
// the following will eventually need to be negotiated dynamically
- d->xen_vastart = 0xf000000000000000;
- d->xen_vaend = 0xf300000000000000;
- d->shared_info_va = 0xf100000000000000;
+ d->xen_vastart = XEN_START_ADDR;
+ d->xen_vaend = XEN_END_ADDR;
+ d->shared_info_va = SHAREDINFO_ADDR;
d->arch.breakimm = 0x1000;
v->arch.breakimm = d->arch.breakimm;
@@ -292,7 +300,15 @@
printf("arch_getdomaininfo_ctxt\n");
c->regs = *regs;
- c->vcpu = v->vcpu_info->arch;
+ c->vcpu.evtchn_vector = v->vcpu_info->arch.evtchn_vector;
+#if 0
+ if (c->vcpu.privregs && copy_to_user(c->vcpu.privregs,
+ v->vcpu_info->arch.privregs, sizeof(mapped_regs_t))) {
+ printk("Bad ctxt address: 0x%lx\n", c->vcpu.privregs);
+ return -EFAULT;
+ }
+#endif
+
c->shared = v->domain->shared_info->arch;
}
@@ -307,13 +323,20 @@
regs->cr_ipsr |= 2UL << IA64_PSR_CPL0_BIT;
regs->ar_rsc |= (2 << 2); /* force PL2/3 */
- v->vcpu_info->arch = c->vcpu;
+ v->vcpu_info->arch.evtchn_vector = c->vcpu.evtchn_vector;
+ if ( c->vcpu.privregs && copy_from_user(v->vcpu_info->arch.privregs,
+ c->vcpu.privregs, sizeof(mapped_regs_t))) {
+ printk("Bad ctxt address in arch_set_info_guest: 0x%lx\n",
c->vcpu.privregs);
+ return -EFAULT;
+ }
+
init_all_rr(v);
// this should be in userspace
- regs->r28 = dom_fw_setup(v->domain,"nomca nosmp xencons=ttyS
console=ttyS0",256L); //FIXME
- v->vcpu_info->arch.banknum = 1;
- v->vcpu_info->arch.metaphysical_mode = 1;
+ regs->r28 = dom_fw_setup(v->domain,"nomca nosmp xencons=tty0
console=tty0 root=/dev/hda1",256L); //FIXME
+ v->arch.domain_itm_last = -1L;
+ VCPU(v, banknum) = 1;
+ VCPU(v, metaphysical_mode) = 1;
v->domain->shared_info->arch = c->shared;
return 0;
@@ -325,6 +348,7 @@
struct domain *d = v->domain;
int i, rc, ret;
unsigned long progress = 0;
+ shared_iopage_t *sp;
if ( test_bit(_VCPUF_initialised, &v->vcpu_flags) )
return 0;
@@ -350,8 +374,17 @@
/* FIXME: only support PMT table continuously by far */
d->arch.pmt = __va(c->pt_base);
d->arch.max_pfn = c->pt_max_pfn;
- v->arch.arch_vmx.vmx_platform.shared_page_va = __va(c->share_io_pg);
- memset((char *)__va(c->share_io_pg),0,PAGE_SIZE);
+ d->arch.vmx_platform.shared_page_va = __va(c->share_io_pg);
+ sp = get_sp(d);
+ memset((char *)sp,0,PAGE_SIZE);
+ /* FIXME: temp due to old CP */
+ sp->sp_global.eport = 2;
+#ifdef V_IOSAPIC_READY
+ sp->vcpu_number = 1;
+#endif
+ /* TEMP */
+ d->arch.vmx_platform.pib_base = 0xfee00000UL;
+
if (c->flags & VGCF_VMX_GUEST) {
if (!vmx_enabled)
@@ -370,7 +403,7 @@
if (v == d->vcpu[0]) {
memset(&d->shared_info->evtchn_mask[0], 0xff,
sizeof(d->shared_info->evtchn_mask));
- clear_bit(IOPACKET_PORT, &d->shared_info->evtchn_mask[0]);
+ clear_bit(iopacket_port(d), &d->shared_info->evtchn_mask[0]);
}
/* Setup domain context. Actually IA-64 is a bit different with
* x86, with almost all system resources better managed by HV
@@ -380,8 +413,8 @@
new_thread(v, c->guest_iip, 0, 0);
- d->xen_vastart = 0xf000000000000000;
- d->xen_vaend = 0xf300000000000000;
+ d->xen_vastart = XEN_START_ADDR;
+ d->xen_vaend = XEN_END_ADDR;
d->arch.breakimm = 0x1000 + d->domain_id;
v->arch._thread.on_ustack = 0;
@@ -394,7 +427,13 @@
void arch_do_boot_vcpu(struct vcpu *v)
{
+ struct domain *d = v->domain;
printf("arch_do_boot_vcpu: not implemented\n");
+
+ d->shared_info->vcpu_data[v->vcpu_id].arch.privregs =
+ alloc_xenheap_pages(get_order(sizeof(mapped_regs_t)));
+ printf("arch_vcpu_info=%p\n",
d->shared_info->vcpu_data[v->vcpu_id].arch.privregs);
+ memset(d->shared_info->vcpu_data[v->vcpu_id].arch.privregs, 0,
PAGE_SIZE);
return;
}
@@ -449,8 +488,8 @@
VPD_CR(v, dcr) = 0;
} else {
regs->r28 = dom_fw_setup(d,saved_command_line,256L);
- v->vcpu_info->arch.banknum = 1;
- v->vcpu_info->arch.metaphysical_mode = 1;
+ VCPU(v, banknum) = 1;
+ VCPU(v, metaphysical_mode) = 1;
d->shared_info->arch.flags = (d == dom0) ?
(SIF_INITDOMAIN|SIF_PRIVILEGED|SIF_BLK_BE_DOMAIN|SIF_NET_BE_DOMAIN|SIF_USB_BE_DOMAIN)
: 0;
}
}
@@ -482,8 +521,8 @@
regs->ar_fpsr = FPSR_DEFAULT;
init_all_rr(v);
regs->r28 = dom_fw_setup(d,saved_command_line,256L); //FIXME
- v->vcpu_info->arch.banknum = 1;
- v->vcpu_info->arch.metaphysical_mode = 1;
+ VCPU(v, banknum) = 1;
+ VCPU(v, metaphysical_mode) = 1;
d->shared_info->arch.flags = (d == dom0) ?
(SIF_INITDOMAIN|SIF_PRIVILEGED|SIF_BLK_BE_DOMAIN|SIF_NET_BE_DOMAIN|SIF_USB_BE_DOMAIN)
: 0;
}
#endif // CONFIG_VTI
@@ -894,7 +933,6 @@
/* Set up shared-info area. */
update_dom_time(d);
- d->shared_info->domain_time = 0;
/* Mask all upcalls... */
for ( i = 0; i < MAX_VIRT_CPUS; i++ )
@@ -1072,12 +1110,12 @@
#endif
serial_input_init();
if (d == dom0) {
- v->vcpu_info->arch.delivery_mask[0] = -1L;
- v->vcpu_info->arch.delivery_mask[1] = -1L;
- v->vcpu_info->arch.delivery_mask[2] = -1L;
- v->vcpu_info->arch.delivery_mask[3] = -1L;
+ VCPU(v, delivery_mask[0]) = -1L;
+ VCPU(v, delivery_mask[1]) = -1L;
+ VCPU(v, delivery_mask[2]) = -1L;
+ VCPU(v, delivery_mask[3]) = -1L;
}
- else __set_bit(0x30,v->vcpu_info->arch.delivery_mask);
+ else __set_bit(0x30,VCPU(v, delivery_mask));
return 0;
}
@@ -1233,12 +1271,12 @@
#endif
serial_input_init();
if (d == dom0) {
- v->vcpu_info->arch.delivery_mask[0] = -1L;
- v->vcpu_info->arch.delivery_mask[1] = -1L;
- v->vcpu_info->arch.delivery_mask[2] = -1L;
- v->vcpu_info->arch.delivery_mask[3] = -1L;
- }
- else __set_bit(0x30,v->vcpu_info->arch.delivery_mask);
+ VCPU(v, delivery_mask[0]) = -1L;
+ VCPU(v, delivery_mask[1]) = -1L;
+ VCPU(v, delivery_mask[2]) = -1L;
+ VCPU(v, delivery_mask[3]) = -1L;
+ }
+ else __set_bit(0x30, VCPU(v, delivery_mask));
return 0;
}
@@ -1285,7 +1323,7 @@
#endif
new_thread(v, pkern_entry, 0, 0);
printk("new_thread returns\n");
- __set_bit(0x30,v->vcpu_info->arch.delivery_mask);
+ __set_bit(0x30, VCPU(v, delivery_mask));
return 0;
}
diff -r 63995acdd34a -r f294acb25858 xen/arch/ia64/hypercall.c
--- a/xen/arch/ia64/hypercall.c Wed Aug 3 09:35:16 2005
+++ b/xen/arch/ia64/hypercall.c Wed Aug 3 09:35:38 2005
@@ -41,13 +41,13 @@
// to a yet-to-be-found bug where pending_interruption
// is zero when it shouldn't be. Since PAL is called
// in the idle loop, this should resolve it
- v->vcpu_info->arch.pending_interruption = 1;
+ VCPU(v,pending_interruption) = 1;
#endif
if (regs->r28 == PAL_HALT_LIGHT) {
#define SPURIOUS_VECTOR 15
pi = vcpu_check_pending_interrupts(v);
if (pi != SPURIOUS_VECTOR) {
- if (!v->vcpu_info->arch.pending_interruption)
+ if (!VCPU(v,pending_interruption))
idle_when_pending++;
vcpu_pend_unspecified_interrupt(v);
//printf("idle w/int#%d pending!\n",pi);
diff -r 63995acdd34a -r f294acb25858 xen/arch/ia64/hyperprivop.S
--- a/xen/arch/ia64/hyperprivop.S Wed Aug 3 09:35:16 2005
+++ b/xen/arch/ia64/hyperprivop.S Wed Aug 3 09:35:38 2005
@@ -18,11 +18,17 @@
#define FAST_HYPERPRIVOPS
#define FAST_HYPERPRIVOP_CNT
#define FAST_REFLECT_CNT
-#define FAST_TICK
+//#define FAST_TICK
#define FAST_BREAK
#define FAST_ACCESS_REFLECT
+#define FAST_RFI
+#define FAST_SSM_I
+#define FAST_PTC_GA
#undef RFI_TO_INTERRUPT // not working yet
#endif
+
+// FIXME: turn off for now... but NaTs may crash Xen so re-enable soon!
+//#define HANDLE_AR_UNAT
// FIXME: This is defined in include/asm-ia64/hw_irq.h but this
// doesn't appear to be include'able from assembly?
@@ -183,6 +189,9 @@
// r19 == vpsr.ic (low 32 bits) | vpsr.i (high 32 bits)
// r31 == pr
ENTRY(hyper_ssm_i)
+#ifndef FAST_SSM_I
+ br.spnt.few dispatch_break_fault ;;
+#endif
// give up for now if: ipsr.be==1, ipsr.pp==1
mov r30=cr.ipsr;;
mov r29=cr.iip;;
@@ -259,7 +268,8 @@
adds r2=XSI_BANK1_OFS-XSI_PSR_IC_OFS,r18;
adds r3=(XSI_BANK1_OFS+8)-XSI_PSR_IC_OFS,r18;;
bsw.1;;
- // FIXME: need to handle ar.unat!
+ // FIXME?: ar.unat is not really handled correctly,
+ // but may not matter if the OS is NaT-clean
.mem.offset 0,0; st8.spill [r2]=r16,16;
.mem.offset 8,0; st8.spill [r3]=r17,16 ;;
.mem.offset 0,0; st8.spill [r2]=r18,16;
@@ -425,10 +435,12 @@
mov cr.iip=r24;;
// OK, now all set to go except for switch to virtual bank0
mov r30=r2; mov r29=r3;;
+#ifdef HANDLE_AR_UNAT
+ mov r28=ar.unat;
+#endif
adds r2=XSI_BANK1_OFS-XSI_PSR_IC_OFS,r18;
adds r3=(XSI_BANK1_OFS+8)-XSI_PSR_IC_OFS,r18;;
bsw.1;;
- // FIXME: need to handle ar.unat!
.mem.offset 0,0; st8.spill [r2]=r16,16;
.mem.offset 8,0; st8.spill [r3]=r17,16 ;;
.mem.offset 0,0; st8.spill [r2]=r18,16;
@@ -445,9 +457,18 @@
.mem.offset 8,0; st8.spill [r3]=r29,16 ;;
.mem.offset 0,0; st8.spill [r2]=r30,16;
.mem.offset 8,0; st8.spill [r3]=r31,16 ;;
- movl r31=XSI_IPSR;;
+#ifdef HANDLE_AR_UNAT
+ // bank0 regs have no NaT bit, so ensure they are NaT clean
+ mov r16=r0; mov r17=r0; mov r18=r0; mov r19=r0;
+ mov r20=r0; mov r21=r0; mov r22=r0; mov r23=r0;
+ mov r24=r0; mov r25=r0; mov r26=r0; mov r27=r0;
+ mov r28=r0; mov r29=r0; mov r30=r0; movl r31=XSI_IPSR;;
+#endif
bsw.0 ;;
mov r2=r30; mov r3=r29;;
+#ifdef HANDLE_AR_UNAT
+ mov ar.unat=r28;
+#endif
adds r20=XSI_BANKNUM_OFS-XSI_PSR_IC_OFS,r18 ;;
st4 [r20]=r0 ;;
fast_tick_reflect_done:
@@ -567,10 +588,12 @@
mov cr.iip=r20;;
// OK, now all set to go except for switch to virtual bank0
mov r30=r2; mov r29=r3;;
+#ifdef HANDLE_AR_UNAT
+ mov r28=ar.unat;
+#endif
adds r2=XSI_BANK1_OFS-XSI_PSR_IC_OFS,r18;
adds r3=(XSI_BANK1_OFS+8)-XSI_PSR_IC_OFS,r18;;
bsw.1;;
- // FIXME: need to handle ar.unat!
.mem.offset 0,0; st8.spill [r2]=r16,16;
.mem.offset 8,0; st8.spill [r3]=r17,16 ;;
.mem.offset 0,0; st8.spill [r2]=r18,16;
@@ -587,9 +610,19 @@
.mem.offset 8,0; st8.spill [r3]=r29,16 ;;
.mem.offset 0,0; st8.spill [r2]=r30,16;
.mem.offset 8,0; st8.spill [r3]=r31,16 ;;
+#ifdef HANDLE_AR_UNAT
+ // bank0 regs have no NaT bit, so ensure they are NaT clean
+ mov r16=r0; mov r17=r0; mov r18=r0; mov r19=r0;
+ mov r20=r0; mov r21=r0; mov r22=r0; mov r23=r0;
+ mov r24=r0; mov r25=r0; mov r26=r0; mov r27=r0;
+ mov r28=r0; mov r29=r0; mov r30=r0; movl r31=XSI_IPSR;;
+#endif
movl r31=XSI_IPSR;;
bsw.0 ;;
mov r2=r30; mov r3=r29;;
+#ifdef HANDLE_AR_UNAT
+ mov ar.unat=r28;
+#endif
adds r20=XSI_BANKNUM_OFS-XSI_PSR_IC_OFS,r18 ;;
st4 [r20]=r0 ;;
mov pr=r31,-1 ;;
@@ -637,6 +670,9 @@
// ensure that, if giving up, registers at entry to fast_hyperprivop unchanged
ENTRY(hyper_rfi)
+#ifndef FAST_RFI
+ br.spnt.few dispatch_break_fault ;;
+#endif
// if no interrupts pending, proceed
mov r30=r0
cmp.eq p7,p0=r20,r0
@@ -736,7 +772,8 @@
adds r2=XSI_BANK1_OFS-XSI_PSR_IC_OFS,r18;
adds r3=(XSI_BANK1_OFS+8)-XSI_PSR_IC_OFS,r18;;
bsw.1;;
- // FIXME: need to handle ar.unat!
+ // FIXME?: ar.unat is not really handled correctly,
+ // but may not matter if the OS is NaT-clean
.mem.offset 0,0; ld8.fill r16=[r2],16 ;
.mem.offset 8,0; ld8.fill r17=[r3],16 ;;
.mem.offset 0,0; ld8.fill r18=[r2],16 ;
@@ -1461,6 +1498,9 @@
#ifdef CONFIG_SMP
FIXME: ptc.ga instruction requires spinlock for SMP
#endif
+#ifndef FAST_PTC_GA
+ br.spnt.few dispatch_break_fault ;;
+#endif
// FIXME: validate not flushing Xen addresses
#ifdef FAST_HYPERPRIVOP_CNT
movl r20=fast_hyperpriv_cnt+(8*XEN_HYPER_PTC_GA);;
diff -r 63995acdd34a -r f294acb25858 xen/arch/ia64/mmio.c
--- a/xen/arch/ia64/mmio.c Wed Aug 3 09:35:16 2005
+++ b/xen/arch/ia64/mmio.c Wed Aug 3 09:35:38 2005
@@ -66,7 +66,7 @@
default:
if ( PIB_LOW_HALF(pib_off) ) { // lower half
if ( s != 8 || ma != 0x4 /* UC */ ) {
- panic("Undefined IPI-LHF write!\n");
+ panic("Undefined IPI-LHF write with s %d, ma %d!\n", s, ma);
}
else {
write_ipi(vcpu, pib_off, *(uint64_t *)src);
@@ -135,13 +135,13 @@
ioreq_t *p;
unsigned long addr;
- vio = (vcpu_iodata_t *) v->arch.arch_vmx.vmx_platform.shared_page_va;
+ vio = get_vio(v->domain, v->vcpu_id);
if (vio == 0) {
panic("bad shared page: %lx", (unsigned long)vio);
}
p = &vio->vp_ioreq;
p->addr = pa;
- p->size = 1<<s;
+ p->size = s;
p->count = 1;
p->dir = dir;
if(dir==IOREQ_WRITE) //write;
@@ -152,9 +152,9 @@
set_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
p->state = STATE_IOREQ_READY;
- evtchn_send(IOPACKET_PORT);
+ evtchn_send(iopacket_port(v->domain));
vmx_wait_io();
- if(dir){ //read
+ if(dir==IOREQ_READ){ //read
*val=p->u.data;
}
return;
@@ -168,13 +168,13 @@
ioreq_t *p;
unsigned long addr;
- vio = (vcpu_iodata_t *) v->arch.arch_vmx.vmx_platform.shared_page_va;
+ vio = get_vio(v->domain, v->vcpu_id);
if (vio == 0) {
panic("bad shared page: %lx");
}
p = &vio->vp_ioreq;
p->addr = TO_LEGACY_IO(pa&0x3ffffffUL);
- p->size = 1<<s;
+ p->size = s;
p->count = 1;
p->dir = dir;
if(dir==IOREQ_WRITE) //write;
@@ -185,11 +185,20 @@
set_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
p->state = STATE_IOREQ_READY;
- evtchn_send(IOPACKET_PORT);
+ evtchn_send(iopacket_port(v->domain));
+
vmx_wait_io();
- if(dir){ //read
+ if(dir==IOREQ_READ){ //read
*val=p->u.data;
}
+#ifdef DEBUG_PCI
+ if(dir==IOREQ_WRITE)
+ if(p->addr == 0xcf8UL)
+ printk("Write 0xcf8, with val [0x%lx]\n", p->u.data);
+ else
+ if(p->addr == 0xcfcUL)
+ printk("Read 0xcfc, with val [0x%lx]\n", p->u.data);
+#endif //DEBUG_PCI
return;
}
@@ -204,12 +213,13 @@
switch (iot) {
case GPFN_PIB:
if(!dir)
- pib_write(vcpu, src_pa - v_plat->pib_base, dest, s, ma);
+ pib_write(vcpu, dest, src_pa - v_plat->pib_base, s, ma);
else
pib_read(vcpu, src_pa - v_plat->pib_base, dest, s, ma);
break;
case GPFN_GFW:
break;
+ case GPFN_IOSAPIC:
case GPFN_FRAME_BUFFER:
case GPFN_LOW_MMIO:
low_mmio_access(vcpu, src_pa, dest, s, dir);
@@ -217,7 +227,6 @@
case GPFN_LEGACY_IO:
legacy_io_access(vcpu, src_pa, dest, s, dir);
break;
- case GPFN_IOSAPIC:
default:
panic("Bad I/O access\n");
break;
@@ -342,6 +351,8 @@
LID lid;
for (i=0; i<MAX_VIRT_CPUS; i++) {
vcpu = d->vcpu[i];
+ if (!vcpu)
+ continue;
lid.val = VPD_CR(vcpu, lid);
if ( lid.id == id && lid.eid == eid ) {
return vcpu;
@@ -379,15 +390,16 @@
inst_type 0:integer 1:floating point
*/
extern IA64_BUNDLE __vmx_get_domain_bundle(u64 iip);
-
+#define SL_INTEGER 0 // store/load interger
+#define SL_FLOATING 1 // store/load floating
void emulate_io_inst(VCPU *vcpu, u64 padr, u64 ma)
{
REGS *regs;
IA64_BUNDLE bundle;
- int slot, dir, inst_type=0;
+ int slot, dir, inst_type;
size_t size;
- u64 data, value, slot1a, slot1b;
+ u64 data, value,post_update, slot1a, slot1b, temp;
INST64 inst;
regs=vcpu_regs(vcpu);
bundle = __vmx_get_domain_bundle(regs->cr_iip);
@@ -400,28 +412,70 @@
}
else if (slot == 2) inst.inst = bundle.slot2;
+
+ // Integer Load/Store
if(inst.M1.major==4&&inst.M1.m==0&&inst.M1.x==0){
- inst_type=0; //fp
+ inst_type = SL_INTEGER; //
size=(inst.M1.x6&0x3);
if((inst.M1.x6>>2)>0xb){ // write
+ dir=IOREQ_WRITE; //write
vmx_vcpu_get_gr(vcpu,inst.M4.r2,&data);
+ }else if((inst.M1.x6>>2)<0xb){ // read
+ dir=IOREQ_READ;
+ vmx_vcpu_get_gr(vcpu,inst.M1.r1,&value);
+ }
+ }
+ // Integer Load + Reg update
+ else if(inst.M2.major==4&&inst.M2.m==1&&inst.M2.x==0){
+ inst_type = SL_INTEGER;
+ dir = IOREQ_READ; //write
+ size = (inst.M2.x6&0x3);
+ vmx_vcpu_get_gr(vcpu,inst.M2.r1,&value);
+ vmx_vcpu_get_gr(vcpu,inst.M2.r3,&temp);
+ vmx_vcpu_get_gr(vcpu,inst.M2.r2,&post_update);
+ temp += post_update;
+ vmx_vcpu_set_gr(vcpu,inst.M2.r3,temp,0);
+ }
+ // Integer Load/Store + Imm update
+ else if(inst.M3.major==5){
+ inst_type = SL_INTEGER; //
+ size=(inst.M3.x6&0x3);
+ if((inst.M5.x6>>2)>0xb){ // write
dir=IOREQ_WRITE; //write
- }else if((inst.M1.x6>>2)<0xb){ // read
- vmx_vcpu_get_gr(vcpu,inst.M1.r1,&value);
+ vmx_vcpu_get_gr(vcpu,inst.M5.r2,&data);
+ vmx_vcpu_get_gr(vcpu,inst.M5.r3,&temp);
+ post_update = (inst.M5.i<<7)+inst.M5.imm7;
+ if(inst.M5.s)
+ temp -= post_update;
+ else
+ temp += post_update;
+ vmx_vcpu_set_gr(vcpu,inst.M5.r3,temp,0);
+
+ }else if((inst.M3.x6>>2)<0xb){ // read
dir=IOREQ_READ;
- }else{
- printf("This memory access instruction can't be emulated one :
%lx\n",inst.inst);
- while(1);
- }
- }else if(inst.M6.major==6&&inst.M6.m==0&&inst.M6.x==0&&inst.M6.x6==3){
- inst_type=1; //fp
- dir=IOREQ_READ;
- size=3; //ldfd
- }else{
+ vmx_vcpu_get_gr(vcpu,inst.M3.r1,&value);
+ vmx_vcpu_get_gr(vcpu,inst.M3.r3,&temp);
+ post_update = (inst.M3.i<<7)+inst.M3.imm7;
+ if(inst.M3.s)
+ temp -= post_update;
+ else
+ temp += post_update;
+ vmx_vcpu_set_gr(vcpu,inst.M3.r3,temp,0);
+
+ }
+ }
+ // Floating-point Load/Store
+// else if(inst.M6.major==6&&inst.M6.m==0&&inst.M6.x==0&&inst.M6.x6==3){
+// inst_type=SL_FLOATING; //fp
+// dir=IOREQ_READ;
+// size=3; //ldfd
+// }
+ else{
printf("This memory access instruction can't be emulated two: %lx\n
",inst.inst);
while(1);
}
+ size = 1 << size;
if(dir==IOREQ_WRITE){
mmio_access(vcpu, padr, &data, size, ma, dir);
}else{
@@ -433,7 +487,7 @@
else if(size==2)
data = (value & 0xffffffff00000000U) | (data & 0xffffffffU);
- if(inst_type==0){ //gp
+ if(inst_type==SL_INTEGER){ //gp
vmx_vcpu_set_gr(vcpu,inst.M1.r1,data,0);
}else{
panic("Don't support ldfd now !");
diff -r 63995acdd34a -r f294acb25858 xen/arch/ia64/pal_emul.c
--- a/xen/arch/ia64/pal_emul.c Wed Aug 3 09:35:16 2005
+++ b/xen/arch/ia64/pal_emul.c Wed Aug 3 09:35:38 2005
@@ -1,7 +1,21 @@
-/* PAL/SAL call delegation
+/*
+ * PAL/SAL call delegation
*
* Copyright (c) 2004 Li Susie <susie.li@xxxxxxxxx>
* Copyright (c) 2005 Yu Ke <ke.yu@xxxxxxxxx>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
*/
#include <asm/vmx_vcpu.h>
@@ -98,23 +112,6 @@
static struct ia64_pal_retval
pal_halt_light (VCPU *vcpu) {
-#if 0
- // GVMM will go back to HVMM and ask HVMM to call yield().
- vmmdata.p_ctlblk->status = VM_OK;
- vmmdata.p_ctlblk->ctlcode = ExitVM_YIELD;
-
- vmm_transition((UINT64)&vmmdata.p_gsa->guest,
- (UINT64)&vmmdata.p_gsa->host,
- (UINT64) vmmdata.p_tramp,0,0);
-
-
- result.status = 0;
- result.pal_result[0]=0;
- result.pal_result[1]=0;
- result.pal_result[2]=0;
-
- return result;
-#endif
struct ia64_pal_retval result;
result.status= -1; //unimplemented
diff -r 63995acdd34a -r f294acb25858 xen/arch/ia64/patch/linux-2.6.11/irq_ia64.c
--- a/xen/arch/ia64/patch/linux-2.6.11/irq_ia64.c Wed Aug 3 09:35:16 2005
+++ b/xen/arch/ia64/patch/linux-2.6.11/irq_ia64.c Wed Aug 3 09:35:38 2005
@@ -20,11 +20,19 @@
__do_IRQ(local_vector_to_irq(vector), regs);
/*
-@@ -167,6 +173,95 @@
+@@ -167,6 +173,103 @@
irq_exit();
}
+#ifdef CONFIG_VTI
++#define vmx_irq_enter() \
++ add_preempt_count(HARDIRQ_OFFSET);
++
++/* Now softirq will be checked when leaving hypervisor, or else
++ * scheduler irq will be executed too early.
++ */
++#define vmx_irq_exit(void) \
++ sub_preempt_count(HARDIRQ_OFFSET);
+/*
+ * That's where the IVT branches when we get an external
+ * interrupt. This branches to the correct hardware IRQ handler via
@@ -72,7 +80,7 @@
+ * 16 (without this, it would be ~240, which could easily lead
+ * to kernel stack overflows).
+ */
-+ irq_enter();
++ vmx_irq_enter();
+ saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
+ ia64_srlz_d();
+ while (vector != IA64_SPURIOUS_INT_VECTOR) {
@@ -106,7 +114,7 @@
+ * handler needs to be able to wait for further keyboard interrupts,
which can't
+ * come through until ia64_eoi() has been done.
+ */
-+ irq_exit();
++ vmx_irq_exit();
+ if ( wake_dom0 && current != dom0 )
+ domain_wake(dom0->vcpu[0]);
+}
diff -r 63995acdd34a -r f294acb25858 xen/arch/ia64/patch/linux-2.6.11/kregs.h
--- a/xen/arch/ia64/patch/linux-2.6.11/kregs.h Wed Aug 3 09:35:16 2005
+++ b/xen/arch/ia64/patch/linux-2.6.11/kregs.h Wed Aug 3 09:35:38 2005
@@ -1,6 +1,6 @@
---
/home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/../../linux-2.6.11/include/asm-ia64/kregs.h
2005-03-01 23:37:49.000000000 -0800
+++
/home/adsharma/disk2/xen-ia64/xeno-unstable-rebase.bk/xen/include/asm-ia64/kregs.h
2005-05-18 12:40:50.000000000 -0700
-@@ -29,8 +29,20 @@
+@@ -29,8 +29,21 @@
*/
#define IA64_TR_KERNEL 0 /* itr0, dtr0: maps kernel
image (code & data) */
#define IA64_TR_PALCODE 1 /* itr1: maps PALcode as
required by EFI */
@@ -12,6 +12,7 @@
+#ifdef XEN
+#define IA64_TR_SHARED_INFO 3 /* dtr3: page shared with domain */
+#define IA64_TR_VHPT 4 /* dtr4: vhpt */
++#define IA64_TR_ARCH_INFO 5
+#ifdef CONFIG_VTI
+#define IA64_TR_VHPT_IN_DOM 5 /* dtr5: Double mapping for vhpt table
in domain space */
+#define IA64_TR_RR7_SWITCH_STUB 7 /* dtr7: mapping for rr7 switch
stub */
diff -r 63995acdd34a -r f294acb25858 xen/arch/ia64/process.c
--- a/xen/arch/ia64/process.c Wed Aug 3 09:35:16 2005
+++ b/xen/arch/ia64/process.c Wed Aug 3 09:35:38 2005
@@ -50,7 +50,7 @@
IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD | \
IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | IA64_PSR_IA)
-#define PSCB(x,y) x->vcpu_info->arch.y
+#define PSCB(x,y) VCPU(x,y)
#define PSCBX(x,y) x->arch.y
extern unsigned long vcpu_verbose;
@@ -226,7 +226,7 @@
#ifdef CONFIG_SMP
#error "sharedinfo doesn't handle smp yet"
#endif
- regs->r31 = &((shared_info_t *)SHAREDINFO_ADDR)->vcpu_data[0].arch;
+ regs->r31 = &(((mapped_regs_t *)SHARED_ARCHINFO_ADDR)->ipsr);
PSCB(v,interrupt_delivery_enabled) = 0;
PSCB(v,interrupt_collection_enabled) = 0;
diff -r 63995acdd34a -r f294acb25858 xen/arch/ia64/regionreg.c
--- a/xen/arch/ia64/regionreg.c Wed Aug 3 09:35:16 2005
+++ b/xen/arch/ia64/regionreg.c Wed Aug 3 09:35:38 2005
@@ -14,6 +14,8 @@
#include <asm/page.h>
#include <asm/regionreg.h>
#include <asm/vhpt.h>
+#include <asm/vcpu.h>
+extern void ia64_new_rr7(unsigned long rid,void *shared_info, void
*shared_arch_info);
#define IA64_MIN_IMPL_RID_BITS (IA64_MIN_IMPL_RID_MSB+1)
@@ -273,7 +275,8 @@
newrrv.rid = newrid;
newrrv.ve = VHPT_ENABLED_REGION_7;
newrrv.ps = IA64_GRANULE_SHIFT;
- ia64_new_rr7(vmMangleRID(newrrv.rrval),v->vcpu_info);
+ ia64_new_rr7(vmMangleRID(newrrv.rrval),v->vcpu_info,
+ v->vcpu_info->arch.privregs);
}
else {
newrrv.rid = newrid;
@@ -290,7 +293,8 @@
newrrv.ve = 1; // VHPT now enabled for region 7!!
newrrv.ps = PAGE_SHIFT;
if (rreg == 0) v->arch.metaphysical_saved_rr0 = newrrv.rrval;
- if (rreg == 7) ia64_new_rr7(vmMangleRID(newrrv.rrval),v->vcpu_info);
+ if (rreg == 7) ia64_new_rr7(vmMangleRID(newrrv.rrval),v->vcpu_info,
+ v->vcpu_info->arch.privregs);
else set_rr(rr,newrrv.rrval);
#endif
return 1;
@@ -332,14 +336,14 @@
rrv.ps = PAGE_SHIFT;
rrv.ve = 1;
if (!v->vcpu_info) { printf("Stopping in init_all_rr\n"); dummy(); }
- v->vcpu_info->arch.rrs[0] = -1;
- v->vcpu_info->arch.rrs[1] = rrv.rrval;
- v->vcpu_info->arch.rrs[2] = rrv.rrval;
- v->vcpu_info->arch.rrs[3] = rrv.rrval;
- v->vcpu_info->arch.rrs[4] = rrv.rrval;
- v->vcpu_info->arch.rrs[5] = rrv.rrval;
+ VCPU(v,rrs[0]) = -1;
+ VCPU(v,rrs[1]) = rrv.rrval;
+ VCPU(v,rrs[2]) = rrv.rrval;
+ VCPU(v,rrs[3]) = rrv.rrval;
+ VCPU(v,rrs[4]) = rrv.rrval;
+ VCPU(v,rrs[5]) = rrv.rrval;
rrv.ve = 0;
- v->vcpu_info->arch.rrs[6] = rrv.rrval;
+ VCPU(v,rrs[6]) = rrv.rrval;
// v->shared_info->arch.rrs[7] = rrv.rrval;
}
@@ -378,7 +382,7 @@
// TODO: These probably should be validated
unsigned long bad = 0;
- if (v->vcpu_info->arch.metaphysical_mode) {
+ if (VCPU(v,metaphysical_mode)) {
ia64_rr rrv;
rrv.rrval = 0;
@@ -390,16 +394,16 @@
ia64_srlz_d();
}
else {
- rr0 = v->vcpu_info->arch.rrs[0];
+ rr0 = VCPU(v,rrs[0]);
if (!set_one_rr(0x0000000000000000L, rr0)) bad |= 1;
}
- rr1 = v->vcpu_info->arch.rrs[1];
- rr2 = v->vcpu_info->arch.rrs[2];
- rr3 = v->vcpu_info->arch.rrs[3];
- rr4 = v->vcpu_info->arch.rrs[4];
- rr5 = v->vcpu_info->arch.rrs[5];
- rr6 = v->vcpu_info->arch.rrs[6];
- rr7 = v->vcpu_info->arch.rrs[7];
+ rr1 = VCPU(v,rrs[1]);
+ rr2 = VCPU(v,rrs[2]);
+ rr3 = VCPU(v,rrs[3]);
+ rr4 = VCPU(v,rrs[4]);
+ rr5 = VCPU(v,rrs[5]);
+ rr6 = VCPU(v,rrs[6]);
+ rr7 = VCPU(v,rrs[7]);
if (!set_one_rr(0x2000000000000000L, rr1)) bad |= 2;
if (!set_one_rr(0x4000000000000000L, rr2)) bad |= 4;
if (!set_one_rr(0x6000000000000000L, rr3)) bad |= 8;
@@ -410,4 +414,5 @@
if (bad) {
panic_domain(0,"load_region_regs: can't set! bad=%lx\n",bad);
}
-}
+ return 0;
+}
diff -r 63995acdd34a -r f294acb25858 xen/arch/ia64/vcpu.c
--- a/xen/arch/ia64/vcpu.c Wed Aug 3 09:35:16 2005
+++ b/xen/arch/ia64/vcpu.c Wed Aug 3 09:35:38 2005
@@ -28,7 +28,7 @@
// this def for vcpu_regs won't work if kernel stack is present
#define vcpu_regs(vcpu) ((struct pt_regs *) vcpu->arch.regs)
-#define PSCB(x,y) x->vcpu_info->arch.y
+#define PSCB(x,y) VCPU(x,y)
#define PSCBX(x,y) x->arch.y
#define TRUE 1
@@ -155,7 +155,7 @@
// interrupt collection flag
//if (imm.ic) PSCB(vcpu,interrupt_delivery_enabled) = 0;
// just handle psr.up and psr.pp for now
- if (imm24 & ~(IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP
+ if (imm24 & ~(IA64_PSR_BE | IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP
| IA64_PSR_I | IA64_PSR_IC | IA64_PSR_DT
| IA64_PSR_DFL | IA64_PSR_DFH))
return (IA64_ILLOP_FAULT);
@@ -164,6 +164,7 @@
if (imm.pp) { ipsr->pp = 0; psr.pp = 0; }
if (imm.up) { ipsr->up = 0; psr.up = 0; }
if (imm.sp) { ipsr->sp = 0; psr.sp = 0; }
+ if (imm.be) ipsr->be = 0;
if (imm.dt) vcpu_set_metaphysical_mode(vcpu,TRUE);
__asm__ __volatile (";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
return IA64_NO_FAULT;
@@ -214,6 +215,7 @@
if (imm.ic) PSCB(vcpu,interrupt_collection_enabled) = 1;
// TODO: do this faster
if (imm.mfl) { ipsr->mfl = 1; psr.mfl = 1; }
+ if (imm.mfh) { ipsr->mfh = 1; psr.mfh = 1; }
if (imm.ac) { ipsr->ac = 1; psr.ac = 1; }
if (imm.up) { ipsr->up = 1; psr.up = 1; }
if (imm.be) {
@@ -262,6 +264,7 @@
}
if (newpsr.ic) PSCB(vcpu,interrupt_collection_enabled) = 1;
if (newpsr.mfl) { ipsr->mfl = 1; psr.mfl = 1; }
+ if (newpsr.mfh) { ipsr->mfh = 1; psr.mfh = 1; }
if (newpsr.ac) { ipsr->ac = 1; psr.ac = 1; }
if (newpsr.up) { ipsr->up = 1; psr.up = 1; }
if (newpsr.dt && newpsr.rt) vcpu_set_metaphysical_mode(vcpu,FALSE);
@@ -389,6 +392,21 @@
return (IA64_NO_FAULT);
}
+unsigned long vcpu_get_rr_ps(VCPU *vcpu,UINT64 vadr)
+{
+ ia64_rr rr;
+
+ rr.rrval = PSCB(vcpu,rrs)[vadr>>61];
+ return(rr.ps);
+}
+
+unsigned long vcpu_get_rr_rid(VCPU *vcpu,UINT64 vadr)
+{
+ ia64_rr rr;
+
+ rr.rrval = PSCB(vcpu,rrs)[vadr>>61];
+ return(rr.rid);
+}
unsigned long vcpu_get_itir_on_fault(VCPU *vcpu, UINT64 ifa)
{
@@ -881,6 +899,15 @@
return (IA64_NO_FAULT);
}
+// parameter is a time interval specified in cycles
+void vcpu_enable_timer(VCPU *vcpu,UINT64 cycles)
+{
+ PSCBX(vcpu,xen_timer_interval) = cycles;
+ vcpu_set_next_timer(vcpu);
+ printf("vcpu_enable_timer(%d): interval set to %d cycles\n",
+ PSCBX(vcpu,xen_timer_interval));
+ __set_bit(PSCB(vcpu,itv), PSCB(vcpu,delivery_mask));
+}
IA64FAULT vcpu_set_itv(VCPU *vcpu, UINT64 val)
{
@@ -1007,16 +1034,6 @@
vcpu_safe_set_itm(s);
//using_xen_as_itm++;
}
-}
-
-// parameter is a time interval specified in cycles
-void vcpu_enable_timer(VCPU *vcpu,UINT64 cycles)
-{
- PSCBX(vcpu,xen_timer_interval) = cycles;
- vcpu_set_next_timer(vcpu);
- printf("vcpu_enable_timer(%d): interval set to %d cycles\n",
- PSCBX(vcpu,xen_timer_interval));
- __set_bit(PSCB(vcpu,itv), PSCB(vcpu,delivery_mask));
}
IA64FAULT vcpu_set_itm(VCPU *vcpu, UINT64 val)
@@ -1182,12 +1199,6 @@
//if ((ifs & regs->cr_ifs & 0x8000000000000000L) && ifs !=
regs->cr_ifs) {
//if ((ifs & 0x8000000000000000L) && ifs != regs->cr_ifs) {
if (ifs & regs->cr_ifs & 0x8000000000000000L) {
-#define SI_OFS(x) ((char *)(&PSCB(vcpu,x)) - (char *)(vcpu->vcpu_info))
-if (SI_OFS(iip)!=0x10 || SI_OFS(ipsr)!=0x08 || SI_OFS(ifs)!=0x18) {
-printf("SI_CR_IIP/IPSR/IFS_OFFSET CHANGED, SEE dorfirfi\n");
-printf("SI_CR_IIP=0x%x,IPSR=0x%x,IFS_OFFSET=0x%x\n",SI_OFS(iip),SI_OFS(ipsr),SI_OFS(ifs));
-while(1);
-}
// TODO: validate PSCB(vcpu,iip)
// TODO: PSCB(vcpu,ipsr) = psr;
PSCB(vcpu,ipsr) = psr.i64;
@@ -1222,7 +1233,6 @@
IA64FAULT vcpu_thash(VCPU *vcpu, UINT64 vadr, UINT64 *pval)
{
- extern unsigned long vcpu_get_rr_ps(VCPU *vcpu,UINT64 vadr);
UINT64 pta = PSCB(vcpu,pta);
UINT64 pta_sz = (pta & IA64_PTA_SZ(0x3f)) >> IA64_PTA_SZ_BIT;
UINT64 pta_base = pta & ~((1UL << IA64_PTA_BASE_BIT)-1);
@@ -1263,7 +1273,6 @@
#define itir_mask(itir) (~((1UL << itir_ps(itir)) - 1))
unsigned long vhpt_translate_count = 0;
-int in_vcpu_tpa = 0;
IA64FAULT vcpu_translate(VCPU *vcpu, UINT64 address, BOOLEAN is_data, UINT64
*pteval, UINT64 *itir)
{
@@ -1278,12 +1287,6 @@
unsigned long vipsr = PSCB(vcpu,ipsr);
unsigned long iip = regs->cr_iip;
unsigned long ipsr = regs->cr_ipsr;
-#if 0
- printk("vcpu_translate: bad address %p, viip=%p,
vipsr=%p, iip=%p, ipsr=%p\n", address, viip, vipsr, iip, ipsr);
- if (in_vcpu_tpa) printk("vcpu_translate called from
vcpu_tpa\n");
- while(1);
- panic_domain(0,"vcpu_translate: bad address %p\n",
address);
-#endif
printk("vcpu_translate: bad address %p, viip=%p,
vipsr=%p, iip=%p, ipsr=%p continuing\n", address, viip, vipsr, iip, ipsr);
}
@@ -1304,7 +1307,6 @@
/* check 1-entry TLB */
if ((trp = match_dtlb(vcpu,address))) {
dtlb_translate_count++;
-if (!in_vcpu_tpa) printf("vcpu_translate: found in vdtlb\n");
*pteval = trp->page_flags;
*itir = trp->itir;
return IA64_NO_FAULT;
@@ -1356,9 +1358,7 @@
UINT64 pteval, itir, mask;
IA64FAULT fault;
-in_vcpu_tpa=1;
fault = vcpu_translate(vcpu, vadr, 1, &pteval, &itir);
-in_vcpu_tpa=0;
if (fault == IA64_NO_FAULT)
{
mask = itir_mask(itir);
@@ -1534,28 +1534,8 @@
return(rr.ve);
}
-
-unsigned long vcpu_get_rr_ps(VCPU *vcpu,UINT64 vadr)
-{
- ia64_rr rr;
-
- rr.rrval = PSCB(vcpu,rrs)[vadr>>61];
- return(rr.ps);
-}
-
-
-unsigned long vcpu_get_rr_rid(VCPU *vcpu,UINT64 vadr)
-{
- ia64_rr rr;
-
- rr.rrval = PSCB(vcpu,rrs)[vadr>>61];
- return(rr.rid);
-}
-
-
IA64FAULT vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val)
{
- extern void set_one_rr(UINT64, UINT64);
PSCB(vcpu,rrs)[reg>>61] = val;
// warning: set_one_rr() does it "live"
set_one_rr(reg,val);
@@ -1785,49 +1765,26 @@
IA64FAULT vcpu_fc(VCPU *vcpu, UINT64 vadr)
{
// TODO: Only allowed for current vcpu
- UINT64 mpaddr, ps;
+ UINT64 mpaddr, paddr;
IA64FAULT fault;
- TR_ENTRY *trp;
- unsigned long lookup_domain_mpa(struct domain *,unsigned long);
- unsigned long pteval, dom_imva;
-
- if ((trp = match_dtlb(vcpu,vadr))) {
- pteval = trp->page_flags;
- dom_imva = __va(pteval & _PFN_MASK);
- ia64_fc(dom_imva);
- return IA64_NO_FAULT;
- }
+ unsigned long translate_domain_mpaddr(unsigned long);
+ IA64FAULT vcpu_tpa(VCPU *, UINT64, UINT64 *);
+
fault = vcpu_tpa(vcpu, vadr, &mpaddr);
if (fault == IA64_NO_FAULT) {
- struct domain *dom0;
- unsigned long dom0_start, dom0_size;
- if (vcpu == dom0) {
- if (mpaddr < dom0_start || mpaddr >= dom0_start +
dom0_size) {
- printk("vcpu_fc: bad dom0 mpaddr %p!\n",mpaddr);
- }
- }
- pteval = lookup_domain_mpa(vcpu->domain,mpaddr);
- if (pteval) {
- dom_imva = __va(pteval & _PFN_MASK);
- ia64_fc(dom_imva);
- }
- else {
- REGS *regs = vcpu_regs(vcpu);
- printk("vcpu_fc: can't flush vadr=%p, iip=%p\n",
- vadr,regs->cr_iip);
- }
+ paddr = translate_domain_mpaddr(mpaddr);
+ ia64_fc(__va(paddr));
}
return fault;
}
+int ptce_count = 0;
IA64FAULT vcpu_ptc_e(VCPU *vcpu, UINT64 vadr)
{
-
// Note that this only needs to be called once, i.e. the
// architected loop to purge the entire TLB, should use
// base = stride1 = stride2 = 0, count0 = count 1 = 1
- // FIXME: When VHPT is in place, flush that too!
#ifdef VHPT_GLOBAL
vhpt_flush(); // FIXME: This is overdoing it
#endif
@@ -1850,6 +1807,7 @@
// FIXME: validate not flushing Xen addresses
// if (Xen address) return(IA64_ILLOP_FAULT);
// FIXME: ??breaks if domain PAGE_SIZE < Xen PAGE_SIZE
+//printf("######## vcpu_ptc_ga(%p,%p) ##############\n",vadr,addr_range);
#ifdef VHPT_GLOBAL
vhpt_flush_address(vadr,addr_range);
#endif
diff -r 63995acdd34a -r f294acb25858 xen/arch/ia64/vlsapic.c
--- a/xen/arch/ia64/vlsapic.c Wed Aug 3 09:35:16 2005
+++ b/xen/arch/ia64/vlsapic.c Wed Aug 3 09:35:38 2005
@@ -38,6 +38,14 @@
#include <asm/vmx_pal_vsa.h>
#include <asm/kregs.h>
+#define SHARED_VLAPIC_INF
+#ifdef V_IOSAPIC_READY
+static inline vl_apic_info* get_psapic(VCPU *vcpu)
+{
+ shared_iopage_t *sp = get_sp(vcpu->domain);
+ return &(sp->vcpu_iodata[vcpu->vcpu_id].apic_intr);
+}
+#endif
//u64 fire_itc;
//u64 fire_itc2;
//u64 fire_itm;
@@ -216,7 +224,8 @@
*/
void vtm_domain_out(VCPU *vcpu)
{
- rem_ac_timer(&vcpu->arch.arch_vmx.vtm.vtm_timer);
+ if(!is_idle_task(vcpu->domain))
+ rem_ac_timer(&vcpu->arch.arch_vmx.vtm.vtm_timer);
}
/*
@@ -226,9 +235,11 @@
void vtm_domain_in(VCPU *vcpu)
{
vtime_t *vtm;
-
- vtm=&(vcpu->arch.arch_vmx.vtm);
- vtm_interruption_update(vcpu, vtm);
+
+ if(!is_idle_task(vcpu->domain)) {
+ vtm=&(vcpu->arch.arch_vmx.vtm);
+ vtm_interruption_update(vcpu, vtm);
+ }
}
/*
@@ -262,10 +273,50 @@
}
}
+#ifdef V_IOSAPIC_READY
+void vlapic_update_shared_info(VCPU *vcpu)
+{
+ //int i;
+
+ vl_apic_info *ps;
+
+ if (vcpu->domain == dom0)
+ return;
+
+ ps = get_psapic(vcpu);
+ ps->vl_lapic_id = ((VPD_CR(vcpu, lid) >> 16) & 0xffff) << 16;
+ printf("vl_lapic_id = %x\n", ps->vl_lapic_id);
+ ps->vl_apr = 0;
+ // skip ps->vl_logical_dest && ps->vl_dest_format
+ // IPF support physical destination mode only
+ ps->vl_arb_id = 0;
+ /*
+ for ( i=0; i<4; i++ ) {
+ ps->tmr[i] = 0; // edge trigger
+ }
+ */
+}
+
+void vlapic_update_ext_irq(VCPU *vcpu)
+{
+ int vec;
+
+ vl_apic_info *ps = get_psapic(vcpu);
+ while ( (vec = highest_bits(ps->irr)) != NULL_VECTOR ) {
+ clear_bit (vec, ps->irr);
+ vmx_vcpu_pend_interrupt(vcpu, vec);
+ }
+}
+#endif
+
void vlsapic_reset(VCPU *vcpu)
{
int i;
- VPD_CR(vcpu, lid) = 0;
+#ifdef V_IOSAPIC_READY
+ vl_apic_info *psapic; // shared lapic inf.
+#endif
+
+ VPD_CR(vcpu, lid) = ia64_getreg(_IA64_REG_CR_LID);
VPD_CR(vcpu, ivr) = 0;
VPD_CR(vcpu,tpr) = 0x10000;
VPD_CR(vcpu, eoi) = 0;
@@ -281,6 +332,10 @@
for ( i=0; i<4; i++) {
VLSAPIC_INSVC(vcpu,i) = 0;
}
+#ifdef V_IOSAPIC_READY
+ vlapic_update_shared_info(vcpu);
+ //vlapic_update_shared_irr(vcpu);
+#endif
DPRINTK("VLSAPIC inservice base=%lp\n", &VLSAPIC_INSVC(vcpu,0) );
}
@@ -414,6 +469,7 @@
}
local_irq_save(spsr);
VPD_CR(vcpu,irr[vector>>6]) |= 1UL<<(vector&63);
+ //vlapic_update_shared_irr(vcpu);
local_irq_restore(spsr);
vcpu->arch.irq_new_pending = 1;
}
@@ -432,6 +488,7 @@
for (i=0 ; i<4; i++ ) {
VPD_CR(vcpu,irr[i]) |= pend_irr[i];
}
+ //vlapic_update_shared_irr(vcpu);
local_irq_restore(spsr);
vcpu->arch.irq_new_pending = 1;
}
@@ -518,6 +575,7 @@
VLSAPIC_INSVC(vcpu,vec>>6) |= (1UL <<(vec&63));
VPD_CR(vcpu, irr[vec>>6]) &= ~(1UL <<(vec&63));
update_vhpi(vcpu, NULL_VECTOR); // clear VHPI till EOI or IRR write
+ //vlapic_update_shared_irr(vcpu);
local_irq_restore(spsr);
return (uint64_t)vec;
}
diff -r 63995acdd34a -r f294acb25858 xen/arch/ia64/vmmu.c
--- a/xen/arch/ia64/vmmu.c Wed Aug 3 09:35:16 2005
+++ b/xen/arch/ia64/vmmu.c Wed Aug 3 09:35:38 2005
@@ -145,7 +145,7 @@
thash_cb_t *vhpt;
PTA pta_value;
- page = alloc_domheap_pages (NULL, VCPU_TLB_ORDER);
+ page = alloc_domheap_pages (NULL, VCPU_TLB_ORDER, 0);
if ( page == NULL ) {
panic("No enough contiguous memory for init_domain_mm\n");
}
@@ -187,7 +187,7 @@
tlb_special_t *ts;
thash_cb_t *tlb;
- page = alloc_domheap_pages (NULL, VCPU_TLB_ORDER);
+ page = alloc_domheap_pages (NULL, VCPU_TLB_ORDER, 0);
if ( page == NULL ) {
panic("No enough contiguous memory for init_domain_mm\n");
}
@@ -224,7 +224,7 @@
/* Only called once */
ASSERT(d->arch.pmt);
- page = alloc_domheap_pages(NULL, get_order(d->max_pages));
+ page = alloc_domheap_pages(NULL, get_order(d->max_pages), 0);
ASSERT(page);
d->arch.pmt = page_to_virt(page);
diff -r 63995acdd34a -r f294acb25858 xen/arch/ia64/vmx_hypercall.c
--- a/xen/arch/ia64/vmx_hypercall.c Wed Aug 3 09:35:16 2005
+++ b/xen/arch/ia64/vmx_hypercall.c Wed Aug 3 09:35:38 2005
@@ -29,6 +29,7 @@
#include <asm/regionreg.h>
#include <asm/page.h>
#include <xen/mm.h>
+#include <xen/multicall.h>
void hyper_not_support(void)
@@ -49,6 +50,42 @@
ret=do_mmu_update((mmu_update_t*)r32,r33,r34,r35);
vmx_vcpu_set_gr(vcpu, 8, ret, 0);
vmx_vcpu_increment_iip(vcpu);
+}
+
+unsigned long __hypercall_create_continuation(
+ unsigned int op, unsigned int nr_args, ...)
+{
+ struct mc_state *mcs = &mc_state[smp_processor_id()];
+ VCPU *vcpu = current;
+ struct cpu_user_regs *regs = vcpu_regs(vcpu);
+ unsigned int i;
+ va_list args;
+
+ va_start(args, nr_args);
+ if ( test_bit(_MCSF_in_multicall, &mcs->flags) ) {
+ panic("PREEMPT happen in multicall\n"); // Not support yet
+ } else {
+ vmx_vcpu_set_gr(vcpu, 15, op, 0);
+ for ( i = 0; i < nr_args; i++) {
+ switch (i) {
+ case 0: vmx_vcpu_set_gr(vcpu, 16, va_arg(args, unsigned long), 0);
+ break;
+ case 1: vmx_vcpu_set_gr(vcpu, 17, va_arg(args, unsigned long), 0);
+ break;
+ case 2: vmx_vcpu_set_gr(vcpu, 18, va_arg(args, unsigned long), 0);
+ break;
+ case 3: vmx_vcpu_set_gr(vcpu, 19, va_arg(args, unsigned long), 0);
+ break;
+ case 4: vmx_vcpu_set_gr(vcpu, 20, va_arg(args, unsigned long), 0);
+ break;
+ default: panic("Too many args for hypercall continuation\n");
+ break;
+ }
+ }
+ }
+ vcpu->arch.hypercall_continuation = 1;
+ va_end(args);
+ return op;
}
void hyper_dom_mem_op(void)
@@ -65,7 +102,13 @@
printf("do_dom_mem return value: %lx\n", ret);
vmx_vcpu_set_gr(vcpu, 8, ret, 0);
- vmx_vcpu_increment_iip(vcpu);
+ /* Hard to define a special return value to indicate hypercall restart.
+ * So just add a new mark, which is SMP safe
+ */
+ if (vcpu->arch.hypercall_continuation == 1)
+ vcpu->arch.hypercall_continuation = 0;
+ else
+ vmx_vcpu_increment_iip(vcpu);
}
diff -r 63995acdd34a -r f294acb25858 xen/arch/ia64/vmx_ivt.S
--- a/xen/arch/ia64/vmx_ivt.S Wed Aug 3 09:35:16 2005
+++ b/xen/arch/ia64/vmx_ivt.S Wed Aug 3 09:35:38 2005
@@ -560,6 +560,21 @@
VMX_DBG_FAULT(19)
VMX_FAULT(19)
+ .org vmx_ia64_ivt+0x5000
+/////////////////////////////////////////////////////////////////////////////////////////
+// 0x5000 Entry 20 (size 16 bundles) Page Not Present
+ENTRY(vmx_page_not_present)
+ VMX_REFLECT(20)
+END(vmx_page_not_present)
+
+ .org vmx_ia64_ivt+0x5100
+/////////////////////////////////////////////////////////////////////////////////////////
+// 0x5100 Entry 21 (size 16 bundles) Key Permission vector
+ENTRY(vmx_key_permission)
+ VMX_REFLECT(21)
+END(vmx_key_permission)
+
+ .org vmx_ia64_ivt+0x5200
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
ENTRY(vmx_iaccess_rights)
diff -r 63995acdd34a -r f294acb25858 xen/arch/ia64/vmx_support.c
--- a/xen/arch/ia64/vmx_support.c Wed Aug 3 09:35:16 2005
+++ b/xen/arch/ia64/vmx_support.c Wed Aug 3 09:35:38 2005
@@ -37,18 +37,19 @@
struct vcpu *v = current;
struct domain *d = v->domain;
extern void do_block();
+ int port = iopacket_port(d);
do {
- if (!test_bit(IOPACKET_PORT,
+ if (!test_bit(port,
&d->shared_info->evtchn_pending[0]))
do_block();
/* Unblocked when some event is coming. Clear pending indication
* immediately if deciding to go for io assist
*/
- if (test_and_clear_bit(IOPACKET_PORT,
+ if (test_and_clear_bit(port,
&d->shared_info->evtchn_pending[0])) {
- clear_bit(IOPACKET_PORT>>5, &v->vcpu_info->evtchn_pending_sel);
+ clear_bit(port>>5, &v->vcpu_info->evtchn_pending_sel);
clear_bit(0, &v->vcpu_info->evtchn_upcall_pending);
vmx_io_assist(v);
}
@@ -66,7 +67,7 @@
* nothing losed. Next loop will check I/O channel to fix this
* window.
*/
- clear_bit(IOPACKET_PORT>>5, &v->vcpu_info->evtchn_pending_sel);
+ clear_bit(port>>5, &v->vcpu_info->evtchn_pending_sel);
}
else
break;
@@ -88,7 +89,7 @@
* This shared page contains I/O request between emulation code
* and device model.
*/
- vio = (vcpu_iodata_t *)v->arch.arch_vmx.vmx_platform.shared_page_va;
+ vio = get_vio(v->domain, v->vcpu_id);
if (!vio)
panic("Corruption: bad shared page: %lx\n", (unsigned long)vio);
@@ -127,6 +128,7 @@
struct domain *d = v->domain;
extern void vmx_vcpu_pend_batch_interrupt(VCPU *vcpu,
unsigned long *pend_irr);
+ int port = iopacket_port(d);
/* I/O emulation is atomic, so it's impossible to see execution flow
* out of vmx_wait_io, when guest is still waiting for response.
@@ -135,10 +137,10 @@
panic("!!!Bad resume to guest before I/O emulation is done.\n");
/* Clear indicator specific to interrupt delivered from DM */
- if (test_and_clear_bit(IOPACKET_PORT,
+ if (test_and_clear_bit(port,
&d->shared_info->evtchn_pending[0])) {
- if (!d->shared_info->evtchn_pending[IOPACKET_PORT >> 5])
- clear_bit(IOPACKET_PORT>>5, &v->vcpu_info->evtchn_pending_sel);
+ if (!d->shared_info->evtchn_pending[port >> 5])
+ clear_bit(port>>5, &v->vcpu_info->evtchn_pending_sel);
if (!v->vcpu_info->evtchn_pending_sel)
clear_bit(0, &v->vcpu_info->evtchn_upcall_pending);
@@ -149,11 +151,14 @@
* shares same event channel as I/O emulation, with corresponding
* indicator possibly cleared when vmx_wait_io().
*/
- vio = (vcpu_iodata_t *)v->arch.arch_vmx.vmx_platform.shared_page_va;
+ vio = get_vio(v->domain, v->vcpu_id);
if (!vio)
panic("Corruption: bad shared page: %lx\n", (unsigned long)vio);
- vmx_vcpu_pend_batch_interrupt(v, &vio->vp_intr[0]);
- memset(&vio->vp_intr[0], 0, sizeof(vio->vp_intr));
+#ifdef V_IOSAPIC_READY
+ vlapic_update_ext_irq(v);
+#else
+ panic("IOSAPIC model is missed in qemu\n");
+#endif
return;
}
diff -r 63995acdd34a -r f294acb25858 xen/arch/ia64/vmx_vcpu.c
--- a/xen/arch/ia64/vmx_vcpu.c Wed Aug 3 09:35:16 2005
+++ b/xen/arch/ia64/vmx_vcpu.c Wed Aug 3 09:35:38 2005
@@ -23,7 +23,7 @@
* Xuefei Xu (Anthony Xu) (Anthony.xu@xxxxxxxxx)
*/
-#include <linux/sched.h>
+#include <xen/sched.h>
#include <public/arch-ia64.h>
#include <asm/ia64_int.h>
#include <asm/vmx_vcpu.h>
@@ -201,7 +201,7 @@
struct virutal_platform_def *
vmx_vcpu_get_plat(VCPU *vcpu)
{
- return &(vcpu->arch.arch_vmx.vmx_platform);
+ return &(vcpu->domain->arch.vmx_platform);
}
@@ -213,7 +213,6 @@
IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val)
{
- extern void set_one_rr(UINT64, UINT64);
ia64_rr oldrr,newrr;
thash_cb_t *hcb;
oldrr=vmx_vcpu_rr(vcpu,reg);
@@ -375,7 +374,7 @@
vmx_vcpu_get_gr(VCPU *vcpu, unsigned reg, UINT64 * val)
{
REGS *regs=vcpu_regs(vcpu);
- u64 nat;
+ int nat;
//TODO, Eddie
if (!regs) return 0;
if (reg >= 16 && reg < 32) {
diff -r 63995acdd34a -r f294acb25858 xen/arch/ia64/vmx_virt.c
--- a/xen/arch/ia64/vmx_virt.c Wed Aug 3 09:35:16 2005
+++ b/xen/arch/ia64/vmx_virt.c Wed Aug 3 09:35:38 2005
@@ -1193,7 +1193,8 @@
case 23:return vmx_vcpu_set_ifs(vcpu,r2);
case 24:return vmx_vcpu_set_iim(vcpu,r2);
case 25:return vmx_vcpu_set_iha(vcpu,r2);
- case 64:return vmx_vcpu_set_lid(vcpu,r2);
+ case 64:printk("SET LID to 0x%lx\n", r2);
+ return vmx_vcpu_set_lid(vcpu,r2);
case 65:return IA64_NO_FAULT;
case 66:return vmx_vcpu_set_tpr(vcpu,r2);
case 67:return vmx_vcpu_set_eoi(vcpu,r2);
@@ -1253,9 +1254,9 @@
case 23:return cr_get(ifs);
case 24:return cr_get(iim);
case 25:return cr_get(iha);
- case 64:val = ia64_getreg(_IA64_REG_CR_LID);
- return vmx_vcpu_set_gr(vcpu,tgt,val,0);
-// case 64:return cr_get(lid);
+// case 64:val = ia64_getreg(_IA64_REG_CR_LID);
+// return vmx_vcpu_set_gr(vcpu,tgt,val,0);
+ case 64:return cr_get(lid);
case 65:
vmx_vcpu_get_ivr(vcpu,&val);
return vmx_vcpu_set_gr(vcpu,tgt,val,0);
diff -r 63995acdd34a -r f294acb25858 xen/arch/ia64/vtlb.c
--- a/xen/arch/ia64/vtlb.c Wed Aug 3 09:35:16 2005
+++ b/xen/arch/ia64/vtlb.c Wed Aug 3 09:35:38 2005
@@ -23,6 +23,7 @@
#include <linux/sched.h>
#include <asm/tlb.h>
+#include <asm/mm.h>
#include <asm/vmx_mm_def.h>
#include <asm/gcc_intrin.h>
#include <xen/interrupt.h>
@@ -359,7 +360,10 @@
void vtlb_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
{
thash_data_t *hash_table, *cch;
+ int flag;
rr_t vrr;
+ u64 gppn;
+ u64 ppns, ppne;
hash_table = (hcb->hash_func)(hcb->pta,
va, entry->rid, entry->ps);
@@ -375,7 +379,18 @@
*hash_table = *entry;
hash_table->next = cch;
}
- thash_insert (hcb->ts->vhpt, entry, va);
+ if(hcb->vcpu->domain->domain_id==0){
+ thash_insert(hcb->ts->vhpt, entry, va);
+ return;
+ }
+ flag = 1;
+ gppn =
(POFFSET(va,entry->ps)|PAGEALIGN((entry->ppn<<12),entry->ps))>>PAGE_SHIFT;
+ ppns = PAGEALIGN((entry->ppn<<12),entry->ps);
+ ppne = ppns + PSIZE(entry->ps);
+ if(((ppns<=0xa0000)&&(ppne>0xa0000))||((ppne>0xc0000)&&(ppns<=0xc0000)))
+ flag = 0;
+ if((__gpfn_is_mem(hcb->vcpu->domain, gppn)&&flag))
+ thash_insert(hcb->ts->vhpt, entry, va);
return ;
}
@@ -427,18 +442,22 @@
thash_data_t *hash_table, *p, *q;
thash_internal_t *priv = &hcb->priv;
int idx;
-
+
hash_table = priv->hash_base;
if ( hash_table == entry ) {
- __rem_hash_head (hcb, entry);
+// if ( PURGABLE_ENTRY(hcb, entry) ) {
+ __rem_hash_head (hcb, entry);
+// }
return ;
}
// remove from collision chain
p = hash_table;
for ( q=p->next; q; q = p->next ) {
- if ( q == entry ) {
- p->next = q->next;
- __rem_chain(hcb, entry);
+ if ( q == entry ){
+// if ( PURGABLE_ENTRY(hcb,q ) ) {
+ p->next = q->next;
+ __rem_chain(hcb, entry);
+// }
return ;
}
p = q;
@@ -939,7 +958,7 @@
if ( sanity_check == 0 ) return;
sanity_check --;
s_sect.v = 0;
-// page = alloc_domheap_pages (NULL, VCPU_TLB_ORDER);
+// page = alloc_domheap_pages (NULL, VCPU_TLB_ORDER, 0);
// if ( page == NULL ) {
// panic("No enough contiguous memory for init_domain_mm\n");
// };
diff -r 63995acdd34a -r f294acb25858 xen/arch/ia64/xenasm.S
--- a/xen/arch/ia64/xenasm.S Wed Aug 3 09:35:16 2005
+++ b/xen/arch/ia64/xenasm.S Wed Aug 3 09:35:38 2005
@@ -48,10 +48,11 @@
// FIXME? Note that this turns off the DB bit (debug)
#define PSR_BITS_TO_SET IA64_PSR_BN
+//extern void ia64_new_rr7(unsigned long rid,void *shared_info, void
*shared_arch_info);
GLOBAL_ENTRY(ia64_new_rr7)
// not sure this unwind statement is correct...
.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(1)
- alloc loc1 = ar.pfs, 2, 7, 0, 0
+ alloc loc1 = ar.pfs, 3, 8, 0, 0
1: {
mov r28 = in0 // copy procedure index
mov r8 = ip // save ip to compute branch
@@ -72,6 +73,10 @@
;;
tpa loc5=loc5 // grab this BEFORE changing rr7
;;
+ mov loc7=in2 // arch_vcpu_info_t
+ ;;
+ tpa loc7=loc7 // grab this BEFORE changing rr7
+ ;;
mov loc3 = psr // save psr
adds r8 = 1f-1b,r8 // calculate return address for call
;;
@@ -203,6 +208,25 @@
mov cr.ifa=r22
;;
mov r25=IA64_TR_SHARED_INFO
+ ;;
+ itr.d dtr[r25]=r23 // wire in new mapping...
+ ;;
+ // Map for arch_vcpu_info_t
+ movl r22=SHARED_ARCHINFO_ADDR
+ ;;
+ movl r25=__pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RW)
+ ;;
+ mov r21=loc7 // saved sharedinfo physical address
+ ;;
+ or r23=r25,r21 // construct PA | page properties
+ mov r24=PAGE_SHIFT<<2
+ ;;
+ ptr.d r22,r24
+ ;;
+ mov cr.itir=r24
+ mov cr.ifa=r22
+ ;;
+ mov r25=IA64_TR_ARCH_INFO
;;
itr.d dtr[r25]=r23 // wire in new mapping...
;;
@@ -278,12 +302,9 @@
END(__get_domain_bundle)
GLOBAL_ENTRY(dorfirfi)
-#define SI_CR_IIP_OFFSET 0x10
-#define SI_CR_IPSR_OFFSET 0x08
-#define SI_CR_IFS_OFFSET 0x18
- movl r16 = SHAREDINFO_ADDR+SI_CR_IIP_OFFSET
- movl r17 = SHAREDINFO_ADDR+SI_CR_IPSR_OFFSET
- movl r18 = SHAREDINFO_ADDR+SI_CR_IFS_OFFSET
+ movl r16 = XSI_IIP
+ movl r17 = XSI_IPSR
+ movl r18 = XSI_IFS
;;
ld8 r16 = [r16]
ld8 r17 = [r17]
diff -r 63995acdd34a -r f294acb25858 xen/arch/ia64/xenmem.c
--- a/xen/arch/ia64/xenmem.c Wed Aug 3 09:35:16 2005
+++ b/xen/arch/ia64/xenmem.c Wed Aug 3 09:35:38 2005
@@ -65,7 +65,7 @@
#else // CONFIG_VTI
/* Allocate and map the machine-to-phys table */
- if ((pg = alloc_domheap_pages(NULL, 10)) == NULL)
+ if ((pg = alloc_domheap_pages(NULL, 10, 0)) == NULL)
panic("Not enough memory to bootstrap Xen.\n");
memset(page_to_virt(pg), 0x55, 16UL << 20);
#endif // CONFIG_VTI
diff -r 63995acdd34a -r f294acb25858 xen/arch/ia64/xenmisc.c
--- a/xen/arch/ia64/xenmisc.c Wed Aug 3 09:35:16 2005
+++ b/xen/arch/ia64/xenmisc.c Wed Aug 3 09:35:38 2005
@@ -103,11 +103,13 @@
}
#endif
+#ifndef CONFIG_VTI
unsigned long __hypercall_create_continuation(
unsigned int op, unsigned int nr_args, ...)
{
printf("__hypercall_create_continuation: not implemented!!!\n");
}
+#endif
///////////////////////////////
@@ -115,14 +117,17 @@
// from arch/x86/apic.c
///////////////////////////////
+extern unsigned long domain0_ready;
+
int reprogram_ac_timer(s_time_t timeout)
{
struct vcpu *v = current;
#ifdef CONFIG_VTI
- if(VMX_DOMAIN(v))
+// if(VMX_DOMAIN(v))
return 1;
#endif // CONFIG_VTI
+ if (!domain0_ready) return 1;
local_cpu_data->itm_next = timeout;
if (is_idle_task(v->domain)) vcpu_safe_set_itm(timeout);
else vcpu_set_next_timer(current);
@@ -175,6 +180,22 @@
void show_registers(struct pt_regs *regs)
{
printf("*** ADD REGISTER DUMP HERE FOR DEBUGGING\n");
+}
+
+int is_kernel_text(unsigned long addr)
+{
+ extern char _stext[], _etext[];
+ if (addr >= (unsigned long) _stext &&
+ addr <= (unsigned long) _etext)
+ return 1;
+
+ return 0;
+}
+
+unsigned long kernel_text_end(void)
+{
+ extern char _etext[];
+ return (unsigned long) _etext;
}
///////////////////////////////
@@ -291,8 +312,8 @@
static long cnt[16] = { 50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50};
static int i = 100;
int id = ((struct vcpu *)current)->domain->domain_id & 0xf;
-if (!cnt[id]--) { printk("%x",id); cnt[id] = 500; }
-if (!i--) { printk("+",id); cnt[id] = 1000; }
+if (!cnt[id]--) { printk("%x",id); cnt[id] = 500000; }
+if (!i--) { printk("+",id); i = 1000000; }
}
clear_bit(_VCPUF_running, &prev->vcpu_flags);
//if (!is_idle_task(next->domain) )
diff -r 63995acdd34a -r f294acb25858 xen/arch/ia64/xensetup.c
--- a/xen/arch/ia64/xensetup.c Wed Aug 3 09:35:16 2005
+++ b/xen/arch/ia64/xensetup.c Wed Aug 3 09:35:38 2005
@@ -136,6 +136,12 @@
.stop_bits = 1
};
+struct ns16550_defaults ns16550_com2 = {
+ .data_bits = 8,
+ .parity = 'n',
+ .stop_bits = 1
+};
+
void start_kernel(void)
{
unsigned char *cmdline;
@@ -158,7 +164,13 @@
/* We initialise the serial devices very early so we can get debugging. */
if (running_on_sim) hpsim_serial_init();
- else ns16550_init(0, &ns16550_com1);
+ else {
+ ns16550_init(0, &ns16550_com1);
+ /* Also init com2 for Tiger4. */
+ ns16550_com2.io_base = 0x2f8;
+ ns16550_com2.irq = 3;
+ ns16550_init(1, &ns16550_com2);
+ }
serial_init_preirq();
init_console();
diff -r 63995acdd34a -r f294acb25858 xen/arch/ia64/xentime.c
--- a/xen/arch/ia64/xentime.c Wed Aug 3 09:35:16 2005
+++ b/xen/arch/ia64/xentime.c Wed Aug 3 09:35:38 2005
@@ -27,6 +27,7 @@
#include <asm/sections.h>
#include <asm/system.h>
#ifdef XEN
+#include <asm/vcpu.h>
#include <linux/jiffies.h> // not included by xen/sched.h
#endif
#include <xen/softirq.h>
@@ -143,8 +144,8 @@
if (!(++count & ((HEARTBEAT_FREQ*1024)-1))) {
printf("Heartbeat... iip=%p,psr.i=%d,pend=%d\n",
regs->cr_iip,
- current->vcpu_info->arch.interrupt_delivery_enabled,
- current->vcpu_info->arch.pending_interruption);
+ VCPU(current,interrupt_delivery_enabled),
+ VCPU(current,pending_interruption));
count = 0;
}
#endif
@@ -159,7 +160,7 @@
// We have to ensure that domain0 is launched before we
// call vcpu_timer_expired on it
//domain0_ready = 1; // moved to xensetup.c
- current->vcpu_info->arch.pending_interruption = 1;
+ VCPU(current,pending_interruption) = 1;
}
if (domain0_ready && vcpu_timer_expired(dom0->vcpu[0])) {
vcpu_pend_timer(dom0->vcpu[0]);
diff -r 63995acdd34a -r f294acb25858 xen/common/xmalloc.c
--- a/xen/common/xmalloc.c Wed Aug 3 09:35:16 2005
+++ b/xen/common/xmalloc.c Wed Aug 3 09:35:38 2005
@@ -111,7 +111,9 @@
unsigned long flags;
/* We currently always return cacheline aligned. */
+#ifndef __ia64__
BUG_ON(align > SMP_CACHE_BYTES);
+#endif
/* Add room for header, pad to align next header. */
size += sizeof(struct xmalloc_hdr);
diff -r 63995acdd34a -r f294acb25858 xen/include/asm-ia64/config.h
--- a/xen/include/asm-ia64/config.h Wed Aug 3 09:35:16 2005
+++ b/xen/include/asm-ia64/config.h Wed Aug 3 09:35:38 2005
@@ -230,6 +230,7 @@
#define FORCE_CRASH() asm("break 0;;");
+void dummy_called(char *function);
#define dummy() dummy_called(__FUNCTION__)
// these declarations got moved at some point, find a better place for them
diff -r 63995acdd34a -r f294acb25858 xen/include/asm-ia64/domain.h
--- a/xen/include/asm-ia64/domain.h Wed Aug 3 09:35:16 2005
+++ b/xen/include/asm-ia64/domain.h Wed Aug 3 09:35:38 2005
@@ -8,6 +8,7 @@
#include <asm/vmmu.h>
#include <asm/regionreg.h>
#include <public/arch-ia64.h>
+#include <asm/vmx_platform.h>
#endif // CONFIG_VTI
#include <xen/list.h>
@@ -42,6 +43,7 @@
* max_pages in domain struct, which indicates maximum memory size
*/
unsigned long max_pfn;
+ struct virutal_platform_def vmx_platform;
#endif //CONFIG_VTI
u64 xen_vastart;
u64 xen_vaend;
@@ -88,6 +90,7 @@
thash_cb_t *vtlb;
char irq_new_pending;
char irq_new_condition; // vpsr.i/vtpr change, check for pending VHPI
+ char hypercall_continuation;
//for phycial emulation
unsigned long old_rsc;
int mode_flags;
diff -r 63995acdd34a -r f294acb25858 xen/include/asm-ia64/event.h
--- a/xen/include/asm-ia64/event.h Wed Aug 3 09:35:16 2005
+++ b/xen/include/asm-ia64/event.h Wed Aug 3 09:35:38 2005
@@ -9,6 +9,9 @@
#ifndef __ASM_EVENT_H__
#define __ASM_EVENT_H__
+#include <public/arch-ia64.h>
+#include <asm/vcpu.h>
+
static inline void evtchn_notify(struct vcpu *v)
{
vcpu_pend_interrupt(v, v->vcpu_info->arch.evtchn_vector);
diff -r 63995acdd34a -r f294acb25858 xen/include/asm-ia64/ia64_int.h
--- a/xen/include/asm-ia64/ia64_int.h Wed Aug 3 09:35:16 2005
+++ b/xen/include/asm-ia64/ia64_int.h Wed Aug 3 09:35:38 2005
@@ -37,7 +37,9 @@
#define IA64_RFI_IN_PROGRESS 0x0002
#define IA64_RETRY 0x0003
#ifdef CONFIG_VTI
-#define IA64_FAULT 0x0002
+#undef IA64_NO_FAULT
+#define IA64_NO_FAULT 0x0000
+#define IA64_FAULT 0x0001
#endif //CONFIG_VTI
#define IA64_FORCED_IFA 0x0004
#define IA64_ILLOP_FAULT (IA64_GENEX_VECTOR | 0x00)
diff -r 63995acdd34a -r f294acb25858 xen/include/asm-ia64/privop.h
--- a/xen/include/asm-ia64/privop.h Wed Aug 3 09:35:16 2005
+++ b/xen/include/asm-ia64/privop.h Wed Aug 3 09:35:38 2005
@@ -138,14 +138,32 @@
IA64_INST inst;
struct { unsigned long qp:6, un14:14, r3:7, x6:6, x3:3, un1:1, major:4; };
} INST64_M47;
+
typedef union U_INST64_M1{
IA64_INST inst;
struct { unsigned long qp:6, r1:7, un7:7, r3:7, x:1, hint:2, x6:6, m:1,
major:4; };
} INST64_M1;
+
+typedef union U_INST64_M2{
+ IA64_INST inst;
+ struct { unsigned long qp:6, r1:7, r2:7, r3:7, x:1, hint:2, x6:6, m:1,
major:4; };
+} INST64_M2;
+
+typedef union U_INST64_M3{
+ IA64_INST inst;
+ struct { unsigned long qp:6, r1:7, imm7:7, r3:7, i:1, hint:2, x6:6, s:1,
major:4; };
+} INST64_M3;
+
typedef union U_INST64_M4 {
IA64_INST inst;
struct { unsigned long qp:6, un7:7, r2:7, r3:7, x:1, hint:2, x6:6, m:1,
major:4; };
} INST64_M4;
+
+typedef union U_INST64_M5 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, imm7:7, r2:7, r3:7, i:1, hint:2, x6:6, s:1,
major:4; };
+} INST64_M5;
+
typedef union U_INST64_M6 {
IA64_INST inst;
struct { unsigned long qp:6, f1:7, un7:7, r3:7, x:1, hint:2, x6:6, m:1,
major:4; };
@@ -166,7 +184,10 @@
INST64_I28 I28; // mov from ar (I unit)
#ifdef CONFIG_VTI
INST64_M1 M1; // ld integer
+ INST64_M2 M2;
+ INST64_M3 M3;
INST64_M4 M4; // st integer
+ INST64_M5 M5;
INST64_M6 M6; // ldfd floating pointer
#endif // CONFIG_VTI
INST64_M28 M28; // purge translation cache entry
diff -r 63995acdd34a -r f294acb25858 xen/include/asm-ia64/regionreg.h
--- a/xen/include/asm-ia64/regionreg.h Wed Aug 3 09:35:16 2005
+++ b/xen/include/asm-ia64/regionreg.h Wed Aug 3 09:35:38 2005
@@ -39,4 +39,7 @@
#define RR_RID(arg) (((arg) & 0x0000000000ffffff) << 8)
#define RR_RID_MASK 0x00000000ffffff00L
+
+int set_one_rr(unsigned long rr, unsigned long val);
+
#endif /* !_REGIONREG_H_ */
diff -r 63995acdd34a -r f294acb25858 xen/include/asm-ia64/vcpu.h
--- a/xen/include/asm-ia64/vcpu.h Wed Aug 3 09:35:16 2005
+++ b/xen/include/asm-ia64/vcpu.h Wed Aug 3 09:35:38 2005
@@ -13,13 +13,9 @@
struct vcpu;
typedef struct vcpu VCPU;
-// NOTE: The actual VCPU structure (struct virtualcpu) is defined in
-// thread.h. Moving it to here caused a lot of files to change, so
-// for now, we'll leave well enough alone.
typedef struct pt_regs REGS;
-//#define PSCB(vcpu) (((struct spk_thread_t *)vcpu)->pscb)
-//#define vcpu_regs(vcpu) &((struct spk_thread_t
*)vcpu)->thread_regs
-//#define vcpu_thread(vcpu) ((struct spk_thread_t *)vcpu)
+
+#define VCPU(_v,_x) _v->vcpu_info->arch.privregs->_x
#define PRIVOP_ADDR_COUNT
#ifdef PRIVOP_ADDR_COUNT
diff -r 63995acdd34a -r f294acb25858 xen/include/asm-ia64/vmx.h
--- a/xen/include/asm-ia64/vmx.h Wed Aug 3 09:35:16 2005
+++ b/xen/include/asm-ia64/vmx.h Wed Aug 3 09:35:38 2005
@@ -23,6 +23,7 @@
#define _ASM_IA64_VT_H
#define RR7_SWITCH_SHIFT 12 /* 4k enough */
+#include <public/io/ioreq.h>
extern void identify_vmx_feature(void);
extern unsigned int vmx_enabled;
@@ -35,6 +36,22 @@
extern void vmx_purge_double_mapping(u64, u64, u64);
extern void vmx_change_double_mapping(struct vcpu *v, u64 oldrr7, u64 newrr7);
+
extern void vmx_wait_io(void);
extern void vmx_io_assist(struct vcpu *v);
+
+static inline vcpu_iodata_t *get_vio(struct domain *d, unsigned long cpu)
+{
+ return &((shared_iopage_t
*)d->arch.vmx_platform.shared_page_va)->vcpu_iodata[cpu];
+}
+
+static inline int iopacket_port(struct domain *d)
+{
+ return ((shared_iopage_t
*)d->arch.vmx_platform.shared_page_va)->sp_global.eport;
+}
+
+static inline shared_iopage_t *get_sp(struct domain *d)
+{
+ return (shared_iopage_t *)d->arch.vmx_platform.shared_page_va;
+}
#endif /* _ASM_IA64_VT_H */
diff -r 63995acdd34a -r f294acb25858 xen/include/asm-ia64/vmx_uaccess.h
--- a/xen/include/asm-ia64/vmx_uaccess.h Wed Aug 3 09:35:16 2005
+++ b/xen/include/asm-ia64/vmx_uaccess.h Wed Aug 3 09:35:38 2005
@@ -40,6 +40,8 @@
*/
asm (".section \"__ex_table\", \"a\"\n\t.previous");
+/* VT-i reserves bit 60 for the VMM; guest addresses have bit 60 = bit 59 */
+#define IS_VMM_ADDRESS(addr) ((((addr) >> 60) ^ ((addr) >> 59)) & 1)
/* For back compatibility */
#define __access_ok(addr, size, segment) 1
#define access_ok(addr, size, segment) __access_ok((addr), (size), (segment))
diff -r 63995acdd34a -r f294acb25858 xen/include/asm-ia64/vmx_vcpu.h
--- a/xen/include/asm-ia64/vmx_vcpu.h Wed Aug 3 09:35:16 2005
+++ b/xen/include/asm-ia64/vmx_vcpu.h Wed Aug 3 09:35:38 2005
@@ -105,6 +105,10 @@
extern void vtm_interruption_update(VCPU *vcpu, vtime_t* vtm);
extern void vtm_domain_out(VCPU *vcpu);
extern void vtm_domain_in(VCPU *vcpu);
+#ifdef V_IOSAPIC_READY
+extern void vlapic_update_ext_irq(VCPU *vcpu);
+extern void vlapic_update_shared_info(VCPU *vcpu);
+#endif
extern void vlsapic_reset(VCPU *vcpu);
extern int vmx_check_pending_irq(VCPU *vcpu);
extern void guest_write_eoi(VCPU *vcpu);
@@ -399,6 +403,9 @@
vmx_vcpu_set_lid(VCPU *vcpu, u64 val)
{
VPD_CR(vcpu,lid)=val;
+#ifdef V_IOSAPIC_READY
+ vlapic_update_shared_info(vcpu);
+#endif
return IA64_NO_FAULT;
}
extern IA64FAULT vmx_vcpu_set_tpr(VCPU *vcpu, u64 val);
diff -r 63995acdd34a -r f294acb25858 xen/include/asm-ia64/vmx_vpd.h
--- a/xen/include/asm-ia64/vmx_vpd.h Wed Aug 3 09:35:16 2005
+++ b/xen/include/asm-ia64/vmx_vpd.h Wed Aug 3 09:35:38 2005
@@ -25,37 +25,10 @@
#ifndef __ASSEMBLY__
#include <asm/vtm.h>
-#include <asm/vmx_platform.h>
#include <public/arch-ia64.h>
#define VPD_SHIFT 17 /* 128K requirement */
#define VPD_SIZE (1 << VPD_SHIFT)
-typedef union {
- unsigned long value;
- struct {
- int a_int:1;
- int a_from_int_cr:1;
- int a_to_int_cr:1;
- int a_from_psr:1;
- int a_from_cpuid:1;
- int a_cover:1;
- int a_bsw:1;
- long reserved:57;
- };
-} vac_t;
-
-typedef union {
- unsigned long value;
- struct {
- int d_vmsw:1;
- int d_extint:1;
- int d_ibr_dbr:1;
- int d_pmc:1;
- int d_to_pmd:1;
- int d_itm:1;
- long reserved:58;
- };
-} vdc_t;
typedef struct {
unsigned long dcr; // CR0
@@ -89,29 +62,6 @@
unsigned long rsv6[46];
} cr_t;
-typedef struct vpd {
- vac_t vac;
- vdc_t vdc;
- unsigned long virt_env_vaddr;
- unsigned long reserved1[29];
- unsigned long vhpi;
- unsigned long reserved2[95];
- unsigned long vgr[16];
- unsigned long vbgr[16];
- unsigned long vnat;
- unsigned long vbnat;
- unsigned long vcpuid[5];
- unsigned long reserved3[11];
- unsigned long vpsr;
- unsigned long vpr;
- unsigned long reserved4[76];
- unsigned long vcr[128];
- unsigned long reserved5[128];
- unsigned long reserved6[3456];
- unsigned long vmm_avail[128];
- unsigned long reserved7[4096];
-} vpd_t;
-
void vmx_enter_scheduler(void);
//FIXME: Map for LID to vcpu, Eddie
@@ -133,7 +83,6 @@
unsigned long rfi_ipsr;
unsigned long rfi_ifs;
unsigned long in_service[4]; // vLsapic inservice IRQ bits
- struct virutal_platform_def vmx_platform;
unsigned long flags;
};
@@ -175,7 +124,6 @@
#endif //__ASSEMBLY__
-
// VPD field offset
#define VPD_VAC_START_OFFSET 0
#define VPD_VDC_START_OFFSET 8
diff -r 63995acdd34a -r f294acb25858 xen/include/asm-ia64/xensystem.h
--- a/xen/include/asm-ia64/xensystem.h Wed Aug 3 09:35:16 2005
+++ b/xen/include/asm-ia64/xensystem.h Wed Aug 3 09:35:38 2005
@@ -21,10 +21,13 @@
#define XEN_RR7_SWITCH_STUB 0xb700000000000000
#endif // CONFIG_VTI
+#define XEN_START_ADDR 0xf000000000000000
#define KERNEL_START 0xf000000004000000
#define PERCPU_ADDR 0xf100000000000000-PERCPU_PAGE_SIZE
#define SHAREDINFO_ADDR 0xf100000000000000
#define VHPT_ADDR 0xf200000000000000
+#define SHARED_ARCHINFO_ADDR 0xf300000000000000
+#define XEN_END_ADDR 0xf400000000000000
#ifndef __ASSEMBLY__
@@ -58,8 +61,9 @@
ia64_save_extra(prev);
\
if (IA64_HAS_EXTRA_STATE(next))
\
ia64_load_extra(next);
\
- ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next);
\
+ /*ia64_psr(ia64_task_regs(next))->dfh =
!ia64_is_local_fpu_owner(next);*/ \
(last) = ia64_switch_to((next));
\
+ vcpu_set_next_timer(current);
\
} while (0)
#endif // CONFIG_VTI
diff -r 63995acdd34a -r f294acb25858 xen/include/public/arch-ia64.h
--- a/xen/include/public/arch-ia64.h Wed Aug 3 09:35:16 2005
+++ b/xen/include/public/arch-ia64.h Wed Aug 3 09:35:38 2005
@@ -140,38 +140,121 @@
struct pt_fpreg f11; /* scratch */
};
-typedef struct {
- unsigned long ipsr;
- unsigned long iip;
- unsigned long ifs;
- unsigned long precover_ifs;
- unsigned long isr;
- unsigned long ifa;
- unsigned long iipa;
- unsigned long iim;
- unsigned long unat; // not sure if this is needed until NaT arch is
done
- unsigned long tpr;
- unsigned long iha;
- unsigned long itir;
- unsigned long itv;
- unsigned long pmv;
- unsigned long cmcv;
- unsigned long pta;
- int interrupt_collection_enabled; // virtual psr.ic
- int interrupt_delivery_enabled; // virtual psr.i
- int pending_interruption;
- int incomplete_regframe; // see SDM vol2 6.8
- unsigned long delivery_mask[4];
- int metaphysical_mode; // 1 = use metaphys mapping, 0 = use virtual
- int banknum; // 0 or 1, which virtual register bank is active
- unsigned long bank0_regs[16]; // bank0 regs (r16-r31) when bank1 active
- unsigned long bank1_regs[16]; // bank1 regs (r16-r31) when bank0 active
- unsigned long rrs[8]; // region registers
- unsigned long krs[8]; // kernel registers
- unsigned long pkrs[8]; // protection key registers
- unsigned long tmp[8]; // temp registers (e.g. for hyperprivops)
+typedef union {
+ unsigned long value;
+ struct {
+ int a_int:1;
+ int a_from_int_cr:1;
+ int a_to_int_cr:1;
+ int a_from_psr:1;
+ int a_from_cpuid:1;
+ int a_cover:1;
+ int a_bsw:1;
+ long reserved:57;
+ };
+} vac_t;
+
+typedef union {
+ unsigned long value;
+ struct {
+ int d_vmsw:1;
+ int d_extint:1;
+ int d_ibr_dbr:1;
+ int d_pmc:1;
+ int d_to_pmd:1;
+ int d_itm:1;
+ long reserved:58;
+ };
+} vdc_t;
+
+typedef struct {
+ vac_t vac;
+ vdc_t vdc;
+ unsigned long virt_env_vaddr;
+ unsigned long reserved1[29];
+ unsigned long vhpi;
+ unsigned long reserved2[95];
+ union {
+ unsigned long vgr[16];
+ unsigned long bank1_regs[16]; // bank1 regs (r16-r31) when bank0
active
+ };
+ union {
+ unsigned long vbgr[16];
+ unsigned long bank0_regs[16]; // bank0 regs (r16-r31) when bank1
active
+ };
+ unsigned long vnat;
+ unsigned long vbnat;
+ unsigned long vcpuid[5];
+ unsigned long reserved3[11];
+ unsigned long vpsr;
+ unsigned long vpr;
+ unsigned long reserved4[76];
+ union {
+ unsigned long vcr[128];
+ struct {
+ unsigned long dcr; // CR0
+ unsigned long itm;
+ unsigned long iva;
+ unsigned long rsv1[5];
+ unsigned long pta; // CR8
+ unsigned long rsv2[7];
+ unsigned long ipsr; // CR16
+ unsigned long isr;
+ unsigned long rsv3;
+ unsigned long iip;
+ unsigned long ifa;
+ unsigned long itir;
+ unsigned long iipa;
+ unsigned long ifs;
+ unsigned long iim; // CR24
+ unsigned long iha;
+ unsigned long rsv4[38];
+ unsigned long lid; // CR64
+ unsigned long ivr;
+ unsigned long tpr;
+ unsigned long eoi;
+ unsigned long irr[4];
+ unsigned long itv; // CR72
+ unsigned long pmv;
+ unsigned long cmcv;
+ unsigned long rsv5[5];
+ unsigned long lrr0; // CR80
+ unsigned long lrr1;
+ unsigned long rsv6[46];
+ };
+ };
+ union {
+ unsigned long reserved5[128];
+ struct {
+ unsigned long precover_ifs;
+ unsigned long unat; // not sure if this is needed until NaT arch
is done
+ int interrupt_collection_enabled; // virtual psr.ic
+ int interrupt_delivery_enabled; // virtual psr.i
+ int pending_interruption;
+ int incomplete_regframe; // see SDM vol2 6.8
+ unsigned long delivery_mask[4];
+ int metaphysical_mode; // 1 = use metaphys mapping, 0 = use
virtual
+ int banknum; // 0 or 1, which virtual register bank is active
+ unsigned long rrs[8]; // region registers
+ unsigned long krs[8]; // kernel registers
+ unsigned long pkrs[8]; // protection key registers
+ unsigned long tmp[8]; // temp registers (e.g. for
hyperprivops)
+ };
+ };
+#ifdef CONFIG_VTI
+ unsigned long reserved6[3456];
+ unsigned long vmm_avail[128];
+ unsigned long reserved7[4096];
+#endif
+} mapped_regs_t;
+
+typedef struct {
+ mapped_regs_t *privregs;
int evtchn_vector;
} arch_vcpu_info_t;
+
+typedef mapped_regs_t vpd_t;
+
#define __ARCH_HAS_VCPU_INFO
typedef struct {
diff -r 63995acdd34a -r f294acb25858 xen/arch/ia64/asm-xsi-offsets.c
--- /dev/null Wed Aug 3 09:35:16 2005
+++ b/xen/arch/ia64/asm-xsi-offsets.c Wed Aug 3 09:35:38 2005
@@ -0,0 +1,110 @@
+/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
+/*
+ * asm-xsi-offsets.c_
+ * Copyright (c) 2005, Intel Corporation.
+ * Kun Tian (Kevin Tian) <kevin.tian@xxxxxxxxx>
+ * Eddie Dong <eddie.dong@xxxxxxxxx>
+ * Fred Yang <fred.yang@xxxxxxxxx>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ */
+
+/*
+ * Generate definitions needed by assembly language modules.
+ * This code generates raw asm output which is post-processed
+ * to extract and format the required data.
+ */
+
+#include <xen/config.h>
+#include <xen/sched.h>
+#include <asm/processor.h>
+#include <asm/ptrace.h>
+#include <public/xen.h>
+#ifdef CONFIG_VTI
+#include <asm/tlb.h>
+#include <asm/regs.h>
+#endif // CONFIG_VTI
+
+#define task_struct vcpu
+
+#define DEFINE(sym, val) \
+ asm volatile("\n->" #sym " %0 " #val : : "i" (val))
+
+#define BLANK() asm volatile("\n->" : : )
+
+#define OFFSET(_sym, _str, _mem) \
+ DEFINE(_sym, offsetof(_str, _mem));
+
+void foo(void)
+{
+
+ DEFINE(XSI_BASE, SHARED_ARCHINFO_ADDR);
+
+ DEFINE(XSI_PSR_I_OFS, offsetof(mapped_regs_t,
interrupt_delivery_enabled));
+ DEFINE(XSI_PSR_I, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t,
interrupt_delivery_enabled)));
+ DEFINE(XSI_IPSR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, ipsr)));
+ DEFINE(XSI_IPSR_OFS, offsetof(mapped_regs_t, ipsr));
+ DEFINE(XSI_IIP_OFS, offsetof(mapped_regs_t, iip));
+ DEFINE(XSI_IIP, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, iip)));
+ DEFINE(XSI_IFS_OFS, offsetof(mapped_regs_t, ifs));
+ DEFINE(XSI_IFS, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, ifs)));
+ DEFINE(XSI_PRECOVER_IFS_OFS, offsetof(mapped_regs_t, precover_ifs));
+ DEFINE(XSI_PRECOVER_IFS, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t,
precover_ifs)));
+ DEFINE(XSI_ISR_OFS, offsetof(mapped_regs_t, isr));
+ DEFINE(XSI_ISR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, isr)));
+ DEFINE(XSI_IFA_OFS, offsetof(mapped_regs_t, ifa));
+ DEFINE(XSI_IFA, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, ifa)));
+ DEFINE(XSI_IIPA_OFS, offsetof(mapped_regs_t, iipa));
+ DEFINE(XSI_IIPA, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, iipa)));
+ DEFINE(XSI_IIM_OFS, offsetof(mapped_regs_t, iim));
+ DEFINE(XSI_IIM, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, iim)));
+ DEFINE(XSI_TPR_OFS, offsetof(mapped_regs_t, tpr));
+ DEFINE(XSI_TPR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, tpr)));
+ DEFINE(XSI_IHA_OFS, offsetof(mapped_regs_t, iha));
+ DEFINE(XSI_IHA, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, iha)));
+ DEFINE(XSI_ITIR_OFS, offsetof(mapped_regs_t, itir));
+ DEFINE(XSI_ITIR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, itir)));
+ DEFINE(XSI_ITV_OFS, offsetof(mapped_regs_t, itv));
+ DEFINE(XSI_ITV, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, itv)));
+ DEFINE(XSI_PTA_OFS, offsetof(mapped_regs_t, pta));
+ DEFINE(XSI_PTA, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, pta)));
+ DEFINE(XSI_PSR_IC_OFS, offsetof(mapped_regs_t,
interrupt_collection_enabled));
+ DEFINE(XSI_PSR_IC, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t,
interrupt_collection_enabled)));
+ DEFINE(XSI_PEND_OFS, offsetof(mapped_regs_t, pending_interruption));
+ DEFINE(XSI_PEND, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t,
pending_interruption)));
+ DEFINE(XSI_INCOMPL_REGFR_OFS, offsetof(mapped_regs_t,
incomplete_regframe));
+ DEFINE(XSI_INCOMPL_REGFR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t,
incomplete_regframe)));
+ DEFINE(XSI_DELIV_MASK0_OFS, offsetof(mapped_regs_t, delivery_mask[0]));
+ DEFINE(XSI_DELIV_MASK0, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t,
delivery_mask[0])));
+ DEFINE(XSI_METAPHYS_OFS, offsetof(mapped_regs_t, metaphysical_mode));
+ DEFINE(XSI_METAPHYS, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t,
metaphysical_mode)));
+
+ DEFINE(XSI_BANKNUM_OFS, offsetof(mapped_regs_t, banknum));
+ DEFINE(XSI_BANKNUM, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t,
banknum)));
+
+ DEFINE(XSI_BANK0_R16_OFS, offsetof(mapped_regs_t, bank0_regs[0]));
+ DEFINE(XSI_BANK0_R16, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t,
bank0_regs[0])));
+ DEFINE(XSI_BANK1_R16_OFS, offsetof(mapped_regs_t, bank1_regs[0]));
+ DEFINE(XSI_BANK1_R16, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t,
bank1_regs[0])));
+ DEFINE(XSI_RR0_OFS, offsetof(mapped_regs_t, rrs[0]));
+ DEFINE(XSI_RR0, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, rrs[0])));
+ DEFINE(XSI_KR0_OFS, offsetof(mapped_regs_t, krs[0]));
+ DEFINE(XSI_KR0, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, krs[0])));
+ DEFINE(XSI_PKR0_OFS, offsetof(mapped_regs_t, pkrs[0]));
+ DEFINE(XSI_PKR0, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t,
pkrs[0])));
+ DEFINE(XSI_TMP0_OFS, offsetof(mapped_regs_t, tmp[0]));
+ DEFINE(XSI_TMP0, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t,
tmp[0])));
+
+}
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
|