# HG changeset patch
# User awilliam@xxxxxxxxxxxx
# Node ID 3bd97b4fe77d2b751e2ef5beb4e0be4966e7dca3
# Parent 4f1a3ae07dbc11683f2f8064449c96aa590112b1
[IA64] Implement irq redirection of IOSAPIC
Signed-off-by: Anthony Xu <anthony.xu@xxxxxxxxx>
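
This moves the PIB (processor interrupt block) MMIO handling out of mmio.c
into vlsapic.c (vlsapic_read()/vlsapic_write()), moves the per-vcpu XTP byte
from the platform struct into arch_vmx_struct, and makes the vIOSAPIC deliver
SAPIC_FIXED interrupts to the addressed vcpu and SAPIC_LOWEST_PRIORITY
interrupts to viosapic->lowest_vcpu when that is set. The cached lowest_vcpu
is recomputed on every XTP write as the vcpu with the lowest XTP value, or
left NULL when even that value has the 0x80 "disabled" bit set.

For illustration only, here is a minimal standalone sketch of that selection
policy; struct toy_vcpu and pick_lowest_vcpu() are hypothetical stand-ins,
not the real Xen structures or functions:

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    #define XTP_DISABLED 0x80   /* XTP value meaning "do not redirect here" */

    struct toy_vcpu {
        int     id;
        uint8_t xtp;            /* lower value = higher redirection priority */
    };

    /* Pick the vcpu with the lowest XTP; NULL if even the best is disabled. */
    static struct toy_vcpu *pick_lowest_vcpu(struct toy_vcpu *vcpus, int nr)
    {
        struct toy_vcpu *lowest = &vcpus[0];
        int i;

        for (i = 1; i < nr; i++)
            if (vcpus[i].xtp < lowest->xtp)
                lowest = &vcpus[i];

        return (lowest->xtp & XTP_DISABLED) ? NULL : lowest;
    }

    int main(void)
    {
        struct toy_vcpu vcpus[] = { {0, XTP_DISABLED}, {1, 0x40}, {2, 0x20} };
        struct toy_vcpu *target = pick_lowest_vcpu(vcpus, 3);

        if (target)
            printf("lowest-priority interrupts go to vcpu %d\n", target->id);
        else
            printf("redirection disabled on all vcpus\n");
        return 0;
    }
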
---
xen/arch/ia64/vmx/mmio.c | 299 ---------------------------------
xen/arch/ia64/vmx/viosapic.c | 20 +-
xen/arch/ia64/vmx/vlsapic.c | 323 +++++++++++++++++++++++++-----------
xen/include/asm-ia64/viosapic.h | 1 +
xen/include/asm-ia64/vlsapic.h | 11 +
xen/include/asm-ia64/vmx_platform.h | 1 -
xen/include/asm-ia64/vmx_vpd.h | 2 +-
7 files changed, 265 insertions(+), 392 deletions(-)
diff -r 4f1a3ae07dbc -r 3bd97b4fe77d xen/arch/ia64/vmx/mmio.c
--- a/xen/arch/ia64/vmx/mmio.c Thu Dec 07 04:15:54 2006 -0700
+++ b/xen/arch/ia64/vmx/mmio.c Thu Dec 07 05:34:07 2006 -0700
@@ -23,7 +23,6 @@
#include <linux/sched.h>
#include <xen/mm.h>
-#include <asm/tlb.h>
#include <asm/vmx_mm_def.h>
#include <asm/gcc_intrin.h>
#include <linux/interrupt.h>
@@ -37,22 +36,7 @@
#include <linux/event.h>
#include <xen/domain.h>
#include <asm/viosapic.h>
-
-/*
-struct mmio_list *lookup_mmio(u64 gpa, struct mmio_list *mio_base)
-{
- int i;
- for (i=0; mio_base[i].iot != NOT_IO; i++ ) {
- if ( gpa >= mio_base[i].start && gpa <= mio_base[i].end )
- return &mio_base[i];
- }
- return NULL;
-}
-*/
-
-#define PIB_LOW_HALF(ofst) !(ofst&(1<<20))
-#define PIB_OFST_INTA 0x1E0000
-#define PIB_OFST_XTP 0x1E0008
+#include <asm/vlsapic.h>
#define HVM_BUFFERED_IO_RANGE_NR 1
@@ -118,87 +102,6 @@ int hvm_buffered_io_intercept(ioreq_t *p
return 1;
}
-static void write_ipi (VCPU *vcpu, uint64_t addr, uint64_t value);
-
-static void pib_write(VCPU *vcpu, void *src, uint64_t pib_off, size_t s, int ma)
-{
- switch (pib_off) {
- case PIB_OFST_INTA:
- panic_domain(NULL,"Undefined write on PIB INTA\n");
- break;
- case PIB_OFST_XTP:
- if ( s == 1 && ma == 4 /* UC */) {
- vmx_vcpu_get_plat(vcpu)->xtp = *(uint8_t *)src;
- }
- else {
- panic_domain(NULL,"Undefined write on PIB XTP\n");
- }
- break;
- default:
- if ( PIB_LOW_HALF(pib_off) ) { // lower half
- if ( s != 8 || ma != 0x4 /* UC */ ) {
- panic_domain
- (NULL,"Undefined IPI-LHF write with s %ld, ma %d!\n", s, ma);
- }
- else {
- write_ipi(vcpu, pib_off, *(uint64_t *)src);
- // TODO for SM-VP
- }
- }
- else { // upper half
- printk("IPI-UHF write %lx\n",pib_off);
- panic_domain(NULL,"Not support yet for SM-VP\n");
- }
- break;
- }
-}
-
-static void pib_read(VCPU *vcpu, uint64_t pib_off, void *dest, size_t s, int ma)
-{
- switch (pib_off) {
- case PIB_OFST_INTA:
- // todo --- emit on processor system bus.
- if ( s == 1 && ma == 4) { // 1 byte load
- // TODO: INTA read from IOSAPIC
- }
- else {
- panic_domain(NULL,"Undefined read on PIB INTA\n");
- }
- break;
- case PIB_OFST_XTP:
- if ( s == 1 && ma == 4) {
- *((uint8_t*)dest) = vmx_vcpu_get_plat(vcpu)->xtp;
- }
- else {
- panic_domain(NULL,"Undefined read on PIB XTP\n");
- }
- break;
- default:
- if ( PIB_LOW_HALF(pib_off) ) { // lower half
- if ( s != 8 || ma != 4 ) {
- panic_domain(NULL,"Undefined IPI-LHF read!\n");
- }
- else {
-#ifdef IPI_DEBUG
- printk("IPI-LHF read %lx\n",pib_off);
-#endif
- *(uint64_t *)dest = 0; // TODO for SM-VP
- }
- }
- else { // upper half
- if ( s != 1 || ma != 4 ) {
- panic_domain(NULL,"Undefined PIB-UHF read!\n");
- }
- else {
-#ifdef IPI_DEBUG
- printk("IPI-UHF read %lx\n",pib_off);
-#endif
- *(uint8_t *)dest = 0; // TODO for SM-VP
- }
- }
- break;
- }
-}
static void low_mmio_access(VCPU *vcpu, u64 pa, u64 *val, size_t s, int dir)
{
@@ -283,11 +186,14 @@ static void mmio_access(VCPU *vcpu, u64
perfc_incra(vmx_mmio_access, iot >> 56);
switch (iot) {
- case GPFN_PIB:
- if(!dir)
- pib_write(vcpu, dest, src_pa - v_plat->pib_base, s, ma);
+ case GPFN_PIB:
+ if (ma != 4)
+ panic_domain(NULL, "Access PIB not with UC attribute\n");
+
+ if (!dir)
+ vlsapic_write(vcpu, src_pa, s, *dest);
else
- pib_read(vcpu, src_pa - v_plat->pib_base, dest, s, ma);
+ *dest = vlsapic_read(vcpu, src_pa, s);
break;
case GPFN_GFW:
break;
@@ -310,195 +216,6 @@ static void mmio_access(VCPU *vcpu, u64
}
return;
}
-
-/*
- * Read or write data in guest virtual address mode.
- */
-/*
-void
-memwrite_v(VCPU *vcpu, thash_data_t *vtlb, u64 *src, u64 *dest, size_t s)
-{
- uint64_t pa;
-
- if (!vtlb->nomap)
- panic("Normal memory write shouldn't go to this point!");
- pa = PPN_2_PA(vtlb->ppn);
- pa += POFFSET((u64)dest, vtlb->ps);
- mmio_write (vcpu, src, pa, s, vtlb->ma);
-}
-
-
-void
-memwrite_p(VCPU *vcpu, u64 *src, u64 *dest, size_t s)
-{
- uint64_t pa = (uint64_t)dest;
- int ma;
-
- if ( pa & (1UL <<63) ) {
- // UC
- ma = 4;
- pa <<=1;
- pa >>=1;
- }
- else {
- // WBL
- ma = 0; // using WB for WBL
- }
- mmio_write (vcpu, src, pa, s, ma);
-}
-
-void
-memread_v(VCPU *vcpu, thash_data_t *vtlb, u64 *src, u64 *dest, size_t s)
-{
- uint64_t pa;
-
- if (!vtlb->nomap)
- panic_domain(NULL,"Normal memory write shouldn't go to this point!");
- pa = PPN_2_PA(vtlb->ppn);
- pa += POFFSET((u64)src, vtlb->ps);
-
- mmio_read(vcpu, pa, dest, s, vtlb->ma);
-}
-
-void
-memread_p(VCPU *vcpu, u64 *src, u64 *dest, size_t s)
-{
- uint64_t pa = (uint64_t)src;
- int ma;
-
- if ( pa & (1UL <<63) ) {
- // UC
- ma = 4;
- pa <<=1;
- pa >>=1;
- }
- else {
- // WBL
- ma = 0; // using WB for WBL
- }
- mmio_read(vcpu, pa, dest, s, ma);
-}
-*/
-
-/*
- * To inject INIT to guest, we must set the PAL_INIT entry
- * and set psr to switch to physical mode
- */
-#define PAL_INIT_ENTRY 0x80000000ffffffa0
-#define PSR_SET_BITS (IA64_PSR_DT | IA64_PSR_IT | IA64_PSR_RT | \
- IA64_PSR_IC | IA64_PSR_RI)
-
-static void vmx_inject_guest_pal_init(VCPU *vcpu)
-{
- REGS *regs = vcpu_regs(vcpu);
- uint64_t psr = vmx_vcpu_get_psr(vcpu);
-
- regs->cr_iip = PAL_INIT_ENTRY;
-
- psr = psr & (~PSR_SET_BITS);
- vmx_vcpu_set_psr(vcpu,psr);
-}
-
-/*
- * Deliver IPI message. (Only U-VP is supported now)
- * offset: address offset to IPI space.
- * value: deliver value.
- */
-static void deliver_ipi (VCPU *vcpu, uint64_t dm, uint64_t vector)
-{
-#ifdef IPI_DEBUG
- printk ("deliver_ipi %lx %lx\n",dm,vector);
-#endif
- switch ( dm ) {
- case 0: // INT
- vmx_vcpu_pend_interrupt (vcpu, vector);
- break;
- case 2: // PMI
- // TODO -- inject guest PMI
- panic_domain (NULL, "Inject guest PMI!\n");
- break;
- case 4: // NMI
- vmx_vcpu_pend_interrupt (vcpu, 2);
- break;
- case 5: // INIT
- vmx_inject_guest_pal_init(vcpu);
- break;
- case 7: // ExtINT
- vmx_vcpu_pend_interrupt (vcpu, 0);
- break;
- case 1:
- case 3:
- case 6:
- default:
- panic_domain (NULL, "Deliver reserved IPI!\n");
- break;
- }
-}
-
-/*
- * TODO: Use hash table for the lookup.
- */
-static inline VCPU *lid_2_vcpu (struct domain *d, u64 id, u64 eid)
-{
- int i;
- VCPU *vcpu;
- LID lid;
- for (i=0; i<MAX_VIRT_CPUS; i++) {
- vcpu = d->vcpu[i];
- if (!vcpu)
- continue;
- lid.val = VCPU_LID(vcpu);
- if ( lid.id == id && lid.eid == eid )
- return vcpu;
- }
- return NULL;
-}
-
-/*
- * execute write IPI op.
- */
-static void write_ipi (VCPU *vcpu, uint64_t addr, uint64_t value)
-{
- VCPU *targ;
- struct domain *d=vcpu->domain;
- targ = lid_2_vcpu(vcpu->domain,
- ((ipi_a_t)addr).id, ((ipi_a_t)addr).eid);
- if ( targ == NULL ) panic_domain (NULL,"Unknown IPI cpu\n");
-
- if (!test_bit(_VCPUF_initialised, &targ->vcpu_flags)) {
- struct pt_regs *targ_regs = vcpu_regs (targ);
- struct vcpu_guest_context c;
-
- memset (&c, 0, sizeof (c));
-
- if (arch_set_info_guest (targ, &c) != 0) {
- printk ("arch_boot_vcpu: failure\n");
- return;
- }
- /* First or next rendez-vous: set registers. */
- vcpu_init_regs (targ);
- targ_regs->cr_iip = d->arch.sal_data->boot_rdv_ip;
- targ_regs->r1 = d->arch.sal_data->boot_rdv_r1;
-
- if (test_and_clear_bit(_VCPUF_down,&targ->vcpu_flags)) {
- vcpu_wake(targ);
- printk ("arch_boot_vcpu: vcpu %d awaken %016lx!\n",
- targ->vcpu_id, targ_regs->cr_iip);
- }
- else
- printk ("arch_boot_vcpu: huu, already awaken!");
- }
- else {
- int running = test_bit(_VCPUF_running,&targ->vcpu_flags);
- deliver_ipi (targ, ((ipi_d_t)value).dm,
- ((ipi_d_t)value).vector);
- vcpu_unblock(targ);
- if (running)
- smp_send_event_check_cpu(targ->processor);
- }
- return;
-}
-
/*
dir 1: read 0:write
diff -r 4f1a3ae07dbc -r 3bd97b4fe77d xen/arch/ia64/vmx/viosapic.c
--- a/xen/arch/ia64/vmx/viosapic.c Thu Dec 07 04:15:54 2006 -0700
+++ b/xen/arch/ia64/vmx/viosapic.c Thu Dec 07 05:34:07 2006 -0700
@@ -31,7 +31,6 @@
#include <xen/xmalloc.h>
#include <xen/lib.h>
#include <xen/errno.h>
-#include <xen/sched.h>
#include <public/hvm/ioreq.h>
#include <asm/vlsapic.h>
#include <asm/viosapic.h>
@@ -47,11 +46,18 @@ static void viosapic_deliver(struct vios
switch ( delivery_mode )
{
- // don't support interrupt direct currently
case SAPIC_FIXED:
+ {
+ v = vlsapic_lid_to_vcpu(viosapic_domain(viosapic), dest);
+ vlsapic_set_irq(v, vector);
+ vcpu_kick(v);
+ break;
+ }
case SAPIC_LOWEST_PRIORITY:
{
v = vlsapic_lid_to_vcpu(viosapic_domain(viosapic), dest);
+ if (viosapic->lowest_vcpu)
+ v = viosapic->lowest_vcpu;
vlsapic_set_irq(v, vector);
vcpu_kick(v);
break;
@@ -72,10 +78,10 @@ static int iosapic_get_highest_irq(struc
{
uint64_t irqs = viosapic->irr & ~viosapic->isr ;
- if (irqs >> 32)
- return (fls(irqs >> 32) - 1 + 32);
- else
- return fls(irqs) - 1;
+ if (irqs)
+ return ia64_fls(irqs);
+
+ return -1;
}
@@ -327,5 +333,7 @@ void viosapic_init(struct domain *d)
viosapic_reset(viosapic);
+ viosapic->lowest_vcpu = NULL;
+
viosapic->base_address = VIOSAPIC_DEFAULT_BASE_ADDRESS;
}
diff -r 4f1a3ae07dbc -r 3bd97b4fe77d xen/arch/ia64/vmx/vlsapic.c
--- a/xen/arch/ia64/vmx/vlsapic.c Thu Dec 07 04:15:54 2006 -0700
+++ b/xen/arch/ia64/vmx/vlsapic.c Thu Dec 07 05:34:07 2006 -0700
@@ -25,10 +25,8 @@
#include <asm/ia64_int.h>
#include <asm/vcpu.h>
#include <asm/regionreg.h>
-#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/delay.h>
-#include <asm/vmx_vcpu.h>
#include <asm/vmx_vcpu.h>
#include <asm/regs.h>
#include <asm/gcc_intrin.h>
@@ -39,7 +37,15 @@
#include <asm/kregs.h>
#include <asm/vmx_platform.h>
#include <asm/viosapic.h>
+#include <asm/vlsapic.h>
#include <asm/linux/jiffies.h>
+#include <xen/domain.h>
+
+#ifdef IPI_DEBUG
+#define IPI_DPRINTK(x...) printk(x)
+#else
+#define IPI_DPRINTK(x...)
+#endif
//u64 fire_itc;
//u64 fire_itc2;
@@ -116,16 +122,13 @@ static int vmx_vcpu_unpend_interrupt(VCP
*/
static uint64_t now_itc(vtime_t *vtm)
{
- uint64_t guest_itc=vtm->vtm_offset+ia64_get_itc();
-
- if ( vtm->vtm_local_drift ) {
-// guest_itc -= vtm->vtm_local_drift;
- }
- if (guest_itc >= vtm->last_itc)
- return guest_itc;
- else
- /* guest ITC backwarded due after LP switch */
- return vtm->last_itc;
+ uint64_t guest_itc = vtm->vtm_offset + ia64_get_itc();
+
+ if (guest_itc >= vtm->last_itc)
+ return guest_itc;
+ else
+ /* guest ITC went backward due to LP switch */
+ return vtm->last_itc;
}
/*
@@ -175,7 +178,7 @@ void vtm_init(VCPU *vcpu)
{
vtime_t *vtm;
uint64_t itc_freq;
-
+
vtm = &VMX(vcpu, vtm);
itc_freq = local_cpu_data->itc_freq;
@@ -261,76 +264,6 @@ void vtm_set_itv(VCPU *vcpu, uint64_t va
vtm->pending = 0;
}
}
-
-
-/*
- * Update interrupt or hook the vtm timer for fire
- * At this point vtm_timer should be removed if itv is masked.
- */
-/* Interrupt must be disabled at this point */
-/*
-void vtm_interruption_update(VCPU *vcpu, vtime_t* vtm)
-{
- uint64_t cur_itc,vitm,vitv;
- uint64_t expires;
- long diff_now, diff_last;
- uint64_t spsr;
-
- vitv = VCPU(vcpu, itv);
- if ( ITV_IRQ_MASK(vitv) ) {
- return;
- }
-
- vitm =VCPU(vcpu, itm);
- local_irq_save(spsr);
- cur_itc =now_itc(vtm);
- diff_last = vtm->last_itc - vitm;
- diff_now = cur_itc - vitm;
-
- if ( diff_last >= 0 ) {
- // interrupt already fired.
- stop_timer(&vtm->vtm_timer);
- }
- else if ( diff_now >= 0 ) {
- // ITV is fired.
- vmx_vcpu_pend_interrupt(vcpu, vitv&0xff);
- }
-*/
- /* Both last_itc & cur_itc < itm, wait for fire condition */
-/* else {
- expires = NOW() + cycle_to_ns(0-diff_now) + TIMER_SLOP;
- set_timer(&vtm->vtm_timer, expires);
- }
- local_irq_restore(spsr);
-}
- */
-
-/*
- * Action for vtm when the domain is scheduled out.
- * Remove the timer for vtm.
- */
-/*
-void vtm_domain_out(VCPU *vcpu)
-{
- if(!is_idle_domain(vcpu->domain))
- stop_timer(&vcpu->arch.arch_vmx.vtm.vtm_timer);
-}
- */
-/*
- * Action for vtm when the domain is scheduled in.
- * Fire vtm IRQ or add the timer for vtm.
- */
-/*
-void vtm_domain_in(VCPU *vcpu)
-{
- vtime_t *vtm;
-
- if(!is_idle_domain(vcpu->domain)) {
- vtm=&(vcpu->arch.arch_vmx.vtm);
- vtm_interruption_update(vcpu, vtm);
- }
-}
- */
void vlsapic_reset(VCPU *vcpu)
@@ -350,6 +283,7 @@ void vlsapic_reset(VCPU *vcpu)
VCPU(vcpu, lrr0) = 0x10000; // default reset value?
VCPU(vcpu, lrr1) = 0x10000; // default reset value?
update_vhpi(vcpu, NULL_VECTOR);
+ VLSAPIC_XTP(vcpu) = 0x80; // disabled
for ( i=0; i<4; i++) {
VLSAPIC_INSVC(vcpu,i) = 0;
}
@@ -367,7 +301,7 @@ static __inline__ int highest_bits(uint6
{
uint64_t bits, bitnum;
int i;
-
+
/* loop for all 256 bits */
for ( i=3; i >= 0 ; i -- ) {
bits = dat[i];
@@ -411,12 +345,6 @@ static int is_higher_class(int pending,
{
return ( (pending >> 4) > mic );
}
-#if 0
-static int is_invalid_irq(int vec)
-{
- return (vec == 1 || ((vec <= 14 && vec >= 3)));
-}
-#endif //shadow it due to no use currently
#define IRQ_NO_MASKED 0
#define IRQ_MASKED_BY_VTPR 1
@@ -427,7 +355,7 @@ _xirq_masked(VCPU *vcpu, int h_pending,
_xirq_masked(VCPU *vcpu, int h_pending, int h_inservice)
{
tpr_t vtpr;
-
+
vtpr.val = VCPU(vcpu, tpr);
if ( h_inservice == NMI_VECTOR ) {
@@ -467,7 +395,7 @@ static int irq_masked(VCPU *vcpu, int h_
static int irq_masked(VCPU *vcpu, int h_pending, int h_inservice)
{
int mask;
-
+
mask = _xirq_masked(vcpu, h_pending, h_inservice);
return mask;
}
@@ -655,4 +583,213 @@ struct vcpu * vlsapic_lid_to_vcpu(struct
return v;
}
return NULL;
-}
+}
+
+
+/*
+ * To inject INIT to guest, we must set the PAL_INIT entry
+ * and set psr to switch to physical mode
+ */
+#define PAL_INIT_ENTRY 0x80000000ffffffa0
+#define PSR_SET_BITS (IA64_PSR_DT | IA64_PSR_IT | IA64_PSR_RT | \
+ IA64_PSR_IC | IA64_PSR_RI)
+
+static void vmx_inject_guest_pal_init(VCPU *vcpu)
+{
+ REGS *regs = vcpu_regs(vcpu);
+ uint64_t psr = vmx_vcpu_get_psr(vcpu);
+
+ regs->cr_iip = PAL_INIT_ENTRY;
+
+ psr = psr & ~PSR_SET_BITS;
+ vmx_vcpu_set_psr(vcpu, psr);
+}
+
+
+/*
+ * Deliver IPI message. (Only U-VP is supported now)
+ * offset: address offset to IPI space.
+ * value: deliver value.
+ */
+static void vlsapic_deliver_ipi(VCPU *vcpu, uint64_t dm, uint64_t vector)
+{
+ IPI_DPRINTK("deliver_ipi %lx %lx\n", dm, vector);
+
+ switch (dm) {
+ case SAPIC_FIXED: // INT
+ vmx_vcpu_pend_interrupt(vcpu, vector);
+ break;
+ case SAPIC_PMI:
+ // TODO -- inject guest PMI
+ panic_domain(NULL, "Inject guest PMI!\n");
+ break;
+ case SAPIC_NMI:
+ vmx_vcpu_pend_interrupt(vcpu, 2);
+ break;
+ case SAPIC_INIT:
+ vmx_inject_guest_pal_init(vcpu);
+ break;
+ case SAPIC_EXTINT: // ExtINT
+ vmx_vcpu_pend_interrupt(vcpu, 0);
+ break;
+ default:
+ panic_domain(NULL, "Deliver reserved IPI!\n");
+ break;
+ }
+}
+
+/*
+ * TODO: Use hash table for the lookup.
+ */
+static inline VCPU *lid_to_vcpu(struct domain *d, uint8_t id, uint8_t eid)
+{
+ VCPU *v;
+ LID lid;
+
+ for_each_vcpu(d, v) {
+ lid.val = VCPU_LID(v);
+ if (lid.id == id && lid.eid == eid)
+ return v;
+ }
+ return NULL;
+}
+
+
+/*
+ * execute write IPI op.
+ */
+static void vlsapic_write_ipi(VCPU *vcpu, uint64_t addr, uint64_t value)
+{
+ VCPU *targ;
+ struct domain *d = vcpu->domain;
+
+ targ = lid_to_vcpu(vcpu->domain, ((ipi_a_t)addr).id, ((ipi_a_t)addr).eid);
+ if (targ == NULL)
+ panic_domain(NULL, "Unknown IPI cpu\n");
+
+ if (!test_bit(_VCPUF_initialised, &targ->vcpu_flags)) {
+ struct pt_regs *targ_regs = vcpu_regs(targ);
+ struct vcpu_guest_context c;
+
+ memset (&c, 0, sizeof(c));
+
+ if (arch_set_info_guest(targ, &c) != 0) {
+ printk("arch_boot_vcpu: failure\n");
+ return;
+ }
+ /* First or next rendez-vous: set registers. */
+ vcpu_init_regs(targ);
+ targ_regs->cr_iip = d->arch.sal_data->boot_rdv_ip;
+ targ_regs->r1 = d->arch.sal_data->boot_rdv_r1;
+
+ if (test_and_clear_bit(_VCPUF_down,&targ->vcpu_flags)) {
+ vcpu_wake(targ);
+ printk("arch_boot_vcpu: vcpu %d awaken %016lx!\n",
+ targ->vcpu_id, targ_regs->cr_iip);
+ } else {
+ printk("arch_boot_vcpu: huh, already awake!");
+ }
+ } else {
+ int running = test_bit(_VCPUF_running, &targ->vcpu_flags);
+ vlsapic_deliver_ipi(targ, ((ipi_d_t)value).dm,
+ ((ipi_d_t)value).vector);
+ vcpu_unblock(targ);
+ if (running)
+ smp_send_event_check_cpu(targ->processor);
+ }
+ return;
+}
+
+
+unsigned long vlsapic_read(struct vcpu *v,
+ unsigned long addr,
+ unsigned long length)
+{
+ uint64_t result = 0;
+
+ addr &= (PIB_SIZE - 1);
+
+ switch (addr) {
+ case PIB_OFST_INTA:
+ if (length == 1) // 1 byte load
+ ; // There is no i8259, there is no INTA access
+ else
+ panic_domain(NULL,"Undefined read on PIB INTA\n");
+
+ break;
+ case PIB_OFST_XTP:
+ if (length == 1) {
+ result = VLSAPIC_XTP(v);
+ // printk("read xtp %lx\n", result);
+ } else {
+ panic_domain(NULL, "Undefined read on PIB XTP\n");
+ }
+ break;
+ default:
+ if (PIB_LOW_HALF(addr)) { // lower half
+ if (length != 8 )
+ panic_domain(NULL, "Undefined IPI-LHF read!\n");
+ else
+ IPI_DPRINTK("IPI-LHF read %lx\n", pib_off);
+ } else { // upper half
+ IPI_DPRINTK("IPI-UHF read %lx\n", addr);
+ }
+ break;
+ }
+ return result;
+}
+
+static void vlsapic_write_xtp(struct vcpu *v, uint8_t val)
+{
+ struct viosapic * viosapic;
+ struct vcpu *lvcpu, *vcpu;
+ viosapic = vcpu_viosapic(v);
+ lvcpu = v; // start from the writing vcpu; lowest_vcpu may still be NULL
+ VLSAPIC_XTP(v) = val;
+
+ for_each_vcpu(v->domain, vcpu) {
+ if (VLSAPIC_XTP(lvcpu) > VLSAPIC_XTP(vcpu))
+ lvcpu = vcpu;
+ }
+
+ if (VLSAPIC_XTP(lvcpu) & 0x80) // Disabled
+ lvcpu = NULL;
+
+ viosapic->lowest_vcpu = lvcpu;
+}
+
+void vlsapic_write(struct vcpu *v,
+ unsigned long addr,
+ unsigned long length,
+ unsigned long val)
+{
+ addr &= (PIB_SIZE - 1);
+
+ switch (addr) {
+ case PIB_OFST_INTA:
+ panic_domain(NULL, "Undefined write on PIB INTA\n");
+ break;
+ case PIB_OFST_XTP:
+ if (length == 1) {
+ // printk("write xtp %lx\n", val);
+ vlsapic_write_xtp(v, val);
+ } else {
+ panic_domain(NULL, "Undefined write on PIB XTP\n");
+ }
+ break;
+ default:
+ if (PIB_LOW_HALF(addr)) { // lower half
+ if (length != 8)
+ panic_domain(NULL, "Undefined IPI-LHF write with size %ld!\n",
+ length);
+ else
+ vlsapic_write_ipi(v, addr, val);
+ }
+ else { // upper half
+ // printk("IPI-UHF write %lx\n",addr);
+ panic_domain(NULL, "No support for SM-VP yet\n");
+ }
+ break;
+ }
+}
+
diff -r 4f1a3ae07dbc -r 3bd97b4fe77d xen/include/asm-ia64/viosapic.h
--- a/xen/include/asm-ia64/viosapic.h Thu Dec 07 04:15:54 2006 -0700
+++ b/xen/include/asm-ia64/viosapic.h Thu Dec 07 05:34:07 2006 -0700
@@ -78,6 +78,7 @@ struct viosapic {
uint64_t isr; /* This is used for level trigger */
uint32_t ioregsel;
spinlock_t lock;
+ struct vcpu * lowest_vcpu;
uint64_t base_address;
union viosapic_rte redirtbl[VIOSAPIC_NUM_PINS];
};
diff -r 4f1a3ae07dbc -r 3bd97b4fe77d xen/include/asm-ia64/vlsapic.h
--- a/xen/include/asm-ia64/vlsapic.h Thu Dec 07 04:15:54 2006 -0700
+++ b/xen/include/asm-ia64/vlsapic.h Thu Dec 07 05:34:07 2006 -0700
@@ -50,10 +50,19 @@
#define SAPIC_LEVEL 1
/*
+ * LSAPIC OFFSET
+ */
+#define PIB_LOW_HALF(ofst) !(ofst & (1 << 20))
+#define PIB_OFST_INTA 0x1E0000
+#define PIB_OFST_XTP 0x1E0008
+
+/*
*Mask bit
*/
#define SAPIC_MASK_SHIFT 16
#define SAPIC_MASK (1 << SAPIC_MASK_SHIFT)
+
+#define VLSAPIC_XTP(_v) VMX(_v, xtp)
extern void vtm_init(struct vcpu *vcpu);
extern void vtm_set_itc(struct vcpu *vcpu, uint64_t new_itc);
@@ -63,5 +72,7 @@ extern void vhpi_detection(struct vcpu *
extern void vhpi_detection(struct vcpu *vcpu);
extern int vmx_vcpu_pend_interrupt(VCPU * vcpu, uint8_t vector);
extern struct vcpu * vlsapic_lid_to_vcpu(struct domain *d, uint16_t dest);
+extern uint64_t vlsapic_read(struct vcpu *v, uint64_t addr, uint64_t s);
+extern void vlsapic_write(struct vcpu *v, uint64_t addr, uint64_t s, uint64_t val);
#define vlsapic_set_irq vmx_vcpu_pend_interrupt
#endif
diff -r 4f1a3ae07dbc -r 3bd97b4fe77d xen/include/asm-ia64/vmx_platform.h
--- a/xen/include/asm-ia64/vmx_platform.h Thu Dec 07 04:15:54 2006 -0700
+++ b/xen/include/asm-ia64/vmx_platform.h Thu Dec 07 05:34:07 2006 -0700
@@ -28,7 +28,6 @@ typedef struct virtual_platform_def {
spinlock_t buffered_io_lock;
unsigned long shared_page_va;
unsigned long pib_base;
- unsigned char xtp;
unsigned long params[HVM_NR_PARAMS];
struct mmio_list *mmio;
/* One IOSAPIC now... */
diff -r 4f1a3ae07dbc -r 3bd97b4fe77d xen/include/asm-ia64/vmx_vpd.h
--- a/xen/include/asm-ia64/vmx_vpd.h Thu Dec 07 04:15:54 2006 -0700
+++ b/xen/include/asm-ia64/vmx_vpd.h Thu Dec 07 05:34:07 2006 -0700
@@ -96,9 +96,9 @@ struct arch_vmx_struct {
// unsigned long rfi_iip;
// unsigned long rfi_ipsr;
// unsigned long rfi_ifs;
-// unsigned long in_service[4]; // vLsapic inservice IRQ bits
unsigned long flags;
unsigned long xen_port;
+ unsigned char xtp;
#ifdef VTI_DEBUG
unsigned long ivt_current;
struct ivt_debug ivt_debug[IVT_DEBUG_MAX];