xen-ia64-devel

[Xen-ia64-devel] [PATCH 1/12] MCA handler support for Xen/ia64 TAKE 2

To: xen-ia64-devel@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-ia64-devel] [PATCH 1/12] MCA handler support for Xen/ia64 TAKE 2
From: SUZUKI Kazuhiro <kaz@xxxxxxxxxxxxxx>
Date: Fri, 22 Sep 2006 19:32:18 +0900 (JST)
Delivery-date: Fri, 22 Sep 2006 03:32:54 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
In-reply-to: <20060922.193058.68551752.kaz@xxxxxxxxxxxxxxxxxx>
List-help: <mailto:xen-ia64-devel-request@lists.xensource.com?subject=help>
List-id: Discussion of the ia64 port of Xen <xen-ia64-devel.lists.xensource.com>
List-post: <mailto:xen-ia64-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-ia64-devel>, <mailto:xen-ia64-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-ia64-devel>, <mailto:xen-ia64-devel-request@lists.xensource.com?subject=unsubscribe>
References: <20060922.193058.68551752.kaz@xxxxxxxxxxxxxxxxxx>
Sender: xen-ia64-devel-bounces@xxxxxxxxxxxxxxxxxxx
[1/12] Patch for the MCA handler. [mca-mca.patch]

Signed-off-by: Yutaka Ezaki <yutaka.ezaki@xxxxxxxxxxxxxx>
Signed-off-by: Masaki Kanno <kanno.masaki@xxxxxxxxxxxxxx>
Signed-off-by: Kazuhiro Suzuki <kaz@xxxxxxxxxxxxxx>
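
A note on the notification flow added below, for reviewers: ia64_log_queue()
copies the SAL error record into the OS log buffer and appends a
sal_queue_entry_t to sal_queue[], and the MCA/CMC interrupt handlers then
raise VIRQ_MCA_CMC at dom0's vcpu0 with send_guest_vcpu_virq(). A dom0 kernel
is expected to bind that virq and drain the queued records. The fragment
below is only an illustrative sketch of that consumer side; it assumes the
usual bind_virq_to_irqhandler() helper from the Xen-Linux tree, and
fetch_mca_record() is a hypothetical stand-in for the record-retrieval
interface, which is not defined by this patch.

  /* Illustrative dom0-side consumer of VIRQ_MCA_CMC -- not part of
   * this patch.  fetch_mca_record() is a placeholder for the
   * hypercall wrapper that pulls one queued SAL record and returns
   * its length (0 when the queue is empty). */
  static irqreturn_t
  mca_cmc_virq_handler(int irq, void *dev_id, struct pt_regs *regs)
  {
          /* Drain everything the hypervisor queued via ia64_log_queue(). */
          while (fetch_mca_record() > 0)
                  ;
          return IRQ_HANDLED;
  }

  static int __init mca_virq_init(void)
  {
          /* Bind on vcpu0, matching send_guest_vcpu_virq(dom0->vcpu[0], ...)
           * on the hypervisor side. */
          return bind_virq_to_irqhandler(VIRQ_MCA_CMC, 0, mca_cmc_virq_handler,
                                         SA_INTERRUPT, "mca_cmc", NULL);
  }
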
diff -r 3e4fa8b5b245 xen/arch/ia64/linux-xen/mca.c
--- a/xen/arch/ia64/linux-xen/mca.c     Tue Sep 12 11:43:22 2006 -0600
+++ b/xen/arch/ia64/linux-xen/mca.c     Fri Sep 22 09:26:49 2006 +0900
@@ -80,6 +80,9 @@
 #ifdef XEN
 #include <xen/symbols.h>
 #include <xen/mm.h>
+#include <xen/event.h>
+#include <xen/softirq.h>
+#include <asm/xenmca.h>
 #endif
 
 #if defined(IA64_MCA_DEBUG_INFO)
@@ -107,18 +110,25 @@ unsigned long __per_cpu_mca[NR_CPUS];
 /* In mca_asm.S */
 extern void                    ia64_monarch_init_handler (void);
 extern void                    ia64_slave_init_handler (void);
+#ifdef XEN
+extern void setup_vector (unsigned int vec, struct irqaction *action);
+#endif
 
 static ia64_mc_info_t          ia64_mc_info;
 
-#ifndef XEN
 #define MAX_CPE_POLL_INTERVAL (15*60*HZ) /* 15 minutes */
 #define MIN_CPE_POLL_INTERVAL (2*60*HZ)  /* 2 minutes */
 #define CMC_POLL_INTERVAL     (1*60*HZ)  /* 1 minute */
 #define CPE_HISTORY_LENGTH    5
 #define CMC_HISTORY_LENGTH    5
 
+#ifndef        XEN 
 static struct timer_list cpe_poll_timer;
 static struct timer_list cmc_poll_timer;
+#else /* XEN */
+static struct timer cpe_poll_timer;
+static struct timer cmc_poll_timer;
+#endif
 /*
  * This variable tells whether we are currently in polling mode.
  * Start with this in the wrong state so we won't play w/ timers
@@ -135,11 +145,9 @@ static int cpe_poll_enabled = 1;
 static int cpe_poll_enabled = 1;
 
 extern void salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe);
-#endif /* !XEN */
 
 static int mca_init;
 
-#ifndef XEN
 /*
  * IA64_MCA log support
  */
@@ -156,11 +164,24 @@ typedef struct ia64_state_log_s
 
 static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES];
 
+#ifndef        XEN
 #define IA64_LOG_ALLOCATE(it, size) \
        {ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = \
                (ia64_err_rec_t *)alloc_bootmem(size); \
        ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = \
                (ia64_err_rec_t *)alloc_bootmem(size);}
+#else  /* XEN */
+#define IA64_LOG_ALLOCATE(it, size) \
+       do { \
+               unsigned int pageorder; \
+               pageorder  = get_order_from_bytes(sizeof(struct ia64_mca_cpu)); \
+               ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = \
+                 (ia64_err_rec_t *)alloc_xenheap_pages(pageorder); \
+               ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = \
+                 (ia64_err_rec_t *)alloc_xenheap_pages(pageorder); \
+       } while(0)
+#endif /* XEN */
+
 #define IA64_LOG_LOCK_INIT(it) spin_lock_init(&ia64_state_log[it].isl_lock)
 #define IA64_LOG_LOCK(it)      spin_lock_irqsave(&ia64_state_log[it].isl_lock, s)
 #define IA64_LOG_UNLOCK(it)    spin_unlock_irqrestore(&ia64_state_log[it].isl_lock,s)
@@ -175,6 +196,11 @@ static ia64_state_log_t ia64_state_log[I
 #define IA64_LOG_CURR_BUFFER(it)   (void *)((ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)]))
 #define IA64_LOG_COUNT(it)         ia64_state_log[it].isl_count
 
+#ifdef XEN
+struct list_head               sal_queue[IA64_MAX_LOG_TYPES];
+DEFINE_SPINLOCK(sal_queue_lock);
+#endif /* XEN */
+
 /*
  * ia64_log_init
  *     Reset the OS ia64 log buffer
@@ -201,6 +227,7 @@ ia64_log_init(int sal_info_type)
        memset(IA64_LOG_NEXT_BUFFER(sal_info_type), 0, max_size);
 }
 
+#ifndef XEN
 /*
  * ia64_log_get
  *
@@ -276,15 +303,151 @@ ia64_mca_log_sal_error_record(int sal_in
        if (rh->severity == sal_log_severity_corrected)
                ia64_sal_clear_state_info(sal_info_type);
 }
+#else  /* XEN */
+/*
+ * ia64_log_queue
+ *
+ *     Get the current MCA log from SAL and copy it into the OS log buffer.
+ *
+ *  Inputs  :   info_type   (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
+ *  Outputs :   size        (total record length)
+ *              *buffer     (ptr to error record)
+ *
+ */
+static u64
+ia64_log_queue(int sal_info_type, int virq)
+{
+       sal_log_record_header_t     *log_buffer;
+       u64                         total_len = 0;
+       int                         s;
+       sal_queue_entry_t           *e;
+       unsigned long               flags;
+
+       IA64_LOG_LOCK(sal_info_type);
+
+       /* Get the process state information */
+       log_buffer = IA64_LOG_NEXT_BUFFER(sal_info_type);
+
+       total_len = ia64_sal_get_state_info(sal_info_type, (u64 *)log_buffer);
+
+       if (total_len) {
+               int queue_type;
+
+               spin_lock_irqsave(&sal_queue_lock, flags);
+
+               if (sal_info_type == SAL_INFO_TYPE_MCA &&
+                               virq == VIRQ_MCA_CMC)
+                       queue_type = SAL_INFO_TYPE_CMC;
+               else
+                       queue_type = sal_info_type;
+
+               e = xmalloc(sal_queue_entry_t);
+               e->cpuid = smp_processor_id();
+               e->sal_info_type = sal_info_type;
+               e->vector = IA64_CMC_VECTOR;
+               e->virq = virq;
+               e->length = total_len;
+
+               list_add_tail(&e->list, &sal_queue[queue_type]);
+               spin_unlock_irqrestore(&sal_queue_lock, flags);
+
+               IA64_LOG_INDEX_INC(sal_info_type);
+               IA64_LOG_UNLOCK(sal_info_type);
+               if (sal_info_type != SAL_INFO_TYPE_MCA &&
+                   sal_info_type != SAL_INFO_TYPE_INIT) {
+                       IA64_MCA_DEBUG("%s: SAL error record type %d retrieved. "
+                                      "Record length = %ld\n", __FUNCTION__, sal_info_type, total_len);
+               }
+               return total_len;
+       } else {
+               IA64_LOG_UNLOCK(sal_info_type);
+               return 0;
+       }
+}
+#endif /* !XEN */
 
 /*
  * platform dependent error handling
  */
-#endif /* !XEN */
 #ifndef PLATFORM_MCA_HANDLERS
-#ifndef XEN
 
 #ifdef CONFIG_ACPI
+
+#ifdef XEN
+#define WARN_ON(condition) do { \
+       if (unlikely((condition)!=0)) { \
+               printk("Badness in %s at %s:%d\n", __FUNCTION__, __FILE__, __LINE__); \
+               dump_stack(); \
+       } \
+} while (0)
+
+/**
+ *     disable_irq_nosync - disable an irq without waiting
+ *     @irq: Interrupt to disable
+ *
+ *     Disable the selected interrupt line.  Disables and Enables are
+ *     nested.
+ *     Unlike disable_irq(), this function does not ensure existing
+ *     instances of the IRQ handler have completed before returning.
+ *
+ *     This function may be called from IRQ context.
+ */
+void disable_irq_nosync(unsigned int irq)
+{
+       irq_desc_t *desc = irq_desc + irq;
+       unsigned long flags;
+
+       if (irq >= NR_IRQS)
+               return;
+
+       spin_lock_irqsave(&desc->lock, flags);
+       if (!desc->depth++) {
+               desc->status |= IRQ_DISABLED;
+               desc->handler->disable(irq);
+       }
+       spin_unlock_irqrestore(&desc->lock, flags);
+}
+
+/**
+ *     enable_irq - enable handling of an irq
+ *     @irq: Interrupt to enable
+ *
+ *     Undoes the effect of one call to disable_irq().  If this
+ *     matches the last disable, processing of interrupts on this
+ *     IRQ line is re-enabled.
+ *
+ *     This function may be called from IRQ context.
+ */
+void enable_irq(unsigned int irq)
+{
+       irq_desc_t *desc = irq_desc + irq;
+       unsigned long flags;
+
+       if (irq >= NR_IRQS)
+               return;
+
+       spin_lock_irqsave(&desc->lock, flags);
+       switch (desc->depth) {
+       case 0:
+               WARN_ON(1);
+               break;
+       case 1: {
+               unsigned int status = desc->status & ~IRQ_DISABLED;
+
+               desc->status = status;
+               if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
+                       desc->status = status | IRQ_REPLAY;
+                       hw_resend_irq(desc->handler,irq);
+               }
+               desc->handler->enable(irq);
+               /* fall-through */
+       }
+       default:
+               desc->depth--;
+       }
+       spin_unlock_irqrestore(&desc->lock, flags);
+}
+#endif /* XEN */
 
 int cpe_vector = -1;
 
@@ -301,8 +464,14 @@ ia64_mca_cpe_int_handler (int cpe_irq, v
        /* SAL spec states this should run w/ interrupts enabled */
        local_irq_enable();
 
+#ifndef XEN
        /* Get the CPE error record and log it */
        ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE);
+#else  /* XEN */
+       /* CPE errors are not forwarded to dom0 */
+/*     ia64_log_queue(SAL_INFO_TYPE_CPE, VIRQ_MCA_CPE); */
+/*     send_guest_vcpu_virq(dom0->vcpu[0], VIRQ_MCA_CPE); */
+#endif /* XEN */
 
        spin_lock(&cpe_history_lock);
        if (!cpe_poll_enabled && cpe_vector >= 0) {
@@ -329,10 +498,17 @@ ia64_mca_cpe_int_handler (int cpe_irq, v
                         */
                        printk(KERN_WARNING "WARNING: Switching to polling CPE handler; error records may be lost\n");
 
+#ifndef        XEN 
                        mod_timer(&cpe_poll_timer, jiffies + MIN_CPE_POLL_INTERVAL);
 
                        /* lock already released, get out now */
                        return IRQ_HANDLED;
+#else  /* XEN */
+                       set_timer(&cpe_poll_timer, jiffies + MIN_CPE_POLL_INTERVAL);
+                       /* lock already released, get out now */
+                       return;
+#endif /* XEN */
+
                } else {
                        cpe_history[index++] = now;
                        if (index == CPE_HISTORY_LENGTH)
@@ -340,11 +516,12 @@ ia64_mca_cpe_int_handler (int cpe_irq, v
                }
        }
        spin_unlock(&cpe_history_lock);
+#ifndef        XEN
        return IRQ_HANDLED;
+#endif /* XEN */
 }
 
 #endif /* CONFIG_ACPI */
-#endif /* !XEN */
 
 static void
 show_min_state (pal_min_state_area_t *minstate)
@@ -592,7 +769,6 @@ init_handler_platform (pal_min_state_are
        while (1);                      /* hang city if no debugger */
 }
 
-#ifndef XEN
 #ifdef CONFIG_ACPI
 /*
  * ia64_mca_register_cpev
@@ -623,9 +799,7 @@ ia64_mca_register_cpev (int cpev)
 }
 #endif /* CONFIG_ACPI */
 
-#endif /* !XEN */
 #endif /* PLATFORM_MCA_HANDLERS */
-#ifndef XEN
 
 /*
  * ia64_mca_cmc_vector_setup
@@ -712,6 +886,7 @@ ia64_mca_cmc_vector_enable (void *dummy)
                       __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector);
 }
 
+#ifndef XEN
 /*
  * ia64_mca_cmc_vector_disable_keventd
  *
@@ -735,6 +910,7 @@ ia64_mca_cmc_vector_enable_keventd(void 
 {
        on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0);
 }
+#endif /* XEN  */
 
 /*
  * ia64_mca_wakeup_ipi_wait
@@ -845,7 +1021,10 @@ ia64_mca_rendez_int_handler(int rendez_i
 
        /* Enable all interrupts */
        local_irq_restore(flags);
+
+#ifndef XEN
        return IRQ_HANDLED;
+#endif /* XEN */
 }
 
 /*
@@ -866,7 +1045,9 @@ static irqreturn_t
 static irqreturn_t
 ia64_mca_wakeup_int_handler(int wakeup_irq, void *arg, struct pt_regs *ptregs)
 {
+#ifndef XEN
        return IRQ_HANDLED;
+#endif /* XEN */
 }
 
 /*
@@ -886,15 +1067,26 @@ static void
 static void
 ia64_return_to_sal_check(int recover)
 {
+#ifdef XEN
+       int cpu = smp_processor_id();
+#endif /* XEN */
 
        /* Copy over some relevant stuff from the sal_to_os_mca_handoff
         * so that it can be used at the time of os_mca_to_sal_handoff
         */
+#ifdef XEN
+       ia64_os_to_sal_handoff_state.imots_sal_gp =
+               ia64_sal_to_os_handoff_state[cpu].imsto_sal_gp;
+
+       ia64_os_to_sal_handoff_state.imots_sal_check_ra =
+               ia64_sal_to_os_handoff_state[cpu].imsto_sal_check_ra;
+#else /* XEN */
        ia64_os_to_sal_handoff_state.imots_sal_gp =
                ia64_sal_to_os_handoff_state.imsto_sal_gp;
 
        ia64_os_to_sal_handoff_state.imots_sal_check_ra =
                ia64_sal_to_os_handoff_state.imsto_sal_check_ra;
+#endif /* XEN */
 
        if (recover)
                ia64_os_to_sal_handoff_state.imots_os_status = IA64_MCA_CORRECTED;
@@ -904,8 +1096,13 @@ ia64_return_to_sal_check(int recover)
        /* Default = tell SAL to return to same context */
        ia64_os_to_sal_handoff_state.imots_context = IA64_MCA_SAME_CONTEXT;
 
+#ifdef XEN
+       ia64_os_to_sal_handoff_state.imots_new_min_state =
+               (u64 *)ia64_sal_to_os_handoff_state[cpu].pal_min_state;
+#else /* XEN */
        ia64_os_to_sal_handoff_state.imots_new_min_state =
                (u64 *)ia64_sal_to_os_handoff_state.pal_min_state;
+#endif /* XEN */
 
 }
 
@@ -953,27 +1150,44 @@ void
 void
 ia64_mca_ucmc_handler(void)
 {
+#ifdef XEN
+       int cpu = smp_processor_id();
+       pal_processor_state_info_t *psp = (pal_processor_state_info_t *)
+               &ia64_sal_to_os_handoff_state[cpu].proc_state_param;
+#else  /* XEN */
        pal_processor_state_info_t *psp = (pal_processor_state_info_t *)
                &ia64_sal_to_os_handoff_state.proc_state_param;
+#endif /* XEN */
        int recover; 
 
+#ifndef        XEN
        /* Get the MCA error record and log it */
        ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA);
+#else  /* XEN */
+       ia64_log_queue(SAL_INFO_TYPE_MCA, VIRQ_MCA_CMC);
+       send_guest_vcpu_virq(dom0->vcpu[0], VIRQ_MCA_CMC);
+#endif /* XEN */
 
        /* TLB error is only exist in this SAL error record */
        recover = (psp->tc && !(psp->cc || psp->bc || psp->rc || psp->uc))
        /* other error recovery */
+#ifndef        XEN
           || (ia64_mca_ucmc_extension 
                && ia64_mca_ucmc_extension(
                        IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA),
                        &ia64_sal_to_os_handoff_state,
                        &ia64_os_to_sal_handoff_state)); 
-
+#else  /* XEN */
+       ;
+#endif /* XEN */
+
+#ifndef XEN
        if (recover) {
                sal_log_record_header_t *rh = IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA);
                rh->severity = sal_log_severity_corrected;
                ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA);
        }
+#endif /* XEN */
        /*
         *  Wakeup all the processors which are spinning in the rendezvous
         *  loop.
@@ -984,8 +1198,10 @@ ia64_mca_ucmc_handler(void)
        ia64_return_to_sal_check(recover);
 }
 
+#ifndef XEN
 static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd, NULL);
 static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd, NULL);
+#endif /* !XEN */
 
 /*
  * ia64_mca_cmc_int_handler
@@ -1015,8 +1231,13 @@ ia64_mca_cmc_int_handler(int cmc_irq, vo
        /* SAL spec states this should run w/ interrupts enabled */
        local_irq_enable();
 
+#ifndef XEN    
        /* Get the CMC error record and log it */
        ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CMC);
+#else /* XEN */
+       ia64_log_queue(SAL_INFO_TYPE_CMC, VIRQ_MCA_CMC);
+       send_guest_vcpu_virq(dom0->vcpu[0], VIRQ_MCA_CMC);
+#endif /* XEN */
 
        spin_lock(&cmc_history_lock);
        if (!cmc_polling_enabled) {
@@ -1033,7 +1254,11 @@ ia64_mca_cmc_int_handler(int cmc_irq, vo
 
                        cmc_polling_enabled = 1;
                        spin_unlock(&cmc_history_lock);
+#ifndef XEN    /* XXX FIXME */
                        schedule_work(&cmc_disable_work);
+#else /* XEN */
+                       cpumask_raise_softirq(cpu_online_map, CMC_DISABLE_SOFTIRQ);
+#endif /* XEN */
 
                        /*
                         * Corrected errors will still be corrected, but
@@ -1042,10 +1267,18 @@ ia64_mca_cmc_int_handler(int cmc_irq, vo
                         */
                        printk(KERN_WARNING "WARNING: Switching to polling CMC handler; error records may be lost\n");
 
+#ifndef        XEN 
                        mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);
+#else  /* XEN */
+                       set_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);
+#endif /* XEN */
 
                        /* lock already released, get out now */
+#ifndef        XEN 
                        return IRQ_HANDLED;
+#else  /* XEN */
+                       return;
+#endif /* XEN */
                } else {
                        cmc_history[index++] = now;
                        if (index == CMC_HISTORY_LENGTH)
@@ -1053,7 +1286,9 @@ ia64_mca_cmc_int_handler(int cmc_irq, vo
                }
        }
        spin_unlock(&cmc_history_lock);
+#ifndef        XEN 
        return IRQ_HANDLED;
+#endif /* XEN */
 }
 
 /*
@@ -1082,7 +1317,9 @@ ia64_mca_cmc_int_caller(int cmc_irq, voi
        if (start_count == -1)
                start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CMC);
 
+#ifndef XEN
        ia64_mca_cmc_int_handler(cmc_irq, arg, ptregs);
+#endif /* XEN */
 
        for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++);
 
@@ -1093,18 +1330,27 @@ ia64_mca_cmc_int_caller(int cmc_irq, voi
                if (start_count == IA64_LOG_COUNT(SAL_INFO_TYPE_CMC)) {
 
                        printk(KERN_WARNING "Returning to interrupt driven CMC handler\n");
+#ifndef XEN    /* XXX FIXME */
                        schedule_work(&cmc_enable_work);
+#else /* XEN */
+                       cpumask_raise_softirq(cpu_online_map, CMC_ENABLE_SOFTIRQ);
+#endif /* XEN */
                        cmc_polling_enabled = 0;
 
                } else {
 
+#ifndef        XEN 
                        mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);
+#else  /* XEN */
+                       set_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);
+#endif /* XEN */
                }
 
                start_count = -1;
        }
-
+#ifndef        XEN 
        return IRQ_HANDLED;
+#endif /* XEN */
 }
 
 /*
@@ -1117,7 +1363,11 @@ ia64_mca_cmc_int_caller(int cmc_irq, voi
  *
  */
 static void
+#ifndef        XEN
 ia64_mca_cmc_poll (unsigned long dummy)
+#else  /* XEN */
+ia64_mca_cmc_poll (void *dummy)
+#endif /* XEN */
 {
        /* Trigger a CMC interrupt cascade  */
        platform_send_ipi(first_cpu(cpu_online_map), IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
@@ -1152,7 +1402,9 @@ ia64_mca_cpe_int_caller(int cpe_irq, voi
        if (start_count == -1)
                start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CPE);
 
+#ifndef XEN
        ia64_mca_cpe_int_handler(cpe_irq, arg, ptregs);
+#endif /* XEN */
 
        for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++);
 
@@ -1176,11 +1428,16 @@ ia64_mca_cpe_int_caller(int cpe_irq, voi
                }
 
                if (cpe_poll_enabled)
+#ifndef        XEN 
                        mod_timer(&cpe_poll_timer, jiffies + poll_time);
+#else  /* XEN */
+                       set_timer(&cpe_poll_timer, jiffies + poll_time);
+#endif /* XEN */
                start_count = -1;
        }
-
+#ifndef        XEN 
        return IRQ_HANDLED;
+#endif /* XEN */
 }
 
 /*
@@ -1194,14 +1451,17 @@ ia64_mca_cpe_int_caller(int cpe_irq, voi
  *
  */
 static void
+#ifndef        XEN
 ia64_mca_cpe_poll (unsigned long dummy)
+#else  /* XEN */
+ia64_mca_cpe_poll (void *dummy)
+#endif /* XEN */
 {
        /* Trigger a CPE interrupt cascade  */
        platform_send_ipi(first_cpu(cpu_online_map), IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
 }
 
 #endif /* CONFIG_ACPI */
-#endif /* !XEN */
 
 /*
  * C portion of the OS INIT handler
@@ -1247,7 +1507,6 @@ ia64_init_handler (struct pt_regs *pt, s
        init_handler_platform(ms, pt, sw);      /* call platform specific routines */
 }
 
-#ifndef XEN
 static int __init
 ia64_mca_disable_cpe_polling(char *str)
 {
@@ -1259,42 +1518,53 @@ __setup("disable_cpe_poll", ia64_mca_dis
 
 static struct irqaction cmci_irqaction = {
        .handler =      ia64_mca_cmc_int_handler,
+#ifndef XEN
        .flags =        SA_INTERRUPT,
+#endif /* XEN */
        .name =         "cmc_hndlr"
 };
 
 static struct irqaction cmcp_irqaction = {
        .handler =      ia64_mca_cmc_int_caller,
+#ifndef XEN
        .flags =        SA_INTERRUPT,
+#endif /* XEN */
        .name =         "cmc_poll"
 };
 
 static struct irqaction mca_rdzv_irqaction = {
        .handler =      ia64_mca_rendez_int_handler,
+#ifndef XEN
        .flags =        SA_INTERRUPT,
+#endif /* XEN */
        .name =         "mca_rdzv"
 };
 
 static struct irqaction mca_wkup_irqaction = {
        .handler =      ia64_mca_wakeup_int_handler,
+#ifndef XEN
        .flags =        SA_INTERRUPT,
+#endif /* XEN */
        .name =         "mca_wkup"
 };
 
 #ifdef CONFIG_ACPI
 static struct irqaction mca_cpe_irqaction = {
        .handler =      ia64_mca_cpe_int_handler,
+#ifndef XEN
        .flags =        SA_INTERRUPT,
+#endif /* XEN */
        .name =         "cpe_hndlr"
 };
 
 static struct irqaction mca_cpep_irqaction = {
        .handler =      ia64_mca_cpe_int_caller,
+#ifndef XEN
        .flags =        SA_INTERRUPT,
+#endif /* XEN */
        .name =         "cpe_poll"
 };
 #endif /* CONFIG_ACPI */
-#endif /* !XEN */
 
 /* Do per-CPU MCA-related initialization.  */
 
@@ -1328,6 +1598,13 @@ ia64_mca_cpu_init(void *cpu_data)
 #endif
                }
        }
+#ifdef XEN
+       else {
+               int i;
+               for (i = 0; i < IA64_MAX_LOG_TYPES; i++)
+                       ia64_log_queue(i, 0);
+       }
+#endif /* XEN */
 
         /*
          * The MCA info structure was allocated earlier and its
@@ -1394,17 +1671,14 @@ ia64_mca_init(void)
        ia64_fptr_t *mon_init_ptr = (ia64_fptr_t *)ia64_monarch_init_handler;
        ia64_fptr_t *slave_init_ptr = (ia64_fptr_t *)ia64_slave_init_handler;
        ia64_fptr_t *mca_hldlr_ptr = (ia64_fptr_t *)ia64_os_mca_dispatch;
-#ifdef XEN
-       s64 rc;
-
-       slave_init_ptr = (ia64_fptr_t *)ia64_monarch_init_handler;
-
-       IA64_MCA_DEBUG("%s: begin\n", __FUNCTION__);
-#else
        int i;
        s64 rc;
        struct ia64_sal_retval isrv;
        u64 timeout = IA64_MCA_RENDEZ_TIMEOUT;  /* platform specific */
+
+#ifdef XEN
+       slave_init_ptr = (ia64_fptr_t *)ia64_monarch_init_handler;
+#endif /* !XEN */
 
        IA64_MCA_DEBUG("%s: begin\n", __FUNCTION__);
 
@@ -1450,7 +1724,6 @@ ia64_mca_init(void)
        }
 
        IA64_MCA_DEBUG("%s: registered MCA rendezvous spinloop and wakeup 
mech.\n", __FUNCTION__);
-#endif /* !XEN */
 
        ia64_mc_info.imi_mca_handler        = ia64_tpa(mca_hldlr_ptr->fp);
        /*
@@ -1502,7 +1775,6 @@ ia64_mca_init(void)
 
        IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", 
__FUNCTION__);
 
-#ifndef XEN
        /*
         *  Configure the CMCI/P vector and handler. Interrupts for CMC are
         *  per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c).
@@ -1530,13 +1802,24 @@ ia64_mca_init(void)
        ia64_log_init(SAL_INFO_TYPE_INIT);
        ia64_log_init(SAL_INFO_TYPE_CMC);
        ia64_log_init(SAL_INFO_TYPE_CPE);
-#endif /* !XEN */
+
+#ifdef XEN
+       INIT_LIST_HEAD(&sal_queue[SAL_INFO_TYPE_MCA]);
+       INIT_LIST_HEAD(&sal_queue[SAL_INFO_TYPE_INIT]);
+       INIT_LIST_HEAD(&sal_queue[SAL_INFO_TYPE_CMC]);
+       INIT_LIST_HEAD(&sal_queue[SAL_INFO_TYPE_CPE]);
+
+       open_softirq(CMC_DISABLE_SOFTIRQ, (softirq_handler)ia64_mca_cmc_vector_disable);
+       open_softirq(CMC_ENABLE_SOFTIRQ, (softirq_handler)ia64_mca_cmc_vector_enable);
+
+       for (i = 0; i < IA64_MAX_LOG_TYPES; i++)
+               ia64_log_queue(i, 0);
+#endif /* !XEN */
 
        mca_init = 1;
        printk(KERN_INFO "MCA related initialization done\n");
 }
 
-#ifndef XEN
 /*
  * ia64_mca_late_init
  *
@@ -1554,20 +1837,34 @@ ia64_mca_late_init(void)
                return 0;
 
        /* Setup the CMCI/P vector and handler */
+#ifndef        XEN
        init_timer(&cmc_poll_timer);
        cmc_poll_timer.function = ia64_mca_cmc_poll;
+#else  /* XEN */
+       init_timer(&cmc_poll_timer, ia64_mca_cmc_poll, NULL, smp_processor_id());
+       printk("INIT_TIMER(cmc_poll_timer): on cpu%d\n", smp_processor_id());
+#endif /* XEN */
 
        /* Unmask/enable the vector */
        cmc_polling_enabled = 0;
+#ifndef XEN    /* XXX FIXME */
        schedule_work(&cmc_enable_work);
+#else /* XEN */
+       cpumask_raise_softirq(cpu_online_map, CMC_ENABLE_SOFTIRQ);
+#endif /* XEN */
 
        IA64_MCA_DEBUG("%s: CMCI/P setup and enabled.\n", __FUNCTION__);
 
 #ifdef CONFIG_ACPI
        /* Setup the CPEI/P vector and handler */
        cpe_vector = acpi_request_vector(ACPI_INTERRUPT_CPEI);
+#ifndef        XEN
        init_timer(&cpe_poll_timer);
        cpe_poll_timer.function = ia64_mca_cpe_poll;
+#else  /* XEN */
+       init_timer(&cpe_poll_timer, ia64_mca_cpe_poll, NULL, smp_processor_id());
+       printk("INIT_TIMER(cpe_poll_timer): on cpu%d\n", smp_processor_id());
+#endif /* XEN */
 
        {
                irq_desc_t *desc;
@@ -1580,7 +1877,11 @@ ia64_mca_late_init(void)
                                if (irq_to_vector(irq) == cpe_vector) {
                                        desc = irq_descp(irq);
                                        desc->status |= IRQ_PER_CPU;
+#ifdef XEN
+                                       setup_vector(irq, &mca_cpe_irqaction);
+#else
                                        setup_irq(irq, &mca_cpe_irqaction);
+#endif
                                }
                        ia64_mca_register_cpev(cpe_vector);
                        IA64_MCA_DEBUG("%s: CPEI/P setup and enabled.\n", 
__FUNCTION__);
@@ -1597,5 +1898,8 @@ ia64_mca_late_init(void)
        return 0;
 }
 
+#ifndef XEN
 device_initcall(ia64_mca_late_init);
+#else /* !XEN */
+__initcall(ia64_mca_late_init);
 #endif /* !XEN */
_______________________________________________
Xen-ia64-devel mailing list
Xen-ia64-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-ia64-devel