# HG changeset patch
# User sos22@xxxxxxxxxxxxxxxxxxxx
# Node ID 3d45fb64b064d4cbf239aea83b2e8b994ed7b356
# Parent 5a7efe0cf5fbe7b3b9631fc695d7ab2f669ed3c6
Tidy up the suspend/resume SMP path: gate the new IPI and
event-channel upcall diagnostics behind an uber_debug flag, clear
stale pending bits when binding IPI and VIRQ event channels, let
restore_vcpu_context() report failure instead of returning void, and
track the previously online and present CPUs in separate masks rather
than the single feasible_cpus mask.
Signed-off-by: Steven Smith, sos22@xxxxxxxxx
diff -r 5a7efe0cf5fb -r 3d45fb64b064 linux-2.6-xen-sparse/arch/xen/i386/kernel/smp.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/smp.c Thu Aug 18 16:28:41 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/smp.c Fri Aug 19 10:18:53 2005
@@ -129,10 +129,11 @@
DECLARE_PER_CPU(int, ipi_to_evtchn[NR_IPIS]);
+extern unsigned uber_debug;
+
static inline void __send_IPI_one(unsigned int cpu, int vector)
{
unsigned int evtchn;
- int r;
evtchn = per_cpu(ipi_to_evtchn, cpu)[vector];
// printk("send_IPI_mask_bitmask cpu %d vector %d evtchn %d\n", cpu, vector, evtchn);
@@ -143,6 +144,9 @@
synch_test_bit(evtchn, &s->evtchn_mask[0]))
;
#endif
+ if (uber_debug)
+ printk("<0>Send ipi %d to %d evtchn %d.\n",
+ vector, cpu, evtchn);
notify_via_evtchn(evtchn);
} else
printk("send_IPI to unbound port %d/%d",
@@ -601,6 +605,7 @@
void (*func) (void *info) = call_data->func;
void *info = call_data->info;
int wait = call_data->wait;
+ extern unsigned uber_debug;
/*
* Notify initiating CPU that I've grabbed the data and am
@@ -612,6 +617,9 @@
* At this point the info structure may be out of scope unless wait==1
*/
irq_enter();
+ if (uber_debug && smp_processor_id())
+ printk("<0>Processor %d calling %p.\n", smp_processor_id(),
+ func);
(*func)(info);
irq_exit();
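
For reference, the smp_call_function_interrupt() hunk above follows the
usual cross-call discipline: func, info and wait are copied out of the
shared call_data block before the initiating CPU is acknowledged,
because the initiator may drop the block as soon as the ack lands
unless wait == 1. A minimal user-space sketch of that discipline (the
names are illustrative, and C11 atomics stand in for the kernel's mb()
and atomic ops):

    #include <stdio.h>
    #include <stdatomic.h>

    struct call_data_struct {
        void (*func)(void *info);
        void *info;
        atomic_int started;    /* ack: data copied, safe to reuse */
        atomic_int finished;   /* only meaningful when wait == 1 */
        int wait;
    };

    static struct call_data_struct *call_data;

    static void smp_call_function_interrupt_sketch(void)
    {
        /* Copy everything out before acking: once started is bumped,
         * the initiator may reuse call_data unless wait == 1. */
        void (*func)(void *info) = call_data->func;
        void *info = call_data->info;
        int wait = call_data->wait;

        atomic_fetch_add(&call_data->started, 1);
        (*func)(info);
        if (wait)
            atomic_fetch_add(&call_data->finished, 1);
    }

    static void say_hello(void *info)
    {
        printf("called with %s\n", (const char *)info);
    }

    int main(void)
    {
        struct call_data_struct d = { say_hello, "info", 0, 0, 1 };
        call_data = &d;
        smp_call_function_interrupt_sketch();
        return 0;
    }
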
diff -r 5a7efe0cf5fb -r 3d45fb64b064 linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c
--- a/linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c Thu Aug 18 16:28:41 2005
+++ b/linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c Fri Aug 19 10:18:53 2005
@@ -124,6 +124,8 @@
#define VALID_EVTCHN(_chn) ((_chn) >= 0)
+unsigned uber_debug;
+
/*
* Force a proper event-channel callback from Xen after clearing the
* callback mask. We do this in a very simple manner, by making a call
@@ -144,7 +146,7 @@
vcpu_info_t *vcpu_info = &s->vcpu_data[cpu];
vcpu_info->evtchn_upcall_pending = 0;
-
+
/* NB. No need for a barrier here -- XCHG is a barrier on x86. */
l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
while ( l1 != 0 )
@@ -158,9 +160,13 @@
l2 &= ~(1 << l2i);
port = (l1i << 5) + l2i;
- if ( (irq = evtchn_to_irq[port]) != -1 )
+ if (uber_debug && cpu)
+ printk("<0>Upcall to %d on %d.\n", port, cpu);
+ if ( (irq = evtchn_to_irq[port]) != -1 ) {
+ if (uber_debug && cpu)
+ printk("<0>IRQ %d.\n", irq);
do_IRQ(irq, regs);
- else
+ } else
evtchn_device_upcall(port);
}
}
@@ -272,6 +278,8 @@
evtchn_to_irq[evtchn] = irq;
irq_to_evtchn[irq] = evtchn;
+ printk("<0>evtchn_to_irq[%d] = %d.\n", evtchn,
+ evtchn_to_irq[evtchn]);
per_cpu(ipi_to_evtchn, vcpu)[ipi] = evtchn;
bind_evtchn_to_cpu(evtchn, vcpu);
@@ -279,6 +287,7 @@
spin_unlock(&irq_mapping_update_lock);
clear_bit(evtchn, (unsigned long *)HYPERVISOR_shared_info->evtchn_mask);
+ clear_bit(evtchn, (unsigned long *)HYPERVISOR_shared_info->evtchn_pending);
}
void _bind_virq_to_irq(int virq, int cpu, int irq)
@@ -294,7 +303,6 @@
panic("Failed to bind virtual IRQ %d\n", virq);
evtchn = op.u.bind_virq.port;
-
evtchn_to_irq[irq_to_evtchn[irq]] = -1;
irq_to_evtchn[irq] = -1;
@@ -306,6 +314,9 @@
bind_evtchn_to_cpu(evtchn, cpu);
spin_unlock(&irq_mapping_update_lock);
+
+ clear_bit(evtchn, (unsigned long *)HYPERVISOR_shared_info->evtchn_mask);
+ clear_bit(evtchn, (unsigned long *)HYPERVISOR_shared_info->evtchn_pending);
}
int bind_ipi_to_irq(int ipi)
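
For reference, the upcall loop these hunks instrument walks a two-level
pending bitmap: evtchn_pending_sel selects which 32-bit words of
evtchn_pending need scanning, and each set bit in a selected word names
port (l1i << 5) + l2i. A standalone sketch of that scan (plain loads
replace the kernel's xchg(), __builtin_ctzl() is a GCC-ism standing in
for the bit scan, and the << 5 matches the 32-bit words of the i386
shared info):

    #include <stdio.h>

    static unsigned long pending_sel;   /* level 1: one bit per word */
    static unsigned long pending[32];   /* level 2: one bit per port */

    static void do_upcall_sketch(void (*handle_port)(int port))
    {
        unsigned long l1 = pending_sel;  /* kernel: xchg(&sel, 0) */
        pending_sel = 0;

        while (l1 != 0) {
            int l1i = __builtin_ctzl(l1);
            l1 &= ~(1UL << l1i);

            unsigned long l2 = pending[l1i];
            pending[l1i] = 0;
            while (l2 != 0) {
                int l2i = __builtin_ctzl(l2);
                l2 &= ~(1UL << l2i);
                handle_port((l1i << 5) + l2i);
            }
        }
    }

    static void show_port(int port)
    {
        printf("upcall for port %d\n", port);
    }

    int main(void)
    {
        pending[1] |= 1UL << 3;          /* port (1 << 5) + 3 == 35 */
        pending_sel |= 1UL << 1;
        do_upcall_sketch(show_port);
        return 0;
    }
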
diff -r 5a7efe0cf5fb -r 3d45fb64b064 linux-2.6-xen-sparse/arch/xen/kernel/reboot.c
--- a/linux-2.6-xen-sparse/arch/xen/kernel/reboot.c Thu Aug 18 16:28:41 2005
+++ b/linux-2.6-xen-sparse/arch/xen/kernel/reboot.c Fri Aug 19 10:18:53 2005
@@ -70,7 +70,13 @@
int r;
int gdt_pages;
r = HYPERVISOR_vcpu_pickle(vcpu, ctxt);
- BUG_ON(r != 0);
+ if (r != 0)
+ panic("pickling vcpu %d -> %d!\n", vcpu, r);
+
+ /* Translate from machine to physical addresses where necessary,
+ so that they can be translated to our new machine address space
+ after resume. libxc is responsible for doing this to vcpu0,
+ but we do it to the others. */
gdt_pages = (ctxt->gdt_ents + 511) / 512;
ctxt->ctrlreg[3] = machine_to_phys(ctxt->ctrlreg[3]);
for (r = 0; r < gdt_pages; r++)
@@ -81,7 +87,7 @@
atomic_t vcpus_rebooting;
-static void restore_vcpu_context(int vcpu, vcpu_guest_context_t *ctxt)
+static int restore_vcpu_context(int vcpu, vcpu_guest_context_t *ctxt)
{
int r;
int gdt_pages = (ctxt->gdt_ents + 511) / 512;
@@ -93,21 +99,25 @@
((unsigned long *)ctxt->user_regs.esp)[0] = ctxt->user_regs.eip;
ctxt->user_regs.eip = (unsigned long)_restore_vcpu;
+ /* De-canonicalise. libxc handles this for vcpu 0, but we need
+ to do it for the other vcpus. */
ctxt->ctrlreg[3] = phys_to_machine(ctxt->ctrlreg[3]);
for (r = 0; r < gdt_pages; r++)
ctxt->gdt_frames[r] = pfn_to_mfn(ctxt->gdt_frames[r]);
+
atomic_set(&vcpus_rebooting, 1);
r = HYPERVISOR_boot_vcpu(vcpu, ctxt);
if (r != 0) {
printk(KERN_EMERG "Failed to reboot vcpu %d (%d)\n", vcpu, r);
- return;
- }
- /* Hmm... slight hack: make sure the cpus come up in order,
- because that way they get the same evtchn numbers this time as
- they did last time, which works around a few bugs. */
- /* XXX */
+ return -1;
+ }
+
+ /* Make sure we wait for the new vcpu to come up before trying to do
+ anything with it or starting the next one. */
while (atomic_read(&vcpus_rebooting))
barrier();
+
+ return 0;
}
extern unsigned uber_debug;
@@ -159,7 +169,7 @@
extern unsigned long max_pfn;
extern unsigned int *pfn_to_mfn_frame_list;
- cpumask_t feasible_cpus;
+ cpumask_t prev_online_cpus, prev_present_cpus;
int err = 0;
BUG_ON(smp_processor_id() != 0);
@@ -186,7 +196,7 @@
/* (We don't need to worry about other cpus bringing stuff up,
since by the time num_online_cpus() == 1, there aren't any
other cpus) */
- cpus_clear(feasible_cpus);
+ cpus_clear(prev_online_cpus);
preempt_disable();
while (num_online_cpus() > 1) {
preempt_enable();
@@ -198,17 +208,24 @@
printk(KERN_CRIT "Failed to take all CPUs down: %d.\n", err);
goto out_reenable_cpus;
}
- cpu_set(i, feasible_cpus);
+ cpu_set(i, prev_online_cpus);
}
+ preempt_disable();
}
suspend_record->nr_pfns = max_pfn; /* final number of pfns */
__cli();
- for (i = 0; i < NR_CPUS; i++)
- if (cpu_isset(i, feasible_cpus))
- save_vcpu_context(i, &suspended_cpu_records[i]);
+ preempt_enable();
+
+ cpus_clear(prev_present_cpus);
+ for_each_present_cpu(i) {
+ if (i == 0)
+ continue;
+ save_vcpu_context(i, &suspended_cpu_records[i]);
+ cpu_set(i, prev_present_cpus);
+ }
#ifdef __i386__
mm_pin_all();
@@ -282,26 +299,23 @@
usbif_resume();
- for (i = 0; i < NR_CPUS; i++)
- if (cpu_isset(i, feasible_cpus))
- restore_vcpu_context(i, &suspended_cpu_records[i]);
-
- printk("<0>All cpus rebooted...\n");
+ for_each_cpu_mask(i, prev_present_cpus) {
+ restore_vcpu_context(i, &suspended_cpu_records[i]);
+ }
+
__sti();
out_reenable_cpus:
- while (!cpus_empty(feasible_cpus)) {
- i = first_cpu(feasible_cpus);
- printk("<0>Bring %d up.\n", i);
+ for_each_cpu_mask(i, prev_online_cpus) {
j = cpu_up(i);
- printk("<0>cpu_up(%d) -> %d.\n", i, j);
if (j != 0) {
printk(KERN_CRIT "Failed to bring cpu %d back up (%d).\n",
i, j);
err = j;
}
- cpu_clear(i, feasible_cpus);
- }
+ }
+
+ uber_debug = 0;
out:
if ( suspend_record != NULL )
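
The restore path above serialises vcpu bring-up with the
vcpus_rebooting flag: cpu0 sets it, asks Xen to boot the next vcpu, and
spins until the new vcpu's trampoline clears it, so the vcpus come up
strictly in order and each is alive before the next is started. A
minimal user-space sketch of that handshake (pthreads stand in for
HYPERVISOR_boot_vcpu(), and the names are illustrative):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int vcpus_rebooting;

    /* Stand-in for the _restore_vcpu trampoline: the freshly booted
     * vcpu clears the flag once its state is in place. */
    static void *restore_vcpu_trampoline(void *arg)
    {
        (void)arg;
        atomic_store(&vcpus_rebooting, 0);
        return NULL;
    }

    static int restore_one_vcpu(int vcpu, pthread_t *t)
    {
        atomic_store(&vcpus_rebooting, 1);
        if (pthread_create(t, NULL, restore_vcpu_trampoline, NULL)) {
            fprintf(stderr, "Failed to reboot vcpu %d\n", vcpu);
            return -1;
        }
        /* Wait for the new vcpu before starting the next one -- the
         * same spin restore_vcpu_context() does with barrier(). */
        while (atomic_load(&vcpus_rebooting))
            ;
        return 0;
    }

    int main(void)
    {
        pthread_t threads[4];

        for (int vcpu = 1; vcpu < 4; vcpu++)
            if (restore_one_vcpu(vcpu, &threads[vcpu]))
                return 1;
        for (int vcpu = 1; vcpu < 4; vcpu++)
            pthread_join(threads[vcpu], NULL);
        printf("all vcpus restored in order\n");
        return 0;
    }

Build with cc -pthread; the spin-wait mirrors the kernel's busy loop
rather than using a condition variable, since that is the point being
illustrated.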