# HG changeset patch
# User adsharma@xxxxxxxxxxxxxxxxxxxx
# Node ID c589ca6d292b28e0da2ccf3675fc6d63d7f1843d
# Parent f90820349056d0c5b8442078187cf7abafeb9fd4
# Parent 2f75dac09365959d87709d15a181201abf189cb8
Merge.
diff -r f90820349056 -r c589ca6d292b .hgignore
--- a/.hgignore Thu Aug 11 18:44:59 2005
+++ b/.hgignore Thu Aug 11 20:38:44 2005
@@ -116,7 +116,6 @@
^tools/ioemu/target-.*/Makefile$
^tools/ioemu/target-.*/config\..*$
^tools/ioemu/target-.*/qemu-dm$
-^tools/ioemu/target-.*/qemu-vgaram-bin$
^tools/libxc/xen/.*$
^tools/misc/cpuperf/cpuperf-perfcntr$
^tools/misc/cpuperf/cpuperf-xen$
diff -r f90820349056 -r c589ca6d292b Makefile
--- a/Makefile Thu Aug 11 18:44:59 2005
+++ b/Makefile Thu Aug 11 20:38:44 2005
@@ -101,11 +101,6 @@
for i in $(ALLKERNELS) ; do $(MAKE) $$i-delete ; done
for i in $(ALLSPARSETREES) ; do $(MAKE) $$i-mrproper ; done
-install-twisted:
- wget http://www.twistedmatrix.com/products/get-current.epy
- tar -zxf Twisted-*.tar.gz
- cd Twisted-* && python setup.py install
-
install-logging: LOGGING=logging-0.4.9.2
install-logging:
[ -f $(LOGGING).tar.gz ] || wget http://www.red-dove.com/$(LOGGING).tar.gz
@@ -149,7 +144,6 @@
@echo ' kclean - clean guest kernel build trees'
@echo ''
@echo 'Dependency installation targets:'
- @echo ' install-twisted - install the Twisted Matrix Framework'
@echo ' install-logging - install the Python Logging package'
@echo ' install-iptables - install iptables tools'
@echo ''
diff -r f90820349056 -r c589ca6d292b linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_64
--- a/linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_64 Thu Aug 11 18:44:59 2005
+++ b/linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_64 Thu Aug 11 20:38:44 2005
@@ -669,7 +669,7 @@
CONFIG_JBD=m
# CONFIG_JBD_DEBUG is not set
CONFIG_FS_MBCACHE=y
-CONFIG_REISERFS_FS=m
+CONFIG_REISERFS_FS=y
# CONFIG_REISERFS_CHECK is not set
CONFIG_REISERFS_PROC_INFO=y
CONFIG_REISERFS_FS_XATTR=y
diff -r f90820349056 -r c589ca6d292b linux-2.6-xen-sparse/arch/xen/i386/kernel/time.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/time.c Thu Aug 11 18:44:59 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/time.c Thu Aug 11 20:38:44 2005
@@ -227,12 +227,38 @@
return scale_delta(delta, shadow->tsc_to_usec_mul, shadow->tsc_shift);
}
-static void update_wallclock(void)
-{
- shared_info_t *s = HYPERVISOR_shared_info;
+static void __update_wallclock(time_t sec, long nsec)
+{
long wtm_nsec, xtime_nsec;
time_t wtm_sec, xtime_sec;
- u64 tmp, nsec;
+ u64 tmp, wc_nsec;
+
+ /* Adjust wall-clock time base based on wall_jiffies ticks. */
+ wc_nsec = processed_system_time;
+ wc_nsec += (u64)sec * 1000000000ULL;
+ wc_nsec += (u64)nsec;
+ wc_nsec -= (jiffies - wall_jiffies) * (u64)(NSEC_PER_SEC / HZ);
+
+ /* Split wallclock base into seconds and nanoseconds. */
+ tmp = wc_nsec;
+ xtime_nsec = do_div(tmp, 1000000000);
+ xtime_sec = (time_t)tmp;
+
+ wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - xtime_sec);
+ wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - xtime_nsec);
+
+ set_normalized_timespec(&xtime, xtime_sec, xtime_nsec);
+ set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
+
+ time_adjust = 0; /* stop active adjtime() */
+ time_status |= STA_UNSYNC;
+ time_maxerror = NTP_PHASE_LIMIT;
+ time_esterror = NTP_PHASE_LIMIT;
+}
+
+static void update_wallclock(void)
+{
+ shared_info_t *s = HYPERVISOR_shared_info;
do {
shadow_tv_version = s->wc_version;
@@ -243,25 +269,8 @@
}
while ((s->wc_version & 1) | (shadow_tv_version ^ s->wc_version));
- if (independent_wallclock)
- return;
-
- /* Adjust wall-clock time base based on wall_jiffies ticks. */
- nsec = processed_system_time;
- nsec += (u64)shadow_tv.tv_sec * 1000000000ULL;
- nsec += (u64)shadow_tv.tv_nsec;
- nsec -= (jiffies - wall_jiffies) * (u64)(NSEC_PER_SEC / HZ);
-
- /* Split wallclock base into seconds and nanoseconds. */
- tmp = nsec;
- xtime_nsec = do_div(tmp, 1000000000);
- xtime_sec = (time_t)tmp;
-
- wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - xtime_sec);
- wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - xtime_nsec);
-
- set_normalized_timespec(&xtime, xtime_sec, xtime_nsec);
- set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
+ if (!independent_wallclock)
+ __update_wallclock(shadow_tv.tv_sec, shadow_tv.tv_nsec);
}
/*
@@ -408,18 +417,14 @@
int do_settimeofday(struct timespec *tv)
{
- time_t wtm_sec, sec = tv->tv_sec;
- long wtm_nsec;
+ time_t sec;
s64 nsec;
- struct timespec xentime;
unsigned int cpu;
struct shadow_time_info *shadow;
+ dom0_op_t op;
if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
-
- if (!independent_wallclock && !(xen_start_info.flags & SIF_INITDOMAIN))
- return 0; /* Silent failure? */
cpu = get_cpu();
shadow = &per_cpu(shadow_time, cpu);
@@ -431,51 +436,30 @@
* overflows. If that were to happen then our shadow time values would
* be stale, so we can retry with fresh ones.
*/
- again:
- nsec = (s64)tv->tv_nsec - (s64)get_nsec_offset(shadow);
- if (unlikely(!time_values_up_to_date(cpu))) {
+ for ( ; ; ) {
+ nsec = (s64)tv->tv_nsec - (s64)get_nsec_offset(shadow);
+ if (time_values_up_to_date(cpu))
+ break;
get_time_values_from_xen();
- goto again;
- }
-
+ }
+ sec = tv->tv_sec;
__normalize_time(&sec, &nsec);
- set_normalized_timespec(&xentime, sec, nsec);
-
- /*
- * This is revolting. We need to set "xtime" correctly. However, the
- * value in this location is the value at the most recent update of
- * wall time. Discover what correction gettimeofday() would have
- * made, and then undo it!
- */
- nsec -= (jiffies - wall_jiffies) * TICK_NSEC;
-
- nsec -= (shadow->system_timestamp - processed_system_time);
-
- __normalize_time(&sec, &nsec);
- wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
- wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
-
- set_normalized_timespec(&xtime, sec, nsec);
- set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
-
- time_adjust = 0; /* stop active adjtime() */
- time_status |= STA_UNSYNC;
- time_maxerror = NTP_PHASE_LIMIT;
- time_esterror = NTP_PHASE_LIMIT;
-
-#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+
if ((xen_start_info.flags & SIF_INITDOMAIN) &&
!independent_wallclock) {
- dom0_op_t op;
op.cmd = DOM0_SETTIME;
- op.u.settime.secs = xentime.tv_sec;
- op.u.settime.nsecs = xentime.tv_nsec;
+ op.u.settime.secs = sec;
+ op.u.settime.nsecs = nsec;
op.u.settime.system_time = shadow->system_timestamp;
- write_sequnlock_irq(&xtime_lock);
HYPERVISOR_dom0_op(&op);
- } else
-#endif
- write_sequnlock_irq(&xtime_lock);
+ update_wallclock();
+ } else if (independent_wallclock) {
+ nsec -= shadow->system_timestamp;
+ __normalize_time(&sec, &nsec);
+ __update_wallclock(sec, nsec);
+ }
+
+ write_sequnlock_irq(&xtime_lock);
put_cpu();
@@ -491,6 +475,9 @@
int retval;
WARN_ON(irqs_disabled());
+
+ if (!(xen_start_info.flags & SIF_INITDOMAIN))
+ return 0;
/* gets recalled with irq locally disabled */
spin_lock_irq(&rtc_lock);
@@ -603,8 +590,10 @@
profile_tick(CPU_PROFILING, regs);
}
- if (unlikely(shadow_tv_version != HYPERVISOR_shared_info->wc_version))
+ if (shadow_tv_version != HYPERVISOR_shared_info->wc_version) {
update_wallclock();
+ clock_was_set();
+ }
}
/*
diff -r f90820349056 -r c589ca6d292b linux-2.6-xen-sparse/arch/xen/i386/mm/pgtable.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/mm/pgtable.c Thu Aug 11 18:44:59 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/pgtable.c Thu Aug 11 20:38:44 2005
@@ -25,6 +25,7 @@
#include <asm/mmu_context.h>
#include <asm-xen/foreign_page.h>
+#include <asm-xen/hypervisor.h>
void show_mem(void)
{
@@ -273,6 +274,11 @@
void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
{
unsigned long flags;
+
+#ifdef CONFIG_X86_PAE
+ /* this gives us a page below 4GB */
+ xen_contig_memory((unsigned long)pgd, 0);
+#endif
if (!HAVE_SHARED_KERNEL_PMD)
spin_lock_irqsave(&pgd_lock, flags);
diff -r f90820349056 -r c589ca6d292b linux-2.6-xen-sparse/arch/xen/x86_64/Kconfig
--- a/linux-2.6-xen-sparse/arch/xen/x86_64/Kconfig Thu Aug 11 18:44:59 2005
+++ b/linux-2.6-xen-sparse/arch/xen/x86_64/Kconfig Thu Aug 11 20:38:44 2005
@@ -125,6 +125,10 @@
config X86_IO_APIC
bool
default XEN_PRIVILEGED_GUEST
+
+config X86_XEN_GENAPIC
+ bool
+ default XEN_PRIVILEGED_GUEST || SMP
config X86_LOCAL_APIC
bool
diff -r f90820349056 -r c589ca6d292b linux-2.6-xen-sparse/arch/xen/x86_64/kernel/Makefile
--- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/Makefile Thu Aug 11 18:44:59 2005
+++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/Makefile Thu Aug 11 20:38:44 2005
@@ -25,11 +25,12 @@
c-obj-$(CONFIG_X86_MSR) += msr.o
obj-$(CONFIG_MICROCODE) += microcode.o
obj-$(CONFIG_X86_CPUID) += cpuid.o
-#obj-$(CONFIG_SMP) += smp.o smpboot.o trampoline.o
+obj-$(CONFIG_SMP) += smp.o smpboot.o
obj-$(CONFIG_X86_LOCAL_APIC) += apic.o
c-obj-$(CONFIG_X86_LOCAL_APIC) += nmi.o
obj-$(CONFIG_X86_IO_APIC) += io_apic.o mpparse.o
-c-obj-$(CONFIG_X86_IO_APIC) += genapic.o genapic_cluster.o genapic_flat.o
+obj-$(CONFIG_X86_XEN_GENAPIC) += genapic.o genapic_xen.o
+c-obj-$(CONFIG_X86_IO_APIC) += genapic_cluster.o genapic_flat.o
#obj-$(CONFIG_PM) += suspend.o
#obj-$(CONFIG_SOFTWARE_SUSPEND) += suspend_asm.o
#obj-$(CONFIG_CPU_FREQ) += cpufreq/
diff -r f90820349056 -r c589ca6d292b linux-2.6-xen-sparse/arch/xen/x86_64/kernel/apic.c
--- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/apic.c Thu Aug 11 18:44:59 2005
+++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/apic.c Thu Aug 11 20:38:44 2005
@@ -48,7 +48,7 @@
int cpu = smp_processor_id();
profile_tick(CPU_PROFILING, regs);
-#if 0
+#ifndef CONFIG_XEN
if (--per_cpu(prof_counter, cpu) <= 0) {
/*
* The multiplier may have changed since the last time we got
diff -r f90820349056 -r c589ca6d292b linux-2.6-xen-sparse/arch/xen/x86_64/kernel/head.S
--- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/head.S Thu Aug 11 18:44:59 2005
+++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/head.S Thu Aug 11 20:38:44 2005
@@ -41,7 +41,6 @@
startup_64:
ENTRY(_start)
cld
- movq init_rsp(%rip),%rsp
/* Copy the necessary stuff from xen_start_info structure. */
movq $xen_start_info_union,%rdi
movq $64,%rcx /* sizeof (union xen_start_info_union) / sizeof (long) */
@@ -52,6 +51,7 @@
cld
#endif /* CONFIG_SMP */
+ movq init_rsp(%rip),%rsp
/* zero EFLAGS after setting rsp */
pushq $0
popfq
@@ -204,6 +204,7 @@
.quad 0,0 /* TSS */
.quad 0,0 /* LDT */
.quad 0,0,0 /* three TLS descriptors */
+ .quad 0 /* unused now? __KERNEL16_CS - 16bit PM for S3 wakeup. */
gdt_end:
/* asm/segment.h:GDT_ENTRIES must match this */
diff -r f90820349056 -r c589ca6d292b linux-2.6-xen-sparse/arch/xen/x86_64/kernel/irq.c
--- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/irq.c Thu Aug 11 18:44:59 2005
+++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/irq.c Thu Aug 11 20:38:44 2005
@@ -21,6 +21,11 @@
atomic_t irq_err_count;
+#ifdef CONFIG_X86_IO_APIC
+#ifdef APIC_MISMATCH_DEBUG
+atomic_t irq_mis_count;
+#endif
+#endif
/*
* Generic, controller-independent functions:
diff -r f90820349056 -r c589ca6d292b linux-2.6-xen-sparse/arch/xen/x86_64/kernel/setup.c
--- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/setup.c Thu Aug 11 18:44:59 2005
+++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/setup.c Thu Aug 11 20:38:44 2005
@@ -200,12 +200,14 @@
#define ADAPTER_ROM_RESOURCES \
(sizeof adapter_rom_resources / sizeof adapter_rom_resources[0])
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
static struct resource video_rom_resource = {
.name = "Video ROM",
.start = 0xc0000,
.end = 0xc7fff,
.flags = IORESOURCE_ROM,
};
+#endif
static struct resource video_ram_resource = {
.name = "Video RAM area",
@@ -599,6 +601,19 @@
}
}
+#ifdef CONFIG_XEN
+void __init smp_alloc_memory(void)
+{
+ int cpu;
+
+ for (cpu = 1; cpu < NR_CPUS; cpu++) {
+ cpu_gdt_descr[cpu].address = (unsigned long)
+ alloc_bootmem_low_pages(PAGE_SIZE);
+ /* XXX free unused pages later */
+ }
+}
+#endif
+
void __init setup_arch(char **cmdline_p)
{
int i, j;
@@ -739,6 +754,11 @@
initrd_start = 0;
}
}
+#endif
+#ifdef CONFIG_SMP
+#ifdef CONFIG_XEN
+ smp_alloc_memory();
+#endif
#endif
paging_init();
#ifdef CONFIG_X86_LOCAL_APIC
diff -r f90820349056 -r c589ca6d292b linux-2.6-xen-sparse/arch/xen/x86_64/kernel/setup64.c
--- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/setup64.c Thu Aug 11 18:44:59 2005
+++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/setup64.c Thu Aug 11 20:38:44 2005
@@ -276,9 +276,11 @@
* Initialize the per-CPU GDT with the boot GDT,
* and set up the GDT descriptor:
*/
+#ifndef CONFIG_XEN
if (cpu) {
memcpy(cpu_gdt_table[cpu], cpu_gdt_table[0], GDT_SIZE);
}
+#endif
cpu_gdt_descr[cpu].size = GDT_SIZE;
cpu_gdt_descr[cpu].address = (unsigned long)cpu_gdt_table[cpu];
diff -r f90820349056 -r c589ca6d292b linux-2.6-xen-sparse/arch/xen/x86_64/kernel/smp.c
--- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/smp.c Thu Aug 11 18:44:59 2005
+++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/smp.c Thu Aug 11 20:38:44 2005
@@ -28,7 +28,12 @@
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/apicdef.h>
-
+#ifdef CONFIG_XEN
+#include <asm-xen/evtchn.h>
+
+#define xxprint(msg) HYPERVISOR_console_io(CONSOLEIO_write, strlen(msg), msg)
+
+#else
/*
* Smarter SMP flushing macros.
* c/o Linus Torvalds.
@@ -44,6 +49,7 @@
static unsigned long flush_va;
static DEFINE_SPINLOCK(tlbstate_lock);
#define FLUSH_ALL -1ULL
+#endif
/*
* We cannot call mmdrop() because we are in interrupt context,
@@ -57,6 +63,7 @@
load_cr3(swapper_pg_dir);
}
+#ifndef CONFIG_XEN
/*
*
* The flush IPI assumes that a thread switch happens in this order:
@@ -250,6 +257,18 @@
{
on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
}
+#else
+asmlinkage void smp_invalidate_interrupt (void)
+{ return; }
+void flush_tlb_current_task(void)
+{ xen_tlb_flush_mask(&current->mm->cpu_vm_mask); }
+void flush_tlb_mm (struct mm_struct * mm)
+{ xen_tlb_flush_mask(&mm->cpu_vm_mask); }
+void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
+{ xen_invlpg_mask(&vma->vm_mm->cpu_vm_mask, va); }
+void flush_tlb_all(void)
+{ xen_tlb_flush_all(); }
+#endif /* Xen */
void smp_kdb_stop(void)
{
@@ -310,13 +329,21 @@
/* Wait for response */
while (atomic_read(&data.started) != cpus)
+#ifndef CONFIG_XEN
cpu_relax();
+#else
+ barrier();
+#endif
if (!wait)
return;
while (atomic_read(&data.finished) != cpus)
+#ifndef CONFIG_XEN
cpu_relax();
+#else
+ barrier();
+#endif
}
/*
@@ -350,7 +377,11 @@
*/
cpu_clear(smp_processor_id(), cpu_online_map);
local_irq_disable();
+#ifndef CONFIG_XEN
disable_local_APIC();
+#else
+ xxprint("stop_this_cpu disable_local_APIC\n");
+#endif
local_irq_enable();
}
@@ -364,8 +395,10 @@
void smp_send_stop(void)
{
int nolock = 0;
+#ifndef CONFIG_XEN
if (reboot_force)
return;
+#endif
/* Don't deadlock on the call lock in panic */
if (!spin_trylock(&call_lock)) {
/* ignore locking because we have paniced anyways */
@@ -376,7 +409,11 @@
spin_unlock(&call_lock);
local_irq_disable();
+#ifdef CONFIG_XEN
+ xxprint("stop_this_cpu disable_local_APIC\n");
+#else
disable_local_APIC();
+#endif
local_irq_enable();
}
@@ -385,18 +422,32 @@
* all the work is done automatically when
* we return from the interrupt.
*/
+#ifndef CONFIG_XEN
asmlinkage void smp_reschedule_interrupt(void)
-{
+#else
+asmlinkage irqreturn_t smp_reschedule_interrupt(void)
+#endif
+{
+#ifndef CONFIG_XEN
ack_APIC_irq();
-}
-
+#else
+ return IRQ_HANDLED;
+#endif
+}
+
+#ifndef CONFIG_XEN
asmlinkage void smp_call_function_interrupt(void)
+#else
+asmlinkage irqreturn_t smp_call_function_interrupt(void)
+#endif
{
void (*func) (void *info) = call_data->func;
void *info = call_data->info;
int wait = call_data->wait;
+#ifndef CONFIG_XEN
ack_APIC_irq();
+#endif
/*
* Notify initiating CPU that I've grabbed the data and am
* about to execute the function
@@ -413,10 +464,16 @@
mb();
atomic_inc(&call_data->finished);
}
+#ifdef CONFIG_XEN
+ return IRQ_HANDLED;
+#endif
}
int safe_smp_processor_id(void)
{
+#ifdef CONFIG_XEN
+ return smp_processor_id();
+#else
int apicid, i;
if (disable_apic)
@@ -437,4 +494,5 @@
return 0;
return 0; /* Should not happen */
-}
+#endif
+}
diff -r f90820349056 -r c589ca6d292b linux-2.6-xen-sparse/arch/xen/x86_64/kernel/smpboot.c
--- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/smpboot.c Thu Aug 11 18:44:59 2005
+++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/smpboot.c Thu Aug 11 20:38:44 2005
@@ -47,6 +47,9 @@
#include <linux/bootmem.h>
#include <linux/thread_info.h>
#include <linux/module.h>
+#ifdef CONFIG_XEN
+#include <linux/interrupt.h>
+#endif
#include <linux/delay.h>
#include <linux/mc146818rtc.h>
@@ -57,11 +60,20 @@
#include <asm/tlbflush.h>
#include <asm/proto.h>
#include <asm/nmi.h>
+#ifdef CONFIG_XEN
+#include <asm/arch_hooks.h>
+
+#include <asm-xen/evtchn.h>
+#endif
/* Change for real CPU hotplug. Note other files need to be fixed
first too. */
#define __cpuinit __init
#define __cpuinitdata __initdata
+
+#if defined(CONFIG_XEN) && !defined(CONFIG_XEN_PRIVILEGED_GUEST)
+ unsigned int maxcpus = NR_CPUS;
+#endif
/* Number of siblings per CPU package */
int smp_num_siblings = 1;
@@ -96,6 +108,7 @@
cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
EXPORT_SYMBOL(cpu_core_map);
+#ifndef CONFIG_XEN
/*
* Trampoline 80x86 program as an array.
*/
@@ -115,6 +128,7 @@
memcpy(tramp, trampoline_data, trampoline_end - trampoline_data);
return virt_to_phys(tramp);
}
+#endif
/*
* The bootstrap kernel entry code has set these up. Save them for
@@ -130,6 +144,7 @@
print_cpu_info(c);
}
+#ifndef CONFIG_XEN
/*
* New Funky TSC sync algorithm borrowed from IA64.
* Main advantage is that it doesn't reset the TSCs fully and
@@ -331,6 +346,7 @@
return 0;
}
__setup("notscsync", notscsync_setup);
+#endif
static atomic_t init_deasserted __cpuinitdata;
@@ -343,6 +359,7 @@
int cpuid, phys_id;
unsigned long timeout;
+#ifndef CONFIG_XEN
/*
* If waken up by an INIT in an 82489DX configuration
* we may get here before an INIT-deassert IPI reaches
@@ -352,10 +369,15 @@
while (!atomic_read(&init_deasserted))
cpu_relax();
+#endif
/*
* (This works even if the APIC is not enabled.)
*/
+#ifndef CONFIG_XEN
phys_id = GET_APIC_ID(apic_read(APIC_ID));
+#else
+ phys_id = smp_processor_id();
+#endif
cpuid = smp_processor_id();
if (cpu_isset(cpuid, cpu_callin_map)) {
panic("smp_callin: phys CPU#%d, CPU#%d already present??\n",
@@ -389,6 +411,7 @@
cpuid);
}
+#ifndef CONFIG_XEN
/*
* the boot CPU has finished the init stage and is spinning
* on callin_map until we finish. We are free to set up this
@@ -398,6 +421,7 @@
Dprintk("CALLIN, before setup_local_APIC().\n");
setup_local_APIC();
+#endif
/*
* Get our bogomips.
@@ -405,7 +429,9 @@
calibrate_delay();
Dprintk("Stack at about %p\n",&cpuid);
+#ifndef CONFIG_XEN
disable_APIC_timer();
+#endif
/*
* Save our processor parameters
@@ -417,6 +443,29 @@
*/
cpu_set(cpuid, cpu_callin_map);
}
+
+#ifdef CONFIG_XEN
+static irqreturn_t ldebug_interrupt(
+ int irq, void *dev_id, struct pt_regs *regs)
+{
+ return IRQ_HANDLED;
+}
+
+static DEFINE_PER_CPU(int, ldebug_irq);
+static char ldebug_name[NR_CPUS][15];
+
+void ldebug_setup(void)
+{
+ int cpu = smp_processor_id();
+
+ per_cpu(ldebug_irq, cpu) = bind_virq_to_irq(VIRQ_DEBUG);
+ sprintf(ldebug_name[cpu], "ldebug%d", cpu);
+ BUG_ON(request_irq(per_cpu(ldebug_irq, cpu), ldebug_interrupt,
+ SA_INTERRUPT, ldebug_name[cpu], NULL));
+}
+
+extern void local_setup_timer(void);
+#endif
/*
* Setup code on secondary processor (after comming out of the trampoline)
@@ -434,6 +483,7 @@
/* otherwise gcc will move up the smp_processor_id before the cpu_init */
barrier();
+#ifndef CONFIG_XEN
Dprintk("cpu %d: setting up apic clock\n", smp_processor_id());
setup_secondary_APIC_clock();
@@ -446,6 +496,12 @@
}
enable_APIC_timer();
+#else
+ local_setup_timer();
+ ldebug_setup();
+ smp_intr_init();
+ local_irq_enable();
+#endif
/*
* Allow the master to continue.
@@ -453,10 +509,12 @@
cpu_set(smp_processor_id(), cpu_online_map);
mb();
+#ifndef CONFIG_XEN
/* Wait for TSC sync to not schedule things before.
We still process interrupts, which could see an inconsistent
time in that window unfortunately. */
tsc_sync_wait();
+#endif
cpu_idle();
}
@@ -464,6 +522,7 @@
extern volatile unsigned long init_rsp;
extern void (*initial_code)(void);
+#ifndef CONFIG_XEN
#if APIC_DEBUG
static void inquire_remote_apic(int apicid)
{
@@ -627,6 +686,7 @@
return (send_status | accept_status);
}
+#endif
/*
* Boot one CPU.
@@ -637,6 +697,14 @@
unsigned long boot_error;
int timeout;
unsigned long start_rip;
+#ifdef CONFIG_XEN
+ vcpu_guest_context_t ctxt;
+ extern void startup_64_smp(void);
+ extern void hypervisor_callback(void);
+ extern void failsafe_callback(void);
+ extern void smp_trap_init(trap_info_t *);
+ int i;
+#endif
/*
* We can't use kernel_thread since we must avoid to
* reschedule the child.
@@ -649,7 +717,11 @@
cpu_pda[cpu].pcurrent = idle;
+#ifndef CONFIG_XEN
start_rip = setup_trampoline();
+#else
+ start_rip = (unsigned long)startup_64_smp;
+#endif
init_rsp = idle->thread.rsp;
per_cpu(init_tss,cpu).rsp0 = init_rsp;
@@ -666,6 +738,93 @@
atomic_set(&init_deasserted, 0);
+#ifdef CONFIG_XEN
+ if (cpu_gdt_descr[0].size > PAGE_SIZE)
+ BUG();
+ cpu_gdt_descr[cpu].size = cpu_gdt_descr[0].size;
+ memcpy((void *)cpu_gdt_descr[cpu].address,
+ (void *)cpu_gdt_descr[0].address, cpu_gdt_descr[0].size);
+
+ memset(&ctxt, 0, sizeof(ctxt));
+
+ ctxt.flags = VGCF_IN_KERNEL;
+ ctxt.user_regs.ds = __USER_DS;
+ ctxt.user_regs.es = __USER_DS;
+ ctxt.user_regs.fs = 0;
+ ctxt.user_regs.gs = 0;
+ ctxt.user_regs.ss = __KERNEL_DS|0x3;
+ ctxt.user_regs.cs = __KERNEL_CS|0x3;
+ ctxt.user_regs.rip = start_rip;
+ ctxt.user_regs.rsp = idle->thread.rsp;
+#define X86_EFLAGS_IOPL_RING3 0x3000
+ ctxt.user_regs.eflags = X86_EFLAGS_IF | X86_EFLAGS_IOPL_RING3;
+
+ /* FPU is set up to default initial state. */
+ memset(&ctxt.fpu_ctxt, 0, sizeof(ctxt.fpu_ctxt));
+
+ /* Virtual IDT is empty at start-of-day. */
+ for ( i = 0; i < 256; i++ )
+ {
+ ctxt.trap_ctxt[i].vector = i;
+ ctxt.trap_ctxt[i].cs = FLAT_KERNEL_CS;
+ }
+ smp_trap_init(ctxt.trap_ctxt);
+
+ /* No LDT. */
+ ctxt.ldt_ents = 0;
+
+ {
+ unsigned long va;
+ int f;
+
+ for (va = cpu_gdt_descr[cpu].address, f = 0;
+ va < cpu_gdt_descr[cpu].address + cpu_gdt_descr[cpu].size;
+ va += PAGE_SIZE, f++) {
+ ctxt.gdt_frames[f] = virt_to_machine(va) >> PAGE_SHIFT;
+ make_page_readonly((void *)va);
+ }
+ ctxt.gdt_ents = GDT_ENTRIES;
+ }
+
+ /* Ring 1 stack is the initial stack. */
+ ctxt.kernel_ss = __KERNEL_DS;
+ ctxt.kernel_sp = idle->thread.rsp;
+
+ /* Callback handlers. */
+ ctxt.event_callback_eip = (unsigned long)hypervisor_callback;
+ ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;
+ ctxt.syscall_callback_eip = (unsigned long)system_call;
+
+ ctxt.ctrlreg[3] = (unsigned long)virt_to_machine(init_level4_pgt);
+
+ boot_error = HYPERVISOR_boot_vcpu(cpu, &ctxt);
+
+ if (!boot_error) {
+ /*
+ * allow APs to start initializing.
+ */
+ Dprintk("Before Callout %d.\n", cpu);
+ cpu_set(cpu, cpu_callout_map);
+ Dprintk("After Callout %d.\n", cpu);
+
+ /*
+ * Wait 5s total for a response
+ */
+ for (timeout = 0; timeout < 50000; timeout++) {
+ if (cpu_isset(cpu, cpu_callin_map))
+ break; /* It has booted */
+ udelay(100);
+ }
+
+ if (cpu_isset(cpu, cpu_callin_map)) {
+ /* number CPUs logically, starting from 1 (BSP is 0) */
+ Dprintk("CPU has booted.\n");
+ } else {
+ boot_error= 1;
+ }
+ }
+ x86_cpu_to_apicid[cpu] = apicid;
+#else
Dprintk("Setting warm reset code and vector.\n");
CMOS_WRITE(0xa, 0xf);
@@ -729,6 +888,7 @@
#endif
}
}
+#endif
if (boot_error) {
cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
@@ -790,6 +950,7 @@
}
}
+#ifndef CONFIG_XEN
/*
* Cleanup possible dangling ends...
*/
@@ -817,6 +978,7 @@
free_page((unsigned long) __va(SMP_TRAMPOLINE_BASE));
#endif
}
+#endif
/*
* Fall back to non SMP mode after errors.
@@ -827,10 +989,12 @@
{
cpu_present_map = cpumask_of_cpu(0);
cpu_possible_map = cpumask_of_cpu(0);
+#ifndef CONFIG_XEN
if (smp_found_config)
phys_cpu_present_map = physid_mask_of_physid(boot_cpu_id);
else
phys_cpu_present_map = physid_mask_of_physid(0);
+#endif
cpu_set(0, cpu_sibling_map[0]);
cpu_set(0, cpu_core_map[0]);
}
@@ -857,6 +1021,7 @@
*/
static int __cpuinit smp_sanity_check(unsigned max_cpus)
{
+#ifndef CONFIG_XEN
if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
hard_smp_processor_id());
@@ -896,13 +1061,19 @@
nr_ioapics = 0;
return -1;
}
+#endif
/*
* If SMP should be disabled, then really disable it!
*/
if (!max_cpus) {
+#ifdef CONFIG_XEN
+ HYPERVISOR_shared_info->n_vcpu = 1;
+#endif
printk(KERN_INFO "SMP mode deactivated, forcing use of dummy
APIC emulation.\n");
+#ifndef CONFIG_XEN
nr_ioapics = 0;
+#endif
return -1;
}
@@ -917,7 +1088,10 @@
{
int i;
+#if defined(CONFIG_XEN) && !defined(CONFIG_XEN_PRIVILEGED_GUEST)
+#else
nmi_watchdog_default();
+#endif
current_cpu_data = boot_cpu_data;
current_thread_info()->cpu = 0; /* needed? */
@@ -927,8 +1101,12 @@
* Fill in cpu_present_mask
*/
for (i = 0; i < NR_CPUS; i++) {
+#ifndef CONFIG_XEN
int apicid = cpu_present_to_apicid(i);
if (physid_isset(apicid, phys_cpu_present_map)) {
+#else
+ if (i < HYPERVISOR_shared_info->n_vcpu) {
+#endif
cpu_set(i, cpu_present_map);
/* possible map would be different if we supported real CPU hotplug. */
@@ -942,6 +1120,9 @@
return;
}
+#ifdef CONFIG_XEN
+ smp_intr_init();
+#else
/*
* Switch from PIC to APIC mode.
@@ -954,20 +1135,26 @@
GET_APIC_ID(apic_read(APIC_ID)), boot_cpu_id);
/* Or can we switch back to PIC here? */
}
+#endif
/*
* Now start the IO-APICs
*/
+#if defined(CONFIG_XEN) && !defined(CONFIG_XEN_PRIVILEGED_GUEST)
+#else
if (!skip_ioapic_setup && nr_ioapics)
setup_IO_APIC();
else
nr_ioapics = 0;
+#endif
/*
* Set up local APIC timer on boot CPU.
*/
+#ifndef CONFIG_XEN
setup_boot_APIC_clock();
+#endif
}
/*
@@ -989,17 +1176,23 @@
int __cpuinit __cpu_up(unsigned int cpu)
{
int err;
+#ifndef CONFIG_XEN
int apicid = cpu_present_to_apicid(cpu);
+#else
+ int apicid = cpu;
+#endif
WARN_ON(irqs_disabled());
Dprintk("++++++++++++++++++++=_---CPU UP %u\n", cpu);
+#ifndef CONFIG_XEN
if (apicid == BAD_APICID || apicid == boot_cpu_id ||
!physid_isset(apicid, phys_cpu_present_map)) {
printk("__cpu_up: bad cpu %d\n", cpu);
return -EINVAL;
}
+#endif
/* Boot it! */
err = do_boot_cpu(cpu, apicid);
@@ -1021,15 +1214,76 @@
*/
void __cpuinit smp_cpus_done(unsigned int max_cpus)
{
+#ifndef CONFIG_XEN
zap_low_mappings();
smp_cleanup_boot();
#ifdef CONFIG_X86_IO_APIC
setup_ioapic_dest();
#endif
+#endif
detect_siblings();
+#ifndef CONFIG_XEN
time_init_gtod();
check_nmi_watchdog();
-}
+#endif
+}
+
+#ifdef CONFIG_XEN
+extern int bind_ipi_to_irq(int ipi);
+extern irqreturn_t smp_reschedule_interrupt(int, void *, struct pt_regs *);
+extern irqreturn_t smp_call_function_interrupt(int, void *, struct pt_regs *);
+
+static DEFINE_PER_CPU(int, resched_irq);
+static DEFINE_PER_CPU(int, callfunc_irq);
+static char resched_name[NR_CPUS][15];
+static char callfunc_name[NR_CPUS][15];
+
+void smp_intr_init(void)
+{
+ int cpu = smp_processor_id();
+
+ per_cpu(resched_irq, cpu) =
+ bind_ipi_to_irq(RESCHEDULE_VECTOR);
+ sprintf(resched_name[cpu], "resched%d", cpu);
+ BUG_ON(request_irq(per_cpu(resched_irq, cpu), smp_reschedule_interrupt,
+ SA_INTERRUPT, resched_name[cpu], NULL));
+
+ per_cpu(callfunc_irq, cpu) =
+ bind_ipi_to_irq(CALL_FUNCTION_VECTOR);
+ sprintf(callfunc_name[cpu], "callfunc%d", cpu);
+ BUG_ON(request_irq(per_cpu(callfunc_irq, cpu),
+ smp_call_function_interrupt,
+ SA_INTERRUPT, callfunc_name[cpu], NULL));
+}
+
+static void smp_intr_exit(void)
+{
+ int cpu = smp_processor_id();
+
+ free_irq(per_cpu(resched_irq, cpu), NULL);
+ unbind_ipi_from_irq(RESCHEDULE_VECTOR);
+
+ free_irq(per_cpu(callfunc_irq, cpu), NULL);
+ unbind_ipi_from_irq(CALL_FUNCTION_VECTOR);
+}
+
+extern void local_setup_timer_irq(void);
+extern void local_teardown_timer_irq(void);
+
+void smp_suspend(void)
+{
+ /* XXX todo: take down time and ipi's on all cpus */
+ local_teardown_timer_irq();
+ smp_intr_exit();
+}
+
+void smp_resume(void)
+{
+ /* XXX todo: restore time and ipi's on all cpus */
+ smp_intr_init();
+ local_setup_timer_irq();
+}
+#endif
diff -r f90820349056 -r c589ca6d292b linux-2.6-xen-sparse/arch/xen/x86_64/kernel/traps.c
--- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/traps.c Thu Aug 11 18:44:59 2005
+++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/traps.c Thu Aug 11 20:38:44 2005
@@ -953,6 +953,17 @@
cpu_init();
}
+void smp_trap_init(trap_info_t *trap_ctxt)
+{
+ trap_info_t *t = trap_table;
+
+ for (t = trap_table; t->address; t++) {
+ trap_ctxt[t->vector].flags = t->flags;
+ trap_ctxt[t->vector].cs = t->cs;
+ trap_ctxt[t->vector].address = t->address;
+ }
+}
+
/* Actual parsing is done early in setup.c. */
static int __init oops_dummy(char *s)
diff -r f90820349056 -r c589ca6d292b linux-2.6-xen-sparse/arch/xen/x86_64/kernel/xen_entry.S
--- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/xen_entry.S Thu Aug 11 18:44:59 2005
+++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/xen_entry.S Thu Aug 11 20:38:44 2005
@@ -8,11 +8,14 @@
#define sizeof_vcpu_shift 3
#ifdef CONFIG_SMP
-#define preempt_disable(reg) incl threadinfo_preempt_count(reg)
-#define preempt_enable(reg) decl threadinfo_preempt_count(reg)
+//#define preempt_disable(reg) incl threadinfo_preempt_count(reg)
+//#define preempt_enable(reg) decl threadinfo_preempt_count(reg)
+#define preempt_disable(reg)
+#define preempt_enable(reg)
#define XEN_GET_VCPU_INFO(reg) preempt_disable(%rbp) ; \
movq %gs:pda_cpunumber,reg ; \
- shl $sizeof_vcpu_shift,reg ; \
+ shl $32, reg ; \
+ shr $32-sizeof_vcpu_shift,reg ; \
addq HYPERVISOR_shared_info,reg
#define XEN_PUT_VCPU_INFO(reg) preempt_enable(%rbp) ; \
#define XEN_PUT_VCPU_INFO_fixup .byte 0xff,0xff,0xff
diff -r f90820349056 -r c589ca6d292b tools/console/client/main.c
--- a/tools/console/client/main.c Thu Aug 11 18:44:59 2005
+++ b/tools/console/client/main.c Thu Aug 11 20:38:44 2005
@@ -162,14 +162,11 @@
struct termios attr;
int domid;
int xc_handle;
- char *sopt = "hf:pc";
+ char *sopt = "h";
int ch;
int opt_ind=0;
struct option lopt[] = {
{ "help", 0, 0, 'h' },
- { "file", 1, 0, 'f' },
- { "pty", 0, 0, 'p' },
- { "ctty", 0, 0, 'c' },
{ 0 },
};
@@ -178,6 +175,7 @@
int spty;
unsigned int len = 0;
struct xs_handle *xs;
+ char *end;
while((ch = getopt_long(argc, argv, sopt, lopt, &opt_ind)) != -1) {
switch(ch) {
@@ -195,7 +193,13 @@
exit(EINVAL);
}
- domid = atoi(argv[optind]);
+ domid = strtol(argv[optind], &end, 10);
+ if (end && *end) {
+ fprintf(stderr, "Invalid DOMID `%s'\n", argv[optind]);
+ fprintf(stderr, "Try `%s --help' for more information.\n",
+ argv[0]);
+ exit(EINVAL);
+ }
xs = xs_daemon_open();
if (xs == NULL) {
@@ -211,7 +215,11 @@
snprintf(path, sizeof(path), "/console/%d/tty", domid);
str_pty = xs_read(xs, path, &len);
- if (str_pty == NULL) {
+ /* FIXME consoled currently does not assume domain-0 doesn't have a
+ console which is good when we break domain-0 up. To keep us
+ user friendly, we'll bail out here since no data will ever show
+ up on domain-0. */
+ if (domid == 0 || str_pty == NULL) {
err(errno, "Could not read tty from store");
}
spty = open(str_pty, O_RDWR | O_NOCTTY);
diff -r f90820349056 -r c589ca6d292b tools/ioemu/target-i386-dm/Makefile
--- a/tools/ioemu/target-i386-dm/Makefile Thu Aug 11 18:44:59 2005
+++ b/tools/ioemu/target-i386-dm/Makefile Thu Aug 11 20:38:44 2005
@@ -376,10 +376,10 @@
$(CC) $(DEFINES) -c -o $@ $<
clean:
- rm -rf *.o *.a *~ $(PROGS) gen-op.h opc.h op.h nwfpe slirp qemu-vgaram-bin
+ rm -rf *.o *.a *~ $(PROGS) gen-op.h opc.h op.h nwfpe slirp
distclean:
- rm -rf *.o *.a *~ $(PROGS) gen-op.h opc.h op.h nwfpe slirp qemu-vgaram-bin
+ rm -rf *.o *.a *~ $(PROGS) gen-op.h opc.h op.h nwfpe slirp
install: all
if [ ! -d $(INSTALL_DIR) ];then mkdir -p $(INSTALL_DIR);fi
@@ -387,8 +387,6 @@
install -m 755 -s $(PROGS) "$(INSTALL_DIR)"
install -m 755 qemu-dm.debug "$(INSTALL_DIR)"
install -m 755 qemu-ifup "$(DESTDIR)$(configdir)"
- gunzip -c qemu-vgaram-bin.gz >qemu-vgaram-bin
- install -m 755 qemu-vgaram-bin "$(DESTDIR)$(configdir)"
ifneq ($(wildcard .depend),)
include .depend
endif
diff -r f90820349056 -r c589ca6d292b tools/python/xen/xm/main.py
--- a/tools/python/xen/xm/main.py Thu Aug 11 18:44:59 2005
+++ b/tools/python/xen/xm/main.py Thu Aug 11 20:38:44 2005
@@ -265,7 +265,7 @@
print 'Name Id Mem(MB) CPU VCPU(s) State Time(s)'
for dominfo in domsinfo:
if dominfo.has_key("ssidref1"):
- print ("%(name)-16s %(dom)3d %(mem)7d %(cpu)3s %(vcpus)5d
%(state)5s %(cpu_time)7.1f %s:%(ssidref2)02x/p:%(ssidref1)02x" % dominfo)
+ print ("%(name)-16s %(dom)3d %(mem)7d %(cpu)3s %(vcpus)5d
%(state)5s %(cpu_time)7.1f s:%(ssidref2)02x/p:%(ssidref1)02x" % dominfo)
else:
print ("%(name)-16s %(dom)3d %(mem)7d %(cpu)3s %(vcpus)5d
%(state)5s %(cpu_time)7.1f" % dominfo)
@@ -605,6 +605,10 @@
deprecated(cmd,aliases[cmd])
return commands[aliases[cmd]]
else:
+ if len( cmd ) > 1:
+ matched_commands = filter( lambda (command, func): command[ 0:len(cmd) ] == cmd, commands.iteritems() )
+ if len( matched_commands ) == 1:
+ return matched_commands[0][1]
err('Sub Command %s not found!' % cmd)
usage()
diff -r f90820349056 -r c589ca6d292b xen/arch/ia64/vcpu.c
--- a/xen/arch/ia64/vcpu.c Thu Aug 11 18:44:59 2005
+++ b/xen/arch/ia64/vcpu.c Thu Aug 11 20:38:44 2005
@@ -587,6 +587,14 @@
set_bit(vector,PSCBX(vcpu,irr));
PSCB(vcpu,pending_interruption) = 1;
}
+
+ /* Keir: I think you should unblock when an interrupt is pending. */
+ {
+ int running = test_bit(_VCPUF_running, &vcpu->vcpu_flags);
+ vcpu_unblock(vcpu);
+ if ( running )
+ smp_send_event_check_cpu(vcpu->processor);
+ }
}
void early_tick(VCPU *vcpu)
diff -r f90820349056 -r c589ca6d292b xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c Thu Aug 11 18:44:59 2005
+++ b/xen/arch/x86/domain.c Thu Aug 11 20:38:44 2005
@@ -279,8 +279,6 @@
shadow_lock_init(d);
INIT_LIST_HEAD(&d->arch.free_shadow_frames);
-
- init_domain_time(d);
}
void arch_do_boot_vcpu(struct vcpu *v)
@@ -503,7 +501,10 @@
}
update_pagetables(v);
-
+
+ if ( v->vcpu_id == 0 )
+ init_domain_time(d);
+
/* Don't redo final setup */
set_bit(_VCPUF_initialised, &v->vcpu_flags);
diff -r f90820349056 -r c589ca6d292b xen/arch/x86/domain_build.c
--- a/xen/arch/x86/domain_build.c Thu Aug 11 18:44:59 2005
+++ b/xen/arch/x86/domain_build.c Thu Aug 11 20:38:44 2005
@@ -74,7 +74,7 @@
unsigned long _initrd_start, unsigned long initrd_len,
char *cmdline)
{
- int i, rc, dom0_pae, xen_pae;
+ int i, rc, dom0_pae, xen_pae, order;
unsigned long pfn, mfn;
unsigned long nr_pages;
unsigned long nr_pt_pages;
@@ -143,10 +143,6 @@
nr_pages = avail_domheap_pages() +
((initrd_len + PAGE_SIZE - 1) >> PAGE_SHIFT) +
((image_len + PAGE_SIZE - 1) >> PAGE_SHIFT);
- if ( (page = alloc_largest(d, nr_pages)) == NULL )
- panic("Not enough RAM for DOM0 reservation.\n");
- alloc_spfn = page_to_pfn(page);
- alloc_epfn = alloc_spfn + d->tot_pages;
if ( (rc = parseelfimage(&dsi)) != 0 )
return rc;
@@ -215,8 +211,15 @@
#endif
}
- if ( ((v_end - dsi.v_start) >> PAGE_SHIFT) > (alloc_epfn - alloc_spfn) )
- panic("Insufficient contiguous RAM to build kernel image.\n");
+ order = get_order(v_end - dsi.v_start);
+ if ( (1UL << order) > nr_pages )
+ panic("Domain 0 allocation is too small for kernel image.\n");
+
+ /* Allocate from DMA pool: PAE L3 table must be below 4GB boundary. */
+ if ( (page = alloc_domheap_pages(d, order, ALLOC_DOM_DMA)) == NULL )
+ panic("Not enough RAM for domain 0 allocation.\n");
+ alloc_spfn = page_to_pfn(page);
+ alloc_epfn = alloc_spfn + d->tot_pages;
printk("PHYSICAL MEMORY ARRANGEMENT:\n"
" Dom0 alloc.: %"PRIphysaddr"->%"PRIphysaddr,
@@ -615,6 +618,8 @@
/* DOM0 gets access to everything. */
physdev_init_dom0(d);
+ init_domain_time(d);
+
set_bit(_DOMF_constructed, &d->domain_flags);
new_thread(v, dsi.v_kernentry, vstack_end, vstartinfo_start);
diff -r f90820349056 -r c589ca6d292b xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Thu Aug 11 18:44:59 2005
+++ b/xen/arch/x86/mm.c Thu Aug 11 20:38:44 2005
@@ -95,6 +95,7 @@
#include <xen/irq.h>
#include <xen/softirq.h>
#include <xen/domain_page.h>
+#include <xen/event.h>
#include <asm/shadow.h>
#include <asm/page.h>
#include <asm/flushtlb.h>
@@ -855,6 +856,14 @@
int i;
ASSERT(!shadow_mode_refcounts(d));
+
+#ifdef CONFIG_X86_PAE
+ if ( pfn >= 0x100000 )
+ {
+ MEM_LOG("PAE pgd must be below 4GB (0x%lx >= 0x100000)", pfn);
+ return 0;
+ }
+#endif
pl3e = map_domain_page(pfn);
for ( i = 0; i < L3_PAGETABLE_ENTRIES; i++ )
diff -r f90820349056 -r c589ca6d292b xen/arch/x86/setup.c
--- a/xen/arch/x86/setup.c Thu Aug 11 18:44:59 2005
+++ b/xen/arch/x86/setup.c Thu Aug 11 20:38:44 2005
@@ -248,10 +248,11 @@
{
char *cmdline;
module_t *mod = (module_t *)__va(mbi->mods_addr);
- unsigned long firsthole_start, nr_pages;
+ unsigned long nr_pages, modules_length;
unsigned long initial_images_start, initial_images_end;
unsigned long _initrd_start = 0, _initrd_len = 0;
unsigned int initrdidx = 1;
+ physaddr_t s, e;
struct e820entry e820_raw[E820MAX];
int i, e820_raw_nr = 0, bytes = 0;
struct ns16550_defaults ns16550 = {
@@ -330,22 +331,31 @@
max_page = init_e820(e820_raw, &e820_raw_nr);
- /* Find the first high-memory RAM hole. */
- for ( i = 0; i < e820.nr_map; i++ )
+ modules_length = mod[mbi->mods_count-1].mod_end - mod[0].mod_start;
+
+ /* Find a large enough RAM extent to stash the DOM0 modules. */
+ for ( i = 0; ; i++ )
+ {
if ( (e820.map[i].type == E820_RAM) &&
- (e820.map[i].addr >= 0x100000) )
+ (e820.map[i].size >= modules_length) &&
+ ((e820.map[i].addr + e820.map[i].size) >=
+ (xenheap_phys_end + modules_length)) )
+ {
+ /* Stash as near as possible to the beginning of the RAM extent. */
+ initial_images_start = e820.map[i].addr;
+ if ( initial_images_start < xenheap_phys_end )
+ initial_images_start = xenheap_phys_end;
+ initial_images_end = initial_images_start + modules_length;
break;
- firsthole_start = e820.map[i].addr + e820.map[i].size;
-
- /* Relocate the Multiboot modules. */
- initial_images_start = xenheap_phys_end;
- initial_images_end = initial_images_start +
- (mod[mbi->mods_count-1].mod_end - mod[0].mod_start);
- if ( initial_images_end > firsthole_start )
- {
- printk("Not enough memory to stash the DOM0 kernel image.\n");
- for ( ; ; ) ;
- }
+ }
+
+ if ( i == e820.nr_map )
+ {
+ printk("Not enough memory to stash the DOM0 kernel image.\n");
+ for ( ; ; ) ;
+ }
+ }
+
#if defined(CONFIG_X86_32)
memmove((void *)initial_images_start, /* use low mapping */
(void *)mod[0].mod_start, /* use low mapping */
@@ -358,16 +368,23 @@
/* Initialise boot-time allocator with all RAM situated after modules. */
xenheap_phys_start = init_boot_allocator(__pa(&_end));
- nr_pages = 0;
+ nr_pages = 0;
for ( i = 0; i < e820.nr_map; i++ )
{
if ( e820.map[i].type != E820_RAM )
continue;
+
nr_pages += e820.map[i].size >> PAGE_SHIFT;
- if ( (e820.map[i].addr + e820.map[i].size) >= initial_images_end )
- init_boot_pages((e820.map[i].addr < initial_images_end) ?
- initial_images_end : e820.map[i].addr,
- e820.map[i].addr + e820.map[i].size);
+
+ /* Initialise boot heap, skipping Xen heap and dom0 modules. */
+ s = e820.map[i].addr;
+ e = s + e820.map[i].size;
+ if ( s < xenheap_phys_end )
+ s = xenheap_phys_end;
+ if ( (s < initial_images_end) && (e > initial_images_start) )
+ s = initial_images_end;
+ init_boot_pages(s, e);
+
#if defined (CONFIG_X86_64)
/*
* x86/64 maps all registered RAM. Points to note:
@@ -404,10 +421,30 @@
end_boot_allocator();
- init_xenheap_pages(xenheap_phys_start, xenheap_phys_end);
- printk("Xen heap: %luMB (%lukB)\n",
- (xenheap_phys_end-xenheap_phys_start) >> 20,
- (xenheap_phys_end-xenheap_phys_start) >> 10);
+ /* Initialise the Xen heap, skipping RAM holes. */
+ nr_pages = 0;
+ for ( i = 0; i < e820.nr_map; i++ )
+ {
+ if ( e820.map[i].type != E820_RAM )
+ continue;
+
+ s = e820.map[i].addr;
+ e = s + e820.map[i].size;
+ if ( s < xenheap_phys_start )
+ s = xenheap_phys_start;
+ if ( e > xenheap_phys_end )
+ e = xenheap_phys_end;
+
+ if ( s < e )
+ {
+ nr_pages += (e - s) >> PAGE_SHIFT;
+ init_xenheap_pages(s, e);
+ }
+ }
+
+ printk("Xen heap: %luMB (%lukB)\n",
+ nr_pages >> (20 - PAGE_SHIFT),
+ nr_pages << (PAGE_SHIFT - 10));
early_boot = 0;
diff -r f90820349056 -r c589ca6d292b xen/arch/x86/time.c
--- a/xen/arch/x86/time.c Thu Aug 11 18:44:59 2005
+++ b/xen/arch/x86/time.c Thu Aug 11 20:38:44 2005
@@ -44,6 +44,7 @@
int timer_ack = 0;
unsigned long volatile jiffies;
static u32 wc_sec, wc_nsec; /* UTC time at last 'time update'. */
+static spinlock_t wc_lock = SPIN_LOCK_UNLOCKED;
struct time_scale {
int shift;
@@ -699,13 +700,14 @@
struct domain *d;
shared_info_t *s;
- x = (secs * 1000000000ULL) + (u64)nsecs + system_time_base;
+ x = (secs * 1000000000ULL) + (u64)nsecs - system_time_base;
y = do_div(x, 1000000000);
wc_sec = _wc_sec = (u32)x;
wc_nsec = _wc_nsec = (u32)y;
read_lock(&domlist_lock);
+ spin_lock(&wc_lock);
for_each_domain ( d )
{
@@ -716,15 +718,18 @@
version_update_end(&s->wc_version);
}
+ spin_unlock(&wc_lock);
read_unlock(&domlist_lock);
}
void init_domain_time(struct domain *d)
{
+ spin_lock(&wc_lock);
version_update_begin(&d->shared_info->wc_version);
d->shared_info->wc_sec = wc_sec;
d->shared_info->wc_nsec = wc_nsec;
version_update_end(&d->shared_info->wc_version);
+ spin_unlock(&wc_lock);
}
static void local_time_calibration(void *unused)
diff -r f90820349056 -r c589ca6d292b xen/common/page_alloc.c
--- a/xen/common/page_alloc.c Thu Aug 11 18:44:59 2005
+++ b/xen/common/page_alloc.c Thu Aug 11 20:38:44 2005
@@ -418,6 +418,8 @@
ps = round_pgup(ps);
pe = round_pgdown(pe);
+ if ( pe <= ps )
+ return;
memguard_guard_range(phys_to_virt(ps), pe - ps);
@@ -487,19 +489,25 @@
ps = round_pgup(ps) >> PAGE_SHIFT;
pe = round_pgdown(pe) >> PAGE_SHIFT;
-
- if (ps < MAX_DMADOM_PFN && pe > MAX_DMADOM_PFN) {
- init_heap_pages(MEMZONE_DMADOM, pfn_to_page(ps), MAX_DMADOM_PFN - ps);
- init_heap_pages(MEMZONE_DOM, pfn_to_page(MAX_DMADOM_PFN),
- pe - MAX_DMADOM_PFN);
+ if ( pe <= ps )
+ return;
+
+ if ( (ps < MAX_DMADOM_PFN) && (pe > MAX_DMADOM_PFN) )
+ {
+ init_heap_pages(
+ MEMZONE_DMADOM, pfn_to_page(ps), MAX_DMADOM_PFN - ps);
+ init_heap_pages(
+ MEMZONE_DOM, pfn_to_page(MAX_DMADOM_PFN), pe - MAX_DMADOM_PFN);
}
else
+ {
init_heap_pages(pfn_dom_zone_type(ps), pfn_to_page(ps), pe - ps);
-}
-
-
-struct pfn_info *alloc_domheap_pages(struct domain *d, unsigned int order,
- unsigned int flags)
+ }
+}
+
+
+struct pfn_info *alloc_domheap_pages(
+ struct domain *d, unsigned int order, unsigned int flags)
{
struct pfn_info *pg;
cpumask_t mask;
diff -r f90820349056 -r c589ca6d292b xen/include/asm-x86/event.h
--- a/xen/include/asm-x86/event.h Thu Aug 11 18:44:59 2005
+++ b/xen/include/asm-x86/event.h Thu Aug 11 20:38:44 2005
@@ -11,6 +11,19 @@
static inline void evtchn_notify(struct vcpu *v)
{
+ /*
+ * NB1. 'vcpu_flags' and 'processor' must be checked /after/ update of
+ * pending flag. These values may fluctuate (after all, we hold no
+ * locks) but the key insight is that each change will cause
+ * evtchn_upcall_pending to be polled.
+ *
+ * NB2. We save VCPUF_running across the unblock to avoid a needless
+ * IPI for domains that we IPI'd to unblock.
+ */
+ int running = test_bit(_VCPUF_running, &v->vcpu_flags);
+ vcpu_unblock(v);
+ if ( running )
+ smp_send_event_check_cpu(v->processor);
}
#endif
diff -r f90820349056 -r c589ca6d292b xen/include/asm-x86/page.h
--- a/xen/include/asm-x86/page.h Thu Aug 11 18:44:59 2005
+++ b/xen/include/asm-x86/page.h Thu Aug 11 20:38:44 2005
@@ -283,13 +283,9 @@
static __inline__ int get_order(unsigned long size)
{
int order;
-
- size = (size-1) >> (PAGE_SHIFT-1);
- order = -1;
- do {
+ size = (size-1) >> PAGE_SHIFT;
+ for ( order = 0; size; order++ )
size >>= 1;
- order++;
- } while (size);
return order;
}
diff -r f90820349056 -r c589ca6d292b xen/include/xen/event.h
--- a/xen/include/xen/event.h Thu Aug 11 18:44:59 2005
+++ b/xen/include/xen/event.h Thu Aug 11 20:38:44 2005
@@ -26,30 +26,14 @@
{
struct domain *d = v->domain;
shared_info_t *s = d->shared_info;
- int running;
- /* These three operations must happen in strict order. */
+ /* These four operations must happen in strict order. */
if ( !test_and_set_bit(port, &s->evtchn_pending[0]) &&
!test_bit (port, &s->evtchn_mask[0]) &&
- !test_and_set_bit(port>>5, &v->vcpu_info->evtchn_pending_sel) )
+ !test_and_set_bit(port>>5, &v->vcpu_info->evtchn_pending_sel) &&
+ !test_and_set_bit(0, &v->vcpu_info->evtchn_upcall_pending) )
{
- /* The VCPU pending flag must be set /after/ update to evtchn-pend. */
- set_bit(0, &v->vcpu_info->evtchn_upcall_pending);
evtchn_notify(v);
-
- /*
- * NB1. 'vcpu_flags' and 'processor' must be checked /after/ update of
- * pending flag. These values may fluctuate (after all, we hold no
- * locks) but the key insight is that each change will cause
- * evtchn_upcall_pending to be polled.
- *
- * NB2. We save VCPUF_running across the unblock to avoid a needless
- * IPI for domains that we IPI'd to unblock.
- */
- running = test_bit(_VCPUF_running, &v->vcpu_flags);
- vcpu_unblock(v);
- if ( running )
- smp_send_event_check_cpu(v->processor);
}
}
@@ -73,8 +57,9 @@
*/
extern void send_guest_pirq(struct domain *d, int pirq);
-#define event_pending(_d) \
- ((_d)->vcpu_info->evtchn_upcall_pending && \
- !(_d)->vcpu_info->evtchn_upcall_mask)
+/* Note: Bitwise operations result in fast code with no branches. */
+#define event_pending(v) \
+ ((v)->vcpu_info->evtchn_upcall_pending & \
+ ~(v)->vcpu_info->evtchn_upcall_mask)
#endif /* __XEN_EVENT_H__ */
diff -r f90820349056 -r c589ca6d292b xen/include/xen/sched.h
--- a/xen/include/xen/sched.h Thu Aug 11 18:44:59 2005
+++ b/xen/include/xen/sched.h Thu Aug 11 20:38:44 2005
@@ -297,10 +297,9 @@
(unsigned long)(_a1), (unsigned long)(_a2), (unsigned long)(_a3), \
(unsigned long)(_a4), (unsigned long)(_a5), (unsigned long)(_a6))
-#define hypercall_preempt_check() (unlikely( \
- softirq_pending(smp_processor_id()) | \
- (!!current->vcpu_info->evtchn_upcall_pending & \
- !current->vcpu_info->evtchn_upcall_mask) \
+#define hypercall_preempt_check() (unlikely( \
+ softirq_pending(smp_processor_id()) | \
+ event_pending(current) \
))
/* This domain_hash and domain_list are protected by the domlist_lock. */
diff -r f90820349056 -r c589ca6d292b xen/tools/symbols.c
--- a/xen/tools/symbols.c Thu Aug 11 18:44:59 2005
+++ b/xen/tools/symbols.c Thu Aug 11 20:38:44 2005
@@ -311,7 +311,7 @@
off = 0;
for (i = 0; i < cnt; i++) {
- if (!table[i].flags & SYM_FLAG_VALID)
+ if (!(table[i].flags & SYM_FLAG_VALID))
continue;
if ((valid & 0xFF) == 0)
diff -r f90820349056 -r c589ca6d292b linux-2.6-xen-sparse/arch/xen/x86_64/kernel/genapic.c
--- /dev/null Thu Aug 11 18:44:59 2005
+++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/genapic.c Thu Aug 11 20:38:44 2005
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2004 James Cleverdon, IBM.
+ * Subject to the GNU Public License, v.2
+ *
+ * Generic APIC sub-arch probe layer.
+ *
+ * Hacked for x86-64 by James Cleverdon from i386 architecture code by
+ * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
+ * James Cleverdon.
+ */
+#include <linux/config.h>
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/ctype.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include <asm/smp.h>
+#include <asm/ipi.h>
+
+#if defined(CONFIG_ACPI_BUS)
+#include <acpi/acpi_bus.h>
+#endif
+
+/* which logical CPU number maps to which CPU (physical APIC ID) */
+u8 x86_cpu_to_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
+EXPORT_SYMBOL(x86_cpu_to_apicid);
+u8 x86_cpu_to_log_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
+
+extern struct genapic apic_cluster;
+extern struct genapic apic_flat;
+
+#ifndef CONFIG_XEN
+struct genapic *genapic = &apic_flat;
+#else
+extern struct genapic apic_xen;
+struct genapic *genapic = &apic_xen;
+#endif
+
+
+/*
+ * Check the APIC IDs in bios_cpu_apicid and choose the APIC mode.
+ */
+void __init clustered_apic_check(void)
+{
+#ifndef CONFIG_XEN
+ long i;
+ u8 clusters, max_cluster;
+ u8 id;
+ u8 cluster_cnt[NUM_APIC_CLUSTERS];
+
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
+ /* AMD always uses flat mode right now */
+ genapic = &apic_flat;
+ goto print;
+ }
+
+#if defined(CONFIG_ACPI_BUS)
+ /*
+ * Some x86_64 machines use physical APIC mode regardless of how many
+ * procs/clusters are present (x86_64 ES7000 is an example).
+ */
+ if (acpi_fadt.revision > FADT2_REVISION_ID)
+ if (acpi_fadt.force_apic_physical_destination_mode) {
+ genapic = &apic_cluster;
+ goto print;
+ }
+#endif
+
+ memset(cluster_cnt, 0, sizeof(cluster_cnt));
+
+ for (i = 0; i < NR_CPUS; i++) {
+ id = bios_cpu_apicid[i];
+ if (id != BAD_APICID)
+ cluster_cnt[APIC_CLUSTERID(id)]++;
+ }
+
+ clusters = 0;
+ max_cluster = 0;
+ for (i = 0; i < NUM_APIC_CLUSTERS; i++) {
+ if (cluster_cnt[i] > 0) {
+ ++clusters;
+ if (cluster_cnt[i] > max_cluster)
+ max_cluster = cluster_cnt[i];
+ }
+ }
+
+ /*
+ * If we have clusters <= 1 and CPUs <= 8 in cluster 0, then flat mode,
+ * else if max_cluster <= 4 and cluster_cnt[15] == 0, clustered logical
+ * else physical mode.
+ * (We don't use lowest priority delivery + HW APIC IRQ steering, so
+ * can ignore the clustered logical case and go straight to physical.)
+ */
+ if (clusters <= 1 && max_cluster <= 8 && cluster_cnt[0] == max_cluster)
+ genapic = &apic_flat;
+ else
+ genapic = &apic_cluster;
+
+print:
+#else
+ /* hardcode to xen apic functions */
+ genapic = &apic_xen;
+#endif
+ printk(KERN_INFO "Setting APIC routing to %s\n", genapic->name);
+}
+
+/* Same for both flat and clustered. */
+
+#ifdef CONFIG_XEN
+extern void xen_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest);
+#endif
+
+void send_IPI_self(int vector)
+{
+#ifndef CONFIG_XEN
+ __send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL);
+#else
+ xen_send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL);
+#endif
+}
diff -r f90820349056 -r c589ca6d292b linux-2.6-xen-sparse/arch/xen/x86_64/kernel/genapic_xen.c
--- /dev/null Thu Aug 11 18:44:59 2005
+++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/genapic_xen.c Thu Aug 11 20:38:44 2005
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2004 James Cleverdon, IBM.
+ * Subject to the GNU Public License, v.2
+ *
+ * Xen APIC subarch code. Maximum 8 CPUs, logical delivery.
+ *
+ * Hacked for x86-64 by James Cleverdon from i386 architecture code by
+ * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
+ * James Cleverdon.
+ *
+ * Hacked to pieces for Xen by Chris Wright.
+ */
+#include <linux/config.h>
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/ctype.h>
+#include <linux/init.h>
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+#include <asm/smp.h>
+#include <asm/ipi.h>
+#else
+#include <asm/apic.h>
+#include <asm/apicdef.h>
+#include <asm/genapic.h>
+#endif
+#include <asm-xen/evtchn.h>
+
+DECLARE_PER_CPU(int, ipi_to_evtchn[NR_IPIS]);
+
+static inline void __send_IPI_one(unsigned int cpu, int vector)
+{
+ unsigned int evtchn;
+ Dprintk("%s\n", __FUNCTION__);
+
+ evtchn = per_cpu(ipi_to_evtchn, cpu)[vector];
+ if (evtchn)
+ notify_via_evtchn(evtchn);
+ else
+ printk("send_IPI to unbound port %d/%d", cpu, vector);
+}
+
+void xen_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest)
+{
+ int cpu;
+
+ switch (shortcut) {
+ case APIC_DEST_SELF:
+ __send_IPI_one(smp_processor_id(), vector);
+ break;
+ case APIC_DEST_ALLBUT:
+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
+ if (cpu == smp_processor_id())
+ continue;
+ if (cpu_isset(cpu, cpu_online_map)) {
+ __send_IPI_one(cpu, vector);
+ }
+ }
+ break;
+ case APIC_DEST_ALLINC:
+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
+ if (cpu_isset(cpu, cpu_online_map)) {
+ __send_IPI_one(cpu, vector);
+ }
+ }
+ break;
+ default:
+ printk("XXXXXX __send_IPI_shortcut %08x vector %d\n", shortcut,
+ vector);
+ break;
+ }
+}
+
+static cpumask_t xen_target_cpus(void)
+{
+ return cpu_online_map;
+}
+
+/*
+ * Set up the logical destination ID.
+ * Do nothing, not called now.
+ */
+static void xen_init_apic_ldr(void)
+{
+ Dprintk("%s\n", __FUNCTION__);
+ return;
+}
+
+static void xen_send_IPI_allbutself(int vector)
+{
+ /*
+ * if there are no other CPUs in the system then
+ * we get an APIC send error if we try to broadcast.
+ * thus we have to avoid sending IPIs in this case.
+ */
+ Dprintk("%s\n", __FUNCTION__);
+ if (num_online_cpus() > 1)
+ xen_send_IPI_shortcut(APIC_DEST_ALLBUT, vector,
APIC_DEST_LOGICAL);
+}
+
+static void xen_send_IPI_all(int vector)
+{
+ Dprintk("%s\n", __FUNCTION__);
+ xen_send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL);
+}
+
+static void xen_send_IPI_mask(cpumask_t cpumask, int vector)
+{
+ unsigned long mask = cpus_addr(cpumask)[0];
+ unsigned int cpu;
+ unsigned long flags;
+
+ Dprintk("%s\n", __FUNCTION__);
+ local_irq_save(flags);
+ WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]);
+
+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
+ if (cpu_isset(cpu, cpumask)) {
+ __send_IPI_one(cpu, vector);
+ }
+ }
+ local_irq_restore(flags);
+}
+
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+static int xen_apic_id_registered(void)
+{
+ /* better be set */
+ Dprintk("%s\n", __FUNCTION__);
+ return physid_isset(smp_processor_id(), phys_cpu_present_map);
+}
+#endif
+
+static unsigned int xen_cpu_mask_to_apicid(cpumask_t cpumask)
+{
+ Dprintk("%s\n", __FUNCTION__);
+ return cpus_addr(cpumask)[0] & APIC_ALL_CPUS;
+}
+
+static unsigned int phys_pkg_id(int index_msb)
+{
+ u32 ebx;
+
+ Dprintk("%s\n", __FUNCTION__);
+ ebx = cpuid_ebx(1);
+ return ((ebx >> 24) & 0xFF) >> index_msb;
+}
+
+struct genapic apic_xen = {
+ .name = "xen",
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+ .int_delivery_mode = dest_LowestPrio,
+#endif
+ .int_dest_mode = (APIC_DEST_LOGICAL != 0),
+ .int_delivery_dest = APIC_DEST_LOGICAL | APIC_DM_LOWEST,
+ .target_cpus = xen_target_cpus,
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+ .apic_id_registered = xen_apic_id_registered,
+#endif
+ .init_apic_ldr = xen_init_apic_ldr,
+ .send_IPI_all = xen_send_IPI_all,
+ .send_IPI_allbutself = xen_send_IPI_allbutself,
+ .send_IPI_mask = xen_send_IPI_mask,
+ .cpu_mask_to_apicid = xen_cpu_mask_to_apicid,
+ .phys_pkg_id = phys_pkg_id,
+};
diff -r f90820349056 -r c589ca6d292b tools/ioemu/target-i386-dm/qemu-vgaram-bin.gz
--- a/tools/ioemu/target-i386-dm/qemu-vgaram-bin.gz Thu Aug 11 18:44:59 2005
+++ /dev/null Thu Aug 11 20:38:44 2005
(binary file deleted: the gzip-compressed qemu-vgaram-bin image; its raw contents are not reproducible as text and are omitted here)