diff -r 733657e188f9 xen/arch/ia64/linux-xen/smp.c
--- a/xen/arch/ia64/linux-xen/smp.c	Thu Apr 20 12:08:45 2006
+++ b/xen/arch/ia64/linux-xen/smp.c	Fri Apr 21 04:39:52 2006
@@ -53,26 +53,31 @@
 #endif

 #ifdef XEN
+static void local_flush_tlb_vhpt (void *dummy)
+{
+	vhpt_flush ();
+	local_flush_tlb_all ();
+}
+
 // FIXME: MOVE ELSEWHERE
 //Huh? This seems to be used on ia64 even if !CONFIG_SMP
 void flush_tlb_mask(cpumask_t mask)
 {
-#ifdef CONFIG_SMP
 	int cpu;

 	cpu = smp_processor_id();
 	if (cpu_isset (cpu, mask)) {
 		cpu_clear(cpu, mask);
-		local_flush_tlb_all ();
+		local_flush_tlb_vhpt (NULL);
 	}

 	if (cpus_empty(mask))
 		return;

+#ifdef CONFIG_SMP
 	for (cpu = 0; cpu < NR_CPUS; ++cpu)
 		if (cpu_isset(cpu, mask))
-			smp_call_function_single
-			    (cpu, (void (*)(void *))local_flush_tlb_all, NULL, 1, 1);
+			smp_call_function_single (cpu, local_flush_tlb_vhpt, NULL, 1, 1);
 #endif
 }
 //#if CONFIG_SMP || IA64
@@ -94,13 +99,6 @@
 	platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
 }

-
-//Huh? This seems to be used on ia64 even if !CONFIG_SMP
-int try_flush_tlb_mask(cpumask_t mask)
-{
-	dummy();
-	return 1;
-}
 #endif
 #endif

@@ -286,13 +284,13 @@
 }
 #endif

+#ifndef XEN
 void
 smp_flush_tlb_all (void)
 {
 	on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1, 1);
 }

-#ifdef XEN
 void
 smp_vhpt_flush_all(void)
 {
diff -r 733657e188f9 xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c	Thu Apr 20 12:08:45 2006
+++ b/xen/arch/ia64/xen/domain.c	Fri Apr 21 04:39:52 2006
@@ -98,13 +98,9 @@
 	if (d->shared_info != NULL)
 		free_xenheap_page(d->shared_info);

+	flush_tlb_mask (d->domain_dirty_cpumask);
+
 	deallocate_rid_range(d);
-
-	/* It is really good in this?  */
-	flush_tlb_all();
-
-	/* It is really good in this?  */
-	vhpt_flush_all();
 }

 static void default_idle(void)
diff -r 733657e188f9 xen/arch/ia64/xen/xenmisc.c
--- a/xen/arch/ia64/xen/xenmisc.c	Thu Apr 20 12:08:45 2006
+++ b/xen/arch/ia64/xen/xenmisc.c	Fri Apr 21 04:39:52 2006
@@ -278,6 +278,9 @@
 		vmx_load_state(next);
 	/*ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next);*/
 	prev = ia64_switch_to(next);
+
+	cpu_set(smp_processor_id(), current->domain->domain_dirty_cpumask);
+
 	if (!VMX_DOMAIN(current)){
 		vcpu_set_next_timer(current);
 	}
diff -r 733657e188f9 xen/include/asm-ia64/linux-xen/asm/tlbflush.h
--- a/xen/include/asm-ia64/linux-xen/asm/tlbflush.h	Thu Apr 20 12:08:45 2006
+++ b/xen/include/asm-ia64/linux-xen/asm/tlbflush.h	Fri Apr 21 04:39:52 2006
@@ -25,6 +25,7 @@
  */
 extern void local_flush_tlb_all (void);

+#ifndef XEN
 #ifdef CONFIG_SMP
   extern void smp_flush_tlb_all (void);
   extern void smp_flush_tlb_mm (struct mm_struct *mm);
@@ -33,7 +34,6 @@
 # define flush_tlb_all()	local_flush_tlb_all()
 #endif

-#ifndef XEN
 static inline void
 local_finish_flush_tlb_mm (struct mm_struct *mm)
 {
diff -r 733657e188f9 xen/include/asm-ia64/vhpt.h
--- a/xen/include/asm-ia64/vhpt.h	Thu Apr 20 12:08:45 2006
+++ b/xen/include/asm-ia64/vhpt.h	Fri Apr 21 04:39:52 2006
@@ -11,13 +11,7 @@
 /* Number of entries in the VHPT.  The size of an entry is 4*8B == 32B */
 #define	VHPT_NUM_ENTRIES	(1 << (VHPT_SIZE_LOG2 - 5))

-#ifdef CONFIG_SMP
-# define vhpt_flush_all()	smp_vhpt_flush_all()
-#else
-# define vhpt_flush_all()	vhpt_flush()
-#endif
 // FIXME: These should be automatically generated
-
 #define	VLE_PGFLAGS_OFFSET		0
 #define	VLE_ITIR_OFFSET			8
 #define	VLE_TITAG_OFFSET		16
@@ -47,7 +41,6 @@
 extern void vhpt_insert (unsigned long vadr, unsigned long pte,
 			 unsigned long logps);
 extern void vhpt_flush(void);
-extern void smp_vhpt_flush_all(void);

 /* Currently the VHPT is allocated per CPU.  */
 DECLARE_PER_CPU (unsigned long, vhpt_paddr);
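
Illustrative sketch (not part of the changeset): the patch drops the unconditional flush_tlb_all()/vhpt_flush_all() at domain teardown in favour of flush_tlb_mask(d->domain_dirty_cpumask), and fills that mask in the context-switch path so only CPUs the domain actually ran on get their TLB and VHPT flushed. The standalone C program below models that dirty-cpumask idea under simplified assumptions; cpumask_t is reduced to a bit field, and cpu_set_bit(), cpu_test_bit() and the printf stand-in for the per-CPU TLB/VHPT flush are hypothetical helpers, not Xen's real cpu_set()/cpu_isset()/smp_call_function_single().

/*
 * Standalone model of the dirty-cpumask pattern: record which physical
 * CPUs a domain has run on, then flush only those CPUs at teardown.
 */
#include <stdio.h>

#define NR_CPUS 8

typedef unsigned long cpumask_t;                 /* one bit per physical CPU */

static void cpu_set_bit(int cpu, cpumask_t *mask)  { *mask |= 1UL << cpu; }
static int  cpu_test_bit(int cpu, cpumask_t mask)  { return (mask >> cpu) & 1UL; }

/* Stand-in for the per-CPU VHPT + TLB flush the patch triggers via IPI. */
static void local_flush_tlb_vhpt(int cpu)
{
	printf("flush TLB+VHPT on cpu %d\n", cpu);
}

/* Flush only the CPUs recorded in the mask, as flush_tlb_mask() does. */
static void flush_tlb_mask(cpumask_t mask)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; ++cpu)
		if (cpu_test_bit(cpu, mask))
			local_flush_tlb_vhpt(cpu);
}

struct domain {
	cpumask_t domain_dirty_cpumask;          /* CPUs this domain ran on */
};

int main(void)
{
	struct domain d = { 0 };

	/* Context-switch path: mark the scheduling CPU as dirty. */
	cpu_set_bit(2, &d.domain_dirty_cpumask);
	cpu_set_bit(5, &d.domain_dirty_cpumask);

	/* Domain-destruction path: flush only the dirty CPUs. */
	flush_tlb_mask(d.domain_dirty_cpumask);
	return 0;
}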