Index: 2005-12-16/include/asm-i386/mach-default/mach_mmu.h
===================================================================
--- /dev/null	1970-01-01 00:00:00.000000000 +0000
+++ 2005-12-16/include/asm-i386/mach-default/mach_mmu.h	2005-12-20 17:46:18.694786536 +0100
@@ -0,0 +1,18 @@
+#ifndef __ASM_MACH_MMU_H
+#define __ASM_MACH_MMU_H
+
+#define MACH_SWITCH_DECLS
+
+static inline void mach_switch_pgd(struct mm_struct *next, int cpu)
+{
+	load_cr3(next->pgd);
+}
+
+static inline void mach_switch_ldt(struct mm_struct *next, int cpu)
+{
+	load_LDT_nolock(&next->context, cpu);
+}
+
+#define mach_switch_commit() 0
+
+#endif /* __ASM_MACH_MMU_H */
Index: 2005-12-16/include/asm-i386/mach-xen/asm/mmu_context.h
===================================================================
--- 2005-12-16.orig/include/asm-i386/mach-xen/asm/mmu_context.h	2005-12-20 17:13:17.331999464 +0100
+++ /dev/null	1970-01-01 00:00:00.000000000 +0000
@@ -1,106 +0,0 @@
-#ifndef __I386_SCHED_H
-#define __I386_SCHED_H
-
-#include
-#include
-#include
-#include
-#include
-
-/*
- * Used for LDT copy/destruction.
- */
-int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
-void destroy_context(struct mm_struct *mm);
-
-
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-#if 0 /* XEN: no lazy tlb */
-	unsigned cpu = smp_processor_id();
-	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
-		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_LAZY;
-#endif
-}
-
-#define prepare_arch_switch(next) __prepare_arch_switch()
-
-static inline void __prepare_arch_switch(void)
-{
-	/*
-	 * Save away %fs and %gs. No need to save %es and %ds, as those
-	 * are always kernel segments while inside the kernel. Must
-	 * happen before reload of cr3/ldt (i.e., not in __switch_to).
-	 */
-	asm volatile ( "mov %%fs,%0 ; mov %%gs,%1"
-		: "=m" (current->thread.fs),
-		  "=m" (current->thread.gs));
-	asm volatile ( "movl %0,%%fs ; movl %0,%%gs"
-		: : "r" (0) );
-}
-
-extern void mm_pin(struct mm_struct *mm);
-extern void mm_unpin(struct mm_struct *mm);
-void mm_pin_all(void);
-
-static inline void switch_mm(struct mm_struct *prev,
-			     struct mm_struct *next,
-			     struct task_struct *tsk)
-{
-	int cpu = smp_processor_id();
-	struct mmuext_op _op[2], *op = _op;
-
-	if (likely(prev != next)) {
-		if (!test_bit(PG_pinned, &virt_to_page(next->pgd)->flags))
-			mm_pin(next);
-
-		/* stop flush ipis for the previous mm */
-		cpu_clear(cpu, prev->cpu_vm_mask);
-#if 0 /* XEN: no lazy tlb */
-		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
-		per_cpu(cpu_tlbstate, cpu).active_mm = next;
-#endif
-		cpu_set(cpu, next->cpu_vm_mask);
-
-		/* Re-load page tables: load_cr3(next->pgd) */
-		per_cpu(cur_pgd, cpu) = next->pgd;
-		op->cmd = MMUEXT_NEW_BASEPTR;
-		op->arg1.mfn = pfn_to_mfn(__pa(next->pgd) >> PAGE_SHIFT);
-		op++;
-
-		/*
-		 * load the LDT, if the LDT is different:
-		 */
-		if (unlikely(prev->context.ldt != next->context.ldt)) {
-			/* load_LDT_nolock(&next->context, cpu) */
-			op->cmd = MMUEXT_SET_LDT;
-			op->arg1.linear_addr = (unsigned long)next->context.ldt;
-			op->arg2.nr_ents = next->context.size;
-			op++;
-		}
-
-		BUG_ON(HYPERVISOR_mmuext_op(_op, op-_op, NULL, DOMID_SELF));
-	}
-#if 0 /* XEN: no lazy tlb */
-	else {
-		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
-		BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next);
-
-		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
-			/* We were in lazy tlb mode and leave_mm disabled
-			 * tlb flush IPI delivery. We must reload %cr3.
-			 */
-			load_cr3(next->pgd);
-			load_LDT_nolock(&next->context, cpu);
-		}
-	}
-#endif
-}
-
-#define deactivate_mm(tsk, mm) \
-	asm("movl %0,%%fs ; movl %0,%%gs": :"r" (0))
-
-#define activate_mm(prev, next) \
-	switch_mm((prev),(next),NULL)
-
-#endif
Index: 2005-12-16/include/asm-i386/mach-xen/mach_mmu.h
===================================================================
--- /dev/null	1970-01-01 00:00:00.000000000 +0000
+++ 2005-12-16/include/asm-i386/mach-xen/mach_mmu.h	2005-12-20 17:46:18.697786080 +0100
@@ -0,0 +1,48 @@
+#ifndef __ASM_MACH_MMU_H
+#define __ASM_MACH_MMU_H
+
+#define prepare_arch_switch(next) __prepare_arch_switch()
+
+static inline void __prepare_arch_switch(void)
+{
+	/*
+	 * Save away %fs and %gs. No need to save %es and %ds, as those
+	 * are always kernel segments while inside the kernel. Must
+	 * happen before reload of cr3/ldt (i.e., not in __switch_to).
+	 */
+	asm volatile ( "mov %%fs,%0 ; mov %%gs,%1"
+		: "=m" (current->thread.fs),
+		  "=m" (current->thread.gs));
+	asm volatile ( "movl %0,%%fs ; movl %0,%%gs"
+		: : "r" (0) );
+}
+
+extern void mm_pin(struct mm_struct *mm);
+extern void mm_unpin(struct mm_struct *mm);
+void mm_pin_all(void);
+
+#define MACH_SWITCH_DECLS struct mmuext_op ops[2], *op = ops
+
+static inline struct mmuext_op *mach_switch_pgd(struct mm_struct *next, int cpu, struct mmuext_op *op)
+{
+	if (!test_bit(PG_pinned, &virt_to_page(next->pgd)->flags))
+		mm_pin(next);
+	per_cpu(cur_pgd, cpu) = next->pgd;
+	op->cmd = MMUEXT_NEW_BASEPTR;
+	op->arg1.mfn = pfn_to_mfn(__pa(next->pgd) >> PAGE_SHIFT);
+	return op + 1;
+}
+#define mach_switch_pgd(next, cpu) ((void)(op = mach_switch_pgd(next, cpu, op)))
+
+static inline struct mmuext_op *mach_switch_ldt(struct mm_struct *next, int cpu, struct mmuext_op *op)
+{
+	op->cmd = MMUEXT_SET_LDT;
+	op->arg1.linear_addr = (unsigned long)next->context.ldt;
+	op->arg2.nr_ents = next->context.size;
+	return op + 1;
+}
+#define mach_switch_ldt(next, cpu) ((void)(op = mach_switch_ldt(next, cpu, op)))
+
+#define mach_switch_commit() HYPERVISOR_mmuext_op(ops, op - ops, NULL, DOMID_SELF)
+
+#endif /* __ASM_MACH_MMU_H */
Index: 2005-12-16/include/asm-i386/mmu_context.h
===================================================================
--- 2005-12-16.orig/include/asm-i386/mmu_context.h	2005-12-20 17:13:17.333999160 +0100
+++ 2005-12-16/include/asm-i386/mmu_context.h	2005-12-20 17:48:12.502485136 +0100
@@ -6,6 +6,7 @@
 #include
 #include
 #include
+#include
 
 /*
  * Used for LDT copy/destruction.
@@ -16,7 +17,7 @@ void destroy_context(struct mm_struct *m
 
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && !defined(CONFIG_X86_XEN)
 	unsigned cpu = smp_processor_id();
 	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
 		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_LAZY;
@@ -28,26 +29,27 @@ static inline void switch_mm(struct mm_s
 			     struct task_struct *tsk)
 {
 	int cpu = smp_processor_id();
+	MACH_SWITCH_DECLS;
 
 	if (likely(prev != next)) {
 		/* stop flush ipis for the previous mm */
 		cpu_clear(cpu, prev->cpu_vm_mask);
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && !defined(CONFIG_X86_XEN)
 		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
 		per_cpu(cpu_tlbstate, cpu).active_mm = next;
 #endif
 		cpu_set(cpu, next->cpu_vm_mask);
 
 		/* Re-load page tables */
-		load_cr3(next->pgd);
+		mach_switch_pgd(next, cpu);
 
 		/*
 		 * load the LDT, if the LDT is different:
 		 */
 		if (unlikely(prev->context.ldt != next->context.ldt))
-			load_LDT_nolock(&next->context, cpu);
+			mach_switch_ldt(next, cpu);
 	}
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && !defined(CONFIG_X86_XEN)
 	else {
 		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
 		BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next);
@@ -56,11 +58,12 @@ static inline void switch_mm(struct mm_s
 			/* We were in lazy tlb mode and leave_mm disabled
 			 * tlb flush IPI delivery. We must reload %cr3.
 			 */
-			load_cr3(next->pgd);
-			load_LDT_nolock(&next->context, cpu);
+			mach_switch_pgd(next, cpu);
+			mach_switch_ldt(next, cpu);
 		}
 	}
 #endif
+	BUG_ON(mach_switch_commit());
 }
 
 #define deactivate_mm(tsk, mm) \