WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-changelog

[Xen-changelog] [xen-unstable] x86: consolidate/enhance TLB flushing int

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] x86: consolidate/enhance TLB flushing interface
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Wed, 17 Oct 2007 09:40:10 -0700
Delivery-date: Wed, 17 Oct 2007 09:41:44 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir@xxxxxxxxxxxxx>
# Date 1192552297 -3600
# Node ID 9488d31665538a815541109cd2da94adec291bbc
# Parent  1f893d055c6f79d719199d6eac139165295713a0
x86: consolidate/enhance TLB flushing interface

Folding into a single local handler and a single SMP multiplexor as
well as adding capability to also flush caches through the same
interfaces (a subsequent patch will make use of this).

While it is at it changing cpuinfo_x86, this patch also removes several
unused fields apparently inherited from Linux.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 xen/arch/x86/cpu/amd.c                           |    5 +
 xen/arch/x86/cpu/common.c                        |    8 +-
 xen/arch/x86/cpu/cyrix.c                         |    6 -
 xen/arch/x86/cpu/intel.c                         |   12 +++
 xen/arch/x86/cpu/mtrr/generic.c                  |    4 -
 xen/arch/x86/domain.c                            |    2 
 xen/arch/x86/domain_build.c                      |    2 
 xen/arch/x86/flushtlb.c                          |   73 ++++++++++++++++++-----
 xen/arch/x86/mm.c                                |   22 +++---
 xen/arch/x86/mm/p2m.c                            |    2 
 xen/arch/x86/mm/shadow/multi.c                   |    2 
 xen/arch/x86/setup.c                             |    2 
 xen/arch/x86/smp.c                               |   34 ++--------
 xen/arch/x86/smpboot.c                           |    2 
 xen/arch/x86/x86_32/domain_page.c                |    4 -
 xen/arch/x86/x86_32/mm.c                         |    2 
 xen/arch/x86/x86_64/compat/mm.c                  |    2 
 xen/arch/x86/x86_64/mm.c                         |    2 
 xen/include/asm-x86/cpufeature.h                 |    3 
 xen/include/asm-x86/flushtlb.h                   |   69 +++++++++++++--------
 xen/include/asm-x86/mach-default/smpboot_hooks.h |    4 -
 xen/include/asm-x86/processor.h                  |   22 ++----
 22 files changed, 174 insertions(+), 110 deletions(-)

diff -r 1f893d055c6f -r 9488d3166553 xen/arch/x86/cpu/amd.c
--- a/xen/arch/x86/cpu/amd.c    Tue Oct 16 10:27:55 2007 +0100
+++ b/xen/arch/x86/cpu/amd.c    Tue Oct 16 17:31:37 2007 +0100
@@ -372,6 +372,11 @@ static void __init init_amd(struct cpuin
        /* Prevent TSC drift in non single-processor, single-core platforms. */
        if ((smp_processor_id() == 1) && c1_ramping_may_cause_clock_drift(c))
                disable_c1_ramping();
+
+       /* Support INVLPG of superpages? */
+       __set_bit(2, &c->invlpg_works_ok);
+       if ( cpu_has(c, X86_FEATURE_PAGE1GB) )
+               __set_bit(3, &c->invlpg_works_ok);
 
        start_svm(c);
 }
diff -r 1f893d055c6f -r 9488d3166553 xen/arch/x86/cpu/common.c
--- a/xen/arch/x86/cpu/common.c Tue Oct 16 10:27:55 2007 +0100
+++ b/xen/arch/x86/cpu/common.c Tue Oct 16 17:31:37 2007 +0100
@@ -229,7 +229,6 @@ void __devinit generic_identify(struct c
 void __devinit generic_identify(struct cpuinfo_x86 * c)
 {
        u32 tfms, xlvl;
-       int junk;
 
        if (have_cpuid_p()) {
                /* Get vendor name */
@@ -244,8 +243,8 @@ void __devinit generic_identify(struct c
        
                /* Intel-defined flags: level 0x00000001 */
                if ( c->cpuid_level >= 0x00000001 ) {
-                       u32 capability, excap;
-                       cpuid(0x00000001, &tfms, &junk, &excap, &capability);
+                       u32 capability, excap, ebx;
+                       cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
                        c->x86_capability[0] = capability;
                        c->x86_capability[4] = excap;
                        c->x86 = (tfms >> 8) & 15;
@@ -255,6 +254,8 @@ void __devinit generic_identify(struct c
                                c->x86_model += ((tfms >> 16) & 0xF) << 4;
                        } 
                        c->x86_mask = tfms & 15;
+                       if ( cpu_has(c, X86_FEATURE_CLFLSH) )
+                               c->x86_clflush_size = ((ebx >> 8) & 0xff) * 8;
                } else {
                        /* Have CPUID level 0 only - unheard of */
                        c->x86 = 4;
@@ -313,6 +314,7 @@ void __devinit identify_cpu(struct cpuin
        c->x86_vendor_id[0] = '\0'; /* Unset */
        c->x86_model_id[0] = '\0';  /* Unset */
        c->x86_max_cores = 1;
+       c->x86_clflush_size = 0;
        memset(&c->x86_capability, 0, sizeof c->x86_capability);
 
        if (!have_cpuid_p()) {
diff -r 1f893d055c6f -r 9488d3166553 xen/arch/x86/cpu/cyrix.c
--- a/xen/arch/x86/cpu/cyrix.c  Tue Oct 16 10:27:55 2007 +0100
+++ b/xen/arch/x86/cpu/cyrix.c  Tue Oct 16 17:31:37 2007 +0100
@@ -239,7 +239,7 @@ static void __init init_cyrix(struct cpu
                /* Emulate MTRRs using Cyrix's ARRs. */
                set_bit(X86_FEATURE_CYRIX_ARR, c->x86_capability);
                /* 6x86's contain this bug */
-               c->coma_bug = 1;
+               /*c->coma_bug = 1;*/
                break;
 
        case 4: /* MediaGX/GXm or Geode GXM/GXLV/GX1 */
@@ -272,7 +272,7 @@ static void __init init_cyrix(struct cpu
                }
                else
                {
-                       c->coma_bug = 1;      /* 6x86MX, it has the bug. */
+                       /*c->coma_bug = 1;*/      /* 6x86MX, it has the bug. */
                }
                tmp = (!(dir0_lsn & 7) || dir0_lsn & 1) ? 2 : 0;
                Cx86_cb[tmp] = cyrix_model_mult2[dir0_lsn & 7];
@@ -287,7 +287,7 @@ static void __init init_cyrix(struct cpu
                switch (dir0_lsn) {
                case 0xd:  /* either a 486SLC or DLC w/o DEVID */
                        dir0_msn = 0;
-                       p = Cx486_name[(c->hard_math) ? 1 : 0];
+                       p = Cx486_name[/*(c->hard_math) ? 1 : 0*/1];
                        break;
 
                case 0xe:  /* a 486S A step */
diff -r 1f893d055c6f -r 9488d3166553 xen/arch/x86/cpu/intel.c
--- a/xen/arch/x86/cpu/intel.c  Tue Oct 16 10:27:55 2007 +0100
+++ b/xen/arch/x86/cpu/intel.c  Tue Oct 16 17:31:37 2007 +0100
@@ -123,6 +123,18 @@ static void __devinit init_intel(struct 
        if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
                clear_bit(X86_FEATURE_SEP, c->x86_capability);
 
+       /* Supports INVLPG of superpages? */
+       __set_bit(2, &c->invlpg_works_ok);
+       if (/* PentiumPro erratum 30 */
+           (c->x86 == 6 && c->x86_model == 1 && c->x86_mask < 9) ||
+           /* Dual-Core Intel Xeon 3000/5100 series erratum 89/90 */
+           /* Quad-Core Intel Xeon 3200/5300 series erratum 89/88 */
+           /* Intel Core2 erratum 89 */
+           (c->x86 == 6 && c->x86_model == 15 ) ||
+           /* Dual-Core Intel Xeon LV/ULV erratum 75 */
+           (c->x86 == 6 && c->x86_model == 14 ))
+               __clear_bit(2, &c->invlpg_works_ok);
+
        /* Names for the Pentium II/Celeron processors 
           detectable only by also checking the cache size.
           Dixon is NOT a Celeron. */
diff -r 1f893d055c6f -r 9488d3166553 xen/arch/x86/cpu/mtrr/generic.c
--- a/xen/arch/x86/cpu/mtrr/generic.c   Tue Oct 16 10:27:55 2007 +0100
+++ b/xen/arch/x86/cpu/mtrr/generic.c   Tue Oct 16 17:31:37 2007 +0100
@@ -313,7 +313,7 @@ static void prepare_set(void)
        }
 
        /* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
-       local_flush_tlb();
+       flush_tlb_local();
 
        /*  Save MTRR state */
        rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);
@@ -325,7 +325,7 @@ static void post_set(void)
 static void post_set(void)
 {
        /*  Flush TLBs (no need to flush caches - they are disabled)  */
-       local_flush_tlb();
+       flush_tlb_local();
 
        /* Intel (P6) standard MTRRs */
        mtrr_wrmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);
diff -r 1f893d055c6f -r 9488d3166553 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Tue Oct 16 10:27:55 2007 +0100
+++ b/xen/arch/x86/domain.c     Tue Oct 16 17:31:37 2007 +0100
@@ -1299,7 +1299,7 @@ void context_switch(struct vcpu *prev, s
         {
             uint64_t efer = read_efer();
 
-            local_flush_tlb_one(GDT_VIRT_START(next) +
+            flush_tlb_one_local(GDT_VIRT_START(next) +
                                 FIRST_RESERVED_GDT_BYTE);
 
             if ( !is_pv_32on64_vcpu(next) == !(efer & EFER_SCE) )
diff -r 1f893d055c6f -r 9488d3166553 xen/arch/x86/domain_build.c
--- a/xen/arch/x86/domain_build.c       Tue Oct 16 10:27:55 2007 +0100
+++ b/xen/arch/x86/domain_build.c       Tue Oct 16 17:31:37 2007 +0100
@@ -347,7 +347,7 @@ int __init construct_dom0(
         for ( i = 0; i < MAX_VIRT_CPUS; i++ )
             d->arch.mm_perdomain_pt[((i << GDT_LDT_VCPU_SHIFT) +
                                      FIRST_RESERVED_GDT_PAGE)] = gdt_l1e;
-        local_flush_tlb_one(GDT_LDT_VIRT_START + FIRST_RESERVED_GDT_BYTE);
+        flush_tlb_one_local(GDT_LDT_VIRT_START + FIRST_RESERVED_GDT_BYTE);
     }
 #endif
     if ( parms.pae == PAEKERN_extended_cr3 )
diff -r 1f893d055c6f -r 9488d3166553 xen/arch/x86/flushtlb.c
--- a/xen/arch/x86/flushtlb.c   Tue Oct 16 10:27:55 2007 +0100
+++ b/xen/arch/x86/flushtlb.c   Tue Oct 16 17:31:37 2007 +0100
@@ -84,10 +84,10 @@ void write_cr3(unsigned long cr3)
 
 #ifdef USER_MAPPINGS_ARE_GLOBAL
     __pge_off();
-    __asm__ __volatile__ ( "mov %0, %%cr3" : : "r" (cr3) : "memory" );
+    asm volatile ( "mov %0, %%cr3" : : "r" (cr3) : "memory" );
     __pge_on();
 #else
-    __asm__ __volatile__ ( "mov %0, %%cr3" : : "r" (cr3) : "memory" );
+    asm volatile ( "mov %0, %%cr3" : : "r" (cr3) : "memory" );
 #endif
 
     post_flush(t);
@@ -95,26 +95,69 @@ void write_cr3(unsigned long cr3)
     local_irq_restore(flags);
 }
 
-void local_flush_tlb(void)
+void flush_area_local(const void *va, unsigned int flags)
 {
-    unsigned long flags;
-    u32 t;
+    const struct cpuinfo_x86 *c = &current_cpu_data;
+    unsigned int level = flags & FLUSH_LEVEL_MASK;
+    unsigned long irqfl;
+
+    ASSERT(level < CONFIG_PAGING_LEVELS);
 
     /* This non-reentrant function is sometimes called in interrupt context. */
-    local_irq_save(flags);
+    local_irq_save(irqfl);
 
-    t = pre_flush();
+    if ( flags & (FLUSH_TLB|FLUSH_TLB_GLOBAL) )
+    {
+        if ( (level != 0) && test_bit(level, &c->invlpg_works_ok) )
+        {
+            asm volatile ( "invlpg %0"
+                           : : "m" (*(const char *)(va)) : "memory" );
+        }
+        else
+        {
+            u32 t = pre_flush();
 
-    hvm_flush_guest_tlbs();
+            hvm_flush_guest_tlbs();
 
-#ifdef USER_MAPPINGS_ARE_GLOBAL
-    __pge_off();
-    __pge_on();
-#else
-    __asm__ __volatile__ ( "mov %0, %%cr3" : : "r" (read_cr3()) : "memory" );
+#ifndef USER_MAPPINGS_ARE_GLOBAL
+            if ( !(flags & FLUSH_TLB_GLOBAL) ||
+                 !(mmu_cr4_features & X86_CR4_PGE) )
+            {
+                asm volatile ( "mov %0, %%cr3"
+                               : : "r" (read_cr3()) : "memory" );
+            }
+            else
 #endif
+            {
+                __pge_off();
+                barrier();
+                __pge_on();
+            }
 
-    post_flush(t);
+            post_flush(t);
+        }
+    }
 
-    local_irq_restore(flags);
+    if ( flags & FLUSH_CACHE )
+    {
+        unsigned long i, sz;
+
+        sz = level ? (1UL << ((level - 1) * PAGETABLE_ORDER)) : ULONG_MAX;
+
+        if ( c->x86_clflush_size && c->x86_cache_size &&
+             (sz < (c->x86_cache_size >> (PAGE_SHIFT - 10))) )
+        {
+            sz <<= PAGE_SHIFT;
+            va = (const void *)((unsigned long)va & ~(sz - 1));
+            for ( i = 0; i < sz; i += c->x86_clflush_size )
+                 asm volatile ( "clflush %0"
+                                : : "m" (((const char *)va)[i]) );
+        }
+        else
+        {
+            wbinvd();
+        }
+    }
+
+    local_irq_restore(irqfl);
 }
diff -r 1f893d055c6f -r 9488d3166553 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Tue Oct 16 10:27:55 2007 +0100
+++ b/xen/arch/x86/mm.c Tue Oct 16 17:31:37 2007 +0100
@@ -372,7 +372,7 @@ void make_cr3(struct vcpu *v, unsigned l
     /* First check the previous high mapping can't be in the TLB. 
      * (i.e. have we loaded CR3 since we last did this?) */
     if ( unlikely(this_cpu(make_cr3_timestamp) == this_cpu(tlbflush_time)) )
-        local_flush_tlb_one(fix_to_virt(FIX_PAE_HIGHMEM_0 + cpu));
+        flush_tlb_one_local(fix_to_virt(FIX_PAE_HIGHMEM_0 + cpu));
     highmem_l3tab = (l3_pgentry_t *)fix_to_virt(FIX_PAE_HIGHMEM_0 + cpu);
     lowmem_l3tab  = cache->table[cache->inuse_idx];
     memcpy(lowmem_l3tab, highmem_l3tab, sizeof(cache->table[0]));
@@ -1886,7 +1886,7 @@ static void process_deferred_ops(void)
         if ( deferred_ops & DOP_FLUSH_ALL_TLBS )
             flush_tlb_mask(d->domain_dirty_cpumask);
         else
-            local_flush_tlb();
+            flush_tlb_local();
     }
 
     if ( deferred_ops & DOP_RELOAD_LDT )
@@ -2172,7 +2172,7 @@ int do_mmuext_op(
         case MMUEXT_INVLPG_LOCAL:
             if ( !paging_mode_enabled(d) 
                  || paging_invlpg(v, op.arg1.linear_addr) != 0 )
-                local_flush_tlb_one(op.arg1.linear_addr);
+                flush_tlb_one_local(op.arg1.linear_addr);
             break;
 
         case MMUEXT_TLB_FLUSH_MULTI:
@@ -2848,7 +2848,7 @@ int do_update_va_mapping(unsigned long v
         switch ( (bmap_ptr = flags & ~UVMF_FLUSHTYPE_MASK) )
         {
         case UVMF_LOCAL:
-            local_flush_tlb();
+            flush_tlb_local();
             break;
         case UVMF_ALL:
             flush_tlb_mask(d->domain_dirty_cpumask);
@@ -2870,7 +2870,7 @@ int do_update_va_mapping(unsigned long v
         case UVMF_LOCAL:
             if ( !paging_mode_enabled(d) 
                  || (paging_invlpg(current, va) != 0) ) 
-                local_flush_tlb_one(va);
+                flush_tlb_one_local(va);
             break;
         case UVMF_ALL:
             flush_tlb_one_mask(d->domain_dirty_cpumask, va);
@@ -2989,7 +2989,7 @@ long do_set_gdt(XEN_GUEST_HANDLE(ulong) 
     LOCK_BIGLOCK(current->domain);
 
     if ( (ret = set_gdt(current, frames, entries)) == 0 )
-        local_flush_tlb();
+        flush_tlb_local();
 
     UNLOCK_BIGLOCK(current->domain);
 
@@ -3544,7 +3544,8 @@ int map_pages_to_xen(
 
             if ( (l2e_get_flags(ol2e) & _PAGE_PRESENT) )
             {
-                local_flush_tlb_pge();
+                flush_area_local((const void *)virt,
+                                 FLUSH_TLB_GLOBAL|FLUSH_LEVEL(2));
                 if ( !(l2e_get_flags(ol2e) & _PAGE_PSE) )
                     free_xen_pagetable(mfn_to_virt(l2e_get_pfn(ol2e)));
             }
@@ -3572,14 +3573,15 @@ int map_pages_to_xen(
                                            l2e_get_flags(*pl2e) & ~_PAGE_PSE));
                 l2e_write_atomic(pl2e, l2e_from_pfn(virt_to_mfn(pl1e),
                                                     __PAGE_HYPERVISOR));
-                local_flush_tlb_pge();
+                flush_area_local((const void *)virt,
+                                 FLUSH_TLB_GLOBAL|FLUSH_LEVEL(2));
             }
 
             pl1e  = l2e_to_l1e(*pl2e) + l1_table_offset(virt);
             ol1e  = *pl1e;
             l1e_write_atomic(pl1e, l1e_from_pfn(mfn, flags));
             if ( (l1e_get_flags(ol1e) & _PAGE_PRESENT) )
-                local_flush_tlb_one(virt);
+                flush_tlb_one_local(virt);
 
             virt    += 1UL << L1_PAGETABLE_SHIFT;
             mfn     += 1UL;
@@ -3655,7 +3657,7 @@ void destroy_xen_mappings(unsigned long 
         }
     }
 
-    flush_tlb_all_pge();
+    flush_all(FLUSH_TLB_GLOBAL);
 }
 
 void __set_fixmap(
diff -r 1f893d055c6f -r 9488d3166553 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Tue Oct 16 10:27:55 2007 +0100
+++ b/xen/arch/x86/mm/p2m.c     Tue Oct 16 17:31:37 2007 +0100
@@ -493,7 +493,7 @@ static void audit_p2m(struct domain *d)
     test_linear = ( (d == current->domain)
                     && !pagetable_is_null(current->arch.monitor_table) );
     if ( test_linear )
-        local_flush_tlb();
+        flush_tlb_local();
 
     /* Audit part one: walk the domain's page allocation list, checking
      * the m2p entries. */
diff -r 1f893d055c6f -r 9488d3166553 xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c    Tue Oct 16 10:27:55 2007 +0100
+++ b/xen/arch/x86/mm/shadow/multi.c    Tue Oct 16 17:31:37 2007 +0100
@@ -3089,7 +3089,7 @@ sh_invlpg(struct vcpu *v, unsigned long 
     if ( mfn_to_shadow_page(shadow_l2e_get_mfn(sl2e))->type
          == SH_type_fl1_shadow )
     {
-        local_flush_tlb();
+        flush_tlb_local();
         return 0;
     }
 
diff -r 1f893d055c6f -r 9488d3166553 xen/arch/x86/setup.c
--- a/xen/arch/x86/setup.c      Tue Oct 16 10:27:55 2007 +0100
+++ b/xen/arch/x86/setup.c      Tue Oct 16 17:31:37 2007 +0100
@@ -114,7 +114,7 @@ struct tss_struct init_tss[NR_CPUS];
 
 char __attribute__ ((__section__(".bss.stack_aligned"))) 
cpu0_stack[STACK_SIZE];
 
-struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
+struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, 1, -1 };
 
 #if CONFIG_PAGING_LEVELS > 2
 unsigned long mmu_cr4_features = X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE;
diff -r 1f893d055c6f -r 9488d3166553 xen/arch/x86/smp.c
--- a/xen/arch/x86/smp.c        Tue Oct 16 10:27:55 2007 +0100
+++ b/xen/arch/x86/smp.c        Tue Oct 16 17:31:37 2007 +0100
@@ -164,34 +164,28 @@ void send_IPI_mask_phys(cpumask_t mask, 
 
 static DEFINE_SPINLOCK(flush_lock);
 static cpumask_t flush_cpumask;
-static unsigned long flush_va;
+static const void *flush_va;
+static unsigned int flush_flags;
 
 fastcall void smp_invalidate_interrupt(void)
 {
     ack_APIC_irq();
     perfc_incr(ipis);
     irq_enter();
-    if ( !__sync_lazy_execstate() )
-    {
-        if ( flush_va == FLUSHVA_ALL )
-            local_flush_tlb();
-        else
-            local_flush_tlb_one(flush_va);
-    }
+    if ( !__sync_lazy_execstate() ||
+         (flush_flags & (FLUSH_TLB_GLOBAL | FLUSH_CACHE)) )
+        flush_area_local(flush_va, flush_flags);
     cpu_clear(smp_processor_id(), flush_cpumask);
     irq_exit();
 }
 
-void __flush_tlb_mask(cpumask_t mask, unsigned long va)
+void flush_area_mask(cpumask_t mask, const void *va, unsigned int flags)
 {
     ASSERT(local_irq_is_enabled());
     
     if ( cpu_isset(smp_processor_id(), mask) )
     {
-        if ( va == FLUSHVA_ALL )
-            local_flush_tlb();
-        else
-            local_flush_tlb_one(va);
+        flush_area_local(va, flags);
         cpu_clear(smp_processor_id(), mask);
     }
 
@@ -200,6 +194,7 @@ void __flush_tlb_mask(cpumask_t mask, un
         spin_lock(&flush_lock);
         flush_cpumask = mask;
         flush_va      = va;
+        flush_flags   = flags;
         send_IPI_mask(mask, INVALIDATE_TLB_VECTOR);
         while ( !cpus_empty(flush_cpumask) )
             cpu_relax();
@@ -215,22 +210,11 @@ void new_tlbflush_clock_period(void)
     /* Flush everyone else. We definitely flushed just before entry. */
     allbutself = cpu_online_map;
     cpu_clear(smp_processor_id(), allbutself);
-    __flush_tlb_mask(allbutself, FLUSHVA_ALL);
+    flush_mask(allbutself, FLUSH_TLB);
 
     /* No need for atomicity: we are the only possible updater. */
     ASSERT(tlbflush_clock == 0);
     tlbflush_clock++;
-}
-
-static void flush_tlb_all_pge_ipi(void *info)
-{
-    local_flush_tlb_pge();
-}
-
-void flush_tlb_all_pge(void)
-{
-    smp_call_function(flush_tlb_all_pge_ipi, 0, 1, 1);
-    local_flush_tlb_pge();
 }
 
 void smp_send_event_check_mask(cpumask_t mask)
diff -r 1f893d055c6f -r 9488d3166553 xen/arch/x86/smpboot.c
--- a/xen/arch/x86/smpboot.c    Tue Oct 16 10:27:55 2007 +0100
+++ b/xen/arch/x86/smpboot.c    Tue Oct 16 17:31:37 2007 +0100
@@ -518,7 +518,7 @@ void __devinit start_secondary(void *unu
         * low-memory mappings have been cleared, flush them from
         * the local TLBs too.
         */
-       local_flush_tlb();
+       flush_tlb_local();
 
        /* This must be done before setting cpu_online_map */
        set_cpu_sibling_map(raw_smp_processor_id());
diff -r 1f893d055c6f -r 9488d3166553 xen/arch/x86/x86_32/domain_page.c
--- a/xen/arch/x86/x86_32/domain_page.c Tue Oct 16 10:27:55 2007 +0100
+++ b/xen/arch/x86/x86_32/domain_page.c Tue Oct 16 17:31:37 2007 +0100
@@ -78,7 +78,7 @@ void *map_domain_page(unsigned long mfn)
         if ( NEED_FLUSH(this_cpu(tlbflush_time), dcache->tlbflush_timestamp) )
         {
             perfc_incr(domain_page_tlb_flush);
-            local_flush_tlb();
+            flush_tlb_local();
         }
     }
 
@@ -94,7 +94,7 @@ void *map_domain_page(unsigned long mfn)
 
         /* /Second/, flush TLBs. */
         perfc_incr(domain_page_tlb_flush);
-        local_flush_tlb();
+        flush_tlb_local();
         vcache->shadow_epoch = ++dcache->epoch;
         dcache->tlbflush_timestamp = tlbflush_current_time();
 
diff -r 1f893d055c6f -r 9488d3166553 xen/arch/x86/x86_32/mm.c
--- a/xen/arch/x86/x86_32/mm.c  Tue Oct 16 10:27:55 2007 +0100
+++ b/xen/arch/x86/x86_32/mm.c  Tue Oct 16 17:31:37 2007 +0100
@@ -152,7 +152,7 @@ void __init zap_low_mappings(l2_pgentry_
     /* Now zap mappings in the idle pagetables. */
     destroy_xen_mappings(0, HYPERVISOR_VIRT_START);
 
-    flush_tlb_all_pge();
+    flush_all(FLUSH_TLB_GLOBAL);
 
     /* Replace with mapping of the boot trampoline only. */
     map_pages_to_xen(BOOT_TRAMPOLINE, BOOT_TRAMPOLINE >> PAGE_SHIFT,
diff -r 1f893d055c6f -r 9488d3166553 xen/arch/x86/x86_64/compat/mm.c
--- a/xen/arch/x86/x86_64/compat/mm.c   Tue Oct 16 10:27:55 2007 +0100
+++ b/xen/arch/x86/x86_64/compat/mm.c   Tue Oct 16 17:31:37 2007 +0100
@@ -31,7 +31,7 @@ int compat_set_gdt(XEN_GUEST_HANDLE(uint
     LOCK_BIGLOCK(current->domain);
 
     if ( (ret = set_gdt(current, frames, entries)) == 0 )
-        local_flush_tlb();
+        flush_tlb_local();
 
     UNLOCK_BIGLOCK(current->domain);
 
diff -r 1f893d055c6f -r 9488d3166553 xen/arch/x86/x86_64/mm.c
--- a/xen/arch/x86/x86_64/mm.c  Tue Oct 16 10:27:55 2007 +0100
+++ b/xen/arch/x86/x86_64/mm.c  Tue Oct 16 17:31:37 2007 +0100
@@ -205,7 +205,7 @@ void __init zap_low_mappings(void)
 
     /* Remove aliased mapping of first 1:1 PML4 entry. */
     l4e_write(&idle_pg_table[0], l4e_empty());
-    local_flush_tlb_pge();
+    flush_local(FLUSH_TLB_GLOBAL);
 
     /* Replace with mapping of the boot trampoline only. */
     map_pages_to_xen(BOOT_TRAMPOLINE, BOOT_TRAMPOLINE >> PAGE_SHIFT,
diff -r 1f893d055c6f -r 9488d3166553 xen/include/asm-x86/cpufeature.h
--- a/xen/include/asm-x86/cpufeature.h  Tue Oct 16 10:27:55 2007 +0100
+++ b/xen/include/asm-x86/cpufeature.h  Tue Oct 16 17:31:37 2007 +0100
@@ -50,6 +50,7 @@
 #define X86_FEATURE_NX         (1*32+20) /* Execute Disable */
 #define X86_FEATURE_MMXEXT     (1*32+22) /* AMD MMX extensions */
 #define X86_FEATURE_FFXSR       (1*32+25) /* FFXSR instruction optimizations */
+#define X86_FEATURE_PAGE1GB    (1*32+26) /* 1Gb large page support */
 #define X86_FEATURE_RDTSCP     (1*32+27) /* RDTSCP */
 #define X86_FEATURE_LM         (1*32+29) /* Long Mode (x86-64) */
 #define X86_FEATURE_3DNOWEXT   (1*32+30) /* AMD 3DNow! extensions */
@@ -143,6 +144,7 @@
 #define cpu_has_cyrix_arr      boot_cpu_has(X86_FEATURE_CYRIX_ARR)
 #define cpu_has_centaur_mcr    boot_cpu_has(X86_FEATURE_CENTAUR_MCR)
 #define cpu_has_clflush                boot_cpu_has(X86_FEATURE_CLFLSH)
+#define cpu_has_page1gb                0
 #else /* __x86_64__ */
 #define cpu_has_vme            0
 #define cpu_has_de             1
@@ -166,6 +168,7 @@
 #define cpu_has_cyrix_arr      0
 #define cpu_has_centaur_mcr    0
 #define cpu_has_clflush                boot_cpu_has(X86_FEATURE_CLFLSH)
+#define cpu_has_page1gb                boot_cpu_has(X86_FEATURE_PAGE1GB)
 #endif
 
 #define cpu_has_ffxsr           ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) \
diff -r 1f893d055c6f -r 9488d3166553 xen/include/asm-x86/flushtlb.h
--- a/xen/include/asm-x86/flushtlb.h    Tue Oct 16 10:27:55 2007 +0100
+++ b/xen/include/asm-x86/flushtlb.h    Tue Oct 16 17:31:37 2007 +0100
@@ -57,7 +57,7 @@ do {                                    
             cpu_clear(cpu, mask);                                       \
 } while ( 0 )
 
-extern void new_tlbflush_clock_period(void);
+void new_tlbflush_clock_period(void);
 
 /* Read pagetable base. */
 static inline unsigned long read_cr3(void)
@@ -69,34 +69,53 @@ static inline unsigned long read_cr3(voi
 }
 
 /* Write pagetable base and implicitly tick the tlbflush clock. */
-extern void write_cr3(unsigned long cr3);
+void write_cr3(unsigned long cr3);
 
-/* Flush guest mappings from the TLB and implicitly tick the tlbflush clock. */
-extern void local_flush_tlb(void);
+/* flush_* flag fields: */
+ /*
+  * Area to flush:
+  *  0 -> flush entire address space
+  *  1 -> 4kB area containing specified virtual address
+  *  2 -> 4MB/2MB area containing specified virtual address
+  *  3 -> 1GB area containing specified virtual address (x86/64 only)
+  */
+#define FLUSH_LEVEL_MASK 0x0f
+#define FLUSH_LEVEL(x)   (x)
+ /* Flush TLBs (or parts thereof) */
+#define FLUSH_TLB        0x10
+ /* Flush TLBs (or parts thereof) including global mappings */
+#define FLUSH_TLB_GLOBAL 0x20
+ /* Flush data caches */
+#define FLUSH_CACHE      0x40
 
-#define local_flush_tlb_pge()                                     \
-    do {                                                          \
-        __pge_off();                                              \
-        local_flush_tlb();                                        \
-        __pge_on();                                               \
-    } while ( 0 )
+/* Flush local TLBs/caches. */
+void flush_area_local(const void *va, unsigned int flags);
+#define flush_local(flags) flush_area_local(NULL, flags)
 
-#define local_flush_tlb_one(__addr) \
-    __asm__ __volatile__("invlpg %0": :"m" (*(char *) (__addr)))
+/* Flush specified CPUs' TLBs/caches */
+void flush_area_mask(cpumask_t, const void *va, unsigned int flags);
+#define flush_mask(mask, flags) flush_area_mask(mask, NULL, flags)
 
-#define flush_tlb_all()     flush_tlb_mask(cpu_online_map)
+/* Flush all CPUs' TLBs/caches */
+#define flush_area_all(va, flags) flush_area_mask(cpu_online_map, va, flags)
+#define flush_all(flags) flush_mask(cpu_online_map, flags)
 
-#ifndef CONFIG_SMP
-#define flush_tlb_all_pge()        local_flush_tlb_pge()
-#define flush_tlb_mask(mask)       local_flush_tlb()
-#define flush_tlb_one_mask(mask,v) local_flush_tlb_one(_v)
-#else
-#include <xen/smp.h>
-#define FLUSHVA_ALL (~0UL)
-extern void flush_tlb_all_pge(void);
-extern void __flush_tlb_mask(cpumask_t mask, unsigned long va);
-#define flush_tlb_mask(mask)       __flush_tlb_mask(mask,FLUSHVA_ALL)
-#define flush_tlb_one_mask(mask,v) __flush_tlb_mask(mask,(unsigned long)(v))
-#endif
+/* Flush local TLBs */
+#define flush_tlb_local()                       \
+    flush_local(FLUSH_TLB)
+#define flush_tlb_one_local(v)                  \
+    flush_area_local((const void *)(v), FLUSH_TLB|1)
+
+/* Flush specified CPUs' TLBs */
+#define flush_tlb_mask(mask)                    \
+    flush_mask(mask, FLUSH_TLB)
+#define flush_tlb_one_mask(mask,v)              \
+    flush_area_mask(mask, (const void *)(v), FLUSH_TLB|1)
+
+/* Flush all CPUs' TLBs */
+#define flush_tlb_all()                         \
+    flush_tlb_mask(cpu_online_map)
+#define flush_tlb_one_all(v)                    \
+    flush_tlb_one_mask(cpu_online_map, v)
 
 #endif /* __FLUSHTLB_H__ */
diff -r 1f893d055c6f -r 9488d3166553 
xen/include/asm-x86/mach-default/smpboot_hooks.h
--- a/xen/include/asm-x86/mach-default/smpboot_hooks.h  Tue Oct 16 10:27:55 
2007 +0100
+++ b/xen/include/asm-x86/mach-default/smpboot_hooks.h  Tue Oct 16 17:31:37 
2007 +0100
@@ -9,7 +9,7 @@ static inline void smpboot_setup_warm_re
 static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
 {
        CMOS_WRITE(0xa, 0xf);
-       local_flush_tlb();
+       flush_tlb_local();
        Dprintk("1.\n");
        *((volatile unsigned short *) TRAMPOLINE_HIGH) = start_eip >> 4;
        Dprintk("2.\n");
@@ -22,7 +22,7 @@ static inline void smpboot_restore_warm_
        /*
         * Install writable page 0 entry to set BIOS data area.
         */
-       local_flush_tlb();
+       flush_tlb_local();
 
        /*
         * Paranoid:  Set warm reset code and vector here back
diff -r 1f893d055c6f -r 9488d3166553 xen/include/asm-x86/processor.h
--- a/xen/include/asm-x86/processor.h   Tue Oct 16 10:27:55 2007 +0100
+++ b/xen/include/asm-x86/processor.h   Tue Oct 16 17:31:37 2007 +0100
@@ -160,28 +160,22 @@ struct vcpu;
 #endif
 
 struct cpuinfo_x86 {
-    __u8 x86;          /* CPU family */
-    __u8 x86_vendor;   /* CPU vendor */
+    __u8 x86;            /* CPU family */
+    __u8 x86_vendor;     /* CPU vendor */
     __u8 x86_model;
     __u8 x86_mask;
-    char wp_works_ok;  /* It doesn't on 386's */
-    char hlt_works_ok; /* Problems on some 486Dx4's and old 386's */
-    char hard_math;
-    char rfu;
-    int  cpuid_level;  /* Maximum supported CPUID level, -1=no CPUID */
+    __u8 invlpg_works_ok;
+    int  cpuid_level;    /* Maximum supported CPUID level, -1=no CPUID */
     unsigned int x86_capability[NCAPINTS];
     char x86_vendor_id[16];
     char x86_model_id[64];
-    int  x86_cache_size;  /* in KB - valid for CPUS which support this call  */
-    int  x86_cache_alignment;  /* In bytes */
-    char fdiv_bug;
-    char f00f_bug;
-    char coma_bug;
-    char pad0;
+    int  x86_cache_size; /* in KB - valid for CPUS which support this call  */
+    int  x86_cache_alignment;    /* In bytes */
     int  x86_power;
     unsigned char x86_max_cores; /* cpuid returned max cores value */
-    unsigned char booted_cores; /* number of cores as seen by OS */
+    unsigned char booted_cores;  /* number of cores as seen by OS */
     unsigned char apicid;
+    unsigned short x86_clflush_size;
 } __cacheline_aligned;
 
 /*

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog

<Prev in Thread] Current Thread [Next in Thread>
  • [Xen-changelog] [xen-unstable] x86: consolidate/enhance TLB flushing interface, Xen patchbot-unstable <=