To: <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] [PATCH] linux/x86: batch hypercalls when pinning address spaces
From: "Jan Beulich" <jbeulich@xxxxxxxxxx>
Date: Mon, 26 Mar 2007 16:29:32 +0100
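
Instead of issuing one update_va_mapping hypercall per page table page when pinning or unpinning an address space, queue the updates in a small per-CPU multicall list and submit them in batches, reducing the number of hypervisor entries needed. On x86_64 the final batch also folds the kernel and user PGD updates in with the TLB flush, making the separate hypercalls (and the explicit xen_tlb_flush() on unpin) unnecessary.
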
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
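
For illustration, the accumulate-and-flush idiom that pgd_walk_set_prot() and mm_walk_set_prot() implement below can be modeled by this stand-alone sketch (struct mcall and flush_batch() are hypothetical stand-ins for multicall_entry_t and HYPERVISOR_multicall_check(); PIN_BATCH matches the i386 value):

    #include <stdio.h>

    #define PIN_BATCH 4

    /* hypothetical stand-in for multicall_entry_t: one queued VA->PTE update */
    struct mcall {
            unsigned long va;
            unsigned long pte;
    };

    static struct mcall batch[PIN_BATCH];

    /* stand-in for HYPERVISOR_multicall_check(): submit all queued entries at once */
    static int flush_batch(unsigned int n)
    {
            printf("multicall: %u update_va_mapping ops in one hypervisor entry\n", n);
            return 0; /* 0 = success, matching the kernel convention */
    }

    /* mirrors pgd_walk_set_prot(): queue one update, flush when the buffer fills */
    static unsigned int queue_update(unsigned long va, unsigned long pte,
                                     unsigned int seq)
    {
            batch[seq].va = va;
            batch[seq].pte = pte;
            if (++seq == PIN_BATCH) {
                    if (flush_batch(PIN_BATCH))
                            return 0;       /* the kernel code BUG()s here instead */
                    seq = 0;
            }
            return seq;
    }

    int main(void)
    {
            unsigned int seq = 0;
            unsigned int i;

            for (i = 0; i < 10; i++)        /* ten page-table pages to repin */
                    seq = queue_update(0x1000UL * i, i, seq);
            if (seq != 0)                   /* submit the final partial batch */
                    flush_batch(seq);
            return 0;
    }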

Index: head-2007-03-19/arch/i386/mm/pgtable-xen.c
===================================================================
--- head-2007-03-19.orig/arch/i386/mm/pgtable-xen.c     2007-03-21 11:51:37.000000000 +0100
+++ head-2007-03-19/arch/i386/mm/pgtable-xen.c  2007-03-23 17:51:03.000000000 +0100
@@ -574,10 +574,13 @@ static void _pin_lock(struct mm_struct *
        }
 }
 
-static inline void pgd_walk_set_prot(struct page *page, pgprot_t flags)
+#define PIN_BATCH 4
+static DEFINE_PER_CPU(multicall_entry_t[PIN_BATCH], pb_mcl);
+
+static inline unsigned int pgd_walk_set_prot(struct page *page, pgprot_t flags,
+                                             unsigned int cpu, unsigned seq)
 {
        unsigned long pfn = page_to_pfn(page);
-       int rc;
 
        if (PageHighMem(page)) {
                if (pgprot_val(flags) & _PAGE_RW)
@@ -585,12 +588,18 @@ static inline void pgd_walk_set_prot(str
                else
                        set_bit(PG_pinned, &page->flags);
        } else {
-               rc = HYPERVISOR_update_va_mapping(
-                       (unsigned long)__va(pfn << PAGE_SHIFT),
-                       pfn_pte(pfn, flags), 0);
-               if (rc)
-                       BUG();
+               MULTI_update_va_mapping(per_cpu(pb_mcl, cpu) + seq,
+                               (unsigned long)__va(pfn << PAGE_SHIFT),
+                               pfn_pte(pfn, flags), 0);
+               if (unlikely(++seq == PIN_BATCH)) {
+                       if (unlikely(HYPERVISOR_multicall_check(per_cpu(pb_mcl, cpu),
+                                                               PIN_BATCH, NULL)))
+                               BUG();
+                       seq = 0;
+               }
        }
+
+       return seq;
 }
 
 static void pgd_walk(pgd_t *pgd_base, pgprot_t flags)
@@ -598,37 +607,48 @@ static void pgd_walk(pgd_t *pgd_base, pg
        pgd_t *pgd = pgd_base;
        pud_t *pud;
        pmd_t *pmd;
-       int    g, u, m, rc;
+       int    g, u, m;
+       unsigned int cpu, seq;
 
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return;
 
-       for (g = 0; g < USER_PTRS_PER_PGD; g++, pgd++) {
+       cpu = get_cpu();
+
+       for (g = 0, seq = 0; g < USER_PTRS_PER_PGD; g++, pgd++) {
                if (pgd_none(*pgd))
                        continue;
                pud = pud_offset(pgd, 0);
                if (PTRS_PER_PUD > 1) /* not folded */
-                       pgd_walk_set_prot(virt_to_page(pud),flags);
+                       seq = pgd_walk_set_prot(virt_to_page(pud),flags,cpu,seq);
                for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
                        if (pud_none(*pud))
                                continue;
                        pmd = pmd_offset(pud, 0);
                        if (PTRS_PER_PMD > 1) /* not folded */
-                               pgd_walk_set_prot(virt_to_page(pmd),flags);
+                               seq = pgd_walk_set_prot(virt_to_page(pmd),flags,cpu,seq);
                        for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
                                if (pmd_none(*pmd))
                                        continue;
-                               pgd_walk_set_prot(pmd_page(*pmd),flags);
+                               seq = pgd_walk_set_prot(pmd_page(*pmd),flags,cpu,seq);
                        }
                }
        }
 
-       rc = HYPERVISOR_update_va_mapping(
-               (unsigned long)pgd_base,
-               pfn_pte(virt_to_phys(pgd_base)>>PAGE_SHIFT, flags),
-               UVMF_TLB_FLUSH);
-       if (rc)
+       if (likely(seq != 0)) {
+               MULTI_update_va_mapping(per_cpu(pb_mcl, cpu) + seq,
+                       (unsigned long)pgd_base,
+                       pfn_pte(virt_to_phys(pgd_base)>>PAGE_SHIFT, flags),
+                       UVMF_TLB_FLUSH);
+               if (unlikely(HYPERVISOR_multicall_check(per_cpu(pb_mcl, cpu),
+                                                       seq + 1, NULL)))
+                       BUG();
+       } else if (HYPERVISOR_update_va_mapping((unsigned long)pgd_base,
+                       pfn_pte(virt_to_phys(pgd_base)>>PAGE_SHIFT, flags),
+                       UVMF_TLB_FLUSH))
                BUG();
+
+       put_cpu();
 }
 
 static void __pgd_pin(pgd_t *pgd)
Index: head-2007-03-19/arch/x86_64/mm/pageattr-xen.c
===================================================================
--- head-2007-03-19.orig/arch/x86_64/mm/pageattr-xen.c  2007-03-21 11:50:13.000000000 +0100
+++ head-2007-03-19/arch/x86_64/mm/pageattr-xen.c       2007-03-23 17:51:24.000000000 +0100
@@ -20,17 +20,26 @@ static void _pin_lock(struct mm_struct *
 LIST_HEAD(mm_unpinned);
 DEFINE_SPINLOCK(mm_unpinned_lock);
 
-static inline void mm_walk_set_prot(void *pt, pgprot_t flags)
+#define PIN_BATCH 8
+static DEFINE_PER_CPU(multicall_entry_t[PIN_BATCH], pb_mcl);
+
+static inline unsigned int mm_walk_set_prot(void *pt, pgprot_t flags,
+                                            unsigned int cpu, unsigned int seq)
 {
        struct page *page = virt_to_page(pt);
        unsigned long pfn = page_to_pfn(page);
-       int rc;
 
-       rc = HYPERVISOR_update_va_mapping(
+       MULTI_update_va_mapping(per_cpu(pb_mcl, cpu) + seq,
                (unsigned long)__va(pfn << PAGE_SHIFT),
                pfn_pte(pfn, flags), 0);
-       if (rc)
-               BUG();
+       if (unlikely(++seq == PIN_BATCH)) {
+               if (unlikely(HYPERVISOR_multicall_check(per_cpu(pb_mcl, cpu),
+                                                       PIN_BATCH, NULL)))
+                       BUG();
+               seq = 0;
+       }
+
+       return seq;
 }
 
 static void mm_walk(struct mm_struct *mm, pgprot_t flags)
@@ -40,8 +49,12 @@ static void mm_walk(struct mm_struct *mm
        pmd_t       *pmd;
        pte_t       *pte;
        int          g,u,m;
+       unsigned int cpu, seq;
+       multicall_entry_t *mcl;
 
        pgd = mm->pgd;
+       cpu = get_cpu();
+
        /*
         * Cannot iterate up to USER_PTRS_PER_PGD as these pagetables may not
         * be the 'current' task's pagetables (e.g., current may be 32-bit,
@@ -49,26 +62,45 @@ static void mm_walk(struct mm_struct *mm
         * Subtracting 1 from TASK_SIZE64 means the loop limit is correct
         * regardless of whether TASK_SIZE64 is a multiple of PGDIR_SIZE.
         */
-       for (g = 0; g <= ((TASK_SIZE64-1) / PGDIR_SIZE); g++, pgd++) {
+       for (g = 0, seq = 0; g <= ((TASK_SIZE64-1) / PGDIR_SIZE); g++, pgd++) {
                if (pgd_none(*pgd))
                        continue;
                pud = pud_offset(pgd, 0);
                if (PTRS_PER_PUD > 1) /* not folded */ 
-                       mm_walk_set_prot(pud,flags);
+                       seq = mm_walk_set_prot(pud,flags,cpu,seq);
                for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
                        if (pud_none(*pud))
                                continue;
                        pmd = pmd_offset(pud, 0);
                        if (PTRS_PER_PMD > 1) /* not folded */ 
-                               mm_walk_set_prot(pmd,flags);
+                               seq = mm_walk_set_prot(pmd,flags,cpu,seq);
                        for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
                                if (pmd_none(*pmd))
                                        continue;
                                pte = pte_offset_kernel(pmd,0);
-                               mm_walk_set_prot(pte,flags);
+                               seq = mm_walk_set_prot(pte,flags,cpu,seq);
                        }
                }
        }
+
+       mcl = per_cpu(pb_mcl, cpu);
+       if (unlikely(seq > PIN_BATCH - 2)) {
+               if (unlikely(HYPERVISOR_multicall_check(mcl, seq, NULL)))
+                       BUG();
+               seq = 0;
+       }
+       MULTI_update_va_mapping(mcl + seq,
+              (unsigned long)__user_pgd(mm->pgd),
+              pfn_pte(virt_to_phys(__user_pgd(mm->pgd))>>PAGE_SHIFT, flags),
+              0);
+       MULTI_update_va_mapping(mcl + seq + 1,
+              (unsigned long)mm->pgd,
+              pfn_pte(virt_to_phys(mm->pgd)>>PAGE_SHIFT, flags),
+              UVMF_TLB_FLUSH);
+       if (unlikely(HYPERVISOR_multicall_check(mcl, seq + 2, NULL)))
+               BUG();
+
+       put_cpu();
 }
 
 void mm_pin(struct mm_struct *mm)
@@ -79,17 +112,6 @@ void mm_pin(struct mm_struct *mm)
        spin_lock(&mm->page_table_lock);
 
        mm_walk(mm, PAGE_KERNEL_RO);
-       if (HYPERVISOR_update_va_mapping(
-               (unsigned long)mm->pgd,
-               pfn_pte(virt_to_phys(mm->pgd)>>PAGE_SHIFT, PAGE_KERNEL_RO),
-               UVMF_TLB_FLUSH))
-               BUG();
-       if (HYPERVISOR_update_va_mapping(
-               (unsigned long)__user_pgd(mm->pgd),
-               pfn_pte(virt_to_phys(__user_pgd(mm->pgd))>>PAGE_SHIFT,
-                       PAGE_KERNEL_RO),
-               UVMF_TLB_FLUSH))
-               BUG();
        xen_pgd_pin(__pa(mm->pgd)); /* kernel */
        xen_pgd_pin(__pa(__user_pgd(mm->pgd))); /* user */
        mm->context.pinned = 1;
@@ -109,17 +131,7 @@ void mm_unpin(struct mm_struct *mm)
 
        xen_pgd_unpin(__pa(mm->pgd));
        xen_pgd_unpin(__pa(__user_pgd(mm->pgd)));
-       if (HYPERVISOR_update_va_mapping(
-               (unsigned long)mm->pgd,
-               pfn_pte(virt_to_phys(mm->pgd)>>PAGE_SHIFT, PAGE_KERNEL), 0))
-               BUG();
-       if (HYPERVISOR_update_va_mapping(
-               (unsigned long)__user_pgd(mm->pgd),
-               pfn_pte(virt_to_phys(__user_pgd(mm->pgd))>>PAGE_SHIFT,
-                       PAGE_KERNEL), 0))
-               BUG();
        mm_walk(mm, PAGE_KERNEL);
-       xen_tlb_flush();
        mm->context.pinned = 0;
        spin_lock(&mm_unpinned_lock);
        list_add(&mm->context.unpinned, &mm_unpinned);
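
A note on the x86_64 tail handling above: mm_walk() flushes early once fewer than two slots remain (seq > PIN_BATCH - 2), guaranteeing that the user and kernel PGD updates always fit into the final multicall. The UVMF_TLB_FLUSH on the last entry then covers the whole walk, which is what allows the explicit xen_tlb_flush() to be dropped from mm_unpin().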



