[Xen-devel] Small address spaces patch for Xen

To: xen-devel <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] Small address spaces patch for Xen
From: Jacob Gorm Hansen <jacobg@xxxxxxx>
Date: Fri, 29 Apr 2005 14:32:29 -0700
Hi,

attached is my patch for Xen and XenLinux to enable small address spaces. To use it, you will need a purpose-built user space for dom0, available through the link below. Unpack it in an empty partition, onto a ramdisk, or export it as an NFS root. Remember to add init=/linuxrc to your dom0 kernel command line.
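For example, with grub, a dom0 entry could look something like this (the paths and the root= device are placeholders; adjust them to wherever you unpack things):

title Xen with small address spaces
        kernel /boot/xen.gz
        module /boot/vmlinuz root=/dev/hda2 ro init=/linuxrc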

This patch is by no means production quality; for example:

- It steals the shadow-pagetable linear mapping for dom0, so migration, or anything else that activates Xen's shadow mode, will likely crash Xen.

- It does not support the perdomain mapping correctly, so use of segments in dom0 or domU will have strange effects.

- It does not prevent domU from accessing dom0's memory.

But hopefully it is enough to play with.

Dom0 busybox-based userspace tarball, including vmlinuz for domU (/vmlinuz) and vmtools:

http://www.diku.dk/~jacobg/sas/busybox.tar.gz

Pre-built xen.gz and vmlinuz for dom0:

http://www.diku.dk/~jacobg/sas/xen-sas.tar.gz

In the busybox tarball root, you will find some scripts (e.g. /create_ramdisk, create_10, create_nfs_sh) for booting domUs with various combinations of routed and bridged networking. There is also a ramdisk (domUinitrd) to boot from. Both dom0 and domU have iperf installed.
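To get some throughput numbers between the domains, run iperf the usual way, e.g. "iperf -s" in dom0 and "iperf -c <dom0-ip>" from a domU (or the other way around).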

Binaries for dom0 are so far all statically linked; I use the attached linker script to link them at the new location, e.g. with "-static -T elf_i386_glibc21.x" or similar.
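As an illustration (not part of the patch; the gcc invocation is just an example and assumes the script is in the current directory), a minimal program for the relocated user space could be built like this:

/* hello.c - sanity check for the relocated dom0 user space.
 *
 * Build, assuming elf_i386_glibc21.x is in the current directory:
 *
 *   gcc -static -T elf_i386_glibc21.x -o hello hello.c
 *
 * The script places __executable_start at 0xF0048000 instead of the
 * usual 0x08048000, so the binary lands in the dom0 region above
 * UNPRIV_GUEST_VIRT_END (0xF0000000).
 */
#include <stdio.h>

int main(void)
{
        printf("hello from the small address space\n");
        return 0;
}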

Jacob
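
P.S. In outline, the interesting part of the patch (the __context_switch change below) works as follows. This is a simplified sketch, not the literal code; copy_upper_l2_slots() is an invented placeholder for the memcpy of L2 slots in the real hunk, and the idle-task and perf-counter details are omitted:

/* Sketch of the small-address-space context switch fast path.
 * Fields are the ones the patch adds to arch_exec_domain;
 * copy_upper_l2_slots() is a made-up shorthand. */
static void sas_context_switch(struct exec_domain *p, struct exec_domain *n)
{
        /* domU -> dom0: dom0 lives entirely in the L2 slots above
         * UNPRIV_GUEST_VIRT_END, i.e. above domU's upper limit, so we
         * can keep domU's page table loaded and just refresh dom0's
         * slots in it -- no CR3 write, no TLB flush. */
        if ( p->arch.pgd_upper_limit == n->arch.pgd_lower_limit )
        {
                if ( p->arch.cached_pgd_version != n->arch.pgd_version )
                {
                        /* dom0 touched its page directory since we last
                         * copied; refresh the upper slots and remember
                         * which version we copied. */
                        copy_upper_l2_slots(p, n);
                        p->arch.cached_pgd_version = n->arch.pgd_version;
                }
                /* else: the switch is essentially free. */
        }
        else if ( read_cr3() != pagetable_val(n->arch.monitor_table) )
        {
                /* dom0 -> domU (or unrelated domains): full reload. */
                write_ptbase(n);
        }
}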
--- orig/xen/common/grant_table.c
+++ mod/xen/common/grant_table.c
@@ -547,6 +547,7 @@
     {
         l1_pgentry_t   *pl1e;
         unsigned long   _ol1e;
+               struct exec_domain *ed = current;
 
         pl1e = &linear_pg_table[l1_linear_offset(virt)];
 
--- orig/xen/arch/x86/domain.c
+++ mod/xen/arch/x86/domain.c
@@ -40,6 +40,7 @@
 #include <xen/kernel.h>
 #include <public/io/ioreq.h>
 #include <xen/multicall.h>
+#include <asm/mm.h>
 
 /* opt_noreboot: If true, machine will need manual reset on error. */
 static int opt_noreboot = 0;
@@ -243,6 +244,11 @@
     }
     else
     {
+               ed->arch.cached_pgd_version = ~0UL;
+               ed->arch.pgd_version = 0;
+               ed->arch.pgd_lower_limit = 0;
+               ed->arch.pgd_upper_limit = UNPRIV_GUEST_VIRT_END >> L2_PAGETABLE_SHIFT;
+               ed->arch.linear_start    = LINEAR_PT_VIRT_START;
         ed->arch.schedule_tail = continue_nonidle_task;
 
         d->shared_info = (void *)alloc_xenheap_page();
@@ -726,6 +732,17 @@
 #define loaddebug(_ed,_reg) \
        __asm__ __volatile__ ("mov %0,%%db" #_reg : : "r" ((_ed)->debugreg[_reg]))
 
+//#define SW_PERF
+#ifdef SW_PERF
+#define sw_incr(a) ( ++(a))
+#else
+#define sw_incr(a)
+#endif
+int sw_total=0;
+int sw_free=0;
+int sw_update=0;
+int sw_flush=0;
+
 static void __context_switch(void)
 {
     execution_context_t *stack_ec = get_execution_context();
@@ -771,7 +788,46 @@
     if ( p->domain != n->domain )
         set_bit(cpu, &n->domain->cpuset);
 
-    write_ptbase(n);
+       /* dom 1 -> 0 */
+
+       if(p->arch.pgd_upper_limit==n->arch.pgd_lower_limit &&
+               !is_idle_task(p->domain))
+       {
+               if(unlikely(p->arch.cached_pgd_version != n->arch.pgd_version))
+               {
+                       unsigned long* vprev = (unsigned long*)map_domain_mem( pagetable_val(p->arch.monitor_table));
+                       unsigned long* vnext = (unsigned long*)map_domain_mem( pagetable_val(n->arch.monitor_table));
+
+                       int off = UNPRIV_GUEST_VIRT_END>>L2_PAGETABLE_SHIFT;
+                       int len = (HYPERVISOR_VIRT_START-UNPRIV_GUEST_VIRT_END)>>L2_PAGETABLE_SHIFT;
+
+                       p->arch.cached_pgd_version = n->arch.pgd_version;
+
+                       memcpy(&vprev[off], &vnext[off], len * sizeof(l2_pgentry_t));
+                       vprev[DOM0_LINEAR_PT_VIRT_START>>L2_PAGETABLE_SHIFT] = vnext[DOM0_LINEAR_PT_VIRT_START>>L2_PAGETABLE_SHIFT];
+                       vprev[PERDOMAIN_VIRT_START>>L2_PAGETABLE_SHIFT] = vnext[PERDOMAIN_VIRT_START>>L2_PAGETABLE_SHIFT]; // fake out the PERDOMAIN mapping for now
+
+                       unmap_domain_mem(vprev);
+                       unmap_domain_mem(vnext);
+
+                       sw_incr(sw_update);
+               }
+               else sw_incr(sw_free);
+       }
+
+       /* dom 0 -> dom 1 */
+       else if(read_cr3() != pagetable_val(n->arch.monitor_table))
+       {
+               write_ptbase(n);
+               sw_incr(sw_flush);
+       }
+
+#ifdef SW_PERF
+       if((sw_total++ & 0x1ff)==0)
+               printk("tot %d free %d update %d flush %d\n",
+                               sw_total, sw_free, sw_update, sw_flush);
+#endif
+
     __asm__ __volatile__ ( "lgdt %0" : "=m" (*n->arch.gdt) );
 
     if ( p->domain != n->domain )
--- orig/xen/include/asm-x86/config.h
+++ mod/xen/include/asm-x86/config.h
@@ -267,6 +267,9 @@
 #define XENHEAP_DEFAULT_MB     (DIRECTMAP_MBYTES)
 #define DIRECTMAP_PHYS_END     (DIRECTMAP_MBYTES<<20)
 
+#define DOM0_LINEAR_PT_VIRT_START (SH_LINEAR_PT_VIRT_START)
+#define DOM0_LINEAR_PT_VIRT_END (SH_LINEAR_PT_VIRT_END)
+
 /* Maximum linear address accessible via guest memory segments. */
 #define GUEST_SEGMENT_MAX_ADDR  RO_MPT_VIRT_END
 
--- orig/xen/include/asm-x86/domain.h
+++ mod/xen/include/asm-x86/domain.h
@@ -146,6 +146,15 @@
     unsigned long ldt_base, ldt_ents, shadow_ldt_mapcnt;
     /* Next entry is passed to LGDT on domain switch. */
     char gdt[10]; /* NB. 10 bytes needed for x86_64. Use 6 bytes for x86_32. */
+
+
+       /* for small address spaces support */
+       unsigned long pgd_lower_limit;
+       unsigned long pgd_upper_limit;
+       unsigned long linear_start;
+       unsigned long pgd_version;
+       unsigned long cached_pgd_version;
+
 } __cacheline_aligned;
 
 #define IDLE0_ARCH_EXEC_DOMAIN                                      \
--- orig/xen/include/asm-x86/page.h
+++ mod/xen/include/asm-x86/page.h
@@ -49,11 +49,14 @@
 #include <asm/bitops.h>
 #include <asm/flushtlb.h>
 
+#if 0
 #define linear_l1_table                                                 \
     ((l1_pgentry_t *)(LINEAR_PT_VIRT_START))
+#endif
+
 #define __linear_l2_table                                                 \
-    ((l2_pgentry_t *)(LINEAR_PT_VIRT_START +                            \
-                     (LINEAR_PT_VIRT_START >> (PAGETABLE_ORDER<<0))))
+    ((l2_pgentry_t *)((ed->arch.linear_start)+                            \
+                     ((ed->arch.linear_start) >> (PAGETABLE_ORDER<<0))))
 #define __linear_l3_table                                                 \
     ((l3_pgentry_t *)(LINEAR_PT_VIRT_START +                            \
                      (LINEAR_PT_VIRT_START >> (PAGETABLE_ORDER<<0)) +   \
@@ -64,7 +67,8 @@
                      (LINEAR_PT_VIRT_START >> (PAGETABLE_ORDER<<1)) +   \
                      (LINEAR_PT_VIRT_START >> (PAGETABLE_ORDER<<2))))
 
-#define linear_pg_table linear_l1_table
+//#define linear_pg_table linear_l1_table
+#define linear_pg_table ((l1_pgentry_t *)(ed->arch.linear_start))
 #define linear_l2_table(_ed) ((_ed)->arch.guest_vtable)
 #define linear_l3_table(_ed) ((_ed)->arch.guest_vl3table)
 #define linear_l4_table(_ed) ((_ed)->arch.guest_vl4table)
--- orig/xen/include/public/arch-x86_32.h
+++ mod/xen/include/public/arch-x86_32.h
@@ -77,6 +77,8 @@
 #define machine_to_phys_mapping ((u32 *)HYPERVISOR_VIRT_START)
 #endif
 
+#define UNPRIV_GUEST_VIRT_END 0xF0000000
+
 #ifndef __ASSEMBLY__
 
 /* NB. Both the following are 32 bits each. */
--- orig/xen/arch/x86/x86_32/mm.c
+++ mod/xen/arch/x86/x86_32/mm.c
@@ -133,7 +133,7 @@
         l2e_create_phys(__pa(mapcache), __PAGE_HYPERVISOR);
 
     /* Set up linear page table mapping. */
-    idle_pg_table[l2_table_offset(LINEAR_PT_VIRT_START)] =
+    idle_pg_table[l2_table_offset(DOM0_LINEAR_PT_VIRT_START)] =
         l2e_create_phys(__pa(idle_pg_table), __PAGE_HYPERVISOR);
 }
 
--- orig/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/fixmap.h
+++ mod/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/fixmap.h
@@ -20,7 +20,12 @@
  * Leave one empty page between vmalloc'ed areas and
  * the start of the fixmap.
  */
+
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
 #define __FIXADDR_TOP  (HYPERVISOR_VIRT_START - 2 * PAGE_SIZE)
+#else
+#define __FIXADDR_TOP  (UNPRIV_GUEST_VIRT_END - 2 * PAGE_SIZE)
+#endif
 
 #ifndef __ASSEMBLY__
 #include <linux/kernel.h>
--- orig/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/highmem.h
+++ mod/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/highmem.h
@@ -35,6 +35,8 @@
 
 extern void kmap_init(void);
 
+#error "highmem not supported"
+
 /*
  * Right now we initialize only a single pte table. It can be extended
  * easily, subsequent pte tables have to be allocated in one physical
--- orig/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/page.h
+++ mod/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/page.h
@@ -167,12 +167,23 @@
 
 #endif /* __ASSEMBLY__ */
 
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+
+#ifdef __ASSEMBLY__
+#define __PAGE_OFFSET          (0xF6000000)
+#else
+#define __PAGE_OFFSET          (0xF6000000UL)
+#endif
+
+#else
+
 #ifdef __ASSEMBLY__
 #define __PAGE_OFFSET          (0xC0000000)
 #else
 #define __PAGE_OFFSET          (0xC0000000UL)
 #endif
 
+#endif
 
 #define PAGE_OFFSET            ((unsigned long)__PAGE_OFFSET)
 #define VMALLOC_RESERVE                ((unsigned long)__VMALLOC_RESERVE)
--- orig/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/pgtable-2level-defs.h
+++ mod/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/pgtable-2level-defs.h
@@ -7,7 +7,12 @@
 
 #define PGDIR_SHIFT    22
 #define PTRS_PER_PGD   1024
-#define PTRS_PER_PGD_NO_HV     (HYPERVISOR_VIRT_START >> PGDIR_SHIFT)
+
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+#define PTRS_PER_PGD_NO_HV     (HYPERVISOR_VIRT_START >> PGDIR_SHIFT)
+#else
+#define PTRS_PER_PGD_NO_HV     (UNPRIV_GUEST_VIRT_END >> PGDIR_SHIFT)
+#endif
 
 /*
  * the i386 is two-level, so we don't really have any
--- orig/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/pgtable.h
+++ mod/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/pgtable.h
@@ -60,13 +60,18 @@
 #define PGDIR_SIZE     (1UL << PGDIR_SHIFT)
 #define PGDIR_MASK     (~(PGDIR_SIZE-1))
 
+#define TWOLEVEL_PGDIR_SHIFT   22
+
 #define USER_PTRS_PER_PGD      (TASK_SIZE/PGDIR_SIZE)
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+#define FIRST_USER_PGD_NR (UNPRIV_GUEST_VIRT_END>>TWOLEVEL_PGDIR_SHIFT)
+#else
 #define FIRST_USER_PGD_NR      0
+#endif
 
 #define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
 #define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
 
-#define TWOLEVEL_PGDIR_SHIFT   22
 #define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT)
 #define BOOT_KERNEL_PGD_PTRS (1024-BOOT_USER_PGD_PTRS)
 
--- orig/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/processor.h
+++ mod/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/processor.h
@@ -315,7 +315,11 @@
 /* This decides where the kernel will search for a free chunk of vm
  * space during mmap's.
  */
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+#define TASK_UNMAPPED_BASE     (0xF2000000)
+#else
 #define TASK_UNMAPPED_BASE     (PAGE_ALIGN(TASK_SIZE / 3))
+#endif
 
 #define HAVE_ARCH_PICK_MMAP_LAYOUT
 
--- orig/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/head.S
+++ mod/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/head.S
@@ -2,7 +2,11 @@
 #include <linux/config.h>
 
 .section __xen_guest
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+       .ascii  "GUEST_OS=linux,GUEST_VER=2.6,XEN_VER=3.0,VIRT_BASE=0xF6000000"
+#else
        .ascii  "GUEST_OS=linux,GUEST_VER=2.6,XEN_VER=3.0,VIRT_BASE=0xC0000000"
+#endif
        .ascii  ",LOADER=generic"
        .byte   0
 
--- orig/linux-2.6.11-xen-sparse/arch/xen/i386/mm/init.c
+++ mod/linux-2.6.11-xen-sparse/arch/xen/i386/mm/init.c
@@ -353,6 +353,7 @@
         * page directory, write-protect the new page directory, then switch to
         * it. We clean up by write-enabling and then freeing the old page dir.
         */
+       memset(pgd_base,0,PAGE_SIZE);  /* TODO optimize a bit / necessary at all? */
        memcpy(pgd_base, old_pgd, PTRS_PER_PGD_NO_HV*sizeof(pgd_t));
        make_page_readonly(pgd_base);
        xen_pgd_pin(__pa(pgd_base));
@@ -416,7 +417,7 @@
         * Note that "pgd_clear()" doesn't do it for
         * us, because pgd_clear() is a no-op on i386.
         */
-       for (i = 0; i < USER_PTRS_PER_PGD; i++)
+       for (i = FIRST_USER_PGD_NR; i < USER_PTRS_PER_PGD; i++)
 #ifdef CONFIG_X86_PAE
                set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
 #else
--- orig/linux-2.6.11-xen-sparse/arch/xen/i386/mm/pgtable.c
+++ mod/linux-2.6.11-xen-sparse/arch/xen/i386/mm/pgtable.c
@@ -280,6 +280,14 @@
                        swapper_pg_dir + USER_PTRS_PER_PGD,
                        (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
 
+#ifndef CONFIG_XEN_PRIVILEGED_GUEST
+       /* perhaps just move this code inside Xen */
+
+       memset((pgd_t*) pgd + (UNPRIV_GUEST_VIRT_END>>TWOLEVEL_PGDIR_SHIFT), 0,
+                       ((HYPERVISOR_VIRT_START-UNPRIV_GUEST_VIRT_END)>>TWOLEVEL_PGDIR_SHIFT)*sizeof(pgd_t));
+#endif
+
        if (PTRS_PER_PMD > 1)
                return;
 
--- orig/xen/arch/x86/domain_build.c
+++ mod/xen/arch/x86/domain_build.c
@@ -225,6 +225,9 @@
     ed->arch.failsafe_selector = FLAT_KERNEL_CS;
     ed->arch.event_selector    = FLAT_KERNEL_CS;
     ed->arch.kernel_ss = FLAT_KERNEL_SS;
+       ed->arch.pgd_lower_limit = UNPRIV_GUEST_VIRT_END >> L2_PAGETABLE_SHIFT;
+       ed->arch.pgd_upper_limit = ROOT_PAGETABLE_FIRST_XEN_SLOT;
+       ed->arch.linear_start = DOM0_LINEAR_PT_VIRT_START;
     for ( i = 0; i < 256; i++ ) 
         ed->arch.traps[i].cs = FLAT_KERNEL_CS;
 
@@ -243,7 +246,7 @@
     /* WARNING: The new domain must have its 'processor' field filled in! */
     l2start = l2tab = (l2_pgentry_t *)mpt_alloc; mpt_alloc += PAGE_SIZE;
     memcpy(l2tab, &idle_pg_table[0], PAGE_SIZE);
-    l2tab[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] =
+    l2tab[ed->arch.linear_start >> L2_PAGETABLE_SHIFT] =
         l2e_create_phys((unsigned long)l2start, __PAGE_HYPERVISOR);
     l2tab[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT] =
         l2e_create_phys(__pa(d->arch.mm_perdomain_pt), __PAGE_HYPERVISOR);
--- orig/xen/arch/x86/mm.c
+++ mod/xen/arch/x86/mm.c
@@ -687,7 +687,7 @@
     memcpy(&pl2e[ROOT_PAGETABLE_FIRST_XEN_SLOT],
            &idle_pg_table[ROOT_PAGETABLE_FIRST_XEN_SLOT],
            ROOT_PAGETABLE_XEN_SLOTS * sizeof(l2_pgentry_t));
-    pl2e[l2_table_offset(LINEAR_PT_VIRT_START)] =
+    pl2e[l2_table_offset(d->exec_domain[smp_processor_id()]->arch.linear_start)] =
         l2e_create_pfn(pfn, __PAGE_HYPERVISOR);
     pl2e[l2_table_offset(PERDOMAIN_VIRT_START)] =
         l2e_create_phys(__pa(page_get_owner(page)->arch.mm_perdomain_pt),
@@ -915,13 +915,20 @@
                         unsigned long pfn)
 {
     l2_pgentry_t ol2e;
+    struct exec_domain *ed = current;
 
-    if ( unlikely(!is_guest_l2_slot(pgentry_ptr_to_slot(pl2e))) )
+       if( unlikely ( 
+                       pgentry_ptr_to_slot(pl2e) <  ed->arch.pgd_lower_limit ||
+                       pgentry_ptr_to_slot(pl2e) >= ed->arch.pgd_upper_limit)
+                       )
     {
         MEM_LOG("Illegal L2 update attempt in Xen-private area %p", pl2e);
         return 0;
     }
 
+       ed->arch.pgd_version++;  /* TODO make atomic */
+       ed->arch.pgd_version &= 0x7fffffff;
+
     if ( unlikely(__copy_from_user(&ol2e, pl2e, sizeof(ol2e)) != 0) )
         return 0;
 
@@ -1294,6 +1301,10 @@
 
     if ( likely(okay) )
     {
+               ed->arch.pgd_version++; /* TODO use atomic */
+               ed->arch.pgd_version &= 0x7fffffff;
+               ed->arch.cached_pgd_version = ~0UL;
+
         invalidate_shadow_ldt(ed);
 
         old_base_mfn = pagetable_val(ed->arch.guest_table) >> PAGE_SHIFT;
@@ -1530,6 +1541,21 @@
             else if ( likely(test_and_clear_bit(_PGT_pinned, 
                                                 &page->u.inuse.type_info)) )
             {
+                               if(page->u.inuse.type_info & PGT_l2_page_table)
+                               {
+                                       struct exec_domain* ed = page_get_owner(page)->exec_domain[smp_processor_id()];
+                                       if(ed->arch.pgd_upper_limit<ROOT_PAGETABLE_FIRST_XEN_SLOT)
+                                       {
+                                               int i;
+                                               unsigned long* pgd = (unsigned long*)map_domain_mem( op.mfn <<PAGE_SHIFT );
+
+                                               for(i=ed->arch.pgd_upper_limit; i<ROOT_PAGETABLE_FIRST_XEN_SLOT; i++)
+                                                       pgd[i]=0;
+
+                                               unmap_domain_mem(pgd);
+                                       }
+                               }
+
                 put_page_and_type(page);
                 put_page(page);
             }
@@ -1890,6 +1916,12 @@
                 if ( likely(get_page_type(page, PGT_l2_page_table)) )
                 {
                     l2_pgentry_t l2e;
+                                       unsigned long real_cr3 = pagetable_val(ed->arch.monitor_table);
+
+                                       if(read_cr3() != real_cr3 && mfn == (real_cr3>>PAGE_SHIFT))
+                                       {
+                                               write_ptbase(ed);
+                                       }
 
                     /* FIXME: doesn't work with PAE */
                     l2e = l2e_create_phys(req.val, req.val);
@@ -2476,6 +2508,7 @@
     l2_pgentry_t  *pl2e;
     int            i;
     unsigned int   modified = 0;
+       struct exec_domain *ed = current;
 
     ASSERT(!shadow_mode_enabled(d));
 
@@ -2596,7 +2629,8 @@
     unsigned long pfn;
     struct pfn_info *page;
     l1_pgentry_t pte, ol1e, nl1e, *pl1e;
-    struct domain *d = current->domain;
+    struct exec_domain *ed = current;
+    struct domain *d = ed->domain;
 
     /* Aligned access only, thank you. */
     if ( !access_ok(addr, bytes) || ((addr & (bytes-1)) != 0) )
@@ -2710,6 +2744,7 @@
     l2_pgentry_t    *pl2e;
     int              which;
     u32              l2_idx;
+       struct exec_domain *ed = current;
 
     if ( unlikely(shadow_mode_enabled(d)) )
         return 0;

/* Default linker script, for normal executables */
OUTPUT_FORMAT("elf32-i386", "elf32-i386",
              "elf32-i386")
OUTPUT_ARCH(i386)
ENTRY(_start)
SEARCH_DIR("/usr/i686-pc-linux-gnu/lib"); 
SEARCH_DIR("/usr/i386-glibc21-linux/lib"); SEARCH_DIR("/usr/local/lib");
/* Do we need any of these for elf?
   __DYNAMIC = 0;    */
/*
MEMORY 
{
        rom (rx)  : ORIGIN = 0x80000000, LENGTH = 8M 
        ram (!rx) : org = 0x81000000, l = 8M
}
*/
SECTIONS
{
  /* Read-only sections, merged into text segment: */
  PROVIDE (__executable_start = 0xF0048000); . = 0xF0048000 + SIZEOF_HEADERS;
  .interp         : { *(.interp) }
  .hash           : { *(.hash) }
  .dynsym         : { *(.dynsym) }
  .dynstr         : { *(.dynstr) }
  .gnu.version    : { *(.gnu.version) }
  .gnu.version_d  : { *(.gnu.version_d) }
  .gnu.version_r  : { *(.gnu.version_r) }
  .rel.init       : { *(.rel.init) }
  .rela.init      : { *(.rela.init) }
  .rel.text       : { *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) }
  .rela.text      : { *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) }
  .rel.fini       : { *(.rel.fini) }
  .rela.fini      : { *(.rela.fini) }
  .rel.rodata     : { *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) }
  .rela.rodata    : { *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) }
  .rel.data.rel.ro   : { *(.rel.data.rel.ro*) }
  .rela.data.rel.ro   : { *(.rela.data.rel.ro*) }
  .rel.data       : { *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) }
  .rela.data      : { *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) }
  .rel.tdata      : { *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) }
  .rela.tdata     : { *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) }
  .rel.tbss       : { *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) }
  .rela.tbss      : { *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) }
  .rel.ctors      : { *(.rel.ctors) }
  .rela.ctors     : { *(.rela.ctors) }
  .rel.dtors      : { *(.rel.dtors) }
  .rela.dtors     : { *(.rela.dtors) }
  .rel.got        : { *(.rel.got) }
  .rela.got       : { *(.rela.got) }
  .rel.sdata      : { *(.rel.sdata .rel.sdata.* .rel.gnu.linkonce.s.*) }
  .rela.sdata     : { *(.rela.sdata .rela.sdata.* .rela.gnu.linkonce.s.*) }
  .rel.sbss       : { *(.rel.sbss .rel.sbss.* .rel.gnu.linkonce.sb.*) }
  .rela.sbss      : { *(.rela.sbss .rela.sbss.* .rela.gnu.linkonce.sb.*) }
  .rel.sdata2     : { *(.rel.sdata2 .rel.sdata2.* .rel.gnu.linkonce.s2.*) }
  .rela.sdata2    : { *(.rela.sdata2 .rela.sdata2.* .rela.gnu.linkonce.s2.*) }
  .rel.sbss2      : { *(.rel.sbss2 .rel.sbss2.* .rel.gnu.linkonce.sb2.*) }
  .rela.sbss2     : { *(.rela.sbss2 .rela.sbss2.* .rela.gnu.linkonce.sb2.*) }
  .rel.bss        : { *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) }
  .rela.bss       : { *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) }
  .rel.plt        : { *(.rel.plt) }
  .rela.plt       : { *(.rela.plt) }
  .init           :
  {
    KEEP (*(.init))
  } =0x9090
  .plt            : { *(.plt) }
  .text           :
  {
    *(.text .stub .text.* .gnu.linkonce.t.*)
    /* .gnu.warning sections are handled specially by elf32.em.  */
    *(.gnu.warning)
  } =0x9090
  .fini           :
  {
    KEEP (*(.fini))
  } =0x9090
  PROVIDE (__etext = .);
  PROVIDE (_etext = .);
  PROVIDE (etext = .);
  .rodata         : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
  .rodata1        : { *(.rodata1) }
  .sdata2         : { *(.sdata2 .sdata2.* .gnu.linkonce.s2.*) }
  .sbss2          : { *(.sbss2 .sbss2.* .gnu.linkonce.sb2.*) }
  .eh_frame_hdr : { *(.eh_frame_hdr) }
  .eh_frame       : ONLY_IF_RO { KEEP (*(.eh_frame)) }
  .gcc_except_table   : ONLY_IF_RO { *(.gcc_except_table) }
  /* Adjust the address for the data segment.  We want to adjust up to
     the same address within the page on the next page up.  */
  . = ALIGN(0x1000) + (. & (0x1000 - 1));
  /* For backward-compatibility with tools that don't support the
     *_array_* sections below, our glibc's crt files contain weak
     definitions of symbols that they reference.  We don't want to use
     them, though, unless they're strictly necessary, because they'd
     bring us empty sections, unlike PROVIDE below, so we drop the
     sections from the crt files here.  */
  /DISCARD/ : {
      */crti.o(.init_array .fini_array .preinit_array)
      */crtn.o(.init_array .fini_array .preinit_array)
  }
  /* Exception handling  */
  .eh_frame       : ONLY_IF_RW { KEEP (*(.eh_frame)) }
  .gcc_except_table   : ONLY_IF_RW { *(.gcc_except_table) }
  /* Thread Local Storage sections  */
  .tdata          : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
  .tbss           : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
  /* Ensure the __preinit_array_start label is properly aligned.  We
     could instead move the label definition inside the section, but
     the linker would then create the section even if it turns out to
     be empty, which isn't pretty.  */
  . = ALIGN(32 / 8);
  PROVIDE (__preinit_array_start = .);
  .preinit_array     : { *(.preinit_array) }
  PROVIDE (__preinit_array_end = .);
  PROVIDE (__init_array_start = .);
  .init_array     : { *(.init_array) }
  PROVIDE (__init_array_end = .);
  PROVIDE (__fini_array_start = .);
  .fini_array     : { *(.fini_array) }
  PROVIDE (__fini_array_end = .);
  .ctors          :
  {
    /* gcc uses crtbegin.o to find the start of
       the constructors, so we make sure it is
       first.  Because this is a wildcard, it
       doesn't matter if the user does not
       actually link against crtbegin.o; the
       linker won't look for a file to match a
       wildcard.  The wildcard also means that it
       doesn't matter which directory crtbegin.o
       is in.  */
    KEEP (*crtbegin*.o(.ctors))
    /* We don't want to include the .ctor section from
       from the crtend.o file until after the sorted ctors.
       The .ctor section from the crtend file contains the
       end of ctors marker and it must be last */
    KEEP (*(EXCLUDE_FILE (*crtend*.o ) .ctors))
    KEEP (*(SORT(.ctors.*)))
    KEEP (*(.ctors))
  }
  .dtors          :
  {
    KEEP (*crtbegin*.o(.dtors))
    KEEP (*(EXCLUDE_FILE (*crtend*.o ) .dtors))
    KEEP (*(SORT(.dtors.*)))
    KEEP (*(.dtors))
  }
  .jcr            : { KEEP (*(.jcr)) }
  .data.rel.ro : { *(.data.rel.ro.local) *(.data.rel.ro*) }
  .dynamic        : { *(.dynamic) }
  .data           :
  {
    *(.data .data.* .gnu.linkonce.d.*)
    SORT(CONSTRUCTORS)
  }
  .data1          : { *(.data1) }
  .got            : { *(.got.plt) *(.got) }
  /* We want the small data sections together, so single-instruction offsets
     can access them all, and initialized data all before uninitialized, so
     we can shorten the on-disk segment size.  */
  .sdata          :
  {
    *(.sdata .sdata.* .gnu.linkonce.s.*)
  }
  _edata = .;
  PROVIDE (edata = .);
  __bss_start = .;
  .sbss           :
  {
    PROVIDE (__sbss_start = .);
    PROVIDE (___sbss_start = .);
    *(.dynsbss)
    *(.sbss .sbss.* .gnu.linkonce.sb.*)
    *(.scommon)
    PROVIDE (__sbss_end = .);
    PROVIDE (___sbss_end = .);
  }
  .bss            :
  {
   *(.dynbss)
   *(.bss .bss.* .gnu.linkonce.b.*)
   *(COMMON)
   /* Align here to ensure that the .bss section occupies space up to
      _end.  Align after .bss to ensure correct alignment even if the
      .bss section disappears because there are no input sections.  */
   . = ALIGN(32 / 8);
  }
  . = ALIGN(32 / 8);
  _end = .;
  PROVIDE (end = .);
  /* Stabs debugging sections.  */
  .stab          0 : { *(.stab) }
  .stabstr       0 : { *(.stabstr) }
  .stab.excl     0 : { *(.stab.excl) }
  .stab.exclstr  0 : { *(.stab.exclstr) }
  .stab.index    0 : { *(.stab.index) }
  .stab.indexstr 0 : { *(.stab.indexstr) }
  .comment       0 : { *(.comment) }
  /* DWARF debug sections.
     Symbols in the DWARF debugging sections are relative to the beginning
     of the section so we begin them at 0.  */
  /* DWARF 1 */
  .debug          0 : { *(.debug) }
  .line           0 : { *(.line) }
  /* GNU DWARF 1 extensions */
  .debug_srcinfo  0 : { *(.debug_srcinfo) }
  .debug_sfnames  0 : { *(.debug_sfnames) }
  /* DWARF 1.1 and DWARF 2 */
  .debug_aranges  0 : { *(.debug_aranges) }
  .debug_pubnames 0 : { *(.debug_pubnames) }
  /* DWARF 2 */
  .debug_info     0 : { *(.debug_info .gnu.linkonce.wi.*) }
  .debug_abbrev   0 : { *(.debug_abbrev) }
  .debug_line     0 : { *(.debug_line) }
  .debug_frame    0 : { *(.debug_frame) }
  .debug_str      0 : { *(.debug_str) }
  .debug_loc      0 : { *(.debug_loc) }
  .debug_macinfo  0 : { *(.debug_macinfo) }
  /* SGI/MIPS DWARF 2 extensions */
  .debug_weaknames 0 : { *(.debug_weaknames) }
  .debug_funcnames 0 : { *(.debug_funcnames) }
  .debug_typenames 0 : { *(.debug_typenames) }
  .debug_varnames  0 : { *(.debug_varnames) }
  /DISCARD/ : { *(.note.GNU-stack) }
}