Hi,
> This makes me a little nervous, since the ELF loading code is working
> fine for PPC at the moment. What exactly are you planning on changing?
Basically what the suggestion outlined: use the physical address plus
VIRT_BASE instead of placing virtual addresses in the physical address
fields. There was some discussion about this on the list a few weeks
ago. Patch attached for reference.
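
To illustrate the heuristic the patch applies to the program headers (a
minimal sketch only; phdr_to_virt() is a hypothetical helper name, not
something the patch adds):

    /*
     * Old kernels put virtual addresses into p_paddr, so a value that
     * is already at or above virt_base is used as-is; a real physical
     * address (newer kernels) is relocated by adding virt_base.
     */
    static unsigned long phdr_to_virt(unsigned long p_paddr,
                                      unsigned long virt_base)
    {
        return (p_paddr < virt_base) ? p_paddr + virt_base : p_paddr;
    }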
There is another change in the patch: introduce an
xc_unmap_foreign_range() as the symmetric counterpart to
xc_map_foreign_range() and make the domain builders use it instead of
calling munmap directly. That is one of the changes I need to make the
domain builders usable for kexec; more will follow ;)
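
The intended calling pattern looks like this (just a sketch; right now
xc_unmap_foreign_range() is a thin wrapper around munmap, the point is
that callers stop assuming that):

    void *va = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
                                    PROT_READ | PROT_WRITE, mfn);
    if (va == NULL)
        return -1;
    /* ... read or modify the mapped guest page ... */
    xc_unmap_foreign_range(xc_handle, dom, va, PAGE_SIZE);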
I'll split this into individual patches for submission (planned for the
post-3.0.2 development cycle).
cheers,
Gerd
--
Gerd 'just married' Hoffmann <kraxel@xxxxxxx>
I'm the hacker formerly known as Gerd Knorr.
http://www.suse.de/~kraxel/just-married.jpeg
diff -r 25e1c3b1a9f2 linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/page.h
--- a/linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/page.h Tue Mar 21 08:53:00 2006
+++ b/linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/page.h Tue Mar 21 12:37:41 2006
@@ -289,10 +289,6 @@
#endif
#define __KERNEL_START (__PAGE_OFFSET + __PHYSICAL_START)
-#undef LOAD_OFFSET
-#define LOAD_OFFSET 0
-
-
#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
#define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE)
#define MAXMEM (__FIXADDR_TOP-__PAGE_OFFSET-__VMALLOC_RESERVE)
diff -r 25e1c3b1a9f2 linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/asm/page.h
--- a/linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/asm/page.h Tue Mar 21 08:53:00 2006
+++ b/linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/asm/page.h Tue Mar 21 12:37:41 2006
@@ -260,9 +260,6 @@
#define __PAGE_OFFSET 0xffff880000000000
#endif /* !__ASSEMBLY__ */
-#undef LOAD_OFFSET
-#define LOAD_OFFSET 0
-
/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
diff -r 25e1c3b1a9f2 tools/libxc/xc_core.c
--- a/tools/libxc/xc_core.c Tue Mar 21 08:53:00 2006
+++ b/tools/libxc/xc_core.c Tue Mar 21 12:37:41 2006
@@ -20,7 +20,7 @@
if ( vaddr == NULL )
return -1;
memcpy(dst_page, vaddr, PAGE_SIZE);
- munmap(vaddr, PAGE_SIZE);
+ xc_unmap_foreign_range(xc_handle, domid, vaddr, PAGE_SIZE);
return 0;
}
diff -r 25e1c3b1a9f2 tools/libxc/xc_linux_build.c
--- a/tools/libxc/xc_linux_build.c Tue Mar 21 08:53:00 2006
+++ b/tools/libxc/xc_linux_build.c Tue Mar 21 12:37:41 2006
@@ -183,7 +183,7 @@
ltab = (uint64_t)page_array[pltab] << PAGE_SHIFT; \
pltab <<= PAGE_SHIFT; \
if ( vltab != NULL ) \
- munmap(vltab, PAGE_SIZE); \
+ xc_unmap_foreign_range(xc_handle, dom, vltab, PAGE_SIZE); \
if ( (vltab = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE, \
PROT_READ|PROT_WRITE, \
ltab >> PAGE_SHIFT)) == NULL ) \
@@ -243,15 +243,15 @@
}
vl1e++;
}
- munmap(vl1tab, PAGE_SIZE);
- munmap(vl2tab, PAGE_SIZE);
+ xc_unmap_foreign_range(xc_handle, dom, vl1tab, PAGE_SIZE);
+ xc_unmap_foreign_range(xc_handle, dom, vl2tab, PAGE_SIZE);
return 0;
error_out:
if (vl1tab)
- munmap(vl1tab, PAGE_SIZE);
+ xc_unmap_foreign_range(xc_handle, dom, vl1tab, PAGE_SIZE);
if (vl2tab)
- munmap(vl2tab, PAGE_SIZE);
+ xc_unmap_foreign_range(xc_handle, dom, vl2tab, PAGE_SIZE);
return -1;
}
@@ -329,18 +329,18 @@
vl1e++;
}
- munmap(vl1tab, PAGE_SIZE);
- munmap(vl2tab, PAGE_SIZE);
- munmap(vl3tab, PAGE_SIZE);
+ xc_unmap_foreign_range(xc_handle, dom, vl1tab, PAGE_SIZE);
+ xc_unmap_foreign_range(xc_handle, dom, vl2tab, PAGE_SIZE);
+ xc_unmap_foreign_range(xc_handle, dom, vl3tab, PAGE_SIZE);
return 0;
error_out:
if (vl1tab)
- munmap(vl1tab, PAGE_SIZE);
+ xc_unmap_foreign_range(xc_handle, dom, vl1tab, PAGE_SIZE);
if (vl2tab)
- munmap(vl2tab, PAGE_SIZE);
+ xc_unmap_foreign_range(xc_handle, dom, vl2tab, PAGE_SIZE);
if (vl3tab)
- munmap(vl3tab, PAGE_SIZE);
+ xc_unmap_foreign_range(xc_handle, dom, vl3tab, PAGE_SIZE);
return -1;
}
@@ -427,21 +427,21 @@
vl1e++;
}
- munmap(vl1tab, PAGE_SIZE);
- munmap(vl2tab, PAGE_SIZE);
- munmap(vl3tab, PAGE_SIZE);
- munmap(vl4tab, PAGE_SIZE);
+ xc_unmap_foreign_range(xc_handle, dom, vl1tab, PAGE_SIZE);
+ xc_unmap_foreign_range(xc_handle, dom, vl2tab, PAGE_SIZE);
+ xc_unmap_foreign_range(xc_handle, dom, vl3tab, PAGE_SIZE);
+ xc_unmap_foreign_range(xc_handle, dom, vl4tab, PAGE_SIZE);
return 0;
error_out:
if (vl1tab)
- munmap(vl1tab, PAGE_SIZE);
+ xc_unmap_foreign_range(xc_handle, dom, vl1tab, PAGE_SIZE);
if (vl2tab)
- munmap(vl2tab, PAGE_SIZE);
+ xc_unmap_foreign_range(xc_handle, dom, vl2tab, PAGE_SIZE);
if (vl3tab)
- munmap(vl3tab, PAGE_SIZE);
+ xc_unmap_foreign_range(xc_handle, dom, vl3tab, PAGE_SIZE);
if (vl4tab)
- munmap(vl4tab, PAGE_SIZE);
+ xc_unmap_foreign_range(xc_handle, dom, vl4tab, PAGE_SIZE);
return -1;
}
#endif
@@ -846,19 +846,19 @@
{
fprintf(stderr,"m2p update failure p=%lx m=%lx\n",
count, page_array[count]);
- munmap(physmap, PAGE_SIZE);
+ xc_unmap_foreign_range(xc_handle, dom, physmap, PAGE_SIZE);
goto error_out;
}
*physmap_e++ = page_array[count];
if ( ((unsigned long)physmap_e & (PAGE_SIZE-1)) == 0 )
{
- munmap(physmap, PAGE_SIZE);
+ xc_unmap_foreign_range(xc_handle, dom, physmap, PAGE_SIZE);
physmap = physmap_e = xc_map_foreign_range(
xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE,
page_array[physmap_pfn++]);
}
}
- munmap(physmap, PAGE_SIZE);
+ xc_unmap_foreign_range(xc_handle, dom, physmap, PAGE_SIZE);
/* Send the page update requests down to the hypervisor. */
if ( xc_finish_mmu_updates(xc_handle, mmu) )
@@ -958,7 +958,7 @@
strncpy((char *)start_info->cmd_line, cmdline, MAX_GUEST_CMDLINE);
start_info->cmd_line[MAX_GUEST_CMDLINE-1] = '\0';
}
- munmap(start_info, PAGE_SIZE);
+ xc_unmap_foreign_range(xc_handle, dom, start_info, PAGE_SIZE);
/* shared_info page starts its life empty. */
shared_info = xc_map_foreign_range(
@@ -968,7 +968,7 @@
for ( i = 0; i < MAX_VIRT_CPUS; i++ )
shared_info->vcpu_info[i].evtchn_upcall_mask = 1;
- munmap(shared_info, PAGE_SIZE);
+ xc_unmap_foreign_range(xc_handle, dom, shared_info, PAGE_SIZE);
/* Send the page update requests down to the hypervisor. */
if ( xc_finish_mmu_updates(xc_handle, mmu) )
diff -r 25e1c3b1a9f2 tools/libxc/xc_load_aout9.c
--- a/tools/libxc/xc_load_aout9.c Tue Mar 21 08:53:00 2006
+++ b/tools/libxc/xc_load_aout9.c Tue Mar 21 12:37:41 2006
@@ -126,7 +126,7 @@
pg = xc_map_foreign_range(xch, dom, PAGE_SIZE, PROT_WRITE,
parray[off>>PAGE_SHIFT]);
memcpy(pg + pgoff, buf, chunksz);
- munmap(pg, PAGE_SIZE);
+ xc_unmap_foreign_range(xch, dom, pg, PAGE_SIZE);
off += chunksz;
buf += chunksz;
diff -r 25e1c3b1a9f2 tools/libxc/xc_load_bin.c
--- a/tools/libxc/xc_load_bin.c Tue Mar 21 08:53:00 2006
+++ b/tools/libxc/xc_load_bin.c Tue Mar 21 12:37:41 2006
@@ -273,7 +273,7 @@
if ( chunksz > PAGE_SIZE )
chunksz = PAGE_SIZE;
memcpy(va, image + done, chunksz);
- munmap(va, PAGE_SIZE);
+ xc_unmap_foreign_range(xch, dom, va, PAGE_SIZE);
}
if ( 0 != image_info->bss_end_addr &&
@@ -289,7 +289,7 @@
if ( chunksz > (PAGE_SIZE - (done & (PAGE_SIZE-1))) )
chunksz = PAGE_SIZE - (done & (PAGE_SIZE-1));
memset(va + (done & (PAGE_SIZE-1)), 0, chunksz);
- munmap(va, PAGE_SIZE);
+ xc_unmap_foreign_range(xch, dom, va, PAGE_SIZE);
}
return 0;
diff -r 25e1c3b1a9f2 tools/libxc/xc_load_elf.c
--- a/tools/libxc/xc_load_elf.c Tue Mar 21 08:53:00 2006
+++ b/tools/libxc/xc_load_elf.c Tue Mar 21 12:37:41 2006
@@ -59,6 +59,7 @@
Elf_Phdr *phdr;
Elf_Shdr *shdr;
unsigned long kernstart = ~0UL, kernend=0UL;
+ unsigned long sstart, send;
const char *shstrtab;
char *guestinfo=NULL, *p;
int h;
@@ -117,6 +118,8 @@
}
if ( (strstr(guestinfo, "PAE=yes") != NULL) )
dsi->pae_kernel = 1;
+ if ( (p = strstr(guestinfo, "VIRT_BASE=")) != NULL )
+ dsi->virt_base = strtoul(p+10, &p, 0);
break;
}
@@ -138,11 +141,30 @@
phdr = (Elf_Phdr *)(image + ehdr->e_phoff + (h*ehdr->e_phentsize));
if ( !is_loadable_phdr(phdr) )
continue;
- if ( phdr->p_paddr < kernstart )
- kernstart = phdr->p_paddr;
- if ( (phdr->p_paddr + phdr->p_memsz) > kernend )
- kernend = phdr->p_paddr + phdr->p_memsz;
- }
+ sstart = phdr->p_paddr;
+ send = phdr->p_paddr + phdr->p_memsz;
+ /*
+ * bug compatibility alert: old Linux kernels used to have
+ * virtual addresses in the paddr headers, whereas newer ones
+ * (since kexec merge, around 2.6.14) correctly use physical
+ * addresses.
+ *
+ * As we want to be able to boot both kinds of kernels we'll
+ * do some guesswork here: if paddr is greater than virt_base
+ * we assume it is an old kernel and use it as-is. Otherwise
+ * we'll add virt_base to get the correct address.
+ */
+ if (sstart < dsi->virt_base) {
+ sstart += dsi->virt_base;
+ send += dsi->virt_base;
+ }
+ if ( sstart < kernstart )
+ kernstart = sstart;
+ if ( send > kernend )
+ kernend = send;
+ }
+ if (dsi->virt_base > 0 && ehdr->e_entry < dsi->virt_base)
+ ehdr->e_entry += dsi->virt_base;
if ( (kernstart > kernend) ||
(ehdr->e_entry < kernstart) ||
@@ -189,7 +211,11 @@
for ( done = 0; done < phdr->p_filesz; done += chunksz )
{
- pa = (phdr->p_paddr + done) - dsi->v_start;
+ /* bug compatibility alert, see above */
+ pa = phdr->p_paddr + done;
+ if (pa > dsi->virt_base)
+ pa -= dsi->virt_base;
+
va = xc_map_foreign_range(
xch, dom, PAGE_SIZE, PROT_WRITE, parray[pa>>PAGE_SHIFT]);
chunksz = phdr->p_filesz - done;
@@ -197,19 +223,23 @@
chunksz = PAGE_SIZE - (pa & (PAGE_SIZE-1));
memcpy(va + (pa & (PAGE_SIZE-1)),
image + phdr->p_offset + done, chunksz);
- munmap(va, PAGE_SIZE);
+ xc_unmap_foreign_range(xch, dom, va, PAGE_SIZE);
}
for ( ; done < phdr->p_memsz; done += chunksz )
{
- pa = (phdr->p_paddr + done) - dsi->v_start;
+ /* bug compatibility alert, see above */
+ pa = phdr->p_paddr + done;
+ if (pa > dsi->virt_base)
+ pa -= dsi->virt_base;
+
va = xc_map_foreign_range(
xch, dom, PAGE_SIZE, PROT_WRITE, parray[pa>>PAGE_SHIFT]);
chunksz = phdr->p_memsz - done;
if ( chunksz > (PAGE_SIZE - (pa & (PAGE_SIZE-1))) )
chunksz = PAGE_SIZE - (pa & (PAGE_SIZE-1));
memset(va + (pa & (PAGE_SIZE-1)), 0, chunksz);
- munmap(va, PAGE_SIZE);
+ xc_unmap_foreign_range(xch, dom, va, PAGE_SIZE);
}
}
diff -r 25e1c3b1a9f2 tools/libxc/xc_pagetab.c
--- a/tools/libxc/xc_pagetab.c Tue Mar 21 08:53:00 2006
+++ b/tools/libxc/xc_pagetab.c Tue Mar 21 12:37:41 2006
@@ -170,13 +170,13 @@
}
out_unmap_pt:
- munmap(pt, PAGE_SIZE);
+ xc_unmap_foreign_range(xc_handle, dom, pt, PAGE_SIZE);
out_unmap_pd:
- munmap(pd, PAGE_SIZE);
+ xc_unmap_foreign_range(xc_handle, dom, pd, PAGE_SIZE);
out_unmap_pdp:
- munmap(pdppage, PAGE_SIZE);
+ xc_unmap_foreign_range(xc_handle, dom, pdppage, PAGE_SIZE);
out_unmap_pml:
- munmap(pml, PAGE_SIZE);
+ xc_unmap_foreign_range(xc_handle, dom, pml, PAGE_SIZE);
out:
return mfn;
}
diff -r 25e1c3b1a9f2 tools/libxc/xc_private.c
--- a/tools/libxc/xc_private.c Tue Mar 21 08:53:00 2006
+++ b/tools/libxc/xc_private.c Tue Mar 21 12:37:41 2006
@@ -60,6 +60,12 @@
return NULL;
}
return addr;
+}
+
+int xc_unmap_foreign_range(int xc_handle, uint32_t dom,
+ void *ptr, int size)
+{
+ return munmap(ptr, size);
}
/*******************/
@@ -371,7 +377,7 @@
if ( vaddr == NULL )
return -1;
memcpy(vaddr, src_page, PAGE_SIZE);
- munmap(vaddr, PAGE_SIZE);
+ xc_unmap_foreign_range(xc_handle, domid, vaddr, PAGE_SIZE);
return 0;
}
@@ -384,7 +390,7 @@
if ( vaddr == NULL )
return -1;
memset(vaddr, 0, PAGE_SIZE);
- munmap(vaddr, PAGE_SIZE);
+ xc_unmap_foreign_range(xc_handle, domid, vaddr, PAGE_SIZE);
return 0;
}
@@ -426,7 +432,7 @@
if ( chunksz > (PAGE_SIZE - (pa & (PAGE_SIZE-1))) )
chunksz = PAGE_SIZE - (pa & (PAGE_SIZE-1));
memcpy(va + (pa & (PAGE_SIZE-1)), src + done, chunksz);
- munmap(va, PAGE_SIZE);
+ xc_unmap_foreign_range(xch, dom, va, PAGE_SIZE);
}
}
diff -r 25e1c3b1a9f2 tools/libxc/xc_ptrace.c
--- a/tools/libxc/xc_ptrace.c Tue Mar 21 08:53:00 2006
+++ b/tools/libxc/xc_ptrace.c Tue Mar 21 12:37:41 2006
@@ -188,7 +188,7 @@
p = l1[l1_table_offset_pae(va)] >> PAGE_SHIFT;
if ( v != NULL )
- munmap(v, PAGE_SIZE);
+ xc_unmap_foreign_range(xc_handle, current_domid, v, PAGE_SIZE);
v = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, p);
if ( v == NULL )
return NULL;
@@ -255,7 +255,7 @@
{
cr3_phys[cpu] = ctxt[cpu].ctrlreg[3];
if ( cr3_virt[cpu] )
- munmap(cr3_virt[cpu], PAGE_SIZE);
+ xc_unmap_foreign_range(xc_handle, current_domid, cr3_virt[cpu], PAGE_SIZE);
cr3_virt[cpu] = xc_map_foreign_range(
xc_handle, current_domid, PAGE_SIZE, PROT_READ,
cr3_phys[cpu] >> PAGE_SHIFT);
@@ -270,7 +270,7 @@
{
pde_phys[cpu] = pde;
if ( pde_virt[cpu] )
- munmap(pde_virt[cpu], PAGE_SIZE);
+ xc_unmap_foreign_range(xc_handle, current_domid, pde_virt[cpu], PAGE_SIZE);
pde_virt[cpu] = xc_map_foreign_range(
xc_handle, current_domid, PAGE_SIZE, PROT_READ,
pde_phys[cpu] >> PAGE_SHIFT);
@@ -285,7 +285,7 @@
{
page_phys[cpu] = page;
if ( page_virt[cpu] )
- munmap(page_virt[cpu], PAGE_SIZE);
+ xc_unmap_foreign_range(xc_handle, current_domid, page_virt[cpu], PAGE_SIZE);
page_virt[cpu] = xc_map_foreign_range(
xc_handle, current_domid, PAGE_SIZE, perm,
page_phys[cpu] >> PAGE_SHIFT);
diff -r 25e1c3b1a9f2 tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h Tue Mar 21 08:53:00 2006
+++ b/tools/libxc/xenctrl.h Tue Mar 21 12:37:41 2006
@@ -458,8 +458,10 @@
* @parm mfn the frame address to map.
*/
void *xc_map_foreign_range(int xc_handle, uint32_t dom,
- int size, int prot,
- unsigned long mfn );
+ int size, int prot,
+ unsigned long mfn );
+int xc_unmap_foreign_range(int xc_handle, uint32_t dom,
+ void *ptr, int size);
void *xc_map_foreign_batch(int xc_handle, uint32_t dom, int prot,
unsigned long *arr, int num );
diff -r 25e1c3b1a9f2 tools/libxc/xg_private.h
--- a/tools/libxc/xg_private.h Tue Mar 21 08:53:00 2006
+++ b/tools/libxc/xg_private.h Tue Mar 21 12:37:41 2006
@@ -135,6 +135,7 @@
unsigned long v_kernstart;
unsigned long v_kernend;
unsigned long v_kernentry;
+ unsigned long virt_base;
unsigned int load_symtab;
unsigned int pae_kernel;
diff -r 25e1c3b1a9f2 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Tue Mar 21 08:53:00 2006
+++ b/xen/arch/x86/mm.c Tue Mar 21 12:37:41 2006
@@ -1603,6 +1603,7 @@
if ( unlikely(!okay) )
{
/* Switch to idle pagetable: this VCPU has no active p.t. now. */
+ MEM_LOG("%s: slow path via idle table", __FUNCTION__);
old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
v->arch.guest_table = mk_pagetable(0);
update_pagetables(v);
diff -r 25e1c3b1a9f2 xen/common/elf.c
--- a/xen/common/elf.c Tue Mar 21 08:53:00 2006
+++ b/xen/common/elf.c Tue Mar 21 12:37:41 2006
@@ -24,6 +24,7 @@
Elf_Phdr *phdr;
Elf_Shdr *shdr;
unsigned long kernstart = ~0UL, kernend=0UL;
+ unsigned long sstart, send;
char *shstrtab, *guestinfo=NULL, *p;
char *elfbase = (char *)dsi->image_addr;
int h;
@@ -76,6 +77,8 @@
return -EINVAL;
}
+ if ( (p = strstr(guestinfo, "VIRT_BASE=")) != NULL )
+ dsi->virt_base = simple_strtoul(p+10, &p, 0);
break;
}
@@ -86,11 +89,40 @@
phdr = (Elf_Phdr *)(elfbase + ehdr->e_phoff + (h*ehdr->e_phentsize));
if ( !is_loadable_phdr(phdr) )
continue;
- if ( phdr->p_paddr < kernstart )
- kernstart = phdr->p_paddr;
- if ( (phdr->p_paddr + phdr->p_memsz) > kernend )
- kernend = phdr->p_paddr + phdr->p_memsz;
- }
+ sstart = phdr->p_paddr;
+ send = phdr->p_paddr + phdr->p_memsz;
+ /*
+ * bug compatibility alert: old Linux kernels used to have
+ * virtual addresses in the paddr headers, whereas newer ones
+ * (since kexec merge, around 2.6.14) correctly use physical
+ * addresses.
+ *
+ * As we want to be able to boot both kinds of kernels we'll
+ * do some guesswork here: if paddr is greater than virt_base
+ * we assume it is an old kernel and use it as-is. Otherwise
+ * we'll add virt_base to get the correct address.
+ */
+ if (sstart < dsi->virt_base) {
+ sstart += dsi->virt_base;
+ send += dsi->virt_base;
+ }
+ printk("%s: program hdr: %08lx (=vaddr) "
+ "paddr: %08lx filesz: %08lx memsz: %08lx => %08lx-%08lx\n",
+ __FUNCTION__,
+ (unsigned long)phdr->p_vaddr,
+ (unsigned long)phdr->p_paddr,
+ (unsigned long)phdr->p_filesz,
+ (unsigned long)phdr->p_memsz,
+ sstart, send);
+ if ( sstart < kernstart )
+ kernstart = sstart;
+ if ( send > kernend )
+ kernend = send;
+ }
+ if (dsi->virt_base > 0 && ehdr->e_entry < dsi->virt_base)
+ ehdr->e_entry += dsi->virt_base;
+ printk("%s: entry point: %08lx\n", __FUNCTION__,
+ (unsigned long)ehdr->e_entry);
if ( (kernstart > kernend) ||
(ehdr->e_entry < kernstart) ||
@@ -126,6 +158,7 @@
char *elfbase = (char *)dsi->image_addr;
Elf_Ehdr *ehdr = (Elf_Ehdr *)dsi->image_addr;
Elf_Phdr *phdr;
+ unsigned long vaddr;
int h;
for ( h = 0; h < ehdr->e_phnum; h++ )
@@ -133,11 +166,15 @@
phdr = (Elf_Phdr *)(elfbase + ehdr->e_phoff + (h*ehdr->e_phentsize));
if ( !is_loadable_phdr(phdr) )
continue;
+ vaddr = phdr->p_paddr;
+ if (vaddr < dsi->virt_base)
+ vaddr += dsi->virt_base;
if ( phdr->p_filesz != 0 )
- memcpy((char *)phdr->p_paddr, elfbase + phdr->p_offset,
+ memcpy((char *)vaddr,
+ elfbase + phdr->p_offset,
phdr->p_filesz);
if ( phdr->p_memsz > phdr->p_filesz )
- memset((char *)phdr->p_paddr + phdr->p_filesz, 0,
+ memset((char *)vaddr + phdr->p_filesz, 0,
phdr->p_memsz - phdr->p_filesz);
}
diff -r 25e1c3b1a9f2 xen/include/xen/sched.h
--- a/xen/include/xen/sched.h Tue Mar 21 08:53:00 2006
+++ b/xen/include/xen/sched.h Tue Mar 21 12:37:41 2006
@@ -168,6 +168,7 @@
unsigned long v_kernstart;
unsigned long v_kernend;
unsigned long v_kernentry;
+ unsigned long virt_base;
/* Initialised by loader: Private. */
unsigned int load_symtab;
unsigned long symtab_addr;