Keir Fraser wrote:
> On 17/02/2009 04:00, "Mukesh Rathor" <mukesh.rathor@xxxxxxxxxx> wrote:
>
>> Could the fix be made in the loop above the above mentioned loop in the
>> function where it's ensuring init mappings cover kernel+tables to check
>> for pud also?
>
> That would be the correct thing to do. I think the kernel should be
> self-sufficient in this respect, rather than relying on libxc to do this pud
> setup for it. It should be an easy kernel fix, so long as you can guarantee
> to be able to easily allocate the page for the pud. :-)
>
> -- Keir
>
In that case, please find the proposed patch attached. I'm currently having
difficulty building the latest tree, so it was tested on an older version
where the function is the same. Please let me know if it looks good; I can
resubmit with any changes.
thanks,
Mukesh
--- init-xen.c.orig 2009-02-17 18:58:58.716954000 -0800
+++ init-xen.c 2009-02-17 19:33:57.310074000 -0800
@@ -587,35 +587,45 @@ void __init xen_init_pt(void)
 static void __init extend_init_mapping(unsigned long tables_space)
 {
 	unsigned long va = __START_KERNEL_map;
-	unsigned long phys, addr, *pte_page;
+	unsigned long phys, addr, *pte_page, *pmd_page;
+	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte, new_pte;
-	unsigned long *page = (unsigned long *)init_level4_pgt;
+	unsigned long *pud_page = (unsigned long *)init_level4_pgt;
 
-	addr = page[pgd_index(va)];
-	addr_to_page(addr, page);
-	addr = page[pud_index(va)];
-	addr_to_page(addr, page);
-
-	/* Kill mapping of low 1MB. */
+	/* Kill low mappings */
 	while (va < (unsigned long)&_text) {
 		if (HYPERVISOR_update_va_mapping(va, __pte_ma(0), 0))
 			BUG();
 		va += PAGE_SIZE;
 	}
 
+	addr = pud_page[pgd_index(va)];		/* get pud entry from pgd tbl */
+	addr_to_page(addr, pud_page);		/* pud_page now va of pud tbl */
 	/* Ensure init mappings cover kernel text/data and initial tables. */
 	while (va < (__START_KERNEL_map
 		     + (start_pfn << PAGE_SHIFT)
 		     + tables_space)) {
-		pmd = (pmd_t *)&page[pmd_index(va)];
+
+		pud = (pud_t *)&pud_page[pud_index(va)];	/* get pud entry */
+		if (pud_none(*pud)) {
+			pmd_page = alloc_static_page(&phys);
+			early_make_page_readonly(
+				pmd_page, XENFEAT_writable_page_tables);
+			set_pud(pud, __pud(phys | _KERNPG_TABLE));
+		} else {
+			addr = pud_page[pud_index(va)];
+			addr_to_page(addr, pmd_page);
+		}
+
+		pmd = (pmd_t *)&pmd_page[pmd_index(va)];
 		if (pmd_none(*pmd)) {
 			pte_page = alloc_static_page(&phys);
 			early_make_page_readonly(
 				pte_page, XENFEAT_writable_page_tables);
 			set_pmd(pmd, __pmd(phys | _KERNPG_TABLE));
 		} else {
-			addr = page[pmd_index(va)];
+			addr = pmd_page[pmd_index(va)];
 			addr_to_page(addr, pte_page);
 		}
 		pte = (pte_t *)&pte_page[pte_index(va)];
@@ -630,7 +640,7 @@ static void __init extend_init_mapping(u
 
 	/* Finally, blow away any spurious initial mappings. */
 	while (1) {
-		pmd = (pmd_t *)&page[pmd_index(va)];
+		pmd = (pmd_t *)&pmd_page[pmd_index(va)];
 		if (pmd_none(*pmd))
 			break;
 		if (HYPERVISOR_update_va_mapping(va, __pte_ma(0), 0))
@@ -719,6 +729,7 @@ static void xen_finish_init_mapping(void
 	table_end = start_pfn;
 }
 
+
 /* Setup the direct mapping of the physical memory at PAGE_OFFSET.
    This runs before bootmem is initialized and gets pages directly from the
    physical memory. To access them they are temporarily mapped. */
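
A quick note for reviewers on the new pud-level block above: it is the same
walk-or-allocate pattern the function already uses one level down at the pmd.
Spelled out with comments (only a restatement for review, not additional code
to apply):

	/* Locate the pud slot covering va. */
	pud = (pud_t *)&pud_page[pud_index(va)];
	if (pud_none(*pud)) {
		/* No pmd table behind this slot yet: allocate a fresh page,
		   make it read-only the same way the existing code does for
		   pte pages, and hook it into the pud. */
		pmd_page = alloc_static_page(&phys);
		early_make_page_readonly(pmd_page,
					 XENFEAT_writable_page_tables);
		set_pud(pud, __pud(phys | _KERNPG_TABLE));
	} else {
		/* A pmd table already exists: turn the machine address in
		   the entry back into a virtual address so it can be
		   indexed below. */
		addr = pud_page[pud_index(va)];
		addr_to_page(addr, pmd_page);
	}
	/* The existing pmd/pte logic then runs against pmd_page. */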