When new memory is added to the system, we need to set up the frame table to
cover the new memory ranges as well.
Change the frame table setup function to support memory add.
For 32-bit and compatibility guests, the new mapping will be propagated to
the other guests in the page fault handler when accessed (in a separate patch).
I didn't add the 4-page slack hook to the 1G page step, since that case has a
low probability of occurring.
Signed-off-by: Jiang, Yunhong <yunhong.jiang@xxxxxxxxx>
Signed-off-by: Wang, shane <shane.wang@xxxxxxxxx>
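For reference, here is a minimal sketch (not part of this patch) of how a
memory hot-add path could drive the new helper. The function name
memory_add_frametable() and the max_page handling are assumptions for
illustration only; the backing pages are presumably taken from the hot-added
range itself via alloc_boot_page_range().

int memory_add_frametable(unsigned long spfn, unsigned long epfn)
{
    int rc;

    /* Extend the frame table mapping to cover the hot-added range. */
    rc = construct_frame_table(spfn, epfn);
    if ( rc )
        return rc;

    /* Illustrative assumption: grow max_page so the new frames are valid. */
    if ( epfn > max_page )
        max_page = epfn;

    return 0;
}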
diff -r eff1ade3454d xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Thu Jun 25 21:42:12 2009 +0800
+++ b/xen/arch/x86/mm.c Sun Jun 28 02:43:58 2009 +0800
@@ -155,6 +155,7 @@ struct domain *dom_xen, *dom_io;
/* Frame table and its size in pages. */
struct page_info *__read_mostly frame_table;
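+/* Number of pages currently mapped for the frame table itself. */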
+unsigned long frame_table_pages;
unsigned long max_page, max_boot_page;
unsigned long total_pages;
@@ -179,33 +180,79 @@ l2_pgentry_t *compat_idle_pg_table_l2 =
#define l3_disallow_mask(d) L3_DISALLOW_MASK
#endif
-void __init init_frametable(void)
-{
-    unsigned long nr_pages, page_step, i, mfn;
-
-    frame_table = (struct page_info *)FRAMETABLE_VIRT_START;
-
-    nr_pages = PFN_UP(max_page * sizeof(*frame_table));
-    page_step = 1 << (cpu_has_page1gb ? L3_PAGETABLE_SHIFT - PAGE_SHIFT
-                                      : L2_PAGETABLE_SHIFT - PAGE_SHIFT);
-
-    for ( i = 0; i < nr_pages; i += page_step )
-    {
+int construct_frame_table(unsigned long spfn, unsigned long epfn)
+{
+    unsigned long nr_pages, i, mfn, old_pages, new_pages = 0;
+
+    old_pages = frame_table_pages;
+    nr_pages = PFN_UP(epfn * sizeof(*frame_table));
+
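+    /*
+     * Map frame-table space for [old_pages, nr_pages), preferring 1G
+     * mappings, then 2M, then falling back to individual 4K pages.
+     */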
+    for ( i = frame_table_pages; i < nr_pages; )
+    {
+        if ( cpu_has_page1gb && !(i & ((1UL << (2 * PAGETABLE_ORDER)) - 1)) &&
+             (nr_pages - i) > (1UL << (2 * PAGETABLE_ORDER)) )
+        {
+            mfn = alloc_boot_page_range(spfn, epfn,
+                                        1UL << (2 * PAGETABLE_ORDER),
+                                        1UL << (2 * PAGETABLE_ORDER));
+            if ( mfn )
+            {
+                map_pages_to_xen(FRAMETABLE_VIRT_START + (i << PAGE_SHIFT),
+                                 mfn, 1UL << (2 * PAGETABLE_ORDER),
+                                 PAGE_HYPERVISOR);
+                i += 1UL << (2 * PAGETABLE_ORDER);
+                new_pages += 1UL << (2 * PAGETABLE_ORDER);
+                continue;
+            }
+        }
        /*
         * The hardcoded 4 below is arbitrary - just pick whatever you think
         * is reasonable to waste as a trade-off for using a large page.
         */
-        while (nr_pages + 4 - i < page_step)
-            page_step >>= PAGETABLE_ORDER;
-        mfn = alloc_boot_pages(page_step, page_step);
-        if ( mfn == 0 )
-            panic("Not enough memory for frame table\n");
-        map_pages_to_xen(
-            FRAMETABLE_VIRT_START + (i << PAGE_SHIFT),
-            mfn, page_step, PAGE_HYPERVISOR);
-    }
-
-    memset(frame_table, 0, nr_pages << PAGE_SHIFT);
+        if ( !(i & ((1UL << PAGETABLE_ORDER) - 1)) &&
+             (nr_pages + 4 - i) > (1UL << PAGETABLE_ORDER) )
+        {
+            mfn = alloc_boot_page_range(spfn, epfn,
+                                        1UL << PAGETABLE_ORDER,
+                                        1UL << PAGETABLE_ORDER);
+            if ( mfn )
+            {
+                map_pages_to_xen(FRAMETABLE_VIRT_START + (i << PAGE_SHIFT),
+                                 mfn, 1UL << PAGETABLE_ORDER, PAGE_HYPERVISOR);
+                i += 1UL << PAGETABLE_ORDER;
+                new_pages += 1UL << PAGETABLE_ORDER;
+                continue;
+            }
+        }
+
+        /* Fall back to a single 4K mapping. */
+        mfn = alloc_boot_page_range(spfn, epfn, 1, 1);
+        if ( mfn )
+        {
+            map_pages_to_xen(FRAMETABLE_VIRT_START + (i << PAGE_SHIFT),
+                             mfn, 1, PAGE_HYPERVISOR);
+            i++;
+            new_pages++;
+            continue;
+        }
+        else
+            return -ENOMEM;
+    }
+
+    memset((void *)(FRAMETABLE_VIRT_START + (old_pages << PAGE_SHIFT)),
+           0, new_pages << PAGE_SHIFT);
+    frame_table_pages += new_pages;
+
+    return 0;
+}
+
+void __init init_frametable(void)
+{
+    int rc;
+
+    frame_table = (struct page_info *)FRAMETABLE_VIRT_START;
+    frame_table_pages = 0;
+    if ( (rc = construct_frame_table(0, max_page)) )
+        panic("Failed to init frame table: %d\n", rc);
}
#if defined(__x86_64__)