# HG changeset patch
# User yamahata@xxxxxxxxxxxxx
# Date 1181273719 -32400
# Node ID afbcf495550da3625d12ca5ae10120661b972a99
# Parent 96331db61e47065da5d374a9ec432d6837aaf5af
Fix p2m exposure.
A p2m table page doesn't belong to any domain, so special handling was
required. Instead, dom_p2m is introduced and all p2m table pages are
made to belong to dom_p2m.
PATCHNAME: fix_p2m_exposure

Signed-off-by: Isaku Yamahata

diff -r 96331db61e47 -r afbcf495550d xen/arch/ia64/xen/mm.c
--- a/xen/arch/ia64/xen/mm.c	Wed Jun 06 09:30:01 2007 -0600
+++ b/xen/arch/ia64/xen/mm.c	Fri Jun 08 12:35:19 2007 +0900
@@ -187,6 +187,12 @@ extern unsigned long ia64_iobase;
 
 static struct domain *dom_xen, *dom_io;
 
+/* This number is bigger than DOMID_SELF, DOMID_XEN and DOMID_IO.
+ * If more reserved domain ids are introduced, this may need to be increased.
+ */
+#define DOMID_P2M       (0x7FF8U)
+static struct domain *dom_p2m;
+
 // followings are stolen from arch_init_memory() @ xen/arch/x86/mm.c
 void
 alloc_dom_xen_and_dom_io(void)
@@ -227,11 +233,8 @@ mm_teardown_pte(struct domain* d, volati
     if (!mfn_valid(mfn))
         return;
     page = mfn_to_page(mfn);
-    // page might be pte page for p2m exposing. check it.
-    if (page_get_owner(page) == NULL) {
-        BUG_ON(page->count_info != 0);
-        return;
-    }
+    BUG_ON(page_get_owner(page) == NULL);
+
     // struct page_info corresponding to mfn may exist or not depending
     // on CONFIG_VIRTUAL_FRAME_TABLE.
     // The above check is too easy.
@@ -1379,10 +1382,19 @@ dom0vp_add_physmap_with_gmfn(struct doma
 #ifdef CONFIG_XEN_IA64_EXPOSE_P2M
 static struct page_info* p2m_pte_zero_page = NULL;
 
+/* This must be called before dom0's p2m table allocation. */
 void __init
 expose_p2m_init(void)
 {
     pte_t* pte;
+
+    /*
+     * Initialise our DOMID_P2M domain.
+     * This domain owns p2m table pages.
+     */
+    dom_p2m = alloc_domain(DOMID_P2M);
+    BUG_ON(dom_p2m == NULL);
+    dom_p2m->max_pages = ~0U;
 
     pte = pte_alloc_one_kernel(NULL, 0);
     BUG_ON(pte == NULL);
@@ -1393,13 +1405,8 @@ static int
 static int
 expose_p2m_page(struct domain* d, unsigned long mpaddr, struct page_info* page)
 {
-    // we can't get_page(page) here.
-    // pte page is allocated form xen heap.(see pte_alloc_one_kernel().)
-    // so that the page has NULL page owner and it's reference count
-    // is useless.
-    // see also mm_teardown_pte()'s page_get_owner() == NULL check.
-    BUG_ON(page_get_owner(page) != NULL);
-
+    int ret = get_page(page, dom_p2m);
+    BUG_ON(ret != 1);
     return __assign_domain_page(d, mpaddr, page_to_maddr(page),
                                 ASSIGN_readonly);
 }
@@ -1918,8 +1925,9 @@ void *pgtable_quicklist_alloc(void)
 void *pgtable_quicklist_alloc(void)
 {
     void *p;
+    BUG_ON(dom_p2m == NULL);
     if (!opt_p2m_xenheap) {
-        struct page_info *page = alloc_domheap_page(NULL);
+        struct page_info *page = alloc_domheap_page(dom_p2m);
         if (page == NULL)
             return NULL;
         p = page_to_virt(page);
@@ -1927,16 +1935,28 @@ void *pgtable_quicklist_alloc(void)
         return p;
     }
     p = alloc_xenheap_pages(0);
-    if (p)
+    if (p) {
         clear_page(p);
+        share_xen_page_with_guest(virt_to_page(p), dom_p2m,
+                                  /*
+                                   * This page should be read only.
+                                   * At this moment, the third argument
+                                   * doesn't make sense.
+                                   * It should be 1 when supported.
+                                   */
+                                  0);
+    }
 
     return p;
 }
 
 void pgtable_quicklist_free(void *pgtable_entry)
 {
-    if (!opt_p2m_xenheap)
-        free_domheap_page(virt_to_page(pgtable_entry));
-    else
+    struct page_info* page = virt_to_page(pgtable_entry);
+    BUG_ON(page_get_owner(page) != dom_p2m);
+    BUG_ON(page->count_info != (1 | PGC_allocated));
+
+    put_page(page);
+    if (opt_p2m_xenheap)
         free_xenheap_page(pgtable_entry);
 }
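
The ownership and refcounting contract the patch establishes can be
sanity-checked outside Xen with a toy model. This is a sketch only, not
hypervisor code: struct domain, struct page_info, get_page() and
put_page() below are simplified stand-ins for the Xen internals, with
the flag layout, locking and freeing path omitted.

#include <assert.h>
#include <stdio.h>

#define PGC_allocated 0x80000000U

struct domain    { unsigned int domid; };
struct page_info { struct domain *owner; unsigned int count_info; };

static struct domain dom_p2m = { 0x7FF8U };  /* DOMID_P2M */

/* stands in for alloc_domheap_page(dom_p2m): a fresh page is owned by
 * dom_p2m and carries the single allocation reference. */
static void alloc_page_for(struct page_info *pg, struct domain *d)
{
    pg->owner = d;
    pg->count_info = 1 | PGC_allocated;
}

/* stands in for get_page(): take a reference iff the claimed owner
 * really owns the page. */
static int get_page(struct page_info *pg, struct domain *d)
{
    if (pg->owner != d)
        return 0;
    pg->count_info++;
    return 1;
}

/* stands in for put_page(): drop one reference. */
static void put_page(struct page_info *pg)
{
    assert((pg->count_info & ~PGC_allocated) != 0);
    pg->count_info--;
}

int main(void)
{
    struct page_info pte_page;

    /* pgtable_quicklist_alloc(): the page starts out owned by dom_p2m */
    alloc_page_for(&pte_page, &dom_p2m);

    /* expose_p2m_page(): each exposure takes an extra reference */
    assert(get_page(&pte_page, &dom_p2m) == 1);

    /* mm_teardown_pte() side: tearing down the exposure drops it again */
    put_page(&pte_page);

    /* pgtable_quicklist_free(): only the allocation reference may remain */
    assert(pte_page.count_info == (1 | PGC_allocated));
    put_page(&pte_page);

    puts("p2m page refcount lifecycle OK");
    return 0;
}

Running the model exercises the same invariant the new BUG_ON()s in
pgtable_quicklist_free() assert: a p2m page returned to the quicklist
must be owned by dom_p2m and hold exactly the allocation reference.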