diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index acc1f34..7cbbb07 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -4592,9 +4592,103 @@ static int handle_iomem_range(unsigned long s, unsigned long e, void *p)
     return 0;
 }
 
+static int xenmem_add_to_physmap(struct domain *d,
+                                 struct xen_add_to_physmap xatp)
+{
+    struct page_info* page = NULL;
+    unsigned long mfn = 0;
+    unsigned long prev_mfn, gpfn;
+    int rc;
+
+    switch ( xatp.space )
+    {
+    case XENMAPSPACE_shared_info:
+        if ( xatp.idx == 0 )
+            mfn = virt_to_mfn(d->shared_info);
+        break;
+    case XENMAPSPACE_grant_table:
+        spin_lock(&d->grant_table->lock);
+
+        if ( d->grant_table->gt_version == 0 )
+            d->grant_table->gt_version = 1;
+
+        if ( d->grant_table->gt_version == 2 &&
+             (xatp.idx & XENMAPIDX_grant_table_status) )
+        {
+            xatp.idx &= ~XENMAPIDX_grant_table_status;
+            if ( xatp.idx < nr_status_frames(d->grant_table) )
+                mfn = virt_to_mfn(d->grant_table->status[xatp.idx]);
+        }
+        else
+        {
+            if ( (xatp.idx >= nr_grant_frames(d->grant_table)) &&
+                 (xatp.idx < max_nr_grant_frames) )
+                gnttab_grow_table(d, xatp.idx + 1);
+
+            if ( xatp.idx < nr_grant_frames(d->grant_table) )
+                mfn = virt_to_mfn(d->grant_table->shared_raw[xatp.idx]);
+        }
+
+        spin_unlock(&d->grant_table->lock);
+        break;
+    case XENMAPSPACE_gmfn:
+    {
+        p2m_type_t p2mt;
+
+        xatp.idx = mfn_x(gfn_to_mfn_unshare(d, xatp.idx, &p2mt));
+        /* If the page is still shared, exit early */
+        if ( p2m_is_shared(p2mt) )
+            return -ENOMEM;
+        if ( !get_page_from_pagenr(xatp.idx, d) )
+            break;
+        mfn = xatp.idx;
+        page = mfn_to_page(mfn);
+        break;
+    }
+    default:
+        break;
+    }
+
+    if ( !paging_mode_translate(d) || (mfn == 0) )
+    {
+        if ( page )
+            put_page(page);
+        return -EINVAL;
+    }
+
+    domain_lock(d);
+
+    if ( page )
+        put_page(page);
+
+    /* Remove previously mapped page if it was present. */
+    prev_mfn = gmfn_to_mfn(d, xatp.gpfn);
+    if ( mfn_valid(prev_mfn) )
+    {
+        if ( is_xen_heap_mfn(prev_mfn) )
+            /* Xen heap frames are simply unhooked from this phys slot. */
+            guest_physmap_remove_page(d, xatp.gpfn, prev_mfn, 0);
+        else
+            /* Normal domain memory is freed, to avoid leaking memory. */
+            guest_remove_page(d, xatp.gpfn);
+    }
+
+    /* Unmap from old location, if any. */
+    gpfn = get_gpfn_from_mfn(mfn);
+    ASSERT( gpfn != SHARED_M2P_ENTRY );
+    if ( gpfn != INVALID_M2P_ENTRY )
+        guest_physmap_remove_page(d, gpfn, mfn, 0);
+
+    /* Map at new location. */
+    rc = guest_physmap_add_page(d, xatp.gpfn, mfn, 0);
+
+    domain_unlock(d);
+
+    return rc;
+}
+
 long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
 {
-    struct page_info *page = NULL;
     int rc;
 
     switch ( op )
@@ -4602,7 +4696,6 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
     case XENMEM_add_to_physmap:
     {
         struct xen_add_to_physmap xatp;
-        unsigned long prev_mfn, mfn = 0, gpfn;
         struct domain *d;
 
         if ( copy_from_guest(&xatp, arg, 1) )
@@ -4618,93 +4711,7 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
             return -EPERM;
         }
 
-        switch ( xatp.space )
-        {
-        case XENMAPSPACE_shared_info:
-            if ( xatp.idx == 0 )
-                mfn = virt_to_mfn(d->shared_info);
-            break;
-        case XENMAPSPACE_grant_table:
-            spin_lock(&d->grant_table->lock);
-
-            if ( d->grant_table->gt_version == 0 )
-                d->grant_table->gt_version = 1;
-
-            if ( d->grant_table->gt_version == 2 &&
-                 (xatp.idx & XENMAPIDX_grant_table_status) )
-            {
-                xatp.idx &= ~XENMAPIDX_grant_table_status;
-                if ( xatp.idx < nr_status_frames(d->grant_table) )
-                    mfn = virt_to_mfn(d->grant_table->status[xatp.idx]);
-            }
-            else
-            {
-                if ( (xatp.idx >= nr_grant_frames(d->grant_table)) &&
-                     (xatp.idx < max_nr_grant_frames) )
-                    gnttab_grow_table(d, xatp.idx + 1);
-
-                if ( xatp.idx < nr_grant_frames(d->grant_table) )
-                    mfn = virt_to_mfn(d->grant_table->shared_raw[xatp.idx]);
-            }
-
-            spin_unlock(&d->grant_table->lock);
-            break;
-        case XENMAPSPACE_gmfn:
-        {
-            p2m_type_t p2mt;
-
-            xatp.idx = mfn_x(gfn_to_mfn_unshare(d, xatp.idx, &p2mt));
-            /* If the page is still shared, exit early */
-            if ( p2m_is_shared(p2mt) )
-            {
-                rcu_unlock_domain(d);
-                return -ENOMEM;
-            }
-            if ( !get_page_from_pagenr(xatp.idx, d) )
-                break;
-            mfn = xatp.idx;
-            page = mfn_to_page(mfn);
-            break;
-        }
-        default:
-            break;
-        }
-
-        if ( !paging_mode_translate(d) || (mfn == 0) )
-        {
-            if ( page )
-                put_page(page);
-            rcu_unlock_domain(d);
-            return -EINVAL;
-        }
-
-        domain_lock(d);
-
-        if ( page )
-            put_page(page);
-
-        /* Remove previously mapped page if it was present. */
-        prev_mfn = gmfn_to_mfn(d, xatp.gpfn);
-        if ( mfn_valid(prev_mfn) )
-        {
-            if ( is_xen_heap_mfn(prev_mfn) )
-                /* Xen heap frames are simply unhooked from this phys slot. */
-                guest_physmap_remove_page(d, xatp.gpfn, prev_mfn, 0);
-            else
-                /* Normal domain memory is freed, to avoid leaking memory. */
-                guest_remove_page(d, xatp.gpfn);
-        }
-
-        /* Unmap from old location, if any. */
-        gpfn = get_gpfn_from_mfn(mfn);
-        ASSERT( gpfn != SHARED_M2P_ENTRY );
-        if ( gpfn != INVALID_M2P_ENTRY )
-            guest_physmap_remove_page(d, gpfn, mfn, 0);
-
-        /* Map at new location. */
-        rc = guest_physmap_add_page(d, xatp.gpfn, mfn, 0);
-
-        domain_unlock(d);
+        rc = xenmem_add_to_physmap(d, xatp);
 
         rcu_unlock_domain(d);