diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index acc1f34..f75011e 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -4592,119 +4592,129 @@ static int handle_iomem_range(unsigned long s, unsigned long e, void *p)
     return 0;
 }
 
-long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
+static int xenmem_add_to_physmap(struct domain *d, struct xen_add_to_physmap xatp)
 {
     struct page_info *page = NULL;
+    unsigned long prev_mfn, mfn = 0, gpfn;
     int rc;
 
-    switch ( op )
-    {
-    case XENMEM_add_to_physmap:
+    switch ( xatp.space )
     {
-        struct xen_add_to_physmap xatp;
-        unsigned long prev_mfn, mfn = 0, gpfn;
-        struct domain *d;
-
-        if ( copy_from_guest(&xatp, arg, 1) )
-            return -EFAULT;
+    case XENMAPSPACE_shared_info:
+        if ( xatp.idx == 0 )
+            mfn = virt_to_mfn(d->shared_info);
+        break;
+    case XENMAPSPACE_grant_table:
+        spin_lock(&d->grant_table->lock);
 
-        rc = rcu_lock_target_domain_by_id(xatp.domid, &d);
-        if ( rc != 0 )
-            return rc;
+        if ( d->grant_table->gt_version == 0 )
+            d->grant_table->gt_version = 1;
 
-        if ( xsm_add_to_physmap(current->domain, d) )
+        if ( d->grant_table->gt_version == 2 &&
+             (xatp.idx & XENMAPIDX_grant_table_status) )
         {
-            rcu_unlock_domain(d);
-            return -EPERM;
+            xatp.idx &= ~XENMAPIDX_grant_table_status;
+            if ( xatp.idx < nr_status_frames(d->grant_table) )
+                mfn = virt_to_mfn(d->grant_table->status[xatp.idx]);
+        }
+        else
+        {
+            if ( (xatp.idx >= nr_grant_frames(d->grant_table)) &&
+                 (xatp.idx < max_nr_grant_frames) )
+                gnttab_grow_table(d, xatp.idx + 1);
+
+            if ( xatp.idx < nr_grant_frames(d->grant_table) )
+                mfn = virt_to_mfn(d->grant_table->shared_raw[xatp.idx]);
         }
 
-        switch ( xatp.space )
+        spin_unlock(&d->grant_table->lock);
+        break;
+    case XENMAPSPACE_gmfn:
+    {
+        p2m_type_t p2mt;
+
+        xatp.idx = mfn_x(gfn_to_mfn_unshare(d, xatp.idx, &p2mt));
+        /* If the page is still shared, exit early */
+        if ( p2m_is_shared(p2mt) )
         {
-        case XENMAPSPACE_shared_info:
-            if ( xatp.idx == 0 )
-                mfn = virt_to_mfn(d->shared_info);
+            /* The caller owns the RCU reference on d; do not drop it here. */
+            return -ENOMEM;
+        }
+        if ( !get_page_from_pagenr(xatp.idx, d) )
             break;
-        case XENMAPSPACE_grant_table:
-            spin_lock(&d->grant_table->lock);
+        mfn = xatp.idx;
+        page = mfn_to_page(mfn);
+        break;
+    }
+    default:
+        break;
+    }
 
-            if ( d->grant_table->gt_version == 0 )
-                d->grant_table->gt_version = 1;
+    if ( !paging_mode_translate(d) || (mfn == 0) )
+    {
+        if ( page )
+            put_page(page);
+        /* Unlocking the domain is the caller's responsibility. */
+        return -EINVAL;
+    }
 
-            if ( d->grant_table->gt_version == 2 &&
-                 (xatp.idx & XENMAPIDX_grant_table_status) )
-            {
-                xatp.idx &= ~XENMAPIDX_grant_table_status;
-                if ( xatp.idx < nr_status_frames(d->grant_table) )
-                    mfn = virt_to_mfn(d->grant_table->status[xatp.idx]);
-            }
-            else
-            {
-                if ( (xatp.idx >= nr_grant_frames(d->grant_table)) &&
-                     (xatp.idx < max_nr_grant_frames) )
-                    gnttab_grow_table(d, xatp.idx + 1);
+    domain_lock(d);
 
-                if ( xatp.idx < nr_grant_frames(d->grant_table) )
-                    mfn = virt_to_mfn(d->grant_table->shared_raw[xatp.idx]);
-            }
+    if ( page )
+        put_page(page);
 
-            spin_unlock(&d->grant_table->lock);
-            break;
-        case XENMAPSPACE_gmfn:
-        {
-            p2m_type_t p2mt;
+    /* Remove previously mapped page if it was present. */
+    prev_mfn = gmfn_to_mfn(d, xatp.gpfn);
+    if ( mfn_valid(prev_mfn) )
+    {
+        if ( is_xen_heap_mfn(prev_mfn) )
+            /* Xen heap frames are simply unhooked from this phys slot. */
+            guest_physmap_remove_page(d, xatp.gpfn, prev_mfn, 0);
+        else
+            /* Normal domain memory is freed, to avoid leaking memory. */
+            guest_remove_page(d, xatp.gpfn);
+    }
 
-            xatp.idx = mfn_x(gfn_to_mfn_unshare(d, xatp.idx, &p2mt));
-            /* If the page is still shared, exit early */
-            if ( p2m_is_shared(p2mt) )
-            {
-                rcu_unlock_domain(d);
-                return -ENOMEM;
-            }
-            if ( !get_page_from_pagenr(xatp.idx, d) )
-                break;
-            mfn = xatp.idx;
-            page = mfn_to_page(mfn);
-            break;
-        }
-        default:
-            break;
-        }
+    /* Unmap from old location, if any. */
+    gpfn = get_gpfn_from_mfn(mfn);
+    ASSERT( gpfn != SHARED_M2P_ENTRY );
+    if ( gpfn != INVALID_M2P_ENTRY )
+        guest_physmap_remove_page(d, gpfn, mfn, 0);
 
-        if ( !paging_mode_translate(d) || (mfn == 0) )
-        {
-            if ( page )
-                put_page(page);
-            rcu_unlock_domain(d);
-            return -EINVAL;
-        }
+    /* Map at new location. */
+    rc = guest_physmap_add_page(d, xatp.gpfn, mfn, 0);
 
-        domain_lock(d);
+    domain_unlock(d);
 
-        if ( page )
-            put_page(page);
+    return rc;
+}
 
-        /* Remove previously mapped page if it was present. */
-        prev_mfn = gmfn_to_mfn(d, xatp.gpfn);
-        if ( mfn_valid(prev_mfn) )
-        {
-            if ( is_xen_heap_mfn(prev_mfn) )
-                /* Xen heap frames are simply unhooked from this phys slot. */
-                guest_physmap_remove_page(d, xatp.gpfn, prev_mfn, 0);
-            else
-                /* Normal domain memory is freed, to avoid leaking memory. */
-                guest_remove_page(d, xatp.gpfn);
-        }
-        /* Unmap from old location, if any. */
-        gpfn = get_gpfn_from_mfn(mfn);
-        ASSERT( gpfn != SHARED_M2P_ENTRY );
-        if ( gpfn != INVALID_M2P_ENTRY )
-            guest_physmap_remove_page(d, gpfn, mfn, 0);
+long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
+{
+    int rc;
+
+    switch ( op )
+    {
+    case XENMEM_add_to_physmap:
+    {
+        struct xen_add_to_physmap xatp;
+        struct domain *d;
 
-        /* Map at new location. */
-        rc = guest_physmap_add_page(d, xatp.gpfn, mfn, 0);
+        if ( copy_from_guest(&xatp, arg, 1) )
+            return -EFAULT;
+
+        rc = rcu_lock_target_domain_by_id(xatp.domid, &d);
+        if ( rc != 0 )
+            return rc;
+
+        if ( xsm_add_to_physmap(current->domain, d) )
+        {
+            rcu_unlock_domain(d);
+            return -EPERM;
+        }
 
-        domain_unlock(d);
+        rc = xenmem_add_to_physmap(d, xatp);
 
         rcu_unlock_domain(d);
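
The split above follows a simple ownership rule: arch_memory_op() takes the RCU reference on the target domain and is the only place that releases it, while xenmem_add_to_physmap() just does the mapping work and reports success or failure through its return value. Below is a standalone sketch of that rule with purely illustrative names; it is not Xen code, and a pthread mutex stands in for the per-domain reference.

/*
 * Standalone sketch, not Xen code: the lock-ownership rule used by the
 * patch above.  The dispatcher owns the lock for the whole operation;
 * the helper never drops a lock it did not take, and errors travel
 * back through the return value.
 */
#include <pthread.h>
#include <stdio.h>

struct domain {
    pthread_mutex_t lock;   /* stand-in for the per-domain reference */
    unsigned long mapped;   /* stand-in for p2m state */
};

/* Helper: does the work; assumes the caller already holds d->lock. */
static int add_to_physmap(struct domain *d, unsigned long gpfn)
{
    if ( gpfn == 0 )
        return -1;          /* report failure; do NOT unlock here */
    d->mapped = gpfn;
    return 0;
}

/* Dispatcher: acquires and releases the lock on every path. */
static int memory_op(struct domain *d, unsigned long gpfn)
{
    int rc;

    pthread_mutex_lock(&d->lock);
    rc = add_to_physmap(d, gpfn);   /* keep the return value */
    pthread_mutex_unlock(&d->lock); /* single unlock site */
    return rc;
}

int main(void)
{
    struct domain d = { PTHREAD_MUTEX_INITIALIZER, 0 };
    int rc = memory_op(&d, 42);

    printf("rc=%d mapped=%lu\n", rc, d.mapped);
    printf("rc=%d (expected failure)\n", memory_op(&d, 0));
    return 0;
}

Keeping the unlock at a single site in the dispatcher is what makes the extracted helper safe to reuse from other call sites, with no risk of a double unlock on an error path.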