# HG changeset patch
# User cegger
# Date 1271330314 -7200

Change p2m infrastructure to operate on per-p2m instead of per-domain.
This allows us to use multiple p2m tables per-domain.

diff -r f96f6f2cd19a -r 3f48b73b0a30 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -160,7 +160,7 @@ void dump_pageframe_info(struct domain *
 
     if ( is_hvm_domain(d) )
     {
-        p2m_pod_dump_data(d);
+        p2m_pod_dump_data(p2m_get_hostp2m(d));
     }
 
     page_list_for_each ( page, &d->xenpage_list )
diff -r f96f6f2cd19a -r 3f48b73b0a30 xen/arch/x86/domctl.c
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -992,7 +992,7 @@ long arch_do_domctl(
 
             ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1);
             for ( i = 0; i < nr_mfns; i++ )
-                set_mmio_p2m_entry(d, gfn+i, _mfn(mfn+i));
+                set_mmio_p2m_entry(p2m_get_hostp2m(d), gfn+i, _mfn(mfn+i));
         }
         else
         {
@@ -1001,7 +1001,7 @@ long arch_do_domctl(
                      gfn, mfn, nr_mfns);
 
             for ( i = 0; i < nr_mfns; i++ )
-                clear_mmio_p2m_entry(d, gfn+i);
+                clear_mmio_p2m_entry(p2m_get_hostp2m(d), gfn+i);
             ret = iomem_deny_access(d, mfn, mfn + nr_mfns - 1);
         }
diff -r f96f6f2cd19a -r 3f48b73b0a30 xen/arch/x86/hvm/emulate.c
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -55,6 +55,7 @@ int hvmemul_do_io(
     paddr_t value = ram_gpa;
     int value_is_ptr = (p_data == NULL);
     struct vcpu *curr = current;
+    struct p2m_domain *p2m = p2m_get_hostp2m(curr->domain);
     ioreq_t *p = get_ioreq(curr);
     unsigned long ram_gfn = paddr_to_pfn(ram_gpa);
     p2m_type_t p2mt;
@@ -62,10 +63,10 @@ int hvmemul_do_io(
     int rc;
 
     /* Check for paged out page */
-    ram_mfn = gfn_to_mfn_unshare(curr->domain, ram_gfn, &p2mt, 0);
+    ram_mfn = gfn_to_mfn_unshare(p2m, ram_gfn, &p2mt, 0);
     if ( p2m_is_paging(p2mt) )
     {
-        p2m_mem_paging_populate(curr->domain, ram_gfn);
+        p2m_mem_paging_populate(p2m, ram_gfn);
         return X86EMUL_RETRY;
     }
     if ( p2m_is_shared(p2mt) )
@@ -638,7 +639,7 @@ static int hvmemul_rep_movs(
     unsigned long saddr, daddr, bytes;
     paddr_t sgpa, dgpa;
     uint32_t pfec = PFEC_page_present;
-    struct domain *d = current->domain;
+    struct p2m_domain *p2m = p2m_get_hostp2m(current->domain);
     p2m_type_t p2mt;
     int rc, df = !!(ctxt->regs->eflags & X86_EFLAGS_DF);
     char *buf;
@@ -669,12 +670,12 @@ static int hvmemul_rep_movs(
     if ( rc != X86EMUL_OKAY )
         return rc;
 
-    (void)gfn_to_mfn(d, sgpa >> PAGE_SHIFT, &p2mt);
+    (void)gfn_to_mfn(p2m, sgpa >> PAGE_SHIFT, &p2mt);
     if ( !p2m_is_ram(p2mt) && !p2m_is_grant(p2mt) )
         return hvmemul_do_mmio(
             sgpa, reps, bytes_per_rep, dgpa, IOREQ_READ, df, NULL);
 
-    (void)gfn_to_mfn(d, dgpa >> PAGE_SHIFT, &p2mt);
+    (void)gfn_to_mfn(p2m, dgpa >> PAGE_SHIFT, &p2mt);
     if ( !p2m_is_ram(p2mt) && !p2m_is_grant(p2mt) )
         return hvmemul_do_mmio(
             dgpa, reps, bytes_per_rep, sgpa, IOREQ_WRITE, df, NULL);
diff -r f96f6f2cd19a -r 3f48b73b0a30 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -308,16 +308,17 @@ static int hvm_set_ioreq_page(
     struct domain *d, struct hvm_ioreq_page *iorp, unsigned long gmfn)
 {
     struct page_info *page;
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
     p2m_type_t p2mt;
     unsigned long mfn;
     void *va;
 
-    mfn = mfn_x(gfn_to_mfn_unshare(d, gmfn, &p2mt, 0));
+    mfn = mfn_x(gfn_to_mfn_unshare(p2m, gmfn, &p2mt, 0));
     if ( !p2m_is_ram(p2mt) )
         return -EINVAL;
     if ( p2m_is_paging(p2mt) )
     {
-        p2m_mem_paging_populate(d, gmfn);
+        p2m_mem_paging_populate(p2m, gmfn);
         return -ENOENT;
     }
     if ( p2m_is_shared(p2mt) )
@@ -958,9 +959,10 @@ bool_t hvm_hap_nested_page_fault(unsigne
 {
     p2m_type_t p2mt;
     mfn_t mfn;
-    struct domain *d = current->domain;
-
-    mfn = gfn_to_mfn_guest(d,
gfn, &p2mt); + struct vcpu *v = current; + struct p2m_domain *p2m = p2m_get_hostp2m(v->domain); + + mfn = gfn_to_mfn_guest(p2m, gfn, &p2mt); /* * If this GFN is emulated MMIO or marked as read-only, pass the fault @@ -975,12 +977,12 @@ bool_t hvm_hap_nested_page_fault(unsigne /* Check if the page has been paged out */ if ( p2m_is_paged(p2mt) || (p2mt == p2m_ram_paging_out) ) - p2m_mem_paging_populate(d, gfn); + p2m_mem_paging_populate(p2m, gfn); /* Mem sharing: unshare the page and try again */ if ( p2mt == p2m_ram_shared ) { - mem_sharing_unshare_page(d, gfn, 0); + mem_sharing_unshare_page(p2m, gfn, 0); return 1; } @@ -992,8 +994,8 @@ bool_t hvm_hap_nested_page_fault(unsigne * a large page, we do not change other pages type within that large * page. */ - paging_mark_dirty(d, mfn_x(mfn)); - p2m_change_type(d, gfn, p2m_ram_logdirty, p2m_ram_rw); + paging_mark_dirty(v->domain, mfn_x(mfn)); + p2m_change_type(p2m, gfn, p2m_ram_logdirty, p2m_ram_rw); return 1; } @@ -1081,6 +1083,7 @@ int hvm_set_cr0(unsigned long value) { struct vcpu *v = current; p2m_type_t p2mt; + struct p2m_domain *p2m = p2m_get_hostp2m(v->domain); unsigned long gfn, mfn, old_value = v->arch.hvm_vcpu.guest_cr[0]; HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx", value); @@ -1116,7 +1119,7 @@ int hvm_set_cr0(unsigned long value) { /* The guest CR3 must be pointing to the guest physical. */ gfn = v->arch.hvm_vcpu.guest_cr[3]>>PAGE_SHIFT; - mfn = mfn_x(gfn_to_mfn(v->domain, gfn, &p2mt)); + mfn = mfn_x(gfn_to_mfn(p2m, gfn, &p2mt)); if ( !p2m_is_ram(p2mt) || !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain)) { @@ -1203,7 +1206,8 @@ int hvm_set_cr3(unsigned long value) { /* Shadow-mode CR3 change. Check PDBR and update refcounts. */ HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value); - mfn = mfn_x(gfn_to_mfn(v->domain, value >> PAGE_SHIFT, &p2mt)); + mfn = mfn_x(gfn_to_mfn(p2m_get_hostp2m(v->domain), + value >> PAGE_SHIFT, &p2mt)); if ( !p2m_is_ram(p2mt) || !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) ) goto bad_cr3; @@ -1347,6 +1351,7 @@ static void *hvm_map_entry(unsigned long p2m_type_t p2mt; uint32_t pfec; struct vcpu *v = current; + struct p2m_domain *p2m = p2m_get_hostp2m(v->domain); if ( ((va & ~PAGE_MASK) + 8) > PAGE_SIZE ) { @@ -1363,10 +1368,10 @@ static void *hvm_map_entry(unsigned long gfn = paging_gva_to_gfn(v, va, &pfec); if ( pfec == PFEC_page_paged || pfec == PFEC_page_shared ) return NULL; - mfn = mfn_x(gfn_to_mfn_unshare(v->domain, gfn, &p2mt, 0)); + mfn = mfn_x(gfn_to_mfn_unshare(p2m, gfn, &p2mt, 0)); if ( p2m_is_paging(p2mt) ) { - p2m_mem_paging_populate(v->domain, gfn); + p2m_mem_paging_populate(p2m, gfn); return NULL; } if ( p2m_is_shared(p2mt) ) @@ -1733,6 +1738,7 @@ static enum hvm_copy_result __hvm_copy( void *buf, paddr_t addr, int size, unsigned int flags, uint32_t pfec) { struct vcpu *curr = current; + struct p2m_domain *p2m = p2m_get_hostp2m(curr->domain); unsigned long gfn, mfn; p2m_type_t p2mt; char *p; @@ -1761,11 +1767,11 @@ static enum hvm_copy_result __hvm_copy( gfn = addr >> PAGE_SHIFT; } - mfn = mfn_x(gfn_to_mfn_unshare(curr->domain, gfn, &p2mt, 0)); + mfn = mfn_x(gfn_to_mfn_unshare(p2m, gfn, &p2mt, 0)); if ( p2m_is_paging(p2mt) ) { - p2m_mem_paging_populate(curr->domain, gfn); + p2m_mem_paging_populate(p2m, gfn); return HVMCOPY_gfn_paged_out; } if ( p2m_is_shared(p2mt) ) @@ -3056,6 +3062,7 @@ long do_hvm_op(unsigned long op, XEN_GUE { struct xen_hvm_modified_memory a; struct domain *d; + struct p2m_domain *p2m; unsigned long pfn; if ( copy_from_guest(&a, arg, 1) ) 
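
[Editor's note, not part of the patch] The mechanical change repeated in the hunks above and below is always the same: call sites stop passing a struct domain * to the p2m accessors and instead pass a struct p2m_domain *, usually obtained with p2m_get_hostp2m(d). The stand-alone sketch below models only that calling-convention change; the structure layouts, the field name hostp2m, and the lookup_gfn() helper are simplified stand-ins invented for illustration, not Xen's real definitions.

#include <stdio.h>

/* Simplified stand-ins for Xen's structures (illustrative only). */
struct p2m_domain {
    unsigned long max_mapped_pfn;   /* per-p2m state, as carried by the patch */
};

struct domain {
    struct p2m_domain *hostp2m;     /* host p2m; further tables can be added later */
};

/* Accessor the patch introduces at nearly every converted call site. */
static struct p2m_domain *p2m_get_hostp2m(struct domain *d)
{
    return d->hostp2m;
}

/* New-style interface: operates on a specific p2m, not on the domain. */
static unsigned long lookup_gfn(struct p2m_domain *p2m, unsigned long gfn)
{
    /* Fake translation, just to keep the example runnable. */
    return (gfn <= p2m->max_mapped_pfn) ? gfn : ~0UL;
}

int main(void)
{
    struct p2m_domain hostp2m = { .max_mapped_pfn = 0xfffff };
    struct domain d = { .hostp2m = &hostp2m };

    /* Before the patch a caller passed 'd' directly; afterwards it
     * resolves the table first and passes that. */
    printf("translated: %#lx\n", lookup_gfn(p2m_get_hostp2m(&d), 0x1234));
    return 0;
}

This is why most hunks below are one-line substitutions (d becomes p2m, or d becomes p2m_get_hostp2m(d)), occasionally preceded by a new local p2m variable at the top of the function.
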
@@ -3083,13 +3090,14 @@ long do_hvm_op(unsigned long op, XEN_GUE if ( !paging_mode_log_dirty(d) ) goto param_fail3; + p2m = p2m_get_hostp2m(d); for ( pfn = a.first_pfn; pfn < a.first_pfn + a.nr; pfn++ ) { p2m_type_t t; - mfn_t mfn = gfn_to_mfn(d, pfn, &t); + mfn_t mfn = gfn_to_mfn(p2m, pfn, &t); if ( p2m_is_paging(t) ) { - p2m_mem_paging_populate(d, pfn); + p2m_mem_paging_populate(p2m, pfn); rc = -EINVAL; goto param_fail3; @@ -3116,6 +3124,7 @@ long do_hvm_op(unsigned long op, XEN_GUE { struct xen_hvm_set_mem_type a; struct domain *d; + struct p2m_domain *p2m; unsigned long pfn; /* Interface types to internal p2m types */ @@ -3145,15 +3154,16 @@ long do_hvm_op(unsigned long op, XEN_GUE if ( a.hvmmem_type >= ARRAY_SIZE(memtype) ) goto param_fail4; + p2m = p2m_get_hostp2m(d); for ( pfn = a.first_pfn; pfn < a.first_pfn + a.nr; pfn++ ) { p2m_type_t t; p2m_type_t nt; mfn_t mfn; - mfn = gfn_to_mfn_unshare(d, pfn, &t, 0); + mfn = gfn_to_mfn_unshare(p2m, pfn, &t, 0); if ( p2m_is_paging(t) ) { - p2m_mem_paging_populate(d, pfn); + p2m_mem_paging_populate(p2m, pfn); rc = -EINVAL; goto param_fail4; @@ -3172,7 +3182,7 @@ long do_hvm_op(unsigned long op, XEN_GUE } else { - nt = p2m_change_type(d, pfn, t, memtype[a.hvmmem_type]); + nt = p2m_change_type(p2m, pfn, t, memtype[a.hvmmem_type]); if ( nt != t ) { gdprintk(XENLOG_WARNING, diff -r f96f6f2cd19a -r 3f48b73b0a30 xen/arch/x86/hvm/mtrr.c --- a/xen/arch/x86/hvm/mtrr.c +++ b/xen/arch/x86/hvm/mtrr.c @@ -399,7 +399,7 @@ uint32_t get_pat_flags(struct vcpu *v, { struct domain *d = v->domain; p2m_type_t p2mt; - gfn_to_mfn(d, paddr_to_pfn(gpaddr), &p2mt); + gfn_to_mfn(p2m_get_hostp2m(d), paddr_to_pfn(gpaddr), &p2mt); if (p2m_is_ram(p2mt)) gdprintk(XENLOG_WARNING, "Conflict occurs for a given guest l1e flags:%x " diff -r f96f6f2cd19a -r 3f48b73b0a30 xen/arch/x86/hvm/stdvga.c --- a/xen/arch/x86/hvm/stdvga.c +++ b/xen/arch/x86/hvm/stdvga.c @@ -469,6 +469,7 @@ static int mmio_move(struct hvm_hw_stdvg int i; int sign = p->df ? -1 : 1; p2m_type_t p2mt; + struct p2m_domain *p2m = p2m_get_hostp2m(current->domain); if ( p->data_is_ptr ) { @@ -481,8 +482,7 @@ static int mmio_move(struct hvm_hw_stdvg if ( hvm_copy_to_guest_phys(data, &tmp, p->size) != HVMCOPY_okay ) { - (void)gfn_to_mfn(current->domain, - data >> PAGE_SHIFT, &p2mt); + (void)gfn_to_mfn(p2m, data >> PAGE_SHIFT, &p2mt); /* * The only case we handle is vga_mem <-> vga_mem. * Anything else disables caching and leaves it to qemu-dm. 
@@ -504,8 +504,7 @@ static int mmio_move(struct hvm_hw_stdvg if ( hvm_copy_from_guest_phys(&tmp, data, p->size) != HVMCOPY_okay ) { - (void)gfn_to_mfn(current->domain, - data >> PAGE_SHIFT, &p2mt); + (void)gfn_to_mfn(p2m, data >> PAGE_SHIFT, &p2mt); if ( (p2mt != p2m_mmio_dm) || (data < VGA_MEM_BASE) || ((data + p->size) > (VGA_MEM_BASE + VGA_MEM_SIZE)) ) return 0; diff -r f96f6f2cd19a -r 3f48b73b0a30 xen/arch/x86/hvm/svm/svm.c --- a/xen/arch/x86/hvm/svm/svm.c +++ b/xen/arch/x86/hvm/svm/svm.c @@ -228,7 +228,7 @@ static int svm_vmcb_restore(struct vcpu { if ( c->cr0 & X86_CR0_PG ) { - mfn = mfn_x(gfn_to_mfn(v->domain, c->cr3 >> PAGE_SHIFT, &p2mt)); + mfn = mfn_x(gfn_to_mfn(p2m, c->cr3 >> PAGE_SHIFT, &p2mt)); if ( !p2m_is_ram(p2mt) || !get_page(mfn_to_page(mfn), v->domain) ) { gdprintk(XENLOG_ERR, "Invalid CR3 value=0x%"PRIx64"\n", @@ -1536,7 +1536,9 @@ static void svm_do_nested_pgfault(paddr_ unsigned long gfn = gpa >> PAGE_SHIFT; mfn_t mfn; p2m_type_t p2mt; - struct domain *d = current->domain; + struct p2m_domain *p2m; + + p2m = p2m_get_hostp2m(current->domain); if ( tb_init_done ) { @@ -1549,7 +1551,7 @@ static void svm_do_nested_pgfault(paddr_ _d.gpa = gpa; _d.qualification = 0; - _d.mfn = mfn_x(gfn_to_mfn_query(d, gfn, &_d.p2mt)); + _d.mfn = mfn_x(gfn_to_mfn_query(p2m, gfn, &_d.p2mt)); __trace_var(TRC_HVM_NPF, 0, sizeof(_d), (unsigned char *)&_d); } @@ -1558,10 +1560,10 @@ static void svm_do_nested_pgfault(paddr_ return; /* Everything else is an error. */ - mfn = gfn_to_mfn_guest(d, gfn, &p2mt); + mfn = gfn_to_mfn_guest(p2m, gfn, &p2mt); gdprintk(XENLOG_ERR, "SVM violation gpa %#"PRIpaddr", mfn %#lx, type %i\n", gpa, mfn_x(mfn), p2mt); - domain_crash(d); + domain_crash(current->domain); } static void svm_fpu_dirty_intercept(void) diff -r f96f6f2cd19a -r 3f48b73b0a30 xen/arch/x86/hvm/vmx/vmx.c --- a/xen/arch/x86/hvm/vmx/vmx.c +++ b/xen/arch/x86/hvm/vmx/vmx.c @@ -491,7 +491,8 @@ static int vmx_restore_cr0_cr3( { if ( cr0 & X86_CR0_PG ) { - mfn = mfn_x(gfn_to_mfn(v->domain, cr3 >> PAGE_SHIFT, &p2mt)); + mfn = mfn_x(gfn_to_mfn(p2m_get_hostp2m(v->domain), + cr3 >> PAGE_SHIFT, &p2mt)); if ( !p2m_is_ram(p2mt) || !get_page(mfn_to_page(mfn), v->domain) ) { gdprintk(XENLOG_ERR, "Invalid CR3 value=0x%lx\n", cr3); @@ -1007,7 +1008,8 @@ static void vmx_load_pdptrs(struct vcpu if ( cr3 & 0x1fUL ) goto crash; - mfn = mfn_x(gfn_to_mfn(v->domain, cr3 >> PAGE_SHIFT, &p2mt)); + mfn = mfn_x(gfn_to_mfn(p2m_get_hostp2m(v->domain), + cr3 >> PAGE_SHIFT, &p2mt)); if ( !p2m_is_ram(p2mt) ) goto crash; @@ -1226,7 +1228,7 @@ void ept_sync_domain(struct domain *d) return; ASSERT(local_irq_is_enabled()); - ASSERT(p2m_locked_by_me(d->arch.p2m)); + ASSERT(p2m_locked_by_me(p2m_get_hostp2m(d))); /* * Flush active cpus synchronously. 
Flush others the next time this domain @@ -1345,7 +1347,7 @@ static void vmx_set_uc_mode(struct vcpu { if ( paging_mode_hap(v->domain) ) ept_change_entry_emt_with_range( - v->domain, 0, v->domain->arch.p2m->max_mapped_pfn); + v->domain, 0, p2m_get_hostp2m(v->domain)->max_mapped_pfn); hvm_asid_flush_vcpu(v); } @@ -1915,7 +1917,8 @@ static int vmx_alloc_vlapic_mapping(stru return -ENOMEM; share_xen_page_with_guest(virt_to_page(apic_va), d, XENSHARE_writable); set_mmio_p2m_entry( - d, paddr_to_pfn(APIC_DEFAULT_PHYS_BASE), _mfn(virt_to_mfn(apic_va))); + p2m_get_hostp2m(d), paddr_to_pfn(APIC_DEFAULT_PHYS_BASE), + _mfn(virt_to_mfn(apic_va))); d->arch.hvm_domain.vmx.apic_access_mfn = virt_to_mfn(apic_va); return 0; @@ -2123,6 +2126,7 @@ static void ept_handle_violation(unsigne unsigned long gla, gfn = gpa >> PAGE_SHIFT; mfn_t mfn; p2m_type_t p2mt; + struct p2m_domain *p2m = p2m_get_hostp2m(current->domain); if ( tb_init_done ) { @@ -2135,7 +2139,7 @@ static void ept_handle_violation(unsigne _d.gpa = gpa; _d.qualification = qualification; - _d.mfn = mfn_x(gfn_to_mfn_query(current->domain, gfn, &_d.p2mt)); + _d.mfn = mfn_x(gfn_to_mfn_query(p2m, gfn, &_d.p2mt)); __trace_var(TRC_HVM_NPF, 0, sizeof(_d), (unsigned char *)&_d); } @@ -2145,7 +2149,7 @@ static void ept_handle_violation(unsigne return; /* Everything else is an error. */ - mfn = gfn_to_mfn_guest(current->domain, gfn, &p2mt); + mfn = gfn_to_mfn_guest(p2m, gfn, &p2mt); gdprintk(XENLOG_ERR, "EPT violation %#lx (%c%c%c/%c%c%c), " "gpa %#"PRIpaddr", mfn %#lx, type %i.\n", qualification, diff -r f96f6f2cd19a -r 3f48b73b0a30 xen/arch/x86/mm.c --- a/xen/arch/x86/mm.c +++ b/xen/arch/x86/mm.c @@ -363,7 +363,7 @@ int page_is_ram_type(unsigned long mfn, unsigned long domain_get_maximum_gpfn(struct domain *d) { if ( is_hvm_domain(d) ) - return d->arch.p2m->max_mapped_pfn; + return p2m_get_hostp2m(d)->max_mapped_pfn; /* NB. PV guests specify nr_pfns rather than max_pfn so we adjust here. */ return arch_get_max_pfn(d) - 1; } @@ -1734,7 +1734,8 @@ static int mod_l1_entry(l1_pgentry_t *pl if ( l1e_get_flags(nl1e) & _PAGE_PRESENT ) { /* Translate foreign guest addresses. 
*/ - mfn = mfn_x(gfn_to_mfn(pg_dom, l1e_get_pfn(nl1e), &p2mt)); + mfn = mfn_x(gfn_to_mfn(p2m_get_hostp2m(pg_dom), + l1e_get_pfn(nl1e), &p2mt)); if ( !p2m_is_ram(p2mt) || unlikely(mfn == INVALID_MFN) ) return 0; ASSERT((mfn & ~(PADDR_MASK >> PAGE_SHIFT)) == 0); @@ -3028,8 +3029,8 @@ int do_mmu_update( struct page_info *page; int rc = 0, okay = 1, i = 0; unsigned int cmd, done = 0, pt_dom; - struct domain *d = current->domain, *pt_owner = d, *pg_owner; struct vcpu *v = current; + struct domain *d = v->domain, *pt_owner = d, *pg_owner; struct domain_mmap_cache mapcache; if ( unlikely(count & MMU_UPDATE_PREEMPTED) ) @@ -3113,13 +3114,13 @@ int do_mmu_update( req.ptr -= cmd; gmfn = req.ptr >> PAGE_SHIFT; - mfn = mfn_x(gfn_to_mfn(pt_owner, gmfn, &p2mt)); + mfn = mfn_x(gfn_to_mfn(p2m_get_hostp2m(pt_owner), gmfn, &p2mt)); if ( !p2m_is_valid(p2mt) ) mfn = INVALID_MFN; if ( p2m_is_paged(p2mt) ) { - p2m_mem_paging_populate(pg_owner, gmfn); + p2m_mem_paging_populate(p2m_get_hostp2m(pg_owner), gmfn); rc = -ENOENT; break; @@ -3144,12 +3145,13 @@ int do_mmu_update( { l1_pgentry_t l1e = l1e_from_intpte(req.val); p2m_type_t l1e_p2mt; - gfn_to_mfn(pg_owner, l1e_get_pfn(l1e), &l1e_p2mt); + gfn_to_mfn(p2m_get_hostp2m(pg_owner), + l1e_get_pfn(l1e), &l1e_p2mt); if ( p2m_is_paged(l1e_p2mt) ) { - p2m_mem_paging_populate(pg_owner, l1e_get_pfn(l1e)); - + p2m_mem_paging_populate(p2m_get_hostp2m(pg_owner), + l1e_get_pfn(l1e)); rc = -ENOENT; break; } @@ -3166,7 +3168,7 @@ int do_mmu_update( /* Unshare the page for RW foreign mappings */ if(l1e_get_flags(l1e) & _PAGE_RW) { - rc = mem_sharing_unshare_page(pg_owner, + rc = mem_sharing_unshare_page(p2m_get_hostp2m(pg_owner), l1e_get_pfn(l1e), 0); if(rc) break; @@ -3182,12 +3184,12 @@ int do_mmu_update( { l2_pgentry_t l2e = l2e_from_intpte(req.val); p2m_type_t l2e_p2mt; - gfn_to_mfn(pg_owner, l2e_get_pfn(l2e), &l2e_p2mt); + gfn_to_mfn(p2m_get_hostp2m(pg_owner), l2e_get_pfn(l2e), &l2e_p2mt); if ( p2m_is_paged(l2e_p2mt) ) { - p2m_mem_paging_populate(pg_owner, l2e_get_pfn(l2e)); - + p2m_mem_paging_populate(p2m_get_hostp2m(pg_owner), + l2e_get_pfn(l2e)); rc = -ENOENT; break; } @@ -3212,12 +3214,12 @@ int do_mmu_update( { l3_pgentry_t l3e = l3e_from_intpte(req.val); p2m_type_t l3e_p2mt; - gfn_to_mfn(pg_owner, l3e_get_pfn(l3e), &l3e_p2mt); + gfn_to_mfn(p2m_get_hostp2m(pg_owner), l3e_get_pfn(l3e), &l3e_p2mt); if ( p2m_is_paged(l3e_p2mt) ) { - p2m_mem_paging_populate(pg_owner, l3e_get_pfn(l3e)); - + p2m_mem_paging_populate(p2m_get_hostp2m(pg_owner), + l3e_get_pfn(l3e)); rc = -ENOENT; break; } @@ -3243,12 +3245,13 @@ int do_mmu_update( { l4_pgentry_t l4e = l4e_from_intpte(req.val); p2m_type_t l4e_p2mt; - gfn_to_mfn(pg_owner, l4e_get_pfn(l4e), &l4e_p2mt); + gfn_to_mfn(p2m_get_hostp2m(pg_owner), + l4e_get_pfn(l4e), &l4e_p2mt); if ( p2m_is_paged(l4e_p2mt) ) { - p2m_mem_paging_populate(pg_owner, l4e_get_pfn(l4e)); - + p2m_mem_paging_populate(p2m_get_hostp2m(pg_owner), + l4e_get_pfn(l4e)); rc = -ENOENT; break; } @@ -3630,8 +3633,8 @@ static int create_grant_p2m_mapping(uint p2mt = p2m_grant_map_ro; else p2mt = p2m_grant_map_rw; - rc = guest_physmap_add_entry(current->domain, addr >> PAGE_SHIFT, - frame, 0, p2mt); + rc = guest_physmap_add_entry(p2m_get_hostp2m(current->domain), + addr >> PAGE_SHIFT, frame, 0, p2mt); if ( rc ) return GNTST_general_error; else @@ -3669,11 +3672,12 @@ static int replace_grant_p2m_mapping( unsigned long gfn = (unsigned long)(addr >> PAGE_SHIFT); p2m_type_t type; mfn_t old_mfn; + struct domain *d = current->domain; if ( new_addr != 0 || (flags & 
GNTMAP_contains_pte) ) return GNTST_general_error; - old_mfn = gfn_to_mfn(current->domain, gfn, &type); + old_mfn = gfn_to_mfn(p2m_get_hostp2m(d), gfn, &type); if ( !p2m_is_grant(type) || mfn_x(old_mfn) != frame ) { gdprintk(XENLOG_WARNING, @@ -3681,7 +3685,7 @@ static int replace_grant_p2m_mapping( type, mfn_x(old_mfn), frame); return GNTST_general_error; } - guest_physmap_remove_page(current->domain, gfn, frame, 0); + guest_physmap_remove_page(d, gfn, frame, 0); return GNTST_okay; } @@ -4288,7 +4292,8 @@ long arch_memory_op(int op, XEN_GUEST_HA { p2m_type_t p2mt; - xatp.idx = mfn_x(gfn_to_mfn_unshare(d, xatp.idx, &p2mt, 0)); + xatp.idx = mfn_x(gfn_to_mfn_unshare(p2m_get_hostp2m(d), + xatp.idx, &p2mt, 0)); /* If the page is still shared, exit early */ if ( p2m_is_shared(p2mt) ) { @@ -4478,6 +4483,7 @@ long arch_memory_op(int op, XEN_GUEST_HA { xen_pod_target_t target; struct domain *d; + struct p2m_domain *p2m; /* Support DOMID_SELF? */ if ( !IS_PRIV(current->domain) ) @@ -4501,9 +4507,10 @@ long arch_memory_op(int op, XEN_GUEST_HA rc = p2m_pod_set_mem_target(d, target.target_pages); } + p2m = p2m_get_hostp2m(d); target.tot_pages = d->tot_pages; - target.pod_cache_pages = d->arch.p2m->pod.count; - target.pod_entries = d->arch.p2m->pod.entry_count; + target.pod_cache_pages = p2m->pod.count; + target.pod_entries = p2m->pod.entry_count; if ( copy_to_guest(arg, &target, 1) ) { diff -r f96f6f2cd19a -r 3f48b73b0a30 xen/arch/x86/mm/guest_walk.c --- a/xen/arch/x86/mm/guest_walk.c +++ b/xen/arch/x86/mm/guest_walk.c @@ -86,17 +86,17 @@ static uint32_t set_ad_bits(void *guest_ return 0; } -static inline void *map_domain_gfn(struct domain *d, +static inline void *map_domain_gfn(struct p2m_domain *p2m, gfn_t gfn, mfn_t *mfn, p2m_type_t *p2mt, uint32_t *rc) { /* Translate the gfn, unsharing if shared */ - *mfn = gfn_to_mfn_unshare(d, gfn_x(gfn), p2mt, 0); + *mfn = gfn_to_mfn_unshare(p2m, gfn_x(gfn), p2mt, 0); if ( p2m_is_paging(*p2mt) ) { - p2m_mem_paging_populate(d, gfn_x(gfn)); + p2m_mem_paging_populate(p2m, gfn_x(gfn)); *rc = _PAGE_PAGED; return NULL; @@ -123,6 +123,7 @@ guest_walk_tables(struct vcpu *v, unsign uint32_t pfec, mfn_t top_mfn, void *top_map) { struct domain *d = v->domain; + struct p2m_domain *p2m = p2m_get_hostp2m(d); p2m_type_t p2mt; guest_l1e_t *l1p = NULL; guest_l2e_t *l2p = NULL; @@ -154,7 +155,7 @@ guest_walk_tables(struct vcpu *v, unsign if ( rc & _PAGE_PRESENT ) goto out; /* Map the l3 table */ - l3p = map_domain_gfn(d, + l3p = map_domain_gfn(p2m, guest_l4e_get_gfn(gw->l4e), &gw->l3mfn, &p2mt, @@ -181,7 +182,7 @@ guest_walk_tables(struct vcpu *v, unsign #endif /* PAE or 64... */ /* Map the l2 table */ - l2p = map_domain_gfn(d, + l2p = map_domain_gfn(p2m, guest_l3e_get_gfn(gw->l3e), &gw->l2mfn, &p2mt, @@ -237,7 +238,7 @@ guest_walk_tables(struct vcpu *v, unsign else { /* Not a superpage: carry on and find the l1e. 
*/ - l1p = map_domain_gfn(d, + l1p = map_domain_gfn(p2m, guest_l2e_get_gfn(gw->l2e), &gw->l1mfn, &p2mt, diff -r f96f6f2cd19a -r 3f48b73b0a30 xen/arch/x86/mm/hap/guest_walk.c --- a/xen/arch/x86/mm/hap/guest_walk.c +++ b/xen/arch/x86/mm/hap/guest_walk.c @@ -43,13 +43,14 @@ unsigned long hap_gva_to_gfn(GUEST_PAGIN void *top_map; p2m_type_t p2mt; walk_t gw; + struct p2m_domain *p2m = p2m_get_hostp2m(v->domain); /* Get the top-level table's MFN */ cr3 = v->arch.hvm_vcpu.guest_cr[3]; - top_mfn = gfn_to_mfn_unshare(v->domain, cr3 >> PAGE_SHIFT, &p2mt, 0); + top_mfn = gfn_to_mfn_unshare(p2m, cr3 >> PAGE_SHIFT, &p2mt, 0); if ( p2m_is_paging(p2mt) ) { - p2m_mem_paging_populate(v->domain, cr3 >> PAGE_SHIFT); + p2m_mem_paging_populate(p2m, cr3 >> PAGE_SHIFT); pfec[0] = PFEC_page_paged; return INVALID_GFN; @@ -78,10 +79,10 @@ unsigned long hap_gva_to_gfn(GUEST_PAGIN if ( missing == 0 ) { gfn_t gfn = guest_l1e_get_gfn(gw.l1e); - gfn_to_mfn_unshare(v->domain, gfn_x(gfn), &p2mt, 0); + gfn_to_mfn_unshare(p2m, gfn_x(gfn), &p2mt, 0); if ( p2m_is_paging(p2mt) ) { - p2m_mem_paging_populate(v->domain, gfn_x(gfn)); + p2m_mem_paging_populate(p2m, gfn_x(gfn)); pfec[0] = PFEC_page_paged; return INVALID_GFN; @@ -130,4 +131,3 @@ unsigned long hap_gva_to_gfn(GUEST_PAGIN * indent-tabs-mode: nil * End: */ - diff -r f96f6f2cd19a -r 3f48b73b0a30 xen/arch/x86/mm/hap/hap.c --- a/xen/arch/x86/mm/hap/hap.c +++ b/xen/arch/x86/mm/hap/hap.c @@ -70,7 +70,7 @@ static int hap_enable_vram_tracking(stru /* set l1e entries of P2M table to be read-only. */ for (i = dirty_vram->begin_pfn; i < dirty_vram->end_pfn; i++) - p2m_change_type(d, i, p2m_ram_rw, p2m_ram_logdirty); + p2m_change_type(p2m_get_hostp2m(d), i, p2m_ram_rw, p2m_ram_logdirty); flush_tlb_mask(&d->domain_dirty_cpumask); return 0; @@ -90,7 +90,7 @@ static int hap_disable_vram_tracking(str /* set l1e entries of P2M table with normal mode */ for (i = dirty_vram->begin_pfn; i < dirty_vram->end_pfn; i++) - p2m_change_type(d, i, p2m_ram_logdirty, p2m_ram_rw); + p2m_change_type(p2m_get_hostp2m(d), i, p2m_ram_logdirty, p2m_ram_rw); flush_tlb_mask(&d->domain_dirty_cpumask); return 0; @@ -106,7 +106,7 @@ static void hap_clean_vram_tracking(stru /* set l1e entries of P2M table to be read-only. */ for (i = dirty_vram->begin_pfn; i < dirty_vram->end_pfn; i++) - p2m_change_type(d, i, p2m_ram_rw, p2m_ram_logdirty); + p2m_change_type(p2m_get_hostp2m(d), i, p2m_ram_rw, p2m_ram_logdirty); flush_tlb_mask(&d->domain_dirty_cpumask); } @@ -200,7 +200,8 @@ static int hap_enable_log_dirty(struct d hap_unlock(d); /* set l1e entries of P2M table to be read-only. */ - p2m_change_entry_type_global(d, p2m_ram_rw, p2m_ram_logdirty); + p2m_change_entry_type_global(p2m_get_hostp2m(d), + p2m_ram_rw, p2m_ram_logdirty); flush_tlb_mask(&d->domain_dirty_cpumask); return 0; } @@ -212,14 +213,16 @@ static int hap_disable_log_dirty(struct hap_unlock(d); /* set l1e entries of P2M table with normal mode */ - p2m_change_entry_type_global(d, p2m_ram_logdirty, p2m_ram_rw); + p2m_change_entry_type_global(p2m_get_hostp2m(d), + p2m_ram_logdirty, p2m_ram_rw); return 0; } static void hap_clean_dirty_bitmap(struct domain *d) { /* set l1e entries of P2M table to be read-only. 
*/ - p2m_change_entry_type_global(d, p2m_ram_rw, p2m_ram_logdirty); + p2m_change_entry_type_global(p2m_get_hostp2m(d), + p2m_ram_rw, p2m_ram_logdirty); flush_tlb_mask(&d->domain_dirty_cpumask); } @@ -273,8 +276,9 @@ static void hap_free(struct domain *d, m page_list_add_tail(pg, &d->arch.paging.hap.freelist); } -static struct page_info *hap_alloc_p2m_page(struct domain *d) +static struct page_info *hap_alloc_p2m_page(struct p2m_domain *p2m) { + struct domain *d = p2m->domain; struct page_info *pg; hap_lock(d); @@ -312,8 +316,9 @@ static struct page_info *hap_alloc_p2m_p return pg; } -static void hap_free_p2m_page(struct domain *d, struct page_info *pg) +static void hap_free_p2m_page(struct p2m_domain *p2m, struct page_info *pg) { + struct domain *d = p2m->domain; hap_lock(d); ASSERT(page_get_owner(pg) == d); /* Should have just the one ref we gave it in alloc_p2m_page() */ @@ -580,10 +585,11 @@ int hap_enable(struct domain *d, u32 mod } } - /* allocate P2m table */ + /* allocate p2m table */ if ( mode & PG_translate ) { - rv = p2m_alloc_table(d, hap_alloc_p2m_page, hap_free_p2m_page); + rv = p2m_alloc_table(p2m_get_hostp2m(d), + hap_alloc_p2m_page, hap_free_p2m_page); if ( rv != 0 ) goto out; } @@ -600,7 +606,7 @@ void hap_final_teardown(struct domain *d if ( d->arch.paging.hap.total_pages != 0 ) hap_teardown(d); - p2m_teardown(d); + p2m_teardown(p2m_get_hostp2m(d)); ASSERT(d->arch.paging.hap.p2m_pages == 0); } @@ -700,9 +706,11 @@ void hap_vcpu_init(struct vcpu *v) static int hap_page_fault(struct vcpu *v, unsigned long va, struct cpu_user_regs *regs) { + struct domain *d = v->domain; + HAP_ERROR("Intercepted a guest #PF (%u:%u) with HAP enabled.\n", - v->domain->domain_id, v->vcpu_id); - domain_crash(v->domain); + d->domain_id, v->vcpu_id); + domain_crash(d); return 0; } @@ -822,6 +830,7 @@ static unsigned long hap_gva_to_gfn_real return ((paddr_t)gva >> PAGE_SHIFT); } + /* Entry points into this mode of the hap code. */ static const struct paging_mode hap_paging_real_mode = { .page_fault = hap_page_fault, @@ -871,5 +880,3 @@ static const struct paging_mode hap_pagi * indent-tabs-mode: nil * End: */ - - diff -r f96f6f2cd19a -r 3f48b73b0a30 xen/arch/x86/mm/hap/p2m-ept.c --- a/xen/arch/x86/mm/hap/p2m-ept.c +++ b/xen/arch/x86/mm/hap/p2m-ept.c @@ -31,23 +31,23 @@ #include /* Non-ept "lock-and-check" wrapper */ -static int ept_pod_check_and_populate(struct domain *d, unsigned long gfn, +static int ept_pod_check_and_populate(struct p2m_domain *p2m, unsigned long gfn, ept_entry_t *entry, int order, p2m_query_t q) { int r; - p2m_lock(d->arch.p2m); + p2m_lock(p2m); /* Check to make sure this is still PoD */ if ( entry->avail1 != p2m_populate_on_demand ) { - p2m_unlock(d->arch.p2m); + p2m_unlock(p2m); return 0; } - r = p2m_pod_demand_populate(d, gfn, order, q); + r = p2m_pod_demand_populate(p2m, gfn, order, q); - p2m_unlock(d->arch.p2m); + p2m_unlock(p2m); return r; } @@ -93,11 +93,11 @@ static void ept_p2m_type_to_flags(ept_en #define GUEST_TABLE_POD_PAGE 3 /* Fill in middle levels of ept table */ -static int ept_set_middle_entry(struct domain *d, ept_entry_t *ept_entry) +static int ept_set_middle_entry(struct p2m_domain *p2m, ept_entry_t *ept_entry) { struct page_info *pg; - pg = p2m_alloc_ptp(d, 0); + pg = p2m_alloc_ptp(p2m, 0); if ( pg == NULL ) return 0; @@ -127,7 +127,7 @@ static int ept_set_middle_entry(struct d * GUEST_TABLE_POD: * The next entry is marked populate-on-demand. 
*/ -static int ept_next_level(struct domain *d, bool_t read_only, +static int ept_next_level(struct p2m_domain *p2m, bool_t read_only, ept_entry_t **table, unsigned long *gfn_remainder, u32 shift) { @@ -147,7 +147,7 @@ static int ept_next_level(struct domain if ( read_only ) return GUEST_TABLE_MAP_FAILED; - if ( !ept_set_middle_entry(d, ept_entry) ) + if ( !ept_set_middle_entry(p2m, ept_entry) ) return GUEST_TABLE_MAP_FAILED; } @@ -165,7 +165,7 @@ static int ept_next_level(struct domain } /* It's super page before and we should break down it now. */ -static int ept_split_large_page(struct domain *d, +static int ept_split_large_page(struct p2m_domain *p2m, ept_entry_t **table, u32 *index, unsigned long gfn, int level) { @@ -182,7 +182,7 @@ static int ept_split_large_page(struct d * before a leaf super entry */ - if ( !ept_set_middle_entry(d, &temp_ept_entry) ) + if ( !ept_set_middle_entry(p2m, &temp_ept_entry) ) return 0; /* split the super page to small next level pages */ @@ -224,7 +224,7 @@ static int ept_split_large_page(struct d * by observing whether any gfn->mfn translations are modified. */ static int -ept_set_entry(struct domain *d, unsigned long gfn, mfn_t mfn, +ept_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn, unsigned int order, p2m_type_t p2mt) { ept_entry_t *table = NULL; @@ -240,7 +240,6 @@ ept_set_entry(struct domain *d, unsigned int direct_mmio = (p2mt == p2m_mmio_direct); uint8_t ipat = 0; int need_modify_vtd_table = 1; - struct p2m_domain *p2m = p2m_get_hostp2m(d); if ( order != 0 ) if ( (gfn & ((1UL << order) - 1)) ) @@ -252,7 +251,8 @@ ept_set_entry(struct domain *d, unsigned for ( i = EPT_DEFAULT_GAW; i > walk_level; i-- ) { - ret = ept_next_level(d, 0, &table, &gfn_remainder, i * EPT_TABLE_ORDER); + ret = ept_next_level(p2m, 0, &table, &gfn_remainder, + i * EPT_TABLE_ORDER); if ( !ret ) goto out; else if ( ret != GUEST_TABLE_NORMAL_PAGE ) @@ -277,7 +277,7 @@ ept_set_entry(struct domain *d, unsigned if ( mfn_valid(mfn_x(mfn)) || direct_mmio || p2m_is_paged(p2mt) || (p2mt == p2m_ram_paging_in_start) ) { - ept_entry->emt = epte_get_entry_emt(d, gfn, mfn, &ipat, + ept_entry->emt = epte_get_entry_emt(p2m->domain, gfn, mfn, &ipat, direct_mmio); ept_entry->ipat = ipat; ept_entry->sp_avail = order ? 
1 : 0; @@ -305,7 +305,7 @@ ept_set_entry(struct domain *d, unsigned num = cpu_vmx_ept_super_page_level_limit; for ( level = split_level; level > num ; level-- ) { - rv = ept_split_large_page(d, &table, &index, gfn, level); + rv = ept_split_large_page(p2m, &table, &index, gfn, level); if ( !rv ) goto out; } @@ -313,7 +313,7 @@ ept_set_entry(struct domain *d, unsigned split_ept_entry = table + index; split_ept_entry->avail1 = p2mt; ept_p2m_type_to_flags(split_ept_entry, p2mt); - split_ept_entry->emt = epte_get_entry_emt(d, gfn, mfn, &ipat, + split_ept_entry->emt = epte_get_entry_emt(p2m->domain, gfn, mfn, &ipat, direct_mmio); split_ept_entry->ipat = ipat; @@ -325,8 +325,8 @@ ept_set_entry(struct domain *d, unsigned /* Track the highest gfn for which we have ever had a valid mapping */ if ( mfn_valid(mfn_x(mfn)) - && (gfn + (1UL << order) - 1 > d->arch.p2m->max_mapped_pfn) ) - d->arch.p2m->max_mapped_pfn = gfn + (1UL << order) - 1; + && (gfn + (1UL << order) - 1 > p2m->max_mapped_pfn) ) + p2m->max_mapped_pfn = gfn + (1UL << order) - 1; /* Success */ rv = 1; @@ -334,30 +334,30 @@ ept_set_entry(struct domain *d, unsigned out: unmap_domain_page(table); - ept_sync_domain(d); + ept_sync_domain(p2m->domain); /* Now the p2m table is not shared with vt-d page table */ - if ( rv && iommu_enabled && need_iommu(d) && need_modify_vtd_table ) + if ( rv && iommu_enabled && need_iommu(p2m->domain) && need_modify_vtd_table ) { if ( p2mt == p2m_ram_rw ) { if ( order == EPT_TABLE_ORDER ) { for ( i = 0; i < (1 << order); i++ ) - iommu_map_page(d, gfn - offset + i, mfn_x(mfn) - offset + i); + iommu_map_page(p2m->domain, gfn - offset + i, mfn_x(mfn) - offset + i); } else if ( !order ) - iommu_map_page(d, gfn, mfn_x(mfn)); + iommu_map_page(p2m->domain, gfn, mfn_x(mfn)); } else { if ( order == EPT_TABLE_ORDER ) { for ( i = 0; i < (1 << order); i++ ) - iommu_unmap_page(d, gfn - offset + i); + iommu_unmap_page(p2m->domain, gfn - offset + i); } else if ( !order ) - iommu_unmap_page(d, gfn); + iommu_unmap_page(p2m->domain, gfn); } } @@ -365,11 +365,11 @@ out: } /* Read ept p2m entries */ -static mfn_t ept_get_entry(struct domain *d, unsigned long gfn, p2m_type_t *t, +static mfn_t ept_get_entry(struct p2m_domain *p2m, unsigned long gfn, p2m_type_t *t, p2m_query_t q) { ept_entry_t *table = - map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d))))); + map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m)))); unsigned long gfn_remainder = gfn; ept_entry_t *ept_entry; u32 index; @@ -380,7 +380,7 @@ static mfn_t ept_get_entry(struct domain *t = p2m_mmio_dm; /* This pfn is higher than the highest the p2m map currently holds */ - if ( gfn > d->arch.p2m->max_mapped_pfn ) + if ( gfn > p2m->max_mapped_pfn ) goto out; /* Should check if gfn obeys GAW here. 
*/ @@ -388,7 +388,7 @@ static mfn_t ept_get_entry(struct domain for ( i = EPT_DEFAULT_GAW; i > 0; i-- ) { retry: - ret = ept_next_level(d, 1, &table, &gfn_remainder, + ret = ept_next_level(p2m, 1, &table, &gfn_remainder, i * EPT_TABLE_ORDER); if ( !ret ) goto out; @@ -406,7 +406,7 @@ static mfn_t ept_get_entry(struct domain index = gfn_remainder >> ( i * EPT_TABLE_ORDER); ept_entry = table + index; - if ( !ept_pod_check_and_populate(d, gfn, + if ( !ept_pod_check_and_populate(p2m, gfn, ept_entry, 9, q) ) goto retry; else @@ -429,7 +429,7 @@ static mfn_t ept_get_entry(struct domain ASSERT(i == 0); - if ( ept_pod_check_and_populate(d, gfn, + if ( ept_pod_check_and_populate(p2m, gfn, ept_entry, 0, q) ) goto out; } @@ -460,10 +460,11 @@ out: /* WARNING: Only caller doesn't care about PoD pages. So this function will * always return 0 for PoD pages, not populate them. If that becomes necessary, * pass a p2m_query_t type along to distinguish. */ -static ept_entry_t ept_get_entry_content(struct domain *d, unsigned long gfn, int *level) +static ept_entry_t ept_get_entry_content(struct p2m_domain *p2m, + unsigned long gfn, int *level) { ept_entry_t *table = - map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d))))); + map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m)))); unsigned long gfn_remainder = gfn; ept_entry_t *ept_entry; ept_entry_t content = { .epte = 0 }; @@ -472,12 +473,12 @@ static ept_entry_t ept_get_entry_content int ret=0; /* This pfn is higher than the highest the p2m map currently holds */ - if ( gfn > d->arch.p2m->max_mapped_pfn ) + if ( gfn > p2m->max_mapped_pfn ) goto out; for ( i = EPT_DEFAULT_GAW; i > 0; i-- ) { - ret = ept_next_level(d, 1, &table, &gfn_remainder, + ret = ept_next_level(p2m, 1, &table, &gfn_remainder, i * EPT_TABLE_ORDER); if ( !ret || ret == GUEST_TABLE_POD_PAGE ) goto out; @@ -497,8 +498,9 @@ static ept_entry_t ept_get_entry_content void ept_walk_table(struct domain *d, unsigned long gfn) { + struct p2m_domain *p2m = p2m_get_hostp2m(d); ept_entry_t *table = - map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d))))); + map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m)))); unsigned long gfn_remainder = gfn; int i; @@ -507,10 +509,10 @@ void ept_walk_table(struct domain *d, un d->domain_id, gfn); /* This pfn is higher than the highest the p2m map currently holds */ - if ( gfn > d->arch.p2m->max_mapped_pfn ) + if ( gfn > p2m->max_mapped_pfn ) { gdprintk(XENLOG_ERR, " gfn exceeds max_mapped_pfn %lx\n", - d->arch.p2m->max_mapped_pfn); + p2m->max_mapped_pfn); goto out; } @@ -548,7 +550,7 @@ out: * To test if the new emt type is the same with old, * return 1 to not to reset ept entry. 
*/ -static int need_modify_ept_entry(struct domain *d, unsigned long gfn, +static int need_modify_ept_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn, uint8_t o_ipat, uint8_t o_emt, p2m_type_t p2mt) { @@ -556,7 +558,7 @@ static int need_modify_ept_entry(struct uint8_t emt; int direct_mmio = (p2mt == p2m_mmio_direct); - emt = epte_get_entry_emt(d, gfn, mfn, &ipat, direct_mmio); + emt = epte_get_entry_emt(p2m->domain, gfn, mfn, &ipat, direct_mmio); if ( (emt == o_emt) && (ipat == o_ipat) ) return 0; @@ -564,21 +566,23 @@ static int need_modify_ept_entry(struct return 1; } -void ept_change_entry_emt_with_range(struct domain *d, unsigned long start_gfn, +void ept_change_entry_emt_with_range(struct domain *d, + unsigned long start_gfn, unsigned long end_gfn) { unsigned long gfn; ept_entry_t e; mfn_t mfn; int order = 0; + struct p2m_domain *p2m = p2m_get_hostp2m(d); - p2m_lock(d->arch.p2m); + p2m_lock(p2m); for ( gfn = start_gfn; gfn <= end_gfn; gfn++ ) { int level = 0; uint64_t trunk = 0; - e = ept_get_entry_content(d, gfn, &level); + e = ept_get_entry_content(p2m, gfn, &level); if ( !p2m_has_emt(e.avail1) ) continue; @@ -597,9 +601,9 @@ void ept_change_entry_emt_with_range(str * Set emt for super page. */ order = level * EPT_TABLE_ORDER; - if ( need_modify_ept_entry(d, gfn, mfn, + if ( need_modify_ept_entry(p2m, gfn, mfn, e.ipat, e.emt, e.avail1) ) - ept_set_entry(d, gfn, mfn, order, e.avail1); + ept_set_entry(p2m, gfn, mfn, order, e.avail1); gfn += trunk; break; } @@ -608,11 +612,11 @@ void ept_change_entry_emt_with_range(str } else /* gfn assigned with 4k */ { - if ( need_modify_ept_entry(d, gfn, mfn, e.ipat, e.emt, e.avail1) ) - ept_set_entry(d, gfn, mfn, order, e.avail1); + if ( need_modify_ept_entry(p2m, gfn, mfn, e.ipat, e.emt, e.avail1) ) + ept_set_entry(p2m, gfn, mfn, order, e.avail1); } } - p2m_unlock(d->arch.p2m); + p2m_unlock(p2m); } /* @@ -620,7 +624,7 @@ void ept_change_entry_emt_with_range(str * to the new type. 
This is used in hardware-assisted paging to * quickly enable or diable log-dirty tracking */ -static void ept_change_entry_type_global(struct domain *d, p2m_type_t ot, +static void ept_change_entry_type_global(struct p2m_domain *p2m, p2m_type_t ot, p2m_type_t nt) { ept_entry_t *l4e; @@ -632,12 +636,12 @@ static void ept_change_entry_type_global int i2; int i1; - if ( pagetable_get_pfn(p2m_get_pagetable(p2m_get_hostp2m(d))) == 0 ) + if ( pagetable_get_pfn(p2m_get_pagetable(p2m)) == 0 ) return; BUG_ON(EPT_DEFAULT_GAW != 3); - l4e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d))))); + l4e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m)))); for (i4 = 0; i4 < EPT_PAGETABLE_ENTRIES; i4++ ) { if ( !l4e[i4].epte ) @@ -709,14 +713,15 @@ static void ept_change_entry_type_global unmap_domain_page(l4e); - ept_sync_domain(d); + ept_sync_domain(p2m->domain); } void ept_p2m_init(struct domain *d) { - d->arch.p2m->set_entry = ept_set_entry; - d->arch.p2m->get_entry = ept_get_entry; - d->arch.p2m->change_entry_type_global = ept_change_entry_type_global; + struct p2m_domain *p2m = p2m_get_hostp2m(d); + p2m->set_entry = ept_set_entry; + p2m->get_entry = ept_get_entry; + p2m->change_entry_type_global = ept_change_entry_type_global; } /* diff -r f96f6f2cd19a -r 3f48b73b0a30 xen/arch/x86/mm/mem_event.c --- a/xen/arch/x86/mm/mem_event.c +++ b/xen/arch/x86/mm/mem_event.c @@ -240,7 +240,7 @@ int mem_event_domctl(struct domain *d, x /* Get MFN of ring page */ guest_get_eff_l1e(v, ring_addr, &l1e); gfn = l1e_get_pfn(l1e); - ring_mfn = gfn_to_mfn(dom_mem_event, gfn, &p2mt); + ring_mfn = gfn_to_mfn(p2m_get_hostp2m(dom_mem_event), gfn, &p2mt); rc = -EINVAL; if ( unlikely(!mfn_valid(mfn_x(ring_mfn))) ) @@ -249,7 +249,7 @@ int mem_event_domctl(struct domain *d, x /* Get MFN of shared page */ guest_get_eff_l1e(v, shared_addr, &l1e); gfn = l1e_get_pfn(l1e); - shared_mfn = gfn_to_mfn(dom_mem_event, gfn, &p2mt); + shared_mfn = gfn_to_mfn(p2m_get_hostp2m(dom_mem_event), gfn, &p2mt); rc = -EINVAL; if ( unlikely(!mfn_valid(mfn_x(shared_mfn))) ) diff -r f96f6f2cd19a -r 3f48b73b0a30 xen/arch/x86/mm/mem_paging.c --- a/xen/arch/x86/mm/mem_paging.c +++ b/xen/arch/x86/mm/mem_paging.c @@ -29,33 +29,34 @@ int mem_paging_domctl(struct domain *d, XEN_GUEST_HANDLE(void) u_domctl) { int rc; + struct p2m_domain *p2m = p2m_get_hostp2m(d); switch( mec->op ) { case XEN_DOMCTL_MEM_EVENT_OP_PAGING_NOMINATE: { unsigned long gfn = mec->gfn; - rc = p2m_mem_paging_nominate(d, gfn); + rc = p2m_mem_paging_nominate(p2m, gfn); } break; case XEN_DOMCTL_MEM_EVENT_OP_PAGING_EVICT: { unsigned long gfn = mec->gfn; - rc = p2m_mem_paging_evict(d, gfn); + rc = p2m_mem_paging_evict(p2m, gfn); } break; case XEN_DOMCTL_MEM_EVENT_OP_PAGING_PREP: { unsigned long gfn = mec->gfn; - rc = p2m_mem_paging_prep(d, gfn); + rc = p2m_mem_paging_prep(p2m, gfn); } break; case XEN_DOMCTL_MEM_EVENT_OP_PAGING_RESUME: { - p2m_mem_paging_resume(d); + p2m_mem_paging_resume(p2m); rc = 0; } break; diff -r f96f6f2cd19a -r 3f48b73b0a30 xen/arch/x86/mm/mem_sharing.c --- a/xen/arch/x86/mm/mem_sharing.c +++ b/xen/arch/x86/mm/mem_sharing.c @@ -254,6 +254,7 @@ static void mem_sharing_audit(void) list_for_each(le, &e->gfns) { struct domain *d; + struct p2m_domain *p2m; p2m_type_t t; mfn_t mfn; @@ -265,7 +266,8 @@ static void mem_sharing_audit(void) g->domain, g->gfn, mfn_x(e->mfn)); continue; } - mfn = gfn_to_mfn(d, g->gfn, &t); + p2m = p2m_get_hostp2m(d); + mfn = gfn_to_mfn(p2m, g->gfn, &t); if(mfn_x(mfn) != mfn_x(e->mfn)) 
MEM_SHARING_DEBUG("Incorrect P2M for d=%d, PFN=%lx." "Expecting MFN=%ld, got %ld\n", @@ -380,7 +382,7 @@ int mem_sharing_debug_gfn(struct domain mfn_t mfn; struct page_info *page; - mfn = gfn_to_mfn(d, gfn, &p2mt); + mfn = gfn_to_mfn(p2m_get_hostp2m(d), gfn, &p2mt); page = mfn_to_page(mfn); printk("Debug for domain=%d, gfn=%lx, ", @@ -490,7 +492,7 @@ int mem_sharing_debug_gref(struct domain return mem_sharing_debug_gfn(d, gfn); } -int mem_sharing_nominate_page(struct domain *d, +int mem_sharing_nominate_page(struct p2m_domain *p2m, unsigned long gfn, int expected_refcnt, shr_handle_t *phandle) @@ -502,10 +504,11 @@ int mem_sharing_nominate_page(struct dom shr_handle_t handle; shr_hash_entry_t *hash_entry; struct gfn_info *gfn_info; + struct domain *d = p2m->domain; *phandle = 0UL; - mfn = gfn_to_mfn(d, gfn, &p2mt); + mfn = gfn_to_mfn(p2m, gfn, &p2mt); /* Check if mfn is valid */ ret = -EINVAL; @@ -539,7 +542,7 @@ int mem_sharing_nominate_page(struct dom } /* Change the p2m type */ - if(p2m_change_type(d, gfn, p2mt, p2m_ram_shared) != p2mt) + if(p2m_change_type(p2m, gfn, p2mt, p2m_ram_shared) != p2mt) { /* This is unlikely, as the type must have changed since we've checked * it a few lines above. @@ -602,7 +605,7 @@ int mem_sharing_share_pages(shr_handle_t list_del(&gfn->list); d = get_domain_by_id(gfn->domain); BUG_ON(!d); - BUG_ON(set_shared_p2m_entry(d, gfn->gfn, se->mfn) == 0); + BUG_ON(set_shared_p2m_entry(p2m_get_hostp2m(d), gfn->gfn, se->mfn) == 0); put_domain(d); list_add(&gfn->list, &se->gfns); put_page_and_type(cpage); @@ -621,7 +624,7 @@ err_out: return ret; } -int mem_sharing_unshare_page(struct domain *d, +int mem_sharing_unshare_page(struct p2m_domain *p2m, unsigned long gfn, uint16_t flags) { @@ -634,8 +637,9 @@ int mem_sharing_unshare_page(struct doma struct gfn_info *gfn_info = NULL; shr_handle_t handle; struct list_head *le; + struct domain *d = p2m->domain; - mfn = gfn_to_mfn(d, gfn, &p2mt); + mfn = gfn_to_mfn(p2m, gfn, &p2mt); page = mfn_to_page(mfn); handle = page->shr_handle; @@ -699,7 +703,7 @@ gfn_found: unmap_domain_page(s); unmap_domain_page(t); - ASSERT(set_shared_p2m_entry(d, gfn, page_to_mfn(page)) != 0); + ASSERT(set_shared_p2m_entry(p2m, gfn, page_to_mfn(page)) != 0); put_page_and_type(old_page); private_page_found: @@ -711,7 +715,7 @@ private_page_found: atomic_dec(&nr_saved_mfns); shr_unlock(); - if(p2m_change_type(d, gfn, p2m_ram_shared, p2m_ram_rw) != + if(p2m_change_type(p2m, gfn, p2m_ram_shared, p2m_ram_rw) != p2m_ram_shared) { printk("Could not change p2m type.\n"); @@ -743,7 +747,7 @@ int mem_sharing_domctl(struct domain *d, shr_handle_t handle; if(!mem_sharing_enabled(d)) return -EINVAL; - rc = mem_sharing_nominate_page(d, gfn, 0, &handle); + rc = mem_sharing_nominate_page(p2m_get_hostp2m(d), gfn, 0, &handle); mec->u.nominate.handle = handle; mem_sharing_audit(); } @@ -759,7 +763,8 @@ int mem_sharing_domctl(struct domain *d, return -EINVAL; if(mem_sharing_gref_to_gfn(d, gref, &gfn) < 0) return -EINVAL; - rc = mem_sharing_nominate_page(d, gfn, 3, &handle); + rc = mem_sharing_nominate_page(p2m_get_hostp2m(d), + gfn, 3, &handle); mec->u.nominate.handle = handle; mem_sharing_audit(); } diff -r f96f6f2cd19a -r 3f48b73b0a30 xen/arch/x86/mm/p2m.c --- a/xen/arch/x86/mm/p2m.c +++ b/xen/arch/x86/mm/p2m.c @@ -108,14 +108,14 @@ static unsigned long p2m_type_to_flags(p } #if P2M_AUDIT -static void audit_p2m(struct domain *d); +static void audit_p2m(struct p2m_domain *p2m); #else -# define audit_p2m(_d) do { (void)(_d); } while(0) +# define audit_p2m(_p2m) do { 
(void)(_p2m); } while(0) #endif /* P2M_AUDIT */ -// Find the next level's P2M entry, checking for out-of-range gfn's... -// Returns NULL on error. -// +/* Find the next level's P2M entry, checking for out-of-range gfn's... + * Returns NULL on error. + */ static l1_pgentry_t * p2m_find_entry(void *table, unsigned long *gfn_remainder, unsigned long gfn, u32 shift, u32 max) @@ -135,26 +135,26 @@ p2m_find_entry(void *table, unsigned lon } struct page_info * -p2m_alloc_ptp(struct domain *d, unsigned long type) +p2m_alloc_ptp(struct p2m_domain *p2m, unsigned long type) { struct page_info *pg; - pg = d->arch.p2m->alloc_page(d); + pg = p2m->alloc_page(p2m); if (pg == NULL) return NULL; - page_list_add_tail(pg, &d->arch.p2m->pages); + page_list_add_tail(pg, &p2m->pages); pg->u.inuse.type_info = type | 1 | PGT_validated; pg->count_info |= 1; return pg; } -// Walk one level of the P2M table, allocating a new table if required. -// Returns 0 on error. -// +/* Walk one level of the P2M table, allocating a new table if required. + * Returns 0 on error. + */ static int -p2m_next_level(struct domain *d, mfn_t *table_mfn, void **table, +p2m_next_level(struct p2m_domain *p2m, mfn_t *table_mfn, void **table, unsigned long *gfn_remainder, unsigned long gfn, u32 shift, u32 max, unsigned long type) { @@ -163,7 +163,7 @@ p2m_next_level(struct domain *d, mfn_t * l1_pgentry_t new_entry; void *next; int i; - ASSERT(d->arch.p2m->alloc_page); + ASSERT(p2m->alloc_page); if ( !(p2m_entry = p2m_find_entry(*table, gfn_remainder, gfn, shift, max)) ) @@ -174,7 +174,7 @@ p2m_next_level(struct domain *d, mfn_t * { struct page_info *pg; - pg = p2m_alloc_ptp(d, type); + pg = p2m_alloc_ptp(p2m, type); if ( pg == NULL ) return 0; @@ -183,7 +183,7 @@ p2m_next_level(struct domain *d, mfn_t * switch ( type ) { case PGT_l3_page_table: - paging_write_p2m_entry(d, gfn, + paging_write_p2m_entry(p2m->domain, gfn, p2m_entry, *table_mfn, new_entry, 4); break; case PGT_l2_page_table: @@ -191,11 +191,11 @@ p2m_next_level(struct domain *d, mfn_t * /* for PAE mode, PDPE only has PCD/PWT/P bits available */ new_entry = l1e_from_pfn(mfn_x(page_to_mfn(pg)), _PAGE_PRESENT); #endif - paging_write_p2m_entry(d, gfn, + paging_write_p2m_entry(p2m->domain, gfn, p2m_entry, *table_mfn, new_entry, 3); break; case PGT_l1_page_table: - paging_write_p2m_entry(d, gfn, + paging_write_p2m_entry(p2m->domain, gfn, p2m_entry, *table_mfn, new_entry, 2); break; default: @@ -212,7 +212,7 @@ p2m_next_level(struct domain *d, mfn_t * unsigned long flags, pfn; struct page_info *pg; - pg = p2m_alloc_ptp(d, PGT_l2_page_table); + pg = p2m_alloc_ptp(p2m, PGT_l2_page_table); if ( pg == NULL ) return 0; @@ -223,13 +223,13 @@ p2m_next_level(struct domain *d, mfn_t * for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ ) { new_entry = l1e_from_pfn(pfn + (i * L1_PAGETABLE_ENTRIES), flags); - paging_write_p2m_entry(d, gfn, l1_entry+i, *table_mfn, new_entry, - 2); + paging_write_p2m_entry(p2m->domain, gfn, + l1_entry+i, *table_mfn, new_entry, 2); } unmap_domain_page(l1_entry); new_entry = l1e_from_pfn(mfn_x(page_to_mfn(pg)), __PAGE_HYPERVISOR|_PAGE_USER); //disable PSE - paging_write_p2m_entry(d, gfn, + paging_write_p2m_entry(p2m->domain, gfn, p2m_entry, *table_mfn, new_entry, 3); } @@ -240,7 +240,7 @@ p2m_next_level(struct domain *d, mfn_t * unsigned long flags, pfn; struct page_info *pg; - pg = p2m_alloc_ptp(d, PGT_l1_page_table); + pg = p2m_alloc_ptp(p2m, PGT_l1_page_table); if ( pg == NULL ) return 0; @@ -257,14 +257,14 @@ p2m_next_level(struct domain *d, mfn_t * for ( i = 0; i < 
L1_PAGETABLE_ENTRIES; i++ ) { new_entry = l1e_from_pfn(pfn + i, flags); - paging_write_p2m_entry(d, gfn, + paging_write_p2m_entry(p2m->domain, gfn, l1_entry+i, *table_mfn, new_entry, 1); } unmap_domain_page(l1_entry); new_entry = l1e_from_pfn(mfn_x(page_to_mfn(pg)), __PAGE_HYPERVISOR|_PAGE_USER); - paging_write_p2m_entry(d, gfn, + paging_write_p2m_entry(p2m->domain, gfn, p2m_entry, *table_mfn, new_entry, 2); } @@ -280,17 +280,17 @@ p2m_next_level(struct domain *d, mfn_t * * Populate-on-demand functionality */ static -int set_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn, +int set_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn, unsigned int page_order, p2m_type_t p2mt); static int -p2m_pod_cache_add(struct domain *d, +p2m_pod_cache_add(struct p2m_domain *p2m, struct page_info *page, unsigned long order) { int i; struct page_info *p; - struct p2m_domain *p2md = d->arch.p2m; + struct domain *d = p2m->domain; #ifndef NDEBUG mfn_t mfn; @@ -305,7 +305,7 @@ p2m_pod_cache_add(struct domain *d, return -1; } - for(i=0; i < 1 << order ; i++) { + for(i=0; i < 1 << order; i++) { struct domain * od; p = mfn_to_page(_mfn(mfn_x(mfn) + i)); @@ -320,7 +320,7 @@ p2m_pod_cache_add(struct domain *d, } #endif - ASSERT(p2m_locked_by_me(p2md)); + ASSERT(p2m_locked_by_me(p2m)); /* * Pages from domain_alloc and returned by the balloon driver aren't @@ -337,7 +337,7 @@ p2m_pod_cache_add(struct domain *d, spin_lock(&d->page_alloc_lock); /* First, take all pages off the domain list */ - for(i=0; i < 1 << order ; i++) + for(i=0; i < 1 << order; i++) { p = page + i; page_list_del(p, &d->page_list); @@ -347,12 +347,12 @@ p2m_pod_cache_add(struct domain *d, switch(order) { case 9: - page_list_add_tail(page, &p2md->pod.super); /* lock: page_alloc */ - p2md->pod.count += 1 << order; + page_list_add_tail(page, &p2m->pod.super); /* lock: page_alloc */ + p2m->pod.count += 1 << order; break; case 0: - page_list_add_tail(page, &p2md->pod.single); /* lock: page_alloc */ - p2md->pod.count += 1 ; + page_list_add_tail(page, &p2m->pod.single); /* lock: page_alloc */ + p2m->pod.count += 1; break; default: BUG(); @@ -371,57 +371,56 @@ p2m_pod_cache_add(struct domain *d, * down 2-meg pages into singleton pages automatically. Returns null if * a superpage is requested and no superpages are available. Must be called * with the d->page_lock held. */ -static struct page_info * p2m_pod_cache_get(struct domain *d, +static struct page_info * p2m_pod_cache_get(struct p2m_domain *p2m, unsigned long order) { - struct p2m_domain *p2md = d->arch.p2m; struct page_info *p = NULL; int i; - if ( order == 9 && page_list_empty(&p2md->pod.super) ) + if ( order == 9 && page_list_empty(&p2m->pod.super) ) { return NULL; } - else if ( order == 0 && page_list_empty(&p2md->pod.single) ) + else if ( order == 0 && page_list_empty(&p2m->pod.single) ) { unsigned long mfn; struct page_info *q; - BUG_ON( page_list_empty(&p2md->pod.super) ); + BUG_ON( page_list_empty(&p2m->pod.super) ); /* Break up a superpage to make single pages. NB count doesn't * need to be adjusted. 
*/ - p = page_list_remove_head(&p2md->pod.super); + p = page_list_remove_head(&p2m->pod.super); mfn = mfn_x(page_to_mfn(p)); for ( i=0; ipod.single); + page_list_add_tail(q, &p2m->pod.single); } } switch ( order ) { case 9: - BUG_ON( page_list_empty(&p2md->pod.super) ); - p = page_list_remove_head(&p2md->pod.super); - p2md->pod.count -= 1 << order; /* Lock: page_alloc */ + BUG_ON( page_list_empty(&p2m->pod.super) ); + p = page_list_remove_head(&p2m->pod.super); + p2m->pod.count -= 1 << order; /* Lock: page_alloc */ break; case 0: - BUG_ON( page_list_empty(&p2md->pod.single) ); - p = page_list_remove_head(&p2md->pod.single); - p2md->pod.count -= 1; + BUG_ON( page_list_empty(&p2m->pod.single) ); + p = page_list_remove_head(&p2m->pod.single); + p2m->pod.count -= 1; break; default: BUG(); } /* Put the pages back on the domain page_list */ - for ( i = 0 ; i < (1 << order) ; i++ ) + for ( i = 0; i < (1 << order); i++ ) { - BUG_ON(page_get_owner(p + i) != d); - page_list_add_tail(p + i, &d->page_list); + BUG_ON(page_get_owner(p + i) != p2m->domain); + page_list_add_tail(p + i, &p2m->domain->page_list); } return p; @@ -429,18 +428,18 @@ static struct page_info * p2m_pod_cache_ /* Set the size of the cache, allocating or freeing as necessary. */ static int -p2m_pod_set_cache_target(struct domain *d, unsigned long pod_target) +p2m_pod_set_cache_target(struct p2m_domain *p2m, unsigned long pod_target) { - struct p2m_domain *p2md = d->arch.p2m; + struct domain *d = p2m->domain; int ret = 0; /* Increasing the target */ - while ( pod_target > p2md->pod.count ) + while ( pod_target > p2m->pod.count ) { struct page_info * page; int order; - if ( (pod_target - p2md->pod.count) >= SUPERPAGE_PAGES ) + if ( (pod_target - p2m->pod.count) >= SUPERPAGE_PAGES ) order = 9; else order = 0; @@ -456,18 +455,18 @@ p2m_pod_set_cache_target(struct domain * } printk("%s: Unable to allocate domheap page for pod cache. target %lu cachesize %d\n", - __func__, pod_target, p2md->pod.count); + __func__, pod_target, p2m->pod.count); ret = -ENOMEM; goto out; } - p2m_pod_cache_add(d, page, order); + p2m_pod_cache_add(p2m, page, order); } /* Decreasing the target */ /* We hold the p2m lock here, so we don't need to worry about * cache disappearing under our feet. */ - while ( pod_target < p2md->pod.count ) + while ( pod_target < p2m->pod.count ) { struct page_info * page; int order, i; @@ -476,20 +475,20 @@ p2m_pod_set_cache_target(struct domain * * entries may disappear before we grab the lock. */ spin_lock(&d->page_alloc_lock); - if ( (p2md->pod.count - pod_target) > SUPERPAGE_PAGES - && !page_list_empty(&p2md->pod.super) ) + if ( (p2m->pod.count - pod_target) > SUPERPAGE_PAGES + && !page_list_empty(&p2m->pod.super) ) order = 9; else order = 0; - page = p2m_pod_cache_get(d, order); + page = p2m_pod_cache_get(p2m, order); ASSERT(page != NULL); spin_unlock(&d->page_alloc_lock); /* Then free them */ - for ( i = 0 ; i < (1 << order) ; i++ ) + for ( i = 0; i < (1 << order); i++ ) { /* Copied from common/memory.c:guest_remove_page() */ if ( unlikely(!get_page(page+i, d)) ) @@ -553,14 +552,14 @@ int p2m_pod_set_mem_target(struct domain *d, unsigned long target) { unsigned pod_target; - struct p2m_domain *p2md = d->arch.p2m; + struct p2m_domain *p2m = p2m_get_hostp2m(d); int ret = 0; unsigned long populated; - p2m_lock(p2md); + p2m_lock(p2m); /* P == B: Nothing to do. 
*/ - if ( p2md->pod.entry_count == 0 ) + if ( p2m->pod.entry_count == 0 ) goto out; /* Don't do anything if the domain is being torn down */ @@ -572,21 +571,21 @@ p2m_pod_set_mem_target(struct domain *d, if ( target < d->tot_pages ) goto out; - populated = d->tot_pages - p2md->pod.count; + populated = d->tot_pages - p2m->pod.count; pod_target = target - populated; /* B < T': Set the cache size equal to # of outstanding entries, * let the balloon driver fill in the rest. */ - if ( pod_target > p2md->pod.entry_count ) - pod_target = p2md->pod.entry_count; - - ASSERT( pod_target >= p2md->pod.count ); - - ret = p2m_pod_set_cache_target(d, pod_target); + if ( pod_target > p2m->pod.entry_count ) + pod_target = p2m->pod.entry_count; + + ASSERT( pod_target >= p2m->pod.count ); + + ret = p2m_pod_set_cache_target(p2m, pod_target); out: - p2m_unlock(p2md); + p2m_unlock(p2m); return ret; } @@ -594,37 +593,37 @@ out: void p2m_pod_empty_cache(struct domain *d) { - struct p2m_domain *p2md = d->arch.p2m; + struct p2m_domain *p2m = p2m_get_hostp2m(d); struct page_info *page; /* After this barrier no new PoD activities can happen. */ BUG_ON(!d->is_dying); - spin_barrier(&p2md->lock); + spin_barrier(&p2m->lock); spin_lock(&d->page_alloc_lock); - while ( (page = page_list_remove_head(&p2md->pod.super)) ) + while ( (page = page_list_remove_head(&p2m->pod.super)) ) { int i; - for ( i = 0 ; i < SUPERPAGE_PAGES ; i++ ) + for ( i = 0; i < SUPERPAGE_PAGES; i++ ) { BUG_ON(page_get_owner(page + i) != d); page_list_add_tail(page + i, &d->page_list); } - p2md->pod.count -= SUPERPAGE_PAGES; + p2m->pod.count -= SUPERPAGE_PAGES; } - while ( (page = page_list_remove_head(&p2md->pod.single)) ) + while ( (page = page_list_remove_head(&p2m->pod.single)) ) { BUG_ON(page_get_owner(page) != d); page_list_add_tail(page, &d->page_list); - p2md->pod.count -= 1; + p2m->pod.count -= 1; } - BUG_ON(p2md->pod.count != 0); + BUG_ON(p2m->pod.count != 0); spin_unlock(&d->page_alloc_lock); } @@ -642,9 +641,9 @@ p2m_pod_decrease_reservation(struct doma xen_pfn_t gpfn, unsigned int order) { - struct p2m_domain *p2md = d->arch.p2m; int ret=0; int i; + struct p2m_domain *p2m = p2m_get_hostp2m(d); int steal_for_cache = 0; int pod = 0, nonpod = 0, ram = 0; @@ -652,14 +651,14 @@ p2m_pod_decrease_reservation(struct doma /* If we don't have any outstanding PoD entries, let things take their * course */ - if ( p2md->pod.entry_count == 0 ) + if ( p2m->pod.entry_count == 0 ) goto out; /* Figure out if we need to steal some freed memory for our cache */ - steal_for_cache = ( p2md->pod.entry_count > p2md->pod.count ); - - p2m_lock(p2md); - audit_p2m(d); + steal_for_cache = ( p2m->pod.entry_count > p2m->pod.count ); + + p2m_lock(p2m); + audit_p2m(p2m); if ( unlikely(d->is_dying) ) goto out_unlock; @@ -670,7 +669,7 @@ p2m_pod_decrease_reservation(struct doma { p2m_type_t t; - gfn_to_mfn_query(d, gpfn + i, &t); + gfn_to_mfn_query(p2m, gpfn + i, &t); if ( t == p2m_populate_on_demand ) pod++; @@ -690,9 +689,9 @@ p2m_pod_decrease_reservation(struct doma { /* All PoD: Mark the whole region invalid and tell caller * we're done. 
*/ - set_p2m_entry(d, gpfn, _mfn(INVALID_MFN), order, p2m_invalid); - p2md->pod.entry_count-=(1<pod.entry_count < 0); + set_p2m_entry(p2m, gpfn, _mfn(INVALID_MFN), order, p2m_invalid); + p2m->pod.entry_count-=(1<pod.entry_count < 0); ret = 1; goto out_entry_check; } @@ -710,12 +709,12 @@ p2m_pod_decrease_reservation(struct doma mfn_t mfn; p2m_type_t t; - mfn = gfn_to_mfn_query(d, gpfn + i, &t); + mfn = gfn_to_mfn_query(p2m, gpfn + i, &t); if ( t == p2m_populate_on_demand ) { - set_p2m_entry(d, gpfn + i, _mfn(INVALID_MFN), 0, p2m_invalid); - p2md->pod.entry_count--; /* Lock: p2m */ - BUG_ON(p2md->pod.entry_count < 0); + set_p2m_entry(p2m, gpfn + i, _mfn(INVALID_MFN), 0, p2m_invalid); + p2m->pod.entry_count--; /* Lock: p2m */ + BUG_ON(p2m->pod.entry_count < 0); pod--; } else if ( steal_for_cache && p2m_is_ram(t) ) @@ -726,12 +725,12 @@ p2m_pod_decrease_reservation(struct doma page = mfn_to_page(mfn); - set_p2m_entry(d, gpfn + i, _mfn(INVALID_MFN), 0, p2m_invalid); + set_p2m_entry(p2m, gpfn + i, _mfn(INVALID_MFN), 0, p2m_invalid); set_gpfn_from_mfn(mfn_x(mfn), INVALID_M2P_ENTRY); - p2m_pod_cache_add(d, page, 0); - - steal_for_cache = ( p2md->pod.entry_count > p2md->pod.count ); + p2m_pod_cache_add(p2m, page, 0); + + steal_for_cache = ( p2m->pod.entry_count > p2m->pod.count ); nonpod--; ram--; @@ -745,33 +744,31 @@ p2m_pod_decrease_reservation(struct doma out_entry_check: /* If we've reduced our "liabilities" beyond our "assets", free some */ - if ( p2md->pod.entry_count < p2md->pod.count ) + if ( p2m->pod.entry_count < p2m->pod.count ) { - p2m_pod_set_cache_target(d, p2md->pod.entry_count); + p2m_pod_set_cache_target(p2m, p2m->pod.entry_count); } out_unlock: - audit_p2m(d); - p2m_unlock(p2md); + audit_p2m(p2m); + p2m_unlock(p2m); out: return ret; } void -p2m_pod_dump_data(struct domain *d) +p2m_pod_dump_data(struct p2m_domain *p2m) { - struct p2m_domain *p2md = d->arch.p2m; - printk(" PoD entries=%d cachesize=%d\n", - p2md->pod.entry_count, p2md->pod.count); + p2m->pod.entry_count, p2m->pod.count); } /* Search for all-zero superpages to be reclaimed as superpages for the * PoD cache. Must be called w/ p2m lock held, page_alloc lock not held. */ static int -p2m_pod_zero_check_superpage(struct domain *d, unsigned long gfn) +p2m_pod_zero_check_superpage(struct p2m_domain *p2m, unsigned long gfn) { mfn_t mfn, mfn0 = _mfn(INVALID_MFN); p2m_type_t type, type0 = 0; @@ -779,6 +776,7 @@ p2m_pod_zero_check_superpage(struct doma int ret=0, reset = 0; int i, j; int max_ref = 1; + struct domain *d = p2m->domain; if ( !superpage_aligned(gfn) ) goto out; @@ -792,7 +790,7 @@ p2m_pod_zero_check_superpage(struct doma for ( i=0; iarch.p2m->pod.entry_count += SUPERPAGE_PAGES; + p2m_pod_cache_add(p2m, mfn_to_page(mfn0), 9); + p2m->pod.entry_count += SUPERPAGE_PAGES; out_reset: if ( reset ) - set_p2m_entry(d, gfn, mfn0, 9, type0); + set_p2m_entry(p2m, gfn, mfn0, 9, type0); out: return ret; } static void -p2m_pod_zero_check(struct domain *d, unsigned long *gfns, int count) +p2m_pod_zero_check(struct p2m_domain *p2m, unsigned long *gfns, int count) { mfn_t mfns[count]; p2m_type_t types[count]; unsigned long * map[count]; + struct domain *d = p2m->domain; int i, j; int max_ref = 1; @@ -920,7 +919,7 @@ p2m_pod_zero_check(struct domain *d, uns /* First, get the gfn list, translate to mfns, and map the pages. 
*/ for ( i=0; iarch.p2m->pod.entry_count++; + p2m_pod_cache_add(p2m, mfn_to_page(mfns[i]), 0); + p2m->pod.entry_count++; } } @@ -1014,56 +1013,53 @@ p2m_pod_zero_check(struct domain *d, uns #define POD_SWEEP_LIMIT 1024 static void -p2m_pod_emergency_sweep_super(struct domain *d) +p2m_pod_emergency_sweep_super(struct p2m_domain *p2m) { - struct p2m_domain *p2md = d->arch.p2m; unsigned long i, start, limit; - if ( p2md->pod.reclaim_super == 0 ) + if ( p2m->pod.reclaim_super == 0 ) { - p2md->pod.reclaim_super = (p2md->pod.max_guest>>9)<<9; - p2md->pod.reclaim_super -= SUPERPAGE_PAGES; + p2m->pod.reclaim_super = (p2m->pod.max_guest>>9)<<9; + p2m->pod.reclaim_super -= SUPERPAGE_PAGES; } - start = p2md->pod.reclaim_super; + start = p2m->pod.reclaim_super; limit = (start > POD_SWEEP_LIMIT) ? (start - POD_SWEEP_LIMIT) : 0; - for ( i=p2md->pod.reclaim_super ; i > 0 ; i-=SUPERPAGE_PAGES ) + for ( i = p2m->pod.reclaim_super; i > 0; i -= SUPERPAGE_PAGES ) { - p2m_pod_zero_check_superpage(d, i); + p2m_pod_zero_check_superpage(p2m, i); /* Stop if we're past our limit and we have found *something*. * * NB that this is a zero-sum game; we're increasing our cache size * by increasing our 'debt'. Since we hold the p2m lock, * (entry_count - count) must remain the same. */ - if ( !page_list_empty(&p2md->pod.super) && i < limit ) + if ( !page_list_empty(&p2m->pod.super) && i < limit ) break; } - p2md->pod.reclaim_super = i ? i - SUPERPAGE_PAGES : 0; - + p2m->pod.reclaim_super = i ? i - SUPERPAGE_PAGES : 0; } #define POD_SWEEP_STRIDE 16 static void -p2m_pod_emergency_sweep(struct domain *d) +p2m_pod_emergency_sweep(struct p2m_domain *p2m) { - struct p2m_domain *p2md = d->arch.p2m; unsigned long gfns[POD_SWEEP_STRIDE]; unsigned long i, j=0, start, limit; p2m_type_t t; - if ( p2md->pod.reclaim_single == 0 ) - p2md->pod.reclaim_single = p2md->pod.max_guest; - - start = p2md->pod.reclaim_single; + if ( p2m->pod.reclaim_single == 0 ) + p2m->pod.reclaim_single = p2m->pod.max_guest; + + start = p2m->pod.reclaim_single; limit = (start > POD_SWEEP_LIMIT) ? (start - POD_SWEEP_LIMIT) : 0; /* FIXME: Figure out how to avoid superpages */ - for ( i=p2md->pod.reclaim_single ; i > 0 ; i-- ) + for ( i = p2m->pod.reclaim_single; i > 0; i-- ) { - gfn_to_mfn_query(d, i, &t ); + gfn_to_mfn_query(p2m, i, &t ); if ( p2m_is_ram(t) ) { gfns[j] = i; @@ -1071,7 +1067,7 @@ p2m_pod_emergency_sweep(struct domain *d BUG_ON(j > POD_SWEEP_STRIDE); if ( j == POD_SWEEP_STRIDE ) { - p2m_pod_zero_check(d, gfns, j); + p2m_pod_zero_check(p2m, gfns, j); j = 0; } } @@ -1080,29 +1076,29 @@ p2m_pod_emergency_sweep(struct domain *d * NB that this is a zero-sum game; we're increasing our cache size * by re-increasing our 'debt'. Since we hold the p2m lock, * (entry_count - count) must remain the same. */ - if ( p2md->pod.count > 0 && i < limit ) + if ( p2m->pod.count > 0 && i < limit ) break; } if ( j ) - p2m_pod_zero_check(d, gfns, j); - - p2md->pod.reclaim_single = i ? i - 1 : i; + p2m_pod_zero_check(p2m, gfns, j); + + p2m->pod.reclaim_single = i ? i - 1 : i; } int -p2m_pod_demand_populate(struct domain *d, unsigned long gfn, +p2m_pod_demand_populate(struct p2m_domain *p2m, unsigned long gfn, unsigned int order, p2m_query_t q) { + struct domain *d = p2m->domain; struct page_info *p = NULL; /* Compiler warnings */ unsigned long gfn_aligned; mfn_t mfn; - struct p2m_domain *p2md = d->arch.p2m; int i; - ASSERT(p2m_locked_by_me(d->arch.p2m)); + ASSERT(p2m_locked_by_me(p2m)); /* This check is done with the p2m lock held. 
This will make sure that * even if d->is_dying changes under our feet, p2m_pod_empty_cache() @@ -1120,34 +1116,34 @@ p2m_pod_demand_populate(struct domain *d * set_p2m_entry() should automatically shatter the 1GB page into * 512 2MB pages. The rest of 511 calls are unnecessary. */ - set_p2m_entry(d, gfn_aligned, _mfn(POPULATE_ON_DEMAND_MFN), 9, + set_p2m_entry(p2m, gfn_aligned, _mfn(POPULATE_ON_DEMAND_MFN), 9, p2m_populate_on_demand); - audit_p2m(d); - p2m_unlock(p2md); + audit_p2m(p2m); + p2m_unlock(p2m); return 0; } /* If we're low, start a sweep */ - if ( order == 9 && page_list_empty(&p2md->pod.super) ) - p2m_pod_emergency_sweep_super(d); - - if ( page_list_empty(&p2md->pod.single) && + if ( order == 9 && page_list_empty(&p2m->pod.super) ) + p2m_pod_emergency_sweep_super(p2m); + + if ( page_list_empty(&p2m->pod.single) && ( ( order == 0 ) - || (order == 9 && page_list_empty(&p2md->pod.super) ) ) ) - p2m_pod_emergency_sweep(d); + || (order == 9 && page_list_empty(&p2m->pod.super) ) ) ) + p2m_pod_emergency_sweep(p2m); /* Keep track of the highest gfn demand-populated by a guest fault */ - if ( q == p2m_guest && gfn > p2md->pod.max_guest ) - p2md->pod.max_guest = gfn; + if ( q == p2m_guest && gfn > p2m->pod.max_guest ) + p2m->pod.max_guest = gfn; spin_lock(&d->page_alloc_lock); - if ( p2md->pod.count == 0 ) + if ( p2m->pod.count == 0 ) goto out_of_memory; /* Get a page f/ the cache. A NULL return value indicates that the * 2-meg range should be marked singleton PoD, and retried */ - if ( (p = p2m_pod_cache_get(d, order)) == NULL ) + if ( (p = p2m_pod_cache_get(p2m, order)) == NULL ) goto remap_and_retry; mfn = page_to_mfn(p); @@ -1158,13 +1154,13 @@ p2m_pod_demand_populate(struct domain *d gfn_aligned = (gfn >> order) << order; - set_p2m_entry(d, gfn_aligned, mfn, order, p2m_ram_rw); - - for( i = 0 ; i < (1UL << order) ; i++ ) + set_p2m_entry(p2m, gfn_aligned, mfn, order, p2m_ram_rw); + + for( i = 0; i < (1UL << order); i++ ) set_gpfn_from_mfn(mfn_x(mfn) + i, gfn_aligned + i); - p2md->pod.entry_count -= (1 << order); /* Lock: p2m */ - BUG_ON(p2md->pod.entry_count < 0); + p2m->pod.entry_count -= (1 << order); /* Lock: p2m */ + BUG_ON(p2m->pod.entry_count < 0); if ( tb_init_done ) { @@ -1186,7 +1182,7 @@ out_of_memory: spin_unlock(&d->page_alloc_lock); printk("%s: Out of populate-on-demand memory! 
tot_pages %" PRIu32 " pod_entries %" PRIi32 "\n", - __func__, d->tot_pages, p2md->pod.entry_count); + __func__, d->tot_pages, p2m->pod.entry_count); domain_crash(d); out_fail: return -1; @@ -1197,7 +1193,7 @@ remap_and_retry: /* Remap this 2-meg region in singleton chunks */ gfn_aligned = (gfn>>order)<arch.p2m); + int do_locking = !p2m_locked_by_me(p2m); int r; if ( do_locking ) - p2m_lock(d->arch.p2m); - - audit_p2m(d); + p2m_lock(p2m); + + audit_p2m(p2m); /* Check to make sure this is still PoD */ if ( p2m_flags_to_type(l1e_get_flags(*p2m_entry)) != p2m_populate_on_demand ) { if ( do_locking ) - p2m_unlock(d->arch.p2m); + p2m_unlock(p2m); return 0; } - r = p2m_pod_demand_populate(d, gfn, order, q); - - audit_p2m(d); + r = p2m_pod_demand_populate(p2m, gfn, order, q); + + audit_p2m(p2m); if ( do_locking ) - p2m_unlock(d->arch.p2m); + p2m_unlock(p2m); return r; } -// Returns 0 on error (out of memory) +/* Returns 0 on error (out of memory) */ static int -p2m_set_entry(struct domain *d, unsigned long gfn, mfn_t mfn, +p2m_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn, unsigned int page_order, p2m_type_t p2mt) { - // XXX -- this might be able to be faster iff current->domain == d - mfn_t table_mfn = pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d))); + /* XXX -- this might be able to be faster iff current->domain == d */ + mfn_t table_mfn = pagetable_get_mfn(p2m_get_pagetable(p2m)); void *table =map_domain_page(mfn_x(table_mfn)); unsigned long i, gfn_remainder = gfn; l1_pgentry_t *p2m_entry; @@ -1273,14 +1269,14 @@ p2m_set_entry(struct domain *d, unsigned t.gfn = gfn; t.mfn = mfn_x(mfn); t.p2mt = p2mt; - t.d = d->domain_id; + t.d = p2m->domain->domain_id; t.order = page_order; __trace_var(TRC_MEM_SET_P2M_ENTRY, 0, sizeof(t), (unsigned char *)&t); } #if CONFIG_PAGING_LEVELS >= 4 - if ( !p2m_next_level(d, &table_mfn, &table, &gfn_remainder, gfn, + if ( !p2m_next_level(p2m, &table_mfn, &table, &gfn_remainder, gfn, L4_PAGETABLE_SHIFT - PAGE_SHIFT, L4_PAGETABLE_ENTRIES, PGT_l3_page_table) ) goto out; @@ -1298,14 +1294,15 @@ p2m_set_entry(struct domain *d, unsigned !(l1e_get_flags(*p2m_entry) & _PAGE_PSE) ) { P2M_ERROR("configure P2M table L3 entry with large page\n"); - domain_crash(d); + domain_crash(p2m->domain); goto out; } l3e_content = mfn_valid(mfn) ? l3e_from_pfn(mfn_x(mfn), p2m_type_to_flags(p2mt) | _PAGE_PSE) : l3e_empty(); entry_content.l1 = l3e_content.l3; - paging_write_p2m_entry(d, gfn, p2m_entry, table_mfn, entry_content, 3); + paging_write_p2m_entry(p2m->domain, gfn, p2m_entry, + table_mfn, entry_content, 3); } /* @@ -1315,17 +1312,17 @@ p2m_set_entry(struct domain *d, unsigned * in Xen's address space for translated PV guests. * When using AMD's NPT on PAE Xen, we are restricted to 4GB. */ - else if ( !p2m_next_level(d, &table_mfn, &table, &gfn_remainder, gfn, + else if ( !p2m_next_level(p2m, &table_mfn, &table, &gfn_remainder, gfn, L3_PAGETABLE_SHIFT - PAGE_SHIFT, ((CONFIG_PAGING_LEVELS == 3) - ? (paging_mode_hap(d) ? 4 : 8) + ? (paging_mode_hap(p2m->domain) ? 
4 : 8) : L3_PAGETABLE_ENTRIES), PGT_l2_page_table) ) goto out; if ( page_order == 0 ) { - if ( !p2m_next_level(d, &table_mfn, &table, &gfn_remainder, gfn, + if ( !p2m_next_level(p2m, &table_mfn, &table, &gfn_remainder, gfn, L2_PAGETABLE_SHIFT - PAGE_SHIFT, L2_PAGETABLE_ENTRIES, PGT_l1_page_table) ) goto out; @@ -1340,7 +1337,8 @@ p2m_set_entry(struct domain *d, unsigned entry_content = l1e_empty(); /* level 1 entry */ - paging_write_p2m_entry(d, gfn, p2m_entry, table_mfn, entry_content, 1); + paging_write_p2m_entry(p2m->domain, gfn, p2m_entry, + table_mfn, entry_content, 1); } else if ( page_order == 9 ) { @@ -1354,7 +1352,7 @@ p2m_set_entry(struct domain *d, unsigned !(l1e_get_flags(*p2m_entry) & _PAGE_PSE) ) { P2M_ERROR("configure P2M table 4KB L2 entry with large page\n"); - domain_crash(d); + domain_crash(p2m->domain); goto out; } @@ -1365,22 +1363,23 @@ p2m_set_entry(struct domain *d, unsigned l2e_content = l2e_empty(); entry_content.l1 = l2e_content.l2; - paging_write_p2m_entry(d, gfn, p2m_entry, table_mfn, entry_content, 2); + paging_write_p2m_entry(p2m->domain, gfn, p2m_entry, + table_mfn, entry_content, 2); } /* Track the highest gfn for which we have ever had a valid mapping */ if ( mfn_valid(mfn) - && (gfn + (1UL << page_order) - 1 > d->arch.p2m->max_mapped_pfn) ) - d->arch.p2m->max_mapped_pfn = gfn + (1UL << page_order) - 1; - - if ( iommu_enabled && need_iommu(d) ) + && (gfn + (1UL << page_order) - 1 > p2m->max_mapped_pfn) ) + p2m->max_mapped_pfn = gfn + (1UL << page_order) - 1; + + if ( iommu_enabled && need_iommu(p2m->domain) ) { if ( p2mt == p2m_ram_rw ) for ( i = 0; i < (1UL << page_order); i++ ) - iommu_map_page(d, gfn+i, mfn_x(mfn)+i ); + iommu_map_page(p2m->domain, gfn+i, mfn_x(mfn)+i ); else for ( int i = 0; i < (1UL << page_order); i++ ) - iommu_unmap_page(d, gfn+i); + iommu_unmap_page(p2m->domain, gfn+i); } /* Success */ @@ -1391,8 +1390,9 @@ out: return rv; } +/* Read p2m table (through the linear mapping). */ static mfn_t -p2m_gfn_to_mfn(struct domain *d, unsigned long gfn, p2m_type_t *t, +p2m_gfn_to_mfn(struct p2m_domain *p2m, unsigned long gfn, p2m_type_t *t, p2m_query_t q) { mfn_t mfn; @@ -1400,7 +1400,7 @@ p2m_gfn_to_mfn(struct domain *d, unsigne l2_pgentry_t *l2e; l1_pgentry_t *l1e; - ASSERT(paging_mode_translate(d)); + ASSERT(paging_mode_translate(p2m->domain)); /* XXX This is for compatibility with the old model, where anything not * XXX marked as RAM was considered to be emulated MMIO space. 
@@ -1408,9 +1408,9 @@ p2m_gfn_to_mfn(struct domain *d, unsigne * XXX we will return p2m_invalid for unmapped gfns */ *t = p2m_mmio_dm; - mfn = pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d))); - - if ( gfn > d->arch.p2m->max_mapped_pfn ) + mfn = pagetable_get_mfn(p2m_get_pagetable(p2m)); + + if ( gfn > p2m->max_mapped_pfn ) /* This pfn is higher than the highest the p2m map currently holds */ return _mfn(INVALID_MFN); @@ -1446,7 +1446,7 @@ pod_retry_l3: { if ( q != p2m_query ) { - if ( !p2m_pod_demand_populate(d, gfn, 18, q) ) + if ( !p2m_pod_demand_populate(p2m, gfn, 18, q) ) goto pod_retry_l3; } else @@ -1481,8 +1481,8 @@ pod_retry_l2: if ( p2m_flags_to_type(l2e_get_flags(*l2e)) == p2m_populate_on_demand ) { if ( q != p2m_query ) { - if ( !p2m_pod_check_and_populate(d, gfn, - (l1_pgentry_t *)l2e, 9, q) ) + if ( !p2m_pod_check_and_populate(p2m, gfn, + (l1_pgentry_t *)l2e, 9, q) ) goto pod_retry_l2; } else *t = p2m_populate_on_demand; @@ -1513,8 +1513,8 @@ pod_retry_l1: if ( p2m_flags_to_type(l1e_get_flags(*l1e)) == p2m_populate_on_demand ) { if ( q != p2m_query ) { - if ( !p2m_pod_check_and_populate(d, gfn, - (l1_pgentry_t *)l1e, 0, q) ) + if ( !p2m_pod_check_and_populate(p2m, gfn, + (l1_pgentry_t *)l1e, 0, q) ) goto pod_retry_l1; } else *t = p2m_populate_on_demand; @@ -1531,8 +1531,7 @@ pod_retry_l1: return (p2m_is_valid(*t) || p2m_is_grant(*t)) ? mfn : _mfn(INVALID_MFN); } -/* Init the datastructures for later use by the p2m code */ -int p2m_init(struct domain *d) +static int p2m_allocp2m(struct p2m_domain **_p2m) { struct p2m_domain *p2m; @@ -1540,39 +1539,68 @@ int p2m_init(struct domain *d) if ( p2m == NULL ) return -ENOMEM; - d->arch.p2m = p2m; - + *_p2m = p2m; + return 0; +} + +/* Init the datastructures for later use by the p2m code */ +static void p2m_initialise(struct domain *d, struct p2m_domain *p2m, + bool_t preserve_allocfree) +{ + void *alloc, *free; + + alloc = free = NULL; + if (preserve_allocfree) { + alloc = p2m->alloc_page; + free = p2m->free_page; + } memset(p2m, 0, sizeof(*p2m)); p2m_lock_init(p2m); INIT_PAGE_LIST_HEAD(&p2m->pages); INIT_PAGE_LIST_HEAD(&p2m->pod.super); INIT_PAGE_LIST_HEAD(&p2m->pod.single); + p2m->domain = d; p2m->set_entry = p2m_set_entry; p2m->get_entry = p2m_gfn_to_mfn; p2m->change_entry_type_global = p2m_change_type_global; + if (preserve_allocfree) { + p2m->alloc_page = alloc; + p2m->free_page = free; + } if ( is_hvm_domain(d) && d->arch.hvm_domain.hap_enabled && (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) ) ept_p2m_init(d); + return; +} + +int p2m_init(struct domain *d) +{ + int rv; + + rv = p2m_allocp2m(&p2m_get_hostp2m(d)); + if ( rv ) + return rv; + p2m_initialise(d, p2m_get_hostp2m(d), 0); + return 0; } -void p2m_change_entry_type_global(struct domain *d, +void p2m_change_entry_type_global(struct p2m_domain *p2m, p2m_type_t ot, p2m_type_t nt) { - struct p2m_domain *p2m = d->arch.p2m; - p2m_lock(p2m); - p2m->change_entry_type_global(d, ot, nt); + p2m->change_entry_type_global(p2m, ot, nt); p2m_unlock(p2m); } static -int set_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn, +int set_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn, unsigned int page_order, p2m_type_t p2mt) { + struct domain *d = p2m->domain; unsigned long todo = 1ul << page_order; unsigned int order; int rc = 1; @@ -1592,7 +1620,7 @@ int set_p2m_entry(struct domain *d, unsi */ BUG_ON(order == 18 && CONFIG_PAGING_LEVELS < 4); - if ( !d->arch.p2m->set_entry(d, gfn, mfn, order, p2mt) ) + if ( !p2m->set_entry(p2m, gfn, mfn, order, p2mt) ) rc = 0; 
gfn += 1ul << order; if ( mfn_x(mfn) != INVALID_MFN ) @@ -1603,26 +1631,24 @@ int set_p2m_entry(struct domain *d, unsi return rc; } -// Allocate a new p2m table for a domain. -// -// The structure of the p2m table is that of a pagetable for xen (i.e. it is -// controlled by CONFIG_PAGING_LEVELS). -// -// The alloc_page and free_page functions will be used to get memory to -// build the p2m, and to release it again at the end of day. -// -// Returns 0 for success or -errno. -// -int p2m_alloc_table(struct domain *d, - struct page_info * (*alloc_page)(struct domain *d), - void (*free_page)(struct domain *d, struct page_info *pg)) - +/* Allocate a new p2m table for a domain. + * + * The structure of the p2m table is that of a pagetable for xen (i.e. it is + * controlled by CONFIG_PAGING_LEVELS). + * + * The alloc_page and free_page functions will be used to get memory to + * build the p2m, and to release it again at the end of day. + * + * Returns 0 for success or -errno. + */ +int p2m_alloc_table(struct p2m_domain *p2m, + struct page_info * (*alloc_page)(struct p2m_domain *p2m), + void (*free_page)(struct p2m_domain *p2m, struct page_info *pg)) { mfn_t mfn = _mfn(INVALID_MFN); struct page_info *page, *p2m_top; unsigned int page_count = 0; unsigned long gfn = -1UL; - struct p2m_domain *p2m = p2m_get_hostp2m(d); p2m_lock(p2m); @@ -1638,7 +1664,7 @@ int p2m_alloc_table(struct domain *d, p2m->alloc_page = alloc_page; p2m->free_page = free_page; - p2m_top = p2m->alloc_page(d); + p2m_top = p2m->alloc_page(p2m); if ( p2m_top == NULL ) { p2m_unlock(p2m); @@ -1660,12 +1686,12 @@ int p2m_alloc_table(struct domain *d, P2M_PRINTK("populating p2m table\n"); /* Initialise physmap tables for slot zero. Other code assumes this. */ - if ( !set_p2m_entry(d, 0, _mfn(INVALID_MFN), 0, + if ( !set_p2m_entry(p2m, 0, _mfn(INVALID_MFN), 0, p2m_invalid) ) goto error; /* Copy all existing mappings from the page list and m2p */ - page_list_for_each(page, &d->page_list) + page_list_for_each(page, &p2m->domain->page_list) { mfn = page_to_mfn(page); gfn = get_gpfn_from_mfn(mfn_x(mfn)); @@ -1679,7 +1705,7 @@ int p2m_alloc_table(struct domain *d, (gfn != 0x55555555L) #endif && gfn != INVALID_M2P_ENTRY - && !set_p2m_entry(d, gfn, mfn, 0, p2m_ram_rw) ) + && !set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_rw) ) goto error; } @@ -1694,38 +1720,38 @@ int p2m_alloc_table(struct domain *d, return -ENOMEM; } -void p2m_teardown(struct domain *d) +void p2m_teardown(struct p2m_domain *p2m) /* Return all the p2m pages to Xen. 
* We know we don't have any extra mappings to these pages */ { struct page_info *pg; - struct p2m_domain *p2m = p2m_get_hostp2m(d); unsigned long gfn; p2m_type_t t; mfn_t mfn; p2m_lock(p2m); - for(gfn=0; gfn < p2m->max_mapped_pfn; gfn++) + for (gfn=0; gfn < p2m->max_mapped_pfn; gfn++) { - mfn = p2m->get_entry(d, gfn, &t, p2m_query); - if(mfn_valid(mfn) && (t == p2m_ram_shared)) - BUG_ON(mem_sharing_unshare_page(d, gfn, MEM_SHARING_DESTROY_GFN)); + mfn = p2m->get_entry(p2m, gfn, &t, p2m_query); + if (mfn_valid(mfn) && (t == p2m_ram_shared)) + BUG_ON(mem_sharing_unshare_page(p2m, gfn, MEM_SHARING_DESTROY_GFN)); } p2m->phys_table = pagetable_null(); while ( (pg = page_list_remove_head(&p2m->pages)) ) - p2m->free_page(d, pg); + p2m->free_page(p2m, pg); p2m_unlock(p2m); } void p2m_final_teardown(struct domain *d) { + /* Iterate over all p2m tables per domain */ xfree(d->arch.p2m); d->arch.p2m = NULL; } #if P2M_AUDIT -static void audit_p2m(struct domain *d) +static void audit_p2m(struct p2m_domain *p2m) { struct page_info *page; struct domain *od; @@ -1735,6 +1761,7 @@ static void audit_p2m(struct domain *d) unsigned long orphans_d = 0, orphans_i = 0, mpbad = 0, pmbad = 0; int test_linear; p2m_type_t type; + struct domain *d = p2m->domain; if ( !paging_mode_translate(d) ) return; @@ -1789,7 +1816,7 @@ static void audit_p2m(struct domain *d) continue; } - p2mfn = gfn_to_mfn_type_foreign(d, gfn, &type, p2m_query); + p2mfn = gfn_to_mfn_type_p2m(p2m, gfn, &type, p2m_query); if ( mfn_x(p2mfn) != mfn ) { mpbad++; @@ -1805,9 +1832,9 @@ static void audit_p2m(struct domain *d) set_gpfn_from_mfn(mfn, INVALID_M2P_ENTRY); } - if ( test_linear && (gfn <= d->arch.p2m->max_mapped_pfn) ) + if ( test_linear && (gfn <= p2m->max_mapped_pfn) ) { - lp2mfn = mfn_x(gfn_to_mfn_query(d, gfn, &type)); + lp2mfn = mfn_x(gfn_to_mfn_query(p2m, gfn, &type)); if ( lp2mfn != mfn_x(p2mfn) ) { P2M_PRINTK("linear mismatch gfn %#lx -> mfn %#lx " @@ -1822,21 +1849,19 @@ static void audit_p2m(struct domain *d) spin_unlock(&d->page_alloc_lock); /* Audit part two: walk the domain's p2m table, checking the entries. 
*/ - if ( pagetable_get_pfn(p2m_get_pagetable(p2m_get_hostp2m(d)) != 0 ) + if ( pagetable_get_pfn(p2m_get_pagetable(p2m)) != 0 ) { + l3_pgentry_t *l3e; l2_pgentry_t *l2e; l1_pgentry_t *l1e; - int i1, i2; + int i1, i2, i3; #if CONFIG_PAGING_LEVELS == 4 l4_pgentry_t *l4e; - l3_pgentry_t *l3e; - int i3, i4; - l4e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d))))); + int i4; + l4e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m)))); #else /* CONFIG_PAGING_LEVELS == 3 */ - l3_pgentry_t *l3e; - int i3; - l3e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d))))); + l3e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m)))); #endif gfn = 0; @@ -1966,11 +1991,11 @@ static void audit_p2m(struct domain *d) } - if ( entry_count != d->arch.p2m->pod.entry_count ) + if ( entry_count != p2m->pod.entry_count ) { printk("%s: refcounted entry count %d, audit count %d!\n", __func__, - d->arch.p2m->pod.entry_count, + p2m->pod.entry_count, entry_count); BUG(); } @@ -1988,18 +2013,18 @@ static void audit_p2m(struct domain *d) static void -p2m_remove_page(struct domain *d, unsigned long gfn, unsigned long mfn, +p2m_remove_page(struct p2m_domain *p2m, unsigned long gfn, unsigned long mfn, unsigned int page_order) { unsigned long i; mfn_t mfn_return; p2m_type_t t; - if ( !paging_mode_translate(d) ) + if ( !paging_mode_translate(p2m->domain) ) { - if ( need_iommu(d) ) + if ( need_iommu(p2m->domain) ) for ( i = 0; i < (1 << page_order); i++ ) - iommu_unmap_page(d, mfn + i); + iommu_unmap_page(p2m->domain, mfn + i); return; } @@ -2007,23 +2032,23 @@ p2m_remove_page(struct domain *d, unsign for ( i = 0; i < (1UL << page_order); i++ ) { - mfn_return = d->arch.p2m->get_entry(d, gfn + i, &t, p2m_query); + mfn_return = p2m->get_entry(p2m, gfn + i, &t, p2m_query); if ( !p2m_is_grant(t) ) set_gpfn_from_mfn(mfn+i, INVALID_M2P_ENTRY); ASSERT( !p2m_is_valid(t) || mfn + i == mfn_x(mfn_return) ); } - set_p2m_entry(d, gfn, _mfn(INVALID_MFN), page_order, p2m_invalid); + set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), page_order, p2m_invalid); } void -guest_physmap_remove_page(struct domain *d, unsigned long gfn, +guest_physmap_remove_entry(struct p2m_domain *p2m, unsigned long gfn, unsigned long mfn, unsigned int page_order) { - p2m_lock(d->arch.p2m); - audit_p2m(d); - p2m_remove_page(d, gfn, mfn, page_order); - audit_p2m(d); - p2m_unlock(d->arch.p2m); + p2m_lock(p2m); + audit_p2m(p2m); + p2m_remove_page(p2m, gfn, mfn, page_order); + audit_p2m(p2m); + p2m_unlock(p2m); } #if CONFIG_PAGING_LEVELS == 3 @@ -2054,7 +2079,7 @@ int guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn, unsigned int order) { - struct p2m_domain *p2md = d->arch.p2m; + struct p2m_domain *p2m = p2m_get_hostp2m(d); unsigned long i; p2m_type_t ot; mfn_t omfn; @@ -2067,15 +2092,15 @@ guest_physmap_mark_populate_on_demand(st if ( rc != 0 ) return rc; - p2m_lock(p2md); - audit_p2m(d); + p2m_lock(p2m); + audit_p2m(p2m); P2M_DEBUG("mark pod gfn=%#lx\n", gfn); /* Make sure all gpfns are unused */ for ( i = 0; i < (1UL << order); i++ ) { - omfn = gfn_to_mfn_query(d, gfn + i, &ot); + omfn = gfn_to_mfn_query(p2m, gfn + i, &ot); if ( p2m_is_ram(ot) ) { printk("%s: gfn_to_mfn returned type %d!\n", @@ -2091,29 +2116,29 @@ guest_physmap_mark_populate_on_demand(st } /* Now, actually do the two-way mapping */ - if ( !set_p2m_entry(d, gfn, _mfn(POPULATE_ON_DEMAND_MFN), order, + if ( !set_p2m_entry(p2m, gfn, _mfn(POPULATE_ON_DEMAND_MFN), order, p2m_populate_on_demand) ) rc = 
-EINVAL; else { - p2md->pod.entry_count += 1 << order; /* Lock: p2m */ - p2md->pod.entry_count -= pod_count; - BUG_ON(p2md->pod.entry_count < 0); + p2m->pod.entry_count += 1 << order; /* Lock: p2m */ + p2m->pod.entry_count -= pod_count; + BUG_ON(p2m->pod.entry_count < 0); } - audit_p2m(d); - p2m_unlock(p2md); + audit_p2m(p2m); + p2m_unlock(p2m); out: return rc; - } int -guest_physmap_add_entry(struct domain *d, unsigned long gfn, +guest_physmap_add_entry(struct p2m_domain *p2m, unsigned long gfn, unsigned long mfn, unsigned int page_order, p2m_type_t t) { + struct domain *d = p2m->domain; unsigned long i, ogfn; p2m_type_t ot; mfn_t omfn; @@ -2139,20 +2164,20 @@ guest_physmap_add_entry(struct domain *d if ( rc != 0 ) return rc; - p2m_lock(d->arch.p2m); - audit_p2m(d); + p2m_lock(p2m); + audit_p2m(p2m); P2M_DEBUG("adding gfn=%#lx mfn=%#lx\n", gfn, mfn); /* First, remove m->p mappings for existing p->m mappings */ for ( i = 0; i < (1UL << page_order); i++ ) { - omfn = gfn_to_mfn_query(d, gfn + i, &ot); + omfn = gfn_to_mfn_query(p2m, gfn + i, &ot); if ( p2m_is_grant(ot) ) { /* Really shouldn't be unmapping grant maps this way */ domain_crash(d); - p2m_unlock(d->arch.p2m); + p2m_unlock(p2m); return -EINVAL; } else if ( p2m_is_ram(ot) ) @@ -2186,7 +2211,7 @@ guest_physmap_add_entry(struct domain *d * address */ P2M_DEBUG("aliased! mfn=%#lx, old gfn=%#lx, new gfn=%#lx\n", mfn + i, ogfn, gfn + i); - omfn = gfn_to_mfn_query(d, ogfn, &ot); + omfn = gfn_to_mfn_query(p2m, ogfn, &ot); /* If we get here, we know the local domain owns the page, so it can't have been grant mapped in. */ BUG_ON( p2m_is_grant(ot) ); @@ -2196,7 +2221,7 @@ guest_physmap_add_entry(struct domain *d P2M_DEBUG("old gfn=%#lx -> mfn %#lx\n", ogfn , mfn_x(omfn)); if ( mfn_x(omfn) == (mfn + i) ) - p2m_remove_page(d, ogfn, mfn + i, 0); + p2m_remove_page(p2m, ogfn, mfn + i, 0); } } } @@ -2204,7 +2229,7 @@ guest_physmap_add_entry(struct domain *d /* Now, actually do the two-way mapping */ if ( mfn_valid(_mfn(mfn)) ) { - if ( !set_p2m_entry(d, gfn, _mfn(mfn), page_order, t) ) + if ( !set_p2m_entry(p2m, gfn, _mfn(mfn), page_order, t) ) rc = -EINVAL; if ( !p2m_is_grant(t) ) { @@ -2216,18 +2241,18 @@ guest_physmap_add_entry(struct domain *d { gdprintk(XENLOG_WARNING, "Adding bad mfn to p2m map (%#lx -> %#lx)\n", gfn, mfn); - if ( !set_p2m_entry(d, gfn, _mfn(INVALID_MFN), page_order, + if ( !set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), page_order, p2m_invalid) ) rc = -EINVAL; else { - d->arch.p2m->pod.entry_count -= pod_count; /* Lock: p2m */ - BUG_ON(d->arch.p2m->pod.entry_count < 0); + p2m->pod.entry_count -= pod_count; /* Lock: p2m */ + BUG_ON(p2m->pod.entry_count < 0); } } - audit_p2m(d); - p2m_unlock(d->arch.p2m); + audit_p2m(p2m); + p2m_unlock(p2m); return rc; } @@ -2235,7 +2260,7 @@ guest_physmap_add_entry(struct domain *d /* Walk the whole p2m table, changing any entries of the old type * to the new type. 
This is used in hardware-assisted paging to * quickly enable or diable log-dirty tracking */ -void p2m_change_type_global(struct domain *d, p2m_type_t ot, p2m_type_t nt) +void p2m_change_type_global(struct p2m_domain *p2m, p2m_type_t ot, p2m_type_t nt) { unsigned long mfn, gfn, flags; l1_pgentry_t l1e_content; @@ -2248,17 +2273,16 @@ void p2m_change_type_global(struct domai l4_pgentry_t *l4e; unsigned long i4; #endif /* CONFIG_PAGING_LEVELS == 4 */ - struct p2m_domain *p2m = p2m_get_hostp2m(d); BUG_ON(p2m_is_grant(ot) || p2m_is_grant(nt)); - if ( !paging_mode_translate(d) ) + if ( !paging_mode_translate(p2m->domain) ) return; if ( pagetable_get_pfn(p2m_get_pagetable(p2m)) == 0 ) return; - ASSERT(p2m_locked_by_me(d->arch.p2m)); + ASSERT(p2m_locked_by_me(p2m)); #if CONFIG_PAGING_LEVELS == 4 l4e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m)))); @@ -2294,7 +2318,8 @@ void p2m_change_type_global(struct domai gfn = get_gpfn_from_mfn(mfn); flags = p2m_type_to_flags(nt); l1e_content = l1e_from_pfn(mfn, flags | _PAGE_PSE); - paging_write_p2m_entry(d, gfn, (l1_pgentry_t *)&l3e[i3], + paging_write_p2m_entry(p2m->domain, gfn, + (l1_pgentry_t *)&l3e[i3], l3mfn, l1e_content, 3); continue; } @@ -2324,7 +2349,8 @@ void p2m_change_type_global(struct domai * L2_PAGETABLE_ENTRIES) * L1_PAGETABLE_ENTRIES; flags = p2m_type_to_flags(nt); l1e_content = l1e_from_pfn(mfn, flags | _PAGE_PSE); - paging_write_p2m_entry(d, gfn, (l1_pgentry_t *)&l2e[i2], + paging_write_p2m_entry(p2m->domain, gfn, + (l1_pgentry_t *)&l2e[i2], l2mfn, l1e_content, 2); continue; } @@ -2347,7 +2373,7 @@ void p2m_change_type_global(struct domai /* create a new 1le entry with the new type */ flags = p2m_type_to_flags(nt); l1e_content = l1e_from_pfn(mfn, flags); - paging_write_p2m_entry(d, gfn, &l1e[i1], + paging_write_p2m_entry(p2m->domain, gfn, &l1e[i1], l1mfn, l1e_content, 1); } unmap_domain_page(l1e); @@ -2369,7 +2395,7 @@ void p2m_change_type_global(struct domai /* Modify the p2m type of a single gfn from ot to nt, returning the * entry's previous type */ -p2m_type_t p2m_change_type(struct domain *d, unsigned long gfn, +p2m_type_t p2m_change_type(struct p2m_domain *p2m, unsigned long gfn, p2m_type_t ot, p2m_type_t nt) { p2m_type_t pt; @@ -2377,31 +2403,31 @@ p2m_type_t p2m_change_type(struct domain BUG_ON(p2m_is_grant(ot) || p2m_is_grant(nt)); - p2m_lock(d->arch.p2m); - - mfn = gfn_to_mfn(d, gfn, &pt); + p2m_lock(p2m); + + mfn = gfn_to_mfn(p2m, gfn, &pt); if ( pt == ot ) - set_p2m_entry(d, gfn, mfn, 0, nt); - - p2m_unlock(d->arch.p2m); + set_p2m_entry(p2m, gfn, mfn, 0, nt); + + p2m_unlock(p2m); return pt; } int -set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn) +set_mmio_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn) { int rc = 0; p2m_type_t ot; mfn_t omfn; - if ( !paging_mode_translate(d) ) + if ( !paging_mode_translate(p2m->domain) ) return 0; - omfn = gfn_to_mfn_query(d, gfn, &ot); + omfn = gfn_to_mfn_query(p2m, gfn, &ot); if ( p2m_is_grant(ot) ) { - domain_crash(d); + domain_crash(p2m->domain); return 0; } else if ( p2m_is_ram(ot) ) @@ -2411,9 +2437,61 @@ set_mmio_p2m_entry(struct domain *d, uns } P2M_DEBUG("set mmio %lx %lx\n", gfn, mfn_x(mfn)); - p2m_lock(d->arch.p2m); - rc = set_p2m_entry(d, gfn, mfn, 0, p2m_mmio_direct); - p2m_unlock(d->arch.p2m); + p2m_lock(p2m); + rc = set_p2m_entry(p2m, gfn, mfn, 0, p2m_mmio_direct); + p2m_unlock(p2m); + if ( 0 == rc ) + gdprintk(XENLOG_ERR, + "set_mmio_p2m_entry: set_p2m_entry failed! 
mfn=%08lx\n", + mfn_x(gfn_to_mfn(p2m, gfn, &ot))); + return rc; +} + +int +clear_mmio_p2m_entry(struct p2m_domain *p2m, unsigned long gfn) +{ + int rc = 0; + mfn_t mfn; + p2m_type_t t; + + if ( !paging_mode_translate(p2m->domain) ) + return 0; + + mfn = gfn_to_mfn(p2m, gfn, &t); + if ( !mfn_valid(mfn) ) + { + gdprintk(XENLOG_ERR, + "clear_mmio_p2m_entry: gfn_to_mfn failed! gfn=%08lx\n", gfn); + return 0; + } + p2m_lock(p2m); + rc = set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), 0, 0); + p2m_unlock(p2m); + + return rc; +} + +int +set_shared_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn) +{ + int rc = 0; + p2m_type_t ot; + mfn_t omfn; + struct domain *d = p2m->domain; + + if ( !paging_mode_translate(d) ) + return 0; + + omfn = gfn_to_mfn_query(p2m, gfn, &ot); + /* At the moment we only allow p2m change if gfn has already been made + * sharable first */ + ASSERT(p2m_is_shared(ot)); + ASSERT(mfn_valid(omfn)); + /* XXX: M2P translations have to be handled properly for shared pages */ + set_gpfn_from_mfn(mfn_x(omfn), INVALID_M2P_ENTRY); + + P2M_DEBUG("set shared %lx %lx\n", gfn, mfn_x(mfn)); + rc = set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_shared); if ( 0 == rc ) gdprintk(XENLOG_ERR, "set_mmio_p2m_entry: set_p2m_entry failed! mfn=%08lx\n", @@ -2421,64 +2499,14 @@ set_mmio_p2m_entry(struct domain *d, uns return rc; } -int -clear_mmio_p2m_entry(struct domain *d, unsigned long gfn) -{ - int rc = 0; - unsigned long mfn; - - if ( !paging_mode_translate(d) ) - return 0; - - mfn = gmfn_to_mfn(d, gfn); - if ( INVALID_MFN == mfn ) - { - gdprintk(XENLOG_ERR, - "clear_mmio_p2m_entry: gfn_to_mfn failed! gfn=%08lx\n", gfn); - return 0; - } - p2m_lock(d->arch.p2m); - rc = set_p2m_entry(d, gfn, _mfn(INVALID_MFN), 0, 0); - p2m_unlock(d->arch.p2m); - - return rc; -} - -int -set_shared_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn) -{ - int rc = 0; - p2m_type_t ot; - mfn_t omfn; - - if ( !paging_mode_translate(d) ) - return 0; - - omfn = gfn_to_mfn_query(d, gfn, &ot); - /* At the moment we only allow p2m change if gfn has already been made - * sharable first */ - ASSERT(p2m_is_shared(ot)); - ASSERT(mfn_valid(omfn)); - /* XXX: M2P translations have to be handled properly for shared pages */ - set_gpfn_from_mfn(mfn_x(omfn), INVALID_M2P_ENTRY); - - P2M_DEBUG("set shared %lx %lx\n", gfn, mfn_x(mfn)); - rc = set_p2m_entry(d, gfn, mfn, 0, p2m_ram_shared); - if ( 0 == rc ) - gdprintk(XENLOG_ERR, - "set_mmio_p2m_entry: set_p2m_entry failed! 
mfn=%08lx\n", - gmfn_to_mfn(d, gfn)); - return rc; -} - -int p2m_mem_paging_nominate(struct domain *d, unsigned long gfn) +int p2m_mem_paging_nominate(struct p2m_domain *p2m, unsigned long gfn) { struct page_info *page; p2m_type_t p2mt; mfn_t mfn; int ret; - mfn = gfn_to_mfn(d, gfn, &p2mt); + mfn = gfn_to_mfn(p2m, gfn, &p2mt); /* Check if mfn is valid */ ret = -EINVAL; @@ -2504,9 +2532,9 @@ int p2m_mem_paging_nominate(struct domai goto out; /* Fix p2m entry */ - p2m_lock(d->arch.p2m); - set_p2m_entry(d, gfn, mfn, 0, p2m_ram_paging_out); - p2m_unlock(d->arch.p2m); + p2m_lock(p2m); + set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_paging_out); + p2m_unlock(p2m); ret = 0; @@ -2514,14 +2542,15 @@ int p2m_mem_paging_nominate(struct domai return ret; } -int p2m_mem_paging_evict(struct domain *d, unsigned long gfn) +int p2m_mem_paging_evict(struct p2m_domain *p2m, unsigned long gfn) { struct page_info *page; p2m_type_t p2mt; mfn_t mfn; + struct domain *d = p2m->domain; /* Get mfn */ - mfn = gfn_to_mfn(d, gfn, &p2mt); + mfn = gfn_to_mfn(p2m, gfn, &p2mt); if ( unlikely(!mfn_valid(mfn)) ) return -EINVAL; @@ -2539,9 +2568,9 @@ int p2m_mem_paging_evict(struct domain * put_page(page); /* Remove mapping from p2m table */ - p2m_lock(d->arch.p2m); - set_p2m_entry(d, gfn, _mfn(PAGING_MFN), 0, p2m_ram_paged); - p2m_unlock(d->arch.p2m); + p2m_lock(p2m); + set_p2m_entry(p2m, gfn, _mfn(PAGING_MFN), 0, p2m_ram_paged); + p2m_unlock(p2m); /* Put the page back so it gets freed */ put_page(page); @@ -2549,11 +2578,12 @@ int p2m_mem_paging_evict(struct domain * return 0; } -void p2m_mem_paging_populate(struct domain *d, unsigned long gfn) +void p2m_mem_paging_populate(struct p2m_domain *p2m, unsigned long gfn) { struct vcpu *v = current; mem_event_request_t req; p2m_type_t p2mt; + struct domain *d = p2m->domain; memset(&req, 0, sizeof(req)); @@ -2564,12 +2594,12 @@ void p2m_mem_paging_populate(struct doma /* Fix p2m mapping */ /* XXX: It seems inefficient to have this here, as it's only needed * in one case (ept guest accessing paging out page) */ - gfn_to_mfn(d, gfn, &p2mt); + gfn_to_mfn(p2m, gfn, &p2mt); if ( p2mt != p2m_ram_paging_out ) { - p2m_lock(d->arch.p2m); - set_p2m_entry(d, gfn, _mfn(PAGING_MFN), 0, p2m_ram_paging_in_start); - p2m_unlock(d->arch.p2m); + p2m_lock(p2m); + set_p2m_entry(p2m, gfn, _mfn(PAGING_MFN), 0, p2m_ram_paging_in_start); + p2m_unlock(p2m); } /* Pause domain */ @@ -2587,25 +2617,26 @@ void p2m_mem_paging_populate(struct doma mem_event_put_request(d, &req); } -int p2m_mem_paging_prep(struct domain *d, unsigned long gfn) +int p2m_mem_paging_prep(struct p2m_domain *p2m, unsigned long gfn) { struct page_info *page; /* Get a free page */ - page = alloc_domheap_page(d, 0); + page = alloc_domheap_page(p2m->domain, 0); if ( unlikely(page == NULL) ) return -EINVAL; /* Fix p2m mapping */ - p2m_lock(d->arch.p2m); - set_p2m_entry(d, gfn, page_to_mfn(page), 0, p2m_ram_paging_in); - p2m_unlock(d->arch.p2m); + p2m_lock(p2m); + set_p2m_entry(p2m, gfn, page_to_mfn(page), 0, p2m_ram_paging_in); + p2m_unlock(p2m); return 0; } -void p2m_mem_paging_resume(struct domain *d) +void p2m_mem_paging_resume(struct p2m_domain *p2m) { + struct domain *d = p2m->domain; mem_event_response_t rsp; p2m_type_t p2mt; mfn_t mfn; @@ -2614,10 +2645,10 @@ void p2m_mem_paging_resume(struct domain mem_event_get_response(d, &rsp); /* Fix p2m entry */ - mfn = gfn_to_mfn(d, rsp.gfn, &p2mt); - p2m_lock(d->arch.p2m); - set_p2m_entry(d, rsp.gfn, mfn, 0, p2m_ram_rw); - p2m_unlock(d->arch.p2m); + mfn = gfn_to_mfn(p2m, rsp.gfn, &p2mt); + 
p2m_lock(p2m); + set_p2m_entry(p2m, rsp.gfn, mfn, 0, p2m_ram_rw); + p2m_unlock(p2m); /* Unpause domain */ if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED ) @@ -2627,7 +2658,6 @@ void p2m_mem_paging_resume(struct domain mem_event_unpause_vcpus(d); } - /* * Local variables: * mode: C diff -r f96f6f2cd19a -r 3f48b73b0a30 xen/arch/x86/mm/shadow/common.c --- a/xen/arch/x86/mm/shadow/common.c +++ b/xen/arch/x86/mm/shadow/common.c @@ -1712,8 +1712,9 @@ sh_alloc_p2m_pages(struct domain *d) // Returns 0 if no memory is available... static struct page_info * -shadow_alloc_p2m_page(struct domain *d) +shadow_alloc_p2m_page(struct p2m_domain *p2m) { + struct domain *d = p2m->domain; struct page_info *pg; mfn_t mfn; void *p; @@ -1739,8 +1740,9 @@ shadow_alloc_p2m_page(struct domain *d) } static void -shadow_free_p2m_page(struct domain *d, struct page_info *pg) +shadow_free_p2m_page(struct p2m_domain *p2m, struct page_info *pg) { + struct domain *d = p2m->domain; ASSERT(page_get_owner(pg) == d); /* Should have just the one ref we gave it in alloc_p2m_page() */ if ( (pg->count_info & PGC_count_mask) != 1 ) @@ -3082,6 +3084,7 @@ int shadow_enable(struct domain *d, u32 struct page_info *pg = NULL; uint32_t *e; int i, rv = 0; + struct p2m_domain *p2m = p2m_get_hostp2m(d); mode |= PG_SH_enable; @@ -3117,7 +3120,8 @@ int shadow_enable(struct domain *d, u32 * to avoid possible deadlock. */ if ( mode & PG_translate ) { - rv = p2m_alloc_table(d, shadow_alloc_p2m_page, shadow_free_p2m_page); + rv = p2m_alloc_table(p2m, + shadow_alloc_p2m_page, shadow_free_p2m_page); if (rv != 0) goto out_unlocked; } @@ -3128,7 +3132,7 @@ int shadow_enable(struct domain *d, u32 { /* Get a single page from the shadow pool. Take it via the * P2M interface to make freeing it simpler afterwards. */ - pg = shadow_alloc_p2m_page(d); + pg = shadow_alloc_p2m_page(p2m); if ( pg == NULL ) { rv = -ENOMEM; @@ -3177,10 +3181,10 @@ int shadow_enable(struct domain *d, u32 out_locked: shadow_unlock(d); out_unlocked: - if ( rv != 0 && !pagetable_is_null(p2m_get_pagetable(p2m_get_hostp2m(d))) ) - p2m_teardown(d); + if ( rv != 0 && !pagetable_is_null(p2m_get_pagetable(p2m)) ) + p2m_teardown(p2m); if ( rv != 0 && pg != NULL ) - shadow_free_p2m_page(d, pg); + shadow_free_p2m_page(p2m, pg); domain_unpause(d); return rv; } @@ -3192,6 +3196,7 @@ void shadow_teardown(struct domain *d) struct vcpu *v; mfn_t mfn; struct page_info *pg; + struct p2m_domain *p2m = p2m_get_hostp2m(d); ASSERT(d->is_dying); ASSERT(d != current->domain); @@ -3243,7 +3248,7 @@ void shadow_teardown(struct domain *d) #endif /* (SHADOW_OPTIMIZATIONS & (SHOPT_VIRTUAL_TLB|SHOPT_OUT_OF_SYNC)) */ while ( (pg = page_list_remove_head(&d->arch.paging.shadow.p2m_freelist)) ) - shadow_free_p2m_page(d, pg); + shadow_free_p2m_page(p2m, pg); if ( d->arch.paging.shadow.total_pages != 0 ) { @@ -3277,7 +3282,7 @@ void shadow_teardown(struct domain *d) if ( !hvm_paging_enabled(v) ) v->arch.guest_table = pagetable_null(); } - shadow_free_p2m_page(d, + shadow_free_p2m_page(p2m, pagetable_get_page(d->arch.paging.shadow.unpaged_pagetable)); d->arch.paging.shadow.unpaged_pagetable = pagetable_null(); } @@ -3314,7 +3319,7 @@ void shadow_final_teardown(struct domain shadow_teardown(d); /* It is now safe to pull down the p2m map. */ - p2m_teardown(d); + p2m_teardown(p2m_get_hostp2m(d)); SHADOW_PRINTK("dom %u final teardown done." 
" Shadow pages total = %u, free = %u, p2m=%u\n", @@ -3630,10 +3635,11 @@ int shadow_track_dirty_vram(struct domai unsigned long i; p2m_type_t t; struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram; + struct p2m_domain *p2m = p2m_get_hostp2m(d); if (end_pfn < begin_pfn - || begin_pfn > d->arch.p2m->max_mapped_pfn - || end_pfn >= d->arch.p2m->max_mapped_pfn) + || begin_pfn > p2m->max_mapped_pfn + || end_pfn >= p2m->max_mapped_pfn) return -EINVAL; shadow_lock(d); @@ -3702,7 +3708,7 @@ int shadow_track_dirty_vram(struct domai /* Iterate over VRAM to track dirty bits. */ for ( i = 0; i < nr; i++ ) { - mfn_t mfn = gfn_to_mfn(d, begin_pfn + i, &t); + mfn_t mfn = gfn_to_mfn(p2m, begin_pfn + i, &t); struct page_info *page; int dirty = 0; paddr_t sl1ma = dirty_vram->sl1ma[i]; @@ -3787,7 +3793,7 @@ int shadow_track_dirty_vram(struct domai /* was clean for more than two seconds, try to disable guest * write access */ for ( i = begin_pfn; i < end_pfn; i++ ) { - mfn_t mfn = gfn_to_mfn(d, i, &t); + mfn_t mfn = gfn_to_mfn(p2m, i, &t); if (mfn_x(mfn) != INVALID_MFN) flush_tlb |= sh_remove_write_access(d->vcpu[0], mfn, 1, 0); } diff -r f96f6f2cd19a -r 3f48b73b0a30 xen/arch/x86/mm/shadow/multi.c --- a/xen/arch/x86/mm/shadow/multi.c +++ b/xen/arch/x86/mm/shadow/multi.c @@ -2231,6 +2231,7 @@ static int validate_gl4e(struct vcpu *v, shadow_l4e_t *sl4p = se; mfn_t sl3mfn = _mfn(INVALID_MFN); struct domain *d = v->domain; + struct p2m_domain *p2m = p2m_get_hostp2m(d); p2m_type_t p2mt; int result = 0; @@ -2239,7 +2240,7 @@ static int validate_gl4e(struct vcpu *v, if ( guest_l4e_get_flags(new_gl4e) & _PAGE_PRESENT ) { gfn_t gl3gfn = guest_l4e_get_gfn(new_gl4e); - mfn_t gl3mfn = gfn_to_mfn_query(d, gl3gfn, &p2mt); + mfn_t gl3mfn = gfn_to_mfn_query(p2m, gl3gfn, &p2mt); if ( p2m_is_ram(p2mt) ) sl3mfn = get_shadow_status(v, gl3mfn, SH_type_l3_shadow); else if ( p2mt != p2m_populate_on_demand ) @@ -2290,13 +2291,14 @@ static int validate_gl3e(struct vcpu *v, mfn_t sl2mfn = _mfn(INVALID_MFN); p2m_type_t p2mt; int result = 0; + struct p2m_domain *p2m = p2m_get_hostp2m(v->domain); perfc_incr(shadow_validate_gl3e_calls); if ( guest_l3e_get_flags(new_gl3e) & _PAGE_PRESENT ) { gfn_t gl2gfn = guest_l3e_get_gfn(new_gl3e); - mfn_t gl2mfn = gfn_to_mfn_query(v->domain, gl2gfn, &p2mt); + mfn_t gl2mfn = gfn_to_mfn_query(p2m, gl2gfn, &p2mt); if ( p2m_is_ram(p2mt) ) sl2mfn = get_shadow_status(v, gl2mfn, SH_type_l2_shadow); else if ( p2mt != p2m_populate_on_demand ) @@ -2320,6 +2322,7 @@ static int validate_gl2e(struct vcpu *v, guest_l2e_t new_gl2e = *(guest_l2e_t *)new_ge; shadow_l2e_t *sl2p = se; mfn_t sl1mfn = _mfn(INVALID_MFN); + struct p2m_domain *p2m = p2m_get_hostp2m(v->domain); p2m_type_t p2mt; int result = 0; @@ -2345,7 +2348,7 @@ static int validate_gl2e(struct vcpu *v, } else { - mfn_t gl1mfn = gfn_to_mfn_query(v->domain, gl1gfn, &p2mt); + mfn_t gl1mfn = gfn_to_mfn_query(p2m, gl1gfn, &p2mt); if ( p2m_is_ram(p2mt) ) sl1mfn = get_shadow_status(v, gl1mfn, SH_type_l1_shadow); else if ( p2mt != p2m_populate_on_demand ) @@ -2406,6 +2409,7 @@ static int validate_gl1e(struct vcpu *v, shadow_l1e_t *sl1p = se; gfn_t gfn; mfn_t gmfn; + struct p2m_domain *p2m = p2m_get_hostp2m(v->domain); p2m_type_t p2mt; int result = 0; #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) @@ -2415,7 +2419,7 @@ static int validate_gl1e(struct vcpu *v, perfc_incr(shadow_validate_gl1e_calls); gfn = guest_l1e_get_gfn(new_gl1e); - gmfn = gfn_to_mfn_query(v->domain, gfn, &p2mt); + gmfn = gfn_to_mfn_query(p2m, gfn, &p2mt); l1e_propagate_from_guest(v, 
new_gl1e, gmfn, &new_sl1e, ft_prefetch, p2mt); result |= shadow_set_l1e(v, sl1p, new_sl1e, p2mt, sl1mfn); @@ -2475,7 +2479,7 @@ void sh_resync_l1(struct vcpu *v, mfn_t shadow_l1e_t nsl1e; gfn = guest_l1e_get_gfn(gl1e); - gmfn = gfn_to_mfn_query(v->domain, gfn, &p2mt); + gmfn = gfn_to_mfn_query(p2m_get_hostp2m(v->domain), gfn, &p2mt); l1e_propagate_from_guest(v, gl1e, gmfn, &nsl1e, ft_prefetch, p2mt); rc |= shadow_set_l1e(v, sl1p, nsl1e, p2mt, sl1mfn); @@ -2791,7 +2795,7 @@ static void sh_prefetch(struct vcpu *v, /* Look at the gfn that the l1e is pointing at */ gfn = guest_l1e_get_gfn(gl1e); - gmfn = gfn_to_mfn_query(v->domain, gfn, &p2mt); + gmfn = gfn_to_mfn_query(p2m_get_hostp2m(v->domain), gfn, &p2mt); /* Propagate the entry. */ l1e_propagate_from_guest(v, gl1e, gmfn, &sl1e, ft_prefetch, p2mt); @@ -3147,7 +3151,7 @@ static int sh_page_fault(struct vcpu *v, /* What mfn is the guest trying to access? */ gfn = guest_l1e_get_gfn(gw.l1e); - gmfn = gfn_to_mfn_guest(d, gfn, &p2mt); + gmfn = gfn_to_mfn_guest(p2m_get_hostp2m(d), gfn, &p2mt); if ( shadow_mode_refcounts(d) && ((!p2m_is_valid(p2mt) && !p2m_is_grant(p2mt)) || @@ -4219,7 +4223,7 @@ sh_update_cr3(struct vcpu *v, int do_loc if ( guest_l3e_get_flags(gl3e[i]) & _PAGE_PRESENT ) { gl2gfn = guest_l3e_get_gfn(gl3e[i]); - gl2mfn = gfn_to_mfn_query(d, gl2gfn, &p2mt); + gl2mfn = gfn_to_mfn_query(p2m_get_hostp2m(d), gl2gfn, &p2mt); if ( p2m_is_ram(p2mt) ) flush |= sh_remove_write_access(v, gl2mfn, 2, 0); } @@ -4232,7 +4236,7 @@ sh_update_cr3(struct vcpu *v, int do_loc if ( guest_l3e_get_flags(gl3e[i]) & _PAGE_PRESENT ) { gl2gfn = guest_l3e_get_gfn(gl3e[i]); - gl2mfn = gfn_to_mfn_query(d, gl2gfn, &p2mt); + gl2mfn = gfn_to_mfn_query(p2m_get_hostp2m(d), gl2gfn, &p2mt); if ( p2m_is_ram(p2mt) ) sh_set_toplevel_shadow(v, i, gl2mfn, (i == 3) ? SH_type_l2h_shadow @@ -4614,6 +4618,7 @@ static mfn_t emulate_gva_to_mfn(struct v mfn_t mfn; p2m_type_t p2mt; uint32_t pfec = PFEC_page_present | PFEC_write_access; + struct p2m_domain *p2m = p2m_get_hostp2m(v->domain); /* Translate the VA to a GFN */ gfn = sh_gva_to_gfn(v, vaddr, &pfec); @@ -4629,9 +4634,9 @@ static mfn_t emulate_gva_to_mfn(struct v /* Translate the GFN to an MFN */ /* PoD: query only if shadow lock is held (to avoid deadlock) */ if ( shadow_locked_by_me(v->domain) ) - mfn = gfn_to_mfn_query(v->domain, _gfn(gfn), &p2mt); + mfn = gfn_to_mfn_query(p2m, _gfn(gfn), &p2mt); else - mfn = gfn_to_mfn(v->domain, _gfn(gfn), &p2mt); + mfn = gfn_to_mfn(p2m, _gfn(gfn), &p2mt); if ( p2m_is_readonly(p2mt) ) return _mfn(READONLY_GFN); @@ -5036,7 +5041,7 @@ int sh_audit_l1_table(struct vcpu *v, mf { gfn = guest_l1e_get_gfn(*gl1e); mfn = shadow_l1e_get_mfn(*sl1e); - gmfn = gfn_to_mfn_query(v->domain, gfn, &p2mt); + gmfn = gfn_to_mfn_query(p2m_get_hostp2m(v->domain), gfn, &p2mt); if ( !p2m_is_grant(p2mt) && mfn_x(gmfn) != mfn_x(mfn) ) AUDIT_FAIL(1, "bad translation: gfn %" SH_PRI_gfn " --> %" PRI_mfn " != mfn %" PRI_mfn, @@ -5080,6 +5085,7 @@ int sh_audit_l2_table(struct vcpu *v, mf shadow_l2e_t *sl2e; mfn_t mfn, gmfn, gl2mfn; gfn_t gfn; + struct p2m_domain *p2m = p2m_get_hostp2m(v->domain); p2m_type_t p2mt; char *s; int done = 0; @@ -5106,7 +5112,7 @@ int sh_audit_l2_table(struct vcpu *v, mf mfn = shadow_l2e_get_mfn(*sl2e); gmfn = (guest_l2e_get_flags(*gl2e) & _PAGE_PSE) ? 
get_fl1_shadow_status(v, gfn) - : get_shadow_status(v, gfn_to_mfn_query(v->domain, gfn, &p2mt), + : get_shadow_status(v, gfn_to_mfn_query(p2m, gfn, &p2mt), SH_type_l1_shadow); if ( mfn_x(gmfn) != mfn_x(mfn) ) AUDIT_FAIL(2, "bad translation: gfn %" SH_PRI_gfn @@ -5114,8 +5120,8 @@ int sh_audit_l2_table(struct vcpu *v, mf " --> %" PRI_mfn " != mfn %" PRI_mfn, gfn_x(gfn), (guest_l2e_get_flags(*gl2e) & _PAGE_PSE) ? 0 - : mfn_x(gfn_to_mfn_query(v->domain, gfn, &p2mt)), - mfn_x(gmfn), mfn_x(mfn)); + : mfn_x(gfn_to_mfn_query(p2m, + gfn, &p2mt)), mfn_x(gmfn), mfn_x(mfn)); } }); sh_unmap_domain_page(gp); @@ -5153,7 +5159,7 @@ int sh_audit_l3_table(struct vcpu *v, mf { gfn = guest_l3e_get_gfn(*gl3e); mfn = shadow_l3e_get_mfn(*sl3e); - gmfn = get_shadow_status(v, gfn_to_mfn_query(v->domain, gfn, &p2mt), + gmfn = get_shadow_status(v, gfn_to_mfn_query(p2m_get_hostp2m(v->domain), gfn, &p2mt), ((GUEST_PAGING_LEVELS == 3 || is_pv_32on64_vcpu(v)) && !shadow_mode_external(v->domain) @@ -5200,7 +5206,8 @@ int sh_audit_l4_table(struct vcpu *v, mf { gfn = guest_l4e_get_gfn(*gl4e); mfn = shadow_l4e_get_mfn(*sl4e); - gmfn = get_shadow_status(v, gfn_to_mfn_query(v->domain, gfn, &p2mt), + gmfn = get_shadow_status(v, gfn_to_mfn_query(p2m_get_hostp2m(v->domain), + gfn, &p2mt), SH_type_l3_shadow); if ( mfn_x(gmfn) != mfn_x(mfn) ) AUDIT_FAIL(4, "bad translation: gfn %" SH_PRI_gfn diff -r f96f6f2cd19a -r 3f48b73b0a30 xen/common/grant_table.c --- a/xen/common/grant_table.c +++ b/xen/common/grant_table.c @@ -109,7 +109,7 @@ static unsigned inline int max_nr_maptra #define gfn_to_mfn_private(_d, _gfn) ({ \ p2m_type_t __p2mt; \ unsigned long __x; \ - __x = mfn_x(gfn_to_mfn_unshare(_d, _gfn, &__p2mt, 1)); \ + __x = mfn_x(gfn_to_mfn_unshare(p2m_get_hostp2m(_d), _gfn, &__p2mt, 1)); \ if ( !p2m_is_valid(__p2mt) ) \ __x = INVALID_MFN; \ __x; }) @@ -1059,7 +1059,7 @@ gnttab_unpopulate_status_frames(struct d for ( i = 0; i < nr_status_frames(gt); i++ ) { - page_set_owner(virt_to_page(gt->status[i]), dom_xen); + page_set_owner(virt_to_page(gt->status[i]), p2m_get_hostp2m(dom_xen)); free_xenheap_page(gt->status[i]); gt->status[i] = NULL; } @@ -1498,7 +1498,7 @@ gnttab_transfer( if ( unlikely(e->tot_pages++ == 0) ) get_knownalive_domain(e); page_list_add_tail(page, &e->page_list); - page_set_owner(page, e); + page_set_owner(page, p2m_get_hostp2m(e)); spin_unlock(&e->page_alloc_lock); @@ -1893,12 +1893,13 @@ __gnttab_copy( { #ifdef CONFIG_X86 p2m_type_t p2mt; - s_frame = mfn_x(gfn_to_mfn(sd, op->source.u.gmfn, &p2mt)); + struct p2m_domain *p2m = p2m_get_hostp2m(sd); + s_frame = mfn_x(gfn_to_mfn(p2m, op->source.u.gmfn, &p2mt)); if ( !p2m_is_valid(p2mt) ) s_frame = INVALID_MFN; if ( p2m_is_paging(p2mt) ) { - p2m_mem_paging_populate(sd, op->source.u.gmfn); + p2m_mem_paging_populate(p2m, op->source.u.gmfn); rc = -ENOENT; goto error_out; } @@ -1939,12 +1940,13 @@ __gnttab_copy( { #ifdef CONFIG_X86 p2m_type_t p2mt; - d_frame = mfn_x(gfn_to_mfn_unshare(dd, op->dest.u.gmfn, &p2mt, 1)); + struct p2m_domain *p2m = p2m_get_hostp2m(dd); + d_frame = mfn_x(gfn_to_mfn_unshare(p2m, op->dest.u.gmfn, &p2mt, 1)); if ( !p2m_is_valid(p2mt) ) d_frame = INVALID_MFN; if ( p2m_is_paging(p2mt) ) { - p2m_mem_paging_populate(dd, op->dest.u.gmfn); + p2m_mem_paging_populate(p2m, op->dest.u.gmfn); rc = -ENOENT; goto error_out; } diff -r f96f6f2cd19a -r 3f48b73b0a30 xen/common/memory.c --- a/xen/common/memory.c +++ b/xen/common/memory.c @@ -161,7 +161,7 @@ int guest_remove_page(struct domain *d, unsigned long mfn; #ifdef CONFIG_X86 - mfn = mfn_x(gfn_to_mfn(d, gmfn, 
&p2mt)); + mfn = mfn_x(gfn_to_mfn(p2m_get_hostp2m(d), gmfn, &p2mt)); #else mfn = gmfn_to_mfn(d, gmfn); #endif @@ -359,7 +359,7 @@ static long memory_exchange(XEN_GUEST_HA p2m_type_t p2mt; /* Shared pages cannot be exchanged */ - mfn = mfn_x(gfn_to_mfn_unshare(d, gmfn + k, &p2mt, 0)); + mfn = mfn_x(gfn_to_mfn_unshare(p2m_get_hostp2m(d), gmfn + k, &p2mt, 0)); if ( p2m_is_shared(p2mt) ) { rc = -ENOMEM; diff -r f96f6f2cd19a -r 3f48b73b0a30 xen/common/tmem_xen.c --- a/xen/common/tmem_xen.c +++ b/xen/common/tmem_xen.c @@ -99,7 +99,7 @@ static inline void *cli_mfn_to_va(tmem_c unsigned long cli_mfn; p2m_type_t t; - cli_mfn = mfn_x(gfn_to_mfn(current->domain, cmfn, &t)); + cli_mfn = mfn_x(gfn_to_mfn(p2m_get_hostp2m(current->domain), cmfn, &t)); if (t != p2m_ram_rw) return NULL; if (pcli_mfn != NULL) diff -r f96f6f2cd19a -r 3f48b73b0a30 xen/include/asm-x86/mem_sharing.h --- a/xen/include/asm-x86/mem_sharing.h +++ b/xen/include/asm-x86/mem_sharing.h @@ -23,22 +23,22 @@ #define __MEM_SHARING_H__ #define sharing_supported(_d) \ - (is_hvm_domain(_d) && (_d)->arch.hvm_domain.hap_enabled) + (is_hvm_domain(_d) && paging_mode_hap(_d)) typedef uint64_t shr_handle_t; unsigned int mem_sharing_get_nr_saved_mfns(void); -int mem_sharing_nominate_page(struct domain *d, +int mem_sharing_nominate_page(struct p2m_domain *p2m, unsigned long gfn, int expected_refcnt, shr_handle_t *phandle); #define MEM_SHARING_MUST_SUCCEED (1<<0) #define MEM_SHARING_DESTROY_GFN (1<<1) -int mem_sharing_unshare_page(struct domain *d, +int mem_sharing_unshare_page(struct p2m_domain *p2m, unsigned long gfn, uint16_t flags); int mem_sharing_sharing_resume(struct domain *d); -int mem_sharing_cache_resize(struct domain *d, int new_size); +int mem_sharing_cache_resize(struct p2m_domain *p2m, int new_size); int mem_sharing_domctl(struct domain *d, xen_domctl_mem_sharing_op_t *mec); void mem_sharing_init(void); diff -r f96f6f2cd19a -r 3f48b73b0a30 xen/include/asm-x86/p2m.h --- a/xen/include/asm-x86/p2m.h +++ b/xen/include/asm-x86/p2m.h @@ -170,20 +170,24 @@ struct p2m_domain { /* Shadow translated domain: p2m mapping */ pagetable_t phys_table; + struct domain *domain; /* back pointer to domain */ + /* Pages used to construct the p2m */ struct page_list_head pages; /* Functions to call to get or free pages for the p2m */ - struct page_info * (*alloc_page )(struct domain *d); - void (*free_page )(struct domain *d, + struct page_info * (*alloc_page )(struct p2m_domain *p2m); + void (*free_page )(struct p2m_domain *p2m, struct page_info *pg); - int (*set_entry )(struct domain *d, unsigned long gfn, + int (*set_entry )(struct p2m_domain *p2m, + unsigned long gfn, mfn_t mfn, unsigned int page_order, p2m_type_t p2mt); - mfn_t (*get_entry )(struct domain *d, unsigned long gfn, + mfn_t (*get_entry )(struct p2m_domain *p2m, + unsigned long gfn, p2m_type_t *p2mt, p2m_query_t q); - void (*change_entry_type_global)(struct domain *d, + void (*change_entry_type_global)(struct p2m_domain *p2m, p2m_type_t ot, p2m_type_t nt); @@ -217,9 +221,9 @@ struct p2m_domain { }; /* get host p2m table */ -#define p2m_get_hostp2m(d) ((d)->arch.p2m) +#define p2m_get_hostp2m(d) ((d)->arch.p2m) -#define p2m_get_pagetable(p2m) ((p2m)->phys_table) +#define p2m_get_pagetable(p2m) ((p2m)->phys_table) /* * The P2M lock. This protects all updates to the p2m table. @@ -273,37 +277,37 @@ static inline p2m_type_t p2m_flags_to_ty #endif } -/* Read another domain's P2M table, mapping pages as we go. +/* Read P2M table, mapping pages as we go. * Do not populate PoD pages. 
*/ -static inline -mfn_t gfn_to_mfn_type_foreign(struct domain *d, unsigned long gfn, p2m_type_t *t, - p2m_query_t q) +static inline mfn_t +gfn_to_mfn_type_p2m(struct p2m_domain *p2m, unsigned long gfn, + p2m_type_t *t, p2m_query_t q) { - return d->arch.p2m->get_entry(d, gfn, t, q); + return p2m->get_entry(p2m, gfn, t, q); } + /* General conversion function from gfn to mfn */ -static inline mfn_t _gfn_to_mfn_type(struct domain *d, +static inline mfn_t _gfn_to_mfn_type(struct p2m_domain *p2m, unsigned long gfn, p2m_type_t *t, p2m_query_t q) { - if ( !paging_mode_translate(d) ) + if ( !p2m || !paging_mode_translate(p2m->domain) ) { /* Not necessarily true, but for non-translated guests, we claim * it's the most generic kind of memory */ *t = p2m_ram_rw; return _mfn(gfn); } - return gfn_to_mfn_type_foreign(d, gfn, t, q); + + return gfn_to_mfn_type_p2m(p2m, gfn, t, q); } -#define gfn_to_mfn(d, g, t) _gfn_to_mfn_type((d), (g), (t), p2m_alloc) -#define gfn_to_mfn_query(d, g, t) _gfn_to_mfn_type((d), (g), (t), p2m_query) -#define gfn_to_mfn_guest(d, g, t) _gfn_to_mfn_type((d), (g), (t), p2m_guest) +#define gfn_to_mfn(p2m, g, t) _gfn_to_mfn_type((p2m), (g), (t), p2m_alloc) +#define gfn_to_mfn_query(p2m, g, t) _gfn_to_mfn_type((p2m), (g), (t), p2m_query) +#define gfn_to_mfn_guest(p2m, g, t) _gfn_to_mfn_type((p2m), (g), (t), p2m_guest) -#define gfn_to_mfn_foreign(d, g, t) gfn_to_mfn_type_foreign((d), (g), (t), p2m_alloc) - -static inline mfn_t gfn_to_mfn_unshare(struct domain *d, +static inline mfn_t gfn_to_mfn_unshare(struct p2m_domain *p2m, unsigned long gfn, p2m_type_t *p2mt, int must_succeed) @@ -311,29 +315,28 @@ static inline mfn_t gfn_to_mfn_unshare(s mfn_t mfn; int ret; - mfn = gfn_to_mfn(d, gfn, p2mt); + mfn = gfn_to_mfn(p2m, gfn, p2mt); if(p2m_is_shared(*p2mt)) { - ret = mem_sharing_unshare_page(d, gfn, + ret = mem_sharing_unshare_page(p2m, gfn, must_succeed ? MEM_SHARING_MUST_SUCCEED : 0); if(ret < 0) { BUG_ON(must_succeed); return mfn; } - mfn = gfn_to_mfn(d, gfn, p2mt); + mfn = gfn_to_mfn(p2m, gfn, p2mt); } return mfn; } - /* Compatibility function exporting the old untyped interface */ static inline unsigned long gmfn_to_mfn(struct domain *d, unsigned long gpfn) { mfn_t mfn; p2m_type_t t; - mfn = gfn_to_mfn(d, gpfn, &t); + mfn = gfn_to_mfn(d->arch.p2m, gpfn, &t); if ( p2m_is_valid(t) ) return mfn_x(mfn); return INVALID_MFN; @@ -357,16 +360,16 @@ int p2m_init(struct domain *d); * build the p2m, and to release it again at the end of day. * * Returns 0 for success or -errno. */ -int p2m_alloc_table(struct domain *d, - struct page_info * (*alloc_page)(struct domain *d), - void (*free_page)(struct domain *d, struct page_info *pg)); +int p2m_alloc_table(struct p2m_domain *p2m, + struct page_info * (*alloc_page)(struct p2m_domain *p2m), + void (*free_page)(struct p2m_domain *p2m, struct page_info *pg)); /* Return all the p2m resources to Xen. 
-void p2m_teardown(struct domain *d);
+void p2m_teardown(struct p2m_domain *p2m);
 void p2m_final_teardown(struct domain *d);
 
 /* Dump PoD information about the domain */
-void p2m_pod_dump_data(struct domain *d);
+void p2m_pod_dump_data(struct p2m_domain *p2m);
 
 /* Move all pages from the populate-on-demand cache to the domain page_list
  * (usually in preparation for domain destruction) */
@@ -385,15 +388,19 @@ p2m_pod_decrease_reservation(struct doma
 
 /* Called by p2m code when demand-populating a PoD page */
 int
-p2m_pod_demand_populate(struct domain *d, unsigned long gfn,
+p2m_pod_demand_populate(struct p2m_domain *p2m, unsigned long gfn,
                         unsigned int order,
                         p2m_query_t q);
 
 /* Add a page to a domain's p2m table */
-int guest_physmap_add_entry(struct domain *d, unsigned long gfn,
+int guest_physmap_add_entry(struct p2m_domain *p2m, unsigned long gfn,
                             unsigned long mfn, unsigned int page_order,
                             p2m_type_t t);
 
+/* Remove a page from a domain's p2m table */
+void guest_physmap_remove_entry(struct p2m_domain *p2m, unsigned long gfn,
+                            unsigned long mfn, unsigned int page_order);
+
 /* Set a p2m range as populate-on-demand */
 int guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn,
                                           unsigned int order);
@@ -402,44 +409,49 @@ int guest_physmap_mark_populate_on_deman
  *
  * Return 0 for success
  */
-static inline int guest_physmap_add_page(struct domain *d, unsigned long gfn,
+static inline int guest_physmap_add_page(struct domain *d,
+                                          unsigned long gfn,
                                           unsigned long mfn,
                                           unsigned int page_order)
 {
-    return guest_physmap_add_entry(d, gfn, mfn, page_order, p2m_ram_rw);
+    return guest_physmap_add_entry(d->arch.p2m, gfn, mfn, page_order, p2m_ram_rw);
 }
 
 /* Remove a page from a domain's p2m table */
-void guest_physmap_remove_page(struct domain *d, unsigned long gfn,
-                               unsigned long mfn, unsigned int page_order);
+static inline void guest_physmap_remove_page(struct domain *d,
+                              unsigned long gfn,
+                              unsigned long mfn, unsigned int page_order)
+{
+    guest_physmap_remove_entry(d->arch.p2m, gfn, mfn, page_order);
+}
 
 /* Change types across all p2m entries in a domain */
-void p2m_change_type_global(struct domain *d, p2m_type_t ot, p2m_type_t nt);
-void p2m_change_entry_type_global(struct domain *d, p2m_type_t ot, p2m_type_t nt);
+void p2m_change_type_global(struct p2m_domain *p2m, p2m_type_t ot, p2m_type_t nt);
+void p2m_change_entry_type_global(struct p2m_domain *p2m, p2m_type_t ot, p2m_type_t nt);
 
 /* Compare-exchange the type of a single p2m entry */
-p2m_type_t p2m_change_type(struct domain *d, unsigned long gfn,
+p2m_type_t p2m_change_type(struct p2m_domain *p2m, unsigned long gfn,
                            p2m_type_t ot, p2m_type_t nt);
 
 /* Set mmio addresses in the p2m table (for pass-through) */
-int set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn);
-int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn);
+int set_mmio_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn);
+int clear_mmio_p2m_entry(struct p2m_domain *p2m, unsigned long gfn);
 
 /* Modify p2m table for shared gfn */
 int
-set_shared_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn);
+set_shared_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn);
 
 /* Check if a nominated gfn is valid to be paged out */
-int p2m_mem_paging_nominate(struct domain *d, unsigned long gfn);
+int p2m_mem_paging_nominate(struct p2m_domain *p2m, unsigned long gfn);
 /* Evict a frame */
-int p2m_mem_paging_evict(struct domain *d, unsigned long gfn);
+int p2m_mem_paging_evict(struct p2m_domain *p2m, unsigned long gfn);
 /* Start populating a paged out frame */
-void p2m_mem_paging_populate(struct domain *d, unsigned long gfn);
+void p2m_mem_paging_populate(struct p2m_domain *p2m, unsigned long gfn);
 /* Prepare the p2m for paging a frame in */
-int p2m_mem_paging_prep(struct domain *d, unsigned long gfn);
+int p2m_mem_paging_prep(struct p2m_domain *p2m, unsigned long gfn);
 /* Resume normal operation (in case a domain was paused) */
-void p2m_mem_paging_resume(struct domain *d);
+void p2m_mem_paging_resume(struct p2m_domain *p2m);
 
-struct page_info *p2m_alloc_ptp(struct domain *d, unsigned long type);
+struct page_info *p2m_alloc_ptp(struct p2m_domain *p2m, unsigned long type);
 
 #endif /* _XEN_P2M_H */
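
Usage note (illustrative, not part of the changeset): after this change the
gfn-to-mfn helpers take a struct p2m_domain rather than a struct domain, so a
caller first fetches the host p2m with p2m_get_hostp2m() and passes that in.
A minimal sketch of the new calling convention, mirroring the updated
gmfn_to_mfn() compatibility helper above (the function name
example_translate_gfn is hypothetical):

    /* Hypothetical caller: look up the mfn backing a gfn of domain d. */
    static unsigned long example_translate_gfn(struct domain *d, unsigned long gfn)
    {
        struct p2m_domain *p2m = p2m_get_hostp2m(d); /* per-domain host p2m */
        p2m_type_t t;
        mfn_t mfn;

        mfn = gfn_to_mfn(p2m, gfn, &t);              /* was gfn_to_mfn(d, gfn, &t) */
        if ( !p2m_is_valid(t) )
            return INVALID_MFN;
        return mfn_x(mfn);
    }

The call-site hunks above apply the same substitution (pass
p2m_get_hostp2m(d) where a plain domain pointer was passed before) throughout
the tree, which is what allows more than one p2m table per domain later on.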