On Tuesday 20 April 2010 11:12:32 Tim Deegan wrote:
> At 08:41 +0100 on 20 Apr (1271752877), Christoph Egger wrote:
> > On Friday 16 April 2010 12:45:35 Tim Deegan wrote:
> > > Nacked pending performance eval.
> > >
> > > BTW, if perf turns out not to be a problem, I welcome this with open arms;
> > > always nice to see a patch that removes a lot of code.
> >
> > kernbench performance numbers on 64bit xen:
>
> Looking good. :)
Yes. Elapsed time even improves slightly with the patch (276.89 -> 274.12
at -j 3, 228.66 -> 225.30 at -j 16), so no regression.
> What was the guest?
SLES10SP3
> And what about 32bit Xen?
I will provide the numbers when I have them.
Christoph
>
> Tim.
>
> > w/o patch:
> > Average Half load -j 3 Run (std deviation):
> > Elapsed Time 276.89 (1.3034)
> > User Time 668.406 (0.86025)
> > System Time 157.918 (0.297187)
> > Percent CPU 298 (2)
> > Context Switches 32798.2 (176.418)
> > Sleeps 43083.6 (227.457)
> >
> > Average Optimal load -j 16 Run (std deviation):
> > Elapsed Time 228.656 (0.901876)
> > User Time 682.568 (14.9445)
> > System Time 162.627 (4.97552)
> > Percent CPU 337.6 (41.7724)
> > Context Switches 47291.4 (15277.8)
> > Sleeps 48462.6 (5679.45)
> >
> > with patch:
> > Average Half load -j 3 Run (std deviation):
> > Elapsed Time 274.118 (0.700443)
> > User Time 659.982 (0.602387)
> > System Time 156.608 (0.515432)
> > Percent CPU 297.2 (0.447214)
> > Context Switches 32906.8 (189.361)
> > Sleeps 43076.2 (131.066)
> >
> > Average Optimal load -j 16 Run (std deviation):
> > Elapsed Time 225.296 (0.296108)
> > User Time 673.502 (14.2631)
> > System Time 161.189 (4.84903)
> > Percent CPU 337.6 (42.589)
> > Context Switches 47267.8 (15138.8)
> > Sleeps 48600.9 (5831.79)
> >
> > > Tim
> > >
> > > Content-Description: xen_nh04_p2m_cleanup.diff
> > >
> > > > # HG changeset patch
> > > > # User cegger
> > > > # Date 1271330293 -7200
> > > > Obsolete gfn_to_mfn_current and remove it.
> > > > gfn_to_mfn_current is redundant with gfn_to_mfn(current->domain, ...)
> > > >
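> > > > For illustration, the call-site conversion is mechanical (a minimal
> > > > sketch, reusing the gfn and p2mt names from the hunks below):
> > > >
> > > >     p2m_type_t p2mt;
> > > >     mfn_t mfn;
> > > >
> > > >     /* Before: implicitly walks current->domain's p2m table. */
> > > >     mfn = gfn_to_mfn_current(gfn, &p2mt);
> > > >
> > > >     /* After: the domain is passed explicitly; for the running vcpu
> > > >      * this is current->domain, so behaviour is unchanged. */
> > > >     mfn = gfn_to_mfn(current->domain, gfn, &p2mt);
> > > >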
> > > > diff -r 55ffaf97a4dd -r 891bd80cbed6 xen/arch/x86/hvm/emulate.c
> > > > --- a/xen/arch/x86/hvm/emulate.c
> > > > +++ b/xen/arch/x86/hvm/emulate.c
> > > > @@ -62,7 +62,7 @@ int hvmemul_do_io(
> > > > int rc;
> > > >
> > > > /* Check for paged out page */
> > > > -    ram_mfn = gfn_to_mfn_unshare(current->domain, ram_gfn, &p2mt, 0);
> > > > +    ram_mfn = gfn_to_mfn_unshare(curr->domain, ram_gfn, &p2mt, 0);
> > > >      if ( p2m_is_paging(p2mt) )
> > > > {
> > > > p2m_mem_paging_populate(curr->domain, ram_gfn);
> > > > @@ -638,6 +638,7 @@ static int hvmemul_rep_movs(
> > > > unsigned long saddr, daddr, bytes;
> > > > paddr_t sgpa, dgpa;
> > > > uint32_t pfec = PFEC_page_present;
> > > > + struct domain *d = current->domain;
> > > > p2m_type_t p2mt;
> > > > int rc, df = !!(ctxt->regs->eflags & X86_EFLAGS_DF);
> > > > char *buf;
> > > > @@ -668,12 +669,12 @@ static int hvmemul_rep_movs(
> > > > if ( rc != X86EMUL_OKAY )
> > > > return rc;
> > > >
> > > > - (void)gfn_to_mfn_current(sgpa >> PAGE_SHIFT, &p2mt);
> > > > + (void)gfn_to_mfn(d, sgpa >> PAGE_SHIFT, &p2mt);
> > > > if ( !p2m_is_ram(p2mt) && !p2m_is_grant(p2mt) )
> > > > return hvmemul_do_mmio(
> > > > sgpa, reps, bytes_per_rep, dgpa, IOREQ_READ, df, NULL);
> > > >
> > > > - (void)gfn_to_mfn_current(dgpa >> PAGE_SHIFT, &p2mt);
> > > > + (void)gfn_to_mfn(d, dgpa >> PAGE_SHIFT, &p2mt);
> > > > if ( !p2m_is_ram(p2mt) && !p2m_is_grant(p2mt) )
> > > > return hvmemul_do_mmio(
> > > > dgpa, reps, bytes_per_rep, sgpa, IOREQ_WRITE, df, NULL);
> > > > diff -r 55ffaf97a4dd -r 891bd80cbed6 xen/arch/x86/hvm/hvm.c
> > > > --- a/xen/arch/x86/hvm/hvm.c
> > > > +++ b/xen/arch/x86/hvm/hvm.c
> > > > @@ -938,8 +938,9 @@ bool_t hvm_hap_nested_page_fault(unsigne
> > > > {
> > > > p2m_type_t p2mt;
> > > > mfn_t mfn;
> > > > -
> > > > - mfn = gfn_to_mfn_type_current(gfn, &p2mt, p2m_guest);
> > > > + struct domain *d = current->domain;
> > > > +
> > > > + mfn = gfn_to_mfn_guest(d, gfn, &p2mt);
> > > >
> > > > /*
> > > >      * If this GFN is emulated MMIO or marked as read-only, pass the fault
> > > > @@ -954,12 +955,12 @@ bool_t hvm_hap_nested_page_fault(unsigne
> > > >
> > > > /* Check if the page has been paged out */
> > > > if ( p2m_is_paged(p2mt) || (p2mt == p2m_ram_paging_out) )
> > > > - p2m_mem_paging_populate(current->domain, gfn);
> > > > + p2m_mem_paging_populate(d, gfn);
> > > >
> > > > /* Mem sharing: unshare the page and try again */
> > > > if ( p2mt == p2m_ram_shared )
> > > > {
> > > > - mem_sharing_unshare_page(current->domain, gfn, 0);
> > > > + mem_sharing_unshare_page(d, gfn, 0);
> > > > return 1;
> > > > }
> > > >
> > > > @@ -971,8 +972,8 @@ bool_t hvm_hap_nested_page_fault(unsigne
> > > >      * a large page, we do not change other pages type within that large
> > > >      * page.
> > > >      */
> > > > -    paging_mark_dirty(current->domain, mfn_x(mfn));
> > > > -    p2m_change_type(current->domain, gfn, p2m_ram_logdirty, p2m_ram_rw);
> > > > +    paging_mark_dirty(d, mfn_x(mfn));
> > > > +    p2m_change_type(d, gfn, p2m_ram_logdirty, p2m_ram_rw);
> > > > return 1;
> > > > }
> > > >
> > > > @@ -1093,7 +1094,7 @@ int hvm_set_cr0(unsigned long value)
> > > > {
> > > >              /* The guest CR3 must be pointing to the guest physical. */
> > > >              gfn = v->arch.hvm_vcpu.guest_cr[3]>>PAGE_SHIFT;
> > > > -            mfn = mfn_x(gfn_to_mfn_current(gfn, &p2mt));
> > > > +            mfn = mfn_x(gfn_to_mfn(v->domain, gfn, &p2mt));
> > > >              if ( !p2m_is_ram(p2mt) || !mfn_valid(mfn) ||
> > > >                   !get_page(mfn_to_page(mfn), v->domain))
> > > > {
> > > > @@ -1180,7 +1181,7 @@ int hvm_set_cr3(unsigned long value)
> > > > {
> > > >          /* Shadow-mode CR3 change. Check PDBR and update refcounts. */
> > > >          HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
> > > > -        mfn = mfn_x(gfn_to_mfn_current(value >> PAGE_SHIFT, &p2mt));
> > > > +        mfn = mfn_x(gfn_to_mfn(v->domain, value >> PAGE_SHIFT, &p2mt));
> > > >          if ( !p2m_is_ram(p2mt) || !mfn_valid(mfn) ||
> > > >               !get_page(mfn_to_page(mfn), v->domain) )
> > > >              goto bad_cr3;
> > > > @@ -1323,6 +1324,7 @@ static void *hvm_map_entry(unsigned long
> > > > unsigned long gfn, mfn;
> > > > p2m_type_t p2mt;
> > > > uint32_t pfec;
> > > > + struct vcpu *v = current;
> > > >
> > > > if ( ((va & ~PAGE_MASK) + 8) > PAGE_SIZE )
> > > > {
> > > > @@ -1336,13 +1338,13 @@ static void *hvm_map_entry(unsigned long
> > > >       * write the accessed flags in the descriptors (in 32-bit mode), but
> > > >       * we still treat it as a kernel-mode read (i.e. no access checks). */
> > > >      pfec = PFEC_page_present;
> > > > - gfn = paging_gva_to_gfn(current, va, &pfec);
> > > > + gfn = paging_gva_to_gfn(v, va, &pfec);
> > > > if ( pfec == PFEC_page_paged || pfec == PFEC_page_shared )
> > > > return NULL;
> > > > - mfn = mfn_x(gfn_to_mfn_unshare(current->domain, gfn, &p2mt, 0));
> > > > + mfn = mfn_x(gfn_to_mfn_unshare(v->domain, gfn, &p2mt, 0));
> > > > if ( p2m_is_paging(p2mt) )
> > > > {
> > > > - p2m_mem_paging_populate(current->domain, gfn);
> > > > + p2m_mem_paging_populate(v->domain, gfn);
> > > > return NULL;
> > > > }
> > > > if ( p2m_is_shared(p2mt) )
> > > > @@ -1350,13 +1352,13 @@ static void *hvm_map_entry(unsigned long
> > > > if ( !p2m_is_ram(p2mt) )
> > > > {
> > > > gdprintk(XENLOG_ERR, "Failed to look up descriptor table
> > > > entry\n"); - domain_crash(current->domain);
> > > > + domain_crash(v->domain);
> > > > return NULL;
> > > > }
> > > >
> > > > ASSERT(mfn_valid(mfn));
> > > >
> > > > - paging_mark_dirty(current->domain, mfn);
> > > > + paging_mark_dirty(v->domain, mfn);
> > > >
> > > > return (char *)map_domain_page(mfn) + (va & ~PAGE_MASK);
> > > > }
> > > > @@ -1737,7 +1739,7 @@ static enum hvm_copy_result __hvm_copy(
> > > > gfn = addr >> PAGE_SHIFT;
> > > > }
> > > >
> > > > -        mfn = mfn_x(gfn_to_mfn_unshare(current->domain, gfn, &p2mt, 0));
> > > > +        mfn = mfn_x(gfn_to_mfn_unshare(curr->domain, gfn, &p2mt, 0));
> > > >
> > > > if ( p2m_is_paging(p2mt) )
> > > > {
> > > > diff -r 55ffaf97a4dd -r 891bd80cbed6 xen/arch/x86/hvm/stdvga.c
> > > > --- a/xen/arch/x86/hvm/stdvga.c
> > > > +++ b/xen/arch/x86/hvm/stdvga.c
> > > > @@ -481,7 +481,8 @@ static int mmio_move(struct hvm_hw_stdvg
> > > >             if ( hvm_copy_to_guest_phys(data, &tmp, p->size) != HVMCOPY_okay )
> > > >             {
> > > > -                (void)gfn_to_mfn_current(data >> PAGE_SHIFT, &p2mt);
> > > > +                (void)gfn_to_mfn(current->domain,
> > > > +                                 data >> PAGE_SHIFT, &p2mt);
> > > >                 /*
> > > >                  * The only case we handle is vga_mem <-> vga_mem.
> > > >                  * Anything else disables caching and leaves it to qemu-dm.
> > > > @@ -503,7 +504,8 @@ static int mmio_move(struct hvm_hw_stdvg
> > > >             if ( hvm_copy_from_guest_phys(&tmp, data, p->size) != HVMCOPY_okay )
> > > >             {
> > > > -                (void)gfn_to_mfn_current(data >> PAGE_SHIFT, &p2mt);
> > > > +                (void)gfn_to_mfn(current->domain,
> > > > +                                 data >> PAGE_SHIFT, &p2mt);
> > > >                 if ( (p2mt != p2m_mmio_dm) || (data < VGA_MEM_BASE) ||
> > > >                      ((data + p->size) > (VGA_MEM_BASE + VGA_MEM_SIZE)) )
> > > >                     return 0;
> > > >
> > > > diff -r 55ffaf97a4dd -r 891bd80cbed6 xen/arch/x86/hvm/svm/svm.c
> > > > --- a/xen/arch/x86/hvm/svm/svm.c
> > > > +++ b/xen/arch/x86/hvm/svm/svm.c
> > > > @@ -895,6 +895,7 @@ static void svm_do_nested_pgfault(paddr_
> > > > unsigned long gfn = gpa >> PAGE_SHIFT;
> > > > mfn_t mfn;
> > > > p2m_type_t p2mt;
> > > > + struct domain *d = current->domain;
> > > >
> > > > if ( tb_init_done )
> > > > {
> > > > @@ -907,7 +908,7 @@ static void svm_do_nested_pgfault(paddr_
> > > >
> > > > _d.gpa = gpa;
> > > > _d.qualification = 0;
> > > > -        _d.mfn = mfn_x(gfn_to_mfn_query(current->domain, gfn, &_d.p2mt));
> > > > +        _d.mfn = mfn_x(gfn_to_mfn_query(d, gfn, &_d.p2mt));
> > > >
> > > >         __trace_var(TRC_HVM_NPF, 0, sizeof(_d), (unsigned char *)&_d);
> > > >     }
> > > > @@ -916,10 +917,10 @@ static void svm_do_nested_pgfault(paddr_
> > > > return;
> > > >
> > > > /* Everything else is an error. */
> > > > - mfn = gfn_to_mfn_type_current(gfn, &p2mt, p2m_guest);
> > > > + mfn = gfn_to_mfn_guest(d, gfn, &p2mt);
> > > > gdprintk(XENLOG_ERR, "SVM violation gpa %#"PRIpaddr", mfn %#lx,
> > > > type %i\n", gpa, mfn_x(mfn), p2mt);
> > > > - domain_crash(current->domain);
> > > > + domain_crash(d);
> > > > }
> > > >
> > > > static void svm_fpu_dirty_intercept(void)
> > > > diff -r 55ffaf97a4dd -r 891bd80cbed6 xen/arch/x86/hvm/vmx/vmx.c
> > > > --- a/xen/arch/x86/hvm/vmx/vmx.c
> > > > +++ b/xen/arch/x86/hvm/vmx/vmx.c
> > > > @@ -2145,7 +2145,7 @@ static void ept_handle_violation(unsigne
> > > > return;
> > > >
> > > > /* Everything else is an error. */
> > > > - mfn = gfn_to_mfn_type_current(gfn, &p2mt, p2m_guest);
> > > > + mfn = gfn_to_mfn_guest(current->domain, gfn, &p2mt);
> > > > gdprintk(XENLOG_ERR, "EPT violation %#lx (%c%c%c/%c%c%c), "
> > > > "gpa %#"PRIpaddr", mfn %#lx, type %i.\n",
> > > > qualification,
> > > > diff -r 55ffaf97a4dd -r 891bd80cbed6 xen/arch/x86/mm.c
> > > > --- a/xen/arch/x86/mm.c
> > > > +++ b/xen/arch/x86/mm.c
> > > > @@ -3673,7 +3673,7 @@ static int replace_grant_p2m_mapping(
> > > > if ( new_addr != 0 || (flags & GNTMAP_contains_pte) )
> > > > return GNTST_general_error;
> > > >
> > > > - old_mfn = gfn_to_mfn_current(gfn, &type);
> > > > + old_mfn = gfn_to_mfn(current->domain, gfn, &type);
> > > > if ( !p2m_is_grant(type) || mfn_x(old_mfn) != frame )
> > > > {
> > > > gdprintk(XENLOG_WARNING,
> > > > diff -r 55ffaf97a4dd -r 891bd80cbed6 xen/arch/x86/mm/hap/p2m-ept.c
> > > > --- a/xen/arch/x86/mm/hap/p2m-ept.c
> > > > +++ b/xen/arch/x86/mm/hap/p2m-ept.c
> > > > @@ -543,12 +543,6 @@ out:
> > > > return;
> > > > }
> > > >
> > > > -static mfn_t ept_get_entry_current(unsigned long gfn, p2m_type_t *t,
> > > > - p2m_query_t q)
> > > > -{
> > > > - return ept_get_entry(current->domain, gfn, t, q);
> > > > -}
> > > > -
> > > > /*
> > > > * To test if the new emt type is the same with old,
> > > > * return 1 to not to reset ept entry.
> > > > @@ -721,7 +715,6 @@ void ept_p2m_init(struct domain *d)
> > > > {
> > > > d->arch.p2m->set_entry = ept_set_entry;
> > > > d->arch.p2m->get_entry = ept_get_entry;
> > > > - d->arch.p2m->get_entry_current = ept_get_entry_current;
> > > >     d->arch.p2m->change_entry_type_global = ept_change_entry_type_global;
> > > > }
> > > >
> > > > diff -r 55ffaf97a4dd -r 891bd80cbed6 xen/arch/x86/mm/p2m.c
> > > > --- a/xen/arch/x86/mm/p2m.c
> > > > +++ b/xen/arch/x86/mm/p2m.c
> > > > @@ -1531,181 +1531,6 @@ pod_retry_l1:
> > > >     return (p2m_is_valid(*t) || p2m_is_grant(*t)) ? mfn : _mfn(INVALID_MFN);
> > > > }
> > > >
> > > > -/* Read the current domain's p2m table (through the linear mapping). */
> > > > -static mfn_t p2m_gfn_to_mfn_current(unsigned long gfn, p2m_type_t *t,
> > > > -                                    p2m_query_t q)
> > > > -{
> > > > - mfn_t mfn = _mfn(INVALID_MFN);
> > > > - p2m_type_t p2mt = p2m_mmio_dm;
> > > > - paddr_t addr = ((paddr_t)gfn) << PAGE_SHIFT;
> > > > -    /* XXX This is for compatibility with the old model, where anything not
> > > > -     * XXX marked as RAM was considered to be emulated MMIO space.
> > > > -     * XXX Once we start explicitly registering MMIO regions in the p2m
> > > > -     * XXX we will return p2m_invalid for unmapped gfns */
> > > > -
> > > > - if ( gfn <= current->domain->arch.p2m->max_mapped_pfn )
> > > > - {
> > > > - l1_pgentry_t l1e = l1e_empty(), *p2m_entry;
> > > > - l2_pgentry_t l2e = l2e_empty();
> > > > - int ret;
> > > > -#if CONFIG_PAGING_LEVELS >= 4
> > > > - l3_pgentry_t l3e = l3e_empty();
> > > > -#endif
> > > > -
> > > > - ASSERT(gfn < (RO_MPT_VIRT_END - RO_MPT_VIRT_START)
> > > > - / sizeof(l1_pgentry_t));
> > > > -
> > > > -#if CONFIG_PAGING_LEVELS >= 4
> > > > - /*
> > > > - * Read & process L3
> > > > - */
> > > > - p2m_entry = (l1_pgentry_t *)
> > > > - &__linear_l2_table[l2_linear_offset(RO_MPT_VIRT_START)
> > > > - + l3_linear_offset(addr)];
> > > > - pod_retry_l3:
> > > > - ret = __copy_from_user(&l3e, p2m_entry, sizeof(l3e));
> > > > -
> > > > - if ( ret != 0 || !(l3e_get_flags(l3e) & _PAGE_PRESENT) )
> > > > - {
> > > > -            if ( (l3e_get_flags(l3e) & _PAGE_PSE) &&
> > > > -                 (p2m_flags_to_type(l3e_get_flags(l3e)) ==
> > > > -                  p2m_populate_on_demand) )
> > > > -            {
> > > > -                /* The read has succeeded, so we know that mapping exists */
> > > > -                if ( q != p2m_query )
> > > > -                {
> > > > -                    if ( !p2m_pod_demand_populate(current->domain, gfn, 18, q) )
> > > > -                        goto pod_retry_l3;
> > > > -                    p2mt = p2m_invalid;
> > > > -                    printk("%s: Allocate 1GB failed!\n", __func__);
> > > > -                    goto out;
> > > > -                }
> > > > - else
> > > > - {
> > > > - p2mt = p2m_populate_on_demand;
> > > > - goto out;
> > > > - }
> > > > - }
> > > > - goto pod_retry_l2;
> > > > - }
> > > > -
> > > > - if ( l3e_get_flags(l3e) & _PAGE_PSE )
> > > > - {
> > > > - p2mt = p2m_flags_to_type(l3e_get_flags(l3e));
> > > > -            ASSERT(l3e_get_pfn(l3e) != INVALID_MFN || !p2m_is_ram(p2mt));
> > > > -            if (p2m_is_valid(p2mt) )
> > > > -                mfn = _mfn(l3e_get_pfn(l3e) +
> > > > -                           l2_table_offset(addr) * L1_PAGETABLE_ENTRIES +
> > > > -                           l1_table_offset(addr));
> > > > - else
> > > > - p2mt = p2m_mmio_dm;
> > > > -
> > > > - goto out;
> > > > - }
> > > > -#endif
> > > > - /*
> > > > - * Read & process L2
> > > > - */
> > > > -        p2m_entry = &__linear_l1_table[l1_linear_offset(RO_MPT_VIRT_START)
> > > > -                                       + l2_linear_offset(addr)];
> > > > -
> > > > - pod_retry_l2:
> > > > - ret = __copy_from_user(&l2e,
> > > > - p2m_entry,
> > > > - sizeof(l2e));
> > > > - if ( ret != 0
> > > > - || !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
> > > > - {
> > > > - if( (l2e_get_flags(l2e) & _PAGE_PSE)
> > > > - && ( p2m_flags_to_type(l2e_get_flags(l2e))
> > > > - == p2m_populate_on_demand ) )
> > > > - {
> > > > -                /* The read has succeeded, so we know that the mapping
> > > > -                 * exits at this point. */
> > > > -                if ( q != p2m_query )
> > > > -                {
> > > > -                    if ( !p2m_pod_check_and_populate(current->domain, gfn,
> > > > -                                                     p2m_entry, 9, q) )
> > > > -                        goto pod_retry_l2;
> > > > -
> > > > - /* Allocate failed. */
> > > > - p2mt = p2m_invalid;
> > > > - printk("%s: Allocate failed!\n", __func__);
> > > > - goto out;
> > > > - }
> > > > - else
> > > > - {
> > > > - p2mt = p2m_populate_on_demand;
> > > > - goto out;
> > > > - }
> > > > - }
> > > > -
> > > > - goto pod_retry_l1;
> > > > - }
> > > > -
> > > > - if (l2e_get_flags(l2e) & _PAGE_PSE)
> > > > - {
> > > > - p2mt = p2m_flags_to_type(l2e_get_flags(l2e));
> > > > -            ASSERT(l2e_get_pfn(l2e) != INVALID_MFN || !p2m_is_ram(p2mt));
> > > > -
> > > > -            if ( p2m_is_valid(p2mt) )
> > > > -                mfn = _mfn(l2e_get_pfn(l2e) + l1_table_offset(addr));
> > > > -            else
> > > > -                p2mt = p2m_mmio_dm;
> > > > -
> > > > - goto out;
> > > > - }
> > > > -
> > > > - /*
> > > > - * Read and process L1
> > > > - */
> > > > -
> > > > -        /* Need to __copy_from_user because the p2m is sparse and this
> > > > -         * part might not exist */
> > > > - pod_retry_l1:
> > > > - p2m_entry = &phys_to_machine_mapping[gfn];
> > > > -
> > > > - ret = __copy_from_user(&l1e,
> > > > - p2m_entry,
> > > > - sizeof(l1e));
> > > > -
> > > > - if ( ret == 0 ) {
> > > > - p2mt = p2m_flags_to_type(l1e_get_flags(l1e));
> > > > -            ASSERT(l1e_get_pfn(l1e) != INVALID_MFN || !p2m_is_ram(p2mt));
> > > > -
> > > > - if ( p2m_flags_to_type(l1e_get_flags(l1e))
> > > > - == p2m_populate_on_demand )
> > > > - {
> > > > -                /* The read has succeeded, so we know that the mapping
> > > > -                 * exits at this point. */
> > > > -                if ( q != p2m_query )
> > > > -                {
> > > > -                    if ( !p2m_pod_check_and_populate(current->domain, gfn,
> > > > -                                                     (l1_pgentry_t *)p2m_entry, 0, q) )
> > > > -                        goto pod_retry_l1;
> > > > -
> > > > - /* Allocate failed. */
> > > > - p2mt = p2m_invalid;
> > > > - goto out;
> > > > - }
> > > > - else
> > > > - {
> > > > - p2mt = p2m_populate_on_demand;
> > > > - goto out;
> > > > - }
> > > > - }
> > > > -
> > > > - if ( p2m_is_valid(p2mt) || p2m_is_grant(p2mt) )
> > > > - mfn = _mfn(l1e_get_pfn(l1e));
> > > > - else
> > > > - /* XXX see above */
> > > > - p2mt = p2m_mmio_dm;
> > > > - }
> > > > - }
> > > > -out:
> > > > - *t = p2mt;
> > > > - return mfn;
> > > > -}
> > > > -
> > > > /* Init the datastructures for later use by the p2m code */
> > > > int p2m_init(struct domain *d)
> > > > {
> > > > @@ -1725,7 +1550,6 @@ int p2m_init(struct domain *d)
> > > >
> > > > p2m->set_entry = p2m_set_entry;
> > > > p2m->get_entry = p2m_gfn_to_mfn;
> > > > - p2m->get_entry_current = p2m_gfn_to_mfn_current;
> > > > p2m->change_entry_type_global = p2m_change_type_global;
> > > >
> > > > if ( is_hvm_domain(d) && d->arch.hvm_domain.hap_enabled &&
> > > > diff -r 55ffaf97a4dd -r 891bd80cbed6 xen/include/asm-x86/p2m.h
> > > > --- a/xen/include/asm-x86/p2m.h
> > > > +++ b/xen/include/asm-x86/p2m.h
> > > > @@ -179,9 +179,6 @@ struct p2m_domain {
> > > >     mfn_t (*get_entry )(struct domain *d, unsigned long gfn,
> > > >                         p2m_type_t *p2mt,
> > > >                         p2m_query_t q);
> > > > -    mfn_t (*get_entry_current)(unsigned long gfn,
> > > > -                               p2m_type_t *p2mt,
> > > > -                               p2m_query_t q);
> > > >     void (*change_entry_type_global)(struct domain *d,
> > > >                                      p2m_type_t ot,
> > > >                                      p2m_type_t nt);
> > > > @@ -267,13 +264,6 @@ static inline p2m_type_t p2m_flags_to_ty
> > > > #endif
> > > > }
> > > >
> > > > -/* Read the current domain's p2m table. Do not populate PoD pages. */
> > > > -static inline mfn_t gfn_to_mfn_type_current(unsigned long gfn, p2m_type_t *t,
> > > > -                                            p2m_query_t q)
> > > > -{
> > > > - return current->domain->arch.p2m->get_entry_current(gfn, t, q);
> > > > -}
> > > > -
> > > > /* Read another domain's P2M table, mapping pages as we go.
> > > > * Do not populate PoD pages. */
> > > > static inline
> > > > @@ -295,17 +285,13 @@ static inline mfn_t _gfn_to_mfn_type(str
> > > > *t = p2m_ram_rw;
> > > > return _mfn(gfn);
> > > > }
> > > > - if ( likely(current->domain == d) )
> > > > - return gfn_to_mfn_type_current(gfn, t, q);
> > > > - else
> > > > - return gfn_to_mfn_type_foreign(d, gfn, t, q);
> > > > + return gfn_to_mfn_type_foreign(d, gfn, t, q);
> > > > }
> > > >
> > > > #define gfn_to_mfn(d, g, t) _gfn_to_mfn_type((d), (g), (t), p2m_alloc)
> > > > #define gfn_to_mfn_query(d, g, t) _gfn_to_mfn_type((d), (g), (t), p2m_query)
> > > > #define gfn_to_mfn_guest(d, g, t) _gfn_to_mfn_type((d), (g), (t), p2m_guest)
> > > >
> > > > -#define gfn_to_mfn_current(g, t) gfn_to_mfn_type_current((g), (t), p2m_alloc)
> > > > #define gfn_to_mfn_foreign(d, g, t) gfn_to_mfn_type_foreign((d), (g), (t), p2m_alloc)
> > > >
> > > > static inline mfn_t gfn_to_mfn_unshare(struct domain *d,
> >
--
---to satisfy European Law for business letters:
Advanced Micro Devices GmbH
Karl-Hammerschmidt-Str. 34, 85609 Dornach b. Muenchen
Geschaeftsfuehrer: Andrew Bowd, Thomas M. McCoy, Giuliano Meroni
Sitz: Dornach, Gemeinde Aschheim, Landkreis Muenchen
Registergericht Muenchen, HRB Nr. 43632
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel