Re: [Xen-devel] [PATCH] xmalloc: make close-to-PAGE_SIZE allocations more efficient
On 18/02/2013 12:45, "Jan Beulich" <JBeulich@xxxxxxxx> wrote:
> Rather than bumping their sizes to slightly above (a multiple of)
> PAGE_SIZE (in order to store tracking information), thus requiring
> a non-order-0 allocation even when no more than a page is being
> requested, return the result of alloc_xenheap_pages() directly, and use
> the struct page_info field underlying PFN_ORDER() to store the actual
> size (needed for freeing the memory).
>
> This leverages the fact that sub-allocation of memory obtained from the
> page allocator can only ever result in non-page-aligned memory chunks
> (with the exception of zero size allocations with sufficiently high
> alignment being requested, which is why zero-size allocations now get
> special cased).
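As a reading aid (not part of the patch): that invariant means xfree() can
dispatch purely on pointer alignment. A minimal sketch, where
free_whole_pages() and free_pool_chunk() are hypothetical stand-ins for the
two real code paths:

    /* Pool chunks are preceded by a struct bhdr, so a pointer returned
     * for a sub-page allocation is never page-aligned; memory returned
     * straight from alloc_xenheap_pages() always is. */
    void xfree_sketch(void *p)
    {
        if ( !((unsigned long)p & (PAGE_SIZE - 1)) )
            free_whole_pages(p);   /* hypothetical whole-page path */
        else
            free_pool_chunk(p);    /* hypothetical TLSF pool path */
    }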
>
> Use the new property to simplify allocation of the trap info array for
> PV guests on x86.
>
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Acked-by: Keir Fraser <keir@xxxxxxx>
> --- a/xen/arch/x86/domain.c
> +++ b/xen/arch/x86/domain.c
> @@ -369,13 +369,6 @@ int switch_compat(struct domain *d)
>      return -ENOMEM;
>  }
>  
> -static inline bool_t standalone_trap_ctxt(struct vcpu *v)
> -{
> -    BUILD_BUG_ON(NR_VECTORS * sizeof(*v->arch.pv_vcpu.trap_ctxt) > PAGE_SIZE);
> -    return NR_VECTORS * sizeof(*v->arch.pv_vcpu.trap_ctxt) + sizeof(*v)
> -           > PAGE_SIZE;
> -}
> -
>  int vcpu_initialise(struct vcpu *v)
>  {
>      struct domain *d = v->domain;
> @@ -427,19 +420,15 @@ int vcpu_initialise(struct vcpu *v)
>  
>      if ( !is_idle_domain(d) )
>      {
> -        if ( standalone_trap_ctxt(v) )
> +        BUILD_BUG_ON(NR_VECTORS * sizeof(*v->arch.pv_vcpu.trap_ctxt) >
> +                     PAGE_SIZE);
> +        v->arch.pv_vcpu.trap_ctxt = xzalloc_array(struct trap_info,
> +                                                  NR_VECTORS);
> +        if ( !v->arch.pv_vcpu.trap_ctxt )
>          {
> -            v->arch.pv_vcpu.trap_ctxt = alloc_xenheap_page();
> -            if ( !v->arch.pv_vcpu.trap_ctxt )
> -            {
> -                rc = -ENOMEM;
> -                goto done;
> -            }
> -            clear_page(v->arch.pv_vcpu.trap_ctxt);
> +            rc = -ENOMEM;
> +            goto done;
>          }
> -        else
> -            v->arch.pv_vcpu.trap_ctxt = (void *)v + PAGE_SIZE -
> -                NR_VECTORS * sizeof(*v->arch.pv_vcpu.trap_ctxt);
>  
>          /* PV guests by default have a 100Hz ticker. */
>          v->periodic_period = MILLISECS(10);
> @@ -467,8 +456,8 @@ int vcpu_initialise(struct vcpu *v)
>      {
>          vcpu_destroy_fpu(v);
>  
> -        if ( !is_hvm_domain(d) && standalone_trap_ctxt(v) )
> -            free_xenheap_page(v->arch.pv_vcpu.trap_ctxt);
> +        if ( !is_hvm_domain(d) )
> +            xfree(v->arch.pv_vcpu.trap_ctxt);
>      }
>  
>      return rc;
> @@ -483,8 +472,8 @@ void vcpu_destroy(struct vcpu *v)
>  
>      if ( is_hvm_vcpu(v) )
>          hvm_vcpu_destroy(v);
> -    else if ( standalone_trap_ctxt(v) )
> -        free_xenheap_page(v->arch.pv_vcpu.trap_ctxt);
> +    else
> +        xfree(v->arch.pv_vcpu.trap_ctxt);
>  }
>  
>  int arch_domain_create(struct domain *d, unsigned int domcr_flags)
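(Aside, not part of the patch: the BUILD_BUG_ON retained above is what makes
this simplification safe. With NR_VECTORS == 256 and a 16-byte struct
trap_info on 64-bit builds, the trap table is exactly one page, so with the
xmalloc change below the xzalloc_array() call is served as a single order-0
page rather than the order-1 allocation the old xmalloc would have made.)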
> --- a/xen/common/xmalloc_tlsf.c
> +++ b/xen/common/xmalloc_tlsf.c
> @@ -26,6 +26,7 @@
>  #include <xen/config.h>
>  #include <xen/irq.h>
>  #include <xen/mm.h>
> +#include <xen/pfn.h>
>  #include <asm/time.h>
>  
>  #define MAX_POOL_NAME_LEN 16
> @@ -524,25 +525,30 @@ static void xmalloc_pool_put(void *p)
>      free_xenheap_page(p);
>  }
>  
> -static void *xmalloc_whole_pages(unsigned long size)
> +static void *xmalloc_whole_pages(unsigned long size, unsigned long align)
>  {
> -    struct bhdr *b;
> -    unsigned int i, pageorder = get_order_from_bytes(size + BHDR_OVERHEAD);
> -    char *p;
> +    unsigned int i, order = get_order_from_bytes(size);
> +    void *res, *p;
> +
> +    if ( align > size )
> +        order = get_order_from_bytes(align);
>  
> -    b = alloc_xenheap_pages(pageorder, 0);
> -    if ( b == NULL )
> +    res = alloc_xenheap_pages(order, 0);
> +    if ( res == NULL )
>          return NULL;
>  
> -    b->size = PAGE_ALIGN(size + BHDR_OVERHEAD);
> -    for ( p = (char *)b + b->size, i = 0; i < pageorder; ++i )
> +    for ( p = res + PAGE_ALIGN(size), i = 0; i < order; ++i )
>          if ( (unsigned long)p & (PAGE_SIZE << i) )
>          {
>              free_xenheap_pages(p, i);
>              p += PAGE_SIZE << i;
>          }
>  
> -    return (void *)b->ptr.buffer;
> +    PFN_ORDER(virt_to_page(res)) = PFN_UP(size);
> +    /* Check that there was no truncation: */
> +    ASSERT(PFN_ORDER(virt_to_page(res)) == PFN_UP(size));
> +
> +    return res;
>  }
>  
>  static void tlsf_init(void)
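A worked example of the tail-freeing loop above (illustration only, not part
of the patch):

    /* Request of size = 5 pages => order = 3, so 8 pages are allocated
     * and res is aligned to an 8-page boundary; p starts at offset
     * PAGE_ALIGN(size) = 5 pages:
     *   i = 0: offset 5 has bit 0 set   -> free 1 page  at offset 5, p -> 6
     *   i = 1: offset 6 has bit 1 set   -> free 2 pages at offset 6, p -> 8
     *   i = 2: offset 8 has bit 2 clear -> nothing freed
     * Only the PFN_UP(size) = 5 pages actually needed remain allocated. */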
> @@ -559,6 +565,11 @@ static void tlsf_init(void)
>   * xmalloc()
>   */
>  
> +#ifndef ZERO_BLOCK_PTR
> +/* Return value for zero-size allocation, distinguished from NULL. */
> +#define ZERO_BLOCK_PTR ((void *)-1L)
> +#endif
> +
>  void *_xmalloc(unsigned long size, unsigned long align)
>  {
>      void *p = NULL;
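The caller-visible effect, as a usage sketch (not part of the patch; the x86
override further down picks a non-canonical poison value, so any dereference
of it faults immediately):

    void *p = _xmalloc(0, 8);  /* now returns ZERO_BLOCK_PTR */
    ASSERT(p != NULL);         /* a plain success check still passes */
    xfree(p);                  /* explicitly recognised and ignored */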
> @@ -566,6 +577,9 @@ void *_xmalloc(unsigned long size, unsig
>  
>      ASSERT(!in_irq());
>  
> +    if ( !size )
> +        return ZERO_BLOCK_PTR;
> +
>      ASSERT((align & (align - 1)) == 0);
>      if ( align < MEM_ALIGN )
>          align = MEM_ALIGN;
> @@ -577,7 +591,7 @@ void *_xmalloc(unsigned long size, unsig
>      if ( size < PAGE_SIZE )
>          p = xmem_pool_alloc(size, xenpool);
>      if ( p == NULL )
> -        p = xmalloc_whole_pages(size);
> +        return xmalloc_whole_pages(size - align + MEM_ALIGN, align);
>  
>      /* Add alignment padding. */
>      if ( (pad = -(long)p & (align - 1)) != 0 )
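(Reading note on the adjusted size: earlier, unchanged code in _xmalloc()
inflates the request with "size += align - MEM_ALIGN;" so the pool path has
room for alignment padding. Whole pages come back PAGE_SIZE-aligned anyway,
so that allowance is subtracted out again before calling
xmalloc_whole_pages().)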
> @@ -604,11 +618,28 @@ void xfree(void *p)
>  {
>      struct bhdr *b;
>  
> -    if ( p == NULL )
> +    if ( p == NULL || p == ZERO_BLOCK_PTR )
>          return;
>  
>      ASSERT(!in_irq());
>  
> +    if ( !((unsigned long)p & (PAGE_SIZE - 1)) )
> +    {
> +        unsigned long size = PFN_ORDER(virt_to_page(p));
> +        unsigned int i, order = get_order_from_pages(size);
> +
> +        BUG_ON((unsigned long)p & ((PAGE_SIZE << order) - 1));
> +        for ( i = 0; ; ++i )
> +        {
> +            if ( !(size & (1 << i)) )
> +                continue;
> +            size -= 1 << i;
> +            free_xenheap_pages(p + (size << PAGE_SHIFT), i);
> +            if ( i + 1 >= order )
> +                return;
> +        }
> +    }
> +
>      /* Strip alignment padding. */
>      b = (struct bhdr *)((char *) p - BHDR_OVERHEAD);
>      if ( b->size & 1 )
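And the matching worked example for the free side (illustration only, not
part of the patch):

    /* PFN_ORDER stored size = 5 pages => order = get_order_from_pages(5) = 3:
     *   i = 0: 5 has bit 0 set   -> size = 4, free 1 page  at offset 4
     *   i = 1: 4 has bit 1 clear -> continue
     *   i = 2: 4 has bit 2 set   -> size = 0, free 4 pages at offset 0;
     *          i + 1 >= order ends the loop.
     * Exactly the pages kept by xmalloc_whole_pages() are returned. */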
> @@ -618,21 +649,5 @@ void xfree(void *p)
>          ASSERT(!(b->size & 1));
>      }
>  
> -    if ( b->size >= PAGE_SIZE )
> -    {
> -        unsigned int i, order = get_order_from_bytes(b->size);
> -
> -        BUG_ON((unsigned long)b & ((PAGE_SIZE << order) - 1));
> -        for ( i = 0; ; ++i )
> -        {
> -            if ( !(b->size & (PAGE_SIZE << i)) )
> -                continue;
> -            b->size -= PAGE_SIZE << i;
> -            free_xenheap_pages((void *)b + b->size, i);
> -            if ( i + 1 >= order )
> -                break;
> -        }
> -    }
> -    else
> -        xmem_pool_free(p, xenpool);
> +    xmem_pool_free(p, xenpool);
>  }
> --- a/xen/include/asm-x86/config.h
> +++ b/xen/include/asm-x86/config.h
> @@ -91,6 +91,9 @@
>  /* Primary stack is restricted to 8kB by guard pages. */
>  #define PRIMARY_STACK_SIZE 8192
>  
> +/* Return value for zero-size _xmalloc(), distinguished from NULL. */
> +#define ZERO_BLOCK_PTR ((void *)0xBAD0BAD0BAD0BAD0UL)
> +
>  #ifndef __ASSEMBLY__
>  extern unsigned long trampoline_phys;
>  #define bootsym_phys(sym) \
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel