# HG changeset patch
# User yamahata@xxxxxxxxxxxxx
# Date 1187582891 -32400
# Node ID 603694f91c98a6a4f12dcd729053f9ba38d5fc11
# Parent  b5fdf02c38f4765697196f5fad5d1262f2c157f4
remove xencomm page size limit.

Currently xencomm has a page size limit, so a domain with a large
amount of memory (e.g. 100GB or more) can't be created.

The Xen side of xencomm now accepts a struct xencomm_desc whose address
array crosses a page boundary, so it is no longer necessary to allocate
a single page just to avoid crossing one; we can allocate exactly the
amount of memory needed.

Note that struct xencomm_desc itself can't cross a page boundary, and
the slab allocator only guarantees sizeof(void*)-aligned pointers.
Where sizeof(*desc) > sizeof(void*), e.g. in a 32-bit environment, a
pointer returned by the slab allocator doesn't guarantee that struct
xencomm_desc won't cross a page boundary, so we fall back to the page
allocator in that case.

PATCHNAME: remove_xencomm_page_size_limit_common_code

Signed-off-by: Isaku Yamahata

diff -r b5fdf02c38f4 -r 603694f91c98 drivers/xen/core/xencomm.c
--- a/drivers/xen/core/xencomm.c	Thu Aug 16 13:44:51 2007 -0600
+++ b/drivers/xen/core/xencomm.c	Mon Aug 20 13:08:11 2007 +0900
@@ -68,25 +68,54 @@ static int xencomm_init(struct xencomm_d
 	return 0;
 }
 
-/* XXX use slab allocator */
-static struct xencomm_desc *xencomm_alloc(gfp_t gfp_mask)
-{
-	struct xencomm_desc *desc;
-
-	desc = (struct xencomm_desc *)__get_free_page(gfp_mask);
-	if (desc == NULL)
-		return NULL;
-
-	desc->nr_addrs = (PAGE_SIZE - sizeof(struct xencomm_desc)) /
+static struct xencomm_desc *xencomm_alloc(gfp_t gfp_mask,
+					  void *buffer, unsigned long bytes)
+{
+	struct xencomm_desc *desc;
+	unsigned long buffer_ulong = (unsigned long)buffer;
+	unsigned long start = buffer_ulong & PAGE_MASK;
+	unsigned long end = (buffer_ulong + bytes) | ~PAGE_MASK;
+	unsigned long nr_addrs = (end - start + 1) >> PAGE_SHIFT;
+	unsigned long size = sizeof(*desc) +
+		sizeof(desc->address[0]) * nr_addrs;
+
+	/*
+	 * The slab allocator returns at least sizeof(void*)-aligned
+	 * pointers.  When sizeof(*desc) > sizeof(void*), struct
+	 * xencomm_desc might cross a page boundary.
+	 */
+	if (sizeof(*desc) > sizeof(void*)) {
+		unsigned long order = get_order(size);
+		desc = (struct xencomm_desc *)__get_free_pages(gfp_mask,
+							       order);
+		if (desc == NULL)
+			return NULL;
+
+		desc->nr_addrs =
+			((PAGE_SIZE << order) - sizeof(struct xencomm_desc)) /
 			sizeof(*desc->address);
-
+	} else {
+		desc = kmalloc(size, gfp_mask);
+		if (desc == NULL)
+			return NULL;
+
+		desc->nr_addrs = nr_addrs;
+	}
 	return desc;
 }
 
 void xencomm_free(struct xencomm_handle *desc)
 {
-	if (desc && !((ulong)desc & XENCOMM_INLINE_FLAG))
-		free_page((unsigned long)__va(desc));
+	if (desc && !((ulong)desc & XENCOMM_INLINE_FLAG)) {
+		struct xencomm_desc *desc__ = (struct xencomm_desc*)desc;
+		if (sizeof(*desc__) > sizeof(void*)) {
+			unsigned long size = sizeof(*desc__) +
+				sizeof(desc__->address[0]) * desc__->nr_addrs;
+			unsigned long order = get_order(size);
+			free_pages((unsigned long)__va(desc), order);
+		} else
+			kfree(__va(desc));
+	}
 }
 
 static int xencomm_create(void *buffer, unsigned long bytes, struct xencomm_desc **ret, gfp_t gfp_mask)
@@ -105,7 +134,7 @@ static int xencomm_create(void *buffer,
 	BUG_ON(buffer == NULL); /* 'bytes' is non-zero */
 
-	desc = xencomm_alloc(gfp_mask);
+	desc = xencomm_alloc(gfp_mask, buffer, bytes);
 	if (!desc) {
 		printk("%s failure\n", "xencomm_alloc");
 		return -ENOMEM;
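
As a quick illustration of the page-span arithmetic the new
xencomm_alloc() uses to size its address array, here is a minimal
standalone userspace sketch.  It is not part of the patch; PAGE_SHIFT=12
(4KB pages) and the helper name nr_addrs_for() are assumptions made for
the example only.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

/* Mirrors the nr_addrs computation in the patched xencomm_alloc(). */
static unsigned long nr_addrs_for(unsigned long buffer, unsigned long bytes)
{
	unsigned long start = buffer & PAGE_MASK;          /* first page */
	unsigned long end = (buffer + bytes) | ~PAGE_MASK; /* last byte of last page */
	return (end - start + 1) >> PAGE_SHIFT;
}

int main(void)
{
	/* 100 bytes entirely within one page -> 1 address entry */
	printf("%lu\n", nr_addrs_for(0x1000, 100));
	/* 100 bytes straddling a page boundary -> 2 address entries */
	printf("%lu\n", nr_addrs_for(0x1fc0, 100));
	/*
	 * A buffer ending exactly on a page boundary counts one extra
	 * page (prints 2, not 1), because 'end' is derived from
	 * buffer + bytes, which points one past the last byte.  This
	 * only over-allocates, so it is harmless.
	 */
	printf("%lu\n", nr_addrs_for(0x1000, 0x1000));
	return 0;
}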