# HG changeset patch # User yamahata@xxxxxxxxxxxxx # Date 1185949522 -32400 # Node ID dc2dfa31d0fefbf0b6112d993f7f889659de6355 # Parent c4d697f2367e389f3a2f1b6866ae971206198efc remove xencomm page size limit. Currently xencomm has page size limit so that a domain with much memory (e.g. 100GB+) can't be created. This patch allows the address array of struct xencomm_desc to cross a page boundary so that the size of struct xencomm_desc can exceed page size. Note that struct xencomm_desc itself can't cross a page boundary. PATCHNAME: remove_xencomm_page_size_limit_xen_side Signed-off-by: Isaku Yamahata diff -r c4d697f2367e -r dc2dfa31d0fe xen/common/xencomm.c --- a/xen/common/xencomm.c Wed Aug 01 15:11:54 2007 +0900 +++ b/xen/common/xencomm.c Wed Aug 01 15:25:22 2007 +0900 @@ -33,6 +33,15 @@ static int xencomm_debug = 1; /* extreme #else #define xencomm_debug 0 #endif + +static int +xencomm_desc_cross_page_boundary(unsigned long paddr) +{ + unsigned long offset = paddr & ~PAGE_MASK; + if (offset > PAGE_SIZE - sizeof(struct xencomm_desc)) + return 1; + return 0; +} static unsigned long xencomm_inline_from_guest(void *to, const void *from, unsigned int n, @@ -81,6 +90,8 @@ xencomm_copy_from_guest(void *to, const unsigned int skip) { struct xencomm_desc *desc; + struct xencomm_desc *desc_paddr; + unsigned long *address; unsigned int from_pos = 0; unsigned int to_pos = 0; unsigned int i = 0; @@ -88,6 +99,9 @@ xencomm_copy_from_guest(void *to, const if (xencomm_is_inline(from)) return xencomm_inline_from_guest(to, from, n, skip); + /* check if struct desc doesn't cross page boundary */ + if (xencomm_desc_cross_page_boundary((unsigned long)from)) + return -EINVAL; /* first we need to access the descriptor */ desc = (struct xencomm_desc *)paddr_to_maddr((unsigned long)from); if (desc == NULL) @@ -98,18 +112,26 @@ xencomm_copy_from_guest(void *to, const __func__, desc, desc->magic); return n; } + desc_paddr = (struct xencomm_desc *)from; + address = &desc->address[i]; /* iterate 
through the descriptor, copying up to a page at a time */ while ((to_pos < n) && (i < desc->nr_addrs)) { - unsigned long src_paddr = desc->address[i]; + unsigned long src_paddr; unsigned int pgoffset; unsigned int chunksz; unsigned int chunk_skip; - if (src_paddr == XENCOMM_INVALID) { - i++; - continue; - } + /* When crossing page boundary, machine address must be calculated. */ + if (((unsigned long)address & ~PAGE_MASK) == 0) { + address = (unsigned long*)xencomm_paddr_to_maddr( + (unsigned long)&desc_paddr->address[i]); + if (address == NULL) + return -EFAULT; + } + src_paddr = *address; + if (src_paddr == XENCOMM_INVALID) + goto skip_to_next; pgoffset = src_paddr % PAGE_SIZE; chunksz = PAGE_SIZE - pgoffset; @@ -135,7 +157,9 @@ xencomm_copy_from_guest(void *to, const to_pos += bytes; } + skip_to_next: i++; + address++; } return n - to_pos; @@ -188,6 +212,8 @@ xencomm_copy_to_guest(void *to, const vo unsigned int skip) { struct xencomm_desc *desc; + struct xencomm_desc *desc_paddr; + unsigned long *address; unsigned int from_pos = 0; unsigned int to_pos = 0; unsigned int i = 0; @@ -195,6 +221,9 @@ xencomm_copy_to_guest(void *to, const vo if (xencomm_is_inline(to)) return xencomm_inline_to_guest(to, from, n, skip); + /* check if struct desc doesn't cross page boundary */ + if (xencomm_desc_cross_page_boundary((unsigned long)to)) + return -EINVAL; /* first we need to access the descriptor */ desc = (struct xencomm_desc *)paddr_to_maddr((unsigned long)to); if (desc == NULL) @@ -204,18 +233,26 @@ xencomm_copy_to_guest(void *to, const vo printk("%s error: %p magic was 0x%x\n", __func__, desc, desc->magic); return n; } + desc_paddr = (struct xencomm_desc*)to; + address = &desc->address[i]; /* iterate through the descriptor, copying up to a page at a time */ while ((from_pos < n) && (i < desc->nr_addrs)) { - unsigned long dest_paddr = desc->address[i]; + unsigned long dest_paddr; unsigned int pgoffset; unsigned int chunksz; unsigned int chunk_skip; - if (dest_paddr == 
XENCOMM_INVALID) { - i++; - continue; - } + /* When crossing page boundary, machine address must be calculated. */ + if (((unsigned long)address & ~PAGE_MASK) == 0) { + address = (unsigned long*)xencomm_paddr_to_maddr( + (unsigned long)&desc_paddr->address[i]); + if (address == NULL) + return -EFAULT; + } + dest_paddr = *address; + if (dest_paddr == XENCOMM_INVALID) + goto skip_to_next; pgoffset = dest_paddr % PAGE_SIZE; chunksz = PAGE_SIZE - pgoffset; @@ -241,7 +278,9 @@ xencomm_copy_to_guest(void *to, const vo to_pos += bytes; } + skip_to_next: i++; + address++; } return n - from_pos; @@ -258,11 +297,16 @@ int xencomm_add_offset(void **handle, un int xencomm_add_offset(void **handle, unsigned int bytes) { struct xencomm_desc *desc; + struct xencomm_desc *desc_paddr; + unsigned long *address; int i = 0; if (xencomm_is_inline(*handle)) return xencomm_inline_add_offset(handle, bytes); + /* check if struct desc doesn't cross page boundary. NOTE(review): desc is read from *handle below -- should this check (and desc_paddr) use *handle rather than handle, as the copy functions do with from/to? */ + if (xencomm_desc_cross_page_boundary((unsigned long)handle)) + return -1; /* first we need to access the descriptor */ desc = (struct xencomm_desc *)paddr_to_maddr((unsigned long)*handle); if (desc == NULL) @@ -272,13 +316,26 @@ int xencomm_add_offset(void **handle, un printk("%s error: %p magic was 0x%x\n", __func__, desc, desc->magic); return -1; } + desc_paddr = (struct xencomm_desc*)handle; + address = &desc->address[i]; /* iterate through the descriptor incrementing addresses */ while ((bytes > 0) && (i < desc->nr_addrs)) { - unsigned long dest_paddr = desc->address[i]; + unsigned long dest_paddr; unsigned int pgoffset; unsigned int chunksz; unsigned int chunk_skip; + + /* When crossing page boundary, machine address must be calculated. 
*/ + if (((unsigned long)address & ~PAGE_MASK) == 0) { + address = (unsigned long*)xencomm_paddr_to_maddr( + (unsigned long)&desc_paddr->address[i]); + if (address == NULL) + return -1; + } + dest_paddr = *address; + if (dest_paddr == XENCOMM_INVALID) + goto skip_to_next; pgoffset = dest_paddr % PAGE_SIZE; chunksz = PAGE_SIZE - pgoffset; @@ -286,11 +343,15 @@ int xencomm_add_offset(void **handle, un chunk_skip = min(chunksz, bytes); if (chunk_skip == chunksz) { /* exhausted this page */ - desc->address[i] = XENCOMM_INVALID; + *address = XENCOMM_INVALID; } else { - desc->address[i] += chunk_skip; + *address += chunk_skip; } bytes -= chunk_skip; + + skip_to_next: + i++; + address++; } return 0; } @@ -298,6 +359,8 @@ int xencomm_handle_is_null(void *handle) int xencomm_handle_is_null(void *handle) { struct xencomm_desc *desc; + struct xencomm_desc *desc_paddr; + unsigned long *address; int i; if (xencomm_is_inline(handle)) @@ -306,10 +369,21 @@ int xencomm_handle_is_null(void *handle) desc = (struct xencomm_desc *)paddr_to_maddr((unsigned long)handle); if (desc == NULL) return 1; - - for (i = 0; i < desc->nr_addrs; i++) - if (desc->address[i] != XENCOMM_INVALID) + desc_paddr = (struct xencomm_desc*)handle; + address = &desc->address[0]; + + for (i = 0; i < desc->nr_addrs; i++) { + /* When crossing page boundary, machine address must be calculated. */ + if (((unsigned long)address & ~PAGE_MASK) == 0) { + address = (unsigned long*)xencomm_paddr_to_maddr( + (unsigned long)&desc_paddr->address[i]); + if (address == NULL) + return 1; /* EFAULT? */ + } + + if (*address != XENCOMM_INVALID) return 0; + } return 1; }