# HG changeset patch
# User Ian.Campbell@xxxxxxxxxxxxx
# Node ID 60beade30a0c01c998f8fdea4a280d329228ce74
# Parent 0eb38397e60845e72c60fbf0820c65491262d250
# Parent 17dc21008351fcbe2b326b267541d23bfba3b388
merge
diff -r 0eb38397e608 -r 60beade30a0c linux-2.6-xen-sparse/arch/xen/i386/mm/hypervisor.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/mm/hypervisor.c Fri Jan 27 11:31:14 2006
+++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/hypervisor.c Fri Jan 27 11:51:57 2006
@@ -315,9 +315,9 @@
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
- unsigned long mfn, i, flags;
+ unsigned long frame, i, flags;
struct xen_memory_reservation reservation = {
- .extent_start = &mfn,
+ .extent_start = &frame,
.nr_extents = 1,
.extent_order = 0,
.domid = DOMID_SELF
@@ -333,7 +333,7 @@
pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
- mfn = pte_mfn(*pte);
+ frame = pte_mfn(*pte);
BUG_ON(HYPERVISOR_update_va_mapping(
vstart + (i*PAGE_SIZE), __pte_ma(0), 0));
set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i,
@@ -345,7 +345,8 @@
/* 2. Get a new contiguous memory extent. */
reservation.extent_order = order;
reservation.address_bits = address_bits;
- if (HYPERVISOR_memory_op(XENMEM_increase_reservation,
+ frame = __pa(vstart) >> PAGE_SHIFT;
+ if (HYPERVISOR_memory_op(XENMEM_populate_physmap,
&reservation) != 1)
goto fail;
@@ -353,9 +354,8 @@
for (i = 0; i < (1<<order); i++) {
BUG_ON(HYPERVISOR_update_va_mapping(
vstart + (i*PAGE_SIZE),
- pfn_pte_ma(mfn+i, PAGE_KERNEL), 0));
- xen_machphys_update(mfn+i, (__pa(vstart)>>PAGE_SHIFT)+i);
- set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, mfn+i);
+ pfn_pte_ma(frame+i, PAGE_KERNEL), 0));
+ set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, frame+i);
}
flush_tlb_all();
@@ -371,13 +371,13 @@
reservation.address_bits = 0;
for (i = 0; i < (1<<order); i++) {
+ frame = (__pa(vstart) >> PAGE_SHIFT) + i;
BUG_ON(HYPERVISOR_memory_op(
- XENMEM_increase_reservation, &reservation) != 1);
+ XENMEM_populate_physmap, &reservation) != 1);
BUG_ON(HYPERVISOR_update_va_mapping(
vstart + (i*PAGE_SIZE),
- pfn_pte_ma(mfn, PAGE_KERNEL), 0));
- xen_machphys_update(mfn, (__pa(vstart)>>PAGE_SHIFT)+i);
- set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, mfn);
+ pfn_pte_ma(frame, PAGE_KERNEL), 0));
+ set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, frame);
}
flush_tlb_all();
@@ -393,9 +393,9 @@
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
- unsigned long mfn, i, flags;
+ unsigned long frame, i, flags;
struct xen_memory_reservation reservation = {
- .extent_start = &mfn,
+ .extent_start = &frame,
.nr_extents = 1,
.extent_order = 0,
.domid = DOMID_SELF
@@ -413,7 +413,7 @@
pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
- mfn = pte_mfn(*pte);
+ frame = pte_mfn(*pte);
BUG_ON(HYPERVISOR_update_va_mapping(
vstart + (i*PAGE_SIZE), __pte_ma(0), 0));
set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i,
@@ -424,13 +424,13 @@
/* 2. Map new pages in place of old pages. */
for (i = 0; i < (1<<order); i++) {
+ frame = (__pa(vstart) >> PAGE_SHIFT) + i;
BUG_ON(HYPERVISOR_memory_op(
- XENMEM_increase_reservation, &reservation) != 1);
+ XENMEM_populate_physmap, &reservation) != 1);
BUG_ON(HYPERVISOR_update_va_mapping(
vstart + (i*PAGE_SIZE),
- pfn_pte_ma(mfn, PAGE_KERNEL), 0));
- xen_machphys_update(mfn, (__pa(vstart)>>PAGE_SHIFT)+i);
- set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, mfn);
+ pfn_pte_ma(frame, PAGE_KERNEL), 0));
+ set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, frame);
}
flush_tlb_all();
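The net effect of the hypervisor.c changes above: the guest now passes the
pseudo-physical frame it wants populated IN through extent_start and receives
the allocated machine frame back OUT in the same slot, and Xen updates the
mach-to-phys table itself, which is why the xen_machphys_update() calls
disappear. A minimal single-page sketch of the new convention (illustrative
only, not part of this changeset; it assumes the usual Linux-Xen headers):

unsigned long frame = pfn;              /* IN: pseudo-physical frame number */
struct xen_memory_reservation res = {
        .extent_start = &frame,         /* IN: pfn, OUT: allocated mfn */
        .nr_extents   = 1,
        .extent_order = 0,
        .domid        = DOMID_SELF
};
if (HYPERVISOR_memory_op(XENMEM_populate_physmap, &res) != 1)
        return -ENOMEM;                 /* allocation failed */
/* Xen has already updated the M2P table; only the guest-side
 * P2M entry remains to be set. */
set_phys_to_machine(pfn, frame);
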
diff -r 0eb38397e608 -r 60beade30a0c linux-2.6-xen-sparse/drivers/xen/balloon/balloon.c
--- a/linux-2.6-xen-sparse/drivers/xen/balloon/balloon.c Fri Jan 27 11:31:14 2006
+++ b/linux-2.6-xen-sparse/drivers/xen/balloon/balloon.c Fri Jan 27 11:51:57 2006
@@ -139,6 +139,21 @@
return page;
}
+static struct page *balloon_first_page(void)
+{
+ if (list_empty(&ballooned_pages))
+ return NULL;
+ return LIST_TO_PAGE(ballooned_pages.next);
+}
+
+static struct page *balloon_next_page(struct page *page)
+{
+ struct list_head *next = PAGE_TO_LIST(page)->next;
+ if (next == &ballooned_pages)
+ return NULL;
+ return LIST_TO_PAGE(next);
+}
+
static void balloon_alarm(unsigned long unused)
{
schedule_work(&balloon_worker);
@@ -154,7 +169,7 @@
static int increase_reservation(unsigned long nr_pages)
{
- unsigned long *mfn_list, pfn, i, flags;
+ unsigned long *frame_list, pfn, i, flags;
struct page *page;
long rc;
struct xen_memory_reservation reservation = {
@@ -166,20 +181,27 @@
if (nr_pages > (PAGE_SIZE / sizeof(unsigned long)))
nr_pages = PAGE_SIZE / sizeof(unsigned long);
- mfn_list = (unsigned long *)__get_free_page(GFP_KERNEL);
- if (mfn_list == NULL)
+ frame_list = (unsigned long *)__get_free_page(GFP_KERNEL);
+ if (frame_list == NULL)
return -ENOMEM;
balloon_lock(flags);
- reservation.extent_start = mfn_list;
+ page = balloon_first_page();
+ for (i = 0; i < nr_pages; i++) {
+ BUG_ON(page == NULL);
+ frame_list[i] = page_to_pfn(page);
+ page = balloon_next_page(page);
+ }
+
+ reservation.extent_start = frame_list;
reservation.nr_extents = nr_pages;
rc = HYPERVISOR_memory_op(
- XENMEM_increase_reservation, &reservation);
+ XENMEM_populate_physmap, &reservation);
if (rc < nr_pages) {
int ret;
/* We hit the Xen hard limit: reprobe. */
- reservation.extent_start = mfn_list;
+ reservation.extent_start = frame_list;
reservation.nr_extents = rc;
ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
&reservation);
@@ -196,15 +218,15 @@
BUG_ON(phys_to_machine_mapping_valid(pfn));
/* Update P->M and M->P tables. */
- set_phys_to_machine(pfn, mfn_list[i]);
- xen_machphys_update(mfn_list[i], pfn);
+ set_phys_to_machine(pfn, frame_list[i]);
+ xen_machphys_update(frame_list[i], pfn);
/* Link back into the page tables if not highmem. */
if (pfn < max_low_pfn) {
int ret;
ret = HYPERVISOR_update_va_mapping(
(unsigned long)__va(pfn << PAGE_SHIFT),
- pfn_pte_ma(mfn_list[i], PAGE_KERNEL),
+ pfn_pte_ma(frame_list[i], PAGE_KERNEL),
0);
BUG_ON(ret);
}
@@ -221,14 +243,14 @@
out:
balloon_unlock(flags);
- free_page((unsigned long)mfn_list);
+ free_page((unsigned long)frame_list);
return 0;
}
static int decrease_reservation(unsigned long nr_pages)
{
- unsigned long *mfn_list, pfn, i, flags;
+ unsigned long *frame_list, pfn, i, flags;
struct page *page;
void *v;
int need_sleep = 0;
@@ -242,8 +264,8 @@
if (nr_pages > (PAGE_SIZE / sizeof(unsigned long)))
nr_pages = PAGE_SIZE / sizeof(unsigned long);
- mfn_list = (unsigned long *)__get_free_page(GFP_KERNEL);
- if (mfn_list == NULL)
+ frame_list = (unsigned long *)__get_free_page(GFP_KERNEL);
+ if (frame_list == NULL)
return -ENOMEM;
for (i = 0; i < nr_pages; i++) {
@@ -254,7 +276,7 @@
}
pfn = page_to_pfn(page);
- mfn_list[i] = pfn_to_mfn(pfn);
+ frame_list[i] = pfn_to_mfn(pfn);
if (!PageHighMem(page)) {
v = phys_to_virt(pfn << PAGE_SHIFT);
@@ -280,12 +302,12 @@
/* No more mappings: invalidate P2M and add to balloon. */
for (i = 0; i < nr_pages; i++) {
- pfn = mfn_to_pfn(mfn_list[i]);
+ pfn = mfn_to_pfn(frame_list[i]);
set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
balloon_append(pfn_to_page(pfn));
}
- reservation.extent_start = mfn_list;
+ reservation.extent_start = frame_list;
reservation.nr_extents = nr_pages;
ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
BUG_ON(ret != nr_pages);
@@ -295,7 +317,7 @@
balloon_unlock(flags);
- free_page((unsigned long)mfn_list);
+ free_page((unsigned long)frame_list);
return need_sleep;
}
diff -r 0eb38397e608 -r 60beade30a0c linux-2.6-xen-sparse/drivers/xen/util.c
--- a/linux-2.6-xen-sparse/drivers/xen/util.c Fri Jan 27 11:31:14 2006
+++ b/linux-2.6-xen-sparse/drivers/xen/util.c Fri Jan 27 11:51:57 2006
@@ -1,5 +1,6 @@
#include <linux/config.h>
#include <linux/mm.h>
+#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/uaccess.h>
@@ -31,7 +32,7 @@
return area;
}
-EXPORT_SYMBOL(alloc_vm_area);
+EXPORT_SYMBOL_GPL(alloc_vm_area);
void free_vm_area(struct vm_struct *area)
{
@@ -40,7 +41,7 @@
BUG_ON(ret != area);
kfree(area);
}
-EXPORT_SYMBOL(free_vm_area);
+EXPORT_SYMBOL_GPL(free_vm_area);
void lock_vm_area(struct vm_struct *area)
{
@@ -60,13 +61,13 @@
for (i = 0; i < area->size; i += PAGE_SIZE)
(void)__get_user(c, (char __user *)area->addr + i);
}
-EXPORT_SYMBOL(lock_vm_area);
+EXPORT_SYMBOL_GPL(lock_vm_area);
void unlock_vm_area(struct vm_struct *area)
{
preempt_enable();
}
-EXPORT_SYMBOL(unlock_vm_area);
+EXPORT_SYMBOL_GPL(unlock_vm_area);
/*
* Local variables:
diff -r 0eb38397e608 -r 60beade30a0c xen/common/memory.c
--- a/xen/common/memory.c Fri Jan 27 11:31:14 2006
+++ b/xen/common/memory.c Fri Jan 27 11:51:57 2006
@@ -30,7 +30,7 @@
int *preempted)
{
struct pfn_info *page;
- unsigned int i;
+ unsigned long i;
if ( (extent_list != NULL) &&
!array_access_ok(extent_list, nr_extents, sizeof(*extent_list)) )
@@ -52,7 +52,7 @@
d, extent_order, flags)) == NULL) )
{
DPRINTK("Could not allocate order=%d extent: "
- "id=%d flags=%x (%d of %d)\n",
+ "id=%d flags=%x (%ld of %d)\n",
extent_order, d->domain_id, flags, i, nr_extents);
return i;
}
@@ -60,6 +60,58 @@
/* Inform the domain of the new page's machine address. */
if ( (extent_list != NULL) &&
(__put_user(page_to_pfn(page), &extent_list[i]) != 0) )
+ return i;
+ }
+
+ return nr_extents;
+}
+
+static long
+populate_physmap(
+ struct domain *d,
+ unsigned long *extent_list,
+ unsigned int nr_extents,
+ unsigned int extent_order,
+ unsigned int flags,
+ int *preempted)
+{
+ struct pfn_info *page;
+ unsigned long i, j, pfn, mfn;
+
+ if ( !array_access_ok(extent_list, nr_extents, sizeof(*extent_list)) )
+ return 0;
+
+ if ( (extent_order != 0) &&
+ !multipage_allocation_permitted(current->domain) )
+ return 0;
+
+ for ( i = 0; i < nr_extents; i++ )
+ {
+ if ( hypercall_preempt_check() )
+ {
+ *preempted = 1;
+ return i;
+ }
+
+ if ( unlikely((page = alloc_domheap_pages(
+ d, extent_order, flags)) == NULL) )
+ {
+ DPRINTK("Could not allocate order=%d extent: "
+ "id=%d flags=%x (%ld of %d)\n",
+ extent_order, d->domain_id, flags, i, nr_extents);
+ return i;
+ }
+
+ mfn = page_to_pfn(page);
+
+ if ( unlikely(__get_user(pfn, &extent_list[i]) != 0) )
+ return i;
+
+ for ( j = 0; j < (1 << extent_order); j++ )
+ set_pfn_from_mfn(mfn + j, pfn + j);
+
+ /* Inform the domain of the new page's machine address. */
+ if ( __put_user(mfn, &extent_list[i]) != 0 )
return i;
}
@@ -76,7 +128,7 @@
int *preempted)
{
struct pfn_info *page;
- unsigned long i, j, mpfn;
+ unsigned long i, j, mfn;
if ( !array_access_ok(extent_list, nr_extents, sizeof(*extent_list)) )
return 0;
@@ -89,19 +141,19 @@
return i;
}
- if ( unlikely(__get_user(mpfn, &extent_list[i]) != 0) )
+ if ( unlikely(__get_user(mfn, &extent_list[i]) != 0) )
return i;
for ( j = 0; j < (1 << extent_order); j++ )
{
- if ( unlikely((mpfn + j) >= max_page) )
+ if ( unlikely((mfn + j) >= max_page) )
{
DPRINTK("Domain %u page number out of range (%lx >= %lx)\n",
- d->domain_id, mpfn + j, max_page);
+ d->domain_id, mfn + j, max_page);
return i;
}
- page = pfn_to_page(mpfn + j);
+ page = pfn_to_page(mfn + j);
if ( unlikely(!get_page(page, d)) )
{
DPRINTK("Bad page free for domain %u\n", d->domain_id);
@@ -143,6 +195,7 @@
{
case XENMEM_increase_reservation:
case XENMEM_decrease_reservation:
+ case XENMEM_populate_physmap:
if ( copy_from_user(&reservation, arg, sizeof(reservation)) )
return -EFAULT;
@@ -170,14 +223,37 @@
else if ( (d = find_domain_by_id(reservation.domid)) == NULL )
return -ESRCH;
- rc = ((op == XENMEM_increase_reservation) ?
- increase_reservation : decrease_reservation)(
- d,
- reservation.extent_start,
- reservation.nr_extents,
- reservation.extent_order,
- flags,
- &preempted);
+ switch ( op )
+ {
+ case XENMEM_increase_reservation:
+ rc = increase_reservation(
+ d,
+ reservation.extent_start,
+ reservation.nr_extents,
+ reservation.extent_order,
+ flags,
+ &preempted);
+ break;
+ case XENMEM_decrease_reservation:
+ rc = decrease_reservation(
+ d,
+ reservation.extent_start,
+ reservation.nr_extents,
+ reservation.extent_order,
+ flags,
+ &preempted);
+ break;
+ case XENMEM_populate_physmap:
+ default:
+ rc = populate_physmap(
+ d,
+ reservation.extent_start,
+ reservation.nr_extents,
+ reservation.extent_order,
+ flags,
+ &preempted);
+ break;
+ }
if ( unlikely(reservation.domid != DOMID_SELF) )
put_domain(d);
diff -r 0eb38397e608 -r 60beade30a0c xen/include/public/memory.h
--- a/xen/include/public/memory.h Fri Jan 27 11:31:14 2006
+++ b/xen/include/public/memory.h Fri Jan 27 11:51:57 2006
@@ -16,11 +16,18 @@
*/
#define XENMEM_increase_reservation 0
#define XENMEM_decrease_reservation 1
+#define XENMEM_populate_physmap 6
typedef struct xen_memory_reservation {
/*
- * MFN bases of extents to free (XENMEM_decrease_reservation).
- * MFN bases of extents that were allocated (XENMEM_increase_reservation).
+ * XENMEM_increase_reservation:
+ * OUT: MFN bases of extents that were allocated
+ * XENMEM_decrease_reservation:
+ * IN: MFN bases of extents to free
+ * XENMEM_populate_physmap:
+ * IN: PFN bases of extents to populate with memory
+ * OUT: MFN bases of extents that were allocated
+ * (NB. This command also updates the mach_to_phys translation table)
*/
unsigned long *extent_start;
@@ -29,11 +36,10 @@
unsigned int extent_order;
/*
- * XENMEM_increase_reservation: maximum # bits addressable by the user
- * of the allocated region (e.g., I/O devices often have a 32-bit
- * limitation even in 64-bit systems). If zero then the user has no
- * addressing restriction.
- * XENMEM_decrease_reservation: unused.
+ * Maximum # bits addressable by the user of the allocated region (e.g.,
+ * I/O devices often have a 32-bit limitation even in 64-bit systems). If
+ * zero then the user has no addressing restriction.
+ * This field is not used by XENMEM_decrease_reservation.
*/
unsigned int address_bits;
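
To make the new IN/OUT semantics concrete, here is a sketch of a batched
XENMEM_populate_physmap call as the patched balloon driver issues it (names
as in balloon.c above; a sketch under those assumptions, not code lifted
from this changeset):

/* frame_list[] holds one page's worth of unsigned longs, pre-filled with
 * the PFNs of ballooned-out pages (IN).  On return Xen has overwritten
 * each successfully populated slot with the backing MFN (OUT) and has
 * already updated the mach-to-phys table. */
struct xen_memory_reservation reservation = {
        .extent_start = frame_list,
        .nr_extents   = nr_pages,
        .extent_order = 0,
        .domid        = DOMID_SELF
};
rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
if (rc < nr_pages) {
        /* Partial success: only the first rc extents were populated;
         * the caller must hand the surplus back (see balloon.c above). */
}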