On Tue, Apr 12, 2011 at 02:06:05PM +0100, Keir Fraser wrote:
> On 12/04/2011 13:53, "Konrad Rzeszutek Wilk" <konrad.wilk@xxxxxxxxxx> wrote:
>
> >> How cunning.
> >>
> >> Why wouldn't you just allocate exactly the right size of array in
> >> XENMEM_set_memory_map?
> >
> > I was thinking about it, but the mm.c code did not have the
> > xen/xmalloc.h header, nor any references to xmalloc_array.
> >
> > Is it OK to make an xmalloc_array during a hypercall?
>
> Yes. I think the toolstack should be able to clean up on the newly-possible
> ENOMEM return from this hypercall.
Hm, not sure what I am hitting, but I can't seem to copy the
contents over to the newly allocated array from the guest (this works
fine with the previous version of the patch). This is what I get:
(XEN) mm.c:4734:d0 We want 8 E820 entries. We have: 1.
(XEN) mm.c:4754:d0 Copying E820 8 entries.
(XEN) ----[ Xen-4.2-110412 : 00000000000000a0
(XEN) rdx: 0000000000000000 rsi: 0000000000680004 rdi: 0000000000000000
(XEN) rbp: ffff82c48028fc68 rsp: ffff82c48028fc58 r8: ffff82c4802c8bd0
(XEN) r9: 0000000000000004 r10: 0000000000000004 r11: 0000000000000001
(XEN) r12: 00000000000000a0 r13: 0000000000000000 r14: 0000000000680004
(XEN) r15: 0000000000000008 cr0: 0000000080050033 cr4: 00000000000026f0
(XEN) cr3: 00000001056af000 cr2: 0000000000000000
(XEN) ds: 0000 es: 0000 fs: 0000 gs: 0000 ss: e010 cs: e008
(XEN) Xen stack trace from rsp=ffff82c48028fc58:
(XEN) 00000000000000a0 00000000000000a0 ffff82c48028fc98 ffff82c48017fa7d
(XEN) 0000000000000008 0000000000000000 0000000000000008 ffff88005c51d7c0
(XEN) ffff82c48028fd38 ffff82c480169857 ffff82c48028fcb8 ffff82c480167d9a
(XEN) 000000008028fd08 ffff82f60214be50 0000000000000001 0000000000000008
(XEN) 0000000000680004 0000000000000001 ffff88005d8aa1f8 ffff8301056e0000
(XEN) ffff83011f9b3000 0000000000000000 ffff82c48028fe28 ffff8300cf0fd000
(XEN) 000000000000000d 00007fff2d629060 ffff88005c51d7c0 0000000000000008
(XEN) ffff82c48028fef8 ffff82c48011399c ffff82c4802c9940 000000008028ff18
(XEN) ffff82c48028fd88 0000000000123412 ffff82f602468250 ffff82c48028ff18
(XEN) ffff82c48028ff18 ffff82c4802c8e38 000000008028fde8 ffff82f602468250
(XEN) 00000000001068f6 ffff83011f9b3000 0000000000000070 0000000123412000
(XEN) ffff82f602468240 ffff88005d8a9220 0000000000000000 0000000000682004
(XEN) ffff82c48028fe28 ffff82c48016aa14 8000000127365065 000000018015fd21
(XEN) 0000000000000000 ffff8300cf0fd1c8 ffff8300cf0fd1c8 ffff82c48028fe88
(XEN) aaaaaaaaaaaaaaaa ffff88005d8a9220 ffff82c48028fef8 ffff82c480113cb7
(XEN) ffff8300cf0fd1d0 ffff8300cf0fd000 000000018028fef8 ffff82c48028ff18
(XEN) ffff82c48028fe68 ffff82c48028ff18 ffff8300cf0fd000 00007fff2d6290b0
(XEN) 0000000000000001 0000000000000008 ffff82c48028fef8 ffff82c4802038c3
(XEN) 0000000000680000 00007fb540012cd0 0000000000000000 0000000000000000
(XEN) 00007fb540012e60 000000000000e033 ffff82c48028fed8 ffff8300cf0fd000
(XEN) Xen call trace:
(XEN) [<ffff82c48020a3c5>] vmac+0x5da/0x927
(XEN) [<ffff82c48017fa7d>] copy_from_user+0x72/0x9f
(XEN) [<ffff82c480169857>] arch_memory_op+0x7cc/0xf05
(XEN) [<ffff82c48011399c>] do_memory_op+0x1dca/0x1df2
(XEN) [<ffff82c4802082c8>] syscall_enter+0xc8/0x122
And the patch is:
diff -r 97763efc41f9 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c Tue Apr 05 18:23:54 2011 +0100
+++ b/xen/arch/x86/domain.c Tue Apr 12 13:18:34 2011 -0400
@@ -696,6 +696,8 @@ void arch_domain_destroy(struct domain *
if ( is_hvm_domain(d) )
hvm_domain_destroy(d);
+ else
+ xfree(d->arch.pv_domain.e820);
vmce_destroy_msr(d);
pci_release_devices(d);
diff -r 97763efc41f9 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Tue Apr 05 18:23:54 2011 +0100
+++ b/xen/arch/x86/mm.c Tue Apr 12 13:18:34 2011 -0400
@@ -100,6 +100,7 @@
#include <xen/iocap.h>
#include <xen/guest_access.h>
#include <xen/pfn.h>
+#include <xen/xmalloc.h>
#include <asm/paging.h>
#include <asm/shadow.h>
#include <asm/page.h>
@@ -4710,7 +4711,7 @@ long arch_memory_op(int op, XEN_GUEST_HA
if ( copy_from_guest(&fmap, arg, 1) )
return -EFAULT;
- if ( fmap.map.nr_entries > ARRAY_SIZE(d->arch.pv_domain.e820) )
+ if ( fmap.map.nr_entries > E820MAX )
return -EINVAL;
rc = rcu_lock_target_domain_by_id(fmap.domid, &d);
@@ -4730,9 +4731,39 @@ long arch_memory_op(int op, XEN_GUEST_HA
return -EPERM;
}
+ gdprintk(XENLOG_INFO, "We want %d E820 entries. We have: %d.\n",
+          fmap.map.nr_entries, d->arch.pv_domain.nr_e820);
+ if ( d->arch.pv_domain.e820 )
+ {
+ if ( fmap.map.nr_entries >= d->arch.pv_domain.nr_e820 )
+ {
+ xfree(d->arch.pv_domain.e820);
+ d->arch.pv_domain.e820 = NULL;
+ d->arch.pv_domain.nr_e820 = 0;
+ }
+ } else {
+ d->arch.pv_domain.e820 = xmalloc_array(e820entry_t,
+ fmap.map.nr_entries + 1);
+ if ( !d->arch.pv_domain.e820 )
+ {
+ rcu_unlock_domain(d);
+ return -ENOMEM;
+ }
+ gdprintk(XENLOG_INFO, "Allocated E820 for %d entries.\n",
+          fmap.map.nr_entries);
+ memset(d->arch.pv_domain.e820, 0, (fmap.map.nr_entries + 1) *
+        sizeof(e820entry_t));
+ }
+ gdprintk(XENLOG_INFO, "Copying E820 %d entries.\n",
+          fmap.map.nr_entries);
rc = copy_from_guest(d->arch.pv_domain.e820, fmap.map.buffer,
fmap.map.nr_entries) ? -EFAULT : 0;
- d->arch.pv_domain.nr_e820 = fmap.map.nr_entries;
+
+ if ( rc == 0 )
+ d->arch.pv_domain.nr_e820 = fmap.map.nr_entries;
+ else {
+ /* Don't free the E820 if it had been set before and we failed. */
+ if ( !d->arch.pv_domain.nr_e820 ) {
+ xfree(d->arch.pv_domain.e820);
+ d->arch.pv_domain.e820 = NULL;
+ }
+ }
rcu_unlock_domain(d);
return rc;
@@ -4747,6 +4778,9 @@ long arch_memory_op(int op, XEN_GUEST_HA
if ( d->arch.pv_domain.nr_e820 == 0 )
return -ENOSYS;
+ if ( d->arch.pv_domain.e820 == NULL )
+ return -ENOSYS;
+
if ( copy_from_guest(&map, arg, 1) )
return -EFAULT;
diff -r 97763efc41f9 xen/include/asm-x86/domain.h
--- a/xen/include/asm-x86/domain.h Tue Apr 05 18:23:54 2011 +0100
+++ b/xen/include/asm-x86/domain.h Tue Apr 12 13:18:34 2011 -0400
@@ -238,7 +238,7 @@ struct pv_domain
unsigned long pirq_eoi_map_mfn;
/* Pseudophysical e820 map (XENMEM_memory_map). */
- struct e820entry e820[3];
+ struct e820entry *e820;
unsigned int nr_e820;
};
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
|