This patch reinstates the XENMEM_remove_from_physmap hypercall
which was removed in 19041:ee62aaafff46 because it was not used.
However, it is now needed in order to support xenstored stub domains.
The xenstored stub domain is not privileged like dom0 and so cannot
unilaterally map the xenbus page of other guests into its address
space. Therefore, before creating a domU, the domain builder needs to
seed its grant table with a grant ref allowing the xenstored stub
domain to access the new domU's xenbus page.
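For illustration only (not part of this patch), seeding one grant
entry might look like the sketch below. The names gnttab, xs_domid
and xenbus_gmfn are assumptions: a dom0 mapping of the domU's first
grant-table frame, the stub domain's ID, and the frame backing the
domU's xenbus page. GNTTAB_RESERVED_XENSTORE and GTF_permit_access
come from xen/include/public/grant_table.h.

/* Sketch only: mark one reserved grant entry as granting xs_domid
 * access to the domU's xenbus page. */
static void seed_xenstore_grant(grant_entry_t *gnttab, domid_t xs_domid,
                                unsigned long xenbus_gmfn)
{
    gnttab[GNTTAB_RESERVED_XENSTORE].domid = xs_domid;
    gnttab[GNTTAB_RESERVED_XENSTORE].frame = xenbus_gmfn;
    /* Set flags last so the entry only becomes valid once complete. */
    gnttab[GNTTAB_RESERVED_XENSTORE].flags = GTF_permit_access;
}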
At present domUs do not start with their grant table mapped.
Instead, it gets mapped when the guest requests a grant table from
the hypervisor.
In order to seed the grant table, the domain builder first needs to
map it into dom0's address space. But the hypercall to do this
requires a gpfn (guest pfn), which is an mfn for PV guests but a pfn
for HVM guests. Therefore, in order to seed the grant table of an
HVM guest, dom0 needs to *temporarily* map it into the guest's
"physical" address space.
Hence the need to reinstate the XENMEM_remove_from_physmap hypercall.
Signed-off-by: Alex Zeffertt <alex.zeffertt@xxxxxxxxxxxxx>
diff -r 0cca6939e5c8 tools/libxc/xc_private.c
--- a/tools/libxc/xc_private.c Tue Mar 17 11:23:40 2009 +0000
+++ b/tools/libxc/xc_private.c Tue Mar 17 15:12:07 2009 +0000
@@ -307,6 +307,13 @@
goto out1;
}
break;
+ case XENMEM_remove_from_physmap:
+ if ( lock_pages(arg, sizeof(struct xen_remove_from_physmap)) )
+ {
+ PERROR("Could not lock");
+ goto out1;
+ }
+ break;
case XENMEM_current_reservation:
case XENMEM_maximum_reservation:
case XENMEM_maximum_gpfn:
@@ -347,6 +354,9 @@
break;
case XENMEM_add_to_physmap:
unlock_pages(arg, sizeof(struct xen_add_to_physmap));
+ break;
+ case XENMEM_remove_from_physmap:
+ unlock_pages(arg, sizeof(struct xen_remove_from_physmap));
break;
case XENMEM_current_reservation:
case XENMEM_maximum_reservation:
diff -r 0cca6939e5c8 xen/arch/ia64/xen/mm.c
--- a/xen/arch/ia64/xen/mm.c Tue Mar 17 11:23:40 2009 +0000
+++ b/xen/arch/ia64/xen/mm.c Tue Mar 17 15:12:07 2009 +0000
@@ -3416,6 +3416,40 @@
break;
}
+ case XENMEM_remove_from_physmap:
+ {
+ struct xen_remove_from_physmap xrfp;
+ unsigned long mfn;
+ struct domain *d;
+
+ if ( copy_from_guest(&xrfp, arg, 1) )
+ return -EFAULT;
+
+ rc = rcu_lock_target_domain_by_id(xrfp.domid, &d);
+ if ( rc != 0 )
+ return rc;
+
+ if ( xsm_remove_from_physmap(current->domain, d) )
+ {
+ rcu_unlock_domain(d);
+ return -EPERM;
+ }
+
+ domain_lock(d);
+
+ mfn = gmfn_to_mfn(d, xrfp.gpfn);
+
+ if ( mfn_valid(mfn) )
+ guest_physmap_remove_page(d, xrfp.gpfn, mfn, 0);
+
+ domain_unlock(d);
+
+ rcu_unlock_domain(d);
+
+ break;
+ }
+
case XENMEM_machine_memory_map:
{
struct xen_memory_map memmap;
diff -r 0cca6939e5c8 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Tue Mar 17 11:23:40 2009 +0000
+++ b/xen/arch/x86/mm.c Tue Mar 17 15:12:07 2009 +0000
@@ -3927,6 +3927,39 @@
break;
}
+ case XENMEM_remove_from_physmap:
+ {
+ struct xen_remove_from_physmap xrfp;
+ unsigned long mfn;
+ struct domain *d;
+
+ if ( copy_from_guest(&xrfp, arg, 1) )
+ return -EFAULT;
+
+ rc = rcu_lock_target_domain_by_id(xrfp.domid, &d);
+ if ( rc != 0 )
+ return rc;
+
+ if ( xsm_remove_from_physmap(current->domain, d) )
+ {
+ rcu_unlock_domain(d);
+ return -EPERM;
+ }
+
+ domain_lock(d);
+
+ mfn = gmfn_to_mfn(d, xrfp.gpfn);
+
+ if ( mfn_valid(mfn) )
+ guest_physmap_remove_page(d, xrfp.gpfn, mfn, 0);
+
+ domain_unlock(d);
+
+ rcu_unlock_domain(d);
+
+ break;
+ }
+
case XENMEM_set_memory_map:
{
struct xen_foreign_memory_map fmap;
diff -r 0cca6939e5c8 xen/arch/x86/x86_64/compat/mm.c
--- a/xen/arch/x86/x86_64/compat/mm.c Tue Mar 17 11:23:40 2009 +0000
+++ b/xen/arch/x86/x86_64/compat/mm.c Tue Mar 17 15:12:07 2009 +0000
@@ -64,6 +64,20 @@
return -EFAULT;
XLAT_add_to_physmap(nat, &cmp);
+ rc = arch_memory_op(op, guest_handle_from_ptr(nat, void));
+
+ break;
+ }
+
+ case XENMEM_remove_from_physmap:
+ {
+ struct compat_remove_from_physmap cmp;
+ struct xen_remove_from_physmap *nat = (void *)COMPAT_ARG_XLAT_VIRT_BASE;
+
+ if ( copy_from_guest(&cmp, arg, 1) )
+ return -EFAULT;
+
+ XLAT_remove_from_physmap(nat, &cmp);
rc = arch_memory_op(op, guest_handle_from_ptr(nat, void));
break;
diff -r 0cca6939e5c8 xen/include/public/memory.h
--- a/xen/include/public/memory.h Tue Mar 17 11:23:40 2009 +0000
+++ b/xen/include/public/memory.h Tue Mar 17 15:12:07 2009 +0000
@@ -218,6 +218,22 @@
typedef struct xen_add_to_physmap xen_add_to_physmap_t;
DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_t);
+/*
+ * Unmaps the page appearing at a particular GPFN from the specified guest's
+ * pseudophysical address space.
+ * arg == addr of xen_remove_from_physmap_t.
+ */
+#define XENMEM_remove_from_physmap 15
+struct xen_remove_from_physmap {
+ /* Which domain to change the mapping for. */
+ domid_t domid;
+
+ /* GPFN of the current mapping of the page. */
+ xen_pfn_t gpfn;
+};
+typedef struct xen_remove_from_physmap xen_remove_from_physmap_t;
+DEFINE_XEN_GUEST_HANDLE(xen_remove_from_physmap_t);
+
/*** REMOVED ***/
/*#define XENMEM_translate_gpfn_list 8*/
diff -r 0cca6939e5c8 xen/include/xlat.lst
--- a/xen/include/xlat.lst Tue Mar 17 11:23:40 2009 +0000
+++ b/xen/include/xlat.lst Tue Mar 17 15:12:07 2009 +0000
@@ -33,6 +33,7 @@
! kexec_image kexec.h
! kexec_range kexec.h
! add_to_physmap memory.h
+! remove_from_physmap memory.h
! foreign_memory_map memory.h
! memory_exchange memory.h
! memory_map memory.h
diff -r 0cca6939e5c8 xen/include/xsm/xsm.h
--- a/xen/include/xsm/xsm.h Tue Mar 17 11:23:40 2009 +0000
+++ b/xen/include/xsm/xsm.h Tue Mar 17 15:12:07 2009 +0000
@@ -142,6 +142,7 @@
int (*update_va_mapping) (struct domain *d, struct domain *f,
l1_pgentry_t pte);
int (*add_to_physmap) (struct domain *d1, struct domain *d2);
+ int (*remove_from_physmap) (struct domain *d1, struct domain *d2);
int (*sendtrigger) (struct domain *d);
int (*test_assign_device) (uint32_t machine_bdf);
int (*assign_device) (struct domain *d, uint32_t machine_bdf);
@@ -577,6 +578,11 @@
return xsm_call(add_to_physmap(d1, d2));
}
+static inline int xsm_remove_from_physmap(struct domain *d1, struct domain *d2)
+{
+ return xsm_call(remove_from_physmap(d1, d2));
+}
+
static inline int xsm_sendtrigger(struct domain *d)
{
return xsm_call(sendtrigger(d));
diff -r 0cca6939e5c8 xen/xsm/dummy.c
--- a/xen/xsm/dummy.c Tue Mar 17 11:23:40 2009 +0000
+++ b/xen/xsm/dummy.c Tue Mar 17 15:12:07 2009 +0000
@@ -452,6 +452,10 @@
return 0;
}
+static int dummy_remove_from_physmap(struct domain *d1, struct domain *d2)
+{
+ return 0;
+}
#endif
struct xsm_operations dummy_xsm_ops;
@@ -558,6 +562,7 @@
set_to_dummy_if_null(ops, mmu_machphys_update);
set_to_dummy_if_null(ops, update_va_mapping);
set_to_dummy_if_null(ops, add_to_physmap);
+ set_to_dummy_if_null(ops, remove_from_physmap);
set_to_dummy_if_null(ops, sendtrigger);
set_to_dummy_if_null(ops, test_assign_device);
set_to_dummy_if_null(ops, assign_device);
diff -r 0cca6939e5c8 xen/xsm/flask/hooks.c
--- a/xen/xsm/flask/hooks.c Tue Mar 17 11:23:40 2009 +0000
+++ b/xen/xsm/flask/hooks.c Tue Mar 17 15:12:07 2009 +0000
@@ -1058,6 +1058,11 @@
return domain_has_perm(d1, d2, SECCLASS_MMU, MMU__PHYSMAP);
}
+static int flask_remove_from_physmap(struct domain *d1, struct domain *d2)
+{
+ return domain_has_perm(d1, d2, SECCLASS_MMU, MMU__PHYSMAP);
+}
+
static int flask_sendtrigger(struct domain *d)
{
return domain_has_perm(current->domain, d, SECCLASS_DOMAIN,
DOMAIN__TRIGGER);
@@ -1306,6 +1311,7 @@
.mmu_machphys_update = flask_mmu_machphys_update,
.update_va_mapping = flask_update_va_mapping,
.add_to_physmap = flask_add_to_physmap,
+ .remove_from_physmap = flask_remove_from_physmap,
.sendtrigger = flask_sendtrigger,
.test_assign_device = flask_test_assign_device,
.assign_device = flask_assign_device,