|
[PATCH v6 1/3] ioreq: switch ioreq page allocation to vmap
Switch the Xen-side ioreq page mapping from prepare_ring_for_helper() /
map_domain_page_global() to explicit vmap(), to ensure vmap_to_page()
can recover the struct page_info * uniformly during teardown.
This is a prerequisite for multi-page ioreq support: the non-buf ioreq
region will need to span multiple pages for domains with more vCPUs than
fit in a single page, and vmap() is the natural interface for contiguous
multi-page Xen VA mappings.
In non-debug builds map_domain_page_global() uses the directmap for low
MFNs rather than vmap(), so this change has a small overhead in the
common case. Debug builds already used vmap() indirectly.
With both paths using vmap(), vmap_to_page() can recover the struct
page_info * uniformly, so drop the 'page' field from struct ioreq_page
and update all callers accordingly.
Signed-off-by: Julian Vetter <julian.vetter@xxxxxxxxxx>
---
Changes in v6:
- Updated commit message to clearly specify why these changes are made
- Added comments noting that the new open-coded paths are equivalent to
{prepare,destroy}_ring_for_helper(), implemented via vmap_to_page() +
v{map,unmap}()
- Kept the proper ordering in ioreq_server_free_mfn(): the va pointer is
cleared first, and only then is the mapping torn down with vunmap()
---
xen/arch/x86/hvm/ioreq.c | 55 +++++++++++++++++++++++++++++++++-------
xen/common/ioreq.c | 34 +++++++++++++------------
xen/include/xen/ioreq.h | 1 -
3 files changed, 64 insertions(+), 26 deletions(-)
diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index a5fa97e149..3cabec141c 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -15,6 +15,7 @@
#include <xen/sched.h>
#include <xen/softirq.h>
#include <xen/trace.h>
+#include <xen/vmap.h>
#include <xen/vpci.h>
#include <asm/hvm/emulate.h>
@@ -128,8 +129,13 @@ static void hvm_unmap_ioreq_gfn(struct ioreq_server *s,
bool buf)
if ( gfn_eq(iorp->gfn, INVALID_GFN) )
return;
- destroy_ring_for_helper(&iorp->va, iorp->page);
- iorp->page = NULL;
+ /* Equivalent to destroy_ring_for_helper(), using vmap_to_page(). */
+ if ( iorp->va )
+ {
+ put_page_and_type(vmap_to_page(iorp->va));
+ vunmap(iorp->va);
+ iorp->va = NULL;
+ }
hvm_free_ioreq_gfn(s, iorp->gfn);
iorp->gfn = INVALID_GFN;
@@ -139,9 +145,12 @@ static int hvm_map_ioreq_gfn(struct ioreq_server *s, bool
buf)
{
struct domain *d = s->target;
struct ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
+ struct page_info *page;
+ p2m_type_t p2mt;
+ mfn_t mfn;
int rc;
- if ( iorp->page )
+ if ( iorp->va )
{
/*
* If a page has already been allocated (which will happen on
@@ -162,12 +171,40 @@ static int hvm_map_ioreq_gfn(struct ioreq_server *s, bool
buf)
if ( gfn_eq(iorp->gfn, INVALID_GFN) )
return -ENOMEM;
- rc = prepare_ring_for_helper(d, gfn_x(iorp->gfn), &iorp->page,
- &iorp->va);
-
+ /*
+ * Equivalent to prepare_ring_for_helper() using vmap(). Using vmap()
+ * rather than map_domain_page_global() ensures vmap_to_page() can
+ * recover the struct page_info * uniformly at teardown, which is
+ * needed to support multi-page ioreq mappings (see nr_ioreq_pages()).
+ */
+ rc = check_get_page_from_gfn(d, iorp->gfn, false, &p2mt, &page);
if ( rc )
- hvm_unmap_ioreq_gfn(s, buf);
+ {
+ if ( rc == -EAGAIN )
+ rc = -ENOENT;
+ goto fail;
+ }
+
+ if ( !get_page_type(page, PGT_writable_page) )
+ {
+ put_page(page);
+ rc = -EINVAL;
+ goto fail;
+ }
+
+ mfn = page_to_mfn(page);
+ iorp->va = vmap(&mfn, 1);
+ if ( !iorp->va )
+ {
+ put_page_and_type(page);
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ return 0;
+ fail:
+ hvm_unmap_ioreq_gfn(s, buf);
return rc;
}
@@ -179,7 +216,7 @@ static void hvm_remove_ioreq_gfn(struct ioreq_server *s,
bool buf)
if ( gfn_eq(iorp->gfn, INVALID_GFN) )
return;
- if ( p2m_remove_page(d, iorp->gfn, page_to_mfn(iorp->page), 0) )
+ if ( p2m_remove_page(d, iorp->gfn, vmap_to_mfn(iorp->va), 0) )
domain_crash(d);
clear_page(iorp->va);
}
@@ -195,7 +232,7 @@ static int hvm_add_ioreq_gfn(struct ioreq_server *s, bool
buf)
clear_page(iorp->va);
- rc = p2m_add_page(d, iorp->gfn, page_to_mfn(iorp->page), 0, p2m_ram_rw);
+ rc = p2m_add_page(d, iorp->gfn, vmap_to_mfn(iorp->va), 0, p2m_ram_rw);
if ( rc == 0 )
paging_mark_pfn_dirty(d, _pfn(gfn_x(iorp->gfn)));
diff --git a/xen/common/ioreq.c b/xen/common/ioreq.c
index f5fd30ce12..d8d02167b4 100644
--- a/xen/common/ioreq.c
+++ b/xen/common/ioreq.c
@@ -17,11 +17,11 @@
*/
#include <xen/domain.h>
-#include <xen/domain_page.h>
#include <xen/event.h>
#include <xen/init.h>
#include <xen/ioreq.h>
#include <xen/irq.h>
+#include <xen/vmap.h>
#include <xen/lib.h>
#include <xen/paging.h>
#include <xen/sched.h>
@@ -262,8 +262,9 @@ static int ioreq_server_alloc_mfn(struct ioreq_server *s,
bool buf)
{
struct ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
struct page_info *page;
+ mfn_t mfn;
- if ( iorp->page )
+ if ( iorp->va )
{
/*
* If a guest frame has already been mapped (which may happen
@@ -291,11 +292,11 @@ static int ioreq_server_alloc_mfn(struct ioreq_server *s,
bool buf)
return -ENODATA;
}
- iorp->va = __map_domain_page_global(page);
+ mfn = page_to_mfn(page);
+ iorp->va = vmap(&mfn, 1);
if ( !iorp->va )
goto fail;
- iorp->page = page;
clear_page(iorp->va);
return 0;
@@ -309,15 +310,16 @@ static int ioreq_server_alloc_mfn(struct ioreq_server *s,
bool buf)
static void ioreq_server_free_mfn(struct ioreq_server *s, bool buf)
{
struct ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
- struct page_info *page = iorp->page;
+ struct page_info *page;
+ void *va;
- if ( !page )
+ if ( !iorp->va )
return;
- iorp->page = NULL;
-
- unmap_domain_page_global(iorp->va);
+ va = iorp->va;
+ page = vmap_to_page(va);
iorp->va = NULL;
+ vunmap(va);
put_page_alloc_ref(page);
put_page_and_type(page);
@@ -333,7 +335,8 @@ bool is_ioreq_server_page(struct domain *d, const struct
page_info *page)
FOR_EACH_IOREQ_SERVER(d, id, s)
{
- if ( (s->ioreq.page == page) || (s->bufioreq.page == page) )
+ if ( (s->ioreq.va && vmap_to_page(s->ioreq.va) == page) ||
+ (s->bufioreq.va && vmap_to_page(s->bufioreq.va) == page) )
{
found = true;
break;
@@ -627,10 +630,9 @@ static void ioreq_server_deinit(struct ioreq_server *s)
* NOTE: It is safe to call both arch_ioreq_server_unmap_pages() and
* ioreq_server_free_pages() in that order.
* This is because the former will do nothing if the pages
- * are not mapped, leaving the page to be freed by the latter.
- * However if the pages are mapped then the former will set
- * the page_info pointer to NULL, meaning the latter will do
- * nothing.
+ * are not mapped, leaving the pages to be freed by the latter.
+ * However if the pages are mapped then the former will clear
+ * iorp->va, meaning the latter will do nothing.
*/
arch_ioreq_server_unmap_pages(s);
ioreq_server_free_pages(s);
@@ -819,12 +821,12 @@ int ioreq_server_get_frame(struct domain *d, ioservid_t
id,
if ( !HANDLE_BUFIOREQ(s) )
goto out;
- *mfn = page_to_mfn(s->bufioreq.page);
+ *mfn = vmap_to_mfn(s->bufioreq.va);
rc = 0;
break;
case XENMEM_resource_ioreq_server_frame_ioreq(0):
- *mfn = page_to_mfn(s->ioreq.page);
+ *mfn = vmap_to_mfn(s->ioreq.va);
rc = 0;
break;
diff --git a/xen/include/xen/ioreq.h b/xen/include/xen/ioreq.h
index e86f0869fa..d63fa4729e 100644
--- a/xen/include/xen/ioreq.h
+++ b/xen/include/xen/ioreq.h
@@ -25,7 +25,6 @@
struct ioreq_page {
gfn_t gfn;
- struct page_info *page;
void *va;
};
--
2.53.0
--
Julian Vetter | Vates Hypervisor & Kernel Developer
XCP-ng & Xen Orchestra - Vates solutions
web: https://vates.tech
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |