# HG changeset patch
# User awilliam@xxxxxxxxxxx
# Node ID 7a9a00c515880b5f8d50d823b86c99763fe2504c
# Parent ae0d41bd3bba7fc87155d8d9283f215e47131f14
[IA64] introduce dom0vp hypercall
Implement the dom0vp hypercall and its initial set of sub-operations.
Signed-off-by: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
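The hypercall is dispatched in xen_hypercall() below: the guest places __HYPERVISOR_ia64_dom0vp_op in r2, the sub-command in r14 and up to four arguments in r15-r18, and the result is returned in r8. As a rough illustration of the guest side, here is a minimal sketch of dom0 wrappers for the phystomach/machtophys sub-ops; ia64_xen_hypercall5() is a hypothetical arch primitive (not part of this patch) assumed to load r2 and r14-r18 and return r8.

/* Illustrative sketch only -- ia64_xen_hypercall5() is an assumed primitive
 * that puts `nr' in r2, the five arguments in r14-r18, traps into Xen and
 * returns r8.  The constants match xen/include/public/arch-ia64.h below. */
extern unsigned long ia64_xen_hypercall5(unsigned long nr,
                                         unsigned long cmd, unsigned long a0,
                                         unsigned long a1, unsigned long a2,
                                         unsigned long a3);

#define __HYPERVISOR_ia64_dom0vp_op   256
#define IA64_DOM0VP_phystomach          1
#define IA64_DOM0VP_machtophys          3

/* pseudo-physical pfn -> machine pfn for the calling domain */
static inline unsigned long dom0vp_phystomach(unsigned long gpfn)
{
    return ia64_xen_hypercall5(__HYPERVISOR_ia64_dom0vp_op,
                               IA64_DOM0VP_phystomach, gpfn, 0, 0, 0);
}

/* machine pfn -> pseudo-physical pfn for the calling domain */
static inline unsigned long dom0vp_machtophys(unsigned long mfn)
{
    return ia64_xen_hypercall5(__HYPERVISOR_ia64_dom0vp_op,
                               IA64_DOM0VP_machtophys, mfn, 0, 0, 0);
}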
diff -r ae0d41bd3bba -r 7a9a00c51588 xen/arch/ia64/xen/dom0_ops.c
--- a/xen/arch/ia64/xen/dom0_ops.c Tue Apr 25 13:48:02 2006 -0600
+++ b/xen/arch/ia64/xen/dom0_ops.c Tue Apr 25 13:56:30 2006 -0600
@@ -236,6 +236,55 @@ long arch_do_dom0_op(dom0_op_t *op, GUES
return ret;
}
+#ifdef CONFIG_XEN_IA64_DOM0_VP
+unsigned long
+do_dom0vp_op(unsigned long cmd,
+             unsigned long arg0, unsigned long arg1, unsigned long arg2,
+             unsigned long arg3)
+{
+    unsigned long ret = 0;
+    struct domain *d = current->domain;
+
+    switch (cmd) {
+    case IA64_DOM0VP_ioremap:
+        ret = assign_domain_mmio_page(d, arg0, arg1);
+        break;
+    case IA64_DOM0VP_phystomach:
+        ret = ____lookup_domain_mpa(d, arg0 << PAGE_SHIFT);
+        if (ret == INVALID_MFN) {
+            DPRINTK("%s:%d INVALID_MFN ret: 0x%lx\n", __func__, __LINE__, ret);
+        } else {
+            ret = (ret & _PFN_MASK) >> PAGE_SHIFT; //XXX pte_pfn()
+        }
+        break;
+    case IA64_DOM0VP_machtophys:
+        if (max_page <= arg0) {
+            ret = INVALID_M2P_ENTRY;
+            break;
+        }
+        ret = get_gpfn_from_mfn(arg0);
+        break;
+    case IA64_DOM0VP_populate_physmap:
+        ret = dom0vp_populate_physmap(d, arg0,
+                                      (unsigned int)arg1, (unsigned int)arg2);
+        break;
+    case IA64_DOM0VP_zap_physmap:
+        ret = dom0vp_zap_physmap(d, arg0, (unsigned int)arg1);
+        break;
+    case IA64_DOM0VP_add_physmap:
+        ret = dom0vp_add_physmap(d, arg0, arg1, (unsigned int)arg2,
+                                 (domid_t)arg3);
+        break;
+    default:
+        ret = -1;
+        printf("unknown dom0_vp_op 0x%lx\n", cmd);
+        break;
+    }
+
+    return ret;
+}
+#endif
+
/*
* Local variables:
* mode: C
diff -r ae0d41bd3bba -r 7a9a00c51588 xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c Tue Apr 25 13:48:02 2006 -0600
+++ b/xen/arch/ia64/xen/domain.c Tue Apr 25 13:56:30 2006 -0600
@@ -886,6 +886,151 @@ unsigned long lookup_domain_mpa(struct d
mpafoo(mpaddr);
return 0;
}
+
+#ifdef CONFIG_XEN_IA64_DOM0_VP
+//XXX SMP
+unsigned long
+dom0vp_populate_physmap(struct domain *d, unsigned long gpfn,
+                        unsigned int extent_order, unsigned int address_bits)
+{
+    unsigned long ret = 0;
+    int flags = 0;
+    unsigned long mpaddr = gpfn << PAGE_SHIFT;
+    unsigned long extent_size = 1UL << extent_order;
+    unsigned long offset;
+    struct page_info* page;
+    unsigned long physaddr;
+
+    if (extent_order > 0 && !multipage_allocation_permitted(d)) {
+        ret = -EINVAL;
+        goto out;
+    }
+
+    if (gpfn + (1 << extent_order) < gpfn) {
+        ret = -EINVAL;
+        goto out;
+    }
+    if (gpfn > d->max_pages || gpfn + (1 << extent_order) > d->max_pages) {
+        ret = -EINVAL;
+        goto out;
+    }
+    if ((extent_size << PAGE_SHIFT) < extent_size) {
+        ret = -EINVAL;
+        goto out;
+    }
+
+    //XXX check address_bits and set flags = ALLOC_DOM_DMA if needed
+
+    // check that the range is not populated yet.
+    //XXX loop optimization
+    for (offset = 0; offset < extent_size << PAGE_SHIFT; offset += PAGE_SIZE) {
+        if (____lookup_domain_mpa(d, mpaddr + offset) != INVALID_MFN) {
+            ret = -EBUSY;
+            goto out;
+        }
+    }
+
+    page = alloc_domheap_pages(d, extent_order, flags);
+    if (page == NULL) {
+        ret = -ENOMEM;
+        DPRINTK("Could not allocate order=%d extent: id=%d flags=%x\n",
+                extent_order, d->domain_id, flags);
+        goto out;
+    }
+
+    //XXX loop optimization
+    physaddr = page_to_maddr(page);
+    for (offset = 0; offset < extent_size << PAGE_SHIFT; offset += PAGE_SIZE) {
+        assign_domain_page(d, mpaddr + offset, physaddr + offset);
+    }
+
+out:
+    return ret;
+}
+
+//XXX SMP
+unsigned long
+dom0vp_zap_physmap(struct domain *d, unsigned long gpfn,
+                   unsigned int extent_order)
+{
+    unsigned long ret = 0;
+    if (extent_order != 0) {
+        //XXX
+        ret = -ENOSYS;
+        goto out;
+    }
+
+    zap_domain_page_one(d, gpfn << PAGE_SHIFT);
+
+out:
+    return ret;
+}
+
+static void
+assign_domain_page_replace(struct domain *d, unsigned long mpaddr,
+                           unsigned long mfn, unsigned int flags)
+{
+    struct mm_struct *mm = d->arch.mm;
+    pte_t* pte;
+    pte_t old_pte;
+
+    pte = lookup_alloc_domain_pte(d, mpaddr);
+
+    // update pte
+    old_pte = ptep_get_and_clear(mm, mpaddr, pte);
+    set_pte(pte, pfn_pte(mfn,
+                         __pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX)));
+    if (!pte_none(old_pte)) {
+        unsigned long old_mfn;
+        struct page_info* old_page;
+
+        // XXX should the previously mapped page be removed here,
+        // or should an error be returned because the fault is the domain's?
+        old_mfn = pte_pfn(old_pte); //XXX
+        old_page = mfn_to_page(old_mfn);
+
+        if (page_get_owner(old_page) == d) {
+            BUG_ON(get_gpfn_from_mfn(old_mfn) != (mpaddr >> PAGE_SHIFT));
+            set_gpfn_from_mfn(old_mfn, INVALID_M2P_ENTRY);
+        }
+
+        domain_page_flush(d, mpaddr, old_mfn, mfn);
+
+        put_page(old_page);
+    } else {
+        BUG_ON(page_get_owner(mfn_to_page(mfn)) == d &&
+               get_gpfn_from_mfn(mfn) != INVALID_M2P_ENTRY);
+    }
+}
+
+unsigned long
+dom0vp_add_physmap(struct domain* d, unsigned long gpfn, unsigned long mfn,
+                   unsigned int flags, domid_t domid)
+{
+    int error = 0;
+
+    struct domain* rd;
+    rd = find_domain_by_id(domid);
+    if (unlikely(rd == NULL)) {
+        error = -EINVAL;
+        goto out0;
+    }
+    if (unlikely(rd == d)) {
+        error = -EINVAL;
+        goto out1;
+    }
+    if (unlikely(get_page(mfn_to_page(mfn), rd) == 0)) {
+        error = -EINVAL;
+        goto out1;
+    }
+
+    assign_domain_page_replace(d, gpfn << PAGE_SHIFT, mfn, 0/* flags:XXX */);
+out1:
+    put_domain(rd);
+out0:
+    return error;
+}
+#endif
/* Flush cache of domain d. */
void domain_cache_flush (struct domain *d, int sync_only)
diff -r ae0d41bd3bba -r 7a9a00c51588 xen/arch/ia64/xen/hypercall.c
--- a/xen/arch/ia64/xen/hypercall.c Tue Apr 25 13:48:02 2006 -0600
+++ b/xen/arch/ia64/xen/hypercall.c Tue Apr 25 13:56:30 2006 -0600
@@ -124,6 +124,12 @@ xen_hypercall (struct pt_regs *regs)
guest_handle_from_ptr(regs->r15, void));
break;
+#ifdef CONFIG_XEN_IA64_DOM0_VP
+    case __HYPERVISOR_ia64_dom0vp_op:
+        regs->r8 = do_dom0vp_op(regs->r14, regs->r15, regs->r16,
+                                regs->r17, regs->r18);
+        break;
+#endif
default:
printf("unknown xen hypercall %lx\n", regs->r2);
regs->r8 = do_ni_hypercall();
diff -r ae0d41bd3bba -r 7a9a00c51588 xen/include/asm-ia64/domain.h
--- a/xen/include/asm-ia64/domain.h Tue Apr 25 13:48:02 2006 -0600
+++ b/xen/include/asm-ia64/domain.h Tue Apr 25 13:56:30 2006 -0600
@@ -169,6 +169,10 @@ void assign_domain_io_page(struct domain
#ifdef CONFIG_XEN_IA64_DOM0_VP
 unsigned long assign_domain_mmio_page(struct domain *d, unsigned long mpaddr, unsigned long size);
 unsigned long assign_domain_mach_page(struct domain *d, unsigned long mpaddr, unsigned long size);
+unsigned long do_dom0vp_op(unsigned long cmd, unsigned long arg0, unsigned long arg1, unsigned long arg2, unsigned long arg3);
+unsigned long dom0vp_populate_physmap(struct domain *d, unsigned long gpfn, unsigned int extent_order, unsigned int address_bits);
+unsigned long dom0vp_zap_physmap(struct domain *d, unsigned long gpfn, unsigned int extent_order);
+unsigned long dom0vp_add_physmap(struct domain* d, unsigned long gpfn, unsigned long mfn, unsigned int flags, domid_t domid);
#endif
#include <asm/uaccess.h> /* for KERNEL_DS */
diff -r ae0d41bd3bba -r 7a9a00c51588 xen/include/public/arch-ia64.h
--- a/xen/include/public/arch-ia64.h Tue Apr 25 13:48:02 2006 -0600
+++ b/xen/include/public/arch-ia64.h Tue Apr 25 13:56:30 2006 -0600
@@ -326,6 +326,41 @@ typedef struct vcpu_guest_context {
} vcpu_guest_context_t;
DEFINE_GUEST_HANDLE(vcpu_guest_context_t);
+// dom0 vp op
+#define __HYPERVISOR_ia64_dom0vp_op     256     // XXX sufficiently large
+                                                // TODO: arch-specific
+                                                // hypercall number
+                                                // conversion
+#define IA64_DOM0VP_ioremap             0       // map io space in machine
+                                                // address to dom0 physical
+                                                // address space.
+                                                // currently the assigned
+                                                // physical address equals
+                                                // the machine address
+#define IA64_DOM0VP_phystomach          1       // convert a pseudo physical
+                                                // page frame number
+                                                // to the corresponding
+                                                // machine page frame number.
+                                                // if no page is assigned,
+                                                // INVALID_MFN or GPFN_INV_MASK
+                                                // is returned depending on
+                                                // the domain's non-vti/vti mode.
+#define IA64_DOM0VP_machtophys          3       // convert a machine page
+                                                // frame number
+                                                // to the corresponding
+                                                // pseudo physical page frame
+                                                // number of the caller domain
+#define IA64_DOM0VP_populate_physmap    16      // allocate a machine-contiguous
+                                                // memory region and
+                                                // map it to pseudo physical
+                                                // address
+#define IA64_DOM0VP_zap_physmap         17      // unmap and free pages
+                                                // contained in the specified
+                                                // pseudo physical region
+#define IA64_DOM0VP_add_physmap         18      // assign a machine page frame
+                                                // to dom0's pseudo physical
+                                                // address space.
+
#endif /* !__ASSEMBLY__ */
#endif /* __HYPERVISOR_IF_IA64_H__ */
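Putting the new sub-ops together, a dom0 kernel could map a page of another domain into its own pseudo-physical space and later tear the mapping down. The following is a hedged sketch built on the hypothetical ia64_xen_hypercall5() primitive sketched earlier; error handling is minimal and the flags argument is left 0, matching the patch itself.

/* Illustrative sketch only -- maps machine frame `mfn' owned by domain
 * `domid' at dom0 pseudo-physical frame `gpfn', then unmaps it again.
 * Relies on the assumed ia64_xen_hypercall5() primitive sketched above. */
#define IA64_DOM0VP_zap_physmap   17
#define IA64_DOM0VP_add_physmap   18

static int map_foreign_frame(unsigned long gpfn, unsigned long mfn,
                             unsigned long domid)
{
    /* dom0vp_add_physmap() returns 0 on success, -EINVAL otherwise */
    long err = (long)ia64_xen_hypercall5(__HYPERVISOR_ia64_dom0vp_op,
                                         IA64_DOM0VP_add_physmap,
                                         gpfn, mfn, 0 /* flags: XXX */, domid);
    return err ? -1 : 0;
}

static void unmap_frame(unsigned long gpfn)
{
    /* extent_order must be 0; larger extents return -ENOSYS in this patch */
    ia64_xen_hypercall5(__HYPERVISOR_ia64_dom0vp_op,
                        IA64_DOM0VP_zap_physmap, gpfn, 0 /* order */, 0, 0);
}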