Hi,
This patch adds the xencomm translations that ia64 needs to support
XEN_SYSCTL_topologyinfo and XEN_SYSCTL_numainfo. The cpu_to_node array
has moved out of XEN_SYSCTL_physinfo into the new sysctls, so the
XEN_SYSCTL_physinfo case no longer needs any buffer mapping of its own.
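
Both new cases follow the xencomm pattern already used by the other
sysctl commands: each guest array handle is remapped with xencomm_map(),
the kernel-side handle is written back into the copied argument with
set_xen_guest_handle(), and the handles mapped so far are freed with
xencomm_free() if a later mapping fails. The step applied to each array
is roughly the sketch below; map_guest_array() is only an illustrative
helper, not part of the patch, and it assumes the same xencomm
declarations that xcom_privcmd.c already uses (the error check shown is
the one used by the numainfo case):

/*
 * Illustrative only: the map-then-replace step each guest array goes
 * through in the new sysctl cases.
 */
static int map_guest_array(void *guest_ptr, unsigned long bytes,
			   struct xencomm_handle **out)
{
	struct xencomm_handle *h = xencomm_map(guest_ptr, bytes);

	/*
	 * A NULL guest pointer just means the caller did not supply
	 * this array; only a failed mapping is an error.
	 */
	if (guest_ptr != NULL && h == NULL)
		return -ENOMEM;	/* caller unwinds earlier mappings */

	*out = h;	/* written back via set_xen_guest_handle() */
	return 0;
}

The topologyinfo case additionally skips the -ENOMEM return when
max_cpu_index is 0, mirroring the max_cpu_id check in the physinfo code
this patch removes.
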
Signed-off-by: KUWAMURA Shin'ya <kuwa@xxxxxxxxxxxxxx>
--
KUWAMURA Shin'ya
# HG changeset patch
# User KUWAMURA Shin'ya <kuwa@xxxxxxxxxxxxxx>
# Date 1274752784 -32400
# Node ID df836bdec13b4faf104f857de9502a05c87e02f1
# Parent 948c933f8839f77f436f3df5548ed2101b887a55
ia64, xencomm: support XEN_SYSCTL_topologyinfo and XEN_SYSCTL_numainfo
This patch adds the xencomm translations that ia64 needs to support
XEN_SYSCTL_topologyinfo and XEN_SYSCTL_numainfo. The cpu_to_node array
has moved out of XEN_SYSCTL_physinfo into the new sysctls, so the
XEN_SYSCTL_physinfo case no longer needs any buffer mapping of its own.
Signed-off-by: KUWAMURA Shin'ya <kuwa@xxxxxxxxxxxxxx>
diff -r 948c933f8839 -r df836bdec13b arch/ia64/xen/xcom_privcmd.c
--- a/arch/ia64/xen/xcom_privcmd.c Thu May 06 15:52:48 2010 +0100
+++ b/arch/ia64/xen/xcom_privcmd.c Tue May 25 10:59:44 2010 +0900
@@ -87,6 +87,7 @@ xencomm_privcmd_sysctl(privcmd_hypercall
 	struct xencomm_handle *op_desc;
 	struct xencomm_handle *desc = NULL;
 	struct xencomm_handle *desc1 = NULL;
+	struct xencomm_handle *desc2 = NULL;
 	int ret = 0;
 
 	user_op = (xen_sysctl_t __user *)hypercall->arg[0];
@@ -111,6 +112,7 @@ xencomm_privcmd_sysctl(privcmd_hypercall
 			(void *)desc);
 		break;
 	case XEN_SYSCTL_tbuf_op:
+	case XEN_SYSCTL_physinfo:
 	case XEN_SYSCTL_sched_id:
 	case XEN_SYSCTL_availheap:
 		break;
@@ -183,18 +185,6 @@ xencomm_privcmd_sysctl(privcmd_hypercall
 			(void *)desc);
 		break;
 
-	case XEN_SYSCTL_physinfo:
-		desc = xencomm_map(
-			xen_guest_handle(kern_op.u.physinfo.cpu_to_node),
-			kern_op.u.physinfo.max_cpu_id * sizeof(uint32_t));
-		if (xen_guest_handle(kern_op.u.physinfo.cpu_to_node) != NULL &&
-		    kern_op.u.physinfo.max_cpu_id > 0 && desc == NULL)
-			return -ENOMEM;
-
-		set_xen_guest_handle(kern_op.u.physinfo.cpu_to_node,
-			(void *)desc);
-		break;
-
 	case XEN_SYSCTL_get_pmstat:
 		if (kern_op.u.get_pmstat.type == PMSTAT_get_pxstat) {
 			struct pm_px_stat *getpx =
@@ -219,6 +209,79 @@ xencomm_privcmd_sysctl(privcmd_hypercall
 		}
 		break;
 
+	case XEN_SYSCTL_topologyinfo:
+	{
+		xen_sysctl_topologyinfo_t *info = &kern_op.u.topologyinfo;
+		unsigned long size =
+			(info->max_cpu_index + 1) * sizeof(uint32_t);
+
+		desc = xencomm_map(xen_guest_handle(info->cpu_to_core), size);
+		if (xen_guest_handle(info->cpu_to_core) != NULL &&
+		    info->max_cpu_index > 0 && desc == NULL)
+			return -ENOMEM;
+
+		set_xen_guest_handle(info->cpu_to_core, (void *)desc);
+
+		desc1 = xencomm_map(
+			xen_guest_handle(info->cpu_to_socket), size);
+		if (xen_guest_handle(info->cpu_to_socket) != NULL &&
+		    info->max_cpu_index > 0 && desc1 == NULL) {
+			xencomm_free(desc);
+			return -ENOMEM;
+		}
+
+		set_xen_guest_handle(info->cpu_to_socket, (void *)desc1);
+
+		desc2 = xencomm_map(xen_guest_handle(info->cpu_to_node), size);
+		if (xen_guest_handle(info->cpu_to_node) != NULL &&
+		    info->max_cpu_index > 0 && desc2 == NULL) {
+			xencomm_free(desc1);
+			xencomm_free(desc);
+			return -ENOMEM;
+		}
+
+		set_xen_guest_handle(info->cpu_to_node, (void *)desc2);
+		break;
+	}
+
+	case XEN_SYSCTL_numainfo:
+	{
+		xen_sysctl_numainfo_t *info = &kern_op.u.numainfo;
+		uint32_t max = info->max_node_index;
+
+		desc = xencomm_map(xen_guest_handle(info->node_to_memsize),
+			(max + 1) * sizeof(uint64_t));
+		if (xen_guest_handle(info->node_to_memsize) != NULL &&
+		    desc == NULL)
+			return -ENOMEM;
+
+		set_xen_guest_handle(info->node_to_memsize, (void *)desc);
+
+		desc1 = xencomm_map(xen_guest_handle(info->node_to_memfree),
+			(max + 1) * sizeof(uint64_t));
+		if (xen_guest_handle(info->node_to_memfree) != NULL &&
+		    desc1 == NULL) {
+			xencomm_free(desc);
+			return -ENOMEM;
+		}
+
+		set_xen_guest_handle(info->node_to_memfree, (void *)desc1);
+
+		desc2 = xencomm_map(
+			xen_guest_handle(info->node_to_node_distance),
+			(max + 1) * (max + 1) * sizeof(uint32_t));
+		if (xen_guest_handle(info->node_to_node_distance) != NULL &&
+		    desc2 == NULL) {
+			xencomm_free(desc1);
+			xencomm_free(desc);
+			return -ENOMEM;
+		}
+
+		set_xen_guest_handle(info->node_to_node_distance,
+			(void *)desc2);
+		break;
+	}
+
 	default:
 		printk("%s: unknown sysctl cmd %d\n", __func__, kern_op.cmd);
 		return -ENOSYS;
@@ -237,6 +300,7 @@ xencomm_privcmd_sysctl(privcmd_hypercall
 
 	xencomm_free(desc);
 	xencomm_free(desc1);
+	xencomm_free(desc2);
 
 	return ret;
 }
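
A sizing note for reviewers: the three topologyinfo arrays are each
(max_cpu_index + 1) uint32_t entries, while numainfo maps two arrays of
(max_node_index + 1) uint64_t entries plus a full
(max_node_index + 1) * (max_node_index + 1) uint32_t distance matrix.
For example, with max_node_index = 3, node_to_memsize and
node_to_memfree come to 4 * 8 = 32 bytes each and node_to_node_distance
to 4 * 4 * 4 = 64 bytes, which is what the xencomm_map() calls above
request.
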
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel