xen-devel
[Xen-devel] Re: [PATCH] Add memory hotadd to pvops dom0
On 12/18/2009 02:27 AM, Jiang, Yunhong wrote:
When a memory hotadd event happens, a Xen hook will be called to notify
the hypervisor of the newly added memory.
Because the Xen hypervisor will use the new memory to set up the
frametable/m2p table, dom0 will always return success to the ACPI BIOS and
notify the Xen hypervisor later.
It adds a hook in drivers/acpi/acpi_memhotplug.c, but that change is quite small;
I'm not sure if it is acceptable. The other method is to provide a Xen-specific
acpi_memory_device_driver, but I'm not sure it is worth adding so many changes
simply to avoid a two-line hook.
Yeah; run it by Len and see if he objects violently or not.
Aside from that, it looks OK, though I agree with all Konrad's comments.
But I have to say it looks more like an argument for "Xen should handle
ACPI" than against...
J
Signed-off-by: Jiang, Yunhong <yunhong.jiang@xxxxxxxxx>
---
drivers/acpi/acpi_memhotplug.c | 5 +
drivers/xen/Makefile | 1 +
drivers/xen/xen_acpi_memhotplug.c | 197 +++++++++++++++++++++++++++++++++++++
include/xen/acpi.h | 4 +
include/xen/interface/platform.h | 10 ++
5 files changed, 217 insertions(+), 0 deletions(-)
create mode 100644 drivers/xen/xen_acpi_memhotplug.c
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index 50e17c7..75e6b72 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -31,6 +31,8 @@
#include <linux/types.h>
#include <linux/memory_hotplug.h>
#include <acpi/acpi_drivers.h>
+#include <acpi/acpi_numa.h>
+#include <xen/acpi.h>
#define ACPI_MEMORY_DEVICE_CLASS "memory"
#define ACPI_MEMORY_DEVICE_HID "PNP0C80"
@@ -215,6 +217,9 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
return result;
}
+ if (xen_initial_domain())
+ return xen_add_memory(mem_device);
+
node = acpi_get_node(mem_device->device->handle);
/*
* Tell the VM there is more memory here...
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 6325be1..fbacea7 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o
obj-$(CONFIG_XEN_S3) += acpi.o
obj-$(CONFIG_XEN_MCE) += mce.o
obj-$(CONFIG_ACPI_PROCESSOR_XEN) += acpi_processor.o
+obj-$(CONFIG_ACPI_HOTPLUG_MEMORY) += xen_acpi_memhotplug.o
xen-gntdev-y := gntdev.o
xen-evtchn-y := evtchn.o
diff --git a/drivers/xen/xen_acpi_memhotplug.c b/drivers/xen/xen_acpi_memhotplug.c
new file mode 100644
index 0000000..17b936e
--- /dev/null
+++ b/drivers/xen/xen_acpi_memhotplug.c
@@ -0,0 +1,197 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/memory_hotplug.h>
+#include <acpi/acpi_drivers.h>
+#include <xen/interface/platform.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <asm/xen/hypervisor.h>
+#include <asm/xen/hypercall.h>
+#include <xen/acpi.h>
+
+struct xen_hotmem_entry {
+ struct list_head hotmem_list;
+ uint64_t start;
+ uint64_t end;
+ uint32_t flags;
+ uint32_t pxm;
+};
+
+struct xen_hotmem_list {
+ struct list_head list;
+ int entry_nr;
+} xen_hotmem;
+
+DEFINE_SPINLOCK(xen_hotmem_lock);
+
+static int xen_hyper_addmem(struct xen_hotmem_entry *entry)
+{
+ int ret;
+
+ xen_platform_op_t op = {
+ .cmd = XENPF_mem_hotadd,
+ .interface_version = XENPF_INTERFACE_VERSION,
+ };
+ op.u.mem_add.spfn = entry->start >> PAGE_SHIFT;
+ op.u.mem_add.epfn = entry->end >> PAGE_SHIFT;
+ op.u.mem_add.flags = entry->flags;
+ op.u.mem_add.pxm = entry->pxm;
+
+ ret = HYPERVISOR_dom0_op(&op);
+ return ret;
+}
+static int xen_hotmem_free(struct xen_hotmem_entry *entry)
+{
+ list_del(&entry->hotmem_list);
+ kfree(entry);
+
+ return 0;
+}
+
+static void xen_hotmem_dpc(struct work_struct *work)
+{
+ struct list_head *elem, *tmp;
+ struct xen_hotmem_entry *entry;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&xen_hotmem_lock, flags);
+ list_for_each_safe(elem, tmp, &xen_hotmem.list) {
+ entry = list_entry(elem, struct xen_hotmem_entry, hotmem_list);
+ ret = xen_hyper_addmem(entry);
+ if (ret)
+ printk(KERN_WARNING "xen addmem failed with %x\n", ret);
+ xen_hotmem_free(entry);
+ }
+ spin_unlock_irqrestore(&xen_hotmem_lock, flags);
+}
+
+static DECLARE_WORK(xen_hotmem_work, xen_hotmem_dpc);
+
+static int xen_acpi_get_pxm(acpi_handle h)
+{
+ unsigned long long pxm;
+ acpi_status status;
+ acpi_handle handle;
+ acpi_handle phandle = h;
+
+ do {
+ handle = phandle;
+ status = acpi_evaluate_integer(handle, "_PXM", NULL, &pxm);
+ if (ACPI_SUCCESS(status))
+ return pxm;
+ status = acpi_get_parent(handle, &phandle);
+ } while (ACPI_SUCCESS(status));
+ return -1;
+}
+
+static int add_memory_entry(int pxm, uint64_t start,
+ uint64_t length, uint32_t flags)
+{
+ struct xen_hotmem_entry *entry;
+
+ if (pxm < 0 || !length)
+ return -1;
+
+ entry = kzalloc(sizeof(struct xen_hotmem_entry), GFP_ATOMIC);
+ if (!entry)
+ return -1;
+
+ INIT_LIST_HEAD(&entry->hotmem_list);
+ entry->start = start;
+ entry->end = start + length;
+ entry->flags = flags;
+ entry->pxm = pxm;
+
+ spin_lock(&xen_hotmem_lock);
+
+ list_add_tail(&entry->hotmem_list, &xen_hotmem.list);
+
+ spin_unlock(&xen_hotmem_lock);
+
+ return 0;
+}
+
+int xen_add_memory(struct acpi_memory_device *mem_device)
+{
+ int pxm, result;
+ int num_enabled = 0;
+ struct acpi_memory_info *info;
+
+ if (!mem_device)
+ return -EINVAL;
+
+ pxm = xen_acpi_get_pxm(mem_device->device->handle);
+
+ if (pxm < 0)
+ return -EINVAL;
+
+ /*
+ * Always return success to ACPI driver, and notify hypervisor later
+ * because hypervisor will utilize the memory in memory hotadd hypercall
+ */
+ list_for_each_entry(info, &mem_device->res_list, list) {
+ if (info->enabled) { /* just sanity check...*/
+ num_enabled++;
+ continue;
+ }
+ /*
+ * If the memory block size is zero, please ignore it.
+ * Don't try to do the following memory hotplug flowchart.
+ */
+ if (!info->length)
+ continue;
+
+ result = add_memory_entry(pxm, info->start_addr,
+ info->length, 0);
+ if (result)
+ continue;
+ info->enabled = 1;
+ num_enabled++;
+ }
+
+ if (!num_enabled)
+ return -EINVAL;
+
+ schedule_work(&xen_hotmem_work);
+ return 0;
+}
+EXPORT_SYMBOL(xen_add_memory);
+
+static int xen_memadd_init(void)
+{
+ if (!xen_initial_domain()) {
+ printk(KERN_ERR "xen_memadd is only for Xen dom0\n");
+ return -1;
+ }
+
+ INIT_LIST_HEAD(&xen_hotmem.list);
+ xen_hotmem.entry_nr = 0;
+
+ return 0;
+}
+
+static void xen_memadd_exit(void)
+{
+ unsigned long flags;
+ struct list_head *elem, *tmp;
+ struct xen_hotmem_entry *entry;
+
+ spin_lock_irqsave(&xen_hotmem_lock, flags);
+ if (xen_hotmem.entry_nr) {
+ printk(KERN_WARNING "Still %x mem entries not added\n",
+ xen_hotmem.entry_nr);
+ list_for_each_safe(elem, tmp, &xen_hotmem.list) {
+ entry = list_entry(elem, struct xen_hotmem_entry,
+ hotmem_list);
+ xen_hotmem_free(entry);
+ }
+ }
+ spin_unlock_irqrestore(&xen_hotmem_lock, flags);
+}
+
+module_init(xen_memadd_init);
+module_exit(xen_memadd_exit);
+MODULE_LICENSE("GPL");
diff --git a/include/xen/acpi.h b/include/xen/acpi.h
index 3e7eab4..b15aa3e 100644
--- a/include/xen/acpi.h
+++ b/include/xen/acpi.h
@@ -41,6 +41,10 @@ int acpi_notify_hypervisor_state(u8 sleep_state,
#define HOTPLUG_TYPE_ADD 0
#define HOTPLUG_TYPE_REMOVE 1
+#ifdef CONFIG_ACPI_HOTPLUG_MEMORY
+int xen_add_memory(struct acpi_memory_device *mem_device);
+#endif
+
#if defined(CONFIG_ACPI_PROCESSOR_XEN) || defined(CONFIG_ACPI_PROCESSOR_XEN_MODULE)
struct processor_cntl_xen_ops {
diff --git a/include/xen/interface/platform.h b/include/xen/interface/platform.h
index bff3a65..17ae622 100644
--- a/include/xen/interface/platform.h
+++ b/include/xen/interface/platform.h
@@ -344,6 +344,15 @@ struct xenpf_cpu_hotadd {
uint32_t pxm;
};
+
+#define XENPF_mem_hotadd 59
+struct xenpf_mem_hotadd {
+ uint64_t spfn;
+ uint64_t epfn;
+ uint32_t pxm;
+ uint32_t flags;
+};
+
struct xen_platform_op {
uint32_t cmd;
uint32_t interface_version; /* XENPF_INTERFACE_VERSION */
@@ -362,6 +371,7 @@ struct xen_platform_op {
struct xenpf_pcpuinfo pcpu_info;
struct xenpf_cpu_ol cpu_ol;
struct xenpf_cpu_hotadd cpu_add;
+ struct xenpf_mem_hotadd mem_add;
uint8_t pad[128];
} u;
};
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
|
|
|