ChangeSet 1.1644.1.1, 2005/06/02 22:05:33+01:00, kaf24@xxxxxxxxxxxxxxxxxxxx
The last annoying rename:
struct exec_domain *ed -> struct vcpu *v
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
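
The rename visible throughout the diff below has three parts: the struct tag (struct exec_domain -> struct vcpu), the conventional pointer name (ed -> v), and the arch hooks derived from the old name (e.g. arch_alloc_exec_domain_struct -> arch_alloc_vcpu_struct). A minimal, self-contained C sketch of the pattern follows; the field list and the helper function are illustrative only, not copied from the tree (only the 'domain' back-pointer appears in the hunks below).

    struct domain;                               /* opaque for this sketch */

    struct vcpu {                                /* was: struct exec_domain */
        struct domain *domain;                   /* owning domain; field names unchanged */
    };

    /* Hypothetical hook, showing the parameter rename ed -> v. */
    static void example_hook(struct vcpu *v)     /* was: struct exec_domain *ed */
    {
        struct domain *d = v->domain;            /* every 'ed->' becomes 'v->' */
        (void)d;
    }
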
arch/ia64/asm-offsets.c | 24 +--
arch/ia64/domain.c | 164 ++++++++++----------
arch/ia64/hypercall.c | 38 ++--
arch/ia64/idle0_task.c | 2
arch/ia64/irq.c | 2
arch/ia64/mmio.c | 2
arch/ia64/patch/linux-2.6.11/irq_ia64.c | 2
arch/ia64/patch/linux-2.6.7/current.h | 1
arch/ia64/patch/linux-2.6.7/irq_ia64.c | 2
arch/ia64/patch/linux-2.6.7/time.c | 5
arch/ia64/privop.c | 14 -
arch/ia64/process.c | 94 +++++------
arch/ia64/regionreg.c | 62 +++----
arch/ia64/vmmu.c | 14 -
arch/ia64/vmx_init.c | 46 ++---
arch/ia64/vmx_process.c | 20 +-
arch/ia64/xenirq.c | 4
arch/ia64/xenmisc.c | 22 +-
arch/ia64/xensetup.c | 8 -
arch/ia64/xentime.c | 14 -
arch/x86/audit.c | 16 +-
arch/x86/dom0_ops.c | 18 +-
arch/x86/domain.c | 234 ++++++++++++++---------------
arch/x86/domain_build.c | 41 ++---
arch/x86/i387.c | 4
arch/x86/idle0_task.c | 2
arch/x86/irq.c | 18 +-
arch/x86/mm.c | 158 +++++++++----------
arch/x86/setup.c | 4
arch/x86/shadow.c | 244 +++++++++++++++---------------
arch/x86/smpboot.c | 6
arch/x86/time.c | 10 -
arch/x86/traps.c | 138 ++++++++---------
arch/x86/vmx.c | 48 +++---
arch/x86/vmx_intercept.c | 8 -
arch/x86/vmx_io.c | 38 ++--
arch/x86/vmx_platform.c | 2
arch/x86/vmx_vmcs.c | 14 -
arch/x86/x86_32/asm-offsets.c | 20 +-
arch/x86/x86_32/entry.S | 28 +--
arch/x86/x86_32/mm.c | 2
arch/x86/x86_32/seg_fixup.c | 6
arch/x86/x86_32/traps.c | 14 -
arch/x86/x86_64/asm-offsets.c | 16 +-
arch/x86/x86_64/entry.S | 40 ++---
arch/x86/x86_64/mm.c | 10 -
arch/x86/x86_64/traps.c | 2
common/dom0_ops.c | 82 +++++-----
common/domain.c | 120 +++++++--------
common/event_channel.c | 66 ++++----
common/grant_table.c | 4
common/keyhandler.c | 30 +--
common/sched_bvt.c | 139 ++++++++---------
common/sched_sedf.c | 256 ++++++++++++++++----------------
common/schedule.c | 162 ++++++++++----------
drivers/char/console.c | 2
include/asm-ia64/config.h | 4
include/asm-ia64/domain.h | 8 -
include/asm-ia64/vcpu.h | 4
include/asm-ia64/vmmu.h | 10 -
include/asm-ia64/vmx.h | 6
include/asm-ia64/vmx_vpd.h | 4
include/asm-x86/current.h | 4
include/asm-x86/debugger.h | 2
include/asm-x86/domain.h | 4
include/asm-x86/i387.h | 12 -
include/asm-x86/ldt.h | 10 -
include/asm-x86/mm.h | 6
include/asm-x86/processor.h | 14 -
include/asm-x86/shadow.h | 132 ++++++++--------
include/asm-x86/vmx.h | 8 -
include/asm-x86/vmx_platform.h | 2
include/asm-x86/vmx_virpit.h | 2
include/asm-x86/vmx_vmcs.h | 4
include/public/event_channel.h | 4
include/xen/domain.h | 10 -
include/xen/event.h | 24 +--
include/xen/irq.h | 4
include/xen/sched-if.h | 16 +-
include/xen/sched.h | 64 ++++----
include/xen/time.h | 2
include/xen/types.h | 2
82 files changed, 1449 insertions(+), 1455 deletions(-)
diff -Nru a/xen/arch/ia64/asm-offsets.c b/xen/arch/ia64/asm-offsets.c
--- a/xen/arch/ia64/asm-offsets.c 2005-06-02 18:04:08 -04:00
+++ b/xen/arch/ia64/asm-offsets.c 2005-06-02 18:04:08 -04:00
@@ -13,7 +13,7 @@
#include <asm/tlb.h>
#endif // CONFIG_VTI
-#define task_struct exec_domain
+#define task_struct vcpu
#define DEFINE(sym, val) \
asm volatile("\n->" #sym " %0 " #val : : "i" (val))
@@ -60,8 +60,8 @@
//DEFINE(IA64_TASK_SIGHAND_OFFSET,offsetof (struct task_struct, sighand));
//DEFINE(IA64_TASK_SIGNAL_OFFSET,offsetof (struct task_struct, signal));
//DEFINE(IA64_TASK_TGID_OFFSET, offsetof (struct task_struct, tgid));
- DEFINE(IA64_TASK_THREAD_KSP_OFFSET, offsetof (struct exec_domain, arch._thread.ksp));
- DEFINE(IA64_TASK_THREAD_ON_USTACK_OFFSET, offsetof (struct exec_domain, arch._thread.on_ustack));
+ DEFINE(IA64_TASK_THREAD_KSP_OFFSET, offsetof (struct vcpu, arch._thread.ksp));
+ DEFINE(IA64_TASK_THREAD_ON_USTACK_OFFSET, offsetof (struct vcpu, arch._thread.on_ustack));
BLANK();
@@ -112,14 +112,14 @@
DEFINE(IA64_PT_REGS_CR_ISR_OFFSET, offsetof (struct xen_regs, cr_isr));
DEFINE(IA64_PT_REGS_EML_UNAT_OFFSET, offsetof (struct xen_regs, eml_unat));
DEFINE(IA64_PT_REGS_RFI_PFS_OFFSET, offsetof (struct xen_regs, rfi_pfs));
- DEFINE(RFI_IIP_OFFSET, offsetof(struct exec_domain, arch.arch_vmx.rfi_iip));
- DEFINE(RFI_IPSR_OFFSET, offsetof(struct exec_domain, arch.arch_vmx.rfi_ipsr));
- DEFINE(RFI_IFS_OFFSET,offsetof(struct exec_domain ,arch.arch_vmx.rfi_ifs));
- DEFINE(RFI_PFS_OFFSET,offsetof(struct exec_domain ,arch.arch_vmx.rfi_pfs));
- DEFINE(SWITCH_MRR5_OFFSET,offsetof(struct exec_domain ,arch.arch_vmx.mrr5));
- DEFINE(SWITCH_MRR6_OFFSET,offsetof(struct exec_domain ,arch.arch_vmx.mrr6));
- DEFINE(SWITCH_MRR7_OFFSET,offsetof(struct exec_domain ,arch.arch_vmx.mrr7));
- DEFINE(SWITCH_MPTA_OFFSET,offsetof(struct exec_domain ,arch.arch_vmx.mpta));
+ DEFINE(RFI_IIP_OFFSET, offsetof(struct vcpu, arch.arch_vmx.rfi_iip));
+ DEFINE(RFI_IPSR_OFFSET, offsetof(struct vcpu, arch.arch_vmx.rfi_ipsr));
+ DEFINE(RFI_IFS_OFFSET,offsetof(struct vcpu ,arch.arch_vmx.rfi_ifs));
+ DEFINE(RFI_PFS_OFFSET,offsetof(struct vcpu ,arch.arch_vmx.rfi_pfs));
+ DEFINE(SWITCH_MRR5_OFFSET,offsetof(struct vcpu ,arch.arch_vmx.mrr5));
+ DEFINE(SWITCH_MRR6_OFFSET,offsetof(struct vcpu ,arch.arch_vmx.mrr6));
+ DEFINE(SWITCH_MRR7_OFFSET,offsetof(struct vcpu ,arch.arch_vmx.mrr7));
+ DEFINE(SWITCH_MPTA_OFFSET,offsetof(struct vcpu ,arch.arch_vmx.mpta));
#endif //CONFIG_VTI
DEFINE(IA64_PT_REGS_R16_OFFSET, offsetof (struct pt_regs, r16));
DEFINE(IA64_PT_REGS_R17_OFFSET, offsetof (struct pt_regs, r17));
@@ -193,7 +193,7 @@
BLANK();
#ifdef CONFIG_VTI
- DEFINE(IA64_VPD_BASE_OFFSET, offsetof (struct exec_domain, arch.arch_vmx.vpd));
+ DEFINE(IA64_VPD_BASE_OFFSET, offsetof (struct vcpu, arch.arch_vmx.vpd));
DEFINE(IA64_VPD_CR_VPTA_OFFSET, offsetof (cr_t, pta));
DEFINE(XXX_THASH_SIZE, sizeof (thash_data_t));
diff -Nru a/xen/arch/ia64/domain.c b/xen/arch/ia64/domain.c
--- a/xen/arch/ia64/domain.c 2005-06-02 18:04:08 -04:00
+++ b/xen/arch/ia64/domain.c 2005-06-02 18:04:08 -04:00
@@ -154,23 +154,23 @@
continue_cpu_idle_loop();
}
-struct exec_domain *arch_alloc_exec_domain_struct(void)
+struct vcpu *arch_alloc_vcpu_struct(void)
{
- /* Per-vp stack is used here. So we need keep exec_domain
+ /* Per-vp stack is used here. So we need keep vcpu
* same page as per-vp stack */
return alloc_xenheap_pages(KERNEL_STACK_SIZE_ORDER);
}
-void arch_free_exec_domain_struct(struct exec_domain *ed)
+void arch_free_vcpu_struct(struct vcpu *v)
{
- free_xenheap_pages(ed, KERNEL_STACK_SIZE_ORDER);
+ free_xenheap_pages(v, KERNEL_STACK_SIZE_ORDER);
}
#ifdef CONFIG_VTI
-void arch_do_createdomain(struct exec_domain *ed)
+void arch_do_createdomain(struct vcpu *v)
{
- struct domain *d = ed->domain;
- struct thread_info *ti = alloc_thread_info(ed);
+ struct domain *d = v->domain;
+ struct thread_info *ti = alloc_thread_info(v);
/* If domain is VMX domain, shared info area is created
* by domain and then domain notifies HV by specific hypercall.
@@ -187,18 +187,18 @@
* normal xen convention.
*/
d->shared_info = NULL;
- ed->vcpu_info = (void *)alloc_xenheap_page();
- if (!ed->vcpu_info) {
+ v->vcpu_info = (void *)alloc_xenheap_page();
+ if (!v->vcpu_info) {
printk("ERROR/HALTING: CAN'T ALLOC PAGE\n");
while (1);
}
- memset(ed->vcpu_info, 0, PAGE_SIZE);
+ memset(v->vcpu_info, 0, PAGE_SIZE);
/* Clear thread_info to clear some important fields, like preempt_count */
memset(ti, 0, sizeof(struct thread_info));
/* Allocate per-domain vTLB and vhpt */
- ed->arch.vtlb = init_domain_tlb(ed);
+ v->arch.vtlb = init_domain_tlb(v);
/* Physical->machine page table will be allocated when
* final setup, since we have no the maximum pfn number in
@@ -215,20 +215,20 @@
// stay on kernel stack because may get interrupts!
// ia64_ret_from_clone (which b0 gets in new_thread) switches
// to user stack
- ed->arch._thread.on_ustack = 0;
+ v->arch._thread.on_ustack = 0;
}
#else // CONFIG_VTI
-void arch_do_createdomain(struct exec_domain *ed)
+void arch_do_createdomain(struct vcpu *v)
{
- struct domain *d = ed->domain;
+ struct domain *d = v->domain;
d->shared_info = (void *)alloc_xenheap_page();
- ed->vcpu_info = (void *)alloc_xenheap_page();
- if (!ed->vcpu_info) {
+ v->vcpu_info = (void *)alloc_xenheap_page();
+ if (!v->vcpu_info) {
printk("ERROR/HALTING: CAN'T ALLOC PAGE\n");
while (1);
}
- memset(ed->vcpu_info, 0, PAGE_SIZE);
+ memset(v->vcpu_info, 0, PAGE_SIZE);
/* pin mapping */
// FIXME: Does this belong here? Or do only at domain switch time?
#if 0
@@ -246,7 +246,7 @@
d->max_pages = (128*1024*1024)/PAGE_SIZE; // 128MB default // FIXME
if ((d->metaphysical_rid = allocate_metaphysical_rid()) == -1UL)
BUG();
- ed->vcpu_info->arch.metaphysical_mode = 1;
+ v->vcpu_info->arch.metaphysical_mode = 1;
#define DOMAIN_RID_BITS_DEFAULT 18
if (!allocate_rid_range(d,DOMAIN_RID_BITS_DEFAULT)) // FIXME
BUG();
@@ -258,22 +258,22 @@
// stay on kernel stack because may get interrupts!
// ia64_ret_from_clone (which b0 gets in new_thread) switches
// to user stack
- ed->arch._thread.on_ustack = 0;
+ v->arch._thread.on_ustack = 0;
}
#endif // CONFIG_VTI
-void arch_do_boot_vcpu(struct exec_domain *p)
+void arch_do_boot_vcpu(struct vcpu *v)
{
return;
}
-int arch_set_info_guest(struct exec_domain *p, struct vcpu_guest_context *c)
+int arch_set_info_guest(struct vcpu *v, struct vcpu_guest_context *c)
{
dummy();
return 1;
}
-int arch_final_setup_guest(struct exec_domain *p, struct vcpu_guest_context *c)
+int arch_final_setup_guest(struct vcpu *v, struct vcpu_guest_context *c)
{
dummy();
return 1;
@@ -285,12 +285,12 @@
}
#ifdef CONFIG_VTI
-void new_thread(struct exec_domain *ed,
+void new_thread(struct vcpu *v,
unsigned long start_pc,
unsigned long start_stack,
unsigned long start_info)
{
- struct domain *d = ed->domain;
+ struct domain *d = v->domain;
struct switch_stack *sw;
struct xen_regs *regs;
struct ia64_boot_param *bp;
@@ -302,12 +302,12 @@
#ifdef CONFIG_DOMAIN0_CONTIGUOUS
if (d == dom0) start_pc += dom0_start;
#endif
- regs = (struct xen_regs *) ((unsigned long) ed + IA64_STK_OFFSET) - 1;
+ regs = (struct xen_regs *) ((unsigned long) v + IA64_STK_OFFSET) - 1;
sw = (struct switch_stack *) regs - 1;
/* Sanity Clear */
memset(sw, 0, sizeof(struct xen_regs) + sizeof(struct switch_stack));
- if (VMX_DOMAIN(ed)) {
+ if (VMX_DOMAIN(v)) {
/* dt/rt/it:1;i/ic:1, si:1, vm/bn:1, ac:1 */
regs->cr_ipsr = 0x501008826008; /* Need to be expanded as macro */
} else {
@@ -320,42 +320,42 @@
regs->ar_rsc = 0x0;
regs->cr_ifs = 0x0;
regs->ar_fpsr = sw->ar_fpsr = FPSR_DEFAULT;
- sw->ar_bspstore = (unsigned long)ed + IA64_RBS_OFFSET;
- printf("new_thread: ed=%p, regs=%p, sw=%p, new_rbs=%p,
IA64_STK_OFFSET=%p, &r8=%p\n",
- ed,regs,sw,sw->ar_bspstore,IA64_STK_OFFSET,®s->r8);
+ sw->ar_bspstore = (unsigned long)v + IA64_RBS_OFFSET;
+ printf("new_thread: v=%p, regs=%p, sw=%p, new_rbs=%p,
IA64_STK_OFFSET=%p, &r8=%p\n",
+ v,regs,sw,sw->ar_bspstore,IA64_STK_OFFSET,®s->r8);
printf("iip:0x%lx,ipsr:0x%lx\n", regs->cr_iip, regs->cr_ipsr);
sw->b0 = (unsigned long) &ia64_ret_from_clone;
- ed->arch._thread.ksp = (unsigned long) sw - 16;
+ v->arch._thread.ksp = (unsigned long) sw - 16;
printk("new_thread, about to call init_all_rr\n");
- if (VMX_DOMAIN(ed)) {
- vmx_init_all_rr(ed);
+ if (VMX_DOMAIN(v)) {
+ vmx_init_all_rr(v);
} else
- init_all_rr(ed);
+ init_all_rr(v);
// set up boot parameters (and fake firmware)
printk("new_thread, about to call dom_fw_setup\n");
- VMX_VPD(ed,vgr[12]) = dom_fw_setup(d,saved_command_line,256L); //FIXME
+ VMX_VPD(v,vgr[12]) = dom_fw_setup(d,saved_command_line,256L); //FIXME
printk("new_thread, done with dom_fw_setup\n");
- if (VMX_DOMAIN(ed)) {
+ if (VMX_DOMAIN(v)) {
/* Virtual processor context setup */
- VMX_VPD(ed, vpsr) = IA64_PSR_BN;
- VPD_CR(ed, dcr) = 0;
+ VMX_VPD(v, vpsr) = IA64_PSR_BN;
+ VPD_CR(v, dcr) = 0;
} else {
// don't forget to set this!
- ed->vcpu_info->arch.banknum = 1;
+ v->vcpu_info->arch.banknum = 1;
}
}
#else // CONFIG_VTI
// heavily leveraged from linux/arch/ia64/kernel/process.c:copy_thread()
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog