diff -r 869cc1f44e52 xen/arch/ia64/Makefile
--- a/xen/arch/ia64/Makefile	Thu Sep 28 12:47:49 2006 +0200
+++ b/xen/arch/ia64/Makefile	Fri Sep 29 12:12:24 2006 +0200
@@ -68,7 +68,7 @@ asm-xsi-offsets.s: asm-xsi-offsets.c $(H
 	|| ln -sf ../../../arch/x86/hvm/vioapic.c $(BASEDIR)/arch/ia64/vmx/hvm_vioapic.c

 # I'm sure a Makefile wizard would know a better way to do this
-xen.lds.s: xen/xen.lds.S
+xen.lds.s: xen/xen.lds.S $(HDRS)
 	$(CC) -E $(CPPFLAGS) -P -DXEN $(AFLAGS) \
 		-o xen.lds.s xen/xen.lds.S
diff -r 869cc1f44e52 xen/arch/ia64/linux-xen/entry.S
--- a/xen/arch/ia64/linux-xen/entry.S	Thu Sep 28 12:47:49 2006 +0200
+++ b/xen/arch/ia64/linux-xen/entry.S	Fri Sep 29 12:12:24 2006 +0200
@@ -199,7 +199,7 @@ GLOBAL_ENTRY(ia64_switch_to)
 	movl r27=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_STACK_OFFSET;;
 	ld8 r27=[r27]
 	adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
-	dep r20=0,in0,60,4		// physical address of "next"
+	XEN_VA_TO_PA(r20,in0)		// physical address of "next"
 #else
 	mov r27=IA64_KR(CURRENT_STACK)
 	adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
diff -r 869cc1f44e52 xen/arch/ia64/linux-xen/head.S
--- a/xen/arch/ia64/linux-xen/head.S	Thu Sep 28 12:47:49 2006 +0200
+++ b/xen/arch/ia64/linux-xen/head.S	Fri Sep 29 12:12:24 2006 +0200
@@ -344,7 +344,7 @@ 1: // now we are in virtual mode
 	;;
 	or r18=r17,r18
 #ifdef XEN
-	dep r2=-1,r3,60,4	// IMVA of task
+	XEN_PA_TO_VA(r2,r3)	// IMVA of task
 #else
 	dep r2=-1,r3,61,3	// IMVA of task
 #endif
@@ -397,7 +397,7 @@ 1: // now we are in virtual mode
 	mov ar.rsc=0x3		// place RSE in eager mode
 #ifdef XEN
-(isBP)	dep r28=-1,r28,60,4	// make address virtual
+(isBP)	XEN_PA_TO_VA(r28,r28)	// make addr virtual
 #else
 (isBP)	dep r28=-1,r28,61,3	// make address virtual
 #endif
diff -r 869cc1f44e52 xen/arch/ia64/linux-xen/pal.S
--- a/xen/arch/ia64/linux-xen/pal.S	Thu Sep 28 12:47:49 2006 +0200
+++ b/xen/arch/ia64/linux-xen/pal.S	Fri Sep 29 12:12:24 2006 +0200
@@ -14,7 +14,9 @@
 #include <asm/asmmacro.h>
 #include <asm/processor.h>

-
+#ifdef XEN
+#include <asm/xensystem.h>
+#endif
 	.data
 pal_entry_point:
 	data8 ia64_pal_default_handler
@@ -167,7 +169,7 @@ 1: {
 	;;
 	mov loc4=ar.rsc			// save RSE configuration
 #ifdef XEN
-	dep.z loc2=loc2,0,60		// convert pal entry point to physical
+	XEN_VA_TO_PA(loc2,loc2)		// convert pal entry point to physical
 #else // XEN
 	dep.z loc2=loc2,0,61		// convert pal entry point to physical
 #endif // XEN
@@ -230,7 +232,7 @@ 1: {
 	;;
 	mov loc4=ar.rsc			// save RSE configuration
 #ifdef XEN
-	dep.z loc2=loc2,0,60		// convert pal entry point to physical
+	XEN_VA_TO_PA(loc2,loc2)		// convert pal entry point to physical
 #else // XEN
 	dep.z loc2=loc2,0,61		// convert pal entry point to physical
 #endif // XEN
diff -r 869cc1f44e52 xen/arch/ia64/linux-xen/setup.c
--- a/xen/arch/ia64/linux-xen/setup.c	Thu Sep 28 12:47:49 2006 +0200
+++ b/xen/arch/ia64/linux-xen/setup.c	Fri Sep 29 12:12:24 2006 +0200
@@ -162,9 +162,7 @@ filter_rsvd_memory (unsigned long start,
 		if (range_start < range_end)
 #ifdef XEN
 		{
-		/* init_boot_pages requires "ps, pe" */
-			printk("Init boot pages: 0x%lx -> 0x%lx.\n",
-				__pa(range_start), __pa(range_end));
+			/* init_boot_pages requires "ps, pe" */
 			(*func)(__pa(range_start), __pa(range_end), 0);
 		}
 #else
@@ -227,7 +225,7 @@ reserve_memory (void)
 	n++;

 	rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START);
-#ifdef XEN
+#if 0 //def XEN
 	/* Reserve xen image/bitmap/xen-heap */
 	rsvd_region[n].end = rsvd_region[n].start + xenheap_size;
 #else
diff -r 869cc1f44e52 xen/arch/ia64/vmx/vmx_entry.S
--- a/xen/arch/ia64/vmx/vmx_entry.S	Thu Sep 28 12:47:49 2006 +0200
+++ b/xen/arch/ia64/vmx/vmx_entry.S	Fri Sep 29 12:12:24 2006 +0200
@@ -599,10 +599,8 @@ 1: {
 	;;
 	tpa loc2 = loc2		// get physical address of per cpu date
 	;;
-	dep loc3 = 0,in1,60,4	// get physical address of shared_info
-	dep loc4 = 0,in2,60,4	// get physical address of shared_arch_info
-	dep loc5 = 0,in3,60,4	// get physical address of guest_vhpt
-	dep loc6 = 0,in4,60,4	// get physical address of pal code
+	XEN_VA_TO_PA(loc5,in3)	// get physical address of guest_vhpt
+	XEN_VA_TO_PA(loc6,in4)	// get physical address of pal code
 	;;
 	mov loc7 = psr		// save psr
 	;;
diff -r 869cc1f44e52 xen/arch/ia64/vmx/vmx_init.c
--- a/xen/arch/ia64/vmx/vmx_init.c	Thu Sep 28 12:47:49 2006 +0200
+++ b/xen/arch/ia64/vmx/vmx_init.c	Fri Sep 29 12:12:24 2006 +0200
@@ -93,6 +93,13 @@ identify_vmx_feature(void)
 		goto no_vti;
 	}

+	/* Be sure the hypervisor is protected by VTi VA. */
+	if (XEN_VA_SZ != 59) {
+		printk("VTi disabled due to CONFIG_XEN_VA_SZ != 59 (value is %d)\n",
+		       CONFIG_XEN_VA_SZ);
+		goto no_vti;
+	}
+
 	/* Does xen has ability to decode itself? */
 	if (!(vp_env_info & VP_OPCODE))
 		printk("WARNING: no opcode provided from hardware(%lx)!!!\n", vp_env_info);
diff -r 869cc1f44e52 xen/arch/ia64/vmx/vmx_ivt.S
--- a/xen/arch/ia64/vmx/vmx_ivt.S	Thu Sep 28 12:47:49 2006 +0200
+++ b/xen/arch/ia64/vmx/vmx_ivt.S	Fri Sep 29 12:12:24 2006 +0200
@@ -300,7 +300,7 @@ vmx_alt_itlb_miss_1:
 	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
 	;;
 	and r19=r19,r16		// clear ed, reserved bits, and PTE control bits
-	shr.u r18=r16,55	// move address bit 59 to bit 4
+	shr.u r18=r16,XEN_VA_QUAD_SZ-4	// move address bit nocache to bit 4
 	;;
 	and r18=0x10,r18	// bit 4=address-bit(61)
 	or r19=r17,r19		// insert PTE control bits into r19
@@ -332,8 +332,8 @@ vmx_alt_dtlb_miss_1:
 	;;
 #ifdef CONFIG_VIRTUAL_FRAME_TABLE
 	// Test for the address of virtual frame_table
-	shr r22=r16,56;;
-	cmp.eq p8,p0=((VIRT_FRAME_TABLE_ADDR>>56)&0xff)-0x100,r22
+	shr r22=r16,XEN_VA_MAPS_SZ;;
+	cmp.eq p8,p0=VIRT_FRAME_TABLE_MSBS,r22
 (p8)	br.cond.sptk frametable_miss ;;
 #endif
 	tbit.z p6,p7=r16,63
@@ -346,11 +346,11 @@ vmx_alt_dtlb_miss_1:
 	;;
 	and r22=IA64_ISR_CODE_MASK,r20		// get the isr.code field
 	tbit.nz p6,p7=r20,IA64_ISR_SP_BIT	// is speculation bit on?
-	shr.u r18=r16,55	// move address bit 59 to bit 4
+	shr.u r18=r16,XEN_VA_QUAD_SZ-4	// move address bit nocache to bit 4
 	and r19=r19,r16		// clear ed, reserved bits, and PTE control bits
 	tbit.nz p9,p0=r20,IA64_ISR_NA_BIT	// is non-access bit on?
 	;;
-	and r18=0x10,r18	// bit 4=address-bit(61)
+	and r18=0x10,r18	// bit 4=address-bit(nocache)
 (p9)	cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22	// check isr.code field
 	dep r24=-1,r24,IA64_PSR_ED_BIT,1
 	or r19=r19,r17		// insert PTE control bits into r19
@@ -512,7 +512,7 @@ ENTRY(vmx_interrupt)
 	;;
 	.mem.offset 0,0; st8.spill [r16]=r15,16
 	.mem.offset 8,0; st8.spill [r17]=r14,16
-	dep r14=-1,r0,60,4
+	XEN_PA_TO_VA(r14,r0)
 	;;
 	.mem.offset 0,0; st8.spill [r16]=r2,16
 	.mem.offset 8,0; st8.spill [r17]=r3,16
diff -r 869cc1f44e52 xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c	Thu Sep 28 12:47:49 2006 +0200
+++ b/xen/arch/ia64/xen/domain.c	Fri Sep 29 12:12:24 2006 +0200
@@ -48,8 +48,8 @@
 #include
 #include

-unsigned long dom0_size = 512*1024*1024;
-unsigned long dom0_align = 64*1024*1024;
+static unsigned long dom0_size = 512*1024*1024;
+static unsigned long dom0_align = 64*1024*1024;

 /* dom0_max_vcpus: maximum number of VCPUs to create for dom0.  */
 static unsigned int dom0_max_vcpus = 1;
diff -r 869cc1f44e52 xen/arch/ia64/xen/hyperprivop.S
--- a/xen/arch/ia64/xen/hyperprivop.S	Thu Sep 28 12:47:49 2006 +0200
+++ b/xen/arch/ia64/xen/hyperprivop.S	Fri Sep 29 12:12:24 2006 +0200
@@ -1797,9 +1797,16 @@ ENTRY(hyper_set_rr)
 	// but adjust value actually placed in rr[r8]
 	// r22 contains adjusted rid, "mangle" it (see regionreg.c)
 	// and set ps to PAGE_SHIFT and ve to 1
+#ifdef CONFIG_MANGLE_RID_1_3
 	extr.u r27=r22,0,8
 	extr.u r28=r22,8,8
-	extr.u r29=r22,16,8;;
+	extr.u r29=r22,16,8
+#else
+	extr.u r27=r22,16,8
+	extr.u r28=r22,8,8
+	extr.u r29=r22,0,8
+#endif
+	;;
 	dep.z r23=PAGE_SHIFT,2,6;;
 	dep r23=-1,r23,0,1;;	// mangling is swapping bytes 1 & 3
 	dep r23=r27,r23,24,8;;
diff -r 869cc1f44e52 xen/arch/ia64/xen/ivt.S
--- a/xen/arch/ia64/xen/ivt.S	Thu Sep 28 12:47:49 2006 +0200
+++ b/xen/arch/ia64/xen/ivt.S	Fri Sep 29 12:12:24 2006 +0200
@@ -257,13 +257,10 @@ ENTRY(itlb_miss)
 	mov r16 = cr.ifa
 	mov r31 = pr
 	;;
-	extr.u r17=r16,59,5
+	shr r17=r16,XEN_VA_SZ
 	;;
 	/* If address belongs to VMM, go to alt tlb handler */
-	cmp.eq p6,p0=0x1e,r17
-(p6)	br.cond.spnt	late_alt_itlb_miss
-	;;
-	cmp.eq p6,p0=0x1d,r17
+	cmp.eq p6,p0=XEN_VA_MSBS,r17
 (p6)	br.cond.spnt	late_alt_itlb_miss
 	;;
 	mov pr = r31, 0x1ffff
@@ -322,13 +319,10 @@ ENTRY(dtlb_miss)
 	mov r16=cr.ifa		// get virtual address
 	mov r31=pr
 	;;
-	extr.u r17=r16,59,5
-	;;
-	cmp.eq p6,p0=0x1e,r17	// if the address belongs to VMM, go
-				// to the alternate tlb handler
-(p6)	br.cond.spnt	late_alt_dtlb_miss
-	;;
-	cmp.eq p6,p0=0x1d,r17
+	shr r17=r16,XEN_VA_SZ
+	;;
+	/* If address belongs to VMM, go to alt tlb handler */
+	cmp.eq p6,p0=XEN_VA_MSBS,r17
 (p6)	br.cond.spnt	late_alt_dtlb_miss
 	;;
 #if VHPT_ENABLED
@@ -346,35 +340,27 @@ ENTRY(dtlb_miss)
 	;;
 (p7)	br.cond.spnt 2f

-	// Is the faulted iip in the vmm area?
-	// -- check [59:58] bit
-	// -- if 00, 11: guest
-	// -- if 01, 10: vmm
-	extr.u r19 = r28, 58, 2
+	// Is the faulted iip in vmm area?
+	shr r19 = r28, XEN_VA_SZ
 	;;
-	cmp.eq p10, p0 = 0x0, r19
+	cmp.eq p10, p0 = XEN_VA_MSBS, r19
 (p10)	br.cond.sptk 2f
-	cmp.eq p11, p0 = 0x3, r19
-(p11)	br.cond.sptk 2f

 	// Is the faulted address is in the identity mapping area?
-	// must be either 0xf000... or 0xe8000...
-	extr.u r20 = r16, 59, 5
+	// (cache or nocache).
+	shr r20 = r16, XEN_VA_QUAD_SZ + 1
 	;;
-	cmp.eq p12, p0 = 0x1e, r20	// (0xf0 >> 3) = 0x1e
-(p12)	br.cond.spnt 1f
-	cmp.eq p0, p13 = 0x1d, r20	// (0xe8 >> 3) = 0x1d
+	cmp.eq p0, p13 = XEN_CACHE_QUAD_MSBS / 2, r20
 (p13)	br.cond.sptk 2f

 1:	movl r24=PAGE_KERNEL	// xen identity mapping area.
 	movl r25=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
 	;;
-	shr.u r26=r16,55	// move address bit 59 to bit 4
-	and r25=r25,r16		// clear ed, reserved bits, and PTE control bits
+	shr.u r26=r16,XEN_VA_QUAD_SZ-4	// move addr bit nocache to bit 4
+	and r25=r25,r16		// clear ed, reserved bits and PTE control bits
 	;;
 	and r26=0x10,r26	// bit 4=address-bit(59)
-	;;
 	or r25=r25,r24		// insert PTE control bits into r25
 	;;
 	or r25=r25,r26		// set bit 4 (uncached) if the access was to
@@ -470,9 +456,9 @@ late_alt_itlb_miss:
 	extr.u r23=r21,IA64_PSR_CPL0_BIT,2	// extract psr.cpl
 	and r19=r19,r16		// clear ed, reserved bits, and PTE control bits
 #ifdef XEN
-	shr.u r18=r16,55	// move address bit 59 to bit 4
-	;;
-	and r18=0x10,r18	// bit 4=address-bit(59)
+	shr.u r18=r16,XEN_VA_QUAD_SZ-4	// move address uncache bit to bit 4
+	;;
+	and r18=0x10,r18	// bit 4=address-bit(uncache)
 #else
 	shr.u r18=r16,57	// move address bit 61 to bit 4
 	;;
@@ -521,12 +507,12 @@ late_alt_dtlb_miss:
 	and r22=IA64_ISR_CODE_MASK,r20		// get the isr.code field
 	tbit.nz p6,p7=r20,IA64_ISR_SP_BIT	// is speculation bit on?
 #ifdef XEN
-	shr.u r18=r16,55	// move address bit 59 to bit 4
+	shr.u r18=r16,XEN_VA_QUAD_SZ-4	// move address bit nocache to bit 4
 	and r19=r19,r16		// clear ed, reserved bits, and
 				// PTE control bits
 	tbit.nz p9,p0=r20,IA64_ISR_NA_BIT	// is non-access bit on?
 	;;
-	and r18=0x10,r18	// bit 4=address-bit(59)
+	and r18=0x10,r18	// bit 4=address-bit(nocache)
 #else
 	shr.u r18=r16,57	// move address bit 61 to bit 4
 	and r19=r19,r16		// clear ed, reserved bits, and
@@ -541,20 +527,16 @@ late_alt_dtlb_miss:
 #ifdef XEN
 	;;
 #ifdef CONFIG_VIRTUAL_FRAME_TABLE
-	shr r22=r16,56		// Test for the address of virtual frame_table
-	;;
-	cmp.eq p8,p0=((VIRT_FRAME_TABLE_ADDR>>56)&0xff)-0x100,r22
-(p8)	br.cond.sptk frametable_miss ;;
-#endif
-	// If it is not a Xen address, handle it via page_fault.
-	// Note that 0xf000 (cached) and 0xe800 (uncached) addresses
-	// should be OK.
-	extr.u r22=r16,59,5
-	;;
-	cmp.eq p8,p0=0x1e,r22
-(p8)	br.cond.spnt 1f
-	;;
-	cmp.ne p8,p0=0x1d,r22
+	// Test for the address of virtual frame_table
+	shr r22=r16,XEN_VA_MAPS_SZ
+	;;
+	cmp.eq p8,p0=VIRT_FRAME_TABLE_MSBS,r22
+(p8)	br.cond.sptk frametable_miss
+#endif
+	// Test for Xen address, if not handle via page_fault
+	shr r22=r16,XEN_VA_SZ
+	;;
+	cmp.ne p8,p0=XEN_VA_MSBS,r22
 (p8)	br.cond.sptk page_fault
 	;;
 1:
@@ -574,13 +556,15 @@ END(alt_dtlb_miss)

 #ifdef CONFIG_VIRTUAL_FRAME_TABLE
 GLOBAL_ENTRY(frametable_miss)
+	movl r24=frametable_pg_dir
+	;;
+	tpa r24=r24				// Won't fail as .data is TR.
+	extr.u r17=r16,PGDIR_SHIFT,(PAGE_SHIFT-3)
+	;;
+	shladd r24=r17,3,r24			// r24=&pgd[pgd_offset(addr)]
 	rsm psr.dt				// switch to using physical data addressing
-	movl r24=(frametable_pg_dir-PAGE_OFFSET)	// r24=__pa(frametable_pg_dir)
 	;;
 	srlz.d
-	extr.u r17=r16,PGDIR_SHIFT,(PAGE_SHIFT-3)
-	;;
-	shladd r24=r17,3,r24			// r24=&pgd[pgd_offset(addr)]
 	;;
 	ld8 r24=[r24]				// r24=pgd[pgd_offset(addr)]
 	extr.u r18=r16,PMD_SHIFT,(PAGE_SHIFT-3)	// r18=pmd_offset
@@ -1097,7 +1081,6 @@ ENTRY(break_fault)
 	ld4 r23=[r22];;
 	cmp4.eq p6,p7=r23,r17		// Xen-reserved breakimm?
 (p6)	br.spnt.many dispatch_break_fault
-	;;
 	br.sptk.many fast_break_reflect
 	;;
 #else /* !XEN */
@@ -1197,7 +1180,6 @@ ENTRY(interrupt)
 ENTRY(interrupt)
 	DBG_FAULT(12)
 	mov r31=pr		// prepare to save predicates
-	;;
 #ifdef XEN
 	mov r30=cr.ivr		// pass cr.ivr as first arg
 	// FIXME: this is a hack... use cpuinfo.ksoftirqd because its
@@ -1206,7 +1188,6 @@ ENTRY(interrupt)
 	movl r29=THIS_CPU(cpu_info)+IA64_CPUINFO_KSOFTIRQD_OFFSET
 	;;
 	st8 [r29]=r30
-	;;
 	movl r28=slow_interrupt
 	;;
 	mov r29=rp
@@ -1708,10 +1689,9 @@ ENTRY(daccess_rights)
 ENTRY(daccess_rights)
 	DBG_FAULT(23)
 #ifdef XEN
-	mov r31=pr
-	;;
 	mov r16=cr.isr
 	mov r17=cr.ifa
+	mov r31=pr
 	mov r19=23
 	movl r20=0x5300
 	br.sptk.many fast_access_reflect
diff -r 869cc1f44e52 xen/arch/ia64/xen/regionreg.c
--- a/xen/arch/ia64/xen/regionreg.c	Thu Sep 28 12:47:49 2006 +0200
+++ b/xen/arch/ia64/xen/regionreg.c	Fri Sep 29 12:12:24 2006 +0200
@@ -103,6 +103,7 @@ void init_rid_allocator (void)
 	if (implemented_rid_bits > IA64_MAX_IMPL_RID_BITS)
 		implemented_rid_bits = IA64_MAX_IMPL_RID_BITS;

+#ifdef CONFIG_MANGLE_RID_1_3
 	/* Due to RID mangling, we expect 24 RID bits!
 	   This test should be removed if RID mangling is removed/modified. */
 	if (implemented_rid_bits != 24) {
@@ -110,6 +111,7 @@ void init_rid_allocator (void)
 			implemented_rid_bits);
 		BUG();
 	}
+#endif

 	/* Allow the creation of at least domain 0. */
 	if (domain_rid_bits_default > implemented_rid_bits - 1)
diff -r 869cc1f44e52 xen/arch/ia64/xen/xenasm.S
--- a/xen/arch/ia64/xen/xenasm.S	Thu Sep 28 12:47:49 2006 +0200
+++ b/xen/arch/ia64/xen/xenasm.S	Fri Sep 29 12:12:24 2006 +0200
@@ -60,13 +60,14 @@ 1:
 1:	// now in physical mode with psr.i/ic off so do rr7 switch
 	movl r16=pal_vaddr		// Note: belong to region 7!
-	;;
-	mov rr[r16]=in0
+	;;
+	tpa r17=r16			// Get physical address.
+	;;
+	mov rr[r16]=in0			// Must be done after tpa!
 	;;
 	srlz.d
-	dep r16=0,r16,60,4		// Get physical address.
-	;;
-	ld8 loc5=[r16]			// read pal_vaddr
+	;;
+	ld8 loc5=[r17]			// read pal_vaddr
 	movl r26=PAGE_KERNEL
 	;;
@@ -93,7 +94,7 @@ 1:
 	cmp.eq p7,p0=r17,r18
 (p7)	br.cond.sptk .stack_overlaps
 	mov r25=IA64_GRANULE_SHIFT<<2
-	dep r21=0,r13,60,4		// physical address of "current"
+	XEN_VA_TO_PA(r21,r13)		// physical address of "current"
 	;;
 	ptr.d r13,r25
 	or r23=r21,r26			// construct PA | page properties
@@ -159,7 +160,7 @@ 1:
 	// Purge/insert PAL TR
 	mov r24=IA64_TR_PALCODE
 	mov r23=IA64_GRANULE_SHIFT<<2
-	dep r25=0,loc5,60,4		// convert pal vaddr to paddr
+	XEN_VA_TO_PA(r25,loc5)		// convert pal vaddr to paddr
 	;;
 	ptr.i loc5,r23
 	or r25=r25,r26			// construct PA | page properties
diff -r 869cc1f44e52 xen/arch/ia64/xen/xenmem.c
--- a/xen/arch/ia64/xen/xenmem.c	Thu Sep 28 12:47:49 2006 +0200
+++ b/xen/arch/ia64/xen/xenmem.c	Fri Sep 29 12:12:24 2006 +0200
@@ -178,9 +178,8 @@ void init_virtual_frametable(void)
 	mpt_table = (unsigned long *)VIRT_FRAME_TABLE_END - max_page;
 	efi_memmap_walk(create_mpttable_page_table, NULL);

-	printk("virtual machine to physical table: %p size: %lukB\n"
-	       "max_page: 0x%lx\n",
-	       mpt_table, ((table_size << PAGE_SHIFT) >> 10), max_page);
+	printk("virtual machine to physical table: %p size: %lukB\n",
+	       mpt_table, ((table_size << PAGE_SHIFT) >> 10));
 }

 int
diff -r 869cc1f44e52 xen/arch/ia64/xen/xensetup.c
--- a/xen/arch/ia64/xen/xensetup.c	Thu Sep 28 12:47:49 2006 +0200
+++ b/xen/arch/ia64/xen/xensetup.c	Fri Sep 29 12:12:24 2006 +0200
@@ -27,7 +27,13 @@
 #include
 #include

+extern long running_on_sim;
+
 unsigned long xenheap_phys_end, total_pages;
+static unsigned long xenheap_phys_start;
+
+unsigned long xen_pstart;
+void *xen_heap_start;

 char saved_command_line[COMMAND_LINE_SIZE];
 char dom0_command_line[COMMAND_LINE_SIZE];
@@ -69,19 +75,11 @@ boolean_param("xencons_poll", opt_xencon

 /*
  * opt_xenheap_megabytes: Size of Xen heap in megabytes, including:
- *	xen image
  *	bootmap bits
  *	xen heap
- * Note: To allow xenheap size configurable, the prerequisite is
- *	to configure elilo allowing relocation defaultly. Then since
- *	elilo chooses 256M as alignment when relocating, alignment issue
- *	on IPF can be addressed.
  */
-unsigned int opt_xenheap_megabytes = XENHEAP_DEFAULT_MB;
-unsigned long xenheap_size = XENHEAP_DEFAULT_SIZE;
-extern long running_on_sim;
-unsigned long xen_pstart;
-void *xen_heap_start;
+static unsigned int opt_xenheap_megabytes = XENHEAP_DEFAULT_MB;
+integer_param("xenheap_megabytes", opt_xenheap_megabytes);

 static int
 xen_count_pages(u64 start, u64 end, void *arg)
@@ -93,6 +91,43 @@ xen_count_pages(u64 start, u64 end, void
      * is owned by Dom0?
      */
     *count += (end - start) >> PAGE_SHIFT;
     return 0;
+}
+
+static int
+find_xenheap_start(u64 start, u64 end)
+{
+    /* Round up.  */
+    start = (start + PAGE_SIZE - 1) & PAGE_MASK;
+
+    if (xenheap_phys_start == 0
+        && end > start
+        && end - start >= ((u64)opt_xenheap_megabytes << 20))
+        xenheap_phys_start = start;
+    return 0;
+}
+
+static void
+ia64_init_boot_pages1(u64 start, u64 end)
+{
+    if (start >= end)
+        return;
+    printk("Init boot pages: 0x%lx -> 0x%lx.\n", start, end);
+    init_boot_pages(start, end);
+}
+
+static int
+ia64_init_boot_pages(u64 start, u64 end)
+{
+    /* Overlap with xenheap ?  */
+    if (start <= xenheap_phys_end && end >= xenheap_phys_start)
+    {
+        ia64_init_boot_pages1(start, xenheap_phys_start);
+        ia64_init_boot_pages1(xenheap_phys_end, end);
+    }
+    else
+        ia64_init_boot_pages1(start, end);
+
+    return 0;
 }

 static void __init do_initcalls(void)
@@ -176,9 +211,6 @@ efi_print(void)
     void *p;
     int i;

-    if (!opt_efi_print)
-        return;
-
     efi_map_start = __va(ia64_boot_param->efi_memmap);
     efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
     efi_desc_size = ia64_boot_param->efi_memdesc_size;
@@ -192,63 +224,13 @@ efi_print(void)
     }
 }

-/*
- * These functions are utility functions for getting and
- * testing memory descriptors for allocating the xenheap area.
- */
-static efi_memory_desc_t *
-efi_get_md (unsigned long phys_addr)
-{
-    void *efi_map_start, *efi_map_end, *p;
-    efi_memory_desc_t *md;
-    u64 efi_desc_size;
-
-    efi_map_start = __va(ia64_boot_param->efi_memmap);
-    efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
-    efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
-    for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
-        md = p;
-        if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT))
-            return md;
-    }
-    return 0;
-}
-
-static int
-is_xenheap_usable_memory(efi_memory_desc_t *md)
-{
-    if (!(md->attribute & EFI_MEMORY_WB))
-        return 0;
-
-    switch (md->type) {
-        case EFI_LOADER_CODE:
-        case EFI_LOADER_DATA:
-        case EFI_BOOT_SERVICES_CODE:
-        case EFI_BOOT_SERVICES_DATA:
-        case EFI_CONVENTIONAL_MEMORY:
-            return 1;
-    }
-    return 0;
-}
-
-static inline int
-md_overlaps(efi_memory_desc_t *md, unsigned long phys_addr)
-{
-    return (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT));
-}
-
-#define MD_SIZE(md) (md->num_pages << EFI_PAGE_SHIFT)
-
 void start_kernel(void)
 {
     char *cmdline;
     unsigned long nr_pages;
     unsigned long dom0_memory_start, dom0_memory_size;
     unsigned long dom0_initrd_start, dom0_initrd_size;
-    unsigned long md_end, relo_start, relo_end, relo_size = 0;
     struct domain *idle_domain;
-    efi_memory_desc_t *kern_md, *last_md, *md;
 #ifdef CONFIG_SMP
     int i;
 #endif
@@ -288,98 +270,6 @@ void start_kernel(void)
     }
     printk("Xen command line: %s\n", saved_command_line);

-    /* xenheap should be in same TR-covered range with xen image */
-    xenheap_phys_end = xen_pstart + xenheap_size;
-    printk("xen image pstart: 0x%lx, xenheap pend: 0x%lx\n",
-           xen_pstart, xenheap_phys_end);
-
-    kern_md = md = efi_get_md(xen_pstart);
-    md_end = __pa(ia64_imva(&_end));
-    relo_start = xenheap_phys_end;
-
-    /*
-     * Scan through the memory descriptors after the kernel
-     * image to make sure we have enough room for the xenheap
-     * area, pushing out whatever may already be there.
-     */
-    while (relo_start + relo_size >= md_end) {
-        md = efi_get_md(md_end);
-
-        BUG_ON(!md);
-        BUG_ON(!is_xenheap_usable_memory(md));
-
-        md_end = md->phys_addr + MD_SIZE(md);
-        /*
-         * The dom0 kernel or initrd could overlap, reserve space
-         * at the end to relocate them later.
-         */
-        if (md->type == EFI_LOADER_DATA) {
-            /* Test for ranges we're not prepared to move */
-            BUG_ON(md_overlaps(md, __pa(ia64_boot_param)) ||
-                   md_overlaps(md, ia64_boot_param->efi_memmap) ||
-                   md_overlaps(md, ia64_boot_param->command_line));
-
-            relo_size += MD_SIZE(md);
-            /* If range overlaps the end, push out the relocation start */
-            if (md_end > relo_start)
-                relo_start = md_end;
-        }
-    }
-    last_md = md;
-    relo_end = relo_start + relo_size;
-
-    md_end = __pa(ia64_imva(&_end));
-
-    /*
-     * Move any relocated data out into the previously found relocation
-     * area.  Any extra memory descriptrs are moved out to the end
-     * and set to zero pages.
-     */
-    for (md = efi_get_md(md_end) ;; md = efi_get_md(md_end)) {
-        md_end = md->phys_addr + MD_SIZE(md);
-
-        if (md->type == EFI_LOADER_DATA) {
-            unsigned long relo_offset;
-
-            if (md_overlaps(md, ia64_boot_param->domain_start)) {
-                relo_offset = ia64_boot_param->domain_start - md->phys_addr;
-                printk("Moving Dom0 kernel image: 0x%lx -> 0x%lx (%ld KiB)\n",
-                       ia64_boot_param->domain_start, relo_start + relo_offset,
-                       ia64_boot_param->domain_size >> 10);
-                ia64_boot_param->domain_start = relo_start + relo_offset;
-            }
-            if (ia64_boot_param->initrd_size &&
-                md_overlaps(md, ia64_boot_param->initrd_start)) {
-                relo_offset = ia64_boot_param->initrd_start - md->phys_addr;
-                printk("Moving Dom0 initrd image: 0x%lx -> 0x%lx (%ld KiB)\n",
-                       ia64_boot_param->initrd_start, relo_start + relo_offset,
-                       ia64_boot_param->initrd_size >> 10);
-                ia64_boot_param->initrd_start = relo_start + relo_offset;
-            }
-            memcpy(__va(relo_start), __va(md->phys_addr), MD_SIZE(md));
-            relo_start += MD_SIZE(md);
-        }
-
-        if (md == kern_md)
-            continue;
-        if (md == last_md)
-            break;
-
-        md->phys_addr = relo_end;
-        md->num_pages = 0;
-    }
-
-    /* Trim the last entry */
-    md->phys_addr = relo_end;
-    md->num_pages = (md_end - relo_end) >> EFI_PAGE_SHIFT;
-
-    /*
-     * Expand the new kernel/xenheap (and maybe dom0/initrd) out to
-     * the full size.  This range will already be type EFI_LOADER_DATA,
-     * therefore the xenheap area is now protected being allocated for
-     * use by find_memmap_space() in efi.c
-     */
-    kern_md->num_pages = (relo_end - kern_md->phys_addr) >> EFI_PAGE_SHIFT;

     reserve_memory();

@@ -387,16 +277,26 @@ void start_kernel(void)
     max_page = 0;
     efi_memmap_walk(find_max_pfn, &max_page);
     printf("find_memory: efi_memmap_walk returns max_page=%lx\n",max_page);
-    efi_print();
-
-    xen_heap_start = memguard_init(ia64_imva(&_end));
-    printf("Before xen_heap_start: %p\n", xen_heap_start);
-    xen_heap_start = __va(init_boot_allocator(__pa(xen_heap_start)));
-    printf("After xen_heap_start: %p\n", xen_heap_start);
-
-    efi_memmap_walk(filter_rsvd_memory, init_boot_pages);
+    if ((max_page >> (20 + 3)) > opt_xenheap_megabytes) {
+        printf ("Xen heap size (%uMB) is too small for max_page\n",
+                opt_xenheap_megabytes);
+        BUG();
+    }
+
+    if (opt_efi_print)
+        efi_print();
+
+    /* Find place for xen heap.  */
+    efi_memmap_walk(filter_rsvd_memory, find_xenheap_start);
+    xenheap_phys_end = xenheap_phys_start + ((u64)opt_xenheap_megabytes << 20);
+    printk("xen pstart: 0x%lx, xen_heap: 0x%lx - 0x%lx\n",
+           xen_pstart, xenheap_phys_start, xenheap_phys_end);
+
+    /* Init and fill the boot allocator.  */
+    xen_heap_start = __va(init_boot_allocator(xenheap_phys_start));
+    efi_memmap_walk(filter_rsvd_memory, ia64_init_boot_pages);
+
     efi_memmap_walk(xen_count_pages, &nr_pages);
-
     printk("System RAM: %luMB (%lukB)\n",
            nr_pages >> (20 - PAGE_SHIFT),
            nr_pages << (PAGE_SHIFT - 10));
@@ -531,6 +431,8 @@ printk("num_online_cpus=%d, max_cpus=%d\

     schedulers_start();

+    printf ("unpause dom0\n");
+
     domain_unpause_by_systemcontroller(dom0);

     startup_cpu_idle_loop();
diff -r 869cc1f44e52 xen/include/asm-ia64/config.h
--- a/xen/include/asm-ia64/config.h	Thu Sep 28 12:47:49 2006 +0200
+++ b/xen/include/asm-ia64/config.h	Fri Sep 29 12:12:24 2006 +0200
@@ -37,6 +37,17 @@
 #define MAX_DMADOM_PFN  (0x7FFFFFFFUL >> PAGE_SHIFT) /* 31 addressable bits */

+#undef CONFIG_XOX
+
+#ifdef CONFIG_XOX
+/* Can run Xen within VTi */
+#define CONFIG_XEN_VA_SZ 56
+#undef CONFIG_MANGLE_RID_1_3
+#else
+#define CONFIG_XEN_VA_SZ 59
+#define CONFIG_MANGLE_RID_1_3
+#endif
+
 /* If PERFC is used, include privop maps.  */
 #ifdef PERF_COUNTERS
 #define CONFIG_PRIVOP_ADDRS
@@ -69,12 +80,6 @@ typedef unsigned long paddr_t;

 // FIXME?: x86-ism used in xen/mm.h
 #define LOCK_PREFIX
-
-extern unsigned long xenheap_phys_end;
-extern unsigned long xen_pstart;
-extern unsigned long xenheap_size;
-//extern struct domain *dom0;
-extern unsigned long dom0_size;

 // from linux/include/linux/mm.h
 extern struct page_info *mem_map;
diff -r 869cc1f44e52 xen/include/asm-ia64/mm.h
--- a/xen/include/asm-ia64/mm.h	Thu Sep 28 12:47:49 2006 +0200
+++ b/xen/include/asm-ia64/mm.h	Fri Sep 29 12:12:24 2006 +0200
@@ -114,10 +114,13 @@ struct page_info
 /* 29-bit count of references to this frame. */
 #define PGC_count_mask      ((1U<<29)-1)

+extern void *xen_heap_start;
+extern unsigned long xenheap_phys_end;
+extern unsigned long xen_pstart;
+
 #define IS_XEN_HEAP_FRAME(_pfn) ((page_to_maddr(_pfn) < xenheap_phys_end) \
                                  && (page_to_maddr(_pfn) >= xen_pstart))

-extern void *xen_heap_start;
 #define __pickle(a)    ((unsigned long)a - (unsigned long)xen_heap_start)
 #define __unpickle(a)  (void *)(a + xen_heap_start)
diff -r 869cc1f44e52 xen/include/asm-ia64/regionreg.h
--- a/xen/include/asm-ia64/regionreg.h	Thu Sep 28 12:47:49 2006 +0200
+++ b/xen/include/asm-ia64/regionreg.h	Fri Sep 29 12:12:24 2006 +0200
@@ -1,5 +1,7 @@
 #ifndef _REGIONREG_H_
 #define _REGIONREG_H_
+#include
+
 #define XEN_DEFAULT_RID         7
 #define IA64_MIN_IMPL_RID_MSB   17
 #define _REGION_ID(x)   ({ia64_rr _v; _v.rrval = (long) (x); _v.rid;})
@@ -48,7 +50,8 @@ static inline unsigned long
 static inline unsigned long
 vmMangleRID(unsigned long RIDVal)
 {
-	union bits64 { unsigned char bytes[4]; unsigned long uint; };
+#ifdef CONFIG_MANGLE_RID_1_3
+	union bits64 { unsigned char bytes[8]; unsigned long uint; };

 	union bits64 t;
 	unsigned char tmp;
@@ -59,6 +62,9 @@ vmMangleRID(unsigned long RIDVal)
 	t.bytes[3] = tmp;

 	return t.uint;
+#else
+	return RIDVal;
+#endif
 }

 // since vmMangleRID is symmetric, use it for unmangling also
diff -r 869cc1f44e52 xen/include/asm-ia64/xenpage.h
--- a/xen/include/asm-ia64/xenpage.h	Thu Sep 28 12:47:49 2006 +0200
+++ b/xen/include/asm-ia64/xenpage.h	Fri Sep 29 12:12:24 2006 +0200
@@ -33,10 +33,11 @@ extern int ia64_mfn_valid (unsigned long
 #define mfn_to_virt(mfn)	maddr_to_virt(mfn << PAGE_SHIFT)

 #ifndef __ASSEMBLY__
+#include <asm/xensystem.h>
 typedef union xen_va {
 	struct {
-		unsigned long off : 60;
-		unsigned long reg : 4;
+		unsigned long off : XEN_VA_QUAD_SZ;
+		unsigned long reg : 64 - XEN_VA_QUAD_SZ;
 	} f;
 	unsigned long l;
 	void *p;
@@ -72,9 +73,9 @@ static inline int get_order_from_shift(u

 #undef __pa
 #undef __va
 #define __pa(x)	({xen_va _v; _v.l = (long) (x); _v.f.reg = 0; _v.l;})
-#define __va(x)	({xen_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.p;})
+#define __va(x)	({xen_va _v; _v.l = (long) (x); _v.f.reg = XEN_CACHE_QUAD_MSBS; _v.p;})
 /* It is sometimes very useful to have unsigned long as result.  */
-#define __va_ul(x)	({xen_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.l;})
+#define __va_ul(x)	({xen_va _v; _v.l = (long) (x); _v.f.reg = XEN_CACHE_QUAD_MSBS; _v.l;})

 #endif /* _ASM_IA64_XENPAGE_H */
diff -r 869cc1f44e52 xen/include/asm-ia64/xensystem.h
--- a/xen/include/asm-ia64/xensystem.h	Thu Sep 28 12:47:49 2006 +0200
+++ b/xen/include/asm-ia64/xensystem.h	Fri Sep 29 12:12:24 2006 +0200
@@ -11,27 +11,82 @@
  *
  */
 #include
-#include
+
+#ifdef __ASSEMBLY__
+#define __IA64_L(x)	x
+#else
+#define __IA64_L(x)	x##L
+#endif

 /* Define HV space hierarchy.
    VMM memory space is protected by CPL for paravirtualized domains and
    by VA for VTi domains.  VTi imposes VA bit 60 != VA bit 59 for VMM.  */
-#define HYPERVISOR_VIRT_START	 0xe800000000000000
-#define KERNEL_START		 0xf000000004000000
-#define GATE_ADDR		 KERNEL_START
-#define DEFAULT_SHAREDINFO_ADDR	 0xf100000000000000
-#define PERCPU_ADDR		 (DEFAULT_SHAREDINFO_ADDR - PERCPU_PAGE_SIZE)
-#define VHPT_ADDR		 0xf200000000000000
+
+/* Number of bits for Xen virtual address space.  The bits above are always
+   set to ...11110 == -2.
+   Therefore if you want to use VTi, CONFIG_XEN_VA_SZ must be set to 59.
+*/
+#define XEN_VA_SZ		CONFIG_XEN_VA_SZ
+#define XEN_VA_MSBS		__IA64_L(-2)
+#define XEN_VA_MSBS_SZ		(64 - XEN_VA_SZ)
+
+/* The Xen virtual space is divided into 4 quads.
+   The first two quads are cache/nocache ident mapping.  */
+#define XEN_VA_QUAD_SZ		(XEN_VA_SZ - 2)
+#define XEN_CACHE_QUAD_MSBS	__IA64_L(-8)	/* MSBS: ...1111000 */
+#define XEN_NOCACHE_QUAD_MSBS	__IA64_L(-7)	/* MSBS: ...1111001 */
+#define XEN_MAPS_QUAD_MSBS	__IA64_L(-6)	/* MSBS: ...1111010 */
+#define XEN_UNUSED_QUAD_MSBS	__IA64_L(-5)	/* MSBS: ...1111011 */
+
+
+/* When XEN_VA_SZ==59, the values are:
+   XEN_CACHE_QUAD   = 0xf0000000_00000000
+   XEN_NOCACHE_QUAD = 0xf2000000_00000000
+   XEN_MAPS_QUAD    = 0xf4000000_00000000
+   XEN_UNUSED_QUAD  = 0xf6000000_00000000
+*/
+#define XEN_CACHE_QUAD		(XEN_CACHE_QUAD_MSBS << XEN_VA_QUAD_SZ)
+#define XEN_NOCACHE_QUAD	(XEN_NOCACHE_QUAD_MSBS << XEN_VA_QUAD_SZ)
+#define XEN_MAPS_QUAD		(XEN_MAPS_QUAD_MSBS << XEN_VA_QUAD_SZ)
+
+/* The MAPS quad is divided into 4 sub zones (values when XEN_VA_SZ=59):
+   KERNEL_START_ADDR       = 0xf4000000_00000000
+   DEFAULT_SHAREDINFO_ADDR = 0xf4800000_00000000
+   VHPT_ADDR               = 0xf5000000_00000000
+   VIRT_FRAME_TABLE_ADDR   = 0xf5800000_00000000
+*/
+#define XEN_VA_MAPS_SZ		(XEN_VA_QUAD_SZ - 2)
+#define XEN_MAPS_SUB_ADDR(n)	(XEN_MAPS_QUAD + ((n) << XEN_VA_MAPS_SZ))
+
+#define KERNEL_START_ADDR	XEN_MAPS_SUB_ADDR(__IA64_L(0x0))
+#define DEFAULT_SHAREDINFO_ADDR	XEN_MAPS_SUB_ADDR(__IA64_L(0x1))
+#define VHPT_ADDR		XEN_MAPS_SUB_ADDR(__IA64_L(0x2))
+#define PERCPU_ADDR		(VHPT_ADDR - PERCPU_PAGE_SIZE)
 #ifdef CONFIG_VIRTUAL_FRAME_TABLE
-#define VIRT_FRAME_TABLE_ADDR	 0xf300000000000000
-#define VIRT_FRAME_TABLE_END	 0xf400000000000000
+#define VIRT_FRAME_TABLE_ADDR	XEN_MAPS_SUB_ADDR(__IA64_L(0x3))
+#define VIRT_FRAME_TABLE_END	XEN_MAPS_SUB_ADDR(__IA64_L(0x4))
+#define VIRT_FRAME_TABLE_MSBS	((XEN_MAPS_QUAD_MSBS << 2) + 0x3)
 #endif
-#define HYPERVISOR_VIRT_END	 0xf800000000000000

-#define PAGE_OFFSET	__IA64_UL_CONST(0xf000000000000000)
-#define __IA64_UNCACHED_OFFSET	0xe800000000000000UL
+#define HYPERVISOR_VIRT_START	XEN_CACHE_QUAD
+#define KERNEL_START		(KERNEL_START_ADDR + 0x4000000)
+#define GATE_ADDR		KERNEL_START
+#define PAGE_OFFSET		XEN_CACHE_QUAD
+#define __IA64_UNCACHED_OFFSET	XEN_NOCACHE_QUAD
+#define HYPERVISOR_VIRT_END	(__IA64_L(-4) << XEN_VA_QUAD_SZ)

-#define IS_VMM_ADDRESS(addr) ((((addr) >> 60) ^ ((addr) >> 59)) & 1)
+/* Return true iff ADDR belongs to Xen virtual address space.  */
+#define IS_VMM_ADDRESS(addr) (((signed long)(addr)>>XEN_VA_SZ) == XEN_VA_MSBS)
+
+#ifdef __ASSEMBLY__
+/* Macro to convert virtual address (from cache and nocache quads)
+   to physical address.  */
+#define XEN_VA_TO_PA(dest,src)	dep.z dest=src,0,XEN_VA_QUAD_SZ
+
+/* Macro to convert physical address to a (cache quad) virtual address.  */
+/* Note: we cheat a little bit: we assume MSBs bits are all set to 0.  */
+#define XEN_PA_TO_VA(dest,src)	\
+	dep dest=-1,src,XEN_VA_SZ+1,64-XEN_VA_SZ-1
+#endif

 #endif // _ASM_IA64_XENSYSTEM_H
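
As a sanity check of the address-space layout introduced in xensystem.h above, here is a small standalone C sketch (a reviewer's illustration, not part of the patch). It recomputes the XEN_*_QUAD constants for CONFIG_XEN_VA_SZ == 59, models the two assembler helpers XEN_VA_TO_PA/XEN_PA_TO_VA as a plain mask and or, and checks the results against the values quoted in the patch comments. The macro names mirror the patch; the lowercase helper functions are hypothetical stand-ins for the asm macros.

/* Standalone, illustrative only: recompute the constants from the new
 * xensystem.h for CONFIG_XEN_VA_SZ == 59 and verify the documented values.
 * Builds as ordinary user-space C (cc -o layout layout.c && ./layout). */
#include <stdio.h>
#include <assert.h>

#define CONFIG_XEN_VA_SZ      59
#define XEN_VA_SZ             CONFIG_XEN_VA_SZ
#define XEN_VA_MSBS           (-2L)                      /* ...11110 */
#define XEN_VA_QUAD_SZ        (XEN_VA_SZ - 2)
#define XEN_CACHE_QUAD_MSBS   (-8L)                      /* ...1111000 */
#define XEN_NOCACHE_QUAD_MSBS (-7L)                      /* ...1111001 */
#define XEN_MAPS_QUAD_MSBS    (-6L)                      /* ...1111010 */

/* Cast to unsigned long so the left shift of a negative MSBS value is
 * well defined in plain C (the patch does this in cpp/asm context). */
#define XEN_CACHE_QUAD        ((unsigned long)XEN_CACHE_QUAD_MSBS << XEN_VA_QUAD_SZ)
#define XEN_NOCACHE_QUAD      ((unsigned long)XEN_NOCACHE_QUAD_MSBS << XEN_VA_QUAD_SZ)
#define XEN_MAPS_QUAD         ((unsigned long)XEN_MAPS_QUAD_MSBS << XEN_VA_QUAD_SZ)

#define XEN_VA_MAPS_SZ        (XEN_VA_QUAD_SZ - 2)
#define XEN_MAPS_SUB_ADDR(n)  (XEN_MAPS_QUAD + ((unsigned long)(n) << XEN_VA_MAPS_SZ))

/* C models of the assembler macros: XEN_VA_TO_PA (dep.z) keeps only the low
 * XEN_VA_QUAD_SZ bits; XEN_PA_TO_VA (dep) sets the bits above XEN_VA_SZ+1. */
static unsigned long xen_va_to_pa(unsigned long va)
{
    return va & ((1UL << XEN_VA_QUAD_SZ) - 1);
}

static unsigned long xen_pa_to_va(unsigned long pa)
{
    return pa | ~((1UL << (XEN_VA_SZ + 1)) - 1);
}

static int is_vmm_address(unsigned long addr)
{
    return ((signed long)addr >> XEN_VA_SZ) == XEN_VA_MSBS;
}

int main(void)
{
    /* Values quoted in the xensystem.h comments for XEN_VA_SZ == 59. */
    assert(XEN_CACHE_QUAD   == 0xf000000000000000UL);
    assert(XEN_NOCACHE_QUAD == 0xf200000000000000UL);
    assert(XEN_MAPS_QUAD    == 0xf400000000000000UL);
    assert(XEN_MAPS_SUB_ADDR(1) == 0xf480000000000000UL);  /* shared_info  */
    assert(XEN_MAPS_SUB_ADDR(2) == 0xf500000000000000UL);  /* VHPT         */
    assert(XEN_MAPS_SUB_ADDR(3) == 0xf580000000000000UL);  /* frame table  */

    /* Identity mapping: cache and nocache quads resolve to the same PA,
     * and PA -> VA lands in the cache quad. */
    assert(xen_va_to_pa(XEN_CACHE_QUAD + 0x4000000) == 0x4000000UL);
    assert(xen_va_to_pa(XEN_NOCACHE_QUAD + 0x4000000) == 0x4000000UL);
    assert(xen_pa_to_va(0x4000000UL) == XEN_CACHE_QUAD + 0x4000000);

    /* The whole ...11110 prefix is recognised as VMM space. */
    assert(is_vmm_address(XEN_CACHE_QUAD));
    assert(is_vmm_address(XEN_MAPS_QUAD));
    assert(!is_vmm_address(0x2000000000000000UL));

    printf("all layout checks passed for XEN_VA_SZ=%d\n", XEN_VA_SZ);
    return 0;
}

Because every constant derives from CONFIG_XEN_VA_SZ, rebuilding the sketch with the CONFIG_XOX value of 56 only changes the 0xf... prefixes; per the header comment, VTi requires VA bit 60 != bit 59 for the VMM, which holds only with XEN_VA_SZ == 59, hence the check added in vmx_init.c.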