ChangeSet 1.1327.1.14, 2005/04/29 16:09:13-06:00, djm@xxxxxxxxxxxxxxx
More code cleanup
b/xen/arch/ia64/Makefile | 2
b/xen/arch/ia64/ivt.S | 1870 ++++++++++++++++++++++++++
b/xen/arch/ia64/patch/linux-2.6.11/efi.c | 29
b/xen/arch/ia64/patch/linux-2.6.11/entry.S | 170 +-
b/xen/arch/ia64/patch/linux-2.6.11/irq_ia64.c | 103 -
b/xen/arch/ia64/tools/mkbuildtree | 7
b/xen/arch/ia64/xenirq.c | 77 +
xen/arch/ia64/patch/linux-2.6.11/bootmem.h | 16
xen/arch/ia64/patch/linux-2.6.11/hpsim_ssc.h | 30
xen/arch/ia64/patch/linux-2.6.11/ivt.S | 532 -------
xen/arch/ia64/patch/linux-2.6.11/lds.S | 21
11 files changed, 2048 insertions(+), 809 deletions(-)
diff -Nru a/xen/arch/ia64/Makefile b/xen/arch/ia64/Makefile
--- a/xen/arch/ia64/Makefile 2005-05-03 04:06:42 -04:00
+++ b/xen/arch/ia64/Makefile 2005-05-03 04:06:42 -04:00
@@ -7,7 +7,7 @@
machvec.o dom0_ops.o domain.o \
idle0_task.o pal.o hpsim.o efi.o efi_stub.o ivt.o mm_contig.o \
xenmem.o sal.o cmdline.o mm_init.o tlb.o smpboot.o \
- extable.o linuxextable.o \
+ extable.o linuxextable.o xenirq.o \
regionreg.o entry.o unaligned.o privop.o vcpu.o \
irq_ia64.o irq_lsapic.o vhpt.o xenasm.o dom_fw.o
# perfmon.o
diff -Nru a/xen/arch/ia64/ivt.S b/xen/arch/ia64/ivt.S
--- /dev/null Wed Dec 31 16:00:00 1969
+++ b/xen/arch/ia64/ivt.S 2005-05-03 04:06:42 -04:00
@@ -0,0 +1,1870 @@
+
+#ifdef XEN
+//#define CONFIG_DISABLE_VHPT // FIXME: change when VHPT is enabled??
+// these are all hacked out for now as the entire IVT
+// will eventually be replaced... just want to use it
+// for startup code to handle TLB misses
+//#define ia64_leave_kernel 0
+//#define ia64_ret_from_syscall 0
+//#define ia64_handle_irq 0
+//#define ia64_fault 0
+#define ia64_illegal_op_fault 0
+#define ia64_prepare_handle_unaligned 0
+#define ia64_bad_break 0
+#define ia64_trace_syscall 0
+#define sys_call_table 0
+#define sys_ni_syscall 0
+#include <asm/vhpt.h>
+#endif
+/*
+ * arch/ia64/kernel/ivt.S
+ *
+ * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
+ * Stephane Eranian <eranian@xxxxxxxxxx>
+ * David Mosberger <davidm@xxxxxxxxxx>
+ * Copyright (C) 2000, 2002-2003 Intel Co
+ * Asit Mallick <asit.k.mallick@xxxxxxxxx>
+ * Suresh Siddha <suresh.b.siddha@xxxxxxxxx>
+ * Kenneth Chen <kenneth.w.chen@xxxxxxxxx>
+ * Fenghua Yu <fenghua.yu@xxxxxxxxx>
+ *
+ * 00/08/23 Asit Mallick <asit.k.mallick@xxxxxxxxx> TLB handling for SMP
+ * 00/12/20 David Mosberger-Tang <davidm@xxxxxxxxxx> DTLB/ITLB handler now uses virtual PT.
+ */
+/*
+ * This file defines the interruption vector table used by the CPU.
+ * It does not include one entry per possible cause of interruption.
+ *
+ * The first 20 entries of the table contain 64 bundles each while the
+ * remaining 48 entries contain only 16 bundles each.
+ *
+ * The 64 bundles are used to allow inlining the whole handler for critical
+ * interruptions like TLB misses.
+ *
+ * For each entry, the comment is as follows:
+ *
+ * // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
+ * entry offset ----/ / / / /
+ * entry number ---------/ / / /
+ * size of the entry -------------/ / /
+ * vector name -------------------------------------/ /
+ * interruptions triggering this vector ----------------------/
+ *
+ * The table is 32KB in size and must be aligned on 32KB boundary.
+ * (The CPU ignores the 15 lower bits of the address)
+ *
+ * Table is based upon EAS2.6 (Oct 1999)
+ */
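As a quick cross-check of the layout described in that comment: a bundle is 16 bytes, so the first 20 entries occupy 1 KB each and the remaining 48 occupy 256 bytes each, which is what makes the table 32 KB. The small stand-alone C program below (not part of the patch) reproduces the per-entry offsets quoted in the comments, e.g. entry 7 at 0x1c00:

#include <stdio.h>

/* Offset of IVT entry 'entry' from the start of the table:
 * entries 0-19 are 64 bundles (1 KB) each, entries 20-67 are
 * 16 bundles (256 bytes) each; a bundle is 16 bytes. */
static unsigned long ivt_entry_offset(unsigned int entry)
{
	if (entry < 20)
		return entry * 64UL * 16;
	return 20 * 64UL * 16 + (entry - 20) * 16UL * 16;
}

int main(void)
{
	printf("entry 7 starts at 0x%lx\n", ivt_entry_offset(7));  /* 0x1c00 */
	printf("table size is 0x%lx\n", ivt_entry_offset(68));     /* 0x8000 = 32 KB */
	return 0;
}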
+
+#include <linux/config.h>
+
+#include <asm/asmmacro.h>
+#include <asm/break.h>
+#include <asm/ia32.h>
+#include <asm/kregs.h>
+#include <asm/offsets.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/ptrace.h>
+#include <asm/system.h>
+#include <asm/thread_info.h>
+#include <asm/unistd.h>
+#include <asm/errno.h>
+
+#if 1
+# define PSR_DEFAULT_BITS psr.ac
+#else
+# define PSR_DEFAULT_BITS 0
+#endif
+
+#if 0
+ /*
+	 * This lets you track the last eight faults that occurred on the CPU. Make sure ar.k2 isn't
+	 * needed for something else before enabling this...
+	 */
+# define DBG_FAULT(i) mov r16=ar.k2;; shl r16=r16,8;; add r16=(i),r16;;mov ar.k2=r16
+#else
+# define DBG_FAULT(i)
+#endif
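When enabled, DBG_FAULT(i) above keeps the last eight vector numbers packed one byte each in ar.k2: the old history is shifted left by 8 and the new vector number lands in the low byte. A user-space sketch of that encoding (not part of the patch; a plain uint64_t stands in for ar.k2):

#include <stdint.h>
#include <stdio.h>

static uint64_t fault_history;	/* stands in for ar.k2 */

/* Mirror of DBG_FAULT(i): shift the history left one byte and
 * record the new vector number in the low byte. */
static void record_fault(unsigned int vector)
{
	fault_history = (fault_history << 8) + (vector & 0xff);
}

int main(void)
{
	record_fault(0);	/* e.g. VHPT Translation */
	record_fault(7);	/* e.g. Data Key Miss */
	for (int i = 0; i < 8; i++)	/* byte 0 holds the most recent fault */
		printf("fault[-%d] = vector %u\n", i,
		       (unsigned int)((fault_history >> (8 * i)) & 0xff));
	return 0;
}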
+
+#define MINSTATE_VIRT /* needed by minstate.h */
+#include "minstate.h"
+
+#define FAULT(n)							\
+	mov r31=pr;							\
+	mov r19=n;;			/* prepare to save predicates */	\
+	br.sptk.many dispatch_to_fault_handler
+
+#ifdef XEN
+#define REFLECT(n)							\
+	mov r31=pr;							\
+	mov r19=n;;			/* prepare to save predicates */	\
+	br.sptk.many dispatch_reflection
+#endif
+
+ .section .text.ivt,"ax"
+
+ .align 32768 // align on 32KB boundary
+ .global ia64_ivt
+ia64_ivt:
+/////////////////////////////////////////////////////////////////////////////////////////
+// 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
+ENTRY(vhpt_miss)
+ DBG_FAULT(0)
+ /*
+	 * The VHPT vector is invoked when the TLB entry for the virtual page table
+	 * is missing. This happens only as a result of a previous
+	 * (the "original") TLB miss, which may either be caused by an instruction
+	 * fetch or a data access (or non-access).
+	 *
+	 * What we do here is normal TLB miss handling for the _original_ miss, followed
+	 * by inserting the TLB entry for the virtual page table page that the VHPT
+ * walker was attempting to access. The latter gets inserted as long
+ * as both L1 and L2 have valid mappings for the faulting address.
+ * The TLB entry for the original miss gets inserted only if
+ * the L3 entry indicates that the page is present.
+ *
+ * do_page_fault gets invoked in the following cases:
+ * - the faulting virtual address uses unimplemented address bits
+ * - the faulting virtual address has no L1, L2, or L3 mapping
+ */
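A toy C rendering of the decision flow spelled out in that comment, using invented names and types (not part of the patch); only the control flow is meant to match the predicated assembly that follows:

#include <stdbool.h>
#include <stdint.h>

#define TOY_PAGE_P	0x1ULL	/* stand-in for the _PAGE_P present bit */

enum toy_result { TOY_PAGE_FAULT, TOY_ITLB_INSERT, TOY_DTLB_INSERT };

/* l1e/l2e/l3e are the three page-table entries found for the faulting
 * address (0 meaning "no mapping at this level"); insn_miss corresponds
 * to the cr.isr test that picks itc.i over itc.d. */
static enum toy_result
toy_vhpt_miss(uint64_t l1e, uint64_t l2e, uint64_t l3e, bool insn_miss)
{
	if (l1e == 0 || l2e == 0)	/* missing L1 or L2: do_page_fault */
		return TOY_PAGE_FAULT;
	if (!(l3e & TOY_PAGE_P))	/* L3 present bit clear: do_page_fault */
		return TOY_PAGE_FAULT;

	/* Otherwise the original miss is satisfied with itc.i or itc.d, and a
	 * data translation for the virtual page table page itself is inserted
	 * as well before returning with rfi. */
	return insn_miss ? TOY_ITLB_INSERT : TOY_DTLB_INSERT;
}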
+	mov r16=cr.ifa				// get address that caused the TLB miss
+#ifdef CONFIG_HUGETLB_PAGE
+ movl r18=PAGE_SHIFT
+ mov r25=cr.itir
+#endif
+ ;;
+	rsm psr.dt				// use physical addressing for data
+ mov r31=pr // save the predicate registers
+ mov r19=IA64_KR(PT_BASE) // get page table base address
+ shl r21=r16,3 // shift bit 60 into sign bit
+	shr.u r17=r16,61			// get the region number into r17
+ ;;
+ shr r22=r21,3
+#ifdef CONFIG_HUGETLB_PAGE
+ extr.u r26=r25,2,6
+ ;;
+ cmp.ne p8,p0=r18,r26
+ sub r27=r26,r18
+ ;;
+(p8) dep r25=r18,r25,2,6
+(p8) shr r22=r22,r27
+#endif
+ ;;
+	cmp.eq p6,p7=5,r17			// is IFA pointing into region 5?
+	shr.u r18=r22,PGDIR_SHIFT		// get bits 33-63 of the faulting address
+ ;;
+(p7)	dep r17=r17,r19,(PAGE_SHIFT-3),3	// put region number bits in place
+
+ srlz.d
+	LOAD_PHYSICAL(p6, r19, swapper_pg_dir)	// region 5 is rooted at swapper_pg_dir
+
+ .pred.rel "mutex", p6, p7
+(p6) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
+(p7) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
+ ;;
+(p6) dep r17=r18,r19,3,(PAGE_SHIFT-3) // r17=PTA + IFA(33,42)*8
+(p7)	dep r17=r18,r17,3,(PAGE_SHIFT-6)	// r17=PTA + (((IFA(61,63) << 7) | IFA(33,39))*8)
+	cmp.eq p7,p6=0,r21			// unused address bits all zeroes?
+ shr.u r18=r22,PMD_SHIFT // shift L2 index into position
+ ;;
+ ld8 r17=[r17] // fetch the L1 entry (may be 0)
+ ;;
+(p7) cmp.eq p6,p7=r17,r0 // was L1 entry NULL?
+	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// compute address of L2 page table entry
+ ;;
+(p7) ld8 r20=[r17] // fetch the L2 entry (may be 0)
+ shr.u r19=r22,PAGE_SHIFT // shift L3 index into position
+ ;;
+(p7) cmp.eq.or.andcm p6,p7=r20,r0 // was L2 entry NULL?
+	dep r21=r19,r20,3,(PAGE_SHIFT-3)	// compute address of L3 page table entry
+ ;;
+(p7) ld8 r18=[r21] // read the L3 PTE
+	mov r19=cr.isr				// cr.isr bit 0 tells us if this is an insn miss
+ ;;
+(p7) tbit.z p6,p7=r18,_PAGE_P_BIT // page present bit cleared?
+	mov r22=cr.iha				// get the VHPT address that caused the TLB miss
+ ;; // avoid RAW on p7
+(p7)	tbit.nz.unc p10,p11=r19,32		// is it an instruction TLB miss?
+	dep r23=0,r20,0,PAGE_SHIFT		// clear low bits to get page address
+ ;;
+(p10)	itc.i r18				// insert the instruction TLB entry
+(p11) itc.d r18 // insert the data TLB entry
+(p6)	br.cond.spnt.many page_fault		// handle bad address/page not present (page fault)
+ mov cr.ifa=r22
+
+#ifdef CONFIG_HUGETLB_PAGE
+(p8)	mov cr.itir=r25				// change to default page-size for VHPT
+#endif
+
+ /*
+	 * Now compute and insert the TLB entry for the virtual page table. We never
+	 * execute in a page table page so there is no need to set the exception deferral
+	 * bit.
+ */
+ adds r24=__DIRTY_BITS_NO_ED|_PAGE_PL_0|_PAGE_AR_RW,r23
+ ;;
+(p7) itc.d r24
+ ;;
+#ifdef CONFIG_SMP
+ /*
+	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
+ * cannot possibly affect the following loads:
+ */
+ dv_serialize_data
+
+ /*
+	 * Re-check L2 and L3 pagetable. If they changed, we may have received a ptc.g
+	 * between reading the pagetable and the "itc". If so, flush the entry we
+ * inserted and retry.
+ */
+ ld8 r25=[r21] // read L3 PTE again
+ ld8 r26=[r17] // read L2 entry again
+ ;;
+ cmp.ne p6,p7=r26,r20 // did L2 entry change
+ mov r27=PAGE_SHIFT<<2
+ ;;
+(p6) ptc.l r22,r27 // purge PTE page translation
+(p7) cmp.ne.or.andcm p6,p7=r25,r18 // did L3 PTE change
+ ;;
+(p6) ptc.l r16,r27 // purge translation
+#endif
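The CONFIG_SMP block above is the usual lockless insert-then-verify pattern. A rough C sketch of the same recheck, with invented helpers (tlb_insert and tlb_purge do not exist in this source), not part of the patch:

#include <stdint.h>

void tlb_insert(uint64_t pte);				/* stands in for itc.i / itc.d */
void tlb_purge(uint64_t vaddr, unsigned int log2_size);	/* stands in for ptc.l */

/* l2e/l3e point at the two page-table entries read earlier; if a concurrent
 * ptc.g changed either one between those loads and the TLB insert, purge
 * what was just inserted so the restarted access simply faults again. */
static void insert_with_recheck(volatile uint64_t *l2e, volatile uint64_t *l3e,
				uint64_t ifa, uint64_t vhpt_addr,
				unsigned int page_shift)
{
	uint64_t l2 = *l2e, l3 = *l3e;		/* values the entries were built from */

	tlb_insert(l3);				/* the itc.i/itc.d (plus the itc.d for the VPT page) */

	if (*l2e != l2) {			/* L2 changed: purge both insertions */
		tlb_purge(vhpt_addr, page_shift);
		tlb_purge(ifa, page_shift);
	} else if (*l3e != l3) {		/* only L3 changed: purge the new mapping */
		tlb_purge(ifa, page_shift);
	}
	/* rfi then re-executes the faulting instruction, which re-faults cleanly */
}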
+
+ mov pr=r31,-1 // restore predicate registers
+ rfi