# HG changeset patch
# User Alex Williamson <alex.williamson@xxxxxx>
# Date 1190049981 21600
# Node ID b91d16ab68be801382c4b7ef99fa6b6929856bae
# Parent 0902e4aae8100c8130617ec7170cf26a6692bacb
[IA64] Cleanup: remove unused declarations, add static and reindent
Signed-off-by: Tristan Gingold <tgingold@xxxxxxx>
---
xen/arch/ia64/vmx/vmmu.c | 67 ------------
xen/arch/ia64/vmx/vtlb.c | 145 ++++++++++++++-------------
xen/include/asm-ia64/linux-xen/asm/pgtable.h | 1 +
xen/include/asm-ia64/vmmu.h | 93 -----------------
4 files changed, 82 insertions(+), 224 deletions(-)
diff -r 0902e4aae810 -r b91d16ab68be xen/arch/ia64/vmx/vmmu.c
--- a/xen/arch/ia64/vmx/vmmu.c Mon Sep 17 11:08:46 2007 -0600
+++ b/xen/arch/ia64/vmx/vmmu.c Mon Sep 17 11:26:21 2007 -0600
@@ -66,7 +66,7 @@ custom_param("vti_vhpt_size", parse_vhpt
* Input:
* d:
*/
-u64 get_mfn(struct domain *d, u64 gpfn)
+static u64 get_mfn(struct domain *d, u64 gpfn)
{
// struct domain *d;
u64 xen_gppn, xen_mppn, mpfn;
@@ -91,68 +91,6 @@ u64 get_mfn(struct domain *d, u64 gpfn)
mpfn = mpfn | (((1UL <<(PAGE_SHIFT-ARCH_PAGE_SHIFT))-1)&gpfn);
return mpfn;
-}
-
-/*
- * The VRN bits of va stand for which rr to get.
- */
-//ia64_rr vmmu_get_rr(struct vcpu *vcpu, u64 va)
-//{
-// ia64_rr vrr;
-// vcpu_get_rr(vcpu, va, &vrr.rrval);
-// return vrr;
-//}
-
-/*
-void recycle_message(thash_cb_t *hcb, u64 para)
-{
- if(hcb->ht == THASH_VHPT)
- {
- printk("ERROR : vhpt recycle happenning!!!\n");
- }
- printk("hcb=%p recycled with %lx\n",hcb,para);
-}
- */
-
-/*
- * Purge all guest TCs in logical processor.
- * Instead of purging all LP TCs, we should only purge
- * TCs that belong to this guest.
- */
-void
-purge_machine_tc_by_domid(domid_t domid)
-{
-#ifndef PURGE_GUEST_TC_ONLY
- // purge all TCs
- struct ia64_pal_retval result;
- u64 addr;
- u32 count1,count2;
- u32 stride1,stride2;
- u32 i,j;
- u64 psr;
-
- result = ia64_pal_call_static(PAL_PTCE_INFO,0,0,0, 0);
- if ( result.status != 0 ) {
- panic ("PAL_PTCE_INFO failed\n");
- }
- addr = result.v0;
- count1 = HIGH_32BITS(result.v1);
- count2 = LOW_32BITS (result.v1);
- stride1 = HIGH_32BITS(result.v2);
- stride2 = LOW_32BITS (result.v2);
-
- local_irq_save(psr);
- for (i=0; i<count1; i++) {
- for (j=0; j<count2; j++) {
- ia64_ptce(addr);
- addr += stride2;
- }
- addr += stride1;
- }
- local_irq_restore(psr);
-#else
- // purge all TCs belong to this guest.
-#endif
}
static int init_domain_vhpt(struct vcpu *v)
@@ -313,7 +251,8 @@ fetch_code(VCPU *vcpu, u64 gip, IA64_BUN
}
if( gpip){
mfn = gmfn_to_mfn(vcpu->domain, gpip >>PAGE_SHIFT);
- if( mfn == INVALID_MFN ) panic_domain(vcpu_regs(vcpu),"fetch_code: invalid memory\n");
+ if (mfn == INVALID_MFN)
+ panic_domain(vcpu_regs(vcpu), "fetch_code: invalid memory\n");
maddr = (mfn << PAGE_SHIFT) | (gpip & (PAGE_SIZE - 1));
}else{
tlb = vhpt_lookup(gip);
diff -r 0902e4aae810 -r b91d16ab68be xen/arch/ia64/vmx/vtlb.c
--- a/xen/arch/ia64/vmx/vtlb.c Mon Sep 17 11:08:46 2007 -0600
+++ b/xen/arch/ia64/vmx/vtlb.c Mon Sep 17 11:26:21 2007 -0600
@@ -69,9 +69,9 @@ __is_tr_overlap(thash_data_t *trp, u64 r
return 0;
}
sa1 = trp->vadr;
- ea1 = sa1 + PSIZE(trp->ps) -1;
+ ea1 = sa1 + PSIZE(trp->ps) - 1;
eva -= 1;
- if ( (sva>ea1) || (sa1>eva) )
+ if (sva > ea1 || sa1 > eva)
return 0;
else
return 1;
@@ -85,10 +85,11 @@ static thash_data_t *__vtr_lookup(VCPU *
int i;
u64 rid;
vcpu_get_rr(vcpu, va, &rid);
- rid = rid & RR_RID_MASK;;
+ rid &= RR_RID_MASK;
if (is_data) {
if (vcpu_quick_region_check(vcpu->arch.dtr_regions,va)) {
- for (trp =(thash_data_t *) vcpu->arch.dtrs,i=0; i<NDTRS; i++, trp++) {
+ trp = (thash_data_t *)vcpu->arch.dtrs;
+ for (i = 0; i < NDTRS; i++, trp++) {
if (__is_tr_translated(trp, rid, va)) {
return trp;
}
@@ -97,7 +98,8 @@ static thash_data_t *__vtr_lookup(VCPU *
}
else {
if (vcpu_quick_region_check(vcpu->arch.itr_regions,va)) {
- for (trp =(thash_data_t *) vcpu->arch.itrs,i=0; i<NITRS; i++, trp++) {
+ trp = (thash_data_t *)vcpu->arch.itrs;
+ for (i = 0; i < NITRS; i++, trp++) {
if (__is_tr_translated(trp, rid, va)) {
return trp;
}
@@ -107,35 +109,34 @@ static thash_data_t *__vtr_lookup(VCPU *
return NULL;
}
-
static void thash_recycle_cch(thash_cb_t *hcb, thash_data_t *hash)
{
thash_data_t *p, *q;
- int i=0;
-
- p=hash;
- for(i=0; i < MAX_CCN_DEPTH; i++){
- p=p->next;
- }
- q=hash->next;
- hash->len=0;
- hash->next=0;
- p->next=hcb->cch_freelist;
- hcb->cch_freelist=q;
-}
-
-
-
+ int i = 0;
+
+ p = hash;
+ for (i = 0; i < MAX_CCN_DEPTH; i++) {
+ p = p->next;
+ }
+ q = hash->next;
+ hash->len = 0;
+ hash->next = 0;
+ p->next = hcb->cch_freelist;
+ hcb->cch_freelist = q;
+}
static void vmx_vhpt_insert(thash_cb_t *hcb, u64 pte, u64 itir, u64 ifa)
{
u64 tag;
ia64_rr rr;
thash_data_t *head, *cch;
+
pte = pte & ~PAGE_FLAGS_RV_MASK;
rr.rrval = ia64_get_rr(ifa);
head = (thash_data_t *)ia64_thash(ifa);
tag = ia64_ttag(ifa);
+
+ /* Find a free (ie invalid) entry. */
cch = head;
while (cch) {
if (INVALID_VHPT(cch))
@@ -143,16 +144,16 @@ static void vmx_vhpt_insert(thash_cb_t *
cch = cch->next;
}
if (cch) {
+ /* As we insert in head, copy head. */
if (cch != head) {
local_irq_disable();
cch->page_flags = head->page_flags;
cch->itir = head->itir;
- cch->etag = head->etag;
+ cch->etag = head->etag;
head->ti = 1;
local_irq_enable();
}
- }
- else{
+ } else {
if (head->len >= MAX_CCN_DEPTH) {
thash_recycle_cch(hcb, head);
cch = cch_alloc(hcb);
@@ -181,7 +182,7 @@ void thash_vhpt_insert(VCPU *v, u64 pte,
ia64_rr mrr;
mrr.rrval = ia64_get_rr(va);
- phy_pte=translate_phy_pte(v, &pte, itir, va);
+ phy_pte = translate_phy_pte(v, &pte, itir, va);
if (itir_ps(itir) >= mrr.ps) {
vmx_vhpt_insert(vcpu_get_vhpt(v), phy_pte, itir, va);
@@ -232,26 +233,31 @@ thash_data_t * vhpt_lookup(u64 va)
{
thash_data_t *hash, *head;
u64 tag, pte, itir;
+
head = (thash_data_t *)ia64_thash(va);
- hash=head;
+ hash = head;
tag = ia64_ttag(va);
- do{
- if(hash->etag == tag)
+ do {
+ if (hash->etag == tag)
break;
- hash=hash->next;
- }while(hash);
- if(hash && hash!=head){
+ hash = hash->next;
+ } while(hash);
+ if (hash && hash != head) {
+ /* Put the entry on the front of the list (ie swap hash and head). */
pte = hash->page_flags;
hash->page_flags = head->page_flags;
head->page_flags = pte;
+
tag = hash->etag;
hash->etag = head->etag;
head->etag = tag;
+
itir = hash->itir;
hash->itir = head->itir;
head->itir = itir;
+
head->len = hash->len;
- hash->len=0;
+ hash->len = 0;
return head;
}
return hash;
@@ -368,14 +374,15 @@ void thash_recycle_cch_all(thash_cb_t *h
{
int num;
thash_data_t *head;
- head=hcb->hash;
+
+ head = hcb->hash;
num = (hcb->hash_sz/sizeof(thash_data_t));
- do{
+ do {
head->len = 0;
head->next = 0;
head++;
num--;
- }while(num);
+ } while(num);
cch_mem_init(hcb);
}
@@ -409,6 +416,7 @@ void vtlb_insert(VCPU *v, u64 pte, u64 i
/* u64 gppn, ppns, ppne; */
u64 tag, len;
thash_cb_t *hcb = &v->arch.vtlb;
+
vcpu_get_rr(v, va, &vrr.rrval);
vrr.ps = itir_ps(itir);
VMX(v, psbits[va >> 61]) |= (1UL << vrr.ps);
@@ -419,13 +427,13 @@ void vtlb_insert(VCPU *v, u64 pte, u64 i
len = cch->len;
cch->page_flags = pte;
cch->len = len;
- cch->itir=itir;
- cch->etag=tag;
+ cch->itir = itir;
+ cch->etag = tag;
return;
}
cch = cch->next;
}
- if (hash_table->len>=MAX_CCN_DEPTH){
+ if (hash_table->len >= MAX_CCN_DEPTH) {
thash_recycle_cch(hcb, hash_table);
cch = cch_alloc(hcb);
}
@@ -450,11 +458,12 @@ int vtr_find_overlap(VCPU *vcpu, u64 va,
u64 end, rid;
vcpu_get_rr(vcpu, va, &rid);
- rid = rid & RR_RID_MASK;;
+ rid &= RR_RID_MASK;
end = va + PSIZE(ps);
if (is_data) {
- if (vcpu_quick_region_check(vcpu->arch.dtr_regions,va)) {
- for (trp =(thash_data_t *) vcpu->arch.dtrs,i=0; i<NDTRS; i++, trp++) {
+ if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) {
+ trp = (thash_data_t *)vcpu->arch.dtrs;
+ for (i = 0; i < NDTRS; i++, trp++) {
if (__is_tr_overlap(trp, rid, va, end )) {
return i;
}
@@ -463,7 +472,8 @@ int vtr_find_overlap(VCPU *vcpu, u64 va,
}
else {
if (vcpu_quick_region_check(vcpu->arch.itr_regions,va)) {
- for (trp =(thash_data_t *) vcpu->arch.itrs,i=0; i<NITRS; i++, trp++) {
+ trp = (thash_data_t *)vcpu->arch.itrs;
+ for (i = 0; i < NITRS; i++, trp++) {
if (__is_tr_overlap(trp, rid, va, end )) {
return i;
}
@@ -478,7 +488,7 @@ int vtr_find_overlap(VCPU *vcpu, u64 va,
*/
void thash_purge_entries(VCPU *v, u64 va, u64 ps)
{
- if(vcpu_quick_region_check(v->arch.tc_regions,va))
+ if (vcpu_quick_region_check(v->arch.tc_regions, va))
vtlb_purge(v, va, ps);
vhpt_purge(v, va, ps);
}
@@ -497,6 +507,7 @@ u64 translate_phy_pte(VCPU *v, u64 *pte,
u64 ps, ps_mask, paddr, maddr;
// ia64_rr rr;
union pte_flags phy_pte;
+
ps = itir_ps(itir);
ps_mask = ~((1UL << ps) - 1);
phy_pte.val = *pte;
@@ -536,7 +547,7 @@ int thash_purge_and_insert(VCPU *v, u64
ps = itir_ps(itir);
mrr.rrval = ia64_get_rr(ifa);
- if(VMX_DOMAIN(v)){
+ if (VMX_DOMAIN(v)) {
phy_pte = translate_phy_pte(v, &pte, itir, ifa);
if (pte & VTLB_PTE_IO)
@@ -544,18 +555,18 @@ int thash_purge_and_insert(VCPU *v, u64
vtlb_purge(v, ifa, ps);
vhpt_purge(v, ifa, ps);
if (ps == mrr.ps) {
- if(!(pte&VTLB_PTE_IO)){
+ if (!(pte & VTLB_PTE_IO)) {
vmx_vhpt_insert(&v->arch.vhpt, phy_pte, itir, ifa);
}
else{
vtlb_insert(v, pte, itir, ifa);
- vcpu_quick_region_set(PSCBX(v,tc_regions),ifa);
+ vcpu_quick_region_set(PSCBX(v, tc_regions), ifa);
}
}
else if (ps > mrr.ps) {
vtlb_insert(v, pte, itir, ifa);
- vcpu_quick_region_set(PSCBX(v,tc_regions),ifa);
- if(!(pte&VTLB_PTE_IO)){
+ vcpu_quick_region_set(PSCBX(v, tc_regions), ifa);
+ if (!(pte & VTLB_PTE_IO)) {
vmx_vhpt_insert(&v->arch.vhpt, phy_pte, itir, ifa);
}
}
@@ -572,9 +583,9 @@ int thash_purge_and_insert(VCPU *v, u64
}
else{
phy_pte = translate_phy_pte(v, &pte, itir, ifa);
- if(ps!=PAGE_SHIFT){
+ if (ps != PAGE_SHIFT) {
vtlb_insert(v, pte, itir, ifa);
- vcpu_quick_region_set(PSCBX(v,tc_regions),ifa);
+ vcpu_quick_region_set(PSCBX(v, tc_regions), ifa);
}
machine_tlb_purge(ifa, ps);
vmx_vhpt_insert(&v->arch.vhpt, phy_pte, itir, ifa);
@@ -593,13 +604,13 @@ void thash_purge_all(VCPU *v)
int num;
thash_data_t *head;
thash_cb_t *vtlb,*vhpt;
- vtlb =&v->arch.vtlb;
- vhpt =&v->arch.vhpt;
+ vtlb = &v->arch.vtlb;
+ vhpt = &v->arch.vhpt;
for (num = 0; num < 8; num++)
VMX(v, psbits[num]) = 0;
- head=vtlb->hash;
+ head = vtlb->hash;
num = (vtlb->hash_sz/sizeof(thash_data_t));
do{
head->page_flags = 0;
@@ -608,10 +619,10 @@ void thash_purge_all(VCPU *v)
head->next = 0;
head++;
num--;
- }while(num);
+ } while(num);
cch_mem_init(vtlb);
- head=vhpt->hash;
+ head = vhpt->hash;
num = (vhpt->hash_sz/sizeof(thash_data_t));
do{
head->page_flags = 0;
@@ -619,7 +630,7 @@ void thash_purge_all(VCPU *v)
head->next = 0;
head++;
num--;
- }while(num);
+ } while(num);
cch_mem_init(vhpt);
local_flush_tlb_all();
}
@@ -635,19 +646,19 @@ void thash_purge_all(VCPU *v)
thash_data_t *vtlb_lookup(VCPU *v, u64 va,int is_data)
{
- thash_data_t *cch;
- u64 psbits, ps, tag;
+ thash_data_t *cch;
+ u64 psbits, ps, tag;
ia64_rr vrr;
- thash_cb_t * hcb= &v->arch.vtlb;
-
- cch = __vtr_lookup(v, va, is_data);;
+ thash_cb_t *hcb = &v->arch.vtlb;
+
+ cch = __vtr_lookup(v, va, is_data);
if (cch)
return cch;
- if (vcpu_quick_region_check(v->arch.tc_regions,va) == 0)
+ if (vcpu_quick_region_check(v->arch.tc_regions, va) == 0)
return NULL;
psbits = VMX(v, psbits[(va >> 61)]);
- vcpu_get_rr(v,va,&vrr.rrval);
+ vcpu_get_rr(v, va, &vrr.rrval);
while (psbits) {
ps = __ffs(psbits);
psbits &= ~(1UL << ps);
@@ -676,16 +687,16 @@ static void thash_init(thash_cb_t *hcb,
hcb->pta.ve = 1;
hcb->pta.size = sz;
- head=hcb->hash;
+ head = hcb->hash;
num = (hcb->hash_sz/sizeof(thash_data_t));
- do{
+ do {
head->page_flags = 0;
head->itir = 0;
- head->etag = 1UL<<63;
+ head->etag = 1UL << 63;
head->next = 0;
head++;
num--;
- }while(num);
+ } while(num);
hcb->cch_free_idx = 0;
hcb->cch_freelist = NULL;
diff -r 0902e4aae810 -r b91d16ab68be xen/include/asm-ia64/linux-xen/asm/pgtable.h
--- a/xen/include/asm-ia64/linux-xen/asm/pgtable.h Mon Sep 17 11:08:46 2007 -0600
+++ b/xen/include/asm-ia64/linux-xen/asm/pgtable.h Mon Sep 17 11:26:21 2007 -0600
@@ -73,6 +73,7 @@
#ifdef XEN
#define _PAGE_VIRT_D (__IA64_UL(1) << 53) /* Virtual dirty bit */
#define _PAGE_PROTNONE 0
+#define _PAGE_PL_PRIV (CONFIG_CPL0_EMUL << 7)
#ifdef CONFIG_XEN_IA64_TLB_TRACK
#define _PAGE_TLB_TRACKING_BIT 54
diff -r 0902e4aae810 -r b91d16ab68be xen/include/asm-ia64/vmmu.h
--- a/xen/include/asm-ia64/vmmu.h Mon Sep 17 11:08:46 2007 -0600
+++ b/xen/include/asm-ia64/vmmu.h Mon Sep 17 11:26:21 2007 -0600
@@ -28,7 +28,6 @@
#define DEFAULT_VHPT_SZ (23) // 8M hash + 8M c-chain for VHPT
#define VTLB(v,_x) (v->arch.vtlb._x)
#define VHPT(v,_x) (v->arch.vhpt._x)
-#define _PAGE_PL_PRIV (CONFIG_CPL0_EMUL << 7)
#ifndef __ASSEMBLY__
@@ -39,34 +38,6 @@
#include <asm/regionreg.h>
#include <asm/vmx_mm_def.h>
#include <asm/bundle.h>
-//#define THASH_TLB_TR 0
-//#define THASH_TLB_TC 1
-
-
-// bit definition of TR, TC search cmobination
-//#define THASH_SECTION_TR (1<<0)
-//#define THASH_SECTION_TC (1<<1)
-
-/*
- * Next bit definition must be same with THASH_TLB_XX
-#define PTA_BASE_SHIFT (15)
- */
-
-
-
-
-#define HIGH_32BITS(x) bits(x,32,63)
-#define LOW_32BITS(x) bits(x,0,31)
-
-typedef union search_section {
- struct {
- u32 tr : 1;
- u32 tc : 1;
- u32 rsv: 30;
- };
- u32 v;
-} search_section_t;
-
enum {
ISIDE_TLB=0,
@@ -169,28 +140,6 @@ static inline u64 xen_to_arch_ppn(u64 xp
return (xppn <<(PAGE_SHIFT- ARCH_PAGE_SHIFT));
}
-typedef enum {
- THASH_TLB=0,
- THASH_VHPT
-} THASH_TYPE;
-
-struct thash_cb;
-/*
- * Use to calculate the HASH index of thash_data_t.
- */
-typedef u64 *(THASH_FN)(PTA pta, u64 va);
-typedef u64 *(TTAG_FN)(PTA pta, u64 va);
-typedef u64 *(GET_MFN_FN)(domid_t d, u64 gpfn, u64 pages);
-typedef void *(REM_NOTIFIER_FN)(struct thash_cb *hcb, thash_data_t *entry);
-typedef void (RECYCLE_FN)(struct thash_cb *hc, u64 para);
-typedef ia64_rr (GET_RR_FN)(struct vcpu *vcpu, u64 reg);
-typedef thash_data_t *(FIND_OVERLAP_FN)(struct thash_cb *hcb,
- u64 va, u64 ps, int rid, char cl, search_section_t s_sect);
-typedef thash_data_t *(FIND_NEXT_OVL_FN)(struct thash_cb *hcb);
-typedef void (REM_THASH_FN)(struct thash_cb *hcb, thash_data_t *entry);
-typedef void (INS_THASH_FN)(struct thash_cb *hcb, thash_data_t *entry, u64 va);
-
-
typedef struct thash_cb {
/* THASH base information */
thash_data_t *hash; // hash table pointer, aligned at thash_sz.
@@ -224,45 +173,6 @@ extern void thash_free(thash_cb_t *hcb);
//extern void thash_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va);
//extern void thash_tr_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va, int idx);
extern int vtr_find_overlap(struct vcpu *vcpu, u64 va, u64 ps, int is_data);
-extern u64 get_mfn(struct domain *d, u64 gpfn);
-/*
- * Force to delete a found entry no matter TR or foreign map for TLB.
- * NOTES:
- * 1: TLB entry may be TR, TC or Foreign Map. For TR entry,
- * itr[]/dtr[] need to be updated too.
- * 2: This API must be called after thash_find_overlap() or
- * thash_find_next_overlap().
- * 3: Return TRUE or FALSE
- *
- */
-extern void thash_remove(thash_cb_t *hcb, thash_data_t *entry);
-extern void thash_tr_remove(thash_cb_t *hcb, thash_data_t *entry/*, int idx*/);
-
-/*
- * Find an overlap entry in hash table and its collision chain.
- * Refer to SDM2 4.1.1.4 for overlap definition.
- * PARAS:
- * 1: in: TLB format entry, rid:ps must be same with vrr[].
- * va & ps identify the address space for overlap lookup
- * 2: section can be combination of TR, TC and FM. (THASH_SECTION_XX)
- * 3: cl means I side or D side.
- * RETURNS:
- * NULL to indicate the end of findings.
- * NOTES:
- *
- */
-extern thash_data_t *thash_find_overlap(thash_cb_t *hcb,
- thash_data_t *in, search_section_t s_sect);
-extern thash_data_t *thash_find_overlap_ex(thash_cb_t *hcb,
- u64 va, u64 ps, int rid, char cl, search_section_t s_sect);
-
-
-/*
- * Similar with thash_find_overlap but find next entry.
- * NOTES:
- * Intermediate position information is stored in hcb->priv.
- */
-extern thash_data_t *thash_find_next_overlap(thash_cb_t *hcb);
/*
* Find and purge overlap entries in hash table and its collision chain.
@@ -290,7 +200,6 @@ extern void thash_purge_all(struct vcpu
*
*/
extern thash_data_t *vtlb_lookup(struct vcpu *v,u64 va,int is_data);
-extern int thash_lock_tc(thash_cb_t *hcb, u64 va, u64 size, int rid, char cl, int lock);
#define ITIR_RV_MASK (((1UL<<32)-1)<<32 | 0x3)
@@ -298,12 +207,10 @@ extern int thash_lock_tc(thash_cb_t *hcb
#define PAGE_FLAGS_AR_PL_MASK ((0x7UL<<9)|(0x3UL<<7))
extern u64 machine_ttag(PTA pta, u64 va);
extern u64 machine_thash(PTA pta, u64 va);
-extern void purge_machine_tc_by_domid(domid_t domid);
extern void machine_tlb_insert(struct vcpu *v, thash_data_t *tlb);
extern ia64_rr vmmu_get_rr(struct vcpu *vcpu, u64 va);
extern int init_domain_tlb(struct vcpu *v);
extern void free_domain_tlb(struct vcpu *v);
-extern thash_data_t * vsa_thash(PTA vpta, u64 va, u64 vrr, u64 *tag);
extern thash_data_t * vhpt_lookup(u64 va);
extern void machine_tlb_purge(u64 va, u64 ps);
extern unsigned long fetch_code(struct vcpu *vcpu, u64 gip, IA64_BUNDLE *pbundle);