diff -urN --exclude=SCCS --exclude=BitKeeper xen-unstable.latest/xen/arch/x86/mm.c xeno-ft/xen/arch/x86/mm.c
--- xen-unstable.latest/xen/arch/x86/mm.c	2005-06-04 18:07:26.000000000 -0400
+++ xeno-ft/xen/arch/x86/mm.c	2005-06-06 11:07:01.000000000 -0400
@@ -1695,6 +1695,8 @@
         break;
 
     case MMUEXT_NEW_BASEPTR:
+        if ( shadow_mode_translate(d) )
+            op.mfn = __gpfn_to_mfn(d, op.mfn);
         okay = new_guest_cr3(op.mfn);
         percpu_info[cpu].deferred_ops &= ~DOP_FLUSH_TLB;
         break;
@@ -1807,6 +1809,9 @@
             okay = 0;
             break;
         }
+
+        if ( shadow_mode_translate(d) )
+            BUG(); // not supported yet, need to think about this.
 
         e = percpu_info[cpu].foreign;
         if ( unlikely(e == NULL) )
@@ -2431,6 +2436,7 @@
 {
     int nr_pages = (entries + 511) / 512;
     unsigned long frames[16];
+    struct domain* d = current->domain;
     long ret;
 
     if ( copy_from_user(frames, frame_list, nr_pages * sizeof(unsigned long)) )
@@ -2438,6 +2444,18 @@
 
     LOCK_BIGLOCK(current->domain);
 
+    if(shadow_mode_translate(d)) {
+        int i;
+        unsigned long mfn;
+        shadow_lock(d);
+        for(i=0; i<nr_pages; i++) {
+            mfn = __gpfn_to_mfn(d, frames[i]);
+            frames[i] = mfn;
+        }
+        shadow_unlock(d);
+    }
+
[...]

diff -urN --exclude=SCCS --exclude=BitKeeper xen-unstable.latest/xen/arch/x86/shadow.c xeno-ft/xen/arch/x86/shadow.c
@@ ... @@
-        for ( x = &d->arch.shadow_ht[i]; x != NULL; x = x->next )
-            if ( MFN_PINNED(x->smfn) )
-                count++;
+
+        for ( x = &d->arch.shadow_ht[i]; x != NULL; x = x->next ) {
+            /* Skip entries that are writable_pred */
+            switch(x->gpfn_and_flags & PGT_type_mask){
+            case PGT_l1_shadow:
+            case PGT_l2_shadow:
+            case PGT_l3_shadow:
+            case PGT_l4_shadow:
+            case PGT_hl2_shadow:
+                if ( MFN_PINNED(x->smfn) )
+                    count++;
+                break;
+            case PGT_snapshot:
+            case PGT_writable_pred:
+                printk(" SKIPPING type %lx\n",
+                       x->gpfn_and_flags & PGT_type_mask);
+                break;
+            default:
+                BUG();
+            }
+        }
+
         if ( !count )
             continue;
 
         mfn_list = xmalloc_array(unsigned long, count);
         count = 0;
-        for ( x = &d->arch.shadow_ht[i]; x != NULL; x = x->next )
-            if ( MFN_PINNED(x->smfn) )
-                mfn_list[count++] = x->smfn;
+        for ( x = &d->arch.shadow_ht[i]; x != NULL; x = x->next ) {
+            /* Skip entries that are writable_pred */
+            switch(x->gpfn_and_flags & PGT_type_mask){
+            case PGT_l1_shadow:
+            case PGT_l2_shadow:
+            case PGT_l3_shadow:
+            case PGT_l4_shadow:
+            case PGT_hl2_shadow:
+                if ( MFN_PINNED(x->smfn) )
+                    mfn_list[count++] = x->smfn;
+                break;
+            case PGT_snapshot:
+            case PGT_writable_pred:
+                printk(" SKIPPING type %lx\n",
+                       x->gpfn_and_flags & PGT_type_mask);
+                break;
+            default:
+                BUG();
+            }
+        }
 
         while ( count )
         {
@@ -773,6 +811,9 @@
     unsigned long va = pfn << PAGE_SHIFT;
 
     ASSERT( phystab );
+    ASSERT(shadow_lock_is_acquired(d));
+
+    l2 = map_domain_mem_with_cache(phystab, l2cache);
 
     l2 = map_domain_mem_with_cache(phystab, l2cache);
     l2e = l2[l2_table_offset(va)];
@@ -1366,7 +1407,7 @@
     case DOM0_SHADOW_CONTROL_OP_ENABLE_TRANSLATE:
         free_shadow_pages(d);
         rc = __shadow_mode_enable(
-            d, d->arch.shadow_mode|SHM_enable|SHM_refcounts|SHM_translate);
+            d, d->arch.shadow_mode|SHM_enable|SHM_refcounts|SHM_translate|SHM_write_all);
         break;
 
     default:
@@ -1420,7 +1461,7 @@
     unmap_domain_mem(l1);
 
 #if 0
-    printk("gpfn_to_mfn_foreign(d->id=%d, gpfn=%lx) => %lx phystab=%lx l2e=%lx l1tab=%lx, l1e=%lx\n",
+    printk("gpfn_to_mfn_foreign(d->domain_id=%d, gpfn=%lx) => %lx phystab=%lx l2e=%lx l1tab=%lx, l1e=%lx\n",
            d->domain_id, gpfn, l1_pgentry_val(l1e) >> PAGE_SHIFT,
            phystab, l2e, l1tab, l1e);
 #endif
@@ -1677,6 +1718,7 @@
 
     shadow_lock(d);
     __shadow_sync_va(v, va);
+    SH_VVLOG("shadow_invlpg va=%lx", va);
 
     // XXX mafetter: will need to think about 4MB pages...
@@ -2053,7 +2095,11 @@
     while ( count )
     {
         count--;
+        /* delete_shadow_status() does a shadow_audit(), so we need to
+         * keep an accurate count of writable_pte_predictions to keep it
+         * happy. */
         delete_shadow_status(d, gpfn_list[count], 0, PGT_writable_pred);
+        perfc_decr(writable_pte_predictions);
     }
 
     xfree(gpfn_list);
@@ -2580,6 +2626,7 @@
      * STEP 2. Check the guest PTE.
      */
     __guest_get_l2e(v, va, &gpde);
+    SH_VVLOG("shadow_fault: gpde=%" PRIpte, l2e_get_intpte(gpde));
     if ( unlikely(!(l2e_get_flags(gpde) & _PAGE_PRESENT)) )
     {
         SH_VVLOG("shadow_fault - EXIT: L1 not present");
@@ -2607,19 +2654,11 @@
 
     if ( unlikely(!(l1e_get_flags(gpte) & _PAGE_RW)) )
     {
-        if ( shadow_mode_page_writable(d, l1e_get_pfn(gpte)) )
-        {
-            allow_writes = 1;
-            l1e_add_flags(gpte, _PAGE_RW);
-        }
-        else
-        {
             /* Write fault on a read-only mapping. */
             SH_VVLOG("shadow_fault - EXIT: wr fault on RO page (%" PRIpte ")",
                      l1e_get_intpte(gpte));
             perfc_incrc(shadow_fault_bail_ro_mapping);
             goto fail;
-        }
     }
 
     if ( !l1pte_write_fault(v, &gpte, &spte, va) )
@@ -2764,8 +2803,9 @@
     // just everything involved in getting to this L1 (i.e. we need
     // linear_pg_table[l1_linear_offset(va)] to be in sync)...
     //
+    SH_VVLOG("shadow_do_update_va_mapping(va=%lx, val=%" PRIpte ")",
+             va, l1e_get_intpte(val));
     __shadow_sync_va(v, va);
-
     l1pte_propagate_from_guest(d, val, &spte);
     shadow_set_l1e(va, spte, 0);
diff -urN --exclude=SCCS --exclude=BitKeeper xen-unstable.latest/xen/include/asm-x86/shadow.h xeno-ft/xen/include/asm-x86/shadow.h
--- xen-unstable.latest/xen/include/asm-x86/shadow.h	2005-06-04 18:07:26.000000000 -0400
+++ xeno-ft/xen/include/asm-x86/shadow.h	2005-06-06 11:06:17.000000000 -0400
@@ -1038,6 +1038,21 @@
 {
     int live = 0, free = 0, j = 0, abs;
     struct shadow_status *a;
+    int live_shadow_l1_pages,
+        live_shadow_l2_pages,
+        live_shadow_l3_pages,
+        live_shadow_l4_pages,
+        live_hl2_table_pages,
+        live_snapshot_pages,
+        live_writable_pte_predictions;
+
+    live_shadow_l1_pages =
+    live_shadow_l2_pages =
+    live_shadow_l3_pages =
+    live_shadow_l4_pages =
+    live_hl2_table_pages =
+    live_snapshot_pages =
+    live_writable_pte_predictions = 0;
 
     for ( j = 0; j < shadow_ht_buckets; j++ )
     {
@@ -1045,11 +1060,37 @@
         if ( a->gpfn_and_flags )
        {
             live++;
+            switch(a->gpfn_and_flags & PGT_type_mask) {
+            case PGT_l1_shadow:
+                live_shadow_l1_pages++;
+                break;
+            case PGT_l2_shadow:
+                live_shadow_l2_pages++;
+                break;
+            case PGT_l3_shadow:
+                live_shadow_l3_pages++;
+                break;
+            case PGT_l4_shadow:
+                live_shadow_l4_pages++;
+                break;
+            case PGT_hl2_shadow:
+                live_hl2_table_pages++;
+                break;
+            case PGT_snapshot:
+                live_snapshot_pages++;
+                break;
+            case PGT_writable_pred:
+                live_writable_pte_predictions++;
+                break;
+            default:
+                BUG();
+            }
             ASSERT(a->smfn);
         }
         else
             ASSERT(!a->next);
+
         a = a->next;
         while ( a && (live < 9999) )
         {
@@ -1060,6 +1101,31 @@
                        live, a->gpfn_and_flags, a->smfn, a->next);
                 BUG();
             }
+            switch(a->gpfn_and_flags & PGT_type_mask) {
+            case PGT_l1_shadow:
+                live_shadow_l1_pages++;
+                break;
+            case PGT_l2_shadow:
+                live_shadow_l2_pages++;
+                break;
+            case PGT_l3_shadow:
+                live_shadow_l3_pages++;
+                break;
+            case PGT_l4_shadow:
+                live_shadow_l4_pages++;
+                break;
+            case PGT_hl2_shadow:
+                live_hl2_table_pages++;
+                break;
+            case PGT_snapshot:
+                live_snapshot_pages++;
+                break;
+            case PGT_writable_pred:
+                live_writable_pte_predictions++;
+                break;
+            default:
+                BUG();
+            }
             ASSERT(a->smfn);
             a = a->next;
         }
@@ -1085,13 +1151,21 @@
 
 #ifdef PERF_COUNTERS
     if ( (abs < -1) || (abs > 1) )
     {
-        printk("live=%d free=%d l1=%d l2=%d hl2=%d snapshot=%d writable_ptes=%d\n",
+        printk("live=%d free=%d l1=%d l2=%d hl2=%d snapshot=%d writable_pred=%d\n",
               live, free,
               perfc_value(shadow_l1_pages),
               perfc_value(shadow_l2_pages),
               perfc_value(hl2_table_pages),
               perfc_value(snapshot_pages),
               perfc_value(writable_pte_predictions));
+        printk("counted: l1=%d l2=%d l3=%d l4=%d hl2=%d snapshot=%d writable_pred=%d\n",
+               live_shadow_l1_pages,
+               live_shadow_l2_pages,
+               live_shadow_l3_pages,
+               live_shadow_l4_pages,
+               live_hl2_table_pages,
+               live_snapshot_pages,
+               live_writable_pte_predictions);
         BUG();
     }
 #endif
@@ -1194,7 +1268,7 @@
 
 #ifndef NDEBUG
     if ( ___shadow_status(d, gpfn, stype) != 0 )
     {
-        printk("d->id=%d gpfn=%lx gmfn=%lx stype=%lx c=%x t=%x "
+        printk("d->domain_id=%d gpfn=%lx gmfn=%lx stype=%lx c=%x t=%x "
               "mfn_out_of_sync(gmfn)=%d mfn_is_page_table(gmfn)=%d\n",
               d->domain_id, gpfn, gmfn, stype,
               frame_table[gmfn].count_info,
@@ -1441,7 +1515,8 @@
         if ( stype != PGT_writable_pred )
             BUG(); // we should never replace entries into the hash table
         x->smfn = smfn;
-        put_page(pfn_to_page(gmfn)); // already had a ref...
+        if ( stype != PGT_writable_pred )
+            put_page(pfn_to_page(gmfn)); // already had a ref...
         goto done;
     }
diff -urN --exclude=SCCS --exclude=BitKeeper xen-unstable.latest/xen/include/asm-x86/x86_32/domain_page.h xeno-ft/xen/include/asm-x86/x86_32/domain_page.h
--- xen-unstable.latest/xen/include/asm-x86/x86_32/domain_page.h	2005-06-04 18:07:26.000000000 -0400
+++ xeno-ft/xen/include/asm-x86/x86_32/domain_page.h	2005-06-06 16:56:18.000000000 -0400
@@ -63,7 +63,6 @@
 unmap_domain_mem_with_cache(void *va, struct map_dom_mem_cache *cache)
 {
     ASSERT(cache != NULL);
-    unmap_domain_mem(va);
 }
 
 static inline void
@@ -76,5 +75,4 @@
         cache->pa = 0;
     }
 }
-
 #endif /* __ASM_DOMAIN_PAGE_H__ */
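A note on the repeated type dispatch above: free_shadow_pages() now walks each
shadow_ht bucket twice with an identical switch over PGT_type_mask (once to
count pinned shadows, once to collect them), and shadow_audit() repeats the
same switch twice more. The following is a standalone sketch, not the Xen
source, of how that walk can be factored into a single predicate; the PGT_*
values, the struct layout, and the pinned-ness test are simplified stand-ins
for the real definitions (in particular, Xen's MFN_PINNED() inspects the
frame's type bits in frame_table, not the smfn value as done here).

/*
 * Standalone sketch of the bucket walk, with the type dispatch factored
 * into one predicate.  Compiles as plain C: gcc -Wall sketch.c
 * All constants and types below are simplified stand-ins, not Xen's.
 */
#include <assert.h>
#include <stdio.h>

#define PGT_type_mask      (7UL << 29)   /* stand-in for Xen's encoding */
#define PGT_l1_shadow      (1UL << 29)
#define PGT_l2_shadow      (2UL << 29)
#define PGT_l3_shadow      (3UL << 29)
#define PGT_l4_shadow      (4UL << 29)
#define PGT_hl2_shadow     (5UL << 29)
#define PGT_snapshot       (6UL << 29)
#define PGT_writable_pred  (7UL << 29)

struct shadow_status {
    unsigned long gpfn_and_flags;        /* guest pfn | PGT_* type */
    unsigned long smfn;                  /* shadow machine frame */
    struct shadow_status *next;
};

/* Stand-in for MFN_PINNED(); here, odd smfns count as pinned. */
static int mfn_pinned(unsigned long smfn) { return smfn & 1; }

/* True only for shadow page-table types; snapshots and writable-PTE
 * predictions live in the same hash but are never pinned shadows. */
static int is_pt_shadow(unsigned long gpfn_and_flags)
{
    switch ( gpfn_and_flags & PGT_type_mask )
    {
    case PGT_l1_shadow: case PGT_l2_shadow:
    case PGT_l3_shadow: case PGT_l4_shadow:
    case PGT_hl2_shadow:
        return 1;
    case PGT_snapshot:
    case PGT_writable_pred:
        return 0;
    default:
        assert(0);                       /* mirrors the patch's BUG() */
        return 0;
    }
}

/* Pass 1 of free_shadow_pages(): count pinned shadows in one bucket. */
static int count_pinned(struct shadow_status *bucket)
{
    int count = 0;
    struct shadow_status *x;
    for ( x = bucket; x != NULL; x = x->next )
        if ( is_pt_shadow(x->gpfn_and_flags) && mfn_pinned(x->smfn) )
            count++;
    return count;
}

int main(void)
{
    /* Chain: unpinned L2 shadow -> pinned L1 shadow -> writable_pred. */
    struct shadow_status s3 = { 0x300 | PGT_writable_pred, 0x33, NULL };
    struct shadow_status s2 = { 0x200 | PGT_l1_shadow,     0x21, &s3 };
    struct shadow_status s1 = { 0x100 | PGT_l2_shadow,     0x10, &s2 };

    printf("pinned shadows: %d\n", count_pinned(&s1));   /* prints 1 */
    return 0;
}

Folding the dispatch into one helper keeps the two passes (count, then
collect) trivially in agreement, which is exactly the property the new
shadow_audit() counters are there to verify.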