ChangeSet 1.1375, 2005/04/23 12:18:20+01:00, kaf24@xxxxxxxxxxxxxxxxxxxx
xenpmap.h, hypervisor.h, xen_machdep.c, pmap.c, machdep.c:
Writable pagetables for freebsd.
i386-xen/machdep.c | 56 +++++++++++------
i386-xen/pmap.c | 153 ++++++++++++++++++++++++++++++++++++-------------
i386-xen/xen_machdep.c | 2
include/hypervisor.h | 8 +-
include/xenpmap.h | 42 +++++++++----
5 files changed, 188 insertions(+), 73 deletions(-)
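
Background for the diff below: with the writable-pagetables assist enabled (via HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables) in initvalues()), the guest may store to a page-table entry directly and Xen revalidates the affected page-table page before it is next used; without the assist, every update has to be submitted to the hypervisor. A minimal sketch of the two paths, reusing the xen_queue_pt_update()/xen_flush_queue() helpers that the patch calls; the helper name set_pte_sketch() and its parameters are illustrative only, not part of the patch:

static void
set_pte_sketch(pt_entry_t *pte, pt_entry_t val, vm_paddr_t pte_ma)
{
#ifdef WRITABLE_PAGETABLES
	/* Plain store: Xen unhooks the page-table page on the write
	 * fault and revalidates it before it is reattached. */
	*pte = val;
#else
	/* Queue an update keyed by the machine address of the entry
	 * and let the hypervisor apply the batch. */
	xen_queue_pt_update(pte_ma, val);
	xen_flush_queue();
#endif
}
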
diff -Nru a/freebsd-5.3-xen-sparse/i386-xen/i386-xen/machdep.c b/freebsd-5.3-xen-sparse/i386-xen/i386-xen/machdep.c
--- a/freebsd-5.3-xen-sparse/i386-xen/i386-xen/machdep.c 2005-04-23 08:03:19 -04:00
+++ b/freebsd-5.3-xen-sparse/i386-xen/i386-xen/machdep.c 2005-04-23 08:03:19 -04:00
@@ -1374,7 +1374,6 @@
extern unsigned long *SMPpt;
pteinfo_t *pteinfo_list;
unsigned long *xen_machine_phys = ((unsigned long *)VADDR(1008, 0));
-pt_entry_t *KPTphysv;
int preemptable;
int gdt_set;
@@ -1386,8 +1385,9 @@
initvalues(start_info_t *startinfo)
{
int i;
+ vm_paddr_t pdir_shadow_ma, KPTphys;
#ifdef WRITABLE_PAGETABLES
- XENPRINTF("using writable pagetables\n");
+ printk("using writable pagetables\n");
HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);
#endif
@@ -1398,18 +1398,17 @@
/* pre-zero unused mapped pages */
bzero((char *)(KERNBASE + (tmpindex << PAGE_SHIFT)), (1024 - tmpindex)*PAGE_SIZE);
IdlePTD = (pd_entry_t *)xpmap_ptom(__pa(startinfo->pt_base));
- KPTphysv = (pt_entry_t *)(startinfo->pt_base + PAGE_SIZE);
+ KPTphys = xpmap_ptom(__pa(startinfo->pt_base + PAGE_SIZE));
XENPRINTF("IdlePTD %p\n", IdlePTD);
XENPRINTF("nr_pages: %ld shared_info: 0x%lx flags: 0x%lx pt_base: 0x%lx "
"mod_start: 0x%lx mod_len: 0x%lx\n",
xen_start_info->nr_pages, xen_start_info->shared_info,
xen_start_info->flags, xen_start_info->pt_base,
xen_start_info->mod_start, xen_start_info->mod_len);
-
- /* setup self-referential mapping first so vtomach will work */
- xen_queue_pt_update(IdlePTD + PTDPTDI , (unsigned long)IdlePTD |
- PG_V | PG_A);
- xen_flush_queue();
+
+
+
+
/* Map proc0's UPAGES */
proc0uarea = (struct user *)(KERNBASE + (tmpindex << PAGE_SHIFT));
tmpindex += UAREA_PAGES;
@@ -1439,9 +1438,11 @@
/* map SMP page table RO */
PT_SET_MA(SMPpt, vtomach(SMPpt) & ~PG_RW);
- /* put the page table into the pde */
- xen_queue_pt_update(IdlePTD + MPPTDI, xpmap_ptom((tmpindex << PAGE_SHIFT))| PG_M | PG_RW | PG_V | PG_A);
-
+ /* put the page table into the page directory */
+ xen_queue_pt_update((vm_paddr_t)(IdlePTD + MPPTDI),
+ xpmap_ptom((tmpindex << PAGE_SHIFT))| PG_M | PG_RW | PG_V | PG_A);
+ xen_queue_pt_update(pdir_shadow_ma + MPPTDI*sizeof(vm_paddr_t),
+ xpmap_ptom((tmpindex << PAGE_SHIFT))| PG_V | PG_A);
tmpindex++;
#endif
@@ -1454,16 +1455,34 @@
#endif
/* unmap remaining pages from initial 4MB chunk */
for (i = tmpindex; i%1024 != 0; i++)
- PT_CLEAR_VA(KPTphysv + i, TRUE);
+ xen_queue_pt_update(KPTphys + i*sizeof(vm_paddr_t), 0);
+ xen_flush_queue();
+
+ pdir_shadow_ma = xpmap_ptom(tmpindex << PAGE_SHIFT);
+ tmpindex++;
+
+ /* setup shadow mapping first so vtomach will work */
+ xen_pt_pin((vm_paddr_t)pdir_shadow_ma);
+ xen_queue_pt_update((vm_paddr_t)(IdlePTD + PTDPTDI),
+ pdir_shadow_ma | PG_V | PG_A | PG_RW | PG_M);
+ xen_queue_pt_update(pdir_shadow_ma + PTDPTDI*sizeof(vm_paddr_t),
+ ((vm_paddr_t)IdlePTD) | PG_V | PG_A);
+ xen_queue_pt_update(pdir_shadow_ma + KPTDI*sizeof(vm_paddr_t),
+ KPTphys | PG_V | PG_A);
/* allocate remainder of NKPT pages */
- for (i = 0; i < NKPT-1; i++, tmpindex++)
- PD_SET_VA(((unsigned long *)startinfo->pt_base) + KPTDI + i + 1, (tmpindex << PAGE_SHIFT)| PG_M | PG_RW | PG_V | PG_A, TRUE);
+ for (i = 0; i < NKPT-1; i++, tmpindex++) {
+ xen_queue_pt_update((vm_paddr_t)(IdlePTD + KPTDI + i + 1),
+ xpmap_ptom((tmpindex << PAGE_SHIFT)| PG_M | PG_RW | PG_V | PG_A));
+ xen_queue_pt_update(pdir_shadow_ma + (KPTDI + i + 1)*sizeof(vm_paddr_t),
+ xpmap_ptom((tmpindex << PAGE_SHIFT)| PG_V | PG_A));
+ }
tmpindex += NKPT-1;
PT_UPDATES_FLUSH();
HYPERVISOR_shared_info = (shared_info_t *)(KERNBASE + (tmpindex << PAGE_SHIFT));
- PT_SET_MA(HYPERVISOR_shared_info, xen_start_info->shared_info | PG_A | PG_V | PG_RW | PG_M);
+ PT_SET_MA(HYPERVISOR_shared_info,
+ xen_start_info->shared_info | PG_A | PG_V | PG_RW | PG_M);
tmpindex++;
HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list = (unsigned long)xen_phys_machine;
@@ -1572,10 +1591,9 @@
PT_SET_MA(gdt, *vtopte((unsigned long)gdt) & ~PG_RW);
gdtmachpfn = vtomach(gdt) >> PAGE_SHIFT;
- if (HYPERVISOR_set_gdt(&gdtmachpfn, LAST_RESERVED_GDT_ENTRY + 1)) {
- XENPRINTF("set_gdt failed\n");
-
- }
+ if ((error = HYPERVISOR_set_gdt(&gdtmachpfn, LAST_RESERVED_GDT_ENTRY + 1)))
+ panic("set_gdt failed");
+
lgdt_finish();
gdt_set = 1;
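
The machdep.c hunks above replace direct PT_CLEAR_VA/PD_SET_VA writes with queued updates that are flushed in batches. A minimal sketch of what xen_queue_pt_update() and xen_flush_queue() are assumed to do; the queue depth, the variable names, and the four-argument HYPERVISOR_mmu_update() form are assumptions modelled on the Linux port of the same era, since the FreeBSD implementation lives in xen_machdep.c and only two of its lines change in this ChangeSet:

#define XPQ_SIZE 128                    /* hypothetical queue depth */
static mmu_update_t xpq_queue[XPQ_SIZE];
static int xpq_idx;

void
xen_flush_queue(void)
{
	int ok;

	if (xpq_idx == 0)
		return;
	/* One hypercall applies the whole batch of {ptr, val} pairs;
	 * the four-argument form with DOMID_SELF is an assumption. */
	if (HYPERVISOR_mmu_update(xpq_queue, xpq_idx, &ok, DOMID_SELF) < 0)
		panic("HYPERVISOR_mmu_update failed");
	xpq_idx = 0;
}

void
xen_queue_pt_update(vm_paddr_t ptr, vm_paddr_t val)
{
	xpq_queue[xpq_idx].ptr = ptr;   /* machine address of the PTE/PDE */
	xpq_queue[xpq_idx].val = val;   /* new entry contents */
	if (++xpq_idx == XPQ_SIZE)
		xen_flush_queue();
}
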
diff -Nru a/freebsd-5.3-xen-sparse/i386-xen/i386-xen/pmap.c b/freebsd-5.3-xen-sparse/i386-xen/i386-xen/pmap.c
--- a/freebsd-5.3-xen-sparse/i386-xen/i386-xen/pmap.c 2005-04-23 08:03:19 -04:00
+++ b/freebsd-5.3-xen-sparse/i386-xen/i386-xen/pmap.c 2005-04-23 08:03:19 -04:00
@@ -273,6 +273,7 @@
static void pmap_remove_page(struct pmap *pmap, vm_offset_t va);
static int pmap_remove_entry(struct pmap *pmap, vm_page_t m,
vm_offset_t va);
+static void pmap_copy_ma(vm_paddr_t src, vm_paddr_t dst);
static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m);
static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
@@ -300,6 +301,32 @@
static void pmap_dec_ref_page(vm_page_t m);
int pmap_pid_dump(int pid);
#endif
+
+void
+pd_set(struct pmap *pmap, vm_paddr_t *ptr, vm_paddr_t val, int type)
+{
+ vm_paddr_t shadow_pdir_ma = pmap->pm_pdir[PTDPTDI] & ~0xFFF;
+ vm_paddr_t shadow_offset = (vm_paddr_t)(ptr - pmap->pm_pdir)*sizeof(vm_paddr_t);
+
+ switch (type) {
+ case SH_PD_SET_VA:
+ xen_queue_pt_update(shadow_pdir_ma + shadow_offset,
+ xpmap_ptom(val & ~(PG_RW|PG_M)));
+ xen_queue_pt_update(vtomach(ptr),
+ xpmap_ptom(val));
+ break;
+ case SH_PD_SET_VA_MA:
+ xen_queue_pt_update(shadow_pdir_ma + shadow_offset,
+ val & ~(PG_RW|PG_M));
+ xen_queue_pt_update(vtomach(ptr), val);
+ break;
+ case SH_PD_SET_VA_CLEAR:
+ xen_queue_pt_update(shadow_pdir_ma + shadow_offset, 0);
+ xen_queue_pt_update(vtomach(ptr), 0);
+ break;
+ }
+}
+
/*
* Move the kernel virtual free pointer to the next
* 4MB. This is used to help improve performance
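
pd_set() above centralizes the double bookkeeping this ChangeSet introduces for page-directory entries: each update is queued twice, once against the machine address of the real entry (vtomach(ptr)) and once against the shadow directory reached through pm_pdir[PTDPTDI], with PG_RW and PG_M cleared in the shadow copy. The xenpmap.h hunk that defines the wrapper macros is not part of this excerpt, so the sketch below is only a guess at how PD_SET_VA, PD_SET_VA_MA and PD_CLEAR_VA might forward to pd_set(); the do/while wrappers and the sync handling are assumptions:

#define PD_SET_VA(pmap, ptr, val, sync) do {			\
	pd_set((pmap), (ptr), (val), SH_PD_SET_VA);		\
	if (sync)						\
		xen_flush_queue();				\
} while (0)

#define PD_SET_VA_MA(pmap, ptr, val, sync) do {			\
	pd_set((pmap), (ptr), (val), SH_PD_SET_VA_MA);		\
	if (sync)						\
		xen_flush_queue();				\
} while (0)

#define PD_CLEAR_VA(pmap, ptr, sync) do {			\
	pd_set((pmap), (ptr), 0, SH_PD_SET_VA_CLEAR);		\
	if (sync)						\
		xen_flush_queue();				\
} while (0)
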
@@ -335,7 +362,6 @@
{
vm_offset_t va;
pt_entry_t *pte, *unused;
- int i;
/*
* XXX The calculation of virtual_avail is wrong. It's NKPT*PAGE_SIZE too
@@ -416,8 +442,6 @@
PT_CLEAR_VA(CMAP1, FALSE);
PT_CLEAR_VA(CMAP2, FALSE);
- for (i = 0; i < NKPT; i++)
- PD_CLEAR_VA(&PTD[i], FALSE);
PT_UPDATES_FLUSH();
#ifdef XEN_UNNEEDED
/* Turn on PG_G on kernel page(s) */
@@ -767,7 +791,7 @@
static __inline int
pmap_is_current(pmap_t pmap)
{
-
+ /* XXX validate */
return (pmap == kernel_pmap ||
(pmap == vmspace_pmap(curthread->td_proc->p_vmspace) &&
(pmap->pm_pdir[PTDPTDI] & PG_FRAME) == (PTDpde[0] & PG_FRAME)));
@@ -794,7 +818,7 @@
newpf = PT_GET(pde) & PG_FRAME;
tmppf = PT_GET(PMAP2) & PG_FRAME;
if (tmppf != newpf) {
- PD_SET_VA(PMAP2, newpf | PG_V | PG_A, FALSE);
+ PT_SET_VA(PMAP2, newpf | PG_V | PG_A, FALSE);
pmap_invalidate_page(kernel_pmap, (vm_offset_t)PADDR2);
}
return (PADDR2 + (i386_btop(va) & (NPTEPG - 1)));
@@ -853,7 +877,7 @@
newpf = PT_GET(pde) & PG_FRAME;
tmppf = PT_GET(PMAP1) & PG_FRAME;
if (tmppf != newpf) {
- PD_SET_VA(PMAP1, newpf | PG_V | PG_A, TRUE);
+ PT_SET_VA(PMAP1, newpf | PG_V | PG_A, TRUE);
#ifdef SMP
PMAP1cpu = PCPU_GET(cpuid);
#endif
@@ -1088,7 +1112,7 @@
* unmap the page table page
*/
xen_pt_unpin(pmap->pm_pdir[m->pindex]);
- PD_CLEAR_VA(&pmap->pm_pdir[m->pindex], TRUE);
+ PD_CLEAR_VA(pmap, &pmap->pm_pdir[m->pindex], TRUE);
--pmap->pm_stats.resident_count;
/*
@@ -1146,8 +1170,8 @@
void
pmap_pinit(struct pmap *pmap)
{
- vm_page_t m, ptdpg[NPGPTD];
- vm_paddr_t ma;
+ vm_page_t m, ptdpg[NPGPTD*2];
+ vm_paddr_t ma, ma_shadow;
static int color;
int i;
@@ -1173,7 +1197,7 @@
/*
* allocate the page directory page(s)
*/
- for (i = 0; i < NPGPTD;) {
+ for (i = 0; i < NPGPTD*2;) {
m = vm_page_alloc(NULL, color++,
VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
VM_ALLOC_ZERO);
@@ -1184,36 +1208,51 @@
ptdpg[i++] = m;
}
}
+#ifdef PAE
+ #error "missing shadow handling for PAE"
+#endif
pmap_qenter((vm_offset_t)pmap->pm_pdir, ptdpg, NPGPTD);
- for (i = 0; i < NPGPTD; i++) {
- if ((ptdpg[i]->flags & PG_ZERO) == 0)
- bzero(pmap->pm_pdir + (i * NPDEPG), PAGE_SIZE);
- }
-
mtx_lock_spin(&allpmaps_lock);
LIST_INSERT_HEAD(&allpmaps, pmap, pm_list);
mtx_unlock_spin(&allpmaps_lock);
/* Wire in kernel global address entries. */