ChangeSet 1.1236.32.3, 2005/03/14 22:10:10+00:00, mafetter@xxxxxxxxxxxxxxxx
Temporary hack for linux 2.6.10 to use shadow mode instead of
writable page tables.
Signed-off-by: michael.fetterman@xxxxxxxxxxxx
 hypervisor.c |   42 ++++++------------------------------------
 init.c       |    4 ++--
 pgtable.c    |    6 +++---
 3 files changed, 11 insertions(+), 41 deletions(-)
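
For readers skimming the hunks below, here is a side-by-side sketch of the two PTE-update strategies this changeset toggles between. The wrapper names queued_pte_update() and shadow_pte_update() are hypothetical; everything inside their bodies is taken from the diff itself.

/*
 * Writable-page-table path (the code being removed): each update is batched
 * into a per-CPU queue of {machine address, value} pairs and handed to Xen
 * when the queue fills or is explicitly flushed.
 */
void queued_pte_update(pte_t *ptr, unsigned long val)
{
    int cpu = smp_processor_id();
    int idx;
    unsigned long flags;

    spin_lock_irqsave(&update_lock, flags);
    idx = per_cpu(mmu_update_queue_idx, cpu);
    per_cpu(update_queue[idx], cpu).ptr = virt_to_machine(ptr);
    per_cpu(update_queue[idx], cpu).val = val;
    increment_index();
    spin_unlock_irqrestore(&update_lock, flags);
}

/*
 * Shadow-mode path (the code being added): Xen keeps its own shadow of the
 * guest page tables and validates guest writes itself, so after draining
 * anything still sitting in the queue the kernel can store the entry directly.
 */
void shadow_pte_update(pte_t *ptr, unsigned long val)
{
    _flush_page_update_queue();   /* preserve ordering with already-queued ops */
    *(unsigned long *)ptr = val;  /* plain store; shadow code propagates it */
}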
diff -Nru a/linux-2.6.10-xen-sparse/arch/xen/i386/mm/hypervisor.c b/linux-2.6.10-xen-sparse/arch/xen/i386/mm/hypervisor.c
--- a/linux-2.6.10-xen-sparse/arch/xen/i386/mm/hypervisor.c	2005-04-05 12:07:43 -04:00
+++ b/linux-2.6.10-xen-sparse/arch/xen/i386/mm/hypervisor.c	2005-04-05 12:07:43 -04:00
@@ -125,28 +125,14 @@
void queue_l1_entry_update(pte_t *ptr, unsigned long val)
{
- int cpu = smp_processor_id();
- int idx;
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- idx = per_cpu(mmu_update_queue_idx, cpu);
- per_cpu(update_queue[idx], cpu).ptr = virt_to_machine(ptr);
- per_cpu(update_queue[idx], cpu).val = val;
- increment_index();
- spin_unlock_irqrestore(&update_lock, flags);
+ _flush_page_update_queue();
+ *(unsigned long *)ptr = val;
}
void queue_l2_entry_update(pmd_t *ptr, unsigned long val)
{
- int cpu = smp_processor_id();
- int idx;
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- idx = per_cpu(mmu_update_queue_idx, cpu);
- per_cpu(update_queue[idx], cpu).ptr = virt_to_machine(ptr);
- per_cpu(update_queue[idx], cpu).val = val;
- increment_index();
- spin_unlock_irqrestore(&update_lock, flags);
+ _flush_page_update_queue();
+ *(unsigned long *)ptr = val;
}
void queue_pt_switch(unsigned long ptr)
@@ -275,28 +261,12 @@
/* queue and flush versions of the above */
void xen_l1_entry_update(pte_t *ptr, unsigned long val)
{
- int cpu = smp_processor_id();
- int idx;
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- idx = per_cpu(mmu_update_queue_idx, cpu);
- per_cpu(update_queue[idx], cpu).ptr = virt_to_machine(ptr);
- per_cpu(update_queue[idx], cpu).val = val;
- increment_index_and_flush();
- spin_unlock_irqrestore(&update_lock, flags);
+ *(unsigned long *)ptr = val;
}
void xen_l2_entry_update(pmd_t *ptr, unsigned long val)
{
- int cpu = smp_processor_id();
- int idx;
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- idx = per_cpu(mmu_update_queue_idx, cpu);
- per_cpu(update_queue[idx], cpu).ptr = virt_to_machine(ptr);
- per_cpu(update_queue[idx], cpu).val = val;
- increment_index_and_flush();
- spin_unlock_irqrestore(&update_lock, flags);
+ *(unsigned long *)ptr = val;
}
void xen_pt_switch(unsigned long ptr)
diff -Nru a/linux-2.6.10-xen-sparse/arch/xen/i386/mm/init.c b/linux-2.6.10-xen-sparse/arch/xen/i386/mm/init.c
--- a/linux-2.6.10-xen-sparse/arch/xen/i386/mm/init.c	2005-04-05 12:07:43 -04:00
+++ b/linux-2.6.10-xen-sparse/arch/xen/i386/mm/init.c	2005-04-05 12:07:43 -04:00
@@ -77,7 +77,7 @@
{
if (pmd_none(*pmd)) {
pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
- make_page_readonly(page_table);
+ //make_page_readonly(page_table);
set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
if (page_table != pte_offset_kernel(pmd, 0))
BUG();
@@ -349,7 +349,7 @@
* it. We clean up by write-enabling and then freeing the old page dir.
*/
memcpy(new_pgd, old_pgd, PTRS_PER_PGD_NO_HV*sizeof(pgd_t));
- make_page_readonly(new_pgd);
+ //make_page_readonly(new_pgd);
queue_pgd_pin(__pa(new_pgd));
load_cr3(new_pgd);
queue_pgd_unpin(__pa(old_pgd));
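
The commented-out make_page_readonly() calls in init.c (and in pgtable.c below) exist because writable page tables require the guest's own mappings of page-table frames to be read-only before Xen will pin them; under shadow mode the hardware never walks the guest's tables directly, so those mappings may stay writable. A rough sketch of what the helper does, assuming it simply clears _PAGE_RW on the kernel mapping (the tree's actual lookup helpers may differ):

/*
 * Illustrative only: make the kernel mapping of a page-table page read-only
 * via a three-level walk, as on 2.6.10-era i386.
 */
static void make_page_readonly_sketch(void *va)
{
    unsigned long addr = (unsigned long)va;
    pgd_t *pgd = pgd_offset_k(addr);
    pmd_t *pmd = pmd_offset(pgd, addr);
    pte_t *pte = pte_offset_kernel(pmd, addr);

    /* drop the writable bit on the mapping of the page-table page */
    queue_l1_entry_update(pte, (*(unsigned long *)pte) & ~_PAGE_RW);
}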
diff -Nru a/linux-2.6.10-xen-sparse/arch/xen/i386/mm/pgtable.c b/linux-2.6.10-xen-sparse/arch/xen/i386/mm/pgtable.c
--- a/linux-2.6.10-xen-sparse/arch/xen/i386/mm/pgtable.c	2005-04-05 12:07:43 -04:00
+++ b/linux-2.6.10-xen-sparse/arch/xen/i386/mm/pgtable.c	2005-04-05 12:07:43 -04:00
@@ -181,7 +181,7 @@
pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
if (pte) {
clear_page(pte);
- make_page_readonly(pte);
+ //make_page_readonly(pte);
xen_flush_page_update_queue();
}
return pte;
@@ -194,7 +194,7 @@
set_page_count(page, 1);
clear_page(pte);
- make_page_readonly(pte);
+ //make_page_readonly(pte);
queue_pte_pin(__pa(pte));
flush_page_update_queue();
}
@@ -304,7 +304,7 @@
spin_unlock_irqrestore(&pgd_lock, flags);
memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
out:
- make_page_readonly(pgd);
+ //make_page_readonly(pgd);
queue_pgd_pin(__pa(pgd));
flush_page_update_queue();
}
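
Net effect in pgtable.c: freshly allocated page-table pages are still pinned and the update queue is still flushed; only the read-only remapping is dropped. Reconstructed post-patch shape of the first hunk's function, assuming it is pte_alloc_one_kernel() as in stock 2.6.10:

pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
    pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
    if (pte) {
        clear_page(pte);
        /* make_page_readonly(pte); -- unnecessary under shadow mode */
        xen_flush_page_update_queue();
    }
    return pte;
}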