Hi Jeremy,
> I haven't been brave enough to even attempt starting X; there's a lot of
> work to be done if you want to do anything beyond dumb framebuffer.
>
> Anyway, in this case, I definitely haven't added this call, so it will
> need to be done somewhere. Does it need to be called once at startup,
> or on every context switch? There may well be a suitable place to hook
> this into already, but we can add a new pvop if it's really needed.
Just to let you know. I have this very preliminary, incomplete (x86_32
parts are missing) and barely tested patch (barely tested under Xen, not
checked if the native stuff works). It seems to work on my test case,
with a program where I ioperm some I/O range and try to inb() some bytes
from it (segfaults when it should and/or returns values when it should),
and it also survives context switches.
However, the X server is now giving me a "Corrupted page table", which I
caught on netconsole and I have no clue how I should debug that one.
Cheers,
Christophe
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 8a976ea..40795f4 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -130,6 +130,8 @@ struct pv_cpu_ops {
void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);
void (*set_iopl_mask)(unsigned mask);
+ void (*set_io_bitmap)(struct thread_struct *thread,
+ int changed, unsigned long bytes_updated);
void (*wbinvd)(void);
void (*io_delay)(void);
@@ -908,6 +910,11 @@ static inline void set_iopl_mask(unsigned mask)
{
PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
}
+static inline void set_io_bitmap(struct thread_struct *thread,
+ int changed, unsigned long bytes_updated)
+{
+ PVOP_VCALL3(pv_cpu_ops.set_io_bitmap, thread, changed, bytes_updated);
+}
/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 091cd88..7ad072e 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -519,6 +519,9 @@ static inline void native_set_iopl_mask(unsigned mask)
#endif
}
+extern void native_set_io_bitmap(struct thread_struct *thread,
+ int changed, unsigned long updated_bytes);
+
static inline void
native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
{
@@ -560,6 +563,7 @@ static inline void load_sp0(struct tss_struct *tss,
}
#define set_iopl_mask native_set_iopl_mask
+#define set_io_bitmap native_set_io_bitmap
#endif /* CONFIG_PARAVIRT */
/*
diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
index b12208f..5778936 100644
--- a/arch/x86/kernel/ioport.c
+++ b/arch/x86/kernel/ioport.c
@@ -30,14 +30,43 @@ static void set_bitmap(unsigned long *bitmap, unsigned int base,
}
}
+void native_set_io_bitmap(struct thread_struct *t,
+ int changed, unsigned long bytes_updated)
+{
+ unsigned long copy = bytes_updated;
+ struct tss_struct *tss;
+
+ if (!bytes_updated)
+ return;
+
+ tss = &__get_cpu_var(init_tss);
+
+#ifdef CONFIG_X86_32
+ /*
+ * Sets the lazy trigger so that the next I/O operation will
+ * reload the correct bitmap.
+ * Reset the owner so that a process switch will not set
+ * tss->io_bitmap_base to IO_BITMAP_OFFSET.
+ */
+ tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
+ tss->io_bitmap_owner = NULL;
+#else
+ /* Update the TSS: */
+ if (t->io_bitmap_ptr)
+ memcpy(tss->io_bitmap, t->io_bitmap_ptr, bytes_updated);
+ else
+ memset(tss->io_bitmap, 0xff, bytes_updated);
+#endif
+}
+
/*
* this changes the io permissions bitmap in the current task.
*/
asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
{
 	struct thread_struct *t = &current->thread;
- struct tss_struct *tss;
unsigned int i, max_long, bytes, bytes_updated;
+ int changed;
if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
return -EINVAL;
@@ -58,16 +87,17 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned
long num, int turn_on)
memset(bitmap, 0xff, IO_BITMAP_BYTES);
t->io_bitmap_ptr = bitmap;
set_thread_flag(TIF_IO_BITMAP);
- }
+ changed = 1;
+ } else
+ changed = 0;
/*
- * do it in the per-thread copy and in the TSS ...
- *
- * Disable preemption via get_cpu() - we must not switch away
+ * do it in the per-thread copy
+	 *
+	 * Disable preemption - we must not switch away
* because the ->io_bitmap_max value must match the bitmap
* contents:
*/
- tss = &per_cpu(init_tss, get_cpu());
+ preempt_disable();
set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
@@ -85,21 +115,9 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
t->io_bitmap_max = bytes;
-#ifdef CONFIG_X86_32
- /*
- * Sets the lazy trigger so that the next I/O operation will
- * reload the correct bitmap.
- * Reset the owner so that a process switch will not set
- * tss->io_bitmap_base to IO_BITMAP_OFFSET.
- */
- tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
- tss->io_bitmap_owner = NULL;
-#else
- /* Update the TSS: */
- memcpy(tss->io_bitmap, t->io_bitmap_ptr, bytes_updated);
-#endif
+ set_io_bitmap(t, changed, bytes_updated);
- put_cpu();
+ preempt_enable();
return 0;
}
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 006cec4..602edc0 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -357,6 +357,7 @@ struct pv_cpu_ops pv_cpu_ops = {
.swapgs = native_swapgs,
.set_iopl_mask = native_set_iopl_mask,
+ .set_io_bitmap = native_set_io_bitmap,
.io_delay = native_io_delay,
.lazy_mode = {
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index efb0396..a75d058 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -237,17 +237,13 @@ void exit_thread(void)
struct thread_struct *t = &me->thread;
if (me->thread.io_bitmap_ptr) {
- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
-
+ preempt_disable();
kfree(t->io_bitmap_ptr);
t->io_bitmap_ptr = NULL;
clear_thread_flag(TIF_IO_BITMAP);
- /*
- * Careful, clear this in the TSS too:
- */
- memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
+ set_io_bitmap(t, 1, t->io_bitmap_max);
t->io_bitmap_max = 0;
- put_cpu();
+ preempt_enable();
}
ds_exit_thread(current);
@@ -513,6 +509,12 @@ static inline void __switch_to_xtra(struct task_struct *prev_p,
hard_enable_TSC();
}
+#if 1
+ if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP) ||
+ test_tsk_thread_flag(prev_p, TIF_IO_BITMAP))
+ set_io_bitmap(next_p, 1,
+ max(prev->io_bitmap_max, next->io_bitmap_max));
+#else
if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
/*
* Copy the relevant range of the IO bitmap.
@@ -526,6 +528,7 @@ static inline void __switch_to_xtra(struct task_struct *prev_p,
*/
memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
}
+#endif
}
/*
@@ -556,6 +559,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
*/
load_sp0(tss, next);
+ if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr))
+ set_io_bitmap(next, 1, prev->io_bitmap_max);
+
/*
* Switch DS and ES.
* This won't pick up thread selector changes, but I guess that is ok.
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index e8a1e0a..a6cd15a 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -563,6 +563,21 @@ static void xen_set_iopl_mask(unsigned mask)
HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
}
+static void xen_set_io_bitmap(struct thread_struct *thread,
+ int changed, unsigned long bytes_updated)
+{
+ struct physdev_set_iobitmap set_iobitmap;
+
+ if (!changed)
+ return;
+
+ set_xen_guest_handle(set_iobitmap.bitmap,
+ (char *)current->thread.io_bitmap_ptr);
+ set_iobitmap.nr_ports = thread->io_bitmap_ptr ? IO_BITMAP_BITS : 0;
+ WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap,
+ &set_iobitmap));
+}
+
static void xen_io_delay(void)
{
}
@@ -1286,6 +1301,7 @@ static const struct pv_cpu_ops xen_cpu_ops __initdata = {
.load_sp0 = xen_load_sp0,
.set_iopl_mask = xen_set_iopl_mask,
+ .set_io_bitmap = xen_set_io_bitmap,
.io_delay = xen_io_delay,
/* Xen takes care of %gs when switching to usermode for us */
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
|