In the spirit of portability and stronger type checking, this patch
takes a page from Linux and converts all bitops to use "unsigned
long *" instead of "void *".
Standardizing on this will also help ports to architectures that
have a limited set of atomic bitop types, especially in the Linux
code rather than the Xen code.
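To illustrate the type-checking gain (a minimal sketch, not part of
the patch; the caller below is hypothetical):

    /* With "void *" the compiler accepted any pointer silently.
     * With "unsigned long *" a wrongly sized word is diagnosed. */
    extern void set_bit(int nr, volatile unsigned long *addr);

    unsigned int mask32;     /* wrong word type for a bitop */
    unsigned long mask;

    set_bit(0, &mask32);     /* now warns: incompatible pointer type */
    set_bit(0, &mask);       /* OK */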
Linux did it:
http://www.ussg.iu.edu/hypermail/linux/kernel/0204.2/0024.html
Only compile-tested.
Interesting items:
1. vcpu_info_t.evtchn_upcall_pending is really a one-bit boolean,
   so perhaps it should be an atomic_t or use compare/exchange?
2. INTR_LEN could be based on long rather than u64.
3. more use could be made of DECLARE_BITMAP (see the sketch below).
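For item 3, a sketch of what wider DECLARE_BITMAP use might look like,
assuming the usual Linux-style definition where DECLARE_BITMAP(name, bits)
expands to "unsigned long name[BITS_TO_LONGS(bits)]":

    /* as changed by this patch (xen/include/xen/sched.h): */
    unsigned long pirq_mask[NR_PIRQS/BITS_PER_LONG];

    /* possible follow-up, same layout spelled via the macro: */
    DECLARE_BITMAP(pirq_mask, NR_PIRQS);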
-JX
Signed-off-by: Jimi Xenidis <jimix@xxxxxxxxxxxxxx>
---
$ diffstat -p1 b.diff
 linux-2.6-xen-sparse/arch/i386/kernel/entry-xen.S                |    2
 linux-2.6-xen-sparse/arch/x86_64/kernel/xen_entry.S              |    2
 linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/synch_bitops.h |   18 +++---
 xen/arch/x86/audit.c                                             |    8 +--
 xen/arch/x86/domain.c                                            |    4 -
 xen/arch/x86/domain_build.c                                      |   12 ++--
 xen/arch/x86/hvm/svm/svm.c                                       |    5 +
 xen/arch/x86/hvm/svm/vmcb.c                                      |    2
 xen/arch/x86/hvm/vlapic.c                                        |    6 +-
 xen/arch/x86/hvm/vmx/vmx.c                                       |   18 ++++--
 xen/arch/x86/irq.c                                               |    8 +--
 xen/arch/x86/mm.c                                                |    2
 xen/arch/x86/smp.c                                               |    2
 xen/common/keyhandler.c                                          |    2
 xen/include/asm-x86/bitops.h                                     |   26 +++++-----
 xen/include/asm-x86/hardirq.h                                    |    2
 xen/include/asm-x86/hvm/svm/vmcb.h                               |    2
 xen/include/asm-x86/hvm/vioapic.h                                |    4 -
 xen/include/asm-x86/hvm/vlapic.h                                 |   10 +--
 xen/include/asm-x86/mm.h                                         |    2
 xen/include/asm-x86/processor.h                                  |    2
 xen/include/asm-x86/shadow.h                                     |    6 +-
 xen/include/public/grant_table.h                                 |    2
 xen/include/public/hvm/ioreq.h                                   |    1
 xen/include/public/xen.h                                         |    2
 xen/include/xen/sched.h                                          |    2
 26 files changed, 79 insertions(+), 73 deletions(-)
diff -r 401624a17ef7 linux-2.6-xen-sparse/arch/i386/kernel/entry-xen.S
--- a/linux-2.6-xen-sparse/arch/i386/kernel/entry-xen.S	Thu Mar 23 14:45:21 2006 +0100
+++ b/linux-2.6-xen-sparse/arch/i386/kernel/entry-xen.S	Fri Mar 24 07:33:22 2006 -0500
@@ -103,7 +103,7 @@ NMI_MASK = 0x80000000
__DISABLE_INTERRUPTS
#define ENABLE_INTERRUPTS GET_VCPU_INFO ; \
__ENABLE_INTERRUPTS
-#define __TEST_PENDING testb $0xFF,evtchn_upcall_pending(%esi)
+#define __TEST_PENDING testl $-1,evtchn_upcall_pending(%esi)
#endif
#ifdef CONFIG_PREEMPT
diff -r 401624a17ef7 linux-2.6-xen-sparse/arch/x86_64/kernel/xen_entry.S
--- a/linux-2.6-xen-sparse/arch/x86_64/kernel/xen_entry.S	Thu Mar 23 14:45:21 2006 +0100
+++ b/linux-2.6-xen-sparse/arch/x86_64/kernel/xen_entry.S	Fri Mar 24 07:33:22 2006 -0500
@@ -33,7 +33,7 @@
#define XEN_UNBLOCK_EVENTS(reg) XEN_GET_VCPU_INFO(reg) ; \
XEN_LOCKED_UNBLOCK_EVENTS(reg) ; \
XEN_PUT_VCPU_INFO(reg)
-#define XEN_TEST_PENDING(reg) testb $0xFF,evtchn_upcall_pending(reg)
+#define XEN_TEST_PENDING(reg) testl $-1,evtchn_upcall_pending(reg)
EVENT_MASK = (CS+4)
VGCF_IN_SYSCALL = (1<<8)
diff -r 401624a17ef7 linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/synch_bitops.h
--- a/linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/synch_bitops.h	Thu Mar 23 14:45:21 2006 +0100
+++ b/linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/synch_bitops.h	Fri Mar 24 07:33:22 2006 -0500
@@ -11,28 +11,28 @@
#define ADDR (*(volatile long *) addr)
-static __inline__ void synch_set_bit(int nr, volatile void * addr)
+static __inline__ void synch_set_bit(int nr, volatile unsigned long * addr)
{
__asm__ __volatile__ (
"lock btsl %1,%0"
: "+m" (ADDR) : "Ir" (nr) : "memory" );
}
-static __inline__ void synch_clear_bit(int nr, volatile void * addr)
+static __inline__ void synch_clear_bit(int nr, volatile unsigned long * addr)
{
__asm__ __volatile__ (
"lock btrl %1,%0"
: "+m" (ADDR) : "Ir" (nr) : "memory" );
}
-static __inline__ void synch_change_bit(int nr, volatile void * addr)
+static __inline__ void synch_change_bit(int nr, volatile unsigned long * addr)
{
__asm__ __volatile__ (
"lock btcl %1,%0"
: "+m" (ADDR) : "Ir" (nr) : "memory" );
}
-static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
+static __inline__ int synch_test_and_set_bit(int nr, volatile unsigned long * addr)
{
int oldbit;
__asm__ __volatile__ (
@@ -41,7 +41,7 @@ static __inline__ int synch_test_and_set
return oldbit;
}
-static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr)
+static __inline__ int synch_test_and_clear_bit(int nr, volatile unsigned long * addr)
{
int oldbit;
__asm__ __volatile__ (
@@ -50,7 +50,7 @@ static __inline__ int synch_test_and_cle
return oldbit;
}
-static __inline__ int synch_test_and_change_bit(int nr, volatile void * addr)
+static __inline__ int synch_test_and_change_bit(int nr, volatile unsigned long * addr)
{
int oldbit;
@@ -69,7 +69,7 @@ struct __synch_xchg_dummy { unsigned lon
(unsigned long)(new), \
sizeof(*(ptr))))
-static inline unsigned long __synch_cmpxchg(volatile void *ptr,
+static inline unsigned long __synch_cmpxchg(volatile unsigned long *ptr,
unsigned long old,
unsigned long new, int size)
{
@@ -118,13 +118,13 @@ static inline unsigned long __synch_cmpx
}
static __always_inline int synch_const_test_bit(int nr,
- const volatile void * addr)
+ const volatile unsigned long * addr)
{
return ((1UL << (nr & 31)) &
(((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}
-static __inline__ int synch_var_test_bit(int nr, volatile void * addr)
+static __inline__ int synch_var_test_bit(int nr, volatile unsigned long * addr)
{
int oldbit;
__asm__ __volatile__ (
diff -r 401624a17ef7 xen/arch/x86/audit.c
--- a/xen/arch/x86/audit.c Thu Mar 23 14:45:21 2006 +0100
+++ b/xen/arch/x86/audit.c Fri Mar 24 07:33:22 2006 -0500
@@ -259,7 +259,7 @@ int audit_adjust_pgtables(struct domain
if ( page_get_owner(gpage) != d )
{
printk("Audit %d: [hl2mfn=%lx,i=%x] Skip
foreign page "
- "dom=%p (id=%d) mfn=%lx c=%08x t=%"
+ "dom=%p (id=%d) mfn=%lx c=%08lx t=%"
PRtype_info "\n",
d->domain_id, hl2mfn, i,
page_get_owner(gpage),
@@ -331,7 +331,7 @@ int audit_adjust_pgtables(struct domain
if ( page_get_owner(gpage) != d )
{
printk("Audit %d: [l1mfn=%lx,i=%x] Skip
foreign page "
- "dom=%p (id=%d) mfn=%lx c=%08x t=%"
+ "dom=%p (id=%d) mfn=%lx c=%08lx t=%"
PRtype_info "\n",
d->domain_id, l1mfn, i,
page_get_owner(gpage),
@@ -480,7 +480,7 @@ int audit_adjust_pgtables(struct domain
if ( shadow_refcounts )
{
printk("Audit %d: found an L2 guest page "
- "mfn=%lx t=%" PRtype_info " c=%08x
while in shadow mode\n",
+ "mfn=%lx t=%" PRtype_info " c=%08lx
while in shadow mode\n",
d->domain_id, mfn, page->u.inuse.type_info,
page->count_info);
errors++;
@@ -516,7 +516,7 @@ int audit_adjust_pgtables(struct domain
if ( shadow_refcounts )
{
printk("found an L1 guest page mfn=%lx t=%"
- PRtype_info " c=%08x "
+ PRtype_info " c=%08lx "
"while in shadow mode\n",
mfn, page->u.inuse.type_info, page->count_info);
errors++;
diff -r 401624a17ef7 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c Thu Mar 23 14:45:21 2006 +0100
+++ b/xen/arch/x86/domain.c Fri Mar 24 07:33:22 2006 -0500
@@ -194,7 +194,7 @@ void dump_pageframe_info(struct domain *
{
list_for_each_entry ( page, &d->page_list, list )
{
- printk(" DomPage %p: mfn=%p, caf=%08x, taf=%"
PRtype_info "\n",
+ printk(" DomPage %p: mfn=%p, caf=%08lx, taf=%"
PRtype_info "\n",
_p(page_to_maddr(page)), _p(page_to_mfn(page)),
page->count_info, page->u.inuse.type_info);
}
@@ -202,7 +202,7 @@ void dump_pageframe_info(struct domain *
list_for_each_entry ( page, &d->xenpage_list, list )
{
- printk(" XenPage %p: mfn=%p, caf=%08x, taf=%" PRtype_info
"\n",
+ printk(" XenPage %p: mfn=%p, caf=%08lx, taf=%"
PRtype_info "\n",
_p(page_to_maddr(page)), _p(page_to_mfn(page)),
page->count_info, page->u.inuse.type_info);
}
diff -r 401624a17ef7 xen/arch/x86/domain_build.c
--- a/xen/arch/x86/domain_build.c Thu Mar 23 14:45:21 2006 +0100
+++ b/xen/arch/x86/domain_build.c Fri Mar 24 07:33:22 2006 -0500
@@ -147,8 +147,8 @@ static const char *feature_names[XENFEAT
static void parse_features(
const char *feats,
- uint32_t supported[XENFEAT_NR_SUBMAPS],
- uint32_t required[XENFEAT_NR_SUBMAPS])
+ unsigned long supported[XENFEAT_NR_SUBMAPS],
+ unsigned long required[XENFEAT_NR_SUBMAPS])
{
const char *end, *p;
int i, req;
@@ -249,8 +249,8 @@ int construct_dom0(struct domain *d,
unsigned long mpt_alloc;
/* Features supported. */
- uint32_t dom0_features_supported[XENFEAT_NR_SUBMAPS] = { 0 };
- uint32_t dom0_features_required[XENFEAT_NR_SUBMAPS] = { 0 };
+ unsigned long dom0_features_supported[XENFEAT_NR_SUBMAPS] = { 0 };
+ unsigned long dom0_features_required[XENFEAT_NR_SUBMAPS] = { 0 };
/* Sanity! */
BUG_ON(d->domain_id != 0);
@@ -308,9 +308,9 @@ int construct_dom0(struct domain *d,
p + strlen("FEATURES="),
dom0_features_supported,
dom0_features_required);
- printk("Domain 0 kernel supports features = { %08x }.\n",
+ printk("Domain 0 kernel supports features = { %08lx }.\n",
dom0_features_supported[0]);
- printk("Domain 0 kernel requires features = { %08x }.\n",
+ printk("Domain 0 kernel requires features = { %08lx }.\n",
dom0_features_required[0]);
if ( dom0_features_required[0] )
panic("Domain 0 requires an unsupported hypervisor
feature.\n");
diff -r 401624a17ef7 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c Thu Mar 23 14:45:21 2006 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c Fri Mar 24 07:33:22 2006 -0500
@@ -430,7 +430,7 @@ int start_svm(void)
ecx = cpuid_ecx(0x80000001);
boot_cpu_data.x86_capability[5] = ecx;
- if (!(test_bit(X86_FEATURE_SVME, &boot_cpu_data.x86_capability)))
+ if (!(test_bit(X86_FEATURE_SVME, &boot_cpu_data.x86_capability[0])))
return 0;
rdmsr(MSR_EFER, eax, edx);
@@ -901,7 +901,8 @@ static void svm_vmexit_do_cpuid(struct v
static void svm_vmexit_do_cpuid(struct vmcb_struct *vmcb, unsigned long input,
struct cpu_user_regs *regs)
{
- unsigned int eax, ebx, ecx, edx;
+ unsigned int eax, ebx;
+ unsigned long ecx, edx;
unsigned long eip;
struct vcpu *v = current;
int inst_len;
diff -r 401624a17ef7 xen/arch/x86/hvm/svm/vmcb.c
--- a/xen/arch/x86/hvm/svm/vmcb.c Thu Mar 23 14:45:21 2006 +0100
+++ b/xen/arch/x86/hvm/svm/vmcb.c Fri Mar 24 07:33:22 2006 -0500
@@ -106,7 +106,7 @@ static int construct_vmcb_controls(struc
static int construct_vmcb_controls(struct arch_svm_struct *arch_svm)
{
struct vmcb_struct *vmcb;
- u32 *iopm;
+ unsigned long *iopm;
u32 *msrpm;
vmcb = arch_svm->vmcb;
diff -r 401624a17ef7 xen/arch/x86/hvm/vlapic.c
--- a/xen/arch/x86/hvm/vlapic.c Thu Mar 23 14:45:21 2006 +0100
+++ b/xen/arch/x86/hvm/vlapic.c Fri Mar 24 07:33:22 2006 -0500
@@ -86,7 +86,7 @@ int vlapic_find_highest_isr(struct vlapi
int i = 0;
printk("VLAPIC: isr on reserved bits %d, isr is\n ", result);
for ( i = 0; i < INTR_LEN_32; i += 2 )
- printk("%d: 0x%08x%08x\n", i, vlapic->isr[i], vlapic->isr
[i+1]);
+ printk("%d: 0x%08lx%08lx\n", i, vlapic->isr[i], vlapic-
>isr[i+1]);
return -1;
}
@@ -287,7 +287,7 @@ struct vlapic* apic_round_robin(struct d
struct vlapic* apic_round_robin(struct domain *d,
uint8_t dest_mode,
uint8_t vector,
- uint32_t bitmap)
+ unsigned long bitmap)
{
int next, old;
struct vlapic* target = NULL;
@@ -377,7 +377,7 @@ void vlapic_ipi(struct vlapic *vlapic)
struct vlapic *target;
struct vcpu *v = NULL;
- uint32_t lpr_map;
+ unsigned long lpr_map;
HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "icr_high 0x%x, icr_low 0x%x, "
"short_hand 0x%x, dest 0x%x, trig_mode 0x%x, level
0x%x, "
diff -r 401624a17ef7 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c Thu Mar 23 14:45:21 2006 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c Fri Mar 24 07:33:22 2006 -0500
@@ -562,7 +562,7 @@ int start_vmx(void)
ecx = cpuid_ecx(1);
boot_cpu_data.x86_capability[4] = ecx;
- if (!(test_bit(X86_FEATURE_VMXE, &boot_cpu_data.x86_capability)))
+ if (!(test_bit(X86_FEATURE_VMXE, &boot_cpu_data.x86_capability[0])))
return 0;
rdmsr(IA32_FEATURE_CONTROL_MSR, eax, edx);
@@ -730,7 +730,8 @@ static void vmx_vmexit_do_cpuid(struct c
{
unsigned int input = (unsigned int)regs->eax;
unsigned int count = (unsigned int)regs->ecx;
- unsigned int eax, ebx, ecx, edx;
+ unsigned int eax, ebx, ecx32, edx32;
+ unsigned long ecx, edx;
unsigned long eip;
struct vcpu *v = current;
@@ -739,13 +740,16 @@ static void vmx_vmexit_do_cpuid(struct c
HVM_DBG_LOG(DBG_LEVEL_3, "(eax) 0x%08lx, (ebx) 0x%08lx, "
"(ecx) 0x%08lx, (edx) 0x%08lx, (esi) 0x%08lx, (edi)
0x%08lx",
(unsigned long)regs->eax, (unsigned long)regs->ebx,
- (unsigned long)regs->ecx, (unsigned long)regs->edx,
+ regs->ecx, regs->edx,
(unsigned long)regs->esi, (unsigned long)regs->edi);
if ( input == 4 )
- cpuid_count(input, count, &eax, &ebx, &ecx, &edx);
+ cpuid_count(input, count, &eax, &ebx, &ecx32, &edx32);
else
- cpuid(input, &eax, &ebx, &ecx, &edx);
+ cpuid(input, &eax, &ebx, &ecx32, &edx32);
+
+ ecx = ecx32;
+ edx = edx32;
if ( input == 1 )
{
@@ -782,8 +786,8 @@ static void vmx_vmexit_do_cpuid(struct c
regs->eax = (unsigned long) eax;
regs->ebx = (unsigned long) ebx;
- regs->ecx = (unsigned long) ecx;
- regs->edx = (unsigned long) edx;
+ regs->ecx = ecx;
+ regs->edx = edx;
HVM_DBG_LOG(DBG_LEVEL_3, "eip@%lx, input: 0x%lx, "
"output: eax = 0x%08lx, ebx = 0x%08lx, "
diff -r 401624a17ef7 xen/arch/x86/irq.c
--- a/xen/arch/x86/irq.c Thu Mar 23 14:45:21 2006 +0100
+++ b/xen/arch/x86/irq.c Fri Mar 24 07:33:22 2006 -0500
@@ -162,7 +162,7 @@ static void __do_IRQ_guest(int vector)
for ( i = 0; i < action->nr_guests; i++ )
{
d = action->guest[i];
- if ( !test_and_set_bit(irq, &d->pirq_mask) )
+ if ( !test_and_set_bit(irq, &d->pirq_mask[0]) )
action->in_flight++;
send_guest_pirq(d, irq);
}
@@ -186,7 +186,7 @@ int pirq_guest_unmask(struct domain *d)
desc = &irq_desc[irq_to_vector(pirq)];
spin_lock_irq(&desc->lock);
if ( !test_bit(d->pirq_to_evtchn[pirq], &s->evtchn_mask[0]) &&
- test_and_clear_bit(pirq, &d->pirq_mask) &&
+ test_and_clear_bit(pirq, &d->pirq_mask[0]) &&
(--((irq_guest_action_t *)desc->action)->in_flight == 0) )
desc->handler->end(irq_to_vector(pirq));
spin_unlock_irq(&desc->lock);
@@ -286,7 +286,7 @@ int pirq_guest_unbind(struct domain *d,
action = (irq_guest_action_t *)desc->action;
- if ( test_and_clear_bit(irq, &d->pirq_mask) &&
+ if ( test_and_clear_bit(irq, &d->pirq_mask[0]) &&
(--action->in_flight == 0) )
desc->handler->end(vector);
@@ -358,7 +358,7 @@ static void dump_irqs(unsigned char key)
(test_bit(d->pirq_to_evtchn[irq],
&d->shared_info->evtchn_mask[0]) ?
'M' : '-'),
- (test_bit(irq, &d->pirq_mask) ?
+ (test_bit(irq, &d->pirq_mask[0]) ?
'M' : '-'));
if ( i != action->nr_guests )
printk(",");
diff -r 401624a17ef7 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Thu Mar 23 14:45:21 2006 +0100
+++ b/xen/arch/x86/mm.c Fri Mar 24 07:33:22 2006 -0500
@@ -1322,7 +1322,7 @@ int alloc_page_type(struct page_info *pa
case PGT_ldt_page:
return alloc_segdesc_page(page);
default:
- printk("Bad type in alloc_page_type %lx t=%" PRtype_info " c=
%x\n",
+ printk("Bad type in alloc_page_type %lx t=%" PRtype_info " c=
%lx\n",
type, page->u.inuse.type_info,
page->count_info);
BUG();
diff -r 401624a17ef7 xen/arch/x86/smp.c
--- a/xen/arch/x86/smp.c Thu Mar 23 14:45:21 2006 +0100
+++ b/xen/arch/x86/smp.c Fri Mar 24 07:33:22 2006 -0500
@@ -328,7 +328,7 @@ extern int on_selected_cpus(
static void stop_this_cpu (void *dummy)
{
- clear_bit(smp_processor_id(), &cpu_online_map);
+ clear_bit(smp_processor_id(), &cpu_online_map.bits[0]);
disable_local_APIC();
diff -r 401624a17ef7 xen/common/keyhandler.c
--- a/xen/common/keyhandler.c Thu Mar 23 14:45:21 2006 +0100
+++ b/xen/common/keyhandler.c Fri Mar 24 07:33:22 2006 -0500
@@ -144,7 +144,7 @@ static void dump_domains(unsigned char k
d->domain_id);
for_each_vcpu ( d, v ) {
printk(" VCPU%d: CPU%d [has=%c] flags=%lx "
- "upcall_pend = %02x, upcall_mask = %02x ",
+ "upcall_pend = %02lx, upcall_mask = %02x ",
v->vcpu_id, v->processor,
test_bit(_VCPUF_running, &v->vcpu_flags) ? 'T':'F',
v->vcpu_flags,
diff -r 401624a17ef7 xen/include/asm-x86/bitops.h
--- a/xen/include/asm-x86/bitops.h Thu Mar 23 14:45:21 2006 +0100
+++ b/xen/include/asm-x86/bitops.h Fri Mar 24 07:33:22 2006 -0500
@@ -33,7 +33,7 @@
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
-static __inline__ void set_bit(int nr, volatile void * addr)
+static __inline__ void set_bit(int nr, volatile unsigned long * addr)
{
__asm__ __volatile__( LOCK_PREFIX
"btsl %1,%0"
@@ -50,7 +50,7 @@ static __inline__ void set_bit(int nr, v
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
-static __inline__ void __set_bit(int nr, volatile void * addr)
+static __inline__ void __set_bit(int nr, volatile unsigned long * addr)
{
__asm__(
"btsl %1,%0"
@@ -68,7 +68,7 @@ static __inline__ void __set_bit(int nr,
* you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
* in order to ensure changes are visible on other processors.
*/
-static __inline__ void clear_bit(int nr, volatile void * addr)
+static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
{
__asm__ __volatile__( LOCK_PREFIX
"btrl %1,%0"
@@ -87,7 +87,7 @@ static __inline__ void clear_bit(int nr,
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
-static __inline__ void __change_bit(int nr, volatile void * addr)
+static __inline__ void __change_bit(int nr, volatile unsigned long * addr)
{
__asm__ __volatile__(
"btcl %1,%0"
@@ -104,7 +104,7 @@ static __inline__ void __change_bit(int
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
-static __inline__ void change_bit(int nr, volatile void * addr)
+static __inline__ void change_bit(int nr, volatile unsigned long * addr)
{
__asm__ __volatile__( LOCK_PREFIX
"btcl %1,%0"
@@ -120,7 +120,7 @@ static __inline__ void change_bit(int nr
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
-static __inline__ int test_and_set_bit(int nr, volatile void * addr)
+static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
{
int oldbit;
@@ -140,7 +140,7 @@ static __inline__ int test_and_set_bit(i
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
-static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
+static __inline__ int __test_and_set_bit(int nr, volatile unsigned long * addr)
{
int oldbit;
@@ -159,7 +159,7 @@ static __inline__ int __test_and_set_bit
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
-static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
+static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
{
int oldbit;
@@ -179,7 +179,7 @@ static __inline__ int test_and_clear_bit
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
-static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
+static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long * addr)
{
int oldbit;
@@ -191,7 +191,7 @@ static __inline__ int __test_and_clear_b
}
/* WARNING: non atomic and it can be reordered! */
-static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
+static __inline__ int __test_and_change_bit(int nr, volatile unsigned long * addr)
{
int oldbit;
@@ -210,7 +210,7 @@ static __inline__ int __test_and_change_
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
-static __inline__ int test_and_change_bit(int nr, volatile void * addr)
+static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
{
int oldbit;
@@ -222,12 +222,12 @@ static __inline__ int test_and_change_bi
}
-static __inline__ int constant_test_bit(int nr, const volatile void * addr)
+static __inline__ int constant_test_bit(int nr, const volatile unsigned long * addr)
{
return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}
-static __inline__ int variable_test_bit(int nr, volatile void * addr)
+static __inline__ int variable_test_bit(int nr, volatile unsigned long * addr)
{
int oldbit;
diff -r 401624a17ef7 xen/include/asm-x86/hardirq.h
--- a/xen/include/asm-x86/hardirq.h Thu Mar 23 14:45:21 2006 +0100
+++ b/xen/include/asm-x86/hardirq.h Fri Mar 24 07:33:22 2006 -0500
@@ -5,7 +5,7 @@
#include <xen/cache.h>
typedef struct {
- unsigned int __softirq_pending;
+ unsigned long __softirq_pending;
unsigned int __local_irq_count;
unsigned int __nmi_count;
unsigned long idle_timestamp;
diff -r 401624a17ef7 xen/include/asm-x86/hvm/svm/vmcb.h
--- a/xen/include/asm-x86/hvm/svm/vmcb.h Thu Mar 23 14:45:21 2006 +0100
+++ b/xen/include/asm-x86/hvm/svm/vmcb.h Fri Mar 24 07:33:22 2006 -0500
@@ -437,7 +437,7 @@ struct arch_svm_struct {
void *host_save_area;
u64 host_save_pa;
u64 vmcb_pa;
- u32 *iopm;
+ unsigned long *iopm;
u32 *msrpm;
u64 vmexit_tsc; /* tsc read at #VMEXIT. for TSC_OFFSET */
int injecting_event;
diff -r 401624a17ef7 xen/include/asm-x86/hvm/vioapic.h
--- a/xen/include/asm-x86/hvm/vioapic.h Thu Mar 23 14:45:21 2006 +0100
+++ b/xen/include/asm-x86/hvm/vioapic.h Fri Mar 24 07:33:22 2006 -0500
@@ -93,8 +93,8 @@ typedef union RedirStatus
typedef struct hvm_vioapic {
uint32_t irr;
- uint32_t isr; /* This is used for level trigger */
- uint32_t imr;
+ unsigned long isr; /* This is used for level trigger */
+ unsigned long imr;
uint32_t ioregsel;
uint32_t flags;
uint32_t lapic_count;
diff -r 401624a17ef7 xen/include/asm-x86/hvm/vlapic.h
--- a/xen/include/asm-x86/hvm/vlapic.h Thu Mar 23 14:45:21 2006 +0100
+++ b/xen/include/asm-x86/hvm/vlapic.h Fri Mar 24 07:33:22 2006 -0500
@@ -150,13 +150,13 @@ struct vlapic
{
//FIXME check what would be 64 bit on EM64T
uint32_t version;
- uint32_t status;
+ unsigned long status;
uint32_t id;
uint32_t vcpu_id;
unsigned long base_address;
- uint32_t isr[8];
- uint32_t irr[INTR_LEN_32];
- uint32_t tmr[INTR_LEN_32];
+ unsigned long isr[32/BITS_PER_LONG];
+ unsigned long irr[INTR_LEN_LONG];
+ unsigned long tmr[INTR_LEN_LONG];
uint32_t task_priority;
uint32_t processor_priority;
uint32_t logical_dest;
@@ -225,7 +225,7 @@ struct vlapic* apic_round_robin(struct d
struct vlapic* apic_round_robin(struct domain *d,
uint8_t dest_mode,
uint8_t vector,
- uint32_t bitmap);
+ unsigned long bitmap);
s_time_t get_apictime_scheduled(struct vcpu *v);
diff -r 401624a17ef7 xen/include/asm-x86/mm.h
--- a/xen/include/asm-x86/mm.h Thu Mar 23 14:45:21 2006 +0100
+++ b/xen/include/asm-x86/mm.h Fri Mar 24 07:33:22 2006 -0500
@@ -23,7 +23,7 @@ struct page_info
struct list_head list;
/* Reference count and various PGC_xxx flags and fields. */
- u32 count_info;
+ unsigned long count_info;
/* Context-dependent fields follow... */
union {
diff -r 401624a17ef7 xen/include/asm-x86/processor.h
--- a/xen/include/asm-x86/processor.h Thu Mar 23 14:45:21 2006 +0100
+++ b/xen/include/asm-x86/processor.h Fri Mar 24 07:33:22 2006 -0500
@@ -155,7 +155,7 @@ struct cpuinfo_x86 {
char hard_math;
char rfu;
int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */
- unsigned int x86_capability[NCAPINTS];
+ unsigned long x86_capability[NCAPINTS];
char x86_vendor_id[16];
char x86_model_id[64];
int x86_cache_size; /* in KB - valid for CPUS which support this call */
diff -r 401624a17ef7 xen/include/asm-x86/shadow.h
--- a/xen/include/asm-x86/shadow.h Thu Mar 23 14:45:21 2006 +0100
+++ b/xen/include/asm-x86/shadow.h Fri Mar 24 07:33:22 2006 -0500
@@ -642,14 +642,14 @@ static inline void shadow_drop_reference
/* XXX This needs more thought... */
printk("%s: needing to call shadow_remove_all_access for mfn=%lx
\n",
__func__, page_to_mfn(page));
- printk("Before: mfn=%lx c=%08x t=%" PRtype_info "\n", page_to_mfn
(page),
+ printk("Before: mfn=%lx c=%08lx t=%" PRtype_info "\n",
page_to_mfn(page),
page->count_info, page->u.inuse.type_info);
shadow_lock(d);
shadow_remove_all_access(d, page_to_mfn(page));
shadow_unlock(d);
- printk("After: mfn=%lx c=%08x t=%" PRtype_info "\n", page_to_mfn
(page),
+ printk("After: mfn=%lx c=%08lx t=%" PRtype_info "\n",
page_to_mfn(page),
page->count_info, page->u.inuse.type_info);
}
@@ -756,7 +756,7 @@ put_shadow_ref(unsigned long smfn)
if ( unlikely(x == 0) )
{
- printk("put_shadow_ref underflow, smfn=%lx oc=%08x t=%"
+ printk("put_shadow_ref underflow, smfn=%lx oc=%08lx t=%"
PRtype_info "\n",
smfn,
mfn_to_page(smfn)->count_info,
diff -r 401624a17ef7 xen/include/public/grant_table.h
--- a/xen/include/public/grant_table.h Thu Mar 23 14:45:21 2006 +0100
+++ b/xen/include/public/grant_table.h Fri Mar 24 07:33:22 2006 -0500
@@ -73,7 +73,7 @@
*/
typedef struct grant_entry {
/* GTF_xxx: various type and flag information. [XEN,GST] */
- uint16_t flags;
+ unsigned long flags;
/* The domain being granted foreign privileges. [GST] */
domid_t domid;
/*
diff -r 401624a17ef7 xen/include/public/hvm/ioreq.h
--- a/xen/include/public/hvm/ioreq.h Thu Mar 23 14:45:21 2006 +0100
+++ b/xen/include/public/hvm/ioreq.h Fri Mar 24 07:33:22 2006 -0500
@@ -60,6 +60,7 @@ typedef struct {
#define BITS_PER_BYTE 8
#define INTR_LEN (MAX_VECTOR/(BITS_PER_BYTE * sizeof(uint64_t)))
#define INTR_LEN_32 (MAX_VECTOR/(BITS_PER_BYTE * sizeof(uint32_t)))
+#define INTR_LEN_LONG (MAX_VECTOR/BITS_PER_LONG)
typedef struct {
uint16_t pic_elcr;
diff -r 401624a17ef7 xen/include/public/xen.h
--- a/xen/include/public/xen.h Thu Mar 23 14:45:21 2006 +0100
+++ b/xen/include/public/xen.h Fri Mar 24 07:33:22 2006 -0500
@@ -312,7 +312,7 @@ typedef struct vcpu_info {
* an upcall activation. The mask is cleared when the VCPU requests
* to block: this avoids wakeup-waiting races.
*/
- uint8_t evtchn_upcall_pending;
+ unsigned long evtchn_upcall_pending;
uint8_t evtchn_upcall_mask;
unsigned long evtchn_pending_sel;
arch_vcpu_info_t arch;
diff -r 401624a17ef7 xen/include/xen/sched.h
--- a/xen/include/xen/sched.h Thu Mar 23 14:45:21 2006 +0100
+++ b/xen/include/xen/sched.h Fri Mar 24 07:33:22 2006 -0500
@@ -133,7 +133,7 @@ struct domain
*/
#define NR_PIRQS 256 /* Put this somewhere sane! */
u16 pirq_to_evtchn[NR_PIRQS];
- u32 pirq_mask[NR_PIRQS/32];
+ unsigned long pirq_mask[NR_PIRQS/BITS_PER_LONG];
/* I/O capabilities (access to IRQs and memory-mapped I/O). */
struct rangeset *iomem_caps;