# HG changeset patch
# User emellor@xxxxxxxxxxxxxxxxxxxxxx
# Node ID 06b80b837c92b5d6f6a50a2a569313293659d8f5
# Parent 95584b819b72a87775584323b27731ddab7111f2
# Parent 198828cc103b78c75da6a3a083d2d633c85df39b
Merged.
diff -r 95584b819b72 -r 06b80b837c92 extras/mini-os/events.c
--- a/extras/mini-os/events.c Tue Dec 6 16:40:43 2005
+++ b/extras/mini-os/events.c Tue Dec 6 16:40:50 2005
@@ -77,6 +77,7 @@
/* Try to bind the virq to a port */
op.cmd = EVTCHNOP_bind_virq;
op.u.bind_virq.virq = virq;
+ op.u.bind_virq.vcpu = smp_processor_id();
if ( HYPERVISOR_event_channel_op(&op) != 0 )
{
diff -r 95584b819b72 -r 06b80b837c92 extras/mini-os/hypervisor.c
--- a/extras/mini-os/hypervisor.c Tue Dec 6 16:40:43 2005
+++ b/extras/mini-os/hypervisor.c Tue Dec 6 16:40:50 2005
@@ -39,7 +39,7 @@
unsigned int l1i, l2i, port;
int cpu = 0;
shared_info_t *s = HYPERVISOR_shared_info;
- vcpu_info_t *vcpu_info = &s->vcpu_data[cpu];
+ vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];
vcpu_info->evtchn_upcall_pending = 0;
@@ -71,7 +71,7 @@
inline void unmask_evtchn(u32 port)
{
shared_info_t *s = HYPERVISOR_shared_info;
- vcpu_info_t *vcpu_info = &s->vcpu_data[smp_processor_id()];
+ vcpu_info_t *vcpu_info = &s->vcpu_info[smp_processor_id()];
synch_clear_bit(port, &s->evtchn_mask[0]);
diff -r 95584b819b72 -r 06b80b837c92 extras/mini-os/include/os.h
--- a/extras/mini-os/include/os.h Tue Dec 6 16:40:43 2005
+++ b/extras/mini-os/include/os.h Tue Dec 6 16:40:50 2005
@@ -70,7 +70,7 @@
#define __cli() \
do { \
vcpu_info_t *_vcpu; \
- _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
+ _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
_vcpu->evtchn_upcall_mask = 1; \
barrier(); \
} while (0)
@@ -79,7 +79,7 @@
do { \
vcpu_info_t *_vcpu; \
barrier(); \
- _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
+ _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
_vcpu->evtchn_upcall_mask = 0; \
barrier(); /* unmask then check (avoid races) */ \
if ( unlikely(_vcpu->evtchn_upcall_pending) ) \
@@ -89,7 +89,7 @@
#define __save_flags(x) \
do { \
vcpu_info_t *_vcpu; \
- _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
+ _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
(x) = _vcpu->evtchn_upcall_mask; \
} while (0)
@@ -97,7 +97,7 @@
do { \
vcpu_info_t *_vcpu; \
barrier(); \
- _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
+ _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
if ((_vcpu->evtchn_upcall_mask = (x)) == 0) { \
barrier(); /* unmask then check (avoid races) */ \
if ( unlikely(_vcpu->evtchn_upcall_pending) ) \
@@ -110,7 +110,7 @@
#define __save_and_cli(x) \
do { \
vcpu_info_t *_vcpu; \
- _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
+ _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
(x) = _vcpu->evtchn_upcall_mask; \
_vcpu->evtchn_upcall_mask = 1; \
barrier(); \
@@ -123,7 +123,7 @@
#define local_irq_enable() __sti()
#define irqs_disabled() \
- HYPERVISOR_shared_info->vcpu_data[smp_processor_id()].evtchn_upcall_mask
+ HYPERVISOR_shared_info->vcpu_info[smp_processor_id()].evtchn_upcall_mask
/* This is a barrier for the compiler only, NOT the processor! */
#define barrier() __asm__ __volatile__("": : :"memory")
diff -r 95584b819b72 -r 06b80b837c92 extras/mini-os/time.c
--- a/extras/mini-os/time.c Tue Dec 6 16:40:43 2005
+++ b/extras/mini-os/time.c Tue Dec 6 16:40:50 2005
@@ -73,7 +73,7 @@
static inline int time_values_up_to_date(void)
{
- struct vcpu_time_info *src = &HYPERVISOR_shared_info->vcpu_time[0];
+ struct vcpu_time_info *src =
&HYPERVISOR_shared_info->vcpu_info[0].time;
return (shadow.version == src->version);
}
@@ -127,7 +127,7 @@
static void get_time_values_from_xen(void)
{
- struct vcpu_time_info *src = &HYPERVISOR_shared_info->vcpu_time[0];
+ struct vcpu_time_info *src =
&HYPERVISOR_shared_info->vcpu_info[0].time;
do {
shadow.version = src->version;
diff -r 95584b819b72 -r 06b80b837c92 extras/mini-os/x86_32.S
--- a/extras/mini-os/x86_32.S Tue Dec 6 16:40:43 2005
+++ b/extras/mini-os/x86_32.S Tue Dec 6 16:40:50 2005
@@ -3,7 +3,11 @@
.section __xen_guest
- .asciz "XEN_VER=3.0,LOADER=generic,PT_MODE_WRITABLE"
+ .ascii "GUEST_OS=Mini-OS"
+ .ascii ",XEN_VER=xen-3.0"
+ .ascii ",LOADER=generic"
+ .ascii ",PT_MODE_WRITABLE"
+ .byte 0
.text
.globl _start, shared_info
diff -r 95584b819b72 -r 06b80b837c92
linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c
--- a/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c Tue Dec 6
16:40:43 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c Tue Dec 6
16:40:50 2005
@@ -76,9 +76,6 @@
skb_shinfo(_skb)->frag_list = NULL; \
} while (0)
-/* Allow headroom on each rx pkt for Ethernet header, alignment padding, ... */
-#define RX_HEADROOM 200
-
static unsigned long rx_pfn_array[NET_RX_RING_SIZE];
static multicall_entry_t rx_mcl[NET_RX_RING_SIZE+1];
static mmu_update_t rx_mmu[NET_RX_RING_SIZE];
@@ -153,14 +150,15 @@
#endif
#ifdef DEBUG
-#define DPRINTK(fmt, args...) \
- printk(KERN_ALERT "netfront (%s:%d) " fmt, __FUNCTION__, __LINE__,
##args)
+#define DPRINTK(fmt, args...) \
+ printk(KERN_ALERT "netfront (%s:%d) " fmt, __FUNCTION__, \
+ __LINE__, ##args)
#else
#define DPRINTK(fmt, args...) ((void)0)
#endif
-#define IPRINTK(fmt, args...) \
+#define IPRINTK(fmt, args...) \
printk(KERN_INFO "netfront: " fmt, ##args)
-#define WPRINTK(fmt, args...) \
+#define WPRINTK(fmt, args...) \
printk(KERN_WARNING "netfront: " fmt, ##args)
@@ -537,7 +535,13 @@
*/
batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
- skb = alloc_xen_skb(dev->mtu + RX_HEADROOM);
+ /*
+ * Subtract dev_alloc_skb headroom (16 bytes) and shared info
+ * tailroom then round down to SKB_DATA_ALIGN boundary.
+ */
+ skb = alloc_xen_skb(
+ (PAGE_SIZE - 16 - sizeof(struct skb_shared_info)) &
+ (-SKB_DATA_ALIGN(1)));
if (skb == NULL)
break;
__skb_queue_tail(&np->rx_batch, skb);
@@ -567,7 +571,8 @@
rx_pfn_array[i] = virt_to_mfn(skb->head);
/* Remove this page from map before passing back to Xen. */
- set_phys_to_machine(__pa(skb->head) >> PAGE_SHIFT,
INVALID_P2M_ENTRY);
+ set_phys_to_machine(__pa(skb->head) >> PAGE_SHIFT,
+ INVALID_P2M_ENTRY);
MULTI_update_va_mapping(rx_mcl+i, (unsigned long)skb->head,
__pte(0), 0);
@@ -809,36 +814,43 @@
}
while ((skb = __skb_dequeue(&rxq)) != NULL) {
+ if (skb->len > (dev->mtu + ETH_HLEN)) {
+ if (net_ratelimit())
+ printk(KERN_INFO "Received packet too big for "
+ "MTU (%d > %d)\n",
+ skb->len - ETH_HLEN, dev->mtu);
+ skb->len = 0;
+ skb->tail = skb->data;
+ init_skb_shinfo(skb);
+ dev_kfree_skb(skb);
+ continue;
+ }
+
/*
* Enough room in skbuff for the data we were passed? Also,
* Linux expects at least 16 bytes headroom in each rx buffer.
*/
if (unlikely(skb->tail > skb->end) ||
unlikely((skb->data - skb->head) < 16)) {
- nskb = NULL;
-
- /* Only copy the packet if it fits in the MTU. */
- if (skb->len <= (dev->mtu + ETH_HLEN)) {
- if ((skb->tail > skb->end) && net_ratelimit())
+ if (net_ratelimit()) {
+ if (skb->tail > skb->end)
printk(KERN_INFO "Received packet "
- "needs %zd bytes more "
- "headroom.\n",
+ "is %zd bytes beyond tail.\n",
skb->tail - skb->end);
-
- nskb = alloc_xen_skb(skb->len + 2);
- if (nskb != NULL) {
- skb_reserve(nskb, 2);
- skb_put(nskb, skb->len);
- memcpy(nskb->data,
- skb->data,
- skb->len);
- nskb->dev = skb->dev;
- }
+ else
+ printk(KERN_INFO "Received packet "
+ "is %zd bytes before head.\n",
+ 16 - (skb->data - skb->head));
}
- else if (net_ratelimit())
- printk(KERN_INFO "Received packet too big for "
- "MTU (%d > %d)\n",
- skb->len - ETH_HLEN, dev->mtu);
+
+ nskb = alloc_xen_skb(skb->len + 2);
+ if (nskb != NULL) {
+ skb_reserve(nskb, 2);
+ skb_put(nskb, skb->len);
+ memcpy(nskb->data, skb->data, skb->len);
+ nskb->dev = skb->dev;
+ nskb->ip_summed = skb->ip_summed;
+ }
/* Reinitialise and then destroy the old skbuff. */
skb->len = 0;
diff -r 95584b819b72 -r 06b80b837c92 tools/python/xen/xend/server/tpmif.py
--- a/tools/python/xen/xend/server/tpmif.py Tue Dec 6 16:40:43 2005
+++ b/tools/python/xen/xend/server/tpmif.py Tue Dec 6 16:40:50 2005
@@ -50,10 +50,9 @@
result = DevController.configuration(self, devid)
- (instance,) = self.readBackend(devid, 'instance')
+ instance = self.readBackend(devid, 'instance')
if instance:
result.append(['instance', instance])
- log.info("configuration: instance=%d." % instance)
return result
diff -r 95584b819b72 -r 06b80b837c92 xen/arch/x86/setup.c
--- a/xen/arch/x86/setup.c Tue Dec 6 16:40:43 2005
+++ b/xen/arch/x86/setup.c Tue Dec 6 16:40:50 2005
@@ -571,7 +571,7 @@
p += sprintf(p, "xen-%d.%d-x86_64 ", XEN_VERSION, XEN_SUBVERSION);
if ( hvm_enabled )
{
- //p += sprintf(p, "hvm-%d.%d-x86_32 ", XEN_VERSION, XEN_SUBVERSION);
+ p += sprintf(p, "hvm-%d.%d-x86_32 ", XEN_VERSION, XEN_SUBVERSION);
//p += sprintf(p, "hvm-%d.%d-x86_32p ", XEN_VERSION, XEN_SUBVERSION);
p += sprintf(p, "hvm-%d.%d-x86_64 ", XEN_VERSION, XEN_SUBVERSION);
}
@@ -581,7 +581,7 @@
p++;
#endif
-
+
*(p-1) = 0;
BUG_ON((p - info) > sizeof(xen_capabilities_info_t));
diff -r 95584b819b72 -r 06b80b837c92 xen/arch/x86/shadow32.c
--- a/xen/arch/x86/shadow32.c Tue Dec 6 16:40:43 2005
+++ b/xen/arch/x86/shadow32.c Tue Dec 6 16:40:50 2005
@@ -2203,7 +2203,7 @@
}
if ( shadow_mode_external(d) ) {
- if (write_refs-- == 0)
+ if (--write_refs == 0)
return 0;
// Use the back pointer to locate the shadow page that can contain
diff -r 95584b819b72 -r 06b80b837c92 xen/arch/x86/x86_32/domain_page.c
--- a/xen/arch/x86/x86_32/domain_page.c Tue Dec 6 16:40:43 2005
+++ b/xen/arch/x86/x86_32/domain_page.c Tue Dec 6 16:40:50 2005
@@ -40,10 +40,10 @@
cache[i] = l1e_empty();
}
-void *map_domain_page(unsigned long pfn)
+void *map_domain_pages(unsigned long pfn, unsigned int order)
{
unsigned long va;
- unsigned int idx, cpu = smp_processor_id();
+ unsigned int idx, i, flags, cpu = smp_processor_id();
l1_pgentry_t *cache = mapcache;
#ifndef NDEBUG
unsigned int flush_count = 0;
@@ -72,10 +72,15 @@
local_flush_tlb();
shadow_epoch[cpu] = ++epoch;
}
+
+ flags = 0;
+ for ( i = 0; i < (1U << order); i++ )
+ flags |= l1e_get_flags(cache[idx+i]);
}
- while ( l1e_get_flags(cache[idx]) & _PAGE_PRESENT );
+ while ( flags & _PAGE_PRESENT );
- cache[idx] = l1e_from_pfn(pfn, __PAGE_HYPERVISOR);
+ for ( i = 0; i < (1U << order); i++ )
+ cache[idx+i] = l1e_from_pfn(pfn+i, __PAGE_HYPERVISOR);
spin_unlock(&map_lock);
@@ -83,11 +88,12 @@
return (void *)va;
}
-void unmap_domain_page(void *va)
+void unmap_domain_pages(void *va, unsigned int order)
{
- unsigned int idx;
+ unsigned int idx, i;
ASSERT((void *)MAPCACHE_VIRT_START <= va);
ASSERT(va < (void *)MAPCACHE_VIRT_END);
idx = ((unsigned long)va - MAPCACHE_VIRT_START) >> PAGE_SHIFT;
- l1e_add_flags(mapcache[idx], READY_FOR_TLB_FLUSH);
+ for ( i = 0; i < (1U << order); i++ )
+ l1e_add_flags(mapcache[idx+i], READY_FOR_TLB_FLUSH);
}
diff -r 95584b819b72 -r 06b80b837c92 xen/include/xen/domain_page.h
--- a/xen/include/xen/domain_page.h Tue Dec 6 16:40:43 2005
+++ b/xen/include/xen/domain_page.h Tue Dec 6 16:40:50 2005
@@ -10,19 +10,22 @@
#include <xen/config.h>
#include <xen/mm.h>
+#define map_domain_page(pfn) map_domain_pages(pfn,0)
+#define unmap_domain_page(va) unmap_domain_pages(va,0)
+
#ifdef CONFIG_DOMAIN_PAGE
/*
- * Maps a given page frame, returning the mmap'ed virtual address. The page is
- * now accessible until a corresponding call to unmap_domain_page().
+ * Maps a given range of page frames, returning the mapped virtual address. The
+ * pages are now accessible until a corresponding call to unmap_domain_page().
*/
-extern void *map_domain_page(unsigned long pfn);
+extern void *map_domain_pages(unsigned long pfn, unsigned int order);
/*
- * Pass a VA within a page previously mapped with map_domain_page().
- * That page will then be removed from the mapping lists.
+ * Pass a VA within the first page of a range previously mapped with
map_domain_pages(). Those pages will then be removed from the mapping lists.
*/
-extern void unmap_domain_page(void *va);
+extern void unmap_domain_pages(void *va, unsigned int order);
#define DMCACHE_ENTRY_VALID 1U
#define DMCACHE_ENTRY_HELD 2U
@@ -84,8 +87,8 @@
#else /* !CONFIG_DOMAIN_PAGE */
-#define map_domain_page(pfn) phys_to_virt((pfn)<<PAGE_SHIFT)
-#define unmap_domain_page(va) ((void)(va))
+#define map_domain_pages(pfn,order) phys_to_virt((pfn)<<PAGE_SHIFT)
+#define unmap_domain_pages(va,order) ((void)((void)(va),(void)(order)))
struct domain_mmap_cache {
};
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
|