# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 7997d8f162404327c45f59ac4b656e8c83565231
# Parent a19cc748469e99eae7f289f3beb03206982b9ba2
Change semantics of grant transfers for vp guests so that the
operation automatically gets you a fresh page at the same
pseudo-physical address, as Keir suggested.
Signed-off-by: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
diff -r a19cc748469e -r 7997d8f16240
linux-2.6-xen-sparse/drivers/xen/netback/netback.c
--- a/linux-2.6-xen-sparse/drivers/xen/netback/netback.c Sun Apr 30
09:52:59 2006 +0100
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/netback.c Mon May 01
15:28:01 2006 +0100
@@ -235,23 +235,35 @@ static void net_rx_action(unsigned long
vdata = (unsigned long)skb->data;
old_mfn = virt_to_mfn(vdata);
- /* Memory squeeze? Back off for an arbitrary while. */
- if ((new_mfn = alloc_mfn()) == 0) {
- if ( net_ratelimit() )
- WPRINTK("Memory squeeze in netback driver.\n");
- mod_timer(&net_timer, jiffies + HZ);
- skb_queue_head(&rx_queue, skb);
- break;
- }
- /*
- * Set the new P2M table entry before reassigning the old data
- * page. Heed the comment in pgtable-2level.h:pte_page(). :-)
- */
- set_phys_to_machine(__pa(skb->data) >> PAGE_SHIFT, new_mfn);
-
- MULTI_update_va_mapping(mcl, vdata,
- pfn_pte_ma(new_mfn, PAGE_KERNEL), 0);
- mcl++;
+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+ /* Memory squeeze? Back off for an arbitrary while. */
+ if ((new_mfn = alloc_mfn()) == 0) {
+ if ( net_ratelimit() )
+ WPRINTK("Memory squeeze in netback "
+ "driver.\n");
+ mod_timer(&net_timer, jiffies + HZ);
+ skb_queue_head(&rx_queue, skb);
+ break;
+ }
+ /*
+ * Set the new P2M table entry before reassigning
+ * the old data page. Heed the comment in
+ * pgtable-2level.h:pte_page(). :-)
+ */
+ set_phys_to_machine(
+ __pa(skb->data) >> PAGE_SHIFT,
+ new_mfn);
+
+ MULTI_update_va_mapping(mcl, vdata,
+ pfn_pte_ma(new_mfn,
+ PAGE_KERNEL), 0);
+ mcl++;
+
+ mmu->ptr = ((maddr_t)new_mfn << PAGE_SHIFT) |
+ MMU_MACHPHYS_UPDATE;
+ mmu->val = __pa(vdata) >> PAGE_SHIFT;
+ mmu++;
+ }
gop->mfn = old_mfn;
gop->domid = netif->domid;
@@ -260,13 +272,6 @@ static void net_rx_action(unsigned long
netif->rx.req_cons++;
gop++;
- if (!xen_feature(XENFEAT_auto_translated_physmap)) {
- mmu->ptr = ((maddr_t)new_mfn << PAGE_SHIFT) |
- MMU_MACHPHYS_UPDATE;
- mmu->val = __pa(vdata) >> PAGE_SHIFT;
- mmu++;
- }
-
__skb_queue_tail(&rxq, skb);
/* Filled the batch queue? */
@@ -274,22 +279,24 @@ static void net_rx_action(unsigned long
break;
}
- if (mcl == rx_mcl)
- return;
-
- mcl[-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
-
- if (mmu - rx_mmu) {
- mcl->op = __HYPERVISOR_mmu_update;
- mcl->args[0] = (unsigned long)rx_mmu;
- mcl->args[1] = mmu - rx_mmu;
- mcl->args[2] = 0;
- mcl->args[3] = DOMID_SELF;
- mcl++;
- }
-
- ret = HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl);
- BUG_ON(ret != 0);
+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+ if (mcl == rx_mcl)
+ return;
+
+ mcl[-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
+
+ if (mmu - rx_mmu) {
+ mcl->op = __HYPERVISOR_mmu_update;
+ mcl->args[0] = (unsigned long)rx_mmu;
+ mcl->args[1] = mmu - rx_mmu;
+ mcl->args[2] = 0;
+ mcl->args[3] = DOMID_SELF;
+ mcl++;
+ }
+
+ ret = HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl);
+ BUG_ON(ret != 0);
+ }
ret = HYPERVISOR_grant_table_op(GNTTABOP_transfer, grant_rx_op,
gop - grant_rx_op);
@@ -308,8 +315,11 @@ static void net_rx_action(unsigned long
netif->stats.tx_bytes += size;
netif->stats.tx_packets++;
- /* The update_va_mapping() must not fail. */
- BUG_ON(mcl->result != 0);
+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+ /* The update_va_mapping() must not fail. */
+ BUG_ON(mcl->result != 0);
+ mcl++;
+ }
/* Check the reassignment error code. */
status = NETIF_RSP_OKAY;
@@ -340,7 +350,6 @@ static void net_rx_action(unsigned long
netif_put(netif);
dev_kfree_skb(skb);
- mcl++;
gop++;
}
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog