# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1243500802 -3600
# Node ID 4ffa9ad54890a237ed6ddbf25386d27dc92fab8c
# Parent 2ab54cc407616623d36efec300a1dd2ac11b7fc5
Upgrade forcedeth net driver to 0.62 (driver package v1.25)
Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
drivers/net/forcedeth.c | 4448 +++++++++++++++++++++++++++++++++++++-----------
1 files changed, 3459 insertions(+), 989 deletions(-)
diff -r 2ab54cc40761 -r 4ffa9ad54890 drivers/net/forcedeth.c
--- a/drivers/net/forcedeth.c Wed May 27 11:21:00 2009 +0100
+++ b/drivers/net/forcedeth.c Thu May 28 09:53:22 2009 +0100
@@ -108,7 +108,13 @@
* 0.53: 19 Mar 2006: Fix init from low power mode and add hw reset.
* 0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup.
* 0.55: 22 Mar 2006: Add flow control (pause frame).
- * 0.56: 22 Mar 2006: Additional ethtool config and moduleparam support.
+ * 0.56: 22 Mar 2006: Additional ethtool and moduleparam support.
+ * 0.57: 14 May 2006: Moved mac address writes to nv_probe and nv_remove.
+ * 0.58: 20 May 2006: Optimized rx and tx data paths.
+ * 0.59: 31 May 2006: Added support for sideband management unit.
+ * 0.60: 31 May 2006: Added support for recoverable error.
+ * 0.61: 18 Jul 2006: Added support for suspend/resume.
+ * 0.62: 16 Jan 2007: Fixed statistics, mgmt communication, and low phy speed on S5.
*
* Known bugs:
* We suspect that on some hardware no TX done interrupts are generated.
@@ -120,8 +126,9 @@
* DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
* superfluous timer interrupts from the nic.
*/
-#define FORCEDETH_VERSION "0.56"
+#define FORCEDETH_VERSION "0.62-Driver Package V1.25"
#define DRV_NAME "forcedeth"
+#define DRV_DATE "2008/01/30"
#include <linux/module.h>
#include <linux/types.h>
@@ -138,41 +145,351 @@
#include <linux/random.h>
#include <linux/init.h>
#include <linux/if_vlan.h>
+#include <linux/rtnetlink.h>
+#include <linux/reboot.h>
+#include <linux/version.h>
+
+#define RHES3 0
+#define SLES9 1
+#define RHES4 2
+#define SUSE10 3
+#define FEDORA5 4
+#define FEDORA6 5
+#define SLES10U1 5
+#define FEDORA7 6
+#define OPENSUSE10U3 7
+#define NVNEW 8
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22)
+#define NVVER NVNEW
+#elif LINUX_VERSION_CODE > KERNEL_VERSION(2,6,21)
+#define NVVER OPENSUSE10U3
+#elif LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18)
+#define NVVER FEDORA7
+#elif LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17)
+#define NVVER FEDORA6
+#elif LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13)
+#define NVVER FEDORA5
+#elif LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9)
+#define NVVER SUSE10
+#elif LINUX_VERSION_CODE > KERNEL_VERSION(2,6,6)
+#define NVVER RHES4
+#elif LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
+#define NVVER SLES9
+#else
+#define NVVER RHES3
+#endif
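+
+/*
+ * Reading the ladder above: a kernel picks the first branch whose version
+ * it exceeds. Worked through (an illustration, not text from the original
+ * patch): RHEL4's 2.6.9 kernel is not > 2.6.9, so it falls through to the
+ * > 2.6.6 branch and builds as RHES4, while SLES9's 2.6.5 kernel lands in
+ * the > 2.6.0 branch and builds as SLES9.
+ */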
+
+#if NVVER > RHES3
#include <linux/dma-mapping.h>
+#else
+#include <linux/forcedeth-compat.h>
+#endif
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/system.h>
-#if 0
+#ifdef NVLAN_DEBUG
#define dprintk printk
#else
#define dprintk(x...) do { } while (0)
#endif
+#define DPRINTK(nlevel,klevel,args...) (void)((debug & NETIF_MSG_##nlevel) && printk(klevel args))
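+
+/*
+ * Illustrative use of DPRINTK (a hypothetical call, not one taken from
+ * this hunk): the message is emitted only when the matching NETIF_MSG_*
+ * bit is set in the module-level "debug" mask, e.g.
+ *
+ *	DPRINTK(LINK, KERN_INFO, "%s: link up\n", dev->name);
+ *
+ * which compiles to a test of (debug & NETIF_MSG_LINK) before printk().
+ */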
+
+ /* pci_ids.h */
+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_12
+#define PCI_DEVICE_ID_NVIDIA_NVENET_12 0x0268
+#endif
+
+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_13
+#define PCI_DEVICE_ID_NVIDIA_NVENET_13 0x0269
+#endif
+
+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_14
+#define PCI_DEVICE_ID_NVIDIA_NVENET_14 0x0372
+#endif
+
+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_15
+#define PCI_DEVICE_ID_NVIDIA_NVENET_15 0x0373
+#endif
+
+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_16
+#define PCI_DEVICE_ID_NVIDIA_NVENET_16 0x03E5
+#endif
+
+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_17
+#define PCI_DEVICE_ID_NVIDIA_NVENET_17 0x03E6
+#endif
+
+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_18
+#define PCI_DEVICE_ID_NVIDIA_NVENET_18 0x03EE
+#endif
+
+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_19
+#define PCI_DEVICE_ID_NVIDIA_NVENET_19 0x03EF
+#endif
+
+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_20
+#define PCI_DEVICE_ID_NVIDIA_NVENET_20 0x0450
+#endif
+
+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_21
+#define PCI_DEVICE_ID_NVIDIA_NVENET_21 0x0451
+#endif
+
+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_22
+#define PCI_DEVICE_ID_NVIDIA_NVENET_22 0x0452
+#endif
+
+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_23
+#define PCI_DEVICE_ID_NVIDIA_NVENET_23 0x0453
+#endif
+
+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_24
+#define PCI_DEVICE_ID_NVIDIA_NVENET_24 0x054c
+#endif
+
+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_25
+#define PCI_DEVICE_ID_NVIDIA_NVENET_25 0x054d
+#endif
+
+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_26
+#define PCI_DEVICE_ID_NVIDIA_NVENET_26 0x054e
+#endif
+
+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_27
+#define PCI_DEVICE_ID_NVIDIA_NVENET_27 0x054f
+#endif
+
+ /* mii.h */
+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_28
+#define PCI_DEVICE_ID_NVIDIA_NVENET_28 0x07dc
+#endif
+
+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_29
+#define PCI_DEVICE_ID_NVIDIA_NVENET_29 0x07dd
+#endif
+
+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_30
+#define PCI_DEVICE_ID_NVIDIA_NVENET_30 0x07de
+#endif
+
+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_31
+#define PCI_DEVICE_ID_NVIDIA_NVENET_31 0x07df
+#endif
+
+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_32
+#define PCI_DEVICE_ID_NVIDIA_NVENET_32 0x0760
+#endif
+
+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_33
+#define PCI_DEVICE_ID_NVIDIA_NVENET_33 0x0761
+#endif
+
+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_34
+#define PCI_DEVICE_ID_NVIDIA_NVENET_34 0x0762
+#endif
+
+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_35
+#define PCI_DEVICE_ID_NVIDIA_NVENET_35 0x0763
+#endif
+
+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_36
+#define PCI_DEVICE_ID_NVIDIA_NVENET_36 0x0AB0
+#endif
+
+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_37
+#define PCI_DEVICE_ID_NVIDIA_NVENET_37 0x0AB1
+#endif
+
+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_38
+#define PCI_DEVICE_ID_NVIDIA_NVENET_38 0x0AB2
+#endif
+
+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_39
+#define PCI_DEVICE_ID_NVIDIA_NVENET_39 0x0AB3
+#endif
+
+#ifndef ADVERTISE_1000HALF
+#define ADVERTISE_1000HALF 0x0100
+#endif
+#ifndef ADVERTISE_1000FULL
+#define ADVERTISE_1000FULL 0x0200
+#endif
+#ifndef ADVERTISE_PAUSE_CAP
+#define ADVERTISE_PAUSE_CAP 0x0400
+#endif
+#ifndef ADVERTISE_PAUSE_ASYM
+#define ADVERTISE_PAUSE_ASYM 0x0800
+#endif
+#ifndef MII_CTRL1000
+#define MII_CTRL1000 0x09
+#endif
+#ifndef MII_STAT1000
+#define MII_STAT1000 0x0A
+#endif
+#ifndef LPA_1000FULL
+#define LPA_1000FULL 0x0800
+#endif
+#ifndef LPA_1000HALF
+#define LPA_1000HALF 0x0400
+#endif
+#ifndef LPA_PAUSE_CAP
+#define LPA_PAUSE_CAP 0x0400
+#endif
+#ifndef LPA_PAUSE_ASYM
+#define LPA_PAUSE_ASYM 0x0800
+#endif
+#ifndef BMCR_SPEED1000
+#define BMCR_SPEED1000 0x0040 /* MSB of Speed (1000) */
+#endif
+
+#ifndef NETDEV_TX_OK
+#define NETDEV_TX_OK 0 /* driver took care of packet */
+#endif
+
+#ifndef NETDEV_TX_BUSY
+#define NETDEV_TX_BUSY 1 /* driver tx path was busy */
+#endif
+
+#ifndef DMA_39BIT_MASK
+#define DMA_39BIT_MASK 0x0000007fffffffffULL
+#endif
+
+#ifndef __iomem
+#define __iomem
+#endif
+
+#ifndef __bitwise
+#define __bitwise
+#endif
+
+#ifndef __force
+#define __force
+#endif
+
+#ifndef PCI_D0
+#define PCI_D0 ((int __bitwise __force) 0)
+#endif
+
+#ifndef PM_EVENT_SUSPEND
+#define PM_EVENT_SUSPEND 2
+#endif
+
+#ifndef MODULE_VERSION
+#define MODULE_VERSION(ver)
+#endif
+
+#if NVVER > FEDORA6
+#define CHECKSUM_HW CHECKSUM_PARTIAL
+#endif
+
+#if NVVER < SUSE10
+#define pm_message_t u32
+#endif
+
 /* rx/tx mac addr + type + vlan + align + slack */
+#ifndef RX_NIC_BUFSIZE
+#define RX_NIC_BUFSIZE (ETH_DATA_LEN + 64)
+#endif
+ /* even more slack */
+#ifndef RX_ALLOC_BUFSIZE
+#define RX_ALLOC_BUFSIZE (ETH_DATA_LEN + 128)
+#endif
+
+#ifndef PCI_DEVICE
+#define PCI_DEVICE(vend,dev) \
+ .vendor = (vend), .device = (dev), \
+ .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
+#endif
+
+#if NVVER < RHES4
+ struct msix_entry {
+ u16 vector; /* kernel uses to write allocated vector */
+ u16 entry; /* driver uses to specify entry, OS writes */
+ };
+#endif
+
+#ifndef PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET
+#define PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET 0x00
+#endif
+
+#ifndef PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET
+#define PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET 0x04
+#endif
+
+#ifndef PCI_MSIX_ENTRY_DATA_OFFSET
+#define PCI_MSIX_ENTRY_DATA_OFFSET 0x08
+#endif
+
+#ifndef PCI_MSIX_ENTRY_SIZE
+#define PCI_MSIX_ENTRY_SIZE 16
+#endif
+
+#ifndef PCI_MSIX_FLAGS_BIRMASK
+#define PCI_MSIX_FLAGS_BIRMASK (7 << 0)
+#endif
+
+#ifndef PCI_CAP_ID_MSIX
+#define PCI_CAP_ID_MSIX 0x11
+#endif
+
+#if NVVER > FEDORA7
+#define IRQ_FLAG IRQF_SHARED
+#else
+#define IRQ_FLAG SA_SHIRQ
+#endif
/*
* Hardware access:
*/
-#define DEV_NEED_TIMERIRQ	0x0001	/* set the timer irq flag in the irq mask */
-#define DEV_NEED_LINKTIMER	0x0002	/* poll link settings. Relies on the timer irq */
-#define DEV_HAS_LARGEDESC	0x0004	/* device supports jumbo frames and needs packet format 2 */
-#define DEV_HAS_HIGH_DMA	0x0008	/* device supports 64bit dma */
-#define DEV_HAS_CHECKSUM	0x0010	/* device supports tx and rx checksum offloads */
-#define DEV_HAS_VLAN		0x0020	/* device supports vlan tagging and striping */
-#define DEV_HAS_MSI		0x0040	/* device supports MSI */
-#define DEV_HAS_MSI_X		0x0080	/* device supports MSI-X */
-#define DEV_HAS_POWER_CNTRL	0x0100	/* device supports power savings */
-#define DEV_HAS_PAUSEFRAME_TX	0x0200	/* device supports tx pause frames */
-#define DEV_HAS_STATISTICS	0x0400	/* device supports hw statistics */
-#define DEV_HAS_TEST_EXTENDED	0x0800	/* device supports extended diagnostic test */
+#define DEV_NEED_TIMERIRQ	0x00001	/* set the timer irq flag in the irq mask */
+#define DEV_NEED_LINKTIMER	0x00002	/* poll link settings. Relies on the timer irq */
+#define DEV_HAS_LARGEDESC	0x00004	/* device supports jumbo frames and needs packet format 2 */
+#define DEV_HAS_HIGH_DMA	0x00008	/* device supports 64bit dma */
+#define DEV_HAS_CHECKSUM	0x00010	/* device supports tx and rx checksum offloads */
+#define DEV_HAS_VLAN		0x00020	/* device supports vlan tagging and striping */
+#define DEV_HAS_MSI		0x00040	/* device supports MSI */
+#define DEV_HAS_MSI_X		0x00080	/* device supports MSI-X */
+#define DEV_HAS_POWER_CNTRL	0x00100	/* device supports power savings */
+#define DEV_HAS_STATISTICS_V1	0x00200	/* device supports hw statistics version 1 */
+#define DEV_HAS_STATISTICS_V2	0x00400	/* device supports hw statistics version 2 */
+#define DEV_HAS_TEST_EXTENDED	0x00800	/* device supports extended diagnostic test */
+#define DEV_HAS_MGMT_UNIT	0x01000	/* device supports management unit */
+#define DEV_HAS_CORRECT_MACADDR	0x02000	/* device supports correct mac address */
+#define DEV_HAS_COLLISION_FIX	0x04000	/* device supports tx collision fix */
+#define DEV_HAS_PAUSEFRAME_TX_V1	0x08000	/* device supports tx pause frames version 1 */
+#define DEV_HAS_PAUSEFRAME_TX_V2	0x10000	/* device supports tx pause frames version 2 */
+#define DEV_HAS_PAUSEFRAME_TX_V3	0x20000	/* device supports tx pause frames version 3 */
+
+
+#define NVIDIA_ETHERNET_ID(deviceid,nv_driver_data) {\
+ .vendor = PCI_VENDOR_ID_NVIDIA, \
+ .device = deviceid, \
+ .subvendor = PCI_ANY_ID, \
+ .subdevice = PCI_ANY_ID, \
+ .driver_data = nv_driver_data, \
+},
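+
+/*
+ * Hypothetical expansion of the macro above inside the driver's
+ * pci_device_id table (the device id and feature flags here are
+ * examples, not entries quoted from this patch):
+ *
+ *	static struct pci_device_id pci_tbl[] = {
+ *		NVIDIA_ETHERNET_ID(PCI_DEVICE_ID_NVIDIA_NVENET_12,
+ *			DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA)
+ *		{0,},
+ *	};
+ */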
+
+#define Mv_LED_Control 16
+#define Mv_Page_Address 22
+#define Mv_LED_FORCE_OFF 0x88
+#define Mv_LED_DUAL_MODE3 0x40
+
+struct nvmsi_msg{
+ u32 address_lo;
+ u32 address_hi;
+ u32 data;
+};
enum {
NvRegIrqStatus = 0x000,
#define NVREG_IRQSTAT_MIIEVENT 0x040
-#define NVREG_IRQSTAT_MASK 0x1ff
+#define NVREG_IRQSTAT_MASK 0x81ff
NvRegIrqMask = 0x004,
#define NVREG_IRQ_RX_ERROR 0x0001
#define NVREG_IRQ_RX 0x0002
@@ -183,23 +500,24 @@ enum {
#define NVREG_IRQ_LINK 0x0040
#define NVREG_IRQ_RX_FORCED 0x0080
#define NVREG_IRQ_TX_FORCED 0x0100
+#define NVREG_IRQ_RECOVER_ERROR 0x8000
#define NVREG_IRQMASK_THROUGHPUT 0x00df
-#define NVREG_IRQMASK_CPU 0x0040
+#define NVREG_IRQMASK_CPU 0x0060
#define NVREG_IRQ_TX_ALL		(NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
#define NVREG_IRQ_RX_ALL		(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
-#define NVREG_IRQ_OTHER			(NVREG_IRQ_TIMER|NVREG_IRQ_LINK)
+#define NVREG_IRQ_OTHER			(NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR)
#define NVREG_IRQ_UNKNOWN		(~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \
-					NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RX_FORCED| \
-					NVREG_IRQ_TX_FORCED))
+					NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RX_FORCED| \
+					NVREG_IRQ_TX_FORCED|NVREG_IRQ_RECOVER_ERROR))
NvRegUnknownSetupReg6 = 0x008,
#define NVREG_UNKSETUP6_VAL 3
-/*
- * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
- * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
- */
+	/*
+	 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
+	 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
+	 */
NvRegPollingInterval = 0x00c,
#define NVREG_POLL_DEFAULT_THROUGHPUT 970
#define NVREG_POLL_DEFAULT_CPU 13
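+/*
+ * By the 97-units-per-millisecond scaling documented above, the
+ * throughput default (970) polls roughly every 10 ms, while the
+ * cpu-mode default (13) polls roughly every 0.13 ms.
+ */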
@@ -212,10 +530,20 @@ enum {
#define NVREG_MISC1_HD 0x02
#define NVREG_MISC1_FORCE 0x3b0f3c
- NvRegMacReset = 0x3c,
+ NvRegMacReset = 0x34,
#define NVREG_MAC_RESET_ASSERT 0x0F3
NvRegTransmitterControl = 0x084,
#define NVREG_XMITCTL_START 0x01
+#define NVREG_XMITCTL_MGMT_ST 0x40000000
+#define NVREG_XMITCTL_SYNC_MASK 0x000f0000
+#define NVREG_XMITCTL_SYNC_NOT_READY 0x0
+#define NVREG_XMITCTL_SYNC_PHY_INIT 0x00040000
+#define NVREG_XMITCTL_MGMT_SEMA_MASK 0x00000f00
+#define NVREG_XMITCTL_MGMT_SEMA_FREE 0x0
+#define NVREG_XMITCTL_HOST_SEMA_MASK 0x0000f000
+#define NVREG_XMITCTL_HOST_SEMA_ACQ 0x0000f000
+#define NVREG_XMITCTL_HOST_LOADED 0x00004000
+#define NVREG_XMITCTL_TX_PATH_EN 0x01000000
NvRegTransmitterStatus = 0x088,
#define NVREG_XMITSTAT_BUSY 0x01
@@ -231,6 +559,7 @@ enum {
#define NVREG_OFFLOAD_NORMAL RX_NIC_BUFSIZE
NvRegReceiverControl = 0x094,
#define NVREG_RCVCTL_START 0x01
+#define NVREG_RCVCTL_RX_PATH_EN 0x01000000
NvRegReceiverStatus = 0x98,
#define NVREG_RCVSTAT_BUSY 0x01
@@ -241,7 +570,7 @@ enum {
#define NVREG_RNDSEED_FORCE3 0x7400
NvRegTxDeferral = 0xA0,
-#define NVREG_TX_DEFERRAL_DEFAULT 0x15050f
+#define NVREG_TX_DEFERRAL_DEFAULT 0x15050f
#define NVREG_TX_DEFERRAL_RGMII_10_100 0x16070f
#define NVREG_TX_DEFERRAL_RGMII_1000 0x14050f
NvRegRxDeferral = 0xA4,
@@ -252,7 +581,9 @@ enum {
#define NVREG_MCASTADDRA_FORCE 0x01
NvRegMulticastAddrB = 0xB4,
NvRegMulticastMaskA = 0xB8,
+#define NVREG_MCASTMASKA_NONE 0xffffffff
NvRegMulticastMaskB = 0xBC,
+#define NVREG_MCASTMASKB_NONE 0xffff
NvRegPhyInterface = 0xC0,
#define PHY_RGMII 0x10000000
@@ -262,7 +593,8 @@ enum {
NvRegRingSizes = 0x108,
#define NVREG_RINGSZ_TXSHIFT 0
#define NVREG_RINGSZ_RXSHIFT 16
- NvRegUnknownTransmitterReg = 0x10c,
+ NvRegTransmitPoll = 0x10c,
+#define NVREG_TRANSMITPOLL_MAC_ADDR_REV 0x00008000
NvRegLinkSpeed = 0x110,
#define NVREG_LINKSPEED_FORCE 0x10000
#define NVREG_LINKSPEED_10 1000
@@ -283,22 +615,24 @@ enum {
#define NVREG_TXRXCTL_RESET 0x0010
#define NVREG_TXRXCTL_RXCHECK 0x0400
#define NVREG_TXRXCTL_DESC_1 0
-#define NVREG_TXRXCTL_DESC_2 0x02100
-#define NVREG_TXRXCTL_DESC_3 0x02200
+#define NVREG_TXRXCTL_DESC_2 0x002100
+#define NVREG_TXRXCTL_DESC_3 0xc02200
#define NVREG_TXRXCTL_VLANSTRIP 0x00040
#define NVREG_TXRXCTL_VLANINS 0x00080
NvRegTxRingPhysAddrHigh = 0x148,
NvRegRxRingPhysAddrHigh = 0x14C,
NvRegTxPauseFrame = 0x170,
-#define NVREG_TX_PAUSEFRAME_DISABLE 0x1ff0080
-#define NVREG_TX_PAUSEFRAME_ENABLE 0x0c00030
+#define NVREG_TX_PAUSEFRAME_DISABLE 0x01ff0080
+#define NVREG_TX_PAUSEFRAME_ENABLE_V1 0x01800010
+#define NVREG_TX_PAUSEFRAME_ENABLE_V2 0x056003f0
+#define NVREG_TX_PAUSEFRAME_ENABLE_V3 0x09f00880
NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR 0x0001
#define NVREG_MIISTAT_LINKCHANGE 0x0008
-#define NVREG_MIISTAT_MASK 0x000f
-#define NVREG_MIISTAT_MASK2 0x000f
- NvRegUnknownSetupReg4 = 0x184,
-#define NVREG_UNKSETUP4_VAL 8
+#define NVREG_MIISTAT_MASK_RW 0x0007
+#define NVREG_MIISTAT_MASK_ALL 0x000f
+ NvRegMIIMask = 0x184,
+#define NVREG_MII_LINKCHANGE 0x0008
NvRegAdapterControl = 0x188,
#define NVREG_ADAPTCTL_START 0x02
@@ -328,6 +662,7 @@ enum {
#define NVREG_WAKEUPFLAGS_ENABLE 0x1111
NvRegPatternCRC = 0x204,
+#define NV_UNKNOWN_VAL 0x01
NvRegPatternMask = 0x208,
NvRegPowerCap = 0x268,
#define NVREG_POWERCAP_D3SUPP (1<<30)
@@ -368,6 +703,7 @@ enum {
NvRegTxPause = 0x2e0,
NvRegRxPause = 0x2e4,
NvRegRxDropFrame = 0x2e8,
+
NvRegVlanControl = 0x300,
#define NVREG_VLANCONTROL_ENABLE 0x2000
NvRegMSIXMap0 = 0x3e0,
@@ -409,7 +745,7 @@ typedef union _ring_type {
#define NV_TX_CARRIERLOST (1<<27)
#define NV_TX_LATECOLLISION (1<<28)
#define NV_TX_UNDERFLOW (1<<29)
-#define NV_TX_ERROR (1<<30)
+#define NV_TX_ERROR (1<<30) /* logical OR of all errors */
#define NV_TX_VALID (1<<31)
#define NV_TX2_LASTPACKET (1<<29)
@@ -420,7 +756,7 @@ typedef union _ring_type {
#define NV_TX2_LATECOLLISION (1<<27)
#define NV_TX2_UNDERFLOW (1<<28)
/* error and valid are the same for both */
-#define NV_TX2_ERROR (1<<30)
+#define NV_TX2_ERROR (1<<30) /* logical OR of all errors */
#define NV_TX2_VALID (1<<31)
#define NV_TX2_TSO (1<<28)
#define NV_TX2_TSO_SHIFT 14
@@ -441,13 +777,13 @@ typedef union _ring_type {
#define NV_RX_CRCERR (1<<27)
#define NV_RX_OVERFLOW (1<<28)
#define NV_RX_FRAMINGERR (1<<29)
-#define NV_RX_ERROR (1<<30)
+#define NV_RX_ERROR (1<<30) /* logical OR of all errors */
#define NV_RX_AVAIL (1<<31)
#define NV_RX2_CHECKSUMMASK (0x1C000000)
-#define NV_RX2_CHECKSUMOK1 (0x10000000)
-#define NV_RX2_CHECKSUMOK2 (0x14000000)
-#define NV_RX2_CHECKSUMOK3 (0x18000000)
+#define NV_RX2_CHECKSUM_IP (0x10000000)
+#define NV_RX2_CHECKSUM_IP_TCP (0x14000000)
+#define NV_RX2_CHECKSUM_IP_UDP (0x18000000)
#define NV_RX2_DESCRIPTORVALID (1<<29)
#define NV_RX2_SUBSTRACT1 (1<<25)
#define NV_RX2_ERROR1 (1<<18)
@@ -458,7 +794,7 @@ typedef union _ring_type {
#define NV_RX2_OVERFLOW (1<<23)
#define NV_RX2_FRAMINGERR (1<<24)
/* error and avail are the same for both */
-#define NV_RX2_ERROR (1<<30)
+#define NV_RX2_ERROR (1<<30) /* logical OR of all errors */
#define NV_RX2_AVAIL (1<<31)
#define NV_RX3_VLAN_TAG_PRESENT (1<<16)
@@ -466,7 +802,8 @@ typedef union _ring_type {
 /* Miscellaneous hardware related defines: */
#define NV_PCI_REGSZ_VER1 0x270
-#define NV_PCI_REGSZ_VER2 0x604
+#define NV_PCI_REGSZ_VER2 0x2d4
+#define NV_PCI_REGSZ_VER3 0x604
/* various timeout delays: all in usec */
#define NV_TXRX_RESET_DELAY 4
@@ -492,12 +829,12 @@ typedef union _ring_type {
#define NV_WATCHDOG_TIMEO (5*HZ)
#define RX_RING_DEFAULT 128
-#define TX_RING_DEFAULT 256
-#define RX_RING_MIN 128
-#define TX_RING_MIN 64
+#define TX_RING_DEFAULT 64
+#define RX_RING_MIN RX_RING_DEFAULT
+#define TX_RING_MIN TX_RING_DEFAULT
#define RING_MAX_DESC_VER_1 1024
#define RING_MAX_DESC_VER_2_3 16384
-/*
+/*
* Difference between the get and put pointers for the tx ring.
* This is used to throttle the amount of data outstanding in the
* tx ring.
@@ -518,7 +855,7 @@ typedef union _ring_type {
#define LINK_TIMEOUT (3*HZ)
#define STATS_INTERVAL (10*HZ)
-/*
+/*
* desc_ver values:
* The nic supports three different descriptor types:
* - DESC_VER_1: Original
@@ -532,16 +869,46 @@ typedef union _ring_type {
/* PHY defines */
#define PHY_OUI_MARVELL 0x5043
#define PHY_OUI_CICADA 0x03f1
+#define PHY_OUI_VITESSE 0x01c1
+#define PHY_OUI_REALTEK 0x0732
#define PHYID1_OUI_MASK 0x03ff
#define PHYID1_OUI_SHFT 6
#define PHYID2_OUI_MASK 0xfc00
#define PHYID2_OUI_SHFT 10
-#define PHY_INIT1 0x0f000
-#define PHY_INIT2 0x0e00
-#define PHY_INIT3 0x01000
-#define PHY_INIT4 0x0200
-#define PHY_INIT5 0x0004
-#define PHY_INIT6 0x02000
+#define PHYID2_MODEL_MASK 0x03f0
+#define PHY_MODEL_MARVELL_E3016 0x220
+#define PHY_MODEL_MARVELL_E1011 0xb0
+#define PHY_MARVELL_E3016_INITMASK 0x0300
+#define PHY_CICADA_INIT1 0x0f000
+#define PHY_CICADA_INIT2 0x0e00
+#define PHY_CICADA_INIT3 0x01000
+#define PHY_CICADA_INIT4 0x0200
+#define PHY_CICADA_INIT5 0x0004
+#define PHY_CICADA_INIT6 0x02000
+#define PHY_VITESSE_INIT_REG1 0x1f
+#define PHY_VITESSE_INIT_REG2 0x10
+#define PHY_VITESSE_INIT_REG3 0x11
+#define PHY_VITESSE_INIT_REG4 0x12
+#define PHY_VITESSE_INIT_MSK1 0xc
+#define PHY_VITESSE_INIT_MSK2 0x0180
+#define PHY_VITESSE_INIT1 0x52b5
+#define PHY_VITESSE_INIT2 0xaf8a
+#define PHY_VITESSE_INIT3 0x8
+#define PHY_VITESSE_INIT4 0x8f8a
+#define PHY_VITESSE_INIT5 0xaf86
+#define PHY_VITESSE_INIT6 0x8f86
+#define PHY_VITESSE_INIT7 0xaf82
+#define PHY_VITESSE_INIT8 0x0100
+#define PHY_VITESSE_INIT9 0x8f82
+#define PHY_VITESSE_INIT10 0x0
+#define PHY_REALTEK_INIT_REG1 0x1f
+#define PHY_REALTEK_INIT_REG2 0x19
+#define PHY_REALTEK_INIT_REG3 0x13
+#define PHY_REALTEK_INIT1 0x0000
+#define PHY_REALTEK_INIT2 0x8e00
+#define PHY_REALTEK_INIT3 0x0001
+#define PHY_REALTEK_INIT4 0xad17
+
#define PHY_GIGABIT 0x0100
#define PHY_TIMEOUT 0x1
@@ -572,74 +939,97 @@ typedef union _ring_type {
#define NV_MSI_X_VECTOR_TX 0x1
#define NV_MSI_X_VECTOR_OTHER 0x2
-/* statistics */
+#define NV_RESTART_TX 0x1
+#define NV_RESTART_RX 0x2
+#define NVLAN_DISABLE_ALL_FEATURES do { \
+ msi = NV_MSI_INT_DISABLED; \
+ msix = NV_MSIX_INT_DISABLED; \
+ scatter_gather = NV_SCATTER_GATHER_DISABLED; \
+ tso_offload = NV_TSO_DISABLED; \
+ tx_checksum_offload = NV_TX_CHECKSUM_DISABLED; \
+ rx_checksum_offload = NV_RX_CHECKSUM_DISABLED; \
+ tx_flow_control = NV_TX_FLOW_CONTROL_DISABLED; \
+ rx_flow_control = NV_RX_FLOW_CONTROL_DISABLED; \
+ wol = NV_WOL_DISABLED; \
+ tagging_8021pq = NV_8021PQ_DISABLED; \
+} while (0)
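+
+/*
+ * NVLAN_DISABLE_ALL_FEATURES resets the feature module parameters (msi,
+ * msix, scatter_gather, tso_offload, the checksum and flow control knobs,
+ * wol and 802.1pq tagging) to their disabled values in one shot. A
+ * hypothetical call site (the guard condition is an illustration, not
+ * code from this patch):
+ *
+ *	if (board_is_quirky)
+ *		NVLAN_DISABLE_ALL_FEATURES;
+ */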
+
struct nv_ethtool_str {
char name[ETH_GSTRING_LEN];
};
static const struct nv_ethtool_str nv_estats_str[] = {
+ { "tx_dropped" },
+ { "tx_fifo_errors" },
+ { "tx_carrier_errors" },
+ { "tx_packets" },
{ "tx_bytes" },
+ { "rx_crc_errors" },
+ { "rx_over_errors" },
+ { "rx_errors_total" },
+ { "rx_packets" },
+ { "rx_bytes" },
+
+ /* hardware counters */
{ "tx_zero_rexmt" },
{ "tx_one_rexmt" },
{ "tx_many_rexmt" },
{ "tx_late_collision" },
- { "tx_fifo_errors" },
- { "tx_carrier_errors" },
{ "tx_excess_deferral" },
{ "tx_retry_error" },
- { "tx_deferral" },
- { "tx_packets" },
- { "tx_pause" },
{ "rx_frame_error" },
{ "rx_extra_byte" },
{ "rx_late_collision" },
{ "rx_runt" },
{ "rx_frame_too_long" },
- { "rx_over_errors" },
- { "rx_crc_errors" },
{ "rx_frame_align_error" },
{ "rx_length_error" },
{ "rx_unicast" },
{ "rx_multicast" },
{ "rx_broadcast" },
- { "rx_bytes" },
+ { "tx_deferral" },
+ { "tx_pause" },
{ "rx_pause" },
- { "rx_drop_frame" },
- { "rx_packets" },
- { "rx_errors_total" }
+ { "rx_drop_frame" }
};
struct nv_ethtool_stats {
+ u64 tx_dropped;
+ u64 tx_fifo_errors;
+ u64 tx_carrier_errors;
+ u64 tx_packets;
u64 tx_bytes;
+ u64 rx_crc_errors;
+ u64 rx_over_errors;
+ u64 rx_errors_total;
+ u64 rx_packets;
+ u64 rx_bytes;
+
+ /* hardware counters */
u64 tx_zero_rexmt;
u64 tx_one_rexmt;
u64 tx_many_rexmt;
u64 tx_late_collision;
- u64 tx_fifo_errors;
- u64 tx_carrier_errors;
u64 tx_excess_deferral;
u64 tx_retry_error;
- u64 tx_deferral;
- u64 tx_packets;
- u64 tx_pause;
u64 rx_frame_error;
u64 rx_extra_byte;
u64 rx_late_collision;
u64 rx_runt;
u64 rx_frame_too_long;
- u64 rx_over_errors;
- u64 rx_crc_errors;
u64 rx_frame_align_error;
u64 rx_length_error;
u64 rx_unicast;
u64 rx_multicast;
u64 rx_broadcast;
- u64 rx_bytes;
+ u64 tx_deferral;
+ u64 tx_pause;
u64 rx_pause;
u64 rx_drop_frame;
- u64 rx_packets;
- u64 rx_errors_total;
};
+#define NV_DEV_STATISTICS_V2_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
+#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 4)
+#define NV_DEV_STATISTICS_SW_COUNT 10
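+/*
+ * Derivation of the counts above: V2 covers every u64 member of struct
+ * nv_ethtool_stats; V1 is "- 4" presumably because that hardware lacks
+ * the last four counters (tx_deferral, tx_pause, rx_pause,
+ * rx_drop_frame); the first ten members are software-maintained, hence
+ * SW_COUNT 10.
+ */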
/* diagnostics */
#define NV_TEST_COUNT_BASE 3
@@ -667,20 +1057,63 @@ static const struct register_test nv_reg
{ 0,0 }
};
+struct nv_skb_map {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ unsigned int dma_len;
+};
+
/*
* SMP locking:
* All hardware access under dev->priv->lock, except the performance
* critical parts:
* - rx is (pseudo-) lockless: it relies on the single-threading provided
* by the arch code for interrupts.
- * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
+ * - tx setup is lockless: it relies on dev->xmit_lock. Actual submission
* needs dev->priv->lock :-(
- * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
+ * - set_multicast_list: preparation lockless, relies on dev->xmit_lock.
*/
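+
+/*
+ * Concretely (a sketch of the rule above, not code from this patch): a
+ * slow-path reconfiguration takes the tx lock and then the private lock,
+ * while the rx fast path takes neither:
+ *
+ *	spin_lock_bh(&dev->xmit_lock);		serialize against tx setup
+ *	spin_lock_irq(&np->lock);		then the hardware lock
+ *	... reconfigure ...
+ *	spin_unlock_irq(&np->lock);
+ *	spin_unlock_bh(&dev->xmit_lock);
+ */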
/* in dev: base, irq */
struct fe_priv {
+
+ /* fields used in fast path are grouped together
+ for better cache performance
+ */
spinlock_t lock;
+ spinlock_t timer_lock;
+ void __iomem *base;
+ struct pci_dev *pci_dev;
+ u32 txrxctl_bits;
+ int stop_tx;
+ int need_linktimer;
+ unsigned long link_timeout;
+ u32 irqmask;
+ u32 msi_flags;
+
+ unsigned int rx_buf_sz;
+ struct vlan_group *vlangrp;
+ int tx_ring_size;
+ int rx_csum;
+
+ /*
+ * rx specific fields in fast path
+ */
+ ring_type get_rx __attribute__((aligned(L1_CACHE_BYTES)));
+ ring_type put_rx, first_rx, last_rx;
+ struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
+ struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
+
+ /*
+ * tx specific fields in fast path
+ */
+ ring_type get_tx __attribute__((aligned(L1_CACHE_BYTES)));
+ ring_type put_tx, first_tx, last_tx;
+ struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
+ struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
+
+ struct nv_skb_map *rx_skb;
+ struct nv_skb_map *tx_skb;
/* General data:
* Locking: spin_lock(&np->lock); */
@@ -689,69 +1122,60 @@ struct fe_priv {
int in_shutdown;
u32 linkspeed;
int duplex;
+ int speed_duplex;
int autoneg;
int fixed_mode;
int phyaddr;
int wolenabled;
unsigned int phy_oui;
+ unsigned int phy_model;
u16 gigabit;
int intr_test;
+ int recover_error;
/* General data: RO fields */
dma_addr_t ring_addr;
- struct pci_dev *pci_dev;
u32 orig_mac[2];
- u32 irqmask;
u32 desc_ver;
- u32 txrxctl_bits;
u32 vlanctl_bits;
u32 driver_data;
u32 register_size;
-
- void __iomem *base;
+ u32 mac_in_use;
/* rx specific fields.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
*/
ring_type rx_ring;
- unsigned int cur_rx, refill_rx;
- struct sk_buff **rx_skbuff;
- dma_addr_t *rx_dma;
- unsigned int rx_buf_sz;
unsigned int pkt_limit;
struct timer_list oom_kick;
struct timer_list nic_poll;
struct timer_list stats_poll;
u32 nic_poll_irq;
int rx_ring_size;
-
- /* media detection workaround.
- * Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
- */
- int need_linktimer;
- unsigned long link_timeout;
+ u32 rx_len_errors;
/*
* tx specific fields.
*/
ring_type tx_ring;
- unsigned int next_tx, nic_tx;
- struct sk_buff **tx_skbuff;
- dma_addr_t *tx_dma;
- unsigned int *tx_dma_len;
u32 tx_flags;
- int tx_ring_size;
int tx_limit_start;
int tx_limit_stop;
- /* vlan fields */
- struct vlan_group *vlangrp;
/* msi/msi-x fields */
- u32 msi_flags;
struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];
/* flow control */
u32 pause_flags;
+ u32 led_stats[3];
+ u32 saved_config_space[64];
+ u32 saved_nvregphyinterface;
+#if NVVER < SUSE10
+ u32 pci_state[16];
+#endif
+ /* msix table */
+ struct nvmsi_msg nvmsg[NV_MSI_X_MAX_VECTORS];
+ unsigned long msix_pa_addr;
};
/*
@@ -762,12 +1186,12 @@ static int max_interrupt_work = 5;
/*
 * Optimization can be either throughput mode or cpu mode
- *
+ *
* Throughput Mode: Every tx and rx packet will generate an interrupt.
* CPU Mode: Interrupts are controlled by a timer.
*/
enum {
- NV_OPTIMIZATION_MODE_THROUGHPUT,
+ NV_OPTIMIZATION_MODE_THROUGHPUT,
NV_OPTIMIZATION_MODE_CPU
};
static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
@@ -788,16 +1212,112 @@ enum {
NV_MSI_INT_DISABLED,
NV_MSI_INT_ENABLED
};
+
+#ifdef CONFIG_PCI_MSI
static int msi = NV_MSI_INT_ENABLED;
+#else
+static int msi = NV_MSI_INT_DISABLED;
+#endif
/*
* MSIX interrupts
*/
enum {
- NV_MSIX_INT_DISABLED,
+ NV_MSIX_INT_DISABLED,
NV_MSIX_INT_ENABLED
};
+
+#ifdef CONFIG_PCI_MSI
static int msix = NV_MSIX_INT_ENABLED;
+#else
+static int msix = NV_MSIX_INT_DISABLED;
+#endif
+/*
+ * PHY Speed and Duplex
+ */
+enum {
+ NV_SPEED_DUPLEX_AUTO,
+ NV_SPEED_DUPLEX_10_HALF_DUPLEX,
+ NV_SPEED_DUPLEX_10_FULL_DUPLEX,
+ NV_SPEED_DUPLEX_100_HALF_DUPLEX,
+ NV_SPEED_DUPLEX_100_FULL_DUPLEX,
+ NV_SPEED_DUPLEX_1000_FULL_DUPLEX
+};
+static int speed_duplex = NV_SPEED_DUPLEX_AUTO;
+
+/*
+ * PHY autonegotiation
+ */
+static int autoneg = AUTONEG_ENABLE;
+
+/*
+ * Scatter gather
+ */
+enum {
+ NV_SCATTER_GATHER_DISABLED,
+ NV_SCATTER_GATHER_ENABLED
+};
+static int scatter_gather = NV_SCATTER_GATHER_ENABLED;
+
+/*
+ * TCP Segmentation Offload (TSO)
+ */
+enum {
+ NV_TSO_DISABLED,
+ NV_TSO_ENABLED
+};
+static int tso_offload = NV_TSO_ENABLED;
+
+/*
+ * MTU settings
+ */
+static int mtu = ETH_DATA_LEN;
+
+/*
+ * Tx checksum offload
+ */
+enum {
+ NV_TX_CHECKSUM_DISABLED,
+ NV_TX_CHECKSUM_ENABLED
+};
+static int tx_checksum_offload = NV_TX_CHECKSUM_ENABLED;
+
+/*
+ * Rx checksum offload
+ */
+enum {
+ NV_RX_CHECKSUM_DISABLED,
+ NV_RX_CHECKSUM_ENABLED
+};
+static int rx_checksum_offload = NV_RX_CHECKSUM_ENABLED;
+
+/*
+ * Tx ring size
+ */
+static int tx_ring_size = TX_RING_DEFAULT;
+
+/*
+ * Rx ring size
+ */
+static int rx_ring_size = RX_RING_DEFAULT;
+
+/*
+ * Tx flow control
+ */
+enum {
+ NV_TX_FLOW_CONTROL_DISABLED,
+ NV_TX_FLOW_CONTROL_ENABLED
+};
+static int tx_flow_control = NV_TX_FLOW_CONTROL_ENABLED;
+
+/*
+ * Rx flow control
+ */
+enum {
+ NV_RX_FLOW_CONTROL_DISABLED,
+ NV_RX_FLOW_CONTROL_ENABLED
+};
+static int rx_flow_control = NV_RX_FLOW_CONTROL_ENABLED;
/*
* DMA 64bit
@@ -808,14 +1328,98 @@ enum {
};
static int dma_64bit = NV_DMA_64BIT_ENABLED;
+/*
+ * Wake On Lan
+ */
+enum {
+ NV_WOL_DISABLED,
+ NV_WOL_ENABLED
+};
+static int wol = NV_WOL_DISABLED;
+
+/*
+ * Tagging 802.1pq
+ */
+enum {
+ NV_8021PQ_DISABLED,
+ NV_8021PQ_ENABLED
+};
+static int tagging_8021pq = NV_8021PQ_ENABLED;
+
+enum {
+ NV_LOW_POWER_DISABLED,
+ NV_LOW_POWER_ENABLED
+};
+static int lowpowerspeed = NV_LOW_POWER_ENABLED;
+
+static int debug = 0;
+
+#if NVVER < RHES4
+static inline unsigned long nv_msecs_to_jiffies(const unsigned int m)
+{
+#if HZ <= 1000 && !(1000 % HZ)
+ return (m + (1000 / HZ) - 1) / (1000 / HZ);
+#elif HZ > 1000 && !(HZ % 1000)
+ return m * (HZ / 1000);
+#else
+ return (m * HZ + 999) / 1000;
+#endif
+}
+#endif
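+
+/*
+ * Worked example: with HZ=100 the first branch applies, so
+ * nv_msecs_to_jiffies(500) = (500 + 10 - 1) / 10 = 50 jiffies,
+ * i.e. 500 ms rounded up to the timer granularity.
+ */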
+
+static void nv_msleep(unsigned int msecs)
+{
+#if NVVER > SLES9
+ msleep(msecs);
+#else
+ unsigned long timeout = nv_msecs_to_jiffies(msecs);
+
+ while (timeout) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ timeout = schedule_timeout(timeout);
+ }
+#endif
+}
+
static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{
+#if NVVER > RHES3
return netdev_priv(dev);
+#else
+ return (struct fe_priv *) dev->priv;
+#endif
+}
+
+static void __init quirk_nforce_network_class(struct pci_dev *pdev)
+{
+ /* Some implementations of the nVidia network controllers
+ * show up as bridges, when we need to see them as network
+ * devices.
+ */
+
+ /* If this is already known as a network ctlr, do nothing. */
+ if ((pdev->class >> 8) == PCI_CLASS_NETWORK_ETHERNET)
+ return;
+
+ if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_OTHER) {
+ char c;
+
+ /* Clearing bit 6 of the register at 0xf8
+ * selects Ethernet device class
+ */
+ pci_read_config_byte(pdev, 0xf8, &c);
+ c &= 0xbf;
+ pci_write_config_byte(pdev, 0xf8, c);
+
+ /* sysfs needs pdev->class to be set correctly */
+ pdev->class &= 0x0000ff;
+ pdev->class |= (PCI_CLASS_NETWORK_ETHERNET << 8);
+ }
}
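+
+/*
+ * Mainline kernels would attach a workaround like this with
+ * DECLARE_PCI_FIXUP_EARLY(vendor, device, quirk_nforce_network_class);
+ * in this standalone driver the function is presumably called directly
+ * while probing, e.g. (a sketch, not a line from this hunk):
+ *
+ *	quirk_nforce_network_class(pci_dev);
+ */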
static inline u8 __iomem *get_hwbase(struct net_device *dev)
{
- return ((struct fe_priv *)netdev_priv(dev))->base;
+ return ((struct fe_priv *)get_nvpriv(dev))->base;
}
static inline void pci_push(u8 __iomem *base)
@@ -836,7 +1440,7 @@ static inline u32 nv_descr_getlength_ex(
}
static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
- int delay, int delaymax, const char *msg)
+ int delay, int delaymax, const char *msg)
{
u8 __iomem *base = get_hwbase(dev);
@@ -887,22 +1491,16 @@ static void free_rings(struct net_device
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
if(np->rx_ring.orig)
 			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
-					np->rx_ring.orig, np->ring_addr);
+					np->rx_ring.orig, np->ring_addr);
} else {
if (np->rx_ring.ex)
 			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
-					np->rx_ring.ex, np->ring_addr);
- }
- if (np->rx_skbuff)
- kfree(np->rx_skbuff);
- if (np->rx_dma)
- kfree(np->rx_dma);
- if (np->tx_skbuff)
- kfree(np->tx_skbuff);
- if (np->tx_dma)
- kfree(np->tx_dma);
- if (np->tx_dma_len)
- kfree(np->tx_dma_len);
+ np->rx_ring.ex, np->ring_addr);
+ }
+ if (np->rx_skb)
+ kfree(np->rx_skb);
+ if (np->tx_skb)
+ kfree(np->tx_skb);
}
static int using_multi_irqs(struct net_device *dev)
@@ -910,8 +1508,8 @@ static int using_multi_irqs(struct net_d
struct fe_priv *np = get_nvpriv(dev);
if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
- ((np->msi_flags & NV_MSI_X_ENABLED) &&
- ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
+ ((np->msi_flags & NV_MSI_X_ENABLED) &&
+ ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
return 0;
else
return 1;
@@ -921,11 +1519,13 @@ static void nv_enable_irq(struct net_dev
{
struct fe_priv *np = get_nvpriv(dev);
+ dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
+ /* modify network device class id */
if (!using_multi_irqs(dev)) {
if (np->msi_flags & NV_MSI_X_ENABLED)
enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
else
- enable_irq(dev->irq);
+ enable_irq(np->pci_dev->irq);
} else {
enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
@@ -937,11 +1537,12 @@ static void nv_disable_irq(struct net_de
{
struct fe_priv *np = get_nvpriv(dev);
+ dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
if (!using_multi_irqs(dev)) {
if (np->msi_flags & NV_MSI_X_ENABLED)
disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
else
- disable_irq(dev->irq);
+ disable_irq(np->pci_dev->irq);
} else {
disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
@@ -953,8 +1554,11 @@ static void nv_enable_hw_interrupts(stru
static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
{
u8 __iomem *base = get_hwbase(dev);
+ struct fe_priv *np = get_nvpriv(dev);
writel(mask, base + NvRegIrqMask);
+ if (np->msi_flags & NV_MSI_ENABLED)
+ writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
}
static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
@@ -982,7 +1586,7 @@ static int mii_rw(struct net_device *dev
u32 reg;
int retval;
- writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
+ writel(NVREG_MIISTAT_MASK_RW, base + NvRegMIIStatus);
reg = readl(base + NvRegMIIControl);
if (reg & NVREG_MIICTL_INUSE) {
@@ -998,7 +1602,7 @@ static int mii_rw(struct net_device *dev
writel(reg, base + NvRegMIIControl);
if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
- NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
+ NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed
out.\n",
dev->name, miireg, addr);
retval = -1;
@@ -1020,29 +1624,111 @@ static int mii_rw(struct net_device *dev
return retval;
}
-static int phy_reset(struct net_device *dev)
-{
- struct fe_priv *np = netdev_priv(dev);
+static void nv_save_LED_stats(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ u32 reg=0;
+ u32 value=0;
+ int i=0;
+
+ reg = Mv_Page_Address;
+ value = 3;
+ mii_rw(dev,np->phyaddr,reg,value);
+ udelay(5);
+
+ reg = Mv_LED_Control;
+ for(i=0;i<3;i++){
+ np->led_stats[i]=mii_rw(dev,np->phyaddr,reg+i,MII_READ);
+ dprintk(KERN_DEBUG "%s: save LED reg%d:
value=0x%x\n",dev->name,reg+i,np->led_stats[i]);
+ }
+
+}
+
+static void nv_restore_LED_stats(struct net_device *dev)
+{
+
+ struct fe_priv *np = get_nvpriv(dev);
+ u32 reg=0;
+ u32 value=0;
+ int i=0;
+
+ reg = Mv_Page_Address;
+ value = 3;
+ mii_rw(dev,np->phyaddr,reg,value);
+ udelay(5);
+
+ reg = Mv_LED_Control;
+ for(i=0;i<3;i++){
+ mii_rw(dev,np->phyaddr,reg+i,np->led_stats[i]);
+ udelay(1);
+ dprintk(KERN_DEBUG "%s: restore LED reg%d:
value=0x%x\n",dev->name,reg+i,np->led_stats[i]);
+ }
+
+}
+
+static void nv_LED_on(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ u32 reg=0;
+ u32 value=0;
+
+ reg = Mv_Page_Address;
+ value = 3;
+ mii_rw(dev,np->phyaddr,reg,value);
+ udelay(5);
+
+ reg = Mv_LED_Control;
+ mii_rw(dev,np->phyaddr,reg,Mv_LED_DUAL_MODE3);
+
+}
+
+static void nv_LED_off(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ u32 reg=0;
+ u32 value=0;
+
+ reg = Mv_Page_Address;
+ value = 3;
+ mii_rw(dev,np->phyaddr,reg,value);
+ udelay(5);
+
+ reg = Mv_LED_Control;
+ mii_rw(dev,np->phyaddr,reg,Mv_LED_FORCE_OFF);
+ udelay(1);
+
+}
+
+static int phy_reset(struct net_device *dev, u32 bmcr_setup)
+{
+ struct fe_priv *np = get_nvpriv(dev);
u32 miicontrol;
unsigned int tries = 0;
- miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
- miicontrol |= BMCR_RESET;
+ dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
+	if (np->phy_oui == PHY_OUI_MARVELL && np->phy_model == PHY_MODEL_MARVELL_E1011) {
+ nv_save_LED_stats(dev);
+ }
+ miicontrol = BMCR_RESET | bmcr_setup;
if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
return -1;
}
/* wait for 500ms */
- msleep(500);
+ nv_msleep(500);
/* must wait till reset is deasserted */
while (miicontrol & BMCR_RESET) {
- msleep(10);
+ nv_msleep(10);
miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
/* FIXME: 100 tries seem excessive */
if (tries++ > 100)
return -1;
}
+	if (np->phy_oui == PHY_OUI_MARVELL && np->phy_model == PHY_MODEL_MARVELL_E1011) {
+ nv_restore_LED_stats(dev);
+ }
+
return 0;
}
@@ -1052,9 +1738,59 @@ static int phy_init(struct net_device *d
u8 __iomem *base = get_hwbase(dev);
 	u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000, reg;
+ dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
+ /* phy errata for E3016 phy */
+ if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
+ reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
+ reg &= ~PHY_MARVELL_E3016_INITMASK;
+ if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
+ printk(KERN_INFO "%s: phy write to errata reg
failed.\n", pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ }
+
+	if (np->phy_oui == PHY_OUI_REALTEK) {
+		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
+			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+			return PHY_ERROR;
+		}
+		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
+			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+			return PHY_ERROR;
+		}
+		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
+			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+			return PHY_ERROR;
+		}
+		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
+			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+			return PHY_ERROR;
+		}
+		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
+			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+			return PHY_ERROR;
+		}
+	}
+
/* set advertise register */
reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
-	reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP);
+	reg &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
+	if (np->speed_duplex == NV_SPEED_DUPLEX_AUTO)
+		reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL);
+	if (np->speed_duplex == NV_SPEED_DUPLEX_10_HALF_DUPLEX)
+		reg |= ADVERTISE_10HALF;
+	if (np->speed_duplex == NV_SPEED_DUPLEX_10_FULL_DUPLEX)
+		reg |= ADVERTISE_10FULL;
+	if (np->speed_duplex == NV_SPEED_DUPLEX_100_HALF_DUPLEX)
+		reg |= ADVERTISE_100HALF;
+	if (np->speed_duplex == NV_SPEED_DUPLEX_100_FULL_DUPLEX)
+		reg |= ADVERTISE_100FULL;
+	if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
+		reg |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+	if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
+		reg |= ADVERTISE_PAUSE_ASYM;
+	np->fixed_mode = reg;
+
if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
printk(KERN_INFO "%s: phy write to advertise failed.\n",
pci_name(np->pci_dev));
return PHY_ERROR;
@@ -1069,11 +1805,15 @@ static int phy_init(struct net_device *d
np->gigabit = PHY_GIGABIT;
 		mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
 		mii_control_1000 &= ~ADVERTISE_1000HALF;
-		if (phyinterface & PHY_RGMII)
+		if (phyinterface & PHY_RGMII &&
+		    (np->speed_duplex == NV_SPEED_DUPLEX_AUTO ||
+		     (np->speed_duplex == NV_SPEED_DUPLEX_1000_FULL_DUPLEX && np->autoneg == AUTONEG_ENABLE)))
 			mii_control_1000 |= ADVERTISE_1000FULL;
-		else
+		else {
+			if (np->speed_duplex == NV_SPEED_DUPLEX_1000_FULL_DUPLEX && np->autoneg == AUTONEG_DISABLE)
+				printk(KERN_INFO "%s: 1000mbps full only allowed with autoneg\n", pci_name(np->pci_dev));
 			mii_control_1000 &= ~ADVERTISE_1000FULL;
-
+		}
 		if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
 			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
return PHY_ERROR;
@@ -1082,8 +1822,25 @@ static int phy_init(struct net_device *d
else
np->gigabit = 0;
- /* reset the phy */
- if (phy_reset(dev)) {
+ mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+	if (np->autoneg == AUTONEG_DISABLE) {
+		np->pause_flags &= ~(NV_PAUSEFRAME_RX_ENABLE | NV_PAUSEFRAME_TX_ENABLE);
+		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ)
+			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
+		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
+			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
+		mii_control &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX);
+ if (reg & (ADVERTISE_10FULL|ADVERTISE_100FULL))
+ mii_control |= BMCR_FULLDPLX;
+ if (reg & (ADVERTISE_100HALF|ADVERTISE_100FULL))
+ mii_control |= BMCR_SPEED100;
+ } else {
+ mii_control |= BMCR_ANENABLE;
+ }
+
+ /* reset the phy and setup BMCR
+ * (certain phys need reset at same time new values are set) */
+ if (phy_reset(dev, mii_control)) {
printk(KERN_INFO "%s: phy reset failed\n",
pci_name(np->pci_dev));
return PHY_ERROR;
}
@@ -1091,14 +1848,14 @@ static int phy_init(struct net_device *d
/* phy vendor specific configuration */
if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII) ) {
phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
- phy_reserved &= ~(PHY_INIT1 | PHY_INIT2);
- phy_reserved |= (PHY_INIT3 | PHY_INIT4);
+ phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
+ phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) {
printk(KERN_INFO "%s: phy init failed.\n",
pci_name(np->pci_dev));
return PHY_ERROR;
}
phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
- phy_reserved |= PHY_INIT5;
+ phy_reserved |= PHY_CICADA_INIT5;
if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
printk(KERN_INFO "%s: phy init failed.\n",
pci_name(np->pci_dev));
return PHY_ERROR;
@@ -1106,20 +1863,114 @@ static int phy_init(struct net_device *d
}
if (np->phy_oui == PHY_OUI_CICADA) {
 		phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
- phy_reserved |= PHY_INIT6;
+ phy_reserved |= PHY_CICADA_INIT6;
if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) {
 			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
return PHY_ERROR;
}
}
+	if (np->phy_oui == PHY_OUI_VITESSE) {
+		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1)) {
+			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+			return PHY_ERROR;
+		}
+		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2)) {
+			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+			return PHY_ERROR;
+		}
+		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
+		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
+			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+			return PHY_ERROR;
+		}
+		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
+		phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
+		phy_reserved |= PHY_VITESSE_INIT3;
+		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
+			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+			return PHY_ERROR;
+		}
+		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4)) {
+			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+			return PHY_ERROR;
+		}
+		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5)) {
+			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+			return PHY_ERROR;
+		}
+		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
+		phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
+		phy_reserved |= PHY_VITESSE_INIT3;
+		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
+			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+			return PHY_ERROR;
+		}
+		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
+		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
+			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+			return PHY_ERROR;
+		}
+		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6)) {
+			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+			return PHY_ERROR;
+		}
+		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7)) {
+			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+			return PHY_ERROR;
+		}
+		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
+		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
+			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+			return PHY_ERROR;
+		}
+		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
+		phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
+		phy_reserved |= PHY_VITESSE_INIT8;
+		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
+			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+			return PHY_ERROR;
+		}
+		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9)) {
+			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+			return PHY_ERROR;
+		}
+		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10)) {
+			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+			return PHY_ERROR;
+		}
+	}
+	if (np->phy_oui == PHY_OUI_REALTEK) {
+		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
+			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+			return PHY_ERROR;
+		}
+		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
+			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+			return PHY_ERROR;
+		}
+		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
+			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+			return PHY_ERROR;
+		}
+		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
+			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+			return PHY_ERROR;
+		}
+		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
+			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+			return PHY_ERROR;
+		}
+	}
 	/* some phys clear out pause advertisement on reset, set it back */
mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);
/* restart auto negotiation */
- mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
- mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
- if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
- return PHY_ERROR;
+ if (np->autoneg == AUTONEG_ENABLE) {
+ mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+ mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
+ if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
+ return PHY_ERROR;
+ }
}
return 0;
@@ -1127,80 +1978,118 @@ static int phy_init(struct net_device *d
static void nv_start_rx(struct net_device *dev)
{
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
u8 __iomem *base = get_hwbase(dev);
-
- dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
+ u32 rx_ctrl = readl(base + NvRegReceiverControl);
+
+ dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
+
/* Already running? Stop it. */
- if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
- writel(0, base + NvRegReceiverControl);
+	if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
+ rx_ctrl &= ~NVREG_RCVCTL_START;
+ writel(rx_ctrl, base + NvRegReceiverControl);
pci_push(base);
}
writel(np->linkspeed, base + NvRegLinkSpeed);
pci_push(base);
- writel(NVREG_RCVCTL_START, base + NvRegReceiverControl);
+ rx_ctrl |= NVREG_RCVCTL_START;
+ if (np->mac_in_use)
+ rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
+ writel(rx_ctrl, base + NvRegReceiverControl);
dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
- dev->name, np->duplex, np->linkspeed);
+ dev->name, np->duplex, np->linkspeed);
pci_push(base);
}
static void nv_stop_rx(struct net_device *dev)
{
+ struct fe_priv *np = get_nvpriv(dev);
u8 __iomem *base = get_hwbase(dev);
-
- dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
- writel(0, base + NvRegReceiverControl);
+ u32 rx_ctrl = readl(base + NvRegReceiverControl);
+
+ dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
+ if (!np->mac_in_use)
+ rx_ctrl &= ~NVREG_RCVCTL_START;
+ else
+ rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
+ writel(rx_ctrl, base + NvRegReceiverControl);
reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
KERN_INFO "nv_stop_rx: ReceiverStatus remained busy");
udelay(NV_RXSTOP_DELAY2);
- writel(0, base + NvRegLinkSpeed);
+ if (!np->mac_in_use)
+ writel(0, base + NvRegLinkSpeed);
}
static void nv_start_tx(struct net_device *dev)
{
+ struct fe_priv *np = get_nvpriv(dev);
u8 __iomem *base = get_hwbase(dev);
-
- dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
- writel(NVREG_XMITCTL_START, base + NvRegTransmitterControl);
+ u32 tx_ctrl = readl(base + NvRegTransmitterControl);
+
+ dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
+ tx_ctrl |= NVREG_XMITCTL_START;
+ if (np->mac_in_use)
+ tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
+ writel(tx_ctrl, base + NvRegTransmitterControl);
pci_push(base);
}
static void nv_stop_tx(struct net_device *dev)
{
+ struct fe_priv *np = get_nvpriv(dev);
u8 __iomem *base = get_hwbase(dev);
-
- dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
- writel(0, base + NvRegTransmitterControl);
+ u32 tx_ctrl = readl(base + NvRegTransmitterControl);
+
+ dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
+ if (!np->mac_in_use)
+ tx_ctrl &= ~NVREG_XMITCTL_START;
+ else
+ tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
+ writel(tx_ctrl, base + NvRegTransmitterControl);
reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
KERN_INFO "nv_stop_tx: TransmitterStatus remained
busy");
udelay(NV_TXSTOP_DELAY2);
- writel(0, base + NvRegUnknownTransmitterReg);
+ if (!np->mac_in_use)
+		writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
}
static void nv_txrx_reset(struct net_device *dev)
{
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
u8 __iomem *base = get_hwbase(dev);
-
- dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
+ unsigned int i;
+
+ dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
+ writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
+ for(i=0;i<10000;i++){
+ udelay(1);
+ if(readl(base+NvRegTxRxControl) & NVREG_TXRXCTL_IDLE)
+ break;
+ }
 	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
pci_push(base);
udelay(NV_TXRX_RESET_DELAY);
- writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
pci_push(base);
}
static void nv_mac_reset(struct net_device *dev)
{
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
u8 __iomem *base = get_hwbase(dev);
-
- dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name);
+ u32 temp1,temp2,temp3;
+
+ dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
 	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
+
+ /* save registers since they will be cleared on reset */
+ temp1 = readl(base + NvRegMacAddrA);
+ temp2 = readl(base + NvRegMacAddrB);
+ temp3 = readl(base + NvRegTransmitPoll);
+
pci_push(base);
writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
pci_push(base);
@@ -1208,89 +2097,208 @@ static void nv_mac_reset(struct net_devi
writel(0, base + NvRegMacReset);
pci_push(base);
udelay(NV_MAC_RESET_DELAY);
+
+ /* restore saved registers */
+ writel(temp1, base + NvRegMacAddrA);
+ writel(temp2, base + NvRegMacAddrB);
+ writel(temp3, base + NvRegTransmitPoll);
+
writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
pci_push(base);
}
+#if NVVER < SLES9
+static int nv_ethtool_ioctl(struct net_device *dev, void *useraddr)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ u8 *base = get_hwbase(dev);
+ u32 ethcmd;
+
+	if (copy_from_user(&ethcmd, useraddr, sizeof (ethcmd)))
+ return -EFAULT;
+
+ switch (ethcmd) {
+ case ETHTOOL_GDRVINFO:
+ {
+			struct ethtool_drvinfo info = { ETHTOOL_GDRVINFO };
+ strcpy(info.driver, "forcedeth");
+ strcpy(info.version, FORCEDETH_VERSION);
+ strcpy(info.bus_info, pci_name(np->pci_dev));
+ if (copy_to_user(useraddr, &info, sizeof
(info)))
+ return -EFAULT;
+ return 0;
+ }
+ case ETHTOOL_GLINK:
+ {
+ struct ethtool_value edata = { ETHTOOL_GLINK };
+
+ edata.data = !!netif_carrier_ok(dev);
+
+ if (copy_to_user(useraddr, &edata,
sizeof(edata)))
+ return -EFAULT;
+ return 0;
+ }
+ case ETHTOOL_GWOL:
+ {
+ struct ethtool_wolinfo wolinfo;
+ memset(&wolinfo, 0, sizeof(wolinfo));
+ wolinfo.supported = WAKE_MAGIC;
+
+ spin_lock_irq(&np->lock);
+ if (np->wolenabled)
+ wolinfo.wolopts = WAKE_MAGIC;
+ spin_unlock_irq(&np->lock);
+
+ if (copy_to_user(useraddr, &wolinfo,
sizeof(wolinfo)))
+ return -EFAULT;
+ return 0;
+ }
+ case ETHTOOL_SWOL:
+ {
+ struct ethtool_wolinfo wolinfo;
+ if (copy_from_user(&wolinfo, useraddr,
sizeof(wolinfo)))
+ return -EFAULT;
+
+ spin_lock_irq(&np->lock);
+ if (wolinfo.wolopts == 0) {
+ writel(0, base + NvRegWakeUpFlags);
+ np->wolenabled = NV_WOL_DISABLED;
+ }
+ if (wolinfo.wolopts & WAKE_MAGIC) {
+				writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);
+ np->wolenabled = NV_WOL_ENABLED;
+ }
+ spin_unlock_irq(&np->lock);
+ return 0;
+ }
+
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
/*
- * nv_get_stats: dev->get_stats function
- * Get latest stats value from the nic.
- * Called with read_lock(&dev_base_lock) held for read -
- * only synchronized against unregister_netdevice.
+ * nv_ioctl: dev->do_ioctl function
+ * Called with rtnl_lock held.
*/
-static struct net_device_stats *nv_get_stats(struct net_device *dev)
-{
- struct fe_priv *np = netdev_priv(dev);
-
- /* It seems that the nic always generates interrupts and doesn't
- * accumulate errors internally. Thus the current values in np->stats
- * are already up to date.
- */
- return &np->stats;
-}
+static int nv_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ switch(cmd) {
+ case SIOCETHTOOL:
+ return nv_ethtool_ioctl(dev, rq->ifr_data);
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+#endif
/*
* nv_alloc_rx: fill rx ring entries.
* Return 1 if the allocations for the skbs failed and the
* rx engine is without Available descriptors
*/
-static int nv_alloc_rx(struct net_device *dev)
-{
- struct fe_priv *np = netdev_priv(dev);
- unsigned int refill_rx = np->refill_rx;
- int nr;
-
- while (np->cur_rx != refill_rx) {
- struct sk_buff *skb;
-
- nr = refill_rx % np->rx_ring_size;
- if (np->rx_skbuff[nr] == NULL) {
-
- skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
- if (!skb)
- break;
-
+static inline int nv_alloc_rx(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ struct ring_desc* less_rx;
+ struct sk_buff *skb;
+
+ less_rx = np->get_rx.orig;
+ if (less_rx-- == np->first_rx.orig)
+ less_rx = np->last_rx.orig;
+
+ while (np->put_rx.orig != less_rx) {
+ skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
+ if (skb) {
skb->dev = dev;
- np->rx_skbuff[nr] = skb;
+ np->put_rx_ctx->skb = skb;
+#if NVVER > FEDORA7
+			np->put_rx_ctx->dma = pci_map_single(np->pci_dev, skb->data,
+							skb_tailroom(skb), PCI_DMA_FROMDEVICE);
+			np->put_rx_ctx->dma_len = skb_tailroom(skb);
+#else
+			np->put_rx_ctx->dma = pci_map_single(np->pci_dev, skb->data,
+							skb->end-skb->data, PCI_DMA_FROMDEVICE);
+			np->put_rx_ctx->dma_len = skb->end-skb->data;
+#endif
+			np->put_rx.orig->PacketBuffer = cpu_to_le32(np->put_rx_ctx->dma);
+			wmb();
+			np->put_rx.orig->FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
+ if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
+ np->put_rx.orig = np->first_rx.orig;
+ if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
+ np->put_rx_ctx = np->first_rx_ctx;
} else {
- skb = np->rx_skbuff[nr];
- }
- np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data,
+ return 1;
+ }
+ }
+ return 0;
+}
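
Note on the fill logic above: less_rx points one slot behind get_rx, so the loop stops before put_rx ever catches get_rx and one descriptor always stays unused. That gap is what keeps "ring empty" (put == get) distinguishable from "ring full". A minimal standalone sketch of the same invariant, with hypothetical names (illustration only, not driver code):

#include <stdio.h>

#define RING_SIZE 8

/* One slot always stays empty: put == get means "empty",
 * (put + 1) % RING_SIZE == get means "full". */
static int ring_is_full(unsigned put, unsigned get)
{
	return (put + 1) % RING_SIZE == get;
}

int main(void)
{
	unsigned put = 0, get = 0, filled = 0;

	while (!ring_is_full(put, get)) {
		put = (put + 1) % RING_SIZE;
		filled++;
	}
	printf("filled %u of %d slots\n", filled, RING_SIZE); /* 7 of 8 */
	return 0;
}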
+
+static inline int nv_alloc_rx_optimized(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ struct ring_desc_ex* less_rx;
+ struct sk_buff *skb;
+
+ less_rx = np->get_rx.ex;
+ if (less_rx-- == np->first_rx.ex)
+ less_rx = np->last_rx.ex;
+
+ while (np->put_rx.ex != less_rx) {
+ skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
+ if (skb) {
+ skb->dev = dev;
+ np->put_rx_ctx->skb = skb;
+#if NVVER > FEDORA7
+ np->put_rx_ctx->dma = pci_map_single(np->pci_dev, skb->data,
+ skb_tailroom(skb), PCI_DMA_FROMDEVICE);
+ np->put_rx_ctx->dma_len = skb_tailroom(skb);
+#else
+ np->put_rx_ctx->dma = pci_map_single(np->pci_dev, skb->data,
 skb->end-skb->data, PCI_DMA_FROMDEVICE);
- if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
- np->rx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->rx_dma[nr]);
+ np->put_rx_ctx->dma_len = skb->end-skb->data;
+#endif
+ np->put_rx.ex->PacketBufferHigh = cpu_to_le64(np->put_rx_ctx->dma) >> 32;
+ np->put_rx.ex->PacketBufferLow = cpu_to_le64(np->put_rx_ctx->dma) & 0x0FFFFFFFF;
 wmb();
- np->rx_ring.orig[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
+ np->put_rx.ex->FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
+ if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
+ np->put_rx.ex = np->first_rx.ex;
+ if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
+ np->put_rx_ctx = np->first_rx_ctx;
} else {
- np->rx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->rx_dma[nr]) >> 32;
- np->rx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->rx_dma[nr]) & 0x0FFFFFFFF;
- wmb();
- np->rx_ring.ex[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
- }
- dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as
Available\n",
- dev->name, refill_rx);
- refill_rx++;
- }
- np->refill_rx = refill_rx;
- if (np->cur_rx - refill_rx == np->rx_ring_size)
- return 1;
+ return 1;
+ }
+ }
return 0;
+
}
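
The descriptor-ex path above splits each DMA address into two 32-bit register halves (PacketBufferHigh and PacketBufferLow). The shift and mask are a plain 64-bit split; a tiny sketch of just that arithmetic (illustration only):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t dma = 0x123456789abcdef0ULL;
	uint32_t hi = dma >> 32;          /* analogous to PacketBufferHigh */
	uint32_t lo = dma & 0xffffffffu;  /* analogous to PacketBufferLow */

	printf("hi=0x%08x lo=0x%08x\n", hi, lo);
	return 0;
}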
static void nv_do_rx_refill(unsigned long data)
{
struct net_device *dev = (struct net_device *) data;
- struct fe_priv *np = netdev_priv(dev);
-
+ struct fe_priv *np = get_nvpriv(dev);
+ int retcode;
+
+ spin_lock_irq(&np->timer_lock);
if (!using_multi_irqs(dev)) {
if (np->msi_flags & NV_MSI_X_ENABLED)
disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
else
- disable_irq(dev->irq);
+ disable_irq(np->pci_dev->irq);
} else {
disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
}
- if (nv_alloc_rx(dev)) {
+
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+ retcode = nv_alloc_rx(dev);
+ else
+ retcode = nv_alloc_rx_optimized(dev);
+ if (retcode) {
spin_lock_irq(&np->lock);
if (!np->in_shutdown)
mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
@@ -1300,66 +2308,96 @@ static void nv_do_rx_refill(unsigned lon
if (np->msi_flags & NV_MSI_X_ENABLED)
enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
else
- enable_irq(dev->irq);
+ enable_irq(np->pci_dev->irq);
} else {
enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
}
-}
-
-static void nv_init_rx(struct net_device *dev)
-{
- struct fe_priv *np = netdev_priv(dev);
+ spin_unlock_irq(&np->timer_lock);
+}
+
+static void nv_init_rx(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
int i;
- np->cur_rx = np->rx_ring_size;
- np->refill_rx = 0;
- for (i = 0; i < np->rx_ring_size; i++)
- if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+ np->get_rx = np->put_rx = np->first_rx = np->rx_ring;
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+ np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
+ else
+ np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
+ np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb;
+ np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];
+
+ for (i = 0; i < np->rx_ring_size; i++) {
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
np->rx_ring.orig[i].FlagLen = 0;
- else
+ np->rx_ring.orig[i].PacketBuffer = 0;
+ } else {
np->rx_ring.ex[i].FlagLen = 0;
+ np->rx_ring.ex[i].TxVlan = 0;
+ np->rx_ring.ex[i].PacketBufferHigh = 0;
+ np->rx_ring.ex[i].PacketBufferLow = 0;
+ }
+ np->rx_skb[i].skb = NULL;
+ np->rx_skb[i].dma = 0;
+ }
}
static void nv_init_tx(struct net_device *dev)
{
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
int i;
- np->next_tx = np->nic_tx = 0;
+ np->get_tx = np->put_tx = np->first_tx = np->tx_ring;
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+ np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
+ else
+ np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
+ np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
+ np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
+
for (i = 0; i < np->tx_ring_size; i++) {
- if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
np->tx_ring.orig[i].FlagLen = 0;
- else
+ np->tx_ring.orig[i].PacketBuffer = 0;
+ } else {
np->tx_ring.ex[i].FlagLen = 0;
- np->tx_skbuff[i] = NULL;
- np->tx_dma[i] = 0;
+ np->tx_ring.ex[i].TxVlan = 0;
+ np->tx_ring.ex[i].PacketBufferHigh = 0;
+ np->tx_ring.ex[i].PacketBufferLow = 0;
+ }
+ np->tx_skb[i].skb = NULL;
+ np->tx_skb[i].dma = 0;
}
}
static int nv_init_ring(struct net_device *dev)
{
+ struct fe_priv *np = get_nvpriv(dev);
nv_init_tx(dev);
nv_init_rx(dev);
- return nv_alloc_rx(dev);
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+ return nv_alloc_rx(dev);
+ else
+ return nv_alloc_rx_optimized(dev);
}
static int nv_release_txskb(struct net_device *dev, unsigned int skbnr)
{
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
dprintk(KERN_INFO "%s: nv_release_txskb for skbnr %d\n",
- dev->name, skbnr);
-
- if (np->tx_dma[skbnr]) {
- pci_unmap_page(np->pci_dev, np->tx_dma[skbnr],
- np->tx_dma_len[skbnr],
- PCI_DMA_TODEVICE);
- np->tx_dma[skbnr] = 0;
- }
-
- if (np->tx_skbuff[skbnr]) {
- dev_kfree_skb_any(np->tx_skbuff[skbnr]);
- np->tx_skbuff[skbnr] = NULL;
+ dev->name, skbnr);
+
+ if (np->tx_skb[skbnr].dma) {
+ pci_unmap_page(np->pci_dev, np->tx_skb[skbnr].dma,
+ np->tx_skb[skbnr].dma_len,
+ PCI_DMA_TODEVICE);
+ np->tx_skb[skbnr].dma = 0;
+ }
+ if (np->tx_skb[skbnr].skb) {
+ dev_kfree_skb_any(np->tx_skb[skbnr].skb);
+ np->tx_skb[skbnr].skb = NULL;
return 1;
} else {
return 0;
@@ -1368,14 +2406,19 @@ static int nv_release_txskb(struct net_d
static void nv_drain_tx(struct net_device *dev)
{
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
unsigned int i;
for (i = 0; i < np->tx_ring_size; i++) {
- if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
np->tx_ring.orig[i].FlagLen = 0;
- else
+ np->tx_ring.orig[i].PacketBuffer = 0;
+ } else {
np->tx_ring.ex[i].FlagLen = 0;
+ np->tx_ring.ex[i].TxVlan = 0;
+ np->tx_ring.ex[i].PacketBufferHigh = 0;
+ np->tx_ring.ex[i].PacketBufferLow = 0;
+ }
if (nv_release_txskb(dev, i))
np->stats.tx_dropped++;
}
@@ -1383,20 +2426,31 @@ static void nv_drain_tx(struct net_devic
static void nv_drain_rx(struct net_device *dev)
{
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
int i;
for (i = 0; i < np->rx_ring_size; i++) {
- if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
np->rx_ring.orig[i].FlagLen = 0;
- else
+ np->rx_ring.orig[i].PacketBuffer = 0;
+ } else {
np->rx_ring.ex[i].FlagLen = 0;
+ np->rx_ring.ex[i].TxVlan = 0;
+ np->rx_ring.ex[i].PacketBufferHigh = 0;
+ np->rx_ring.ex[i].PacketBufferLow = 0;
+ }
wmb();
- if (np->rx_skbuff[i]) {
- pci_unmap_single(np->pci_dev, np->rx_dma[i],
-
np->rx_skbuff[i]->end-np->rx_skbuff[i]->data,
- PCI_DMA_FROMDEVICE);
- dev_kfree_skb(np->rx_skbuff[i]);
- np->rx_skbuff[i] = NULL;
+ if (np->rx_skb[i].skb) {
+#if NVVER > FEDORA7
+ pci_unmap_single(np->pci_dev, np->rx_skb[i].dma,
+ (skb_end_pointer(np->rx_skb[i].skb) - np->rx_skb[i].skb->data),
+ PCI_DMA_FROMDEVICE);
+#else
+ pci_unmap_single(np->pci_dev, np->rx_skb[i].dma,
+ np->rx_skb[i].skb->end-np->rx_skb[i].skb->data,
+ PCI_DMA_FROMDEVICE);
+#endif
+ dev_kfree_skb(np->rx_skb[i].skb);
+ np->rx_skb[i].skb = NULL;
}
}
}
@@ -1409,134 +2463,245 @@ static void drain_ring(struct net_device
/*
* nv_start_xmit: dev->hard_start_xmit function
- * Called with netif_tx_lock held.
+ * Called with dev->xmit_lock held.
*/
static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
u32 tx_flags = 0;
 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
unsigned int fragments = skb_shinfo(skb)->nr_frags;
- unsigned int nr = (np->next_tx - 1) % np->tx_ring_size;
- unsigned int start_nr = np->next_tx % np->tx_ring_size;
unsigned int i;
u32 offset = 0;
u32 bcnt;
u32 size = skb->len-skb->data_len;
 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
- u32 tx_flags_vlan = 0;
-
+ u32 empty_slots;
+ struct ring_desc* put_tx;
+ struct ring_desc* start_tx;
+ struct ring_desc* prev_tx;
+ struct nv_skb_map* prev_tx_ctx;
+
+ dprintk("%s:%s\n",dev->name,__FUNCTION__);
/* add fragments to entries count */
for (i = 0; i < fragments; i++) {
 entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
- ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
- }
-
- spin_lock_irq(&np->lock);
-
- if ((np->next_tx - np->nic_tx + entries - 1) > np->tx_limit_stop) {
- spin_unlock_irq(&np->lock);
- netif_stop_queue(dev);
- return NETDEV_TX_BUSY;
- }
-
- /* setup the header buffer */
- do {
- bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
- nr = (nr + 1) % np->tx_ring_size;
-
- np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
- PCI_DMA_TODEVICE);
- np->tx_dma_len[nr] = bcnt;
-
- if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
- np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
- np->tx_ring.orig[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
- } else {
- np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
- np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
- np->tx_ring.ex[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
- }
- tx_flags = np->tx_flags;
- offset += bcnt;
- size -= bcnt;
- } while(size);
-
- /* setup the fragments */
- for (i = 0; i < fragments; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- u32 size = frag->size;
- offset = 0;
-
+ ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
+ }
+
+ empty_slots = (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size));
+ if (likely(empty_slots > entries)) {
+
+ start_tx = put_tx = np->put_tx.orig;
+
+ /* setup the header buffer */
do {
+ prev_tx = put_tx;
+ prev_tx_ctx = np->put_tx_ctx;
 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
- nr = (nr + 1) % np->tx_ring_size;
-
- np->tx_dma[nr] = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
- PCI_DMA_TODEVICE);
- np->tx_dma_len[nr] = bcnt;
-
- if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
- np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
- np->tx_ring.orig[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
- } else {
- np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
- np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
- np->tx_ring.ex[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
- }
+ np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
+ PCI_DMA_TODEVICE);
+ np->put_tx_ctx->dma_len = bcnt;
+ put_tx->PacketBuffer = cpu_to_le32(np->put_tx_ctx->dma);
+ put_tx->FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
+
+ tx_flags = np->tx_flags;
offset += bcnt;
size -= bcnt;
- } while (size);
- }
-
- /* set last fragment flag */
- if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
- np->tx_ring.orig[nr].FlagLen |= cpu_to_le32(tx_flags_extra);
+ if (unlikely(put_tx++ == np->last_tx.orig))
+ put_tx = np->first_tx.orig;
+ if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
+ np->put_tx_ctx = np->first_tx_ctx;
+ } while(size);
+
+ /* setup the fragments */
+ for (i = 0; i < fragments; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ u32 size = frag->size;
+ offset = 0;
+
+ do {
+ prev_tx = put_tx;
+ prev_tx_ctx = np->put_tx_ctx;
+ bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
+
+ np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
+ PCI_DMA_TODEVICE);
+ np->put_tx_ctx->dma_len = bcnt;
+
+ put_tx->PacketBuffer = cpu_to_le32(np->put_tx_ctx->dma);
+ put_tx->FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
+ offset += bcnt;
+ size -= bcnt;
+ if (unlikely(put_tx++ == np->last_tx.orig))
+ put_tx = np->first_tx.orig;
+ if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
+ np->put_tx_ctx = np->first_tx_ctx;
+ } while (size);
+ }
+
+ /* set last fragment flag */
+ prev_tx->FlagLen |= cpu_to_le32(tx_flags_extra);
+
+ /* save skb in this slot's context area */
+ prev_tx_ctx->skb = skb;
+
+#ifdef NETIF_F_TSO
+#if NVVER > FEDORA5
+ if (skb_shinfo(skb)->gso_size)
+ tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
+#else
+ if (skb_shinfo(skb)->tso_size)
+ tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->tso_size << NV_TX2_TSO_SHIFT);
+#endif
+ else
+#endif
+ tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0);
+
+ spin_lock_irq(&np->lock);
+
+ /* set tx flags */
+ start_tx->FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
+ np->put_tx.orig = put_tx;
+
+ spin_unlock_irq(&np->lock);
+
+ dev->trans_start = jiffies;
+ writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
+ return NETDEV_TX_OK;
} else {
- np->tx_ring.ex[nr].FlagLen |= cpu_to_le32(tx_flags_extra);
- }
-
- np->tx_skbuff[nr] = skb;
+ spin_lock_irq(&np->lock);
+ netif_stop_queue(dev);
+ np->stop_tx = 1;
+ spin_unlock_irq(&np->lock);
+ return NETDEV_TX_BUSY;
+ }
+}
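
The empty_slots test above is modular arithmetic over the tx context pointers: (put - get) mod ring_size entries are in flight and the rest are free, and requiring strictly more free slots than the packet needs keeps one slot permanently unused, matching the rx side. A worked standalone sketch with hypothetical values (illustration only):

#include <stdio.h>

static unsigned empty_slots(unsigned ring_size, unsigned put, unsigned get)
{
	/* Same expression as the driver uses:
	 * ring_size - ((ring_size + (put - get)) % ring_size).
	 * Adding ring_size first keeps the value non-negative after wrap. */
	return ring_size - ((ring_size + put - get) % ring_size);
}

int main(void)
{
	printf("%u\n", empty_slots(16, 5, 2));  /* 3 in flight -> prints 13 */
	printf("%u\n", empty_slots(16, 1, 14)); /* wrapped, still 3 -> prints 13 */
	return 0;
}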
+
+static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ u32 tx_flags = 0;
+ u32 tx_flags_extra;
+ unsigned int fragments = skb_shinfo(skb)->nr_frags;
+ unsigned int i;
+ u32 offset = 0;
+ u32 bcnt;
+ u32 size = skb->len-skb->data_len;
+ u32 empty_slots;
+ struct ring_desc_ex* put_tx;
+ struct ring_desc_ex* start_tx;
+ struct ring_desc_ex* prev_tx;
+ struct nv_skb_map* prev_tx_ctx;
+
+ u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
+
+ dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
+ /* add fragments to entries count */
+ for (i = 0; i < fragments; i++) {
+ entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
+ ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
+ }
+
+ empty_slots = (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size));
+ if (likely(empty_slots > entries)) {
+
+ start_tx = put_tx = np->put_tx.ex;
+
+ /* setup the header buffer */
+ do {
+ prev_tx = put_tx;
+ prev_tx_ctx = np->put_tx_ctx;
+ bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
+ np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
+ PCI_DMA_TODEVICE);
+ np->put_tx_ctx->dma_len = bcnt;
+ put_tx->PacketBufferHigh = cpu_to_le64(np->put_tx_ctx->dma) >> 32;
+ put_tx->PacketBufferLow = cpu_to_le64(np->put_tx_ctx->dma) & 0x0FFFFFFFF;
+ put_tx->FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
+
+ tx_flags = NV_TX2_VALID;
+ offset += bcnt;
+ size -= bcnt;
+ if (unlikely(put_tx++ == np->last_tx.ex))
+ put_tx = np->first_tx.ex;
+ if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
+ np->put_tx_ctx = np->first_tx_ctx;
+ } while(size);
+ /* setup the fragments */
+ for (i = 0; i < fragments; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ u32 size = frag->size;
+ offset = 0;
+
+ do {
+ prev_tx = put_tx;
+ prev_tx_ctx = np->put_tx_ctx;
+ bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
+
+ np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
+ PCI_DMA_TODEVICE);
+ np->put_tx_ctx->dma_len = bcnt;
+
+ put_tx->PacketBufferHigh = cpu_to_le64(np->put_tx_ctx->dma) >> 32;
+ put_tx->PacketBufferLow = cpu_to_le64(np->put_tx_ctx->dma) & 0x0FFFFFFFF;
+ put_tx->FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
+ offset += bcnt;
+ size -= bcnt;
+ if (unlikely(put_tx++ == np->last_tx.ex))
+ put_tx = np->first_tx.ex;
+ if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
+ np->put_tx_ctx = np->first_tx_ctx;
+ np->put_tx_ctx = np->first_tx_ctx;
+ } while (size);
+ }
+
+ /* set last fragment flag */
+ prev_tx->FlagLen |= cpu_to_le32(NV_TX2_LASTPACKET);
+
+ /* save skb in this slot's context area */
+ prev_tx_ctx->skb = skb;
#ifdef NETIF_F_TSO
- if (skb_is_gso(skb))
- tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
- else
-#endif
- tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0);
-
- /* vlan tag */
- if (np->vlangrp && vlan_tx_tag_present(skb)) {
- tx_flags_vlan = NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb);
- }
-
- /* set tx flags */
- if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
- np->tx_ring.orig[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
+#if NVVER > FEDORA5
+ if (skb_shinfo(skb)->gso_size)
+ tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
+#else
+ if (skb_shinfo(skb)->tso_size)
+ tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->tso_size << NV_TX2_TSO_SHIFT);
+#endif
+ else
+#endif
+ tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0);
+
+ /* vlan tag */
+ if (likely(!np->vlangrp)) {
+ start_tx->TxVlan = 0;
+ } else {
+ if (vlan_tx_tag_present(skb))
+ start_tx->TxVlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb));
+ else
+ start_tx->TxVlan = 0;
+ }
+
+ spin_lock_irq(&np->lock);
+
+ /* set tx flags */
+ start_tx->FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
+ np->put_tx.ex = put_tx;
+
+ spin_unlock_irq(&np->lock);
+
+ dev->trans_start = jiffies;
+ writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
+ return NETDEV_TX_OK;
+
} else {
- np->tx_ring.ex[start_nr].TxVlan = cpu_to_le32(tx_flags_vlan);
- np->tx_ring.ex[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
- }
-
- dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d (entries %d) queued for transmission. tx_flags_extra: %x\n",
- dev->name, np->next_tx, entries, tx_flags_extra);
- {
- int j;
- for (j=0; j<64; j++) {
- if ((j%16) == 0)
- dprintk("\n%03x:", j);
- dprintk(" %02x", ((unsigned char*)skb->data)[j]);
- }
- dprintk("\n");
- }
-
- np->next_tx += entries;
-
- dev->trans_start = jiffies;
- spin_unlock_irq(&np->lock);
- writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
- pci_push(get_hwbase(dev));
- return NETDEV_TX_OK;
+ spin_lock_irq(&np->lock);
+ netif_stop_queue(dev);
+ np->stop_tx = 1;
+ spin_unlock_irq(&np->lock);
+ return NETDEV_TX_BUSY;
+ }
}
/*
@@ -1544,30 +2709,26 @@ static int nv_start_xmit(struct sk_buff
*
* Caller must own np->lock.
*/
-static void nv_tx_done(struct net_device *dev)
-{
- struct fe_priv *np = netdev_priv(dev);
+static inline void nv_tx_done(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
u32 Flags;
- unsigned int i;
- struct sk_buff *skb;
-
- while (np->nic_tx != np->next_tx) {
- i = np->nic_tx % np->tx_ring_size;
-
- if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
- Flags = le32_to_cpu(np->tx_ring.orig[i].FlagLen);
- else
- Flags = le32_to_cpu(np->tx_ring.ex[i].FlagLen);
-
- dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, Flags
0x%x.\n",
- dev->name, np->nic_tx, Flags);
- if (Flags & NV_TX_VALID)
- break;
+ struct ring_desc* orig_get_tx = np->get_tx.orig;
+ struct ring_desc* put_tx = np->put_tx.orig;
+
+ dprintk("%s:%s\n",dev->name,__FUNCTION__);
+ while ((np->get_tx.orig != put_tx) &&
+ !((Flags = le32_to_cpu(np->get_tx.orig->FlagLen)) & NV_TX_VALID)) {
+ dprintk(KERN_DEBUG "%s: nv_tx_done:NVLAN tx done\n", dev->name);
+
+ pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
+ np->get_tx_ctx->dma_len,
+ PCI_DMA_TODEVICE);
+ np->get_tx_ctx->dma = 0;
+
if (np->desc_ver == DESC_VER_1) {
if (Flags & NV_TX_LASTPACKET) {
- skb = np->tx_skbuff[i];
- if (Flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION|
- NV_TX_UNDERFLOW|NV_TX_ERROR)) {
+ if (Flags & NV_TX_ERROR) {
if (Flags & NV_TX_UNDERFLOW)
np->stats.tx_fifo_errors++;
if (Flags & NV_TX_CARRIERLOST)
@@ -1575,14 +2736,15 @@ static void nv_tx_done(struct net_device
np->stats.tx_errors++;
} else {
np->stats.tx_packets++;
- np->stats.tx_bytes += skb->len;
+ np->stats.tx_bytes += np->get_tx_ctx->skb->len;
}
+ dev_kfree_skb_any(np->get_tx_ctx->skb);
+ np->get_tx_ctx->skb = NULL;
+
}
} else {
if (Flags & NV_TX2_LASTPACKET) {
- skb = np->tx_skbuff[i];
- if (Flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION|
- NV_TX2_UNDERFLOW|NV_TX2_ERROR)) {
+ if (Flags & NV_TX2_ERROR) {
if (Flags & NV_TX2_UNDERFLOW)
np->stats.tx_fifo_errors++;
if (Flags & NV_TX2_CARRIERLOST)
@@ -1590,26 +2752,73 @@ static void nv_tx_done(struct net_device
np->stats.tx_errors++;
} else {
np->stats.tx_packets++;
- np->stats.tx_bytes += skb->len;
- }
+ np->stats.tx_bytes += np->get_tx_ctx->skb->len;
+ }
+ dev_kfree_skb_any(np->get_tx_ctx->skb);
+ np->get_tx_ctx->skb = NULL;
}
}
- nv_release_txskb(dev, i);
- np->nic_tx++;
- }
- if (np->next_tx - np->nic_tx < np->tx_limit_start)
+
+ if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
+ np->get_tx.orig = np->first_tx.orig;
+ if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
+ np->get_tx_ctx = np->first_tx_ctx;
+ }
+ if (unlikely((np->stop_tx == 1) && (np->get_tx.orig != orig_get_tx))) {
+ np->stop_tx = 0;
netif_wake_queue(dev);
+ }
+}
+
+static inline void nv_tx_done_optimized(struct net_device *dev, int max_work)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ u32 Flags;
+ struct ring_desc_ex* orig_get_tx = np->get_tx.ex;
+ struct ring_desc_ex* put_tx = np->put_tx.ex;
+
+ while ((np->get_tx.ex != put_tx) &&
+ !((Flags = le32_to_cpu(np->get_tx.ex->FlagLen)) & NV_TX_VALID) &&
+ (max_work-- > 0)) {
+ dprintk(KERN_DEBUG "%s: nv_tx_done_optimized:NVLAN tx done\n",
dev->name);
+
+ pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
+ np->get_tx_ctx->dma_len,
+ PCI_DMA_TODEVICE);
+ np->get_tx_ctx->dma = 0;
+
+ if (Flags & NV_TX2_LASTPACKET) {
+ if (!(Flags & NV_TX2_ERROR)) {
+ np->stats.tx_packets++;
+ }
+ dev_kfree_skb_any(np->get_tx_ctx->skb);
+ np->get_tx_ctx->skb = NULL;
+ }
+
+ if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
+ np->get_tx.ex = np->first_tx.ex;
+ if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
+ np->get_tx_ctx = np->first_tx_ctx;
+ }
+ if (unlikely((np->stop_tx == 1) && (np->get_tx.ex != orig_get_tx))) {
+ np->stop_tx = 0;
+ netif_wake_queue(dev);
+ }
}
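
nv_tx_done_optimized caps reclaim at max_work descriptors per call, so a burst of completions cannot monopolize the interrupt handler; anything left over is picked up on the next interrupt (the "other" vector below calls it for the same reason). A hedged sketch of that bounded-reclaim pattern, with hypothetical names:

/* Bounded reclaim: handle at most max_work completed entries per call
 * and report how many were done, so the caller can decide to re-poll. */
struct entry { int completed; };

static int reclaim(struct entry *ring, unsigned size,
		unsigned *get, unsigned put, int max_work)
{
	int done = 0;

	while (*get != put && ring[*get].completed && done < max_work) {
		ring[*get].completed = 0;	/* release this slot */
		*get = (*get + 1) % size;
		done++;
	}
	return done;
}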
/*
* nv_tx_timeout: dev->tx_timeout function
- * Called with netif_tx_lock held.
+ * Called with dev->xmit_lock held.
+ *
*/
static void nv_tx_timeout(struct net_device *dev)
{
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
u8 __iomem *base = get_hwbase(dev);
u32 status;
+
+ if (!netif_running(dev))
+ return;
if (np->msi_flags & NV_MSI_X_ENABLED)
status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
@@ -1621,9 +2830,15 @@ static void nv_tx_timeout(struct net_dev
{
int i;
- printk(KERN_INFO "%s: Ring at %lx: next %d nic %d\n",
- dev->name, (unsigned long)np->ring_addr,
- np->next_tx, np->nic_tx);
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+ printk(KERN_INFO "%s: Ring at %lx: get %lx put %lx\n",
+ dev->name, (unsigned long)np->tx_ring.orig,
+ (unsigned long)np->get_tx.orig, (unsigned long)np->put_tx.orig);
+ } else {
+ printk(KERN_INFO "%s: Ring at %lx: get %lx put %lx\n",
+ dev->name, (unsigned long)np->tx_ring.ex,
+ (unsigned long)np->get_tx.ex, (unsigned long)np->put_tx.ex);
+ }
 printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
 for (i=0;i<=np->register_size;i+= 32) {
 printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
@@ -1637,54 +2852,64 @@ static void nv_tx_timeout(struct net_dev
for (i=0;i<np->tx_ring_size;i+= 4) {
 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
 printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
- i,
- le32_to_cpu(np->tx_ring.orig[i].PacketBuffer),
- le32_to_cpu(np->tx_ring.orig[i].FlagLen),
- le32_to_cpu(np->tx_ring.orig[i+1].PacketBuffer),
- le32_to_cpu(np->tx_ring.orig[i+1].FlagLen),
- le32_to_cpu(np->tx_ring.orig[i+2].PacketBuffer),
- le32_to_cpu(np->tx_ring.orig[i+2].FlagLen),
- le32_to_cpu(np->tx_ring.orig[i+3].PacketBuffer),
- le32_to_cpu(np->tx_ring.orig[i+3].FlagLen));
+ i,
+ le32_to_cpu(np->tx_ring.orig[i].PacketBuffer),
+ le32_to_cpu(np->tx_ring.orig[i].FlagLen),
+ le32_to_cpu(np->tx_ring.orig[i+1].PacketBuffer),
+ le32_to_cpu(np->tx_ring.orig[i+1].FlagLen),
+ le32_to_cpu(np->tx_ring.orig[i+2].PacketBuffer),
+ le32_to_cpu(np->tx_ring.orig[i+2].FlagLen),
+ le32_to_cpu(np->tx_ring.orig[i+3].PacketBuffer),
+ le32_to_cpu(np->tx_ring.orig[i+3].FlagLen));
 } else {
 printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
- i,
- le32_to_cpu(np->tx_ring.ex[i].PacketBufferHigh),
- le32_to_cpu(np->tx_ring.ex[i].PacketBufferLow),
- le32_to_cpu(np->tx_ring.ex[i].FlagLen),
- le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferHigh),
- le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferLow),
- le32_to_cpu(np->tx_ring.ex[i+1].FlagLen),
- le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferHigh),
- le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferLow),
- le32_to_cpu(np->tx_ring.ex[i+2].FlagLen),
- le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferHigh),
- le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferLow),
- le32_to_cpu(np->tx_ring.ex[i+3].FlagLen));
+ i,
+ le32_to_cpu(np->tx_ring.ex[i].PacketBufferHigh),
+ le32_to_cpu(np->tx_ring.ex[i].PacketBufferLow),
+ le32_to_cpu(np->tx_ring.ex[i].FlagLen),
+ le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferHigh),
+ le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferLow),
+ le32_to_cpu(np->tx_ring.ex[i+1].FlagLen),
+ le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferHigh),
+ le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferLow),
+ le32_to_cpu(np->tx_ring.ex[i+2].FlagLen),
+ le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferHigh),
+ le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferLow),
+ le32_to_cpu(np->tx_ring.ex[i+3].FlagLen));
}
}
}
+ nv_disable_irq(dev);
spin_lock_irq(&np->lock);
/* 1) stop tx engine */
nv_stop_tx(dev);
/* 2) check that the packets were not sent already: */
- nv_tx_done(dev);
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+ nv_tx_done(dev);
+ else
+ nv_tx_done_optimized(dev, np->tx_ring_size);
/* 3) if there are dead entries: clear everything */
- if (np->next_tx != np->nic_tx) {
+ if (np->get_tx_ctx != np->put_tx_ctx) {
printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name);
nv_drain_tx(dev);
- np->next_tx = np->nic_tx = 0;
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+ np->get_tx.orig = np->put_tx.orig = np->first_tx.orig;
+ else
+ np->get_tx.ex = np->put_tx.ex = np->first_tx.ex;
+ np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx;
setup_hw_rings(dev, NV_SETUP_TX_RING);
- netif_wake_queue(dev);
- }
-
+ }
+
+ netif_wake_queue(dev);
/* 4) restart tx engine */
nv_start_tx(dev);
+
spin_unlock_irq(&np->lock);
+ nv_enable_irq(dev);
}
/*
@@ -1705,7 +2930,7 @@ static int nv_getlen(struct net_device *
hdrlen = ETH_HLEN;
}
dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen
%d\n",
- dev->name, datalen, protolen, hdrlen);
+ dev->name, datalen, protolen, hdrlen);
if (protolen > ETH_DATA_LEN)
return datalen; /* Value in proto field not a len, no checks
possible */
@@ -1740,43 +2965,23 @@ static int nv_getlen(struct net_device *
}
}
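
The protolen check above is the classic Ethernet II versus 802.3 disambiguation: a type/length field value above 1500 (ETH_DATA_LEN) is an EtherType and carries no length, so only the hardware byte count can be trusted. A minimal sketch of that rule (illustration only):

#include <stdio.h>

#define ETH_DATA_LEN 1500

/* Values <= 1500 are 802.3 lengths; larger values (e.g. 0x0800 for
 * IPv4) are Ethernet II EtherTypes with no length information. */
static int field_is_length(unsigned proto_or_len)
{
	return proto_or_len <= ETH_DATA_LEN;
}

int main(void)
{
	printf("0x0800 -> %s\n", field_is_length(0x0800) ? "length" : "ethertype");
	printf("46     -> %s\n", field_is_length(46) ? "length" : "ethertype");
	return 0;
}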
-static void nv_rx_process(struct net_device *dev)
-{
- struct fe_priv *np = netdev_priv(dev);
+static inline void nv_rx_process(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
u32 Flags;
- u32 vlanflags = 0;
-
- for (;;) {
- struct sk_buff *skb;
- int len;
- int i;
- if (np->cur_rx - np->refill_rx >= np->rx_ring_size)
- break; /* we scanned the whole ring - do not continue */
-
- i = np->cur_rx % np->rx_ring_size;
- if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
- Flags = le32_to_cpu(np->rx_ring.orig[i].FlagLen);
- len = nv_descr_getlength(&np->rx_ring.orig[i], np->desc_ver);
- } else {
- Flags = le32_to_cpu(np->rx_ring.ex[i].FlagLen);
- len = nv_descr_getlength_ex(&np->rx_ring.ex[i], np->desc_ver);
- vlanflags = le32_to_cpu(np->rx_ring.ex[i].PacketBufferLow);
- }
-
- dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, Flags 0x%x.\n",
- dev->name, np->cur_rx, Flags);
-
- if (Flags & NV_RX_AVAIL)
- break; /* still owned by hardware, */
-
- /*
- * the packet is for us - immediately tear down the pci mapping.
- * TODO: check if a prefetch of the first cacheline improves
- * the performance.
- */
- pci_unmap_single(np->pci_dev, np->rx_dma[i],
- np->rx_skbuff[i]->end-np->rx_skbuff[i]->data,
+ struct sk_buff *skb;
+ int len;
+
+ dprintk("%s:%s\n",dev->name,__FUNCTION__);
+ while((np->get_rx.orig != np->put_rx.orig) &&
+ !((Flags = le32_to_cpu(np->get_rx.orig->FlagLen)) & NV_RX_AVAIL)) {
+
+ pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
+ np->get_rx_ctx->dma_len,
PCI_DMA_FROMDEVICE);
+
+ skb = np->get_rx_ctx->skb;
+ np->get_rx_ctx->skb = NULL;
{
int j;
@@ -1784,118 +2989,186 @@ static void nv_rx_process(struct net_dev
for (j=0; j<64; j++) {
if ((j%16) == 0)
dprintk("\n%03x:", j);
- dprintk(" %02x", ((unsigned
char*)np->rx_skbuff[i]->data)[j]);
+ dprintk(" %02x", ((unsigned
char*)skb->data)[j]);
}
dprintk("\n");
}
- /* look at what we actually got: */
+
if (np->desc_ver == DESC_VER_1) {
- if (!(Flags & NV_RX_DESCRIPTORVALID))
- goto next_pkt;
-
- if (Flags & NV_RX_ERROR) {
- if (Flags & NV_RX_MISSEDFRAME) {
- np->stats.rx_missed_errors++;
- np->stats.rx_errors++;
- goto next_pkt;
- }
- if (Flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) {
- np->stats.rx_errors++;
- goto next_pkt;
- }
- if (Flags & NV_RX_CRCERR) {
- np->stats.rx_crc_errors++;
- np->stats.rx_errors++;
- goto next_pkt;
- }
- if (Flags & NV_RX_OVERFLOW) {
- np->stats.rx_over_errors++;
- np->stats.rx_errors++;
- goto next_pkt;
- }
- if (Flags & NV_RX_ERROR4) {
- len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
- if (len < 0) {
+
+ if (likely(Flags & NV_RX_DESCRIPTORVALID)) {
+ len = Flags & LEN_MASK_V1;
+ if (unlikely(Flags & NV_RX_ERROR)) {
+ if (Flags & NV_RX_ERROR4) {
+ len = nv_getlen(dev, skb->data, len);
+ if (len < 0 || len > np->rx_buf_sz) {
+ np->stats.rx_errors++;
+ dev_kfree_skb(skb);
+ goto next_pkt;
+ }
+ }
+ /* framing errors are soft errors */
+ else if (Flags & NV_RX_FRAMINGERR) {
+ if (Flags & NV_RX_SUBSTRACT1) {
+ len--;
+ }
+ }
+ /* the rest are hard errors */
+ else {
+ if (Flags & NV_RX_MISSEDFRAME)
+ np->stats.rx_missed_errors++;
+ if (Flags & NV_RX_CRCERR)
+ np->stats.rx_crc_errors++;
+ if (Flags & NV_RX_OVERFLOW)
+ np->stats.rx_over_errors++;
np->stats.rx_errors++;
+ dev_kfree_skb(skb);
goto next_pkt;
}
}
- /* framing errors are soft errors. */
- if (Flags & NV_RX_FRAMINGERR) {
- if (Flags & NV_RX_SUBSTRACT1) {
- len--;
+ } else {
+ dev_kfree_skb(skb);
+ goto next_pkt;
+ }
+ } else {
+ if (likely(Flags & NV_RX2_DESCRIPTORVALID)) {
+ len = Flags & LEN_MASK_V2;
+ if (unlikely(Flags & NV_RX2_ERROR)) {
+ if (Flags & NV_RX2_ERROR4) {
+ len = nv_getlen(dev, skb->data, len);
+ if (len < 0 || len > np->rx_buf_sz) {
+ np->stats.rx_errors++;
+ dev_kfree_skb(skb);
+ goto next_pkt;
+ }
+ }
+ /* framing errors are soft errors */
+ else if (Flags & NV_RX2_FRAMINGERR) {
+ if (Flags & NV_RX2_SUBSTRACT1) {
+ len--;
+ }
+ }
+ /* the rest are hard errors */
+ else {
+ if (Flags & NV_RX2_CRCERR)
+ np->stats.rx_crc_errors++;
+ if (Flags & NV_RX2_OVERFLOW)
+ np->stats.rx_over_errors++;
+ np->stats.rx_errors++;
+ dev_kfree_skb(skb);
+ goto next_pkt;
}
}
+ if (((Flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || ((Flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP))
+ /*ip and tcp or udp */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+ dev_kfree_skb(skb);
+ goto next_pkt;
}
- } else {
- if (!(Flags & NV_RX2_DESCRIPTORVALID))
- goto next_pkt;
-
- if (Flags & NV_RX2_ERROR) {
- if (Flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) {
- np->stats.rx_errors++;
- goto next_pkt;
- }
- if (Flags & NV_RX2_CRCERR) {
- np->stats.rx_crc_errors++;
- np->stats.rx_errors++;
- goto next_pkt;
- }
- if (Flags & NV_RX2_OVERFLOW) {
- np->stats.rx_over_errors++;
- np->stats.rx_errors++;
- goto next_pkt;
- }
+ }
+
+ /* got a valid packet - forward it to the network core */
+ dprintk(KERN_DEBUG "%s: nv_rx_process:NVLAN rx done\n",
dev->name);
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ np->stats.rx_packets++;
+ np->stats.rx_bytes += len;
+next_pkt:
+ if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
+ np->get_rx.orig = np->first_rx.orig;
+ if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
+ np->get_rx_ctx = np->first_rx_ctx;
+ }
+}
+
+static inline int nv_rx_process_optimized(struct net_device *dev, int max_work)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ u32 Flags;
+ u32 vlanflags = 0;
+ u32 rx_processed_cnt = 0;
+ struct sk_buff *skb;
+ int len;
+
+ while((np->get_rx.ex != np->put_rx.ex) &&
+ !((Flags = le32_to_cpu(np->get_rx.ex->FlagLen)) & NV_RX2_AVAIL) &&
+ (rx_processed_cnt++ < max_work)) {
+
+ pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
+ np->get_rx_ctx->dma_len,
+ PCI_DMA_FROMDEVICE);
+
+ skb = np->get_rx_ctx->skb;
+ np->get_rx_ctx->skb = NULL;
+
+ /* look at what we actually got: */
+ if (likely(Flags & NV_RX2_DESCRIPTORVALID)) {
+ len = Flags & LEN_MASK_V2;
+ if (unlikely(Flags & NV_RX2_ERROR)) {
if (Flags & NV_RX2_ERROR4) {
- len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
- if (len < 0) {
- np->stats.rx_errors++;
+ len = nv_getlen(dev, skb->data, len);
+ if (len < 0 || len > np->rx_buf_sz) {
+ np->rx_len_errors++;
+ dev_kfree_skb(skb);
goto next_pkt;
}
}
/* framing errors are soft errors */
- if (Flags & NV_RX2_FRAMINGERR) {
+ else if (Flags & NV_RX2_FRAMINGERR) {
if (Flags & NV_RX2_SUBSTRACT1) {
len--;
}
}
- }
- if (np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) {
- Flags &= NV_RX2_CHECKSUMMASK;
- if (Flags == NV_RX2_CHECKSUMOK1 ||
- Flags == NV_RX2_CHECKSUMOK2 ||
- Flags == NV_RX2_CHECKSUMOK3) {
- dprintk(KERN_DEBUG "%s: hw checksum
hit!.\n", dev->name);
- np->rx_skbuff[i]->ip_summed =
CHECKSUM_UNNECESSARY;
- } else {
- dprintk(KERN_DEBUG "%s: hwchecksum
miss!.\n", dev->name);
+ /* the rest are hard errors */
+ else {
+ dev_kfree_skb(skb);
+ goto next_pkt;
}
}
- }
- /* got a valid packet - forward it to the network core */
- skb = np->rx_skbuff[i];
- np->rx_skbuff[i] = NULL;
-
- skb_put(skb, len);
- skb->protocol = eth_type_trans(skb, dev);
- dprintk(KERN_DEBUG "%s: nv_rx_process: packet %d with %d bytes,
proto %d accepted.\n",
- dev->name, np->cur_rx, len,
skb->protocol);
- if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT)) {
- vlan_hwaccel_rx(skb, np->vlangrp, vlanflags &
NV_RX3_VLAN_TAG_MASK);
+
+ if (likely(np->rx_csum)) {
+ if (likely(((Flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || ((Flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP)))
+ /*ip and tcp or udp */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+ dprintk(KERN_DEBUG "%s: nv_rx_process_optimized:NVLAN rx done\n", dev->name);
+
+ /* got a valid packet - forward it to the network core */
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, dev);
+ prefetch(skb->data);
+
+ if (likely(!np->vlangrp)) {
+ netif_rx(skb);
+ } else {
+ vlanflags = le32_to_cpu(np->get_rx.ex->PacketBufferLow);
+ if (vlanflags & NV_RX3_VLAN_TAG_PRESENT)
+ vlan_hwaccel_rx(skb, np->vlangrp, vlanflags & NV_RX3_VLAN_TAG_MASK);
+ else
+ netif_rx(skb);
+ }
+
+ dev->last_rx = jiffies;
+ np->stats.rx_packets++;
+ np->stats.rx_bytes += len;
} else {
- netif_rx(skb);
- }
- dev->last_rx = jiffies;
- np->stats.rx_packets++;
- np->stats.rx_bytes += len;
+ dev_kfree_skb(skb);
+ }
next_pkt:
- np->cur_rx++;
- }
+ if (unlikely(np->get_rx.ex++ == np->last_rx.ex))
+ np->get_rx.ex = np->first_rx.ex;
+ if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
+ np->get_rx_ctx = np->first_rx_ctx;
+ }
+ return rx_processed_cnt;
}
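
The checksum branch above compares the masked flag bits against the two "IP plus TCP/UDP checksum verified" encodings before setting CHECKSUM_UNNECESSARY. A standalone sketch of the flag test; the constants below are invented placeholders, not the hardware's real encodings:

#include <stdio.h>
#include <stdint.h>

#define CKSUM_MASK   0x1c000000u /* placeholder values, illustration only */
#define CKSUM_IP_TCP 0x14000000u
#define CKSUM_IP_UDP 0x18000000u

static int hw_checksum_ok(uint32_t flags)
{
	uint32_t c = flags & CKSUM_MASK;

	return c == CKSUM_IP_TCP || c == CKSUM_IP_UDP;
}

int main(void)
{
	printf("%d\n", hw_checksum_ok(CKSUM_IP_TCP | 0x42)); /* prints 1 */
	printf("%d\n", hw_checksum_ok(0));                   /* prints 0 */
	return 0;
}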
static void set_bufsize(struct net_device *dev)
{
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
if (dev->mtu <= ETH_DATA_LEN)
np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
@@ -1909,7 +3182,7 @@ static void set_bufsize(struct net_devic
*/
static int nv_change_mtu(struct net_device *dev, int new_mtu)
{
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
int old_mtu;
if (new_mtu < 64 || new_mtu > np->pkt_limit)
@@ -1933,8 +3206,13 @@ static int nv_change_mtu(struct net_devi
* guessed, there is probably a simpler approach.
* Changing the MTU is a rare event, it shouldn't matter.
*/
+ nv_disable_hw_interrupts(dev,np->irqmask);
nv_disable_irq(dev);
+#if NVVER > FEDORA5
netif_tx_lock_bh(dev);
+#else
+ spin_lock_bh(&dev->xmit_lock);
+#endif
spin_lock(&np->lock);
/* stop engines */
nv_stop_rx(dev);
@@ -1953,7 +3231,7 @@ static int nv_change_mtu(struct net_devi
writel(np->rx_buf_sz, base + NvRegOffloadConfig);
setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
- base + NvRegRingSizes);
+ base + NvRegRingSizes);
pci_push(base);
 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
pci_push(base);
@@ -1962,8 +3240,13 @@ static int nv_change_mtu(struct net_devi
nv_start_rx(dev);
nv_start_tx(dev);
spin_unlock(&np->lock);
+#if NVVER > FEDORA5
netif_tx_unlock_bh(dev);
+#else
+ spin_unlock_bh(&dev->xmit_lock);
+#endif
nv_enable_irq(dev);
+ nv_enable_hw_interrupts(dev,np->irqmask);
}
return 0;
}
@@ -1974,11 +3257,11 @@ static void nv_copy_mac_to_hw(struct net
u32 mac[2];
mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
- (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
+ (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
-
writel(mac[0], base + NvRegMacAddrA);
writel(mac[1], base + NvRegMacAddrB);
+
}
/*
@@ -1987,17 +3270,22 @@ static void nv_copy_mac_to_hw(struct net
*/
static int nv_set_mac_address(struct net_device *dev, void *addr)
{
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
struct sockaddr *macaddr = (struct sockaddr*)addr;
if(!is_valid_ether_addr(macaddr->sa_data))
return -EADDRNOTAVAIL;
+ dprintk("%s:%s\n",dev->name,__FUNCTION__);
/* synchronized against open : rtnl_lock() held by caller */
memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
if (netif_running(dev)) {
+#if NVVER > FEDORA5
netif_tx_lock_bh(dev);
+#else
+ spin_lock_bh(&dev->xmit_lock);
+#endif
spin_lock_irq(&np->lock);
/* stop rx engine */
@@ -2009,7 +3297,11 @@ static int nv_set_mac_address(struct net
/* restart rx engine */
nv_start_rx(dev);
spin_unlock_irq(&np->lock);
+#if NVVER > FEDORA5
netif_tx_unlock_bh(dev);
+#else
+ spin_unlock_bh(&dev->xmit_lock);
+#endif
} else {
nv_copy_mac_to_hw(dev);
}
@@ -2018,11 +3310,11 @@ static int nv_set_mac_address(struct net
/*
* nv_set_multicast: dev->set_multicast function
- * Called with netif_tx_lock held.
+ * Called with dev->xmit_lock held.
*/
static void nv_set_multicast(struct net_device *dev)
{
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
u8 __iomem *base = get_hwbase(dev);
u32 addr[2];
u32 mask[2];
@@ -2032,7 +3324,7 @@ static void nv_set_multicast(struct net_
memset(mask, 0, sizeof(mask));
if (dev->flags & IFF_PROMISC) {
- printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
dev->name);
+ dprintk(KERN_DEBUG "%s: Promiscuous mode enabled.\n",
dev->name);
pff |= NVREG_PFF_PROMISC;
} else {
pff |= NVREG_PFF_MYADDR;
@@ -2063,6 +3355,9 @@ static void nv_set_multicast(struct net_
addr[1] = alwaysOn[1];
mask[0] = alwaysOn[0] | alwaysOff[0];
mask[1] = alwaysOn[1] | alwaysOff[1];
+ } else {
+ mask[0] = NVREG_MCASTMASKA_NONE;
+ mask[1] = NVREG_MCASTMASKB_NONE;
}
}
addr[0] |= NVREG_MCASTADDRA_FORCE;
@@ -2075,15 +3370,16 @@ static void nv_set_multicast(struct net_
writel(mask[1], base + NvRegMulticastMaskB);
writel(pff, base + NvRegPacketFilterFlags);
dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n",
- dev->name);
+ dev->name);
nv_start_rx(dev);
spin_unlock_irq(&np->lock);
}
static void nv_update_pause(struct net_device *dev, u32 pause_flags)
{
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
u8 __iomem *base = get_hwbase(dev);
+ u32 pause_enable;
np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE);
@@ -2099,12 +3395,17 @@ static void nv_update_pause(struct net_d
if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) {
- writel(NVREG_TX_PAUSEFRAME_ENABLE, base + NvRegTxPauseFrame);
+ pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1;
+ if(np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2)
+ pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2;
+ if(np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3)
+ pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3;
+ writel(pause_enable , base + NvRegTxPauseFrame);
writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
} else {
 writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
- writel(regmisc, base + NvRegMisc1);
+ writel(regmisc, base + NvRegMisc1);
}
}
}
@@ -2122,7 +3423,7 @@ static void nv_update_pause(struct net_d
*/
static int nv_update_linkspeed(struct net_device *dev)
{
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
u8 __iomem *base = get_hwbase(dev);
int adv = 0;
int lpa = 0;
@@ -2132,6 +3433,7 @@ static int nv_update_linkspeed(struct ne
int mii_status;
int retval = 0;
u32 control_1000, status_1000, phyreg, pause_flags, txreg;
+ u32 txrxFlags = 0 ;
/* BMSR_LSTATUS is latched, read it twice:
* we want the current value.
@@ -2148,7 +3450,7 @@ static int nv_update_linkspeed(struct ne
goto set_speed;
}
- if (np->autoneg == 0) {
+ if (np->autoneg == AUTONEG_DISABLE) {
dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY
set to 0x%04x.\n",
dev->name, np->fixed_mode);
if (np->fixed_mode & LPA_100FULL) {
@@ -2180,17 +3482,16 @@ static int nv_update_linkspeed(struct ne
adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa
0x%04x.\n",
- dev->name, adv, lpa);
-
+ dev->name, adv, lpa);
retval = 1;
if (np->gigabit == PHY_GIGABIT) {
control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ);
if ((control_1000 & ADVERTISE_1000FULL) &&
- (status_1000 & LPA_1000FULL)) {
+ (status_1000 & LPA_1000FULL)) {
dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit
ethernet detected.\n",
- dev->name);
+ dev->name);
newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
newdup = 1;
goto set_speed;
@@ -2227,6 +3528,17 @@ set_speed:
np->duplex = newdup;
np->linkspeed = newls;
+ /* The transmitter and receiver must be restarted for safe update */
+ if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START) {
+ txrxFlags |= NV_RESTART_TX;
+ nv_stop_tx(dev);
+ }
+ if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
+ txrxFlags |= NV_RESTART_RX;
+ nv_stop_rx(dev);
+ }
+
+
if (np->gigabit == PHY_GIGABIT) {
phyreg = readl(base + NvRegRandomSeed);
phyreg &= ~(0x3FF00);
@@ -2268,9 +3580,8 @@ set_speed:
txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
}
writel(txreg, base + NvRegTxWatermark);
-
writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD),
- base + NvRegMisc1);
+ base + NvRegMisc1);
pci_push(base);
writel(np->linkspeed, base + NvRegLinkSpeed);
pci_push(base);
@@ -2283,37 +3594,42 @@ set_speed:
lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM);
switch (adv_pause) {
- case (ADVERTISE_PAUSE_CAP):
- if (lpa_pause & LPA_PAUSE_CAP) {
- pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
- if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
- pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
- }
- break;
- case (ADVERTISE_PAUSE_ASYM):
- if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM))
+ case (ADVERTISE_PAUSE_CAP):
+ if (lpa_pause & LPA_PAUSE_CAP) {
+ pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
+ if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
+ pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
+ }
+ break;
+ case (ADVERTISE_PAUSE_ASYM):
+ if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM))
 {
 pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
 }
- break;
- case (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM):
- if (lpa_pause & LPA_PAUSE_CAP)
+ break;
+ case (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM):
+ if (lpa_pause & LPA_PAUSE_CAP)
 {
 pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
 pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
 }
- if (lpa_pause == LPA_PAUSE_ASYM)
- {
- pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
- }
- break;
+ if (lpa_pause == LPA_PAUSE_ASYM)
+ {
+ pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
+ }
+ break;
}
} else {
pause_flags = np->pause_flags;
}
}
nv_update_pause(dev, pause_flags);
+
+ if (txrxFlags & NV_RESTART_TX)
+ nv_start_tx(dev);
+ if (txrxFlags & NV_RESTART_RX)
+ nv_start_rx(dev);
return retval;
}
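
The pause switch above resolves flow control from the local advertisement (adv) and link-partner ability (lpa) bits, then folds in the driver's TX_REQ policy. A condensed sketch of the standard 802.3 resolution alone, as a hypothetical helper (the driver's switch layers its policy flags on top):

#define PAUSE_RX 1
#define PAUSE_TX 2

/* Condensed 802.3 pause resolution: CAP = symmetric pause bit,
 * ASYM = asymmetric pause bit, for local (adv) and partner (lpa). */
static int resolve_pause(int adv_cap, int adv_asym, int lpa_cap, int lpa_asym)
{
	if (adv_cap && lpa_cap)
		return PAUSE_RX | PAUSE_TX;	/* both symmetric */
	if (adv_asym && lpa_asym) {
		if (adv_cap)
			return PAUSE_RX;	/* we receive pause only */
		if (lpa_cap)
			return PAUSE_TX;	/* we transmit pause only */
	}
	return 0;
}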
@@ -2341,7 +3657,7 @@ static void nv_link_irq(struct net_devic
u32 miistat;
miistat = readl(base + NvRegMIIStatus);
- writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
+ writel(NVREG_MIISTAT_LINKCHANGE, base + NvRegMIIStatus);
dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name,
miistat);
if (miistat & (NVREG_MIISTAT_LINKCHANGE))
@@ -2349,15 +3665,19 @@ static void nv_link_irq(struct net_devic
dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name);
}
+#if NVVER < FEDORA7
static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
+#else
+static irqreturn_t nv_nic_irq(int foo, void *data)
+#endif
{
struct net_device *dev = (struct net_device *) data;
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
u8 __iomem *base = get_hwbase(dev);
- u32 events;
+ u32 events,mask;
int i;
- dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);
+ dprintk("%s:%s\n",dev->name,__FUNCTION__);
for (i=0; ; i++) {
if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
@@ -2369,7 +3689,8 @@ static irqreturn_t nv_nic_irq(int foo, v
}
pci_push(base);
dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
- if (!(events & np->irqmask))
+ mask = readl(base + NvRegIrqMask);
+ if (!(events & mask))
break;
spin_lock(&np->lock);
@@ -2397,11 +3718,11 @@ static irqreturn_t nv_nic_irq(int foo, v
}
if (events & (NVREG_IRQ_TX_ERR)) {
dprintk(KERN_DEBUG "%s: received irq with events 0x%x.
Probably TX fail.\n",
- dev->name, events);
+ dev->name, events);
}
if (events & (NVREG_IRQ_UNKNOWN)) {
printk(KERN_DEBUG "%s: received irq with unknown events
0x%x. Please report\n",
- dev->name, events);
+ dev->name, events);
}
if (i > max_interrupt_work) {
spin_lock(&np->lock);
@@ -2427,34 +3748,112 @@ static irqreturn_t nv_nic_irq(int foo, v
return IRQ_RETVAL(i);
}
+#define TX_WORK_PER_LOOP 64
+#define RX_WORK_PER_LOOP 64
+#if NVVER < FEDORA7
+static irqreturn_t nv_nic_irq_optimized(int foo, void *data, struct pt_regs *regs)
+#else
+static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
+#endif
+{
+ struct net_device *dev = (struct net_device *) data;
+ struct fe_priv *np = get_nvpriv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ u32 events,mask;
+ int i = 1;
+
+ do {
+ if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
+ events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
+ writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
+ } else {
+ events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
+ writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
+ }
+
+ mask = readl(base + NvRegIrqMask);
+ if (events & mask) {
+
+ spin_lock(&np->lock);
+ nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
+ spin_unlock(&np->lock);
+
+ if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
+ if (unlikely(nv_alloc_rx_optimized(dev))) {
+ spin_lock(&np->lock);
+ if (!np->in_shutdown)
+ mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+ spin_unlock(&np->lock);
+ }
+ }
+ if (unlikely(events & NVREG_IRQ_LINK)) {
+ spin_lock(&np->lock);
+ nv_link_irq(dev);
+ spin_unlock(&np->lock);
+ }
+ if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
+ spin_lock(&np->lock);
+ nv_linkchange(dev);
+ spin_unlock(&np->lock);
+ np->link_timeout = jiffies + LINK_TIMEOUT;
+ }
+ if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) {
+ spin_lock(&np->lock);
+ /* disable interrupts on the nic */
+ if (!(np->msi_flags & NV_MSI_X_ENABLED))
+ writel(0, base + NvRegIrqMask);
+ else
+ writel(np->irqmask, base + NvRegIrqMask);
+ pci_push(base);
+
+ if (!np->in_shutdown) {
+ np->nic_poll_irq = np->irqmask;
+ np->recover_error = 1;
+ mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
+ }
+ spin_unlock(&np->lock);
+ break;
+ }
+ } else
+ break;
+ }
+ while (i++ <= max_interrupt_work);
+
+ return IRQ_RETVAL(i);
+}
+
+#if NVVER < FEDORA7
static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
+#else
+static irqreturn_t nv_nic_irq_tx(int foo, void *data)
+#endif
{
struct net_device *dev = (struct net_device *) data;
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
u8 __iomem *base = get_hwbase(dev);
u32 events;
int i;
-
- dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name);
+ unsigned long flags;
+
+ dprintk("%s:%s\n",dev->name,__FUNCTION__);
for (i=0; ; i++) {
events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus);
- pci_push(base);
dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events);
if (!(events & np->irqmask))
break;
- spin_lock_irq(&np->lock);
- nv_tx_done(dev);
- spin_unlock_irq(&np->lock);
+ spin_lock_irqsave(&np->lock, flags);
+ nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
+ spin_unlock_irqrestore(&np->lock, flags);
if (events & (NVREG_IRQ_TX_ERR)) {
dprintk(KERN_DEBUG "%s: received irq with events 0x%x.
Probably TX fail.\n",
- dev->name, events);
+ dev->name, events);
}
if (i > max_interrupt_work) {
- spin_lock_irq(&np->lock);
+ spin_lock_irqsave(&np->lock, flags);
/* disable interrupts on the nic */
writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
pci_push(base);
@@ -2464,7 +3863,7 @@ static irqreturn_t nv_nic_irq_tx(int foo
mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
}
printk(KERN_DEBUG "%s: too many iterations (%d) in
nv_nic_irq_tx.\n", dev->name, i);
- spin_unlock_irq(&np->lock);
+ spin_unlock_irqrestore(&np->lock, flags);
break;
}
@@ -2474,34 +3873,39 @@ static irqreturn_t nv_nic_irq_tx(int foo
return IRQ_RETVAL(i);
}
+#if NVVER < FEDORA7
static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
+#else
+static irqreturn_t nv_nic_irq_rx(int foo, void *data)
+#endif
{
struct net_device *dev = (struct net_device *) data;
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
u8 __iomem *base = get_hwbase(dev);
u32 events;
int i;
-
- dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name);
+ unsigned long flags;
+
+ dprintk("%s:%s\n",dev->name,__FUNCTION__);
for (i=0; ; i++) {
events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
- pci_push(base);
dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
if (!(events & np->irqmask))
break;
- nv_rx_process(dev);
- if (nv_alloc_rx(dev)) {
- spin_lock_irq(&np->lock);
- if (!np->in_shutdown)
- mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
- spin_unlock_irq(&np->lock);
+ if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
+ if (unlikely(nv_alloc_rx_optimized(dev))) {
+ spin_lock_irqsave(&np->lock, flags);
+ if (!np->in_shutdown)
+ mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+ spin_unlock_irqrestore(&np->lock, flags);
+ }
}
if (i > max_interrupt_work) {
- spin_lock_irq(&np->lock);
+ spin_lock_irqsave(&np->lock, flags);
/* disable interrupts on the nic */
writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
pci_push(base);
@@ -2511,7 +3915,7 @@ static irqreturn_t nv_nic_irq_rx(int foo
mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
}
printk(KERN_DEBUG "%s: too many iterations (%d) in
nv_nic_irq_rx.\n", dev->name, i);
- spin_unlock_irq(&np->lock);
+ spin_unlock_irqrestore(&np->lock, flags);
break;
}
@@ -2521,23 +3925,32 @@ static irqreturn_t nv_nic_irq_rx(int foo
return IRQ_RETVAL(i);
}
+#if NVVER < FEDORA7
static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
+#else
+static irqreturn_t nv_nic_irq_other(int foo, void *data)
+#endif
{
struct net_device *dev = (struct net_device *) data;
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
u8 __iomem *base = get_hwbase(dev);
u32 events;
int i;
-
- dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name);
+ unsigned long flags;
+
+ dprintk("%s:%s\n",dev->name,__FUNCTION__);
for (i=0; ; i++) {
events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus);
- pci_push(base);
dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
if (!(events & np->irqmask))
break;
+
+ /* check tx in case we reached max loop limit in tx isr */
+ spin_lock_irqsave(&np->lock, flags);
+ nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
+ spin_unlock_irqrestore(&np->lock, flags);
if (events & NVREG_IRQ_LINK) {
spin_lock_irq(&np->lock);
@@ -2550,9 +3963,23 @@ static irqreturn_t nv_nic_irq_other(int
spin_unlock_irq(&np->lock);
np->link_timeout = jiffies + LINK_TIMEOUT;
}
+ if (events & NVREG_IRQ_RECOVER_ERROR) {
+ spin_lock_irq(&np->lock);
+ /* disable interrupts on the nic */
+ writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
+ pci_push(base);
+
+ if (!np->in_shutdown) {
+ np->nic_poll_irq |= NVREG_IRQ_OTHER;
+ np->recover_error = 1;
+ mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
+ }
+ spin_unlock_irq(&np->lock);
+ break;
+ }
if (events & (NVREG_IRQ_UNKNOWN)) {
printk(KERN_DEBUG "%s: received irq with unknown events
0x%x. Please report\n",
- dev->name, events);
+ dev->name, events);
}
if (i > max_interrupt_work) {
spin_lock_irq(&np->lock);
@@ -2575,14 +4002,18 @@ static irqreturn_t nv_nic_irq_other(int
return IRQ_RETVAL(i);
}
+#if NVVER < FEDORA7
static irqreturn_t nv_nic_irq_test(int foo, void *data, struct pt_regs *regs)
+#else
+static irqreturn_t nv_nic_irq_test(int foo, void *data)
+#endif
{
struct net_device *dev = (struct net_device *) data;
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
u8 __iomem *base = get_hwbase(dev);
u32 events;
- dprintk(KERN_DEBUG "%s: nv_nic_irq_test\n", dev->name);
+ dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
@@ -2605,6 +4036,7 @@ static irqreturn_t nv_nic_irq_test(int f
return IRQ_RETVAL(1);
}
+#ifdef CONFIG_PCI_MSI
 static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
{
u8 __iomem *base = get_hwbase(dev);
@@ -2630,12 +4062,15 @@ static void set_msix_vector_map(struct n
}
writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
}
+#endif
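
set_msix_vector_map above steers each interrupt source to an MSI-X vector by writing a 4-bit vector number per source into the two MSIXMap registers. A standalone sketch of that nibble packing; the 4-bits-per-source layout is inferred here for illustration:

#include <stdio.h>
#include <stdint.h>

/* Pack a 4-bit vector id for every set source bit: source i occupies
 * bits [4*i+3 : 4*i] of the 32-bit map register. */
static uint32_t map_sources(uint32_t map, uint32_t srcmask, uint32_t vector)
{
	int i;

	for (i = 0; i < 8; i++)
		if (srcmask & (1u << i))
			map |= (vector & 0xfu) << (i * 4);
	return map;
}

int main(void)
{
	uint32_t map = 0;

	map = map_sources(map, 0x03, 1); /* sources 0-1 -> vector 1 */
	map = map_sources(map, 0x0c, 2); /* sources 2-3 -> vector 2 */
	printf("map = 0x%08x\n", map);   /* prints 0x00002211 */
	return 0;
}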
static int nv_request_irq(struct net_device *dev, int intr_test)
{
struct fe_priv *np = get_nvpriv(dev);
+ int ret = 1;
+
+#if NVVER > SLES9
u8 __iomem *base = get_hwbase(dev);
- int ret = 1;
int i;
if (np->msi_flags & NV_MSI_X_CAPABLE) {
@@ -2646,21 +4081,21 @@ static int nv_request_irq(struct net_dev
np->msi_flags |= NV_MSI_X_ENABLED;
 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
/* Request irq for rx handling */
- if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, IRQF_SHARED, dev->name, dev) != 0) {
+ if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, IRQ_FLAG, dev->name, dev) != 0) {
 printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
pci_disable_msix(np->pci_dev);
np->msi_flags &= ~NV_MSI_X_ENABLED;
goto out_err;
}
/* Request irq for tx handling */
- if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, IRQF_SHARED, dev->name, dev) != 0) {
+ if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, IRQ_FLAG, dev->name, dev) != 0) {
 printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
pci_disable_msix(np->pci_dev);
np->msi_flags &= ~NV_MSI_X_ENABLED;
goto out_free_rx;
}
/* Request irq for link and timer handling */
-			if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, IRQF_SHARED, dev->name, dev) != 0) {
+			if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, IRQ_FLAG, dev->name, dev) != 0) {
 				printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
pci_disable_msix(np->pci_dev);
np->msi_flags &= ~NV_MSI_X_ENABLED;
@@ -2669,15 +4104,19 @@ static int nv_request_irq(struct net_dev
/* map interrupts to their respective vector */
writel(0, base + NvRegMSIXMap0);
writel(0, base + NvRegMSIXMap1);
+#ifdef CONFIG_PCI_MSI
 			set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
 			set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
 			set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
+#endif
} else {
/* Request irq for all interrupts */
-			if ((!intr_test &&
-			     request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, IRQF_SHARED, dev->name, dev) != 0) ||
-			    (intr_test &&
-			     request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq_test, IRQF_SHARED, dev->name, dev) != 0)) {
+			if ((!intr_test && np->desc_ver == DESC_VER_3 &&
+			     request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq_optimized, IRQ_FLAG, dev->name, dev) != 0) ||
+			    (!intr_test && np->desc_ver != DESC_VER_3 &&
+			     request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, IRQ_FLAG, dev->name, dev) != 0) ||
+			    (intr_test &&
+			     request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq_test, IRQ_FLAG, dev->name, dev) != 0)) {
 				printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
pci_disable_msix(np->pci_dev);
np->msi_flags &= ~NV_MSI_X_ENABLED;
@@ -2692,14 +4131,17 @@ static int nv_request_irq(struct net_dev
}
if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
- pci_intx(np->pci_dev, 0);
np->msi_flags |= NV_MSI_ENABLED;
-			if ((!intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq, IRQF_SHARED, dev->name, dev) != 0) ||
-			    (intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, IRQF_SHARED, dev->name, dev) != 0)) {
+ dev->irq = np->pci_dev->irq;
+			if ((!intr_test && np->desc_ver == DESC_VER_3 &&
+			     request_irq(np->pci_dev->irq, &nv_nic_irq_optimized, IRQ_FLAG, dev->name, dev) != 0) ||
+			    (!intr_test && np->desc_ver != DESC_VER_3 &&
+			     request_irq(np->pci_dev->irq, &nv_nic_irq, IRQ_FLAG, dev->name, dev) != 0) ||
+			    (intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, IRQ_FLAG, dev->name, dev) != 0)) {
 				printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
pci_disable_msi(np->pci_dev);
- pci_intx(np->pci_dev, 1);
np->msi_flags &= ~NV_MSI_ENABLED;
+ dev->irq = np->pci_dev->irq;
goto out_err;
}
@@ -2710,22 +4152,124 @@ static int nv_request_irq(struct net_dev
 			writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
}
}
+#else
+#ifdef CONFIG_PCI_MSI
+ u8 __iomem *base = get_hwbase(dev);
+ int i;
+
+ if (np->msi_flags & NV_MSI_X_CAPABLE) {
+ for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
+ np->msi_x_entry[i].entry = i;
+ }
+ if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
+ np->msi_flags |= NV_MSI_X_ENABLED;
+		if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
+			msi_alloc_vectors(np->pci_dev,(int *)np->msi_x_entry,2);
+ /* Request irq for rx handling */
+			if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, IRQ_FLAG, dev->name, dev) != 0) {
+				printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
+ pci_disable_msi(np->pci_dev);
+ np->msi_flags &= ~NV_MSI_X_ENABLED;
+ goto out_err;
+ }
+ /* Request irq for tx handling */
+			if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, IRQ_FLAG, dev->name, dev) != 0) {
+				printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
+ pci_disable_msi(np->pci_dev);
+ np->msi_flags &= ~NV_MSI_X_ENABLED;
+ goto out_free_rx;
+ }
+ /* Request irq for link and timer handling */
+			if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, IRQ_FLAG, dev->name, dev) != 0) {
+				printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
+ pci_disable_msi(np->pci_dev);
+ np->msi_flags &= ~NV_MSI_X_ENABLED;
+ goto out_free_tx;
+ }
+ /* map interrupts to their respective vector */
+ writel(0, base + NvRegMSIXMap0);
+ writel(0, base + NvRegMSIXMap1);
+#ifdef CONFIG_PCI_MSI
+			set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
+			set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
+			set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
+#endif
+ } else {
+ /* Request irq for all interrupts */
+			if ((!intr_test && np->desc_ver == DESC_VER_3 &&
+			     request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq_optimized, IRQ_FLAG, dev->name, dev) != 0) ||
+			    (!intr_test && np->desc_ver != DESC_VER_3 &&
+			     request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, IRQ_FLAG, dev->name, dev) != 0) ||
+			    (intr_test &&
+			     request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq_test, IRQ_FLAG, dev->name, dev) != 0)) {
+				printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
+ pci_disable_msi(np->pci_dev);
+ np->msi_flags &= ~NV_MSI_X_ENABLED;
+ goto out_err;
+ }
+
+ /* map interrupts to vector 0 */
+ writel(0, base + NvRegMSIXMap0);
+ writel(0, base + NvRegMSIXMap1);
+ }
+ }
+ }
+ if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
+
+ if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
+ np->msi_flags |= NV_MSI_ENABLED;
+ dev->irq = np->pci_dev->irq;
+			if ((!intr_test && np->desc_ver == DESC_VER_3 &&
+			     request_irq(np->pci_dev->irq, &nv_nic_irq_optimized, IRQ_FLAG, dev->name, dev) != 0) ||
+			    (!intr_test && np->desc_ver != DESC_VER_3 &&
+			     request_irq(np->pci_dev->irq, &nv_nic_irq, IRQ_FLAG, dev->name, dev) != 0) ||
+			    (intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, IRQ_FLAG, dev->name, dev) != 0)) {
+				printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
+ pci_disable_msi(np->pci_dev);
+ np->msi_flags &= ~NV_MSI_ENABLED;
+ dev->irq = np->pci_dev->irq;
+ goto out_err;
+ }
+
+ /* map interrupts to vector 0 */
+ writel(0, base + NvRegMSIMap0);
+ writel(0, base + NvRegMSIMap1);
+ /* enable msi vector 0 */
+			writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
+ }
+ }
+#endif
+#endif
if (ret != 0) {
-	if ((!intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq, IRQF_SHARED, dev->name, dev) != 0) ||
-	    (intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, IRQF_SHARED, dev->name, dev) != 0))
+	if ((!intr_test && np->desc_ver == DESC_VER_3 &&
+	     request_irq(np->pci_dev->irq, &nv_nic_irq_optimized, IRQ_FLAG, dev->name, dev) != 0) ||
+	    (!intr_test && np->desc_ver != DESC_VER_3 &&
+	     request_irq(np->pci_dev->irq, &nv_nic_irq, IRQ_FLAG, dev->name, dev) != 0) ||
+	    (intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, IRQ_FLAG, dev->name, dev) != 0))
goto out_err;
}
return 0;
+
+#if NVVER > SLES9
out_free_tx:
free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
out_free_rx:
free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
+#else
+#ifdef CONFIG_PCI_MSI
+out_free_tx:
+ free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
+out_free_rx:
+ free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
+#endif
+#endif
out_err:
return 1;
}
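
/*
 * nv_request_irq() above walks a capability ladder: MSI-X with one
 * vector per event class (rx/tx/other), then a single shared MSI-X
 * vector, then plain MSI, then the legacy INTx line. Each failed
 * request_irq() undoes the matching pci_enable_* step before falling
 * through. A minimal sketch of the same ladder, with hypothetical
 * handler and variable names rather than the driver's own:
 *
 *	if (pci_enable_msix(pdev, entries, nvec) == 0) {
 *		if (request_irq(entries[0].vector, my_handler, 0, name, dev))
 *			pci_disable_msix(pdev);		then fall back to MSI
 *	} else if (pci_enable_msi(pdev) == 0) {
 *		if (request_irq(pdev->irq, my_handler, 0, name, dev))
 *			pci_disable_msi(pdev);		then fall back to INTx
 *	} else {
 *		request_irq(pdev->irq, my_handler, IRQF_SHARED, name, dev);
 *	}
 */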
+#if NVVER > SLES9
static void nv_free_irq(struct net_device *dev)
{
struct fe_priv *np = get_nvpriv(dev);
@@ -2741,16 +4285,43 @@ static void nv_free_irq(struct net_devic
free_irq(np->pci_dev->irq, dev);
if (np->msi_flags & NV_MSI_ENABLED) {
pci_disable_msi(np->pci_dev);
- pci_intx(np->pci_dev, 1);
np->msi_flags &= ~NV_MSI_ENABLED;
}
}
}
+#else
+static void nv_free_irq(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+
+#ifdef CONFIG_PCI_MSI
+ int i;
+
+ if (np->msi_flags & NV_MSI_X_ENABLED) {
+ for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
+ free_irq(np->msi_x_entry[i].vector, dev);
+ }
+ pci_disable_msi(np->pci_dev);
+ np->msi_flags &= ~NV_MSI_X_ENABLED;
+ } else {
+ free_irq(np->pci_dev->irq, dev);
+
+ if (np->msi_flags & NV_MSI_ENABLED) {
+ pci_disable_msi(np->pci_dev);
+ np->msi_flags &= ~NV_MSI_ENABLED;
+ }
+ }
+#else
+ free_irq(np->pci_dev->irq, dev);
+#endif
+
+}
+#endif
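
/*
 * nv_free_irq() mirrors nv_request_irq() in reverse: every requested
 * vector is freed before the MSI-X/MSI capability is disabled, and the
 * NV_MSI_X_ENABLED/NV_MSI_ENABLED bits in np->msi_flags are cleared so
 * that a later open restarts the capability ladder from the top.
 */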
static void nv_do_nic_poll(unsigned long data)
{
struct net_device *dev = (struct net_device *) data;
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
u8 __iomem *base = get_hwbase(dev);
u32 mask = 0;
@@ -2760,115 +4331,239 @@ static void nv_do_nic_poll(unsigned long
* nv_nic_irq because that may decide to do otherwise
*/
+ spin_lock_irq(&np->timer_lock);
if (!using_multi_irqs(dev)) {
if (np->msi_flags & NV_MSI_X_ENABLED)
-			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
else
- disable_irq_lockdep(dev->irq);
+ disable_irq(np->pci_dev->irq);
mask = np->irqmask;
} else {
if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
-			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+ disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
mask |= NVREG_IRQ_RX_ALL;
}
if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
-			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+ disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
mask |= NVREG_IRQ_TX_ALL;
}
if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
-			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
mask |= NVREG_IRQ_OTHER;
}
}
np->nic_poll_irq = 0;
- /* FIXME: Do we need synchronize_irq(dev->irq) here? */
+	/* disable_irq() contains synchronize_irq, thus no irq handler can run now */
+
+ if (np->recover_error) {
+ np->recover_error = 0;
+ printk(KERN_INFO "forcedeth: MAC in recoverable error state\n");
+ if (netif_running(dev)) {
+#if NVVER > FEDORA5
+ netif_tx_lock_bh(dev);
+#else
+ spin_lock_bh(&dev->xmit_lock);
+#endif
+ spin_lock(&np->lock);
+ /* stop engines */
+ nv_stop_rx(dev);
+ nv_stop_tx(dev);
+ nv_txrx_reset(dev);
+ /* drain rx queue */
+ nv_drain_rx(dev);
+ nv_drain_tx(dev);
+ /* reinit driver view of the rx queue */
+ set_bufsize(dev);
+ if (nv_init_ring(dev)) {
+ if (!np->in_shutdown)
+				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+ }
+ /* reinit nic view of the rx queue */
+ writel(np->rx_buf_sz, base + NvRegOffloadConfig);
+			setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
+			writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
+				base + NvRegRingSizes);
+			pci_push(base);
+			writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
+			pci_push(base);
+
+ /* restart rx engine */
+ nv_start_rx(dev);
+ nv_start_tx(dev);
+ spin_unlock(&np->lock);
+#if NVVER > FEDORA5
+ netif_tx_unlock_bh(dev);
+#else
+ spin_unlock_bh(&dev->xmit_lock);
+#endif
+ }
+ }
writel(mask, base + NvRegIrqMask);
pci_push(base);
if (!using_multi_irqs(dev)) {
- nv_nic_irq(0, dev, NULL);
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+#if NVVER < FEDORA7
+			nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL);
+#else
+ nv_nic_irq((int) 0, (void *) data);
+#endif
+ else
+#if NVVER < FEDORA7
+			nv_nic_irq_optimized((int) 0, (void *) data, (struct pt_regs *) NULL);
+#else
+ nv_nic_irq_optimized((int) 0, (void *) data);
+#endif
if (np->msi_flags & NV_MSI_X_ENABLED)
-			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+ enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
else
- enable_irq_lockdep(dev->irq);
+ enable_irq(np->pci_dev->irq);
} else {
if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
- nv_nic_irq_rx(0, dev, NULL);
-			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+#if NVVER < FEDORA7
+			nv_nic_irq_rx((int) 0, (void *) data, (struct pt_regs *) NULL);
+#else
+ nv_nic_irq_rx((int) 0, (void *) data);
+#endif
+ enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
}
if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
- nv_nic_irq_tx(0, dev, NULL);
-			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+#if NVVER < FEDORA7
+			nv_nic_irq_tx((int) 0, (void *) data, (struct pt_regs *) NULL);
+#else
+ nv_nic_irq_tx((int) 0, (void *) data);
+#endif
+ enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
}
if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
- nv_nic_irq_other(0, dev, NULL);
-			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
- }
- }
-}
-
+#if NVVER < FEDORA7
+			nv_nic_irq_other((int) 0, (void *) data, (struct pt_regs *) NULL);
+#else
+ nv_nic_irq_other((int) 0, (void *) data);
+#endif
+			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+ }
+ }
+ spin_unlock_irq(&np->timer_lock);
+}
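
/*
 * The recover_error path in nv_do_nic_poll() above reuses the driver's
 * standard full-reinit sequence; condensed, under netif_tx_lock_bh()
 * and np->lock it amounts to:
 *
 *	nv_stop_rx(dev);  nv_stop_tx(dev);  nv_txrx_reset(dev);
 *	nv_drain_rx(dev); nv_drain_tx(dev);
 *	set_bufsize(dev); nv_init_ring(dev);
 *	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
 *	nv_start_rx(dev); nv_start_tx(dev);
 *
 * i.e. stop both engines, discard all in-flight buffers, rebuild the
 * rings from scratch, hand them back to the nic, and restart.
 */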
+
+#if NVVER > RHES3
#ifdef CONFIG_NET_POLL_CONTROLLER
static void nv_poll_controller(struct net_device *dev)
{
nv_do_nic_poll((unsigned long) dev);
}
#endif
+#else
+static void nv_poll_controller(struct net_device *dev)
+{
+ nv_do_nic_poll((unsigned long) dev);
+}
+#endif
static void nv_do_stats_poll(unsigned long data)
{
struct net_device *dev = (struct net_device *) data;
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
u8 __iomem *base = get_hwbase(dev);
- np->estats.tx_bytes += readl(base + NvRegTxCnt);
- np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
- np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
- np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
- np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
- np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
- np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
- np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
- np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
- np->estats.tx_deferral += readl(base + NvRegTxDef);
- np->estats.tx_packets += readl(base + NvRegTxFrame);
- np->estats.tx_pause += readl(base + NvRegTxPause);
- np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
- np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
- np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
- np->estats.rx_runt += readl(base + NvRegRxRunt);
- np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
- np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
- np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
- np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
- np->estats.rx_length_error += readl(base + NvRegRxLenErr);
- np->estats.rx_unicast += readl(base + NvRegRxUnicast);
- np->estats.rx_multicast += readl(base + NvRegRxMulticast);
- np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
- np->estats.rx_bytes += readl(base + NvRegRxCnt);
- np->estats.rx_pause += readl(base + NvRegRxPause);
- np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
- np->estats.rx_packets =
- np->estats.rx_unicast +
- np->estats.rx_multicast +
- np->estats.rx_broadcast;
- np->estats.rx_errors_total =
- np->estats.rx_crc_errors +
- np->estats.rx_over_errors +
- np->estats.rx_frame_error +
- (np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
- np->estats.rx_late_collision +
- np->estats.rx_runt +
- np->estats.rx_frame_too_long;
-
- if (!np->in_shutdown)
+ spin_lock_irq(&np->lock);
+
+ np->estats.tx_dropped = np->stats.tx_dropped;
+ if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2)) {
+ np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
+		np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
+ np->estats.tx_bytes += readl(base + NvRegTxCnt);
+ np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
+ np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
+ np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
+ np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
+ np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
+ np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
+ np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
+ np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
+ np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
+ np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
+ np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
+ np->estats.rx_runt += readl(base + NvRegRxRunt);
+		np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
+		np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
+ np->estats.rx_length_error += readl(base + NvRegRxLenErr);
+ np->estats.rx_unicast += readl(base + NvRegRxUnicast);
+ np->estats.rx_multicast += readl(base + NvRegRxMulticast);
+ np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
+ np->estats.rx_packets =
+ np->estats.rx_unicast +
+ np->estats.rx_multicast +
+ np->estats.rx_broadcast;
+ np->estats.rx_errors_total =
+ np->estats.rx_crc_errors +
+ np->estats.rx_over_errors +
+ np->estats.rx_frame_error +
+			(np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
+ np->estats.rx_late_collision +
+ np->estats.rx_runt +
+ np->estats.rx_frame_too_long +
+ np->rx_len_errors;
+
+ if (np->driver_data & DEV_HAS_STATISTICS_V2) {
+ np->estats.tx_deferral += readl(base + NvRegTxDef);
+ np->estats.tx_packets += readl(base + NvRegTxFrame);
+ np->estats.rx_bytes += readl(base + NvRegRxCnt);
+ np->estats.tx_pause += readl(base + NvRegTxPause);
+ np->estats.rx_pause += readl(base + NvRegRxPause);
+			np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
+ }
+
+ /* copy to net_device stats */
+ np->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
+ np->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
+ np->stats.tx_bytes = np->estats.tx_bytes;
+ np->stats.rx_crc_errors = np->estats.rx_crc_errors;
+ np->stats.rx_over_errors = np->estats.rx_over_errors;
+ np->stats.rx_packets = np->estats.rx_packets;
+ np->stats.rx_errors = np->estats.rx_errors_total;
+
+ } else {
+ np->estats.tx_packets = np->stats.tx_packets;
+ np->estats.tx_fifo_errors = np->stats.tx_fifo_errors;
+ np->estats.tx_carrier_errors = np->stats.tx_carrier_errors;
+ np->estats.tx_bytes = np->stats.tx_bytes;
+ np->estats.rx_bytes = np->stats.rx_bytes;
+ np->estats.rx_crc_errors = np->stats.rx_crc_errors;
+ np->estats.rx_over_errors = np->stats.rx_over_errors;
+ np->estats.rx_packets = np->stats.rx_packets;
+ np->estats.rx_errors_total = np->stats.rx_errors;
+ }
+
+ if (!np->in_shutdown && netif_running(dev))
mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL);
+ spin_unlock_irq(&np->lock);
+}
+
+/*
+ * nv_get_stats: dev->get_stats function
+ * Get latest stats value from the nic.
+ * Called with read_lock(&dev_base_lock) held for read -
+ * only synchronized against unregister_netdevice.
+ */
+static struct net_device_stats *nv_get_stats(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+
+ /* It seems that the nic always generates interrupts and doesn't
+ * accumulate errors internally. Thus the current values in np->stats
+ * are already up to date.
+ */
+ nv_do_stats_poll((unsigned long)dev);
+ return &np->stats;
}
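
/*
 * The hardware counters read in nv_do_stats_poll() appear to clear on
 * read, hence the "+=" accumulation into np->estats; nv_get_stats()
 * can then simply trigger one poll and return the mirrored np->stats.
 * On chips without DEV_HAS_STATISTICS_V1/V2 the copy runs the other
 * way: the software counters maintained by the fast path are copied
 * into np->estats so the ethtool view stays consistent.
 */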
static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
strcpy(info->driver, "forcedeth");
strcpy(info->version, FORCEDETH_VERSION);
strcpy(info->bus_info, pci_name(np->pci_dev));
@@ -2876,7 +4571,7 @@ static void nv_get_drvinfo(struct net_de
static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
{
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
wolinfo->supported = WAKE_MAGIC;
spin_lock_irq(&np->lock);
@@ -2887,7 +4582,7 @@ static void nv_get_wol(struct net_device
static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
{
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
u8 __iomem *base = get_hwbase(dev);
u32 flags = 0;
@@ -2907,7 +4602,7 @@ static int nv_set_wol(struct net_device
static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
int adv;
spin_lock_irq(&np->lock);
@@ -2926,15 +4621,15 @@ static int nv_get_settings(struct net_de
if (netif_carrier_ok(dev)) {
switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) {
- case NVREG_LINKSPEED_10:
- ecmd->speed = SPEED_10;
- break;
- case NVREG_LINKSPEED_100:
- ecmd->speed = SPEED_100;
- break;
- case NVREG_LINKSPEED_1000:
- ecmd->speed = SPEED_1000;
- break;
+ case NVREG_LINKSPEED_10:
+ ecmd->speed = SPEED_10;
+ break;
+ case NVREG_LINKSPEED_100:
+ ecmd->speed = SPEED_100;
+ break;
+ case NVREG_LINKSPEED_1000:
+ ecmd->speed = SPEED_1000;
+ break;
}
ecmd->duplex = DUPLEX_HALF;
if (np->duplex)
@@ -2965,9 +4660,9 @@ static int nv_get_settings(struct net_de
}
}
ecmd->supported = (SUPPORTED_Autoneg |
- SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
- SUPPORTED_MII);
+ SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
+ SUPPORTED_MII);
if (np->gigabit == PHY_GIGABIT)
ecmd->supported |= SUPPORTED_1000baseT_Full;
@@ -2981,8 +4676,9 @@ static int nv_get_settings(struct net_de
static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
- struct fe_priv *np = netdev_priv(dev);
-
+ struct fe_priv *np = get_nvpriv(dev);
+
+ dprintk(KERN_DEBUG "%s: nv_set_settings \n", dev->name);
if (ecmd->port != PORT_MII)
return -EINVAL;
if (ecmd->transceiver != XCVR_EXTERNAL)
@@ -2996,7 +4692,7 @@ static int nv_set_settings(struct net_de
u32 mask;
mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
- ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
+ ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
if (np->gigabit == PHY_GIGABIT)
mask |= ADVERTISED_1000baseT_Full;
@@ -3017,14 +4713,27 @@ static int nv_set_settings(struct net_de
netif_carrier_off(dev);
if (netif_running(dev)) {
- nv_disable_irq(dev);
+ nv_disable_hw_interrupts(dev, np->irqmask);
+#if NVVER > RHES3
+ synchronize_irq(np->pci_dev->irq);
+#else
+ synchronize_irq();
+#endif
+#if NVVER > FEDORA5
netif_tx_lock_bh(dev);
+#else
+ spin_lock_bh(&dev->xmit_lock);
+#endif
spin_lock(&np->lock);
/* stop engines */
nv_stop_rx(dev);
nv_stop_tx(dev);
spin_unlock(&np->lock);
+#if NVVER > FEDORA5
netif_tx_unlock_bh(dev);
+#else
+ spin_unlock_bh(&dev->xmit_lock);
+#endif
}
if (ecmd->autoneg == AUTONEG_ENABLE) {
@@ -3035,14 +4744,22 @@ static int nv_set_settings(struct net_de
/* advertise only what has been requested */
adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
 		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
- if (ecmd->advertising & ADVERTISED_10baseT_Half)
+ if (ecmd->advertising & ADVERTISED_10baseT_Half) {
adv |= ADVERTISE_10HALF;
- if (ecmd->advertising & ADVERTISED_10baseT_Full)
+ np->speed_duplex = NV_SPEED_DUPLEX_10_HALF_DUPLEX;
+ }
+ if (ecmd->advertising & ADVERTISED_10baseT_Full) {
adv |= ADVERTISE_10FULL;
- if (ecmd->advertising & ADVERTISED_100baseT_Half)
+ np->speed_duplex = NV_SPEED_DUPLEX_10_FULL_DUPLEX;
+ }
+ if (ecmd->advertising & ADVERTISED_100baseT_Half) {
adv |= ADVERTISE_100HALF;
- if (ecmd->advertising & ADVERTISED_100baseT_Full)
+ np->speed_duplex = NV_SPEED_DUPLEX_100_HALF_DUPLEX;
+ }
+ if (ecmd->advertising & ADVERTISED_100baseT_Full) {
adv |= ADVERTISE_100FULL;
+ np->speed_duplex = NV_SPEED_DUPLEX_100_FULL_DUPLEX;
+ }
 		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
@@ -3052,17 +4769,34 @@ static int nv_set_settings(struct net_de
if (np->gigabit == PHY_GIGABIT) {
adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
adv &= ~ADVERTISE_1000FULL;
- if (ecmd->advertising & ADVERTISED_1000baseT_Full)
+ if (ecmd->advertising & ADVERTISED_1000baseT_Full) {
adv |= ADVERTISE_1000FULL;
+				np->speed_duplex = NV_SPEED_DUPLEX_1000_FULL_DUPLEX;
+ }
mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
+
+			if (ecmd->advertising & (ADVERTISED_10baseT_Half|ADVERTISED_10baseT_Full|ADVERTISED_100baseT_Half|ADVERTISED_100baseT_Full|ADVERTISED_1000baseT_Full))
+ np->speed_duplex = NV_SPEED_DUPLEX_AUTO;
+ } else {
+			if (ecmd->advertising & (ADVERTISED_10baseT_Half|ADVERTISED_10baseT_Full|ADVERTISED_100baseT_Half|ADVERTISED_100baseT_Full))
+ np->speed_duplex = NV_SPEED_DUPLEX_AUTO;
}
if (netif_running(dev))
printk(KERN_INFO "%s: link down.\n", dev->name);
bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
- bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
- mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
-
+ if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
+ bmcr |= BMCR_ANENABLE;
+ /* reset the phy in order for settings to stick,
+ * and cause autoneg to start */
+ if (phy_reset(dev, bmcr)) {
+				printk(KERN_INFO "%s: phy reset failed\n", dev->name);
+ return -EINVAL;
+ }
+ } else {
+ bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
+ }
} else {
int adv, bmcr;
@@ -3070,14 +4804,22 @@ static int nv_set_settings(struct net_de
adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
 		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
- if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
+ if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF) {
adv |= ADVERTISE_10HALF;
- if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
+ np->speed_duplex = NV_SPEED_DUPLEX_10_HALF_DUPLEX;
+ }
+ if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL) {
adv |= ADVERTISE_10FULL;
- if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
+ np->speed_duplex = NV_SPEED_DUPLEX_10_FULL_DUPLEX;
+ }
+ if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF) {
adv |= ADVERTISE_100HALF;
- if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
+ np->speed_duplex = NV_SPEED_DUPLEX_100_HALF_DUPLEX;
+ }
+ if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL) {
adv |= ADVERTISE_100FULL;
+ np->speed_duplex = NV_SPEED_DUPLEX_100_FULL_DUPLEX;
+ }
 		np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
 		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) {/* for rx we set both advertisements but disable tx pause */
adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
@@ -3102,24 +4844,26 @@ static int nv_set_settings(struct net_de
bmcr |= BMCR_FULLDPLX;
if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))
bmcr |= BMCR_SPEED100;
- mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
if (np->phy_oui == PHY_OUI_MARVELL) {
- /* reset the phy */
- if (phy_reset(dev)) {
+			/* reset the phy in order for forced mode settings to stick */
+			if (phy_reset(dev, bmcr)) {
 				printk(KERN_INFO "%s: phy reset failed\n", dev->name);
return -EINVAL;
}
- } else if (netif_running(dev)) {
- /* Wait a bit and then reconfigure the nic. */
- udelay(10);
- nv_linkchange(dev);
+ } else {
+ mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
+ if (netif_running(dev)) {
+ /* Wait a bit and then reconfigure the nic. */
+ udelay(10);
+ nv_linkchange(dev);
+ }
}
}
if (netif_running(dev)) {
nv_start_rx(dev);
nv_start_tx(dev);
- nv_enable_irq(dev);
+ nv_enable_hw_interrupts(dev, np->irqmask);
}
return 0;
@@ -3129,13 +4873,13 @@ static int nv_set_settings(struct net_de
static int nv_get_regs_len(struct net_device *dev)
{
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
return np->register_size;
}
static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs,
void *buf)
{
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
u8 __iomem *base = get_hwbase(dev);
u32 *rbuf = buf;
int i;
@@ -3149,7 +4893,7 @@ static void nv_get_regs(struct net_devic
static int nv_nway_reset(struct net_device *dev)
{
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
int ret;
if (np->autoneg) {
@@ -3158,19 +4902,36 @@ static int nv_nway_reset(struct net_devi
netif_carrier_off(dev);
if (netif_running(dev)) {
nv_disable_irq(dev);
+#if NVVER > FEDORA5
netif_tx_lock_bh(dev);
+#else
+ spin_lock_bh(&dev->xmit_lock);
+#endif
spin_lock(&np->lock);
/* stop engines */
nv_stop_rx(dev);
nv_stop_tx(dev);
spin_unlock(&np->lock);
+#if NVVER > FEDORA5
netif_tx_unlock_bh(dev);
+#else
+ spin_unlock_bh(&dev->xmit_lock);
+#endif
printk(KERN_INFO "%s: link down.\n", dev->name);
}
bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
- bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
- mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
+ if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
+ bmcr |= BMCR_ANENABLE;
+ /* reset the phy in order for settings to stick*/
+ if (phy_reset(dev, bmcr)) {
+				printk(KERN_INFO "%s: phy reset failed\n", dev->name);
+ return -EINVAL;
+ }
+ } else {
+ bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
+ }
if (netif_running(dev)) {
nv_start_rx(dev);
@@ -3185,19 +4946,9 @@ static int nv_nway_reset(struct net_devi
return ret;
}
-static int nv_set_tso(struct net_device *dev, u32 value)
-{
- struct fe_priv *np = netdev_priv(dev);
-
- if ((np->driver_data & DEV_HAS_CHECKSUM))
- return ethtool_op_set_tso(dev, value);
- else
- return -EOPNOTSUPP;
-}
-
static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
{
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
 	ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
ring->rx_mini_max_pending = 0;
@@ -3212,66 +4963,62 @@ static void nv_get_ringparam(struct net_
static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
{
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
u8 __iomem *base = get_hwbase(dev);
- u8 *rxtx_ring, *rx_skbuff, *tx_skbuff, *rx_dma, *tx_dma, *tx_dma_len;
+ u8 *rxtx_ring, *rx_skbuff, *tx_skbuff;
dma_addr_t ring_addr;
if (ring->rx_pending < RX_RING_MIN ||
- ring->tx_pending < TX_RING_MIN ||
- ring->rx_mini_pending != 0 ||
- ring->rx_jumbo_pending != 0 ||
- (np->desc_ver == DESC_VER_1 &&
- (ring->rx_pending > RING_MAX_DESC_VER_1 ||
- ring->tx_pending > RING_MAX_DESC_VER_1)) ||
- (np->desc_ver != DESC_VER_1 &&
- (ring->rx_pending > RING_MAX_DESC_VER_2_3 ||
- ring->tx_pending > RING_MAX_DESC_VER_2_3))) {
+ ring->tx_pending < TX_RING_MIN ||
+ ring->rx_mini_pending != 0 ||
+ ring->rx_jumbo_pending != 0 ||
+ (np->desc_ver == DESC_VER_1 &&
+ (ring->rx_pending > RING_MAX_DESC_VER_1 ||
+ ring->tx_pending > RING_MAX_DESC_VER_1)) ||
+ (np->desc_ver != DESC_VER_1 &&
+ (ring->rx_pending > RING_MAX_DESC_VER_2_3 ||
+ ring->tx_pending > RING_MAX_DESC_VER_2_3))) {
return -EINVAL;
}
/* allocate new rings */
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
rxtx_ring = pci_alloc_consistent(np->pci_dev,
-					sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
-					&ring_addr);
+					sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
+					&ring_addr);
} else {
rxtx_ring = pci_alloc_consistent(np->pci_dev,
-					sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
-					&ring_addr);
-	}
-	rx_skbuff = kmalloc(sizeof(struct sk_buff*) * ring->rx_pending, GFP_KERNEL);
-	rx_dma = kmalloc(sizeof(dma_addr_t) * ring->rx_pending, GFP_KERNEL);
-	tx_skbuff = kmalloc(sizeof(struct sk_buff*) * ring->tx_pending, GFP_KERNEL);
-	tx_dma = kmalloc(sizeof(dma_addr_t) * ring->tx_pending, GFP_KERNEL);
-	tx_dma_len = kmalloc(sizeof(unsigned int) * ring->tx_pending, GFP_KERNEL);
-	if (!rxtx_ring || !rx_skbuff || !rx_dma || !tx_skbuff || !tx_dma || !tx_dma_len) {
+					sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
+					&ring_addr);
+	}
+	rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL);
+	tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL);
+
+ if (!rxtx_ring || !rx_skbuff || !tx_skbuff) {
/* fall back to old rings */
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
if(rxtx_ring)
 				pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
- rxtx_ring, ring_addr);
+ rxtx_ring, ring_addr);
} else {
if (rxtx_ring)
 				pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
- rxtx_ring, ring_addr);
+ rxtx_ring, ring_addr);
}
if (rx_skbuff)
kfree(rx_skbuff);
- if (rx_dma)
- kfree(rx_dma);
if (tx_skbuff)
kfree(tx_skbuff);
- if (tx_dma)
- kfree(tx_dma);
- if (tx_dma_len)
- kfree(tx_dma_len);
goto exit;
}
if (netif_running(dev)) {
nv_disable_irq(dev);
+#if NVVER > FEDORA5
netif_tx_lock_bh(dev);
+#else
+ spin_lock_bh(&dev->xmit_lock);
+#endif
spin_lock(&np->lock);
/* stop engines */
nv_stop_rx(dev);
@@ -3287,8 +5034,8 @@ static int nv_set_ringparam(struct net_d
/* set new values */
np->rx_ring_size = ring->rx_pending;
np->tx_ring_size = ring->tx_pending;
- np->tx_limit_stop = ring->tx_pending - TX_LIMIT_DIFFERENCE;
- np->tx_limit_start = ring->tx_pending - TX_LIMIT_DIFFERENCE - 1;
+ np->tx_limit_stop =np->tx_ring_size - TX_LIMIT_DIFFERENCE;
+ np->tx_limit_start =np->tx_ring_size - TX_LIMIT_DIFFERENCE - 1;
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
np->rx_ring.orig = (struct ring_desc*)rxtx_ring;
np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
@@ -3296,18 +5043,12 @@ static int nv_set_ringparam(struct net_d
np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring;
np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
}
- np->rx_skbuff = (struct sk_buff**)rx_skbuff;
- np->rx_dma = (dma_addr_t*)rx_dma;
- np->tx_skbuff = (struct sk_buff**)tx_skbuff;
- np->tx_dma = (dma_addr_t*)tx_dma;
- np->tx_dma_len = (unsigned int*)tx_dma_len;
+ np->rx_skb = (struct nv_skb_map*)rx_skbuff;
+ np->tx_skb = (struct nv_skb_map*)tx_skbuff;
np->ring_addr = ring_addr;
- memset(np->rx_skbuff, 0, sizeof(struct sk_buff*) * np->rx_ring_size);
- memset(np->rx_dma, 0, sizeof(dma_addr_t) * np->rx_ring_size);
- memset(np->tx_skbuff, 0, sizeof(struct sk_buff*) * np->tx_ring_size);
- memset(np->tx_dma, 0, sizeof(dma_addr_t) * np->tx_ring_size);
- memset(np->tx_dma_len, 0, sizeof(unsigned int) * np->tx_ring_size);
+ memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
+ memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);
if (netif_running(dev)) {
/* reinit driver view of the queues */
@@ -3321,7 +5062,7 @@ static int nv_set_ringparam(struct net_d
writel(np->rx_buf_sz, base + NvRegOffloadConfig);
setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
 		writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
- base + NvRegRingSizes);
+ base + NvRegRingSizes);
pci_push(base);
 		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
pci_push(base);
@@ -3330,7 +5071,11 @@ static int nv_set_ringparam(struct net_d
nv_start_rx(dev);
nv_start_tx(dev);
spin_unlock(&np->lock);
+#if NVVER > FEDORA5
netif_tx_unlock_bh(dev);
+#else
+ spin_unlock_bh(&dev->xmit_lock);
+#endif
nv_enable_irq(dev);
}
return 0;
@@ -3340,7 +5085,7 @@ exit:
static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
{
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0;
pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0;
@@ -3349,13 +5094,13 @@ static void nv_get_pauseparam(struct net
static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
{
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
int adv, bmcr;
if ((!np->autoneg && np->duplex == 0) ||
- (np->autoneg && !pause->autoneg && np->duplex == 0)) {
-		printk(KERN_INFO "%s: can not set pause settings when forced link is in half duplex.\n",
-		       dev->name);
+	    (np->autoneg && !pause->autoneg && np->duplex == 0)) {
+		printk(KERN_INFO "%s: can not set pause settings when forced link is in half duplex.\n",
+		       dev->name);
return -EINVAL;
}
if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) {
@@ -3366,13 +5111,21 @@ static int nv_set_pauseparam(struct net_
netif_carrier_off(dev);
if (netif_running(dev)) {
nv_disable_irq(dev);
+#if NVVER > FEDORA5
netif_tx_lock_bh(dev);
+#else
+ spin_lock_bh(&dev->xmit_lock);
+#endif
spin_lock(&np->lock);
/* stop engines */
nv_stop_rx(dev);
nv_stop_tx(dev);
spin_unlock(&np->lock);
+#if NVVER > FEDORA5
netif_tx_unlock_bh(dev);
+#else
+ spin_unlock_bh(&dev->xmit_lock);
+#endif
}
np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ);
@@ -3420,31 +5173,26 @@ static int nv_set_pauseparam(struct net_
static u32 nv_get_rx_csum(struct net_device *dev)
{
- struct fe_priv *np = netdev_priv(dev);
- return (np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) != 0;
+ struct fe_priv *np = get_nvpriv(dev);
+ return (np->rx_csum) != 0;
}
static int nv_set_rx_csum(struct net_device *dev, u32 data)
{
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
u8 __iomem *base = get_hwbase(dev);
int retcode = 0;
if (np->driver_data & DEV_HAS_CHECKSUM) {
- if (((np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) && data) ||
- (!(np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) && !data)) {
- /* already set or unset */
- return 0;
- }
-
if (data) {
+ np->rx_csum = 1;
np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
- } else if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE)) {
- np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
} else {
-			printk(KERN_INFO "Can not disable rx checksum if vlan is enabled\n");
- return -EINVAL;
+ np->rx_csum = 0;
+ /* vlan is dependent on rx checksum offload */
+ if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE))
+ np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
}
if (netif_running(dev)) {
@@ -3459,39 +5207,85 @@ static int nv_set_rx_csum(struct net_dev
return retcode;
}
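
/*
 * Note the asymmetry in nv_set_rx_csum() above: np->rx_csum tracks the
 * user's request, but NVREG_TXRXCTL_RXCHECK is only cleared while vlan
 * stripping is off, because the hardware vlan support depends on rx
 * checksum offload (see nv_vlan_rx_register() below). Turning rx
 * checksum off under an active vlan config therefore only takes effect
 * in hardware once vlan is disabled again.
 */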
+#ifdef NETIF_F_TSO
+static int nv_set_tso(struct net_device *dev, u32 data)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+
+ if (np->driver_data & DEV_HAS_CHECKSUM){
+#if NVVER < SUSE10
+ if(data){
+ if(ethtool_op_get_sg(dev)==0)
+ return -EINVAL;
+ }
+#endif
+ return ethtool_op_set_tso(dev, data);
+ }else
+ return -EINVAL;
+}
+#endif
+
+static int nv_set_sg(struct net_device *dev, u32 data)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+
+ if (np->driver_data & DEV_HAS_CHECKSUM){
+#if NVVER < SUSE10
+ if(data){
+ if(ethtool_op_get_tx_csum(dev)==0)
+ return -EINVAL;
+ }
+#ifdef NETIF_F_TSO
+ if(!data)
+ /* set tso off */
+ nv_set_tso(dev,data);
+#endif
+#endif
+ return ethtool_op_set_sg(dev, data);
+ }else
+ return -EINVAL;
+}
+
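
/*
 * On the older kernels selected by NVVER < SUSE10 the ethtool core did
 * not order feature changes itself, so the handlers here chain
 * manually: disabling tx checksum also clears SG (nv_set_tx_csum ->
 * nv_set_sg), disabling SG clears TSO (nv_set_sg -> nv_set_tso), and
 * enabling only works bottom-up, e.g. TSO can be switched on only
 * while SG is already on. The dependency chain is tx_csum <- sg <- tso.
 */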
static int nv_set_tx_csum(struct net_device *dev, u32 data)
{
- struct fe_priv *np = netdev_priv(dev);
-
+ struct fe_priv *np = get_nvpriv(dev);
+
+#if NVVER < SUSE10
+ /* set sg off if tx off */
+ if(!data)
+ nv_set_sg(dev,data);
+#endif
if (np->driver_data & DEV_HAS_CHECKSUM)
+#if NVVER > RHES4
return ethtool_op_set_tx_hw_csum(dev, data);
+#else
+ {
+ if (data)
+ dev->features |= NETIF_F_IP_CSUM;
+ else
+ dev->features &= ~NETIF_F_IP_CSUM;
+ return 0;
+ }
+#endif
else
- return -EOPNOTSUPP;
-}
-
-static int nv_set_sg(struct net_device *dev, u32 data)
-{
- struct fe_priv *np = netdev_priv(dev);
-
- if (np->driver_data & DEV_HAS_CHECKSUM)
- return ethtool_op_set_sg(dev, data);
+ return -EINVAL;
+}
+
+static int nv_get_stats_count(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+
+ if (np->driver_data & DEV_HAS_STATISTICS_V1)
+ return NV_DEV_STATISTICS_V1_COUNT;
+ else if (np->driver_data & DEV_HAS_STATISTICS_V2)
+ return NV_DEV_STATISTICS_V2_COUNT;
else
- return -EOPNOTSUPP;
-}
-
-static int nv_get_stats_count(struct net_device *dev)
-{
- struct fe_priv *np = netdev_priv(dev);
-
- if (np->driver_data & DEV_HAS_STATISTICS)
- return (sizeof(struct nv_ethtool_stats)/sizeof(u64));
- else
- return 0;
+ return NV_DEV_STATISTICS_SW_COUNT;
}
static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer)
{
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
/* update stats */
nv_do_stats_poll((unsigned long)dev);
@@ -3501,7 +5295,7 @@ static void nv_get_ethtool_stats(struct
static int nv_self_test_count(struct net_device *dev)
{
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
if (np->driver_data & DEV_HAS_TEST_EXTENDED)
return NV_TEST_COUNT_EXTENDED;
@@ -3511,7 +5305,7 @@ static int nv_self_test_count(struct net
static int nv_link_test(struct net_device *dev)
{
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
int mii_status;
mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
@@ -3554,7 +5348,7 @@ static int nv_register_test(struct net_d
static int nv_interrupt_test(struct net_device *dev)
{
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
u8 __iomem *base = get_hwbase(dev);
int ret = 1;
int testcnt;
@@ -3583,7 +5377,7 @@ static int nv_interrupt_test(struct net_
nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER);
/* wait for at least one interrupt */
- msleep(100);
+ nv_msleep(100);
spin_lock_irq(&np->lock);
@@ -3617,7 +5411,7 @@ static int nv_interrupt_test(struct net_
static int nv_loopback_test(struct net_device *dev)
{
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
u8 __iomem *base = get_hwbase(dev);
struct sk_buff *tx_skb, *rx_skb;
dma_addr_t test_dma_addr;
@@ -3629,6 +5423,8 @@ static int nv_loopback_test(struct net_d
u32 misc1_flags = 0;
int ret = 1;
+ dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
+
if (netif_running(dev)) {
nv_disable_irq(dev);
filter_flags = readl(base + NvRegPacketFilterFlags);
@@ -3649,7 +5445,7 @@ static int nv_loopback_test(struct net_d
writel(np->rx_buf_sz, base + NvRegOffloadConfig);
setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
 	writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
- base + NvRegRingSizes);
+ base + NvRegRingSizes);
pci_push(base);
/* restart rx engine */
@@ -3662,8 +5458,13 @@ static int nv_loopback_test(struct net_d
pkt_data = skb_put(tx_skb, pkt_len);
for (i = 0; i < pkt_len; i++)
pkt_data[i] = (u8)(i & 0xff);
+#if NVVER > FEDORA7
test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
-				       tx_skb->end-tx_skb->data, PCI_DMA_FROMDEVICE);
+ skb_tailroom(tx_skb), PCI_DMA_FROMDEVICE);
+#else
+ test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
+ tx_skb->end-tx_skb->data, PCI_DMA_FROMDEVICE);
+#endif
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
np->tx_ring.orig[0].PacketBuffer = cpu_to_le32(test_dma_addr);
@@ -3676,7 +5477,7 @@ static int nv_loopback_test(struct net_d
writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) +
NvRegTxRxControl);
pci_push(get_hwbase(dev));
- msleep(500);
+ nv_msleep(500);
/* check for rx of the packet */
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
@@ -3699,18 +5500,18 @@ static int nv_loopback_test(struct net_d
}
}
- if (ret) {
+ if (ret) {
if (len != pkt_len) {
ret = 0;
-			dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n",
-				dev->name, len, pkt_len);
+			dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n",
+				dev->name, len, pkt_len);
} else {
- rx_skb = np->rx_skbuff[0];
+ rx_skb = np->rx_skb[0].skb;
for (i = 0; i < pkt_len; i++) {
if (rx_skb->data[i] != (u8)(i & 0xff)) {
ret = 0;
-					dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n",
-						dev->name, i);
+					dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n",
+						dev->name, i);
break;
}
}
@@ -3719,9 +5520,15 @@ static int nv_loopback_test(struct net_d
dprintk(KERN_DEBUG "%s: loopback - did not receive test
packet\n", dev->name);
}
+#if NVVER > FEDORA7
pci_unmap_page(np->pci_dev, test_dma_addr,
- tx_skb->end-tx_skb->data,
- PCI_DMA_TODEVICE);
+ skb_end_pointer(tx_skb)-tx_skb->data,
+ PCI_DMA_TODEVICE);
+#else
+ pci_unmap_page(np->pci_dev, test_dma_addr,
+ tx_skb->end-tx_skb->data,
+ PCI_DMA_TODEVICE);
+#endif
dev_kfree_skb_any(tx_skb);
/* stop engines */
@@ -3743,11 +5550,13 @@ static int nv_loopback_test(struct net_d
static void nv_self_test(struct net_device *dev, struct ethtool_test *test,
u64 *buffer)
{
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
u8 __iomem *base = get_hwbase(dev);
int result;
memset(buffer, 0, nv_self_test_count(dev)*sizeof(u64));
+ dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
+
if (!nv_link_test(dev)) {
test->flags |= ETH_TEST_FL_FAILED;
buffer[0] = 1;
@@ -3756,7 +5565,11 @@ static void nv_self_test(struct net_devi
if (test->flags & ETH_TEST_FL_OFFLINE) {
if (netif_running(dev)) {
netif_stop_queue(dev);
+#if NVVER > FEDORA5
netif_tx_lock_bh(dev);
+#else
+ spin_lock_bh(&dev->xmit_lock);
+#endif
spin_lock_irq(&np->lock);
nv_disable_hw_interrupts(dev, np->irqmask);
if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
@@ -3772,7 +5585,11 @@ static void nv_self_test(struct net_devi
nv_drain_rx(dev);
nv_drain_tx(dev);
spin_unlock_irq(&np->lock);
+#if NVVER > FEDORA5
netif_tx_unlock_bh(dev);
+#else
+ spin_unlock_bh(&dev->xmit_lock);
+#endif
}
if (!nv_register_test(dev)) {
@@ -3806,7 +5623,7 @@ static void nv_self_test(struct net_devi
writel(np->rx_buf_sz, base + NvRegOffloadConfig);
setup_hw_rings(dev, NV_SETUP_RX_RING |
NV_SETUP_TX_RING);
 			writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
- base + NvRegRingSizes);
+ base + NvRegRingSizes);
pci_push(base);
 			writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
pci_push(base);
@@ -3822,12 +5639,12 @@ static void nv_get_strings(struct net_de
static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer)
{
switch (stringset) {
-	case ETH_SS_STATS:
-		memcpy(buffer, &nv_estats_str, nv_get_stats_count(dev)*sizeof(struct nv_ethtool_str));
-		break;
-	case ETH_SS_TEST:
-		memcpy(buffer, &nv_etests_str, nv_self_test_count(dev)*sizeof(struct nv_ethtool_str));
-		break;
+		case ETH_SS_STATS:
+			memcpy(buffer, &nv_estats_str, nv_get_stats_count(dev)*sizeof(struct nv_ethtool_str));
+			break;
+		case ETH_SS_TEST:
+			memcpy(buffer, &nv_etests_str, nv_self_test_count(dev)*sizeof(struct nv_ethtool_str));
+			break;
}
}
@@ -3841,9 +5658,11 @@ static struct ethtool_ops ops = {
.get_regs_len = nv_get_regs_len,
.get_regs = nv_get_regs,
.nway_reset = nv_nway_reset,
+#if NVVER < NVNEW
+#if NVVER > SUSE10
.get_perm_addr = ethtool_op_get_perm_addr,
- .get_tso = ethtool_op_get_tso,
- .set_tso = nv_set_tso,
+#endif
+#endif
.get_ringparam = nv_get_ringparam,
.set_ringparam = nv_set_ringparam,
.get_pauseparam = nv_get_pauseparam,
@@ -3854,6 +5673,10 @@ static struct ethtool_ops ops = {
.set_tx_csum = nv_set_tx_csum,
.get_sg = ethtool_op_get_sg,
.set_sg = nv_set_sg,
+#ifdef NETIF_F_TSO
+ .get_tso = ethtool_op_get_tso,
+ .set_tso = nv_set_tso,
+#endif
.get_strings = nv_get_strings,
.get_stats_count = nv_get_stats_count,
.get_ethtool_stats = nv_get_ethtool_stats,
@@ -3873,10 +5696,14 @@ static void nv_vlan_rx_register(struct n
if (grp) {
/* enable vlan on MAC */
np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP |
NVREG_TXRXCTL_VLANINS;
+ /* vlan is dependent on rx checksum */
+ np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
} else {
/* disable vlan on MAC */
np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
+ if (!np->rx_csum)
+ np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
}
writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
@@ -3889,26 +5716,72 @@ static void nv_vlan_rx_kill_vid(struct n
/* nothing to do */
};
+/* The mgmt unit and driver use a semaphore to access the phy during init */
+static int nv_mgmt_acquire_sema(struct net_device *dev)
+{
+ u8 __iomem *base = get_hwbase(dev);
+ int i;
+ u32 tx_ctrl, mgmt_sema;
+
+ for (i = 0; i < 10; i++) {
+		mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK;
+ if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE) {
+			dprintk(KERN_INFO "forcedeth: nv_mgmt_acquire_sema: sema is free\n");
+ break;
+ }
+ nv_msleep(500);
+ }
+
+ if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE) {
+		dprintk(KERN_INFO "forcedeth: nv_mgmt_acquire_sema: sema is not free\n");
+ return 0;
+ }
+
+ for (i = 0; i < 2; i++) {
+ tx_ctrl = readl(base + NvRegTransmitterControl);
+ tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ;
+ writel(tx_ctrl, base + NvRegTransmitterControl);
+
+ /* verify that semaphore was acquired */
+ tx_ctrl = readl(base + NvRegTransmitterControl);
+		if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) &&
+		    ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) {
+			dprintk(KERN_INFO "forcedeth: nv_mgmt_acquire_sema: acquired sema\n");
+ return 1;
+ } else
+ udelay(50);
+ }
+
+ dprintk(KERN_INFO "forcedeth: nv_mgmt_acquire_sema: exit\n");
+ return 0;
+}
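
/*
 * The acquire protocol above is two-phase: poll (up to ten 500ms
 * rounds) until the management unit's semaphore field reads free, then
 * set the host-acquire bit and read back to verify both that the host
 * now owns the semaphore and that the mgmt side is still free,
 * retrying once after 50us. A hedged usage sketch for probe/open,
 * where a sideband management unit may currently own the phy:
 *
 *	if (np->mac_in_use && !nv_mgmt_acquire_sema(dev))
 *		return;		phy busy: keep the mgmt unit's phy setup
 */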
+
static int nv_open(struct net_device *dev)
{
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
u8 __iomem *base = get_hwbase(dev);
int ret = 1;
+ u32 tx_ctrl;
int oom, i;
dprintk(KERN_DEBUG "nv_open: begin\n");
- /* 1) erase previous misconfiguration */
+ /* erase previous misconfiguration */
if (np->driver_data & DEV_HAS_POWER_CNTRL)
nv_mac_reset(dev);
- /* 4.1-1: stop adapter: ignored, 4.3 seems to be overkill */
+ /* stop adapter: ignored, 4.3 seems to be overkill */
writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
writel(0, base + NvRegMulticastAddrB);
- writel(0, base + NvRegMulticastMaskA);
- writel(0, base + NvRegMulticastMaskB);
+ writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
+ writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
writel(0, base + NvRegPacketFilterFlags);
- writel(0, base + NvRegTransmitterControl);
+ if (np->mac_in_use){
+ tx_ctrl = readl(base + NvRegTransmitterControl);
+ tx_ctrl &= ~NVREG_XMITCTL_START;
+ }else
+ tx_ctrl = 0;
+ writel(tx_ctrl, base + NvRegTransmitterControl);
writel(0, base + NvRegReceiverControl);
writel(0, base + NvRegAdapterControl);
@@ -3916,26 +5789,23 @@ static int nv_open(struct net_device *de
if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
- /* 2) initialize descriptor rings */
+ /* initialize descriptor rings */
set_bufsize(dev);
oom = nv_init_ring(dev);
writel(0, base + NvRegLinkSpeed);
- writel(0, base + NvRegUnknownTransmitterReg);
+	writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
nv_txrx_reset(dev);
writel(0, base + NvRegUnknownSetupReg6);
np->in_shutdown = 0;
- /* 3) set mac address */
- nv_copy_mac_to_hw(dev);
-
- /* 4) give hw rings */
+ /* give hw rings */
setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
 	writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
- base + NvRegRingSizes);
-
- /* 5) continue setup */
+ base + NvRegRingSizes);
+
+ /* continue setup */
writel(np->linkspeed, base + NvRegLinkSpeed);
if (np->desc_ver == DESC_VER_1)
writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark);
@@ -3949,11 +5819,11 @@ static int nv_open(struct net_device *de
NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX,
KERN_INFO "open: SetupReg5, Bit 31 remained off\n");
- writel(0, base + NvRegUnknownSetupReg4);
+ writel(0, base + NvRegMIIMask);
writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
- writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
-
- /* 6) continue setup */
+ writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
+
+ /* continue setup */
writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
 	writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
@@ -3976,7 +5846,7 @@ static int nv_open(struct net_device *de
 	writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
base + NvRegAdapterControl);
writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
- writel(NVREG_UNKSETUP4_VAL, base + NvRegUnknownSetupReg4);
+ writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask);
if (np->wolenabled)
writel(NVREG_WAKEUPFLAGS_ENABLE , base + NvRegWakeUpFlags);
@@ -3990,7 +5860,7 @@ static int nv_open(struct net_device *de
nv_disable_hw_interrupts(dev, np->irqmask);
pci_push(base);
- writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
+ writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
pci_push(base);
@@ -4004,8 +5874,8 @@ static int nv_open(struct net_device *de
spin_lock_irq(&np->lock);
writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
writel(0, base + NvRegMulticastAddrB);
- writel(0, base + NvRegMulticastMaskA);
- writel(0, base + NvRegMulticastMaskB);
+ writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
+ writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
 	writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
/* One manual link speed update: Interrupts are enabled, future link
* speed changes cause interrupts and are handled by nv_link_irq().
@@ -4013,7 +5883,7 @@ static int nv_open(struct net_device *de
{
u32 miistat;
miistat = readl(base + NvRegMIIStatus);
- writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
+ writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat);
}
/* set linkspeed to invalid value, thus force nv_update_linkspeed
@@ -4026,15 +5896,14 @@ static int nv_open(struct net_device *de
if (ret) {
netif_carrier_on(dev);
} else {
- printk("%s: no link during initialization.\n", dev->name);
+		dprintk(KERN_DEBUG "%s: no link during initialization.\n", dev->name);
netif_carrier_off(dev);
}
if (oom)
mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
/* start statistics timer */
- if (np->driver_data & DEV_HAS_STATISTICS)
- mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL);
+ mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL);
spin_unlock_irq(&np->lock);
@@ -4046,13 +5915,19 @@ out_drain:
static int nv_close(struct net_device *dev)
{
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
u8 __iomem *base;
+ dprintk(KERN_DEBUG "nv_close: begin\n");
spin_lock_irq(&np->lock);
np->in_shutdown = 1;
spin_unlock_irq(&np->lock);
- synchronize_irq(dev->irq);
+
+#if NVVER > RHES3
+ synchronize_irq(np->pci_dev->irq);
+#else
+ synchronize_irq();
+#endif
del_timer_sync(&np->oom_kick);
del_timer_sync(&np->nic_poll);
@@ -4079,12 +5954,6 @@ static int nv_close(struct net_device *d
if (np->wolenabled)
nv_start_rx(dev);
- /* special op: write back the misordered MAC address - otherwise
- * the next nv_probe would see a wrong address.
- */
- writel(np->orig_mac[0], base + NvRegMacAddrA);
- writel(np->orig_mac[1], base + NvRegMacAddrB);
-
/* FIXME: power down nic */
return 0;
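
/*
 * The block removed above (rewriting the byte-swapped MAC address on
 * every close) is no longer needed: in this driver version the
 * original address ordering is presumably restored at probe/remove
 * time instead, and nv_open preserves NVREG_TRANSMITPOLL_MAC_ADDR_REV
 * to track the address ordering at runtime.
 */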
@@ -4097,16 +5966,21 @@ static int __devinit nv_probe(struct pci
unsigned long addr;
u8 __iomem *base;
int err, i;
- u32 powerstate;
-
+ u32 powerstate, phystate_orig = 0, phystate, txreg,reg,mii_status;
+ int phyinitialized = 0;
+
+ /* modify network device class id */
+ quirk_nforce_network_class(pci_dev);
dev = alloc_etherdev(sizeof(struct fe_priv));
err = -ENOMEM;
if (!dev)
goto out;
- np = netdev_priv(dev);
+ dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
+ np = get_nvpriv(dev);
np->pci_dev = pci_dev;
spin_lock_init(&np->lock);
+ spin_lock_init(&np->timer_lock);
SET_MODULE_OWNER(dev);
SET_NETDEV_DEV(dev, &pci_dev->dev);
@@ -4133,7 +6007,9 @@ static int __devinit nv_probe(struct pci
if (err < 0)
goto out_disable;
-	if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS))
+	if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2))
+ np->register_size = NV_PCI_REGSZ_VER3;
+ else if (id->driver_data & DEV_HAS_STATISTICS_V1)
np->register_size = NV_PCI_REGSZ_VER2;
else
np->register_size = NV_PCI_REGSZ_VER1;
@@ -4143,8 +6019,8 @@ static int __devinit nv_probe(struct pci
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags
0x%08lx.\n",
pci_name(pci_dev), i,
(void*)pci_resource_start(pci_dev, i),
- pci_resource_len(pci_dev, i),
- pci_resource_flags(pci_dev, i));
+ (long)pci_resource_len(pci_dev, i),
+ (long)pci_resource_flags(pci_dev, i));
if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
 				pci_resource_len(pci_dev, i) >= np->register_size) {
addr = pci_resource_start(pci_dev, i);
@@ -4153,7 +6029,7 @@ static int __devinit nv_probe(struct pci
}
if (i == DEVICE_COUNT_RESOURCE) {
printk(KERN_INFO "forcedeth: Couldn't find register window for
device %s.\n",
- pci_name(pci_dev));
+ pci_name(pci_dev));
goto out_relreg;
}
@@ -4168,15 +6044,17 @@ static int __devinit nv_probe(struct pci
if (dma_64bit) {
if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) {
printk(KERN_INFO "forcedeth: 64-bit DMA failed,
using 32-bit addressing for device %s.\n",
- pci_name(pci_dev));
+ pci_name(pci_dev));
} else {
dev->features |= NETIF_F_HIGHDMA;
printk(KERN_INFO "forcedeth: using HIGHDMA\n");
}
+#if NVVER > RHES3
if (pci_set_consistent_dma_mask(pci_dev, DMA_39BIT_MASK)) {
printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed, using 32-bit ring buffers for device %s.\n",
- pci_name(pci_dev));
+ pci_name(pci_dev));
}
+#endif
}
} else if (id->driver_data & DEV_HAS_LARGEDESC) {
/* packet format 2: supports jumbo frames */
@@ -4191,21 +6069,43 @@ static int __devinit nv_probe(struct pci
np->pkt_limit = NV_PKTLIMIT_1;
if (id->driver_data & DEV_HAS_LARGEDESC)
np->pkt_limit = NV_PKTLIMIT_2;
+ if (mtu > np->pkt_limit) {
+ printk(KERN_INFO "forcedeth: MTU value of %d is too large. Setting to maximum value of %d\n",
+ mtu, np->pkt_limit);
+ dev->mtu = np->pkt_limit;
+ } else {
+ dev->mtu = mtu;
+ }
if (id->driver_data & DEV_HAS_CHECKSUM) {
- np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
- dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
+ if (rx_checksum_offload) {
+ np->rx_csum = 1;
+ np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
+ }
+
+ if (tx_checksum_offload)
+#if NVVER > RHES4
+ dev->features |= NETIF_F_HW_CSUM;
+#else
+ dev->features |= NETIF_F_IP_CSUM;
+#endif
+
+ if (scatter_gather)
+ dev->features |= NETIF_F_SG;
#ifdef NETIF_F_TSO
- dev->features |= NETIF_F_TSO;
-#endif
- }
+ if (tso_offload)
+ dev->features |= NETIF_F_TSO;
+#endif
+ }
np->vlanctl_bits = 0;
- if (id->driver_data & DEV_HAS_VLAN) {
+ if (id->driver_data & DEV_HAS_VLAN && tagging_8021pq) {
np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
dev->vlan_rx_register = nv_vlan_rx_register;
dev->vlan_rx_kill_vid = nv_vlan_rx_kill_vid;
+ /* vlan needs rx checksum support, so force it */
+ np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
}
np->msi_flags = 0;
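
The checksum hunk above makes each offload doubly conditional: the silicon must advertise DEV_HAS_CHECKSUM and the matching module parameter must also request the feature. A minimal standalone sketch of that decision shape follows; the HAS_*/F_*/want_* names are illustrative stand-ins, not driver symbols.

#include <stdint.h>

#define HAS_CHECKSUM (1u << 0)  /* illustrative capability bit */

#define F_RXCSUM (1u << 0)      /* illustrative feature bits */
#define F_SG     (1u << 1)
#define F_TSO    (1u << 2)

/* Enable a feature only when the hardware supports it AND the
 * corresponding module parameter requested it, mirroring the
 * rx_checksum_offload/scatter_gather/tso_offload tests above. */
static uint32_t pick_features(uint32_t caps, int want_csum, int want_sg, int want_tso)
{
        uint32_t f = 0;
        if (caps & HAS_CHECKSUM) {
                if (want_csum)
                        f |= F_RXCSUM;
                if (want_sg)
                        f |= F_SG;
                if (want_tso)
                        f |= F_TSO;
        }
        return f;
}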
@@ -4216,11 +6116,27 @@ static int __devinit nv_probe(struct pci
np->msi_flags |= NV_MSI_X_CAPABLE;
}
- np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
- if (id->driver_data & DEV_HAS_PAUSEFRAME_TX) {
- np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;
- }
-
+ np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE;
+ if (rx_flow_control == NV_RX_FLOW_CONTROL_ENABLED)
+ np->pause_flags |= NV_PAUSEFRAME_RX_REQ;
+ if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) ||
+ (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) ||
+ (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V3)) {
+ np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE;
+ if (tx_flow_control == NV_TX_FLOW_CONTROL_ENABLED)
+ np->pause_flags |= NV_PAUSEFRAME_TX_REQ;
+ }
+ if (autoneg == AUTONEG_ENABLE) {
+ np->pause_flags |= NV_PAUSEFRAME_AUTONEG;
+ } else if (speed_duplex == NV_SPEED_DUPLEX_1000_FULL_DUPLEX) {
+ printk(KERN_INFO "forcedeth: speed_duplex of 1000 full cannot be enabled if autoneg is disabled\n");
+ goto out_relreg;
+ }
+
+ /* save phy config */
+ np->autoneg = autoneg;
+ np->speed_duplex = speed_duplex;
err = -ENOMEM;
np->base = ioremap(addr, np->register_size);
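
The pause-frame hunk above replaces the old unconditional RX_REQ|AUTONEG default with parameter-driven selection. A compact sketch of the same mapping, with illustrative stand-ins for the NV_PAUSEFRAME_* bits:

#include <stdint.h>

#define PAUSE_RX_CAP  (1u << 0)  /* illustrative stand-ins for the */
#define PAUSE_RX_REQ  (1u << 1)  /* NV_PAUSEFRAME_* flag bits      */
#define PAUSE_TX_CAP  (1u << 2)
#define PAUSE_TX_REQ  (1u << 3)
#define PAUSE_AUTONEG (1u << 4)

/* RX pause is always possible, TX pause only on hardware with one of
 * the TX pause versions, and the REQ bits follow the rx/tx_flow_control
 * module parameters, as in the probe code above. */
static uint32_t pause_flags(int hw_tx_pause, int rx_req, int tx_req, int autoneg)
{
        uint32_t p = PAUSE_RX_CAP;
        if (rx_req)
                p |= PAUSE_RX_REQ;
        if (hw_tx_pause) {
                p |= PAUSE_TX_CAP;
                if (tx_req)
                        p |= PAUSE_TX_REQ;
        }
        if (autoneg)
                p |= PAUSE_AUTONEG;
        return p;
}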
@@ -4228,51 +6144,86 @@ static int __devinit nv_probe(struct pci
goto out_relreg;
dev->base_addr = (unsigned long)np->base;
+ /* stop engines */
+ nv_stop_rx(dev);
+ nv_stop_tx(dev);
+ nv_txrx_reset(dev);
+
dev->irq = pci_dev->irq;
- np->rx_ring_size = RX_RING_DEFAULT;
- np->tx_ring_size = TX_RING_DEFAULT;
- np->tx_limit_stop = np->tx_ring_size - TX_LIMIT_DIFFERENCE;
- np->tx_limit_start = np->tx_ring_size - TX_LIMIT_DIFFERENCE - 1;
+ if (np->desc_ver == DESC_VER_1) {
+ if (rx_ring_size > RING_MAX_DESC_VER_1) {
+ printk(KERN_INFO "forcedeth: rx_ring_size of %d is too large. Setting to maximum of %d\n",
+ rx_ring_size, RING_MAX_DESC_VER_1);
+ rx_ring_size = RING_MAX_DESC_VER_1;
+ }
+ if (tx_ring_size > RING_MAX_DESC_VER_1) {
+ printk(KERN_INFO "forcedeth: tx_ring_size of %d is too large. Setting to maximum of %d\n",
+ tx_ring_size, RING_MAX_DESC_VER_1);
+ tx_ring_size = RING_MAX_DESC_VER_1;
+ }
+ } else {
+ if (rx_ring_size > RING_MAX_DESC_VER_2_3) {
+ printk(KERN_INFO "forcedeth: rx_ring_size of %d is too large. Setting to maximum of %d\n",
+ rx_ring_size, RING_MAX_DESC_VER_2_3);
+ rx_ring_size = RING_MAX_DESC_VER_2_3;
+ }
+ if (tx_ring_size > RING_MAX_DESC_VER_2_3) {
+ printk(KERN_INFO "forcedeth: tx_ring_size of %d is too large. Setting to maximum of %d\n",
+ tx_ring_size, RING_MAX_DESC_VER_2_3);
+ tx_ring_size = RING_MAX_DESC_VER_2_3;
+ }
+ }
+ }
+ np->rx_ring_size = rx_ring_size;
+ np->tx_ring_size = tx_ring_size;
+ np->tx_limit_stop = tx_ring_size - TX_LIMIT_DIFFERENCE;
+ np->tx_limit_start = tx_ring_size - TX_LIMIT_DIFFERENCE - 1;
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
np->rx_ring.orig = pci_alloc_consistent(pci_dev,
- sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
- &np->ring_addr);
+ sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
+ &np->ring_addr);
if (!np->rx_ring.orig)
goto out_unmap;
np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
} else {
np->rx_ring.ex = pci_alloc_consistent(pci_dev,
- sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
- &np->ring_addr);
+ sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
+ &np->ring_addr);
if (!np->rx_ring.ex)
goto out_unmap;
np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
}
- np->rx_skbuff = kmalloc(sizeof(struct sk_buff*) * np->rx_ring_size, GFP_KERNEL);
- np->rx_dma = kmalloc(sizeof(dma_addr_t) * np->rx_ring_size, GFP_KERNEL);
- np->tx_skbuff = kmalloc(sizeof(struct sk_buff*) * np->tx_ring_size, GFP_KERNEL);
- np->tx_dma = kmalloc(sizeof(dma_addr_t) * np->tx_ring_size, GFP_KERNEL);
- np->tx_dma_len = kmalloc(sizeof(unsigned int) * np->tx_ring_size, GFP_KERNEL);
- if (!np->rx_skbuff || !np->rx_dma || !np->tx_skbuff || !np->tx_dma || !np->tx_dma_len)
+ np->rx_skb = kmalloc(sizeof(struct nv_skb_map) * np->rx_ring_size, GFP_KERNEL);
+ np->tx_skb = kmalloc(sizeof(struct nv_skb_map) * np->tx_ring_size, GFP_KERNEL);
+ if (!np->rx_skb || !np->tx_skb)
goto out_freering;
- memset(np->rx_skbuff, 0, sizeof(struct sk_buff*) * np->rx_ring_size);
- memset(np->rx_dma, 0, sizeof(dma_addr_t) * np->rx_ring_size);
- memset(np->tx_skbuff, 0, sizeof(struct sk_buff*) * np->tx_ring_size);
- memset(np->tx_dma, 0, sizeof(dma_addr_t) * np->tx_ring_size);
- memset(np->tx_dma_len, 0, sizeof(unsigned int) * np->tx_ring_size);
+ memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
+ memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);
dev->open = nv_open;
dev->stop = nv_close;
- dev->hard_start_xmit = nv_start_xmit;
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+ dev->hard_start_xmit = nv_start_xmit;
+ else
+ dev->hard_start_xmit = nv_start_xmit_optimized;
dev->get_stats = nv_get_stats;
dev->change_mtu = nv_change_mtu;
dev->set_mac_address = nv_set_mac_address;
dev->set_multicast_list = nv_set_multicast;
+
+#if NVVER < SLES9
+ dev->do_ioctl = nv_ioctl;
+#endif
+
+#if NVVER > RHES3
#ifdef CONFIG_NET_POLL_CONTROLLER
dev->poll_controller = nv_poll_controller;
#endif
+#else
+ dev->poll_controller = nv_poll_controller;
+#endif
+
SET_ETHTOOL_OPS(dev, &ops);
dev->tx_timeout = nv_tx_timeout;
dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
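
The ring bookkeeping above collapses the old parallel arrays (rx_skbuff/rx_dma, tx_skbuff/tx_dma/tx_dma_len) into one nv_skb_map record per descriptor, so mapping, unmapping and completion all index a single array. A sketch of the shape implied by the allocation, with userspace stand-in types (the driver itself uses struct sk_buff * and dma_addr_t):

#include <stdint.h>

/* One record per ring slot, replacing the old parallel arrays. */
struct skb_map_sketch {
        void     *skb;      /* the packet buffer attached to this slot */
        uint64_t  dma;      /* bus address the NIC DMAs to/from        */
        uint32_t  dma_len;  /* mapped length, needed at unmap time     */
};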
@@ -4284,23 +6235,45 @@ static int __devinit nv_probe(struct pci
np->orig_mac[0] = readl(base + NvRegMacAddrA);
np->orig_mac[1] = readl(base + NvRegMacAddrB);
- dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff;
- dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff;
- dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
- dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
- dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
- dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
+ /* check the workaround bit for correct mac address order */
+ txreg = readl(base + NvRegTransmitPoll);
+ if ((txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) ||
+ (id->driver_data & DEV_HAS_CORRECT_MACADDR)) {
+ /* mac address is already in correct order */
+ dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
+ dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
+ dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
+ dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
+ dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
+ dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
+ } else {
+ dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff;
+ dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff;
+ dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
+ dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
+ dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
+ dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
+ /* set permanent address to be correct as well */
+ np->orig_mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
+ (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
+ np->orig_mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
+ writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
+ }
+#if NVVER > SUSE10
memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
- if (!is_valid_ether_addr(dev->perm_addr)) {
+ if (!is_valid_ether_addr(dev->perm_addr)){
+#else
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+#endif
/*
* Bad mac address. At least one bios sets the mac address
* to 01:23:45:67:89:ab
*/
printk(KERN_ERR "%s: Invalid Mac address detected: %02x:%02x:%02x:%02x:%02x:%02x\n",
- pci_name(pci_dev),
- dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
- dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+ pci_name(pci_dev),
+ dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
+ dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
printk(KERN_ERR "Please complain to your hardware vendor. Switching to a random MAC.\n");
dev->dev_addr[0] = 0x00;
dev->dev_addr[1] = 0x00;
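
The workaround above reads the MAC from two 32-bit registers in either natural or reversed byte order, keyed off NVREG_TRANSMITPOLL_MAC_ADDR_REV or DEV_HAS_CORRECT_MACADDR. A standalone sketch of the two decodes (truncation to uint8_t stands in for the & 0xff masks):

#include <stdint.h>

/* Decode the 6-byte MAC from the two address registers, in both
 * layouts handled above; "reversed" corresponds to hardware without
 * the workaround bit or the DEV_HAS_CORRECT_MACADDR flag. */
static void decode_mac(uint32_t reg_a, uint32_t reg_b, int reversed, uint8_t mac[6])
{
        if (!reversed) {        /* natural order */
                mac[0] = reg_a >>  0; mac[1] = reg_a >>  8;
                mac[2] = reg_a >> 16; mac[3] = reg_a >> 24;
                mac[4] = reg_b >>  0; mac[5] = reg_b >>  8;
        } else {                /* misordered: bytes swapped end to end */
                mac[0] = reg_b >>  8; mac[1] = reg_b >>  0;
                mac[2] = reg_a >> 24; mac[3] = reg_a >> 16;
                mac[4] = reg_a >>  8; mac[5] = reg_a >>  0;
        }
}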
@@ -4311,10 +6284,12 @@ static int __devinit nv_probe(struct pci
dprintk(KERN_DEBUG "%s: MAC Address %02x:%02x:%02x:%02x:%02x:%02x\n",
pci_name(pci_dev),
dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+ /* set mac address */
+ nv_copy_mac_to_hw(dev);
/* disable WOL */
writel(0, base + NvRegWakeUpFlags);
- np->wolenabled = 0;
+ np->wolenabled = wol;
if (id->driver_data & DEV_HAS_POWER_CNTRL) {
u8 revision_id;
@@ -4324,8 +6299,8 @@ static int __devinit nv_probe(struct pci
powerstate = readl(base + NvRegPowerState2);
powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 ||
- id->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) &&
- revision_id >= 0xA3)
+ id->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) &&
+ revision_id >= 0xA3)
powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
writel(powerstate, base + NvRegPowerState2);
}
@@ -4354,6 +6329,41 @@ static int __devinit nv_probe(struct pci
} else {
dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev));
np->need_linktimer = 0;
+ }
+
+ /* clear phy state and temporarily halt phy interrupts */
+ writel(0, base + NvRegMIIMask);
+ phystate = readl(base + NvRegAdapterControl);
+ if (phystate & NVREG_ADAPTCTL_RUNNING) {
+ phystate_orig = 1;
+ phystate &= ~NVREG_ADAPTCTL_RUNNING;
+ writel(phystate, base + NvRegAdapterControl);
+ }
+ writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
+
+ if (id->driver_data & DEV_HAS_MGMT_UNIT) {
+ /* management unit running on the mac? */
+ if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) {
+ np->mac_in_use = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST;
+ dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n", pci_name(pci_dev), np->mac_in_use);
+ for (i = 0; i < 5000; i++) {
+ nv_msleep(1);
+ if (nv_mgmt_acquire_sema(dev)) {
+ /* management unit setup the phy already? */
+ if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) == NVREG_XMITCTL_SYNC_PHY_INIT) {
+ if (np->mac_in_use) {
+ /* phy is inited by mgmt unit */
+ phyinitialized = 1;
+ dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n", pci_name(pci_dev));
+ }
+ } else {
+ /* we need to init the phy */
+ }
+ break;
+ }
+ }
+ }
}
/* find a suitable phy */
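
Before the PHY scan in the next hunk, note the management-unit handshake just above: if the mgmt unit signalled PHY init sync, the driver polls up to 5000 ms for the hardware semaphore, then checks whether the PHY was already brought up so phy_init() can be skipped. A minimal sketch of that control flow, with acquire_sema()/phy_was_inited() as illustrative stand-ins for nv_mgmt_acquire_sema() and the NVREG_XMITCTL_SYNC_PHY_INIT test:

/* Returns 1 when the mgmt unit already initialized the PHY, 0 when
 * the caller must run phy_init() itself. The driver sleeps 1 ms per
 * iteration (nv_msleep), omitted here. */
static int wait_for_mgmt(int (*acquire_sema)(void), int (*phy_was_inited)(void))
{
        int i;
        for (i = 0; i < 5000; i++) {
                if (acquire_sema())
                        return phy_was_inited();  /* 1: skip phy_init() */
        }
        return 0;  /* no semaphore: fall through and init the PHY */
}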
@@ -4372,27 +6382,46 @@ static int __devinit nv_probe(struct pci
if (id2 < 0 || id2 == 0xffff)
continue;
+ np->phy_model = id2 & PHYID2_MODEL_MASK;
id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
- pci_name(pci_dev), id1, id2, phyaddr);
+ pci_name(pci_dev), id1, id2, phyaddr);
np->phyaddr = phyaddr;
np->phy_oui = id1 | id2;
break;
}
if (i == 33) {
printk(KERN_INFO "%s: open: Could not find a valid PHY.\n",
- pci_name(pci_dev));
+ pci_name(pci_dev));
goto out_error;
}
- /* reset it */
- phy_init(dev);
+ if (!phyinitialized) {
+ /* reset it */
+ phy_init(dev);
+ np->autoneg = autoneg;
+ } else {
+ /* see if it is a gigabit phy */
+ mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
+ if (mii_status & PHY_GIGABIT) {
+ np->gigabit = PHY_GIGABIT;
+ }
+ reg = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+ np->autoneg = (reg & BMCR_ANENABLE ? AUTONEG_ENABLE : AUTONEG_DISABLE);
+ if (np->autoneg == AUTONEG_DISABLE) {
+ reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
+ np->fixed_mode = reg;
+ }
+ }
+
+ if (np->phy_oui == PHY_OUI_MARVELL && np->phy_model == PHY_MODEL_MARVELL_E1011 && np->pci_dev->subsystem_vendor == 0x108E && np->pci_dev->subsystem_device == 0x6676) {
+ nv_LED_on(dev);
+ }
/* set default link speed settings */
np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
np->duplex = 0;
- np->autoneg = 1;
err = register_netdev(dev);
if (err) {
@@ -4406,6 +6435,8 @@ static int __devinit nv_probe(struct pci
return 0;
out_error:
+ if (phystate_orig)
+ writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
pci_set_drvdata(pci_dev, NULL);
out_freering:
free_rings(dev);
@@ -4421,11 +6452,32 @@ out:
return err;
}
+#ifdef CONFIG_PM
+static void nv_set_low_speed(struct net_device *dev);
+#endif
static void __devexit nv_remove(struct pci_dev *pci_dev)
{
struct net_device *dev = pci_get_drvdata(pci_dev);
-
+ struct fe_priv *np = get_nvpriv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ u32 tx_ctrl;
+
+ if (np->phy_oui == PHY_OUI_MARVELL && np->phy_model == PHY_MODEL_MARVELL_E1011 && np->pci_dev->subsystem_vendor == 0x108E && np->pci_dev->subsystem_device == 0x6676) {
+ nv_LED_off(dev);
+ }
unregister_netdev(dev);
+ /* special op: write back the misordered MAC address - otherwise
+ * the next nv_probe would see a wrong address.
+ */
+ writel(np->orig_mac[0], base + NvRegMacAddrA);
+ writel(np->orig_mac[1], base + NvRegMacAddrB);
+
+ /* relinquish control of the semaphore */
+ if (np->mac_in_use){
+ tx_ctrl = readl(base + NvRegTransmitterControl);
+ tx_ctrl &= ~NVREG_XMITCTL_HOST_SEMA_MASK;
+ writel(tx_ctrl, base + NvRegTransmitterControl);
+ }
/* free all structures */
free_rings(dev);
@@ -4467,90 +6519,436 @@ static struct pci_device_id pci_tbl[] =
},
{ /* CK804 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
},
{ /* CK804 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
},
{ /* MCP04 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
},
{ /* MCP04 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
},
{ /* MCP51 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
},
{ /* MCP51 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
},
{ /* MCP55 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
},
{ /* MCP55 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
},
{ /* MCP61 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
},
{ /* MCP61 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
},
{ /* MCP61 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
},
{ /* MCP61 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
},
{ /* MCP65 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
},
{ /* MCP65 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
},
{ /* MCP65 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
},
{ /* MCP65 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
+ },
+ { /* MCP67 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
+ },
+ { /* MCP67 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
+ },
+ { /* MCP67 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
+ },
+ { /* MCP67 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
+ },
+ { /* MCP73 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_28),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
+ },
+ { /* MCP73 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_29),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
+ },
+ { /* MCP73 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_30),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
+ },
+ { /* MCP73 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_31),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
+ },
+ { /* MCP77 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
+ },
+ { /* MCP77 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
+ },
+ { /* MCP77 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
+ },
+ { /* MCP77 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
+ },
+ { /* MCP79 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
+ },
+ { /* MCP79 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
+ },
+ { /* MCP79 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
+ },
+ { /* MCP79 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
},
{0,},
};
-static struct pci_driver driver = {
+#ifdef CONFIG_PM
+static void nv_set_low_speed(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ int adv = 0;
+ int lpa = 0;
+ int adv_lpa, bmcr, tries = 0;
+ int mii_status;
+ u32 control_1000;
+
+ if (np->autoneg == 0 || ((np->linkspeed & 0xFFF) != NVREG_LINKSPEED_1000))
+ return;
+
+ adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
+ lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
+ control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
+
+ adv_lpa = lpa & adv;
+
+ if ((adv_lpa & LPA_10FULL) || (adv_lpa & LPA_10HALF)) {
+ adv &= ~(ADVERTISE_100BASE4 | ADVERTISE_100FULL | ADVERTISE_100HALF);
+ control_1000 &= ~(ADVERTISE_1000FULL|ADVERTISE_1000HALF);
+ printk(KERN_INFO "forcedeth %s: set low speed to 10mbps\n", dev->name);
+ } else if ((adv_lpa & LPA_100FULL) || (adv_lpa & LPA_100HALF)) {
+ control_1000 &= ~(ADVERTISE_1000FULL|ADVERTISE_1000HALF);
+ } else
+ return;
+
+ /* set new advertisements */
+ mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
+ mii_rw(dev, np->phyaddr, MII_CTRL1000, control_1000);
+
+ bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+ if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
+ bmcr |= BMCR_ANENABLE;
+ /* reset the phy in order for settings to stick,
+ * and cause autoneg to start */
+ if (phy_reset(dev, bmcr)) {
+ printk(KERN_INFO "%s: phy reset failed\n", dev->name);
+ return;
+ }
+ } else {
+ bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
+ }
+ mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
+ mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
+ while (!(mii_status & BMSR_ANEGCOMPLETE)) {
+ nv_msleep(100);
+ mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
+ if (tries++ > 50)
+ break;
+ }
+
+ nv_update_linkspeed(dev);
+
+ return;
+}
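
nv_set_low_speed() above downshifts to the lowest speed both link partners advertise before suspending, then restarts autonegotiation. A standalone sketch of just the speed selection (the ADV_* bit names are illustrative, not the real MII ADVERTISE/LPA encodings):

#include <stdint.h>

#define ADV_10   (1u << 0)  /* illustrative advertisement bits */
#define ADV_100  (1u << 1)
#define ADV_1000 (1u << 2)

/* Pick the advertisement mask for the lowest speed common to both
 * ends: if 10M is common, drop 100M and 1000M; else if 100M is
 * common, drop only 1000M; else leave the advertisement alone. */
static uint32_t low_speed_adv(uint32_t local, uint32_t partner)
{
        uint32_t common = local & partner;
        if (common & ADV_10)
                return local & ~(ADV_100 | ADV_1000);
        if (common & ADV_100)
                return local & ~ADV_1000;
        return local;  /* nothing lower to fall back to */
}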
+
+static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct fe_priv *np = get_nvpriv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ int i;
+ u32 tx_ctrl;
+
+ dprintk(KERN_INFO "forcedeth: nv_suspend\n");
+
+ /* MCP55: save the MSI-X table */
+ if ((pdev->device == PCI_DEVICE_ID_NVIDIA_NVENET_14) || (pdev->device == PCI_DEVICE_ID_NVIDIA_NVENET_15))
+ {
+ unsigned long phys_addr;
+ void __iomem *base_addr;
+ void __iomem *base;
+ unsigned int bir,len;
+ unsigned int i;
+ int pos;
+ u32 table_offset;
+
+ pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+ pci_read_config_dword(pdev, pos + 0x04, &table_offset);
+ bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
+ table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
+ phys_addr = pci_resource_start(pdev, bir) + table_offset;
+ np->msix_pa_addr = phys_addr;
+ len = NV_MSI_X_MAX_VECTORS * PCI_MSIX_ENTRY_SIZE;
+ base_addr = ioremap_nocache(phys_addr, len);
+
+ for (i = 0; i < NV_MSI_X_MAX_VECTORS; i++) {
+ base = base_addr + i*PCI_MSIX_ENTRY_SIZE;
+ np->nvmsg[i].address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
+ np->nvmsg[i].address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
+ np->nvmsg[i].data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET);
+ }
+
+ iounmap(base_addr);
+ }
+
+ nv_update_linkspeed(dev);
+
+ if (netif_running(dev)) {
+ netif_device_detach(dev);
+ /* bring down the adapter */
+ nv_close(dev);
+ }
+
+ /* relinquish control of the semaphore */
+ if (np->mac_in_use){
+ tx_ctrl = readl(base + NvRegTransmitterControl);
+ tx_ctrl &= ~NVREG_XMITCTL_HOST_SEMA_MASK;
+ writel(tx_ctrl, base + NvRegTransmitterControl);
+ }
+
+ /* set phy to a lower speed to conserve power */
+ if((lowpowerspeed==NV_LOW_POWER_ENABLED)&&!np->mac_in_use)
+ nv_set_low_speed(dev);
+
+#if NVVER > RHES4
+ pci_save_state(pdev);
+#else
+ pci_save_state(pdev,np->pci_state);
+#endif
+ np->saved_nvregphyinterface= readl(base+NvRegPhyInterface);
+ for(i=0;i<64;i++){
+ pci_read_config_dword(pdev,i*4,&np->saved_config_space[i]);
+ }
+#if NVVER > RHES4
+ pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled);
+#else
+ pci_enable_wake(pdev, state, np->wolenabled);
+#endif
+ pci_disable_device(pdev);
+
+#if NVVER > RHES4
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+#else
+ pci_set_power_state(pdev, state);
+#endif
+
+ return 0;
+}
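
nv_suspend() above saves the MSI-X table by hand because these kernels did not preserve it across a power-state transition. Per the PCI specification each table entry is 16 bytes; a sketch of the layout that the PCI_MSIX_ENTRY_* offsets walk (struct and field names here are illustrative):

#include <stdint.h>

/* One MSI-X table entry: 16 bytes per the PCI spec. nv_suspend saves
 * only the first three fields; the per-vector mask is left to the
 * PCI core. */
struct msix_entry_sketch {
        uint32_t addr_lo;   /* message address, low 32 bits  */
        uint32_t addr_hi;   /* message address, high 32 bits */
        uint32_t data;      /* message data                  */
        uint32_t vec_ctrl;  /* per-vector mask bit           */
};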
+
+static int nv_resume(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ int rc = 0;
+ struct fe_priv *np = get_nvpriv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ int i;
+ int err;
+ u32 txreg;
+
+ dprintk(KERN_INFO "forcedeth: nv_resume\n");
+
+ pci_set_power_state(pdev, PCI_D0);
+#if NVVER > RHES4
+ pci_restore_state(pdev);
+#else
+ pci_restore_state(pdev,np->pci_state);
+#endif
+ for(i=0;i<64;i++){
+ pci_write_config_dword(pdev,i*4,np->saved_config_space[i]);
+ }
+ err = pci_enable_device(pdev);
+ if (err) {
+ printk(KERN_INFO "forcedeth: pci_enable_dev failed (%d) for device %s\n",
+ err, pci_name(pdev));
+ }
+ pci_set_master(pdev);
+
+ txreg = readl(base + NvRegTransmitPoll);
+ txreg |= NVREG_TRANSMITPOLL_MAC_ADDR_REV;
+ writel(txreg, base + NvRegTransmitPoll);
+ writel(np->saved_nvregphyinterface,base+NvRegPhyInterface);
+ writel(np->orig_mac[0], base + NvRegMacAddrA);
+ writel(np->orig_mac[1], base + NvRegMacAddrB);
+
+ /* MCP55: restore the MSI-X table */
+ if ((pdev->device == PCI_DEVICE_ID_NVIDIA_NVENET_14) || (pdev->device == PCI_DEVICE_ID_NVIDIA_NVENET_15))
+ {
+ unsigned long phys_addr;
+ void __iomem *base_addr;
+ void __iomem *base;
+ unsigned int len;
+ unsigned int i;
+
+ len = NV_MSI_X_MAX_VECTORS * PCI_MSIX_ENTRY_SIZE;
+ phys_addr = np->msix_pa_addr;
+ base_addr = ioremap_nocache(phys_addr, len);
+ for (i = 0; i < NV_MSI_X_MAX_VECTORS; i++) {
+ base = base_addr + i*PCI_MSIX_ENTRY_SIZE;
+ writel(np->nvmsg[i].address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
+ writel(np->nvmsg[i].address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
+ writel(np->nvmsg[i].data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
+ }
+
+ iounmap(base_addr);
+ }
+
+ if(np->mac_in_use){
+ /* take control of the semaphore */
+ for (i = 0; i < 5000; i++) {
+ if(nv_mgmt_acquire_sema(dev))
+ break;
+ nv_msleep(1);
+ }
+ }
+
+ if(lowpowerspeed==NV_LOW_POWER_ENABLED){
+ /* re-initialize the phy */
+ phy_init(dev);
+ udelay(10);
+ }
+
+ /* bring up the adapter */
+ if (netif_running(dev)){
+ rc = nv_open(dev);
+ }
+ netif_device_attach(dev);
+
+ return rc;
+}
+
+#endif /* CONFIG_PM */
+static struct pci_driver nv_eth_driver = {
.name = "forcedeth",
.id_table = pci_tbl,
.probe = nv_probe,
.remove = __devexit_p(nv_remove),
+#ifdef CONFIG_PM
+ .suspend = nv_suspend,
+ .resume = nv_resume,
+#endif
};
+#ifdef CONFIG_PM
+static int nv_reboot_handler(struct notifier_block *nb, unsigned long event, void *p)
+{
+ struct pci_dev *pdev = NULL;
+ pm_message_t state = { PM_EVENT_SUSPEND };
+
+ switch (event)
+ {
+ case SYS_POWER_OFF:
+ case SYS_HALT:
+ case SYS_DOWN:
+#if NVVER < FEDORA7
+ while ((pdev = pci_find_device(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, pdev)) != NULL) {
+#else
+ while ((pdev = pci_get_device(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, pdev)) != NULL) {
+#endif
+ if (pci_dev_driver(pdev) == &nv_eth_driver) {
+ nv_suspend(pdev, state);
+ }
+ }
+ }
+
+ return NOTIFY_DONE;
+}
+
+/*
+ * Reboot notification
+ */
+struct notifier_block nv_reboot_notifier = {
+ .notifier_call = nv_reboot_handler,
+ .next = NULL,
+ .priority = 0,
+};
+#endif
static int __init init_nic(void)
{
+ int status;
printk(KERN_INFO "forcedeth.c: Reverse Engineered nForce ethernet driver. Version %s.\n", FORCEDETH_VERSION);
- return pci_module_init(&driver);
+ DPRINTK(DRV,KERN_DEBUG,"forcedeth:%s\n",DRV_DATE);
+#if NVVER > FEDORA7
+ status = pci_register_driver(&nv_eth_driver);
+#else
+ status = pci_module_init(&nv_eth_driver);
+#endif
+#ifdef CONFIG_PM
+ if (status >= 0)
+ register_reboot_notifier(&nv_reboot_notifier);
+#endif
+ return status;
}
static void __exit exit_nic(void)
{
- pci_unregister_driver(&driver);
-}
-
+#ifdef CONFIG_PM
+ unregister_reboot_notifier(&nv_reboot_notifier);
+#endif
+ pci_unregister_driver(&nv_eth_driver);
+}
+
+#if NVVER > SLES9
+module_param(debug, int, 0);
+module_param(lowpowerspeed, int, 0);
+MODULE_PARM_DESC(lowpowerspeed, "Low power state link speed is enabled by setting to 1 and disabled by setting to 0.");
module_param(max_interrupt_work, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
module_param(optimization_mode, int, 0);
@@ -4561,12 +6959,84 @@ MODULE_PARM_DESC(msi, "MSI interrupts ar
MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(msix, int, 0);
MODULE_PARM_DESC(msix, "MSIX interrupts are enabled by setting to 1 and disabled by setting to 0.");
+
+module_param(speed_duplex, int, 0);
+MODULE_PARM_DESC(speed_duplex, "PHY speed and duplex settings. Auto = 0, 10mbps half = 1, 10mbps full = 2, 100mbps half = 3, 100mbps full = 4, 1000mbps full = 5.");
+module_param(autoneg, int, 0);
+MODULE_PARM_DESC(autoneg, "PHY autonegotiate is enabled by setting to 1 and disabled by setting to 0.");
+module_param(scatter_gather, int, 0);
+MODULE_PARM_DESC(scatter_gather, "Scatter gather is enabled by setting to 1 and disabled by setting to 0.");
+module_param(tso_offload, int, 0);
+MODULE_PARM_DESC(tso_offload, "TCP Segmentation offload is enabled by setting to 1 and disabled by setting to 0.");
+module_param(mtu, int, 0);
+MODULE_PARM_DESC(mtu, "MTU value. Maximum value of 1500 or 9100 depending on hardware.");
+module_param(tx_checksum_offload, int, 0);
+MODULE_PARM_DESC(tx_checksum_offload, "Tx checksum offload is enabled by setting to 1 and disabled by setting to 0.");
+module_param(rx_checksum_offload, int, 0);
+MODULE_PARM_DESC(rx_checksum_offload, "Rx checksum offload is enabled by setting to 1 and disabled by setting to 0.");
+module_param(tx_ring_size, int, 0);
+MODULE_PARM_DESC(tx_ring_size, "Tx ring size. Maximum value of 1024 or 16384 depending on hardware.");
+module_param(rx_ring_size, int, 0);
+MODULE_PARM_DESC(rx_ring_size, "Rx ring size. Maximum value of 1024 or 16384 depending on hardware.");
+module_param(tx_flow_control, int, 0);
+MODULE_PARM_DESC(tx_flow_control, "Tx flow control is enabled by setting to 1 and disabled by setting to 0.");
+module_param(rx_flow_control, int, 0);
+MODULE_PARM_DESC(rx_flow_control, "Rx flow control is enabled by setting to 1 and disabled by setting to 0.");
module_param(dma_64bit, int, 0);
MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
-
+module_param(wol, int, 0);
+MODULE_PARM_DESC(wol, "Wake-On-Lan is enabled by setting to 1 and disabled by setting to 0.");
+module_param(tagging_8021pq, int, 0);
+MODULE_PARM_DESC(tagging_8021pq, "802.1pq tagging is enabled by setting to 1 and disabled by setting to 0.");
+#else
+MODULE_PARM(debug, "i");
+MODULE_PARM(lowpowerspeed, "i");
+MODULE_PARM_DESC(lowpowerspeed, "Low power state link speed is enabled by setting to 1 and disabled by setting to 0.");
+MODULE_PARM(max_interrupt_work, "i");
+MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
+MODULE_PARM(optimization_mode, "i");
+MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer.");
+MODULE_PARM(poll_interval, "i");
+MODULE_PARM_DESC(poll_interval, "Interval determines how frequent timer interrupt is generated by [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and Max is 65535.");
+#ifdef CONFIG_PCI_MSI
+MODULE_PARM(msi, "i");
+MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
+MODULE_PARM(msix, "i");
+MODULE_PARM_DESC(msix, "MSIX interrupts are enabled by setting to 1 and disabled by setting to 0.");
+#endif
+MODULE_PARM(speed_duplex, "i");
+MODULE_PARM_DESC(speed_duplex, "PHY speed and duplex settings. Auto = 0, 10mbps half = 1, 10mbps full = 2, 100mbps half = 3, 100mbps full = 4, 1000mbps full = 5.");
+MODULE_PARM(autoneg, "i");
+MODULE_PARM_DESC(autoneg, "PHY autonegotiate is enabled by setting to 1 and disabled by setting to 0.");
+MODULE_PARM(scatter_gather, "i");
+MODULE_PARM_DESC(scatter_gather, "Scatter gather is enabled by setting to 1 and disabled by setting to 0.");
+MODULE_PARM(tso_offload, "i");
+MODULE_PARM_DESC(tso_offload, "TCP Segmentation offload is enabled by setting to 1 and disabled by setting to 0.");
+MODULE_PARM(mtu, "i");
+MODULE_PARM_DESC(mtu, "MTU value. Maximum value of 1500 or 9100 depending on hardware.");
+MODULE_PARM(tx_checksum_offload, "i");
+MODULE_PARM_DESC(tx_checksum_offload, "Tx checksum offload is enabled by setting to 1 and disabled by setting to 0.");
+MODULE_PARM(rx_checksum_offload, "i");
+MODULE_PARM_DESC(rx_checksum_offload, "Rx checksum offload is enabled by setting to 1 and disabled by setting to 0.");
+MODULE_PARM(tx_ring_size, "i");
+MODULE_PARM_DESC(tx_ring_size, "Tx ring size. Maximum value of 1024 or 16384 depending on hardware.");
+MODULE_PARM(rx_ring_size, "i");
+MODULE_PARM_DESC(rx_ring_size, "Rx ring size. Maximum value of 1024 or 16384 depending on hardware.");
+MODULE_PARM(tx_flow_control, "i");
+MODULE_PARM_DESC(tx_flow_control, "Tx flow control is enabled by setting to 1 and disabled by setting to 0.");
+MODULE_PARM(rx_flow_control, "i");
+MODULE_PARM_DESC(rx_flow_control, "Rx flow control is enabled by setting to 1 and disabled by setting to 0.");
+MODULE_PARM(dma_64bit, "i");
+MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
+MODULE_PARM(wol, "i");
+MODULE_PARM_DESC(wol, "Wake-On-Lan is enabled by setting to 1 and disabled by setting to 0.");
+MODULE_PARM(tagging_8021pq, "i");
+MODULE_PARM_DESC(tagging_8021pq, "802.1pq tagging is enabled by setting to 1 and disabled by setting to 0.");
+#endif
MODULE_AUTHOR("Manfred Spraul <manfred@xxxxxxxxxxxxxxxx>");
MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
MODULE_LICENSE("GPL");
+MODULE_VERSION(FORCEDETH_VERSION);
MODULE_DEVICE_TABLE(pci, pci_tbl);