diff --git a/BLD/build/HEADERS/CUR-9-vmkdrivers-namespace/vmkernel64/release/tg3/__namespace.h b/BLD/build/HEADERS/CUR-9-vmkdrivers-namespace/vmkernel64/release/tg3/__namespace.h new file mode 100644 index 0000000000000000000000000000000000000000..b9d5287c3b8b607e36c197e573e48b820372e7f2 --- /dev/null +++ b/BLD/build/HEADERS/CUR-9-vmkdrivers-namespace/vmkernel64/release/tg3/__namespace.h @@ -0,0 +1,10 @@ + +/* + * DO NOT EDIT THIS FILE - IT IS GENERATED BY THE DRIVER BUILD. + * + * If you need to change the driver's name spaces, look in the scons + * files for the driver's defineVmkDriver() rule. + */ + +VMK_NAMESPACE_PROVIDES("com.broadcom.tg3", "9.2.0.0"); +#define VMKLNX_MY_NAMESPACE_VERSION "9.2.0.0" diff --git a/BLD/build/HEADERS/vmkapi-current-all-public-bincomp/vmkernel64/release/scsi/vmkapi_scsi_const.h b/BLD/build/HEADERS/vmkapi-current-all-public-bincomp/vmkernel64/release/scsi/vmkapi_scsi_const.h index b239fea40b97536181bc5f68cd60b1e40bfe3e56..63478b99c2761eaab0738dd0a0154f662a2b02d0 100644 --- a/BLD/build/HEADERS/vmkapi-current-all-public-bincomp/vmkernel64/release/scsi/vmkapi_scsi_const.h +++ b/BLD/build/HEADERS/vmkapi-current-all-public-bincomp/vmkernel64/release/scsi/vmkapi_scsi_const.h @@ -398,6 +398,8 @@ #define VMK_SCSI_ASC_LOGICAL_UNIT_ERROR 0x3e #define VMK_SCSI_ASCQ_LOGICAL_UNIT_FAILED_SELF_TEST 0x03 #define VMK_SCSI_ASCQ_LOGICAL_UNIT_FAILURE 0x01 +/** \brief LU is not configured (array only). */ +#define VMK_SCSI_ASC_LU_NOT_CONFIGURED 0x68 /* * Inquiry data. diff --git a/BLD/build/HEADERS/vmkapi-current-all-public/vmkernel64/release/scsi/vmkapi_scsi_const.h b/BLD/build/HEADERS/vmkapi-current-all-public/vmkernel64/release/scsi/vmkapi_scsi_const.h index b239fea40b97536181bc5f68cd60b1e40bfe3e56..63478b99c2761eaab0738dd0a0154f662a2b02d0 100644 --- a/BLD/build/HEADERS/vmkapi-current-all-public/vmkernel64/release/scsi/vmkapi_scsi_const.h +++ b/BLD/build/HEADERS/vmkapi-current-all-public/vmkernel64/release/scsi/vmkapi_scsi_const.h @@ -398,6 +398,8 @@ #define VMK_SCSI_ASC_LOGICAL_UNIT_ERROR 0x3e #define VMK_SCSI_ASCQ_LOGICAL_UNIT_FAILED_SELF_TEST 0x03 #define VMK_SCSI_ASCQ_LOGICAL_UNIT_FAILURE 0x01 +/** \brief LU is not configured (array only). */ +#define VMK_SCSI_ASC_LU_NOT_CONFIGURED 0x68 /* * Inquiry data. 
diff --git a/BLD/build/version/buildNumber.h b/BLD/build/version/buildNumber.h index 9e6280226e23a5ca9c673c9fee2cb8ee490266d5..6d1e25a0a9139133cc03c0d042386f0fa69401a6 100644 --- a/BLD/build/version/buildNumber.h +++ b/BLD/build/version/buildNumber.h @@ -1,6 +1,6 @@ -#define BUILD_NUMBER "build-623860" -#define BUILD_NUMBER_NUMERIC 623860 -#define BUILD_NUMBER_NUMERIC_STRING "623860" -#define PRODUCT_BUILD_NUMBER "product-build-45730" -#define PRODUCT_BUILD_NUMBER_NUMERIC 45730 -#define PRODUCT_BUILD_NUMBER_NUMERIC_STRING "45730" +#define BUILD_NUMBER "build-920308" +#define BUILD_NUMBER_NUMERIC 920308 +#define BUILD_NUMBER_NUMERIC_STRING "920308" +#define PRODUCT_BUILD_NUMBER "product-build-53817" +#define PRODUCT_BUILD_NUMBER_NUMERIC 53817 +#define PRODUCT_BUILD_NUMBER_NUMERIC_STRING "53817" diff --git a/vmkdrivers/src_9/drivers/ata/libata-sff.c b/vmkdrivers/src_9/drivers/ata/libata-sff.c index a6ab579f51c0905325d7de83f11704d08b6bcd8e..222b81efa550a3f32702dfafd19da5c78064e63b 100644 --- a/vmkdrivers/src_9/drivers/ata/libata-sff.c +++ b/vmkdrivers/src_9/drivers/ata/libata-sff.c @@ -438,6 +438,9 @@ void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset, qc = NULL; /* reset PIO HSM and stop DMA engine */ + + cancel_delayed_work_sync(&ap->port_task); + spin_lock_irqsave(ap->lock, flags); ap->hsm_task_state = HSM_ST_IDLE; diff --git a/vmkdrivers/src_9/drivers/char/openipmi/ipmi_si_drv/ipmi_si_intf.c b/vmkdrivers/src_9/drivers/char/openipmi/ipmi_si_drv/ipmi_si_intf.c index 9f149d33124896337b9c06a22f65bfb80a802aa8..17e14f2fac42857f58e733aee84bef25e9288a21 100644 --- a/vmkdrivers/src_9/drivers/char/openipmi/ipmi_si_drv/ipmi_si_intf.c +++ b/vmkdrivers/src_9/drivers/char/openipmi/ipmi_si_drv/ipmi_si_intf.c @@ -307,7 +307,11 @@ static void return_hosed_msg(struct smi_info *smi_info, int cCode) msg->rsp_size = 3; smi_info->curr_msg = NULL; + + /* Fix for PR #889881 */ + spin_unlock(&(smi_info->msg_lock)); deliver_recv_msg(smi_info, msg); + spin_lock(&(smi_info->msg_lock)); } static enum si_sm_result start_next_msg(struct smi_info *smi_info) @@ -767,7 +771,10 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info, /* If we were handling a user message, format a response to send to the upper layer to tell it about the error.
*/ + /* Fix for PR #889881 */ + spin_lock(&(smi_info->msg_lock)); return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED); + spin_unlock(&(smi_info->msg_lock)); } si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0); } diff --git a/vmkdrivers/src_9/drivers/net/bnx2x/bnx2x_main.c b/vmkdrivers/src_9/drivers/net/bnx2x/bnx2x_main.c index 029a895f82646bef3399292d1ce7ee8a86db7cbe..46328188802ad01b76a6ec162f4ff80001d11b8a 100644 --- a/vmkdrivers/src_9/drivers/net/bnx2x/bnx2x_main.c +++ b/vmkdrivers/src_9/drivers/net/bnx2x/bnx2x_main.c @@ -104,7 +104,7 @@ #define DRV_MODULE_VERSION "1.61.15.v50.1" -#define DRV_MODULE_RELDATE "$DateTime: 2011/04/17 13:10:36 $" +#define DRV_MODULE_RELDATE "$DateTime: 2012/04/17 15:53:26 $" #define BNX2X_BC_VER 0x040200 #if defined(BNX2X_UPSTREAM) && !defined(BNX2X_USE_INIT_VALUES) /* BNX2X_UPSTREAM */ @@ -13641,7 +13641,7 @@ static void poll_bnx2x(struct net_device *dev) #endif #else /* HAVE_POLL_CONTROLLER is used in 2.4 kernels */ -#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER) +#if (defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)) && !defined(__VMKLNX__) static void poll_bnx2x(struct net_device *dev) { struct bnx2x *bp = netdev_priv(dev); @@ -13675,8 +13675,10 @@ static const struct net_device_ops bnx2x_netdev_ops = { .ndo_vlan_rx_register = bnx2x_vlan_rx_register, #endif #ifdef CONFIG_NET_POLL_CONTROLLER +#if !defined(__VMKLNX__) /* BNX2X_UPSTREAM */ .ndo_poll_controller = poll_bnx2x, #endif +#endif }; #endif @@ -13872,8 +13874,10 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev, #endif #endif #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER) +#if !defined(__VMKLNX__) /* BNX2X_UPSTREAM */ dev->poll_controller = poll_bnx2x; #endif +#endif #endif dev->features |= NETIF_F_SG; dev->features |= NETIF_F_HW_CSUM; diff --git a/vmkdrivers/src_9/drivers/net/e1000/e1000_main.c b/vmkdrivers/src_9/drivers/net/e1000/e1000_main.c index 582c09fed384926bf4865b4187eb7a6251173932..e1eec16025845655e29340b9a8d4860ddd443d73 100644 --- a/vmkdrivers/src_9/drivers/net/e1000/e1000_main.c +++ b/vmkdrivers/src_9/drivers/net/e1000/e1000_main.c @@ -3738,6 +3738,9 @@ static irqreturn_t e1000_intr(int irq, void *data) if (likely(netif_rx_schedule_prep(netdev, &adapter->rx_ring[0].napi))) { #else /* defined(__VMKLNX__) */ if (likely(netif_rx_schedule_prep(netdev, &adapter->napi))) { + /* disable interrupts, without the synchronize_irq bit */ + E1000_WRITE_REG(hw, E1000_IMC, ~0); + E1000_WRITE_FLUSH(&adapter->hw); #endif /* !defined(__VMKLNX__) */ adapter->total_tx_bytes = 0; adapter->total_tx_packets = 0; diff --git a/vmkdrivers/src_9/drivers/net/e1000e/netdev.c b/vmkdrivers/src_9/drivers/net/e1000e/netdev.c index e204321f71b6cee29e30c3c8858c1aed17ea2d08..f0bbc1efd489d5818f5676b3768b8ed182d6135b 100644 --- a/vmkdrivers/src_9/drivers/net/e1000e/netdev.c +++ b/vmkdrivers/src_9/drivers/net/e1000e/netdev.c @@ -959,8 +959,11 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter) tx_desc = E1000_TX_DESC(*tx_ring, i); buffer_info = &tx_ring->buffer_info[i]; cleaned = (i == eop); - +#ifdef __VMKLNX__ + if (cleaned && (buffer_info->skb != NULL)) { +#else //!__VMKLNX__ if (cleaned) { +#endif //__VMKLNX__ struct sk_buff *skb = buffer_info->skb; #ifdef NETIF_F_TSO unsigned int segs, bytecount; diff --git a/vmkdrivers/src_9/drivers/net/igb/igb_main.c b/vmkdrivers/src_9/drivers/net/igb/igb_main.c index 12aed585b06bce7d7a3af91f2505ed77456a9a82..8f09efac4a41425cdbeaf5e1d3e18b9b1f345ace 100644 ---
a/vmkdrivers/src_9/drivers/net/igb/igb_main.c +++ b/vmkdrivers/src_9/drivers/net/igb/igb_main.c @@ -3496,15 +3496,11 @@ static void igb_update_ring_itr(struct igb_q_vector *q_vector) /* Don't starve jumbo frames */ avg_wire_size = min(avg_wire_size, 3000); -#ifndef __VMKLNX__ /* Give a little boost to mid-size frames */ if ((avg_wire_size > 300) && (avg_wire_size < 1200)) new_val = avg_wire_size / 3; else new_val = avg_wire_size / 2; -#else - new_val = avg_wire_size; -#endif set_itr_val: if (new_val != q_vector->itr_val) { diff --git a/vmkdrivers/src_9/drivers/net/ixgbe/ixgbe_cna.c b/vmkdrivers/src_9/drivers/net/ixgbe/ixgbe_cna.c index 9b7504ffdbe8572dd4afc0effc5b8d15b3eff058..210f6bba2ee02ff36a271d6a3f9454fc17539c4e 100644 --- a/vmkdrivers/src_9/drivers/net/ixgbe/ixgbe_cna.c +++ b/vmkdrivers/src_9/drivers/net/ixgbe/ixgbe_cna.c @@ -107,7 +107,12 @@ int ixgbe_cna_enable(struct ixgbe_adapter *adapter) cnadev->mtu = netdev->mtu; cnadev->pdev = netdev->pdev; cnadev->gso_max_size = GSO_MAX_SIZE; - cnadev->features = netdev->features | NETIF_F_CNA; +#ifdef __VMKLNX__ + cnadev->features = netdev->features | NETIF_F_CNA | + NETIF_F_HW_VLAN_FILTER; +#else + cnadev->features = netdev->features | NETIF_F_CNA; +#endif /*__VMKLNX__*/ /* set the MAC address to SAN mac address */ if (ixgbe_validate_mac_addr(adapter->hw.mac.san_addr) == 0) diff --git a/vmkdrivers/src_9/drivers/net/ixgbe/ixgbe_common.c b/vmkdrivers/src_9/drivers/net/ixgbe/ixgbe_common.c index 57342360091e244789372b6a17b954039db6c65b..775da33390275f850a871a0458de4fb5133e0e47 100644 --- a/vmkdrivers/src_9/drivers/net/ixgbe/ixgbe_common.c +++ b/vmkdrivers/src_9/drivers/net/ixgbe/ixgbe_common.c @@ -2951,11 +2951,13 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, else *speed = IXGBE_LINK_SPEED_100_FULL; +#ifndef __VMKLNX__ /* if link is down, zero out the current_mode */ if (*link_up == false) { hw->fc.current_mode = ixgbe_fc_none; hw->fc.fc_was_autonegged = false; } +#endif return 0; } diff --git a/vmkdrivers/src_9/drivers/net/ixgbe/ixgbe_main.c b/vmkdrivers/src_9/drivers/net/ixgbe/ixgbe_main.c index d438648a465c6cd7de557490cdd61c25b222c064..0110a8dfe2a4cf8107914e6c73a72bf11d4522c8 100644 --- a/vmkdrivers/src_9/drivers/net/ixgbe/ixgbe_main.c +++ b/vmkdrivers/src_9/drivers/net/ixgbe/ixgbe_main.c @@ -74,7 +74,7 @@ static const char ixgbe_driver_string[] = #define FPGA -#define DRV_VERSION "2.0.84.8.2-10vmw" DRIVERNAPI DRV_HW_PERF FPGA +#define DRV_VERSION "2.0.84.8.2-11vmw" DRIVERNAPI DRV_HW_PERF FPGA const char ixgbe_driver_version[] = DRV_VERSION; static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation."; /* ixgbe_pci_tbl - PCI Device ID Table @@ -6095,7 +6095,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100; adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100; adapter->dcb_cfg.rx_pba_cfg = pba_equal; - adapter->dcb_cfg.pfc_mode_enable = true; + adapter->dcb_cfg.pfc_mode_enable = false; adapter->dcb_cfg.round_robin_enable = false; adapter->dcb_set_bitmap = 0x00; @@ -8281,18 +8281,24 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, DPRINTK(TX_ERR, INFO, "my (preferred) node is: %d\n", adapter->node); +#ifndef __VMKLNX__ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { hw->fc.requested_mode = ixgbe_fc_pfc; hw->fc.current_mode = ixgbe_fc_pfc; /* init for ethtool output */ } +#endif #ifdef MAX_SKB_FRAGS #ifdef NETIF_F_HW_VLAN_TX netdev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX | - 
NETIF_F_HW_VLAN_RX | - NETIF_F_HW_VLAN_FILTER; +#ifdef __VMKLNX__ + NETIF_F_HW_VLAN_RX; +#else + NETIF_F_HW_VLAN_RX | + NETIF_F_HW_VLAN_FILTER; +#endif /*__VMKLNX__*/ #else netdev->features = NETIF_F_SG | NETIF_F_IP_CSUM; diff --git a/vmkdrivers/src_9/drivers/net/nx_nic/unm_nic_hw.c b/vmkdrivers/src_9/drivers/net/nx_nic/unm_nic_hw.c index d070b81413bdd57f2d7ecb599097a21dabe97209..ee8de1cd31560c491292595fcfa2f6c7015dcbf1 100644 --- a/vmkdrivers/src_9/drivers/net/nx_nic/unm_nic_hw.c +++ b/vmkdrivers/src_9/drivers/net/nx_nic/unm_nic_hw.c @@ -29,7 +29,7 @@ /* * Source file for NIC routines to access the Phantom hardware * - * $Id: //depot/vmkdrivers/esx50u1/src_9/drivers/net/nx_nic/unm_nic_hw.c#1 $ + * $Id: //depot/vmkdrivers/esx50u2/src_9/drivers/net/nx_nic/unm_nic_hw.c#1 $ * */ #include diff --git a/vmkdrivers/src_9/drivers/net/tg3/esx_ioctl.h b/vmkdrivers/src_9/drivers/net/tg3/esx_ioctl.h new file mode 100644 index 0000000000000000000000000000000000000000..35e040b88f8ce3279487685283434ff4692d08f4 --- /dev/null +++ b/vmkdrivers/src_9/drivers/net/tg3/esx_ioctl.h @@ -0,0 +1,86 @@ +/**************************************************************************** + * Copyright(c) 2000-2012 Broadcom Corporation, all rights reserved + * Proprietary and Confidential Information. + * + * This source file is the property of Broadcom Corporation, and + * may not be copied or distributed in any isomorphic form without + * the prior written consent of Broadcom Corporation. + * + * Name: esx_ioctl.h + * + * Description: Defines data structures and prototypes for the ioctls + * supported by the driver on a VMware ESXi system. + * + * Author: cchsu + * + * $Log: + * + ****************************************************************************/ + +#ifndef BRCM_VMWARE_IOCTL_H +#define BRCM_VMWARE_IOCTL_H + +#ifdef __cplusplus +extern "C" { +#endif + +#define BRCM_VMWARE_CIM_IOCTL 0x89f0 + +#define BRCM_VMWARE_CIM_CMD_ENABLE_NIC 0x0001 +#define BRCM_VMWARE_CIM_CMD_DISABLE_NIC 0x0002 +#define BRCM_VMWARE_CIM_CMD_REG_READ 0x0003 +#define BRCM_VMWARE_CIM_CMD_REG_WRITE 0x0004 +#define BRCM_VMWARE_CIM_CMD_GET_NIC_PARAM 0x0005 +#define BRCM_VMWARE_CIM_CMD_GET_NIC_STATUS 0x0006 +#define BRCM_VMWARE_CIM_CMD_CFG_REG_READ 0x0007 +#define BRCM_VMWARE_CIM_CMD_CFG_REG_WRITE 0x0008 + +// Access type for Register Read/Write Ioctl +#define BRCM_VMWARE_REG_ACCESS_DIRECT 0x0000 +#define BRCM_VMWARE_REG_ACCESS_PCI_CFG 0x0001 + +struct brcm_vmware_ioctl_reg_read_req +{ + u32 reg_offset; + u32 reg_value; + u32 reg_access_type; +} __attribute__((packed)); + +struct brcm_vmware_ioctl_reg_write_req +{ + u32 reg_offset; + u32 reg_value; + u32 reg_access_type; +} __attribute__((packed)); + +#define BRCM_VMWARE_GET_NIC_PARAM_VERSION 1 +struct brcm_vmware_ioctl_get_nic_param_req +{ + u32 version; + u32 mtu; + u8 current_mac_addr[8]; +} __attribute__((packed)); + +#define BRCM_VMWARE_INVALID_NIC_STATUS 0xffffffff +struct brcm_vmware_ioctl_get_nic_status_req +{ + u32 nic_status; // 1: Up, 0: Down +} __attribute__((packed)); + +struct brcm_vmware_ioctl_req +{ + u32 cmd; + union { + // no struct for reset_nic command + struct brcm_vmware_ioctl_reg_read_req reg_read_req; + struct brcm_vmware_ioctl_reg_write_req reg_write_req; + struct brcm_vmware_ioctl_get_nic_param_req get_nic_param_req; + struct brcm_vmware_ioctl_get_nic_status_req get_nic_status_req; + } cmd_req; +} __attribute__((packed)); + +#ifdef __cplusplus +}; +#endif + +#endif diff --git a/vmkdrivers/src_9/drivers/net/tg3/tg3.c b/vmkdrivers/src_9/drivers/net/tg3/tg3.c index
961f4491b6175dd0c7ab20e841625969e6669388..2366563311672ab085acc91d7c6925eb9b211e57 100644 --- a/vmkdrivers/src_9/drivers/net/tg3/tg3.c +++ b/vmkdrivers/src_9/drivers/net/tg3/tg3.c @@ -4,8 +4,8 @@ * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com) * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com) * Copyright (C) 2004 Sun Microsystems Inc. - * Copyright (C) 2005-2011 Broadcom Corporation. - * Portions Copyright (C) VMware, Inc. 2007-2010. All Rights Reserved. + * Copyright (C) 2005-2012 Broadcom Corporation. + * Portions Copyright (C) VMware, Inc. 2007-2012. All Rights Reserved. * * Firmware is: * Derived from proprietary unpublished source code, @@ -16,7 +16,6 @@ * notice is accompanying it. */ - #include #if (LINUX_VERSION_CODE < 0x020612) @@ -33,6 +32,7 @@ #if (LINUX_VERSION_CODE >= 0x20600) #include #endif +#include #include #include #include @@ -42,6 +42,7 @@ #include #include #include +#include #include #include #include @@ -61,7 +62,6 @@ #if (LINUX_VERSION_CODE >= 0x020600) #include #endif -#include #ifdef BCM_HAS_REQUEST_FIRMWARE #include #else @@ -72,6 +72,12 @@ #include #include +#ifdef BCM_HAS_IEEE1588_SUPPORT +#include +#include +#include +#endif /* BCM_HAS_IEEE1588_SUPPORT */ + #include #include #include @@ -85,25 +91,42 @@ #define BAR_0 0 #define BAR_2 2 -#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) -#define TG3_VLAN_TAG_USED 1 -#else -#define TG3_VLAN_TAG_USED 0 -#endif - -#if defined(__VMKLNX__) -#include "tg3_vmware.h" -#endif #include "tg3.h" +/* Functions & macros to verify TG3_FLAGS types */ + +static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits) +{ + return test_bit(flag, bits); +} + +static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits) +{ + set_bit(flag, bits); +} + +static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits) +{ + clear_bit(flag, bits); +} + +#define tg3_flag(tp, flag) \ + _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags) +#define tg3_flag_set(tp, flag) \ + _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags) +#define tg3_flag_clear(tp, flag) \ + _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags) + #define DRV_MODULE_NAME "tg3" #define TG3_MAJ_NUM 3 -#define TG3_MIN_NUM 110 +#define TG3_MIN_NUM 123 #define DRV_MODULE_VERSION \ - __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM) "h.v50.4" -#define DRV_MODULE_RELDATE "September 27, 2010" + __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM) "b.v50.1" +#define DRV_MODULE_RELDATE "April 03, 2012" +#define RESET_KIND_SHUTDOWN 0 +#define RESET_KIND_INIT 1 +#define RESET_KIND_SUSPEND 2 -#define TG3_DEF_MAC_MODE 0 #define TG3_DEF_RX_MODE 0 #define TG3_DEF_TX_MODE 0 #define TG3_DEF_MSG_ENABLE \ @@ -116,25 +139,44 @@ NETIF_MSG_RX_ERR | \ NETIF_MSG_TX_ERR) +#define TG3_GRC_LCLCTL_PWRSW_DELAY 100 + /* length of time before we decide the hardware is borked, * and dev->tx_timeout() should be called to fix the problem */ +#if defined(__VMKLNX__) +/* On VMware ESX there is a possibility that the netdev watchdog thread + * runs before the reset task if the machine is loaded. If this occurs + * too many times, these premature watchdog triggers will cause a PSOD + * on a VMware ESX beta build */ +#define TG3_TX_TIMEOUT (20 * HZ) +#else #define TG3_TX_TIMEOUT (5 * HZ) +#endif /* defined(__VMKLNX__) */ /* hardware minimum and maximum for a single frame's data payload */ #define TG3_MIN_MTU 60 #define TG3_MAX_MTU(tp) \ - ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ? 9000 : 1500) + (tg3_flag(tp, JUMBO_CAPABLE) ?
9000 : 1500) /* These numbers seem to be hard coded in the NIC firmware somehow. * You can't change the ring sizes, but you can change where you place * them in the NIC onboard memory. */ -#define TG3_RX_RING_SIZE 512 +#define TG3_RX_STD_RING_SIZE(tp) \ + (tg3_flag(tp, LRG_PROD_RING_CAP) ? \ + TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700) +#define TG3_RX_JMB_RING_SIZE(tp) \ + (tg3_flag(tp, LRG_PROD_RING_CAP) ? \ + TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700) + +#if defined(__VMKLNX__) +#define TG3_DEF_RX_RING_PENDING 255 +#define TG3_DEF_RX_JUMBO_RING_PENDING 200 +#else #define TG3_DEF_RX_RING_PENDING 200 -#define TG3_RX_JUMBO_RING_SIZE 256 #define TG3_DEF_RX_JUMBO_RING_PENDING 100 -#define TG3_RSS_INDIR_TBL_SIZE 128 +#endif /* Do not place this n-ring entries value into the tp struct itself, * we really want to expose these constants to GCC so that modulo et @@ -142,26 +184,20 @@ * hw multiply/modulo instructions. Another solution would be to * replace things like '% foo' with '& (foo - 1)'. */ -#define TG3_RX_RCB_RING_SIZE(tp) \ - (((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) && \ - !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) ? 1024 : 512) #define TG3_TX_RING_SIZE 512 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1) -#define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \ - TG3_RX_RING_SIZE) -#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_ext_rx_buffer_desc) * \ - TG3_RX_JUMBO_RING_SIZE) -#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \ - TG3_RX_RCB_RING_SIZE(tp)) +#define TG3_RX_STD_RING_BYTES(tp) \ + (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp)) +#define TG3_RX_JMB_RING_BYTES(tp) \ + (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp)) +#define TG3_RX_RCB_RING_BYTES(tp) \ + (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1)) #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \ TG3_TX_RING_SIZE) #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1)) -#define TG3_RX_DMA_ALIGN 16 -#define TG3_RX_HEADROOM ALIGN(VLAN_HLEN, TG3_RX_DMA_ALIGN) - #define TG3_DMA_BYTE_ENAB 64 #define TG3_RX_STD_DMA_SZ 1536 @@ -172,11 +208,11 @@ #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ) #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ) -#define TG3_RX_STD_BUFF_RING_SIZE \ - (sizeof(struct ring_info) * TG3_RX_RING_SIZE) +#define TG3_RX_STD_BUFF_RING_SIZE(tp) \ + (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp)) -#define TG3_RX_JMB_BUFF_RING_SIZE \ - (sizeof(struct ring_info) * TG3_RX_JUMBO_RING_SIZE) +#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \ + (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp)) /* Due to a hardware bug, the 5701 can only DMA to memory addresses * that are at least dword aligned when used in PCIX mode. 
The driver @@ -196,20 +232,24 @@ #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh) #endif +#if (NET_IP_ALIGN != 0) +#define TG3_RX_OFFSET(tp) ((tp)->rx_offset) +#else +#define TG3_RX_OFFSET(tp) 0 +#endif + /* minimum number of free TX descriptors required to wake up TX process */ #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4) +#define TG3_TX_BD_DMA_MAX_2K 2048 +#define TG3_TX_BD_DMA_MAX_4K 4096 +#define TG3_TX_BD_DMA_MAX_32K 32768 #define TG3_RAW_IP_ALIGN 2 #include "tg3_compat2.h" -/* number of ETHTOOL_GSTATS u64's */ -#define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64)) - -#define TG3_NUM_TEST 6 - #if defined(__VMKLNX__) -/* see pr141646 , 626764*/ +/* see pr141646, 626764*/ #define TG3_FW_UPDATE_TIMEOUT_SEC 30 #else #define TG3_FW_UPDATE_TIMEOUT_SEC 5 @@ -236,6 +276,12 @@ module_param(tg3_debug, int, 0); MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value"); #endif +static int tg3_disable_eee = 1; +#if (LINUX_VERSION_CODE >= 0x20600) +module_param(tg3_disable_eee, int, 0); +MODULE_PARM_DESC(tg3_disable_eee, "Disable Energy Efficient Ethernet (EEE) support"); +#endif + static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = { {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)}, @@ -302,13 +348,18 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = { {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5724)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)}, {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)}, {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)}, {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)}, @@ -323,7 +374,7 @@ MODULE_DEVICE_TABLE(pci, tg3_pci_tbl); static const struct { const char string[ETH_GSTRING_LEN]; -} ethtool_stats_keys[TG3_NUM_STATS] = { +} ethtool_stats_keys[] = { { "rx_octets" }, { "rx_fragments" }, { "rx_ucast_packets" }, @@ -399,20 +450,30 @@ static const struct { { "ring_status_update" }, { "nic_irqs" }, { "nic_avoided_irqs" }, - { "nic_tx_threshold_hit" } + { "nic_tx_threshold_hit" }, + + { "mbuf_lwm_thresh_hit" }, }; +#define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys) + + static const struct { const char string[ETH_GSTRING_LEN]; -} ethtool_test_keys[TG3_NUM_TEST] = { - { "nvram test (online) " }, - { "link test (online) " }, - { "register test (offline)" }, - { "memory test (offline)" }, - { "loopback test (offline)" }, - { "interrupt test (offline)" }, +} ethtool_test_keys[] = { + { "nvram test (online) " }, + { "link test (online) " }, + { 
"register test (offline)" }, + { "memory test (offline)" }, + { "mac loopback test (offline)" }, + { "phy loopback test (offline)" }, + { "ext loopback test (offline)" }, + { "interrupt test (offline)" }, }; +#define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys) + + static void tg3_write32(struct tg3 *tp, u32 off, u32 val) { writel(val, tp->regs + off); @@ -510,8 +571,7 @@ static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off) */ static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait) { - if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) || - (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND)) + if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND)) /* Non-posted methods */ tp->write32(tp, off, val); else { @@ -531,8 +591,7 @@ static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait) static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val) { tp->write32_mbox(tp, off, val); - if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) && - !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND)) + if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND)) tp->read32_mbox(tp, off); } @@ -540,9 +599,9 @@ static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val) { void __iomem *mbox = tp->regs + off; writel(val, mbox); - if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) + if (tg3_flag(tp, TXD_MBOX_HWBUG)) writel(val, mbox); - if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) + if (tg3_flag(tp, MBOX_WRITE_REORDER)) readl(mbox); } @@ -571,12 +630,12 @@ static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val) { unsigned long flags; - if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) && + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 && (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) return; spin_lock_irqsave(&tp->indirect_lock, flags); - if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) { + if (tg3_flag(tp, SRAM_USE_CONFIG)) { pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off); pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); @@ -596,14 +655,14 @@ static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val) { unsigned long flags; - if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) && + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 && (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) { *val = 0; return; } spin_lock_irqsave(&tp->indirect_lock, flags); - if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) { + if (tg3_flag(tp, SRAM_USE_CONFIG)) { pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off); pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); @@ -622,7 +681,7 @@ static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val) static void tg3_ape_lock_init(struct tg3 *tp) { int i; - u32 regbase; + u32 regbase, bit; if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) regbase = TG3_APE_LOCK_GRANT; @@ -630,22 +689,50 @@ static void tg3_ape_lock_init(struct tg3 *tp) regbase = TG3_APE_PER_LOCK_GRANT; /* Make sure the driver hasn't any stale locks. 
*/ - for (i = 0; i < 8; i++) - tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER); + for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) { + switch (i) { + case TG3_APE_LOCK_PHY0: + case TG3_APE_LOCK_PHY1: + case TG3_APE_LOCK_PHY2: + case TG3_APE_LOCK_PHY3: + bit = APE_LOCK_GRANT_DRIVER; + break; + default: + if (!tp->pci_fn) + bit = APE_LOCK_GRANT_DRIVER; + else + bit = 1 << tp->pci_fn; + } + tg3_ape_write32(tp, regbase + 4 * i, bit); + } + } static int tg3_ape_lock(struct tg3 *tp, int locknum) { int i, off; int ret = 0; - u32 status, req, gnt; + u32 status, req, gnt, bit; - if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) + if (!tg3_flag(tp, ENABLE_APE)) return 0; switch (locknum) { + case TG3_APE_LOCK_GPIO: + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) + return 0; case TG3_APE_LOCK_GRC: case TG3_APE_LOCK_MEM: + if (!tp->pci_fn) + bit = APE_LOCK_REQ_DRIVER; + else + bit = 1 << tp->pci_fn; + break; + case TG3_APE_LOCK_PHY0: + case TG3_APE_LOCK_PHY1: + case TG3_APE_LOCK_PHY2: + case TG3_APE_LOCK_PHY3: + bit = APE_LOCK_REQ_DRIVER; break; default: return -EINVAL; @@ -661,21 +748,19 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum) off = 4 * locknum; - tg3_ape_write32(tp, req + off, APE_LOCK_REQ_DRIVER); + tg3_ape_write32(tp, req + off, bit); /* Wait for up to 1 millisecond to acquire lock. */ for (i = 0; i < 100; i++) { status = tg3_ape_read32(tp, gnt + off); - if (status == APE_LOCK_GRANT_DRIVER) + if (status == bit) break; udelay(10); } - if (status != APE_LOCK_GRANT_DRIVER) { + if (status != bit) { /* Revoke the lock request. */ - tg3_ape_write32(tp, gnt + off, - APE_LOCK_GRANT_DRIVER); - + tg3_ape_write32(tp, gnt + off, bit); ret = -EBUSY; } @@ -684,14 +769,27 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum) static void tg3_ape_unlock(struct tg3 *tp, int locknum) { - u32 gnt; + u32 gnt, bit; - if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) + if (!tg3_flag(tp, ENABLE_APE)) return; switch (locknum) { + case TG3_APE_LOCK_GPIO: + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) + return; case TG3_APE_LOCK_GRC: case TG3_APE_LOCK_MEM: + if (!tp->pci_fn) + bit = APE_LOCK_GRANT_DRIVER; + else + bit = 1 << tp->pci_fn; + break; + case TG3_APE_LOCK_PHY0: + case TG3_APE_LOCK_PHY1: + case TG3_APE_LOCK_PHY2: + case TG3_APE_LOCK_PHY3: + bit = APE_LOCK_GRANT_DRIVER; break; default: return; @@ -702,7 +800,104 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum) else gnt = TG3_APE_PER_LOCK_GRANT; - tg3_ape_write32(tp, gnt + 4 * locknum, APE_LOCK_GRANT_DRIVER); + tg3_ape_write32(tp, gnt + 4 * locknum, bit); +} + +static void tg3_ape_send_event(struct tg3 *tp, u32 event) +{ + int i; + u32 apedata; + + /* NCSI does not support APE events */ + if (tg3_flag(tp, APE_HAS_NCSI)) + return; + + apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG); + if (apedata != APE_SEG_SIG_MAGIC) + return; + + apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS); + if (!(apedata & APE_FW_STATUS_READY)) + return; + + /* Wait for up to 1 millisecond for APE to service previous event. 
*/ + for (i = 0; i < 10; i++) { + if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM)) + return; + + apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS); + + if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING)) + tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, + event | APE_EVENT_STATUS_EVENT_PENDING); + + tg3_ape_unlock(tp, TG3_APE_LOCK_MEM); + + if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING)) + break; + + udelay(100); + } + + if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING)) + tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1); +} + +static void tg3_ape_driver_state_change(struct tg3 *tp, int kind) +{ + u32 event; + u32 apedata; + + if (!tg3_flag(tp, ENABLE_APE)) + return; + + switch (kind) { + case RESET_KIND_INIT: + tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, + APE_HOST_SEG_SIG_MAGIC); + tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN, + APE_HOST_SEG_LEN_MAGIC); + apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT); + tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata); + tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID, + APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM)); + tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR, + APE_HOST_BEHAV_NO_PHYLOCK); + tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, + TG3_APE_HOST_DRVR_STATE_START); + + event = APE_EVENT_STATUS_STATE_START; + break; + case RESET_KIND_SHUTDOWN: + /* With the interface we are currently using, + * APE does not track driver state. Wiping + * out the HOST SEGMENT SIGNATURE forces + * the APE to assume OS absent status. + */ + tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0); + + if (device_may_wakeup(&tp->pdev->dev) && + tg3_flag(tp, WOL_ENABLE)) { + tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED, + TG3_APE_HOST_WOL_SPEED_AUTO); + apedata = TG3_APE_HOST_DRVR_STATE_WOL; + } else + apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD; + + tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata); + + event = APE_EVENT_STATUS_STATE_UNLOAD; + break; + case RESET_KIND_SUSPEND: + event = APE_EVENT_STATUS_STATE_SUSPEND; + break; + default: + return; + } + + event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE; + + tg3_ape_send_event(tp, event); } static void tg3_disable_ints(struct tg3 *tp) @@ -730,14 +925,14 @@ static void tg3_enable_ints(struct tg3 *tp) struct tg3_napi *tnapi = &tp->napi[i]; tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); - if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) + if (tg3_flag(tp, 1SHOT_MSI)) tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); tp->coal_now |= tnapi->coal_now; } /* Force an initial interrupt */ - if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) && + if (!tg3_flag(tp, TAGGED_STATUS) && #if defined(__VMKLNX__) tp->napi[0].hw_status && #endif @@ -756,9 +951,7 @@ static inline unsigned int tg3_has_work(struct tg3_napi *tnapi) unsigned int work_exists = 0; /* check for phy events */ - if (!(tp->tg3_flags & - (TG3_FLAG_USE_LINKCHG_REG | - TG3_FLAG_POLL_SERDES))) { + if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) { if (sblk->status & SD_STATUS_LINK_CHG) work_exists = 1; } @@ -786,63 +979,17 @@ static void tg3_int_reenable(struct tg3_napi *tnapi) * The last_tag we write above tells the chip which piece of * work we've completed. 
*/ - if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) && - tg3_has_work(tnapi)) + if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi)) tw32(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | tnapi->coal_now); } -static void tg3_napi_disable(struct tg3 *tp) -{ -#ifdef TG3_NAPI - int i; - - for (i = tp->irq_cnt - 1; i >= 0; i--) - napi_disable(&tp->napi[i].napi); -#else - netif_poll_disable(tp->dev); -#endif -} - -static void tg3_napi_enable(struct tg3 *tp) -{ -#ifdef TG3_NAPI - int i; - - for (i = 0; i < tp->irq_cnt; i++) - napi_enable(&tp->napi[i].napi); -#else - netif_poll_enable(tp->dev); -#endif -} - -static inline void tg3_netif_stop(struct tg3 *tp) -{ - tp->dev->trans_start = jiffies; /* prevent tx timeout */ - tg3_napi_disable(tp); - netif_tx_disable(tp->dev); -} - -static inline void tg3_netif_start(struct tg3 *tp) -{ - /* NOTE: unconditional netif_tx_wake_all_queues is only - * appropriate so long as all callers are assured to - * have free tx slots (such as after tg3_init_hw) - */ - netif_tx_wake_all_queues(tp->dev); - - tg3_napi_enable(tp); - tp->napi[0].hw_status->status |= SD_STATUS_UPDATED; - tg3_enable_ints(tp); -} - static void tg3_switch_clocks(struct tg3 *tp) { u32 clock_ctrl; u32 orig_clock_ctrl; - if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) || - (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) + if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS)) return; clock_ctrl = tr32(TG3PCI_CLOCK_CTRL); @@ -853,7 +1000,7 @@ static void tg3_switch_clocks(struct tg3 *tp) 0x1f); tp->pci_clock_ctrl = clock_ctrl; - if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { + if (tg3_flag(tp, 5705_PLUS)) { if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) { tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl | CLOCK_CTRL_625_CORE, 40); @@ -884,6 +1031,8 @@ static int tg3_readphy(struct tg3 *tp, int reg, u32 *val) udelay(80); } + tg3_ape_lock(tp, tp->phy_ape_lock); + *val = 0x0; frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) & @@ -918,6 +1067,8 @@ static int tg3_readphy(struct tg3 *tp, int reg, u32 *val) udelay(80); } + tg3_ape_unlock(tp, tp->phy_ape_lock); + return ret; } @@ -927,8 +1078,8 @@ static int tg3_writephy(struct tg3 *tp, int reg, u32 val) unsigned int loops; int ret; - if ((tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) && - (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL)) + if ((tp->phy_flags & TG3_PHYFLG_IS_FET) && + (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL)) return 0; if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { @@ -937,6 +1088,8 @@ static int tg3_writephy(struct tg3 *tp, int reg, u32 val) udelay(80); } + tg3_ape_lock(tp, tp->phy_ape_lock); + frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) & MI_COM_PHY_ADDR_MASK); frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) & @@ -967,59 +1120,176 @@ static int tg3_writephy(struct tg3 *tp, int reg, u32 val) udelay(80); } + tg3_ape_unlock(tp, tp->phy_ape_lock); + return ret; } -static int tg3_bmcr_reset(struct tg3 *tp) +static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val) { - u32 phy_control; - int limit, err; + int err; - /* OK, reset it, and poll the BMCR_RESET bit until it - * clears or we time out. 
- */ - phy_control = BMCR_RESET; - err = tg3_writephy(tp, MII_BMCR, phy_control); - if (err != 0) - return -EBUSY; + err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad); + if (err) + goto done; - limit = 5000; - while (limit--) { - err = tg3_readphy(tp, MII_BMCR, &phy_control); - if (err != 0) - return -EBUSY; + err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr); + if (err) + goto done; - if ((phy_control & BMCR_RESET) == 0) { - udelay(40); - break; - } - udelay(10); - } - if (limit < 0) - return -EBUSY; + err = tg3_writephy(tp, MII_TG3_MMD_CTRL, + MII_TG3_MMD_CTRL_DATA_NOINC | devad); + if (err) + goto done; - return 0; + err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val); + +done: + return err; } -#ifdef BCM_INCLUDE_PHYLIB_SUPPORT -static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg) +static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val) { - struct tg3 *tp = bp->priv; - u32 val; + int err; - spin_lock_bh(&tp->lock); + err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad); + if (err) + goto done; - if (tg3_readphy(tp, reg, &val)) - val = -EIO; + err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr); + if (err) + goto done; - spin_unlock_bh(&tp->lock); + err = tg3_writephy(tp, MII_TG3_MMD_CTRL, + MII_TG3_MMD_CTRL_DATA_NOINC | devad); + if (err) + goto done; - return val; + err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val); + +done: + return err; } -static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val) +static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val) { - struct tg3 *tp = bp->priv; + int err; + + err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg); + if (!err) + err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val); + + return err; +} + +static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val) +{ + int err; + + err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg); + if (!err) + err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val); + + return err; +} + +static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val) +{ + int err; + + err = tg3_writephy(tp, MII_TG3_AUX_CTRL, + (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) | + MII_TG3_AUXCTL_SHDWSEL_MISC); + if (!err) + err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val); + + return err; +} + +static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set) +{ + if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC) + set |= MII_TG3_AUXCTL_MISC_WREN; + + return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg); +} + +#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \ + tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \ + MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \ + MII_TG3_AUXCTL_ACTL_TX_6DB) + +#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \ + tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \ + MII_TG3_AUXCTL_ACTL_TX_6DB); + +static int tg3_phy_shdw_read(struct tg3 *tp, int reg, u32 *val) +{ + int err; + + err = tg3_writephy(tp, MII_TG3_MISC_SHDW, reg); + if (!err) + err = tg3_readphy(tp, MII_TG3_MISC_SHDW, val); + + return err; +} + +static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val) +{ + return tg3_writephy(tp, MII_TG3_MISC_SHDW, + reg | val | MII_TG3_MISC_SHDW_WREN); +} + +static int tg3_bmcr_reset(struct tg3 *tp) +{ + u32 phy_control; + int limit, err; + + /* OK, reset it, and poll the BMCR_RESET bit until it + * clears or we time out. 
+ */ + phy_control = BMCR_RESET; + err = tg3_writephy(tp, MII_BMCR, phy_control); + if (err != 0) + return -EBUSY; + + limit = 5000; + while (limit--) { + err = tg3_readphy(tp, MII_BMCR, &phy_control); + if (err != 0) + return -EBUSY; + + if ((phy_control & BMCR_RESET) == 0) { + udelay(40); + break; + } + udelay(10); + } + if (limit < 0) + return -EBUSY; + + return 0; +} + +#ifdef BCM_INCLUDE_PHYLIB_SUPPORT +static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg) +{ + struct tg3 *tp = bp->priv; + u32 val; + + spin_lock_bh(&tp->lock); + + if (tg3_readphy(tp, reg, &val)) + val = -EIO; + + spin_unlock_bh(&tp->lock); + + return val; +} + +static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val) +{ + struct tg3 *tp = bp->priv; u32 ret = 0; spin_lock_bh(&tp->lock); @@ -1036,10 +1306,12 @@ static int tg3_mdio_reset(struct mii_bus *bp) { return 0; } +#endif /* BCM_INCLUDE_PHYLIB_SUPPORT */ static void tg3_mdio_config_5785(struct tg3 *tp) { u32 val; +#ifdef BCM_INCLUDE_PHYLIB_SUPPORT struct phy_device *phydev; phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; @@ -1073,8 +1345,23 @@ static void tg3_mdio_config_5785(struct tg3 *tp) return; } +#else + if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCMAC131) { + tw32(MAC_PHYCFG2, MAC_PHYCFG2_AC131_LED_MODES); + + val = tr32(MAC_PHYCFG1); + val &= ~(MAC_PHYCFG1_RGMII_INT | + MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK); + val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT; + tw32(MAC_PHYCFG1, val); + + return; + } - if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) + val = MAC_PHYCFG2_50610_LED_MODES; +#endif /* BCM_INCLUDE_PHYLIB_SUPPORT */ + + if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) val |= MAC_PHYCFG2_EMODE_MASK_MASK | MAC_PHYCFG2_FMODE_MASK_MASK | MAC_PHYCFG2_GMODE_MASK_MASK | @@ -1087,10 +1374,10 @@ static void tg3_mdio_config_5785(struct tg3 *tp) val = tr32(MAC_PHYCFG1); val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK | MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN); - if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) { - if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN) + if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) { + if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN)) val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC; - if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN) + if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN)) val |= MAC_PHYCFG1_RGMII_SND_STAT_EN; } val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT | @@ -1105,13 +1392,13 @@ static void tg3_mdio_config_5785(struct tg3 *tp) MAC_RGMII_MODE_TX_ENABLE | MAC_RGMII_MODE_TX_LOWPWR | MAC_RGMII_MODE_TX_RESET); - if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) { - if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN) + if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) { + if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN)) val |= MAC_RGMII_MODE_RX_INT_B | MAC_RGMII_MODE_RX_QUALITY | MAC_RGMII_MODE_RX_ACTIVITY | MAC_RGMII_MODE_RX_ENG_DET; - if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN) + if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN)) val |= MAC_RGMII_MODE_TX_ENABLE | MAC_RGMII_MODE_TX_LOWPWR | MAC_RGMII_MODE_TX_RESET; @@ -1125,21 +1412,46 @@ static void tg3_mdio_start(struct tg3 *tp) tw32_f(MAC_MI_MODE, tp->mi_mode); udelay(80); - if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) && +#ifdef BCM_INCLUDE_PHYLIB_SUPPORT + if (tg3_flag(tp, MDIOBUS_INITED) && GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) tg3_mdio_config_5785(tp); +#else + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) + return; + + tg3_mdio_config_5785(tp); + + if 
(!(tp->phy_flags & TG3_PHYFLG_IS_FET)) { + u32 val; + + /* FIXME -- This shouldn't be required, but without + * it, the device will not pass traffic until + * the phy is reset via a link up event or + * through a change in speed settings. + */ + tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val); + if (tg3_flag(tp, RGMII_INBAND_DISABLE)) + val |= MII_TG3_AUXCTL_MISC_RGMII_OOBSC; + else + val &= ~MII_TG3_AUXCTL_MISC_RGMII_OOBSC; + tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, val); + } +#endif /* BCM_INCLUDE_PHYLIB_SUPPORT */ } static int tg3_mdio_init(struct tg3 *tp) { +#ifdef BCM_INCLUDE_PHYLIB_SUPPORT int i; u32 reg; struct phy_device *phydev; +#endif - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { + if (tg3_flag(tp, 5717_PLUS)) { u32 is_serdes; - tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1; + tp->phy_addr = tp->pci_fn + 1; if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES; @@ -1153,8 +1465,8 @@ static int tg3_mdio_init(struct tg3 *tp) tg3_mdio_start(tp); - if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) || - (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED)) +#ifdef BCM_INCLUDE_PHYLIB_SUPPORT + if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED)) return 0; tp->mdio_bus = mdiobus_alloc(); @@ -1217,11 +1529,11 @@ static int tg3_mdio_init(struct tg3 *tp) PHY_BRCM_RX_REFCLK_UNUSED | PHY_BRCM_DIS_TXCRXC_NOENRGY | PHY_BRCM_AUTO_PWRDWN_ENABLE; - if (tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE) + if (tg3_flag(tp, RGMII_INBAND_DISABLE)) phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE; - if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN) + if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN)) phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE; - if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN) + if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN)) phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE; /* fallthru */ case PHY_ID_RTL8211C: @@ -1231,118 +1543,29 @@ static int tg3_mdio_init(struct tg3 *tp) case PHY_ID_BCMAC131: phydev->interface = PHY_INTERFACE_MODE_MII; phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE; - tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET; + tp->phy_flags |= TG3_PHYFLG_IS_FET; break; } - tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED; + tg3_flag_set(tp, MDIOBUS_INITED); if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) tg3_mdio_config_5785(tp); +#endif /* BCM_INCLUDE_PHYLIB_SUPPORT */ return 0; } static void tg3_mdio_fini(struct tg3 *tp) { - if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) { - tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED; +#ifdef BCM_INCLUDE_PHYLIB_SUPPORT + if (tg3_flag(tp, MDIOBUS_INITED)) { + tg3_flag_clear(tp, MDIOBUS_INITED); mdiobus_unregister(tp->mdio_bus); mdiobus_free(tp->mdio_bus); } -} -#else -static void tg3_mdio_start(struct tg3 *tp) -{ - u32 val; - - tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL; - tw32_f(MAC_MI_MODE, tp->mi_mode); - udelay(80); - - if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) - return; - - if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCMAC131) { - tw32(MAC_PHYCFG2, MAC_PHYCFG2_AC131_LED_MODES); - - val = tr32(MAC_PHYCFG1); - val &= ~(MAC_PHYCFG1_RGMII_INT | - MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK); - val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT; - tw32(MAC_PHYCFG1, val); - - return; - } - - val = MAC_PHYCFG2_50610_LED_MODES; - - if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) - val |= MAC_PHYCFG2_EMODE_MASK_MASK | - MAC_PHYCFG2_FMODE_MASK_MASK | - MAC_PHYCFG2_GMODE_MASK_MASK | - MAC_PHYCFG2_ACT_MASK_MASK | - MAC_PHYCFG2_QUAL_MASK_MASK | - 
MAC_PHYCFG2_INBAND_ENABLE; - - tw32(MAC_PHYCFG2, val); - - val = tr32(MAC_PHYCFG1); - val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK | - MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN); - if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) { - if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN) - val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC; - if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN) - val |= MAC_PHYCFG1_RGMII_SND_STAT_EN; - } - val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT | - MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV; - tw32(MAC_PHYCFG1, val); - - val = tr32(MAC_EXT_RGMII_MODE); - val &= ~(MAC_RGMII_MODE_RX_INT_B | - MAC_RGMII_MODE_RX_QUALITY | - MAC_RGMII_MODE_RX_ACTIVITY | - MAC_RGMII_MODE_RX_ENG_DET | - MAC_RGMII_MODE_TX_ENABLE | - MAC_RGMII_MODE_TX_LOWPWR | - MAC_RGMII_MODE_TX_RESET); - if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) { - if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN) - val |= MAC_RGMII_MODE_RX_INT_B | - MAC_RGMII_MODE_RX_QUALITY | - MAC_RGMII_MODE_RX_ACTIVITY | - MAC_RGMII_MODE_RX_ENG_DET; - if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN) - val |= MAC_RGMII_MODE_TX_ENABLE | - MAC_RGMII_MODE_TX_LOWPWR | - MAC_RGMII_MODE_TX_RESET; - } - tw32(MAC_EXT_RGMII_MODE, val); -} -static int tg3_mdio_init(struct tg3 *tp) -{ - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { - u32 is_serdes; - - tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1; - - if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) - is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES; - else - is_serdes = tr32(TG3_CPMU_PHY_STRAP) & - TG3_CPMU_PHY_STRAP_IS_SERDES; - if (is_serdes) - tp->phy_addr += 7; - } else - tp->phy_addr = TG3_PHY_MII_ADDR; - - tg3_mdio_start(tp); - return 0; -} -#define tg3_mdio_fini(tp) #endif /* BCM_INCLUDE_PHYLIB_SUPPORT */ +} /* tp->lock is held. */ static inline void tg3_generate_fw_event(struct tg3 *tp) @@ -1391,8 +1614,7 @@ static void tg3_ump_link_report(struct tg3 *tp) u32 reg; u32 val; - if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) || - !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) + if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF)) return; tg3_wait_for_event_ack(tp); @@ -1416,7 +1638,7 @@ static void tg3_ump_link_report(struct tg3 *tp) tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val); val = 0; - if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) { + if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) { if (!tg3_readphy(tp, MII_CTRL1000, ®)) val = reg << 16; if (!tg3_readphy(tp, MII_STAT1000, ®)) @@ -1433,52 +1655,198 @@ static void tg3_ump_link_report(struct tg3 *tp) tg3_generate_fw_event(tp); } -static void tg3_link_report(struct tg3 *tp) +/* tp->lock is held. */ +static void tg3_stop_fw(struct tg3 *tp) { - if (!netif_carrier_ok(tp->dev)) { - netif_info(tp, link, tp->dev, "Link is down\n"); - tg3_ump_link_report(tp); - } else if (netif_msg_link(tp)) { - netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n", - (tp->link_config.active_speed == SPEED_1000 ? - 1000 : - (tp->link_config.active_speed == SPEED_100 ? - 100 : 10)), - (tp->link_config.active_duplex == DUPLEX_FULL ? - "full" : "half")); - - netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n", - (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ? - "on" : "off", - (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ? - "on" : "off"); - tg3_ump_link_report(tp); - } -} + if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) { + /* Wait for RX cpu to ACK the previous event. 
*/ + tg3_wait_for_event_ack(tp); -static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl) -{ - u16 miireg; + tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW); - if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX)) - miireg = ADVERTISE_PAUSE_CAP; - else if (flow_ctrl & FLOW_CTRL_TX) - miireg = ADVERTISE_PAUSE_ASYM; - else if (flow_ctrl & FLOW_CTRL_RX) - miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; - else - miireg = 0; + tg3_generate_fw_event(tp); - return miireg; + /* Wait for RX cpu to ACK this event. */ + tg3_wait_for_event_ack(tp); + } } -static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl) +/* tp->lock is held. */ +static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind) { - u16 miireg; + tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX, + NIC_SRAM_FIRMWARE_MBOX_MAGIC1); - if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX)) - miireg = ADVERTISE_1000XPAUSE; - else if (flow_ctrl & FLOW_CTRL_TX) + if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) { + switch (kind) { + case RESET_KIND_INIT: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_START); + break; + + case RESET_KIND_SHUTDOWN: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_UNLOAD); + break; + + case RESET_KIND_SUSPEND: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_SUSPEND); + break; + + default: + break; + } + } + + if (kind == RESET_KIND_INIT || + kind == RESET_KIND_SUSPEND) + tg3_ape_driver_state_change(tp, kind); +} + +/* tp->lock is held. */ +static void tg3_write_sig_post_reset(struct tg3 *tp, int kind) +{ + if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) { + switch (kind) { + case RESET_KIND_INIT: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_START_DONE); + break; + + case RESET_KIND_SHUTDOWN: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_UNLOAD_DONE); + break; + + default: + break; + } + } + + if (kind == RESET_KIND_SHUTDOWN) + tg3_ape_driver_state_change(tp, kind); +} + +/* tp->lock is held. */ +static void tg3_write_sig_legacy(struct tg3 *tp, int kind) +{ + if (tg3_flag(tp, ENABLE_ASF)) { + switch (kind) { + case RESET_KIND_INIT: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_START); + break; + + case RESET_KIND_SHUTDOWN: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_UNLOAD); + break; + + case RESET_KIND_SUSPEND: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_SUSPEND); + break; + + default: + break; + } + } +} + +static int tg3_poll_fw(struct tg3 *tp) +{ + int i; + u32 val; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + /* Wait up to 20ms for init done. */ + for (i = 0; i < 200; i++) { + if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE) + return 0; + udelay(100); + } + return -ENODEV; + } + + /* Wait for firmware initialization to complete. */ + for (i = 0; i < 100000; i++) { + tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val); + if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1) + break; + udelay(10); + } + + /* Chip might not be fitted with firmware. Some Sun onboard + * parts are configured like that. So don't signal the timeout + * of the above loop as an error, but do report the lack of + * running firmware once. + */ + if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) { + tg3_flag_set(tp, NO_FWARE_REPORTED); + + netdev_info(tp->dev, "No firmware running\n"); + } + + if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) { + /* The 57765 A0 needs a little more + * time to do some important work. 
+ */ + mdelay(10); + } + + return 0; +} + +static void tg3_link_report(struct tg3 *tp) +{ + if (!netif_carrier_ok(tp->dev)) { + netif_info(tp, link, tp->dev, "Link is down\n"); + tg3_ump_link_report(tp); + } else if (netif_msg_link(tp)) { + netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n", + (tp->link_config.active_speed == SPEED_1000 ? + 1000 : + (tp->link_config.active_speed == SPEED_100 ? + 100 : 10)), + (tp->link_config.active_duplex == DUPLEX_FULL ? + "full" : "half")); + + netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n", + (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ? + "on" : "off", + (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ? + "on" : "off"); + + if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) + netdev_info(tp->dev, "EEE is %s\n", + tp->setlpicnt ? "enabled" : "disabled"); + + tg3_ump_link_report(tp); + } +} + +static u32 tg3_decode_flowctrl_1000T(u32 adv) +{ + u32 flowctrl = 0; + + if (adv & ADVERTISE_PAUSE_CAP) { + flowctrl |= FLOW_CTRL_RX; + if (!(adv & ADVERTISE_PAUSE_ASYM)) + flowctrl |= FLOW_CTRL_TX; + } else if (adv & ADVERTISE_PAUSE_ASYM) + flowctrl |= FLOW_CTRL_TX; + + return flowctrl; +} + +static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl) +{ + u16 miireg; + + if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX)) + miireg = ADVERTISE_1000XPAUSE; + else if (flow_ctrl & FLOW_CTRL_TX) miireg = ADVERTISE_1000XPSE_ASYM; else if (flow_ctrl & FLOW_CTRL_RX) miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM; @@ -1488,22 +1856,30 @@ static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl) return miireg; } +static u32 tg3_decode_flowctrl_1000X(u32 adv) +{ + u32 flowctrl = 0; + + if (adv & ADVERTISE_1000XPAUSE) { + flowctrl |= FLOW_CTRL_RX; + if (!(adv & ADVERTISE_1000XPSE_ASYM)) + flowctrl |= FLOW_CTRL_TX; + } else if (adv & ADVERTISE_1000XPSE_ASYM) + flowctrl |= FLOW_CTRL_TX; + + return flowctrl; +} + static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv) { u8 cap = 0; - if (lcladv & ADVERTISE_1000XPAUSE) { - if (lcladv & ADVERTISE_1000XPSE_ASYM) { - if (rmtadv & LPA_1000XPAUSE) - cap = FLOW_CTRL_TX | FLOW_CTRL_RX; - else if (rmtadv & LPA_1000XPAUSE_ASYM) - cap = FLOW_CTRL_RX; - } else { - if (rmtadv & LPA_1000XPAUSE) - cap = FLOW_CTRL_TX | FLOW_CTRL_RX; - } - } else if (lcladv & ADVERTISE_1000XPSE_ASYM) { - if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM)) + if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) { + cap = FLOW_CTRL_TX | FLOW_CTRL_RX; + } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) { + if (lcladv & ADVERTISE_1000XPAUSE) + cap = FLOW_CTRL_RX; + if (rmtadv & ADVERTISE_1000XPAUSE) cap = FLOW_CTRL_TX; } @@ -1518,15 +1894,14 @@ static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv) u32 old_tx_mode = tp->tx_mode; #ifdef BCM_INCLUDE_PHYLIB_SUPPORT - if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) + if (tg3_flag(tp, USE_PHYLIB)) autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg; else #endif /* BCM_INCLUDE_PHYLIB_SUPPORT */ autoneg = tp->link_config.autoneg; - if (autoneg == AUTONEG_ENABLE && - (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) { - if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) + if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) { + if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv); else flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv); @@ -1582,7 +1957,7 @@ static void tg3_adjust_link(struct net_device *dev) if (phydev->duplex == DUPLEX_HALF) mac_mode |= MAC_MODE_HALF_DUPLEX; else { - lcl_adv = tg3_advert_flowctrl_1000T( + 
lcl_adv = mii_advertise_flowctrl( tp->link_config.flowctrl); if (phydev->pause) @@ -1641,7 +2016,7 @@ static int tg3_phy_init(struct tg3 *tp) { struct phy_device *phydev; - if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) + if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) return 0; /* Bring the PHY back to a known state. */ @@ -1661,7 +2036,7 @@ static int tg3_phy_init(struct tg3 *tp) switch (phydev->interface) { case PHY_INTERFACE_MODE_GMII: case PHY_INTERFACE_MODE_RGMII: - if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) { + if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { phydev->supported &= (PHY_GBIT_FEATURES | SUPPORTED_Pause | SUPPORTED_Asym_Pause); @@ -1678,7 +2053,7 @@ static int tg3_phy_init(struct tg3 *tp) return -EINVAL; } - tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED; + tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED; phydev->advertising = phydev->supported; @@ -1689,13 +2064,13 @@ static void tg3_phy_start(struct tg3 *tp) { struct phy_device *phydev; - if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) + if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) return; phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; - if (tp->link_config.phy_is_low_power) { - tp->link_config.phy_is_low_power = 0; + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { + tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER; phydev->speed = tp->link_config.orig_speed; phydev->duplex = tp->link_config.orig_duplex; phydev->autoneg = tp->link_config.orig_autoneg; @@ -1709,7 +2084,7 @@ static void tg3_phy_start(struct tg3 *tp) static void tg3_phy_stop(struct tg3 *tp) { - if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) + if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) return; phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); @@ -1717,9 +2092,9 @@ static void tg3_phy_stop(struct tg3 *tp) static void tg3_phy_fini(struct tg3 *tp) { - if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) { + if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); - tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED; + tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED; } } #else @@ -1729,21 +2104,35 @@ static void tg3_phy_fini(struct tg3 *tp) #define tg3_phy_fini(tp) #endif /* BCM_INCLUDE_PHYLIB_SUPPORT */ -static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val) +static int tg3_phy_set_extloopbk(struct tg3 *tp) { - tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg); - tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val); -} + int err; + u32 val; -#ifndef BCM_INCLUDE_PHYLIB_SUPPORT -static void tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val) -{ - tg3_writephy(tp, 0x000d, devad); - tg3_writephy(tp, 0x000e, addr); - tg3_writephy(tp, 0x000d, 0x4000 | devad); - tg3_writephy(tp, 0x000e, val); + if (tp->phy_flags & TG3_PHYFLG_IS_FET) + return 0; + + if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { + /* Cannot do read-modify-write on 5401 */ + err = tg3_phy_auxctl_write(tp, + MII_TG3_AUXCTL_SHDWSEL_AUXCTL, + MII_TG3_AUXCTL_ACTL_EXTLOOPBK | + 0x4c20); + goto done; + } + + err = tg3_phy_auxctl_read(tp, + MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val); + if (err) + return err; + + val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK; + err = tg3_phy_auxctl_write(tp, + MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val); + +done: + return err; } -#endif /* BCM_INCLUDE_PHYLIB_SUPPORT */ static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable) { @@ -1769,21 +2158,19 @@ static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable) { u32 reg; - if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || - (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 && - (tp->tg3_flags2 & 
TG3_FLG2_MII_SERDES))) + if (!tg3_flag(tp, 5705_PLUS) || + (tg3_flag(tp, 5717_PLUS) && + (tp->phy_flags & TG3_PHYFLG_MII_SERDES))) return; - if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) { + if (tp->phy_flags & TG3_PHYFLG_IS_FET) { tg3_phy_fet_toggle_apd(tp, enable); return; } #ifndef BCM_INCLUDE_PHYLIB_SUPPORT if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) { - reg = MII_TG3_MISC_SHDW_WREN | - MII_TG3_MISC_SHDW_SCR5_SEL | - MII_TG3_MISC_SHDW_SCR5_TRDDAPD | + reg = MII_TG3_MISC_SHDW_SCR5_TRDDAPD | MII_TG3_MISC_SHDW_SCR5_LPED | MII_TG3_MISC_SHDW_SCR5_DLPTLM | MII_TG3_MISC_SHDW_SCR5_SDTL; @@ -1794,9 +2181,7 @@ static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable) } } else { #endif - reg = MII_TG3_MISC_SHDW_WREN | - MII_TG3_MISC_SHDW_SCR5_SEL | - MII_TG3_MISC_SHDW_SCR5_LPED | + reg = MII_TG3_MISC_SHDW_SCR5_LPED | MII_TG3_MISC_SHDW_SCR5_DLPTLM | MII_TG3_MISC_SHDW_SCR5_SDTL | MII_TG3_MISC_SHDW_SCR5_C125OE; @@ -1806,27 +2191,25 @@ static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable) } #endif - tg3_writephy(tp, MII_TG3_MISC_SHDW, reg); + tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg); - reg = MII_TG3_MISC_SHDW_WREN | - MII_TG3_MISC_SHDW_APD_SEL | - MII_TG3_MISC_SHDW_APD_WKTM_84MS; + reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS; if (enable) reg |= MII_TG3_MISC_SHDW_APD_ENABLE; - tg3_writephy(tp, MII_TG3_MISC_SHDW, reg); + tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg); } static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable) { u32 phy; - if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || - (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) + if (!tg3_flag(tp, 5705_PLUS) || + (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) return; - if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) { + if (tp->phy_flags & TG3_PHYFLG_IS_FET) { u32 ephy; if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) { @@ -1844,31 +2227,33 @@ static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable) tg3_writephy(tp, MII_TG3_FET_TEST, ephy); } } else { - phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC | - MII_TG3_AUXCTL_SHDWSEL_MISC; - if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) && - !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) { + int ret; + + ret = tg3_phy_auxctl_read(tp, + MII_TG3_AUXCTL_SHDWSEL_MISC, &phy); + if (!ret) { if (enable) phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX; else phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX; - phy |= MII_TG3_AUXCTL_MISC_WREN; - tg3_writephy(tp, MII_TG3_AUX_CTRL, phy); + tg3_phy_auxctl_write(tp, + MII_TG3_AUXCTL_SHDWSEL_MISC, phy); } } } static void tg3_phy_set_wirespeed(struct tg3 *tp) { + int ret; u32 val; - if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) + if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) return; - if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) && - !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val)) - tg3_writephy(tp, MII_TG3_AUX_CTRL, - (val | (1 << 15) | (1 << 4))); + ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val); + if (!ret) + tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, + val | MII_TG3_AUXCTL_MISC_WIRESPD_EN); } static void tg3_phy_apply_otp(struct tg3 *tp) @@ -1880,11 +2265,8 @@ static void tg3_phy_apply_otp(struct tg3 *tp) otp = tp->phy_otp; - /* Enable SM_DSP clock and tx 6dB coding. 
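 * (Throughout this patch the open-coded AUXCTL writes that toggled
 * the SM_DSP clock are replaced by the paired
 * TG3_PHY_AUXCTL_SMDSP_ENABLE()/TG3_PHY_AUXCTL_SMDSP_DISABLE()
 * helpers, as in the new code just below.)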
*/ - phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL | - MII_TG3_AUXCTL_ACTL_SMDSP_ENA | - MII_TG3_AUXCTL_ACTL_TX_6DB; - tg3_writephy(tp, MII_TG3_AUX_CTRL, phy); + if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) + return; phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT); phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT; @@ -1908,10 +2290,74 @@ static void tg3_phy_apply_otp(struct tg3 *tp) ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT); tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy); - /* Turn off SM_DSP clock. */ - phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL | - MII_TG3_AUXCTL_ACTL_TX_6DB; - tg3_writephy(tp, MII_TG3_AUX_CTRL, phy); + TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); +} + +static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up) +{ + u32 val; + + if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) + return; + +#ifndef BCM_INCLUDE_PHYLIB_SUPPORT + if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM50612E) + return; +#endif + + tp->setlpicnt = 0; + + if (tp->link_config.autoneg == AUTONEG_ENABLE && + current_link_up == 1 && + tp->link_config.active_duplex == DUPLEX_FULL && + (tp->link_config.active_speed == SPEED_100 || + tp->link_config.active_speed == SPEED_1000)) { + u32 eeectl; + + if (tp->link_config.active_speed == SPEED_1000) + eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US; + else + eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US; + + tw32(TG3_CPMU_EEE_CTRL, eeectl); + + tg3_phy_cl45_read(tp, MDIO_MMD_AN, + TG3_CL45_D7_EEERES_STAT, &val); + + if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T || + val == TG3_CL45_D7_EEERES_STAT_LP_100TX) + tp->setlpicnt = 2; + } + + if (!tp->setlpicnt) { + if (current_link_up == 1 && + !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { + tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000); + TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + } + + val = tr32(TG3_CPMU_EEE_MODE); + tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE); + } +} + +static void tg3_phy_eee_enable(struct tg3 *tp) +{ + u32 val; + + if (tp->link_config.active_speed == SPEED_1000 && + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + tg3_flag(tp, 57765_CLASS)) && + !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { + val = MII_TG3_DSP_TAP26_ALNOKO | + MII_TG3_DSP_TAP26_RMRXSTO; + tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val); + TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + } + + val = tr32(TG3_CPMU_EEE_MODE); + tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE); } static int tg3_wait_macro_done(struct tg3 *tp) @@ -1921,7 +2367,7 @@ static int tg3_wait_macro_done(struct tg3 *tp) while (limit--) { u32 tmp32; - if (!tg3_readphy(tp, 0x16, &tmp32)) { + if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) { if ((tmp32 & 0x1000) == 0) break; } @@ -1947,13 +2393,13 @@ static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp) tg3_writephy(tp, MII_TG3_DSP_ADDRESS, (chan * 0x2000) | 0x0200); - tg3_writephy(tp, 0x16, 0x0002); + tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002); for (i = 0; i < 6; i++) tg3_writephy(tp, MII_TG3_DSP_RW_PORT, test_pat[chan][i]); - tg3_writephy(tp, 0x16, 0x0202); + tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202); if (tg3_wait_macro_done(tp)) { *resetp = 1; return -EBUSY; @@ -1961,13 +2407,13 @@ static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp) tg3_writephy(tp, MII_TG3_DSP_ADDRESS, (chan * 0x2000) | 0x0200); - tg3_writephy(tp, 0x16, 0x0082); + tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082); if (tg3_wait_macro_done(tp)) { *resetp = 1; return -EBUSY; } - tg3_writephy(tp, 0x16, 0x0802); + tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802); if (tg3_wait_macro_done(tp)) { *resetp = 
1; return -EBUSY; @@ -2007,10 +2453,10 @@ static int tg3_phy_reset_chanpat(struct tg3 *tp) tg3_writephy(tp, MII_TG3_DSP_ADDRESS, (chan * 0x2000) | 0x0200); - tg3_writephy(tp, 0x16, 0x0002); + tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002); for (i = 0; i < 6; i++) tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000); - tg3_writephy(tp, 0x16, 0x0202); + tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202); if (tg3_wait_macro_done(tp)) return -EBUSY; } @@ -2042,22 +2488,21 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp) /* Set full-duplex, 1000 mbps. */ tg3_writephy(tp, MII_BMCR, - BMCR_FULLDPLX | TG3_BMCR_SPEED1000); + BMCR_FULLDPLX | BMCR_SPEED1000); /* Set to master mode. */ - if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig)) + if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig)) continue; - tg3_writephy(tp, MII_TG3_CTRL, - (MII_TG3_CTRL_AS_MASTER | - MII_TG3_CTRL_ENABLE_AS_MASTER)); + tg3_writephy(tp, MII_CTRL1000, + CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER); - /* Enable SM_DSP_CLOCK and 6dB. */ - tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00); + err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp); + if (err) + return err; /* Block the PHY control access. */ - tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005); - tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800); + tg3_phydsp_write(tp, 0x8005, 0x0800); err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset); if (!err) @@ -2068,21 +2513,14 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp) if (err) return err; - tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005); - tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000); + tg3_phydsp_write(tp, 0x8005, 0x0000); tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200); - tg3_writephy(tp, 0x16, 0x0000); + tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { - /* Set Extended packet length bit for jumbo frames */ - tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400); - } else { - tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400); - } + TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); - tg3_writephy(tp, MII_TG3_CTRL, phy9_orig); + tg3_writephy(tp, MII_CTRL1000, phy9_orig); if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) { reg32 &= ~0x3000; @@ -2098,19 +2536,16 @@ static int tg3_phy_reset(struct tg3 *tp) */ static int tg3_phy_reset(struct tg3 *tp) { - u32 cpmuctrl; - u32 phy_status; + u32 val, cpmuctrl; int err; if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { - u32 val; - val = tr32(GRC_MISC_CFG); tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ); udelay(40); } - err = tg3_readphy(tp, MII_BMSR, &phy_status); - err |= tg3_readphy(tp, MII_BMSR, &phy_status); + err = tg3_readphy(tp, MII_BMSR, &val); + err |= tg3_readphy(tp, MII_BMSR, &val); if (err != 0) return -EBUSY; @@ -2142,18 +2577,14 @@ static int tg3_phy_reset(struct tg3 *tp) return err; if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) { - u32 phy; - - phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz; - tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy); + val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz; + tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val); tw32(TG3_CPMU_CTRL, cpmuctrl); } if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX || GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) { - u32 val; - val = tr32(TG3_CPMU_LSPD_1000MB_CLK); if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) == CPMU_LSPD_1000MB_MACCLK_12_5) { @@ -2163,150 +2594,128 @@ static int tg3_phy_reset(struct tg3 *tp) } } - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 && - (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) + if (tg3_flag(tp, 
5717_PLUS) && + (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) return 0; tg3_phy_apply_otp(tp); - if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD) + if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) tg3_phy_toggle_apd(tp, true); else tg3_phy_toggle_apd(tp, false); out: #ifndef BCM_INCLUDE_PHYLIB_SUPPORT - /* A0 */ - if (tp->phy_id == TG3_PHY_ID_BCM50612E) { - u32 reg; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 && + (tp->phy_id & TG3_PHY_ID_MASK) != TG3_PHY_ID_BCMAC131) { + /* A0 */ + if (tp->phy_id == TG3_PHY_ID_BCM50612E && + !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { + tg3_phydsp_write(tp, 0x0fff, 0x4000); + tg3_phydsp_write(tp, 0x0021, 0x4600); + TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + } + + if (((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM50610 || + (tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM50610M) && + !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { + val = MII_TG3_DSP_EXP8_REJ2MHz; + tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val); + + /* Apply workaround to A0 revision parts only. */ + if (tp->phy_id == TG3_PHY_ID_BCM50610 || + tp->phy_id == TG3_PHY_ID_BCM50610M) { + tg3_phydsp_write(tp, 0x001F, 0x0300); + tg3_phydsp_write(tp, 0x601F, 0x0002); + tg3_phydsp_write(tp, 0x0F75, 0x003C); + tg3_phydsp_write(tp, 0x0F96, 0x0010); + tg3_phydsp_write(tp, 0x0F97, 0x0C0C); + } - /* Enable SM_DSP clock and tx 6dB coding. */ - reg = MII_TG3_AUXCTL_SHDWSEL_AUXCTL | - MII_TG3_AUXCTL_ACTL_SMDSP_ENA | - MII_TG3_AUXCTL_ACTL_TX_6DB; - tg3_writephy(tp, MII_TG3_AUX_CTRL, reg); + TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + } - tg3_phydsp_write(tp, 0x0fff, 0x4000); - tg3_phydsp_write(tp, 0x0021, 0x4600); + tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val); + if (tg3_flag(tp, RGMII_INBAND_DISABLE)) + val |= MII_TG3_AUXCTL_MISC_RGMII_OOBSC; + else + val &= ~MII_TG3_AUXCTL_MISC_RGMII_OOBSC; + tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, val); - /* Turn off SM_DSP clock. */ - reg = MII_TG3_AUXCTL_SHDWSEL_AUXCTL | - MII_TG3_AUXCTL_ACTL_TX_6DB; - tg3_writephy(tp, MII_TG3_AUX_CTRL, reg); /* Clear all mode configuration bits. */ - reg = MII_TG3_MISC_SHDW_WREN | - MII_TG3_MISC_SHDW_RGMII_SEL; - tg3_writephy(tp, MII_TG3_MISC_SHDW, reg); + if (!tg3_phy_shdw_read(tp, MII_TG3_MISC_SHDW_RGMII_SEL, &val)) { + val &= ~(MII_TG3_MISC_SHDW_RGMII_MODESEL0 | + MII_TG3_MISC_SHDW_RGMII_MODESEL1); + tg3_phy_shdw_write(tp, + MII_TG3_MISC_SHDW_RGMII_SEL, val); + } } - if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM50610 || - (tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM50610M) { - u32 reg; + if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM57780 && + !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, MII_TG3_DSP_EXP75); + tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &val); + val |= MII_TG3_DSP_EXP75_SUP_CM_OSC; + tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, val); - /* Enable SM_DSP clock and tx 6dB coding. */ - reg = MII_TG3_AUXCTL_SHDWSEL_AUXCTL | - MII_TG3_AUXCTL_ACTL_SMDSP_ENA | - MII_TG3_AUXCTL_ACTL_TX_6DB; - tg3_writephy(tp, MII_TG3_AUX_CTRL, reg); + TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + } +#endif + if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) && + !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { + tg3_phydsp_write(tp, 0x201f, 0x2aaa); + tg3_phydsp_write(tp, 0x000a, 0x0323); + TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + } - reg = MII_TG3_DSP_EXP8_REJ2MHz; - tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, reg); + if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) { + tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); + tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); + } - /* Apply workaround to A0 revision parts only. 
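 * (A0 revisions are detected by an exact phy_id comparison, without
 * TG3_PHY_ID_MASK, so later steppings skip these DSP writes.)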
*/ - if (tp->phy_id == TG3_PHY_ID_BCM50610 || - tp->phy_id == TG3_PHY_ID_BCM50610M) { - tg3_phydsp_write(tp, 0x001F, 0x0300); - tg3_phydsp_write(tp, 0x601F, 0x0002); - tg3_phydsp_write(tp, 0x0F75, 0x003C); - tg3_phydsp_write(tp, 0x0F96, 0x0010); - tg3_phydsp_write(tp, 0x0F97, 0x0C0C); + if (tp->phy_flags & TG3_PHYFLG_BER_BUG) { + if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { + tg3_phydsp_write(tp, 0x000a, 0x310b); + tg3_phydsp_write(tp, 0x201f, 0x9506); + tg3_phydsp_write(tp, 0x401f, 0x14e2); + TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); } + } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) { + if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a); + if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) { + tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b); + tg3_writephy(tp, MII_TG3_TEST1, + MII_TG3_TEST1_TRIM_EN | 0x4); + } else + tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b); - /* Turn off SM_DSP clock. */ - reg = MII_TG3_AUXCTL_SHDWSEL_AUXCTL | - MII_TG3_AUXCTL_ACTL_TX_6DB; - tg3_writephy(tp, MII_TG3_AUX_CTRL, reg); - - /* Clear all mode configuration bits. */ - reg = MII_TG3_MISC_SHDW_WREN | - MII_TG3_MISC_SHDW_RGMII_SEL; - tg3_writephy(tp, MII_TG3_MISC_SHDW, reg); + TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + } } - if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM57780) { - u32 reg; - /* Enable SM_DSP clock and tx 6dB coding. */ - reg = MII_TG3_AUXCTL_SHDWSEL_AUXCTL | - MII_TG3_AUXCTL_ACTL_SMDSP_ENA | - MII_TG3_AUXCTL_ACTL_TX_6DB; - tg3_writephy(tp, MII_TG3_AUX_CTRL, reg); - - tg3_writephy(tp, MII_TG3_DSP_ADDRESS, MII_TG3_DSP_EXP75); - tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &reg); - reg |= MII_TG3_DSP_EXP75_SUP_CM_OSC; - tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, reg); - - /* Turn off SM_DSP clock. */ - reg = MII_TG3_AUXCTL_SHDWSEL_AUXCTL | - MII_TG3_AUXCTL_ACTL_TX_6DB; - tg3_writephy(tp, MII_TG3_AUX_CTRL, reg); - } -#endif - if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) { - tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00); - tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f); - tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa); - tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a); - tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323); - tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400); - } - if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) { - tg3_writephy(tp, 0x1c, 0x8d68); - tg3_writephy(tp, 0x1c, 0x8d68); - } - if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) { - tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00); - tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a); - tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b); - tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f); - tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506); - tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f); - tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2); - tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400); - } else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) { - tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00); - tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a); - if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) { - tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b); - tg3_writephy(tp, MII_TG3_TEST1, - MII_TG3_TEST1_TRIM_EN | 0x4); - } else - tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b); - tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400); - } /* Set Extended packet length bit (bit 14) on all chips that */ /* support jumbo frames */ if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { /* Cannot do read-modify-write on 5401 */ - tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20); - } else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { - u32 phy_reg; - + tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20); + 
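/* The single 0x4c20 write above sets the extended-packet-length bit
 * for the 5401, which cannot do the read-modify-write used for the
 * other JUMBO_CAPABLE chips below.
 */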
} else if (tg3_flag(tp, JUMBO_CAPABLE)) { /* Set bit 14 with read-modify-write to preserve other bits */ - if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) && - !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg)) - tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000); + err = tg3_phy_auxctl_read(tp, + MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val); + if (!err) + tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, + val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN); } /* Set phy register 0x10 bit 0 to high fifo elasticity to support * jumbo frames transmission. */ - if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { - u32 phy_reg; - - if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg)) + if (tg3_flag(tp, JUMBO_CAPABLE)) { + if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val)) tg3_writephy(tp, MII_TG3_EXT_CTRL, - phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC); + val | MII_TG3_EXT_CTRL_FIFO_ELASTIC); } if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { @@ -2314,12 +2723,12 @@ out: tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12); } #ifndef BCM_INCLUDE_PHYLIB_SUPPORT - else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) { + else if (tp->phy_flags & TG3_PHYFLG_IS_FET) { u32 brcmtest; if (!tg3_readphy(tp, MII_TG3_FET_TEST, &brcmtest) && !tg3_writephy(tp, MII_TG3_FET_TEST, brcmtest | MII_TG3_FET_SHADOW_EN)) { - u32 val, reg = MII_TG3_FET_SHDW_AUXMODE4; + u32 reg = MII_TG3_FET_SHDW_AUXMODE4; if (!tg3_readphy(tp, reg, &val)) { val &= ~MII_TG3_FET_SHDW_AM4_LED_MASK; @@ -2337,119 +2746,238 @@ out: return 0; } -static void tg3_frob_aux_power(struct tg3 *tp) +#define TG3_GPIO_MSG_DRVR_PRES 0x00000001 +#define TG3_GPIO_MSG_NEED_VAUX 0x00000002 +#define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \ + TG3_GPIO_MSG_NEED_VAUX) +#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \ + ((TG3_GPIO_MSG_DRVR_PRES << 0) | \ + (TG3_GPIO_MSG_DRVR_PRES << 4) | \ + (TG3_GPIO_MSG_DRVR_PRES << 8) | \ + (TG3_GPIO_MSG_DRVR_PRES << 12)) + +#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \ + ((TG3_GPIO_MSG_NEED_VAUX << 0) | \ + (TG3_GPIO_MSG_NEED_VAUX << 4) | \ + (TG3_GPIO_MSG_NEED_VAUX << 8) | \ + (TG3_GPIO_MSG_NEED_VAUX << 12)) + +static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat) { - struct tg3 *tp_peer = tp; + u32 status, shift; - /* The GPIOs do something completely different on 57765. */ - if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) - return; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) + status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG); + else + status = tr32(TG3_CPMU_DRV_STATUS); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { - struct net_device *dev_peer; + shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn; + status &= ~(TG3_GPIO_MSG_MASK << shift); + status |= (newstat << shift); - dev_peer = pci_get_drvdata(tp->pdev_peer); - /* remove_one() may have been run on the peer. 
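 * That is, pci_get_drvdata() can legitimately return NULL here; the
 * reworked code below simply skips the peer WOL/ASF checks then.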
*/ - if (!dev_peer) - tp_peer = tp; - else - tp_peer = netdev_priv(dev_peer); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) + tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status); + else + tw32(TG3_CPMU_DRV_STATUS, status); + + return status >> TG3_APE_GPIO_MSG_SHIFT; +} + +static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp) +{ + if (!tg3_flag(tp, IS_NIC)) + return 0; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { + if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO)) + return -EIO; + + tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES); + + tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + + tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO); + } else { + tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); } - if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 || - (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 || - (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 || - (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) { - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { - tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | - (GRC_LCLCTRL_GPIO_OE0 | + return 0; +} + +static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp) +{ + u32 grc_local_ctrl; + + if (!tg3_flag(tp, IS_NIC) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) + return; + + grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1; + + tw32_wait_f(GRC_LOCAL_CTRL, + grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1, + TG3_GRC_LCLCTL_PWRSW_DELAY); + + tw32_wait_f(GRC_LOCAL_CTRL, + grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + + tw32_wait_f(GRC_LOCAL_CTRL, + grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1, + TG3_GRC_LCLCTL_PWRSW_DELAY); +} + +static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp) +{ + if (!tg3_flag(tp, IS_NIC)) + return; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { + tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | + (GRC_LCLCTRL_GPIO_OE0 | + GRC_LCLCTRL_GPIO_OE1 | + GRC_LCLCTRL_GPIO_OE2 | + GRC_LCLCTRL_GPIO_OUTPUT0 | + GRC_LCLCTRL_GPIO_OUTPUT1), + TG3_GRC_LCLCTL_PWRSW_DELAY); + } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) { + /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */ + u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 | GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 | - GRC_LCLCTRL_GPIO_OUTPUT1), - 100); - } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || - tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) { - /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */ - u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 | - GRC_LCLCTRL_GPIO_OE1 | - GRC_LCLCTRL_GPIO_OE2 | - GRC_LCLCTRL_GPIO_OUTPUT0 | - GRC_LCLCTRL_GPIO_OUTPUT1 | - tp->grc_local_ctrl; - tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100); - - grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2; - tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100); - - grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0; - tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100); - } else { - u32 no_gpio2; - u32 grc_local_ctrl = 0; - - if (tp_peer != tp && - (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0) - return; - - /* Workaround to prevent overdrawing Amps. 
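 * (On 5714, GPIO3 output-enable is asserted first, with its own
 * settle delay, before the remaining GPIOs are driven.)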
*/ - if (GET_ASIC_REV(tp->pci_chip_rev_id) == - ASIC_REV_5714) { - grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; - tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | - grc_local_ctrl, 100); - } + GRC_LCLCTRL_GPIO_OUTPUT1 | + tp->grc_local_ctrl; + tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + + grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2; + tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + + grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0; + tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + } else { + u32 no_gpio2; + u32 grc_local_ctrl = 0; - /* On 5753 and variants, GPIO2 cannot be used. */ - no_gpio2 = tp->nic_sram_data_cfg & - NIC_SRAM_DATA_CFG_NO_GPIO2; - - grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 | - GRC_LCLCTRL_GPIO_OE1 | - GRC_LCLCTRL_GPIO_OE2 | - GRC_LCLCTRL_GPIO_OUTPUT1 | - GRC_LCLCTRL_GPIO_OUTPUT2; - if (no_gpio2) { - grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 | - GRC_LCLCTRL_GPIO_OUTPUT2); - } + /* Workaround to prevent overdrawing Amps. */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { + grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | - grc_local_ctrl, 100); + grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + } + + /* On 5753 and variants, GPIO2 cannot be used. */ + no_gpio2 = tp->nic_sram_data_cfg & + NIC_SRAM_DATA_CFG_NO_GPIO2; + + grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 | + GRC_LCLCTRL_GPIO_OE1 | + GRC_LCLCTRL_GPIO_OE2 | + GRC_LCLCTRL_GPIO_OUTPUT1 | + GRC_LCLCTRL_GPIO_OUTPUT2; + if (no_gpio2) { + grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 | + GRC_LCLCTRL_GPIO_OUTPUT2); + } + tw32_wait_f(GRC_LOCAL_CTRL, + tp->grc_local_ctrl | grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); - grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0; + grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0; - tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | - grc_local_ctrl, 100); + tw32_wait_f(GRC_LOCAL_CTRL, + tp->grc_local_ctrl | grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); - if (!no_gpio2) { - grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2; - tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | - grc_local_ctrl, 100); - } + if (!no_gpio2) { + grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2; + tw32_wait_f(GRC_LOCAL_CTRL, + tp->grc_local_ctrl | grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); } - } else { - if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) { - if (tp_peer != tp && - (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0) - return; + } +} - tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | - (GRC_LCLCTRL_GPIO_OE1 | - GRC_LCLCTRL_GPIO_OUTPUT1), 100); +static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable) +{ + u32 msg = 0; - tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | - GRC_LCLCTRL_GPIO_OE1, 100); + /* Serialize power state transitions */ + if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO)) + return; - tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | - (GRC_LCLCTRL_GPIO_OE1 | - GRC_LCLCTRL_GPIO_OUTPUT1), 100); + if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable) + msg = TG3_GPIO_MSG_NEED_VAUX; + + msg = tg3_set_function_status(tp, msg); + + if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK) + goto done; + + if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK) + tg3_pwrsrc_switch_to_vaux(tp); + else + tg3_pwrsrc_die_with_vmain(tp); + +done: + tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO); +} + +static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol) +{ + bool need_vaux = false; + + /* The GPIOs do something completely different on 
57765. */ + if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS)) + return; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { + tg3_frob_aux_power_5717(tp, include_wol ? + tg3_flag(tp, WOL_ENABLE) != 0 : 0); + return; + } + + if (tp->pdev_peer && tp->pdev_peer != tp->pdev) { + struct net_device *dev_peer; + + dev_peer = pci_get_drvdata(tp->pdev_peer); + + /* remove_one() may have been run on the peer. */ + if (dev_peer) { + struct tg3 *tp_peer = netdev_priv(dev_peer); + + if (tg3_flag(tp_peer, INIT_COMPLETE)) + return; + + if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) || + tg3_flag(tp_peer, ENABLE_ASF)) + need_vaux = true; } } + + if ((include_wol && tg3_flag(tp, WOL_ENABLE)) || + tg3_flag(tp, ENABLE_ASF)) + need_vaux = true; + + if (need_vaux) + tg3_pwrsrc_switch_to_vaux(tp); + else + tg3_pwrsrc_die_with_vmain(tp); } static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed) @@ -2467,18 +2995,14 @@ static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed) static int tg3_setup_phy(struct tg3 *, int); -#define RESET_KIND_SHUTDOWN 0 -#define RESET_KIND_INIT 1 -#define RESET_KIND_SUSPEND 2 - -static void tg3_write_sig_post_reset(struct tg3 *, int); -static int tg3_halt_cpu(struct tg3 *, u32); - static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power) { u32 val; - if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { + if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) + return; + + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { u32 sg_dig_ctrl = tr32(SG_DIG_CTRL); u32 serdes_cfg = tr32(MAC_SERDES_CFG); @@ -2497,7 +3021,7 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power) tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ); udelay(40); return; - } else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) { + } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) { u32 phytest; if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) { u32 phy; @@ -2521,11 +3045,10 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power) tg3_writephy(tp, MII_TG3_EXT_CTRL, MII_TG3_EXT_CTRL_FORCE_LED_OFF); - tg3_writephy(tp, MII_TG3_AUX_CTRL, - MII_TG3_AUXCTL_SHDWSEL_PWRCTL | - MII_TG3_AUXCTL_PCTL_100TX_LPWR | - MII_TG3_AUXCTL_PCTL_SPR_ISOLATE | - MII_TG3_AUXCTL_PCTL_VREG_11V); + val = MII_TG3_AUXCTL_PCTL_100TX_LPWR | + MII_TG3_AUXCTL_PCTL_SPR_ISOLATE | + MII_TG3_AUXCTL_PCTL_VREG_11V; + tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val); } /* The PHY should not be powered down on some chips because @@ -2534,7 +3057,9 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power) if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 && - (tp->tg3_flags2 & TG3_FLG2_MII_SERDES))) + (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) || + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 && + !tp->pci_fn)) return; if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX || @@ -2551,7 +3076,7 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power) /* tp->lock is held. */ static int tg3_nvram_lock(struct tg3 *tp) { - if (tp->tg3_flags & TG3_FLAG_NVRAM) { + if (tg3_flag(tp, NVRAM)) { int i; if (tp->nvram_lock_cnt == 0) { @@ -2574,7 +3099,7 @@ static int tg3_nvram_lock(struct tg3 *tp) /* tp->lock is held. 
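 * NVRAM arbitration is reference-counted through nvram_lock_cnt;
 * the hardware arbitration grant is presumably only released once
 * the count drops back to zero.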
*/ static void tg3_nvram_unlock(struct tg3 *tp) { - if (tp->tg3_flags & TG3_FLAG_NVRAM) { + if (tg3_flag(tp, NVRAM)) { if (tp->nvram_lock_cnt > 0) tp->nvram_lock_cnt--; if (tp->nvram_lock_cnt == 0) @@ -2585,8 +3110,7 @@ static void tg3_nvram_unlock(struct tg3 *tp) /* tp->lock is held. */ static void tg3_enable_nvram_access(struct tg3 *tp) { - if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && - !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) { + if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) { u32 nvaccess = tr32(NVRAM_ACCESS); tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE); @@ -2596,8 +3120,7 @@ static void tg3_enable_nvram_access(struct tg3 *tp) /* tp->lock is held. */ static void tg3_disable_nvram_access(struct tg3 *tp) { - if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && - !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) { + if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) { u32 nvaccess = tr32(NVRAM_ACCESS); tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE); @@ -2667,10 +3190,10 @@ static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd) static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr) { - if ((tp->tg3_flags & TG3_FLAG_NVRAM) && - (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) && - (tp->tg3_flags2 & TG3_FLG2_FLASH) && - !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) && + if (tg3_flag(tp, NVRAM) && + tg3_flag(tp, NVRAM_BUFFERED) && + tg3_flag(tp, FLASH) && + !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) && (tp->nvram_jedecnum == JEDEC_ATMEL)) addr = ((addr / tp->nvram_pagesize) << @@ -2682,10 +3205,10 @@ static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr) static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr) { - if ((tp->tg3_flags & TG3_FLAG_NVRAM) && - (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) && - (tp->tg3_flags2 & TG3_FLG2_FLASH) && - !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) && + if (tg3_flag(tp, NVRAM) && + tg3_flag(tp, NVRAM_BUFFERED) && + tg3_flag(tp, FLASH) && + !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) && (tp->nvram_jedecnum == JEDEC_ATMEL)) addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) * @@ -2705,7 +3228,7 @@ static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val) { int ret; - if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) + if (!tg3_flag(tp, NVRAM)) return tg3_nvram_read_using_eeprom(tp, offset, val); offset = tg3_nvram_phys_addr(tp, offset); @@ -2743,123 +3266,356 @@ static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val) return res; } +#define RX_CPU_SCRATCH_BASE 0x30000 +#define RX_CPU_SCRATCH_SIZE 0x04000 +#define TX_CPU_SCRATCH_BASE 0x34000 +#define TX_CPU_SCRATCH_SIZE 0x04000 + /* tp->lock is held. 
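 * The new tg3_halt_cpu() below polls up to 10000 iterations for
 * CPU_MODE_HALT to latch; 5906 parts instead set the halt bit in
 * GRC_VCPU_EXT_CTRL.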
*/ -static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1) +static int tg3_halt_cpu(struct tg3 *tp, u32 offset) { - u32 addr_high, addr_low; int i; - addr_high = ((tp->dev->dev_addr[0] << 8) | - tp->dev->dev_addr[1]); - addr_low = ((tp->dev->dev_addr[2] << 24) | - (tp->dev->dev_addr[3] << 16) | - (tp->dev->dev_addr[4] << 8) | - (tp->dev->dev_addr[5] << 0)); - for (i = 0; i < 4; i++) { - if (i == 1 && skip_mac_1) - continue; - tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high); - tw32(MAC_ADDR_0_LOW + (i * 8), addr_low); + BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + u32 val = tr32(GRC_VCPU_EXT_CTRL); + + tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU); + return 0; } + if (offset == RX_CPU_BASE) { + for (i = 0; i < 10000; i++) { + tw32(offset + CPU_STATE, 0xffffffff); + tw32(offset + CPU_MODE, CPU_MODE_HALT); + if (tr32(offset + CPU_MODE) & CPU_MODE_HALT) + break; + } - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { - for (i = 0; i < 12; i++) { - tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high); - tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low); + tw32(offset + CPU_STATE, 0xffffffff); + tw32_f(offset + CPU_MODE, CPU_MODE_HALT); + udelay(10); + } else { + for (i = 0; i < 10000; i++) { + tw32(offset + CPU_STATE, 0xffffffff); + tw32(offset + CPU_MODE, CPU_MODE_HALT); + if (tr32(offset + CPU_MODE) & CPU_MODE_HALT) + break; } } - addr_high = (tp->dev->dev_addr[0] + - tp->dev->dev_addr[1] + - tp->dev->dev_addr[2] + - tp->dev->dev_addr[3] + - tp->dev->dev_addr[4] + - tp->dev->dev_addr[5]) & - TX_BACKOFF_SEED_MASK; - tw32(MAC_TX_BACKOFF_SEED, addr_high); -} - -static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) -{ - u32 misc_host_ctrl; - bool device_should_wake, do_low_power; - - /* Make sure register accesses (indirect or otherwise) - * will function correctly. - */ - pci_write_config_dword(tp->pdev, - TG3PCI_MISC_HOST_CTRL, - tp->misc_host_ctrl); - - switch (state) { - case PCI_D0: - pci_enable_wake(tp->pdev, state, false); - pci_set_power_state(tp->pdev, PCI_D0); + if (i >= 10000) { + netdev_err(tp->dev, "%s timed out, %s CPU\n", + __func__, offset == RX_CPU_BASE ? "RX" : "TX"); + return -ENODEV; + } - /* Switch out of Vaux if it is a NIC */ - if (tp->tg3_flags2 & TG3_FLG2_IS_NIC) - tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100); + /* Clear firmware's nvram arbitration. */ + if (tg3_flag(tp, NVRAM)) + tw32(NVRAM_SWARB, SWARB_REQ_CLR0); + return 0; +} - return 0; +struct fw_info { + unsigned int fw_base; + unsigned int fw_len; + const u32 *fw_data; +}; - case PCI_D1: - case PCI_D2: - case PCI_D3hot: - break; +/* tp->lock is held. */ +static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, + u32 cpu_scratch_base, int cpu_scratch_size, + struct fw_info *info) +{ + int err, lock_err, i; + void (*write_op)(struct tg3 *, u32, u32); - default: - netdev_err(tp->dev, "Invalid power state (D%d) requested\n", - state); + if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) { + netdev_err(tp->dev, + "%s: Trying to load TX cpu firmware which is 5705\n", + __func__); return -EINVAL; } - /* Restore the CLKREQ setting. 
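 * (This block is removed here and reintroduced in
 * tg3_power_down_prepare() further down, which derives the PCIe
 * capability offset via pci_pcie_cap() rather than the cached
 * tp->pcie_cap.)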
*/ - if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) { - u16 lnkctl; + if (tg3_flag(tp, 5705_PLUS)) + write_op = tg3_write_mem; + else + write_op = tg3_write_indirect_reg32; - pci_read_config_word(tp->pdev, - tp->pcie_cap + PCI_EXP_LNKCTL, - &lnkctl); - lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN; - pci_write_config_word(tp->pdev, - tp->pcie_cap + PCI_EXP_LNKCTL, - lnkctl); - } + /* It is possible that bootcode is still loading at this point. + * Get the nvram lock first before halting the cpu. + */ + lock_err = tg3_nvram_lock(tp); + err = tg3_halt_cpu(tp, cpu_base); + if (!lock_err) + tg3_nvram_unlock(tp); + if (err) + goto out; - misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); - tw32(TG3PCI_MISC_HOST_CTRL, - misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT); + for (i = 0; i < cpu_scratch_size; i += sizeof(u32)) + write_op(tp, cpu_scratch_base + i, 0); + tw32(cpu_base + CPU_STATE, 0xffffffff); + tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT); + for (i = 0; i < (info->fw_len / sizeof(u32)); i++) + write_op(tp, (cpu_scratch_base + + (info->fw_base & 0xffff) + + (i * sizeof(u32))), + info->fw_data[i]); - device_should_wake = pci_pme_capable(tp->pdev, state) && - device_may_wakeup(&tp->pdev->dev) && - (tp->tg3_flags & TG3_FLAG_WOL_ENABLE); + err = 0; -#ifdef BCM_INCLUDE_PHYLIB_SUPPORT - if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { - do_low_power = false; - if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) && - !tp->link_config.phy_is_low_power) { - struct phy_device *phydev; - u32 phyid, advertising; +out: + return err; +} - phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; +/* tp->lock is held. */ +static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp) +{ + struct fw_info info; + const u32 *fw_data; + int err, i; - tp->link_config.phy_is_low_power = 1; + fw_data = (void *)tp->fw->data; - tp->link_config.orig_speed = phydev->speed; - tp->link_config.orig_duplex = phydev->duplex; - tp->link_config.orig_autoneg = phydev->autoneg; - tp->link_config.orig_advertising = phydev->advertising; + /* Firmware blob starts with version numbers, followed by + start address and length. We are setting complete length. + length = end_address_of_bss - start_address_of_text. + Remainder is the blob to be loaded contiguously + from start address. */ - advertising = ADVERTISED_TP | - ADVERTISED_Pause | - ADVERTISED_Autoneg | - ADVERTISED_10baseT_Half; + info.fw_base = fw_data[1]; + info.fw_len = tp->fw->size - 12; + info.fw_data = &fw_data[3]; + + err = tg3_load_firmware_cpu(tp, RX_CPU_BASE, + RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE, + &info); + if (err) + return err; + + err = tg3_load_firmware_cpu(tp, TX_CPU_BASE, + TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE, + &info); + if (err) + return err; + + /* Now startup only the RX cpu. */ + tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); + tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base); + + for (i = 0; i < 5; i++) { + if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base) + break; + tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); + tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT); + tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base); + udelay(1000); + } + if (i >= 5) { + netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x " + "should be %08x\n", __func__, + tr32(RX_CPU_BASE + CPU_PC), info.fw_base); + return -ENODEV; + } + tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); + tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000); + + return 0; +} + +#if TG3_TSO_SUPPORT != 0 + +/* tp->lock is held. 
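 * The TSO firmware below is only loaded when none of the
 * HW_TSO_{1,2,3} flags are set; on 5705 it runs on the RX CPU and
 * borrows the mbuf pool as scratch space, otherwise it uses the
 * TX CPU scratch area.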
*/ +static int tg3_load_tso_firmware(struct tg3 *tp) +{ + struct fw_info info; + const u32 *fw_data; + unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size; + int err, i; + + if (tg3_flag(tp, HW_TSO_1) || + tg3_flag(tp, HW_TSO_2) || + tg3_flag(tp, HW_TSO_3)) + return 0; + + fw_data = (void *)tp->fw->data; + + /* Firmware blob starts with version numbers, followed by + start address and length. We are setting complete length. + length = end_address_of_bss - start_address_of_text. + Remainder is the blob to be loaded contiguously + from start address. */ + + info.fw_base = fw_data[1]; + info.fw_len = tp->fw->size - 12; + info.fw_data = &fw_data[3]; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { + cpu_base = RX_CPU_BASE; + cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705; + cpu_scratch_size = (info.fw_len + + TG3_TSO5_FW_SBSS_LEN + + TG3_TSO5_FW_BSS_LEN); + } else { + cpu_base = TX_CPU_BASE; + cpu_scratch_base = TX_CPU_SCRATCH_BASE; + cpu_scratch_size = TX_CPU_SCRATCH_SIZE; + } + + err = tg3_load_firmware_cpu(tp, cpu_base, + cpu_scratch_base, cpu_scratch_size, + &info); + if (err) + return err; + + /* Now startup the cpu. */ + tw32(cpu_base + CPU_STATE, 0xffffffff); + tw32_f(cpu_base + CPU_PC, info.fw_base); + + for (i = 0; i < 5; i++) { + if (tr32(cpu_base + CPU_PC) == info.fw_base) + break; + tw32(cpu_base + CPU_STATE, 0xffffffff); + tw32(cpu_base + CPU_MODE, CPU_MODE_HALT); + tw32_f(cpu_base + CPU_PC, info.fw_base); + udelay(1000); + } + if (i >= 5) { + netdev_err(tp->dev, + "%s fails to set CPU PC, is %08x should be %08x\n", + __func__, tr32(cpu_base + CPU_PC), info.fw_base); + return -ENODEV; + } + tw32(cpu_base + CPU_STATE, 0xffffffff); + tw32_f(cpu_base + CPU_MODE, 0x00000000); + return 0; +} + +#endif /* TG3_TSO_SUPPORT != 0 */ + +/* tp->lock is held. */ +static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1) +{ + u32 addr_high, addr_low; + int i; + + addr_high = ((tp->dev->dev_addr[0] << 8) | + tp->dev->dev_addr[1]); + addr_low = ((tp->dev->dev_addr[2] << 24) | + (tp->dev->dev_addr[3] << 16) | + (tp->dev->dev_addr[4] << 8) | + (tp->dev->dev_addr[5] << 0)); + for (i = 0; i < 4; i++) { + if (i == 1 && skip_mac_1) + continue; + tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high); + tw32(MAC_ADDR_0_LOW + (i * 8), addr_low); + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { + for (i = 0; i < 12; i++) { + tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high); + tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low); + } + } + + addr_high = (tp->dev->dev_addr[0] + + tp->dev->dev_addr[1] + + tp->dev->dev_addr[2] + + tp->dev->dev_addr[3] + + tp->dev->dev_addr[4] + + tp->dev->dev_addr[5]) & + TX_BACKOFF_SEED_MASK; + tw32(MAC_TX_BACKOFF_SEED, addr_high); +} + +static void tg3_enable_register_access(struct tg3 *tp) +{ + /* + * Make sure register accesses (indirect or otherwise) will function + * correctly. 
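 * (Re-writing TG3PCI_MISC_HOST_CTRL restores the indirect/register
 * access configuration, which may be lost across a power-state
 * transition.)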
+ */ + pci_write_config_dword(tp->pdev, + TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl); +} + +static int tg3_power_up(struct tg3 *tp) +{ + int err; + + tg3_enable_register_access(tp); + + /* Kernels less than around 2.6.37 still need this */ + pci_enable_wake(tp->pdev, PCI_D0, false); + + err = pci_set_power_state(tp->pdev, PCI_D0); + if (!err) { + /* Switch out of Vaux if it is a NIC */ + tg3_pwrsrc_switch_to_vmain(tp); + } else { + netdev_err(tp->dev, "Transition to D0 failed\n"); + } + + return err; +} + +static void tg3_power_down(struct tg3 *tp) +{ + pci_set_power_state(tp->pdev, PCI_D3hot); +} + +static int tg3_power_down_prepare(struct tg3 *tp) +{ + u32 misc_host_ctrl; + bool device_should_wake, do_low_power; + + tg3_enable_register_access(tp); + + /* Restore the CLKREQ setting. */ + if (tg3_flag(tp, CLKREQ_BUG)) { + u16 lnkctl; + + pci_read_config_word(tp->pdev, + pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL, + &lnkctl); + lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN; + pci_write_config_word(tp->pdev, + pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL, + lnkctl); + } + + misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); + tw32(TG3PCI_MISC_HOST_CTRL, + misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT); + + device_should_wake = device_may_wakeup(&tp->pdev->dev) && + tg3_flag(tp, WOL_ENABLE); + +#ifdef BCM_INCLUDE_PHYLIB_SUPPORT + if (tg3_flag(tp, USE_PHYLIB)) { + do_low_power = false; + if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) && + !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { + struct phy_device *phydev; + u32 phyid, advertising; + + phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + + tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER; - if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) || - device_should_wake) { - if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) + tp->link_config.orig_speed = phydev->speed; + tp->link_config.orig_duplex = phydev->duplex; + tp->link_config.orig_autoneg = phydev->autoneg; + tp->link_config.orig_advertising = phydev->advertising; + + advertising = ADVERTISED_TP | + ADVERTISED_Pause | + ADVERTISED_Autoneg | + ADVERTISED_10baseT_Half; + + if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) { + if (tg3_flag(tp, WOL_SPEED_100MB)) advertising |= ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | @@ -2886,19 +3642,15 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) { do_low_power = true; - if (tp->link_config.phy_is_low_power == 0) { - tp->link_config.phy_is_low_power = 1; + if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { + tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER; tp->link_config.orig_speed = tp->link_config.speed; tp->link_config.orig_duplex = tp->link_config.duplex; tp->link_config.orig_autoneg = tp->link_config.autoneg; } - if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) { - tp->link_config.speed = SPEED_10; - tp->link_config.duplex = DUPLEX_HALF; - tp->link_config.autoneg = AUTONEG_ENABLE; + if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) tg3_setup_phy(tp, 0); - } } if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { @@ -2906,7 +3658,7 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) val = tr32(GRC_VCPU_EXT_CTRL); tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL); - } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) { + } else if (!tg3_flag(tp, ENABLE_ASF)) { int i; u32 val; @@ -2917,7 +3669,7 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) msleep(1); } } - if (tp->tg3_flags & TG3_FLAG_WOL_CAP) + if (tg3_flag(tp, WOL_CAP)) tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE | WOL_DRV_STATE_SHUTDOWN | WOL_DRV_WOL | @@ 
-2926,23 +3678,31 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) if (device_should_wake) { u32 mac_mode; - if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) { + if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { if (do_low_power && - !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)) { - tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a); + !(tp->phy_flags & TG3_PHYFLG_IS_FET)) { + tg3_phy_auxctl_write(tp, + MII_TG3_AUXCTL_SHDWSEL_PWRCTL, + MII_TG3_AUXCTL_PCTL_WOL_EN | + MII_TG3_AUXCTL_PCTL_100TX_LPWR | + MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC); udelay(40); } - if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) + if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) mac_mode = MAC_MODE_PORT_MODE_GMII; - else + else if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) { + if (tp->link_config.active_speed == SPEED_1000) + mac_mode = MAC_MODE_PORT_MODE_GMII; + else + mac_mode = MAC_MODE_PORT_MODE_MII; + } else mac_mode = MAC_MODE_PORT_MODE_MII; mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY; if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) { - u32 speed = (tp->tg3_flags & - TG3_FLAG_WOL_SPEED_100MB) ? + u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ? SPEED_100 : SPEED_10; if (tg3_5700_link_polarity(tp, speed)) mac_mode |= MAC_MODE_LINK_POLARITY; @@ -2953,22 +3713,18 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) mac_mode = MAC_MODE_PORT_MODE_TBI; } - if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS)) + if (!tg3_flag(tp, 5750_PLUS)) tw32(MAC_LED_CTRL, tp->led_ctrl); mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE; - if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && - !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) && - ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) || - (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))) + if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) && + (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE))) mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL; - if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { - mac_mode |= tp->mac_mode & - (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN); - if (mac_mode & MAC_MODE_APE_TX_EN) - mac_mode |= MAC_MODE_TDE_ENABLE; - } + if (tg3_flag(tp, ENABLE_APE)) + mac_mode |= MAC_MODE_APE_TX_EN | + MAC_MODE_APE_RX_EN | + MAC_MODE_TDE_ENABLE; tw32_f(MAC_MODE, mac_mode); udelay(100); @@ -2977,7 +3733,7 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) udelay(10); } - if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) && + if (!tg3_flag(tp, WOL_SPEED_100MB) && (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) { u32 base_val; @@ -2988,12 +3744,11 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK | CLOCK_CTRL_PWRDOWN_PLL133, 40); - } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) || - (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) || - (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) { + } else if (tg3_flag(tp, 5780_CLASS) || + tg3_flag(tp, CPMU_PRESENT) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { /* do nothing */ - } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && - (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) { + } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) { u32 newbits1, newbits2; if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || @@ -3002,7 +3757,7 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) CLOCK_CTRL_TXCLK_DISABLE | CLOCK_CTRL_ALTCLK); newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE; - } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { + } else if (tg3_flag(tp, 5705_PLUS)) { newbits1 = 
CLOCK_CTRL_625_CORE; newbits2 = newbits1 | CLOCK_CTRL_ALTCLK; } else { @@ -3016,7 +3771,7 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2, 40); - if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { + if (!tg3_flag(tp, 5705_PLUS)) { u32 newbits3; if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || @@ -3033,11 +3788,10 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) } } - if (!(device_should_wake) && - !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) + if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF)) tg3_power_down_phy(tp, do_low_power); - tg3_frob_aux_power(tp); + tg3_frob_aux_power(tp, true); /* Workaround for unstable PLL clock */ if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) || @@ -3046,7 +3800,7 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1); tw32(0x7d00, val); - if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) { + if (!tg3_flag(tp, ENABLE_ASF)) { int err; err = tg3_nvram_lock(tp); @@ -3058,11 +3812,7 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN); - if (device_should_wake) - pci_enable_wake(tp->pdev, state, true); - - /* Finally, set the new power state. */ - pci_set_power_state(tp->pdev, state); + pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE)); return 0; } @@ -3101,7 +3851,7 @@ static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 break; default: - if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) { + if (tp->phy_flags & TG3_PHYFLG_IS_FET) { *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 : SPEED_10; *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL : @@ -3114,132 +3864,149 @@ static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 } } +static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl) +{ + int err = 0; + u32 val, new_adv; + + new_adv = ADVERTISE_CSMA; + new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL; + new_adv |= mii_advertise_flowctrl(flowctrl); + + err = tg3_writephy(tp, MII_ADVERTISE, new_adv); + if (err) + goto done; + + if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { + new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise); + + if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || + tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) + new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER; + + err = tg3_writephy(tp, MII_CTRL1000, new_adv); + if (err) + goto done; + } + + if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) + goto done; + +#ifndef BCM_INCLUDE_PHYLIB_SUPPORT + if ((tp->phy_id & TG3_PHY_ID_MASK) != TG3_PHY_ID_BCM50612E) +#endif + tw32(TG3_CPMU_EEE_MODE, + tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE); + + err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp); + if (!err) { + u32 err2; + + val = 0; + /* Advertise 100-BaseTX EEE ability */ + if (advertise & ADVERTISED_100baseT_Full) + val |= MDIO_AN_EEE_ADV_100TX; + /* Advertise 1000-BaseT EEE ability */ + if (advertise & ADVERTISED_1000baseT_Full) + val |= MDIO_AN_EEE_ADV_1000T; + if (tg3_disable_eee) + val = 0; + err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val); + if (err) + goto err_out; + + switch (GET_ASIC_REV(tp->pci_chip_rev_id)) { + case ASIC_REV_5717: + case ASIC_REV_57765: + case ASIC_REV_57766: + case ASIC_REV_5719: + /* If we advertised any eee advertisements above... 
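 * then program the ALNOKO/RMRXSTO/OPCSINPT DSP taps; when val is
 * still zero the same write clears them.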
*/ + if (val) + val = MII_TG3_DSP_TAP26_ALNOKO | + MII_TG3_DSP_TAP26_RMRXSTO | + MII_TG3_DSP_TAP26_OPCSINPT; + tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val); + /* Fall through */ + case ASIC_REV_5720: + if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val)) + tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val | + MII_TG3_DSP_CH34TP2_HIBW01); + } + +#ifndef BCM_INCLUDE_PHYLIB_SUPPORT + if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM50612E) { + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, MII_TG3_DSP_TLER); + tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &val); + if (tp->link_config.autoneg == AUTONEG_ENABLE) + val |= MII_TG3_DSP_TLER_AUTOGREEEN_EN; + else + val &= ~MII_TG3_DSP_TLER_AUTOGREEEN_EN; + tg3_phydsp_write(tp, MII_TG3_DSP_TLER, val); + } +#endif /* BCM_INCLUDE_PHYLIB_SUPPORT */ + +err_out: + err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + if (!err) + err = err2; + } + +done: + return err; +} + static void tg3_phy_copper_begin(struct tg3 *tp) { u32 new_adv; int i; - if (tp->link_config.phy_is_low_power) { - /* Entering low power mode. Disable gigabit and - * 100baseT advertisements. - */ - tg3_writephy(tp, MII_TG3_CTRL, 0); - - new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL | - ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP); - if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) - new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL); - - tg3_writephy(tp, MII_ADVERTISE, new_adv); + if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) && + !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) { + new_adv = ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full; + if (tg3_flag(tp, WOL_SPEED_100MB)) + new_adv |= ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full; + if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) + new_adv |= ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full; + + tg3_phy_autoneg_cfg(tp, new_adv, + FLOW_CTRL_TX | FLOW_CTRL_RX); } else if (tp->link_config.speed == SPEED_INVALID) { - if (tp->tg3_flags & TG3_FLAG_10_100_ONLY) + if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) tp->link_config.advertising &= ~(ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full); - new_adv = ADVERTISE_CSMA; - if (tp->link_config.advertising & ADVERTISED_10baseT_Half) - new_adv |= ADVERTISE_10HALF; - if (tp->link_config.advertising & ADVERTISED_10baseT_Full) - new_adv |= ADVERTISE_10FULL; - if (tp->link_config.advertising & ADVERTISED_100baseT_Half) - new_adv |= ADVERTISE_100HALF; - if (tp->link_config.advertising & ADVERTISED_100baseT_Full) - new_adv |= ADVERTISE_100FULL; - - new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl); - - tg3_writephy(tp, MII_ADVERTISE, new_adv); - - if (tp->link_config.advertising & - (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) { - new_adv = 0; - if (tp->link_config.advertising & ADVERTISED_1000baseT_Half) - new_adv |= MII_TG3_CTRL_ADV_1000_HALF; - if (tp->link_config.advertising & ADVERTISED_1000baseT_Full) - new_adv |= MII_TG3_CTRL_ADV_1000_FULL; - if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) && - (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || - tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) - new_adv |= (MII_TG3_CTRL_AS_MASTER | - MII_TG3_CTRL_ENABLE_AS_MASTER); - tg3_writephy(tp, MII_TG3_CTRL, new_adv); - } else { - tg3_writephy(tp, MII_TG3_CTRL, 0); - } + tg3_phy_autoneg_cfg(tp, tp->link_config.advertising, + tp->link_config.flowctrl); } else { - new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl); - new_adv |= ADVERTISE_CSMA; - /* Asking for a specific link mode. 
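*/

/* Illustrative sketch (not from the patch): the ladder that follows
 * collapses a forced speed/duplex pair into exactly one ethtool
 * ADVERTISED_* bit, so autoneg can only resolve to the requested mode: */
static u32 example_forced_adv(u16 speed, u8 duplex)
{
	if (speed == SPEED_1000)
		return duplex == DUPLEX_FULL ? ADVERTISED_1000baseT_Full :
					       ADVERTISED_1000baseT_Half;
	if (speed == SPEED_100)
		return duplex == DUPLEX_FULL ? ADVERTISED_100baseT_Full :
					       ADVERTISED_100baseT_Half;
	return duplex == DUPLEX_FULL ? ADVERTISED_10baseT_Full :
				       ADVERTISED_10baseT_Half;
}

/*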
*/ if (tp->link_config.speed == SPEED_1000) { - tg3_writephy(tp, MII_ADVERTISE, new_adv); - if (tp->link_config.duplex == DUPLEX_FULL) - new_adv = MII_TG3_CTRL_ADV_1000_FULL; + new_adv = ADVERTISED_1000baseT_Full; else - new_adv = MII_TG3_CTRL_ADV_1000_HALF; - if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || - tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) - new_adv |= (MII_TG3_CTRL_AS_MASTER | - MII_TG3_CTRL_ENABLE_AS_MASTER); - } else { - if (tp->link_config.speed == SPEED_100) { - if (tp->link_config.duplex == DUPLEX_FULL) - new_adv |= ADVERTISE_100FULL; - else - new_adv |= ADVERTISE_100HALF; - } else { - if (tp->link_config.duplex == DUPLEX_FULL) - new_adv |= ADVERTISE_10FULL; - else - new_adv |= ADVERTISE_10HALF; - } - tg3_writephy(tp, MII_ADVERTISE, new_adv); - - new_adv = 0; + new_adv = ADVERTISED_1000baseT_Half; + } else if (tp->link_config.speed == SPEED_100) { + if (tp->link_config.duplex == DUPLEX_FULL) + new_adv = ADVERTISED_100baseT_Full; + else + new_adv = ADVERTISED_100baseT_Half; + } else { + if (tp->link_config.duplex == DUPLEX_FULL) + new_adv = ADVERTISED_10baseT_Full; + else + new_adv = ADVERTISED_10baseT_Half; } - tg3_writephy(tp, MII_TG3_CTRL, new_adv); + tg3_phy_autoneg_cfg(tp, new_adv, + tp->link_config.flowctrl); } -#ifndef BCM_INCLUDE_PHYLIB_SUPPORT - if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM50612E) { - u32 val = 0; - - if (tp->link_config.autoneg == AUTONEG_ENABLE) { - /* Advertise 100-BaseTX EEE ability */ - if (tp->link_config.advertising & - (ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full)) - val |= 0x0002; - /* Advertise 1000-BaseT EEE ability */ - if (tp->link_config.advertising & - (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) - val |= 0x0004; - } - tg3_phy_cl45_write(tp, 0x7, 0x3c, val); - - /* Enable SM_DSP clock and tx 6dB coding. */ - val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL | - MII_TG3_AUXCTL_ACTL_SMDSP_ENA | - MII_TG3_AUXCTL_ACTL_TX_6DB; - tg3_writephy(tp, MII_TG3_AUX_CTRL, val); - - tg3_writephy(tp, MII_TG3_DSP_ADDRESS, - MII_TG3_DSP_TLER); - tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &val); - if (tp->link_config.autoneg == AUTONEG_ENABLE) - val |= MII_TG3_DSP_TLER_AUTOGREEEN_EN; - else - val &= ~MII_TG3_DSP_TLER_AUTOGREEEN_EN; - tg3_phydsp_write(tp, MII_TG3_DSP_TLER, val); - - /* Turn off SM_DSP clock. */ - val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL | - MII_TG3_AUXCTL_ACTL_TX_6DB; - tg3_writephy(tp, MII_TG3_AUX_CTRL, val); - } -#endif /* BCM_INCLUDE_PHYLIB_SUPPORT */ + if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) && + (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) + return; if (tp->link_config.autoneg == AUTONEG_DISABLE && tp->link_config.speed != SPEED_INVALID) { @@ -3259,7 +4026,7 @@ static void tg3_phy_copper_begin(struct tg3 *tp) break; case SPEED_1000: - bmcr |= TG3_BMCR_SPEED1000; + bmcr |= BMCR_SPEED1000; break; } @@ -3290,107 +4057,305 @@ static void tg3_phy_copper_begin(struct tg3 *tp) } } -static int tg3_init_5401phy_dsp(struct tg3 *tp) +static int tg3_phy_pull_flowctrl(struct tg3 *tp, u32 lcl) { int err; + u32 val, res; - /* Turn off tap power management. 
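*/

/* The removed pairs below latch MII_TG3_DSP_ADDRESS and then write
 * MII_TG3_DSP_RW_PORT by hand; the replacement code calls
 * tg3_phydsp_write() instead. Its definition is not in these hunks, but
 * from the call pattern it presumably folds each pair into: */
static int example_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);

	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
	return err;
}

/*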
*/ - /* Set Extended packet length bit */ - err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20); + err = tg3_readphy(tp, MII_LPA, &val); + if (err) + return err; + + if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) + res = tg3_resolve_flowctrl_1000X(lcl, val); + else + res = mii_resolve_flowctrl_fdx(lcl, val); + + val = 0; + + if (tr32(MAC_RX_MODE) & RX_MODE_FLOW_CTRL_ENABLE) + val |= FLOW_CTRL_RX; + + if (tr32(MAC_TX_MODE) & TX_MODE_FLOW_CTRL_ENABLE) + val |= FLOW_CTRL_TX; - err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012); - err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804); + if (res == val) { + /* We can't be sure if flow control was really autonegotiated. + * We'll assume that is the case, because that's what we want anyways. + */ + tg3_flag_set(tp, PAUSE_AUTONEG); + } else { + /* If the flow control settings in the MAC differ from what the + * autoneg advertisements would have resolved to, then the flow + * control must have been forced. + */ + tp->link_config.flowctrl = val; + tg3_flag_clear(tp, PAUSE_AUTONEG); + } + + return 0; +} + +static int tg3_phy_pull_config(struct tg3 *tp) +{ + int err; + u32 val, lclfc = 0; - err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013); - err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204); + err = tg3_readphy(tp, MII_BMCR, &val); + if (err) + goto done; - err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006); - err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132); + if (!(val & BMCR_ANENABLE)) { + tp->link_config.autoneg = AUTONEG_DISABLE; + tp->link_config.advertising = 0; - err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006); - err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232); + err = -EIO; - err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f); - err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20); + switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) { + case 0: + if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) + goto done; - udelay(40); + tp->link_config.speed = SPEED_10; + break; + case BMCR_SPEED100: + if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) + goto done; + + tp->link_config.speed = SPEED_100; + break; + case BMCR_SPEED1000: + if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { + tp->link_config.speed = SPEED_1000; + break; + } + /* Fall through */ + default: + err = -EIO; + goto done; + } + + if (val & BMCR_FULLDPLX) + tp->link_config.duplex = DUPLEX_FULL; + else + tp->link_config.duplex = DUPLEX_HALF; + + tp->link_config.flowctrl = 0; + + val = tr32(MAC_RX_MODE); + if (val & RX_MODE_FLOW_CTRL_ENABLE) + tp->link_config.flowctrl |= FLOW_CTRL_RX; + + val = tr32(MAC_TX_MODE); + if (val & TX_MODE_FLOW_CTRL_ENABLE) + tp->link_config.flowctrl |= FLOW_CTRL_TX; + /* Assume autoneg flow control is desirable */ + tg3_flag_set(tp, PAUSE_AUTONEG); + + err = 0; + goto done; + } + + tp->link_config.autoneg = AUTONEG_ENABLE; + tp->link_config.advertising = ADVERTISED_Autoneg; + + if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { + u32 adv; + + err = tg3_readphy(tp, MII_ADVERTISE, &val); + if (err) + goto done; + + adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL); + tp->link_config.advertising |= adv | ADVERTISED_TP; + + adv = tg3_decode_flowctrl_1000T(val); + tp->link_config.flowctrl = adv; + lclfc = adv; + } else + tp->link_config.advertising |= ADVERTISED_FIBRE; + + if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { + u32 adv; + + if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { + err = tg3_readphy(tp, MII_CTRL1000, &val); + if (err) + goto done; + + adv = mii_ctrl1000_to_ethtool_adv_t(val); + } else { + err = tg3_readphy(tp, MII_ADVERTISE, &val); + if (err) + 
goto done; + + adv = tg3_decode_flowctrl_1000X(val); + tp->link_config.flowctrl = adv; + lclfc = adv; + + val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL); + adv = mii_adv_to_ethtool_adv_x(val); + } + + tp->link_config.advertising |= adv; + } + + err = tg3_phy_pull_flowctrl(tp, lclfc); + +done: return err; } -static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask) +static int tg3_phy_pull_status(struct tg3 *tp) { - u32 adv_reg, all_mask = 0; + u32 val; + int i, err; - if (mask & ADVERTISED_10baseT_Half) - all_mask |= ADVERTISE_10HALF; - if (mask & ADVERTISED_10baseT_Full) - all_mask |= ADVERTISE_10FULL; - if (mask & ADVERTISED_100baseT_Half) - all_mask |= ADVERTISE_100HALF; - if (mask & ADVERTISED_100baseT_Full) - all_mask |= ADVERTISE_100FULL; + tp->link_config.active_speed = SPEED_INVALID; + tp->link_config.active_duplex = DUPLEX_INVALID; - if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg)) - return 0; + err = tg3_readphy(tp, MII_BMSR, &val); + if (err) + return err; + + err = tg3_readphy(tp, MII_BMSR, &val); + if (err) + return err; - if ((adv_reg & all_mask) != all_mask) + if (!(val & BMSR_LSTATUS)) return 0; - if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) { - u32 tg3_ctrl; - all_mask = 0; - if (mask & ADVERTISED_1000baseT_Half) - all_mask |= ADVERTISE_1000HALF; - if (mask & ADVERTISED_1000baseT_Full) - all_mask |= ADVERTISE_1000FULL; + for (i = 0; i < 2000; i++) { + err = tg3_readphy(tp, MII_TG3_AUX_STAT, &val); + if (err) + return err; - if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl)) - return 0; + if (val) + break; - if ((tg3_ctrl & all_mask) != all_mask) - return 0; + udelay(10); } - return 1; + + tg3_aux_stat_to_speed_duplex(tp, val, + &tp->link_config.active_speed, + &tp->link_config.active_duplex); + + tp->link_config.active_flowctrl = tp->link_config.flowctrl; + + return 0; } -static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv) +static int tg3_init_5401phy_dsp(struct tg3 *tp) { - u32 curadv, reqadv; + int err; - if (tg3_readphy(tp, MII_ADVERTISE, lcladv)) - return 1; + /* Turn off tap power management. */ + /* Set Extended packet length bit */ + err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20); + + err |= tg3_phydsp_write(tp, 0x0012, 0x1804); + err |= tg3_phydsp_write(tp, 0x0013, 0x1204); + err |= tg3_phydsp_write(tp, 0x8006, 0x0132); + err |= tg3_phydsp_write(tp, 0x8006, 0x0232); + err |= tg3_phydsp_write(tp, 0x201f, 0x0a20); - curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); - reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl); + udelay(40); + + return err; +} +static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv) +{ + u32 advmsk, tgtadv, advertising; + + advertising = tp->link_config.advertising; + tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL; + + advmsk = ADVERTISE_ALL; if (tp->link_config.active_duplex == DUPLEX_FULL) { - if (curadv != reqadv) - return 0; + tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl); + advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; + } - if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) - tg3_readphy(tp, MII_LPA, rmtadv); - } else { - /* Reprogram the advertisement register, even if it - * does not affect the current link. If the link - * gets renegotiated in the future, we can save an - * additional renegotiation cycle by advertising - * it correctly in the first place. 
- */ - if (curadv != reqadv) { - *lcladv &= ~(ADVERTISE_PAUSE_CAP | - ADVERTISE_PAUSE_ASYM); - tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv); + if (tg3_readphy(tp, MII_ADVERTISE, lcladv)) + return false; + + if ((*lcladv & advmsk) != tgtadv) + return false; + + if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { + u32 tg3_ctrl; + + tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising); + + if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl)) + return false; + + if (tgtadv && + (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || + tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) { + tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER; + tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL | + CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER); + } else { + tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL); } + + if (tg3_ctrl != tgtadv) + return false; } - return 1; + if (tg3_disable_eee) + return true; + + if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) { + u32 val; + + if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val)) + return false; + + val &= (MDIO_AN_EEE_ADV_100TX | MDIO_AN_EEE_ADV_1000T); + + tgtadv = 0; + if (advertising & ADVERTISED_100baseT_Full) + tgtadv |= MDIO_AN_EEE_ADV_100TX; + if (advertising & ADVERTISED_1000baseT_Full) + tgtadv |= MDIO_AN_EEE_ADV_1000T; + + if (val != tgtadv) + return false; + } + + return true; +} + +static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv) +{ + u32 lpeth = 0; + + if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { + u32 val; + + if (tg3_readphy(tp, MII_STAT1000, &val)) + return false; + + lpeth = mii_stat1000_to_ethtool_lpa_t(val); + } + + if (tg3_readphy(tp, MII_LPA, rmtadv)) + return false; + + lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv); + tp->link_config.rmt_adv = lpeth; + + return true; } static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset) { int current_link_up; - u32 bmsr, dummy; + u32 bmsr, val; u32 lcl_adv, rmt_adv; u16 current_speed; u8 current_duplex; @@ -3405,13 +4370,8 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset) MAC_STATUS_LNKSTATE_CHANGED)); udelay(40); - if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { - tw32_f(MAC_MI_MODE, - (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL)); - udelay(80); - } - - tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02); + if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) + tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0); /* Some third-party PHYs need to be reset on link going * down. @@ -3431,7 +4391,7 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset) if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { tg3_readphy(tp, MII_BMSR, &bmsr); if (tg3_readphy(tp, MII_BMSR, &bmsr) || - !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) + !tg3_flag(tp, INIT_COMPLETE)) bmsr = 0; if (!(bmsr & BMSR_LSTATUS)) { @@ -3464,18 +4424,18 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset) tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) { /* 5701 {A0,B0} CRC bug workaround */ tg3_writephy(tp, 0x15, 0x0a75); - tg3_writephy(tp, 0x1c, 0x8c68); - tg3_writephy(tp, 0x1c, 0x8d68); - tg3_writephy(tp, 0x1c, 0x8c68); + tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68); + tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); + tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68); } /* Clear pending interrupts... 
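*/

/* Background note (sketch, not patch content): several TG3 PHY status
 * registers, MII_TG3_ISTAT included, latch events and clear on read.
 * Reading twice, as the code just below does, first flushes the stale
 * latched value and then samples the current state: */
static int example_read_latched(struct tg3 *tp, u32 reg, u32 *val)
{
	int err = tg3_readphy(tp, reg, val);	/* flush latched bits */

	if (!err)
		err = tg3_readphy(tp, reg, val);	/* current state */
	return err;
}

/*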
*/ - tg3_readphy(tp, MII_TG3_ISTAT, &dummy); - tg3_readphy(tp, MII_TG3_ISTAT, &dummy); + tg3_readphy(tp, MII_TG3_ISTAT, &val); + tg3_readphy(tp, MII_TG3_ISTAT, &val); - if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) + if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG); - else if (!(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)) + else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) tg3_writephy(tp, MII_TG3_IMASK, ~0); if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || @@ -3490,15 +4450,17 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset) current_link_up = 0; current_speed = SPEED_INVALID; current_duplex = DUPLEX_INVALID; - - if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) { - u32 val; - - tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007); - tg3_readphy(tp, MII_TG3_AUX_CTRL, &val); - if (!(val & (1 << 10))) { - val |= (1 << 10); - tg3_writephy(tp, MII_TG3_AUX_CTRL, val); + tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE; + tp->link_config.rmt_adv = 0; + + if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) { + err = tg3_phy_auxctl_read(tp, + MII_TG3_AUXCTL_SHDWSEL_MISCTEST, + &val); + if (!err && !(val & (1 << 10))) { + tg3_phy_auxctl_write(tp, + MII_TG3_AUXCTL_SHDWSEL_MISCTEST, + val | (1 << 10)); goto relink; } } @@ -3545,12 +4507,9 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset) if (tp->link_config.autoneg == AUTONEG_ENABLE) { if ((bmcr & BMCR_ANENABLE) && - tg3_copper_is_advertising_all(tp, - tp->link_config.advertising)) { - if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv, - &rmt_adv)) - current_link_up = 1; - } + tg3_phy_copper_an_config_ok(tp, &lcl_adv) && + tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv)) + current_link_up = 1; } else { if (!(bmcr & BMCR_ANENABLE) && tp->link_config.speed == current_speed && @@ -3562,19 +4521,31 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset) } if (current_link_up == 1 && - tp->link_config.active_duplex == DUPLEX_FULL) + tp->link_config.active_duplex == DUPLEX_FULL) { + u32 reg, bit; + + if (tp->phy_flags & TG3_PHYFLG_IS_FET) { + reg = MII_TG3_FET_GEN_STAT; + bit = MII_TG3_FET_GEN_STAT_MDIXSTAT; + } else { + reg = MII_TG3_EXT_STAT; + bit = MII_TG3_EXT_STAT_MDIX; + } + + if (!tg3_readphy(tp, reg, &val) && (val & bit)) + tp->phy_flags |= TG3_PHYFLG_MDIX_STATE; + tg3_setup_flow_control(tp, lcl_adv, rmt_adv); + } } relink: - if (current_link_up == 0 || tp->link_config.phy_is_low_power) { - u32 tmp; - + if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { tg3_phy_copper_begin(tp); - tg3_readphy(tp, MII_BMSR, &tmp); - if (!tg3_readphy(tp, MII_BMSR, &tmp) && - (tmp & BMSR_LSTATUS)) + tg3_readphy(tp, MII_BMSR, &bmsr); + if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) || + (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)) current_link_up = 1; } @@ -3585,7 +4556,7 @@ relink: tp->mac_mode |= MAC_MODE_PORT_MODE_MII; else tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; - } else if ((tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) || + } else if ((tp->phy_flags & TG3_PHYFLG_IS_FET) || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) tp->mac_mode |= MAC_MODE_PORT_MODE_MII; else @@ -3616,27 +4587,19 @@ relink: tw32_f(MAC_MODE, tp->mac_mode); udelay(40); + tg3_phy_eee_adjust(tp, current_link_up); + #ifndef BCM_INCLUDE_PHYLIB_SUPPORT if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) { /* A0 */ - if (tp->phy_id == TG3_PHY_ID_BCM50612E) { - u32 phy; - - /* Enable SM_DSP clock and tx 6dB coding. 
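*/

/* The deleted lines around this point show the open-coded SM_DSP
 * clock bracket that the patch replaces with TG3_PHY_AUXCTL_SMDSP_ENABLE()
 * and _DISABLE(). Assumed definitions (not in these hunks), built on the
 * tg3_phy_auxctl_write() helper the patch introduces elsewhere: */
#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)				\
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,	\
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA |		\
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp)			\
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,	\
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

/*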
*/ - phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL | - MII_TG3_AUXCTL_ACTL_SMDSP_ENA | - MII_TG3_AUXCTL_ACTL_TX_6DB; - tg3_writephy(tp, MII_TG3_AUX_CTRL, phy); - + if (tp->phy_id == TG3_PHY_ID_BCM50612E && + !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { if (tp->link_config.active_speed == SPEED_10) tg3_phydsp_write(tp, 0x0ff0, 0x2000); else tg3_phydsp_write(tp, 0x0ff0, 0x0000); - /* Turn off SM_DSP clock. */ - phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL | - MII_TG3_AUXCTL_ACTL_TX_6DB; - tg3_writephy(tp, MII_TG3_AUX_CTRL, phy); + TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); } if (tp->link_config.active_speed == SPEED_10) @@ -3648,7 +4611,7 @@ relink: } #endif - if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) { + if (tg3_flag(tp, USE_LINKCHG_REG)) { /* Polled via timer. */ tw32_f(MAC_EVENT, 0); } else { @@ -3659,8 +4622,7 @@ relink: if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 && current_link_up == 1 && tp->link_config.active_speed == SPEED_1000 && - ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) || - (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) { + (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) { udelay(120); tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED | @@ -3672,11 +4634,11 @@ relink: } /* Prevent send BD corruption. */ - if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) { + if (tg3_flag(tp, CLKREQ_BUG)) { u16 oldlnkctl, newlnkctl; pci_read_config_word(tp->pdev, - tp->pcie_cap + PCI_EXP_LNKCTL, + pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL, &oldlnkctl); if (tp->link_config.active_speed == SPEED_100 || tp->link_config.active_speed == SPEED_10) @@ -3685,8 +4647,8 @@ relink: newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN; if (newlnkctl != oldlnkctl) pci_write_config_word(tp->pdev, - tp->pcie_cap + PCI_EXP_LNKCTL, - newlnkctl); + pci_pcie_cap(tp->pdev) + + PCI_EXP_LNKCTL, newlnkctl); } if (current_link_up != netif_carrier_ok(tp->dev)) { @@ -4067,7 +5029,7 @@ static void tg3_init_bcm8002(struct tg3 *tp) int i; /* Reset when initting first time or we have a link. 
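*/

/* This hunk shows the patch-wide conversion from the fixed-width
 * tp->tg3_flags/tg3_flags2/tg3_flags3 words to tg3_flag(). Consistent
 * with the test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags)
 * call later in this patch, the flags presumably now live in a single
 * enum-indexed bitmap, roughly along these lines (sketch only; the real
 * enum is far longer):
 */
enum TG3_FLAGS {
	TG3_FLAG_INIT_COMPLETE,
	TG3_FLAG_ENABLE_ASF,
	/* ... one entry per former flag bit ... */
	TG3_FLAG_NUMBER_OF_FLAGS,	/* last entry sizes the bitmap */
};

#define tg3_flag(tp, flag)	 test_bit(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)	 set_bit(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag) clear_bit(TG3_FLAG_##flag, (tp)->tg3_flags)

/*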
*/ - if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) && + if (tg3_flag(tp, INIT_COMPLETE) && !(mac_status & MAC_STATUS_PCS_SYNCED)) return; @@ -4169,7 +5131,7 @@ static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status) expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE; if (sg_dig_ctrl != expected_sg_dig_ctrl) { - if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) && + if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) && tp->serdes_counter && ((mac_status & (MAC_STATUS_PCS_SYNCED | MAC_STATUS_RCVD_CFG)) == @@ -4186,7 +5148,7 @@ restart_autoneg: tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl); tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; - tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; } else if (mac_status & (MAC_STATUS_PCS_SYNCED | MAC_STATUS_SIGNAL_DET)) { sg_dig_status = tr32(SG_DIG_STATUS); @@ -4206,10 +5168,13 @@ restart_autoneg: if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE) remote_adv |= LPA_1000XPAUSE_ASYM; + tp->link_config.rmt_adv = + mii_adv_to_ethtool_adv_x(remote_adv); + tg3_setup_flow_control(tp, local_adv, remote_adv); current_link_up = 1; tp->serdes_counter = 0; - tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) { if (tp->serdes_counter) tp->serdes_counter--; @@ -4236,8 +5201,8 @@ restart_autoneg: !(mac_status & MAC_STATUS_RCVD_CFG)) { tg3_setup_flow_control(tp, 0, 0); current_link_up = 1; - tp->tg3_flags2 |= - TG3_FLG2_PARALLEL_DETECT; + tp->phy_flags |= + TG3_PHYFLG_PARALLEL_DETECT; tp->serdes_counter = SERDES_PARALLEL_DET_TIMEOUT; } else @@ -4246,7 +5211,7 @@ restart_autoneg: } } else { tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; - tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; } out: @@ -4277,6 +5242,9 @@ static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status) if (rxflags & MR_LP_ADV_ASYM_PAUSE) remote_adv |= LPA_1000XPAUSE_ASYM; + tp->link_config.rmt_adv = + mii_adv_to_ethtool_adv_x(remote_adv); + tg3_setup_flow_control(tp, local_adv, remote_adv); current_link_up = 1; @@ -4328,9 +5296,9 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset) orig_active_speed = tp->link_config.active_speed; orig_active_duplex = tp->link_config.active_duplex; - if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) && + if (!tg3_flag(tp, HW_AUTONEG) && netif_carrier_ok(tp->dev) && - (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) { + tg3_flag(tp, INIT_COMPLETE)) { mac_status = tr32(MAC_STATUS); mac_status &= (MAC_STATUS_PCS_SYNCED | MAC_STATUS_SIGNAL_DET | @@ -4359,9 +5327,10 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset) udelay(40); current_link_up = 0; + tp->link_config.rmt_adv = 0; mac_status = tr32(MAC_STATUS); - if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) + if (tg3_flag(tp, HW_AUTONEG)) current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status); else current_link_up = tg3_setup_fiber_by_hand(tp, mac_status); @@ -4450,6 +5419,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset) current_link_up = 0; current_speed = SPEED_INVALID; current_duplex = DUPLEX_INVALID; + tp->link_config.rmt_adv = 0; err |= tg3_readphy(tp, MII_BMSR, &bmsr); err |= tg3_readphy(tp, MII_BMSR, &bmsr); @@ -4463,32 +5433,28 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset) err |= tg3_readphy(tp, MII_BMCR, &bmcr); if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset && - (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) { + (tp->phy_flags & 
TG3_PHYFLG_PARALLEL_DETECT)) { /* do nothing, just check for link up at the end */ } else if (tp->link_config.autoneg == AUTONEG_ENABLE) { - u32 adv, new_adv; + u32 adv, newadv; err |= tg3_readphy(tp, MII_ADVERTISE, &adv); - new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF | - ADVERTISE_1000XPAUSE | - ADVERTISE_1000XPSE_ASYM | - ADVERTISE_SLCT); - - new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); + newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF | + ADVERTISE_1000XPAUSE | + ADVERTISE_1000XPSE_ASYM | + ADVERTISE_SLCT); - if (tp->link_config.advertising & ADVERTISED_1000baseT_Half) - new_adv |= ADVERTISE_1000XHALF; - if (tp->link_config.advertising & ADVERTISED_1000baseT_Full) - new_adv |= ADVERTISE_1000XFULL; + newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); + newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising); - if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) { - tg3_writephy(tp, MII_ADVERTISE, new_adv); + if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) { + tg3_writephy(tp, MII_ADVERTISE, newadv); bmcr |= BMCR_ANENABLE | BMCR_ANRESTART; tg3_writephy(tp, MII_BMCR, bmcr); tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); tp->serdes_counter = SERDES_AN_TIMEOUT_5714S; - tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; return err; } @@ -4533,7 +5499,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset) else bmsr &= ~BMSR_LSTATUS; } - tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; } } @@ -4560,7 +5526,10 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset) current_duplex = DUPLEX_FULL; else current_duplex = DUPLEX_HALF; - } else if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { + + tp->link_config.rmt_adv = + mii_adv_to_ethtool_adv_x(remote_adv); + } else if (!tg3_flag(tp, 5780_CLASS)) { /* Link is up via parallel detect */ } else { current_link_up = 0; @@ -4588,7 +5557,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset) netif_carrier_on(tp->dev); else { netif_carrier_off(tp->dev); - tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; } tg3_link_report(tp); } @@ -4612,13 +5581,14 @@ static void tg3_serdes_parallel_detect(struct tg3 *tp) u32 phy1, phy2; /* Select shadow register 0x1f */ - tg3_writephy(tp, 0x1c, 0x7c00); - tg3_readphy(tp, 0x1c, &phy1); + tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00); + tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1); /* Select expansion interrupt status register */ - tg3_writephy(tp, 0x17, 0x0f01); - tg3_readphy(tp, 0x15, &phy2); - tg3_readphy(tp, 0x15, &phy2); + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, + MII_TG3_DSP_EXP1_INT_STAT); + tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); + tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); if ((phy1 & 0x10) && !(phy2 & 0x20)) { /* We have signal detect and not receiving @@ -4629,17 +5599,18 @@ static void tg3_serdes_parallel_detect(struct tg3 *tp) bmcr &= ~BMCR_ANENABLE; bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX; tg3_writephy(tp, MII_BMCR, bmcr); - tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT; + tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT; } } } else if (netif_carrier_ok(tp->dev) && (tp->link_config.autoneg == AUTONEG_ENABLE) && - (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) { + (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) { u32 phy2; /* Select expansion interrupt status register */ - tg3_writephy(tp, 0x17, 0x0f01); - tg3_readphy(tp, 0x15, &phy2); + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, + 
MII_TG3_DSP_EXP1_INT_STAT); + tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); if (phy2 & 0x20) { u32 bmcr; @@ -4647,7 +5618,7 @@ static void tg3_serdes_parallel_detect(struct tg3 *tp) tg3_readphy(tp, MII_BMCR, &bmcr); tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE); - tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; } } @@ -4655,17 +5626,18 @@ static void tg3_serdes_parallel_detect(struct tg3 *tp) static int tg3_setup_phy(struct tg3 *tp, int force_reset) { + u32 val; int err; - if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) err = tg3_setup_fiber_phy(tp, force_reset); - else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) + else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) err = tg3_setup_fiber_mii_phy(tp, force_reset); else err = tg3_setup_copper_phy(tp, force_reset); if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) { - u32 val, scale; + u32 scale; val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK; if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5) @@ -4680,19 +5652,22 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset) tw32(GRC_MISC_CFG, val); } + val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) | + (6 << TX_LENGTHS_IPG_SHIFT); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + val |= tr32(MAC_TX_LENGTHS) & + (TX_LENGTHS_JMB_FRM_LEN_MSK | + TX_LENGTHS_CNT_DWN_VAL_MSK); + if (tp->link_config.active_speed == SPEED_1000 && tp->link_config.active_duplex == DUPLEX_HALF) - tw32(MAC_TX_LENGTHS, - ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | - (6 << TX_LENGTHS_IPG_SHIFT) | - (0xff << TX_LENGTHS_SLOT_TIME_SHIFT))); + tw32(MAC_TX_LENGTHS, val | + (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)); else - tw32(MAC_TX_LENGTHS, - ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | - (6 << TX_LENGTHS_IPG_SHIFT) | - (32 << TX_LENGTHS_SLOT_TIME_SHIFT))); + tw32(MAC_TX_LENGTHS, val | + (32 << TX_LENGTHS_SLOT_TIME_SHIFT)); - if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { + if (!tg3_flag(tp, 5705_PLUS)) { if (netif_carrier_ok(tp->dev)) { tw32(HOSTCC_STAT_COAL_TICKS, tp->coal.stats_block_coalesce_usecs); @@ -4701,8 +5676,8 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset) } } - if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) { - u32 val = tr32(PCIE_PWR_MGMT_THRESH); + if (tg3_flag(tp, ASPM_WORKAROUND)) { + val = tr32(PCIE_PWR_MGMT_THRESH); if (!netif_carrier_ok(tp->dev)) val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) | tp->pwrmgmt_thresh; @@ -4714,6 +5689,128 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset) return err; } +static inline int tg3_irq_sync(struct tg3 *tp) +{ + return tp->irq_sync; +} + +static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len) +{ + int i; + + dst = (u32 *)((u8 *)dst + off); + for (i = 0; i < len; i += sizeof(u32)) + *dst++ = tr32(off + i); +} + +static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs) +{ + tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0); + tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200); + tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0); + tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0); + tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04); + tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80); + tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48); + tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04); + tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20); + tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c); + tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c); + tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c); + tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44); + tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04); + 
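
/* Note on tg3_rd32_loop() above (sketch, not patch content): the
 * destination pointer is advanced by the register offset before the
 * copy, so each register lands at its own offset and the dump buffer
 * mirrors the device register map. Spelled out without the pointer
 * arithmetic, the loop is equivalent to: */
static void example_rd32_loop(struct tg3 *tp, u32 *regs, u32 off, u32 len)
{
	u32 i;

	for (i = 0; i < len; i += sizeof(u32))
		regs[(off + i) / sizeof(u32)] = tr32(off + i);
}
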
tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20); + tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14); + tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08); + tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08); + tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100); + + if (tg3_flag(tp, SUPPORT_MSIX)) + tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180); + + tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10); + tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58); + tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08); + tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08); + tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04); + tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04); + tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04); + tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04); + + if (!tg3_flag(tp, 5705_PLUS)) { + tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04); + tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04); + tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04); + } + + tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110); + tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120); + tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c); + tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04); + tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c); + + if (tg3_flag(tp, NVRAM)) + tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24); +} + +static void tg3_dump_state(struct tg3 *tp) +{ + int i; + u32 *regs; + + regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC); + if (!regs) { + netdev_err(tp->dev, "Failed allocating register dump buffer\n"); + return; + } + + if (tg3_flag(tp, PCI_EXPRESS)) { + /* Read up to but not including private PCI registers */ + for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32)) + regs[i / sizeof(u32)] = tr32(i); + } else + tg3_dump_legacy_regs(tp, regs); + + for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) { + if (!regs[i + 0] && !regs[i + 1] && + !regs[i + 2] && !regs[i + 3]) + continue; + + netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n", + i * 4, + regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]); + } + + kfree(regs); + + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + + /* SW status block */ + netdev_err(tp->dev, + "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n", + i, + tnapi->hw_status->status, + tnapi->hw_status->status_tag, + tnapi->hw_status->rx_jumbo_consumer, + tnapi->hw_status->rx_consumer, + tnapi->hw_status->rx_mini_consumer, + tnapi->hw_status->idx[0].rx_producer, + tnapi->hw_status->idx[0].tx_consumer); + + netdev_err(tp->dev, + "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n", + i, + tnapi->last_tag, tnapi->last_irq_tag, + tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending, + tnapi->rx_rcb_ptr, + tnapi->prodring.rx_std_prod_idx, + tnapi->prodring.rx_std_cons_idx, + tnapi->prodring.rx_jmb_prod_idx, + tnapi->prodring.rx_jmb_cons_idx); + } +} + /* This is called whenever we suspect that the system chipset is re- * ordering the sequence of MMIO to the tx send mailbox. The symptom * is bogus tx completions. 
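*/

/* tg3_tx_avail() just below measures ring occupancy with power-of-two
 * arithmetic: the producer/consumer delta masked by
 * (TG3_TX_RING_SIZE - 1) is correct even across index wraparound.
 * Worked example, assuming a 512-entry ring: */
static inline u32 example_tx_in_flight(u32 prod, u32 cons)
{
	/* prod = 5, cons = 510: (5 - 510) wraps to 0xfffffe07;
	 * masked with 511 that is 7 descriptors still in flight. */
	return (prod - cons) & (512 - 1);
}

/*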
We try to recover by setting the @@ -4722,7 +5819,7 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset) */ static void tg3_tx_recover(struct tg3 *tp) { - BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) || + BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) || tp->write32_tx_mbox == tg3_write_indirect_mbox); netdev_warn(tp->dev, @@ -4732,17 +5829,31 @@ static void tg3_tx_recover(struct tg3 *tp) "and include system chipset information.\n"); spin_lock(&tp->lock); - tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING; + tg3_flag_set(tp, TX_RECOVERY_PENDING); spin_unlock(&tp->lock); } static inline u32 tg3_tx_avail(struct tg3_napi *tnapi) { - smp_mb(); + /* Tell compiler to fetch tx indices from memory. */ + barrier(); return tnapi->tx_pending - ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1)); } +#ifdef BCM_HAS_IEEE1588_SUPPORT +static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock, + struct skb_shared_hwtstamps *timestamp) +{ + u64 ns = timecounter_cyc2time(&tp->clock, hwclock); + timecompare_update(&tp->compare, ns); + + memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps)); + timestamp->hwtstamp = ns_to_ktime(ns); + timestamp->syststamp = timecompare_transform(&tp->compare, ns); +} +#endif /* BCM_HAS_IEEE1588_SUPPORT */ + /* Tigon3 never reports partial packet sends. So we do not * need special logic to handle SKBs that have not had all * of their frags sent yet, like SunGEM does. @@ -4754,14 +5865,15 @@ static void tg3_tx(struct tg3_napi *tnapi) u32 sw_idx = tnapi->tx_cons; struct netdev_queue *txq; int index = tnapi - tp->napi; + unsigned int pkts_compl = 0, bytes_compl = 0; - if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) + if (tg3_flag(tp, ENABLE_TSS)) index--; txq = netdev_get_tx_queue(tp->dev, index); while (sw_idx != hw_idx) { - struct ring_info *ri = &tnapi->tx_buffers[sw_idx]; + struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx]; struct sk_buff *skb = ri->skb; int i, tx_bug = 0; @@ -4770,6 +5882,18 @@ static void tg3_tx(struct tg3_napi *tnapi) return; } +#ifdef BCM_HAS_IEEE1588_SUPPORT + if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) { + struct skb_shared_hwtstamps timestamp; + u64 hwclock = tr32(TG3_TX_TSTAMP_LSB); + hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32; + + tg3_hwclock_to_timestamp(tp, hwclock, &timestamp); + + skb_tstamp_tx(skb, &timestamp); + } +#endif /* BCM_HAS_IEEE1588_SUPPORT */ + pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping), skb_headlen(skb), @@ -4777,6 +5901,12 @@ static void tg3_tx(struct tg3_napi *tnapi) ri->skb = NULL; + while (ri->fragmented) { + ri->fragmented = false; + sw_idx = NEXT_TX(sw_idx); + ri = &tnapi->tx_buffers[sw_idx]; + } + sw_idx = NEXT_TX(sw_idx); for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { @@ -4786,11 +5916,21 @@ static void tg3_tx(struct tg3_napi *tnapi) pci_unmap_page(tp->pdev, dma_unmap_addr(ri, mapping), - skb_shinfo(skb)->frags[i].size, + skb_frag_size(&skb_shinfo(skb)->frags[i]), PCI_DMA_TODEVICE); + + while (ri->fragmented) { + ri->fragmented = false; + sw_idx = NEXT_TX(sw_idx); + ri = &tnapi->tx_buffers[sw_idx]; + } + sw_idx = NEXT_TX(sw_idx); } + pkts_compl++; + bytes_compl += skb->len; + dev_kfree_skb(skb); if (unlikely(tx_bug)) { @@ -4799,6 +5939,8 @@ static void tg3_tx(struct tg3_napi *tnapi) } } + netdev_completed_queue(tp->dev, pkts_compl, bytes_compl); + tnapi->tx_cons = sw_idx; /* Need to make the tx_cons update visible to tg3_start_xmit() @@ -4844,22 +5986,21 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr, u32 opaque_key, u32 dest_idx_unmasked) {
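
	/* Indexing note (sketch, not patch content): this patch converts
	 * "idx % TG3_RX_RING_SIZE" into "idx & tp->rx_std_ring_mask". For
	 * a power-of-two ring the two are identical, but the mask form
	 * avoids a division and lets the ring size be chosen at runtime: */
	{
		u32 ring_size = 512;		/* example size, power of two */
		u32 ring_mask = ring_size - 1;	/* 511 */
		u32 idx = 515;

		BUG_ON((idx & ring_mask) != (idx % ring_size));	/* both 3 */
	}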
struct tg3_rx_buffer_desc *desc; - struct ring_info *map, *src_map; + struct ring_info *map; struct sk_buff *skb; dma_addr_t mapping; int skb_size, dest_idx; - src_map = NULL; switch (opaque_key) { case RXD_OPAQUE_RING_STD: - dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE; + dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask; desc = &tpr->rx_std[dest_idx]; map = &tpr->rx_std_buffers[dest_idx]; skb_size = tp->rx_pkt_map_sz; break; case RXD_OPAQUE_RING_JUMBO: - dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE; + dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask; desc = &tpr->rx_jmb[dest_idx].std; map = &tpr->rx_jmb_buffers[dest_idx]; skb_size = TG3_RX_JMB_MAP_SZ; @@ -4875,11 +6016,13 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr, * Callers depend upon this behavior and assume that * we leave everything unchanged if we fail. */ - skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset); + skb = netdev_alloc_skb(tp->dev, skb_size + TG3_RX_OFFSET(tp) + + TG3_COMPAT_VLAN_ALLOC_LEN); if (skb == NULL) return -ENOMEM; - skb_reserve(skb, tp->rx_offset); + skb_reserve(skb, TG3_RX_OFFSET(tp) + + TG3_COMPAT_VLAN_RESERVE(TG3_TO_INT(skb->data))); mapping = pci_map_single(tp->pdev, skb->data, skb_size, PCI_DMA_FROMDEVICE); @@ -4902,6 +6045,7 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr, * tg3_alloc_rx_skb for full details. */ static void tg3_recycle_rx(struct tg3_napi *tnapi, + struct tg3_rx_prodring_set *spr, struct tg3_rx_prodring_set *dpr, u32 opaque_key, int src_idx, u32 dest_idx_unmasked) @@ -4909,12 +6053,11 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi, struct tg3 *tp = tnapi->tp; struct tg3_rx_buffer_desc *src_desc, *dest_desc; struct ring_info *src_map, *dest_map; - struct tg3_rx_prodring_set *spr = &tp->prodring[0]; int dest_idx; switch (opaque_key) { case RXD_OPAQUE_RING_STD: - dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE; + dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask; dest_desc = &dpr->rx_std[dest_idx]; dest_map = &dpr->rx_std_buffers[dest_idx]; src_desc = &spr->rx_std[src_idx]; @@ -4922,7 +6065,7 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi, break; case RXD_OPAQUE_RING_JUMBO: - dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE; + dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask; dest_desc = &dpr->rx_jmb[dest_idx].std; dest_map = &dpr->rx_jmb_buffers[dest_idx]; src_desc = &spr->rx_jmb[src_idx].std; @@ -4979,7 +6122,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) u32 sw_idx = tnapi->rx_rcb_ptr; u16 hw_idx; int received; - struct tg3_rx_prodring_set *tpr = tnapi->prodring; + struct tg3_rx_prodring_set *tpr = &tnapi->prodring; hw_idx = *(tnapi->rx_rcb_prod_idx); /* @@ -4998,19 +6141,17 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) struct sk_buff *skb; dma_addr_t dma_addr; u32 opaque_key, desc_idx, *post_ptr; - bool hw_vlan __maybe_unused = false; - u16 vtag __maybe_unused = 0; desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; if (opaque_key == RXD_OPAQUE_RING_STD) { - ri = &tp->prodring[0].rx_std_buffers[desc_idx]; + ri = &tnapi->srcprodring->rx_std_buffers[desc_idx]; dma_addr = dma_unmap_addr(ri, mapping); skb = ri->skb; post_ptr = &std_prod_idx; rx_std_posted++; } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { - ri = &tp->prodring[0].rx_jmb_buffers[desc_idx]; + ri = &tnapi->srcprodring->rx_jmb_buffers[desc_idx]; dma_addr = dma_unmap_addr(ri, mapping); skb = ri->skb; post_ptr = &jmb_prod_idx; @@ -5021,12 +6162,23 @@ 
static int tg3_rx(struct tg3_napi *tnapi, int budget) if ((desc->err_vlan & RXD_ERR_MASK) != 0 && (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) { +#ifdef TG3_VMWARE_NETQ_ENABLE + tnapi->netq.stats.rx_errors_sw++; + + if (desc->err_vlan & RXD_ERR_BAD_CRC) + tnapi->netq.stats.rx_crc_errors++; + + if (desc->err_vlan & + (RXD_ERR_TOO_SMALL | + RXD_ERR_HUGE_FRAME)) + tnapi->netq.stats.rx_frame_errors++; +#endif drop_it: - tg3_recycle_rx(tnapi, tpr, opaque_key, - desc_idx, *post_ptr); + tg3_recycle_rx(tnapi, tnapi->srcprodring, tpr, + opaque_key, desc_idx, *post_ptr); drop_it_no_recycle: /* Other statistics kept track of by card. */ - tp->net_stats.rx_dropped++; + tp->rx_dropped++; goto next_pkt; } @@ -5055,15 +6207,15 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) } else { struct sk_buff *copy_skb; - tg3_recycle_rx(tnapi, tpr, opaque_key, - desc_idx, *post_ptr); + tg3_recycle_rx(tnapi, tnapi->srcprodring, tpr, + opaque_key, desc_idx, *post_ptr); - copy_skb = netdev_alloc_skb(tp->dev, len + VLAN_HLEN + + copy_skb = netdev_alloc_skb(tp->dev, len + TG3_RAW_IP_ALIGN); if (copy_skb == NULL) goto drop_it_no_recycle; - skb_reserve(copy_skb, TG3_RAW_IP_ALIGN + VLAN_HLEN); + skb_reserve(copy_skb, TG3_RAW_IP_ALIGN); skb_put(copy_skb, len); pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); skb_copy_from_linear_data(skb, copy_skb->data, len); @@ -5073,47 +6225,53 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) skb = copy_skb; } - if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) && +#ifdef BCM_HAS_IEEE1588_SUPPORT + if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) == RXD_FLAG_PTPSTAT_PTPV1 || + (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) == RXD_FLAG_PTPSTAT_PTPV2) { + u64 hwclock = tr32(TG3_RX_TSTAMP_LSB); + hwclock |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32; + tg3_hwclock_to_timestamp(tp, hwclock, + skb_hwtstamps(skb)); + } +#endif /* BCM_HAS_IEEE1588_SUPPORT */ + + if ((tp->dev->features & NETIF_F_RXCSUM) && (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) && (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK) >> RXD_TCPCSUM_SHIFT) == 0xffff)) skb->ip_summed = CHECKSUM_UNNECESSARY; else - skb->ip_summed = CHECKSUM_NONE; + skb_checksum_none_assert(skb); skb->protocol = eth_type_trans(skb, tp->dev); if (len > (tp->dev->mtu + ETH_HLEN) && skb->protocol != htons(ETH_P_8021Q)) { dev_kfree_skb(skb); - goto next_pkt; + goto drop_it_no_recycle; } - if (desc->type_flags & RXD_FLAG_VLAN && +#ifndef BCM_HAS_VLAN_HWACCEL_PUT_TAG + if ((desc->type_flags & RXD_FLAG_VLAN) && !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) { - vtag = desc->err_vlan & RXD_VLAN_MASK; -#if TG3_VLAN_TAG_USED - if (tp->vlgrp) - hw_vlan = true; - else -#endif - { - struct vlan_ethhdr *ve = (struct vlan_ethhdr *) - __skb_push(skb, VLAN_HLEN); - - memmove(ve, skb->data + VLAN_HLEN, - ETH_ALEN * 2); - ve->h_vlan_proto = htons(ETH_P_8021Q); - ve->h_vlan_TCI = htons(vtag); + if (!tp->vlgrp) { + tg3_vlan_hwaccel_put_tag(skb, + desc->err_vlan & RXD_VLAN_MASK); + napi_gro_receive(&tnapi->napi, skb); + } else { + vlan_gro_receive(&tnapi->napi, tp->vlgrp, + desc->err_vlan & RXD_VLAN_MASK, skb); } - } - -#if TG3_VLAN_TAG_USED - if (hw_vlan) - vlan_gro_receive(&tnapi->napi, tp->vlgrp, vtag, skb); - else -#endif + } else napi_gro_receive(&tnapi->napi, skb); +#else + if (desc->type_flags & RXD_FLAG_VLAN && + !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) + __vlan_hwaccel_put_tag(skb, + desc->err_vlan & RXD_VLAN_MASK); + + napi_gro_receive(&tnapi->napi, skb); +#endif /* BCM_HAS_VLAN_HWACCEL_PUT_TAG */ #if (LINUX_VERSION_CODE < 0x02061D) /* 2.6.29 */ 
tp->dev->last_rx = jiffies; @@ -5121,11 +6279,18 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) received++; budget--; +#ifdef TG3_VMWARE_NETQ_ENABLE + /* Update queue specific stats */ + tnapi->netq.stats.rx_packets_sw++; + tnapi->netq.stats.rx_bytes_sw += len; +#endif + next_pkt: (*post_ptr)++; if (unlikely(rx_std_posted >= tp->rx_std_max_post)) { - tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE; + tpr->rx_std_prod_idx = std_prod_idx & + tp->rx_std_ring_mask; tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx); work_mask &= ~RXD_OPAQUE_RING_STD; @@ -5133,7 +6298,7 @@ next_pkt: } next_pkt_nopost: sw_idx++; - sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1); + sw_idx &= tp->rx_ret_ring_mask; /* Refresh hw_idx to see if there is new work */ if (sw_idx == hw_idx) { @@ -5147,20 +6312,29 @@ next_pkt_nopost: tw32_rx_mbox(tnapi->consmbox, sw_idx); /* Refill RX ring(s). */ - if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) { - /* Some platforms need to sync memory here */ + if (!tg3_flag(tp, ENABLE_RSS)) { + /* Sync BD data before updating mailbox */ wmb(); if (work_mask & RXD_OPAQUE_RING_STD) { - tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE; + tpr->rx_std_prod_idx = std_prod_idx & + tp->rx_std_ring_mask; +#ifdef TG3_VMWARE_NETQ_ENABLE + tw32_rx_mbox(tpr->rx_std_mbox, tpr->rx_std_prod_idx); +#else tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx); +#endif } if (work_mask & RXD_OPAQUE_RING_JUMBO) { - tpr->rx_jmb_prod_idx = jmb_prod_idx % - TG3_RX_JUMBO_RING_SIZE; + tpr->rx_jmb_prod_idx = jmb_prod_idx & + tp->rx_jmb_ring_mask; +#ifdef TG3_VMWARE_NETQ_ENABLE + tw32_rx_mbox(tpr->rx_jmb_mbox, tpr->rx_jmb_prod_idx); +#else tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx); +#endif } mmiowb(); } else if (work_mask) { @@ -5169,31 +6343,29 @@ next_pkt_nopost: */ smp_wmb(); - tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE; - tpr->rx_jmb_prod_idx = jmb_prod_idx % TG3_RX_JUMBO_RING_SIZE; + tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask; + tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask; - if (tnapi != &tp->napi[1]) + if (tnapi != &tp->napi[1]) { + tp->rx_refill = true; tg3_netif_rx_schedule(tp->dev, &tp->napi[1].napi); + } } return received; } -#ifdef TG3_NAPI - static void tg3_poll_link(struct tg3 *tp) { /* handle link change and other phy events */ - if (!(tp->tg3_flags & - (TG3_FLAG_USE_LINKCHG_REG | - TG3_FLAG_POLL_SERDES))) { + if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) { struct tg3_hw_status *sblk = tp->napi[0].hw_status; if (sblk->status & SD_STATUS_LINK_CHG) { sblk->status = SD_STATUS_UPDATED | (sblk->status & ~SD_STATUS_LINK_CHG); spin_lock(&tp->lock); - if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { + if (tg3_flag(tp, USE_PHYLIB)) { tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED | MAC_STATUS_CFG_CHANGED | @@ -5207,6 +6379,66 @@ static void tg3_poll_link(struct tg3 *tp) } } +static inline void tg3_reset_task_schedule(struct tg3 *tp) +{ + if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags)) + schedule_work(&tp->reset_task); +} + +static inline void tg3_reset_task_cancel(struct tg3 *tp) +{ +#if (LINUX_VERSION_CODE >= 0x20616) || defined (__VMKLNX__) + cancel_work_sync(&tp->reset_task); +#else + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(1); +#endif + tg3_flag_clear(tp, RESET_TASK_PENDING); + tg3_flag_clear(tp, TX_RECOVERY_PENDING); +} + +static void tg3_process_error(struct tg3 *tp) +{ + u32 val; + bool real_error = false; + + if (tg3_flag(tp, ERROR_PROCESSED)) + return; + + /* 
Check Flow Attention register */ + val = tr32(HOSTCC_FLOW_ATTN); +#ifdef TG3_VMWARE_NETQ_ENABLE + /* Shutting down NetQueues cause permissible RCB errors */ + val &= ~(HOSTCC_FLOW_ATTN_MBUF_LWM | + HOSTCC_FLOW_ATTN_RCB_MISCFG | + HOSTCC_FLOW_ATTN_RCV_BDI_ATTN); +#endif + if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) { + netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n" ); + real_error = true; + } + + if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) { + netdev_err(tp->dev, "MSI Status error. Resetting chip.\n" ); + real_error = true; + } + + if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) { + netdev_err(tp->dev, "DMA Status error. Resetting chip.\n" ); + real_error = true; + } + + if (!real_error) + return; + + tg3_dump_state(tp); + + tg3_flag_set(tp, ERROR_PROCESSED); + tg3_reset_task_schedule(tp); +} + +#ifdef TG3_NAPI + static int tg3_rx_prodring_xfer(struct tg3 *tp, struct tg3_rx_prodring_set *dpr, struct tg3_rx_prodring_set *spr) @@ -5228,9 +6460,11 @@ static int tg3_rx_prodring_xfer(struct tg3 *tp, if (spr->rx_std_cons_idx < src_prod_idx) cpycnt = src_prod_idx - spr->rx_std_cons_idx; else - cpycnt = TG3_RX_RING_SIZE - spr->rx_std_cons_idx; + cpycnt = tp->rx_std_ring_mask + 1 - + spr->rx_std_cons_idx; - cpycnt = min(cpycnt, TG3_RX_RING_SIZE - dpr->rx_std_prod_idx); + cpycnt = min(cpycnt, + tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx); si = spr->rx_std_cons_idx; di = dpr->rx_std_prod_idx; @@ -5264,10 +6498,10 @@ static int tg3_rx_prodring_xfer(struct tg3 *tp, dbd->addr_lo = sbd->addr_lo; } - spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) % - TG3_RX_RING_SIZE; - dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) % - TG3_RX_RING_SIZE; + spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) & + tp->rx_std_ring_mask; + dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) & + tp->rx_std_ring_mask; } while (1) { @@ -5284,10 +6518,11 @@ static int tg3_rx_prodring_xfer(struct tg3 *tp, if (spr->rx_jmb_cons_idx < src_prod_idx) cpycnt = src_prod_idx - spr->rx_jmb_cons_idx; else - cpycnt = TG3_RX_JUMBO_RING_SIZE - spr->rx_jmb_cons_idx; + cpycnt = tp->rx_jmb_ring_mask + 1 - + spr->rx_jmb_cons_idx; cpycnt = min(cpycnt, - TG3_RX_JUMBO_RING_SIZE - dpr->rx_jmb_prod_idx); + tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx); si = spr->rx_jmb_cons_idx; di = dpr->rx_jmb_prod_idx; @@ -5321,10 +6556,10 @@ static int tg3_rx_prodring_xfer(struct tg3 *tp, dbd->addr_lo = sbd->addr_lo; } - spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) % - TG3_RX_JUMBO_RING_SIZE; - dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) % - TG3_RX_JUMBO_RING_SIZE; + spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) & + tp->rx_jmb_ring_mask; + dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) & + tp->rx_jmb_ring_mask; } return err; @@ -5337,7 +6572,7 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget) /* run TX completion thread */ if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) { tg3_tx(tnapi); - if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) + if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) return work_done; } @@ -5348,15 +6583,16 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget) if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr) work_done += tg3_rx(tnapi, budget - work_done); - if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) { - struct tg3_rx_prodring_set *dpr = &tp->prodring[0]; + if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) { + struct tg3_rx_prodring_set *dpr = 
&tp->napi[0].prodring; int i, err = 0; u32 std_prod_idx = dpr->rx_std_prod_idx; u32 jmb_prod_idx = dpr->rx_jmb_prod_idx; - for (i = 1; i < tp->irq_cnt; i++) + tp->rx_refill = false; + for (i = 1; i < tp->rxq_cnt + 1; i++) err |= tg3_rx_prodring_xfer(tp, dpr, - tp->napi[i].prodring); + &tp->napi[i].prodring); wmb(); @@ -5387,7 +6623,7 @@ static int tg3_poll_msix(struct napi_struct *napi, int budget) while (1) { work_done = tg3_poll_work(tnapi, work_done, budget); - if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) + if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) goto tx_recovery; if (unlikely(work_done >= budget)) @@ -5404,9 +6640,25 @@ static int tg3_poll_msix(struct napi_struct *napi, int budget) /* check for RX/TX work to do */ if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons && *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) { + + /* This test here is not race free, but will reduce + * the number of interrupts by looping again. + */ + if (tnapi == &tp->napi[1] && tp->rx_refill) + continue; + tg3_netif_rx_complete(tp->dev, napi); /* Reenable interrupts. */ tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24); + + /* This test here is synchronized by napi_schedule() + * and napi_complete() to close the race condition. + */ + if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) { + tw32(HOSTCC_MODE, tp->coalesce_mode | + HOSTCC_MODE_ENABLE | + tnapi->coal_now); + } mmiowb(); break; } @@ -5417,7 +6669,7 @@ static int tg3_poll_msix(struct napi_struct *napi, int budget) tx_recovery: /* work_done is guaranteed to be less than budget. */ tg3_netif_rx_complete(tp->dev, napi); - schedule_work(&tp->reset_task); + tg3_reset_task_schedule(tp); return work_done; } @@ -5429,17 +6681,20 @@ static int tg3_poll(struct napi_struct *napi, int budget) struct tg3_hw_status *sblk = tnapi->hw_status; while (1) { + if (sblk->status & SD_STATUS_ERROR) + tg3_process_error(tp); + tg3_poll_link(tp); work_done = tg3_poll_work(tnapi, work_done, budget); - if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) + if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) goto tx_recovery; if (unlikely(work_done >= budget)) break; - if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) { + if (tg3_flag(tp, TAGGED_STATUS)) { /* tp->last_tag is used in tg3_int_reenable() below * to tell the hw how much work has been processed, * so we must read it before checking for more work. @@ -5462,7 +6717,7 @@ static int tg3_poll(struct napi_struct *napi, int budget) tx_recovery: /* work_done is guaranteed to be less than budget. 
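*/

/* NAPI budget contract, restated (illustrative; the helper names here
 * are hypothetical): a poll handler may consume at most "budget"
 * packets, and only when it does less may it complete NAPI and
 * re-enable the device interrupt. The poll loops in this file all
 * follow this shape: */
static int example_napi_poll(struct napi_struct *napi, int budget)
{
	int work_done = example_process_rx(napi, budget);	/* hypothetical */

	if (work_done < budget) {
		napi_complete(napi);
		example_unmask_irq(napi);			/* hypothetical */
	}
	return work_done;
}

/*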
*/ tg3_netif_rx_complete(tp->dev, napi); - schedule_work(&tp->reset_task); + tg3_reset_task_schedule(tp); return work_done; } @@ -5475,33 +6730,17 @@ static int tg3_poll(struct net_device *netdev, int *budget) struct tg3_hw_status *sblk = tnapi->hw_status; int done; - /* handle link change and other phy events */ - if (!(tp->tg3_flags & - (TG3_FLAG_USE_LINKCHG_REG | - TG3_FLAG_POLL_SERDES))) { - if (sblk->status & SD_STATUS_LINK_CHG) { - sblk->status = SD_STATUS_UPDATED | - (sblk->status & ~SD_STATUS_LINK_CHG); - spin_lock(&tp->lock); - if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { - tw32_f(MAC_STATUS, - (MAC_STATUS_SYNC_CHANGED | - MAC_STATUS_CFG_CHANGED | - MAC_STATUS_MI_COMPLETION | - MAC_STATUS_LNKSTATE_CHANGED)); - udelay(40); - } else - tg3_setup_phy(tp, 0); - spin_unlock(&tp->lock); - } - } + if (sblk->status & SD_STATUS_ERROR) + tg3_process_error(tp); + + tg3_poll_link(tp); /* run TX completion thread */ if (sblk->idx[0].tx_consumer != tnapi->tx_cons) { tg3_tx(tnapi); - if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) { + if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) { netif_rx_complete(netdev); - schedule_work(&tp->reset_task); + tg3_reset_task_schedule(tp); return 0; } } @@ -5523,7 +6762,7 @@ static int tg3_poll(struct net_device *netdev, int *budget) netdev->quota -= work_done; } - if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) { + if (tg3_flag(tp, TAGGED_STATUS)) { tnapi->last_tag = sblk->status_tag; rmb(); } else @@ -5541,6 +6780,74 @@ static int tg3_poll(struct net_device *netdev, int *budget) #endif /* TG3_NAPI */ +static void tg3_napi_disable(struct tg3 *tp) +{ +#ifdef TG3_NAPI + int i; + + for (i = tp->irq_cnt - 1; i >= 0; i--) + napi_disable(&tp->napi[i].napi); +#else + netif_poll_disable(tp->dev); +#endif +} + +static void tg3_napi_init(struct tg3 *tp) +{ +#ifdef TG3_NAPI + int i; + + netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64); + for (i = 1; i < tp->irq_cnt; i++) + netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64); +#else + tp->dev->poll = tg3_poll; + tp->dev->weight = 64; +#endif +} + +static void tg3_napi_fini(struct tg3 *tp) +{ +#ifdef TG3_NAPI + int i; + + for (i = 0; i < tp->irq_cnt; i++) + netif_napi_del(&tp->napi[i].napi); +#endif +} + +static void tg3_napi_enable(struct tg3 *tp) +{ +#ifdef TG3_NAPI + int i; + + for (i = 0; i < tp->irq_cnt; i++) + napi_enable(&tp->napi[i].napi); +#else + netif_poll_enable(tp->dev); +#endif +} + +static inline void tg3_netif_stop(struct tg3 *tp) +{ + tp->dev->trans_start = jiffies; /* prevent tx timeout */ + tg3_napi_disable(tp); + netif_tx_disable(tp->dev); +} + +static inline void tg3_netif_start(struct tg3 *tp) +{ + /* NOTE: unconditional netif_tx_wake_all_queues is only + * appropriate so long as all callers are assured to + * have free tx slots (such as after tg3_init_hw) + */ + netif_tx_wake_all_queues(tp->dev); + + tg3_napi_enable(tp); + tp->napi[0].hw_status->status |= SD_STATUS_UPDATED; + tg3_enable_ints(tp); +} + static void tg3_irq_quiesce(struct tg3 *tp) { #if (LINUX_VERSION_CODE >= 0x2051c) @@ -5560,11 +6867,6 @@ static void tg3_irq_quiesce(struct tg3 *tp) #endif } -static inline int tg3_irq_sync(struct tg3 *tp) -{ - return tp->irq_sync; -} - /* Fully shutdown all tg3 driver activity elsewhere in the system. * If irq_sync is non-zero, then the IRQ handler must be synchronized * with as well. 
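*/

/* Sketch of the quiesce handshake (tg3_irq_quiesce() is only partly
 * visible in these hunks; the irq_vec field is assumed): setting
 * irq_sync makes the interrupt handlers bail out early through
 * tg3_irq_sync(), and synchronize_irq() then waits out any handler
 * instance already running. */
static void example_irq_quiesce(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 1;
	smp_mb();	/* publish irq_sync before waiting on handlers */

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);	/* assumed field */
}

/*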
Most of the time, this is not necessary except when @@ -5627,7 +6929,7 @@ static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs) * NIC to stop sending us irqs, engaging "in-intr-handler" * event coalescing. */ - tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); + tw32_mailbox(tnapi->int_mbox, 0x00000001); if (likely(!tg3_irq_sync(tp))) tg3_netif_rx_schedule(tp->dev, &tnapi->napi); @@ -5651,7 +6953,7 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs) * interrupt is ours and will flush the status block. */ if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) { - if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) || + if (tg3_flag(tp, CHIP_RESETTING) || (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { handled = 0; goto out; @@ -5704,7 +7006,7 @@ static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *r * interrupt is ours and will flush the status block. */ if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) { - if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) || + if (tg3_flag(tp, CHIP_RESETTING) || (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { handled = 0; goto out; @@ -5829,41 +7131,48 @@ static void tg3_reset_task(void *_data) struct tg3 *tp = _data; #endif int err; - unsigned int restart_timer; tg3_full_lock(tp, 0); if (!netif_running(tp->dev)) { + tg3_flag_clear(tp, RESET_TASK_PENDING); tg3_full_unlock(tp); return; } tg3_full_unlock(tp); +#ifdef TG3_VMWARE_NETQ_ENABLE + /* Prevent any netqueue operations while we are resetting. */ + if (tg3_flag(tp, ENABLE_IOV)) + rtnl_lock(); +#endif + tg3_phy_stop(tp); tg3_netif_stop(tp); - tg3_full_lock(tp, 1); +#ifdef TG3_VMWARE_NETQ_ENABLE + tg3_netq_invalidate_state(tp); +#endif - restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER; - tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER; + tg3_full_lock(tp, 1); - if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) { + if (tg3_flag(tp, TX_RECOVERY_PENDING)) { tp->write32_tx_mbox = tg3_write32_tx_mbox; tp->write32_rx_mbox = tg3_write_flush_reg32; - tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER; - tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING; + tg3_flag_set(tp, MBOX_WRITE_REORDER); + tg3_flag_clear(tp, TX_RECOVERY_PENDING); } tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); - err = tg3_init_hw(tp, 1); #if defined(__VMKLNX__) if (err) { if (printk_ratelimit()) { printk(KERN_ERR "tg3_init_hw failed in tg3_init_task\n"); } + tp->irq_sync = 0; tg3_napi_enable(tp); goto out; } @@ -5874,22 +7183,18 @@ static void tg3_reset_task(void *_data) tg3_netif_start(tp); - if (restart_timer) - mod_timer(&tp->timer, jiffies + 1); - out: tg3_full_unlock(tp); if (!err) tg3_phy_start(tp); -} -static void tg3_dump_short_state(struct tg3 *tp) -{ - netdev_err(tp->dev, "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n", - tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS)); - netdev_err(tp->dev, "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n", - tr32(RDMAC_STATUS), tr32(WDMAC_STATUS)); +#ifdef TG3_VMWARE_NETQ_ENABLE + if (tg3_flag(tp, ENABLE_IOV)) + rtnl_unlock(); +#endif + + tg3_flag_clear(tp, RESET_TASK_PENDING); } static void tg3_tx_timeout(struct net_device *dev) @@ -5898,10 +7203,17 @@ static void tg3_tx_timeout(struct net_device *dev) if (netif_msg_tx_err(tp)) { netdev_err(dev, "transmit timed out, resetting\n"); - tg3_dump_short_state(tp); + tg3_dump_state(tp); +#if defined(__VMKLNX__) + if (psod_on_tx_timeout) { + msleep(100); + BUG_ON(1); + return; + } +#endif } - schedule_work(&tp->reset_task); + tg3_reset_task_schedule(tp); } /* Test for DMA buffers 
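/*
 * tg3_reset_task_schedule(), which replaces the raw schedule_work()
 * calls above, is defined outside this hunk. Given that tg3_reset_task()
 * now clears RESET_TASK_PENDING on every exit path, a plausible sketch
 * is a test-and-set guard that queues the work at most once:
 */
static inline void tg3_reset_task_schedule_sketch(struct tg3 *tp)
{
        /* assumption: RESET_TASK_PENDING sits in the tg3_flags bitmap
         * manipulated by the tg3_flag_*() helpers used in this patch */
        if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
                schedule_work(&tp->reset_task);
}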
crossing any 4GB boundaries: 4G, 8G, etc */ @@ -5909,8 +7221,7 @@ static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len) { u32 base = (u32) mapping & 0xffffffff; - return ((base > 0xffffdcc0) && - (base + len + 8 < base)); + return (base > 0xffffdcc0) && (base + len + 8 < base); } /* Test for DMA addresses > 40-bit */ @@ -5918,26 +7229,136 @@ static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping, int len) { #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64) - if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) - return (((u64) mapping + len) > DMA_BIT_MASK(40)); + if (tg3_flag(tp, 40BIT_DMA_BUG)) + return ((u64) mapping + len) > DMA_BIT_MASK(40); return 0; #else return 0; #endif } -static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32); +static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd, + dma_addr_t mapping, u32 len, u32 flags, + u32 mss, u32 vlan) +{ + txbd->addr_hi = ((u64) mapping >> 32); + txbd->addr_lo = ((u64) mapping & 0xffffffff); + txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff); + txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT); +} + +static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget, + dma_addr_t map, u32 len, u32 flags, + u32 mss, u32 vlan) +{ + struct tg3 *tp = tnapi->tp; + bool hwbug = false; + u32 dma_limit = tp->dma_limit; + + if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8) + hwbug = true; + + if (tg3_4g_overflow_test(map, len)) + hwbug = true; + + if (tg3_40bit_overflow_test(tp, map, len)) + hwbug = true; + + if (tg3_flag(tp, NO_TSO_BD_LIMIT) && mss) + dma_limit = 0; + + if (dma_limit) { + u32 prvidx = *entry; + u32 tmp_flag = flags & ~TXD_FLAG_END; + while (len > dma_limit && *budget) { + u32 frag_len = dma_limit; + len -= dma_limit; + + /* Avoid the 8byte DMA problem */ + if (len <= 8) { + len += dma_limit / 2; + frag_len = dma_limit / 2; + } + + tnapi->tx_buffers[*entry].fragmented = true; + + tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, + frag_len, tmp_flag, mss, vlan); + *budget -= 1; + prvidx = *entry; + *entry = NEXT_TX(*entry); + + map += frag_len; + } + + if (len) { + if (*budget) { + tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, + len, flags, mss, vlan); + *budget -= 1; + *entry = NEXT_TX(*entry); + } else { + hwbug = true; + tnapi->tx_buffers[prvidx].fragmented = false; + } + } + } else { + tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, + len, flags, mss, vlan); + *entry = NEXT_TX(*entry); + } + + return hwbug; +} + +static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last) +{ + int i; + struct sk_buff *skb; + struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry]; + + skb = txb->skb; + txb->skb = NULL; + + pci_unmap_single(tnapi->tp->pdev, + dma_unmap_addr(txb, mapping), + skb_headlen(skb), + PCI_DMA_TODEVICE); + + while (txb->fragmented) { + txb->fragmented = false; + entry = NEXT_TX(entry); + txb = &tnapi->tx_buffers[entry]; + } + + for (i = 0; i <= last; i++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + entry = NEXT_TX(entry); + txb = &tnapi->tx_buffers[entry]; + + pci_unmap_page(tnapi->tp->pdev, + dma_unmap_addr(txb, mapping), + skb_frag_size(frag), PCI_DMA_TODEVICE); + + while (txb->fragmented) { + txb->fragmented = false; + entry = NEXT_TX(entry); + txb = &tnapi->tx_buffers[entry]; + } + } +} /* Workaround 4GB and 40-bit hardware DMA bugs. 
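/*
 * Worked example of the short-DMA split inside tg3_tx_frag_set() above,
 * assuming dma_limit = 4096 and a 4100-byte fragment. A straight split
 * would leave a 4-byte trailing BD, which trips the SHORT_DMA_BUG
 * erratum (BDs of 8 bytes or less), so the split point is pulled back:
 */
static void short_dma_split_example(void)
{
        u32 dma_limit = 4096, len = 4100, frag_len;

        frag_len = dma_limit;             /* would leave a 4-byte tail */
        len -= dma_limit;                 /* len = 4 */
        if (len <= 8) {                   /* tail too short for the HW */
                len += dma_limit / 2;     /* len = 2052 */
                frag_len = dma_limit / 2; /* first BD shrinks to 2048 */
        }
        /* result: BDs of 2048 and 2052 bytes instead of 4096 and 4 */
}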
*/ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi, - struct sk_buff *skb, u32 last_plus_one, - u32 *start, u32 base_flags, u32 mss) + struct sk_buff **pskb, + u32 *entry, u32 *budget, + u32 base_flags, u32 mss, u32 vlan) { struct tg3 *tp = tnapi->tp; - struct sk_buff *new_skb; + struct sk_buff *new_skb, *skb = *pskb; dma_addr_t new_addr = 0; - u32 entry = *start; - int i, ret = 0; + int ret = 0; if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) new_skb = skb_copy(skb, GFP_ATOMIC); @@ -5953,280 +7374,57 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi, ret = -1; } else { /* New SKB is guaranteed to be linear. */ - entry = *start; new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len, PCI_DMA_TODEVICE); /* Make sure the mapping succeeded */ if (tg3_pci_dma_mapping_error(tp->pdev, new_addr)) { - ret = -1; dev_kfree_skb(new_skb); - new_skb = NULL; - - /* Make sure new skb does not cross any 4G boundaries. - * Drop the packet if it does. - */ - } else if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) && - tg3_4g_overflow_test(new_addr, new_skb->len)) { - pci_unmap_single(tp->pdev, new_addr, new_skb->len, - PCI_DMA_TODEVICE); ret = -1; - dev_kfree_skb(new_skb); - new_skb = NULL; } else { - tg3_set_txd(tnapi, entry, new_addr, new_skb->len, - base_flags, 1 | (mss << 1)); - *start = NEXT_TX(entry); - } - } + u32 save_entry = *entry; - /* Now clean up the sw ring entries. */ - i = 0; - while (entry != last_plus_one) { - int len; + base_flags |= TXD_FLAG_END; - if (i == 0) - len = skb_headlen(skb); - else - len = skb_shinfo(skb)->frags[i-1].size; + tnapi->tx_buffers[*entry].skb = new_skb; + dma_unmap_addr_set(&tnapi->tx_buffers[*entry], + mapping, new_addr); - pci_unmap_single(tp->pdev, - dma_unmap_addr(&tnapi->tx_buffers[entry], - mapping), - len, PCI_DMA_TODEVICE); - if (i == 0) { - tnapi->tx_buffers[entry].skb = new_skb; - dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, - new_addr); - } else { - tnapi->tx_buffers[entry].skb = NULL; + if (tg3_tx_frag_set(tnapi, entry, budget, new_addr, + new_skb->len, base_flags, + mss, vlan)) { + tg3_tx_skb_unmap(tnapi, save_entry, -1); + dev_kfree_skb(new_skb); + ret = -1; + } } - entry = NEXT_TX(entry); - i++; } dev_kfree_skb(skb); - + *pskb = new_skb; return ret; } -static void tg3_set_txd(struct tg3_napi *tnapi, int entry, - dma_addr_t mapping, int len, u32 flags, - u32 mss_and_is_end) -{ - struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry]; - int is_end = (mss_and_is_end & 0x1); - u32 mss = (mss_and_is_end >> 1); - u32 vlan_tag = 0; - - if (is_end) - flags |= TXD_FLAG_END; - if (flags & TXD_FLAG_VLAN) { - vlan_tag = flags >> 16; - flags &= 0xffff; - } - vlan_tag |= (mss << TXD_MSS_SHIFT); - - txd->addr_hi = ((u64) mapping >> 32); - txd->addr_lo = ((u64) mapping & 0xffffffff); - txd->len_flags = (len << TXD_LEN_SHIFT) | flags; - txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT; -} +#if TG3_TSO_SUPPORT != 0 +static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *); -/* hard_start_xmit for devices that don't have any bugs and - * support TG3_FLG2_HW_TSO_2 and TG3_FLG2_HW_TSO_3 only. +/* Use GSO to workaround a rare TSO bug that may be triggered when the + * TSO header is greater than 80 bytes. 
*/ -static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, - struct net_device *dev) +static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb) { - struct tg3 *tp = netdev_priv(dev); - u32 len, entry, base_flags, mss; - dma_addr_t mapping; - struct tg3_napi *tnapi; - struct netdev_queue *txq; - unsigned int i, last; + struct sk_buff *segs, *nskb; + u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3; - txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); - tnapi = &tp->napi[skb_get_queue_mapping(skb)]; - if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) - tnapi++; + /* Estimate the number of fragments in the worst case */ + if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) { + netif_stop_queue(tp->dev); - /* We are running in BH disabled context with netif_tx_lock - * and TX reclaim runs via tp->napi.poll inside of a software - * interrupt. Furthermore, IRQ processing runs lockless so we have - * no IRQ context deadlocks to worry about either. Rejoice! - */ - if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) { - if (!netif_tx_queue_stopped(txq)) { - netif_tx_stop_queue(txq); - - /* This is a hard error, log it. */ - netdev_err(dev, - "BUG! Tx Ring full when queue awake!\n"); - } - return NETDEV_TX_BUSY; - } - - entry = tnapi->tx_prod; - base_flags = 0; -#if TG3_TSO_SUPPORT != 0 - mss = 0; - if ((mss = skb_shinfo(skb)->gso_size) != 0) { - int tcp_opt_len, ip_tcp_len; - u32 hdrlen; - - if (skb_header_cloned(skb) && - pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { - dev_kfree_skb(skb); - goto out_unlock; - } - -#ifndef BCM_NO_TSO6 - if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) - hdrlen = skb_headlen(skb) - ETH_HLEN; - else -#endif - { - struct iphdr *iph = ip_hdr(skb); - - tcp_opt_len = tcp_optlen(skb); - ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr); - - iph->check = 0; - iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len); - hdrlen = ip_tcp_len + tcp_opt_len; - } - - if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) { - mss |= (hdrlen & 0xc) << 12; - if (hdrlen & 0x10) - base_flags |= 0x00000010; - base_flags |= (hdrlen & 0x3e0) << 5; - } else - mss |= hdrlen << 9; - - base_flags |= (TXD_FLAG_CPU_PRE_DMA | - TXD_FLAG_CPU_POST_DMA); - - tcp_hdr(skb)->check = 0; - - } else if (skb->ip_summed == CHECKSUM_PARTIAL) { - base_flags |= TXD_FLAG_TCPUDP_CSUM; - } - -#else - mss = 0; - if (skb->ip_summed == CHECKSUM_PARTIAL) - base_flags |= TXD_FLAG_TCPUDP_CSUM; -#endif -#if TG3_VLAN_TAG_USED - if (tp->vlgrp != NULL && vlan_tx_tag_present(skb)) - base_flags |= (TXD_FLAG_VLAN | - (vlan_tx_tag_get(skb) << 16)); -#endif - - len = skb_headlen(skb); - - /* Queue skb data, a.k.a. the main skb fragment. */ - mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE); - if (tg3_pci_dma_mapping_error(tp->pdev, mapping)) { - dev_kfree_skb(skb); - goto out_unlock; - } - - tnapi->tx_buffers[entry].skb = skb; - dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping); - - if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) && - !mss && skb->len > ETH_DATA_LEN) - base_flags |= TXD_FLAG_JMB_PKT; - - tg3_set_txd(tnapi, entry, mapping, len, base_flags, - (skb_shinfo(skb)->nr_frags == 0) | (mss << 1)); - - entry = NEXT_TX(entry); - - /* Now loop through additional data fragments, and queue them. 
*/ - if (skb_shinfo(skb)->nr_frags > 0) { - last = skb_shinfo(skb)->nr_frags - 1; - for (i = 0; i <= last; i++) { - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - - len = frag->size; - mapping = pci_map_page(tp->pdev, - frag->page, - frag->page_offset, - len, PCI_DMA_TODEVICE); - if (tg3_pci_dma_mapping_error(tp->pdev, mapping)) - goto dma_error; - - tnapi->tx_buffers[entry].skb = NULL; - dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, - mapping); - - tg3_set_txd(tnapi, entry, mapping, len, - base_flags, (i == last) | (mss << 1)); - - entry = NEXT_TX(entry); - } - } - - /* Some platforms need to sync memory here */ - wmb(); - - /* Packets are ready, update Tx producer idx local and on card. */ - tw32_tx_mbox(tnapi->prodmbox, entry); - - tnapi->tx_prod = entry; - if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) { - netif_tx_stop_queue(txq); - if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)) - netif_tx_wake_queue(txq); - } - -out_unlock: - mmiowb(); - - tg3_update_trans_start(dev); - - return NETDEV_TX_OK; - -dma_error: - last = i; - entry = tnapi->tx_prod; - tnapi->tx_buffers[entry].skb = NULL; - pci_unmap_single(tp->pdev, - dma_unmap_addr(&tnapi->tx_buffers[entry], mapping), - skb_headlen(skb), - PCI_DMA_TODEVICE); - for (i = 0; i <= last; i++) { - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - entry = NEXT_TX(entry); - - pci_unmap_page(tp->pdev, - dma_unmap_addr(&tnapi->tx_buffers[entry], - mapping), - frag->size, PCI_DMA_TODEVICE); - } - - dev_kfree_skb(skb); - return NETDEV_TX_OK; -} - -#if TG3_TSO_SUPPORT != 0 -static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *, - struct net_device *); - -/* Use GSO to workaround a rare TSO bug that may be triggered when the - * TSO header is greater than 80 bytes. - */ -static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb) -{ - struct sk_buff *segs, *nskb; - u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3; - - /* Estimate the number of fragments in the worst case */ - if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) { - netif_stop_queue(tp->dev); + /* netif_tx_stop_queue() must be done before checking + * checking tx index in tg3_tx_avail() below, because in + * tg3_tx(), we update tx index before checking for + * netif_tx_queue_stopped(). + */ + smp_mb(); if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est) return NETDEV_TX_BUSY; @@ -6234,14 +7432,17 @@ static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb) } segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO); - if (IS_ERR(segs)) + /* VMWare always returns NULL. Linux will only return NULL + * when no segments are required. + */ + if (!segs || IS_ERR(segs)) goto tg3_tso_bug_end; do { nskb = segs; segs = segs->next; nskb->next = NULL; - tg3_start_xmit_dma_bug(nskb, tp->dev); + tg3_start_xmit(nskb, tp->dev); } while (segs); tg3_tso_bug_end: @@ -6252,30 +7453,39 @@ tg3_tso_bug_end: #endif /* TG3_TSO_SUPPORT */ /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and - * support TG3_FLG2_HW_TSO_1 or firmware TSO only. + * support TG3_FLAG_HW_TSO_1 or firmware TSO only. 
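/*
 * The smp_mb() added above (and repeated in tg3_start_xmit() below)
 * closes the classic stop/wake race between the xmit and completion
 * paths: tg3_tx() advances the consumer index first and tests
 * netif_tx_queue_stopped() second, so the producer must make the
 * stopped state visible before re-reading the ring occupancy. The
 * producer side of the pattern in isolation (thresh is a placeholder
 * for the MAX_SKB_FRAGS / TG3_TX_WAKEUP_THRESH values used above):
 */
static void tx_stop_then_recheck(struct tg3_napi *tnapi,
                                 struct netdev_queue *txq, u32 thresh)
{
        if (tg3_tx_avail(tnapi) <= thresh) {
                netif_tx_stop_queue(txq);

                /* Without this barrier both sides could each conclude
                 * "the other will wake the queue" and stall it forever. */
                smp_mb();

                if (tg3_tx_avail(tnapi) > thresh)
                        netif_tx_wake_queue(txq);
        }
}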
*/ -static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb, - struct net_device *dev) +static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct tg3 *tp = netdev_priv(dev); - u32 len, entry, base_flags, mss; - int would_hit_hwbug; + u32 len, entry, base_flags, mss, vlan = 0; + u32 budget; + int i = -1, would_hit_hwbug; dma_addr_t mapping; struct tg3_napi *tnapi; struct netdev_queue *txq; - unsigned int i, last; + unsigned int last; txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); +#if defined(__VMKLNX__) && (VMWARE_ESX_DDK_VERSION < 50000) + /* For esx4.0/esx4.1u0-u2, the vmkernel doesn't check queue state + * before calling start_xmit(). So driver has to check it itself. + */ + if (unlikely(netif_tx_queue_stopped(txq))) + goto drop; +#endif tnapi = &tp->napi[skb_get_queue_mapping(skb)]; - if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) + if (tg3_flag(tp, ENABLE_TSS)) tnapi++; + budget = tg3_tx_avail(tnapi); + /* We are running in BH disabled context with netif_tx_lock * and TX reclaim runs via tp->napi.poll inside of a software * interrupt. Furthermore, IRQ processing runs lockless so we have * no IRQ context deadlocks to worry about either. Rejoice! */ - if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) { + if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) { if (!netif_tx_queue_stopped(txq)) { netif_tx_stop_queue(txq); @@ -6292,48 +7502,40 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb, base_flags |= TXD_FLAG_TCPUDP_CSUM; #if TG3_TSO_SUPPORT != 0 - if ((mss = skb_shinfo(skb)->gso_size) != 0) { + mss = skb_shinfo(skb)->gso_size; + if (mss) { struct iphdr *iph; u32 tcp_opt_len, hdr_len; if (skb_header_cloned(skb) && - pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { - dev_kfree_skb(skb); - goto out_unlock; - } + pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) + goto drop; iph = ip_hdr(skb); tcp_opt_len = tcp_optlen(skb); -#ifndef BCM_NO_TSO6 - if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) - hdr_len = skb_headlen(skb) - ETH_HLEN; - else -#endif - { - u32 ip_tcp_len; - - ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr); - hdr_len = ip_tcp_len + tcp_opt_len; + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN; + if (!skb_is_gso_v6(skb)) { iph->check = 0; iph->tot_len = htons(mss + hdr_len); } - /* Specific check for SLES 10.2 */ if (hdr_len + mss >= skb->len - ETH_HLEN) { mss = 0; goto abort_lso; } if (unlikely((ETH_HLEN + hdr_len) > 80) && - (tp->tg3_flags2 & TG3_FLG2_TSO_BUG)) + tg3_flag(tp, TSO_BUG)) return tg3_tso_bug(tp, skb); base_flags |= (TXD_FLAG_CPU_PRE_DMA | TXD_FLAG_CPU_POST_DMA); - if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) { + if (tg3_flag(tp, HW_TSO_1) || + tg3_flag(tp, HW_TSO_2) || + tg3_flag(tp, HW_TSO_3)) { tcp_hdr(skb)->check = 0; base_flags &= ~TXD_FLAG_TCPUDP_CSUM; } else @@ -6342,14 +7544,14 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb, IPPROTO_TCP, 0); - if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) { + if (tg3_flag(tp, HW_TSO_3)) { mss |= (hdr_len & 0xc) << 12; if (hdr_len & 0x10) base_flags |= 0x00000010; base_flags |= (hdr_len & 0x3e0) << 5; - } else if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) + } else if (tg3_flag(tp, HW_TSO_2)) mss |= hdr_len << 9; - else if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1) || + else if (tg3_flag(tp, HW_TSO_1) || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { if (tcp_opt_len || iph->ihl > 5) { int tsflags; @@ -6370,59 +7572,61 @@ abort_lso: #else mss = 0; #endif -#if TG3_VLAN_TAG_USED - if (tp->vlgrp != NULL && vlan_tx_tag_present(skb)) - 
base_flags |= (TXD_FLAG_VLAN | - (vlan_tx_tag_get(skb) << 16)); -#endif - if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) && - !mss && skb->len > ETH_DATA_LEN) + if (tg3_flag(tp, USE_JUMBO_BDFLAG) && + !mss && skb->len > VLAN_ETH_FRAME_LEN) base_flags |= TXD_FLAG_JMB_PKT; +#ifdef BCM_KERNEL_SUPPORTS_8021Q + if (vlan_tx_tag_present(skb)) { + base_flags |= TXD_FLAG_VLAN; + vlan = vlan_tx_tag_get(skb); + } +#endif + +#ifdef BCM_KERNEL_SUPPORTS_TIMESTAMPING + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + base_flags |= TXD_FLAG_HWTSTAMP; + } +#endif /* BCM_KERNEL_SUPPORTS_TIMESTAMPING */ + len = skb_headlen(skb); mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE); - if (tg3_pci_dma_mapping_error(tp->pdev, mapping)) { - dev_kfree_skb(skb); - goto out_unlock; - } + if (tg3_pci_dma_mapping_error(tp->pdev, mapping)) + goto drop; tnapi->tx_buffers[entry].skb = skb; dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping); would_hit_hwbug = 0; - if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && len <= 8) - would_hit_hwbug = 1; - - if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) && - tg3_4g_overflow_test(mapping, len)) - would_hit_hwbug = 1; - - if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) && - tg3_40bit_overflow_test(tp, mapping, len)) + if (tg3_flag(tp, 5701_DMA_BUG)) would_hit_hwbug = 1; - if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG) + if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags | + ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0), + mss, vlan)) { would_hit_hwbug = 1; + } else if (skb_shinfo(skb)->nr_frags > 0) { + u32 tmp_mss = mss; - tg3_set_txd(tnapi, entry, mapping, len, base_flags, - (skb_shinfo(skb)->nr_frags == 0) | (mss << 1)); - - entry = NEXT_TX(entry); + if (!tg3_flag(tp, HW_TSO_1) && + !tg3_flag(tp, HW_TSO_2) && + !tg3_flag(tp, HW_TSO_3)) + tmp_mss = 0; - /* Now loop through additional data fragments, and queue them. */ - if (skb_shinfo(skb)->nr_frags > 0) { + /* Now loop through additional data + * fragments, and queue them. + */ last = skb_shinfo(skb)->nr_frags - 1; for (i = 0; i <= last; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - len = frag->size; - mapping = pci_map_page(tp->pdev, - frag->page, - frag->page_offset, - len, PCI_DMA_TODEVICE); + len = skb_frag_size(frag); + mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0, + len, DMA_TO_DEVICE); tnapi->tx_buffers[entry].skb = NULL; dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, @@ -6430,47 +7634,34 @@ abort_lso: if (tg3_pci_dma_mapping_error(tp->pdev, mapping)) goto dma_error; - if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && - len <= 8) - would_hit_hwbug = 1; - - if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) && - tg3_4g_overflow_test(mapping, len)) - would_hit_hwbug = 1; - - if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) && - tg3_40bit_overflow_test(tp, mapping, len)) + if (!budget || + tg3_tx_frag_set(tnapi, &entry, &budget, mapping, + len, base_flags | + ((i == last) ? 
TXD_FLAG_END : 0), + tmp_mss, vlan)) { would_hit_hwbug = 1; - - if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) - tg3_set_txd(tnapi, entry, mapping, len, - base_flags, (i == last)|(mss << 1)); - else - tg3_set_txd(tnapi, entry, mapping, len, - base_flags, (i == last)); - - entry = NEXT_TX(entry); + break; + } } } if (would_hit_hwbug) { - u32 last_plus_one = entry; - u32 start; - - start = entry - 1 - skb_shinfo(skb)->nr_frags; - start &= (TG3_TX_RING_SIZE - 1); + tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i); /* If the workaround fails due to memory/mapping * failure, silently drop this packet. */ - if (tigon3_dma_hwbug_workaround(tnapi, skb, last_plus_one, - &start, base_flags, mss)) - goto out_unlock; - - entry = start; + entry = tnapi->tx_prod; + budget = tg3_tx_avail(tnapi); + if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget, + base_flags, mss, vlan)) + goto drop_nofree; } - /* Some platforms need to sync memory here */ + skb_tx_timestamp(skb); + netdev_sent_queue(tp->dev, skb->len); + + /* Sync BD data before updating mailbox */ wmb(); /* Packets are ready, update Tx producer idx local and on card. */ @@ -6479,11 +7670,17 @@ abort_lso: tnapi->tx_prod = entry; if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) { netif_tx_stop_queue(txq); + + /* netif_tx_stop_queue() must be done before checking + * checking tx index in tg3_tx_avail() below, because in + * tg3_tx(), we update tx index before checking for + * netif_tx_queue_stopped(). + */ + smp_mb(); if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)) netif_tx_wake_queue(txq); } -out_unlock: mmiowb(); tg3_update_trans_start(dev); @@ -6491,26 +7688,184 @@ out_unlock: return NETDEV_TX_OK; dma_error: - last = i; - entry = tnapi->tx_prod; - tnapi->tx_buffers[entry].skb = NULL; - pci_unmap_single(tp->pdev, - dma_unmap_addr(&tnapi->tx_buffers[entry], mapping), - skb_headlen(skb), - PCI_DMA_TODEVICE); - for (i = 0; i <= last; i++) { - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - entry = NEXT_TX(entry); + tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i); + tnapi->tx_buffers[tnapi->tx_prod].skb = NULL; +drop: + dev_kfree_skb(skb); +drop_nofree: + tp->tx_dropped++; + return NETDEV_TX_OK; +} + +static void tg3_mac_loopback(struct tg3 *tp, bool enable) +{ + if (enable) { + tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX | + MAC_MODE_PORT_MODE_MASK); + + tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK; + + if (!tg3_flag(tp, 5705_PLUS)) + tp->mac_mode |= MAC_MODE_LINK_POLARITY; + + if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) + tp->mac_mode |= MAC_MODE_PORT_MODE_MII; + else + tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; + } else { + tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK; - pci_unmap_page(tp->pdev, - dma_unmap_addr(&tnapi->tx_buffers[entry], - mapping), - frag->size, PCI_DMA_TODEVICE); + if (tg3_flag(tp, 5705_PLUS) || + (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) + tp->mac_mode &= ~MAC_MODE_LINK_POLARITY; } - dev_kfree_skb(skb); - return NETDEV_TX_OK; + tw32(MAC_MODE, tp->mac_mode); + udelay(40); +} + +static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk) +{ + u32 val, bmcr, mac_mode, ptest = 0; + + tg3_phy_toggle_apd(tp, false); + tg3_phy_toggle_automdix(tp, 0); + + if (extlpbk && tg3_phy_set_extloopbk(tp)) + return -EIO; + + bmcr = BMCR_FULLDPLX; + switch (speed) { + case SPEED_10: + break; + case SPEED_100: + bmcr |= BMCR_SPEED100; + break; + case SPEED_1000: + default: + if (tp->phy_flags & TG3_PHYFLG_IS_FET) { + speed = SPEED_100; + bmcr |= BMCR_SPEED100; + } else { + speed = SPEED_1000; 
+ bmcr |= BMCR_SPEED1000; + } + } + + if (extlpbk) { + if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) { + tg3_readphy(tp, MII_CTRL1000, &val); + val |= CTL1000_AS_MASTER | + CTL1000_ENABLE_MASTER; + tg3_writephy(tp, MII_CTRL1000, val); + } else { + ptest = MII_TG3_FET_PTEST_TRIM_SEL | + MII_TG3_FET_PTEST_TRIM_2; + tg3_writephy(tp, MII_TG3_FET_PTEST, ptest); + } + } else + bmcr |= BMCR_LOOPBACK; + + tg3_writephy(tp, MII_BMCR, bmcr); + + /* The write needs to be flushed for the FETs */ + if (tp->phy_flags & TG3_PHYFLG_IS_FET) + tg3_readphy(tp, MII_BMCR, &bmcr); + + udelay(40); + + if ((tp->phy_flags & TG3_PHYFLG_IS_FET) && + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) { + tg3_writephy(tp, MII_TG3_FET_PTEST, ptest | + MII_TG3_FET_PTEST_FRC_TX_LINK | + MII_TG3_FET_PTEST_FRC_TX_LOCK); + + /* The write needs to be flushed for the AC131 */ + tg3_readphy(tp, MII_TG3_FET_PTEST, &val); + } + + /* Reset to prevent losing 1st rx packet intermittently */ + if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && + tg3_flag(tp, 5780_CLASS)) { + tw32_f(MAC_RX_MODE, RX_MODE_RESET); + udelay(10); + tw32_f(MAC_RX_MODE, tp->rx_mode); + } + + mac_mode = tp->mac_mode & + ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); + if (speed == SPEED_1000) + mac_mode |= MAC_MODE_PORT_MODE_GMII; + else + mac_mode |= MAC_MODE_PORT_MODE_MII; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) { + u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK; + + if (masked_phy_id == TG3_PHY_ID_BCM5401) + mac_mode &= ~MAC_MODE_LINK_POLARITY; + else if (masked_phy_id == TG3_PHY_ID_BCM5411) + mac_mode |= MAC_MODE_LINK_POLARITY; + + tg3_writephy(tp, MII_TG3_EXT_CTRL, + MII_TG3_EXT_CTRL_LNK3_LED_MODE); + } + + tw32(MAC_MODE, mac_mode); + udelay(40); + + return 0; +} + +#ifdef BCM_HAS_FIX_FEATURES +static void tg3_set_loopback(struct net_device *dev, netdev_features_t features) +{ + struct tg3 *tp = netdev_priv(dev); + + if (features & NETIF_F_LOOPBACK) { + if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK) + return; + + spin_lock_bh(&tp->lock); + tg3_mac_loopback(tp, true); + netif_carrier_on(tp->dev); + spin_unlock_bh(&tp->lock); + netdev_info(dev, "Internal MAC loopback mode enabled.\n"); + } else { + if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)) + return; + + spin_lock_bh(&tp->lock); + tg3_mac_loopback(tp, false); + /* Force link status check */ + tg3_setup_phy(tp, 1); + spin_unlock_bh(&tp->lock); + netdev_info(dev, "Internal MAC loopback mode disabled.\n"); + } +} + +static netdev_features_t tg3_fix_features(struct net_device *dev, + netdev_features_t features) +{ + struct tg3 *tp = netdev_priv(dev); + + if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS)) + features &= ~NETIF_F_ALL_TSO; + + return features; +} + +static int tg3_set_features(struct net_device *dev, netdev_features_t features) +{ + netdev_features_t changed = dev->features ^ features; + + if ((changed & NETIF_F_LOOPBACK) && netif_running(dev)) + tg3_set_loopback(dev, features); + + return 0; } +#endif /* BCM_HAS_FIX_FEATURES */ static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp, int new_mtu) @@ -6518,25 +7873,28 @@ static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp, dev->mtu = new_mtu; if (new_mtu > ETH_DATA_LEN) { - if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) { - tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE; + if (tg3_flag(tp, 5780_CLASS)) { + netdev_update_features(dev); + tg3_flag_clear(tp, TSO_CAPABLE); #if TG3_TSO_SUPPORT != 0 ethtool_op_set_tso(dev, 0); #endif } else { - tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE; + 
tg3_flag_set(tp, JUMBO_RING_ENABLE); } } else { - if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) - tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; - tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE; + if (tg3_flag(tp, 5780_CLASS)) { + tg3_flag_set(tp, TSO_CAPABLE); + netdev_update_features(dev); + } + tg3_flag_clear(tp, JUMBO_RING_ENABLE); } } static int tg3_change_mtu(struct net_device *dev, int new_mtu) { struct tg3 *tp = netdev_priv(dev); - int err; + int err, reset_phy = 0; if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp)) return -EINVAL; @@ -6549,17 +7907,35 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu) return 0; } - tg3_phy_stop(tp); +#if defined(__VMKLNX__) && (VMWARE_ESX_DDK_VERSION < 50000) + /* There is no need to hold rtnl_lock + * when calling change MTU into driver + * from VMkernel ESX 5.0 onwards. + */ + rtnl_lock(); +#endif + + tg3_phy_stop(tp); tg3_netif_stop(tp); +#ifdef TG3_VMWARE_NETQ_ENABLE + tg3_netq_invalidate_state(tp); +#endif + tg3_full_lock(tp, 1); tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); tg3_set_mtu(dev, tp, new_mtu); - err = tg3_restart_hw(tp, 0); + /* Reset PHY, otherwise the read DMA engine will be in a mode that + * breaks all requests to 256 bytes. + */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) + reset_phy = 1; + + err = tg3_restart_hw(tp, reset_phy); if (!err) tg3_netif_start(tp); @@ -6569,6 +7945,10 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu) if (!err) tg3_phy_start(tp); +#if defined(__VMKLNX__) && (VMWARE_ESX_DDK_VERSION < 50000) + rtnl_unlock(); +#endif + return err; } @@ -6577,16 +7957,22 @@ static void tg3_rx_prodring_free(struct tg3 *tp, { int i; - if (tpr != &tp->prodring[0]) { + if (!tpr->rx_std_buffers) + return; + +#ifdef TG3_VMWARE_NETQ_ENABLE + if (tg3_flag(tp, ENABLE_RSS)) +#endif + if (tpr != &tp->napi[0].prodring) { for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx; - i = (i + 1) % TG3_RX_RING_SIZE) + i = (i + 1) & tp->rx_std_ring_mask) tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i], tp->rx_pkt_map_sz); - if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { + if (tg3_flag(tp, JUMBO_CAPABLE)) { for (i = tpr->rx_jmb_cons_idx; i != tpr->rx_jmb_prod_idx; - i = (i + 1) % TG3_RX_JUMBO_RING_SIZE) { + i = (i + 1) & tp->rx_jmb_ring_mask) { tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i], TG3_RX_JMB_MAP_SZ); } @@ -6595,12 +7981,12 @@ static void tg3_rx_prodring_free(struct tg3 *tp, return; } - for (i = 0; i < TG3_RX_RING_SIZE; i++) + for (i = 0; i <= tp->rx_std_ring_mask; i++) tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i], tp->rx_pkt_map_sz); - if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { - for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) + if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) { + for (i = 0; i <= tp->rx_jmb_ring_mask; i++) tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i], TG3_RX_JMB_MAP_SZ); } @@ -6623,19 +8009,26 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp, tpr->rx_jmb_cons_idx = 0; tpr->rx_jmb_prod_idx = 0; - if (tpr != &tp->prodring[0]) { - memset(&tpr->rx_std_buffers[0], 0, TG3_RX_STD_BUFF_RING_SIZE); - if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) + if (!tpr->rx_std_buffers) + goto done; + +#ifdef TG3_VMWARE_NETQ_ENABLE + if (tg3_flag(tp, ENABLE_RSS)) +#endif + if (tpr != &tp->napi[0].prodring) { + memset(&tpr->rx_std_buffers[0], 0, + TG3_RX_STD_BUFF_RING_SIZE(tp)); + if (tpr->rx_jmb_buffers) memset(&tpr->rx_jmb_buffers[0], 0, - TG3_RX_JMB_BUFF_RING_SIZE); + TG3_RX_JMB_BUFF_RING_SIZE(tp)); goto done; } /* Zero out all descriptors. 
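/*
 * The modulo-to-mask conversions above ((i + 1) % TG3_RX_RING_SIZE
 * becoming (i + 1) & tp->rx_std_ring_mask) rely on the ring sizes being
 * powers of two, where masking and modulo are equivalent but masking
 * avoids a division. With a hypothetical 512-entry ring:
 */
#define SKETCH_RING_SIZE 512
#define SKETCH_RING_MASK (SKETCH_RING_SIZE - 1)    /* 0x1ff */

static inline u32 ring_next(u32 i)
{
        return (i + 1) & SKETCH_RING_MASK;  /* == (i + 1) % SKETCH_RING_SIZE */
}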
*/ - memset(tpr->rx_std, 0, TG3_RX_RING_BYTES); + memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp)); rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ; - if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) && + if (tg3_flag(tp, 5780_CLASS) && tp->dev->mtu > ETH_DATA_LEN) rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ; tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz); @@ -6644,7 +8037,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp, * stuff once. This works because the card does not * write into the rx buffer posting rings. */ - for (i = 0; i < TG3_RX_RING_SIZE; i++) { + for (i = 0; i <= tp->rx_std_ring_mask; i++) { struct tg3_rx_buffer_desc *rxd; rxd = &tpr->rx_std[i]; @@ -6668,15 +8061,15 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp, } } - if (!(tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE)) + if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS)) goto done; - memset(tpr->rx_jmb, 0, TG3_RX_JUMBO_RING_BYTES); + memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp)); - if (!(tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)) + if (!tg3_flag(tp, JUMBO_RING_ENABLE)) goto done; - for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) { + for (i = 0; i <= tp->rx_jmb_ring_mask; i++) { struct tg3_rx_buffer_desc *rxd; rxd = &tpr->rx_jmb[i].std; @@ -6716,13 +8109,13 @@ static void tg3_rx_prodring_fini(struct tg3 *tp, kfree(tpr->rx_jmb_buffers); tpr->rx_jmb_buffers = NULL; if (tpr->rx_std) { - pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES, - tpr->rx_std, tpr->rx_std_mapping); + dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp), + tpr->rx_std, tpr->rx_std_mapping); tpr->rx_std = NULL; } if (tpr->rx_jmb) { - pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES, - tpr->rx_jmb, tpr->rx_jmb_mapping); + dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp), + tpr->rx_jmb, tpr->rx_jmb_mapping); tpr->rx_jmb = NULL; } } @@ -6730,24 +8123,28 @@ static void tg3_rx_prodring_fini(struct tg3 *tp, static int tg3_rx_prodring_init(struct tg3 *tp, struct tg3_rx_prodring_set *tpr) { - tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE, GFP_KERNEL); + tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp), + GFP_KERNEL); if (!tpr->rx_std_buffers) return -ENOMEM; - tpr->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES, - &tpr->rx_std_mapping); + tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev, + TG3_RX_STD_RING_BYTES(tp), + &tpr->rx_std_mapping, + GFP_KERNEL); if (!tpr->rx_std) goto err_out; - if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { - tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE, + if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) { + tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp), GFP_KERNEL); if (!tpr->rx_jmb_buffers) goto err_out; - tpr->rx_jmb = pci_alloc_consistent(tp->pdev, - TG3_RX_JUMBO_RING_BYTES, - &tpr->rx_jmb_mapping); + tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev, + TG3_RX_JMB_RING_BYTES(tp), + &tpr->rx_jmb_mapping, + GFP_KERNEL); if (!tpr->rx_jmb) goto err_out; } @@ -6773,44 +8170,25 @@ static void tg3_free_rings(struct tg3 *tp) for (j = 0; j < tp->irq_cnt; j++) { struct tg3_napi *tnapi = &tp->napi[j]; - tg3_rx_prodring_free(tp, &tp->prodring[j]); + tg3_rx_prodring_free(tp, &tnapi->prodring); if (!tnapi->tx_buffers) continue; - for (i = 0; i < TG3_TX_RING_SIZE; ) { - struct ring_info *txp; - struct sk_buff *skb; - unsigned int k; + for (i = 0; i < TG3_TX_RING_SIZE; i++) { + struct sk_buff *skb = tnapi->tx_buffers[i].skb; - txp = &tnapi->tx_buffers[i]; - skb = txp->skb; - - if (skb == NULL) { - i++; + if (!skb) continue; - } - pci_unmap_single(tp->pdev, - 
dma_unmap_addr(txp, mapping), - skb_headlen(skb), - PCI_DMA_TODEVICE); - txp->skb = NULL; - - i++; - - for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) { - txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)]; - pci_unmap_page(tp->pdev, - dma_unmap_addr(txp, mapping), - skb_shinfo(skb)->frags[k].size, - PCI_DMA_TODEVICE); - i++; - } + tg3_tx_skb_unmap(tnapi, i, + skb_shinfo(skb)->nr_frags - 1); dev_kfree_skb_any(skb); } } + + netdev_reset_queue(tp->dev); } /* Initialize tx/rx rings for packet processing. @@ -6845,7 +8223,10 @@ static int tg3_init_rings(struct tg3 *tp) if (tnapi->rx_rcb) memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); - if (tg3_rx_prodring_alloc(tp, &tp->prodring[i])) { +#ifdef TG3_VMWARE_NETQ_ENABLE + if (!i || (i && tg3_flag(tp, ENABLE_RSS))) +#endif + if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) { tg3_free_rings(tp); return -ENOMEM; } @@ -6854,6 +8235,122 @@ static int tg3_init_rings(struct tg3 *tp) return 0; } +static void tg3_mem_tx_release(struct tg3 *tp) +{ + int i; + struct tg3_napi *tnapi = &tp->napi[0]; + + for (i = 0; i < tp->irq_max; i++, tnapi++) { + if (!tnapi->tx_ring) + continue; + + dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES, + tnapi->tx_ring, tnapi->tx_desc_mapping); + tnapi->tx_ring = NULL; + + kfree(tnapi->tx_buffers); + tnapi->tx_buffers = NULL; + } +} + +static int tg3_mem_tx_acquire(struct tg3 *tp) +{ + int i; + struct tg3_napi *tnapi = &tp->napi[0]; + + /* If multivector TSS is enabled, vector 0 does not handle + * tx interrupts. Don't allocate any resources for it. + */ + if (tg3_flag(tp, ENABLE_TSS)) + tnapi++; + + for (i = 0; i < tp->txq_cnt; i++, tnapi++) { + tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) * + TG3_TX_RING_SIZE, GFP_KERNEL); + if (!tnapi->tx_buffers) + goto err_out; + + tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev, + TG3_TX_RING_BYTES, + &tnapi->tx_desc_mapping, + GFP_KERNEL); + if (!tnapi->tx_ring) + goto err_out; + } + + return 0; + +err_out: + tg3_mem_tx_release(tp); + return -ENOMEM; +} + +static void tg3_mem_rx_release(struct tg3 *tp) +{ + int i; + struct tg3_napi *tnapi = &tp->napi[0]; + + for (i = 0; i < tp->irq_max; i++, tnapi++) { + tg3_rx_prodring_fini(tp, &tnapi->prodring); + + if (!tnapi->rx_rcb) + continue; + + dma_free_coherent(&tp->pdev->dev, + TG3_RX_RCB_RING_BYTES(tp), + tnapi->rx_rcb, + tnapi->rx_rcb_mapping); + tnapi->rx_rcb = NULL; + } +} + +static int tg3_mem_rx_acquire(struct tg3 *tp) +{ + unsigned int i, limit; + + limit = tp->rxq_cnt; + + /* If RSS is enabled, we need a (dummy) producer ring + * set on vector zero. This is the true hw prodring. + */ + if (tg3_flag(tp, ENABLE_RSS)) + limit++; + + for (i = 0; i < limit; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + + if (tg3_rx_prodring_init(tp, &tnapi->prodring)) + goto err_out; + + if (tg3_flag(tp, ENABLE_IOV)) + tnapi->srcprodring = &tnapi->prodring; + else + tnapi->srcprodring = &tp->napi[0].prodring; + + /* If multivector RSS is enabled, vector 0 + * does not handle rx or tx interrupts. + * Don't allocate any resources for it. + */ + if (!i && tg3_flag(tp, ENABLE_RSS)) + continue; + + tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev, + TG3_RX_RCB_RING_BYTES(tp), + &tnapi->rx_rcb_mapping, + GFP_KERNEL); + if (!tnapi->rx_rcb) + goto err_out; + + memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); + } + + return 0; + +err_out: + tg3_mem_rx_release(tp); + return -ENOMEM; +} + /* * Must not be invoked with interrupt sources disabled and * the hardware shutdown down. 
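/*
 * The netdev_reset_queue() call added to tg3_free_rings() above pairs
 * with the netdev_sent_queue() call added to the xmit path earlier in
 * this diff; the matching netdev_completed_queue() belongs in the tx
 * reclaim path (tg3_tx(), outside this hunk). The byte-queue-limits
 * accounting hooks, shown in isolation:
 */
static inline void bql_on_xmit(struct net_device *dev, struct sk_buff *skb)
{
        netdev_sent_queue(dev, skb->len);          /* bytes handed to HW */
}

static inline void bql_on_reclaim(struct net_device *dev,
                                  unsigned int pkts, unsigned int bytes)
{
        netdev_completed_queue(dev, pkts, bytes);  /* bytes reclaimed */
}
/* and on teardown, as above: netdev_reset_queue(tp->dev); */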
@@ -6865,38 +8362,22 @@ static void tg3_free_consistent(struct tg3 *tp) for (i = 0; i < tp->irq_cnt; i++) { struct tg3_napi *tnapi = &tp->napi[i]; - if (tnapi->tx_ring) { - pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES, - tnapi->tx_ring, tnapi->tx_desc_mapping); - tnapi->tx_ring = NULL; - } - - kfree(tnapi->tx_buffers); - tnapi->tx_buffers = NULL; - - if (tnapi->rx_rcb) { - pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp), - tnapi->rx_rcb, - tnapi->rx_rcb_mapping); - tnapi->rx_rcb = NULL; - } - if (tnapi->hw_status) { - pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE, - tnapi->hw_status, - tnapi->status_mapping); + dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE, + tnapi->hw_status, + tnapi->status_mapping); tnapi->hw_status = NULL; } } + tg3_mem_rx_release(tp); + tg3_mem_tx_release(tp); + if (tp->hw_stats) { - pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats), - tp->hw_stats, tp->stats_mapping); + dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats), + tp->hw_stats, tp->stats_mapping); tp->hw_stats = NULL; } - - for (i = 0; i < tp->irq_cnt; i++) - tg3_rx_prodring_fini(tp, &tp->prodring[i]); } /* @@ -6907,14 +8388,10 @@ static int tg3_alloc_consistent(struct tg3 *tp) { int i; - for (i = 0; i < tp->irq_cnt; i++) { - if (tg3_rx_prodring_init(tp, &tp->prodring[i])) - goto err_out; - } - - tp->hw_stats = pci_alloc_consistent(tp->pdev, - sizeof(struct tg3_hw_stats), - &tp->stats_mapping); + tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev, + sizeof(struct tg3_hw_stats), + &tp->stats_mapping, + GFP_KERNEL); if (!tp->hw_stats) goto err_out; @@ -6924,72 +8401,47 @@ static int tg3_alloc_consistent(struct tg3 *tp) struct tg3_napi *tnapi = &tp->napi[i]; struct tg3_hw_status *sblk; - tnapi->hw_status = pci_alloc_consistent(tp->pdev, - TG3_HW_STATUS_SIZE, - &tnapi->status_mapping); + tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev, + TG3_HW_STATUS_SIZE, + &tnapi->status_mapping, + GFP_KERNEL); if (!tnapi->hw_status) goto err_out; memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); sblk = tnapi->hw_status; - /* If multivector TSS is enabled, vector 0 does not handle - * tx interrupts. Don't allocate any resources for it. - */ - if ((!i && !(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) || - (i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))) { - tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) * - TG3_TX_RING_SIZE, - GFP_KERNEL); - if (!tnapi->tx_buffers) - goto err_out; - - tnapi->tx_ring = pci_alloc_consistent(tp->pdev, - TG3_TX_RING_BYTES, - &tnapi->tx_desc_mapping); - if (!tnapi->tx_ring) - goto err_out; - } + tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer; + if (tg3_flag(tp, ENABLE_RSS)) { + volatile u16 *prodptr; - /* - * When RSS is enabled, the status block format changes - * slightly. The "rx_jumbo_consumer", "reserved", - * and "rx_mini_consumer" members get mapped to the - * other three rx return ring producer indexes. - */ - switch (i) { - default: - tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer; - break; - case 2: - tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer; - break; - case 3: - tnapi->rx_rcb_prod_idx = &sblk->reserved; - break; - case 4: - tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer; - break; + /* + * When RSS is enabled, the status block format changes + * slightly. The "rx_jumbo_consumer", "reserved", + * and "rx_mini_consumer" members get mapped to the + * other three rx return ring producer indexes. 
+ */ + switch (i) { + default: + prodptr = &sblk->idx[0].rx_producer; + break; + case 2: + prodptr = &sblk->rx_jumbo_consumer; + break; + case 3: + prodptr = &sblk->reserved; + break; + case 4: + prodptr = &sblk->rx_mini_consumer; + break; + } + tnapi->rx_rcb_prod_idx = prodptr; } - - tnapi->prodring = &tp->prodring[i]; - - /* - * If multivector RSS is enabled, vector 0 does not handle - * rx or tx interrupts. Don't allocate any resources for it. - */ - if (!i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) - continue; - - tnapi->rx_rcb = pci_alloc_consistent(tp->pdev, - TG3_RX_RCB_RING_BYTES(tp), - &tnapi->rx_rcb_mapping); - if (!tnapi->rx_rcb) - goto err_out; - - memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); } + if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp)) + goto err_out; + return 0; err_out: @@ -7007,7 +8459,7 @@ static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int unsigned int i; u32 val; - if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { + if (tg3_flag(tp, 5705_PLUS)) { switch (ofs) { case RCVLSC_MODE: case DMAC_MODE: @@ -7072,268 +8524,41 @@ static int tg3_abort_hw(struct tg3 *tp, int silent) err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent); tp->mac_mode &= ~MAC_MODE_TDE_ENABLE; - tw32_f(MAC_MODE, tp->mac_mode); - udelay(40); - - tp->tx_mode &= ~TX_MODE_ENABLE; - tw32_f(MAC_TX_MODE, tp->tx_mode); - - for (i = 0; i < MAX_WAIT_CNT; i++) { - udelay(100); - if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE)) - break; - } - if (i >= MAX_WAIT_CNT) { - dev_err(&tp->pdev->dev, - "%s timed out, TX_MODE_ENABLE will not clear " - "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE)); - err |= -ENODEV; - } - - err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent); - err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent); - err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent); - - tw32(FTQ_RESET, 0xffffffff); - tw32(FTQ_RESET, 0x00000000); - - err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent); - err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent); - - for (i = 0; i < tp->irq_cnt; i++) { - struct tg3_napi *tnapi = &tp->napi[i]; - if (tnapi->hw_status) - memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); - } - if (tp->hw_stats) - memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats)); - - return err; -} - -static void tg3_ape_send_event(struct tg3 *tp, u32 event) -{ - int i; - u32 apedata; - - /* NCSI does not support APE events */ - if (tp->tg3_flags3 & TG3_FLG3_APE_HAS_NCSI) - return; - - apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG); - if (apedata != APE_SEG_SIG_MAGIC) - return; - - apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS); - if (!(apedata & APE_FW_STATUS_READY)) - return; - - /* Wait for up to 1 millisecond for APE to service previous event. 
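/*
 * Condensing the RSS producer-index mapping established above: the
 * status block has only one rx return ring producer field, so vectors
 * 2-4 reuse otherwise-idle fields. Restated as a single helper:
 */
static volatile u16 *rss_ret_ring_prodptr(struct tg3_hw_status *sblk, int vec)
{
        switch (vec) {
        case 2:
                return &sblk->rx_jumbo_consumer;
        case 3:
                return &sblk->reserved;
        case 4:
                return &sblk->rx_mini_consumer;
        default:
                return &sblk->idx[0].rx_producer;
        }
}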
*/ - for (i = 0; i < 10; i++) { - if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM)) - return; - - apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS); - - if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING)) - tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, - event | APE_EVENT_STATUS_EVENT_PENDING); - - tg3_ape_unlock(tp, TG3_APE_LOCK_MEM); - - if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING)) - break; - - udelay(100); - } - - if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING)) - tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1); -} - -static void tg3_ape_driver_state_change(struct tg3 *tp, int kind) -{ - u32 event; - u32 apedata; - - if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) - return; - - switch (kind) { - case RESET_KIND_INIT: - tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, - APE_HOST_SEG_SIG_MAGIC); - tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN, - APE_HOST_SEG_LEN_MAGIC); - apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT); - tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata); - tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID, - APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM)); - tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR, - APE_HOST_BEHAV_NO_PHYLOCK); - tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, - TG3_APE_HOST_DRVR_STATE_START); - - event = APE_EVENT_STATUS_STATE_START; - break; - case RESET_KIND_SHUTDOWN: - /* With the interface we are currently using, - * APE does not track driver state. Wiping - * out the HOST SEGMENT SIGNATURE forces - * the APE to assume OS absent status. - */ - tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0); - - if (device_may_wakeup(&tp->pdev->dev) && - (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)) { - tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED, - TG3_APE_HOST_WOL_SPEED_AUTO); - apedata = TG3_APE_HOST_DRVR_STATE_WOL; - } else - apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD; - - tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata); - - event = APE_EVENT_STATUS_STATE_UNLOAD; - break; - case RESET_KIND_SUSPEND: - event = APE_EVENT_STATUS_STATE_SUSPEND; - break; - default: - return; - } - - event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE; - - tg3_ape_send_event(tp, event); -} - -/* tp->lock is held. */ -static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind) -{ - tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX, - NIC_SRAM_FIRMWARE_MBOX_MAGIC1); - - if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) { - switch (kind) { - case RESET_KIND_INIT: - tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, - DRV_STATE_START); - break; - - case RESET_KIND_SHUTDOWN: - tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, - DRV_STATE_UNLOAD); - break; - - case RESET_KIND_SUSPEND: - tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, - DRV_STATE_SUSPEND); - break; - - default: - break; - } - } - - if (kind == RESET_KIND_INIT || - kind == RESET_KIND_SUSPEND) - tg3_ape_driver_state_change(tp, kind); -} - -/* tp->lock is held. */ -static void tg3_write_sig_post_reset(struct tg3 *tp, int kind) -{ - if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) { - switch (kind) { - case RESET_KIND_INIT: - tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, - DRV_STATE_START_DONE); - break; - - case RESET_KIND_SHUTDOWN: - tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, - DRV_STATE_UNLOAD_DONE); - break; - - default: - break; - } - } - - if (kind == RESET_KIND_SHUTDOWN) - tg3_ape_driver_state_change(tp, kind); -} - -/* tp->lock is held. 
*/ -static void tg3_write_sig_legacy(struct tg3 *tp, int kind) -{ - if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) { - switch (kind) { - case RESET_KIND_INIT: - tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, - DRV_STATE_START); - break; - - case RESET_KIND_SHUTDOWN: - tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, - DRV_STATE_UNLOAD); - break; - - case RESET_KIND_SUSPEND: - tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, - DRV_STATE_SUSPEND); - break; - - default: - break; - } - } -} - -static int tg3_poll_fw(struct tg3 *tp) -{ - int i; - u32 val; - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { - /* Wait up to 20ms for init done. */ - for (i = 0; i < 200; i++) { - if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE) - return 0; - udelay(100); - } - return -ENODEV; - } + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); - /* Wait for firmware initialization to complete. */ - for (i = 0; i < 100000; i++) { - tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val); - if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1) + tp->tx_mode &= ~TX_MODE_ENABLE; + tw32_f(MAC_TX_MODE, tp->tx_mode); + + for (i = 0; i < MAX_WAIT_CNT; i++) { + udelay(100); + if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE)) break; - udelay(10); + } + if (i >= MAX_WAIT_CNT) { + dev_err(&tp->pdev->dev, + "%s timed out, TX_MODE_ENABLE will not clear " + "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE)); + err |= -ENODEV; } - /* Chip might not be fitted with firmware. Some Sun onboard - * parts are configured like that. So don't signal the timeout - * of the above loop as an error, but do report the lack of - * running firmware once. - */ - if (i >= 100000 && - !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) { - tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED; + err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent); - netdev_info(tp->dev, "No firmware running\n"); - } + tw32(FTQ_RESET, 0xffffffff); + tw32(FTQ_RESET, 0x00000000); - if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) { - /* The 57765 A0 needs a little more - * time to do some important work. - */ - mdelay(10); + err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent); + + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + if (tnapi->hw_status) + memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); } - return 0; + return err; } /* Save PCI command register before chip reset */ @@ -7354,10 +8579,10 @@ static void tg3_restore_pci_state(struct tg3 *tp) /* Set MAX PCI retry to zero. */ val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE); if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 && - (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) + tg3_flag(tp, PCIX_MODE)) val |= PCISTATE_RETRY_SAME_DMA; /* Allow reads and writes to the APE register and memory space. 
*/ - if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) + if (tg3_flag(tp, ENABLE_APE)) val |= PCISTATE_ALLOW_APE_CTLSPC_WR | PCISTATE_ALLOW_APE_SHMEM_WR | PCISTATE_ALLOW_APE_PSPACE_WR; @@ -7365,19 +8590,15 @@ static void tg3_restore_pci_state(struct tg3 *tp) pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd); - if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) { - if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) - pcie_set_readrq(tp->pdev, 4096); - else { - pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, - tp->pci_cacheline_sz); - pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER, - tp->pci_lat_timer); - } + if (!tg3_flag(tp, PCI_EXPRESS)) { + pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, + tp->pci_cacheline_sz); + pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER, + tp->pci_lat_timer); } /* Make sure PCI-X relaxed ordering bit is clear. */ - if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) { + if (tg3_flag(tp, PCIX_MODE)) { u16 pcix_cmd; pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, @@ -7387,12 +8608,12 @@ static void tg3_restore_pci_state(struct tg3 *tp) pcix_cmd); } - if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) { + if (tg3_flag(tp, 5780_CLASS)) { /* Chip reset on 5780 will reset MSI enable bit, * so need to restore it. */ - if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { + if (tg3_flag(tp, USING_MSI)) { u16 ctrl; pci_read_config_word(tp->pdev, @@ -7405,9 +8626,9 @@ static void tg3_restore_pci_state(struct tg3 *tp) tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE); } } -} -static void tg3_stop_fw(struct tg3 *); + tg3_disable_ints(tp); +} /* tp->lock is held. */ static int tg3_chip_reset(struct tg3 *tp) @@ -7415,6 +8636,7 @@ static int tg3_chip_reset(struct tg3 *tp) u32 val; void (*write_op)(struct tg3 *, u32, u32); int i, err; + tg3_nvram_lock(tp); tg3_ape_lock(tp, TG3_APE_LOCK_GRC); @@ -7431,7 +8653,7 @@ static int tg3_chip_reset(struct tg3 *tp) tg3_save_pci_state(tp); if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || - (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) + tg3_flag(tp, 5755_PLUS)) tw32(GRC_FASTBOOT_PC, 0); /* @@ -7450,7 +8672,7 @@ static int tg3_chip_reset(struct tg3 *tp) * at this time, but the irq handler may still be called due to irq * sharing or irqpoll. */ - tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING; + tg3_flag_set(tp, CHIP_RESETTING); for (i = 0; i < tp->irq_cnt; i++) { struct tg3_napi *tnapi = &tp->napi[i]; if (tnapi->hw_status) { @@ -7477,11 +8699,10 @@ static int tg3_chip_reset(struct tg3 *tp) /* do the reset */ val = GRC_MISC_CFG_CORECLK_RESET; - if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { + if (tg3_flag(tp, PCI_EXPRESS)) { /* Force PCIe 1.0a mode */ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 && + !tg3_flag(tp, 57765_PLUS) && tr32(TG3_PCIE_PHY_TSTCTL) == (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM)) tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM); @@ -7499,8 +8720,7 @@ static int tg3_chip_reset(struct tg3 *tp) } /* Manage gphy power for all CPMU absent PCIe devices. 
*/ - if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && - !(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) + if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT)) val |= GRC_MISC_CFG_KEEP_GPHY_POWER; tw32(GRC_MISC_CFG, val); @@ -7533,7 +8753,7 @@ static int tg3_chip_reset(struct tg3 *tp) udelay(120); - if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && tp->pcie_cap) { + if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) { u16 val16; if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) { @@ -7551,7 +8771,7 @@ static int tg3_chip_reset(struct tg3 *tp) /* Clear the "no snoop" and "relaxed ordering" bits. */ pci_read_config_word(tp->pdev, - tp->pcie_cap + PCI_EXP_DEVCTL, + pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL, &val16); val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN); @@ -7559,17 +8779,15 @@ static int tg3_chip_reset(struct tg3 *tp) * Older PCIe devices only support the 128 byte * MPS setting. Enforce the restriction. */ - if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) + if (!tg3_flag(tp, CPMU_PRESENT)) val16 &= ~PCI_EXP_DEVCTL_PAYLOAD; pci_write_config_word(tp->pdev, - tp->pcie_cap + PCI_EXP_DEVCTL, + pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL, val16); - pcie_set_readrq(tp->pdev, 4096); - /* Clear error status */ pci_write_config_word(tp->pdev, - tp->pcie_cap + PCI_EXP_DEVSTA, + pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA, PCI_EXP_DEVSTA_CED | PCI_EXP_DEVSTA_NFED | PCI_EXP_DEVSTA_FED | @@ -7578,10 +8796,11 @@ static int tg3_chip_reset(struct tg3 *tp) tg3_restore_pci_state(tp); - tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING; + tg3_flag_clear(tp, CHIP_RESETTING); + tg3_flag_clear(tp, ERROR_PROCESSED); val = 0; - if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) + if (tg3_flag(tp, 5780_CLASS)) val = tr32(MEMARB_MODE); tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); @@ -7606,19 +8825,16 @@ static int tg3_chip_reset(struct tg3 *tp) tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); } - if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { tp->mac_mode = MAC_MODE_PORT_MODE_TBI; - tw32_f(MAC_MODE, tp->mac_mode); - } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) { + val = tp->mac_mode; + } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) { tp->mac_mode = MAC_MODE_PORT_MODE_GMII; - tw32_f(MAC_MODE, tp->mac_mode); - } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { - tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN); - if (tp->mac_mode & MAC_MODE_APE_TX_EN) - tp->mac_mode |= MAC_MODE_TDE_ENABLE; - tw32_f(MAC_MODE, tp->mac_mode); + val = tp->mac_mode; } else - tw32_f(MAC_MODE, 0); + val = 0; + + tw32_f(MAC_MODE, val); udelay(40); tg3_ape_unlock(tp, TG3_APE_LOCK_GRC); @@ -7629,51 +8845,51 @@ static int tg3_chip_reset(struct tg3 *tp) tg3_mdio_start(tp); - if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && + if (tg3_flag(tp, PCI_EXPRESS) && tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 && GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) { + !tg3_flag(tp, 57765_PLUS)) { val = tr32(0x7c00); tw32(0x7c00, val | (1 << 25)); } + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { + val = tr32(TG3_CPMU_CLCK_ORIDE); + tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN); + } + /* Reprobe ASF enable state. 
*/ - tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF; - tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE; + tg3_flag_clear(tp, ENABLE_ASF); + if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && + !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) + tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK | + TG3_PHYFLG_KEEP_LINK_ON_PWRDN); + tg3_flag_clear(tp, ASF_NEW_HANDSHAKE); tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); if (val == NIC_SRAM_DATA_SIG_MAGIC) { u32 nic_cfg; tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg); if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { - tp->tg3_flags |= TG3_FLAG_ENABLE_ASF; + tg3_flag_set(tp, ENABLE_ASF); tp->last_event_jiffies = jiffies; - if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) - tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE; + if (tg3_flag(tp, 5750_PLUS)) + tg3_flag_set(tp, ASF_NEW_HANDSHAKE); + + tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg); + if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK) + tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK; + if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID) + tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN; } } return 0; } -/* tp->lock is held. */ -static void tg3_stop_fw(struct tg3 *tp) -{ - if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) && - !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) { - /* Wait for RX cpu to ACK the previous event. */ - tg3_wait_for_event_ack(tp); - - tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW); - - tg3_generate_fw_event(tp); - - /* Wait for RX cpu to ACK this event. */ - tg3_wait_for_event_ack(tp); - } -} +static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *); +static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *); /* tp->lock is held. */ static int tg3_halt(struct tg3 *tp, int kind, int silent) @@ -7682,247 +8898,27 @@ static int tg3_halt(struct tg3 *tp, int kind, int silent) tg3_stop_fw(tp); - tg3_write_sig_pre_reset(tp, kind); - - tg3_abort_hw(tp, silent); - err = tg3_chip_reset(tp); - - __tg3_set_mac_addr(tp, 0); - - tg3_write_sig_legacy(tp, kind); - tg3_write_sig_post_reset(tp, kind); - - if (err) - return err; - - return 0; -} - -#define RX_CPU_SCRATCH_BASE 0x30000 -#define RX_CPU_SCRATCH_SIZE 0x04000 -#define TX_CPU_SCRATCH_BASE 0x34000 -#define TX_CPU_SCRATCH_SIZE 0x04000 - -/* tp->lock is held. */ -static int tg3_halt_cpu(struct tg3 *tp, u32 offset) -{ - int i; - - BUG_ON(offset == TX_CPU_BASE && - (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)); - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { - u32 val = tr32(GRC_VCPU_EXT_CTRL); - - tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU); - return 0; - } - if (offset == RX_CPU_BASE) { - for (i = 0; i < 10000; i++) { - tw32(offset + CPU_STATE, 0xffffffff); - tw32(offset + CPU_MODE, CPU_MODE_HALT); - if (tr32(offset + CPU_MODE) & CPU_MODE_HALT) - break; - } - - tw32(offset + CPU_STATE, 0xffffffff); - tw32_f(offset + CPU_MODE, CPU_MODE_HALT); - udelay(10); - } else { - for (i = 0; i < 10000; i++) { - tw32(offset + CPU_STATE, 0xffffffff); - tw32(offset + CPU_MODE, CPU_MODE_HALT); - if (tr32(offset + CPU_MODE) & CPU_MODE_HALT) - break; - } - } - - if (i >= 10000) { - netdev_err(tp->dev, "%s timed out, %s CPU\n", - __func__, offset == RX_CPU_BASE ? "RX" : "TX"); - return -ENODEV; - } - - /* Clear firmware's nvram arbitration. */ - if (tp->tg3_flags & TG3_FLAG_NVRAM) - tw32(NVRAM_SWARB, SWARB_REQ_CLR0); - return 0; -} - -struct fw_info { - unsigned int fw_base; - unsigned int fw_len; - const u32 *fw_data; -}; - -/* tp->lock is held. 
*/ -static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base, - int cpu_scratch_size, struct fw_info *info) -{ - int err, lock_err, i; - void (*write_op)(struct tg3 *, u32, u32); - - if (cpu_base == TX_CPU_BASE && - (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { - netdev_err(tp->dev, - "%s: Trying to load TX cpu firmware which is 5705\n", - __func__); - return -EINVAL; - } - - if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) - write_op = tg3_write_mem; - else - write_op = tg3_write_indirect_reg32; - - /* It is possible that bootcode is still loading at this point. - * Get the nvram lock first before halting the cpu. - */ - lock_err = tg3_nvram_lock(tp); - err = tg3_halt_cpu(tp, cpu_base); - if (!lock_err) - tg3_nvram_unlock(tp); - if (err) - goto out; - - for (i = 0; i < cpu_scratch_size; i += sizeof(u32)) - write_op(tp, cpu_scratch_base + i, 0); - tw32(cpu_base + CPU_STATE, 0xffffffff); - tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT); - for (i = 0; i < (info->fw_len / sizeof(u32)); i++) - write_op(tp, (cpu_scratch_base + - (info->fw_base & 0xffff) + - (i * sizeof(u32))), - info->fw_data[i]); - - err = 0; - -out: - return err; -} - -/* tp->lock is held. */ -static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp) -{ - struct fw_info info; - const u32 *fw_data; - int err, i; - - fw_data = (void *)tp->fw->data; - - /* Firmware blob starts with version numbers, followed by - start address and length. We are setting complete length. - length = end_address_of_bss - start_address_of_text. - Remainder is the blob to be loaded contiguously - from start address. */ - - info.fw_base = fw_data[1]; - info.fw_len = tp->fw->size - 12; - info.fw_data = &fw_data[3]; - - err = tg3_load_firmware_cpu(tp, RX_CPU_BASE, - RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE, - &info); - if (err) - return err; - - err = tg3_load_firmware_cpu(tp, TX_CPU_BASE, - TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE, - &info); - if (err) - return err; - - /* Now startup only the RX cpu. */ - tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); - tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base); - - for (i = 0; i < 5; i++) { - if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base) - break; - tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); - tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT); - tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base); - udelay(1000); - } - if (i >= 5) { - netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x " - "should be %08x\n", __func__, - tr32(RX_CPU_BASE + CPU_PC), info.fw_base); - return -ENODEV; - } - tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); - tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000); - - return 0; -} - -#if TG3_TSO_SUPPORT != 0 - -/* tp->lock is held. */ -static int tg3_load_tso_firmware(struct tg3 *tp) -{ - struct fw_info info; - const u32 *fw_data; - unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size; - int err, i; - - if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) - return 0; - - fw_data = (void *)tp->fw->data; - - /* Firmware blob starts with version numbers, followed by - start address and length. We are setting complete length. - length = end_address_of_bss - start_address_of_text. - Remainder is the blob to be loaded contiguously - from start address. 
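 *
 * Layout sketch of the header just described (indices match the
 * assignments below; 12 bytes = the three u32 header words):
 *
 *   fw->data[0]      firmware version
 *   fw->data[1]      fw_base, the load/start address
 *   fw->data[2]      stated length (end_of_bss - start_of_text)
 *   fw->data[3]...   image words, loaded contiguously at fw_base
 *
 * hence info.fw_len = tp->fw->size - 12 covers the whole image.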
*/ + tg3_write_sig_pre_reset(tp, kind); - info.fw_base = fw_data[1]; - info.fw_len = tp->fw->size - 12; - info.fw_data = &fw_data[3]; + tg3_abort_hw(tp, silent); + err = tg3_chip_reset(tp); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { - cpu_base = RX_CPU_BASE; - cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705; - cpu_scratch_size = (info.fw_len + - TG3_TSO5_FW_SBSS_LEN + - TG3_TSO5_FW_BSS_LEN); - } else { - cpu_base = TX_CPU_BASE; - cpu_scratch_base = TX_CPU_SCRATCH_BASE; - cpu_scratch_size = TX_CPU_SCRATCH_SIZE; - } + __tg3_set_mac_addr(tp, 0); - err = tg3_load_firmware_cpu(tp, cpu_base, - cpu_scratch_base, cpu_scratch_size, - &info); - if (err) - return err; + tg3_write_sig_legacy(tp, kind); + tg3_write_sig_post_reset(tp, kind); - /* Now startup the cpu. */ - tw32(cpu_base + CPU_STATE, 0xffffffff); - tw32_f(cpu_base + CPU_PC, info.fw_base); + if (tp->hw_stats) { + /* Save the stats across chip resets... */ + tg3_get_nstats(tp, &tp->net_stats_prev); + tg3_get_estats(tp, &tp->estats_prev); - for (i = 0; i < 5; i++) { - if (tr32(cpu_base + CPU_PC) == info.fw_base) - break; - tw32(cpu_base + CPU_STATE, 0xffffffff); - tw32(cpu_base + CPU_MODE, CPU_MODE_HALT); - tw32_f(cpu_base + CPU_PC, info.fw_base); - udelay(1000); - } - if (i >= 5) { - netdev_err(tp->dev, - "%s fails to set CPU PC, is %08x should be %08x\n", - __func__, tr32(cpu_base + CPU_PC), info.fw_base); - return -ENODEV; + /* And make sure the next sample is new data */ + memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats)); } - tw32(cpu_base + CPU_STATE, 0xffffffff); - tw32_f(cpu_base + CPU_MODE, 0x00000000); - return 0; -} -#endif /* TG3_TSO_SUPPORT != 0 */ + return err; +} static int tg3_set_mac_addr(struct net_device *dev, void *p) { @@ -7938,7 +8934,7 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p) if (!netif_running(dev)) return 0; - if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) { + if (tg3_flag(tp, ENABLE_ASF)) { u32 addr0_high, addr0_low, addr1_high, addr1_low; addr0_high = tr32(MAC_ADDR_0_HIGH); @@ -7973,18 +8969,17 @@ static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr, (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS), maxlen_flags); - if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) + if (!tg3_flag(tp, 5705_PLUS)) tg3_write_mem(tp, (bdinfo_addr + TG3_BDINFO_NIC_ADDR), nic_addr); } -static void __tg3_set_rx_mode(struct net_device *); -static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec) +static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec) { - int i; + int i = 0; - if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) { + if (!tg3_flag(tp, ENABLE_TSS)) { tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs); tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames); tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq); @@ -7992,31 +8987,43 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec) tw32(HOSTCC_TXCOL_TICKS, 0); tw32(HOSTCC_TXMAX_FRAMES, 0); tw32(HOSTCC_TXCOAL_MAXF_INT, 0); + + for (; i < tp->txq_cnt; i++) { + u32 reg; + + reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18; + tw32(reg, ec->tx_coalesce_usecs); + reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18; + tw32(reg, ec->tx_max_coalesced_frames); + reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18; + tw32(reg, ec->tx_max_coalesced_frames_irq); + } + } + + for (; i < tp->irq_max - 1; i++) { + tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0); + tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0); + tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); } +} + +static void tg3_coal_rx_init(struct tg3 *tp, struct 
ethtool_coalesce *ec) +{ + int i = 0; + u32 limit = tp->rxq_cnt; - if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) { + if (!tg3_flag(tp, ENABLE_RSS)) { tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs); tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames); tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq); + limit--; } else { tw32(HOSTCC_RXCOL_TICKS, 0); tw32(HOSTCC_RXMAX_FRAMES, 0); tw32(HOSTCC_RXCOAL_MAXF_INT, 0); } - if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { - u32 val = ec->stats_block_coalesce_usecs; - - tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq); - tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq); - - if (!netif_carrier_ok(tp->dev)) - val = 0; - - tw32(HOSTCC_STAT_COAL_TICKS, val); - } - - for (i = 0; i < tp->irq_cnt - 1; i++) { + for (; i < limit; i++) { u32 reg; reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18; @@ -8025,41 +9032,45 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec) tw32(reg, ec->rx_max_coalesced_frames); reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18; tw32(reg, ec->rx_max_coalesced_frames_irq); - - if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) { - reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18; - tw32(reg, ec->tx_coalesce_usecs); - reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18; - tw32(reg, ec->tx_max_coalesced_frames); - reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18; - tw32(reg, ec->tx_max_coalesced_frames_irq); - } } for (; i < tp->irq_max - 1; i++) { tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0); tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0); tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); + } +} - if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) { - tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0); - tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0); - tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); - } +static void __tg3_set_rx_mode(struct net_device *); +static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec) +{ + tg3_coal_tx_init(tp, ec); + tg3_coal_rx_init(tp, ec); + + if (!tg3_flag(tp, 5705_PLUS)) { + u32 val = ec->stats_block_coalesce_usecs; + + tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq); + tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq); + + if (!netif_carrier_ok(tp->dev)) + val = 0; + + tw32(HOSTCC_STAT_COAL_TICKS, val); } } /* tp->lock is held. */ -static void tg3_rings_reset(struct tg3 *tp) +static void tg3_tx_rcbs_disable(struct tg3 *tp) { - int i; - u32 stblk, txrcb, rxrcb, limit; - struct tg3_napi *tnapi = &tp->napi[0]; + u32 txrcb, limit; /* Disable all transmit rings but the first. */ - if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) + if (!tg3_flag(tp, 5705_PLUS)) limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16; - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + else if (tg3_flag(tp, 5717_PLUS)) + limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4; + else if (tg3_flag(tp, 57765_CLASS)) limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2; else limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE; @@ -8068,15 +9079,41 @@ static void tg3_rings_reset(struct tg3 *tp) txrcb < limit; txrcb += TG3_BDINFO_SIZE) tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS, BDINFO_FLAGS_DISABLED); +} +/* tp->lock is held. 
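 *
 * Usage sketch (assumed from the call graph visible in this patch):
 * the new ring-init helpers below are only reached from
 * tg3_rings_reset(), so callers take the lock once around the whole
 * sequence:
 *
 *   tg3_full_lock(tp, 0);
 *   tg3_rings_reset(tp);    // invokes tg3_tx_rcbs_init() and friends
 *   tg3_full_unlock(tp);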
*/ +static void tg3_tx_rcbs_init(struct tg3 *tp) +{ + int i = 0; + u32 txrcb = NIC_SRAM_SEND_RCB; + + if (tg3_flag(tp, ENABLE_TSS)) + i++; + + for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) { + struct tg3_napi *tnapi = &tp->napi[i]; + + if (!tnapi->tx_ring) + continue; + + tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping, + (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT), + NIC_SRAM_TX_BUFFER_DESC); + } +} + +/* tp->lock is held. */ +static void tg3_rx_ret_rcbs_disable(struct tg3 *tp) +{ + u32 rxrcb, limit; /* Disable all receive return rings but the first. */ - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) + if (tg3_flag(tp, 5717_PLUS)) limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17; - else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) + else if (!tg3_flag(tp, 5705_PLUS)) limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16; else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + tg3_flag(tp, 57765_CLASS)) limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4; else limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE; @@ -8085,44 +9122,90 @@ static void tg3_rings_reset(struct tg3 *tp) rxrcb < limit; rxrcb += TG3_BDINFO_SIZE) tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS, BDINFO_FLAGS_DISABLED); +} + +/* tp->lock is held. */ +static void tg3_rx_ret_rcbs_init(struct tg3 *tp) +{ + int i = 0; + u32 rxrcb = NIC_SRAM_RCV_RET_RCB; + + if (tg3_flag(tp, ENABLE_RSS)) + i++; + + for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) { + struct tg3_napi *tnapi = &tp->napi[i]; + + if (!tnapi->rx_rcb) + continue; + + tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, + (tp->rx_ret_ring_mask + 1) << + BDINFO_FLAGS_MAXLEN_SHIFT, 0); + } +} + +/* tp->lock is held. */ +static void tg3_rings_reset(struct tg3 *tp) +{ + int i; + u32 stblk; + struct tg3_napi *tnapi = &tp->napi[0]; + + tg3_tx_rcbs_disable(tp); + +#ifdef TG3_VMWARE_NETQ_ENABLE + for (i = 1; i < TG3_IRQ_MAX_VECS_IOV; i++) + tg3_disable_prod_rcbs(tp, i); +#endif + + tg3_rx_ret_rcbs_disable(tp); /* Disable interrupts */ tw32_mailbox_f(tp->napi[0].int_mbox, 1); + tp->napi[0].chk_msi_cnt = 0; + tp->napi[0].last_rx_cons = 0; + tp->napi[0].last_tx_cons = 0; /* Zero mailbox registers. */ - if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) { - for (i = 1; i < TG3_IRQ_MAX_VECS; i++) { + if (tg3_flag(tp, SUPPORT_MSIX)) { + for (i = 1; i < tp->irq_max; i++) { tp->napi[i].tx_prod = 0; tp->napi[i].tx_cons = 0; - if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) + if (tg3_flag(tp, ENABLE_TSS)) tw32_mailbox(tp->napi[i].prodmbox, 0); tw32_rx_mbox(tp->napi[i].consmbox, 0); tw32_mailbox_f(tp->napi[i].int_mbox, 1); - + tp->napi[i].chk_msi_cnt = 0; tp->napi[i].last_rx_cons = 0; tp->napi[i].last_tx_cons = 0; + +#ifdef TG3_VMWARE_NETQ_ENABLE + if (!tg3_flag(tp, ENABLE_RSS)) { + struct tg3_rx_prodring_set *tpr; + + tpr = &tp->napi[i].prodring; + tw32_rx_mbox(tpr->rx_jmb_mbox, 0); + tw32_rx_mbox(tpr->rx_std_mbox, 0); + } +#endif } - if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) + if (!tg3_flag(tp, ENABLE_TSS)) tw32_mailbox(tp->napi[0].prodmbox, 0); } else { tp->napi[0].tx_prod = 0; tp->napi[0].tx_cons = 0; tw32_mailbox(tp->napi[0].prodmbox, 0); tw32_rx_mbox(tp->napi[0].consmbox, 0); - tp->napi[0].last_rx_cons = 0; - tp->napi[0].last_tx_cons = 0; } /* Make sure the NIC-based send BD rings are disabled. 
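 *
 * Presumably only pre-5705 chips keep send BD rings in NIC memory,
 * hence the !tg3_flag(tp, 5705_PLUS) guard below. Each of the 16
 * rings is parked by zeroing the low word of its 64-bit producer
 * mailbox; for ring i that word sits at:
 *
 *   MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW + i * 8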
*/ - if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { + if (!tg3_flag(tp, 5705_PLUS)) { u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW; for (i = 0; i < 16; i++) tw32_tx_mbox(mbox + i * 8, 0); } - txrcb = NIC_SRAM_SEND_RCB; - rxrcb = NIC_SRAM_RCV_RET_RCB; - /* Clear status block in ram. */ memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); @@ -8132,45 +9215,127 @@ static void tg3_rings_reset(struct tg3 *tp) tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, ((u64) tnapi->status_mapping & 0xffffffff)); - if (tnapi->tx_ring) { - tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping, - (TG3_TX_RING_SIZE << - BDINFO_FLAGS_MAXLEN_SHIFT), - NIC_SRAM_TX_BUFFER_DESC); - txrcb += TG3_BDINFO_SIZE; - } - - if (tnapi->rx_rcb) { - tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, - (TG3_RX_RCB_RING_SIZE(tp) << - BDINFO_FLAGS_MAXLEN_SHIFT), 0); - rxrcb += TG3_BDINFO_SIZE; - } - stblk = HOSTCC_STATBLCK_RING1; for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) { u64 mapping = (u64)tnapi->status_mapping; tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32); tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff); + stblk += 8; /* Clear status block in ram. */ memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); + } - if (tnapi->tx_ring) { - tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping, - (TG3_TX_RING_SIZE << - BDINFO_FLAGS_MAXLEN_SHIFT), - NIC_SRAM_TX_BUFFER_DESC); - txrcb += TG3_BDINFO_SIZE; - } + tg3_tx_rcbs_init(tp); + tg3_rx_ret_rcbs_init(tp); +} - tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, - (TG3_RX_RCB_RING_SIZE(tp) << - BDINFO_FLAGS_MAXLEN_SHIFT), 0); +static void tg3_setup_rxbd_thresholds(struct tg3 *tp) +{ + u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh; - stblk += 8; - rxrcb += TG3_BDINFO_SIZE; + if (!tg3_flag(tp, 5750_PLUS) || + tg3_flag(tp, 5780_CLASS) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || + tg3_flag(tp, 57765_PLUS)) + bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700; + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) + bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755; + else + bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906; + +#ifdef TG3_VMWARE_NETQ_ENABLE + /* In IOV mode, the std rx BD cache is chopped into 17 pieces. */ + if (tg3_flag(tp, ENABLE_IOV)) + bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906; +#endif /* TG3_VMWARE_NETQ_ENABLE */ + + nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post); + host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1); + + val = min(nic_rep_thresh, host_rep_thresh); + tw32(RCVBDI_STD_THRESH, val); + + if (tg3_flag(tp, 57765_PLUS)) + tw32(STD_REPLENISH_LWM, bdcache_maxcnt); + +#ifdef TG3_VMWARE_NETQ_ENABLE + if (tg3_flag(tp, 5717_PLUS) && tg3_flag(tp, ENABLE_IOV)) + tw32(STD_REPLENISH_LWM, bdcache_maxcnt / 2); +#endif /* TG3_VMWARE_NETQ_ENABLE */ + + if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS)) + return; + + bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700; + +#ifdef TG3_VMWARE_NETQ_ENABLE + /* In IOV mode, the jmb rx BD cache is chopped into 17 pieces.
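 *
 * Worked example (illustrative numbers): with rx_jumbo_pending = 96,
 * host_rep_thresh = max(96 / 8, 1) = 12, and the value written to
 * RCVBDI_JUMBO_THRESH below is min(bdcache_maxcnt / 2, 12); the NIC
 * then asks for replenishment once that many jumbo BDs are consumed.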
*/ + if (tg3_flag(tp, ENABLE_IOV)) + bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717; +#endif /* TG3_VMWARE_NETQ_ENABLE */ + + host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1); + + val = min(bdcache_maxcnt / 2, host_rep_thresh); + tw32(RCVBDI_JUMBO_THRESH, val); + + if (tg3_flag(tp, 57765_PLUS)) + tw32(JMB_REPLENISH_LWM, bdcache_maxcnt); + +#ifdef TG3_VMWARE_NETQ_ENABLE + if (tg3_flag(tp, 5717_PLUS) && tg3_flag(tp, ENABLE_IOV)) + tw32(JMB_REPLENISH_LWM, bdcache_maxcnt / 2); +#endif /* TG3_VMWARE_NETQ_ENABLE */ +} + +static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt) +{ + int i; + + for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) + tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt); +} + +static void tg3_rss_check_indir_tbl(struct tg3 *tp) +{ + int i; + + if (!tg3_flag(tp, ENABLE_RSS)) + return; + + if (tp->rxq_cnt == 1) { + memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl)); + return; + } + + /* Validate table against current IRQ count */ + for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) { + if (tp->rss_ind_tbl[i] >= tp->rxq_cnt) + break; + } + + if (i != TG3_RSS_INDIR_TBL_SIZE) + tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt); +} + +static void tg3_rss_write_indir_tbl(struct tg3 *tp) +{ + int i = 0; + u32 reg = MAC_RSS_INDIR_TBL_0; + + while (i < TG3_RSS_INDIR_TBL_SIZE) { + u32 val = tp->rss_ind_tbl[i]; + i++; + for (; i % 8; i++) { + val <<= 4; + val |= tp->rss_ind_tbl[i]; + } + tw32(reg, val); + reg += 4; } } @@ -8179,7 +9344,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) { u32 val, rdmac_mode; int i, err, limit; - struct tg3_rx_prodring_set *tpr = &tp->prodring[0]; + struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring; tg3_disable_ints(tp); @@ -8187,9 +9352,53 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tg3_write_sig_pre_reset(tp, RESET_KIND_INIT); - if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) + if (tg3_flag(tp, INIT_COMPLETE)) tg3_abort_hw(tp, 1); + /* Enable MAC control of LPI */ +#ifndef BCM_INCLUDE_PHYLIB_SUPPORT + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) +#endif + if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) { + tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, + TG3_CPMU_EEE_LNKIDL_PCIE_NL0 | + TG3_CPMU_EEE_LNKIDL_UART_IDL); + + tw32_f(TG3_CPMU_EEE_CTRL, + TG3_CPMU_EEE_CTRL_EXIT_20_1_US); + + val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET | + TG3_CPMU_EEEMD_LPI_IN_TX | + TG3_CPMU_EEEMD_LPI_IN_RX | + TG3_CPMU_EEEMD_EEE_ENABLE; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) + val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN; + + if (tg3_flag(tp, ENABLE_APE)) + val |= TG3_CPMU_EEEMD_APE_TX_DET_EN; + + if (tg3_disable_eee) + val = 0x0; + + tw32_f(TG3_CPMU_EEE_MODE, val); + + tw32_f(TG3_CPMU_EEE_DBTMR1, + TG3_CPMU_DBTMR1_PCIEXIT_2047US | + TG3_CPMU_DBTMR1_LNKIDLE_2047US); + + tw32_f(TG3_CPMU_EEE_DBTMR2, + TG3_CPMU_DBTMR2_APE_TX_2047US | + TG3_CPMU_DBTMR2_TXIDXEQ_2047US); + } + + if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) && + !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) { + tg3_phy_pull_config(tp); + tg3_phy_pull_status(tp); + tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; + } + if (reset_phy) tg3_phy_reset(tp); @@ -8235,7 +9444,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS); } - if (tp->tg3_flags3 & TG3_FLG3_L1PLLPD_EN) { + if (tg3_flag(tp, L1PLLPD_EN)) { u32 grc_mode = tr32(GRC_MODE); /* Access the lower 1K of PL PCIE block registers. 
*/ @@ -8249,18 +9458,37 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tw32(GRC_MODE, grc_mode); } - if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) { - u32 grc_mode = tr32(GRC_MODE); + if (tg3_flag(tp, 57765_CLASS)) { + if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) { + u32 grc_mode = tr32(GRC_MODE); - /* Access the lower 1K of PL PCIE block registers. */ - val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; - tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL); + /* Access the lower 1K of PL PCIE block registers. */ + val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; + tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL); - val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5); - tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5, - val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ); + val = tr32(TG3_PCIE_TLDLPL_PORT + + TG3_PCIE_PL_LO_PHYCTL5); + tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5, + val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ); - tw32(GRC_MODE, grc_mode); + tw32(GRC_MODE, grc_mode); + } + + if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) { + u32 grc_mode = tr32(GRC_MODE); + + /* Access the lower 1K of DL PCIE block registers. */ + val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; + tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL); + + val = tr32(TG3_PCIE_TLDLPL_PORT + + TG3_PCIE_DL_LO_FTSMAX); + val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK; + tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX, + val | TG3_PCIE_DL_LO_FTSMAX_VAL); + + tw32(GRC_MODE, grc_mode); + } val = tr32(TG3_CPMU_LSPD_10MB_CLK); val &= ~CPMU_LSPD_10MB_MACCLK_MASK; @@ -8273,20 +9501,20 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) * other revision. But do not set this on PCI Express * chips and don't even touch the clocks if the CPMU is present. */ - if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) { - if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) + if (!tg3_flag(tp, CPMU_PRESENT)) { + if (!tg3_flag(tp, PCI_EXPRESS)) tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT; tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); } if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 && - (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) { + tg3_flag(tp, PCIX_MODE)) { val = tr32(TG3PCI_PCISTATE); val |= PCISTATE_RETRY_SAME_DMA; tw32(TG3PCI_PCISTATE, val); } - if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { + if (tg3_flag(tp, ENABLE_APE)) { /* Allow reads and writes to the * APE register and memory space. 
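 *
 * (The APE is the NIC's management processor, used for ASF/NC-SI
 * traffic; opening these windows lets the host driver reach its
 * registers and shared memory. Sketch of what this branch does,
 * assuming it matches the tg3_restore_pci_state() hunk earlier:
 *
 *   val = tr32(TG3PCI_PCISTATE);
 *   val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
 *          PCISTATE_ALLOW_APE_SHMEM_WR |
 *          PCISTATE_ALLOW_APE_PSPACE_WR;
 *   tw32(TG3PCI_PCISTATE, val);)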
*/ @@ -8313,12 +9541,14 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) if (err) return err; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { + if (tg3_flag(tp, 57765_PLUS)) { val = tr32(TG3PCI_DMA_RW_CTRL) & ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT; if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK; + if (!tg3_flag(tp, 57765_CLASS) && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) + val |= DMA_RWCTRL_TAGGED_STAT_WA; tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl); } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 && GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) { @@ -8342,9 +9572,20 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) */ tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM; - tw32(GRC_MODE, - tp->grc_mode | - (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP)); + val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP; + if (tp->rxptpctl || tg3_flag(tp, TX_TSTAMP_EN)) { + tw32(TG3_RX_PTP_CTL, + tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK); + + val |= GRC_MODE_TIME_SYNC_ENABLE; + } + +#ifdef TG3_VMWARE_NETQ_ENABLE + if (tg3_flag(tp, ENABLE_IOV)) + val |= GRC_MODE_IOV_ENABLE; +#endif + + tw32(GRC_MODE, tp->grc_mode | val); /* Setup the timer prescalar register. Clock is always 66Mhz. */ val = tr32(GRC_MISC_CFG); @@ -8353,7 +9594,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tw32(GRC_MISC_CFG, val); /* Initialize MBUF/DESC pool. */ - if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { + if (tg3_flag(tp, 5750_PLUS)) { /* Do nothing. */ } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) { tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE); @@ -8363,7 +9604,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96); tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE); tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE); - } else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) { + } else if (tg3_flag(tp, TSO_CAPABLE)) { #if TG3_TSO_SUPPORT != 0 int fw_len; @@ -8400,7 +9641,14 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tw32(BUFMGR_DMA_HIGH_WATER, tp->bufmgr_config.dma_high_water); - tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE); + val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) + val |= BUFMGR_MODE_NO_TX_UNDERRUN; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 || + tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) + val |= BUFMGR_MODE_MBLOW_ATTN_ENAB; + tw32(BUFMGR_MODE, val); for (i = 0; i < 2000; i++) { if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE) break; @@ -8411,21 +9659,10 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) return -ENODEV; } - /* Setup replenish threshold. 
*/ - val = tp->rx_pending / 8; - if (val == 0) - val = 1; - else if (val > tp->rx_std_max_post) - val = tp->rx_std_max_post; - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { - if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1) - tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2); - - if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2)) - val = TG3_RX_INTERNAL_RING_SZ_5906 / 2; - } + if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1) + tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2); - tw32(RCVBDI_STD_THRESH, val); + tg3_setup_rxbd_thresholds(tp); /* Initialize TG3_BDINFO's at: * RCVDBDI_STD_BD: standard eth size rx ring @@ -8448,32 +9685,32 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) ((u64) tpr->rx_std_mapping >> 32)); tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, ((u64) tpr->rx_std_mapping & 0xffffffff)); - if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) + if (!tg3_flag(tp, 5717_PLUS)) tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR, NIC_SRAM_RX_BUFFER_DESC); /* Disable the mini ring */ - if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) + if (!tg3_flag(tp, 5705_PLUS)) tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS, BDINFO_FLAGS_DISABLED); /* Program the jumbo buffer descriptor ring control * blocks on those devices that have them. */ - if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) && - !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { - /* Setup replenish threshold. */ - tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8); + if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 || + (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) { - if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) { + if (tg3_flag(tp, JUMBO_RING_ENABLE)) { tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, ((u64) tpr->rx_jmb_mapping >> 32)); tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, ((u64) tpr->rx_jmb_mapping & 0xffffffff)); + val = TG3_RX_JMB_RING_SIZE(tp) << + BDINFO_FLAGS_MAXLEN_SHIFT; tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, - (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) | - BDINFO_FLAGS_USE_EXT_RECV); - if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) + val | BDINFO_FLAGS_USE_EXT_RECV); + if (!tg3_flag(tp, USE_JUMBO_BDFLAG) || + tg3_flag(tp, 57765_CLASS)) tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR, NIC_SRAM_RX_JUMBO_BUFFER_DESC); } else { @@ -8481,30 +9718,24 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) BDINFO_FLAGS_DISABLED); } - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) - val = (RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT) | - (TG3_RX_STD_DMA_SZ << 2); - else + if (tg3_flag(tp, 57765_PLUS)) { + val = TG3_RX_STD_RING_SIZE(tp); + val <<= BDINFO_FLAGS_MAXLEN_SHIFT; + val |= (TG3_RX_STD_DMA_SZ << 2); + } else val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT; } else - val = RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT; + val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT; tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val); tpr->rx_std_prod_idx = tp->rx_pending; tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx); - tpr->rx_jmb_prod_idx = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ? - tp->rx_jumbo_pending : 0; + tpr->rx_jmb_prod_idx = + tg3_flag(tp, JUMBO_RING_ENABLE) ? 
tp->rx_jumbo_pending : 0; tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { - tw32(STD_REPLENISH_LWM, 32); - tw32(JMB_REPLENISH_LWM, 16); - } - tg3_rings_reset(tp); /* Initialize MAC address and backoff seed. */ @@ -8517,10 +9748,16 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) /* The slot time is changed by tg3_setup_phy if we * run at gigabit with half duplex. */ - tw32(MAC_TX_LENGTHS, - (2 << TX_LENGTHS_IPG_CRS_SHIFT) | - (6 << TX_LENGTHS_IPG_SHIFT) | - (32 << TX_LENGTHS_SLOT_TIME_SHIFT)); + val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) | + (6 << TX_LENGTHS_IPG_SHIFT) | + (32 << TX_LENGTHS_SLOT_TIME_SHIFT); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + val |= tr32(MAC_TX_LENGTHS) & + (TX_LENGTHS_JMB_FRM_LEN_MSK | + TX_LENGTHS_CNT_DWN_VAL_MSK); + + tw32(MAC_TX_LENGTHS, val); /* Receive rules. */ tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS); @@ -8545,49 +9782,69 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) RDMAC_MODE_MBUF_RBD_CRPT_ENAB | RDMAC_MODE_MBUF_SBD_CRPT_ENAB; - /* If statement applies to 5705 and 5750 PCI devices only */ - if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && - tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) || - (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) { - if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE && + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && + tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) { + if (tg3_flag(tp, TSO_CAPABLE) && GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128; } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && - !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) { + !tg3_flag(tp, IS_5788)) { rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; } } - if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) + if (tg3_flag(tp, PCI_EXPRESS)) rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; #if TG3_TSO_SUPPORT != 0 - if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) + if (tg3_flag(tp, HW_TSO_1) || + tg3_flag(tp, HW_TSO_2) || + tg3_flag(tp, HW_TSO_3)) rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN; - if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) || + if (tg3_flag(tp, 57765_PLUS) || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN; #endif + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { + tg3_flag(tp, 57765_PLUS)) { val = tr32(TG3_RDMA_RSRVCTRL_REG); - tw32(TG3_RDMA_RSRVCTRL_REG, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { + val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK | + TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK | + TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK); + val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B | + TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K | + TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K; + } + tw32(TG3_RDMA_RSRVCTRL_REG, + val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX); + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { + val = 
tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL); + tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val | + TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K | + TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K); } /* Receive/send statistics. */ - if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { + if (tg3_flag(tp, 5750_PLUS)) { val = tr32(RCVLPC_STATS_ENABLE); val &= ~RCVLPC_STATSENAB_DACK_FIX; tw32(RCVLPC_STATS_ENABLE, val); } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) && - (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) { + tg3_flag(tp, TSO_CAPABLE)) { val = tr32(RCVLPC_STATS_ENABLE); val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX; tw32(RCVLPC_STATS_ENABLE, val); @@ -8610,7 +9867,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) __tg3_set_coalesce(tp, &tp->coal); - if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { + if (!tg3_flag(tp, 5705_PLUS)) { /* Status/statistics block address. See tg3_timer, * the tg3_periodic_fetch_stats call there, and * tg3_get_stats to see how this works for 5705/5750 chips. @@ -8636,36 +9893,35 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE); tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE); - if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) + if (!tg3_flag(tp, 5705_PLUS)) tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE); - if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) { - tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; + if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) { + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; /* reset to prevent losing 1st rx packet intermittently */ tw32_f(MAC_RX_MODE, RX_MODE_RESET); udelay(10); } - if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) - tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; - else - tp->mac_mode = 0; tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE | - MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE; - if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && - !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) && + MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | + MAC_MODE_FHDE_ENABLE; + if (tg3_flag(tp, ENABLE_APE)) + tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; + if (!tg3_flag(tp, 5705_PLUS) && + !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) tp->mac_mode |= MAC_MODE_LINK_POLARITY; tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR); udelay(40); /* tp->grc_local_ctrl is partially set up during tg3_get_invariants(). - * If TG3_FLG2_IS_NIC is zero, we should read the + * If TG3_FLAG_IS_NIC is zero, we should read the * register to preserve the GPIO settings for LOMs. The GPIOs, * whether used as inputs or outputs, are set by boot code after * reset. 
*/ - if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) { + if (!tg3_flag(tp, IS_NIC)) { u32 gpio_mask; gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 | @@ -8683,20 +9939,24 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask; /* GPIO1 must be driven high for eeprom write protect */ - if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) + if (tg3_flag(tp, EEPROM_WRITE_PROT)) tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | GRC_LCLCTRL_GPIO_OUTPUT1); } tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); udelay(100); - if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) { + if (tg3_flag(tp, USING_MSIX)) { val = tr32(MSGINT_MODE); - val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE; + val |= MSGINT_MODE_ENABLE; + if (tp->irq_cnt > 1) + val |= MSGINT_MODE_MULTIVEC_EN; + if (!tg3_flag(tp, 1SHOT_MSI)) + val |= MSGINT_MODE_ONE_SHOT_DISABLE; tw32(MSGINT_MODE, val); } - if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { + if (!tg3_flag(tp, 5705_PLUS)) { tw32_f(DMAC_MODE, DMAC_MODE_ENABLE); udelay(40); } @@ -8707,23 +9967,20 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB | WDMAC_MODE_LNGREAD_ENAB); - /* If statement applies to 5705 and 5750 PCI devices only */ - if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && - tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) { - if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) && + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && + tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) { + if (tg3_flag(tp, TSO_CAPABLE) && (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 || tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) { /* nothing */ } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && - !(tp->tg3_flags2 & TG3_FLG2_IS_5788) && - !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) { + !tg3_flag(tp, IS_5788)) { val |= WDMAC_MODE_RX_ACCEL; } } /* Enable host coalescing bug fix */ - if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) + if (tg3_flag(tp, 5755_PLUS)) val |= WDMAC_MODE_STATUS_TAG_FIX; if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) @@ -8732,7 +9989,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tw32_f(WDMAC_MODE, val); udelay(40); - if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) { + if (tg3_flag(tp, PCIX_MODE)) { u16 pcix_cmd; pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, @@ -8752,7 +10009,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) udelay(40); tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE); - if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) + if (!tg3_flag(tp, 5705_PLUS)) tw32(MBFREE_MODE, MBFREE_MODE_ENABLE); if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) @@ -8762,15 +10019,27 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE); tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE); +#ifdef TG3_VMWARE_NETQ_ENABLE + val = RCVBDI_MODE_ENABLE; + if (!tg3_flag(tp, ENABLE_IOV)) + val |= RCVBDI_MODE_RCB_ATTN_ENAB; + tw32(RCVBDI_MODE, val); +#else tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB); - tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ); +#endif + val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ; + if (tg3_flag(tp, LRG_PROD_RING_CAP)) + val |= RCVDBDI_MODE_LRG_RING_SZ; + tw32(RCVDBDI_MODE, val); tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE); #if TG3_TSO_SUPPORT != 0 - if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) + if (tg3_flag(tp, HW_TSO_1) || + tg3_flag(tp, HW_TSO_2) || + 
tg3_flag(tp, HW_TSO_3)) tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8); #endif val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE; - if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) + if (tg3_flag(tp, ENABLE_TSS)) val |= SNDBDI_MODE_MULTI_TXQ_EN; tw32(SNDBDI_MODE, val); tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE); @@ -8782,7 +10051,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) } #if TG3_TSO_SUPPORT != 0 - if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) { + if (tg3_flag(tp, TSO_CAPABLE)) { err = tg3_load_tso_firmware(tp); if (err) return err; @@ -8790,26 +10059,26 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) #endif tp->tx_mode = TX_MODE_ENABLE; - if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) || + + if (tg3_flag(tp, 5755_PLUS) || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { + val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE; + tp->tx_mode &= ~val; + tp->tx_mode |= tr32(MAC_TX_MODE) & val; + } + tw32_f(MAC_TX_MODE, tp->tx_mode); udelay(100); - if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) { - u32 reg = MAC_RSS_INDIR_TBL_0; - u8 *ent = (u8 *)&val; - - /* Setup the indirection table */ - for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) { - int idx = i % sizeof(val); +#ifdef TG3_VMWARE_NETQ_ENABLE + tg3_netq_restore(tp); +#endif - ent[idx] = i % (tp->irq_cnt - 1); - if (idx == sizeof(val) - 1) { - tw32(reg, val); - reg += 4; - } - } + if (tg3_flag(tp, ENABLE_RSS)) { + tg3_rss_write_indir_tbl(tp); /* Setup the "secret" hash key. */ tw32(MAC_RSS_HASH_KEY_0, 0x5f865437); @@ -8825,10 +10094,10 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) } tp->rx_mode = RX_MODE_ENABLE; - if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) + if (tg3_flag(tp, 5755_PLUS)) tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE; - if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) + if (tg3_flag(tp, ENABLE_RSS)) tp->rx_mode |= RX_MODE_RSS_ENABLE | RX_MODE_RSS_ITBL_HASH_BITS_7 | RX_MODE_RSS_IPV6_HASH_EN | @@ -8842,16 +10111,16 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tw32(MAC_LED_CTRL, tp->led_ctrl); tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB); - if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { tw32_f(MAC_RX_MODE, RX_MODE_RESET); udelay(10); } tw32_f(MAC_RX_MODE, tp->rx_mode); udelay(10); - if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) && - !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) { + !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) { /* Set drive transmission level to 1.2V */ /* only if the signal pre-emphasis bit is not set */ val = tr32(MAC_SERDES_CFG); @@ -8866,20 +10135,20 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) /* Prevent chip from dropping frames when flow control * is enabled. 
*/ - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + if (tg3_flag(tp, 57765_CLASS)) val = 1; else val = 2; tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val); if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 && - (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) { + (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { /* Use hardware link auto-negotiation */ - tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG; + tg3_flag_set(tp, HW_AUTONEG); } - if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) && - (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) { + if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { u32 tmp; tmp = tr32(SERDES_RX_CTRL); @@ -8889,9 +10158,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl); } - if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) { - if (tp->link_config.phy_is_low_power) { - tp->link_config.phy_is_low_power = 0; + if (!tg3_flag(tp, USE_PHYLIB)) { + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { + tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER; tp->link_config.speed = tp->link_config.orig_speed; tp->link_config.duplex = tp->link_config.orig_duplex; tp->link_config.autoneg = tp->link_config.orig_autoneg; @@ -8901,15 +10170,15 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) if (err) return err; - if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) && - !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)) { + if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && + !(tp->phy_flags & TG3_PHYFLG_IS_FET)) { u32 tmp; /* Clear CRC stats. */ if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) { tg3_writephy(tp, MII_TG3_TEST1, tmp | MII_TG3_TEST1_CRC_EN); - tg3_readphy(tp, 0x14, &tmp); + tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp); } } } @@ -8922,12 +10191,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK); tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK); - if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && - !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) + if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) limit = 8; else limit = 16; - if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) + if (tg3_flag(tp, ENABLE_ASF)) limit -= 4; switch (limit) { case 16: @@ -8965,7 +10233,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) break; } - if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) + if (tg3_flag(tp, ENABLE_APE)) /* Write our heartbeat update interval to APE. */ tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS, APE_HOST_HEARTBEAT_INT_DISABLE); @@ -9031,20 +10299,40 @@ static void tg3_periodic_fetch_stats(struct tg3 *tp) TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE); TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT); - TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT); + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && + tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 && + tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) { + TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT); + } else { + u32 val = tr32(HOSTCC_FLOW_ATTN); + val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 
1 : 0; + if (val) { + tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM); + sp->rx_discards.low += val; + if (sp->rx_discards.low < val) + sp->rx_discards.high += 1; + } + sp->mbuf_lwm_thresh_hit = sp->rx_discards; + } TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT); + +#ifdef TG3_VMWARE_NETQ_ENABLE + tg3_vmware_fetch_stats(tp); +#endif } static void tg3_chk_missed_msi(struct tg3 *tp) { u32 i; - if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) - return; - for (i = 0; i < tp->irq_cnt; i++) { struct tg3_napi *tnapi = &tp->napi[i]; - struct tg3_hw_status *sblk = tnapi->hw_status; + +#ifdef TG3_VMWARE_NETQ_ENABLE + if (!(tnapi->netq.flags & TG3_NETQ_RXQ_ENABLED) && + !(tnapi->netq.flags & TG3_NETQ_TXQ_ALLOCATED)) + continue; +#endif if (tg3_has_work(tnapi)) { if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr && @@ -9053,12 +10341,10 @@ static void tg3_chk_missed_msi(struct tg3 *tp) tnapi->chk_msi_cnt++; return; } - tw32_mailbox(tnapi->int_mbox, - tnapi->last_tag << 24); - tnapi->coalesce_tries++; -#if !defined(__VMKLNX__) - netdev_warn(tp->dev, "vector %d: Rewrote status tag %d times.\n", i, - tnapi->coalesce_tries); +#ifdef BCM_HAS_NEW_IRQ_SIG + tg3_msi(0, tnapi); +#else + tg3_msi(0, tnapi, 0); #endif } } @@ -9072,18 +10358,20 @@ static void tg3_timer(unsigned long __opaque) { struct tg3 *tp = (struct tg3 *) __opaque; - if (tp->irq_sync) + if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) goto restart_timer; spin_lock(&tp->lock); - tg3_chk_missed_msi(tp); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + tg3_flag(tp, 57765_CLASS)) + tg3_chk_missed_msi(tp); #if defined(__VMKLNX__) tg3_vmware_timer(tp); #endif - if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) { + if (!tg3_flag(tp, TAGGED_STATUS)) { /* All of this garbage is because when using non-tagged * IRQ status the mailbox/status_block protocol the chip * uses with the cpu is race prone. @@ -9097,26 +10385,28 @@ static void tg3_timer(unsigned long __opaque) } if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { - tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER; spin_unlock(&tp->lock); - schedule_work(&tp->reset_task); - return; + tg3_reset_task_schedule(tp); + goto restart_timer; } } /* This part only runs once per second. */ if (!--tp->timer_counter) { - if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) + if (tg3_flag(tp, 5705_PLUS)) tg3_periodic_fetch_stats(tp); - if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) { + if (tp->setlpicnt && !--tp->setlpicnt) + tg3_phy_eee_enable(tp); + + if (tg3_flag(tp, USE_LINKCHG_REG)) { u32 mac_stat; int phy_event; mac_stat = tr32(MAC_STATUS); phy_event = 0; - if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) { + if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) { if (mac_stat & MAC_STATUS_MI_INTERRUPT) phy_event = 1; } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED) @@ -9124,7 +10414,7 @@ static void tg3_timer(unsigned long __opaque) if (phy_event) tg3_setup_phy(tp, 0); - } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) { + } else if (tg3_flag(tp, POLL_SERDES)) { u32 mac_stat = tr32(MAC_STATUS); int need_setup = 0; @@ -9132,7 +10422,7 @@ static void tg3_timer(unsigned long __opaque) (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) { need_setup = 1; } - if (! 
netif_carrier_ok(tp->dev) && + if (!netif_carrier_ok(tp->dev) && (mac_stat & (MAC_STATUS_PCS_SYNCED | MAC_STATUS_SIGNAL_DET))) { need_setup = 1; @@ -9148,8 +10438,8 @@ static void tg3_timer(unsigned long __opaque) } tg3_setup_phy(tp, 0); } - } else if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) && - (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { + } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && + tg3_flag(tp, 5780_CLASS)) { tg3_serdes_parallel_detect(tp); } @@ -9174,8 +10464,7 @@ static void tg3_timer(unsigned long __opaque) * resets. */ if (!--tp->asf_counter) { - if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) && - !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) { + if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) { tg3_wait_for_event_ack(tp); tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, @@ -9211,20 +10500,31 @@ static int tg3_request_irq(struct tg3 *tp, int irq_num) name = tp->dev->name; else { name = &tnapi->irq_lbl[0]; - snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num); + if (tnapi->tx_buffers && tnapi->rx_rcb) + snprintf(name, IFNAMSIZ, + "%s-txrx-%d", tp->dev->name, irq_num); + else if (tnapi->tx_buffers) + snprintf(name, IFNAMSIZ, + "%s-tx-%d", tp->dev->name, irq_num); + else if (tnapi->rx_rcb) + snprintf(name, IFNAMSIZ, + "%s-rx-%d", tp->dev->name, irq_num); + else + snprintf(name, IFNAMSIZ, + "%s-%d", tp->dev->name, irq_num); name[IFNAMSIZ-1] = 0; } - if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) { + if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) { fn = tg3_msi; - if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) + if (tg3_flag(tp, 1SHOT_MSI)) fn = tg3_msi_1shot; - flags = IRQF_SAMPLE_RANDOM; + flags = 0; } else { fn = tg3_interrupt; - if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) + if (tg3_flag(tp, TAGGED_STATUS)) fn = tg3_interrupt_tagged; - flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM; + flags = IRQF_SHARED; } return request_irq(tnapi->irq_vec, fn, flags, name, tnapi); @@ -9248,9 +10548,7 @@ static int tg3_test_interrupt(struct tg3 *tp) * Turn off MSI one shot mode. Otherwise this test has no * observable way to know whether the interrupt was delivered. */ - if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) && - (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) { + if (tg3_flag(tp, 57765_PLUS)) { val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE; tw32(MSGINT_MODE, val); } @@ -9278,6 +10576,10 @@ static int tg3_test_interrupt(struct tg3 *tp) break; } + if (tg3_flag(tp, 57765_PLUS) && + tnapi->hw_status->status_tag != tnapi->last_tag) + tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); + msleep(10); } @@ -9292,9 +10594,7 @@ static int tg3_test_interrupt(struct tg3 *tp) if (intr_ok) { /* Reenable MSI one shot mode. 
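 *
 * Note the bit polarity: MSGINT_MODE_ONE_SHOT_DISABLE was set at the
 * top of this test (one-shot delivery is otherwise unobservable
 * here), so clearing it restores the default one-shot mode:
 *
 *   val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
 *   tw32(MSGINT_MODE, val);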
*/ - if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) && - (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) { + if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) { val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE; tw32(MSGINT_MODE, val); } @@ -9313,7 +10613,7 @@ static int tg3_test_msi(struct tg3 *tp) int err; u16 pci_cmd; - if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI)) + if (!tg3_flag(tp, USING_MSI)) return 0; /* Turn off SERR reporting in case MSI terminates with Master @@ -9343,7 +10643,8 @@ static int tg3_test_msi(struct tg3 *tp) pci_disable_msi(tp->pdev); - tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI; + tg3_flag_clear(tp, USING_MSI); + tp->napi[0].irq_vec = tp->pdev->irq; err = tg3_request_irq(tp, 0); if (err) @@ -9398,64 +10699,178 @@ static int tg3_request_firmware(struct tg3 *tp) } #if defined(CONFIG_PCI_MSI) -static bool tg3_enable_msix(struct tg3 *tp) +static bool tg3_ints_alloc_vectors(struct tg3 *tp) { -#ifdef BCM_HAS_STRUCT_NETDEV_QUEUE - int i, rc, cpus = num_online_cpus(); + int i, rc; struct msix_entry msix_ent[tp->irq_max]; - if (cpus == 1) - /* Just fallback to the simpler MSI mode. */ - return false; - - /* - * We want as many rx rings enabled as there are cpus. - * The first MSIX vector only deals with link interrupts, etc, - * so we add one to the number of vectors we are requesting. - */ - tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max); - for (i = 0; i < tp->irq_max; i++) { msix_ent[i].entry = i; msix_ent[i].vector = 0; } - rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt); - if (rc < 0) { - return false; - } else if (rc != 0) { - if (pci_enable_msix(tp->pdev, msix_ent, rc)) + rc = tp->irq_cnt; + while (1) { + int ret; + +#ifdef TG3_VMWARE_NETQ_ENABLE + if (!tg3_flag(tp, IOV_CAPABLE)) +#endif + /* If the kernel says that only two MSI-X + * vectors are available, fallback to a simpler + * single queue, single vector MSI-X mode. + */ + if (rc == 2) + rc--; + + ret = pci_enable_msix(tp->pdev, msix_ent, rc); + if (ret < 0) return false; - netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n", - tp->irq_cnt, rc); - tp->irq_cnt = rc; + else if (ret == 0) + break; + rc = ret; } for (i = 0; i < tp->irq_max; i++) tp->napi[i].irq_vec = msix_ent[i].vector; - tp->dev->real_num_tx_queues = 1; - if (tp->irq_cnt > 1) { - tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS; + return true; +} + +static inline u32 tg3_irq_count(struct tg3 *tp) +{ + u32 irqcnt; + + irqcnt = max(tp->rxq_cnt, tp->txq_cnt); + if (irqcnt > 1) { + /* We want as many rx rings enabled as there are cpus. + * In multiqueue MSI-X mode, the first MSI-X vector only deals + * with link interrupts, etc, so we add one to the number of + * vectors we are requesting. 
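 *
 * Worked example (illustrative, assuming the queue maxima allow it):
 * with 8 online CPUs and default queue requests, rxq_cnt = txq_cnt =
 * 8, so irqcnt = max(8, 8) = 8 and the request becomes
 * min(8 + 1, tp->irq_max) vectors, the extra one covering the
 * link/misc vector 0.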
+ */ + irqcnt = min_t(unsigned, irqcnt + 1, tp->irq_max); + } + +#ifdef TG3_VMWARE_NETQ_ENABLE + if (tg3_flag(tp, IOV_CAPABLE)) + irqcnt = tg3_netq_tune_vector_count(tp); +#endif + + return irqcnt; +} + +static bool tg3_enable_msix(struct tg3 *tp) +{ + u32 cpus, irqcnt; + + cpus = num_online_cpus(); + + tp->txq_cnt = tp->txq_req; + tp->rxq_cnt = tp->rxq_req; + if (!tp->txq_cnt) + tp->txq_cnt = min(cpus, tp->txq_max); + if (!tp->rxq_cnt) + tp->rxq_cnt = min(cpus, tp->rxq_max); + +#ifdef TG3_VMWARE_NETQ_ENABLE + tg3_netq_limit_dflt_queue_counts(tp); +#endif + + irqcnt = tg3_irq_count(tp); + + tp->irq_cnt = irqcnt; + while (tp->irq_cnt) { + u32 rxq_cnt, new_irq_cnt; + + if (!tg3_ints_alloc_vectors(tp)) + return false; + + /* If the number of interrupts is less than our desired queue + * count, adjust the queue count downwards to match. + */ + rxq_cnt = tp->irq_cnt; +#ifdef TG3_VMWARE_NETQ_ENABLE + if (!tg3_flag(tp, IOV_CAPABLE)) +#endif + if (tp->irq_cnt > 1) + rxq_cnt--; + + rxq_cnt = min(rxq_cnt, tp->rxq_cnt); + tp->rxq_cnt = rxq_cnt; + +#ifdef BCM_HAS_STRUCT_NETDEV_QUEUE + while (rxq_cnt) { + if (netif_set_real_num_rx_queues(tp->dev, rxq_cnt)) + rxq_cnt--; + else + break; + } - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { - tp->tg3_flags3 |= TG3_FLG3_ENABLE_TSS; - tp->dev->real_num_tx_queues = tp->irq_cnt - 1; + if (!rxq_cnt) { + pci_disable_msix(tp->pdev); + return false; } +#endif /* BCM_HAS_STRUCT_NETDEV_QUEUE */ + + if (tp->rxq_cnt == rxq_cnt) + break; + + tp->rxq_cnt = rxq_cnt; + + /* See if we can free up any unused MSI-X vectors. */ + new_irq_cnt = tg3_irq_count(tp); + + /* If the IRQ count is the same, we need + * the extra interrupts for the tx side. + */ + if (irqcnt == new_irq_cnt) + break; + + /* Free unused interrupts and reallocate the exact amount. */ + pci_disable_msix(tp->pdev); + tp->irq_cnt = new_irq_cnt; } - return true; -#else - return false; + if (irqcnt != tp->irq_cnt) + netdev_notice(tp->dev, + "Requested %d MSI-X vectors, received %d\n", + tp->irq_cnt, irqcnt); + + if (tp->irq_cnt == 1) { + tp->txq_cnt = 1; + goto done; + } + + /* If more than one interrupt vector is allocated, we _need_ to enable + * either IOV mode or RSS mode, even if only one rx queue is desired. + * If we don't, TSS will not work. + */ +#ifdef TG3_VMWARE_NETQ_ENABLE + if (tg3_flag(tp, IOV_CAPABLE)) + tg3_flag_set(tp, ENABLE_IOV); + else +#endif + tg3_flag_set(tp, ENABLE_RSS); + + if (tp->txq_cnt > 1) { + tg3_flag_set(tp, ENABLE_TSS); + tp->txq_cnt = min(tp->irq_cnt - 1, tp->txq_cnt); + } + +done: +#ifdef BCM_HAS_STRUCT_NETDEV_QUEUE + netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt); #endif + + return true; } #endif static void tg3_ints_init(struct tg3 *tp) { #ifdef CONFIG_PCI_MSI - if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI_OR_MSIX) && - !(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) { + if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) && + !tg3_flag(tp, TAGGED_STATUS)) { /* All MSI supporting chips should support tagged * status. Assert that this is the case. 
*/ @@ -9464,28 +10879,35 @@ static void tg3_ints_init(struct tg3 *tp) goto defcfg; } - if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) && tg3_enable_msix(tp)) - tp->tg3_flags2 |= TG3_FLG2_USING_MSIX; - else if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) && - pci_enable_msi(tp->pdev) == 0) - tp->tg3_flags2 |= TG3_FLG2_USING_MSI; + if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp)) + tg3_flag_set(tp, USING_MSIX); + else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0) + tg3_flag_set(tp, USING_MSI); tg3_5780_class_intx_workaround(tp); - if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) { + if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) { u32 msi_mode = tr32(MSGINT_MODE); - if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) + if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) msi_mode |= MSGINT_MODE_MULTIVEC_EN; + if (!tg3_flag(tp, 1SHOT_MSI)) + msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE; tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE); } defcfg: #endif - if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) { + if (!tg3_flag(tp, USING_MSIX)) { tp->irq_cnt = 1; tp->napi[0].irq_vec = tp->pdev->irq; + } + + if (tp->irq_cnt == 1) { + tp->txq_cnt = 1; + tp->rxq_cnt = 1; #ifdef BCM_HAS_STRUCT_NETDEV_QUEUE - tp->dev->real_num_tx_queues = 1; + netif_set_real_num_tx_queues(tp->dev, 1); + netif_set_real_num_rx_queues(tp->dev, 1); #endif } } @@ -9493,13 +10915,18 @@ defcfg: static void tg3_ints_fini(struct tg3 *tp) { #ifdef CONFIG_PCI_MSI - if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) + if (tg3_flag(tp, USING_MSIX)) pci_disable_msix(tp->pdev); - else if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) + else if (tg3_flag(tp, USING_MSI)) pci_disable_msi(tp->pdev); #endif - tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI_OR_MSIX; - tp->tg3_flags3 &= ~(TG3_FLG3_ENABLE_RSS | TG3_FLG3_ENABLE_TSS); + tg3_flag_clear(tp, USING_MSI); + tg3_flag_clear(tp, USING_MSIX); + tg3_flag_clear(tp, ENABLE_RSS); + tg3_flag_clear(tp, ENABLE_TSS); +#ifdef TG3_VMWARE_NETQ_ENABLE + tg3_flag_clear(tp, ENABLE_IOV); +#endif } static int tg3_open(struct net_device *dev) @@ -9514,23 +10941,23 @@ static int tg3_open(struct net_device *dev) return err; } else if (err) { netdev_warn(tp->dev, "TSO capability disabled\n"); - tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE; - } else if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) { + tg3_flag_clear(tp, TSO_CAPABLE); + } else if (!tg3_flag(tp, TSO_CAPABLE)) { netdev_notice(tp->dev, "TSO capability restored\n"); - tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; + tg3_flag_set(tp, TSO_CAPABLE); } } netif_carrier_off(tp->dev); - err = tg3_set_power_state(tp, PCI_D0); + err = tg3_power_up(tp); if (err) return err; tg3_full_lock(tp, 0); tg3_disable_ints(tp); - tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE; + tg3_flag_clear(tp, INIT_COMPLETE); tg3_full_unlock(tp); @@ -9540,6 +10967,8 @@ static int tg3_open(struct net_device *dev) */ tg3_ints_init(tp); + tg3_rss_check_indir_tbl(tp); + /* The placement of this call is tied * to the setup and use of Host TX descriptors. 
*/ @@ -9547,30 +10976,33 @@ static int tg3_open(struct net_device *dev)
 if (err)
 goto err_out1;

+ tg3_napi_init(tp);
+
 tg3_napi_enable(tp);

 for (i = 0; i < tp->irq_cnt; i++) {
 struct tg3_napi *tnapi = &tp->napi[i];
 err = tg3_request_irq(tp, i);
 if (err) {
- for (i--; i >= 0; i--)
+ for (i--; i >= 0; i--) {
+ tnapi = &tp->napi[i];
 free_irq(tnapi->irq_vec, tnapi);
- break;
+ }
+ goto err_out2;
 }
 }

- if (err)
- goto err_out2;
-
 tg3_full_lock(tp, 0);

- err = tg3_init_hw(tp, 1);
+ err = tg3_init_hw(tp,
+ (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) == 0);

 if (err) {
 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 tg3_free_rings(tp);
 } else {
- if ((tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
+ if (tg3_flag(tp, TAGGED_STATUS) &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
+ !tg3_flag(tp, 57765_CLASS))
 tp->timer_offset = HZ;
 else
 tp->timer_offset = HZ / 10;
@@ -9593,7 +11025,7 @@ static int tg3_open(struct net_device *dev)
 goto err_out3;

 #ifdef CONFIG_PCI_MSI
- if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
+ if (tg3_flag(tp, USING_MSI)) {
 err = tg3_test_msi(tp);

 if (err) {
@@ -9605,10 +11037,7 @@ static int tg3_open(struct net_device *dev)
 goto err_out2;
 }

- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
- (tp->tg3_flags2 & TG3_FLG2_USING_MSI) &&
- (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)) {
+ if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
 u32 val = tr32(PCIE_TRANSACTION_CFG);

 tw32(PCIE_TRANSACTION_CFG,
@@ -9622,13 +11051,22 @@ static int tg3_open(struct net_device *dev)
 tg3_full_lock(tp, 0);

 add_timer(&tp->timer);
- tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
+ tg3_flag_set(tp, INIT_COMPLETE);
 tg3_enable_ints(tp);

 tg3_full_unlock(tp);

 netif_tx_start_all_queues(dev);
-
+
+#ifdef BCM_HAS_FIX_FEATURES
+ /*
+ * Reset the loopback feature if it was turned on while the device was
+ * down; make sure that it's installed properly now.
+ */
+ if (dev->features & NETIF_F_LOOPBACK)
+ tg3_set_loopback(dev, dev->features);
+#endif
+
 return 0;

err_out3:
@@ -9639,43 +11077,137 @@ err_out3:

err_out2:
 tg3_napi_disable(tp);
+ tg3_napi_fini(tp);
 tg3_free_consistent(tp);

err_out1:
 tg3_ints_fini(tp);
+ tg3_power_down(tp);
 return err;
}

-static struct net_device_stats *tg3_get_stats(struct net_device *);
-static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
+#ifdef ETHTOOL_GCHANNELS
+static int tg3_start(struct tg3 *tp, bool reset_phy)
+{
+ int i, err;

-static int tg3_close(struct net_device *dev)
+ /* Set up interrupts first so we know how
+ * many NAPI resources to allocate
+ */
+ tg3_ints_init(tp);
+
+ /* Make sure the RSS indirection table
+ * does not use unallocated interrupts.
+ */
+ tg3_rss_check_indir_tbl(tp);
+
+ /* The placement of this call is tied
+ * to the setup and use of Host TX descriptors.
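The reworked request loop above also fixes a subtle unwind bug: the old code called free_irq() repeatedly with the tnapi pointer of the vector that had just failed, rather than the one belonging to each earlier index, so a partial failure freed the wrong vectors. The corrected loop re-derives the per-index pointer before every free. The same rollback shape in the abstract (acquire() and release() are placeholders):

/* Roll back a partially completed acquisition: on failure at index
 * i, release indices i-1 down to 0, recomputing each per-index
 * resource instead of reusing the one that failed.
 */
for (i = 0; i < n; i++) {
        err = acquire(i);
        if (err) {
                while (--i >= 0)
                        release(i);
                return err;
        }
}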
+ */
+ err = tg3_alloc_consistent(tp);
+ if (err)
+ goto err_out1;
+
+ tg3_napi_enable(tp);
+
+ for (i = 0; i < tp->irq_cnt; i++) {
+ struct tg3_napi *tnapi = &tp->napi[i];
+ err = tg3_request_irq(tp, i);
+ if (err) {
+ for (i--; i >= 0; i--) {
+ tnapi = &tp->napi[i];
+ free_irq(tnapi->irq_vec, tnapi);
+ }
+ goto err_out2;
+ }
+ }
+
+ tg3_full_lock(tp, 0);
+
+ err = tg3_init_hw(tp, reset_phy);
+ if (err)
+ goto err_out3;
+
+ tg3_flag_set(tp, INIT_COMPLETE);
+
+ tg3_full_unlock(tp);
+
+ tg3_phy_start(tp);
+
+ tg3_full_lock(tp, 0);
+
+ tp->timer.expires = jiffies + tp->timer_offset;
+ add_timer(&tp->timer);
+
+ tg3_enable_ints(tp);
+
+ tg3_full_unlock(tp);
+
+ netif_tx_start_all_queues(tp->dev);
+
+#ifdef BCM_HAS_FIX_FEATURES
+ /* Reset the loopback feature if it was turned on while the device
+ * was down; make sure that it's installed properly now.
+ */
+ if (tp->dev->features & NETIF_F_LOOPBACK)
+ tg3_set_loopback(tp->dev, tp->dev->features);
+#endif
+
+ return 0;
+
+err_out3:
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+ tg3_free_rings(tp);
+
+ for (i = tp->irq_cnt - 1; i >= 0; i--) {
+ struct tg3_napi *tnapi = &tp->napi[i];
+ free_irq(tnapi->irq_vec, tnapi);
+ }
+
+err_out2:
+ tg3_napi_disable(tp);
+ tg3_napi_fini(tp);
+ tg3_free_consistent(tp);
+
+err_out1:
+ tg3_ints_fini(tp);
+
+ return err;
+}
+#endif /* ETHTOOL_GCHANNELS */
+
+static int tg3_stop(struct tg3 *tp)
{
 int i;
- struct tg3 *tp = netdev_priv(dev);

- tg3_napi_disable(tp);
+ tg3_reset_task_cancel(tp);

-#if (LINUX_VERSION_CODE >= 0x20616) || defined (__VMKLNX__)
- cancel_work_sync(&tp->reset_task);
+#ifdef __VMKLNX__
+ /* VMware has a bug where transmits might be reattempted while
+ * the device is shutting down. The only way to fix this is to
+ * take the tx_lock, which happens through netif_tx_disable().
+ */ + tg3_netif_stop(tp); #else - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(1); -#endif + tg3_napi_disable(tp); - netif_tx_stop_all_queues(dev); + netif_tx_stop_all_queues(tp->dev); +#endif del_timer_sync(&tp->timer); tg3_phy_stop(tp); +#ifdef TG3_VMWARE_NETQ_ENABLE + tg3_netq_free_all_qs(tp); +#endif + tg3_full_lock(tp, 1); tg3_disable_ints(tp); tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); tg3_free_rings(tp); - tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE; tg3_full_unlock(tp); @@ -9686,54 +11218,58 @@ static int tg3_close(struct net_device *dev) tg3_ints_fini(tp); - memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev), - sizeof(tp->net_stats_prev)); - memcpy(&tp->estats_prev, tg3_get_estats(tp), - sizeof(tp->estats_prev)); - - tg3_free_consistent(tp); +#ifdef TG3_VMWARE_NETQ_ENABLE + tg3_netq_stats_clear(tp); +#endif - tg3_set_power_state(tp, PCI_D3hot); + tg3_napi_fini(tp); - netif_carrier_off(tp->dev); + tg3_free_consistent(tp); return 0; } -static inline unsigned long get_stat64(tg3_stat64_t *val) +static int tg3_close(struct net_device *dev) { - unsigned long ret; + struct tg3 *tp = netdev_priv(dev); -#if (BITS_PER_LONG == 32) - ret = val->low; -#else - ret = ((u64)val->high << 32) | ((u64)val->low); -#endif - return ret; + tg3_stop(tp); + + tg3_flag_clear(tp, INIT_COMPLETE); + + /* Clear stats across close / open calls */ + memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev)); + memset(&tp->estats_prev, 0, sizeof(tp->estats_prev)); + + tg3_power_down_prepare(tp); + + tg3_power_down(tp); + + netif_carrier_off(dev); + + return 0; } -static inline u64 get_estat64(tg3_stat64_t *val) +static inline u64 get_stat64(tg3_stat64_t *val) { return ((u64)val->high << 32) | ((u64)val->low); } -static unsigned long calc_crc_errors(struct tg3 *tp) +static u64 tg3_calc_crc_errors(struct tg3 *tp) { struct tg3_hw_stats *hw_stats = tp->hw_stats; - if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) && + if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) { u32 val; - spin_lock_bh(&tp->lock); if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) { tg3_writephy(tp, MII_TG3_TEST1, val | MII_TG3_TEST1_CRC_EN); - tg3_readphy(tp, 0x14, &val); + tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val); } else val = 0; - spin_unlock_bh(&tp->lock); tp->phy_crc_errors += val; @@ -9745,17 +11281,13 @@ static unsigned long calc_crc_errors(struct tg3 *tp) #define ESTAT_ADD(member) \ estats->member = old_estats->member + \ - get_estat64(&hw_stats->member) + get_stat64(&hw_stats->member) -static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp) +static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats) { - struct tg3_ethtool_stats *estats = &tp->estats; struct tg3_ethtool_stats *old_estats = &tp->estats_prev; struct tg3_hw_stats *hw_stats = tp->hw_stats; - if (!hw_stats) - return old_estats; - ESTAT_ADD(rx_octets); ESTAT_ADD(rx_fragments); ESTAT_ADD(rx_ucast_packets); @@ -9832,19 +11364,14 @@ static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp) ESTAT_ADD(nic_avoided_irqs); ESTAT_ADD(nic_tx_threshold_hit); - return estats; + ESTAT_ADD(mbuf_lwm_thresh_hit); } -static struct net_device_stats *tg3_get_stats(struct net_device *dev) +static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats) { - struct tg3 *tp = netdev_priv(dev); - struct net_device_stats *stats = &tp->net_stats; - struct net_device_stats *old_stats = &tp->net_stats_prev; + struct rtnl_link_stats64 *old_stats = 
&tp->net_stats_prev; struct tg3_hw_stats *hw_stats = tp->hw_stats; - if (!hw_stats) - return old_stats; - stats->rx_packets = old_stats->rx_packets + get_stat64(&hw_stats->rx_ucast_packets) + get_stat64(&hw_stats->rx_mcast_packets) + @@ -9887,12 +11414,13 @@ static struct net_device_stats *tg3_get_stats(struct net_device *dev) get_stat64(&hw_stats->tx_carrier_sense_errors); stats->rx_crc_errors = old_stats->rx_crc_errors + - calc_crc_errors(tp); + tg3_calc_crc_errors(tp); stats->rx_missed_errors = old_stats->rx_missed_errors + get_stat64(&hw_stats->rx_discards); - return stats; + stats->rx_dropped = tp->rx_dropped; + stats->tx_dropped = tp->tx_dropped; } static inline u32 calc_crc(unsigned char *buf, int len) @@ -9939,17 +11467,11 @@ static void __tg3_set_rx_mode(struct net_device *dev) /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG * flag clear. */ -#if TG3_VLAN_TAG_USED - if (!tp->vlgrp && - !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) - rx_mode |= RX_MODE_KEEP_VLAN_TAG; -#else - /* By definition, VLAN is disabled always in this - * case. - */ - if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) - rx_mode |= RX_MODE_KEEP_VLAN_TAG; +#ifdef BCM_USE_OLD_VLAN_INTERFACE + if (!tp->vlgrp) #endif + if (!tg3_flag(tp, ENABLE_ASF)) + rx_mode |= RX_MODE_KEEP_VLAN_TAG; if (dev->flags & IFF_PROMISC) { /* Promiscuous mode. */ @@ -9982,12 +11504,35 @@ static void __tg3_set_rx_mode(struct net_device *dev) tw32(MAC_HASH_REG_3, mc_filter[3]); } - if (rx_mode != tp->rx_mode) { - tp->rx_mode = rx_mode; - tw32_f(MAC_RX_MODE, rx_mode); - udelay(10); - } + if (rx_mode != tp->rx_mode) { + tp->rx_mode = rx_mode; + tw32_f(MAC_RX_MODE, rx_mode); + udelay(10); + } +} + +static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + struct tg3 *tp = netdev_priv(dev); + + if (!tp->hw_stats) + return &tp->net_stats_prev; + + spin_lock_bh(&tp->lock); + tg3_get_nstats(tp, stats); + spin_unlock_bh(&tp->lock); + + return stats; +} + +#ifndef BCM_HAS_GET_STATS64 +static struct rtnl_link_stats64 *tg3_get_stats(struct net_device *dev) +{ + struct tg3 *tp = netdev_priv(dev); + return tg3_get_stats64(dev, &tp->net_stats); } +#endif /* BCM_HAS_GET_STATS64 */ static void tg3_set_rx_mode(struct net_device *dev) { @@ -10001,82 +11546,26 @@ static void tg3_set_rx_mode(struct net_device *dev) tg3_full_unlock(tp); } -#define TG3_REGDUMP_LEN (32 * 1024) - static int tg3_get_regs_len(struct net_device *dev) { - return TG3_REGDUMP_LEN; + return TG3_REG_BLK_SIZE; } static void tg3_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p) { - u32 *p = _p; struct tg3 *tp = netdev_priv(dev); - u8 *orig_p = _p; - int i; regs->version = 0; - memset(p, 0, TG3_REGDUMP_LEN); + memset(_p, 0, TG3_REG_BLK_SIZE); - if (tp->link_config.phy_is_low_power) + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) return; tg3_full_lock(tp, 0); -#define __GET_REG32(reg) (*(p)++ = tr32(reg)) -#define GET_REG32_LOOP(base,len) \ -do { p = (u32 *)(orig_p + (base)); \ - for (i = 0; i < len; i += 4) \ - __GET_REG32((base) + i); \ -} while (0) -#define GET_REG32_1(reg) \ -do { p = (u32 *)(orig_p + (reg)); \ - __GET_REG32((reg)); \ -} while (0) - - GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0); - GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200); - GET_REG32_LOOP(MAC_MODE, 0x4f0); - GET_REG32_LOOP(SNDDATAI_MODE, 0xe0); - GET_REG32_1(SNDDATAC_MODE); - GET_REG32_LOOP(SNDBDS_MODE, 0x80); - GET_REG32_LOOP(SNDBDI_MODE, 0x48); - GET_REG32_1(SNDBDC_MODE); - GET_REG32_LOOP(RCVLPC_MODE, 0x20); - GET_REG32_LOOP(RCVLPC_SELLST_BASE, 
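tg3_get_stats64() above settles the lifetime question that used to be handled inside tg3_close(): while the device is up, counters are folded from the DMA-visible hardware statistics block under tp->lock; once the device is closed and hw_stats is freed, the last snapshot in net_stats_prev is returned instead. Condensed to its shape (struct example_priv and fill_from_hw() are placeholders for struct tg3 and tg3_get_nstats()):

static struct rtnl_link_stats64 *example_get_stats64(struct net_device *dev,
                                                     struct rtnl_link_stats64 *st)
{
        struct example_priv *p = netdev_priv(dev);

        if (!p->hw_stats)               /* device closed: frozen snapshot */
                return &p->net_stats_prev;

        spin_lock_bh(&p->lock);         /* counters updated in BH context */
        fill_from_hw(p, st);
        spin_unlock_bh(&p->lock);
        return st;
}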
0x15c); - GET_REG32_LOOP(RCVDBDI_MODE, 0x0c); - GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c); - GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44); - GET_REG32_1(RCVDCC_MODE); - GET_REG32_LOOP(RCVBDI_MODE, 0x20); - GET_REG32_LOOP(RCVCC_MODE, 0x14); - GET_REG32_LOOP(RCVLSC_MODE, 0x08); - GET_REG32_1(MBFREE_MODE); - GET_REG32_LOOP(HOSTCC_MODE, 0x100); - GET_REG32_LOOP(MEMARB_MODE, 0x10); - GET_REG32_LOOP(BUFMGR_MODE, 0x58); - GET_REG32_LOOP(RDMAC_MODE, 0x08); - GET_REG32_LOOP(WDMAC_MODE, 0x08); - GET_REG32_1(RX_CPU_MODE); - GET_REG32_1(RX_CPU_STATE); - GET_REG32_1(RX_CPU_PGMCTR); - GET_REG32_1(RX_CPU_HWBKPT); - GET_REG32_1(TX_CPU_MODE); - GET_REG32_1(TX_CPU_STATE); - GET_REG32_1(TX_CPU_PGMCTR); - GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110); - GET_REG32_LOOP(FTQ_RESET, 0x120); - GET_REG32_LOOP(MSGINT_MODE, 0x0c); - GET_REG32_1(DMAC_MODE); - GET_REG32_LOOP(GRC_MODE, 0x4c); - if (tp->tg3_flags & TG3_FLAG_NVRAM) - GET_REG32_LOOP(NVRAM_CMD, 0x24); - -#undef __GET_REG32 -#undef GET_REG32_LOOP -#undef GET_REG32_1 + tg3_dump_legacy_regs(tp, (u32 *)_p); tg3_full_unlock(tp); } @@ -10099,10 +11588,10 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u32 i, offset, len, b_offset, b_count; __be32 val; - if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) + if (tg3_flag(tp, NO_NVRAM)) return -EINVAL; - if (tp->link_config.phy_is_low_power) + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) return -EAGAIN; offset = eeprom->offset; @@ -10122,13 +11611,13 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, ret = tg3_nvram_read_be32(tp, offset-b_offset, &val); if (ret) return ret; - memcpy(data, ((char*)&val) + b_offset, b_count); + memcpy(data, ((char *)&val) + b_offset, b_count); len -= b_count; offset += b_count; eeprom->len += b_count; } - /* read bytes upto the last 4 byte boundary */ + /* read bytes up to the last 4 byte boundary */ pd = &data[eeprom->len]; for (i = 0; i < (len - (len & 3)); i += 4) { ret = tg3_nvram_read_be32(tp, offset + i, &val); @@ -10166,10 +11655,10 @@ static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *buf; __be32 start, end; - if (tp->link_config.phy_is_low_power) + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) return -EAGAIN; - if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) || + if (tg3_flag(tp, NO_NVRAM) || eeprom->magic != TG3_EEPROM_MAGIC) return -EINVAL; @@ -10223,9 +11712,9 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) struct tg3 *tp = netdev_priv(dev); #ifdef BCM_INCLUDE_PHYLIB_SUPPORT - if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { + if (tg3_flag(tp, USE_PHYLIB)) { struct phy_device *phydev; - if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) + if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) return -EAGAIN; phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; return phy_ethtool_gset(phydev, cmd); @@ -10234,11 +11723,11 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) cmd->supported = (SUPPORTED_Autoneg); - if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) + if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) cmd->supported |= (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full); - if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) { + if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { cmd->supported |= (SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_10baseT_Half | @@ -10251,9 +11740,38 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) } cmd->advertising = tp->link_config.advertising; - if (netif_running(dev)) { - cmd->speed = 
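The EEPROM read path above must serve byte ranges that are not 32-bit aligned, so it reads the covering NVRAM word and copies only the requested part. Worked example: len = 5 at offset = 6 gives b_offset = 6 & 3 = 2 and b_count = 4 - 2 = 2; the word at offset 4 is read, its last two bytes are kept, and the loop then continues with whole words from offset 8. Simplified sketch of the head handling (read_word() stands in for tg3_nvram_read_be32()):

b_offset = offset & 3;
if (b_offset) {
        b_count = 4 - b_offset;         /* usable bytes in the head word */
        if (b_count > len)
                b_count = len;          /* request ends inside the word */
        ret = read_word(tp, offset - b_offset, &val);
        if (ret)
                return ret;
        memcpy(data, (char *)&val + b_offset, b_count);
        len -= b_count;
        offset += b_count;
}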
tp->link_config.active_speed; + if (tg3_flag(tp, PAUSE_AUTONEG)) { + if (tp->link_config.flowctrl & FLOW_CTRL_RX) { + if (tp->link_config.flowctrl & FLOW_CTRL_TX) { + cmd->advertising |= ADVERTISED_Pause; + } else { + cmd->advertising |= ADVERTISED_Pause | + ADVERTISED_Asym_Pause; + } + } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) { + cmd->advertising |= ADVERTISED_Asym_Pause; + } + } + if (netif_running(dev) && netif_carrier_ok(dev)) { + ethtool_cmd_speed_set(cmd, tp->link_config.active_speed); cmd->duplex = tp->link_config.active_duplex; +#ifdef BCM_HAS_LP_ADVERTISING + cmd->lp_advertising = tp->link_config.rmt_adv; +#endif /* BCM_HAS_LP_ADVERTISING */ +#ifdef BCM_HAS_MDIX_STATUS + if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { + if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE) + cmd->eth_tp_mdix = ETH_TP_MDI_X; + else + cmd->eth_tp_mdix = ETH_TP_MDI; + } +#endif /* BCM_HAS_MDIX_STATUS */ + } else { + ethtool_cmd_speed_set(cmd, SPEED_INVALID); + cmd->duplex = DUPLEX_INVALID; +#ifdef BCM_HAS_MDIX_STATUS + cmd->eth_tp_mdix = ETH_TP_MDI_INVALID; +#endif /* BCM_HAS_MDIX_STATUS */ } cmd->phy_address = tp->phy_addr; cmd->transceiver = XCVR_INTERNAL; @@ -10266,11 +11784,12 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct tg3 *tp = netdev_priv(dev); + u32 speed = ethtool_cmd_speed(cmd); #ifdef BCM_INCLUDE_PHYLIB_SUPPORT - if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { + if (tg3_flag(tp, USE_PHYLIB)) { struct phy_device *phydev; - if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) + if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) return -EAGAIN; phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; return phy_ethtool_sset(phydev, cmd); @@ -10291,11 +11810,11 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) ADVERTISED_Pause | ADVERTISED_Asym_Pause; - if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) + if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) mask |= ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full; - if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) + if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) mask |= ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | ADVERTISED_10baseT_Half | @@ -10316,15 +11835,15 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) cmd->advertising &= mask; } else { - if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) { - if (cmd->speed != SPEED_1000) + if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) { + if (speed != SPEED_1000) return -EINVAL; if (cmd->duplex != DUPLEX_FULL) return -EINVAL; } else { - if (cmd->speed != SPEED_100 && - cmd->speed != SPEED_10) + if (speed != SPEED_100 && + speed != SPEED_10) return -EINVAL; } } @@ -10339,7 +11858,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) tp->link_config.duplex = DUPLEX_INVALID; } else { tp->link_config.advertising = 0; - tp->link_config.speed = cmd->speed; + tp->link_config.speed = speed; tp->link_config.duplex = cmd->duplex; } @@ -10347,6 +11866,8 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) tp->link_config.orig_duplex = tp->link_config.duplex; tp->link_config.orig_autoneg = tp->link_config.autoneg; + tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; + if (netif_running(dev)) tg3_setup_phy(tp, 1); @@ -10359,24 +11880,22 @@ static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info { struct tg3 *tp = netdev_priv(dev); - strcpy(info->driver, DRV_MODULE_NAME); - strcpy(info->version, 
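The ethtool_cmd_speed()/ethtool_cmd_speed_set() accessors adopted above exist because struct ethtool_cmd keeps the link speed split across two 16-bit fields for binary compatibility; the kernel helpers are roughly:

static inline void ethtool_cmd_speed_set(struct ethtool_cmd *ep, __u32 speed)
{
        ep->speed = (__u16)speed;               /* low 16 bits */
        ep->speed_hi = (__u16)(speed >> 16);    /* high 16 bits */
}

static inline __u32 ethtool_cmd_speed(const struct ethtool_cmd *ep)
{
        return (ep->speed_hi << 16) | ep->speed;
}

Open-coding cmd->speed = speed would silently truncate anything above 65535 Mb/s, which is why the helpers are preferred.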
DRV_MODULE_VERSION); - strcpy(info->fw_version, tp->fw_ver); - strcpy(info->bus_info, pci_name(tp->pdev)); + strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); + strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version)); + strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info)); } static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct tg3 *tp = netdev_priv(dev); - if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) && - device_can_wakeup(&tp->pdev->dev)) + if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev)) wol->supported = WAKE_MAGIC; else wol->supported = 0; wol->wolopts = 0; - if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) && - device_can_wakeup(&tp->pdev->dev)) + if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev)) wol->wolopts = WAKE_MAGIC; memset(&wol->sopass, 0, sizeof(wol->sopass)); } @@ -10384,21 +11903,23 @@ static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct tg3 *tp = netdev_priv(dev); +#ifdef BCM_HAS_DEVICE_WAKEUP_API + struct device *dp = &tp->pdev->dev; +#endif if (wol->wolopts & ~WAKE_MAGIC) return -EINVAL; if ((wol->wolopts & WAKE_MAGIC) && - !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(&tp->pdev->dev))) + !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp))) return -EINVAL; + device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC); + spin_lock_bh(&tp->lock); - if (wol->wolopts & WAKE_MAGIC) { - tp->tg3_flags |= TG3_FLAG_WOL_ENABLE; - device_set_wakeup_enable(&tp->pdev->dev, true); - } else { - tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE; - device_set_wakeup_enable(&tp->pdev->dev, false); - } + if (wol->wolopts & WAKE_MAGIC) + tg3_flag_set(tp, WOL_ENABLE); + else + tg3_flag_clear(tp, WOL_ENABLE); spin_unlock_bh(&tp->lock); return 0; @@ -10416,35 +11937,6 @@ static void tg3_set_msglevel(struct net_device *dev, u32 value) tp->msg_enable = value; } -#if TG3_TSO_SUPPORT != 0 -static int tg3_set_tso(struct net_device *dev, u32 value) -{ - struct tg3 *tp = netdev_priv(dev); - - if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) { - if (value) - return -EINVAL; - return 0; - } - if ((dev->features & NETIF_F_IPV6_CSUM) && - ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) || - (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3))) { - if (value) { - dev->features |= NETIF_F_TSO6; - if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || - (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && - GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) - dev->features |= NETIF_F_TSO_ECN; - } else - dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN); - } - return ethtool_op_set_tso(dev, value); -} -#endif - static int tg3_nway_reset(struct net_device *dev) { struct tg3 *tp = netdev_priv(dev); @@ -10453,12 +11945,12 @@ static int tg3_nway_reset(struct net_device *dev) if (!netif_running(dev)) return -EAGAIN; - if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) return -EINVAL; #ifdef BCM_INCLUDE_PHYLIB_SUPPORT - if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { - if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) + if (tg3_flag(tp, USE_PHYLIB)) { + if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) return -EAGAIN; r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); } else @@ -10471,7 
+11963,7 @@ static int tg3_nway_reset(struct net_device *dev) tg3_readphy(tp, MII_BMCR, &bmcr); if (!tg3_readphy(tp, MII_BMCR, &bmcr) && ((bmcr & BMCR_ANENABLE) || - (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) { + (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) { tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE); r = 0; @@ -10486,10 +11978,10 @@ static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam * { struct tg3 *tp = netdev_priv(dev); - ering->rx_max_pending = TG3_RX_RING_SIZE - 1; + ering->rx_max_pending = tp->rx_std_ring_mask; ering->rx_mini_max_pending = 0; - if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) - ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1; + if (tg3_flag(tp, JUMBO_RING_ENABLE)) + ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask; else ering->rx_jumbo_max_pending = 0; @@ -10497,7 +11989,7 @@ static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam * ering->rx_pending = tp->rx_pending; ering->rx_mini_pending = 0; - if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) + if (tg3_flag(tp, JUMBO_RING_ENABLE)) ering->rx_jumbo_pending = tp->rx_jumbo_pending; else ering->rx_jumbo_pending = 0; @@ -10510,17 +12002,20 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e struct tg3 *tp = netdev_priv(dev); int i, irq_sync = 0, err = 0; - if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) || - (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) || + if ((ering->rx_pending > tp->rx_std_ring_mask) || + (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) || (ering->tx_pending > TG3_TX_RING_SIZE - 1) || (ering->tx_pending <= MAX_SKB_FRAGS) || - ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) && + (tg3_flag(tp, TSO_BUG) && (ering->tx_pending <= (MAX_SKB_FRAGS * 3)))) return -EINVAL; if (netif_running(dev)) { tg3_phy_stop(tp); tg3_netif_stop(tp); +#ifdef TG3_VMWARE_NETQ_ENABLE + tg3_netq_invalidate_state(tp); +#endif irq_sync = 1; } @@ -10528,12 +12023,12 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e tp->rx_pending = ering->rx_pending; - if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) && + if (tg3_flag(tp, MAX_RXPEND_64) && tp->rx_pending > 63) tp->rx_pending = 63; tp->rx_jumbo_pending = ering->rx_jumbo_pending; - for (i = 0; i < TG3_IRQ_MAX_VECS; i++) + for (i = 0; i < tp->irq_max; i++) tp->napi[i].tx_pending = ering->tx_pending; if (netif_running(dev)) { @@ -10555,14 +12050,14 @@ static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam { struct tg3 *tp = netdev_priv(dev); - epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0; + epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG); - if (tp->link_config.active_flowctrl & FLOW_CTRL_RX) + if (tp->link_config.flowctrl & FLOW_CTRL_RX) epause->rx_pause = 1; else epause->rx_pause = 0; - if (tp->link_config.active_flowctrl & FLOW_CTRL_TX) + if (tp->link_config.flowctrl & FLOW_CTRL_TX) epause->tx_pause = 1; else epause->tx_pause = 0; @@ -10574,7 +12069,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam int err = 0; #ifdef BCM_INCLUDE_PHYLIB_SUPPORT - if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { + if (tg3_flag(tp, USE_PHYLIB)) { u32 newadv; struct phy_device *phydev; @@ -10582,8 +12077,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam if (!(phydev->supported & SUPPORTED_Pause) || (!(phydev->supported & SUPPORTED_Asym_Pause) && - ((epause->rx_pause && !epause->tx_pause) || - (!epause->rx_pause && epause->tx_pause)))) + 
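The bounds in tg3_get_ringparam()/tg3_set_ringparam() above move from compile-time constants to the per-chip masks rx_std_ring_mask and rx_jmb_ring_mask; because the receive rings are power-of-two sized, size - 1 serves both as the producer-index wrap mask and as the largest pending count that can be exposed to ethtool. Illustration with an assumed 512-entry ring (values illustrative, not chip-specific):

#define EXAMPLE_RING_SIZE  512
#define EXAMPLE_RING_MASK  (EXAMPLE_RING_SIZE - 1)      /* 0x1ff */

/* the wrap-free index advance and the ethtool maximum share one value */
next = (next + 1) & EXAMPLE_RING_MASK;
ering->rx_max_pending = EXAMPLE_RING_MASK;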
(epause->rx_pause != epause->tx_pause))) return -EINVAL; tp->link_config.flowctrl = 0; @@ -10603,11 +12097,11 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam newadv = 0; if (epause->autoneg) - tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG; + tg3_flag_set(tp, PAUSE_AUTONEG); else - tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG; + tg3_flag_clear(tp, PAUSE_AUTONEG); - if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) { + if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { u32 oldadv = phydev->advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause); if (oldadv != newadv) { @@ -10643,15 +12137,18 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam if (netif_running(dev)) { tg3_netif_stop(tp); +#ifdef TG3_VMWARE_NETQ_ENABLE + tg3_netq_invalidate_state(tp); +#endif irq_sync = 1; } tg3_full_lock(tp, irq_sync); if (epause->autoneg) - tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG; + tg3_flag_set(tp, PAUSE_AUTONEG); else - tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG; + tg3_flag_clear(tp, PAUSE_AUTONEG); if (epause->rx_pause) tp->link_config.flowctrl |= FLOW_CTRL_RX; else @@ -10671,90 +12168,183 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam tg3_full_unlock(tp); } + tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; + return err; } -static u32 tg3_get_rx_csum(struct net_device *dev) +static int tg3_get_sset_count(struct net_device *dev, int sset) { - struct tg3 *tp = netdev_priv(dev); - return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0; + switch (sset) { + case ETH_SS_TEST: + return TG3_NUM_TEST; + case ETH_SS_STATS: +#ifdef TG3_VMWARE_NETQ_ENABLE + return tg3_netq_stats_size(netdev_priv(dev)); +#else + return TG3_NUM_STATS; +#endif + default: + return -EOPNOTSUPP; + } +} + +#if (LINUX_VERSION_CODE < 0x020618) +static int tg3_get_stats_count (struct net_device *dev) +{ + return tg3_get_sset_count(dev, ETH_SS_STATS); +} + +static int tg3_get_test_count (struct net_device *dev) +{ + return tg3_get_sset_count(dev, ETH_SS_TEST); } +#endif -static int tg3_set_rx_csum(struct net_device *dev, u32 data) +#ifdef BCM_HAS_GET_RXNFC +#ifdef BCM_HAS_OLD_GET_RXNFC_SIG +static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, + void *rules) +#else +static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, + u32 *rules __always_unused) +#endif /* BCM_HAS_OLD_GET_RXNFC_SIG */ { struct tg3 *tp = netdev_priv(dev); - if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) { - if (data != 0) - return -EINVAL; + if (!tg3_flag(tp, SUPPORT_MSIX)) + return -EOPNOTSUPP; + + switch (info->cmd) { + case ETHTOOL_GRXRINGS: + if (netif_running(tp->dev)) + info->data = tp->rxq_cnt; + else { + info->data = num_online_cpus(); + if (info->data > TG3_RSS_MAX_NUM_QS) + info->data = TG3_RSS_MAX_NUM_QS; + } return 0; + + default: + return -EOPNOTSUPP; } +} +#endif /* BCM_HAS_GET_RXNFC */ - spin_lock_bh(&tp->lock); - if (data) - tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS; - else - tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS; - spin_unlock_bh(&tp->lock); +#ifdef BCM_HAS_GET_RXFH_INDIR_SIZE +static u32 tg3_get_rxfh_indir_size(struct net_device *dev) +{ + u32 size = 0; + struct tg3 *tp = netdev_priv(dev); + + if (tg3_flag(tp, SUPPORT_MSIX)) + size = TG3_RSS_INDIR_TBL_SIZE; + + return size; +} + +static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir) +{ + struct tg3 *tp = netdev_priv(dev); + int i; + + for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) + indir[i] = tp->rss_ind_tbl[i]; return 0; } -#ifdef BCM_HAS_SET_TX_CSUM -static int 
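The condensed test (epause->rx_pause != epause->tx_pause) above is exactly equivalent to the two-clause expression it replaces; both flag the asymmetric configurations that require Asym_Pause support from the PHY:

/*  rx tx | (rx && !tx) || (!rx && tx) | rx != tx
 *  ------+-----------------------------+---------
 *   0  0 |             0               |    0
 *   0  1 |             1               |    1
 *   1  0 |             1               |    1
 *   1  1 |             0               |    0
 */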
tg3_set_tx_csum(struct net_device *dev, u32 data)
+static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
{
 struct tg3 *tp = netdev_priv(dev);
+ size_t i;

- if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
- if (data != 0)
- return -EINVAL;
+ for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
+ tp->rss_ind_tbl[i] = indir[i];
+
+ if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
 return 0;
- }

- if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
-#if defined(BCM_HAS_ETHTOOL_OP_SET_TX_IPV6_CSUM)
- ethtool_op_set_tx_ipv6_csum(dev, data);
-#elif defined(BCM_HAS_ETHTOOL_OP_SET_TX_HW_CSUM)
- ethtool_op_set_tx_hw_csum(dev, data);
-#else
- tg3_set_tx_hw_csum(dev, data);
-#endif
- else
- ethtool_op_set_tx_csum(dev, data);
+ /* It is legal to write the indirection
+ * table while the device is running.
+ */
+ tg3_full_lock(tp, 0);
+ tg3_rss_write_indir_tbl(tp);
+ tg3_full_unlock(tp);

 return 0;
}
-#endif
+#endif /* BCM_HAS_GET_RXFH_INDIR_SIZE */

-static int tg3_get_sset_count(struct net_device *dev, int sset)
+#ifdef ETHTOOL_GCHANNELS
+static void tg3_get_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
{
- switch (sset) {
- case ETH_SS_TEST:
- return TG3_NUM_TEST;
- case ETH_SS_STATS:
- return TG3_NUM_STATS;
- default:
- return -EOPNOTSUPP;
+ struct tg3 *tp = netdev_priv(dev);
+
+ channel->max_rx = tp->rxq_max;
+ channel->max_tx = tp->txq_max;
+
+ if (netif_running(dev)) {
+ channel->rx_count = tp->rxq_cnt;
+ channel->tx_count = tp->txq_cnt;
+ } else {
+ if (tp->rxq_req)
+ channel->rx_count = tp->rxq_req;
+ else
+ channel->rx_count = min(num_online_cpus(), tp->rxq_max);
+
+ if (tp->txq_req)
+ channel->tx_count = tp->txq_req;
+ else
+ channel->tx_count = min(num_online_cpus(), tp->txq_max);
 }
}

-#if (LINUX_VERSION_CODE < 0x020618)
-static int tg3_get_stats_count (struct net_device *dev)
+static int tg3_set_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
{
- return tg3_get_sset_count(dev, ETH_SS_STATS);
-}
+ struct tg3 *tp = netdev_priv(dev);

-static int tg3_get_test_count (struct net_device *dev)
-{
- return tg3_get_sset_count(dev, ETH_SS_TEST);
+ if (!tg3_flag(tp, SUPPORT_MSIX))
+ return -EOPNOTSUPP;
+
+ if (channel->rx_count > tp->rxq_max ||
+ channel->tx_count > tp->txq_max)
+ return -EINVAL;
+
+ tp->rxq_req = channel->rx_count;
+ tp->txq_req = channel->tx_count;
+
+ if (!netif_running(dev))
+ return 0;
+
+ tg3_stop(tp);
+
+ netif_carrier_off(dev);
+
+ tg3_start(tp, true);
+
+ return 0;
}
-#endif
+#endif /* ETHTOOL_GCHANNELS */

 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
 {
+#ifdef TG3_VMWARE_NETQ_ENABLE
+ struct tg3 *tp = netdev_priv(dev);
+#endif
+
 switch (stringset) {
 case ETH_SS_STATS:
 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
+#ifdef TG3_VMWARE_NETQ_ENABLE
+ if (tg3_flag(tp, ENABLE_IOV)) {
+ buf += sizeof(ethtool_stats_keys);
+ tg3_netq_stats_get_strings(tp, buf);
+ }
+#endif
 break;
 case ETH_SS_TEST:
 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
@@ -10765,35 +12355,38 @@ static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
 }
}

-static int tg3_phys_id(struct net_device *dev, u32 data)
+static int tg3_set_phys_id(struct net_device *dev,
+ enum ethtool_phys_id_state state)
{
 struct tg3 *tp = netdev_priv(dev);
- int i;

 if (!netif_running(tp->dev))
 return -EAGAIN;

- if (data == 0)
- data = UINT_MAX / 2;
-
- for (i = 0; i < (data * 2); i++) {
- if ((i % 2) == 0)
- tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
- LED_CTRL_1000MBPS_ON |
- LED_CTRL_100MBPS_ON |
-
LED_CTRL_10MBPS_ON | - LED_CTRL_TRAFFIC_OVERRIDE | - LED_CTRL_TRAFFIC_BLINK | - LED_CTRL_TRAFFIC_LED); + switch (state) { + case ETHTOOL_ID_ACTIVE: + return 1; /* cycle on/off once per second */ + + case ETHTOOL_ID_ON: + tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE | + LED_CTRL_1000MBPS_ON | + LED_CTRL_100MBPS_ON | + LED_CTRL_10MBPS_ON | + LED_CTRL_TRAFFIC_OVERRIDE | + LED_CTRL_TRAFFIC_BLINK | + LED_CTRL_TRAFFIC_LED); + break; - else - tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE | - LED_CTRL_TRAFFIC_OVERRIDE); + case ETHTOOL_ID_OFF: + tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE | + LED_CTRL_TRAFFIC_OVERRIDE); + break; - if (msleep_interruptible(500)) - break; + case ETHTOOL_ID_INACTIVE: + tw32(MAC_LED_CTRL, tp->led_ctrl); + break; } - tw32(MAC_LED_CTRL, tp->led_ctrl); + return 0; } @@ -10801,23 +12394,110 @@ static void tg3_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *tmp_stats) { struct tg3 *tp = netdev_priv(dev); - memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats)); + + if (tp->hw_stats) + tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats); + else + memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats)); + +#ifdef TG3_VMWARE_NETQ_ENABLE + tg3_netq_stats_get(tp, tmp_stats + TG3_NUM_STATS); +#endif +} + +static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen) +{ + int i; + __be32 *buf; + u32 offset = 0, len = 0; + u32 magic, val; + + if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic)) + return NULL; + + if (magic == TG3_EEPROM_MAGIC) { + for (offset = TG3_NVM_DIR_START; + offset < TG3_NVM_DIR_END; + offset += TG3_NVM_DIRENT_SIZE) { + if (tg3_nvram_read(tp, offset, &val)) + return NULL; + + if ((val >> TG3_NVM_DIRTYPE_SHIFT) == + TG3_NVM_DIRTYPE_EXTVPD) + break; + } + + if (offset != TG3_NVM_DIR_END) { + len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4; + if (tg3_nvram_read(tp, offset + 4, &offset)) + return NULL; + + offset = tg3_nvram_logical_addr(tp, offset); + } + } + + if (!offset || !len) { + offset = TG3_NVM_VPD_OFF; + len = TG3_NVM_VPD_LEN; + } + + buf = kmalloc(len, GFP_KERNEL); + if (buf == NULL) + return NULL; + + if (magic == TG3_EEPROM_MAGIC) { + for (i = 0; i < len; i += 4) { + /* The data is in little-endian format in NVRAM. + * Use the big-endian read routines to preserve + * the byte order as it exists in NVRAM. 
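tg3_set_phys_id() above adopts the newer ethtool LED-identify contract: rather than the driver blocking in msleep_interruptible(), the core calls first with ETHTOOL_ID_ACTIVE, and a positive return value names the blink frequency (1 here, one on/off cycle per second); the core then invokes ETHTOOL_ID_ON and ETHTOOL_ID_OFF at that rate and finishes with ETHTOOL_ID_INACTIVE so the driver can restore normal LED control. Skeleton of a conforming handler (the led_* helpers are placeholders):

static int example_set_phys_id(struct net_device *dev,
                               enum ethtool_phys_id_state state)
{
        switch (state) {
        case ETHTOOL_ID_ACTIVE:
                return 1;               /* blink: 1 on/off cycle per second */
        case ETHTOOL_ID_ON:
                led_force_on(dev);
                break;
        case ETHTOOL_ID_OFF:
                led_force_off(dev);
                break;
        case ETHTOOL_ID_INACTIVE:
                led_restore(dev);       /* back to normal operation */
                break;
        }
        return 0;
}

From userspace this is exercised with, for example, "ethtool -p eth0 5".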
+ */ + if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4])) + goto error; + } + } else { + u8 *ptr; + ssize_t cnt; + unsigned int pos = 0; + + ptr = (u8 *)&buf[0]; + for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) { + cnt = pci_read_vpd(tp->pdev, pos, + len - pos, ptr); + if (cnt == -ETIMEDOUT || cnt == -EINTR) + cnt = 0; + else if (cnt < 0) + goto error; + } + if (pos != len) + goto error; + } + + *vpdlen = len; + + return buf; + +error: + kfree(buf); + return NULL; } #define NVRAM_TEST_SIZE 0x100 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c +#define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20 +#define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24 +#define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50 #define NVRAM_SELFBOOT_HW_SIZE 0x20 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c static int tg3_test_nvram(struct tg3 *tp) { - u32 csum, magic; + u32 csum, magic, len; __be32 *buf; int i, j, k, err = 0, size; - if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) + if (tg3_flag(tp, NO_NVRAM)) return 0; if (tg3_nvram_read(tp, 0, &magic) != 0) @@ -10838,8 +12518,17 @@ static int tg3_test_nvram(struct tg3 *tp) case TG3_EEPROM_SB_REVISION_3: size = NVRAM_SELFBOOT_FORMAT1_3_SIZE; break; + case TG3_EEPROM_SB_REVISION_4: + size = NVRAM_SELFBOOT_FORMAT1_4_SIZE; + break; + case TG3_EEPROM_SB_REVISION_5: + size = NVRAM_SELFBOOT_FORMAT1_5_SIZE; + break; + case TG3_EEPROM_SB_REVISION_6: + size = NVRAM_SELFBOOT_FORMAT1_6_SIZE; + break; default: - return 0; + return -EIO; } } else return 0; @@ -10931,16 +12620,49 @@ static int tg3_test_nvram(struct tg3 *tp) goto out; } + err = -EIO; + /* Bootstrap checksum at offset 0x10 */ csum = calc_crc((unsigned char *) buf, 0x10); - if (csum != be32_to_cpu(buf[0x10/4])) + if (csum != le32_to_cpu(buf[0x10/4])) goto out; /* Manufacturing block starts at offset 0x74, checksum at 0xfc */ csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88); - if (csum != be32_to_cpu(buf[0xfc/4])) + if (csum != le32_to_cpu(buf[0xfc/4])) goto out; + kfree(buf); + + buf = tg3_vpd_readblock(tp, &len); + if (!buf) + return -ENOMEM; + + i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA); + if (i > 0) { + j = pci_vpd_lrdt_size(&((u8 *)buf)[i]); + if (j < 0) + goto out; + + if (i + PCI_VPD_LRDT_TAG_SIZE + j > len) + goto out; + + i += PCI_VPD_LRDT_TAG_SIZE; + j = pci_vpd_find_info_keyword((u8 *)buf, i, j, + PCI_VPD_RO_KEYWORD_CHKSUM); + if (j > 0) { + u8 csum8 = 0; + + j += PCI_VPD_INFO_FLD_HDR_SIZE; + + for (i = 0; i <= j; i++) + csum8 += ((u8 *)buf)[i]; + + if (csum8) + goto out; + } + } + err = 0; out: @@ -10958,7 +12680,7 @@ static int tg3_test_link(struct tg3 *tp) if (!netif_running(tp->dev)) return -ENODEV; - if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) + if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) max = TG3_SERDES_TIMEOUT_SEC; else max = TG3_COPPER_TIMEOUT_SEC; @@ -11125,9 +12847,9 @@ static int tg3_test_registers(struct tg3 *tp) }; is_5705 = is_5750 = 0; - if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { + if (tg3_flag(tp, 5705_PLUS)) { is_5705 = 1; - if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) + if (tg3_flag(tp, 5750_PLUS)) is_5750 = 1; } @@ -11138,7 +12860,7 @@ static int tg3_test_registers(struct tg3 *tp) if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705)) continue; - if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) && + if (tg3_flag(tp, IS_5788) && (reg_tbl[i].flags & TG3_FL_NOT_5788)) continue; @@ -11261,118 +12983,75 @@ static int tg3_test_memory(struct tg3 *tp) int err = 0; int i; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) + if 
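The read loop in tg3_vpd_readblock() above reflects the pci_read_vpd() calling convention: the function may return fewer bytes than requested, and -ETIMEDOUT or -EINTR are treated as retryable with no progress, so the caller accumulates partial reads and bounds the whole exercise to a few attempts. The same shape as a standalone helper (a sketch following the policy above):

static ssize_t example_vpd_read_all(struct pci_dev *pdev, u8 *buf, size_t len)
{
        size_t pos = 0;
        int tries;

        for (tries = 0; pos < len && tries < 3; tries++) {
                ssize_t cnt = pci_read_vpd(pdev, pos, len - pos, buf + pos);

                if (cnt == -ETIMEDOUT || cnt == -EINTR)
                        continue;       /* transient: retry, no progress */
                if (cnt < 0)
                        return cnt;     /* hard error */
                pos += cnt;
        }
        return pos == len ? (ssize_t)pos : -EIO;
}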
(tg3_flag(tp, 5717_PLUS)) mem_tbl = mem_tbl_5717; - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + else if (tg3_flag(tp, 57765_CLASS)) mem_tbl = mem_tbl_57765; - else if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) + else if (tg3_flag(tp, 5755_PLUS)) mem_tbl = mem_tbl_5755; else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) mem_tbl = mem_tbl_5906; - else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) + else if (tg3_flag(tp, 5705_PLUS)) mem_tbl = mem_tbl_5705; else mem_tbl = mem_tbl_570x; for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) { - if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset, - mem_tbl[i].len)) != 0) + err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len); + if (err) break; } return err; } -#define TG3_MAC_LOOPBACK 0 -#define TG3_PHY_LOOPBACK 1 - -static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) -{ - u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key; - u32 desc_idx, coal_now; - struct sk_buff *skb, *rx_skb; - u8 *tx_data; - dma_addr_t map; - int num_pkts, tx_len, rx_len, i, err; - struct tg3_rx_buffer_desc *desc; - struct tg3_napi *tnapi, *rnapi; - struct tg3_rx_prodring_set *tpr = &tp->prodring[0]; - - tnapi = &tp->napi[0]; - rnapi = &tp->napi[0]; - if (tp->irq_cnt > 1) { - rnapi = &tp->napi[1]; - if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) - tnapi = &tp->napi[1]; - } - coal_now = tnapi->coal_now | rnapi->coal_now; - - if (loopback_mode == TG3_MAC_LOOPBACK) { - /* HW errata - mac loopback fails in some cases on 5780. - * Normal traffic and PHY loopback are not affected by - * errata. - */ - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) - return 0; - - mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) | - MAC_MODE_PORT_INT_LPBACK; - if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) - mac_mode |= MAC_MODE_LINK_POLARITY; - if (tp->tg3_flags & TG3_FLAG_10_100_ONLY) - mac_mode |= MAC_MODE_PORT_MODE_MII; - else - mac_mode |= MAC_MODE_PORT_MODE_GMII; - tw32(MAC_MODE, mac_mode); - } else if (loopback_mode == TG3_PHY_LOOPBACK) { - u32 val; - - if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) { - tg3_phy_fet_toggle_apd(tp, false); - val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100; - } else - val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000; - - tg3_phy_toggle_automdix(tp, 0); - - tg3_writephy(tp, MII_BMCR, val); - udelay(40); +#define TG3_TSO_MSS 500 + +#define TG3_TSO_IP_HDR_LEN 20 +#define TG3_TSO_TCP_HDR_LEN 20 +#define TG3_TSO_TCP_OPT_LEN 12 + +static const u8 tg3_tso_header[] = { +0x08, 0x00, +0x45, 0x00, 0x00, 0x00, +0x00, 0x00, 0x40, 0x00, +0x40, 0x06, 0x00, 0x00, +0x0a, 0x00, 0x00, 0x01, +0x0a, 0x00, 0x00, 0x02, +0x0d, 0x00, 0xe0, 0x00, +0x00, 0x00, 0x01, 0x00, +0x00, 0x00, 0x02, 0x00, +0x80, 0x10, 0x10, 0x00, +0x14, 0x09, 0x00, 0x00, +0x01, 0x01, 0x08, 0x0a, +0x11, 0x11, 0x11, 0x11, +0x11, 0x11, 0x11, 0x11, +}; - mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK; - if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) { - tg3_writephy(tp, MII_TG3_FET_PTEST, - MII_TG3_FET_PTEST_FRC_TX_LINK | - MII_TG3_FET_PTEST_FRC_TX_LOCK); - /* The write needs to be flushed for the AC131 */ - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) - tg3_readphy(tp, MII_TG3_FET_PTEST, &val); - mac_mode |= MAC_MODE_PORT_MODE_MII; - } else - mac_mode |= MAC_MODE_PORT_MODE_GMII; +static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback) +{ + u32 rx_start_idx, rx_idx, tx_idx, opaque_key; + u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val; + u32 budget; + struct sk_buff *skb, *rx_skb; + u8 *tx_data; + dma_addr_t map; + int num_pkts, 
tx_len, rx_len, i, err; + struct tg3_rx_buffer_desc *desc; + struct tg3_napi *tnapi, *rnapi; + struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring; - /* reset to prevent losing 1st rx packet intermittently */ - if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) { - tw32_f(MAC_RX_MODE, RX_MODE_RESET); - udelay(10); - tw32_f(MAC_RX_MODE, tp->rx_mode); - } - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) { - u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK; - if (masked_phy_id == TG3_PHY_ID_BCM5401) - mac_mode &= ~MAC_MODE_LINK_POLARITY; - else if (masked_phy_id == TG3_PHY_ID_BCM5411) - mac_mode |= MAC_MODE_LINK_POLARITY; - tg3_writephy(tp, MII_TG3_EXT_CTRL, - MII_TG3_EXT_CTRL_LNK3_LED_MODE); - } - tw32(MAC_MODE, mac_mode); - } else { - return -EINVAL; - } + tnapi = &tp->napi[0]; + rnapi = &tp->napi[0]; + if (tg3_flag(tp, ENABLE_RSS)) + rnapi = &tp->napi[1]; + if (tg3_flag(tp, ENABLE_TSS)) + tnapi = &tp->napi[1]; + coal_now = tnapi->coal_now | rnapi->coal_now; err = -EIO; - tx_len = 1514; + tx_len = pktsz; skb = netdev_alloc_skb(tp->dev, tx_len); if (!skb) return -ENOMEM; @@ -11381,9 +13060,65 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) memcpy(tx_data, tp->dev->dev_addr, 6); memset(tx_data + 6, 0x0, 8); - tw32(MAC_RX_MTU_SIZE, tx_len + 4); + tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN); + +#if TG3_TSO_SUPPORT != 0 + if (tso_loopback) { + struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN]; + + u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN + + TG3_TSO_TCP_OPT_LEN; + + memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header, + sizeof(tg3_tso_header)); + mss = TG3_TSO_MSS; + + val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header); + num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS); + + /* Set the total length field in the IP header */ + iph->tot_len = htons((u16)(mss + hdr_len)); + + base_flags = (TXD_FLAG_CPU_PRE_DMA | + TXD_FLAG_CPU_POST_DMA); + + if (tg3_flag(tp, HW_TSO_1) || + tg3_flag(tp, HW_TSO_2) || + tg3_flag(tp, HW_TSO_3)) { + struct tcphdr *th; + val = ETH_HLEN + TG3_TSO_IP_HDR_LEN; + th = (struct tcphdr *)&tx_data[val]; + th->check = 0; + } else + base_flags |= TXD_FLAG_TCPUDP_CSUM; + + if (tg3_flag(tp, HW_TSO_3)) { + mss |= (hdr_len & 0xc) << 12; + if (hdr_len & 0x10) + base_flags |= 0x00000010; + base_flags |= (hdr_len & 0x3e0) << 5; + } else if (tg3_flag(tp, HW_TSO_2)) + mss |= hdr_len << 9; + else if (tg3_flag(tp, HW_TSO_1) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { + mss |= (TG3_TSO_TCP_OPT_LEN << 9); + } else { + base_flags |= (TG3_TSO_TCP_OPT_LEN << 10); + } + + data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header); + } else +#endif + { + num_pkts = 1; + data_off = ETH_HLEN; + + if (tg3_flag(tp, USE_JUMBO_BDFLAG) && + tx_len > VLAN_ETH_FRAME_LEN) + base_flags |= TXD_FLAG_JMB_PKT; + } - for (i = 14; i < tx_len; i++) + for (i = data_off; i < tx_len; i++) tx_data[i] = (u8) (i & 0xff); map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE); @@ -11392,6 +13127,10 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) return -EIO; } + val = tnapi->tx_prod; + tnapi->tx_buffers[val].skb = skb; + dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map); + tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | rnapi->coal_now); @@ -11399,14 +13138,17 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) rx_start_idx = rnapi->hw_status->idx[0].rx_producer; - num_pkts = 0; - - tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len, 0, 1); + budget = tg3_tx_avail(tnapi); + if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len, 
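The TSO variant of the loopback test above transmits one oversized frame and expects the segmentation hardware to cut it into MSS-sized packets. With the constants above and pktsz == ETH_FRAME_LEN (1514 bytes), the arithmetic works out as follows:

/* sizeof(tg3_tso_header) = 2 (ethertype) + 20 (IP) + 20 (TCP)
 *                        + 12 (TCP options) = 54 bytes
 * payload  = 1514 - 2 * ETH_ALEN - 54 = 1448 bytes
 * num_pkts = DIV_ROUND_UP(1448, TG3_TSO_MSS = 500) = 3
 * tot_len  = mss + hdr_len = 500 + 52 = 552 (written into the IP header)
 */

so the single transmitted descriptor must come back as three receive completions for the test to pass.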
+ base_flags | TXD_FLAG_END, mss, 0)) { + tnapi->tx_buffers[val].skb = NULL; + dev_kfree_skb(skb); + return -EIO; + } tnapi->tx_prod++; - num_pkts++; - /* Some platforms need to sync memory here */ + /* Sync BD data before updating mailbox */ wmb(); tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod); @@ -11428,7 +13170,7 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) break; } - pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE); + tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1); dev_kfree_skb(skb); if (tx_idx != tnapi->tx_prod) @@ -11437,29 +13179,56 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) if (rx_idx != rx_start_idx + num_pkts) goto out; - desc = &rnapi->rx_rcb[rx_start_idx]; - desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; - opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; - if (opaque_key != RXD_OPAQUE_RING_STD) - goto out; + val = data_off; + while (rx_idx != rx_start_idx) { + desc = &rnapi->rx_rcb[rx_start_idx++]; + desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; + opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; - if ((desc->err_vlan & RXD_ERR_MASK) != 0 && - (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) - goto out; + if ((desc->err_vlan & RXD_ERR_MASK) != 0 && + (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) + goto out; - rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; - if (rx_len != tx_len) - goto out; + rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) + - ETH_FCS_LEN; - rx_skb = tpr->rx_std_buffers[desc_idx].skb; + if (!tso_loopback) { + if (rx_len != tx_len) + goto out; - map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping); - pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE); + if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) { + if (opaque_key != RXD_OPAQUE_RING_STD) + goto out; + } else { + if (opaque_key != RXD_OPAQUE_RING_JUMBO) + goto out; + } + } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) && + (desc->ip_tcp_csum & RXD_TCPCSUM_MASK) + >> RXD_TCPCSUM_SHIFT != 0xffff) { + goto out; + } - for (i = 14; i < tx_len; i++) { - if (*(rx_skb->data + i) != (u8) (i & 0xff)) + if (opaque_key == RXD_OPAQUE_RING_STD) { + rx_skb = tpr->rx_std_buffers[desc_idx].skb; + map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx], + mapping); + } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { + rx_skb = tpr->rx_jmb_buffers[desc_idx].skb; + map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx], + mapping); + } else goto out; + + pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, + PCI_DMA_FROMDEVICE); + + for (i = data_off; i < rx_len; i++, val++) { + if (*(rx_skb->data + i) != (u8) (val & 0xff)) + goto out; + } } + err = 0; /* tg3_free_rings will unmap and free the rx_skb */ @@ -11467,70 +13236,128 @@ out: return err; } -#define TG3_MAC_LOOPBACK_FAILED 1 -#define TG3_PHY_LOOPBACK_FAILED 2 -#define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \ - TG3_PHY_LOOPBACK_FAILED) +#define TG3_STD_LOOPBACK_FAILED 1 +#define TG3_JMB_LOOPBACK_FAILED 2 +#define TG3_TSO_LOOPBACK_FAILED 4 +#define TG3_LOOPBACK_FAILED \ + (TG3_STD_LOOPBACK_FAILED | \ + TG3_JMB_LOOPBACK_FAILED | \ + TG3_TSO_LOOPBACK_FAILED) -static int tg3_test_loopback(struct tg3 *tp) +static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk) { - int err = 0; - u32 cpmuctrl = 0; + int err = -EIO; + u32 eee_cap; + u32 jmb_pkt_sz = 9000; - if (!netif_running(tp->dev)) - return TG3_LOOPBACK_FAILED; + if (tp->dma_limit) + jmb_pkt_sz = tp->dma_limit - ETH_HLEN; + + eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP; + 
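For reference, the loopback results assembled below use one u64 per loopback mode and one bit per frame type:

/* data[0]: MAC loopback, data[1]: internal PHY loopback,
 * data[2]: external loopback (only when requested)
 *   bit 0: TG3_STD_LOOPBACK_FAILED (standard frame)
 *   bit 1: TG3_JMB_LOOPBACK_FAILED (jumbo frame)
 *   bit 2: TG3_TSO_LOOPBACK_FAILED (TSO frame)
 */

The EEE capability flag is saved and masked off for the duration of the test, presumably so low-power-idle negotiation cannot disturb the timed packet turnaround, and is restored at the done label.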
tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
+
+ if (!netif_running(tp->dev)) {
+ data[0] = TG3_LOOPBACK_FAILED;
+ data[1] = TG3_LOOPBACK_FAILED;
+ if (do_extlpbk)
+ data[2] = TG3_LOOPBACK_FAILED;
+ goto done;
+ }

 err = tg3_reset_hw(tp, 1);
- if (err)
- return TG3_LOOPBACK_FAILED;
+ if (err) {
+ data[0] = TG3_LOOPBACK_FAILED;
+ data[1] = TG3_LOOPBACK_FAILED;
+ if (do_extlpbk)
+ data[2] = TG3_LOOPBACK_FAILED;
+ goto done;
+ }

- /* Turn off gphy autopowerdown. */
- if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
- tg3_phy_toggle_apd(tp, false);
+ if (tg3_flag(tp, ENABLE_RSS)) {
+ int i;
+
+ /* Reroute all rx packets to the 1st queue */
+ for (i = MAC_RSS_INDIR_TBL_0;
+ i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
+ tw32(i, 0x0);
+ }
+
+ /* HW errata - mac loopback fails in some cases on 5780.
+ * Normal traffic and PHY loopback are not affected by
+ * errata. Also, the MAC loopback test is deprecated for
+ * all newer ASIC revisions.
+ */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
+ !tg3_flag(tp, CPMU_PRESENT)) {
+ tg3_mac_loopback(tp, true);
+
+ if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
+ data[0] |= TG3_STD_LOOPBACK_FAILED;
+
+ if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
+ tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
+ data[0] |= TG3_JMB_LOOPBACK_FAILED;
+
+ tg3_mac_loopback(tp, false);
+ }

- if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
+ if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
+ !tg3_flag(tp, USE_PHYLIB)) {
 int i;
- u32 status;

- tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
+ tg3_phy_lpbk_set(tp, 0, false);

- /* Wait for up to 40 microseconds to acquire lock. */
- for (i = 0; i < 4; i++) {
- status = tr32(TG3_CPMU_MUTEX_GNT);
- if (status == CPMU_MUTEX_GNT_DRIVER)
+ /* Wait for link */
+ for (i = 0; i < 700; i++) {
+ if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
 break;
- udelay(10);
+ mdelay(1);
 }

- if (status != CPMU_MUTEX_GNT_DRIVER)
- return TG3_LOOPBACK_FAILED;
+ if (i == 700) {
+ netdev_info(tp->dev, "No link for loopback test!\n");
+ return -EIO;
+ }

- /* Turn off link-based power management. */
- cpmuctrl = tr32(TG3_CPMU_CTRL);
- tw32(TG3_CPMU_CTRL,
- cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
- CPMU_CTRL_LINK_AWARE_MODE));
- }
+ if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
+ data[1] |= TG3_STD_LOOPBACK_FAILED;
+#if TG3_TSO_SUPPORT != 0
+ if (tg3_flag(tp, TSO_CAPABLE) &&
+ tg3_run_loopback(tp, ETH_FRAME_LEN, true))
+ data[1] |= TG3_TSO_LOOPBACK_FAILED;
+#endif
+ if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
+ tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
+ data[1] |= TG3_JMB_LOOPBACK_FAILED;

- if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
- err |= TG3_MAC_LOOPBACK_FAILED;
+ if (do_extlpbk) {
+ tg3_phy_lpbk_set(tp, 0, true);

- if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
- tw32(TG3_CPMU_CTRL, cpmuctrl);
+ /* All link indications report up, but the hardware
+ * isn't really ready for about 20 msec. Double it
+ * to be sure.
+ */
+ mdelay(40);
+
+ if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
+ data[2] |= TG3_STD_LOOPBACK_FAILED;
+ if (tg3_flag(tp, TSO_CAPABLE) &&
+ tg3_run_loopback(tp, ETH_FRAME_LEN, true))
+ data[2] |= TG3_TSO_LOOPBACK_FAILED;
+ if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
+ tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
+ data[2] |= TG3_JMB_LOOPBACK_FAILED;
+ }

- /* Release the mutex */
- tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
+ /* Re-enable gphy autopowerdown.
*/ + if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) + tg3_phy_toggle_apd(tp, true); } - if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) && - !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) { - if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK)) - err |= TG3_PHY_LOOPBACK_FAILED; - } + err = (data[0] | data[1] | data[2]) ? -EIO : 0; - /* Re-enable gphy autopowerdown. */ - if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD) - tg3_phy_toggle_apd(tp, true); +done: + tp->phy_flags |= eee_cap; return err; } @@ -11539,9 +13366,14 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *data) { struct tg3 *tp = netdev_priv(dev); + bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB; - if (tp->link_config.phy_is_low_power) - tg3_set_power_state(tp, PCI_D0); + if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) && + tg3_power_up(tp)) { + etest->flags |= ETH_TEST_FL_FAILED; + memset(data, 1, sizeof(u64) * TG3_NUM_TEST); + return; + } memset(data, 0, sizeof(u64) * TG3_NUM_TEST); @@ -11549,7 +13381,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, etest->flags |= ETH_TEST_FL_FAILED; data[0] = 1; } - if (tg3_test_link(tp) != 0) { + if (!doextlpbk && tg3_test_link(tp)) { etest->flags |= ETH_TEST_FL_FAILED; data[1] = 1; } @@ -11559,6 +13391,9 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, if (netif_running(dev)) { tg3_phy_stop(tp); tg3_netif_stop(tp); +#ifdef TG3_VMWARE_NETQ_ENABLE + tg3_netq_invalidate_state(tp); +#endif irq_sync = 1; } @@ -11567,37 +13402,42 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, tg3_halt(tp, RESET_KIND_SUSPEND, 1); err = tg3_nvram_lock(tp); tg3_halt_cpu(tp, RX_CPU_BASE); - if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) + if (!tg3_flag(tp, 5705_PLUS)) tg3_halt_cpu(tp, TX_CPU_BASE); if (!err) tg3_nvram_unlock(tp); - if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) + if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) tg3_phy_reset(tp); if (tg3_test_registers(tp) != 0) { etest->flags |= ETH_TEST_FL_FAILED; data[2] = 1; } + if (tg3_test_memory(tp) != 0) { etest->flags |= ETH_TEST_FL_FAILED; data[3] = 1; } - if ((data[4] = tg3_test_loopback(tp)) != 0) + + if (doextlpbk) + etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; + + if (tg3_test_loopback(tp, &data[4], doextlpbk)) etest->flags |= ETH_TEST_FL_FAILED; tg3_full_unlock(tp); if (tg3_test_interrupt(tp) != 0) { etest->flags |= ETH_TEST_FL_FAILED; - data[5] = 1; + data[7] = 1; } tg3_full_lock(tp, 0); tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); if (netif_running(dev)) { - tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; + tg3_flag_set(tp, INIT_COMPLETE); err2 = tg3_restart_hw(tp, 1); if (!err2) tg3_netif_start(tp); @@ -11608,10 +13448,125 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, if (irq_sync && !err2) tg3_phy_start(tp); } - if (tp->link_config.phy_is_low_power) - tg3_set_power_state(tp, PCI_D3hot); + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { + tg3_power_down_prepare(tp); + tg3_power_down(tp); + } + +} + +#ifdef BCM_HAS_IEEE1588_SUPPORT +static int tg3_hwtstamp_ioctl(struct net_device *dev, + struct ifreq *ifr, int cmd) +{ + struct tg3 *tp = netdev_priv(dev); + struct hwtstamp_config stmpconf; + u32 grc_mode; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5720) + return -EINVAL; + + if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf))) + return -EFAULT; + + if (stmpconf.flags) + return -EINVAL; + + switch (stmpconf.tx_type) { + case HWTSTAMP_TX_ON: + 
tg3_flag_set(tp, TX_TSTAMP_EN); + break; + case HWTSTAMP_TX_OFF: + tg3_flag_clear(tp, TX_TSTAMP_EN); + break; + default: + return -ERANGE; + } + + switch (stmpconf.rx_filter) { + case HWTSTAMP_FILTER_NONE: + tp->rxptpctl = 0; + break; + case HWTSTAMP_FILTER_ALL: + tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN | + TG3_RX_PTP_CTL_ALL_V1_EVENTS | + TG3_RX_PTP_CTL_RX_PTP_V2_EN | + TG3_RX_PTP_CTL_ALL_V2_EVENTS; + break; + + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN | + TG3_RX_PTP_CTL_ALL_V1_EVENTS; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN | + TG3_RX_PTP_CTL_SYNC_EVNT; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN | + TG3_RX_PTP_CTL_DELAY_REQ; + break; + + case HWTSTAMP_FILTER_PTP_V2_EVENT: + tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN | + TG3_RX_PTP_CTL_ALL_V2_EVENTS; + break; + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | + TG3_RX_PTP_CTL_ALL_V2_EVENTS; + break; + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | + TG3_RX_PTP_CTL_ALL_V2_EVENTS; + break; + + case HWTSTAMP_FILTER_PTP_V2_SYNC: + tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN | + TG3_RX_PTP_CTL_SYNC_EVNT; + break; + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | + TG3_RX_PTP_CTL_SYNC_EVNT; + break; + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | + TG3_RX_PTP_CTL_SYNC_EVNT; + break; + + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN | + TG3_RX_PTP_CTL_DELAY_REQ; + break; + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | + TG3_RX_PTP_CTL_DELAY_REQ; + break; + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | + TG3_RX_PTP_CTL_DELAY_REQ; + break; + default: + return -ERANGE; + } + + if (netif_running(dev)) { + grc_mode = tr32(GRC_MODE) & ~GRC_MODE_TIME_SYNC_ENABLE; + tw32(GRC_MODE, grc_mode); + + if (tp->rxptpctl || tg3_flag(tp, TX_TSTAMP_EN)) { + tw32(TG3_RX_PTP_CTL, + tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK); + + grc_mode |= GRC_MODE_TIME_SYNC_ENABLE; + tw32(GRC_MODE, grc_mode); + } + } + return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ? 
+ -EFAULT : 0; } +#endif /* BCM_HAS_IEEE1588_SUPPORT */ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { @@ -11624,12 +13579,12 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) int err; #ifdef BCM_INCLUDE_PHYLIB_SUPPORT - if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { + if (tg3_flag(tp, USE_PHYLIB)) { struct phy_device *phydev; - if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) + if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) return -EAGAIN; phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; - return phy_mii_ioctl(phydev, data, cmd); + return phy_mii_ioctl(phydev, ifr, cmd); } #endif /* BCM_INCLUDE_PHYLIB_SUPPORT */ @@ -11641,10 +13596,10 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) case SIOCGMIIREG: { u32 mii_regval; - if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) break; /* We have no PHY */ - if (tp->link_config.phy_is_low_power) + if (!netif_running(dev)) return -EAGAIN; spin_lock_bh(&tp->lock); @@ -11657,10 +13612,10 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) } case SIOCSMIIREG: - if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) break; /* We have no PHY */ - if (tp->link_config.phy_is_low_power) + if (!netif_running(dev)) return -EAGAIN; spin_lock_bh(&tp->lock); @@ -11670,10 +13625,15 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return err; #if defined(__VMKLNX__) && !defined(TG3_VMWARE_BMAPILNX_DISABLE) - case SIOTG3CIM: + case BRCM_VMWARE_CIM_IOCTL: return tg3_vmware_ioctl_cim(dev, ifr); #endif /* TG3_VMWARE_BMAPILNX */ +#ifdef BCM_HAS_IEEE1588_SUPPORT + case SIOCSHWTSTAMP: + return tg3_hwtstamp_ioctl(dev, ifr, cmd); +#endif /* BCM_HAS_IEEE1588_SUPPORT */ + default: /* do nothing */ break; @@ -11681,48 +13641,6 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return -EOPNOTSUPP; } -#if TG3_VLAN_TAG_USED -static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) -{ - struct tg3 *tp = netdev_priv(dev); - - if (!netif_running(dev)) { - tp->vlgrp = grp; - return; - } - - tg3_netif_stop(tp); - - tg3_full_lock(tp, 0); - - tp->vlgrp = grp; - - /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. 
*/ - __tg3_set_rx_mode(dev); - - tg3_netif_start(tp); - - tg3_full_unlock(tp); -} - -#ifndef HAVE_NET_DEVICE_OPS -static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) -{ - struct tg3 *tp = netdev_priv(dev); - - if (netif_running(dev)) - tg3_netif_stop(tp); - - tg3_full_lock(tp, 0); - vlan_group_set_device(tp->vlgrp, vid, NULL); - tg3_full_unlock(tp); - - if (netif_running(dev)) - tg3_netif_start(tp); -} -#endif -#endif /* TG3_VLAN_TAG_USED */ - static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) { struct tg3 *tp = netdev_priv(dev); @@ -11737,7 +13655,7 @@ static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0; u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0; - if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { + if (!tg3_flag(tp, 5705_PLUS)) { max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT; max_txcoal_tick_int = MAX_TXCOAL_TICK_INT; max_stat_coal_ticks = MAX_STAT_COAL_TICKS; @@ -11810,32 +13728,54 @@ static struct ethtool_ops tg3_ethtool_ops = { .set_ringparam = tg3_set_ringparam, .get_pauseparam = tg3_get_pauseparam, .set_pauseparam = tg3_set_pauseparam, + .self_test = tg3_self_test, + .get_strings = tg3_get_strings, +#ifdef BCM_HAS_SET_PHYS_ID + .set_phys_id = tg3_set_phys_id, +#endif + .get_ethtool_stats = tg3_get_ethtool_stats, + .get_coalesce = tg3_get_coalesce, + .set_coalesce = tg3_set_coalesce, +#if (LINUX_VERSION_CODE >= 0x20618) || defined (__VMKLNX__) + .get_sset_count = tg3_get_sset_count, +#endif +#ifdef BCM_HAS_GET_RXNFC + .get_rxnfc = tg3_get_rxnfc, +#endif /* BCM_HAS_GET_RXNFC */ +#ifdef BCM_HAS_GET_RXFH_INDIR +#ifdef BCM_HAS_GET_RXFH_INDIR_SIZE + .get_rxfh_indir_size = tg3_get_rxfh_indir_size, +#endif /* BCM_HAS_GET_RXFH_INDIR_SIZE */ + .get_rxfh_indir = tg3_get_rxfh_indir, + .set_rxfh_indir = tg3_set_rxfh_indir, +#endif /* BCM_HAS_GET_RXFH_INDIR */ +#ifdef ETHTOOL_GCHANNELS + .get_channels = tg3_get_channels, + .set_channels = tg3_set_channels, +#endif + +#ifndef BCM_HAS_NETDEV_UPDATE_FEATURES .get_rx_csum = tg3_get_rx_csum, .set_rx_csum = tg3_set_rx_csum, .get_tx_csum = ethtool_op_get_tx_csum, #ifdef BCM_HAS_SET_TX_CSUM .set_tx_csum = tg3_set_tx_csum, #endif - .get_sg = ethtool_op_get_sg, - .set_sg = ethtool_op_set_sg, #if TG3_TSO_SUPPORT != 0 .get_tso = ethtool_op_get_tso, .set_tso = tg3_set_tso, #endif +#endif /* BCM_HAS_NETDEV_UPDATE_FEATURES */ + .get_sg = ethtool_op_get_sg, + .set_sg = ethtool_op_set_sg, #if (LINUX_VERSION_CODE < 0x20618) .self_test_count = tg3_get_test_count, #endif - .self_test = tg3_self_test, - .get_strings = tg3_get_strings, +#ifndef BCM_HAS_SET_PHYS_ID .phys_id = tg3_phys_id, +#endif #if (LINUX_VERSION_CODE < 0x20618) .get_stats_count = tg3_get_stats_count, -#endif - .get_ethtool_stats = tg3_get_ethtool_stats, - .get_coalesce = tg3_get_coalesce, - .set_coalesce = tg3_set_coalesce, -#if (LINUX_VERSION_CODE >= 0x20618) || defined (__VMKLNX__) - .get_sset_count = tg3_get_sset_count, #endif #if defined(ETHTOOL_GPERMADDR) && (LINUX_VERSION_CODE < 0x020617) .get_perm_addr = ethtool_op_get_perm_addr, @@ -11880,8 +13820,7 @@ static void __devinit tg3_get_nvram_size(struct tg3 *tp) { u32 val; - if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) || - tg3_nvram_read(tp, 0, &val) != 0) + if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0) return; /* Selfboot format */ @@ -11916,19 +13855,19 @@ static void __devinit tg3_get_nvram_info(struct tg3 *tp) nvcfg1 = tr32(NVRAM_CFG1); if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) { - 
tp->tg3_flags2 |= TG3_FLG2_FLASH; + tg3_flag_set(tp, FLASH); } else { nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; tw32(NVRAM_CFG1, nvcfg1); } - if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) || - (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || + tg3_flag(tp, 5780_CLASS)) { switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) { case FLASH_VENDOR_ATMEL_FLASH_BUFFERED: tp->nvram_jedecnum = JEDEC_ATMEL; tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + tg3_flag_set(tp, NVRAM_BUFFERED); break; case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED: tp->nvram_jedecnum = JEDEC_ATMEL; @@ -11937,12 +13876,12 @@ static void __devinit tg3_get_nvram_info(struct tg3 *tp) case FLASH_VENDOR_ATMEL_EEPROM: tp->nvram_jedecnum = JEDEC_ATMEL; tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + tg3_flag_set(tp, NVRAM_BUFFERED); break; case FLASH_VENDOR_ST: tp->nvram_jedecnum = JEDEC_ST; tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + tg3_flag_set(tp, NVRAM_BUFFERED); break; case FLASH_VENDOR_SAIFUN: tp->nvram_jedecnum = JEDEC_SAIFUN; @@ -11957,7 +13896,7 @@ static void __devinit tg3_get_nvram_info(struct tg3 *tp) } else { tp->nvram_jedecnum = JEDEC_ATMEL; tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + tg3_flag_set(tp, NVRAM_BUFFERED); } } @@ -11996,29 +13935,29 @@ static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp) /* NVRAM protection for TPM */ if (nvcfg1 & (1 << 27)) - tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM; + tg3_flag_set(tp, PROTECTED_NVRAM); switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ: case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ: tp->nvram_jedecnum = JEDEC_ATMEL; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + tg3_flag_set(tp, NVRAM_BUFFERED); break; case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: tp->nvram_jedecnum = JEDEC_ATMEL; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; - tp->tg3_flags2 |= TG3_FLG2_FLASH; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); break; case FLASH_5752VENDOR_ST_M45PE10: case FLASH_5752VENDOR_ST_M45PE20: case FLASH_5752VENDOR_ST_M45PE40: tp->nvram_jedecnum = JEDEC_ST; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; - tp->tg3_flags2 |= TG3_FLG2_FLASH; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); break; } - if (tp->tg3_flags2 & TG3_FLG2_FLASH) { + if (tg3_flag(tp, FLASH)) { tg3_nvram_get_pagesize(tp, nvcfg1); } else { /* For eeprom, set pagesize to maximum eeprom size */ @@ -12037,7 +13976,7 @@ static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp) /* NVRAM protection for TPM */ if (nvcfg1 & (1 << 27)) { - tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM; + tg3_flag_set(tp, PROTECTED_NVRAM); protect = 1; } @@ -12048,8 +13987,8 @@ static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp) case FLASH_5755VENDOR_ATMEL_FLASH_3: case FLASH_5755VENDOR_ATMEL_FLASH_5: tp->nvram_jedecnum = JEDEC_ATMEL; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; - tp->tg3_flags2 |= TG3_FLG2_FLASH; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); tp->nvram_pagesize = 264; if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 || nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5) @@ -12066,8 +14005,8 @@ static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp) case FLASH_5752VENDOR_ST_M45PE20: case FLASH_5752VENDOR_ST_M45PE40: tp->nvram_jedecnum = JEDEC_ST; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; - tp->tg3_flags2 |= 
TG3_FLG2_FLASH; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); tp->nvram_pagesize = 256; if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10) tp->nvram_size = (protect ? @@ -12097,7 +14036,7 @@ static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp) case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ: case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ: tp->nvram_jedecnum = JEDEC_ATMEL; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + tg3_flag_set(tp, NVRAM_BUFFERED); tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; @@ -12108,16 +14047,16 @@ static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp) case FLASH_5755VENDOR_ATMEL_FLASH_2: case FLASH_5755VENDOR_ATMEL_FLASH_3: tp->nvram_jedecnum = JEDEC_ATMEL; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; - tp->tg3_flags2 |= TG3_FLG2_FLASH; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); tp->nvram_pagesize = 264; break; case FLASH_5752VENDOR_ST_M45PE10: case FLASH_5752VENDOR_ST_M45PE20: case FLASH_5752VENDOR_ST_M45PE40: tp->nvram_jedecnum = JEDEC_ST; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; - tp->tg3_flags2 |= TG3_FLG2_FLASH; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); tp->nvram_pagesize = 256; break; } @@ -12131,7 +14070,7 @@ static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp) /* NVRAM protection for TPM */ if (nvcfg1 & (1 << 27)) { - tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM; + tg3_flag_set(tp, PROTECTED_NVRAM); protect = 1; } @@ -12146,9 +14085,9 @@ static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp) case FLASH_5761VENDOR_ATMEL_MDB081D: case FLASH_5761VENDOR_ATMEL_MDB161D: tp->nvram_jedecnum = JEDEC_ATMEL; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; - tp->tg3_flags2 |= TG3_FLG2_FLASH; - tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); tp->nvram_pagesize = 256; break; case FLASH_5761VENDOR_ST_A_M45PE20: @@ -12160,8 +14099,8 @@ static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp) case FLASH_5761VENDOR_ST_M_M45PE80: case FLASH_5761VENDOR_ST_M_M45PE16: tp->nvram_jedecnum = JEDEC_ST; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; - tp->tg3_flags2 |= TG3_FLG2_FLASH; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); tp->nvram_pagesize = 256; break; } @@ -12201,7 +14140,7 @@ static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp) static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp) { tp->nvram_jedecnum = JEDEC_ATMEL; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + tg3_flag_set(tp, NVRAM_BUFFERED); tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; } @@ -12215,7 +14154,7 @@ static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp) case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ: case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ: tp->nvram_jedecnum = JEDEC_ATMEL; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + tg3_flag_set(tp, NVRAM_BUFFERED); tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; @@ -12229,8 +14168,8 @@ static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp) case FLASH_57780VENDOR_ATMEL_AT45DB041D: case FLASH_57780VENDOR_ATMEL_AT45DB041B: tp->nvram_jedecnum = JEDEC_ATMEL; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; - tp->tg3_flags2 |= TG3_FLG2_FLASH; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: @@ -12252,8 +14191,8 @@ static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp) case 
FLASH_5752VENDOR_ST_M45PE20: case FLASH_5752VENDOR_ST_M45PE40: tp->nvram_jedecnum = JEDEC_ST; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; - tp->tg3_flags2 |= TG3_FLG2_FLASH; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { case FLASH_5752VENDOR_ST_M45PE10: @@ -12268,13 +14207,13 @@ static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp) } break; default: - tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM; + tg3_flag_set(tp, NO_NVRAM); return; } tg3_nvram_get_pagesize(tp, nvcfg1); if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) - tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS; + tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); } @@ -12288,7 +14227,7 @@ static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp) case FLASH_5717VENDOR_ATMEL_EEPROM: case FLASH_5717VENDOR_MICRO_EEPROM: tp->nvram_jedecnum = JEDEC_ATMEL; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + tg3_flag_set(tp, NVRAM_BUFFERED); tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; @@ -12302,11 +14241,13 @@ static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp) case FLASH_5717VENDOR_ATMEL_ADB021D: case FLASH_5717VENDOR_ATMEL_45USPT: tp->nvram_jedecnum = JEDEC_ATMEL; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; - tp->tg3_flags2 |= TG3_FLG2_FLASH; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { case FLASH_5717VENDOR_ATMEL_MDB021D: + /* Detect size with tg3_nvram_get_size() */ + break; case FLASH_5717VENDOR_ATMEL_ADB021B: case FLASH_5717VENDOR_ATMEL_ADB021D: tp->nvram_size = TG3_NVRAM_SIZE_256KB; @@ -12327,29 +14268,143 @@ static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp) case FLASH_5717VENDOR_ST_25USPT: case FLASH_5717VENDOR_ST_45USPT: tp->nvram_jedecnum = JEDEC_ST; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; - tp->tg3_flags2 |= TG3_FLG2_FLASH; - - switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { - case FLASH_5717VENDOR_ST_M_M25PE20: - case FLASH_5717VENDOR_ST_A_M25PE20: - case FLASH_5717VENDOR_ST_M_M45PE20: - case FLASH_5717VENDOR_ST_A_M45PE20: + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5717VENDOR_ST_M_M25PE20: + case FLASH_5717VENDOR_ST_M_M45PE20: + /* Detect size with tg3_nvram_get_size() */ + break; + case FLASH_5717VENDOR_ST_A_M25PE20: + case FLASH_5717VENDOR_ST_A_M45PE20: + tp->nvram_size = TG3_NVRAM_SIZE_256KB; + break; + default: + tp->nvram_size = TG3_NVRAM_SIZE_128KB; + break; + } + break; + default: + tg3_flag_set(tp, NO_NVRAM); + return; + } + + tg3_nvram_get_pagesize(tp, nvcfg1); + if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) + tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); +} + +static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp) +{ + u32 nvcfg1, nvmpinstrp; + + nvcfg1 = tr32(NVRAM_CFG1); + nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK; + + switch (nvmpinstrp) { + case FLASH_5720_EEPROM_HD: + case FLASH_5720_EEPROM_LD: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + + nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; + tw32(NVRAM_CFG1, nvcfg1); + if (nvmpinstrp == FLASH_5720_EEPROM_HD) + tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; + else + tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE; + return; + case FLASH_5720VENDOR_M_ATMEL_DB011D: + case FLASH_5720VENDOR_A_ATMEL_DB011B: + case FLASH_5720VENDOR_A_ATMEL_DB011D: + case FLASH_5720VENDOR_M_ATMEL_DB021D: + case FLASH_5720VENDOR_A_ATMEL_DB021B: + case FLASH_5720VENDOR_A_ATMEL_DB021D: + 
case FLASH_5720VENDOR_M_ATMEL_DB041D: + case FLASH_5720VENDOR_A_ATMEL_DB041B: + case FLASH_5720VENDOR_A_ATMEL_DB041D: + case FLASH_5720VENDOR_M_ATMEL_DB081D: + case FLASH_5720VENDOR_A_ATMEL_DB081D: + case FLASH_5720VENDOR_ATMEL_45USPT: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + + switch (nvmpinstrp) { + case FLASH_5720VENDOR_M_ATMEL_DB021D: + case FLASH_5720VENDOR_A_ATMEL_DB021B: + case FLASH_5720VENDOR_A_ATMEL_DB021D: + tp->nvram_size = TG3_NVRAM_SIZE_256KB; + break; + case FLASH_5720VENDOR_M_ATMEL_DB041D: + case FLASH_5720VENDOR_A_ATMEL_DB041B: + case FLASH_5720VENDOR_A_ATMEL_DB041D: + tp->nvram_size = TG3_NVRAM_SIZE_512KB; + break; + case FLASH_5720VENDOR_M_ATMEL_DB081D: + case FLASH_5720VENDOR_A_ATMEL_DB081D: + tp->nvram_size = TG3_NVRAM_SIZE_1MB; + break; + default: + tp->nvram_size = TG3_NVRAM_SIZE_128KB; + break; + } + break; + case FLASH_5720VENDOR_M_ST_M25PE10: + case FLASH_5720VENDOR_M_ST_M45PE10: + case FLASH_5720VENDOR_A_ST_M25PE10: + case FLASH_5720VENDOR_A_ST_M45PE10: + case FLASH_5720VENDOR_M_ST_M25PE20: + case FLASH_5720VENDOR_M_ST_M45PE20: + case FLASH_5720VENDOR_A_ST_M25PE20: + case FLASH_5720VENDOR_A_ST_M45PE20: + case FLASH_5720VENDOR_M_ST_M25PE40: + case FLASH_5720VENDOR_M_ST_M45PE40: + case FLASH_5720VENDOR_A_ST_M25PE40: + case FLASH_5720VENDOR_A_ST_M45PE40: + case FLASH_5720VENDOR_M_ST_M25PE80: + case FLASH_5720VENDOR_M_ST_M45PE80: + case FLASH_5720VENDOR_A_ST_M25PE80: + case FLASH_5720VENDOR_A_ST_M45PE80: + case FLASH_5720VENDOR_ST_25USPT: + case FLASH_5720VENDOR_ST_45USPT: + tp->nvram_jedecnum = JEDEC_ST; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + + switch (nvmpinstrp) { + case FLASH_5720VENDOR_M_ST_M25PE20: + case FLASH_5720VENDOR_M_ST_M45PE20: + case FLASH_5720VENDOR_A_ST_M25PE20: + case FLASH_5720VENDOR_A_ST_M45PE20: tp->nvram_size = TG3_NVRAM_SIZE_256KB; break; + case FLASH_5720VENDOR_M_ST_M25PE40: + case FLASH_5720VENDOR_M_ST_M45PE40: + case FLASH_5720VENDOR_A_ST_M25PE40: + case FLASH_5720VENDOR_A_ST_M45PE40: + tp->nvram_size = TG3_NVRAM_SIZE_512KB; + break; + case FLASH_5720VENDOR_M_ST_M25PE80: + case FLASH_5720VENDOR_M_ST_M45PE80: + case FLASH_5720VENDOR_A_ST_M25PE80: + case FLASH_5720VENDOR_A_ST_M45PE80: + tp->nvram_size = TG3_NVRAM_SIZE_1MB; + break; default: tp->nvram_size = TG3_NVRAM_SIZE_128KB; break; } break; default: - tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM; + tg3_flag_set(tp, NO_NVRAM); return; } tg3_nvram_get_pagesize(tp, nvcfg1); if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) - tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS; + tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); } /* Chips other than 5700/5701 use the NVRAM for fetching info. 
*/ @@ -12369,7 +14424,7 @@ static void __devinit tg3_nvram_init(struct tg3 *tp) if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) { - tp->tg3_flags |= TG3_FLAG_NVRAM; + tg3_flag_set(tp, NVRAM); if (tg3_nvram_lock(tp)) { netdev_warn(tp->dev, @@ -12394,10 +14449,13 @@ static void __devinit tg3_nvram_init(struct tg3 *tp) else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) tg3_get_5906_nvram_info(tp); else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + tg3_flag(tp, 57765_CLASS)) tg3_get_57780_nvram_info(tp); - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) tg3_get_5717_nvram_info(tp); + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + tg3_get_5720_nvram_info(tp); else tg3_get_nvram_info(tp); @@ -12408,7 +14466,8 @@ static void __devinit tg3_nvram_init(struct tg3 *tp) tg3_nvram_unlock(tp); } else { - tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED); + tg3_flag_clear(tp, NVRAM); + tg3_flag_clear(tp, NVRAM_BUFFERED); tg3_get_eeprom_size(tp); } @@ -12578,8 +14637,6 @@ static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len, phy_addr = tg3_nvram_phys_addr(tp, offset); - tw32(NVRAM_ADDR, phy_addr); - nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR; if (page_off == 0 || i == 0) @@ -12590,8 +14647,13 @@ static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len, if (i == (len - 4)) nvram_cmd |= NVRAM_CMD_LAST; + if ((nvram_cmd & NVRAM_CMD_FIRST) || + !tg3_flag(tp, FLASH) || + !tg3_flag(tp, 57765_PLUS)) + tw32(NVRAM_ADDR, phy_addr); + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 && - !(tp->tg3_flags3 & TG3_FLG3_5755_PLUS) && + !tg3_flag(tp, 5755_PLUS) && (tp->nvram_jedecnum == JEDEC_ST) && (nvram_cmd & NVRAM_CMD_FIRST)) { @@ -12601,7 +14663,7 @@ static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len, break; } - if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) { + if (!tg3_flag(tp, FLASH)) { /* We always do complete word writes to eeprom. 
*/ nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST); } @@ -12617,13 +14679,13 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf) { int ret; - if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) { + if (tg3_flag(tp, EEPROM_WRITE_PROT)) { tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl & ~GRC_LCLCTRL_GPIO_OUTPUT1); udelay(40); } - if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) { + if (!tg3_flag(tp, NVRAM)) { ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf); } else { u32 grc_mode; @@ -12633,16 +14695,13 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf) return ret; tg3_enable_nvram_access(tp); - if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && - !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) + if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) tw32(NVRAM_WRITE1, 0x406); grc_mode = tr32(GRC_MODE); tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE); - if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) || - !(tp->tg3_flags2 & TG3_FLG2_FLASH)) { - + if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) { ret = tg3_nvram_write_block_buffered(tp, offset, len, buf); } else { @@ -12657,7 +14716,7 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf) tg3_nvram_unlock(tp); } - if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) { + if (tg3_flag(tp, EEPROM_WRITE_PROT)) { tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); udelay(40); } @@ -12751,54 +14810,33 @@ static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp) static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp) { u32 val; - u16 pmcsr; - - /* On some early chips the SRAM cannot be accessed in D3hot state, - * so need make sure we're in D0. - */ - pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr); - pmcsr &= ~PCI_PM_CTRL_STATE_MASK; - pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr); - msleep(1); - - /* Make sure register accesses (indirect or otherwise) - * will function correctly. - */ - pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, - tp->misc_host_ctrl); - - /* The memory arbiter has to be enabled in order for SRAM accesses - * to succeed. Normally on powerup the tg3 chip firmware will make - * sure it is enabled, but other entities such as system netboot - * code might disable it. - */ - val = tr32(MEMARB_MODE); - tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); tp->phy_id = TG3_PHY_ID_INVALID; tp->led_ctrl = LED_CTRL_MODE_PHY_1; /* Assume an onboard device and WOL capable by default. 
*/ - tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP; + tg3_flag_set(tp, EEPROM_WRITE_PROT); + tg3_flag_set(tp, WOL_CAP); if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) { - tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT; - tp->tg3_flags2 |= TG3_FLG2_IS_NIC; + tg3_flag_clear(tp, EEPROM_WRITE_PROT); + tg3_flag_set(tp, IS_NIC); } val = tr32(VCPU_CFGSHDW); if (val & VCPU_CFGSHDW_ASPM_DBNC) - tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND; + tg3_flag_set(tp, ASPM_WORKAROUND); if ((val & VCPU_CFGSHDW_WOL_ENABLE) && (val & VCPU_CFGSHDW_WOL_MAGPKT)) - tp->tg3_flags |= TG3_FLAG_WOL_ENABLE; + tg3_flag_set(tp, WOL_ENABLE); goto done; } tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); if (val == NIC_SRAM_DATA_SIG_MAGIC) { u32 nic_cfg, led_cfg; - u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id; + u32 cfg2 = 0, cfg3 = 0, cfg4 = 0; + u32 nic_phy_id, ver, eeprom_phy_id; int eeprom_phy_serdes = 0; tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg); @@ -12806,9 +14844,9 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp) tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver); ver >>= NIC_SRAM_DATA_VER_SHIFT; - if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) && - (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) && - (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) && + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 && (ver > 0) && (ver < 0x100)) tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2); @@ -12832,14 +14870,13 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp) tp->phy_id = eeprom_phy_id; if (eeprom_phy_serdes) { - if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) - tp->tg3_flags2 |= TG3_FLG2_MII_SERDES; + if (!tg3_flag(tp, 5705_PLUS)) + tp->phy_flags |= TG3_PHYFLG_PHY_SERDES; else - tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES; + tp->phy_flags |= TG3_PHYFLG_MII_SERDES; } - if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) + if (tg3_flag(tp, 5750_PLUS)) led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK | SHASTA_EXT_LED_MODE_MASK); else @@ -12899,72 +14936,81 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp) tp->led_ctrl = LED_CTRL_MODE_PHY_1; if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) { - tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT; + tg3_flag_set(tp, EEPROM_WRITE_PROT); if ((tp->pdev->subsystem_vendor == PCI_VENDOR_ID_ARIMA) && (tp->pdev->subsystem_device == 0x205a || tp->pdev->subsystem_device == 0x2063)) - tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT; + tg3_flag_clear(tp, EEPROM_WRITE_PROT); } else { - tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT; - tp->tg3_flags2 |= TG3_FLG2_IS_NIC; + tg3_flag_clear(tp, EEPROM_WRITE_PROT); + tg3_flag_set(tp, IS_NIC); } if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { - tp->tg3_flags |= TG3_FLAG_ENABLE_ASF; - if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) - tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE; + tg3_flag_set(tp, ENABLE_ASF); + if (tg3_flag(tp, 5750_PLUS)) + tg3_flag_set(tp, ASF_NEW_HANDSHAKE); } if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) && - (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)) - tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE; + tg3_flag(tp, 5750_PLUS)) + tg3_flag_set(tp, ENABLE_APE); - if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES && + if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES && !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)) - tp->tg3_flags &= ~TG3_FLAG_WOL_CAP; + tg3_flag_clear(tp, WOL_CAP); - if ((tp->tg3_flags & 
TG3_FLAG_WOL_CAP) && + if (tg3_flag(tp, WOL_CAP) && (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) - tp->tg3_flags |= TG3_FLAG_WOL_ENABLE; + tg3_flag_set(tp, WOL_ENABLE); if (cfg2 & (1 << 17)) - tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING; + tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING; /* serdes signal pre-emphasis in register 0x590 set by */ /* bootcode if bit 18 is set */ if (cfg2 & (1 << 18)) - tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS; + tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || - (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && + if ((tg3_flag(tp, 57765_PLUS) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) && - (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))) - tp->tg3_flags3 |= TG3_FLG3_PHY_ENABLE_APD; - - if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) { - u32 cfg3; + (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN)) + tp->phy_flags |= TG3_PHYFLG_ENABLE_APD; + if (tg3_flag(tp, PCI_EXPRESS)) { tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3); - if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE) - tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && + !tg3_flag(tp, 57765_PLUS) && + (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)) + tg3_flag_set(tp, ASPM_WORKAROUND); + if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID) + tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN; + if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK) + tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK; } if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE) - tp->tg3_flags3 |= TG3_FLG3_RGMII_INBAND_DISABLE; + tg3_flag_set(tp, RGMII_INBAND_DISABLE); if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN) - tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN; + tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN); if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN) - tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN; + tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN); } done: - device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP); - device_set_wakeup_enable(&tp->pdev->dev, - tp->tg3_flags & TG3_FLAG_WOL_ENABLE); + +#ifndef BCM_HAS_DEVICE_SET_WAKEUP_CAPABLE + device_init_wakeup(&tp->pdev->dev, tg3_flag(tp, WOL_CAP)); +#endif + + if (tg3_flag(tp, WOL_CAP)) + device_set_wakeup_enable(&tp->pdev->dev, + tg3_flag(tp, WOL_ENABLE)); + else + device_set_wakeup_capable(&tp->pdev->dev, false); } static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd) @@ -13016,21 +15062,75 @@ static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp) return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16); } +static void __devinit tg3_phy_init_link_config(struct tg3 *tp) +{ + u32 adv = ADVERTISED_Autoneg; + + if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) + adv |= ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full; + + if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) + adv |= ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_TP; + else + adv |= ADVERTISED_FIBRE; + + tp->link_config.advertising = adv; + tp->link_config.speed = SPEED_INVALID; + tp->link_config.duplex = DUPLEX_INVALID; + tp->link_config.autoneg = AUTONEG_ENABLE; + tp->link_config.active_speed = SPEED_INVALID; + tp->link_config.active_duplex = DUPLEX_INVALID; + 
tp->link_config.orig_speed = SPEED_INVALID; + tp->link_config.orig_duplex = DUPLEX_INVALID; + tp->link_config.orig_autoneg = AUTONEG_INVALID; +} + static int __devinit tg3_phy_probe(struct tg3 *tp) { u32 hw_phy_id_1, hw_phy_id_2; u32 hw_phy_id, hw_phy_id_masked; int err; - if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) + /* flow control autonegotiation is default behavior */ + tg3_flag_set(tp, PAUSE_AUTONEG); + tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX; + + if (tg3_flag(tp, ENABLE_APE)) { + switch (tp->pci_fn) { + case 0: + tp->phy_ape_lock = TG3_APE_LOCK_PHY0; + break; + case 1: + tp->phy_ape_lock = TG3_APE_LOCK_PHY1; + break; + case 2: + tp->phy_ape_lock = TG3_APE_LOCK_PHY2; + break; + case 3: + tp->phy_ape_lock = TG3_APE_LOCK_PHY3; + break; + } + } + + if (!tg3_flag(tp, ENABLE_ASF) && + !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && + !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) + tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK | + TG3_PHYFLG_KEEP_LINK_ON_PWRDN); + + if (tg3_flag(tp, USE_PHYLIB)) return tg3_phy_init(tp); /* Reading the PHY ID register can conflict with ASF * firmware access to the PHY hardware. */ err = 0; - if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) || - (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) { + if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) { hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID; } else { /* Now read the physical PHY_ID from the chip and verify @@ -13051,9 +15151,9 @@ static int __devinit tg3_phy_probe(struct tg3 *tp) if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) { tp->phy_id = hw_phy_id; if (hw_phy_id_masked == TG3_PHY_ID_BCM8002) - tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES; + tp->phy_flags |= TG3_PHYFLG_PHY_SERDES; else - tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES; + tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES; } else { if (tp->phy_id != TG3_PHY_ID_INVALID) { /* Do nothing, phy ID already set up in @@ -13072,122 +15172,54 @@ static int __devinit tg3_phy_probe(struct tg3 *tp) tp->phy_id = p->phy_id; if (!tp->phy_id || tp->phy_id == TG3_PHY_ID_BCM8002) - tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES; + tp->phy_flags |= TG3_PHYFLG_PHY_SERDES; } } - if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) && - !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) && - !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) { - u32 bmsr, adv_reg, tg3_ctrl, mask; - - tg3_readphy(tp, MII_BMSR, &bmsr); - if (!tg3_readphy(tp, MII_BMSR, &bmsr) && - (bmsr & BMSR_LSTATUS)) - goto skip_phy_reset; - + /* A0 */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 && + tp->phy_id == TG3_PHY_ID_BCM50612E) { + tp->phy_flags &= ~TG3_PHYFLG_ENABLE_APD; + tg3_flag_clear(tp, RGMII_INBAND_DISABLE); + tg3_flag_clear(tp, RGMII_EXT_IBND_RX_EN); + tg3_flag_clear(tp, RGMII_EXT_IBND_TX_EN); + } + +#ifndef TG3_DISABLE_EEE_SUPPORT + if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766 || + (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 && + tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) || + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 && + tp->pci_chip_rev_id != CHIPREV_ID_57765_A0))) + tp->phy_flags |= TG3_PHYFLG_EEE_CAP; +#endif /* TG3_DISABLE_EEE_SUPPORT */ + + tg3_phy_init_link_config(tp); + + /* Bring the phy out of its low-power state. 
*/ + if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) && + !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && + !tg3_flag(tp, ENABLE_APE) && !tg3_flag(tp, ENABLE_ASF)) err = tg3_phy_reset(tp); - if (err) - return err; - - adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL | - ADVERTISE_100HALF | ADVERTISE_100FULL | - ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP); - tg3_ctrl = 0; - if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) { - tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF | - MII_TG3_CTRL_ADV_1000_FULL); - if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || - tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) - tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER | - MII_TG3_CTRL_ENABLE_AS_MASTER); - } - - mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | - ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | - ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full); - if (!tg3_copper_is_advertising_all(tp, mask)) { - tg3_writephy(tp, MII_ADVERTISE, adv_reg); - - if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) - tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl); - - tg3_writephy(tp, MII_BMCR, - BMCR_ANENABLE | BMCR_ANRESTART); - } - tg3_phy_set_wirespeed(tp); - - tg3_writephy(tp, MII_ADVERTISE, adv_reg); - if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) - tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl); - } - -skip_phy_reset: - if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { - err = tg3_init_5401phy_dsp(tp); - if (err) - return err; - - err = tg3_init_5401phy_dsp(tp); - } - - if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) - tp->link_config.advertising = - (ADVERTISED_1000baseT_Half | - ADVERTISED_1000baseT_Full | - ADVERTISED_Autoneg | - ADVERTISED_FIBRE); - if (tp->tg3_flags & TG3_FLAG_10_100_ONLY) - tp->link_config.advertising &= - ~(ADVERTISED_1000baseT_Half | - ADVERTISED_1000baseT_Full); return err; } static void __devinit tg3_read_vpd(struct tg3 *tp) { - u8 vpd_data[TG3_NVM_VPD_LEN]; + u8 *vpd_data; unsigned int block_end, rosize, len; + u32 vpdlen; int j, i = 0; - u32 magic; - - if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) || - tg3_nvram_read(tp, 0x0, &magic)) - goto out_not_found; - - if (magic == TG3_EEPROM_MAGIC) { - for (i = 0; i < TG3_NVM_VPD_LEN; i += 4) { - u32 tmp; - - /* The data is in little-endian format in NVRAM. - * Use the big-endian read routines to preserve - * the byte order as it exists in NVRAM. 
- */ - if (tg3_nvram_read_be32(tp, TG3_NVM_VPD_OFF + i, &tmp)) - goto out_not_found; - - memcpy(&vpd_data[i], &tmp, sizeof(tmp)); - } - } else { - ssize_t cnt; - unsigned int pos = 0; - for (; pos < TG3_NVM_VPD_LEN && i < 3; i++, pos += cnt) { - cnt = pci_read_vpd(tp->pdev, pos, - TG3_NVM_VPD_LEN - pos, - &vpd_data[pos]); - if (cnt == -ETIMEDOUT || -EINTR) - cnt = 0; - else if (cnt < 0) - goto out_not_found; - } - if (pos != TG3_NVM_VPD_LEN) - goto out_not_found; - } + vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen); + if (!vpd_data) + goto out_no_vpd; - i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN, - PCI_VPD_LRDT_RO_DATA); + i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA); if (i < 0) goto out_not_found; @@ -13195,7 +15227,7 @@ static void __devinit tg3_read_vpd(struct tg3 *tp) block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize; i += PCI_VPD_LRDT_TAG_SIZE; - if (block_end > TG3_NVM_VPD_LEN) + if (block_end > vpdlen) goto out_not_found; j = pci_vpd_find_info_keyword(vpd_data, i, rosize, @@ -13220,7 +15252,7 @@ static void __devinit tg3_read_vpd(struct tg3 *tp) goto partno; memcpy(tp->fw_ver, &vpd_data[j], len); - strncat(tp->fw_ver, " bc ", TG3_NVM_VPD_LEN - len - 1); + strncat(tp->fw_ver, " bc ", vpdlen - len - 1); } partno: @@ -13233,48 +15265,67 @@ partno: i += PCI_VPD_INFO_FLD_HDR_SIZE; if (len > TG3_BPN_SIZE || - (len + i) > TG3_NVM_VPD_LEN) + (len + i) > vpdlen) goto out_not_found; memcpy(tp->board_part_number, &vpd_data[i], len); - return; - out_not_found: - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) + kfree(vpd_data); + if (tp->board_part_number[0]) + return; + +out_no_vpd: + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { + if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717) + strcpy(tp->board_part_number, "BCM5717"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718) + strcpy(tp->board_part_number, "BCM5718"); + else + goto nomatch; + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) { + if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780) + strcpy(tp->board_part_number, "BCM57780"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760) + strcpy(tp->board_part_number, "BCM57760"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790) + strcpy(tp->board_part_number, "BCM57790"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788) + strcpy(tp->board_part_number, "BCM57788"); + else + goto nomatch; + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { + if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761) + strcpy(tp->board_part_number, "BCM57761"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765) + strcpy(tp->board_part_number, "BCM57765"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781) + strcpy(tp->board_part_number, "BCM57781"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785) + strcpy(tp->board_part_number, "BCM57785"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791) + strcpy(tp->board_part_number, "BCM57791"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795) + strcpy(tp->board_part_number, "BCM57795"); + else + goto nomatch; + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) { + if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762) + strcpy(tp->board_part_number, "BCM57762"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766) + strcpy(tp->board_part_number, "BCM57766"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782) + strcpy(tp->board_part_number, "BCM57782"); + else if (tp->pdev->device == 
TG3PCI_DEVICE_TIGON3_57786) + strcpy(tp->board_part_number, "BCM57786"); + else + goto nomatch; + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { strcpy(tp->board_part_number, "BCM95906"); - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 && - tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780) - strcpy(tp->board_part_number, "BCM57780"); - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 && - tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760) - strcpy(tp->board_part_number, "BCM57760"); - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 && - tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790) - strcpy(tp->board_part_number, "BCM57790"); - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 && - tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788) - strcpy(tp->board_part_number, "BCM57788"); - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 && - tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761) - strcpy(tp->board_part_number, "BCM57761"); - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 && - tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765) - strcpy(tp->board_part_number, "BCM57765"); - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 && - tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781) - strcpy(tp->board_part_number, "BCM57781"); - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 && - tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785) - strcpy(tp->board_part_number, "BCM57785"); - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 && - tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791) - strcpy(tp->board_part_number, "BCM57791"); - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 && - tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795) - strcpy(tp->board_part_number, "BCM57795"); - else + } else { +nomatch: strcpy(tp->board_part_number, "none"); + } } static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset) @@ -13431,7 +15482,7 @@ static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp) if (offset == TG3_NVM_DIR_END) return; - if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) + if (!tg3_flag(tp, 5705_PLUS)) start = 0x08000000; else if (tg3_nvram_read(tp, offset - 4, &start)) return; @@ -13471,8 +15522,7 @@ static void __devinit tg3_read_dash_ver(struct tg3 *tp) u32 apedata; char *fwtype; - if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) || - !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) + if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF)) return; apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG); @@ -13486,7 +15536,7 @@ static void __devinit tg3_read_dash_ver(struct tg3 *tp) apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION); if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) { - tp->tg3_flags3 |= TG3_FLG3_APE_HAS_NCSI; + tg3_flag_set(tp, APE_HAS_NCSI); fwtype = "NCSI"; } else { fwtype = "DASH"; @@ -13510,7 +15560,7 @@ static void __devinit tg3_read_fw_ver(struct tg3 *tp) if (tp->fw_ver[0] != 0) vpd_vers = true; - if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) { + if (tg3_flag(tp, NO_NVRAM)) { strcat(tp->fw_ver, "sb"); return; } @@ -13527,11 +15577,15 @@ static void __devinit tg3_read_fw_ver(struct tg3 *tp) else return; - if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) || - (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) || vpd_vers) + if (vpd_vers) goto done; - tg3_read_mgmtfw_ver(tp); + if (tg3_flag(tp, ENABLE_APE)) { + if (tg3_flag(tp, ENABLE_ASF)) + tg3_read_dash_ver(tp); + } else if (tg3_flag(tp, ENABLE_ASF)) { + tg3_read_mgmtfw_ver(tp); + } done: tp->fw_ver[TG3_VER_SIZE - 1] = 0; 
@@ -13539,19 +15593,27 @@ done: static struct pci_dev * __devinit tg3_find_peer(struct tg3 *); -static int __devinit tg3_get_invariants(struct tg3 *tp) +static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp) { + if (tg3_flag(tp, LRG_PROD_RING_CAP)) + return TG3_RX_RET_MAX_SIZE_5717; + else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) + return TG3_RX_RET_MAX_SIZE_5700; + else + return TG3_RX_RET_MAX_SIZE_5705; +} + #if (LINUX_VERSION_CODE >= 0x2060a) - static struct pci_device_id write_reorder_chipsets[] = { - { PCI_DEVICE(PCI_VENDOR_ID_AMD, - PCI_DEVICE_ID_AMD_FE_GATE_700C) }, - { PCI_DEVICE(PCI_VENDOR_ID_AMD, - PCI_DEVICE_ID_AMD_8131_BRIDGE) }, - { PCI_DEVICE(PCI_VENDOR_ID_VIA, - PCI_DEVICE_ID_VIA_8385_0) }, - { }, - }; +static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = { + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) }, + { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) }, + { }, +}; #endif + +static int __devinit tg3_get_invariants(struct tg3 *tp) +{ u32 misc_ctrl_reg; u32 pci_state_reg, grc_misc_cfg; u32 val; @@ -13569,14 +15631,17 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) pci_cmd &= ~PCI_COMMAND_INVALIDATE; pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); - /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL - * has the register indirect write enable bit set before - * we try to access any of the MMIO registers. It is also - * critical that the PCI-X hw workaround situation is decided - * before that as well. + /* Important! -- Make sure register accesses are byteswapped + * correctly. Also, for those chips that require it, make + * sure that indirect register accesses are enabled before + * the first operation. 
*/ pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, &misc_ctrl_reg); + tp->misc_host_ctrl |= (misc_ctrl_reg & + MISC_HOST_CTRL_CHIPREV); + pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, + tp->misc_host_ctrl); tp->pci_chip_rev_id = (misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT); @@ -13585,7 +15650,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || - tp->pdev->device == TG3PCI_DEVICE_TIGON3_5724) + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) pci_read_config_dword(tp->pdev, TG3PCI_GEN2_PRODID_ASICREV, &prod_id_asic_rev); @@ -13594,7 +15660,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 || tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 || tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 || - tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795) + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786) pci_read_config_dword(tp->pdev, TG3PCI_GEN15_PRODID_ASICREV, &prod_id_asic_rev); @@ -13666,15 +15736,14 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) if (bridge->subordinate && (bridge->subordinate->number == tp->pdev->bus->number)) { - - tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND; + tg3_flag_set(tp, ICH_WORKAROUND); pci_dev_put(bridge); break; } } } - if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) { + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { static struct tg3_dev_id { u32 vendor; u32 device; @@ -13699,7 +15768,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) tp->pdev->bus->number) && (bridge->subordinate->subordinate >= tp->pdev->bus->number)) { - tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG; + tg3_flag_set(tp, 5701_DMA_BUG); pci_dev_put(bridge); break; } @@ -13714,8 +15783,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) */ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { - tp->tg3_flags2 |= TG3_FLG2_5780_CLASS; - tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG; + tg3_flag_set(tp, 5780_CLASS); + tg3_flag_set(tp, 40BIT_DMA_BUG); tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI); } else { struct pci_dev *bridge = NULL; @@ -13729,24 +15798,29 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) tp->pdev->bus->number) && (bridge->subordinate->subordinate >= tp->pdev->bus->number)) { - tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG; + tg3_flag_set(tp, 40BIT_DMA_BUG); pci_dev_put(bridge); break; } } while (bridge); } - /* Initialize misc host control in PCI block. 
*/ - tp->misc_host_ctrl |= (misc_ctrl_reg & - MISC_HOST_CTRL_CHIPREV); - pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, - tp->misc_host_ctrl); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) tp->pdev_peer = tg3_find_peer(tp); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + tg3_flag_set(tp, 5717_PLUS); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) + tg3_flag_set(tp, 57765_CLASS); + + if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS)) + tg3_flag_set(tp, 57765_PLUS); + /* Intentionally exclude ASIC_REV_5906 */ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || @@ -13754,134 +15828,177 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) - tp->tg3_flags3 |= TG3_FLG3_5755_PLUS; + tg3_flag(tp, 57765_PLUS)) + tg3_flag_set(tp, 5755_PLUS); if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 || - (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) || - (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) - tp->tg3_flags2 |= TG3_FLG2_5750_PLUS; - - if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) || - (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)) - tp->tg3_flags2 |= TG3_FLG2_5705_PLUS; + tg3_flag(tp, 5755_PLUS) || + tg3_flag(tp, 5780_CLASS)) + tg3_flag_set(tp, 5750_PLUS); - /* 5700 B0 chips do not support checksumming correctly due - * to hardware bugs. - */ - if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0) - tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS; - else { - tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS; -#ifndef BCM_NO_IPV6_CSUM - tp->dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; - if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) - tp->dev->features |= NETIF_F_IPV6_CSUM; -#else - tp->dev->features |= NETIF_F_SG; - if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) - tp->dev->features |= NETIF_F_HW_CSUM; - else - tp->dev->features |= NETIF_F_IP_CSUM; -#endif - } + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 || + tg3_flag(tp, 5750_PLUS)) + tg3_flag_set(tp, 5705_PLUS); /* Determine TSO capabilities */ - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) - tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3; - else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) || + if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) + ; /* Do nothing. HW bug. 
*/ + else if (tg3_flag(tp, 57765_PLUS)) + tg3_flag_set(tp, HW_TSO_3); + else if (tg3_flag(tp, 5755_PLUS) || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) - tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2; - else if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { - tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG; + tg3_flag_set(tp, HW_TSO_2); + else if (tg3_flag(tp, 5750_PLUS)) { + tg3_flag_set(tp, HW_TSO_1); + tg3_flag_set(tp, TSO_BUG); if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 && tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2) - tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG; + tg3_flag_clear(tp, TSO_BUG); } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 && tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) { - tp->tg3_flags2 |= TG3_FLG2_TSO_BUG; + tg3_flag_set(tp, TSO_BUG); if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) tp->fw_needed = FIRMWARE_TG3TSO5; else tp->fw_needed = FIRMWARE_TG3TSO; } + /* Selectively allow TSO based on operating conditions */ + if (tg3_flag(tp, HW_TSO_1) || + tg3_flag(tp, HW_TSO_2) || + tg3_flag(tp, HW_TSO_3) || + tp->fw_needed) { + /* For firmware TSO, assume ASF is disabled. + * We'll disable TSO later if we discover ASF + * is enabled in tg3_get_eeprom_hw_cfg(). + */ + tg3_flag_set(tp, TSO_CAPABLE); + } else { + tg3_flag_clear(tp, TSO_CAPABLE); + tg3_flag_clear(tp, TSO_BUG); + tp->fw_needed = NULL; + } + + if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) + tp->fw_needed = FIRMWARE_TG3; + tp->irq_max = 1; + tp->txq_max = 1; + tp->rxq_max = 1; - if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { - tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI; + if (tg3_flag(tp, 5750_PLUS)) { + tg3_flag_set(tp, SUPPORT_MSI); if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX || GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX || (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 && tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 && tp->pdev_peer == tp->pdev)) - tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI; + tg3_flag_clear(tp, SUPPORT_MSI); - if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) || + if (tg3_flag(tp, 5755_PLUS) || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { - tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI; + tg3_flag_set(tp, 1SHOT_MSI); } + if (tg3_flag(tp, 57765_PLUS)) { + tg3_flag_set(tp, SUPPORT_MSIX); #ifdef TG3_NAPI - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { - tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX; - tp->irq_max = TG3_IRQ_MAX_VECS; - } + tp->irq_max = TG3_IRQ_MAX_VECS_RSS; + tp->rxq_max = tp->irq_max - 1; + tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + tp->txq_max = tp->irq_max - 1; #endif + } +#ifdef TG3_INBOX + tp->irq_max = 1; +#else +#ifdef TG3_VMWARE_NETQ_ENABLE + if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { + tp->vmware.netq.index = tg3_netq_index++; + tg3_flag_set(tp, IOV_CAPABLE); + tg3_flag_clear(tp, 1SHOT_MSI); + tp->irq_max = TG3_IRQ_MAX_VECS_IOV; + tp->rxq_max = tp->irq_max; + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) + tp->txq_max = tp->irq_max - 1; + } +#endif /* TG3_VMWARE_NETQ_ENABLE */ +#endif /* TG3_INBOX */ } - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) - tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG; - else if 
(!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) { - tp->tg3_flags3 |= TG3_FLG3_4G_DMA_BNDRY_BUG; - tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG; - } + if (tg3_flag(tp, 5755_PLUS)) + tg3_flag_set(tp, SHORT_DMA_BUG); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) + tp->dma_limit = TG3_TX_BD_DMA_MAX_4K; +#if defined(__VMKLNX__) + else if (tg3_flag(tp, TSO_CAPABLE)) + tp->dma_limit = TG3_TX_BD_DMA_MAX_32K; +#endif if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) - tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG; + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + tg3_flag_set(tp, LRG_PROD_RING_CAP); + + if (tg3_flag(tp, 57765_PLUS) && + tp->pci_chip_rev_id != CHIPREV_ID_5719_A0) + tg3_flag_set(tp, USE_JUMBO_BDFLAG); - if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || - (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) || - (tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG)) - tp->tg3_flags |= TG3_FLAG_JUMBO_CAPABLE; + if (!tg3_flag(tp, 5705_PLUS) || + tg3_flag(tp, 5780_CLASS) || + tg3_flag(tp, USE_JUMBO_BDFLAG)) + tg3_flag_set(tp, JUMBO_CAPABLE); pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &pci_state_reg); +#ifndef BCM_HAS_PCI_PCIE_CAP tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP); - if (tp->pcie_cap != 0) { +#endif + + if (pci_is_pcie(tp->pdev)) { u16 lnkctl; - tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS; + tg3_flag_set(tp, PCI_EXPRESS); - pcie_set_readrq(tp->pdev, 4096); + if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) { + int readrq = pcie_get_readrq(tp->pdev); + if (readrq > 2048) + pcie_set_readrq(tp->pdev, 2048); + } pci_read_config_word(tp->pdev, - tp->pcie_cap + PCI_EXP_LNKCTL, + pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL, &lnkctl); if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) { - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) - tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == + ASIC_REV_5906) { + tg3_flag_clear(tp, HW_TSO_2); + tg3_flag_clear(tp, TSO_CAPABLE); + } if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 || tp->pci_chip_rev_id == CHIPREV_ID_57780_A1) - tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG; + tg3_flag_set(tp, CLKREQ_BUG); } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) { - tp->tg3_flags3 |= TG3_FLG3_L1PLLPD_EN; + tg3_flag_set(tp, L1PLLPD_EN); } } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) { - tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS; - } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || - (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { + /* BCM5785 devices are effectively PCIe devices, and should + * follow PCIe codepaths, but do not have a PCIe capabilities + * section. 
+ */ + tg3_flag_set(tp, PCI_EXPRESS); + } else if (!tg3_flag(tp, 5705_PLUS) || + tg3_flag(tp, 5780_CLASS)) { tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX); if (!tp->pcix_cap) { dev_err(&tp->pdev->dev, @@ -13890,7 +16007,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) } if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE)) - tp->tg3_flags |= TG3_FLAG_PCIX_MODE; + tg3_flag_set(tp, PCIX_MODE); } /* If we have an AMD 762 or VIA K8T800 chipset, write @@ -13907,10 +16024,10 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) pci_find_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0, NULL)) && #else - if (pci_dev_present(write_reorder_chipsets) && + if (pci_dev_present(tg3_write_reorder_chipsets) && #endif - !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) - tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER; + !tg3_flag(tp, PCI_EXPRESS)) + tg3_flag_set(tp, MBOX_WRITE_REORDER); pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &tp->pci_cacheline_sz); @@ -13923,21 +16040,24 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) tp->pci_lat_timer); } + /* Important! -- It is critical that the PCI-X hw workaround + * situation is decided before the first MMIO register access. + */ if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) { /* 5700 BX chips need to have their TX producer index * mailboxes written twice to workaround a bug. */ - tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG; + tg3_flag_set(tp, TXD_MBOX_HWBUG); /* If we are in PCI-X mode, enable register write workaround. * * The workaround is to use indirect register accesses * for all chip writes not to mailbox registers. */ - if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) { u32 pm_reg; - tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG; + tg3_flag_set(tp, PCIX_TARGET_HWBUG); /* The chip can have its power management PCI config * space registers clobbered due to this bug.
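For context, the two workaround flags set above meet again in the TX mailbox write path: the double write covers the 5700 BX producer-index bug, and the read-back flushes posted writes on boards behind reordering bridges. A minimal sketch of that helper, using the tg3_flag() accessors this patch introduces (the actual body lives outside the hunks shown here and may differ in detail):

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;

	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);	/* 5700 BX: write the producer index twice */
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		readl(mbox);		/* read back to force the write past the bridge */
}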
@@ -13960,9 +16080,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) } if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0) - tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED; + tg3_flag_set(tp, PCI_HIGH_SPEED); if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0) - tp->tg3_flags |= TG3_FLAG_PCI_32BIT; + tg3_flag_set(tp, PCI_32BIT); /* Chip-specific fixup from Broadcom driver */ if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) && @@ -13980,10 +16100,10 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) tp->write32_rx_mbox = tg3_write32; /* Various workaround register access methods */ - if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) + if (tg3_flag(tp, PCIX_TARGET_HWBUG)) tp->write32 = tg3_write_indirect_reg32; else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 || - ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && + (tg3_flag(tp, PCI_EXPRESS) && tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) { /* * Back to back register writes can cause problems on these @@ -13995,14 +16115,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) tp->write32 = tg3_write_flush_reg32; } - if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) || - (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) { + if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) { tp->write32_tx_mbox = tg3_write32_tx_mbox; - if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) + if (tg3_flag(tp, MBOX_WRITE_REORDER)) tp->write32_rx_mbox = tg3_write_flush_reg32; } - if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) { + if (tg3_flag(tp, ICH_WORKAROUND)) { tp->read32 = tg3_read_indirect_reg32; tp->write32 = tg3_write_indirect_reg32; tp->read32_mbox = tg3_read_indirect_mbox; @@ -14025,13 +16144,47 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) } if (tp->write32 == tg3_write_indirect_reg32 || - ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) && + (tg3_flag(tp, PCIX_MODE) && (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701))) - tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG; + tg3_flag_set(tp, SRAM_USE_CONFIG); + + /* The memory arbiter has to be enabled in order for SRAM accesses + * to succeed. Normally on powerup the tg3 chip firmware will make + * sure it is enabled, but other entities such as system netboot + * code might disable it. + */ + val = tr32(MEMARB_MODE); + tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); + + tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || + tg3_flag(tp, 5780_CLASS)) { + if (tg3_flag(tp, PCIX_MODE)) { + pci_read_config_dword(tp->pdev, + tp->pcix_cap + PCI_X_STATUS, + &val); + tp->pci_fn = val & 0x7; + } + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { + tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val); + if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) == + NIC_SRAM_CPMUSTAT_SIG) { + tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717; + tp->pci_fn = tp->pci_fn ? 1 : 0; + } + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { + tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val); + if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) == + NIC_SRAM_CPMUSTAT_SIG) { + tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >> + TG3_CPMU_STATUS_FSHFT_5719; + } + } /* Get eeprom hw config before calling tg3_set_power_state(). - * In particular, the TG3_FLG2_IS_NIC flag must be + * In particular, the TG3_FLAG_IS_NIC flag must be * determined before calling tg3_set_power_state() so that * we know whether or not to switch out of Vaux power. 
* When the flag is set, it means that GPIO1 is used for eeprom @@ -14040,7 +16193,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) */ tg3_get_eeprom_hw_cfg(tp); - if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { + if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) { + tg3_flag_clear(tp, TSO_CAPABLE); + tg3_flag_clear(tp, TSO_BUG); + tp->fw_needed = NULL; + } + + if (tg3_flag(tp, ENABLE_APE)) { /* Allow reads and writes to the * APE register and memory space. */ @@ -14049,23 +16208,25 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) PCISTATE_ALLOW_APE_PSPACE_WR; pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg); + + tg3_ape_lock_init(tp); } if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) - tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT; + tg3_flag(tp, 57765_PLUS)) + tg3_flag_set(tp, CPMU_PRESENT); - /* Set up tp->grc_local_ctrl before calling tg3_set_power_state(). - * GPIO1 driven high will bring 5700's external PHY out of reset. + /* Set up tp->grc_local_ctrl before calling + * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high + * will bring 5700's external PHY out of reset. * It is also used as eeprom write protect on LOMs. */ tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM; - if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) || - (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)) + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + tg3_flag(tp, EEPROM_WRITE_PROT)) tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | GRC_LCLCTRL_GPIO_OUTPUT1); /* Unused GPIO3 must be driven as output on 5752 because there @@ -14076,85 +16237,79 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + tg3_flag(tp, 57765_CLASS)) tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) { /* Turn off the debug UART. */ tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; - if (tp->tg3_flags2 & TG3_FLG2_IS_NIC) + if (tg3_flag(tp, IS_NIC)) /* Keep VMain power. */ tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OUTPUT0; } - /* Force the chip into D0. */ - err = tg3_set_power_state(tp, PCI_D0); - if (err) { - dev_err(&tp->pdev->dev, "Transition to D0 failed\n"); - return err; - } + /* Switch out of Vaux if it is a NIC */ + tg3_pwrsrc_switch_to_vmain(tp); /* Derive initial jumbo mode from MTU assigned in * ether_setup() via the alloc_etherdev() call */ - if (tp->dev->mtu > ETH_DATA_LEN && - !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) - tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE; + if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS)) + tg3_flag_set(tp, JUMBO_RING_ENABLE); /* Determine WakeOnLan speed to use. 
*/ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 || tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) { - tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB); + tg3_flag_clear(tp, WOL_SPEED_100MB); } else { - tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB; + tg3_flag_set(tp, WOL_SPEED_100MB); } #ifndef BCM_INCLUDE_PHYLIB_SUPPORT if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 || (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 && (tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCMAC131)) - tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET; + tp->phy_flags |= TG3_PHYFLG_IS_FET; #else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) - tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET; + tp->phy_flags |= TG3_PHYFLG_IS_FET; #endif /* A few boards don't want Ethernet@WireSpeed phy feature */ - if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) || - ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) && + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) && (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) || - (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) || - (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) - tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED; + (tp->phy_flags & TG3_PHYFLG_IS_FET) || + (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) + tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED; if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX || GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX) - tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG; + tp->phy_flags |= TG3_PHYFLG_ADC_BUG; if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) - tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG; + tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG; - if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && - !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) && + if (tg3_flag(tp, 5705_PLUS) && + !(tp->phy_flags & TG3_PHYFLG_IS_FET) && GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) { + !tg3_flag(tp, 57765_PLUS)) { if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) { if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 && tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722) - tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG; + tp->phy_flags |= TG3_PHYFLG_JITTER_BUG; if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M) - tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM; + tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM; } else - tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG; + tp->phy_flags |= TG3_PHYFLG_BER_BUG; } if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && @@ -14164,7 +16319,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) tp->phy_otp = TG3_OTP_DEFAULT; } - if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) + if (tg3_flag(tp, CPMU_PRESENT)) tp->mi_mode = MAC_MI_MODE_500KHZ_CONST; else tp->mi_mode = MAC_MI_MODE_BASE; @@ -14174,10 +16329,18 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX) tp->coalesce_mode |= HOSTCC_MODE_32BYTE; + /* Set these bits to enable statistics workaround. 
*/ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 || + tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) { + tp->coalesce_mode |= HOSTCC_MODE_ATTN; + tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN; + } + #ifdef BCM_INCLUDE_PHYLIB_SUPPORT if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) - tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB; + tg3_flag_set(tp, USE_PHYLIB); #endif err = tg3_mdio_init(tp); @@ -14186,7 +16349,15 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) /* Initialize data/descriptor byte/word swapping. */ val = tr32(GRC_MODE); - val &= GRC_MODE_HOST_STACKUP; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA | + GRC_MODE_WORD_SWAP_B2HRX_DATA | + GRC_MODE_B2HRX_ENABLE | + GRC_MODE_HTX2B_ENABLE | + GRC_MODE_HOST_STACKUP); + else + val &= GRC_MODE_HOST_STACKUP; + tw32(GRC_MODE, val | tp->grc_mode); tg3_switch_clocks(tp); @@ -14197,7 +16368,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &pci_state_reg); if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 && - (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) { + !tg3_flag(tp, PCIX_TARGET_HWBUG)) { u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl); if (chiprevid == CHIPREV_ID_5701_A0 || @@ -14216,7 +16387,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) writel(0x00000000, sram_base + 4); writel(0xffffffff, sram_base + 4); if (readl(sram_base) != 0x00000000) - tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG; + tg3_flag_set(tp, PCIX_TARGET_HWBUG); } } @@ -14229,12 +16400,12 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 || grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M)) - tp->tg3_flags2 |= TG3_FLG2_IS_5788; + tg3_flag_set(tp, IS_5788); - if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) && - (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)) - tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS; - if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) { + if (!tg3_flag(tp, IS_5788) && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) + tg3_flag_set(tp, TAGGED_STATUS); + if (tg3_flag(tp, TAGGED_STATUS)) { tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD | HOSTCC_MODE_CLRTICK_TXBD); @@ -14244,11 +16415,10 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) } /* Preserve the APE MAC_MODE bits */ - if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) - tp->mac_mode = tr32(MAC_MODE) | - MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; + if (tg3_flag(tp, ENABLE_APE)) + tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; else - tp->mac_mode = TG3_DEF_MAC_MODE; + tp->mac_mode = 0; /* these are limited to 10/100 only */ if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 && @@ -14265,8 +16435,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 || tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 || tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 || - (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)) - tp->tg3_flags |= TG3_FLAG_10_100_ONLY; + (tp->phy_flags & TG3_PHYFLG_IS_FET)) + tp->phy_flags |= TG3_PHYFLG_10_100_ONLY; err = tg3_phy_probe(tp); if (err) { @@ -14278,13 +16448,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) tg3_read_vpd(tp); tg3_read_fw_ver(tp); - if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { - tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT; + if (tp->phy_flags & 
TG3_PHYFLG_PHY_SERDES) { + tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT; } else { if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) - tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT; + tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT; else - tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT; + tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT; } /* 5700 {AX,BX} chips have a broken status block link @@ -14292,9 +16462,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) * status register in those cases. */ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) - tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG; + tg3_flag_set(tp, USE_LINKCHG_REG); else - tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG; + tg3_flag_clear(tp, USE_LINKCHG_REG); /* The led_ctrl is set during tg3_phy_probe, here we might * have to force the link status polling mechanism based @@ -14302,28 +16472,32 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) */ if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL && GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 && - !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) { - tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT | - TG3_FLAG_USE_LINKCHG_REG); + !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { + tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT; + tg3_flag_set(tp, USE_LINKCHG_REG); } /* For all SERDES we poll the MAC status register. */ - if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) - tp->tg3_flags |= TG3_FLAG_POLL_SERDES; + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) + tg3_flag_set(tp, POLL_SERDES); else - tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES; + tg3_flag_clear(tp, POLL_SERDES); - tp->rx_offset = NET_IP_ALIGN + TG3_RX_HEADROOM; + tp->rx_offset = NET_IP_ALIGN; tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD; if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 && - (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) { - tp->rx_offset -= NET_IP_ALIGN; + tg3_flag(tp, PCIX_MODE)) { + tp->rx_offset = 0; #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS tp->rx_copy_thresh = ~(u16)0; #endif } - tp->rx_std_max_post = TG3_RX_RING_SIZE; + tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1; + tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1; + tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1; + + tp->rx_std_max_post = tp->rx_std_ring_mask + 1; /* Increment the rx prod index on the rx std ring by at most * 8 for these chips to workaround hw errata. 
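The new rx_*_ring_mask fields rely on every TG3_RX_*_RING_SIZE() value being a power of two, so a ring index wraps with a single AND rather than a modulo. A sketch of the arithmetic, using a hypothetical helper name for illustration only:

static inline u32 tg3_rx_std_next_idx(struct tg3 *tp, u32 idx)
{
	/* e.g. 511 -> 0 on a 512-entry ring, 2047 -> 0 on a 5717-class ring */
	return (idx + 1) & tp->rx_std_ring_mask;
}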
@@ -14333,7 +16507,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) tp->rx_std_max_post = 8; - if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) + if (tg3_flag(tp, ASPM_WORKAROUND)) tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) & PCIE_PWR_MGMT_L1_THRESH_MSK; @@ -14380,17 +16554,19 @@ static int __devinit tg3_get_device_address(struct tg3 *tp) #endif mac_offset = 0x7c; - if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) || - (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || + tg3_flag(tp, 5780_CLASS)) { if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) mac_offset = 0xcc; if (tg3_nvram_lock(tp)) tw32_f(NVRAM_CMD, NVRAM_CMD_RESET); else tg3_nvram_unlock(tp); - } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { - if (PCI_FUNC(tp->pdev->devfn)) + } else if (tg3_flag(tp, 5717_PLUS)) { + if (tp->pci_fn & 1) mac_offset = 0xcc; + if (tp->pci_fn > 1) + mac_offset += 0x18c; } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) mac_offset = 0x10; @@ -14411,7 +16587,7 @@ static int __devinit tg3_get_device_address(struct tg3 *tp) } if (!addr_ok) { /* Next, try NVRAM. */ - if (!(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) && + if (!tg3_flag(tp, NO_NVRAM) && !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) && !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) { memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2); @@ -14464,7 +16640,7 @@ static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val) */ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 && - !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) + !tg3_flag(tp, PCI_EXPRESS)) goto out; #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC) @@ -14477,8 +16653,7 @@ static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val) #endif #endif - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { + if (tg3_flag(tp, 57765_PLUS)) { val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT; goto out; } @@ -14497,8 +16672,7 @@ static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val) * other than 5700 and 5701 which do not implement the * boundary bits. 
*/ - if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) && - !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) { + if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) { switch (cacheline_size) { case 16: case 32: @@ -14523,7 +16697,7 @@ static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val) DMA_RWCTRL_WRITE_BNDRY_384_PCIX); break; } - } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { + } else if (tg3_flag(tp, PCI_EXPRESS)) { switch (cacheline_size) { case 16: case 32: @@ -14672,13 +16846,21 @@ static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dm #define TEST_BUFFER_SIZE 0x2000 +#if (LINUX_VERSION_CODE >= 0x2060a) +static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = { + { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) }, + { }, +}; +#endif + static int __devinit tg3_test_dma(struct tg3 *tp) { dma_addr_t buf_dma; u32 *buf, saved_dma_rwctrl; int ret = 0; - buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma); + buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, + &buf_dma, GFP_KERNEL); if (!buf) { ret = -ENOMEM; goto out_nofree; @@ -14689,14 +16871,13 @@ static int __devinit tg3_test_dma(struct tg3 *tp) tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + if (tg3_flag(tp, 57765_PLUS)) goto out; - if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { + if (tg3_flag(tp, PCI_EXPRESS)) { /* DMA read watermark not used on PCIE */ tp->dma_rwctrl |= 0x00180000; - } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) { + } else if (!tg3_flag(tp, PCIX_MODE)) { if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) tp->dma_rwctrl |= 0x003f0000; @@ -14712,7 +16893,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp) * do the less restrictive ONE_DMA workaround for * better performance. */ - if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) && + if (tg3_flag(tp, 40BIT_DMA_BUG) && GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) tp->dma_rwctrl |= 0x8000; else if (ccval == 0x6 || ccval == 0x7) @@ -14841,14 +17022,6 @@ static int __devinit tg3_test_dma(struct tg3 *tp) } if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) != DMA_RWCTRL_WRITE_BNDRY_16) { -#if (LINUX_VERSION_CODE >= 0x2060a) - static struct pci_device_id dma_wait_state_chipsets[] = { - { PCI_DEVICE(PCI_VENDOR_ID_APPLE, - PCI_DEVICE_ID_APPLE_UNI_N_PCI15) }, - { }, - }; -#endif - /* DMA test passed without adjusting DMA boundary, * now look for chipsets that are known to expose the * DMA bug without failing the test. 
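The allocator change in tg3_test_dma() above is close to mechanical: in kernels of this era, pci_alloc_consistent() is a thin compatibility wrapper over the generic DMA API, roughly:

static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
					 dma_addr_t *dma_handle)
{
	return dma_alloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC);
}

Calling dma_alloc_coherent() directly also lets this probe-time path pass GFP_KERNEL instead of the wrapper's implied GFP_ATOMIC.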
@@ -14857,7 +17030,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp) if (pci_find_device(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15, NULL)) #else - if (pci_dev_present(dma_wait_state_chipsets)) + if (pci_dev_present(tg3_dma_wait_state_chipsets)) #endif { tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; @@ -14871,33 +17044,14 @@ static int __devinit tg3_test_dma(struct tg3 *tp) } out: - pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma); -out_nofree: - return ret; -} - -static void __devinit tg3_init_link_config(struct tg3 *tp) -{ - tp->link_config.advertising = - (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | - ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | - ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | - ADVERTISED_Autoneg | ADVERTISED_MII); - tp->link_config.speed = SPEED_INVALID; - tp->link_config.duplex = DUPLEX_INVALID; - tp->link_config.autoneg = AUTONEG_ENABLE; - tp->link_config.active_speed = SPEED_INVALID; - tp->link_config.active_duplex = DUPLEX_INVALID; - tp->link_config.phy_is_low_power = 0; - tp->link_config.orig_speed = SPEED_INVALID; - tp->link_config.orig_duplex = DUPLEX_INVALID; - tp->link_config.orig_autoneg = AUTONEG_INVALID; + dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma); +out_nofree: + return ret; } static void __devinit tg3_init_bufmgr_config(struct tg3 *tp) { - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { + if (tg3_flag(tp, 57765_PLUS)) { tp->bufmgr_config.mbuf_read_dma_low_water = DEFAULT_MB_RDMA_LOW_WATER_5705; tp->bufmgr_config.mbuf_mac_rx_low_water = @@ -14911,7 +17065,7 @@ static void __devinit tg3_init_bufmgr_config(struct tg3 *tp) DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765; tp->bufmgr_config.mbuf_high_water_jumbo = DEFAULT_MB_HIGH_WATER_JUMBO_57765; - } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { + } else if (tg3_flag(tp, 5705_PLUS)) { tp->bufmgr_config.mbuf_read_dma_low_water = DEFAULT_MB_RDMA_LOW_WATER_5705; tp->bufmgr_config.mbuf_mac_rx_low_water = @@ -14981,6 +17135,8 @@ static char * __devinit tg3_phy_string(struct tg3 *tp) case TG3_PHY_ID_BCM5718C: return "5718C"; case TG3_PHY_ID_BCM5718S: return "5718S"; case TG3_PHY_ID_BCM57765: return "57765"; + case TG3_PHY_ID_BCM5719C: return "5719C"; + case TG3_PHY_ID_BCM5720C: return "5720C"; case TG3_PHY_ID_BCM8002: return "8002/serdes"; case 0: return "serdes"; default: return "unknown"; @@ -14989,10 +17145,10 @@ static char * __devinit tg3_phy_string(struct tg3 *tp) static char * __devinit tg3_bus_string(struct tg3 *tp, char *str) { - if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { + if (tg3_flag(tp, PCI_EXPRESS)) { strcpy(str, "PCI Express"); return str; - } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) { + } else if (tg3_flag(tp, PCIX_MODE)) { u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f; strcpy(str, "PCIX:"); @@ -15011,12 +17167,12 @@ static char * __devinit tg3_bus_string(struct tg3 *tp, char *str) strcat(str, "100MHz"); } else { strcpy(str, "PCI:"); - if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) + if (tg3_flag(tp, PCI_HIGH_SPEED)) strcat(str, "66MHz"); else strcat(str, "33MHz"); } - if (tp->tg3_flags & TG3_FLAG_PCI_32BIT) + if (tg3_flag(tp, PCI_32BIT)) strcat(str, ":32-bit"); else strcat(str, ":64-bit"); @@ -15075,52 +17231,91 @@ static void __devinit tg3_init_coal(struct tg3 *tp) ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS; } - if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { + if (tg3_flag(tp, 5705_PLUS)) { ec->rx_coalesce_usecs_irq = 0; 
ec->tx_coalesce_usecs_irq = 0; ec->stats_block_coalesce_usecs = 0; } } -#ifdef HAVE_NET_DEVICE_OPS +#ifdef BCM_HAS_IEEE1588_SUPPORT +static cycle_t tg3_read_clock(const struct cyclecounter *tc) +{ + struct tg3 *tp = container_of(tc, struct tg3, cycles); + u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB); + return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32; +} + +static void __devinit tg3_init_hwtimer(struct tg3 *tp) +{ + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5720) + return; + + /* + * Initialize hardware timer: we keep it running just in case + * some program needs it later on. + */ + memset(&tp->cycles, 0, sizeof(tp->cycles)); + tp->cycles.read = tg3_read_clock; + tp->cycles.mask = CLOCKSOURCE_MASK(64); + tp->cycles.mult = 1; + + /* Set registers so that rollover occurs soon to test this. */ + tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP); + tw32(TG3_EAV_REF_CLCK_LSB, 0x00000000); + tw32(TG3_EAV_REF_CLCK_MSB, 0xFF800000); + tw32_f(TG3_EAV_REF_CLCK_CTL, 0x0); + + timecounter_init(&tp->clock, + &tp->cycles, + ktime_to_ns(ktime_get_real())); + /* + * Synchronize our NIC clock against system wall clock. NIC + * time stamp reading requires ~3us per sample; samples were + * stable even under load, so 10 samples suffice for each + * offset comparison. + */ + memset(&tp->compare, 0, sizeof(tp->compare)); + tp->compare.source = &tp->clock; + tp->compare.target = ktime_get_real; + tp->compare.num_samples = 10; + timecompare_update(&tp->compare, 0); +} +#endif /* BCM_HAS_IEEE1588_SUPPORT */ + +#ifdef BCM_HAS_NET_DEVICE_OPS static const struct net_device_ops tg3_netdev_ops = { .ndo_open = tg3_open, .ndo_stop = tg3_close, .ndo_start_xmit = tg3_start_xmit, +#ifdef BCM_HAS_GET_STATS64 + .ndo_get_stats64 = tg3_get_stats64, +#else .ndo_get_stats = tg3_get_stats, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_multicast_list = tg3_set_rx_mode, - .ndo_set_mac_address = tg3_set_mac_addr, - .ndo_do_ioctl = tg3_ioctl, - .ndo_tx_timeout = tg3_tx_timeout, - .ndo_change_mtu = tg3_change_mtu, -#if TG3_VLAN_TAG_USED - .ndo_vlan_rx_register = tg3_vlan_rx_register, -#endif -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = tg3_poll_controller, #endif -}; - -static const struct net_device_ops tg3_netdev_ops_dma_bug = { - .ndo_open = tg3_open, - .ndo_stop = tg3_close, - .ndo_start_xmit = tg3_start_xmit_dma_bug, - .ndo_get_stats = tg3_get_stats, .ndo_validate_addr = eth_validate_addr, +#ifdef BCM_HAS_SET_MULTICAST_LIST .ndo_set_multicast_list = tg3_set_rx_mode, +#else + .ndo_set_rx_mode = tg3_set_rx_mode, +#endif .ndo_set_mac_address = tg3_set_mac_addr, .ndo_do_ioctl = tg3_ioctl, .ndo_tx_timeout = tg3_tx_timeout, .ndo_change_mtu = tg3_change_mtu, -#if TG3_VLAN_TAG_USED +#ifdef BCM_HAS_FIX_FEATURES + .ndo_fix_features = tg3_fix_features, + .ndo_set_features = tg3_set_features, +#endif +#if defined(BCM_KERNEL_SUPPORTS_8021Q) && defined(BCM_USE_OLD_VLAN_INTERFACE) .ndo_vlan_rx_register = tg3_vlan_rx_register, #endif #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = tg3_poll_controller, #endif }; -#endif /* HAVE_NET_DEVICE_OPS */ +#endif /* BCM_HAS_NET_DEVICE_OPS */ static int __devinit tg3_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) @@ -15132,9 +17327,23 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, char str[40]; u64 dma_mask, persist_dma_mask; DECLARE_MAC_BUF(mac); + netdev_features_t features = 0; printk_once(KERN_INFO "%s\n", version); +#ifdef TG3_VMWARE_NETQ_ENABLE + for (i = 0; 
i < TG3_MAX_NIC; i++) { + if (tg3_netq_force[i] < TG3_OPTION_UNSET || + tg3_netq_force[i] >= TG3_IRQ_MAX_VECS_IOV) { + dev_err(&pdev->dev, + "Invalid force_netq module parameter " + "value for index %d (%d)\n", + i, tg3_netq_force[i]); + return -EINVAL; + } + } +#endif + err = pci_enable_device(pdev); if (err) { dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); @@ -15158,11 +17367,17 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, goto err_out_free_res; } + err = pci_set_power_state(pdev, PCI_D0); + if (err) { + dev_err(&pdev->dev, "Transition to D0 failed, aborting\n"); + goto err_out_free_res; + } + dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS); if (!dev) { dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n"); err = -ENOMEM; - goto err_out_free_res; + goto err_out_power_down; } SET_MODULE_OWNER(dev); @@ -15170,9 +17385,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, SET_NETDEV_DEV(dev, &pdev->dev); #endif -#if TG3_VLAN_TAG_USED - dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; -#endif + pci_set_drvdata(pdev, dev); tp = netdev_priv(dev); tp->pdev = pdev; @@ -15222,7 +17435,23 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, goto err_out_free_dev; } - tg3_init_link_config(tp); + if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || + tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) { + tg3_flag_set(tp, ENABLE_APE); + tp->aperegs = pci_ioremap_bar(pdev, BAR_2); + if (!tp->aperegs) { + dev_err(&pdev->dev, + "Cannot map APE registers, aborting\n"); + err = -ENOMEM; + goto err_out_iounmap; + } + } tp->rx_pending = TG3_DEF_RX_RING_PENDING; tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING; @@ -15235,15 +17464,11 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, if (err) { dev_err(&pdev->dev, "Problem fetching invariants of chip, aborting\n"); - goto err_out_iounmap; + goto err_out_apeunmap; } -#ifdef HAVE_NET_DEVICE_OPS - if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) - dev->netdev_ops = &tg3_netdev_ops; - else - dev->netdev_ops = &tg3_netdev_ops_dma_bug; +#ifdef BCM_HAS_NET_DEVICE_OPS + dev->netdev_ops = &tg3_netdev_ops; #else dev->open = tg3_open; dev->stop = tg3_close; @@ -15253,7 +17478,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, dev->do_ioctl = tg3_ioctl; dev->tx_timeout = tg3_tx_timeout; dev->change_mtu = tg3_change_mtu; -#if TG3_VLAN_TAG_USED +#ifdef BCM_USE_OLD_VLAN_INTERFACE dev->vlan_rx_register = tg3_vlan_rx_register; dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid; #endif @@ -15261,14 +17486,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, dev->poll_controller = tg3_poll_controller; #endif - /* All chips before 5787 can get confused if TX buffers - * straddle the 4GB address boundary in some cases. 
- */ - if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) - tp->dev->hard_start_xmit = tg3_start_xmit; - else - tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug; + tp->dev->hard_start_xmit = tg3_start_xmit; #endif /* The EPB bridge inside 5714, 5715, and 5780 and any @@ -15277,9 +17495,9 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, * On 64-bit systems without IOMMU, use 64-bit dma_mask and * do DMA address check in tg3_start_xmit(). */ - if (tp->tg3_flags2 & TG3_FLG2_IS_5788) + if (tg3_flag(tp, IS_5788)) persist_dma_mask = dma_mask = DMA_BIT_MASK(32); - else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) { + else if (tg3_flag(tp, 40BIT_DMA_BUG)) { persist_dma_mask = dma_mask = DMA_BIT_MASK(40); #ifdef CONFIG_HIGHMEM dma_mask = DMA_BIT_MASK(64); @@ -15291,13 +17509,13 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, if (dma_mask > DMA_BIT_MASK(32)) { err = pci_set_dma_mask(pdev, dma_mask); if (!err) { - dev->features |= NETIF_F_HIGHDMA; + features |= NETIF_F_HIGHDMA; err = pci_set_consistent_dma_mask(pdev, persist_dma_mask); if (err < 0) { dev_err(&pdev->dev, "Unable to obtain 64 bit " "DMA for consistent allocations\n"); - goto err_out_iounmap; + goto err_out_apeunmap; } } } @@ -15306,41 +17524,52 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, if (err) { dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); - goto err_out_iounmap; + goto err_out_apeunmap; } } tg3_init_bufmgr_config(tp); -#if TG3_TSO_SUPPORT != 0 - /* Selectively allow TSO based on operating conditions */ - if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) || - (tp->fw_needed && !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) - tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; - else { - tp->tg3_flags2 &= ~(TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG); - tp->fw_needed = NULL; + features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; + + /* 5700 B0 chips do not support checksumming correctly due + * to hardware bugs. + */ + if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) { + features |= NETIF_F_SG | NETIF_F_GRO | NETIF_F_RXCSUM; + +#ifndef BCM_NO_IPV6_CSUM + features |= NETIF_F_IP_CSUM; + if (tg3_flag(tp, 5755_PLUS)) + features |= NETIF_F_IPV6_CSUM; +#else + if (tg3_flag(tp, 5755_PLUS)) + features |= NETIF_F_HW_CSUM; + else + features |= NETIF_F_IP_CSUM; +#endif } +#if TG3_TSO_SUPPORT != 0 /* TSO is on by default on chips that support hardware TSO. * Firmware TSO on older chips gives lower performance, so it * is off by default, but can be enabled using ethtool. 
*/ - if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) && - (dev->features & NETIF_F_IP_CSUM)) - dev->features |= NETIF_F_TSO; - - if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) || - (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3)) { - if (dev->features & NETIF_F_IPV6_CSUM) - dev->features |= NETIF_F_TSO6; - if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) || + if ((tg3_flag(tp, HW_TSO_1) || + tg3_flag(tp, HW_TSO_2) || + tg3_flag(tp, HW_TSO_3)) && + (features & (NETIF_F_IP_CSUM | NETIF_F_HW_CSUM))) + features |= NETIF_F_TSO; + if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) { + if (features & NETIF_F_IPV6_CSUM) + features |= NETIF_F_TSO6; + if (tg3_flag(tp, HW_TSO_3) || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) - dev->features |= NETIF_F_TSO_ECN; + features |= NETIF_F_TSO_ECN; } #if defined(__VMKLNX__) && defined(TG3_INBOX) @@ -15348,18 +17577,38 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, * VMWare does not see significant performance * increases with TSO enabled. */ - dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN); - tp->tg3_flags2 &= ~(TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG); + features &= ~(NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN); + tg3_flag_clear(tp, TSO_CAPABLE); + tg3_flag_clear(tp, TSO_BUG); +#endif +#if defined(__VMKLNX__) + /* VMWare does not have skb_gso_segment() to workaround TSO_BUG */ + if (tg3_flag(tp, TSO_BUG)) + features &= ~(NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN); #endif #endif /* TG3_TSO_SUPPORT != 0 */ - if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) - tp->fw_needed = FIRMWARE_TG3; + dev->features |= features; + dev->vlan_features |= features; + +#ifdef BCM_HAS_FIX_FEATURES + /* + * Add loopback capability only for a subset of devices that support + * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY + * loopback for the remaining devices. 
+ */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 && + !tg3_flag(tp, CPMU_PRESENT)) + /* Add the loopback capability */ + features |= NETIF_F_LOOPBACK; +#endif + + dev->hw_features |= features; if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 && - !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) && + !tg3_flag(tp, TSO_CAPABLE) && !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) { - tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64; + tg3_flag_set(tp, MAX_RXPEND_64); tp->rx_pending = 63; } @@ -15367,22 +17616,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, if (err) { dev_err(&pdev->dev, "Could not obtain valid ethernet address, aborting\n"); - goto err_out_iounmap; - } - - if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { - tp->aperegs = pci_ioremap_bar(pdev, BAR_2); - if (!tp->aperegs) { - dev_err(&pdev->dev, - "Cannot map APE registers, aborting\n"); - err = -ENOMEM; - goto err_out_iounmap; - } - - tg3_ape_lock_init(tp); - - if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) - tg3_read_dash_ver(tp); + goto err_out_apeunmap; } /* @@ -15402,43 +17636,63 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, goto err_out_apeunmap; } - /* flow control autonegotiation is default behavior */ - tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG; - tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX; - intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW; rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW; sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW; - for (i = 0; i < TG3_IRQ_MAX_VECS; i++) { + for (i = 0; i < tp->irq_max; i++) { struct tg3_napi *tnapi = &tp->napi[i]; tnapi->tp = tp; tnapi->tx_pending = TG3_DEF_TX_RING_PENDING; tnapi->int_mbox = intmbx; - if (i < 4) + if (i <= 4) intmbx += 0x8; - else - intmbx += 0x4; + else { + if (intmbx & 0x4) + intmbx -= 0x4; + else + intmbx += 0xc; + } tnapi->consmbox = rcvmbx; tnapi->prodmbox = sndmbx; - if (i) { - tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1); -#ifdef TG3_NAPI - netif_napi_add(dev, &tnapi->napi, tg3_poll_msix, 64); +#ifdef TG3_VMWARE_NETQ_ENABLE + tg3_setup_prod_mboxes(tp, i); #endif - } else { + + if (i) + tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1); + else tnapi->coal_now = HOSTCC_MODE_NOW; -#ifdef TG3_NAPI - netif_napi_add(dev, &tnapi->napi, tg3_poll, 64); -#endif - } - if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX)) + if (!tg3_flag(tp, SUPPORT_MSIX)) break; +#ifdef TG3_VMWARE_NETQ_ENABLE + /* + * If we support NETQ, the first interrupt vector is the default + * rx queue. The first four queues follow the legacy RSS mailbox + * enumeration scheme. Then, the enumerations follow the quirky + * new way. + */ + if(tg3_flag(tp, IOV_CAPABLE)) { + if (i > 3) { + if (rcvmbx & 0x4) + rcvmbx -= 0x4; + else + rcvmbx += 0xc; + } else + rcvmbx += 0x8; + } + + if (!i) + continue; + + if (!tg3_flag(tp, IOV_CAPABLE)) + rcvmbx += 0x8; +#else /* * If we support MSIX, we'll be using RSS. 
If we're using * RSS, the first vector only handles link interrupts and the @@ -15450,6 +17704,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, continue; rcvmbx += 0x8; +#endif if (sndmbx & 0x4) sndmbx -= 0x4; @@ -15457,14 +17712,22 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, sndmbx += 0xc; } -#ifndef TG3_NAPI - dev->poll = tg3_poll; - dev->weight = 64; -#endif - tg3_init_coal(tp); - pci_set_drvdata(pdev, dev); +#ifdef BCM_HAS_IEEE1588_SUPPORT + tg3_init_hwtimer(tp); +#endif /* BCM_HAS_IEEE1588_SUPPORT */ + +#ifdef TG3_VMWARE_NETQ_ENABLE + if (tg3_flag(tp, IOV_CAPABLE) && + tg3_netq_force[tp->vmware.netq.index]) + tg3_netq_init(tp); +#endif + + if (tg3_flag(tp, 5717_PLUS)) { + /* Resume a low-power mode */ + tg3_frob_aux_power(tp, false); + } err = register_netdev(dev); if (err) { @@ -15479,7 +17742,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, print_mac(mac, dev->dev_addr)); #ifdef BCM_INCLUDE_PHYLIB_SUPPORT - if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) { + if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { struct phy_device *phydev; phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; netdev_info(dev, @@ -15487,19 +17750,29 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, phydev->drv->name, dev_name(&phydev->dev)); } else #endif + { + char *ethtype; + + if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) + ethtype = "10/100Base-TX"; + else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) + ethtype = "1000Base-SX"; + else + ethtype = "10/100/1000Base-T"; + netdev_info(dev, "attached PHY is %s (%s Ethernet) " - "(WireSpeed[%d])\n", tg3_phy_string(tp), - ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" : - ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" : - "10/100/1000Base-T")), - (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0); + "(WireSpeed[%d], EEE[%d])\n", + tg3_phy_string(tp), ethtype, + (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0, + (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0); + } netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n", - (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0, - (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0, - (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0, - (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0, - (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0); + (dev->features & NETIF_F_RXCSUM) != 0, + tg3_flag(tp, USE_LINKCHG_REG) != 0, + (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0, + tg3_flag(tp, ENABLE_ASF) != 0, + tg3_flag(tp, TSO_CAPABLE) != 0); netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n", tp->dma_rwctrl, pdev->dma_mask == DMA_BIT_MASK(32) ? 
32 : @@ -15507,7 +17780,16 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, #if defined(__VMKLNX__) netdev_info(dev, "Jumbo Frames capable[%d]\n", - (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) != 0); + tg3_flag(tp, JUMBO_CAPABLE) != 0); +#ifdef TG3_VMWARE_NETQ_ENABLE + if(tg3_flag(tp, IOV_CAPABLE)) + netdev_info(dev, "NetQueue module parameter index [%d]\n", + tp->vmware.netq.index); +#endif +#endif + +#ifdef BCM_HAS_PCI_EEH_SUPPORT + pci_save_state(pdev); #endif return 0; @@ -15531,6 +17813,9 @@ err_out_free_dev: kfree(dev); #endif +err_out_power_down: + pci_set_power_state(pdev, PCI_D3hot); + err_out_free_res: pci_release_regions(pdev); @@ -15550,11 +17835,9 @@ static void __devexit tg3_remove_one(struct pci_dev *pdev) if (tp->fw) tg3_priv_release_firmware(tp->fw); -#if (LINUX_VERSION_CODE >= 0x20600) - flush_scheduled_work(); -#endif + tg3_reset_task_cancel(tp); - if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { + if (tg3_flag(tp, USE_PHYLIB)) { tg3_phy_fini(tp); tg3_mdio_fini(tp); } @@ -15579,33 +17862,28 @@ static void __devexit tg3_remove_one(struct pci_dev *pdev) } } -#if (LINUX_VERSION_CODE < 0x2060b) -static int tg3_suspend(struct pci_dev *pdev, u32 state) +#ifdef SIMPLE_DEV_PM_OPS +static int tg3_suspend(struct device *device) #else static int tg3_suspend(struct pci_dev *pdev, pm_message_t state) #endif { +#ifdef SIMPLE_DEV_PM_OPS + struct pci_dev *pdev = to_pci_dev(device); +#endif struct net_device *dev = pci_get_drvdata(pdev); struct tg3 *tp = netdev_priv(dev); - pci_power_t target_state; int err; - /* PCI register 4 needs to be saved whether netif_running() or not. - * MSI address and data need to be saved if using MSI and - * netif_running(). - */ -#if (LINUX_VERSION_CODE < 0x2060a) - pci_save_state(pdev, tp->pci_cfg_state); -#else - pci_save_state(pdev); -#endif + if (tg3_invalid_pci_state(tp, state)) + return -EINVAL; + + tg3_pci_save_state(tp); if (!netif_running(dev)) return 0; -#if (LINUX_VERSION_CODE >= 0x20600) - flush_scheduled_work(); -#endif + tg3_reset_task_cancel(tp); tg3_phy_stop(tp); tg3_netif_stop(tp); @@ -15619,22 +17897,16 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state) tg3_full_lock(tp, 0); tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); - tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE; + tg3_flag_clear(tp, INIT_COMPLETE); tg3_full_unlock(tp); -#ifdef BCM_HAS_PCI_TARGET_STATE - target_state = pdev->pm_cap ? 
pci_target_state(pdev) : PCI_D3hot; -#else - target_state = pci_choose_state(pdev, state); -#endif - - err = tg3_set_power_state(tp, target_state); + err = tg3_power_down_prepare(tp); if (err) { int err2; tg3_full_lock(tp, 0); - tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; + tg3_flag_set(tp, INIT_COMPLETE); err2 = tg3_restart_hw(tp, 1); if (err2) goto out; @@ -15655,22 +17927,25 @@ out: return err; } +#ifdef SIMPLE_DEV_PM_OPS +static int tg3_resume(struct device *device) +#else static int tg3_resume(struct pci_dev *pdev) +#endif { +#ifdef SIMPLE_DEV_PM_OPS + struct pci_dev *pdev = to_pci_dev(device); +#endif struct net_device *dev = pci_get_drvdata(pdev); struct tg3 *tp = netdev_priv(dev); int err; -#if (LINUX_VERSION_CODE < 0x2060a) - pci_restore_state(tp->pdev, tp->pci_cfg_state); -#else - pci_restore_state(tp->pdev); -#endif + tg3_pci_restore_state(tp); if (!netif_running(dev)) return 0; - err = tg3_set_power_state(tp, PCI_D0); + err = tg3_power_up(tp); if (err) return err; @@ -15680,7 +17955,7 @@ static int tg3_resume(struct pci_dev *pdev) tg3_full_lock(tp, 0); - tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; + tg3_flag_set(tp, INIT_COMPLETE); err = tg3_restart_hw(tp, 1); if (err) goto out; @@ -15699,13 +17974,171 @@ out: return err; } +#ifdef CONFIG_PM_SLEEP +static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume); +#define TG3_PM_OPS (&tg3_pm_ops) + +#else + +#define TG3_PM_OPS NULL + +#endif /* CONFIG_PM_SLEEP */ + +#ifdef BCM_HAS_PCI_EEH_SUPPORT +/** + * tg3_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + */ +static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct tg3 *tp = netdev_priv(netdev); + pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET; + + netdev_info(netdev, "PCI I/O error detected\n"); + + rtnl_lock(); + + if (!netif_running(netdev)) + goto done; + + tg3_phy_stop(tp); + + tg3_netif_stop(tp); + + del_timer_sync(&tp->timer); + + /* Want to make sure that the reset task doesn't run */ + tg3_reset_task_cancel(tp); + + netif_device_detach(netdev); + + /* Clean up software state, even if MMIO is blocked */ + tg3_full_lock(tp, 0); + tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); + tg3_full_unlock(tp); + +done: + if (state == pci_channel_io_perm_failure) + err = PCI_ERS_RESULT_DISCONNECT; + else + pci_disable_device(pdev); + + rtnl_unlock(); + + return err; +} + +/** + * tg3_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. + * At this point, the card has experienced a hard reset, + * followed by fixups by BIOS, and has its config space + * set up identically to what it was at cold boot. 
+ */ +static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct tg3 *tp = netdev_priv(netdev); + pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; + int err; + + rtnl_lock(); + + if (pci_enable_device(pdev)) { + netdev_err(netdev, "Cannot re-enable PCI device after reset.\n"); + goto done; + } + + pci_set_master(pdev); + pci_restore_state(pdev); + pci_save_state(pdev); + + if (!netif_running(netdev)) { + rc = PCI_ERS_RESULT_RECOVERED; + goto done; + } + + err = tg3_power_up(tp); + if (err) + goto done; + + rc = PCI_ERS_RESULT_RECOVERED; + +done: + rtnl_unlock(); + + return rc; +} + +/** + * tg3_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells + * us that it's OK to resume normal operation. + */ +static void tg3_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct tg3 *tp = netdev_priv(netdev); + int err; + + rtnl_lock(); + + if (!netif_running(netdev)) + goto done; + + tg3_full_lock(tp, 0); + tg3_flag_set(tp, INIT_COMPLETE); + err = tg3_restart_hw(tp, 1); + tg3_full_unlock(tp); + if (err) { + netdev_err(netdev, "Cannot restart hardware after reset.\n"); + goto done; + } + + netif_device_attach(netdev); + + tp->timer.expires = jiffies + tp->timer_offset; + add_timer(&tp->timer); + + tg3_netif_start(tp); + + tg3_phy_start(tp); + +done: + rtnl_unlock(); +} + +static struct pci_error_handlers tg3_err_handler = { + .error_detected = tg3_io_error_detected, + .slot_reset = tg3_io_slot_reset, + .resume = tg3_io_resume +}; +#endif /* BCM_HAS_PCI_EEH_SUPPORT */ + static struct pci_driver tg3_driver = { .name = DRV_MODULE_NAME, .id_table = tg3_pci_tbl, .probe = tg3_init_one, .remove = __devexit_p(tg3_remove_one), +#ifdef BCM_HAS_PCI_EEH_SUPPORT + .err_handler = &tg3_err_handler, +#endif +#ifdef SIMPLE_DEV_PM_OPS + .driver.pm = TG3_PM_OPS, +#else .suspend = tg3_suspend, .resume = tg3_resume +#endif }; static int __init tg3_init(void) diff --git a/vmkdrivers/src_9/drivers/net/tg3/tg3.h b/vmkdrivers/src_9/drivers/net/tg3/tg3.h index a7251cdeccfcaa958a51ae2b43e9ac6a6ab7dca4..d9df431bc210ff3af7eda6e21916a3cd695db929 100644 --- a/vmkdrivers/src_9/drivers/net/tg3/tg3.h +++ b/vmkdrivers/src_9/drivers/net/tg3/tg3.h @@ -4,7 +4,7 @@ * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com) * Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com) * Copyright (C) 2004 Sun Microsystems Inc. - * Copyright (C) 2007-2011 Broadcom Corporation. + * Copyright (C) 2007-2012 Broadcom Corporation. */ #ifndef _T3_H @@ -25,10 +25,15 @@ #define TG3_BDINFO_NIC_ADDR 0xcUL /* 32-bit */ #define TG3_BDINFO_SIZE 0x10UL -#define TG3_RX_INTERNAL_RING_SZ_5906 32 +#define TG3_RX_STD_MAX_SIZE_5700 512 +#define TG3_RX_STD_MAX_SIZE_5717 2048 +#define TG3_RX_JMB_MAX_SIZE_5700 256 +#define TG3_RX_JMB_MAX_SIZE_5717 1024 +#define TG3_RX_RET_MAX_SIZE_5700 1024 +#define TG3_RX_RET_MAX_SIZE_5705 512 +#define TG3_RX_RET_MAX_SIZE_5717 4096 -#define RX_STD_MAX_SIZE_5705 512 -#define RX_JUMBO_MAX_SIZE 0xdeadbeef /* XXX */ +#define TG3_RSS_INDIR_TBL_SIZE 128 /* First 256 bytes are a mirror of PCI config space. 
*/ #define TG3PCI_VENDOR 0x00000000 @@ -48,13 +53,18 @@ #define TG3PCI_DEVICE_TIGON3_5785_F 0x16a0 /* 10/100 only */ #define TG3PCI_DEVICE_TIGON3_5717 0x1655 #define TG3PCI_DEVICE_TIGON3_5718 0x1656 -#define TG3PCI_DEVICE_TIGON3_5724 0x165c #define TG3PCI_DEVICE_TIGON3_57781 0x16b1 #define TG3PCI_DEVICE_TIGON3_57785 0x16b5 #define TG3PCI_DEVICE_TIGON3_57761 0x16b0 #define TG3PCI_DEVICE_TIGON3_57765 0x16b4 #define TG3PCI_DEVICE_TIGON3_57791 0x16b2 #define TG3PCI_DEVICE_TIGON3_57795 0x16b6 +#define TG3PCI_DEVICE_TIGON3_5719 0x1657 +#define TG3PCI_DEVICE_TIGON3_5720 0x165f +#define TG3PCI_DEVICE_TIGON3_57762 0x1682 +#define TG3PCI_DEVICE_TIGON3_57766 0x1686 +#define TG3PCI_DEVICE_TIGON3_57786 0x16b3 +#define TG3PCI_DEVICE_TIGON3_57782 0x16b7 /* 0x04 --> 0x2c unused */ #define TG3PCI_SUBVENDOR_ID_BROADCOM PCI_VENDOR_ID_BROADCOM #define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6 0x1644 @@ -142,6 +152,8 @@ #define CHIPREV_ID_57780_A1 0x57780001 #define CHIPREV_ID_5717_A0 0x05717000 #define CHIPREV_ID_57765_A0 0x57785000 +#define CHIPREV_ID_5719_A0 0x05719000 +#define CHIPREV_ID_5720_A0 0x05720000 #define GET_ASIC_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 12) #define ASIC_REV_5700 0x07 #define ASIC_REV_5701 0x00 @@ -162,6 +174,9 @@ #define ASIC_REV_57780 0x57780 #define ASIC_REV_5717 0x5717 #define ASIC_REV_57765 0x57785 +#define ASIC_REV_5719 0x5719 +#define ASIC_REV_5720 0x5720 +#define ASIC_REV_57766 0x57766 #define GET_CHIP_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 8) #define CHIPREV_5700_AX 0x70 #define CHIPREV_5700_BX 0x71 @@ -174,6 +189,7 @@ #define CHIPREV_5750_BX 0x41 #define CHIPREV_5784_AX 0x57840 #define CHIPREV_5761_AX 0x57610 +#define CHIPREV_57765_AX 0x577650 #define GET_METAL_REV(CHIP_REV_ID) ((CHIP_REV_ID) & 0xff) #define METAL_REV_A0 0x00 #define METAL_REV_A1 0x01 @@ -182,6 +198,7 @@ #define METAL_REV_B2 0x02 #define TG3PCI_DMA_RW_CTRL 0x0000006c #define DMA_RWCTRL_DIS_CACHE_ALIGNMENT 0x00000001 +#define DMA_RWCTRL_TAGGED_STAT_WA 0x00000080 #define DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK 0x00000380 #define DMA_RWCTRL_READ_BNDRY_MASK 0x00000700 #define DMA_RWCTRL_READ_BNDRY_DISAB 0x00000000 @@ -301,6 +318,7 @@ #define MAILBOX_RCVRET_CON_IDX_8 0x000002c0 /* 64-bit */ #define MAILBOX_RCVRET_CON_IDX_9 0x000002c8 /* 64-bit */ #define MAILBOX_RCVRET_CON_IDX_10 0x000002d0 /* 64-bit */ +#define MAILBOX_RCV_JUMBO_PROD_IDX_RING1 0x000002d4 /* 32-bit */ #define MAILBOX_RCVRET_CON_IDX_11 0x000002d8 /* 64-bit */ #define MAILBOX_RCVRET_CON_IDX_12 0x000002e0 /* 64-bit */ #define MAILBOX_RCVRET_CON_IDX_13 0x000002e8 /* 64-bit */ @@ -315,8 +333,10 @@ #define MAILBOX_SNDHOST_PROD_IDX_6 0x00000330 /* 64-bit */ #define MAILBOX_SNDHOST_PROD_IDX_7 0x00000338 /* 64-bit */ #define MAILBOX_SNDHOST_PROD_IDX_8 0x00000340 /* 64-bit */ +#define MAILBOX_RCV_JMB_PROD_IDX_RING12 0x00000340 /* 32-bit */ #define MAILBOX_SNDHOST_PROD_IDX_9 0x00000348 /* 64-bit */ #define MAILBOX_SNDHOST_PROD_IDX_10 0x00000350 /* 64-bit */ +#define MAILBOX_RCV_STD_PROD_IDX_RING1 0x00000354 /* 32-bit */ #define MAILBOX_SNDHOST_PROD_IDX_11 0x00000358 /* 64-bit */ #define MAILBOX_SNDHOST_PROD_IDX_12 0x00000360 /* 64-bit */ #define MAILBOX_SNDHOST_PROD_IDX_13 0x00000368 /* 64-bit */ @@ -472,6 +492,8 @@ #define TX_MODE_BIG_BCKOFF_ENABLE 0x00000020 #define TX_MODE_LONG_PAUSE_ENABLE 0x00000040 #define TX_MODE_MBUF_LOCKUP_FIX 0x00000100 +#define TX_MODE_JMB_FRM_LEN 0x00400000 +#define TX_MODE_CNT_DN_MODE 0x00800000 #define MAC_TX_STATUS 0x00000460 #define TX_STATUS_XOFFED 0x00000001 #define TX_STATUS_SENT_XOFF 0x00000002 @@ -486,6 +508,8 @@ #define 
TX_LENGTHS_IPG_SHIFT 8 #define TX_LENGTHS_IPG_CRS_MASK 0x00003000 #define TX_LENGTHS_IPG_CRS_SHIFT 12 +#define TX_LENGTHS_JMB_FRM_LEN_MSK 0x00ff0000 +#define TX_LENGTHS_CNT_DWN_VAL_MSK 0xff000000 #define MAC_RX_MODE 0x00000468 #define RX_MODE_RESET 0x00000001 #define RX_MODE_ENABLE 0x00000002 @@ -567,6 +591,8 @@ #define MAC_EXTADDR_5_HIGH 0x00000558 #define MAC_EXTADDR_5_LOW 0x0000055c #define MAC_EXTADDR_6_HIGH 0x00000560 +#define MAC_VRQ_ENABLE 0x00000560 +#define MAC_VRQ_ENABLE_DFLT_VRQ 0x00000001 #define MAC_EXTADDR_6_LOW 0x00000564 #define MAC_EXTADDR_7_HIGH 0x00000568 #define MAC_EXTADDR_7_LOW 0x0000056c @@ -746,7 +772,9 @@ #define SG_DIG_MAC_ACK_STATUS 0x00000004 #define SG_DIG_AUTONEG_COMPLETE 0x00000002 #define SG_DIG_AUTONEG_ERROR 0x00000001 -/* 0x5b8 --> 0x600 unused */ +#define TG3_TX_TSTAMP_LSB 0x000005c0 +#define TG3_TX_TSTAMP_MSB 0x000005c4 +/* 0x5c8 --> 0x600 unused */ #define MAC_TX_MAC_STATE_BASE 0x00000600 /* 16 bytes */ #define MAC_RX_MAC_STATE_BASE 0x00000610 /* 20 bytes */ /* 0x624 --> 0x670 unused */ @@ -763,7 +791,36 @@ #define MAC_RSS_HASH_KEY_7 0x0000068c #define MAC_RSS_HASH_KEY_8 0x00000690 #define MAC_RSS_HASH_KEY_9 0x00000694 -/* 0x698 --> 0x800 unused */ +/* 0x698 --> 0x6b0 unused */ + +#define TG3_RX_TSTAMP_LSB 0x000006b0 +#define TG3_RX_TSTAMP_MSB 0x000006b4 +/* 0x6b8 --> 0x6c8 unused */ + +#define TG3_RX_PTP_CTL 0x000006c8 +#define TG3_RX_PTP_CTL_SYNC_EVNT 0x00000001 +#define TG3_RX_PTP_CTL_DELAY_REQ 0x00000002 +#define TG3_RX_PTP_CTL_PDLAY_REQ 0x00000004 +#define TG3_RX_PTP_CTL_PDLAY_RES 0x00000008 +#define TG3_RX_PTP_CTL_ALL_V1_EVENTS (TG3_RX_PTP_CTL_SYNC_EVNT | \ + TG3_RX_PTP_CTL_DELAY_REQ) +#define TG3_RX_PTP_CTL_ALL_V2_EVENTS (TG3_RX_PTP_CTL_SYNC_EVNT | \ + TG3_RX_PTP_CTL_DELAY_REQ | \ + TG3_RX_PTP_CTL_PDLAY_REQ | \ + TG3_RX_PTP_CTL_PDLAY_RES) +#define TG3_RX_PTP_CTL_FOLLOW_UP 0x00000100 +#define TG3_RX_PTP_CTL_DELAY_RES 0x00000200 +#define TG3_RX_PTP_CTL_PDRES_FLW_UP 0x00000400 +#define TG3_RX_PTP_CTL_ANNOUNCE 0x00000800 +#define TG3_RX_PTP_CTL_SIGNALING 0x00001000 +#define TG3_RX_PTP_CTL_MANAGEMENT 0x00002000 +#define TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN 0x00800000 +#define TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN 0x01000000 +#define TG3_RX_PTP_CTL_RX_PTP_V2_EN (TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | \ + TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN) +#define TG3_RX_PTP_CTL_RX_PTP_V1_EN 0x02000000 +#define TG3_RX_PTP_CTL_HWTS_INTERLOCK 0x04000000 +/* 0x6cc --> 0x800 unused */ #define MAC_TX_STATS_OCTETS 0x00000800 #define MAC_TX_STATS_RESV1 0x00000804 @@ -973,12 +1030,21 @@ #define RCVDBDI_MODE_JUMBOBD_NEEDED 0x00000004 #define RCVDBDI_MODE_FRM_TOO_BIG 0x00000008 #define RCVDBDI_MODE_INV_RING_SZ 0x00000010 +#define RCVDBDI_MODE_LRG_RING_SZ 0x00010000 #define RCVDBDI_STATUS 0x00002404 #define RCVDBDI_STATUS_JUMBOBD_NEEDED 0x00000004 #define RCVDBDI_STATUS_FRM_TOO_BIG 0x00000008 #define RCVDBDI_STATUS_INV_RING_SZ 0x00000010 #define RCVDBDI_SPLIT_FRAME_MINSZ 0x00002408 -/* 0x240c --> 0x2440 unused */ +#define VRQ_STATUS 0x0000240c +#define VRQ_FLUSH_CTRL 0x00002410 +#define VRQ_FLUSH_ENABLE 0x00000001 +#define VRQ_FLUSH_RESET_ENABLE 0x00000002 +#define VRQ_FLUSH_STATUPDT_INT_ENABLE 0x00000004 +#define VRQ_FLUSH_DISCARD_PKT_ENABLE 0x00000008 +#define VRQ_FLUSH_SW_FLUSH 0x00000100 +/* 0x2414 --> 0x2440 unused */ + #define RCVDBDI_JUMBO_BD 0x00002440 /* TG3_BDINFO_... */ #define RCVDBDI_STD_BD 0x00002450 /* TG3_BDINFO_... */ #define RCVDBDI_MINI_BD 0x00002460 /* TG3_BDINFO_... 
*/ @@ -1005,6 +1071,9 @@ #define RCVDBDI_HWDIAG 0x000024c0 /* 0x24c4 --> 0x2800 unused */ +#define RCVDBDI_JMB_BD_RING1 0x00002500 +/* 0x2504 --> 0x2800 unused */ + /* Receive Data Completion Control */ #define RCVDCC_MODE 0x00002800 #define RCVDCC_MODE_RESET 0x00000001 @@ -1052,6 +1121,8 @@ #define RCVLSC_STATUS_ERROR_ATTN 0x00000004 /* 0x3408 --> 0x3600 unused */ +#define TG3_CPMU_DRV_STATUS 0x0000344c + /* CPMU registers */ #define TG3_CPMU_CTRL 0x00003600 #define CPMU_CTRL_LINK_IDLE_MODE 0x00000200 @@ -1077,6 +1148,14 @@ #define CPMU_HST_ACC_MACCLK_6_25 0x00130000 /* 0x3620 --> 0x3630 unused */ +#define TG3_CPMU_CLCK_ORIDE 0x00003624 +#define CPMU_CLCK_ORIDE_MAC_ORIDE_EN 0x80000000 + +#define TG3_CPMU_STATUS 0x0000362c +#define TG3_CPMU_STATUS_FMSK_5717 0x20000000 +#define TG3_CPMU_STATUS_FMSK_5719 0xc0000000 +#define TG3_CPMU_STATUS_FSHFT_5719 30 + #define TG3_CPMU_CLCK_STAT 0x00003630 #define CPMU_CLCK_STAT_MAC_CLCK_MASK 0x001f0000 #define CPMU_CLCK_STAT_MAC_CLCK_62_5 0x00000000 @@ -1090,7 +1169,32 @@ #define CPMU_MUTEX_GNT_DRIVER 0x00001000 #define TG3_CPMU_PHY_STRAP 0x00003664 #define TG3_CPMU_PHY_STRAP_IS_SERDES 0x00000020 -/* 0x3664 --> 0x3800 unused */ +/* 0x3664 --> 0x36b0 unused */ + +#define TG3_CPMU_EEE_MODE 0x000036b0 +#define TG3_CPMU_EEEMD_APE_TX_DET_EN 0x00000004 +#define TG3_CPMU_EEEMD_ERLY_L1_XIT_DET 0x00000008 +#define TG3_CPMU_EEEMD_SND_IDX_DET_EN 0x00000040 +#define TG3_CPMU_EEEMD_LPI_ENABLE 0x00000080 +#define TG3_CPMU_EEEMD_LPI_IN_TX 0x00000100 +#define TG3_CPMU_EEEMD_LPI_IN_RX 0x00000200 +#define TG3_CPMU_EEEMD_EEE_ENABLE 0x00100000 +#define TG3_CPMU_EEE_DBTMR1 0x000036b4 +#define TG3_CPMU_DBTMR1_PCIEXIT_2047US 0x07ff0000 +#define TG3_CPMU_DBTMR1_LNKIDLE_2047US 0x000007ff +#define TG3_CPMU_EEE_DBTMR2 0x000036b8 +#define TG3_CPMU_DBTMR2_APE_TX_2047US 0x07ff0000 +#define TG3_CPMU_DBTMR2_TXIDXEQ_2047US 0x000007ff +#define TG3_CPMU_EEE_LNKIDL_CTRL 0x000036bc +#define TG3_CPMU_EEE_LNKIDL_PCIE_NL0 0x01000000 +#define TG3_CPMU_EEE_LNKIDL_UART_IDL 0x00000004 +/* 0x36c0 --> 0x36d0 unused */ + +#define TG3_CPMU_EEE_CTRL 0x000036d0 +#define TG3_CPMU_EEE_CTRL_EXIT_16_5_US 0x0000019d +#define TG3_CPMU_EEE_CTRL_EXIT_36_US 0x00000384 +#define TG3_CPMU_EEE_CTRL_EXIT_20_1_US 0x000001f8 +/* 0x36d4 --> 0x3800 unused */ /* Mbuf cluster free registers */ #define MBFREE_MODE 0x00003800 @@ -1155,12 +1259,16 @@ #define DEFAULT_STAT_COAL_TICKS 0x000f4240 #define MAX_STAT_COAL_TICKS 0xd693d400 #define MIN_STAT_COAL_TICKS 0x00000064 +#define HOSTCC_PARAM_SET_RESET 0x00003c28 /* 0x3c2c --> 0x3c30 unused */ #define HOSTCC_STATS_BLK_HOST_ADDR 0x00003c30 /* 64-bit */ #define HOSTCC_STATUS_BLK_HOST_ADDR 0x00003c38 /* 64-bit */ #define HOSTCC_STATS_BLK_NIC_ADDR 0x00003c40 #define HOSTCC_STATUS_BLK_NIC_ADDR 0x00003c44 #define HOSTCC_FLOW_ATTN 0x00003c48 +#define HOSTCC_FLOW_ATTN_MBUF_LWM 0x00000040 +#define HOSTCC_FLOW_ATTN_RCB_MISCFG 0x00020000 +#define HOSTCC_FLOW_ATTN_RCV_BDI_ATTN 0x00800000 /* 0x3c4c --> 0x3c50 unused */ #define HOSTCC_JUMBO_CON_IDX 0x00003c50 #define HOSTCC_STD_CON_IDX 0x00003c54 @@ -1225,6 +1333,7 @@ #define BUFMGR_MODE_ATTN_ENABLE 0x00000004 #define BUFMGR_MODE_BM_TEST 0x00000008 #define BUFMGR_MODE_MBLOW_ATTN_ENAB 0x00000010 +#define BUFMGR_MODE_NO_TX_UNDERRUN 0x80000000 #define BUFMGR_STATUS 0x00004404 #define BUFMGR_STATUS_ERROR 0x00000004 #define BUFMGR_STATUS_MBLOW 0x00000010 @@ -1271,9 +1380,6 @@ #define BUFMGR_HWDIAG_2 0x00004454 /* 0x4458 --> 0x4800 unused */ -#define TG3_RDMA_RSRVCTRL_REG 0x00004900 -#define TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX 0x00000004 - /* Read DMA 
control registers */ #define RDMAC_MODE 0x00004800 #define RDMAC_MODE_RESET 0x00000001 @@ -1293,9 +1399,11 @@ #define RDMAC_MODE_MBUF_SBD_CRPT_ENAB 0x00002000 #define RDMAC_MODE_FIFO_SIZE_128 0x00020000 #define RDMAC_MODE_FIFO_LONG_BURST 0x00030000 +#define RDMAC_MODE_JMB_2K_MMRR 0x00800000 #define RDMAC_MODE_MULT_DMA_RD_DIS 0x01000000 #define RDMAC_MODE_IPV4_LSO_EN 0x08000000 #define RDMAC_MODE_IPV6_LSO_EN 0x10000000 +#define RDMAC_MODE_H2BNC_VLAN_DET 0x20000000 #define RDMAC_STATUS 0x00004804 #define RDMAC_STATUS_TGTABORT 0x00000004 #define RDMAC_STATUS_MSTABORT 0x00000008 @@ -1305,7 +1413,22 @@ #define RDMAC_STATUS_FIFOURUN 0x00000080 #define RDMAC_STATUS_FIFOOREAD 0x00000100 #define RDMAC_STATUS_LNGREAD 0x00000200 -/* 0x4808 --> 0x4c00 unused */ +/* 0x4808 --> 0x4900 unused */ + +#define TG3_RDMA_RSRVCTRL_REG 0x00004900 +#define TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX 0x00000004 +#define TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K 0x00000c00 +#define TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK 0x00000ff0 +#define TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K 0x000c0000 +#define TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK 0x000ff000 +#define TG3_RDMA_RSRVCTRL_TXMRGN_320B 0x28000000 +#define TG3_RDMA_RSRVCTRL_TXMRGN_MASK 0xffe00000 +/* 0x4904 --> 0x4910 unused */ + +#define TG3_LSO_RD_DMA_CRPTEN_CTRL 0x00004910 +#define TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K 0x00030000 +#define TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K 0x000c0000 +/* 0x4914 --> 0x4c00 unused */ /* Write DMA control registers */ #define WDMAC_MODE 0x00004c00 @@ -1405,8 +1528,24 @@ #define VCPU_CFGSHDW_WOL_MAGPKT 0x00000004 #define VCPU_CFGSHDW_ASPM_DBNC 0x00001000 +#define MAC_VRQFLT_CFG 0x00005400 +#define MAC_VRQFLT_ELEM_EN 0x80000000 +#define MAC_VRQFLT_HDR_VLAN 0x0000e000 +#define MAC_VRQFLT_PTRN 0x00005480 +#define MAC_VRQFLT_PTRN_VLANID 0x0000ffff +#define MAC_VRQFLT_FLTSET 0x00005500 + /* Mailboxes */ #define GRCMBOX_BASE 0x00005600 +#define MAC_VRQMAP_1H 0x00005600 +#define MAC_VRQMAP_1H_PTA_PFEN 0x00000020 +#define MAC_VRQMAP_2H 0x00005604 +#define MAC_VRQMAP_2H_PTA_VFEN 0x00000020 +#define MAC_VRQMAP_2H_PTA_AND 0x00000000 +#define MAC_VRQMAP_2H_PTA_OR 0x00000040 +#define MAC_VRQMAP_2H_PTA_EN 0x00000080 +#define MAC_VRQ_PMATCH_HI_5 0x00005690 +#define MAC_VRQ_PMATCH_LO_5 0x00005694 #define GRCMBOX_INTERRUPT_0 0x00005800 /* 64-bit */ #define GRCMBOX_INTERRUPT_1 0x00005808 /* 64-bit */ #define GRCMBOX_INTERRUPT_2 0x00005810 /* 64-bit */ @@ -1557,6 +1696,7 @@ #define MSGINT_MODE_ONE_SHOT_DISABLE 0x00000020 #define MSGINT_MODE_MULTIVEC_EN 0x00000080 #define MSGINT_STATUS 0x00006004 +#define MSGINT_STATUS_MSI_REQ 0x00000001 #define MSGINT_FIFO 0x00006008 /* 0x600c --> 0x6400 unused */ @@ -1573,6 +1713,9 @@ #define GRC_MODE_WSWAP_NONFRM_DATA 0x00000004 #define GRC_MODE_BSWAP_DATA 0x00000010 #define GRC_MODE_WSWAP_DATA 0x00000020 +#define GRC_MODE_BYTE_SWAP_B2HRX_DATA 0x00000040 +#define GRC_MODE_WORD_SWAP_B2HRX_DATA 0x00000080 +#define GRC_MODE_IOV_ENABLE 0x00000100 #define GRC_MODE_SPLITHDR 0x00000100 #define GRC_MODE_NOFRM_CRACKING 0x00000200 #define GRC_MODE_INCL_CRC 0x00000400 @@ -1580,8 +1723,11 @@ #define GRC_MODE_NOIRQ_ON_SENDS 0x00002000 #define GRC_MODE_NOIRQ_ON_RCV 0x00004000 #define GRC_MODE_FORCE_PCI32BIT 0x00008000 +#define GRC_MODE_B2HRX_ENABLE 0x00008000 #define GRC_MODE_HOST_STACKUP 0x00010000 #define GRC_MODE_HOST_SENDBDS 0x00020000 +#define GRC_MODE_HTX2B_ENABLE 0x00040000 +#define GRC_MODE_TIME_SYNC_ENABLE 0x00080000 #define GRC_MODE_NO_TX_PHDR_CSUM 0x00100000 #define GRC_MODE_NVRAM_WR_ENABLE 0x00200000 #define GRC_MODE_PCIE_TL_SEL 0x00000000 @@ -1684,7 
+1830,11 @@ #define GRC_VCPU_EXT_CTRL_DISABLE_WOL 0x20000000 #define GRC_FASTBOOT_PC 0x00006894 /* 5752, 5755, 5787 */ -/* 0x6c00 --> 0x7000 unused */ +#define TG3_EAV_REF_CLCK_LSB 0x00006900 +#define TG3_EAV_REF_CLCK_MSB 0x00006904 +#define TG3_EAV_REF_CLCK_CTL 0x00006908 +#define TG3_EAV_REF_CLCK_CTL_STOP 0x00000002 +/* 0x690c --> 0x7000 unused */ /* NVRAM Control registers */ #define NVRAM_CMD 0x00007000 @@ -1778,6 +1928,38 @@ #define FLASH_5717VENDOR_ATMEL_45USPT 0x03400000 #define FLASH_5717VENDOR_ST_25USPT 0x03400002 #define FLASH_5717VENDOR_ST_45USPT 0x03400001 +#define FLASH_5720_EEPROM_HD 0x00000001 +#define FLASH_5720_EEPROM_LD 0x00000003 +#define FLASH_5720VENDOR_M_ATMEL_DB011D 0x01000000 +#define FLASH_5720VENDOR_M_ATMEL_DB021D 0x01000002 +#define FLASH_5720VENDOR_M_ATMEL_DB041D 0x01000001 +#define FLASH_5720VENDOR_M_ATMEL_DB081D 0x01000003 +#define FLASH_5720VENDOR_M_ST_M25PE10 0x02000000 +#define FLASH_5720VENDOR_M_ST_M25PE20 0x02000002 +#define FLASH_5720VENDOR_M_ST_M25PE40 0x02000001 +#define FLASH_5720VENDOR_M_ST_M25PE80 0x02000003 +#define FLASH_5720VENDOR_M_ST_M45PE10 0x03000000 +#define FLASH_5720VENDOR_M_ST_M45PE20 0x03000002 +#define FLASH_5720VENDOR_M_ST_M45PE40 0x03000001 +#define FLASH_5720VENDOR_M_ST_M45PE80 0x03000003 +#define FLASH_5720VENDOR_A_ATMEL_DB011B 0x01800000 +#define FLASH_5720VENDOR_A_ATMEL_DB021B 0x01800002 +#define FLASH_5720VENDOR_A_ATMEL_DB041B 0x01800001 +#define FLASH_5720VENDOR_A_ATMEL_DB011D 0x01c00000 +#define FLASH_5720VENDOR_A_ATMEL_DB021D 0x01c00002 +#define FLASH_5720VENDOR_A_ATMEL_DB041D 0x01c00001 +#define FLASH_5720VENDOR_A_ATMEL_DB081D 0x01c00003 +#define FLASH_5720VENDOR_A_ST_M25PE10 0x02800000 +#define FLASH_5720VENDOR_A_ST_M25PE20 0x02800002 +#define FLASH_5720VENDOR_A_ST_M25PE40 0x02800001 +#define FLASH_5720VENDOR_A_ST_M25PE80 0x02800003 +#define FLASH_5720VENDOR_A_ST_M45PE10 0x02c00000 +#define FLASH_5720VENDOR_A_ST_M45PE20 0x02c00002 +#define FLASH_5720VENDOR_A_ST_M45PE40 0x02c00001 +#define FLASH_5720VENDOR_A_ST_M45PE80 0x02c00003 +#define FLASH_5720VENDOR_ATMEL_45USPT 0x03c00000 +#define FLASH_5720VENDOR_ST_25USPT 0x03c00002 +#define FLASH_5720VENDOR_ST_45USPT 0x03c00001 #define NVRAM_CFG1_5752PAGE_SIZE_MASK 0x70000000 #define FLASH_5752PAGE_SIZE_256 0x00000000 #define FLASH_5752PAGE_SIZE_512 0x10000000 @@ -1858,11 +2040,16 @@ /* Alternate PCIE definitions */ #define TG3_PCIE_TLDLPL_PORT 0x00007c00 +#define TG3_PCIE_DL_LO_FTSMAX 0x0000000c +#define TG3_PCIE_DL_LO_FTSMAX_MSK 0x000000ff +#define TG3_PCIE_DL_LO_FTSMAX_VAL 0x0000002c #define TG3_PCIE_PL_LO_PHYCTL1 0x00000004 #define TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN 0x00001000 #define TG3_PCIE_PL_LO_PHYCTL5 0x00000014 #define TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ 0x80000000 +#define TG3_REG_BLK_SIZE 0x00008000 + /* OTP bit definitions */ #define TG3_OTP_AGCTGT_MASK 0x000000e0 #define TG3_OTP_AGCTGT_SHIFT 1 @@ -1914,7 +2101,9 @@ #define TG3_NVM_DIR_END 0x78 #define TG3_NVM_DIRENT_SIZE 0xc #define TG3_NVM_DIRTYPE_SHIFT 24 +#define TG3_NVM_DIRTYPE_LENMSK 0x003fffff #define TG3_NVM_DIRTYPE_ASFINI 1 +#define TG3_NVM_DIRTYPE_EXTVPD 20 #define TG3_NVM_PTREV_BCVER 0x94 #define TG3_NVM_BCVER_MAJMSK 0x0000ff00 #define TG3_NVM_BCVER_MAJSFT 8 @@ -2018,6 +2207,8 @@ #define NIC_SRAM_DATA_CFG_3 0x00000d3c #define NIC_SRAM_ASPM_DEBOUNCE 0x00000002 +#define NIC_SRAM_LNK_FLAP_AVOID 0x00400000 +#define NIC_SRAM_1G_ON_VAUX_OK 0x00800000 #define NIC_SRAM_DATA_CFG_4 0x00000d60 #define NIC_SRAM_GMII_MODE 0x00000002 @@ -2025,6 +2216,10 @@ #define NIC_SRAM_RGMII_EXT_IBND_RX_EN 0x00000008 #define 
NIC_SRAM_RGMII_EXT_IBND_TX_EN 0x00000010 +#define NIC_SRAM_CPMU_STATUS 0x00000e00 +#define NIC_SRAM_CPMUSTAT_SIG 0x0000362c +#define NIC_SRAM_CPMUSTAT_SIG_MSK 0x0000ffff + #define NIC_SRAM_RX_MINI_BUFFER_DESC 0x00001000 #define NIC_SRAM_DMA_DESC_POOL_BASE 0x00002000 @@ -2038,6 +2233,13 @@ #define NIC_SRAM_MBUF_POOL_BASE5705 0x00010000 #define NIC_SRAM_MBUF_POOL_SIZE5705 0x0000e000 +#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5700 128 +#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5755 64 +#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5906 32 + +#define TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700 64 +#define TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717 16 + /* Currently this is fixed. */ #define TG3_PHY_PCIE_ADDR 0x00 @@ -2045,13 +2247,9 @@ /*** Tigon3 specific PHY MII registers. ***/ -#define TG3_BMCR_SPEED1000 0x0040 - -#define MII_TG3_CTRL 0x09 /* 1000-baseT control register */ -#define MII_TG3_CTRL_ADV_1000_HALF 0x0100 -#define MII_TG3_CTRL_ADV_1000_FULL 0x0200 -#define MII_TG3_CTRL_AS_MASTER 0x0800 -#define MII_TG3_CTRL_ENABLE_AS_MASTER 0x1000 +#define MII_TG3_MMD_CTRL 0x0d /* MMD Access Control register */ +#define MII_TG3_MMD_CTRL_DATA_NOINC 0x4000 +#define MII_TG3_MMD_ADDRESS 0x0e /* MMD Address Data register */ #define MII_TG3_EXT_CTRL 0x10 /* Extended control register */ #define MII_TG3_EXT_CTRL_FIFO_ELASTIC 0x0001 @@ -2060,19 +2258,28 @@ #define MII_TG3_EXT_CTRL_TBI 0x8000 #define MII_TG3_EXT_STAT 0x11 /* Extended status register */ +#define MII_TG3_EXT_STAT_MDIX 0x2000 #define MII_TG3_EXT_STAT_LPASS 0x0100 +#define MII_TG3_RXR_COUNTERS 0x14 /* Local/Remote Receiver Counts */ #define MII_TG3_DSP_RW_PORT 0x15 /* DSP coefficient read/write port */ - +#define MII_TG3_DSP_CONTROL 0x16 /* DSP control register */ #define MII_TG3_DSP_ADDRESS 0x17 /* DSP address register */ #define MII_TG3_DSP_TAP1 0x0001 #define MII_TG3_DSP_TAP1_AGCTGT_DFLT 0x0007 +#define MII_TG3_DSP_TAP26 0x001a +#define MII_TG3_DSP_TAP26_ALNOKO 0x0001 +#define MII_TG3_DSP_TAP26_RMRXSTO 0x0002 +#define MII_TG3_DSP_TAP26_OPCSINPT 0x0004 #define MII_TG3_DSP_AADJ1CH0 0x001f +#define MII_TG3_DSP_CH34TP2 0x4022 +#define MII_TG3_DSP_CH34TP2_HIBW01 0x01ff #define MII_TG3_DSP_AADJ1CH3 0x601f #define MII_TG3_DSP_AADJ1CH3_ADCCKADJ 0x0002 #define MII_TG3_DSP_TLER 0x0d40 /* Top Level Expansion reg */ #define MII_TG3_DSP_TLER_AUTOGREEEN_EN 0x0001 +#define MII_TG3_DSP_EXP1_INT_STAT 0x0f01 #define MII_TG3_DSP_EXP8 0x0f08 #define MII_TG3_DSP_EXP8_REJ2MHz 0x0001 #define MII_TG3_DSP_EXP8_AEDW 0x0200 @@ -2083,19 +2290,27 @@ #define MII_TG3_AUX_CTRL 0x18 /* auxilliary control register */ +#define MII_TG3_AUXCTL_SHDWSEL_AUXCTL 0x0000 +#define MII_TG3_AUXCTL_ACTL_TX_6DB 0x0400 +#define MII_TG3_AUXCTL_ACTL_SMDSP_ENA 0x0800 +#define MII_TG3_AUXCTL_ACTL_EXTPKTLEN 0x4000 +#define MII_TG3_AUXCTL_ACTL_EXTLOOPBK 0x8000 + +#define MII_TG3_AUXCTL_SHDWSEL_PWRCTL 0x0002 +#define MII_TG3_AUXCTL_PCTL_WOL_EN 0x0008 #define MII_TG3_AUXCTL_PCTL_100TX_LPWR 0x0010 #define MII_TG3_AUXCTL_PCTL_SPR_ISOLATE 0x0020 +#define MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC 0x0040 #define MII_TG3_AUXCTL_PCTL_VREG_11V 0x0180 -#define MII_TG3_AUXCTL_SHDWSEL_PWRCTL 0x0002 -#define MII_TG3_AUXCTL_MISC_WREN 0x8000 -#define MII_TG3_AUXCTL_MISC_FORCE_AMDIX 0x0200 -#define MII_TG3_AUXCTL_MISC_RDSEL_MISC 0x7000 -#define MII_TG3_AUXCTL_SHDWSEL_MISC 0x0007 +#define MII_TG3_AUXCTL_SHDWSEL_MISCTEST 0x0004 -#define MII_TG3_AUXCTL_ACTL_SMDSP_ENA 0x0800 -#define MII_TG3_AUXCTL_ACTL_TX_6DB 0x0400 -#define MII_TG3_AUXCTL_SHDWSEL_AUXCTL 0x0000 +#define MII_TG3_AUXCTL_SHDWSEL_MISC 0x0007 +#define MII_TG3_AUXCTL_MISC_WIRESPD_EN 0x0010 +#define 
MII_TG3_AUXCTL_MISC_RGMII_OOBSC 0x0020 +#define MII_TG3_AUXCTL_MISC_FORCE_AMDIX 0x0200 +#define MII_TG3_AUXCTL_MISC_RDSEL_SHIFT 12 +#define MII_TG3_AUXCTL_MISC_WREN 0x8000 #define MII_TG3_AUX_STAT 0x19 /* auxilliary status register */ #define MII_TG3_AUX_STAT_LPASS 0x0004 @@ -2119,13 +2334,9 @@ #define MII_TG3_INT_DUPLEXCHG 0x0008 #define MII_TG3_INT_ANEG_PAGE_RX 0x0400 -#define MII_TG3_MISC_SHDW 0x1c +#define MII_TG3_MISC_SHDW 0x1c /* Misc shadow register */ #define MII_TG3_MISC_SHDW_WREN 0x8000 -#define MII_TG3_MISC_SHDW_APD_WKTM_84MS 0x0001 -#define MII_TG3_MISC_SHDW_APD_ENABLE 0x0020 -#define MII_TG3_MISC_SHDW_APD_SEL 0x2800 - #define MII_TG3_MISC_SHDW_SCR5_C125OE 0x0001 #define MII_TG3_MISC_SHDW_SCR5_DLLAPD 0x0002 #define MII_TG3_MISC_SHDW_SCR5_SDTL 0x0004 @@ -2134,22 +2345,42 @@ #define MII_TG3_MISC_SHDW_SCR5_TRDDAPD 0x0100 #define MII_TG3_MISC_SHDW_SCR5_SEL 0x1400 +#define MII_TG3_MISC_SHDW_APD_WKTM_84MS 0x0001 +#define MII_TG3_MISC_SHDW_APD_ENABLE 0x0020 +#define MII_TG3_MISC_SHDW_APD_SEL 0x2800 + +#define MII_TG3_MISC_SHDW_RGMII_MODESEL0 0x0008 +#define MII_TG3_MISC_SHDW_RGMII_MODESEL1 0x0010 #define MII_TG3_MISC_SHDW_RGMII_SEL 0x2c00 #define MII_TG3_TEST1 0x1e #define MII_TG3_TEST1_TRIM_EN 0x0010 #define MII_TG3_TEST1_CRC_EN 0x8000 +/* Clause 45 expansion registers */ +#define TG3_CL45_D7_EEEADV_CAP 0x003c +#define TG3_CL45_D7_EEEADV_CAP_100TX 0x0002 +#define TG3_CL45_D7_EEEADV_CAP_1000T 0x0004 +#define TG3_CL45_D7_EEERES_STAT 0x803e +#define TG3_CL45_D7_EEERES_STAT_LP_100TX 0x0002 +#define TG3_CL45_D7_EEERES_STAT_LP_1000T 0x0004 + /* Fast Ethernet Tranceiver definitions */ #define MII_TG3_FET_PTEST 0x17 +#define MII_TG3_FET_PTEST_TRIM_SEL 0x0010 +#define MII_TG3_FET_PTEST_TRIM_2 0x0002 #define MII_TG3_FET_PTEST_FRC_TX_LINK 0x1000 #define MII_TG3_FET_PTEST_FRC_TX_LOCK 0x0800 +#define MII_TG3_FET_GEN_STAT 0x1c +#define MII_TG3_FET_GEN_STAT_MDIXSTAT 0x2000 + #define MII_TG3_FET_TEST 0x1f #define MII_TG3_FET_SHADOW_EN 0x0080 #define MII_TG3_FET_SHDW_MISCCTRL 0x10 +#define MII_TG3_FET_SHDW_MISCCTRL_ELBK 0x1000 #define MII_TG3_FET_SHDW_MISCCTRL_MDIX 0x4000 #define MII_TG3_FET_SHDW_AUXMODE4 0x1a @@ -2162,6 +2393,8 @@ /* APE registers. Accessible through BAR1 */ +#define TG3_APE_GPIO_MSG 0x0008 +#define TG3_APE_GPIO_MSG_SHIFT 4 #define TG3_APE_EVENT 0x000c #define APE_EVENT_1 0x00000001 #define TG3_APE_LOCK_REQ 0x002c @@ -2222,8 +2455,13 @@ #define APE_PER_LOCK_GRANT_DRIVER 0x00001000 /* APE convenience enumerations. */ +#define TG3_APE_LOCK_PHY0 0 #define TG3_APE_LOCK_GRC 1 +#define TG3_APE_LOCK_PHY1 2 +#define TG3_APE_LOCK_PHY2 3 #define TG3_APE_LOCK_MEM 4 +#define TG3_APE_LOCK_PHY3 5 +#define TG3_APE_LOCK_GPIO 7 @@ -2274,6 +2512,7 @@ struct tg3_tx_buffer_desc { #define TXD_FLAG_IP_FRAG 0x0008 #define TXD_FLAG_JMB_PKT 0x0008 #define TXD_FLAG_IP_FRAG_END 0x0010 +#define TXD_FLAG_HWTSTAMP 0x0020 #define TXD_FLAG_VLAN 0x0040 #define TXD_FLAG_COAL_NOW 0x0080 #define TXD_FLAG_CPU_PRE_DMA 0x0100 @@ -2315,6 +2554,9 @@ struct tg3_rx_buffer_desc { #define RXD_FLAG_IP_CSUM 0x1000 #define RXD_FLAG_TCPUDP_CSUM 0x2000 #define RXD_FLAG_IS_TCP 0x4000 +#define RXD_FLAG_PTPSTAT_MASK 0x0210 +#define RXD_FLAG_PTPSTAT_PTPV1 0x0010 +#define RXD_FLAG_PTPSTAT_PTPV2 0x0200 u32 ip_tcp_csum; #define RXD_IPCSUM_MASK 0xffff0000 @@ -2514,7 +2756,12 @@ struct tg3_hw_stats { tg3_stat64_t nic_avoided_irqs; tg3_stat64_t nic_tx_threshold_hit; - u8 __reserved4[0xb00-0x9c0]; + /* NOT a part of the hardware statistics block format. + * These stats are here as storage for tg3_periodic_fetch_stats(). 
+ */ + tg3_stat64_t mbuf_lwm_thresh_hit; + + u8 __reserved4[0xb00-0x9c8]; }; /* 'mapping' is superfluous as the chip does not write into @@ -2526,8 +2773,10 @@ struct ring_info { DEFINE_DMA_UNMAP_ADDR(mapping); }; -struct tg3_config_info { - u32 flags; +struct tg3_tx_ring_info { + struct sk_buff *skb; + DEFINE_DMA_UNMAP_ADDR(mapping); + bool fragmented; }; struct tg3_link_config { @@ -2546,11 +2795,11 @@ struct tg3_link_config { #define DUPLEX_INVALID 0xff #define AUTONEG_INVALID 0xff u16 active_speed; + u32 rmt_adv; /* When we go in and out of low power mode we need * to swap with this state. */ - int phy_is_low_power; u16 orig_speed; u8 orig_duplex; u8 orig_autoneg; @@ -2651,9 +2900,19 @@ struct tg3_ethtool_stats { u64 nic_irqs; u64 nic_avoided_irqs; u64 nic_tx_threshold_hit; + + u64 mbuf_lwm_thresh_hit; }; +#if defined(__VMKLNX__) +#include "tg3_vmware.h" +#endif + struct tg3_rx_prodring_set { +#ifdef TG3_VMWARE_NETQ_ENABLE + u32 rx_std_mbox; + u32 rx_jmb_mbox; +#endif u32 rx_std_prod_idx; u32 rx_std_cons_idx; u32 rx_jmb_prod_idx; @@ -2666,10 +2925,20 @@ struct tg3_rx_prodring_set { dma_addr_t rx_jmb_mapping; }; -#if defined(__VMKLNX__) && defined(TG3_INBOX) -#define TG3_IRQ_MAX_VECS 1 -#else -#define TG3_IRQ_MAX_VECS 5 +#define TG3_RSS_MAX_NUM_QS 4 +#define TG3_IRQ_MAX_VECS_RSS TG3_RSS_MAX_NUM_QS + 1 + +#if defined(__VMKLNX__) +#if defined(TG3_INBOX) + #define TG3_IRQ_MAX_VECS_IOV 1 +#elif defined(TG3_VMWARE_NETQ_ENABLE) + #define TG3_IRQ_MAX_VECS_IOV 17 + #define TG3_IRQ_MAX_VECS TG3_IRQ_MAX_VECS_IOV +#endif +#endif /* __VMKLNX__ */ + +#ifndef TG3_IRQ_MAX_VECS +#define TG3_IRQ_MAX_VECS TG3_IRQ_MAX_VECS_RSS #endif struct tg3_napi { @@ -2679,23 +2948,27 @@ struct tg3_napi { struct tg3 *tp; struct tg3_hw_status *hw_status; + u32 chk_msi_cnt; u32 last_tag; u32 last_irq_tag; u32 int_mbox; u32 coal_now; - u32 tx_prod; - u32 tx_cons; - u32 tx_pending; - u32 prodmbox; - u32 consmbox; + u32 consmbox ____cacheline_aligned; u32 rx_rcb_ptr; + u32 last_rx_cons; volatile u16 *rx_rcb_prod_idx; - struct tg3_rx_prodring_set *prodring; - + struct tg3_rx_prodring_set *srcprodring; + struct tg3_rx_prodring_set prodring; struct tg3_rx_buffer_desc *rx_rcb; + + u32 tx_prod ____cacheline_aligned; + u32 tx_cons; + u32 tx_pending; + u32 last_tx_cons; + u32 prodmbox; struct tg3_tx_buffer_desc *tx_ring; - struct ring_info *tx_buffers; + struct tg3_tx_ring_info *tx_buffers; dma_addr_t status_mapping; dma_addr_t rx_rcb_mapping; @@ -2704,10 +2977,95 @@ struct tg3_napi { char irq_lbl[IFNAMSIZ]; unsigned int irq_vec; - u32 last_rx_cons; - u32 last_tx_cons; - u32 chk_msi_cnt; - u32 coalesce_tries; +#if defined(__VMKLNX__) && !defined(TG3_VMWARE_NETQ_DISABLE) + struct tg3_netq_napi netq; +#endif +}; + +enum TG3_FLAGS { + TG3_FLAG_TAGGED_STATUS = 0, + TG3_FLAG_TXD_MBOX_HWBUG, + TG3_FLAG_USE_LINKCHG_REG, + TG3_FLAG_ERROR_PROCESSED, + TG3_FLAG_ENABLE_ASF, + TG3_FLAG_ASPM_WORKAROUND, + TG3_FLAG_POLL_SERDES, + TG3_FLAG_MBOX_WRITE_REORDER, + TG3_FLAG_PCIX_TARGET_HWBUG, + TG3_FLAG_WOL_SPEED_100MB, + TG3_FLAG_WOL_ENABLE, + TG3_FLAG_EEPROM_WRITE_PROT, + TG3_FLAG_NVRAM, + TG3_FLAG_NVRAM_BUFFERED, + TG3_FLAG_SUPPORT_MSI, + TG3_FLAG_SUPPORT_MSIX, + TG3_FLAG_PCIX_MODE, + TG3_FLAG_PCI_HIGH_SPEED, + TG3_FLAG_PCI_32BIT, + TG3_FLAG_SRAM_USE_CONFIG, + TG3_FLAG_TX_RECOVERY_PENDING, + TG3_FLAG_WOL_CAP, + TG3_FLAG_JUMBO_RING_ENABLE, + TG3_FLAG_PAUSE_AUTONEG, + TG3_FLAG_CPMU_PRESENT, + TG3_FLAG_40BIT_DMA_BUG, + TG3_FLAG_BROKEN_CHECKSUMS, + TG3_FLAG_JUMBO_CAPABLE, + TG3_FLAG_CHIP_RESETTING, + TG3_FLAG_INIT_COMPLETE, + TG3_FLAG_TSO_BUG, + 
TG3_FLAG_MAX_RXPEND_64, + TG3_FLAG_TSO_CAPABLE, + TG3_FLAG_PCI_EXPRESS, /* BCM5785 + pci_is_pcie() */ + TG3_FLAG_ASF_NEW_HANDSHAKE, + TG3_FLAG_HW_AUTONEG, + TG3_FLAG_IS_NIC, + TG3_FLAG_FLASH, + TG3_FLAG_HW_TSO_1, + TG3_FLAG_HW_TSO_2, + TG3_FLAG_HW_TSO_3, + TG3_FLAG_USING_MSI, + TG3_FLAG_USING_MSIX, + TG3_FLAG_ICH_WORKAROUND, + TG3_FLAG_1SHOT_MSI, + TG3_FLAG_NO_FWARE_REPORTED, + TG3_FLAG_NO_NVRAM_ADDR_TRANS, + TG3_FLAG_ENABLE_APE, + TG3_FLAG_PROTECTED_NVRAM, + TG3_FLAG_5701_DMA_BUG, + TG3_FLAG_USE_PHYLIB, + TG3_FLAG_MDIOBUS_INITED, + TG3_FLAG_LRG_PROD_RING_CAP, + TG3_FLAG_RGMII_INBAND_DISABLE, + TG3_FLAG_RGMII_EXT_IBND_RX_EN, + TG3_FLAG_RGMII_EXT_IBND_TX_EN, + TG3_FLAG_CLKREQ_BUG, + TG3_FLAG_NO_NVRAM, + TG3_FLAG_ENABLE_RSS, + TG3_FLAG_ENABLE_TSS, + TG3_FLAG_SHORT_DMA_BUG, + TG3_FLAG_USE_JUMBO_BDFLAG, + TG3_FLAG_L1PLLPD_EN, + TG3_FLAG_APE_HAS_NCSI, + TG3_FLAG_TX_TSTAMP_EN, + TG3_FLAG_4K_FIFO_LIMIT, + TG3_FLAG_NO_TSO_BD_LIMIT, + TG3_FLAG_RESET_TASK_PENDING, + TG3_FLAG_USER_INDIR_TBL, + TG3_FLAG_5705_PLUS, + TG3_FLAG_IS_5788, + TG3_FLAG_5750_PLUS, + TG3_FLAG_5780_CLASS, + TG3_FLAG_5755_PLUS, + TG3_FLAG_57765_PLUS, + TG3_FLAG_57765_CLASS, + TG3_FLAG_5717_PLUS, + + TG3_FLAG_IOV_CAPABLE, + TG3_FLAG_ENABLE_IOV, + + /* Add new flags before this comment and TG3_FLAG_NUMBER_OF_FLAGS */ + TG3_FLAG_NUMBER_OF_FLAGS, /* Last entry in enum TG3_FLAGS */ }; struct tg3 { @@ -2733,7 +3091,7 @@ struct tg3 { /* SMP locking strategy: * * lock: Held during reset, PHY access, timer, and when - * updating tg3_flags and tg3_flags2. + * updating tg3_flags. * * netif_tx_lock: Held during tg3_start_xmit. tg3_tx holds * netif_tx_lock when it needs to call @@ -2765,138 +3123,55 @@ struct tg3 { u32 coal_now; u32 msg_enable; +#ifdef BCM_HAS_IEEE1588_SUPPORT + struct cyclecounter cycles; + struct timecounter clock; + struct timecompare compare; +#endif + /* begin "tx thread" cacheline section */ void (*write32_tx_mbox) (struct tg3 *, u32, u32); + u32 dma_limit; + u32 txq_req; + u32 txq_cnt; + u32 txq_max; /* begin "rx thread" cacheline section */ struct tg3_napi napi[TG3_IRQ_MAX_VECS]; void (*write32_rx_mbox) (struct tg3 *, u32, u32); u32 rx_copy_thresh; + u32 rx_std_ring_mask; + u32 rx_jmb_ring_mask; + u32 rx_ret_ring_mask; u32 rx_pending; u32 rx_jumbo_pending; u32 rx_std_max_post; u32 rx_offset; u32 rx_pkt_map_sz; -#if TG3_VLAN_TAG_USED + u32 rxq_req; + u32 rxq_cnt; + u32 rxq_max; +#ifdef BCM_USE_OLD_VLAN_INTERFACE struct vlan_group *vlgrp; #endif - struct tg3_rx_prodring_set prodring[TG3_IRQ_MAX_VECS]; - + bool rx_refill; /* begin "everything else" cacheline(s) section */ - struct net_device_stats net_stats; - struct net_device_stats net_stats_prev; - struct tg3_ethtool_stats estats; + unsigned long rx_dropped; + unsigned long tx_dropped; + struct rtnl_link_stats64 net_stats_prev; struct tg3_ethtool_stats estats_prev; + DECLARE_BITMAP(tg3_flags, TG3_FLAG_NUMBER_OF_FLAGS); + union { unsigned long phy_crc_errors; unsigned long last_event_jiffies; }; - u32 tg3_flags; -#define TG3_FLAG_TAGGED_STATUS 0x00000001 -#define TG3_FLAG_TXD_MBOX_HWBUG 0x00000002 -#define TG3_FLAG_RX_CHECKSUMS 0x00000004 -#define TG3_FLAG_USE_LINKCHG_REG 0x00000008 -#define TG3_FLAG_USE_MI_INTERRUPT 0x00000010 -#define TG3_FLAG_ENABLE_ASF 0x00000020 -#define TG3_FLAG_ASPM_WORKAROUND 0x00000040 -#define TG3_FLAG_POLL_SERDES 0x00000080 -#define TG3_FLAG_MBOX_WRITE_REORDER 0x00000100 -#define TG3_FLAG_PCIX_TARGET_HWBUG 0x00000200 -#define TG3_FLAG_WOL_SPEED_100MB 0x00000400 -#define TG3_FLAG_WOL_ENABLE 0x00000800 -#define TG3_FLAG_EEPROM_WRITE_PROT 
0x00001000 -#define TG3_FLAG_NVRAM 0x00002000 -#define TG3_FLAG_NVRAM_BUFFERED 0x00004000 -#define TG3_FLAG_SUPPORT_MSI 0x00008000 -#define TG3_FLAG_SUPPORT_MSIX 0x00010000 -#define TG3_FLAG_SUPPORT_MSI_OR_MSIX (TG3_FLAG_SUPPORT_MSI | \ - TG3_FLAG_SUPPORT_MSIX) -#define TG3_FLAG_PCIX_MODE 0x00020000 -#define TG3_FLAG_PCI_HIGH_SPEED 0x00040000 -#define TG3_FLAG_PCI_32BIT 0x00080000 -#define TG3_FLAG_SRAM_USE_CONFIG 0x00100000 -#define TG3_FLAG_TX_RECOVERY_PENDING 0x00200000 -#define TG3_FLAG_WOL_CAP 0x00400000 -#define TG3_FLAG_JUMBO_RING_ENABLE 0x00800000 -#define TG3_FLAG_10_100_ONLY 0x01000000 -#define TG3_FLAG_PAUSE_AUTONEG 0x02000000 -#define TG3_FLAG_CPMU_PRESENT 0x04000000 -#define TG3_FLAG_40BIT_DMA_BUG 0x08000000 -#define TG3_FLAG_BROKEN_CHECKSUMS 0x10000000 -#define TG3_FLAG_JUMBO_CAPABLE 0x20000000 -#define TG3_FLAG_CHIP_RESETTING 0x40000000 -#define TG3_FLAG_INIT_COMPLETE 0x80000000 - u32 tg3_flags2; -#define TG3_FLG2_RESTART_TIMER 0x00000001 -#define TG3_FLG2_TSO_BUG 0x00000002 -#define TG3_FLG2_NO_ETH_WIRE_SPEED 0x00000004 -#define TG3_FLG2_IS_5788 0x00000008 -#define TG3_FLG2_MAX_RXPEND_64 0x00000010 -#define TG3_FLG2_TSO_CAPABLE 0x00000020 -#define TG3_FLG2_PHY_ADC_BUG 0x00000040 -#define TG3_FLG2_PHY_5704_A0_BUG 0x00000080 -#define TG3_FLG2_PHY_BER_BUG 0x00000100 -#define TG3_FLG2_PCI_EXPRESS 0x00000200 -#define TG3_FLG2_ASF_NEW_HANDSHAKE 0x00000400 -#define TG3_FLG2_HW_AUTONEG 0x00000800 -#define TG3_FLG2_IS_NIC 0x00001000 -#define TG3_FLG2_PHY_SERDES 0x00002000 -#define TG3_FLG2_CAPACITIVE_COUPLING 0x00004000 -#define TG3_FLG2_FLASH 0x00008000 -#define TG3_FLG2_HW_TSO_1 0x00010000 -#define TG3_FLG2_SERDES_PREEMPHASIS 0x00020000 -#define TG3_FLG2_5705_PLUS 0x00040000 -#define TG3_FLG2_5750_PLUS 0x00080000 -#define TG3_FLG2_HW_TSO_3 0x00100000 -#define TG3_FLG2_USING_MSI 0x00200000 -#define TG3_FLG2_USING_MSIX 0x00400000 -#define TG3_FLG2_USING_MSI_OR_MSIX (TG3_FLG2_USING_MSI | \ - TG3_FLG2_USING_MSIX) -#define TG3_FLG2_MII_SERDES 0x00800000 -#define TG3_FLG2_ANY_SERDES (TG3_FLG2_PHY_SERDES | \ - TG3_FLG2_MII_SERDES) -#define TG3_FLG2_PARALLEL_DETECT 0x01000000 -#define TG3_FLG2_ICH_WORKAROUND 0x02000000 -#define TG3_FLG2_5780_CLASS 0x04000000 -#define TG3_FLG2_HW_TSO_2 0x08000000 -#define TG3_FLG2_HW_TSO (TG3_FLG2_HW_TSO_1 | \ - TG3_FLG2_HW_TSO_2 | \ - TG3_FLG2_HW_TSO_3) -#define TG3_FLG2_1SHOT_MSI 0x10000000 -#define TG3_FLG2_PHY_JITTER_BUG 0x20000000 -#define TG3_FLG2_NO_FWARE_REPORTED 0x40000000 -#define TG3_FLG2_PHY_ADJUST_TRIM 0x80000000 - u32 tg3_flags3; -#define TG3_FLG3_NO_NVRAM_ADDR_TRANS 0x00000001 -#define TG3_FLG3_ENABLE_APE 0x00000002 -#define TG3_FLG3_PROTECTED_NVRAM 0x00000004 -#define TG3_FLG3_5701_DMA_BUG 0x00000008 -#define TG3_FLG3_USE_PHYLIB 0x00000010 -#define TG3_FLG3_MDIOBUS_INITED 0x00000020 -#define TG3_FLG3_PHY_CONNECTED 0x00000080 -#define TG3_FLG3_RGMII_INBAND_DISABLE 0x00000100 -#define TG3_FLG3_RGMII_EXT_IBND_RX_EN 0x00000200 -#define TG3_FLG3_RGMII_EXT_IBND_TX_EN 0x00000400 -#define TG3_FLG3_CLKREQ_BUG 0x00000800 -#define TG3_FLG3_PHY_ENABLE_APD 0x00001000 -#define TG3_FLG3_5755_PLUS 0x00002000 -#define TG3_FLG3_NO_NVRAM 0x00004000 -#define TG3_FLG3_PHY_IS_FET 0x00010000 -#define TG3_FLG3_ENABLE_RSS 0x00020000 -#define TG3_FLG3_ENABLE_TSS 0x00040000 -#define TG3_FLG3_4G_DMA_BNDRY_BUG 0x00080000 -#define TG3_FLG3_40BIT_DMA_LIMIT_BUG 0x00100000 -#define TG3_FLG3_SHORT_DMA_BUG 0x00200000 -#define TG3_FLG3_USE_JUMBO_BDFLAG 0x00400000 -#define TG3_FLG3_L1PLLPD_EN 0x00800000 -#define TG3_FLG3_APE_HAS_NCSI 0x02000000 - struct timer_list timer; u16 
timer_counter; u16 timer_multiplier; @@ -2924,26 +3199,19 @@ struct tg3 { u32 dma_rwctrl; u32 coalesce_mode; u32 pwrmgmt_thresh; + u32 rxptpctl; /* PCI block */ u32 pci_chip_rev_id; u16 pci_cmd; u8 pci_cacheline_sz; u8 pci_lat_timer; -#if (LINUX_VERSION_CODE < 0x2060a) - u32 pci_cfg_state[64 / sizeof(u32)]; -#endif + int pci_fn; int pm_cap; int msi_cap; - union { int pcix_cap; - int pcie_cap; - }; - -#if defined(__VMKLNX__) - struct tg3_vmware vmware; -#endif + int pcie_readrq; #ifdef BCM_INCLUDE_PHYLIB_SUPPORT struct mii_bus *mdio_bus; @@ -2951,6 +3219,7 @@ struct tg3 { #endif u8 phy_addr; + u8 phy_ape_lock; /* PHY info */ u32 phy_id; @@ -2974,6 +3243,8 @@ struct tg3 { #define TG3_PHY_ID_BCM5718C 0x5c0d8a00 #define TG3_PHY_ID_BCM5718S 0xbc050ff0 #define TG3_PHY_ID_BCM57765 0x5c0d8a40 +#define TG3_PHY_ID_BCM5719C 0x5c0d8a20 +#define TG3_PHY_ID_BCM5720C 0x5c0d8b60 #define TG3_PHY_ID_BCM5906 0xdc00ac40 #define TG3_PHY_ID_BCM8002 0x60010140 #ifndef BCM_INCLUDE_PHYLIB_SUPPORT @@ -3006,10 +3277,42 @@ struct tg3 { (X) == TG3_PHY_ID_BCM5755 || (X) == TG3_PHY_ID_BCM5756 || \ (X) == TG3_PHY_ID_BCM5906 || (X) == TG3_PHY_ID_BCM5761 || \ (X) == TG3_PHY_ID_BCM5718C || (X) == TG3_PHY_ID_BCM5718S || \ - (X) == TG3_PHY_ID_BCM57765 || (X) == TG3_PHY_ID_BCM8002) + (X) == TG3_PHY_ID_BCM57765 || (X) == TG3_PHY_ID_BCM5719C || \ + (X) == TG3_PHY_ID_BCM8002 || \ + (X) == TG3_PHY_ID_BCM50610 || (X) == TG3_PHY_ID_BCM50610M || \ + (X) == TG3_PHY_ID_BCM50612E || (X) == TG3_PHY_ID_BCMAC131 || \ + (X) == TG3_PHY_ID_BCM57780) + + u32 phy_flags; +#define TG3_PHYFLG_USER_CONFIGURED 0x00000001 +#define TG3_PHYFLG_IS_LOW_POWER 0x00000002 +#define TG3_PHYFLG_IS_CONNECTED 0x00000004 +#define TG3_PHYFLG_USE_MI_INTERRUPT 0x00000008 +#define TG3_PHYFLG_PHY_SERDES 0x00000010 +#define TG3_PHYFLG_MII_SERDES 0x00000020 +#define TG3_PHYFLG_ANY_SERDES (TG3_PHYFLG_PHY_SERDES | \ + TG3_PHYFLG_MII_SERDES) +#define TG3_PHYFLG_IS_FET 0x00000040 +#define TG3_PHYFLG_10_100_ONLY 0x00000080 +#define TG3_PHYFLG_ENABLE_APD 0x00000100 +#define TG3_PHYFLG_CAPACITIVE_COUPLING 0x00000200 +#define TG3_PHYFLG_NO_ETH_WIRE_SPEED 0x00000400 +#define TG3_PHYFLG_JITTER_BUG 0x00000800 +#define TG3_PHYFLG_ADJUST_TRIM 0x00001000 +#define TG3_PHYFLG_ADC_BUG 0x00002000 +#define TG3_PHYFLG_5704_A0_BUG 0x00004000 +#define TG3_PHYFLG_BER_BUG 0x00008000 +#define TG3_PHYFLG_SERDES_PREEMPHASIS 0x00010000 +#define TG3_PHYFLG_PARALLEL_DETECT 0x00020000 +#define TG3_PHYFLG_EEE_CAP 0x00040000 +#define TG3_PHYFLG_1G_ON_VAUX_OK 0x00080000 +#define TG3_PHYFLG_KEEP_LINK_ON_PWRDN 0x00100000 +#define TG3_PHYFLG_MDIX_STATE 0x00200000 u32 led_ctrl; u32 phy_otp; + u32 setlpicnt; + u8 rss_ind_tbl[TG3_RSS_INDIR_TBL_SIZE]; #define TG3_BPN_SIZE 24 char board_part_number[TG3_BPN_SIZE]; @@ -3025,6 +3328,7 @@ struct tg3 { int nvram_lock_cnt; u32 nvram_size; +#define TG3_NVRAM_SIZE_2KB 0x00000800 #define TG3_NVRAM_SIZE_64KB 0x00010000 #define TG3_NVRAM_SIZE_128KB 0x00020000 #define TG3_NVRAM_SIZE_256KB 0x00040000 @@ -3040,6 +3344,9 @@ struct tg3 { #define JEDEC_SAIFUN 0x4f #define JEDEC_SST 0xbf +#define ATMEL_AT24C02_CHIP_SIZE TG3_NVRAM_SIZE_2KB +#define ATMEL_AT24C02_PAGE_SIZE (8) + #define ATMEL_AT24C64_CHIP_SIZE TG3_NVRAM_SIZE_64KB #define ATMEL_AT24C64_PAGE_SIZE (32) @@ -3066,6 +3373,19 @@ struct tg3 { const char *fw_needed; const struct tg3_firmware *fw; u32 fw_len; /* includes BSS */ + +#if defined(__VMKLNX__) + struct tg3_vmware vmware; +#endif +#ifndef BCM_HAS_PCI_PCIE_CAP + int pcie_cap; +#endif +#if (LINUX_VERSION_CODE < 0x2060a) + u32 pci_cfg_state[64 / sizeof(u32)]; +#endif 
+#ifndef BCM_HAS_GET_STATS64 + struct rtnl_link_stats64 net_stats; +#endif }; #endif /* !(_T3_H) */ diff --git a/vmkdrivers/src_9/drivers/net/tg3/tg3_compat.h b/vmkdrivers/src_9/drivers/net/tg3/tg3_compat.h index 1320b487fe87ace8bb2f3d767a8ffe1e1fab1bcb..2d96f7b35d8970c97c69f3d4c7c6c9d1081e50ff 100644 --- a/vmkdrivers/src_9/drivers/net/tg3/tg3_compat.h +++ b/vmkdrivers/src_9/drivers/net/tg3/tg3_compat.h @@ -1,7 +1,18 @@ -/* Copyright (C) 2008-2010 Broadcom Corporation. */ +/* Copyright (C) 2008-2012 Broadcom Corporation. */ #include "tg3_flags.h" +#ifdef CONFIG_X86 +#undef NET_IP_ALIGN +#define NET_IP_ALIGN 0 +#endif + +#ifdef BCM_HAS_IEEE1588_SUPPORT +#include +#include +#include +#endif + #if !defined(__maybe_unused) #define __maybe_unused /* unimplemented */ #endif @@ -10,6 +21,10 @@ #define __iomem #endif +#ifndef __always_unused +#define __always_unused +#endif + #ifndef __acquires #define __acquires(x) #endif @@ -38,6 +53,10 @@ #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) #endif +#ifndef DIV_ROUND_UP +#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) +#endif + #ifndef __ALIGN_MASK #define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask)) #endif @@ -210,7 +229,7 @@ static unsigned long msleep_interruptible(unsigned int msecs) #define dev_driver_string(dev) "tg3" #endif -#ifndef BCM_HAS_DEV_NAME +#if !defined(BCM_HAS_DEV_NAME) || defined(__VMKLNX__) #define dev_name(dev) "" #endif @@ -480,6 +499,10 @@ typedef u32 pci_power_t; #define PCI_D3hot 3 #endif +#ifndef PCI_D3cold +#define PCI_D3cold 4 +#endif + #ifndef DMA_64BIT_MASK #define DMA_64BIT_MASK ((u64) 0xffffffffffffffffULL) #endif @@ -516,25 +539,6 @@ static inline pci_power_t pci_choose_state(struct pci_dev *dev, } #endif -#ifndef BCM_HAS_PCI_PME_CAPABLE -static bool pci_pme_capable(struct pci_dev *dev, pci_power_t state) -{ - int pm_cap; - u16 caps; - - pm_cap = pci_find_capability(dev, PCI_CAP_ID_PM); - if (pm_cap == 0) - return false; - - pci_read_config_word(dev, pm_cap + PCI_PM_PMC, &caps); - - if (caps & PCI_PM_CAP_PME_D3cold) - return true; - - return false; -} -#endif /* BCM_HAS_PCI_PME_CAPABLE */ - #ifndef BCM_HAS_PCI_ENABLE_WAKE static int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable) { @@ -561,6 +565,37 @@ static int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable) } #endif /* BCM_HAS_PCI_ENABLE_WAKE */ +#ifndef BCM_HAS_PCI_WAKE_FROM_D3 +#ifndef BCM_HAS_PCI_PME_CAPABLE +static bool pci_pme_capable(struct pci_dev *dev, pci_power_t state) +{ + int pm_cap; + u16 caps; + bool ret = false; + + pm_cap = pci_find_capability(dev, PCI_CAP_ID_PM); + if (pm_cap == 0) + goto done; + + pci_read_config_word(dev, pm_cap + PCI_PM_PMC, &caps); + + if (state == PCI_D3cold && + (caps & PCI_PM_CAP_PME_D3cold)) + ret = true; + +done: + return ret; +} +#endif /* BCM_HAS_PCI_PME_CAPABLE */ + +static int pci_wake_from_d3(struct pci_dev *dev, bool enable) +{ + return pci_pme_capable(dev, PCI_D3cold) ? + pci_enable_wake(dev, PCI_D3cold, enable) : + pci_enable_wake(dev, PCI_D3hot, enable); +} +#endif /* BCM_HAS_PCI_WAKE_FROM_D3 */ + #ifndef BCM_HAS_PCI_SET_POWER_STATE static int pci_set_power_state(struct pci_dev *dev, pci_power_t state) { @@ -587,13 +622,28 @@ static int pci_set_power_state(struct pci_dev *dev, pci_power_t state) } #endif /* BCM_HAS_PCI_SET_POWER_STATE */ +#ifdef __VMKLNX__ +/* VMWare disables CONFIG_PM in their kernel configs. + * This renders WOL inop, because device_may_wakeup() always returns false. 
+ */ +#undef BCM_HAS_DEVICE_WAKEUP_API +#endif + #ifndef BCM_HAS_DEVICE_WAKEUP_API +#undef device_init_wakeup #define device_init_wakeup(dev, val) +#undef device_can_wakeup #define device_can_wakeup(dev) 1 +#undef device_set_wakeup_enable #define device_set_wakeup_enable(dev, val) +#undef device_may_wakeup #define device_may_wakeup(dev) 1 #endif /* BCM_HAS_DEVICE_WAKEUP_API */ +#ifndef BCM_HAS_DEVICE_SET_WAKEUP_CAPABLE +#define device_set_wakeup_capable(dev, val) +#endif /* BCM_HAS_DEVICE_SET_WAKEUP_CAPABLE */ + #ifndef PCI_X_CMD_READ_2K #define PCI_X_CMD_READ_2K 0x0008 @@ -628,13 +678,59 @@ static int pci_set_power_state(struct pci_dev *dev, pci_power_t state) #define PCI_EXP_DEVSTA_URD 0x08 #endif -#ifndef BCM_HAS_PCIE_SET_READRQ +#ifndef PCI_EXP_LNKSTA +#define PCI_EXP_LNKSTA 18 +#endif + +#ifndef PCI_EXP_LNKSTA_CLS +#define PCI_EXP_LNKSTA_CLS 0x000f +#endif + +#ifndef PCI_EXP_LNKSTA_CLS_2_5GB +#define PCI_EXP_LNKSTA_CLS_2_5GB 0x01 +#endif + +#ifndef PCI_EXP_LNKSTA_CLS_5_0GB +#define PCI_EXP_LNKSTA_CLS_5_0GB 0x02 +#endif + +#ifndef PCI_EXP_LNKSTA_NLW +#define PCI_EXP_LNKSTA_NLW 0x03f0 +#endif + +#ifndef PCI_EXP_LNKSTA_NLW_SHIFT +#define PCI_EXP_LNKSTA_NLW_SHIFT 4 +#endif + #ifndef PCI_EXP_DEVCTL #define PCI_EXP_DEVCTL 8 #endif #ifndef PCI_EXP_DEVCTL_READRQ #define PCI_EXP_DEVCTL_READRQ 0x7000 #endif + +#ifndef BCM_HAS_PCIE_GET_READRQ +int pcie_get_readrq(struct pci_dev *dev) +{ + int ret, cap; + u16 ctl; + + cap = pci_find_capability(dev, PCI_CAP_ID_EXP); + if (!cap) { + ret = -EINVAL; + goto out; + } + + ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl); + if (!ret) + ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12); + +out: + return ret; +} +#endif /* BCM_HAS_PCIE_GET_READRQ */ + +#ifndef BCM_HAS_PCIE_SET_READRQ static inline int pcie_set_readrq(struct pci_dev *dev, int rq) { int cap, err = -EINVAL; @@ -708,6 +804,10 @@ pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, u8 *buf) } #endif /* BCM_HAS_PCI_READ_VPD */ +#ifndef PCI_VPD_RO_KEYWORD_CHKSUM +#define PCI_VPD_RO_KEYWORD_CHKSUM "RV" +#endif + #ifndef PCI_VPD_LRDT #define PCI_VPD_LRDT 0x80 /* Large Resource Data Type */ #define PCI_VPD_LRDT_ID(x) (x | PCI_VPD_LRDT) @@ -912,6 +1012,18 @@ static int tg3_set_tx_hw_csum(struct net_device *dev, u32 data) #define BCM_NO_IPV6_CSUM 1 #endif +#ifndef NETIF_F_RXCSUM +#define NETIF_F_RXCSUM (1 << 29) +#endif + +#ifndef NETIF_F_GRO +#define NETIF_F_GRO 16384 +#endif + +#ifndef NETIF_F_LOOPBACK +#define NETIF_F_LOOPBACK (1 << 31) +#endif + #ifdef NETIF_F_TSO #ifndef NETIF_F_GSO #define gso_size tso_size @@ -925,6 +1037,18 @@ static int tg3_set_tx_hw_csum(struct net_device *dev, u32 data) #define NETIF_F_TSO_ECN 0 #endif +#ifndef NETIF_F_ALL_TSO +#define NETIF_F_ALL_TSO (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN) +#endif + +#ifndef BCM_HAS_SKB_TX_TIMESTAMP +#define skb_tx_timestamp(skb) +#endif + +#ifndef BCM_HAS_SKB_FRAG_SIZE +#define skb_frag_size(skb_frag) ((skb_frag)->size) +#endif + #if (LINUX_VERSION_CODE < 0x2060c) static inline int skb_header_cloned(struct sk_buff *skb) { return 0; } #endif @@ -957,6 +1081,13 @@ static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb) } #endif +#ifndef BCM_HAS_TCP_HDRLEN +static inline unsigned int tcp_hdrlen(const struct sk_buff *skb) +{ + return tcp_hdr(skb)->doff * 4; +} +#endif + #ifndef BCM_HAS_TCP_OPTLEN static inline unsigned int tcp_optlen(const struct sk_buff *skb) { @@ -1185,10 +1316,6 @@ static struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features) #endif /* NETIF_F_TSO */ -#ifndef 
BCM_HAS_SKB_GET_QUEUE_MAPPING -#define skb_get_queue_mapping(skb) 0 -#endif - #ifndef BCM_HAS_SKB_COPY_FROM_LINEAR_DATA static inline void skb_copy_from_linear_data(const struct sk_buff *skb, void *to, @@ -1198,10 +1325,37 @@ static inline void skb_copy_from_linear_data(const struct sk_buff *skb, } #endif +#if TG3_TSO_SUPPORT != 0 +#if defined(BCM_NO_TSO6) +static inline int skb_is_gso_v6(const struct sk_buff *skb) +{ + return 0; +} +#else +#if !defined(BCM_HAS_SKB_IS_GSO_V6) +static inline int skb_is_gso_v6(const struct sk_buff *skb) +{ + return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6; +} +#endif +#endif +#endif + +#ifndef BCM_HAS_SKB_CHECKSUM_NONE_ASSERT +static inline void skb_checksum_none_assert(struct sk_buff *skb) +{ + skb->ip_summed = CHECKSUM_NONE; +} +#endif + #ifndef BCM_HAS_NETDEV_TX_T typedef int netdev_tx_t; #endif +#ifndef BCM_HAS_NETDEV_FEATURES_T +typedef u64 netdev_features_t; +#endif + #ifndef BCM_HAS_NETDEV_NAME #define netdev_name(netdev) netdev->name #endif @@ -1308,7 +1462,7 @@ static inline void netif_tx_unlock(struct net_device *dev) #endif /* BCM_HAS_STRUCT_NETDEV_QUEUE */ -#ifndef BCM_HAS_ALLOC_ETHERDEV_MQ +#if !defined(BCM_HAS_ALLOC_ETHERDEV_MQ) || !defined(TG3_NAPI) #define alloc_etherdev_mq(size, numqs) alloc_etherdev((size)) #endif @@ -1322,6 +1476,10 @@ static inline void netif_tx_unlock(struct net_device *dev) netif_receive_skb((skb)) #endif +#if !defined(BCM_HAS_SKB_GET_QUEUE_MAPPING) || !defined(TG3_NAPI) +#define skb_get_queue_mapping(skb) 0 +#endif + #if (LINUX_VERSION_CODE < 0x020612) static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev, unsigned int length) @@ -1333,7 +1491,7 @@ static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev, } #endif -#if !defined(HAVE_NETDEV_PRIV) && (LINUX_VERSION_CODE != 0x020603) && (LINUX_VERSION_CODE != 0x020604) && (LINUX_VERSION_CODE != 0x20605) +#ifndef BCM_HAS_NETDEV_PRIV static inline void *netdev_priv(struct net_device *dev) { return dev->priv; @@ -1363,6 +1521,26 @@ static inline void netif_tx_disable(struct net_device *dev) } #endif /* OLD_NETIF */ +#ifndef BCM_HAS_NETDEV_SENT_QUEUE +#define netdev_sent_queue(dev, bytes) +#endif + +#ifndef BCM_HAS_NETDEV_COMPLETED_QUEUE +#define netdev_completed_queue(dev, pkts, bytes) +#endif + +#ifndef BCM_HAS_NETDEV_RESET_QUEUE +#define netdev_reset_queue(dev_queue) +#endif + +#ifndef BCM_HAS_NETIF_SET_REAL_NUM_TX_QUEUES +#define netif_set_real_num_tx_queues(dev, nq) ((dev)->real_num_tx_queues = (nq)) +#endif + +#ifndef BCM_HAS_NETIF_SET_REAL_NUM_RX_QUEUES +#define netif_set_real_num_rx_queues(dev, nq) 0 +#endif + #ifndef netdev_mc_count #define netdev_mc_count(dev) ((dev)->mc_count) #endif @@ -1394,20 +1572,24 @@ struct netdev_hw_addr { u8 * addr; struct dev_mc_list * curr; }; +#undef netdev_for_each_mc_addr #define netdev_for_each_mc_addr(ha, dev) \ struct netdev_hw_addr mclist; \ ha = &mclist; \ for (mclist.curr = dev->mc_list; mclist.curr && (mclist.addr = &mclist.curr->dmi_addr[0]); mclist.curr = mclist.curr->next) #endif /* BCM_HAS_NETDEV_HW_ADDR */ +#ifndef BCM_HAS_GET_STATS64 +#define rtnl_link_stats64 net_device_stats +#endif /* BCM_HAS_GET_STATS64 */ -#ifndef VLAN_GROUP_ARRAY_SPLIT_PARTS -static inline void vlan_group_set_device(struct vlan_group *vg, int vlan_id, - struct net_device *dev) -{ - if (vg) - vg->vlan_devices[vlan_id] = dev; -} +#ifndef BCM_HAS_EXTERNAL_LB_DONE +#define ETH_TEST_FL_EXTERNAL_LB (1 << 2) +#define ETH_TEST_FL_EXTERNAL_LB_DONE (1 << 3) +#endif + +#if defined(CONFIG_VLAN_8021Q) || 
defined(CONFIG_VLAN_8021Q_MODULE) +#define BCM_KERNEL_SUPPORTS_8021Q #endif #ifndef ETH_SS_TEST @@ -1426,6 +1608,18 @@ static inline void vlan_group_set_device(struct vlan_group *vg, int vlan_id, #ifndef MII_CTRL1000 #define MII_CTRL1000 0x09 #endif +#ifndef ADVERTISE_1000HALF +#define ADVERTISE_1000HALF 0x0100 +#endif +#ifndef ADVERTISE_1000FULL +#define ADVERTISE_1000FULL 0x0200 +#endif +#ifndef CTL1000_AS_MASTER +#define CTL1000_AS_MASTER 0x0800 +#endif +#ifndef CTL1000_ENABLE_MASTER +#define CTL1000_ENABLE_MASTER 0x1000 +#endif #ifndef MII_STAT1000 #define MII_STAT1000 0x0a #endif @@ -1468,17 +1662,170 @@ static inline void vlan_group_set_device(struct vlan_group *vg, int vlan_id, #ifndef LPA_PAUSE_ASYM #define LPA_PAUSE_ASYM 0x0800 #endif -#ifndef ADVERTISE_1000HALF -#define ADVERTISE_1000HALF 0x0100 +#ifndef LPA_1000FULL +#define LPA_1000FULL 0x0800 #endif -#ifndef ADVERTISE_1000FULL -#define ADVERTISE_1000FULL 0x0200 +#ifndef LPA_1000HALF +#define LPA_1000HALF 0x0400 #endif #ifndef ETHTOOL_FWVERS_LEN #define ETHTOOL_FWVERS_LEN 32 #endif +#ifndef MDIO_MMD_AN +#define MDIO_MMD_AN 7 +#endif + +#ifndef MDIO_AN_EEE_ADV +#define MDIO_AN_EEE_ADV 60 +#endif + +#ifndef MDIO_AN_EEE_ADV_100TX +#define MDIO_AN_EEE_ADV_100TX 0x0002 +#endif + +#ifndef MDIO_AN_EEE_ADV_1000T +#define MDIO_AN_EEE_ADV_1000T 0x0004 +#endif + +#ifndef BCM_HAS_ETHTOOL_ADV_TO_MII_100BT +static inline u32 ethtool_adv_to_mii_adv_t(u32 ethadv) +{ + u32 result = 0; + + if (ethadv & ADVERTISED_10baseT_Half) + result |= ADVERTISE_10HALF; + if (ethadv & ADVERTISED_10baseT_Full) + result |= ADVERTISE_10FULL; + if (ethadv & ADVERTISED_100baseT_Half) + result |= ADVERTISE_100HALF; + if (ethadv & ADVERTISED_100baseT_Full) + result |= ADVERTISE_100FULL; + if (ethadv & ADVERTISED_Pause) + result |= ADVERTISE_PAUSE_CAP; + if (ethadv & ADVERTISED_Asym_Pause) + result |= ADVERTISE_PAUSE_ASYM; + + return result; +} + +static inline u32 mii_adv_to_ethtool_adv_t(u32 adv) +{ + u32 result = 0; + + if (adv & ADVERTISE_10HALF) + result |= ADVERTISED_10baseT_Half; + if (adv & ADVERTISE_10FULL) + result |= ADVERTISED_10baseT_Full; + if (adv & ADVERTISE_100HALF) + result |= ADVERTISED_100baseT_Half; + if (adv & ADVERTISE_100FULL) + result |= ADVERTISED_100baseT_Full; + if (adv & ADVERTISE_PAUSE_CAP) + result |= ADVERTISED_Pause; + if (adv & ADVERTISE_PAUSE_ASYM) + result |= ADVERTISED_Asym_Pause; + + return result; +} + +static inline u32 ethtool_adv_to_mii_ctrl1000_t(u32 ethadv) +{ + u32 result = 0; + + if (ethadv & ADVERTISED_1000baseT_Half) + result |= ADVERTISE_1000HALF; + if (ethadv & ADVERTISED_1000baseT_Full) + result |= ADVERTISE_1000FULL; + + return result; +} + +static inline u32 mii_ctrl1000_to_ethtool_adv_t(u32 adv) +{ + u32 result = 0; + + if (adv & ADVERTISE_1000HALF) + result |= ADVERTISED_1000baseT_Half; + if (adv & ADVERTISE_1000FULL) + result |= ADVERTISED_1000baseT_Full; + + return result; +} + +static inline u32 mii_lpa_to_ethtool_lpa_t(u32 lpa) +{ + u32 result = 0; + + if (lpa & LPA_LPACK) + result |= ADVERTISED_Autoneg; + + return result | mii_adv_to_ethtool_adv_t(lpa); +} + +static inline u32 mii_stat1000_to_ethtool_lpa_t(u32 lpa) +{ + u32 result = 0; + + if (lpa & LPA_1000HALF) + result |= ADVERTISED_1000baseT_Half; + if (lpa & LPA_1000FULL) + result |= ADVERTISED_1000baseT_Full; + + return result; +} + +static inline u32 ethtool_adv_to_mii_adv_x(u32 ethadv) +{ + u32 result = 0; + + if (ethadv & ADVERTISED_1000baseT_Half) + result |= ADVERTISE_1000XHALF; + if (ethadv & ADVERTISED_1000baseT_Full) + result |= 
ADVERTISE_1000XFULL; + if (ethadv & ADVERTISED_Pause) + result |= ADVERTISE_1000XPAUSE; + if (ethadv & ADVERTISED_Asym_Pause) + result |= ADVERTISE_1000XPSE_ASYM; + + return result; +} + +static inline u32 mii_adv_to_ethtool_adv_x(u32 adv) +{ + u32 result = 0; + + if (adv & ADVERTISE_1000XHALF) + result |= ADVERTISED_1000baseT_Half; + if (adv & ADVERTISE_1000XFULL) + result |= ADVERTISED_1000baseT_Full; + if (adv & ADVERTISE_1000XPAUSE) + result |= ADVERTISED_Pause; + if (adv & ADVERTISE_1000XPSE_ASYM) + result |= ADVERTISED_Asym_Pause; + + return result; +} + +static inline u32 mii_lpa_to_ethtool_lpa_x(u32 lpa) +{ + u32 result = 0; + + if (lpa & LPA_LPACK) + result |= ADVERTISED_Autoneg; + + return result | mii_adv_to_ethtool_adv_x(lpa); +} +#endif /* BCM_HAS_ETHTOOL_ADV_TO_MII_100BT */ + +#ifndef BCM_HAS_ETHTOOL_RXFH_INDIR_DEFAULT +static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings) +{ + return index % n_rx_rings; +} +#endif /* BCM_HAS_ETHTOOL_RXFH_INDIR_DEFAULT */ + #ifndef BCM_HAS_MII_RESOLVE_FLOWCTRL_FDX #ifndef FLOW_CTRL_TX #define FLOW_CTRL_TX 0x01 @@ -1490,18 +1837,12 @@ static u8 mii_resolve_flowctrl_fdx(u16 lcladv, u16 rmtadv) { u8 cap = 0; - if (lcladv & ADVERTISE_PAUSE_CAP) { - if (lcladv & ADVERTISE_PAUSE_ASYM) { - if (rmtadv & LPA_PAUSE_CAP) - cap = FLOW_CTRL_TX | FLOW_CTRL_RX; - else if (rmtadv & LPA_PAUSE_ASYM) - cap = FLOW_CTRL_RX; - } else { - if (rmtadv & LPA_PAUSE_CAP) - cap = FLOW_CTRL_TX | FLOW_CTRL_RX; - } + if (lcladv & rmtadv & ADVERTISE_PAUSE_CAP) { + cap = FLOW_CTRL_TX | FLOW_CTRL_RX; } else if (lcladv & ADVERTISE_PAUSE_ASYM) { - if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM)) + if (lcladv & LPA_PAUSE_CAP) + cap = FLOW_CTRL_RX; + if (rmtadv & LPA_PAUSE_CAP) cap = FLOW_CTRL_TX; } @@ -1509,6 +1850,24 @@ static u8 mii_resolve_flowctrl_fdx(u16 lcladv, u16 rmtadv) } #endif /* BCM_HAS_MII_RESOLVE_FLOWCTRL_FDX */ +#ifndef BCM_HAS_MII_ADVERTISE_FLOWCTRL +static u16 mii_advertise_flowctrl(u8 flow_ctrl) +{ + u16 miireg; + + if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX)) + miireg = ADVERTISE_PAUSE_CAP; + else if (flow_ctrl & FLOW_CTRL_TX) + miireg = ADVERTISE_PAUSE_ASYM; + else if (flow_ctrl & FLOW_CTRL_RX) + miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; + else + miireg = 0; + + return miireg; +} +#endif /* BCM_HAS_MII_ADVERTISE_FLOWCTRL */ + #ifdef BCM_INCLUDE_PHYLIB_SUPPORT #ifndef PHY_ID_BCM50610 @@ -1574,3 +1933,18 @@ void mdiobus_free(struct mii_bus *bus) #endif #endif /* BCM_INCLUDE_PHYLIB_SUPPORT */ + +#ifndef BCM_HAS_ETHTOOL_CMD_SPEED +static inline __u32 ethtool_cmd_speed(struct ethtool_cmd *ep) +{ + return ep->speed; +} +#endif /* BCM_HAS_ETHTOOL_CMD_SPEED */ + +#ifndef BCM_HAS_ETHTOOL_CMD_SPEED_SET +static inline __u32 ethtool_cmd_speed_set(struct ethtool_cmd *ep, __u32 speed) +{ + ep->speed = speed; + return 0; +} +#endif /* BCM_HAS_ETHTOOL_CMD_SPEED_SET */ diff --git a/vmkdrivers/src_9/drivers/net/tg3/tg3_compat2.h b/vmkdrivers/src_9/drivers/net/tg3/tg3_compat2.h index 2619bc59a5b3786f99d78dce038aad4f3ee03a0d..056172c50f764b87bf79bc3015ea85df64e87733 100644 --- a/vmkdrivers/src_9/drivers/net/tg3/tg3_compat2.h +++ b/vmkdrivers/src_9/drivers/net/tg3/tg3_compat2.h @@ -1,4 +1,76 @@ -/* Copyright (C) 2009-2010 Broadcom Corporation. */ +/* Copyright (C) 2009-2012 Broadcom Corporation. 
*/ + +#ifndef BCM_HAS_PCI_PCIE_CAP +static inline int pci_pcie_cap(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct tg3 *tp = netdev_priv(dev); + + return tp->pcie_cap; +} +#endif + +#ifndef BCM_HAS_PCI_IS_PCIE +static inline bool pci_is_pcie(struct pci_dev *dev) +{ + return !!pci_pcie_cap(dev); +} +#endif + +#ifndef BCM_HAS_SKB_FRAG_DMA_MAP +#define skb_frag_dma_map(x, frag, y, len, z) \ + pci_map_page(tp->pdev, (frag)->page, \ + (frag)->page_offset, (len), PCI_DMA_TODEVICE) +#endif + +#ifdef SIMPLE_DEV_PM_OPS + +#define tg3_invalid_pci_state(tp, state) false +#define tg3_pci_save_state(tp) +#define tg3_pci_restore_state(tp) + +#else /* SIMPLE_DEV_PM_OPS */ + +#if (LINUX_VERSION_CODE < 0x2060b) +static bool tg3_invalid_pci_state(struct tg3 *tp, u32 state) +{ + bool ret = true; + pci_power_t target_state; + + target_state = pci_choose_state(tp->pdev, state); + if (target_state != PCI_D3hot || target_state != PCI_D3cold) + ret = false; + + return ret; +} +#else +static bool tg3_invalid_pci_state(struct tg3 *tp, pm_message_t state) +{ + bool ret = true; + pci_power_t target_state; + +#ifdef BCM_HAS_PCI_TARGET_STATE + target_state = tp->pdev->pm_cap ? pci_target_state(tp->pdev) : PCI_D3hot; +#else + target_state = pci_choose_state(tp->pdev, state); +#endif + if (target_state != PCI_D3hot || target_state != PCI_D3cold) + ret = false; + + return ret; +} +#endif + +#if (LINUX_VERSION_CODE < 0x2060a) +#define tg3_pci_save_state(tp) pci_save_state(tp->pdev, tp->pci_cfg_state) +#define tg3_pci_restore_state(tp) pci_restore_state(tp->pdev, tp->pci_cfg_state) +#else +#define tg3_pci_save_state(tp) pci_save_state(tp->pdev) +#define tg3_pci_restore_state(tp) pci_restore_state(tp->pdev) +#endif + +#endif /* SIMPLE_DEV_PM_OPS */ + #ifdef BCM_HAS_NEW_PCI_DMA_MAPPING_ERROR #define tg3_pci_dma_mapping_error(pdev, mapping) pci_dma_mapping_error((pdev), (mapping)) @@ -8,6 +80,13 @@ #define tg3_pci_dma_mapping_error(pdev, mapping) 0 #endif +#ifndef BCM_HAS_HW_FEATURES +#define hw_features features +#endif + +#ifndef BCM_HAS_VLAN_FEATURES +#define vlan_features features +#endif #ifdef HAVE_POLL_CONTROLLER #define CONFIG_NET_POLL_CONTROLLER @@ -16,8 +95,8 @@ static inline void tg3_5780_class_intx_workaround(struct tg3 *tp) { #ifndef BCM_HAS_INTX_MSI_WORKAROUND - if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) && - (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) + if (tg3_flag(tp, 5780_CLASS) && + tg3_flag(tp, USING_MSI)) tg3_enable_intx(tp->pdev); #endif } @@ -28,7 +107,323 @@ static inline void tg3_5780_class_intx_workaround(struct tg3 *tp) #define tg3_update_trans_start(dev) ((dev)->trans_start = jiffies) #endif -#ifdef __VMKLNX__ +#ifndef BCM_HAS_VLAN_HWACCEL_PUT_TAG +#define TG3_TO_INT(Y) ((int)((unsigned long long)(Y) & (SMP_CACHE_BYTES - 1))) +#define TG3_COMPAT_VLAN_ALLOC_LEN (SMP_CACHE_BYTES + VLAN_HLEN) +#define TG3_COMPAT_VLAN_RESERVE(addr) (SKB_DATA_ALIGN((addr) + VLAN_HLEN) - (addr)) +#else +#define TG3_COMPAT_VLAN_ALLOC_LEN 0 +#define TG3_COMPAT_VLAN_RESERVE(addr) 0 +#endif + +#ifdef BCM_KERNEL_SUPPORTS_8021Q +#ifndef BCM_HAS_VLAN_HWACCEL_PUT_TAG + +#undef TG3_RAW_IP_ALIGN +#define TG3_RAW_IP_ALIGN (2 + VLAN_HLEN) + +static inline struct sk_buff *tg3_vlan_hwaccel_put_tag(struct sk_buff *skb, + u16 vlan_tci) +{ + struct vlan_ethhdr *ve = (struct vlan_ethhdr *) + __skb_push(skb, VLAN_HLEN); + + memmove(ve, skb->data + VLAN_HLEN, ETH_ALEN * 2); + + ve->h_vlan_proto = htons(ETH_P_8021Q); + + ve->h_vlan_TCI = htons(vlan_tci); + + skb->protocol = htons(ETH_P_8021Q); + + return skb; +} 
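For kernels that lack vlan_hwaccel_put_tag(), the tg3_vlan_hwaccel_put_tag() shim just above tags the frame in software: it pushes VLAN_HLEN bytes of headroom, slides the two MAC addresses down to the new start of the buffer, and writes the 802.1Q EtherType and TCI where the original EtherType sat. Callers must therefore allocate skbs with VLAN_HLEN of spare headroom, which is what the TG3_COMPAT_VLAN_ALLOC_LEN / TG3_COMPAT_VLAN_RESERVE macros earlier in this hunk provide. A minimal sketch of how an RX-completion path might consume the shim (tg3_rx_deliver and its arguments are illustrative, not part of the patch):

    /* Hypothetical helper: hand a received frame to the stack, inserting
     * the VLAN tag in-band when hardware-accelerated tagging is absent.
     * Assumes the skb was allocated with VLAN_HLEN of extra headroom. */
    static void tg3_rx_deliver(struct sk_buff *skb, u16 vlan_tci, bool tagged)
    {
            if (tagged)
                    skb = tg3_vlan_hwaccel_put_tag(skb, vlan_tci);

            netif_receive_skb(skb); /* stack parses the in-band 802.1Q header */
    }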
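One bug worth flagging in the same compat hunk: both versions of tg3_invalid_pci_state() test "target_state != PCI_D3hot || target_state != PCI_D3cold". No value can equal both constants at once, so the condition is always true, ret is always forced to false, and the helper can never report an invalid state. The patch does not show the intended semantics; assuming the helper is meant to flag any suspend target that is not a D3 power state, a corrected sketch would be:

    static bool tg3_invalid_pci_state(struct tg3 *tp, pm_message_t state)
    {
            pci_power_t target_state;

    #ifdef BCM_HAS_PCI_TARGET_STATE
            target_state = tp->pdev->pm_cap ? pci_target_state(tp->pdev) : PCI_D3hot;
    #else
            target_state = pci_choose_state(tp->pdev, state);
    #endif
            /* Assumed intent: D3hot/D3cold are the valid power-down targets;
             * report anything else as invalid. */
            return target_state != PCI_D3hot && target_state != PCI_D3cold;
    }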
+ +#endif /* BCM_HAS_VLAN_HWACCEL_PUT_TAG */ + +#ifdef BCM_USE_OLD_VLAN_INTERFACE +static void __tg3_set_rx_mode(struct net_device *); +static inline void tg3_netif_start(struct tg3 *tp); +static inline void tg3_netif_stop(struct tg3 *tp); +static inline void tg3_full_lock(struct tg3 *tp, int irq_sync); +static inline void tg3_full_unlock(struct tg3 *tp); + +static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) +{ + struct tg3 *tp = netdev_priv(dev); + + if (!netif_running(dev)) { + tp->vlgrp = grp; + return; + } + + tg3_netif_stop(tp); + + tg3_full_lock(tp, 0); + + tp->vlgrp = grp; + + /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */ + __tg3_set_rx_mode(dev); + + tg3_netif_start(tp); + + tg3_full_unlock(tp); +} + +#ifndef BCM_HAS_NET_DEVICE_OPS +#ifndef BCM_HAS_VLAN_GROUP_SET_DEVICE +static inline void vlan_group_set_device(struct vlan_group *vg, int vlan_id, + struct net_device *dev) +{ + if (vg) + vg->vlan_devices[vlan_id] = dev; +} +#endif + +static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) +{ + struct tg3 *tp = netdev_priv(dev); + + if (netif_running(dev)) + tg3_netif_stop(tp); + + tg3_full_lock(tp, 0); + vlan_group_set_device(tp->vlgrp, vid, NULL); + tg3_full_unlock(tp); + + if (netif_running(dev)) + tg3_netif_start(tp); +} +#endif /* BCM_HAS_NET_DEVICE_OPS */ +#endif /* BCM_USE_OLD_VLAN_INTERFACE */ +#endif /* BCM_KERNEL_SUPPORTS_8021Q */ + + +#ifndef BCM_HAS_NETDEV_UPDATE_FEATURES +static u32 tg3_get_rx_csum(struct net_device *dev) +{ + return (dev->features & NETIF_F_RXCSUM) != 0; +} + +static int tg3_set_rx_csum(struct net_device *dev, u32 data) +{ + struct tg3 *tp = netdev_priv(dev); + + /* BROKEN_CHECKSUMS */ + if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0) { + if (data != 0) + return -EINVAL; + return 0; + } + + spin_lock_bh(&tp->lock); + if (data) + dev->features |= NETIF_F_RXCSUM; + else + dev->features &= ~NETIF_F_RXCSUM; + spin_unlock_bh(&tp->lock); + + return 0; +} + +#ifdef BCM_HAS_SET_TX_CSUM +static int tg3_set_tx_csum(struct net_device *dev, u32 data) +{ + struct tg3 *tp = netdev_priv(dev); + + /* BROKEN_CHECKSUMS */ + if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0) { + if (data != 0) + return -EINVAL; + return 0; + } + + if (tg3_flag(tp, 5755_PLUS)) +#if defined(BCM_HAS_ETHTOOL_OP_SET_TX_IPV6_CSUM) + ethtool_op_set_tx_ipv6_csum(dev, data); +#elif defined(BCM_HAS_ETHTOOL_OP_SET_TX_HW_CSUM) + ethtool_op_set_tx_hw_csum(dev, data); +#else + tg3_set_tx_hw_csum(dev, data); +#endif + else + ethtool_op_set_tx_csum(dev, data); + + return 0; +} +#endif + +#if TG3_TSO_SUPPORT != 0 +static int tg3_set_tso(struct net_device *dev, u32 value) +{ + struct tg3 *tp = netdev_priv(dev); + + if (!tg3_flag(tp, TSO_CAPABLE)) { + if (value) + return -EINVAL; + return 0; + } + if ((dev->features & NETIF_F_IPV6_CSUM) && + (tg3_flag(tp, HW_TSO_2) || + tg3_flag(tp, HW_TSO_3))) { + if (value) { + dev->features |= NETIF_F_TSO6; + if (tg3_flag(tp, HW_TSO_3) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && + GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) + dev->features |= NETIF_F_TSO_ECN; + } else + dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN); + } + return ethtool_op_set_tso(dev, value); +} +#endif + +static void netdev_update_features(struct net_device *dev) +{ + struct tg3 *tp = netdev_priv(dev); + + if (dev->mtu > ETH_DATA_LEN) { + if 
(tg3_flag(tp, 5780_CLASS)) { +#if TG3_TSO_SUPPORT != 0 + ethtool_op_set_tso(dev, 0); +#endif + } + } +} +#endif /* BCM_HAS_NETDEV_UPDATE_FEATURES */ + +#ifndef BCM_HAS_SET_PHYS_ID +enum ethtool_phys_id_state { + ETHTOOL_ID_INACTIVE, + ETHTOOL_ID_ACTIVE, + ETHTOOL_ID_ON, + ETHTOOL_ID_OFF +}; + +static int tg3_set_phys_id(struct net_device *dev, + enum ethtool_phys_id_state state); +static int tg3_phys_id(struct net_device *dev, u32 data) +{ + struct tg3 *tp = netdev_priv(dev); + int i; + + if (!netif_running(tp->dev)) + return -EAGAIN; + + if (data == 0) + data = UINT_MAX / 2; + + for (i = 0; i < (data * 2); i++) { + if ((i % 2) == 0) + tg3_set_phys_id(dev, ETHTOOL_ID_ON); + else + tg3_set_phys_id(dev, ETHTOOL_ID_OFF); + + if (msleep_interruptible(500)) + break; + } + tg3_set_phys_id(dev, ETHTOOL_ID_INACTIVE); + return 0; +} +#endif /* BCM_HAS_SET_PHYS_ID */ + +#ifdef BCM_HAS_GET_RXFH_INDIR +#ifndef BCM_HAS_GET_RXFH_INDIR_SIZE +static int tg3_get_rxfh_indir(struct net_device *dev, + struct ethtool_rxfh_indir *indir) +{ + struct tg3 *tp = netdev_priv(dev); + int i; + + if (!tg3_flag(tp, SUPPORT_MSIX)) + return -EINVAL; + + if (!indir->size) { + indir->size = TG3_RSS_INDIR_TBL_SIZE; + return 0; + } + + if (indir->size != TG3_RSS_INDIR_TBL_SIZE) + return -EINVAL; + + for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) + indir->ring_index[i] = tp->rss_ind_tbl[i]; + + return 0; +} + +static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt); +static void tg3_rss_write_indir_tbl(struct tg3 *tp); +static inline void tg3_full_lock(struct tg3 *tp, int irq_sync); +static inline void tg3_full_unlock(struct tg3 *tp); + +static int tg3_set_rxfh_indir(struct net_device *dev, + const struct ethtool_rxfh_indir *indir) +{ + struct tg3 *tp = netdev_priv(dev); + size_t i; + + if (!tg3_flag(tp, SUPPORT_MSIX)) + return -EINVAL; + + if (!indir->size) { + tg3_flag_clear(tp, USER_INDIR_TBL); + tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt); + } else { + int limit; + + /* Validate size and indices */ + if (indir->size != TG3_RSS_INDIR_TBL_SIZE) + return -EINVAL; + + if (netif_running(dev)) + limit = tp->irq_cnt; + else { + limit = num_online_cpus(); + if (limit > TG3_IRQ_MAX_VECS_RSS) + limit = TG3_IRQ_MAX_VECS_RSS; + } + + /* The first interrupt vector only + * handles link interrupts. + */ + limit -= 1; + + /* Check the indices in the table. + * Leave the existing table unmodified + * if an error is detected. + */ + for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) + if (indir->ring_index[i] >= limit) + return -EINVAL; + + tg3_flag_set(tp, USER_INDIR_TBL); + + for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) + tp->rss_ind_tbl[i] = indir->ring_index[i]; + } + + if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS)) + return 0; + + /* It is legal to write the indirection + * table while the device is running. 
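+ * Taking the full lock below merely serializes the update against + * resets and other configuration changes; traffic is not stopped.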
+ */ + tg3_full_lock(tp, 0); + tg3_rss_write_indir_tbl(tp); + tg3_full_unlock(tp); + + return 0; +} +#endif /* !BCM_HAS_GET_RXFH_INDIR_SIZE */ +#endif /* BCM_HAS_GET_RXFH_INDIR */ + +#ifdef __VMKLNX__ /** * skb_copy_expand - copy and expand sk_buff @@ -78,6 +473,4 @@ void *memmove(void *dest, const void *src, size_t count) } return dest; } - - #endif diff --git a/vmkdrivers/src_9/drivers/net/tg3/tg3_firmware.h b/vmkdrivers/src_9/drivers/net/tg3/tg3_firmware.h index e8399abacefe07f5a5a566b0fd8cd4300d7d041c..4a7e932f333b7cb332c996a0e58470f5f96821f3 100644 --- a/vmkdrivers/src_9/drivers/net/tg3/tg3_firmware.h +++ b/vmkdrivers/src_9/drivers/net/tg3/tg3_firmware.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2009-2010 Broadcom Corporation. */ +/* Copyright (C) 2009-2012 Broadcom Corporation. */ #ifdef NETIF_F_TSO #define TG3_TSO_SUPPORT 1 diff --git a/vmkdrivers/src_9/drivers/net/tg3/tg3_flags.h b/vmkdrivers/src_9/drivers/net/tg3/tg3_flags.h index 9501ea003452a61678ef75b4b0d7969f8859738f..270fe7172afb86d500d77058543bc49eb4e7765d 100644 --- a/vmkdrivers/src_9/drivers/net/tg3/tg3_flags.h +++ b/vmkdrivers/src_9/drivers/net/tg3/tg3_flags.h @@ -11,6 +11,8 @@ #define BCM_HAS_PCI_CHOOSE_STATE #define BCM_HAS_PCI_ENABLE_WAKE #define BCM_HAS_PCI_SET_POWER_STATE +#define BCM_HAS_PCI_EEH_SUPPORT +#define BCM_HAS_DEVICE_WAKEUP_API #define BCM_HAS_PCI_DMA_MAPPING_ERROR #define BCM_HAS_PCIE_SET_READRQ #define BCM_HAS_PRINT_MAC @@ -22,9 +24,13 @@ #define BCM_HAS_IP_HDR #define BCM_HAS_IP_HDRLEN #define BCM_HAS_TCP_HDR +#define BCM_HAS_TCP_HDRLEN #define BCM_HAS_TCP_OPTLEN #define BCM_HAS_STRUCT_NETDEV_QUEUE +#define BCM_HAS_NETDEV_PRIV +#define BCM_USE_OLD_VLAN_INTERFACE #define BCM_HAS_ALLOC_ETHERDEV_MQ #define BCM_HAS_NETIF_TX_LOCK #define BCM_HAS_DEV_DRIVER_STRING +#define BCM_HAS_DEV_NAME #define BCM_HAS_DMA_DATA_DIRECTION diff --git a/vmkdrivers/src_9/drivers/net/tg3/tg3_vmware.c b/vmkdrivers/src_9/drivers/net/tg3/tg3_vmware.c index b47218b439a3067eaefe4cb2a201ec00674c02bb..f355213b2505d9c8ba9e9e63b48b8028ce8c7aa7 100644 --- a/vmkdrivers/src_9/drivers/net/tg3/tg3_vmware.c +++ b/vmkdrivers/src_9/drivers/net/tg3/tg3_vmware.c @@ -1,7 +1,14 @@ -/* Copyright (C) 2010 Broadcom Corporation. - * Portions Copyright (C) VMware, Inc. 2007-2011. All Rights Reserved. +/* Copyright (C) 2010 - 2012 Broadcom Corporation. + * Portions Copyright (C) VMware, Inc. 2007-2012. All Rights Reserved. */ +#define TG3_NETQ_WAIT_EVENT_TIMEOUT msecs_to_jiffies(100) +#define TG3_VRQ_CHANGE_TIMEOUT_US 10000 +#define TG3_VRQ_FLUSH_TIMEOUT_US 50000 +#define TG3_VRQ_MAX_NUM_TX_QS(tp) \ + (tg3_flag((tp), ENABLE_TSS) ? (tp)->irq_cnt - 2 : 0) +#define TG3_VRQ_MAX_NUM_RX_QS(tp) (tp->irq_cnt - 1) + static void tg3_vmware_timer(struct tg3 *tp) { /* @@ -17,13 +24,6 @@ static void tg3_vmware_timer(struct tg3 *tp) if (netif_carrier_ok(tp->dev)) { u32 rx_mode = tr32(MAC_RX_MODE); if (!(rx_mode & RX_MODE_PROMISC) && (rx_mode != tp->rx_mode)) { - u32 val; - tg3_read_mem(tp, 0xc04, &val); - if (val != 1) { - netdev_info(tp->dev, "ASF has wrong host driver state (0x%x)\n", - val); - } - /* * We love to warn the users every time there is such a * register reset, but we do not want to do it forever @@ -31,12 +31,10 @@ static void tg3_vmware_timer(struct tg3 *tp) */ if (tp->vmware.rx_mode_reset_counter < 200) { netdev_info(tp->dev, "%s: rx_mode " - "0x%x(%s)=>0x%x tg3_flags 0x%x " - "tg3_flags2 0x%x\n", + "0x%x(%s)=>0x%x", tp->dev->name, rx_mode, - rx_mode & RX_MODE_PROMISC ? 
"on" : "off", - tp->rx_mode, tp->tg3_flags, - tp->tg3_flags2); + rx_mode & RX_MODE_PROMISC ? "on" : "off", + tp->rx_mode); tp->vmware.rx_mode_reset_counter++; } tw32_f(MAC_RX_MODE, tp->rx_mode); @@ -48,8 +46,8 @@ static void tg3_vmware_timer(struct tg3 *tp) * generate an interrupt on link-up state changes. bug 89197. */ if (!netif_carrier_ok(tp->dev) && - !(tp->tg3_flags & - (TG3_FLAG_USE_LINKCHG_REG | TG3_FLAG_POLL_SERDES))) { + !(tg3_flag(tp, USE_LINKCHG_REG)) && + !(tg3_flag(tp, POLL_SERDES))) { struct tg3_hw_status *sblk = tp->napi[0].hw_status; if (sblk->status & SD_STATUS_LINK_CHG) { sblk->status = SD_STATUS_UPDATED | @@ -59,13 +57,63 @@ static void tg3_vmware_timer(struct tg3 *tp) } } +/* The following debug buffers and exported routines are used by GDB to access + * * tg3 hardware registers when doing live debug over serial port. */ +#define DBG_BUF_SZ 128 + +static u32 tg3_dbg_buf[DBG_BUF_SZ]; + +void tg3_dbg_read32(struct net_device *dev, u32 off, u32 len) +{ + struct tg3 *tp = netdev_priv(dev); + u32 *buf = tg3_dbg_buf; + + memset(tg3_dbg_buf, 0, sizeof(tg3_dbg_buf)); + + if (off & 0x3) { + len += off & 0x3; + off &= ~0x3; + } + + if (off >= TG3_REG_BLK_SIZE) + return; + + if (len & 0x3) + len = (len + 3) & ~3; + + if (len > DBG_BUF_SZ) + len = DBG_BUF_SZ; + + if (off + len > TG3_REG_BLK_SIZE) + len = TG3_REG_BLK_SIZE - off; + + while (len > 0) { + *buf = tr32(off); + buf++; + off += 4; + len -= 4; + } +} +EXPORT_SYMBOL(tg3_dbg_read32); + +void tg3_dbg_write32(struct net_device *dev, u32 off, u32 val) +{ + struct tg3 *tp = netdev_priv(dev); + + if (off & 0x3) + return; + + tw32(off, val); +} +EXPORT_SYMBOL(tg3_dbg_write32); + #if !defined(TG3_VMWARE_BMAPILNX_DISABLE) static int tg3_vmware_ioctl_cim(struct net_device *dev, struct ifreq *ifr) { struct tg3 *tp = netdev_priv(dev); void __user *useraddr = ifr->ifr_data; - struct tg3_ioctl_req req; + struct brcm_vmware_ioctl_req req; int rc = 0; u32 val; @@ -75,64 +123,114 @@ tg3_vmware_ioctl_cim(struct net_device *dev, struct ifreq *ifr) return -EFAULT; } - switch(req.cmd) { - case TG3_VMWARE_CIM_CMD_ENABLE_NIC: + switch (req.cmd) { + case BRCM_VMWARE_CIM_CMD_ENABLE_NIC: netdev_info(dev, "%s: enable NIC\n", __func__); rc = tg3_open(tp->dev); break; - case TG3_VMWARE_CIM_CMD_DISABLE_NIC: + case BRCM_VMWARE_CIM_CMD_DISABLE_NIC: netdev_info(dev, "%s: disable NIC\n", __func__); rc = tg3_close(tp->dev); break; - case TG3_VMWARE_CIM_CMD_REG_READ: { + case BRCM_VMWARE_CIM_CMD_REG_READ: { + struct brcm_vmware_ioctl_reg_read_req *rd_req; - if(0x7ffc < req.cmd_req.reg_read.reg_offset) { - netdev_err(dev, "%s: reg read: " + rd_req = &req.cmd_req.reg_read_req; + if (0x7ffc < rd_req->reg_offset) { + netdev_err(dev, "%s: %s: " "out of range: req reg: 0x%x\n", - __func__, req.cmd_req.reg_read.reg_offset); + __func__, "reg read", rd_req->reg_offset); + rc = -EINVAL; + break; + } + + if (rd_req->reg_offset & 0x3) { + netdev_err(dev, "%s: %s: " + "offset not dword aligned: req reg: 0x%x\n", + __func__, "reg read", rd_req->reg_offset); rc = -EINVAL; break; } - val = tr32(req.cmd_req.reg_read.reg_offset); + switch (rd_req->reg_access_type) { + case BRCM_VMWARE_REG_ACCESS_DIRECT: + val = tr32(rd_req->reg_offset); + break; + case BRCM_VMWARE_REG_ACCESS_PCI_CFG: + pci_read_config_dword(tp->pdev, + rd_req->reg_offset, &val); + break; + default: + netdev_err(dev, "%s: %s: " + "invalid access method: access type: 0x%x\n", + __func__, "reg read", + rd_req->reg_access_type); + rc = -EINVAL; + break; + } - netdev_info(dev, "%s: reg read: reg: 0x%x value:0x%x", - 
__func__, req.cmd_req.reg_read.reg_offset, - req.cmd_req.reg_read.reg_value); - req.cmd_req.reg_read.reg_value = val; + req.cmd_req.reg_read_req.reg_value = val; + netdev_info(dev, "%s: %s: reg: 0x%x value:0x%x", + __func__, "reg read", rd_req->reg_offset, + rd_req->reg_value); break; - } case TG3_VMWARE_CIM_CMD_REG_WRITE: { - if(0x7ffc < req.cmd_req.reg_write.reg_offset) { - netdev_err(dev, "%s: reg write: " + } case BRCM_VMWARE_CIM_CMD_REG_WRITE: { + struct brcm_vmware_ioctl_reg_write_req *wr_req; + + wr_req = &req.cmd_req.reg_write_req; + if (0x7ffc < wr_req->reg_offset) { + netdev_err(dev, "%s: %s: " "out of range: req reg: 0x%x\n", - __func__, req.cmd_req.reg_write.reg_offset); + __func__, "reg write", wr_req->reg_offset); + rc = -EINVAL; + break; + } + + if (wr_req->reg_offset & 0x3) { + netdev_err(dev, "%s: %s: " + "offset not dword aligned: req reg: 0x%x\n", + __func__, "reg write", wr_req->reg_offset); rc = -EINVAL; break; } - netdev_info(dev, "%s: reg write: reg: 0x%x value:0x%x", - __func__, req.cmd_req.reg_write.reg_offset, - req.cmd_req.reg_write.reg_value); + switch (wr_req->reg_access_type) { + case BRCM_VMWARE_REG_ACCESS_DIRECT: + tw32(wr_req->reg_offset, wr_req->reg_value); + break; + case BRCM_VMWARE_REG_ACCESS_PCI_CFG: + pci_write_config_dword(tp->pdev, wr_req->reg_offset, + wr_req->reg_value); + break; + default: + netdev_err(dev, "%s: %s: " + "invalid access method: access type: 0x%x\n", + __func__, "reg write", + wr_req->reg_access_type); + rc = -EINVAL; + break; + } - tw32(req.cmd_req.reg_write.reg_offset, - req.cmd_req.reg_write.reg_value); + netdev_info(dev, "%s: %s: reg: 0x%x value:0x%x", + __func__, "reg write", wr_req->reg_offset, + wr_req->reg_value); break; - } case TG3_VMWARE_CIM_CMD_GET_NIC_PARAM: + } case BRCM_VMWARE_CIM_CMD_GET_NIC_PARAM: netdev_info(dev, "%s: get NIC param\n", __func__); - req.cmd_req.get_nic_param.mtu = dev->mtu; - memcpy(req.cmd_req.get_nic_param.current_mac_addr, + req.cmd_req.get_nic_param_req.mtu = dev->mtu; + memcpy(req.cmd_req.get_nic_param_req.current_mac_addr, dev->dev_addr, - sizeof(req.cmd_req.get_nic_param.current_mac_addr)); + sizeof(req.cmd_req.get_nic_param_req.current_mac_addr)); break; - case TG3_VMWARE_CIM_CMD_GET_NIC_STATUS: + case BRCM_VMWARE_CIM_CMD_GET_NIC_STATUS: netdev_info(dev, "%s: get NIC status\n", __func__); - req.cmd_req.get_nic_status.nic_status = netif_running(dev); + req.cmd_req.get_nic_status_req.nic_status = netif_running(dev); break; default: netdev_err(dev, "%s: unknown req.cmd: 0x%x\n", @@ -144,9 +242,991 @@ tg3_vmware_ioctl_cim(struct net_device *dev, struct ifreq *ifr) copy_to_user(useraddr, &req, sizeof(req))) { netdev_err(dev, "%s: couldn't copy to user tg3_ioctl_req\n", __func__); - return -EFAULT; + return -EFAULT; } return rc; } #endif /* TG3_VMWARE_BMAPILNX */ + +#ifdef TG3_VMWARE_NETQ_ENABLE +static void tg3_vmware_fetch_stats(struct tg3 *tp) +{ + int i; + u32 addr; + struct tg3_napi *tnapi = &tp->napi[0]; + + tnapi->netq.stats.rx_bytes_hw += tr32(0x9d0); + tnapi->netq.stats.rx_errors_hw += tr32(0x9d4); + tnapi->netq.stats.rx_packets_hw += tr32(0x9d8); + + for (i = 1, addr = 0xa00; i < 16; i++, addr += 0x20) { + tnapi = &tp->napi[i]; + + tnapi->netq.stats.tx_bytes += tr32(addr + 0x00); + tnapi->netq.stats.tx_ucast_packets += tr32(addr + 0x04); + tnapi->netq.stats.tx_mcast_packets += tr32(addr + 0x08); + tnapi->netq.stats.tx_bcast_packets += tr32(addr + 0x0c); + + tnapi->netq.stats.rx_bytes_hw += tr32(addr + 0x10); + tnapi->netq.stats.rx_errors_hw += tr32(addr + 0x14); + 
tnapi->netq.stats.rx_packets_hw += tr32(addr + 0x18); + } +} + +static void tg3_set_prod_bdinfo(struct tg3 *tp, u32 bdinfo_addr, + dma_addr_t mapping, u32 maxlen_flags) +{ + tw32(bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, + ((u64) mapping >> 32)); + tw32(bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, + ((u64) mapping & 0xffffffff)); + tw32(bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS, maxlen_flags); + /* Leave the nic addr field alone */ +} + +static void tg3_rx_prod_rcb_disable(struct tg3 *tp, u32 bdinfo_addr) +{ + tw32(bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS, BDINFO_FLAGS_DISABLED); + tw32(bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, 0); + tw32(bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, 0); +} + +static void tg3_disable_prod_rcbs(struct tg3 *tp, u32 ring) +{ + struct tg3_rx_prodring_set *tpr; + u32 offset; + + if (!(tg3_flag(tp, IOV_CAPABLE))) + return; + + tpr = &tp->napi[ring].prodring; + offset = RCVDBDI_JMB_BD_RING1 + (ring - 1) * 2 * TG3_BDINFO_SIZE; + + /* Disable the jumbo ring */ + tg3_rx_prod_rcb_disable(tp, offset); + offset += TG3_BDINFO_SIZE; + + /* Disable the standard ring */ + tg3_rx_prod_rcb_disable(tp, offset); +} + +static void tg3_setup_prod_rcbs(struct tg3 *tp, u32 ring) +{ + struct tg3_rx_prodring_set *tpr; + u32 offset; + + if (!(tg3_flag(tp, ENABLE_IOV))) + return; + + tpr = &tp->napi[ring].prodring; + offset = RCVDBDI_JMB_BD_RING1 + (ring - 1) * 2 * TG3_BDINFO_SIZE; + + if (tg3_flag(tp, JUMBO_RING_ENABLE)) { + tg3_set_prod_bdinfo(tp, offset, tpr->rx_jmb_mapping, + (TG3_RX_JMB_MAX_SIZE_5717 << BDINFO_FLAGS_MAXLEN_SHIFT) | + BDINFO_FLAGS_USE_EXT_RECV); + tpr->rx_jmb_prod_idx = tp->rx_jumbo_pending; + tw32_rx_mbox(tpr->rx_jmb_mbox, tpr->rx_jmb_prod_idx); + } + + offset += TG3_BDINFO_SIZE; + + tg3_set_prod_bdinfo(tp, offset, tpr->rx_std_mapping, + (TG3_RX_STD_MAX_SIZE_5717 << BDINFO_FLAGS_MAXLEN_SHIFT) | + (TG3_RX_STD_DMA_SZ << 2)); + tpr->rx_std_prod_idx = tp->rx_pending; + tw32_rx_mbox(tpr->rx_std_mbox, tpr->rx_std_prod_idx); +} + +static void tg3_setup_prod_mboxes(struct tg3 *tp, u32 ring) +{ + struct tg3_rx_prodring_set *tpr = &tp->napi[ring].prodring; + + if (!ring) { + tpr->rx_std_mbox = TG3_RX_STD_PROD_IDX_REG; + tpr->rx_jmb_mbox = TG3_RX_JMB_PROD_IDX_REG; + return; + } + + tpr->rx_std_mbox = MAILBOX_RCV_STD_PROD_IDX_RING1 + (ring - 1) * 4; + if (ring % 2) + tpr->rx_std_mbox -= 4; + else + tpr->rx_std_mbox += 4; + + if (ring < 12) + tpr->rx_jmb_mbox = MAILBOX_RCV_JUMBO_PROD_IDX_RING1 + + (ring - 1) * 4; + else + tpr->rx_jmb_mbox = MAILBOX_RCV_JMB_PROD_IDX_RING12 + + (ring - 12) * 4; + + if (ring % 2) + tpr->rx_jmb_mbox -= 4; + else + tpr->rx_jmb_mbox += 4; +} + +static int +tg3_netq_get_netqueue_features(vmknetddi_queueop_get_features_args_t *args) +{ + struct tg3 *tp = netdev_priv(args->netdev); + + args->features = VMKNETDDI_QUEUEOPS_FEATURE_RXQUEUES; + if (tg3_flag(tp, ENABLE_TSS)) + args->features |= VMKNETDDI_QUEUEOPS_FEATURE_TXQUEUES; + return VMKNETDDI_QUEUEOPS_OK; +} + +static int +tg3_netq_get_queue_count(vmknetddi_queueop_get_queue_count_args_t *args) +{ + struct tg3 *tp = netdev_priv(args->netdev); + + if (args->type == VMKNETDDI_QUEUEOPS_QUEUE_TYPE_RX) { + args->count = TG3_VRQ_MAX_NUM_RX_QS(tp); + + netdev_info(tp->dev, "Using %d RX NetQ rings\n", args->count); + + return VMKNETDDI_QUEUEOPS_OK; + } else if (args->type == VMKNETDDI_QUEUEOPS_QUEUE_TYPE_TX) { + args->count = TG3_VRQ_MAX_NUM_TX_QS(tp); + + netdev_info(tp->dev, "Using %d TX NetQ rings\n", args->count); + + return VMKNETDDI_QUEUEOPS_OK; + } else { + 
netdev_err(tp->dev, "Counting queue: invalid queue type\n"); + + return VMKNETDDI_QUEUEOPS_ERR; + } +} + +static int +tg3_netq_get_filter_count(vmknetddi_queueop_get_filter_count_args_t *args) +{ + /* Only support 1 Mac filter per queue */ + args->count = 1; + return VMKNETDDI_QUEUEOPS_OK; +} + +static int +tg3_netq_alloc_tx_queue(struct net_device *netdev, + vmknetddi_queueops_queueid_t *p_qid, + u16 *queue_mapping) +{ + struct tg3 *tp = netdev_priv(netdev); + int i; + + if (tp->vmware.netq.n_tx_queues_allocated >= TG3_VRQ_MAX_NUM_TX_QS(tp)) + return VMKNETDDI_QUEUEOPS_ERR; + + for (i = 1; i < 1 + TG3_VRQ_MAX_NUM_TX_QS(tp); i++) { + struct tg3_napi *tnapi = &tp->napi[i + 1]; + if (!(tnapi->netq.flags & TG3_NETQ_TXQ_ALLOCATED)) { + tnapi->netq.flags |= TG3_NETQ_TXQ_ALLOCATED; + tp->vmware.netq.n_tx_queues_allocated++; + *p_qid = VMKNETDDI_QUEUEOPS_MK_TX_QUEUEID(i); + *queue_mapping = i; + + tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, + tp->coal.tx_coalesce_usecs); + tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, + tp->coal.tx_max_coalesced_frames); + tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, + tp->coal.tx_max_coalesced_frames_irq); + + netdev_info(tp->dev, "TX NetQ allocated on %d\n", i); + return VMKNETDDI_QUEUEOPS_OK; + } + } + + netdev_err(tp->dev, "No free tx queues found!\n"); + return VMKNETDDI_QUEUEOPS_ERR; +} + +static int +tg3_netq_alloc_rx_queue(struct net_device *netdev, + vmknetddi_queueops_queueid_t *p_qid, + struct napi_struct **napi_p) +{ + int i; + struct tg3 *tp = netdev_priv(netdev); + + if (tp->vmware.netq.n_rx_queues_allocated >= TG3_VRQ_MAX_NUM_RX_QS(tp)) { + netdev_err(tp->dev, "RX Q alloc: No queues available!\n" ); + return VMKNETDDI_QUEUEOPS_ERR; + } + + for (i = 1; i < TG3_VRQ_MAX_NUM_RX_QS(tp) + 1; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + if (!(tnapi->netq.flags & TG3_NETQ_RXQ_ALLOCATED)) { + tnapi->netq.flags |= TG3_NETQ_RXQ_ALLOCATED; + tp->vmware.netq.n_rx_queues_allocated++; + *p_qid = VMKNETDDI_QUEUEOPS_MK_RX_QUEUEID(i); + *napi_p = &tnapi->napi; + + netdev_info(tp->dev, "RX NetQ allocated on %d\n", i); + return VMKNETDDI_QUEUEOPS_OK; + } + } + netdev_err(tp->dev, "No free rx queues found!\n"); + return VMKNETDDI_QUEUEOPS_ERR; +} + +static int +tg3_netq_alloc_queue(vmknetddi_queueop_alloc_queue_args_t *args) +{ + struct net_device *netdev = args->netdev; + struct tg3 *tp = netdev_priv(netdev); + + if (args->type == VMKNETDDI_QUEUEOPS_QUEUE_TYPE_TX) { + return tg3_netq_alloc_tx_queue(args->netdev, &args->queueid, + &args->queue_mapping); + } else if (args->type == VMKNETDDI_QUEUEOPS_QUEUE_TYPE_RX) { + return tg3_netq_alloc_rx_queue(args->netdev, &args->queueid, + &args->napi); + } else { + netdev_err(tp->dev, "Trying to alloc invalid queue type: %x\n", + args->type); + return VMKNETDDI_QUEUEOPS_ERR; + } +} + +static void tg3_netq_txq_free(struct tg3 *tp, int qid) +{ + struct tg3_napi *tnapi = &tp->napi[qid + 1]; + + tnapi->netq.flags &= ~TG3_NETQ_TXQ_ALLOCATED; + tp->vmware.netq.n_tx_queues_allocated--; + + /* Don't sit on tx packet completions. + * Send them up as soon as they are ready. 
+ */ + tw32(HOSTCC_TXCOL_TICKS_VEC1 + qid * 0x18, 0); + tw32(HOSTCC_TXMAX_FRAMES_VEC1 + qid * 0x18, 1); + tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + qid * 0x18, 1); +} + +static int +tg3_netq_free_tx_queue(struct net_device *netdev, + vmknetddi_queueops_queueid_t qid) +{ + struct tg3 *tp = netdev_priv(netdev); + struct tg3_napi *tnapi; + + u16 index = VMKNETDDI_QUEUEOPS_QUEUEID_VAL(qid); + if (index == 0 || index > TG3_VRQ_MAX_NUM_TX_QS(tp)) { + netdev_err(tp->dev, + "Trying to free invalid tx queue: %d\n", index); + return VMKNETDDI_QUEUEOPS_ERR; + } + + tnapi = &tp->napi[index + 1]; + + if (!(tnapi->netq.flags & TG3_NETQ_TXQ_ALLOCATED)) { + netdev_info(tp->dev, + "Trying to free unallocated tx queue: %d\n", index); + return VMKNETDDI_QUEUEOPS_ERR; + } + + tg3_netq_txq_free(tp, index); + + if (tnapi->tx_cons != tnapi->tx_prod) { + netdev_warn(tp->dev, + "Timeout submitting free NetQ TX Queue: %x\n", + index); + return VMKNETDDI_QUEUEOPS_ERR; + } + + netdev_info(tp->dev, "Free NetQ TX Queue: %d\n", index); + + return VMKNETDDI_QUEUEOPS_OK; +} + +static void +tg3_netq_disable_queue(struct tg3 *tp, int qid) +{ + int i; + struct tg3_napi *tnapi = &tp->napi[qid]; + u32 val; + + tnapi->netq.flags &= ~TG3_NETQ_RXQ_ENABLED; + + tg3_disable_prod_rcbs(tp, qid); + + /* Disable the VRQ */ + tw32(MAC_VRQ_ENABLE, tr32(MAC_VRQ_ENABLE) & ~(1 << qid)); + + /* Poll for acknowledgement from the hardware. */ + for (i = 0; i < TG3_VRQ_CHANGE_TIMEOUT_US / 10; i++) { + if (!(tr32(VRQ_STATUS) & (1 << qid))) + break; + udelay(10); + } + + if (i == TG3_VRQ_CHANGE_TIMEOUT_US / 10) + netdev_warn(tp->dev, + "Timeout performing initial queue disable: %x\n", + qid); + +#if 0 + /* Force a DMA of all remaining rx packets in this VRQ */ + tw32(HOSTCC_MODE, + tp->coalesce_mode | HOSTCC_MODE_ENABLE | tnapi->coal_now); + + /* There isn't a reliable way to tell that all the rx packets have + * made it to the host. The status tag can change before all packets + * have drained. The best we can do is wait a little bit. + */ + msleep(100); +#endif + + tw32(HOSTCC_PARAM_SET_RESET, 1 << qid); + + tw32_rx_mbox(tnapi->prodring.rx_std_mbox, 0); + tw32_rx_mbox(tnapi->prodring.rx_jmb_mbox, 0); + tw32_rx_mbox(tnapi->consmbox, 0); + tnapi->rx_rcb_ptr = 0; + + tw32(HOSTCC_RXCOL_TICKS_VEC1 + (qid - 1) * 0x18, 0); + tw32(HOSTCC_RXMAX_FRAMES_VEC1 + (qid - 1) * 0x18, 0); + tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + (qid - 1) * 0x18, 0); + + val = ~(1 << qid) << 15; + val |= VRQ_FLUSH_ENABLE | VRQ_FLUSH_RESET_ENABLE | + VRQ_FLUSH_STATUPDT_INT_ENABLE | VRQ_FLUSH_DISCARD_PKT_ENABLE | + VRQ_FLUSH_SW_FLUSH; + tw32(VRQ_FLUSH_CTRL, val); + + /* Poll for acknowledgement from the hardware. */ + for (i = 0; i < TG3_VRQ_FLUSH_TIMEOUT_US / 10; i++) { + if (!(tr32(VRQ_FLUSH_CTRL) & VRQ_FLUSH_SW_FLUSH)) + break; + udelay(10); + } + if (tr32(VRQ_FLUSH_CTRL) & VRQ_FLUSH_SW_FLUSH) { + netdev_warn(tp->dev, + "Timeout flushing hardware queue: %x\n", + qid); + } + + /* Force a status block update to refresh all the producer + * and consumer indexes. Also, we want to make sure we + * use the right tag when reenabling interrupts. 
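+ * The polling loop below then gives the hardware up to 500 ms to + * post the refreshed block with all-zero rx indexes.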
+ */ + tw32_f(HOSTCC_MODE, tp->coalesce_mode | + HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOINT_ON_NOW | + tnapi->coal_now); + + for (i = 0; i < 500; i++) { + if (tnapi->hw_status->rx_jumbo_consumer == 0 && + tnapi->hw_status->rx_consumer == 0 && + tnapi->hw_status->rx_mini_consumer == 0 && + tnapi->hw_status->idx[0].rx_producer == 0) + break; + mdelay(1); + } + + if (i == 500) { + netdev_warn(tp->dev, + "%d: Timeout waiting for final status block update.\n", + qid); + } + + /* Disable the VRQ mapper for this vector */ + tw32(MAC_VRQMAP_2H + qid * 8, 0); + + /* Disable the perfect match term */ + tw32(MAC_VRQMAP_1H + qid * 8, 0); + + /* Disable the VLAN match term */ + tw32(MAC_VRQFLT_FLTSET + qid * 4, 0); + tw32(MAC_VRQFLT_CFG + qid * 4, 0); + tw32(MAC_VRQFLT_PTRN + qid * 4, 0); + + /* Remove the perfect match filter. */ + if (qid < 4) { + tw32(MAC_ADDR_0_HIGH + (qid * 8), tr32(MAC_ADDR_0_HIGH)); + tw32(MAC_ADDR_0_LOW + (qid * 8), tr32(MAC_ADDR_0_LOW)); + } else { + tw32(MAC_VRQ_PMATCH_HI_5 + (qid - 4) * 8, + tr32(MAC_ADDR_0_HIGH)); + tw32(MAC_VRQ_PMATCH_LO_5 + (qid - 4) * 8, + tr32(MAC_ADDR_0_LOW)); + } + + tg3_rx_prodring_free(tp, &tnapi->prodring); + + tnapi->last_tag = tnapi->hw_status->status_tag; + tnapi->last_irq_tag = tnapi->last_tag; + + tnapi->chk_msi_cnt = 0; + tnapi->last_rx_cons = 0; + tnapi->last_tx_cons = 0; +} + +static int +tg3_netq_free_rx_queue(struct net_device *netdev, + vmknetddi_queueops_queueid_t qid) +{ + struct tg3 *tp = netdev_priv(netdev); + struct tg3_napi *tnapi; + + u16 index = VMKNETDDI_QUEUEOPS_QUEUEID_VAL(qid); + if (index == 0 || index > TG3_VRQ_MAX_NUM_RX_QS(tp)) { + netdev_err(tp->dev, + "Trying to free invalid rx queue: %d\n", index); + return VMKNETDDI_QUEUEOPS_ERR; + } + + tnapi = &tp->napi[index]; + if (!(tnapi->netq.flags & TG3_NETQ_RXQ_ALLOCATED)) { + netdev_warn(tp->dev, + "Attempt to free a queue that is already free: %x\n", + qid); + return VMKNETDDI_QUEUEOPS_ERR; + } + + if (tnapi->netq.flags & TG3_NETQ_RXQ_ENABLED) { + napi_disable(&tnapi->napi); + tw32_mailbox_f(tnapi->int_mbox, 1); + + tg3_netq_disable_queue(tp, index); + + napi_enable(&tnapi->napi); + tg3_int_reenable(tnapi); + +#ifdef BCM_HAS_NEW_IRQ_SIG + tg3_msi(0, tnapi); +#else + tg3_msi(0, tnapi, 0); +#endif + } + + tnapi->netq.flags &= ~TG3_NETQ_RXQ_ALLOCATED; + tp->vmware.netq.n_rx_queues_allocated--; + + netdev_info(tp->dev, "Free NetQ RX Queue: %x\n", index); + + return VMKNETDDI_QUEUEOPS_OK; +} + +static int +tg3_netq_free_queue(vmknetddi_queueop_free_queue_args_t *args) +{ + struct tg3 *tp = netdev_priv(args->netdev); + if (VMKNETDDI_QUEUEOPS_IS_TX_QUEUEID(args->queueid)) { + return tg3_netq_free_tx_queue(args->netdev, args->queueid); + } else if (VMKNETDDI_QUEUEOPS_IS_RX_QUEUEID(args->queueid)) { + return tg3_netq_free_rx_queue(args->netdev, args->queueid); + } else { + netdev_err(tp->dev, "free netq: invalid queue type\n"); + return VMKNETDDI_QUEUEOPS_ERR; + } +} + +static int +tg3_netq_get_queue_vector(vmknetddi_queueop_get_queue_vector_args_t *args) +{ + int qid; + struct tg3 *tp = netdev_priv(args->netdev); + + qid = VMKNETDDI_QUEUEOPS_QUEUEID_VAL(args->queueid); + + if (VMKNETDDI_QUEUEOPS_IS_TX_QUEUEID(args->queueid)) { + if (qid > tp->irq_cnt - 2) { + netdev_err(tp->dev, + "Attempt to get vector for " + "invalid TX queue ID 0x%x\n", + qid); + return VMKNETDDI_QUEUEOPS_ERR; + } + qid++; + } else if (VMKNETDDI_QUEUEOPS_IS_RX_QUEUEID(args->queueid)) { + if (qid > tp->irq_cnt - 1) { + netdev_err(tp->dev, + "Attempt to get vector for " + "invalid RX queue ID 0x%x\n", + qid); + 
return VMKNETDDI_QUEUEOPS_ERR; + } + } else { + netdev_err(tp->dev, + "Attempt to get vector for invalid " + "queue type, ID 0x%x\n", + qid); + return VMKNETDDI_QUEUEOPS_ERR; + } + + args->vector = tp->napi[qid].irq_vec; + + return VMKNETDDI_QUEUEOPS_OK; +} + +static int +tg3_netq_get_default_queue(vmknetddi_queueop_get_default_queue_args_t *args) +{ + struct tg3 *tp = netdev_priv(args->netdev); + + if (args->type == VMKNETDDI_QUEUEOPS_QUEUE_TYPE_RX) { + args->queueid = VMKNETDDI_QUEUEOPS_MK_RX_QUEUEID(0); + args->napi = &tp->napi[0].napi; + return VMKNETDDI_QUEUEOPS_OK; + } else if (args->type == VMKNETDDI_QUEUEOPS_QUEUE_TYPE_TX) { + args->queueid = VMKNETDDI_QUEUEOPS_MK_TX_QUEUEID(0); + args->queue_mapping = 0; + return VMKNETDDI_QUEUEOPS_OK; + } else + return VMKNETDDI_QUEUEOPS_ERR; +} + +static int +tg3_netq_remove_rx_filter(vmknetddi_queueop_remove_rx_filter_args_t *args) +{ + struct tg3 *tp = netdev_priv(args->netdev); + struct tg3_napi *tnapi; + u16 qid = VMKNETDDI_QUEUEOPS_QUEUEID_VAL(args->queueid); + u16 fid = VMKNETDDI_QUEUEOPS_FILTERID_VAL(args->filterid); + + if (!VMKNETDDI_QUEUEOPS_IS_RX_QUEUEID(args->queueid)) { + netdev_err(tp->dev, "0x%x is not a valid QID\n", qid); + return VMKNETDDI_QUEUEOPS_ERR; + } + + if (qid == 0 || qid > TG3_VRQ_MAX_NUM_RX_QS(tp)) { + netdev_err(tp->dev, "QID 0x%x is out of range\n", qid); + return VMKNETDDI_QUEUEOPS_ERR; + } + + tnapi = &tp->napi[qid]; + + if (!(tnapi->netq.flags & TG3_NETQ_RXQ_ENABLED)) { + netdev_err(tp->dev, "Filter not allocated on QID %d\n", qid); + return VMKNETDDI_QUEUEOPS_ERR; + } + + /* Only support one Mac filter per queue */ + if (fid != qid) { + netdev_err(tp->dev, "Invalid filter ID (0x%x) on QID %d\n", fid, qid); + return VMKNETDDI_QUEUEOPS_ERR; + } + + napi_disable(&tnapi->napi); + tw32_mailbox_f(tnapi->int_mbox, 1); + + tg3_netq_disable_queue(tp, qid); + + napi_enable(&tnapi->napi); + tg3_int_reenable(tnapi); + +#ifdef BCM_HAS_NEW_IRQ_SIG + tg3_msi(0, tnapi); +#else + tg3_msi(0, tnapi, 0); +#endif + + netdev_info(tp->dev, "NetQ remove RX filter: %d\n", qid); + + return VMKNETDDI_QUEUEOPS_OK; +} + +static int +tg3_netq_apply_rx_filter(vmknetddi_queueop_apply_rx_filter_args_t *args) +{ + u8 *macaddr = NULL; + struct tg3_napi *tnapi; + struct tg3 *tp = netdev_priv(args->netdev); + u16 qid = VMKNETDDI_QUEUEOPS_QUEUEID_VAL(args->queueid); + vmknetddi_queueops_filter_class_t class; + DECLARE_MAC_BUF(mac); + u32 val; + u16 vlan_id = 0; + + if (!VMKNETDDI_QUEUEOPS_IS_RX_QUEUEID(args->queueid)) { + netdev_err(tp->dev, "Invalid NetQ RX ID: %x\n", args->queueid); + return VMKNETDDI_QUEUEOPS_ERR; + } + + if (qid == 0 || qid > TG3_VRQ_MAX_NUM_RX_QS(tp)) { + netdev_err(tp->dev, + "Applying filter with invalid RX NetQ %d ID\n", qid); + return VMKNETDDI_QUEUEOPS_ERR; + } + + tnapi = &tp->napi[qid]; + + if (!(tnapi->netq.flags & TG3_NETQ_RXQ_ALLOCATED)) { + netdev_err(tp->dev, "RX NetQ %d not allocated\n", qid); + return VMKNETDDI_QUEUEOPS_ERR; + } + + if (tnapi->netq.flags & TG3_NETQ_RXQ_ENABLED) { + netdev_err(tp->dev, "RX NetQ %d already enabled\n", qid); + return VMKNETDDI_QUEUEOPS_ERR; + } + + class = vmknetddi_queueops_get_filter_class(&args->filter); + switch (class) { + case VMKNETDDI_QUEUEOPS_FILTER_VLAN: + case VMKNETDDI_QUEUEOPS_FILTER_MACADDR: + case VMKNETDDI_QUEUEOPS_FILTER_VLANMACADDR: + break; + default: + netdev_err(tp->dev, "Received invalid RX NetQ filter: %x\n", + class); + return VMKNETDDI_QUEUEOPS_ERR; + } + + if (class == VMKNETDDI_QUEUEOPS_FILTER_MACADDR || + class == VMKNETDDI_QUEUEOPS_FILTER_VLANMACADDR) + macaddr = (void
*)vmknetddi_queueops_get_filter_macaddr(&args->filter); + + if (class == VMKNETDDI_QUEUEOPS_FILTER_VLAN || + class == VMKNETDDI_QUEUEOPS_FILTER_VLANMACADDR) + vlan_id = vmknetddi_queueops_get_filter_vlanid(&args->filter); + + /* Populate the producer rings with skbs */ + if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) { + netdev_err(tp->dev, "Failed to allocate queue buffers!\n"); + return VMKNETDDI_QUEUEOPS_ERR; + } + + /* Make the producer rings available to the hardware */ + tg3_setup_prod_rcbs(tp, qid); + + if (macaddr) { + /* Program the perfect match filter. */ + if (qid < 4) { + val = (macaddr[0] << 8) | macaddr[1]; + tw32(MAC_ADDR_0_HIGH + (qid * 8), val); + + val = ((macaddr[2] << 24) | (macaddr[3] << 16) | + (macaddr[4] << 8) | (macaddr[5] << 0)); + tw32(MAC_ADDR_0_LOW + (qid * 8), val); + } else { + val = (macaddr[0] << 8) | macaddr[1]; + tw32(MAC_VRQ_PMATCH_HI_5 + (qid - 4) * 8, val); + + val = (macaddr[2] << 24) | (macaddr[3] << 16) | + (macaddr[4] << 8) | (macaddr[5] << 0); + tw32(MAC_VRQ_PMATCH_LO_5 + (qid - 4) * 8, val); + } + + /* Tell the hardware which perfect match filter to use. */ + val = MAC_VRQMAP_1H_PTA_PFEN | qid; + tw32(MAC_VRQMAP_1H + qid * 8, val); + } + + if (vlan_id) { + val = MAC_VRQFLT_PTRN_VLANID | (htons(vlan_id) << 16); + tw32(MAC_VRQFLT_PTRN + qid * 4, val); + + tw32(MAC_VRQFLT_FLTSET + qid * 4, 1 << qid); + + val = MAC_VRQFLT_ELEM_EN | MAC_VRQFLT_HDR_VLAN; + tw32(MAC_VRQFLT_CFG + qid * 4, val); + + val = MAC_VRQMAP_2H_PTA_VFEN | (1 << qid); + } else + val = 0; + + if (macaddr && vlan_id) + val |= MAC_VRQMAP_2H_PTA_AND; + else + val |= MAC_VRQMAP_2H_PTA_OR; + + val |= MAC_VRQMAP_2H_PTA_EN; + tw32(MAC_VRQMAP_2H + qid * 8, val); + + /* Enable the VRQ */ + val = tr32(MAC_VRQ_ENABLE); + tw32(MAC_VRQ_ENABLE, val | (1 << qid)); + + tw32(HOSTCC_RXCOL_TICKS_VEC1 + (qid - 1) * 0x18, + tp->coal.rx_coalesce_usecs); + tw32(HOSTCC_RXMAX_FRAMES_VEC1 + (qid - 1) * 0x18, + tp->coal.rx_max_coalesced_frames); + tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + (qid - 1) * 0x18, + tp->coal.rx_max_coalesced_frames_irq); + + tnapi->netq.flags |= TG3_NETQ_RXQ_ENABLED; + + /* Apply RX filter code here */ + args->filterid = VMKNETDDI_QUEUEOPS_MK_FILTERID(qid); + + netdev_info(tp->dev, "NetQ set RX Filter: %d [%s %d]\n", qid, + macaddr ?
print_mac(mac, macaddr) : "00:00:00:00:00:00", + vmknetddi_queueops_get_filter_vlanid(&args->filter)); + + return VMKNETDDI_QUEUEOPS_OK; +} + +static int +tg3_netq_get_queue_stats(vmknetddi_queueop_get_stats_args_t *args) +{ + u16 qid = VMKNETDDI_QUEUEOPS_QUEUEID_VAL(args->queueid); + struct tg3_napi *tnapi; + struct tg3 *tp = netdev_priv(args->netdev); + struct net_device_stats *netstats; + + tnapi = &tp->napi[qid]; + netstats = &tnapi->netq.net_stats; + + netstats->rx_packets = tnapi->netq.stats.rx_packets_sw; + netstats->rx_bytes = tnapi->netq.stats.rx_bytes_sw; + netstats->rx_errors = tnapi->netq.stats.rx_errors_sw; + netstats->rx_crc_errors = tnapi->netq.stats.rx_crc_errors; + netstats->rx_frame_errors = tnapi->netq.stats.rx_frame_errors; + netstats->tx_packets = tnapi->netq.stats.tx_ucast_packets + + tnapi->netq.stats.tx_mcast_packets + + tnapi->netq.stats.tx_bcast_packets; + netstats->tx_bytes = tnapi->netq.stats.tx_bytes; + + args->stats = netstats; + + return VMKNETDDI_QUEUEOPS_OK; +} + +static int +tg3_netqueue_ops(vmknetddi_queueops_op_t op, void *args) +{ + struct tg3 *tp; + + if (op == VMKNETDDI_QUEUEOPS_OP_GET_VERSION) + return vmknetddi_queueops_version( + (vmknetddi_queueop_get_version_args_t *)args); + + tp = netdev_priv(((vmknetddi_queueop_get_features_args_t *)args)->netdev); + + if (!(tg3_flag(tp, USING_MSIX)) || + !(tg3_flag(tp, INIT_COMPLETE)) || + tp->irq_cnt < 2) { + netdev_err(tp->dev, "Device not ready for netq ops!\n"); + return VMKNETDDI_QUEUEOPS_ERR; + } + + switch (op) { + case VMKNETDDI_QUEUEOPS_OP_GET_FEATURES: + return tg3_netq_get_netqueue_features( + (vmknetddi_queueop_get_features_args_t *)args); + + case VMKNETDDI_QUEUEOPS_OP_GET_QUEUE_COUNT: + return tg3_netq_get_queue_count( + (vmknetddi_queueop_get_queue_count_args_t *)args); + + case VMKNETDDI_QUEUEOPS_OP_GET_FILTER_COUNT: + return tg3_netq_get_filter_count( + (vmknetddi_queueop_get_filter_count_args_t *)args); + + case VMKNETDDI_QUEUEOPS_OP_ALLOC_QUEUE: + return tg3_netq_alloc_queue( + (vmknetddi_queueop_alloc_queue_args_t *)args); + + case VMKNETDDI_QUEUEOPS_OP_FREE_QUEUE: + return tg3_netq_free_queue( + (vmknetddi_queueop_free_queue_args_t *)args); + + case VMKNETDDI_QUEUEOPS_OP_GET_QUEUE_VECTOR: + return tg3_netq_get_queue_vector( + (vmknetddi_queueop_get_queue_vector_args_t *)args); + + case VMKNETDDI_QUEUEOPS_OP_GET_DEFAULT_QUEUE: + return tg3_netq_get_default_queue( + (vmknetddi_queueop_get_default_queue_args_t *)args); + + case VMKNETDDI_QUEUEOPS_OP_APPLY_RX_FILTER: + return tg3_netq_apply_rx_filter( + (vmknetddi_queueop_apply_rx_filter_args_t *)args); + + case VMKNETDDI_QUEUEOPS_OP_REMOVE_RX_FILTER: + return tg3_netq_remove_rx_filter( + (vmknetddi_queueop_remove_rx_filter_args_t *)args); + + case VMKNETDDI_QUEUEOPS_OP_GET_STATS: + return tg3_netq_get_queue_stats( + (vmknetddi_queueop_get_stats_args_t *)args); + + /* Unsupported for now */ + default: + break; + } + + netdev_warn(tp->dev, "Unhandled NETQUEUE OP %d\n", op); + + return VMKNETDDI_QUEUEOPS_ERR; +} + +static void tg3_netq_init(struct tg3 *tp) +{ + int i; + struct tg3_netq_napi *tnetq; + + if (!(tg3_flag(tp, IOV_CAPABLE))) { + for (i = 0; i < 5; i++) + tp->napi[i].srcprodring = &tp->napi[0].prodring; + return; + } + + if (tg3_netq_force[tp->vmware.netq.index] >= 0) { + tp->rxq_req = tg3_netq_force[tp->vmware.netq.index] + 1; + + tp->txq_req = tg3_netq_force[tp->vmware.netq.index]; + tp->txq_req = min(tp->txq_req, tp->txq_max); + tp->txq_req = max_t(u32, tp->txq_req, 1); + } + + for (i = 0; i < tp->irq_max; i++) +
tp->napi[i].srcprodring = &tp->napi[i].prodring; + + tnetq = &tp->napi[0].netq; + tnetq->flags |= TG3_NETQ_RXQ_ALLOCATED | TG3_NETQ_RXQ_ENABLED; + + tnetq->flags |= TG3_NETQ_TXQ_ALLOCATED; + tnetq = &tp->napi[1].netq; + tnetq->flags |= TG3_NETQ_TXQ_ALLOCATED; + + VMKNETDDI_REGISTER_QUEUEOPS(tp->dev, tg3_netqueue_ops); + netdev_info(tp->dev, "VMware NetQueue Ops is registered\n"); +} + +static void tg3_netq_free_all_qs(struct tg3 *tp) +{ + int i; + + for (i = 1; i < tp->irq_max; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + + if (i != 1 && (tnapi->netq.flags & TG3_NETQ_TXQ_ALLOCATED)) + tg3_netq_txq_free(tp, i - 1); + + if (tnapi->netq.flags & TG3_NETQ_RXQ_ALLOCATED) { + if (tnapi->netq.flags & TG3_NETQ_RXQ_ENABLED) + tg3_netq_disable_queue(tp, i); + + tnapi->netq.flags &= ~TG3_NETQ_RXQ_ALLOCATED; + tp->vmware.netq.n_rx_queues_allocated--; + } + } +} + +static void tg3_netq_invalidate_state(struct tg3 *tp) +{ + if (!(tg3_flag(tp, ENABLE_IOV))) + return; + + tg3_netq_free_all_qs(tp); + vmknetddi_queueops_invalidate_state(tp->dev); +} + +static void tg3_netq_restore(struct tg3 *tp) +{ + if (!(tg3_flag(tp, IOV_CAPABLE))) + return; + + /* Enable the VRQs */ + tw32(MAC_VRQ_ENABLE, MAC_VRQ_ENABLE_DFLT_VRQ); +} + +static void tg3_netq_limit_dflt_queue_counts(struct tg3 *tp) +{ + if (!tg3_flag(tp, IOV_CAPABLE)) + return; + + /* If the number of rx and tx queues was not formally + * requested, artificially cap the number of queues + * to ease system resource consumption. + */ + if (!tp->rxq_req) { + /* Allocated 9 queues (8 + 1) by default. */ + tp->rxq_cnt = min_t(u32, tp->rxq_cnt + 1, 9); + } + + if (!tp->txq_req) { + tp->txq_cnt = min_t(u32, tp->txq_cnt, 8); + } +} + +static u32 tg3_netq_tune_vector_count(struct tg3 *tp) +{ + u32 irqcnt = max(tp->rxq_cnt, tp->txq_cnt); + + if (irqcnt > 1 && tp->txq_cnt > tp->rxq_cnt - 1) + irqcnt++; + + return irqcnt; +} + +static int tg3_netq_stats_size(struct tg3 *tp) +{ + int size = TG3_NUM_STATS; + + if (!tg3_flag(tp, ENABLE_IOV)) + return size; + + size += ARRAY_SIZE(tg3_vmware_ethtool_stats_keys) * tp->irq_cnt; + + return size; +} + +static void tg3_netq_stats_get_strings(struct tg3 *tp, u8 *buf) +{ + int i; + + for (i = 0; i < tp->irq_cnt; i++) { + sprintf(buf, "[%d]: rx_packets (sw)", i); + buf += ETH_GSTRING_LEN; + sprintf(buf, "[%d]: rx_packets (hw)", i); + buf += ETH_GSTRING_LEN; + sprintf(buf, "[%d]: rx_bytes (sw)", i); + buf += ETH_GSTRING_LEN; + sprintf(buf, "[%d]: rx_bytes (hw)", i); + buf += ETH_GSTRING_LEN; + sprintf(buf, "[%d]: rx_errors (sw)", i); + buf += ETH_GSTRING_LEN; + sprintf(buf, "[%d]: rx_errors (hw)", i); + buf += ETH_GSTRING_LEN; + sprintf(buf, "[%d]: rx_crc_errors", i); + buf += ETH_GSTRING_LEN; + sprintf(buf, "[%d]: rx_frame_errors", i); + buf += ETH_GSTRING_LEN; + sprintf(buf, "[%d]: tx_bytes", i); + buf += ETH_GSTRING_LEN; + sprintf(buf, "[%d]: tx_ucast_packets", i); + buf += ETH_GSTRING_LEN; + sprintf(buf, "[%d]: tx_mcast_packets", i); + buf += ETH_GSTRING_LEN; + sprintf(buf, "[%d]: tx_bcast_packets", i); + buf += ETH_GSTRING_LEN; + } +} + +static void tg3_netq_stats_get(struct tg3 *tp, u64 *tmp_stats) +{ + int i; + + if (!tg3_flag(tp, ENABLE_IOV)) + return; + + /* Copy over the NetQ specific statistics */ + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + + memcpy(tmp_stats, &tnapi->netq.stats, + sizeof(struct tg3_netq_stats)); + tmp_stats += TG3_NETQ_NUM_STATS; + } +} + +static void tg3_netq_stats_clear(struct tg3 *tp) +{ + int i; + + for (i = 0; i < tp->irq_max; i++) { + struct tg3_napi 
*tnapi = &tp->napi[i]; + + memset(&tnapi->netq.stats, 0, + sizeof(struct tg3_netq_stats)); + } +} +#endif /* TG3_VMWARE_NETQ_ENABLE */ diff --git a/vmkdrivers/src_9/drivers/net/tg3/tg3_vmware.h b/vmkdrivers/src_9/drivers/net/tg3/tg3_vmware.h index cfd8b6b4de4c51307948d29bf4ac64d4710835b2..4167dc7a5e8d5fa52fe04e10af8bbe416a2b578f 100644 --- a/vmkdrivers/src_9/drivers/net/tg3/tg3_vmware.h +++ b/vmkdrivers/src_9/drivers/net/tg3/tg3_vmware.h @@ -1,60 +1,122 @@ -/* Copyright (C) 2010 Broadcom Corporation. - * Portions Copyright (C) VMware, Inc. 2007-2011. All Rights Reserved. +/* Copyright (C) 2010 - 2012 Broadcom Corporation. + * Portions Copyright (C) VMware, Inc. 2007-2012. All Rights Reserved. */ struct tg3; -struct tg3_vmware { - u32 rx_mode_reset_counter; -}; +/* + * On ESX the wmb() macro is defined as only a compiler barrier. + * The macro wmb() needs to be overridden to properly synchronize memory. + */ +#if defined(__VMKLNX__) +#undef wmb +#define wmb() asm volatile("sfence" ::: "memory") +#endif -#if !defined(TG3_VMWARE_BMAPILNX_DISABLE) +static int psod_on_tx_timeout = 0; -#define SIOTG3CIM 0x89F0 +module_param(psod_on_tx_timeout, int, 0); +MODULE_PARM_DESC(psod_on_tx_timeout, "For debugging purposes, crash the system " "when a tx timeout occurs"); -#define TG3_VMWARE_CIM_CMD_ENABLE_NIC 0x0001 -#define TG3_VMWARE_CIM_CMD_DISABLE_NIC 0x0002 -#define TG3_VMWARE_CIM_CMD_REG_READ 0x0003 -#define TG3_VMWARE_CIM_CMD_REG_WRITE 0x0004 -#define TG3_VMWARE_CIM_CMD_GET_NIC_PARAM 0x0005 -#define TG3_VMWARE_CIM_CMD_GET_NIC_STATUS 0x0006 +#ifndef TG3_VMWARE_NETQ_DISABLE +#define TG3_VMWARE_NETQ_ENABLE -struct tg3_ioctl_reg_read_req -{ - u32 reg_offset; - u32 reg_value; -} __attribute__((packed)); +#define TG3_MAX_NIC 32 +#define TG3_OPTION_UNSET -1 -struct tg3_ioctl_reg_write_req -{ - u32 reg_offset; - u32 reg_value; -} __attribute__((packed)); +static unsigned int __devinitdata tg3_netq_index; +static int __devinitdata tg3_netq_force[TG3_MAX_NIC+1] = + { [0 ... TG3_MAX_NIC] = TG3_OPTION_UNSET }; -struct tg3_ioctl_get_nic_param_req -{ - u32 version; - u32 mtu; - u8 current_mac_addr[8]; -} __attribute__((packed)); +module_param_array_named(force_netq, tg3_netq_force, int, NULL, 0); +MODULE_PARM_DESC(force_netq, +"Force the maximum number of NetQueues available per port (NetQueue capable devices only)"); -struct tg3_ioctl_get_nic_status_req -{ - u32 nic_status; // 1: Up, 0: Down -} __attribute__((packed)); +static const struct { + const char string[ETH_GSTRING_LEN]; +} tg3_vmware_ethtool_stats_keys[] = { + { "[0]: rx_packets (sw)" }, + { "[0]: rx_packets (hw)" }, + { "[0]: rx_bytes (sw)" }, + { "[0]: rx_bytes (hw)" }, + { "[0]: rx_errors (sw)" }, + { "[0]: rx_errors (hw)" }, + { "[0]: rx_crc_errors" }, + { "[0]: rx_frame_errors" }, + { "[0]: tx_bytes" }, + { "[0]: tx_ucast_packets" }, + { "[0]: tx_mcast_packets" }, + { "[0]: tx_bcast_packets" }, +}; -struct tg3_ioctl_req -{ - u32 cmd; - union { - // no struct for reset_nic command - struct tg3_ioctl_reg_read_req reg_read; - struct tg3_ioctl_reg_write_req reg_write; - struct tg3_ioctl_get_nic_param_req get_nic_param; - struct tg3_ioctl_get_nic_status_req get_nic_status; - } cmd_req; +/* + * Pack this structure so that we don't get an extra 8 bytes + * should this driver be built for a 128-bit CPU.
:) + */ +struct tg3_netq_stats { + u64 rx_packets_sw; + u64 rx_packets_hw; + u64 rx_bytes_sw; + u64 rx_bytes_hw; + u64 rx_errors_sw; + u64 rx_errors_hw; + u64 rx_crc_errors; + u64 rx_frame_errors; + u64 tx_bytes; + u64 tx_ucast_packets; + u64 tx_mcast_packets; + u64 tx_bcast_packets; } __attribute__((packed)); +#define TG3_NETQ_NUM_STATS (sizeof(struct tg3_netq_stats)/sizeof(u64)) + +struct tg3_netq_napi { + u32 flags; + #define TG3_NETQ_TXQ_ALLOCATED 0x0001 + #define TG3_NETQ_RXQ_ALLOCATED 0x0002 + #define TG3_NETQ_RXQ_ENABLED 0x0008 + #define TG3_NETQ_TXQ_FREE_STATE 0x0010 + #define TG3_NETQ_RXQ_FREE_STATE 0x0020 + + struct tg3_netq_stats stats; + struct net_device_stats net_stats; +}; + +struct tg3_vmware_netq { + u16 n_tx_queues_allocated; + u16 n_rx_queues_allocated; + + u32 index; +}; + +static void tg3_vmware_fetch_stats(struct tg3 *tp); +static void tg3_disable_prod_rcbs(struct tg3 *tp, u32 ring); +static void tg3_setup_prod_mboxes(struct tg3 *tp, u32 ring); +static void tg3_netq_init(struct tg3 *tp); +static void tg3_netq_free_all_qs(struct tg3 *tp); +static void tg3_netq_invalidate_state(struct tg3 *tp); +static void tg3_netq_restore(struct tg3 *tp); +static void tg3_netq_limit_dflt_queue_counts(struct tg3 *tp); +static u32 tg3_netq_tune_vector_count(struct tg3 *tp); +static int tg3_netq_stats_size(struct tg3 *tp); +static void tg3_netq_stats_get_strings(struct tg3 *tp, u8 *buf); +static void tg3_netq_stats_get(struct tg3 *tp, u64 *tmp_stats); +static void tg3_netq_stats_clear(struct tg3 *tp); +#endif /* TG3_VMWARE_NETQ_ENABLE */ + +struct tg3_vmware { + u32 rx_mode_reset_counter; + +#ifdef TG3_VMWARE_NETQ_ENABLE + struct tg3_vmware_netq netq; +#endif +}; + +#if !defined(TG3_VMWARE_BMAPILNX_DISABLE) + +#include "esx_ioctl.h" + static int tg3_vmware_ioctl_cim(struct net_device *dev, struct ifreq *ifr); diff --git a/vmkdrivers/src_9/drivers/scsi/fcoe/libfcoe.c b/vmkdrivers/src_9/drivers/scsi/fcoe/libfcoe.c index d1079b81ca48194a83c2f628078c67588941be93..36da13d20d6aee1c168e68175de266ad07a6759e 100644 --- a/vmkdrivers/src_9/drivers/scsi/fcoe/libfcoe.c +++ b/vmkdrivers/src_9/drivers/scsi/fcoe/libfcoe.c @@ -1807,6 +1807,11 @@ static int fcoe_ctlr_recv_vlan_notification(struct fcoe_ctlr *fip, struct fip_he if (!old_vlan_valid && (new_vlan != 0)) { struct net_device *netdev; + if (new_vlan > VLAN_MAX_VALID_VID) { + LIBFCOE_FIP_DBG(fip, "invalid vlan id %d, ignored\n", new_vlan); + return -1; + } + fip->vlan_id = new_vlan; netdev = (fip->lp)->tt.get_cna_netdev(fip->lp); diff --git a/vmkdrivers/src_9/drivers/scsi/lpfc820/lpfc_attr.c b/vmkdrivers/src_9/drivers/scsi/lpfc820/lpfc_attr.c index 53827d2187570e21ace56de0bf3ba1d45a042219..3ece38b3febcee478f5533315d66afd9a26fcc7d 100644 --- a/vmkdrivers/src_9/drivers/scsi/lpfc820/lpfc_attr.c +++ b/vmkdrivers/src_9/drivers/scsi/lpfc820/lpfc_attr.c @@ -2176,8 +2176,13 @@ lpfc_get_host_port_state(struct Scsi_Host *shost) case LPFC_LINK_UP: case LPFC_CLEAR_LA: case LPFC_HBA_READY: - /* Links up, beyond this port_type reports state */ - fc_host_port_state(shost) = FC_PORTSTATE_ONLINE; + /* Links up, reports port state accordingly */ + if (vport->port_state < LPFC_VPORT_READY) + fc_host_port_state(shost) = + FC_PORTSTATE_BYPASSED; + else + fc_host_port_state(shost) = + FC_PORTSTATE_ONLINE; break; case LPFC_HBA_ERROR: fc_host_port_state(shost) = FC_PORTSTATE_ERROR; diff --git a/vmkdrivers/src_9/drivers/scsi/mpt/mptsas/mptsas.c b/vmkdrivers/src_9/drivers/scsi/mpt/mptsas/mptsas.c index 
d57883dfb6b2ff7006cbd381aa4391fc0fd93efe..2c4ab5f8b5441877c3f61ab07f5d944e789f037f 100644 --- a/vmkdrivers/src_9/drivers/scsi/mpt/mptsas/mptsas.c +++ b/vmkdrivers/src_9/drivers/scsi/mpt/mptsas/mptsas.c @@ -2287,10 +2287,12 @@ mptsas_test_unit_ready(MPT_ADAPTER *ioc, u8 channel, u8 id, u16 count) } /* * LU becoming ready, or - * LU hasn't self-configured yet + * LU hasn't self-configured yet, or + * LU is in asymmetric access state transition */ if ((asc == 0x04 && ascq == 0x01) || (asc == 0x04 && ascq == 0x11) || + (asc == 0x04 && ascq == 0x0a) || asc == 0x3e) { state = DEVICE_RETRY; break; diff --git a/vmkdrivers/src_9/drivers/usb/input/hid-core.c b/vmkdrivers/src_9/drivers/usb/input/hid-core.c index e5452f474ae24a70b07828b74d6f87b0e53df77a..c86a38e991715cf93188d1c7f234f2abf157645a 100644 --- a/vmkdrivers/src_9/drivers/usb/input/hid-core.c +++ b/vmkdrivers/src_9/drivers/usb/input/hid-core.c @@ -1008,17 +1008,6 @@ static int usbhid_start(struct hid_device *hid) } } - init_waitqueue_head(&usbhid->wait); - INIT_WORK(&usbhid->reset_work, hid_reset); - INIT_WORK(&usbhid->restart_work, __usbhid_restart_queues); - setup_timer(&usbhid->io_retry, hid_retry_timeout, (unsigned long) hid); - - spin_lock_init(&usbhid->lock); - spin_lock_init(&usbhid->lock); - - usbhid->intf = intf; - usbhid->ifnum = interface->desc.bInterfaceNumber; - usbhid->urbctrl = usb_alloc_urb(0, GFP_KERNEL); if (!usbhid->urbctrl) { ret = -ENOMEM; @@ -1214,12 +1203,20 @@ static int hid_probe(struct usb_interface *intf, const struct usb_device_id *id) hid->driver_data = usbhid; usbhid->hid = hid; + usbhid->intf = intf; + usbhid->ifnum = interface->desc.bInterfaceNumber; #if defined(__VMKLNX__) if (interface->desc.bInterfaceProtocol == USB_INTERFACE_PROTOCOL_KEYBOARD) vmklnx_hcd_register_kbd_handler(intf); #endif + init_waitqueue_head(&usbhid->wait); + INIT_WORK(&usbhid->reset_work, hid_reset); + INIT_WORK(&usbhid->restart_work, __usbhid_restart_queues); + setup_timer(&usbhid->io_retry, hid_retry_timeout, (unsigned long) hid); + spin_lock_init(&usbhid->lock); + ret = hid_add_device(hid); if (ret) { if (ret != -ENODEV) diff --git a/vmkdrivers/src_9/vmklinux_9/vmware/linux_block.c b/vmkdrivers/src_9/vmklinux_9/vmware/linux_block.c index 9a3d3769375939db3385ff2c342c8d905340b10a..7367a9bfaf632407815d1cb30a3df626f6a69a45 100644 --- a/vmkdrivers/src_9/vmklinux_9/vmware/linux_block.c +++ b/vmkdrivers/src_9/vmklinux_9/vmware/linux_block.c @@ -1,5 +1,5 @@ /* **************************************************************** - * Portions Copyright 1998, 2010 VMware, Inc. + * Portions Copyright 1998, 2010, 2012 VMware, Inc. 
* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -2288,7 +2288,7 @@ retry: adapter->constraints.sgMaxEntries = MAX_SEGMENTS; adapter->constraints.sgElemSizeMult = SECTOR_SIZE; adapter->constraints.sgElemAlignment = 0; - adapter->constraints.sgElemStraddle = 0; + adapter->constraints.sgElemStraddle = DMA_32BIT_MASK + 1; VMK_ASSERT(strlen(vmk_NameToString(&adapterName)) < sizeof(adapter->name)); vmk_NameFormat(&adapter->name, "%s", vmk_NameToString(&adapterName)); @@ -2315,10 +2315,10 @@ retry: return retval; unrollBlockAdapter: - kfree(&pBlockAdapter); + kfree(pBlockAdapter); unrollBlockDevice: - kfree(&blockDevices[major]); + kfree(blockDevices[major]); blockDevices[major] = NULL; return -ENOMEM; @@ -2372,7 +2372,7 @@ unregister_blkdev(unsigned int major, const char *name) } if (bd->disks) { - kfree(&bd->disks); + kfree(bd->disks); } status = vmk_ScsiUnregisterAdapter(bd->adapter); @@ -2382,10 +2382,10 @@ unregister_blkdev(unsigned int major, const char *name) /* Free up the block mgmt Structure */ bd->adapter->mgmtAdapter.transport = VMK_STORAGE_ADAPTER_TRANSPORT_UNKNOWN; - kfree(&bd->adapter->mgmtAdapter.t.block); + kfree(bd->adapter->mgmtAdapter.t.block); vmk_ScsiFreeAdapter(bd->adapter); - kfree(&bd); + kfree(bd); blockDevices[major] = NULL; VMKLNX_DEBUG(2, "Device %s unregistered.", name); diff --git a/vmkdrivers/src_9/vmklinux_9/vmware/linux_net.c b/vmkdrivers/src_9/vmklinux_9/vmware/linux_net.c index 16a74ee66ff10645147e708aa49a7a16e0cb509c..325e5d1c78f7618aa25001003eebf100dba29137 100644 --- a/vmkdrivers/src_9/vmklinux_9/vmware/linux_net.c +++ b/vmkdrivers/src_9/vmklinux_9/vmware/linux_net.c @@ -262,6 +262,12 @@ map_skb_to_pkt(struct sk_buff *skb) if (skb_shinfo(skb)->gso_type != 0) { switch (skb_shinfo(skb)->gso_type) { case SKB_GSO_TCPV4: + if (unlikely(skb_shinfo(skb)->gso_size == 0)) { + printk("dropping LRO packet with zero gso_size\n"); + VMK_ASSERT(VMK_FALSE); + goto drop; + } + status = vmk_PktSetLargeTcpPacket(pkt, skb_shinfo(skb)->gso_size); VMK_ASSERT(status == VMK_OK); break; @@ -451,7 +457,7 @@ netif_receive_skb(struct sk_buff *skb) if (vmk_NetPollGetCurrent(&pollPriv) == VMK_OK) { napi = (struct napi_struct *)vmk_NetPollGetPrivate(pollPriv); } - if (!napi) { + if (!napi || vmk_SystemCheckState(VMK_SYSTEM_STATE_PANIC)) { pkt = skb->pkt; status = map_skb_to_pkt(skb); if (likely(status == NET_RX_SUCCESS)) { diff --git a/vmkdrivers/src_9/vmklinux_9/vmware/linux_scsi.c b/vmkdrivers/src_9/vmklinux_9/vmware/linux_scsi.c index 71b4e34864e2b12b6acdb29d75e49688fee5036f..893c7b930a7258eae958eece9d734a4042ee61fc 100644 --- a/vmkdrivers/src_9/vmklinux_9/vmware/linux_scsi.c +++ b/vmkdrivers/src_9/vmklinux_9/vmware/linux_scsi.c @@ -3177,9 +3177,12 @@ SCSILinuxWorldletFn(vmk_Worldlet wdt, void *data, yield += vmk_TimerUSToTC(100); } - vmk_WorldletAffinityTrackerCheck(tls->tracker, now); spin_unlock_irqrestore(&tls->lock, flags); + if (tls->tracker != NULL) { + vmk_WorldletAffinityTrackerCheck(tls->tracker, now); + } + vInfo = SCSILinuxGetVectorInfo(tls->vmk26Adap, tls->activatingVector); if (vInfo != NULL && tls->worldlet != NULL) { vmk_WorldletInterruptTracker *intTracker = vInfo->intTracker; diff --git a/vmkdrivers/src_9/vmklinux_9/vmware/linux_scsi_lld_if.c b/vmkdrivers/src_9/vmklinux_9/vmware/linux_scsi_lld_if.c index 1ec0bb37f145825ff63bfbfe68d607c7ac9a0209..ba44bb393a73d2b7a550742e38d32d91c491fcb3 100644 --- a/vmkdrivers/src_9/vmklinux_9/vmware/linux_scsi_lld_if.c +++ 
b/vmkdrivers/src_9/vmklinux_9/vmware/linux_scsi_lld_if.c @@ -165,31 +165,27 @@ static void vmklnx_scsi_update_lun_path(struct scsi_device *sdev, void *data); * scsi/scsi_host.h * \par ESX Deviation Notes: * This interface will assume a default value for - * Scsi_Host->dma_boundary to be 0 if the Scsi Host template does - * not specify a value for dma_boundary. This is different from - * the linux behavior which defaults to a 4G boundary in a similar - * situation. + * Scsi_Host->dma_boundary to be 4G if the Scsi Host template does + * not specify it; this is to make it compatible with Linux behavior. * \sa None. ********************************************************************** */ -/** - * scsi_host_alloc - allocate a Scsi_Host structure - * @sht: pointer to scsi host template - * @privateSize: additional size to be allocated as requested by the driver - * +/** + * scsi_host_alloc - allocate a Scsi_Host structure + * @sht: pointer to scsi host template + * @privateSize: additional size to be allocated as requested by the driver + * * Allocate a Scsi_Host structure - + * * ESX Deviation Notes: * This interface will assume a default value for - * Scsi_Host->dma_boundary to be 0 if the Scsi Host template does - * not specify a value for dma_boundary. This is different from - * the linux behavior which defaults to a 4G boundary in a similar - * situation. + * Scsi_Host->dma_boundary to be 4G if the Scsi Host template does + * not specify it; this is to make it compatible with Linux behavior. * * RETURN VALUE: * On Success pointer to the newly allocated Scsi_Host structure, * otherwise NULL - */ + */ /* _VMKLNX_CODECHECK_: scsi_host_alloc */ struct Scsi_Host * scsi_host_alloc(struct scsi_host_template *sht, int privateSize) @@ -270,7 +266,8 @@ scsi_host_alloc(struct scsi_host_template *sht, int privateSize) if (sht->dma_boundary) { sh->dma_boundary = sht->dma_boundary; } else { - sh->dma_boundary = 0; // avoid the checking of all SGE by upper layer + /* PR 842721 */ + sh->dma_boundary = DMA_32BIT_MASK; } retval = scsi_setup_command_freelist(sh);