diff --git a/BLD/build/HEADERS/vmkapi-current-all-public-bincomp/vmkernel64/release/net/vmkapi_net_proto.h b/BLD/build/HEADERS/vmkapi-current-all-public-bincomp/vmkernel64/release/net/vmkapi_net_proto.h
index c2218c6bba6fb2744873bbbd306d316db5d6a4b0..d93e580077a65ebb0cb026df5c2887dd401a14d1 100644
--- a/BLD/build/HEADERS/vmkapi-current-all-public-bincomp/vmkernel64/release/net/vmkapi_net_proto.h
+++ b/BLD/build/HEADERS/vmkapi-current-all-public-bincomp/vmkernel64/release/net/vmkapi_net_proto.h
@@ -251,13 +251,8 @@ typedef struct vmk_EthHdr {
 typedef struct vmk_VLANHdr {
    /** High four bits of the VLAN ID. */
    vmk_uint8 vlanIDHigh:4;
-   /** This field has two different interpretations. */
-   union {
-      /** The MAC addresses are in canonical format. */
-      vmk_uint8 canonical:1;
-      /** The frame is eligible to be dropped in the presence of congestion. */
-      vmk_uint8 dropEligible:1;
-   };
+   /** The frame is eligible to be dropped in the presence of congestion. */
+   vmk_uint8 dropEligible:1;
    /** Priority tag. */
    vmk_uint8 priority:3;
    /** Low eight bits of the VLAN ID. */
diff --git a/BLD/build/HEADERS/vmkapi-current-all-public/vmkernel64/release/net/vmkapi_net_proto.h b/BLD/build/HEADERS/vmkapi-current-all-public/vmkernel64/release/net/vmkapi_net_proto.h
index c2218c6bba6fb2744873bbbd306d316db5d6a4b0..d93e580077a65ebb0cb026df5c2887dd401a14d1 100644
--- a/BLD/build/HEADERS/vmkapi-current-all-public/vmkernel64/release/net/vmkapi_net_proto.h
+++ b/BLD/build/HEADERS/vmkapi-current-all-public/vmkernel64/release/net/vmkapi_net_proto.h
@@ -251,13 +251,8 @@ typedef struct vmk_EthHdr {
 typedef struct vmk_VLANHdr {
    /** High four bits of the VLAN ID. */
    vmk_uint8 vlanIDHigh:4;
-   /** This field has two different interpretations. */
-   union {
-      /** The MAC addresses are in canonical format. */
-      vmk_uint8 canonical:1;
-      /** The frame is eligible to be dropped in the presence of congestion. */
-      vmk_uint8 dropEligible:1;
-   };
+   /** The frame is eligible to be dropped in the presence of congestion. */
+   vmk_uint8 dropEligible:1;
    /** Priority tag. */
    vmk_uint8 priority:3;
    /** Low eight bits of the VLAN ID. */
diff --git a/BLD/build/version/buildNumber.h b/BLD/build/version/buildNumber.h
index c3ae9edfe4d53f88443670679826c8fda50a9d92..5002ee8782f59898c3879f0fc2c04cc5698f6123 100644
--- a/BLD/build/version/buildNumber.h
+++ b/BLD/build/version/buildNumber.h
@@ -1,6 +1,6 @@
-#define BUILD_NUMBER "build-1604073"
-#define BUILD_NUMBER_NUMERIC 1604073
-#define BUILD_NUMBER_NUMERIC_STRING "1604073"
-#define PRODUCT_BUILD_NUMBER "product-build-4509"
-#define PRODUCT_BUILD_NUMBER_NUMERIC 4509
-#define PRODUCT_BUILD_NUMBER_NUMERIC_STRING "4509"
+#define BUILD_NUMBER "build-1623387"
+#define BUILD_NUMBER_NUMERIC 1623387
+#define BUILD_NUMBER_NUMERIC_STRING "1623387"
+#define PRODUCT_BUILD_NUMBER "product-build-4950"
+#define PRODUCT_BUILD_NUMBER_NUMERIC 4950
+#define PRODUCT_BUILD_NUMBER_NUMERIC_STRING "4950"
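The vmkapi change above fixes the interpretation of bit 12 of the VLAN tag control word: it is now always the drop-eligible indicator (DEI) rather than an ambiguous union with the older canonical-format (CFI) reading, and the layout is otherwise unchanged, with the 12-bit VLAN ID still split around the DEI and priority bits. A minimal sketch of reassembling the ID from such a header (standalone C, with uint8_t standing in for vmk_uint8 and "vlanIDLow" as an assumed name for the low-byte field, which the hunk truncates before showing):

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the fixed vmk_VLANHdr; vlanIDLow is an assumed field name. */
    typedef struct VLANHdr {
       uint8_t vlanIDHigh:4;   /* high four bits of the VLAN ID */
       uint8_t dropEligible:1; /* drop-eligible indicator (DEI) */
       uint8_t priority:3;     /* 802.1p priority tag           */
       uint8_t vlanIDLow;      /* low eight bits of the VLAN ID */
    } VLANHdr;

    int main(void)
    {
       /* Example tag: VLAN 0x123 (291), priority 5, drop-eligible. */
       VLANHdr h = { .vlanIDHigh = 0x1, .dropEligible = 1,
                     .priority = 5, .vlanIDLow = 0x23 };
       unsigned vlanID = ((unsigned)h.vlanIDHigh << 8) | h.vlanIDLow;
       printf("vlan=%u prio=%u dei=%u\n", vlanID,
              (unsigned)h.priority, (unsigned)h.dropEligible);
       return 0;
    }

Because only the one-bit field's meaning changed, the struct's size and bit positions are identical before and after, which is why the same change appears verbatim in both the bincomp and non-bincomp copies of the header.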
diff --git a/BUILD.txt b/BUILD.txt
index 013b225e85782bdd28148f8a9bd872fba6cdd1ee..bc3e5e14dffd9f914c9386ea5b966732833a4fa1 100644
--- a/BUILD.txt
+++ b/BUILD.txt
@@ -1,8 +1,13 @@
 The following assumes the files disclosed for this package have been
-copied to the directory "/usr/vmware/src", and any commands that need to
-be executed for the disclosure should be executed from this directory
-on a "centos-5.3-x64" system (see the file "SYSTEMS.txt" for definition
-of this system).
+copied to the directory "/usr/vmware/src":
+
+rm -rf /usr/vmware/src
+mkdir /usr/vmware/src
+cp * /usr/vmware/src
+
+And any commands that need to be executed for the disclosure should be
+executed from this directory on a "centos-5.3-x64" system (see the file
+"SYSTEMS.txt" for definition of this system).
 
 This package should be built on a "centos-5.3-x64" system. Please see
 the "System Configurations" document for a definition of the configuration
diff --git a/machines.cfgx b/machines.cfgx
new file mode 100644
index 0000000000000000000000000000000000000000..159253b605af3645b1bf8b73dfe1df00e71afcbc
--- /dev/null
+++ b/machines.cfgx
@@ -0,0 +1,26 @@
+
+
+    10.111.110.45
+    root
+    password
+
+    centos-5.3-x64
+    Rush-CentOS-5.3-x64
+    linux
+    root
+    welcome
+
+
+
+    10.111.110.98
+    root
+    welcome
+
+    BDE-Management-Server
+    management-server
+    linux
+    root
+    vmware
+
+
+
diff --git a/vmkdrivers-gpl.odpx b/vmkdrivers-gpl.odpx
new file mode 100644
index 0000000000000000000000000000000000000000..510d6557b8fdbf936784506feba17cf0e9ce146b
--- /dev/null
+++ b/vmkdrivers-gpl.odpx
@@ -0,0 +1,26 @@
+
+
+    vmkdrivers
+    gpl
+    VMWsource
+
+    centos-5.3-x64
+
+    vmkdrivers-gpl.tgz
+    collect-drivers.sh
+    update-drivers.sh
+
+
+    tar xzf vmkdrivers-gpl.tgz
+    sh ./build-vmkdrivers.sh
+    mv collect-drivers.sh BLD/build/collect-drivers.sh
+    cd BLD/build
+    ./collect-drivers.sh
+
+
+    drivers
+    update-drivers.sh
+
+    centos-5.3-x64
+
+
diff --git a/vmkdrivers/src_9/drivers/ata/ahci.c b/vmkdrivers/src_9/drivers/ata/ahci.c
index c9d210210a90990526dbb40fa006c2a4c6d7cce3..fba3c78b8657809d6156a456f872858e3fd5e6ff 100644
--- a/vmkdrivers/src_9/drivers/ata/ahci.c
+++ b/vmkdrivers/src_9/drivers/ata/ahci.c
@@ -1,5 +1,5 @@
 /*
- * Portions Copyright 2008 - 2010 VMware, Inc.
+ * Portions Copyright 2008 - 2013 VMware, Inc.
 */
 /*
 * ahci.c - AHCI SATA support
@@ -54,7 +54,7 @@
 #endif
 
 #define DRV_NAME	"ahci"
-#define DRV_VERSION	"3.0-17vmw"
+#define DRV_VERSION	"3.0-18vmw"
 
 #if defined(__VMKLNX__)
 static int ahci_skip_host_reset = 0;
@@ -634,6 +634,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 	{ PCI_VDEVICE(INTEL, 0x1f37), board_ahci }, /* Avoton RAID */
 	{ PCI_VDEVICE(INTEL, 0x1f3e), board_ahci }, /* Avoton RAID */
 	{ PCI_VDEVICE(INTEL, 0x1f3f), board_ahci }, /* Avoton RAID */
+	{ PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Wellsburg RAID */
+	{ PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Wellsburg RAID */
+	{ PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Wellsburg RAID */
 	{ PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */
 	{ PCI_VDEVICE(INTEL, 0x8d04), board_ahci }, /* Wellsburg RAID */
 	{ PCI_VDEVICE(INTEL, 0x8d06), board_ahci }, /* Wellsburg RAID */
@@ -1968,7 +1971,6 @@ static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
 	if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
 #if defined(__VMKLNX__)
 		if ((irq_stat & PORT_IRQ_PHYRDY) && (ap->link.device[0].sdev)) {
-			vmklnx_scsi_device_hot_removed(ap->link.device[0].sdev);
 			ata_ehi_push_desc(host_ehi, "hotplug handled");
 		}
 #endif
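The three added Wellsburg entries reuse the table idiom visible above: PCI_VDEVICE(INTEL, <id>) matches on vendor and device ID while wildcarding the subsystem IDs, and the trailing board_ahci value lands in the driver_data slot to select the port configuration. A rough standalone sketch of what one such row amounts to (the struct and macros below are local stand-ins mirroring the mainline kernel definitions, not the vmklnx headers themselves):

    #include <stdio.h>

    /* Local mirror of the struct pci_device_id fields used by the table. */
    struct pci_id {
       unsigned int vendor, device;        /* IDs that must match          */
       unsigned int subvendor, subdevice;  /* subsystem IDs, or PCI_ANY_ID */
       unsigned long driver_data;          /* board type, e.g. board_ahci  */
    };

    #define PCI_ANY_ID          (~0)
    #define PCI_VENDOR_ID_INTEL 0x8086

    /* { PCI_VDEVICE(INTEL, 0x2823), board_ahci } expands to roughly this:
     * match device 8086:2823 with any subsystem IDs, tagged with the board. */
    static const struct pci_id wellsburg_raid = {
       .vendor = PCI_VENDOR_ID_INTEL, .device = 0x2823,
       .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
       .driver_data = 0 /* board_ahci in the real table */
    };

    int main(void)
    {
       printf("match %04x:%04x\n", wellsburg_raid.vendor, wellsburg_raid.device);
       return 0;
    }

Listing the RAID-mode device IDs lets the AHCI driver claim the controller even when the BIOS exposes it as RAID rather than plain AHCI.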
diff --git a/vmkdrivers/src_9/drivers/net/bnx2/bnx2.c b/vmkdrivers/src_9/drivers/net/bnx2/bnx2.c
index b28f29ca133dc04f06aac8fe0351e1845cf86356..09891e2d2e344584859b7d22eb36a91671b04888 100644
--- a/vmkdrivers/src_9/drivers/net/bnx2/bnx2.c
+++ b/vmkdrivers/src_9/drivers/net/bnx2/bnx2.c
@@ -91,7 +91,7 @@
 #include "bnx2_fw2.h"
 
 #define DRV_MODULE_NAME		"bnx2"
-#define DRV_MODULE_VERSION	"2.2.3h.v55.2"
+#define DRV_MODULE_VERSION	"2.2.3d.v55.2"
 #define DRV_MODULE_RELDATE	"Feb 12, 2013"
 
 #define RUN_AT(x) (jiffies + (x))
diff --git a/vmkdrivers/src_9/drivers/net/bnx2/cnic_register.h b/vmkdrivers/src_9/drivers/net/bnx2/cnic_register.h
index 28bedc0c7ac65781830602e1d73d3cf4fb33372a..c0e805642a0f2d85466c269c8cdf988dcda6351d 100644
--- a/vmkdrivers/src_9/drivers/net/bnx2/cnic_register.h
+++ b/vmkdrivers/src_9/drivers/net/bnx2/cnic_register.h
@@ -15,7 +15,7 @@
 #ifndef CNIC_REGISTER_H
 #define CNIC_REGISTER_H
-#define CNIC_REGISTER_MODULE_VERSION	"1.72.2.v55.1"
+#define CNIC_REGISTER_MODULE_VERSION	"1.72.1.v50.1i"
 #define CNIC_REGISTER_MODULE_RELDATE	"February 06, 2013"
 extern int cnic_register_adapter(const char * name, void *callback);
 extern void *cnic_register_get_callback(const char * name);
diff --git a/vmkdrivers/src_9/drivers/net/bnx2x/cnic_register.h b/vmkdrivers/src_9/drivers/net/bnx2x/cnic_register.h
index 28bedc0c7ac65781830602e1d73d3cf4fb33372a..c0e805642a0f2d85466c269c8cdf988dcda6351d 100644
--- a/vmkdrivers/src_9/drivers/net/bnx2x/cnic_register.h
+++ b/vmkdrivers/src_9/drivers/net/bnx2x/cnic_register.h
@@ -15,7 +15,7 @@
 #ifndef CNIC_REGISTER_H
 #define CNIC_REGISTER_H
-#define CNIC_REGISTER_MODULE_VERSION	"1.72.2.v55.1"
+#define CNIC_REGISTER_MODULE_VERSION	"1.72.1.v50.1i"
 #define CNIC_REGISTER_MODULE_RELDATE	"February 06, 2013"
 extern int cnic_register_adapter(const char * name, void *callback);
 extern void *cnic_register_get_callback(const char * name);
diff --git a/vmkdrivers/src_9/drivers/net/e1000e/e1000_80003es2lan.c b/vmkdrivers/src_9/drivers/net/e1000e/e1000_80003es2lan.c
index a13ee177a0218bde0e811d1f622e3c380501318d..1aceb32d187cf2bb0bd96f6e9a57ffcc971ef2a9 100644
--- a/vmkdrivers/src_9/drivers/net/e1000e/e1000_80003es2lan.c
+++ b/vmkdrivers/src_9/drivers/net/e1000e/e1000_80003es2lan.c
@@ -997,6 +997,14 @@ static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw)
 	reg |= (1 << 28);
 	ew32(TARC(1), reg);
 
+	/*
+	 * Disable IPv6 extension header parsing because some malformed
+	 * IPv6 headers can hang the Rx.
+	 */
+	reg = er32(RFCTL);
+	reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
+	ew32(RFCTL, reg);
+
 	return;
 }
diff --git a/vmkdrivers/src_9/drivers/net/e1000e/e1000_82571.c b/vmkdrivers/src_9/drivers/net/e1000e/e1000_82571.c
index e84d04d32c20c9a860a58187c37819bb1cd32017..7b7b13ab4590e4229f609fe18ff79a32e4def982 100644
--- a/vmkdrivers/src_9/drivers/net/e1000e/e1000_82571.c
+++ b/vmkdrivers/src_9/drivers/net/e1000e/e1000_82571.c
@@ -1184,6 +1184,16 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
 		ew32(CTRL_EXT, reg);
 	}
 
+	/*
+	 * Disable IPv6 extension header parsing because some malformed
+	 * IPv6 headers can hang the Rx.
+	 */
+	if (hw->mac.type <= e1000_82573) {
+		reg = er32(RFCTL);
+		reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
+		ew32(RFCTL, reg);
+	}
+
 	/* PCI-Ex Control Registers */
 
 	switch (hw->mac.type) {
diff --git a/vmkdrivers/src_9/drivers/net/e1000e/e1000_ich8lan.c b/vmkdrivers/src_9/drivers/net/e1000e/e1000_ich8lan.c
index ce88f77c0527f51b3f42cccf0beae326593e30f9..912b636871f30fc989080a7dfacc60b8240d2833 100644
--- a/vmkdrivers/src_9/drivers/net/e1000e/e1000_ich8lan.c
+++ b/vmkdrivers/src_9/drivers/net/e1000e/e1000_ich8lan.c
@@ -2785,6 +2785,21 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
 		ew32(STATUS, reg);
 	}
 
+	/*
+	 * work-around descriptor data corruption issue during nfs v2 udp
+	 * traffic, just disable the nfs filtering capability
+	 */
+	reg = er32(RFCTL);
+	reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
+
+	/*
+	 * Disable IPv6 extension header parsing because some
+	 * malformed IPv6 headers can hang the Rx.
+	 */
+	if (hw->mac.type == e1000_ich8lan)
+		reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
+	ew32(RFCTL, reg);
+
 	return;
 }
diff --git a/vmkdrivers/src_9/drivers/net/igb/e1000_82575.c b/vmkdrivers/src_9/drivers/net/igb/e1000_82575.c
index 7b50b0446dff6286d9923c617ac9ea146d380b7f..89c4f07a8e54f8a92eb71e808e98d336e5b46be1 100644
--- a/vmkdrivers/src_9/drivers/net/igb/e1000_82575.c
+++ b/vmkdrivers/src_9/drivers/net/igb/e1000_82575.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2009 Intel Corporation.
+  Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -31,57 +31,125 @@ * 82575GB Gigabit Network Connection * 82576 Gigabit Network Connection * 82576 Quad Port Gigabit Mezzanine Adapter + * 82580 Gigabit Network Connection + * I350 Gigabit Network Connection */ #include "e1000_api.h" +#include "e1000_i210.h" static s32 e1000_init_phy_params_82575(struct e1000_hw *hw); -static s32 e1000_init_nvm_params_82575(struct e1000_hw *hw); static s32 e1000_init_mac_params_82575(struct e1000_hw *hw); static s32 e1000_acquire_phy_82575(struct e1000_hw *hw); static void e1000_release_phy_82575(struct e1000_hw *hw); static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw); static void e1000_release_nvm_82575(struct e1000_hw *hw); static s32 e1000_check_for_link_82575(struct e1000_hw *hw); +static s32 e1000_check_for_link_media_swap(struct e1000_hw *hw); static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw); static s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed, - u16 *duplex); + u16 *duplex); static s32 e1000_init_hw_82575(struct e1000_hw *hw); static s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw); static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, - u16 *data); + u16 *data); static s32 e1000_reset_hw_82575(struct e1000_hw *hw); static s32 e1000_reset_hw_82580(struct e1000_hw *hw); -static s32 e1000_read_phy_reg_82580(struct e1000_hw *hw, - u32 offset, u16 *data); -static s32 e1000_write_phy_reg_82580(struct e1000_hw *hw, - u32 offset, u16 data); +static s32 e1000_read_phy_reg_82580(struct e1000_hw *hw, + u32 offset, u16 *data); +static s32 e1000_write_phy_reg_82580(struct e1000_hw *hw, + u32 offset, u16 data); +static s32 e1000_set_d0_lplu_state_82580(struct e1000_hw *hw, + bool active); +static s32 e1000_set_d3_lplu_state_82580(struct e1000_hw *hw, + bool active); static s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw, - bool active); + bool active); static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw); static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw); +static s32 e1000_get_media_type_82575(struct e1000_hw *hw); +static s32 e1000_set_sfp_media_type_82575(struct e1000_hw *hw); static s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data); static s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw, - u32 offset, u16 data); + u32 offset, u16 data); static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw); static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask); static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, - u16 *speed, u16 *duplex); + u16 *speed, u16 *duplex); static s32 e1000_get_phy_id_82575(struct e1000_hw *hw); static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask); static bool e1000_sgmii_active_82575(struct e1000_hw *hw); static s32 e1000_reset_init_script_82575(struct e1000_hw *hw); static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw); +static void e1000_config_collision_dist_82575(struct e1000_hw *hw); static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw); static void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw); +static void e1000_power_up_serdes_link_82575(struct e1000_hw *hw); static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw); - -static const u16 e1000_82580_rxpbs_table[] = - { 36, 72, 144, 1, 2, 4, 8, 16, - 35, 70, 140 }; +static s32 e1000_reset_mdicnfg_82580(struct e1000_hw *hw); +static s32 
e1000_validate_nvm_checksum_82580(struct e1000_hw *hw); +static s32 e1000_update_nvm_checksum_82580(struct e1000_hw *hw); +static s32 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw, + u16 offset); +static s32 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw, + u16 offset); +static s32 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw); +static s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw); +static void e1000_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value); +static void e1000_clear_vfta_i350(struct e1000_hw *hw); + +static void e1000_i2c_start(struct e1000_hw *hw); +static void e1000_i2c_stop(struct e1000_hw *hw); +static s32 e1000_clock_in_i2c_byte(struct e1000_hw *hw, u8 *data); +static s32 e1000_clock_out_i2c_byte(struct e1000_hw *hw, u8 data); +static s32 e1000_get_i2c_ack(struct e1000_hw *hw); +static s32 e1000_clock_in_i2c_bit(struct e1000_hw *hw, bool *data); +static s32 e1000_clock_out_i2c_bit(struct e1000_hw *hw, bool data); +static void e1000_raise_i2c_clk(struct e1000_hw *hw, u32 *i2cctl); +static void e1000_lower_i2c_clk(struct e1000_hw *hw, u32 *i2cctl); +static s32 e1000_set_i2c_data(struct e1000_hw *hw, u32 *i2cctl, bool data); +static bool e1000_get_i2c_data(u32 *i2cctl); + +static const u16 e1000_82580_rxpbs_table[] = { + 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 }; #define E1000_82580_RXPBS_TABLE_SIZE \ (sizeof(e1000_82580_rxpbs_table)/sizeof(u16)) + +/** + * e1000_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO + * @hw: pointer to the HW structure + * + * Called to determine if the I2C pins are being used for I2C or as an + * external MDIO interface since the two options are mutually exclusive. + **/ +static bool e1000_sgmii_uses_mdio_82575(struct e1000_hw *hw) +{ + u32 reg = 0; + bool ext_mdio = false; + + DEBUGFUNC("e1000_sgmii_uses_mdio_82575"); + + switch (hw->mac.type) { + case e1000_82575: + case e1000_82576: + reg = E1000_READ_REG(hw, E1000_MDIC); + ext_mdio = !!(reg & E1000_MDIC_DEST); + break; + case e1000_82580: + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + reg = E1000_READ_REG(hw, E1000_MDICNFG); + ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO); + break; + default: + break; + } + return ext_mdio; +} + /** * e1000_init_phy_params_82575 - Init PHY func ptrs. 
* @hw: pointer to the HW structure @@ -90,9 +158,13 @@ static s32 e1000_init_phy_params_82575(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val = E1000_SUCCESS; + u32 ctrl_ext; DEBUGFUNC("e1000_init_phy_params_82575"); + phy->ops.read_i2c_byte = e1000_read_i2c_byte_generic; + phy->ops.write_i2c_byte = e1000_write_i2c_byte_generic; + if (hw->phy.media_type != e1000_media_type_copper) { phy->type = e1000_phy_none; goto out; @@ -101,27 +173,48 @@ static s32 e1000_init_phy_params_82575(struct e1000_hw *hw) phy->ops.power_up = e1000_power_up_phy_copper; phy->ops.power_down = e1000_power_down_phy_copper_82575; - phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; - phy->reset_delay_us = 100; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->reset_delay_us = 100; - phy->ops.acquire = e1000_acquire_phy_82575; - phy->ops.check_reset_block = e1000_check_reset_block_generic; - phy->ops.commit = e1000_phy_sw_reset_generic; - phy->ops.get_cfg_done = e1000_get_cfg_done_82575; - phy->ops.release = e1000_release_phy_82575; + phy->ops.acquire = e1000_acquire_phy_82575; + phy->ops.check_reset_block = e1000_check_reset_block_generic; + phy->ops.commit = e1000_phy_sw_reset_generic; + phy->ops.get_cfg_done = e1000_get_cfg_done_82575; + phy->ops.release = e1000_release_phy_82575; + + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); if (e1000_sgmii_active_82575(hw)) { - phy->ops.reset = e1000_phy_hw_reset_sgmii_82575; - phy->ops.read_reg = e1000_read_phy_reg_sgmii_82575; - phy->ops.write_reg = e1000_write_phy_reg_sgmii_82575; - } else if (hw->mac.type >= e1000_82580) { - phy->ops.reset = e1000_phy_hw_reset_generic; - phy->ops.read_reg = e1000_read_phy_reg_82580; - phy->ops.write_reg = e1000_write_phy_reg_82580; + phy->ops.reset = e1000_phy_hw_reset_sgmii_82575; + ctrl_ext |= E1000_CTRL_I2C_ENA; + } else { + phy->ops.reset = e1000_phy_hw_reset_generic; + ctrl_ext &= ~E1000_CTRL_I2C_ENA; + } + + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + e1000_reset_mdicnfg_82580(hw); + + if (e1000_sgmii_active_82575(hw) && !e1000_sgmii_uses_mdio_82575(hw)) { + phy->ops.read_reg = e1000_read_phy_reg_sgmii_82575; + phy->ops.write_reg = e1000_write_phy_reg_sgmii_82575; } else { - phy->ops.reset = e1000_phy_hw_reset_generic; - phy->ops.read_reg = e1000_read_phy_reg_igp; - phy->ops.write_reg = e1000_write_phy_reg_igp; + switch (hw->mac.type) { + case e1000_82580: + case e1000_i350: + case e1000_i354: + phy->ops.read_reg = e1000_read_phy_reg_82580; + phy->ops.write_reg = e1000_write_phy_reg_82580; + break; + case e1000_i210: + case e1000_i211: + phy->ops.read_reg = e1000_read_phy_reg_gs40g; + phy->ops.write_reg = e1000_write_phy_reg_gs40g; + break; + default: + phy->ops.read_reg = e1000_read_phy_reg_igp; + phy->ops.write_reg = e1000_write_phy_reg_igp; + } } /* Set phy->phy_addr and phy->id. 
*/ @@ -129,30 +222,78 @@ static s32 e1000_init_phy_params_82575(struct e1000_hw *hw) /* Verify phy id and set remaining function pointers */ switch (phy->id) { + case M88E1543_E_PHY_ID: + case I347AT4_E_PHY_ID: + case M88E1112_E_PHY_ID: + case M88E1340M_E_PHY_ID: case M88E1111_I_PHY_ID: - phy->type = e1000_phy_m88; - phy->ops.check_polarity = e1000_check_polarity_m88; - phy->ops.get_info = e1000_get_phy_info_m88; - phy->ops.get_cable_length = e1000_get_cable_length_m88; + phy->type = e1000_phy_m88; + phy->ops.check_polarity = e1000_check_polarity_m88; + phy->ops.get_info = e1000_get_phy_info_m88; + if (phy->id == I347AT4_E_PHY_ID || + phy->id == M88E1112_E_PHY_ID || + phy->id == M88E1340M_E_PHY_ID) + phy->ops.get_cable_length = + e1000_get_cable_length_m88_gen2; + else if (phy->id == M88E1543_E_PHY_ID) + phy->ops.get_cable_length = + e1000_get_cable_length_m88_gen2; + else + phy->ops.get_cable_length = e1000_get_cable_length_m88; phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88; + /* Check if this PHY is confgured for media swap. */ + if (phy->id == M88E1112_E_PHY_ID) { + u16 data; + + ret_val = phy->ops.write_reg(hw, + E1000_M88E1112_PAGE_ADDR, + 2); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, + E1000_M88E1112_MAC_CTRL_1, + &data); + if (ret_val) + goto out; + + data = (data & E1000_M88E1112_MAC_CTRL_1_MODE_MASK) >> + E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT; + if (data == E1000_M88E1112_AUTO_COPPER_SGMII || + data == E1000_M88E1112_AUTO_COPPER_BASEX) + hw->mac.ops.check_for_link = + e1000_check_for_link_media_swap; + } break; case IGP03E1000_E_PHY_ID: case IGP04E1000_E_PHY_ID: - phy->type = e1000_phy_igp_3; - phy->ops.check_polarity = e1000_check_polarity_igp; - phy->ops.get_info = e1000_get_phy_info_igp; - phy->ops.get_cable_length = e1000_get_cable_length_igp_2; + phy->type = e1000_phy_igp_3; + phy->ops.check_polarity = e1000_check_polarity_igp; + phy->ops.get_info = e1000_get_phy_info_igp; + phy->ops.get_cable_length = e1000_get_cable_length_igp_2; phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp; - phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82575; - phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_generic; + phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82575; + phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_generic; break; case I82580_I_PHY_ID: case I350_I_PHY_ID: - phy->type = e1000_phy_82580; - phy->ops.check_polarity = e1000_check_polarity_82577; - phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_82577; - phy->ops.get_cable_length = e1000_get_cable_length_82577; - phy->ops.get_info = e1000_get_phy_info_82577; + phy->type = e1000_phy_82580; + phy->ops.check_polarity = e1000_check_polarity_82577; + phy->ops.force_speed_duplex = + e1000_phy_force_speed_duplex_82577; + phy->ops.get_cable_length = e1000_get_cable_length_82577; + phy->ops.get_info = e1000_get_phy_info_82577; + phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82580; + phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82580; + break; + case I210_I_PHY_ID: + phy->type = e1000_phy_i210; + phy->ops.check_polarity = e1000_check_polarity_m88; + phy->ops.get_info = e1000_get_phy_info_m88; + phy->ops.get_cable_length = e1000_get_cable_length_m88_gen2; + phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82580; + phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82580; + phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88; break; default: ret_val = -E1000_ERR_PHY; @@ -167,7 +308,7 @@ out: * e1000_init_nvm_params_82575 - Init 
NVM func ptrs. * @hw: pointer to the HW structure **/ -static s32 e1000_init_nvm_params_82575(struct e1000_hw *hw) +s32 e1000_init_nvm_params_82575(struct e1000_hw *hw) { struct e1000_nvm_info *nvm = &hw->nvm; u32 eecd = E1000_READ_REG(hw, E1000_EECD); @@ -175,47 +316,75 @@ static s32 e1000_init_nvm_params_82575(struct e1000_hw *hw) DEBUGFUNC("e1000_init_nvm_params_82575"); - nvm->opcode_bits = 8; - nvm->delay_usec = 1; - switch (nvm->override) { - case e1000_nvm_override_spi_large: - nvm->page_size = 32; - nvm->address_bits = 16; - break; - case e1000_nvm_override_spi_small: - nvm->page_size = 8; - nvm->address_bits = 8; - break; - default: - nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; - nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8; - break; - } - - nvm->type = e1000_nvm_eeprom_spi; - size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> - E1000_EECD_SIZE_EX_SHIFT); - + E1000_EECD_SIZE_EX_SHIFT); /* * Added to a constant, "size" becomes the left-shift value * for setting word_size. */ size += NVM_WORD_SIZE_BASE_SHIFT; - /* EEPROM access above 16k is unsupported */ - if (size > 14) - size = 14; + /* Just in case size is out of range, cap it to the largest + * EEPROM size supported + */ + if (size > 15) + size = 15; + nvm->word_size = 1 << size; + if (hw->mac.type < e1000_i210) { + nvm->opcode_bits = 8; + nvm->delay_usec = 1; + + switch (nvm->override) { + case e1000_nvm_override_spi_large: + nvm->page_size = 32; + nvm->address_bits = 16; + break; + case e1000_nvm_override_spi_small: + nvm->page_size = 8; + nvm->address_bits = 8; + break; + default: + nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; + nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? + 16 : 8; + break; + } + if (nvm->word_size == (1 << 15)) + nvm->page_size = 128; + + nvm->type = e1000_nvm_eeprom_spi; + } else { + nvm->type = e1000_nvm_flash_hw; + } /* Function Pointers */ - nvm->ops.acquire = e1000_acquire_nvm_82575; - nvm->ops.read = e1000_read_nvm_eerd; - nvm->ops.release = e1000_release_nvm_82575; - nvm->ops.update = e1000_update_nvm_checksum_generic; + nvm->ops.acquire = e1000_acquire_nvm_82575; + nvm->ops.release = e1000_release_nvm_82575; + if (nvm->word_size < (1 << 15)) + nvm->ops.read = e1000_read_nvm_eerd; + else + nvm->ops.read = e1000_read_nvm_spi; + + nvm->ops.write = e1000_write_nvm_spi; + nvm->ops.validate = e1000_validate_nvm_checksum_generic; + nvm->ops.update = e1000_update_nvm_checksum_generic; nvm->ops.valid_led_default = e1000_valid_led_default_82575; - nvm->ops.validate = e1000_validate_nvm_checksum_generic; - nvm->ops.write = e1000_write_nvm_spi; + + /* override generic family function pointers for specific descendants */ + switch (hw->mac.type) { + case e1000_82580: + nvm->ops.validate = e1000_validate_nvm_checksum_82580; + nvm->ops.update = e1000_update_nvm_checksum_82580; + break; + case e1000_i350: + case e1000_i354: + nvm->ops.validate = e1000_validate_nvm_checksum_i350; + nvm->ops.update = e1000_update_nvm_checksum_i350; + break; + default: + break; + } return E1000_SUCCESS; } @@ -228,46 +397,11 @@ static s32 e1000_init_mac_params_82575(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; - u32 ctrl_ext = 0; DEBUGFUNC("e1000_init_mac_params_82575"); - /* Set media type */ - /* - * The 82575 uses bits 22:23 for link mode. The mode can be changed - * based on the EEPROM. We cannot rely upon device ID. There - * is no distinguishable difference between fiber and internal - * SerDes mode on the 82575. 
There can be an external PHY attached - * on the SGMII interface. For this, we'll set sgmii_active to true. - */ - hw->phy.media_type = e1000_media_type_copper; - dev_spec->sgmii_active = false; - - ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); - switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) { - case E1000_CTRL_EXT_LINK_MODE_SGMII: - dev_spec->sgmii_active = true; - ctrl_ext |= E1000_CTRL_I2C_ENA; - break; - case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: - case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES: - hw->phy.media_type = e1000_media_type_internal_serdes; - ctrl_ext |= E1000_CTRL_I2C_ENA; - break; - default: - ctrl_ext &= ~E1000_CTRL_I2C_ENA; - break; - } - - E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); - - /* - * if using i2c make certain the MDICNFG register is cleared to prevent - * communications from being misrouted to the mdic registers - */ - if ((ctrl_ext & E1000_CTRL_I2C_ENA) && (hw->mac.type == e1000_82580)) - E1000_WRITE_REG(hw, E1000_MDICNFG, 0); - + /* Derives media type */ + e1000_get_media_type_82575(hw); /* Set mta register count */ mac->mta_reg_count = 128; /* Set uta register count */ @@ -278,21 +412,31 @@ static s32 e1000_init_mac_params_82575(struct e1000_hw *hw) mac->rar_entry_count = E1000_RAR_ENTRIES_82576; if (mac->type == e1000_82580) mac->rar_entry_count = E1000_RAR_ENTRIES_82580; - if (mac->type == e1000_i350) + if (mac->type == e1000_i350 || mac->type == e1000_i354) mac->rar_entry_count = E1000_RAR_ENTRIES_I350; + + /* Enable EEE default settings for EEE supported devices */ + if (mac->type >= e1000_i350) + dev_spec->eee_disable = false; + + /* Allow a single clear of the SW semaphore on I210 and newer */ + if (mac->type >= e1000_i210) + dev_spec->clear_semaphore_once = true; + /* Set if part includes ASF firmware */ mac->asf_firmware_present = true; - /* Set if manageability features are enabled. */ + /* FWSM register */ + mac->has_fwsm = true; + /* ARC supported; valid only if manageability features are enabled. */ mac->arc_subsystem_valid = - (E1000_READ_REG(hw, E1000_FWSM) & E1000_FWSM_MODE_MASK) - ? true : false; + !!(E1000_READ_REG(hw, E1000_FWSM) & E1000_FWSM_MODE_MASK); /* Function pointers */ /* bus type/speed/width */ mac->ops.get_bus_info = e1000_get_bus_info_pcie_generic; /* reset */ - if (mac->type == e1000_82580) + if (mac->type >= e1000_82580) mac->ops.reset_hw = e1000_reset_hw_82580; else mac->ops.reset_hw = e1000_reset_hw_82575; @@ -302,25 +446,34 @@ static s32 e1000_init_mac_params_82575(struct e1000_hw *hw) mac->ops.setup_link = e1000_setup_link_generic; /* physical interface link setup */ mac->ops.setup_physical_interface = - (hw->phy.media_type == e1000_media_type_copper) - ? e1000_setup_copper_link_82575 - : e1000_setup_serdes_link_82575; + (hw->phy.media_type == e1000_media_type_copper) + ? 
e1000_setup_copper_link_82575 : e1000_setup_serdes_link_82575; /* physical interface shutdown */ mac->ops.shutdown_serdes = e1000_shutdown_serdes_link_82575; + /* physical interface power up */ + mac->ops.power_up_serdes = e1000_power_up_serdes_link_82575; /* check for link */ mac->ops.check_for_link = e1000_check_for_link_82575; - /* receive address register setting */ - mac->ops.rar_set = e1000_rar_set_generic; /* read mac address */ mac->ops.read_mac_addr = e1000_read_mac_addr_82575; + /* configure collision distance */ + mac->ops.config_collision_dist = e1000_config_collision_dist_82575; /* multicast address update */ mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic; - /* writing VFTA */ - mac->ops.write_vfta = e1000_write_vfta_generic; - /* clearing VFTA */ - mac->ops.clear_vfta = e1000_clear_vfta_generic; - /* setting MTA */ - mac->ops.mta_set = e1000_mta_set_generic; + if (hw->mac.type == e1000_i350 || mac->type == e1000_i354) { + /* writing VFTA */ + mac->ops.write_vfta = e1000_write_vfta_i350; + /* clearing VFTA */ + mac->ops.clear_vfta = e1000_clear_vfta_i350; + } else { + /* writing VFTA */ + mac->ops.write_vfta = e1000_write_vfta_generic; + /* clearing VFTA */ + mac->ops.clear_vfta = e1000_clear_vfta_generic; + } + if (hw->mac.type >= e1000_82580) + mac->ops.validate_mdi_setting = + e1000_validate_mdi_setting_crossover_generic; /* ID LED init */ mac->ops.id_led_init = e1000_id_led_init_generic; /* blink LED */ @@ -336,6 +489,18 @@ static s32 e1000_init_mac_params_82575(struct e1000_hw *hw) mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82575; /* link info */ mac->ops.get_link_up_info = e1000_get_link_up_info_82575; + /* get thermal sensor data */ + mac->ops.get_thermal_sensor_data = + e1000_get_thermal_sensor_data_generic; + mac->ops.init_thermal_sensor_thresh = + e1000_init_thermal_sensor_thresh_generic; + /* acquire SW_FW sync */ + mac->ops.acquire_swfw_sync = e1000_acquire_swfw_sync_82575; + mac->ops.release_swfw_sync = e1000_release_swfw_sync_82575; + if (mac->type >= e1000_i210) { + mac->ops.acquire_swfw_sync = e1000_acquire_swfw_sync_i210; + mac->ops.release_swfw_sync = e1000_release_swfw_sync_i210; + } /* set lan id for port to determine which phy lock to use */ hw->mac.ops.set_lan_id(hw); @@ -378,7 +543,7 @@ static s32 e1000_acquire_phy_82575(struct e1000_hw *hw) else if (hw->bus.func == E1000_FUNC_3) mask = E1000_SWFW_PHY3_SM; - return e1000_acquire_swfw_sync_82575(hw, mask); + return hw->mac.ops.acquire_swfw_sync(hw, mask); } /** @@ -400,7 +565,7 @@ static void e1000_release_phy_82575(struct e1000_hw *hw) else if (hw->bus.func == E1000_FUNC_3) mask = E1000_SWFW_PHY3_SM; - e1000_release_swfw_sync_82575(hw, mask); + hw->mac.ops.release_swfw_sync(hw, mask); } /** @@ -413,7 +578,7 @@ static void e1000_release_phy_82575(struct e1000_hw *hw) * interface and stores the retrieved information in data. **/ static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, - u16 *data) + u16 *data) { s32 ret_val = -E1000_ERR_PARAM; @@ -446,7 +611,7 @@ out: * media independent interface. 
**/ static s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, - u16 data) + u16 data) { s32 ret_val = -E1000_ERR_PARAM; @@ -482,6 +647,7 @@ static s32 e1000_get_phy_id_82575(struct e1000_hw *hw) s32 ret_val = E1000_SUCCESS; u16 phy_id; u32 ctrl_ext; + u32 mdic; DEBUGFUNC("e1000_get_phy_id_82575"); @@ -498,10 +664,36 @@ static s32 e1000_get_phy_id_82575(struct e1000_hw *hw) goto out; } + if (e1000_sgmii_uses_mdio_82575(hw)) { + switch (hw->mac.type) { + case e1000_82575: + case e1000_82576: + mdic = E1000_READ_REG(hw, E1000_MDIC); + mdic &= E1000_MDIC_PHY_MASK; + phy->addr = mdic >> E1000_MDIC_PHY_SHIFT; + break; + case e1000_82580: + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + mdic = E1000_READ_REG(hw, E1000_MDICNFG); + mdic &= E1000_MDICNFG_PHY_MASK; + phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT; + break; + default: + ret_val = -E1000_ERR_PHY; + goto out; + break; + } + ret_val = e1000_get_phy_id(hw); + goto out; + } + /* Power on sgmii phy if it is disabled */ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); E1000_WRITE_REG(hw, E1000_CTRL_EXT, - ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA); + ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA); E1000_WRITE_FLUSH(hw); msec_delay(300); @@ -513,8 +705,7 @@ static s32 e1000_get_phy_id_82575(struct e1000_hw *hw) ret_val = e1000_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id); if (ret_val == E1000_SUCCESS) { DEBUGOUT2("Vendor ID 0x%08X read at address %u\n", - phy_id, - phy->addr); + phy_id, phy->addr); /* * At the time of this writing, The M88 part is * the only supported SGMII PHY product. @@ -523,7 +714,7 @@ static s32 e1000_get_phy_id_82575(struct e1000_hw *hw) break; } else { DEBUGOUT1("PHY address %u was unreadable\n", - phy->addr); + phy->addr); } } @@ -609,22 +800,22 @@ static s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active) if (active) { data |= IGP02E1000_PM_D0_LPLU; ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, - data); + data); if (ret_val) goto out; /* When LPLU is enabled, we should disable SmartSpeed */ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, - &data); + &data); data &= ~IGP01E1000_PSCFR_SMART_SPEED; ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, - data); + data); if (ret_val) goto out; } else { data &= ~IGP02E1000_PM_D0_LPLU; ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, - data); + data); /* * LPLU and SmartSpeed are mutually exclusive. 
LPLU is used * during Dx states where the power conservation is most @@ -633,28 +824,28 @@ static s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active) */ if (phy->smart_speed == e1000_smart_speed_on) { ret_val = phy->ops.read_reg(hw, - IGP01E1000_PHY_PORT_CONFIG, - &data); + IGP01E1000_PHY_PORT_CONFIG, + &data); if (ret_val) goto out; data |= IGP01E1000_PSCFR_SMART_SPEED; ret_val = phy->ops.write_reg(hw, - IGP01E1000_PHY_PORT_CONFIG, - data); + IGP01E1000_PHY_PORT_CONFIG, + data); if (ret_val) goto out; } else if (phy->smart_speed == e1000_smart_speed_off) { ret_val = phy->ops.read_reg(hw, - IGP01E1000_PHY_PORT_CONFIG, - &data); + IGP01E1000_PHY_PORT_CONFIG, + &data); if (ret_val) goto out; data &= ~IGP01E1000_PSCFR_SMART_SPEED; ret_val = phy->ops.write_reg(hw, - IGP01E1000_PHY_PORT_CONFIG, - data); + IGP01E1000_PHY_PORT_CONFIG, + data); if (ret_val) goto out; } @@ -664,6 +855,101 @@ out: return ret_val; } +/** + * e1000_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU D0 state according to the active flag. When + * activating LPLU this function also disables smart speed + * and vice versa. LPLU will not be activated unless the + * device autonegotiation advertisement meets standards of + * either 10 or 10/100 or 10/100/1000 at all duplexes. + * This is a function pointer entry point only called by + * PHY setup routines. + **/ +static s32 e1000_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + u32 data; + + DEBUGFUNC("e1000_set_d0_lplu_state_82580"); + + data = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT); + + if (active) { + data |= E1000_82580_PM_D0_LPLU; + + /* When LPLU is enabled, we should disable SmartSpeed */ + data &= ~E1000_82580_PM_SPD; + } else { + data &= ~E1000_82580_PM_D0_LPLU; + + /* + * LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (phy->smart_speed == e1000_smart_speed_on) + data |= E1000_82580_PM_SPD; + else if (phy->smart_speed == e1000_smart_speed_off) + data &= ~E1000_82580_PM_SPD; + } + + E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, data); + return ret_val; +} + +/** + * e1000_set_d3_lplu_state_82580 - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D3 + * and SmartSpeed is disabled when active is true, else clear lplu for D3 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. + **/ +s32 e1000_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + u32 data; + + DEBUGFUNC("e1000_set_d3_lplu_state_82580"); + + data = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT); + + if (!active) { + data &= ~E1000_82580_PM_D3_LPLU; + /* + * LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. 
During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (phy->smart_speed == e1000_smart_speed_on) + data |= E1000_82580_PM_SPD; + else if (phy->smart_speed == e1000_smart_speed_off) + data &= ~E1000_82580_PM_SPD; + } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || + (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { + data |= E1000_82580_PM_D3_LPLU; + /* When LPLU is enabled, we should disable SmartSpeed */ + data &= ~E1000_82580_PM_SPD; + } + + E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, data); + return ret_val; +} + /** * e1000_acquire_nvm_82575 - Request for access to EEPROM * @hw: pointer to the HW structure @@ -683,8 +969,32 @@ static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw) if (ret_val) goto out; - ret_val = e1000_acquire_nvm_generic(hw); + /* + * Check if there is some access + * error this access may hook on + */ + if (hw->mac.type == e1000_i350) { + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + if (eecd & (E1000_EECD_BLOCKED | E1000_EECD_ABORT | + E1000_EECD_TIMEOUT)) { + /* Clear all access error flags */ + E1000_WRITE_REG(hw, E1000_EECD, eecd | + E1000_EECD_ERROR_CLR); + DEBUGOUT("Nvm bit banging access error detected and cleared.\n"); + } + } + if (hw->mac.type == e1000_82580) { + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + if (eecd & E1000_EECD_BLOCKED) { + /* Clear access error flag */ + E1000_WRITE_REG(hw, E1000_EECD, eecd | + E1000_EECD_BLOCKED); + DEBUGOUT("Nvm bit banging access error detected and cleared.\n"); + } + } + + ret_val = e1000_acquire_nvm_generic(hw); if (ret_val) e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM); @@ -704,6 +1014,7 @@ static void e1000_release_nvm_82575(struct e1000_hw *hw) DEBUGFUNC("e1000_release_nvm_82575"); e1000_release_nvm_generic(hw); + e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM); } @@ -773,8 +1084,8 @@ static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask) DEBUGFUNC("e1000_release_swfw_sync_82575"); - while (e1000_get_hw_semaphore_generic(hw) != E1000_SUCCESS); - /* Empty */ + while (e1000_get_hw_semaphore_generic(hw) != E1000_SUCCESS) + ; /* Empty */ swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC); swfw_sync &= ~mask; @@ -817,7 +1128,7 @@ static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw) DEBUGOUT("MNG configuration cycle has not completed.\n"); /* If EEPROM is not marked present, init the PHY manually */ - if (((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0) && + if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) && (hw->phy.type == e1000_phy_igp_3)) e1000_phy_init_script_igp3(hw); @@ -835,7 +1146,7 @@ static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw) * Otherwise, use the generic function to get the link speed and duplex info. **/ static s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed, - u16 *duplex) + u16 *duplex) { s32 ret_val; @@ -843,10 +1154,10 @@ static s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed, if (hw->phy.media_type != e1000_media_type_copper) ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, speed, - duplex); + duplex); else ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, - duplex); + duplex); return ret_val; } @@ -867,13 +1178,23 @@ static s32 e1000_check_for_link_82575(struct e1000_hw *hw) if (hw->phy.media_type != e1000_media_type_copper) { ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, &speed, - &duplex); + &duplex); /* * Use this flag to determine if link needs to be checked or * not. 
If we have link clear the flag so that we do not * continue to check for link. */ hw->mac.get_link_status = !hw->mac.serdes_has_link; + + /* + * Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. + */ + ret_val = e1000_config_fc_after_link_up_generic(hw); + if (ret_val) + DEBUGOUT("Error configuring flow control\n"); } else { ret_val = e1000_check_for_copper_link_generic(hw); } @@ -881,6 +1202,85 @@ static s32 e1000_check_for_link_82575(struct e1000_hw *hw) return ret_val; } +/** + * e1000_check_for_link_media_swap - Check which M88E1112 interface linked + * @hw: pointer to the HW structure + * + * Poll the M88E1112 interfaces to see which interface achieved link. + */ +static s32 e1000_check_for_link_media_swap(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + u8 port = 0; + + DEBUGFUNC("e1000_check_for_link_media_swap"); + + /* Check the copper medium. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); + if (ret_val) + return ret_val; + + ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data); + if (ret_val) + return ret_val; + + if (data & E1000_M88E1112_STATUS_LINK) + port = E1000_MEDIA_PORT_COPPER; + + /* Check the other medium. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 1); + if (ret_val) + return ret_val; + + ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data); + if (ret_val) + return ret_val; + + if (data & E1000_M88E1112_STATUS_LINK) + port = E1000_MEDIA_PORT_OTHER; + + /* Determine if a swap needs to happen. */ + if (port && (hw->dev_spec._82575.media_port != port)) { + hw->dev_spec._82575.media_port = port; + hw->dev_spec._82575.media_changed = true; + } else { + ret_val = e1000_check_for_link_82575(hw); + } + + return E1000_SUCCESS; +} + +/** + * e1000_power_up_serdes_link_82575 - Power up the serdes link after shutdown + * @hw: pointer to the HW structure + **/ +static void e1000_power_up_serdes_link_82575(struct e1000_hw *hw) +{ + u32 reg; + + DEBUGFUNC("e1000_power_up_serdes_link_82575"); + + if ((hw->phy.media_type != e1000_media_type_internal_serdes) && + !e1000_sgmii_active_82575(hw)) + return; + + /* Enable PCS to turn on link */ + reg = E1000_READ_REG(hw, E1000_PCS_CFG0); + reg |= E1000_PCS_CFG_PCS_EN; + E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg); + + /* Power up the laser */ + reg = E1000_READ_REG(hw, E1000_CTRL_EXT); + reg &= ~E1000_CTRL_EXT_SDP3_DATA; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); + + /* flush the write to verify completion */ + E1000_WRITE_FLUSH(hw); + msec_delay(1); +} + /** * e1000_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex * @hw: pointer to the HW structure @@ -891,18 +1291,14 @@ static s32 e1000_check_for_link_82575(struct e1000_hw *hw) * duplex, then store the values in the pointers provided. **/ static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, - u16 *speed, u16 *duplex) + u16 *speed, u16 *duplex) { struct e1000_mac_info *mac = &hw->mac; u32 pcs; + u32 status; DEBUGFUNC("e1000_get_pcs_speed_and_duplex_82575"); - /* Set up defaults for the return values of this function */ - mac->serdes_has_link = false; - *speed = 0; - *duplex = 0; - /* * Read the PCS Status register for link state. For non-copper mode, * the status register is not accurate. 
The PCS status register is @@ -911,28 +1307,41 @@ static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, pcs = E1000_READ_REG(hw, E1000_PCS_LSTAT); /* - * The link up bit determines when link is up on autoneg. The sync ok - * gets set once both sides sync up and agree upon link. Stable link - * can be determined by checking for both link up and link sync ok + * The link up bit determines when link is up on autoneg. */ - if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) { + if (pcs & E1000_PCS_LSTS_LINK_OK) { mac->serdes_has_link = true; /* Detect and store PCS speed */ - if (pcs & E1000_PCS_LSTS_SPEED_1000) { + if (pcs & E1000_PCS_LSTS_SPEED_1000) *speed = SPEED_1000; - } else if (pcs & E1000_PCS_LSTS_SPEED_100) { + else if (pcs & E1000_PCS_LSTS_SPEED_100) *speed = SPEED_100; - } else { + else *speed = SPEED_10; - } /* Detect and store PCS duplex */ - if (pcs & E1000_PCS_LSTS_DUPLEX_FULL) { + if (pcs & E1000_PCS_LSTS_DUPLEX_FULL) *duplex = FULL_DUPLEX; - } else { + else *duplex = HALF_DUPLEX; + + /* Check if it is an I354 2.5Gb backplane connection. */ + if (mac->type == e1000_i354) { + status = E1000_READ_REG(hw, E1000_STATUS); + if ((status & E1000_STATUS_2P5_SKU) && + !(status & E1000_STATUS_2P5_SKU_OVER)) { + *speed = SPEED_2500; + *duplex = FULL_DUPLEX; + DEBUGOUT("2500 Mbs, "); + DEBUGOUT("Full Duplex\n"); + } } + + } else { + mac->serdes_has_link = false; + *speed = 0; + *duplex = 0; } return E1000_SUCCESS; @@ -948,27 +1357,14 @@ static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw) { u32 reg; - u16 eeprom_data = 0; + + DEBUGFUNC("e1000_shutdown_serdes_link_82575"); if ((hw->phy.media_type != e1000_media_type_internal_serdes) && !e1000_sgmii_active_82575(hw)) return; - if (hw->bus.func == E1000_FUNC_0) - hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); - else if (hw->mac.type == e1000_82580) - hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + - NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, - &eeprom_data); - else if (hw->bus.func == E1000_FUNC_1) - hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); - - /* - * If APM is not enabled in the EEPROM and management interface is - * not enabled, then power down. - */ - if (!(eeprom_data & E1000_NVM_APME_82575) && - !e1000_enable_mng_pass_thru(hw)) { + if (!e1000_enable_mng_pass_thru(hw)) { /* Disable PCS to turn off link */ reg = E1000_READ_REG(hw, E1000_PCS_CFG0); reg &= ~E1000_PCS_CFG_PCS_EN; @@ -995,7 +1391,7 @@ void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw) **/ static s32 e1000_reset_hw_82575(struct e1000_hw *hw) { - u32 ctrl, icr; + u32 ctrl; s32 ret_val; DEBUGFUNC("e1000_reset_hw_82575"); @@ -1005,15 +1401,13 @@ static s32 e1000_reset_hw_82575(struct e1000_hw *hw) * on the last TLP read/write transaction when MAC is reset. 
*/ ret_val = e1000_disable_pcie_master_generic(hw); - if (ret_val) { + if (ret_val) DEBUGOUT("PCI-E Master disable polling has failed.\n"); - } /* set the completion timeout for interface */ ret_val = e1000_set_pcie_completion_timeout(hw); - if (ret_val) { + if (ret_val) DEBUGOUT("PCI-E Set completion timeout has failed.\n"); - } DEBUGOUT("Masking off all interrupts\n"); E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); @@ -1040,12 +1434,12 @@ static s32 e1000_reset_hw_82575(struct e1000_hw *hw) } /* If EEPROM is not present, run manual init scripts */ - if ((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0) + if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES)) e1000_reset_init_script_82575(hw); /* Clear any pending interrupt events. */ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); - icr = E1000_READ_REG(hw, E1000_ICR); + E1000_READ_REG(hw, E1000_ICR); /* Install any alternate MAC address into RAR0 */ ret_val = e1000_check_alt_mac_addr_generic(hw); @@ -1094,6 +1488,9 @@ static s32 e1000_init_hw_82575(struct e1000_hw *hw) /* Setup link and flow control */ ret_val = mac->ops.setup_link(hw); + /* Set the default MTU size */ + hw->dev_spec._82575.mtu = 1500; + /* * Clear all of the statistics registers (clear on read). It is * important that we do this after we have tried to establish link @@ -1116,7 +1513,8 @@ static s32 e1000_init_hw_82575(struct e1000_hw *hw) static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw) { u32 ctrl; - s32 ret_val; + s32 ret_val; + u32 phpm_reg; DEBUGFUNC("e1000_setup_copper_link_82575"); @@ -1125,6 +1523,20 @@ static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw) ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + /* Clear Go Link Disconnect bit on supported devices */ + switch (hw->mac.type) { + case e1000_82580: + case e1000_i350: + case e1000_i210: + case e1000_i211: + phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT); + phpm_reg &= ~E1000_82580_PM_GO_LINKD; + E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg); + break; + default: + break; + } + ret_val = e1000_setup_serdes_link_82575(hw); if (ret_val) goto out; @@ -1140,8 +1552,20 @@ static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw) } } switch (hw->phy.type) { + case e1000_phy_i210: case e1000_phy_m88: - ret_val = e1000_copper_link_setup_m88(hw); + switch (hw->phy.id) { + case I347AT4_E_PHY_ID: + case M88E1112_E_PHY_ID: + case M88E1340M_E_PHY_ID: + case M88E1543_E_PHY_ID: + case I210_I_PHY_ID: + ret_val = e1000_copper_link_setup_m88_gen2(hw); + break; + default: + ret_val = e1000_copper_link_setup_m88(hw); + break; + } break; case e1000_phy_igp_3: ret_val = e1000_copper_link_setup_igp(hw); @@ -1173,14 +1597,16 @@ out: **/ static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw) { - u32 ctrl_ext, ctrl_reg, reg; + u32 ctrl_ext, ctrl_reg, reg, anadv_reg; bool pcs_autoneg; + s32 ret_val = E1000_SUCCESS; + u16 data; DEBUGFUNC("e1000_setup_serdes_link_82575"); if ((hw->phy.media_type != e1000_media_type_internal_serdes) && !e1000_sgmii_active_82575(hw)) - return E1000_SUCCESS; + return ret_val; /* * On the 82575, SerDes loopback mode persists until it is @@ -1198,16 +1624,10 @@ static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw) ctrl_reg = E1000_READ_REG(hw, E1000_CTRL); ctrl_reg |= E1000_CTRL_SLU; - if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) { - /* set both sw defined pins */ + /* set both sw defined pins on 82575/82576*/ + if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) ctrl_reg |= 
E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1; - /* Set switch control to serdes energy detect */ - reg = E1000_READ_REG(hw, E1000_CONNSW); - reg |= E1000_CONNSW_ENRGSRC; - E1000_WRITE_REG(hw, E1000_CONNSW, reg); - } - reg = E1000_READ_REG(hw, E1000_PCS_LCTL); /* default pcs_autoneg to the same setting as mac autoneg */ @@ -1223,14 +1643,27 @@ static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw) case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: /* disable PCS autoneg and support parallel detect only */ pcs_autoneg = false; + /* fall through to default case */ default: - /* + if (hw->mac.type == e1000_82575 || + hw->mac.type == e1000_82576) { + ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + if (data & E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT) + pcs_autoneg = false; + } + + /* * non-SGMII modes only supports a speed of 1000/Full for the * link so it is best to just force the MAC and let the pcs * link either autoneg or be forced to 1000/Full */ ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD | - E1000_CTRL_FD | E1000_CTRL_FRCDPX; + E1000_CTRL_FD | E1000_CTRL_FRCDPX; /* set speed of 1000/Full if speed/duplex is forced */ reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL; @@ -1246,31 +1679,203 @@ static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw) * However, both are supported by the hardware and some drivers/tools. */ reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP | - E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK); - - /* - * We force flow control to prevent the CTRL register values from being - * overwritten by the autonegotiated flow control values - */ - reg |= E1000_PCS_LCTL_FORCE_FCTRL; + E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK); if (pcs_autoneg) { /* Set PCS register for autoneg */ reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */ E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */ + + /* Disable force flow control for autoneg */ + reg &= ~E1000_PCS_LCTL_FORCE_FCTRL; + + /* Configure flow control advertisement for autoneg */ + anadv_reg = E1000_READ_REG(hw, E1000_PCS_ANADV); + anadv_reg &= ~(E1000_TXCW_ASM_DIR | E1000_TXCW_PAUSE); + + switch (hw->fc.requested_mode) { + case e1000_fc_full: + case e1000_fc_rx_pause: + anadv_reg |= E1000_TXCW_ASM_DIR; + anadv_reg |= E1000_TXCW_PAUSE; + break; + case e1000_fc_tx_pause: + anadv_reg |= E1000_TXCW_ASM_DIR; + break; + default: + break; + } + + E1000_WRITE_REG(hw, E1000_PCS_ANADV, anadv_reg); + DEBUGOUT1("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg); } else { /* Set PCS register for forced link */ - reg |= E1000_PCS_LCTL_FSD; /* Force Speed */ + reg |= E1000_PCS_LCTL_FSD; /* Force Speed */ + + /* Force flow control for forced link */ + reg |= E1000_PCS_LCTL_FORCE_FCTRL; + DEBUGOUT1("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg); } E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg); - if (!e1000_sgmii_active_82575(hw)) + if (!pcs_autoneg && !e1000_sgmii_active_82575(hw)) e1000_force_mac_fc_generic(hw); - return E1000_SUCCESS; + return ret_val; +} + +/** + * e1000_get_media_type_82575 - derives current media type. + * @hw: pointer to the HW structure + * + * The media type is chosen reflecting few settings. + * The following are taken into account: + * - link mode set in the current port Init Control Word #3 + * - current link mode settings in CSR register + * - MDIO vs. 
I2C PHY control interface chosen + * - SFP module media type + **/ +static s32 e1000_get_media_type_82575(struct e1000_hw *hw) +{ + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + s32 ret_val = E1000_SUCCESS; + u32 ctrl_ext = 0; + u32 link_mode = 0; + + /* Set internal phy as default */ + dev_spec->sgmii_active = false; + dev_spec->module_plugged = false; + + /* Get CSR setting */ + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + + /* extract link mode setting */ + link_mode = ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK; + + switch (link_mode) { + case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: + hw->phy.media_type = e1000_media_type_internal_serdes; + break; + case E1000_CTRL_EXT_LINK_MODE_GMII: + hw->phy.media_type = e1000_media_type_copper; + break; + case E1000_CTRL_EXT_LINK_MODE_SGMII: + /* Get phy control interface type set (MDIO vs. I2C)*/ + if (e1000_sgmii_uses_mdio_82575(hw)) { + hw->phy.media_type = e1000_media_type_copper; + dev_spec->sgmii_active = true; + break; + } + /* fall through for I2C based SGMII */ + case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES: + /* read media type from SFP EEPROM */ + ret_val = e1000_set_sfp_media_type_82575(hw); + if ((ret_val != E1000_SUCCESS) || + (hw->phy.media_type == e1000_media_type_unknown)) { + /* + * If media type was not identified then return media + * type defined by the CTRL_EXT settings. + */ + hw->phy.media_type = e1000_media_type_internal_serdes; + + if (link_mode == E1000_CTRL_EXT_LINK_MODE_SGMII) { + hw->phy.media_type = e1000_media_type_copper; + dev_spec->sgmii_active = true; + } + + break; + } + + /* do not change link mode for 100BaseFX */ + if (dev_spec->eth_flags.e100_base_fx) + break; + + /* change current link mode setting */ + ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK; + + if (hw->phy.media_type == e1000_media_type_copper) + ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII; + else + ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; + + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + + break; + } + + return ret_val; +} + +/** + * e1000_set_sfp_media_type_82575 - derives SFP module media type. + * @hw: pointer to the HW structure + * + * The media type is chosen based on SFP module. + * compatibility flags retrieved from SFP ID EEPROM. 
+ **/ +static s32 e1000_set_sfp_media_type_82575(struct e1000_hw *hw) +{ + s32 ret_val = E1000_ERR_CONFIG; + u32 ctrl_ext = 0; + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + struct sfp_e1000_flags *eth_flags = &dev_spec->eth_flags; + u8 tranceiver_type = 0; + s32 timeout = 3; + + /* Turn I2C interface ON and power on sfp cage */ + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_I2C_ENA); + + E1000_WRITE_FLUSH(hw); + + /* Read SFP module data */ + while (timeout) { + ret_val = e1000_read_sfp_data_byte(hw, + E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_IDENTIFIER_OFFSET), + &tranceiver_type); + if (ret_val == E1000_SUCCESS) + break; + msec_delay(100); + timeout--; + } + if (ret_val != E1000_SUCCESS) + goto out; + + ret_val = e1000_read_sfp_data_byte(hw, + E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_ETH_FLAGS_OFFSET), + (u8 *)eth_flags); + if (ret_val != E1000_SUCCESS) + goto out; + + /* Check if there is some SFP module plugged and powered */ + if ((tranceiver_type == E1000_SFF_IDENTIFIER_SFP) || + (tranceiver_type == E1000_SFF_IDENTIFIER_SFF)) { + dev_spec->module_plugged = true; + if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) { + hw->phy.media_type = e1000_media_type_internal_serdes; + } else if (eth_flags->e100_base_fx) { + dev_spec->sgmii_active = true; + hw->phy.media_type = e1000_media_type_internal_serdes; + } else if (eth_flags->e1000_base_t) { + dev_spec->sgmii_active = true; + hw->phy.media_type = e1000_media_type_copper; + } else { + hw->phy.media_type = e1000_media_type_unknown; + DEBUGOUT("PHY module has not been recognized\n"); + goto out; + } + } else { + hw->phy.media_type = e1000_media_type_unknown; + } + ret_val = E1000_SUCCESS; +out: + /* Restore I2C interface setting */ + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + return ret_val; } /** @@ -1294,7 +1899,7 @@ static s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data) } if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) { - switch(hw->phy.media_type) { + switch (hw->phy.media_type) { case e1000_media_type_internal_serdes: *data = ID_LED_DEFAULT_82575_SERDES; break; @@ -1329,7 +1934,7 @@ static bool e1000_sgmii_active_82575(struct e1000_hw *hw) * Inits recommended HW defaults after a reset when there is no EEPROM * detected. This is only for the 82575. **/ -static s32 e1000_reset_init_script_82575(struct e1000_hw* hw) +static s32 e1000_reset_init_script_82575(struct e1000_hw *hw) { DEBUGFUNC("e1000_reset_init_script_82575"); @@ -1385,6 +1990,28 @@ out: return ret_val; } +/** + * e1000_config_collision_dist_82575 - Configure collision distance + * @hw: pointer to the HW structure + * + * Configures the collision distance to the default value and is used + * during link setup. 
+ **/ +static void e1000_config_collision_dist_82575(struct e1000_hw *hw) +{ + u32 tctl_ext; + + DEBUGFUNC("e1000_config_collision_dist_82575"); + + tctl_ext = E1000_READ_REG(hw, E1000_TCTL_EXT); + + tctl_ext &= ~E1000_TCTL_EXT_COLD; + tctl_ext |= E1000_COLLISION_DISTANCE << E1000_TCTL_EXT_COLD_SHIFT; + + E1000_WRITE_REG(hw, E1000_TCTL_EXT, tctl_ext); + E1000_WRITE_FLUSH(hw); +} + /** * e1000_power_down_phy_copper_82575 - Remove link during PHY power down * @hw: pointer to the HW structure @@ -1395,13 +2022,12 @@ out: static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; - struct e1000_mac_info *mac = &hw->mac; if (!(phy->ops.check_reset_block)) return; /* If the management interface is not enabled, then power down */ - if (!(mac->ops.check_mng_mode(hw) || phy->ops.check_reset_block(hw))) + if (!(e1000_enable_mng_pass_thru(hw) || phy->ops.check_reset_block(hw))) e1000_power_down_phy_copper(hw); return; @@ -1473,7 +2099,7 @@ static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw) } /** - * e1000_rx_fifo_flush_82575 - Clean rx fifo after RX enable + * e1000_rx_fifo_flush_82575 - Clean rx fifo after Rx enable * @hw: pointer to the HW structure * * After rx enable if managability is enabled then there is likely some @@ -1491,11 +2117,11 @@ void e1000_rx_fifo_flush_82575(struct e1000_hw *hw) !(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_RCV_TCO_EN)) return; - /* Disable all RX queues */ + /* Disable all Rx queues */ for (i = 0; i < 4; i++) { rxdctl[i] = E1000_READ_REG(hw, E1000_RXDCTL(i)); E1000_WRITE_REG(hw, E1000_RXDCTL(i), - rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE); + rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE); } /* Poll all queues to verify they have shut down */ for (ms_wait = 0; ms_wait < 10; ms_wait++) { @@ -1529,7 +2155,7 @@ void e1000_rx_fifo_flush_82575(struct e1000_hw *hw) E1000_WRITE_FLUSH(hw); msec_delay(2); - /* Enable RX queues that were previously enabled and restore our + /* Enable Rx queues that were previously enabled and restore our * previous state */ for (i = 0; i < 4; i++) @@ -1581,14 +2207,14 @@ static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw) * 16ms to 55ms */ ret_val = e1000_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, - &pcie_devctl2); + &pcie_devctl2); if (ret_val) goto out; pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms; ret_val = e1000_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, - &pcie_devctl2); + &pcie_devctl2); out: /* disable completion timeout resend */ gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND; @@ -1597,6 +2223,45 @@ out: return ret_val; } +/** + * e1000_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing + * @hw: pointer to the hardware struct + * @enable: state to enter, either enabled or disabled + * @pf: Physical Function pool - do not set anti-spoofing for the PF + * + * enables/disables L2 switch anti-spoofing functionality. 
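The PF carve-out in the function below is done with an XOR rather than a second mask: every pool's MAC and VLAN spoof-check bit is first set, then the two bits belonging to the PF pool are flipped back off. A worked example of the bit arithmetic, assuming the usual field layout (MAC spoof-check bits in [7:0], VLAN spoof-check bits in [15:8], MAX_NUM_VFS == 8) and pf == 0:

	u32 reg_val = 0;
	int pf = 0;				/* PF pool index */

	reg_val |= E1000_DTXSWC_MAC_SPOOF_MASK |	/* bits 7:0  */
		   E1000_DTXSWC_VLAN_SPOOF_MASK;	/* bits 15:8 */
	/* reg_val == 0x0000FFFF: every pool checked */

	reg_val ^= (1 << pf) | (1 << (pf + MAX_NUM_VFS));
	/* 0x0000FFFF ^ 0x00000101 == 0x0000FEFE: spoof checking stays
	 * enabled for every pool except the PF's own MAC and VLAN bits. */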
+ **/ +void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf) +{ + u32 reg_val, reg_offset; + + switch (hw->mac.type) { + case e1000_82576: + reg_offset = E1000_DTXSWC; + break; + case e1000_i350: + case e1000_i354: + reg_offset = E1000_TXSWC; + break; + default: + return; + } + + reg_val = E1000_READ_REG(hw, reg_offset); + if (enable) { + reg_val |= (E1000_DTXSWC_MAC_SPOOF_MASK | + E1000_DTXSWC_VLAN_SPOOF_MASK); + /* The PF can spoof - it has to in order to + * support emulation mode NICs + */ + reg_val ^= (1 << pf | 1 << (pf + MAX_NUM_VFS)); + } else { + reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK | + E1000_DTXSWC_VLAN_SPOOF_MASK); + } + E1000_WRITE_REG(hw, reg_offset, reg_val); +} + /** * e1000_vmdq_set_loopback_pf - enable or disable vmdq loopback * @hw: pointer to the hardware struct @@ -1606,14 +2271,32 @@ out: **/ void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable) { - u32 dtxswc = E1000_READ_REG(hw, E1000_DTXSWC); + u32 dtxswc; + + switch (hw->mac.type) { + case e1000_82576: + dtxswc = E1000_READ_REG(hw, E1000_DTXSWC); + if (enable) + dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN; + else + dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN; + E1000_WRITE_REG(hw, E1000_DTXSWC, dtxswc); + break; + case e1000_i350: + case e1000_i354: + dtxswc = E1000_READ_REG(hw, E1000_TXSWC); + if (enable) + dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN; + else + dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN; + E1000_WRITE_REG(hw, E1000_TXSWC, dtxswc); + break; + default: + /* Currently no other hardware supports loopback */ + break; + } - if (enable) - dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN; - else - dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN; - E1000_WRITE_REG(hw, E1000_DTXSWC, dtxswc); } /** @@ -1646,7 +2329,6 @@ void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable) **/ static s32 e1000_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data) { - u32 mdicnfg = 0; s32 ret_val; DEBUGFUNC("e1000_read_phy_reg_82580"); @@ -1655,15 +2337,6 @@ static s32 e1000_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data) if (ret_val) goto out; - /* - * We config the phy address in MDICNFG register now. Same bits - * as before. The values in MDIC can be written but will be - * ignored. This allows us to call the old function after - * configuring the PHY address in the new register - */ - mdicnfg = (hw->phy.addr << E1000_MDIC_PHY_SHIFT); - E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg); - ret_val = e1000_read_phy_reg_mdic(hw, offset, data); hw->phy.ops.release(hw); @@ -1682,7 +2355,6 @@ out: **/ static s32 e1000_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data) { - u32 mdicnfg = 0; s32 ret_val; DEBUGFUNC("e1000_write_phy_reg_82580"); @@ -1691,15 +2363,6 @@ static s32 e1000_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data) if (ret_val) goto out; - /* - * We config the phy address in MDICNFG register now. Same bits - * as before. The values in MDIC can be written but will be - * ignored. This allows us to call the old function after - * configuring the PHY address in the new register - */ - mdicnfg = (hw->phy.addr << E1000_MDIC_PHY_SHIFT); - E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg); - ret_val = e1000_write_phy_reg_mdic(hw, offset, data); hw->phy.ops.release(hw); @@ -1708,6 +2371,45 @@ out: return ret_val; } +/** + * e1000_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits + * @hw: pointer to the HW structure + * + * This resets the the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on + * the values found in the EEPROM. 
This addresses an issue in which these + * bits are not restored from EEPROM after reset. + **/ +static s32 e1000_reset_mdicnfg_82580(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u32 mdicnfg; + u16 nvm_data = 0; + + DEBUGFUNC("e1000_reset_mdicnfg_82580"); + + if (hw->mac.type != e1000_82580) + goto out; + if (!e1000_sgmii_active_82575(hw)) + goto out; + + ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + + NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, + &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + + mdicnfg = E1000_READ_REG(hw, E1000_MDICNFG); + if (nvm_data & NVM_WORD24_EXT_MDIO) + mdicnfg |= E1000_MDICNFG_EXT_MDIO; + if (nvm_data & NVM_WORD24_COM_MDIO) + mdicnfg |= E1000_MDICNFG_COM_MDIO; + E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg); +out: + return ret_val; +} + /** * e1000_reset_hw_82580 - Reset hardware * @hw: pointer to the HW structure @@ -1720,13 +2422,17 @@ static s32 e1000_reset_hw_82580(struct e1000_hw *hw) s32 ret_val = E1000_SUCCESS; /* BH SW mailbox bit in SW_FW_SYNC */ u16 swmbsw_mask = E1000_SW_SYNCH_MB; - u32 ctrl, icr; + u32 ctrl; bool global_device_reset = hw->dev_spec._82575.global_device_reset; DEBUGFUNC("e1000_reset_hw_82580"); hw->dev_spec._82575.global_device_reset = false; + /* 82580 does not reliably do global_device_reset due to hw errata */ + if (hw->mac.type == e1000_82580) + global_device_reset = false; + /* Get current control state. */ ctrl = E1000_READ_REG(hw, E1000_CTRL); @@ -1747,17 +2453,18 @@ static s32 e1000_reset_hw_82580(struct e1000_hw *hw) msec_delay(10); /* Determine whether or not a global dev reset is requested */ - if (global_device_reset && - e1000_acquire_swfw_sync_82575(hw, swmbsw_mask)) + if (global_device_reset && hw->mac.ops.acquire_swfw_sync(hw, + swmbsw_mask)) global_device_reset = false; - if (global_device_reset && - !(E1000_READ_REG(hw, E1000_STATUS) & E1000_STAT_DEV_RST_SET)) + if (global_device_reset && !(E1000_READ_REG(hw, E1000_STATUS) & + E1000_STAT_DEV_RST_SET)) ctrl |= E1000_CTRL_DEV_RST; else ctrl |= E1000_CTRL_RST; E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + E1000_WRITE_FLUSH(hw); /* Add delay to insure DEV_RST has time to complete */ if (global_device_reset) @@ -1773,29 +2480,29 @@ static s32 e1000_reset_hw_82580(struct e1000_hw *hw) DEBUGOUT("Auto Read Done did not complete\n"); } - /* If EEPROM is not present, run manual init scripts */ - if ((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0) - e1000_reset_init_script_82575(hw); - /* clear global device reset status bit */ E1000_WRITE_REG(hw, E1000_STATUS, E1000_STAT_DEV_RST_SET); /* Clear any pending interrupt events. */ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); - icr = E1000_READ_REG(hw, E1000_ICR); + E1000_READ_REG(hw, E1000_ICR); + + ret_val = e1000_reset_mdicnfg_82580(hw); + if (ret_val) + DEBUGOUT("Could not reset MDICNFG based on EEPROM\n"); /* Install any alternate MAC address into RAR0 */ ret_val = e1000_check_alt_mac_addr_generic(hw); /* Release semaphore */ if (global_device_reset) - e1000_release_swfw_sync_82575(hw, swmbsw_mask); + hw->mac.ops.release_swfw_sync(hw, swmbsw_mask); return ret_val; } /** - * e1000_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual RX PBA size + * e1000_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual Rx PBA size * @data: data received by reading RXPBS register * * The 82580 uses a table based approach for packet buffer allocation sizes. 
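In other words, RXPBS carries an index rather than a size, and the function translates it through a fixed table. A sketch of the shape of that lookup; the table entries here are quoted from the upstream igb sources from memory and should be treated as illustrative rather than authoritative:

	/* Packet buffer sizes, in KB, indexed by the RXPBS field */
	static const u16 rxpbs_table_82580[] = {
		36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
	};

	u16 rxpbs_adjust_82580_sketch(u32 data)
	{
		u16 size = 0;

		if (data < sizeof(rxpbs_table_82580) /
			   sizeof(rxpbs_table_82580[0]))
			size = rxpbs_table_82580[data];
		return size;	/* 0 for an out-of-range index */
	}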
@@ -1813,3 +2520,1141 @@ u16 e1000_rxpbs_adjust_82580(u32 data) return ret_val; } + +/** + * e1000_validate_nvm_checksum_with_offset - Validate EEPROM + * checksum + * @hw: pointer to the HW structure + * @offset: offset in words of the checksum protected region + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + **/ +s32 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset) +{ + s32 ret_val = E1000_SUCCESS; + u16 checksum = 0; + u16 i, nvm_data; + + DEBUGFUNC("e1000_validate_nvm_checksum_with_offset"); + + for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + checksum += nvm_data; + } + + if (checksum != (u16) NVM_SUM) { + DEBUGOUT("NVM Checksum Invalid\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_update_nvm_checksum_with_offset - Update EEPROM + * checksum + * @hw: pointer to the HW structure + * @offset: offset in words of the checksum protected region + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. + **/ +s32 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + DEBUGFUNC("e1000_update_nvm_checksum_with_offset"); + + for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error while updating checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16) NVM_SUM - checksum; + ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1, + &checksum); + if (ret_val) + DEBUGOUT("NVM Write Error while updating checksum.\n"); + +out: + return ret_val; +} + +/** + * e1000_validate_nvm_checksum_82580 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM section checksum by reading/adding each word of + * the EEPROM and then verifies that the sum of the EEPROM is + * equal to 0xBABA. + **/ +static s32 e1000_validate_nvm_checksum_82580(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u16 eeprom_regions_count = 1; + u16 j, nvm_data; + u16 nvm_offset; + + DEBUGFUNC("e1000_validate_nvm_checksum_82580"); + + ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + + if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) { + /* if chekcsums compatibility bit is set validate checksums + * for all 4 ports. */ + eeprom_regions_count = 4; + } + + for (j = 0; j < eeprom_regions_count; j++) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); + ret_val = e1000_validate_nvm_checksum_with_offset(hw, + nvm_offset); + if (ret_val != E1000_SUCCESS) + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_update_nvm_checksum_82580 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM section checksums for all 4 ports by reading/adding + * each word of the EEPROM up to the checksum. Then calculates the EEPROM + * checksum and writes the value to the EEPROM. 
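Both checksum routines below rest on one invariant: all words of a protected region, the checksum word included, sum to NVM_SUM (0xBABA) modulo 2^16, so the update path stores NVM_SUM minus the running sum. A toy demonstration of the arithmetic over an in-memory copy of a region (nwords words, checksum word excluded; assumes the driver headers for NVM_SUM and the u16 type):

	static u16 nvm_checksum_for(const u16 *words, int nwords)
	{
		u16 sum = 0;
		int i;

		for (i = 0; i < nwords; i++)
			sum += words[i];

		/* makes the grand total come out to 0xBABA */
		return (u16)(NVM_SUM - sum);
	}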
+ **/ +static s32 e1000_update_nvm_checksum_82580(struct e1000_hw *hw) +{ + s32 ret_val; + u16 j, nvm_data; + u16 nvm_offset; + + DEBUGFUNC("e1000_update_nvm_checksum_82580"); + + ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error while updating checksum compatibility bit.\n"); + goto out; + } + + if (!(nvm_data & NVM_COMPATIBILITY_BIT_MASK)) { + /* set compatibility bit to validate checksums appropriately */ + nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK; + ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1, + &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Write Error while updating checksum compatibility bit.\n"); + goto out; + } + } + + for (j = 0; j < 4; j++) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); + ret_val = e1000_update_nvm_checksum_with_offset(hw, nvm_offset); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_validate_nvm_checksum_i350 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM section checksum by reading/adding each word of + * the EEPROM and then verifies that the sum of the EEPROM is + * equal to 0xBABA. + **/ +static s32 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u16 j; + u16 nvm_offset; + + DEBUGFUNC("e1000_validate_nvm_checksum_i350"); + + for (j = 0; j < 4; j++) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); + ret_val = e1000_validate_nvm_checksum_with_offset(hw, + nvm_offset); + if (ret_val != E1000_SUCCESS) + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_update_nvm_checksum_i350 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM section checksums for all 4 ports by reading/adding + * each word of the EEPROM up to the checksum. Then calculates the EEPROM + * checksum and writes the value to the EEPROM. + **/ +static s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u16 j; + u16 nvm_offset; + + DEBUGFUNC("e1000_update_nvm_checksum_i350"); + + for (j = 0; j < 4; j++) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); + ret_val = e1000_update_nvm_checksum_with_offset(hw, nvm_offset); + if (ret_val != E1000_SUCCESS) + goto out; + } + +out: + return ret_val; +} + +/** + * __e1000_access_emi_reg - Read/write EMI register + * @hw: pointer to the HW structure + * @addr: EMI address to program + * @data: pointer to value to read/write from/to the EMI address + * @read: boolean flag to indicate read or write + **/ +static s32 __e1000_access_emi_reg(struct e1000_hw *hw, u16 address, + u16 *data, bool read) +{ + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("__e1000_access_emi_reg"); + + ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address); + if (ret_val) + return ret_val; + + if (read) + ret_val = hw->phy.ops.read_reg(hw, E1000_EMIDATA, data); + else + ret_val = hw->phy.ops.write_reg(hw, E1000_EMIDATA, *data); + + return ret_val; +} + +/** + * e1000_read_emi_reg - Read Extended Management Interface register + * @hw: pointer to the HW structure + * @addr: EMI address to program + * @data: value to be read from the EMI address + **/ +s32 e1000_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data) +{ + DEBUGFUNC("e1000_read_emi_reg"); + + return __e1000_access_emi_reg(hw, addr, data, true); +} + +/** + * e1000_set_eee_i350 - Enable/disable EEE support + * @hw: pointer to the HW structure + * + * Enable/disable EEE based on setting in dev_spec structure. 
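The function below is driven entirely by the eee_disable flag in dev_spec, so a caller toggles EEE in two steps: flip the flag, then re-run the setup. A minimal caller-side sketch (the wrapper name and its 'wanted' flag are hypothetical, not driver code):

	static s32 igb_apply_eee(struct e1000_hw *hw, bool wanted)
	{
		/* the driver stores the inverse sense: eee_disable */
		hw->dev_spec._82575.eee_disable = !wanted;
		return e1000_set_eee_i350(hw);
	}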
+ * + **/ +s32 e1000_set_eee_i350(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u32 ipcnfg, eeer; + + DEBUGFUNC("e1000_set_eee_i350"); + + if ((hw->mac.type < e1000_i350) || + (hw->phy.media_type != e1000_media_type_copper)) + goto out; + ipcnfg = E1000_READ_REG(hw, E1000_IPCNFG); + eeer = E1000_READ_REG(hw, E1000_EEER); + + /* enable or disable per user setting */ + if (!(hw->dev_spec._82575.eee_disable)) { + u32 eee_su = E1000_READ_REG(hw, E1000_EEE_SU); + + ipcnfg |= (E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN); + eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN | + E1000_EEER_LPI_FC); + + /* This bit should not be set in normal operation. */ + if (eee_su & E1000_EEE_SU_LPI_CLK_STP) + DEBUGOUT("LPI Clock Stop Bit should not be set!\n"); + } else { + ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN); + eeer &= ~(E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN | + E1000_EEER_LPI_FC); + } + E1000_WRITE_REG(hw, E1000_IPCNFG, ipcnfg); + E1000_WRITE_REG(hw, E1000_EEER, eeer); + E1000_READ_REG(hw, E1000_IPCNFG); + E1000_READ_REG(hw, E1000_EEER); +out: + + return ret_val; +} + +/** + * e1000_set_eee_i354 - Enable/disable EEE support + * @hw: pointer to the HW structure + * + * Enable/disable EEE legacy mode based on setting in dev_spec structure. + * + **/ +s32 e1000_set_eee_i354(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + u16 phy_data; + + DEBUGFUNC("e1000_set_eee_i354"); + + if ((hw->phy.media_type != e1000_media_type_copper) || + ((phy->id != M88E1543_E_PHY_ID))) + goto out; + + if (!hw->dev_spec._82575.eee_disable) { + /* Switch to PHY page 18. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 18); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, E1000_M88E1543_EEE_CTRL_1, + &phy_data); + if (ret_val) + goto out; + + phy_data |= E1000_M88E1543_EEE_CTRL_1_MS; + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_EEE_CTRL_1, + phy_data); + if (ret_val) + goto out; + + /* Return the PHY to page 0. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0); + if (ret_val) + goto out; + + /* Turn on EEE advertisement. */ + ret_val = e1000_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, + E1000_EEE_ADV_DEV_I354, + &phy_data); + if (ret_val) + goto out; + + phy_data |= E1000_EEE_ADV_100_SUPPORTED | + E1000_EEE_ADV_1000_SUPPORTED; + ret_val = e1000_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, + E1000_EEE_ADV_DEV_I354, + phy_data); + } else { + /* Turn off EEE advertisement. */ + ret_val = e1000_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, + E1000_EEE_ADV_DEV_I354, + &phy_data); + if (ret_val) + goto out; + + phy_data &= ~(E1000_EEE_ADV_100_SUPPORTED | + E1000_EEE_ADV_1000_SUPPORTED); + ret_val = e1000_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, + E1000_EEE_ADV_DEV_I354, + phy_data); + } + +out: + return ret_val; +} + +/** + * e1000_get_eee_status_i354 - Get EEE status + * @hw: pointer to the HW structure + * @status: EEE status + * + * Get EEE status by guessing based on whether Tx or Rx LPI indications have + * been received. + **/ +s32 e1000_get_eee_status_i354(struct e1000_hw *hw, bool *status) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + u16 phy_data; + + DEBUGFUNC("e1000_get_eee_status_i354"); + + /* Check if EEE is supported on this device. 
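Stepping back to e1000_set_eee_i354() above: both of its advertisement branches are the same read-modify-write of the EEE advertisement register through the xMDIO helpers, differing only in whether the bits are set or cleared. The pattern in isolation, as a sketch (the helper name is illustrative):

	static s32 i354_set_eee_adv(struct e1000_hw *hw, bool enable)
	{
		u16 adv;
		s32 ret_val;

		ret_val = e1000_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
					       E1000_EEE_ADV_DEV_I354, &adv);
		if (ret_val)
			return ret_val;

		if (enable)
			adv |= E1000_EEE_ADV_100_SUPPORTED |
			       E1000_EEE_ADV_1000_SUPPORTED;
		else
			adv &= ~(E1000_EEE_ADV_100_SUPPORTED |
				 E1000_EEE_ADV_1000_SUPPORTED);

		return e1000_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
					     E1000_EEE_ADV_DEV_I354, adv);
	}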
*/ + if ((hw->phy.media_type != e1000_media_type_copper) || + ((phy->id != M88E1543_E_PHY_ID))) + goto out; + + ret_val = e1000_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354, + E1000_PCS_STATUS_DEV_I354, + &phy_data); + if (ret_val) + goto out; + + *status = phy_data & (E1000_PCS_STATUS_TX_LPI_RCVD | + E1000_PCS_STATUS_RX_LPI_RCVD) ? true : false; + +out: + return ret_val; +} + +/* Due to a hw errata, if the host tries to configure the VFTA register + * while performing queries from the BMC or DMA, then the VFTA in some + * cases won't be written. + */ + +/** + * e1000_clear_vfta_i350 - Clear VLAN filter table + * @hw: pointer to the HW structure + * + * Clears the register array which contains the VLAN filter table by + * setting all the values to 0. + **/ +void e1000_clear_vfta_i350(struct e1000_hw *hw) +{ + u32 offset; + int i; + + DEBUGFUNC("e1000_clear_vfta_350"); + + for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { + for (i = 0; i < 10; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0); + + E1000_WRITE_FLUSH(hw); + } +} + +/** + * e1000_write_vfta_i350 - Write value to VLAN filter table + * @hw: pointer to the HW structure + * @offset: register offset in VLAN filter table + * @value: register value written to VLAN filter table + * + * Writes value at the given offset in the register array which stores + * the VLAN filter table. + **/ +void e1000_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value) +{ + int i; + + DEBUGFUNC("e1000_write_vfta_350"); + + for (i = 0; i < 10; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value); + + E1000_WRITE_FLUSH(hw); +} + + +/** + * e1000_set_i2c_bb - Enable I2C bit-bang + * @hw: pointer to the HW structure + * + * Enable I2C bit-bang interface + * + **/ +s32 e1000_set_i2c_bb(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u32 ctrl_ext, i2cparams; + + DEBUGFUNC("e1000_set_i2c_bb"); + + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + ctrl_ext |= E1000_CTRL_I2C_ENA; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(hw); + + i2cparams = E1000_READ_REG(hw, E1000_I2CPARAMS); + i2cparams |= E1000_I2CBB_EN; + i2cparams |= E1000_I2C_DATA_OE_N; + i2cparams |= E1000_I2C_CLK_OE_N; + E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cparams); + E1000_WRITE_FLUSH(hw); + + return ret_val; +} + +/** + * e1000_read_i2c_byte_generic - Reads 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @dev_addr: device address + * @data: value read + * + * Performs byte read operation over I2C interface at + * a specified device address. 
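The transfer implemented below is the standard I2C combined-format read:

	/*
	 * START, dev_addr|W, ACK, byte_offset, ACK,
	 * repeated START, dev_addr|R, ACK, data byte in,
	 * NACK (from the master), STOP
	 */

A usage sketch, reading the SFF identifier byte from a module at the conventional A0h EEPROM address (both literals are illustrative, not driver defines):

	u8 id;
	s32 ret = e1000_read_i2c_byte_generic(hw, 0x00 /* identifier */,
					      0xA0 /* SFP EEPROM */, &id);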
+ **/ +s32 e1000_read_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data) +{ + s32 status = E1000_SUCCESS; + u32 max_retry = 10; + u32 retry = 1; + u16 swfw_mask = 0; + + bool nack = true; + + DEBUGFUNC("e1000_read_i2c_byte_generic"); + + swfw_mask = E1000_SWFW_PHY0_SM; + + do { + if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) + != E1000_SUCCESS) { + status = E1000_ERR_SWFW_SYNC; + goto read_byte_out; + } + + e1000_i2c_start(hw); + + /* Device Address and write indication */ + status = e1000_clock_out_i2c_byte(hw, dev_addr); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_get_i2c_ack(hw); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_clock_out_i2c_byte(hw, byte_offset); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_get_i2c_ack(hw); + if (status != E1000_SUCCESS) + goto fail; + + e1000_i2c_start(hw); + + /* Device Address and read indication */ + status = e1000_clock_out_i2c_byte(hw, (dev_addr | 0x1)); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_get_i2c_ack(hw); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_clock_in_i2c_byte(hw, data); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_clock_out_i2c_bit(hw, nack); + if (status != E1000_SUCCESS) + goto fail; + + e1000_i2c_stop(hw); + break; + +fail: + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + msec_delay(100); + e1000_i2c_bus_clear(hw); + retry++; + if (retry < max_retry) + DEBUGOUT("I2C byte read error - Retrying.\n"); + else + DEBUGOUT("I2C byte read error.\n"); + + } while (retry < max_retry); + + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + +read_byte_out: + + return status; +} + +/** + * e1000_write_i2c_byte_generic - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @dev_addr: device address + * @data: value to write + * + * Performs byte write operation over I2C interface at + * a specified device address. 
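For the write direction, the thermal code later in this file is a representative caller: it pushes a high-temperature limit byte into the external EMC sensor over this same interface. The call in isolation (the 85-degree value is made up for the example; the two constants are the driver's own):

	u8 therm_limit = 85;	/* degrees C, illustrative */
	s32 ret = e1000_write_i2c_byte_generic(hw,
					E1000_EMC_DIODE1_THERM_LIMIT,
					E1000_I2C_THERMAL_SENSOR_ADDR,
					therm_limit);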
+ **/ +s32 e1000_write_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data) +{ + s32 status = E1000_SUCCESS; + u32 max_retry = 1; + u32 retry = 0; + u16 swfw_mask = 0; + + DEBUGFUNC("e1000_write_i2c_byte_generic"); + + swfw_mask = E1000_SWFW_PHY0_SM; + + if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != E1000_SUCCESS) { + status = E1000_ERR_SWFW_SYNC; + goto write_byte_out; + } + + do { + e1000_i2c_start(hw); + + status = e1000_clock_out_i2c_byte(hw, dev_addr); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_get_i2c_ack(hw); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_clock_out_i2c_byte(hw, byte_offset); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_get_i2c_ack(hw); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_clock_out_i2c_byte(hw, data); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_get_i2c_ack(hw); + if (status != E1000_SUCCESS) + goto fail; + + e1000_i2c_stop(hw); + break; + +fail: + e1000_i2c_bus_clear(hw); + retry++; + if (retry < max_retry) + DEBUGOUT("I2C byte write error - Retrying.\n"); + else + DEBUGOUT("I2C byte write error.\n"); + } while (retry < max_retry); + + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + +write_byte_out: + + return status; +} + +/** + * e1000_i2c_start - Sets I2C start condition + * @hw: pointer to hardware structure + * + * Sets I2C start condition (High -> Low on SDA while SCL is High) + **/ +static void e1000_i2c_start(struct e1000_hw *hw) +{ + u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + + DEBUGFUNC("e1000_i2c_start"); + + /* Start condition must begin with data and clock high */ + e1000_set_i2c_data(hw, &i2cctl, 1); + e1000_raise_i2c_clk(hw, &i2cctl); + + /* Setup time for start condition (4.7us) */ + usec_delay(E1000_I2C_T_SU_STA); + + e1000_set_i2c_data(hw, &i2cctl, 0); + + /* Hold time for start condition (4us) */ + usec_delay(E1000_I2C_T_HD_STA); + + e1000_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us */ + usec_delay(E1000_I2C_T_LOW); + +} + +/** + * e1000_i2c_stop - Sets I2C stop condition + * @hw: pointer to hardware structure + * + * Sets I2C stop condition (Low -> High on SDA while SCL is High) + **/ +static void e1000_i2c_stop(struct e1000_hw *hw) +{ + u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + + DEBUGFUNC("e1000_i2c_stop"); + + /* Stop condition must begin with data low and clock high */ + e1000_set_i2c_data(hw, &i2cctl, 0); + e1000_raise_i2c_clk(hw, &i2cctl); + + /* Setup time for stop condition (4us) */ + usec_delay(E1000_I2C_T_SU_STO); + + e1000_set_i2c_data(hw, &i2cctl, 1); + + /* bus free time between stop and start (4.7us)*/ + usec_delay(E1000_I2C_T_BUF); +} + +/** + * e1000_clock_in_i2c_byte - Clocks in one byte via I2C + * @hw: pointer to hardware structure + * @data: data byte to clock in + * + * Clocks in one byte data via I2C data/clock + **/ +static s32 e1000_clock_in_i2c_byte(struct e1000_hw *hw, u8 *data) +{ + s32 i; + bool bit = 0; + + DEBUGFUNC("e1000_clock_in_i2c_byte"); + + *data = 0; + for (i = 7; i >= 0; i--) { + e1000_clock_in_i2c_bit(hw, &bit); + *data |= bit << i; + } + + return E1000_SUCCESS; +} + +/** + * e1000_clock_out_i2c_byte - Clocks out one byte via I2C + * @hw: pointer to hardware structure + * @data: data byte clocked out + * + * Clocks out one byte data via I2C data/clock + **/ +static s32 e1000_clock_out_i2c_byte(struct e1000_hw *hw, u8 data) +{ + s32 status = E1000_SUCCESS; + s32 i; + u32 i2cctl; + bool bit = 0; + + 
DEBUGFUNC("e1000_clock_out_i2c_byte"); + + for (i = 7; i >= 0; i--) { + bit = (data >> i) & 0x1; + status = e1000_clock_out_i2c_bit(hw, bit); + + if (status != E1000_SUCCESS) + break; + } + + /* Release SDA line (set high) */ + i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + + i2cctl |= E1000_I2C_DATA_OE_N; + E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cctl); + E1000_WRITE_FLUSH(hw); + + return status; +} + +/** + * e1000_get_i2c_ack - Polls for I2C ACK + * @hw: pointer to hardware structure + * + * Clocks in/out one bit via I2C data/clock + **/ +static s32 e1000_get_i2c_ack(struct e1000_hw *hw) +{ + s32 status = E1000_SUCCESS; + u32 i = 0; + u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + u32 timeout = 10; + bool ack = true; + + DEBUGFUNC("e1000_get_i2c_ack"); + + e1000_raise_i2c_clk(hw, &i2cctl); + + /* Minimum high period of clock is 4us */ + usec_delay(E1000_I2C_T_HIGH); + + /* Wait until SCL returns high */ + for (i = 0; i < timeout; i++) { + usec_delay(1); + i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + if (i2cctl & E1000_I2C_CLK_IN) + break; + } + if (!(i2cctl & E1000_I2C_CLK_IN)) + return E1000_ERR_I2C; + + ack = e1000_get_i2c_data(&i2cctl); + if (ack) { + DEBUGOUT("I2C ack was not received.\n"); + status = E1000_ERR_I2C; + } + + e1000_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us */ + usec_delay(E1000_I2C_T_LOW); + + return status; +} + +/** + * e1000_clock_in_i2c_bit - Clocks in one bit via I2C data/clock + * @hw: pointer to hardware structure + * @data: read data value + * + * Clocks in one bit via I2C data/clock + **/ +static s32 e1000_clock_in_i2c_bit(struct e1000_hw *hw, bool *data) +{ + u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + + DEBUGFUNC("e1000_clock_in_i2c_bit"); + + e1000_raise_i2c_clk(hw, &i2cctl); + + /* Minimum high period of clock is 4us */ + usec_delay(E1000_I2C_T_HIGH); + + i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + *data = e1000_get_i2c_data(&i2cctl); + + e1000_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us */ + usec_delay(E1000_I2C_T_LOW); + + return E1000_SUCCESS; +} + +/** + * e1000_clock_out_i2c_bit - Clocks in/out one bit via I2C data/clock + * @hw: pointer to hardware structure + * @data: data value to write + * + * Clocks out one bit via I2C data/clock + **/ +static s32 e1000_clock_out_i2c_bit(struct e1000_hw *hw, bool data) +{ + s32 status; + u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + + DEBUGFUNC("e1000_clock_out_i2c_bit"); + + status = e1000_set_i2c_data(hw, &i2cctl, data); + if (status == E1000_SUCCESS) { + e1000_raise_i2c_clk(hw, &i2cctl); + + /* Minimum high period of clock is 4us */ + usec_delay(E1000_I2C_T_HIGH); + + e1000_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us. + * This also takes care of the data hold time. 
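+ *
+ * For reference, the E1000_I2C_T_* delays used throughout this
+ * bit-bang code track the standard-mode (100 kHz) I2C timing budget;
+ * the figures below only consolidate the comments already in this
+ * file, and the driver's own defines are authoritative:
+ *
+ *   t_LOW    4.7 us  min SCL low      t_HIGH   4.0 us  min SCL high
+ *   t_SU_STA 4.7 us  start setup      t_HD_STA 4.0 us  start hold
+ *   t_SU_STO 4.0 us  stop setup       t_BUF    4.7 us  stop-to-start
+ *   t_RISE   1.0 us  SCL rise         t_FALL   0.3 us  SCL fall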
+ */ + usec_delay(E1000_I2C_T_LOW); + } else { + status = E1000_ERR_I2C; + DEBUGOUT1("I2C data was not set to %X\n", data); + } + + return status; +} +/** + * e1000_raise_i2c_clk - Raises the I2C SCL clock + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * + * Raises the I2C clock line '0'->'1' + **/ +static void e1000_raise_i2c_clk(struct e1000_hw *hw, u32 *i2cctl) +{ + DEBUGFUNC("e1000_raise_i2c_clk"); + + *i2cctl |= E1000_I2C_CLK_OUT; + *i2cctl &= ~E1000_I2C_CLK_OE_N; + E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl); + E1000_WRITE_FLUSH(hw); + + /* SCL rise time (1000ns) */ + usec_delay(E1000_I2C_T_RISE); +} + +/** + * e1000_lower_i2c_clk - Lowers the I2C SCL clock + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * + * Lowers the I2C clock line '1'->'0' + **/ +static void e1000_lower_i2c_clk(struct e1000_hw *hw, u32 *i2cctl) +{ + + DEBUGFUNC("e1000_lower_i2c_clk"); + + *i2cctl &= ~E1000_I2C_CLK_OUT; + *i2cctl &= ~E1000_I2C_CLK_OE_N; + E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl); + E1000_WRITE_FLUSH(hw); + + /* SCL fall time (300ns) */ + usec_delay(E1000_I2C_T_FALL); +} + +/** + * e1000_set_i2c_data - Sets the I2C data bit + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * @data: I2C data value (0 or 1) to set + * + * Sets the I2C data bit + **/ +static s32 e1000_set_i2c_data(struct e1000_hw *hw, u32 *i2cctl, bool data) +{ + s32 status = E1000_SUCCESS; + + DEBUGFUNC("e1000_set_i2c_data"); + + if (data) + *i2cctl |= E1000_I2C_DATA_OUT; + else + *i2cctl &= ~E1000_I2C_DATA_OUT; + + *i2cctl &= ~E1000_I2C_DATA_OE_N; + *i2cctl |= E1000_I2C_CLK_OE_N; + E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl); + E1000_WRITE_FLUSH(hw); + + /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */ + usec_delay(E1000_I2C_T_RISE + E1000_I2C_T_FALL + E1000_I2C_T_SU_DATA); + + *i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + if (data != e1000_get_i2c_data(i2cctl)) { + status = E1000_ERR_I2C; + DEBUGOUT1("Error - I2C data was not set to %X.\n", data); + } + + return status; +} + +/** + * e1000_get_i2c_data - Reads the I2C SDA data bit + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * + * Returns the I2C data bit value + **/ +static bool e1000_get_i2c_data(u32 *i2cctl) +{ + bool data; + + DEBUGFUNC("e1000_get_i2c_data"); + + if (*i2cctl & E1000_I2C_DATA_IN) + data = 1; + else + data = 0; + + return data; +} + +/** + * e1000_i2c_bus_clear - Clears the I2C bus + * @hw: pointer to hardware structure + * + * Clears the I2C bus by sending nine clock pulses. + * Used when data line is stuck low. 
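+ *
+ * Why nine pulses: a slave that was interrupted mid-transfer can be
+ * holding SDA low anywhere within a byte; eight clocks flush the
+ * remaining data bits and a ninth covers the ACK slot, after which
+ * the slave releases SDA and a clean STOP can be generated.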
+ **/ +void e1000_i2c_bus_clear(struct e1000_hw *hw) +{ + u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + u32 i; + + DEBUGFUNC("e1000_i2c_bus_clear"); + + e1000_i2c_start(hw); + + e1000_set_i2c_data(hw, &i2cctl, 1); + + for (i = 0; i < 9; i++) { + e1000_raise_i2c_clk(hw, &i2cctl); + + /* Min high period of clock is 4us */ + usec_delay(E1000_I2C_T_HIGH); + + e1000_lower_i2c_clk(hw, &i2cctl); + + /* Min low period of clock is 4.7us*/ + usec_delay(E1000_I2C_T_LOW); + } + + e1000_i2c_start(hw); + + /* Put the i2c bus back to default state */ + e1000_i2c_stop(hw); +} + +static const u8 e1000_emc_temp_data[4] = { + E1000_EMC_INTERNAL_DATA, + E1000_EMC_DIODE1_DATA, + E1000_EMC_DIODE2_DATA, + E1000_EMC_DIODE3_DATA +}; +static const u8 e1000_emc_therm_limit[4] = { + E1000_EMC_INTERNAL_THERM_LIMIT, + E1000_EMC_DIODE1_THERM_LIMIT, + E1000_EMC_DIODE2_THERM_LIMIT, + E1000_EMC_DIODE3_THERM_LIMIT +}; + +/** + * e1000_get_thermal_sensor_data_generic - Gathers thermal sensor data + * @hw: pointer to hardware structure + * + * Updates the temperatures in mac.thermal_sensor_data + **/ +s32 e1000_get_thermal_sensor_data_generic(struct e1000_hw *hw) +{ + s32 status = E1000_SUCCESS; + u16 ets_offset; + u16 ets_cfg; + u16 ets_sensor; + u8 num_sensors; + u8 sensor_index; + u8 sensor_location; + u8 i; + struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; + + DEBUGFUNC("e1000_get_thermal_sensor_data_generic"); + + if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0)) + return E1000_NOT_IMPLEMENTED; + + data->sensor[0].temp = (E1000_READ_REG(hw, E1000_THMJT) & 0xFF); + + /* Return the internal sensor only if ETS is unsupported */ + e1000_read_nvm(hw, NVM_ETS_CFG, 1, &ets_offset); + if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) + return status; + + e1000_read_nvm(hw, ets_offset, 1, &ets_cfg); + if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT) + != NVM_ETS_TYPE_EMC) + return E1000_NOT_IMPLEMENTED; + + num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK); + if (num_sensors > E1000_MAX_SENSORS) + num_sensors = E1000_MAX_SENSORS; + + for (i = 1; i < num_sensors; i++) { + e1000_read_nvm(hw, (ets_offset + i), 1, &ets_sensor); + sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >> + NVM_ETS_DATA_INDEX_SHIFT); + sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >> + NVM_ETS_DATA_LOC_SHIFT); + + if (sensor_location != 0) + hw->phy.ops.read_i2c_byte(hw, + e1000_emc_temp_data[sensor_index], + E1000_I2C_THERMAL_SENSOR_ADDR, + &data->sensor[i].temp); + } + return status; +} + +/** + * e1000_init_thermal_sensor_thresh_generic - Sets thermal sensor thresholds + * @hw: pointer to hardware structure + * + * Sets the thermal sensor thresholds according to the NVM map + * and save off the threshold and location values into mac.thermal_sensor_data + **/ +s32 e1000_init_thermal_sensor_thresh_generic(struct e1000_hw *hw) +{ + s32 status = E1000_SUCCESS; + u16 ets_offset; + u16 ets_cfg; + u16 ets_sensor; + u8 low_thresh_delta; + u8 num_sensors; + u8 sensor_index; + u8 sensor_location; + u8 therm_limit; + u8 i; + struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; + + DEBUGFUNC("e1000_init_thermal_sensor_thresh_generic"); + + if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0)) + return E1000_NOT_IMPLEMENTED; + + memset(data, 0, sizeof(struct e1000_thermal_sensor_data)); + + data->sensor[0].location = 0x1; + data->sensor[0].caution_thresh = + (E1000_READ_REG(hw, E1000_THHIGHTC) & 0xFF); + data->sensor[0].max_op_thresh = + (E1000_READ_REG(hw, E1000_THLOWTC) & 0xFF); + 
+ /* Return the internal sensor only if ETS is unsupported */ + e1000_read_nvm(hw, NVM_ETS_CFG, 1, &ets_offset); + if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) + return status; + + e1000_read_nvm(hw, ets_offset, 1, &ets_cfg); + if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT) + != NVM_ETS_TYPE_EMC) + return E1000_NOT_IMPLEMENTED; + + low_thresh_delta = ((ets_cfg & NVM_ETS_LTHRES_DELTA_MASK) >> + NVM_ETS_LTHRES_DELTA_SHIFT); + num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK); + + for (i = 1; i <= num_sensors; i++) { + e1000_read_nvm(hw, (ets_offset + i), 1, &ets_sensor); + sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >> + NVM_ETS_DATA_INDEX_SHIFT); + sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >> + NVM_ETS_DATA_LOC_SHIFT); + therm_limit = ets_sensor & NVM_ETS_DATA_HTHRESH_MASK; + + hw->phy.ops.write_i2c_byte(hw, + e1000_emc_therm_limit[sensor_index], + E1000_I2C_THERMAL_SENSOR_ADDR, + therm_limit); + + if ((i < E1000_MAX_SENSORS) && (sensor_location != 0)) { + data->sensor[i].location = sensor_location; + data->sensor[i].caution_thresh = therm_limit; + data->sensor[i].max_op_thresh = therm_limit - + low_thresh_delta; + } + } + return status; +} diff --git a/vmkdrivers/src_9/drivers/net/igb/e1000_82575.h b/vmkdrivers/src_9/drivers/net/igb/e1000_82575.h index ba6212c6d93efe5ec037c5795a5dbed54a5cbcde..1aec75ababf9f41c14e9464297adfb62478f2fee 100644 --- a/vmkdrivers/src_9/drivers/net/igb/e1000_82575.h +++ b/vmkdrivers/src_9/drivers/net/igb/e1000_82575.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2009 Intel Corporation. + Copyright(c) 2007-2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -28,10 +28,10 @@ #ifndef _E1000_82575_H_ #define _E1000_82575_H_ -#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \ - (ID_LED_DEF1_DEF2 << 8) | \ - (ID_LED_DEF1_DEF2 << 4) | \ - (ID_LED_OFF1_ON2)) +#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \ + (ID_LED_DEF1_DEF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_OFF1_ON2)) /* * Receive Address Register Count * Number of high/low register pairs in the RAR. The RAR (Receive Address @@ -42,149 +42,150 @@ * For 82576, there are an additional set of RARs that begin at an offset * separate from the first set of RARs. 
*/ -#define E1000_RAR_ENTRIES_82575 16 -#define E1000_RAR_ENTRIES_82576 24 -#define E1000_RAR_ENTRIES_82580 24 -#define E1000_SW_SYNCH_MB 0x00000100 -#define E1000_STAT_DEV_RST_SET 0x00100000 -#define E1000_CTRL_DEV_RST 0x20000000 -#define E1000_RAR_ENTRIES_I350 32 +#define E1000_RAR_ENTRIES_82575 16 +#define E1000_RAR_ENTRIES_82576 24 +#define E1000_RAR_ENTRIES_82580 24 +#define E1000_RAR_ENTRIES_I350 32 +#define E1000_SW_SYNCH_MB 0x00000100 +#define E1000_STAT_DEV_RST_SET 0x00100000 +#define E1000_CTRL_DEV_RST 0x20000000 struct e1000_adv_data_desc { __le64 buffer_addr; /* Address of the descriptor's data buffer */ union { u32 data; struct { - u32 datalen :16; /* Data buffer length */ - u32 rsvd :4; - u32 dtyp :4; /* Descriptor type */ - u32 dcmd :8; /* Descriptor command */ + u32 datalen:16; /* Data buffer length */ + u32 rsvd:4; + u32 dtyp:4; /* Descriptor type */ + u32 dcmd:8; /* Descriptor command */ } config; } lower; union { u32 data; struct { - u32 status :4; /* Descriptor status */ - u32 idx :4; - u32 popts :6; /* Packet Options */ - u32 paylen :18; /* Payload length */ + u32 status:4; /* Descriptor status */ + u32 idx:4; + u32 popts:6; /* Packet Options */ + u32 paylen:18; /* Payload length */ } options; } upper; }; -#define E1000_TXD_DTYP_ADV_C 0x2 /* Advanced Context Descriptor */ -#define E1000_TXD_DTYP_ADV_D 0x3 /* Advanced Data Descriptor */ -#define E1000_ADV_TXD_CMD_DEXT 0x20 /* Descriptor extension (0 = legacy) */ -#define E1000_ADV_TUCMD_IPV4 0x2 /* IP Packet Type: 1=IPv4 */ -#define E1000_ADV_TUCMD_IPV6 0x0 /* IP Packet Type: 0=IPv6 */ -#define E1000_ADV_TUCMD_L4T_UDP 0x0 /* L4 Packet TYPE of UDP */ -#define E1000_ADV_TUCMD_L4T_TCP 0x4 /* L4 Packet TYPE of TCP */ -#define E1000_ADV_TUCMD_MKRREQ 0x10 /* Indicates markers are required */ -#define E1000_ADV_DCMD_EOP 0x1 /* End of Packet */ -#define E1000_ADV_DCMD_IFCS 0x2 /* Insert FCS (Ethernet CRC) */ -#define E1000_ADV_DCMD_RS 0x8 /* Report Status */ -#define E1000_ADV_DCMD_VLE 0x40 /* Add VLAN tag */ -#define E1000_ADV_DCMD_TSE 0x80 /* TCP Seg enable */ +#define E1000_TXD_DTYP_ADV_C 0x2 /* Advanced Context Descriptor */ +#define E1000_TXD_DTYP_ADV_D 0x3 /* Advanced Data Descriptor */ +#define E1000_ADV_TXD_CMD_DEXT 0x20 /* Descriptor extension (0 = legacy) */ +#define E1000_ADV_TUCMD_IPV4 0x2 /* IP Packet Type: 1=IPv4 */ +#define E1000_ADV_TUCMD_IPV6 0x0 /* IP Packet Type: 0=IPv6 */ +#define E1000_ADV_TUCMD_L4T_UDP 0x0 /* L4 Packet TYPE of UDP */ +#define E1000_ADV_TUCMD_L4T_TCP 0x4 /* L4 Packet TYPE of TCP */ +#define E1000_ADV_TUCMD_MKRREQ 0x10 /* Indicates markers are required */ +#define E1000_ADV_DCMD_EOP 0x1 /* End of Packet */ +#define E1000_ADV_DCMD_IFCS 0x2 /* Insert FCS (Ethernet CRC) */ +#define E1000_ADV_DCMD_RS 0x8 /* Report Status */ +#define E1000_ADV_DCMD_VLE 0x40 /* Add VLAN tag */ +#define E1000_ADV_DCMD_TSE 0x80 /* TCP Seg enable */ /* Extended Device Control */ -#define E1000_CTRL_EXT_NSICR 0x00000001 /* Disable Intr Clear all on read */ +#define E1000_CTRL_EXT_NSICR 0x00000001 /* Disable Intr Clear all on read */ struct e1000_adv_context_desc { union { u32 ip_config; struct { - u32 iplen :9; - u32 maclen :7; - u32 vlan_tag :16; + u32 iplen:9; + u32 maclen:7; + u32 vlan_tag:16; } fields; } ip_setup; u32 seq_num; union { u64 l4_config; struct { - u32 mkrloc :9; - u32 tucmd :11; - u32 dtyp :4; - u32 adv :8; - u32 rsvd :4; - u32 idx :4; - u32 l4len :8; - u32 mss :16; + u32 mkrloc:9; + u32 tucmd:11; + u32 dtyp:4; + u32 adv:8; + u32 rsvd:4; + u32 idx:4; + u32 l4len:8; + u32 mss:16; } fields; } l4_setup; 
}; /* SRRCTL bit definitions */ -#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ -#define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00 -#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ -#define E1000_SRRCTL_DESCTYPE_LEGACY 0x00000000 -#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 -#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 -#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 -#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION 0x06000000 +#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ +#define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00 +#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ +#define E1000_SRRCTL_DESCTYPE_LEGACY 0x00000000 +#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 +#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 +#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 +#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION 0x06000000 #define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000 -#define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000 -#define E1000_SRRCTL_TIMESTAMP 0x40000000 -#define E1000_SRRCTL_DROP_EN 0x80000000 - -#define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F -#define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00 - -#define E1000_TX_HEAD_WB_ENABLE 0x1 -#define E1000_TX_SEQNUM_WB_ENABLE 0x2 - -#define E1000_MRQC_ENABLE_RSS_4Q 0x00000002 -#define E1000_MRQC_ENABLE_VMDQ 0x00000003 -#define E1000_MRQC_ENABLE_VMDQ_RSS_2Q 0x00000005 -#define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 -#define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 -#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000 -#define E1000_MRQC_ENABLE_RSS_8Q 0x00000002 - -#define E1000_VMRCTL_MIRROR_PORT_SHIFT 8 -#define E1000_VMRCTL_MIRROR_DSTPORT_MASK (7 << E1000_VMRCTL_MIRROR_PORT_SHIFT) -#define E1000_VMRCTL_POOL_MIRROR_ENABLE (1 << 0) -#define E1000_VMRCTL_UPLINK_MIRROR_ENABLE (1 << 1) -#define E1000_VMRCTL_DOWNLINK_MIRROR_ENABLE (1 << 2) +#define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000 +#define E1000_SRRCTL_TIMESTAMP 0x40000000 +#define E1000_SRRCTL_DROP_EN 0x80000000 + +#define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F +#define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00 + +#define E1000_TX_HEAD_WB_ENABLE 0x1 +#define E1000_TX_SEQNUM_WB_ENABLE 0x2 + +#define E1000_MRQC_ENABLE_RSS_4Q 0x00000002 +#define E1000_MRQC_ENABLE_VMDQ 0x00000003 +#define E1000_MRQC_ENABLE_VMDQ_RSS_2Q 0x00000005 +#define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 +#define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 +#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000 +#define E1000_MRQC_ENABLE_RSS_8Q 0x00000002 + +#define E1000_VMRCTL_MIRROR_PORT_SHIFT 8 +#define E1000_VMRCTL_MIRROR_DSTPORT_MASK (7 << \ + E1000_VMRCTL_MIRROR_PORT_SHIFT) +#define E1000_VMRCTL_POOL_MIRROR_ENABLE (1 << 0) +#define E1000_VMRCTL_UPLINK_MIRROR_ENABLE (1 << 1) +#define E1000_VMRCTL_DOWNLINK_MIRROR_ENABLE (1 << 2) #define E1000_EICR_TX_QUEUE ( \ - E1000_EICR_TX_QUEUE0 | \ - E1000_EICR_TX_QUEUE1 | \ - E1000_EICR_TX_QUEUE2 | \ - E1000_EICR_TX_QUEUE3) + E1000_EICR_TX_QUEUE0 | \ + E1000_EICR_TX_QUEUE1 | \ + E1000_EICR_TX_QUEUE2 | \ + E1000_EICR_TX_QUEUE3) #define E1000_EICR_RX_QUEUE ( \ - E1000_EICR_RX_QUEUE0 | \ - E1000_EICR_RX_QUEUE1 | \ - E1000_EICR_RX_QUEUE2 | \ - E1000_EICR_RX_QUEUE3) + E1000_EICR_RX_QUEUE0 | \ + E1000_EICR_RX_QUEUE1 | \ + E1000_EICR_RX_QUEUE2 | \ + E1000_EICR_RX_QUEUE3) -#define E1000_EIMS_RX_QUEUE E1000_EICR_RX_QUEUE -#define E1000_EIMS_TX_QUEUE E1000_EICR_TX_QUEUE +#define E1000_EIMS_RX_QUEUE E1000_EICR_RX_QUEUE +#define E1000_EIMS_TX_QUEUE E1000_EICR_TX_QUEUE #define EIMS_ENABLE_MASK ( \ - 
E1000_EIMS_RX_QUEUE | \ - E1000_EIMS_TX_QUEUE | \ - E1000_EIMS_TCP_TIMER | \ - E1000_EIMS_OTHER) + E1000_EIMS_RX_QUEUE | \ + E1000_EIMS_TX_QUEUE | \ + E1000_EIMS_TCP_TIMER | \ + E1000_EIMS_OTHER) /* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */ -#define E1000_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */ -#define E1000_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */ -#define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ -#define E1000_IMIREXT_CTRL_URG 0x00002000 /* Check URG bit in header */ -#define E1000_IMIREXT_CTRL_ACK 0x00004000 /* Check ACK bit in header */ -#define E1000_IMIREXT_CTRL_PSH 0x00008000 /* Check PSH bit in header */ -#define E1000_IMIREXT_CTRL_RST 0x00010000 /* Check RST bit in header */ -#define E1000_IMIREXT_CTRL_SYN 0x00020000 /* Check SYN bit in header */ -#define E1000_IMIREXT_CTRL_FIN 0x00040000 /* Check FIN bit in header */ -#define E1000_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */ +#define E1000_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */ +#define E1000_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */ +#define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ +#define E1000_IMIREXT_CTRL_URG 0x00002000 /* Check URG bit in header */ +#define E1000_IMIREXT_CTRL_ACK 0x00004000 /* Check ACK bit in header */ +#define E1000_IMIREXT_CTRL_PSH 0x00008000 /* Check PSH bit in header */ +#define E1000_IMIREXT_CTRL_RST 0x00010000 /* Check RST bit in header */ +#define E1000_IMIREXT_CTRL_SYN 0x00020000 /* Check SYN bit in header */ +#define E1000_IMIREXT_CTRL_FIN 0x00040000 /* Check FIN bit in header */ +#define E1000_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */ /* Receive Descriptor - Advanced */ union e1000_adv_rx_desc { struct { - __le64 pkt_addr; /* Packet buffer address */ - __le64 hdr_addr; /* Header buffer address */ + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ } read; struct { struct { @@ -192,79 +193,79 @@ union e1000_adv_rx_desc { __le32 data; struct { __le16 pkt_info; /*RSS type, Pkt type*/ - __le16 hdr_info; /* Split Header, - * header buffer len*/ + /* Split Header, header buffer len */ + __le16 hdr_info; } hs_rss; } lo_dword; union { - __le32 rss; /* RSS Hash */ + __le32 rss; /* RSS Hash */ struct { - __le16 ip_id; /* IP id */ - __le16 csum; /* Packet Checksum */ + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ } csum_ip; } hi_dword; } lower; struct { - __le32 status_error; /* ext status/error */ - __le16 length; /* Packet length */ - __le16 vlan; /* VLAN tag */ + __le32 status_error; /* ext status/error */ + __le16 length; /* Packet length */ + __le16 vlan; /* VLAN tag */ } upper; } wb; /* writeback */ }; -#define E1000_RXDADV_RSSTYPE_MASK 0x0000000F -#define E1000_RXDADV_RSSTYPE_SHIFT 12 -#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 -#define E1000_RXDADV_HDRBUFLEN_SHIFT 5 -#define E1000_RXDADV_SPLITHEADER_EN 0x00001000 -#define E1000_RXDADV_SPH 0x8000 -#define E1000_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */ -#define E1000_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */ -#define E1000_RXDADV_ERR_HBO 0x00800000 +#define E1000_RXDADV_RSSTYPE_MASK 0x0000000F +#define E1000_RXDADV_RSSTYPE_SHIFT 12 +#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 +#define E1000_RXDADV_HDRBUFLEN_SHIFT 5 +#define E1000_RXDADV_SPLITHEADER_EN 0x00001000 +#define E1000_RXDADV_SPH 0x8000 +#define E1000_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */ +#define E1000_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */ +#define E1000_RXDADV_ERR_HBO 
0x00800000 /* RSS Hash results */
-#define E1000_RXDADV_RSSTYPE_NONE 0x00000000
-#define E1000_RXDADV_RSSTYPE_IPV4_TCP 0x00000001
-#define E1000_RXDADV_RSSTYPE_IPV4 0x00000002
-#define E1000_RXDADV_RSSTYPE_IPV6_TCP 0x00000003
-#define E1000_RXDADV_RSSTYPE_IPV6_EX 0x00000004
-#define E1000_RXDADV_RSSTYPE_IPV6 0x00000005
+#define E1000_RXDADV_RSSTYPE_NONE 0x00000000
+#define E1000_RXDADV_RSSTYPE_IPV4_TCP 0x00000001
+#define E1000_RXDADV_RSSTYPE_IPV4 0x00000002
+#define E1000_RXDADV_RSSTYPE_IPV6_TCP 0x00000003
+#define E1000_RXDADV_RSSTYPE_IPV6_EX 0x00000004
+#define E1000_RXDADV_RSSTYPE_IPV6 0x00000005
 #define E1000_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006
-#define E1000_RXDADV_RSSTYPE_IPV4_UDP 0x00000007
-#define E1000_RXDADV_RSSTYPE_IPV6_UDP 0x00000008
+#define E1000_RXDADV_RSSTYPE_IPV4_UDP 0x00000007
+#define E1000_RXDADV_RSSTYPE_IPV6_UDP 0x00000008
 #define E1000_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009
 
 /* RSS Packet Types as indicated in the receive descriptor */
-#define E1000_RXDADV_PKTTYPE_NONE 0x00000000
-#define E1000_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPV4 hdr present */
-#define E1000_RXDADV_PKTTYPE_IPV4_EX 0x00000020 /* IPV4 hdr + extensions */
-#define E1000_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPV6 hdr present */
-#define E1000_RXDADV_PKTTYPE_IPV6_EX 0x00000080 /* IPV6 hdr + extensions */
-#define E1000_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */
-#define E1000_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */
-#define E1000_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */
-#define E1000_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */
-
-#define E1000_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */
-#define E1000_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */
-#define E1000_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */
-#define E1000_RXDADV_PKTTYPE_ETQF 0x00008000 /* PKTTYPE is ETQF index */
-#define E1000_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */
-#define E1000_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 bits */
+#define E1000_RXDADV_PKTTYPE_NONE 0x00000000
+#define E1000_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPV4 hdr present */
+#define E1000_RXDADV_PKTTYPE_IPV4_EX 0x00000020 /* IPV4 hdr + extensions */
+#define E1000_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPV6 hdr present */
+#define E1000_RXDADV_PKTTYPE_IPV6_EX 0x00000080 /* IPV6 hdr + extensions */
+#define E1000_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */
+#define E1000_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */
+#define E1000_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */
+#define E1000_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */
+
+#define E1000_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */
+#define E1000_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */
+#define E1000_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */
+#define E1000_RXDADV_PKTTYPE_ETQF 0x00008000 /* PKTTYPE is ETQF index */
+#define E1000_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */
+#define E1000_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 bits */
 
 /* LinkSec results */
 /* Security Processing bit Indication */
-#define E1000_RXDADV_LNKSEC_STATUS_SECP 0x00020000
-#define E1000_RXDADV_LNKSEC_ERROR_BIT_MASK 0x18000000
-#define E1000_RXDADV_LNKSEC_ERROR_NO_SA_MATCH 0x08000000
-#define E1000_RXDADV_LNKSEC_ERROR_REPLAY_ERROR 0x10000000
-#define E1000_RXDADV_LNKSEC_ERROR_BAD_SIG 0x18000000
-
-#define E1000_RXDADV_IPSEC_STATUS_SECP 0x00020000
-#define E1000_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000
-#define E1000_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000
-#define E1000_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000
-#define E1000_RXDADV_IPSEC_ERROR_AUTHENTICATION_FAILED 0x18000000
+#define E1000_RXDADV_LNKSEC_STATUS_SECP 0x00020000
+#define E1000_RXDADV_LNKSEC_ERROR_BIT_MASK 0x18000000
+#define E1000_RXDADV_LNKSEC_ERROR_NO_SA_MATCH 0x08000000
+#define E1000_RXDADV_LNKSEC_ERROR_REPLAY_ERROR 0x10000000
+#define E1000_RXDADV_LNKSEC_ERROR_BAD_SIG 0x18000000
+
+#define E1000_RXDADV_IPSEC_STATUS_SECP 0x00020000
+#define E1000_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000
+#define E1000_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000
+#define E1000_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000
+#define E1000_RXDADV_IPSEC_ERROR_AUTHENTICATION_FAILED 0x18000000
 
 /* Transmit Descriptor - Advanced */
 union e1000_adv_tx_desc {
@@ -281,25 +282,26 @@ union e1000_adv_tx_desc {
 };
 
 /* Adv Transmit Descriptor Config Masks */
-#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */
-#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
-#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */
-#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
-#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */
-#define E1000_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */
-#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */
-#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */
-#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
-#define E1000_ADVTXD_MAC_LINKSEC 0x00040000 /* Apply LinkSec on packet */
-#define E1000_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp packet */
-#define E1000_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED present in WB */
-#define E1000_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */
-#define E1000_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */
-#define E1000_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */
-#define E1000_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */
-#define E1000_ADVTXD_POPTS_ISCO_FULL 0x00001800 /* 1st&Last TSO-full iSCSI PDU*/
-#define E1000_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */
-#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
+#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */
+#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
+#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */
+#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
+#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */
+#define E1000_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */
+#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */
+#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */
+#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
+#define E1000_ADVTXD_MAC_LINKSEC 0x00040000 /* Apply LinkSec on pkt */
+#define E1000_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp pkt */
+#define E1000_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED prsnt in WB */
+#define E1000_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */
+#define E1000_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */
+#define E1000_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */
+#define E1000_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */
+/* 1st & Last TSO-full iSCSI PDU*/
+#define E1000_ADVTXD_POPTS_ISCO_FULL 0x00001800
+#define E1000_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */
+#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
 
 /* Context descriptors */
 struct e1000_adv_tx_context_desc {
@@ -309,65 +311,69 @@ struct e1000_adv_tx_context_desc {
 	__le32 mss_l4len_idx;
 };
 
-#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
-#define E1000_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */
-#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
-#define E1000_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */
-#define E1000_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */
-#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
-#define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */
-#define E1000_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */
+#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
+#define E1000_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */
+#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
+#define E1000_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */
+#define E1000_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */
+#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
+#define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */
+#define E1000_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */
 /* IPSec Encrypt Enable for ESP */
-#define E1000_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000
-#define E1000_ADVTXD_TUCMD_MKRREQ 0x00002000 /* Req requires Markers and CRC */
-#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
-#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
+#define E1000_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000
+/* Req requires Markers and CRC */
+#define E1000_ADVTXD_TUCMD_MKRREQ 0x00002000
+#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
+#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
 /* Adv ctxt IPSec SA IDX mask */
-#define E1000_ADVTXD_IPSEC_SA_INDEX_MASK 0x000000FF
+#define E1000_ADVTXD_IPSEC_SA_INDEX_MASK 0x000000FF
 /* Adv ctxt IPSec ESP len mask */
-#define E1000_ADVTXD_IPSEC_ESP_LEN_MASK 0x000000FF
+#define E1000_ADVTXD_IPSEC_ESP_LEN_MASK 0x000000FF
 
 /* Additional Transmit Descriptor Control definitions */
-#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Queue */
-#define E1000_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. write-back flushing */
+#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Tx Queue */
+#define E1000_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. wbk flushing */
 /* Tx Queue Arbitration Priority 0=low, 1=high */
-#define E1000_TXDCTL_PRIORITY 0x08000000
+#define E1000_TXDCTL_PRIORITY 0x08000000
 
 /* Additional Receive Descriptor Control definitions */
-#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */
-#define E1000_RXDCTL_SWFLSH 0x04000000 /* Rx Desc. write-back flushing */
+#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Rx Queue */
+#define E1000_RXDCTL_SWFLSH 0x04000000 /* Rx Desc. wbk flushing */
 
 /* Direct Cache Access (DCA) definitions */
-#define E1000_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */
-#define E1000_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */
+#define E1000_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */
+#define E1000_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */
 
-#define E1000_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */
-#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */
+#define E1000_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */
+#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */
 
-#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
-#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
-#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
-#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */
+#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
+#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
+#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header ena */
+#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload ena */
+#define E1000_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx Desc Relax Order */
 
-#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
-#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
-#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
+#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
+#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
+#define E1000_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
+#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
+#define E1000_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
 
-#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */
-#define E1000_DCA_RXCTRL_CPUID_MASK_82576 0xFF000000 /* Rx CPUID Mask */
-#define E1000_DCA_TXCTRL_CPUID_SHIFT_82576 24 /* Tx CPUID */
-#define E1000_DCA_RXCTRL_CPUID_SHIFT_82576 24 /* Rx CPUID */
+#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */
+#define E1000_DCA_RXCTRL_CPUID_MASK_82576 0xFF000000 /* Rx CPUID Mask */
+#define E1000_DCA_TXCTRL_CPUID_SHIFT_82576 24 /* Tx CPUID */
+#define E1000_DCA_RXCTRL_CPUID_SHIFT_82576 24 /* Rx CPUID */
 
 /* Additional interrupt register bit definitions */
-#define E1000_ICR_LSECPNS 0x00000020 /* PN threshold - server */
-#define E1000_IMS_LSECPNS E1000_ICR_LSECPNS /* PN threshold - server */
-#define E1000_ICS_LSECPNS E1000_ICR_LSECPNS /* PN threshold - server */
+#define E1000_ICR_LSECPNS 0x00000020 /* PN threshold - server */
+#define E1000_IMS_LSECPNS E1000_ICR_LSECPNS /* PN threshold - server */
+#define E1000_ICS_LSECPNS E1000_ICR_LSECPNS /* PN threshold - server */
 
 /* ETQF register bit definitions */
-#define E1000_ETQF_FILTER_ENABLE (1 << 26)
-#define E1000_ETQF_IMM_INT (1 << 29)
-#define E1000_ETQF_1588 (1 << 30)
-#define E1000_ETQF_QUEUE_ENABLE (1 << 31)
+#define E1000_ETQF_FILTER_ENABLE (1 << 26)
+#define E1000_ETQF_IMM_INT (1 << 29)
+#define E1000_ETQF_1588 (1 << 30)
+#define E1000_ETQF_QUEUE_ENABLE (1 << 31)
 /*
  * ETQF filter list: one static filter per filter consumer. This is
  * to avoid filter collisions later. Add new filters
@@ -376,76 +382,128 @@ struct e1000_adv_tx_context_desc {
  * Current filters:
  * EAPOL 802.1x (0x888e): Filter 0
  */
-#define E1000_ETQF_FILTER_EAPOL 0
-
-#define E1000_FTQF_VF_BP 0x00008000
-#define E1000_FTQF_1588_TIME_STAMP 0x08000000
-#define E1000_FTQF_MASK 0xF0000000
-#define E1000_FTQF_MASK_PROTO_BP 0x10000000
-#define E1000_FTQF_MASK_SOURCE_ADDR_BP 0x20000000
-#define E1000_FTQF_MASK_DEST_ADDR_BP 0x40000000
-#define E1000_FTQF_MASK_SOURCE_PORT_BP 0x80000000
-
-#define E1000_NVM_APME_82575 0x0400
-#define MAX_NUM_VFS 8
-
-#define E1000_DTXSWC_MAC_SPOOF_MASK 0x000000FF /* Per VF MAC spoof control */
-#define E1000_DTXSWC_VLAN_SPOOF_MASK 0x0000FF00 /* Per VF VLAN spoof control */
-#define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */
-#define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8
-#define E1000_DTXSWC_LLE_SHIFT 16
-#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */
+#define E1000_ETQF_FILTER_EAPOL 0
+
+#define E1000_FTQF_VF_BP 0x00008000
+#define E1000_FTQF_1588_TIME_STAMP 0x08000000
+#define E1000_FTQF_MASK 0xF0000000
+#define E1000_FTQF_MASK_PROTO_BP 0x10000000
+#define E1000_FTQF_MASK_SOURCE_ADDR_BP 0x20000000
+#define E1000_FTQF_MASK_DEST_ADDR_BP 0x40000000
+#define E1000_FTQF_MASK_SOURCE_PORT_BP 0x80000000
+
+#define E1000_NVM_APME_82575 0x0400
+#define MAX_NUM_VFS 7
+
+#define E1000_DTXSWC_MAC_SPOOF_MASK 0x000000FF /* Per VF MAC spoof cntrl */
+#define E1000_DTXSWC_VLAN_SPOOF_MASK 0x0000FF00 /* Per VF VLAN spoof cntrl */
+#define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */
+#define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8
+#define E1000_DTXSWC_LLE_SHIFT 16
+#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */
 
 /* Easy defines for setting default pool, would normally be left a zero */
-#define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7
-#define E1000_VT_CTL_DEFAULT_POOL_MASK (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT)
+#define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7
+#define E1000_VT_CTL_DEFAULT_POOL_MASK (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT)
 
 /* Other useful VMD_CTL register defines */
-#define E1000_VT_CTL_IGNORE_MAC (1 << 28)
-#define E1000_VT_CTL_DISABLE_DEF_POOL (1 << 29)
-#define E1000_VT_CTL_VM_REPL_EN (1 << 30)
+#define E1000_VT_CTL_IGNORE_MAC (1 << 28)
+#define E1000_VT_CTL_DISABLE_DEF_POOL (1 << 29)
+#define E1000_VT_CTL_VM_REPL_EN (1 << 30)
 
 /* Per VM Offload register setup */
-#define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */
-#define E1000_VMOLR_LPE 0x00010000 /* Accept Long packet */
-#define E1000_VMOLR_RSSE 0x00020000 /* Enable RSS */
-#define E1000_VMOLR_AUPE 0x01000000 /* Accept untagged packets */
-#define E1000_VMOLR_ROMPE 0x02000000 /* Accept overflow multicast */
-#define E1000_VMOLR_ROPE 0x04000000 /* Accept overflow unicast */
-#define E1000_VMOLR_BAM 0x08000000 /* Accept Broadcast packets */
-#define E1000_VMOLR_MPME 0x10000000 /* Multicast promiscuous mode */
-#define E1000_VMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */
-#define E1000_VMOLR_STRCRC 0x80000000 /* CRC stripping enable */
-
-#define E1000_VLVF_ARRAY_SIZE 32
-#define E1000_VLVF_VLANID_MASK 0x00000FFF
-#define E1000_VLVF_POOLSEL_SHIFT 12
-#define E1000_VLVF_POOLSEL_MASK (0xFF << E1000_VLVF_POOLSEL_SHIFT)
-#define E1000_VLVF_LVLAN 0x00100000
-#define E1000_VLVF_VLANID_ENABLE 0x80000000
-
-#define E1000_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */
-#define E1000_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */
-
-#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
-
-#define E1000_IOVCTL 0x05BBC
-#define E1000_IOVCTL_REUSE_VFQ 0x00000001
-
-#define E1000_RPLOLR_STRVLAN 0x40000000
-#define E1000_RPLOLR_STRCRC 0x80000000
-
-#define E1000_DTXCTL_8023LL 0x0004
-#define E1000_DTXCTL_VLAN_ADDED 0x0008
-#define E1000_DTXCTL_OOS_ENABLE 0x0010
-#define E1000_DTXCTL_MDP_EN 0x0020
-#define E1000_DTXCTL_SPOOF_INT 0x0040
-
-#define ALL_QUEUES 0xFFFF
-
-/* RX packet buffer size defines */
-#define E1000_RXPBS_SIZE_MASK_82576 0x0000007F
+#define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */
+#define E1000_VMOLR_LPE 0x00010000 /* Accept Long packet */
+#define E1000_VMOLR_RSSE 0x00020000 /* Enable RSS */
+#define E1000_VMOLR_AUPE 0x01000000 /* Accept untagged packets */
+#define E1000_VMOLR_ROMPE 0x02000000 /* Accept overflow multicast */
+#define E1000_VMOLR_ROPE 0x04000000 /* Accept overflow unicast */
+#define E1000_VMOLR_BAM 0x08000000 /* Accept Broadcast packets */
+#define E1000_VMOLR_MPME 0x10000000 /* Multicast promiscuous mode */
+#define E1000_VMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */
+#define E1000_VMOLR_STRCRC 0x80000000 /* CRC stripping enable */
+
+#define E1000_VMOLR_VPE 0x00800000 /* VLAN promiscuous enable */
+#define E1000_VMOLR_UPE 0x20000000 /* Unicast promisuous enable */
+#define E1000_DVMOLR_HIDVLAN 0x20000000 /* Vlan hiding enable */
+#define E1000_DVMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */
+#define E1000_DVMOLR_STRCRC 0x80000000 /* CRC stripping enable */
+
+#define E1000_PBRWAC_WALPB 0x00000007 /* Wrap around event on LAN Rx PB */
+#define E1000_PBRWAC_PBE 0x00000008 /* Rx packet buffer empty */
+
+#define E1000_VLVF_ARRAY_SIZE 32
+#define E1000_VLVF_VLANID_MASK 0x00000FFF
+#define E1000_VLVF_POOLSEL_SHIFT 12
+#define E1000_VLVF_POOLSEL_MASK (0xFF << E1000_VLVF_POOLSEL_SHIFT)
+#define E1000_VLVF_LVLAN 0x00100000
+#define E1000_VLVF_VLANID_ENABLE 0x80000000
+
+#define E1000_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */
+#define E1000_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */
+
+#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
+
+#define E1000_IOVCTL 0x05BBC
+#define E1000_IOVCTL_REUSE_VFQ 0x00000001
+
+#define E1000_RPLOLR_STRVLAN 0x40000000
+#define E1000_RPLOLR_STRCRC 0x80000000
+
+#define E1000_TCTL_EXT_COLD 0x000FFC00
+#define E1000_TCTL_EXT_COLD_SHIFT 10
+
+#define E1000_DTXCTL_8023LL 0x0004
+#define E1000_DTXCTL_VLAN_ADDED 0x0008
+#define E1000_DTXCTL_OOS_ENABLE 0x0010
+#define E1000_DTXCTL_MDP_EN 0x0020
+#define E1000_DTXCTL_SPOOF_INT 0x0040
+
+#define E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT (1 << 14)
+
+#define ALL_QUEUES 0xFFFF
+
+/* Rx packet buffer size defines */
+#define E1000_RXPBS_SIZE_MASK_82576 0x0000007F
 void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable);
+void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf);
 void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable);
+s32 e1000_init_nvm_params_82575(struct e1000_hw *hw);
+
 u16 e1000_rxpbs_adjust_82580(u32 data);
+s32 e1000_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data);
+s32 e1000_set_eee_i350(struct e1000_hw *);
+s32 e1000_set_eee_i354(struct e1000_hw *);
+s32 e1000_get_eee_status_i354(struct e1000_hw *, bool *);
+#define E1000_I2C_THERMAL_SENSOR_ADDR 0xF8
+#define E1000_EMC_INTERNAL_DATA 0x00
+#define E1000_EMC_INTERNAL_THERM_LIMIT 0x20
+#define E1000_EMC_DIODE1_DATA 0x01
+#define E1000_EMC_DIODE1_THERM_LIMIT 0x19
+#define E1000_EMC_DIODE2_DATA 0x23
+#define E1000_EMC_DIODE2_THERM_LIMIT 0x1A
+#define E1000_EMC_DIODE3_DATA 0x2A
+#define E1000_EMC_DIODE3_THERM_LIMIT 0x30
+
+s32 e1000_get_thermal_sensor_data_generic(struct e1000_hw *hw);
+s32 e1000_init_thermal_sensor_thresh_generic(struct e1000_hw *hw);
+
+/* I2C SDA and SCL timing parameters for standard mode */
+#define E1000_I2C_T_HD_STA 4
+#define E1000_I2C_T_LOW 5
+#define E1000_I2C_T_HIGH 4
+#define E1000_I2C_T_SU_STA 5
+#define E1000_I2C_T_HD_DATA 5
+#define E1000_I2C_T_SU_DATA 1
+#define E1000_I2C_T_RISE 1
+#define E1000_I2C_T_FALL 1
+#define E1000_I2C_T_SU_STO 4
+#define E1000_I2C_T_BUF 5
+
+s32 e1000_set_i2c_bb(struct e1000_hw *hw);
+s32 e1000_read_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset,
+				u8 dev_addr, u8 *data);
+s32 e1000_write_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset,
+				 u8 dev_addr, u8 data);
+void e1000_i2c_bus_clear(struct e1000_hw *hw);
 #endif /* _E1000_82575_H_ */
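The EMC thermal-sensor register map and the bit-banged I2C helpers declared at the end of e1000_82575.h above are meant to be used together: each E1000_EMC_* constant is a byte offset behind the sensor's fixed I2C address. A minimal usage sketch follows (editorial, not part of the disclosed sources; the wrapper name is illustrative and an initialized struct e1000_hw is assumed):

static s32 igb_read_internal_temp(struct e1000_hw *hw, u8 *temp)
{
	/* Read one byte from the EMC internal-diode data register over
	 * the bit-banged I2C bus at the thermal sensor's address. */
	return e1000_read_i2c_byte_generic(hw, E1000_EMC_INTERNAL_DATA,
					   E1000_I2C_THERMAL_SENSOR_ADDR,
					   temp);
}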
diff --git a/vmkdrivers/src_9/drivers/net/igb/e1000_api.c b/vmkdrivers/src_9/drivers/net/igb/e1000_api.c
index 8bad5485028a8f673ff81112e718a5e83e194d43..e34ebef265fc2be75932aec37ac1103cfe8abd78 100644
--- a/vmkdrivers/src_9/drivers/net/igb/e1000_api.c
+++ b/vmkdrivers/src_9/drivers/net/igb/e1000_api.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2009 Intel Corporation.
+  Copyright(c) 2007-2013 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -157,7 +157,7 @@ s32 e1000_set_mac_type(struct e1000_hw *hw)
 	case E1000_DEV_ID_82576_FIBER:
 	case E1000_DEV_ID_82576_SERDES:
 	case E1000_DEV_ID_82576_QUAD_COPPER:
-	case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
+	case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
 	case E1000_DEV_ID_82576_NS:
 	case E1000_DEV_ID_82576_NS_SERDES:
 	case E1000_DEV_ID_82576_SERDES_QUAD:
@@ -168,14 +168,39 @@ s32 e1000_set_mac_type(struct e1000_hw *hw)
 	case E1000_DEV_ID_82580_SERDES:
 	case E1000_DEV_ID_82580_SGMII:
 	case E1000_DEV_ID_82580_COPPER_DUAL:
+	case E1000_DEV_ID_82580_QUAD_FIBER:
+	case E1000_DEV_ID_DH89XXCC_SGMII:
+	case E1000_DEV_ID_DH89XXCC_SERDES:
+	case E1000_DEV_ID_DH89XXCC_BACKPLANE:
+	case E1000_DEV_ID_DH89XXCC_SFP:
 		mac->type = e1000_82580;
 		break;
 	case E1000_DEV_ID_I350_COPPER:
 	case E1000_DEV_ID_I350_FIBER:
 	case E1000_DEV_ID_I350_SERDES:
 	case E1000_DEV_ID_I350_SGMII:
+	case E1000_DEV_ID_I350_DA4:
 		mac->type = e1000_i350;
 		break;
+	case E1000_DEV_ID_I210_COPPER_FLASHLESS:
+	case E1000_DEV_ID_I210_SERDES_FLASHLESS:
+	case E1000_DEV_ID_I210_COPPER:
+	case E1000_DEV_ID_I210_COPPER_OEM1:
+	case E1000_DEV_ID_I210_COPPER_IT:
+	case E1000_DEV_ID_I210_FIBER:
+	case E1000_DEV_ID_I210_SERDES:
+	case E1000_DEV_ID_I210_SGMII:
+		mac->type = e1000_i210;
+		break;
+	case E1000_DEV_ID_I211_COPPER:
+		mac->type = e1000_i211;
+		break;
+
+	case E1000_DEV_ID_I354_BACKPLANE_1GBPS:
+	case E1000_DEV_ID_I354_SGMII:
+	case E1000_DEV_ID_I354_BACKPLANE_2_5GBPS:
+		mac->type = e1000_i354;
+		break;
 	default:
 		/* Should never have loaded on this device */
 		ret_val = -E1000_ERR_MAC_INIT;
@@ -189,10 +214,10 @@ s32 e1000_set_mac_type(struct e1000_hw *hw)
  * e1000_setup_init_funcs - Initializes function pointers
  * @hw: pointer to the HW structure
  * @init_device: true will initialize the rest of the function pointers
- *               getting the device ready for use. false will only set
- *               MAC type and the function pointers for the other init
- *               functions. Passing false will not generate any hardware
- *               reads or writes.
+ *		 getting the device ready for use. false will only set
+ *		 MAC type and the function pointers for the other init
+ *		 functions. Passing false will not generate any hardware
+ *		 reads or writes.
 *
 * This function must be called by a driver in order to use the rest
 * of the 'shared' code files. Called by drivers only.
@@ -232,8 +257,13 @@ s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device)
 	case e1000_82576:
 	case e1000_82580:
 	case e1000_i350:
+	case e1000_i354:
 		e1000_init_function_pointers_82575(hw);
 		break;
+	case e1000_i210:
+	case e1000_i211:
+		e1000_init_function_pointers_i210(hw);
+		break;
 	default:
 		DEBUGOUT("Hardware not supported\n");
 		ret_val = -E1000_ERR_CONFIG;
@@ -320,11 +350,11 @@ void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
  * The caller must have a packed mc_addr_list of multicast addresses.
 **/
 void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
-                               u32 mc_addr_count)
+			       u32 mc_addr_count)
 {
 	if (hw->mac.ops.update_mc_addr_list)
 		hw->mac.ops.update_mc_addr_list(hw, mc_addr_list,
-                                               mc_addr_count);
+						mc_addr_count);
 }
 
@@ -618,20 +648,6 @@ s32 e1000_validate_mdi_setting(struct e1000_hw *hw)
 	return E1000_SUCCESS;
 }
 
-/**
- * e1000_mta_set - Sets multicast table bit
- * @hw: pointer to the HW structure
- * @hash_value: Multicast hash value.
- *
- * This sets the bit in the multicast table corresponding to the
- * hash value. This is a function pointer entry point called by drivers.
- **/
-void e1000_mta_set(struct e1000_hw *hw, u32 hash_value)
-{
-	if (hw->mac.ops.mta_set)
-		hw->mac.ops.mta_set(hw, hash_value);
-}
-
 /**
 * e1000_hash_mc_addr - Determines address location in multicast table
 * @hw: pointer to the HW structure
@@ -672,14 +688,10 @@ bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw)
 * It also does alignment considerations to do the writes in most efficient
 * way. Also fills up the sum of the buffer in *buffer parameter.
 **/
-s32 e1000_mng_host_if_write(struct e1000_hw * hw, u8 *buffer, u16 length,
-                            u16 offset, u8 *sum)
+s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length,
+			    u16 offset, u8 *sum)
 {
-	if (hw->mac.ops.mng_host_if_write)
-		return hw->mac.ops.mng_host_if_write(hw, buffer, length,
-						     offset, sum);
-
-	return E1000_NOT_IMPLEMENTED;
+	return e1000_mng_host_if_write_generic(hw, buffer, length, offset, sum);
 }
 
 /**
@@ -690,12 +702,9 @@ s32 e1000_mng_host_if_write(struct e1000_hw * hw, u8 *buffer, u16 length,
 * Writes the command header after does the checksum calculation.
 **/
 s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
-                               struct e1000_host_mng_command_header *hdr)
+			       struct e1000_host_mng_command_header *hdr)
 {
-	if (hw->mac.ops.mng_write_cmd_header)
-		return hw->mac.ops.mng_write_cmd_header(hw, hdr);
-
-	return E1000_NOT_IMPLEMENTED;
+	return e1000_mng_write_cmd_header_generic(hw, hdr);
 }
 
 /**
@@ -708,27 +717,9 @@ s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
 * and also checks whether the previous command is completed. It busy waits
 * in case of previous command is not completed.
 **/
-s32 e1000_mng_enable_host_if(struct e1000_hw * hw)
+s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
 {
-	if (hw->mac.ops.mng_enable_host_if)
-		return hw->mac.ops.mng_enable_host_if(hw);
-
-	return E1000_NOT_IMPLEMENTED;
-}
-
-/**
- * e1000_wait_autoneg - Waits for autonegotiation completion
- * @hw: pointer to the HW structure
- *
- * Waits for autoneg to complete. Currently no func pointer exists and all
- * implementations are handled in the generic version of this function.
- **/
-s32 e1000_wait_autoneg(struct e1000_hw *hw)
-{
-	if (hw->mac.ops.wait_autoneg)
-		return hw->mac.ops.wait_autoneg(hw);
-
-	return E1000_SUCCESS;
+	return e1000_mng_enable_host_if_generic(hw);
 }
 
 /**
@@ -961,18 +952,34 @@ s32 e1000_read_mac_addr(struct e1000_hw *hw)
 }
 
 /**
- * e1000_read_pba_num - Read device part number
+ * e1000_read_pba_string - Read device part number string
 * @hw: pointer to the HW structure
 * @pba_num: pointer to device part number
+ * @pba_num_size: size of part number buffer
 *
 * Reads the product board assembly (PBA) number from the EEPROM and stores
 * the value in pba_num.
 * Currently no func pointer exists and all implementations are handled in the
 * generic version of this function.
 **/
-s32 e1000_read_pba_num(struct e1000_hw *hw, u32 *pba_num)
+s32 e1000_read_pba_string(struct e1000_hw *hw, u8 *pba_num, u32 pba_num_size)
 {
-	return e1000_read_pba_num_generic(hw, pba_num);
+	return e1000_read_pba_string_generic(hw, pba_num, pba_num_size);
+}
+
+/**
+ * e1000_read_pba_length - Read device part number string length
+ * @hw: pointer to the HW structure
+ * @pba_num_size: size of part number buffer
+ *
+ * Reads the product board assembly (PBA) number length from the EEPROM and
+ * stores the value in pba_num.
+ * Currently no func pointer exists and all implementations are handled in the
+ * generic version of this function.
+ **/
+s32 e1000_read_pba_length(struct e1000_hw *hw, u32 *pba_num_size)
+{
+	return e1000_read_pba_length_generic(hw, pba_num_size);
 }
 
 /**
@@ -1065,7 +1072,7 @@ s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
 * This is a function pointer entry point called by drivers.
 **/
 s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, u32 offset,
-                              u8 data)
+			      u8 data)
 {
 	return e1000_write_8bit_ctrl_reg_generic(hw, reg, offset, data);
 }
 
@@ -1098,6 +1105,18 @@ void e1000_power_down_phy(struct e1000_hw *hw)
 		hw->phy.ops.power_down(hw);
 }
 
+/**
+ * e1000_power_up_fiber_serdes_link - Power up serdes link
+ * @hw: pointer to the HW structure
+ *
+ * Power on the optics and PCS.
+ **/
+void e1000_power_up_fiber_serdes_link(struct e1000_hw *hw)
+{
+	if (hw->mac.ops.power_up_serdes)
+		hw->mac.ops.power_up_serdes(hw);
+}
+
 /**
 * e1000_shutdown_fiber_serdes_link - Remove link during power down
 * @hw: pointer to the HW structure
@@ -1110,3 +1129,31 @@ void e1000_shutdown_fiber_serdes_link(struct e1000_hw *hw)
 		hw->mac.ops.shutdown_serdes(hw);
 }
 
+/**
+ * e1000_get_thermal_sensor_data - Gathers thermal sensor data
+ * @hw: pointer to hardware structure
+ *
+ * Updates the temperatures in mac.thermal_sensor_data
+ **/
+s32 e1000_get_thermal_sensor_data(struct e1000_hw *hw)
+{
+	if (hw->mac.ops.get_thermal_sensor_data)
+		return hw->mac.ops.get_thermal_sensor_data(hw);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_thermal_sensor_thresh - Sets thermal sensor thresholds
+ * @hw: pointer to hardware structure
+ *
+ * Sets the thermal sensor thresholds according to the NVM map
+ **/
+s32 e1000_init_thermal_sensor_thresh(struct e1000_hw *hw)
+{
+	if (hw->mac.ops.init_thermal_sensor_thresh)
+		return hw->mac.ops.init_thermal_sensor_thresh(hw);
+
+	return E1000_SUCCESS;
+}
+
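The two thermal-sensor entry points added to e1000_api.c above dispatch through hw->mac.ops and fall back to E1000_SUCCESS when a MAC provides no sensor, so callers need no per-MAC guards. A minimal calling sketch (editorial, not disclosed code; the function name and call site are assumptions):

static void igb_thermal_setup(struct e1000_hw *hw)
{
	/* Program thresholds from the NVM map once at init time... */
	if (e1000_init_thermal_sensor_thresh(hw) != E1000_SUCCESS)
		return;

	/* ...then refresh mac.thermal_sensor_data whenever needed. */
	e1000_get_thermal_sensor_data(hw);
}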
diff --git a/vmkdrivers/src_9/drivers/net/igb/e1000_api.h b/vmkdrivers/src_9/drivers/net/igb/e1000_api.h
index 3fba2930bb87ac170d403a90f46fa738a104bdff..b21294ec9e1862c63ae969b6995314b47a54d591 100644
--- a/vmkdrivers/src_9/drivers/net/igb/e1000_api.h
+++ b/vmkdrivers/src_9/drivers/net/igb/e1000_api.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2009 Intel Corporation.
+  Copyright(c) 2007-2013 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -30,78 +30,81 @@
 
 #include "e1000_hw.h"
 
-extern void e1000_init_function_pointers_82575(struct e1000_hw *hw);
-extern void e1000_rx_fifo_flush_82575(struct e1000_hw *hw);
-extern void e1000_init_function_pointers_vf(struct e1000_hw *hw);
-extern void e1000_shutdown_fiber_serdes_link(struct e1000_hw *hw);
-
-s32 e1000_set_mac_type(struct e1000_hw *hw);
-s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device);
-s32 e1000_init_mac_params(struct e1000_hw *hw);
-s32 e1000_init_nvm_params(struct e1000_hw *hw);
-s32 e1000_init_phy_params(struct e1000_hw *hw);
-s32 e1000_init_mbx_params(struct e1000_hw *hw);
-s32 e1000_get_bus_info(struct e1000_hw *hw);
+extern void e1000_init_function_pointers_82575(struct e1000_hw *hw);
+extern void e1000_rx_fifo_flush_82575(struct e1000_hw *hw);
+extern void e1000_init_function_pointers_vf(struct e1000_hw *hw);
+extern void e1000_power_up_fiber_serdes_link(struct e1000_hw *hw);
+extern void e1000_shutdown_fiber_serdes_link(struct e1000_hw *hw);
+extern void e1000_init_function_pointers_i210(struct e1000_hw *hw);
+
+s32 e1000_set_obff_timer(struct e1000_hw *hw, u32 itr);
+s32 e1000_set_mac_type(struct e1000_hw *hw);
+s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device);
+s32 e1000_init_mac_params(struct e1000_hw *hw);
+s32 e1000_init_nvm_params(struct e1000_hw *hw);
+s32 e1000_init_phy_params(struct e1000_hw *hw);
+s32 e1000_init_mbx_params(struct e1000_hw *hw);
+s32 e1000_get_bus_info(struct e1000_hw *hw);
 void e1000_clear_vfta(struct e1000_hw *hw);
 void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value);
-s32 e1000_force_mac_fc(struct e1000_hw *hw);
-s32 e1000_check_for_link(struct e1000_hw *hw);
-s32 e1000_reset_hw(struct e1000_hw *hw);
-s32 e1000_init_hw(struct e1000_hw *hw);
-s32 e1000_setup_link(struct e1000_hw *hw);
-s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed,
-                               u16 *duplex);
-s32 e1000_disable_pcie_master(struct e1000_hw *hw);
+s32 e1000_force_mac_fc(struct e1000_hw *hw);
+s32 e1000_check_for_link(struct e1000_hw *hw);
+s32 e1000_reset_hw(struct e1000_hw *hw);
+s32 e1000_init_hw(struct e1000_hw *hw);
+s32 e1000_setup_link(struct e1000_hw *hw);
+s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex);
+s32 e1000_disable_pcie_master(struct e1000_hw *hw);
 void e1000_config_collision_dist(struct e1000_hw *hw);
 void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
-void e1000_mta_set(struct e1000_hw *hw, u32 hash_value);
-u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr);
-void e1000_update_mc_addr_list(struct e1000_hw *hw,
-                               u8 *mc_addr_list, u32 mc_addr_count);
-s32 e1000_setup_led(struct e1000_hw *hw);
-s32 e1000_cleanup_led(struct e1000_hw *hw);
-s32 e1000_check_reset_block(struct e1000_hw *hw);
-s32 e1000_blink_led(struct e1000_hw *hw);
-s32 e1000_led_on(struct e1000_hw *hw);
-s32 e1000_led_off(struct e1000_hw *hw);
+u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr);
+void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
+			       u32 mc_addr_count);
+s32 e1000_setup_led(struct e1000_hw *hw);
+s32 e1000_cleanup_led(struct e1000_hw *hw);
+s32 e1000_check_reset_block(struct e1000_hw *hw);
+s32 e1000_blink_led(struct e1000_hw *hw);
+s32 e1000_led_on(struct e1000_hw *hw);
+s32 e1000_led_off(struct e1000_hw *hw);
 s32 e1000_id_led_init(struct e1000_hw *hw);
 void e1000_reset_adaptive(struct e1000_hw *hw);
 void e1000_update_adaptive(struct e1000_hw *hw);
-s32 e1000_get_cable_length(struct e1000_hw *hw);
-s32 e1000_validate_mdi_setting(struct e1000_hw *hw);
-s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data);
-s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data);
-s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
-                              u32 offset, u8 data);
-s32 e1000_get_phy_info(struct e1000_hw *hw);
+s32 e1000_get_cable_length(struct e1000_hw *hw);
+s32 e1000_validate_mdi_setting(struct e1000_hw *hw);
+s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, u32 offset,
+			      u8 data);
+s32 e1000_get_phy_info(struct e1000_hw *hw);
 void e1000_release_phy(struct e1000_hw *hw);
-s32 e1000_acquire_phy(struct e1000_hw *hw);
-s32 e1000_phy_hw_reset(struct e1000_hw *hw);
-s32 e1000_phy_commit(struct e1000_hw *hw);
+s32 e1000_acquire_phy(struct e1000_hw *hw);
+s32 e1000_phy_hw_reset(struct e1000_hw *hw);
+s32 e1000_phy_commit(struct e1000_hw *hw);
 void e1000_power_up_phy(struct e1000_hw *hw);
 void e1000_power_down_phy(struct e1000_hw *hw);
-s32 e1000_read_mac_addr(struct e1000_hw *hw);
-s32 e1000_read_pba_num(struct e1000_hw *hw, u32 *part_num);
+s32 e1000_read_mac_addr(struct e1000_hw *hw);
+s32 e1000_read_pba_string(struct e1000_hw *hw, u8 *pba_num, u32 pba_num_size);
+s32 e1000_read_pba_length(struct e1000_hw *hw, u32 *pba_num_size);
 void e1000_reload_nvm(struct e1000_hw *hw);
-s32 e1000_update_nvm_checksum(struct e1000_hw *hw);
-s32 e1000_validate_nvm_checksum(struct e1000_hw *hw);
-s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
-s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
-s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
-s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words,
-                    u16 *data);
-s32 e1000_wait_autoneg(struct e1000_hw *hw);
-s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active);
-s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active);
+s32 e1000_update_nvm_checksum(struct e1000_hw *hw);
+s32 e1000_validate_nvm_checksum(struct e1000_hw *hw);
+s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active);
+s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active);
 bool e1000_check_mng_mode(struct e1000_hw *hw);
 bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw);
-s32 e1000_mng_enable_host_if(struct e1000_hw *hw);
-s32 e1000_mng_host_if_write(struct e1000_hw *hw,
-                            u8 *buffer, u16 length, u16 offset, u8 *sum);
-s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
-                               struct e1000_host_mng_command_header *hdr);
-s32 e1000_mng_write_dhcp_info(struct e1000_hw * hw,
-                              u8 *buffer, u16 length);
+s32 e1000_mng_enable_host_if(struct e1000_hw *hw);
+s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length,
+			    u16 offset, u8 *sum);
+s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
+			       struct e1000_host_mng_command_header *hdr);
+s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length);
+s32 e1000_get_thermal_sensor_data(struct e1000_hw *hw);
+s32 e1000_init_thermal_sensor_thresh(struct e1000_hw *hw);
+
+
 
 /*
 * TBI_ACCEPT macro definition:
@@ -134,14 +137,21 @@ s32 e1000_mng_write_dhcp_info(struct e1000_hw * hw,
 
 /* The carrier extension symbol, as received by the NIC. */
 #define CARRIER_EXTENSION 0x0F
 
-#define TBI_ACCEPT(a, status, errors, length, last_byte, min_frame_size, max_frame_size) \
-    (e1000_tbi_sbp_enabled_82543(a) && \
-     (((errors) & E1000_RXD_ERR_FRAME_ERR_MASK) == E1000_RXD_ERR_CE) && \
-     ((last_byte) == CARRIER_EXTENSION) && \
-     (((status) & E1000_RXD_STAT_VP) ? \
-      (((length) > (min_frame_size - VLAN_TAG_SIZE)) && \
-       ((length) <= (max_frame_size + 1))) : \
-      (((length) > min_frame_size) && \
-       ((length) <= (max_frame_size + VLAN_TAG_SIZE + 1)))))
+#define TBI_ACCEPT(a, status, errors, length, last_byte, \
+		   min_frame_size, max_frame_size) \
+	(e1000_tbi_sbp_enabled_82543(a) && \
+	 (((errors) & E1000_RXD_ERR_FRAME_ERR_MASK) == E1000_RXD_ERR_CE) && \
+	 ((last_byte) == CARRIER_EXTENSION) && \
+	 (((status) & E1000_RXD_STAT_VP) ? \
+	  (((length) > (min_frame_size - VLAN_TAG_SIZE)) && \
+	   ((length) <= (max_frame_size + 1))) : \
+	  (((length) > min_frame_size) && \
+	   ((length) <= (max_frame_size + VLAN_TAG_SIZE + 1)))))
+#ifndef E1000_MAX
+#define E1000_MAX(a, b) ((a) > (b) ? (a) : (b))
+#endif
+#ifndef E1000_DIVIDE_ROUND_UP
+#define E1000_DIVIDE_ROUND_UP(a, b) (((a) + (b) - 1) / (b)) /* ceil(a/b) */
 #endif
+#endif /* _E1000_API_H_ */
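e1000_api.h above replaces the old u32-based e1000_read_pba_num() with a string PBA interface plus a length query. A minimal sketch of the intended two-step call sequence (editorial, not disclosed code; the helper name and the error chosen for the too-small case are arbitrary):

static s32 igb_fetch_pba(struct e1000_hw *hw, u8 *buf, u32 buf_size)
{
	u32 len = 0;
	s32 ret;

	/* First ask the NVM how long the PBA string is... */
	ret = e1000_read_pba_length(hw, &len);
	if (ret != E1000_SUCCESS)
		return ret;
	if (len > buf_size)
		return -E1000_ERR_CONFIG; /* arbitrary error code for this sketch */

	/* ...then read it into a buffer known to be large enough. */
	return e1000_read_pba_string(hw, buf, buf_size);
}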
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -34,740 +34,496 @@ /* Definitions for power management and wakeup registers */ /* Wake Up Control */ -#define E1000_WUC_APME 0x00000001 /* APM Enable */ -#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */ -#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */ -#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */ -#define E1000_WUC_LSCWE 0x00000010 /* Link Status wake up enable */ -#define E1000_WUC_LSCWO 0x00000020 /* Link Status wake up override */ -#define E1000_WUC_SPM 0x80000000 /* Enable SPM */ -#define E1000_WUC_PHY_WAKE 0x00000100 /* if PHY supports wakeup */ +#define E1000_WUC_APME 0x00000001 /* APM Enable */ +#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */ +#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */ +#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */ +#define E1000_WUC_PHY_WAKE 0x00000100 /* if PHY supports wakeup */ /* Wake Up Filter Control */ -#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ -#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ -#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ -#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ -#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ -#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ -#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */ -#define E1000_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */ -#define E1000_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */ -#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */ -#define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */ -#define E1000_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */ -#define E1000_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */ -#define E1000_WUFC_FLX4 0x00100000 /* Flexible Filter 4 Enable */ -#define E1000_WUFC_FLX5 0x00200000 /* Flexible Filter 5 Enable */ -#define E1000_WUFC_ALL_FILTERS 0x000F00FF /* Mask for all wakeup filters */ -#define E1000_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */ -#define E1000_WUFC_FLX_FILTERS 0x000F0000 /*Mask for the 4 flexible filters */ -/* - * For 82576 to utilize Extended filter masks in addition to - * existing (filter) masks - */ -#define E1000_WUFC_EXT_FLX_FILTERS 0x00300000 /* Ext. 
FLX filter mask */ +#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ +#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ +#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */ +#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */ /* Wake Up Status */ -#define E1000_WUS_LNKC E1000_WUFC_LNKC -#define E1000_WUS_MAG E1000_WUFC_MAG -#define E1000_WUS_EX E1000_WUFC_EX -#define E1000_WUS_MC E1000_WUFC_MC -#define E1000_WUS_BC E1000_WUFC_BC -#define E1000_WUS_ARP E1000_WUFC_ARP -#define E1000_WUS_IPV4 E1000_WUFC_IPV4 -#define E1000_WUS_IPV6 E1000_WUFC_IPV6 -#define E1000_WUS_FLX0 E1000_WUFC_FLX0 -#define E1000_WUS_FLX1 E1000_WUFC_FLX1 -#define E1000_WUS_FLX2 E1000_WUFC_FLX2 -#define E1000_WUS_FLX3 E1000_WUFC_FLX3 -#define E1000_WUS_FLX_FILTERS E1000_WUFC_FLX_FILTERS - -/* Wake Up Packet Length */ -#define E1000_WUPL_LENGTH_MASK 0x0FFF /* Only the lower 12 bits are valid */ - -/* Four Flexible Filters are supported */ -#define E1000_FLEXIBLE_FILTER_COUNT_MAX 4 -/* Two Extended Flexible Filters are supported (82576) */ -#define E1000_EXT_FLEXIBLE_FILTER_COUNT_MAX 2 -#define E1000_FHFT_LENGTH_OFFSET 0xFC /* Length byte in FHFT */ -#define E1000_FHFT_LENGTH_MASK 0x0FF /* Length in lower byte */ - -/* Each Flexible Filter is at most 128 (0x80) bytes in length */ -#define E1000_FLEXIBLE_FILTER_SIZE_MAX 128 - -#define E1000_FFLT_SIZE E1000_FLEXIBLE_FILTER_COUNT_MAX -#define E1000_FFMT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX -#define E1000_FFVT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX +#define E1000_WUS_LNKC E1000_WUFC_LNKC +#define E1000_WUS_MAG E1000_WUFC_MAG +#define E1000_WUS_EX E1000_WUFC_EX +#define E1000_WUS_MC E1000_WUFC_MC +#define E1000_WUS_BC E1000_WUFC_BC /* Extended Device Control */ -#define E1000_CTRL_EXT_GPI0_EN 0x00000001 /* Maps SDP4 to GPI0 */ -#define E1000_CTRL_EXT_GPI1_EN 0x00000002 /* Maps SDP5 to GPI1 */ -#define E1000_CTRL_EXT_PHYINT_EN E1000_CTRL_EXT_GPI1_EN -#define E1000_CTRL_EXT_GPI2_EN 0x00000004 /* Maps SDP6 to GPI2 */ -#define E1000_CTRL_EXT_GPI3_EN 0x00000008 /* Maps SDP7 to GPI3 */ -/* Reserved (bits 4,5) in >= 82575 */ -#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* Value of SW Definable Pin 4 */ -#define E1000_CTRL_EXT_SDP5_DATA 0x00000020 /* Value of SW Definable Pin 5 */ -#define E1000_CTRL_EXT_PHY_INT E1000_CTRL_EXT_SDP5_DATA -#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* Value of SW Definable Pin 6 */ -#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Definable Pin 3 */ -/* SDP 4/5 (bits 8,9) are reserved in >= 82575 */ -#define E1000_CTRL_EXT_SDP4_DIR 0x00000100 /* Direction of SDP4 0=in 1=out */ -#define E1000_CTRL_EXT_SDP5_DIR 0x00000200 /* Direction of SDP5 0=in 1=out */ -#define E1000_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6 0=in 1=out */ -#define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* Direction of SDP3 0=in 1=out */ -#define E1000_CTRL_EXT_ASDCHK 0x00001000 /* Initiate an ASD sequence */ -#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ -#define E1000_CTRL_EXT_IPS 0x00004000 /* Invert Power State */ +#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* SW Definable Pin 4 data */ +#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* SW Definable Pin 6 data */ +#define E1000_CTRL_EXT_SDP3_DATA 
0x00000080 /* SW Definable Pin 3 data */ +#define E1000_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6 0=in 1=out */ +#define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* Direction of SDP3 0=in 1=out */ +#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ /* Physical Func Reset Done Indication */ -#define E1000_CTRL_EXT_PFRSTD 0x00004000 -#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */ -#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ -#define E1000_CTRL_EXT_DMA_DYN_CLK_EN 0x00080000 /* DMA Dynamic Clock Gating */ -#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 -#define E1000_CTRL_EXT_LINK_MODE_82580_MASK 0x01C00000 /*82580 bit 24:22*/ -#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000 -#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 -#define E1000_CTRL_EXT_LINK_MODE_TBI 0x00C00000 -#define E1000_CTRL_EXT_LINK_MODE_KMRN 0x00000000 -#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 -#define E1000_CTRL_EXT_LINK_MODE_PCIX_SERDES 0x00800000 -#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 -#define E1000_CTRL_EXT_EIAME 0x01000000 -#define E1000_CTRL_EXT_IRCA 0x00000001 -#define E1000_CTRL_EXT_WR_WMARK_MASK 0x03000000 -#define E1000_CTRL_EXT_WR_WMARK_256 0x00000000 -#define E1000_CTRL_EXT_WR_WMARK_320 0x01000000 -#define E1000_CTRL_EXT_WR_WMARK_384 0x02000000 -#define E1000_CTRL_EXT_WR_WMARK_448 0x03000000 -#define E1000_CTRL_EXT_CANC 0x04000000 /* Int delay cancellation */ -#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ -/* IAME enable bit (27) was removed in >= 82575 */ -#define E1000_CTRL_EXT_IAME 0x08000000 /* Int acknowledge Auto-mask */ -#define E1000_CRTL_EXT_PB_PAREN 0x01000000 /* packet buffer parity error - * detection enabled */ -#define E1000_CTRL_EXT_DF_PAREN 0x02000000 /* descriptor FIFO parity - * error detection enable */ -#define E1000_CTRL_EXT_GHOST_PAREN 0x40000000 -#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ -#define E1000_I2CCMD_REG_ADDR_SHIFT 16 -#define E1000_I2CCMD_REG_ADDR 0x00FF0000 -#define E1000_I2CCMD_PHY_ADDR_SHIFT 24 -#define E1000_I2CCMD_PHY_ADDR 0x07000000 -#define E1000_I2CCMD_OPCODE_READ 0x08000000 -#define E1000_I2CCMD_OPCODE_WRITE 0x00000000 -#define E1000_I2CCMD_RESET 0x10000000 -#define E1000_I2CCMD_READY 0x20000000 -#define E1000_I2CCMD_INTERRUPT_ENA 0x40000000 -#define E1000_I2CCMD_ERROR 0x80000000 -#define E1000_MAX_SGMII_PHY_REG_ADDR 255 -#define E1000_I2CCMD_PHY_TIMEOUT 200 -#define E1000_IVAR_VALID 0x80 -#define E1000_GPIE_NSICR 0x00000001 -#define E1000_GPIE_MSIX_MODE 0x00000010 -#define E1000_GPIE_EIAME 0x40000000 -#define E1000_GPIE_PBA 0x80000000 +#define E1000_CTRL_EXT_PFRSTD 0x00004000 +#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */ +#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ +#define E1000_CTRL_EXT_DMA_DYN_CLK_EN 0x00080000 /* DMA Dynamic Clk Gating */ +#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 +/* Offset of the link mode field in Ctrl Ext register */ +#define E1000_CTRL_EXT_LINK_MODE_OFFSET 22 +#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000 +#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 +#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 +#define E1000_CTRL_EXT_EIAME 0x01000000 +#define E1000_CTRL_EXT_IRCA 0x00000001 +#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Drv loaded bit for FW */ +#define E1000_CTRL_EXT_IAME 0x08000000 /* Int ACK Auto-mask */ +#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA 
Clear */ +#define E1000_I2CCMD_REG_ADDR_SHIFT 16 +#define E1000_I2CCMD_PHY_ADDR_SHIFT 24 +#define E1000_I2CCMD_OPCODE_READ 0x08000000 +#define E1000_I2CCMD_OPCODE_WRITE 0x00000000 +#define E1000_I2CCMD_READY 0x20000000 +#define E1000_I2CCMD_ERROR 0x80000000 +#define E1000_I2CCMD_SFP_DATA_ADDR(a) (0x0000 + (a)) +#define E1000_I2CCMD_SFP_DIAG_ADDR(a) (0x0100 + (a)) +#define E1000_MAX_SGMII_PHY_REG_ADDR 255 +#define E1000_I2CCMD_PHY_TIMEOUT 200 +#define E1000_IVAR_VALID 0x80 +#define E1000_GPIE_NSICR 0x00000001 +#define E1000_GPIE_MSIX_MODE 0x00000010 +#define E1000_GPIE_EIAME 0x40000000 +#define E1000_GPIE_PBA 0x80000000 /* Receive Descriptor bit definitions */ -#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ -#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ -#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ -#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ -#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ -#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ -#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ -#define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */ -#define E1000_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */ -#define E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */ -#define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ -#define E1000_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */ -#define E1000_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ -#define E1000_RXD_ERR_CE 0x01 /* CRC Error */ -#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ -#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */ -#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */ -#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */ -#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */ -#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */ -#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ -#define E1000_RXD_SPC_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ -#define E1000_RXD_SPC_PRI_SHIFT 13 -#define E1000_RXD_SPC_CFI_MASK 0x1000 /* CFI is bit 12 */ -#define E1000_RXD_SPC_CFI_SHIFT 12 - -#define E1000_RXDEXT_STATERR_CE 0x01000000 -#define E1000_RXDEXT_STATERR_SE 0x02000000 -#define E1000_RXDEXT_STATERR_SEQ 0x04000000 -#define E1000_RXDEXT_STATERR_CXE 0x10000000 -#define E1000_RXDEXT_STATERR_TCPE 0x20000000 -#define E1000_RXDEXT_STATERR_IPE 0x40000000 -#define E1000_RXDEXT_STATERR_RXE 0x80000000 +#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ +#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ +#define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */ +#define E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */ +#define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ +#define E1000_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */ +#define E1000_RXD_ERR_CE 0x01 /* CRC Error */ +#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ +#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */ +#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */ +#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */ +#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */ +#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */ +#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ + +#define 
E1000_RXDEXT_STATERR_TST 0x00000100 /* Time Stamp taken */ +#define E1000_RXDEXT_STATERR_LB 0x00040000 +#define E1000_RXDEXT_STATERR_CE 0x01000000 +#define E1000_RXDEXT_STATERR_SE 0x02000000 +#define E1000_RXDEXT_STATERR_SEQ 0x04000000 +#define E1000_RXDEXT_STATERR_CXE 0x10000000 +#define E1000_RXDEXT_STATERR_TCPE 0x20000000 +#define E1000_RXDEXT_STATERR_IPE 0x40000000 +#define E1000_RXDEXT_STATERR_RXE 0x80000000 /* mask to determine if packets should be dropped due to frame errors */ #define E1000_RXD_ERR_FRAME_ERR_MASK ( \ - E1000_RXD_ERR_CE | \ - E1000_RXD_ERR_SE | \ - E1000_RXD_ERR_SEQ | \ - E1000_RXD_ERR_CXE | \ - E1000_RXD_ERR_RXE) + E1000_RXD_ERR_CE | \ + E1000_RXD_ERR_SE | \ + E1000_RXD_ERR_SEQ | \ + E1000_RXD_ERR_CXE | \ + E1000_RXD_ERR_RXE) /* Same mask, but for extended and packet split descriptors */ #define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ - E1000_RXDEXT_STATERR_CE | \ - E1000_RXDEXT_STATERR_SE | \ - E1000_RXDEXT_STATERR_SEQ | \ - E1000_RXDEXT_STATERR_CXE | \ - E1000_RXDEXT_STATERR_RXE) - -#define E1000_MRQC_ENABLE_MASK 0x00000007 -#define E1000_MRQC_ENABLE_RSS_2Q 0x00000001 -#define E1000_MRQC_ENABLE_RSS_INT 0x00000004 -#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000 -#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 -#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000 -#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 -#define E1000_MRQC_RSS_FIELD_IPV6_EX 0x00080000 -#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000 -#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 - -#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000 -#define E1000_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF + E1000_RXDEXT_STATERR_CE | \ + E1000_RXDEXT_STATERR_SE | \ + E1000_RXDEXT_STATERR_SEQ | \ + E1000_RXDEXT_STATERR_CXE | \ + E1000_RXDEXT_STATERR_RXE) + +#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000 +#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 +#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 + +#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000 /* Management Control */ -#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ -#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ -#define E1000_MANC_R_ON_FORCE 0x00000004 /* Reset on Force TCO - RO */ -#define E1000_MANC_RMCP_EN 0x00000100 /* Enable RCMP 026Fh Filtering */ -#define E1000_MANC_0298_EN 0x00000200 /* Enable RCMP 0298h Filtering */ -#define E1000_MANC_IPV4_EN 0x00000400 /* Enable IPv4 */ -#define E1000_MANC_IPV6_EN 0x00000800 /* Enable IPv6 */ -#define E1000_MANC_SNAP_EN 0x00001000 /* Accept LLC/SNAP */ -#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */ -/* Enable Neighbor Discovery Filtering */ -#define E1000_MANC_NEIGHBOR_EN 0x00004000 -#define E1000_MANC_ARP_RES_EN 0x00008000 /* Enable ARP response Filtering */ -#define E1000_MANC_TCO_RESET 0x00010000 /* TCO Reset Occurred */ -#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ -#define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */ -#define E1000_MANC_RCV_ALL 0x00080000 /* Receive All Enabled */ -#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ +#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ +#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ +#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */ +#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ +#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* 
Block phy resets */ /* Enable MAC address filtering */ -#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 +#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 /* Enable MNG packets to host memory */ -#define E1000_MANC_EN_MNG2HOST 0x00200000 -/* Enable IP address filtering */ -#define E1000_MANC_EN_IP_ADDR_FILTER 0x00400000 -#define E1000_MANC_EN_XSUM_FILTER 0x00800000 /* Enable checksum filtering */ -#define E1000_MANC_BR_EN 0x01000000 /* Enable broadcast filtering */ -#define E1000_MANC_SMB_REQ 0x01000000 /* SMBus Request */ -#define E1000_MANC_SMB_GNT 0x02000000 /* SMBus Grant */ -#define E1000_MANC_SMB_CLK_IN 0x04000000 /* SMBus Clock In */ -#define E1000_MANC_SMB_DATA_IN 0x08000000 /* SMBus Data In */ -#define E1000_MANC_SMB_DATA_OUT 0x10000000 /* SMBus Data Out */ -#define E1000_MANC_SMB_CLK_OUT 0x20000000 /* SMBus Clock Out */ - -#define E1000_MANC_SMB_DATA_OUT_SHIFT 28 /* SMBus Data Out Shift */ -#define E1000_MANC_SMB_CLK_OUT_SHIFT 29 /* SMBus Clock Out Shift */ +#define E1000_MANC_EN_MNG2HOST 0x00200000 + +#define E1000_MANC2H_PORT_623 0x00000020 /* Port 0x26f */ +#define E1000_MANC2H_PORT_664 0x00000040 /* Port 0x298 */ +#define E1000_MDEF_PORT_623 0x00000800 /* Port 0x26f */ +#define E1000_MDEF_PORT_664 0x00000400 /* Port 0x298 */ /* Receive Control */ -#define E1000_RCTL_RST 0x00000001 /* Software reset */ -#define E1000_RCTL_EN 0x00000002 /* enable */ -#define E1000_RCTL_SBP 0x00000004 /* store bad packet */ -#define E1000_RCTL_UPE 0x00000008 /* unicast promisc enable */ -#define E1000_RCTL_MPE 0x00000010 /* multicast promisc enable */ -#define E1000_RCTL_LPE 0x00000020 /* long packet enable */ -#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */ -#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ -#define E1000_RCTL_LBM_SLP 0x00000080 /* serial link loopback mode */ -#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ -#define E1000_RCTL_DTYP_MASK 0x00000C00 /* Descriptor type mask */ -#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ -#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min thresh size */ -#define E1000_RCTL_RDMTS_QUAT 0x00000100 /* rx desc min thresh size */ -#define E1000_RCTL_RDMTS_EIGTH 0x00000200 /* rx desc min thresh size */ -#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ -#define E1000_RCTL_MO_0 0x00000000 /* multicast offset 11:0 */ -#define E1000_RCTL_MO_1 0x00001000 /* multicast offset 12:1 */ -#define E1000_RCTL_MO_2 0x00002000 /* multicast offset 13:2 */ -#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */ -#define E1000_RCTL_MDR 0x00004000 /* multicast desc ring 0 */ -#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ +#define E1000_RCTL_RST 0x00000001 /* Software reset */ +#define E1000_RCTL_EN 0x00000002 /* enable */ +#define E1000_RCTL_SBP 0x00000004 /* store bad packet */ +#define E1000_RCTL_UPE 0x00000008 /* unicast promisc enable */ +#define E1000_RCTL_MPE 0x00000010 /* multicast promisc enable */ +#define E1000_RCTL_LPE 0x00000020 /* long packet enable */ +#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */ +#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ +#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ +#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ +#define E1000_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min thresh size */ +#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ +#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */ +#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ /* 
these buffer sizes are valid if E1000_RCTL_BSEX is 0 */ -#define E1000_RCTL_SZ_2048 0x00000000 /* rx buffer size 2048 */ -#define E1000_RCTL_SZ_1024 0x00010000 /* rx buffer size 1024 */ -#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */ -#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */ +#define E1000_RCTL_SZ_2048 0x00000000 /* Rx buffer size 2048 */ +#define E1000_RCTL_SZ_1024 0x00010000 /* Rx buffer size 1024 */ +#define E1000_RCTL_SZ_512 0x00020000 /* Rx buffer size 512 */ +#define E1000_RCTL_SZ_256 0x00030000 /* Rx buffer size 256 */ /* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */ -#define E1000_RCTL_SZ_16384 0x00010000 /* rx buffer size 16384 */ -#define E1000_RCTL_SZ_8192 0x00020000 /* rx buffer size 8192 */ -#define E1000_RCTL_SZ_4096 0x00030000 /* rx buffer size 4096 */ -#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ -#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ -#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */ -#define E1000_RCTL_DPF 0x00400000 /* discard pause frames */ -#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ -#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */ -#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ -#define E1000_RCTL_FLXBUF_MASK 0x78000000 /* Flexible buffer size */ -#define E1000_RCTL_FLXBUF_SHIFT 27 /* Flexible buffer shift */ - -/* - * Use byte values for the following shift parameters +#define E1000_RCTL_SZ_16384 0x00010000 /* Rx buffer size 16384 */ +#define E1000_RCTL_SZ_8192 0x00020000 /* Rx buffer size 8192 */ +#define E1000_RCTL_SZ_4096 0x00030000 /* Rx buffer size 4096 */ +#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ +#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ +#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */ +#define E1000_RCTL_DPF 0x00400000 /* discard pause frames */ +#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ +#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */ +#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ + +/* Use byte values for the following shift parameters * Usage: * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) & - * E1000_PSRCTL_BSIZE0_MASK) | - * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) & - * E1000_PSRCTL_BSIZE1_MASK) | - * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) & - * E1000_PSRCTL_BSIZE2_MASK) | - * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) |; - * E1000_PSRCTL_BSIZE3_MASK)) + * E1000_PSRCTL_BSIZE0_MASK) | + * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) & + * E1000_PSRCTL_BSIZE1_MASK) | + * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) & + * E1000_PSRCTL_BSIZE2_MASK) | + * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) |; + * E1000_PSRCTL_BSIZE3_MASK)) * where value0 = [128..16256], default=256 * value1 = [1024..64512], default=4096 * value2 = [0..64512], default=4096 * value3 = [0..64512], default=0 */ -#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F -#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00 -#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000 -#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000 +#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F +#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00 +#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000 +#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000 -#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */ -#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */ -#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ -#define 
E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ +#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */ +#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */ +#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ +#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ /* SWFW_SYNC Definitions */ -#define E1000_SWFW_EEP_SM 0x01 -#define E1000_SWFW_PHY0_SM 0x02 -#define E1000_SWFW_PHY1_SM 0x04 -#define E1000_SWFW_CSR_SM 0x08 -#define E1000_SWFW_PHY2_SM 0x20 -#define E1000_SWFW_PHY3_SM 0x40 - -/* FACTPS Definitions */ -#define E1000_FACTPS_LFS 0x40000000 /* LAN Function Select */ +#define E1000_SWFW_EEP_SM 0x01 +#define E1000_SWFW_PHY0_SM 0x02 +#define E1000_SWFW_PHY1_SM 0x04 +#define E1000_SWFW_CSR_SM 0x08 +#define E1000_SWFW_PHY2_SM 0x20 +#define E1000_SWFW_PHY3_SM 0x40 +#define E1000_SWFW_SW_MNG_SM 0x400 + /* Device Control */ -#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ -#define E1000_CTRL_BEM 0x00000002 /* Endian Mode.0=little,1=big */ -#define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */ +#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ +#define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */ #define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master reqs */ -#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */ -#define E1000_CTRL_TME 0x00000010 /* Test mode. 0=normal,1=test */ -#define E1000_CTRL_SLE 0x00000020 /* Serial Link on 0=dis,1=en */ -#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */ -#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ -#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */ -#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */ -#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */ -#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */ -#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */ -#define E1000_CTRL_BEM32 0x00000400 /* Big Endian 32 mode */ -#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ -#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ -#define E1000_CTRL_D_UD_EN 0x00002000 /* Dock/Undock enable */ -#define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock - * indication in SDP[0] */ -#define E1000_CTRL_FORCE_PHY_RESET 0x00008000 /* Reset both PHY ports, through - * PHYRST_N pin */ -#define E1000_CTRL_EXT_LINK_EN 0x00010000 /* enable link status from external - * LINK_0 and LINK_1 pins */ -#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ -#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ -#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */ -#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */ -#define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */ -#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ -#define E1000_CTRL_SWDPIO1 0x00800000 /* SWDPIN 1 input or output */ -#define E1000_CTRL_SWDPIO2 0x01000000 /* SWDPIN 2 input or output */ -#define E1000_CTRL_SWDPIO3 0x02000000 /* SWDPIN 3 input or output */ -#define E1000_CTRL_RST 0x04000000 /* Global reset */ -#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ -#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ -#define E1000_CTRL_RTE 0x20000000 /* Routing tag enable */ -#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ -#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ -#define E1000_CTRL_SW2FW_INT 0x02000000 /* Initiate an interrupt to ME */ -#define E1000_CTRL_I2C_ENA 0x02000000 /* I2C enable */ - -/* - 
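As a worked check of the PSRCTL usage comment earlier in this hunk: the following minimal, stand-alone sketch (illustrative only, not part of the disclosed sources) composes the register value from the default buffer sizes. ROUNDUP() is a hypothetical local helper; the mask and shift values are copied from the defines above, and the final term masks with '&' as the corrected comment shows.

    #include <stdio.h>

    #define E1000_PSRCTL_BSIZE0_MASK  0x0000007F
    #define E1000_PSRCTL_BSIZE1_MASK  0x00003F00
    #define E1000_PSRCTL_BSIZE2_MASK  0x003F0000
    #define E1000_PSRCTL_BSIZE3_MASK  0x3F000000
    #define E1000_PSRCTL_BSIZE0_SHIFT 7   /* shift right */
    #define E1000_PSRCTL_BSIZE1_SHIFT 2   /* shift right */
    #define E1000_PSRCTL_BSIZE2_SHIFT 6   /* shift left  */
    #define E1000_PSRCTL_BSIZE3_SHIFT 14  /* shift left  */

    /* Round v up to the next multiple of m (m must be a power of two). */
    #define ROUNDUP(v, m) (((v) + (m) - 1) & ~((m) - 1))

    int main(void)
    {
        unsigned int psrctl = 0;

        /* Defaults from the comment: 256, 4096, 4096 and 0 bytes. */
        psrctl |= (ROUNDUP(256u, 128u) >> E1000_PSRCTL_BSIZE0_SHIFT) &
                  E1000_PSRCTL_BSIZE0_MASK;
        psrctl |= (ROUNDUP(4096u, 1024u) >> E1000_PSRCTL_BSIZE1_SHIFT) &
                  E1000_PSRCTL_BSIZE1_MASK;
        psrctl |= (ROUNDUP(4096u, 1024u) << E1000_PSRCTL_BSIZE2_SHIFT) &
                  E1000_PSRCTL_BSIZE2_MASK;
        psrctl |= (ROUNDUP(0u, 1024u) << E1000_PSRCTL_BSIZE3_SHIFT) &
                  E1000_PSRCTL_BSIZE3_MASK;

        printf("PSRCTL = 0x%08X\n", psrctl); /* prints 0x00040402 */
        return 0;
    }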
* Bit definitions for the Management Data IO (MDIO) and Management Data - * Clock (MDC) pins in the Device Control Register. - */ -#define E1000_CTRL_PHY_RESET_DIR E1000_CTRL_SWDPIO0 -#define E1000_CTRL_PHY_RESET E1000_CTRL_SWDPIN0 -#define E1000_CTRL_MDIO_DIR E1000_CTRL_SWDPIO2 -#define E1000_CTRL_MDIO E1000_CTRL_SWDPIN2 -#define E1000_CTRL_MDC_DIR E1000_CTRL_SWDPIO3 -#define E1000_CTRL_MDC E1000_CTRL_SWDPIN3 -#define E1000_CTRL_PHY_RESET_DIR4 E1000_CTRL_EXT_SDP4_DIR -#define E1000_CTRL_PHY_RESET4 E1000_CTRL_EXT_SDP4_DATA - -#define E1000_CONNSW_ENRGSRC 0x4 -#define E1000_PCS_CFG_PCS_EN 8 -#define E1000_PCS_LCTL_FLV_LINK_UP 1 -#define E1000_PCS_LCTL_FSV_10 0 -#define E1000_PCS_LCTL_FSV_100 2 -#define E1000_PCS_LCTL_FSV_1000 4 -#define E1000_PCS_LCTL_FDV_FULL 8 -#define E1000_PCS_LCTL_FSD 0x10 -#define E1000_PCS_LCTL_FORCE_LINK 0x20 -#define E1000_PCS_LCTL_LOW_LINK_LATCH 0x40 -#define E1000_PCS_LCTL_FORCE_FCTRL 0x80 -#define E1000_PCS_LCTL_AN_ENABLE 0x10000 -#define E1000_PCS_LCTL_AN_RESTART 0x20000 -#define E1000_PCS_LCTL_AN_TIMEOUT 0x40000 -#define E1000_PCS_LCTL_AN_SGMII_BYPASS 0x80000 -#define E1000_PCS_LCTL_AN_SGMII_TRIGGER 0x100000 -#define E1000_PCS_LCTL_FAST_LINK_TIMER 0x1000000 -#define E1000_PCS_LCTL_LINK_OK_FIX 0x2000000 -#define E1000_PCS_LCTL_CRS_ON_NI 0x4000000 -#define E1000_ENABLE_SERDES_LOOPBACK 0x0410 - -#define E1000_PCS_LSTS_LINK_OK 1 -#define E1000_PCS_LSTS_SPEED_10 0 -#define E1000_PCS_LSTS_SPEED_100 2 -#define E1000_PCS_LSTS_SPEED_1000 4 -#define E1000_PCS_LSTS_DUPLEX_FULL 8 -#define E1000_PCS_LSTS_SYNK_OK 0x10 -#define E1000_PCS_LSTS_AN_COMPLETE 0x10000 -#define E1000_PCS_LSTS_AN_PAGE_RX 0x20000 -#define E1000_PCS_LSTS_AN_TIMED_OUT 0x40000 -#define E1000_PCS_LSTS_AN_REMOTE_FAULT 0x80000 -#define E1000_PCS_LSTS_AN_ERROR_RWS 0x100000 +#define E1000_CTRL_LRST 0x00000008 /* Link reset. 
0=normal,1=reset */ +#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */ +#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ +#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */ +#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */ +#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */ +#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */ +#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */ +#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ +#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ +#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ +#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ +#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */ +#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */ +#define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */ +#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ +#define E1000_CTRL_RST 0x04000000 /* Global reset */ +#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ +#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ +#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ +#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ +#define E1000_CTRL_I2C_ENA 0x02000000 /* I2C enable */ + + +#define E1000_CONNSW_ENRGSRC 0x4 +#define E1000_CONNSW_PHYSD 0x400 +#define E1000_CONNSW_PHY_PDN 0x800 +#define E1000_CONNSW_SERDESD 0x200 +#define E1000_CONNSW_AUTOSENSE_CONF 0x2 +#define E1000_CONNSW_AUTOSENSE_EN 0x1 +#define E1000_PCS_CFG_PCS_EN 8 +#define E1000_PCS_LCTL_FLV_LINK_UP 1 +#define E1000_PCS_LCTL_FSV_10 0 +#define E1000_PCS_LCTL_FSV_100 2 +#define E1000_PCS_LCTL_FSV_1000 4 +#define E1000_PCS_LCTL_FDV_FULL 8 +#define E1000_PCS_LCTL_FSD 0x10 +#define E1000_PCS_LCTL_FORCE_LINK 0x20 +#define E1000_PCS_LCTL_FORCE_FCTRL 0x80 +#define E1000_PCS_LCTL_AN_ENABLE 0x10000 +#define E1000_PCS_LCTL_AN_RESTART 0x20000 +#define E1000_PCS_LCTL_AN_TIMEOUT 0x40000 +#define E1000_ENABLE_SERDES_LOOPBACK 0x0410 + +#define E1000_PCS_LSTS_LINK_OK 1 +#define E1000_PCS_LSTS_SPEED_100 2 +#define E1000_PCS_LSTS_SPEED_1000 4 +#define E1000_PCS_LSTS_DUPLEX_FULL 8 +#define E1000_PCS_LSTS_SYNK_OK 0x10 +#define E1000_PCS_LSTS_AN_COMPLETE 0x10000 /* Device Status */ -#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ -#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ -#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ -#define E1000_STATUS_FUNC_SHIFT 2 -#define E1000_STATUS_FUNC_0 0x00000000 /* Function 0 */ -#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ -#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ -#define E1000_STATUS_TBIMODE 0x00000020 /* TBI mode */ -#define E1000_STATUS_SPEED_MASK 0x000000C0 -#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ -#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ -#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ -#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion by NVM */ -#define E1000_STATUS_ASDV 0x00000300 /* Auto speed detect value */ -#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */ -#define E1000_STATUS_DOCK_CI 0x00000800 /* Change in Dock/Undock state. - * Clear on write '0'. 
*/ -#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Master request status */ -#define E1000_STATUS_MTXCKOK 0x00000400 /* MTX clock running OK */ -#define E1000_STATUS_PCI66 0x00000800 /* In 66Mhz slot */ -#define E1000_STATUS_BUS64 0x00001000 /* In 64 bit slot */ -#define E1000_STATUS_PCIX_MODE 0x00002000 /* PCI-X mode */ -#define E1000_STATUS_PCIX_SPEED 0x0000C000 /* PCI-X bus speed */ -#define E1000_STATUS_BMC_SKU_0 0x00100000 /* BMC USB redirect disabled */ -#define E1000_STATUS_BMC_SKU_1 0x00200000 /* BMC SRAM disabled */ -#define E1000_STATUS_BMC_SKU_2 0x00400000 /* BMC SDRAM disabled */ -#define E1000_STATUS_BMC_CRYPTO 0x00800000 /* BMC crypto disabled */ -#define E1000_STATUS_BMC_LITE 0x01000000 /* BMC external code execution - * disabled */ -#define E1000_STATUS_RGMII_ENABLE 0x02000000 /* RGMII disabled */ -#define E1000_STATUS_FUSE_8 0x04000000 -#define E1000_STATUS_FUSE_9 0x08000000 -#define E1000_STATUS_SERDES0_DIS 0x10000000 /* SERDES disabled on port 0 */ -#define E1000_STATUS_SERDES1_DIS 0x20000000 /* SERDES disabled on port 1 */ - -/* Constants used to interpret the masked PCI-X bus speed. */ -#define E1000_STATUS_PCIX_SPEED_66 0x00000000 /* PCI-X bus speed 50-66 MHz */ -#define E1000_STATUS_PCIX_SPEED_100 0x00004000 /* PCI-X bus speed 66-100 MHz */ -#define E1000_STATUS_PCIX_SPEED_133 0x00008000 /*PCI-X bus speed 100-133 MHz*/ - -#define SPEED_10 10 -#define SPEED_100 100 -#define SPEED_1000 1000 -#define HALF_DUPLEX 1 -#define FULL_DUPLEX 2 - -#define PHY_FORCE_TIME 20 - -#define ADVERTISE_10_HALF 0x0001 -#define ADVERTISE_10_FULL 0x0002 -#define ADVERTISE_100_HALF 0x0004 -#define ADVERTISE_100_FULL 0x0008 -#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */ -#define ADVERTISE_1000_FULL 0x0020 +#define E1000_STATUS_FD 0x00000001 /* Duplex 0=half 1=full */ +#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ +#define E1000_STATUS_FUNC_SHIFT 2 +#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ +#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ +#define E1000_STATUS_SPEED_MASK 0x000000C0 +#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ +#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ +#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ +#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Compltn by NVM */ +#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */ +#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Master request status */ +#define E1000_STATUS_2P5_SKU 0x00001000 /* Val of 2.5GBE SKU strap */ +#define E1000_STATUS_2P5_SKU_OVER 0x00002000 /* Val of 2.5GBE SKU Over */ + +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define SPEED_2500 2500 +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + + +#define ADVERTISE_10_HALF 0x0001 +#define ADVERTISE_10_FULL 0x0002 +#define ADVERTISE_100_HALF 0x0004 +#define ADVERTISE_100_FULL 0x0008 +#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */ +#define ADVERTISE_1000_FULL 0x0020 /* 1000/H is not supported, nor spec-compliant. 
*/ -#define E1000_ALL_SPEED_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ - ADVERTISE_100_HALF | ADVERTISE_100_FULL | \ - ADVERTISE_1000_FULL) -#define E1000_ALL_NOT_GIG (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ - ADVERTISE_100_HALF | ADVERTISE_100_FULL) -#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL) -#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL) -#define E1000_ALL_FULL_DUPLEX (ADVERTISE_10_FULL | ADVERTISE_100_FULL | \ - ADVERTISE_1000_FULL) -#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF) - -#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX +#define E1000_ALL_SPEED_DUPLEX ( \ + ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \ + ADVERTISE_100_FULL | ADVERTISE_1000_FULL) +#define E1000_ALL_NOT_GIG ( \ + ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \ + ADVERTISE_100_FULL) +#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL) +#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL) +#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF) + +#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX /* LED Control */ -#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F -#define E1000_LEDCTL_LED0_MODE_SHIFT 0 -#define E1000_LEDCTL_LED0_BLINK_RATE 0x00000020 -#define E1000_LEDCTL_LED0_IVRT 0x00000040 -#define E1000_LEDCTL_LED0_BLINK 0x00000080 -#define E1000_LEDCTL_LED1_MODE_MASK 0x00000F00 -#define E1000_LEDCTL_LED1_MODE_SHIFT 8 -#define E1000_LEDCTL_LED1_BLINK_RATE 0x00002000 -#define E1000_LEDCTL_LED1_IVRT 0x00004000 -#define E1000_LEDCTL_LED1_BLINK 0x00008000 -#define E1000_LEDCTL_LED2_MODE_MASK 0x000F0000 -#define E1000_LEDCTL_LED2_MODE_SHIFT 16 -#define E1000_LEDCTL_LED2_BLINK_RATE 0x00200000 -#define E1000_LEDCTL_LED2_IVRT 0x00400000 -#define E1000_LEDCTL_LED2_BLINK 0x00800000 -#define E1000_LEDCTL_LED3_MODE_MASK 0x0F000000 -#define E1000_LEDCTL_LED3_MODE_SHIFT 24 -#define E1000_LEDCTL_LED3_BLINK_RATE 0x20000000 -#define E1000_LEDCTL_LED3_IVRT 0x40000000 -#define E1000_LEDCTL_LED3_BLINK 0x80000000 - -#define E1000_LEDCTL_MODE_LINK_10_1000 0x0 -#define E1000_LEDCTL_MODE_LINK_100_1000 0x1 -#define E1000_LEDCTL_MODE_LINK_UP 0x2 -#define E1000_LEDCTL_MODE_ACTIVITY 0x3 -#define E1000_LEDCTL_MODE_LINK_ACTIVITY 0x4 -#define E1000_LEDCTL_MODE_LINK_10 0x5 -#define E1000_LEDCTL_MODE_LINK_100 0x6 -#define E1000_LEDCTL_MODE_LINK_1000 0x7 -#define E1000_LEDCTL_MODE_PCIX_MODE 0x8 -#define E1000_LEDCTL_MODE_FULL_DUPLEX 0x9 -#define E1000_LEDCTL_MODE_COLLISION 0xA -#define E1000_LEDCTL_MODE_BUS_SPEED 0xB -#define E1000_LEDCTL_MODE_BUS_SIZE 0xC -#define E1000_LEDCTL_MODE_PAUSED 0xD -#define E1000_LEDCTL_MODE_LED_ON 0xE -#define E1000_LEDCTL_MODE_LED_OFF 0xF +#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F +#define E1000_LEDCTL_LED0_MODE_SHIFT 0 +#define E1000_LEDCTL_LED0_IVRT 0x00000040 +#define E1000_LEDCTL_LED0_BLINK 0x00000080 + +#define E1000_LEDCTL_MODE_LED_ON 0xE +#define E1000_LEDCTL_MODE_LED_OFF 0xF /* Transmit Descriptor bit definitions */ -#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */ -#define E1000_TXD_DTYP_C 0x00000000 /* Context Descriptor */ -#define E1000_TXD_POPTS_SHIFT 8 /* POPTS shift */ -#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ -#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ -#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ -#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ -#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */ -#define E1000_TXD_CMD_RS 
0x08000000 /* Report Status */ -#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */ -#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ -#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ -#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */ -#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ -#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */ -#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */ -#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */ -#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */ -#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */ -#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ -#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */ -/* Extended desc bits for Linksec and timesync */ +#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */ +#define E1000_TXD_DTYP_C 0x00000000 /* Context Descriptor */ +#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ +#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */ +#define E1000_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */ +#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */ +#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */ +#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */ +#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */ +#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */ +#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */ +#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ +#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */ +#define E1000_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */ /* Transmit Control */ -#define E1000_TCTL_RST 0x00000001 /* software reset */ -#define E1000_TCTL_EN 0x00000002 /* enable tx */ -#define E1000_TCTL_BCE 0x00000004 /* busy check enable */ -#define E1000_TCTL_PSP 0x00000008 /* pad short packets */ -#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */ -#define E1000_TCTL_COLD 0x003ff000 /* collision distance */ -#define E1000_TCTL_SWXOFF 0x00400000 /* SW Xoff transmission */ -#define E1000_TCTL_PBE 0x00800000 /* Packet Burst Enable */ -#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ -#define E1000_TCTL_NRTU 0x02000000 /* No Re-transmit on underrun */ -#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */ +#define E1000_TCTL_EN 0x00000002 /* enable Tx */ +#define E1000_TCTL_PSP 0x00000008 /* pad short packets */ +#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */ +#define E1000_TCTL_COLD 0x003ff000 /* collision distance */ +#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ +#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */ /* Transmit Arbitration Count */ -#define E1000_TARC0_ENABLE 0x00000400 /* Enable Tx Queue 0 */ +#define E1000_TARC0_ENABLE 0x00000400 /* Enable Tx Queue 0 */ /* SerDes Control */ -#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400 +#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400 +#define E1000_SCTL_ENABLE_SERDES_LOOPBACK 0x0410 /* Receive Checksum Control */ -#define 
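To make the transmit descriptor bits above concrete, here is a minimal sketch (an assumption-labelled illustration, not code from this disclosure) of the standard e1000 legacy Tx descriptor and the usual EOP/IFCS/RS command combination:

    #include <stdint.h>

    #define E1000_TXD_CMD_EOP  0x01000000 /* End of Packet */
    #define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS */
    #define E1000_TXD_CMD_RS   0x08000000 /* Report Status */
    #define E1000_TXD_STAT_DD  0x00000001 /* Descriptor Done */

    /* Legacy Tx descriptor: 16 bytes, command in bits 31:24 of 'lower'. */
    struct e1000_legacy_tx_desc {
        uint64_t buffer_addr; /* DMA address of the packet buffer */
        uint32_t lower;       /* length [15:0], CSO [23:16], CMD [31:24] */
        uint32_t upper;       /* STA [3:0], CSS, special */
    };

    /* Describe one single-buffer packet and request status write-back. */
    static void tx_fill(struct e1000_legacy_tx_desc *d, uint64_t dma,
                        uint16_t len)
    {
        d->buffer_addr = dma;
        d->lower = (uint32_t)len | E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS |
                   E1000_TXD_CMD_RS;
        d->upper = 0; /* hardware sets E1000_TXD_STAT_DD when done */
    }

    static int tx_done(const volatile struct e1000_legacy_tx_desc *d)
    {
        return (d->upper & E1000_TXD_STAT_DD) != 0;
    }

Setting E1000_TXD_CMD_RS is what makes polling E1000_TXD_STAT_DD meaningful; without it the hardware is not obliged to write the status back.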
E1000_RXCSUM_PCSS_MASK 0x000000FF /* Packet Checksum Start */ -#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */ -#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ -#define E1000_RXCSUM_IPV6OFL 0x00000400 /* IPv6 checksum offload */ -#define E1000_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */ -#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ -#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ +#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */ +#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ +#define E1000_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */ +#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ +#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ /* Header split receive */ -#define E1000_RFCTL_ISCSI_DIS 0x00000001 -#define E1000_RFCTL_ISCSI_DWC_MASK 0x0000003E -#define E1000_RFCTL_ISCSI_DWC_SHIFT 1 -#define E1000_RFCTL_NFSW_DIS 0x00000040 -#define E1000_RFCTL_NFSR_DIS 0x00000080 -#define E1000_RFCTL_NFS_VER_MASK 0x00000300 -#define E1000_RFCTL_NFS_VER_SHIFT 8 -#define E1000_RFCTL_IPV6_DIS 0x00000400 -#define E1000_RFCTL_IPV6_XSUM_DIS 0x00000800 -#define E1000_RFCTL_ACK_DIS 0x00001000 -#define E1000_RFCTL_ACKD_DIS 0x00002000 -#define E1000_RFCTL_IPFRSP_DIS 0x00004000 -#define E1000_RFCTL_EXTEN 0x00008000 -#define E1000_RFCTL_IPV6_EX_DIS 0x00010000 -#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 -#define E1000_RFCTL_LEF 0x00040000 +#define E1000_RFCTL_NFSW_DIS 0x00000040 +#define E1000_RFCTL_NFSR_DIS 0x00000080 +#define E1000_RFCTL_ACK_DIS 0x00001000 +#define E1000_RFCTL_EXTEN 0x00008000 +#define E1000_RFCTL_IPV6_EX_DIS 0x00010000 +#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 +#define E1000_RFCTL_LEF 0x00040000 /* Collision related configuration parameters */ -#define E1000_COLLISION_THRESHOLD 15 -#define E1000_CT_SHIFT 4 -#define E1000_COLLISION_DISTANCE 63 -#define E1000_COLD_SHIFT 12 +#define E1000_COLLISION_THRESHOLD 15 +#define E1000_CT_SHIFT 4 +#define E1000_COLLISION_DISTANCE 63 +#define E1000_COLD_SHIFT 12 /* Default values for the transmit IPG register */ -#define DEFAULT_82543_TIPG_IPGT_FIBER 9 -#define DEFAULT_82543_TIPG_IPGT_COPPER 8 +#define DEFAULT_82543_TIPG_IPGT_FIBER 9 +#define DEFAULT_82543_TIPG_IPGT_COPPER 8 -#define E1000_TIPG_IPGT_MASK 0x000003FF -#define E1000_TIPG_IPGR1_MASK 0x000FFC00 -#define E1000_TIPG_IPGR2_MASK 0x3FF00000 +#define E1000_TIPG_IPGT_MASK 0x000003FF -#define DEFAULT_82543_TIPG_IPGR1 8 -#define E1000_TIPG_IPGR1_SHIFT 10 +#define DEFAULT_82543_TIPG_IPGR1 8 +#define E1000_TIPG_IPGR1_SHIFT 10 -#define DEFAULT_82543_TIPG_IPGR2 6 -#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7 -#define E1000_TIPG_IPGR2_SHIFT 20 +#define DEFAULT_82543_TIPG_IPGR2 6 +#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7 +#define E1000_TIPG_IPGR2_SHIFT 20 /* Ethertype field values */ -#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */ +#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */ -#define ETHERNET_FCS_SIZE 4 -#define MAX_JUMBO_FRAME_SIZE 0x3F00 +#define ETHERNET_FCS_SIZE 4 +#define MAX_JUMBO_FRAME_SIZE 0x3F00 /* Extended Configuration Control and Size */ -#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020 -#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001 -#define E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE 0x00000008 -#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020 -#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK 0x00FF0000 -#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT 16 -#define 
E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK 0x0FFF0000 -#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT 16 - -#define E1000_PHY_CTRL_SPD_EN 0x00000001 -#define E1000_PHY_CTRL_D0A_LPLU 0x00000002 -#define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004 -#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008 -#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040 - -#define E1000_KABGTXD_BGSQLBIAS 0x00050000 +#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020 +#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001 +#define E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE 0x00000008 +#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020 +#define E1000_EXTCNF_CTRL_GATE_PHY_CFG 0x00000080 +#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK 0x00FF0000 +#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT 16 +#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK 0x0FFF0000 +#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT 16 + +#define E1000_PHY_CTRL_D0A_LPLU 0x00000002 +#define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004 +#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008 +#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040 + +#define E1000_KABGTXD_BGSQLBIAS 0x00050000 /* PBA constants */ -#define E1000_PBA_6K 0x0006 /* 6KB */ -#define E1000_PBA_8K 0x0008 /* 8KB */ -#define E1000_PBA_10K 0x000A /* 10KB */ -#define E1000_PBA_12K 0x000C /* 12KB */ -#define E1000_PBA_14K 0x000E /* 14KB */ -#define E1000_PBA_16K 0x0010 /* 16KB */ -#define E1000_PBA_18K 0x0012 -#define E1000_PBA_20K 0x0014 -#define E1000_PBA_22K 0x0016 -#define E1000_PBA_24K 0x0018 -#define E1000_PBA_26K 0x001A -#define E1000_PBA_30K 0x001E -#define E1000_PBA_32K 0x0020 -#define E1000_PBA_34K 0x0022 -#define E1000_PBA_35K 0x0023 -#define E1000_PBA_38K 0x0026 -#define E1000_PBA_40K 0x0028 -#define E1000_PBA_48K 0x0030 /* 48KB */ -#define E1000_PBA_64K 0x0040 /* 64KB */ - -#define E1000_PBS_16K E1000_PBA_16K -#define E1000_PBS_24K E1000_PBA_24K - -#define IFS_MAX 80 -#define IFS_MIN 40 -#define IFS_RATIO 4 -#define IFS_STEP 10 -#define MIN_NUM_XMITS 1000 +#define E1000_PBA_8K 0x0008 /* 8KB */ +#define E1000_PBA_10K 0x000A /* 10KB */ +#define E1000_PBA_12K 0x000C /* 12KB */ +#define E1000_PBA_14K 0x000E /* 14KB */ +#define E1000_PBA_16K 0x0010 /* 16KB */ +#define E1000_PBA_18K 0x0012 +#define E1000_PBA_20K 0x0014 +#define E1000_PBA_22K 0x0016 +#define E1000_PBA_24K 0x0018 +#define E1000_PBA_26K 0x001A +#define E1000_PBA_30K 0x001E +#define E1000_PBA_32K 0x0020 +#define E1000_PBA_34K 0x0022 +#define E1000_PBA_35K 0x0023 +#define E1000_PBA_38K 0x0026 +#define E1000_PBA_40K 0x0028 +#define E1000_PBA_48K 0x0030 /* 48KB */ +#define E1000_PBA_64K 0x0040 /* 64KB */ + +#define E1000_PBA_RXA_MASK 0xFFFF + +#define E1000_PBS_16K E1000_PBA_16K + +#define IFS_MAX 80 +#define IFS_MIN 40 +#define IFS_RATIO 4 +#define IFS_STEP 10 +#define MIN_NUM_XMITS 1000 /* SW Semaphore Register */ -#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ -#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ -#define E1000_SWSM_WMNG 0x00000004 /* Wake MNG Clock */ -#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */ +#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ +#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */ -#define E1000_SWSM2_LOCK 0x00000002 /* Secondary driver semaphore bit */ +#define E1000_SWSM2_LOCK 0x00000002 /* Secondary driver semaphore bit */ /* Interrupt Cause Read */ -#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ -#define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */ -#define 
E1000_ICR_LSC 0x00000004 /* Link Status Change */ -#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */ -#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. threshold (0) */ -#define E1000_ICR_RXO 0x00000040 /* rx overrun */ -#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */ -#define E1000_ICR_VMMB 0x00000100 /* VM MB event */ -#define E1000_ICR_MDAC 0x00000200 /* MDIO access complete */ -#define E1000_ICR_RXCFG 0x00000400 /* Rx /c/ ordered set */ -#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */ -#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */ -#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */ -#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */ -#define E1000_ICR_TXD_LOW 0x00008000 -#define E1000_ICR_SRPD 0x00010000 -#define E1000_ICR_ACK 0x00020000 /* Receive Ack frame */ -#define E1000_ICR_MNG 0x00040000 /* Manageability event */ -#define E1000_ICR_DOCK 0x00080000 /* Dock/Undock */ -#define E1000_ICR_DRSTA 0x40000000 /* Device Reset Asserted */ -#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver - * should claim the interrupt */ -#define E1000_ICR_RXD_FIFO_PAR0 0x00100000 /* Q0 Rx desc FIFO parity error */ -#define E1000_ICR_TXD_FIFO_PAR0 0x00200000 /* Q0 Tx desc FIFO parity error */ -#define E1000_ICR_HOST_ARB_PAR 0x00400000 /* host arb read buffer parity err */ -#define E1000_ICR_PB_PAR 0x00800000 /* packet buffer parity error */ -#define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* Q1 Rx desc FIFO parity error */ -#define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* Q1 Tx desc FIFO parity error */ -#define E1000_ICR_ALL_PARITY 0x03F00000 /* all parity error bits */ -#define E1000_ICR_DSW 0x00000020 /* FW changed the status of DISSW - * bit in the FWSM */ -#define E1000_ICR_PHYINT 0x00001000 /* LAN connected device generates - * an interrupt */ -#define E1000_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */ -#define E1000_ICR_EPRST 0x00100000 /* ME hardware reset occurs */ -#define E1000_ICR_FER 0x00400000 /* Fatal Error */ +#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ +#define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */ +#define E1000_ICR_LSC 0x00000004 /* Link Status Change */ +#define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */ +#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. 
threshold (0) */ +#define E1000_ICR_RXO 0x00000040 /* Rx overrun */ +#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */ +#define E1000_ICR_VMMB 0x00000100 /* VM MB event */ +#define E1000_ICR_RXCFG 0x00000400 /* Rx /c/ ordered set */ +#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */ +#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */ +#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */ +#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */ +#define E1000_ICR_TXD_LOW 0x00008000 +#define E1000_ICR_MNG 0x00040000 /* Manageability event */ +#define E1000_ICR_TS 0x00080000 /* Time Sync Interrupt */ +#define E1000_ICR_DRSTA 0x40000000 /* Device Reset Asserted */ +/* If this bit asserted, the driver should claim the interrupt */ +#define E1000_ICR_INT_ASSERTED 0x80000000 +#define E1000_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */ +#define E1000_ICR_FER 0x00400000 /* Fatal Error */ + +#define E1000_ICR_THS 0x00800000 /* ICR.THS: Thermal Sensor Event*/ +#define E1000_ICR_MDDET 0x10000000 /* Malicious Driver Detect */ /* Extended Interrupt Cause Read */ -#define E1000_EICR_RX_QUEUE0 0x00000001 /* Rx Queue 0 Interrupt */ -#define E1000_EICR_RX_QUEUE1 0x00000002 /* Rx Queue 1 Interrupt */ -#define E1000_EICR_RX_QUEUE2 0x00000004 /* Rx Queue 2 Interrupt */ -#define E1000_EICR_RX_QUEUE3 0x00000008 /* Rx Queue 3 Interrupt */ -#define E1000_EICR_TX_QUEUE0 0x00000100 /* Tx Queue 0 Interrupt */ -#define E1000_EICR_TX_QUEUE1 0x00000200 /* Tx Queue 1 Interrupt */ -#define E1000_EICR_TX_QUEUE2 0x00000400 /* Tx Queue 2 Interrupt */ -#define E1000_EICR_TX_QUEUE3 0x00000800 /* Tx Queue 3 Interrupt */ -#define E1000_EICR_TCP_TIMER 0x40000000 /* TCP Timer */ -#define E1000_EICR_OTHER 0x80000000 /* Interrupt Cause Active */ +#define E1000_EICR_RX_QUEUE0 0x00000001 /* Rx Queue 0 Interrupt */ +#define E1000_EICR_RX_QUEUE1 0x00000002 /* Rx Queue 1 Interrupt */ +#define E1000_EICR_RX_QUEUE2 0x00000004 /* Rx Queue 2 Interrupt */ +#define E1000_EICR_RX_QUEUE3 0x00000008 /* Rx Queue 3 Interrupt */ +#define E1000_EICR_TX_QUEUE0 0x00000100 /* Tx Queue 0 Interrupt */ +#define E1000_EICR_TX_QUEUE1 0x00000200 /* Tx Queue 1 Interrupt */ +#define E1000_EICR_TX_QUEUE2 0x00000400 /* Tx Queue 2 Interrupt */ +#define E1000_EICR_TX_QUEUE3 0x00000800 /* Tx Queue 3 Interrupt */ +#define E1000_EICR_TCP_TIMER 0x40000000 /* TCP Timer */ +#define E1000_EICR_OTHER 0x80000000 /* Interrupt Cause Active */ /* TCP Timer */ -#define E1000_TCPTIMER_KS 0x00000100 /* KickStart */ -#define E1000_TCPTIMER_COUNT_ENABLE 0x00000200 /* Count Enable */ -#define E1000_TCPTIMER_COUNT_FINISH 0x00000400 /* Count finish */ -#define E1000_TCPTIMER_LOOP 0x00000800 /* Loop */ +#define E1000_TCPTIMER_KS 0x00000100 /* KickStart */ +#define E1000_TCPTIMER_COUNT_ENABLE 0x00000200 /* Count Enable */ +#define E1000_TCPTIMER_COUNT_FINISH 0x00000400 /* Count finish */ +#define E1000_TCPTIMER_LOOP 0x00000800 /* Loop */ -/* - * This defines the bits that are set in the Interrupt Mask - * Set/Read Register. Each bit is documented below: - * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0) - * o RXSEQ = Receive Sequence Error - */ -#define POLL_IMS_ENABLE_MASK ( \ - E1000_IMS_RXDMT0 | \ - E1000_IMS_RXSEQ) - -/* - * This defines the bits that are set in the Interrupt Mask +/* This defines the bits that are set in the Interrupt Mask * Set/Read Register. 
Each bit is documented below: * o RXT0 = Receiver Timer Interrupt (ring 0) * o TXDW = Transmit Descriptor Written Back @@ -776,800 +532,849 @@ * o LSC = Link Status Change */ #define IMS_ENABLE_MASK ( \ - E1000_IMS_RXT0 | \ - E1000_IMS_TXDW | \ - E1000_IMS_RXDMT0 | \ - E1000_IMS_RXSEQ | \ - E1000_IMS_LSC) + E1000_IMS_RXT0 | \ + E1000_IMS_TXDW | \ + E1000_IMS_RXDMT0 | \ + E1000_IMS_RXSEQ | \ + E1000_IMS_LSC) /* Interrupt Mask Set */ -#define E1000_IMS_TXDW E1000_ICR_TXDW /* Tx desc written back */ -#define E1000_IMS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ -#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ -#define E1000_IMS_VMMB E1000_ICR_VMMB /* Mail box activity */ -#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ -#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */ -#define E1000_IMS_RXO E1000_ICR_RXO /* rx overrun */ -#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ -#define E1000_IMS_MDAC E1000_ICR_MDAC /* MDIO access complete */ -#define E1000_IMS_RXCFG E1000_ICR_RXCFG /* Rx /c/ ordered set */ -#define E1000_IMS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ -#define E1000_IMS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ -#define E1000_IMS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ -#define E1000_IMS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ -#define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW -#define E1000_IMS_SRPD E1000_ICR_SRPD -#define E1000_IMS_ACK E1000_ICR_ACK /* Receive Ack frame */ -#define E1000_IMS_MNG E1000_ICR_MNG /* Manageability event */ -#define E1000_IMS_DOCK E1000_ICR_DOCK /* Dock/Undock */ -#define E1000_IMS_DRSTA E1000_ICR_DRSTA /* Device Reset Asserted */ -#define E1000_IMS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* Q0 Rx desc FIFO - * parity error */ -#define E1000_IMS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* Q0 Tx desc FIFO - * parity error */ -#define E1000_IMS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer - * parity error */ -#define E1000_IMS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity - * error */ -#define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* Q1 Rx desc FIFO - * parity error */ -#define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* Q1 Tx desc FIFO - * parity error */ -#define E1000_IMS_DSW E1000_ICR_DSW -#define E1000_IMS_PHYINT E1000_ICR_PHYINT -#define E1000_IMS_DOUTSYNC E1000_ICR_DOUTSYNC /* NIC DMA out of sync */ -#define E1000_IMS_EPRST E1000_ICR_EPRST -#define E1000_IMS_FER E1000_ICR_FER /* Fatal Error */ - +#define E1000_IMS_TXDW E1000_ICR_TXDW /* Tx desc written back */ +#define E1000_IMS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_IMS_VMMB E1000_ICR_VMMB /* Mail box activity */ +#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ +#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. 
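The IMS_ENABLE_MASK above is what a driver writes to the Interrupt Mask Set register to unmask its working set of causes. A minimal sketch, assuming a memory-mapped register block and the conventional IMS offset of 0xD0 (both assumptions for illustration, not taken from this disclosure):

    #include <stdint.h>

    /* Values mirror the ICR/IMS bit defines above. */
    #define E1000_IMS_TXDW   0x00000001 /* Tx desc written back */
    #define E1000_IMS_LSC    0x00000004 /* Link Status Change */
    #define E1000_IMS_RXSEQ  0x00000008 /* Rx sequence error */
    #define E1000_IMS_RXDMT0 0x00000010 /* Rx desc min. threshold */
    #define E1000_IMS_RXT0   0x00000080 /* Rx timer intr */

    #define IMS_ENABLE_MASK ( \
        E1000_IMS_RXT0 | E1000_IMS_TXDW | E1000_IMS_RXDMT0 | \
        E1000_IMS_RXSEQ | E1000_IMS_LSC)

    #define E1000_IMS_REG 0x000D0 /* assumed IMS offset */

    /* IMS is write-1-to-set: one write unmasks every listed cause. */
    static inline void e1000_irq_enable(volatile uint8_t *hw_addr)
    {
        *(volatile uint32_t *)(hw_addr + E1000_IMS_REG) = IMS_ENABLE_MASK;
    }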
threshold */ +#define E1000_IMS_RXO E1000_ICR_RXO /* Rx overrun */ +#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */ +#define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_IMS_TS E1000_ICR_TS /* Time Sync Interrupt */ +#define E1000_IMS_DRSTA E1000_ICR_DRSTA /* Device Reset Asserted */ +#define E1000_IMS_DOUTSYNC E1000_ICR_DOUTSYNC /* NIC DMA out of sync */ +#define E1000_IMS_FER E1000_ICR_FER /* Fatal Error */ + +#define E1000_IMS_THS E1000_ICR_THS /* ICR.TS: Thermal Sensor Event*/ +#define E1000_IMS_MDDET E1000_ICR_MDDET /* Malicious Driver Detect */ /* Extended Interrupt Mask Set */ -#define E1000_EIMS_RX_QUEUE0 E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */ -#define E1000_EIMS_RX_QUEUE1 E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */ -#define E1000_EIMS_RX_QUEUE2 E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */ -#define E1000_EIMS_RX_QUEUE3 E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */ -#define E1000_EIMS_TX_QUEUE0 E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */ -#define E1000_EIMS_TX_QUEUE1 E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */ -#define E1000_EIMS_TX_QUEUE2 E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */ -#define E1000_EIMS_TX_QUEUE3 E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */ -#define E1000_EIMS_TCP_TIMER E1000_EICR_TCP_TIMER /* TCP Timer */ -#define E1000_EIMS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */ +#define E1000_EIMS_RX_QUEUE0 E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */ +#define E1000_EIMS_RX_QUEUE1 E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */ +#define E1000_EIMS_RX_QUEUE2 E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */ +#define E1000_EIMS_RX_QUEUE3 E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */ +#define E1000_EIMS_TX_QUEUE0 E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */ +#define E1000_EIMS_TX_QUEUE1 E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */ +#define E1000_EIMS_TX_QUEUE2 E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */ +#define E1000_EIMS_TX_QUEUE3 E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */ +#define E1000_EIMS_TCP_TIMER E1000_EICR_TCP_TIMER /* TCP Timer */ +#define E1000_EIMS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */ /* Interrupt Cause Set */ -#define E1000_ICS_TXDW E1000_ICR_TXDW /* Tx desc written back */ -#define E1000_ICS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ -#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ -#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ -#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ -#define E1000_ICS_RXO E1000_ICR_RXO /* rx overrun */ -#define E1000_ICS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ -#define E1000_ICS_MDAC E1000_ICR_MDAC /* MDIO access complete */ -#define E1000_ICS_RXCFG E1000_ICR_RXCFG /* Rx /c/ ordered set */ -#define E1000_ICS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ -#define E1000_ICS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ -#define E1000_ICS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ -#define E1000_ICS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ -#define E1000_ICS_TXD_LOW E1000_ICR_TXD_LOW -#define E1000_ICS_SRPD E1000_ICR_SRPD -#define E1000_ICS_ACK E1000_ICR_ACK /* Receive Ack frame */ -#define E1000_ICS_MNG E1000_ICR_MNG /* Manageability event */ -#define E1000_ICS_DOCK E1000_ICR_DOCK /* Dock/Undock */ -#define E1000_ICS_DRSTA E1000_ICR_DRSTA /* Device Reset Aserted */ -#define E1000_ICS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* Q0 Rx desc FIFO - * parity error */ -#define E1000_ICS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* Q0 Tx desc FIFO - * parity error */ -#define E1000_ICS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer - * parity error */ -#define E1000_ICS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity - * error */ -#define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* Q1 Rx desc FIFO - * parity error */ -#define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* Q1 Tx desc FIFO - * parity error */ -#define E1000_ICS_DSW E1000_ICR_DSW -#define E1000_ICS_DOUTSYNC E1000_ICR_DOUTSYNC /* NIC DMA out of sync */ -#define E1000_ICS_PHYINT E1000_ICR_PHYINT -#define E1000_ICS_EPRST E1000_ICR_EPRST +#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ +#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */ /* Extended Interrupt Cause Set */ -#define E1000_EICS_RX_QUEUE0 E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */ -#define E1000_EICS_RX_QUEUE1 E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */ -#define E1000_EICS_RX_QUEUE2 E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */ -#define E1000_EICS_RX_QUEUE3 E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */ -#define E1000_EICS_TX_QUEUE0 E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */ -#define E1000_EICS_TX_QUEUE1 E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */ -#define E1000_EICS_TX_QUEUE2 E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */ -#define E1000_EICS_TX_QUEUE3 E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */ -#define E1000_EICS_TCP_TIMER E1000_EICR_TCP_TIMER /* TCP Timer */ -#define E1000_EICS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */ - -#define E1000_EITR_ITR_INT_MASK 0x0000FFFF +#define E1000_EICS_RX_QUEUE0 E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */ +#define E1000_EICS_RX_QUEUE1 E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */ +#define E1000_EICS_RX_QUEUE2 E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */ +#define E1000_EICS_RX_QUEUE3 E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */ +#define E1000_EICS_TX_QUEUE0 E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */ +#define E1000_EICS_TX_QUEUE1 E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */ +#define E1000_EICS_TX_QUEUE2 E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */ +#define E1000_EICS_TX_QUEUE3 E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */ +#define E1000_EICS_TCP_TIMER E1000_EICR_TCP_TIMER /* TCP Timer */ +#define E1000_EICS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */ + +#define E1000_EITR_ITR_INT_MASK 0x0000FFFF +/* E1000_EITR_CNT_IGNR is only for 82576 and newer */ +#define E1000_EITR_CNT_IGNR 0x80000000 /* Don't reset 
counters on write */ +#define E1000_EITR_INTERVAL 0x00007FFC /* Transmit Descriptor Control */ -#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */ -#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */ -#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */ -#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */ -#define E1000_TXDCTL_LWTHRESH 0xFE000000 /* TXDCTL Low Threshold */ -#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ +#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */ +#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */ +#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */ +#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */ +#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ #define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */ /* Enable the counting of descriptors still to be processed. */ -#define E1000_TXDCTL_COUNT_DESC 0x00400000 +#define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Flow Control Constants */ -#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 -#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 -#define FLOW_CONTROL_TYPE 0x8808 +#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 +#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 +#define FLOW_CONTROL_TYPE 0x8808 /* 802.1q VLAN Packet Size */ -#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */ -#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ +#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */ +#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ -/* Receive Address */ -/* +/* Receive Address * Number of high/low register pairs in the RAR. The RAR (Receive Address * Registers) holds the directed and multicast addresses that we monitor. * Technically, we have 16 spots. However, we reserve one of these spots * (RAR[15]) for our directed address used by controllers with * manageability enabled, allowing us room for 15 multicast addresses. 
*/ -#define E1000_RAR_ENTRIES 15 -#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ -#define E1000_RAL_MAC_ADDR_LEN 4 -#define E1000_RAH_MAC_ADDR_LEN 2 -#define E1000_RAH_POOL_MASK 0x03FC0000 -#define E1000_RAH_POOL_1 0x00040000 +#define E1000_RAR_ENTRIES 15 +#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ +#define E1000_RAL_MAC_ADDR_LEN 4 +#define E1000_RAH_MAC_ADDR_LEN 2 +#define E1000_RAH_QUEUE_MASK_82575 0x000C0000 +#define E1000_RAH_POOL_1 0x00040000 /* Error Codes */ -#define E1000_SUCCESS 0 -#define E1000_ERR_NVM 1 -#define E1000_ERR_PHY 2 -#define E1000_ERR_CONFIG 3 -#define E1000_ERR_PARAM 4 -#define E1000_ERR_MAC_INIT 5 -#define E1000_ERR_PHY_TYPE 6 -#define E1000_ERR_RESET 9 -#define E1000_ERR_MASTER_REQUESTS_PENDING 10 -#define E1000_ERR_HOST_INTERFACE_COMMAND 11 -#define E1000_BLK_PHY_RESET 12 -#define E1000_ERR_SWFW_SYNC 13 -#define E1000_NOT_IMPLEMENTED 14 -#define E1000_ERR_MBX 15 +#define E1000_SUCCESS 0 +#define E1000_ERR_NVM 1 +#define E1000_ERR_PHY 2 +#define E1000_ERR_CONFIG 3 +#define E1000_ERR_PARAM 4 +#define E1000_ERR_MAC_INIT 5 +#define E1000_ERR_PHY_TYPE 6 +#define E1000_ERR_RESET 9 +#define E1000_ERR_MASTER_REQUESTS_PENDING 10 +#define E1000_ERR_HOST_INTERFACE_COMMAND 11 +#define E1000_BLK_PHY_RESET 12 +#define E1000_ERR_SWFW_SYNC 13 +#define E1000_NOT_IMPLEMENTED 14 +#define E1000_ERR_MBX 15 +#define E1000_ERR_INVALID_ARGUMENT 16 +#define E1000_ERR_NO_SPACE 17 +#define E1000_ERR_NVM_PBA_SECTION 18 +#define E1000_ERR_I2C 19 +#define E1000_ERR_INVM_VALUE_NOT_FOUND 20 /* Loop limit on how long we wait for auto-negotiation to complete */ -#define FIBER_LINK_UP_LIMIT 50 -#define COPPER_LINK_UP_LIMIT 10 -#define PHY_AUTO_NEG_LIMIT 45 -#define PHY_FORCE_LIMIT 20 +#define FIBER_LINK_UP_LIMIT 50 +#define COPPER_LINK_UP_LIMIT 10 +#define PHY_AUTO_NEG_LIMIT 45 +#define PHY_FORCE_LIMIT 20 /* Number of 100 microseconds we wait for PCI Express master disable */ -#define MASTER_DISABLE_TIMEOUT 800 +#define MASTER_DISABLE_TIMEOUT 800 /* Number of milliseconds we wait for PHY configuration done after MAC reset */ -#define PHY_CFG_TIMEOUT 100 +#define PHY_CFG_TIMEOUT 100 /* Number of 2 milliseconds we wait for acquiring MDIO ownership. */ -#define MDIO_OWNERSHIP_TIMEOUT 10 +#define MDIO_OWNERSHIP_TIMEOUT 10 /* Number of milliseconds for NVM auto read done after MAC reset. 
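The Receive Address comment above splits each entry into a 4-byte low register and a 2-byte high register with a valid bit. A small sketch of that packing (illustrative; pack_rar() is a hypothetical helper, only E1000_RAH_AV comes from the defines):

    #include <stdint.h>

    #define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */

    /* Pack a 6-byte MAC address into a RAL/RAH register pair. */
    static void pack_rar(const uint8_t mac[6], uint32_t *ral, uint32_t *rah)
    {
        /* Low register: the first four octets, least significant first. */
        *ral = (uint32_t)mac[0] | ((uint32_t)mac[1] << 8) |
               ((uint32_t)mac[2] << 16) | ((uint32_t)mac[3] << 24);
        /* High register: the last two octets plus the Address Valid bit. */
        *rah = (uint32_t)mac[4] | ((uint32_t)mac[5] << 8) | E1000_RAH_AV;
    }

This matches E1000_RAL_MAC_ADDR_LEN (4) and E1000_RAH_MAC_ADDR_LEN (2) above: four address bytes go to the low register and two to the high one.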
*/ -#define AUTO_READ_DONE_TIMEOUT 10 +#define AUTO_READ_DONE_TIMEOUT 10 /* Flow Control */ -#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */ -#define E1000_FCRTH_XFCE 0x80000000 /* External Flow Control Enable */ -#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */ -#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ +#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */ +#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */ +#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ /* Transmit Configuration Word */ -#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */ -#define E1000_TXCW_HD 0x00000040 /* TXCW half duplex */ -#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */ -#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */ -#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */ -#define E1000_TXCW_RF 0x00003000 /* TXCW remote fault */ -#define E1000_TXCW_NP 0x00008000 /* TXCW next page */ -#define E1000_TXCW_CW 0x0000ffff /* TxConfigWord mask */ -#define E1000_TXCW_TXC 0x40000000 /* Transmit Config control */ -#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */ +#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */ +#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */ +#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */ +#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */ +#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */ /* Receive Configuration Word */ -#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */ -#define E1000_RXCW_NC 0x04000000 /* Receive config no carrier */ -#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */ -#define E1000_RXCW_CC 0x10000000 /* Receive config change */ -#define E1000_RXCW_C 0x20000000 /* Receive config */ -#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */ -#define E1000_RXCW_ANC 0x80000000 /* Auto-neg complete */ - -#define E1000_TSYNCTXCTL_VALID 0x00000001 /* tx timestamp valid */ -#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable tx timestampping */ - -#define E1000_TSYNCRXCTL_VALID 0x00000001 /* rx timestamp valid */ -#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* rx type mask */ -#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00 -#define E1000_TSYNCRXCTL_TYPE_L4_V1 0x02 -#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2 0x04 -#define E1000_TSYNCRXCTL_TYPE_ALL 0x08 -#define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A -#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable rx timestampping */ - -#define E1000_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF -#define E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00 -#define E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01 -#define E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE 0x02 -#define E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE 0x03 -#define E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE 0x04 - -#define E1000_TSYNCRXCFG_PTP_V2_MSGID_MASK 0x00000F00 -#define E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE 0x0000 -#define E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE 0x0100 -#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE 0x0200 -#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE 0x0300 -#define E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE 0x0800 -#define E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE 0x0900 -#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE 0x0A00 -#define E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE 0x0B00 -#define E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE 0x0C00 -#define 
E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE 0x0D00 - -#define E1000_TIMINCA_16NS_SHIFT 24 +#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */ +#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */ +#define E1000_RXCW_C 0x20000000 /* Receive config */ +#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */ + +#define E1000_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */ +#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */ + +#define E1000_TSYNCRXCTL_VALID 0x00000001 /* Rx timestamp valid */ +#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */ +#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00 +#define E1000_TSYNCRXCTL_TYPE_L4_V1 0x02 +#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2 0x04 +#define E1000_TSYNCRXCTL_TYPE_ALL 0x08 +#define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A +#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable Rx timestamping */ +#define E1000_TSYNCRXCTL_SYSCFI 0x00000020 /* Sys clock frequency */ + +#define E1000_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF +#define E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00 +#define E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01 +#define E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE 0x02 +#define E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE 0x03 +#define E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE 0x04 + +#define E1000_TSYNCRXCFG_PTP_V2_MSGID_MASK 0x00000F00 +#define E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE 0x0000 +#define E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE 0x0100 +#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE 0x0200 +#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE 0x0300 +#define E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE 0x0800 +#define E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE 0x0900 +#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE 0x0A00 +#define E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE 0x0B00 +#define E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE 0x0C00 +#define E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE 0x0D00 + +#define E1000_TIMINCA_16NS_SHIFT 24 +#define E1000_TIMINCA_INCPERIOD_SHIFT 24 +#define E1000_TIMINCA_INCVALUE_MASK 0x00FFFFFF + +#define E1000_TSICR_TXTS 0x00000002 +#define E1000_TSIM_TXTS 0x00000002 /* TUPLE Filtering Configuration */ -#define E1000_TTQF_DISABLE_MASK 0xF0008000 /* TTQF Disable Mask */ -#define E1000_TTQF_QUEUE_ENABLE 0x100 /* TTQF Queue Enable Bit */ -#define E1000_TTQF_PROTOCOL_MASK 0xFF /* TTQF Protocol Mask */ +#define E1000_TTQF_DISABLE_MASK 0xF0008000 /* TTQF Disable Mask */ +#define E1000_TTQF_QUEUE_ENABLE 0x100 /* TTQF Queue Enable Bit */ +#define E1000_TTQF_PROTOCOL_MASK 0xFF /* TTQF Protocol Mask */ /* TTQF TCP Bit, shift with E1000_TTQF_PROTOCOL SHIFT */ -#define E1000_TTQF_PROTOCOL_TCP 0x0 +#define E1000_TTQF_PROTOCOL_TCP 0x0 /* TTQF UDP Bit, shift with E1000_TTQF_PROTOCOL_SHIFT */ -#define E1000_TTQF_PROTOCOL_UDP 0x1 +#define E1000_TTQF_PROTOCOL_UDP 0x1 /* TTQF SCTP Bit, shift with E1000_TTQF_PROTOCOL_SHIFT */ -#define E1000_TTQF_PROTOCOL_SCTP 0x2 -#define E1000_TTQF_PROTOCOL_SHIFT 5 /* TTQF Protocol Shift */ -#define E1000_TTQF_QUEUE_SHIFT 16 /* TTQF Queue Shfit */ -#define E1000_TTQF_RX_QUEUE_MASK 0x70000 /* TTQF Queue Mask */ -#define E1000_TTQF_MASK_ENABLE 0x10000000 /* TTQF Mask Enable Bit */ -#define E1000_IMIR_CLEAR_MASK 0xF001FFFF /* IMIR Reg Clear Mask */ -#define E1000_IMIR_PORT_BYPASS 0x20000 /* IMIR Port Bypass Bit */ -#define E1000_IMIR_PRIORITY_SHIFT 29 /* IMIR Priority Shift */ -#define E1000_IMIREXT_CLEAR_MASK 0x7FFFF /* IMIREXT Reg Clear Mask */ - +#define E1000_TTQF_PROTOCOL_SCTP 0x2 +#define E1000_TTQF_PROTOCOL_SHIFT 5 /* 
TTQF Protocol Shift */ +#define E1000_TTQF_QUEUE_SHIFT 16 /* TTQF Queue Shift */ +#define E1000_TTQF_RX_QUEUE_MASK 0x70000 /* TTQF Queue Mask */ +#define E1000_TTQF_MASK_ENABLE 0x10000000 /* TTQF Mask Enable Bit */ +#define E1000_IMIR_CLEAR_MASK 0xF001FFFF /* IMIR Reg Clear Mask */ +#define E1000_IMIR_PORT_BYPASS 0x20000 /* IMIR Port Bypass Bit */ +#define E1000_IMIR_PRIORITY_SHIFT 29 /* IMIR Priority Shift */ +#define E1000_IMIREXT_CLEAR_MASK 0x7FFFF /* IMIREXT Reg Clear Mask */ + +#define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */ +#define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */ +#define E1000_MDICNFG_PHY_MASK 0x03E00000 +#define E1000_MDICNFG_PHY_SHIFT 21 + +#define E1000_MEDIA_PORT_COPPER 1 +#define E1000_MEDIA_PORT_OTHER 2 +#define E1000_M88E1112_AUTO_COPPER_SGMII 0x2 +#define E1000_M88E1112_AUTO_COPPER_BASEX 0x3 +#define E1000_M88E1112_STATUS_LINK 0x0004 /* Interface Link Bit */ +#define E1000_M88E1112_MAC_CTRL_1 0x10 +#define E1000_M88E1112_MAC_CTRL_1_MODE_MASK 0x0380 /* Mode Select */ +#define E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT 7 +#define E1000_M88E1112_PAGE_ADDR 0x16 +#define E1000_M88E1112_STATUS 0x01 + +#define E1000_THSTAT_LOW_EVENT 0x20000000 /* Low thermal threshold */ +#define E1000_THSTAT_MID_EVENT 0x00200000 /* Mid thermal threshold */ +#define E1000_THSTAT_HIGH_EVENT 0x00002000 /* High thermal threshold */ +#define E1000_THSTAT_PWR_DOWN 0x00000001 /* Power Down Event */ +#define E1000_THSTAT_LINK_THROTTLE 0x00000002 /* Link Spd Throttle Event */ + +/* I350 EEE defines */ +#define E1000_IPCNFG_EEE_1G_AN 0x00000008 /* IPCNFG EEE Ena 1G AN */ +#define E1000_IPCNFG_EEE_100M_AN 0x00000004 /* IPCNFG EEE Ena 100M AN */ +#define E1000_EEER_TX_LPI_EN 0x00010000 /* EEER Tx LPI Enable */ +#define E1000_EEER_RX_LPI_EN 0x00020000 /* EEER Rx LPI Enable */ +#define E1000_EEER_LPI_FC 0x00040000 /* EEER Ena on Flow Cntrl */ +/* EEE status */ +#define E1000_EEER_EEE_NEG 0x20000000 /* EEE capability nego */ +#define E1000_EEER_RX_LPI_STATUS 0x40000000 /* Rx in LPI state */ +#define E1000_EEER_TX_LPI_STATUS 0x80000000 /* Tx in LPI state */ +#define E1000_EEE_LP_ADV_ADDR_I350 0x040F /* EEE LP Advertisement */ +#define E1000_M88E1543_PAGE_ADDR 0x16 /* Page Offset Register */ +#define E1000_M88E1543_EEE_CTRL_1 0x0 +#define E1000_M88E1543_EEE_CTRL_1_MS 0x0001 /* EEE Master/Slave */ +#define E1000_EEE_ADV_DEV_I354 7 +#define E1000_EEE_ADV_ADDR_I354 60 +#define E1000_EEE_ADV_100_SUPPORTED (1 << 1) /* 100BaseTx EEE Supported */ +#define E1000_EEE_ADV_1000_SUPPORTED (1 << 2) /* 1000BaseT EEE Supported */ +#define E1000_PCS_STATUS_DEV_I354 3 +#define E1000_PCS_STATUS_ADDR_I354 1 +#define E1000_PCS_STATUS_RX_LPI_RCVD 0x0400 +#define E1000_PCS_STATUS_TX_LPI_RCVD 0x0800 +#define E1000_EEE_SU_LPI_CLK_STP 0x00800000 /* EEE LPI Clock Stop */ +#define E1000_EEE_LP_ADV_DEV_I210 7 /* EEE LP Adv Device */ +#define E1000_EEE_LP_ADV_ADDR_I210 61 /* EEE LP Adv Register */ /* PCI Express Control */ -#define E1000_GCR_RXD_NO_SNOOP 0x00000001 -#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002 -#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004 -#define E1000_GCR_TXD_NO_SNOOP 0x00000008 -#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010 -#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020 -#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000 -#define E1000_GCR_CMPL_TMOUT_10ms 0x00001000 -#define E1000_GCR_CMPL_TMOUT_RESEND 0x00010000 -#define E1000_GCR_CAP_VER2 0x00040000 - -#define PCIE_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \ - E1000_GCR_RXDSCW_NO_SNOOP | \ - E1000_GCR_RXDSCR_NO_SNOOP | \ -
E1000_GCR_TXD_NO_SNOOP | \ - E1000_GCR_TXDSCW_NO_SNOOP | \ - E1000_GCR_TXDSCR_NO_SNOOP) +#define E1000_GCR_RXD_NO_SNOOP 0x00000001 +#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002 +#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004 +#define E1000_GCR_TXD_NO_SNOOP 0x00000008 +#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010 +#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020 +#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000 +#define E1000_GCR_CMPL_TMOUT_10ms 0x00001000 +#define E1000_GCR_CMPL_TMOUT_RESEND 0x00010000 +#define E1000_GCR_CAP_VER2 0x00040000 + +#define PCIE_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \ + E1000_GCR_RXDSCW_NO_SNOOP | \ + E1000_GCR_RXDSCR_NO_SNOOP | \ + E1000_GCR_TXD_NO_SNOOP | \ + E1000_GCR_TXDSCW_NO_SNOOP | \ + E1000_GCR_TXDSCR_NO_SNOOP) + +#define E1000_MMDAC_FUNC_DATA 0x4000 /* Data, no post increment */ + +/* mPHY address control and data registers */ +#define E1000_MPHY_ADDR_CTL 0x0024 /* Address Control Reg */ +#define E1000_MPHY_ADDR_CTL_OFFSET_MASK 0xFFFF0000 +#define E1000_MPHY_DATA 0x0E10 /* Data Register */ + +/* AFE CSR Offset for PCS CLK */ +#define E1000_MPHY_PCS_CLK_REG_OFFSET 0x0004 +/* Override for near end digital loopback. */ +#define E1000_MPHY_PCS_CLK_REG_DIGINELBEN 0x10 /* PHY Control Register */ -#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */ -#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */ -#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ -#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ -#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */ -#define MII_CR_POWER_DOWN 0x0800 /* Power down */ -#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ -#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */ -#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ -#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ -#define MII_CR_SPEED_1000 0x0040 -#define MII_CR_SPEED_100 0x2000 -#define MII_CR_SPEED_10 0x0000 +#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */ +#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ +#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ +#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */ +#define MII_CR_POWER_DOWN 0x0800 /* Power down */ +#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ +#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ +#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ +#define MII_CR_SPEED_1000 0x0040 +#define MII_CR_SPEED_100 0x2000 +#define MII_CR_SPEED_10 0x0000 /* PHY Status Register */ -#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */ -#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */ -#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ -#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */ -#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */ -#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ +#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */ +#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */ +#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ +#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */ +#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */ +#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ #define 
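A brief sketch for the PHY control bits above (illustrative only, not from the disclosed sources): the speed select field is split across bits 6 and 13, so the encoding 01 for 100 Mb/s is just MII_CR_SPEED_100, combined here with full duplex and with auto-negotiation (MII_CR_AUTO_NEG_EN) left clear:

    #include <stdint.h>

    #define MII_CR_FULL_DUPLEX 0x0100 /* FDX = 1 */
    #define MII_CR_SPEED_100   0x2000 /* bits 6,13 = 01 -> 100 Mb/s */

    /* Control-register value that forces 100 Mb/s full duplex. */
    static uint16_t mii_cr_force_100_full(void)
    {
        return MII_CR_SPEED_100 | MII_CR_FULL_DUPLEX; /* 0x2100 */
    }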
MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */ -#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */ -#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */ -#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */ -#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */ -#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */ -#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */ -#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */ -#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */ +#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */ +#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */ +#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */ +#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */ +#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */ +#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */ +#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */ +#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */ /* Autoneg Advertisement Register */ -#define NWAY_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */ -#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ -#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ -#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ -#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ -#define NWAY_AR_100T4_CAPS 0x0200 /* 100T4 Capable */ -#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ -#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ -#define NWAY_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */ -#define NWAY_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */ +#define NWAY_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */ +#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ +#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ +#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ +#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ +#define NWAY_AR_100T4_CAPS 0x0200 /* 100T4 Capable */ +#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ +#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ +#define NWAY_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */ +#define NWAY_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */ /* Link Partner Ability Register (Base Page) */ -#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */ -#define NWAY_LPAR_10T_HD_CAPS 0x0020 /* LP is 10T Half Duplex Capable */ -#define NWAY_LPAR_10T_FD_CAPS 0x0040 /* LP is 10T Full Duplex Capable */ -#define NWAY_LPAR_100TX_HD_CAPS 0x0080 /* LP is 100TX Half Duplex Capable */ -#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP is 100TX Full Duplex Capable */ -#define NWAY_LPAR_100T4_CAPS 0x0200 /* LP is 100T4 Capable */ -#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ -#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */ -#define NWAY_LPAR_REMOTE_FAULT 0x2000 /* LP has detected Remote Fault */ -#define NWAY_LPAR_ACKNOWLEDGE 0x4000 /* LP has rx'd link code word */ -#define NWAY_LPAR_NEXT_PAGE 0x8000 /* Next Page ability supported */ +#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */ +#define NWAY_LPAR_10T_HD_CAPS 0x0020 /* LP 10T Half Dplx Capable */ +#define NWAY_LPAR_10T_FD_CAPS 0x0040 /* LP 10T Full Dplx Capable */ +#define 
NWAY_LPAR_100TX_HD_CAPS 0x0080 /* LP 100TX Half Dplx Capable */ +#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP 100TX Full Dplx Capable */ +#define NWAY_LPAR_100T4_CAPS 0x0200 /* LP is 100T4 Capable */ +#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ +#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asym Pause Direction bit */ +#define NWAY_LPAR_REMOTE_FAULT 0x2000 /* LP detected Remote Fault */ +#define NWAY_LPAR_ACKNOWLEDGE 0x4000 /* LP rx'd link code word */ +#define NWAY_LPAR_NEXT_PAGE 0x8000 /* Next Page ability supported */ /* Autoneg Expansion Register */ -#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */ -#define NWAY_ER_PAGE_RXD 0x0002 /* LP is 10T Half Duplex Capable */ -#define NWAY_ER_NEXT_PAGE_CAPS 0x0004 /* LP is 10T Full Duplex Capable */ -#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP is 100TX Half Duplex Capable */ -#define NWAY_ER_PAR_DETECT_FAULT 0x0010 /* LP is 100TX Full Duplex Capable */ +#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */ +#define NWAY_ER_PAGE_RXD 0x0002 /* New Page received */ +#define NWAY_ER_NEXT_PAGE_CAPS 0x0004 /* Next Page able */ +#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP Next Page able */ +#define NWAY_ER_PAR_DETECT_FAULT 0x0010 /* Parallel Detection Fault */ /* 1000BASE-T Control Register */ -#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */ -#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ -#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ -#define CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port */ - /* 0=DTE device */ -#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */ - /* 0=Configure PHY as Slave */ -#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */ - /* 0=Automatic Master/Slave config */ +#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */ +#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ +#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ +/* 1=Repeater/switch device port 0=DTE device */ +#define CR_1000T_REPEATER_DTE 0x0400 +/* 1=Configure PHY as Master 0=Configure PHY as Slave */ +#define CR_1000T_MS_VALUE 0x0800 +/* 1=Master/Slave manual config value 0=Automatic Master/Slave config */ +#define CR_1000T_MS_ENABLE 0x1000 #define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */ -#define CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */ -#define CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */ -#define CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */ -#define CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */ +#define CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */ +#define CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */ +#define CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */ +#define CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */ /* 1000BASE-T Status Register */ -#define SR_1000T_IDLE_ERROR_CNT 0x00FF /* Num idle errors since last read */ -#define SR_1000T_ASYM_PAUSE_DIR 0x0100 /* LP asymmetric pause direction bit */ -#define SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */ -#define SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */ -#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ -#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ -#define SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local Tx is Master, 0=Slave */ -#define SR_1000T_MS_CONFIG_FAULT 0x8000 /* 
Master/Slave config fault */ +#define SR_1000T_IDLE_ERROR_CNT 0x00FF /* Num idle err since last rd */ +#define SR_1000T_ASYM_PAUSE_DIR 0x0100 /* LP asym pause direction bit */ +#define SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */ +#define SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */ +#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ +#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ +#define SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local Tx Master, 0=Slave */ +#define SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */ -#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5 +#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5 /* PHY 1000 MII Register/Bit Definitions */ /* PHY Registers defined by IEEE */ -#define PHY_CONTROL 0x00 /* Control Register */ -#define PHY_STATUS 0x01 /* Status Register */ -#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ -#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ -#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ -#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ -#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */ -#define PHY_NEXT_PAGE_TX 0x07 /* Next Page Tx */ -#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */ -#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ -#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ -#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */ - -#define PHY_CONTROL_LB 0x4000 /* PHY Loopback bit */ +#define PHY_CONTROL 0x00 /* Control Register */ +#define PHY_STATUS 0x01 /* Status Register */ +#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ +#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ +#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ +#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ +#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */ +#define PHY_NEXT_PAGE_TX 0x07 /* Next Page Tx */ +#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */ +#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ +#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ +#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */ + +#define PHY_CONTROL_LB 0x4000 /* PHY Loopback bit */ /* NVM Control */ -#define E1000_EECD_SK 0x00000001 /* NVM Clock */ -#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */ -#define E1000_EECD_DI 0x00000004 /* NVM Data In */ -#define E1000_EECD_DO 0x00000008 /* NVM Data Out */ -#define E1000_EECD_FWE_MASK 0x00000030 -#define E1000_EECD_FWE_DIS 0x00000010 /* Disable FLASH writes */ -#define E1000_EECD_FWE_EN 0x00000020 /* Enable FLASH writes */ -#define E1000_EECD_FWE_SHIFT 4 -#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */ -#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */ -#define E1000_EECD_PRES 0x00000100 /* NVM Present */ -#define E1000_EECD_SIZE 0x00000200 /* NVM Size (0=64 word 1=256 word) */ +#define E1000_EECD_SK 0x00000001 /* NVM Clock */ +#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */ +#define E1000_EECD_DI 0x00000004 /* NVM Data In */ +#define E1000_EECD_DO 0x00000008 /* NVM Data Out */ +#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */ +#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */ +#define E1000_EECD_PRES 0x00000100 /* NVM Present */ +#define E1000_EECD_SIZE 0x00000200 /* NVM Size (0=64 word 1=256 word) */ +#define E1000_EECD_BLOCKED 0x00008000 /* Bit banging access blocked flag */ +#define E1000_EECD_ABORT 0x00010000 /* NVM operation aborted flag */ +#define E1000_EECD_TIMEOUT 0x00020000 /* NVM read operation timeout flag */ 
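The IEEE PHY register map above is enough for a basic link check; the one subtlety is that MII_SR_LINK_STATUS is a latched-low bit, so the first read of PHY_STATUS reports historical state. A minimal sketch of the usual double-read idiom, modeled on the driver's generic link-check flow (timeout and error handling omitted):

static bool sketch_phy_link_up(struct e1000_hw *hw)
{
	u16 phy_status = 0;

	/* Read PHY_STATUS twice: the first read clears the latched
	 * value, the second reflects the current link state.
	 */
	hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
	hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);

	return !!(phy_status & MII_SR_LINK_STATUS);
}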
+#define E1000_EECD_ERROR_CLR 0x00040000 /* NVM error status clear bit */ /* NVM Addressing bits based on type 0=small, 1=large */ -#define E1000_EECD_ADDR_BITS 0x00000400 -#define E1000_EECD_TYPE 0x00002000 /* NVM Type (1-SPI, 0-Microwire) */ -#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */ -#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */ -#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */ -#define E1000_EECD_SIZE_EX_SHIFT 11 -#define E1000_EECD_NVADDS 0x00018000 /* NVM Address Size */ -#define E1000_EECD_SELSHAD 0x00020000 /* Select Shadow RAM */ -#define E1000_EECD_INITSRAM 0x00040000 /* Initialize Shadow RAM */ -#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */ -#define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */ -#define E1000_EECD_SHADV 0x00200000 /* Shadow RAM Data Valid */ -#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */ -#define E1000_EECD_SECVAL_SHIFT 22 -#define E1000_EECD_SEC1VAL_VALID_MASK (E1000_EECD_AUTO_RD | E1000_EECD_PRES) - -#define E1000_NVM_SWDPIN0 0x0001 /* SWDPIN 0 NVM Value */ -#define E1000_NVM_LED_LOGIC 0x0020 /* Led Logic Word */ -#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM read/write regs */ -#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ -#define E1000_NVM_RW_REG_START 1 /* Start operation */ -#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ -#define E1000_NVM_POLL_WRITE 1 /* Flag for polling for write complete */ -#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */ -#define E1000_FLASH_UPDATES 2000 +#define E1000_EECD_ADDR_BITS 0x00000400 +#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */ +#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */ +#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */ +#define E1000_EECD_SIZE_EX_SHIFT 11 +#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */ +#define E1000_EECD_AUPDEN 0x00100000 /* Ena Auto FLASH update */ +#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */ +#define E1000_EECD_SEC1VAL_VALID_MASK (E1000_EECD_AUTO_RD | E1000_EECD_PRES) +#define E1000_EECD_FLUPD_I210 0x00800000 /* Update FLASH */ +#define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done */ +#define E1000_EECD_FLASH_DETECTED_I210 0x00080000 /* FLASH detected */ +#define E1000_EECD_SEC1VAL_I210 0x02000000 /* Sector One Valid */ +#define E1000_FLUDONE_ATTEMPTS 20000 +#define E1000_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */ +#define E1000_I210_FIFO_SEL_RX 0x00 +#define E1000_I210_FIFO_SEL_TX_QAV(_i) (0x02 + (_i)) +#define E1000_I210_FIFO_SEL_TX_LEGACY E1000_I210_FIFO_SEL_TX_QAV(0) +#define E1000_I210_FIFO_SEL_BMC2OS_TX 0x06 +#define E1000_I210_FIFO_SEL_BMC2OS_RX 0x01 + +#define E1000_I210_FLASH_SECTOR_SIZE 0x1000 /* 4KB FLASH sector unit size */ +/* Secure FLASH mode requires removing MSb */ +#define E1000_I210_FW_PTR_MASK 0x7FFF +/* Firmware code revision field word offset*/ +#define E1000_I210_FW_VER_OFFSET 328 + +#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM read/write regs */ +#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ +#define E1000_NVM_RW_REG_START 1 /* Start operation */ +#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define E1000_NVM_POLL_WRITE 1 /* Flag for polling for write complete */ +#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */ +#define E1000_FLASH_UPDATES 2000 /* NVM Word Offsets */ -#define NVM_COMPAT 0x0003 -#define NVM_ID_LED_SETTINGS 
0x0004 -#define NVM_VERSION 0x0005 -#define NVM_SERDES_AMPLITUDE 0x0006 /* SERDES output amplitude */ -#define NVM_PHY_CLASS_WORD 0x0007 -#define NVM_INIT_CONTROL1_REG 0x000A -#define NVM_INIT_CONTROL2_REG 0x000F -#define NVM_SWDEF_PINS_CTRL_PORT_1 0x0010 -#define NVM_INIT_CONTROL3_PORT_B 0x0014 -#define NVM_INIT_3GIO_3 0x001A -#define NVM_SWDEF_PINS_CTRL_PORT_0 0x0020 -#define NVM_INIT_CONTROL3_PORT_A 0x0024 -#define NVM_CFG 0x0012 -#define NVM_FLASH_VERSION 0x0032 -#define NVM_ALT_MAC_ADDR_PTR 0x0037 -#define NVM_CHECKSUM_REG 0x003F - -#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */ -#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */ -#define E1000_NVM_CFG_DONE_PORT_2 0x100000 /* ...for third port */ -#define E1000_NVM_CFG_DONE_PORT_3 0x200000 /* ...for fourth port */ - -#define NVM_82580_LAN_FUNC_OFFSET(a) (a ? (0x40 + (0x40 * a)) : 0) +#define NVM_COMPAT 0x0003 +#define NVM_ID_LED_SETTINGS 0x0004 +#define NVM_VERSION 0x0005 +#define E1000_I210_NVM_FW_MODULE_PTR 0x0010 +#define E1000_I350_NVM_FW_MODULE_PTR 0x0051 +#define NVM_FUTURE_INIT_WORD1 0x0019 +#define NVM_ETRACK_WORD 0x0042 +#define NVM_ETRACK_HIWORD 0x0043 +#define NVM_COMB_VER_OFF 0x0083 +#define NVM_COMB_VER_PTR 0x003d + +/* NVM version defines */ +#define NVM_MAJOR_MASK 0xF000 +#define NVM_MINOR_MASK 0x0FF0 +#define NVM_IMAGE_ID_MASK 0x000F +#define NVM_COMB_VER_MASK 0x00FF +#define NVM_MAJOR_SHIFT 12 +#define NVM_MINOR_SHIFT 4 +#define NVM_COMB_VER_SHFT 8 +#define NVM_VER_INVALID 0xFFFF +#define NVM_ETRACK_SHIFT 16 +#define NVM_ETRACK_VALID 0x8000 +#define NVM_NEW_DEC_MASK 0x0F00 +#define NVM_HEX_CONV 16 +#define NVM_HEX_TENS 10 + +/* FW version defines */ +/* Offset of "Loader patch ptr" in Firmware Header */ +#define E1000_I350_NVM_FW_LOADER_PATCH_PTR_OFFSET 0x01 +/* Patch generation hour & minutes */ +#define E1000_I350_NVM_FW_VER_WORD1_OFFSET 0x04 +/* Patch generation month & day */ +#define E1000_I350_NVM_FW_VER_WORD2_OFFSET 0x05 +/* Patch generation year */ +#define E1000_I350_NVM_FW_VER_WORD3_OFFSET 0x06 +/* Patch major & minor numbers */ +#define E1000_I350_NVM_FW_VER_WORD4_OFFSET 0x07 + +#define NVM_MAC_ADDR 0x0000 +#define NVM_SUB_DEV_ID 0x000B +#define NVM_SUB_VEN_ID 0x000C +#define NVM_DEV_ID 0x000D +#define NVM_VEN_ID 0x000E +#define NVM_INIT_CTRL_2 0x000F +#define NVM_INIT_CTRL_4 0x0013 +#define NVM_LED_1_CFG 0x001C +#define NVM_LED_0_2_CFG 0x001F + +#define NVM_COMPAT_VALID_CSUM 0x0001 +#define NVM_FUTURE_INIT_WORD1_VALID_CSUM 0x0040 + +#define NVM_ETS_CFG 0x003E +#define NVM_ETS_LTHRES_DELTA_MASK 0x07C0 +#define NVM_ETS_LTHRES_DELTA_SHIFT 6 +#define NVM_ETS_TYPE_MASK 0x0038 +#define NVM_ETS_TYPE_SHIFT 3 +#define NVM_ETS_TYPE_EMC 0x000 +#define NVM_ETS_NUM_SENSORS_MASK 0x0007 +#define NVM_ETS_DATA_LOC_MASK 0x3C00 +#define NVM_ETS_DATA_LOC_SHIFT 10 +#define NVM_ETS_DATA_INDEX_MASK 0x0300 +#define NVM_ETS_DATA_INDEX_SHIFT 8 +#define NVM_ETS_DATA_HTHRESH_MASK 0x00FF +#define NVM_INIT_CONTROL2_REG 0x000F +#define NVM_INIT_CONTROL3_PORT_B 0x0014 +#define NVM_INIT_3GIO_3 0x001A +#define NVM_SWDEF_PINS_CTRL_PORT_0 0x0020 +#define NVM_INIT_CONTROL3_PORT_A 0x0024 +#define NVM_CFG 0x0012 +#define NVM_ALT_MAC_ADDR_PTR 0x0037 +#define NVM_CHECKSUM_REG 0x003F +#define NVM_COMPATIBILITY_REG_3 0x0003 +#define NVM_COMPATIBILITY_BIT_MASK 0x8000 + +#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */ +#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */ +#define E1000_NVM_CFG_DONE_PORT_2 0x100000 /* ...for third port */ +#define E1000_NVM_CFG_DONE_PORT_3 
0x200000 /* ...for fourth port */ + +#define NVM_82580_LAN_FUNC_OFFSET(a) ((a) ? (0x40 + (0x40 * (a))) : 0) + +/* Mask bits for fields in Word 0x24 of the NVM */ +#define NVM_WORD24_COM_MDIO 0x0008 /* MDIO interface shared */ +#define NVM_WORD24_EXT_MDIO 0x0004 /* MDIO accesses routed extrnl */ +/* Offset of Link Mode bits for 82575/82576 */ +#define NVM_WORD24_LNK_MODE_OFFSET 8 +/* Offset of Link Mode bits for 82580 up */ +#define NVM_WORD24_82580_LNK_MODE_OFFSET 4 + /* Mask bits for fields in Word 0x0f of the NVM */ -#define NVM_WORD0F_PAUSE_MASK 0x3000 -#define NVM_WORD0F_PAUSE 0x1000 -#define NVM_WORD0F_ASM_DIR 0x2000 -#define NVM_WORD0F_ANE 0x0800 -#define NVM_WORD0F_SWPDIO_EXT_MASK 0x00F0 -#define NVM_WORD0F_LPLU 0x0001 +#define NVM_WORD0F_PAUSE_MASK 0x3000 +#define NVM_WORD0F_PAUSE 0x1000 +#define NVM_WORD0F_ASM_DIR 0x2000 /* Mask bits for fields in Word 0x1a of the NVM */ -#define NVM_WORD1A_ASPM_MASK 0x000C +#define NVM_WORD1A_ASPM_MASK 0x000C + +/* Mask bits for fields in Word 0x03 of the EEPROM */ +#define NVM_COMPAT_LOM 0x0800 + +/* length of string needed to store PBA number */ +#define E1000_PBANUM_LENGTH 11 /* For checksumming, the sum of all words in the NVM should equal 0xBABA. */ -#define NVM_SUM 0xBABA - -#define NVM_MAC_ADDR_OFFSET 0 -#define NVM_PBA_OFFSET_0 8 -#define NVM_PBA_OFFSET_1 9 -#define NVM_RESERVED_WORD 0xFFFF -#define NVM_PHY_CLASS_A 0x8000 -#define NVM_SERDES_AMPLITUDE_MASK 0x000F -#define NVM_SIZE_MASK 0x1C00 -#define NVM_SIZE_SHIFT 10 -#define NVM_WORD_SIZE_BASE_SHIFT 6 -#define NVM_SWDPIO_EXT_SHIFT 4 +#define NVM_SUM 0xBABA + +/* PBA (printed board assembly) number words */ +#define NVM_PBA_OFFSET_0 8 +#define NVM_PBA_OFFSET_1 9 +#define NVM_PBA_PTR_GUARD 0xFAFA +#define NVM_RESERVED_WORD 0xFFFF +#define NVM_WORD_SIZE_BASE_SHIFT 6 /* NVM Commands - SPI */ -#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ -#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */ -#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */ -#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ -#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */ -#define NVM_WRDI_OPCODE_SPI 0x04 /* NVM reset Write Enable latch */ -#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */ -#define NVM_WRSR_OPCODE_SPI 0x01 /* NVM write Status register */ +#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ +#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */ +#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */ +#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ +#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */ +#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */ /* SPI NVM Status Register */ -#define NVM_STATUS_RDY_SPI 0x01 -#define NVM_STATUS_WEN_SPI 0x02 -#define NVM_STATUS_BP0_SPI 0x04 -#define NVM_STATUS_BP1_SPI 0x08 -#define NVM_STATUS_WPEN_SPI 0x80 +#define NVM_STATUS_RDY_SPI 0x01 /* Word definitions for ID LED Settings */ -#define ID_LED_RESERVED_0000 0x0000 -#define ID_LED_RESERVED_FFFF 0xFFFF -#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ - (ID_LED_OFF1_OFF2 << 8) | \ - (ID_LED_DEF1_DEF2 << 4) | \ - (ID_LED_DEF1_DEF2)) -#define ID_LED_DEF1_DEF2 0x1 -#define ID_LED_DEF1_ON2 0x2 -#define ID_LED_DEF1_OFF2 0x3 -#define ID_LED_ON1_DEF2 0x4 -#define ID_LED_ON1_ON2 0x5 -#define ID_LED_ON1_OFF2 0x6 -#define ID_LED_OFF1_DEF2 0x7 -#define ID_LED_OFF1_ON2 0x8 -#define ID_LED_OFF1_OFF2 0x9 - -#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF -#define IGP_ACTIVITY_LED_ENABLE 0x0300 
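To make the 0xBABA rule concrete: every word from offset 0x0000 through NVM_CHECKSUM_REG participates, and the checksum word is programmed so the 16-bit sum comes out to NVM_SUM. A minimal validation sketch in the spirit of the driver's generic checksum routine, assuming only the hw->nvm.ops.read accessor:

static s32 sketch_validate_nvm_checksum(struct e1000_hw *hw)
{
	u16 checksum = 0;
	u16 i, nvm_data;

	/* Sum words 0x0000..NVM_CHECKSUM_REG inclusive, modulo 2^16 */
	for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
		if (hw->nvm.ops.read(hw, i, 1, &nvm_data) != E1000_SUCCESS)
			return -E1000_ERR_NVM;
		checksum += nvm_data;
	}

	return (checksum == (u16)NVM_SUM) ? E1000_SUCCESS : -E1000_ERR_NVM;
}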
-#define IGP_LED3_MODE 0x07000000 +#define ID_LED_RESERVED_0000 0x0000 +#define ID_LED_RESERVED_FFFF 0xFFFF +#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ + (ID_LED_OFF1_OFF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_DEF1_DEF2)) +#define ID_LED_DEF1_DEF2 0x1 +#define ID_LED_DEF1_ON2 0x2 +#define ID_LED_DEF1_OFF2 0x3 +#define ID_LED_ON1_DEF2 0x4 +#define ID_LED_ON1_ON2 0x5 +#define ID_LED_ON1_OFF2 0x6 +#define ID_LED_OFF1_DEF2 0x7 +#define ID_LED_OFF1_ON2 0x8 +#define ID_LED_OFF1_OFF2 0x9 + +#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF +#define IGP_ACTIVITY_LED_ENABLE 0x0300 +#define IGP_LED3_MODE 0x07000000 /* PCI/PCI-X/PCI-EX Config space */ -#define PCI_HEADER_TYPE_REGISTER 0x0E -#define PCIE_LINK_STATUS 0x12 -#define PCIE_DEVICE_CONTROL2 0x28 - -#define PCI_HEADER_TYPE_MULTIFUNC 0x80 -#define PCIE_LINK_WIDTH_MASK 0x3F0 -#define PCIE_LINK_WIDTH_SHIFT 4 -#define PCIE_DEVICE_CONTROL2_16ms 0x0005 +#define PCI_HEADER_TYPE_REGISTER 0x0E +#define PCIE_LINK_STATUS 0x12 +#define PCIE_DEVICE_CONTROL2 0x28 + +#define PCI_HEADER_TYPE_MULTIFUNC 0x80 +#define PCIE_LINK_WIDTH_MASK 0x3F0 +#define PCIE_LINK_WIDTH_SHIFT 4 +#define PCIE_LINK_SPEED_MASK 0x0F +#define PCIE_LINK_SPEED_2500 0x01 +#define PCIE_LINK_SPEED_5000 0x02 +#define PCIE_DEVICE_CONTROL2_16ms 0x0005 #ifndef ETH_ADDR_LEN -#define ETH_ADDR_LEN 6 +#define ETH_ADDR_LEN 6 #endif -#define PHY_REVISION_MASK 0xFFFFFFF0 -#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ -#define MAX_PHY_MULTI_PAGE_REG 0xF +#define PHY_REVISION_MASK 0xFFFFFFF0 +#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ +#define MAX_PHY_MULTI_PAGE_REG 0xF -/* Bit definitions for valid PHY IDs. */ -/* +/* Bit definitions for valid PHY IDs. * I = Integrated * E = External */ -#define M88E1000_E_PHY_ID 0x01410C50 -#define M88E1000_I_PHY_ID 0x01410C30 -#define M88E1011_I_PHY_ID 0x01410C20 -#define IGP01E1000_I_PHY_ID 0x02A80380 -#define M88E1011_I_REV_4 0x04 -#define M88E1111_I_PHY_ID 0x01410CC0 -#define GG82563_E_PHY_ID 0x01410CA0 -#define IGP03E1000_E_PHY_ID 0x02A80390 -#define IFE_E_PHY_ID 0x02A80330 -#define IFE_PLUS_E_PHY_ID 0x02A80320 -#define IFE_C_E_PHY_ID 0x02A80310 -#define I82580_I_PHY_ID 0x015403A0 -#define I350_I_PHY_ID 0x015403B0 -#define IGP04E1000_E_PHY_ID 0x02A80391 -#define M88_VENDOR 0x0141 +#define M88E1000_E_PHY_ID 0x01410C50 +#define M88E1000_I_PHY_ID 0x01410C30 +#define M88E1011_I_PHY_ID 0x01410C20 +#define IGP01E1000_I_PHY_ID 0x02A80380 +#define M88E1111_I_PHY_ID 0x01410CC0 +#define M88E1543_E_PHY_ID 0x01410EA0 +#define M88E1112_E_PHY_ID 0x01410C90 +#define I347AT4_E_PHY_ID 0x01410DC0 +#define M88E1340M_E_PHY_ID 0x01410DF0 +#define GG82563_E_PHY_ID 0x01410CA0 +#define IGP03E1000_E_PHY_ID 0x02A80390 +#define IFE_E_PHY_ID 0x02A80330 +#define IFE_PLUS_E_PHY_ID 0x02A80320 +#define IFE_C_E_PHY_ID 0x02A80310 +#define I82580_I_PHY_ID 0x015403A0 +#define I350_I_PHY_ID 0x015403B0 +#define I210_I_PHY_ID 0x01410C00 +#define IGP04E1000_E_PHY_ID 0x02A80391 +#define M88_VENDOR 0x0141 /* M88E1000 Specific Registers */ -#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ -#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */ -#define M88E1000_INT_ENABLE 0x12 /* Interrupt Enable Register */ -#define M88E1000_INT_STATUS 0x13 /* Interrupt Status Register */ -#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */ -#define M88E1000_RX_ERR_CNTR 0x15 /* Receive Error Counter */ - -#define M88E1000_PHY_EXT_CTRL 0x1A /* PHY extend control register */ -#define 
M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */ -#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */ -#define M88E1000_PHY_VCO_REG_BIT8 0x100 /* Bits 8 & 11 are adjusted for */ -#define M88E1000_PHY_VCO_REG_BIT11 0x800 /* improved BER performance */ +#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Reg */ +#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Reg */ +#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Cntrl */ +#define M88E1000_RX_ERR_CNTR 0x15 /* Receive Error Counter */ + +#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for pg number setting */ +#define M88E1000_PHY_GEN_CONTROL 0x1E /* meaning depends on reg 29 */ /* M88E1000 PHY Specific Control Register */ -#define M88E1000_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */ -#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reverse enabled */ -#define M88E1000_PSCR_SQE_TEST 0x0004 /* 1=SQE Test enabled */ -/* 1=CLK125 low, 0=CLK125 toggling */ -#define M88E1000_PSCR_CLK125_DISABLE 0x0010 -#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */ - /* Manual MDI configuration */ -#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ +#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reverse enabled */ +/* MDI Crossover Mode bits 6:5 Manual MDI configuration */ +#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 +#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ /* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */ -#define M88E1000_PSCR_AUTO_X_1000T 0x0040 +#define M88E1000_PSCR_AUTO_X_1000T 0x0040 /* Auto crossover enabled all speeds */ -#define M88E1000_PSCR_AUTO_X_MODE 0x0060 -/* - * 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold - * 0=Normal 10BASE-T Rx Threshold - */ -#define M88E1000_PSCR_EN_10BT_EXT_DIST 0x0080 -/* 1=5-bit interface in 100BASE-TX, 0=MII interface in 100BASE-TX */ -#define M88E1000_PSCR_MII_5BIT_ENABLE 0x0100 -#define M88E1000_PSCR_SCRAMBLER_DISABLE 0x0200 /* 1=Scrambler disable */ -#define M88E1000_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force link good */ -#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Tx */ +#define M88E1000_PSCR_AUTO_X_MODE 0x0060 +#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Tx */ /* M88E1000 PHY Specific Status Register */ -#define M88E1000_PSSR_JABBER 0x0001 /* 1=Jabber */ -#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ -#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ -#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ -/* - * 0 = <50M +#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ +#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ +#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ +/* 0 = <50M * 1 = 50-80M * 2 = 80-110M * 3 = 110-140M * 4 = >140M */ -#define M88E1000_PSSR_CABLE_LENGTH 0x0380 -#define M88E1000_PSSR_LINK 0x0400 /* 1=Link up, 0=Link down */ -#define M88E1000_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */ -#define M88E1000_PSSR_PAGE_RCVD 0x1000 /* 1=Page received */ -#define M88E1000_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */ -#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ -#define M88E1000_PSSR_10MBS 0x0000 /* 00=10Mbs */ -#define M88E1000_PSSR_100MBS 0x4000 /* 01=100Mbs */ -#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ - -#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 - -/* M88E1000 Extended PHY Specific Control Register */ 
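The crossover bits above occupy bits 6:5 of M88E1000_PHY_SPEC_CTRL. A rough sketch of selecting automatic MDI/MDI-X, patterned after the M88 copper-link setup; note the new setting only takes effect after a PHY reset via MII_CR_RESET, which is omitted here:

	u16 phy_data;

	hw->phy.ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);

	/* Bits 6:5 = 11b enables auto crossover at all speeds; to force
	 * a mode, clear the field and OR in M88E1000_PSCR_MDI_MANUAL_MODE
	 * (MDI) or M88E1000_PSCR_MDIX_MANUAL_MODE (MDI-X) instead.
	 */
	phy_data |= M88E1000_PSCR_AUTO_X_MODE;

	hw->phy.ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);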
-#define M88E1000_EPSCR_FIBER_LOOPBACK 0x4000 /* 1=Fiber loopback */ -/* - * 1 = Lost lock detect enabled. - * Will assert lost lock and bring - * link down if idle not seen - * within 1ms in 1000BASE-T - */ -#define M88E1000_EPSCR_DOWN_NO_IDLE 0x8000 -/* - * Number of times we will attempt to autonegotiate before downshifting if we +#define M88E1000_PSSR_CABLE_LENGTH 0x0380 +#define M88E1000_PSSR_LINK 0x0400 /* 1=Link up, 0=Link down */ +#define M88E1000_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */ +#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ +#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ + +#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 + +/* Number of times we will attempt to autonegotiate before downshifting if we * are the master */ -#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 -#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 -#define M88E1000_EPSCR_MASTER_DOWNSHIFT_2X 0x0400 -#define M88E1000_EPSCR_MASTER_DOWNSHIFT_3X 0x0800 -#define M88E1000_EPSCR_MASTER_DOWNSHIFT_4X 0x0C00 -/* - * Number of times we will attempt to autonegotiate before downshifting if we +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 +/* Number of times we will attempt to autonegotiate before downshifting if we * are the slave */ -#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 -#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_DIS 0x0000 -#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 -#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_2X 0x0200 -#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_3X 0x0300 -#define M88E1000_EPSCR_TX_CLK_2_5 0x0060 /* 2.5 MHz TX_CLK */ -#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ -#define M88E1000_EPSCR_TX_CLK_0 0x0000 /* NO TX_CLK */ +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 +#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ + +/* Intel I347AT4 Registers */ +#define I347AT4_PCDL 0x10 /* PHY Cable Diagnostics Length */ +#define I347AT4_PCDC 0x15 /* PHY Cable Diagnostics Control */ +#define I347AT4_PAGE_SELECT 0x16 + +/* I347AT4 Extended PHY Specific Control Register */ + +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the master + */ +#define I347AT4_PSCR_DOWNSHIFT_ENABLE 0x0800 +#define I347AT4_PSCR_DOWNSHIFT_MASK 0x7000 +#define I347AT4_PSCR_DOWNSHIFT_1X 0x0000 +#define I347AT4_PSCR_DOWNSHIFT_2X 0x1000 +#define I347AT4_PSCR_DOWNSHIFT_3X 0x2000 +#define I347AT4_PSCR_DOWNSHIFT_4X 0x3000 +#define I347AT4_PSCR_DOWNSHIFT_5X 0x4000 +#define I347AT4_PSCR_DOWNSHIFT_6X 0x5000 +#define I347AT4_PSCR_DOWNSHIFT_7X 0x6000 +#define I347AT4_PSCR_DOWNSHIFT_8X 0x7000 + +/* I347AT4 PHY Cable Diagnostics Control */ +#define I347AT4_PCDC_CABLE_LENGTH_UNIT 0x0400 /* 0=cm 1=meters */ + +/* M88E1112 only registers */ +#define M88E1112_VCT_DSP_DISTANCE 0x001A /* M88EC018 Rev 2 specific DownShift settings */ -#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 -#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_1X 0x0000 -#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_2X 0x0200 -#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_3X 0x0400 -#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_4X 0x0600 -#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 -#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_6X 0x0A00 -#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_7X 0x0C00 -#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_8X 0x0E00 - -/* - * Bits... +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 + +/* Bits... 
* 15-5: page * 4-0: register offset */ -#define GG82563_PAGE_SHIFT 5 -#define GG82563_REG(page, reg) \ - (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS)) -#define GG82563_MIN_ALT_REG 30 +#define GG82563_PAGE_SHIFT 5 +#define GG82563_REG(page, reg) \ + (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS)) +#define GG82563_MIN_ALT_REG 30 /* GG82563 Specific Registers */ -#define GG82563_PHY_SPEC_CTRL \ - GG82563_REG(0, 16) /* PHY Specific Control */ -#define GG82563_PHY_SPEC_STATUS \ - GG82563_REG(0, 17) /* PHY Specific Status */ -#define GG82563_PHY_INT_ENABLE \ - GG82563_REG(0, 18) /* Interrupt Enable */ -#define GG82563_PHY_SPEC_STATUS_2 \ - GG82563_REG(0, 19) /* PHY Specific Status 2 */ -#define GG82563_PHY_RX_ERR_CNTR \ - GG82563_REG(0, 21) /* Receive Error Counter */ -#define GG82563_PHY_PAGE_SELECT \ - GG82563_REG(0, 22) /* Page Select */ -#define GG82563_PHY_SPEC_CTRL_2 \ - GG82563_REG(0, 26) /* PHY Specific Control 2 */ -#define GG82563_PHY_PAGE_SELECT_ALT \ - GG82563_REG(0, 29) /* Alternate Page Select */ -#define GG82563_PHY_TEST_CLK_CTRL \ - GG82563_REG(0, 30) /* Test Clock Control (use reg. 29 to select) */ - -#define GG82563_PHY_MAC_SPEC_CTRL \ - GG82563_REG(2, 21) /* MAC Specific Control Register */ -#define GG82563_PHY_MAC_SPEC_CTRL_2 \ - GG82563_REG(2, 26) /* MAC Specific Control 2 */ - -#define GG82563_PHY_DSP_DISTANCE \ - GG82563_REG(5, 26) /* DSP Distance */ +#define GG82563_PHY_SPEC_CTRL GG82563_REG(0, 16) /* PHY Spec Cntrl */ +#define GG82563_PHY_PAGE_SELECT GG82563_REG(0, 22) /* Page Select */ +#define GG82563_PHY_SPEC_CTRL_2 GG82563_REG(0, 26) /* PHY Spec Cntrl2 */ +#define GG82563_PHY_PAGE_SELECT_ALT GG82563_REG(0, 29) /* Alt Page Select */ + +/* MAC Specific Control Register */ +#define GG82563_PHY_MAC_SPEC_CTRL GG82563_REG(2, 21) + +#define GG82563_PHY_DSP_DISTANCE GG82563_REG(5, 26) /* DSP Distance */ /* Page 193 - Port Control Registers */ -#define GG82563_PHY_KMRN_MODE_CTRL \ - GG82563_REG(193, 16) /* Kumeran Mode Control */ -#define GG82563_PHY_PORT_RESET \ - GG82563_REG(193, 17) /* Port Reset */ -#define GG82563_PHY_REVISION_ID \ - GG82563_REG(193, 18) /* Revision ID */ -#define GG82563_PHY_DEVICE_ID \ - GG82563_REG(193, 19) /* Device ID */ -#define GG82563_PHY_PWR_MGMT_CTRL \ - GG82563_REG(193, 20) /* Power Management Control */ -#define GG82563_PHY_RATE_ADAPT_CTRL \ - GG82563_REG(193, 25) /* Rate Adaptation Control */ +/* Kumeran Mode Control */ +#define GG82563_PHY_KMRN_MODE_CTRL GG82563_REG(193, 16) +#define GG82563_PHY_PWR_MGMT_CTRL GG82563_REG(193, 20) /* Pwr Mgt Ctrl */ /* Page 194 - KMRN Registers */ -#define GG82563_PHY_KMRN_FIFO_CTRL_STAT \ - GG82563_REG(194, 16) /* FIFO's Control/Status */ -#define GG82563_PHY_KMRN_CTRL \ - GG82563_REG(194, 17) /* Control */ -#define GG82563_PHY_INBAND_CTRL \ - GG82563_REG(194, 18) /* Inband Control */ -#define GG82563_PHY_KMRN_DIAGNOSTIC \ - GG82563_REG(194, 19) /* Diagnostic */ -#define GG82563_PHY_ACK_TIMEOUTS \ - GG82563_REG(194, 20) /* Acknowledge Timeouts */ -#define GG82563_PHY_ADV_ABILITY \ - GG82563_REG(194, 21) /* Advertised Ability */ -#define GG82563_PHY_LINK_PARTNER_ADV_ABILITY \ - GG82563_REG(194, 23) /* Link Partner Advertised Ability */ -#define GG82563_PHY_ADV_NEXT_PAGE \ - GG82563_REG(194, 24) /* Advertised Next Page */ -#define GG82563_PHY_LINK_PARTNER_ADV_NEXT_PAGE \ - GG82563_REG(194, 25) /* Link Partner Advertised Next page */ -#define GG82563_PHY_KMRN_MISC \ - GG82563_REG(194, 26) /* Misc. 
*/ +#define GG82563_PHY_INBAND_CTRL GG82563_REG(194, 18) /* Inband Ctrl */ /* MDI Control */ -#define E1000_MDIC_DATA_MASK 0x0000FFFF -#define E1000_MDIC_REG_MASK 0x001F0000 -#define E1000_MDIC_REG_SHIFT 16 -#define E1000_MDIC_PHY_MASK 0x03E00000 -#define E1000_MDIC_PHY_SHIFT 21 -#define E1000_MDIC_OP_WRITE 0x04000000 -#define E1000_MDIC_OP_READ 0x08000000 -#define E1000_MDIC_READY 0x10000000 -#define E1000_MDIC_INT_EN 0x20000000 -#define E1000_MDIC_ERROR 0x40000000 +#define E1000_MDIC_REG_MASK 0x001F0000 +#define E1000_MDIC_REG_SHIFT 16 +#define E1000_MDIC_PHY_MASK 0x03E00000 +#define E1000_MDIC_PHY_SHIFT 21 +#define E1000_MDIC_OP_WRITE 0x04000000 +#define E1000_MDIC_OP_READ 0x08000000 +#define E1000_MDIC_READY 0x10000000 +#define E1000_MDIC_ERROR 0x40000000 +#define E1000_MDIC_DEST 0x80000000 /* SerDes Control */ -#define E1000_GEN_CTL_READY 0x80000000 -#define E1000_GEN_CTL_ADDRESS_SHIFT 8 -#define E1000_GEN_POLL_TIMEOUT 640 +#define E1000_GEN_CTL_READY 0x80000000 +#define E1000_GEN_CTL_ADDRESS_SHIFT 8 +#define E1000_GEN_POLL_TIMEOUT 640 /* LinkSec register fields */ -#define E1000_LSECTXCAP_SUM_MASK 0x00FF0000 -#define E1000_LSECTXCAP_SUM_SHIFT 16 -#define E1000_LSECRXCAP_SUM_MASK 0x00FF0000 -#define E1000_LSECRXCAP_SUM_SHIFT 16 - -#define E1000_LSECTXCTRL_EN_MASK 0x00000003 -#define E1000_LSECTXCTRL_DISABLE 0x0 -#define E1000_LSECTXCTRL_AUTH 0x1 -#define E1000_LSECTXCTRL_AUTH_ENCRYPT 0x2 -#define E1000_LSECTXCTRL_AISCI 0x00000020 -#define E1000_LSECTXCTRL_PNTHRSH_MASK 0xFFFFFF00 -#define E1000_LSECTXCTRL_RSV_MASK 0x000000D8 - -#define E1000_LSECRXCTRL_EN_MASK 0x0000000C -#define E1000_LSECRXCTRL_EN_SHIFT 2 -#define E1000_LSECRXCTRL_DISABLE 0x0 -#define E1000_LSECRXCTRL_CHECK 0x1 -#define E1000_LSECRXCTRL_STRICT 0x2 -#define E1000_LSECRXCTRL_DROP 0x3 -#define E1000_LSECRXCTRL_PLSH 0x00000040 -#define E1000_LSECRXCTRL_RP 0x00000080 -#define E1000_LSECRXCTRL_RSV_MASK 0xFFFFFF33 - +#define E1000_LSECTXCAP_SUM_MASK 0x00FF0000 +#define E1000_LSECTXCAP_SUM_SHIFT 16 +#define E1000_LSECRXCAP_SUM_MASK 0x00FF0000 +#define E1000_LSECRXCAP_SUM_SHIFT 16 + +#define E1000_LSECTXCTRL_EN_MASK 0x00000003 +#define E1000_LSECTXCTRL_DISABLE 0x0 +#define E1000_LSECTXCTRL_AUTH 0x1 +#define E1000_LSECTXCTRL_AUTH_ENCRYPT 0x2 +#define E1000_LSECTXCTRL_AISCI 0x00000020 +#define E1000_LSECTXCTRL_PNTHRSH_MASK 0xFFFFFF00 +#define E1000_LSECTXCTRL_RSV_MASK 0x000000D8 + +#define E1000_LSECRXCTRL_EN_MASK 0x0000000C +#define E1000_LSECRXCTRL_EN_SHIFT 2 +#define E1000_LSECRXCTRL_DISABLE 0x0 +#define E1000_LSECRXCTRL_CHECK 0x1 +#define E1000_LSECRXCTRL_STRICT 0x2 +#define E1000_LSECRXCTRL_DROP 0x3 +#define E1000_LSECRXCTRL_PLSH 0x00000040 +#define E1000_LSECRXCTRL_RP 0x00000080 +#define E1000_LSECRXCTRL_RSV_MASK 0xFFFFFF33 + +/* Tx Rate-Scheduler Config fields */ +#define E1000_RTTBCNRC_RS_ENA 0x80000000 +#define E1000_RTTBCNRC_RF_DEC_MASK 0x00003FFF +#define E1000_RTTBCNRC_RF_INT_SHIFT 14 +#define E1000_RTTBCNRC_RF_INT_MASK \ + (E1000_RTTBCNRC_RF_DEC_MASK << E1000_RTTBCNRC_RF_INT_SHIFT) /* DMA Coalescing register fields */ -#define E1000_DMACR_DMACWT_MASK 0x00003FFF /* DMA Coalescing - * Watchdog Timer */ -#define E1000_DMACR_DMACTHR_MASK 0x00FF0000 /* DMA Coalescing Receive - * Threshold */ -#define E1000_DMACR_DMACTHR_SHIFT 16 -#define E1000_DMACR_DMAC_LX_MASK 0x30000000 /* Lx when no PCIe - * transactions */ -#define E1000_DMACR_DMAC_LX_SHIFT 28 -#define E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */ - -#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF /* DMA Coalescing Transmit - * Threshold */ - -#define 
E1000_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */ - -#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF /* Receive Traffic Rate - * Threshold */ -#define E1000_DMCRTRH_LRPRCW 0x80000000 /* Rcv packet rate in - * current window */ - -#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF /* DMA Coal Rcv Traffic - * Current Cnt */ - -#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0 /* Flow ctrl Rcv Threshold - * High val */ -#define E1000_FCRTC_RTH_COAL_SHIFT 4 -#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision based - on DMA coal */ - +/* DMA Coalescing Watchdog Timer */ +#define E1000_DMACR_DMACWT_MASK 0x00003FFF +/* DMA Coalescing Rx Threshold */ +#define E1000_DMACR_DMACTHR_MASK 0x00FF0000 +#define E1000_DMACR_DMACTHR_SHIFT 16 +/* Lx when no PCIe transactions */ +#define E1000_DMACR_DMAC_LX_MASK 0x30000000 +#define E1000_DMACR_DMAC_LX_SHIFT 28 +#define E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */ +/* DMA Coalescing BMC-to-OS Watchdog Enable */ +#define E1000_DMACR_DC_BMC2OSW_EN 0x00008000 + +/* DMA Coalescing Transmit Threshold */ +#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF + +#define E1000_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */ + +/* Rx Traffic Rate Threshold */ +#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF +/* Rx packet rate in current window */ +#define E1000_DMCRTRH_LRPRCW 0x80000000 + +/* DMA Coal Rx Traffic Current Count */ +#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF + +/* Flow ctrl Rx Threshold High val */ +#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0 +#define E1000_FCRTC_RTH_COAL_SHIFT 4 +/* Lx power decision based on DMA coal */ +#define E1000_PCIEMISC_LX_DECISION 0x00000080 + +#define E1000_RXPBS_CFG_TS_EN 0x80000000 /* Timestamp in Rx buffer */ +#define E1000_RXPBS_SIZE_I210_MASK 0x0000003F /* Rx packet buffer size */ +#define E1000_TXPB0S_SIZE_I210_MASK 0x0000003F /* Tx packet buffer 0 size */ + +/* Proxy Filter Control */ +#define E1000_PROXYFC_D0 0x00000001 /* Enable offload in D0 */ +#define E1000_PROXYFC_EX 0x00000004 /* Directed exact proxy */ +#define E1000_PROXYFC_MC 0x00000008 /* Directed MC Proxy */ +#define E1000_PROXYFC_BC 0x00000010 /* Broadcast Proxy Enable */ +#define E1000_PROXYFC_ARP_DIRECTED 0x00000020 /* Directed ARP Proxy Ena */ +#define E1000_PROXYFC_IPV4 0x00000040 /* Directed IPv4 Enable */ +#define E1000_PROXYFC_IPV6 0x00000080 /* Directed IPv6 Enable */ +#define E1000_PROXYFC_NS 0x00000200 /* IPv6 Neighbor Solicitation */ +#define E1000_PROXYFC_ARP 0x00000800 /* ARP Request Proxy Ena */ +/* Proxy Status */ +#define E1000_PROXYS_CLEAR 0xFFFFFFFF /* Clear */ + +/* Firmware Status */ +#define E1000_FWSTS_FWRI 0x80000000 /* FW Reset Indication */ +/* VF Control */ +#define E1000_VTCTRL_RST 0x04000000 /* Reset VF */ + +#define E1000_STATUS_LAN_ID_MASK 0x00000000C /* Mask for Lan ID field */ +/* Lan ID bit field offset in status register */ +#define E1000_STATUS_LAN_ID_OFFSET 2 +#define E1000_VFTA_ENTRIES 128 +#ifndef E1000_UNUSEDARG +#define E1000_UNUSEDARG +#endif /* E1000_UNUSEDARG */ #endif /* _E1000_DEFINES_H_ */ diff --git a/vmkdrivers/src_9/drivers/net/igb/e1000_hw.h b/vmkdrivers/src_9/drivers/net/igb/e1000_hw.h index 04c49069eede42ba66cabbf2fa55b577fde7be99..347cef7169944fa5eed858852092f26b18c20fe9 100644 --- a/vmkdrivers/src_9/drivers/net/igb/e1000_hw.h +++ b/vmkdrivers/src_9/drivers/net/igb/e1000_hw.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2009 Intel Corporation. 
+ Copyright(c) 2007-2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -34,41 +34,60 @@ struct e1000_hw; -#define E1000_DEV_ID_82576 0x10C9 -#define E1000_DEV_ID_82576_FIBER 0x10E6 -#define E1000_DEV_ID_82576_SERDES 0x10E7 -#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8 -#define E1000_DEV_ID_82576_QUAD_COPPER_ET2 0x1526 -#define E1000_DEV_ID_82576_NS 0x150A -#define E1000_DEV_ID_82576_NS_SERDES 0x1518 -#define E1000_DEV_ID_82576_SERDES_QUAD 0x150D -#define E1000_DEV_ID_82575EB_COPPER 0x10A7 -#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9 -#define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6 -#define E1000_DEV_ID_82580_COPPER 0x150E -#define E1000_DEV_ID_82580_FIBER 0x150F -#define E1000_DEV_ID_82580_SERDES 0x1510 -#define E1000_DEV_ID_82580_SGMII 0x1511 -#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516 -#define E1000_DEV_ID_I350_COPPER 0x1521 -#define E1000_DEV_ID_I350_FIBER 0x1522 -#define E1000_DEV_ID_I350_SERDES 0x1523 -#define E1000_DEV_ID_I350_SGMII 0x1524 -#define E1000_REVISION_0 0 -#define E1000_REVISION_1 1 -#define E1000_REVISION_2 2 -#define E1000_REVISION_3 3 -#define E1000_REVISION_4 4 - -#define E1000_FUNC_0 0 -#define E1000_FUNC_1 1 -#define E1000_FUNC_2 2 -#define E1000_FUNC_3 3 - -#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0 -#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3 -#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN2 6 -#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN3 9 +#define E1000_DEV_ID_82576 0x10C9 +#define E1000_DEV_ID_82576_FIBER 0x10E6 +#define E1000_DEV_ID_82576_SERDES 0x10E7 +#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8 +#define E1000_DEV_ID_82576_QUAD_COPPER_ET2 0x1526 +#define E1000_DEV_ID_82576_NS 0x150A +#define E1000_DEV_ID_82576_NS_SERDES 0x1518 +#define E1000_DEV_ID_82576_SERDES_QUAD 0x150D +#define E1000_DEV_ID_82575EB_COPPER 0x10A7 +#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9 +#define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6 +#define E1000_DEV_ID_82580_COPPER 0x150E +#define E1000_DEV_ID_82580_FIBER 0x150F +#define E1000_DEV_ID_82580_SERDES 0x1510 +#define E1000_DEV_ID_82580_SGMII 0x1511 +#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516 +#define E1000_DEV_ID_82580_QUAD_FIBER 0x1527 +#define E1000_DEV_ID_I350_COPPER 0x1521 +#define E1000_DEV_ID_I350_FIBER 0x1522 +#define E1000_DEV_ID_I350_SERDES 0x1523 +#define E1000_DEV_ID_I350_SGMII 0x1524 +#define E1000_DEV_ID_I350_DA4 0x1546 +#define E1000_DEV_ID_I210_COPPER 0x1533 +#define E1000_DEV_ID_I210_COPPER_OEM1 0x1534 +#define E1000_DEV_ID_I210_COPPER_IT 0x1535 +#define E1000_DEV_ID_I210_FIBER 0x1536 +#define E1000_DEV_ID_I210_SERDES 0x1537 +#define E1000_DEV_ID_I210_SGMII 0x1538 +#define E1000_DEV_ID_I210_COPPER_FLASHLESS 0x157B +#define E1000_DEV_ID_I210_SERDES_FLASHLESS 0x157C +#define E1000_DEV_ID_I211_COPPER 0x1539 +#define E1000_DEV_ID_I354_BACKPLANE_1GBPS 0x1F40 +#define E1000_DEV_ID_I354_SGMII 0x1F41 +#define E1000_DEV_ID_I354_BACKPLANE_2_5GBPS 0x1F45 +#define E1000_DEV_ID_DH89XXCC_SGMII 0x0438 +#define E1000_DEV_ID_DH89XXCC_SERDES 0x043A +#define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C +#define E1000_DEV_ID_DH89XXCC_SFP 0x0440 + +#define E1000_REVISION_0 0 +#define E1000_REVISION_1 1 +#define E1000_REVISION_2 2 +#define E1000_REVISION_3 3 +#define E1000_REVISION_4 4 + +#define E1000_FUNC_0 0 +#define E1000_FUNC_1 1 +#define E1000_FUNC_2 2 +#define E1000_FUNC_3 3 + +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN2 6 +#define 
E1000_ALT_MAC_ADDRESS_OFFSET_LAN3 9 enum e1000_mac_type { e1000_undefined = 0, @@ -76,6 +95,9 @@ enum e1000_mac_type { e1000_82576, e1000_82580, e1000_i350, + e1000_i354, + e1000_i210, + e1000_i211, e1000_num_macs /* List is 1-based, so subtract 1 for true count. */ }; @@ -92,6 +114,7 @@ enum e1000_nvm_type { e1000_nvm_none, e1000_nvm_eeprom_spi, e1000_nvm_flash_hw, + e1000_nvm_invm, e1000_nvm_flash_sw }; @@ -112,6 +135,7 @@ enum e1000_phy_type { e1000_phy_ife, e1000_phy_82580, e1000_phy_vf, + e1000_phy_i210, }; enum e1000_bus_type { @@ -185,13 +209,22 @@ enum e1000_serdes_link_state { e1000_serdes_link_forced_up }; +#ifndef __le16 +#define __le16 u16 +#endif +#ifndef __le32 +#define __le32 u32 +#endif +#ifndef __le64 +#define __le64 u64 +#endif /* Receive Descriptor */ struct e1000_rx_desc { __le64 buffer_addr; /* Address of the descriptor's data buffer */ __le16 length; /* Length of data DMAed into data buffer */ - __le16 csum; /* Packet checksum */ - u8 status; /* Descriptor status */ - u8 errors; /* Descriptor Errors */ + __le16 csum; /* Packet checksum */ + u8 status; /* Descriptor status */ + u8 errors; /* Descriptor Errors */ __le16 special; }; @@ -203,9 +236,9 @@ union e1000_rx_desc_extended { } read; struct { struct { - __le32 mrq; /* Multiple Rx Queues */ + __le32 mrq; /* Multiple Rx Queues */ union { - __le32 rss; /* RSS Hash */ + __le32 rss; /* RSS Hash */ struct { __le16 ip_id; /* IP id */ __le16 csum; /* Packet Checksum */ @@ -215,12 +248,16 @@ union e1000_rx_desc_extended { struct { __le32 status_error; /* ext status/error */ __le16 length; - __le16 vlan; /* VLAN tag */ + __le16 vlan; /* VLAN tag */ } upper; } wb; /* writeback */ }; #define MAX_PS_BUFFERS 4 + +/* Number of packet split data buffers (not including the header buffer) */ +#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1) + /* Receive Descriptor - Packet Split */ union e1000_rx_desc_packet_split { struct { @@ -229,9 +266,9 @@ union e1000_rx_desc_packet_split { } read; struct { struct { - __le32 mrq; /* Multiple Rx Queues */ + __le32 mrq; /* Multiple Rx Queues */ union { - __le32 rss; /* RSS Hash */ + __le32 rss; /* RSS Hash */ struct { __le16 ip_id; /* IP id */ __le16 csum; /* Packet Checksum */ @@ -240,12 +277,13 @@ union e1000_rx_desc_packet_split { } lower; struct { __le32 status_error; /* ext status/error */ - __le16 length0; /* length of buffer 0 */ - __le16 vlan; /* VLAN tag */ + __le16 length0; /* length of buffer 0 */ + __le16 vlan; /* VLAN tag */ } middle; struct { __le16 header_status; - __le16 length[3]; /* length of buffers 1-3 */ + /* length of buffers 1-3 */ + __le16 length[PS_PAGE_BUFFERS]; } upper; __le64 reserved; } wb; /* writeback */ @@ -257,16 +295,16 @@ struct e1000_tx_desc { union { __le32 data; struct { - __le16 length; /* Data buffer length */ - u8 cso; /* Checksum offset */ - u8 cmd; /* Descriptor control */ + __le16 length; /* Data buffer length */ + u8 cso; /* Checksum offset */ + u8 cmd; /* Descriptor control */ } flags; } lower; union { __le32 data; struct { - u8 status; /* Descriptor status */ - u8 css; /* Checksum start */ + u8 status; /* Descriptor status */ + u8 css; /* Checksum start */ __le16 special; } fields; } upper; @@ -277,37 +315,37 @@ struct e1000_context_desc { union { __le32 ip_config; struct { - u8 ipcss; /* IP checksum start */ - u8 ipcso; /* IP checksum offset */ - __le16 ipcse; /* IP checksum end */ + u8 ipcss; /* IP checksum start */ + u8 ipcso; /* IP checksum offset */ + __le16 ipcse; /* IP checksum end */ } ip_fields; } lower_setup; union { __le32 tcp_config; struct 
{ - u8 tucss; /* TCP checksum start */ - u8 tucso; /* TCP checksum offset */ - __le16 tucse; /* TCP checksum end */ + u8 tucss; /* TCP checksum start */ + u8 tucso; /* TCP checksum offset */ + __le16 tucse; /* TCP checksum end */ } tcp_fields; } upper_setup; __le32 cmd_and_length; union { __le32 data; struct { - u8 status; /* Descriptor status */ - u8 hdr_len; /* Header length */ - __le16 mss; /* Maximum segment size */ + u8 status; /* Descriptor status */ + u8 hdr_len; /* Header length */ + __le16 mss; /* Maximum segment size */ } fields; } tcp_seg_setup; }; /* Offload data descriptor */ struct e1000_data_desc { - __le64 buffer_addr; /* Address of the descriptor's buffer address */ + __le64 buffer_addr; /* Address of the descriptor's buffer address */ union { __le32 data; struct { - __le16 length; /* Data buffer length */ + __le16 length; /* Data buffer length */ u8 typ_len_ext; u8 cmd; } flags; @@ -315,8 +353,8 @@ struct e1000_data_desc { union { __le32 data; struct { - u8 status; /* Descriptor status */ - u8 popts; /* Packet Options */ + u8 status; /* Descriptor status */ + u8 popts; /* Packet Options */ __le16 special; } fields; } upper; @@ -400,6 +438,10 @@ struct e1000_hw_stats { u64 scvpc; u64 hrmpc; u64 doosync; + u64 o2bgptc; + u64 o2bspc; + u64 b2ospc; + u64 b2ogprc; }; @@ -427,7 +469,7 @@ struct e1000_host_command_header { u8 checksum; }; -#define E1000_HI_MAX_DATA_LENGTH 252 +#define E1000_HI_MAX_DATA_LENGTH 252 struct e1000_host_command_info { struct e1000_host_command_header command_header; u8 command_data[E1000_HI_MAX_DATA_LENGTH]; @@ -442,7 +484,7 @@ struct e1000_host_mng_command_header { u16 command_length; }; -#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 +#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 struct e1000_host_mng_command_info { struct e1000_host_mng_command_header command_header; u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; @@ -454,13 +496,13 @@ struct e1000_host_mng_command_info { #include "e1000_manage.h" #include "e1000_mbx.h" +/* Function pointers for the MAC. */ struct e1000_mac_operations { - /* Function pointers for the MAC. 
*/ s32 (*init_params)(struct e1000_hw *); s32 (*id_led_init)(struct e1000_hw *); s32 (*blink_led)(struct e1000_hw *); + bool (*check_mng_mode)(struct e1000_hw *); s32 (*check_for_link)(struct e1000_hw *); - bool (*check_mng_mode)(struct e1000_hw *hw); s32 (*cleanup_led)(struct e1000_hw *); void (*clear_hw_cntrs)(struct e1000_hw *); void (*clear_vfta)(struct e1000_hw *); @@ -473,22 +515,35 @@ struct e1000_mac_operations { s32 (*reset_hw)(struct e1000_hw *); s32 (*init_hw)(struct e1000_hw *); void (*shutdown_serdes)(struct e1000_hw *); + void (*power_up_serdes)(struct e1000_hw *); s32 (*setup_link)(struct e1000_hw *); s32 (*setup_physical_interface)(struct e1000_hw *); s32 (*setup_led)(struct e1000_hw *); void (*write_vfta)(struct e1000_hw *, u32, u32); - void (*mta_set)(struct e1000_hw *, u32); void (*config_collision_dist)(struct e1000_hw *); void (*rar_set)(struct e1000_hw *, u8*, u32); s32 (*read_mac_addr)(struct e1000_hw *); s32 (*validate_mdi_setting)(struct e1000_hw *); - s32 (*mng_host_if_write)(struct e1000_hw *, u8*, u16, u16, u8*); - s32 (*mng_write_cmd_header)(struct e1000_hw *hw, - struct e1000_host_mng_command_header*); - s32 (*mng_enable_host_if)(struct e1000_hw *); - s32 (*wait_autoneg)(struct e1000_hw *); -}; - + s32 (*get_thermal_sensor_data)(struct e1000_hw *); + s32 (*init_thermal_sensor_thresh)(struct e1000_hw *); + s32 (*acquire_swfw_sync)(struct e1000_hw *, u16); + void (*release_swfw_sync)(struct e1000_hw *, u16); +}; + +/* When to use various PHY register access functions: + * + * Func Caller + * Function Does Does When to use + * ~~~~~~~~~~~~ ~~~~~ ~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * X_reg L,P,A n/a for simple PHY reg accesses + * X_reg_locked P,A L for multiple accesses of different regs + * on different pages + * X_reg_page A L,P for multiple accesses of different regs + * on the same page + * + * Where X=[read|write], L=locking, P=sets page, A=register access + * + */ struct e1000_phy_operations { s32 (*init_params)(struct e1000_hw *); s32 (*acquire)(struct e1000_hw *); @@ -499,18 +554,24 @@ struct e1000_phy_operations { s32 (*get_cfg_done)(struct e1000_hw *hw); s32 (*get_cable_length)(struct e1000_hw *); s32 (*get_info)(struct e1000_hw *); + s32 (*set_page)(struct e1000_hw *, u16); s32 (*read_reg)(struct e1000_hw *, u32, u16 *); s32 (*read_reg_locked)(struct e1000_hw *, u32, u16 *); + s32 (*read_reg_page)(struct e1000_hw *, u32, u16 *); void (*release)(struct e1000_hw *); s32 (*reset)(struct e1000_hw *); s32 (*set_d0_lplu_state)(struct e1000_hw *, bool); s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); s32 (*write_reg)(struct e1000_hw *, u32, u16); s32 (*write_reg_locked)(struct e1000_hw *, u32, u16); + s32 (*write_reg_page)(struct e1000_hw *, u32, u16); void (*power_up)(struct e1000_hw *); void (*power_down)(struct e1000_hw *); + s32 (*read_i2c_byte)(struct e1000_hw *, u8, u8, u8 *); + s32 (*write_i2c_byte)(struct e1000_hw *, u8, u8, u8); }; +/* Function pointers for the NVM. 
*/ struct e1000_nvm_operations { s32 (*init_params)(struct e1000_hw *); s32 (*acquire)(struct e1000_hw *); @@ -523,10 +584,23 @@ struct e1000_nvm_operations { s32 (*write)(struct e1000_hw *, u16, u16, u16 *); }; +#define E1000_MAX_SENSORS 3 + +struct e1000_thermal_diode_data { + u8 location; + u8 temp; + u8 caution_thresh; + u8 max_op_thresh; +}; + +struct e1000_thermal_sensor_data { + struct e1000_thermal_diode_data sensor[E1000_MAX_SENSORS]; +}; + struct e1000_mac_info { struct e1000_mac_operations ops; - u8 addr[6]; - u8 perm_addr[6]; + u8 addr[ETH_ADDR_LEN]; + u8 perm_addr[ETH_ADDR_LEN]; enum e1000_mac_type type; @@ -554,6 +628,7 @@ struct e1000_mac_info { u8 forced_speed_duplex; bool adaptive_ifs; + bool has_fwsm; bool arc_subsystem_valid; bool asf_firmware_present; bool autoneg; @@ -563,6 +638,7 @@ struct e1000_mac_info { enum e1000_serdes_link_state serdes_link_state; bool serdes_has_link; bool tx_pkt_filtering; + struct e1000_thermal_sensor_data thermal_sensor_data; }; struct e1000_phy_info { @@ -624,13 +700,14 @@ struct e1000_bus_info { }; struct e1000_fc_info { - u32 high_water; /* Flow control high-water mark */ - u32 low_water; /* Flow control low-water mark */ - u16 pause_time; /* Flow control pause timer */ - bool send_xon; /* Flow control send XON */ - bool strict_ieee; /* Strict IEEE mode */ - enum e1000_fc_mode current_mode; /* FC mode in effect */ - enum e1000_fc_mode requested_mode; /* FC mode requested by caller */ + u32 high_water; /* Flow control high-water mark */ + u32 low_water; /* Flow control low-water mark */ + u16 pause_time; /* Flow control pause timer */ + u16 refresh_time; /* Flow control refresh timer */ + bool send_xon; /* Flow control send XON */ + bool strict_ieee; /* Strict IEEE mode */ + enum e1000_fc_mode current_mode; /* FC mode in effect */ + enum e1000_fc_mode requested_mode; /* FC mode requested by caller */ }; struct e1000_mbx_operations { @@ -664,14 +741,20 @@ struct e1000_mbx_info { struct e1000_dev_spec_82575 { bool sgmii_active; bool global_device_reset; + bool eee_disable; + bool module_plugged; + bool clear_semaphore_once; + u32 mtu; + struct sfp_e1000_flags eth_flags; + u8 media_port; + bool media_changed; }; struct e1000_dev_spec_vf { - u32 vf_number; - u32 v2p_mailbox; + u32 vf_number; + u32 v2p_mailbox; }; - struct e1000_hw { void *back; @@ -688,8 +771,8 @@ struct e1000_hw { struct e1000_host_mng_dhcp_cookie mng_cookie; union { - struct e1000_dev_spec_82575 _82575; - struct e1000_dev_spec_vf vf; + struct e1000_dev_spec_82575 _82575; + struct e1000_dev_spec_vf vf; } dev_spec; u16 device_id; @@ -701,6 +784,7 @@ struct e1000_hw { }; #include "e1000_82575.h" +#include "e1000_i210.h" /* These functions must be implemented by drivers */ s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); diff --git a/vmkdrivers/src_9/drivers/net/igb/e1000_i210.c b/vmkdrivers/src_9/drivers/net/igb/e1000_i210.c new file mode 100755 index 0000000000000000000000000000000000000000..2c527d09d6134131fba894784bb7084ff0b0e3ee --- /dev/null +++ b/vmkdrivers/src_9/drivers/net/igb/e1000_i210.c @@ -0,0 +1,909 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2013 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. 
+ + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "e1000_api.h" + + +static s32 e1000_acquire_nvm_i210(struct e1000_hw *hw); +static void e1000_release_nvm_i210(struct e1000_hw *hw); +static s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw); +static s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data); +static s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw); +static s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data); + +/** + * e1000_acquire_nvm_i210 - Request for access to EEPROM + * @hw: pointer to the HW structure + * + * Acquire the necessary semaphores for exclusive access to the EEPROM. + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -E1000_ERR_NVM (-1). + **/ +static s32 e1000_acquire_nvm_i210(struct e1000_hw *hw) +{ + s32 ret_val; + + DEBUGFUNC("e1000_acquire_nvm_i210"); + + ret_val = e1000_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM); + + return ret_val; +} + +/** + * e1000_release_nvm_i210 - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit, + * then release the semaphores acquired. + **/ +static void e1000_release_nvm_i210(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_release_nvm_i210"); + + e1000_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM); +} + +/** + * e1000_acquire_swfw_sync_i210 - Acquire SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Acquire the SW/FW semaphore to access the PHY or NVM. The mask + * will also specify which port we're acquiring the lock for. 
**/ +s32 e1000_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + u32 swmask = mask; + u32 fwmask = mask << 16; + s32 ret_val = E1000_SUCCESS; + s32 i = 0, timeout = 200; /* FIXME: find real value to use here */ + + DEBUGFUNC("e1000_acquire_swfw_sync_i210"); + + while (i < timeout) { + if (e1000_get_hw_semaphore_i210(hw)) { + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC); + if (!(swfw_sync & (fwmask | swmask))) + break; + + /* + * Firmware currently using resource (fwmask) + * or other software thread using resource (swmask) + */ + e1000_put_hw_semaphore_generic(hw); + msec_delay_irq(5); + i++; + } + + if (i == timeout) { + DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n"); + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync |= swmask; + E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync); + + e1000_put_hw_semaphore_generic(hw); + +out: + return ret_val; +} + +/** + * e1000_release_swfw_sync_i210 - Release SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to release + * + * Release the SW/FW semaphore used to access the PHY or NVM. The mask + * will also specify which port we're releasing the lock for. + **/ +void e1000_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + + DEBUGFUNC("e1000_release_swfw_sync_i210"); + + while (e1000_get_hw_semaphore_i210(hw) != E1000_SUCCESS) + ; /* Empty */ + + swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC); + swfw_sync &= ~mask; + E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync); + + e1000_put_hw_semaphore_generic(hw); +} + +/** + * e1000_get_hw_semaphore_i210 - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + **/ +static s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw) +{ + u32 swsm; + s32 timeout = hw->nvm.word_size + 1; + s32 i = 0; + + DEBUGFUNC("e1000_get_hw_semaphore_i210"); + + /* Get the SW semaphore */ + while (i < timeout) { + swsm = E1000_READ_REG(hw, E1000_SWSM); + if (!(swsm & E1000_SWSM_SMBI)) + break; + + usec_delay(50); + i++; + } + + if (i == timeout) { + /* In rare circumstances, the SW semaphore may already be held + * unintentionally. Clear the semaphore once before giving up. + */ + if (hw->dev_spec._82575.clear_semaphore_once) { + hw->dev_spec._82575.clear_semaphore_once = false; + e1000_put_hw_semaphore_generic(hw); + for (i = 0; i < timeout; i++) { + swsm = E1000_READ_REG(hw, E1000_SWSM); + if (!(swsm & E1000_SWSM_SMBI)) + break; + + usec_delay(50); + } + } + + /* If we do not have the semaphore here, we have to give up. */ + if (i == timeout) { + DEBUGOUT("Driver can't access device - SMBI bit is set.\n"); + return -E1000_ERR_NVM; + } + } + + /* Get the FW semaphore.
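A usage note before the FW-side latch below: callers do not take this two-level semaphore directly; they go through the acquire/release pair above. A minimal sketch, assuming the E1000_SWFW_PHY0_SM resource mask from e1000_defines.h:

	if (e1000_acquire_swfw_sync_i210(hw, E1000_SWFW_PHY0_SM) == E1000_SUCCESS) {
		/* exclusive access to PHY 0 here */
		e1000_release_swfw_sync_i210(hw, E1000_SWFW_PHY0_SM);
	}

Both sides hold the plain HW semaphore only long enough to update SW_FW_SYNC, which is why the sequence below must also latch SWESMBI before that register is touched.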
*/ + for (i = 0; i < timeout; i++) { + swsm = E1000_READ_REG(hw, E1000_SWSM); + E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ + if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI) + break; + + usec_delay(50); + } + + if (i == timeout) { + /* Release semaphores */ + e1000_put_hw_semaphore_generic(hw); + DEBUGOUT("Driver can't access the NVM\n"); + return -E1000_ERR_NVM; + } + + return E1000_SUCCESS; +} + +/** + * e1000_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the Shadow Ram to read + * @words: number of words to read + * @data: words read from the Shadow Ram + * + * Reads 16 bit words from the Shadow Ram using the EERD register. + * Uses necessary synchronization semaphores. + **/ +s32 e1000_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 status = E1000_SUCCESS; + u16 i, count; + + DEBUGFUNC("e1000_read_nvm_srrd_i210"); + + /* We cannot hold synchronization semaphores for too long, + * because of forceful takeover procedure. However, it is more efficient + * to read in bursts than synchronizing access for each word. */ + for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) { + count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ? + E1000_EERD_EEWR_MAX_COUNT : (words - i); + if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { + status = e1000_read_nvm_eerd(hw, offset, count, + data + i); + hw->nvm.ops.release(hw); + } else { + status = E1000_ERR_SWFW_SYNC; + } + + if (status != E1000_SUCCESS) + break; + } + + return status; +} + +/** + * e1000_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR + * @hw: pointer to the HW structure + * @offset: offset within the Shadow RAM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the Shadow RAM + * + * Writes data to Shadow RAM at offset using EEWR register. + * + * If e1000_update_nvm_checksum is not called after this function, the + * data will not be committed to FLASH and also Shadow RAM will most likely + * contain an invalid checksum. + * + * If error code is returned, data and Shadow RAM may be inconsistent - buffer + * partially written. + **/ +s32 e1000_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 status = E1000_SUCCESS; + u16 i, count; + + DEBUGFUNC("e1000_write_nvm_srwr_i210"); + + /* We cannot hold synchronization semaphores for too long, + * because of forceful takeover procedure. However, it is more efficient + * to write in bursts than synchronizing access for each word. */ + for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) { + count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ? + E1000_EERD_EEWR_MAX_COUNT : (words - i); + if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { + status = e1000_write_nvm_srwr(hw, offset, count, + data + i); + hw->nvm.ops.release(hw); + } else { + status = E1000_ERR_SWFW_SYNC; + } + + if (status != E1000_SUCCESS) + break; + } + + return status; +} + +/** + * e1000_write_nvm_srwr - Write to Shadow Ram using EEWR + * @hw: pointer to the HW structure + * @offset: offset within the Shadow Ram to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the Shadow Ram + * + * Writes data to Shadow Ram at offset using EEWR register. + * + * If e1000_update_nvm_checksum is not called after this function, the + * Shadow Ram will most likely contain an invalid checksum.
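As a worked example of the burst loop shared by both wrappers above (assuming the driver's usual E1000_EERD_EEWR_MAX_COUNT of 512): a 1200-word transfer becomes three acquire/transfer/release rounds of 512, 512 and 176 words. The count expression is simply min(words - i, E1000_EERD_EEWR_MAX_COUNT) spelled out as a ternary, and dropping the semaphores between rounds is what keeps the firmware's forceful takeover procedure from being starved.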
+ **/ +static s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i, k, eewr = 0; + u32 attempts = 100000; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_write_nvm_srwr"); + + /* + * A check for invalid values: offset too large, too many words, + * too many words for the offset, and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + DEBUGOUT("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + for (i = 0; i < words; i++) { + eewr = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) | + (data[i] << E1000_NVM_RW_REG_DATA) | + E1000_NVM_RW_REG_START; + + E1000_WRITE_REG(hw, E1000_SRWR, eewr); + + for (k = 0; k < attempts; k++) { + if (E1000_NVM_RW_REG_DONE & + E1000_READ_REG(hw, E1000_SRWR)) { + ret_val = E1000_SUCCESS; + break; + } + usec_delay(5); + } + + if (ret_val != E1000_SUCCESS) { + DEBUGOUT("Shadow RAM write EEWR timed out\n"); + break; + } + } + +out: + return ret_val; +} + +/** e1000_read_invm_word_i210 - Reads OTP + * @hw: pointer to the HW structure + * @address: the word address (aka eeprom offset) to read + * @data: pointer to the data read + * + * Reads 16-bit words from the OTP. Return error when the word is not + * stored in OTP. + **/ +static s32 e1000_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data) +{ + s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND; + u32 invm_dword; + u16 i; + u8 record_type, word_address; + + DEBUGFUNC("e1000_read_invm_word_i210"); + + for (i = 0; i < E1000_INVM_SIZE; i++) { + invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i)); + /* Get record type */ + record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword); + if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE) + break; + if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE) + i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS; + if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE) + i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS; + if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) { + word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword); + if (word_address == address) { + *data = INVM_DWORD_TO_WORD_DATA(invm_dword); + DEBUGOUT2("Read INVM Word 0x%02x = %x", + address, *data); + status = E1000_SUCCESS; + break; + } + } + } + if (status != E1000_SUCCESS) + DEBUGOUT1("Requested word 0x%02x not found in OTP\n", address); + return status; +} + +/** e1000_read_invm_i210 - Read invm wrapper function for I210/I211 + * @hw: pointer to the HW structure + * @offset: the word address (aka eeprom offset) to read + * @words: number of words to read (unused; kept to match the NVM read prototype) + * @data: pointer to the data read + * + * Wrapper function to return data formerly found in the NVM.
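For reference, a worked decode of a single iNVM dword using the INVM_DWORD_TO_* macros declared in e1000_i210.h later in this patch (the dword value itself is illustrative): invm_dword = 0x12340201 yields record_type = 0x12340201 & 0x7 = 1 (word autoload), word_address = (0x12340201 & 0x0000FE00) >> 9 = 0x01 and word_data = (0x12340201 & 0xFFFF0000) >> 16 = 0x1234, i.e. "autoload the value 0x1234 into word 0x01".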
+ **/ +static s32 e1000_read_invm_i210(struct e1000_hw *hw, u16 offset, + u16 E1000_UNUSEDARG words, u16 *data) +{ + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_read_invm_i210"); + + /* Only the MAC addr is required to be present in the iNVM */ + switch (offset) { + case NVM_MAC_ADDR: + ret_val = e1000_read_invm_word_i210(hw, (u8)offset, &data[0]); + ret_val |= e1000_read_invm_word_i210(hw, (u8)offset+1, + &data[1]); + ret_val |= e1000_read_invm_word_i210(hw, (u8)offset+2, + &data[2]); + if (ret_val != E1000_SUCCESS) + DEBUGOUT("MAC Addr not found in iNVM\n"); + break; + case NVM_INIT_CTRL_2: + ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data); + if (ret_val != E1000_SUCCESS) { + *data = NVM_INIT_CTRL_2_DEFAULT_I211; + ret_val = E1000_SUCCESS; + } + break; + case NVM_INIT_CTRL_4: + ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data); + if (ret_val != E1000_SUCCESS) { + *data = NVM_INIT_CTRL_4_DEFAULT_I211; + ret_val = E1000_SUCCESS; + } + break; + case NVM_LED_1_CFG: + ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data); + if (ret_val != E1000_SUCCESS) { + *data = NVM_LED_1_CFG_DEFAULT_I211; + ret_val = E1000_SUCCESS; + } + break; + case NVM_LED_0_2_CFG: + ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data); + if (ret_val != E1000_SUCCESS) { + *data = NVM_LED_0_2_CFG_DEFAULT_I211; + ret_val = E1000_SUCCESS; + } + break; + case NVM_ID_LED_SETTINGS: + ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data); + if (ret_val != E1000_SUCCESS) { + *data = ID_LED_RESERVED_FFFF; + ret_val = E1000_SUCCESS; + } + break; + case NVM_SUB_DEV_ID: + *data = hw->subsystem_device_id; + break; + case NVM_SUB_VEN_ID: + *data = hw->subsystem_vendor_id; + break; + case NVM_DEV_ID: + *data = hw->device_id; + break; + case NVM_VEN_ID: + *data = hw->vendor_id; + break; + default: + DEBUGOUT1("NVM word 0x%02x is not mapped.\n", offset); + *data = NVM_RESERVED_WORD; + break; + } + return ret_val; +} + +/** + * e1000_read_invm_version - Reads iNVM version and image type + * @hw: pointer to the HW structure + * @invm_ver: version structure for the version read + * + * Reads iNVM version and image type. 
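The recovered version word packs both fields, per the masks in e1000_i210.h below. An illustrative value: version = 0x152 gives invm_major = (0x152 & E1000_INVM_MAJOR_MASK) >> E1000_INVM_MAJOR_SHIFT = 0x15 and invm_minor = 0x152 & E1000_INVM_MINOR_MASK = 0x2, i.e. firmware version 21.2.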
+ **/ +s32 e1000_read_invm_version(struct e1000_hw *hw, + struct e1000_fw_version *invm_ver) +{ + u32 *record = NULL; + u32 *next_record = NULL; + u32 i = 0; + u32 invm_dword = 0; + u32 invm_blocks = E1000_INVM_SIZE - (E1000_INVM_ULT_BYTES_SIZE / + E1000_INVM_RECORD_SIZE_IN_BYTES); + u32 buffer[E1000_INVM_SIZE]; + s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND; + u16 version = 0; + + DEBUGFUNC("e1000_read_invm_version"); + + /* Read iNVM memory */ + for (i = 0; i < E1000_INVM_SIZE; i++) { + invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i)); + buffer[i] = invm_dword; + } + + /* Read version number */ + for (i = 1; i < invm_blocks; i++) { + record = &buffer[invm_blocks - i]; + next_record = &buffer[invm_blocks - i + 1]; + + /* Check if we have first version location used */ + if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) { + version = 0; + status = E1000_SUCCESS; + break; + } + /* Check if we have second version location used */ + else if ((i == 1) && + ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) { + version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3; + status = E1000_SUCCESS; + break; + } + /* + * Check if we have odd version location + * used and it is the last one used + */ + else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) && + ((*record & 0x3) == 0)) || (((*record & 0x3) != 0) && + (i != 1))) { + version = (*next_record & E1000_INVM_VER_FIELD_TWO) + >> 13; + status = E1000_SUCCESS; + break; + } + /* + * Check if we have even version location + * used and it is the last one used + */ + else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) && + ((*record & 0x3) == 0)) { + version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3; + status = E1000_SUCCESS; + break; + } + } + + if (status == E1000_SUCCESS) { + invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK) + >> E1000_INVM_MAJOR_SHIFT; + invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK; + } + /* Read Image Type */ + for (i = 1; i < invm_blocks; i++) { + record = &buffer[invm_blocks - i]; + next_record = &buffer[invm_blocks - i + 1]; + + /* Check if we have image type in first location used */ + if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) { + invm_ver->invm_img_type = 0; + status = E1000_SUCCESS; + break; + } + /* Check if we have image type in a later location and it is the last one used */ + else if ((((*record & 0x3) == 0) && + ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) || + ((((*record & 0x3) != 0) && (i != 1)))) { + invm_ver->invm_img_type = + (*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23; + status = E1000_SUCCESS; + break; + } + } + return status; +} + +/** + * e1000_validate_nvm_checksum_i210 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + **/ +s32 e1000_validate_nvm_checksum_i210(struct e1000_hw *hw) +{ + s32 status = E1000_SUCCESS; + s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *); + + DEBUGFUNC("e1000_validate_nvm_checksum_i210"); + + if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { + + /* + * Replace the semaphore-grabbing read function with one that + * skips the semaphore; it is already held here. + */ + read_op_ptr = hw->nvm.ops.read; + hw->nvm.ops.read = e1000_read_nvm_eerd; + + status = e1000_validate_nvm_checksum_generic(hw); + + /* Revert original read operation.
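The swap-and-restore around e1000_validate_nvm_checksum_generic is what lets that routine run with the semaphores already held. For reference, the convention it verifies (NVM_CHECKSUM_REG and NVM_SUM being the usual 0x3F and 0xBABA from e1000_defines.h): the 16-bit sum of NVM words 0x00 through 0x3F must equal 0xBABA, so the update path below rebuilds word 0x3F as (u16)(NVM_SUM - sum of words 0x00..0x3E) before committing to flash.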
*/ + hw->nvm.ops.read = read_op_ptr; + + hw->nvm.ops.release(hw); + } else { + status = E1000_ERR_SWFW_SYNC; + } + + return status; +} + + +/** + * e1000_update_nvm_checksum_i210 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. The EEPROM data is then committed to the flash. + **/ +s32 e1000_update_nvm_checksum_i210(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u16 checksum = 0; + u16 i, nvm_data; + + DEBUGFUNC("e1000_update_nvm_checksum_i210"); + + /* + * Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + ret_val = e1000_read_nvm_eerd(hw, 0, 1, &nvm_data); + if (ret_val != E1000_SUCCESS) { + DEBUGOUT("EEPROM read failed\n"); + goto out; + } + + if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { + /* + * Do not use hw->nvm.ops.write, hw->nvm.ops.read + * because we do not want to take the synchronization + * semaphores twice here. + */ + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = e1000_read_nvm_eerd(hw, i, 1, &nvm_data); + if (ret_val) { + hw->nvm.ops.release(hw); + DEBUGOUT("NVM Read Error while updating checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16) NVM_SUM - checksum; + ret_val = e1000_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1, + &checksum); + if (ret_val != E1000_SUCCESS) { + hw->nvm.ops.release(hw); + DEBUGOUT("NVM Write Error while updating checksum.\n"); + goto out; + } + + hw->nvm.ops.release(hw); + + ret_val = e1000_update_flash_i210(hw); + } else { + ret_val = E1000_ERR_SWFW_SYNC; + } +out: + return ret_val; +} + +/** + * e1000_get_flash_presence_i210 - Check if flash device is detected. + * @hw: pointer to the HW structure + * + **/ +bool e1000_get_flash_presence_i210(struct e1000_hw *hw) +{ + u32 eec = 0; + bool ret_val = false; + + DEBUGFUNC("e1000_get_flash_presence_i210"); + + eec = E1000_READ_REG(hw, E1000_EECD); + + if (eec & E1000_EECD_FLASH_DETECTED_I210) + ret_val = true; + + return ret_val; +} + +/** + * e1000_update_flash_i210 - Commit EEPROM to the flash + * @hw: pointer to the HW structure + * + **/ +s32 e1000_update_flash_i210(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u32 flup; + + DEBUGFUNC("e1000_update_flash_i210"); + + ret_val = e1000_pool_flash_update_done_i210(hw); + if (ret_val == -E1000_ERR_NVM) { + DEBUGOUT("Flash update time out\n"); + goto out; + } + + flup = E1000_READ_REG(hw, E1000_EECD) | E1000_EECD_FLUPD_I210; + E1000_WRITE_REG(hw, E1000_EECD, flup); + + ret_val = e1000_pool_flash_update_done_i210(hw); + if (ret_val == E1000_SUCCESS) + DEBUGOUT("Flash update complete\n"); + else + DEBUGOUT("Flash update time out\n"); + +out: + return ret_val; +} + +/** + * e1000_pool_flash_update_done_i210 - Poll for FLUDONE status.
+ * @hw: pointer to the HW structure + * + **/ +s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw) +{ + s32 ret_val = -E1000_ERR_NVM; + u32 i, reg; + + DEBUGFUNC("e1000_pool_flash_update_done_i210"); + + for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) { + reg = E1000_READ_REG(hw, E1000_EECD); + if (reg & E1000_EECD_FLUDONE_I210) { + ret_val = E1000_SUCCESS; + break; + } + usec_delay(5); + } + + return ret_val; +} + +/** + * e1000_init_nvm_params_i210 - Initialize i210 NVM function pointers + * @hw: pointer to the HW structure + * + * Initialize the i210/i211 NVM parameters and function pointers. + **/ +static s32 e1000_init_nvm_params_i210(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + struct e1000_nvm_info *nvm = &hw->nvm; + + DEBUGFUNC("e1000_init_nvm_params_i210"); + + ret_val = e1000_init_nvm_params_82575(hw); + nvm->ops.acquire = e1000_acquire_nvm_i210; + nvm->ops.release = e1000_release_nvm_i210; + nvm->ops.valid_led_default = e1000_valid_led_default_i210; + if (e1000_get_flash_presence_i210(hw)) { + hw->nvm.type = e1000_nvm_flash_hw; + nvm->ops.read = e1000_read_nvm_srrd_i210; + nvm->ops.write = e1000_write_nvm_srwr_i210; + nvm->ops.validate = e1000_validate_nvm_checksum_i210; + nvm->ops.update = e1000_update_nvm_checksum_i210; + } else { + hw->nvm.type = e1000_nvm_invm; + nvm->ops.read = e1000_read_invm_i210; + nvm->ops.write = NULL; + nvm->ops.validate = NULL; + nvm->ops.update = NULL; + } + return ret_val; +} + +/** + * e1000_init_function_pointers_i210 - Init func ptrs. + * @hw: pointer to the HW structure + * + * Called to initialize all function pointers and parameters. + **/ +void e1000_init_function_pointers_i210(struct e1000_hw *hw) +{ + e1000_init_function_pointers_82575(hw); + hw->nvm.ops.init_params = e1000_init_nvm_params_i210; + + return; +} + +/** + * e1000_valid_led_default_i210 - Verify a valid default LED config + * @hw: pointer to the HW structure + * @data: pointer to the NVM (EEPROM) + * + * Read the EEPROM for the current default LED configuration. If the + * LED configuration is not valid, set to a valid LED configuration. 
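When the stored word is one of the reserved patterns, the substitute comes from the ID_LED_DEFAULT_I210* values defined in e1000_i210.h below: each 4-bit nibble of the word selects one LED's behavior via the ID_LED_* encodings in e1000_defines.h, which is why those defaults are composed from three such fields with shifts of 8 and 4.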
+ **/ +static s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + DEBUGFUNC("e1000_valid_led_default_i210"); + + ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + + if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) { + switch (hw->phy.media_type) { + case e1000_media_type_internal_serdes: + *data = ID_LED_DEFAULT_I210_SERDES; + break; + case e1000_media_type_copper: + default: + *data = ID_LED_DEFAULT_I210; + break; + } + } +out: + return ret_val; +} + +/** + * __e1000_access_xmdio_reg - Read/write XMDIO register + * @hw: pointer to the HW structure + * @address: XMDIO address to program + * @dev_addr: device address to program + * @data: pointer to value to read/write from/to the XMDIO address + * @read: boolean flag to indicate read or write + **/ +static s32 __e1000_access_xmdio_reg(struct e1000_hw *hw, u16 address, + u8 dev_addr, u16 *data, bool read) +{ + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("__e1000_access_xmdio_reg"); + + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, address); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, E1000_MMDAC_FUNC_DATA | + dev_addr); + if (ret_val) + return ret_val; + + if (read) + ret_val = hw->phy.ops.read_reg(hw, E1000_MMDAAD, data); + else + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, *data); + if (ret_val) + return ret_val; + + /* Reset the device address back to 0 */ + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, 0); + if (ret_val) + return ret_val; + + return ret_val; +} + +/** + * e1000_read_xmdio_reg - Read XMDIO register + * @hw: pointer to the HW structure + * @addr: XMDIO address to program + * @dev_addr: device address to program + * @data: value to be read from the XMDIO address + **/ +s32 e1000_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data) +{ + DEBUGFUNC("e1000_read_xmdio_reg"); + + return __e1000_access_xmdio_reg(hw, addr, dev_addr, data, true); +} + +/** + * e1000_write_xmdio_reg - Write XMDIO register + * @hw: pointer to the HW structure + * @addr: XMDIO address to program + * @dev_addr: device address to program + * @data: value to be written to the XMDIO address + **/ +s32 e1000_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data) +{ + DEBUGFUNC("e1000_write_xmdio_reg"); + + return __e1000_access_xmdio_reg(hw, addr, dev_addr, &data, false); +} diff --git a/vmkdrivers/src_9/drivers/net/igb/e1000_i210.h b/vmkdrivers/src_9/drivers/net/igb/e1000_i210.h new file mode 100755 index 0000000000000000000000000000000000000000..57b2eb5602c2392268fb38c0cbc7bc92b4a8c245 --- /dev/null +++ b/vmkdrivers/src_9/drivers/net/igb/e1000_i210.h @@ -0,0 +1,91 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2013 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details.
+ + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_I210_H_ +#define _E1000_I210_H_ + +bool e1000_get_flash_presence_i210(struct e1000_hw *hw); +s32 e1000_update_flash_i210(struct e1000_hw *hw); +s32 e1000_update_nvm_checksum_i210(struct e1000_hw *hw); +s32 e1000_validate_nvm_checksum_i210(struct e1000_hw *hw); +s32 e1000_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +s32 e1000_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +s32 e1000_read_invm_version(struct e1000_hw *hw, + struct e1000_fw_version *invm_ver); +s32 e1000_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask); +void e1000_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask); +s32 e1000_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, + u16 *data); +s32 e1000_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, + u16 data); + +#define E1000_STM_OPCODE 0xDB00 +#define E1000_EEPROM_FLASH_SIZE_WORD 0x11 + +#define INVM_DWORD_TO_RECORD_TYPE(invm_dword) \ + (u8)((invm_dword) & 0x7) +#define INVM_DWORD_TO_WORD_ADDRESS(invm_dword) \ + (u8)(((invm_dword) & 0x0000FE00) >> 9) +#define INVM_DWORD_TO_WORD_DATA(invm_dword) \ + (u16)(((invm_dword) & 0xFFFF0000) >> 16) + +enum E1000_INVM_STRUCTURE_TYPE { + E1000_INVM_UNINITIALIZED_STRUCTURE = 0x00, + E1000_INVM_WORD_AUTOLOAD_STRUCTURE = 0x01, + E1000_INVM_CSR_AUTOLOAD_STRUCTURE = 0x02, + E1000_INVM_PHY_REGISTER_AUTOLOAD_STRUCTURE = 0x03, + E1000_INVM_RSA_KEY_SHA256_STRUCTURE = 0x04, + E1000_INVM_INVALIDATED_STRUCTURE = 0x0F, +}; + +#define E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS 8 +#define E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS 1 +#define E1000_INVM_ULT_BYTES_SIZE 8 +#define E1000_INVM_RECORD_SIZE_IN_BYTES 4 +#define E1000_INVM_VER_FIELD_ONE 0x1FF8 +#define E1000_INVM_VER_FIELD_TWO 0x7FE000 +#define E1000_INVM_IMGTYPE_FIELD 0x1F800000 + +#define E1000_INVM_MAJOR_MASK 0x3F0 +#define E1000_INVM_MINOR_MASK 0xF +#define E1000_INVM_MAJOR_SHIFT 4 + +#define ID_LED_DEFAULT_I210 ((ID_LED_OFF1_ON2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_OFF1_OFF2)) +#define ID_LED_DEFAULT_I210_SERDES ((ID_LED_DEF1_DEF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_OFF1_ON2)) + +/* NVM offset defaults for I211 devices */ +#define NVM_INIT_CTRL_2_DEFAULT_I211 0X7243 +#define NVM_INIT_CTRL_4_DEFAULT_I211 0x00C1 +#define NVM_LED_1_CFG_DEFAULT_I211 0x0184 +#define NVM_LED_0_2_CFG_DEFAULT_I211 0x200C +#endif diff --git a/vmkdrivers/src_9/drivers/net/igb/e1000_mac.c b/vmkdrivers/src_9/drivers/net/igb/e1000_mac.c index 103f0cfa494edd4bb061ed2b888c8410ebbe5c10..3fbcfde5dc2f8ed76b0a81c4c0cead0809896b36 100644 --- a/vmkdrivers/src_9/drivers/net/igb/e1000_mac.c +++ b/vmkdrivers/src_9/drivers/net/igb/e1000_mac.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2009 Intel Corporation. + Copyright(c) 2007-2013 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -27,11 +27,10 @@ #include "e1000_api.h" -static s32 e1000_set_default_fc_generic(struct e1000_hw *hw); -static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw); -static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw); static s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw); static void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw); +static void e1000_config_collision_dist_generic(struct e1000_hw *hw); +static void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index); /** * e1000_init_mac_ops_generic - Initialize MAC function pointers @@ -49,11 +48,7 @@ void e1000_init_mac_ops_generic(struct e1000_hw *hw) mac->ops.read_mac_addr = e1000_read_mac_addr_generic; mac->ops.config_collision_dist = e1000_config_collision_dist_generic; /* LINK */ - mac->ops.wait_autoneg = e1000_wait_autoneg_generic; /* Management */ - mac->ops.mng_host_if_write = e1000_mng_host_if_write_generic; - mac->ops.mng_write_cmd_header = e1000_mng_write_cmd_header_generic; - mac->ops.mng_enable_host_if = e1000_mng_enable_host_if_generic; /* VLAN, MC, etc. */ mac->ops.rar_set = e1000_rar_set_generic; mac->ops.validate_mdi_setting = e1000_validate_mdi_setting_generic; @@ -71,24 +66,34 @@ s32 e1000_get_bus_info_pcie_generic(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; struct e1000_bus_info *bus = &hw->bus; - s32 ret_val; u16 pcie_link_status; DEBUGFUNC("e1000_get_bus_info_pcie_generic"); bus->type = e1000_bus_type_pci_express; - bus->speed = e1000_bus_speed_2500; - ret_val = e1000_read_pcie_cap_reg(hw, - PCIE_LINK_STATUS, - &pcie_link_status); - if (ret_val) + ret_val = e1000_read_pcie_cap_reg(hw, PCIE_LINK_STATUS, + &pcie_link_status); + if (ret_val) { bus->width = e1000_bus_width_unknown; - else + bus->speed = e1000_bus_speed_unknown; + } else { + switch (pcie_link_status & PCIE_LINK_SPEED_MASK) { + case PCIE_LINK_SPEED_2500: + bus->speed = e1000_bus_speed_2500; + break; + case PCIE_LINK_SPEED_5000: + bus->speed = e1000_bus_speed_5000; + break; + default: + bus->speed = e1000_bus_speed_unknown; + break; + } + bus->width = (enum e1000_bus_width)((pcie_link_status & - PCIE_LINK_WIDTH_MASK) >> - PCIE_LINK_WIDTH_SHIFT); + PCIE_LINK_WIDTH_MASK) >> PCIE_LINK_WIDTH_SHIFT); + } mac->ops.set_lan_id(hw); @@ -108,8 +113,7 @@ static void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw) struct e1000_bus_info *bus = &hw->bus; u32 reg; - /* - * The status register reports the correct function number + /* The status register reports the correct function number * for the device regardless of function swap state. */ reg = E1000_READ_REG(hw, E1000_STATUS); @@ -170,7 +174,7 @@ void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value) * @hw: pointer to the HW structure * @rar_count: receive address registers * - * Setups the receive address registers by setting the base receive address + * Setup the receive address registers by setting the base receive address * register to the devices MAC address and clearing all the other receive * address registers to 0. 
**/ @@ -207,28 +211,34 @@ void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count) s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw) { u32 i; - s32 ret_val = E1000_SUCCESS; + s32 ret_val; u16 offset, nvm_alt_mac_addr_offset, nvm_data; u8 alt_mac_addr[ETH_ADDR_LEN]; DEBUGFUNC("e1000_check_alt_mac_addr_generic"); - /* Alternate MAC address is handled by the option ROM for 82580 */ - /* and newer. SW support not required. */ + ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &nvm_data); + if (ret_val) + return ret_val; + + + /* Alternate MAC address is handled by the option ROM for 82580 + * and newer. SW support not required. + */ if (hw->mac.type >= e1000_82580) - goto out; + return E1000_SUCCESS; ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1, - &nvm_alt_mac_addr_offset); + &nvm_alt_mac_addr_offset); if (ret_val) { DEBUGOUT("NVM Read Error\n"); - goto out; + return ret_val; } - if (nvm_alt_mac_addr_offset == 0xFFFF) { + if ((nvm_alt_mac_addr_offset == 0xFFFF) || + (nvm_alt_mac_addr_offset == 0x0000)) /* There is no Alternate MAC Address */ - goto out; - } + return E1000_SUCCESS; if (hw->bus.func == E1000_FUNC_1) nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1; @@ -242,7 +252,7 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw) ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data); if (ret_val) { DEBUGOUT("NVM Read Error\n"); - goto out; + return ret_val; } alt_mac_addr[i] = (u8)(nvm_data & 0xFF); @@ -252,18 +262,16 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw) /* if multicast bit is set, the alternate address will not be used */ if (alt_mac_addr[0] & 0x01) { DEBUGOUT("Ignoring Alternate Mac Address with MC bit set\n"); - goto out; + return E1000_SUCCESS; } - /* - * We have a valid alternate MAC address, and we want to treat it the + /* We have a valid alternate MAC address, and we want to treat it the * same as the normal permanent MAC address stored by the HW into the * RAR. Do this by mapping this address into RAR0. */ hw->mac.ops.rar_set(hw, alt_mac_addr, 0); -out: - return ret_val; + return E1000_SUCCESS; } /** @@ -275,19 +283,17 @@ out: * Sets the receive address array register at index to the address passed * in by addr. **/ -void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index) +static void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index) { u32 rar_low, rar_high; DEBUGFUNC("e1000_rar_set_generic"); - /* - * HW expects these in little endian so we reverse the byte order + /* HW expects these in little endian so we reverse the byte order * from network order (big endian) to little endian */ - rar_low = ((u32) addr[0] | - ((u32) addr[1] << 8) | - ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); + rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | + ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); @@ -295,8 +301,7 @@ void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index) if (rar_low || rar_high) rar_high |= E1000_RAH_AV; - /* - * Some bridges will combine consecutive 32-bit writes into + /* Some bridges will combine consecutive 32-bit writes into * a single burst write, which will malfunction on some parts. * The flushes avoid this. 
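For the byte-order packing a few lines up, a worked example with an arbitrary address: MAC 00:1B:21:12:34:56 packs to rar_low = 0x12211B00 and rar_high = 0x5634, after which E1000_RAH_AV is set in rar_high so the entry takes part in address filtering.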
*/ @@ -306,87 +311,13 @@ void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index) E1000_WRITE_FLUSH(hw); } -/** - * e1000_mta_set_generic - Set multicast filter table address - * @hw: pointer to the HW structure - * @hash_value: determines the MTA register and bit to set - * - * The multicast table address is a register array of 32-bit registers. - * The hash_value is used to determine what register the bit is in, the - * current value is read, the new bit is OR'd in and the new value is - * written back into the register. - **/ -void e1000_mta_set_generic(struct e1000_hw *hw, u32 hash_value) -{ - u32 hash_bit, hash_reg, mta; - - DEBUGFUNC("e1000_mta_set_generic"); - /* - * The MTA is a register array of 32-bit registers. It is - * treated like an array of (32*mta_reg_count) bits. We want to - * set bit BitArray[hash_value]. So we figure out what register - * the bit is in, read it, OR in the new bit, then write - * back the new value. The (hw->mac.mta_reg_count - 1) serves as a - * mask to bits 31:5 of the hash value which gives us the - * register we're modifying. The hash bit within that register - * is determined by the lower 5 bits of the hash value. - */ - hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); - hash_bit = hash_value & 0x1F; - - mta = E1000_READ_REG_ARRAY(hw, E1000_MTA, hash_reg); - - mta |= (1 << hash_bit); - - E1000_WRITE_REG_ARRAY(hw, E1000_MTA, hash_reg, mta); - E1000_WRITE_FLUSH(hw); -} - -/** - * e1000_update_mc_addr_list_generic - Update Multicast addresses - * @hw: pointer to the HW structure - * @mc_addr_list: array of multicast addresses to program - * @mc_addr_count: number of multicast addresses to program - * - * Updates entire Multicast Table Array. - * The caller must have a packed mc_addr_list of multicast addresses. - **/ -void e1000_update_mc_addr_list_generic(struct e1000_hw *hw, - u8 *mc_addr_list, u32 mc_addr_count) -{ - u32 hash_value, hash_bit, hash_reg; - int i; - - DEBUGFUNC("e1000_update_mc_addr_list_generic"); - - /* clear mta_shadow */ - memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); - - /* update mta_shadow from mc_addr_list */ - for (i = 0; (u32) i < mc_addr_count; i++) { - hash_value = e1000_hash_mc_addr_generic(hw, mc_addr_list); - - hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); - hash_bit = hash_value & 0x1F; - - hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit); - mc_addr_list += (ETH_ADDR_LEN); - } - - /* replace the entire MTA table */ - for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) - E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]); - E1000_WRITE_FLUSH(hw); -} - /** * e1000_hash_mc_addr_generic - Generate a multicast hash value * @hw: pointer to the HW structure * @mc_addr: pointer to a multicast address * * Generates a multicast address hash value which is used to determine - * the multicast filter table array address and new table value. See - * e1000_mta_set_generic() + * the multicast filter table array address and new table value. **/ u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr) { @@ -398,15 +329,13 @@ u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr) /* Register count multiplied by bits per register */ hash_mask = (hw->mac.mta_reg_count * 32) - 1; - /* - * For a mc_filter_type of 0, bit_shift is the number of left-shifts + /* For a mc_filter_type of 0, bit_shift is the number of left-shifts * where 0xFF would still fall within the hash mask. 
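Concretely, with the 128-register MTA of these parts: hash_mask = 128 * 32 - 1 = 0xFFF, and 0xFFF >> 4 == 0xFF, so the loop below settles on bit_shift = 4 for a mc_filter_type of 0; that is where the ">> 4 | << 4" of case 0 in the table further down comes from. A resulting hash of 0x563 then selects MTA register (0x563 >> 5) & 0x7F = 0x2B, bit 0x563 & 0x1F = 3, which is exactly the mapping e1000_update_mc_addr_list_generic applies.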
*/ while (hash_mask >> bit_shift != 0xFF) bit_shift++; - /* - * The portion of the address that is used for the hash table + /* The portion of the address that is used for the hash table * is determined by the mc_filter_type setting. * The algorithm is such that there is a total of 8 bits of shifting. * The bit_shift for a mc_filter_type of 0 represents the number of @@ -424,7 +353,7 @@ u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr) * values resulting from each mc_filter_type... * [0] [1] [2] [3] [4] [5] * 01 AA 00 12 34 56 - * LSB MSB + * LSB MSB * * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563 * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6 @@ -447,11 +376,48 @@ u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr) } hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | - (((u16) mc_addr[5]) << bit_shift))); + (((u16) mc_addr[5]) << bit_shift))); return hash_value; } +/** + * e1000_update_mc_addr_list_generic - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * + * Updates entire Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. + **/ +void e1000_update_mc_addr_list_generic(struct e1000_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count) +{ + u32 hash_value, hash_bit, hash_reg; + int i; + + DEBUGFUNC("e1000_update_mc_addr_list_generic"); + + /* clear mta_shadow */ + memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); + + /* update mta_shadow from mc_addr_list */ + for (i = 0; (u32) i < mc_addr_count; i++) { + hash_value = e1000_hash_mc_addr_generic(hw, mc_addr_list); + + hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); + hash_bit = hash_value & 0x1F; + + hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit); + mc_addr_list += (ETH_ADDR_LEN); + } + + /* replace the entire MTA table */ + for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]); + E1000_WRITE_FLUSH(hw); +} + /** * e1000_clear_hw_cntrs_base_generic - Clear base hardware counters * @hw: pointer to the HW structure @@ -517,55 +483,45 @@ s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw) DEBUGFUNC("e1000_check_for_copper_link"); - /* - * We only want to go out to the PHY registers to see if Auto-Neg + /* We only want to go out to the PHY registers to see if Auto-Neg * has completed and/or if our link status has changed. The * get_link_status flag is set upon receiving a Link Status * Change or Rx Sequence Error interrupt. */ - if (!mac->get_link_status) { - ret_val = E1000_SUCCESS; - goto out; - } + if (!mac->get_link_status) + return E1000_SUCCESS; - /* - * First we want to see if the MII Status Register reports + /* First we want to see if the MII Status Register reports * link. If so, then we want to get the current speed/duplex * of the PHY. 
*/ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); if (ret_val) - goto out; + return ret_val; if (!link) - goto out; /* No link detected */ + return E1000_SUCCESS; /* No link detected */ mac->get_link_status = false; - /* - * Check if there was DownShift, must be checked + /* Check if there was DownShift, must be checked * immediately after link-up */ e1000_check_downshift_generic(hw); - /* - * If we are forcing speed/duplex, then we simply return since + /* If we are forcing speed/duplex, then we simply return since * we have already determined whether we have link or not. */ - if (!mac->autoneg) { - ret_val = -E1000_ERR_CONFIG; - goto out; - } + if (!mac->autoneg) + return -E1000_ERR_CONFIG; - /* - * Auto-Neg is enabled. Auto Speed Detection takes care + /* Auto-Neg is enabled. Auto Speed Detection takes care * of MAC speed/duplex configuration. So we only need to * configure Collision Distance in the MAC. */ - e1000_config_collision_dist_generic(hw); + mac->ops.config_collision_dist(hw); - /* - * Configure Flow Control now that Auto-Neg has completed. + /* Configure Flow Control now that Auto-Neg has completed. * First, we need to restore the desired flow control * settings because we may have had to re-autoneg with a * different link partner. @@ -574,7 +530,6 @@ s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw) if (ret_val) DEBUGOUT("Error configuring flow control\n"); -out: return ret_val; } @@ -591,7 +546,7 @@ s32 e1000_check_for_fiber_link_generic(struct e1000_hw *hw) u32 rxcw; u32 ctrl; u32 status; - s32 ret_val = E1000_SUCCESS; + s32 ret_val; DEBUGFUNC("e1000_check_for_fiber_link_generic"); @@ -599,8 +554,7 @@ s32 e1000_check_for_fiber_link_generic(struct e1000_hw *hw) status = E1000_READ_REG(hw, E1000_STATUS); rxcw = E1000_READ_REG(hw, E1000_RXCW); - /* - * If we don't have link (auto-negotiation failed or link partner + /* If we don't have link (auto-negotiation failed or link partner * cannot auto-negotiate), the cable is plugged in (we have signal), * and our link partner is not trying to auto-negotiate with us (we * are receiving idles or data), we need to force link up. We also @@ -608,13 +562,13 @@ s32 e1000_check_for_fiber_link_generic(struct e1000_hw *hw) * was just plugged in. The autoneg_failed flag does this. */ /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ - if ((ctrl & E1000_CTRL_SWDPIN1) && (!(status & E1000_STATUS_LU)) && - (!(rxcw & E1000_RXCW_C))) { - if (mac->autoneg_failed == 0) { - mac->autoneg_failed = 1; - goto out; + if ((ctrl & E1000_CTRL_SWDPIN1) && !(status & E1000_STATUS_LU) && + !(rxcw & E1000_RXCW_C)) { + if (!mac->autoneg_failed) { + mac->autoneg_failed = true; + return E1000_SUCCESS; } - DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\n"); + DEBUGOUT("NOT Rx'ing /C/, disable AutoNeg and force link.\n"); /* Disable auto-negotiation in the TXCW register */ E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE)); @@ -628,24 +582,22 @@ s32 e1000_check_for_fiber_link_generic(struct e1000_hw *hw) ret_val = e1000_config_fc_after_link_up_generic(hw); if (ret_val) { DEBUGOUT("Error configuring flow control\n"); - goto out; + return ret_val; } } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { - /* - * If we are forcing link and we are receiving /C/ ordered + /* If we are forcing link and we are receiving /C/ ordered * sets, re-enable auto-negotiation in the TXCW register * and disable forced link in the Device Control register * in an attempt to auto-negotiate with our link partner. 
*/ - DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n"); + DEBUGOUT("Rx'ing /C/, enable AutoNeg and stop forcing link.\n"); E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw); E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU)); mac->serdes_has_link = true; } -out: - return ret_val; + return E1000_SUCCESS; } /** @@ -661,7 +613,7 @@ s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw) u32 rxcw; u32 ctrl; u32 status; - s32 ret_val = E1000_SUCCESS; + s32 ret_val; DEBUGFUNC("e1000_check_for_serdes_link_generic"); @@ -669,20 +621,19 @@ s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw) status = E1000_READ_REG(hw, E1000_STATUS); rxcw = E1000_READ_REG(hw, E1000_RXCW); - /* - * If we don't have link (auto-negotiation failed or link partner + /* If we don't have link (auto-negotiation failed or link partner * cannot auto-negotiate), and our link partner is not trying to * auto-negotiate with us (we are receiving idles or data), * we need to force link up. We also need to give auto-negotiation * time to complete. */ /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ - if ((!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) { - if (mac->autoneg_failed == 0) { - mac->autoneg_failed = 1; - goto out; + if (!(status & E1000_STATUS_LU) && !(rxcw & E1000_RXCW_C)) { + if (!mac->autoneg_failed) { + mac->autoneg_failed = true; + return E1000_SUCCESS; } - DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\n"); + DEBUGOUT("NOT Rx'ing /C/, disable AutoNeg and force link.\n"); /* Disable auto-negotiation in the TXCW register */ E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE)); @@ -696,23 +647,21 @@ s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw) ret_val = e1000_config_fc_after_link_up_generic(hw); if (ret_val) { DEBUGOUT("Error configuring flow control\n"); - goto out; + return ret_val; } } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { - /* - * If we are forcing link and we are receiving /C/ ordered + /* If we are forcing link and we are receiving /C/ ordered * sets, re-enable auto-negotiation in the TXCW register * and disable forced link in the Device Control register * in an attempt to auto-negotiate with our link partner. */ - DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n"); + DEBUGOUT("Rx'ing /C/, enable AutoNeg and stop forcing link.\n"); E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw); E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU)); mac->serdes_has_link = true; } else if (!(E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW))) { - /* - * If we force link for non-auto-negotiation switch, check + /* If we force link for non-auto-negotiation switch, check * link status based on MAC synchronization for internal * serdes media type. 
*/ @@ -739,12 +688,10 @@ s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw) if (rxcw & E1000_RXCW_SYNCH) { if (!(rxcw & E1000_RXCW_IV)) { mac->serdes_has_link = true; - DEBUGOUT("SERDES: Link up - autoneg " - "completed sucessfully.\n"); + DEBUGOUT("SERDES: Link up - autoneg completed successfully.\n"); } else { mac->serdes_has_link = false; - DEBUGOUT("SERDES: Link down - invalid" - "codewords detected in autoneg.\n"); + DEBUGOUT("SERDES: Link down - invalid codewords detected in autoneg.\n"); } } else { mac->serdes_has_link = false; @@ -756,8 +703,47 @@ s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw) } } -out: - return ret_val; + return E1000_SUCCESS; +} + +/** + * e1000_set_default_fc_generic - Set flow control default values + * @hw: pointer to the HW structure + * + * Read the EEPROM for the default values for flow control and store the + * values. + **/ +static s32 e1000_set_default_fc_generic(struct e1000_hw *hw) +{ + s32 ret_val; + u16 nvm_data; + + DEBUGFUNC("e1000_set_default_fc_generic"); + + /* Read and store word 0x0F of the EEPROM. This word contains bits + * that determine the hardware's default PAUSE (flow control) mode, + * a bit that determines whether the HW defaults to enabling or + * disabling auto-negotiation, and the direction of the + * SW defined pins. If there is no SW over-ride of the flow + * control setting, then the variable hw->fc will + * be initialized based on a value in the EEPROM. + */ + ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data); + + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + if (!(nvm_data & NVM_WORD0F_PAUSE_MASK)) + hw->fc.requested_mode = e1000_fc_none; + else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == + NVM_WORD0F_ASM_DIR) + hw->fc.requested_mode = e1000_fc_tx_pause; + else + hw->fc.requested_mode = e1000_fc_full; + + return E1000_SUCCESS; } /** @@ -772,30 +758,26 @@ out: **/ s32 e1000_setup_link_generic(struct e1000_hw *hw) { - s32 ret_val = E1000_SUCCESS; + s32 ret_val; DEBUGFUNC("e1000_setup_link_generic"); - /* - * In the case of the phy reset being blocked, we already have a link. + /* In the case of the phy reset being blocked, we already have a link. * We do not need to set it up again. */ - if (hw->phy.ops.check_reset_block) - if (hw->phy.ops.check_reset_block(hw)) - goto out; + if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw)) + return E1000_SUCCESS; - /* - * If requested flow control is set to default, set flow control + /* If requested flow control is set to default, set flow control * based on the EEPROM flow control settings. */ if (hw->fc.requested_mode == e1000_fc_default) { ret_val = e1000_set_default_fc_generic(hw); if (ret_val) - goto out; + return ret_val; } - /* - * Save off the requested flow control mode for use later. Depending + /* Save off the requested flow control mode for use later. Depending * on the link partner's capabilities, we may or may not use this mode. */ hw->fc.current_mode = hw->fc.requested_mode; @@ -806,10 +788,9 @@ s32 e1000_setup_link_generic(struct e1000_hw *hw) /* Call the necessary media_type subroutine to configure the link. */ ret_val = hw->mac.ops.setup_physical_interface(hw); if (ret_val) - goto out; + return ret_val; - /* - * Initialize the flow control address, type, and PAUSE timer + /* Initialize the flow control address, type, and PAUSE timer * registers to their default values. This is done even if flow * control is disabled, because it does not hurt anything to * initialize these registers. 
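For e1000_set_default_fc_generic above, the decode of EEPROM word 0x0F (mask values as conventionally defined in e1000_defines.h: NVM_WORD0F_PAUSE_MASK 0x3000, NVM_WORD0F_ASM_DIR 0x2000) works out to: bits 13:12 == 00 selects e1000_fc_none, == 10 selects e1000_fc_tx_pause, and 01 or 11 select e1000_fc_full, matching the three-way if/else in that function.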
@@ -821,10 +802,125 @@ s32 e1000_setup_link_generic(struct e1000_hw *hw) E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time); - ret_val = e1000_set_fc_watermarks_generic(hw); + return e1000_set_fc_watermarks_generic(hw); +} -out: - return ret_val; +/** + * e1000_commit_fc_settings_generic - Configure flow control + * @hw: pointer to the HW structure + * + * Write the flow control settings to the Transmit Config Word Register (TXCW) + * based on the flow control settings in e1000_mac_info. + **/ +static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 txcw; + + DEBUGFUNC("e1000_commit_fc_settings_generic"); + + /* Check for a software override of the flow control settings, and + * setup the device accordingly. If auto-negotiation is enabled, then + * software will have to set the "PAUSE" bits to the correct value in + * the Transmit Config Word Register (TXCW) and re-start auto- + * negotiation. However, if auto-negotiation is disabled, then + * software will have to manually configure the two flow control enable + * bits in the CTRL register. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but we + * do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + */ + switch (hw->fc.current_mode) { + case e1000_fc_none: + /* Flow control completely disabled by a software over-ride. */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD); + break; + case e1000_fc_rx_pause: + /* Rx Flow control is enabled and Tx Flow control is disabled + * by a software over-ride. Since there really isn't a way to + * advertise that we are capable of Rx Pause ONLY, we will + * advertise that we support both symmetric and asymmetric Rx + * PAUSE. Later, we will disable the adapter's ability to send + * PAUSE frames. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); + break; + case e1000_fc_tx_pause: + /* Tx Flow control is enabled, and Rx Flow control is disabled, + * by a software over-ride. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR); + break; + case e1000_fc_full: + /* Flow control (both Rx and Tx) is enabled by a software + * over-ride. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); + break; + default: + DEBUGOUT("Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + break; + } + + E1000_WRITE_REG(hw, E1000_TXCW, txcw); + mac->txcw = txcw; + + return E1000_SUCCESS; +} + +/** + * e1000_poll_fiber_serdes_link_generic - Poll for link up + * @hw: pointer to the HW structure + * + * Polls for link up by reading the status register; if link fails to come + * up with auto-negotiation, then the link is forced if a signal is detected. + **/ +static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 i, status; + s32 ret_val; + + DEBUGFUNC("e1000_poll_fiber_serdes_link_generic"); + + /* If we have a signal (the cable is plugged in, or assumed true for + * serdes media) then poll for a "Link-Up" indication in the Device + * Status Register. Time-out if a link isn't seen in 500 milliseconds + * (Auto-negotiation should complete in less than 500 + * milliseconds even if the other end is doing it in SW).
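The 500 millisecond figure falls out of the loop bounds below: assuming the driver's usual FIBER_LINK_UP_LIMIT of 50, fifty polls at msec_delay(10) apiece give exactly the quoted timeout.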
+ */ + for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) { + msec_delay(10); + status = E1000_READ_REG(hw, E1000_STATUS); + if (status & E1000_STATUS_LU) + break; + } + if (i == FIBER_LINK_UP_LIMIT) { + DEBUGOUT("Never got a valid link from auto-neg!!!\n"); + mac->autoneg_failed = true; + /* AutoNeg failed to achieve a link, so we'll call + * mac->check_for_link. This routine will force the + * link up if we detect a signal. This will allow us to + * communicate with non-autonegotiating link partners. + */ + ret_val = mac->ops.check_for_link(hw); + if (ret_val) { + DEBUGOUT("Error while checking for link\n"); + return ret_val; + } + mac->autoneg_failed = false; + } else { + mac->autoneg_failed = false; + DEBUGOUT("Valid Link Found\n"); + } + + return E1000_SUCCESS; } /** @@ -837,7 +933,7 @@ out: s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw) { u32 ctrl; - s32 ret_val = E1000_SUCCESS; + s32 ret_val; DEBUGFUNC("e1000_setup_fiber_serdes_link_generic"); @@ -846,14 +942,13 @@ s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw) /* Take the link out of reset */ ctrl &= ~E1000_CTRL_LRST; - e1000_config_collision_dist_generic(hw); + hw->mac.ops.config_collision_dist(hw); ret_val = e1000_commit_fc_settings_generic(hw); if (ret_val) - goto out; + return ret_val; - /* - * Since auto-negotiation is enabled, take the link out of reset (the + /* Since auto-negotiation is enabled, take the link out of reset (the * link will be in reset, because we previously reset the chip). This * will restart auto-negotiation. If auto-negotiation is successful * then the link-up status bit will be set and the flow control enable @@ -863,173 +958,42 @@ s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw) E1000_WRITE_REG(hw, E1000_CTRL, ctrl); E1000_WRITE_FLUSH(hw); - msec_delay(1); - - /* - * For these adapters, the SW definable pin 1 is set when the optics - * detect a signal. If we have a signal, then poll for a "Link-Up" - * indication. - */ - if (hw->phy.media_type == e1000_media_type_internal_serdes || - (E1000_READ_REG(hw, E1000_CTRL) & E1000_CTRL_SWDPIN1)) { - ret_val = e1000_poll_fiber_serdes_link_generic(hw); - } else { - DEBUGOUT("No signal detected\n"); - } - -out: - return ret_val; -} - -/** - * e1000_config_collision_dist_generic - Configure collision distance - * @hw: pointer to the HW structure - * - * Configures the collision distance to the default value and is used - * during link setup. Currently no func pointer exists and all - * implementations are handled in the generic version of this function. - **/ -void e1000_config_collision_dist_generic(struct e1000_hw *hw) -{ - u32 tctl; - - DEBUGFUNC("e1000_config_collision_dist_generic"); - - tctl = E1000_READ_REG(hw, E1000_TCTL); - - tctl &= ~E1000_TCTL_COLD; - tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT; - - E1000_WRITE_REG(hw, E1000_TCTL, tctl); - E1000_WRITE_FLUSH(hw); -} - -/** - * e1000_poll_fiber_serdes_link_generic - Poll for link up - * @hw: pointer to the HW structure - * - * Polls for link up by reading the status register, if link fails to come - * up with auto-negotiation, then the link is forced if a signal is detected. - **/ -static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw) -{ - struct e1000_mac_info *mac = &hw->mac; - u32 i, status; - s32 ret_val = E1000_SUCCESS; - - DEBUGFUNC("e1000_poll_fiber_serdes_link_generic"); - - /* - * If we have a signal (the cable is plugged in, or assumed true for - * serdes media) then poll for a "Link-Up" indication in the Device - * Status Register. 
Time-out if a link isn't seen in 500 milliseconds - * seconds (Auto-negotiation should complete in less than 500 - * milliseconds even if the other end is doing it in SW). + msec_delay(1); + + /* For these adapters, the SW definable pin 1 is set when the optics + * detect a signal. If we have a signal, then poll for a "Link-Up" + * indication. */ - for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) { - msec_delay(10); - status = E1000_READ_REG(hw, E1000_STATUS); - if (status & E1000_STATUS_LU) - break; - } - if (i == FIBER_LINK_UP_LIMIT) { - DEBUGOUT("Never got a valid link from auto-neg!!!\n"); - mac->autoneg_failed = 1; - /* - * AutoNeg failed to achieve a link, so we'll call - * mac->check_for_link. This routine will force the - * link up if we detect a signal. This will allow us to - * communicate with non-autonegotiating link partners. - */ - ret_val = hw->mac.ops.check_for_link(hw); - if (ret_val) { - DEBUGOUT("Error while checking for link\n"); - goto out; - } - mac->autoneg_failed = 0; + if (hw->phy.media_type == e1000_media_type_internal_serdes || + (E1000_READ_REG(hw, E1000_CTRL) & E1000_CTRL_SWDPIN1)) { + ret_val = e1000_poll_fiber_serdes_link_generic(hw); } else { - mac->autoneg_failed = 0; - DEBUGOUT("Valid Link Found\n"); + DEBUGOUT("No signal detected\n"); } -out: return ret_val; } /** - * e1000_commit_fc_settings_generic - Configure flow control + * e1000_config_collision_dist_generic - Configure collision distance * @hw: pointer to the HW structure * - * Write the flow control settings to the Transmit Config Word Register (TXCW) - * base on the flow control settings in e1000_mac_info. + * Configures the collision distance to the default value and is used + * during link setup. **/ -static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw) +static void e1000_config_collision_dist_generic(struct e1000_hw *hw) { - struct e1000_mac_info *mac = &hw->mac; - u32 txcw; - s32 ret_val = E1000_SUCCESS; + u32 tctl; - DEBUGFUNC("e1000_commit_fc_settings_generic"); + DEBUGFUNC("e1000_config_collision_dist_generic"); - /* - * Check for a software override of the flow control settings, and - * setup the device accordingly. If auto-negotiation is enabled, then - * software will have to set the "PAUSE" bits to the correct value in - * the Transmit Config Word Register (TXCW) and re-start auto- - * negotiation. However, if auto-negotiation is disabled, then - * software will have to manually configure the two flow control enable - * bits in the CTRL register. - * - * The possible values of the "fc" parameter are: - * 0: Flow control is completely disabled - * 1: Rx flow control is enabled (we can receive pause frames, - * but not send pause frames). - * 2: Tx flow control is enabled (we can send pause frames but we - * do not support receiving pause frames). - * 3: Both Rx and Tx flow control (symmetric) are enabled. - */ - switch (hw->fc.current_mode) { - case e1000_fc_none: - /* Flow control completely disabled by a software over-ride. */ - txcw = (E1000_TXCW_ANE | E1000_TXCW_FD); - break; - case e1000_fc_rx_pause: - /* - * Rx Flow control is enabled and Tx Flow control is disabled - * by a software over-ride. Since there really isn't a way to - * advertise that we are capable of Rx Pause ONLY, we will - * advertise that we support both symmetric and asymmetric RX - * PAUSE. Later, we will disable the adapter's ability to send - * PAUSE frames. 
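/* The 500 ms link wait in e1000_poll_fiber_serdes_link_generic() above is an
 * instance of a simple poll-with-timeout idiom; a sketch with a hypothetical
 * helper name, assuming only the register macros already used in this file.
 */
static bool e1000_poll_status_bit(struct e1000_hw *hw, u32 bit, u32 count)
{
	u32 i;

	/* Poll in 10 ms steps, as the driver's open-coded loop does. */
	for (i = 0; i < count; i++) {
		msec_delay(10);
		if (E1000_READ_REG(hw, E1000_STATUS) & bit)
			return true;
	}
	return false;
}
/* Called as e1000_poll_status_bit(hw, E1000_STATUS_LU, FIBER_LINK_UP_LIMIT),
 * a false return corresponds to the autoneg_failed path above.
 */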
- */ - txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); - break; - case e1000_fc_tx_pause: - /* - * Tx Flow control is enabled, and Rx Flow control is disabled, - * by a software over-ride. - */ - txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR); - break; - case e1000_fc_full: - /* - * Flow control (both Rx and Tx) is enabled by a software - * over-ride. - */ - txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); - break; - default: - DEBUGOUT("Flow control param set incorrectly\n"); - ret_val = -E1000_ERR_CONFIG; - goto out; - break; - } + tctl = E1000_READ_REG(hw, E1000_TCTL); - E1000_WRITE_REG(hw, E1000_TXCW, txcw); - mac->txcw = txcw; + tctl &= ~E1000_TCTL_COLD; + tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT; -out: - return ret_val; + E1000_WRITE_REG(hw, E1000_TCTL, tctl); + E1000_WRITE_FLUSH(hw); } /** @@ -1042,21 +1006,18 @@ out: **/ s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw) { - s32 ret_val = E1000_SUCCESS; u32 fcrtl = 0, fcrth = 0; DEBUGFUNC("e1000_set_fc_watermarks_generic"); - /* - * Set the flow control receive threshold registers. Normally, + /* Set the flow control receive threshold registers. Normally, * these registers will be set to a default threshold that may be * adjusted later by the driver's runtime code. However, if the * ability to transmit pause frames is not enabled, then these * registers will be set to 0. */ if (hw->fc.current_mode & e1000_fc_tx_pause) { - /* - * We need to set up the Receive Threshold high and low water + /* We need to set up the Receive Threshold high and low water * marks as well as (optionally) enabling the transmission of * XON frames. */ @@ -1069,49 +1030,7 @@ s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw) E1000_WRITE_REG(hw, E1000_FCRTL, fcrtl); E1000_WRITE_REG(hw, E1000_FCRTH, fcrth); - return ret_val; -} - -/** - * e1000_set_default_fc_generic - Set flow control default values - * @hw: pointer to the HW structure - * - * Read the EEPROM for the default values for flow control and store the - * values. - **/ -static s32 e1000_set_default_fc_generic(struct e1000_hw *hw) -{ - s32 ret_val = E1000_SUCCESS; - u16 nvm_data; - - DEBUGFUNC("e1000_set_default_fc_generic"); - - /* - * Read and store word 0x0F of the EEPROM. This word contains bits - * that determine the hardware's default PAUSE (flow control) mode, - * a bit that determines whether the HW defaults to enabling or - * disabling auto-negotiation, and the direction of the - * SW defined pins. If there is no SW over-ride of the flow - * control setting, then the variable hw->fc will - * be initialized based on a value in the EEPROM. 
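/* The (removed) e1000_set_default_fc_generic() logic above reduces to a
 * two-bit decode of NVM word 0x0F; a sketch with a hypothetical helper name,
 * using the NVM_WORD0F_* masks and enum e1000_fc_mode from this shared code.
 */
static enum e1000_fc_mode e1000_default_fc_from_nvm(u16 nvm_data)
{
	u16 pause = nvm_data & NVM_WORD0F_PAUSE_MASK;

	if (pause == 0)
		return e1000_fc_none;		/* no PAUSE ability */
	if (pause == NVM_WORD0F_ASM_DIR)
		return e1000_fc_tx_pause;	/* ASM_DIR bit only */
	return e1000_fc_full;			/* PAUSE set, any ASM_DIR */
}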
- */ - ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data); - - if (ret_val) { - DEBUGOUT("NVM Read Error\n"); - goto out; - } - - if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0) - hw->fc.requested_mode = e1000_fc_none; - else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == - NVM_WORD0F_ASM_DIR) - hw->fc.requested_mode = e1000_fc_tx_pause; - else - hw->fc.requested_mode = e1000_fc_full; - -out: - return ret_val; + return E1000_SUCCESS; } /** @@ -1127,14 +1046,12 @@ out: s32 e1000_force_mac_fc_generic(struct e1000_hw *hw) { u32 ctrl; - s32 ret_val = E1000_SUCCESS; DEBUGFUNC("e1000_force_mac_fc_generic"); ctrl = E1000_READ_REG(hw, E1000_CTRL); - /* - * Because we didn't get link via the internal auto-negotiation + /* Because we didn't get link via the internal auto-negotiation * mechanism (we either forced link or we got link via PHY * auto-neg), we have to manually enable/disable transmit an * receive flow control. @@ -1170,14 +1087,12 @@ s32 e1000_force_mac_fc_generic(struct e1000_hw *hw) break; default: DEBUGOUT("Flow control param set incorrectly\n"); - ret_val = -E1000_ERR_CONFIG; - goto out; + return -E1000_ERR_CONFIG; } E1000_WRITE_REG(hw, E1000_CTRL, ctrl); -out: - return ret_val; + return E1000_SUCCESS; } /** @@ -1194,13 +1109,13 @@ s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; s32 ret_val = E1000_SUCCESS; + u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg; u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg; u16 speed, duplex; DEBUGFUNC("e1000_config_fc_after_link_up_generic"); - /* - * Check for the case where we have fiber media and auto-neg failed + /* Check for the case where we have fiber media and auto-neg failed * so we had to force link. In this case, we need to force the * configuration of the MAC to match the "fc" parameter. */ @@ -1215,52 +1130,47 @@ s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw) if (ret_val) { DEBUGOUT("Error forcing flow control settings\n"); - goto out; + return ret_val; } - /* - * Check for the case where we have copper media and auto-neg is + /* Check for the case where we have copper media and auto-neg is * enabled. In this case, we need to check and see if Auto-Neg * has completed, and if so, how the PHY and link partner has * flow control configured. */ if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) { - /* - * Read the MII Status Register and check to see if AutoNeg + /* Read the MII Status Register and check to see if AutoNeg * has completed. We read this twice because this reg has * some "sticky" (latched) bits. */ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg); if (ret_val) - goto out; + return ret_val; ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg); if (ret_val) - goto out; + return ret_val; if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) { - DEBUGOUT("Copper PHY and Auto Neg " - "has not completed.\n"); - goto out; + DEBUGOUT("Copper PHY and Auto Neg has not completed.\n"); + return ret_val; } - /* - * The AutoNeg process has completed, so we now need to + /* The AutoNeg process has completed, so we now need to * read both the Auto Negotiation Advertisement * Register (Address 4) and the Auto_Negotiation Base * Page Ability Register (Address 5) to determine how * flow control was negotiated. 
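/* The cascaded tests that follow (and the resolution table reproduced below)
 * implement the standard IEEE 802.3 Annex 28B PAUSE resolution; a condensed
 * sketch with a hypothetical helper name, using the NWAY_AR_* / NWAY_LPAR_*
 * bit definitions from this code.
 */
static enum e1000_fc_mode e1000_resolve_fc(u16 adv, u16 lp,
					   enum e1000_fc_mode requested)
{
	/* Both sides advertise PAUSE: symmetric, unless we only wanted
	 * Rx and had to over-advertise FULL to get it.
	 */
	if ((adv & NWAY_AR_PAUSE) && (lp & NWAY_LPAR_PAUSE))
		return (requested == e1000_fc_full) ? e1000_fc_full
						    : e1000_fc_rx_pause;
	/* 0/1 vs 1/1: partner honors PAUSE, we only send it. */
	if (!(adv & NWAY_AR_PAUSE) && (adv & NWAY_AR_ASM_DIR) &&
	    (lp & NWAY_LPAR_PAUSE) && (lp & NWAY_LPAR_ASM_DIR))
		return e1000_fc_tx_pause;
	/* 1/1 vs 0/1: we honor PAUSE, partner only sends it. */
	if ((adv & NWAY_AR_PAUSE) && (adv & NWAY_AR_ASM_DIR) &&
	    !(lp & NWAY_LPAR_PAUSE) && (lp & NWAY_LPAR_ASM_DIR))
		return e1000_fc_rx_pause;
	/* Everything else resolves to no flow control. */
	return e1000_fc_none;
}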
*/ ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV, - &mii_nway_adv_reg); + &mii_nway_adv_reg); if (ret_val) - goto out; + return ret_val; ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY, - &mii_nway_lp_ability_reg); + &mii_nway_lp_ability_reg); if (ret_val) - goto out; + return ret_val; - /* - * Two bits in the Auto Negotiation Advertisement Register + /* Two bits in the Auto Negotiation Advertisement Register * (Address 4) and two bits in the Auto Negotiation Base * Page Ability Register (Address 5) determine flow control * for both the PHY and the link partner. The following @@ -1295,24 +1205,21 @@ s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw) */ if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { - /* - * Now we need to check if the user selected Rx ONLY + /* Now we need to check if the user selected Rx ONLY * of pause frames. In this case, we had to advertise - * FULL flow control because we could not advertise RX + * FULL flow control because we could not advertise Rx * ONLY. Hence, we must now check to see if we need to - * turn OFF the TRANSMISSION of PAUSE frames. + * turn OFF the TRANSMISSION of PAUSE frames. */ if (hw->fc.requested_mode == e1000_fc_full) { hw->fc.current_mode = e1000_fc_full; - DEBUGOUT("Flow Control = FULL.\r\n"); + DEBUGOUT("Flow Control = FULL.\n"); } else { hw->fc.current_mode = e1000_fc_rx_pause; - DEBUGOUT("Flow Control = " - "RX PAUSE frames only.\r\n"); + DEBUGOUT("Flow Control = Rx PAUSE frames only.\n"); } } - /* - * For receiving PAUSE frames ONLY. + /* For receiving PAUSE frames ONLY. * * LOCAL DEVICE | LINK PARTNER * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result @@ -1320,14 +1227,13 @@ s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw) * 0 | 1 | 1 | 1 | e1000_fc_tx_pause */ else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && - (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && - (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && - (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { hw->fc.current_mode = e1000_fc_tx_pause; - DEBUGOUT("Flow Control = TX PAUSE frames only.\r\n"); + DEBUGOUT("Flow Control = Tx PAUSE frames only.\n"); } - /* - * For transmitting PAUSE frames ONLY. + /* For transmitting PAUSE frames ONLY. * * LOCAL DEVICE | LINK PARTNER * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result @@ -1335,47 +1241,167 @@ s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw) * 1 | 1 | 0 | 1 | e1000_fc_rx_pause */ else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && - (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && - !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && - (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { hw->fc.current_mode = e1000_fc_rx_pause; - DEBUGOUT("Flow Control = RX PAUSE frames only.\r\n"); + DEBUGOUT("Flow Control = Rx PAUSE frames only.\n"); } else { - /* - * Per the IEEE spec, at this point flow control + /* Per the IEEE spec, at this point flow control * should be disabled. */ hw->fc.current_mode = e1000_fc_none; - DEBUGOUT("Flow Control = NONE.\r\n"); + DEBUGOUT("Flow Control = NONE.\n"); } - /* - * Now we need to do one last check... If we auto- + /* Now we need to do one last check... If we auto- * negotiated to HALF DUPLEX, flow control should not be * enabled per IEEE 802.3 spec. 
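/* e1000_force_mac_fc_generic(), called just below, ultimately reduces to
 * setting two CTRL bits; a sketch of that mapping, assuming the
 * E1000_CTRL_RFCE (Rx flow-control enable) and E1000_CTRL_TFCE (Tx
 * flow-control enable) defines from e1000_defines.h in this shared code.
 */
static u32 e1000_fc_mode_to_ctrl(u32 ctrl, enum e1000_fc_mode mode)
{
	ctrl &= ~(E1000_CTRL_RFCE | E1000_CTRL_TFCE);

	switch (mode) {
	case e1000_fc_rx_pause:
		ctrl |= E1000_CTRL_RFCE;	/* honor received PAUSE */
		break;
	case e1000_fc_tx_pause:
		ctrl |= E1000_CTRL_TFCE;	/* may transmit PAUSE */
		break;
	case e1000_fc_full:
		ctrl |= E1000_CTRL_RFCE | E1000_CTRL_TFCE;
		break;
	default:				/* e1000_fc_none */
		break;
	}
	return ctrl;
}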
*/ ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex); if (ret_val) { DEBUGOUT("Error getting link speed and duplex\n"); - goto out; + return ret_val; } if (duplex == HALF_DUPLEX) hw->fc.current_mode = e1000_fc_none; - /* - * Now we call a subroutine to actually force the MAC + /* Now we call a subroutine to actually force the MAC * controller to use the correct flow control settings. */ ret_val = e1000_force_mac_fc_generic(hw); if (ret_val) { DEBUGOUT("Error forcing flow control settings\n"); - goto out; + return ret_val; } } -out: - return ret_val; + /* Check for the case where we have SerDes media and auto-neg is + * enabled. In this case, we need to check and see if Auto-Neg + * has completed, and if so, how the PHY and link partner has + * flow control configured. + */ + if ((hw->phy.media_type == e1000_media_type_internal_serdes) && + mac->autoneg) { + /* Read the PCS_LSTS and check to see if AutoNeg + * has completed. + */ + pcs_status_reg = E1000_READ_REG(hw, E1000_PCS_LSTAT); + + if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) { + DEBUGOUT("PCS Auto Neg has not completed.\n"); + return ret_val; + } + + /* The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement + * Register (PCS_ANADV) and the Auto_Negotiation Base + * Page Ability Register (PCS_LPAB) to determine how + * flow control was negotiated. + */ + pcs_adv_reg = E1000_READ_REG(hw, E1000_PCS_ANADV); + pcs_lp_ability_reg = E1000_READ_REG(hw, E1000_PCS_LPAB); + + /* Two bits in the Auto Negotiation Advertisement Register + * (PCS_ANADV) and two bits in the Auto Negotiation Base + * Page Ability Register (PCS_LPAB) determine flow control + * for both the PHY and the link partner. The following + * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, + * 1999, describes these PAUSE resolution bits and how flow + * control is determined based upon these settings. + * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|-------------------- + * 0 | 0 | DC | DC | e1000_fc_none + * 0 | 1 | 0 | DC | e1000_fc_none + * 0 | 1 | 1 | 0 | e1000_fc_none + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + * 1 | 0 | 0 | DC | e1000_fc_none + * 1 | DC | 1 | DC | e1000_fc_full + * 1 | 1 | 0 | 0 | e1000_fc_none + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + * + * Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. + * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | DC | 1 | DC | e1000_fc_full + * + */ + if ((pcs_adv_reg & E1000_TXCW_PAUSE) && + (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) { + /* Now we need to check if the user selected Rx ONLY + * of pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise Rx + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. + */ + if (hw->fc.requested_mode == e1000_fc_full) { + hw->fc.current_mode = e1000_fc_full; + DEBUGOUT("Flow Control = FULL.\n"); + } else { + hw->fc.current_mode = e1000_fc_rx_pause; + DEBUGOUT("Flow Control = Rx PAUSE frames only.\n"); + } + } + /* For receiving PAUSE frames ONLY. 
+ * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + */ + else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) && + (pcs_adv_reg & E1000_TXCW_ASM_DIR) && + (pcs_lp_ability_reg & E1000_TXCW_PAUSE) && + (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_tx_pause; + DEBUGOUT("Flow Control = Tx PAUSE frames only.\n"); + } + /* For transmitting PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + */ + else if ((pcs_adv_reg & E1000_TXCW_PAUSE) && + (pcs_adv_reg & E1000_TXCW_ASM_DIR) && + !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) && + (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_rx_pause; + DEBUGOUT("Flow Control = Rx PAUSE frames only.\n"); + } else { + /* Per the IEEE spec, at this point flow control + * should be disabled. + */ + hw->fc.current_mode = e1000_fc_none; + DEBUGOUT("Flow Control = NONE.\n"); + } + + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ + pcs_ctrl_reg = E1000_READ_REG(hw, E1000_PCS_LCTL); + pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL; + E1000_WRITE_REG(hw, E1000_PCS_LCTL, pcs_ctrl_reg); + + ret_val = e1000_force_mac_fc_generic(hw); + if (ret_val) { + DEBUGOUT("Error forcing flow control settings\n"); + return ret_val; + } + } + + return E1000_SUCCESS; } /** @@ -1388,7 +1414,7 @@ out: * speed and duplex for copper connections. **/ s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed, - u16 *duplex) + u16 *duplex) { u32 status; @@ -1426,8 +1452,8 @@ s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed, * Sets the speed and duplex to gigabit full duplex (the only possible option) * for fiber/serdes links. **/ -s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw *hw, - u16 *speed, u16 *duplex) +s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw E1000_UNUSEDARG *hw, + u16 *speed, u16 *duplex) { DEBUGFUNC("e1000_get_speed_and_duplex_fiber_serdes_generic"); @@ -1446,7 +1472,6 @@ s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw *hw, s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw) { u32 swsm; - s32 ret_val = E1000_SUCCESS; s32 timeout = hw->nvm.word_size + 1; s32 i = 0; @@ -1464,8 +1489,7 @@ s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw) if (i == timeout) { DEBUGOUT("Driver can't access device - SMBI bit is set.\n"); - ret_val = -E1000_ERR_NVM; - goto out; + return -E1000_ERR_NVM; } /* Get the FW semaphore. 
*/ @@ -1484,12 +1508,10 @@ s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw) /* Release semaphores */ e1000_put_hw_semaphore_generic(hw); DEBUGOUT("Driver can't access the NVM\n"); - ret_val = -E1000_ERR_NVM; - goto out; + return -E1000_ERR_NVM; } -out: - return ret_val; + return E1000_SUCCESS; } /** @@ -1520,7 +1542,6 @@ void e1000_put_hw_semaphore_generic(struct e1000_hw *hw) s32 e1000_get_auto_rd_done_generic(struct e1000_hw *hw) { s32 i = 0; - s32 ret_val = E1000_SUCCESS; DEBUGFUNC("e1000_get_auto_rd_done_generic"); @@ -1533,12 +1554,10 @@ s32 e1000_get_auto_rd_done_generic(struct e1000_hw *hw) if (i == AUTO_READ_DONE_TIMEOUT) { DEBUGOUT("Auto read by HW from NVM has not completed.\n"); - ret_val = -E1000_ERR_RESET; - goto out; + return -E1000_ERR_RESET; } -out: - return ret_val; + return E1000_SUCCESS; } /** @@ -1558,14 +1577,13 @@ s32 e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data) ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); if (ret_val) { DEBUGOUT("NVM Read Error\n"); - goto out; + return ret_val; } if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) *data = ID_LED_DEFAULT; -out: - return ret_val; + return E1000_SUCCESS; } /** @@ -1587,7 +1605,7 @@ s32 e1000_id_led_init_generic(struct e1000_hw *hw) ret_val = hw->nvm.ops.valid_led_default(hw, &data); if (ret_val) - goto out; + return ret_val; mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL); mac->ledctl_mode1 = mac->ledctl_default; @@ -1631,8 +1649,7 @@ s32 e1000_id_led_init_generic(struct e1000_hw *hw) } } -out: - return ret_val; + return E1000_SUCCESS; } /** @@ -1645,31 +1662,26 @@ out: s32 e1000_setup_led_generic(struct e1000_hw *hw) { u32 ledctl; - s32 ret_val = E1000_SUCCESS; DEBUGFUNC("e1000_setup_led_generic"); - if (hw->mac.ops.setup_led != e1000_setup_led_generic) { - ret_val = -E1000_ERR_CONFIG; - goto out; - } + if (hw->mac.ops.setup_led != e1000_setup_led_generic) + return -E1000_ERR_CONFIG; if (hw->phy.media_type == e1000_media_type_fiber) { ledctl = E1000_READ_REG(hw, E1000_LEDCTL); hw->mac.ledctl_default = ledctl; /* Turn off LED0 */ - ledctl &= ~(E1000_LEDCTL_LED0_IVRT | - E1000_LEDCTL_LED0_BLINK | - E1000_LEDCTL_LED0_MODE_MASK); + ledctl &= ~(E1000_LEDCTL_LED0_IVRT | E1000_LEDCTL_LED0_BLINK | + E1000_LEDCTL_LED0_MODE_MASK); ledctl |= (E1000_LEDCTL_MODE_LED_OFF << - E1000_LEDCTL_LED0_MODE_SHIFT); + E1000_LEDCTL_LED0_MODE_SHIFT); E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl); } else if (hw->phy.media_type == e1000_media_type_copper) { E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1); } -out: - return ret_val; + return E1000_SUCCESS; } /** @@ -1681,19 +1693,10 @@ out: **/ s32 e1000_cleanup_led_generic(struct e1000_hw *hw) { - s32 ret_val = E1000_SUCCESS; - DEBUGFUNC("e1000_cleanup_led_generic"); - if (hw->mac.ops.cleanup_led != e1000_cleanup_led_generic) { - ret_val = -E1000_ERR_CONFIG; - goto out; - } - E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default); - -out: - return ret_val; + return E1000_SUCCESS; } /** @@ -1714,16 +1717,28 @@ s32 e1000_blink_led_generic(struct e1000_hw *hw) ledctl_blink = E1000_LEDCTL_LED0_BLINK | (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT); } else { - /* - * set the blink bit for each LED that's "on" (0x0E) - * in ledctl_mode2 + /* Set the blink bit for each LED that's "on" (0x0E) + * (or "off" if inverted) in ledctl_mode2. The blink + * logic in hardware only works when mode is set to "on" + * so it must be changed accordingly when the mode is + * "off" and inverted. 
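/* The blink loop that follows applies this test to each of the four 8-bit
 * LED fields in LEDCTL; a standalone sketch of the predicate (hypothetical
 * helper name, E1000_LEDCTL_* defines from this code).
 */
static bool e1000_led_blink_candidate(u32 ledctl_default, u32 ledctl_mode2,
				      u32 shift)
{
	u32 mode = (ledctl_mode2 >> shift) & E1000_LEDCTL_LED0_MODE_MASK;
	bool ivrt = !!((ledctl_default >> shift) & E1000_LEDCTL_LED0_IVRT);

	/* Hardware blinks only when the mode is LED_ON, so an LED must be
	 * rewritten to blink-on when it is logically on: mode LED_ON and
	 * not inverted, or mode LED_OFF and inverted.
	 */
	return (!ivrt && mode == E1000_LEDCTL_MODE_LED_ON) ||
	       (ivrt && mode == E1000_LEDCTL_MODE_LED_OFF);
}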
*/ ledctl_blink = hw->mac.ledctl_mode2; - for (i = 0; i < 4; i++) - if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) == - E1000_LEDCTL_MODE_LED_ON) - ledctl_blink |= (E1000_LEDCTL_LED0_BLINK << - (i * 8)); + for (i = 0; i < 32; i += 8) { + u32 mode = (hw->mac.ledctl_mode2 >> i) & + E1000_LEDCTL_LED0_MODE_MASK; + u32 led_default = hw->mac.ledctl_default >> i; + + if ((!(led_default & E1000_LEDCTL_LED0_IVRT) && + (mode == E1000_LEDCTL_MODE_LED_ON)) || + ((led_default & E1000_LEDCTL_LED0_IVRT) && + (mode == E1000_LEDCTL_MODE_LED_OFF))) { + ledctl_blink &= + ~(E1000_LEDCTL_LED0_MODE_MASK << i); + ledctl_blink |= (E1000_LEDCTL_LED0_BLINK | + E1000_LEDCTL_MODE_LED_ON) << i; + } + } } E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl_blink); @@ -1802,24 +1817,19 @@ void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop) DEBUGFUNC("e1000_set_pcie_no_snoop_generic"); - if (hw->bus.type != e1000_bus_type_pci_express) - goto out; - if (no_snoop) { gcr = E1000_READ_REG(hw, E1000_GCR); gcr &= ~(PCIE_NO_SNOOP_ALL); gcr |= no_snoop; E1000_WRITE_REG(hw, E1000_GCR, gcr); } -out: - return; } /** * e1000_disable_pcie_master_generic - Disables PCI-express master access * @hw: pointer to the HW structure * - * Returns 0 (E1000_SUCCESS) if successful, else returns -10 + * Returns E1000_SUCCESS if successful, else returns -10 * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused * the master requests to be disabled. * @@ -1830,13 +1840,9 @@ s32 e1000_disable_pcie_master_generic(struct e1000_hw *hw) { u32 ctrl; s32 timeout = MASTER_DISABLE_TIMEOUT; - s32 ret_val = E1000_SUCCESS; DEBUGFUNC("e1000_disable_pcie_master_generic"); - if (hw->bus.type != e1000_bus_type_pci_express) - goto out; - ctrl = E1000_READ_REG(hw, E1000_CTRL); ctrl |= E1000_CTRL_GIO_MASTER_DISABLE; E1000_WRITE_REG(hw, E1000_CTRL, ctrl); @@ -1851,12 +1857,10 @@ s32 e1000_disable_pcie_master_generic(struct e1000_hw *hw) if (!timeout) { DEBUGOUT("Master requests are pending.\n"); - ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING; - goto out; + return -E1000_ERR_MASTER_REQUESTS_PENDING; } -out: - return ret_val; + return E1000_SUCCESS; } /** @@ -1873,7 +1877,7 @@ void e1000_reset_adaptive_generic(struct e1000_hw *hw) if (!mac->adaptive_ifs) { DEBUGOUT("Not in Adaptive IFS mode!\n"); - goto out; + return; } mac->current_ifs_val = 0; @@ -1884,8 +1888,6 @@ void e1000_reset_adaptive_generic(struct e1000_hw *hw) mac->in_ifs_mode = false; E1000_WRITE_REG(hw, E1000_AIT, 0); -out: - return; } /** @@ -1903,7 +1905,7 @@ void e1000_update_adaptive_generic(struct e1000_hw *hw) if (!mac->adaptive_ifs) { DEBUGOUT("Not in Adaptive IFS mode!\n"); - goto out; + return; } if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) { @@ -1915,7 +1917,8 @@ void e1000_update_adaptive_generic(struct e1000_hw *hw) else mac->current_ifs_val += mac->ifs_step_size; - E1000_WRITE_REG(hw, E1000_AIT, mac->current_ifs_val); + E1000_WRITE_REG(hw, E1000_AIT, + mac->current_ifs_val); } } } else { @@ -1926,8 +1929,6 @@ void e1000_update_adaptive_generic(struct e1000_hw *hw) E1000_WRITE_REG(hw, E1000_AIT, 0); } } -out: - return; } /** @@ -1939,19 +1940,29 @@ out: **/ static s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw) { - s32 ret_val = E1000_SUCCESS; - DEBUGFUNC("e1000_validate_mdi_setting_generic"); if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) { DEBUGOUT("Invalid MDI setting detected\n"); hw->phy.mdix = 1; - ret_val = -E1000_ERR_CONFIG; - goto out; + return -E1000_ERR_CONFIG; } -out: - return ret_val; + return 
E1000_SUCCESS; +} + +/** + * e1000_validate_mdi_setting_crossover_generic - Verify MDI/MDIx settings + * @hw: pointer to the HW structure + * + * Validate the MDI/MDIx setting, allowing for auto-crossover during forced + * operation. + **/ +s32 e1000_validate_mdi_setting_crossover_generic(struct e1000_hw E1000_UNUSEDARG *hw) +{ + DEBUGFUNC("e1000_validate_mdi_setting_crossover_generic"); + + return E1000_SUCCESS; } /** @@ -1966,10 +1977,9 @@ out: * completion. **/ s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg, - u32 offset, u8 data) + u32 offset, u8 data) { u32 i, regvalue = 0; - s32 ret_val = E1000_SUCCESS; DEBUGFUNC("e1000_write_8bit_ctrl_reg_generic"); @@ -1986,10 +1996,8 @@ s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg, } if (!(regvalue & E1000_GEN_CTL_READY)) { DEBUGOUT1("Reg %08x did not indicate ready\n", reg); - ret_val = -E1000_ERR_PHY; - goto out; + return -E1000_ERR_PHY; } -out: - return ret_val; + return E1000_SUCCESS; } diff --git a/vmkdrivers/src_9/drivers/net/igb/e1000_mac.h b/vmkdrivers/src_9/drivers/net/igb/e1000_mac.h index d010b33baaf800b7cb8e7f91953b87982055b1dd..45d4e0266cc411b814bb2fd86d2ade5aa69b7c66 100644 --- a/vmkdrivers/src_9/drivers/net/igb/e1000_mac.h +++ b/vmkdrivers/src_9/drivers/net/igb/e1000_mac.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2009 Intel Corporation. + Copyright(c) 2007-2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -28,10 +28,6 @@ #ifndef _E1000_MAC_H_ #define _E1000_MAC_H_ -/* - * Functions that should not be called directly from drivers but can be used - * by other files in this 'shared code' - */ void e1000_init_mac_ops_generic(struct e1000_hw *hw); s32 e1000_blink_led_generic(struct e1000_hw *hw); s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw); @@ -46,31 +42,28 @@ s32 e1000_get_bus_info_pcie_generic(struct e1000_hw *hw); void e1000_set_lan_id_single_port(struct e1000_hw *hw); s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw); s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed, - u16 *duplex); + u16 *duplex); s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw *hw, - u16 *speed, u16 *duplex); + u16 *speed, u16 *duplex); s32 e1000_id_led_init_generic(struct e1000_hw *hw); s32 e1000_led_on_generic(struct e1000_hw *hw); s32 e1000_led_off_generic(struct e1000_hw *hw); void e1000_update_mc_addr_list_generic(struct e1000_hw *hw, - u8 *mc_addr_list, u32 mc_addr_count); + u8 *mc_addr_list, u32 mc_addr_count); s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw); s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw); s32 e1000_setup_led_generic(struct e1000_hw *hw); s32 e1000_setup_link_generic(struct e1000_hw *hw); +s32 e1000_validate_mdi_setting_crossover_generic(struct e1000_hw *hw); s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg, - u32 offset, u8 data); + u32 offset, u8 data); u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr); void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw); void e1000_clear_vfta_generic(struct e1000_hw *hw); -void e1000_config_collision_dist_generic(struct e1000_hw *hw); void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count); -void e1000_mta_set_generic(struct e1000_hw *hw, u32 hash_value); -void 
e1000_pcix_mmrbc_workaround_generic(struct e1000_hw *hw); void e1000_put_hw_semaphore_generic(struct e1000_hw *hw); -void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index); s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw); void e1000_reset_adaptive_generic(struct e1000_hw *hw); void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop); diff --git a/vmkdrivers/src_9/drivers/net/igb/e1000_manage.c b/vmkdrivers/src_9/drivers/net/igb/e1000_manage.c index d61a7ab18f2130aea2c4ae136de46109d24a377b..e1a2abe08dd27d507b1c8fe8912ac067f16309a6 100644 --- a/vmkdrivers/src_9/drivers/net/igb/e1000_manage.c +++ b/vmkdrivers/src_9/drivers/net/igb/e1000_manage.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2009 Intel Corporation. + Copyright(c) 2007-2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -27,8 +27,6 @@ #include "e1000_api.h" -static u8 e1000_calculate_checksum(u8 *buffer, u32 length); - /** * e1000_calculate_checksum - Calculate checksum for buffer * @buffer: pointer to EEPROM @@ -37,10 +35,10 @@ static u8 e1000_calculate_checksum(u8 *buffer, u32 length); * Calculates the checksum for some buffer on a specified length. The * checksum calculated is returned. **/ -static u8 e1000_calculate_checksum(u8 *buffer, u32 length) +u8 e1000_calculate_checksum(u8 *buffer, u32 length) { u32 i; - u8 sum = 0; + u8 sum = 0; DEBUGFUNC("e1000_calculate_checksum"); @@ -66,17 +64,20 @@ static u8 e1000_calculate_checksum(u8 *buffer, u32 length) s32 e1000_mng_enable_host_if_generic(struct e1000_hw *hw) { u32 hicr; - s32 ret_val = E1000_SUCCESS; - u8 i; + u8 i; DEBUGFUNC("e1000_mng_enable_host_if_generic"); + if (!hw->mac.arc_subsystem_valid) { + DEBUGOUT("ARC subsystem not valid.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + /* Check that the host interface is enabled. 
*/ hicr = E1000_READ_REG(hw, E1000_HICR); - if ((hicr & E1000_HICR_EN) == 0) { + if (!(hicr & E1000_HICR_EN)) { DEBUGOUT("E1000_HOST_EN bit disabled.\n"); - ret_val = -E1000_ERR_HOST_INTERFACE_COMMAND; - goto out; + return -E1000_ERR_HOST_INTERFACE_COMMAND; } /* check the previous command is completed */ for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) { @@ -88,12 +89,10 @@ s32 e1000_mng_enable_host_if_generic(struct e1000_hw *hw) if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) { DEBUGOUT("Previous command timeout failed .\n"); - ret_val = -E1000_ERR_HOST_INTERFACE_COMMAND; - goto out; + return -E1000_ERR_HOST_INTERFACE_COMMAND; } -out: - return ret_val; + return E1000_SUCCESS; } /** @@ -105,18 +104,17 @@ out: **/ bool e1000_check_mng_mode_generic(struct e1000_hw *hw) { - u32 fwsm; + u32 fwsm = E1000_READ_REG(hw, E1000_FWSM); DEBUGFUNC("e1000_check_mng_mode_generic"); - fwsm = E1000_READ_REG(hw, E1000_FWSM); return (fwsm & E1000_FWSM_MODE_MASK) == - (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT); + (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT); } /** - * e1000_enable_tx_pkt_filtering_generic - Enable packet filtering on TX + * e1000_enable_tx_pkt_filtering_generic - Enable packet filtering on Tx * @hw: pointer to the HW structure * * Enables packet filtering on transmit packets if manageability is enabled @@ -129,102 +127,50 @@ bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw) u32 offset; s32 ret_val, hdr_csum, csum; u8 i, len; - bool tx_filter = true; DEBUGFUNC("e1000_enable_tx_pkt_filtering_generic"); + hw->mac.tx_pkt_filtering = true; + /* No manageability, no filtering */ if (!hw->mac.ops.check_mng_mode(hw)) { - tx_filter = false; - goto out; + hw->mac.tx_pkt_filtering = false; + return hw->mac.tx_pkt_filtering; } - /* - * If we can't read from the host interface for whatever + /* If we can't read from the host interface for whatever * reason, disable filtering. */ - ret_val = hw->mac.ops.mng_enable_host_if(hw); + ret_val = e1000_mng_enable_host_if_generic(hw); if (ret_val != E1000_SUCCESS) { - tx_filter = false; - goto out; + hw->mac.tx_pkt_filtering = false; + return hw->mac.tx_pkt_filtering; } /* Read in the header. Length and offset are in dwords. */ len = E1000_MNG_DHCP_COOKIE_LENGTH >> 2; offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2; - for (i = 0; i < len; i++) { - *(buffer + i) = E1000_READ_REG_ARRAY_DWORD(hw, - E1000_HOST_IF, - offset + i); - } + for (i = 0; i < len; i++) + *(buffer + i) = E1000_READ_REG_ARRAY_DWORD(hw, E1000_HOST_IF, + offset + i); hdr_csum = hdr->checksum; hdr->checksum = 0; csum = e1000_calculate_checksum((u8 *)hdr, - E1000_MNG_DHCP_COOKIE_LENGTH); - /* - * If either the checksums or signature don't match, then + E1000_MNG_DHCP_COOKIE_LENGTH); + /* If either the checksums or signature don't match, then * the cookie area isn't considered valid, in which case we * take the safe route of assuming Tx filtering is enabled. */ - if (hdr_csum != csum) - goto out; - if (hdr->signature != E1000_IAMT_SIGNATURE) - goto out; + if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) { + hw->mac.tx_pkt_filtering = true; + return hw->mac.tx_pkt_filtering; + } /* Cookie area is valid, make the final check for filtering. 
*/ if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) - tx_filter = false; + hw->mac.tx_pkt_filtering = false; -out: - hw->mac.tx_pkt_filtering = tx_filter; - return tx_filter; -} - -/** - * e1000_mng_write_dhcp_info_generic - Writes DHCP info to host interface - * @hw: pointer to the HW structure - * @buffer: pointer to the host interface - * @length: size of the buffer - * - * Writes the DHCP information to the host interface. - **/ -s32 e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw, u8 *buffer, - u16 length) -{ - struct e1000_host_mng_command_header hdr; - s32 ret_val; - u32 hicr; - - DEBUGFUNC("e1000_mng_write_dhcp_info_generic"); - - hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD; - hdr.command_length = length; - hdr.reserved1 = 0; - hdr.reserved2 = 0; - hdr.checksum = 0; - - /* Enable the host interface */ - ret_val = hw->mac.ops.mng_enable_host_if(hw); - if (ret_val) - goto out; - - /* Populate the host interface with the contents of "buffer". */ - ret_val = hw->mac.ops.mng_host_if_write(hw, buffer, length, - sizeof(hdr), &(hdr.checksum)); - if (ret_val) - goto out; - - /* Write the manageability command header */ - ret_val = hw->mac.ops.mng_write_cmd_header(hw, &hdr); - if (ret_val) - goto out; - - /* Tell the ARC a new command is pending. */ - hicr = E1000_READ_REG(hw, E1000_HICR); - E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C); - -out: - return ret_val; + return hw->mac.tx_pkt_filtering; } /** @@ -235,7 +181,7 @@ out: * Writes the command header after does the checksum calculation. **/ s32 e1000_mng_write_cmd_header_generic(struct e1000_hw *hw, - struct e1000_host_mng_command_header *hdr) + struct e1000_host_mng_command_header *hdr) { u16 i, length = sizeof(struct e1000_host_mng_command_header); @@ -249,7 +195,7 @@ s32 e1000_mng_write_cmd_header_generic(struct e1000_hw *hw, /* Write the relevant command block into the ram area. */ for (i = 0; i < length; i++) { E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, i, - *((u32 *) hdr + i)); + *((u32 *) hdr + i)); E1000_WRITE_FLUSH(hw); } @@ -269,22 +215,19 @@ s32 e1000_mng_write_cmd_header_generic(struct e1000_hw *hw, * way. Also fills up the sum of the buffer in *buffer parameter. **/ s32 e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer, - u16 length, u16 offset, u8 *sum) + u16 length, u16 offset, u8 *sum) { u8 *tmp; u8 *bufptr = buffer; u32 data = 0; - s32 ret_val = E1000_SUCCESS; u16 remaining, i, j, prev_bytes; DEBUGFUNC("e1000_mng_host_if_write_generic"); /* sum = only sum of the data and it is not checksum */ - if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH) { - ret_val = -E1000_ERR_PARAM; - goto out; - } + if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH) + return -E1000_ERR_PARAM; tmp = (u8 *)&data; prev_bytes = offset & 0x3; @@ -307,8 +250,7 @@ s32 e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer, /* Calculate length in DWORDs */ length >>= 2; - /* - * The device driver writes the relevant command block into the + /* The device driver writes the relevant command block into the * ram area. 
*/ for (i = 0; i < length; i++) { @@ -318,7 +260,7 @@ s32 e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer, } E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset + i, - data); + data); } if (remaining) { for (j = 0; j < sizeof(u32); j++) { @@ -329,55 +271,286 @@ s32 e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer, *sum += *(tmp + j); } - E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset + i, data); + E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset + i, + data); } -out: - return ret_val; + return E1000_SUCCESS; +} + +/** + * e1000_mng_write_dhcp_info_generic - Writes DHCP info to host interface + * @hw: pointer to the HW structure + * @buffer: pointer to the host interface + * @length: size of the buffer + * + * Writes the DHCP information to the host interface. + **/ +s32 e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw, u8 *buffer, + u16 length) +{ + struct e1000_host_mng_command_header hdr; + s32 ret_val; + u32 hicr; + + DEBUGFUNC("e1000_mng_write_dhcp_info_generic"); + + hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD; + hdr.command_length = length; + hdr.reserved1 = 0; + hdr.reserved2 = 0; + hdr.checksum = 0; + + /* Enable the host interface */ + ret_val = e1000_mng_enable_host_if_generic(hw); + if (ret_val) + return ret_val; + + /* Populate the host interface with the contents of "buffer". */ + ret_val = e1000_mng_host_if_write_generic(hw, buffer, length, + sizeof(hdr), &(hdr.checksum)); + if (ret_val) + return ret_val; + + /* Write the manageability command header */ + ret_val = e1000_mng_write_cmd_header_generic(hw, &hdr); + if (ret_val) + return ret_val; + + /* Tell the ARC a new command is pending. */ + hicr = E1000_READ_REG(hw, E1000_HICR); + E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C); + + return E1000_SUCCESS; } /** - * e1000_enable_mng_pass_thru - Enable processing of ARP's + * e1000_enable_mng_pass_thru - Check if management passthrough is needed * @hw: pointer to the HW structure * - * Verifies the hardware needs to allow ARPs to be processed by the host. + * Verifies the hardware needs to leave interface enabled so that frames can + * be directed to and from the management interface. **/ bool e1000_enable_mng_pass_thru(struct e1000_hw *hw) { u32 manc; u32 fwsm, factps; - bool ret_val = false; DEBUGFUNC("e1000_enable_mng_pass_thru"); if (!hw->mac.asf_firmware_present) - goto out; + return false; manc = E1000_READ_REG(hw, E1000_MANC); - if (!(manc & E1000_MANC_RCV_TCO_EN) || - !(manc & E1000_MANC_EN_MAC_ADDR_FILTER)) - goto out; + if (!(manc & E1000_MANC_RCV_TCO_EN)) + return false; - if (hw->mac.arc_subsystem_valid) { + if (hw->mac.has_fwsm) { fwsm = E1000_READ_REG(hw, E1000_FWSM); factps = E1000_READ_REG(hw, E1000_FACTPS); if (!(factps & E1000_FACTPS_MNGCG) && ((fwsm & E1000_FWSM_MODE_MASK) == - (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) { - ret_val = true; - goto out; - } - } else { - if ((manc & E1000_MANC_SMBUS_EN) && - !(manc & E1000_MANC_ASF_EN)) { - ret_val = true; - goto out; + (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) + return true; + } else if ((manc & E1000_MANC_SMBUS_EN) && + !(manc & E1000_MANC_ASF_EN)) { + return true; + } + + return false; +} + +/** + * e1000_host_interface_command - Writes buffer to host interface + * @hw: pointer to the HW structure + * @buffer: contains a command to write + * @length: the byte length of the buffer, must be multiple of 4 bytes + * + * Writes a buffer to the Host Interface. Upon success, returns E1000_SUCCESS + * else returns E1000_ERR_HOST_INTERFACE_COMMAND. 
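/* A hedged caller-side sketch for the routine defined just below. The
 * constraints (DWORD-multiple length, response returned in place) are the
 * ones stated in its header comment; the buffer size and contents here are
 * purely illustrative.
 */
static s32 e1000_hi_command_example(struct e1000_hw *hw)
{
	u8 cmd[16] = { 0 };	/* length must be a multiple of 4 bytes */
	s32 ret_val;

	/* Fill in an illustrative command body here before sending. */
	ret_val = e1000_host_interface_command(hw, cmd, sizeof(cmd));
	if (ret_val)
		DEBUGOUT("Host interface command failed\n");
	/* On success, cmd[] now holds the ARC firmware's response. */

	return ret_val;
}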
+ **/ +s32 e1000_host_interface_command(struct e1000_hw *hw, u8 *buffer, u32 length) +{ + u32 hicr, i; + + DEBUGFUNC("e1000_host_interface_command"); + + if (!(hw->mac.arc_subsystem_valid)) { + DEBUGOUT("Hardware doesn't support host interface command.\n"); + return E1000_SUCCESS; + } + + if (!hw->mac.asf_firmware_present) { + DEBUGOUT("Firmware is not present.\n"); + return E1000_SUCCESS; + } + + if (length == 0 || length & 0x3 || + length > E1000_HI_MAX_BLOCK_BYTE_LENGTH) { + DEBUGOUT("Buffer length failure.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + /* Check that the host interface is enabled. */ + hicr = E1000_READ_REG(hw, E1000_HICR); + if (!(hicr & E1000_HICR_EN)) { + DEBUGOUT("E1000_HOST_EN bit disabled.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + /* Calculate length in DWORDs */ + length >>= 2; + + /* The device driver writes the relevant command block + * into the ram area. + */ + for (i = 0; i < length; i++) + E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, i, + *((u32 *)buffer + i)); + + /* Setting this bit tells the ARC that a new command is pending. */ + E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C); + + for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) { + hicr = E1000_READ_REG(hw, E1000_HICR); + if (!(hicr & E1000_HICR_C)) + break; + msec_delay(1); + } + + /* Check command successful completion. */ + if (i == E1000_HI_COMMAND_TIMEOUT || + (!(E1000_READ_REG(hw, E1000_HICR) & E1000_HICR_SV))) { + DEBUGOUT("Command has failed with no status valid.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + for (i = 0; i < length; i++) + *((u32 *)buffer + i) = E1000_READ_REG_ARRAY_DWORD(hw, + E1000_HOST_IF, + i); + + return E1000_SUCCESS; +} +/** + * e1000_load_firmware - Writes proxy FW code buffer to host interface + * and execute. + * @hw: pointer to the HW structure + * @buffer: contains a firmware to write + * @length: the byte length of the buffer, must be multiple of 4 bytes + * + * Upon success returns E1000_SUCCESS, returns E1000_ERR_CONFIG if not enabled + * in HW else returns E1000_ERR_HOST_INTERFACE_COMMAND. + **/ +s32 e1000_load_firmware(struct e1000_hw *hw, u8 *buffer, u32 length) +{ + u32 hicr, hibba, fwsm, icr, i; + + DEBUGFUNC("e1000_load_firmware"); + + if (hw->mac.type < e1000_i210) { + DEBUGOUT("Hardware doesn't support loading FW by the driver\n"); + return -E1000_ERR_CONFIG; + } + + /* Check that the host interface is enabled. 
*/ + hicr = E1000_READ_REG(hw, E1000_HICR); + if (!(hicr & E1000_HICR_EN)) { + DEBUGOUT("E1000_HOST_EN bit disabled.\n"); + return -E1000_ERR_CONFIG; + } + if (!(hicr & E1000_HICR_MEMORY_BASE_EN)) { + DEBUGOUT("E1000_HICR_MEMORY_BASE_EN bit disabled.\n"); + return -E1000_ERR_CONFIG; + } + + if (length == 0 || length & 0x3 || length > E1000_HI_FW_MAX_LENGTH) { + DEBUGOUT("Buffer length failure.\n"); + return -E1000_ERR_INVALID_ARGUMENT; + } + + /* Clear notification from ROM-FW by reading ICR register */ + icr = E1000_READ_REG(hw, E1000_ICR_V2); + + /* Reset ROM-FW */ + hicr = E1000_READ_REG(hw, E1000_HICR); + hicr |= E1000_HICR_FW_RESET_ENABLE; + E1000_WRITE_REG(hw, E1000_HICR, hicr); + hicr |= E1000_HICR_FW_RESET; + E1000_WRITE_REG(hw, E1000_HICR, hicr); + E1000_WRITE_FLUSH(hw); + + /* Wait till MAC notifies about its readiness after ROM-FW reset */ + for (i = 0; i < (E1000_HI_COMMAND_TIMEOUT * 2); i++) { + icr = E1000_READ_REG(hw, E1000_ICR_V2); + if (icr & E1000_ICR_MNG) + break; + msec_delay(1); + } + + /* Check for timeout */ + if (i == E1000_HI_COMMAND_TIMEOUT) { + DEBUGOUT("FW reset failed.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + /* Wait till MAC is ready to accept new FW code */ + for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) { + fwsm = E1000_READ_REG(hw, E1000_FWSM); + if ((fwsm & E1000_FWSM_FW_VALID) && + ((fwsm & E1000_FWSM_MODE_MASK) >> E1000_FWSM_MODE_SHIFT == + E1000_FWSM_HI_EN_ONLY_MODE)) + break; + msec_delay(1); + } + + /* Check for timeout */ + if (i == E1000_HI_COMMAND_TIMEOUT) { + DEBUGOUT("FW reset failed.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + /* Calculate length in DWORDs */ + length >>= 2; + + /* The device driver writes the relevant FW code block + * into the ram area in DWORDs via 1kB ram addressing window. + */ + for (i = 0; i < length; i++) { + if (!(i % E1000_HI_FW_BLOCK_DWORD_LENGTH)) { + /* Point to correct 1kB ram window */ + hibba = E1000_HI_FW_BASE_ADDRESS + + ((E1000_HI_FW_BLOCK_DWORD_LENGTH << 2) * + (i / E1000_HI_FW_BLOCK_DWORD_LENGTH)); + + E1000_WRITE_REG(hw, E1000_HIBBA, hibba); } + + E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, + i % E1000_HI_FW_BLOCK_DWORD_LENGTH, + *((u32 *)buffer + i)); + } + + /* Setting this bit tells the ARC that a new FW is ready to execute. */ + hicr = E1000_READ_REG(hw, E1000_HICR); + E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C); + + for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) { + hicr = E1000_READ_REG(hw, E1000_HICR); + if (!(hicr & E1000_HICR_C)) + break; + msec_delay(1); + } + + /* Check for successful FW start. */ + if (i == E1000_HI_COMMAND_TIMEOUT) { + DEBUGOUT("New FW did not start within timeout period.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; } -out: - return ret_val; + return E1000_SUCCESS; } + diff --git a/vmkdrivers/src_9/drivers/net/igb/e1000_manage.h b/vmkdrivers/src_9/drivers/net/igb/e1000_manage.h index ed6601adc869bfa5349862d6a20df3a753141ab5..c94b218542e2e0c192777198ded8b6c46f6a37d6 100644 --- a/vmkdrivers/src_9/drivers/net/igb/e1000_manage.h +++ b/vmkdrivers/src_9/drivers/net/igb/e1000_manage.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2009 Intel Corporation. + Copyright(c) 2007-2013 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -32,12 +32,15 @@ bool e1000_check_mng_mode_generic(struct e1000_hw *hw); bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw); s32 e1000_mng_enable_host_if_generic(struct e1000_hw *hw); s32 e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer, - u16 length, u16 offset, u8 *sum); + u16 length, u16 offset, u8 *sum); s32 e1000_mng_write_cmd_header_generic(struct e1000_hw *hw, - struct e1000_host_mng_command_header *hdr); + struct e1000_host_mng_command_header *hdr); s32 e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw, - u8 *buffer, u16 length); + u8 *buffer, u16 length); bool e1000_enable_mng_pass_thru(struct e1000_hw *hw); +u8 e1000_calculate_checksum(u8 *buffer, u32 length); +s32 e1000_host_interface_command(struct e1000_hw *hw, u8 *buffer, u32 length); +s32 e1000_load_firmware(struct e1000_hw *hw, u8 *buffer, u32 length); enum e1000_mng_mode { e1000_mng_mode_none = 0, @@ -47,35 +50,40 @@ enum e1000_mng_mode { e1000_mng_mode_host_if_only }; -#define E1000_FACTPS_MNGCG 0x20000000 - -#define E1000_FWSM_MODE_MASK 0xE -#define E1000_FWSM_MODE_SHIFT 1 - -#define E1000_MNG_IAMT_MODE 0x3 -#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 -#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 -#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 -#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64 -#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING 0x1 -#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2 - -#define E1000_VFTA_ENTRY_SHIFT 5 -#define E1000_VFTA_ENTRY_MASK 0x7F -#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F - -#define E1000_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */ -#define E1000_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */ -#define E1000_HI_COMMAND_TIMEOUT 500 /* Process HI command limit */ - -#define E1000_HICR_EN 0x01 /* Enable bit - RO */ +#define E1000_FACTPS_MNGCG 0x20000000 + +#define E1000_FWSM_MODE_MASK 0xE +#define E1000_FWSM_MODE_SHIFT 1 +#define E1000_FWSM_FW_VALID 0x00008000 +#define E1000_FWSM_HI_EN_ONLY_MODE 0x4 + +#define E1000_MNG_IAMT_MODE 0x3 +#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 +#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 +#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 +#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64 +#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING 0x1 +#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2 + +#define E1000_VFTA_ENTRY_SHIFT 5 +#define E1000_VFTA_ENTRY_MASK 0x7F +#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F + +#define E1000_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */ +#define E1000_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */ +#define E1000_HI_COMMAND_TIMEOUT 500 /* Process HI cmd limit */ +#define E1000_HI_FW_BASE_ADDRESS 0x10000 +#define E1000_HI_FW_MAX_LENGTH (64 * 1024) /* Num of bytes */ +#define E1000_HI_FW_BLOCK_DWORD_LENGTH 256 /* Num of DWORDs per page */ +#define E1000_HICR_MEMORY_BASE_EN 0x200 /* MB Enable bit - RO */ +#define E1000_HICR_EN 0x01 /* Enable bit - RO */ /* Driver sets this bit when done to put command in RAM */ -#define E1000_HICR_C 0x02 -#define E1000_HICR_SV 0x04 /* Status Validity */ -#define E1000_HICR_FW_RESET_ENABLE 0x40 -#define E1000_HICR_FW_RESET 0x80 +#define E1000_HICR_C 0x02 +#define E1000_HICR_SV 0x04 /* Status Validity */ +#define E1000_HICR_FW_RESET_ENABLE 0x40 +#define E1000_HICR_FW_RESET 0x80 /* Intel(R) Active Management Technology signature */ -#define E1000_IAMT_SIGNATURE 0x544D4149 +#define E1000_IAMT_SIGNATURE 0x544D4149 #endif diff 
--git a/vmkdrivers/src_9/drivers/net/igb/e1000_mbx.c b/vmkdrivers/src_9/drivers/net/igb/e1000_mbx.c index 35293babd3d1f46827739ea4877ae9708a947ef0..f034db333ee5c286ee7ceb3c25c1fc6967a6975a 100644 --- a/vmkdrivers/src_9/drivers/net/igb/e1000_mbx.c +++ b/vmkdrivers/src_9/drivers/net/igb/e1000_mbx.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2009 Intel Corporation. + Copyright(c) 2007-2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -386,7 +386,7 @@ static s32 e1000_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number) * returns SUCCESS if it successfully copied message into the buffer **/ static s32 e1000_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, - u16 vf_number) + u16 vf_number) { s32 ret_val; u16 i; @@ -429,7 +429,7 @@ out_no_write: * a message due to a VF request so no polling for message is needed. **/ static s32 e1000_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, - u16 vf_number) + u16 vf_number) { s32 ret_val; u16 i; @@ -465,7 +465,10 @@ s32 e1000_init_mbx_params_pf(struct e1000_hw *hw) { struct e1000_mbx_info *mbx = &hw->mbx; - if (hw->mac.type == e1000_82576) { + switch (hw->mac.type) { + case e1000_82576: + case e1000_i350: + case e1000_i354: mbx->timeout = 0; mbx->usec_delay = 0; @@ -484,8 +487,8 @@ s32 e1000_init_mbx_params_pf(struct e1000_hw *hw) mbx->stats.reqs = 0; mbx->stats.acks = 0; mbx->stats.rsts = 0; + default: + return E1000_SUCCESS; } - - return E1000_SUCCESS; } diff --git a/vmkdrivers/src_9/drivers/net/igb/e1000_mbx.h b/vmkdrivers/src_9/drivers/net/igb/e1000_mbx.h index 9652e932d1deeb1415276b11cd725ca343c7871a..bbf838c8cb4d8641ca0e4e76552a347d0647d7a6 100644 --- a/vmkdrivers/src_9/drivers/net/igb/e1000_mbx.h +++ b/vmkdrivers/src_9/drivers/net/igb/e1000_mbx.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2009 Intel Corporation. + Copyright(c) 2007-2013 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -30,49 +30,49 @@ #include "e1000_api.h" -#define E1000_P2VMAILBOX_STS 0x00000001 /* Initiate message send to VF */ -#define E1000_P2VMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ -#define E1000_P2VMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ -#define E1000_P2VMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ -#define E1000_P2VMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ +#define E1000_P2VMAILBOX_STS 0x00000001 /* Initiate message send to VF */ +#define E1000_P2VMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ +#define E1000_P2VMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ +#define E1000_P2VMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ +#define E1000_P2VMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ #define E1000_MBVFICR_VFREQ_MASK 0x000000FF /* bits for VF messages */ -#define E1000_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ +#define E1000_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ #define E1000_MBVFICR_VFACK_MASK 0x00FF0000 /* bits for VF acks */ -#define E1000_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ +#define E1000_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ -#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ +#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ /* If it's a E1000_VF_* msg then it originates in the VF and is sent to the * PF. The reverse is true if it is E1000_PF_*. * Message ACK's are the value or'd with 0xF0000000 */ -#define E1000_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with - * this are the ACK */ -#define E1000_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with - * this are the NACK */ -#define E1000_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still - clear to send requests */ -#define E1000_VT_MSGINFO_SHIFT 16 -/* bits 23:16 are used for exra info for certain messages */ -#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT) - -#define E1000_VF_RESET 0x01 /* VF requests reset */ -#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */ -#define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */ +/* Msgs below or'd with this are the ACK */ +#define E1000_VT_MSGTYPE_ACK 0x80000000 +/* Msgs below or'd with this are the NACK */ +#define E1000_VT_MSGTYPE_NACK 0x40000000 +/* Indicates that VF is still clear to send requests */ +#define E1000_VT_MSGTYPE_CTS 0x20000000 +#define E1000_VT_MSGINFO_SHIFT 16 +/* bits 23:16 are used for extra info for certain messages */ +#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT) + +#define E1000_VF_RESET 0x01 /* VF requests reset */ +#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */ +#define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */ #define E1000_VF_SET_MULTICAST_COUNT_MASK (0x1F << E1000_VT_MSGINFO_SHIFT) -#define E1000_VF_SET_MULTICAST_OVERFLOW (0x80 << E1000_VT_MSGINFO_SHIFT) -#define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */ -#define E1000_VF_SET_VLAN_ADD (0x01 << E1000_VT_MSGINFO_SHIFT) -#define E1000_VF_SET_LPE 0x05 /* VF requests to set VMOLR.LPE */ -#define E1000_VF_SET_PROMISC 0x06 /*VF requests to clear VMOLR.ROPE/MPME*/ -#define E1000_VF_SET_PROMISC_UNICAST (0x01 << E1000_VT_MSGINFO_SHIFT) -#define E1000_VF_SET_PROMISC_MULTICAST (0x02 << E1000_VT_MSGINFO_SHIFT) - -#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ - 
-#define E1000_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ -#define E1000_VF_MBX_INIT_DELAY 500 /* microseconds between retries */ +#define E1000_VF_SET_MULTICAST_OVERFLOW (0x80 << E1000_VT_MSGINFO_SHIFT) +#define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */ +#define E1000_VF_SET_VLAN_ADD (0x01 << E1000_VT_MSGINFO_SHIFT) +#define E1000_VF_SET_LPE 0x05 /* reqs to set VMOLR.LPE */ +#define E1000_VF_SET_PROMISC 0x06 /* reqs to clear VMOLR.ROPE/MPME*/ +#define E1000_VF_SET_PROMISC_UNICAST (0x01 << E1000_VT_MSGINFO_SHIFT) +#define E1000_VF_SET_PROMISC_MULTICAST (0x02 << E1000_VT_MSGINFO_SHIFT) + +#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ + +#define E1000_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ +#define E1000_VF_MBX_INIT_DELAY 500 /* microseconds between retries */ s32 e1000_read_mbx(struct e1000_hw *, u32 *, u16, u16); s32 e1000_write_mbx(struct e1000_hw *, u32 *, u16, u16); diff --git a/vmkdrivers/src_9/drivers/net/igb/e1000_nvm.c b/vmkdrivers/src_9/drivers/net/igb/e1000_nvm.c index 13d96ac293d99235b2b63cf213f4ebb2bb786612..aee8f62427535762a0c280fec002d8309bee9c81 100644 --- a/vmkdrivers/src_9/drivers/net/igb/e1000_nvm.c +++ b/vmkdrivers/src_9/drivers/net/igb/e1000_nvm.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2009 Intel Corporation. + Copyright(c) 2007-2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -27,7 +27,6 @@ #include "e1000_api.h" -static void e1000_stop_nvm(struct e1000_hw *hw); static void e1000_reload_nvm_generic(struct e1000_hw *hw); /** @@ -170,7 +169,6 @@ s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg) { u32 attempts = 100000; u32 i, reg = 0; - s32 ret_val = -E1000_ERR_NVM; DEBUGFUNC("e1000_poll_eerd_eewr_done"); @@ -180,15 +178,13 @@ s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg) else reg = E1000_READ_REG(hw, E1000_EEWR); - if (reg & E1000_NVM_RW_REG_DONE) { - ret_val = E1000_SUCCESS; - break; - } + if (reg & E1000_NVM_RW_REG_DONE) + return E1000_SUCCESS; usec_delay(5); } - return ret_val; + return -E1000_ERR_NVM; } /** @@ -203,7 +199,6 @@ s32 e1000_acquire_nvm_generic(struct e1000_hw *hw) { u32 eecd = E1000_READ_REG(hw, E1000_EECD); s32 timeout = E1000_NVM_GRANT_ATTEMPTS; - s32 ret_val = E1000_SUCCESS; DEBUGFUNC("e1000_acquire_nvm_generic"); @@ -222,10 +217,10 @@ s32 e1000_acquire_nvm_generic(struct e1000_hw *hw) eecd &= ~E1000_EECD_REQ; E1000_WRITE_REG(hw, E1000_EECD, eecd); DEBUGOUT("Could not acquire NVM grant\n"); - ret_val = -E1000_ERR_NVM; + return -E1000_ERR_NVM; } - return ret_val; + return E1000_SUCCESS; } /** @@ -303,28 +298,27 @@ static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw) { struct e1000_nvm_info *nvm = &hw->nvm; u32 eecd = E1000_READ_REG(hw, E1000_EECD); - s32 ret_val = E1000_SUCCESS; - u16 timeout = 0; u8 spi_stat_reg; DEBUGFUNC("e1000_ready_nvm_eeprom"); if (nvm->type == e1000_nvm_eeprom_spi) { + u16 timeout = NVM_MAX_RETRY_SPI; + /* Clear SK and CS */ eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); E1000_WRITE_REG(hw, E1000_EECD, eecd); + E1000_WRITE_FLUSH(hw); usec_delay(1); - timeout = NVM_MAX_RETRY_SPI; - /* - * Read "Status Register" repeatedly until the LSB is cleared. + /* Read "Status Register" repeatedly until the LSB is cleared. 
* The EEPROM will signal that the command has been completed * by clearing bit 0 of the internal status register. If it's * not cleared within 'timeout', then error out. */ while (timeout) { e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI, - hw->nvm.opcode_bits); + hw->nvm.opcode_bits); spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8); if (!(spi_stat_reg & NVM_STATUS_RDY_SPI)) break; @@ -336,12 +330,70 @@ static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw) if (!timeout) { DEBUGOUT("SPI NVM Status error\n"); - ret_val = -E1000_ERR_NVM; - goto out; + return -E1000_ERR_NVM; } } -out: + return E1000_SUCCESS; +} + +/** + * e1000_read_nvm_spi - Read EEPROM's using SPI + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM. + **/ +s32 e1000_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i = 0; + s32 ret_val; + u16 word_in; + u8 read_opcode = NVM_READ_OPCODE_SPI; + + DEBUGFUNC("e1000_read_nvm_spi"); + + /* A check for invalid values: offset too large, too many words, + * and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + DEBUGOUT("nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + ret_val = nvm->ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = e1000_ready_nvm_eeprom(hw); + if (ret_val) + goto release; + + e1000_standby_nvm(hw); + + if ((nvm->address_bits == 8) && (offset >= 128)) + read_opcode |= NVM_A8_OPCODE_SPI; + + /* Send the READ command (opcode + addr) */ + e1000_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits); + e1000_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits); + + /* Read the data. SPI NVMs increment the address with each byte + * read and will roll over if reading beyond the end. This allows + * us to read the whole NVM from any offset + */ + for (i = 0; i < words; i++) { + word_in = e1000_shift_in_eec_bits(hw, 16); + data[i] = (word_in >> 8) | (word_in << 8); + } + +release: + nvm->ops.release(hw); + return ret_val; } @@ -362,15 +414,13 @@ s32 e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) DEBUGFUNC("e1000_read_nvm_eerd"); - /* - * A check for invalid values: offset too large, too many words, + /* A check for invalid values: offset too large, too many words, * too many words for the offset, and not enough words. */ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || (words == 0)) { DEBUGOUT("nvm parameter(s) out of bounds\n"); - ret_val = -E1000_ERR_NVM; - goto out; + return -E1000_ERR_NVM; } for (i = 0; i < words; i++) { @@ -383,10 +433,9 @@ s32 e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) break; data[i] = (E1000_READ_REG(hw, E1000_EERD) >> - E1000_NVM_RW_REG_DATA); + E1000_NVM_RW_REG_DATA); } -out: return ret_val; } @@ -405,43 +454,42 @@ out: s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) { struct e1000_nvm_info *nvm = &hw->nvm; - s32 ret_val; + s32 ret_val = -E1000_ERR_NVM; u16 widx = 0; DEBUGFUNC("e1000_write_nvm_spi"); - /* - * A check for invalid values: offset too large, too many words, + /* A check for invalid values: offset too large, too many words, * and not enough words. 
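/*
 * Sketch of the byte swap e1000_read_nvm_spi() performs above: each
 * 16-bit word shifted in over SPI arrives with its byte lanes swapped
 * relative to host order, so 0x3412 becomes 0x1234 before being
 * stored in the caller's buffer.  Helper name is hypothetical.
 */
static inline u16 e1000_sketch_swap16(u16 word_in)
{
	return (word_in >> 8) | (word_in << 8);
}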
*/ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || (words == 0)) { DEBUGOUT("nvm parameter(s) out of bounds\n"); - ret_val = -E1000_ERR_NVM; - goto out; + return -E1000_ERR_NVM; } - ret_val = nvm->ops.acquire(hw); - if (ret_val) - goto out; - while (widx < words) { u8 write_opcode = NVM_WRITE_OPCODE_SPI; - ret_val = e1000_ready_nvm_eeprom(hw); + ret_val = nvm->ops.acquire(hw); if (ret_val) - goto release; + return ret_val; + + ret_val = e1000_ready_nvm_eeprom(hw); + if (ret_val) { + nvm->ops.release(hw); + return ret_val; + } e1000_standby_nvm(hw); /* Send the WRITE ENABLE command (8 bit opcode) */ e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI, - nvm->opcode_bits); + nvm->opcode_bits); e1000_standby_nvm(hw); - /* - * Some SPI eeproms use the 8th address bit embedded in the + /* Some SPI eeproms use the 8th address bit embedded in the * opcode */ if ((nvm->address_bits == 8) && (offset >= 128)) @@ -450,7 +498,7 @@ s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) /* Send the Write command (8-bit opcode + addr) */ e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits); e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2), - nvm->address_bits); + nvm->address_bits); /* Loop to allow for up to whole page write of eeprom */ while (widx < words) { @@ -464,49 +512,186 @@ s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) break; } } + msec_delay(10); + nvm->ops.release(hw); } - msec_delay(10); -release: - nvm->ops.release(hw); - -out: return ret_val; } /** - * e1000_read_pba_num_generic - Read device part number + * e1000_read_pba_string_generic - Read device part number * @hw: pointer to the HW structure * @pba_num: pointer to device part number + * @pba_num_size: size of part number buffer * * Reads the product board assembly (PBA) number from the EEPROM and stores * the value in pba_num. 
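/*
 * Sketch of a hypothetical caller for the SPI write path above: after
 * e1000_write_nvm_spi() changes any word, the checksum word must be
 * refreshed via e1000_update_nvm_checksum_generic(), or later
 * validation of the image fails.
 */
static s32 e1000_sketch_write_word(struct e1000_hw *hw, u16 off, u16 val)
{
	s32 ret_val = e1000_write_nvm_spi(hw, off, 1, &val);

	if (ret_val)
		return ret_val;
	return e1000_update_nvm_checksum_generic(hw);
}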
**/ -s32 e1000_read_pba_num_generic(struct e1000_hw *hw, u32 *pba_num) +s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num, + u32 pba_num_size) { - s32 ret_val; + s32 ret_val; u16 nvm_data; + u16 pba_ptr; + u16 offset; + u16 length; + + DEBUGFUNC("e1000_read_pba_string_generic"); - DEBUGFUNC("e1000_read_pba_num_generic"); + if (pba_num == NULL) { + DEBUGOUT("PBA string buffer was null\n"); + return -E1000_ERR_INVALID_ARGUMENT; + } ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); if (ret_val) { DEBUGOUT("NVM Read Error\n"); - goto out; + return ret_val; } - *pba_num = (u32)(nvm_data << 16); - ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &nvm_data); + ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr); if (ret_val) { DEBUGOUT("NVM Read Error\n"); - goto out; + return ret_val; } - *pba_num |= nvm_data; -out: - return ret_val; + /* if nvm_data is not ptr guard the PBA must be in legacy format which + * means pba_ptr is actually our second data word for the PBA number + * and we can decode it into an ascii string + */ + if (nvm_data != NVM_PBA_PTR_GUARD) { + DEBUGOUT("NVM PBA number is not stored as string\n"); + + /* make sure callers buffer is big enough to store the PBA */ + if (pba_num_size < E1000_PBANUM_LENGTH) { + DEBUGOUT("PBA string buffer too small\n"); + return E1000_ERR_NO_SPACE; + } + + /* extract hex string from data and pba_ptr */ + pba_num[0] = (nvm_data >> 12) & 0xF; + pba_num[1] = (nvm_data >> 8) & 0xF; + pba_num[2] = (nvm_data >> 4) & 0xF; + pba_num[3] = nvm_data & 0xF; + pba_num[4] = (pba_ptr >> 12) & 0xF; + pba_num[5] = (pba_ptr >> 8) & 0xF; + pba_num[6] = '-'; + pba_num[7] = 0; + pba_num[8] = (pba_ptr >> 4) & 0xF; + pba_num[9] = pba_ptr & 0xF; + + /* put a null character on the end of our string */ + pba_num[10] = '\0'; + + /* switch all the data but the '-' to hex char */ + for (offset = 0; offset < 10; offset++) { + if (pba_num[offset] < 0xA) + pba_num[offset] += '0'; + else if (pba_num[offset] < 0x10) + pba_num[offset] += 'A' - 0xA; + } + + return E1000_SUCCESS; + } + + ret_val = hw->nvm.ops.read(hw, pba_ptr, 1, &length); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + if (length == 0xFFFF || length == 0) { + DEBUGOUT("NVM PBA number section invalid length\n"); + return -E1000_ERR_NVM_PBA_SECTION; + } + /* check if pba_num buffer is big enough */ + if (pba_num_size < (((u32)length * 2) - 1)) { + DEBUGOUT("PBA string buffer too small\n"); + return -E1000_ERR_NO_SPACE; + } + + /* trim pba length from start of string */ + pba_ptr++; + length--; + + for (offset = 0; offset < length; offset++) { + ret_val = hw->nvm.ops.read(hw, pba_ptr + offset, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + pba_num[offset * 2] = (u8)(nvm_data >> 8); + pba_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF); + } + pba_num[offset * 2] = '\0'; + + return E1000_SUCCESS; +} + +/** + * e1000_read_pba_length_generic - Read device part number length + * @hw: pointer to the HW structure + * @pba_num_size: size of part number buffer + * + * Reads the product board assembly (PBA) number length from the EEPROM and + * stores the value in pba_num_size. 
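/*
 * Sketch of the intended calling sequence for the two new PBA helpers
 * (the buffer size here is an arbitrary assumption): query the
 * required length first, then fetch the string only if it fits.
 */
static void e1000_sketch_print_pba(struct e1000_hw *hw)
{
	u8 pba[32];	/* legacy format needs E1000_PBANUM_LENGTH bytes */
	u32 needed;

	if (e1000_read_pba_length_generic(hw, &needed))
		return;
	if (needed <= sizeof(pba) &&
	    !e1000_read_pba_string_generic(hw, pba, sizeof(pba)))
		DEBUGOUT1("PBA: %s\n", pba);
}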
+ **/ +s32 e1000_read_pba_length_generic(struct e1000_hw *hw, u32 *pba_num_size) +{ + s32 ret_val; + u16 nvm_data; + u16 pba_ptr; + u16 length; + + DEBUGFUNC("e1000_read_pba_length_generic"); + + if (pba_num_size == NULL) { + DEBUGOUT("PBA buffer size was null\n"); + return -E1000_ERR_INVALID_ARGUMENT; + } + + ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + /* if data is not ptr guard the PBA must be in legacy format */ + if (nvm_data != NVM_PBA_PTR_GUARD) { + *pba_num_size = E1000_PBANUM_LENGTH; + return E1000_SUCCESS; + } + + ret_val = hw->nvm.ops.read(hw, pba_ptr, 1, &length); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + if (length == 0xFFFF || length == 0) { + DEBUGOUT("NVM PBA number section invalid length\n"); + return -E1000_ERR_NVM_PBA_SECTION; + } + + /* Convert from length in u16 values to u8 chars, add 1 for NULL, + * and subtract 2 because length field is included in length. + */ + *pba_num_size = ((u32)length * 2) - 1; + + return E1000_SUCCESS; } + + + + /** * e1000_read_mac_addr_generic - Read device MAC address * @hw: pointer to the HW structure @@ -545,7 +730,7 @@ s32 e1000_read_mac_addr_generic(struct e1000_hw *hw) **/ s32 e1000_validate_nvm_checksum_generic(struct e1000_hw *hw) { - s32 ret_val = E1000_SUCCESS; + s32 ret_val; u16 checksum = 0; u16 i, nvm_data; @@ -555,19 +740,17 @@ s32 e1000_validate_nvm_checksum_generic(struct e1000_hw *hw) ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); if (ret_val) { DEBUGOUT("NVM Read Error\n"); - goto out; + return ret_val; } checksum += nvm_data; } if (checksum != (u16) NVM_SUM) { DEBUGOUT("NVM Checksum Invalid\n"); - ret_val = -E1000_ERR_NVM; - goto out; + return -E1000_ERR_NVM; } -out: - return ret_val; + return E1000_SUCCESS; } /** @@ -580,7 +763,7 @@ out: **/ s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw) { - s32 ret_val; + s32 ret_val; u16 checksum = 0; u16 i, nvm_data; @@ -590,7 +773,7 @@ s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw) ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); if (ret_val) { DEBUGOUT("NVM Read Error while updating checksum.\n"); - goto out; + return ret_val; } checksum += nvm_data; } @@ -599,7 +782,6 @@ s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw) if (ret_val) DEBUGOUT("NVM Write Error while updating checksum.\n"); -out: return ret_val; } @@ -623,3 +805,110 @@ static void e1000_reload_nvm_generic(struct e1000_hw *hw) E1000_WRITE_FLUSH(hw); } +/** + * e1000_get_fw_version - Get firmware version information + * @hw: pointer to the HW structure + * @fw_vers: pointer to output version structure + * + * unsupported/not present features return 0 in version structure + **/ +void e1000_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers) +{ + u16 eeprom_verh, eeprom_verl, etrack_test, fw_version; + u8 q, hval, rem, result; + u16 comb_verh, comb_verl, comb_offset; + + memset(fw_vers, 0, sizeof(struct e1000_fw_version)); + + /* basic eeprom version numbers, bits used vary by part and by tool + * used to create the nvm images */ + /* Check which data format we have */ + hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test); + switch (hw->mac.type) { + case e1000_i211: + e1000_read_invm_version(hw, fw_vers); + return; + case e1000_82575: + case e1000_82576: + case e1000_82580: + /* Use this format, unless 
EETRACK ID exists, + * then use alternate format + */ + if ((etrack_test & NVM_MAJOR_MASK) != NVM_ETRACK_VALID) { + hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version); + fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK) + >> NVM_MAJOR_SHIFT; + fw_vers->eep_minor = (fw_version & NVM_MINOR_MASK) + >> NVM_MINOR_SHIFT; + fw_vers->eep_build = (fw_version & NVM_IMAGE_ID_MASK); + goto etrack_id; + } + break; + case e1000_i210: + if (!(e1000_get_flash_presence_i210(hw))) { + e1000_read_invm_version(hw, fw_vers); + return; + } + /* fall through */ + case e1000_i350: + case e1000_i354: + /* find combo image version */ + hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset); + if ((comb_offset != 0x0) && + (comb_offset != NVM_VER_INVALID)) { + + hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset + + 1), 1, &comb_verh); + hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset), + 1, &comb_verl); + + /* get Option Rom version if it exists and is valid */ + if ((comb_verh && comb_verl) && + ((comb_verh != NVM_VER_INVALID) && + (comb_verl != NVM_VER_INVALID))) { + + fw_vers->or_valid = true; + fw_vers->or_major = + comb_verl >> NVM_COMB_VER_SHFT; + fw_vers->or_build = + (comb_verl << NVM_COMB_VER_SHFT) + | (comb_verh >> NVM_COMB_VER_SHFT); + fw_vers->or_patch = + comb_verh & NVM_COMB_VER_MASK; + } + } + break; + default: + return; + } + hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version); + fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK) + >> NVM_MAJOR_SHIFT; + + /* check for old style version format in newer images*/ + if ((fw_version & NVM_NEW_DEC_MASK) == 0x0) { + eeprom_verl = (fw_version & NVM_COMB_VER_MASK); + } else { + eeprom_verl = (fw_version & NVM_MINOR_MASK) + >> NVM_MINOR_SHIFT; + } + /* Convert minor value to hex before assigning to output struct + * Val to be converted will not be higher than 99, per tool output + */ + q = eeprom_verl / NVM_HEX_CONV; + hval = q * NVM_HEX_TENS; + rem = eeprom_verl % NVM_HEX_CONV; + result = hval + rem; + fw_vers->eep_minor = result; + +etrack_id: + if ((etrack_test & NVM_MAJOR_MASK) == NVM_ETRACK_VALID) { + hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verl); + hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh); + fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT) + | eeprom_verl; + } + return; +} + + diff --git a/vmkdrivers/src_9/drivers/net/igb/e1000_nvm.h b/vmkdrivers/src_9/drivers/net/igb/e1000_nvm.h index 579bc8ba71d598f9b164f7a18dae5274fc3d65da..d83cbb43a0aba13a5be9c81efb1a5394ed816eb0 100644 --- a/vmkdrivers/src_9/drivers/net/igb/e1000_nvm.h +++ b/vmkdrivers/src_9/drivers/net/igb/e1000_nvm.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2009 Intel Corporation. + Copyright(c) 2007-2013 Intel Corporation. 
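/*
 * Sketch of the minor-version conversion above, assuming NVM_HEX_CONV
 * is 10 and NVM_HEX_TENS is 0x10: a decimal minor of 34 maps to 0x34
 * (3 * 0x10 + 4), so printing eep_minor with "%x" reproduces the
 * digits the NVM tool wrote.  Valid for inputs up to 99, per the
 * comment in the function.
 */
static inline u8 e1000_sketch_dec_to_hexdigits(u8 dec)
{
	return (dec / 10) * 0x10 + (dec % 10);
}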
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -28,23 +28,44 @@ #ifndef _E1000_NVM_H_ #define _E1000_NVM_H_ + +struct e1000_fw_version { + u32 etrack_id; + u16 eep_major; + u16 eep_minor; + u16 eep_build; + + u8 invm_major; + u8 invm_minor; + u8 invm_img_type; + + bool or_valid; + u16 or_major; + u16 or_build; + u16 or_patch; +}; + + void e1000_init_nvm_ops_generic(struct e1000_hw *hw); s32 e1000_acquire_nvm_generic(struct e1000_hw *hw); s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg); s32 e1000_read_mac_addr_generic(struct e1000_hw *hw); -s32 e1000_read_pba_num_generic(struct e1000_hw *hw, u32 *pba_num); +s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num, + u32 pba_num_size); +s32 e1000_read_pba_length_generic(struct e1000_hw *hw, u32 *pba_num_size); +s32 e1000_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); s32 e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, - u16 *data); + u16 *data); s32 e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data); s32 e1000_validate_nvm_checksum_generic(struct e1000_hw *hw); -s32 e1000_write_nvm_eewr(struct e1000_hw *hw, u16 offset, - u16 words, u16 *data); s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, - u16 *data); + u16 *data); s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw); void e1000_release_nvm_generic(struct e1000_hw *hw); +void e1000_get_fw_version(struct e1000_hw *hw, + struct e1000_fw_version *fw_vers); -#define E1000_STM_OPCODE 0xDB00 +#define E1000_STM_OPCODE 0xDB00 #endif diff --git a/vmkdrivers/src_9/drivers/net/igb/e1000_osdep.h b/vmkdrivers/src_9/drivers/net/igb/e1000_osdep.h index 76a14b8802a5fa90704a9455e1585c2fe3147b71..7542e991abb416a0c61f3e5e6613822180ed52f0 100644 --- a/vmkdrivers/src_9/drivers/net/igb/e1000_osdep.h +++ b/vmkdrivers/src_9/drivers/net/igb/e1000_osdep.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2009 Intel Corporation. + Copyright(c) 2007-2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -41,6 +41,7 @@ #include "kcompat.h" #define usec_delay(x) udelay(x) +#define usec_delay_irq(x) udelay(x) #ifndef msec_delay #define msec_delay(x) do { \ /* Don't mdelay in interrupt context! */ \ @@ -70,7 +71,11 @@ #define DEBUGOUT(S) #define DEBUGOUT1(S, A...) +#ifdef DEBUG_FUNC #define DEBUGFUNC(F) DEBUGOUT(F "\n") +#else +#define DEBUGFUNC(F) +#endif #define DEBUGOUT2 DEBUGOUT1 #define DEBUGOUT3 DEBUGOUT2 #define DEBUGOUT7 DEBUGOUT3 diff --git a/vmkdrivers/src_9/drivers/net/igb/e1000_phy.c b/vmkdrivers/src_9/drivers/net/igb/e1000_phy.c index 7a99e5cd9449fac9f22ac96ee16891d1e96b32b6..3230adb1c27247744d43cd7890571129289f664c 100644 --- a/vmkdrivers/src_9/drivers/net/igb/e1000_phy.c +++ b/vmkdrivers/src_9/drivers/net/igb/e1000_phy.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2009 Intel Corporation. + Copyright(c) 2007-2013 Intel Corporation. 
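/*
 * Sketch of consuming the e1000_fw_version block declared in the
 * header hunk above: fields for unsupported or absent features stay
 * zero, and or_valid gates the option ROM triple.  The DEBUGOUT
 * wrappers compile away unless debugging is enabled.
 */
static void e1000_sketch_show_fw(struct e1000_hw *hw)
{
	struct e1000_fw_version ver;

	e1000_get_fw_version(hw, &ver);
	DEBUGOUT2("EEPROM %d.%d\n", ver.eep_major, ver.eep_minor);
	if (ver.or_valid)
		DEBUGOUT3("OROM %d.%d.%d\n",
			  ver.or_major, ver.or_build, ver.or_patch);
}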
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -27,27 +27,26 @@ #include "e1000_api.h" -static s32 e1000_copper_link_autoneg(struct e1000_hw *hw); -static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw); +static s32 e1000_wait_autoneg(struct e1000_hw *hw); /* Cable length tables */ -static const u16 e1000_m88_cable_length_table[] = - { 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED }; +static const u16 e1000_m88_cable_length_table[] = { + 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED }; #define M88E1000_CABLE_LENGTH_TABLE_SIZE \ - (sizeof(e1000_m88_cable_length_table) / \ - sizeof(e1000_m88_cable_length_table[0])) - -static const u16 e1000_igp_2_cable_length_table[] = - { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, - 0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, - 6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, - 21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, - 40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, - 60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, - 83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124, - 104, 109, 114, 118, 121, 124}; + (sizeof(e1000_m88_cable_length_table) / \ + sizeof(e1000_m88_cable_length_table[0])) + +static const u16 e1000_igp_2_cable_length_table[] = { + 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 0, 0, 0, 3, + 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, 6, 10, 14, 18, 22, + 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, 21, 26, 31, 35, 40, + 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, 40, 45, 51, 56, 61, + 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82, + 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95, + 100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121, + 124}; #define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \ - (sizeof(e1000_igp_2_cable_length_table) / \ - sizeof(e1000_igp_2_cable_length_table[0])) + (sizeof(e1000_igp_2_cable_length_table) / \ + sizeof(e1000_igp_2_cable_length_table[0])) /** * e1000_check_reset_block_generic - Check if PHY reset is blocked @@ -84,24 +83,24 @@ s32 e1000_get_phy_id(struct e1000_hw *hw) DEBUGFUNC("e1000_get_phy_id"); - if (!(phy->ops.read_reg)) - goto out; + if (!phy->ops.read_reg) + return E1000_SUCCESS; - ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id); - if (ret_val) - goto out; + ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id); + if (ret_val) + return ret_val; - phy->id = (u32)(phy_id << 16); - usec_delay(20); - ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id); - if (ret_val) - goto out; + phy->id = (u32)(phy_id << 16); + usec_delay(20); + ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id); + if (ret_val) + return ret_val; - phy->id |= (u32)(phy_id & PHY_REVISION_MASK); - phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK); + phy->id |= (u32)(phy_id & PHY_REVISION_MASK); + phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK); -out: - return ret_val; + + return E1000_SUCCESS; } /** @@ -112,21 +111,15 @@ out: **/ s32 e1000_phy_reset_dsp_generic(struct e1000_hw *hw) { - s32 ret_val = E1000_SUCCESS; + s32 ret_val; DEBUGFUNC("e1000_phy_reset_dsp_generic"); - if (!(hw->phy.ops.write_reg)) - goto out; - ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1); if (ret_val) - goto out; - - ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0); + return ret_val; -out: - return ret_val; + return hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0); } 
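/*
 * Sketch of the 32-bit PHY id assembled by e1000_get_phy_id() in the
 * hunk above: PHY_ID1 supplies the high word, PHY_ID2 the low word
 * masked by PHY_REVISION_MASK, and the masked-off revision bits are
 * kept separately in phy->revision.  Helper name is hypothetical.
 */
static inline u32 e1000_sketch_phy_id(u16 id1, u16 id2)
{
	return ((u32)id1 << 16) | (id2 & PHY_REVISION_MASK);
}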
/** @@ -142,46 +135,51 @@ s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) { struct e1000_phy_info *phy = &hw->phy; u32 i, mdic = 0; - s32 ret_val = E1000_SUCCESS; DEBUGFUNC("e1000_read_phy_reg_mdic"); - /* - * Set up Op-code, Phy Address, and register offset in the MDI + if (offset > MAX_PHY_REG_ADDRESS) { + DEBUGOUT1("PHY Address %d is out of range\n", offset); + return -E1000_ERR_PARAM; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI * Control register. The MAC will take care of interfacing with the * PHY to retrieve the desired data. */ mdic = ((offset << E1000_MDIC_REG_SHIFT) | - (phy->addr << E1000_MDIC_PHY_SHIFT) | - (E1000_MDIC_OP_READ)); + (phy->addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_READ)); E1000_WRITE_REG(hw, E1000_MDIC, mdic); - /* - * Poll the ready bit to see if the MDI read completed + /* Poll the ready bit to see if the MDI read completed * Increasing the time out as testing showed failures with * the lower time out */ for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { - usec_delay(50); + usec_delay_irq(50); mdic = E1000_READ_REG(hw, E1000_MDIC); if (mdic & E1000_MDIC_READY) break; } if (!(mdic & E1000_MDIC_READY)) { DEBUGOUT("MDI Read did not complete\n"); - ret_val = -E1000_ERR_PHY; - goto out; + return -E1000_ERR_PHY; } if (mdic & E1000_MDIC_ERROR) { DEBUGOUT("MDI Error\n"); - ret_val = -E1000_ERR_PHY; - goto out; + return -E1000_ERR_PHY; + } + if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) { + DEBUGOUT2("MDI Read offset error - requested %d, returned %d\n", + offset, + (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT); + return -E1000_ERR_PHY; } *data = (u16) mdic; -out: - return ret_val; + return E1000_SUCCESS; } /** @@ -196,46 +194,51 @@ s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) { struct e1000_phy_info *phy = &hw->phy; u32 i, mdic = 0; - s32 ret_val = E1000_SUCCESS; DEBUGFUNC("e1000_write_phy_reg_mdic"); - /* - * Set up Op-code, Phy Address, and register offset in the MDI + if (offset > MAX_PHY_REG_ADDRESS) { + DEBUGOUT1("PHY Address %d is out of range\n", offset); + return -E1000_ERR_PARAM; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI * Control register. The MAC will take care of interfacing with the * PHY to retrieve the desired data. 
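/*
 * Sketch of the readback check this hunk adds to the MDIC accessors:
 * the MDIC register echoes the register offset of the completed
 * transaction, so a mismatch against the requested offset is now
 * reported as -E1000_ERR_PHY instead of silently returning stale
 * data.
 */
static inline int e1000_sketch_mdic_offset_ok(u32 mdic, u32 offset)
{
	return ((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) ==
	       offset;
}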
*/ mdic = (((u32)data) | - (offset << E1000_MDIC_REG_SHIFT) | - (phy->addr << E1000_MDIC_PHY_SHIFT) | - (E1000_MDIC_OP_WRITE)); + (offset << E1000_MDIC_REG_SHIFT) | + (phy->addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_WRITE)); E1000_WRITE_REG(hw, E1000_MDIC, mdic); - /* - * Poll the ready bit to see if the MDI read completed + /* Poll the ready bit to see if the MDI read completed * Increasing the time out as testing showed failures with * the lower time out */ for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { - usec_delay(50); + usec_delay_irq(50); mdic = E1000_READ_REG(hw, E1000_MDIC); if (mdic & E1000_MDIC_READY) break; } if (!(mdic & E1000_MDIC_READY)) { DEBUGOUT("MDI Write did not complete\n"); - ret_val = -E1000_ERR_PHY; - goto out; + return -E1000_ERR_PHY; } if (mdic & E1000_MDIC_ERROR) { DEBUGOUT("MDI Error\n"); - ret_val = -E1000_ERR_PHY; - goto out; + return -E1000_ERR_PHY; + } + if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) { + DEBUGOUT2("MDI Write offset error - requested %d, returned %d\n", + offset, + (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT); + return -E1000_ERR_PHY; } -out: - return ret_val; + return E1000_SUCCESS; } /** @@ -254,14 +257,13 @@ s32 e1000_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data) DEBUGFUNC("e1000_read_phy_reg_i2c"); - /* - * Set up Op-code, Phy Address, and register address in the I2CCMD + /* Set up Op-code, Phy Address, and register address in the I2CCMD * register. The MAC will take care of interfacing with the * PHY to retrieve the desired data. */ i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | - (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) | - (E1000_I2CCMD_OPCODE_READ)); + (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) | + (E1000_I2CCMD_OPCODE_READ)); E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd); @@ -303,18 +305,24 @@ s32 e1000_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data) DEBUGFUNC("e1000_write_phy_reg_i2c"); + /* Prevent overwritting SFP I2C EEPROM which is at A0 address.*/ + if ((hw->phy.addr == 0) || (hw->phy.addr > 7)) { + DEBUGOUT1("PHY I2C Address %d is out of range.\n", + hw->phy.addr); + return -E1000_ERR_CONFIG; + } + /* Swap the data bytes for the I2C interface */ phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00); - /* - * Set up Op-code, Phy Address, and register address in the I2CCMD + /* Set up Op-code, Phy Address, and register address in the I2CCMD * register. The MAC will take care of interfacing with the * PHY to retrieve the desired data. */ i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | - (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) | - E1000_I2CCMD_OPCODE_WRITE | - phy_data_swapped); + (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) | + E1000_I2CCMD_OPCODE_WRITE | + phy_data_swapped); E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd); @@ -337,6 +345,134 @@ s32 e1000_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data) return E1000_SUCCESS; } +/** + * e1000_read_sfp_data_byte - Reads SFP module data. + * @hw: pointer to the HW structure + * @offset: byte location offset to be read + * @data: read data buffer pointer + * + * Reads one byte from SFP module data stored + * in SFP resided EEPROM memory or SFP diagnostic area. 
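/*
 * Sketch of a hypothetical user of e1000_read_sfp_data_byte() above:
 * byte 0 of the module database holds the SFF-8472 identifier (0x03
 * for SFP/SFP+).  E1000_I2CCMD_SFP_DATA_ADDR() is the macro named in
 * the kernel-doc; its exact encoding lives in the hardware header.
 */
static s32 e1000_sketch_sfp_id(struct e1000_hw *hw, u8 *id)
{
	return e1000_read_sfp_data_byte(hw,
					E1000_I2CCMD_SFP_DATA_ADDR(0), id);
}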
+ * Function should be called with + * E1000_I2CCMD_SFP_DATA_ADDR() for SFP module database access + * E1000_I2CCMD_SFP_DIAG_ADDR() for SFP diagnostics parameters + * access + **/ +s32 e1000_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data) +{ + u32 i = 0; + u32 i2ccmd = 0; + u32 data_local = 0; + + DEBUGFUNC("e1000_read_sfp_data_byte"); + + if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) { + DEBUGOUT("I2CCMD command address exceeds upper limit\n"); + return -E1000_ERR_PHY; + } + + /* Set up Op-code, EEPROM Address,in the I2CCMD + * register. The MAC will take care of interfacing with the + * EEPROM to retrieve the desired data. + */ + i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | + E1000_I2CCMD_OPCODE_READ); + + E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd); + + /* Poll the ready bit to see if the I2C read completed */ + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { + usec_delay(50); + data_local = E1000_READ_REG(hw, E1000_I2CCMD); + if (data_local & E1000_I2CCMD_READY) + break; + } + if (!(data_local & E1000_I2CCMD_READY)) { + DEBUGOUT("I2CCMD Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (data_local & E1000_I2CCMD_ERROR) { + DEBUGOUT("I2CCMD Error bit set\n"); + return -E1000_ERR_PHY; + } + *data = (u8) data_local & 0xFF; + + return E1000_SUCCESS; +} + +/** + * e1000_write_sfp_data_byte - Writes SFP module data. + * @hw: pointer to the HW structure + * @offset: byte location offset to write to + * @data: data to write + * + * Writes one byte to SFP module data stored + * in SFP resided EEPROM memory or SFP diagnostic area. + * Function should be called with + * E1000_I2CCMD_SFP_DATA_ADDR() for SFP module database access + * E1000_I2CCMD_SFP_DIAG_ADDR() for SFP diagnostics parameters + * access + **/ +s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data) +{ + u32 i = 0; + u32 i2ccmd = 0; + u32 data_local = 0; + + DEBUGFUNC("e1000_write_sfp_data_byte"); + + if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) { + DEBUGOUT("I2CCMD command address exceeds upper limit\n"); + return -E1000_ERR_PHY; + } + /* The programming interface is 16 bits wide + * so we need to read the whole word first + * then update appropriate byte lane and write + * the updated word back. + */ + /* Set up Op-code, EEPROM Address,in the I2CCMD + * register. The MAC will take care of interfacing + * with an EEPROM to write the data given. 
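/*
 * Sketch of the byte-lane merge used by the write path above: the
 * I2CCMD interface moves 16 bits per transaction, so a one-byte write
 * keeps the lane returned by the read phase and substitutes only the
 * addressed byte before writing the word back.
 */
static inline u32 e1000_sketch_merge_lane(u32 word_read, u8 new_byte)
{
	return (word_read & 0xFF00) | new_byte;
}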
+ */ + i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | + E1000_I2CCMD_OPCODE_READ); + /* Set a command to read single word */ + E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd); + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { + usec_delay(50); + /* Poll the ready bit to see if lastly + * launched I2C operation completed + */ + i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD); + if (i2ccmd & E1000_I2CCMD_READY) { + /* Check if this is READ or WRITE phase */ + if ((i2ccmd & E1000_I2CCMD_OPCODE_READ) == + E1000_I2CCMD_OPCODE_READ) { + /* Write the selected byte + * lane and update whole word + */ + data_local = i2ccmd & 0xFF00; + data_local |= data; + i2ccmd = ((offset << + E1000_I2CCMD_REG_ADDR_SHIFT) | + E1000_I2CCMD_OPCODE_WRITE | data_local); + E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd); + } else { + break; + } + } + } + if (!(i2ccmd & E1000_I2CCMD_READY)) { + DEBUGOUT("I2CCMD Write did not complete\n"); + return -E1000_ERR_PHY; + } + if (i2ccmd & E1000_I2CCMD_ERROR) { + DEBUGOUT("I2CCMD Error bit set\n"); + return -E1000_ERR_PHY; + } + return E1000_SUCCESS; +} + /** * e1000_read_phy_reg_m88 - Read m88 PHY register * @hw: pointer to the HW structure @@ -349,23 +485,19 @@ s32 e1000_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data) **/ s32 e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data) { - s32 ret_val = E1000_SUCCESS; + s32 ret_val; DEBUGFUNC("e1000_read_phy_reg_m88"); - if (!(hw->phy.ops.acquire)) - goto out; - ret_val = hw->phy.ops.acquire(hw); if (ret_val) - goto out; + return ret_val; ret_val = e1000_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, - data); + data); hw->phy.ops.release(hw); -out: return ret_val; } @@ -380,26 +512,42 @@ out: **/ s32 e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data) { - s32 ret_val = E1000_SUCCESS; + s32 ret_val; DEBUGFUNC("e1000_write_phy_reg_m88"); - if (!(hw->phy.ops.acquire)) - goto out; - ret_val = hw->phy.ops.acquire(hw); if (ret_val) - goto out; + return ret_val; ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, - data); + data); hw->phy.ops.release(hw); -out: return ret_val; } +/** + * e1000_set_page_igp - Set page as on IGP-like PHY(s) + * @hw: pointer to the HW structure + * @page: page to set (shifted left when necessary) + * + * Sets PHY page required for PHY register access. Assumes semaphore is + * already acquired. Note, this function sets phy.addr to 1 so the caller + * must set it appropriately (if necessary) after this function returns. + **/ +s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page) +{ + DEBUGFUNC("e1000_set_page_igp"); + + DEBUGOUT1("Setting page 0x%x\n", page); + + hw->phy.addr = 1; + + return e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, page); +} + /** * __e1000_read_phy_reg_igp - Read igp PHY register * @hw: pointer to the HW structure @@ -412,38 +560,35 @@ out: * semaphores before exiting. 
**/ static s32 __e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data, - bool locked) + bool locked) { s32 ret_val = E1000_SUCCESS; DEBUGFUNC("__e1000_read_phy_reg_igp"); if (!locked) { - if (!(hw->phy.ops.acquire)) - goto out; + if (!hw->phy.ops.acquire) + return E1000_SUCCESS; ret_val = hw->phy.ops.acquire(hw); if (ret_val) - goto out; + return ret_val; } - if (offset > MAX_PHY_MULTI_PAGE_REG) { + if (offset > MAX_PHY_MULTI_PAGE_REG) ret_val = e1000_write_phy_reg_mdic(hw, - IGP01E1000_PHY_PAGE_SELECT, - (u16)offset); - if (ret_val) - goto release; - } - - ret_val = e1000_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, - data); - -release: + IGP01E1000_PHY_PAGE_SELECT, + (u16)offset); + if (!ret_val) + ret_val = e1000_read_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, + data); if (!locked) hw->phy.ops.release(hw); -out: + return ret_val; } + /** * e1000_read_phy_reg_igp - Read igp PHY register * @hw: pointer to the HW structure @@ -484,37 +629,32 @@ s32 e1000_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data) * at the offset. Release any acquired semaphores before exiting. **/ static s32 __e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data, - bool locked) + bool locked) { s32 ret_val = E1000_SUCCESS; DEBUGFUNC("e1000_write_phy_reg_igp"); if (!locked) { - if (!(hw->phy.ops.acquire)) - goto out; + if (!hw->phy.ops.acquire) + return E1000_SUCCESS; ret_val = hw->phy.ops.acquire(hw); if (ret_val) - goto out; + return ret_val; } - if (offset > MAX_PHY_MULTI_PAGE_REG) { + if (offset > MAX_PHY_MULTI_PAGE_REG) ret_val = e1000_write_phy_reg_mdic(hw, - IGP01E1000_PHY_PAGE_SELECT, - (u16)offset); - if (ret_val) - goto release; - } - - ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, - data); - -release: + IGP01E1000_PHY_PAGE_SELECT, + (u16)offset); + if (!ret_val) + ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & + offset, + data); if (!locked) hw->phy.ops.release(hw); -out: return ret_val; } @@ -558,25 +698,27 @@ s32 e1000_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data) * Release any acquired semaphores before exiting. **/ static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data, - bool locked) + bool locked) { u32 kmrnctrlsta; - s32 ret_val = E1000_SUCCESS; DEBUGFUNC("__e1000_read_kmrn_reg"); if (!locked) { - if (!(hw->phy.ops.acquire)) - goto out; + s32 ret_val = E1000_SUCCESS; + + if (!hw->phy.ops.acquire) + return E1000_SUCCESS; ret_val = hw->phy.ops.acquire(hw); if (ret_val) - goto out; + return ret_val; } kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & - E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; + E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta); + E1000_WRITE_FLUSH(hw); usec_delay(2); @@ -586,8 +728,7 @@ static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data, if (!locked) hw->phy.ops.release(hw); -out: - return ret_val; + return E1000_SUCCESS; } /** @@ -632,33 +773,34 @@ s32 e1000_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data) * before exiting. 
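/*
 * Sketch of the KMRN command word built by the accessors above: the
 * register offset lands in the OFFSET field, a read additionally sets
 * REN, and a write instead carries its 16-bit payload in the low
 * half.  Helper name is hypothetical.
 */
static inline u32 e1000_sketch_kmrn_read_cmd(u32 offset)
{
	return ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
		E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
}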
**/ static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data, - bool locked) + bool locked) { u32 kmrnctrlsta; - s32 ret_val = E1000_SUCCESS; DEBUGFUNC("e1000_write_kmrn_reg_generic"); if (!locked) { - if (!(hw->phy.ops.acquire)) - goto out; + s32 ret_val = E1000_SUCCESS; + + if (!hw->phy.ops.acquire) + return E1000_SUCCESS; ret_val = hw->phy.ops.acquire(hw); if (ret_val) - goto out; + return ret_val; } kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & - E1000_KMRNCTRLSTA_OFFSET) | data; + E1000_KMRNCTRLSTA_OFFSET) | data; E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta); + E1000_WRITE_FLUSH(hw); usec_delay(2); if (!locked) hw->phy.ops.release(hw); -out: - return ret_val; + return E1000_SUCCESS; } /** @@ -689,6 +831,46 @@ s32 e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data) return __e1000_write_kmrn_reg(hw, offset, data, true); } +/** + * e1000_set_master_slave_mode - Setup PHY for Master/slave mode + * @hw: pointer to the HW structure + * + * Sets up Master/slave mode + **/ +static s32 e1000_set_master_slave_mode(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + /* Resolve Master/Slave mode */ + ret_val = hw->phy.ops.read_reg(hw, PHY_1000T_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* load defaults for future use */ + hw->phy.original_ms_type = (phy_data & CR_1000T_MS_ENABLE) ? + ((phy_data & CR_1000T_MS_VALUE) ? + e1000_ms_force_master : + e1000_ms_force_slave) : e1000_ms_auto; + + switch (hw->phy.ms_type) { + case e1000_ms_force_master: + phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); + break; + case e1000_ms_force_slave: + phy_data |= CR_1000T_MS_ENABLE; + phy_data &= ~(CR_1000T_MS_VALUE); + break; + case e1000_ms_auto: + phy_data &= ~CR_1000T_MS_ENABLE; + /* fall-through */ + default: + break; + } + + return hw->phy.ops.write_reg(hw, PHY_1000T_CTRL, phy_data); +} + /** * e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link * @hw: pointer to the HW structure @@ -697,39 +879,62 @@ s32 e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data) **/ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw) { - struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 phy_data; DEBUGFUNC("e1000_copper_link_setup_82577"); - if (phy->reset_disable) { - ret_val = E1000_SUCCESS; - goto out; - } + if (hw->phy.reset_disable) + return E1000_SUCCESS; - if (phy->type == e1000_phy_82580) { + if (hw->phy.type == e1000_phy_82580) { ret_val = hw->phy.ops.reset(hw); if (ret_val) { DEBUGOUT("Error resetting the PHY.\n"); - goto out; + return ret_val; } } - /* Enable CRS on TX. This must be set for half-duplex operation. */ - ret_val = phy->ops.read_reg(hw, I82577_CFG_REG, &phy_data); + /* Enable CRS on Tx. This must be set for half-duplex operation. 
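/*
 * Sketch of the CR_1000T_MS_* encoding that the new
 * e1000_set_master_slave_mode() manipulates: with MS_ENABLE clear the
 * PHY auto-resolves; with MS_ENABLE set, MS_VALUE picks master (1) or
 * slave (0).  Simplified in that MS_VALUE is always cleared first,
 * which is harmless since it is ignored while MS_ENABLE is clear.
 * The e1000_ms_type enum is assumed from e1000_hw.h.
 */
static inline u16 e1000_sketch_ms_bits(enum e1000_ms_type ms, u16 reg)
{
	reg &= ~(CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
	if (ms == e1000_ms_force_master)
		reg |= CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE;
	else if (ms == e1000_ms_force_slave)
		reg |= CR_1000T_MS_ENABLE;
	return reg;
}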
*/ + ret_val = hw->phy.ops.read_reg(hw, I82577_CFG_REG, &phy_data); if (ret_val) - goto out; + return ret_val; phy_data |= I82577_CFG_ASSERT_CRS_ON_TX; /* Enable downshift */ phy_data |= I82577_CFG_ENABLE_DOWNSHIFT; - ret_val = phy->ops.write_reg(hw, I82577_CFG_REG, phy_data); + ret_val = hw->phy.ops.write_reg(hw, I82577_CFG_REG, phy_data); + if (ret_val) + return ret_val; -out: - return ret_val; + /* Set MDI/MDIX mode */ + ret_val = hw->phy.ops.read_reg(hw, I82577_PHY_CTRL_2, &phy_data); + if (ret_val) + return ret_val; + phy_data &= ~I82577_PHY_CTRL2_MDIX_CFG_MASK; + /* Options: + * 0 - Auto (default) + * 1 - MDI mode + * 2 - MDI-X mode + */ + switch (hw->phy.mdix) { + case 1: + break; + case 2: + phy_data |= I82577_PHY_CTRL2_MANUAL_MDIX; + break; + case 0: + default: + phy_data |= I82577_PHY_CTRL2_AUTO_MDI_MDIX; + break; + } + ret_val = hw->phy.ops.write_reg(hw, I82577_PHY_CTRL_2, phy_data); + if (ret_val) + return ret_val; + + return e1000_set_master_slave_mode(hw); } /** @@ -747,20 +952,17 @@ s32 e1000_copper_link_setup_m88(struct e1000_hw *hw) DEBUGFUNC("e1000_copper_link_setup_m88"); - if (phy->reset_disable) { - ret_val = E1000_SUCCESS; - goto out; - } + if (phy->reset_disable) + return E1000_SUCCESS; - /* Enable CRS on TX. This must be set for half-duplex operation. */ + /* Enable CRS on Tx. This must be set for half-duplex operation. */ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); if (ret_val) - goto out; + return ret_val; phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; - /* - * Options: + /* Options: * MDI/MDI-X = 0 (default) * 0 - Auto for all speeds * 1 - MDI mode @@ -785,30 +987,28 @@ s32 e1000_copper_link_setup_m88(struct e1000_hw *hw) break; } - /* - * Options: + /* Options: * disable_polarity_correction = 0 (default) * Automatic Correction for Reversed Cable Polarity * 0 - Disabled * 1 - Enabled */ phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; - if (phy->disable_polarity_correction == 1) + if (phy->disable_polarity_correction) phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); if (ret_val) - goto out; + return ret_val; if (phy->revision < E1000_REVISION_4) { - /* - * Force TX_CLK in the Extended PHY Specific Control Register + /* Force TX_CLK in the Extended PHY Specific Control Register * to 25MHz clock. */ ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, - &phy_data); + &phy_data); if (ret_val) - goto out; + return ret_val; phy_data |= M88E1000_EPSCR_TX_CLK_25; @@ -820,25 +1020,124 @@ s32 e1000_copper_link_setup_m88(struct e1000_hw *hw) } else { /* Configure Master and Slave downshift values */ phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | - M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); + M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | - M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); + M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); } ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, - phy_data); + phy_data); + if (ret_val) + return ret_val; + } + + /* Commit the changes. */ + if (phy->ops.commit) { + ret_val = phy->ops.commit(hw); + if (ret_val) { + DEBUGOUT("Error committing the PHY changes\n"); + return ret_val; + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_setup_m88_gen2 - Setup m88 PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up MDI/MDI-X and polarity for i347-AT4, m88e1322 and m88e1112 PHY's. + * Also enables and sets the downshift parameters. 
+ **/ +s32 e1000_copper_link_setup_m88_gen2(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + + DEBUGFUNC("e1000_copper_link_setup_m88_gen2"); + + if (phy->reset_disable) + return E1000_SUCCESS; + + /* Enable CRS on Tx. This must be set for half-duplex operation. */ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + + switch (phy->mdix) { + case 1: + phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; + break; + case 2: + phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; + break; + case 3: + /* M88E1112 does not support this mode) */ + if (phy->id != M88E1112_E_PHY_ID) { + phy_data |= M88E1000_PSCR_AUTO_X_1000T; + break; + } + case 0: + default: + phy_data |= M88E1000_PSCR_AUTO_X_MODE; + break; + } + + /* Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; + if (phy->disable_polarity_correction) + phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; + + /* Enable downshift and setting it to X6 */ + if (phy->id == M88E1543_E_PHY_ID) { + phy_data &= ~I347AT4_PSCR_DOWNSHIFT_ENABLE; + ret_val = + phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); if (ret_val) - goto out; + return ret_val; + + ret_val = phy->ops.commit(hw); + if (ret_val) { + DEBUGOUT("Error committing the PHY changes\n"); + return ret_val; + } } + phy_data &= ~I347AT4_PSCR_DOWNSHIFT_MASK; + phy_data |= I347AT4_PSCR_DOWNSHIFT_6X; + phy_data |= I347AT4_PSCR_DOWNSHIFT_ENABLE; + + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + /* Commit the changes. */ ret_val = phy->ops.commit(hw); if (ret_val) { DEBUGOUT("Error committing the PHY changes\n"); - goto out; + return ret_val; } -out: - return ret_val; + ret_val = e1000_set_master_slave_mode(hw); + if (ret_val) + return ret_val; + + return E1000_SUCCESS; } /** @@ -856,48 +1155,32 @@ s32 e1000_copper_link_setup_igp(struct e1000_hw *hw) DEBUGFUNC("e1000_copper_link_setup_igp"); - if (phy->reset_disable) { - ret_val = E1000_SUCCESS; - goto out; - } + if (phy->reset_disable) + return E1000_SUCCESS; ret_val = hw->phy.ops.reset(hw); if (ret_val) { DEBUGOUT("Error resetting the PHY.\n"); - goto out; + return ret_val; } - /* - * Wait 100ms for MAC to configure PHY from NVM settings, to avoid + /* Wait 100ms for MAC to configure PHY from NVM settings, to avoid * timeout issues when LFS is enabled. */ msec_delay(100); - /* - * The NVM settings will configure LPLU in D3 for - * non-IGP1 PHYs. 
- */ - if (phy->type == e1000_phy_igp) { - /* disable lplu d3 during driver init */ - ret_val = hw->phy.ops.set_d3_lplu_state(hw, false); - if (ret_val) { - DEBUGOUT("Error Disabling LPLU D3\n"); - goto out; - } - } - /* disable lplu d0 during driver init */ if (hw->phy.ops.set_d0_lplu_state) { ret_val = hw->phy.ops.set_d0_lplu_state(hw, false); if (ret_val) { DEBUGOUT("Error Disabling LPLU D0\n"); - goto out; + return ret_val; } } /* Configure mdi-mdix settings */ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &data); if (ret_val) - goto out; + return ret_val; data &= ~IGP01E1000_PSCR_AUTO_MDIX; @@ -915,141 +1198,43 @@ s32 e1000_copper_link_setup_igp(struct e1000_hw *hw) } ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, data); if (ret_val) - goto out; + return ret_val; /* set auto-master slave resolution settings */ if (hw->mac.autoneg) { - /* - * when autonegotiation advertisement is only 1000Mbps then we + /* when autonegotiation advertisement is only 1000Mbps then we * should disable SmartSpeed and enable Auto MasterSlave * resolution as hardware default. */ if (phy->autoneg_advertised == ADVERTISE_1000_FULL) { /* Disable SmartSpeed */ ret_val = phy->ops.read_reg(hw, - IGP01E1000_PHY_PORT_CONFIG, - &data); + IGP01E1000_PHY_PORT_CONFIG, + &data); if (ret_val) - goto out; + return ret_val; data &= ~IGP01E1000_PSCFR_SMART_SPEED; ret_val = phy->ops.write_reg(hw, - IGP01E1000_PHY_PORT_CONFIG, - data); + IGP01E1000_PHY_PORT_CONFIG, + data); if (ret_val) - goto out; + return ret_val; /* Set auto Master/Slave resolution process */ ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data); if (ret_val) - goto out; + return ret_val; data &= ~CR_1000T_MS_ENABLE; ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data); if (ret_val) - goto out; + return ret_val; } - ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data); - if (ret_val) - goto out; - - /* load defaults for future use */ - phy->original_ms_type = (data & CR_1000T_MS_ENABLE) ? - ((data & CR_1000T_MS_VALUE) ? - e1000_ms_force_master : - e1000_ms_force_slave) : - e1000_ms_auto; - - switch (phy->ms_type) { - case e1000_ms_force_master: - data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); - break; - case e1000_ms_force_slave: - data |= CR_1000T_MS_ENABLE; - data &= ~(CR_1000T_MS_VALUE); - break; - case e1000_ms_auto: - data &= ~CR_1000T_MS_ENABLE; - default: - break; - } - ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data); - if (ret_val) - goto out; + ret_val = e1000_set_master_slave_mode(hw); } -out: - return ret_val; -} - -/** - * e1000_copper_link_autoneg - Setup/Enable autoneg for copper link - * @hw: pointer to the HW structure - * - * Performs initial bounds checking on autoneg advertisement parameter, then - * configure to advertise the full capability. Setup the PHY to autoneg - * and restart the negotiation process between the link partner. If - * autoneg_wait_to_complete, then wait for autoneg to complete before exiting. - **/ -static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) -{ - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 phy_ctrl; - - DEBUGFUNC("e1000_copper_link_autoneg"); - - /* - * Perform some bounds checking on the autoneg advertisement - * parameter. - */ - phy->autoneg_advertised &= phy->autoneg_mask; - - /* - * If autoneg_advertised is zero, we assume it was not defaulted - * by the calling code so we set to advertise full capability. 
- */ - if (phy->autoneg_advertised == 0) - phy->autoneg_advertised = phy->autoneg_mask; - - DEBUGOUT("Reconfiguring auto-neg advertisement params\n"); - ret_val = e1000_phy_setup_autoneg(hw); - if (ret_val) { - DEBUGOUT("Error Setting up Auto-Negotiation\n"); - goto out; - } - DEBUGOUT("Restarting Auto-Neg\n"); - - /* - * Restart auto-negotiation by setting the Auto Neg Enable bit and - * the Auto Neg Restart bit in the PHY control register. - */ - ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); - if (ret_val) - goto out; - - phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); - ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl); - if (ret_val) - goto out; - - /* - * Does the user want to wait for Auto-Neg to complete here, or - * check at a later time (for example, callback routine). - */ - if (phy->autoneg_wait_to_complete) { - ret_val = hw->mac.ops.wait_autoneg(hw); - if (ret_val) { - DEBUGOUT("Error while waiting for " - "autoneg to complete\n"); - goto out; - } - } - - hw->mac.get_link_status = true; - -out: return ret_val; } @@ -1076,33 +1261,31 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) /* Read the MII Auto-Neg Advertisement Register (Address 4). */ ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); if (ret_val) - goto out; + return ret_val; if (phy->autoneg_mask & ADVERTISE_1000_FULL) { /* Read the MII 1000Base-T Control Register (Address 9). */ ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, - &mii_1000t_ctrl_reg); + &mii_1000t_ctrl_reg); if (ret_val) - goto out; + return ret_val; } - /* - * Need to parse both autoneg_advertised and fc and set up + /* Need to parse both autoneg_advertised and fc and set up * the appropriate PHY registers. First we will parse for * autoneg_advertised software override. Since we can advertise * a plethora of combinations, we need to check each bit * individually. */ - /* - * First we clear all the 10/100 mb speed bits in the Auto-Neg + /* First we clear all the 10/100 mb speed bits in the Auto-Neg * Advertisement Register (Address 4) and the 1000 mb speed bits in * the 1000Base-T Control Register (Address 9). */ mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS | - NWAY_AR_100TX_HD_CAPS | - NWAY_AR_10T_FD_CAPS | - NWAY_AR_10T_HD_CAPS); + NWAY_AR_100TX_HD_CAPS | + NWAY_AR_10T_FD_CAPS | + NWAY_AR_10T_HD_CAPS); mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS); DEBUGOUT1("autoneg_advertised %x\n", phy->autoneg_advertised); @@ -1141,8 +1324,7 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; } - /* - * Check for a software override of the flow control settings, and + /* Check for a software override of the flow control settings, and * setup the PHY advertisement registers accordingly. If * auto-negotiation is enabled, then software will have to set the * "PAUSE" bits to the correct value in the Auto-Negotiation @@ -1161,15 +1343,13 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) */ switch (hw->fc.current_mode) { case e1000_fc_none: - /* - * Flow control (Rx & Tx) is completely disabled by a + /* Flow control (Rx & Tx) is completely disabled by a * software over-ride. */ mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); break; case e1000_fc_rx_pause: - /* - * Rx Flow control is enabled, and Tx Flow control is + /* Rx Flow control is enabled, and Tx Flow control is * disabled, by a software over-ride. 
* * Since there really isn't a way to advertise that we are @@ -1181,41 +1361,97 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); break; case e1000_fc_tx_pause: - /* - * Tx Flow control is enabled, and Rx Flow control is + /* Tx Flow control is enabled, and Rx Flow control is * disabled, by a software over-ride. */ mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; break; case e1000_fc_full: - /* - * Flow control (both Rx and Tx) is enabled by a software + /* Flow control (both Rx and Tx) is enabled by a software * over-ride. */ mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); break; default: DEBUGOUT("Flow control param set incorrectly\n"); - ret_val = -E1000_ERR_CONFIG; - goto out; + return -E1000_ERR_CONFIG; } ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); if (ret_val) - goto out; + return ret_val; DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); - if (phy->autoneg_mask & ADVERTISE_1000_FULL) { - ret_val = phy->ops.write_reg(hw, - PHY_1000T_CTRL, - mii_1000t_ctrl_reg); - if (ret_val) - goto out; + if (phy->autoneg_mask & ADVERTISE_1000_FULL) + ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, + mii_1000t_ctrl_reg); + + return ret_val; +} + +/** + * e1000_copper_link_autoneg - Setup/Enable autoneg for copper link + * @hw: pointer to the HW structure + * + * Performs initial bounds checking on autoneg advertisement parameter, then + * configure to advertise the full capability. Setup the PHY to autoneg + * and restart the negotiation process between the link partner. If + * autoneg_wait_to_complete, then wait for autoneg to complete before exiting. + **/ +static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_ctrl; + + DEBUGFUNC("e1000_copper_link_autoneg"); + + /* Perform some bounds checking on the autoneg advertisement + * parameter. + */ + phy->autoneg_advertised &= phy->autoneg_mask; + + /* If autoneg_advertised is zero, we assume it was not defaulted + * by the calling code so we set to advertise full capability. + */ + if (!phy->autoneg_advertised) + phy->autoneg_advertised = phy->autoneg_mask; + + DEBUGOUT("Reconfiguring auto-neg advertisement params\n"); + ret_val = e1000_phy_setup_autoneg(hw); + if (ret_val) { + DEBUGOUT("Error Setting up Auto-Negotiation\n"); + return ret_val; } + DEBUGOUT("Restarting Auto-Neg\n"); + + /* Restart auto-negotiation by setting the Auto Neg Enable bit and + * the Auto Neg Restart bit in the PHY control register. + */ + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); + if (ret_val) + return ret_val; + + phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl); + if (ret_val) + return ret_val; + + /* Does the user want to wait for Auto-Neg to complete here, or + * check at a later time (for example, callback routine). + */ + if (phy->autoneg_wait_to_complete) { + ret_val = e1000_wait_autoneg(hw); + if (ret_val) { + DEBUGOUT("Error while waiting for autoneg to complete\n"); + return ret_val; + } + } + + hw->mac.get_link_status = true; -out: return ret_val; } @@ -1236,46 +1472,40 @@ s32 e1000_setup_copper_link_generic(struct e1000_hw *hw) DEBUGFUNC("e1000_setup_copper_link_generic"); if (hw->mac.autoneg) { - /* - * Setup autoneg and flow control advertisement and perform + /* Setup autoneg and flow control advertisement and perform * autonegotiation. 
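/*
 * Sketch of the PAUSE advertisement mapping implemented by the switch
 * above (802.3 Annex 28B): e1000_fc_none clears both bits,
 * e1000_fc_tx_pause sets ASM_DIR alone, and e1000_fc_rx_pause and
 * e1000_fc_full both set PAUSE|ASM_DIR because receive-only pause
 * cannot be advertised by itself.  The e1000_fc_mode enum is assumed
 * from the hardware header.
 */
static inline u16 e1000_sketch_fc_adv(enum e1000_fc_mode fc, u16 adv)
{
	adv &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
	switch (fc) {
	case e1000_fc_tx_pause:
		adv |= NWAY_AR_ASM_DIR;
		break;
	case e1000_fc_rx_pause:
	case e1000_fc_full:
		adv |= NWAY_AR_ASM_DIR | NWAY_AR_PAUSE;
		break;
	default:		/* e1000_fc_none */
		break;
	}
	return adv;
}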
*/ ret_val = e1000_copper_link_autoneg(hw); if (ret_val) - goto out; + return ret_val; } else { - /* - * PHY will be set to 10H, 10F, 100H or 100F + /* PHY will be set to 10H, 10F, 100H or 100F * depending on user settings. */ DEBUGOUT("Forcing Speed and Duplex\n"); ret_val = hw->phy.ops.force_speed_duplex(hw); if (ret_val) { DEBUGOUT("Error Forcing Speed and Duplex\n"); - goto out; + return ret_val; } } - /* - * Check link status. Wait up to 100 microseconds for link to become + /* Check link status. Wait up to 100 microseconds for link to become * valid. */ - ret_val = e1000_phy_has_link_generic(hw, - COPPER_LINK_UP_LIMIT, - 10, - &link); + ret_val = e1000_phy_has_link_generic(hw, COPPER_LINK_UP_LIMIT, 10, + &link); if (ret_val) - goto out; + return ret_val; if (link) { DEBUGOUT("Valid link established!!!\n"); - e1000_config_collision_dist_generic(hw); + hw->mac.ops.config_collision_dist(hw); ret_val = e1000_config_fc_after_link_up_generic(hw); } else { DEBUGOUT("Unable to establish link!!!\n"); } -out: return ret_val; } @@ -1298,28 +1528,27 @@ s32 e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw) ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); if (ret_val) - goto out; + return ret_val; e1000_phy_force_speed_duplex_setup(hw, &phy_data); ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); if (ret_val) - goto out; + return ret_val; - /* - * Clear Auto-Crossover to force MDI manually. IGP requires MDI + /* Clear Auto-Crossover to force MDI manually. IGP requires MDI * forced whenever speed and duplex are forced. */ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); if (ret_val) - goto out; + return ret_val; phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); if (ret_val) - goto out; + return ret_val; DEBUGOUT1("IGP PSCR: %X\n", phy_data); @@ -1328,26 +1557,19 @@ s32 e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw) if (phy->autoneg_wait_to_complete) { DEBUGOUT("Waiting for forced speed/duplex link on IGP phy.\n"); - ret_val = e1000_phy_has_link_generic(hw, - PHY_FORCE_LIMIT, - 100000, - &link); + ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); if (ret_val) - goto out; + return ret_val; if (!link) DEBUGOUT("Link taking longer than expected.\n"); /* Try once more */ - ret_val = e1000_phy_has_link_generic(hw, - PHY_FORCE_LIMIT, - 100000, - &link); - if (ret_val) - goto out; + ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); } -out: return ret_val; } @@ -1370,92 +1592,125 @@ s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw) DEBUGFUNC("e1000_phy_force_speed_duplex_m88"); - /* - * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI - * forced whenever speed and duplex are forced. - */ - ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); - if (ret_val) - goto out; + /* I210 and I211 devices support Auto-Crossover in forced operation. */ + if (phy->type != e1000_phy_i210) { + /* Clear Auto-Crossover to force MDI manually. M88E1000 + * requires MDI forced whenever speed and duplex are forced. 
+ */ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) + return ret_val; - phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; - ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); - if (ret_val) - goto out; + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + } DEBUGOUT1("M88E1000 PSCR: %X\n", phy_data); ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); if (ret_val) - goto out; + return ret_val; e1000_phy_force_speed_duplex_setup(hw, &phy_data); ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); if (ret_val) - goto out; + return ret_val; /* Reset the phy to commit changes. */ - ret_val = hw->phy.ops.commit(hw); - if (ret_val) - goto out; + if (hw->phy.ops.commit) { + ret_val = hw->phy.ops.commit(hw); + if (ret_val) + return ret_val; + } if (phy->autoneg_wait_to_complete) { DEBUGOUT("Waiting for forced speed/duplex link on M88 phy.\n"); ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, - 100000, &link); + 100000, &link); if (ret_val) - goto out; + return ret_val; if (!link) { - /* - * We didn't get link. - * Reset the DSP and cross our fingers. - */ - ret_val = phy->ops.write_reg(hw, - M88E1000_PHY_PAGE_SELECT, - 0x001d); - if (ret_val) - goto out; - ret_val = e1000_phy_reset_dsp_generic(hw); - if (ret_val) - goto out; + bool reset_dsp = true; + + switch (hw->phy.id) { + case I347AT4_E_PHY_ID: + case M88E1340M_E_PHY_ID: + case M88E1112_E_PHY_ID: + case M88E1543_E_PHY_ID: + case I210_I_PHY_ID: + reset_dsp = false; + break; + default: + if (hw->phy.type != e1000_phy_m88) + reset_dsp = false; + break; + } + + if (!reset_dsp) { + DEBUGOUT("Link taking longer than expected.\n"); + } else { + /* We didn't get link. + * Reset the DSP and cross our fingers. + */ + ret_val = phy->ops.write_reg(hw, + M88E1000_PHY_PAGE_SELECT, + 0x001d); + if (ret_val) + return ret_val; + ret_val = e1000_phy_reset_dsp_generic(hw); + if (ret_val) + return ret_val; + } } /* Try once more */ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, - 100000, &link); + 100000, &link); if (ret_val) - goto out; + return ret_val; } + if (hw->phy.type != e1000_phy_m88) + return E1000_SUCCESS; + + if (hw->phy.id == I347AT4_E_PHY_ID || + hw->phy.id == M88E1340M_E_PHY_ID || + hw->phy.id == M88E1112_E_PHY_ID) + return E1000_SUCCESS; + if (hw->phy.id == I210_I_PHY_ID) + return E1000_SUCCESS; + if ((hw->phy.id == M88E1543_E_PHY_ID)) + return E1000_SUCCESS; ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); if (ret_val) - goto out; + return ret_val; - /* - * Resetting the phy means we need to re-force TX_CLK in the + /* Resetting the phy means we need to re-force TX_CLK in the * Extended PHY Specific Control Register to 25MHz clock from * the reset value of 2.5MHz. */ phy_data |= M88E1000_EPSCR_TX_CLK_25; ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); if (ret_val) - goto out; + return ret_val; - /* - * In addition, we must re-enable CRS on Tx for both half and full + /* In addition, we must re-enable CRS on Tx for both half and full * duplex. 
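/*
 * [Editor's example — illustrative sketch, not part of the disclosed patch.]
 * The reset_dsp switch and the chain of early returns in
 * e1000_phy_force_speed_duplex_m88() above encode the same question
 * twice: "is this an original M88E1000-class PHY?" Only those parts get
 * the DSP reset and the TX_CLK/CRS-on-Tx fixups after a forced
 * speed/duplex. A hypothetical predicate capturing that test in one
 * place:
 */
static bool example_is_m88e1000_class(struct e1000_hw *hw)
{
	if (hw->phy.type != e1000_phy_m88)
		return false;

	switch (hw->phy.id) {
	case I347AT4_E_PHY_ID:
	case M88E1340M_E_PHY_ID:
	case M88E1112_E_PHY_ID:
	case M88E1543_E_PHY_ID:
	case I210_I_PHY_ID:
		/* newer parts: no DSP reset, no TX_CLK/CRS fixups */
		return false;
	default:
		return true;
	}
}
/* [End editor's example.] */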
*/ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); if (ret_val) - goto out; + return ret_val; phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); -out: return ret_val; } @@ -1476,32 +1731,27 @@ s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw) DEBUGFUNC("e1000_phy_force_speed_duplex_ife"); - if (phy->type != e1000_phy_ife) { - ret_val = e1000_phy_force_speed_duplex_igp(hw); - goto out; - } - ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &data); if (ret_val) - goto out; + return ret_val; e1000_phy_force_speed_duplex_setup(hw, &data); ret_val = phy->ops.write_reg(hw, PHY_CONTROL, data); if (ret_val) - goto out; + return ret_val; /* Disable MDI-X support for 10/100 */ ret_val = phy->ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &data); if (ret_val) - goto out; + return ret_val; data &= ~IFE_PMC_AUTO_MDIX; data &= ~IFE_PMC_FORCE_MDIX; ret_val = phy->ops.write_reg(hw, IFE_PHY_MDIX_CONTROL, data); if (ret_val) - goto out; + return ret_val; DEBUGOUT1("IFE PMC: %X\n", data); @@ -1510,27 +1760,22 @@ s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw) if (phy->autoneg_wait_to_complete) { DEBUGOUT("Waiting for forced speed/duplex link on IFE phy.\n"); - ret_val = e1000_phy_has_link_generic(hw, - PHY_FORCE_LIMIT, - 100000, - &link); + ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); if (ret_val) - goto out; + return ret_val; if (!link) DEBUGOUT("Link taking longer than expected.\n"); /* Try once more */ - ret_val = e1000_phy_has_link_generic(hw, - PHY_FORCE_LIMIT, - 100000, - &link); + ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); if (ret_val) - goto out; + return ret_val; } -out: - return ret_val; + return E1000_SUCCESS; } /** @@ -1581,16 +1826,15 @@ void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl) if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) { ctrl |= E1000_CTRL_SPD_100; *phy_ctrl |= MII_CR_SPEED_100; - *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10); + *phy_ctrl &= ~MII_CR_SPEED_1000; DEBUGOUT("Forcing 100mb\n"); } else { ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); - *phy_ctrl |= MII_CR_SPEED_10; *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); DEBUGOUT("Forcing 10mb\n"); } - e1000_config_collision_dist_generic(hw); + hw->mac.ops.config_collision_dist(hw); E1000_WRITE_REG(hw, E1000_CTRL, ctrl); } @@ -1612,78 +1856,73 @@ void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl) s32 e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active) { struct e1000_phy_info *phy = &hw->phy; - s32 ret_val = E1000_SUCCESS; + s32 ret_val; u16 data; DEBUGFUNC("e1000_set_d3_lplu_state_generic"); - if (!(hw->phy.ops.read_reg)) - goto out; - ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); if (ret_val) - goto out; + return ret_val; if (!active) { data &= ~IGP02E1000_PM_D3_LPLU; ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, - data); + data); if (ret_val) - goto out; - /* - * LPLU and SmartSpeed are mutually exclusive. LPLU is used + return ret_val; + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used * during Dx states where the power conservation is most * important. During driver activity we should enable * SmartSpeed, so performance is maintained. 
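/*
 * [Editor's note and example — not part of the disclosed patch.]
 * In e1000_defines.h the MII control-register speed-select bits are
 * MII_CR_SPEED_100 (bit 13), MII_CR_SPEED_1000 (bit 6) and
 * MII_CR_SPEED_10, which is defined as 0x0000 (10 Mb/s is "both bits
 * clear"). The operations the hunk above removes from
 * e1000_phy_force_speed_duplex_setup() — "|= MII_CR_SPEED_10" and
 * clearing it inside a mask — were therefore no-ops. The 10 Mb/s arm,
 * as a sketch:
 */
static void example_force_10mb(u32 *ctrl, u16 *phy_ctrl)
{
	/* MAC side: clear both speed-select bits in E1000_CTRL */
	*ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
	/* PHY side: 10 Mb/s is encoded as both MII speed bits clear */
	*phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
}
/* [End editor's example.] */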
*/ if (phy->smart_speed == e1000_smart_speed_on) { ret_val = phy->ops.read_reg(hw, - IGP01E1000_PHY_PORT_CONFIG, - &data); + IGP01E1000_PHY_PORT_CONFIG, + &data); if (ret_val) - goto out; + return ret_val; data |= IGP01E1000_PSCFR_SMART_SPEED; ret_val = phy->ops.write_reg(hw, - IGP01E1000_PHY_PORT_CONFIG, - data); + IGP01E1000_PHY_PORT_CONFIG, + data); if (ret_val) - goto out; + return ret_val; } else if (phy->smart_speed == e1000_smart_speed_off) { ret_val = phy->ops.read_reg(hw, - IGP01E1000_PHY_PORT_CONFIG, - &data); + IGP01E1000_PHY_PORT_CONFIG, + &data); if (ret_val) - goto out; + return ret_val; data &= ~IGP01E1000_PSCFR_SMART_SPEED; ret_val = phy->ops.write_reg(hw, - IGP01E1000_PHY_PORT_CONFIG, - data); + IGP01E1000_PHY_PORT_CONFIG, + data); if (ret_val) - goto out; + return ret_val; } } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || - (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || - (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { + (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || + (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { data |= IGP02E1000_PM_D3_LPLU; ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, - data); + data); if (ret_val) - goto out; + return ret_val; /* When LPLU is enabled, we should disable SmartSpeed */ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, - &data); + &data); if (ret_val) - goto out; + return ret_val; data &= ~IGP01E1000_PSCFR_SMART_SPEED; ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, - data); + data); } -out: return ret_val; } @@ -1704,30 +1943,28 @@ s32 e1000_check_downshift_generic(struct e1000_hw *hw) DEBUGFUNC("e1000_check_downshift_generic"); switch (phy->type) { + case e1000_phy_i210: case e1000_phy_m88: case e1000_phy_gg82563: - offset = M88E1000_PHY_SPEC_STATUS; - mask = M88E1000_PSSR_DOWNSHIFT; + offset = M88E1000_PHY_SPEC_STATUS; + mask = M88E1000_PSSR_DOWNSHIFT; break; case e1000_phy_igp_2: - case e1000_phy_igp: case e1000_phy_igp_3: - offset = IGP01E1000_PHY_LINK_HEALTH; - mask = IGP01E1000_PLHR_SS_DOWNGRADE; + offset = IGP01E1000_PHY_LINK_HEALTH; + mask = IGP01E1000_PLHR_SS_DOWNGRADE; break; default: /* speed downshift not supported */ phy->speed_downgraded = false; - ret_val = E1000_SUCCESS; - goto out; + return E1000_SUCCESS; } ret_val = phy->ops.read_reg(hw, offset, &phy_data); if (!ret_val) - phy->speed_downgraded = (phy_data & mask) ? true : false; + phy->speed_downgraded = !!(phy_data & mask); -out: return ret_val; } @@ -1750,9 +1987,9 @@ s32 e1000_check_polarity_m88(struct e1000_hw *hw) ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &data); if (!ret_val) - phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY) - ? e1000_rev_polarity_reversed - : e1000_rev_polarity_normal; + phy->cable_polarity = ((data & M88E1000_PSSR_REV_POLARITY) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal); return ret_val; } @@ -1774,35 +2011,32 @@ s32 e1000_check_polarity_igp(struct e1000_hw *hw) DEBUGFUNC("e1000_check_polarity_igp"); - /* - * Polarity is determined based on the speed of + /* Polarity is determined based on the speed of * our connection. 
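/*
 * [Editor's note and example — not part of the disclosed patch.]
 * Several hunks above replace "(x & mask) ? true : false" with
 * "!!(x & mask)": double negation normalizes any non-zero masked value
 * to 1, the idiomatic way to assign a bit test to a bool. Applied to the
 * M88 downshift bit read in e1000_check_downshift_generic():
 */
static s32 example_read_m88_downshift(struct e1000_hw *hw, bool *downshifted)
{
	u16 phy_data;
	s32 ret_val;

	ret_val = hw->phy.ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS,
				       &phy_data);
	if (ret_val)
		return ret_val;

	/* 1 if the PHY downshifted from the advertised speed, else 0 */
	*downshifted = !!(phy_data & M88E1000_PSSR_DOWNSHIFT);
	return E1000_SUCCESS;
}
/* [End editor's example.] */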
*/ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data); if (ret_val) - goto out; + return ret_val; if ((data & IGP01E1000_PSSR_SPEED_MASK) == IGP01E1000_PSSR_SPEED_1000MBPS) { - offset = IGP01E1000_PHY_PCS_INIT_REG; - mask = IGP01E1000_PHY_POLARITY_MASK; + offset = IGP01E1000_PHY_PCS_INIT_REG; + mask = IGP01E1000_PHY_POLARITY_MASK; } else { - /* - * This really only applies to 10Mbps since + /* This really only applies to 10Mbps since * there is no polarity for 100Mbps (always 0). */ - offset = IGP01E1000_PHY_PORT_STATUS; - mask = IGP01E1000_PSSR_POLARITY_REVERSED; + offset = IGP01E1000_PHY_PORT_STATUS; + mask = IGP01E1000_PSSR_POLARITY_REVERSED; } ret_val = phy->ops.read_reg(hw, offset, &data); if (!ret_val) - phy->cable_polarity = (data & mask) - ? e1000_rev_polarity_reversed - : e1000_rev_polarity_normal; + phy->cable_polarity = ((data & mask) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal); -out: return ret_val; } @@ -1820,8 +2054,7 @@ s32 e1000_check_polarity_ife(struct e1000_hw *hw) DEBUGFUNC("e1000_check_polarity_ife"); - /* - * Polarity is determined based on the reversal feature being enabled. + /* Polarity is determined based on the reversal feature being enabled. */ if (phy->polarity_correction) { offset = IFE_PHY_EXTENDED_STATUS_CONTROL; @@ -1834,29 +2067,26 @@ s32 e1000_check_polarity_ife(struct e1000_hw *hw) ret_val = phy->ops.read_reg(hw, offset, &phy_data); if (!ret_val) - phy->cable_polarity = (phy_data & mask) - ? e1000_rev_polarity_reversed - : e1000_rev_polarity_normal; + phy->cable_polarity = ((phy_data & mask) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal); return ret_val; } /** - * e1000_wait_autoneg_generic - Wait for auto-neg completion + * e1000_wait_autoneg - Wait for auto-neg completion * @hw: pointer to the HW structure * * Waits for auto-negotiation to complete or for the auto-negotiation time * limit to expire, which ever happens first. **/ -s32 e1000_wait_autoneg_generic(struct e1000_hw *hw) +static s32 e1000_wait_autoneg(struct e1000_hw *hw) { s32 ret_val = E1000_SUCCESS; u16 i, phy_status; - DEBUGFUNC("e1000_wait_autoneg_generic"); - - if (!(hw->phy.ops.read_reg)) - return E1000_SUCCESS; + DEBUGFUNC("e1000_wait_autoneg"); /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */ for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) { @@ -1871,8 +2101,7 @@ s32 e1000_wait_autoneg_generic(struct e1000_hw *hw) msec_delay(100); } - /* - * PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation + /* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation * has completed. */ return ret_val; @@ -1888,31 +2117,25 @@ s32 e1000_wait_autoneg_generic(struct e1000_hw *hw) * Polls the PHY status register for link, 'iterations' number of times. **/ s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, - u32 usec_interval, bool *success) + u32 usec_interval, bool *success) { s32 ret_val = E1000_SUCCESS; u16 i, phy_status; DEBUGFUNC("e1000_phy_has_link_generic"); - if (!(hw->phy.ops.read_reg)) - return E1000_SUCCESS; - for (i = 0; i < iterations; i++) { - /* - * Some PHYs require the PHY_STATUS register to be read + /* Some PHYs require the PHY_STATUS register to be read * twice due to the link bit being sticky. No harm doing * it across the board. 
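/*
 * [Editor's example — illustrative sketch, not part of the disclosed patch.]
 * The "sticky" link bit mentioned above is the latched link-status bit in
 * the MII status register: after any link drop it reads low until it is
 * read once. Reading PHY_STATUS twice therefore discards the latched
 * value and returns the live state. Sketch, assuming MII_SR_LINK_STATUS
 * from e1000_defines.h:
 */
static s32 example_current_link_state(struct e1000_hw *hw, bool *link)
{
	u16 phy_status;
	s32 ret_val;

	/* first read returns (and clears) the latched value */
	ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
	if (ret_val)
		return ret_val;
	/* second read reflects the current link state */
	ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
	if (ret_val)
		return ret_val;

	*link = !!(phy_status & MII_SR_LINK_STATUS);
	return E1000_SUCCESS;
}
/* [End editor's example.] */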
*/ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); - if (ret_val) { - /* - * If the first read fails, another entity may have + if (ret_val) + /* If the first read fails, another entity may have * ownership of the resources, wait and try again to * see if they have relinquished the resources yet. */ usec_delay(usec_interval); - } ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); if (ret_val) break; @@ -1924,7 +2147,7 @@ s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, usec_delay(usec_interval); } - *success = (i < iterations) ? true : false; + *success = (i < iterations); return ret_val; } @@ -1954,21 +2177,130 @@ s32 e1000_get_cable_length_m88(struct e1000_hw *hw) ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); if (ret_val) - goto out; + return ret_val; - index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> - M88E1000_PSSR_CABLE_LENGTH_SHIFT; - if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) { - ret_val = -E1000_ERR_PHY; - goto out; - } + index = ((phy_data & M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT); + + if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) + return -E1000_ERR_PHY; phy->min_cable_length = e1000_m88_cable_length_table[index]; phy->max_cable_length = e1000_m88_cable_length_table[index + 1]; phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; -out: + return E1000_SUCCESS; +} + +s32 e1000_get_cable_length_m88_gen2(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, phy_data2, is_cm; + u16 index, default_page; + + DEBUGFUNC("e1000_get_cable_length_m88_gen2"); + + switch (hw->phy.id) { + case I210_I_PHY_ID: + /* Get cable length from PHY Cable Diagnostics Control Reg */ + ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) + + (I347AT4_PCDL + phy->addr), + &phy_data); + if (ret_val) + return ret_val; + + /* Check if the unit of cable length is meters or cm */ + ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) + + I347AT4_PCDC, &phy_data2); + if (ret_val) + return ret_val; + + is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT); + + /* Populate the phy structure with cable length in meters */ + phy->min_cable_length = phy_data / (is_cm ? 100 : 1); + phy->max_cable_length = phy_data / (is_cm ? 100 : 1); + phy->cable_length = phy_data / (is_cm ? 100 : 1); + break; + case M88E1543_E_PHY_ID: + case M88E1340M_E_PHY_ID: + case I347AT4_E_PHY_ID: + /* Remember the original page select and set it to 7 */ + ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, + &default_page); + if (ret_val) + return ret_val; + + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x07); + if (ret_val) + return ret_val; + + /* Get cable length from PHY Cable Diagnostics Control Reg */ + ret_val = phy->ops.read_reg(hw, (I347AT4_PCDL + phy->addr), + &phy_data); + if (ret_val) + return ret_val; + + /* Check if the unit of cable length is meters or cm */ + ret_val = phy->ops.read_reg(hw, I347AT4_PCDC, &phy_data2); + if (ret_val) + return ret_val; + + is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT); + + /* Populate the phy structure with cable length in meters */ + phy->min_cable_length = phy_data / (is_cm ? 100 : 1); + phy->max_cable_length = phy_data / (is_cm ? 100 : 1); + phy->cable_length = phy_data / (is_cm ? 
100 : 1); + + /* Reset the page select to its original value */ + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, + default_page); + if (ret_val) + return ret_val; + break; + + case M88E1112_E_PHY_ID: + /* Remember the original page select and set it to 5 */ + ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, + &default_page); + if (ret_val) + return ret_val; + + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x05); + if (ret_val) + return ret_val; + + ret_val = phy->ops.read_reg(hw, M88E1112_VCT_DSP_DISTANCE, + &phy_data); + if (ret_val) + return ret_val; + + index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT; + + if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) + return -E1000_ERR_PHY; + + phy->min_cable_length = e1000_m88_cable_length_table[index]; + phy->max_cable_length = e1000_m88_cable_length_table[index + 1]; + + phy->cable_length = (phy->min_cable_length + + phy->max_cable_length) / 2; + + /* Reset the page select to its original value */ + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, + default_page); + if (ret_val) + return ret_val; + + break; + default: + return -E1000_ERR_PHY; + } + return ret_val; } @@ -1986,15 +2318,16 @@ out: s32 e1000_get_cable_length_igp_2(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; - s32 ret_val = E1000_SUCCESS; + s32 ret_val; u16 phy_data, i, agc_value = 0; u16 cur_agc_index, max_agc_index = 0; u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1; - u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = - {IGP02E1000_PHY_AGC_A, - IGP02E1000_PHY_AGC_B, - IGP02E1000_PHY_AGC_C, - IGP02E1000_PHY_AGC_D}; + static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = { + IGP02E1000_PHY_AGC_A, + IGP02E1000_PHY_AGC_B, + IGP02E1000_PHY_AGC_C, + IGP02E1000_PHY_AGC_D + }; DEBUGFUNC("e1000_get_cable_length_igp_2"); @@ -2002,23 +2335,20 @@ s32 e1000_get_cable_length_igp_2(struct e1000_hw *hw) for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) { ret_val = phy->ops.read_reg(hw, agc_reg_array[i], &phy_data); if (ret_val) - goto out; + return ret_val; - /* - * Getting bits 15:9, which represent the combination of + /* Getting bits 15:9, which represent the combination of * coarse and fine gain values. The result is a number * that can be put into the lookup table to obtain the * approximate cable length. */ - cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & - IGP02E1000_AGC_LENGTH_MASK; + cur_agc_index = ((phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & + IGP02E1000_AGC_LENGTH_MASK); /* Array index bound check. */ if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) || - (cur_agc_index == 0)) { - ret_val = -E1000_ERR_PHY; - goto out; - } + (cur_agc_index == 0)) + return -E1000_ERR_PHY; /* Remove min & max AGC values from calculation. */ if (e1000_igp_2_cable_length_table[min_agc_index] > @@ -2032,18 +2362,17 @@ s32 e1000_get_cable_length_igp_2(struct e1000_hw *hw) } agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] + - e1000_igp_2_cable_length_table[max_agc_index]); + e1000_igp_2_cable_length_table[max_agc_index]); agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2); /* Calculate cable length with the error range of +/- 10 meters. */ - phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ? - (agc_value - IGP02E1000_AGC_RANGE) : 0; + phy->min_cable_length = (((agc_value - IGP02E1000_AGC_RANGE) > 0) ? 
+ (agc_value - IGP02E1000_AGC_RANGE) : 0); phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE; phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; -out: - return ret_val; + return E1000_SUCCESS; } /** @@ -2067,53 +2396,51 @@ s32 e1000_get_phy_info_m88(struct e1000_hw *hw) if (phy->media_type != e1000_media_type_copper) { DEBUGOUT("Phy info is only valid for copper media\n"); - ret_val = -E1000_ERR_CONFIG; - goto out; + return -E1000_ERR_CONFIG; } ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); if (ret_val) - goto out; + return ret_val; if (!link) { DEBUGOUT("Phy info is only valid if link is up\n"); - ret_val = -E1000_ERR_CONFIG; - goto out; + return -E1000_ERR_CONFIG; } ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); if (ret_val) - goto out; + return ret_val; - phy->polarity_correction = (phy_data & M88E1000_PSCR_POLARITY_REVERSAL) - ? true : false; + phy->polarity_correction = !!(phy_data & + M88E1000_PSCR_POLARITY_REVERSAL); ret_val = e1000_check_polarity_m88(hw); if (ret_val) - goto out; + return ret_val; ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); if (ret_val) - goto out; + return ret_val; - phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX) ? true : false; + phy->is_mdix = !!(phy_data & M88E1000_PSSR_MDIX); if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { ret_val = hw->phy.ops.get_cable_length(hw); if (ret_val) - goto out; + return ret_val; ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &phy_data); if (ret_val) - goto out; + return ret_val; phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS) - ? e1000_1000t_rx_status_ok - : e1000_1000t_rx_status_not_ok; + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS) - ? e1000_1000t_rx_status_ok - : e1000_1000t_rx_status_not_ok; + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; } else { /* Set values to "undefined" */ phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; @@ -2121,7 +2448,6 @@ s32 e1000_get_phy_info_m88(struct e1000_hw *hw) phy->remote_rx = e1000_1000t_rx_status_undefined; } -out: return ret_val; } @@ -2145,53 +2471,105 @@ s32 e1000_get_phy_info_igp(struct e1000_hw *hw) ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); if (ret_val) - goto out; + return ret_val; if (!link) { DEBUGOUT("Phy info is only valid if link is up\n"); - ret_val = -E1000_ERR_CONFIG; - goto out; + return -E1000_ERR_CONFIG; } phy->polarity_correction = true; ret_val = e1000_check_polarity_igp(hw); if (ret_val) - goto out; + return ret_val; ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data); if (ret_val) - goto out; + return ret_val; - phy->is_mdix = (data & IGP01E1000_PSSR_MDIX) ? true : false; + phy->is_mdix = !!(data & IGP01E1000_PSSR_MDIX); if ((data & IGP01E1000_PSSR_SPEED_MASK) == IGP01E1000_PSSR_SPEED_1000MBPS) { ret_val = phy->ops.get_cable_length(hw); if (ret_val) - goto out; + return ret_val; ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data); if (ret_val) - goto out; + return ret_val; phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) - ? e1000_1000t_rx_status_ok - : e1000_1000t_rx_status_not_ok; + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS) - ? e1000_1000t_rx_status_ok - : e1000_1000t_rx_status_not_ok; + ? 
e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; } else { phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; phy->local_rx = e1000_1000t_rx_status_undefined; phy->remote_rx = e1000_1000t_rx_status_undefined; } -out: return ret_val; } +/** + * e1000_get_phy_info_ife - Retrieves various IFE PHY states + * @hw: pointer to the HW structure + * + * Populates "phy" structure with various feature states. + **/ +s32 e1000_get_phy_info_ife(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + DEBUGFUNC("e1000_get_phy_info_ife"); + + ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + return ret_val; + + if (!link) { + DEBUGOUT("Phy info is only valid if link is up\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = phy->ops.read_reg(hw, IFE_PHY_SPECIAL_CONTROL, &data); + if (ret_val) + return ret_val; + phy->polarity_correction = !(data & IFE_PSC_AUTO_POLARITY_DISABLE); + + if (phy->polarity_correction) { + ret_val = e1000_check_polarity_ife(hw); + if (ret_val) + return ret_val; + } else { + /* Polarity is forced */ + phy->cable_polarity = ((data & IFE_PSC_FORCE_POLARITY) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal); + } + + ret_val = phy->ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &data); + if (ret_val) + return ret_val; + + phy->is_mdix = !!(data & IFE_PMC_MDIX_STATUS); + + /* The following parameters are undefined for 10/100 operation. */ + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + + return E1000_SUCCESS; +} + /** * e1000_phy_sw_reset_generic - PHY software reset * @hw: pointer to the HW structure @@ -2201,26 +2579,22 @@ out: **/ s32 e1000_phy_sw_reset_generic(struct e1000_hw *hw) { - s32 ret_val = E1000_SUCCESS; + s32 ret_val; u16 phy_ctrl; DEBUGFUNC("e1000_phy_sw_reset_generic"); - if (!(hw->phy.ops.read_reg)) - goto out; - ret_val = hw->phy.ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); if (ret_val) - goto out; + return ret_val; phy_ctrl |= MII_CR_RESET; ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL, phy_ctrl); if (ret_val) - goto out; + return ret_val; usec_delay(1); -out: return ret_val; } @@ -2236,20 +2610,20 @@ out: s32 e1000_phy_hw_reset_generic(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; - s32 ret_val = E1000_SUCCESS; + s32 ret_val; u32 ctrl; DEBUGFUNC("e1000_phy_hw_reset_generic"); - ret_val = phy->ops.check_reset_block(hw); - if (ret_val) { - ret_val = E1000_SUCCESS; - goto out; + if (phy->ops.check_reset_block) { + ret_val = phy->ops.check_reset_block(hw); + if (ret_val) + return E1000_SUCCESS; } ret_val = phy->ops.acquire(hw); if (ret_val) - goto out; + return ret_val; ctrl = E1000_READ_REG(hw, E1000_CTRL); E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_PHY_RST); @@ -2264,10 +2638,7 @@ s32 e1000_phy_hw_reset_generic(struct e1000_hw *hw) phy->ops.release(hw); - ret_val = phy->ops.get_cfg_done(hw); - -out: - return ret_val; + return phy->ops.get_cfg_done(hw); } /** @@ -2277,7 +2648,7 @@ out: * Generic function to wait 10 milli-seconds for configuration to complete * and return success. 
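/*
 * [Editor's note and example — not part of the disclosed patch.]
 * A recurring change in this file: phy.ops entries such as commit() and
 * check_reset_block() are now treated as optional and NULL-checked
 * before the call (see e1000_phy_force_speed_duplex_m88() and
 * e1000_phy_hw_reset_generic() above), since not every PHY provides
 * them. The pattern, isolated:
 */
static s32 example_commit_if_supported(struct e1000_hw *hw)
{
	/* ops.commit may be left NULL by PHYs with no commit step */
	if (hw->phy.ops.commit)
		return hw->phy.ops.commit(hw);
	return E1000_SUCCESS;
}
/* [End editor's example.] */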
**/ -s32 e1000_get_cfg_done_generic(struct e1000_hw *hw) +s32 e1000_get_cfg_done_generic(struct e1000_hw E1000_UNUSEDARG *hw) { DEBUGFUNC("e1000_get_cfg_done_generic"); @@ -2349,15 +2720,13 @@ s32 e1000_phy_init_script_igp3(struct e1000_hw *hw) hw->phy.ops.write_reg(hw, 0x1796, 0x0008); /* Change cg_icount + enable integbp for channels BCD */ hw->phy.ops.write_reg(hw, 0x1798, 0xD008); - /* - * Change cg_icount + enable integbp + change prop_factor_master + /* Change cg_icount + enable integbp + change prop_factor_master * to 8 for channel A */ hw->phy.ops.write_reg(hw, 0x1898, 0xD918); /* Disable AHT in Slave mode on channel A */ hw->phy.ops.write_reg(hw, 0x187A, 0x0800); - /* - * Enable LPLU and disable AN to 1000 in non-D0a states, + /* Enable LPLU and disable AN to 1000 in non-D0a states, * Enable SPD+B2B */ hw->phy.ops.write_reg(hw, 0x0019, 0x008D); @@ -2381,11 +2750,15 @@ enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id) { enum e1000_phy_type phy_type = e1000_phy_unknown; - switch (phy_id) { + switch (phy_id) { case M88E1000_I_PHY_ID: case M88E1000_E_PHY_ID: case M88E1111_I_PHY_ID: case M88E1011_I_PHY_ID: + case M88E1543_E_PHY_ID: + case I347AT4_E_PHY_ID: + case M88E1112_E_PHY_ID: + case M88E1340M_E_PHY_ID: phy_type = e1000_phy_m88; break; case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */ @@ -2405,6 +2778,9 @@ enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id) case I82580_I_PHY_ID: phy_type = e1000_phy_82580; break; + case I210_I_PHY_ID: + phy_type = e1000_phy_i210; + break; default: phy_type = e1000_phy_unknown; break; @@ -2422,7 +2798,6 @@ enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id) **/ s32 e1000_determine_phy_address(struct e1000_hw *hw) { - s32 ret_val = -E1000_ERR_PHY_TYPE; u32 phy_addr = 0; u32 i; enum e1000_phy_type phy_type = e1000_phy_unknown; @@ -2437,21 +2812,18 @@ s32 e1000_determine_phy_address(struct e1000_hw *hw) e1000_get_phy_id(hw); phy_type = e1000_get_phy_type_from_id(hw->phy.id); - /* - * If phy_type is valid, break - we found our + /* If phy_type is valid, break - we found our * PHY address */ - if (phy_type != e1000_phy_unknown) { - ret_val = E1000_SUCCESS; - goto out; - } + if (phy_type != e1000_phy_unknown) + return E1000_SUCCESS; + msec_delay(1); i++; } while (i < 10); } -out: - return ret_val; + return -E1000_ERR_PHY_TYPE; } /** @@ -2465,10 +2837,16 @@ out: void e1000_power_up_phy_copper(struct e1000_hw *hw) { u16 mii_reg = 0; + u16 power_reg = 0; /* The PHY will retain its settings across a power down/up cycle */ hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); mii_reg &= ~MII_CR_POWER_DOWN; + if (hw->phy.type == e1000_phy_i210) { + hw->phy.ops.read_reg(hw, GS40G_COPPER_SPEC, &power_reg); + power_reg &= ~GS40G_CS_POWER_DOWN; + hw->phy.ops.write_reg(hw, GS40G_COPPER_SPEC, power_reg); + } hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg); } @@ -2483,10 +2861,17 @@ void e1000_power_up_phy_copper(struct e1000_hw *hw) void e1000_power_down_phy_copper(struct e1000_hw *hw) { u16 mii_reg = 0; + u16 power_reg = 0; /* The PHY will retain its settings across a power down/up cycle */ hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); mii_reg |= MII_CR_POWER_DOWN; + /* i210 Phy requires an additional bit for power up/down */ + if (hw->phy.type == e1000_phy_i210) { + hw->phy.ops.read_reg(hw, GS40G_COPPER_SPEC, &power_reg); + power_reg |= GS40G_CS_POWER_DOWN; + hw->phy.ops.write_reg(hw, GS40G_COPPER_SPEC, power_reg); + } hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg); msec_delay(1); } @@ -2510,9 +2895,9 @@ s32 
e1000_check_polarity_82577(struct e1000_hw *hw) ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data); if (!ret_val) - phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY) - ? e1000_rev_polarity_reversed - : e1000_rev_polarity_normal; + phy->cable_polarity = ((data & I82577_PHY_STATUS2_REV_POLARITY) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal); return ret_val; } @@ -2521,9 +2906,7 @@ s32 e1000_check_polarity_82577(struct e1000_hw *hw) * e1000_phy_force_speed_duplex_82577 - Force speed/duplex for I82577 PHY * @hw: pointer to the HW structure * - * Calls the PHY setup function to force speed and duplex. Clears the - * auto-crossover to force MDI manually. Waits for link and returns - * successful if link up is successful, else -E1000_ERR_PHY (-2). + * Calls the PHY setup function to force speed and duplex. **/ s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw) { @@ -2536,56 +2919,32 @@ s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw) ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); if (ret_val) - goto out; + return ret_val; e1000_phy_force_speed_duplex_setup(hw, &phy_data); ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); if (ret_val) - goto out; - - /* - * Clear Auto-Crossover to force MDI manually. 82577 requires MDI - * forced whenever speed and duplex are forced. - */ - ret_val = phy->ops.read_reg(hw, I82577_PHY_CTRL_2, &phy_data); - if (ret_val) - goto out; - - phy_data &= ~I82577_PHY_CTRL2_AUTO_MDIX; - phy_data &= ~I82577_PHY_CTRL2_FORCE_MDI_MDIX; - - ret_val = phy->ops.write_reg(hw, I82577_PHY_CTRL_2, phy_data); - if (ret_val) - goto out; - - DEBUGOUT1("I82577_PHY_CTRL_2: %X\n", phy_data); + return ret_val; usec_delay(1); if (phy->autoneg_wait_to_complete) { DEBUGOUT("Waiting for forced speed/duplex link on 82577 phy\n"); - ret_val = e1000_phy_has_link_generic(hw, - PHY_FORCE_LIMIT, - 100000, - &link); + ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); if (ret_val) - goto out; + return ret_val; if (!link) DEBUGOUT("Link taking longer than expected.\n"); /* Try once more */ - ret_val = e1000_phy_has_link_generic(hw, - PHY_FORCE_LIMIT, - 100000, - &link); - if (ret_val) - goto out; + ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); } -out: return ret_val; } @@ -2609,51 +2968,49 @@ s32 e1000_get_phy_info_82577(struct e1000_hw *hw) ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); if (ret_val) - goto out; + return ret_val; if (!link) { DEBUGOUT("Phy info is only valid if link is up\n"); - ret_val = -E1000_ERR_CONFIG; - goto out; + return -E1000_ERR_CONFIG; } phy->polarity_correction = true; ret_val = e1000_check_polarity_82577(hw); if (ret_val) - goto out; + return ret_val; ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data); if (ret_val) - goto out; + return ret_val; - phy->is_mdix = (data & I82577_PHY_STATUS2_MDIX) ? true : false; + phy->is_mdix = !!(data & I82577_PHY_STATUS2_MDIX); if ((data & I82577_PHY_STATUS2_SPEED_MASK) == I82577_PHY_STATUS2_SPEED_1000MBPS) { ret_val = hw->phy.ops.get_cable_length(hw); if (ret_val) - goto out; + return ret_val; ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data); if (ret_val) - goto out; + return ret_val; phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) - ? e1000_1000t_rx_status_ok - : e1000_1000t_rx_status_not_ok; + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS) - ? e1000_1000t_rx_status_ok - : e1000_1000t_rx_status_not_ok; + ? 
e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; } else { phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; phy->local_rx = e1000_1000t_rx_status_undefined; phy->remote_rx = e1000_1000t_rx_status_undefined; } -out: - return ret_val; + return E1000_SUCCESS; } /** @@ -2673,16 +3030,233 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw) ret_val = phy->ops.read_reg(hw, I82577_PHY_DIAG_STATUS, &phy_data); if (ret_val) - goto out; + return ret_val; - length = (phy_data & I82577_DSTATUS_CABLE_LENGTH) >> - I82577_DSTATUS_CABLE_LENGTH_SHIFT; + length = ((phy_data & I82577_DSTATUS_CABLE_LENGTH) >> + I82577_DSTATUS_CABLE_LENGTH_SHIFT); if (length == E1000_CABLE_LENGTH_UNDEFINED) - ret_val = -E1000_ERR_PHY; + return -E1000_ERR_PHY; phy->cable_length = length; -out: + return E1000_SUCCESS; +} + +/** + * e1000_write_phy_reg_gs40g - Write GS40G PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +s32 e1000_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val; + u16 page = offset >> GS40G_PAGE_SHIFT; + + DEBUGFUNC("e1000_write_phy_reg_gs40g"); + + offset = offset & GS40G_OFFSET_MASK; + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page); + if (ret_val) + goto release; + ret_val = e1000_write_phy_reg_mdic(hw, offset, data); + +release: + hw->phy.ops.release(hw); + return ret_val; +} + +/** + * e1000_read_phy_reg_gs40g - Read GS40G PHY register + * @hw: pointer to the HW structure + * @offset: lower half is register offset to read to + * upper half is page to use. + * @data: data to read at register offset + * + * Acquires semaphore, if necessary, then reads the data in the PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +s32 e1000_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + u16 page = offset >> GS40G_PAGE_SHIFT; + + DEBUGFUNC("e1000_read_phy_reg_gs40g"); + + offset = offset & GS40G_OFFSET_MASK; + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page); + if (ret_val) + goto release; + ret_val = e1000_read_phy_reg_mdic(hw, offset, data); + +release: + hw->phy.ops.release(hw); return ret_val; } + +/** + * e1000_read_phy_reg_mphy - Read mPHY control register + * @hw: pointer to the HW structure + * @address: address to be read + * @data: pointer to the read data + * + * Reads the mPHY control register in the PHY at offset and stores the + * information read to data. 
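/*
 * [Editor's note and example — not part of the disclosed patch.]
 * For the I210 GS40G accessors above, the 32-bit "offset" packs the page
 * in the upper half and the register in the lower half:
 * page = offset >> GS40G_PAGE_SHIFT, reg = offset & GS40G_OFFSET_MASK.
 * That is why the I210 cable-diagnostics code earlier reads
 * "(0x7 << GS40G_PAGE_SHIFT) + I347AT4_PCDC" — register I347AT4_PCDC on
 * page 7. A hypothetical wrapper making the packing explicit:
 */
static s32 example_read_gs40g_paged(struct e1000_hw *hw, u16 page, u16 reg,
				    u16 *data)
{
	u32 offset = ((u32)page << GS40G_PAGE_SHIFT) | reg;

	return e1000_read_phy_reg_gs40g(hw, offset, data);
}
/* [End editor's example.] */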
+ **/ +s32 e1000_read_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 *data) +{ + u32 mphy_ctrl = 0; + bool locked = false; + bool ready = false; + + DEBUGFUNC("e1000_read_phy_reg_mphy"); + + /* Check if mPHY is ready to read/write operations */ + ready = e1000_is_mphy_ready(hw); + if (!ready) + return -E1000_ERR_PHY; + + /* Check if mPHY access is disabled and enable it if so */ + mphy_ctrl = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL); + if (mphy_ctrl & E1000_MPHY_DIS_ACCESS) { + locked = true; + ready = e1000_is_mphy_ready(hw); + if (!ready) + return -E1000_ERR_PHY; + mphy_ctrl |= E1000_MPHY_ENA_ACCESS; + E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl); + } + + /* Set the address that we want to read */ + ready = e1000_is_mphy_ready(hw); + if (!ready) + return -E1000_ERR_PHY; + + /* We mask address, because we want to use only current lane */ + mphy_ctrl = (mphy_ctrl & ~E1000_MPHY_ADDRESS_MASK & + ~E1000_MPHY_ADDRESS_FNC_OVERRIDE) | + (address & E1000_MPHY_ADDRESS_MASK); + E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl); + + /* Read data from the address */ + ready = e1000_is_mphy_ready(hw); + if (!ready) + return -E1000_ERR_PHY; + *data = E1000_READ_REG(hw, E1000_MPHY_DATA); + + /* Disable access to mPHY if it was originally disabled */ + if (locked) + ready = e1000_is_mphy_ready(hw); + if (!ready) + return -E1000_ERR_PHY; + E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, + E1000_MPHY_DIS_ACCESS); + + return E1000_SUCCESS; +} + +/** + * e1000_write_phy_reg_mphy - Write mPHY control register + * @hw: pointer to the HW structure + * @address: address to write to + * @data: data to write to register at offset + * @line_override: used when we want to use different line than default one + * + * Writes data to mPHY control register. + **/ +s32 e1000_write_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 data, + bool line_override) +{ + u32 mphy_ctrl = 0; + bool locked = false; + bool ready = false; + + DEBUGFUNC("e1000_write_phy_reg_mphy"); + + /* Check if mPHY is ready to read/write operations */ + ready = e1000_is_mphy_ready(hw); + if (!ready) + return -E1000_ERR_PHY; + + /* Check if mPHY access is disabled and enable it if so */ + mphy_ctrl = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL); + if (mphy_ctrl & E1000_MPHY_DIS_ACCESS) { + locked = true; + ready = e1000_is_mphy_ready(hw); + if (!ready) + return -E1000_ERR_PHY; + mphy_ctrl |= E1000_MPHY_ENA_ACCESS; + E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl); + } + + /* Set the address that we want to read */ + ready = e1000_is_mphy_ready(hw); + if (!ready) + return -E1000_ERR_PHY; + + /* We mask address, because we want to use only current lane */ + if (line_override) + mphy_ctrl |= E1000_MPHY_ADDRESS_FNC_OVERRIDE; + else + mphy_ctrl &= ~E1000_MPHY_ADDRESS_FNC_OVERRIDE; + mphy_ctrl = (mphy_ctrl & ~E1000_MPHY_ADDRESS_MASK) | + (address & E1000_MPHY_ADDRESS_MASK); + E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl); + + /* Read data from the address */ + ready = e1000_is_mphy_ready(hw); + if (!ready) + return -E1000_ERR_PHY; + E1000_WRITE_REG(hw, E1000_MPHY_DATA, data); + + /* Disable access to mPHY if it was originally disabled */ + if (locked) + ready = e1000_is_mphy_ready(hw); + if (!ready) + return -E1000_ERR_PHY; + E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, + E1000_MPHY_DIS_ACCESS); + + return E1000_SUCCESS; +} + +/** + * e1000_is_mphy_ready - Check if mPHY control register is not busy + * @hw: pointer to the HW structure + * + * Returns mPHY control register status. 
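/*
 * [Editor's note and example — not part of the disclosed patch.]
 * The mPHY accessors above follow one protocol: (1) wait until
 * E1000_MPHY_BUSY clears, (2) set E1000_MPHY_ENA_ACCESS if access was
 * disabled, (3) program the lane-masked address (with
 * E1000_MPHY_ADDRESS_FNC_OVERRIDE when line_override targets a
 * non-default lane), (4) move the data word, (5) re-disable access if it
 * started out disabled. A hypothetical read-modify-write caller:
 */
static s32 example_mphy_set_bits(struct e1000_hw *hw, u32 address,
				 u32 set_bits)
{
	u32 val;
	s32 ret_val;

	ret_val = e1000_read_phy_reg_mphy(hw, address, &val);
	if (ret_val)
		return ret_val;

	/* write back on the default lane: no line override */
	return e1000_write_phy_reg_mphy(hw, address, val | set_bits, false);
}
/* [End editor's example.] */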
+ **/ +bool e1000_is_mphy_ready(struct e1000_hw *hw) +{ + u16 retry_count = 0; + u32 mphy_ctrl = 0; + bool ready = false; + + while (retry_count < 2) { + mphy_ctrl = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL); + if (mphy_ctrl & E1000_MPHY_BUSY) { + usec_delay(20); + retry_count++; + continue; + } + ready = true; + break; + } + + if (!ready) + DEBUGOUT("ERROR READING mPHY control register, phy is busy.\n"); + + return ready; +} diff --git a/vmkdrivers/src_9/drivers/net/igb/e1000_phy.h b/vmkdrivers/src_9/drivers/net/igb/e1000_phy.h index fabf09470e010e30501d97f6170a6d9101474f7b..03d44000cc2c2a511dc5035483c945d10c48600f 100644 --- a/vmkdrivers/src_9/drivers/net/igb/e1000_phy.h +++ b/vmkdrivers/src_9/drivers/net/igb/e1000_phy.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2009 Intel Corporation. + Copyright(c) 2007-2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -28,7 +28,6 @@ #ifndef _E1000_PHY_H_ #define _E1000_PHY_H_ -void e1000_init_phy_ops_generic(struct e1000_hw *hw); s32 e1000_check_downshift_generic(struct e1000_hw *hw); s32 e1000_check_polarity_m88(struct e1000_hw *hw); s32 e1000_check_polarity_igp(struct e1000_hw *hw); @@ -36,161 +35,212 @@ s32 e1000_check_polarity_ife(struct e1000_hw *hw); s32 e1000_check_reset_block_generic(struct e1000_hw *hw); s32 e1000_copper_link_setup_igp(struct e1000_hw *hw); s32 e1000_copper_link_setup_m88(struct e1000_hw *hw); +s32 e1000_copper_link_setup_m88_gen2(struct e1000_hw *hw); s32 e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw); s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw); s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw); s32 e1000_get_cable_length_m88(struct e1000_hw *hw); +s32 e1000_get_cable_length_m88_gen2(struct e1000_hw *hw); s32 e1000_get_cable_length_igp_2(struct e1000_hw *hw); s32 e1000_get_cfg_done_generic(struct e1000_hw *hw); s32 e1000_get_phy_id(struct e1000_hw *hw); s32 e1000_get_phy_info_igp(struct e1000_hw *hw); s32 e1000_get_phy_info_m88(struct e1000_hw *hw); +s32 e1000_get_phy_info_ife(struct e1000_hw *hw); s32 e1000_phy_sw_reset_generic(struct e1000_hw *hw); void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl); s32 e1000_phy_hw_reset_generic(struct e1000_hw *hw); s32 e1000_phy_reset_dsp_generic(struct e1000_hw *hw); s32 e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data); s32 e1000_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page); s32 e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data); s32 e1000_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data); s32 e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data); s32 e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active); s32 e1000_setup_copper_link_generic(struct e1000_hw *hw); -s32 e1000_wait_autoneg_generic(struct e1000_hw *hw); s32 e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data); s32 e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data); s32 e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data); s32 e1000_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data); s32 e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data); -s32 e1000_phy_reset_dsp(struct e1000_hw *hw); s32 
e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, - u32 usec_interval, bool *success); + u32 usec_interval, bool *success); s32 e1000_phy_init_script_igp3(struct e1000_hw *hw); enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id); s32 e1000_determine_phy_address(struct e1000_hw *hw); +s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg); +s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg); void e1000_power_up_phy_copper(struct e1000_hw *hw); void e1000_power_down_phy_copper(struct e1000_hw *hw); s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data); s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data); s32 e1000_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data); s32 e1000_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data); +s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data); s32 e1000_copper_link_setup_82577(struct e1000_hw *hw); s32 e1000_check_polarity_82577(struct e1000_hw *hw); s32 e1000_get_phy_info_82577(struct e1000_hw *hw); s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw); s32 e1000_get_cable_length_82577(struct e1000_hw *hw); +s32 e1000_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_read_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 *data); +s32 e1000_write_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 data, + bool line_override); +bool e1000_is_mphy_ready(struct e1000_hw *hw); -#define E1000_MAX_PHY_ADDR 4 +#define E1000_MAX_PHY_ADDR 8 /* IGP01E1000 Specific Registers */ -#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */ -#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */ -#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */ -#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */ -#define IGP01E1000_GMII_FIFO 0x14 /* GMII FIFO */ -#define IGP01E1000_PHY_CHANNEL_QUALITY 0x15 /* PHY Channel Quality */ -#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */ -#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */ -#define BM_PHY_PAGE_SELECT 22 /* Page Select for BM */ -#define IGP_PAGE_SHIFT 5 -#define PHY_REG_MASK 0x1F - -#define HV_INTC_FC_PAGE_START 768 -#define I82578_ADDR_REG 29 -#define I82577_ADDR_REG 16 -#define I82577_CFG_REG 22 -#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15) -#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 */ -#define I82577_CTRL_REG 23 +#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */ +#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */ +#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */ +#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */ +#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */ +#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */ +#define BM_PHY_PAGE_SELECT 22 /* Page Select for BM */ +#define IGP_PAGE_SHIFT 5 +#define PHY_REG_MASK 0x1F + +/* GS40G - I210 PHY defines */ +#define GS40G_PAGE_SELECT 0x16 +#define GS40G_PAGE_SHIFT 16 +#define GS40G_OFFSET_MASK 0xFFFF +#define GS40G_PAGE_2 0x20000 +#define GS40G_MAC_REG2 0x15 +#define GS40G_MAC_LB 0x4140 +#define GS40G_MAC_SPEED_1G 0X0006 +#define GS40G_COPPER_SPEC 0x0010 +#define GS40G_CS_POWER_DOWN 0x0002 + +#define HV_INTC_FC_PAGE_START 768 +#define I82578_ADDR_REG 29 +#define I82577_ADDR_REG 16 +#define I82577_CFG_REG 22 +#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15) 
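/*
 * [Editor's example — illustrative sketch, not part of the disclosed patch.]
 * GS40G_COPPER_SPEC above is page 0, register 0x10, so the define is
 * already a complete packed offset for the GS40G accessors. Mirroring
 * what e1000_power_up_phy_copper()/e1000_power_down_phy_copper() do with
 * GS40G_CS_POWER_DOWN on the I210:
 */
static s32 example_i210_set_phy_power(struct e1000_hw *hw, bool power_down)
{
	u16 power_reg;
	s32 ret_val;

	ret_val = e1000_read_phy_reg_gs40g(hw, GS40G_COPPER_SPEC, &power_reg);
	if (ret_val)
		return ret_val;

	if (power_down)
		power_reg |= GS40G_CS_POWER_DOWN;
	else
		power_reg &= ~GS40G_CS_POWER_DOWN;

	return e1000_write_phy_reg_gs40g(hw, GS40G_COPPER_SPEC, power_reg);
}
/* [End editor's example.] */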
+#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift */ +#define I82577_CTRL_REG 23 /* 82577 specific PHY registers */ -#define I82577_PHY_CTRL_2 18 -#define I82577_PHY_LBK_CTRL 19 -#define I82577_PHY_STATUS_2 26 -#define I82577_PHY_DIAG_STATUS 31 +#define I82577_PHY_CTRL_2 18 +#define I82577_PHY_LBK_CTRL 19 +#define I82577_PHY_STATUS_2 26 +#define I82577_PHY_DIAG_STATUS 31 /* I82577 PHY Status 2 */ -#define I82577_PHY_STATUS2_REV_POLARITY 0x0400 -#define I82577_PHY_STATUS2_MDIX 0x0800 -#define I82577_PHY_STATUS2_SPEED_MASK 0x0300 -#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200 -#define I82577_PHY_STATUS2_SPEED_100MBPS 0x0100 +#define I82577_PHY_STATUS2_REV_POLARITY 0x0400 +#define I82577_PHY_STATUS2_MDIX 0x0800 +#define I82577_PHY_STATUS2_SPEED_MASK 0x0300 +#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200 /* I82577 PHY Control 2 */ -#define I82577_PHY_CTRL2_AUTO_MDIX 0x0400 -#define I82577_PHY_CTRL2_FORCE_MDI_MDIX 0x0200 +#define I82577_PHY_CTRL2_MANUAL_MDIX 0x0200 +#define I82577_PHY_CTRL2_AUTO_MDI_MDIX 0x0400 +#define I82577_PHY_CTRL2_MDIX_CFG_MASK 0x0600 /* I82577 PHY Diagnostics Status */ -#define I82577_DSTATUS_CABLE_LENGTH 0x03FC -#define I82577_DSTATUS_CABLE_LENGTH_SHIFT 2 - -#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 -#define IGP01E1000_PHY_POLARITY_MASK 0x0078 - -#define IGP01E1000_PSCR_AUTO_MDIX 0x1000 -#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */ - -#define IGP01E1000_PSCFR_SMART_SPEED 0x0080 - -/* Enable flexible speed on link-up */ -#define IGP01E1000_GMII_FLEX_SPD 0x0010 -#define IGP01E1000_GMII_SPD 0x0020 /* Enable SPD */ - -#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */ -#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */ -#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */ - -#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000 - -#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002 -#define IGP01E1000_PSSR_MDIX 0x0800 -#define IGP01E1000_PSSR_SPEED_MASK 0xC000 -#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000 - -#define IGP02E1000_PHY_CHANNEL_NUM 4 -#define IGP02E1000_PHY_AGC_A 0x11B1 -#define IGP02E1000_PHY_AGC_B 0x12B1 -#define IGP02E1000_PHY_AGC_C 0x14B1 -#define IGP02E1000_PHY_AGC_D 0x18B1 - -#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Course - 15:13, Fine - 12:9 */ -#define IGP02E1000_AGC_LENGTH_MASK 0x7F -#define IGP02E1000_AGC_RANGE 15 - -#define IGP03E1000_PHY_MISC_CTRL 0x1B -#define IGP03E1000_PHY_MISC_DUPLEX_MANUAL_SET 0x1000 /* Manually Set Duplex */ - -#define E1000_CABLE_LENGTH_UNDEFINED 0xFF - -#define E1000_KMRNCTRLSTA_OFFSET 0x001F0000 -#define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16 -#define E1000_KMRNCTRLSTA_REN 0x00200000 -#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */ -#define E1000_KMRNCTRLSTA_TIMEOUTS 0x4 /* Kumeran Timeouts */ -#define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9 /* Kumeran InBand Parameters */ -#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */ - -#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 -#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Control */ -#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Control */ -#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control */ +#define I82577_DSTATUS_CABLE_LENGTH 0x03FC +#define I82577_DSTATUS_CABLE_LENGTH_SHIFT 2 + +/* 82580 PHY Power Management */ +#define E1000_82580_PHY_POWER_MGMT 0xE14 +#define E1000_82580_PM_SPD 0x0001 /* Smart Power Down */ +#define E1000_82580_PM_D0_LPLU 0x0002 /* For D0a states */ +#define E1000_82580_PM_D3_LPLU 0x0004 /* For all other states */ +#define 
E1000_82580_PM_GO_LINKD 0x0020 /* Go Link Disconnect */ + +#define E1000_MPHY_DIS_ACCESS 0x80000000 /* disable_access bit */ +#define E1000_MPHY_ENA_ACCESS 0x40000000 /* enable_access bit */ +#define E1000_MPHY_BUSY 0x00010000 /* busy bit */ +#define E1000_MPHY_ADDRESS_FNC_OVERRIDE 0x20000000 /* fnc_override bit */ +#define E1000_MPHY_ADDRESS_MASK 0x0000FFFF /* address mask */ + +#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 +#define IGP01E1000_PHY_POLARITY_MASK 0x0078 + +#define IGP01E1000_PSCR_AUTO_MDIX 0x1000 +#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */ + +#define IGP01E1000_PSCFR_SMART_SPEED 0x0080 + +#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */ +#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */ +#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */ + +#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000 + +#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002 +#define IGP01E1000_PSSR_MDIX 0x0800 +#define IGP01E1000_PSSR_SPEED_MASK 0xC000 +#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000 + +#define IGP02E1000_PHY_CHANNEL_NUM 4 +#define IGP02E1000_PHY_AGC_A 0x11B1 +#define IGP02E1000_PHY_AGC_B 0x12B1 +#define IGP02E1000_PHY_AGC_C 0x14B1 +#define IGP02E1000_PHY_AGC_D 0x18B1 + +#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Course=15:13, Fine=12:9 */ +#define IGP02E1000_AGC_LENGTH_MASK 0x7F +#define IGP02E1000_AGC_RANGE 15 + +#define E1000_CABLE_LENGTH_UNDEFINED 0xFF + +#define E1000_KMRNCTRLSTA_OFFSET 0x001F0000 +#define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16 +#define E1000_KMRNCTRLSTA_REN 0x00200000 +#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */ +#define E1000_KMRNCTRLSTA_TIMEOUTS 0x4 /* Kumeran Timeouts */ +#define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9 /* Kumeran InBand Parameters */ +#define E1000_KMRNCTRLSTA_IBIST_DISABLE 0x0200 /* Kumeran IBIST Disable */ +#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */ + +#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 +#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Ctrl */ +#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Ctrl */ +#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control */ /* IFE PHY Extended Status Control */ -#define IFE_PESC_POLARITY_REVERSED 0x0100 +#define IFE_PESC_POLARITY_REVERSED 0x0100 /* IFE PHY Special Control */ -#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010 -#define IFE_PSC_FORCE_POLARITY 0x0020 -#define IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN 0x0100 +#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010 +#define IFE_PSC_FORCE_POLARITY 0x0020 /* IFE PHY Special Control and LED Control */ -#define IFE_PSCL_PROBE_MODE 0x0020 -#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */ -#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */ +#define IFE_PSCL_PROBE_MODE 0x0020 +#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */ +#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */ /* IFE PHY MDIX Control */ -#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */ -#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDI-X, 0=force MDI */ -#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable auto MDI/MDI-X, 0=disable */ +#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */ +#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDI-X, 0=force MDI */ +#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable auto, 0=disable */ + +/* SFP modules ID memory locations */ +#define E1000_SFF_IDENTIFIER_OFFSET 0x00 +#define E1000_SFF_IDENTIFIER_SFF 0x02 +#define E1000_SFF_IDENTIFIER_SFP 0x03 + +#define E1000_SFF_ETH_FLAGS_OFFSET 0x06 +/* Flags 
for SFP modules compatible with ETH up to 1Gb */ +struct sfp_e1000_flags { + u8 e1000_base_sx:1; + u8 e1000_base_lx:1; + u8 e1000_base_cx:1; + u8 e1000_base_t:1; + u8 e100_base_lx:1; + u8 e100_base_fx:1; + u8 e10_base_bx10:1; + u8 e10_base_px:1; +}; + +/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */ +#define E1000_SFF_VENDOR_OUI_TYCO 0x00407600 +#define E1000_SFF_VENDOR_OUI_FTL 0x00906500 +#define E1000_SFF_VENDOR_OUI_AVAGO 0x00176A00 +#define E1000_SFF_VENDOR_OUI_INTEL 0x001B2100 #endif diff --git a/vmkdrivers/src_9/drivers/net/igb/e1000_regs.h b/vmkdrivers/src_9/drivers/net/igb/e1000_regs.h index 9d13f1f808666760bc919bf94ecd0cd793ea1430..0e083c5444e49fb6cfbcc96613ae0c213cebd9b4 100644 --- a/vmkdrivers/src_9/drivers/net/igb/e1000_regs.h +++ b/vmkdrivers/src_9/drivers/net/igb/e1000_regs.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2009 Intel Corporation. + Copyright(c) 2007-2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -28,480 +28,619 @@ #ifndef _E1000_REGS_H_ #define _E1000_REGS_H_ -#define E1000_CTRL 0x00000 /* Device Control - RW */ -#define E1000_CTRL_DUP 0x00004 /* Device Control Duplicate (Shadow) - RW */ -#define E1000_STATUS 0x00008 /* Device Status - RO */ -#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */ -#define E1000_EERD 0x00014 /* EEPROM Read - RW */ -#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ -#define E1000_FLA 0x0001C /* Flash Access - RW */ -#define E1000_MDIC 0x00020 /* MDI Control - RW */ -#define E1000_MDICNFG 0x00E04 /* MDI Config - RW */ -#define E1000_REGISTER_SET_SIZE 0x20000 /* CSR Size */ -#define E1000_EEPROM_INIT_CTRL_WORD_2 0x0F /* EEPROM Init Ctrl Word 2 */ -#define E1000_BARCTRL 0x5BBC /* BAR ctrl reg */ -#define E1000_BARCTRL_FLSIZE 0x0700 /* BAR ctrl Flsize */ -#define E1000_BARCTRL_CSRSIZE 0x2000 /* BAR ctrl CSR size */ -#define E1000_SCTL 0x00024 /* SerDes Control - RW */ -#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ -#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ -#define E1000_FEXT 0x0002C /* Future Extended - RW */ -#define E1000_FEXTNVM 0x00028 /* Future Extended NVM - RW */ -#define E1000_FCT 0x00030 /* Flow Control Type - RW */ -#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */ -#define E1000_VET 0x00038 /* VLAN Ether Type - RW */ -#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ -#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ -#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ -#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ -#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ -#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ -#define E1000_RCTL 0x00100 /* Rx Control - RW */ -#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ -#define E1000_TXCW 0x00178 /* Tx Configuration Word - RW */ -#define E1000_RXCW 0x00180 /* Rx Configuration Word - RO */ -#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */ -#define E1000_EITR(_n) (0x01680 + (0x4 * (_n))) -#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */ -#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ -#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ -#define E1000_EIAC 0x0152C /* Ext. 
Interrupt Auto Clear - RW */ -#define E1000_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */ -#define E1000_GPIE 0x01514 /* General Purpose Interrupt Enable - RW */ -#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */ -#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ -#define E1000_TCTL 0x00400 /* Tx Control - RW */ -#define E1000_TCTL_EXT 0x00404 /* Extended Tx Control - RW */ -#define E1000_TIPG 0x00410 /* Tx Inter-packet gap -RW */ -#define E1000_TBT 0x00448 /* Tx Burst Timer - RW */ -#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ -#define E1000_LEDCTL 0x00E00 /* LED Control - RW */ -#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */ -#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */ -#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */ -#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ -#define E1000_PBS 0x01008 /* Packet Buffer Size */ -#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ -#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */ -#define E1000_FLASHT 0x01028 /* FLASH Timer Register */ -#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ -#define E1000_FLSWCTL 0x01030 /* FLASH control register */ -#define E1000_FLSWDATA 0x01034 /* FLASH data register */ -#define E1000_FLSWCNT 0x01038 /* FLASH Access Counter */ -#define E1000_FLOP 0x0103C /* FLASH Opcode Register */ -#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */ -#define E1000_I2CPARAMS 0x0102C /* SFPI2C Parameters Register - RW */ -#define E1000_WDSTP 0x01040 /* Watchdog Setup - RW */ -#define E1000_SWDSTS 0x01044 /* SW Device Status - RW */ -#define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */ -#define E1000_TCPTIMER 0x0104C /* TCP Timer - RW */ -#define E1000_VPDDIAG 0x01060 /* VPD Diagnostic - RO */ -#define E1000_ICR_V2 0x01500 /* Interrupt Cause - new location - RC */ -#define E1000_ICS_V2 0x01504 /* Interrupt Cause Set - new location - WO */ -#define E1000_IMS_V2 0x01508 /* Interrupt Mask Set/Read - new location - RW */ -#define E1000_IMC_V2 0x0150C /* Interrupt Mask Clear - new location - WO */ -#define E1000_IAM_V2 0x01510 /* Interrupt Ack Auto Mask - new location - RW */ -#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */ -#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ -#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ -#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */ -#define E1000_RDFPCQ(_n) (0x02430 + (0x4 * (_n))) -#define E1000_PBRTH 0x02458 /* PB Rx Arbitration Threshold - RW */ -#define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */ +#define E1000_CTRL 0x00000 /* Device Control - RW */ +#define E1000_STATUS 0x00008 /* Device Status - RO */ +#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */ +#define E1000_EERD 0x00014 /* EEPROM Read - RW */ +#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ +#define E1000_FLA 0x0001C /* Flash Access - RW */ +#define E1000_MDIC 0x00020 /* MDI Control - RW */ +#define E1000_MDICNFG 0x00E04 /* MDI Config - RW */ +#define E1000_REGISTER_SET_SIZE 0x20000 /* CSR Size */ +#define E1000_EEPROM_INIT_CTRL_WORD_2 0x0F /* EEPROM Init Ctrl Word 2 */ +#define E1000_EEPROM_PCIE_CTRL_WORD_2 0x28 /* EEPROM PCIe Ctrl Word 2 */ +#define E1000_BARCTRL 0x5BBC /* BAR ctrl reg */ +#define E1000_BARCTRL_FLSIZE 0x0700 /* BAR ctrl Flsize */ +#define E1000_BARCTRL_CSRSIZE 0x2000 /* BAR ctrl CSR size 
*/ +#define E1000_MPHY_ADDR_CTRL 0x0024 /* GbE MPHY Address Control */ +#define E1000_MPHY_DATA 0x0E10 /* GBE MPHY Data */ +#define E1000_MPHY_STAT 0x0E0C /* GBE MPHY Statistics */ +#define E1000_PPHY_CTRL 0x5b48 /* PCIe PHY Control */ +#define E1000_I350_BARCTRL 0x5BFC /* BAR ctrl reg */ +#define E1000_I350_DTXMXPKTSZ 0x355C /* Maximum sent packet size reg*/ +#define E1000_SCTL 0x00024 /* SerDes Control - RW */ +#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ +#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ +#define E1000_FCT 0x00030 /* Flow Control Type - RW */ +#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */ +#define E1000_VET 0x00038 /* VLAN Ether Type - RW */ +#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ +#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ +#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ +#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ +#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ +#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ +#define E1000_RCTL 0x00100 /* Rx Control - RW */ +#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ +#define E1000_TXCW 0x00178 /* Tx Configuration Word - RW */ +#define E1000_RXCW 0x00180 /* Rx Configuration Word - RO */ +#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */ +#define E1000_EITR(_n) (0x01680 + (0x4 * (_n))) +#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */ +#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ +#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ +#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ +#define E1000_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */ +#define E1000_GPIE 0x01514 /* General Purpose Interrupt Enable - RW */ +#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */ +#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ +#define E1000_TCTL 0x00400 /* Tx Control - RW */ +#define E1000_TCTL_EXT 0x00404 /* Extended Tx Control - RW */ +#define E1000_TIPG 0x00410 /* Tx Inter-packet gap -RW */ +#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ +#define E1000_LEDCTL 0x00E00 /* LED Control - RW */ +#define E1000_LEDMUX 0x08130 /* LED MUX Control */ +#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */ +#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */ +#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */ +#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ +#define E1000_PBS 0x01008 /* Packet Buffer Size */ +#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ +#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */ +#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ +#define E1000_FLOP 0x0103C /* FLASH Opcode Register */ +#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */ +#define E1000_I2CPARAMS 0x0102C /* SFPI2C Parameters Register - RW */ +#define E1000_I2CBB_EN 0x00000100 /* I2C - Bit Bang Enable */ +#define E1000_I2C_CLK_OUT 0x00000200 /* I2C- Clock */ +#define E1000_I2C_DATA_OUT 0x00000400 /* I2C- Data Out */ +#define E1000_I2C_DATA_OE_N 0x00000800 /* I2C- Data Output Enable */ +#define E1000_I2C_DATA_IN 0x00001000 /* I2C- Data In */ +#define E1000_I2C_CLK_OE_N 0x00002000 /* I2C- Clock Output Enable */ +#define E1000_I2C_CLK_IN 0x00004000 /* I2C- Clock In */ +#define E1000_I2C_CLK_STRETCH_DIS 0x00008000 /* I2C- Dis Clk Stretching 
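+ *
+ * The E1000_I2C_* bits above live in the I2CPARAMS register and implement
+ * a bit-banged I2C master used to access SFP module EEPROMs.  A minimal
+ * sketch, assuming rd32()/wr32()-style register accessors (names
+ * illustrative, not the driver's actual helpers):
+ *
+ *   u32 params = rd32(E1000_I2CPARAMS);
+ *   wr32(E1000_I2CPARAMS, params | E1000_I2CBB_EN);  // take manual control
+ *   // ...toggle E1000_I2C_CLK_OUT / E1000_I2C_DATA_OUT and sample
+ *   // E1000_I2C_DATA_IN per the I2C protocol, one bit per clock...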
*/ +#define E1000_WDSTP 0x01040 /* Watchdog Setup - RW */ +#define E1000_SWDSTS 0x01044 /* SW Device Status - RW */ +#define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */ +#define E1000_TCPTIMER 0x0104C /* TCP Timer - RW */ +#define E1000_VPDDIAG 0x01060 /* VPD Diagnostic - RO */ +#define E1000_ICR_V2 0x01500 /* Intr Cause - new location - RC */ +#define E1000_ICS_V2 0x01504 /* Intr Cause Set - new location - WO */ +#define E1000_IMS_V2 0x01508 /* Intr Mask Set/Read - new location - RW */ +#define E1000_IMC_V2 0x0150C /* Intr Mask Clear - new location - WO */ +#define E1000_IAM_V2 0x01510 /* Intr Ack Auto Mask - new location - RW */ +#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */ +#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ +#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ +#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */ +#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */ +#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */ +#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */ +#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */ +#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */ +#define E1000_PBRTH 0x02458 /* PB Rx Arbitration Threshold - RW */ +#define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */ /* Split and Replication Rx Control - RW */ -#define E1000_RDPUMB 0x025CC /* DMA Rx Descriptor uC Mailbox - RW */ -#define E1000_RDPUAD 0x025D0 /* DMA Rx Descriptor uC Addr Command - RW */ -#define E1000_RDPUWD 0x025D4 /* DMA Rx Descriptor uC Data Write - RW */ -#define E1000_RDPURD 0x025D8 /* DMA Rx Descriptor uC Data Read - RW */ -#define E1000_RDPUCTL 0x025DC /* DMA Rx Descriptor uC Control - RW */ -#define E1000_PBDIAG 0x02458 /* Packet Buffer Diagnostic - RW */ -#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ -#define E1000_IRPBS 0x02404 /* Same as RXPBS, renamed for newer adapters - RW */ -#define E1000_RDTR 0x02820 /* Rx Delay Timer - RW */ -#define E1000_RADV 0x0282C /* Rx Interrupt Absolute Delay Timer - RW */ -/* - * Convenience macros +#define E1000_RDPUMB 0x025CC /* DMA Rx Descriptor uC Mailbox - RW */ +#define E1000_RDPUAD 0x025D0 /* DMA Rx Descriptor uC Addr Command - RW */ +#define E1000_RDPUWD 0x025D4 /* DMA Rx Descriptor uC Data Write - RW */ +#define E1000_RDPURD 0x025D8 /* DMA Rx Descriptor uC Data Read - RW */ +#define E1000_RDPUCTL 0x025DC /* DMA Rx Descriptor uC Control - RW */ +#define E1000_PBDIAG 0x02458 /* Packet Buffer Diagnostic - RW */ +#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ +#define E1000_IRPBS 0x02404 /* Same as RXPBS, renamed for newer Si - RW */ +#define E1000_PBRWAC 0x024E8 /* Rx packet buffer wrap around counter - RO */ +#define E1000_RDTR 0x02820 /* Rx Delay Timer - RW */ +#define E1000_RADV 0x0282C /* Rx Interrupt Absolute Delay Timer - RW */ +#define E1000_EMIADD 0x10 /* Extended Memory Indirect Address */ +#define E1000_EMIDATA 0x11 /* Extended Memory Indirect Data */ +#define E1000_SRWR 0x12018 /* Shadow Ram Write Register - RW */ +#define E1000_I210_FLMNGCTL 0x12038 +#define E1000_I210_FLMNGDATA 0x1203C +#define E1000_I210_FLMNGCNT 0x12040 + +#define E1000_I210_FLSWCTL 0x12048 +#define E1000_I210_FLSWDATA 0x1204C +#define E1000_I210_FLSWCNT 0x12050 + +#define E1000_I210_FLA 0x1201C + +#define E1000_INVM_DATA_REG(_n) (0x12120 + 4*(_n)) +#define E1000_INVM_SIZE 64 /* Number of INVM Data Registers */ + +/* QAV Tx mode control register */ +#define E1000_I210_TQAVCTRL 
0x3570 + +/* QAV Tx mode control register bitfields masks */ +/* QAV enable */ +#define E1000_TQAVCTRL_MODE (1 << 0) +/* Fetching arbitration type */ +#define E1000_TQAVCTRL_FETCH_ARB (1 << 4) +/* Fetching timer enable */ +#define E1000_TQAVCTRL_FETCH_TIMER_ENABLE (1 << 5) +/* Launch arbitration type */ +#define E1000_TQAVCTRL_LAUNCH_ARB (1 << 8) +/* Launch timer enable */ +#define E1000_TQAVCTRL_LAUNCH_TIMER_ENABLE (1 << 9) +/* SP waits for SR enable */ +#define E1000_TQAVCTRL_SP_WAIT_SR (1 << 10) +/* Fetching timer correction */ +#define E1000_TQAVCTRL_FETCH_TIMER_DELTA_OFFSET 16 +#define E1000_TQAVCTRL_FETCH_TIMER_DELTA \ + (0xFFFF << E1000_TQAVCTRL_FETCH_TIMER_DELTA_OFFSET) + +/* High credit registers where _n can be 0 or 1. */ +#define E1000_I210_TQAVHC(_n) (0x300C + 0x40 * (_n)) + +/* Queues fetch arbitration priority control register */ +#define E1000_I210_TQAVARBCTRL 0x3574 +/* Queues priority masks where _n and _p can be 0-3. */ +#define E1000_TQAVARBCTRL_QUEUE_PRI(_n, _p) ((_p) << (2 * _n)) +/* QAV Tx mode control registers where _n can be 0 or 1. */ +#define E1000_I210_TQAVCC(_n) (0x3004 + 0x40 * (_n)) + +/* QAV Tx mode control register bitfields masks */ +#define E1000_TQAVCC_IDLE_SLOPE 0xFFFF /* Idle slope */ +#define E1000_TQAVCC_KEEP_CREDITS (1 << 30) /* Keep credits opt enable */ +#define E1000_TQAVCC_QUEUE_MODE (1 << 31) /* SP vs. SR Tx mode */ + +/* Good transmitted packets counter registers */ +#define E1000_PQGPTC(_n) (0x010014 + (0x100 * (_n))) + +/* Queues packet buffer size masks where _n can be 0-3 and _s 0-63 [kB] */ +#define E1000_I210_TXPBS_SIZE(_n, _s) ((_s) << (6 * _n)) + +#define E1000_MMDAC 13 /* MMD Access Control */ +#define E1000_MMDAAD 14 /* MMD Access Address/Data */ + +/* Convenience macros * * Note: "_n" is the queue number of the register to be written to. * * Example usage: * E1000_RDBAL_REG(current_rx_queue) */ -#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \ - (0x0C000 + ((_n) * 0x40))) -#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \ - (0x0C004 + ((_n) * 0x40))) -#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \ - (0x0C008 + ((_n) * 0x40))) -#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \ - (0x0C00C + ((_n) * 0x40))) -#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \ - (0x0C010 + ((_n) * 0x40))) -#define E1000_RXCTL(_n) ((_n) < 4 ? (0x02814 + ((_n) * 0x100)) : \ - (0x0C014 + ((_n) * 0x40))) -#define E1000_DCA_RXCTRL(_n) E1000_RXCTL(_n) -#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \ - (0x0C018 + ((_n) * 0x40))) -#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \ - (0x0C028 + ((_n) * 0x40))) -#define E1000_RQDPC(_n) ((_n) < 4 ? (0x02830 + ((_n) * 0x100)) : \ - (0x0C030 + ((_n) * 0x40))) -#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \ - (0x0E000 + ((_n) * 0x40))) -#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \ - (0x0E004 + ((_n) * 0x40))) -#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \ - (0x0E008 + ((_n) * 0x40))) -#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \ - (0x0E010 + ((_n) * 0x40))) -#define E1000_TXCTL(_n) ((_n) < 4 ? (0x03814 + ((_n) * 0x100)) : \ - (0x0E014 + ((_n) * 0x40))) +#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \ + (0x0C000 + ((_n) * 0x40))) +#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \ + (0x0C004 + ((_n) * 0x40))) +#define E1000_RDLEN(_n) ((_n) < 4 ? 
(0x02808 + ((_n) * 0x100)) : \ + (0x0C008 + ((_n) * 0x40))) +#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \ + (0x0C00C + ((_n) * 0x40))) +#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \ + (0x0C010 + ((_n) * 0x40))) +#define E1000_RXCTL(_n) ((_n) < 4 ? (0x02814 + ((_n) * 0x100)) : \ + (0x0C014 + ((_n) * 0x40))) +#define E1000_DCA_RXCTRL(_n) E1000_RXCTL(_n) +#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \ + (0x0C018 + ((_n) * 0x40))) +#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \ + (0x0C028 + ((_n) * 0x40))) +#define E1000_RQDPC(_n) ((_n) < 4 ? (0x02830 + ((_n) * 0x100)) : \ + (0x0C030 + ((_n) * 0x40))) +#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \ + (0x0E000 + ((_n) * 0x40))) +#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \ + (0x0E004 + ((_n) * 0x40))) +#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \ + (0x0E008 + ((_n) * 0x40))) +#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \ + (0x0E010 + ((_n) * 0x40))) +#define E1000_TXCTL(_n) ((_n) < 4 ? (0x03814 + ((_n) * 0x100)) : \ + (0x0E014 + ((_n) * 0x40))) #define E1000_DCA_TXCTRL(_n) E1000_TXCTL(_n) -#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \ - (0x0E018 + ((_n) * 0x40))) -#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \ - (0x0E028 + ((_n) * 0x40))) -#define E1000_TDWBAL(_n) ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) : \ - (0x0E038 + ((_n) * 0x40))) -#define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) : \ - (0x0E03C + ((_n) * 0x40))) -#define E1000_TARC(_n) (0x03840 + ((_n) * 0x100)) -#define E1000_RSRPD 0x02C00 /* Rx Small Packet Detect - RW */ -#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */ -#define E1000_TXDMAC 0x03000 /* Tx DMA Control - RW */ -#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */ -#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4)) -#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ - (0x054E0 + ((_i - 16) * 8))) -#define E1000_RAH(_i) (((_i) <= 15) ? 
(0x05404 + ((_i) * 8)) : \ - (0x054E4 + ((_i - 16) * 8))) -#define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8)) -#define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4)) -#define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4)) -#define E1000_FFMT_REG(_i) (0x09000 + ((_i) * 8)) -#define E1000_FFVT_REG(_i) (0x09800 + ((_i) * 8)) -#define E1000_FFLT_REG(_i) (0x05F00 + ((_i) * 8)) -#define E1000_PBSLAC 0x03100 /* Packet Buffer Slave Access Control */ -#define E1000_PBSLAD(_n) (0x03110 + (0x4 * (_n))) /* Packet Buffer DWORD (_n) */ -#define E1000_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */ -#define E1000_ITPBS 0x03404 /* Same as TXPBS, renamed for newer adpaters - RW */ -#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */ -#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */ -#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */ -#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */ -#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */ -#define E1000_TDPUMB 0x0357C /* DMA Tx Descriptor uC Mail Box - RW */ -#define E1000_TDPUAD 0x03580 /* DMA Tx Descriptor uC Addr Command - RW */ -#define E1000_TDPUWD 0x03584 /* DMA Tx Descriptor uC Data Write - RW */ -#define E1000_TDPURD 0x03588 /* DMA Tx Descriptor uC Data Read - RW */ -#define E1000_TDPUCTL 0x0358C /* DMA Tx Descriptor uC Control - RW */ -#define E1000_DTXCTL 0x03590 /* DMA Tx Control - RW */ -#define E1000_DTXTCPFLGL 0x0359C /* DMA Tx Control flag low - RW */ -#define E1000_DTXTCPFLGH 0x035A0 /* DMA Tx Control flag high - RW */ -#define E1000_DTXMXSZRQ 0x03540 /* DMA Tx Max Total Allow Size Requests - RW */ -#define E1000_TIDV 0x03820 /* Tx Interrupt Delay Value - RW */ -#define E1000_TADV 0x0382C /* Tx Interrupt Absolute Delay Val - RW */ -#define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */ -#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ -#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ -#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ -#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */ -#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */ -#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */ -#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */ -#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */ -#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */ -#define E1000_COLC 0x04028 /* Collision Count - R/clr */ -#define E1000_DC 0x04030 /* Defer Count - R/clr */ -#define E1000_TNCRS 0x04034 /* Tx-No CRS - R/clr */ -#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */ -#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */ -#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */ -#define E1000_XONRXC 0x04048 /* XON Rx Count - R/clr */ -#define E1000_XONTXC 0x0404C /* XON Tx Count - R/clr */ -#define E1000_XOFFRXC 0x04050 /* XOFF Rx Count - R/clr */ -#define E1000_XOFFTXC 0x04054 /* XOFF Tx Count - R/clr */ -#define E1000_FCRUC 0x04058 /* Flow Control Rx Unsupported Count- R/clr */ -#define E1000_PRC64 0x0405C /* Packets Rx (64 bytes) - R/clr */ -#define E1000_PRC127 0x04060 /* Packets Rx (65-127 bytes) - R/clr */ -#define E1000_PRC255 0x04064 /* Packets Rx (128-255 bytes) - R/clr */ -#define E1000_PRC511 0x04068 /* Packets Rx (255-511 bytes) - R/clr */ -#define E1000_PRC1023 0x0406C /* Packets Rx (512-1023 bytes) - R/clr */ -#define E1000_PRC1522 0x04070 /* Packets Rx (1024-1522 bytes) - R/clr */ -#define E1000_GPRC 0x04074 /* Good Packets Rx 
Count - R/clr */ -#define E1000_BPRC 0x04078 /* Broadcast Packets Rx Count - R/clr */ -#define E1000_MPRC 0x0407C /* Multicast Packets Rx Count - R/clr */ -#define E1000_GPTC 0x04080 /* Good Packets Tx Count - R/clr */ -#define E1000_GORCL 0x04088 /* Good Octets Rx Count Low - R/clr */ -#define E1000_GORCH 0x0408C /* Good Octets Rx Count High - R/clr */ -#define E1000_GOTCL 0x04090 /* Good Octets Tx Count Low - R/clr */ -#define E1000_GOTCH 0x04094 /* Good Octets Tx Count High - R/clr */ -#define E1000_RNBC 0x040A0 /* Rx No Buffers Count - R/clr */ -#define E1000_RUC 0x040A4 /* Rx Undersize Count - R/clr */ -#define E1000_RFC 0x040A8 /* Rx Fragment Count - R/clr */ -#define E1000_ROC 0x040AC /* Rx Oversize Count - R/clr */ -#define E1000_RJC 0x040B0 /* Rx Jabber Count - R/clr */ -#define E1000_MGTPRC 0x040B4 /* Management Packets Rx Count - R/clr */ -#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ -#define E1000_MGTPTC 0x040BC /* Management Packets Tx Count - R/clr */ -#define E1000_TORL 0x040C0 /* Total Octets Rx Low - R/clr */ -#define E1000_TORH 0x040C4 /* Total Octets Rx High - R/clr */ -#define E1000_TOTL 0x040C8 /* Total Octets Tx Low - R/clr */ -#define E1000_TOTH 0x040CC /* Total Octets Tx High - R/clr */ -#define E1000_TPR 0x040D0 /* Total Packets Rx - R/clr */ -#define E1000_TPT 0x040D4 /* Total Packets Tx - R/clr */ -#define E1000_PTC64 0x040D8 /* Packets Tx (64 bytes) - R/clr */ -#define E1000_PTC127 0x040DC /* Packets Tx (65-127 bytes) - R/clr */ -#define E1000_PTC255 0x040E0 /* Packets Tx (128-255 bytes) - R/clr */ -#define E1000_PTC511 0x040E4 /* Packets Tx (256-511 bytes) - R/clr */ -#define E1000_PTC1023 0x040E8 /* Packets Tx (512-1023 bytes) - R/clr */ -#define E1000_PTC1522 0x040EC /* Packets Tx (1024-1522 Bytes) - R/clr */ -#define E1000_MPTC 0x040F0 /* Multicast Packets Tx Count - R/clr */ -#define E1000_BPTC 0x040F4 /* Broadcast Packets Tx Count - R/clr */ -#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context Tx - R/clr */ -#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context Tx Fail - R/clr */ -#define E1000_IAC 0x04100 /* Interrupt Assertion Count */ -#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Pkt Timer Expire Count */ -#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Abs Timer Expire Count */ -#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Pkt Timer Expire Count */ -#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Abs Timer Expire Count */ -#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */ -#define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Min Thresh Count */ -#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Desc Min Thresh Count */ -#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */ - -#define E1000_LSECTXUT 0x04300 /* LinkSec Tx Untagged Packet Count - OutPktsUntagged */ -#define E1000_LSECTXPKTE 0x04304 /* LinkSec Encrypted Tx Packets Count - OutPktsEncrypted */ -#define E1000_LSECTXPKTP 0x04308 /* LinkSec Protected Tx Packet Count - OutPktsProtected */ -#define E1000_LSECTXOCTE 0x0430C /* LinkSec Encrypted Tx Octets Count - OutOctetsEncrypted */ -#define E1000_LSECTXOCTP 0x04310 /* LinkSec Protected Tx Octets Count - OutOctetsProtected */ -#define E1000_LSECRXUT 0x04314 /* LinkSec Untagged non-Strict Rx Packet Count - InPktsUntagged/InPktsNoTag */ -#define E1000_LSECRXOCTD 0x0431C /* LinkSec Rx Octets Decrypted Count - InOctetsDecrypted */ -#define E1000_LSECRXOCTV 0x04320 /* LinkSec Rx Octets Validated - InOctetsValidated */ -#define 
E1000_LSECRXBAD 0x04324 /* LinkSec Rx Bad Tag - InPktsBadTag */ -#define E1000_LSECRXNOSCI 0x04328 /* LinkSec Rx Packet No SCI Count - InPktsNoSci */ -#define E1000_LSECRXUNSCI 0x0432C /* LinkSec Rx Packet Unknown SCI Count - InPktsUnknownSci */ -#define E1000_LSECRXUNCH 0x04330 /* LinkSec Rx Unchecked Packets Count - InPktsUnchecked */ -#define E1000_LSECRXDELAY 0x04340 /* LinkSec Rx Delayed Packet Count - InPktsDelayed */ -#define E1000_LSECRXLATE 0x04350 /* LinkSec Rx Late Packets Count - InPktsLate */ -#define E1000_LSECRXOK(_n) (0x04360 + (0x04 * (_n))) /* LinkSec Rx Packet OK Count - InPktsOk */ -#define E1000_LSECRXINV(_n) (0x04380 + (0x04 * (_n))) /* LinkSec Rx Invalid Count - InPktsInvalid */ -#define E1000_LSECRXNV(_n) (0x043A0 + (0x04 * (_n))) /* LinkSec Rx Not Valid Count - InPktsNotValid */ -#define E1000_LSECRXUNSA 0x043C0 /* LinkSec Rx Unused SA Count - InPktsUnusedSa */ -#define E1000_LSECRXNUSA 0x043D0 /* LinkSec Rx Not Using SA Count - InPktsNotUsingSa */ -#define E1000_LSECTXCAP 0x0B000 /* LinkSec Tx Capabilities Register - RO */ -#define E1000_LSECRXCAP 0x0B300 /* LinkSec Rx Capabilities Register - RO */ -#define E1000_LSECTXCTRL 0x0B004 /* LinkSec Tx Control - RW */ -#define E1000_LSECRXCTRL 0x0B304 /* LinkSec Rx Control - RW */ -#define E1000_LSECTXSCL 0x0B008 /* LinkSec Tx SCI Low - RW */ -#define E1000_LSECTXSCH 0x0B00C /* LinkSec Tx SCI High - RW */ -#define E1000_LSECTXSA 0x0B010 /* LinkSec Tx SA0 - RW */ -#define E1000_LSECTXPN0 0x0B018 /* LinkSec Tx SA PN 0 - RW */ -#define E1000_LSECTXPN1 0x0B01C /* LinkSec Tx SA PN 1 - RW */ -#define E1000_LSECRXSCL 0x0B3D0 /* LinkSec Rx SCI Low - RW */ -#define E1000_LSECRXSCH 0x0B3E0 /* LinkSec Rx SCI High - RW */ -#define E1000_LSECTXKEY0(_n) (0x0B020 + (0x04 * (_n))) /* LinkSec Tx 128-bit Key 0 - WO */ -#define E1000_LSECTXKEY1(_n) (0x0B030 + (0x04 * (_n))) /* LinkSec Tx 128-bit Key 1 - WO */ -#define E1000_LSECRXSA(_n) (0x0B310 + (0x04 * (_n))) /* LinkSec Rx SAs - RW */ -#define E1000_LSECRXPN(_n) (0x0B330 + (0x04 * (_n))) /* LinkSec Rx SAs - RW */ -/* - * LinkSec Rx Keys - where _n is the SA no. and _m the 4 dwords of the 128 bit +#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \ + (0x0E018 + ((_n) * 0x40))) +#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \ + (0x0E028 + ((_n) * 0x40))) +#define E1000_TDWBAL(_n) ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) : \ + (0x0E038 + ((_n) * 0x40))) +#define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) : \ + (0x0E03C + ((_n) * 0x40))) +#define E1000_TARC(_n) (0x03840 + ((_n) * 0x100)) +#define E1000_RSRPD 0x02C00 /* Rx Small Packet Detect - RW */ +#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */ +#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */ +#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4)) +#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ + (0x054E0 + ((_i - 16) * 8))) +#define E1000_RAH(_i) (((_i) <= 15) ? 
(0x05404 + ((_i) * 8)) : \ + (0x054E4 + ((_i - 16) * 8))) +#define E1000_SHRAL(_i) (0x05438 + ((_i) * 8)) +#define E1000_SHRAH(_i) (0x0543C + ((_i) * 8)) +#define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8)) +#define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4)) +#define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4)) +#define E1000_FFMT_REG(_i) (0x09000 + ((_i) * 8)) +#define E1000_FFVT_REG(_i) (0x09800 + ((_i) * 8)) +#define E1000_FFLT_REG(_i) (0x05F00 + ((_i) * 8)) +#define E1000_PBSLAC 0x03100 /* Pkt Buffer Slave Access Control */ +#define E1000_PBSLAD(_n) (0x03110 + (0x4 * (_n))) /* Pkt Buffer DWORD */ +#define E1000_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */ +/* Same as TXPBS, renamed for newer Si - RW */ +#define E1000_ITPBS 0x03404 +#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */ +#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */ +#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */ +#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */ +#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */ +#define E1000_TDPUMB 0x0357C /* DMA Tx Desc uC Mail Box - RW */ +#define E1000_TDPUAD 0x03580 /* DMA Tx Desc uC Addr Command - RW */ +#define E1000_TDPUWD 0x03584 /* DMA Tx Desc uC Data Write - RW */ +#define E1000_TDPURD 0x03588 /* DMA Tx Desc uC Data Read - RW */ +#define E1000_TDPUCTL 0x0358C /* DMA Tx Desc uC Control - RW */ +#define E1000_DTXCTL 0x03590 /* DMA Tx Control - RW */ +#define E1000_DTXTCPFLGL 0x0359C /* DMA Tx Control flag low - RW */ +#define E1000_DTXTCPFLGH 0x035A0 /* DMA Tx Control flag high - RW */ +/* DMA Tx Max Total Allow Size Reqs - RW */ +#define E1000_DTXMXSZRQ 0x03540 +#define E1000_TIDV 0x03820 /* Tx Interrupt Delay Value - RW */ +#define E1000_TADV 0x0382C /* Tx Interrupt Absolute Delay Val - RW */ +#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ +#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ +#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ +#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */ +#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */ +#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */ +#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */ +#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */ +#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */ +#define E1000_COLC 0x04028 /* Collision Count - R/clr */ +#define E1000_DC 0x04030 /* Defer Count - R/clr */ +#define E1000_TNCRS 0x04034 /* Tx-No CRS - R/clr */ +#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */ +#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */ +#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */ +#define E1000_XONRXC 0x04048 /* XON Rx Count - R/clr */ +#define E1000_XONTXC 0x0404C /* XON Tx Count - R/clr */ +#define E1000_XOFFRXC 0x04050 /* XOFF Rx Count - R/clr */ +#define E1000_XOFFTXC 0x04054 /* XOFF Tx Count - R/clr */ +#define E1000_FCRUC 0x04058 /* Flow Control Rx Unsupported Count- R/clr */ +#define E1000_PRC64 0x0405C /* Packets Rx (64 bytes) - R/clr */ +#define E1000_PRC127 0x04060 /* Packets Rx (65-127 bytes) - R/clr */ +#define E1000_PRC255 0x04064 /* Packets Rx (128-255 bytes) - R/clr */ +#define E1000_PRC511 0x04068 /* Packets Rx (255-511 bytes) - R/clr */ +#define E1000_PRC1023 0x0406C /* Packets Rx (512-1023 bytes) - R/clr */ +#define E1000_PRC1522 0x04070 /* Packets Rx (1024-1522 bytes) - R/clr */ +#define E1000_GPRC 0x04074 /* Good Packets Rx Count - R/clr */ +#define 
E1000_BPRC 0x04078 /* Broadcast Packets Rx Count - R/clr */ +#define E1000_MPRC 0x0407C /* Multicast Packets Rx Count - R/clr */ +#define E1000_GPTC 0x04080 /* Good Packets Tx Count - R/clr */ +#define E1000_GORCL 0x04088 /* Good Octets Rx Count Low - R/clr */ +#define E1000_GORCH 0x0408C /* Good Octets Rx Count High - R/clr */ +#define E1000_GOTCL 0x04090 /* Good Octets Tx Count Low - R/clr */ +#define E1000_GOTCH 0x04094 /* Good Octets Tx Count High - R/clr */ +#define E1000_RNBC 0x040A0 /* Rx No Buffers Count - R/clr */ +#define E1000_RUC 0x040A4 /* Rx Undersize Count - R/clr */ +#define E1000_RFC 0x040A8 /* Rx Fragment Count - R/clr */ +#define E1000_ROC 0x040AC /* Rx Oversize Count - R/clr */ +#define E1000_RJC 0x040B0 /* Rx Jabber Count - R/clr */ +#define E1000_MGTPRC 0x040B4 /* Management Packets Rx Count - R/clr */ +#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ +#define E1000_MGTPTC 0x040BC /* Management Packets Tx Count - R/clr */ +#define E1000_TORL 0x040C0 /* Total Octets Rx Low - R/clr */ +#define E1000_TORH 0x040C4 /* Total Octets Rx High - R/clr */ +#define E1000_TOTL 0x040C8 /* Total Octets Tx Low - R/clr */ +#define E1000_TOTH 0x040CC /* Total Octets Tx High - R/clr */ +#define E1000_TPR 0x040D0 /* Total Packets Rx - R/clr */ +#define E1000_TPT 0x040D4 /* Total Packets Tx - R/clr */ +#define E1000_PTC64 0x040D8 /* Packets Tx (64 bytes) - R/clr */ +#define E1000_PTC127 0x040DC /* Packets Tx (65-127 bytes) - R/clr */ +#define E1000_PTC255 0x040E0 /* Packets Tx (128-255 bytes) - R/clr */ +#define E1000_PTC511 0x040E4 /* Packets Tx (256-511 bytes) - R/clr */ +#define E1000_PTC1023 0x040E8 /* Packets Tx (512-1023 bytes) - R/clr */ +#define E1000_PTC1522 0x040EC /* Packets Tx (1024-1522 Bytes) - R/clr */ +#define E1000_MPTC 0x040F0 /* Multicast Packets Tx Count - R/clr */ +#define E1000_BPTC 0x040F4 /* Broadcast Packets Tx Count - R/clr */ +#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context Tx - R/clr */ +#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context Tx Fail - R/clr */ +#define E1000_IAC 0x04100 /* Interrupt Assertion Count */ +#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Pkt Timer Expire Count */ +#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Abs Timer Expire Count */ +#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Pkt Timer Expire Count */ +#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Abs Timer Expire Count */ +#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */ +#define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Min Thresh Count */ +#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Desc Min Thresh Count */ +#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */ + +/* Virtualization statistical counters */ +#define E1000_PFVFGPRC(_n) (0x010010 + (0x100 * (_n))) +#define E1000_PFVFGPTC(_n) (0x010014 + (0x100 * (_n))) +#define E1000_PFVFGORC(_n) (0x010018 + (0x100 * (_n))) +#define E1000_PFVFGOTC(_n) (0x010034 + (0x100 * (_n))) +#define E1000_PFVFMPRC(_n) (0x010038 + (0x100 * (_n))) +#define E1000_PFVFGPRLBC(_n) (0x010040 + (0x100 * (_n))) +#define E1000_PFVFGPTLBC(_n) (0x010044 + (0x100 * (_n))) +#define E1000_PFVFGORLBC(_n) (0x010048 + (0x100 * (_n))) +#define E1000_PFVFGOTLBC(_n) (0x010050 + (0x100 * (_n))) + +/* LinkSec */ +#define E1000_LSECTXUT 0x04300 /* Tx Untagged Pkt Cnt */ +#define E1000_LSECTXPKTE 0x04304 /* Encrypted Tx Pkts Cnt */ +#define E1000_LSECTXPKTP 0x04308 /* Protected Tx Pkt Cnt */ +#define E1000_LSECTXOCTE 0x0430C /* Encrypted Tx 
Octets Cnt */ +#define E1000_LSECTXOCTP 0x04310 /* Protected Tx Octets Cnt */ +#define E1000_LSECRXUT 0x04314 /* Untagged non-Strict Rx Pkt Cnt */ +#define E1000_LSECRXOCTD 0x0431C /* Rx Octets Decrypted Count */ +#define E1000_LSECRXOCTV 0x04320 /* Rx Octets Validated */ +#define E1000_LSECRXBAD 0x04324 /* Rx Bad Tag */ +#define E1000_LSECRXNOSCI 0x04328 /* Rx Packet No SCI Count */ +#define E1000_LSECRXUNSCI 0x0432C /* Rx Packet Unknown SCI Count */ +#define E1000_LSECRXUNCH 0x04330 /* Rx Unchecked Packets Count */ +#define E1000_LSECRXDELAY 0x04340 /* Rx Delayed Packet Count */ +#define E1000_LSECRXLATE 0x04350 /* Rx Late Packets Count */ +#define E1000_LSECRXOK(_n) (0x04360 + (0x04 * (_n))) /* Rx Pkt OK Cnt */ +#define E1000_LSECRXINV(_n) (0x04380 + (0x04 * (_n))) /* Rx Invalid Cnt */ +#define E1000_LSECRXNV(_n) (0x043A0 + (0x04 * (_n))) /* Rx Not Valid Cnt */ +#define E1000_LSECRXUNSA 0x043C0 /* Rx Unused SA Count */ +#define E1000_LSECRXNUSA 0x043D0 /* Rx Not Using SA Count */ +#define E1000_LSECTXCAP 0x0B000 /* Tx Capabilities Register - RO */ +#define E1000_LSECRXCAP 0x0B300 /* Rx Capabilities Register - RO */ +#define E1000_LSECTXCTRL 0x0B004 /* Tx Control - RW */ +#define E1000_LSECRXCTRL 0x0B304 /* Rx Control - RW */ +#define E1000_LSECTXSCL 0x0B008 /* Tx SCI Low - RW */ +#define E1000_LSECTXSCH 0x0B00C /* Tx SCI High - RW */ +#define E1000_LSECTXSA 0x0B010 /* Tx SA0 - RW */ +#define E1000_LSECTXPN0 0x0B018 /* Tx SA PN 0 - RW */ +#define E1000_LSECTXPN1 0x0B01C /* Tx SA PN 1 - RW */ +#define E1000_LSECRXSCL 0x0B3D0 /* Rx SCI Low - RW */ +#define E1000_LSECRXSCH 0x0B3E0 /* Rx SCI High - RW */ +/* LinkSec Tx 128-bit Key 0 - WO */ +#define E1000_LSECTXKEY0(_n) (0x0B020 + (0x04 * (_n))) +/* LinkSec Tx 128-bit Key 1 - WO */ +#define E1000_LSECTXKEY1(_n) (0x0B030 + (0x04 * (_n))) +#define E1000_LSECRXSA(_n) (0x0B310 + (0x04 * (_n))) /* Rx SAs - RW */ +#define E1000_LSECRXPN(_n) (0x0B330 + (0x04 * (_n))) /* Rx SAs - RW */ +/* LinkSec Rx Keys - where _n is the SA no. and _m the 4 dwords of the 128 bit * key - RW. 
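+ *
+ * Worked example: dword 2 of the 128-bit key for SA 1 sits at
+ * E1000_LSECRXKEY(1, 2) = 0x0B350 + (0x10 * 1) + (0x04 * 2) = 0x0B368.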
*/ -#define E1000_LSECRXKEY(_n, _m) (0x0B350 + (0x10 * (_n)) + (0x04 * (_m))) - -#define E1000_SSVPC 0x041A0 /* Switch Security Violation Packet Count */ -#define E1000_IPSCTRL 0xB430 /* IpSec Control Register */ -#define E1000_IPSRXCMD 0x0B408 /* IPSec Rx Command Register - RW */ -#define E1000_IPSRXIDX 0x0B400 /* IPSec Rx Index - RW */ -#define E1000_IPSRXIPADDR(_n) (0x0B420+ (0x04 * (_n))) /* IPSec Rx IPv4/v6 Address - RW */ -#define E1000_IPSRXKEY(_n) (0x0B410 + (0x04 * (_n))) /* IPSec Rx 128-bit Key - RW */ -#define E1000_IPSRXSALT 0x0B404 /* IPSec Rx Salt - RW */ -#define E1000_IPSRXSPI 0x0B40C /* IPSec Rx SPI - RW */ -#define E1000_IPSTXKEY(_n) (0x0B460 + (0x04 * (_n))) /* IPSec Tx 128-bit Key - RW */ -#define E1000_IPSTXSALT 0x0B454 /* IPSec Tx Salt - RW */ -#define E1000_IPSTXIDX 0x0B450 /* IPSec Tx SA IDX - RW */ -#define E1000_PCS_CFG0 0x04200 /* PCS Configuration 0 - RW */ -#define E1000_PCS_LCTL 0x04208 /* PCS Link Control - RW */ -#define E1000_PCS_LSTAT 0x0420C /* PCS Link Status - RO */ -#define E1000_CBTMPC 0x0402C /* Circuit Breaker Tx Packet Count */ -#define E1000_HTDPMC 0x0403C /* Host Transmit Discarded Packets */ -#define E1000_CBRDPC 0x04044 /* Circuit Breaker Rx Dropped Count */ -#define E1000_CBRMPC 0x040FC /* Circuit Breaker Rx Packet Count */ -#define E1000_RPTHC 0x04104 /* Rx Packets To Host */ -#define E1000_HGPTC 0x04118 /* Host Good Packets Tx Count */ -#define E1000_HTCBDPC 0x04124 /* Host Tx Circuit Breaker Dropped Count */ -#define E1000_HGORCL 0x04128 /* Host Good Octets Received Count Low */ -#define E1000_HGORCH 0x0412C /* Host Good Octets Received Count High */ -#define E1000_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */ -#define E1000_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */ -#define E1000_LENERRS 0x04138 /* Length Errors Count */ -#define E1000_SCVPC 0x04228 /* SerDes/SGMII Code Violation Pkt Count */ -#define E1000_HRMPC 0x0A018 /* Header Redirection Missed Packet Count */ -#define E1000_PCS_ANADV 0x04218 /* AN advertisement - RW */ -#define E1000_PCS_LPAB 0x0421C /* Link Partner Ability - RW */ -#define E1000_PCS_NPTX 0x04220 /* AN Next Page Transmit - RW */ -#define E1000_PCS_LPABNP 0x04224 /* Link Partner Ability Next Page - RW */ -#define E1000_1GSTAT_RCV 0x04228 /* 1GSTAT Code Violation Packet Count - RW */ -#define E1000_RXCSUM 0x05000 /* Rx Checksum Control - RW */ -#define E1000_RLPML 0x05004 /* Rx Long Packet Max Length */ -#define E1000_RFCTL 0x05008 /* Receive Filter Control*/ -#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ -#define E1000_RA 0x05400 /* Receive Address - RW Array */ -#define E1000_RA2 0x054E0 /* 2nd half of receive address array - RW Array */ -#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ -#define E1000_VT_CTL 0x0581C /* VMDq Control - RW */ -#define E1000_VFQA0 0x0B000 /* VLAN Filter Queue Array 0 - RW Array */ -#define E1000_VFQA1 0x0B200 /* VLAN Filter Queue Array 1 - RW Array */ -#define E1000_WUC 0x05800 /* Wakeup Control - RW */ -#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */ -#define E1000_WUS 0x05810 /* Wakeup Status - RO */ -#define E1000_MANC 0x05820 /* Management Control - RW */ -#define E1000_IPAV 0x05838 /* IP Address Valid - RW */ -#define E1000_IP4AT 0x05840 /* IPv4 Address Table - RW Array */ -#define E1000_IP6AT 0x05880 /* IPv6 Address Table - RW Array */ -#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */ -#define E1000_WUPM 0x05A00 /* Wakeup Packet Memory - RO A */ -#define E1000_PBACL 0x05B68 /* MSIx PBA Clear - 
Read/Write 1's to clear */ -#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */ -#define E1000_HOST_IF 0x08800 /* Host Interface */ -#define E1000_FFMT 0x09000 /* Flexible Filter Mask Table - RW Array */ -#define E1000_FFVT 0x09800 /* Flexible Filter Value Table - RW Array */ -#define E1000_FHFT(_n) (0x09000 + (_n * 0x100)) /* Flexible Host Filter Table */ -#define E1000_FHFT_EXT(_n) (0x09A00 + (_n * 0x100)) /* Ext Flexible Host Filter Table */ - - -#define E1000_KMRNCTRLSTA 0x00034 /* MAC-PHY interface - RW */ -#define E1000_MDPHYA 0x0003C /* PHY address - RW */ -#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */ -#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */ -#define E1000_CCMCTL 0x05B48 /* CCM Control Register */ -#define E1000_GIOCTL 0x05B44 /* GIO Analog Control Register */ -#define E1000_SCCTL 0x05B4C /* PCIc PLL Configuration Register */ -#define E1000_GCR 0x05B00 /* PCI-Ex Control */ -#define E1000_GCR2 0x05B64 /* PCI-Ex Control #2 */ -#define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */ -#define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */ -#define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */ -#define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */ -#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */ -#define E1000_SWSM 0x05B50 /* SW Semaphore */ -#define E1000_FWSM 0x05B54 /* FW Semaphore */ -#define E1000_SWSM2 0x05B58 /* Driver-only SW semaphore (not used by BOOT agents) */ -#define E1000_DCA_ID 0x05B70 /* DCA Requester ID Information - RO */ -#define E1000_DCA_CTRL 0x05B74 /* DCA Control - RW */ -#define E1000_UFUSE 0x05B78 /* UFUSE - RO */ -#define E1000_FFLT_DBG 0x05F04 /* Debug Register */ -#define E1000_HICR 0x08F00 /* Host Interface Control */ +#define E1000_LSECRXKEY(_n, _m) (0x0B350 + (0x10 * (_n)) + (0x04 * (_m))) + +#define E1000_SSVPC 0x041A0 /* Switch Security Violation Pkt Cnt */ +#define E1000_IPSCTRL 0xB430 /* IpSec Control Register */ +#define E1000_IPSRXCMD 0x0B408 /* IPSec Rx Command Register - RW */ +#define E1000_IPSRXIDX 0x0B400 /* IPSec Rx Index - RW */ +/* IPSec Rx IPv4/v6 Address - RW */ +#define E1000_IPSRXIPADDR(_n) (0x0B420 + (0x04 * (_n))) +/* IPSec Rx 128-bit Key - RW */ +#define E1000_IPSRXKEY(_n) (0x0B410 + (0x04 * (_n))) +#define E1000_IPSRXSALT 0x0B404 /* IPSec Rx Salt - RW */ +#define E1000_IPSRXSPI 0x0B40C /* IPSec Rx SPI - RW */ +/* IPSec Tx 128-bit Key - RW */ +#define E1000_IPSTXKEY(_n) (0x0B460 + (0x04 * (_n))) +#define E1000_IPSTXSALT 0x0B454 /* IPSec Tx Salt - RW */ +#define E1000_IPSTXIDX 0x0B450 /* IPSec Tx SA IDX - RW */ +#define E1000_PCS_CFG0 0x04200 /* PCS Configuration 0 - RW */ +#define E1000_PCS_LCTL 0x04208 /* PCS Link Control - RW */ +#define E1000_PCS_LSTAT 0x0420C /* PCS Link Status - RO */ +#define E1000_CBTMPC 0x0402C /* Circuit Breaker Tx Packet Count */ +#define E1000_HTDPMC 0x0403C /* Host Transmit Discarded Packets */ +#define E1000_CBRDPC 0x04044 /* Circuit Breaker Rx Dropped Count */ +#define E1000_CBRMPC 0x040FC /* Circuit Breaker Rx Packet Count */ +#define E1000_RPTHC 0x04104 /* Rx Packets To Host */ +#define E1000_HGPTC 0x04118 /* Host Good Packets Tx Count */ +#define E1000_HTCBDPC 0x04124 /* Host Tx Circuit Breaker Dropped Count */ +#define E1000_HGORCL 0x04128 /* Host Good Octets Received Count Low */ +#define E1000_HGORCH 0x0412C /* Host Good Octets Received Count High */ +#define E1000_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */ +#define E1000_HGOTCH 0x04134 /* Host 
Good Octets Transmit Count High */ +#define E1000_LENERRS 0x04138 /* Length Errors Count */ +#define E1000_SCVPC 0x04228 /* SerDes/SGMII Code Violation Pkt Count */ +#define E1000_HRMPC 0x0A018 /* Header Redirection Missed Packet Count */ +#define E1000_PCS_ANADV 0x04218 /* AN advertisement - RW */ +#define E1000_PCS_LPAB 0x0421C /* Link Partner Ability - RW */ +#define E1000_PCS_NPTX 0x04220 /* AN Next Page Transmit - RW */ +#define E1000_PCS_LPABNP 0x04224 /* Link Partner Ability Next Pg - RW */ +#define E1000_RXCSUM 0x05000 /* Rx Checksum Control - RW */ +#define E1000_RLPML 0x05004 /* Rx Long Packet Max Length */ +#define E1000_RFCTL 0x05008 /* Receive Filter Control*/ +#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ +#define E1000_RA 0x05400 /* Receive Address - RW Array */ +#define E1000_RA2 0x054E0 /* 2nd half of Rx address array - RW Array */ +#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ +#define E1000_VT_CTL 0x0581C /* VMDq Control - RW */ +#define E1000_CIAA 0x05B88 /* Config Indirect Access Address - RW */ +#define E1000_CIAD 0x05B8C /* Config Indirect Access Data - RW */ +#define E1000_VFQA0 0x0B000 /* VLAN Filter Queue Array 0 - RW Array */ +#define E1000_VFQA1 0x0B200 /* VLAN Filter Queue Array 1 - RW Array */ +#define E1000_WUC 0x05800 /* Wakeup Control - RW */ +#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */ +#define E1000_WUS 0x05810 /* Wakeup Status - RO */ +#define E1000_MANC 0x05820 /* Management Control - RW */ +#define E1000_IPAV 0x05838 /* IP Address Valid - RW */ +#define E1000_IP4AT 0x05840 /* IPv4 Address Table - RW Array */ +#define E1000_IP6AT 0x05880 /* IPv6 Address Table - RW Array */ +#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */ +#define E1000_WUPM 0x05A00 /* Wakeup Packet Memory - RO A */ +#define E1000_PBACL 0x05B68 /* MSIx PBA Clear - Read/Write 1's to clear */ +#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */ +#define E1000_HOST_IF 0x08800 /* Host Interface */ +#define E1000_FFMT 0x09000 /* Flexible Filter Mask Table - RW Array */ +#define E1000_FFVT 0x09800 /* Flexible Filter Value Table - RW Array */ +#define E1000_HIBBA 0x8F40 /* Host Interface Buffer Base Address */ +/* Flexible Host Filter Table */ +#define E1000_FHFT(_n) (0x09000 + ((_n) * 0x100)) +/* Ext Flexible Host Filter Table */ +#define E1000_FHFT_EXT(_n) (0x09A00 + ((_n) * 0x100)) + + +#define E1000_KMRNCTRLSTA 0x00034 /* MAC-PHY interface - RW */ +#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */ +/* Management Decision Filters */ +#define E1000_MDEF(_n) (0x05890 + (4 * (_n))) +#define E1000_SW_FW_SYNC 0x05B5C /* SW-FW Synchronization - RW */ +#define E1000_CCMCTL 0x05B48 /* CCM Control Register */ +#define E1000_GIOCTL 0x05B44 /* GIO Analog Control Register */ +#define E1000_SCCTL 0x05B4C /* PCIc PLL Configuration Register */ +#define E1000_GCR 0x05B00 /* PCI-Ex Control */ +#define E1000_GCR2 0x05B64 /* PCI-Ex Control #2 */ +#define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */ +#define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */ +#define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */ +#define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */ +#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */ +#define E1000_SWSM 0x05B50 /* SW Semaphore */ +#define E1000_FWSM 0x05B54 /* FW Semaphore */ +/* Driver-only SW semaphore (not used by BOOT agents) */ +#define E1000_SWSM2 0x05B58 +#define E1000_DCA_ID 0x05B70 /* DCA Requester ID 
Information - RO */ +#define E1000_DCA_CTRL 0x05B74 /* DCA Control - RW */ +#define E1000_UFUSE 0x05B78 /* UFUSE - RO */ +#define E1000_FFLT_DBG 0x05F04 /* Debug Register */ +#define E1000_HICR 0x08F00 /* Host Interface Control */ +#define E1000_FWSTS 0x08F0C /* FW Status */ /* RSS registers */ -#define E1000_CPUVEC 0x02C10 /* CPU Vector Register - RW */ -#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */ -#define E1000_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */ -#define E1000_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate Interrupt Ext*/ -#define E1000_IMIRVP 0x05AC0 /* Immediate Interrupt Rx VLAN Priority - RW */ -#define E1000_MSIXBM(_i) (0x01600 + ((_i) * 4)) /* MSI-X Allocation Register - * (_i) - RW */ -#define E1000_MSIXTADD(_i) (0x0C000 + ((_i) * 0x10)) /* MSI-X Table entry addr - * low reg - RW */ -#define E1000_MSIXTUADD(_i) (0x0C004 + ((_i) * 0x10)) /* MSI-X Table entry addr - * upper reg - RW */ -#define E1000_MSIXTMSG(_i) (0x0C008 + ((_i) * 0x10)) /* MSI-X Table entry - * message reg - RW */ -#define E1000_MSIXVCTRL(_i) (0x0C00C + ((_i) * 0x10)) /* MSI-X Table entry - * vector ctrl reg - RW */ -#define E1000_MSIXPBA 0x0E000 /* MSI-X Pending bit array */ -#define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) /* Redirection Table - RW */ -#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW */ -#define E1000_RSSIM 0x05864 /* RSS Interrupt Mask */ -#define E1000_RSSIR 0x05868 /* RSS Interrupt Request */ +#define E1000_CPUVEC 0x02C10 /* CPU Vector Register - RW */ +#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */ +#define E1000_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */ +#define E1000_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate INTR Ext*/ +#define E1000_IMIRVP 0x05AC0 /* Immediate INT Rx VLAN Priority -RW */ +#define E1000_MSIXBM(_i) (0x01600 + ((_i) * 4)) /* MSI-X Alloc Reg -RW */ +#define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) /* Redirection Table - RW */ +#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW */ +#define E1000_RSSIM 0x05864 /* RSS Interrupt Mask */ +#define E1000_RSSIR 0x05868 /* RSS Interrupt Request */ /* VT Registers */ -#define E1000_SWPBS 0x03004 /* Switch Packet Buffer Size - RW */ -#define E1000_MBVFICR 0x00C80 /* Mailbox VF Cause - RWC */ -#define E1000_MBVFIMR 0x00C84 /* Mailbox VF int Mask - RW */ -#define E1000_VFLRE 0x00C88 /* VF Register Events - RWC */ -#define E1000_VFRE 0x00C8C /* VF Receive Enables */ -#define E1000_VFTE 0x00C90 /* VF Transmit Enables */ -#define E1000_QDE 0x02408 /* Queue Drop Enable - RW */ -#define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */ -#define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */ -#define E1000_UTA 0x0A000 /* Unicast Table Array - RW */ -#define E1000_IOVTCL 0x05BBC /* IOV Control Register */ -#define E1000_VMRCTL 0X05D80 /* Virtual Mirror Rule Control */ +#define E1000_SWPBS 0x03004 /* Switch Packet Buffer Size - RW */ +#define E1000_MBVFICR 0x00C80 /* Mailbox VF Cause - RWC */ +#define E1000_MBVFIMR 0x00C84 /* Mailbox VF int Mask - RW */ +#define E1000_VFLRE 0x00C88 /* VF Register Events - RWC */ +#define E1000_VFRE 0x00C8C /* VF Receive Enables */ +#define E1000_VFTE 0x00C90 /* VF Transmit Enables */ +#define E1000_QDE 0x02408 /* Queue Drop Enable - RW */ +#define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */ +#define E1000_WVBR 0x03554 /* VM Wrong Behavior - RWS */ +#define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */ +#define E1000_UTA 0x0A000 /* Unicast Table Array - RW */ +#define 
E1000_IOVTCL 0x05BBC /* IOV Control Register */ +#define E1000_VMRCTL 0X05D80 /* Virtual Mirror Rule Control */ +#define E1000_VMRVLAN 0x05D90 /* Virtual Mirror Rule VLAN */ +#define E1000_VMRVM 0x05DA0 /* Virtual Mirror Rule VM */ +#define E1000_MDFB 0x03558 /* Malicious Driver free block */ +#define E1000_LVMMC 0x03548 /* Last VM Misbehavior cause */ +#define E1000_TXSWC 0x05ACC /* Tx Switch Control */ +#define E1000_SCCRL 0x05DB0 /* Storm Control Control */ +#define E1000_BSCTRH 0x05DB8 /* Broadcast Storm Control Threshold */ +#define E1000_MSCTRH 0x05DBC /* Multicast Storm Control Threshold */ /* These act per VF so an array friendly macro is used */ -#define E1000_V2PMAILBOX(_n) (0x00C40 + (4 * (_n))) -#define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n))) -#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n))) -#define E1000_VFVMBMEM(_n) (0x00800 + (_n)) -#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n))) -#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN Virtual Machine - * Filter - RW */ -#define E1000_VMVIR(_n) (0x03700 + (4 * (_n))) -/* Time Sync */ -#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */ -#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */ -#define E1000_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */ -#define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */ -#define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */ -#define E1000_RXSATRL 0x0B62C /* Rx timestamp attribute low - RO */ -#define E1000_RXSATRH 0x0B630 /* Rx timestamp attribute high - RO */ -#define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */ -#define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */ -#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */ -#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */ -#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */ -#define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */ -#define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */ +#define E1000_V2PMAILBOX(_n) (0x00C40 + (4 * (_n))) +#define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n))) +#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n))) +#define E1000_VFVMBMEM(_n) (0x00800 + (_n)) +#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n))) +/* VLAN Virtual Machine Filter - RW */ +#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) +#define E1000_VMVIR(_n) (0x03700 + (4 * (_n))) +#define E1000_DVMOLR(_n) (0x0C038 + (0x40 * (_n))) /* DMA VM offload */ +#define E1000_VTCTRL(_n) (0x10000 + (0x100 * (_n))) /* VT Control */ +#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */ +#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */ +#define E1000_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */ +#define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */ +#define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */ +#define E1000_RXSATRL 0x0B62C /* Rx timestamp attribute low - RO */ +#define E1000_RXSATRH 0x0B630 /* Rx timestamp attribute high - RO */ +#define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */ +#define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */ +#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */ +#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */ +#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */ +#define E1000_TIMADJL 0x0B60C /* Time sync time adjustment offset Low - RW */ +#define E1000_TIMADJH 0x0B610 /* Time sync time adjustment offset High - RW */ +#define E1000_TSAUXC 
0x0B640 /* Timesync Auxiliary Control register */ +#define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */ +#define E1000_TSICR 0x0B66C /* Interrupt Cause Register */ +#define E1000_TSIM 0x0B674 /* Interrupt Mask Register */ /* Filtering Registers */ -#define E1000_SAQF(_n) (0x05980 + (4 * (_n))) /* Source Address Queue Fltr */ -#define E1000_DAQF(_n) (0x059A0 + (4 * (_n))) /* Dest Address Queue Fltr */ -#define E1000_SPQF(_n) (0x059C0 + (4 * (_n))) /* Source Port Queue Fltr */ -#define E1000_FTQF(_n) (0x059E0 + (4 * (_n))) /* 5-tuple Queue Fltr */ -#define E1000_TTQF(_n) (0x059E0 + (4 * (_n))) /* 2-tuple Queue Fltr */ -#define E1000_SYNQF(_n) (0x055FC + (4 * (_n))) /* SYN Packet Queue Fltr */ -#define E1000_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */ - -#define E1000_RTTDCS 0x3600 /* Reedtown Tx Desc plane control and status */ -#define E1000_RTTPCS 0x3474 /* Reedtown Tx Packet Plane control and status */ -#define E1000_RTRPCS 0x2474 /* Rx packet plane control and status */ -#define E1000_RTRUP2TC 0x05AC4 /* Rx User Priority to Traffic Class */ -#define E1000_RTTUP2TC 0x0418 /* Transmit User Priority to Traffic Class */ -#define E1000_RTTDTCRC(_n) (0x3610 + ((_n) * 4)) /* Tx Desc plane TC Rate-scheduler config */ -#define E1000_RTTPTCRC(_n) (0x3480 + ((_n) * 4)) /* Tx Packet plane TC Rate-Scheduler Config */ -#define E1000_RTRPTCRC(_n) (0x2480 + ((_n) * 4)) /* Rx Packet plane TC Rate-Scheduler Config */ -#define E1000_RTTDTCRS(_n) (0x3630 + ((_n) * 4)) /* Tx Desc Plane TC Rate-Scheduler Status */ -#define E1000_RTTDTCRM(_n) (0x3650 + ((_n) * 4)) /* Tx Desc Plane TC Rate-Scheduler MMW */ -#define E1000_RTTPTCRS(_n) (0x34A0 + ((_n) * 4)) /* Tx Packet plane TC Rate-Scheduler Status */ -#define E1000_RTTPTCRM(_n) (0x34C0 + ((_n) * 4)) /* Tx Packet plane TC Rate-scheduler MMW */ -#define E1000_RTRPTCRS(_n) (0x24A0 + ((_n) * 4)) /* Rx Packet plane TC Rate-Scheduler Status */ -#define E1000_RTRPTCRM(_n) (0x24C0 + ((_n) * 4)) /* Rx Packet plane TC Rate-Scheduler MMW */ -#define E1000_RTTDVMRM(_n) (0x3670 + ((_n) * 4)) /* Tx Desc plane VM Rate-Scheduler MMW*/ -#define E1000_RTTBCNRM(_n) (0x3690 + ((_n) * 4)) /* Tx BCN Rate-Scheduler MMW */ -#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select */ -#define E1000_RTTDVMRC 0x3608 /* Tx Desc Plane VM Rate-Scheduler Config */ -#define E1000_RTTDVMRS 0x360C /* Tx Desc Plane VM Rate-Scheduler Status */ -#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config */ -#define E1000_RTTBCNRS 0x36B4 /* Tx BCN Rate-Scheduler Status */ -#define E1000_RTTBCNCR 0xB200 /* Tx BCN Control Register */ -#define E1000_RTTBCNTG 0x35A4 /* Tx BCN Tagging */ -#define E1000_RTTBCNCP 0xB208 /* Tx BCN Congestion point */ -#define E1000_RTRBCNCR 0xB20C /* Rx BCN Control Register */ -#define E1000_RTTBCNRD 0x36B8 /* Tx BCN Rate Drift */ -#define E1000_PFCTOP 0x1080 /* Priority Flow Control Type and Opcode */ -#define E1000_RTTBCNIDX 0xB204 /* Tx BCN Congestion Point */ -#define E1000_RTTBCNACH 0x0B214 /* Tx BCN Control High */ -#define E1000_RTTBCNACL 0x0B210 /* Tx BCN Control Low */ +#define E1000_SAQF(_n) (0x05980 + (4 * (_n))) /* Source Address Queue Fltr */ +#define E1000_DAQF(_n) (0x059A0 + (4 * (_n))) /* Dest Address Queue Fltr */ +#define E1000_SPQF(_n) (0x059C0 + (4 * (_n))) /* Source Port Queue Fltr */ +#define E1000_FTQF(_n) (0x059E0 + (4 * (_n))) /* 5-tuple Queue Fltr */ +#define E1000_TTQF(_n) (0x059E0 + (4 * (_n))) /* 2-tuple Queue Fltr */ +#define E1000_SYNQF(_n) (0x055FC + (4 * (_n))) /* SYN Packet Queue Fltr */ +#define 
E1000_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */ + +#define E1000_RTTDCS 0x3600 /* Reedtown Tx Desc plane control and status */ +#define E1000_RTTPCS 0x3474 /* Reedtown Tx Packet Plane control and status */ +#define E1000_RTRPCS 0x2474 /* Rx packet plane control and status */ +#define E1000_RTRUP2TC 0x05AC4 /* Rx User Priority to Traffic Class */ +#define E1000_RTTUP2TC 0x0418 /* Transmit User Priority to Traffic Class */ +/* Tx Desc plane TC Rate-scheduler config */ +#define E1000_RTTDTCRC(_n) (0x3610 + ((_n) * 4)) +/* Tx Packet plane TC Rate-Scheduler Config */ +#define E1000_RTTPTCRC(_n) (0x3480 + ((_n) * 4)) +/* Rx Packet plane TC Rate-Scheduler Config */ +#define E1000_RTRPTCRC(_n) (0x2480 + ((_n) * 4)) +/* Tx Desc Plane TC Rate-Scheduler Status */ +#define E1000_RTTDTCRS(_n) (0x3630 + ((_n) * 4)) +/* Tx Desc Plane TC Rate-Scheduler MMW */ +#define E1000_RTTDTCRM(_n) (0x3650 + ((_n) * 4)) +/* Tx Packet plane TC Rate-Scheduler Status */ +#define E1000_RTTPTCRS(_n) (0x34A0 + ((_n) * 4)) +/* Tx Packet plane TC Rate-scheduler MMW */ +#define E1000_RTTPTCRM(_n) (0x34C0 + ((_n) * 4)) +/* Rx Packet plane TC Rate-Scheduler Status */ +#define E1000_RTRPTCRS(_n) (0x24A0 + ((_n) * 4)) +/* Rx Packet plane TC Rate-Scheduler MMW */ +#define E1000_RTRPTCRM(_n) (0x24C0 + ((_n) * 4)) +/* Tx Desc plane VM Rate-Scheduler MMW*/ +#define E1000_RTTDVMRM(_n) (0x3670 + ((_n) * 4)) +/* Tx BCN Rate-Scheduler MMW */ +#define E1000_RTTBCNRM(_n) (0x3690 + ((_n) * 4)) +#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select */ +#define E1000_RTTDVMRC 0x3608 /* Tx Desc Plane VM Rate-Scheduler Config */ +#define E1000_RTTDVMRS 0x360C /* Tx Desc Plane VM Rate-Scheduler Status */ +#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config */ +#define E1000_RTTBCNRS 0x36B4 /* Tx BCN Rate-Scheduler Status */ +#define E1000_RTTBCNCR 0xB200 /* Tx BCN Control Register */ +#define E1000_RTTBCNTG 0x35A4 /* Tx BCN Tagging */ +#define E1000_RTTBCNCP 0xB208 /* Tx BCN Congestion point */ +#define E1000_RTRBCNCR 0xB20C /* Rx BCN Control Register */ +#define E1000_RTTBCNRD 0x36B8 /* Tx BCN Rate Drift */ +#define E1000_PFCTOP 0x1080 /* Priority Flow Control Type and Opcode */ +#define E1000_RTTBCNIDX 0xB204 /* Tx BCN Congestion Point */ +#define E1000_RTTBCNACH 0x0B214 /* Tx BCN Control High */ +#define E1000_RTTBCNACL 0x0B210 /* Tx BCN Control Low */ /* DMA Coalescing registers */ -#define E1000_DMACR 0x02508 /* Control Register */ -#define E1000_DMCTXTH 0x03550 /* Transmit Threshold */ -#define E1000_DMCTLX 0x02514 /* Time to Lx Request */ -#define E1000_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */ -#define E1000_DMCCNT 0x05DD4 /* Current RX Count */ -#define E1000_FCRTC 0x02170 /* Flow Control Rx high watermark */ -#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */ +#define E1000_DMACR 0x02508 /* Control Register */ +#define E1000_DMCTXTH 0x03550 /* Transmit Threshold */ +#define E1000_DMCTLX 0x02514 /* Time to Lx Request */ +#define E1000_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */ +#define E1000_DMCCNT 0x05DD4 /* Current Rx Count */ +#define E1000_FCRTC 0x02170 /* Flow Control Rx high watermark */ +#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */ /* PCIe Parity Status Register */ -#define E1000_PCIEERRSTS 0x05BA8 +#define E1000_PCIEERRSTS 0x05BA8 + +#define E1000_PROXYS 0x5F64 /* Proxying Status */ +#define E1000_PROXYFC 0x5F60 /* Proxying Filter Control */ +/* Thermal sensor configuration and status registers */ +#define E1000_THMJT 0x08100 /* Junction Temperature */ 
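The per-queue convenience macros defined above (E1000_RDBAL(_n), E1000_RDBAH(_n), E1000_RDLEN(_n), E1000_RDH(_n), E1000_RDT(_n), and their Tx counterparts) hide the register layout split between queues 0-3 (0x100 stride from 0x02800/0x03800) and queues 4 and up (0x40 stride from 0x0C000/0x0E000); for example, E1000_RDBAL(5) resolves to 0x0C000 + 5 * 0x40 = 0x0C140. A minimal sketch of programming an Rx descriptor ring with these macros, assuming a Linux-style ioremap()ed CSR BAR, writel() accessors, and an illustrative 16-byte descriptor size (the helper name is hypothetical, not the driver's actual code):

#include <linux/io.h>      /* writel(), __iomem */
#include <linux/types.h>   /* u8, u32, u64 */
#include "e1000_regs.h"    /* the E1000_RD*(_n) macros above */

/* Hypothetical helper: point hardware Rx queue 'n' at a DMA ring of
 * 'count' 16-byte descriptors and mark it empty (head == tail == 0). */
static void example_setup_rx_ring(u8 __iomem *hw_addr, int n,
                                  u64 ring_dma, u32 count)
{
        writel((u32)(ring_dma & 0xffffffff), hw_addr + E1000_RDBAL(n));
        writel((u32)(ring_dma >> 32),        hw_addr + E1000_RDBAH(n));
        writel(count * 16,                   hw_addr + E1000_RDLEN(n));
        writel(0, hw_addr + E1000_RDH(n));
        writel(0, hw_addr + E1000_RDT(n));
}

The Tx macros use the same "(_n) < 4 ? ... : ..." split, so the one helper pattern covers both register banks.
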
+#define E1000_THLOWTC 0x08104 /* Low Threshold Control */
+#define E1000_THMIDTC 0x08108 /* Mid Threshold Control */
+#define E1000_THHIGHTC 0x0810C /* High Threshold Control */
+#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */
+
+/* Energy Efficient Ethernet "EEE" registers */
+#define E1000_IPCNFG 0x0E38 /* Internal PHY Configuration */
+#define E1000_LTRC 0x01A0 /* Latency Tolerance Reporting Control */
+#define E1000_EEER 0x0E30 /* Energy Efficient Ethernet "EEE"*/
+#define E1000_EEE_SU 0x0E34 /* EEE Setup */
+#define E1000_TLPIC 0x4148 /* EEE Tx LPI Count - TLPIC */
+#define E1000_RLPIC 0x414C /* EEE Rx LPI Count - RLPIC */
+
+/* OS2BMC Registers */
+#define E1000_B2OSPC 0x08FE0 /* BMC2OS packets sent by BMC */
+#define E1000_B2OGPRC 0x04158 /* BMC2OS packets received by host */
+#define E1000_O2BGPTC 0x08FE4 /* OS2BMC packets received by BMC */
+#define E1000_O2BSPC 0x0415C /* OS2BMC packets transmitted by host */
+
+
+
 #endif
diff --git a/vmkdrivers/src_9/drivers/net/igb/igb.h b/vmkdrivers/src_9/drivers/net/igb/igb.h
index 951a2913ae2291180f37e740f3d120637a229a13..4f534fa65c5392b813d1d1f26f9bb5a79cff0c23 100644
--- a/vmkdrivers/src_9/drivers/net/igb/igb.h
+++ b/vmkdrivers/src_9/drivers/net/igb/igb.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2009 Intel Corporation.
+  Copyright(c) 2007-2013 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -25,15 +25,13 @@
 
 *******************************************************************************/
 
-
 /* Linux PRO/1000 Ethernet Driver main header file */
 
 #ifndef _IGB_H_
 #define _IGB_H_
 
-#ifdef IGB_LRO
-#include
-#endif
+#include
+
 #ifdef __VMKLNX__
 #define NODE_ADDRESS_SIZE 6
@@ -52,12 +50,6 @@
 #include
 #endif
 
-#ifdef SIOCSHWTSTAMP
-#include
-#include
-#include
-
-#endif
 struct igb_adapter;
 #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
@@ -67,33 +59,37 @@ struct igb_adapter;
 #include
 #endif
 
-#ifndef SIOCSHWTSTAMP
-#undef IGB_PER_PKT_TIMESTAMP
-#endif
-
-
 #include "kcompat.h"
 
+#ifdef HAVE_SCTP
+#include
+#endif
+
 #include "e1000_api.h"
 #include "e1000_82575.h"
+#include "e1000_manage.h"
+#include "e1000_mbx.h"
 
 #define IGB_ERR(args...) printk(KERN_ERR "igb: " args)
 
-/* Disable Netqueue for 1G driver */
-#if defined( __VMKLNX__)
-#ifdef __VMKNETDDI_QUEUEOPS__
-#undef __VMKNETDDI_QUEUEOPS__
-#endif
-#endif /* defined(__VMKLNX__) */
-
 #define PFX "igb: "
 
 #define DPRINTK(nlevel, klevel, fmt, args...) \
        (void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
        printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
               __FUNCTION__ , ## args))
 
+#ifdef HAVE_PTP_1588_CLOCK
+#include
+#include
+#include
+#endif /* HAVE_PTP_1588_CLOCK */
+
+
 /* Interrupt defines */
 #define IGB_START_ITR 648 /* ~6000 ints/sec */
+#define IGB_4K_ITR 980
+#define IGB_20K_ITR 196
+#define IGB_70K_ITR 56
 
 /* Interrupt modes, as used by the IntMode parameter */
 #define IGB_INT_MODE_LEGACY 0
@@ -102,6 +98,7 @@ struct igb_adapter;
 
 /* TX/RX descriptor defines */
 #define IGB_DEFAULT_TXD 256
+#define IGB_DEFAULT_TX_WORK 128
 #define IGB_MIN_TXD 80
 #define IGB_MAX_TXD 4096
@@ -113,7 +110,7 @@ struct igb_adapter;
 #define IGB_MAX_ITR_USECS 8191 /* 120 irq/sec */
 
 #define NON_Q_VECTORS 1
-#define MAX_Q_VECTORS 8
+#define MAX_Q_VECTORS 10
 
 /* Transmit and receive queues */
 #define IGB_MAX_RX_QUEUES 16
@@ -121,10 +118,13 @@ struct igb_adapter;
 
 #define IGB_MAX_VF_MC_ENTRIES 30
 #define IGB_MAX_VF_FUNCTIONS 8
-#define IGB_MAX_VFTA_ENTRIES 128
+#define IGB_82576_VF_DEV_ID 0x10CA
+#define IGB_I350_VF_DEV_ID 0x1520
 #define IGB_MAX_UTA_ENTRIES 128
 #define MAX_EMULATION_MAC_ADDRS 16
 #define OUI_LEN 3
+#define IGB_MAX_VMDQ_QUEUES 8
+
 
 struct vf_data_storage {
        unsigned char vf_mac_addresses[ETH_ALEN];
@@ -136,11 +136,20 @@ struct vf_data_storage {
        u32 uta_table_copy[IGB_MAX_UTA_ENTRIES];
        u32 flags;
        unsigned long last_nack;
+#ifdef IFLA_VF_MAX
+       u16 pf_vlan; /* When set, guest VLAN config not allowed. */
+       u16 pf_qos;
+       u16 tx_rate;
+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
+       bool spoofchk_enabled;
+#endif
+#endif
 };
 
 #define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */
 #define IGB_VF_FLAG_UNI_PROMISC 0x00000002 /* VF has unicast promisc */
 #define IGB_VF_FLAG_MULTI_PROMISC 0x00000004 /* VF has multicast promisc */
+#define IGB_VF_FLAG_PF_SET_MAC 0x00000008 /* PF has set MAC address */
 
 /* RX descriptor control thresholds.
  * PTHRESH - MAC will consider prefetch if it has fewer than this number of
@@ -153,25 +162,33 @@ struct vf_data_storage {
  * descriptors until either it has this many to write back, or the
  * ITR timer expires.
  */
-#define IGB_RX_PTHRESH (hw->mac.type <= e1000_82576 ? 16 : 8)
-#define IGB_RX_HTHRESH 8
-#define IGB_RX_WTHRESH 1
-#define IGB_TX_PTHRESH 8
-#define IGB_TX_HTHRESH 1
-#define IGB_TX_WTHRESH ((hw->mac.type == e1000_82576 && \
-                        adapter->msix_entries) ? 1 : 16)
+#define IGB_RX_PTHRESH ((hw->mac.type == e1000_i354) ? 12 : 8)
+#define IGB_RX_HTHRESH 8
+#define IGB_TX_PTHRESH ((hw->mac.type == e1000_i354) ? 20 : 8)
+#define IGB_TX_HTHRESH 1
+#define IGB_RX_WTHRESH ((hw->mac.type == e1000_82576 && \
+                        adapter->msix_entries) ? 1 : 4)
 
 /* this is the size past which hardware will drop packets when setting LPE=0 */
 #define MAXIMUM_ETHERNET_VLAN_SIZE 1522
 
+/* NOTE: netdev_alloc_skb reserves 16 bytes, NET_IP_ALIGN means we
+ * reserve 2 more, and skb_shared_info adds an additional 384 more,
+ * this adds roughly 448 bytes of extra data meaning the smallest
+ * allocation we could have is 1K.
+ * i.e. RXBUFFER_512 --> size-1024 slab */
 /* Supported Rx Buffer Sizes */
-#define IGB_RXBUFFER_64 64 /* Used for packet split */
-#define IGB_RXBUFFER_128 128 /* Used for packet split */
-#define IGB_RXBUFFER_1024 1024
+#define IGB_RXBUFFER_256 256
 #define IGB_RXBUFFER_2048 2048
-#define IGB_RXBUFFER_4096 4096
-#define IGB_RXBUFFER_8192 8192
 #define IGB_RXBUFFER_16384 16384
+#define IGB_RX_HDR_LEN IGB_RXBUFFER_256
+#if MAX_SKB_FRAGS < 8
+#define IGB_RX_BUFSZ ALIGN(MAX_JUMBO_FRAME_SIZE / MAX_SKB_FRAGS, 1024)
+#else
+#define IGB_RX_BUFSZ IGB_RXBUFFER_2048
+#endif
+
 
 /* Packet Buffer allocations */
 #define IGB_PBA_BYTES_SHIFT 0xA
@@ -180,13 +197,11 @@ struct vf_data_storage {
 
 #define IGB_FC_PAUSE_TIME 0x0680 /* 858 usec */
 
-/* How many Tx Descriptors do we need to call netif_wake_queue ? */
-#define IGB_TX_QUEUE_WAKE 32
 /* How many Rx Buffers do we bundle into one write to the hardware ? */
 #define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */
 
-#define AUTO_ALL_MODES 0
 #define IGB_EEPROM_APME 0x0400
+#define AUTO_ALL_MODES 0
 
 #ifndef IGB_MASTER_SLAVE
 /* Switch to override PHY master/slave setting */
@@ -195,68 +210,67 @@ struct vf_data_storage {
 
 #define IGB_MNG_VLAN_NONE -1
 
-#ifdef IGB_LRO
-#define IGB_LRO_MAX 32 /*Maximum number of LRO descriptors*/
-#define IGB_LRO_GLOBAL 10
-
-struct igb_lro_stats {
-       u32 flushed;
-       u32 coal;
-       u32 recycled;
+struct igb_cb {
+#ifdef HAVE_VLAN_RX_REGISTER
+       u16 vid; /* VLAN tag */
+#endif
 };
+#define IGB_CB(skb) ((struct igb_cb *)(skb)->cb)
 
-struct igb_lro_desc {
-       struct hlist_node lro_node;
-       struct sk_buff *skb;
-       u32 source_ip;
-       u32 dest_ip;
-       u16 source_port;
-       u16 dest_port;
-       u16 vlan_tag;
-       u16 len;
-       u32 next_seq;
-       u32 ack_seq;
-       u16 window;
-       u16 mss;
-       u16 opt_bytes;
-       u16 psh:1;
-       u32 tsval;
-       u32 tsecr;
-       u32 append_cnt;
-};
+enum igb_tx_flags {
+       /* cmd_type flags */
+       IGB_TX_FLAGS_VLAN = 0x01,
+       IGB_TX_FLAGS_TSO = 0x02,
+       IGB_TX_FLAGS_TSTAMP = 0x04,
 
-struct igb_lro_list {
-       struct hlist_head active;
-       struct hlist_head free;
-       int active_cnt;
-       struct igb_lro_stats stats;
+       /* olinfo flags */
+       IGB_TX_FLAGS_IPV4 = 0x10,
+       IGB_TX_FLAGS_CSUM = 0x20,
 };
-#endif /* IGB_LRO */
 
+/* VLAN info */
+#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
+#define IGB_TX_FLAGS_VLAN_SHIFT 16
+
+/*
+ * The largest size we can write to the descriptor is 65535.  In order to
+ * maintain a power of two alignment we have to limit ourselves to 32K.
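+ *
+ * For example, with IGB_MAX_DATA_PER_TXD = 1 << 15 = 32768 bytes (defined
+ * just below), a 60 KB TSO payload costs TXD_USE_COUNT(61440) =
+ * DIV_ROUND_UP(61440, 32768) = 2 data descriptors.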
+ */
+#define IGB_MAX_TXD_PWR 15
+#define IGB_MAX_DATA_PER_TXD (1 << IGB_MAX_TXD_PWR)
+
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD)
+#ifndef MAX_SKB_FRAGS
+#define DESC_NEEDED 4
+#elif (MAX_SKB_FRAGS < 16)
+#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)
+#else
+#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
+#endif
+
 
 /* wrapper around a pointer to a socket buffer,
  * so a DMA handle can be stored along with the buffer */
-struct igb_buffer {
+struct igb_tx_buffer {
+       union e1000_adv_tx_desc *next_to_watch;
+       unsigned long time_stamp;
        struct sk_buff *skb;
-       dma_addr_t dma;
-       union {
-               /* TX */
-               struct {
-                       unsigned long time_stamp;
-                       u16 length;
-                       u16 next_to_watch;
-                       u16 mapped_as_page;
-                       u16 gso_segs;
-               };
+       unsigned int bytecount;
+       u16 gso_segs;
+       __be16 protocol;
+       DEFINE_DMA_UNMAP_ADDR(dma);
+       DEFINE_DMA_UNMAP_LEN(len);
+       u32 tx_flags;
+};
 
-#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
-               /* RX */
-               struct {
-                       unsigned long page_offset;
-                       struct page *page;
-                       dma_addr_t page_dma;
-               };
+struct igb_rx_buffer {
+       dma_addr_t dma;
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
+       struct sk_buff *skb;
+#else
+       struct page *page;
+       u32 page_offset;
 #endif
-       };
 };
 
 struct igb_tx_queue_stats {
@@ -271,61 +285,64 @@ struct igb_rx_queue_stats {
        u64 drops;
        u64 csum_err;
        u64 alloc_failed;
+       u64 ipv4_packets;  /* IPv4 headers processed */
+       u64 ipv4e_packets; /* IPv4E headers with extensions processed */
+       u64 ipv6_packets;  /* IPv6 headers processed */
+       u64 ipv6e_packets; /* IPv6E headers with extensions processed */
+       u64 tcp_packets;   /* TCP headers processed */
+       u64 udp_packets;   /* UDP headers processed */
+       u64 sctp_packets;  /* SCTP headers processed */
+       u64 nfs_packets;   /* NFS headers processed */
 };
 
-struct igb_q_vector {
-       struct igb_adapter *adapter; /* backlink */
-       struct igb_ring *rx_ring;
-       struct igb_ring *tx_ring;
-       struct napi_struct napi;
-
-       u32 eims_value;
-       u16 cpu;
-
-       u16 itr_val;
-       u8 set_itr;
-       void __iomem *itr_register;
-
-#ifdef IGB_LRO
-       struct igb_lro_list *lrolist; /* LRO list for queue vector*/
-#endif
-       char name[IFNAMSIZ + 9];
-#ifndef HAVE_NETDEV_NAPI_LIST
-       struct net_device poll_dev;
-#endif
+struct igb_ring_container {
+       struct igb_ring *ring; /* pointer to linked list of rings */
+       unsigned int total_bytes; /* total bytes processed this int */
+       unsigned int total_packets; /* total packets processed this int */
+       u16 work_limit; /* total work allowed per interrupt */
+       u8 count; /* total number of rings in vector */
+       u8 itr; /* current ITR setting for ring */
 };
 
 struct igb_ring {
-       struct igb_q_vector *q_vector; /* backlink to q_vector */
-       struct net_device *netdev; /* back pointer to net_device */
-       struct pci_dev *pdev; /* pci device for dma mapping */
-       dma_addr_t dma; /* phys address of the ring */
-       void *desc; /* descriptor ring memory */
-       unsigned int size; /* length of desc. ring in bytes */
-       u16 count; /* number of desc. in the ring */
-       u16 next_to_use;
+       struct igb_q_vector *q_vector; /* backlink to q_vector */
+       struct net_device *netdev; /* back pointer to net_device */
+       struct device *dev; /* device for dma mapping */
+       union { /* array of buffer info structs */
+               struct igb_tx_buffer *tx_buffer_info;
+               struct igb_rx_buffer *rx_buffer_info;
+       };
+#ifdef HAVE_PTP_1588_CLOCK
+       unsigned long last_rx_timestamp;
+#endif /* HAVE_PTP_1588_CLOCK */
+       void *desc; /* descriptor ring memory */
+       unsigned long flags; /* ring specific flags */
+       void __iomem *tail; /* pointer to ring tail register */
+       dma_addr_t dma; /* phys address of the ring */
+       unsigned int size; /* length of desc. ring in bytes */
+
+       u16 count; /* number of desc. in the ring */
+       u8 queue_index; /* logical index of the ring*/
+       u8 reg_idx; /* physical index of the ring */
+
+       /* everything past this point are written often */
        u16 next_to_clean;
-       u8 queue_index;
-       u8 reg_idx;
-       void __iomem *head;
-       void __iomem *tail;
-       struct igb_buffer *buffer_info; /* array of buffer info structs */
-
-       unsigned int total_bytes;
-       unsigned int total_packets;
-
-       u32 flags;
+       u16 next_to_use;
+       u16 next_to_alloc;
 
        union {
                /* TX */
                struct {
                        struct igb_tx_queue_stats tx_stats;
-                       bool detect_tx_hung;
                };
                /* RX */
                struct {
                        struct igb_rx_queue_stats rx_stats;
-                       u32 rx_buffer_len;
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
                        u16 rx_buffer_len;
+#else
+                       struct sk_buff *skb;
+#endif
 #ifdef __VMKNETDDI_QUEUEOPS__
                        u8 mac_addr[NODE_ADDRESS_SIZE];
                        u8 active;
@@ -333,89 +350,180 @@ struct igb_ring {
 #endif
                };
        };
-};
+#ifdef CONFIG_IGB_VMDQ_NETDEV
+       struct net_device *vmdq_netdev;
+       int vqueue_index; /* queue index for virtual netdev */
+#endif
+} ____cacheline_internodealigned_in_smp;
 
-#define IGB_RING_FLAG_RX_CSUM 0x00000001 /* RX CSUM enabled */
-#define IGB_RING_FLAG_RX_SCTP_CSUM 0x00000002 /* SCTP CSUM offload enabled */
-#ifdef IGB_LRO
-#define IGB_RING_FLAG_RX_LRO 0x00000004 /* LRO enabled */
-#endif /* IGB_LRO */
+struct igb_q_vector {
+       struct igb_adapter *adapter; /* backlink */
+       int cpu; /* CPU for DCA */
+       u32 eims_value; /* EIMS mask value */
 
-#define IGB_RING_FLAG_TX_CTX_IDX 0x00000001 /* HW requires context index */
+       u16 itr_val;
+       u8 set_itr;
+       void __iomem *itr_register;
 
-#define IGB_ADVTXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)
+       struct igb_ring_container rx, tx;
 
-#define E1000_RX_DESC_ADV(R, i) \
-       (&(((union e1000_adv_rx_desc *)((R).desc))[i]))
-#define E1000_TX_DESC_ADV(R, i) \
-       (&(((union e1000_adv_tx_desc *)((R).desc))[i]))
-#define E1000_TX_CTXTDESC_ADV(R, i) \
-       (&(((struct e1000_adv_tx_context_desc *)((R).desc))[i]))
+       struct napi_struct napi;
+       char name[IFNAMSIZ + 9];
+#ifndef HAVE_NETDEV_NAPI_LIST
+       struct net_device poll_dev;
+#endif
+
+       /* for dynamic allocation of rings associated with this q_vector */
+       struct igb_ring ring[0] ____cacheline_internodealigned_in_smp;
+};
+
+enum e1000_ring_flags_t {
+#ifndef HAVE_NDO_SET_FEATURES
+       IGB_RING_FLAG_RX_CSUM,
+#endif
+       IGB_RING_FLAG_RX_SCTP_CSUM,
+       IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
+       IGB_RING_FLAG_TX_CTX_IDX,
+       IGB_RING_FLAG_TX_DETECT_HANG,
+};
+
+struct igb_mac_addr {
+       u8 addr[ETH_ALEN];
+       u16 queue;
+       u16 state; /* bitmask */
+};
+#define IGB_MAC_STATE_DEFAULT 0x1
+#define IGB_MAC_STATE_MODIFIED 0x2
+#define IGB_MAC_STATE_IN_USE 0x4
+
+#define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)
+
+#define IGB_RX_DESC(R, i) \
+       (&(((union e1000_adv_rx_desc *)((R)->desc))[i]))
+#define IGB_TX_DESC(R, i) \
+       (&(((union e1000_adv_tx_desc *)((R)->desc))[i]))
+#define IGB_TX_CTXTDESC(R, i) \
+       (&(((struct e1000_adv_tx_context_desc *)((R)->desc))[i]))
+
+#ifdef CONFIG_IGB_VMDQ_NETDEV
+#define netdev_ring(ring) \
+       ((ring->vmdq_netdev ? ring->vmdq_netdev : ring->netdev))
+#define ring_queue_index(ring) \
+       ((ring->vmdq_netdev ? ring->vqueue_index : ring->queue_index))
+#else
+#define netdev_ring(ring) (ring->netdev)
+#define ring_queue_index(ring) (ring->queue_index)
+#endif /* CONFIG_IGB_VMDQ_NETDEV */
+
+/* igb_test_staterr - tests bits within Rx descriptor status and error fields */
+static inline __le32 igb_test_staterr(union e1000_adv_rx_desc *rx_desc,
+                                      const u32 stat_err_bits)
+{
+       return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
+}
 
 /* igb_desc_unused - calculate if we have unused descriptors */
-static inline int igb_desc_unused(struct igb_ring *ring)
+static inline u16 igb_desc_unused(const struct igb_ring *ring)
 {
-       if (ring->next_to_clean > ring->next_to_use)
-               return ring->next_to_clean - ring->next_to_use - 1;
+       u16 ntc = ring->next_to_clean;
+       u16 ntu = ring->next_to_use;
 
-       return ring->count + ring->next_to_clean - ring->next_to_use - 1;
+       return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
 }
 
+#ifdef CONFIG_BQL
+static inline struct netdev_queue *txring_txq(const struct igb_ring *tx_ring)
+{
+       return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
+}
+#endif /* CONFIG_BQL */
+
+// #ifdef EXT_THERMAL_SENSOR_SUPPORT
+// #ifdef IGB_PROCFS
+struct igb_therm_proc_data
+{
+       struct e1000_hw *hw;
+       struct e1000_thermal_diode_data *sensor_data;
+};
+
+// #endif /* IGB_PROCFS */
+// #endif /* EXT_THERMAL_SENSOR_SUPPORT */
+
+#ifdef IGB_HWMON
+#define IGB_HWMON_TYPE_LOC 0
+#define IGB_HWMON_TYPE_TEMP 1
+#define IGB_HWMON_TYPE_CAUTION 2
+#define IGB_HWMON_TYPE_MAX 3
+
+struct hwmon_attr {
+       struct device_attribute dev_attr;
+       struct e1000_hw *hw;
+       struct e1000_thermal_diode_data *sensor;
+       char name[12];
+       };
+
+struct hwmon_buff {
+       struct device *device;
+       struct hwmon_attr *hwmon_list;
+       unsigned int n_hwmon;
+       };
+#endif /* IGB_HWMON */
+
 /* board specific private data structure */
 struct igb_adapter {
+#ifdef HAVE_VLAN_RX_REGISTER
+       /* vlgrp must be first member of structure */
+       struct vlan_group *vlgrp;
+#else
+       unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+#endif
+       struct net_device *netdev;
+
+       unsigned long state;
+       unsigned int flags;
+
+       unsigned int num_q_vectors;
+       struct msix_entry *msix_entries;
+
+
+       /* TX */
+       u16 tx_work_limit;
+       u32 tx_timeout_count;
+       int num_tx_queues;
+       struct igb_ring *tx_ring[IGB_MAX_TX_QUEUES];
+
+       /* RX */
+       int num_rx_queues;
+       struct igb_ring *rx_ring[IGB_MAX_RX_QUEUES];
+
        struct timer_list watchdog_timer;
+       struct timer_list dma_err_timer;
        struct timer_list phy_info_timer;
-       struct vlan_group *vlgrp;
        u16 mng_vlan_id;
        u32 bd_number;
        u32 wol;
        u32 en_mng_pt;
        u16 link_speed;
        u16 link_duplex;
+       u8 port_num;
 
        /* Interrupt Throttle Rate */
        u32 rx_itr_setting;
        u32 tx_itr_setting;
-       u16 tx_itr;
-       u16 rx_itr;
 
        struct work_struct reset_task;
        struct work_struct watchdog_task;
+       struct work_struct dma_err_task;
        bool fc_autoneg;
        u8 tx_timeout_factor;
-#ifdef ETHTOOL_PHYS_ID
-       struct timer_list blink_timer;
-       unsigned long led_status;
-#endif
-
-       /* TX */
-       struct igb_ring *tx_ring[IGB_MAX_TX_QUEUES];
-       unsigned long tx_queue_len;
-       u32 tx_timeout_count;
-
-       /* RX */
-       struct igb_ring *rx_ring[IGB_MAX_RX_QUEUES];
-       int num_tx_queues;
-       int num_rx_queues;
 
        u32 max_frame_size;
-       u32 min_frame_size;
 
        /* OS defined structs */
-       struct net_device *netdev;
        struct pci_dev *pdev;
 #ifndef HAVE_NETDEV_STATS_IN_NETDEV
        struct net_device_stats net_stats;
 #endif
-#ifdef IGB_LRO
-       struct igb_lro_stats lro_stats;
-#endif
-#ifdef SIOCSHWTSTAMP
-       struct cyclecounter cycles;
-       struct timecounter clock;
-       struct timecompare compare;
-       struct hwtstamp_config hwtstamp_config;
-#endif
 
        /* structs defined in e1000_hw.h */
        struct e1000_hw hw;
@@ -431,51 +539,170 @@ struct igb_adapter {
 
        int msg_enable;
 
-       unsigned int num_q_vectors;
        struct igb_q_vector *q_vector[MAX_Q_VECTORS];
-       struct msix_entry *msix_entries;
        u32 eims_enable_mask;
        u32 eims_other;
 
        /* to not mess up cache alignment, always add to the bottom */
-       unsigned long state;
-       unsigned int flags;
-
        u32 eeprom_wol;
 
-       u32 *config_space;
-#ifdef HAVE_TX_MQ
-       struct igb_ring *multi_tx_table[IGB_MAX_TX_QUEUES];
-#endif /* HAVE_TX_MQ */
        u16 tx_ring_count;
        u16 rx_ring_count;
 #ifdef __VMKNETDDI_QUEUEOPS__
        u32 n_rx_queues_allocated;
        u32 n_tx_queues_allocated;
-       /* A place to salt away the RAR table before resetting the adapter
-        * during change MTU
-        */
 #endif
        struct vf_data_storage *vf_data;
+#ifdef IFLA_VF_MAX
+       int vf_rate_link_speed;
+#endif
        u32 lli_port;
        u32 lli_size;
        unsigned int vfs_allocated_count;
+       /* Malicious Driver Detection flag. Valid only when SR-IOV is enabled */
+       bool mdd;
        int int_mode;
        u32 rss_queues;
        u32 vmdq_pools;
-       u16 fw_version;
+       char fw_version[32];
+       u32 wvbr;
+       struct igb_mac_addr *mac_table;
+#ifdef CONFIG_IGB_VMDQ_NETDEV
+       struct net_device *vmdq_netdev[IGB_MAX_VMDQ_QUEUES];
+#endif
+       int vferr_refcount;
+       int dmac;
+       u32 *shadow_vfta;
+
+       /* External Thermal Sensor support flag */
+       bool ets;
+#ifdef IGB_HWMON
+       struct hwmon_buff igb_hwmon_buff;
+#else /* IGB_HWMON */
+       struct proc_dir_entry *eth_dir;
+       struct proc_dir_entry *info_dir;
+       struct proc_dir_entry *therm_dir[E1000_MAX_SENSORS];
+       struct igb_therm_proc_data therm_data[E1000_MAX_SENSORS];
+       bool old_lsc;
+#endif /* IGB_HWMON */
+       u32 etrack_id;
+#ifdef __VMKLNX__
+       u16 SmbTblLen;
+       u32 SmbTblAddr;
+#endif /* __VMKLNX__ */
+
+#ifdef HAVE_PTP_1588_CLOCK
+       struct ptp_clock *ptp_clock;
+       struct ptp_clock_info ptp_caps;
+       struct delayed_work ptp_overflow_work;
+       struct work_struct ptp_tx_work;
+       struct sk_buff *ptp_tx_skb;
+       unsigned long ptp_tx_start;
+       unsigned long last_rx_ptp_check;
+       spinlock_t tmreg_lock;
+       struct cyclecounter cc;
+       struct timecounter tc;
+       u32 tx_hwtstamp_timeouts;
+       u32 rx_hwtstamp_cleared;
+#endif /* HAVE_PTP_1588_CLOCK */
+
+       unsigned long link_check_timeout;
+
+
+       int devrc;
+
+       u16 eee_advert;
 };
 
+#ifdef CONFIG_IGB_VMDQ_NETDEV
+struct igb_vmdq_adapter {
+#ifdef HAVE_VLAN_RX_REGISTER
+       /* vlgrp must be first member of structure */
+       struct vlan_group *vlgrp;
+#else
+       unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+#endif
+       struct igb_adapter *real_adapter;
+       struct net_device *vnetdev;
+       struct net_device_stats net_stats;
+       struct igb_ring *tx_ring;
+       struct igb_ring *rx_ring;
+};
+#endif
 
-#define IGB_FLAG_HAS_MSI (1 << 0)
-#define IGB_FLAG_MSI_ENABLE (1 << 1)
-#define IGB_FLAG_DCA_ENABLED (1 << 3)
-#define IGB_FLAG_LLI_PUSH (1 << 4)
-#define IGB_FLAG_QUAD_PORT_A (1 << 5)
-#define IGB_FLAG_QUEUE_PAIRS (1 << 6)
+#define IGB_FLAG_HAS_MSI (1 << 0)
+#define IGB_FLAG_DCA_ENABLED (1 << 1)
+#define IGB_FLAG_LLI_PUSH (1 << 2)
+#define IGB_FLAG_QUAD_PORT_A (1 << 3)
+#define IGB_FLAG_QUEUE_PAIRS (1 << 4)
+#define IGB_FLAG_EEE (1 << 5)
+#define IGB_FLAG_DMAC (1 << 6)
+#define IGB_FLAG_DETECT_BAD_DMA (1 << 7)
+#define IGB_FLAG_PTP (1 << 8)
+#define IGB_FLAG_RSS_FIELD_IPV4_UDP (1 << 9)
+#define IGB_FLAG_RSS_FIELD_IPV6_UDP (1 << 10)
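+/*
+ * The IGB_FLAG_* words above and below are plain bitmasks over
+ * adapter->flags; illustrative usage (not taken from the sources):
+ *
+ *     adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
+ *     if (adapter->flags & IGB_FLAG_DCA_ENABLED)
+ *             setup_dca(adapter);
+ *
+ * setup_dca() stands in for whatever the caller does with the bit.
+ */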
+#define IGB_FLAG_WOL_SUPPORTED (1 << 11)
+#define IGB_FLAG_NEED_LINK_UPDATE (1 << 12)
+#define IGB_FLAG_LOOPBACK_ENABLE (1 << 13)
+#define IGB_FLAG_MEDIA_RESET (1 << 14)
+
+#define IGB_MIN_TXPBSIZE 20408
+#define IGB_TX_BUF_4096 4096
+
+#define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */
+
+/* DMA Coalescing defines */
+#define IGB_DMAC_DISABLE 0
+#define IGB_DMAC_MIN 250
+#define IGB_DMAC_500 500
+#define IGB_DMAC_EN_DEFAULT 1000
+#define IGB_DMAC_2000 2000
+#define IGB_DMAC_3000 3000
+#define IGB_DMAC_4000 4000
+#define IGB_DMAC_5000 5000
+#define IGB_DMAC_6000 6000
+#define IGB_DMAC_7000 7000
+#define IGB_DMAC_8000 8000
+#define IGB_DMAC_9000 9000
+#define IGB_DMAC_MAX 10000
 
 #define IGB_82576_TSYNC_SHIFT 19
 #define IGB_82580_TSYNC_SHIFT 24
 #define IGB_TS_HDR_LEN 16
+
+/* CEM Support */
+#define FW_HDR_LEN 0x4
+#define FW_CMD_DRV_INFO 0xDD
+#define FW_CMD_DRV_INFO_LEN 0x5
+#define FW_CMD_RESERVED 0X0
+#define FW_RESP_SUCCESS 0x1
+#define FW_UNUSED_VER 0x0
+#define FW_MAX_RETRIES 3
+#define FW_STATUS_SUCCESS 0x1
+#define FW_FAMILY_DRV_VER 0Xffffffff
+
+#define IGB_MAX_LINK_TRIES 20
+
+struct e1000_fw_hdr {
+       u8 cmd;
+       u8 buf_len;
+       union
+       {
+               u8 cmd_resv;
+               u8 ret_status;
+       } cmd_or_resp;
+       u8 checksum;
+};
+
+#pragma pack(push,1)
+struct e1000_fw_drv_info {
+       struct e1000_fw_hdr hdr;
+       u8 port_num;
+       u32 drv_version;
+       u16 pad; /* end spacing to ensure length is mult. of dword */
+       u8 pad2; /* end spacing to ensure length is mult. of dword2 */
+};
+#pragma pack(pop)
+
 enum e1000_state_t {
        __IGB_TESTING,
        __IGB_RESETTING,
@@ -498,17 +725,139 @@ extern void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
 extern void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
 extern void igb_setup_tctl(struct igb_adapter *);
 extern void igb_setup_rctl(struct igb_adapter *);
-extern netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *, struct igb_ring *);
+extern netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
 extern void igb_unmap_and_free_tx_resource(struct igb_ring *,
-                                          struct igb_buffer *);
-extern void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
+                                          struct igb_tx_buffer *);
+extern void igb_alloc_rx_buffers(struct igb_ring *, u16);
+extern void igb_clean_rx_ring(struct igb_ring *);
 extern void igb_update_stats(struct igb_adapter *);
+extern bool igb_has_link(struct igb_adapter *adapter);
 extern void igb_set_ethtool_ops(struct net_device *);
 extern void igb_check_options(struct igb_adapter *);
+extern void igb_power_up_link(struct igb_adapter *);
+#ifdef HAVE_PTP_1588_CLOCK
+extern void igb_ptp_init(struct igb_adapter *adapter);
+extern void igb_ptp_stop(struct igb_adapter *adapter);
+extern void igb_ptp_reset(struct igb_adapter *adapter);
+extern void igb_ptp_tx_work(struct work_struct *work);
+extern void igb_ptp_rx_hang(struct igb_adapter *adapter);
+extern void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
+extern void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
+                               struct sk_buff *skb);
+extern void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
+                               unsigned char *va,
+                               struct sk_buff *skb);
+static inline void igb_ptp_rx_hwtstamp(struct igb_ring *rx_ring,
+                                      union e1000_adv_rx_desc *rx_desc,
+                                      struct sk_buff *skb)
+{
+       if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
+               igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
+               skb_pull(skb, IGB_TS_HDR_LEN);
+#endif
+               return;
+       }
+
+       if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS))
+               igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb);
+
+       /* Update the last_rx_timestamp timer in order to enable watchdog check
+        * for error case of latched timestamp on a dropped packet.
+        */
+       rx_ring->last_rx_timestamp = jiffies;
+}
+
+extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
+                                 struct ifreq *ifr, int cmd);
+#endif /* HAVE_PTP_1588_CLOCK */
 #ifdef ETHTOOL_OPS_COMPAT
 extern int ethtool_ioctl(struct ifreq *);
 #endif
+extern int igb_write_mc_addr_list(struct net_device *netdev);
+extern int igb_add_mac_filter(struct igb_adapter *adapter, u8 *addr, u16 queue);
+extern int igb_del_mac_filter(struct igb_adapter *adapter, u8* addr, u16 queue);
+extern int igb_available_rars(struct igb_adapter *adapter);
 extern s32 igb_vlvf_set(struct igb_adapter *, u32, bool, u32);
 extern void igb_configure_vt_default_pool(struct igb_adapter *adapter);
+extern void igb_enable_vlan_tags(struct igb_adapter *adapter);
+#ifndef HAVE_VLAN_RX_REGISTER
+extern void igb_vlan_mode(struct net_device *, u32);
+#endif
+
+#define E1000_PCS_CFG_IGN_SD 1
+
+#ifdef IGB_HWMON
+void igb_sysfs_exit(struct igb_adapter *adapter);
+int igb_sysfs_init(struct igb_adapter *adapter);
+#else
+int igb_procfs_init(struct igb_adapter* adapter);
+void igb_procfs_exit(struct igb_adapter* adapter);
+int igb_procfs_topdir_init(void);
+void igb_procfs_topdir_exit(void);
+#endif /* IGB_HWMON */
+
+/* ESX igb CIM IOCTL definition */
+#define SIOCINTELCIM 0x89F8
+
+#define INTELCIM_ENUMDIAGS 0x01 /* enumerate diagnostics */
+#define INTELCIM_RUNDIAG 0x02 /* run diagnostics */
+#define INTELCIM_FNDSMB 0x03 /* Find SMBIOS entry and size */
+#define INTELCIM_GETSMBTBL 0x04 /* get SMBIOS tables */
+#define INTELCIM_WRITEMEM 0x05 /* write data from user space to memory */
+#define INTELCIM_READMEM 0x06 /* read data from memory to user space */
+#define INTELCIM_GET_PCIE_ERROR_INFO 0x07
+#define INTELCIM_GET_PCI_LINK_STATUS 0x08
+
+#define SM_ADDR_HIGH 0x000FFFFF
+#define SM_ADDR_LOW 0x000F0000
+
+static const unsigned char sm_anchor[4] = "_SM_";
+
+struct smbios_table {
+       u8 AnchorString[4];
+       u8 EntryPointChecksum;
+       u8 EntryPointLength;
+       u8 SmMajorVersion;
+       u8 SmMinorVersion;
+       u16 MaxStructureSize;
+       u8 EntryPointRevision;
+       u8 FormattedArea[5];
+       u8 IntermediateAnchorString[5];
+       u8 IntermediateChecksum;
+       u16 TableLength;
+       u32 TableAddress;
+       u16 NumberSmStructures;
+       u8 SmBcdRevision;
+} __attribute__((__packed__));
+
+struct intelcim_mem_buf {
+       u64 addr;
+       u32 len; /* Length in bytes */
+       u8 data[0];
+} __attribute__((__packed__));
+
+struct intelcim_pcie_error_info
+{
+       u32 num_regs; /* Number of dwords */
+       u32 data[0];
+} __attribute__((__packed__));
+
+struct igb_intelcim_ioctl_req {
+       u32 cmd;
+       union {
+               struct ethtool_gstrings gstrings;
+               struct ethtool_test test;
+               struct smbios_table tbl;
+               u8 smbios[0];
+               struct intelcim_mem_buf buf;
+               struct intelcim_pcie_error_info info;
+               u16 link_status;
+       } cmd_req;
+} __attribute__((packed));
+
+int igb_intelcim_ioctl(struct net_device *netdev, struct ifreq *ifr);
+
+
 #endif /* _IGB_H_ */
diff --git a/vmkdrivers/src_9/drivers/net/igb/igb_debugfs.c b/vmkdrivers/src_9/drivers/net/igb/igb_debugfs.c
new file mode 100755
index 0000000000000000000000000000000000000000..d33c814a8795a50c5c42e0611abe452198336f6c
--- /dev/null
+++ b/vmkdrivers/src_9/drivers/net/igb/igb_debugfs.c
@@ -0,0 +1,29 @@
+/*******************************************************************************
+
+  Intel(R) Gigabit Ethernet Linux driver
+  Copyright(c) 2007-2013 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "igb.h"
+
diff --git a/vmkdrivers/src_9/drivers/net/igb/igb_ethtool.c b/vmkdrivers/src_9/drivers/net/igb/igb_ethtool.c
index 43a0513be4aee38b878b7fa7b3be02f28e8f5d27..366352e511be6f075c0f3ea803104e391c0b3533 100644
--- a/vmkdrivers/src_9/drivers/net/igb/igb_ethtool.c
+++ b/vmkdrivers/src_9/drivers/net/igb/igb_ethtool.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2009 Intel Corporation.
+  Copyright(c) 2007-2013 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -32,15 +32,21 @@
 
 #ifdef SIOCETHTOOL
 #include
+#ifdef CONFIG_PM_RUNTIME
+#include
+#endif /* CONFIG_PM_RUNTIME */
+#include
 
 #include "igb.h"
 #include "igb_regtest.h"
 #include
+#ifdef ETHTOOL_GEEE
+#include
+#endif
 
 #ifdef ETHTOOL_OPS_COMPAT
 #include "kcompat_ethtool.c"
 #endif
-
 #ifdef ETHTOOL_GSTATS
 struct igb_stats {
        char stat_string[ETH_GSTRING_LEN];
@@ -86,14 +92,17 @@ static const struct igb_stats igb_gstrings_stats[] = {
        IGB_STAT("tx_flow_control_xoff", stats.xofftxc),
        IGB_STAT("rx_long_byte_count", stats.gorc),
        IGB_STAT("tx_dma_out_of_sync", stats.doosync),
-#ifdef IGB_LRO
-       IGB_STAT("lro_aggregated", lro_stats.coal),
-       IGB_STAT("lro_flushed", lro_stats.flushed),
-       IGB_STAT("lro_recycled", lro_stats.recycled),
-#endif /* IGB_LRO */
        IGB_STAT("tx_smbus", stats.mgptc),
        IGB_STAT("rx_smbus", stats.mgprc),
        IGB_STAT("dropped_smbus", stats.mgpdc),
+       IGB_STAT("os2bmc_rx_by_bmc", stats.o2bgptc),
+       IGB_STAT("os2bmc_tx_by_bmc", stats.b2ospc),
+       IGB_STAT("os2bmc_tx_by_host", stats.o2bspc),
+       IGB_STAT("os2bmc_rx_by_host", stats.b2ogprc),
+#ifdef HAVE_PTP_1588_CLOCK
+       IGB_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
+       IGB_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
+#endif /* HAVE_PTP_1588_CLOCK */
 };
 
 #define IGB_NETDEV_STAT(_net_stat) { \
@@ -113,10 +122,8 @@ static const struct igb_stats igb_gstrings_net_stats[] = {
        IGB_NETDEV_STAT(tx_heartbeat_errors)
 };
 
-#define IGB_GLOBAL_STATS_LEN \
-       (sizeof(igb_gstrings_stats) / sizeof(struct igb_stats))
-#define IGB_NETDEV_STATS_LEN \
-       (sizeof(igb_gstrings_net_stats) / sizeof(struct igb_stats))
+#define IGB_GLOBAL_STATS_LEN ARRAY_SIZE(igb_gstrings_stats)
+#define IGB_NETDEV_STATS_LEN ARRAY_SIZE(igb_gstrings_net_stats)
 #define IGB_RX_QUEUE_STATS_LEN \
        (sizeof(struct igb_rx_queue_stats) / sizeof(u64))
 #define IGB_TX_QUEUE_STATS_LEN \
@@ -148,12 +155,13 @@ static int igb_get_settings(struct net_device *netdev,
                            struct ethtool_cmd *ecmd)
 
        if (hw->phy.media_type == e1000_media_type_copper) {
 
                ecmd->supported = (SUPPORTED_10baseT_Half |
-                                  SUPPORTED_10baseT_Full |
-                                  SUPPORTED_100baseT_Half |
-                                  SUPPORTED_100baseT_Full |
-                                  SUPPORTED_1000baseT_Full|
-                                  SUPPORTED_Autoneg |
-                                  SUPPORTED_TP);
+                                  SUPPORTED_10baseT_Full |
+                                  SUPPORTED_100baseT_Half |
+                                  SUPPORTED_100baseT_Full |
+                                  SUPPORTED_1000baseT_Full|
+                                  SUPPORTED_Autoneg |
+                                  SUPPORTED_TP |
+                                  SUPPORTED_Pause);
                ecmd->advertising = ADVERTISED_TP;
 
                if (hw->mac.autoneg == 1) {
@@ -164,26 +172,63 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 
                ecmd->port = PORT_TP;
                ecmd->phy_address = hw->phy.addr;
+               ecmd->transceiver = XCVR_INTERNAL;
+
        } else {
-               ecmd->supported = (SUPPORTED_1000baseT_Full |
-                                  SUPPORTED_FIBRE |
-                                  SUPPORTED_Autoneg);
+               ecmd->supported = (SUPPORTED_1000baseT_Full |
+                                  SUPPORTED_100baseT_Full |
+                                  SUPPORTED_FIBRE |
+                                  SUPPORTED_Autoneg |
+                                  SUPPORTED_Pause);
+               if (hw->mac.type == e1000_i354)
+                       ecmd->supported |= (SUPPORTED_2500baseX_Full);
+
+               ecmd->advertising = ADVERTISED_FIBRE;
+
+               switch (adapter->link_speed) {
+               case SPEED_2500:
+                       ecmd->advertising = ADVERTISED_2500baseX_Full;
+                       break;
+               case SPEED_1000:
+                       ecmd->advertising = ADVERTISED_1000baseT_Full;
+                       break;
+               case SPEED_100:
+                       ecmd->advertising = ADVERTISED_100baseT_Full;
+                       break;
+               default:
+                       break;
+               }
 
-               ecmd->advertising = (ADVERTISED_1000baseT_Full |
-                                    ADVERTISED_FIBRE |
-                                    ADVERTISED_Autoneg);
+               if (hw->mac.autoneg == 1)
+                       ecmd->advertising |= ADVERTISED_Autoneg;
 
                ecmd->port = PORT_FIBRE;
+               ecmd->transceiver = XCVR_EXTERNAL;
        }
 
-       ecmd->transceiver = XCVR_INTERNAL;
+       if (hw->mac.autoneg != 1)
+               ecmd->advertising &= ~(ADVERTISED_Pause |
+                                      ADVERTISED_Asym_Pause);
+
+       if (hw->fc.requested_mode == e1000_fc_full)
+               ecmd->advertising |= ADVERTISED_Pause;
+       else if (hw->fc.requested_mode == e1000_fc_rx_pause)
+               ecmd->advertising |= (ADVERTISED_Pause |
+                                     ADVERTISED_Asym_Pause);
+       else if (hw->fc.requested_mode == e1000_fc_tx_pause)
+               ecmd->advertising |= ADVERTISED_Asym_Pause;
+       else
+               ecmd->advertising &= ~(ADVERTISED_Pause |
+                                      ADVERTISED_Asym_Pause);
 
        status = E1000_READ_REG(hw, E1000_STATUS);
 
        if (status & E1000_STATUS_LU) {
-
-               if ((status & E1000_STATUS_SPEED_1000) ||
-                   hw->phy.media_type != e1000_media_type_copper)
+               if ((hw->mac.type == e1000_i354) &&
+                   (status & E1000_STATUS_2P5_SKU) &&
+                   !(status & E1000_STATUS_2P5_SKU_OVER))
+                       ecmd->speed = SPEED_2500;
+               else if (status & E1000_STATUS_SPEED_1000)
                        ecmd->speed = SPEED_1000;
                else if (status & E1000_STATUS_SPEED_100)
                        ecmd->speed = SPEED_100;
@@ -195,12 +240,34 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
                        ecmd->duplex = DUPLEX_FULL;
                else
                        ecmd->duplex = DUPLEX_HALF;
+
        } else {
                ecmd->speed = -1;
                ecmd->duplex = -1;
        }
 
-       ecmd->autoneg = hw->mac.autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+       if ((hw->phy.media_type == e1000_media_type_fiber) ||
+           hw->mac.autoneg)
+               ecmd->autoneg = AUTONEG_ENABLE;
+       else
+               ecmd->autoneg = AUTONEG_DISABLE;
+#ifdef ETH_TP_MDI_X
+
+       /* MDI-X => 2; MDI =>1; Invalid =>0 */
+       if (hw->phy.media_type == e1000_media_type_copper)
+               ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X :
+                                                     ETH_TP_MDI;
+       else
+               ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+
+#ifdef ETH_TP_MDI_AUTO
+       if (hw->phy.mdix == AUTO_ALL_MODES)
+               ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+       else
+               ecmd->eth_tp_mdix_ctrl = hw->phy.mdix;
+
+#endif
+#endif /* ETH_TP_MDI_X */
 
        return 0;
 }
@@ -209,22 +276,72 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
 
-       /* When SoL/IDER sessions are active, autoneg/speed/duplex
+       if (ecmd->duplex == DUPLEX_HALF) {
+               if (!hw->dev_spec._82575.eee_disable)
+                       dev_info(pci_dev_to_dev(adapter->pdev), "EEE disabled: not supported with half duplex\n");
+               hw->dev_spec._82575.eee_disable = true;
+       } else {
+               if (hw->dev_spec._82575.eee_disable)
+                       dev_info(pci_dev_to_dev(adapter->pdev), "EEE enabled\n");
+               hw->dev_spec._82575.eee_disable = false;
+       }
+
+       /* When SoL/IDER sessions are active, autoneg/speed/duplex
        * cannot be changed */
        if (e1000_check_reset_block(hw)) {
-               DPRINTK(DRV, ERR, "Cannot change link characteristics "
-                       "when SoL/IDER is active.\n");
+               dev_err(pci_dev_to_dev(adapter->pdev), "Cannot change link "
+                       "characteristics when SoL/IDER is active.\n");
                return -EINVAL;
        }
 
+#ifdef ETH_TP_MDI_AUTO
+       /*
+        * MDI setting is only allowed when autoneg enabled because
+        * some hardware doesn't allow MDI setting when speed or
+        * duplex is forced.
+        */
+       if (ecmd->eth_tp_mdix_ctrl) {
+               if (hw->phy.media_type != e1000_media_type_copper)
+                       return -EOPNOTSUPP;
+
+               if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
+                   (ecmd->autoneg != AUTONEG_ENABLE)) {
+                       dev_err(&adapter->pdev->dev, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
+                       return -EINVAL;
+               }
+       }
+
+#endif /* ETH_TP_MDI_AUTO */
        while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
-               msleep(1);
+               usleep_range(1000, 2000);
 
        if (ecmd->autoneg == AUTONEG_ENABLE) {
                hw->mac.autoneg = 1;
-               hw->phy.autoneg_advertised = ecmd->advertising |
-                                            ADVERTISED_TP |
-                                            ADVERTISED_Autoneg;
+               if (hw->phy.media_type == e1000_media_type_fiber) {
+                       hw->phy.autoneg_advertised = ecmd->advertising |
+                                                    ADVERTISED_FIBRE |
+                                                    ADVERTISED_Autoneg;
+                       switch (adapter->link_speed) {
+                       case SPEED_2500:
+                               hw->phy.autoneg_advertised =
+                                       ADVERTISED_2500baseX_Full;
+                               break;
+                       case SPEED_1000:
+                               hw->phy.autoneg_advertised =
+                                       ADVERTISED_1000baseT_Full;
+                               break;
+                       case SPEED_100:
+                               hw->phy.autoneg_advertised =
+                                       ADVERTISED_100baseT_Full;
+                               break;
+                       default:
+                               break;
+                       }
+               } else {
+                       hw->phy.autoneg_advertised = ecmd->advertising |
+                                                    ADVERTISED_TP |
+                                                    ADVERTISED_Autoneg;
+               }
                ecmd->advertising = hw->phy.autoneg_advertised;
                if (adapter->fc_autoneg)
                        hw->fc.requested_mode = e1000_fc_default;
@@ -235,6 +352,19 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
                }
        }
 
+#ifdef ETH_TP_MDI_AUTO
+       /* MDI-X => 2; MDI => 1; Auto => 3 */
+       if (ecmd->eth_tp_mdix_ctrl) {
+               /* fix up the value for auto (3 => 0) as zero is mapped
+                * internally to auto
+                */
+               if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
+                       hw->phy.mdix = AUTO_ALL_MODES;
+               else
+                       hw->phy.mdix = ecmd->eth_tp_mdix_ctrl;
+       }
+
+#endif /* ETH_TP_MDI_AUTO */
        /* reset the link */
        if (netif_running(adapter->netdev)) {
                igb_down(adapter);
@@ -246,8 +376,26 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
        return 0;
 }
 
+static u32 igb_get_link(struct net_device *netdev)
+{
+       struct igb_adapter *adapter = netdev_priv(netdev);
+       struct e1000_mac_info *mac = &adapter->hw.mac;
+
+       /*
+        * If the link is not reported up to netdev, interrupts are disabled,
+        * and so the physical link state may have changed since we last
+        * looked. Set get_link_status to make sure that the true link
+        * state is interrogated, rather than pulling a cached and possibly
+        * stale link state from the driver.
+        */
+       if (!netif_carrier_ok(netdev))
+               mac->get_link_status = 1;
+
+       return igb_has_link(adapter);
+}
+
 static void igb_get_pauseparam(struct net_device *netdev,
-                              struct ethtool_pauseparam *pause)
+                              struct ethtool_pauseparam *pause)
 {
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
@@ -266,7 +414,7 @@ static void igb_get_pauseparam(struct net_device *netdev,
 }
 
 static int igb_set_pauseparam(struct net_device *netdev,
-                             struct ethtool_pauseparam *pause)
+                             struct ethtool_pauseparam *pause)
 {
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
@@ -275,7 +423,7 @@ static int igb_set_pauseparam(struct net_device *netdev,
        adapter->fc_autoneg = pause->autoneg;
 
        while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
-               msleep(1);
+               usleep_range(1000, 2000);
 
        if (adapter->fc_autoneg == AUTONEG_ENABLE) {
                hw->fc.requested_mode = e1000_fc_default;
@@ -297,109 +445,22 @@ static int igb_set_pauseparam(struct net_device *netdev,
 
                hw->fc.current_mode = hw->fc.requested_mode;
 
-               retval = ((hw->phy.media_type == e1000_media_type_copper) ?
-                         e1000_force_mac_fc(hw) : hw->mac.ops.setup_link(hw));
+               if (hw->phy.media_type == e1000_media_type_fiber) {
+                       retval = hw->mac.ops.setup_link(hw);
+                       /* implicit goto out */
+               } else {
+                       retval = e1000_force_mac_fc(hw);
+                       if (retval)
+                               goto out;
+                       e1000_set_fc_watermarks_generic(hw);
+               }
        }
 
+out:
        clear_bit(__IGB_RESETTING, &adapter->state);
        return retval;
 }
 
-static u32 igb_get_rx_csum(struct net_device *netdev)
-{
-       struct igb_adapter *adapter = netdev_priv(netdev);
-       return !!(adapter->rx_ring[0]->flags & IGB_RING_FLAG_RX_CSUM);
-}
-
-static int igb_set_rx_csum(struct net_device *netdev, u32 data)
-{
-       struct igb_adapter *adapter = netdev_priv(netdev);
-       int i;
-
-       for (i = 0; i < adapter->rss_queues; i++) {
-               if (data)
-                       adapter->rx_ring[i]->flags |= IGB_RING_FLAG_RX_CSUM;
-               else
-                       adapter->rx_ring[i]->flags &= ~IGB_RING_FLAG_RX_CSUM;
-       }
-
-       return 0;
-}
-
-static u32 igb_get_tx_csum(struct net_device *netdev)
-{
-       return (netdev->features & NETIF_F_IP_CSUM) != 0;
-}
-
-static int igb_set_tx_csum(struct net_device *netdev, u32 data)
-{
-       struct igb_adapter *adapter = netdev_priv(netdev);
-
-       if (data) {
-#ifdef NETIF_F_IPV6_CSUM
-               netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
-               if (adapter->hw.mac.type >= e1000_82576)
-                       netdev->features |= NETIF_F_SCTP_CSUM;
-       } else {
-               netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
-                                     NETIF_F_SCTP_CSUM);
-#else
-               netdev->features |= NETIF_F_IP_CSUM;
-               if (adapter->hw.mac.type == e1000_82576)
-                       netdev->features |= NETIF_F_SCTP_CSUM;
-       } else {
-               netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_SCTP_CSUM);
-#endif
-       }
-
-       return 0;
-}
-
-#ifdef NETIF_F_TSO
-static int igb_set_tso(struct net_device *netdev, u32 data)
-{
-       struct igb_adapter *adapter = netdev_priv(netdev);
-#ifndef HAVE_NETDEV_VLAN_FEATURES
-       int i;
-       struct net_device *v_netdev;
-#endif
-
-       if (data) {
-               netdev->features |= NETIF_F_TSO;
-#ifdef NETIF_F_TSO6
-               netdev->features |= NETIF_F_TSO6;
-#endif
-       } else {
-               netdev->features &= ~NETIF_F_TSO;
-#ifdef NETIF_F_TSO6
-               netdev->features &= ~NETIF_F_TSO6;
-#endif
-#ifndef HAVE_NETDEV_VLAN_FEATURES
-               /* disable TSO on all VLANs if they're present */
-               if (!adapter->vlgrp)
-                       goto tso_out;
-               for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
-                       v_netdev = vlan_group_get_device(adapter->vlgrp, i);
-                       if (!v_netdev)
-                               continue;
-
-                       v_netdev->features &= ~NETIF_F_TSO;
-#ifdef NETIF_F_TSO6
-                       v_netdev->features &= ~NETIF_F_TSO6;
-#endif
-                       vlan_group_set_device(adapter->vlgrp, i, v_netdev);
-               }
-#endif /* HAVE_NETDEV_VLAN_FEATURES */
-       }
-
-#ifndef HAVE_NETDEV_VLAN_FEATURES
-tso_out:
-#endif /* HAVE_NETDEV_VLAN_FEATURES */
-       DPRINTK(PROBE, INFO, "TSO is %s\n", data ? "Enabled" : "Disabled");
-       return 0;
-}
-#endif /* NETIF_F_TSO */
-
 static u32 igb_get_msglevel(struct net_device *netdev)
 {
        struct igb_adapter *adapter = netdev_priv(netdev);
@@ -414,12 +475,12 @@ static void igb_set_msglevel(struct net_device *netdev, u32 data)
 
 static int igb_get_regs_len(struct net_device *netdev)
 {
-#define IGB_REGS_LEN 551
+#define IGB_REGS_LEN 555
        return IGB_REGS_LEN * sizeof(u32);
 }
 
 static void igb_get_regs(struct net_device *netdev,
-                        struct ethtool_regs *regs, void *p)
+                        struct ethtool_regs *regs, void *p)
 {
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
@@ -631,7 +692,12 @@ static void igb_get_regs(struct net_device *netdev,
        regs_buff[548] = E1000_READ_REG(hw, E1000_TDFT);
        regs_buff[549] = E1000_READ_REG(hw, E1000_TDFHS);
        regs_buff[550] = E1000_READ_REG(hw, E1000_TDFPC);
-
+       if (hw->mac.type > e1000_82580) {
+               regs_buff[551] = adapter->stats.o2bgptc;
+               regs_buff[552] = adapter->stats.b2ospc;
+               regs_buff[553] = adapter->stats.o2bspc;
+               regs_buff[554] = adapter->stats.b2ogprc;
+       }
 }
 
 static int igb_get_eeprom_len(struct net_device *netdev)
@@ -641,7 +707,7 @@ static int igb_get_eeprom_len(struct net_device *netdev)
 }
 
 static int igb_get_eeprom(struct net_device *netdev,
-                         struct ethtool_eeprom *eeprom, u8 *bytes)
+                         struct ethtool_eeprom *eeprom, u8 *bytes)
 {
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
@@ -665,12 +731,12 @@ static int igb_get_eeprom(struct net_device *netdev,
 
        if (hw->nvm.type == e1000_nvm_eeprom_spi)
                ret_val = e1000_read_nvm(hw, first_word,
-                                        last_word - first_word + 1,
-                                        eeprom_buff);
+                                        last_word - first_word + 1,
+                                        eeprom_buff);
        else {
                for (i = 0; i < last_word - first_word + 1; i++) {
                        ret_val = e1000_read_nvm(hw, first_word + i, 1,
-                                                &eeprom_buff[i]);
+                                                &eeprom_buff[i]);
                        if (ret_val)
                                break;
                }
@@ -688,7 +754,7 @@ static int igb_get_eeprom(struct net_device *netdev,
 }
 
 static int igb_set_eeprom(struct net_device *netdev,
-                         struct ethtool_eeprom *eeprom, u8 *bytes)
+                         struct ethtool_eeprom *eeprom, u8 *bytes)
 {
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
@@ -724,7 +790,7 @@ static int igb_set_eeprom(struct net_device *netdev,
                /* need read/modify/write of last changed EEPROM word */
                /* only the first byte of the word is being modified */
                ret_val = e1000_read_nvm(hw, last_word, 1,
-                                        &eeprom_buff[last_word - first_word]);
+                                        &eeprom_buff[last_word - first_word]);
        }
 
        /* Device's eeprom is always little-endian, word addressable */
@@ -737,11 +803,11 @@ static int igb_set_eeprom(struct net_device *netdev,
                cpu_to_le16s(&eeprom_buff[i]);
 
        ret_val = e1000_write_nvm(hw, first_word,
-                                 last_word - first_word + 1, eeprom_buff);
+                                 last_word - first_word + 1, eeprom_buff);
 
-       /* Update the checksum over the first part of the EEPROM if needed
+       /* Update the checksum if write succeeded.
         * and flush shadow RAM for 82573 controllers */
-       if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG)))
+       if (ret_val == 0)
                e1000_update_nvm_checksum(hw);
 
        kfree(eeprom_buff);
@@ -749,22 +815,16 @@ static int igb_set_eeprom(struct net_device *netdev,
 }
 
 static void igb_get_drvinfo(struct net_device *netdev,
-                           struct ethtool_drvinfo *drvinfo)
+                           struct ethtool_drvinfo *drvinfo)
 {
        struct igb_adapter *adapter = netdev_priv(netdev);
 
-       strncpy(drvinfo->driver, igb_driver_name, 32);
-       strncpy(drvinfo->version, igb_driver_version, 32);
-
-       /* EEPROM image version # is reported as firmware version # for
-        * 82575 controllers */
+       strncpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver) - 1);
+       strncpy(drvinfo->version, igb_driver_version, sizeof(drvinfo->version) - 1);
 
-       snprintf(drvinfo->fw_version, 32, "%d.%d-%d",
-                (adapter->fw_version & 0xF000) >> 12,
-                (adapter->fw_version & 0x0FF0) >> 4,
-                adapter->fw_version & 0x000F);
-
-       strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+       strncpy(drvinfo->fw_version, adapter->fw_version,
+               sizeof(drvinfo->fw_version) - 1);
+       strncpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info) -1);
        drvinfo->n_stats = IGB_STATS_LEN;
        drvinfo->testinfo_len = IGB_TEST_LEN;
        drvinfo->regdump_len = igb_get_regs_len(netdev);
@@ -772,7 +832,7 @@ static void igb_get_drvinfo(struct net_device *netdev,
 }
 
 static void igb_get_ringparam(struct net_device *netdev,
-                             struct ethtool_ringparam *ring)
+                             struct ethtool_ringparam *ring)
 {
        struct igb_adapter *adapter = netdev_priv(netdev);
 
@@ -787,7 +847,7 @@ static void igb_get_ringparam(struct net_device *netdev,
 }
 
 static int igb_set_ringparam(struct net_device *netdev,
-                            struct ethtool_ringparam *ring)
+                            struct ethtool_ringparam *ring)
 {
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct igb_ring *temp_ring;
@@ -812,13 +872,13 @@ static int igb_set_ringparam(struct net_device *netdev,
        }
 
        while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
-               msleep(1);
+               usleep_range(1000, 2000);
 
        if (!netif_running(adapter->netdev)) {
                for (i = 0; i < adapter->num_tx_queues; i++)
                        adapter->tx_ring[i]->count = new_tx_count;
                for (i = 0; i < adapter->num_rx_queues; i++)
-                       adapter->rx_ring[i]->count = new_tx_count;
+                       adapter->rx_ring[i]->count = new_rx_count;
                adapter->tx_ring_count = new_tx_count;
                adapter->rx_ring_count = new_rx_count;
                goto clear_reset;
@@ -900,7 +960,6 @@ clear_reset:
        clear_bit(__IGB_RESETTING, &adapter->state);
        return err;
 }
-
 static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
                             int reg, u32 mask, u32 write)
 {
@@ -910,12 +969,11 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
                {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
        for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {
                E1000_WRITE_REG(hw, reg, (_test[pat] & write));
-               val = E1000_READ_REG(hw, reg);
+               val = E1000_READ_REG(hw, reg) & mask;
                if (val != (_test[pat] & write & mask)) {
-                       DPRINTK(DRV, ERR, "pattern test reg %04X failed: got "
-                               "0x%08X expected 0x%08X\n",
-                               E1000_REGISTER(hw, reg), val,
-                               (_test[pat] & write & mask));
+                       dev_err(pci_dev_to_dev(adapter->pdev), "pattern test reg %04X "
+                               "failed: got 0x%08X expected 0x%08X\n",
+                               E1000_REGISTER(hw, reg), val, (_test[pat] & write & mask));
                        *data = E1000_REGISTER(hw, reg);
                        return 1;
                }
@@ -932,8 +990,9 @@ static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
        E1000_WRITE_REG(hw, reg, write & mask);
        val = E1000_READ_REG(hw, reg);
        if ((write & mask) != (val & mask)) {
-               DPRINTK(DRV, ERR, "set/check reg %04X test failed: got 0x%08X "
-                       "expected 0x%08X\n", reg, (val & mask), (write & mask));
+               dev_err(pci_dev_to_dev(adapter->pdev), "set/check reg %04X test failed:"
+                       " got 0x%08X expected 0x%08X\n", reg,
+                       (val & mask), (write & mask));
                *data = E1000_REGISTER(hw, reg);
                return 1;
        }
@@ -962,9 +1021,15 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
 
        switch (adapter->hw.mac.type) {
        case e1000_i350:
+       case e1000_i354:
                test = reg_test_i350;
                toggle = 0x7FEFF3FF;
                break;
+       case e1000_i210:
+       case e1000_i211:
+               test = reg_test_i210;
+               toggle = 0x7FEFF3FF;
+               break;
        case e1000_82580:
                test = reg_test_82580;
                toggle = 0x7FEFF3FF;
@@ -989,8 +1054,8 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
        E1000_WRITE_REG(hw, E1000_STATUS, toggle);
        after = E1000_READ_REG(hw, E1000_STATUS) & toggle;
        if (value != after) {
-               DPRINTK(DRV, ERR, "failed STATUS register test got: "
-                       "0x%08X expected: 0x%08X\n", after, value);
+               dev_err(pci_dev_to_dev(adapter->pdev), "failed STATUS register test "
+                       "got: 0x%08X expected: 0x%08X\n", after, value);
                *data = 1;
                return 1;
        }
@@ -1018,7 +1083,7 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
                        case WRITE_NO_TEST:
                                writel(test->write,
                                       (adapter->hw.hw_addr + test->reg)
-                                       + (i * test->reg_offset));
+                                      + (i * test->reg_offset));
                                break;
                        case TABLE32_TEST:
                                REG_PATTERN_TEST(test->reg + (i * 4),
@@ -1046,22 +1111,10 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
 
 static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data)
 {
-       u16 temp;
-       u16 checksum = 0;
-       u16 i;
-
        *data = 0;
-       /* Read and add up the contents of the EEPROM */
-       for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
-               if ((e1000_read_nvm(&adapter->hw, i, 1, &temp)) < 0) {
-                       *data = 1;
-                       break;
-               }
-               checksum += temp;
-       }
 
-       /* If Checksum is not Correct return error else test passed */
-       if ((checksum != (u16) NVM_SUM) && !(*data))
+       /* Validate NVM checksum */
+       if (e1000_validate_nvm_checksum(&adapter->hw) < 0)
                *data = 2;
 
        return *data;
@@ -1095,24 +1148,26 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
                }
        } else if (adapter->flags & IGB_FLAG_HAS_MSI) {
                shared_int = FALSE;
-               if (request_irq(irq, &igb_test_intr, 0, netdev->name, adapter)) {
+               if (request_irq(irq,
+                               igb_test_intr, 0, netdev->name, adapter)) {
                        *data = 1;
                        return -1;
                }
-       } else if (!request_irq(irq, &igb_test_intr, IRQF_PROBE_SHARED,
-                               netdev->name, adapter)) {
+       } else if (!request_irq(irq, igb_test_intr, IRQF_PROBE_SHARED,
+                               netdev->name, adapter)) {
                shared_int = FALSE;
        } else if (request_irq(irq, &igb_test_intr, IRQF_SHARED,
-                              netdev->name, adapter)) {
+                              netdev->name, adapter)) {
                *data = 1;
                return -1;
        }
-       DPRINTK(HW, INFO, "testing %s interrupt\n",
-               (shared_int ? "shared" : "unshared"));
+       dev_info(pci_dev_to_dev(adapter->pdev), "testing %s interrupt\n",
+                (shared_int ? "shared" : "unshared"));
 
        /* Disable all the interrupts */
        E1000_WRITE_REG(hw, E1000_IMC, ~0);
-       msleep(10);
+       E1000_WRITE_FLUSH(hw);
+       usleep_range(10000, 20000);
 
        /* Define all writable bits for ICS */
        switch (hw->mac.type) {
@@ -1126,8 +1181,13 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
                ics_mask = 0x77DCFED5;
                break;
        case e1000_i350:
+       case e1000_i354:
                ics_mask = 0x77DCFED5;
                break;
+       case e1000_i210:
+       case e1000_i211:
+               ics_mask = 0x774CFED5;
+               break;
        default:
                ics_mask = 0x7FFFFFFF;
                break;
@@ -1155,7 +1215,8 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
                        E1000_WRITE_REG(hw, E1000_IMC, mask);
                        E1000_WRITE_REG(hw, E1000_ICS, mask);
-                       msleep(10);
+                       E1000_WRITE_FLUSH(hw);
+                       usleep_range(10000, 20000);
 
                        if (adapter->test_icr & mask) {
                                *data = 3;
@@ -1176,7 +1237,8 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
                E1000_WRITE_REG(hw, E1000_IMS, mask);
                E1000_WRITE_REG(hw, E1000_ICS, mask);
-               msleep(10);
+               E1000_WRITE_FLUSH(hw);
+               usleep_range(10000, 20000);
 
                if (!(adapter->test_icr & mask)) {
                        *data = 4;
@@ -1197,7 +1259,8 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
                        E1000_WRITE_REG(hw, E1000_IMC, ~mask);
                        E1000_WRITE_REG(hw, E1000_ICS, ~mask);
-                       msleep(10);
+                       E1000_WRITE_FLUSH(hw);
+                       usleep_range(10000, 20000);
 
                        if (adapter->test_icr & mask) {
                                *data = 5;
@@ -1208,7 +1271,8 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
 
        /* Disable all the interrupts */
        E1000_WRITE_REG(hw, E1000_IMC, ~0);
-       msleep(10);
+       E1000_WRITE_FLUSH(hw);
+       usleep_range(10000, 20000);
 
        /* Unhook test interrupt handler */
        if (adapter->msix_entries)
@@ -1234,7 +1298,7 @@ static int igb_setup_desc_rings(struct igb_adapter *adapter)
 
        /* Setup Tx descriptor ring and Tx buffers */
        tx_ring->count = IGB_DEFAULT_TXD;
-       tx_ring->pdev = adapter->pdev;
+       tx_ring->dev = pci_dev_to_dev(adapter->pdev);
        tx_ring->netdev = adapter->netdev;
        tx_ring->reg_idx = adapter->vfs_allocated_count;
 
@@ -1248,9 +1312,11 @@ static int igb_setup_desc_rings(struct igb_adapter *adapter)
 
        /* Setup Rx descriptor ring and Rx buffers */
        rx_ring->count = IGB_DEFAULT_RXD;
-       rx_ring->pdev = adapter->pdev;
+       rx_ring->dev = pci_dev_to_dev(adapter->pdev);
        rx_ring->netdev = adapter->netdev;
-       rx_ring->rx_buffer_len = IGB_RXBUFFER_2048;
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
+       rx_ring->rx_buffer_len = IGB_RX_HDR_LEN;
+#endif
        rx_ring->reg_idx = adapter->vfs_allocated_count;
 
        if (igb_setup_rx_resources(rx_ring)) {
@@ -1259,14 +1325,13 @@ static int igb_setup_desc_rings(struct igb_adapter *adapter)
        }
 
        /* set the default queue to queue 0 of PF */
-       E1000_WRITE_REG(hw, E1000_MRQC,
-                       adapter->vfs_allocated_count << 3);
+       E1000_WRITE_REG(hw, E1000_MRQC, adapter->vfs_allocated_count << 3);
 
        /* enable receive ring */
        igb_setup_rctl(adapter);
        igb_configure_rx_ring(adapter, rx_ring);
 
-       igb_alloc_rx_buffers_adv(rx_ring, igb_desc_unused(rx_ring));
+       igb_alloc_rx_buffers(rx_ring, igb_desc_unused(rx_ring));
 
        return 0;
 
@@ -1294,20 +1359,25 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
        hw->mac.autoneg = FALSE;
 
        if (hw->phy.type == e1000_phy_m88) {
-               /* Auto-MDI/MDIX Off */
-               e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
-               /* reset to update Auto-MDI/MDIX */
-               e1000_write_phy_reg(hw, PHY_CONTROL, 0x9140);
-               /* autoneg off */
-               e1000_write_phy_reg(hw, PHY_CONTROL, 0x8140);
-       } else if (hw->phy.type == e1000_phy_82580) {
+               if (hw->phy.id != I210_I_PHY_ID) {
+                       /* Auto-MDI/MDIX Off */
+                       e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
+                       /* reset to update
 Auto-MDI/MDIX */
+                       e1000_write_phy_reg(hw, PHY_CONTROL, 0x9140);
+                       /* autoneg off */
+                       e1000_write_phy_reg(hw, PHY_CONTROL, 0x8140);
+               } else {
+                       /* force 1000, set loopback */
+                       e1000_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0);
+                       e1000_write_phy_reg(hw, PHY_CONTROL, 0x4140);
+               }
+       } else {
                /* enable MII loopback */
-               e1000_write_phy_reg(hw, I82577_PHY_LBK_CTRL, 0x8041);
+               if (hw->phy.type == e1000_phy_82580)
+                       e1000_write_phy_reg(hw, I82577_PHY_LBK_CTRL, 0x8041);
        }
 
-       ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
-
-       /* force 1000, set loopback */
+       /* force 1000, set loopback */
        e1000_write_phy_reg(hw, PHY_CONTROL, 0x4140);
 
        /* Now set up the MAC to the same speed/duplex as the PHY. */
@@ -1330,8 +1400,7 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
        if (hw->phy.type == e1000_phy_m88)
                igb_phy_disable_receiver(adapter);
 
-       udelay(500);
-
+       mdelay(500);
        return 0;
 }
 
@@ -1349,6 +1418,22 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter)
 
        /* use CTRL_EXT to identify link type as SGMII can appear as copper */
        if (reg & E1000_CTRL_EXT_LINK_MODE_MASK) {
+               if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) ||
+                   (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
+                   (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
+                   (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) {
+
+                       /* Enable DH89xxCC MPHY for near end loopback */
+                       reg = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTL);
+                       reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) |
+                             E1000_MPHY_PCS_CLK_REG_OFFSET;
+                       E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTL, reg);
+
+                       reg = E1000_READ_REG(hw, E1000_MPHY_DATA);
+                       reg |= E1000_MPHY_PCS_CLK_REG_DIGINELBEN;
+                       E1000_WRITE_REG(hw, E1000_MPHY_DATA, reg);
+               }
+
                reg = E1000_READ_REG(hw, E1000_RCTL);
                reg |= E1000_RCTL_LBM_TCVR;
                E1000_WRITE_REG(hw, E1000_RCTL, reg);
@@ -1368,6 +1453,15 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter)
                reg &= ~E1000_CONNSW_ENRGSRC;
                E1000_WRITE_REG(hw, E1000_CONNSW, reg);
 
+               /* Unset sigdetect for SERDES loopback on
+                * 82580 and newer devices
+                */
+               if (hw->mac.type >= e1000_82580) {
+                       reg = E1000_READ_REG(hw, E1000_PCS_CFG0);
+                       reg |= E1000_PCS_CFG_IGN_SD;
+                       E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg);
+               }
+
                /* Set PCS register for forced speed */
                reg = E1000_READ_REG(hw, E1000_PCS_LCTL);
                reg &= ~E1000_PCS_LCTL_AN_ENABLE;     /* Disable Autoneg*/
@@ -1390,7 +1484,24 @@ static void igb_loopback_cleanup(struct igb_adapter *adapter)
        u32 rctl;
        u16 phy_reg;
 
-       rctl = E1000_READ_REG(hw, E1000_RCTL);
+       if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) ||
+           (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
+           (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
+           (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) {
+               u32 reg;
+
+               /* Disable near end loopback on DH89xxCC */
+               reg = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTL);
+               reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK ) |
+                     E1000_MPHY_PCS_CLK_REG_OFFSET;
+               E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTL, reg);
+
+               reg = E1000_READ_REG(hw, E1000_MPHY_DATA);
+               reg &= ~E1000_MPHY_PCS_CLK_REG_DIGINELBEN;
+               E1000_WRITE_REG(hw, E1000_MPHY_DATA, reg);
+       }
+
+       rctl = E1000_READ_REG(hw, E1000_RCTL);
        rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
        E1000_WRITE_REG(hw, E1000_RCTL, rctl);
 
@@ -1398,13 +1509,14 @@ static void igb_loopback_cleanup(struct igb_adapter *adapter)
        e1000_read_phy_reg(hw, PHY_CONTROL, &phy_reg);
        if (phy_reg & MII_CR_LOOPBACK) {
                phy_reg &= ~MII_CR_LOOPBACK;
+               if (hw->phy.type == I210_I_PHY_ID)
+                       e1000_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0);
                e1000_write_phy_reg(hw, PHY_CONTROL, phy_reg);
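                /* the commit below makes the cleared loopback bit take effect */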
e1000_phy_commit(hw); } } - static void igb_create_lbtest_frame(struct sk_buff *skb, - unsigned int frame_size) + unsigned int frame_size) { memset(skb->data, 0xFF, frame_size); frame_size /= 2; @@ -1413,51 +1525,77 @@ static void igb_create_lbtest_frame(struct sk_buff *skb, memset(&skb->data[frame_size + 12], 0xAF, 1); } -static int igb_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size) +static int igb_check_lbtest_frame(struct igb_rx_buffer *rx_buffer, + unsigned int frame_size) { - frame_size /= 2; - if (*(skb->data + 3) == 0xFF) { - if ((*(skb->data + frame_size + 10) == 0xBE) && - (*(skb->data + frame_size + 12) == 0xAF)) { - return 0; - } - } - return 13; + unsigned char *data; + bool match = true; + + frame_size >>= 1; + +#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT + data = rx_buffer->skb->data; +#else + data = kmap(rx_buffer->page); +#endif + + if (data[3] != 0xFF || + data[frame_size + 10] != 0xBE || + data[frame_size + 12] != 0xAF) + match = false; + +#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT + kunmap(rx_buffer->page); + +#endif + return match; } -static int igb_clean_test_rings(struct igb_ring *rx_ring, +static u16 igb_clean_test_rings(struct igb_ring *rx_ring, struct igb_ring *tx_ring, unsigned int size) { union e1000_adv_rx_desc *rx_desc; - struct igb_buffer *buffer_info; - int rx_ntc, tx_ntc, count = 0; - u32 staterr; + struct igb_rx_buffer *rx_buffer_info; + struct igb_tx_buffer *tx_buffer_info; + u16 rx_ntc, tx_ntc, count = 0; /* initialize next to clean and descriptor values */ rx_ntc = rx_ring->next_to_clean; tx_ntc = tx_ring->next_to_clean; - rx_desc = E1000_RX_DESC_ADV(*rx_ring, rx_ntc); - staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + rx_desc = IGB_RX_DESC(rx_ring, rx_ntc); - while (staterr & E1000_RXD_STAT_DD) { + while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) { /* check rx buffer */ - buffer_info = &rx_ring->buffer_info[rx_ntc]; + rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc]; - /* unmap rx buffer, will be remapped by alloc_rx_buffers */ - pci_unmap_single(rx_ring->pdev, - buffer_info->dma, - rx_ring->rx_buffer_len, - PCI_DMA_FROMDEVICE); - buffer_info->dma = 0; + /* sync Rx buffer for CPU read */ + dma_sync_single_for_cpu(rx_ring->dev, + rx_buffer_info->dma, +#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT + IGB_RX_HDR_LEN, +#else + IGB_RX_BUFSZ, +#endif + DMA_FROM_DEVICE); /* verify contents of skb */ - if (!igb_check_lbtest_frame(buffer_info->skb, size)) + if (igb_check_lbtest_frame(rx_buffer_info, size)) count++; + /* sync Rx buffer for device write */ + dma_sync_single_for_device(rx_ring->dev, + rx_buffer_info->dma, +#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT + IGB_RX_HDR_LEN, +#else + IGB_RX_BUFSZ, +#endif + DMA_FROM_DEVICE); + /* unmap buffer on tx side */ - buffer_info = &tx_ring->buffer_info[tx_ntc]; - igb_unmap_and_free_tx_resource(tx_ring, buffer_info); + tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc]; + igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); /* increment rx/tx next to clean counters */ rx_ntc++; @@ -1468,12 +1606,11 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring, tx_ntc = 0; /* fetch next descriptor */ - rx_desc = E1000_RX_DESC_ADV(*rx_ring, rx_ntc); - staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + rx_desc = IGB_RX_DESC(rx_ring, rx_ntc); } /* re-map buffers to ring, store next to clean values */ - igb_alloc_rx_buffers_adv(rx_ring, count); + igb_alloc_rx_buffers(rx_ring, count); rx_ring->next_to_clean = rx_ntc; tx_ring->next_to_clean = tx_ntc; @@ -1484,8 +1621,9 @@ static int 
igb_run_loopback_test(struct igb_adapter *adapter)
 {
 	struct igb_ring *tx_ring = &adapter->test_tx_ring;
 	struct igb_ring *rx_ring = &adapter->test_rx_ring;
-	int i, j, lc, good_cnt, ret_val = 0;
-	unsigned int size = 1024;
+	u16 i, j, lc, good_cnt;
+	int ret_val = 0;
+	unsigned int size = IGB_RX_HDR_LEN;
 	netdev_tx_t tx_ret_val;
 	struct sk_buff *skb;
 
@@ -1516,7 +1654,7 @@ static int igb_run_loopback_test(struct igb_adapter *adapter)
 	/* place 64 packets on the transmit queue */
 	for (i = 0; i < 64; i++) {
 		skb_get(skb);
-		tx_ret_val = igb_xmit_frame_ring_adv(skb, tx_ring);
+		tx_ret_val = igb_xmit_frame_ring(skb, tx_ring);
 		if (tx_ret_val == NETDEV_TX_OK)
 			good_cnt++;
 	}
@@ -1547,8 +1685,15 @@ static int igb_loopback_test(struct igb_adapter *adapter, u64 *data)
 	/* PHY loopback cannot be performed if SoL/IDER
 	 * sessions are active */
 	if (e1000_check_reset_block(&adapter->hw)) {
-		DPRINTK(DRV, ERR, "Cannot do PHY loopback test "
-		        "when SoL/IDER is active.\n");
+		dev_err(pci_dev_to_dev(adapter->pdev),
+			"Cannot do PHY loopback test "
+			"when SoL/IDER is active.\n");
+		*data = 0;
+		goto out;
+	}
+	if (adapter->hw.mac.type == e1000_i354) {
+		dev_info(&adapter->pdev->dev,
+			 "Loopback test not supported on i354.\n");
 		*data = 0;
 		goto out;
 	}
@@ -1559,6 +1704,7 @@ static int igb_loopback_test(struct igb_adapter *adapter, u64 *data)
 	if (*data)
 		goto err_loopback;
 	*data = igb_run_loopback_test(adapter);
+	igb_loopback_cleanup(adapter);
 
 err_loopback:
@@ -1569,8 +1715,11 @@ out:
 
 static int igb_link_test(struct igb_adapter *adapter, u64 *data)
 {
-	struct e1000_hw *hw = &adapter->hw;
+	u32 link = 0;
+	int i;
+
+	*data = 0;
 	if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
 		int i = 0;
 		adapter->hw.mac.serdes_has_link = FALSE;
@@ -1580,24 +1729,30 @@ static int igb_link_test(struct igb_adapter *adapter, u64 *data)
 		do {
 			e1000_check_for_link(&adapter->hw);
 			if (adapter->hw.mac.serdes_has_link)
-				return *data;
+				goto out;
 			msleep(20);
 		} while (i++ < 3750);
 
 		*data = 1;
 	} else {
-		e1000_check_for_link(&adapter->hw);
-		if (adapter->hw.mac.autoneg)
-			msleep(4000);
-
-		if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU))
+		for (i = 0; i < IGB_MAX_LINK_TRIES; i++) {
+			link = igb_has_link(adapter);
+			if (link)
+				goto out;
+			msleep(1000);
+		}
+		if (!link)
 			*data = 1;
 	}
-	return *data;
+out:
+	return *data;
 }
 
 static void igb_diag_test(struct net_device *netdev,
-			  struct ethtool_test *eth_test, u64 *data)
+			  struct ethtool_test *eth_test, u64 *data)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	u16 autoneg_advertised;
@@ -1613,9 +1768,12 @@ static void igb_diag_test(struct net_device *netdev,
 	forced_speed_duplex = adapter->hw.mac.forced_speed_duplex;
 	autoneg = adapter->hw.mac.autoneg;
 
-	DPRINTK(HW, INFO, "offline testing starting\n");
+	dev_info(pci_dev_to_dev(adapter->pdev), "offline testing starting\n");
 
-	/* Link test performed before hardware reset so autoneg doesn't
+	/* power up link for link test */
+	igb_power_up_link(adapter);
+
+	/* Link test performed before hardware reset so autoneg doesn't
 	 * interfere with test result */
 	if (igb_link_test(adapter, &data[4]))
 		eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1638,6 +1796,10 @@ static void igb_diag_test(struct net_device *netdev,
 		eth_test->flags |= ETH_TEST_FL_FAILED;
 
 	igb_reset(adapter);
+
+	/* power up link for loopback test */
+	igb_power_up_link(adapter);
+
 	if (igb_loopback_test(adapter, &data[3]))
 		eth_test->flags |= ETH_TEST_FL_FAILED;
 
@@ -1655,10 +1817,13 @@ static void igb_diag_test(struct net_device *netdev,
 		if
(if_running) dev_open(netdev); } else { - DPRINTK(HW, INFO, "online testing starting\n"); - /* Online tests */ - if (igb_link_test(adapter, &data[4])) + dev_info(pci_dev_to_dev(adapter->pdev), "online testing starting\n"); + + /* PHY is powered down when interface is down */ + if (if_running && igb_link_test(adapter, &data[4])) eth_test->flags |= ETH_TEST_FL_FAILED; + else + data[4] = 0; /* Online tests aren't run; pass by default */ data[0] = 0; @@ -1671,55 +1836,6 @@ static void igb_diag_test(struct net_device *netdev, msleep_interruptible(4 * 1000); } -static int igb_wol_exclusion(struct igb_adapter *adapter, - struct ethtool_wolinfo *wol) -{ - struct e1000_hw *hw = &adapter->hw; - int retval = 1; /* fail by default */ - - switch (hw->device_id) { - case E1000_DEV_ID_82575GB_QUAD_COPPER: - /* WoL not supported */ - wol->supported = 0; - break; - case E1000_DEV_ID_82575EB_FIBER_SERDES: - case E1000_DEV_ID_82576_FIBER: - case E1000_DEV_ID_82576_SERDES: - /* Wake events not supported on port B */ - if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FUNC_1) { - wol->supported = 0; - break; - } - /* return success for non excluded adapter ports */ - retval = 0; - break; - case E1000_DEV_ID_82576_QUAD_COPPER: - case E1000_DEV_ID_82576_QUAD_COPPER_ET2: - /* quad port adapters only support WoL on port A */ - if (!(adapter->flags & IGB_FLAG_QUAD_PORT_A)) { - wol->supported = 0; - break; - } - /* return success for non excluded adapter ports */ - retval = 0; - break; - default: - /* dual port cards only support WoL on port A from now on - * unless it was enabled in the eeprom for port B - * so exclude FUNC_1 ports from having WoL enabled */ - if ((E1000_READ_REG(hw, E1000_STATUS) & - E1000_STATUS_FUNC_MASK) && - !adapter->eeprom_wol) { - wol->supported = 0; - break; - } - - retval = 0; - } - - return retval; -} - static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct igb_adapter *adapter = netdev_priv(netdev); @@ -1729,10 +1845,7 @@ static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) WAKE_PHY; wol->wolopts = 0; - /* this function will set ->supported = 0 and return 1 if wol is not - * supported by this hardware */ - if (igb_wol_exclusion(adapter, wol) || - !device_can_wakeup(&adapter->pdev->dev)) + if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED)) return; /* apply any specific unsupported masks here */ @@ -1760,7 +1873,7 @@ static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE)) return -EOPNOTSUPP; - if (igb_wol_exclusion(adapter, wol)) + if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED)) return wol->wolopts ? 
-EOPNOTSUPP : 0;
 
 	/* these settings will always override what we currently have */
@@ -1782,8 +1895,32 @@ static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 }
 
 /* bit defines for adapter->led_status */
-#define IGB_LED_ON	0
-
+#ifdef HAVE_ETHTOOL_SET_PHYS_ID
+static int igb_set_phys_id(struct net_device *netdev,
+			   enum ethtool_phys_id_state state)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+
+	switch (state) {
+	case ETHTOOL_ID_ACTIVE:
+		e1000_blink_led(hw);
+		return 2;
+	case ETHTOOL_ID_ON:
+		e1000_led_on(hw);
+		break;
+	case ETHTOOL_ID_OFF:
+		e1000_led_off(hw);
+		break;
+	case ETHTOOL_ID_INACTIVE:
+		e1000_led_off(hw);
+		e1000_cleanup_led(hw);
+		break;
+	}
+
+	return 0;
+}
+#else
 static int igb_phys_id(struct net_device *netdev, u32 data)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
@@ -1803,11 +1940,11 @@ static int igb_phys_id(struct net_device *netdev, u32 data)
 	msleep_interruptible(timeout);
 
 	e1000_led_off(hw);
-	clear_bit(IGB_LED_ON, &adapter->led_status);
 	e1000_cleanup_led(hw);
 
 	return 0;
 }
+#endif /* HAVE_ETHTOOL_SET_PHYS_ID */
 
 static int igb_set_coalesce(struct net_device *netdev,
 			    struct ethtool_coalesce *ec)
@@ -1819,7 +1956,10 @@ static int igb_set_coalesce(struct net_device *netdev,
 	    ((ec->rx_coalesce_usecs > 3) &&
 	     (ec->rx_coalesce_usecs < IGB_MIN_ITR_USECS)) ||
-	    (ec->rx_coalesce_usecs == 2))
+	    (ec->rx_coalesce_usecs == 2)) {
+		printk(KERN_ERR "set_coalesce: invalid parameter\n");
 		return -EINVAL;
+	}
 
 	if ((ec->tx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
 	    ((ec->tx_coalesce_usecs > 3) &&
@@ -1830,7 +1970,15 @@ static int igb_set_coalesce(struct net_device *netdev,
 	if ((adapter->flags & IGB_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs)
 		return -EINVAL;
 
-	/* convert to rate of irq's per second */
+	if (ec->tx_max_coalesced_frames_irq)
+		adapter->tx_work_limit = ec->tx_max_coalesced_frames_irq;
+
+	/* If ITR is disabled, disable DMAC */
+	if (ec->rx_coalesce_usecs == 0)
+		adapter->dmac = IGB_DMAC_DISABLE;
+
+	/* convert to rate of IRQs per second */
 	if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3)
 		adapter->rx_itr_setting = ec->rx_coalesce_usecs;
 	else
@@ -1846,7 +1994,8 @@ static int igb_set_coalesce(struct net_device *netdev,
 
 	for (i = 0; i < adapter->num_q_vectors; i++) {
 		struct igb_q_vector *q_vector = adapter->q_vector[i];
-		if (q_vector->rx_ring)
+		q_vector->tx.work_limit = adapter->tx_work_limit;
+		if (q_vector->rx.ring)
 			q_vector->itr_val = adapter->rx_itr_setting;
 		else
 			q_vector->itr_val = adapter->tx_itr_setting;
@@ -1868,6 +2017,8 @@ static int igb_get_coalesce(struct net_device *netdev,
 	else
 		ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
 
+	ec->tx_max_coalesced_frames_irq = adapter->tx_work_limit;
+
 	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) {
 		if (adapter->tx_itr_setting <= 3)
 			ec->tx_coalesce_usecs = adapter->tx_itr_setting;
@@ -1911,7 +2062,7 @@ static int igb_diag_test_count(struct net_device *netdev)
 #endif
 static void igb_get_ethtool_stats(struct net_device *netdev,
-				  struct ethtool_stats *stats, u64 *data)
+				  struct ethtool_stats *stats, u64 *data)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
 #ifdef HAVE_NETDEV_STATS_IN_NETDEV
@@ -1988,47 +2139,621 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
 			p += ETH_GSTRING_LEN;
 			sprintf(p, "rx_queue_%u_alloc_failed", i);
 			p += ETH_GSTRING_LEN;
+			sprintf(p, "rx_queue_%u_ipv4_packets", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "rx_queue_%u_ipv4e_packets", i);
+			p +=
ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_ipv6e_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_tcp_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_udp_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_sctp_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_nfs_packets", i); + p += ETH_GSTRING_LEN; } /* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */ break; } } -#ifdef NETIF_F_LRO -#ifdef ETHTOOL_GFLAGS -#ifdef IGB_LRO -static int igb_set_flags(struct net_device *netdev, u32 data) +#ifdef HAVE_ETHTOOL_GET_TS_INFO +static int igb_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) { - struct igb_adapter *adapter = netdev_priv(netdev); - int i; - ethtool_op_set_flags(netdev, data); + struct igb_adapter *adapter = netdev_priv(dev); + + switch (adapter->hw.mac.type) { +#ifdef HAVE_PTP_1588_CLOCK + case e1000_82575: + info->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; + return 0; + case e1000_82576: + case e1000_82580: + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + info->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + if (adapter->ptp_clock) + info->phc_index = ptp_clock_index(adapter->ptp_clock); + else + info->phc_index = -1; + + info->tx_types = + (1 << HWTSTAMP_TX_OFF) | + (1 << HWTSTAMP_TX_ON); - /* enable / disable LRO for all of the applicable rx queues */ - for (i = 0; i < adapter->rss_queues; i++) { - if (data & ETH_FLAG_LRO) - adapter->rx_ring[i]->flags |= IGB_RING_FLAG_RX_LRO; + info->rx_filters = 1 << HWTSTAMP_FILTER_NONE; + + /* 82576 does not support timestamping all packets. 
*/
+		if (adapter->hw.mac.type >= e1000_82580)
+			info->rx_filters |= 1 << HWTSTAMP_FILTER_ALL;
 		else
-			adapter->rx_ring[i]->flags &= ~IGB_RING_FLAG_RX_LRO;
+			info->rx_filters |=
+				(1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+				(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+				(1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+				(1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+				(1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+				(1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+				(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+		return 0;
+#endif /* HAVE_PTP_1588_CLOCK */
+	default:
+		return -EOPNOTSUPP;
 	}
+}
+#endif /* HAVE_ETHTOOL_GET_TS_INFO */
+
+#ifdef CONFIG_PM_RUNTIME
+static int igb_ethtool_begin(struct net_device *netdev)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+
+	pm_runtime_get_sync(&adapter->pdev->dev);
+	return 0;
+}
+
+static void igb_ethtool_complete(struct net_device *netdev)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+
+	pm_runtime_put(&adapter->pdev->dev);
+}
+#endif /* CONFIG_PM_RUNTIME */
 
-#endif /* IGB_LRO */
-#endif /* ETHTOOL_GFLAGS */
-#endif /* NETIF_F_LRO */
-static struct ethtool_ops igb_ethtool_ops = {
-	.get_settings		= igb_get_settings,
-	.set_settings		= igb_set_settings,
-	.get_drvinfo		= igb_get_drvinfo,
-	.get_regs_len		= igb_get_regs_len,
-	.get_regs		= igb_get_regs,
-	.get_wol		= igb_get_wol,
-	.set_wol		= igb_set_wol,
-	.get_msglevel		= igb_get_msglevel,
+#ifndef HAVE_NDO_SET_FEATURES
+static u32 igb_get_rx_csum(struct net_device *netdev)
+{
+	return !!(netdev->features & NETIF_F_RXCSUM);
+}
+
+static int igb_set_rx_csum(struct net_device *netdev, u32 data)
+{
+	const u32 feature_list = NETIF_F_RXCSUM;
+
+	if (data)
+		netdev->features |= feature_list;
+	else
+		netdev->features &= ~feature_list;
+
+	return 0;
+}
+
+static int igb_set_tx_csum(struct net_device *netdev, u32 data)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+#ifdef NETIF_F_IPV6_CSUM
+	u32 feature_list = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+#else
+	u32 feature_list = NETIF_F_IP_CSUM;
+#endif
+
+	if (adapter->hw.mac.type >= e1000_82576)
+		feature_list |= NETIF_F_SCTP_CSUM;
+
+	if (data)
+		netdev->features |= feature_list;
+	else
+		netdev->features &= ~feature_list;
+
+	return 0;
+}
+
+#ifdef NETIF_F_TSO
+static int igb_set_tso(struct net_device *netdev, u32 data)
+{
+#ifdef NETIF_F_TSO6
+	const u32 feature_list = NETIF_F_TSO | NETIF_F_TSO6;
+#else
+	const u32 feature_list = NETIF_F_TSO;
+#endif
+
+	if (data)
+		netdev->features |= feature_list;
+	else
+		netdev->features &= ~feature_list;
+
+#ifndef HAVE_NETDEV_VLAN_FEATURES
+	if (!data) {
+		struct igb_adapter *adapter = netdev_priv(netdev);
+		struct net_device *v_netdev;
+		int i;
+
+		/* disable TSO on all VLANs if they're present */
+		if (!adapter->vlgrp)
+			goto tso_out;
+
+		for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
+			v_netdev = vlan_group_get_device(adapter->vlgrp, i);
+			if (!v_netdev)
+				continue;
+
+			v_netdev->features &= ~feature_list;
+			vlan_group_set_device(adapter->vlgrp, i, v_netdev);
+		}
+	}
+
+tso_out:
+
+#endif /* HAVE_NETDEV_VLAN_FEATURES */
+	return 0;
+}
+
+#endif /* NETIF_F_TSO */
+#ifdef ETHTOOL_GFLAGS
+static int igb_set_flags(struct net_device *netdev, u32 data)
+{
+	u32 supported_flags = ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN |
+			      ETH_FLAG_RXHASH;
+#ifndef HAVE_VLAN_RX_REGISTER
+	u32 changed = netdev->features ^ data;
+#endif
+	int rc;
+
+	/* Tx VLAN acceleration cannot be enabled separately, so make
+	 * sure the Tx flag is cleared whenever the Rx flag is.
+	 */
+	if (!(data & ETH_FLAG_RXVLAN))
+		data &= ~ETH_FLAG_TXVLAN;
+
+	rc = ethtool_op_set_flags(netdev, data, supported_flags);
+	if (rc)
+		return rc;
+#ifndef HAVE_VLAN_RX_REGISTER
+
+	if (changed & ETH_FLAG_RXVLAN)
+		igb_vlan_mode(netdev, data);
+#endif
+
+	return 0;
+}
+
+#endif /* ETHTOOL_GFLAGS */
+#endif /* HAVE_NDO_SET_FEATURES */
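
The first thing igb_set_flags() does is clamp the requested flags: since the hardware offers no way to keep Tx VLAN acceleration enabled once Rx VLAN acceleration is off, ETH_FLAG_TXVLAN is silently dropped whenever ETH_FLAG_RXVLAN is absent. The same rule isolated as a tiny standalone C program; the two flag values match the legacy ethtool flag bits, and nothing below is driver code:

    #include <stdio.h>

    #define ETH_FLAG_TXVLAN (1 << 7)	/* legacy Tx VLAN accel bit */
    #define ETH_FLAG_RXVLAN (1 << 8)	/* legacy Rx VLAN accel bit */

    static unsigned int clamp_vlan_flags(unsigned int data)
    {
    	/* Hardware cannot keep Tx VLAN acceleration on once Rx is off. */
    	if (!(data & ETH_FLAG_RXVLAN))
    		data &= ~ETH_FLAG_TXVLAN;
    	return data;
    }

    int main(void)
    {
    	/* Requesting TXVLAN alone degrades to no VLAN acceleration. */
    	printf("0x%x\n", clamp_vlan_flags(ETH_FLAG_TXVLAN));	/* 0x0 */
    	return 0;
    }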
+#ifdef ETHTOOL_SADV_COAL
+static int igb_set_adv_coal(struct net_device *netdev,
+			    struct ethtool_value *edata)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+
+	switch (edata->data) {
+	case IGB_DMAC_DISABLE:
+	case IGB_DMAC_MIN:
+	case IGB_DMAC_500:
+	case IGB_DMAC_EN_DEFAULT:
+	case IGB_DMAC_2000:
+	case IGB_DMAC_3000:
+	case IGB_DMAC_4000:
+	case IGB_DMAC_5000:
+	case IGB_DMAC_6000:
+	case IGB_DMAC_7000:
+	case IGB_DMAC_8000:
+	case IGB_DMAC_9000:
+	case IGB_DMAC_MAX:
+		adapter->dmac = edata->data;
+		break;
+	default:
+		adapter->dmac = IGB_DMAC_DISABLE;
+		printk(KERN_WARNING "set_dmac: invalid setting, setting DMAC to %d\n",
+		       adapter->dmac);
+	}
+	printk(KERN_INFO "%s: setting DMAC to %d\n", netdev->name,
+	       adapter->dmac);
+	return 0;
+}
+#endif /* ETHTOOL_SADV_COAL */
+#ifdef ETHTOOL_GADV_COAL
+static void igb_get_dmac(struct net_device *netdev,
+			 struct ethtool_value *edata)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+
+	edata->data = adapter->dmac;
+}
+#endif
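
Because every recognized IGB_DMAC_* value in igb_set_adv_coal() results in the same assignment, the validation is really a set-membership test with a fall-back to "disabled", which is why the switch collapses to shared case labels. A table-driven sketch of the same idea; the IGB_DMAC_* constants are driver-private, so the values below are invented placeholders for illustration only:

    #include <stddef.h>

    /* Placeholder values; the real IGB_DMAC_* constants are driver-private. */
    enum {
    	DMAC_DISABLE = 0,
    	DMAC_MIN     = 250,
    	DMAC_500     = 500,
    	DMAC_DEFAULT = 1000,
    	DMAC_MAX     = 10000,
    };

    static const int dmac_valid[] = {
    	DMAC_DISABLE, DMAC_MIN, DMAC_500, DMAC_DEFAULT, DMAC_MAX,
    };

    /* Accept a requested watchdog value only if it is a known setting. */
    static int dmac_sanitize(int requested)
    {
    	size_t i;

    	for (i = 0; i < sizeof(dmac_valid) / sizeof(dmac_valid[0]); i++)
    		if (dmac_valid[i] == requested)
    			return requested;
    	return DMAC_DISABLE;	/* unknown values disable DMA coalescing */
    }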
+
+#ifdef ETHTOOL_GEEE
+static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	u32 ret_val;
+	u16 phy_data;
+
+	if ((hw->mac.type < e1000_i350) ||
+	    (hw->phy.media_type != e1000_media_type_copper))
+		return -EOPNOTSUPP;
+
+	edata->supported = (SUPPORTED_1000baseT_Full |
+			    SUPPORTED_100baseT_Full);
+
+	if (!hw->dev_spec._82575.eee_disable)
+		edata->advertised =
+			mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert);
+
+	/* The IPCNFG and EEER registers are not supported on I354. */
+	if (hw->mac.type == e1000_i354) {
+		e1000_get_eee_status_i354(hw, (bool *)&edata->eee_active);
+	} else {
+		u32 eeer;
+
+		eeer = E1000_READ_REG(hw, E1000_EEER);
+
+		/* EEE status on negotiated link */
+		if (eeer & E1000_EEER_EEE_NEG)
+			edata->eee_active = true;
+
+		if (eeer & E1000_EEER_TX_LPI_EN)
+			edata->tx_lpi_enabled = true;
+	}
+
+	/* EEE Link Partner Advertised */
+	switch (hw->mac.type) {
+	case e1000_i350:
+		ret_val = e1000_read_emi_reg(hw, E1000_EEE_LP_ADV_ADDR_I350,
+					     &phy_data);
+		if (ret_val)
+			return -ENODATA;
+
+		edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
+
+		break;
+	case e1000_i354:
+	case e1000_i210:
+	case e1000_i211:
+		ret_val = e1000_read_xmdio_reg(hw, E1000_EEE_LP_ADV_ADDR_I210,
+					       E1000_EEE_LP_ADV_DEV_I210,
+					       &phy_data);
+		if (ret_val)
+			return -ENODATA;
+
+		edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
+
+		break;
+	default:
+		break;
+	}
+
+	edata->eee_enabled = !hw->dev_spec._82575.eee_disable;
+
+	if ((hw->mac.type == e1000_i354) &&
+	    (edata->eee_enabled))
+		edata->tx_lpi_enabled = true;
+
+	/*
+	 * report correct negotiated EEE status for devices that
+	 * wrongly report EEE at half-duplex
+	 */
+	if (adapter->link_duplex == HALF_DUPLEX) {
+		edata->eee_enabled = false;
+		edata->eee_active = false;
+		edata->tx_lpi_enabled = false;
+		edata->advertised = 0;
+	}
+
+	return 0;
+}
+#endif
+
+#ifdef ETHTOOL_SEEE
+static int igb_set_eee(struct net_device *netdev,
+		       struct ethtool_eee *edata)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	struct ethtool_eee eee_curr;
+	s32 ret_val;
+
+	if ((hw->mac.type < e1000_i350) ||
+	    (hw->phy.media_type != e1000_media_type_copper))
+		return -EOPNOTSUPP;
+
+	ret_val = igb_get_eee(netdev, &eee_curr);
+	if (ret_val)
+		return ret_val;
+
+	if (eee_curr.eee_enabled) {
+		if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) {
+			dev_err(pci_dev_to_dev(adapter->pdev),
+				"Setting EEE tx-lpi is not supported\n");
+			return -EINVAL;
+		}
+
+		/* Tx LPI time is not implemented currently */
+		if (edata->tx_lpi_timer) {
+			dev_err(pci_dev_to_dev(adapter->pdev),
+				"Setting EEE Tx LPI timer is not supported\n");
+			return -EINVAL;
+		}
+
+		if (edata->advertised &
+		    ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL)) {
+			dev_err(pci_dev_to_dev(adapter->pdev),
+				"EEE advertisement supports only 100TX and/or 1000T full duplex\n");
+			return -EINVAL;
+		}
+
+	} else if (!edata->eee_enabled) {
+		dev_err(pci_dev_to_dev(adapter->pdev),
+			"Setting EEE options is not supported with EEE disabled\n");
+		return -EINVAL;
+	}
+
+	adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);
+
+	if (hw->dev_spec._82575.eee_disable != !edata->eee_enabled) {
+		hw->dev_spec._82575.eee_disable = !edata->eee_enabled;
+
+		/* reset link */
+		if (netif_running(netdev))
+			igb_reinit_locked(adapter);
+		else
+			igb_reset(adapter);
+	}
+
+	return 0;
+}
+#endif /* ETHTOOL_SEEE */
+
+#ifdef ETHTOOL_GRXRINGS
+static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
+				 struct ethtool_rxnfc *cmd)
+{
+	cmd->data = 0;
+
+	/* Report default options for RSS on igb */
+	switch (cmd->flow_type) {
+	case TCP_V4_FLOW:
+		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+	case UDP_V4_FLOW:
+		if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
+			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+	case SCTP_V4_FLOW:
+	case AH_ESP_V4_FLOW:
+	case AH_V4_FLOW:
+	case ESP_V4_FLOW:
+	case IPV4_FLOW:
+		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+		break;
+	case TCP_V6_FLOW:
+		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+	case
UDP_V6_FLOW: + if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case IPV6_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, +#ifdef HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS + void *rule_locs) +#else + u32 *rule_locs) +#endif +{ + struct igb_adapter *adapter = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = adapter->num_rx_queues; + ret = 0; + break; + case ETHTOOL_GRXFH: + ret = igb_get_rss_hash_opts(adapter, cmd); + break; + default: + break; + } + + return ret; +} + +#define UDP_RSS_FLAGS (IGB_FLAG_RSS_FIELD_IPV4_UDP | \ + IGB_FLAG_RSS_FIELD_IPV6_UDP) +static int igb_set_rss_hash_opt(struct igb_adapter *adapter, + struct ethtool_rxnfc *nfc) +{ + u32 flags = adapter->flags; + + /* + * RSS does not support anything other than hashing + * to queues on src and dst IPs and ports + */ + if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EINVAL; + + switch (nfc->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + !(nfc->data & RXH_L4_B_0_1) || + !(nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + case UDP_V4_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags &= ~IGB_FLAG_RSS_FIELD_IPV4_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags |= IGB_FLAG_RSS_FIELD_IPV4_UDP; + break; + default: + return -EINVAL; + } + break; + case UDP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags &= ~IGB_FLAG_RSS_FIELD_IPV6_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags |= IGB_FLAG_RSS_FIELD_IPV6_UDP; + break; + default: + return -EINVAL; + } + break; + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case SCTP_V4_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case SCTP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + (nfc->data & RXH_L4_B_0_1) || + (nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + default: + return -EINVAL; + } + + /* if we changed something we need to update flags */ + if (flags != adapter->flags) { + struct e1000_hw *hw = &adapter->hw; + u32 mrqc = E1000_READ_REG(hw, E1000_MRQC); + + if ((flags & UDP_RSS_FLAGS) && + !(adapter->flags & UDP_RSS_FLAGS)) + DPRINTK(DRV, WARNING, + "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n"); + + adapter->flags = flags; + + /* Perform hash on these packet types */ + mrqc |= E1000_MRQC_RSS_FIELD_IPV4 | + E1000_MRQC_RSS_FIELD_IPV4_TCP | + E1000_MRQC_RSS_FIELD_IPV6 | + E1000_MRQC_RSS_FIELD_IPV6_TCP; + + mrqc &= ~(E1000_MRQC_RSS_FIELD_IPV4_UDP | + E1000_MRQC_RSS_FIELD_IPV6_UDP); + + if (flags & IGB_FLAG_RSS_FIELD_IPV4_UDP) + mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP; + + if (flags & IGB_FLAG_RSS_FIELD_IPV6_UDP) + mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP; + + E1000_WRITE_REG(hw, E1000_MRQC, mrqc); + } + + return 0; +} + +static int igb_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + struct igb_adapter *adapter = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case 
ETHTOOL_SRXFH:
+		ret = igb_set_rss_hash_opt(adapter, cmd);
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+#endif /* ETHTOOL_GRXRINGS */
+
+static const struct ethtool_ops igb_ethtool_ops = {
+	.get_settings		= igb_get_settings,
+	.set_settings		= igb_set_settings,
+	.get_drvinfo		= igb_get_drvinfo,
+	.get_regs_len		= igb_get_regs_len,
+	.get_regs		= igb_get_regs,
+	.get_wol		= igb_get_wol,
+	.set_wol		= igb_set_wol,
+	.get_msglevel		= igb_get_msglevel,
 	.set_msglevel		= igb_set_msglevel,
 	.nway_reset		= igb_nway_reset,
-	.get_link		= ethtool_op_get_link,
+	.get_link		= igb_get_link,
 	.get_eeprom_len		= igb_get_eeprom_len,
 	.get_eeprom		= igb_get_eeprom,
 	.set_eeprom		= igb_set_eeprom,
@@ -2036,19 +2761,15 @@ static struct ethtool_ops igb_ethtool_ops = {
 	.set_ringparam		= igb_set_ringparam,
 	.get_pauseparam		= igb_get_pauseparam,
 	.set_pauseparam		= igb_set_pauseparam,
-	.get_rx_csum		= igb_get_rx_csum,
-	.set_rx_csum		= igb_set_rx_csum,
-	.get_tx_csum		= igb_get_tx_csum,
-	.set_tx_csum		= igb_set_tx_csum,
-	.get_sg			= ethtool_op_get_sg,
-	.set_sg			= ethtool_op_set_sg,
-#ifdef NETIF_F_TSO
-	.get_tso		= ethtool_op_get_tso,
-	.set_tso		= igb_set_tso,
-#endif
 	.self_test		= igb_diag_test,
 	.get_strings		= igb_get_strings,
+#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
+#ifdef HAVE_ETHTOOL_SET_PHYS_ID
+	.set_phys_id		= igb_set_phys_id,
+#else
 	.phys_id		= igb_phys_id,
+#endif /* HAVE_ETHTOOL_SET_PHYS_ID */
+#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
 #ifdef HAVE_ETHTOOL_GET_SSET_COUNT
 	.get_sset_count		= igb_get_sset_count,
 #else
@@ -2056,25 +2777,491 @@ static struct ethtool_ops igb_ethtool_ops = {
 	.self_test_count	= igb_diag_test_count,
 #endif
 	.get_ethtool_stats	= igb_get_ethtool_stats,
-#ifdef ETHTOOL_GPERMADDR
+#ifdef HAVE_ETHTOOL_GET_PERM_ADDR
 	.get_perm_addr		= ethtool_op_get_perm_addr,
 #endif
 	.get_coalesce		= igb_get_coalesce,
 	.set_coalesce		= igb_set_coalesce,
-#ifdef NETIF_F_LRO
+#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
+#ifdef HAVE_ETHTOOL_GET_TS_INFO
+	.get_ts_info		= igb_get_ts_info,
+#endif /* HAVE_ETHTOOL_GET_TS_INFO */
+#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
+#ifdef CONFIG_PM_RUNTIME
+	.begin			= igb_ethtool_begin,
+	.complete		= igb_ethtool_complete,
+#endif /* CONFIG_PM_RUNTIME */
+#ifndef HAVE_NDO_SET_FEATURES
+	.get_rx_csum		= igb_get_rx_csum,
+	.set_rx_csum		= igb_set_rx_csum,
+	.get_tx_csum		= ethtool_op_get_tx_csum,
+	.set_tx_csum		= igb_set_tx_csum,
+	.get_sg			= ethtool_op_get_sg,
+	.set_sg			= ethtool_op_set_sg,
+#ifdef NETIF_F_TSO
+	.get_tso		= ethtool_op_get_tso,
+	.set_tso		= igb_set_tso,
+#endif
 #ifdef ETHTOOL_GFLAGS
 	.get_flags		= ethtool_op_get_flags,
-#ifdef IGB_LRO
 	.set_flags		= igb_set_flags,
-#else
-	.set_flags		= ethtool_op_set_flags,
-#endif
 #endif /* ETHTOOL_GFLAGS */
-#endif /* NETIF_F_LRO */
+#endif /* HAVE_NDO_SET_FEATURES */
+#ifdef ETHTOOL_GADV_COAL
+	.get_advcoal		= igb_get_dmac,
+	.set_advcoal		= igb_set_adv_coal,
+#endif /* ETHTOOL_GADV_COAL */
+#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
+#ifdef ETHTOOL_GEEE
+	.get_eee		= igb_get_eee,
+#endif
+#ifdef ETHTOOL_SEEE
+	.set_eee		= igb_set_eee,
+#endif
+#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
+#ifdef ETHTOOL_GRXRINGS
+	.get_rxnfc		= igb_get_rxnfc,
+	.set_rxnfc		= igb_set_rxnfc,
+#endif
+};
+
+#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
+static const struct ethtool_ops_ext igb_ethtool_ops_ext = {
+	.size			= sizeof(struct ethtool_ops_ext),
+	.get_ts_info		= igb_get_ts_info,
+	.set_phys_id		= igb_set_phys_id,
+	.get_eee		= igb_get_eee,
+	.set_eee		= igb_set_eee,
+};
+
 void igb_set_ethtool_ops(struct net_device *netdev)
 {
 	SET_ETHTOOL_OPS(netdev, &igb_ethtool_ops);
+	set_ethtool_ops_ext(netdev, &igb_ethtool_ops_ext);
 }
+#else
+void igb_set_ethtool_ops(struct net_device *netdev)
+{
+	/* have to "undeclare" const on this struct to remove warnings */
+	SET_ETHTOOL_OPS(netdev, (struct ethtool_ops *)&igb_ethtool_ops);
+}
+#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
 #endif /* SIOCETHTOOL */
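
The split between igb_ethtool_ops and igb_ethtool_ops_ext above exists because RHEL 6 froze the size of struct ethtool_ops for kABI compatibility; callbacks added later (get_ts_info, set_phys_id, the EEE pair) travel in a second structure whose first member records its own size so consumers never read past what the provider filled in. A minimal user-space model of that size-based handshake; the names ops_ext and probe_get_eee are invented purely for illustration:

    #include <stddef.h>

    /* Models only the ethtool_ops_ext "size" convention, nothing more. */
    struct ops_ext {
    	size_t size;		/* provider records how much it filled in */
    	int (*get_eee)(void);	/* a "newer" callback slot */
    };

    /* Consumers probe size before touching fields newer than they are. */
    static int probe_get_eee(const struct ops_ext *ext)
    {
    	size_t need = offsetof(struct ops_ext, get_eee) +
    		      sizeof(ext->get_eee);

    	if (ext->size >= need && ext->get_eee)
    		return ext->get_eee();
    	return -1;	/* callback not provided by this binary */
    }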
+
+static bool is_cim_supported(struct igb_adapter *adapter)
+{
+	/* The CIM functionality is only supported on HP devices */
+	return adapter->hw.subsystem_vendor_id == 0x103C;
+}
+
+static bool validate_smbios_entry(const struct smbios_table *tbl,
+				  struct igb_adapter *adapter)
+{
+	u32 size = tbl->EntryPointLength;
+	u32 offset;
+	u8 *byte;
+	u8 csum;
+
+	if (size == 0x1e)
+		size = 0x1f;
+
+	byte = (unsigned char *)tbl;
+	csum = 0;
+	for (offset = 0; offset < size; ++offset)
+		csum += byte[offset];
+
+	if (csum != 0) {
+		DPRINTK(DRV, ERR, "SMBIOS Checksum failed\n");
+		return false;
+	}
+
+	return true;
+}
+
+static struct smbios_table *
+find_smbios_entry(struct smbios_table *stbl, struct igb_adapter *adapter)
+{
+	void *vbase;
+	u32 offset;
+	u32 size = SM_ADDR_HIGH - SM_ADDR_LOW + 1;
+	struct smbios_table *tbl = NULL;
+
+	vbase = ioremap(SM_ADDR_LOW, size);
+	if (!vbase) {
+		DPRINTK(DRV, ERR, "Failed to map SMBIOS memory region\n");
+		return NULL;
+	}
+
+	for (offset = 0; offset < size; offset += 0x10) {
+		tbl = (struct smbios_table *)(vbase + offset);
+		if (!memcmp(tbl->AnchorString, sm_anchor, sizeof(sm_anchor)) &&
+		    validate_smbios_entry(tbl, adapter))
+			break;
+		tbl = NULL;
+	}
+
+	if (!tbl || !stbl || !tbl->TableAddress || !tbl->TableLength) {
+		adapter->SmbTblLen = 0;
+		adapter->SmbTblAddr = 0;
+		iounmap(vbase);
+		return NULL;
+	}
+
+	adapter->SmbTblLen = tbl->TableLength;
+	adapter->SmbTblAddr = tbl->TableAddress;
+	memcpy(stbl, tbl, sizeof(*stbl));
+
+	iounmap(vbase);
+	/* return the caller's copy, not a pointer into the unmapped window */
+	return stbl;
+}
+
+static bool get_smbios_tables(void *smbios, struct igb_adapter *adapter)
+{
+	void *vbase;
+
+	if (!smbios)
+		return false;
+
+	vbase = ioremap(adapter->SmbTblAddr, (u32)adapter->SmbTblLen);
+	if (!vbase) {
+		DPRINTK(DRV, ERR, "Failed to map SMBIOS memory\n");
+		return false;
+	}
+
+	memcpy(smbios, vbase, adapter->SmbTblLen);
+
+	iounmap(vbase);
+	return true;
+}
+
+static int find_ext_cap(struct pci_dev *pdev, uint16_t cap)
+{
+	int res;
+	uint16_t cap_off;
+	uint32_t dword;
+
+	for (cap_off = 0x100; cap_off; cap_off = PCI_EXT_CAP_NEXT(dword)) {
+		res = pci_read_config_dword(pdev, cap_off & ~3, &dword);
+		if (res < 0)
+			return res;
+
+		if (PCI_EXT_CAP_ID(dword) == cap)
+			return cap_off;
+	}
+
+	return -ENODEV;
+}
+
+static int get_pcie_error_info(u32 *pcie_errors, u32 num_regs,
+			       struct igb_adapter *adapter)
+{
+	int cap;
+	int res;
+	u32 ix;
+
+	if (num_regs == 0) {
+		DPRINTK(DRV, ERR, "num_regs is zero\n");
+		return -EINVAL;
+	}
+
+	cap = find_ext_cap(adapter->pdev, PCI_EXT_CAP_ID_ERR);
+	if (cap <= 0) {
+		DPRINTK(DRV, ERR, "PCIe err cap not found\n");
+		res = -ENODEV;
+		return res;
+	}
+
+	for (ix = 0; ix < num_regs; ++ix) {
+		res = pci_read_config_dword(adapter->pdev,
+					    cap + PCI_ERR_HEADER_LOG +
+					    ix * sizeof(u32),
+					    &pcie_errors[ix]);
+		if (res < 0)
+			return res;
+	}
+	return num_regs;
+}
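
validate_smbios_entry() relies on the SMBIOS rule that all bytes of the entry-point structure must sum to zero modulo 256, and find_smbios_entry() applies that check at every 16-byte boundary of the mapped BIOS window (typically 0xF0000-0xFFFFF). The same check reduced to a standalone helper over an in-memory buffer; the 0x1e to 0x1f length bump mirrors the driver's workaround for BIOSes that report a short entry-point length:

    #include <stdint.h>

    static int smbios_entry_checksum_ok(const uint8_t *entry, uint32_t len)
    {
    	uint8_t sum = 0;
    	uint32_t i;

    	if (len == 0x1e)	/* some BIOSes report 0x1e for a 0x1f table */
    		len = 0x1f;

    	for (i = 0; i < len; i++)
    		sum += entry[i];

    	return sum == 0;	/* valid entry points sum to zero mod 256 */
    }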
+
+static int get_link_status(u16 *link_status, struct igb_adapter *adapter)
+{
+	int cap;
+	int res;
+
+	cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
+	if (cap <= 0) {
+		DPRINTK(DRV, ERR, "PCIe cap not found, cap=%d\n", cap);
+		res = -ENODEV;
+		return res;
+	}
+
+	res = pci_read_config_word(adapter->pdev, cap + PCI_EXP_LNKSTA,
+				   link_status);
+	if (res < 0) {
+		DPRINTK(DRV, ERR, "pci_read_config_word failed with %d\n", res);
+		return res;
+	}
+	return 0;
+}
+
+/**
+ * igb_intelcim_ioctl
+ * @netdev: pointer to netdev device
+ * @ifr: user space request data
+ *
+ * Called by the CIM IOCTL to run diagnostics.
+ * Basically, this function is a wrapper for ethtool functions.
+ **/
+int igb_intelcim_ioctl(struct net_device *netdev, struct ifreq *ifr)
+{
+	void __user *useraddr = (void __user *)ifr->ifr_data;
+	struct igb_intelcim_ioctl_req req;
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	int ret = 0;
+
+	if (copy_from_user(&req, useraddr, sizeof(req))) {
+		DPRINTK(DRV, ERR, "Cannot copy memory from user space\n");
+		return -EFAULT;
+	}
+	useraddr += sizeof(req.cmd);
+
+	switch (req.cmd) {
+	case INTELCIM_ENUMDIAGS:
+	{
+		struct ethtool_gstrings gstrings;
+		u8 *data;
+
+		gstrings = req.cmd_req.gstrings;
+		gstrings.len = igb_diag_test_count(netdev);
+
+		data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER);
+		if (!data) {
+			DPRINTK(DRV, ERR, "Cannot allocate memory\n");
+			return -ENOMEM;
+		}
+
+		igb_get_strings(netdev, gstrings.string_set, data);
+
+		if (copy_to_user(useraddr, &gstrings, sizeof(gstrings))) {
+			DPRINTK(DRV, ERR, "Cannot copy string results to user space\n");
+			kfree(data);
+			return -EFAULT;
+		}
+		useraddr += sizeof(gstrings);
+		if (copy_to_user(useraddr, data,
+				 gstrings.len * ETH_GSTRING_LEN)) {
+			DPRINTK(DRV, ERR, "Cannot copy string results to user space\n");
+			kfree(data);
+			return -EFAULT;
+		}
+		kfree(data);
+	}
+	break;
+	case INTELCIM_RUNDIAG:
+	{
+		struct ethtool_test test;
+		u64 *data;
+
+		test = req.cmd_req.test;
+		test.len = igb_diag_test_count(netdev);
+
+		data = kmalloc(test.len * sizeof(u64), GFP_USER);
+		if (!data) {
+			DPRINTK(DRV, ERR, "Cannot allocate memory\n");
+			return -ENOMEM;
+		}
+
+		igb_diag_test(netdev, &test, data);
+
+		if (copy_to_user(useraddr, &test, sizeof(test))) {
+			DPRINTK(DRV, ERR, "Cannot copy test results to user space\n");
+			kfree(data);
+			return -EFAULT;
+		}
+		useraddr += sizeof(test);
+		if (copy_to_user(useraddr, data, test.len * sizeof(u64))) {
+			DPRINTK(DRV, ERR, "Cannot copy test results to user space\n");
+			kfree(data);
+			return -EFAULT;
+		}
+		kfree(data);
+	}
+	break;
+	case INTELCIM_FNDSMB:
+	{
+		struct smbios_table smbtbl;
+
+		smbtbl = req.cmd_req.tbl;
+
+		if (!is_cim_supported(adapter))
+			return -EPERM;
+		if (!find_smbios_entry(&smbtbl, adapter)) {
+			DPRINTK(DRV, ERR, "SMBIOS table not found\n");
+			return -EFAULT;
+		}
+		if (copy_to_user(useraddr, &smbtbl, sizeof(smbtbl))) {
+			DPRINTK(DRV, ERR, "Cannot copy to user memory\n");
+			return -EFAULT;
+		}
+	}
+	break;
+	case INTELCIM_GETSMBTBL:
+	{
+		void *smbios;
+
+		if (!is_cim_supported(adapter))
+			return -EPERM;
+
+		smbios = kmalloc(adapter->SmbTblLen * sizeof(u8), GFP_USER);
+		if (!smbios) {
+			DPRINTK(DRV, ERR, "Cannot allocate memory\n");
+			return -ENOMEM;
+		}
+
+		if (!get_smbios_tables(smbios, adapter)) {
+			DPRINTK(DRV, ERR, "Cannot get SMBIOS tables\n");
+			kfree(smbios);
+			return -EFAULT;
+		}
+
+		if (copy_to_user(useraddr, smbios,
+				 adapter->SmbTblLen * sizeof(u8))) {
+			DPRINTK(DRV, ERR, "Cannot copy to user memory\n");
+			kfree(smbios);
+			return -EFAULT;
+		}
+		kfree(smbios);
+	}
+	break;
+	case INTELCIM_WRITEMEM:
+	{
+		struct intelcim_mem_buf buf;
+		void *data;
+		void *vbase;
+
+		if (!is_cim_supported(adapter))
+			return -EPERM;
+
+		buf = req.cmd_req.buf;
+		data = kmalloc(buf.len * sizeof(u8), GFP_USER);
+		if (!data) {
+			DPRINTK(DRV, ERR, "Cannot allocate memory\n");
+			return -ENOMEM;
+		}
+
+		useraddr += sizeof(buf.addr) + sizeof(buf.len);
+		if (copy_from_user(data, useraddr, buf.len * sizeof(u8))) {
+			DPRINTK(DRV, ERR, "Cannot copy memory from user space\n");
+			kfree(data);
+			return -EFAULT;
+		}
+
+		vbase = ioremap(buf.addr, buf.len);
+		if (!vbase) {
+			DPRINTK(DRV, ERR, "Failed to map buffer memory\n");
+			kfree(data);
+			return -EFAULT;
+		}
+		memcpy(vbase, data, buf.len);
+		iounmap(vbase);
+		kfree(data);
+	}
+	break;
+	case INTELCIM_READMEM:
+	{
+		struct intelcim_mem_buf buf;
+		void *data;
+		void *vbase;
+
+		if (!is_cim_supported(adapter))
+			return -EPERM;
+
+		buf = req.cmd_req.buf;
+		data = kmalloc(buf.len * sizeof(u8), GFP_USER);
+		if (!data) {
+			DPRINTK(DRV, ERR, "Cannot allocate memory\n");
+			return -ENOMEM;
+		}
+
+		vbase = ioremap(buf.addr, buf.len);
+		if (!vbase) {
+			DPRINTK(DRV, ERR, "Failed to map buffer memory\n");
+			kfree(data);
+			return -EFAULT;
+		}
+
+		memcpy(data, vbase, buf.len);
+		iounmap(vbase);
+
+		useraddr += sizeof(buf.addr) + sizeof(buf.len);
+		if (copy_to_user(useraddr, data, buf.len * sizeof(u8))) {
+			DPRINTK(DRV, ERR, "Cannot copy mem contents to user space\n");
+			kfree(data);
+			return -EFAULT;
+		}
+
+		kfree(data);
+	}
+	break;
+	case INTELCIM_GET_PCIE_ERROR_INFO:
+	{
+		struct intelcim_pcie_error_info info;
+		void *pcie_errors;
+
+		DPRINTK(DRV, DEBUG, "INTELCIM_GET_PCIE_ERROR_INFO ioctl called\n");
+
+		info = req.cmd_req.info;
+
+		if (!is_cim_supported(adapter))
+			return -EPERM;
+
+		pcie_errors = kmalloc(info.num_regs * sizeof(u32), GFP_USER);
+		if (!pcie_errors) {
+			DPRINTK(DRV, ERR, "Cannot allocate memory\n");
+			return -ENOMEM;
+		}
+
+		if (get_pcie_error_info((u32 *)pcie_errors,
+					(u32)info.num_regs, adapter) < 0) {
+			DPRINTK(DRV, ERR, "Cannot get pcie error info\n");
+			kfree(pcie_errors);
+			return -EFAULT;
+		}
+
+		useraddr += sizeof(info.num_regs);
+		if (copy_to_user(useraddr, pcie_errors,
+				 info.num_regs * sizeof(u32))) {
+			DPRINTK(DRV, ERR, "Cannot copy mem contents to user space\n");
+			kfree(pcie_errors);
+			return -EFAULT;
+		}
+		kfree(pcie_errors);
+	}
+	break;
+	case INTELCIM_GET_PCI_LINK_STATUS:
+	{
+		u16 link_status;
+
+		DPRINTK(DRV, DEBUG, "INTELCIM_GET_PCI_LINK_STATUS ioctl called\n");
+
+		link_status = req.cmd_req.link_status;
+
+		if (!is_cim_supported(adapter))
+			return -EPERM;
+
+		if (get_link_status(&link_status, adapter) < 0) {
+			DPRINTK(DRV, ERR, "Unable to get link status\n");
+			return -EFAULT;
+		}
+		if (copy_to_user(useraddr, &link_status,
+				 sizeof(link_status))) {
+			DPRINTK(DRV, ERR, "Cannot copy to user memory\n");
+			return -EFAULT;
+		}
+	}
+	break;
+	default:
+		DPRINTK(DRV, ERR, "Unknown ioctl\n");
+		ret = -EOPNOTSUPP;
+	}
+	return ret;
+}
+
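
igb_intelcim_ioctl() expects a struct igb_intelcim_ioctl_req whose leading cmd field selects the sub-command, followed by command-specific data that the handler reads and writes through copy_from_user()/copy_to_user() at ifr_data. A rough user-space sketch of driving that path follows; the actual ioctl number and request layout are private to the driver, so SIOCDEVPRIVATE and the helper below are assumptions for illustration only (fd is an ordinary AF_INET datagram socket):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <net/if.h>
    #include <linux/sockios.h>

    /* Assumed private-ioctl slot; the real command number is driver-defined. */
    #define INTELCIM_IOCTL SIOCDEVPRIVATE

    static int request_cim(int fd, const char *ifname, void *req)
    {
    	struct ifreq ifr;

    	memset(&ifr, 0, sizeof(ifr));
    	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
    	ifr.ifr_data = req;	/* the driver copy_from_user()s this */

    	return ioctl(fd, INTELCIM_IOCTL, &ifr);	/* < 0 on failure */
    }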
diff --git a/vmkdrivers/src_9/drivers/net/igb/igb_hwmon.c b/vmkdrivers/src_9/drivers/net/igb/igb_hwmon.c
new file mode 100755
index 0000000000000000000000000000000000000000..9228a3917f593964e49e5d3656c54092da5536d5
--- /dev/null
+++ b/vmkdrivers/src_9/drivers/net/igb/igb_hwmon.c
@@ -0,0 +1,242 @@
+/*******************************************************************************
+
+  Intel(R) Gigabit Ethernet Linux driver
+  Copyright(c) 2007-2013 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "igb.h"
+#include "e1000_82575.h"
+#include "e1000_hw.h"
+#ifdef IGB_HWMON
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/sysfs.h>
+#include <linux/kobject.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <linux/hwmon.h>
+#include <linux/pci.h>
+
+
+/* hwmon callback functions */
+static ssize_t igb_hwmon_show_location(struct device *dev,
+				       struct device_attribute *attr,
+				       char *buf)
+{
+	struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
+						   dev_attr);
+	return sprintf(buf, "loc%u\n",
+		       igb_attr->sensor->location);
+}
+
+static ssize_t igb_hwmon_show_temp(struct device *dev,
+				   struct device_attribute *attr,
+				   char *buf)
+{
+	struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
+						   dev_attr);
+	unsigned int value;
+
+	/* reset the temp field */
+	igb_attr->hw->mac.ops.get_thermal_sensor_data(igb_attr->hw);
+
+	value = igb_attr->sensor->temp;
+
+	/* display millidegree */
+	value *= 1000;
+
+	return sprintf(buf, "%u\n", value);
+}
+
+static ssize_t igb_hwmon_show_cautionthresh(struct device *dev,
+					    struct device_attribute *attr,
+					    char *buf)
+{
+	struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
+						   dev_attr);
+	unsigned int value = igb_attr->sensor->caution_thresh;
+
+	/* display millidegree */
+	value *= 1000;
+
+	return sprintf(buf, "%u\n", value);
+}
+
+static ssize_t igb_hwmon_show_maxopthresh(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
+						   dev_attr);
+	unsigned int value = igb_attr->sensor->max_op_thresh;
+
+	/* display millidegree */
+	value *= 1000;
+
+	return sprintf(buf, "%u\n", value);
+}
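
All four show routines multiply the raw sensor value by 1000 because the hwmon sysfs ABI reports temperatures in millidegrees Celsius; temp%u_input, temp%u_max, and temp%u_crit are all read in that unit. The convention in isolation:

    #include <stdio.h>

    /* hwmon's sysfs ABI expects temp*_input values in millidegrees C. */
    static unsigned int celsius_to_hwmon(unsigned int degrees)
    {
    	return degrees * 1000;
    }

    int main(void)
    {
    	printf("%u\n", celsius_to_hwmon(45));	/* prints 45000 */
    	return 0;
    }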
+
+/* igb_add_hwmon_attr - create hwmon attr table for a hwmon sysfs file
+ * @adapter: pointer to the adapter structure
+ * @offset: offset in the eeprom sensor data table
+ * @type: type of sensor data to display
+ *
+ * For each file we want in hwmon's sysfs interface we need a
+ * device_attribute.  This is included in our hwmon_attr struct that
+ * contains the references to the data structures we need to get the
+ * data to display.
+ */
+static int igb_add_hwmon_attr(struct igb_adapter *adapter,
+			      unsigned int offset, int type)
+{
+	int rc;
+	unsigned int n_attr;
+	struct hwmon_attr *igb_attr;
+
+	n_attr = adapter->igb_hwmon_buff.n_hwmon;
+	igb_attr = &adapter->igb_hwmon_buff.hwmon_list[n_attr];
+
+	switch (type) {
+	case IGB_HWMON_TYPE_LOC:
+		igb_attr->dev_attr.show = igb_hwmon_show_location;
+		snprintf(igb_attr->name, sizeof(igb_attr->name),
+			 "temp%u_label", offset);
+		break;
+	case IGB_HWMON_TYPE_TEMP:
+		igb_attr->dev_attr.show = igb_hwmon_show_temp;
+		snprintf(igb_attr->name, sizeof(igb_attr->name),
+			 "temp%u_input", offset);
+		break;
+	case IGB_HWMON_TYPE_CAUTION:
+		igb_attr->dev_attr.show = igb_hwmon_show_cautionthresh;
+		snprintf(igb_attr->name, sizeof(igb_attr->name),
+			 "temp%u_max", offset);
+		break;
+	case IGB_HWMON_TYPE_MAX:
+		igb_attr->dev_attr.show = igb_hwmon_show_maxopthresh;
+		snprintf(igb_attr->name, sizeof(igb_attr->name),
+			 "temp%u_crit", offset);
+		break;
+	default:
+		rc = -EPERM;
+		return rc;
+	}
+
+	/* These are always the same regardless of type */
+	igb_attr->sensor =
+		&adapter->hw.mac.thermal_sensor_data.sensor[offset];
+	igb_attr->hw = &adapter->hw;
+	igb_attr->dev_attr.store = NULL;
+	igb_attr->dev_attr.attr.mode = S_IRUGO;
+	igb_attr->dev_attr.attr.name = igb_attr->name;
+	sysfs_attr_init(&igb_attr->dev_attr.attr);
+	rc = device_create_file(&adapter->pdev->dev,
+				&igb_attr->dev_attr);
+	if (rc == 0)
+		++adapter->igb_hwmon_buff.n_hwmon;
+
+	return rc;
+}
+
+static void igb_sysfs_del_adapter(struct igb_adapter *adapter)
+{
+	int i;
+
+	if (adapter == NULL)
+		return;
+
+	for (i = 0; i < adapter->igb_hwmon_buff.n_hwmon; i++) {
+		device_remove_file(&adapter->pdev->dev,
+				   &adapter->igb_hwmon_buff.hwmon_list[i].dev_attr);
+	}
+
+	kfree(adapter->igb_hwmon_buff.hwmon_list);
+
+	if (adapter->igb_hwmon_buff.device)
+		hwmon_device_unregister(adapter->igb_hwmon_buff.device);
+}
+
+/* called from igb_main.c */
+void igb_sysfs_exit(struct igb_adapter *adapter)
+{
+	igb_sysfs_del_adapter(adapter);
+}
+
+/* called from igb_main.c */
+int igb_sysfs_init(struct igb_adapter *adapter)
+{
+	struct hwmon_buff *igb_hwmon = &adapter->igb_hwmon_buff;
+	unsigned int i;
+	int n_attrs;
+	int rc = 0;
+
+	/* If this method isn't defined we don't support thermals */
+	if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL)
+		goto exit;
+
+	/* Don't create thermal hwmon interface if no sensors present */
+	rc = (adapter->hw.mac.ops.init_thermal_sensor_thresh(&adapter->hw));
+	if (rc)
+		goto exit;
+
+	/* Allocate space for the maximum number of attributes:
+	 * max num sensors * values (loc, temp, max, caution)
+	 */
+	n_attrs = E1000_MAX_SENSORS * 4;
+	igb_hwmon->hwmon_list = kcalloc(n_attrs, sizeof(struct hwmon_attr),
+					GFP_KERNEL);
+	if (!igb_hwmon->hwmon_list) {
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	igb_hwmon->device = hwmon_device_register(&adapter->pdev->dev);
+	if (IS_ERR(igb_hwmon->device)) {
+		rc = PTR_ERR(igb_hwmon->device);
+		goto err;
+	}
+
+	for (i = 0; i < E1000_MAX_SENSORS; i++) {
+
+		/* Only create hwmon sysfs entries for sensors that have
+		 * meaningful data.
+ */ + if (adapter->hw.mac.thermal_sensor_data.sensor[i].location == 0) + continue; + + /* Bail if any hwmon attr struct fails to initialize */ + rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_CAUTION); + rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_LOC); + rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_TEMP); + rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_MAX); + if (rc) + goto err; + } + + goto exit; + +err: + igb_sysfs_del_adapter(adapter); +exit: + return rc; +} +#endif /* IGB_HWMON */ diff --git a/vmkdrivers/src_9/drivers/net/igb/igb_main.c b/vmkdrivers/src_9/drivers/net/igb/igb_main.c index 3082997edb2df9695e1afe0bab6a07fab0f47b34..0725ba8b01e4cfd518ada770e4ef499ccb026579 100644 --- a/vmkdrivers/src_9/drivers/net/igb/igb_main.c +++ b/vmkdrivers/src_9/drivers/net/igb/igb_main.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2009 Intel Corporation. + Copyright(c) 2007-2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -46,44 +46,65 @@ #include #endif #include +#ifdef CONFIG_PM_RUNTIME +#include +#endif /* CONFIG_PM_RUNTIME */ +#ifndef __VMKLNX__ +#include +#endif #include "igb.h" +#include "igb_vmdq.h" -#ifdef __VMKLNX__ -#ifdef __VMKNETDDI_QUEUEOPS__ -static int igb_netqueue_ops(vmknetddi_queueops_op_t op, void *args); -#endif -#endif #define DRV_DEBUG #define DRV_HW_PERF #define VERSION_SUFFIX -#define DRV_VERSION "2.1.11.1" VERSION_SUFFIX DRV_DEBUG DRV_HW_PERF +#define MAJ 5 +#define MIN 0 +#define BUILD 5.1 +#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." __stringify(BUILD) VERSION_SUFFIX DRV_DEBUG DRV_HW_PERF char igb_driver_name[] = "igb"; char igb_driver_version[] = DRV_VERSION; static const char igb_driver_string[] = "Intel(R) Gigabit Ethernet Network Driver"; -static const char igb_copyright[] = "Copyright (c) 2007-2009 Intel Corporation."; - -static struct pci_device_id igb_pci_tbl[] = { +static const char igb_copyright[] = + "Copyright (c) 2007-2013 Intel Corporation."; + +static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = { + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER_FLASHLESS) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES_FLASHLESS) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER) }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER) }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER) }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES) }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII) }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER) }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER) }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES) }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII) }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP) }, { PCI_VDEVICE(INTEL, 
E1000_DEV_ID_82576) }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS) }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES) }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER) }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES) }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD) }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER) }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER) }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER) }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES) }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER) }, @@ -105,19 +126,22 @@ static void __devexit igb_remove(struct pci_dev *pdev); static int igb_sw_init(struct igb_adapter *); static int igb_open(struct net_device *); static int igb_close(struct net_device *); +static void igb_configure(struct igb_adapter *); static void igb_configure_tx(struct igb_adapter *); static void igb_configure_rx(struct igb_adapter *); static void igb_clean_all_tx_rings(struct igb_adapter *); static void igb_clean_all_rx_rings(struct igb_adapter *); static void igb_clean_tx_ring(struct igb_ring *); -static void igb_clean_rx_ring(struct igb_ring *); static void igb_set_rx_mode(struct net_device *); static void igb_update_phy_info(unsigned long); static void igb_watchdog(unsigned long); static void igb_watchdog_task(struct work_struct *); -static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *); +static void igb_dma_err_task(struct work_struct *); +static void igb_dma_err_timer(unsigned long data); +static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *); static struct net_device_stats *igb_get_stats(struct net_device *); static int igb_change_mtu(struct net_device *, int); +void igb_full_sync_mac_table(struct igb_adapter *adapter); static int igb_set_mac(struct net_device *, void *); static void igb_set_uta(struct igb_adapter *adapter); static irqreturn_t igb_intr(int irq, void *); @@ -128,27 +152,74 @@ static irqreturn_t igb_msix_ring(int irq, void *); static void igb_update_dca(struct igb_q_vector *); static void igb_setup_dca(struct igb_adapter *); #endif /* IGB_DCA */ -static bool igb_clean_tx_irq(struct igb_q_vector *); static int igb_poll(struct napi_struct *, int); -static bool igb_clean_rx_irq_adv(struct igb_q_vector *, int *, int); +static bool igb_clean_tx_irq(struct igb_q_vector *); +static bool igb_clean_rx_irq(struct igb_q_vector *, int); static int igb_ioctl(struct net_device *, struct ifreq *, int cmd); static void igb_tx_timeout(struct net_device *); static void igb_reset_task(struct work_struct *); -static void igb_vlan_rx_register(struct net_device *, struct vlan_group *); +#ifdef HAVE_VLAN_RX_REGISTER +static void igb_vlan_mode(struct net_device *, struct vlan_group *); +#endif +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID +#ifdef NETIF_F_HW_VLAN_CTAG_RX +static int igb_vlan_rx_add_vid(struct net_device *, + __always_unused __be16 proto, u16); +static int igb_vlan_rx_kill_vid(struct net_device *, + __always_unused __be16 proto, u16); +#else +static int igb_vlan_rx_add_vid(struct net_device *, u16); +static int igb_vlan_rx_kill_vid(struct net_device *, u16); +#endif +#else static void igb_vlan_rx_add_vid(struct net_device *, u16); static void igb_vlan_rx_kill_vid(struct net_device *, u16); +#endif static void igb_restore_vlan(struct igb_adapter *); -static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32 , u8); +void igb_rar_set(struct igb_adapter *adapter, u32 index); static void 
igb_ping_all_vfs(struct igb_adapter *); static void igb_msg_task(struct igb_adapter *); static void igb_vmm_control(struct igb_adapter *); static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *); static void igb_restore_vf_multicasts(struct igb_adapter *adapter); - -#ifdef CONFIG_PM -static int igb_suspend(struct pci_dev *, pm_message_t); -static int igb_resume(struct pci_dev *); +static void igb_process_mdd_event(struct igb_adapter *); +#ifdef IFLA_VF_MAX +static int igb_ndo_set_vf_mac( struct net_device *netdev, int vf, u8 *mac); +static int igb_ndo_set_vf_vlan(struct net_device *netdev, + int vf, u16 vlan, u8 qos); +#ifdef HAVE_VF_SPOOFCHK_CONFIGURE +static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, + bool setting); +#endif +static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate); +#ifndef __VMKLNX__ +static int igb_ndo_get_vf_config(struct net_device *netdev, int vf, + struct ifla_vf_info *ivi); #endif +static void igb_check_vf_rate_limit(struct igb_adapter *); +#endif +static int igb_vf_configure(struct igb_adapter *adapter, int vf); +#ifdef CONFIG_PM +#ifdef HAVE_SYSTEM_SLEEP_PM_OPS +static int igb_suspend(struct device *dev); +static int igb_resume(struct device *dev); +#ifdef CONFIG_PM_RUNTIME +static int igb_runtime_suspend(struct device *dev); +static int igb_runtime_resume(struct device *dev); +static int igb_runtime_idle(struct device *dev); +#endif /* CONFIG_PM_RUNTIME */ +static const struct dev_pm_ops igb_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume) +#ifdef CONFIG_PM_RUNTIME + SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume, + igb_runtime_idle) +#endif /* CONFIG_PM_RUNTIME */ +}; +#else +static int igb_suspend(struct pci_dev *pdev, pm_message_t state); +static int igb_resume(struct pci_dev *pdev); +#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */ +#endif /* CONFIG_PM */ #ifndef USE_REBOOT_NOTIFIER static void igb_shutdown(struct pci_dev *); #else @@ -174,7 +245,7 @@ static void igb_netpoll(struct net_device *); #ifdef HAVE_PCI_ERS static pci_ers_result_t igb_io_error_detected(struct pci_dev *, - pci_channel_state_t); + pci_channel_state_t); static pci_ers_result_t igb_io_slot_reset(struct pci_dev *); static void igb_io_resume(struct pci_dev *); @@ -185,6 +256,8 @@ static struct pci_error_handlers igb_err_handler = { }; #endif +static void igb_init_fw(struct igb_adapter *adapter); +static void igb_init_dmac(struct igb_adapter *adapter, u32 pba); static struct pci_driver igb_driver = { .name = igb_driver_name, @@ -192,10 +265,13 @@ static struct pci_driver igb_driver = { .probe = igb_probe, .remove = __devexit_p(igb_remove), #ifdef CONFIG_PM - /* Power Managment Hooks */ +#ifdef HAVE_SYSTEM_SLEEP_PM_OPS + .driver.pm = &igb_pm_ops, +#else .suspend = igb_suspend, .resume = igb_resume, -#endif +#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */ +#endif /* CONFIG_PM */ #ifndef USE_REBOOT_NOTIFIER .shutdown = igb_shutdown, #endif @@ -209,8 +285,9 @@ MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); -static void igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add) +static void igb_vfta_set(struct igb_adapter *adapter, u32 vid, bool add) { + struct e1000_hw *hw = &adapter->hw; struct e1000_host_mng_dhcp_cookie *mng_cookie = &hw->mng_cookie; u32 index = (vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK; u32 mask = 1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK); @@ -224,43 +301,17 @@ static void igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add) (vid == 
mng_cookie->vlan_id)) add = TRUE; - vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index); + vfta = adapter->shadow_vfta[index]; + if (add) vfta |= mask; else vfta &= ~mask; e1000_write_vfta(hw, index, vfta); + adapter->shadow_vfta[index] = vfta; } -#ifdef SIOCSHWTSTAMP -/** - * igb_read_clock - read raw cycle counter (to be used by time counter) - */ -static cycle_t igb_read_clock(const struct cyclecounter *tc) -{ - struct igb_adapter *adapter = - container_of(tc, struct igb_adapter, cycles); - struct e1000_hw *hw = &adapter->hw; - u64 stamp = 0; - int shift = 0; - - /* - * The timestamp latches on lowest register read. For the 82580 - * the lowest register is SYSTIMR instead of SYSTIML. However we never - * adjusted TIMINCA so SYSTIMR will just read as all 0s so ignore it. - */ - if (hw->mac.type == e1000_82580) { - stamp = E1000_READ_REG(hw, E1000_SYSTIMR) >> 8; - shift = IGB_82580_TSYNC_SHIFT; - } - - stamp |= (u64)E1000_READ_REG(hw, E1000_SYSTIML) << shift; - stamp |= (u64)E1000_READ_REG(hw, E1000_SYSTIMH) << (shift + 32); - return stamp; -} - -#endif /* SIOCSHWTSTAMP */ static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0=none, ..., 16=all)"); @@ -274,10 +325,17 @@ MODULE_PARM_DESC(debug, "Debug level (0=none, ..., 16=all)"); static int __init igb_init_module(void) { int ret; + printk(KERN_INFO "%s - version %s\n", igb_driver_string, igb_driver_version); printk(KERN_INFO "%s\n", igb_copyright); +#ifdef IGB_HWMON +/* only use IGB_PROCFS if IGB_HWMON is not defined */ +#else + if (igb_procfs_topdir_init()) + printk(KERN_INFO "Procfs failed to initialize topdir\n"); +#endif /* IGB_HWMON */ #ifdef IGB_DCA dca_register_notify(&dca_notifier); @@ -308,6 +366,12 @@ static void __exit igb_exit_module(void) unregister_reboot_notifier(&igb_notifier_reboot); #endif pci_unregister_driver(&igb_driver); + +#ifdef IGB_HWMON +/* only compile IGB_PROCFS if IGB_HWMON is not defined */ +#else + igb_procfs_topdir_exit(); +#endif /* IGB_HWMON */ } module_exit(igb_exit_module); @@ -341,16 +405,14 @@ static void igb_cache_ring_register(struct igb_adapter *adapter) for (; i < adapter->rss_queues; i++) adapter->rx_ring[i]->reg_idx = rbase_offset + Q_IDX_82576(i); -#ifdef HAVE_TX_MQ - for (; j < adapter->rss_queues; j++) - adapter->tx_ring[j]->reg_idx = rbase_offset + - Q_IDX_82576(j); -#endif } #endif case e1000_82575: case e1000_82580: case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: default: for (; i < adapter->num_rx_queues; i++) adapter->rx_ring[i]->reg_idx = rbase_offset + i; @@ -360,83 +422,6 @@ static void igb_cache_ring_register(struct igb_adapter *adapter) } } -static void igb_free_queues(struct igb_adapter *adapter) -{ - int i; - - for (i = 0; i < adapter->num_tx_queues; i++) { - kfree(adapter->tx_ring[i]); - adapter->tx_ring[i] = NULL; - } - for (i = 0; i < adapter->num_rx_queues; i++) { - kfree(adapter->rx_ring[i]); - adapter->rx_ring[i] = NULL; - } - adapter->num_rx_queues = 0; - adapter->num_tx_queues = 0; -} - -/** - * igb_alloc_queues - Allocate memory for all rings - * @adapter: board private structure to initialize - * - * We allocate one ring per queue at run-time since we don't know the - * number of queues at compile-time. 
- **/
-static int igb_alloc_queues(struct igb_adapter *adapter)
-{
- struct igb_ring *ring;
- int i;
-
- for (i = 0; i < adapter->num_tx_queues; i++) {
- ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
- if (!ring)
- goto err;
- ring->count = adapter->tx_ring_count;
- ring->queue_index = i;
- ring->pdev = adapter->pdev;
- ring->netdev = adapter->netdev;
- /* For 82575, context index must be unique per ring. */
- if (adapter->hw.mac.type == e1000_82575)
- ring->flags = IGB_RING_FLAG_TX_CTX_IDX;
- adapter->tx_ring[i] = ring;
- }
-
- for (i = 0; i < adapter->num_rx_queues; i++) {
- ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
- if (!ring)
- goto err;
- ring->count = adapter->rx_ring_count;
- ring->queue_index = i;
- ring->pdev = adapter->pdev;
- ring->netdev = adapter->netdev;
- ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
- ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
- /* set flag indicating ring supports SCTP checksum offload */
- if (adapter->hw.mac.type >= e1000_82576)
- ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM;
-#ifdef IGB_LRO
- /* set flag enabling LRO */
- if (i < adapter->rss_queues)
- ring->flags |= IGB_RING_FLAG_RX_LRO;
-#endif
- adapter->rx_ring[i] = ring;
- }
-
- igb_cache_ring_register(adapter);
-
-#ifdef __VMKNETDDI_QUEUEOPS__
- adapter->rx_ring[0]->active = 1;
- adapter->rx_ring[0]->allocated = 1;
-#endif
- return E1000_SUCCESS;
-
-err:
- igb_free_queues(adapter);
-
- return -ENOMEM;
-}
-
 static void igb_configure_lli(struct igb_adapter *adapter)
 {
 struct e1000_hw *hw = &adapter->hw;
@@ -473,20 +458,45 @@ static void igb_configure_lli(struct igb_adapter *adapter)
 }
 
+/**
+ * igb_write_ivar - configure ivar for given MSI-X vector
+ * @hw: pointer to the HW structure
+ * @msix_vector: vector number we are allocating to a given ring
+ * @index: row index of IVAR register to write within IVAR table
+ * @offset: column offset within IVAR, should be a multiple of 8
+ *
+ * This function is intended to handle the writing of the IVAR register
+ * for adapters 82576 and newer. The IVAR table consists of 2 columns,
+ * each containing a cause allocation for an Rx and Tx ring, and a
+ * variable number of rows depending on the number of queues supported.
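+ *
+ * A worked example of the addressing (values illustrative): on the 82576
+ * the table is column-major, so Rx queue 9 uses index (9 & 0x7) = 1 with
+ * offset ((9 & 0x8) << 1) = 16; on 82580 and newer the table is row-major,
+ * so Rx queue 9 uses index (9 >> 1) = 4 with offset ((9 & 0x1) << 4) = 16.
+ * In both layouts the byte written at that offset is
+ * (msix_vector | E1000_IVAR_VALID).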
+ **/ +static void igb_write_ivar(struct e1000_hw *hw, int msix_vector, + int index, int offset) +{ + u32 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); + + /* clear any bits that are currently set */ + ivar &= ~((u32)0xFF << offset); + + /* write vector and valid bit */ + ivar |= (msix_vector | E1000_IVAR_VALID) << offset; + + E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); +} + #define IGB_N0_QUEUE -1 static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector) { - u32 msixbm = 0; struct igb_adapter *adapter = q_vector->adapter; struct e1000_hw *hw = &adapter->hw; - u32 ivar, index; int rx_queue = IGB_N0_QUEUE; int tx_queue = IGB_N0_QUEUE; + u32 msixbm = 0; - if (q_vector->rx_ring) - rx_queue = q_vector->rx_ring->reg_idx; - if (q_vector->tx_ring) - tx_queue = q_vector->tx_ring->reg_idx; + if (q_vector->rx.ring) + rx_queue = q_vector->rx.ring->reg_idx; + if (q_vector->tx.ring) + tx_queue = q_vector->tx.ring->reg_idx; switch (hw->mac.type) { case e1000_82575: @@ -498,76 +508,48 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector) msixbm = E1000_EICR_RX_QUEUE0 << rx_queue; if (tx_queue > IGB_N0_QUEUE) msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue; + if (!adapter->msix_entries && msix_vector == 0) + msixbm |= E1000_EIMS_OTHER; E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0), msix_vector, msixbm); q_vector->eims_value = msixbm; break; case e1000_82576: - /* 82576 uses a table-based method for assigning vectors. - Each queue has a single entry in the table to which we write - a vector number along with a "valid" bit. Sadly, the layout - of the table is somewhat counterintuitive. */ - if (rx_queue > IGB_N0_QUEUE) { - index = (rx_queue & 0x7); - ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); - if (rx_queue < 8) { - /* vector goes into low byte of register */ - ivar = ivar & 0xFFFFFF00; - ivar |= msix_vector | E1000_IVAR_VALID; - } else { - /* vector goes into third byte of register */ - ivar = ivar & 0xFF00FFFF; - ivar |= (msix_vector | E1000_IVAR_VALID) << 16; - } - E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); - } - if (tx_queue > IGB_N0_QUEUE) { - index = (tx_queue & 0x7); - ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); - if (tx_queue < 8) { - /* vector goes into second byte of register */ - ivar = ivar & 0xFFFF00FF; - ivar |= (msix_vector | E1000_IVAR_VALID) << 8; - } else { - /* vector goes into high byte of register */ - ivar = ivar & 0x00FFFFFF; - ivar |= (msix_vector | E1000_IVAR_VALID) << 24; - } - E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); - } + /* + * 82576 uses a table that essentially consists of 2 columns + * with 8 rows. The ordering is column-major so we use the + * lower 3 bits as the row index, and the 4th bit as the + * column offset. + */ + if (rx_queue > IGB_N0_QUEUE) + igb_write_ivar(hw, msix_vector, + rx_queue & 0x7, + (rx_queue & 0x8) << 1); + if (tx_queue > IGB_N0_QUEUE) + igb_write_ivar(hw, msix_vector, + tx_queue & 0x7, + ((tx_queue & 0x8) << 1) + 8); q_vector->eims_value = 1 << msix_vector; break; case e1000_82580: case e1000_i350: - /* 82580 uses the same table-based approach as 82576 but has fewer - entries as a result we carry over for queues greater than 4. 
*/ - if (rx_queue > IGB_N0_QUEUE) { - index = (rx_queue >> 1); - ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); - if (rx_queue & 0x1) { - /* vector goes into third byte of register */ - ivar = ivar & 0xFF00FFFF; - ivar |= (msix_vector | E1000_IVAR_VALID) << 16; - } else { - /* vector goes into low byte of register */ - ivar = ivar & 0xFFFFFF00; - ivar |= msix_vector | E1000_IVAR_VALID; - } - E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); - } - if (tx_queue > IGB_N0_QUEUE) { - index = (tx_queue >> 1); - ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); - if (tx_queue & 0x1) { - /* vector goes into high byte of register */ - ivar = ivar & 0x00FFFFFF; - ivar |= (msix_vector | E1000_IVAR_VALID) << 24; - } else { - /* vector goes into second byte of register */ - ivar = ivar & 0xFFFF00FF; - ivar |= (msix_vector | E1000_IVAR_VALID) << 8; - } - E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); - } + case e1000_i354: + case e1000_i210: + case e1000_i211: + /* + * On 82580 and newer adapters the scheme is similar to 82576 + * however instead of ordering column-major we have things + * ordered row-major. So we traverse the table by using + * bit 0 as the column offset, and the remaining bits as the + * row index. + */ + if (rx_queue > IGB_N0_QUEUE) + igb_write_ivar(hw, msix_vector, + rx_queue >> 1, + (rx_queue & 0x1) << 4); + if (tx_queue > IGB_N0_QUEUE) + igb_write_ivar(hw, msix_vector, + tx_queue >> 1, + ((tx_queue & 0x1) << 4) + 8); q_vector->eims_value = 1 << msix_vector; break; default: @@ -619,6 +601,9 @@ static void igb_configure_msix(struct igb_adapter *adapter) case e1000_82576: case e1000_82580: case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: /* Turn on MSI-X capability first, or our settings * won't stick. And it will take days to debug. 
*/ E1000_WRITE_REG(hw, E1000_GPIE, E1000_GPIE_MSIX_MODE | @@ -654,42 +639,52 @@ static int igb_request_msix(struct igb_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct e1000_hw *hw = &adapter->hw; - int i, err = 0, vector = 0; + int i, err = 0, vector = 0, free_vector = 0; err = request_irq(adapter->msix_entries[vector].vector, &igb_msix_other, 0, netdev->name, adapter); if (err) - goto out; - vector++; + goto err_out; for (i = 0; i < adapter->num_q_vectors; i++) { struct igb_q_vector *q_vector = adapter->q_vector[i]; + vector++; + q_vector->itr_register = hw->hw_addr + E1000_EITR(vector); - if (q_vector->rx_ring && q_vector->tx_ring) + if (q_vector->rx.ring && q_vector->tx.ring) sprintf(q_vector->name, "%s-TxRx-%u", netdev->name, - q_vector->rx_ring->queue_index); - else if (q_vector->tx_ring) + q_vector->rx.ring->queue_index); + else if (q_vector->tx.ring) sprintf(q_vector->name, "%s-tx-%u", netdev->name, - q_vector->tx_ring->queue_index); - else if (q_vector->rx_ring) + q_vector->tx.ring->queue_index); + else if (q_vector->rx.ring) sprintf(q_vector->name, "%s-rx-%u", netdev->name, - q_vector->rx_ring->queue_index); + q_vector->rx.ring->queue_index); else sprintf(q_vector->name, "%s-unused", netdev->name); err = request_irq(adapter->msix_entries[vector].vector, - &igb_msix_ring, 0, q_vector->name, + igb_msix_ring, 0, q_vector->name, q_vector); if (err) - goto out; - vector++; + goto err_free; } igb_configure_msix(adapter); return 0; -out: + +err_free: + /* free already assigned IRQs */ + free_irq(adapter->msix_entries[free_vector++].vector, adapter); + + vector--; + for (i = 0; i < vector; i++) { + free_irq(adapter->msix_entries[free_vector++].vector, + adapter->q_vector[i]); + } +err_out: return err; } @@ -704,45 +699,30 @@ static void igb_reset_interrupt_capability(struct igb_adapter *adapter) } } -#ifdef IGB_LRO -static void igb_lro_ring_exit(struct igb_lro_list *lrolist) -{ - struct hlist_node *node, *node2; - struct igb_lro_desc *lrod; - - hlist_for_each_entry_safe(lrod, node, node2, &lrolist->active, - lro_node) { - hlist_del(&lrod->lro_node); - kfree(lrod); - } - - hlist_for_each_entry_safe(lrod, node, node2, &lrolist->free, - lro_node) { - hlist_del(&lrod->lro_node); - kfree(lrod); - } -} - -static void igb_lro_ring_init(struct igb_lro_list *lrolist) +/** + * igb_free_q_vector - Free memory allocated for specific interrupt vector + * @adapter: board private structure to initialize + * @v_idx: Index of vector to be freed + * + * This function frees the memory allocated to the q_vector. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. 
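+ * Note that the Tx/Rx rings are allocated inline with the q_vector in
+ * igb_alloc_q_vector(), so the single kfree() of the q_vector below
+ * releases the rings as well.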
+ **/
+static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
 {
- int j, bytes;
- struct igb_lro_desc *lrod;
+ struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
- bytes = sizeof(struct igb_lro_desc);
+ if (q_vector->tx.ring)
+ adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
- INIT_HLIST_HEAD(&lrolist->free);
- INIT_HLIST_HEAD(&lrolist->active);
+ if (q_vector->rx.ring)
+ adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
- for (j = 0; j < IGB_LRO_MAX; j++) {
- lrod = kzalloc(bytes, GFP_KERNEL);
- if (lrod != NULL) {
- INIT_HLIST_NODE(&lrod->lro_node);
- hlist_add_head(&lrod->lro_node, &lrolist->free);
- }
- }
+ adapter->q_vector[v_idx] = NULL;
+ netif_napi_del(&q_vector->napi);
+ kfree(q_vector);
 }
-#endif /* IGB_LRO */
 /**
 * igb_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
@@ -753,25 +733,14 @@ static void igb_lro_ring_init(struct igb_lro_list *lrolist)
 **/
 static void igb_free_q_vectors(struct igb_adapter *adapter)
 {
- struct igb_q_vector *q_vector;
- int v_idx;
+ int v_idx = adapter->num_q_vectors;
- for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
- q_vector = adapter->q_vector[v_idx];
- adapter->q_vector[v_idx] = NULL;
- if (!q_vector)
- continue;
- netif_napi_del(&q_vector->napi);
-#ifdef IGB_LRO
- if (q_vector->lrolist) {
- igb_lro_ring_exit(q_vector->lrolist);
- vfree(q_vector->lrolist);
- q_vector->lrolist = NULL;
- }
-#endif
- kfree(q_vector);
- }
+ adapter->num_tx_queues = 0;
+ adapter->num_rx_queues = 0;
 adapter->num_q_vectors = 0;
+
+ while (v_idx--)
+ igb_free_q_vector(adapter, v_idx);
 }
 /**
@@ -782,23 +751,216 @@ static void igb_free_q_vectors(struct igb_adapter *adapter)
 */
 static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
 {
- igb_free_queues(adapter);
 igb_free_q_vectors(adapter);
 igb_reset_interrupt_capability(adapter);
 }
+/**
+ * igb_process_mdd_event
+ * @adapter - board private structure
+ *
+ * Identify a malicious VF, disable the VF TX/RX queues and log a message.
+ */
+static void igb_process_mdd_event(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 lvmmc, vfte, vfre, mdfb;
+ u8 vf_queue;
+
+ lvmmc = E1000_READ_REG(hw, E1000_LVMMC);
+ vf_queue = lvmmc >> 29;
+
+ /* VF index cannot be greater than or equal to VFs allocated */
+ if (vf_queue >= adapter->vfs_allocated_count)
+ return;
+
+ netdev_info(adapter->netdev,
+ "VF %d misbehaved. VF queues are disabled. "
+ "VM misbehavior code is 0x%x\n", vf_queue, lvmmc);
+
+ /* Disable VFTE and VFRE related bits */
+ vfte = E1000_READ_REG(hw, E1000_VFTE);
+ vfte &= ~(1 << vf_queue);
+ E1000_WRITE_REG(hw, E1000_VFTE, vfte);
+
+ vfre = E1000_READ_REG(hw, E1000_VFRE);
+ vfre &= ~(1 << vf_queue);
+ E1000_WRITE_REG(hw, E1000_VFRE, vfre);
+
+ /* Disable MDFB related bit. Clear on write */
+ mdfb = E1000_READ_REG(hw, E1000_MDFB);
+ mdfb |= (1 << vf_queue);
+ E1000_WRITE_REG(hw, E1000_MDFB, mdfb);
+
+ /* Reset the specific VF */
+ E1000_WRITE_REG(hw, E1000_VTCTRL(vf_queue), E1000_VTCTRL_RST);
+}
+
+/**
+ * igb_disable_mdd
+ * @adapter - board private structure
+ *
+ * Disable MDD behavior in the HW
+ **/
+static void igb_disable_mdd(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 reg;
+
+ if ((hw->mac.type != e1000_i350) &&
+ (hw->mac.type != e1000_i354))
+ return;
+
+ reg = E1000_READ_REG(hw, E1000_DTXCTL);
+ reg &= (~E1000_DTXCTL_MDP_EN);
+ E1000_WRITE_REG(hw, E1000_DTXCTL, reg);
+}
+
+/**
+ * igb_enable_mdd
+ * @adapter - board private structure
+ *
+ * Enable the HW to detect a malicious driver and send an interrupt to
+ * the driver.
+ **/
+static void igb_enable_mdd(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 reg;
+
+ /* Only available on i350 device */
+ if (hw->mac.type != e1000_i350)
+ return;
+
+ reg = E1000_READ_REG(hw, E1000_DTXCTL);
+ reg |= E1000_DTXCTL_MDP_EN;
+ E1000_WRITE_REG(hw, E1000_DTXCTL, reg);
+}
+
+/**
+ * igb_reset_sriov_capability - disable SR-IOV if enabled
+ *
+ * Attempt to disable single root IO virtualization capabilities present in the
+ * kernel.
+ **/
+static void igb_reset_sriov_capability(struct igb_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct e1000_hw *hw = &adapter->hw;
+
+ /* reclaim resources allocated to VFs */
+ if (adapter->vf_data) {
+ if (!pci_vfs_assigned(pdev)) {
+ /*
+ * disable iov and allow time for transactions to
+ * clear
+ */
+ pci_disable_sriov(pdev);
+ msleep(500);
+
+ dev_info(pci_dev_to_dev(pdev), "IOV Disabled\n");
+ } else {
+ dev_info(pci_dev_to_dev(pdev), "IOV Not Disabled\n "
+ "VF(s) are assigned to guests!\n");
+ }
+ /* Disable Malicious Driver Detection */
+ igb_disable_mdd(adapter);
+
+ /* free vf data storage */
+ kfree(adapter->vf_data);
+ adapter->vf_data = NULL;
+
+ /* switch rings back to PF ownership */
+ E1000_WRITE_REG(hw, E1000_IOVCTL,
+ E1000_IOVCTL_REUSE_VFQ);
+ E1000_WRITE_FLUSH(hw);
+ msleep(100);
+ }
+
+ adapter->vfs_allocated_count = 0;
+}
+
+/**
+ * igb_set_sriov_capability - setup SR-IOV if supported
+ *
+ * Attempt to enable single root IO virtualization capabilities present in the
+ * kernel.
+ **/
+static void igb_set_sriov_capability(struct igb_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int old_vfs = 0;
+ int i;
+
+ old_vfs = pci_num_vf(pdev);
+ if (old_vfs) {
+ dev_info(pci_dev_to_dev(pdev),
+ "%d pre-allocated VFs found - override "
+ "max_vfs setting of %d\n", old_vfs,
+ adapter->vfs_allocated_count);
+ adapter->vfs_allocated_count = old_vfs;
+ }
+ /* no VFs requested, do nothing */
+ if (!adapter->vfs_allocated_count)
+ return;
+
+ /* allocate vf data storage */
+ adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
+ sizeof(struct vf_data_storage),
+ GFP_KERNEL);
+
+ if (adapter->vf_data) {
+ if (!old_vfs) {
+ if (pci_enable_sriov(pdev,
+ adapter->vfs_allocated_count))
+ goto err_out;
+ }
+ for (i = 0; i < adapter->vfs_allocated_count; i++)
+ igb_vf_configure(adapter, i);
+
+ switch (adapter->hw.mac.type) {
+ case e1000_82576:
+ case e1000_i350:
+ /* Enable VM to VM loopback by default */
+ adapter->flags |= IGB_FLAG_LOOPBACK_ENABLE;
+ break;
+ default:
+ /* Currently no other hardware supports loopback */
+ break;
+ }
+
+ /* DMA Coalescing is not supported in IOV mode.
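+ * For that reason adapter->dmac is forced to IGB_DMAC_DISABLE just below
+ * whenever VFs are brought up on i350 and newer parts.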
*/ + if (adapter->hw.mac.type >= e1000_i350) + adapter->dmac = IGB_DMAC_DISABLE; + if (adapter->hw.mac.type < e1000_i350) + adapter->flags |= IGB_FLAG_DETECT_BAD_DMA; + return; + + } + +err_out: + kfree(adapter->vf_data); + adapter->vf_data = NULL; + adapter->vfs_allocated_count = 0; + dev_warn(pci_dev_to_dev(pdev), + "Failed to initialize SR-IOV virtualization\n"); +} + /** * igb_set_interrupt_capability - set MSI or MSI-X if supported * * Attempt to configure interrupts using the best available * capabilities of the hardware and kernel. **/ -static void igb_set_interrupt_capability(struct igb_adapter *adapter) +static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix) { struct pci_dev *pdev = adapter->pdev; int err; int numvecs, i; + if (!msix) + adapter->int_mode = IGB_INT_MODE_MSI; + /* Number of supported queues. */ adapter->num_rx_queues = adapter->rss_queues; @@ -806,7 +968,10 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter) adapter->num_rx_queues += adapter->vmdq_pools - 1; #ifdef HAVE_TX_MQ - adapter->num_tx_queues = adapter->num_rx_queues; + if (adapter->vmdq_pools) + adapter->num_tx_queues = adapter->vmdq_pools; + else + adapter->num_tx_queues = adapter->num_rx_queues; #else adapter->num_tx_queues = max_t(u32, 1, adapter->vmdq_pools); #endif @@ -838,19 +1003,20 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter) break; } /* MSI-X failed, so fall through and try MSI */ - dev_warn(&pdev->dev, "Failed to initialize MSI-X interrupts." - " Falling back to MSI interrupts.\n"); + dev_warn(pci_dev_to_dev(pdev), "Failed to initialize MSI-X interrupts. " + "Falling back to MSI interrupts.\n"); igb_reset_interrupt_capability(adapter); case IGB_INT_MODE_MSI: if (!pci_enable_msi(pdev)) adapter->flags |= IGB_FLAG_HAS_MSI; else - dev_warn(&pdev->dev, "Failed to initialize MSI " - "interrupts. Falling back to legacy interrupts.\n"); + dev_warn(pci_dev_to_dev(pdev), "Failed to initialize MSI " + "interrupts. Falling back to legacy " + "interrupts.\n"); /* Fall through */ case IGB_INT_MODE_LEGACY: /* disable advanced features and set number of queues to 1 */ - adapter->vfs_allocated_count = 0; + igb_reset_sriov_capability(adapter); adapter->vmdq_pools = 0; adapter->rss_queues = 1; adapter->flags |= IGB_FLAG_QUEUE_PAIRS; @@ -860,116 +1026,195 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter) /* Don't do anything; this is system default */ break; } +} -#ifdef HAVE_TX_MQ - /* Notify the stack of the (possibly) reduced Tx Queue count. 
*/ -#ifdef CONFIG_NETDEVICES_MULTIQUEUE - adapter->netdev->egress_subqueue_count = - min_t(u32, adapter->num_tx_queues, - max_t(u32, adapter->rss_queues, adapter->vmdq_pools)); -#else - adapter->netdev->real_num_tx_queues = - min_t(u32, adapter->num_tx_queues, - max_t(u32, adapter->rss_queues, adapter->vmdq_pools)); -#endif -#endif - - return; +static void igb_add_ring(struct igb_ring *ring, + struct igb_ring_container *head) +{ + head->ring = ring; + head->count++; } /** - * igb_alloc_q_vectors - Allocate memory for interrupt vectors + * igb_alloc_q_vector - Allocate memory for a single interrupt vector * @adapter: board private structure to initialize + * @v_count: q_vectors allocated on adapter, used for ring interleaving + * @v_idx: index of vector in adapter struct + * @txr_count: total number of Tx rings to allocate + * @txr_idx: index of first Tx ring to allocate + * @rxr_count: total number of Rx rings to allocate + * @rxr_idx: index of first Rx ring to allocate * - * We allocate one q_vector per queue interrupt. If allocation fails we - * return -ENOMEM. + * We allocate one q_vector. If allocation fails we return -ENOMEM. **/ -static int igb_alloc_q_vectors(struct igb_adapter *adapter) +static int igb_alloc_q_vector(struct igb_adapter *adapter, + unsigned int v_count, unsigned int v_idx, + unsigned int txr_count, unsigned int txr_idx, + unsigned int rxr_count, unsigned int rxr_idx) { struct igb_q_vector *q_vector; - struct e1000_hw *hw = &adapter->hw; - int v_idx; + struct igb_ring *ring; + int ring_count, size; - for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { - q_vector = kzalloc(sizeof(struct igb_q_vector), GFP_KERNEL); - if (!q_vector) - goto err_out; - q_vector->adapter = adapter; - q_vector->itr_register = hw->hw_addr + E1000_EITR(0); - q_vector->itr_val = IGB_START_ITR; - netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64); - adapter->q_vector[v_idx] = q_vector; -#ifdef IGB_LRO - if (v_idx < adapter->num_rx_queues) { - int size = sizeof(struct igb_lro_list); - q_vector->lrolist = vmalloc(size); - if (!q_vector->lrolist) - goto err_out; - memset(q_vector->lrolist, 0, size); - igb_lro_ring_init(q_vector->lrolist); - } -#endif + /* igb only supports 1 Tx and/or 1 Rx queue per vector */ + if (txr_count > 1 || rxr_count > 1) + return -ENOMEM; + + ring_count = txr_count + rxr_count; + size = sizeof(struct igb_q_vector) + + (sizeof(struct igb_ring) * ring_count); + + /* allocate q_vector and rings */ + q_vector = kzalloc(size, GFP_KERNEL); + if (!q_vector) + return -ENOMEM; + + /* initialize NAPI */ + netif_napi_add(adapter->netdev, &q_vector->napi, + igb_poll, 64); + + /* tie q_vector and adapter together */ + adapter->q_vector[v_idx] = q_vector; + q_vector->adapter = adapter; + + /* initialize work limits */ + q_vector->tx.work_limit = adapter->tx_work_limit; + + /* initialize ITR configuration */ + q_vector->itr_register = adapter->hw.hw_addr + E1000_EITR(0); + q_vector->itr_val = IGB_START_ITR; + + /* initialize pointer to rings */ + ring = q_vector->ring; + + /* intialize ITR */ + if (rxr_count) { + /* rx or rx/tx vector */ + if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3) + q_vector->itr_val = adapter->rx_itr_setting; + } else { + /* tx only vector */ + if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3) + q_vector->itr_val = adapter->tx_itr_setting; } - return 0; -err_out: - igb_free_q_vectors(adapter); - return -ENOMEM; -} + if (txr_count) { + /* assign generic ring traits */ + ring->dev = &adapter->pdev->dev; + ring->netdev = 
adapter->netdev; -static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter, - int ring_idx, int v_idx) -{ - struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; + /* configure backlink on ring */ + ring->q_vector = q_vector; - q_vector->rx_ring = adapter->rx_ring[ring_idx]; - q_vector->rx_ring->q_vector = q_vector; - q_vector->itr_val = adapter->rx_itr_setting; - if (q_vector->itr_val && q_vector->itr_val <= 3) - q_vector->itr_val = IGB_START_ITR; -} + /* update q_vector Tx values */ + igb_add_ring(ring, &q_vector->tx); -static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter, - int ring_idx, int v_idx) -{ - struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; + /* For 82575, context index must be unique per ring. */ + if (adapter->hw.mac.type == e1000_82575) + set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags); - q_vector->tx_ring = adapter->tx_ring[ring_idx]; - q_vector->tx_ring->q_vector = q_vector; - q_vector->itr_val = adapter->tx_itr_setting; - if (q_vector->itr_val && q_vector->itr_val <= 3) - q_vector->itr_val = IGB_START_ITR; + /* apply Tx specific ring traits */ + ring->count = adapter->tx_ring_count; + ring->queue_index = txr_idx; + + /* assign ring to adapter */ + adapter->tx_ring[txr_idx] = ring; + + /* push pointer to next ring */ + ring++; + } + + if (rxr_count) { + /* assign generic ring traits */ + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Rx values */ + igb_add_ring(ring, &q_vector->rx); + +#ifndef HAVE_NDO_SET_FEATURES + /* enable rx checksum */ + set_bit(IGB_RING_FLAG_RX_CSUM, &ring->flags); + +#endif + /* set flag indicating ring supports SCTP checksum offload */ + if (adapter->hw.mac.type >= e1000_82576) + set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags); + + if ((adapter->hw.mac.type == e1000_i350) || + (adapter->hw.mac.type == e1000_i354)) + set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags); + + /* apply Rx specific ring traits */ + ring->count = adapter->rx_ring_count; + ring->queue_index = rxr_idx; + + /* assign ring to adapter */ + adapter->rx_ring[rxr_idx] = ring; + } + + return 0; } /** - * igb_map_ring_to_vector - maps allocated queues to vectors + * igb_alloc_q_vectors - Allocate memory for interrupt vectors + * @adapter: board private structure to initialize * - * This function maps the recently allocated queues to vectors. + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. 
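+ * As a worked example of the split (illustrative numbers): with 4
+ * q_vectors serving 4 Rx and 4 Tx queues, each vector gets one Rx and
+ * one Tx ring (rqpv = tqpv = DIV_ROUND_UP(4, 4) = 1); with 8 q_vectors
+ * the first 4 become Rx-only vectors and the remaining 4 Tx-only.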
**/ -static int igb_map_ring_to_vector(struct igb_adapter *adapter) +static int igb_alloc_q_vectors(struct igb_adapter *adapter) { - int i; - int v_idx = 0; + int q_vectors = adapter->num_q_vectors; + int rxr_remaining = adapter->num_rx_queues; + int txr_remaining = adapter->num_tx_queues; + int rxr_idx = 0, txr_idx = 0, v_idx = 0; + int err; - if ((adapter->num_q_vectors < adapter->num_rx_queues) || - (adapter->num_q_vectors < adapter->num_tx_queues)) - return -ENOMEM; + if (q_vectors >= (rxr_remaining + txr_remaining)) { + for (; rxr_remaining; v_idx++) { + err = igb_alloc_q_vector(adapter, q_vectors, v_idx, + 0, 0, 1, rxr_idx); - if (adapter->num_q_vectors == (adapter->num_rx_queues + adapter->num_tx_queues)) { - for (i = 0; i < adapter->num_rx_queues; i++) - igb_map_rx_ring_to_vector(adapter, i, v_idx++); - for (i = 0; i < adapter->num_tx_queues; i++) - igb_map_tx_ring_to_vector(adapter, i, v_idx++); - } else { - for (i = 0; i < adapter->num_rx_queues; i++) { - if (i < adapter->num_tx_queues) - igb_map_tx_ring_to_vector(adapter, i, v_idx); - igb_map_rx_ring_to_vector(adapter, i, v_idx++); + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining--; + rxr_idx++; } - for (; i < adapter->num_tx_queues; i++) - igb_map_tx_ring_to_vector(adapter, i, v_idx++); } + + for (; v_idx < q_vectors; v_idx++) { + int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); + int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); + err = igb_alloc_q_vector(adapter, q_vectors, v_idx, + tqpv, txr_idx, rqpv, rxr_idx); + + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining -= rqpv; + txr_remaining -= tqpv; + rxr_idx++; + txr_idx++; + } + return 0; + +err_out: + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) + igb_free_q_vector(adapter, v_idx); + + return -ENOMEM; } /** @@ -977,37 +1222,28 @@ static int igb_map_ring_to_vector(struct igb_adapter *adapter) * * This function initializes the interrupts and allocates all of the queues. 
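+ * The msix argument lets the caller veto MSI-X: igb_request_irq() passes
+ * false after a failed MSI-X vector request so the scheme is rebuilt
+ * around a single MSI or legacy interrupt instead.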
**/ -static int igb_init_interrupt_scheme(struct igb_adapter *adapter) +static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix) { struct pci_dev *pdev = adapter->pdev; int err; - igb_set_interrupt_capability(adapter); + igb_set_interrupt_capability(adapter, msix); err = igb_alloc_q_vectors(adapter); if (err) { - dev_err(&pdev->dev, "Unable to allocate memory for vectors\n"); + dev_err(pci_dev_to_dev(pdev), "Unable to allocate memory for vectors\n"); goto err_alloc_q_vectors; } - err = igb_alloc_queues(adapter); - if (err) { - dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); - goto err_alloc_queues; - } - - err = igb_map_ring_to_vector(adapter); - if (err) { - dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n"); - goto err_map_queues; - } + igb_cache_ring_register(adapter); +#ifdef __VMKNETDDI_QUEUEOPS__ + adapter->rx_ring[0]->active = 1; + adapter->rx_ring[0]->allocated = 1; +#endif return 0; -err_map_queues: - igb_free_queues(adapter); -err_alloc_queues: - igb_free_q_vectors(adapter); + err_alloc_q_vectors: igb_reset_interrupt_capability(adapter); return err; @@ -1023,7 +1259,6 @@ static int igb_request_irq(struct igb_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; - struct e1000_hw *hw = &adapter->hw; int err = 0; if (adapter->msix_entries) { @@ -1031,50 +1266,24 @@ static int igb_request_irq(struct igb_adapter *adapter) if (!err) goto request_done; /* fall back to MSI */ - igb_clear_interrupt_scheme(adapter); - if (!pci_enable_msi(pdev)) - adapter->flags |= IGB_FLAG_HAS_MSI; igb_free_all_tx_resources(adapter); igb_free_all_rx_resources(adapter); - adapter->num_tx_queues = 1; - adapter->num_rx_queues = 1; - adapter->num_q_vectors = 1; - err = igb_alloc_q_vectors(adapter); - if (err) { - dev_err(&pdev->dev, - "Unable to allocate memory for vectors\n"); - goto request_done; - } - err = igb_alloc_queues(adapter); - if (err) { - dev_err(&pdev->dev, - "Unable to allocate memory for queues\n"); - igb_free_q_vectors(adapter); + + igb_clear_interrupt_scheme(adapter); + igb_reset_sriov_capability(adapter); + err = igb_init_interrupt_scheme(adapter, false); + if (err) goto request_done; - } igb_setup_all_tx_resources(adapter); igb_setup_all_rx_resources(adapter); - } else { - switch (hw->mac.type) { - case e1000_82575: - E1000_WRITE_REG(hw, E1000_MSIXBM(0), - (E1000_EICR_RX_QUEUE0 | - E1000_EICR_TX_QUEUE0 | - E1000_EIMS_OTHER)); - break; - case e1000_82576: - case e1000_82580: - case e1000_i350: - E1000_WRITE_REG(hw, E1000_IVAR0, E1000_IVAR_VALID); - break; - default: - break; - } + igb_configure(adapter); } + igb_assign_vector(adapter->q_vector[0], 0); + if (adapter->flags & IGB_FLAG_HAS_MSI) { err = request_irq(pdev->irq, &igb_intr_msi, 0, - netdev->name, adapter); + netdev->name, adapter); if (!err) goto request_done; @@ -1084,10 +1293,11 @@ static int igb_request_irq(struct igb_adapter *adapter) } err = request_irq(pdev->irq, &igb_intr, IRQF_SHARED, - netdev->name, adapter); + netdev->name, adapter); if (err) - dev_err(&pdev->dev, "Error %d getting interrupt\n", err); + dev_err(pci_dev_to_dev(pdev), "Error %d getting interrupt\n", + err); request_done: return err; @@ -1123,33 +1333,26 @@ static void igb_irq_disable(struct igb_adapter *adapter) */ if (adapter->msix_entries) { u32 regval = E1000_READ_REG(hw, E1000_EIAM); - regval &= ~adapter->eims_enable_mask; - E1000_WRITE_REG(hw, E1000_EIAM, regval); + E1000_WRITE_REG(hw, E1000_EIAM, regval & ~adapter->eims_enable_mask); E1000_WRITE_REG(hw, 
E1000_EIMC, adapter->eims_enable_mask); regval = E1000_READ_REG(hw, E1000_EIAC); - regval &= ~adapter->eims_enable_mask; - E1000_WRITE_REG(hw, E1000_EIAC, regval); + E1000_WRITE_REG(hw, E1000_EIAC, regval & ~adapter->eims_enable_mask); } E1000_WRITE_REG(hw, E1000_IAM, 0); E1000_WRITE_REG(hw, E1000_IMC, ~0); E1000_WRITE_FLUSH(hw); -#ifdef __VMKLNX__ - /* - * PR 612379: We shouldn't be calling synchronize_irq on the legacy irq - * if MSIX vectors are enabled and allocated. - */ if (adapter->msix_entries) { - int i; + int vector = 0, i; + + synchronize_irq(adapter->msix_entries[vector++].vector); + for (i = 0; i < adapter->num_q_vectors; i++) - synchronize_irq(adapter->msix_entries[i].vector); + synchronize_irq(adapter->msix_entries[vector++].vector); } else { synchronize_irq(adapter->pdev->irq); } -#else - synchronize_irq(adapter->pdev->irq); -#endif } /** @@ -1161,21 +1364,20 @@ static void igb_irq_enable(struct igb_adapter *adapter) struct e1000_hw *hw = &adapter->hw; if (adapter->msix_entries) { - u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC; + u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA; u32 regval = E1000_READ_REG(hw, E1000_EIAC); - E1000_WRITE_REG(hw, E1000_EIAC, - regval | adapter->eims_enable_mask); + E1000_WRITE_REG(hw, E1000_EIAC, regval | adapter->eims_enable_mask); regval = E1000_READ_REG(hw, E1000_EIAM); - E1000_WRITE_REG(hw, E1000_EIAM, - regval | adapter->eims_enable_mask); + E1000_WRITE_REG(hw, E1000_EIAM, regval | adapter->eims_enable_mask); E1000_WRITE_REG(hw, E1000_EIMS, adapter->eims_enable_mask); if (adapter->vfs_allocated_count) { E1000_WRITE_REG(hw, E1000_MBVFIMR, 0xFF); ims |= E1000_IMS_VMMB; + if (adapter->mdd) + if ((adapter->hw.mac.type == e1000_i350) || + (adapter->hw.mac.type == e1000_i354)) + ims |= E1000_IMS_MDDET; } - if (adapter->hw.mac.type == e1000_82580) - ims |= E1000_IMS_DRSTA; - E1000_WRITE_REG(hw, E1000_IMS, ims); } else { E1000_WRITE_REG(hw, E1000_IMS, IMS_ENABLE_MASK | @@ -1193,7 +1395,7 @@ static void igb_update_mng_vlan(struct igb_adapter *adapter) if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { /* add VID to filter table */ - igb_vfta_set(hw, vid, TRUE); + igb_vfta_set(adapter, vid, TRUE); adapter->mng_vlan_id = vid; } else { adapter->mng_vlan_id = IGB_MNG_VLAN_NONE; @@ -1201,9 +1403,13 @@ static void igb_update_mng_vlan(struct igb_adapter *adapter) if ((old_vid != (u16)IGB_MNG_VLAN_NONE) && (vid != old_vid) && +#ifdef HAVE_VLAN_RX_REGISTER !vlan_group_get_device(adapter->vlgrp, old_vid)) { +#else + !test_bit(old_vid, adapter->active_vlans)) { +#endif /* remove VID from filter table */ - igb_vfta_set(hw, old_vid, FALSE); + igb_vfta_set(adapter, old_vid, FALSE); } } @@ -1224,7 +1430,7 @@ static void igb_release_hw_control(struct igb_adapter *adapter) /* Let firmware take over control of h/w */ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); E1000_WRITE_REG(hw, E1000_CTRL_EXT, - ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); + ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); } /** @@ -1244,7 +1450,7 @@ static void igb_get_hw_control(struct igb_adapter *adapter) /* Let firmware know the driver has taken over */ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); E1000_WRITE_REG(hw, E1000_CTRL_EXT, - ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); + ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); } /** @@ -1270,11 +1476,12 @@ static void igb_configure(struct igb_adapter *adapter) e1000_rx_fifo_flush_82575(&adapter->hw); #ifdef CONFIG_NETDEVICES_MULTIQUEUE - +#ifndef __VMKLNX__ if (adapter->num_tx_queues > 1) netdev->features |= NETIF_F_MULTI_QUEUE; else 
netdev->features &= ~NETIF_F_MULTI_QUEUE; +#endif /* __VMKLNX__ */ #endif /* call igb_desc_unused which always leaves @@ -1282,17 +1489,39 @@ static void igb_configure(struct igb_adapter *adapter) * next_to_use != next_to_clean */ for (i = 0; i < adapter->num_rx_queues; i++) { struct igb_ring *ring = adapter->rx_ring[i]; - igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring)); + igb_alloc_rx_buffers(ring, igb_desc_unused(ring)); } - #ifdef __VMKNETDDI_QUEUEOPS__ -/* Invalidate netqueue state as filters have been lost after reinit */ + /* Invalidate netqueue state as filters have been lost after reinit */ vmknetddi_queueops_invalidate_state(adapter->netdev); #endif +} + +/** + * igb_power_up_link - Power up the phy/serdes link + * @adapter: address of board private structure + **/ +void igb_power_up_link(struct igb_adapter *adapter) +{ + e1000_phy_hw_reset(&adapter->hw); - adapter->tx_queue_len = netdev->tx_queue_len; + if (adapter->hw.phy.media_type == e1000_media_type_copper) + e1000_power_up_phy(&adapter->hw); + else + e1000_power_up_fiber_serdes_link(&adapter->hw); } +/** + * igb_power_down_link - Power down the phy/serdes link + * @adapter: address of board private structure + */ +static void igb_power_down_link(struct igb_adapter *adapter) +{ + if (adapter->hw.phy.media_type == e1000_media_type_copper) + e1000_power_down_phy(&adapter->hw); + else + e1000_shutdown_fiber_serdes_link(&adapter->hw); +} /** * igb_up - Open the interface and prepare it to handle traffic @@ -1310,8 +1539,11 @@ int igb_up(struct igb_adapter *adapter) for (i = 0; i < adapter->num_q_vectors; i++) napi_enable(&(adapter->q_vector[i]->napi)); + if (adapter->msix_entries) igb_configure_msix(adapter); + else + igb_assign_vector(adapter->q_vector[0], 0); igb_configure_lli(adapter); @@ -1328,10 +1560,16 @@ int igb_up(struct igb_adapter *adapter) netif_tx_start_all_queues(adapter->netdev); + if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA) + schedule_work(&adapter->dma_err_task); /* start the watchdog. */ hw->mac.get_link_status = 1; schedule_work(&adapter->watchdog_task); + if ((adapter->flags & IGB_FLAG_EEE) && + (!hw->dev_spec._82575.eee_disable)) + adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T; + return 0; } @@ -1364,20 +1602,21 @@ void igb_down(struct igb_adapter *adapter) E1000_WRITE_REG(hw, E1000_TCTL, tctl); /* flush both disables and wait for them to finish */ E1000_WRITE_FLUSH(hw); - msleep(10); + usleep_range(10000, 20000); for (i = 0; i < adapter->num_q_vectors; i++) napi_disable(&(adapter->q_vector[i]->napi)); igb_irq_disable(adapter); + adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; + del_timer_sync(&adapter->watchdog_timer); + if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA) + del_timer_sync(&adapter->dma_err_timer); del_timer_sync(&adapter->phy_info_timer); - netdev->tx_queue_len = adapter->tx_queue_len; #ifndef __VMKLNX__ - /* in VMKLinux this should be put before stop all tx queues to - avoid race with netdev_watchdog. 
See PR 976054 */ netif_carrier_off(netdev); #endif @@ -1396,7 +1635,6 @@ void igb_down(struct igb_adapter *adapter) igb_clean_all_tx_rings(adapter); igb_clean_all_rx_rings(adapter); #ifdef IGB_DCA - /* since we reset the hardware DCA settings were cleared */ igb_setup_dca(adapter); #endif @@ -1406,7 +1644,7 @@ void igb_reinit_locked(struct igb_adapter *adapter) { WARN_ON(in_interrupt()); while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) - msleep(1); + usleep_range(1000, 2000); igb_down(adapter); igb_up(adapter); clear_bit(__IGB_RESETTING, &adapter->state); @@ -1418,8 +1656,7 @@ void igb_reset(struct igb_adapter *adapter) struct e1000_hw *hw = &adapter->hw; struct e1000_mac_info *mac = &hw->mac; struct e1000_fc_info *fc = &hw->fc; - u32 pba = 0, tx_space, min_tx_space, min_rx_space; - u16 hwm; + u32 pba = 0, tx_space, min_tx_space, min_rx_space, hwm; /* Repartition Pba for greater than 9k mtu * To take effect CTRL.RST is required. @@ -1427,6 +1664,7 @@ void igb_reset(struct igb_adapter *adapter) switch (mac->type) { case e1000_i350: case e1000_82580: + case e1000_i354: pba = E1000_READ_REG(hw, E1000_RXPBS); pba = e1000_rxpbs_adjust_82580(pba); break; @@ -1435,6 +1673,8 @@ void igb_reset(struct igb_adapter *adapter) pba &= E1000_RXPBS_SIZE_MASK_82576; break; case e1000_82575: + case e1000_i210: + case e1000_i211: default: pba = E1000_PBA_34K; break; @@ -1460,7 +1700,7 @@ void igb_reset(struct igb_adapter *adapter) * but don't include ethernet FCS because hardware appends it */ min_tx_space = (adapter->max_frame_size + sizeof(union e1000_adv_tx_desc) - - ETH_FCS_LEN) * 2; + ETH_FCS_LEN) * 2; min_tx_space = ALIGN(min_tx_space, 1024); min_tx_space >>= 10; /* software strips receive CRC, so leave room for it */ @@ -1492,7 +1732,7 @@ void igb_reset(struct igb_adapter *adapter) hwm = min(((pba << 10) * 9 / 10), ((pba << 10) - 2 * adapter->max_frame_size)); - fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */ + fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */ fc->low_water = fc->high_water - 16; fc->pause_time = 0xFFFF; fc->send_xon = 1; @@ -1501,8 +1741,12 @@ void igb_reset(struct igb_adapter *adapter) /* disable receive for all VFs and wait one second */ if (adapter->vfs_allocated_count) { int i; + /* + * Clear all flags except indication that the PF has set + * the VF MAC addresses administratively + */ for (i = 0 ; i < adapter->vfs_allocated_count; i++) - adapter->vf_data[i].flags = 0; + adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC; /* ping all the active vfs to let them know we are going down */ igb_ping_all_vfs(adapter); @@ -1516,100 +1760,524 @@ void igb_reset(struct igb_adapter *adapter) e1000_reset_hw(hw); E1000_WRITE_REG(hw, E1000_WUC, 0); + if (adapter->flags & IGB_FLAG_MEDIA_RESET) { + e1000_setup_init_funcs(hw, TRUE); + igb_check_options(adapter); + e1000_get_bus_info(hw); + adapter->flags &= ~IGB_FLAG_MEDIA_RESET; + } if (e1000_init_hw(hw)) - dev_err(&pdev->dev, "Hardware Error\n"); + dev_err(pci_dev_to_dev(pdev), "Hardware Error\n"); - if (hw->mac.type == e1000_82580) { - u32 reg = E1000_READ_REG(hw, E1000_PCIEMISC); - E1000_WRITE_REG(hw, E1000_PCIEMISC, - reg & ~E1000_PCIEMISC_LX_DECISION); + /* + * Flow control settings reset on hardware reset, so guarantee flow + * control is off when forcing speed. + */ + if (!hw->mac.autoneg) + e1000_force_mac_fc(hw); + + igb_init_dmac(adapter, pba); + /* Re-initialize the thermal sensor on i350 devices. 
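+ * Only bus function 0 owns the sensor, and when an external thermal
+ * sensor (adapter->ets) is present its i2c interface is brought back up
+ * before the thresholds are reprogrammed.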
*/ + if (mac->type == e1000_i350 && hw->bus.func == 0) { + /* + * If present, re-initialize the external thermal sensor + * interface. + */ + if (adapter->ets) + e1000_set_i2c_bb(hw); + e1000_init_thermal_sensor_thresh(hw); + } + + /*Re-establish EEE setting */ + if (hw->phy.media_type == e1000_media_type_copper) { + switch (mac->type) { + case e1000_i350: + case e1000_i210: + case e1000_i211: + e1000_set_eee_i350(hw); + break; + case e1000_i354: + e1000_set_eee_i354(hw); + break; + default: + break; + } } + + if (!netif_running(adapter->netdev)) + igb_power_down_link(adapter); + igb_update_mng_vlan(adapter); /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ E1000_WRITE_REG(hw, E1000_VET, ETHERNET_IEEE_VLAN_TYPE); + +#ifdef HAVE_PTP_1588_CLOCK + /* Re-enable PTP, where applicable. */ + igb_ptp_reset(adapter); +#endif /* HAVE_PTP_1588_CLOCK */ + e1000_get_phy_info(hw); + + adapter->devrc++; } -#ifdef HAVE_NET_DEVICE_OPS -static const struct net_device_ops igb_netdev_ops = { - .ndo_open = igb_open, - .ndo_stop = igb_close, - .ndo_start_xmit = igb_xmit_frame_adv, - .ndo_get_stats = igb_get_stats, - .ndo_set_rx_mode = igb_set_rx_mode, - .ndo_set_multicast_list = igb_set_rx_mode, - .ndo_set_mac_address = igb_set_mac, - .ndo_change_mtu = igb_change_mtu, - .ndo_do_ioctl = igb_ioctl, - .ndo_tx_timeout = igb_tx_timeout, - .ndo_validate_addr = eth_validate_addr, - .ndo_vlan_rx_register = igb_vlan_rx_register, - .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = igb_netpoll, +#ifdef HAVE_NDO_SET_FEATURES +static netdev_features_t igb_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + /* + * Since there is no support for separate tx vlan accel + * enabled make sure tx flag is cleared if rx is. + */ +#ifdef NETIF_F_HW_VLAN_CTAG_RX + if (!(features & NETIF_F_HW_VLAN_CTAG_RX)) + features &= ~NETIF_F_HW_VLAN_CTAG_TX; +#else + if (!(features & NETIF_F_HW_VLAN_RX)) + features &= ~NETIF_F_HW_VLAN_TX; #endif -}; -#endif /* HAVE_NET_DEVICE_OPS */ -/** - * igb_probe - Device Initialization Routine - * @pdev: PCI device information struct - * @ent: entry in igb_pci_tbl - * - * Returns 0 on success, negative on failure - * - * igb_probe initializes an adapter identified by a pci_dev structure. - * The OS initialization, configuring of the adapter private structure, - * and a hardware reset occur. 
- **/ -static int __devinit igb_probe(struct pci_dev *pdev, - const struct pci_device_id *ent) + /* If Rx checksum is disabled, then LRO should also be disabled */ + if (!(features & NETIF_F_RXCSUM)) + features &= ~NETIF_F_LRO; + + return features; +} + +static int igb_set_features(struct net_device *netdev, + netdev_features_t features) { - struct net_device *netdev; - struct igb_adapter *adapter; - struct e1000_hw *hw; - u16 eeprom_data = 0; - static int global_quad_port_a; /* global quad port a indication */ - int i, err, pci_using_dac; - static int cards_found; + u32 changed = netdev->features ^ features; - err = pci_enable_device_mem(pdev); - if (err) - return err; +#ifdef NETIF_F_HW_VLAN_CTAG_RX + if (changed & NETIF_F_HW_VLAN_CTAG_RX) +#else + if (changed & NETIF_F_HW_VLAN_RX) +#endif + igb_vlan_mode(netdev, features); - pci_using_dac = 0; - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); - if (!err) { - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); - if (!err) - pci_using_dac = 1; - } else { - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (err) { - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); - if (err) { - IGB_ERR("No usable DMA configuration, " - "aborting\n"); - goto err_dma; - } - } - } + return 0; +} -#ifndef HAVE_ASPM_QUIRKS - /* 82575 requires that the pci-e link partner disable the L0s state */ - switch (pdev->device) { - case E1000_DEV_ID_82575EB_COPPER: - case E1000_DEV_ID_82575EB_FIBER_SERDES: - case E1000_DEV_ID_82575GB_QUAD_COPPER: - pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S); - default: - break; +#ifdef NTF_SELF +#ifdef USE_CONST_DEV_UC_CHAR +static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, + u16 flags) +#else +static int igb_ndo_fdb_add(struct ndmsg *ndm, + struct net_device *dev, + unsigned char *addr, + u16 flags) +#endif +{ + struct igb_adapter *adapter = netdev_priv(dev); + struct e1000_hw *hw = &adapter->hw; + int err; + + if (!(adapter->vfs_allocated_count)) + return -EOPNOTSUPP; + + /* Hardware does not support aging addresses so if a + * ndm_state is given only allow permanent addresses + */ + if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { + pr_info("%s: FDB only supports static addresses\n", + igb_driver_name); + return -EINVAL; } -#endif /* HAVE_ASPM_QUIRKS */ + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) { + u32 rar_uc_entries = hw->mac.rar_entry_count - + (adapter->vfs_allocated_count + 1); + + if (netdev_uc_count(dev) < rar_uc_entries) + err = dev_uc_add_excl(dev, addr); + else + err = -ENOMEM; + } else if (is_multicast_ether_addr(addr)) { + err = dev_mc_add_excl(dev, addr); + } else { + err = -EINVAL; + } + + /* Only return duplicate errors if NLM_F_EXCL is set */ + if (err == -EEXIST && !(flags & NLM_F_EXCL)) + err = 0; + + return err; +} + +#ifndef USE_DEFAULT_FDB_DEL_DUMP +#ifdef USE_CONST_DEV_UC_CHAR +static int igb_ndo_fdb_del(struct ndmsg *ndm, + struct net_device *dev, + const unsigned char *addr) +#else +static int igb_ndo_fdb_del(struct ndmsg *ndm, + struct net_device *dev, + unsigned char *addr) +#endif +{ + struct igb_adapter *adapter = netdev_priv(dev); + int err = -EOPNOTSUPP; + + if (ndm->ndm_state & NUD_PERMANENT) { + pr_info("%s: FDB only supports static addresses\n", + igb_driver_name); + return -EINVAL; + } + + if (adapter->vfs_allocated_count) { + if (is_unicast_ether_addr(addr)) + err = dev_uc_del(dev, addr); + else if (is_multicast_ether_addr(addr)) + err = dev_mc_del(dev, addr); + else 
+ err = -EINVAL; + } + + return err; +} + +static int igb_ndo_fdb_dump(struct sk_buff *skb, + struct netlink_callback *cb, + struct net_device *dev, + int idx) +{ + struct igb_adapter *adapter = netdev_priv(dev); + + if (adapter->vfs_allocated_count) + idx = ndo_dflt_fdb_dump(skb, cb, dev, idx); + + return idx; +} +#endif /* USE_DEFAULT_FDB_DEL_DUMP */ + +#ifdef HAVE_BRIDGE_ATTRIBS +static int igb_ndo_bridge_setlink(struct net_device *dev, + struct nlmsghdr *nlh) +{ + struct igb_adapter *adapter = netdev_priv(dev); + struct e1000_hw *hw = &adapter->hw; + struct nlattr *attr, *br_spec; + int rem; + + if (!(adapter->vfs_allocated_count)) + return -EOPNOTSUPP; + + switch (adapter->hw.mac.type) { + case e1000_82576: + case e1000_i350: + case e1000_i354: + break; + default: + return -EOPNOTSUPP; + } + + br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); + + nla_for_each_nested(attr, br_spec, rem) { + __u16 mode; + + if (nla_type(attr) != IFLA_BRIDGE_MODE) + continue; + + mode = nla_get_u16(attr); + if (mode == BRIDGE_MODE_VEPA) { + e1000_vmdq_set_loopback_pf(hw, 0); + adapter->flags &= ~IGB_FLAG_LOOPBACK_ENABLE; + } else if (mode == BRIDGE_MODE_VEB) { + e1000_vmdq_set_loopback_pf(hw, 1); + adapter->flags |= IGB_FLAG_LOOPBACK_ENABLE; + } else + return -EINVAL; + + netdev_info(adapter->netdev, "enabling bridge mode: %s\n", + mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB"); + } + + return 0; +} + +#ifdef HAVE_BRIDGE_FILTER +static int igb_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev, u32 filter_mask) +#else +static int igb_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev) +#endif +{ + struct igb_adapter *adapter = netdev_priv(dev); + u16 mode; + + if (!(adapter->vfs_allocated_count)) + return -EOPNOTSUPP; + + if (adapter->flags & IGB_FLAG_LOOPBACK_ENABLE) + mode = BRIDGE_MODE_VEB; + else + mode = BRIDGE_MODE_VEPA; + + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode); +} +#endif /* HAVE_BRIDGE_ATTRIBS */ +#endif /* NTF_SELF */ + +#endif /* HAVE_NDO_SET_FEATURES */ +#ifdef HAVE_NET_DEVICE_OPS +static const struct net_device_ops igb_netdev_ops = { + .ndo_open = igb_open, + .ndo_stop = igb_close, + .ndo_start_xmit = igb_xmit_frame, + .ndo_get_stats = igb_get_stats, + .ndo_set_rx_mode = igb_set_rx_mode, + .ndo_set_mac_address = igb_set_mac, + .ndo_change_mtu = igb_change_mtu, + .ndo_do_ioctl = igb_ioctl, + .ndo_tx_timeout = igb_tx_timeout, + .ndo_validate_addr = eth_validate_addr, + .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid, +#ifdef IFLA_VF_MAX + .ndo_set_vf_mac = igb_ndo_set_vf_mac, + .ndo_set_vf_vlan = igb_ndo_set_vf_vlan, + .ndo_set_vf_tx_rate = igb_ndo_set_vf_bw, + .ndo_get_vf_config = igb_ndo_get_vf_config, +#ifdef HAVE_VF_SPOOFCHK_CONFIGURE + .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk, +#endif /* HAVE_VF_SPOOFCHK_CONFIGURE */ +#endif /* IFLA_VF_MAX */ +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = igb_netpoll, +#endif +#ifdef HAVE_NDO_SET_FEATURES + .ndo_fix_features = igb_fix_features, + .ndo_set_features = igb_set_features, +#endif +#ifdef HAVE_VLAN_RX_REGISTER + .ndo_vlan_rx_register = igb_vlan_mode, +#endif +#ifdef NTF_SELF + .ndo_fdb_add = igb_ndo_fdb_add, +#ifndef USE_DEFAULT_FDB_DEL_DUMP + .ndo_fdb_del = igb_ndo_fdb_del, + .ndo_fdb_dump = igb_ndo_fdb_dump, +#endif +#ifdef HAVE_BRIDGE_ATTRIBS + .ndo_bridge_setlink = igb_ndo_bridge_setlink, + .ndo_bridge_getlink = igb_ndo_bridge_getlink, +#endif /* HAVE_BRIDGE_ATTRIBS */ +#endif +}; 
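+/* Usage note (illustrative, iproute2 syntax assumed): with VFs allocated,
+ * the VEB/VEPA toggle wired up above is typically driven from userspace,
+ * e.g.:
+ *   bridge link set dev eth0 hwmode veb   (enable VM-to-VM loopback)
+ *   bridge link set dev eth0 hwmode vepa  (hairpin via the adjacent switch)
+ */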
+ +#ifdef CONFIG_IGB_VMDQ_NETDEV +static const struct net_device_ops igb_vmdq_ops = { + .ndo_open = &igb_vmdq_open, + .ndo_stop = &igb_vmdq_close, + .ndo_start_xmit = &igb_vmdq_xmit_frame, + .ndo_get_stats = &igb_vmdq_get_stats, + .ndo_set_rx_mode = &igb_vmdq_set_rx_mode, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = &igb_vmdq_set_mac, + .ndo_change_mtu = &igb_vmdq_change_mtu, + .ndo_tx_timeout = &igb_vmdq_tx_timeout, + .ndo_vlan_rx_register = &igb_vmdq_vlan_rx_register, + .ndo_vlan_rx_add_vid = &igb_vmdq_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = &igb_vmdq_vlan_rx_kill_vid, +}; + +#endif /* CONFIG_IGB_VMDQ_NETDEV */ +#endif /* HAVE_NET_DEVICE_OPS */ +#ifdef CONFIG_IGB_VMDQ_NETDEV +void igb_assign_vmdq_netdev_ops(struct net_device *vnetdev) +{ +#ifdef HAVE_NET_DEVICE_OPS + vnetdev->netdev_ops = &igb_vmdq_ops; +#else + dev->open = &igb_vmdq_open; + dev->stop = &igb_vmdq_close; + dev->hard_start_xmit = &igb_vmdq_xmit_frame; + dev->get_stats = &igb_vmdq_get_stats; +#ifdef HAVE_SET_RX_MODE + dev->set_rx_mode = &igb_vmdq_set_rx_mode; +#endif + dev->set_multicast_list = &igb_vmdq_set_rx_mode; + dev->set_mac_address = &igb_vmdq_set_mac; + dev->change_mtu = &igb_vmdq_change_mtu; +#ifdef HAVE_TX_TIMEOUT + dev->tx_timeout = &igb_vmdq_tx_timeout; +#endif +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) + dev->vlan_rx_register = &igb_vmdq_vlan_rx_register; + dev->vlan_rx_add_vid = &igb_vmdq_vlan_rx_add_vid; + dev->vlan_rx_kill_vid = &igb_vmdq_vlan_rx_kill_vid; +#endif +#endif + igb_vmdq_set_ethtool_ops(vnetdev); + vnetdev->watchdog_timeo = 5 * HZ; + +} + +int igb_init_vmdq_netdevs(struct igb_adapter *adapter) +{ + int pool, err = 0, base_queue; + struct net_device *vnetdev; + struct igb_vmdq_adapter *vmdq_adapter; + + for (pool = 1; pool < adapter->vmdq_pools; pool++) { + int qpp = (!adapter->rss_queues ? 
1 : adapter->rss_queues); + base_queue = pool * qpp; + vnetdev = alloc_etherdev(sizeof(struct igb_vmdq_adapter)); + if (!vnetdev) { + err = -ENOMEM; + break; + } + vmdq_adapter = netdev_priv(vnetdev); + vmdq_adapter->vnetdev = vnetdev; + vmdq_adapter->real_adapter = adapter; + vmdq_adapter->rx_ring = adapter->rx_ring[base_queue]; + vmdq_adapter->tx_ring = adapter->tx_ring[base_queue]; + igb_assign_vmdq_netdev_ops(vnetdev); + snprintf(vnetdev->name, IFNAMSIZ, "%sv%d", + adapter->netdev->name, pool); + vnetdev->features = adapter->netdev->features; +#ifdef HAVE_NETDEV_VLAN_FEATURES + vnetdev->vlan_features = adapter->netdev->vlan_features; +#endif + adapter->vmdq_netdev[pool-1] = vnetdev; + err = register_netdev(vnetdev); + if (err) + break; + } + return err; +} + +int igb_remove_vmdq_netdevs(struct igb_adapter *adapter) +{ + int pool, err = 0; + + for (pool = 1; pool < adapter->vmdq_pools; pool++) { + unregister_netdev(adapter->vmdq_netdev[pool-1]); + free_netdev(adapter->vmdq_netdev[pool-1]); + adapter->vmdq_netdev[pool-1] = NULL; + } + return err; +} +#endif /* CONFIG_IGB_VMDQ_NETDEV */ + +/** + * igb_set_fw_version - Configure version string for ethtool + * @adapter: adapter struct + * + **/ +static void igb_set_fw_version(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_fw_version fw; + + e1000_get_fw_version(hw, &fw); + + switch (hw->mac.type) { + case e1000_i210: + case e1000_i211: + if (!(e1000_get_flash_presence_i210(hw))) { + snprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%2d.%2d-%d", + fw.invm_major, fw.invm_minor, fw.invm_img_type); + break; + } + /* fall through */ + default: + /* if option rom is valid, display its version too*/ + if (fw.or_valid) { + snprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%d.%d, 0x%08x, %d.%d.%d", + fw.eep_major, fw.eep_minor, fw.etrack_id, + fw.or_major, fw.or_build, fw.or_patch); + /* no option rom */ + } else { + if (fw.etrack_id != 0X0000) { + snprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%d.%d, 0x%08x", + fw.eep_major, fw.eep_minor, fw.etrack_id); + } else { + snprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%d.%d.%d", + fw.eep_major, fw.eep_minor, fw.eep_build); + } + } + break; + } + + return; +} + +/** + * igb_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in igb_pci_tbl + * + * Returns 0 on success, negative on failure + * + * igb_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. 
+ **/ +static int __devinit igb_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct igb_adapter *adapter; + struct e1000_hw *hw; + u16 eeprom_data = 0; + u8 pba_str[E1000_PBANUM_LENGTH]; + s32 ret_val; + static int global_quad_port_a; /* global quad port a indication */ + int i, err, pci_using_dac; + static int cards_found; + + err = pci_enable_device_mem(pdev); + if (err) + return err; + + pci_using_dac = 0; + err = dma_set_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(64)); + if (!err) { + err = dma_set_coherent_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(64)); + if (!err) + pci_using_dac = 1; + } else { + err = dma_set_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(32)); + if (err) { + err = dma_set_coherent_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(32)); + if (err) { + IGB_ERR("No usable DMA configuration, " + "aborting\n"); + goto err_dma; + } + } + } + +#ifndef HAVE_ASPM_QUIRKS + /* 82575 requires that the pci-e link partner disable the L0s state */ + switch (pdev->device) { + case E1000_DEV_ID_82575EB_COPPER: + case E1000_DEV_ID_82575EB_FIBER_SERDES: + case E1000_DEV_ID_82575GB_QUAD_COPPER: + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S); + default: + break; + } + +#endif /* HAVE_ASPM_QUIRKS */ err = pci_request_selected_regions(pdev, pci_select_bars(pdev, IORESOURCE_MEM), @@ -1640,6 +2308,7 @@ static int __devinit igb_probe(struct pci_dev *pdev, adapter->pdev = pdev; hw = &adapter->hw; hw->back = adapter; + adapter->port_num = hw->bus.func; adapter->msg_enable = (1 << debug) - 1; #ifdef HAVE_PCI_ERS @@ -1669,13 +2338,13 @@ static int __devinit igb_probe(struct pci_dev *pdev, #ifdef HAVE_TX_TIMEOUT netdev->tx_timeout = &igb_tx_timeout; #endif - netdev->vlan_rx_register = igb_vlan_rx_register; + netdev->vlan_rx_register = igb_vlan_mode; netdev->vlan_rx_add_vid = igb_vlan_rx_add_vid; netdev->vlan_rx_kill_vid = igb_vlan_rx_kill_vid; #ifdef CONFIG_NET_POLL_CONTROLLER netdev->poll_controller = igb_netpoll; #endif - netdev->hard_start_xmit = &igb_xmit_frame_adv; + netdev->hard_start_xmit = &igb_xmit_frame; #endif /* HAVE_NET_DEVICE_OPS */ igb_set_ethtool_ops(netdev); #ifdef HAVE_TX_TIMEOUT @@ -1708,53 +2377,71 @@ static int __devinit igb_probe(struct pci_dev *pdev, } if (e1000_check_reset_block(hw)) - dev_info(&pdev->dev, - "PHY reset is blocked due to SOL/IDER session.\n"); - - netdev->features = NETIF_F_SG | - NETIF_F_IP_CSUM | - NETIF_F_HW_VLAN_TX | - NETIF_F_HW_VLAN_RX | - NETIF_F_HW_VLAN_FILTER; + dev_info(pci_dev_to_dev(pdev), + "PHY reset is blocked due to SOL/IDER session.\n"); + /* + * features is initialized to 0 in allocation, it might have bits + * set by igb_sw_init so we should use an or instead of an + * assignment. 
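+ * As an illustration (editorially added, not driver code): if
+ * igb_sw_init() had already set a bit such as NETIF_F_GRO, then
+ *
+ *     netdev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
+ *
+ * would silently drop it, while the |= form below preserves it.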
+ */
+ netdev->features |= NETIF_F_SG |
+ NETIF_F_IP_CSUM |
#ifdef NETIF_F_IPV6_CSUM
- netdev->features |= NETIF_F_IPV6_CSUM;
+ NETIF_F_IPV6_CSUM |
#endif
#ifdef NETIF_F_TSO
- netdev->features |= NETIF_F_TSO;
+ NETIF_F_TSO |
#ifdef NETIF_F_TSO6
- netdev->features |= NETIF_F_TSO6;
-#endif
-#ifdef __VMKLNX__
-#ifdef NETIF_F_OFFLOAD_8OFFSET
- netdev->features |= NETIF_F_OFFLOAD_8OFFSET;
+ NETIF_F_TSO6 |
#endif
-#endif // __VMKLNX__
#endif /* NETIF_F_TSO */
-#ifdef IGB_LRO
-#ifdef NETIF_F_LRO
- netdev->features |= NETIF_F_LRO;
-
+#ifdef NETIF_F_RXHASH
+ NETIF_F_RXHASH |
#endif
+ NETIF_F_RXCSUM |
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX;
+#else
+ NETIF_F_HW_VLAN_RX |
+ NETIF_F_HW_VLAN_TX;
#endif
+
+ if (hw->mac.type >= e1000_82576)
+ netdev->features |= NETIF_F_SCTP_CSUM;
+
+#ifdef HAVE_NDO_SET_FEATURES
+ /* copy netdev features into list of user selectable features */
+ netdev->hw_features |= netdev->features;
+#else
#ifdef NETIF_F_GRO
+
+ /* this is only needed on kernels prior to 2.6.39 */
netdev->features |= NETIF_F_GRO;
#endif
+#endif
+
+#ifndef __VMKLNX__
+ /* set this bit last since it cannot be part of hw_features */
+#ifdef NETIF_F_HW_VLAN_CTAG_FILTER
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+#else
+ netdev->features |= NETIF_F_HW_VLAN_FILTER;
+#endif
+#endif /* __VMKLNX__ */
#ifdef HAVE_NETDEV_VLAN_FEATURES
- netdev->vlan_features |= NETIF_F_TSO;
- netdev->vlan_features |= NETIF_F_TSO6;
- netdev->vlan_features |= NETIF_F_IP_CSUM;
- netdev->vlan_features |= NETIF_F_IPV6_CSUM;
- netdev->vlan_features |= NETIF_F_SG;
+ netdev->vlan_features |= NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_SG;
#endif
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
- if (hw->mac.type >= e1000_82576)
- netdev->features |= NETIF_F_SCTP_CSUM;
-
adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
/* before reading the NVM, reset the controller to put the device in a
@@ -1762,16 +2449,24 @@
e1000_reset_hw(hw);
/* make sure the NVM is good */
+#ifdef __VMKLNX__
+ /*
+ * No need to validate the NVM for a flashless device since
+ * the returned value from the shared code is always < 0 for ESX
+ */
+ if (hw->device_id != E1000_DEV_ID_I210_COPPER_FLASHLESS &&
+ hw->device_id != E1000_DEV_ID_I210_SERDES_FLASHLESS)
+#endif
if (e1000_validate_nvm_checksum(hw) < 0) {
- dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
- err = -EIO;
- goto err_eeprom;
+ dev_err(pci_dev_to_dev(pdev), "The NVM Checksum Is Not"
+ " Valid\n");
+ err = -EIO;
+ goto err_eeprom;
}
/* copy the MAC address out of the NVM */
if (e1000_read_mac_addr(hw))
- dev_err(&pdev->dev, "NVM Read Error\n");
-
+ dev_err(pci_dev_to_dev(pdev), "NVM Read Error\n");
memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
#ifdef ETHTOOL_GPERMADDR
memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);
@@ -1780,21 +2475,31 @@
#else
if (!is_valid_ether_addr(netdev->dev_addr)) {
#endif
- dev_err(&pdev->dev, "Invalid MAC Address\n");
+ dev_err(pci_dev_to_dev(pdev), "Invalid MAC Address\n");
err = -EIO;
goto err_eeprom;
}
+ memcpy(&adapter->mac_table[0].addr, hw->mac.addr, netdev->addr_len);
+ adapter->mac_table[0].queue = adapter->vfs_allocated_count;
+ adapter->mac_table[0].state = (IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE);
+ igb_rar_set(adapter, 0);
+
/* get firmware version for ethtool -i */
- e1000_read_nvm(&adapter->hw, 5, 1, 
&adapter->fw_version); + igb_set_fw_version(adapter); setup_timer(&adapter->watchdog_timer, &igb_watchdog, (unsigned long) adapter); + if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA) + setup_timer(&adapter->dma_err_timer, &igb_dma_err_timer, + (unsigned long) adapter); setup_timer(&adapter->phy_info_timer, &igb_update_phy_info, (unsigned long) adapter); INIT_WORK(&adapter->reset_task, igb_reset_task); INIT_WORK(&adapter->watchdog_task, igb_watchdog_task); + if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA) + INIT_WORK(&adapter->dma_err_task, igb_dma_err_task); /* Initialize link properties that are user-changeable */ adapter->fc_autoneg = true; @@ -1806,13 +2511,12 @@ static int __devinit igb_probe(struct pci_dev *pdev, e1000_validate_mdi_setting(hw); - /* Initial Wake on LAN setting If APM wake is enabled in the EEPROM, - * enable the ACPI Magic Packet filter - */ - + /* By default, support wake on port A */ if (hw->bus.func == 0) - e1000_read_nvm(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); - else if (hw->mac.type == e1000_82580) + adapter->flags |= IGB_FLAG_WOL_SUPPORTED; + + /* Check the NVM for wake support for non-port A ports */ + if (hw->mac.type >= e1000_82580) hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, &eeprom_data); @@ -1820,14 +2524,14 @@ static int __devinit igb_probe(struct pci_dev *pdev, e1000_read_nvm(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); if (eeprom_data & IGB_EEPROM_APME) - adapter->eeprom_wol |= E1000_WUFC_MAG; + adapter->flags |= IGB_FLAG_WOL_SUPPORTED; /* now that we have the eeprom settings, apply the special cases where * the eeprom may be wrong or the board simply won't support wake on * lan on a particular port */ switch (pdev->device) { case E1000_DEV_ID_82575GB_QUAD_COPPER: - adapter->eeprom_wol = 0; + adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; break; case E1000_DEV_ID_82575EB_FIBER_SERDES: case E1000_DEV_ID_82576_FIBER: @@ -1835,180 +2539,167 @@ static int __devinit igb_probe(struct pci_dev *pdev, /* Wake events only supported on port A for dual fiber * regardless of eeprom setting */ if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FUNC_1) - adapter->eeprom_wol = 0; + adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; break; case E1000_DEV_ID_82576_QUAD_COPPER: case E1000_DEV_ID_82576_QUAD_COPPER_ET2: /* if quad port adapter, disable WoL on all but port A */ if (global_quad_port_a != 0) - adapter->eeprom_wol = 0; + adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; else adapter->flags |= IGB_FLAG_QUAD_PORT_A; /* Reset for multiple quad port adapters */ if (++global_quad_port_a == 4) global_quad_port_a = 0; break; + default: + break; } /* initialize the wol settings based on the eeprom settings */ - adapter->wol = adapter->eeprom_wol; - device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + if (adapter->flags & IGB_FLAG_WOL_SUPPORTED) + adapter->wol |= E1000_WUFC_MAG; + + /* Some vendors want WoL disabled by default, but still supported */ + if ((hw->mac.type == e1000_i350) && + (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)) { + adapter->flags |= IGB_FLAG_WOL_SUPPORTED; + adapter->wol = 0; + } + + device_set_wakeup_enable(pci_dev_to_dev(adapter->pdev), + adapter->flags & IGB_FLAG_WOL_SUPPORTED); /* reset the hardware with the new settings */ igb_reset(adapter); + adapter->devrc = 0; + /* let the f/w know that the h/w is now under the control of the * driver. 
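 * (In this driver family that handshake is conventionally the DRV_LOAD
 * bit in CTRL_EXT -- a minimal sketch of the expected implementation,
 * assumed here rather than read from this tree:
 *
 *   ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
 *   E1000_WRITE_REG(hw, E1000_CTRL_EXT,
 *                   ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
 *
 * Firmware then relinquishes control until igb_release_hw_control().)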
*/ igb_get_hw_control(adapter); - netif_tx_stop_all_queues(netdev); - strncpy(netdev->name, "eth%d", IFNAMSIZ); err = register_netdev(netdev); if (err) goto err_register; +#ifdef CONFIG_IGB_VMDQ_NETDEV + err = igb_init_vmdq_netdevs(adapter); + if (err) + goto err_register; +#endif /* carrier off reporting is important to ethtool even BEFORE open */ netif_carrier_off(netdev); #ifdef IGB_DCA if (dca_add_requester(&pdev->dev) == E1000_SUCCESS) { adapter->flags |= IGB_FLAG_DCA_ENABLED; - dev_info(&pdev->dev, "DCA enabled\n"); + dev_info(pci_dev_to_dev(pdev), "DCA enabled\n"); igb_setup_dca(adapter); } #endif -#ifdef SIOCSHWTSTAMP - switch (hw->mac.type) { - case e1000_i350: - case e1000_82580: - memset(&adapter->cycles, 0, sizeof(adapter->cycles)); - adapter->cycles.read = igb_read_clock; - adapter->cycles.mask = CLOCKSOURCE_MASK(64); - adapter->cycles.mult = 1; - /* - * The 82580 timesync updates the system timer every 8ns by 8ns - * and the value cannot be shifted. Instead we need to shift - * the registers to generate a 64bit timer value. As a result - * SYSTIMR/L/H, TXSTMPL/H, RXSTMPL/H all have to be shifted by - * 24 in order to generate a larger value for synchronization. - */ - adapter->cycles.shift = IGB_82580_TSYNC_SHIFT; - /* disable system timer temporarily by setting bit 31 */ - E1000_WRITE_REG(hw, E1000_TSAUXC, 0x80000000); - E1000_WRITE_FLUSH(hw); +#ifdef HAVE_PTP_1588_CLOCK + /* do hw tstamp init after resetting */ + igb_ptp_init(adapter); +#endif /* HAVE_PTP_1588_CLOCK */ - /* Set registers so that rollover occurs soon to test this. */ - E1000_WRITE_REG(hw, E1000_SYSTIMR, 0x00000000); - E1000_WRITE_REG(hw, E1000_SYSTIML, 0x80000000); - E1000_WRITE_REG(hw, E1000_SYSTIMH, 0x000000FF); - E1000_WRITE_FLUSH(hw); + dev_info(pci_dev_to_dev(pdev), "Intel(R) Gigabit Ethernet Network Connection\n"); + /* print bus type/speed/width info */ + dev_info(pci_dev_to_dev(pdev), "%s: (PCIe:%s:%s) ", + netdev->name, + ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5GT/s" : + (hw->bus.speed == e1000_bus_speed_5000) ? "5.0GT/s" : + (hw->mac.type == e1000_i354) ? "integrated" : + "unknown"), + ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" : + (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" : + (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" : + (hw->mac.type == e1000_i354) ? "integrated" : + "unknown")); + dev_info(pci_dev_to_dev(pdev), "%s: MAC: ", netdev->name); + for (i = 0; i < 6; i++) + printk("%2.2x%c", netdev->dev_addr[i], i == 5 ? '\n' : ':'); - /* enable system timer by clearing bit 31 */ - E1000_WRITE_REG(hw, E1000_TSAUXC, 0x0); - E1000_WRITE_FLUSH(hw); + ret_val = e1000_read_pba_string(hw, pba_str, E1000_PBANUM_LENGTH); + if (ret_val) + strcpy(pba_str, "Unknown"); + dev_info(pci_dev_to_dev(pdev), "%s: PBA No: %s\n", netdev->name, + pba_str); - timecounter_init(&adapter->clock, - &adapter->cycles, - ktime_to_ns(ktime_get_real())); - /* - * Synchronize our NIC clock against system wall clock. NIC - * time stamp reading requires ~3us per sample, each sample - * was pretty stable even under load => only require 10 - * samples for each offset comparison. - */ - memset(&adapter->compare, 0, sizeof(adapter->compare)); - adapter->compare.source = &adapter->clock; - adapter->compare.target = ktime_get_real; - adapter->compare.num_samples = 10; - timecompare_update(&adapter->compare, 0); - break; - case e1000_82576: - /* - * Initialize hardware timer: we keep it running just in case - * that some program needs it later on. 
- */ - memset(&adapter->cycles, 0, sizeof(adapter->cycles)); - adapter->cycles.read = igb_read_clock; - adapter->cycles.mask = CLOCKSOURCE_MASK(64); - adapter->cycles.mult = 1; - /** - * Scale the NIC clock cycle by a large factor so that - * relatively small clock corrections can be added or - * substracted at each clock tick. The drawbacks of a large - * factor are a) that the clock register overflows more quickly - * (not such a big deal) and b) that the increment per tick has - * to fit into 24 bits. As a result we need to use a shift of - * 19 so we can fit a value of 16 into the TIMINCA register. - */ - adapter->cycles.shift = IGB_82576_TSYNC_SHIFT; - E1000_WRITE_REG(hw, E1000_TIMINCA, - (1 << E1000_TIMINCA_16NS_SHIFT) | - (16 << IGB_82576_TSYNC_SHIFT)); - - /* Set registers so that rollover occurs soon to test this. */ - E1000_WRITE_REG(hw, E1000_SYSTIML, 0x00000000); - E1000_WRITE_REG(hw, E1000_SYSTIMH, 0xFF800000); - E1000_WRITE_FLUSH(hw); - - timecounter_init(&adapter->clock, - &adapter->cycles, - ktime_to_ns(ktime_get_real())); - /* - * Synchronize our NIC clock against system wall clock. NIC - * time stamp reading requires ~3us per sample, each sample - * was pretty stable even under load => only require 10 - * samples for each offset comparison. - */ - memset(&adapter->compare, 0, sizeof(adapter->compare)); - adapter->compare.source = &adapter->clock; - adapter->compare.target = ktime_get_real; - adapter->compare.num_samples = 10; - timecompare_update(&adapter->compare, 0); - break; - case e1000_82575: - /* 82575 does not support timesync */ - default: - break; - } - -#endif /* SIOCSHWTSTAMP */ - dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n"); - /* print bus type/speed/width info */ - dev_info(&pdev->dev, "%s: (PCIe:%s:%s)", - netdev->name, - ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" : - "unknown"), - ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" : - (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" : - (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" : - "unknown")); #ifdef __VMKNETDDI_QUEUEOPS__ if (adapter->num_rx_queues > 1) VMKNETDDI_REGISTER_QUEUEOPS(netdev, igb_netqueue_ops); #endif - for (i = 0; i < 6; i++) - printk("%2.2x%c", netdev->dev_addr[i], i == 5 ? '\n' : ':'); + /* Initialize the thermal sensor on i350 devices. */ + if (hw->mac.type == e1000_i350) { + if (hw->bus.func == 0) { + u16 ets_word; -#ifdef IGB_LRO -#ifdef NETIF_F_LRO - if (netdev->features & NETIF_F_LRO) -#endif - dev_info(&pdev->dev, "Internal LRO is enabled \n"); -#ifdef NETIF_F_LRO - else - dev_info(&pdev->dev, "LRO is disabled \n"); -#endif -#endif - dev_info(&pdev->dev, + /* + * Read the NVM to determine if this i350 device + * supports an external thermal sensor. 
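+ * An erased NVM word reads back as 0xFFFF and an unprogrammed image
+ * commonly leaves it as 0x0000, so both sentinel values are treated
+ * below as "no external sensor fitted" (an assumed NVM convention).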
+ */ + e1000_read_nvm(hw, NVM_ETS_CFG, 1, &ets_word); + if (ets_word != 0x0000 && ets_word != 0xFFFF) + adapter->ets = true; + else + adapter->ets = false; + } +#ifdef IGB_HWMON + + igb_sysfs_init(adapter); +#else + + igb_procfs_init(adapter); +#endif /* IGB_HWMON */ + } else { + adapter->ets = false; + } + + if (hw->phy.media_type == e1000_media_type_copper) { + switch (hw->mac.type) { + case e1000_i350: + case e1000_i210: + case e1000_i211: + /* Enable EEE for internal copper PHY devices */ + err = e1000_set_eee_i350(hw); + if ((!err) && + (adapter->flags & IGB_FLAG_EEE)) + adapter->eee_advert = + MDIO_EEE_100TX | MDIO_EEE_1000T; + break; + case e1000_i354: + if ((E1000_READ_REG(hw, E1000_CTRL_EXT)) & + (E1000_CTRL_EXT_LINK_MODE_SGMII)) { + err = e1000_set_eee_i354(hw); + if ((!err) && + (adapter->flags & IGB_FLAG_EEE)) + adapter->eee_advert = + MDIO_EEE_100TX | MDIO_EEE_1000T; + } + break; + default: + break; + } + } + + /* send driver version info to firmware */ + if ((hw->mac.type >= e1000_i350) && + (e1000_get_flash_presence_i210(hw))) + igb_init_fw(adapter); + + dev_info(pci_dev_to_dev(pdev), "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n", adapter->msix_entries ? "MSI-X" : (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy", adapter->num_rx_queues, adapter->num_tx_queues); cards_found++; + + pm_runtime_put_noidle(&pdev->dev); return 0; err_register: @@ -2021,6 +2712,7 @@ err_eeprom: iounmap(hw->flash_address); err_sw_init: igb_clear_interrupt_scheme(adapter); + igb_reset_sriov_capability(adapter); iounmap(hw->hw_addr); err_ioremap: free_netdev(netdev); @@ -2048,21 +2740,27 @@ static void __devexit igb_remove(struct pci_dev *pdev) struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; + pm_runtime_get_noresume(&pdev->dev); +#ifdef HAVE_PTP_1588_CLOCK + igb_ptp_stop(adapter); +#endif /* HAVE_PTP_1588_CLOCK */ + /* flush_scheduled work may reschedule our watchdog task, so * explicitly disable watchdog tasks from being rescheduled */ set_bit(__IGB_DOWN, &adapter->state); del_timer_sync(&adapter->watchdog_timer); + if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA) + del_timer_sync(&adapter->dma_err_timer); del_timer_sync(&adapter->phy_info_timer); flush_scheduled_work(); #ifdef IGB_DCA if (adapter->flags & IGB_FLAG_DCA_ENABLED) { - dev_info(&pdev->dev, "DCA disabled\n"); + dev_info(pci_dev_to_dev(pdev), "DCA disabled\n"); dca_remove_requester(&pdev->dev); adapter->flags &= ~IGB_FLAG_DCA_ENABLED; - E1000_WRITE_REG(hw, E1000_DCA_CTRL, - E1000_DCA_CTRL_DCA_DISABLE); + E1000_WRITE_REG(hw, E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_DISABLE); } #endif @@ -2071,12 +2769,12 @@ static void __devexit igb_remove(struct pci_dev *pdev) igb_release_hw_control(adapter); unregister_netdev(netdev); - - if (!e1000_check_reset_block(hw)) - e1000_phy_hw_reset(hw); +#ifdef CONFIG_IGB_VMDQ_NETDEV + igb_remove_vmdq_netdevs(adapter); +#endif igb_clear_interrupt_scheme(adapter); - + igb_reset_sriov_capability(adapter); iounmap(hw->hw_addr); if (hw->flash_address) @@ -2084,8 +2782,15 @@ static void __devexit igb_remove(struct pci_dev *pdev) pci_release_selected_regions(pdev, pci_select_bars(pdev, IORESOURCE_MEM)); +#ifdef IGB_HWMON + igb_sysfs_exit(adapter); +#else + igb_procfs_exit(adapter); +#endif /* IGB_HWMON */ if (adapter->config_space) kfree(adapter->config_space); + kfree(adapter->mac_table); + kfree(adapter->shadow_vfta); free_netdev(netdev); pci_disable_pcie_error_reporting(pdev); @@ -2093,19 +2798,6 @@ static void __devexit igb_remove(struct pci_dev *pdev) 
pci_disable_device(pdev); } -/** - * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space - * @adapter: board private structure to initialize - * - * This function initializes the vf specific data storage and then attempts to - * allocate the VFs. The reason for ordering it this way is because it is much - * more expensive time wise to disable SR-IOV than it is to allocate and free - * the memory for the VFs. - **/ -static void __devinit igb_probe_vfs(struct igb_adapter *adapter) -{ - adapter->vfs_allocated_count = 0; -} /** * igb_sw_init - Initialize general software structures (struct igb_adapter) * @adapter: board private structure to initialize @@ -2131,27 +2823,41 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter) pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word); + /* set default ring sizes */ adapter->tx_ring_count = IGB_DEFAULT_TXD; adapter->rx_ring_count = IGB_DEFAULT_RXD; + + /* set default work limits */ + adapter->tx_work_limit = IGB_DEFAULT_TX_WORK; + adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + - VLAN_TAG_SIZE; - adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; + VLAN_HLEN; /* Initialize the hardware-specific values */ if (e1000_setup_init_funcs(hw, TRUE)) { - dev_err(&pdev->dev, "Hardware Initialization Failure\n"); + dev_err(pci_dev_to_dev(pdev), "Hardware Initialization Failure\n"); return -EIO; } igb_check_options(adapter); - /* This call may decrease the number of queues */ - if (igb_init_interrupt_scheme(adapter)) { - dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); - return -ENOMEM; + adapter->mac_table = kzalloc(sizeof(struct igb_mac_addr) * + hw->mac.rar_entry_count, + GFP_ATOMIC); + + /* Setup and initialize a copy of the hw vlan table array */ + adapter->shadow_vfta = (u32 *)kzalloc(sizeof(u32) * E1000_VFTA_ENTRIES, + GFP_ATOMIC); + + /* These calls may decrease the number of queues */ + if (hw->mac.type < e1000_i210) { + igb_set_sriov_capability(adapter); } - igb_probe_vfs(adapter); + if (igb_init_interrupt_scheme(adapter, true)) { + dev_err(pci_dev_to_dev(pdev), "Unable to allocate memory for queues\n"); + return -ENOMEM; + } /* Explicitly disable IRQ since the NIC can be in any state. */ igb_irq_disable(adapter); @@ -2172,16 +2878,26 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter) * handler is registered with the OS, the watchdog timer is started, * and the stack is notified that the interface is ready. **/ -static int igb_open(struct net_device *netdev) +static int __igb_open(struct net_device *netdev, bool resuming) { struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; +#ifdef CONFIG_PM_RUNTIME + struct pci_dev *pdev = adapter->pdev; +#endif /* CONFIG_PM_RUNTIME */ int err; int i; /* disallow open during test */ - if (test_bit(__IGB_TESTING, &adapter->state)) + if (test_bit(__IGB_TESTING, &adapter->state)) { + WARN_ON(resuming); return -EBUSY; + } + +#ifdef CONFIG_PM_RUNTIME + if (!resuming) + pm_runtime_get_sync(&pdev->dev); +#endif /* CONFIG_PM_RUNTIME */ netif_carrier_off(netdev); @@ -2195,7 +2911,7 @@ static int igb_open(struct net_device *netdev) if (err) goto err_setup_rx; - /* e1000_power_up_phy(adapter); */ + igb_power_up_link(adapter); /* before we allocate an interrupt, we must be ready to handle it. * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt @@ -2207,6 +2923,22 @@ static int igb_open(struct net_device *netdev) if (err) goto err_req_irq; + /* Notify the stack of the actual queue counts. 
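+ * If this is skipped, the stack may keep hashing transmits across every
+ * queue sized at alloc time; e.g. (illustrative numbers only) with 8
+ * allocated but only 4 initialized TX queues, skb_tx_hash() could pick
+ * ring 6 and the frame would land on an uninitialized ring.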
*/ +#ifdef __VMKLNX__ + netif_set_real_num_tx_queues(netdev, + adapter->num_tx_queues); +#else + netif_set_real_num_tx_queues(netdev, + adapter->vmdq_pools ? 1 : + adapter->num_tx_queues); + + err = netif_set_real_num_rx_queues(netdev, + adapter->vmdq_pools ? 1 : + adapter->num_rx_queues); + if (err) + goto err_set_queues; +#endif + /* From here on the code is the same as igb_up() */ clear_bit(__IGB_DOWN, &adapter->state); @@ -2228,24 +2960,39 @@ static int igb_open(struct net_device *netdev) netif_tx_start_all_queues(netdev); + if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA) + schedule_work(&adapter->dma_err_task); + /* start the watchdog. */ hw->mac.get_link_status = 1; schedule_work(&adapter->watchdog_task); return E1000_SUCCESS; +err_set_queues: + igb_free_irq(adapter); err_req_irq: igb_release_hw_control(adapter); - /* e1000_power_down_phy(adapter); */ + igb_power_down_link(adapter); igb_free_all_rx_resources(adapter); err_setup_rx: igb_free_all_tx_resources(adapter); err_setup_tx: igb_reset(adapter); +#ifdef CONFIG_PM_RUNTIME + if (!resuming) + pm_runtime_put(&pdev->dev); +#endif /* CONFIG_PM_RUNTIME */ + return err; } +static int igb_open(struct net_device *netdev) +{ + return __igb_open(netdev, false); +} + /** * igb_close - Disables a network interface * @netdev: network interface device structure @@ -2257,21 +3004,42 @@ err_setup_tx: * needs to be disabled. A global MAC reset is issued to stop the * hardware, and all transmit and receive resources are freed. **/ -static int igb_close(struct net_device *netdev) +static int __igb_close(struct net_device *netdev, bool suspending) { struct igb_adapter *adapter = netdev_priv(netdev); +#ifdef CONFIG_PM_RUNTIME + struct pci_dev *pdev = adapter->pdev; +#endif /* CONFIG_PM_RUNTIME */ WARN_ON(test_bit(__IGB_RESETTING, &adapter->state)); + +#ifdef CONFIG_PM_RUNTIME + if (!suspending) + pm_runtime_get_sync(&pdev->dev); +#endif /* CONFIG_PM_RUNTIME */ + igb_down(adapter); + igb_release_hw_control(adapter); + igb_free_irq(adapter); igb_free_all_tx_resources(adapter); igb_free_all_rx_resources(adapter); +#ifdef CONFIG_PM_RUNTIME + if (!suspending) + pm_runtime_put_sync(&pdev->dev); +#endif /* CONFIG_PM_RUNTIME */ + return 0; } +static int igb_close(struct net_device *netdev) +{ + return __igb_close(netdev, false); +} + /** * igb_setup_tx_resources - allocate Tx resources (Descriptors) * @tx_ring: tx descriptor ring (for a specific queue) to setup @@ -2280,22 +3048,20 @@ static int igb_close(struct net_device *netdev) **/ int igb_setup_tx_resources(struct igb_ring *tx_ring) { - struct pci_dev *pdev = tx_ring->pdev; + struct device *dev = tx_ring->dev; int size; - size = sizeof(struct igb_buffer) * tx_ring->count; - tx_ring->buffer_info = vmalloc(size); - if (!tx_ring->buffer_info) + size = sizeof(struct igb_tx_buffer) * tx_ring->count; + tx_ring->tx_buffer_info = vzalloc(size); + if (!tx_ring->tx_buffer_info) goto err; - memset(tx_ring->buffer_info, 0, size); /* round up to nearest 4K */ tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); tx_ring->size = ALIGN(tx_ring->size, 4096); - tx_ring->desc = pci_alloc_consistent(pdev, - tx_ring->size, - &tx_ring->dma); + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); if (!tx_ring->desc) goto err; @@ -2307,11 +3073,12 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring) tx_ring->allocated = 0; #endif + return 0; err: - vfree(tx_ring->buffer_info); - dev_err(&pdev->dev, + vfree(tx_ring->tx_buffer_info); + dev_err(dev, "Unable to allocate memory for the 
transmit descriptor ring\n"); return -ENOMEM; } @@ -2331,7 +3098,7 @@ static int igb_setup_all_tx_resources(struct igb_adapter *adapter) for (i = 0; i < adapter->num_tx_queues; i++) { err = igb_setup_tx_resources(adapter->tx_ring[i]); if (err) { - dev_err(&pdev->dev, + dev_err(pci_dev_to_dev(pdev), "Allocation for Tx Queue %u failed\n", i); for (i--; i >= 0; i--) igb_free_tx_resources(adapter->tx_ring[i]); @@ -2339,16 +3106,6 @@ static int igb_setup_all_tx_resources(struct igb_adapter *adapter) } } -#ifdef HAVE_TX_MQ - for (i = 0; i < IGB_MAX_TX_QUEUES; i++) { -#ifdef CONFIG_NETDEVICES_MULTIQUEUE - int r_idx = i % adapter->netdev->egress_subqueue_count; -#else - int r_idx = i % adapter->netdev->real_num_tx_queues; -#endif - adapter->multi_tx_table[i] = adapter->tx_ring[r_idx]; - } -#endif return err; } @@ -2378,6 +3135,22 @@ void igb_setup_tctl(struct igb_adapter *adapter) E1000_WRITE_REG(hw, E1000_TCTL, tctl); } +static u32 igb_tx_wthresh(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + switch (hw->mac.type) { + case e1000_i354: + return 4; + case e1000_82576: + if (adapter->msix_entries) + return 1; + default: + break; + } + + return 16; +} + /** * igb_configure_tx_ring - Configure transmit ring after Reset * @adapter: board private structure @@ -2389,14 +3162,12 @@ void igb_configure_tx_ring(struct igb_adapter *adapter, struct igb_ring *ring) { struct e1000_hw *hw = &adapter->hw; - u32 txdctl; + u32 txdctl = 0; u64 tdba = ring->dma; int reg_idx = ring->reg_idx; /* disable the queue */ - txdctl = E1000_READ_REG(hw, E1000_TXDCTL(reg_idx)); - E1000_WRITE_REG(hw, E1000_TXDCTL(reg_idx), - txdctl & ~E1000_TXDCTL_QUEUE_ENABLE); + E1000_WRITE_REG(hw, E1000_TXDCTL(reg_idx), 0); E1000_WRITE_FLUSH(hw); mdelay(10); @@ -2406,14 +3177,13 @@ void igb_configure_tx_ring(struct igb_adapter *adapter, tdba & 0x00000000ffffffffULL); E1000_WRITE_REG(hw, E1000_TDBAH(reg_idx), tdba >> 32); - ring->head = hw->hw_addr + E1000_TDH(reg_idx); ring->tail = hw->hw_addr + E1000_TDT(reg_idx); - writel(0, ring->head); + E1000_WRITE_REG(hw, E1000_TDH(reg_idx), 0); writel(0, ring->tail); txdctl |= IGB_TX_PTHRESH; txdctl |= IGB_TX_HTHRESH << 8; - txdctl |= IGB_TX_WTHRESH << 16; + txdctl |= igb_tx_wthresh(adapter) << 16; txdctl |= E1000_TXDCTL_QUEUE_ENABLE; E1000_WRITE_REG(hw, E1000_TXDCTL(reg_idx), txdctl); @@ -2441,14 +3211,13 @@ static void igb_configure_tx(struct igb_adapter *adapter) **/ int igb_setup_rx_resources(struct igb_ring *rx_ring) { - struct pci_dev *pdev = rx_ring->pdev; + struct device *dev = rx_ring->dev; int size, desc_len; - size = sizeof(struct igb_buffer) * rx_ring->count; - rx_ring->buffer_info = vmalloc(size); - if (!rx_ring->buffer_info) + size = sizeof(struct igb_rx_buffer) * rx_ring->count; + rx_ring->rx_buffer_info = vzalloc(size); + if (!rx_ring->rx_buffer_info) goto err; - memset(rx_ring->buffer_info, 0, size); desc_len = sizeof(union e1000_adv_rx_desc); @@ -2456,13 +3225,13 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring) rx_ring->size = rx_ring->count * desc_len; rx_ring->size = ALIGN(rx_ring->size, 4096); - rx_ring->desc = pci_alloc_consistent(pdev, - rx_ring->size, - &rx_ring->dma); + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); if (!rx_ring->desc) goto err; + rx_ring->next_to_alloc = 0; rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; @@ -2476,16 +3245,16 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring) return 0; err: - vfree(rx_ring->buffer_info); - rx_ring->buffer_info = NULL; - dev_err(&pdev->dev, "Unable 
to allocate memory for "
- "the receive descriptor ring\n");
+ vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+ dev_err(dev, "Unable to allocate memory for the receive descriptor"
+ " ring\n");
return -ENOMEM;
}
/**
* igb_setup_all_rx_resources - wrapper to allocate Rx resources
- * (Descriptors) for all queues
+ * (Descriptors) for all queues
* @adapter: board private structure
*
* Return 0 on success, negative on failure
@@ -2498,7 +3267,7 @@
for (i = 0; i < adapter->num_rx_queues; i++) {
err = igb_setup_rx_resources(adapter->rx_ring[i]);
if (err) {
- dev_err(&pdev->dev,
+ dev_err(pci_dev_to_dev(pdev),
"Allocation for Rx Queue %u failed\n", i);
for (i--; i >= 0; i--)
igb_free_rx_resources(adapter->rx_ring[i]);
@@ -2522,56 +3291,57 @@
struct e1000_hw *hw = &adapter->hw;
u32 mrqc, rxcsum;
u32 j, num_rx_queues, shift = 0, shift2 = 0;
- union e1000_reta {
- u32 dword;
- u8 bytes[4];
- } reta;
- static const u8 rsshash[40] = {
- 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
- 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
- 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
- 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };
+ static const u32 rsskey[10] = { 0xDA565A6D, 0xC20E5B25, 0x3D256741,
+ 0xB08FA343, 0xCB2BCAD0, 0xB4307BAE,
+ 0xA32DCB77, 0x0CF23080, 0x3BB7426A,
+ 0xFA01ACBE };
/* Fill out hash function seeds */
- for (j = 0; j < 10; j++) {
- u32 rsskey = rsshash[(j * 4)];
- rsskey |= rsshash[(j * 4) + 1] << 8;
- rsskey |= rsshash[(j * 4) + 2] << 16;
- rsskey |= rsshash[(j * 4) + 3] << 24;
- E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), j, rsskey);
- }
+ for (j = 0; j < 10; j++)
+ E1000_WRITE_REG(hw, E1000_RSSRK(j), rsskey[j]);
num_rx_queues = adapter->rss_queues;
- if (adapter->vfs_allocated_count || adapter->vmdq_pools) {
- /* 82575 and 82576 support 2 RSS queues for VMDq */
- switch (hw->mac.type) {
- case e1000_i350:
- case e1000_82580:
- num_rx_queues = 1;
- shift = 0;
- break;
- case e1000_82576:
- shift = 3;
- num_rx_queues = 2;
- break;
- case e1000_82575:
+ /* 82575 and 82576 support 2 RSS queues for VMDq */
+ switch (hw->mac.type) {
+ case e1000_82575:
+ if (adapter->vmdq_pools) {
shift = 2;
shift2 = 6;
- default:
break;
}
- } else {
- if (hw->mac.type == e1000_82575)
- shift = 6;
+ shift = 6;
+ break;
+ case e1000_82576:
+ /* 82576 supports 2 RSS queues for SR-IOV */
+ if (adapter->vfs_allocated_count || adapter->vmdq_pools) {
+ shift = 3;
+ num_rx_queues = 2;
+ }
+ break;
+ default:
+ break;
}
- for (j = 0; j < (32 * 4); j++) {
- reta.bytes[j & 3] = (j % num_rx_queues) << shift;
+ /*
+ * Populate the redirection table 4 entries at a time. To do this
+ * we are generating the results for n and n+2 and then interleaving
+ * those with the results for n+1 and n+3. 
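+ * Worked example (illustrative, editorially added): with
+ * num_rx_queues = 4 and shift = 0, table entry i computes to
+ * ((i * 4) & 0x780) >> 7 == i / 32, so RETA(0) packs entries 0..3 as
+ * 0x00000000 and RETA(8) packs entries 32..35 as 0x01010101 -- an even
+ * block distribution replacing the old byte-at-a-time round robin.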
+ */ + for (j = 0; j < 32; j++) { + /* first pass generates n and n+2 */ + u32 base = ((j * 0x00040004) + 0x00020000) * num_rx_queues; + u32 reta = (base & 0x07800780) >> (7 - shift); + + /* second pass generates n+1 and n+3 */ + base += 0x00010001 * num_rx_queues; + reta |= (base & 0x07800780) << (1 + shift); + + /* generate 2nd table for 82575 based parts */ if (shift2) - reta.bytes[j & 3] |= num_rx_queues << shift2; - if ((j & 3) == 3) - E1000_WRITE_REG(hw, E1000_RETA(j >> 2), reta.dword); + reta |= (0x01010101 * num_rx_queues) << shift2; + + E1000_WRITE_REG(hw, E1000_RETA(j), reta); } /* @@ -2589,6 +3359,20 @@ static void igb_setup_mrqc(struct igb_adapter *adapter) /* Don't need to set TUOFL or IPOFL, they default to 1 */ E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum); + /* Generate RSS hash based on packet types, TCP/UDP + * port numbers and/or IPv4/v6 src and dst addresses + */ + mrqc = E1000_MRQC_RSS_FIELD_IPV4 | + E1000_MRQC_RSS_FIELD_IPV4_TCP | + E1000_MRQC_RSS_FIELD_IPV6 | + E1000_MRQC_RSS_FIELD_IPV6_TCP | + E1000_MRQC_RSS_FIELD_IPV6_TCP_EX; + + if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP) + mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP; + if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP) + mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP; + /* If VMDq is enabled then we set the appropriate mode for that, else * we default to RSS so that an RSS hash is calculated per packet even * if we are only using one queue */ @@ -2607,23 +3391,14 @@ static void igb_setup_mrqc(struct igb_adapter *adapter) adapter->rss_queues << 7); } if (adapter->rss_queues > 1) - mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q; + mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_2Q; else - mrqc = E1000_MRQC_ENABLE_VMDQ; + mrqc |= E1000_MRQC_ENABLE_VMDQ; } else { - mrqc = E1000_MRQC_ENABLE_RSS_4Q; + mrqc |= E1000_MRQC_ENABLE_RSS_4Q; } igb_vmm_control(adapter); - mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 | - E1000_MRQC_RSS_FIELD_IPV4_TCP); - mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 | - E1000_MRQC_RSS_FIELD_IPV6_TCP); - mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP | - E1000_MRQC_RSS_FIELD_IPV6_UDP); - mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX | - E1000_MRQC_RSS_FIELD_IPV6_TCP_EX); - E1000_WRITE_REG(hw, E1000_MRQC, mrqc); } @@ -2682,8 +3457,18 @@ static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size, * increase the size to support vlan tags */ if (vfn < adapter->vfs_allocated_count && adapter->vf_data[vfn].vlans_enabled) - size += VLAN_TAG_SIZE; + size += VLAN_HLEN; + +#ifdef CONFIG_IGB_VMDQ_NETDEV + if (vfn >= adapter->vfs_allocated_count) { + int queue = vfn - adapter->vfs_allocated_count; + struct igb_vmdq_adapter *vadapter; + vadapter = netdev_priv(adapter->vmdq_netdev[queue-1]); + if (vadapter->vlgrp) + size += VLAN_HLEN; + } +#endif vmolr = E1000_READ_REG(hw, E1000_VMOLR(vfn)); vmolr &= ~E1000_VMOLR_RLPML_MASK; vmolr |= size | E1000_VMOLR_LPE; @@ -2708,13 +3493,46 @@ static void igb_rlpml_set(struct igb_adapter *adapter) int i; for (i = 0; i < adapter->vmdq_pools; i++) igb_set_vf_rlpml(adapter, max_frame_size, pf_id + i); + /* + * If we're in VMDQ or SR-IOV mode, then set global RLPML + * to our max jumbo frame size, in case we need to enable + * jumbo frames on one of the rings later. + * This will not pass over-length frames into the default + * queue because it's gated by the VMOLR.RLPML. + */ max_frame_size = MAX_JUMBO_FRAME_SIZE; } + /* Set VF RLPML for the PF device. 
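+ * (Here pf_id is expected to be the PF's own pool index, i.e.
+ * adapter->vfs_allocated_count from earlier in this function, since
+ * pools 0..vfs_allocated_count-1 belong to the VFs -- an assumption
+ * stated for clarity, consistent with the pf_id + i loop above.)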
*/ + if (adapter->vfs_allocated_count) + igb_set_vf_rlpml(adapter, max_frame_size, pf_id); E1000_WRITE_REG(hw, E1000_RLPML, max_frame_size); } -static inline void igb_set_vmolr(struct igb_adapter *adapter, int vfn) +static inline void igb_set_vf_vlan_strip(struct igb_adapter *adapter, + int vfn, bool enable) +{ + struct e1000_hw *hw = &adapter->hw; + u32 val; + void __iomem *reg; + + if (hw->mac.type < e1000_82576) + return; + + if (hw->mac.type == e1000_i350) + reg = hw->hw_addr + E1000_DVMOLR(vfn); + else + reg = hw->hw_addr + E1000_VMOLR(vfn); + + val = readl(reg); + if (enable) + val |= E1000_VMOLR_STRVLAN; + else + val &= ~(E1000_VMOLR_STRVLAN); + writel(val, reg); +} +static inline void igb_set_vmolr(struct igb_adapter *adapter, + int vfn, bool aupe) { struct e1000_hw *hw = &adapter->hw; u32 vmolr; @@ -2727,21 +3545,29 @@ static inline void igb_set_vmolr(struct igb_adapter *adapter, int vfn) return; vmolr = E1000_READ_REG(hw, E1000_VMOLR(vfn)); - vmolr |= E1000_VMOLR_AUPE | /* Accept untagged packets */ - E1000_VMOLR_STRVLAN; /* Strip vlan tags */ + + if (aupe) + vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */ + else + vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */ /* clear all bits that might not be set */ - vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE); + vmolr &= ~E1000_VMOLR_RSSE; if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count) vmolr |= E1000_VMOLR_RSSE; /* enable RSS */ + + vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */ + vmolr |= E1000_VMOLR_LPE; /* Accept long packets */ + +#ifdef __VMKLNX__ /* - * for VMDq only allow the VFs and pool 0 to accept broadcast and - * multicast packets + * For ESX driver, BAM, ROPE and ROMPE bits should be disabled on all queues + * other than the default queue */ - if (vfn <= adapter->vfs_allocated_count) - vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */ - + if (vfn != 0) + vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE); +#endif /* __VMKLNX__ */ E1000_WRITE_REG(hw, E1000_VMOLR(vfn), vmolr); } @@ -2758,12 +3584,20 @@ void igb_configure_rx_ring(struct igb_adapter *adapter, struct e1000_hw *hw = &adapter->hw; u64 rdba = ring->dma; int reg_idx = ring->reg_idx; - u32 srrctl, rxdctl; + u32 srrctl = 0, rxdctl = 0; + +#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT + /* + * RLPML prevents us from receiving a frame larger than max_frame so + * it is safe to just set the rx_buffer_len to max_frame without the + * risk of an skb over panic. 
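+ * For example (illustrative): with a 1500-byte MTU, max_frame_size is
+ * 1522 and the max_t() below leaves rx_buffer_len at the VLAN-tagged
+ * minimum; a 9000-byte jumbo MTU raises it to the full frame size.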
+ */
+ ring->rx_buffer_len = max_t(u32, adapter->max_frame_size,
+ MAXIMUM_ETHERNET_VLAN_SIZE);
+#endif
/* disable the queue */
- rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(reg_idx));
- E1000_WRITE_REG(hw, E1000_RXDCTL(reg_idx),
- rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
+ E1000_WRITE_REG(hw, E1000_RXDCTL(reg_idx), 0);
/* Set DMA base address registers */
E1000_WRITE_REG(hw, E1000_RDBAL(reg_idx),
rdba & 0x00000000ffffffffULL);
E1000_WRITE_REG(hw, E1000_RDBAH(reg_idx), rdba >> 32);
E1000_WRITE_REG(hw, E1000_RDLEN(reg_idx),
ring->count * sizeof(union e1000_adv_rx_desc));
/* initialize head and tail */
- ring->head = hw->hw_addr + E1000_RDH(reg_idx);
ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
- writel(0, ring->head);
+ E1000_WRITE_REG(hw, E1000_RDH(reg_idx), 0);
writel(0, ring->tail);
- /* set descriptor configuration */
+ /* reset next_to_use/next_to_clean to place SW in sync with hardware */
+ ring->next_to_clean = 0;
+ ring->next_to_use = 0;
#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
- if (ring->rx_buffer_len < IGB_RXBUFFER_1024) {
- srrctl = ALIGN(ring->rx_buffer_len, 64) <<
- E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
-#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
- srrctl |= IGB_RXBUFFER_16384 >>
- E1000_SRRCTL_BSIZEPKT_SHIFT;
-#else
- srrctl |= (PAGE_SIZE / 2) >>
- E1000_SRRCTL_BSIZEPKT_SHIFT;
+ ring->next_to_alloc = 0;
+
#endif
- srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
- } else {
-#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
- srrctl = ALIGN(ring->rx_buffer_len, 1024) >>
- E1000_SRRCTL_BSIZEPKT_SHIFT;
- srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
+ /* set descriptor configuration */
#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
- }
+ srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
+ srrctl |= IGB_RX_BUFSZ >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+#else /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
+ srrctl = ALIGN(ring->rx_buffer_len, 1024) >>
+ E1000_SRRCTL_BSIZEPKT_SHIFT;
#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
-#ifdef IGB_PER_PKT_TIMESTAMP
- if (hw->mac.type == e1000_82580)
+ srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
+#ifdef HAVE_PTP_1588_CLOCK
+ if (hw->mac.type >= e1000_82580)
srrctl |= E1000_SRRCTL_TIMESTAMP;
-#endif
+#endif /* HAVE_PTP_1588_CLOCK */
/*
- * For SR-IOV we enable drops so that a single VF can't DOS the
- * whole device. For VMDQ, we don't want this, There's only one
- * entity handling all vectors - if one gets stalled and not
- * handled, something elsewhere in in the system is badly broke.
+ * We should set the drop enable bit if:
+ * SR-IOV is enabled
+ * or
+ * Flow Control is disabled and number of RX queues > 1
+ *
+ * This allows us to avoid head of line blocking for security
+ * and performance reasons. 
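+ * A quick reading of the condition below (illustrative):
+ *
+ *   any VFs allocated, any flow-control mode  -> DROP_EN set
+ *   one RX queue, flow control off            -> DROP_EN clear
+ *   multiple RX queues, fc_none/fc_rx_pause   -> DROP_EN set
+ *   multiple RX queues, fc_full/fc_tx_pause   -> DROP_EN clear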
*/ - if (!adapter->vmdq_pools) + if (adapter->vfs_allocated_count || + (adapter->num_rx_queues > 1 && + (hw->fc.requested_mode == e1000_fc_none || + hw->fc.requested_mode == e1000_fc_rx_pause))) srrctl |= E1000_SRRCTL_DROP_EN; E1000_WRITE_REG(hw, E1000_SRRCTL(reg_idx), srrctl); /* set filtering for VMDQ pools */ - igb_set_vmolr(adapter, reg_idx & 0x7); + igb_set_vmolr(adapter, reg_idx & 0x7, true); - /* enable receive descriptor fetching */ - rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(reg_idx)); - rxdctl |= E1000_RXDCTL_QUEUE_ENABLE; - rxdctl &= 0xFFF00000; rxdctl |= IGB_RX_PTHRESH; rxdctl |= IGB_RX_HTHRESH << 8; rxdctl |= IGB_RX_WTHRESH << 16; + + /* enable receive descriptor fetching */ + rxdctl |= E1000_RXDCTL_QUEUE_ENABLE; E1000_WRITE_REG(hw, E1000_RXDCTL(reg_idx), rxdctl); } @@ -2840,10 +3673,7 @@ static void igb_configure_rx(struct igb_adapter *adapter) /* set UTA to appropriate mode */ igb_set_uta(adapter); - /* set the correct pool for the PF default MAC address in entry 0 */ - igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0, - adapter->vfs_allocated_count); - + igb_full_sync_mac_table(adapter); /* Setup the HW Rx Head and Tail Descriptor Pointers and * the Base and Length of the Rx Descriptor Ring */ for (i = 0; i < adapter->num_rx_queues; i++) @@ -2860,15 +3690,15 @@ void igb_free_tx_resources(struct igb_ring *tx_ring) { igb_clean_tx_ring(tx_ring); - vfree(tx_ring->buffer_info); - tx_ring->buffer_info = NULL; + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; /* if not set, then don't free */ if (!tx_ring->desc) return; - pci_free_consistent(tx_ring->pdev, tx_ring->size, - tx_ring->desc, tx_ring->dma); + dma_free_coherent(tx_ring->dev, tx_ring->size, + tx_ring->desc, tx_ring->dma); tx_ring->desc = NULL; } @@ -2887,29 +3717,25 @@ static void igb_free_all_tx_resources(struct igb_adapter *adapter) igb_free_tx_resources(adapter->tx_ring[i]); } -void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring, - struct igb_buffer *buffer_info) -{ - if (buffer_info->dma) { - if (buffer_info->mapped_as_page) - pci_unmap_page(tx_ring->pdev, - buffer_info->dma, - buffer_info->length, - PCI_DMA_TODEVICE); - else - pci_unmap_single(tx_ring->pdev, - buffer_info->dma, - buffer_info->length, - PCI_DMA_TODEVICE); - buffer_info->dma = 0; - } - if (buffer_info->skb) { - dev_kfree_skb_any(buffer_info->skb); - buffer_info->skb = NULL; - } - buffer_info->time_stamp = 0; - buffer_info->next_to_watch = 0; - buffer_info->mapped_as_page = 0; +void igb_unmap_and_free_tx_resource(struct igb_ring *ring, + struct igb_tx_buffer *tx_buffer) +{ + if (tx_buffer->skb) { + dev_kfree_skb_any(tx_buffer->skb); + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_single(ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } else if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } + tx_buffer->next_to_watch = NULL; + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); /* buffer_info must be completely set up in the transmit path */ } @@ -2919,21 +3745,23 @@ void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring, **/ static void igb_clean_tx_ring(struct igb_ring *tx_ring) { - struct igb_buffer *buffer_info; + struct igb_tx_buffer *buffer_info; unsigned long size; - unsigned int i; + u16 i; - if (!tx_ring->buffer_info) + if (!tx_ring->tx_buffer_info) return; /* Free all the Tx ring sk_buffs */ for (i = 0; i < tx_ring->count; i++) { - buffer_info = 
&tx_ring->buffer_info[i]; + buffer_info = &tx_ring->tx_buffer_info[i]; igb_unmap_and_free_tx_resource(tx_ring, buffer_info); } - size = sizeof(struct igb_buffer) * tx_ring->count; - memset(tx_ring->buffer_info, 0, size); + netdev_tx_reset_queue(txring_txq(tx_ring)); + + size = sizeof(struct igb_tx_buffer) * tx_ring->count; + memset(tx_ring->tx_buffer_info, 0, size); /* Zero out the descriptor ring */ memset(tx_ring->desc, 0, tx_ring->size); @@ -2964,16 +3792,15 @@ void igb_free_rx_resources(struct igb_ring *rx_ring) { igb_clean_rx_ring(rx_ring); - vfree(rx_ring->buffer_info); - rx_ring->buffer_info = NULL; - + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; /* if not set, then don't free */ if (!rx_ring->desc) return; - pci_free_consistent(rx_ring->pdev, rx_ring->size, - rx_ring->desc, rx_ring->dma); + dma_free_coherent(rx_ring->dev, rx_ring->size, + rx_ring->desc, rx_ring->dma); rx_ring->desc = NULL; } @@ -2996,23 +3823,29 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter) * igb_clean_rx_ring - Free Rx Buffers per Queue * @rx_ring: ring to free buffers from **/ -static void igb_clean_rx_ring(struct igb_ring *rx_ring) +void igb_clean_rx_ring(struct igb_ring *rx_ring) { - struct igb_buffer *buffer_info; unsigned long size; - unsigned int i; + u16 i; - if (!rx_ring->buffer_info) + if (!rx_ring->rx_buffer_info) return; +#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT + if (rx_ring->skb) + dev_kfree_skb(rx_ring->skb); + rx_ring->skb = NULL; + +#endif /* Free all the Rx ring sk_buffs */ for (i = 0; i < rx_ring->count; i++) { - buffer_info = &rx_ring->buffer_info[i]; + struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i]; +#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT if (buffer_info->dma) { - pci_unmap_single(rx_ring->pdev, + dma_unmap_single(rx_ring->dev, buffer_info->dma, - rx_ring->rx_buffer_len, - PCI_DMA_FROMDEVICE); + rx_ring->rx_buffer_len, + DMA_FROM_DEVICE); buffer_info->dma = 0; } @@ -3020,28 +3853,27 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring) dev_kfree_skb(buffer_info->skb); buffer_info->skb = NULL; } -#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT - if (buffer_info->page_dma) { - pci_unmap_page(rx_ring->pdev, - buffer_info->page_dma, - PAGE_SIZE / 2, - PCI_DMA_FROMDEVICE); - buffer_info->page_dma = 0; - } - if (buffer_info->page) { - put_page(buffer_info->page); - buffer_info->page = NULL; - buffer_info->page_offset = 0; - } +#else + if (!buffer_info->page) + continue; + + dma_unmap_page(rx_ring->dev, + buffer_info->dma, + PAGE_SIZE, + DMA_FROM_DEVICE); + __free_page(buffer_info->page); + + buffer_info->page = NULL; #endif } - size = sizeof(struct igb_buffer) * rx_ring->count; - memset(rx_ring->buffer_info, 0, size); + size = sizeof(struct igb_rx_buffer) * rx_ring->count; + memset(rx_ring->rx_buffer_info, 0, size); /* Zero out the descriptor ring */ memset(rx_ring->desc, 0, rx_ring->size); + rx_ring->next_to_alloc = 0; rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; } @@ -3074,14 +3906,14 @@ static int igb_set_mac(struct net_device *netdev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; + igb_del_mac_filter(adapter, hw->mac.addr, + adapter->vfs_allocated_count); memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); /* set the correct pool for the new PF MAC address in entry 0 */ - igb_rar_set_qsel(adapter, hw->mac.addr, 0, - adapter->vfs_allocated_count); - - return 0; + return igb_add_mac_filter(adapter, hw->mac.addr, + adapter->vfs_allocated_count); 
} /** @@ -3093,100 +3925,169 @@ static int igb_set_mac(struct net_device *netdev, void *p) * 0 on no addresses written * X on writing X addresses to MTA **/ -static int igb_write_mc_addr_list(struct net_device *netdev) +int igb_write_mc_addr_list(struct net_device *netdev) { struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - struct dev_mc_list *mc_ptr = netdev->mc_list; +#ifdef NETDEV_HW_ADDR_T_MULTICAST + struct netdev_hw_addr *ha; +#else + struct dev_mc_list *ha; +#endif u8 *mta_list; - int i; + int i, count; +#ifdef CONFIG_IGB_VMDQ_NETDEV + int vm; +#endif + count = netdev_mc_count(netdev); +#ifdef CONFIG_IGB_VMDQ_NETDEV + for (vm = 1; vm < adapter->vmdq_pools; vm++) { + if (!adapter->vmdq_netdev[vm]) + break; + if (!netif_running(adapter->vmdq_netdev[vm])) + continue; + count += netdev_mc_count(adapter->vmdq_netdev[vm]); + } +#endif - if (!netdev->mc_count) { - /* nothing to program, so clear mc list */ + if (!count) { e1000_update_mc_addr_list(hw, NULL, 0); - igb_restore_vf_multicasts(adapter); return 0; } - - mta_list = kzalloc(netdev->mc_count * 6, GFP_ATOMIC); + mta_list = kzalloc(count * 6, GFP_ATOMIC); if (!mta_list) return -ENOMEM; /* The shared function expects a packed array of only addresses. */ - mc_ptr = netdev->mc_list; - - for (i = 0; i < netdev->mc_count; i++) { - if (!mc_ptr) + i = 0; + netdev_for_each_mc_addr(ha, netdev) +#ifdef NETDEV_HW_ADDR_T_MULTICAST + memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); +#else + memcpy(mta_list + (i++ * ETH_ALEN), ha->dmi_addr, ETH_ALEN); +#endif +#ifdef CONFIG_IGB_VMDQ_NETDEV + for (vm = 1; vm < adapter->vmdq_pools; vm++) { + if (!adapter->vmdq_netdev[vm]) break; - memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN); - mc_ptr = mc_ptr->next; + if (!netif_running(adapter->vmdq_netdev[vm]) || + !netdev_mc_count(adapter->vmdq_netdev[vm])) + continue; + netdev_for_each_mc_addr(ha, adapter->vmdq_netdev[vm]) +#ifdef NETDEV_HW_ADDR_T_MULTICAST + memcpy(mta_list + (i++ * ETH_ALEN), + ha->addr, ETH_ALEN); +#else + memcpy(mta_list + (i++ * ETH_ALEN), + ha->dmi_addr, ETH_ALEN); +#endif } +#endif e1000_update_mc_addr_list(hw, mta_list, i); kfree(mta_list); - return netdev->mc_count; + return count; } -#ifdef HAVE_SET_RX_MODE -/** - * igb_write_uc_addr_list - write unicast addresses to RAR table - * @netdev: network interface device structure - * - * Writes unicast address list to the RAR table. 
- * Returns: -ENOMEM on failure/insufficient address space - * 0 on no addresses written - * X on writing X addresses to the RAR table - **/ -static int igb_write_uc_addr_list(struct net_device *netdev) +void igb_rar_set(struct igb_adapter *adapter, u32 index) { - struct igb_adapter *adapter = netdev_priv(netdev); + u32 rar_low, rar_high; struct e1000_hw *hw = &adapter->hw; - unsigned int vfn = adapter->vfs_allocated_count; - unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1); -#ifndef HAVE_NETDEV_HW_ADDR - struct dev_mc_list *uc_ptr = netdev->uc_list; -#endif - int count = 0; + u8 *addr = adapter->mac_table[index].addr; + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | + ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); + rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); - /* return ENOMEM indicating insufficient memory for addresses */ -#ifndef HAVE_NETDEV_HW_ADDR - if (netdev->uc_count > rar_entries) -#else - if (netdev->uc.count > rar_entries) -#endif - return -ENOMEM; + /* Indicate to hardware the Address is Valid. */ + if (adapter->mac_table[index].state & IGB_MAC_STATE_IN_USE) + rar_high |= E1000_RAH_AV; -#ifdef HAVE_NETDEV_HW_ADDR - if (netdev->uc.count && rar_entries) { - struct netdev_hw_addr *ha; - list_for_each_entry(ha, &netdev->uc.list, list) { - if (!rar_entries) - break; - igb_rar_set_qsel(adapter, ha->addr, - rar_entries--, - vfn); - count++; - } - } -#else - while (uc_ptr) { - igb_rar_set_qsel(adapter, uc_ptr->da_addr, - rar_entries--, vfn); - uc_ptr = uc_ptr->next; - count++; + if (hw->mac.type == e1000_82575) + rar_high |= E1000_RAH_POOL_1 * adapter->mac_table[index].queue; + else + rar_high |= E1000_RAH_POOL_1 << adapter->mac_table[index].queue; + + E1000_WRITE_REG(hw, E1000_RAL(index), rar_low); + E1000_WRITE_FLUSH(hw); + E1000_WRITE_REG(hw, E1000_RAH(index), rar_high); + E1000_WRITE_FLUSH(hw); +} + +void igb_full_sync_mac_table(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + int i; + for (i = 0; i < hw->mac.rar_entry_count; i++) { + igb_rar_set(adapter, i); } -#endif - /* write the addresses in reverse order to avoid write combining */ - for (; rar_entries > 0 ; rar_entries--) { - E1000_WRITE_REG(hw, E1000_RAH(rar_entries), 0); - E1000_WRITE_REG(hw, E1000_RAL(rar_entries), 0); +} + +void igb_sync_mac_table(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + int i; + for (i = 0; i < hw->mac.rar_entry_count; i++) { + if (adapter->mac_table[i].state & IGB_MAC_STATE_MODIFIED) + igb_rar_set(adapter, i); + adapter->mac_table[i].state &= ~(IGB_MAC_STATE_MODIFIED); } - E1000_WRITE_FLUSH(hw); +} +int igb_available_rars(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + int i, count = 0; + + for (i = 0; i < hw->mac.rar_entry_count; i++) { + if (adapter->mac_table[i].state == 0) + count++; + } return count; } +#ifdef HAVE_SET_RX_MODE +/** + * igb_write_uc_addr_list - write unicast addresses to RAR table + * @netdev: network interface device structure + * + * Writes unicast address list to the RAR table. 
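+ * (Unlike the removed version above, each address is now routed through
+ * igb_del_mac_filter()/igb_add_mac_filter(), so repeated rx_mode updates
+ * reuse the software mac_table instead of rewriting RAR entries blindly.)
+ *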
+ * Returns: -ENOMEM on failure/insufficient address space + * 0 on no addresses written + * X on writing X addresses to the RAR table + **/ +static int igb_write_uc_addr_list(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + unsigned int vfn = adapter->vfs_allocated_count; + int count = 0; + + /* return ENOMEM indicating insufficient memory for addresses */ + if (netdev_uc_count(netdev) > igb_available_rars(adapter)) + return -ENOMEM; + if (!netdev_uc_empty(netdev)) { +#ifdef NETDEV_HW_ADDR_T_UNICAST + struct netdev_hw_addr *ha; +#else + struct dev_mc_list *ha; +#endif + netdev_for_each_uc_addr(ha, netdev) { +#ifdef NETDEV_HW_ADDR_T_UNICAST + igb_del_mac_filter(adapter, ha->addr, vfn); + igb_add_mac_filter(adapter, ha->addr, vfn); +#else + igb_del_mac_filter(adapter, ha->da_addr, vfn); + igb_add_mac_filter(adapter, ha->da_addr, vfn); #endif + count++; + } + } + return count; +} + +#endif /* HAVE_SET_RX_MODE */ /** * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set * @netdev: network interface device structure @@ -3213,6 +4114,11 @@ static void igb_set_rx_mode(struct net_device *netdev) if (netdev->flags & IFF_PROMISC) { rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME); +#ifndef __VMKLNX__ + /* retain VLAN HW filtering if in VT mode */ + if (adapter->vfs_allocated_count || adapter->vmdq_pools) + rctl |= E1000_RCTL_VFE; +#endif } else { if (netdev->flags & IFF_ALLMULTI) { rctl |= E1000_RCTL_MPE; @@ -3220,7 +4126,7 @@ static void igb_set_rx_mode(struct net_device *netdev) } else { /* * Write addresses to the MTA, if the attempt fails - * then we should just turn on promiscous mode so + * then we should just turn on promiscuous mode so * that we can at least receive multicast traffic */ count = igb_write_mc_addr_list(netdev); @@ -3235,14 +4141,14 @@ static void igb_set_rx_mode(struct net_device *netdev) /* * Write addresses to available RAR registers, if there is not * sufficient space to store all the addresses then enable - * unicast promiscous mode + * unicast promiscuous mode */ count = igb_write_uc_addr_list(netdev); if (count < 0) { rctl |= E1000_RCTL_UPE; vmolr |= E1000_VMOLR_ROPE; } -#endif +#endif /* HAVE_SET_RX_MODE */ rctl |= E1000_RCTL_VFE; } E1000_WRITE_REG(hw, E1000_RCTL, rctl); @@ -3274,11 +4180,10 @@ static void igb_update_phy_info(unsigned long data) * igb_has_link - check shared code for link and determine up/down * @adapter: pointer to driver private info **/ -static bool igb_has_link(struct igb_adapter *adapter) +bool igb_has_link(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; bool link_active = FALSE; - s32 ret_val = 0; /* get_link_status is set on LSC (link status) interrupt or * rx sequence error interrupt. 
get_link_status will stay @@ -3287,22 +4192,28 @@ static bool igb_has_link(struct igb_adapter *adapter) */ switch (hw->phy.media_type) { case e1000_media_type_copper: - if (hw->mac.get_link_status) { - ret_val = e1000_check_for_link(hw); - link_active = !hw->mac.get_link_status; - } else { - link_active = TRUE; - } - break; + if (!hw->mac.get_link_status) + return true; case e1000_media_type_internal_serdes: - ret_val = e1000_check_for_link(hw); - link_active = hw->mac.serdes_has_link; + e1000_check_for_link(hw); + link_active = !hw->mac.get_link_status; break; case e1000_media_type_unknown: default: break; } + if (((hw->mac.type == e1000_i210) || + (hw->mac.type == e1000_i211)) && + (hw->phy.id == I210_I_PHY_ID)) { + if (!netif_carrier_ok(adapter->netdev)) { + adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; + } else if (!(adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)) { + adapter->flags |= IGB_FLAG_NEED_LINK_UPDATE; + adapter->link_check_timeout = jiffies; + } + } + return link_active; } @@ -3324,12 +4235,29 @@ static void igb_watchdog_task(struct work_struct *work) watchdog_task); struct e1000_hw *hw = &adapter->hw; struct net_device *netdev = adapter->netdev; - u32 link; + u32 thstat, ctrl_ext, link; int i; link = igb_has_link(adapter); + if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) { + if (time_after(jiffies, (adapter->link_check_timeout + HZ))) + adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; + else + link = FALSE; + } + if (link) { + /* Perform a reset if the media type changed. */ + if (hw->dev_spec._82575.media_changed) { + hw->dev_spec._82575.media_changed = false; + adapter->flags |= IGB_FLAG_MEDIA_RESET; + igb_reset(adapter); + } + + /* Cancel scheduled suspend requests. */ + pm_runtime_resume(netdev->dev.parent); + if (!netif_carrier_ok(netdev)) { u32 ctrl; e1000_get_speed_and_duplex(hw, @@ -3345,13 +4273,10 @@ static void igb_watchdog_task(struct work_struct *work) adapter->link_duplex == FULL_DUPLEX ? "Full Duplex" : "Half Duplex", ((ctrl & E1000_CTRL_TFCE) && - (ctrl & E1000_CTRL_RFCE)) ? "RX/TX" : + (ctrl & E1000_CTRL_RFCE)) ? "RX/TX": ((ctrl & E1000_CTRL_RFCE) ? "RX" : ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None"))); - - /* tweak tx_queue_len according to speed/duplex and - * adjust the timeout factor */ - netdev->tx_queue_len = adapter->tx_queue_len; + /* adjust timeout factor according to speed/duplex */ #ifdef __VMKLNX__ adapter->tx_timeout_factor = 4; #else @@ -3359,19 +4284,22 @@ static void igb_watchdog_task(struct work_struct *work) #endif switch (adapter->link_speed) { case SPEED_10: - netdev->tx_queue_len = 10; adapter->tx_timeout_factor = 14; break; case SPEED_100: - netdev->tx_queue_len = 100; /* maybe add some timeout factor ? 
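The i210/i211 hunk above is a link-flap workaround for the internal PHY (I210_I_PHY_ID): a link indication while the carrier is already up arms IGB_FLAG_NEED_LINK_UPDATE and records jiffies, and the watchdog in the next hunk keeps reporting link-down until a full HZ has elapsed. The two halves condensed into one function, assuming <linux/jiffies.h> semantics; a sketch, not the driver's exact control flow:

static bool igb_debounced_link(unsigned long *flags, unsigned long *stamp,
                               bool raw_link, bool carrier_ok)
{
        if (!carrier_ok) {
                *flags &= ~IGB_FLAG_NEED_LINK_UPDATE;   /* already down */
        } else if (!(*flags & IGB_FLAG_NEED_LINK_UPDATE)) {
                *flags |= IGB_FLAG_NEED_LINK_UPDATE;    /* start hold-off */
                *stamp = jiffies;
        }

        if ((*flags & IGB_FLAG_NEED_LINK_UPDATE) &&
            !time_after(jiffies, *stamp + HZ))
                return false;                           /* hold link down */

        *flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
        return raw_link;
}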
*/ break; + default: + break; } netif_carrier_on(netdev); netif_tx_wake_all_queues(netdev); igb_ping_all_vfs(adapter); +#ifdef IFLA_VF_MAX + igb_check_vf_rate_limit(adapter); +#endif /* IFLA_VF_MAX */ /* link state has changed, schedule phy info update */ if (!test_bit(__IGB_DOWN, &adapter->state)) @@ -3382,6 +4310,33 @@ static void igb_watchdog_task(struct work_struct *work) if (netif_carrier_ok(netdev)) { adapter->link_speed = 0; adapter->link_duplex = 0; + /* check for thermal sensor event on i350 */ + if (hw->mac.type == e1000_i350) { + thstat = E1000_READ_REG(hw, E1000_THSTAT); + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + if ((hw->phy.media_type == + e1000_media_type_copper) && + !(ctrl_ext & + E1000_CTRL_EXT_LINK_MODE_SGMII)) { + if (thstat & E1000_THSTAT_PWR_DOWN) { + printk(KERN_ERR "igb: %s The " + "network adapter was stopped " + "because it overheated.\n", + netdev->name); + } + if (thstat & E1000_THSTAT_LINK_THROTTLE) { + printk(KERN_INFO + "igb: %s The network " + "adapter supported " + "link speed " + "was downshifted " + "because it " + "overheated.\n", + netdev->name); + } + } + } + /* Links status message must follow this format */ printk(KERN_INFO "igb: %s NIC Link is Down\n", netdev->name); @@ -3394,12 +4349,13 @@ static void igb_watchdog_task(struct work_struct *work) if (!test_bit(__IGB_DOWN, &adapter->state)) mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ)); + pm_schedule_suspend(netdev->dev.parent, + MSEC_PER_SEC * 5); + } } igb_update_stats(adapter); - e1000_update_adaptive(hw); - for (i = 0; i < adapter->num_tx_queues; i++) { struct igb_ring *tx_ring = adapter->tx_ring[i]; @@ -3417,7 +4373,7 @@ static void igb_watchdog_task(struct work_struct *work) } /* Force detection of hung controller every watchdog period */ - tx_ring->detect_tx_hung = TRUE; + set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); } /* Cause software interrupt to ensure rx ring is cleaned */ @@ -3430,10 +4386,79 @@ static void igb_watchdog_task(struct work_struct *work) E1000_WRITE_REG(hw, E1000_ICS, E1000_ICS_RXDMT0); } + /* Reset the timer */ + if (!test_bit(__IGB_DOWN, &adapter->state)) { + if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + HZ)); + else + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + 2 * HZ)); + } +} + +static void igb_dma_err_task(struct work_struct *work) +{ + struct igb_adapter *adapter = container_of(work, + struct igb_adapter, + dma_err_task); + int vf; + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 hgptc; + u32 ciaa, ciad; + + hgptc = E1000_READ_REG(hw, E1000_HGPTC); + if (hgptc) /* If incrementing then no need for the check below */ + goto dma_timer_reset; + /* + * Check to see if a bad DMA write target from an errant or + * malicious VF has caused a PCIe error. If so then we can + * issue a VFLR to the offending VF(s) and then resume without + * requesting a full slot reset. 
+ */ + + for (vf = 0; vf < adapter->vfs_allocated_count; vf++) { + ciaa = (vf << 16) | 0x80000000; + /* 32 bit read so align, we really want status at offset 6 */ + ciaa |= PCI_COMMAND; + E1000_WRITE_REG(hw, E1000_CIAA, ciaa); + ciad = E1000_READ_REG(hw, E1000_CIAD); + ciaa &= 0x7FFFFFFF; + /* disable debug mode asap after reading data */ + E1000_WRITE_REG(hw, E1000_CIAA, ciaa); + /* Get the upper 16 bits which will be the PCI status reg */ + ciad >>= 16; + if (ciad & (PCI_STATUS_REC_MASTER_ABORT | + PCI_STATUS_REC_TARGET_ABORT | + PCI_STATUS_SIG_SYSTEM_ERROR)) { + netdev_err(netdev, "VF %d suffered error\n", vf); + /* Issue VFLR */ + ciaa = (vf << 16) | 0x80000000; + ciaa |= 0xA8; + E1000_WRITE_REG(hw, E1000_CIAA, ciaa); + ciad = 0x00008000; /* VFLR */ + E1000_WRITE_REG(hw, E1000_CIAD, ciad); + ciaa &= 0x7FFFFFFF; + E1000_WRITE_REG(hw, E1000_CIAA, ciaa); + } + } +dma_timer_reset: /* Reset the timer */ if (!test_bit(__IGB_DOWN, &adapter->state)) - mod_timer(&adapter->watchdog_timer, - round_jiffies(jiffies + 2 * HZ)); + mod_timer(&adapter->dma_err_timer, + round_jiffies(jiffies + HZ / 10)); +} + +/** + * igb_dma_err_timer - Timer Call-back + * @data: pointer to adapter cast into an unsigned long + **/ +static void igb_dma_err_timer(unsigned long data) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + /* Do the rest outside of interrupt context */ + schedule_work(&adapter->dma_err_task); } enum latency_range { @@ -3449,7 +4474,7 @@ enum latency_range { * Stores a new ITR value based on strictly on packet size. This * algorithm is less sophisticated than that used in igb_update_itr, * due to the difficulty of synchronizing statistics across multiple - * receive rings. The divisors and thresholds used by this fuction + * receive rings. The divisors and thresholds used by this function * were determined based on theoretical maximum wire speed and testing * data, in order to minimize response time while increasing bulk * throughput. @@ -3459,37 +4484,33 @@ enum latency_range { * receive environment. * @q_vector: pointer to q_vector **/ -/* - * For ESX, we use this function for all cases, even when in single-queue - * mode. It generates slightly lower interrupt rates than igb_update_itr(), - * and this provides lower CPU utilization for ESX, with no drop in - * throughput. - */ static void igb_update_ring_itr(struct igb_q_vector *q_vector) { int new_val = q_vector->itr_val; int avg_wire_size = 0; struct igb_adapter *adapter = q_vector->adapter; + unsigned int packets; /* For non-gigabit speeds, just fix the interrupt rate at 4000 * ints/sec - ITR timer value of 120 ticks. 
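igb_dma_err_task() above probes each VF's PCI status word for master/target aborts and recovers with a function-level reset (VFLR) rather than a full slot reset. CIAA/CIAD act as a small window into VF configuration space; from the writes and shifts used above, the apparent encoding is bit 31 = debug-mode enable, bits 23:16 = VF number, low bits = config offset, with PCI_STATUS arriving in the upper half of the 32-bit read at PCI_COMMAND. Those field meanings are inferred from this code, not quoted from a datasheet. The probe refactored into a helper, for reference:

/* Returns true if the VF's PCI status register records an abort. */
static bool igb_vf_saw_abort(struct e1000_hw *hw, int vf)
{
        u32 ciaa = (vf << 16) | 0x80000000 | PCI_COMMAND;
        u32 status;

        E1000_WRITE_REG(hw, E1000_CIAA, ciaa);              /* open window */
        status = E1000_READ_REG(hw, E1000_CIAD) >> 16;      /* PCI_STATUS */
        E1000_WRITE_REG(hw, E1000_CIAA, ciaa & 0x7FFFFFFF); /* close it */

        return status & (PCI_STATUS_REC_MASTER_ABORT |
                         PCI_STATUS_REC_TARGET_ABORT |
                         PCI_STATUS_SIG_SYSTEM_ERROR);
}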
*/ - if (adapter->link_speed != SPEED_1000) { - new_val = 976; + switch (adapter->link_speed) { + case SPEED_10: + case SPEED_100: + new_val = IGB_4K_ITR; goto set_itr_val; + default: + break; } - if (q_vector->rx_ring && q_vector->rx_ring->total_packets) { - struct igb_ring *ring = q_vector->rx_ring; - avg_wire_size = ring->total_bytes / ring->total_packets; - } + packets = q_vector->rx.total_packets; + if (packets) + avg_wire_size = q_vector->rx.total_bytes / packets; - if (q_vector->tx_ring && q_vector->tx_ring->total_packets) { - struct igb_ring *ring = q_vector->tx_ring; + packets = q_vector->tx.total_packets; + if (packets) avg_wire_size = max_t(u32, avg_wire_size, - (ring->total_bytes / - ring->total_packets)); - } + q_vector->tx.total_bytes / packets); /* if avg_wire_size isn't set no work was done */ if (!avg_wire_size) @@ -3507,20 +4528,22 @@ static void igb_update_ring_itr(struct igb_q_vector *q_vector) else new_val = avg_wire_size / 2; + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (new_val < IGB_20K_ITR && + ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || + (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) + new_val = IGB_20K_ITR; + set_itr_val: if (new_val != q_vector->itr_val) { q_vector->itr_val = new_val; q_vector->set_itr = 1; } clear_counts: - if (q_vector->rx_ring) { - q_vector->rx_ring->total_bytes = 0; - q_vector->rx_ring->total_packets = 0; - } - if (q_vector->tx_ring) { - q_vector->tx_ring->total_bytes = 0; - q_vector->tx_ring->total_packets = 0; - } + q_vector->rx.total_bytes = 0; + q_vector->rx.total_packets = 0; + q_vector->tx.total_bytes = 0; + q_vector->tx.total_packets = 0; } /** @@ -3536,115 +4559,116 @@ clear_counts: * parameter (see igb_param.c) * NOTE: These calculations are only valid when operating in a single- * queue environment. 
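The IGB_*_ITR constants replacing the old literals here and in the following hunks appear to encode the interrupt interval at a 256 ns EITR granularity, i.e. value ≈ (10^9 / rate) / 256, which reproduces the retired numbers up to rounding:

/*   70,000 int/s -> 14,285 ns / 256 ≈  56   (old literal 56,  IGB_70K_ITR)
 *   20,000 int/s -> 50,000 ns / 256 ≈ 196   (old literal 196, IGB_20K_ITR)
 *    4,000 int/s -> 250,000 ns / 256 ≈ 976  (old literal 976, IGB_4K_ITR)
 * Illustrative conversion only; the driver uses the constants directly.
 */
static unsigned int itr_reg_from_rate(unsigned int ints_per_sec)
{
        return (1000000000u / ints_per_sec) / 256;
}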
- * @adapter: pointer to adapter - * @itr_setting: current q_vector->itr_val - * @packets: the number of packets during this measurement interval - * @bytes: the number of bytes during this measurement interval + * @q_vector: pointer to q_vector + * @ring_container: ring info to update the itr for **/ -static unsigned int igb_update_itr(struct igb_adapter *adapter, u16 itr_setting, - int packets, int bytes) +static void igb_update_itr(struct igb_q_vector *q_vector, + struct igb_ring_container *ring_container) { - unsigned int retval = itr_setting; + unsigned int packets = ring_container->total_packets; + unsigned int bytes = ring_container->total_bytes; + u8 itrval = ring_container->itr; + /* no packets, exit with status unchanged */ if (packets == 0) - goto update_itr_done; + return; - switch (itr_setting) { + switch (itrval) { case lowest_latency: /* handle TSO and jumbo frames */ if (bytes/packets > 8000) - retval = bulk_latency; + itrval = bulk_latency; else if ((packets < 5) && (bytes > 512)) - retval = low_latency; + itrval = low_latency; break; case low_latency: /* 50 usec aka 20000 ints/s */ if (bytes > 10000) { /* this if handles the TSO accounting */ if (bytes/packets > 8000) { - retval = bulk_latency; + itrval = bulk_latency; } else if ((packets < 10) || ((bytes/packets) > 1200)) { - retval = bulk_latency; + itrval = bulk_latency; } else if ((packets > 35)) { - retval = lowest_latency; + itrval = lowest_latency; } } else if (bytes/packets > 2000) { - retval = bulk_latency; + itrval = bulk_latency; } else if (packets <= 2 && bytes < 512) { - retval = lowest_latency; + itrval = lowest_latency; } break; case bulk_latency: /* 250 usec aka 4000 ints/s */ if (bytes > 25000) { if (packets > 35) - retval = low_latency; + itrval = low_latency; } else if (bytes < 1500) { - retval = low_latency; + itrval = low_latency; } break; } -update_itr_done: - return retval; + /* clear work counters since we have the values we need */ + ring_container->total_bytes = 0; + ring_container->total_packets = 0; + + /* write updated itr to ring container */ + ring_container->itr = itrval; } -#ifndef __VMKLNX__ -static void igb_set_itr(struct igb_adapter *adapter) +#ifndef __VMKLNX__ +static void igb_set_itr(struct igb_q_vector *q_vector) { - struct igb_q_vector *q_vector = adapter->q_vector[0]; - u16 current_itr; + struct igb_adapter *adapter = q_vector->adapter; u32 new_itr = q_vector->itr_val; + u8 current_itr = 0; /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ - if (adapter->link_speed != SPEED_1000) { + switch (adapter->link_speed) { + case SPEED_10: + case SPEED_100: current_itr = 0; - new_itr = 4000; + new_itr = IGB_4K_ITR; goto set_itr_now; + default: + break; } - adapter->rx_itr = igb_update_itr(adapter, - adapter->rx_itr, - q_vector->rx_ring->total_packets, - q_vector->rx_ring->total_bytes); + igb_update_itr(q_vector, &q_vector->tx); + igb_update_itr(q_vector, &q_vector->rx); - adapter->tx_itr = igb_update_itr(adapter, - adapter->tx_itr, - q_vector->tx_ring->total_packets, - q_vector->tx_ring->total_bytes); - current_itr = max(adapter->rx_itr, adapter->tx_itr); + current_itr = max(q_vector->rx.itr, q_vector->tx.itr); /* conservative mode (itr 3) eliminates the lowest_latency setting */ - if (adapter->rx_itr_setting == 3 && current_itr == lowest_latency) + if (current_itr == lowest_latency && + ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || + (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) current_itr = low_latency; switch (current_itr) { /* counts and packets in 
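igb_update_itr() above is a three-state classifier keyed on per-interval packet and byte counts. A few worked transitions, using the thresholds exactly as written:

/* state = low_latency,    packets = 12, bytes = 108000
 *   bytes > 10000, bytes/packets = 9000 > 8000   -> bulk_latency (TSO-ish)
 * state = bulk_latency,   packets = 40, bytes = 26000
 *   bytes > 25000 and packets > 35               -> low_latency
 * state = lowest_latency, packets = 3,  bytes = 900
 *   packets < 5 and bytes > 512                  -> low_latency
 */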
update_itr are dependent on these numbers */ case lowest_latency: - new_itr = 56; /* aka 70,000 ints/sec */ + new_itr = IGB_70K_ITR; /* 70,000 ints/sec */ break; case low_latency: - new_itr = 196; /* aka 20,000 ints/sec */ + new_itr = IGB_20K_ITR; /* 20,000 ints/sec */ break; case bulk_latency: - new_itr = 980; /* aka 4,000 ints/sec */ + new_itr = IGB_4K_ITR; /* 4,000 ints/sec */ break; default: break; } set_itr_now: - q_vector->rx_ring->total_bytes = 0; - q_vector->rx_ring->total_packets = 0; - q_vector->tx_ring->total_bytes = 0; - q_vector->tx_ring->total_packets = 0; - if (new_itr != q_vector->itr_val) { /* this attempts to bias the interrupt rate towards Bulk * by adding intermediate steps when interrupt rate is * increasing */ new_itr = new_itr > q_vector->itr_val ? max((new_itr * q_vector->itr_val) / - (new_itr + (q_vector->itr_val >> 2)), new_itr) : - new_itr; + (new_itr + (q_vector->itr_val >> 2)), + new_itr) : + new_itr; /* Don't write the value here; it resets the adapter's * internal timer, and causes us to delay far longer than * we should between interrupts. Instead, we write the ITR @@ -3654,41 +4678,60 @@ set_itr_now: q_vector->itr_val = new_itr; q_vector->set_itr = 1; } - - return; } -#endif -#define IGB_TX_FLAGS_CSUM 0x00000001 -#define IGB_TX_FLAGS_VLAN 0x00000002 -#define IGB_TX_FLAGS_TSO 0x00000004 -#define IGB_TX_FLAGS_IPV4 0x00000008 -#define IGB_TX_FLAGS_TSTAMP 0x00000010 -#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000 -#define IGB_TX_FLAGS_VLAN_SHIFT 16 +#endif /* __VMKLNX__ */ +void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens, + u32 type_tucmd, u32 mss_l4len_idx) +{ + struct e1000_adv_tx_context_desc *context_desc; + u16 i = tx_ring->next_to_use; + + context_desc = IGB_TX_CTXTDESC(tx_ring, i); -static inline int igb_tso_adv(struct igb_ring *tx_ring, - struct sk_buff *skb, u32 tx_flags, u8 *hdr_len) + i++; + tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; + + /* set bits to identify this as an advanced context descriptor */ + type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT; + + /* For 82575, context index must be unique per ring. 
*/ + if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) + mss_l4len_idx |= tx_ring->reg_idx << 4; + + context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); + context_desc->seqnum_seed = 0; + context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); + context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); +} + +static int igb_tso(struct igb_ring *tx_ring, + struct igb_tx_buffer *first, + u8 *hdr_len) { #ifdef NETIF_F_TSO - struct e1000_adv_tx_context_desc *context_desc; - unsigned int i; - int err; - struct igb_buffer *buffer_info; - u32 info = 0, tu_cmd = 0; - u32 mss_l4len_idx; - u8 l4len; + struct sk_buff *skb = first->skb; + u32 vlan_macip_lens, type_tucmd; + u32 mss_l4len_idx, l4len; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (!skb_is_gso(skb)) +#endif /* NETIF_F_TSO */ + return 0; +#ifdef NETIF_F_TSO if (skb_header_cloned(skb)) { - err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); + int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); if (err) return err; } - l4len = tcp_hdrlen(skb); - *hdr_len += l4len; + /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ + type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP; - if (skb->protocol == htons(ETH_P_IP)) { + if (first->protocol == __constant_htons(ETH_P_IP)) { struct iphdr *iph = ip_hdr(skb); iph->tot_len = 0; iph->check = 0; @@ -3696,305 +4739,296 @@ static inline int igb_tso_adv(struct igb_ring *tx_ring, iph->daddr, 0, IPPROTO_TCP, 0); + type_tucmd |= E1000_ADVTXD_TUCMD_IPV4; + first->tx_flags |= IGB_TX_FLAGS_TSO | + IGB_TX_FLAGS_CSUM | + IGB_TX_FLAGS_IPV4; #ifdef NETIF_F_TSO6 - } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) { + } else if (skb_is_gso_v6(skb)) { ipv6_hdr(skb)->payload_len = 0; tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); + first->tx_flags |= IGB_TX_FLAGS_TSO | + IGB_TX_FLAGS_CSUM; #endif } - i = tx_ring->next_to_use; - - buffer_info = &tx_ring->buffer_info[i]; - context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i); - /* VLAN MACLEN IPLEN */ - if (tx_flags & IGB_TX_FLAGS_VLAN) - info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK); - info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT); - *hdr_len += skb_network_offset(skb); - info |= skb_network_header_len(skb); - *hdr_len += skb_network_header_len(skb); - context_desc->vlan_macip_lens = cpu_to_le32(info); - - /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ - tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT); - - if (skb->protocol == htons(ETH_P_IP)) - tu_cmd |= E1000_ADVTXD_TUCMD_IPV4; - tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; + /* compute header lengths */ + l4len = tcp_hdrlen(skb); + *hdr_len = skb_transport_offset(skb) + l4len; - context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd); + /* update gso size and bytecount with header size */ + first->gso_segs = skb_shinfo(skb)->gso_segs; + first->bytecount += (first->gso_segs - 1) * *hdr_len; /* MSS L4LEN IDX */ - mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT); - mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT); + mss_l4len_idx = l4len << E1000_ADVTXD_L4LEN_SHIFT; + mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT; - /* For 82575, context index must be unique per ring. 
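The gso_segs/bytecount adjustment in igb_tso() above charges every replicated header to the byte count used for stats and queue accounting. Worked example, assuming a 66-byte Ethernet+IP+TCP header:

/* skb->len = 45000, gso_size = 1448, hdr_len = 66:
 *   payload   = 45000 - 66          = 44934 bytes
 *   gso_segs  = ceil(44934 / 1448)  = 32 segments
 *   bytecount = 45000 + (32-1) * 66 = 47046 bytes
 * which matches the on-wire total 32 * 66 + 44934 = 47046.
 */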
*/ - if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) - mss_l4len_idx |= tx_ring->reg_idx << 4; - - context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); - context_desc->seqnum_seed = 0; - - buffer_info->time_stamp = jiffies; - buffer_info->next_to_watch = i; - buffer_info->dma = 0; - i++; - if (i == tx_ring->count) - i = 0; + /* VLAN MACLEN IPLEN */ + vlan_macip_lens = skb_network_header_len(skb); + vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK; - tx_ring->next_to_use = i; + igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx); - return TRUE; -#else - return FALSE; + return 1; #endif /* NETIF_F_TSO */ } -static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring, - struct sk_buff *skb, u32 tx_flags) +static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first) { - struct e1000_adv_tx_context_desc *context_desc; -#ifndef __VMKLNX__ - struct pci_dev *pdev = tx_ring->pdev; -#endif - struct igb_buffer *buffer_info; - u32 info = 0, tu_cmd = 0; - unsigned int i; - - if ((skb->ip_summed == CHECKSUM_PARTIAL) || - (tx_flags & IGB_TX_FLAGS_VLAN)) { - i = tx_ring->next_to_use; - buffer_info = &tx_ring->buffer_info[i]; - context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i); - - if (tx_flags & IGB_TX_FLAGS_VLAN) - info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK); - - info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT); - if (skb->ip_summed == CHECKSUM_PARTIAL) - info |= skb_network_header_len(skb); + struct sk_buff *skb = first->skb; + u32 vlan_macip_lens = 0; + u32 mss_l4len_idx = 0; + u32 type_tucmd = 0; - context_desc->vlan_macip_lens = cpu_to_le32(info); - - tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT); - - if (skb->ip_summed == CHECKSUM_PARTIAL) { - __be16 protocol; - - if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) { - const struct vlan_ethhdr *vhdr = - (const struct vlan_ethhdr*)skb->data; - - protocol = vhdr->h_vlan_encapsulated_proto; - } else { - protocol = skb->protocol; - } - - switch (protocol) { - case __constant_htons(ETH_P_IP): - tu_cmd |= E1000_ADVTXD_TUCMD_IPV4; - if (ip_hdr(skb)->protocol == IPPROTO_TCP) - tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; - else if (ip_hdr(skb)->protocol == IPPROTO_SCTP) - tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP; - break; + if (skb->ip_summed != CHECKSUM_PARTIAL) { + if (!(first->tx_flags & IGB_TX_FLAGS_VLAN)) + return; + } else { + u8 nexthdr = 0; + switch (first->protocol) { + case __constant_htons(ETH_P_IP): + vlan_macip_lens |= skb_network_header_len(skb); + type_tucmd |= E1000_ADVTXD_TUCMD_IPV4; + nexthdr = ip_hdr(skb)->protocol; + break; #ifdef NETIF_F_IPV6_CSUM - case __constant_htons(ETH_P_IPV6): - /* XXX what about other V6 headers?? 
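The two context-descriptor words assembled in igb_tso() pack several lengths into fixed fields; the layout implied by the shifts used in this patch (MACLEN shift 9, MSS shift 16, L4LEN shift 8, context index shift 4) would be:

/* vlan_macip_lens: [31:16] VLAN tag   [15:9] MACLEN   [8:0] IPLEN
 * mss_l4len_idx:   [31:16] MSS        [15:8] L4LEN    [6:4] ctx index
 * e.g. untagged frame, 14-byte MAC header, 20-byte IP header:
 *   vlan_macip_lens = (14 << 9) | 20 = 0x1c14
 */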
*/ - if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) - tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; - else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP) - tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP; - break; -#endif - default: -#ifndef __VMKLNX__ - if (unlikely(net_ratelimit())) - dev_warn(&pdev->dev, - "partial checksum but proto=%x!\n", - skb->protocol); + case __constant_htons(ETH_P_IPV6): + vlan_macip_lens |= skb_network_header_len(skb); + nexthdr = ipv6_hdr(skb)->nexthdr; + break; #endif - break; + default: + if (unlikely(net_ratelimit())) { + dev_warn(tx_ring->dev, + "partial checksum but proto=%x!\n", + first->protocol); } + break; } + switch (nexthdr) { + case IPPROTO_TCP: + type_tucmd |= E1000_ADVTXD_TUCMD_L4T_TCP; + mss_l4len_idx = tcp_hdrlen(skb) << + E1000_ADVTXD_L4LEN_SHIFT; + break; +#ifdef HAVE_SCTP + case IPPROTO_SCTP: + type_tucmd |= E1000_ADVTXD_TUCMD_L4T_SCTP; + mss_l4len_idx = sizeof(struct sctphdr) << + E1000_ADVTXD_L4LEN_SHIFT; + break; +#endif + case IPPROTO_UDP: + mss_l4len_idx = sizeof(struct udphdr) << + E1000_ADVTXD_L4LEN_SHIFT; + break; + default: + if (unlikely(net_ratelimit())) { + dev_warn(tx_ring->dev, + "partial checksum but l4 proto=%x!\n", + nexthdr); + } + break; + } - buffer_info->time_stamp = jiffies; - buffer_info->next_to_watch = i; - buffer_info->dma = 0; + /* update TX checksum flag */ + first->tx_flags |= IGB_TX_FLAGS_CSUM; + } - i++; - if (i == tx_ring->count) - i = 0; - tx_ring->next_to_use = i; + vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK; - return TRUE; - } - return FALSE; + igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx); } -#define IGB_MAX_TXD_PWR 16 -#define IGB_MAX_DATA_PER_TXD (1 << IGB_MAX_TXD_PWR) - -static inline int igb_tx_map_adv(struct igb_ring *tx_ring, - struct sk_buff *skb, unsigned int first) +static u32 igb_tx_cmd_type(struct sk_buff *skb, u32 tx_flags) { - struct igb_buffer *buffer_info; - struct pci_dev *pdev = tx_ring->pdev; - unsigned int len = skb_headlen(skb); - unsigned int count = 0, i; - unsigned int f; - - i = tx_ring->next_to_use; + /* set type for advanced descriptor with frame checksum insertion */ + u32 cmd_type = E1000_ADVTXD_DTYP_DATA | + E1000_ADVTXD_DCMD_DEXT | + E1000_ADVTXD_DCMD_IFCS; - buffer_info = &tx_ring->buffer_info[i]; - BUG_ON(len >= IGB_MAX_DATA_PER_TXD); - buffer_info->length = len; - /* set time_stamp *before* dma to help avoid a possible race */ - buffer_info->time_stamp = jiffies; - buffer_info->next_to_watch = i; - buffer_info->dma = pci_map_single(pdev, skb->data, len, - PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(pdev, buffer_info->dma)) - goto dma_error; + /* set HW vlan bit if vlan is present */ + cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_VLAN, + (E1000_ADVTXD_DCMD_VLE)); - for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { - struct skb_frag_struct *frag; + /* set segmentation bits for TSO */ + cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSO, + (E1000_ADVTXD_DCMD_TSE)); - i++; - if (i == tx_ring->count) - i = 0; + /* set timestamp bit if present */ + cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSTAMP, + (E1000_ADVTXD_MAC_TSTAMP)); - frag = &skb_shinfo(skb)->frags[f]; - len = frag->size; - - buffer_info = &tx_ring->buffer_info[i]; - BUG_ON(len >= IGB_MAX_DATA_PER_TXD); - buffer_info->length = len; - buffer_info->time_stamp = jiffies; - buffer_info->next_to_watch = i; - buffer_info->mapped_as_page = true; - buffer_info->dma = pci_map_page(pdev, - frag->page, - frag->page_offset, - len, - PCI_DMA_TODEVICE); - if
(pci_dma_mapping_error(pdev, buffer_info->dma)) - goto dma_error; + return cmd_type; +} - count++; - } +static void igb_tx_olinfo_status(struct igb_ring *tx_ring, + union e1000_adv_tx_desc *tx_desc, + u32 tx_flags, unsigned int paylen) +{ + u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT; - tx_ring->buffer_info[i].skb = skb; - tx_ring->buffer_info[i].gso_segs = skb_shinfo(skb)->gso_segs ?: 1; - tx_ring->buffer_info[first].next_to_watch = i; + /* 82575 requires a unique index per ring */ + if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) + olinfo_status |= tx_ring->reg_idx << 4; - return ++count; + /* insert L4 checksum */ + olinfo_status |= IGB_SET_FLAG(tx_flags, + IGB_TX_FLAGS_CSUM, + (E1000_TXD_POPTS_TXSM << 8)); -dma_error: - dev_err(&pdev->dev, "TX DMA map failed\n"); - - /* clear timestamp and dma mappings for failed buffer_info mapping */ - buffer_info->dma = 0; - buffer_info->time_stamp = 0; - buffer_info->next_to_watch = 0; - buffer_info->mapped_as_page = 0; - count--; - - /* clear timestamp and dma mappings for remaining portion of packet */ - while (count >= 0) { - count--; - i--; - if (i < 0) - i+= tx_ring->count; - buffer_info = &tx_ring->buffer_info[i]; - igb_unmap_and_free_tx_resource(tx_ring, buffer_info); - } + /* insert IPv4 checksum */ + olinfo_status |= IGB_SET_FLAG(tx_flags, + IGB_TX_FLAGS_IPV4, + (E1000_TXD_POPTS_IXSM << 8)); - return count; + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); } -static inline void igb_tx_queue_adv(struct igb_ring *tx_ring, - u32 tx_flags, int count, u32 paylen, - u8 hdr_len) +static void igb_tx_map(struct igb_ring *tx_ring, + struct igb_tx_buffer *first, + const u8 hdr_len) { + struct sk_buff *skb = first->skb; + struct igb_tx_buffer *tx_buffer; union e1000_adv_tx_desc *tx_desc; - struct igb_buffer *buffer_info; - u32 olinfo_status = 0, cmd_type_len; - unsigned int i = tx_ring->next_to_use; + struct skb_frag_struct *frag; + dma_addr_t dma; + unsigned int data_len, size; + u32 tx_flags = first->tx_flags; + u32 cmd_type = igb_tx_cmd_type(skb, tx_flags); + u16 i = tx_ring->next_to_use; - cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS | - E1000_ADVTXD_DCMD_DEXT); + tx_desc = IGB_TX_DESC(tx_ring, i); - if (tx_flags & IGB_TX_FLAGS_VLAN) - cmd_type_len |= E1000_ADVTXD_DCMD_VLE; + igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len); - if (tx_flags & IGB_TX_FLAGS_TSTAMP) - cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP; + size = skb_headlen(skb); + data_len = skb->data_len; - if (tx_flags & IGB_TX_FLAGS_TSO) { - cmd_type_len |= E1000_ADVTXD_DCMD_TSE; + dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); - /* insert tcp checksum */ - olinfo_status |= E1000_TXD_POPTS_TXSM << 8; + tx_buffer = first; - /* insert ip checksum */ - if (tx_flags & IGB_TX_FLAGS_IPV4) - olinfo_status |= E1000_TXD_POPTS_IXSM << 8; + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_error; - } else if (tx_flags & IGB_TX_FLAGS_CSUM) { - olinfo_status |= E1000_TXD_POPTS_TXSM << 8; - } + /* record length, and DMA address */ + dma_unmap_len_set(tx_buffer, len, size); + dma_unmap_addr_set(tx_buffer, dma, dma); - if ((tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) && - (tx_flags & (IGB_TX_FLAGS_CSUM | - IGB_TX_FLAGS_TSO | - IGB_TX_FLAGS_VLAN))) - olinfo_status |= tx_ring->reg_idx << 4; + tx_desc->read.buffer_addr = cpu_to_le64(dma); - olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT); + while (unlikely(size > IGB_MAX_DATA_PER_TXD)) { + 
tx_desc->read.cmd_type_len = + cpu_to_le32(cmd_type ^ IGB_MAX_DATA_PER_TXD); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = IGB_TX_DESC(tx_ring, 0); + i = 0; + } + tx_desc->read.olinfo_status = 0; + + dma += IGB_MAX_DATA_PER_TXD; + size -= IGB_MAX_DATA_PER_TXD; + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + } + + if (likely(!data_len)) + break; + + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size); - do { - buffer_info = &tx_ring->buffer_info[i]; - tx_desc = E1000_TX_DESC_ADV(*tx_ring, i); - tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); - tx_desc->read.cmd_type_len = - cpu_to_le32(cmd_type_len | buffer_info->length); - tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); - count--; i++; - if (i == tx_ring->count) + tx_desc++; + if (i == tx_ring->count) { + tx_desc = IGB_TX_DESC(tx_ring, 0); i = 0; - } while (count > 0); + } + tx_desc->read.olinfo_status = 0; + + size = skb_frag_size(frag); + data_len -= size; + + dma = skb_frag_dma_map(tx_ring->dev, frag, 0, + size, DMA_TO_DEVICE); + + tx_buffer = &tx_ring->tx_buffer_info[i]; + } + + /* write last descriptor with RS and EOP bits */ + cmd_type |= size | IGB_TXD_DCMD; + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); - tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_ADVTXD_DCMD); - /* Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. (Only - * applicable for weak-ordered memory model archs, - * such as IA-64). */ + netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); + /* set the timestamp */ + first->time_stamp = jiffies; + + /* + * Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. (Only applicable for weak-ordered + * memory model archs, such as IA-64). + * + * We also need this memory barrier to make certain all of the + * status bits have been updated before next_to_watch is written. + */ wmb(); + /* set next_to_watch value indicating a packet is present */ + first->next_to_watch = tx_desc; + + i++; + if (i == tx_ring->count) + i = 0; + tx_ring->next_to_use = i; + writel(i, tx_ring->tail); + /* we need this if more than one processor can write to our tail * at a time, it syncronizes IO on IA64/Altix systems */ mmiowb(); + + return; + +dma_error: + dev_err(tx_ring->dev, "TX DMA map failed\n"); + + /* clear dma mappings for failed tx_buffer_info map */ + for (;;) { + tx_buffer = &tx_ring->tx_buffer_info[i]; + igb_unmap_and_free_tx_resource(tx_ring, tx_buffer); + if (tx_buffer == first) + break; + if (i == 0) + i = tx_ring->count; + i--; + } + + tx_ring->next_to_use = i; } -static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size) +static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size) { - struct net_device *netdev = tx_ring->netdev; + struct net_device *netdev = netdev_ring(tx_ring); if (netif_is_multiqueue(netdev)) - netif_stop_subqueue(netdev, tx_ring->queue_index); + netif_stop_subqueue(netdev, ring_queue_index(tx_ring)); else netif_stop_queue(netdev); @@ -4010,7 +5044,7 @@ static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size) /* A reprieve! 
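__igb_maybe_stop_tx(), starting above and finishing in the next hunk, is the usual lock-free handshake between the transmit and cleanup paths: publish the stopped state first, then re-check free descriptors in case cleanup freed some in between. The pattern in miniature, with a hypothetical desc_unused() helper and a single-queue netdev for brevity (a sketch, not the driver's exact code):

netif_stop_queue(netdev);       /* publish "full" before re-checking */
smp_mb();                       /* order the stop vs. the count re-read */
if (desc_unused(tx_ring) < needed)
        return -EBUSY;          /* still full; the cleanup path wakes us */

netif_wake_queue(netdev);       /* a reprieve: space freed meanwhile */
return 0;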
*/ if (netif_is_multiqueue(netdev)) - netif_wake_subqueue(netdev, tx_ring->queue_index); + netif_wake_subqueue(netdev, ring_queue_index(tx_ring)); else netif_wake_queue(netdev); @@ -4019,97 +5053,118 @@ static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size) return 0; } -static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size) +static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size) { if (igb_desc_unused(tx_ring) >= size) return 0; return __igb_maybe_stop_tx(tx_ring, size); } -netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb, - struct igb_ring *tx_ring) +netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, + struct igb_ring *tx_ring) { - struct igb_adapter *adapter = netdev_priv(tx_ring->netdev); - int tso = 0, count; + struct igb_tx_buffer *first; + int tso; u32 tx_flags = 0; - u16 first; - u8 hdr_len = 0; -#ifdef SIOCSHWTSTAMP - union skb_shared_tx *shtx = skb_tx(skb); +#if PAGE_SIZE > IGB_MAX_DATA_PER_TXD + unsigned short f; #endif + u16 count = TXD_USE_COUNT(skb_headlen(skb)); + __be16 protocol = vlan_get_protocol(skb); + u8 hdr_len = 0; - /* need: 1 descriptor per page, + /* + * need: 1 descriptor per page * PAGE_SIZE/IGB_MAX_DATA_PER_TXD, + * + 1 desc for skb_headlen/IGB_MAX_DATA_PER_TXD, * + 2 desc gap to keep tail from touching head, - * + 1 desc for skb->data, * + 1 desc for context descriptor, - * otherwise try next time */ - if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) { + * otherwise try next time + */ +#if PAGE_SIZE > IGB_MAX_DATA_PER_TXD + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) + count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); +#else + count += skb_shinfo(skb)->nr_frags; +#endif + if (igb_maybe_stop_tx(tx_ring, count + 3)) { /* this is a hard error */ return NETDEV_TX_BUSY; } -#ifdef SIOCSHWTSTAMP - if (unlikely(shtx->hardware)) { - shtx->in_progress = 1; - tx_flags |= IGB_TX_FLAGS_TSTAMP; + /* record the location of the first descriptor for this packet */ + first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; + first->skb = skb; + first->bytecount = skb->len; + first->gso_segs = 1; + + skb_tx_timestamp(skb); + +#ifdef HAVE_PTP_1588_CLOCK + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { + struct igb_adapter *adapter = netdev_priv(tx_ring->netdev); + if (!adapter->ptp_tx_skb) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + tx_flags |= IGB_TX_FLAGS_TSTAMP; + + adapter->ptp_tx_skb = skb_get(skb); + adapter->ptp_tx_start = jiffies; + if (adapter->hw.mac.type == e1000_82576) + schedule_work(&adapter->ptp_tx_work); + } } +#endif /* HAVE_PTP_1588_CLOCK */ -#endif - if (vlan_tx_tag_present(skb) && adapter->vlgrp) { + if (vlan_tx_tag_present(skb)) { tx_flags |= IGB_TX_FLAGS_VLAN; tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT); } - if (skb->protocol == htons(ETH_P_IP)) - tx_flags |= IGB_TX_FLAGS_IPV4; + /* record initial flags and protocol */ + first->tx_flags = tx_flags; + first->protocol = protocol; - first = tx_ring->next_to_use; -#ifdef NETIF_F_TSO - if (skb_is_gso(skb)) { - tso = igb_tso_adv(tx_ring, skb, tx_flags, &hdr_len); + tso = igb_tso(tx_ring, first, &hdr_len); + if (tso < 0) + goto out_drop; + else if (!tso) + igb_tx_csum(tx_ring, first); - if (tso < 0) { - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } - } + igb_tx_map(tx_ring, first, hdr_len); + +#ifndef HAVE_TRANS_START_IN_QUEUE + netdev_ring(tx_ring)->trans_start = jiffies; #endif - if (tso) - tx_flags |= IGB_TX_FLAGS_TSO; - else if (igb_tx_csum_adv(tx_ring, skb, tx_flags) && - 
(skb->ip_summed == CHECKSUM_PARTIAL)) - tx_flags |= IGB_TX_FLAGS_CSUM; + /* Make sure there is space in the ring for the next send. */ + igb_maybe_stop_tx(tx_ring, DESC_NEEDED); - /* - * count reflects descriptors mapped, if 0 or less then mapping error - * has occured and we need to rewind the descriptor queue - */ - count = igb_tx_map_adv(tx_ring, skb, first); - if (count <= 0) { - dev_kfree_skb_any(skb); - tx_ring->buffer_info[first].time_stamp = 0; - tx_ring->next_to_use = first; - return NETDEV_TX_OK; - } + return NETDEV_TX_OK; - igb_tx_queue_adv(tx_ring, tx_flags, count, skb->len, hdr_len); +out_drop: + igb_unmap_and_free_tx_resource(tx_ring, first); + + return NETDEV_TX_OK; +} - tx_ring->netdev->trans_start = jiffies; +#ifdef HAVE_TX_MQ +static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter, + struct sk_buff *skb) +{ + unsigned int r_idx = skb->queue_mapping; - /* Make sure there is space in the ring for the next send. */ - igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4); + if (r_idx >= adapter->num_tx_queues) + r_idx = r_idx % adapter->num_tx_queues; - return NETDEV_TX_OK; + return adapter->tx_ring[r_idx]; } +#else +#define igb_tx_queue_mapping(_adapter, _skb) (_adapter)->tx_ring[0] +#endif -static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, - struct net_device *netdev) +static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) { struct igb_adapter *adapter = netdev_priv(netdev); -#ifdef HAVE_TX_MQ - int r_idx = skb->queue_mapping & (IGB_MAX_TX_QUEUES - 1); -#endif if (test_bit(__IGB_DOWN, &adapter->state)) { dev_kfree_skb_any(skb); @@ -4121,15 +5176,17 @@ static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, return NETDEV_TX_OK; } - /* This goes back to the question of how to logically map a tx queue - * to a flow. Right now, performance is impacted slightly negatively - * if using multiple tx queues. If the stack breaks away from a - * single qdisc implementation, we can look at this again. */ -#ifdef HAVE_TX_MQ - return igb_xmit_frame_ring_adv(skb, adapter->multi_tx_table[r_idx]); -#else - return igb_xmit_frame_ring_adv(skb, adapter->tx_ring[0]); -#endif + /* + * The minimum packet size with TCTL.PSP set is 17 so pad the skb + * in order to meet this minimum size requirement. + */ + if (skb->len < 17) { + if (skb_padto(skb, 17)) + return NETDEV_TX_OK; + skb->len = 17; + } + + return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb)); } /** @@ -4144,7 +5201,7 @@ static void igb_tx_timeout(struct net_device *netdev) /* Do the reset outside of interrupt context */ adapter->tx_timeout_count++; - if (hw->mac.type == e1000_82580) + if (hw->mac.type >= e1000_82580) hw->dev_spec._82575.global_device_reset = true; schedule_work(&adapter->reset_task); @@ -4165,16 +5222,19 @@ static void igb_reset_task(struct work_struct *work) * @netdev: network interface device structure * * Returns the address of the device statistics structure. - * The statistics are actually updated from the timer callback. + * The statistics are updated here and also from the timer callback. 
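The new 17-byte floor in igb_xmit_frame() exists because TCTL.PSP has the MAC pad frames out to the 64-byte Ethernet minimum, but only frames that are already at least 17 bytes long; anything shorter must be padded in software first. Annotated restatement of that check (skb_padto() zero-fills the tail and frees the skb on allocation failure):

if (skb->len < 17) {
        if (skb_padto(skb, 17))  /* on failure the skb is already freed */
                return NETDEV_TX_OK;
        skb->len = 17;           /* skb_padto() does not update skb->len */
}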
**/ static struct net_device_stats *igb_get_stats(struct net_device *netdev) { + struct igb_adapter *adapter = netdev_priv(netdev); + + if (!test_bit(__IGB_RESETTING, &adapter->state)) + igb_update_stats(adapter); + #ifdef HAVE_NETDEV_STATS_IN_NETDEV /* only return the current stats */ return &netdev->stats; #else - struct igb_adapter *adapter = netdev_priv(netdev); - /* only return the current stats */ return &adapter->net_stats; #endif /* HAVE_NETDEV_STATS_IN_NETDEV */ @@ -4190,75 +5250,38 @@ static struct net_device_stats *igb_get_stats(struct net_device *netdev) static int igb_change_mtu(struct net_device *netdev, int new_mtu) { struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; struct pci_dev *pdev = adapter->pdev; - int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_TAG_SIZE; - u32 rx_buffer_len, i; + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) { - dev_err(&pdev->dev, "Invalid MTU setting\n"); + dev_err(pci_dev_to_dev(pdev), "Invalid MTU setting\n"); return -EINVAL; } #define MAX_STD_JUMBO_FRAME_SIZE 9238 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) { - dev_err(&pdev->dev, "MTU > 9216 not supported.\n"); + dev_err(pci_dev_to_dev(pdev), "MTU > 9216 not supported.\n"); return -EINVAL; } + /* adjust max frame to be at least the size of a standard frame */ + if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) + max_frame = ETH_FRAME_LEN + ETH_FCS_LEN; + while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) - msleep(1); + usleep_range(1000, 2000); /* igb_down has a dependency on max_frame_size */ adapter->max_frame_size = max_frame; - /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN - * means we reserve 2 more, this pushes us to allocate from the next - * larger slab size. - * i.e. 
RXBUFFER_2048 --> size-4096 slab - */ - -#ifdef IGB_PER_PKT_TIMESTAMP - if (adapter->hw.mac.type == e1000_82580) - max_frame += IGB_TS_HDR_LEN; - -#endif - if (max_frame <= IGB_RXBUFFER_1024) - rx_buffer_len = IGB_RXBUFFER_1024; - else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE) - rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; -#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT - else - rx_buffer_len = IGB_RXBUFFER_128; -#else - else if (max_frame <= IGB_RXBUFFER_2048) - rx_buffer_len = IGB_RXBUFFER_2048; - else if (max_frame <= IGB_RXBUFFER_4096) - rx_buffer_len = IGB_RXBUFFER_4096; - else if (max_frame <= IGB_RXBUFFER_8192) - rx_buffer_len = IGB_RXBUFFER_8192; - else - rx_buffer_len = IGB_RXBUFFER_16384; -#endif - -#ifdef IGB_PER_PKT_TIMESTAMP - if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN + IGB_TS_HDR_LEN) || - (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE + IGB_TS_HDR_LEN)) - rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE + IGB_TS_HDR_LEN; - - if ((adapter->hw.mac.type == e1000_82580) && - (rx_buffer_len == IGB_RXBUFFER_128)) - rx_buffer_len += IGB_RXBUFFER_64; - -#endif if (netif_running(netdev)) igb_down(adapter); - dev_info(&pdev->dev, "changing MTU from %d to %d\n", + dev_info(pci_dev_to_dev(pdev), "changing MTU from %d to %d\n", netdev->mtu, new_mtu); netdev->mtu = new_mtu; - - for (i = 0; i < adapter->num_rx_queues; i++) - adapter->rx_ring[i]->rx_buffer_len = rx_buffer_len; + hw->dev_spec._82575.mtu = new_mtu; if (netif_running(netdev)) igb_up(adapter); @@ -4277,19 +5300,19 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu) void igb_update_stats(struct igb_adapter *adapter) { - struct net_device_stats *net_stats = igb_get_stats(adapter->netdev); +#ifdef HAVE_NETDEV_STATS_IN_NETDEV + struct net_device_stats *net_stats = &adapter->netdev->stats; +#else + struct net_device_stats *net_stats = &adapter->net_stats; +#endif /* HAVE_NETDEV_STATS_IN_NETDEV */ struct e1000_hw *hw = &adapter->hw; #ifdef HAVE_PCI_ERS struct pci_dev *pdev = adapter->pdev; #endif - u32 rnbc, reg; + u32 reg, mpc; u16 phy_tmp; int i; u64 bytes, packets; -#ifdef IGB_LRO - u32 flushed = 0, coal = 0, recycled = 0; - struct igb_q_vector *q_vector; -#endif #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF @@ -4303,20 +5326,6 @@ void igb_update_stats(struct igb_adapter *adapter) if (pci_channel_offline(pdev)) return; -#endif -#ifdef IGB_LRO - for (i = 0; i < adapter->num_q_vectors; i++) { - q_vector = adapter->q_vector[i]; - if (!q_vector || !q_vector->lrolist) - continue; - flushed += q_vector->lrolist->stats.flushed; - coal += q_vector->lrolist->stats.coal; - recycled += q_vector->lrolist->stats.recycled; - } - adapter->lro_stats.flushed = flushed; - adapter->lro_stats.coal = coal; - adapter->lro_stats.recycled = recycled; - #endif bytes = 0; packets = 0; @@ -4325,8 +5334,15 @@ void igb_update_stats(struct igb_adapter *adapter) struct igb_ring *ring = adapter->rx_ring[i]; ring->rx_stats.drops += rqdpc_tmp; net_stats->rx_fifo_errors += rqdpc_tmp; +#ifdef CONFIG_IGB_VMDQ_NETDEV + if (!ring->vmdq_netdev) { + bytes += ring->rx_stats.bytes; + packets += ring->rx_stats.packets; + } +#else bytes += ring->rx_stats.bytes; packets += ring->rx_stats.packets; +#endif } net_stats->rx_bytes = bytes; @@ -4336,8 +5352,15 @@ void igb_update_stats(struct igb_adapter *adapter) packets = 0; for (i = 0; i < adapter->num_tx_queues; i++) { struct igb_ring *ring = adapter->tx_ring[i]; +#ifdef CONFIG_IGB_VMDQ_NETDEV + if (!ring->vmdq_netdev) { + bytes += ring->tx_stats.bytes; + packets += ring->tx_stats.packets; + } +#else bytes += 
ring->tx_stats.bytes; packets += ring->tx_stats.packets; +#endif } net_stats->tx_bytes = bytes; net_stats->tx_packets = packets; @@ -4360,7 +5383,9 @@ void igb_update_stats(struct igb_adapter *adapter) adapter->stats.symerrs += E1000_READ_REG(hw, E1000_SYMERRS); adapter->stats.sec += E1000_READ_REG(hw, E1000_SEC); - adapter->stats.mpc += E1000_READ_REG(hw, E1000_MPC); + mpc = E1000_READ_REG(hw, E1000_MPC); + adapter->stats.mpc += mpc; + net_stats->rx_fifo_errors += mpc; adapter->stats.scc += E1000_READ_REG(hw, E1000_SCC); adapter->stats.ecol += E1000_READ_REG(hw, E1000_ECOL); adapter->stats.mcc += E1000_READ_REG(hw, E1000_MCC); @@ -4375,9 +5400,7 @@ void igb_update_stats(struct igb_adapter *adapter) adapter->stats.gptc += E1000_READ_REG(hw, E1000_GPTC); adapter->stats.gotc += E1000_READ_REG(hw, E1000_GOTCL); E1000_READ_REG(hw, E1000_GOTCH); /* clear GOTCL */ - rnbc = E1000_READ_REG(hw, E1000_RNBC); - adapter->stats.rnbc += rnbc; - net_stats->rx_fifo_errors += rnbc; + adapter->stats.rnbc += E1000_READ_REG(hw, E1000_RNBC); adapter->stats.ruc += E1000_READ_REG(hw, E1000_RUC); adapter->stats.rfc += E1000_READ_REG(hw, E1000_RFC); adapter->stats.rjc += E1000_READ_REG(hw, E1000_RJC); @@ -4395,20 +5418,20 @@ void igb_update_stats(struct igb_adapter *adapter) adapter->stats.mptc += E1000_READ_REG(hw, E1000_MPTC); adapter->stats.bptc += E1000_READ_REG(hw, E1000_BPTC); - /* used for adaptive IFS */ - hw->mac.tx_packet_delta = E1000_READ_REG(hw, E1000_TPT); - adapter->stats.tpt += hw->mac.tx_packet_delta; - hw->mac.collision_delta = E1000_READ_REG(hw, E1000_COLC); - adapter->stats.colc += hw->mac.collision_delta; + adapter->stats.tpt += E1000_READ_REG(hw, E1000_TPT); + adapter->stats.colc += E1000_READ_REG(hw, E1000_COLC); adapter->stats.algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC); /* read internal phy sepecific stats */ reg = E1000_READ_REG(hw, E1000_CTRL_EXT); if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) { adapter->stats.rxerrc += E1000_READ_REG(hw, E1000_RXERRC); - adapter->stats.tncrs += E1000_READ_REG(hw, E1000_TNCRS); - } + /* this stat has invalid values on i210/i211 */ + if ((hw->mac.type != e1000_i210) && + (hw->mac.type != e1000_i211)) + adapter->stats.tncrs += E1000_READ_REG(hw, E1000_TNCRS); + } adapter->stats.tsctc += E1000_READ_REG(hw, E1000_TSCTC); adapter->stats.tsctfc += E1000_READ_REG(hw, E1000_TSCTFC); @@ -4435,14 +5458,14 @@ void igb_update_stats(struct igb_adapter *adapter) adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr; net_stats->rx_length_errors = adapter->stats.ruc + - adapter->stats.roc; + adapter->stats.roc; net_stats->rx_crc_errors = adapter->stats.crcerrs; net_stats->rx_frame_errors = adapter->stats.algnerrc; net_stats->rx_missed_errors = adapter->stats.mpc; /* Tx Errors */ net_stats->tx_errors = adapter->stats.ecol + - adapter->stats.latecol; + adapter->stats.latecol; net_stats->tx_aborted_errors = adapter->stats.ecol; net_stats->tx_window_errors = adapter->stats.latecol; net_stats->tx_carrier_errors = adapter->stats.tncrs; @@ -4461,7 +5484,12 @@ void igb_update_stats(struct igb_adapter *adapter) /* Management Stats */ adapter->stats.mgptc += E1000_READ_REG(hw, E1000_MGTPTC); adapter->stats.mgprc += E1000_READ_REG(hw, E1000_MGTPRC); - adapter->stats.mgpdc += E1000_READ_REG(hw, E1000_MGTPDC); + if (hw->mac.type > e1000_82580) { + adapter->stats.o2bgptc += E1000_READ_REG(hw, E1000_O2BGPTC); + adapter->stats.o2bspc += E1000_READ_REG(hw, E1000_O2BSPC); + adapter->stats.b2ospc += E1000_READ_REG(hw, E1000_B2OSPC); + adapter->stats.b2ogprc += 
E1000_READ_REG(hw, E1000_B2OGPRC); + } } static irqreturn_t igb_msix_other(int irq, void *data) @@ -4490,13 +5518,23 @@ static irqreturn_t igb_msix_other(int irq, void *data) mod_timer(&adapter->watchdog_timer, jiffies + 1); } - if (adapter->vfs_allocated_count) - E1000_WRITE_REG(hw, E1000_IMS, E1000_IMS_LSC | - E1000_IMS_VMMB | - E1000_IMS_DOUTSYNC); - else - E1000_WRITE_REG(hw, E1000_IMS, E1000_IMS_LSC | - E1000_IMS_DOUTSYNC); +#ifdef HAVE_PTP_1588_CLOCK + if (icr & E1000_ICR_TS) { + u32 tsicr = E1000_READ_REG(hw, E1000_TSICR); + + if (tsicr & E1000_TSICR_TXTS) { + /* acknowledge the interrupt */ + E1000_WRITE_REG(hw, E1000_TSICR, E1000_TSICR_TXTS); + /* retrieve hardware timestamp */ + schedule_work(&adapter->ptp_tx_work); + } + } +#endif /* HAVE_PTP_1588_CLOCK */ + + /* Check for MDD event */ + if (icr & E1000_ICR_MDDET) + igb_process_mdd_event(adapter); + E1000_WRITE_REG(hw, E1000_EIMS, adapter->eims_other); return IRQ_HANDLED; @@ -4516,7 +5554,7 @@ static void igb_write_itr(struct igb_q_vector *q_vector) if (adapter->hw.mac.type == e1000_82575) itr_val |= itr_val << 16; else - itr_val |= 0x80000000; + itr_val |= E1000_EITR_CNT_IGNR; writel(itr_val, q_vector->itr_register); q_vector->set_itr = 0; @@ -4535,45 +5573,63 @@ static irqreturn_t igb_msix_ring(int irq, void *data) } #ifdef IGB_DCA +static void igb_update_tx_dca(struct igb_adapter *adapter, + struct igb_ring *tx_ring, + int cpu) +{ + struct e1000_hw *hw = &adapter->hw; + u32 txctrl = dca3_get_tag(tx_ring->dev, cpu); + + if (hw->mac.type != e1000_82575) + txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT_82576; + + /* + * We can enable relaxed ordering for reads, but not writes when + * DCA is enabled. This is due to a known issue in some chipsets + * which will cause the DCA tag to be cleared. + */ + txctrl |= E1000_DCA_TXCTRL_DESC_RRO_EN | + E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_DESC_DCA_EN; + + E1000_WRITE_REG(hw, E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl); +} + +static void igb_update_rx_dca(struct igb_adapter *adapter, + struct igb_ring *rx_ring, + int cpu) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu); + + if (hw->mac.type != e1000_82575) + rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT_82576; + + /* + * We can enable relaxed ordering for reads, but not writes when + * DCA is enabled. This is due to a known issue in some chipsets + * which will cause the DCA tag to be cleared. 
+ */ + rxctrl |= E1000_DCA_RXCTRL_DESC_RRO_EN | + E1000_DCA_RXCTRL_DESC_DCA_EN; + + E1000_WRITE_REG(hw, E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl); +} + static void igb_update_dca(struct igb_q_vector *q_vector) { struct igb_adapter *adapter = q_vector->adapter; - struct e1000_hw *hw = &adapter->hw; int cpu = get_cpu(); if (q_vector->cpu == cpu) goto out_no_update; - if (q_vector->tx_ring) { - int q = q_vector->tx_ring->reg_idx; - u32 dca_txctrl = E1000_READ_REG(hw, E1000_DCA_TXCTRL(q)); - if (hw->mac.type == e1000_82575) { - dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK; - dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); - } else { - dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576; - dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) << - E1000_DCA_TXCTRL_CPUID_SHIFT_82576; - } - dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN; - E1000_WRITE_REG(hw, E1000_DCA_TXCTRL(q), dca_txctrl); - } - if (q_vector->rx_ring) { - int q = q_vector->rx_ring->reg_idx; - u32 dca_rxctrl = E1000_READ_REG(hw, E1000_DCA_RXCTRL(q)); - if (hw->mac.type == e1000_82575) { - dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK; - dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); - } else { - dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576; - dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) << - E1000_DCA_RXCTRL_CPUID_SHIFT_82576; - } - dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN; - dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN; - dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN; - E1000_WRITE_REG(hw, E1000_DCA_RXCTRL(q), dca_rxctrl); - } + if (q_vector->tx.ring) + igb_update_tx_dca(adapter, q_vector->tx.ring, cpu); + + if (q_vector->rx.ring) + igb_update_rx_dca(adapter, q_vector->rx.ring, cpu); + q_vector->cpu = cpu; out_no_update: put_cpu(); @@ -4611,7 +5667,7 @@ static int __igb_notify_dca(struct device *dev, void *data) break; if (dca_add_requester(dev) == E1000_SUCCESS) { adapter->flags |= IGB_FLAG_DCA_ENABLED; - dev_info(&pdev->dev, "DCA enabled\n"); + dev_info(pci_dev_to_dev(pdev), "DCA enabled\n"); igb_setup_dca(adapter); break; } @@ -4621,10 +5677,9 @@ static int __igb_notify_dca(struct device *dev, void *data) /* without this a class_device is left * hanging around in the sysfs model */ dca_remove_requester(dev); - dev_info(&pdev->dev, "DCA disabled\n"); + dev_info(pci_dev_to_dev(pdev), "DCA disabled\n"); adapter->flags &= ~IGB_FLAG_DCA_ENABLED; - E1000_WRITE_REG(hw, E1000_DCA_CTRL, - E1000_DCA_CTRL_DCA_DISABLE); + E1000_WRITE_REG(hw, E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_DISABLE); } break; } @@ -4644,6 +5699,23 @@ static int igb_notify_dca(struct notifier_block *nb, unsigned long event, } #endif /* IGB_DCA */ +static int igb_vf_configure(struct igb_adapter *adapter, int vf) +{ + unsigned char mac_addr[ETH_ALEN]; + + random_ether_addr(mac_addr); + igb_set_vf_mac(adapter, vf, mac_addr); + +#ifdef IFLA_VF_MAX +#ifdef HAVE_VF_SPOOFCHK_CONFIGURE + /* By default spoof check is enabled for all VFs */ + adapter->vf_data[vf].spoofchk_enabled = true; +#endif +#endif + + return true; +} + static void igb_ping_all_vfs(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; @@ -4658,6 +5730,42 @@ static void igb_ping_all_vfs(struct igb_adapter *adapter) } } +/** + * igb_mta_set_ - Set multicast filter table address + * @adapter: pointer to the adapter structure + * @hash_value: determines the MTA register and bit to set + * + * The multicast table address is a register array of 32-bit registers. 
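igb_mta_set(), introduced just below, reduces a multicast hash to a (register, bit) pair exactly as its comment describes. Worked example, assuming mta_reg_count = 128 (a typical value for these parts):

/* hash_value = 0x2A5:
 *   hash_reg = (0x2A5 >> 5) & (128 - 1) = 21
 *   hash_bit =  0x2A5 & 0x1F            = 5
 * so the filter sets bit 5 of MTA[21].
 */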
+ * The hash_value is used to determine what register the bit is in, the + * current value is read, the new bit is OR'd in and the new value is + * written back into the register. + **/ +void igb_mta_set(struct igb_adapter *adapter, u32 hash_value) +{ + struct e1000_hw *hw = &adapter->hw; + u32 hash_bit, hash_reg, mta; + + /* + * The MTA is a register array of 32-bit registers. It is + * treated like an array of (32*mta_reg_count) bits. We want to + * set bit BitArray[hash_value]. So we figure out what register + * the bit is in, read it, OR in the new bit, then write + * back the new value. The (hw->mac.mta_reg_count - 1) serves as a + * mask to bits 31:5 of the hash value which gives us the + * register we're modifying. The hash bit within that register + * is determined by the lower 5 bits of the hash value. + */ + hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); + hash_bit = hash_value & 0x1F; + + mta = E1000_READ_REG_ARRAY(hw, E1000_MTA, hash_reg); + + mta |= (1 << hash_bit); + + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, hash_reg, mta); + E1000_WRITE_FLUSH(hw); +} + static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) { @@ -4665,7 +5773,7 @@ static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) u32 vmolr = E1000_READ_REG(hw, E1000_VMOLR(vf)); struct vf_data_storage *vf_data = &adapter->vf_data[vf]; - vf_data->flags |= ~(IGB_VF_FLAG_UNI_PROMISC | + vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC | IGB_VF_FLAG_MULTI_PROMISC); vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME); @@ -4678,6 +5786,7 @@ static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) #endif if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) { vmolr |= E1000_VMOLR_MPME; + vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC; *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST; } else { /* @@ -4691,8 +5800,7 @@ static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) int j; vmolr |= E1000_VMOLR_ROMPE; for (j = 0; j < vf_data->num_vf_mc_hashes; j++) - hw->mac.ops.mta_set(hw, - vf_data->vf_mc_hashes[j]); + igb_mta_set(adapter, vf_data->vf_mc_hashes[j]); } } @@ -4752,8 +5860,7 @@ static void igb_restore_vf_multicasts(struct igb_adapter *adapter) } else if (vf_data->num_vf_mc_hashes) { vmolr |= E1000_VMOLR_ROMPE; for (j = 0; j < vf_data->num_vf_mc_hashes; j++) - hw->mac.ops.mta_set(hw, - vf_data->vf_mc_hashes[j]); + igb_mta_set(adapter, vf_data->vf_mc_hashes[j]); } E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr); } @@ -4780,7 +5887,7 @@ static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf) (reg & E1000_VLVF_VLANID_ENABLE)) { reg = 0; vid = reg & E1000_VLVF_VLANID_MASK; - igb_vfta_set(hw, vid, FALSE); + igb_vfta_set(adapter, vid, FALSE); } E1000_WRITE_REG(hw, E1000_VLVF(i), reg); @@ -4833,7 +5940,7 @@ s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf) /* if !enabled we need to set this up in vfta */ if (!(reg & E1000_VLVF_VLANID_ENABLE)) { /* add VID to filter table */ - igb_vfta_set(hw, vid, TRUE); + igb_vfta_set(adapter, vid, TRUE); reg |= E1000_VLVF_VLANID_ENABLE; } reg &= ~E1000_VLVF_VLANID_MASK; @@ -4855,7 +5962,6 @@ s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf) } adapter->vf_data[vf].vlans_enabled++; - return E1000_SUCCESS; } } else { if (i < E1000_VLVF_ARRAY_SIZE) { @@ -4864,7 +5970,7 @@ s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf) /* if pool is empty then remove entry from vfta */ if (!(reg & E1000_VLVF_POOLSEL_MASK)) { reg = 0; - 
igb_vfta_set(hw, vid, FALSE); + igb_vfta_set(adapter, vid, FALSE); } E1000_WRITE_REG(hw, E1000_VLVF(i), reg); @@ -4882,37 +5988,212 @@ s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf) reg |= size; E1000_WRITE_REG(hw, E1000_VMOLR(vf), reg); } - return E1000_SUCCESS; } } - return -1; + return E1000_SUCCESS; +} + +#ifdef IFLA_VF_MAX +static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf) +{ + struct e1000_hw *hw = &adapter->hw; + + if (vid) + E1000_WRITE_REG(hw, E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT)); + else + E1000_WRITE_REG(hw, E1000_VMVIR(vf), 0); +} + +static int igb_ndo_set_vf_vlan(struct net_device *netdev, + int vf, u16 vlan, u8 qos) +{ + int err = 0; + struct igb_adapter *adapter = netdev_priv(netdev); + + /* VLAN IDs accepted range 0-4094 */ + if ((vf >= adapter->vfs_allocated_count) || (vlan > VLAN_VID_MASK-1) || (qos > 7)) + return -EINVAL; + if (vlan || qos) { + err = igb_vlvf_set(adapter, vlan, !!vlan, vf); + if (err) + goto out; + igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf); + igb_set_vmolr(adapter, vf, !vlan); + adapter->vf_data[vf].pf_vlan = vlan; + adapter->vf_data[vf].pf_qos = qos; + igb_set_vf_vlan_strip(adapter, vf, true); + dev_info(&adapter->pdev->dev, + "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); + if (test_bit(__IGB_DOWN, &adapter->state)) { + dev_warn(&adapter->pdev->dev, + "The VF VLAN has been set," + " but the PF device is not up.\n"); + dev_warn(&adapter->pdev->dev, + "Bring the PF device up before" + " attempting to use the VF device.\n"); + } + } else { + if (adapter->vf_data[vf].pf_vlan) + dev_info(&adapter->pdev->dev, + "Clearing VLAN on VF %d\n", vf); + igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan, + false, vf); + igb_set_vmvir(adapter, vlan, vf); + igb_set_vmolr(adapter, vf, true); + igb_set_vf_vlan_strip(adapter, vf, false); + adapter->vf_data[vf].pf_vlan = 0; + adapter->vf_data[vf].pf_qos = 0; + } +out: + return err; +} + +#ifdef HAVE_VF_SPOOFCHK_CONFIGURE +static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, + bool setting) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 dtxswc, reg_offset; + + if (!adapter->vfs_allocated_count) + return -EOPNOTSUPP; + + if (vf >= adapter->vfs_allocated_count) + return -EINVAL; + + reg_offset = (hw->mac.type == e1000_82576) ? 
E1000_DTXSWC : E1000_TXSWC; + dtxswc = E1000_READ_REG(hw, reg_offset); + if (setting) + dtxswc |= ((1 << vf) | + (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT))); + else + dtxswc &= ~((1 << vf) | + (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT))); + E1000_WRITE_REG(hw, reg_offset, dtxswc); + + adapter->vf_data[vf].spoofchk_enabled = setting; + return E1000_SUCCESS; +} +#endif /* HAVE_VF_SPOOFCHK_CONFIGURE */ +#endif /* IFLA_VF_MAX */ + +static int igb_find_vlvf_entry(struct igb_adapter *adapter, int vid) +{ + struct e1000_hw *hw = &adapter->hw; + int i; + u32 reg; + + /* Find the vlan filter for this id */ + for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) { + reg = E1000_READ_REG(hw, E1000_VLVF(i)); + if ((reg & E1000_VLVF_VLANID_ENABLE) && + vid == (reg & E1000_VLVF_VLANID_MASK)) + break; + } + + if (i >= E1000_VLVF_ARRAY_SIZE) + i = -1; + + return i; } static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) { + struct e1000_hw *hw = &adapter->hw; int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT; int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK); + int err = 0; + + if (vid) + igb_set_vf_vlan_strip(adapter, vf, true); + else + igb_set_vf_vlan_strip(adapter, vf, false); + + /* If in promiscuous mode we need to make sure the PF also has + * the VLAN filter set. + */ + if (add && (adapter->netdev->flags & IFF_PROMISC)) + err = igb_vlvf_set(adapter, vid, add, + adapter->vfs_allocated_count); + if (err) + goto out; + + err = igb_vlvf_set(adapter, vid, add, vf); + + if (err) + goto out; + + /* Go through all the checks to see if the VLAN filter should + * be wiped completely. + */ + if (!add && (adapter->netdev->flags & IFF_PROMISC)) { + u32 vlvf, bits; - return igb_vlvf_set(adapter, vid, add, vf); + int regndx = igb_find_vlvf_entry(adapter, vid); + if (regndx < 0) + goto out; + /* See if any other pools are set for this VLAN filter + * entry other than the PF. + */ + vlvf = bits = E1000_READ_REG(hw, E1000_VLVF(regndx)); + bits &= 1 << (E1000_VLVF_POOLSEL_SHIFT + + adapter->vfs_allocated_count); + /* If the filter was removed then ensure PF pool bit + * is cleared if the PF only added itself to the pool + * because the PF is in promiscuous mode. 
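+ * (The PF itself occupies pool index adapter->vfs_allocated_count, so the + * bit tested above is VLVF bit E1000_VLVF_POOLSEL_SHIFT + vfs_allocated_count, + * e.g. bit 12 + 7 == 19 with seven VFs, assuming the usual shift of 12.)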
+ */ + if ((vlvf & VLAN_VID_MASK) == vid && +#ifndef HAVE_VLAN_RX_REGISTER + !test_bit(vid, adapter->active_vlans) && +#endif + !bits) + igb_vlvf_set(adapter, vid, add, + adapter->vfs_allocated_count); + } + +out: + return err; } static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf) { - /* clear all flags */ - adapter->vf_data[vf].flags = 0; + struct e1000_hw *hw = &adapter->hw; + + /* clear flags except flag that the PF has set the MAC */ + adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC; adapter->vf_data[vf].last_nack = jiffies; /* reset offloads to defaults */ - igb_set_vmolr(adapter, vf); + igb_set_vmolr(adapter, vf, true); /* reset vlans for device */ igb_clear_vf_vfta(adapter, vf); +#ifdef IFLA_VF_MAX + if (adapter->vf_data[vf].pf_vlan) + igb_ndo_set_vf_vlan(adapter->netdev, vf, + adapter->vf_data[vf].pf_vlan, + adapter->vf_data[vf].pf_qos); + else + igb_clear_vf_vfta(adapter, vf); +#endif /* reset multicast table array for vf */ adapter->vf_data[vf].num_vf_mc_hashes = 0; /* Flush and reset the mta with the new values */ igb_set_rx_mode(adapter->netdev); + + /* + * Reset the VFs TDWBAL and TDWBAH registers which are not + * cleared by a VFLR + */ + E1000_WRITE_REG(hw, E1000_TDWBAH(vf), 0); + E1000_WRITE_REG(hw, E1000_TDWBAL(vf), 0); + if (hw->mac.type == e1000_82576) { + E1000_WRITE_REG(hw, E1000_TDWBAH(IGB_MAX_VF_FUNCTIONS + vf), 0); + E1000_WRITE_REG(hw, E1000_TDWBAL(IGB_MAX_VF_FUNCTIONS + vf), 0); + } } static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf) @@ -4920,7 +6201,8 @@ static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf) unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; /* generate a new mac address as we were hotplug removed/added */ - random_ether_addr(vf_mac); + if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC)) + random_ether_addr(vf_mac); /* process remaining reset events */ igb_vf_reset(adapter, vf); @@ -4930,7 +6212,6 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf) { struct e1000_hw *hw = &adapter->hw; unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; - int rar_entry = hw->mac.rar_entry_count - (vf + 1); u32 reg, msgbuf[3]; u8 *addr = (u8 *)(&msgbuf[1]); @@ -4938,7 +6219,8 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf) igb_vf_reset(adapter, vf); /* set vf mac address */ - igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf); + igb_del_mac_filter(adapter, vf_mac, vf); + igb_add_mac_filter(adapter, vf_mac, vf); /* enable transmit and receive for vf */ reg = E1000_READ_REG(hw, E1000_VFTE); @@ -4946,7 +6228,7 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf) reg = E1000_READ_REG(hw, E1000_VFRE); E1000_WRITE_REG(hw, E1000_VFRE, reg | (1 << vf)); - adapter->vf_data[vf].flags = IGB_VF_FLAG_CTS; + adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS; /* reply to reset with ack and vf mac address */ msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK; @@ -4956,7 +6238,11 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf) static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf) { - unsigned char *addr = (char *)&msg[1]; + /* + * The VF MAC Address is stored in a packed array of bytes + * starting at the second 32 bit word of the msg array + */ + unsigned char *addr = (unsigned char *)&msg[1]; int err = -1; if (is_valid_ether_addr(addr)) @@ -4990,7 +6276,7 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf) retval = e1000_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf); if (retval) { - 
dev_err(&pdev->dev, "Error receiving message from VF\n"); + dev_err(pci_dev_to_dev(pdev), "Error receiving message from VF\n"); return; } @@ -5019,10 +6305,15 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf) switch ((msgbuf[0] & 0xFFFF)) { case E1000_VF_SET_MAC_ADDR: -#ifndef IGB_DISABLE_VF_MAC_SET - retval = igb_set_vf_mac_addr(adapter, msgbuf, vf); -#else retval = -EINVAL; +#ifndef IGB_DISABLE_VF_MAC_SET + if (!(vf_data->flags & IGB_VF_FLAG_PF_SET_MAC)) + retval = igb_set_vf_mac_addr(adapter, msgbuf, vf); + else + DPRINTK(DRV, INFO, + "VF %d attempted to override administratively " + "set MAC address\nReload the VF driver to " + "resume operations\n", vf); #endif break; case E1000_VF_SET_PROMISC: @@ -5035,10 +6326,19 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf) retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf); break; case E1000_VF_SET_VLAN: - retval = igb_set_vf_vlan(adapter, msgbuf, vf); + retval = -1; +#ifdef IFLA_VF_MAX + if (vf_data->pf_vlan) + DPRINTK(DRV, INFO, + "VF %d attempted to override administratively " + "set VLAN tag\nReload the VF driver to " + "resume operations\n", vf); + else +#endif + retval = igb_set_vf_vlan(adapter, msgbuf, vf); break; default: - dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]); + dev_err(pci_dev_to_dev(pdev), "Unhandled Msg %08x\n", msgbuf[0]); retval = -E1000_ERR_MBX; break; } @@ -5081,8 +6381,8 @@ static void igb_msg_task(struct igb_adapter *adapter) * The unicast table address is a register array of 32-bit registers. * The table is meant to be used in a way similar to how the MTA is used * however due to certain limitations in the hardware it is necessary to - * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscous - * enable bit to allow vlan tag stripping when promiscous mode is enabled + * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous + * enable bit to allow vlan tag stripping when promiscuous mode is enabled **/ static void igb_set_uta(struct igb_adapter *adapter) { @@ -5130,6 +6430,19 @@ static irqreturn_t igb_intr_msi(int irq, void *data) mod_timer(&adapter->watchdog_timer, jiffies + 1); } +#ifdef HAVE_PTP_1588_CLOCK + if (icr & E1000_ICR_TS) { + u32 tsicr = E1000_READ_REG(hw, E1000_TSICR); + + if (tsicr & E1000_TSICR_TXTS) { + /* acknowledge the interrupt */ + E1000_WRITE_REG(hw, E1000_TSICR, E1000_TSICR_TXTS); + /* retrieve hardware timestamp */ + schedule_work(&adapter->ptp_tx_work); + } + } +#endif /* HAVE_PTP_1588_CLOCK */ + napi_schedule(&q_vector->napi); return IRQ_HANDLED; @@ -5148,16 +6461,14 @@ static irqreturn_t igb_intr(int irq, void *data) /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. 
No * need for the IMC write */ u32 icr = E1000_READ_REG(hw, E1000_ICR); - if (!icr) - return IRQ_NONE; /* Not our interrupt */ - - igb_write_itr(q_vector); /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is * not set, then the adapter didn't send an interrupt */ if (!(icr & E1000_ICR_INT_ASSERTED)) return IRQ_NONE; + igb_write_itr(q_vector); + if (icr & E1000_ICR_DRSTA) schedule_work(&adapter->reset_task); @@ -5173,21 +6484,34 @@ static irqreturn_t igb_intr(int irq, void *data) mod_timer(&adapter->watchdog_timer, jiffies + 1); } +#ifdef HAVE_PTP_1588_CLOCK + if (icr & E1000_ICR_TS) { + u32 tsicr = E1000_READ_REG(hw, E1000_TSICR); + + if (tsicr & E1000_TSICR_TXTS) { + /* acknowledge the interrupt */ + E1000_WRITE_REG(hw, E1000_TSICR, E1000_TSICR_TXTS); + /* retrieve hardware timestamp */ + schedule_work(&adapter->ptp_tx_work); + } + } +#endif /* HAVE_PTP_1588_CLOCK */ + napi_schedule(&q_vector->napi); return IRQ_HANDLED; } -static inline void igb_ring_irq_enable(struct igb_q_vector *q_vector) +void igb_ring_irq_enable(struct igb_q_vector *q_vector) { struct igb_adapter *adapter = q_vector->adapter; struct e1000_hw *hw = &adapter->hw; - if ((q_vector->rx_ring && (adapter->rx_itr_setting & 3)) || - (!q_vector->rx_ring && (adapter->tx_itr_setting & 3))) { + if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) || + (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) { #ifndef __VMKLNX__ - if (!adapter->msix_entries) - igb_set_itr(adapter); + if ((adapter->num_q_vectors == 1) && !adapter->vf_data) + igb_set_itr(q_vector); else #endif igb_update_ring_itr(q_vector); @@ -5209,193 +6533,153 @@ static inline void igb_ring_irq_enable(struct igb_q_vector *q_vector) static int igb_poll(struct napi_struct *napi, int budget) { struct igb_q_vector *q_vector = container_of(napi, struct igb_q_vector, napi); - int tx_clean_complete = 1, work_done = 0; + bool clean_complete = true; #ifdef IGB_DCA if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED) igb_update_dca(q_vector); #endif - if (q_vector->tx_ring) - tx_clean_complete = igb_clean_tx_irq(q_vector); + if (q_vector->tx.ring) + clean_complete = igb_clean_tx_irq(q_vector); - if (q_vector->rx_ring) - igb_clean_rx_irq_adv(q_vector, &work_done, budget); - - if (!tx_clean_complete) - work_done = budget; + if (q_vector->rx.ring) + clean_complete &= igb_clean_rx_irq(q_vector, budget); #ifndef HAVE_NETDEV_NAPI_LIST /* if netdev is disabled we need to stop polling */ if (!netif_running(q_vector->adapter->netdev)) - work_done = 0; + clean_complete = true; #endif + /* If all work not completed, return budget and keep polling */ + if (!clean_complete) + return budget; + /* If not enough Rx work done, exit the polling mode */ - if (work_done < budget) { - napi_complete(napi); - igb_ring_irq_enable(q_vector); - } + napi_complete(napi); + igb_ring_irq_enable(q_vector); - return work_done; + return 0; } -#ifdef SIOCSHWTSTAMP /** - * igb_systim_to_hwtstamp - convert system time value to hw timestamp - * @adapter: board private structure - * @shhwtstamps: timestamp structure to update - * @regval: unsigned 64bit system time value. 
- * - * We need to convert the system time value stored in the RX/TXSTMP registers - * into a hwtstamp which can be used by the upper level timestamping functions - */ -static void igb_systim_to_hwtstamp(struct igb_adapter *adapter, - struct skb_shared_hwtstamps *shhwtstamps, - u64 regval) + * igb_clean_tx_irq - Reclaim resources after transmit completes + * @q_vector: pointer to q_vector containing needed info + * returns TRUE if ring is completely cleaned + **/ +static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) { - u64 ns; + struct igb_adapter *adapter = q_vector->adapter; + struct igb_ring *tx_ring = q_vector->tx.ring; + struct igb_tx_buffer *tx_buffer; + union e1000_adv_tx_desc *tx_desc; + unsigned int total_bytes = 0, total_packets = 0; + unsigned int budget = q_vector->tx.work_limit; + unsigned int i = tx_ring->next_to_clean; - /* - * The 82580 starts with 1ns at bit 0 in RX/TXSTMPL, shift this up to - * 24 to match clock shift we setup earlier. - */ - if (adapter->hw.mac.type == e1000_82580) - regval <<= IGB_82580_TSYNC_SHIFT; + if (test_bit(__IGB_DOWN, &adapter->state)) + return true; - ns = timecounter_cyc2time(&adapter->clock, regval); - timecompare_update(&adapter->compare, ns); - memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps)); - shhwtstamps->hwtstamp = ns_to_ktime(ns); - shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns); -} + tx_buffer = &tx_ring->tx_buffer_info[i]; + tx_desc = IGB_TX_DESC(tx_ring, i); + i -= tx_ring->count; -/** - * igb_tx_hwtstamp - utility function which checks for TX time stamp - * @q_vector: pointer to q_vector containing needed info - * @skb: packet that was just sent - * - * If we were asked to do hardware stamping and such a time stamp is - * available, then it must have been for this skb here because we only - * allow only one such packet into the queue. 
- */ -static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb) -{ - struct igb_adapter *adapter = q_vector->adapter; - union skb_shared_tx *shtx = skb_tx(skb); - struct e1000_hw *hw = &adapter->hw; - struct skb_shared_hwtstamps shhwtstamps; - u64 regval; + do { + union e1000_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; - /* if skb does not support hw timestamp or TX stamp not valid exit */ - if (likely(!shtx->hardware) || - !(E1000_READ_REG(hw, E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID)) - return; + /* if next_to_watch is not set then there is no work pending */ + if (!eop_desc) + break; - regval = E1000_READ_REG(hw, E1000_TXSTMPL); - regval |= (u64)E1000_READ_REG(hw, E1000_TXSTMPH) << 32; + /* prevent any other reads prior to eop_desc */ + read_barrier_depends(); - igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval); - skb_tstamp_tx(skb, &shhwtstamps); -} + /* if DD is not set pending work has not been completed */ + if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD))) + break; -#endif -/** - * igb_clean_tx_irq - Reclaim resources after transmit completes - * @q_vector: pointer to q_vector containing needed info - * returns TRUE if ring is completely cleaned - **/ -static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) -{ - struct igb_adapter *adapter = q_vector->adapter; - struct igb_ring *tx_ring = q_vector->tx_ring; - struct net_device *netdev = tx_ring->netdev; - struct pci_dev *pdev = tx_ring->pdev; - struct e1000_hw *hw = &adapter->hw; - struct igb_buffer *buffer_info; - struct sk_buff *skb; - union e1000_adv_tx_desc *tx_desc, *eop_desc; - unsigned int total_bytes = 0, total_packets = 0; - unsigned int i, eop, count = 0; - bool cleaned = false; - - i = tx_ring->next_to_clean; - eop = tx_ring->buffer_info[i].next_to_watch; - eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop); - - while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) && - (count < tx_ring->count)) { - for (cleaned = false; !cleaned; count++) { - tx_desc = E1000_TX_DESC_ADV(*tx_ring, i); - buffer_info = &tx_ring->buffer_info[i]; - cleaned = (i == eop); - skb = buffer_info->skb; - - if (skb) { -#ifdef NETIF_F_TSO - unsigned int segs, bytecount; - /* gso_segs is currently only valid for tcp */ - segs = buffer_info->gso_segs; - /* multiply data chunks by size of headers */ - bytecount = ((segs - 1) * skb_headlen(skb)) + - skb->len; - total_packets += segs; - total_bytes += bytecount; -#else - total_packets++; - total_bytes += skb->len; -#endif -#ifdef SIOCSHWTSTAMP - igb_tx_hwtstamp(q_vector, skb); -#endif - } + /* clear next_to_watch to prevent false hangs */ + tx_buffer->next_to_watch = NULL; - igb_unmap_and_free_tx_resource(tx_ring, buffer_info); - tx_desc->wb.status = 0; + /* update the statistics for this packet */ + total_bytes += tx_buffer->bytecount; + total_packets += tx_buffer->gso_segs; - i++; - if (i == tx_ring->count) - i = 0; - } - eop = tx_ring->buffer_info[i].next_to_watch; - eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop); - } + /* free the skb */ + dev_kfree_skb_any(tx_buffer->skb); - tx_ring->next_to_clean = i; + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); - if (unlikely(count && - netif_carrier_ok(netdev) && - igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) { - /* Make sure that anybody stopping the queue after this - * sees the new next_to_clean. 
- */ - smp_mb(); - if (netif_is_multiqueue(netdev)) { - if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) && - !(test_bit(__IGB_DOWN, &adapter->state))) { - netif_wake_subqueue(netdev, tx_ring->queue_index); - tx_ring->tx_stats.restart_queue++; + /* clear tx_buffer data */ + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); + + /* clear last DMA location and unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IGB_TX_DESC(tx_ring, 0); } - } else { - if (netif_queue_stopped(netdev) && - !(test_bit(__IGB_DOWN, &adapter->state))) { - netif_wake_queue(netdev); - tx_ring->tx_stats.restart_queue++; + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); } } - } - if (tx_ring->detect_tx_hung) { + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IGB_TX_DESC(tx_ring, 0); + } + + /* issue prefetch for next Tx descriptor */ + prefetch(tx_desc); + + /* update budget accounting */ + budget--; + } while (likely(budget)); + + netdev_tx_completed_queue(txring_txq(tx_ring), + total_packets, total_bytes); + + i += tx_ring->count; + tx_ring->next_to_clean = i; + tx_ring->tx_stats.bytes += total_bytes; + tx_ring->tx_stats.packets += total_packets; + q_vector->tx.total_bytes += total_bytes; + q_vector->tx.total_packets += total_packets; + + if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) { + struct e1000_hw *hw = &adapter->hw; + /* Detect a transmit hang in hardware, this serializes the * check with the clearing of time_stamp and movement of i */ - tx_ring->detect_tx_hung = FALSE; - if (tx_ring->buffer_info[i].time_stamp && - time_after(jiffies, tx_ring->buffer_info[i].time_stamp + + clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); + if (tx_buffer->next_to_watch && + time_after(jiffies, tx_buffer->time_stamp + (adapter->tx_timeout_factor * HZ)) && !(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_TXOFF)) { /* detected Tx unit hang */ - dev_err(&pdev->dev, + dev_err(tx_ring->dev, "Detected Tx Unit Hang\n" " Tx Queue <%d>\n" " TDH <%x>\n" @@ -5404,734 +6688,946 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) " next_to_clean <%x>\n" "buffer_info[next_to_clean]\n" " time_stamp <%lx>\n" - " next_to_watch <%x>\n" + " next_to_watch <%p>\n" " jiffies <%lx>\n" " desc.status <%x>\n", tx_ring->queue_index, - readl(tx_ring->head), + E1000_READ_REG(hw, E1000_TDH(tx_ring->reg_idx)), readl(tx_ring->tail), tx_ring->next_to_use, tx_ring->next_to_clean, - tx_ring->buffer_info[eop].time_stamp, - eop, + tx_buffer->time_stamp, + tx_buffer->next_to_watch, jiffies, - eop_desc->wb.status); - if (netif_is_multiqueue(netdev)) - netif_stop_subqueue(netdev, - tx_ring->queue_index); + tx_buffer->next_to_watch->wb.status); + if (netif_is_multiqueue(netdev_ring(tx_ring))) + netif_stop_subqueue(netdev_ring(tx_ring), + ring_queue_index(tx_ring)); else - netif_stop_queue(netdev); + netif_stop_queue(netdev_ring(tx_ring)); + + /* we are about to reset, no point in enabling stuff */ + return true; } } - tx_ring->total_bytes += total_bytes; - tx_ring->total_packets += total_packets; - tx_ring->tx_stats.bytes += total_bytes; - 
tx_ring->tx_stats.packets += total_packets; - return (count < tx_ring->count); + +#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) + if (unlikely(total_packets && + netif_carrier_ok(netdev_ring(tx_ring)) && + igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. + */ + smp_mb(); + if (netif_is_multiqueue(netdev_ring(tx_ring))) { + if (__netif_subqueue_stopped(netdev_ring(tx_ring), + ring_queue_index(tx_ring)) && + !(test_bit(__IGB_DOWN, &adapter->state))) { + netif_wake_subqueue(netdev_ring(tx_ring), + ring_queue_index(tx_ring)); + tx_ring->tx_stats.restart_queue++; + } + } else { + if (netif_queue_stopped(netdev_ring(tx_ring)) && + !(test_bit(__IGB_DOWN, &adapter->state))) { + netif_wake_queue(netdev_ring(tx_ring)); + tx_ring->tx_stats.restart_queue++; + } + } + } + + return !!budget; } +#ifdef HAVE_VLAN_RX_REGISTER /** * igb_receive_skb - helper function to handle rx indications * @q_vector: structure containing interrupt and ring information * @skb: packet to send up - * @vlan_tag: vlan tag for packet **/ static void igb_receive_skb(struct igb_q_vector *q_vector, - struct sk_buff *skb, - u16 vlan_tag) + struct sk_buff *skb) { - struct igb_adapter *adapter = q_vector->adapter; + struct vlan_group **vlgrp = netdev_priv(skb->dev); - if (vlan_tag) - vlan_gro_receive(&q_vector->napi, adapter->vlgrp, - vlan_tag, skb); - else + if (IGB_CB(skb)->vid) { + if (*vlgrp) { + vlan_gro_receive(&q_vector->napi, *vlgrp, + IGB_CB(skb)->vid, skb); + } else { + dev_kfree_skb_any(skb); + } + } else { napi_gro_receive(&q_vector->napi, skb); + } +} + +#endif /* HAVE_VLAN_RX_REGISTER */ +#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT +/** + * igb_reuse_rx_page - page flip buffer and store it back on the ring + * @rx_ring: rx descriptor ring to store buffers on + * @old_buff: donor buffer to have page reused + * + * Synchronizes page for reuse by the adapter + **/ +static void igb_reuse_rx_page(struct igb_ring *rx_ring, + struct igb_rx_buffer *old_buff) +{ + struct igb_rx_buffer *new_buff; + u16 nta = rx_ring->next_to_alloc; + + new_buff = &rx_ring->rx_buffer_info[nta]; + + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? 
nta : 0; + + /* transfer page from old buffer to new buffer */ + memcpy(new_buff, old_buff, sizeof(struct igb_rx_buffer)); + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma, + old_buff->page_offset, + IGB_RX_BUFSZ, + DMA_FROM_DEVICE); +} + +static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer, + struct page *page, + unsigned int truesize) +{ + /* avoid re-using remote pages */ + if (unlikely(page_to_nid(page) != numa_node_id())) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely(page_count(page) != 1)) + return false; + + /* flip page offset to other buffer */ + rx_buffer->page_offset ^= IGB_RX_BUFSZ; + +#else + /* move offset up to the next cache line */ + rx_buffer->page_offset += truesize; + + if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ)) + return false; +#endif + + /* bump ref count on page before it is given to the stack */ + get_page(page); + + return true; +} + +/** + * igb_add_rx_frag - Add contents of Rx buffer to sk_buff + * @rx_ring: rx descriptor ring to transact packets on + * @rx_buffer: buffer containing page to add + * @rx_desc: descriptor containing length of buffer written by hardware + * @skb: sk_buff to place the data into + * + * This function will add the data contained in rx_buffer->page to the skb. + * This is done either through a direct copy if the data in the buffer is + * less than the skb header size, otherwise it will just attach the page as + * a frag to the skb. + * + * The function will then update the page offset if necessary and return + * true if the buffer can be reused by the adapter. + **/ +static bool igb_add_rx_frag(struct igb_ring *rx_ring, + struct igb_rx_buffer *rx_buffer, + union e1000_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct page *page = rx_buffer->page; + unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); +#if (PAGE_SIZE < 8192) + unsigned int truesize = IGB_RX_BUFSZ; +#else + unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); +#endif + + if ((size <= IGB_RX_HDR_LEN) && !skb_is_nonlinear(skb)) { + unsigned char *va = page_address(page) + rx_buffer->page_offset; + +#ifdef HAVE_PTP_1588_CLOCK + if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) { + igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb); + va += IGB_TS_HDR_LEN; + size -= IGB_TS_HDR_LEN; + } +#endif /* HAVE_PTP_1588_CLOCK */ + + memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); + + /* we can reuse buffer as-is, just make sure it is local */ + if (likely(page_to_nid(page) == numa_node_id())) + return true; + + /* this page cannot be reused so discard it */ + put_page(page); + return false; + } + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + rx_buffer->page_offset, size, truesize); + + return igb_can_reuse_rx_page(rx_buffer, page, truesize); } -static inline void igb_rx_checksum_adv(struct igb_ring *ring, - u32 status_err, struct sk_buff *skb) +static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring, + union e1000_adv_rx_desc *rx_desc, + struct sk_buff *skb) { - skb->ip_summed = CHECKSUM_NONE; + struct igb_rx_buffer *rx_buffer; + struct page *page; + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + + page = rx_buffer->page; + prefetchw(page); + + if (likely(!skb)) { + void *page_addr = page_address(page) + + rx_buffer->page_offset; + + /* prefetch first cache line of first page */ + prefetch(page_addr); +#if L1_CACHE_BYTES < 128 + prefetch(page_addr + L1_CACHE_BYTES); 
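+ /* a second prefetch, since the header copied out below may span + * two cache lines when they are smaller than 128 bytes */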
+#endif + + /* allocate a skb to store the frags */ + skb = netdev_alloc_skb_ip_align(rx_ring->netdev, + IGB_RX_HDR_LEN); + if (unlikely(!skb)) { + rx_ring->rx_stats.alloc_failed++; + return NULL; + } + + /* + * we will be copying header into skb->data in + * pskb_may_pull so it is in our interest to prefetch + * it now to avoid a possible cache miss + */ + prefetchw(skb->data); + } + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + IGB_RX_BUFSZ, + DMA_FROM_DEVICE); - /* Ignore Checksum bit is set or checksum is disabled through ethtool */ - if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) || - (status_err & E1000_RXD_STAT_IXSM)) + /* pull page into skb */ + if (igb_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { + /* hand second half of page back to the ring */ + igb_reuse_rx_page(rx_ring, rx_buffer); + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page(rx_ring->dev, rx_buffer->dma, + PAGE_SIZE, DMA_FROM_DEVICE); + } + + /* clear contents of rx_buffer */ + rx_buffer->page = NULL; + + return skb; +} + +#endif +static inline void igb_rx_checksum(struct igb_ring *ring, + union e1000_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + skb_checksum_none_assert(skb); + + /* Ignore Checksum bit is set */ + if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM)) + return; + + /* Rx checksum disabled via ethtool */ + if (!(netdev_ring(ring)->features & NETIF_F_RXCSUM)) return; /* TCP/UDP checksum error bit is set */ - if (status_err & - (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) { + if (igb_test_staterr(rx_desc, + E1000_RXDEXT_STATERR_TCPE | + E1000_RXDEXT_STATERR_IPE)) { /* * work around errata with sctp packets where the TCPE aka * L4E bit is set incorrectly on 64 byte (60 byte w/o crc) * packets, (aka let the stack check the crc32c) */ - if ((skb->len == 60) && - (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM)) + if (!((skb->len == 60) && + test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) ring->rx_stats.csum_err++; /* let the stack verify checksum errors */ return; } /* It must be a TCP or UDP packet with a valid checksum */ - if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) + if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS | + E1000_RXD_STAT_UDPCS)) skb->ip_summed = CHECKSUM_UNNECESSARY; } -#ifdef SIOCSHWTSTAMP -static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr, - struct sk_buff *skb) -{ - struct igb_adapter *adapter = q_vector->adapter; - struct e1000_hw *hw = &adapter->hw; - u64 regval; - - /* - * If this bit is set, then the RX registers contain the time stamp. No - * other packet will be time stamped until we read these registers, so - * read the registers to make them available again. Because only one - * packet can be time stamped at a time, we know that the register - * values must belong to this one here and therefore we don't need to - * compare any of the additional attributes stored for it. - * - * If nothing went wrong, then it should have a skb_shared_tx that we - * can turn into a skb_shared_hwtstamps. 
- */ - if (staterr & E1000_RXDADV_STAT_TSIP) { - u32 *stamp = (u32 *)skb->data; - regval = le32_to_cpu(*(stamp + 2)); - regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32; - skb_pull(skb, IGB_TS_HDR_LEN); - } else { - if(!(E1000_READ_REG(hw, E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID)) - return; - - regval = E1000_READ_REG(hw, E1000_RXSTMPL); - regval |= (u64)E1000_READ_REG(hw, E1000_RXSTMPH) << 32; - } - - igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); -} -#endif -#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT -static inline u16 igb_get_hlen(struct igb_ring *rx_ring, - union e1000_adv_rx_desc *rx_desc) +#ifdef NETIF_F_RXHASH +static inline void igb_rx_hash(struct igb_ring *ring, + union e1000_adv_rx_desc *rx_desc, + struct sk_buff *skb) { - /* HW will not DMA in data larger than the given buffer, even if it - * parses the (NFS, of course) header to be larger. In that case, it - * fills the header buffer and spills the rest into the page. - */ - u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info) & - E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT; - if (hlen > rx_ring->rx_buffer_len) - hlen = rx_ring->rx_buffer_len; - return hlen; + if (netdev_ring(ring)->features & NETIF_F_RXHASH) + skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); } #endif -#ifdef IGB_LRO /** - * igb_transform_rsc_queue - change rsc queue into a full packet - * @skb: pointer to the last skb in the rsc queue + * igb_process_skb_fields - Populate skb header fields from Rx descriptor + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being populated * - * This function changes a queue full of hw rsc buffers into a completed - * packet. It uses the ->prev pointers to find the first packet and then - * turns it into the frag list owner. + * This function checks the ring, descriptor, and packet information in + * order to populate the hash, checksum, VLAN, timestamp, protocol, and + * other fields within the skb. 
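+ * Note: on rings flagged IGB_RING_FLAG_RX_LB_VLAN_BSWAP, the VLAN tag of a + * loopback frame (E1000_RXDEXT_STATERR_LB set) is stored byte-swapped, which + * is why the VLAN extraction below picks between be16_to_cpu and le16_to_cpu.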
**/ -static inline struct sk_buff *igb_transform_rsc_queue(struct sk_buff *skb) +static void igb_process_skb_fields(struct igb_ring *rx_ring, + union e1000_adv_rx_desc *rx_desc, + struct sk_buff *skb) { - unsigned int frag_list_size = 0; + struct net_device *dev = rx_ring->netdev; + __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; - while (skb->prev) { - struct sk_buff *prev = skb->prev; - frag_list_size += skb->len; - skb->prev = NULL; - skb = prev; - } +#ifdef NETIF_F_RXHASH + igb_rx_hash(rx_ring, rx_desc, skb); - skb_shinfo(skb)->frag_list = skb->next; - skb->next = NULL; - skb->len += frag_list_size; - skb->data_len += frag_list_size; -#if !defined(ESX35) || !defined(__VMKLNX__) - skb->truesize += frag_list_size; #endif - return skb; + igb_rx_checksum(rx_ring, rx_desc, skb); + + /* update packet type stats */ + if (pkt_info & cpu_to_le16(E1000_RXDADV_PKTTYPE_IPV4)) + rx_ring->rx_stats.ipv4_packets++; + else if (pkt_info & cpu_to_le16(E1000_RXDADV_PKTTYPE_IPV4_EX)) + rx_ring->rx_stats.ipv4e_packets++; + else if (pkt_info & cpu_to_le16(E1000_RXDADV_PKTTYPE_IPV6)) + rx_ring->rx_stats.ipv6_packets++; + else if (pkt_info & cpu_to_le16(E1000_RXDADV_PKTTYPE_IPV6_EX)) + rx_ring->rx_stats.ipv6e_packets++; + else if (pkt_info & cpu_to_le16(E1000_RXDADV_PKTTYPE_TCP)) + rx_ring->rx_stats.tcp_packets++; + else if (pkt_info & cpu_to_le16(E1000_RXDADV_PKTTYPE_UDP)) + rx_ring->rx_stats.udp_packets++; + else if (pkt_info & cpu_to_le16(E1000_RXDADV_PKTTYPE_SCTP)) + rx_ring->rx_stats.sctp_packets++; + else if (pkt_info & cpu_to_le16(E1000_RXDADV_PKTTYPE_NFS)) + rx_ring->rx_stats.nfs_packets++; + +#ifdef HAVE_PTP_1588_CLOCK + igb_ptp_rx_hwtstamp(rx_ring, rx_desc, skb); +#endif /* HAVE_PTP_1588_CLOCK */ + +#ifdef NETIF_F_HW_VLAN_CTAG_RX + if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && +#else + if ((dev->features & NETIF_F_HW_VLAN_RX) && +#endif + igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) { + u16 vid = 0; + if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) && + test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags)) + vid = be16_to_cpu(rx_desc->wb.upper.vlan); + else + vid = le16_to_cpu(rx_desc->wb.upper.vlan); +#ifdef HAVE_VLAN_RX_REGISTER + IGB_CB(skb)->vid = vid; + } else { + IGB_CB(skb)->vid = 0; +#else + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); +#endif + } + + skb_record_rx_queue(skb, rx_ring->queue_index); + + skb->protocol = eth_type_trans(skb, dev); } /** - * igb_can_lro - returns true if packet is TCP/IPV4 and LRO is enabled - * @adapter: board private structure - * @rx_desc: pointer to the rx descriptor + * igb_is_non_eop - process handling of non-EOP buffers + * @rx_ring: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer * + * This function updates next to clean. If the buffer is an EOP buffer + * this function exits returning false, otherwise it will place the + * sk_buff in the next buffer to be chained and return true indicating + * that this is in fact a non-EOP buffer. **/ -static inline bool igb_can_lro(struct igb_ring *rx_ring, - union e1000_adv_rx_desc *rx_desc) +static bool igb_is_non_eop(struct igb_ring *rx_ring, + union e1000_adv_rx_desc *rx_desc) { - u16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; + u32 ntc = rx_ring->next_to_clean + 1; + + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? 
ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(IGB_RX_DESC(rx_ring, ntc)); - return ((rx_ring->flags & IGB_RING_FLAG_RX_LRO) && - (pkt_info & E1000_RXDADV_PKTTYPE_IPV4) && - (pkt_info & E1000_RXDADV_PKTTYPE_TCP)); + if (likely(igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP))) + return false; + + return true; } -/** - * igb_lro_flush - Indicate packets to upper layer. - * - * Update IP and TCP header part of head skb if more than one - * skb's chained and indicate packets to upper layer. - **/ -static void igb_lro_flush(struct igb_q_vector *q_vector, - struct igb_lro_desc *lrod) +#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT +/* igb_clean_rx_irq -- * legacy */ +static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget) { - struct igb_lro_list *lrolist = q_vector->lrolist; - struct iphdr *iph; - struct tcphdr *th; - struct sk_buff *skb; - u32 *ts_ptr; + struct igb_ring *rx_ring = q_vector->rx.ring; + unsigned int total_bytes = 0, total_packets = 0; + u16 cleaned_count = igb_desc_unused(rx_ring); - hlist_del(&lrod->lro_node); - lrolist->active_cnt--; + do { + struct igb_rx_buffer *rx_buffer; + union e1000_adv_rx_desc *rx_desc; + struct sk_buff *skb; + u16 ntc; - skb = lrod->skb; - lrod->skb = NULL; + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= IGB_RX_BUFFER_WRITE) { + igb_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } - if (lrod->append_cnt) { - /* take the lro queue and convert to skb format */ - skb = igb_transform_rsc_queue(skb); + ntc = rx_ring->next_to_clean; + rx_desc = IGB_RX_DESC(rx_ring, ntc); + rx_buffer = &rx_ring->rx_buffer_info[ntc]; - /* incorporate ip header and re-calculate checksum */ - iph = (struct iphdr *)skb->data; - iph->tot_len = ntohs(skb->len); - iph->check = 0; - iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); - - /* incorporate the latest ack into the tcp header */ - th = (struct tcphdr *) ((char *)skb->data + sizeof(*iph)); - th->ack_seq = lrod->ack_seq; - th->psh = lrod->psh; - th->window = lrod->window; - th->check = 0; - - /* incorporate latest timestamp into the tcp header */ - if (lrod->opt_bytes) { - ts_ptr = (u32 *)(th + 1); - ts_ptr[1] = htonl(lrod->tsval); - ts_ptr[2] = lrod->tsecr; + if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) + break; + + /* + * This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * RXD_STAT_DD bit is set + */ + rmb(); + + skb = rx_buffer->skb; + + prefetch(skb->data); + + /* pull the header of the skb in */ + __skb_put(skb, le16_to_cpu(rx_desc->wb.upper.length)); + + /* clear skb reference in buffer info structure */ + rx_buffer->skb = NULL; + + cleaned_count++; + + BUG_ON(igb_is_non_eop(rx_ring, rx_desc)); + + dma_unmap_single(rx_ring->dev, rx_buffer->dma, + rx_ring->rx_buffer_len, + DMA_FROM_DEVICE); + rx_buffer->dma = 0; + + if (igb_test_staterr(rx_desc, + E1000_RXDEXT_ERR_FRAME_ERR_MASK)) { + dev_kfree_skb_any(skb); + continue; } - } -#ifdef NETIF_F_TSO - skb_shinfo(skb)->gso_size = lrod->mss; + total_bytes += skb->len; + + /* populate checksum, timestamp, VLAN, and protocol */ + igb_process_skb_fields(rx_ring, rx_desc, skb); + +#ifdef HAVE_VLAN_RX_REGISTER + igb_receive_skb(q_vector, skb); +#else + napi_gro_receive(&q_vector->napi, skb); #endif - igb_receive_skb(q_vector, skb, lrod->vlan_tag); - lrolist->stats.flushed++; +#ifndef NETIF_F_GRO + netdev_ring(rx_ring)->last_rx = jiffies; - hlist_add_head(&lrod->lro_node, &lrolist->free); -} +#endif + /* update budget accounting */ + 
total_packets++; + } while (likely(total_packets < budget)); -static void igb_lro_flush_all(struct igb_q_vector *q_vector) -{ - struct igb_lro_desc *lrod; - struct hlist_node *node, *node2; - struct igb_lro_list *lrolist = q_vector->lrolist; + rx_ring->rx_stats.packets += total_packets; + rx_ring->rx_stats.bytes += total_bytes; + q_vector->rx.total_packets += total_packets; + q_vector->rx.total_bytes += total_bytes; - hlist_for_each_entry_safe(lrod, node, node2, &lrolist->active, lro_node) - igb_lro_flush(q_vector, lrod); -} + if (cleaned_count) + igb_alloc_rx_buffers(rx_ring, cleaned_count); -/* - * igb_lro_header_ok - Main LRO function. + return (total_packets < budget); +} +#else /* CONFIG_IGB_DISABLE_PACKET_SPLIT */ +/** + * igb_get_headlen - determine size of header for LRO/GRO + * @data: pointer to the start of the headers + * @max_len: total length of section to find headers in + * + * This function is meant to determine the length of headers that will + * be recognized by hardware for LRO, and GRO offloads. The main + * motivation of doing this is to only perform one pull for IPv4 TCP + * packets so that we can do basic things like calculating the gso_size + * based on the average data per packet. **/ -static u16 igb_lro_header_ok(struct sk_buff *new_skb, struct iphdr *iph, - struct tcphdr *th) -{ - int opt_bytes, tcp_data_len; - u32 *ts_ptr = NULL; +static unsigned int igb_get_headlen(unsigned char *data, + unsigned int max_len) +{ + union { + unsigned char *network; + /* l2 headers */ + struct ethhdr *eth; + struct vlan_hdr *vlan; + /* l3 headers */ + struct iphdr *ipv4; + struct ipv6hdr *ipv6; + } hdr; + __be16 protocol; + u8 nexthdr = 0; /* default to not TCP */ + u8 hlen; + + /* this should never happen, but better safe than sorry */ + if (max_len < ETH_HLEN) + return max_len; + + /* initialize network frame pointer */ + hdr.network = data; + + /* set first protocol and move network header forward */ + protocol = hdr.eth->h_proto; + hdr.network += ETH_HLEN; + + /* handle any vlan tag if present */ + if (protocol == __constant_htons(ETH_P_8021Q)) { + if ((hdr.network - data) > (max_len - VLAN_HLEN)) + return max_len; + + protocol = hdr.vlan->h_vlan_encapsulated_proto; + hdr.network += VLAN_HLEN; + } + + /* handle L3 protocols */ + if (protocol == __constant_htons(ETH_P_IP)) { + if ((hdr.network - data) > (max_len - sizeof(struct iphdr))) + return max_len; + + /* access ihl as a u8 to avoid unaligned access on ia64 */ + hlen = (hdr.network[0] & 0x0F) << 2; + + /* verify hlen meets minimum size requirements */ + if (hlen < sizeof(struct iphdr)) + return hdr.network - data; + + /* record next protocol if header is present */ + if (!(hdr.ipv4->frag_off & htons(IP_OFFSET))) + nexthdr = hdr.ipv4->protocol; +#ifdef NETIF_F_TSO6 + } else if (protocol == __constant_htons(ETH_P_IPV6)) { + if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr))) + return max_len; + + /* record next protocol */ + nexthdr = hdr.ipv6->nexthdr; + hlen = sizeof(struct ipv6hdr); +#endif /* NETIF_F_TSO6 */ + } else { + return hdr.network - data; + } - /* If we see CE codepoint in IP header, packet is not mergeable */ - if (INET_ECN_is_ce(ipv4_get_dsfield(iph))) - return -1; + /* relocate pointer to start of L4 header */ + hdr.network += hlen; - /* ensure there are no options */ - if ((iph->ihl << 2) != sizeof(*iph)) - return -1; + /* finally sort out TCP */ + if (nexthdr == IPPROTO_TCP) { + if ((hdr.network - data) > (max_len - sizeof(struct tcphdr))) + return max_len; - /* .. 
and the packet is not fragmented */ - if (iph->frag_off & htons(IP_MF|IP_OFFSET)) - return -1; + /* access doff as a u8 to avoid unaligned access on ia64 */ + hlen = (hdr.network[12] & 0xF0) >> 2; - /* ensure no bits set besides ack or psh */ - if (th->fin || th->syn || th->rst || - th->urg || th->ece || th->cwr || !th->ack) - return -1; + /* verify hlen meets minimum size requirements */ + if (hlen < sizeof(struct tcphdr)) + return hdr.network - data; - /* ensure that the checksum is valid */ - if (new_skb->ip_summed != CHECKSUM_UNNECESSARY) - return -1; + hdr.network += hlen; + } else if (nexthdr == IPPROTO_UDP) { + if ((hdr.network - data) > (max_len - sizeof(struct udphdr))) + return max_len; + + hdr.network += sizeof(struct udphdr); + } /* - * check for timestamps. Since the only option we handle are timestamps, - * we only have to handle the simple case of aligned timestamps + * If everything has gone correctly hdr.network should be the + * data section of the packet and will be the end of the header. + * If not then it probably represents the end of the last recognized + * header. */ + if ((hdr.network - data) < max_len) + return hdr.network - data; + else + return max_len; +} - opt_bytes = (th->doff << 2) - sizeof(*th); - if (opt_bytes != 0) { - ts_ptr = (u32 *)(th + 1); - if ((opt_bytes != TCPOLEN_TSTAMP_ALIGNED) || - (*ts_ptr != ntohl((TCPOPT_NOP << 24) | - (TCPOPT_NOP << 16) | (TCPOPT_TIMESTAMP << 8) | - TCPOLEN_TIMESTAMP))) { - return -1; - } +/** + * igb_pull_tail - igb specific version of skb_pull_tail + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being adjusted + * + * This function is an igb specific version of __pskb_pull_tail. The + * main difference between this version and the original function is that + * this function can make several assumptions about the state of things + * that allow for significant optimizations versus the standard function. + * As a result we can do things like drop a frag and maintain an accurate + * truesize for the skb. + */ +static void igb_pull_tail(struct igb_ring *rx_ring, + union e1000_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + unsigned char *va; + unsigned int pull_len; + + /* + * it is valid to use page_address instead of kmap since we are + * working with pages allocated out of the lomem pool per + * alloc_page(GFP_ATOMIC) + */ + va = skb_frag_address(frag); + +#ifdef HAVE_PTP_1588_CLOCK + if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) { + /* retrieve timestamp from buffer */ + igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb); + + /* update pointers to remove timestamp header */ + skb_frag_size_sub(frag, IGB_TS_HDR_LEN); + frag->page_offset += IGB_TS_HDR_LEN; + skb->data_len -= IGB_TS_HDR_LEN; + skb->len -= IGB_TS_HDR_LEN; + + /* move va to start of packet data */ + va += IGB_TS_HDR_LEN; } +#endif /* HAVE_PTP_1588_CLOCK */ - tcp_data_len = ntohs(iph->tot_len) - (th->doff << 2) - sizeof(*iph); + /* + * we need the header to contain the greater of either ETH_HLEN or + * 60 bytes if the skb->len is less than 60 for skb_pad. 
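+ * igb_get_headlen() returns at most its max_len argument, so pull_len is + * capped at IGB_RX_HDR_LEN, the linear room reserved when the skb was + * allocated in igb_fetch_rx_buffer().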
+ */ + pull_len = igb_get_headlen(va, IGB_RX_HDR_LEN); + + /* align pull length to size of long to optimize memcpy performance */ + skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); - return tcp_data_len; + /* update all of the pointers */ + skb_frag_size_sub(frag, pull_len); + frag->page_offset += pull_len; + skb->data_len -= pull_len; + skb->tail += pull_len; } /** - * igb_lro_queue - if able, queue skb into lro chain - * @q_vector: structure containing interrupt and ring information - * @new_skb: pointer to current skb being checked - * @tag: vlan tag for skb + * igb_cleanup_headers - Correct corrupted or empty headers + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being fixed + * + * Address the case where we are pulling data in on pages only + * and as such no data is present in the skb header. * - * Checks whether the skb given is eligible for LRO and if that's - * fine chains it to the existing lro_skb based on flowid. If an LRO for - * the flow doesn't exist create one. + * In addition if skb is not at least 60 bytes we need to pad it so that + * it is large enough to qualify as a valid Ethernet frame. + * + * Returns true if an error was encountered and skb was freed. **/ -static struct sk_buff *igb_lro_queue(struct igb_q_vector *q_vector, - struct sk_buff *new_skb, - u16 tag) -{ - struct sk_buff *lro_skb; - struct igb_lro_desc *lrod; - struct hlist_node *node; - struct skb_shared_info *new_skb_info = skb_shinfo(new_skb); - struct igb_lro_list *lrolist = q_vector->lrolist; - struct iphdr *iph = (struct iphdr *)new_skb->data; - struct tcphdr *th = (struct tcphdr *)(iph + 1); - int tcp_data_len = igb_lro_header_ok(new_skb, iph, th); - u16 opt_bytes = (th->doff << 2) - sizeof(*th); - u32 *ts_ptr = (opt_bytes ? (u32 *)(th + 1) : NULL); - u32 seq = ntohl(th->seq); - - /* - * we have a packet that might be eligible for LRO, - * so see if it matches anything we might expect - */ - hlist_for_each_entry(lrod, node, &lrolist->active, lro_node) { - if (lrod->source_port != th->source || - lrod->dest_port != th->dest || - lrod->source_ip != iph->saddr || - lrod->dest_ip != iph->daddr || - lrod->vlan_tag != tag) - continue; +static bool igb_cleanup_headers(struct igb_ring *rx_ring, + union e1000_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ - /* - * malformed header, no tcp data, resultant packet would - * be too large, ack sequence numbers do not match, window - * size has changed, or new skb is larger than our current mss. 
- * If any of the above we should flush the lro descriptor and - * start over if possible - */ - if (tcp_data_len <= 0 || (tcp_data_len + lrod->len) > 65535 || - lrod->ack_seq != th->ack_seq || - lrod->window != th->window || - lrod->mss < tcp_data_len) { - igb_lro_flush(q_vector, lrod); - break; + if (unlikely((igb_test_staterr(rx_desc, + E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) { + struct net_device *netdev = rx_ring->netdev; + if (!(netdev->features & NETIF_F_RXALL)) { + dev_kfree_skb_any(skb); + return true; } + } - /* out of order packet */ - if (seq != lrod->next_seq) { - igb_lro_flush(q_vector, lrod); - tcp_data_len = -1; - break; - } + /* place header in linear portion of buffer */ + if (skb_is_nonlinear(skb)) + igb_pull_tail(rx_ring, rx_desc, skb); - if (lrod->opt_bytes || opt_bytes) { - u32 tsval = ntohl(*(ts_ptr + 1)); - /* make sure timestamp values are increasing */ - if (opt_bytes != lrod->opt_bytes || - lrod->tsval > tsval || *(ts_ptr + 2) == 0) { - igb_lro_flush(q_vector, lrod); - tcp_data_len = -1; - break; - } + /* if skb_pad returns an error the skb was freed */ + if (unlikely(skb->len < 60)) { + int pad_len = 60 - skb->len; - lrod->tsval = tsval; - lrod->tsecr = *(ts_ptr + 2); - } + if (skb_pad(skb, pad_len)) + return true; + __skb_put(skb, pad_len); + } - /* remove any padding from the end of the skb */ - __pskb_trim(new_skb, ntohs(iph->tot_len)); - /* Remove IP and TCP header*/ - skb_pull(new_skb, ntohs(iph->tot_len) - tcp_data_len); - - lrod->next_seq += tcp_data_len; - lrod->len += tcp_data_len; - lrod->psh |= th->psh; - lrod->append_cnt++; - lrolist->stats.coal++; - lro_skb = lrod->skb; - - /* if header is empty pull pages into current skb */ - if (!skb_headlen(new_skb) && - ((skb_shinfo(lro_skb)->nr_frags + - skb_shinfo(new_skb)->nr_frags) <= MAX_SKB_FRAGS )) { - struct skb_shared_info *lro_skb_info = skb_shinfo(lro_skb); - - /* copy frags into the last skb */ - memcpy(lro_skb_info->frags + lro_skb_info->nr_frags, - new_skb_info->frags, - new_skb_info->nr_frags * sizeof(skb_frag_t)); - - lro_skb_info->nr_frags += new_skb_info->nr_frags; - lro_skb->len += tcp_data_len; - lro_skb->data_len += tcp_data_len; - lro_skb->truesize += tcp_data_len; - - new_skb_info->nr_frags = 0; -#if !defined(ESX35) || !defined(__VMKLNX__) - new_skb->truesize -= tcp_data_len; -#endif - new_skb->len = new_skb->data_len = 0; - } else { - /* Chain this new skb in frag_list */ - new_skb->prev = lro_skb; - lro_skb->next = new_skb; - lrod->skb = new_skb ; - } + return false; +} - if (lrod->psh || (tcp_data_len < lrod->mss)) - igb_lro_flush(q_vector, lrod); +/* igb_clean_rx_irq -- * packet split */ +static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget) +{ + struct igb_ring *rx_ring = q_vector->rx.ring; + struct sk_buff *skb = rx_ring->skb; + unsigned int total_bytes = 0, total_packets = 0; + u16 cleaned_count = igb_desc_unused(rx_ring); - /* return the skb if it is empty for recycling */ - if (!new_skb->len) { - new_skb->data = skb_mac_header(new_skb); - __pskb_trim(new_skb, 0); - new_skb->protocol = 0; - lrolist->stats.recycled++; - return new_skb; - } + do { + union e1000_adv_rx_desc *rx_desc; - return NULL; - } - - /* start a new packet */ - if (tcp_data_len > 0 && !hlist_empty(&lrolist->free) && !th->psh) { - lrod = hlist_entry(lrolist->free.first, struct igb_lro_desc, - lro_node); - - lrod->skb = new_skb; - lrod->source_ip = iph->saddr; - lrod->dest_ip = iph->daddr; - lrod->source_port = th->source; - lrod->dest_port = th->dest; - lrod->vlan_tag = tag; - lrod->len = 
new_skb->len; - lrod->next_seq = seq + tcp_data_len; - lrod->ack_seq = th->ack_seq; - lrod->window = th->window; - lrod->mss = tcp_data_len; - lrod->opt_bytes = opt_bytes; - lrod->psh = 0; - lrod->append_cnt = 0; - - /* record timestamp if it is present */ - if (opt_bytes) { - lrod->tsval = ntohl(*(ts_ptr + 1)); - lrod->tsecr = *(ts_ptr + 2); + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= IGB_RX_BUFFER_WRITE) { + igb_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; } - /* remove first packet from freelist.. */ - hlist_del(&lrod->lro_node); - /* .. and insert at the front of the active list */ - hlist_add_head(&lrod->lro_node, &lrolist->active); - lrolist->active_cnt++; - lrolist->stats.coal++; - return NULL; - } - - /* packet not handled by any of the above, pass it to the stack */ - igb_receive_skb(q_vector, new_skb, tag); - return NULL; -} - -#endif /* IGB_LRO */ -static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector, - int *work_done, int budget) -{ - struct igb_ring *rx_ring = q_vector->rx_ring; - struct net_device *netdev = rx_ring->netdev; - struct pci_dev *pdev = rx_ring->pdev; - union e1000_adv_rx_desc *rx_desc , *next_rxd; - struct igb_buffer *buffer_info , *next_buffer; - struct sk_buff *skb; - bool cleaned = FALSE; - int cleaned_count = 0; -#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT - int current_node = numa_node_id(); -#endif - unsigned int total_bytes = 0, total_packets = 0; - unsigned int i; - u32 staterr; - u16 length; - u16 vlan_tag; - - i = rx_ring->next_to_clean; - buffer_info = &rx_ring->buffer_info[i]; - rx_desc = E1000_RX_DESC_ADV(*rx_ring, i); - staterr = le32_to_cpu(rx_desc->wb.upper.status_error); - - while (staterr & E1000_RXD_STAT_DD) { - if (*work_done >= budget) + + rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean); + + if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) break; - (*work_done)++; - skb = buffer_info->skb; - prefetch(skb->data - NET_IP_ALIGN); - buffer_info->skb = NULL; + /* + * This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * RXD_STAT_DD bit is set + */ + rmb(); - i++; - if (i == rx_ring->count) - i = 0; + /* retrieve a buffer from the ring */ + skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb); - next_rxd = E1000_RX_DESC_ADV(*rx_ring, i); - prefetch(next_rxd); - next_buffer = &rx_ring->buffer_info[i]; + /* exit if we failed to retrieve a buffer */ + if (!skb) + break; - length = le16_to_cpu(rx_desc->wb.upper.length); - cleaned = TRUE; cleaned_count++; -#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT - pci_unmap_single(pdev, buffer_info->dma, - rx_ring->rx_buffer_len, - PCI_DMA_FROMDEVICE); - buffer_info->dma = 0; - skb_put(skb, length); + /* fetch next buffer in frame if non-eop */ + if (igb_is_non_eop(rx_ring, rx_desc)) + continue; + + /* verify the packet layout is correct */ + if (igb_cleanup_headers(rx_ring, rx_desc, skb)) { + skb = NULL; + continue; + } + + /* probably a little skewed due to removing CRC */ + total_bytes += skb->len; + + /* populate checksum, timestamp, VLAN, and protocol */ + igb_process_skb_fields(rx_ring, rx_desc, skb); + +#ifdef HAVE_VLAN_RX_REGISTER + igb_receive_skb(q_vector, skb); +#else + napi_gro_receive(&q_vector->napi, skb); +#endif +#ifndef NETIF_F_GRO + + netdev_ring(rx_ring)->last_rx = jiffies; +#endif -#else - if (buffer_info->dma) { - pci_unmap_single(pdev, buffer_info->dma, - rx_ring->rx_buffer_len, - PCI_DMA_FROMDEVICE); - buffer_info->dma = 0; - if (rx_ring->rx_buffer_len >= 
IGB_RXBUFFER_1024) { - skb_put(skb, length); - goto send_up; - } - skb_put(skb, igb_get_hlen(rx_ring, rx_desc)); - } + /* reset skb pointer */ + skb = NULL; - if (length) { - pci_unmap_page(pdev, buffer_info->page_dma, - PAGE_SIZE / 2, PCI_DMA_FROMDEVICE); - buffer_info->page_dma = 0; + /* update budget accounting */ + total_packets++; + } while (likely(total_packets < budget)); - skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++, - buffer_info->page, - buffer_info->page_offset, - length); + /* place incomplete frames back on ring for completion */ + rx_ring->skb = skb; - if ((page_count(buffer_info->page) != 1) || - (page_to_nid(buffer_info->page) != current_node)) - buffer_info->page = NULL; - else - get_page(buffer_info->page); + rx_ring->rx_stats.packets += total_packets; + rx_ring->rx_stats.bytes += total_bytes; + q_vector->rx.total_packets += total_packets; + q_vector->rx.total_bytes += total_bytes; - skb->len += length; - skb->data_len += length; - skb->truesize += length; - } + if (cleaned_count) + igb_alloc_rx_buffers(rx_ring, cleaned_count); - if (!(staterr & E1000_RXD_STAT_EOP)) { - buffer_info->skb = next_buffer->skb; - buffer_info->dma = next_buffer->dma; - next_buffer->skb = skb; - next_buffer->dma = 0; - goto next_desc; - } -send_up: + return (total_packets < budget); +} #endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */ - if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) { - dev_kfree_skb_irq(skb); - goto next_desc; - } -#ifdef SIOCSHWTSTAMP - if (staterr & (E1000_RXDADV_STAT_TSIP | E1000_RXDADV_STAT_TS)) - igb_rx_hwtstamp(q_vector, staterr, skb); -#endif - total_bytes += skb->len; - total_packets++; +#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT +static bool igb_alloc_mapped_skb(struct igb_ring *rx_ring, + struct igb_rx_buffer *bi) +{ + struct sk_buff *skb = bi->skb; + dma_addr_t dma = bi->dma; - igb_rx_checksum_adv(rx_ring, staterr, skb); + if (dma) + return true; - skb->protocol = eth_type_trans(skb, netdev); - skb_record_rx_queue(skb, rx_ring->queue_index); + if (likely(!skb)) { + skb = netdev_alloc_skb_ip_align(netdev_ring(rx_ring), + rx_ring->rx_buffer_len); + bi->skb = skb; + if (!skb) { + rx_ring->rx_stats.alloc_failed++; + return false; + } + /* initialize skb for ring */ + skb_record_rx_queue(skb, ring_queue_index(rx_ring)); #ifdef __VMKNETDDI_QUEUEOPS__ + vmknetddi_queueops_set_skb_queueid (skb, - VMKNETDDI_QUEUEOPS_MK_RX_QUEUEID(rx_ring->queue_index)); + VMKNETDDI_QUEUEOPS_MK_RX_QUEUEID(rx_ring->queue_index)); #endif - vlan_tag = ((staterr & E1000_RXD_STAT_VP) ? 
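
/*
 * --- Editorial sketch (illustrative only; not part of the disclosed driver) ---
 * igb_alloc_mapped_skb() above follows a strict order: reuse a live DMA
 * mapping if one exists, allocate the buffer, attempt the mapping, and on
 * mapping failure release the buffer immediately and bump alloc_failed
 * rather than hold memory the hardware can never use.  The skeleton below
 * models that flow; map_for_device() is a hypothetical stand-in for the
 * dma_map_single() + dma_mapping_error() pair.
 */
#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>

struct rx_buf_model {
    void     *buf;
    uintptr_t dma;   /* 0 means "not mapped" */
};

/* stub: an identity "mapping" that never fails; the real dma_map_single()
 * can fail, which is exactly what the error path below is for */
static uintptr_t map_for_device(void *buf, size_t len)
{
    (void)len;
    return (uintptr_t)buf;
}

static bool alloc_mapped_buf(struct rx_buf_model *bi, size_t len,
                             unsigned long *alloc_failed)
{
    if (bi->dma)                      /* mapping still live: nothing to do */
        return true;

    if (!bi->buf) {
        bi->buf = malloc(len);
        if (!bi->buf) {
            (*alloc_failed)++;
            return false;
        }
    }

    bi->dma = map_for_device(bi->buf, len);
    if (!bi->dma) {                   /* mapping failed: give memory back */
        free(bi->buf);
        bi->buf = NULL;
        (*alloc_failed)++;
        return false;
    }
    return true;
}
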
- le16_to_cpu(rx_desc->wb.upper.vlan) : 0); + } -#ifdef IGB_LRO - if (igb_can_lro(rx_ring, rx_desc)) - buffer_info->skb = igb_lro_queue(q_vector, skb, vlan_tag); - else -#endif - igb_receive_skb(q_vector, skb, vlan_tag); + dma = dma_map_single(rx_ring->dev, skb->data, + rx_ring->rx_buffer_len, DMA_FROM_DEVICE); - netdev->last_rx = jiffies; + /* if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + dev_kfree_skb_any(skb); + bi->skb = NULL; -next_desc: - rx_desc->wb.upper.status_error = 0; + rx_ring->rx_stats.alloc_failed++; + return false; + } - /* return some buffers to hardware, one at a time is too slow */ - if (cleaned_count >= IGB_RX_BUFFER_WRITE) { - igb_alloc_rx_buffers_adv(rx_ring, cleaned_count); - cleaned_count = 0; - } + bi->dma = dma; + return true; +} + +#else /* CONFIG_IGB_DISABLE_PACKET_SPLIT */ +static bool igb_alloc_mapped_page(struct igb_ring *rx_ring, + struct igb_rx_buffer *bi) +{ + struct page *page = bi->page; + dma_addr_t dma; + + /* since we are recycling buffers we should seldom need to alloc */ + if (likely(page)) + return true; - /* use prefetched values */ - rx_desc = next_rxd; - buffer_info = next_buffer; - staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + /* alloc new page for storage */ + page = alloc_page(GFP_ATOMIC | __GFP_COLD); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_failed++; + return false; } -#ifdef IGB_LRO - if (rx_ring->flags & IGB_RING_FLAG_RX_LRO) - igb_lro_flush_all(q_vector); -#endif /* IGB_LRO */ - rx_ring->next_to_clean = i; - cleaned_count = igb_desc_unused(rx_ring); + /* map page for use */ + dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); - if (cleaned_count) - igb_alloc_rx_buffers_adv(rx_ring, cleaned_count); + /* + * if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + __free_page(page); - rx_ring->total_packets += total_packets; - rx_ring->total_bytes += total_bytes; - rx_ring->rx_stats.packets += total_packets; - rx_ring->rx_stats.bytes += total_bytes; - return cleaned; + rx_ring->rx_stats.alloc_failed++; + return false; + } + + bi->dma = dma; + bi->page = page; + bi->page_offset = 0; + + return true; } +#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */ /** - * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split + * igb_alloc_rx_buffers - Replace used receive buffers; packet split * @adapter: address of board private structure **/ -void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count) +void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count) { - struct net_device *netdev = rx_ring->netdev; union e1000_adv_rx_desc *rx_desc; - struct igb_buffer *buffer_info; - struct sk_buff *skb; - unsigned int i; - int bufsz; - - i = rx_ring->next_to_use; - buffer_info = &rx_ring->buffer_info[i]; + struct igb_rx_buffer *bi; + u16 i = rx_ring->next_to_use; - bufsz = rx_ring->rx_buffer_len; + /* nothing to do */ + if (!cleaned_count) + return; - while (cleaned_count--) { - rx_desc = E1000_RX_DESC_ADV(*rx_ring, i); + rx_desc = IGB_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_buffer_info[i]; + i -= rx_ring->count; -#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT - if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) { - if (!buffer_info->page) { - buffer_info->page = netdev_alloc_page(netdev); - if (!buffer_info->page) { - rx_ring->rx_stats.alloc_failed++; - goto 
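
/*
 * --- Editorial sketch (illustrative only; not part of the disclosed driver) ---
 * igb_alloc_rx_buffers() above biases the ring index negative
 * ("i -= rx_ring->count" right after reading next_to_use) so the wrap test
 * in the hot loop is a compare against zero ("if (unlikely(!i))") instead
 * of a compare against the ring size, and the true index is recovered at
 * the end with "i += rx_ring->count".  The same walk in isolation:
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t refill_walk(uint16_t next_to_use, uint16_t count,
                            uint16_t to_fill)
{
    int32_t i = (int32_t)next_to_use - count;  /* biased range: -count..-1 */

    while (to_fill--) {
        i++;
        if (!i)               /* wrapped past the last descriptor */
            i -= count;       /* back to the biased start of the ring */
    }
    return (uint16_t)(i + count);  /* un-bias: the new next_to_use */
}

int main(void)
{
    /* 4 refills in a 256-entry ring starting at slot 254 wrap to slot 2 */
    printf("%u\n", (unsigned)refill_walk(254, 256, 4));
    return 0;
}
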
no_buffers; - } - buffer_info->page_offset = 0; - } else { - buffer_info->page_offset ^= PAGE_SIZE / 2; - } - buffer_info->page_dma = - pci_map_page(rx_ring->pdev, buffer_info->page, - buffer_info->page_offset, - PAGE_SIZE / 2, - PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(rx_ring->pdev, - buffer_info->page_dma)) { - buffer_info->page_dma = 0; - rx_ring->rx_stats.alloc_failed++; - goto no_buffers; - } - } + do { +#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT + if (!igb_alloc_mapped_skb(rx_ring, bi)) +#else + if (!igb_alloc_mapped_page(rx_ring, bi)) #endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */ + break; - skb = buffer_info->skb; - if (!skb) { - skb = netdev_alloc_skb(netdev, bufsz + NET_IP_ALIGN); - if (!skb) { - rx_ring->rx_stats.alloc_failed++; - goto no_buffers; - } - - /* Make buffer alignment 2 beyond a 16 byte boundary - * this will result in a 16 byte aligned IP header after - * the 14 byte MAC header is removed - */ - skb_reserve(skb, NET_IP_ALIGN); + /* + * Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. + */ +#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); +#else + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); +#endif - buffer_info->skb = skb; - } - if (!buffer_info->dma) { - buffer_info->dma = pci_map_single(rx_ring->pdev, - skb->data, - bufsz, - PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(rx_ring->pdev, - buffer_info->dma)) { - buffer_info->dma = 0; - rx_ring->rx_stats.alloc_failed++; - goto no_buffers; - } - } - /* Refresh the desc even if buffer_addrs didn't change because - * each write-back erases this info. */ -#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT - if (bufsz < IGB_RXBUFFER_1024) { - rx_desc->read.pkt_addr = - cpu_to_le64(buffer_info->page_dma); - rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma); - } else { - rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma); - rx_desc->read.hdr_addr = 0; + rx_desc++; + bi++; + i++; + if (unlikely(!i)) { + rx_desc = IGB_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buffer_info; + i -= rx_ring->count; } -#else - rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma); + + /* clear the hdr_addr for the next_to_use descriptor */ rx_desc->read.hdr_addr = 0; -#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */ - i++; - if (i == rx_ring->count) - i = 0; - buffer_info = &rx_ring->buffer_info[i]; - } + cleaned_count--; + } while (cleaned_count); + + i += rx_ring->count; -no_buffers: if (rx_ring->next_to_use != i) { + /* record the next descriptor to use */ rx_ring->next_to_use = i; - if (i == 0) - i = (rx_ring->count - 1); - else - i--; - /* Force memory writes to complete before letting h/w +#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = i; + +#endif + /* + * Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, - * such as IA-64). */ + * such as IA-64). + */ wmb(); writel(i, rx_ring->tail); } @@ -6170,179 +7666,6 @@ static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) return E1000_SUCCESS; } -#endif -#ifdef SIOCSHWTSTAMP -/** - * igb_hwtstamp_ioctl - control hardware time stamping - * @netdev: - * @ifreq: - * @cmd: - * - * Outgoing time stamping can be enabled and disabled. Play nice and - * disable it when requested, although it shouldn't case any overhead - * when no packet needs it. 
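
/*
 * --- Editorial sketch (illustrative only; not part of the disclosed driver) ---
 * The wmb()-before-writel(tail) pair that closes igb_alloc_rx_buffers()
 * above is a classic producer hand-off: every descriptor store must be
 * globally visible before the device observes the new tail.  A portable
 * C11 analogue publishes the index with a release store; "tail" here is an
 * ordinary atomic standing in for the memory-mapped tail register.
 */
#include <stdatomic.h>
#include <stdint.h>

struct ring_model {
    uint64_t desc[256];          /* descriptor memory, plain stores */
    _Atomic uint32_t tail;       /* stand-in for the tail doorbell */
};

static void publish_descriptors(struct ring_model *r, uint32_t first,
                                uint32_t n, const uint64_t *vals)
{
    for (uint32_t k = 0; k < n; k++)
        r->desc[(first + k) % 256] = vals[k];

    /* release: all descriptor stores above become visible before the new
     * tail does -- the role wmb() plays ahead of writel() in the driver */
    atomic_store_explicit(&r->tail, (first + n) % 256,
                          memory_order_release);
}
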
At most one packet in the queue may be - * marked for time stamping, otherwise it would be impossible to tell - * for sure to which packet the hardware time stamp belongs. - * - * Incoming time stamping has to be configured via the hardware - * filters. Not all combinations are supported, in particular event - * type has to be specified. Matching the kind of event packet is - * not supported, with the exception of "all V2 events regardless of - * level 2 or 4". - * - **/ -static int igb_hwtstamp_ioctl(struct net_device *netdev, - struct ifreq *ifr, int cmd) -{ - struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - struct hwtstamp_config config; - u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED; - u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; - u32 tsync_rx_cfg = 0; - bool is_l4 = false; - bool is_l2 = false; - u32 regval; - - if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) - return -EFAULT; - - /* reserved for future extensions */ - if (config.flags) - return -EINVAL; - - switch (config.tx_type) { - case HWTSTAMP_TX_OFF: - tsync_tx_ctl = 0; - case HWTSTAMP_TX_ON: - break; - default: - return -ERANGE; - } - - switch (config.rx_filter) { - case HWTSTAMP_FILTER_NONE: - tsync_rx_ctl = 0; - break; - case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: - case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: - case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: - case HWTSTAMP_FILTER_ALL: - /* - * register TSYNCRXCFG must be set, therefore it is not - * possible to time stamp both Sync and Delay_Req messages - * => fall back to time stamping all packets - */ - tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL; - config.rx_filter = HWTSTAMP_FILTER_ALL; - break; - case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: - tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1; - tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE; - is_l4 = true; - break; - case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: - tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1; - tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE; - is_l4 = true; - break; - case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: - case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: - tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2; - tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE; - is_l2 = true; - is_l4 = true; - config.rx_filter = HWTSTAMP_FILTER_SOME; - break; - case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: - case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: - tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2; - tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE; - is_l2 = true; - is_l4 = true; - config.rx_filter = HWTSTAMP_FILTER_SOME; - break; - case HWTSTAMP_FILTER_PTP_V2_EVENT: - case HWTSTAMP_FILTER_PTP_V2_SYNC: - case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: - tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2; - config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; - is_l2 = true; - break; - default: - return -ERANGE; - } - - if (hw->mac.type == e1000_82575) { - if (tsync_rx_ctl | tsync_tx_ctl) - return -EINVAL; - return 0; - } - -#ifdef IGB_PER_PKT_TIMESTAMP - if ((hw->mac.type == e1000_82580) && tsync_rx_ctl) { - tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; - tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL; - } - -#endif - /* enable/disable TX */ - regval = E1000_READ_REG(hw, E1000_TSYNCTXCTL); - regval &= ~E1000_TSYNCTXCTL_ENABLED; - regval |= tsync_tx_ctl; - E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, regval); - - /* enable/disable RX */ - regval = E1000_READ_REG(hw, E1000_TSYNCRXCTL); - regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK); - regval |= tsync_rx_ctl; - E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, regval); - - /* 
define which PTP packets are time stamped */ - E1000_WRITE_REG(hw, E1000_TSYNCRXCFG, tsync_rx_cfg); - - /* define ethertype filter for timestamped packets */ - if (is_l2) - E1000_WRITE_REG(hw, E1000_ETQF(3), - (E1000_ETQF_FILTER_ENABLE | /* enable filter */ - E1000_ETQF_1588 | /* enable timestamping */ - ETH_P_1588)); /* 1588 eth protocol type */ - else - E1000_WRITE_REG(hw, E1000_ETQF(3), 0); - -#define PTP_PORT 319 - /* L4 Queue Filter[3]: filter by destination port and protocol */ - if (is_l4) { - u32 ftqf = (IPPROTO_UDP /* UDP */ - | E1000_FTQF_VF_BP /* VF not compared */ - | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */ - | E1000_FTQF_MASK); /* mask all inputs */ - ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */ - - E1000_WRITE_REG(hw, E1000_IMIR(3), htons(PTP_PORT)); - E1000_WRITE_REG(hw, E1000_IMIREXT(3), - (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP)); - if (hw->mac.type == e1000_82576) { - /* enable source port check */ - E1000_WRITE_REG(hw, E1000_SPQF(3), htons(PTP_PORT)); - ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP; - } - E1000_WRITE_REG(hw, E1000_FTQF(3), ftqf); - } else { - E1000_WRITE_REG(hw, E1000_FTQF(3), E1000_FTQF_MASK); - } - E1000_WRITE_FLUSH(hw); - - adapter->hwtstamp_config = config; - - /* clear TX/RX time stamp registers, just to be sure */ - regval = E1000_READ_REG(hw, E1000_TXSTMPH); - regval = E1000_READ_REG(hw, E1000_RXSTMPH); - - return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? - -EFAULT : 0; -} - #endif /** * igb_ioctl - @@ -6359,14 +7682,16 @@ static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) case SIOCSMIIREG: return igb_mii_ioctl(netdev, ifr, cmd); #endif -#ifdef SIOCSHWTSTAMP +#ifdef HAVE_PTP_1588_CLOCK case SIOCSHWTSTAMP: - return igb_hwtstamp_ioctl(netdev, ifr, cmd); -#endif + return igb_ptp_hwtstamp_ioctl(netdev, ifr, cmd); +#endif /* HAVE_PTP_1588_CLOCK */ #ifdef ETHTOOL_OPS_COMPAT case SIOCETHTOOL: return ethtool_ioctl(ifr); #endif + case SIOCINTELCIM: + return igb_intelcim_ioctl(netdev, ifr); default: return -EOPNOTSUPP; } @@ -6400,17 +7725,34 @@ s32 e1000_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) return E1000_SUCCESS; } -static void igb_vlan_rx_register(struct net_device *netdev, - struct vlan_group *grp) +#ifdef HAVE_VLAN_RX_REGISTER +static void igb_vlan_mode(struct net_device *netdev, struct vlan_group *vlgrp) +#else +void igb_vlan_mode(struct net_device *netdev, u32 features) +#endif { struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; u32 ctrl, rctl; + int i; +#ifdef HAVE_VLAN_RX_REGISTER + bool enable = !!vlgrp; igb_irq_disable(adapter); - adapter->vlgrp = grp; - if (grp) { + adapter->vlgrp = vlgrp; + + if (!test_bit(__IGB_DOWN, &adapter->state)) + igb_irq_enable(adapter); +#else +#ifdef NETIF_F_HW_VLAN_CTAG_RX + bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX); +#else + bool enable = !!(features & NETIF_F_HW_VLAN_RX); +#endif +#endif + + if (enable) { /* enable VLAN tag insert/strip */ ctrl = E1000_READ_REG(hw, E1000_CTRL); ctrl |= E1000_CTRL_VME; @@ -6427,74 +7769,157 @@ static void igb_vlan_rx_register(struct net_device *netdev, E1000_WRITE_REG(hw, E1000_CTRL, ctrl); } - igb_rlpml_set(adapter); +#ifndef CONFIG_IGB_VMDQ_NETDEV + for (i = 0; i < adapter->vmdq_pools; i++) { + igb_set_vf_vlan_strip(adapter, + adapter->vfs_allocated_count + i, + enable); + } - if (!test_bit(__IGB_DOWN, &adapter->state)) - igb_irq_enable(adapter); +#else + igb_set_vf_vlan_strip(adapter, + adapter->vfs_allocated_count, + enable); + + for (i = 1; 
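
/*
 * --- Editorial sketch (illustrative only; not part of the disclosed driver) ---
 * igb_vlan_mode() above flips VLAN tag insert/strip with read-modify-write
 * cycles on CTRL and RCTL: read the register, set or clear a single bit,
 * write it back, so the surrounding bits are preserved.  Modelled on a
 * plain variable standing in for the memory-mapped register (the VME bit
 * position is illustrative):
 */
#include <stdint.h>
#include <stdbool.h>

#define CTRL_VME (1u << 30) /* VLAN mode enable */

static void reg_update_bit(volatile uint32_t *reg, uint32_t bit, bool on)
{
    uint32_t v = *reg;     /* read  */
    if (on)
        v |= bit;          /* modify: set without disturbing neighbours */
    else
        v &= ~bit;         /* modify: clear without disturbing neighbours */
    *reg = v;              /* write back */
}
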
i < adapter->vmdq_pools; i++) { +#ifdef HAVE_VLAN_RX_REGISTER + struct igb_vmdq_adapter *vadapter; + vadapter = netdev_priv(adapter->vmdq_netdev[i-1]); + enable = !!vadapter->vlgrp; +#else + struct net_device *vnetdev; + vnetdev = adapter->vmdq_netdev[i-1]; +#ifdef NETIF_F_HW_VLAN_CTAG_RX + enable = !!(vnetdev->features & NETIF_F_HW_VLAN_CTAG_RX); +#else + enable = !!(vnetdev->features & NETIF_F_HW_VLAN_RX); +#endif +#endif + igb_set_vf_vlan_strip(adapter, + adapter->vfs_allocated_count + i, + enable); + } + +#endif + igb_rlpml_set(adapter); } +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID +#ifdef NETIF_F_HW_VLAN_CTAG_RX +static int igb_vlan_rx_add_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) +#else +static int igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +#endif +#else static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +#endif { struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; int pf_id = adapter->vfs_allocated_count; -#ifndef HAVE_NETDEV_VLAN_FEATURES -#ifndef __VMKLNX__ - struct net_device *v_netdev; -#endif -#endif /* attempt to add filter to vlvf array */ igb_vlvf_set(adapter, vid, TRUE, pf_id); /* add the filter since PF can receive vlans w/o entry in vlvf */ - igb_vfta_set(hw, vid, TRUE); -#ifndef HAVE_NETDEV_VLAN_FEATURES + igb_vfta_set(adapter, vid, TRUE); #ifndef __VMKLNX__ +#ifndef HAVE_NETDEV_VLAN_FEATURES /* Copy feature flags from netdev to the vlan netdev for this vid. * This allows things like TSO to bubble down to our vlan device. + * There is no need to update netdev for vlan 0 (DCB), since it + * wouldn't has v_netdev. */ - v_netdev = vlan_group_get_device(adapter->vlgrp, vid); - v_netdev->features |= adapter->netdev->features; - vlan_group_set_device(adapter->vlgrp, vid, v_netdev); + if (adapter->vlgrp) { + struct vlan_group *vlgrp = adapter->vlgrp; + struct net_device *v_netdev = vlan_group_get_device(vlgrp, vid); + if (v_netdev) { + v_netdev->features |= netdev->features; + vlan_group_set_device(vlgrp, vid, v_netdev); + } + } +#endif +#endif +#ifndef HAVE_VLAN_RX_REGISTER + + set_bit(vid, adapter->active_vlans); #endif +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID + return 0; #endif } +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID +#ifdef NETIF_F_HW_VLAN_CTAG_RX +static int igb_vlan_rx_kill_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) +#else +static int igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +#endif +#else static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +#endif { struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; int pf_id = adapter->vfs_allocated_count; s32 err; +#ifdef HAVE_VLAN_RX_REGISTER igb_irq_disable(adapter); + vlan_group_set_device(adapter->vlgrp, vid, NULL); if (!test_bit(__IGB_DOWN, &adapter->state)) igb_irq_enable(adapter); +#endif /* HAVE_VLAN_RX_REGISTER */ /* remove vlan from VLVF table array */ err = igb_vlvf_set(adapter, vid, FALSE, pf_id); /* if vid was not present in VLVF just remove it from table */ if (err) - igb_vfta_set(hw, vid, FALSE); + igb_vfta_set(adapter, vid, FALSE); +#ifndef HAVE_VLAN_RX_REGISTER + + clear_bit(vid, adapter->active_vlans); +#endif +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID + return 0; +#endif } static void igb_restore_vlan(struct igb_adapter *adapter) { - igb_vlan_rx_register(adapter->netdev, adapter->vlgrp); +#ifdef HAVE_VLAN_RX_REGISTER + igb_vlan_mode(adapter->netdev, adapter->vlgrp); if (adapter->vlgrp) { u16 vid; - for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; 
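
/*
 * --- Editorial sketch (illustrative only; not part of the disclosed driver) ---
 * On kernels without HAVE_VLAN_RX_REGISTER the hunks above stop tracking
 * VLANs through a vlan_group and instead keep one bit per VID in
 * adapter->active_vlans: set on add_vid, cleared on kill_vid, and replayed
 * by igb_restore_vlan() after a reset.  The same bookkeeping on a plain
 * user-space bitmap (VLAN_N_VID is 4096):
 */
#include <stdint.h>

#define VLAN_N_VID 4096

static uint64_t active_vlans[VLAN_N_VID / 64];

static void vlan_set(uint16_t vid)   { active_vlans[vid / 64] |=  (1ULL << (vid % 64)); }
static void vlan_clear(uint16_t vid) { active_vlans[vid / 64] &= ~(1ULL << (vid % 64)); }

/* the for_each_set_bit() replay loop from igb_restore_vlan(), spelled out */
static void vlan_restore(void (*add_vid)(uint16_t))
{
    for (uint16_t vid = 0; vid < VLAN_N_VID; vid++)
        if (active_vlans[vid / 64] & (1ULL << (vid % 64)))
            add_vid(vid);
}
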
vid++) { + for (vid = 0; vid < VLAN_N_VID; vid++) { if (!vlan_group_get_device(adapter->vlgrp, vid)) continue; +#ifdef NETIF_F_HW_VLAN_CTAG_RX + igb_vlan_rx_add_vid(adapter->netdev, + htons(ETH_P_8021Q), vid); +#else igb_vlan_rx_add_vid(adapter->netdev, vid); +#endif } } +#else + u16 vid; + + igb_vlan_mode(adapter->netdev, adapter->netdev->features); + + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) +#ifdef NETIF_F_HW_VLAN_CTAG_RX + igb_vlan_rx_add_vid(adapter->netdev, + htons(ETH_P_8021Q), vid); +#else + igb_vlan_rx_add_vid(adapter->netdev, vid); +#endif +#endif } int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx) @@ -6504,6 +7929,22 @@ int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx) mac->autoneg = 0; + /* SerDes device's does not support 10Mbps Full/duplex + * and 100Mbps Half duplex + */ + if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) { + switch (spddplx) { + case SPEED_10 + DUPLEX_HALF: + case SPEED_10 + DUPLEX_FULL: + case SPEED_100 + DUPLEX_HALF: + dev_err(pci_dev_to_dev(pdev), + "Unsupported Speed/Duplex configuration\n"); + return -EINVAL; + default: + break; + } + } + switch (spddplx) { case SPEED_10 + DUPLEX_HALF: mac->forced_speed_duplex = ADVERTISE_10_HALF; @@ -6523,47 +7964,36 @@ int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx) break; case SPEED_1000 + DUPLEX_HALF: /* not supported */ default: - dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n"); + dev_err(pci_dev_to_dev(pdev), "Unsupported Speed/Duplex configuration\n"); return -EINVAL; } - return 0; -} -#ifdef USE_REBOOT_NOTIFIER -/* only want to do this for 2.4 kernels? */ -static int igb_notify_reboot(struct notifier_block *nb, unsigned long event, - void *p) -{ - struct pci_dev *pdev = NULL; + /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ + adapter->hw.phy.mdix = AUTO_ALL_MODES; - switch (event) { - case SYS_DOWN: - case SYS_HALT: - case SYS_POWER_OFF: - while ((pdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) { - if (pci_dev_driver(pdev) == &igb_driver) - igb_suspend(pdev, PMSG_SUSPEND); - } - } - return NOTIFY_DONE; + return 0; } -#endif -static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake) +static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, + bool runtime) { struct net_device *netdev = pci_get_drvdata(pdev); struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; u32 ctrl, rctl, status; - u32 wufc = adapter->wol; + u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol; #ifdef CONFIG_PM int retval = 0; #endif netif_device_detach(netdev); + status = E1000_READ_REG(hw, E1000_STATUS); + if (status & E1000_STATUS_LU) + wufc &= ~E1000_WUFC_LNKC; + if (netif_running(netdev)) - igb_close(netdev); + __igb_close(netdev, true); igb_clear_interrupt_scheme(adapter); @@ -6573,10 +8003,6 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake) return retval; #endif - status = E1000_READ_REG(hw, E1000_STATUS); - if (status & E1000_STATUS_LU) - wufc &= ~E1000_WUFC_LNKC; - if (wufc) { igb_setup_rctl(adapter); igb_set_rx_mode(netdev); @@ -6606,7 +8032,9 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake) *enable_wake = wufc || adapter->en_mng_pt; if (!*enable_wake) - e1000_shutdown_fiber_serdes_link(hw); + igb_power_down_link(adapter); + else + igb_power_up_link(adapter); /* Release control of h/w to f/w. If f/w is AMT enabled, this * would have already happened in close and is redundant. 
*/ @@ -6618,12 +8046,19 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake) } #ifdef CONFIG_PM +#ifdef HAVE_SYSTEM_SLEEP_PM_OPS +static int igb_suspend(struct device *dev) +#else static int igb_suspend(struct pci_dev *pdev, pm_message_t state) +#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */ { +#ifdef HAVE_SYSTEM_SLEEP_PM_OPS + struct pci_dev *pdev = to_pci_dev(dev); +#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */ int retval; bool wake; - retval = __igb_shutdown(pdev, &wake); + retval = __igb_shutdown(pdev, &wake, 0); if (retval) return retval; @@ -6637,8 +8072,15 @@ static int igb_suspend(struct pci_dev *pdev, pm_message_t state) return 0; } +#ifdef HAVE_SYSTEM_SLEEP_PM_OPS +static int igb_resume(struct device *dev) +#else static int igb_resume(struct pci_dev *pdev) +#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */ { +#ifdef HAVE_SYSTEM_SLEEP_PM_OPS + struct pci_dev *pdev = to_pci_dev(dev); +#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */ struct net_device *netdev = pci_get_drvdata(pdev); struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -6646,10 +8088,11 @@ static int igb_resume(struct pci_dev *pdev) pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); + pci_save_state(pdev); err = pci_enable_device_mem(pdev); if (err) { - dev_err(&pdev->dev, + dev_err(pci_dev_to_dev(pdev), "igb: Cannot enable PCI device from suspend\n"); return err; } @@ -6658,13 +8101,11 @@ static int igb_resume(struct pci_dev *pdev) pci_enable_wake(pdev, PCI_D3hot, 0); pci_enable_wake(pdev, PCI_D3cold, 0); - if (igb_init_interrupt_scheme(adapter)) { - dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); + if (igb_init_interrupt_scheme(adapter, true)) { + dev_err(pci_dev_to_dev(pdev), "Unable to allocate memory for queues\n"); return -ENOMEM; } - /* e1000_power_up_phy(adapter); */ - igb_reset(adapter); /* let the f/w know that the h/w is now under the control of the @@ -6673,32 +8114,99 @@ static int igb_resume(struct pci_dev *pdev) E1000_WRITE_REG(hw, E1000_WUS, ~0); - if (netif_running(netdev)) { - err = igb_open(netdev); + if (netdev->flags & IFF_UP) { + rtnl_lock(); + err = __igb_open(netdev, true); + rtnl_unlock(); if (err) return err; } - netif_device_attach(netdev); + netif_device_attach(netdev); + + return 0; +} + +#ifdef CONFIG_PM_RUNTIME +#ifdef HAVE_SYSTEM_SLEEP_PM_OPS +static int igb_runtime_idle(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = netdev_priv(netdev); + + if (!igb_has_link(adapter)) + pm_schedule_suspend(dev, MSEC_PER_SEC * 5); + + return -EBUSY; +} + +static int igb_runtime_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + int retval; + bool wake; + + retval = __igb_shutdown(pdev, &wake, 1); + if (retval) + return retval; + + if (wake) { + pci_prepare_to_sleep(pdev); + } else { + pci_wake_from_d3(pdev, false); + pci_set_power_state(pdev, PCI_D3hot); + } return 0; } -#endif -#ifndef USE_REBOOT_NOTIFIER -static void igb_shutdown(struct pci_dev *pdev) +static int igb_runtime_resume(struct device *dev) +{ + return igb_resume(dev); +} +#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */ +#endif /* CONFIG_PM_RUNTIME */ +#endif /* CONFIG_PM */ + +#ifdef USE_REBOOT_NOTIFIER +/* only want to do this for 2.4 kernels? 
*/ +static int igb_notify_reboot(struct notifier_block *nb, unsigned long event, + void *p) { + struct pci_dev *pdev = NULL; bool wake; - __igb_shutdown(pdev, &wake); + switch (event) { + case SYS_DOWN: + case SYS_HALT: + case SYS_POWER_OFF: + while ((pdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) { + if (pci_dev_driver(pdev) == &igb_driver) { + __igb_shutdown(pdev, &wake, 0); + if (event == SYS_POWER_OFF) { + pci_wake_from_d3(pdev, wake); + pci_set_power_state(pdev, PCI_D3hot); + } + } + } + } + return NOTIFY_DONE; +} +#else +static void igb_shutdown(struct pci_dev *pdev) +{ + bool wake = false; + + __igb_shutdown(pdev, &wake, 0); if (system_state == SYSTEM_POWER_OFF) { pci_wake_from_d3(pdev, wake); pci_set_power_state(pdev, PCI_D3hot); } } +#endif /* USE_REBOOT_NOTIFIER */ -#endif #ifdef CONFIG_NET_POLL_CONTROLLER /* * Polling 'interrupt' - used by things like netconsole to send skbs @@ -6714,13 +8222,17 @@ static void igb_netpoll(struct net_device *netdev) for (i = 0; i < adapter->num_q_vectors; i++) { q_vector = adapter->q_vector[i]; - E1000_WRITE_REG(hw, E1000_EIMC, q_vector->eims_value); + if (adapter->msix_entries) + E1000_WRITE_REG(hw, E1000_EIMC, q_vector->eims_value); + else + igb_irq_disable(adapter); napi_schedule(&q_vector->napi); } } #endif /* CONFIG_NET_POLL_CONTROLLER */ #ifdef HAVE_PCI_ERS +#define E1000_DEV_ID_82576_VF 0x10CA /** * igb_io_error_detected - called when PCI error is detected * @pdev: Pointer to PCI device @@ -6730,11 +8242,12 @@ static void igb_netpoll(struct net_device *netdev) * this device has been detected. */ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev, - pci_channel_state_t state) + pci_channel_state_t state) { struct net_device *netdev = pci_get_drvdata(pdev); struct igb_adapter *adapter = netdev_priv(netdev); + netif_device_detach(netdev); if (state == pci_channel_io_perm_failure) @@ -6763,17 +8276,18 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev) pci_ers_result_t result; if (pci_enable_device_mem(pdev)) { - dev_err(&pdev->dev, + dev_err(pci_dev_to_dev(pdev), "Cannot re-enable PCI device after reset.\n"); result = PCI_ERS_RESULT_DISCONNECT; } else { pci_set_master(pdev); pci_restore_state(pdev); + pci_save_state(pdev); pci_enable_wake(pdev, PCI_D3hot, 0); pci_enable_wake(pdev, PCI_D3cold, 0); - igb_reset(adapter); + schedule_work(&adapter->reset_task); E1000_WRITE_REG(hw, E1000_WUS, ~0); result = PCI_ERS_RESULT_RECOVERED; } @@ -6796,9 +8310,15 @@ static void igb_io_resume(struct pci_dev *pdev) struct net_device *netdev = pci_get_drvdata(pdev); struct igb_adapter *adapter = netdev_priv(netdev); + if (adapter->vferr_refcount) { + dev_info(pci_dev_to_dev(pdev), "Resuming after VF err\n"); + adapter->vferr_refcount--; + return; + } + if (netif_running(netdev)) { if (igb_up(adapter)) { - dev_err(&pdev->dev, "igb_up failed after reset\n"); + dev_err(pci_dev_to_dev(pdev), "igb_up failed after reset\n"); return; } } @@ -6811,442 +8331,192 @@ static void igb_io_resume(struct pci_dev *pdev) } #endif /* HAVE_PCI_ERS */ -static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index, - u8 qsel) -{ - u32 rar_low, rar_high; - struct e1000_hw *hw = &adapter->hw; - - /* HW expects these in little endian so we reverse the byte order - * from network order (big endian) to little endian - */ - rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | - ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); - rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); - - /* Indicate to hardware the Address is Valid. 
*/ - rar_high |= E1000_RAH_AV; - - if (hw->mac.type == e1000_82575) - rar_high |= E1000_RAH_POOL_1 * qsel; - else - rar_high |= E1000_RAH_POOL_1 << qsel; - - E1000_WRITE_REG(hw, E1000_RAL(index), rar_low); - E1000_WRITE_FLUSH(hw); - E1000_WRITE_REG(hw, E1000_RAH(index), rar_high); - E1000_WRITE_FLUSH(hw); -} -#ifdef __VMKNETDDI_QUEUEOPS__ -int igb_set_rxqueue_macfilter(struct net_device *netdev, int queue, - u8 *mac_addr) +int igb_add_mac_filter(struct igb_adapter *adapter, u8 *addr, u16 queue) { - int err = 0; - struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - struct igb_ring *rx_ring = adapter->rx_ring[queue]; - - if ((queue < 0) || (queue >= adapter->num_rx_queues)) { - DPRINTK(DRV, ERR, "Invalid RX Queue %u specified\n", queue); - return -EADDRNOTAVAIL; - } - - - /* Note: Broadcast address is used to disable the MAC filter*/ - if (!is_valid_ether_addr(mac_addr)) { - - /* Clear ring addr */ - DPRINTK(DRV, DEBUG, - "disabling MAC filter on RX Queue[%d]\n", queue); - memset(rx_ring->mac_addr, 0xFF, NODE_ADDRESS_SIZE); - - /* Clear RAR */ - E1000_WRITE_REG(hw, E1000_RAL(queue + 1), 0); - E1000_WRITE_FLUSH(hw); - E1000_WRITE_REG(hw, E1000_RAH(queue + 1), 0); - E1000_WRITE_FLUSH(hw); - - return -EADDRNOTAVAIL; - } - - DPRINTK(DRV, DEBUG, - "enabling MAC filter [[0x%X:0x%X:0x%X:0x%X:0x%X:0x%X]] " - "on RX Queue[%d]\n", mac_addr[0], mac_addr[1], mac_addr[2], - mac_addr[3], mac_addr[4], mac_addr[5], queue); - - /* Store in ring */ - memcpy(rx_ring->mac_addr, mac_addr, NODE_ADDRESS_SIZE); - - /* Note: Preserve perm mac addr at RAR[0] - * Set MAC filter for - * Q0 => RAR[1] - * Q1 => RAR[2] - * Q2 => RAR[3] - * Q3 => RAR[4] - */ - igb_rar_set_qsel(adapter, rx_ring->mac_addr, queue + 1, queue); - - return err; -} - -static int igb_get_netqueue_features(vmknetddi_queueop_get_features_args_t *args) -{ - args->features = VMKNETDDI_QUEUEOPS_FEATURE_RXQUEUES | - VMKNETDDI_QUEUEOPS_FEATURE_TXQUEUES; - return VMKNETDDI_QUEUEOPS_OK; -} - -static int igb_get_queue_count(vmknetddi_queueop_get_queue_count_args_t *args) -{ - struct net_device *netdev = args->netdev; - struct igb_adapter *adapter = netdev_priv(netdev); - - if (args->type == VMKNETDDI_QUEUEOPS_QUEUE_TYPE_TX) { - args->count = adapter->num_tx_queues - 1; - } else if (args->type == VMKNETDDI_QUEUEOPS_QUEUE_TYPE_RX) { - args->count = adapter->num_rx_queues - 1; - } else { - DPRINTK(DRV, ERR, "invalid queue type\n"); - return VMKNETDDI_QUEUEOPS_ERR; - } - - return VMKNETDDI_QUEUEOPS_OK; -} - -static int igb_get_filter_count(vmknetddi_queueop_get_filter_count_args_t *args) -{ - args->count = 1; - return VMKNETDDI_QUEUEOPS_OK; -} + int i; -static int igb_alloc_rx_queue(struct net_device *netdev, - vmknetddi_queueops_queueid_t *p_qid, - struct napi_struct **napi_p) -{ - struct igb_adapter *adapter = netdev_priv(netdev); + if (is_zero_ether_addr(addr)) + return 0; - if (adapter->n_rx_queues_allocated >= adapter->num_rx_queues) { - DPRINTK(DRV, ERR, "igb_alloc_rx_queue: no free rx queues\n"); - return VMKNETDDI_QUEUEOPS_ERR; - } else { - int i; - for (i = 1; i < adapter->num_rx_queues; i++) { - struct igb_ring *ring = adapter->rx_ring[i]; - if (!ring->allocated) { - ring->allocated = TRUE; - *p_qid = VMKNETDDI_QUEUEOPS_MK_RX_QUEUEID(i); - DPRINTK(DRV, DEBUG, - "allocated VMDQ rx queue=%d\n", i); - *napi_p = &ring->q_vector->napi; - adapter->n_rx_queues_allocated++; - return VMKNETDDI_QUEUEOPS_OK; - } - } - DPRINTK(DRV, ERR, "no free rx queues found!\n"); - return VMKNETDDI_QUEUEOPS_ERR; + for (i = 0; i < 
hw->mac.rar_entry_count; i++) { + if (adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE) + continue; + adapter->mac_table[i].state = (IGB_MAC_STATE_MODIFIED | + IGB_MAC_STATE_IN_USE); + memcpy(adapter->mac_table[i].addr, addr, ETH_ALEN); + adapter->mac_table[i].queue = queue; + igb_sync_mac_table(adapter); + return 0; } + return -ENOMEM; } - -static int igb_alloc_tx_queue(struct net_device *netdev, - vmknetddi_queueops_queueid_t *p_qid, - u16 *queue_mapping) +int igb_del_mac_filter(struct igb_adapter *adapter, u8* addr, u16 queue) { - struct igb_adapter *adapter = netdev_priv(netdev); + /* search table for addr, if found, set to 0 and sync */ + int i; + struct e1000_hw *hw = &adapter->hw; - if (adapter->n_tx_queues_allocated >= adapter->num_tx_queues) { - DPRINTK(DRV, ERR, "igb_alloc_tx_queue: no free tx queues\n"); - return VMKNETDDI_QUEUEOPS_ERR; - } else { - int i; - for (i = 1; i < adapter->num_tx_queues; i++) { - struct igb_ring *ring = adapter->tx_ring[i]; - if (!ring->allocated) { - ring->allocated = TRUE; - *p_qid = VMKNETDDI_QUEUEOPS_MK_TX_QUEUEID(i); - *queue_mapping = i; - DPRINTK(DRV, DEBUG, - "allocated VMDQ tx queue=%d\n", i); - adapter->n_tx_queues_allocated++; - return VMKNETDDI_QUEUEOPS_OK; - } + if (is_zero_ether_addr(addr)) + return 0; + for (i = 0; i < hw->mac.rar_entry_count; i++) { + if (!compare_ether_addr(addr, adapter->mac_table[i].addr) && + adapter->mac_table[i].queue == queue) { + adapter->mac_table[i].state = IGB_MAC_STATE_MODIFIED; + memset(adapter->mac_table[i].addr, 0, ETH_ALEN); + adapter->mac_table[i].queue = 0; + igb_sync_mac_table(adapter); + return 0; } - DPRINTK(DRV, ERR, "no free tx queues found!\n"); - return VMKNETDDI_QUEUEOPS_ERR; - } -} - -static int igb_alloc_queue(vmknetddi_queueop_alloc_queue_args_t *args) -{ - struct net_device *netdev = args->netdev; - struct igb_adapter *adapter = netdev_priv(netdev); - - if (args->type == VMKNETDDI_QUEUEOPS_QUEUE_TYPE_TX) { - return igb_alloc_tx_queue(args->netdev, &args->queueid, - &args->queue_mapping); - } else if (args->type == VMKNETDDI_QUEUEOPS_QUEUE_TYPE_RX) { - return igb_alloc_rx_queue(args->netdev, &args->queueid, - &args->napi); - } else { - DPRINTK(DRV, ERR, "invalid queue type\n"); - return VMKNETDDI_QUEUEOPS_ERR; } + return -ENOMEM; } - -static int -igb_free_rx_queue(struct net_device *netdev, - vmknetddi_queueops_queueid_t qid) +static int igb_set_vf_mac(struct igb_adapter *adapter, + int vf, unsigned char *mac_addr) { - struct igb_adapter *adapter = netdev_priv(netdev); - u16 queue = VMKNETDDI_QUEUEOPS_QUEUEID_VAL(qid); - struct igb_ring *ring = adapter->rx_ring[queue]; + igb_del_mac_filter(adapter, adapter->vf_data[vf].vf_mac_addresses, vf); + memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN); - if (!ring->allocated) { - DPRINTK(DRV, ERR, "rx queue %d not allocated\n", queue); - return VMKNETDDI_QUEUEOPS_ERR; - } + igb_add_mac_filter(adapter, mac_addr, vf); - DPRINTK(DRV, DEBUG, "freed VMDQ rx queue=%d\n", queue); - ring->allocated = FALSE; - adapter->n_rx_queues_allocated--; - return VMKNETDDI_QUEUEOPS_OK; + return 0; } -static int -igb_free_tx_queue(struct net_device *netdev, - vmknetddi_queueops_queueid_t qid) +#ifdef IFLA_VF_MAX +static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) { struct igb_adapter *adapter = netdev_priv(netdev); - u16 queue = VMKNETDDI_QUEUEOPS_QUEUEID_VAL(qid); - - if (!adapter->tx_ring[queue]->allocated) { - DPRINTK(DRV, ERR, "tx queue %d not allocated\n", queue); - return VMKNETDDI_QUEUEOPS_ERR; + if 
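
/*
 * --- Editorial sketch (illustrative only; not part of the disclosed driver) ---
 * The new igb_add_mac_filter()/igb_del_mac_filter() above replace direct
 * RAR programming with a small software table: add takes the first slot
 * not marked IN_USE, delete matches on address + queue, and both mark the
 * entry MODIFIED so a sync pass can push only changed slots to hardware.
 * A self-contained model of that table (the entry count is hardware
 * dependent; 24 is an assumption):
 */
#include <string.h>
#include <stdint.h>

#define ETH_ALEN     6
#define RAR_ENTRIES  24
#define ST_MODIFIED  0x1
#define ST_IN_USE    0x2

struct mac_entry { uint8_t addr[ETH_ALEN]; uint16_t queue; uint8_t state; };
static struct mac_entry mac_table[RAR_ENTRIES];

static int mac_add(const uint8_t *addr, uint16_t queue)
{
    for (int i = 0; i < RAR_ENTRIES; i++) {
        if (mac_table[i].state & ST_IN_USE)
            continue;                      /* first free slot wins */
        memcpy(mac_table[i].addr, addr, ETH_ALEN);
        mac_table[i].queue = queue;
        mac_table[i].state = ST_MODIFIED | ST_IN_USE;
        return 0;
    }
    return -1;                             /* table full (-ENOMEM above) */
}

static int mac_del(const uint8_t *addr, uint16_t queue)
{
    for (int i = 0; i < RAR_ENTRIES; i++) {
        if (!(mac_table[i].state & ST_IN_USE) ||
            memcmp(mac_table[i].addr, addr, ETH_ALEN) ||
            mac_table[i].queue != queue)
            continue;
        memset(mac_table[i].addr, 0, ETH_ALEN);
        mac_table[i].queue = 0;
        mac_table[i].state = ST_MODIFIED;  /* sync pass will clear the RAR */
        return 0;
    }
    return -1;
}
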
(!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count)) + return -EINVAL; + adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC; + dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf); + dev_info(&adapter->pdev->dev, "Reload the VF driver to make this" + " change effective.\n"); + if (test_bit(__IGB_DOWN, &adapter->state)) { + dev_warn(&adapter->pdev->dev, "The VF MAC address has been set," + " but the PF device is not up.\n"); + dev_warn(&adapter->pdev->dev, "Bring the PF device up before" + " attempting to use the VF device.\n"); } - - DPRINTK(DRV, DEBUG, "freed VMDQ tx queue=%d\n", queue); - adapter->tx_ring[queue]->allocated = FALSE; - adapter->n_tx_queues_allocated--; - return VMKNETDDI_QUEUEOPS_OK; + return igb_set_vf_mac(adapter, vf, mac); } -static int -igb_free_queue(vmknetddi_queueop_free_queue_args_t *args) +static int igb_link_mbps(int internal_link_speed) { - struct net_device *netdev = args->netdev; - struct igb_adapter *adapter = netdev_priv(netdev); - - if (VMKNETDDI_QUEUEOPS_IS_TX_QUEUEID(args->queueid)) { - return igb_free_tx_queue(netdev, args->queueid); - } else if (VMKNETDDI_QUEUEOPS_IS_RX_QUEUEID(args->queueid)) { - return igb_free_rx_queue(netdev, args->queueid); - } else { - DPRINTK(DRV, ERR, "invalid queue type\n"); - return VMKNETDDI_QUEUEOPS_ERR; + switch (internal_link_speed) { + case SPEED_100: + return 100; + case SPEED_1000: + return 1000; + case SPEED_2500: + return 2500; + default: + return 0; } } -static int -igb_get_queue_vector(vmknetddi_queueop_get_queue_vector_args_t *args) +static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate, + int link_speed) { - int qid; - struct net_device *netdev = args->netdev; - struct igb_adapter *adapter = netdev_priv(netdev); - /* Assuming RX queue id's are received */ - qid = VMKNETDDI_QUEUEOPS_QUEUEID_VAL(args->queueid); - args->vector = adapter->msix_entries[qid].vector; - - return VMKNETDDI_QUEUEOPS_OK; -} + int rf_dec, rf_int; + u32 bcnrc_val; -static int -igb_get_default_queue(vmknetddi_queueop_get_default_queue_args_t *args) -{ - struct net_device *netdev = args->netdev; - struct igb_adapter *adapter = netdev_priv(netdev); + if (tx_rate != 0) { + /* Calculate the rate factor values to set */ + rf_int = link_speed / tx_rate; + rf_dec = (link_speed - (rf_int * tx_rate)); + rf_dec = (rf_dec * (1<type == VMKNETDDI_QUEUEOPS_QUEUE_TYPE_RX) { - args->napi = &adapter->rx_ring[0]->q_vector->napi; - args->queueid = VMKNETDDI_QUEUEOPS_MK_RX_QUEUEID(0); - return VMKNETDDI_QUEUEOPS_OK; - } else if (args->type == VMKNETDDI_QUEUEOPS_QUEUE_TYPE_TX) { - args->queueid = VMKNETDDI_QUEUEOPS_MK_TX_QUEUEID(0); - return VMKNETDDI_QUEUEOPS_OK; + bcnrc_val = E1000_RTTBCNRC_RS_ENA; + bcnrc_val |= ((rf_int<queueid); - struct igb_adapter *adapter = netdev_priv(args->netdev); - - if (!VMKNETDDI_QUEUEOPS_IS_RX_QUEUEID(args->queueid)) { - DPRINTK(DRV, ERR, "not an rx queue 0x%x\n", - args->queueid); - return VMKNETDDI_QUEUEOPS_ERR; - } - - if (vmknetddi_queueops_get_filter_class(&args->filter) - != VMKNETDDI_QUEUEOPS_FILTER_MACADDR) { - DPRINTK(DRV, ERR, "only mac filters supported\n"); - return VMKNETDDI_QUEUEOPS_ERR; - } + int actual_link_speed, i; + bool reset_rate = false; - if (!adapter->rx_ring[queue]->allocated) { - DPRINTK(DRV, ERR, "queue not allocated\n"); - return VMKNETDDI_QUEUEOPS_ERR; - } + /* VF TX rate limit was not set */ + if ((adapter->vf_rate_link_speed == 0) || + (adapter->hw.mac.type != e1000_82576)) + return; - if (adapter->rx_ring[queue]->active) { - DPRINTK(DRV, ERR, 
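
/*
 * --- Editorial sketch ---
 * The '+' lines of igb_set_vf_rate_limit() are garbled in this disclosure
 * text ("(1<type ==", "(rf_int<queueid"): an angle-bracket stripper has
 * eaten the shift expressions together with some interleaved '-' lines.
 * The sketch below shows the intended rate-factor computation, not the
 * verbatim source: an integer part (link_speed / tx_rate) plus a binary
 * fraction of the remainder, packed into RTTBCNRC.  The 14-bit fractional
 * width and the bit-31 enable flag are assumptions matching the upstream
 * igb driver.
 */
#include <stdint.h>
#include <stdio.h>

#define RF_FRAC_BITS 14                      /* assumed RTTBCNRC layout */
#define RF_RS_ENA    (1u << 31)              /* rate-scheduler enable */

static uint32_t rate_factor(uint32_t link_speed_mbps, uint32_t tx_rate_mbps)
{
    if (!tx_rate_mbps)
        return 0;                            /* 0 disables the limiter */

    uint32_t rf_int = link_speed_mbps / tx_rate_mbps;
    uint32_t rem    = link_speed_mbps - rf_int * tx_rate_mbps;
    /* scale the remainder into a RF_FRAC_BITS-wide binary fraction */
    uint32_t rf_dec = (rem << RF_FRAC_BITS) / tx_rate_mbps;

    return RF_RS_ENA | (rf_int << RF_FRAC_BITS) | rf_dec;
}

int main(void)
{
    /* 1000 Mb/s link, 300 Mb/s cap: factor = 3 + 100/300, i.e. 3.3333 */
    printf("0x%08x\n", rate_factor(1000, 300));
    return 0;
}
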
"filter count exceeded\n"); - return VMKNETDDI_QUEUEOPS_ERR; + actual_link_speed = igb_link_mbps(adapter->link_speed); + if (actual_link_speed != adapter->vf_rate_link_speed) { + reset_rate = true; + adapter->vf_rate_link_speed = 0; + dev_info(&adapter->pdev->dev, + "Link speed has been changed. VF Transmit rate is disabled\n"); } - macaddr = vmknetddi_queueops_get_filter_macaddr(&args->filter); + for (i = 0; i < adapter->vfs_allocated_count; i++) { + if (reset_rate) + adapter->vf_data[i].tx_rate = 0; - rval = igb_set_rxqueue_macfilter(args->netdev, queue, macaddr); - if (rval == 0) { - adapter->rx_ring[queue]->active = TRUE; - /* force to 0 since we only support one filter per queue */ - args->filterid = VMKNETDDI_QUEUEOPS_MK_FILTERID(0); - return VMKNETDDI_QUEUEOPS_OK; - } else { - return VMKNETDDI_QUEUEOPS_ERR; + igb_set_vf_rate_limit(&adapter->hw, i, + adapter->vf_data[i].tx_rate, actual_link_speed); } } -static int -igb_remove_rx_filter(vmknetddi_queueop_remove_rx_filter_args_t *args) -{ - int rval; - u16 cidx = VMKNETDDI_QUEUEOPS_QUEUEID_VAL(args->queueid); - u16 fidx = VMKNETDDI_QUEUEOPS_FILTERID_VAL(args->filterid); - struct igb_adapter *adapter = netdev_priv(args->netdev); - u8 macaddr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; - - DPRINTK(DRV, DEBUG, "removing filter on cidx=%d, fidx=%d\n", - cidx, fidx); - - /* This will return an error because broadcast is not a valid - * Ethernet address, so ignore and carry on - */ - rval = igb_set_rxqueue_macfilter(args->netdev, cidx, macaddr); - adapter->rx_ring[cidx]->active = FALSE; - return rval; -} - - -static int -igb_get_queue_stats(vmknetddi_queueop_get_stats_args_t *args) -{ - return VMKNETDDI_QUEUEOPS_ERR; -} - -static int -igb_get_netqueue_version(vmknetddi_queueop_get_version_args_t *args) -{ - return vmknetddi_queueops_version(args); -} -static int igb_set_tx_priority(vmknetddi_queueop_set_tx_priority_args_t *args) +static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate) { - /* Not supported */ - return VMKNETDDI_QUEUEOPS_OK; -} -static int -igb_netqueue_ops(vmknetddi_queueops_op_t op, void *args) -{ - switch (op) { - case VMKNETDDI_QUEUEOPS_OP_GET_VERSION: - return igb_get_netqueue_version( - (vmknetddi_queueop_get_version_args_t *)args); - break; - - case VMKNETDDI_QUEUEOPS_OP_GET_FEATURES: - return igb_get_netqueue_features( - (vmknetddi_queueop_get_features_args_t *)args); - break; - - case VMKNETDDI_QUEUEOPS_OP_GET_QUEUE_COUNT: - return igb_get_queue_count( - (vmknetddi_queueop_get_queue_count_args_t *)args); - break; - - case VMKNETDDI_QUEUEOPS_OP_GET_FILTER_COUNT: - return igb_get_filter_count( - (vmknetddi_queueop_get_filter_count_args_t *)args); - break; - - case VMKNETDDI_QUEUEOPS_OP_ALLOC_QUEUE: - return igb_alloc_queue( - (vmknetddi_queueop_alloc_queue_args_t *)args); - break; - - case VMKNETDDI_QUEUEOPS_OP_FREE_QUEUE: - return igb_free_queue( - (vmknetddi_queueop_free_queue_args_t *)args); - break; - - case VMKNETDDI_QUEUEOPS_OP_GET_QUEUE_VECTOR: - return igb_get_queue_vector( - (vmknetddi_queueop_get_queue_vector_args_t *)args); - break; - - case VMKNETDDI_QUEUEOPS_OP_GET_DEFAULT_QUEUE: - return igb_get_default_queue( - (vmknetddi_queueop_get_default_queue_args_t *)args); - break; - - case VMKNETDDI_QUEUEOPS_OP_APPLY_RX_FILTER: - return igb_apply_rx_filter( - (vmknetddi_queueop_apply_rx_filter_args_t *)args); - break; - - case VMKNETDDI_QUEUEOPS_OP_REMOVE_RX_FILTER: - return igb_remove_rx_filter( - (vmknetddi_queueop_remove_rx_filter_args_t *)args); - break; + struct igb_adapter *adapter 
= netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int actual_link_speed; - case VMKNETDDI_QUEUEOPS_OP_GET_STATS: - return igb_get_queue_stats( - (vmknetddi_queueop_get_stats_args_t *)args); - break; + if (hw->mac.type != e1000_82576) + return -EOPNOTSUPP; - case VMKNETDDI_QUEUEOPS_OP_SET_TX_PRIORITY: - return igb_set_tx_priority( - (vmknetddi_queueop_set_tx_priority_args_t *)args); - break; + actual_link_speed = igb_link_mbps(adapter->link_speed); + if ((vf >= adapter->vfs_allocated_count) || + (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) || + (tx_rate < 0) || (tx_rate > actual_link_speed)) + return -EINVAL; - default: - return VMKNETDDI_QUEUEOPS_ERR; - } + adapter->vf_rate_link_speed = actual_link_speed; + adapter->vf_data[vf].tx_rate = (u16)tx_rate; + igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed); - return VMKNETDDI_QUEUEOPS_ERR; + return 0; } -#endif /* __VMKNETDDI_QUEUEOPS__ */ -static int igb_set_vf_mac(struct igb_adapter *adapter, - int vf, unsigned char *mac_addr) +#ifndef __VMKLNX__ +static int igb_ndo_get_vf_config(struct net_device *netdev, + int vf, struct ifla_vf_info *ivi) { - struct e1000_hw *hw = &adapter->hw; - /* VF MAC addresses start at end of receive addresses and moves - * torwards the first, as a result a collision should not be possible */ - int rar_entry = hw->mac.rar_entry_count - (vf + 1); - - memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN); - - igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf); - + struct igb_adapter *adapter = netdev_priv(netdev); + if (vf >= adapter->vfs_allocated_count) + return -EINVAL; + ivi->vf = vf; + memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN); + ivi->tx_rate = adapter->vf_data[vf].tx_rate; + ivi->vlan = adapter->vf_data[vf].pf_vlan; + ivi->qos = adapter->vf_data[vf].pf_qos; +#ifdef HAVE_VF_SPOOFCHK_CONFIGURE + ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled; +#endif return 0; } - +#endif /* __VMKLNX__ */ +#endif static void igb_vmm_control(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; + int count; u32 reg; switch (hw->mac.type) { @@ -7257,7 +8527,8 @@ static void igb_vmm_control(struct igb_adapter *adapter) case e1000_82576: /* notify HW that the MAC is adding vlan tags */ reg = E1000_READ_REG(hw, E1000_DTXCTL); - reg |= E1000_DTXCTL_VLAN_ADDED; + reg |= (E1000_DTXCTL_VLAN_ADDED | + E1000_DTXCTL_SPOOF_INT); E1000_WRITE_REG(hw, E1000_DTXCTL, reg); case e1000_82580: /* enable replication vlan tag stripping */ @@ -7265,14 +8536,185 @@ static void igb_vmm_control(struct igb_adapter *adapter) reg |= E1000_RPLOLR_STRVLAN; E1000_WRITE_REG(hw, E1000_RPLOLR, reg); case e1000_i350: + case e1000_i354: /* none of the above registers are supported by i350 */ break; } - /* enable replcation and loopback support */ - e1000_vmdq_set_loopback_pf(hw, adapter->vfs_allocated_count && !adapter->vmdq_pools); - e1000_vmdq_set_replication_pf(hw, adapter->vfs_allocated_count || adapter->vmdq_pools); + /* Enable Malicious Driver Detection */ + if ((adapter->vfs_allocated_count) && + (adapter->mdd)) { + if (hw->mac.type == e1000_i350) + igb_enable_mdd(adapter); + } +#ifdef __VMKLNX__ + if (adapter->vfs_allocated_count) { +#endif /* __VMKLNX__ */ + + /* enable replication and loopback support */ + count = adapter->vfs_allocated_count || adapter->vmdq_pools; + if (adapter->flags & IGB_FLAG_LOOPBACK_ENABLE && count) + e1000_vmdq_set_loopback_pf(hw, 1); +#ifdef __VMKLNX__ + } +#endif /* __VMKLNX__ */ + e1000_vmdq_set_replication_pf(hw, adapter->vfs_allocated_count 
|| + adapter->vmdq_pools); +} + +static void igb_init_fw(struct igb_adapter *adapter) +{ + struct e1000_fw_drv_info fw_cmd; + struct e1000_hw *hw = &adapter->hw; + int i; + u16 mask; + + if (hw->mac.type == e1000_i210) + mask = E1000_SWFW_EEP_SM; + else + mask = E1000_SWFW_PHY0_SM; + /* i211 parts do not support this feature */ + if (hw->mac.type == e1000_i211) + hw->mac.arc_subsystem_valid = false; + + if (!hw->mac.ops.acquire_swfw_sync(hw, mask)) { + for (i = 0; i <= FW_MAX_RETRIES; i++) { + E1000_WRITE_REG(hw, E1000_FWSTS, E1000_FWSTS_FWRI); + fw_cmd.hdr.cmd = FW_CMD_DRV_INFO; + fw_cmd.hdr.buf_len = FW_CMD_DRV_INFO_LEN; + fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CMD_RESERVED; + fw_cmd.port_num = hw->bus.func; + fw_cmd.drv_version = FW_FAMILY_DRV_VER; + fw_cmd.hdr.checksum = 0; + fw_cmd.hdr.checksum = e1000_calculate_checksum((u8 *)&fw_cmd, + (FW_HDR_LEN + + fw_cmd.hdr.buf_len)); + e1000_host_interface_command(hw, (u8*)&fw_cmd, + sizeof(fw_cmd)); + if (fw_cmd.hdr.cmd_or_resp.ret_status == FW_STATUS_SUCCESS) + break; + } + } else + dev_warn(pci_dev_to_dev(adapter->pdev), + "Unable to get semaphore, firmware init failed.\n"); + hw->mac.ops.release_swfw_sync(hw, mask); +} + +static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) +{ + struct e1000_hw *hw = &adapter->hw; + u32 dmac_thr; + u16 hwm; + u32 status; + + if (hw->mac.type == e1000_i211) + return; + + if (hw->mac.type > e1000_82580) { + if (adapter->dmac != IGB_DMAC_DISABLE) { + u32 reg; + + /* force threshold to 0. */ + E1000_WRITE_REG(hw, E1000_DMCTXTH, 0); + + /* + * DMA Coalescing high water mark needs to be greater + * than the Rx threshold. Set hwm to PBA - max frame + * size in 16B units, capping it at PBA - 6KB. + */ + hwm = 64 * pba - adapter->max_frame_size / 16; + if (hwm < 64 * (pba - 6)) + hwm = 64 * (pba - 6); + reg = E1000_READ_REG(hw, E1000_FCRTC); + reg &= ~E1000_FCRTC_RTH_COAL_MASK; + reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT) + & E1000_FCRTC_RTH_COAL_MASK); + E1000_WRITE_REG(hw, E1000_FCRTC, reg); + + /* + * Set the DMA Coalescing Rx threshold to PBA - 2 * max + * frame size, capping it at PBA - 10KB. + */ + dmac_thr = pba - adapter->max_frame_size / 512; + if (dmac_thr < pba - 10) + dmac_thr = pba - 10; + reg = E1000_READ_REG(hw, E1000_DMACR); + reg &= ~E1000_DMACR_DMACTHR_MASK; + reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT) + & E1000_DMACR_DMACTHR_MASK); + + /* transition to L0x or L1 if available..*/ + reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK); + + /* Check if status is 2.5Gb backplane connection + * before configuration of watchdog timer, which is + * in msec values in 12.8usec intervals + * watchdog timer= msec values in 32usec intervals + * for non 2.5Gb connection + */ + if (hw->mac.type == e1000_i354) { + status = E1000_READ_REG(hw, E1000_STATUS); + if ((status & E1000_STATUS_2P5_SKU) && + (!(status & E1000_STATUS_2P5_SKU_OVER))) + reg |= ((adapter->dmac * 5) >> 6); + else + reg |= ((adapter->dmac) >> 5); + } else { + reg |= ((adapter->dmac) >> 5); + } + + /* + * Disable BMC-to-OS Watchdog enable + * on devices that support OS-to-BMC + */ + if (hw->mac.type != e1000_i354) + reg &= ~E1000_DMACR_DC_BMC2OSW_EN; + E1000_WRITE_REG(hw, E1000_DMACR, reg); + + /* no lower threshold to disable coalescing(smart fifb)-UTRESH=0*/ + E1000_WRITE_REG(hw, E1000_DMCRTRH, 0); + + /* This sets the time to wait before requesting + * transition to low power state to number of usecs + * needed to receive 1 512 byte frame at gigabit + * line rate. 
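
/*
 * --- Editorial sketch (illustrative only; not part of the disclosed driver) ---
 * igb_init_dmac() above derives two DMA-coalescing watermarks from the
 * packet buffer allocation "pba" (in KB): a high-water mark in 16-byte
 * units, floored at PBA - 6KB, and an Rx threshold in 512-byte units,
 * floored at PBA - 10KB.  Worked through for a 34KB PBA and a 1522-byte
 * max frame:
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t pba = 34;            /* KB of Rx packet buffer */
    uint32_t max_frame = 1522;    /* bytes: MTU + headers + FCS */

    uint32_t hwm = 64 * pba - max_frame / 16;   /* 2176 - 95 = 2081 */
    if (hwm < 64 * (pba - 6))                   /* floor at 64*28 = 1792 */
        hwm = 64 * (pba - 6);

    uint32_t dmac_thr = pba - max_frame / 512;  /* 34 - 2 = 32 */
    if (dmac_thr < pba - 10)                    /* floor at 24 */
        dmac_thr = pba - 10;

    printf("hwm=%u (16B units), dmac_thr=%u (512B units)\n", hwm, dmac_thr);
    return 0;
}
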
On i350 device, time to make transition + * to Lx state is delayed by 4 usec with flush disable + * bit set to avoid losing mailbox interrupts + */ + reg = E1000_READ_REG(hw, E1000_DMCTLX); + if (hw->mac.type == e1000_i350) + reg |= IGB_DMCTLX_DCFLUSH_DIS; + /* in 2.5Gb connection, TTLX unit is 0.4 usec + * which is 0x4*2 = 0xA. But delay is still 4 usec + */ + if (hw->mac.type == e1000_i354) { + status = E1000_READ_REG(hw, E1000_STATUS); + if ((status & E1000_STATUS_2P5_SKU) && + (!(status & E1000_STATUS_2P5_SKU_OVER))) + reg |= 0xA; + else + reg |= 0x4; + } else { + reg |= 0x4; + } + E1000_WRITE_REG(hw, E1000_DMCTLX, reg); + + /* free space in tx packet buffer to wake from DMA coal */ + E1000_WRITE_REG(hw, E1000_DMCTXTH, (IGB_MIN_TXPBSIZE - + (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6); + + /* make low power state decision controlled by DMA coal */ + reg = E1000_READ_REG(hw, E1000_PCIEMISC); + reg &= ~E1000_PCIEMISC_LX_DECISION; + E1000_WRITE_REG(hw, E1000_PCIEMISC, reg); + } /* endif adapter->dmac is not disabled */ + } else if (hw->mac.type == e1000_82580) { + u32 reg = E1000_READ_REG(hw, E1000_PCIEMISC); + E1000_WRITE_REG(hw, E1000_PCIEMISC, + reg & ~E1000_PCIEMISC_LX_DECISION); + E1000_WRITE_REG(hw, E1000_DMACR, 0); + } } /* igb_main.c */ diff --git a/vmkdrivers/src_9/drivers/net/igb/igb_param.c b/vmkdrivers/src_9/drivers/net/igb/igb_param.c index 9056bfe2ada3a8f43dae4a87201e96f9cd055cca..a230cbd01e0c6749b779dec5fb43aa209061a621 100644 --- a/vmkdrivers/src_9/drivers/net/igb/igb_param.c +++ b/vmkdrivers/src_9/drivers/net/igb/igb_param.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2009 Intel Corporation. + Copyright(c) 2007-2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -39,6 +39,7 @@ #define OPTION_UNSET -1 #define OPTION_DISABLED 0 #define OPTION_ENABLED 1 +#define MAX_NUM_LIST_OPTS 15 /* All parameters are treated the same, as an integer array of values. 
* This macro just reduces the need to repeat the same declaration code @@ -58,12 +59,12 @@ */ #define IGB_PARAM(X, desc) \ - static const int __devinitdata X[IGB_MAX_NIC+1] = IGB_PARAM_INIT; \ + static const int X[IGB_MAX_NIC+1] = IGB_PARAM_INIT; \ MODULE_PARM(X, "1-" __MODULE_STRING(IGB_MAX_NIC) "i"); \ MODULE_PARM_DESC(X, desc); #else #define IGB_PARAM(X, desc) \ - static int __devinitdata X[IGB_MAX_NIC+1] = IGB_PARAM_INIT; \ + static int X[IGB_MAX_NIC+1] = IGB_PARAM_INIT; \ static unsigned int num_##X; \ module_param_array_named(X, X, int, &num_##X, 0); \ MODULE_PARM_DESC(X, desc); @@ -73,11 +74,12 @@ * * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative) */ -IGB_PARAM(InterruptThrottleRate, +IGB_PARAM(InterruptThrottleRate, "Maximum interrupts per second, per vector, (max 100000), default 3=adaptive"); #define DEFAULT_ITR 3 #define MAX_ITR 100000 -#define MIN_ITR 120 +/* #define MIN_ITR 120 */ +#define MIN_ITR 0 /* IntMode (Interrupt Mode) * * Valid Range: 0 - 2 @@ -88,6 +90,8 @@ IGB_PARAM(IntMode, "Change Interrupt Mode (0=Legacy, 1=MSI, 2=MSI-X), default 2" #define MAX_INTMODE IGB_INT_MODE_MSIX #define MIN_INTMODE IGB_INT_MODE_LEGACY +IGB_PARAM(Node, "set the starting node to allocate memory on, default -1"); + /* LLIPort (Low Latency Interrupt TCP Port) * * Valid Range: 0 - 65535 @@ -124,18 +128,17 @@ IGB_PARAM(LLISize, "Low Latency Interrupt on Packet Size (0-1500), default 0=off #define MAX_LLISIZE 1500 #define MIN_LLISIZE 0 - /* RSS (Enable RSS multiqueue receive) * * Valid Range: 0 - 8 * * Default Value: 1 */ -IGB_PARAM(RSS, "Number of Receive-Side Scaling Descriptor Queues (0-8), default 1=number of cpus"); +IGB_PARAM(RSS, "Number of Receive-Side Scaling Descriptor Queues (0-8), default 1, 0=number of cpus"); #define DEFAULT_RSS 1 -#define MAX_RSS ((adapter->hw.mac.type == e1000_82575) ? 4 : 8) -#define MIN_RSS 0 +#define MAX_RSS 8 +#define MIN_RSS 0 /* VMDQ (Enable VMDq multiqueue receive) * @@ -143,12 +146,37 @@ IGB_PARAM(RSS, "Number of Receive-Side Scaling Descriptor Queues (0-8), default * * Default Value: 0 */ -IGB_PARAM(VMDQ, "Number of Virtual Machine Device Queues (0-8), default 0"); +IGB_PARAM(VMDQ, "Number of Virtual Machine Device Queues: 0-1 = disable, 2-8 enable, default 0"); #define DEFAULT_VMDQ 0 #define MAX_VMDQ MAX_RSS #define MIN_VMDQ 0 +#ifndef __VMKLNX__ +/* max_vfs (Enable SR-IOV VF devices) + * + * Valid Range: 0 - 7 + * + * Default Value: 0 + */ +IGB_PARAM(max_vfs, "Number of Virtual Functions: 0 = disable, 1-7 enable, default 0"); + +#define DEFAULT_SRIOV 0 +#define MAX_SRIOV 7 +#define MIN_SRIOV 0 +#endif /* __VMKLNX__ */ + +/* MDD (Enable Malicious Driver Detection) + * + * Only available when SR-IOV is enabled - max_vfs is greater than 0 + * + * Valid Range: 0, 1 + * + * Default Value: 1 + */ +IGB_PARAM(MDD, "Malicious Driver Detection (0/1), default 1 = enabled. " + "Only available when max_vfs is greater than 0"); + /* QueuePairs (Enable TX/RX queue pairs for interrupt handling) * @@ -156,12 +184,33 @@ IGB_PARAM(VMDQ, "Number of Virtual Machine Device Queues (0-8), default 0"); * * Default Value: 1 */ -IGB_PARAM(QueuePairs, "Enable TX/RX queue pairs for interrupt handling (0,1), default 1=on"); +IGB_PARAM(QueuePairs, "Enable Tx/Rx queue pairs for interrupt handling (0,1), default 1=on"); #define DEFAULT_QUEUE_PAIRS 1 #define MAX_QUEUE_PAIRS 1 #define MIN_QUEUE_PAIRS 0 +/* Enable/disable EEE (a.k.a. 
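
/*
 * --- Editorial note (expansion example, not new source) ---
 * On kernels with module_param_array, the IGB_PARAM() macro above turns
 * one line per option into a per-NIC integer array plus a use count.
 * Assuming IGB_PARAM_INIT is the usual all-OPTION_UNSET initializer, e.g.
 * IGB_PARAM(RSS, "...") expands to roughly:
 */
static int RSS[IGB_MAX_NIC + 1] = { [0 ... IGB_MAX_NIC] = OPTION_UNSET };
static unsigned int num_RSS;
module_param_array_named(RSS, RSS, int, &num_RSS, 0);
MODULE_PARM_DESC(RSS, "Number of Receive-Side Scaling Descriptor Queues (0-8), default 1, 0=number of cpus");
/* num_RSS later tells igb_check_options() whether the user actually
 * supplied a value for this board index ("if (num_RSS > bd)"). */
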
IEEE802.3az) + * + * Valid Range: 0, 1 + * + * Default Value: 1 + */ + IGB_PARAM(EEE, "Enable/disable EEE (IEEE 802.3az) on parts that support the feature"); + +/* Enable/disable DMA Coalescing + * + * Valid Values: 0(off), 1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, + * 9000, 10000(msec), 250(usec), 500(usec) + * + * Default Value: 0 + */ + IGB_PARAM(DMAC, "Disable or set latency for DMA Coalescing (0=off, 1000-10000(msec), 250, 500(usec))"); + +struct igb_opt_list { + int i; + char *str; +}; struct igb_option { enum { enable_option, range_option, list_option } type; const char *name; @@ -174,14 +223,14 @@ struct igb_option { } r; struct { /* list_option info */ int nr; - struct igb_opt_list { int i; char *str; } *p; + struct igb_opt_list *p; } l; } arg; }; -static int __devinit igb_validate_option(unsigned int *value, - struct igb_option *opt, - struct igb_adapter *adapter) +static int igb_validate_option(unsigned int *value, + struct igb_option *opt, + struct igb_adapter *adapter) { if (*value == OPTION_UNSET) { *value = opt->def; @@ -240,9 +289,10 @@ static int __devinit igb_validate_option, * in a variable in the adapter structure. **/ -void __devinit igb_check_options(struct igb_adapter *adapter) +void igb_check_options(struct igb_adapter *adapter) { int bd = adapter->bd_number; + struct e1000_hw *hw = &adapter->hw; if (bd >= IGB_MAX_NIC) { DPRINTK(PROBE, NOTICE, @@ -272,6 +322,9 @@ void __devinit igb_check_options(struct igb_adapter *adapter) case 0: DPRINTK(PROBE, INFO, "%s turned off\n", opt.name); + if (hw->mac.type >= e1000_i350) + adapter->dmac = IGB_DMAC_DISABLE; + adapter->rx_itr_setting = itr; break; case 1: DPRINTK(PROBE, INFO, "%s set to dynamic mode\n", @@ -403,19 +456,58 @@ void __devinit igb_check_options(struct igb_adapter *adapter) } #endif } +#ifndef __VMKLNX__ + { /* SRIOV - Enable SR-IOV VF devices */ + struct igb_option opt = { + .type = range_option, + .name = "max_vfs - SR-IOV VF devices", + .err = "using default of " __MODULE_STRING(DEFAULT_SRIOV), + .def = DEFAULT_SRIOV, + .arg = { .r = { .min = MIN_SRIOV, + .max = MAX_SRIOV } } + }; + +#ifdef module_param_array + if (num_max_vfs > bd) { +#endif + adapter->vfs_allocated_count = max_vfs[bd]; + igb_validate_option(&adapter->vfs_allocated_count, &opt, adapter); + +#ifdef module_param_array + } else { + adapter->vfs_allocated_count = opt.def; + } +#endif + if (adapter->vfs_allocated_count) { + switch (hw->mac.type) { + case e1000_82575: + case e1000_82580: + case e1000_i210: + case e1000_i211: + case e1000_i354: + adapter->vfs_allocated_count = 0; + DPRINTK(PROBE, INFO, "SR-IOV option max_vfs not supported.\n"); + default: + break; + } + } + } +#endif /* __VMKLNX__ */ { /* VMDQ - Enable VMDq multiqueue receive */ struct igb_option opt = { .type = range_option, - .name = "VMDQ - VMDq multiqueue receive count", + .name = "VMDQ - VMDq multiqueue queue count", .err = "using default of " __MODULE_STRING(DEFAULT_VMDQ), .def = DEFAULT_VMDQ, .arg = { .r = { .min = MIN_VMDQ, .max = (MAX_VMDQ - adapter->vfs_allocated_count) } } }; + if ((hw->mac.type != e1000_i210) && + (hw->mac.type != e1000_i211)) { #ifdef module_param_array if (num_VMDQ > bd) { #endif - adapter->vmdq_pools = VMDQ[bd]; + adapter->vmdq_pools = (VMDQ[bd] == 1 ? 
0 : VMDQ[bd]); if (adapter->vfs_allocated_count && !adapter->vmdq_pools) { DPRINTK(PROBE, INFO, "Enabling SR-IOV requires VMDq be set to at least 1\n"); adapter->vmdq_pools = 1; @@ -425,11 +517,26 @@ void __devinit igb_check_options(struct igb_adapter *adapter) #ifdef module_param_array } else { if (!adapter->vfs_allocated_count) - adapter->vmdq_pools = opt.def; + adapter->vmdq_pools = (opt.def == 1 ? 0 : opt.def); else adapter->vmdq_pools = 1; } #endif +#ifdef CONFIG_IGB_VMDQ_NETDEV + if (hw->mac.type == e1000_82575 && adapter->vmdq_pools) { + DPRINTK(PROBE, INFO, "VMDq not supported on this part.\n"); + adapter->vmdq_pools = 0; + } +#endif + if (adapter->vmdq_pools) { + DPRINTK(PROBE, INFO, "VMDq not supported on ESX-5.5\n"); + adapter->vmdq_pools = 0; + } + + } else { + DPRINTK(PROBE, INFO, "VMDq option is not supported.\n"); + adapter->vmdq_pools = opt.def; + } } { /* RSS - Enable RSS multiqueue receives */ struct igb_option opt = { @@ -441,22 +548,48 @@ void __devinit igb_check_options(struct igb_adapter *adapter) .max = MAX_RSS } } }; - if (adapter->vmdq_pools) { - switch (adapter->hw.mac.type) { -#ifndef __VMKLNX__ - case e1000_82576: + switch (hw->mac.type) { + case e1000_82575: +#ifndef CONFIG_IGB_VMDQ_NETDEV + if (!!adapter->vmdq_pools) { + if (adapter->vmdq_pools <= 2) { + if (adapter->vmdq_pools == 2) + opt.arg.r.max = 3; + } else { + opt.arg.r.max = 1; + } + } else { + opt.arg.r.max = 4; + } +#else + opt.arg.r.max = !!adapter->vmdq_pools ? 1 : 4; +#endif /* CONFIG_IGB_VMDQ_NETDEV */ + break; + case e1000_i210: + opt.arg.r.max = 4; + break; + case e1000_i211: + opt.arg.r.max = 2; + break; + case e1000_82576: +#ifndef CONFIG_IGB_VMDQ_NETDEV + if (!!adapter->vmdq_pools) opt.arg.r.max = 2; - break; - case e1000_82575: - if (adapter->vmdq_pools == 2) - opt.arg.r.max = 3; - if (adapter->vmdq_pools <= 2) - break; -#endif - default: + break; +#endif /* CONFIG_IGB_VMDQ_NETDEV */ + case e1000_82580: + case e1000_i350: + case e1000_i354: + default: + if (!!adapter->vmdq_pools) opt.arg.r.max = 1; - break; - } + break; + } + + if (adapter->int_mode != IGB_INT_MODE_MSIX) { + DPRINTK(PROBE, INFO, "RSS is not supported when in MSI/Legacy Interrupt mode, %s\n", + opt.err); + opt.arg.r.max = 1; } #ifdef module_param_array @@ -480,42 +613,195 @@ void __devinit igb_check_options(struct igb_adapter *adapter) } #endif } - { /* QueuePairs - Enable TX/RX queue pairs for interrupt handling */ + { /* QueuePairs - Enable Tx/Rx queue pairs for interrupt handling */ struct igb_option opt = { .type = enable_option, - .name = "QueuePairs - TX/RX queue pairs for interrupt handling", + .name = "QueuePairs - Tx/Rx queue pairs for interrupt handling", .err = "defaulting to Enabled", .def = OPTION_ENABLED }; - #ifdef module_param_array if (num_QueuePairs > bd) { #endif unsigned int qp = QueuePairs[bd]; /* - * we must enable queue pairs if the number of queues - * exceeds the number of avaialble interrupts. We are - * limited to 10, or 3 per unallocated vf. + * We must enable queue pairs if the number of queues + * exceeds the number of available interrupts. We are + * limited to 10, or 3 per unallocated vf. On I210 and + * I211 devices, we are limited to 5 interrupts. + * However, since I211 only supports 2 queues, we do not + * need to check and override the user option. 
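+	 * (Editor's worked example, not in the original comment: with
+	 * pairing disabled, each queue wants its own Tx and Rx vector,
+	 * so five unpaired queues already need 5 * 2 = 10 vectors before
+	 * any vector is left for link/other causes - that extra vector is
+	 * the editor's assumption - hence the rss_queues > 4 check below
+	 * forces pairing back on.)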
*/ - if ((adapter->rss_queues > 4) || - (adapter->vmdq_pools > 4) || - ((adapter->rss_queues > 1) && - ((adapter->vmdq_pools > 3) || - (adapter->vfs_allocated_count > 6)))) { - if (qp == OPTION_DISABLED) { + if (qp == OPTION_DISABLED) { + if (adapter->rss_queues > 4) qp = OPTION_ENABLED; - DPRINTK(PROBE, INFO, - "Number of queues exceeds available interrupts, %s\n",opt.err); - } + + if (adapter->vmdq_pools > 4) + qp = OPTION_ENABLED; + + if (adapter->rss_queues > 1 && + (adapter->vmdq_pools > 3 || + adapter->vfs_allocated_count > 6)) + qp = OPTION_ENABLED; + + if (hw->mac.type == e1000_i210 && + adapter->rss_queues > 2) + qp = OPTION_ENABLED; + + if (qp == OPTION_ENABLED) + DPRINTK(PROBE, INFO, "Number of queues exceeds available interrupts, %s\n", + opt.err); } igb_validate_option(&qp, &opt, adapter); adapter->flags |= qp ? IGB_FLAG_QUEUE_PAIRS : 0; - #ifdef module_param_array } else { adapter->flags |= opt.def ? IGB_FLAG_QUEUE_PAIRS : 0; } #endif } + { /* EEE - Enable EEE for capable adapters */ + + if (hw->mac.type >= e1000_i350) { + struct igb_option opt = { + .type = enable_option, + .name = "EEE Support", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED + }; +#ifdef module_param_array + if (num_EEE > bd) { +#endif + unsigned int eee = EEE[bd]; + igb_validate_option(&eee, &opt, adapter); + adapter->flags |= eee ? IGB_FLAG_EEE : 0; + if (eee) + hw->dev_spec._82575.eee_disable = false; + else + hw->dev_spec._82575.eee_disable = true; + +#ifdef module_param_array + } else { + adapter->flags |= opt.def ? IGB_FLAG_EEE : 0; + if (adapter->flags & IGB_FLAG_EEE) + hw->dev_spec._82575.eee_disable = false; + else + hw->dev_spec._82575.eee_disable = true; + } +#endif + } + } + { /* DMAC - Enable DMA Coalescing for capable adapters */ + + if (hw->mac.type >= e1000_i350) { + struct igb_opt_list list [] = { + { IGB_DMAC_DISABLE, "DMAC Disable"}, + { IGB_DMAC_MIN, "DMAC 250 usec"}, + { IGB_DMAC_500, "DMAC 500 usec"}, + { IGB_DMAC_EN_DEFAULT, "DMAC 1000 usec"}, + { IGB_DMAC_2000, "DMAC 2000 usec"}, + { IGB_DMAC_3000, "DMAC 3000 usec"}, + { IGB_DMAC_4000, "DMAC 4000 usec"}, + { IGB_DMAC_5000, "DMAC 5000 usec"}, + { IGB_DMAC_6000, "DMAC 6000 usec"}, + { IGB_DMAC_7000, "DMAC 7000 usec"}, + { IGB_DMAC_8000, "DMAC 8000 usec"}, + { IGB_DMAC_9000, "DMAC 9000 usec"}, + { IGB_DMAC_MAX, "DMAC 10000 usec"} + }; + struct igb_option opt = { + .type = list_option, + .name = "DMA Coalescing", + .err = "using default of "__MODULE_STRING(IGB_DMAC_DISABLE), + .def = IGB_DMAC_DISABLE, + .arg = { .l = { .nr = 13, + .p = list + } + } + }; +#ifdef module_param_array + if (num_DMAC > bd) { +#endif + unsigned int dmac = DMAC[bd]; + if (adapter->rx_itr_setting == IGB_DMAC_DISABLE) + dmac = IGB_DMAC_DISABLE; + igb_validate_option(&dmac, &opt, adapter); + switch (dmac) { + case IGB_DMAC_DISABLE: + adapter->dmac = dmac; + break; + case IGB_DMAC_MIN: + adapter->dmac = dmac; + break; + case IGB_DMAC_500: + adapter->dmac = dmac; + break; + case IGB_DMAC_EN_DEFAULT: + adapter->dmac = dmac; + break; + case IGB_DMAC_2000: + adapter->dmac = dmac; + break; + case IGB_DMAC_3000: + adapter->dmac = dmac; + break; + case IGB_DMAC_4000: + adapter->dmac = dmac; + break; + case IGB_DMAC_5000: + adapter->dmac = dmac; + break; + case IGB_DMAC_6000: + adapter->dmac = dmac; + break; + case IGB_DMAC_7000: + adapter->dmac = dmac; + break; + case IGB_DMAC_8000: + adapter->dmac = dmac; + break; + case IGB_DMAC_9000: + adapter->dmac = dmac; + break; + case IGB_DMAC_MAX: + adapter->dmac = dmac; + break; + default: + adapter->dmac = opt.def; + 
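+				/* (Editor's note) any value not present in
+				 * the igb_opt_list table above falls through
+				 * to this default arm and is reset to
+				 * IGB_DMAC_DISABLE with the warning below.
+				 */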
DPRINTK(PROBE, INFO, + "Invalid DMAC setting, " + "resetting DMAC to %d\n", opt.def); + } +#ifdef module_param_array + } else + adapter->dmac = opt.def; +#endif + } + } +#ifndef __VMKLNX__ + { /* MDD - Enable Malicious Driver Detection. Only available when + SR-IOV is enabled. */ + struct igb_option opt = { + .type = enable_option, + .name = "Malicious Driver Detection", + .err = "defaulting to 1", + .def = OPTION_ENABLED, + .arg = { .r = { .min = OPTION_DISABLED, + .max = OPTION_ENABLED } } + }; + +#ifdef module_param_array + if (num_MDD > bd) { +#endif + adapter->mdd = MDD[bd]; + igb_validate_option((uint *)&adapter->mdd, &opt, + adapter); +#ifdef module_param_array + } else { + adapter->mdd = opt.def; + } +#endif + } +#endif /* __VMKLNX__ */ } diff --git a/vmkdrivers/src_9/drivers/net/igb/igb_procfs.c b/vmkdrivers/src_9/drivers/net/igb/igb_procfs.c new file mode 100755 index 0000000000000000000000000000000000000000..da062e400267f521632087a423d86a294e668c4b --- /dev/null +++ b/vmkdrivers/src_9/drivers/net/igb/igb_procfs.c @@ -0,0 +1,962 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2013 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "igb.h" +#include "e1000_82575.h" +#include "e1000_hw.h" + +#ifndef IGB_HWMON + +#include <linux/module.h> +#include <linux/types.h> +#include <linux/proc_fs.h> +#include <linux/device.h> +#include <linux/netdevice.h> + +static struct proc_dir_entry *igb_top_dir = NULL; + +#ifdef __VMKLNX__ +static struct net_device_stats *procfs_get_stats(struct net_device *netdev) +{ +#ifndef HAVE_NETDEV_STATS_IN_NETDEV + struct igb_adapter *adapter; +#endif + if (netdev == NULL) + return NULL; + +#ifdef HAVE_NETDEV_STATS_IN_NETDEV + /* only return the current stats */ + return &netdev->stats; +#else + adapter = netdev_priv(netdev); + + /* only return the current stats */ + return &adapter->net_stats; +#endif /* HAVE_NETDEV_STATS_IN_NETDEV */ +} +#endif /* __VMKLNX__ */ + +bool igb_thermal_present(struct igb_adapter *adapter) +{ + s32 status; + struct e1000_hw *hw; + + if (adapter == NULL) + return false; + hw = &adapter->hw; + + /* + * Only set I2C bit-bang mode if an external thermal sensor is + * supported on this device. 
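+	 * (Editor's gloss, inferred from the surrounding code: adapter->ets
+	 * is set when the NVM advertises an external thermal sensor, and
+	 * e1000_set_i2c_bb() switches the I2C interface into bit-bang mode
+	 * so the sensor's registers become reachable.)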
+ */ + if (adapter->ets) { + status = e1000_set_i2c_bb(hw); + if (status != E1000_SUCCESS) + return false; + } + + status = hw->mac.ops.init_thermal_sensor_thresh(hw); + if (status != E1000_SUCCESS) + return false; + + return true; +} + +#ifdef __VMKLNX__ +static int igb_fwbanner(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + return snprintf(page, count, "0x%08x\n", adapter->etrack_id); +} + +static int igb_portspeed(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + int speed = 0; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + switch (adapter->link_speed) { + case E1000_STATUS_SPEED_10: + speed = 10; + break; + case E1000_STATUS_SPEED_100: + speed = 100; + break; + case E1000_STATUS_SPEED_1000: + speed = 1000; + break; + } + return snprintf(page, count, "%d\n", speed); +} + +static int igb_wqlflag(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + return snprintf(page, count, "%d\n", adapter->wol); +} + +static int igb_xflowctl(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + struct e1000_hw *hw; + struct igb_adapter *adapter = (struct igb_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + return snprintf(page, count, "%d\n", hw->fc.current_mode); +} + +static int igb_rxdrops(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", + net_stats->rx_dropped); +} + +static int igb_rxerrors(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", net_stats->rx_errors); +} +static int igb_rxupacks(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + struct e1000_hw *hw; + struct igb_adapter *adapter = (struct igb_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + return snprintf(page, count, "%d\n", E1000_READ_REG(hw, E1000_TPR)); +} + +static int igb_rxmpacks(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + struct e1000_hw *hw; + struct igb_adapter *adapter = (struct igb_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + 
return snprintf(page, count, "%d\n", + E1000_READ_REG(hw, E1000_MPRC)); +} + +static int igb_rxbpacks(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + struct e1000_hw *hw; + struct igb_adapter *adapter = (struct igb_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + return snprintf(page, count, "%d\n", + E1000_READ_REG(hw, E1000_BPRC)); +} + +static int igb_txupacks(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + struct e1000_hw *hw; + struct igb_adapter *adapter = (struct igb_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + return snprintf(page, count, "%d\n", E1000_READ_REG(hw, E1000_TPT)); +} + +static int igb_txmpacks(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + struct e1000_hw *hw; + struct igb_adapter *adapter = (struct igb_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + return snprintf(page, count, "%d\n", + E1000_READ_REG(hw, E1000_MPTC)); +} + +static int igb_txbpacks(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + struct e1000_hw *hw; + struct igb_adapter *adapter = (struct igb_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + return snprintf(page, count, "%d\n", + E1000_READ_REG(hw, E1000_BPTC)); + +} + +static int igb_txerrors(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", + net_stats->tx_errors); +} + +static int igb_txdrops(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", + net_stats->tx_dropped); +} + +static int igb_rxframes(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", + net_stats->rx_packets); +} + +static int igb_rxbytes(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + 
net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", + net_stats->rx_bytes); +} + +static int igb_txframes(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", + net_stats->tx_packets); +} + +static int igb_txbytes(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", + net_stats->tx_bytes); +} +static int igb_linkstat(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + int bitmask = 0; + struct e1000_hw *hw; + struct igb_adapter *adapter = (struct igb_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + if (!test_bit(__IGB_DOWN, &adapter->state)) + bitmask |= 1; + + if (igb_has_link(adapter)) + bitmask |= 2; + if (adapter->old_lsc != hw->mac.get_link_status) { + bitmask |= 4; + adapter->old_lsc = hw->mac.get_link_status; + } + + return snprintf(page, count, "0x%X\n", bitmask); +} + +static int igb_funcid(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + struct net_device* netdev; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + netdev = adapter->netdev; + if (netdev == NULL) + return snprintf(page, count, "error: no net device\n"); + + return snprintf(page, count, "0x%lX\n", netdev->base_addr); +} + +static int igb_funcvers(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + struct net_device* netdev; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + netdev = adapter->netdev; + if (netdev == NULL) + return snprintf(page, count, "error: no net device\n"); + + return snprintf(page, count, "%s\n", igb_driver_version); +} + +static int igb_maclla1(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + struct e1000_hw *hw; + u16 eeprom_buff[6]; + int first_word = 0x37; + int word_count = 6; + int rc; + + struct igb_adapter *adapter = (struct igb_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + rc = e1000_read_nvm(hw, first_word, word_count, + eeprom_buff); + if (rc != E1000_SUCCESS) + return 0; + + switch (hw->bus.func) { + case 0: + return snprintf(page, count, "0x%04X%04X%04X\n", + eeprom_buff[0], + eeprom_buff[1], + eeprom_buff[2]); + case 1: + return snprintf(page, count, "0x%04X%04X%04X\n", + eeprom_buff[3], + eeprom_buff[4], + eeprom_buff[5]); + } + return snprintf(page, count, 
"unexpected port %d\n", hw->bus.func); +} + +static int igb_mtusize(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + struct net_device* netdev; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + netdev = adapter->netdev; + if (netdev == NULL) + return snprintf(page, count, "error: no net device\n"); + + return snprintf(page, count, "%d\n", netdev->mtu); +} + +static int igb_featflag(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + int bitmask = 0; +#ifndef HAVE_NDO_SET_FEATURES + struct igb_ring *ring; +#endif + struct igb_adapter *adapter = (struct igb_adapter *)data; + struct net_device *netdev; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + netdev = adapter->netdev; + if (netdev == NULL) + return snprintf(page, count, "error: no net device\n"); + +#ifndef HAVE_NDO_SET_FEATURES + /* igb_get_rx_csum(netdev) doesn't compile so hard code */ + ring = adapter->rx_ring[0]; + bitmask = test_bit(IGB_RING_FLAG_RX_CSUM, &ring->flags); + return snprintf(page, count, "%d\n", bitmask); +#else + if (netdev->features & NETIF_F_RXCSUM) + bitmask |= 1; + return snprintf(page, count, "%d\n", bitmask); +#endif +} + +static int igb_lsominct(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + return snprintf(page, count, "%d\n", 1); +} + +static int igb_prommode(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + struct net_device *netdev; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + netdev = adapter->netdev; + if (netdev == NULL) + return snprintf(page, count, "error: no net device\n"); + + return snprintf(page, count, "%d\n", + netdev->flags & IFF_PROMISC); +} + +static int igb_txdscqsz(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + return snprintf(page, count, "%d\n", adapter->tx_ring[0]->count); +} + +static int igb_rxdscqsz(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + return snprintf(page, count, "%d\n", adapter->rx_ring[0]->count); +} + +static int igb_rxqavg(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + int index; + int totaldiff = 0; + u16 ntc; + u16 ntu; + struct igb_adapter *adapter = (struct igb_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + if (adapter->num_rx_queues <= 0) + return snprintf(page, count, + "can't calculate, number of queues %d\n", + adapter->num_rx_queues); + + for (index = 0; index < adapter->num_rx_queues; index++) { + ntc = adapter->rx_ring[index]->next_to_clean; + ntu = adapter->rx_ring[index]->next_to_use; + + if (ntc >= ntu) + totaldiff += (ntc - ntu); + else + totaldiff += (adapter->rx_ring[index]->count + - ntu + ntc); + } + if (adapter->num_rx_queues <= 0) + return snprintf(page, count, + "can't calculate, number of queues %d\n", + adapter->num_rx_queues); + return snprintf(page, count, "%d\n", totaldiff/adapter->num_rx_queues); +} + +static int igb_txqavg(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + 
int index; + int totaldiff = 0; + u16 ntc; + u16 ntu; + struct igb_adapter *adapter = (struct igb_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + if (adapter->num_tx_queues <= 0) + return snprintf(page, count, + "can't calculate, number of queues %d\n", + adapter->num_tx_queues); + + for (index = 0; index < adapter->num_tx_queues; index++) { + ntc = adapter->tx_ring[index]->next_to_clean; + ntu = adapter->tx_ring[index]->next_to_use; + + if (ntc >= ntu) + totaldiff += (ntc - ntu); + else + totaldiff += (adapter->tx_ring[index]->count + - ntu + ntc); + } + if (adapter->num_tx_queues <= 0) + return snprintf(page, count, + "can't calculate, number of queues %d\n", + adapter->num_tx_queues); + return snprintf(page, count, "%d\n", + totaldiff/adapter->num_tx_queues); +} + +static int igb_iovotype(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + return snprintf(page, count, "2\n"); +} + +static int igb_funcnbr(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + return snprintf(page, count, "%d\n", adapter->vfs_allocated_count); +} +#endif /* __VMKLNX__ */ + +static int igb_macburn(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + struct e1000_hw *hw; + struct igb_adapter *adapter = (struct igb_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + return snprintf(page, count, "0x%02X%02X%02X%02X%02X%02X\n", + (unsigned int)hw->mac.perm_addr[0], + (unsigned int)hw->mac.perm_addr[1], + (unsigned int)hw->mac.perm_addr[2], + (unsigned int)hw->mac.perm_addr[3], + (unsigned int)hw->mac.perm_addr[4], + (unsigned int)hw->mac.perm_addr[5]); +} + +static int igb_macadmn(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + struct e1000_hw *hw; + struct igb_adapter *adapter = (struct igb_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + return snprintf(page, count, "0x%02X%02X%02X%02X%02X%02X\n", + (unsigned int)hw->mac.addr[0], + (unsigned int)hw->mac.addr[1], + (unsigned int)hw->mac.addr[2], + (unsigned int)hw->mac.addr[3], + (unsigned int)hw->mac.addr[4], + (unsigned int)hw->mac.addr[5]); +} + +static int igb_numeports(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + struct e1000_hw *hw; + int ports; + struct igb_adapter *adapter = (struct igb_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + ports = 4; + + return snprintf(page, count, "%d\n", ports); +} + +static int igb_porttype(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + return snprintf(page, count, "%d\n", + test_bit(__IGB_DOWN, &adapter->state)); +} + +static int igb_therm_location(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + struct igb_therm_proc_data *therm_data = + (struct igb_therm_proc_data *)data; + + if 
(therm_data == NULL) + return snprintf(page, count, "error: no therm_data\n"); + + return snprintf(page, count, "%d\n", therm_data->sensor_data->location); +} + +static int igb_therm_maxopthresh(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + struct igb_therm_proc_data *therm_data = + (struct igb_therm_proc_data *)data; + + if (therm_data == NULL) + return snprintf(page, count, "error: no therm_data\n"); + + return snprintf(page, count, "%d\n", + therm_data->sensor_data->max_op_thresh); +} + +static int igb_therm_cautionthresh(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + struct igb_therm_proc_data *therm_data = + (struct igb_therm_proc_data *)data; + + if (therm_data == NULL) + return snprintf(page, count, "error: no therm_data\n"); + + return snprintf(page, count, "%d\n", + therm_data->sensor_data->caution_thresh); +} + +static int igb_therm_temp(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + s32 status; + struct igb_therm_proc_data *therm_data = + (struct igb_therm_proc_data *)data; + + if (therm_data == NULL) + return snprintf(page, count, "error: no therm_data\n"); + + status = e1000_get_thermal_sensor_data(therm_data->hw); + if (status != E1000_SUCCESS) + snprintf(page, count, "error: status %d returned\n", status); + + return snprintf(page, count, "%d\n", therm_data->sensor_data->temp); +} + +struct igb_proc_type{ + char name[32]; + int (*read)(char*, char**, off_t, int, int*, void*); +}; + +struct igb_proc_type igb_proc_entries[] = { + {"numeports", &igb_numeports}, + {"porttype", &igb_porttype}, + {"macburn", &igb_macburn}, + {"macadmn", &igb_macadmn}, +#ifdef __VMKLNX__ + {"fwbanner", &igb_fwbanner}, + {"portspeed", &igb_portspeed}, + {"wqlflag", &igb_wqlflag}, + {"xflowctl", &igb_xflowctl}, + {"rxdrops", &igb_rxdrops}, + {"rxerrors", &igb_rxerrors}, + {"rxupacks", &igb_rxupacks}, + {"rxmpacks", &igb_rxmpacks}, + {"rxbpacks", &igb_rxbpacks}, + {"txdrops", &igb_txdrops}, + {"txerrors", &igb_txerrors}, + {"txupacks", &igb_txupacks}, + {"txmpacks", &igb_txmpacks}, + {"txbpacks", &igb_txbpacks}, + {"rxframes", &igb_rxframes}, + {"rxbytes", &igb_rxbytes}, + {"txframes", &igb_txframes}, + {"txbytes", &igb_txbytes}, + {"linkstat", &igb_linkstat}, + {"funcid", &igb_funcid}, + {"funcvers", &igb_funcvers}, + {"maclla1", &igb_maclla1}, + {"mtusize", &igb_mtusize}, + {"featflag", &igb_featflag}, + {"lsominct", &igb_lsominct}, + {"prommode", &igb_prommode}, + {"txdscqsz", &igb_txdscqsz}, + {"rxdscqsz", &igb_rxdscqsz}, + {"txqavg", &igb_txqavg}, + {"rxqavg", &igb_rxqavg}, + {"iovotype", &igb_iovotype}, + {"funcnbr", &igb_funcnbr}, +#endif /* __VMKLNX__ */ + {"", NULL} +}; + +struct igb_proc_type igb_internal_entries[] = { + {"location", &igb_therm_location}, + {"temp", &igb_therm_temp}, + {"cautionthresh", &igb_therm_cautionthresh}, + {"maxopthresh", &igb_therm_maxopthresh}, + {"", NULL} +}; + +void igb_del_proc_entries(struct igb_adapter *adapter) +{ + int index, i; + char buf[16]; /* much larger than the sensor number will ever be */ + + if (igb_top_dir == NULL) + return; + + for (i = 0; i < E1000_MAX_SENSORS; i++) { + if (adapter->therm_dir[i] == NULL) + continue; + + for (index = 0; ; index++) { + if (igb_internal_entries[index].read == NULL) + break; + + remove_proc_entry(igb_internal_entries[index].name, + adapter->therm_dir[i]); + } + snprintf(buf, sizeof(buf), "sensor_%d", i); + remove_proc_entry(buf, adapter->info_dir); + } + + if (adapter->info_dir != NULL) { + for (index = 0; ; index++) { + if 
(igb_proc_entries[index].read == NULL) + break; + remove_proc_entry(igb_proc_entries[index].name, + adapter->info_dir); + } + remove_proc_entry("info", adapter->eth_dir); + } + + if (adapter->eth_dir != NULL) + remove_proc_entry(pci_name(adapter->pdev), igb_top_dir); +} + +/* called from igb_main.c */ +void igb_procfs_exit(struct igb_adapter *adapter) +{ + igb_del_proc_entries(adapter); +} + +int igb_procfs_topdir_init(void) +{ +#ifdef __VMKLNX__ + igb_top_dir = proc_mkdir("driver/igb", NULL); +#else + igb_top_dir = proc_mkdir("driver/igb", NULL); +#endif /* __VMKLNX__ */ + if (igb_top_dir == NULL) + return (-ENOMEM); + + return 0; +} + +void igb_procfs_topdir_exit(void) +{ +#ifdef __VMKLNX__ + remove_proc_entry("driver/igb", NULL); +#else + remove_proc_entry("driver/igb", NULL); +#endif /* __VMKLNX__ */ +} + +/* called from igb_main.c */ +int igb_procfs_init(struct igb_adapter *adapter) +{ + int rc = 0; + int i; + int index; + char buf[16]; /* much larger than the sensor number will ever be */ + + adapter->eth_dir = NULL; + adapter->info_dir = NULL; + for (i = 0; i < E1000_MAX_SENSORS; i++) + adapter->therm_dir[i] = NULL; + + if ( igb_top_dir == NULL ) { + rc = -ENOMEM; + goto fail; + } + + adapter->eth_dir = proc_mkdir(pci_name(adapter->pdev), igb_top_dir); + if (adapter->eth_dir == NULL) { + rc = -ENOMEM; + goto fail; + } + + adapter->info_dir = proc_mkdir("info", adapter->eth_dir); + if (adapter->info_dir == NULL) { + rc = -ENOMEM; + goto fail; + } + for (index = 0; ; index++) { + if (igb_proc_entries[index].read == NULL) { + break; + } + if (!(create_proc_read_entry(igb_proc_entries[index].name, + 0444, + adapter->info_dir, + igb_proc_entries[index].read, + adapter))) { + + rc = -ENOMEM; + goto fail; + } + } + if (igb_thermal_present(adapter) == false) + goto exit; + + for (i = 0; i < E1000_MAX_SENSORS; i++) { + + if (adapter->hw.mac.thermal_sensor_data.sensor[i].location== 0) + continue; + + snprintf(buf, sizeof(buf), "sensor_%d", i); + adapter->therm_dir[i] = proc_mkdir(buf, adapter->info_dir); + if (adapter->therm_dir[i] == NULL) { + rc = -ENOMEM; + goto fail; + } + for (index = 0; ; index++) { + if (igb_internal_entries[index].read == NULL) + break; + /* + * therm_data struct contains pointer the read func + * will be needing + */ + adapter->therm_data[i].hw = &adapter->hw; + adapter->therm_data[i].sensor_data = + &adapter->hw.mac.thermal_sensor_data.sensor[i]; + + if (!(create_proc_read_entry( + igb_internal_entries[index].name, + 0444, + adapter->therm_dir[i], + igb_internal_entries[index].read, + &adapter->therm_data[i]))) { + rc = -ENOMEM; + goto fail; + } + } + } + goto exit; + +fail: + igb_del_proc_entries(adapter); +exit: + return rc; +} + +#endif /* !IGB_HWMON */ diff --git a/vmkdrivers/src_9/drivers/net/igb/igb_ptp.c b/vmkdrivers/src_9/drivers/net/igb/igb_ptp.c new file mode 100755 index 0000000000000000000000000000000000000000..9fe0a03ca1bad40640b6a4d0e9f6aaf05c05fb51 --- /dev/null +++ b/vmkdrivers/src_9/drivers/net/igb/igb_ptp.c @@ -0,0 +1,944 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2013 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. 
+ + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/****************************************************************************** + Copyright(c) 2011 Richard Cochran <richardcochran@gmail.com> for some of the + 82576 and 82580 code +******************************************************************************/ + +#include "igb.h" + +#include <linux/module.h> +#include <linux/device.h> +#include <linux/pci.h> +#include <linux/ptp_classify.h> + +#define INCVALUE_MASK 0x7fffffff +#define ISGN 0x80000000 + +/* + * The 82580 timesync updates the system timer every 8ns by 8ns, + * and this update value cannot be reprogrammed. + * + * Neither the 82576 nor the 82580 offer registers wide enough to hold + * nanoseconds time values for very long. For the 82580, SYSTIM always + * counts nanoseconds, but the upper 24 bits are not available. The + * frequency is adjusted by changing the 32 bit fractional nanoseconds + * register, TIMINCA. + * + * For the 82576, the SYSTIM register time unit is affected by the + * choice of the 24 bit TIMINCA:IV (incvalue) field. Five bits of this + * field are needed to provide the nominal 16 nanosecond period, + * leaving 19 bits for fractional nanoseconds. + * + * We scale the NIC clock cycle by a large factor so that relatively + * small clock corrections can be added or subtracted at each clock + * tick. The drawbacks of a large factor are a) that the clock + * register overflows more quickly (not such a big deal) and b) that + * the increment per tick has to fit into 24 bits. As a result we + * need to use a shift of 19 so we can fit a value of 16 into the + * TIMINCA register. + * + * + * SYSTIMH SYSTIML + * +--------------+ +---+---+------+ + * 82576 | 32 | | 8 | 5 | 19 | + * +--------------+ +---+---+------+ + * \________ 45 bits _______/ fract + * + * +----------+---+ +--------------+ + * 82580 | 24 | 8 | | 32 | + * +----------+---+ +--------------+ + * reserved \______ 40 bits _____/ + * + * + * The 45 bit 82576 SYSTIM overflows every + * 2^45 * 10^-9 / 3600 = 9.77 hours. + * + * The 40 bit 82580 SYSTIM overflows every + * 2^40 * 10^-9 / 60 = 18.3 minutes. 
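+ *
+ * (Editor's arithmetic check, added for clarity:
+ *   82576: 2^45 ns = 35,184,372,088,832 ns ~= 35,184 s ~= 9.77 hours
+ *   82580: 2^40 ns =  1,099,511,627,776 ns ~=  1,100 s ~= 18.3 minutes
+ * so the software overflow watchdog defined below,
+ * IGB_SYSTIM_OVERFLOW_PERIOD = HZ * 60 * 9, i.e. nine minutes, runs
+ * comfortably inside both bounds.)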
+ */ + +#define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 9) +#define IGB_PTP_TX_TIMEOUT (HZ * 15) +#define INCPERIOD_82576 (1 << E1000_TIMINCA_16NS_SHIFT) +#define INCVALUE_82576_MASK ((1 << E1000_TIMINCA_16NS_SHIFT) - 1) +#define INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT) +#define IGB_NBITS_82580 40 + +/* + * SYSTIM read access for the 82576 + */ + +static cycle_t igb_ptp_read_82576(const struct cyclecounter *cc) +{ + struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc); + struct e1000_hw *hw = &igb->hw; + u64 val; + u32 lo, hi; + + lo = E1000_READ_REG(hw, E1000_SYSTIML); + hi = E1000_READ_REG(hw, E1000_SYSTIMH); + + val = ((u64) hi) << 32; + val |= lo; + + return val; +} + +/* + * SYSTIM read access for the 82580 + */ + +static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc) +{ + struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc); + struct e1000_hw *hw = &igb->hw; + u64 val; + u32 lo, hi; + + /* The timestamp latches on lowest register read. For the 82580 + * the lowest register is SYSTIMR instead of SYSTIML. However we only + * need to provide nanosecond resolution, so we just ignore it. + */ + E1000_READ_REG(hw, E1000_SYSTIMR); + lo = E1000_READ_REG(hw, E1000_SYSTIML); + hi = E1000_READ_REG(hw, E1000_SYSTIMH); + + val = ((u64) hi) << 32; + val |= lo; + + return val; +} + +/* + * SYSTIM read access for I210/I211 + */ + +static void igb_ptp_read_i210(struct igb_adapter *adapter, struct timespec *ts) +{ + struct e1000_hw *hw = &adapter->hw; + u32 sec, nsec; + + /* The timestamp latches on lowest register read. For I210/I211, the + * lowest register is SYSTIMR. Since we only need to provide nanosecond + * resolution, we can ignore it. + */ + E1000_READ_REG(hw, E1000_SYSTIMR); + nsec = E1000_READ_REG(hw, E1000_SYSTIML); + sec = E1000_READ_REG(hw, E1000_SYSTIMH); + + ts->tv_sec = sec; + ts->tv_nsec = nsec; +} + +static void igb_ptp_write_i210(struct igb_adapter *adapter, + const struct timespec *ts) +{ + struct e1000_hw *hw = &adapter->hw; + + /* + * Writing the SYSTIMR register is not necessary as it only provides + * sub-nanosecond resolution. + */ + E1000_WRITE_REG(hw, E1000_SYSTIML, ts->tv_nsec); + E1000_WRITE_REG(hw, E1000_SYSTIMH, ts->tv_sec); +} + +/** + * igb_ptp_systim_to_hwtstamp - convert system time value to hw timestamp + * @adapter: board private structure + * @hwtstamps: timestamp structure to update + * @systim: unsigned 64bit system time value. + * + * We need to convert the system time value stored in the RX/TXSTMP registers + * into a hwtstamp which can be used by the upper level timestamping functions. + * + * The 'tmreg_lock' spinlock is used to protect the consistency of the + * system time value. This is needed because reading the 64 bit time + * value involves reading two (or three) 32 bit registers. The first + * read latches the value. Ditto for writing. + * + * In addition, here we have extended the system time with an overflow + * counter in software. 
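+ *
+ * (Editor's note) The timecounter state itself is also updated from
+ * several contexts (adjtime, settime, the overflow worker), so
+ * timecounter_cyc2time() below is wrapped in the same lock to avoid
+ * combining a half-updated cycle_last/nsec pair.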
+ **/ +static void igb_ptp_systim_to_hwtstamp(struct igb_adapter *adapter, + struct skb_shared_hwtstamps *hwtstamps, + u64 systim) +{ + unsigned long flags; + u64 ns; + + switch (adapter->hw.mac.type) { + case e1000_82576: + case e1000_82580: + case e1000_i350: + case e1000_i354: + spin_lock_irqsave(&adapter->tmreg_lock, flags); + + ns = timecounter_cyc2time(&adapter->tc, systim); + + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + memset(hwtstamps, 0, sizeof(*hwtstamps)); + hwtstamps->hwtstamp = ns_to_ktime(ns); + break; + case e1000_i210: + case e1000_i211: + memset(hwtstamps, 0, sizeof(*hwtstamps)); + /* Upper 32 bits contain s, lower 32 bits contain ns. */ + hwtstamps->hwtstamp = ktime_set(systim >> 32, + systim & 0xFFFFFFFF); + break; + default: + break; + } +} + +/* + * PTP clock operations + */ + +static int igb_ptp_adjfreq_82576(struct ptp_clock_info *ptp, s32 ppb) +{ + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + struct e1000_hw *hw = &igb->hw; + int neg_adj = 0; + u64 rate; + u32 incvalue; + + if (ppb < 0) { + neg_adj = 1; + ppb = -ppb; + } + rate = ppb; + rate <<= 14; + rate = div_u64(rate, 1953125); + + incvalue = 16 << IGB_82576_TSYNC_SHIFT; + + if (neg_adj) + incvalue -= rate; + else + incvalue += rate; + + E1000_WRITE_REG(hw, E1000_TIMINCA, INCPERIOD_82576 | (incvalue & INCVALUE_82576_MASK)); + + return 0; +} + +static int igb_ptp_adjfreq_82580(struct ptp_clock_info *ptp, s32 ppb) +{ + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + struct e1000_hw *hw = &igb->hw; + int neg_adj = 0; + u64 rate; + u32 inca; + + if (ppb < 0) { + neg_adj = 1; + ppb = -ppb; + } + rate = ppb; + rate <<= 26; + rate = div_u64(rate, 1953125); + + /* At 2.5G speeds, the TIMINCA register on I354 updates the clock 2.5x + * as quickly. Account for this by dividing the adjustment by 2.5. 
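+	 * (Editor's arithmetic: "dividing by 2.5" is realized below as
+	 * rate = rate * 2 / 5, i.e. rate <<= 1 then div_u64(rate, 5).
+	 * For reference, the divisor 1953125 used above is 10^9 / 2^9,
+	 * so the full-rate adjustment (ppb << 26) / 1953125 equals
+	 * ppb * 2^35 / 10^9, the correction per 8 ns update period
+	 * expressed in 2^-32 ns units.)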
+ */ + if (hw->mac.type == e1000_i354) { + u32 status = E1000_READ_REG(hw, E1000_STATUS); + + if ((status & E1000_STATUS_2P5_SKU) && + !(status & E1000_STATUS_2P5_SKU_OVER)) { + rate <<= 1; + rate = div_u64(rate, 5); + } + } + + inca = rate & INCVALUE_MASK; + if (neg_adj) + inca |= ISGN; + + E1000_WRITE_REG(hw, E1000_TIMINCA, inca); + + return 0; +} + +static int igb_ptp_adjtime_82576(struct ptp_clock_info *ptp, s64 delta) +{ + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + unsigned long flags; + s64 now; + + spin_lock_irqsave(&igb->tmreg_lock, flags); + + now = timecounter_read(&igb->tc); + now += delta; + timecounter_init(&igb->tc, &igb->cc, now); + + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + + return 0; +} + +static int igb_ptp_adjtime_i210(struct ptp_clock_info *ptp, s64 delta) +{ + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + unsigned long flags; + struct timespec now, then = ns_to_timespec(delta); + + spin_lock_irqsave(&igb->tmreg_lock, flags); + + igb_ptp_read_i210(igb, &now); + now = timespec_add(now, then); + igb_ptp_write_i210(igb, (const struct timespec *)&now); + + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + + return 0; +} + +static int igb_ptp_gettime_82576(struct ptp_clock_info *ptp, + struct timespec *ts) +{ + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + unsigned long flags; + u64 ns; + u32 remainder; + + spin_lock_irqsave(&igb->tmreg_lock, flags); + + ns = timecounter_read(&igb->tc); + + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + + ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder); + ts->tv_nsec = remainder; + + return 0; +} + +static int igb_ptp_gettime_i210(struct ptp_clock_info *ptp, + struct timespec *ts) +{ + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + unsigned long flags; + + spin_lock_irqsave(&igb->tmreg_lock, flags); + + igb_ptp_read_i210(igb, ts); + + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + + return 0; +} + +static int igb_ptp_settime_82576(struct ptp_clock_info *ptp, + const struct timespec *ts) +{ + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + unsigned long flags; + u64 ns; + + ns = ts->tv_sec * 1000000000ULL; + ns += ts->tv_nsec; + + spin_lock_irqsave(&igb->tmreg_lock, flags); + + timecounter_init(&igb->tc, &igb->cc, ns); + + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + + return 0; +} + +static int igb_ptp_settime_i210(struct ptp_clock_info *ptp, + const struct timespec *ts) +{ + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + unsigned long flags; + + spin_lock_irqsave(&igb->tmreg_lock, flags); + + igb_ptp_write_i210(igb, ts); + + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + + return 0; +} + +static int igb_ptp_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + return -EOPNOTSUPP; +} + +/** + * igb_ptp_tx_work + * @work: pointer to work struct + * + * This work function polls the TSYNCTXCTL valid bit to determine when a + * timestamp has been taken for the current stored skb. 
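+ *
+ * (Editor's note) If the valid bit never asserts, the stored skb is
+ * dropped once IGB_PTP_TX_TIMEOUT (HZ * 15, i.e. 15 seconds) has
+ * elapsed and tx_hwtstamp_timeouts is incremented, as the body below
+ * implements.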
+ */ +void igb_ptp_tx_work(struct work_struct *work) +{ + struct igb_adapter *adapter = container_of(work, struct igb_adapter, + ptp_tx_work); + struct e1000_hw *hw = &adapter->hw; + u32 tsynctxctl; + + if (!adapter->ptp_tx_skb) + return; + + if (time_is_before_jiffies(adapter->ptp_tx_start + + IGB_PTP_TX_TIMEOUT)) { + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + adapter->tx_hwtstamp_timeouts++; + dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang"); + return; + } + + tsynctxctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL); + if (tsynctxctl & E1000_TSYNCTXCTL_VALID) + igb_ptp_tx_hwtstamp(adapter); + else + /* reschedule to check later */ + schedule_work(&adapter->ptp_tx_work); +} + +static void igb_ptp_overflow_check(struct work_struct *work) +{ + struct igb_adapter *igb = + container_of(work, struct igb_adapter, ptp_overflow_work.work); + struct timespec ts; + + igb->ptp_caps.gettime(&igb->ptp_caps, &ts); + + pr_debug("igb overflow check at %ld.%09lu\n", ts.tv_sec, ts.tv_nsec); + + schedule_delayed_work(&igb->ptp_overflow_work, + IGB_SYSTIM_OVERFLOW_PERIOD); +} + +/** + * igb_ptp_rx_hang - detect error case when Rx timestamp registers latched + * @adapter: private network adapter structure + * + * This watchdog task is scheduled to detect the error case where hardware has + * dropped an Rx packet that was timestamped when the ring is full. The + * particular error is rare but leaves the device in a state in which it is + * unable to timestamp any future packets. + */ +void igb_ptp_rx_hang(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct igb_ring *rx_ring; + u32 tsyncrxctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL); + unsigned long rx_event; + int n; + + if (hw->mac.type != e1000_82576) + return; + + /* If we don't have a valid timestamp in the registers, just update the + * timeout counter and exit + */ + if (!(tsyncrxctl & E1000_TSYNCRXCTL_VALID)) { + adapter->last_rx_ptp_check = jiffies; + return; + } + + /* Determine the most recent watchdog or rx_timestamp event */ + rx_event = adapter->last_rx_ptp_check; + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + if (time_after(rx_ring->last_rx_timestamp, rx_event)) + rx_event = rx_ring->last_rx_timestamp; + } + + /* Only need to read the high RXSTMP register to clear the lock */ + if (time_is_before_jiffies(rx_event + 5 * HZ)) { + E1000_READ_REG(hw, E1000_RXSTMPH); + adapter->last_rx_ptp_check = jiffies; + adapter->rx_hwtstamp_cleared++; + dev_warn(&adapter->pdev->dev, "clearing Rx timestamp hang"); + } +} + +/** + * igb_ptp_tx_hwtstamp - utility function which checks for TX time stamp + * @adapter: Board private structure. + * + * If we were asked to do hardware stamping and such a time stamp is + * available, then it must have been for this skb here because we + * allow only one such packet into the queue. 
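+ *
+ * (Editor's reading, an assumption based on the register accesses
+ * below: TXSTMPL is read first and TXSTMPH second; consuming the pair
+ * is what re-arms TSYNCTXCTL_VALID for the next outgoing timestamp.)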
+ */ +void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct skb_shared_hwtstamps shhwtstamps; + u64 regval; + + regval = E1000_READ_REG(hw, E1000_TXSTMPL); + regval |= (u64)E1000_READ_REG(hw, E1000_TXSTMPH) << 32; + + igb_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval); + skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps); + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; +} + +/** + * igb_ptp_rx_pktstamp - retrieve Rx per packet timestamp + * @q_vector: Pointer to interrupt specific structure + * @va: Pointer to address containing Rx buffer + * @skb: Buffer containing timestamp and packet + * + * This function is meant to retrieve a timestamp from the first buffer of an + * incoming frame. The value is stored in little endian format starting on + * byte 8. + */ +void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, + unsigned char *va, + struct sk_buff *skb) +{ + __le64 *regval = (__le64 *)va; + + /* + * The timestamp is recorded in little endian format. + * DWORD: 0 1 2 3 + * Field: Reserved Reserved SYSTIML SYSTIMH + */ + igb_ptp_systim_to_hwtstamp(q_vector->adapter, skb_hwtstamps(skb), + le64_to_cpu(regval[1])); +} + +/** + * igb_ptp_rx_rgtstamp - retrieve Rx timestamp stored in register + * @q_vector: Pointer to interrupt specific structure + * @skb: Buffer containing timestamp and packet + * + * This function is meant to retrieve a timestamp from the internal registers + * of the adapter and store it in the skb. + */ +void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, + struct sk_buff *skb) +{ + struct igb_adapter *adapter = q_vector->adapter; + struct e1000_hw *hw = &adapter->hw; + u64 regval; + + /* + * If this bit is set, then the RX registers contain the time stamp. No + * other packet will be time stamped until we read these registers, so + * read the registers to make them available again. Because only one + * packet can be time stamped at a time, we know that the register + * values must belong to this one here and therefore we don't need to + * compare any of the additional attributes stored for it. + * + * If nothing went wrong, then it should have a shared tx_flags that we + * can turn into a skb_shared_hwtstamps. + */ + if (!(E1000_READ_REG(hw, E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID)) + return; + + regval = E1000_READ_REG(hw, E1000_RXSTMPL); + regval |= (u64)E1000_READ_REG(hw, E1000_RXSTMPH) << 32; + + igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); +} + +/** + * igb_ptp_hwtstamp_ioctl - control hardware time stamping + * @netdev: network interface on which time stamping is configured + * @ifr: interface request carrying the user's struct hwtstamp_config + * @cmd: ioctl command number + * + * Outgoing time stamping can be enabled and disabled. Play nice and + * disable it when requested, although it shouldn't cause any overhead + * when no packet needs it. At most one packet in the queue may be + * marked for time stamping, otherwise it would be impossible to tell + * for sure to which packet the hardware time stamp belongs. + * + * Incoming time stamping has to be configured via the hardware + * filters. Not all combinations are supported, in particular event + * type has to be specified. Matching the kind of event packet is + * not supported, with the exception of "all V2 events regardless of + * level 2 or 4". 
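+ *
+ * (Editor's sketch of a userspace caller, not part of this driver;
+ * "eth0" and fd are assumptions:
+ *
+ *	struct hwtstamp_config cfg = {
+ *		.tx_type   = HWTSTAMP_TX_ON,
+ *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
+ *	};
+ *	struct ifreq ifr;
+ *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
+ *	ifr.ifr_data = (void *)&cfg;
+ *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
+ *
+ * where fd is any open AF_INET/SOCK_DGRAM socket, using the standard
+ * definitions from linux/net_tstamp.h and linux/sockios.h.)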
+ * + **/ +int igb_ptp_hwtstamp_ioctl(struct net_device *netdev, + struct ifreq *ifr, int cmd) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct hwtstamp_config config; + u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED; + u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; + u32 tsync_rx_cfg = 0; + bool is_l4 = false; + bool is_l2 = false; + u32 regval; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + /* reserved for future extensions */ + if (config.flags) + return -EINVAL; + + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + tsync_tx_ctl = 0; + case HWTSTAMP_TX_ON: + break; + default: + return -ERANGE; + } + + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + tsync_rx_ctl = 0; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1; + tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE; + is_l4 = true; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1; + tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE; + is_l4 = true; + break; + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2; + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + is_l2 = true; + is_l4 = true; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_ALL: + /* + * 82576 cannot timestamp all packets, which it needs to do to + * support both V1 Sync and Delay_Req messages + */ + if (hw->mac.type != e1000_82576) { + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL; + config.rx_filter = HWTSTAMP_FILTER_ALL; + break; + } + /* fall through */ + default: + config.rx_filter = HWTSTAMP_FILTER_NONE; + return -ERANGE; + } + + if (hw->mac.type == e1000_82575) { + if (tsync_rx_ctl | tsync_tx_ctl) + return -EINVAL; + return 0; + } + + /* + * Per-packet timestamping only works if all packets are + * timestamped, so enable timestamping in all packets as + * long as one rx filter was configured. 
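+	 * (Editor's example: a request for HWTSTAMP_FILTER_PTP_V2_EVENT on
+	 * an i350 is therefore acknowledged back to userspace as
+	 * HWTSTAMP_FILTER_ALL, since config.rx_filter is rewritten below
+	 * before being copied out.)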
+ */ + if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) { + tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL; + config.rx_filter = HWTSTAMP_FILTER_ALL; + is_l2 = true; + is_l4 = true; + + if ((hw->mac.type == e1000_i210) || + (hw->mac.type == e1000_i211)) { + regval = E1000_READ_REG(hw, E1000_RXPBS); + regval |= E1000_RXPBS_CFG_TS_EN; + E1000_WRITE_REG(hw, E1000_RXPBS, regval); + } + } + + /* enable/disable TX */ + regval = E1000_READ_REG(hw, E1000_TSYNCTXCTL); + regval &= ~E1000_TSYNCTXCTL_ENABLED; + regval |= tsync_tx_ctl; + E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, regval); + + /* enable/disable RX */ + regval = E1000_READ_REG(hw, E1000_TSYNCRXCTL); + regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK); + regval |= tsync_rx_ctl; + E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, regval); + + /* define which PTP packets are time stamped */ + E1000_WRITE_REG(hw, E1000_TSYNCRXCFG, tsync_rx_cfg); + + /* define ethertype filter for timestamped packets */ + if (is_l2) + E1000_WRITE_REG(hw, E1000_ETQF(3), + (E1000_ETQF_FILTER_ENABLE | /* enable filter */ + E1000_ETQF_1588 | /* enable timestamping */ + ETH_P_1588)); /* 1588 eth protocol type */ + else + E1000_WRITE_REG(hw, E1000_ETQF(3), 0); + + /* L4 Queue Filter[3]: filter by destination port and protocol */ + if (is_l4) { + u32 ftqf = (IPPROTO_UDP /* UDP */ + | E1000_FTQF_VF_BP /* VF not compared */ + | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */ + | E1000_FTQF_MASK); /* mask all inputs */ + ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */ + + E1000_WRITE_REG(hw, E1000_IMIR(3), htons(PTP_EV_PORT)); + E1000_WRITE_REG(hw, E1000_IMIREXT(3), + (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP)); + if (hw->mac.type == e1000_82576) { + /* enable source port check */ + E1000_WRITE_REG(hw, E1000_SPQF(3), htons(PTP_EV_PORT)); + ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP; + } + E1000_WRITE_REG(hw, E1000_FTQF(3), ftqf); + } else { + E1000_WRITE_REG(hw, E1000_FTQF(3), E1000_FTQF_MASK); + } + E1000_WRITE_FLUSH(hw); + + /* clear TX/RX time stamp registers, just to be sure */ + regval = E1000_READ_REG(hw, E1000_TXSTMPL); + regval = E1000_READ_REG(hw, E1000_TXSTMPH); + regval = E1000_READ_REG(hw, E1000_RXSTMPL); + regval = E1000_READ_REG(hw, E1000_RXSTMPH); + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; +} + +void igb_ptp_init(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + + switch (hw->mac.type) { + case e1000_82576: + snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr); + adapter->ptp_caps.owner = THIS_MODULE; + adapter->ptp_caps.max_adj = 999999881; + adapter->ptp_caps.n_ext_ts = 0; + adapter->ptp_caps.pps = 0; + adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82576; + adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576; + adapter->ptp_caps.gettime = igb_ptp_gettime_82576; + adapter->ptp_caps.settime = igb_ptp_settime_82576; + adapter->ptp_caps.enable = igb_ptp_enable; + adapter->cc.read = igb_ptp_read_82576; + adapter->cc.mask = CLOCKSOURCE_MASK(64); + adapter->cc.mult = 1; + adapter->cc.shift = IGB_82576_TSYNC_SHIFT; + /* Dial the nominal frequency. 
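+		 * A quick sanity check of the arithmetic, assuming the
+		 * usual igb definitions (IGB_82576_TSYNC_SHIFT == 19 and
+		 * INCVALUE_82576 == 16 << 19): SYSTIM then gains 16 << 19
+		 * counts every 16 ns tick, i.e. 2^19 counts per nanosecond,
+		 * so with cc.mult == 1 the timecounter conversion
+		 *
+		 *	ns = (cycles * cc.mult) >> cc.shift = cycles >> 19
+		 *
+		 * recovers nanoseconds exactly.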
*/ + E1000_WRITE_REG(hw, E1000_TIMINCA, INCPERIOD_82576 | + INCVALUE_82576); + break; + case e1000_82580: + case e1000_i350: + case e1000_i354: + snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr); + adapter->ptp_caps.owner = THIS_MODULE; + adapter->ptp_caps.max_adj = 62499999; + adapter->ptp_caps.n_ext_ts = 0; + adapter->ptp_caps.pps = 0; + adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580; + adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576; + adapter->ptp_caps.gettime = igb_ptp_gettime_82576; + adapter->ptp_caps.settime = igb_ptp_settime_82576; + adapter->ptp_caps.enable = igb_ptp_enable; + adapter->cc.read = igb_ptp_read_82580; + adapter->cc.mask = CLOCKSOURCE_MASK(IGB_NBITS_82580); + adapter->cc.mult = 1; + adapter->cc.shift = 0; + /* Enable the timer functions by clearing bit 31. */ + E1000_WRITE_REG(hw, E1000_TSAUXC, 0x0); + break; + case e1000_i210: + case e1000_i211: + snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr); + adapter->ptp_caps.owner = THIS_MODULE; + adapter->ptp_caps.max_adj = 62499999; + adapter->ptp_caps.n_ext_ts = 0; + adapter->ptp_caps.pps = 0; + adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580; + adapter->ptp_caps.adjtime = igb_ptp_adjtime_i210; + adapter->ptp_caps.gettime = igb_ptp_gettime_i210; + adapter->ptp_caps.settime = igb_ptp_settime_i210; + adapter->ptp_caps.enable = igb_ptp_enable; + /* Enable the timer functions by clearing bit 31. */ + E1000_WRITE_REG(hw, E1000_TSAUXC, 0x0); + break; + default: + adapter->ptp_clock = NULL; + return; + } + + E1000_WRITE_FLUSH(hw); + + spin_lock_init(&adapter->tmreg_lock); + INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work); + + /* Initialize the clock and overflow work for devices that need it. */ + if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) { + struct timespec ts = ktime_to_timespec(ktime_get_real()); + + igb_ptp_settime_i210(&adapter->ptp_caps, &ts); + } else { + timecounter_init(&adapter->tc, &adapter->cc, + ktime_to_ns(ktime_get_real())); + + INIT_DELAYED_WORK(&adapter->ptp_overflow_work, + igb_ptp_overflow_check); + + schedule_delayed_work(&adapter->ptp_overflow_work, + IGB_SYSTIM_OVERFLOW_PERIOD); + } + + /* Initialize the time sync interrupts for devices that support it. */ + if (hw->mac.type >= e1000_82580) { + E1000_WRITE_REG(hw, E1000_TSIM, E1000_TSIM_TXTS); + E1000_WRITE_REG(hw, E1000_IMS, E1000_IMS_TS); + } + + adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps, + &adapter->pdev->dev); + if (IS_ERR(adapter->ptp_clock)) { + adapter->ptp_clock = NULL; + dev_err(&adapter->pdev->dev, "ptp_clock_register failed\n"); + } else { + dev_info(&adapter->pdev->dev, "added PHC on %s\n", + adapter->netdev->name); + adapter->flags |= IGB_FLAG_PTP; + } +} + +/** + * igb_ptp_stop - Disable PTP device and stop the overflow check. + * @adapter: Board private structure. + * + * This function stops the PTP support and cancels the delayed work. + **/ +void igb_ptp_stop(struct igb_adapter *adapter) +{ + switch (adapter->hw.mac.type) { + case e1000_82576: + case e1000_82580: + case e1000_i350: + case e1000_i354: + cancel_delayed_work_sync(&adapter->ptp_overflow_work); + break; + case e1000_i210: + case e1000_i211: + /* No delayed work to cancel. 
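+		 * The i210/i211 keep time directly in hardware, and
+		 * igb_ptp_init() above never arms the overflow work for
+		 * them, so only the Tx timestamp work is cancelled below.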
*/ + break; + default: + return; + } + + cancel_work_sync(&adapter->ptp_tx_work); + if (adapter->ptp_tx_skb) { + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + } + + if (adapter->ptp_clock) { + ptp_clock_unregister(adapter->ptp_clock); + dev_info(&adapter->pdev->dev, "removed PHC on %s\n", + adapter->netdev->name); + adapter->flags &= ~IGB_FLAG_PTP; + } +} + +/** + * igb_ptp_reset - Re-enable the adapter for PTP following a reset. + * @adapter: Board private structure. + * + * This function handles the reset work required to re-enable the PTP device. + **/ +void igb_ptp_reset(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (!(adapter->flags & IGB_FLAG_PTP)) + return; + + switch (adapter->hw.mac.type) { + case e1000_82576: + /* Dial the nominal frequency. */ + E1000_WRITE_REG(hw, E1000_TIMINCA, INCPERIOD_82576 | + INCVALUE_82576); + break; + case e1000_82580: + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + /* Enable the timer functions and interrupts. */ + E1000_WRITE_REG(hw, E1000_TSAUXC, 0x0); + E1000_WRITE_REG(hw, E1000_TSIM, E1000_TSIM_TXTS); + E1000_WRITE_REG(hw, E1000_IMS, E1000_IMS_TS); + break; + default: + /* No work to do. */ + return; + } + + /* Re-initialize the timer. */ + if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) { + struct timespec ts = ktime_to_timespec(ktime_get_real()); + + igb_ptp_settime_i210(&adapter->ptp_caps, &ts); + } else { + timecounter_init(&adapter->tc, &adapter->cc, + ktime_to_ns(ktime_get_real())); + } +} diff --git a/vmkdrivers/src_9/drivers/net/igb/igb_regtest.h b/vmkdrivers/src_9/drivers/net/igb/igb_regtest.h index 993599d1193930e809ea47d58e8bb7758a60b900..a6761db8acacf335cb3d8165d52c96685d5888b1 100644 --- a/vmkdrivers/src_9/drivers/net/igb/igb_regtest.h +++ b/vmkdrivers/src_9/drivers/net/igb/igb_regtest.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2009 Intel Corporation. + Copyright(c) 2007-2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -52,18 +52,48 @@ struct igb_reg_test { #define TABLE64_TEST_LO 5 #define TABLE64_TEST_HI 6 +/* i210 reg test */ +static struct igb_reg_test reg_test_i210[] = { + { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + /* RDH is read-only for i210, only test RDT. 
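+	 * For reference, each entry is roughly { register, offset stride,
+	 * array count, test type, mask, write pattern } per the
+	 * igb_reg_test layout declared earlier in this header; PATTERN_TEST
+	 * walks fixed bit patterns through the register under the mask,
+	 * while SET_READ_TEST checks that written bits read back.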
*/ + { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0003FFF0, 0x0003FFF0 }, + { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, + { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, + { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RA, 0, 16, TABLE64_TEST_LO, + 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA, 0, 16, TABLE64_TEST_HI, + 0x900FFFFF, 0xFFFFFFFF }, + { E1000_MTA, 0, 128, TABLE32_TEST, + 0xFFFFFFFF, 0xFFFFFFFF }, + { 0, 0, 0, 0 } +}; + /* i350 reg test */ static struct igb_reg_test reg_test_i350[] = { { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, - { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + /* VET is readonly on i350 */ { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, /* RDH is read-only for i350, only test RDT. */ { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, @@ -72,10 +102,10 @@ static struct igb_reg_test reg_test_i350[] = { { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, diff --git a/vmkdrivers/src_9/drivers/net/igb/igb_vmdq.c b/vmkdrivers/src_9/drivers/net/igb/igb_vmdq.c new file mode 100755 index 0000000000000000000000000000000000000000..85e8910fdc9279e5dbd6e415073807cf113566b7 --- /dev/null +++ b/vmkdrivers/src_9/drivers/net/igb/igb_vmdq.c @@ -0,0 +1,815 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2013 Intel Corporation. 
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+
+#include <linux/tcp.h>
+
+#include "igb.h"
+#include "igb_vmdq.h"
+#include <linux/if_vlan.h>
+
+#ifdef CONFIG_IGB_VMDQ_NETDEV
+int igb_vmdq_open(struct net_device *dev)
+{
+	struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
+	struct igb_adapter *adapter = vadapter->real_adapter;
+	struct net_device *main_netdev = adapter->netdev;
+	int hw_queue = vadapter->rx_ring->queue_index +
+		       adapter->vfs_allocated_count;
+
+	if (test_bit(__IGB_DOWN, &adapter->state)) {
+		DPRINTK(DRV, WARNING,
+			"Open %s before opening this device.\n",
+			main_netdev->name);
+		return -EAGAIN;
+	}
+	netif_carrier_off(dev);
+	vadapter->tx_ring->vmdq_netdev = dev;
+	vadapter->rx_ring->vmdq_netdev = dev;
+	if (is_valid_ether_addr(dev->dev_addr)) {
+		igb_del_mac_filter(adapter, dev->dev_addr, hw_queue);
+		igb_add_mac_filter(adapter, dev->dev_addr, hw_queue);
+	}
+	netif_carrier_on(dev);
+	return 0;
+}
+
+int igb_vmdq_close(struct net_device *dev)
+{
+	struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
+	struct igb_adapter *adapter = vadapter->real_adapter;
+	int hw_queue = vadapter->rx_ring->queue_index +
+		       adapter->vfs_allocated_count;
+
+	netif_carrier_off(dev);
+	igb_del_mac_filter(adapter, dev->dev_addr, hw_queue);
+
+	vadapter->tx_ring->vmdq_netdev = NULL;
+	vadapter->rx_ring->vmdq_netdev = NULL;
+	return 0;
+}
+
+netdev_tx_t igb_vmdq_xmit_frame(struct sk_buff *skb, struct net_device *dev)
+{
+	struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
+
+	return igb_xmit_frame_ring(skb, vadapter->tx_ring);
+}
+
+struct net_device_stats *igb_vmdq_get_stats(struct net_device *dev)
+{
+	struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
+	struct igb_adapter *adapter = vadapter->real_adapter;
+	struct e1000_hw *hw = &adapter->hw;
+	int hw_queue = vadapter->rx_ring->queue_index +
+		       adapter->vfs_allocated_count;
+
+	vadapter->net_stats.rx_packets +=
+		E1000_READ_REG(hw, E1000_PFVFGPRC(hw_queue));
+	E1000_WRITE_REG(hw, E1000_PFVFGPRC(hw_queue), 0);
+	vadapter->net_stats.tx_packets +=
+		E1000_READ_REG(hw, E1000_PFVFGPTC(hw_queue));
+	E1000_WRITE_REG(hw, E1000_PFVFGPTC(hw_queue), 0);
+	vadapter->net_stats.rx_bytes +=
+		E1000_READ_REG(hw, E1000_PFVFGORC(hw_queue));
+	E1000_WRITE_REG(hw, E1000_PFVFGORC(hw_queue), 0);
+	vadapter->net_stats.tx_bytes +=
+		E1000_READ_REG(hw, E1000_PFVFGOTC(hw_queue));
+	E1000_WRITE_REG(hw, E1000_PFVFGOTC(hw_queue), 0);
+	vadapter->net_stats.multicast +=
+		E1000_READ_REG(hw, E1000_PFVFMPRC(hw_queue));
+	E1000_WRITE_REG(hw, E1000_PFVFMPRC(hw_queue), 0);
+	/* only return the current stats */
+	return &vadapter->net_stats;
+}
+
+/**
+ * igb_write_vm_addr_list - write unicast addresses to RAR table
+ * @netdev: network interface device structure
+ *
+ * Writes unicast address list to the RAR table.
+ * Returns: -ENOMEM on failure/insufficient address space
+ *          0 on no addresses written
+ *          X on writing X addresses to the RAR table
+ **/
+static int igb_write_vm_addr_list(struct net_device *netdev)
+{
+	struct igb_vmdq_adapter *vadapter = netdev_priv(netdev);
+	struct igb_adapter *adapter = vadapter->real_adapter;
+	int count = 0;
+	int hw_queue = vadapter->rx_ring->queue_index +
+		       adapter->vfs_allocated_count;
+
+	/* return ENOMEM indicating insufficient memory for addresses */
+	if (netdev_uc_count(netdev) > igb_available_rars(adapter))
+		return -ENOMEM;
+
+	if (!netdev_uc_empty(netdev)) {
+#ifdef NETDEV_HW_ADDR_T_UNICAST
+		struct netdev_hw_addr *ha;
+#else
+		struct dev_mc_list *ha;
+#endif
+		netdev_for_each_uc_addr(ha, netdev) {
+#ifdef NETDEV_HW_ADDR_T_UNICAST
+			igb_del_mac_filter(adapter, ha->addr, hw_queue);
+			igb_add_mac_filter(adapter, ha->addr, hw_queue);
+#else
+			igb_del_mac_filter(adapter, ha->da_addr, hw_queue);
+			igb_add_mac_filter(adapter, ha->da_addr, hw_queue);
+#endif
+			count++;
+		}
+	}
+	return count;
+}
+
+
+#define E1000_VMOLR_UPE	0x20000000 /* Unicast promiscuous mode */
+void igb_vmdq_set_rx_mode(struct net_device *dev)
+{
+	struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
+	struct igb_adapter *adapter = vadapter->real_adapter;
+	struct e1000_hw *hw = &adapter->hw;
+	u32 vmolr, rctl;
+	int hw_queue = vadapter->rx_ring->queue_index +
+		       adapter->vfs_allocated_count;
+
+	/* Check for Promiscuous and All Multicast modes */
+	vmolr = E1000_READ_REG(hw, E1000_VMOLR(hw_queue));
+
+	/* clear the affected bits */
+	vmolr &= ~(E1000_VMOLR_UPE | E1000_VMOLR_MPME |
+		   E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE);
+
+	if (dev->flags & IFF_PROMISC) {
+		vmolr |= E1000_VMOLR_UPE;
+		rctl = E1000_READ_REG(hw, E1000_RCTL);
+		rctl |= E1000_RCTL_UPE;
+		E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+	} else {
+		rctl = E1000_READ_REG(hw, E1000_RCTL);
+		rctl &= ~E1000_RCTL_UPE;
+		E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+		if (dev->flags & IFF_ALLMULTI) {
+			vmolr |= E1000_VMOLR_MPME;
+		} else {
+			/*
+			 * Write addresses to the MTA, if the attempt fails
+			 * then we should just turn on promiscuous mode so
+			 * that we can at least receive multicast traffic
+			 */
+			if (igb_write_mc_addr_list(adapter->netdev) != 0)
+				vmolr |= E1000_VMOLR_ROMPE;
+		}
+#ifdef HAVE_SET_RX_MODE
+		/*
+		 * Write addresses to available RAR registers, if there is not
+		 * sufficient space to store all the addresses then enable
+		 * unicast promiscuous mode
+		 */
+		if (igb_write_vm_addr_list(dev) < 0)
+			vmolr |= E1000_VMOLR_UPE;
+#endif
+	}
+	E1000_WRITE_REG(hw, E1000_VMOLR(hw_queue), vmolr);
+
+	return;
+}
+
+int igb_vmdq_set_mac(struct net_device *dev, void *p)
+{
+	struct sockaddr *addr = p;
+	struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
+	struct igb_adapter *adapter = vadapter->real_adapter;
+	int hw_queue = vadapter->rx_ring->queue_index +
+		       adapter->vfs_allocated_count;
+
+	igb_del_mac_filter(adapter, dev->dev_addr, hw_queue);
+	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+	return igb_add_mac_filter(adapter, dev->dev_addr, hw_queue);
+}
+
+int igb_vmdq_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
+	struct igb_adapter *adapter = vadapter->real_adapter;
+
+	if (adapter->netdev->mtu < new_mtu) {
+		DPRINTK(PROBE, INFO,
+			"Set MTU on %s to >= %d "
+			"before changing MTU on %s\n",
+			adapter->netdev->name, new_mtu, dev->name);
+		return
-EINVAL; + } + dev->mtu = new_mtu; + return 0; +} + +void igb_vmdq_tx_timeout(struct net_device *dev) +{ + return; +} + +void igb_vmdq_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) +{ + struct igb_vmdq_adapter *vadapter = netdev_priv(dev); + struct igb_adapter *adapter = vadapter->real_adapter; + struct e1000_hw *hw = &adapter->hw; + int hw_queue = vadapter->rx_ring->queue_index + + adapter->vfs_allocated_count; + + vadapter->vlgrp = grp; + + igb_enable_vlan_tags(adapter); + E1000_WRITE_REG(hw, E1000_VMVIR(hw_queue), 0); + + return; +} +void igb_vmdq_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) +{ + struct igb_vmdq_adapter *vadapter = netdev_priv(dev); + struct igb_adapter *adapter = vadapter->real_adapter; +#ifndef HAVE_NETDEV_VLAN_FEATURES + struct net_device *v_netdev; +#endif + int hw_queue = vadapter->rx_ring->queue_index + + adapter->vfs_allocated_count; + + /* attempt to add filter to vlvf array */ + igb_vlvf_set(adapter, vid, TRUE, hw_queue); + +#ifndef HAVE_NETDEV_VLAN_FEATURES + + /* Copy feature flags from netdev to the vlan netdev for this vid. + * This allows things like TSO to bubble down to our vlan device. + */ + v_netdev = vlan_group_get_device(vadapter->vlgrp, vid); + v_netdev->features |= adapter->netdev->features; + vlan_group_set_device(vadapter->vlgrp, vid, v_netdev); +#endif + + return; +} +void igb_vmdq_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) +{ + struct igb_vmdq_adapter *vadapter = netdev_priv(dev); + struct igb_adapter *adapter = vadapter->real_adapter; + int hw_queue = vadapter->rx_ring->queue_index + + adapter->vfs_allocated_count; + + vlan_group_set_device(vadapter->vlgrp, vid, NULL); + /* remove vlan from VLVF table array */ + igb_vlvf_set(adapter, vid, FALSE, hw_queue); + + + return; +} + +static int igb_vmdq_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct igb_vmdq_adapter *vadapter = netdev_priv(netdev); + struct igb_adapter *adapter = vadapter->real_adapter; + struct e1000_hw *hw = &adapter->hw; + u32 status; + + if (hw->phy.media_type == e1000_media_type_copper) { + + ecmd->supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full| + SUPPORTED_Autoneg | + SUPPORTED_TP); + ecmd->advertising = ADVERTISED_TP; + + if (hw->mac.autoneg == 1) { + ecmd->advertising |= ADVERTISED_Autoneg; + /* the e1000 autoneg seems to match ethtool nicely */ + ecmd->advertising |= hw->phy.autoneg_advertised; + } + + ecmd->port = PORT_TP; + ecmd->phy_address = hw->phy.addr; + } else { + ecmd->supported = (SUPPORTED_1000baseT_Full | + SUPPORTED_FIBRE | + SUPPORTED_Autoneg); + + ecmd->advertising = (ADVERTISED_1000baseT_Full | + ADVERTISED_FIBRE | + ADVERTISED_Autoneg); + + ecmd->port = PORT_FIBRE; + } + + ecmd->transceiver = XCVR_INTERNAL; + + status = E1000_READ_REG(hw, E1000_STATUS); + + if (status & E1000_STATUS_LU) { + + if ((status & E1000_STATUS_SPEED_1000) || + hw->phy.media_type != e1000_media_type_copper) + ecmd->speed = SPEED_1000; + else if (status & E1000_STATUS_SPEED_100) + ecmd->speed = SPEED_100; + else + ecmd->speed = SPEED_10; + + if ((status & E1000_STATUS_FD) || + hw->phy.media_type != e1000_media_type_copper) + ecmd->duplex = DUPLEX_FULL; + else + ecmd->duplex = DUPLEX_HALF; + } else { + ecmd->speed = -1; + ecmd->duplex = -1; + } + + ecmd->autoneg = hw->mac.autoneg ? 
AUTONEG_ENABLE : AUTONEG_DISABLE; + return 0; +} + + +static u32 igb_vmdq_get_msglevel(struct net_device *netdev) +{ + struct igb_vmdq_adapter *vadapter = netdev_priv(netdev); + struct igb_adapter *adapter = vadapter->real_adapter; + return adapter->msg_enable; +} + +static void igb_vmdq_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct igb_vmdq_adapter *vadapter = netdev_priv(netdev); + struct igb_adapter *adapter = vadapter->real_adapter; + struct net_device *main_netdev = adapter->netdev; + + strncpy(drvinfo->driver, igb_driver_name, 32); + strncpy(drvinfo->version, igb_driver_version, 32); + + strncpy(drvinfo->fw_version, "N/A", 4); + snprintf(drvinfo->bus_info, 32, "%s VMDQ %d", main_netdev->name, + vadapter->rx_ring->queue_index); + drvinfo->n_stats = 0; + drvinfo->testinfo_len = 0; + drvinfo->regdump_len = 0; +} + +static void igb_vmdq_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct igb_vmdq_adapter *vadapter = netdev_priv(netdev); + + struct igb_ring *tx_ring = vadapter->tx_ring; + struct igb_ring *rx_ring = vadapter->rx_ring; + + ring->rx_max_pending = IGB_MAX_RXD; + ring->tx_max_pending = IGB_MAX_TXD; + ring->rx_mini_max_pending = 0; + ring->rx_jumbo_max_pending = 0; + ring->rx_pending = rx_ring->count; + ring->tx_pending = tx_ring->count; + ring->rx_mini_pending = 0; + ring->rx_jumbo_pending = 0; +} +static u32 igb_vmdq_get_rx_csum(struct net_device *netdev) +{ + struct igb_vmdq_adapter *vadapter = netdev_priv(netdev); + struct igb_adapter *adapter = vadapter->real_adapter; + + return test_bit(IGB_RING_FLAG_RX_CSUM, &adapter->rx_ring[0]->flags); +} + + +static struct ethtool_ops igb_vmdq_ethtool_ops = { + .get_settings = igb_vmdq_get_settings, + .get_drvinfo = igb_vmdq_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_ringparam = igb_vmdq_get_ringparam, + .get_rx_csum = igb_vmdq_get_rx_csum, + .get_tx_csum = ethtool_op_get_tx_csum, + .get_sg = ethtool_op_get_sg, + .set_sg = ethtool_op_set_sg, + .get_msglevel = igb_vmdq_get_msglevel, +#ifdef NETIF_F_TSO + .get_tso = ethtool_op_get_tso, +#endif +#ifdef HAVE_ETHTOOL_GET_PERM_ADDR + .get_perm_addr = ethtool_op_get_perm_addr, +#endif +}; + +void igb_vmdq_set_ethtool_ops(struct net_device *netdev) +{ + SET_ETHTOOL_OPS(netdev, &igb_vmdq_ethtool_ops); +} + + +#endif /* CONFIG_IGB_VMDQ_NETDEV */ + +#ifdef __VMKNETDDI_QUEUEOPS__ +int igb_set_rxqueue_macfilter(struct net_device *netdev, int queue, + u8 *mac_addr) +{ + int err = 0; + struct igb_adapter *adapter = netdev_priv(netdev); + struct igb_ring *rx_ring = adapter->rx_ring[queue]; + + if ((queue < 0) || (queue >= adapter->num_rx_queues)) { + DPRINTK(DRV, ERR, "Invalid RX Queue %u specified\n", queue); + return -EADDRNOTAVAIL; + } + + + /* Note: Broadcast address is used to disable the MAC filter*/ + if (!is_valid_ether_addr(mac_addr)) { + + /* Clear ring addr */ + DPRINTK(DRV, DEBUG, + "disabling MAC filter on RX Queue[%d]\n", queue); + igb_del_mac_filter(adapter, rx_ring->mac_addr, queue); + memset(rx_ring->mac_addr, 0xFF, NODE_ADDRESS_SIZE); + + return -EADDRNOTAVAIL; + } + + DPRINTK(DRV, DEBUG, + "enabling MAC filter [[0x%X:0x%X:0x%X:0x%X:0x%X:0x%X]] " + "on RX Queue[%d]\n", mac_addr[0], mac_addr[1], mac_addr[2], + mac_addr[3], mac_addr[4], mac_addr[5], queue); + + /* Store in ring */ + memcpy(rx_ring->mac_addr, mac_addr, NODE_ADDRESS_SIZE); + + igb_add_mac_filter(adapter, rx_ring->mac_addr, queue); + + return err; +} + +static int igb_get_netqueue_features(vmknetddi_queueop_get_features_args_t *args) 
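+/* Reports this driver's NetQueue capabilities to the vmkernel; the body
+ * below advertises support for both Rx and Tx queue pools. */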
+{ + args->features = VMKNETDDI_QUEUEOPS_FEATURE_RXQUEUES | + VMKNETDDI_QUEUEOPS_FEATURE_TXQUEUES; + return VMKNETDDI_QUEUEOPS_OK; +} + +static int igb_get_queue_count(vmknetddi_queueop_get_queue_count_args_t *args) +{ + struct net_device *netdev = args->netdev; + struct igb_adapter *adapter = netdev_priv(netdev); + + if (args->type == VMKNETDDI_QUEUEOPS_QUEUE_TYPE_TX) { + args->count = adapter->num_tx_queues - 1; + } else if (args->type == VMKNETDDI_QUEUEOPS_QUEUE_TYPE_RX) { + args->count = adapter->num_rx_queues - 1; + } else { + DPRINTK(DRV, ERR, "invalid queue type\n"); + return VMKNETDDI_QUEUEOPS_ERR; + } + + return VMKNETDDI_QUEUEOPS_OK; +} + +static int igb_get_filter_count(vmknetddi_queueop_get_filter_count_args_t *args) +{ + args->count = 1; + return VMKNETDDI_QUEUEOPS_OK; +} + +static int igb_alloc_rx_queue(struct net_device *netdev, + vmknetddi_queueops_queueid_t *p_qid, + struct napi_struct **napi_p) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + if (adapter->n_rx_queues_allocated >= adapter->num_rx_queues) { + DPRINTK(DRV, ERR, "igb_alloc_rx_queue: no free rx queues\n"); + return VMKNETDDI_QUEUEOPS_ERR; + } else { + int i; + for (i = 1; i < adapter->num_rx_queues; i++) { + struct igb_ring *ring = adapter->rx_ring[i]; + if (!ring->allocated) { + ring->allocated = TRUE; + *p_qid = VMKNETDDI_QUEUEOPS_MK_RX_QUEUEID(i); + DPRINTK(DRV, DEBUG, + "allocated VMDQ rx queue=%d\n", i); + *napi_p = &ring->q_vector->napi; + adapter->n_rx_queues_allocated++; + return VMKNETDDI_QUEUEOPS_OK; + } + } + DPRINTK(DRV, ERR, "no free rx queues found!\n"); + return VMKNETDDI_QUEUEOPS_ERR; + } +} + +static int igb_alloc_tx_queue(struct net_device *netdev, + vmknetddi_queueops_queueid_t *p_qid, + u16 *queue_mapping) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + if (adapter->n_tx_queues_allocated >= adapter->num_tx_queues) { + DPRINTK(DRV, ERR, "igb_alloc_tx_queue: no free tx queues\n"); + return VMKNETDDI_QUEUEOPS_ERR; + } else { + int i; + for (i = 1; i < adapter->num_tx_queues; i++) { + struct igb_ring *ring = adapter->tx_ring[i]; + if (!ring->allocated) { + ring->allocated = TRUE; + *p_qid = VMKNETDDI_QUEUEOPS_MK_TX_QUEUEID(i); + *queue_mapping = i; + DPRINTK(DRV, DEBUG, + "allocated VMDQ tx queue=%d\n", i); + adapter->n_tx_queues_allocated++; + return VMKNETDDI_QUEUEOPS_OK; + } + } + DPRINTK(DRV, ERR, "no free tx queues found!\n"); + return VMKNETDDI_QUEUEOPS_ERR; + } +} + +static int igb_alloc_queue(vmknetddi_queueop_alloc_queue_args_t *args) +{ + struct net_device *netdev = args->netdev; + struct igb_adapter *adapter = netdev_priv(netdev); + + if (args->type == VMKNETDDI_QUEUEOPS_QUEUE_TYPE_TX) { + return igb_alloc_tx_queue(args->netdev, &args->queueid, + &args->queue_mapping); + } else if (args->type == VMKNETDDI_QUEUEOPS_QUEUE_TYPE_RX) { + return igb_alloc_rx_queue(args->netdev, &args->queueid, + &args->napi); + } else { + DPRINTK(DRV, ERR, "invalid queue type\n"); + return VMKNETDDI_QUEUEOPS_ERR; + } +} + +static int +igb_free_rx_queue(struct net_device *netdev, + vmknetddi_queueops_queueid_t qid) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + u16 queue = VMKNETDDI_QUEUEOPS_QUEUEID_VAL(qid); + struct igb_ring *ring = adapter->rx_ring[queue]; + + if (!ring->allocated) { + DPRINTK(DRV, ERR, "rx queue %d not allocated\n", queue); + return VMKNETDDI_QUEUEOPS_ERR; + } + + DPRINTK(DRV, DEBUG, "freed VMDQ rx queue=%d\n", queue); + ring->allocated = FALSE; + adapter->n_rx_queues_allocated--; + return VMKNETDDI_QUEUEOPS_OK; +} + +static int 
+igb_free_tx_queue(struct net_device *netdev, + vmknetddi_queueops_queueid_t qid) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + u16 queue = VMKNETDDI_QUEUEOPS_QUEUEID_VAL(qid); + + if (!adapter->tx_ring[queue]->allocated) { + DPRINTK(DRV, ERR, "tx queue %d not allocated\n", queue); + return VMKNETDDI_QUEUEOPS_ERR; + } + + DPRINTK(DRV, DEBUG, "freed VMDQ tx queue=%d\n", queue); + adapter->tx_ring[queue]->allocated = FALSE; + adapter->n_tx_queues_allocated--; + return VMKNETDDI_QUEUEOPS_OK; +} + +static int +igb_free_queue(vmknetddi_queueop_free_queue_args_t *args) +{ + struct net_device *netdev = args->netdev; + struct igb_adapter *adapter = netdev_priv(netdev); + + if (VMKNETDDI_QUEUEOPS_IS_TX_QUEUEID(args->queueid)) { + return igb_free_tx_queue(netdev, args->queueid); + } else if (VMKNETDDI_QUEUEOPS_IS_RX_QUEUEID(args->queueid)) { + return igb_free_rx_queue(netdev, args->queueid); + } else { + DPRINTK(DRV, ERR, "invalid queue type\n"); + return VMKNETDDI_QUEUEOPS_ERR; + } +} + +static int +igb_get_queue_vector(vmknetddi_queueop_get_queue_vector_args_t *args) +{ + int qid; + struct net_device *netdev = args->netdev; + struct igb_adapter *adapter = netdev_priv(netdev); + /* Assuming RX queue id's are received */ + qid = VMKNETDDI_QUEUEOPS_QUEUEID_VAL(args->queueid); + args->vector = adapter->msix_entries[qid].vector; + + return VMKNETDDI_QUEUEOPS_OK; +} + +static int +igb_get_default_queue(vmknetddi_queueop_get_default_queue_args_t *args) +{ + struct net_device *netdev = args->netdev; + struct igb_adapter *adapter = netdev_priv(netdev); + + if (args->type == VMKNETDDI_QUEUEOPS_QUEUE_TYPE_RX) { + args->napi = &adapter->rx_ring[0]->q_vector->napi; + args->queueid = VMKNETDDI_QUEUEOPS_MK_RX_QUEUEID(0); + return VMKNETDDI_QUEUEOPS_OK; + } else if (args->type == VMKNETDDI_QUEUEOPS_QUEUE_TYPE_TX) { + args->queueid = VMKNETDDI_QUEUEOPS_MK_TX_QUEUEID(0); + return VMKNETDDI_QUEUEOPS_OK; + } else { + return VMKNETDDI_QUEUEOPS_ERR; + } +} + +static int +igb_apply_rx_filter(vmknetddi_queueop_apply_rx_filter_args_t *args) +{ + int rval; + u8 *macaddr; + u16 queue = VMKNETDDI_QUEUEOPS_QUEUEID_VAL(args->queueid); + struct igb_adapter *adapter = netdev_priv(args->netdev); + + if (!VMKNETDDI_QUEUEOPS_IS_RX_QUEUEID(args->queueid)) { + DPRINTK(DRV, ERR, "not an rx queue 0x%x\n", + args->queueid); + return VMKNETDDI_QUEUEOPS_ERR; + } + + if (vmknetddi_queueops_get_filter_class(&args->filter) + != VMKNETDDI_QUEUEOPS_FILTER_MACADDR) { + DPRINTK(DRV, ERR, "only mac filters supported\n"); + return VMKNETDDI_QUEUEOPS_ERR; + } + + if (!adapter->rx_ring[queue]->allocated) { + DPRINTK(DRV, ERR, "queue not allocated\n"); + return VMKNETDDI_QUEUEOPS_ERR; + } + + if (adapter->rx_ring[queue]->active) { + DPRINTK(DRV, ERR, "filter count exceeded\n"); + return VMKNETDDI_QUEUEOPS_ERR; + } + + macaddr = vmknetddi_queueops_get_filter_macaddr(&args->filter); + + rval = igb_set_rxqueue_macfilter(args->netdev, queue, macaddr); + if (rval == 0) { + adapter->rx_ring[queue]->active = TRUE; + /* force to 0 since we only support one filter per queue */ + args->filterid = VMKNETDDI_QUEUEOPS_MK_FILTERID(0); + return VMKNETDDI_QUEUEOPS_OK; + } else { + return VMKNETDDI_QUEUEOPS_ERR; + } +} + +static int +igb_remove_rx_filter(vmknetddi_queueop_remove_rx_filter_args_t *args) +{ + int rval; + u16 cidx = VMKNETDDI_QUEUEOPS_QUEUEID_VAL(args->queueid); + u16 fidx = VMKNETDDI_QUEUEOPS_FILTERID_VAL(args->filterid); + struct igb_adapter *adapter = netdev_priv(args->netdev); + u8 macaddr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 
0xff}; + + DPRINTK(DRV, DEBUG, "removing filter on cidx=%d, fidx=%d\n", + cidx, fidx); + + /* This will return an error because broadcast is not a valid + * Ethernet address, so ignore and carry on + */ + rval = igb_set_rxqueue_macfilter(args->netdev, cidx, macaddr); + adapter->rx_ring[cidx]->active = FALSE; + return rval; +} + + +static int +igb_get_queue_stats(vmknetddi_queueop_get_stats_args_t *args) +{ + return VMKNETDDI_QUEUEOPS_ERR; +} + +static int +igb_get_netqueue_version(vmknetddi_queueop_get_version_args_t *args) +{ + return vmknetddi_queueops_version(args); +} +static int igb_set_tx_priority(vmknetddi_queueop_set_tx_priority_args_t *args) +{ + /* Not supported */ + return VMKNETDDI_QUEUEOPS_OK; +} +int +igb_netqueue_ops(vmknetddi_queueops_op_t op, void *args) +{ + switch (op) { + case VMKNETDDI_QUEUEOPS_OP_GET_VERSION: + return igb_get_netqueue_version( + (vmknetddi_queueop_get_version_args_t *)args); + break; + + case VMKNETDDI_QUEUEOPS_OP_GET_FEATURES: + return igb_get_netqueue_features( + (vmknetddi_queueop_get_features_args_t *)args); + break; + + case VMKNETDDI_QUEUEOPS_OP_GET_QUEUE_COUNT: + return igb_get_queue_count( + (vmknetddi_queueop_get_queue_count_args_t *)args); + break; + + case VMKNETDDI_QUEUEOPS_OP_GET_FILTER_COUNT: + return igb_get_filter_count( + (vmknetddi_queueop_get_filter_count_args_t *)args); + break; + + case VMKNETDDI_QUEUEOPS_OP_ALLOC_QUEUE: + return igb_alloc_queue( + (vmknetddi_queueop_alloc_queue_args_t *)args); + break; + + case VMKNETDDI_QUEUEOPS_OP_FREE_QUEUE: + return igb_free_queue( + (vmknetddi_queueop_free_queue_args_t *)args); + break; + + case VMKNETDDI_QUEUEOPS_OP_GET_QUEUE_VECTOR: + return igb_get_queue_vector( + (vmknetddi_queueop_get_queue_vector_args_t *)args); + break; + + case VMKNETDDI_QUEUEOPS_OP_GET_DEFAULT_QUEUE: + return igb_get_default_queue( + (vmknetddi_queueop_get_default_queue_args_t *)args); + break; + + case VMKNETDDI_QUEUEOPS_OP_APPLY_RX_FILTER: + return igb_apply_rx_filter( + (vmknetddi_queueop_apply_rx_filter_args_t *)args); + break; + + case VMKNETDDI_QUEUEOPS_OP_REMOVE_RX_FILTER: + return igb_remove_rx_filter( + (vmknetddi_queueop_remove_rx_filter_args_t *)args); + break; + + case VMKNETDDI_QUEUEOPS_OP_GET_STATS: + return igb_get_queue_stats( + (vmknetddi_queueop_get_stats_args_t *)args); + break; + + case VMKNETDDI_QUEUEOPS_OP_SET_TX_PRIORITY: + return igb_set_tx_priority( + (vmknetddi_queueop_set_tx_priority_args_t *)args); + break; + + default: + return VMKNETDDI_QUEUEOPS_ERR; + } + + return VMKNETDDI_QUEUEOPS_ERR; +} + +#endif /* __VMKNETDDI_QUEUEOPS__ */ diff --git a/vmkdrivers/src_9/drivers/net/igb/igb_vmdq.h b/vmkdrivers/src_9/drivers/net/igb/igb_vmdq.h new file mode 100755 index 0000000000000000000000000000000000000000..60becd9a71d738b287246eb942164cbb1fc43f05 --- /dev/null +++ b/vmkdrivers/src_9/drivers/net/igb/igb_vmdq.h @@ -0,0 +1,51 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2013 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. 
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IGB_VMDQ_H_
+#define _IGB_VMDQ_H_
+
+#ifdef CONFIG_IGB_VMDQ_NETDEV
+int igb_vmdq_open(struct net_device *dev);
+int igb_vmdq_close(struct net_device *dev);
+netdev_tx_t igb_vmdq_xmit_frame(struct sk_buff *skb, struct net_device *dev);
+struct net_device_stats *igb_vmdq_get_stats(struct net_device *dev);
+void igb_vmdq_set_rx_mode(struct net_device *dev);
+int igb_vmdq_set_mac(struct net_device *dev, void *addr);
+int igb_vmdq_change_mtu(struct net_device *dev, int new_mtu);
+void igb_vmdq_tx_timeout(struct net_device *dev);
+void igb_vmdq_vlan_rx_register(struct net_device *dev,
+			       struct vlan_group *grp);
+void igb_vmdq_vlan_rx_add_vid(struct net_device *dev, unsigned short vid);
+void igb_vmdq_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
+void igb_vmdq_set_ethtool_ops(struct net_device *netdev);
+#endif /* CONFIG_IGB_VMDQ_NETDEV */
+#ifdef __VMKLNX__
+#ifdef __VMKNETDDI_QUEUEOPS__
+extern int igb_netqueue_ops(vmknetddi_queueops_op_t op, void *args);
+#endif
+#endif
+#endif /* _IGB_VMDQ_H_ */
diff --git a/vmkdrivers/src_9/drivers/net/igb/kcompat.c b/vmkdrivers/src_9/drivers/net/igb/kcompat.c
index 97c466a1732ec6dc3a92c384513c5703cedfbf15..89dde495271861ef4a743ad1ac0b2a869c12873d 100644
--- a/vmkdrivers/src_9/drivers/net/igb/kcompat.c
+++ b/vmkdrivers/src_9/drivers/net/igb/kcompat.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2009 Intel Corporation.
+  Copyright(c) 2007-2013 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -29,26 +29,354 @@
 #include "kcompat.h"
 
 /*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,21) )
-struct sk_buff *
-_kc_skb_pad(struct sk_buff *skb, int pad)
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) )
+/* From lib/vsprintf.c */
+#include <asm/div64.h>
+
+static int skip_atoi(const char **s)
 {
-	struct sk_buff *nskb;
-
-	/* If the skbuff is non linear tailroom is always zero..
*/ - if(skb_tailroom(skb) >= pad) - { - memset(skb->data+skb->len, 0, pad); - return skb; - } - - nskb = skb_copy_expand(skb, skb_headroom(skb), skb_tailroom(skb) + pad, GFP_ATOMIC); - kfree_skb(skb); - if(nskb) - memset(nskb->data+nskb->len, 0, pad); - return nskb; -} -#endif /* < 2.4.21 */ + int i=0; + + while (isdigit(**s)) + i = i*10 + *((*s)++) - '0'; + return i; +} + +#define _kc_ZEROPAD 1 /* pad with zero */ +#define _kc_SIGN 2 /* unsigned/signed long */ +#define _kc_PLUS 4 /* show plus */ +#define _kc_SPACE 8 /* space if plus */ +#define _kc_LEFT 16 /* left justified */ +#define _kc_SPECIAL 32 /* 0x */ +#define _kc_LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */ + +static char * number(char * buf, char * end, long long num, int base, int size, int precision, int type) +{ + char c,sign,tmp[66]; + const char *digits; + const char small_digits[] = "0123456789abcdefghijklmnopqrstuvwxyz"; + const char large_digits[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"; + int i; + + digits = (type & _kc_LARGE) ? large_digits : small_digits; + if (type & _kc_LEFT) + type &= ~_kc_ZEROPAD; + if (base < 2 || base > 36) + return 0; + c = (type & _kc_ZEROPAD) ? '0' : ' '; + sign = 0; + if (type & _kc_SIGN) { + if (num < 0) { + sign = '-'; + num = -num; + size--; + } else if (type & _kc_PLUS) { + sign = '+'; + size--; + } else if (type & _kc_SPACE) { + sign = ' '; + size--; + } + } + if (type & _kc_SPECIAL) { + if (base == 16) + size -= 2; + else if (base == 8) + size--; + } + i = 0; + if (num == 0) + tmp[i++]='0'; + else while (num != 0) + tmp[i++] = digits[do_div(num,base)]; + if (i > precision) + precision = i; + size -= precision; + if (!(type&(_kc_ZEROPAD+_kc_LEFT))) { + while(size-->0) { + if (buf <= end) + *buf = ' '; + ++buf; + } + } + if (sign) { + if (buf <= end) + *buf = sign; + ++buf; + } + if (type & _kc_SPECIAL) { + if (base==8) { + if (buf <= end) + *buf = '0'; + ++buf; + } else if (base==16) { + if (buf <= end) + *buf = '0'; + ++buf; + if (buf <= end) + *buf = digits[33]; + ++buf; + } + } + if (!(type & _kc_LEFT)) { + while (size-- > 0) { + if (buf <= end) + *buf = c; + ++buf; + } + } + while (i < precision--) { + if (buf <= end) + *buf = '0'; + ++buf; + } + while (i-- > 0) { + if (buf <= end) + *buf = tmp[i]; + ++buf; + } + while (size-- > 0) { + if (buf <= end) + *buf = ' '; + ++buf; + } + return buf; +} + +int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args) +{ + int len; + unsigned long long num; + int i, base; + char *str, *end, c; + const char *s; + + int flags; /* flags to number() */ + + int field_width; /* width of output field */ + int precision; /* min. # of digits for integers; max + number of chars for from string */ + int qualifier; /* 'h', 'l', or 'L' for integer fields */ + /* 'z' support added 23/7/1999 S.H. 
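+				 * (Aside: throughout this fallback the
+				 * "if (str <= end)" guards let formatting run
+				 * past the buffer while storing only the
+				 * in-bounds bytes, so the returned value is
+				 * the length the output would have had -- the
+				 * usual snprintf contract.)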
*/ + /* 'z' changed to 'Z' --davidm 1/25/99 */ + + str = buf; + end = buf + size - 1; + + if (end < buf - 1) { + end = ((void *) -1); + size = end - buf + 1; + } + + for (; *fmt ; ++fmt) { + if (*fmt != '%') { + if (str <= end) + *str = *fmt; + ++str; + continue; + } + + /* process flags */ + flags = 0; + repeat: + ++fmt; /* this also skips first '%' */ + switch (*fmt) { + case '-': flags |= _kc_LEFT; goto repeat; + case '+': flags |= _kc_PLUS; goto repeat; + case ' ': flags |= _kc_SPACE; goto repeat; + case '#': flags |= _kc_SPECIAL; goto repeat; + case '0': flags |= _kc_ZEROPAD; goto repeat; + } + + /* get field width */ + field_width = -1; + if (isdigit(*fmt)) + field_width = skip_atoi(&fmt); + else if (*fmt == '*') { + ++fmt; + /* it's the next argument */ + field_width = va_arg(args, int); + if (field_width < 0) { + field_width = -field_width; + flags |= _kc_LEFT; + } + } + + /* get the precision */ + precision = -1; + if (*fmt == '.') { + ++fmt; + if (isdigit(*fmt)) + precision = skip_atoi(&fmt); + else if (*fmt == '*') { + ++fmt; + /* it's the next argument */ + precision = va_arg(args, int); + } + if (precision < 0) + precision = 0; + } + + /* get the conversion qualifier */ + qualifier = -1; + if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || *fmt =='Z') { + qualifier = *fmt; + ++fmt; + } + + /* default base */ + base = 10; + + switch (*fmt) { + case 'c': + if (!(flags & _kc_LEFT)) { + while (--field_width > 0) { + if (str <= end) + *str = ' '; + ++str; + } + } + c = (unsigned char) va_arg(args, int); + if (str <= end) + *str = c; + ++str; + while (--field_width > 0) { + if (str <= end) + *str = ' '; + ++str; + } + continue; + + case 's': + s = va_arg(args, char *); + if (!s) + s = ""; + + len = strnlen(s, precision); + + if (!(flags & _kc_LEFT)) { + while (len < field_width--) { + if (str <= end) + *str = ' '; + ++str; + } + } + for (i = 0; i < len; ++i) { + if (str <= end) + *str = *s; + ++str; ++s; + } + while (len < field_width--) { + if (str <= end) + *str = ' '; + ++str; + } + continue; + + case 'p': + if (field_width == -1) { + field_width = 2*sizeof(void *); + flags |= _kc_ZEROPAD; + } + str = number(str, end, + (unsigned long) va_arg(args, void *), + 16, field_width, precision, flags); + continue; + + + case 'n': + /* FIXME: + * What does C99 say about the overflow case here? 
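+			 * (Partial answer: C99 7.19.6.1 has %n store the
+			 * number of characters written so far; the natural
+			 * snprintf reading is the would-be count, which is
+			 * what (str - buf) yields here even after str has
+			 * advanced past end on truncation.)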
*/ + if (qualifier == 'l') { + long * ip = va_arg(args, long *); + *ip = (str - buf); + } else if (qualifier == 'Z') { + size_t * ip = va_arg(args, size_t *); + *ip = (str - buf); + } else { + int * ip = va_arg(args, int *); + *ip = (str - buf); + } + continue; + + case '%': + if (str <= end) + *str = '%'; + ++str; + continue; + + /* integer number formats - set up the flags and "break" */ + case 'o': + base = 8; + break; + + case 'X': + flags |= _kc_LARGE; + case 'x': + base = 16; + break; + + case 'd': + case 'i': + flags |= _kc_SIGN; + case 'u': + break; + + default: + if (str <= end) + *str = '%'; + ++str; + if (*fmt) { + if (str <= end) + *str = *fmt; + ++str; + } else { + --fmt; + } + continue; + } + if (qualifier == 'L') + num = va_arg(args, long long); + else if (qualifier == 'l') { + num = va_arg(args, unsigned long); + if (flags & _kc_SIGN) + num = (signed long) num; + } else if (qualifier == 'Z') { + num = va_arg(args, size_t); + } else if (qualifier == 'h') { + num = (unsigned short) va_arg(args, int); + if (flags & _kc_SIGN) + num = (signed short) num; + } else { + num = va_arg(args, unsigned int); + if (flags & _kc_SIGN) + num = (signed int) num; + } + str = number(str, end, num, base, + field_width, precision, flags); + } + if (str <= end) + *str = '\0'; + else if (size > 0) + /* don't write out a null byte if the buf size is zero */ + *end = '\0'; + /* the trailing null byte doesn't count towards the total + * ++str; + */ + return str-buf; +} + +int _kc_snprintf(char * buf, size_t size, const char *fmt, ...) +{ + va_list args; + int i; + + va_start(args, fmt); + i = _kc_vsnprintf(buf,size,fmt,args); + va_end(args); + return i; +} +#endif /* < 2.4.8 */ /*****************************************************************************/ #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) ) @@ -157,10 +485,9 @@ _kc_alloc_etherdev(int sizeof_priv) int alloc_size; alloc_size = sizeof(*dev) + sizeof_priv + IFNAMSIZ + 31; - dev = kmalloc(alloc_size, GFP_KERNEL); + dev = kzalloc(alloc_size, GFP_KERNEL); if (!dev) return NULL; - memset(dev, 0, alloc_size); if (sizeof_priv) dev->priv = (void *) (((unsigned long)(dev + 1) + 31) & ~31); @@ -264,8 +591,93 @@ found_middle: } #endif /* __VMKLNX__ */ +size_t _kc_strlcpy(char *dest, const char *src, size_t size) +{ + size_t ret = strlen(src); + + if (size) { + size_t len = (ret >= size) ? size - 1 : ret; + memcpy(dest, src, len); + dest[len] = '\0'; + } + return ret; +} + +#ifndef do_div +#if BITS_PER_LONG == 32 +uint32_t __attribute__((weak)) _kc__div64_32(uint64_t *n, uint32_t base) +{ + uint64_t rem = *n; + uint64_t b = base; + uint64_t res, d = 1; + uint32_t high = rem >> 32; + + /* Reduce the thing a bit first */ + res = 0; + if (high >= base) { + high /= base; + res = (uint64_t) high << 32; + rem -= (uint64_t) (high*base) << 32; + } + + while ((int64_t)b > 0 && b < rem) { + b = b+b; + d = d+d; + } + + do { + if (rem >= b) { + rem -= b; + res += d; + } + b >>= 1; + d >>= 1; + } while (d); + + *n = res; + return rem; +} +#endif /* BITS_PER_LONG == 32 */ +#endif /* do_div */ #endif /* 2.6.0 => 2.4.6 */ +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) +int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...) +{ + va_list args; + int i; + + va_start(args, fmt); + i = vsnprintf(buf, size, fmt, args); + va_end(args); + return (i >= size) ? 
(size - 1) : i;
+}
+#endif /* < 2.6.4 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) )
+DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES) = {1};
+#endif /* < 2.6.10 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) )
+char *_kc_kstrdup(const char *s, unsigned int gfp)
+{
+	size_t len;
+	char *buf;
+
+	if (!s)
+		return NULL;
+
+	len = strlen(s) + 1;
+	buf = kmalloc(len, gfp);
+	if (buf)
+		memcpy(buf, s, len);
+	return buf;
+}
+#endif /* < 2.6.13 */
+
 /*****************************************************************************/
 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) )
 void *_kc_kzalloc(size_t size, int flags)
@@ -278,31 +690,50 @@ void *_kc_kzalloc(size_t size, int flags)
 #endif /* <= 2.6.13 */
 
 /*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) )
-struct sk_buff *_kc_netdev_alloc_skb(struct net_device *dev,
-                                     unsigned int length)
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) )
+int _kc_skb_pad(struct sk_buff *skb, int pad)
 {
-	/* 16 == NET_PAD_SKB */
-	struct sk_buff *skb;
-	skb = alloc_skb(length + 16, GFP_ATOMIC);
-	if (likely(skb != NULL)) {
-		skb_reserve(skb, 16);
-		skb->dev = dev;
+	int ntail;
+
+	/* If the skbuff is non linear tailroom is always zero.. */
+	if(!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
+		memset(skb->data+skb->len, 0, pad);
+		return 0;
+	}
+
+	ntail = skb->data_len + pad - (skb->end - skb->tail);
+	if (likely(skb_cloned(skb) || ntail > 0)) {
+		if (pskb_expand_head(skb, 0, ntail, GFP_ATOMIC))
+			goto free_skb;
+	}
-	return skb;
+
+#ifdef MAX_SKB_FRAGS
+	if (skb_is_nonlinear(skb) &&
+	    !__pskb_pull_tail(skb, skb->data_len))
+		goto free_skb;
+
+#endif
+	memset(skb->data + skb->len, 0, pad);
+	return 0;
+
+free_skb:
+	kfree_skb(skb);
+	return -ENOMEM;
 }
-#endif /* <= 2.6.17 */
 
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) )
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))
 int _kc_pci_save_state(struct pci_dev *pdev)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct adapter_struct *adapter = netdev_priv(netdev);
 	int size = PCI_CONFIG_SPACE_LEN, i;
-	u16 pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP);
-	u16 pcie_link_status;
+	u16 pcie_cap_offset, pcie_link_status;
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) )
+	/* no ->dev for 2.4 kernels */
+	WARN_ON(pdev->dev.driver_data == NULL);
+#endif
+	pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP);
 	if (pcie_cap_offset) {
 		if (!pci_read_config_word(pdev,
 		                          pcie_cap_offset + PCIE_LINK_STATUS,
@@ -325,7 +756,7 @@ int _kc_pci_save_state(struct pci_dev *pdev)
 	return 0;
 }
 
-void _kc_pci_restore_state(struct pci_dev * pdev)
+void _kc_pci_restore_state(struct pci_dev *pdev)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct adapter_struct *adapter = netdev_priv(netdev);
@@ -350,6 +781,7 @@ void _kc_pci_restore_state(struct pci_dev * pdev)
 #endif
 	}
 }
+#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */
 
 #ifdef HAVE_PCI_ERS
 void _kc_free_netdev(struct net_device *netdev)
@@ -371,16 +803,170 @@ void _kc_free_netdev(struct net_device *netdev)
 #endif
 	}
 }
 #endif
-#endif /* <= 2.6.18 */
+
+void *_kc_kmemdup(const void *src, size_t len, unsigned gfp)
+{
+	void *p;
+
+	p = kzalloc(len, gfp);
+	if (p)
+		memcpy(p, src, len);
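+	/* Backport of kmemdup() for kernels that lack it: duplicates len
+	 * bytes using the caller's gfp flags; the result must be kfree()d.
+	 * The kzalloc() zero-fill is immediately overwritten by memcpy(),
+	 * so it is harmless rather than required. */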
+ return p; +} +#endif /* <= 2.6.19 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) ) +struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev) +{ + return ((struct adapter_struct *)netdev_priv(netdev))->pdev; +} +#endif /* < 2.6.21 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) ) +/* hexdump code taken from lib/hexdump.c */ +static void _kc_hex_dump_to_buffer(const void *buf, size_t len, int rowsize, + int groupsize, unsigned char *linebuf, + size_t linebuflen, bool ascii) +{ + const u8 *ptr = buf; + u8 ch; + int j, lx = 0; + int ascii_column; + + if (rowsize != 16 && rowsize != 32) + rowsize = 16; + + if (!len) + goto nil; + if (len > rowsize) /* limit to one line at a time */ + len = rowsize; + if ((len % groupsize) != 0) /* no mixed size output */ + groupsize = 1; + + switch (groupsize) { + case 8: { + const u64 *ptr8 = buf; + int ngroups = len / groupsize; + + for (j = 0; j < ngroups; j++) + lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, + "%s%16.16llx", j ? " " : "", + (unsigned long long)*(ptr8 + j)); + ascii_column = 17 * ngroups + 2; + break; + } + + case 4: { + const u32 *ptr4 = buf; + int ngroups = len / groupsize; + + for (j = 0; j < ngroups; j++) + lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, + "%s%8.8x", j ? " " : "", *(ptr4 + j)); + ascii_column = 9 * ngroups + 2; + break; + } + + case 2: { + const u16 *ptr2 = buf; + int ngroups = len / groupsize; + + for (j = 0; j < ngroups; j++) + lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, + "%s%4.4x", j ? " " : "", *(ptr2 + j)); + ascii_column = 5 * ngroups + 2; + break; + } + + default: + for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) { + ch = ptr[j]; + linebuf[lx++] = hex_asc(ch >> 4); + linebuf[lx++] = hex_asc(ch & 0x0f); + linebuf[lx++] = ' '; + } + if (j) + lx--; + + ascii_column = 3 * rowsize + 2; + break; + } + if (!ascii) + goto nil; + + while (lx < (linebuflen - 1) && lx < (ascii_column - 1)) + linebuf[lx++] = ' '; + for (j = 0; (j < len) && (lx + 2) < linebuflen; j++) + linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? 
ptr[j] + : '.'; +nil: + linebuf[lx++] = '\0'; +} + +void _kc_print_hex_dump(const char *level, + const char *prefix_str, int prefix_type, + int rowsize, int groupsize, + const void *buf, size_t len, bool ascii) +{ + const u8 *ptr = buf; + int i, linelen, remaining = len; + unsigned char linebuf[200]; + + if (rowsize != 16 && rowsize != 32) + rowsize = 16; + + for (i = 0; i < len; i += rowsize) { + linelen = min(remaining, rowsize); + remaining -= rowsize; + _kc_hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize, + linebuf, sizeof(linebuf), ascii); + + switch (prefix_type) { + case DUMP_PREFIX_ADDRESS: + printk("%s%s%*p: %s\n", level, prefix_str, + (int)(2 * sizeof(void *)), ptr + i, linebuf); + break; + case DUMP_PREFIX_OFFSET: + printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf); + break; + default: + printk("%s%s%s\n", level, prefix_str, linebuf); + break; + } + } +} + +#endif /* < 2.6.22 */ /*****************************************************************************/ #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) ) #endif /* <= 2.6.24 */ +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) ) +#endif /* < 2.6.26 */ + /*****************************************************************************/ #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) ) #ifdef HAVE_TX_MQ #endif /* HAVE_TX_MQ */ + +#ifndef __WARN_printf +void __kc_warn_slowpath(const char *file, int line, const char *fmt, ...) +{ + va_list args; + + printk(KERN_WARNING "------------[ cut here ]------------\n"); + printk(KERN_WARNING "WARNING: at %s:%d %s()\n", file, line); + va_start(args, fmt); + vprintk(fmt, args); + va_end(args); + + dump_stack(); +} +#endif /* __WARN_printf */ #endif /* < 2.6.27 */ /*****************************************************************************/ @@ -422,79 +1008,261 @@ out: /*****************************************************************************/ #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) ) +static void __kc_pci_set_master(struct pci_dev *pdev, bool enable) +{ + u16 old_cmd, cmd; + + pci_read_config_word(pdev, PCI_COMMAND, &old_cmd); + if (enable) + cmd = old_cmd | PCI_COMMAND_MASTER; + else + cmd = old_cmd & ~PCI_COMMAND_MASTER; + if (cmd != old_cmd) { + dev_dbg(pci_dev_to_dev(pdev), "%s bus mastering\n", + enable ? 
"enabling" : "disabling"); + pci_write_config_word(pdev, PCI_COMMAND, cmd); + } +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,7) ) + pdev->is_busmaster = enable; +#endif +} + +void _kc_pci_clear_master(struct pci_dev *dev) +{ + __kc_pci_set_master(dev, false); +} #endif /* < 2.6.29 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) ) +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0)) +int _kc_pci_num_vf(struct pci_dev *dev) +{ + int num_vf = 0; +#ifndef __VMKLNX__ +#endif + return num_vf; +} +#endif /* RHEL_RELEASE_CODE */ +#endif /* < 2.6.34 */ + + /*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) ) -#ifdef HAVE_NETDEV_SELECT_QUEUE -#include -static u32 _kc_simple_tx_hashrnd; -static u32 _kc_simple_tx_hashrnd_initialized; - -u16 _kc_skb_tx_hash(struct net_device *dev, struct sk_buff *skb) -{ - u32 addr1, addr2, ports; - u32 hash, ihl; - u8 ip_proto = 0; - - if (unlikely(!_kc_simple_tx_hashrnd_initialized)) { - get_random_bytes(&_kc_simple_tx_hashrnd, 4); - _kc_simple_tx_hashrnd_initialized = 1; - } - - switch (skb->protocol) { - case htons(ETH_P_IP): - if (!(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET))) - ip_proto = ip_hdr(skb)->protocol; - addr1 = ip_hdr(skb)->saddr; - addr2 = ip_hdr(skb)->daddr; - ihl = ip_hdr(skb)->ihl; - break; -#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) - case htons(ETH_P_IPV6): - ip_proto = ipv6_hdr(skb)->nexthdr; - addr1 = ipv6_hdr(skb)->saddr.s6_addr32[3]; - addr2 = ipv6_hdr(skb)->daddr.s6_addr32[3]; - ihl = (40 >> 2); - break; +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) ) +static const u32 _kc_flags_dup_features = + (ETH_FLAG_LRO | ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH); + +u32 _kc_ethtool_op_get_flags(struct net_device *dev) +{ + return dev->features & _kc_flags_dup_features; +} + +int _kc_ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported) +{ + if (data & ~supported) + return -EINVAL; + + dev->features = ((dev->features & ~_kc_flags_dup_features) | + (data & _kc_flags_dup_features)); + return 0; +} +#endif /* < 2.6.36 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) ) +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0))) + + + +#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */ +#endif /* < 2.6.39 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) ) +void _kc_skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, + int off, int size, unsigned int truesize) +{ + skb_fill_page_desc(skb, i, page, off, size); + skb->len += size; + skb->data_len += size; +#ifndef __VMKLNX__ + skb->truesize += truesize; #endif - default: +} + +int _kc_simple_open(struct inode *inode, struct file *file) +{ + if (inode->i_private) + file->private_data = inode->i_private; + + return 0; +} + +#endif /* < 3.4.0 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) ) +#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) +static inline int __kc_pcie_cap_version(struct pci_dev *dev) +{ + int pos; + u16 reg16; + + pos = pci_find_capability(dev, PCI_CAP_ID_EXP); + if (!pos) return 0; - } + pci_read_config_word(dev, pos + PCI_EXP_FLAGS, ®16); + return reg16 & PCI_EXP_FLAGS_VERS; +} +static inline bool __kc_pcie_cap_has_devctl(const struct pci_dev __always_unused 
*dev) +{ + return true; +} - switch (ip_proto) { - case IPPROTO_TCP: - case IPPROTO_UDP: - case IPPROTO_DCCP: - case IPPROTO_ESP: - case IPPROTO_AH: - case IPPROTO_SCTP: - case IPPROTO_UDPLITE: - ports = *((u32 *) (skb_network_header(skb) + (ihl * 4))); - break; +static inline bool __kc_pcie_cap_has_lnkctl(struct pci_dev *dev) +{ + int type = pci_pcie_type(dev); + + return __kc_pcie_cap_version(dev) > 1 || + type == PCI_EXP_TYPE_ROOT_PORT || + type == PCI_EXP_TYPE_ENDPOINT || + type == PCI_EXP_TYPE_LEG_END; +} + +static inline bool __kc_pcie_cap_has_sltctl(struct pci_dev *dev) +{ + int type = pci_pcie_type(dev); + int pos; + u16 pcie_flags_reg; + + pos = pci_find_capability(dev, PCI_CAP_ID_EXP); + if (!pos) + return 0; + pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &pcie_flags_reg); + + return __kc_pcie_cap_version(dev) > 1 || + type == PCI_EXP_TYPE_ROOT_PORT || + (type == PCI_EXP_TYPE_DOWNSTREAM && + pcie_flags_reg & PCI_EXP_FLAGS_SLOT); +} +static inline bool __kc_pcie_cap_has_rtctl(struct pci_dev *dev) +{ + int type = pci_pcie_type(dev); + + return __kc_pcie_cap_version(dev) > 1 || + type == PCI_EXP_TYPE_ROOT_PORT || + type == PCI_EXP_TYPE_RC_EC; +} + +static bool __kc_pcie_capability_reg_implemented(struct pci_dev *dev, int pos) +{ + if (!pci_is_pcie(dev)) + return false; + + switch (pos) { + case PCI_EXP_FLAGS_TYPE: + return true; + case PCI_EXP_DEVCAP: + case PCI_EXP_DEVCTL: + case PCI_EXP_DEVSTA: + return __kc_pcie_cap_has_devctl(dev); + case PCI_EXP_LNKCAP: + case PCI_EXP_LNKCTL: + case PCI_EXP_LNKSTA: + return __kc_pcie_cap_has_lnkctl(dev); + case PCI_EXP_SLTCAP: + case PCI_EXP_SLTCTL: + case PCI_EXP_SLTSTA: + return __kc_pcie_cap_has_sltctl(dev); + case PCI_EXP_RTCTL: + case PCI_EXP_RTCAP: + case PCI_EXP_RTSTA: + return __kc_pcie_cap_has_rtctl(dev); + case PCI_EXP_DEVCAP2: + case PCI_EXP_DEVCTL2: + case PCI_EXP_LNKCAP2: + case PCI_EXP_LNKCTL2: + case PCI_EXP_LNKSTA2: + return __kc_pcie_cap_version(dev) > 1; default: - ports = 0; - break; + return false; + } +} + +/* + * Note that these accessor functions are only for the "PCI Express + * Capability" (see PCIe spec r3.0, sec 7.8). They do not apply to the + * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.) + */ +int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val) +{ + int ret; + + *val = 0; + if (pos & 1) + return -EINVAL; + + if (__kc_pcie_capability_reg_implemented(dev, pos)) { + ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val); + /* + * Reset *val to 0 if pci_read_config_word() fails, it may + * have been written as 0xFFFF if hardware error happens + * during pci_read_config_word(). + */ + if (ret) + *val = 0; + return ret; + } + + /* + * For Functions that do not implement the Slot Capabilities, + * Slot Status, and Slot Control registers, these spaces must + * be hardwired to 0b, with the exception of the Presence Detect + * State bit in the Slot Status register of Downstream Ports, + * which must be hardwired to 1b. 
(PCIe Base Spec 3.0, sec 7.8) + */ + if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA && + pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) { + *val = PCI_EXP_SLTSTA_PDS; } - hash = jhash_3words(addr1, addr2, ports, _kc_simple_tx_hashrnd); + return 0; +} + +int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val) +{ + if (pos & 1) + return -EINVAL; + + if (!__kc_pcie_capability_reg_implemented(dev, pos)) + return 0; - return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32); + return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val); } -#endif /* HAVE_NETDEV_SELECT_QUEUE */ -#endif /* < 2.6.30 */ -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) ) -struct sk_buff *_kc_netdev_alloc_skb_ip_align(struct net_device *dev, - unsigned int length) +int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos, + u16 clear, u16 set) { - struct sk_buff *skb = netdev_alloc_skb(dev, length + NET_IP_ALIGN); + int ret; + u16 val; + + ret = __kc_pcie_capability_read_word(dev, pos, &val); + if (!ret) { + val &= ~clear; + val |= set; + ret = __kc_pcie_capability_write_word(dev, pos, val); + } - if (NET_IP_ALIGN && skb) - skb_reserve(skb, NET_IP_ALIGN); - return skb; + return ret; } -#endif /* < 2.6.33 */ +#endif /* !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) */ +#endif /* < 3.7.0 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0) ) +#endif /* 3.9.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) +#endif /* 3.10.0 */ diff --git a/vmkdrivers/src_9/drivers/net/igb/kcompat.h b/vmkdrivers/src_9/drivers/net/igb/kcompat.h index 082a3f9185139db6b811b65b6d73c19044b9d855..7cfbd8a8f0a142fbf19819580884bd90c9dbf3ac 100644 --- a/vmkdrivers/src_9/drivers/net/igb/kcompat.h +++ b/vmkdrivers/src_9/drivers/net/igb/kcompat.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2009 Intel Corporation. + Copyright(c) 2007-2013 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -28,7 +28,11 @@ #ifndef _KCOMPAT_H_ #define _KCOMPAT_H_ +#ifndef LINUX_VERSION_CODE #include <linux/version.h> +#else +#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c)) +#endif #include #include #include @@ -46,7 +50,10 @@ #include #include #include +#include #include +#include +#include /* NAPI enable/disable flags here */ #define NAPI @@ -62,11 +69,10 @@ /* packet split disable/enable */ #ifdef DISABLE_PACKET_SPLIT -#undef CONFIG_E1000_DISABLE_PACKET_SPLIT -#define CONFIG_E1000_DISABLE_PACKET_SPLIT -#undef CONFIG_IGB_DISABLE_PACKET_SPLIT +#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT #define CONFIG_IGB_DISABLE_PACKET_SPLIT #endif +#endif /* DISABLE_PACKET_SPLIT */ /* MSI compatibility code for all kernels and drivers */ #ifdef DISABLE_PCI_MSI @@ -115,12 +121,11 @@ struct msix_entry { #else #define _Bool char #endif -#ifndef bool -#define bool _Bool -#define true 1 -#define false 0 -#endif +/* kernels less than 2.4.14 don't have this */ +#ifndef ETH_P_8021Q +#define ETH_P_8021Q 0x8100 +#endif #ifndef module_param #define module_param(v,t,p) MODULE_PARM(v, "i"); @@ -141,6 +146,9 @@ struct msix_entry { #ifndef PCIE_LINK_STATE_L0S #define PCIE_LINK_STATE_L0S 1 #endif +#ifndef PCIE_LINK_STATE_L1 +#define PCIE_LINK_STATE_L1 2 +#endif #ifndef mmiowb #ifdef CONFIG_IA64 @@ -154,7 +162,7 @@ struct msix_entry { #define SET_NETDEV_DEV(net, pdev) #endif -#ifndef HAVE_FREE_NETDEV +#if !defined(HAVE_FREE_NETDEV) && ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) ) #define free_netdev(x) kfree(x) #endif @@ -162,20 +170,6 @@ struct msix_entry { #define CONFIG_NET_POLL_CONTROLLER #endif -#ifndef NETDEV_TX_OK -#define NETDEV_TX_OK 0 -#endif - -#ifndef NETDEV_TX_BUSY -#define NETDEV_TX_BUSY 1 -#endif - -#ifndef NETDEV_TX_LOCKED -#define NETDEV_TX_LOCKED -1 -#endif - -#define VMDQ_P(p) (p) - #ifndef SKB_DATAREF_SHIFT /* if we do not have the infrastructure to detect if skb_header is cloned just return false in all cases */ @@ -197,6 +191,14 @@ struct msix_entry { #define NETIF_F_SCTP_CSUM 0 #endif +#ifndef NETIF_F_LRO +#define NETIF_F_LRO (1 << 15) +#endif + +#ifndef NETIF_F_NTUPLE +#define NETIF_F_NTUPLE (1 << 27) +#endif + #ifndef IPPROTO_SCTP #define IPPROTO_SCTP 132 #endif @@ -210,31 +212,6 @@ struct msix_entry { #define __read_mostly #endif -#ifndef HAVE_NETIF_MSG -#define HAVE_NETIF_MSG 1 -enum { - NETIF_MSG_DRV = 0x0001, - NETIF_MSG_PROBE = 0x0002, - NETIF_MSG_LINK = 0x0004, - NETIF_MSG_TIMER = 0x0008, - NETIF_MSG_IFDOWN = 0x0010, - NETIF_MSG_IFUP = 0x0020, - NETIF_MSG_RX_ERR = 0x0040, - NETIF_MSG_TX_ERR = 0x0080, - NETIF_MSG_TX_QUEUED = 0x0100, - NETIF_MSG_INTR = 0x0200, - NETIF_MSG_TX_DONE = 0x0400, - NETIF_MSG_RX_STATUS = 0x0800, - NETIF_MSG_PKTDATA = 0x1000, - NETIF_MSG_HW = 0x2000, - NETIF_MSG_WOL = 0x4000, -}; - -#else -#define NETIF_MSG_HW 0x2000 -#define NETIF_MSG_WOL 0x4000 -#endif /* HAVE_NETIF_MSG */ - #ifndef MII_RESV1 #define MII_RESV1 0x17 /* Reserved... 
*/ #endif @@ -254,15 +231,18 @@ enum { .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID #endif +#ifndef node_online +#define node_online(node) ((node) == 0) +#endif + #ifndef num_online_cpus #define num_online_cpus() smp_num_cpus #endif -#ifndef numa_node_id -#define numa_node_id() 0 +#ifndef cpu_online +#define cpu_online(cpuid) test_bit((cpuid), &cpu_online_map) #endif - #ifndef _LINUX_RANDOM_H #include <linux/random.h> #endif @@ -286,10 +266,90 @@ enum { #define VLAN_ETH_FRAME_LEN 1518 #endif +#if !defined(IXGBE_DCA) && !defined(IGB_DCA) +#define dca_get_tag(b) 0 +#define dca_add_requester(a) -1 +#define dca_remove_requester(b) do { } while(0) +#define DCA_PROVIDER_ADD 0x0001 +#define DCA_PROVIDER_REMOVE 0x0002 +#endif + #ifndef DCA_GET_TAG_TWO_ARGS #define dca3_get_tag(a,b) dca_get_tag(b) #endif +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +#if defined(__i386__) || defined(__x86_64__) +#define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +#endif +#endif + +/* taken from 2.6.24 definition in linux/kernel.h */ +#ifndef IS_ALIGNED +#define IS_ALIGNED(x,a) (((x) % ((typeof(x))(a))) == 0) +#endif + +#ifdef IS_ENABLED +#undef IS_ENABLED +#undef __ARG_PLACEHOLDER_1 +#undef config_enabled +#undef _config_enabled +#undef __config_enabled +#undef ___config_enabled +#endif + +#define __ARG_PLACEHOLDER_1 0, +#define config_enabled(cfg) _config_enabled(cfg) +#define _config_enabled(value) __config_enabled(__ARG_PLACEHOLDER_##value) +#define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0) +#define ___config_enabled(__ignored, val, ...) val + +#define IS_ENABLED(option) \ + (config_enabled(option) || config_enabled(option##_MODULE)) + +#if !defined(NETIF_F_HW_VLAN_TX) && !defined(NETIF_F_HW_VLAN_CTAG_TX) +struct _kc_vlan_ethhdr { + unsigned char h_dest[ETH_ALEN]; + unsigned char h_source[ETH_ALEN]; + __be16 h_vlan_proto; + __be16 h_vlan_TCI; + __be16 h_vlan_encapsulated_proto; +}; +#define vlan_ethhdr _kc_vlan_ethhdr +struct _kc_vlan_hdr { + __be16 h_vlan_TCI; + __be16 h_vlan_encapsulated_proto; +}; +#define vlan_hdr _kc_vlan_hdr +#define vlan_tx_tag_present(_skb) 0 +#define vlan_tx_tag_get(_skb) 0 +#endif /* NETIF_F_HW_VLAN_TX && NETIF_F_HW_VLAN_CTAG_TX */ + +#ifndef VLAN_PRIO_SHIFT +#define VLAN_PRIO_SHIFT 13 +#endif + +#ifdef __VMKLNX__ +/* this has to be defined for ESX since it is stripped from their skbuff.h */ +#ifndef __skb_queue_purge +#define __skb_queue_purge(_l) __kc_skb_queue_purge(_l) +static inline void __kc_skb_queue_purge(struct sk_buff_head *list) +{ + struct sk_buff *skb; + while ((skb = __skb_dequeue(list)) != NULL) + kfree_skb(skb); +} +#endif +#endif + +#ifndef __GFP_COLD +#define __GFP_COLD 0 +#endif + +#ifndef __GFP_COMP +#define __GFP_COMP 0 +#endif + /*****************************************************************************/ /* Installations with ethtool version without eeprom, adapter id, or statistics * support */ @@ -377,6 +437,18 @@ struct ethtool_value { #define ETHTOOL_GLINK 0xa #endif /* ETHTOOL_GLINK */ +#ifndef ETHTOOL_GWOL +#define ETHTOOL_GWOL 0x5 +#define ETHTOOL_SWOL 0x6 +#define SOPASS_MAX 6 +struct ethtool_wolinfo { + u32 cmd; + u32 supported; + u32 wolopts; + u8 sopass[SOPASS_MAX]; /* SecureOn(tm) password */ +}; +#endif /* ETHTOOL_GWOL */ + #ifndef ETHTOOL_GREGS #define ETHTOOL_GREGS 0x00000004 /* Get NIC registers */ #define ethtool_regs _kc_ethtool_regs @@ -608,6 +680,58 @@ struct _kc_ethtool_pauseparam { #define ETHTOOL_BUSINFO_LEN 32 #endif +#ifndef RHEL_RELEASE_VERSION +#define RHEL_RELEASE_VERSION(a,b) (((a) << 8) + (b)) +#endif +#ifndef 
AX_RELEASE_VERSION +#define AX_RELEASE_VERSION(a,b) (((a) << 8) + (b)) +#endif + +#ifndef AX_RELEASE_CODE +#define AX_RELEASE_CODE 0 +#endif + +#if (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,0)) +#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,0) +#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,1)) +#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,1) +#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,2)) +#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,3) +#endif + +#ifndef RHEL_RELEASE_CODE +/* NOTE: RHEL_RELEASE_* introduced in RHEL4.5 */ +#define RHEL_RELEASE_CODE 0 +#endif + +/* SuSE version macro is the same as Linux kernel version */ +#ifndef SLE_VERSION +#define SLE_VERSION(a,b,c) KERNEL_VERSION(a,b,c) +#endif +#ifdef CONFIG_SUSE_KERNEL +#if ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,27) ) +/* SLES11 GA is 2.6.27 based */ +#define SLE_VERSION_CODE SLE_VERSION(11,0,0) +#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,32) ) +/* SLES11 SP1 is 2.6.32 based */ +#define SLE_VERSION_CODE SLE_VERSION(11,1,0) +#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,61)) && \ + (LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0))) +/* SLES11 SP3 is at least 3.0.61+ based */ +#define SLE_VERSION_CODE SLE_VERSION(11,3,0) +#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(x,y,z) */ +#endif /* CONFIG_SUSE_KERNEL */ +#ifndef SLE_VERSION_CODE +#define SLE_VERSION_CODE 0 +#endif /* SLE_VERSION_CODE */ + +#ifdef __KLOCWORK__ +#ifdef ARRAY_SIZE +#undef ARRAY_SIZE +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +#endif +#endif /* __KLOCWORK__ */ + /*****************************************************************************/ /* 2.4.3 => 2.4.0 */ #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) ) @@ -767,11 +891,67 @@ extern void _kc_pci_disable_device(struct pci_dev *pdev); pos = n, n = pos->next) #endif +#ifndef ____cacheline_aligned_in_smp +#ifdef CONFIG_SMP +#define ____cacheline_aligned_in_smp ____cacheline_aligned +#else +#define ____cacheline_aligned_in_smp +#endif /* CONFIG_SMP */ +#endif + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) ) +extern int _kc_snprintf(char * buf, size_t size, const char *fmt, ...); +#define snprintf(buf, size, fmt, args...) 
_kc_snprintf(buf, size, fmt, ##args) +extern int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args); +#define vsnprintf(buf, size, fmt, args) _kc_vsnprintf(buf, size, fmt, args) +#else /* 2.4.8 => 2.4.9 */ +extern int snprintf(char * buf, size_t size, const char *fmt, ...); +extern int vsnprintf(char *buf, size_t size, const char *fmt, va_list args); +#endif #endif /* 2.4.10 -> 2.4.6 */ /*****************************************************************************/ -/* 2.4.13 => 2.4.10 */ +/* 2.4.12 => 2.4.10 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,12) ) +#ifndef HAVE_NETIF_MSG +#define HAVE_NETIF_MSG 1 +enum { + NETIF_MSG_DRV = 0x0001, + NETIF_MSG_PROBE = 0x0002, + NETIF_MSG_LINK = 0x0004, + NETIF_MSG_TIMER = 0x0008, + NETIF_MSG_IFDOWN = 0x0010, + NETIF_MSG_IFUP = 0x0020, + NETIF_MSG_RX_ERR = 0x0040, + NETIF_MSG_TX_ERR = 0x0080, + NETIF_MSG_TX_QUEUED = 0x0100, + NETIF_MSG_INTR = 0x0200, + NETIF_MSG_TX_DONE = 0x0400, + NETIF_MSG_RX_STATUS = 0x0800, + NETIF_MSG_PKTDATA = 0x1000, + NETIF_MSG_HW = 0x2000, + NETIF_MSG_WOL = 0x4000, +}; + +#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV) +#define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE) +#define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK) +#define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER) +#define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN) +#define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP) +#define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR) +#define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR) +#define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED) +#define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR) +#define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE) +#define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS) +#define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA) +#endif /* !HAVE_NETIF_MSG */ +#endif /* 2.4.12 => 2.4.10 */ + +/*****************************************************************************/ +/* 2.4.13 => 2.4.12 */ #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) ) /**************************************/ @@ -812,7 +992,7 @@ struct vlan_ethhdr { unsigned short h_vlan_TCI; unsigned short h_vlan_encapsulated_proto; }; -#endif /* 2.4.13 => 2.4.10 */ +#endif /* 2.4.13 => 2.4.12 */ /*****************************************************************************/ /* 2.4.17 => 2.4.12 */ @@ -824,6 +1004,21 @@ struct vlan_ethhdr { #endif /* 2.4.17 => 2.4.13 */ +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,18) ) +#define NETIF_MSG_HW 0x2000 +#define NETIF_MSG_WOL 0x4000 + +#ifndef netif_msg_hw +#define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW) +#endif +#ifndef netif_msg_wol +#define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL) +#endif +#endif /* 2.4.18 */ + +/*****************************************************************************/ + /*****************************************************************************/ /* 2.4.20 => 2.4.19 */ #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,20) ) @@ -835,26 +1030,23 @@ struct vlan_ethhdr { #endif /* 2.4.20 => 2.4.19 */ -/*****************************************************************************/ -/* < 2.4.21 */ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,21) ) -#define skb_pad(x,y) _kc_skb_pad(x, y) -struct sk_buff * _kc_skb_pad(struct sk_buff *skb, int pad); -#endif /* < 2.4.21 */ - 
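+/* Editor's illustrative sketch -- not part of the disclosed driver source.
+ * It shows how the netif_msg_* compat macros defined above are meant to be
+ * used: each one tests a NETIF_MSG_* bit in a per-adapter msg_enable mask,
+ * so individual message classes can be switched on and off at module load
+ * time (typically seeded via netif_msg_init()). The "_example_adapter"
+ * type and "_example_report_link" function are hypothetical names invented
+ * for this sketch, and the guard macro below is never defined, so none of
+ * this is ever compiled into the driver. */
+#ifdef KCOMPAT_NETIF_MSG_EXAMPLE
+struct _example_adapter {
+	u32 msg_enable;		/* bitmap of NETIF_MSG_* classes */
+};
+static inline void _example_report_link(struct _example_adapter *adapter)
+{
+	/* netif_msg_link() expands to (adapter->msg_enable & NETIF_MSG_LINK) */
+	if (netif_msg_link(adapter))
+		printk(KERN_INFO "link is up\n");
+}
+#endif /* KCOMPAT_NETIF_MSG_EXAMPLE */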
/*****************************************************************************/ /* 2.4.22 => 2.4.17 */ #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) ) #define pci_name(x) ((x)->slot_name) + +#ifndef SUPPORTED_10000baseT_Full +#define SUPPORTED_10000baseT_Full (1 << 12) +#endif +#ifndef ADVERTISED_10000baseT_Full +#define ADVERTISED_10000baseT_Full (1 << 12) +#endif #endif /*****************************************************************************/ /* 2.4.22 => 2.4.17 */ #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) ) -#ifdef IGB_LRO -#undef IGB_LRO -#endif #endif #ifndef __VMKLNX__ @@ -892,6 +1084,8 @@ static inline void _kc_netif_tx_disable(struct net_device *dev) spin_unlock_bh(&dev->xmit_lock); } #endif +#else /* 2.4.23 => 2.4.22 */ +#define HAVE_SCTP #endif /* 2.4.23 => 2.4.22 */ #endif /* __VMKLNX__ */ @@ -903,6 +1097,11 @@ static inline void _kc_netif_tx_disable(struct net_device *dev) #define ETHTOOL_OPS_COMPAT #endif /* 2.6.4 => 2.6.0 */ +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) +#define __user +#endif /* < 2.4.27 */ + /*****************************************************************************/ /* 2.5.71 => 2.4.x */ #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,71) ) @@ -941,15 +1140,79 @@ static inline u32 _kc_netif_msg_init(int debug_value, int default_msg_enable_bit /*****************************************************************************/ /* <= 2.5.0 */ #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) ) +#include #undef pci_register_driver #define pci_register_driver pci_module_init -#define dev_err(__unused_dev, format, arg...) \ - printk(KERN_ERR "%s: " format, pci_name(pdev) , ## arg) -#define dev_info(__unused_dev, format, arg...) \ - printk(KERN_INFO "%s: " format, pci_name(pdev) , ## arg) -#define dev_warn(__unused_dev, format, arg...) \ - printk(KERN_WARNING "%s: " format, pci_name(pdev) , ## arg) +/* + * Most of the dma compat code is copied/modifed from the 2.4.37 + * /include/linux/libata-compat.h header file + */ +/* These definitions mirror those in pci.h, so they can be used + * interchangeably with their PCI_ counterparts */ +enum dma_data_direction { + DMA_BIDIRECTIONAL = 0, + DMA_TO_DEVICE = 1, + DMA_FROM_DEVICE = 2, + DMA_NONE = 3, +}; + +struct device { + struct pci_dev pdev; +}; + +static inline struct pci_dev *to_pci_dev (struct device *dev) +{ + return (struct pci_dev *) dev; +} +static inline struct device *pci_dev_to_dev(struct pci_dev *pdev) +{ + return (struct device *) pdev; +} + +#define pdev_printk(lvl, pdev, fmt, args...) \ + printk("%s %s: " fmt, lvl, pci_name(pdev), ## args) +#define dev_err(dev, fmt, args...) \ + pdev_printk(KERN_ERR, to_pci_dev(dev), fmt, ## args) +#define dev_info(dev, fmt, args...) \ + pdev_printk(KERN_INFO, to_pci_dev(dev), fmt, ## args) +#define dev_warn(dev, fmt, args...) \ + pdev_printk(KERN_WARNING, to_pci_dev(dev), fmt, ## args) +#define dev_notice(dev, fmt, args...) \ + pdev_printk(KERN_NOTICE, to_pci_dev(dev), fmt, ## args) +#define dev_dbg(dev, fmt, args...) \ + pdev_printk(KERN_DEBUG, to_pci_dev(dev), fmt, ## args) + +/* NOTE: dangerous! 
we ignore the 'gfp' argument */ +#define dma_alloc_coherent(dev,sz,dma,gfp) \ + pci_alloc_consistent(to_pci_dev(dev),(sz),(dma)) +#define dma_free_coherent(dev,sz,addr,dma_addr) \ + pci_free_consistent(to_pci_dev(dev),(sz),(addr),(dma_addr)) + +#define dma_map_page(dev,a,b,c,d) \ + pci_map_page(to_pci_dev(dev),(a),(b),(c),(d)) +#define dma_unmap_page(dev,a,b,c) \ + pci_unmap_page(to_pci_dev(dev),(a),(b),(c)) + +#define dma_map_single(dev,a,b,c) \ + pci_map_single(to_pci_dev(dev),(a),(b),(c)) +#define dma_unmap_single(dev,a,b,c) \ + pci_unmap_single(to_pci_dev(dev),(a),(b),(c)) + +#define dma_map_sg(dev, sg, nents, dir) \ + pci_map_sg(to_pci_dev(dev), (sg), (nents), (dir)) +#define dma_unmap_sg(dev, sg, nents, dir) \ + pci_unmap_sg(to_pci_dev(dev), (sg), (nents), (dir)) + +#define dma_sync_single(dev,a,b,c) \ + pci_dma_sync_single(to_pci_dev(dev),(a),(b),(c)) + +/* for range just sync everything, that's all the pci API can do */ +#define dma_sync_single_range(dev,addr,off,sz,dir) \ + pci_dma_sync_single(to_pci_dev(dev),(addr),(off)+(sz),(dir)) + +#define dma_set_mask(dev,mask) \ + pci_set_dma_mask(to_pci_dev(dev),(mask)) /* hlist_* code - double linked lists */ struct hlist_head { @@ -998,43 +1261,21 @@ static inline void INIT_HLIST_NODE(struct hlist_node *h) h->next = NULL; h->pprev = NULL; } -#define hlist_entry(ptr, type, member) container_of(ptr,type,member) - -#define hlist_for_each_entry(tpos, pos, head, member) \ - for (pos = (head)->first; \ - pos && ({ prefetch(pos->next); 1;}) && \ - ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ - pos = pos->next) - -#define hlist_for_each_entry_safe(tpos, pos, n, head, member) \ - for (pos = (head)->first; \ - pos && ({ n = pos->next; 1; }) && \ - ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ - pos = n) - -/* we ignore GFP here */ -#define dma_alloc_coherent(dv, sz, dma, gfp) \ - pci_alloc_consistent(pdev, (sz), (dma)) -#define dma_free_coherent(dv, sz, addr, dma_addr) \ - pci_free_consistent(pdev, (sz), (addr), (dma_addr)) #ifndef might_sleep #define might_sleep() #endif - +#else +static inline struct device *pci_dev_to_dev(struct pci_dev *pdev) +{ + return &pdev->dev; +} #endif /* <= 2.5.0 */ /*****************************************************************************/ /* 2.5.28 => 2.4.23 */ #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) ) -static inline void _kc_synchronize_irq(void) -{ - synchronize_irq(); -} -#undef synchronize_irq -#define synchronize_irq(X) _kc_synchronize_irq() - #include <linux/tqueue.h> #define work_struct tq_struct #undef INIT_WORK @@ -1050,6 +1291,14 @@ static inline void _kc_synchronize_irq(void) /*****************************************************************************/ /* 2.6.0 => 2.5.28 */ #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) +#ifndef read_barrier_depends +#define read_barrier_depends() rmb() +#endif + +#undef get_cpu +#define get_cpu() smp_processor_id() +#undef put_cpu +#define put_cpu() do { } while(0) #define MODULE_INFO(version, _version) #ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT #define CONFIG_E1000_DISABLE_PACKET_SPLIT 1 @@ -1059,9 +1308,9 @@ static inline void _kc_synchronize_irq(void) #endif #ifndef __VMKLNX__ -#define pci_set_consistent_dma_mask(dev,mask) 1 +#define dma_set_coherent_mask(dev,mask) 1 #else -#define pci_set_consistent_dma_mask(dev,mask) 0 +#define dma_set_coherent_mask(dev,mask) 0 #endif #undef dev_put @@ -1079,6 +1328,11 @@ extern void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page #define page_count(p) atomic_read(&(p)->count) #endif 
+#ifdef MAX_NUMNODES +#undef MAX_NUMNODES +#endif +#define MAX_NUMNODES 1 + #ifndef __VMKLNX__ /* find_first_bit and find_next bit are not defined for most * 2.4 kernels (except for the redhat 2.4.21 kernels @@ -1093,7 +1347,70 @@ extern unsigned long _kc_find_next_bit(const unsigned long *addr, #define find_first_bit(addr, size) find_next_bit((addr), (size), 0) #endif /* __VMKLNX__ */ + +#ifndef netdev_name +static inline const char *_kc_netdev_name(const struct net_device *dev) +{ + if (strchr(dev->name, '%')) + return "(unregistered net_device)"; + return dev->name; +} +#define netdev_name(netdev) _kc_netdev_name(netdev) +#endif /* netdev_name */ + +#ifndef strlcpy +#define strlcpy _kc_strlcpy +extern size_t _kc_strlcpy(char *dest, const char *src, size_t size); +#endif /* strlcpy */ + +#ifndef do_div +#if BITS_PER_LONG == 64 +# define do_div(n,base) ({ \ + uint32_t __base = (base); \ + uint32_t __rem; \ + __rem = ((uint64_t)(n)) % __base; \ + (n) = ((uint64_t)(n)) / __base; \ + __rem; \ + }) +#elif BITS_PER_LONG == 32 +extern uint32_t _kc__div64_32(uint64_t *dividend, uint32_t divisor); +# define do_div(n,base) ({ \ + uint32_t __base = (base); \ + uint32_t __rem; \ + if (likely(((n) >> 32) == 0)) { \ + __rem = (uint32_t)(n) % __base; \ + (n) = (uint32_t)(n) / __base; \ + } else \ + __rem = _kc__div64_32(&(n), __base); \ + __rem; \ + }) +#else /* BITS_PER_LONG == ?? */ +# error do_div() does not yet support the C64 +#endif /* BITS_PER_LONG */ +#endif /* do_div */ + +#ifndef NSEC_PER_SEC +#define NSEC_PER_SEC 1000000000L +#endif + +#undef HAVE_I2C_SUPPORT +#else /* 2.6.0 */ +#if IS_ENABLED(CONFIG_I2C_ALGOBIT) && \ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,9))) +#define HAVE_I2C_SUPPORT +#endif /* IS_ENABLED(CONFIG_I2C_ALGOBIT) */ + #endif /* 2.6.0 => 2.5.28 */ +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) ) +#define dma_pool pci_pool +#define dma_pool_destroy pci_pool_destroy +#define dma_pool_alloc pci_pool_alloc +#define dma_pool_free pci_pool_free + +#define dma_pool_create(name,dev,size,align,allocation) \ + pci_pool_create((name),to_pci_dev(dev),(size),(align),(allocation)) +#endif /* < 2.6.3 */ /*****************************************************************************/ /* 2.6.4 => 2.6.0 */ @@ -1104,10 +1421,25 @@ extern unsigned long _kc_find_next_bit(const unsigned long *addr, /*****************************************************************************/ /* 2.6.5 => 2.6.0 */ #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) -#define pci_dma_sync_single_for_cpu pci_dma_sync_single -#define pci_dma_sync_single_for_device pci_dma_sync_single_for_cpu +#define dma_sync_single_for_cpu dma_sync_single +#define dma_sync_single_for_device dma_sync_single +#define dma_sync_single_range_for_cpu dma_sync_single_range +#define dma_sync_single_range_for_device dma_sync_single_range +#ifndef pci_dma_mapping_error +#define pci_dma_mapping_error _kc_pci_dma_mapping_error +static inline int _kc_pci_dma_mapping_error(dma_addr_t dma_addr) +{ + return dma_addr == 0; +} +#endif #endif /* 2.6.5 => 2.6.0 */ +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) +extern int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...); +#define scnprintf(buf, size, fmt, args...) 
_kc_scnprintf(buf, size, fmt, ##args) +#endif /* < 2.6.4 */ + /*****************************************************************************/ #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,6) ) /* taken from 2.6 include/linux/bitmap.h */ @@ -1141,6 +1473,10 @@ static inline struct mii_ioctl_data *_kc_if_mii(struct ifreq *rq) { return (struct mii_ioctl_data *) &rq->ifr_ifru; } + +#ifndef __force +#define __force +#endif #endif /* < 2.6.7 */ /*****************************************************************************/ @@ -1151,6 +1487,16 @@ static inline struct mii_ioctl_data *_kc_if_mii(struct ifreq *rq) #ifndef PCI_EXP_DEVCTL_CERE #define PCI_EXP_DEVCTL_CERE 0x0001 #endif +#define PCI_EXP_FLAGS 2 /* Capabilities register */ +#define PCI_EXP_FLAGS_VERS 0x000f /* Capability version */ +#define PCI_EXP_FLAGS_TYPE 0x00f0 /* Device/Port type */ +#define PCI_EXP_TYPE_ENDPOINT 0x0 /* Express Endpoint */ +#define PCI_EXP_TYPE_LEG_END 0x1 /* Legacy Endpoint */ +#define PCI_EXP_TYPE_ROOT_PORT 0x4 /* Root Port */ +#define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */ +#define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */ +#define PCI_EXP_DEVCAP 4 /* Device capabilities */ +#define PCI_EXP_DEVSTA 10 /* Device Status */ #ifdef __VMKLNX__ #define msleep(x) mdelay(x) #define page_count(p) 0 @@ -1158,7 +1504,7 @@ static inline struct mii_ioctl_data *_kc_if_mii(struct ifreq *rq) #define msleep(x) do { set_current_state(TASK_UNINTERRUPTIBLE); \ schedule_timeout((x * HZ)/1000 + 2); \ } while (0) -#endif /* __VMKLNX__ */ +#endif /* __VMKLNX */ #endif /* < 2.6.8 */ @@ -1222,21 +1568,59 @@ static inline unsigned long _kc_msleep_interruptible(unsigned int msecs) #ifndef __be16 #define __be16 u16 #endif - -#ifdef pci_dma_mapping_error -#undef pci_dma_mapping_error +#ifndef __be32 +#define __be32 u32 +#endif +#ifndef __be64 +#define __be64 u64 #endif -#define pci_dma_mapping_error _kc_pci_dma_mapping_error -static inline int _kc_pci_dma_mapping_error(struct pci_dev *pdev, - dma_addr_t dma_addr) -{ - return dma_addr == 0; -} static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb) { return (struct vlan_ethhdr *)skb->mac.raw; } + +/* Wake-On-Lan options. 
*/ +#define WAKE_PHY (1 << 0) +#define WAKE_UCAST (1 << 1) +#define WAKE_MCAST (1 << 2) +#define WAKE_BCAST (1 << 3) +#define WAKE_ARP (1 << 4) +#define WAKE_MAGIC (1 << 5) +#define WAKE_MAGICSECURE (1 << 6) /* only meaningful if WAKE_MAGIC */ + +#define skb_header_pointer _kc_skb_header_pointer +static inline void *_kc_skb_header_pointer(const struct sk_buff *skb, + int offset, int len, void *buffer) +{ + int hlen = skb_headlen(skb); + + if (hlen - offset >= len) + return skb->data + offset; + +#ifdef MAX_SKB_FRAGS + if (skb_copy_bits(skb, offset, buffer, len) < 0) + return NULL; + + return buffer; +#else + return NULL; +#endif + +#ifndef NETDEV_TX_OK +#define NETDEV_TX_OK 0 +#endif +#ifndef NETDEV_TX_BUSY +#define NETDEV_TX_BUSY 1 +#endif +#ifndef NETDEV_TX_LOCKED +#define NETDEV_TX_LOCKED -1 +#endif +} + +#ifndef __bitwise +#define __bitwise +#endif #endif /* < 2.6.9 */ /*****************************************************************************/ @@ -1256,6 +1640,10 @@ static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb) */ #undef num_online_nodes #define num_online_nodes(n) 1 +extern DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES); +#undef node_online_map +#define node_online_map _kcompat_node_online_map +#define pci_get_class pci_find_class #endif /* < 2.6.10 */ /*****************************************************************************/ @@ -1304,6 +1692,15 @@ static inline unsigned long _kc_usecs_to_jiffies(const unsigned int m) return (m * HZ + KC_USEC_PER_SEC - 1) / KC_USEC_PER_SEC; #endif } + +#define PCI_EXP_LNKCAP 12 /* Link Capabilities */ +#define PCI_EXP_LNKSTA 18 /* Link Status */ +#define PCI_EXP_SLTCAP 20 /* Slot Capabilities */ +#define PCI_EXP_SLTCTL 24 /* Slot Control */ +#define PCI_EXP_SLTSTA 26 /* Slot Status */ +#define PCI_EXP_RTCTL 28 /* Root Control */ +#define PCI_EXP_RTCAP 30 /* Root Capabilities */ +#define PCI_EXP_RTSTA 32 /* Root Status */ #endif /* < 2.6.11 */ /*****************************************************************************/ @@ -1317,10 +1714,40 @@ static inline unsigned long _kc_usecs_to_jiffies(const unsigned int m) /* Advertisement control register. */ #define ADVERTISE_PAUSE_CAP 0x0400 /* Try for pause */ #define ADVERTISE_PAUSE_ASYM 0x0800 /* Try for asymmetric pause */ +/* Link partner ability register. 
*/ +#define LPA_PAUSE_CAP 0x0400 /* Can pause */ +#define LPA_PAUSE_ASYM 0x0800 /* Can pause asymetrically */ /* 1000BASE-T Control register */ #define ADVERTISE_1000FULL 0x0200 /* Advertise 1000BASE-T full duplex */ +#define ADVERTISE_1000HALF 0x0100 /* Advertise 1000BASE-T half duplex */ +/* 1000BASE-T Status register */ +#define LPA_1000LOCALRXOK 0x2000 /* Link partner local receiver status */ +#define LPA_1000REMRXOK 0x1000 /* Link partner remote receiver status */ + +#ifndef is_zero_ether_addr +#define is_zero_ether_addr _kc_is_zero_ether_addr +static inline int _kc_is_zero_ether_addr(const u8 *addr) +{ + return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]); +} +#endif /* is_zero_ether_addr */ +#ifndef is_multicast_ether_addr +#define is_multicast_ether_addr _kc_is_multicast_ether_addr +static inline int _kc_is_multicast_ether_addr(const u8 *addr) +{ + return addr[0] & 0x01; +} +#endif /* is_multicast_ether_addr */ #endif /* < 2.6.12 */ +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) ) +#ifndef kstrdup +#define kstrdup _kc_kstrdup +extern char *_kc_kstrdup(const char *s, unsigned int gfp); +#endif +#endif /* < 2.6.13 */ + /*****************************************************************************/ #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) ) #define pm_message_t u32 @@ -1329,10 +1756,6 @@ static inline unsigned long _kc_usecs_to_jiffies(const unsigned int m) extern void *_kc_kzalloc(size_t size, int flags); #endif -#ifndef vmalloc_node -#define vmalloc_node(a,b) vmalloc(a) -#endif /* vmalloc_node*/ - /* Generic MII registers. */ #define MII_ESTATUS 0x0f /* Extended Status */ /* Basic mode status register. */ @@ -1340,10 +1763,38 @@ extern void *_kc_kzalloc(size_t size, int flags); /* Extended status register. 
*/ #define ESTATUS_1000_TFULL 0x2000 /* Can do 1000BT Full */ #define ESTATUS_1000_THALF 0x1000 /* Can do 1000BT Half */ + +#define SUPPORTED_Pause (1 << 13) +#define SUPPORTED_Asym_Pause (1 << 14) +#define ADVERTISED_Pause (1 << 13) +#define ADVERTISED_Asym_Pause (1 << 14) + +#if (!(RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,3)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0)))) +#if ((LINUX_VERSION_CODE == KERNEL_VERSION(2,6,9)) && !defined(gfp_t)) +#define gfp_t unsigned +#else +typedef unsigned gfp_t; +#endif +#endif /* !RHEL4.3->RHEL5.0 */ + +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9) ) +#ifdef CONFIG_X86_64 +#define dma_sync_single_range_for_cpu(dev, addr, off, sz, dir) \ + dma_sync_single_for_cpu((dev), (addr), (off) + (sz), (dir)) +#define dma_sync_single_range_for_device(dev, addr, off, sz, dir) \ + dma_sync_single_for_device((dev), (addr), (off) + (sz), (dir)) +#endif +#endif #endif /* < 2.6.14 */ /*****************************************************************************/ #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15) ) +#ifndef vmalloc_node +#define vmalloc_node(a,b) vmalloc(a) +#endif /* vmalloc_node*/ + #define setup_timer(_timer, _function, _data) \ do { \ (_timer)->function = _function; \ @@ -1359,6 +1810,15 @@ do { \ #ifndef device_init_wakeup #define device_init_wakeup(dev,val) do {} while (0) #endif +static inline unsigned _kc_compare_ether_addr(const u8 *addr1, const u8 *addr2) +{ + const u16 *a = (const u16 *) addr1; + const u16 *b = (const u16 *) addr2; + + return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0; +} +#undef compare_ether_addr +#define compare_ether_addr(addr1, addr2) _kc_compare_ether_addr(addr1, addr2) #endif /* < 2.6.15 */ /*****************************************************************************/ @@ -1379,8 +1839,29 @@ do { \ #else /* 2.6.16 and above */ #undef HAVE_PCI_ERS #define HAVE_PCI_ERS +#if ( SLE_VERSION_CODE && SLE_VERSION_CODE == SLE_VERSION(10,4,0) ) +#ifdef device_can_wakeup +#undef device_can_wakeup +#endif /* device_can_wakeup */ +#define device_can_wakeup(dev) 1 +#endif /* SLE_VERSION(10,4,0) */ #endif /* < 2.6.16 */ +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) ) +#ifndef dev_notice +#define dev_notice(dev, fmt, args...) 
\ + dev_printk(KERN_NOTICE, dev, fmt, ## args) +#endif + +#ifndef first_online_node +#define first_online_node 0 +#endif +#ifndef NET_SKB_PAD +#define NET_SKB_PAD 16 +#endif +#endif /* < 2.6.17 */ + /*****************************************************************************/ #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) ) @@ -1410,12 +1891,6 @@ do { \ #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) #endif -#ifndef netdev_alloc_skb -#define netdev_alloc_skb _kc_netdev_alloc_skb -extern struct sk_buff *_kc_netdev_alloc_skb(struct net_device *dev, - unsigned int length); -#endif - #ifndef skb_is_gso #ifdef NETIF_F_TSO #define skb_is_gso _kc_skb_is_gso @@ -1432,32 +1907,63 @@ static inline int _kc_skb_is_gso(const struct sk_buff *skb) #define resource_size_t unsigned long #endif -#endif /* < 2.6.18 */ +#ifdef skb_pad +#undef skb_pad +#endif +#define skb_pad(x,y) _kc_skb_pad(x, y) +int _kc_skb_pad(struct sk_buff *skb, int pad); +#ifdef skb_padto +#undef skb_padto +#endif +#define skb_padto(x,y) _kc_skb_padto(x, y) +static inline int _kc_skb_padto(struct sk_buff *skb, unsigned int len) +{ + unsigned int size = skb->len; + if(likely(size >= len)) + return 0; + return _kc_skb_pad(skb, len - size); +} + +#ifndef DECLARE_PCI_UNMAP_ADDR +#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ + dma_addr_t ADDR_NAME +#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \ + u32 LEN_NAME +#define pci_unmap_addr(PTR, ADDR_NAME) \ + ((PTR)->ADDR_NAME) +#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \ + (((PTR)->ADDR_NAME) = (VAL)) +#define pci_unmap_len(PTR, LEN_NAME) \ + ((PTR)->LEN_NAME) +#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ + (((PTR)->LEN_NAME) = (VAL)) +#endif /* DECLARE_PCI_UNMAP_ADDR */ +#endif /* < 2.6.18 */ /*****************************************************************************/ #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) ) +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,0))) +#ifndef __VMKLNX__ +#define i_private u.generic_ip +#endif /* __VMKLNX__ */ +#endif /* >= RHEL 5.0 */ + #ifndef DIV_ROUND_UP #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) #endif -#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) ) -#ifndef RHEL_RELEASE_CODE -#define RHEL_RELEASE_CODE 0 -#endif -#ifndef RHEL_RELEASE_VERSION -#define RHEL_RELEASE_VERSION(a,b) 0 -#endif -#ifndef AX_RELEASE_CODE -#define AX_RELEASE_CODE 0 -#endif -#ifndef AX_RELEASE_VERSION -#define AX_RELEASE_VERSION(a,b) 0 +#ifndef __ALIGN_MASK +#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask)) #endif +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) ) #ifndef __VMKLNX__ -#if (!(( RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,4) ) && ( RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0) ) || ( RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,0) ) || (AX_RELEASE_CODE > AX_RELEASE_VERSION(3,0)))) +#if (!((RHEL_RELEASE_CODE && \ + ((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,4) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0)) || \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,0)))))) typedef irqreturn_t (*irq_handler_t)(int, void*, struct pt_regs *); #endif -#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0)) +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0)) #undef CONFIG_INET_LRO #undef CONFIG_INET_LRO_MODULE #ifdef IXGBE_FCOE @@ -1483,6 +1989,7 @@ static inline int _kc_request_irq(unsigned int irq, new_handler_t handler, unsig #define irq_handler_t new_handler_t /* pci_restore_state and pci_save_state handles MSI/PCIE from 2.6.19 */ +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= 
RHEL_RELEASE_VERSION(5,4))) #define PCIE_CONFIG_SPACE_LEN 256 #define PCI_CONFIG_SPACE_LEN 64 #define PCIE_LINK_STATUS 0x12 @@ -1493,10 +2000,21 @@ extern int _kc_pci_save_state(struct pci_dev *); #undef pci_restore_state extern void _kc_pci_restore_state(struct pci_dev *); #define pci_restore_state(pdev) _kc_pci_restore_state(pdev) +#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */ + #ifdef HAVE_PCI_ERS #endif #ifdef __VMKLNX__ -#define skb_orphan(p) do { } while (0) +#define skb_pad(x,y) _kc_skb_pad(x, y) +int _kc_skb_pad(struct sk_buff *skb, int pad); +#define skb_padto(x,y) _kc_skb_padto(x, y) +static inline int _kc_skb_padto(struct sk_buff *skb, unsigned int len) +{ + unsigned int size = skb->len; + if(likely(size >= len)) + return 0; + return _kc_skb_pad(skb, len - size); +} #endif static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev) { @@ -1504,8 +2022,17 @@ static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev) } #define pci_disable_pcie_error_reporting(dev) do {} while (0) #define pci_cleanup_aer_uncorrect_error_status(dev) do {} while (0) + +extern void *_kc_kmemdup(const void *src, size_t len, unsigned gfp); +#define kmemdup(src, len, gfp) _kc_kmemdup(src, len, gfp) +#ifndef bool +#define bool _Bool +#define true 1 +#define false 0 +#endif #else /* 2.6.19 */ #include +#include #endif /* < 2.6.19 */ /*****************************************************************************/ @@ -1519,6 +2046,10 @@ static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev) PCI_ANY_ID, PCI_ANY_ID, 0, 0 #endif +#ifndef PCI_VENDOR_ID_INTEL +#define PCI_VENDOR_ID_INTEL 0x8086 +#endif + #ifndef round_jiffies #define round_jiffies(x) x #endif @@ -1526,7 +2057,36 @@ static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev) #define csum_offset csum #define HAVE_EARLY_VMALLOC_NODE +#ifdef __VMKLNX__ +#define vmalloc_node(a,b) vmalloc(a) +#endif #define dev_to_node(dev) -1 +#undef set_dev_node +/* remove compiler warning with b=b, for unused variable */ +#define set_dev_node(a, b) do { (b) = (b); } while(0) + +#ifndef __VMKLNX__ +#if (!(RHEL_RELEASE_CODE && \ + (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,6)))) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0))) +typedef __u16 __bitwise __sum16; +typedef __u32 __bitwise __wsum; +#endif + +#if (!(RHEL_RELEASE_CODE && \ + (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0))) +static inline __wsum csum_unfold(__sum16 n) +{ + return (__force __wsum)n; +} +#endif +#endif /* __VMKLNX___ */ + #else /* < 2.6.20 */ #define HAVE_DEVICE_NUMA_NODE #endif /* < 2.6.20 */ @@ -1535,13 +2095,32 @@ static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev) #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) ) #define to_net_dev(class) container_of(class, struct net_device, class_dev) #define NETDEV_CLASS_DEV +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5))) #define vlan_group_get_device(vg, id) (vg->vlan_devices[id]) -#define vlan_group_set_device(vg, id, dev) if (vg) vg->vlan_devices[id] = dev; +#define vlan_group_set_device(vg, id, dev) \ + do { \ + if (vg) vg->vlan_devices[id] = dev; \ + } while (0) +#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)) */ #define 
pci_channel_offline(pdev) (pdev->error_state && \ pdev->error_state != pci_channel_io_normal) #define pci_request_selected_regions(pdev, bars, name) \ pci_request_regions(pdev, name) #define pci_release_selected_regions(pdev, bars) pci_release_regions(pdev); + +#ifndef __aligned +#define __aligned(x) __attribute__((aligned(x))) +#endif + +extern struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev); +#define netdev_to_dev(netdev) \ + pci_dev_to_dev(_kc_netdev_to_pdev(netdev)) +#else +static inline struct device *netdev_to_dev(struct net_device *netdev) +{ + return &netdev->dev; +} + #endif /* < 2.6.21 */ /*****************************************************************************/ @@ -1555,8 +2134,18 @@ static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev) #define skb_network_offset(skb) (skb->nh.raw - skb->data) #define skb_network_header(skb) (skb->nh.raw) #define skb_tail_pointer(skb) skb->tail +#define skb_reset_tail_pointer(skb) \ + do { \ + skb->tail = skb->data; \ + } while (0) +#define skb_set_tail_pointer(skb, offset) \ + do { \ + skb->tail = skb->data + offset; \ + } while (0) +#define skb_copy_to_linear_data(skb, from, len) \ + memcpy(skb->data, from, len) #define skb_copy_to_linear_data_offset(skb, offset, from, len) \ - memcpy(skb->data + offset, from, len) + memcpy(skb->data + offset, from, len) #define skb_network_header_len(skb) (skb->h.raw - skb->nh.raw) #define pci_register_driver pci_module_init #define skb_mac_header(skb) skb->mac.raw @@ -1584,6 +2173,30 @@ static inline struct udphdr *_udp_hdr(const struct sk_buff *skb) #endif #define cpu_to_be16(x) __constant_htons(x) +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1))) +enum { + DUMP_PREFIX_NONE, + DUMP_PREFIX_ADDRESS, + DUMP_PREFIX_OFFSET +}; +#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1)) */ +#ifndef hex_asc +#define hex_asc(x) "0123456789abcdef"[x] +#endif +#include +extern void _kc_print_hex_dump(const char *level, const char *prefix_str, + int prefix_type, int rowsize, int groupsize, + const void *buf, size_t len, bool ascii); +#define print_hex_dump(lvl, s, t, r, g, b, l, a) \ + _kc_print_hex_dump(lvl, s, t, r, g, b, l, a) +#ifndef ADVERTISED_2500baseX_Full +#define ADVERTISED_2500baseX_Full (1 << 15) +#endif +#ifndef SUPPORTED_2500baseX_Full +#define SUPPORTED_2500baseX_Full (1 << 15) +#endif + + #else /* 2.6.22 */ #define ETH_TYPE_TRANS_SETS_DEV #define HAVE_NETDEV_STATS_IN_NETDEV @@ -1591,7 +2204,6 @@ static inline struct udphdr *_udp_hdr(const struct sk_buff *skb) /*****************************************************************************/ #if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22) ) -#undef ETHTOOL_GPERMADDR #undef SET_MODULE_OWNER #define SET_MODULE_OWNER(dev) do { } while (0) #endif /* > 2.6.22 */ @@ -1602,12 +2214,41 @@ static inline struct udphdr *_udp_hdr(const struct sk_buff *skb) #ifndef PTR_ALIGN #define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a))) #endif + +#ifndef CONFIG_PM_SLEEP +#define CONFIG_PM_SLEEP CONFIG_PM +#endif + +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13) ) +#define HAVE_ETHTOOL_GET_PERM_ADDR +#endif /* 2.6.14 through 2.6.22 */ #endif /* < 2.6.23 */ /*****************************************************************************/ #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) ) -#define netif_napi_del(_a) do {} while (0) -#define DMA_BIT_MASK(n) (((n) == 64) ? 
~0ULL : ((1ULL<<(n))-1)) +#ifndef ETH_FLAG_LRO +#define ETH_FLAG_LRO NETIF_F_LRO +#endif + +#ifndef DMA_BIT_MASK +#define DMA_BIT_MASK(n) (((n) == 64) ? DMA_64BIT_MASK : ((1ULL<<(n))-1)) +#endif + +#ifdef NETIF_F_TSO6 +#define skb_is_gso_v6 _kc_skb_is_gso_v6 +static inline int _kc_skb_is_gso_v6(const struct sk_buff *skb) +{ + return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6; +} +#endif /* NETIF_F_TSO6 */ + +#ifndef KERN_CONT +#define KERN_CONT "" +#endif +#ifndef pr_err +#define pr_err(fmt, arg...) \ + printk(KERN_ERR fmt, ##arg) +#endif #else /* < 2.6.24 */ #define HAVE_ETHTOOL_GET_SSET_COUNT #define HAVE_NETDEV_NAPI_LIST @@ -1615,7 +2256,11 @@ static inline struct udphdr *_udp_hdr(const struct sk_buff *skb) /*****************************************************************************/ #if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,24) ) +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) ) #include +#else /* >= 3.2.0 */ +#include +#endif /* else >= 3.2.0 */ #endif /* > 2.6.24 */ /*****************************************************************************/ @@ -1641,25 +2286,83 @@ static inline struct udphdr *_udp_hdr(const struct sk_buff *skb) pci_name(adapter->pdev)); \ } \ } + #endif /* > 2.6.18 */ #define pci_enable_device_mem(pdev) pci_enable_device(pdev) +#ifndef DEFINE_PCI_DEVICE_TABLE +#define DEFINE_PCI_DEVICE_TABLE(_table) struct pci_device_id _table[] +#endif /* DEFINE_PCI_DEVICE_TABLE */ + + +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) +#endif /* >= 2.6.0 */ + +#else /* < 2.6.25 */ + + +#if IS_ENABLED(CONFIG_HWMON) +#ifndef IGB_HWMON +#define IGB_HWMON +#endif /* IGB_HWMON */ +#endif /* CONFIG_HWMON */ + #endif /* < 2.6.25 */ /*****************************************************************************/ #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) ) +#ifndef clamp_t +#define clamp_t(type, val, min, max) ({ \ + type __val = (val); \ + type __min = (min); \ + type __max = (max); \ + __val = __val < __min ? __min : __val; \ + __val > __max ? 
__max : __val; }) +#endif /* clamp_t */ #undef kzalloc_node #define kzalloc_node(_size, _flags, _node) kzalloc(_size, _flags) + +#ifndef __VMKLNX__ +extern void _kc_pci_disable_link_state(struct pci_dev *dev, int state); +#define pci_disable_link_state(p, s) _kc_pci_disable_link_state(p, s) +#else +#define pci_disable_link_state(p, s) do {} while (0) +#endif /* __VMKLNX__ */ #else /* < 2.6.26 */ #include #define HAVE_NETDEV_VLAN_FEATURES +#ifndef PCI_EXP_LNKCAP_ASPMS +#define PCI_EXP_LNKCAP_ASPMS 0x00000c00 /* ASPM Support */ +#endif /* PCI_EXP_LNKCAP_ASPMS */ #endif /* < 2.6.26 */ /*****************************************************************************/ #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) ) +static inline void _kc_ethtool_cmd_speed_set(struct ethtool_cmd *ep, + __u32 speed) +{ + ep->speed = (__u16)speed; + /* ep->speed_hi = (__u16)(speed >> 16); */ +} +#define ethtool_cmd_speed_set _kc_ethtool_cmd_speed_set + +static inline __u32 _kc_ethtool_cmd_speed(struct ethtool_cmd *ep) +{ + /* no speed_hi before 2.6.27, and probably no need for it yet */ + return (__u32)ep->speed; +} +#define ethtool_cmd_speed _kc_ethtool_cmd_speed + #ifndef __VMKLNX__ #if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15) ) -#if (((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) && defined(CONFIG_PM)) || ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)) && defined(CONFIG_PM_SLEEP))) +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) && defined(CONFIG_PM)) +#define ANCIENT_PM 1 +#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)) && \ + (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)) && \ + defined(CONFIG_PM_SLEEP)) +#define NEWER_PM 1 +#endif +#if defined(ANCIENT_PM) || defined(NEWER_PM) #undef device_set_wakeup_enable #define device_set_wakeup_enable(dev, val) \ do { \ @@ -1674,7 +2377,6 @@ static inline struct udphdr *_udp_hdr(const struct sk_buff *skb) } while (0) #endif /* 2.6.15-2.6.22 and CONFIG_PM or 2.6.23-2.6.25 and CONFIG_PM_SLEEP */ #endif /* 2.6.15 through 2.6.27 */ -#endif /* __VMKLNX__ */ #ifndef netif_napi_del #define netif_napi_del(_a) do {} while (0) #ifdef NAPI @@ -1684,9 +2386,11 @@ static inline struct udphdr *_udp_hdr(const struct sk_buff *skb) #endif #endif #endif /* netif_napi_del */ -#ifndef pci_dma_mapping_error -#define pci_dma_mapping_error(pdev, dma_addr) pci_dma_mapping_error(dma_addr) +#endif /* __VMKLNX__ */ +#ifdef dma_mapping_error +#undef dma_mapping_error #endif +#define dma_mapping_error(dev, dma_addr) pci_dma_mapping_error(dma_addr) #ifdef CONFIG_NETDEVICES_MULTIQUEUE #define HAVE_TX_MQ @@ -1695,9 +2399,30 @@ static inline struct udphdr *_udp_hdr(const struct sk_buff *skb) #define HAVE_TX_MQ 1 #define NETIF_F_MULTI_QUEUE 0 #define system_state SYSTEM_POWER_OFF + +#ifndef __WARN_printf +extern void __kc_warn_slowpath(const char *file, const int line, + const char *fmt, ...) __attribute__((format(printf, 3, 4))); +#define __WARN_printf(arg...) __kc_warn_slowpath(__FILE__, __LINE__, arg) +#endif /* __WARN_printf */ + +#ifndef WARN +#define WARN(condition, format...) 
({ \ + int __ret_warn_on = !!(condition); \ + if (unlikely(__ret_warn_on)) \ + __WARN_printf(format); \ + unlikely(__ret_warn_on); \ +}) +#endif /* WARN */ +#undef HAVE_IXGBE_DEBUG_FS +#undef HAVE_IGB_DEBUG_FS #else /* < 2.6.27 */ #define HAVE_TX_MQ #define HAVE_NETDEV_SELECT_QUEUE +#ifdef CONFIG_DEBUG_FS +#define HAVE_IXGBE_DEBUG_FS +#define HAVE_IGB_DEBUG_FS +#endif /* CONFIG_DEBUG_FS */ #endif /* < 2.6.27 */ /*****************************************************************************/ @@ -1709,22 +2434,49 @@ static inline struct udphdr *_udp_hdr(const struct sk_buff *skb) extern int _kc_pci_wake_from_d3(struct pci_dev *dev, bool enable); extern int _kc_pci_prepare_to_sleep(struct pci_dev *dev); #define netdev_alloc_page(a) alloc_page(GFP_ATOMIC) +#ifndef __skb_queue_head_init +static inline void __kc_skb_queue_head_init(struct sk_buff_head *list) +{ + list->prev = list->next = (struct sk_buff *)list; + list->qlen = 0; +} +#define __skb_queue_head_init(_q) __kc_skb_queue_head_init(_q) +#endif + +#define PCI_EXP_DEVCAP2 36 /* Device Capabilities 2 */ +#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */ + #endif /* < 2.6.28 */ /*****************************************************************************/ #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) ) +#ifndef swap +#define swap(a, b) \ + do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0) +#endif #define pci_request_selected_regions_exclusive(pdev, bars, name) \ pci_request_selected_regions(pdev, bars, name) -#ifndef __VMKLNX__ -extern void _kc_pci_disable_link_state(struct pci_dev *dev, int state); -#define pci_disable_link_state(p, s) _kc_pci_disable_link_state(p, s) -#else -#define pci_disable_link_state(p, s) do {} while (0) -#endif /* __VMKLNX__ */ #ifndef CONFIG_NR_CPUS #define CONFIG_NR_CPUS 1 #endif /* CONFIG_NR_CPUS */ +#ifndef pcie_aspm_enabled +#define pcie_aspm_enabled() (1) +#endif /* pcie_aspm_enabled */ + +#define PCI_EXP_SLTSTA_PDS 0x0040 /* Presence Detect State */ + +#ifndef pci_clear_master +extern void _kc_pci_clear_master(struct pci_dev *dev); +#define pci_clear_master(dev) _kc_pci_clear_master(dev) +#endif + +#ifndef PCI_EXP_LNKCTL_ASPMC +#define PCI_EXP_LNKCTL_ASPMC 0x0003 /* ASPM Control */ +#endif #else /* < 2.6.29 */ +#ifndef HAVE_NET_DEVICE_OPS +#define HAVE_NET_DEVICE_OPS +#endif #ifdef CONFIG_DCB #define HAVE_PFC_MODE_ENABLE #endif /* CONFIG_DCB */ @@ -1732,27 +2484,92 @@ extern void _kc_pci_disable_link_state(struct pci_dev *dev, int state); /*****************************************************************************/ #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) ) -#ifdef IXGBE_FCOE -#undef CONFIG_FCOE -#undef CONFIG_FCOE_MODULE -#endif /* IXGBE_FCOE */ -extern u16 _kc_skb_tx_hash(struct net_device *dev, struct sk_buff *skb); -#define skb_tx_hash(n, s) _kc_skb_tx_hash(n, s) +#define skb_rx_queue_recorded(a) false +#define skb_get_rx_queue(a) 0 #define skb_record_rx_queue(a, b) do {} while (0) -#else +#define skb_tx_hash(n, s) ___kc_skb_tx_hash((n), (s), (n)->real_num_tx_queues) +#undef pci_enable_sriov +#define pci_enable_sriov(a, b) -ENOTSUPP +#undef pci_disable_sriov +#define pci_disable_sriov(a) do {} while (0) +#ifndef pr_cont +#define pr_cont(fmt, ...) 
\ + printk(KERN_CONT fmt, ##__VA_ARGS__) +#endif /* pr_cont */ +static inline void _kc_synchronize_irq(unsigned int a) +{ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) ) + synchronize_irq(); +#else /* < 2.5.28 */ + synchronize_irq(a); +#endif /* < 2.5.28 */ +} +#undef synchronize_irq +#define synchronize_irq(a) _kc_synchronize_irq(a) + +#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */ + +#else /* < 2.6.30 */ #define HAVE_ASPM_QUIRKS #endif /* < 2.6.30 */ /*****************************************************************************/ #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31) ) #define ETH_P_1588 0x88F7 -#else +#define ETH_P_FIP 0x8914 +#ifndef netdev_uc_count +#define netdev_uc_count(dev) ((dev)->uc_count) +#endif +#ifndef netdev_for_each_uc_addr +#define netdev_for_each_uc_addr(uclist, dev) \ + for (uclist = dev->uc_list; uclist; uclist = uclist->next) +#endif +#ifndef PORT_OTHER +#define PORT_OTHER 0xff +#endif +#ifndef MDIO_PHY_ID_PRTAD +#define MDIO_PHY_ID_PRTAD 0x03e0 +#endif +#ifndef MDIO_PHY_ID_DEVAD +#define MDIO_PHY_ID_DEVAD 0x001f +#endif +#ifndef skb_dst +#define skb_dst(s) ((s)->dst) +#endif + +#ifndef SUPPORTED_1000baseKX_Full +#define SUPPORTED_1000baseKX_Full (1 << 17) +#endif +#ifndef SUPPORTED_10000baseKX4_Full +#define SUPPORTED_10000baseKX4_Full (1 << 18) +#endif +#ifndef SUPPORTED_10000baseKR_Full +#define SUPPORTED_10000baseKR_Full (1 << 19) +#endif + +#ifndef ADVERTISED_1000baseKX_Full +#define ADVERTISED_1000baseKX_Full (1 << 17) +#endif +#ifndef ADVERTISED_10000baseKX4_Full +#define ADVERTISED_10000baseKX4_Full (1 << 18) +#endif +#ifndef ADVERTISED_10000baseKR_Full +#define ADVERTISED_10000baseKR_Full (1 << 19) +#endif + +#else /* < 2.6.31 */ #ifndef HAVE_NETDEV_STORAGE_ADDRESS #define HAVE_NETDEV_STORAGE_ADDRESS #endif #ifndef HAVE_NETDEV_HW_ADDR #define HAVE_NETDEV_HW_ADDR #endif +#ifndef HAVE_TRANS_START_IN_QUEUE +#define HAVE_TRANS_START_IN_QUEUE +#endif +#ifndef HAVE_INCLUDE_LINUX_MDIO_H +#define HAVE_INCLUDE_LINUX_MDIO_H +#endif #endif /* < 2.6.31 */ /*****************************************************************************/ @@ -1764,7 +2581,53 @@ extern u16 _kc_skb_tx_hash(struct net_device *dev, struct sk_buff *skb); #define NETIF_F_FCOE_MTU (1 << 26) #endif #endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ -#else + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) +static inline int _kc_pm_runtime_get_sync() +{ + return 1; +} +#define pm_runtime_get_sync(dev) _kc_pm_runtime_get_sync() +#else /* 2.6.0 => 2.6.32 */ +static inline int _kc_pm_runtime_get_sync(struct device *dev) +{ + return 1; +} +#ifndef pm_runtime_get_sync +#define pm_runtime_get_sync(dev) _kc_pm_runtime_get_sync(dev) +#endif +#endif /* 2.6.0 => 2.6.32 */ +#ifndef pm_runtime_put +#define pm_runtime_put(dev) do {} while (0) +#endif +#ifndef pm_runtime_put_sync +#define pm_runtime_put_sync(dev) do {} while (0) +#endif +#ifndef pm_runtime_resume +#define pm_runtime_resume(dev) do {} while (0) +#endif +#ifndef pm_schedule_suspend +#define pm_schedule_suspend(dev, t) do {} while (0) +#endif +#ifndef pm_runtime_set_suspended +#define pm_runtime_set_suspended(dev) do {} while (0) +#endif +#ifndef pm_runtime_disable +#define pm_runtime_disable(dev) do {} while (0) +#endif +#ifndef pm_runtime_put_noidle +#define pm_runtime_put_noidle(dev) do {} while (0) +#endif +#ifndef pm_runtime_set_active +#define pm_runtime_set_active(dev) do {} while (0) +#endif +#ifndef pm_runtime_enable +#define pm_runtime_enable(dev) do {} while (0) +#endif +#ifndef pm_runtime_get_noresume +#define 
pm_runtime_get_noresume(dev) do {} while (0) +#endif +#else /* < 2.6.32 */ #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) #ifndef HAVE_NETDEV_OPS_FCOE_ENABLE #define HAVE_NETDEV_OPS_FCOE_ENABLE @@ -1775,21 +2638,1101 @@ extern u16 _kc_skb_tx_hash(struct net_device *dev, struct sk_buff *skb); #define HAVE_DCBNL_OPS_GETAPP #endif #endif /* CONFIG_DCB */ +#include +/* IOV bad DMA target work arounds require at least this kernel rev support */ +#define HAVE_PCIE_TYPE #endif /* < 2.6.32 */ /*****************************************************************************/ #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) ) -#ifndef netdev_alloc_skb_ip_align -extern struct sk_buff *_kc_netdev_alloc_skb_ip_align(struct net_device *dev, - unsigned int length); -#define netdev_alloc_skb_ip_align(n, l) _kc_netdev_alloc_skb_ip_align(n, l) +#ifndef pci_pcie_cap +#define pci_pcie_cap(pdev) pci_find_capability(pdev, PCI_CAP_ID_EXP) +#endif +#ifndef IPV4_FLOW +#define IPV4_FLOW 0x10 +#endif /* IPV4_FLOW */ +#ifndef IPV6_FLOW +#define IPV6_FLOW 0x11 +#endif /* IPV6_FLOW */ +/* Features back-ported to RHEL6 or SLES11 SP1 after 2.6.32 */ +#if ( (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) || \ + (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,1,0)) ) +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN +#define HAVE_NETDEV_OPS_FCOE_GETWWN +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ +#endif /* RHEL6 or SLES11 SP1 */ +#ifndef __percpu +#define __percpu +#endif /* __percpu */ +#ifndef PORT_DA +#define PORT_DA PORT_OTHER +#endif +#ifndef PORT_NONE +#define PORT_NONE PORT_OTHER +#endif + +#if ((RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))) +#if !defined(CONFIG_X86_32) && !defined(CONFIG_NEED_DMA_MAP_STATE) +#undef DEFINE_DMA_UNMAP_ADDR +#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME +#undef DEFINE_DMA_UNMAP_LEN +#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME +#undef dma_unmap_addr +#define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME) +#undef dma_unmap_addr_set +#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL)) +#undef dma_unmap_len +#define dma_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME) +#undef dma_unmap_len_set +#define dma_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL)) +#endif /* CONFIG_X86_64 && !CONFIG_NEED_DMA_MAP_STATE */ +#endif /* RHEL_RELEASE_CODE */ + +#if (!(RHEL_RELEASE_CODE && \ + (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,8)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))) || \ + ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,1)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))))) +static inline bool pci_is_pcie(struct pci_dev *dev) +{ + return !!pci_pcie_cap(dev); +} +#endif /* RHEL_RELEASE_CODE */ + +#ifndef __always_unused +#define __always_unused __attribute__((__unused__)) #endif +#ifndef __maybe_unused +#define __maybe_unused __attribute__((__unused__)) +#endif + +#if (!(RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)))) +#define sk_tx_queue_get(_sk) (-1) +#define sk_tx_queue_set(_sk, _tx_queue) do {} while(0) +#endif /* !(RHEL >= 6.2) */ + +#if (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) +#define HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT +#define HAVE_ETHTOOL_SET_PHYS_ID +#define HAVE_ETHTOOL_GET_TS_INFO +#endif /* RHEL >= 6.4 && 
RHEL < 7.0 */ + #else /* < 2.6.33 */ #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) #ifndef HAVE_NETDEV_OPS_FCOE_GETWWN #define HAVE_NETDEV_OPS_FCOE_GETWWN #endif #endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ -#define HAVE_ETHTOOL_SFP_DISPLAY_PORT #endif /* < 2.6.33 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) ) +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0)) +#ifndef pci_num_vf +#define pci_num_vf(pdev) _kc_pci_num_vf(pdev) +extern int _kc_pci_num_vf(struct pci_dev *dev); +#endif +#endif /* RHEL_RELEASE_CODE */ + +#ifndef ETH_FLAG_NTUPLE +#define ETH_FLAG_NTUPLE NETIF_F_NTUPLE +#endif + +#ifndef netdev_mc_count +#define netdev_mc_count(dev) ((dev)->mc_count) +#endif +#ifndef netdev_mc_empty +#define netdev_mc_empty(dev) (netdev_mc_count(dev) == 0) +#endif +#ifndef netdev_for_each_mc_addr +#define netdev_for_each_mc_addr(mclist, dev) \ + for (mclist = dev->mc_list; mclist; mclist = mclist->next) +#endif +#ifndef netdev_uc_count +#define netdev_uc_count(dev) ((dev)->uc.count) +#endif +#ifndef netdev_uc_empty +#define netdev_uc_empty(dev) (netdev_uc_count(dev) == 0) +#endif +#ifndef netdev_for_each_uc_addr +#define netdev_for_each_uc_addr(ha, dev) \ + list_for_each_entry(ha, &dev->uc.list, list) +#endif +#ifndef dma_set_coherent_mask +#define dma_set_coherent_mask(dev,mask) \ + pci_set_consistent_dma_mask(to_pci_dev(dev),(mask)) +#endif +#ifndef pci_dev_run_wake +#define pci_dev_run_wake(pdev) (0) +#endif + +/* netdev logging taken from include/linux/netdevice.h */ +#ifndef netdev_name +static inline const char *_kc_netdev_name(const struct net_device *dev) +{ + if (dev->reg_state != NETREG_REGISTERED) + return "(unregistered net_device)"; + return dev->name; +} +#define netdev_name(netdev) _kc_netdev_name(netdev) +#endif /* netdev_name */ + +#undef netdev_printk +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) +#define netdev_printk(level, netdev, format, args...) \ +do { \ + struct pci_dev *pdev = _kc_netdev_to_pdev(netdev); \ + printk(level "%s: " format, pci_name(pdev), ##args); \ +} while(0) +#elif ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) ) +#define netdev_printk(level, netdev, format, args...) \ +do { \ + struct pci_dev *pdev = _kc_netdev_to_pdev(netdev); \ + struct device *dev = pci_dev_to_dev(pdev); \ + dev_printk(level, dev, "%s: " format, \ + netdev_name(netdev), ##args); \ +} while(0) +#else /* 2.6.21 => 2.6.34 */ +#define netdev_printk(level, netdev, format, args...) \ + dev_printk(level, (netdev)->dev.parent, \ + "%s: " format, \ + netdev_name(netdev), ##args) +#endif /* <2.6.0 <2.6.21 <2.6.34 */ +#undef netdev_emerg +#define netdev_emerg(dev, format, args...) \ + netdev_printk(KERN_EMERG, dev, format, ##args) +#undef netdev_alert +#define netdev_alert(dev, format, args...) \ + netdev_printk(KERN_ALERT, dev, format, ##args) +#undef netdev_crit +#define netdev_crit(dev, format, args...) \ + netdev_printk(KERN_CRIT, dev, format, ##args) +#undef netdev_err +#define netdev_err(dev, format, args...) \ + netdev_printk(KERN_ERR, dev, format, ##args) +#undef netdev_warn +#define netdev_warn(dev, format, args...) \ + netdev_printk(KERN_WARNING, dev, format, ##args) +#undef netdev_notice +#define netdev_notice(dev, format, args...) \ + netdev_printk(KERN_NOTICE, dev, format, ##args) +#undef netdev_info +#define netdev_info(dev, format, args...) 
\ + netdev_printk(KERN_INFO, dev, format, ##args) +#undef netdev_dbg +#if defined(CONFIG_DYNAMIC_DEBUG) +#define netdev_dbg(__dev, format, args...) \ +do { \ + dynamic_dev_dbg((__dev)->dev.parent, "%s: " format, \ + netdev_name(__dev), ##args); \ +} while (0) +#else /* DEBUG */ +#define netdev_dbg(__dev, format, args...) \ +({ \ + if (0) \ + netdev_printk(KERN_DEBUG, __dev, format, ##args); \ + 0; \ +}) +#endif /* DEBUG */ + +#undef netif_printk +#define netif_printk(priv, type, level, dev, fmt, args...) \ +do { \ + if (netif_msg_##type(priv)) \ + netdev_printk(level, (dev), fmt, ##args); \ +} while (0) + +#undef netif_emerg +#define netif_emerg(priv, type, dev, fmt, args...) \ + netif_level(emerg, priv, type, dev, fmt, ##args) +#undef netif_alert +#define netif_alert(priv, type, dev, fmt, args...) \ + netif_level(alert, priv, type, dev, fmt, ##args) +#undef netif_crit +#define netif_crit(priv, type, dev, fmt, args...) \ + netif_level(crit, priv, type, dev, fmt, ##args) +#undef netif_err +#define netif_err(priv, type, dev, fmt, args...) \ + netif_level(err, priv, type, dev, fmt, ##args) +#undef netif_warn +#define netif_warn(priv, type, dev, fmt, args...) \ + netif_level(warn, priv, type, dev, fmt, ##args) +#undef netif_notice +#define netif_notice(priv, type, dev, fmt, args...) \ + netif_level(notice, priv, type, dev, fmt, ##args) +#undef netif_info +#define netif_info(priv, type, dev, fmt, args...) \ + netif_level(info, priv, type, dev, fmt, ##args) +#undef netif_dbg +#define netif_dbg(priv, type, dev, fmt, args...) \ + netif_level(dbg, priv, type, dev, fmt, ##args) + +#ifdef SET_SYSTEM_SLEEP_PM_OPS +#define HAVE_SYSTEM_SLEEP_PM_OPS +#endif + +#ifndef for_each_set_bit +#define for_each_set_bit(bit, addr, size) \ + for ((bit) = find_first_bit((addr), (size)); \ + (bit) < (size); \ + (bit) = find_next_bit((addr), (size), (bit) + 1)) +#endif /* for_each_set_bit */ + +#ifndef DEFINE_DMA_UNMAP_ADDR +#define DEFINE_DMA_UNMAP_ADDR DECLARE_PCI_UNMAP_ADDR +#define DEFINE_DMA_UNMAP_LEN DECLARE_PCI_UNMAP_LEN +#define dma_unmap_addr pci_unmap_addr +#define dma_unmap_addr_set pci_unmap_addr_set +#define dma_unmap_len pci_unmap_len +#define dma_unmap_len_set pci_unmap_len_set +#endif /* DEFINE_DMA_UNMAP_ADDR */ + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,3)) +#ifdef IGB_HWMON +#ifdef CONFIG_DEBUG_LOCK_ALLOC +#define sysfs_attr_init(attr) \ + do { \ + static struct lock_class_key __key; \ + (attr)->key = &__key; \ + } while (0) +#else +#define sysfs_attr_init(attr) do {} while (0) +#endif /* CONFIG_DEBUG_LOCK_ALLOC */ +#endif /* IGB_HWMON */ +#endif /* RHEL_RELEASE_CODE */ + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) +static inline bool _kc_pm_runtime_suspended() +{ + return false; +} +#define pm_runtime_suspended(dev) _kc_pm_runtime_suspended() +#else /* 2.6.0 => 2.6.34 */ +static inline bool _kc_pm_runtime_suspended(struct device *dev) +{ + return false; +} +#ifndef pm_runtime_suspended +#define pm_runtime_suspended(dev) _kc_pm_runtime_suspended(dev) +#endif +#endif /* 2.6.0 => 2.6.34 */ + +#else /* < 2.6.34 */ +#define HAVE_SYSTEM_SLEEP_PM_OPS +#ifndef HAVE_SET_RX_MODE +#define HAVE_SET_RX_MODE +#endif + +#endif /* < 2.6.34 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) ) + +ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos, + const void __user *from, size_t count); +#define simple_write_to_buffer _kc_simple_write_to_buffer + +#ifndef numa_node_id +#define 
numa_node_id() 0 +#endif +#ifdef HAVE_TX_MQ +#include +#ifndef CONFIG_NETDEVICES_MULTIQUEUE +#ifdef __VMKLNX__ +#define netif_set_real_num_tx_queues(_netdev, _count) \ + do { \ + (_netdev)->real_num_tx_queues = _count; \ + } while (0) +#else /* __VMKLNX__ */ +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0))) +void _kc_netif_set_real_num_tx_queues(struct net_device *, unsigned int); +#define netif_set_real_num_tx_queues _kc_netif_set_real_num_tx_queues +#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */ +#endif /* __VMKLNX__ */ +#else /* CONFIG_NETDEVICES_MULTI_QUEUE */ +#ifdef __VMKLNX__ +#define egress_subqueue_count real_num_tx_queues +#endif /* __VMKLNX__ */ +#define netif_set_real_num_tx_queues(_netdev, _count) \ + do { \ + (_netdev)->egress_subqueue_count = _count; \ + } while (0) +#endif /* CONFIG_NETDEVICES_MULTI_QUEUE */ +#else /* HAVE_TX_MQ */ +#define netif_set_real_num_tx_queues(_netdev, _count) do {} while(0) +#endif /* HAVE_TX_MQ */ +#ifndef ETH_FLAG_RXHASH +#define ETH_FLAG_RXHASH (1<<28) +#endif /* ETH_FLAG_RXHASH */ +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) +#define HAVE_IRQ_AFFINITY_HINT +#endif +#else /* < 2.6.35 */ +#define HAVE_PM_QOS_REQUEST_LIST +#define HAVE_IRQ_AFFINITY_HINT +#endif /* < 2.6.35 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) ) +extern int _kc_ethtool_op_set_flags(struct net_device *, u32, u32); +#define ethtool_op_set_flags _kc_ethtool_op_set_flags +extern u32 _kc_ethtool_op_get_flags(struct net_device *); +#define ethtool_op_get_flags _kc_ethtool_op_get_flags + +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +#ifdef NET_IP_ALIGN +#undef NET_IP_ALIGN +#endif +#define NET_IP_ALIGN 0 +#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ + +#ifndef __VMKLNX__ +#ifdef NET_SKB_PAD +#undef NET_SKB_PAD +#endif + +#if (L1_CACHE_BYTES > 32) +#define NET_SKB_PAD L1_CACHE_BYTES +#else +#define NET_SKB_PAD 32 +#endif +#endif + +static inline struct sk_buff *_kc_netdev_alloc_skb_ip_align(struct net_device *dev, + unsigned int length) +{ + struct sk_buff *skb; + +#ifdef __VMKLNX__ + /* VMKLNX already takes care of allocating the NET_SKB_PAD for + * each netdev skb allocation. It does not however include the + * 2 bytes for IP alignment. So, changing the call below to more + * accurately reflect what is done in the ESX 5.0 inbox driver + */ + skb = netdev_alloc_skb(dev, length + NET_IP_ALIGN); + if (skb) { +#if NET_IP_ALIGN + skb_reserve(skb, NET_IP_ALIGN); +#endif + skb->dev = dev; + } +#else + skb = alloc_skb(length + NET_SKB_PAD + NET_IP_ALIGN, GFP_ATOMIC); + if (skb) { +#if (NET_IP_ALIGN + NET_SKB_PAD) + skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD); +#endif + skb->dev = dev; + } +#endif + return skb; +} + +#ifdef netdev_alloc_skb_ip_align +#undef netdev_alloc_skb_ip_align +#endif +#define netdev_alloc_skb_ip_align(n, l) _kc_netdev_alloc_skb_ip_align(n, l) + +#undef netif_level +#define netif_level(level, priv, type, dev, fmt, args...) 
\ +do { \ + if (netif_msg_##type(priv)) \ + netdev_##level(dev, fmt, ##args); \ +} while (0) + +#undef usleep_range +#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000)) + +#define u64_stats_update_begin(a) do { } while(0) +#define u64_stats_update_end(a) do { } while(0) +#define u64_stats_fetch_begin(a) do { } while(0) +#define u64_stats_fetch_retry_bh(a) (0) +#define u64_stats_fetch_begin_bh(a) (0) + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,1)) +#define HAVE_8021P_SUPPORT +#endif + +#else /* < 2.6.36 */ + + +#define HAVE_PM_QOS_REQUEST_ACTIVE +#define HAVE_8021P_SUPPORT +#define HAVE_NDO_GET_STATS64 +#endif /* < 2.6.36 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) ) +#ifndef netif_set_real_num_rx_queues +static inline int __kc_netif_set_real_num_rx_queues(struct net_device *dev, + unsigned int rxq) +{ + return 0; +} +#define netif_set_real_num_rx_queues(dev, rxq) \ + __kc_netif_set_real_num_rx_queues((dev), (rxq)) +#endif +#ifndef ETHTOOL_RXNTUPLE_ACTION_CLEAR +#define ETHTOOL_RXNTUPLE_ACTION_CLEAR (-2) +#endif +#ifndef VLAN_N_VID +#define VLAN_N_VID VLAN_GROUP_ARRAY_LEN +#endif /* VLAN_N_VID */ +#ifndef ETH_FLAG_TXVLAN +#define ETH_FLAG_TXVLAN (1 << 7) +#endif /* ETH_FLAG_TXVLAN */ +#ifndef ETH_FLAG_RXVLAN +#define ETH_FLAG_RXVLAN (1 << 8) +#endif /* ETH_FLAG_RXVLAN */ + +static inline void _kc_skb_checksum_none_assert(struct sk_buff *skb) +{ + WARN_ON(skb->ip_summed != CHECKSUM_NONE); +} +#define skb_checksum_none_assert(skb) _kc_skb_checksum_none_assert(skb) + +static inline void *_kc_vzalloc_node(unsigned long size, int node) +{ + void *addr = vmalloc_node(size, node); + if (addr) + memset(addr, 0, size); + return addr; +} +#define vzalloc_node(_size, _node) _kc_vzalloc_node(_size, _node) + +static inline void *_kc_vzalloc(unsigned long size) +{ + void *addr = vmalloc(size); + if (addr) + memset(addr, 0, size); + return addr; +} +#define vzalloc(_size) _kc_vzalloc(_size) + +#ifndef vlan_get_protocol +static inline __be16 __kc_vlan_get_protocol(const struct sk_buff *skb) +{ + if (vlan_tx_tag_present(skb) || + skb->protocol != cpu_to_be16(ETH_P_8021Q)) + return skb->protocol; + + if (skb_headlen(skb) < sizeof(struct vlan_ethhdr)) + return 0; + + return ((struct vlan_ethhdr*)skb->data)->h_vlan_encapsulated_proto; +} +#define vlan_get_protocol(_skb) __kc_vlan_get_protocol(_skb) +#endif +#ifdef HAVE_HW_TIME_STAMP +#define SKBTX_HW_TSTAMP (1 << 0) +#define SKBTX_IN_PROGRESS (1 << 2) +#define SKB_SHARED_TX_IS_UNION +#endif + +#ifndef device_wakeup_enable +#define device_wakeup_enable(dev) device_set_wakeup_enable(dev, true) +#endif + +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,18) ) +#ifndef HAVE_VLAN_RX_REGISTER +#define HAVE_VLAN_RX_REGISTER +#endif +#endif /* > 2.4.18 */ +#endif /* < 2.6.37 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) ) +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) ) +#define skb_checksum_start_offset(skb) skb_transport_offset(skb) +#else /* 2.6.22 -> 2.6.37 */ +static inline int _kc_skb_checksum_start_offset(const struct sk_buff *skb) +{ + return skb->csum_start - skb_headroom(skb); +} +#define skb_checksum_start_offset(skb) _kc_skb_checksum_start_offset(skb) +#endif /* 2.6.22 -> 2.6.37 */ +#ifdef CONFIG_DCB +#ifndef IEEE_8021QAZ_MAX_TCS +#define IEEE_8021QAZ_MAX_TCS 8 +#endif +#ifndef DCB_CAP_DCBX_HOST +#define DCB_CAP_DCBX_HOST 0x01 +#endif 
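+/*
+ * Editorial note: the DCB_CAP_DCBX_* values in this block mirror the dcbnl
+ * capability flags that first shipped in kernel 2.6.38. A minimal,
+ * hypothetical sketch of how a driver's getdcbx() callback might combine
+ * them (the function below is illustrative only, not part of this patch):
+ *
+ *	static u8 example_getdcbx(struct net_device *netdev)
+ *	{
+ *		return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
+ *	}
+ */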
+#ifndef DCB_CAP_DCBX_LLD_MANAGED +#define DCB_CAP_DCBX_LLD_MANAGED 0x02 +#endif +#ifndef DCB_CAP_DCBX_VER_CEE +#define DCB_CAP_DCBX_VER_CEE 0x04 +#endif +#ifndef DCB_CAP_DCBX_VER_IEEE +#define DCB_CAP_DCBX_VER_IEEE 0x08 +#endif +#ifndef DCB_CAP_DCBX_STATIC +#define DCB_CAP_DCBX_STATIC 0x10 +#endif +#endif /* CONFIG_DCB */ +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)) +#define CONFIG_XPS +#endif /* RHEL_RELEASE_VERSION(6,2) */ +#endif /* < 2.6.38 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) ) +#ifndef NETIF_F_RXCSUM +#define NETIF_F_RXCSUM (1 << 29) +#endif +#ifndef skb_queue_reverse_walk_safe +#define skb_queue_reverse_walk_safe(queue, skb, tmp) \ + for (skb = (queue)->prev, tmp = skb->prev; \ + skb != (struct sk_buff *)(queue); \ + skb = tmp, tmp = skb->prev) +#endif +#else /* < 2.6.39 */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef HAVE_NETDEV_OPS_FCOE_DDP_TARGET +#define HAVE_NETDEV_OPS_FCOE_DDP_TARGET +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ +#ifndef HAVE_MQPRIO +#define HAVE_MQPRIO +#endif +#ifndef HAVE_SETUP_TC +#define HAVE_SETUP_TC +#endif +#ifdef CONFIG_DCB +#ifndef HAVE_DCBNL_IEEE +#define HAVE_DCBNL_IEEE +#endif +#endif /* CONFIG_DCB */ +#ifndef HAVE_NDO_SET_FEATURES +#define HAVE_NDO_SET_FEATURES +#endif +#endif /* < 2.6.39 */ + +/*****************************************************************************/ +/* use < 2.6.40 because of a Fedora 15 kernel update where they + * updated the kernel version to 2.6.40.x and they back-ported 3.0 features + * like set_phys_id for ethtool. + */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,40) ) +#ifdef ETHTOOL_GRXRINGS +#ifndef FLOW_EXT +#define FLOW_EXT 0x80000000 +union _kc_ethtool_flow_union { + struct ethtool_tcpip4_spec tcp_ip4_spec; + struct ethtool_usrip4_spec usr_ip4_spec; + __u8 hdata[60]; +}; +struct _kc_ethtool_flow_ext { + __be16 vlan_etype; + __be16 vlan_tci; + __be32 data[2]; +}; +struct _kc_ethtool_rx_flow_spec { + __u32 flow_type; + union _kc_ethtool_flow_union h_u; + struct _kc_ethtool_flow_ext h_ext; + union _kc_ethtool_flow_union m_u; + struct _kc_ethtool_flow_ext m_ext; + __u64 ring_cookie; + __u32 location; +}; +#define ethtool_rx_flow_spec _kc_ethtool_rx_flow_spec +#endif /* FLOW_EXT */ +#endif + +#define pci_disable_link_state_locked pci_disable_link_state + +#ifndef PCI_LTR_VALUE_MASK +#define PCI_LTR_VALUE_MASK 0x000003ff +#endif +#ifndef PCI_LTR_SCALE_MASK +#define PCI_LTR_SCALE_MASK 0x00001c00 +#endif +#ifndef PCI_LTR_SCALE_SHIFT +#define PCI_LTR_SCALE_SHIFT 10 +#endif + +#else /* < 2.6.40 */ +#define HAVE_ETHTOOL_SET_PHYS_ID +#endif /* < 2.6.40 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) ) +#define USE_LEGACY_PM_SUPPORT +#endif /* < 3.0.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) ) +#ifndef __netdev_alloc_skb_ip_align +#define __netdev_alloc_skb_ip_align(d,l,_g) netdev_alloc_skb_ip_align(d,l) +#endif /* __netdev_alloc_skb_ip_align */ +#define dcb_ieee_setapp(dev, app) dcb_setapp(dev, app) +#define dcb_ieee_delapp(dev, app) 0 +#define dcb_ieee_getapp_mask(dev, app) (1 << app->priority) + +/* 1000BASE-T Control register */ +#define CTL1000_AS_MASTER 0x0800 +#define CTL1000_ENABLE_MASTER 0x1000 + +#else /* < 3.1.0 */ +#ifndef HAVE_DCBNL_IEEE_DELAPP +#define HAVE_DCBNL_IEEE_DELAPP +#endif 
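+/*
+ * Editorial note: on kernels >= 3.1 the real dcb_ieee_setapp()/delapp()
+ * calls exist (hence HAVE_DCBNL_IEEE_DELAPP above); on older kernels the
+ * fallback macros earlier in this block map them onto the CEE dcb_setapp()
+ * path, with dcb_ieee_delapp() reduced to a no-op returning 0. A hedged
+ * sketch of the call pattern this keeps working; the values are
+ * illustrative (e.g. an FCoE application priority), not from this patch:
+ *
+ *	struct dcb_app app = {
+ *		.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
+ *		.protocol = ETH_P_FCOE,
+ *		.priority = 3,
+ *	};
+ *	err = dcb_ieee_setapp(netdev, &app);
+ */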
+#endif /* < 3.1.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) ) +#ifdef ETHTOOL_GRXRINGS +#define HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS +#endif /* ETHTOOL_GRXRINGS */ + +#ifndef skb_frag_size +#define skb_frag_size(frag) _kc_skb_frag_size(frag) +static inline unsigned int _kc_skb_frag_size(const skb_frag_t *frag) +{ + return frag->size; +} +#endif /* skb_frag_size */ + +#ifndef skb_frag_size_sub +#define skb_frag_size_sub(frag, delta) _kc_skb_frag_size_sub(frag, delta) +static inline void _kc_skb_frag_size_sub(skb_frag_t *frag, int delta) +{ + frag->size -= delta; +} +#endif /* skb_frag_size_sub */ + +#ifndef skb_frag_page +#define skb_frag_page(frag) _kc_skb_frag_page(frag) +static inline struct page *_kc_skb_frag_page(const skb_frag_t *frag) +{ + return frag->page; +} +#endif /* skb_frag_page */ + +#ifndef skb_frag_address +#define skb_frag_address(frag) _kc_skb_frag_address(frag) +static inline void *_kc_skb_frag_address(const skb_frag_t *frag) +{ + return page_address(skb_frag_page(frag)) + frag->page_offset; +} +#endif /* skb_frag_address */ + +#ifndef skb_frag_dma_map +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) +#include +#endif +#define skb_frag_dma_map(dev,frag,offset,size,dir) \ + _kc_skb_frag_dma_map(dev,frag,offset,size,dir) +static inline dma_addr_t _kc_skb_frag_dma_map(struct device *dev, + const skb_frag_t *frag, + size_t offset, size_t size, + enum dma_data_direction dir) +{ + return dma_map_page(dev, skb_frag_page(frag), + frag->page_offset + offset, size, dir); +} +#endif /* skb_frag_dma_map */ + +#ifndef __skb_frag_unref +#define __skb_frag_unref(frag) __kc_skb_frag_unref(frag) +static inline void __kc_skb_frag_unref(skb_frag_t *frag) +{ + put_page(skb_frag_page(frag)); +} +#endif /* __skb_frag_unref */ + +#ifndef SPEED_UNKNOWN +#define SPEED_UNKNOWN -1 +#endif +#ifndef DUPLEX_UNKNOWN +#define DUPLEX_UNKNOWN 0xff +#endif +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3)) +#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED +#define HAVE_PCI_DEV_FLAGS_ASSIGNED +#endif +#endif +#else /* < 3.2.0 */ +#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED +#define HAVE_PCI_DEV_FLAGS_ASSIGNED +#define HAVE_VF_SPOOFCHK_CONFIGURE +#endif +#endif /* < 3.2.0 */ + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6,2)) +#undef ixgbe_get_netdev_tc_txq +#define ixgbe_get_netdev_tc_txq(dev, tc) (&netdev_extended(dev)->qos_data.tc_to_txq[tc]) +#endif +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) ) +typedef u32 netdev_features_t; +#undef PCI_EXP_TYPE_RC_EC +#define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */ +#ifndef CONFIG_BQL +#define netdev_tx_completed_queue(_q, _p, _b) do {} while (0) +#define netdev_completed_queue(_n, _p, _b) do {} while (0) +#define netdev_tx_sent_queue(_q, _b) do {} while (0) +#define netdev_sent_queue(_n, _b) do {} while (0) +#define netdev_tx_reset_queue(_q) do {} while (0) +#define netdev_reset_queue(_n) do {} while (0) +#endif +#else /* ! 
< 3.3.0 */ +#define HAVE_INT_NDO_VLAN_RX_ADD_VID +#ifdef ETHTOOL_SRXNTUPLE +#undef ETHTOOL_SRXNTUPLE +#endif +#endif /* < 3.3.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) ) +#ifndef NETIF_F_RXFCS +#define NETIF_F_RXFCS 0 +#endif /* NETIF_F_RXFCS */ +#ifndef NETIF_F_RXALL +#define NETIF_F_RXALL 0 +#endif /* NETIF_F_RXALL */ + +#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) +#define NUMTCS_RETURNS_U8 + +int _kc_simple_open(struct inode *inode, struct file *file); +#define simple_open _kc_simple_open +#endif /* !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) */ + + +#ifndef skb_add_rx_frag +#define skb_add_rx_frag _kc_skb_add_rx_frag +extern void _kc_skb_add_rx_frag(struct sk_buff *, int, struct page *, + int, int, unsigned int); +#endif +#ifdef NET_ADDR_RANDOM +#define eth_hw_addr_random(N) do { \ + random_ether_addr(N->dev_addr); \ + N->addr_assign_type |= NET_ADDR_RANDOM; \ + } while (0) +#else /* NET_ADDR_RANDOM */ +#define eth_hw_addr_random(N) random_ether_addr(N->dev_addr) +#endif /* NET_ADDR_RANDOM */ +#else /* < 3.4.0 */ +#include +#endif /* >= 3.4.0 */ + +/*****************************************************************************/ +#if defined(E1000E_PTP) || defined(IGB_PTP) || defined(IXGBE_PTP) || defined(I40E_PTP) +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) ) && IS_ENABLED(CONFIG_PTP_1588_CLOCK) +#define HAVE_PTP_1588_CLOCK +#else +#error Cannot enable PTP Hardware Clock support due to a pre-3.0 kernel version or CONFIG_PTP_1588_CLOCK not enabled in the kernel +#endif /* > 3.0.0 && IS_ENABLED(CONFIG_PTP_1588_CLOCK) */ +#endif /* E1000E_PTP || IGB_PTP || IXGBE_PTP || I40E_PTP */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) ) +#define skb_tx_timestamp(skb) do {} while (0) +#else +#define HAVE_FDB_OPS +#define HAVE_ETHTOOL_GET_TS_INFO +#endif /* < 3.5.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,6,0) ) +#define PCI_EXP_LNKCAP2 44 /* Link Capability 2 */ + +#ifndef MDIO_EEE_100TX +#define MDIO_EEE_100TX 0x0002 /* 100TX EEE cap */ +#endif +#ifndef MDIO_EEE_1000T +#define MDIO_EEE_1000T 0x0004 /* 1000T EEE cap */ +#endif +#ifndef MDIO_EEE_10GT +#define MDIO_EEE_10GT 0x0008 /* 10GT EEE cap */ +#endif +#ifndef MDIO_EEE_1000KX +#define MDIO_EEE_1000KX 0x0010 /* 1000KX EEE cap */ +#endif +#ifndef MDIO_EEE_10GKX4 +#define MDIO_EEE_10GKX4 0x0020 /* 10G KX4 EEE cap */ +#endif +#ifndef MDIO_EEE_10GKR +#define MDIO_EEE_10GKR 0x0040 /* 10G KR EEE cap */ +#endif +#else /* < 3.6.0 */ +#include +#endif /* < 3.6.0 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) ) +#ifndef ADVERTISED_40000baseKR4_Full +/* these defines were all added in one commit, so should be safe + * to trigger activiation on one define + */ +#define SUPPORTED_40000baseKR4_Full (1 << 23) +#define SUPPORTED_40000baseCR4_Full (1 << 24) +#define SUPPORTED_40000baseSR4_Full (1 << 25) +#define SUPPORTED_40000baseLR4_Full (1 << 26) +#define ADVERTISED_40000baseKR4_Full (1 << 23) +#define ADVERTISED_40000baseCR4_Full (1 << 24) +#define ADVERTISED_40000baseSR4_Full (1 << 25) +#define ADVERTISED_40000baseLR4_Full (1 << 26) +#endif +/** + * mmd_eee_cap_to_ethtool_sup_t + * @eee_cap: value of the MMD EEE Capability register + * + * A small helper 
function that translates MMD EEE Capability (3.20) bits
+ * to ethtool supported settings.
+ */
+static inline u32 mmd_eee_cap_to_ethtool_sup_t(u16 eee_cap)
+{
+	u32 supported = 0;
+
+	if (eee_cap & MDIO_EEE_100TX)
+		supported |= SUPPORTED_100baseT_Full;
+	if (eee_cap & MDIO_EEE_1000T)
+		supported |= SUPPORTED_1000baseT_Full;
+	if (eee_cap & MDIO_EEE_10GT)
+		supported |= SUPPORTED_10000baseT_Full;
+	if (eee_cap & MDIO_EEE_1000KX)
+		supported |= SUPPORTED_1000baseKX_Full;
+	if (eee_cap & MDIO_EEE_10GKX4)
+		supported |= SUPPORTED_10000baseKX4_Full;
+	if (eee_cap & MDIO_EEE_10GKR)
+		supported |= SUPPORTED_10000baseKR_Full;
+
+	return supported;
+}
+
+/**
+ * mmd_eee_adv_to_ethtool_adv_t
+ * @eee_adv: value of the MMD EEE Advertisement/Link Partner Ability registers
+ *
+ * A small helper function that translates the MMD EEE Advertisement (7.60)
+ * and MMD EEE Link Partner Ability (7.61) bits to ethtool advertisement
+ * settings.
+ */
+static inline u32 mmd_eee_adv_to_ethtool_adv_t(u16 eee_adv)
+{
+	u32 adv = 0;
+
+	if (eee_adv & MDIO_EEE_100TX)
+		adv |= ADVERTISED_100baseT_Full;
+	if (eee_adv & MDIO_EEE_1000T)
+		adv |= ADVERTISED_1000baseT_Full;
+	if (eee_adv & MDIO_EEE_10GT)
+		adv |= ADVERTISED_10000baseT_Full;
+	if (eee_adv & MDIO_EEE_1000KX)
+		adv |= ADVERTISED_1000baseKX_Full;
+	if (eee_adv & MDIO_EEE_10GKX4)
+		adv |= ADVERTISED_10000baseKX4_Full;
+	if (eee_adv & MDIO_EEE_10GKR)
+		adv |= ADVERTISED_10000baseKR_Full;
+
+	return adv;
+}
+
+/**
+ * ethtool_adv_to_mmd_eee_adv_t
+ * @adv: the ethtool advertisement settings
+ *
+ * A small helper function that translates ethtool advertisement settings
+ * to EEE advertisements for the MMD EEE Advertisement (7.60) and
+ * MMD EEE Link Partner Ability (7.61) registers.
+ */
+static inline u16 ethtool_adv_to_mmd_eee_adv_t(u32 adv)
+{
+	u16 reg = 0;
+
+	if (adv & ADVERTISED_100baseT_Full)
+		reg |= MDIO_EEE_100TX;
+	if (adv & ADVERTISED_1000baseT_Full)
+		reg |= MDIO_EEE_1000T;
+	if (adv & ADVERTISED_10000baseT_Full)
+		reg |= MDIO_EEE_10GT;
+	if (adv & ADVERTISED_1000baseKX_Full)
+		reg |= MDIO_EEE_1000KX;
+	if (adv & ADVERTISED_10000baseKX4_Full)
+		reg |= MDIO_EEE_10GKX4;
+	if (adv & ADVERTISED_10000baseKR_Full)
+		reg |= MDIO_EEE_10GKR;
+
+	return reg;
+}
+
+#ifndef pci_pcie_type
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
+static inline u8 pci_pcie_type(struct pci_dev *pdev)
+{
+	int pos;
+	u16 reg16;
+
+	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+	if (!pos)
+		BUG();
+	pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
+	return (reg16 & PCI_EXP_FLAGS_TYPE) >> 4;
+}
+#else /* < 2.6.24 */
+#define pci_pcie_type(x)	(x)->pcie_type
+#endif /* < 2.6.24 */
+#endif /* pci_pcie_type */
+
+#define ptp_clock_register(caps, args...)
ptp_clock_register(caps) + +#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) +int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val); +#define pcie_capability_read_word(d,p,v) __kc_pcie_capability_read_word(d,p,v) +int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val); +#define pcie_capability_write_word(d,p,v) __kc_pcie_capability_write_word(d,p,v) +int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos, + u16 clear, u16 set); +#define pcie_capability_clear_and_set_word(d,p,c,s) \ + __kc_pcie_capability_clear_and_set_word(d,p,c,s) + +#define PCI_EXP_LNKSTA2 50 /* Link Status 2 */ + +static inline int pcie_capability_clear_word(struct pci_dev *dev, int pos, + u16 clear) +{ + return __kc_pcie_capability_clear_and_set_word(dev, pos, clear, 0); +} +#endif /* !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) */ + +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) +#define USE_CONST_DEV_UC_CHAR +#endif + +#else /* >= 3.7.0 */ +#define HAVE_CONST_STRUCT_PCI_ERROR_HANDLERS +#define USE_CONST_DEV_UC_CHAR +#endif /* >= 3.7.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0) ) +#ifndef PCI_EXP_LNKCTL_ASPM_L0S +#define PCI_EXP_LNKCTL_ASPM_L0S 0x01 /* L0s Enable */ +#endif +#ifndef PCI_EXP_LNKCTL_ASPM_L1 +#define PCI_EXP_LNKCTL_ASPM_L1 0x02 /* L1 Enable */ +#endif +#define HAVE_CONFIG_HOTPLUG +/* Reserved Ethernet Addresses per IEEE 802.1Q */ +#ifndef __VMKLNX__ +static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) = { + 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }; +#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) +static inline bool is_link_local_ether_addr(const u8 *addr) +{ + __be16 *a = (__be16 *)addr; + static const __be16 *b = (const __be16 *)eth_reserved_addr_base; + static const __be16 m = cpu_to_be16(0xfff0); + + return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0; +} +#endif /* !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) */ +#endif +#else /* >= 3.8.0 */ +#ifndef __devinit +#define __devinit +#define HAVE_ENCAP_CSUM_OFFLOAD +#endif + +#ifndef __devinitdata +#define __devinitdata +#endif + +#ifndef __devexit +#define __devexit +#endif + +#ifndef __devexit_p +#define __devexit_p +#endif + +#ifndef HAVE_SRIOV_CONFIGURE +#define HAVE_SRIOV_CONFIGURE +#endif + +#define HAVE_BRIDGE_ATTRIBS +#ifndef BRIDGE_MODE_VEB +#define BRIDGE_MODE_VEB 0 /* Default loopback mode */ +#endif /* BRIDGE_MODE_VEB */ +#ifndef BRIDGE_MODE_VEPA +#define BRIDGE_MODE_VEPA 1 /* 802.1Qbg defined VEPA mode */ +#endif /* BRIDGE_MODE_VEPA */ +#endif /* >= 3.8.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0) ) + +#undef hlist_entry +#define hlist_entry(ptr, type, member) container_of(ptr,type,member) + +#undef hlist_entry_safe +#define hlist_entry_safe(ptr, type, member) \ + (ptr) ? 
hlist_entry(ptr, type, member) : NULL + +#undef hlist_for_each_entry +#define hlist_for_each_entry(pos, head, member) \ + for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); \ + pos; \ + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) + +#undef hlist_for_each_entry_safe +#define hlist_for_each_entry_safe(pos, n, head, member) \ + for (pos = hlist_entry_safe((head)->first, typeof(*pos), member); \ + pos && ({ n = pos->member.next; 1; }); \ + pos = hlist_entry_safe(n, typeof(*pos), member)) + +#ifdef CONFIG_XPS +extern int __kc_netif_set_xps_queue(struct net_device *, struct cpumask *, u16); +#define netif_set_xps_queue(_dev, _mask, _idx) __kc_netif_set_xps_queue((_dev), (_mask), (_idx)) +#else /* CONFIG_XPS */ +#define netif_set_xps_queue(_dev, _mask, _idx) do {} while (0) +#endif /* CONFIG_XPS */ + +#ifdef HAVE_NETDEV_SELECT_QUEUE +#define _kc_hashrnd 0xd631614b /* not so random hash salt */ +extern u16 __kc_netdev_pick_tx(struct net_device *dev, struct sk_buff *skb); +#define __netdev_pick_tx __kc_netdev_pick_tx +#endif /* HAVE_NETDEV_SELECT_QUEUE */ +#else +#define HAVE_BRIDGE_FILTER +#define USE_DEFAULT_FDB_DEL_DUMP +#endif /* < 3.9.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) +static inline int __kc_pci_vfs_assigned(struct pci_dev *dev) +{ + return 0; +} +#define pci_vfs_assigned(dev) __kc_pci_vfs_assigned(dev) + +#ifndef VLAN_TX_COOKIE_MAGIC +static inline struct sk_buff *__kc__vlan_hwaccel_put_tag(struct sk_buff *skb, + u16 vlan_tci) +{ +#ifdef VLAN_TAG_PRESENT + vlan_tci |= VLAN_TAG_PRESENT; +#endif + skb->vlan_tci = vlan_tci; + return skb; +} +#define __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci) \ + __kc__vlan_hwaccel_put_tag(skb, vlan_tci) +#endif + +#else /* >= 3.10.0 */ +#define HAVE_ENCAP_TSO_OFFLOAD +#endif /* >= 3.10.0 */ + #endif /* _KCOMPAT_H_ */ diff --git a/vmkdrivers/src_9/drivers/net/igb/kcompat_esx.h b/vmkdrivers/src_9/drivers/net/igb/kcompat_esx.h index 56e499ea7f3a79161c58efc16972d7a4a6b24bfd..c28a864f11719a6885265ae879e606c52afe33ea 100644 --- a/vmkdrivers/src_9/drivers/net/igb/kcompat_esx.h +++ b/vmkdrivers/src_9/drivers/net/igb/kcompat_esx.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2009 Intel Corporation. + Copyright(c) 2007-2013 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -33,6 +33,11 @@ #include "vmkapi.h" +#define ESX40_PRODUCT_VER "4.0.0" +#define ESX41_PRODUCT_VER "4.1.0" +#define ESX50_PRODUCT_VER "5.0.0" +#define ESX51_PRODUCT_VER "5.1.0" + /* disable features that VMware ESX does not support */ #ifndef CONFIG_PM @@ -48,9 +53,8 @@ #define vmalloc_node(a,b) vmalloc(a) #define skb_record_rx_queue(a, b) \ - if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) \ - vmknetddi_queueops_set_skb_queueid((a), \ - VMKNETDDI_QUEUEOPS_MK_RX_QUEUEID((b))); + vmknetddi_queueops_set_skb_queueid((a), \ + VMKNETDDI_QUEUEOPS_MK_RX_QUEUEID((b))); #define skb_trim _kc_skb_trim @@ -65,8 +69,6 @@ static inline void _kc_skb_trim(struct sk_buff *skb, unsigned int len) skb->tail = skb->data + len; } } -/* disable pskb_trim usage for now - should break lots of stuff */ -#define pskb_trim(a,b) /* Alternate __VMKLNX__ DMA memory allocation stuff */ #define alloc_page(A) __get_free_pages(A, 0) @@ -81,6 +83,7 @@ static inline void _kc_skb_trim(struct sk_buff *skb, unsigned int len) /* * A couple of quick hacks for working with esx40 */ +#define vmknetddi_queueops_queue_features_t unsigned int #define HAVE_NETDEV_NAPI_LIST #define vmk_set_module_version(x,y) 1 #define VMKNETDDI_REGISTER_QUEUEOPS(ndev, ops) \ @@ -100,3 +103,10 @@ static inline void _kc_skb_trim(struct sk_buff *skb, unsigned int len) #define device_set_wakeup_enable(d, w) device_init_wakeup(d, w); +#define nr_cpu_ids smp_num_cpus +#define ESX_ALLOC_PERCPU( type ) \ + kmalloc(sizeof(type) * nr_cpu_ids, GFP_KERNEL) +#define ESX_FREE_PERCPU( ptr ) kfree(ptr) +#define ESX_PER_CPU_PTR( ptr, cpu, type ) (((cpu) < nr_cpu_ids)? \ + ((typeof(ptr))((char*)(ptr) + (cpu) * sizeof(type))):NULL) +#define __percpu diff --git a/vmkdrivers/src_9/drivers/net/igb/kcompat_ethtool.c b/vmkdrivers/src_9/drivers/net/igb/kcompat_ethtool.c index ec05d68d85345770d34b9df4361a20b058ad2382..3adf8696d6b5ebb996e9968b8ece16b1009bcccb 100644 --- a/vmkdrivers/src_9/drivers/net/igb/kcompat_ethtool.c +++ b/vmkdrivers/src_9/drivers/net/igb/kcompat_ethtool.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2009 Intel Corporation. + Copyright(c) 2007-2013 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -894,8 +894,6 @@ struct mii_ioctl_data; #define mii_ethtool_sset _kc_mii_ethtool_sset #undef mii_check_link #define mii_check_link _kc_mii_check_link -#undef generic_mii_ioctl -#define generic_mii_ioctl _kc_generic_mii_ioctl extern int _kc_mii_link_ok (struct mii_if_info *mii); extern int _kc_mii_nway_restart (struct mii_if_info *mii); extern int _kc_mii_ethtool_gset(struct mii_if_info *mii, @@ -903,9 +901,13 @@ extern int _kc_mii_ethtool_gset(struct mii_if_info *mii, extern int _kc_mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd); extern void _kc_mii_check_link (struct mii_if_info *mii); +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,6) ) +#undef generic_mii_ioctl +#define generic_mii_ioctl _kc_generic_mii_ioctl extern int _kc_generic_mii_ioctl(struct mii_if_info *mii_if, struct mii_ioctl_data *mii_data, int cmd, unsigned int *duplex_changed); +#endif /* > 2.4.6 */ struct _kc_pci_dev_ext { @@ -1091,6 +1093,7 @@ void _kc_mii_check_link (struct mii_if_info *mii) netif_carrier_off(mii->dev); } +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,6) ) int _kc_generic_mii_ioctl(struct mii_if_info *mii_if, struct mii_ioctl_data *mii_data, int cmd, unsigned int *duplex_chg_out) @@ -1165,4 +1168,5 @@ int _kc_generic_mii_ioctl(struct mii_if_info *mii_if, return rc; } +#endif /* > 2.4.6 */ diff --git a/vmkdrivers/src_9/drivers/net/ixgbe/ixgbe_main.c b/vmkdrivers/src_9/drivers/net/ixgbe/ixgbe_main.c index 6c62e2e761b6ee2fc795fac4bef43aa541207dd2..e3ce2e334db4e112d9660a4dd46eb0c45c2e84ff 100644 --- a/vmkdrivers/src_9/drivers/net/ixgbe/ixgbe_main.c +++ b/vmkdrivers/src_9/drivers/net/ixgbe/ixgbe_main.c @@ -4413,6 +4413,8 @@ int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue) /* Dont allow programming of duplicate MAC * address on the same PF for different VFs(queues) */ + if (!compare_ether_addr(addr, hw->mac.perm_addr)) + return i; if (!compare_ether_addr (addr, adapter->mac_table[i].addr)) { if (adapter->mac_table[i].queue == queue) { @@ -4473,6 +4475,8 @@ int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, u8* addr, u16 queue) return -EINVAL; for (i = 0; i < hw->mac.num_rar_entries; i++) { + if (!compare_ether_addr(addr, hw->mac.perm_addr)) + return 0; if (!compare_ether_addr(addr, adapter->mac_table[i].addr) && adapter->mac_table[i].queue == queue) { adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED; diff --git a/vmkdrivers/src_9/drivers/net/nx_nic/unm_nic_hw.c b/vmkdrivers/src_9/drivers/net/nx_nic/unm_nic_hw.c index 9b741d0dbc5d48db3b034ce4c2bacf74820b8e14..71488f6629ae17b3813cefe0a51456aea00bc077 100644 --- a/vmkdrivers/src_9/drivers/net/nx_nic/unm_nic_hw.c +++ b/vmkdrivers/src_9/drivers/net/nx_nic/unm_nic_hw.c @@ -25,7 +25,7 @@ /* * Source file for NIC routines to access the Phantom hardware * - * $Id: //depot/vmkdrivers/prod2013-stage-rel/src_9/drivers/net/nx_nic/unm_nic_hw.c#1 $ + * $Id: //depot/vmkdrivers/vsphere55u1/src_9/drivers/net/nx_nic/unm_nic_hw.c#1 $ * */ #include diff --git a/vmkdrivers/src_9/drivers/net/tg3/tg3.c b/vmkdrivers/src_9/drivers/net/tg3/tg3.c index b653478a26d79d3c48b921012e12a7d5405fd62d..8de4e3ad0282d604dbd85030770c065e22f97cd1 100644 --- a/vmkdrivers/src_9/drivers/net/tg3/tg3.c +++ b/vmkdrivers/src_9/drivers/net/tg3/tg3.c @@ -17703,6 +17703,16 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, features |= NETIF_F_TSO_ECN; } +#if defined(__VMKLNX__) && defined(TG3_INBOX) + /* 
+ * Disable TSO to avoid the data corruption issue that occurs when
+ * a 9KB+ buffer straddles a 4GB boundary. Please refer to
+ * PR 1148150.
+ */
+	features &= ~(NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN);
+	tg3_flag_clear(tp, TSO_CAPABLE);
+	tg3_flag_clear(tp, TSO_BUG);
+#endif
 #if defined(__VMKLNX__)
 	/* VMWare does not have skb_gso_segment() to workaround TSO_BUG */
 	if (tg3_flag(tp, TSO_BUG))
diff --git a/vmkdrivers/src_9/drivers/scsi/lpfc820/lpfc_version.h b/vmkdrivers/src_9/drivers/scsi/lpfc820/lpfc_version.h
index e3f8d0bb53a0549e1d0289adf350064e351acbb7..e6059ecf4e2a4f06ca3dc4cce0aba7c6fbb15edb 100644
--- a/vmkdrivers/src_9/drivers/scsi/lpfc820/lpfc_version.h
+++ b/vmkdrivers/src_9/drivers/scsi/lpfc820/lpfc_version.h
@@ -18,7 +18,7 @@
  * included with this package.                                     *
 *******************************************************************/
-#define LPFC_DRIVER_VERSION "8.2.3.1-128vmw"
+#define LPFC_DRIVER_VERSION "8.2.3.1-129vmw"
 #if defined(__VMKLNX__)
 /*
diff --git a/vmkdrivers/src_9/drivers/scsi/megaraid_sas/megaraid_sas.c b/vmkdrivers/src_9/drivers/scsi/megaraid_sas/megaraid_sas.c
index 64185b1e4afb28f8dd3def3cff466cba2275bd58..c1f07bb6e562876584401044094244a71ab77a4d 100644
--- a/vmkdrivers/src_9/drivers/scsi/megaraid_sas/megaraid_sas.c
+++ b/vmkdrivers/src_9/drivers/scsi/megaraid_sas/megaraid_sas.c
@@ -2510,7 +2510,6 @@ static void megasas_hotplug_work(void *arg)
 	 * Because scsi_device_lookup() takes a reference on the device,
 	 * should decrement reference count before removing it.
*/ - vmklnx_scsi_device_hot_removed(device); scsi_device_put(device); scsi_remove_device(device); } else { diff --git a/vmkdrivers/src_9/drivers/scsi/mpt2sas/mpt2sas_base.c b/vmkdrivers/src_9/drivers/scsi/mpt2sas/mpt2sas_base.c index 71cf082c6439d046f26c0b133a74adecd1ec087d..7c399c94e34a51f4af7dca25dacef7c4f4b5309a 100644 --- a/vmkdrivers/src_9/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/vmkdrivers/src_9/drivers/scsi/mpt2sas/mpt2sas_base.c @@ -3026,6 +3026,7 @@ mpt2sas_base_get_sata_identify(struct MPT2SAS_ADAPTER *ioc, u16 handle, Mpi2SataPassthroughReply_t *mpi_reply, char *id_buffer, int sz) { Mpi2SataPassthroughRequest_t *mpi_request; + Mpi2SCSITaskManagementRequest_t *mpi_request_tm; u16 smid; u32 ioc_state; unsigned long timeleft; @@ -3109,7 +3110,7 @@ mpt2sas_base_get_sata_identify(struct MPT2SAS_ADAPTER *ioc, u16 handle, sizeof(Mpi2SataPassthroughRequest_t)/4); if (!(ioc->config_cmds.status & MPT2_CMD_RESET)) issue_reset = 1; - goto issue_host_reset; + goto issue_target_reset; } if (ioc->config_cmds.status & MPT2_CMD_REPLY_VALID) { memcpy(mpi_reply, ioc->config_cmds.reply, @@ -3122,12 +3123,40 @@ mpt2sas_base_get_sata_identify(struct MPT2SAS_ADAPTER *ioc, u16 handle, goto out; - issue_host_reset: - if (issue_reset) - mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, - FORCE_BIG_HAMMER); + issue_target_reset: + if (issue_reset) { + + smid = mpt2sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx); + if (!smid) { + printk(MPT2SAS_ERR_FMT "%s: failed obtaining a tm smid\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } + + dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "tr_send:handle(0x%04x), " + "(open), smid(%d), cb(%d)\n", ioc->name, handle, smid, + ioc->tm_tr_cb_idx)); + + ioc->config_cmds.status = MPT2_CMD_PENDING; + request = mpt2sas_base_get_msg_frame(ioc, smid); + mpi_request_tm = (Mpi2SCSITaskManagementRequest_t *)request; + memset(mpi_request_tm, 0, sizeof(Mpi2SCSITaskManagementRequest_t)); + mpi_request_tm->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; + mpi_request_tm->DevHandle = cpu_to_le16(handle); + mpi_request_tm->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; + ioc->config_cmds.smid = smid; + mpt2sas_base_put_smid_hi_priority(ioc, smid); + + init_completion(&ioc->config_cmds.done); + timeleft = wait_for_completion_timeout(&ioc->config_cmds.done, + msecs_to_jiffies(30000)); + ioc->config_cmds.status = MPT2_CMD_NOT_USED; - rc = -EFAULT; + memset(mpi_reply, 0, sizeof(Mpi2SataPassthroughReply_t)); + } + rc = -EAGAIN; + out: mutex_unlock(&ioc->config_cmds.mutex); pci_free_consistent(ioc->pdev, sz, buffer, dma_addr); diff --git a/vmkdrivers/src_9/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/vmkdrivers/src_9/drivers/scsi/mpt2sas/mpt2sas_scsih.c index 0f61f2610f3af0fe452b5a55abf35c1dd490d653..3d15be887b724a45fd32fae985c9aec05a3bc416 100644 --- a/vmkdrivers/src_9/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/vmkdrivers/src_9/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -8224,7 +8224,6 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc, break; } spin_unlock_irqrestore(&ioc->sas_device_lock, flags); - vmklnx_scsi_target_hot_removed(sas_device->starget); #endif _scsih_device_remove_by_handle(ioc, handle); break; diff --git a/vmkdrivers/src_9/drivers/scsi/qla2xxx/qla_version.h b/vmkdrivers/src_9/drivers/scsi/qla2xxx/qla_version.h index 083de52542ac17c0478af554f346fbdef47469be..dfea6113fe0f63e5b2ff47000b3c909ce179678f 100644 --- a/vmkdrivers/src_9/drivers/scsi/qla2xxx/qla_version.h +++ b/vmkdrivers/src_9/drivers/scsi/qla2xxx/qla_version.h @@ -7,7 +7,7 @@ /* * Driver version */ -#define 
QLA2XXX_VERSION "902.k1.1-11vmw" +#define QLA2XXX_VERSION "902.k1.1-12vmw" #define QLA_DRIVER_MAJOR_VER 9 #define QLA_DRIVER_MINOR_VER 0 diff --git a/vmkdrivers/src_92/drivers/ata/libata-core.c b/vmkdrivers/src_92/drivers/ata/libata-core.c index 52f60a1df1ca8b54545a70f6aac517c97b1d0b0c..f2dd82c60ffeb99064d301626060bcd107e750f0 100644 --- a/vmkdrivers/src_92/drivers/ata/libata-core.c +++ b/vmkdrivers/src_92/drivers/ata/libata-core.c @@ -5998,6 +5998,9 @@ void __ata_qc_complete(struct ata_queued_cmd *qc) /* call completion callback */ qc->complete_fn(qc); +#if defined(__VMKLNX__) + ata_restart_waitq(ap); +#endif /* defined(__VMKLNX__) */ } static void fill_result_tf(struct ata_queued_cmd *qc) @@ -6100,6 +6103,15 @@ void ata_qc_complete(struct ata_queued_cmd *qc) ata_verify_xfer(qc); __ata_qc_complete(qc); +#if defined(__VMKLNX__) + /* Fixing PR 1173296 */ + if (unlikely((ehi->dev_action[dev->devno] & ATA_EH_REVALIDATE) && + (ap->pflags & ATA_PFLAG_EH_PENDING))) { + ata_port_printk(ap, KERN_INFO, + "ata_qc_complete, dev needs revalidation, calling EH\n"); + wake_up_process(ap->scsi_host->ehandler); + } +#endif /* defined(__VMKLNX__) */ } else { if (qc->flags & ATA_QCFLAG_EH_SCHEDULED) return; @@ -6951,7 +6963,16 @@ struct ata_port *ata_port_alloc(struct ata_host *host) DPRINTK("ENTER\n"); +#if defined(__VMKLNX__) + struct vmk_ata_all_port_wq *ata_wq; + + ap = kzalloc((sizeof(*ap) + + sizeof(struct vmk_ata_all_port_wq)), GFP_KERNEL); + ATA_GET_WAITQ_STRUCT(ap, ata_wq); + ATA_WQ_INIT(ata_wq); +#else ap = kzalloc(sizeof(*ap), GFP_KERNEL); +#endif /* defined(__VMKLNX__) */ if (!ap) return NULL; diff --git a/vmkdrivers/src_92/drivers/ata/libata-eh.c b/vmkdrivers/src_92/drivers/ata/libata-eh.c index 2302d9f4e3c2b93e5006bd8f0c7d8c6d53067c15..acbdc9896eb69427212e3a022c65f52f74056f04 100644 --- a/vmkdrivers/src_92/drivers/ata/libata-eh.c +++ b/vmkdrivers/src_92/drivers/ata/libata-eh.c @@ -1110,9 +1110,9 @@ void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev, spin_unlock_irqrestore(ap->lock, flags); } -/** +/* * ata_eh_done - EH action complete -* @ap: target ATA port + * @ap: target ATA port * @dev: target ATA dev for per-dev action (can be NULL) * @action: action just completed * @@ -1263,7 +1263,7 @@ static int ata_eh_read_log_10h(struct ata_device *dev, return 0; } -/** +/* * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE * @dev: device to perform REQUEST_SENSE to * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long) diff --git a/vmkdrivers/src_92/drivers/ata/libata-pmp.c b/vmkdrivers/src_92/drivers/ata/libata-pmp.c index e669b6f8b1fcdc907123080eb8c641bb976432cd..dabc44aa74793d653a74da5e9bb32d7df021c3f8 100644 --- a/vmkdrivers/src_92/drivers/ata/libata-pmp.c +++ b/vmkdrivers/src_92/drivers/ata/libata-pmp.c @@ -52,7 +52,7 @@ static unsigned int sata_pmp_read(struct ata_link *link, int reg, u32 *r_val) * sata_pmp_write - write PMP register * @link: link to write PMP register for * @reg: register to write - * @r_val: value to write + * @val: value to write * * Write PMP register. * @@ -287,7 +287,7 @@ int sata_pmp_std_hardreset(struct ata_link *link, unsigned int *class, /** * ata_std_postreset - standard postreset method for PMP link * @link: the target ata_link - * @classes: classes of attached devices + * @class: classes of attached devices * * This function is invoked after a successful reset. 
Note that
 * the device might have been reset more than once using
diff --git a/vmkdrivers/src_92/drivers/ata/libata-scsi.c b/vmkdrivers/src_92/drivers/ata/libata-scsi.c
index 14d55ed16d2dba56a7a2757d84506cdf380e8a1c..67dc7780a8b94e4cb0ae10ba3f3a4346cf6ab33e 100644
--- a/vmkdrivers/src_92/drivers/ata/libata-scsi.c
+++ b/vmkdrivers/src_92/drivers/ata/libata-scsi.c
@@ -1839,8 +1839,13 @@ static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
 {
 	struct ata_port *ap = dev->link->ap;
 	struct ata_queued_cmd *qc;
-	int rc;
+	int rc = 0;
+#if defined(__VMKLNX__)
+	struct vmk_ata_all_port_wq *ata_wq = NULL;
+
+	ATA_GET_WAITQ_STRUCT(ap, ata_wq);
+
+#endif /* defined(__VMKLNX__) */
 	VPRINTK("ENTER\n");
 
 	qc = ata_scsi_qc_new(dev, cmd, done);
@@ -1877,14 +1882,33 @@ static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
 	if (xlat_func(qc))
 		goto early_finish;
 
+#if defined(__VMKLNX__)
+	if (!ATA_WQ_EMPTY(ata_wq)) {
+		if (!ata_save_waitq(ap, qc)) {
+			goto noerror_exit;
+		} else {
+			goto defer;
+		}
+	}
+	if (ap->ops->qc_defer) {
+		if ((rc = ap->ops->qc_defer(qc))) {
+			if (!ata_save_waitq(ap, qc)) {
+				goto noerror_exit;
+			} else {
+				goto defer;
+			}
+		}
+	}
+#else
 	if (ap->ops->qc_defer) {
 		if ((rc = ap->ops->qc_defer(qc)))
 			goto defer;
 	}
+#endif /* defined(__VMKLNX__) */
 
 	/* select device, send command to hardware */
 	ata_qc_issue(qc);
-
+noerror_exit:
 	VPRINTK("EXIT\n");
 	return 0;
 
@@ -4090,6 +4114,69 @@ int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
 	spin_lock(shost->host_lock);
 	return rc;
 }
+#if defined(__VMKLNX__)
+/*
+ * ata_save_waitq
+ * @ap: ATA port to which the command was being sent
+ * @qc: command to issue to device
+ *
+ * Save a qc command into the waiting queue list.
+ * Because ata_scsi_qc_new() cannot allocate more than 31
+ * commands, the waiting queue's depth never exceeds 31;
+ * here we hold the qc commands that were allocated and then
+ * deferred by the NCQ/non-NCQ defer logic.
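+ *
+ * The waiting queue itself is a fixed-size ring (see the ATA_WQ_* macros
+ * in libata.h): head and tail indices wrap at ATA_WAITQ_ARRAY_SIZE and a
+ * count field tracks occupancy, so ATA_WQ_FULL()/ATA_WQ_EMPTY() are O(1).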
+ * + * LOCKING + * spin_lock(ap lock) + * + */ +int ata_save_waitq(struct ata_port *ap, struct ata_queued_cmd *qc) +{ + struct vmk_ata_all_port_wq *ata_wq = NULL; + + ATA_GET_WAITQ_STRUCT(ap, ata_wq); + + if (ATA_WQ_FULL(ata_wq)) + return -1; + + ATA_WQ_ADD(ata_wq, qc); + return 0; +} + +/* + * ata_restart_waitq + * @ap: ATA port to which the command was being sent + * + * start waiting queue work and try to issue command + * + * LOCKING + * spin_lock(ap lock) + * + */ +void ata_restart_waitq(struct ata_port *ap) +{ + struct vmk_ata_all_port_wq *ata_wq = NULL; + struct ata_queued_cmd *qc = NULL; + int rc = 0; + + ATA_GET_WAITQ_STRUCT(ap, ata_wq); + + ATA_WQ_HEAD(ata_wq, qc); + while (qc != NULL) { + if (ap->ops->qc_defer) { + if ((rc = ap->ops->qc_defer(qc))) { + return; + } + } + ATA_WQ_REMOVE(ata_wq, qc); + ata_qc_issue(qc); + ATA_WQ_HEAD(ata_wq, qc); + } + return; +} + +#endif /* defined(__VMKLNX__) */ + /** * ata_scsi_simulate - simulate SCSI command on ATA device diff --git a/vmkdrivers/src_92/drivers/ata/libata.h b/vmkdrivers/src_92/drivers/ata/libata.h index c92f804a21b124b63af6da5063ae61e9ced479bc..a0893706570e5c970d40a82175204f6de7e58184 100644 --- a/vmkdrivers/src_92/drivers/ata/libata.h +++ b/vmkdrivers/src_92/drivers/ata/libata.h @@ -38,6 +38,59 @@ struct ata_scsi_args { void (*done)(struct scsi_cmnd *); }; +#if defined(__VMKLNX__) +#define ATA_WAITQ_ARRAY_SIZE ATA_MAX_QUEUE + +struct vmk_ata_all_port_wq { + struct ata_queued_cmd *qc[ATA_WAITQ_ARRAY_SIZE]; + int head; + int tail; + int count; +}; + +#define ATA_GET_WAITQ_STRUCT(ap, ata_wq) \ + do { \ + ata_wq = (struct vmk_ata_all_port_wq *)((void *)(ap) \ + + sizeof(*ap)); \ + } while (0) + +#define ATA_WQ_INIT(wq) \ + do { \ + (wq)->head = (wq)->tail = (wq)->count = 0; \ + } while (0) + +#define ATA_WQ_FULL(wq) ((wq)->count == ATA_WAITQ_ARRAY_SIZE) + +#define ATA_WQ_EMPTY(wq) ((wq)->count == 0) + +#define ATA_WQ_ADD(wq, qc) \ + do { \ + (wq)->qc[(wq)->tail++] = qc; \ + if ((wq)->tail >= ATA_WAITQ_ARRAY_SIZE) { \ + (wq)->tail = 0; \ + } \ + (wq)->count++; \ + } while (0) + +#define ATA_WQ_REMOVE(wq, qc) \ + do { \ + qc = (wq)->qc[(wq)->head++]; \ + if ((wq)->head >= ATA_WAITQ_ARRAY_SIZE) { \ + (wq)->head = 0; \ + } \ + (wq)->count--; \ + } while (0) + +#define ATA_WQ_HEAD(wq, qc) \ + do { \ + qc = (wq)->qc[(wq)->head]; \ + if ((wq)->count == 0) { \ + qc = NULL; \ + } \ + } while (0) + +#endif /* defined(__VMKLNX__) */ + /* libata-core.c */ enum { /* flags for ata_dev_read_id() */ @@ -53,6 +106,14 @@ enum { ATA_DNXFER_QUIET = (1 << 31), }; +#if defined(__VMKLNX__) + +int ata_save_waitq(struct ata_port *ap, struct ata_queued_cmd *qc); +extern void ata_restart_waitq(struct ata_port *ap); +extern void ata_waitq_delete_all(struct ata_port *ap); + +#endif /* defined(__VMKLNX__) */ + extern unsigned int ata_print_id; extern struct workqueue_struct *ata_aux_wq; extern int atapi_enabled; diff --git a/vmkdrivers/src_92/drivers/scsi/fcoe/libfcoe.c b/vmkdrivers/src_92/drivers/scsi/fcoe/libfcoe.c index c2cabadccc61bb741b503e449b27d7f296b637a6..44089e8719b21a1341b93ebab644941876347a1e 100644 --- a/vmkdrivers/src_92/drivers/scsi/fcoe/libfcoe.c +++ b/vmkdrivers/src_92/drivers/scsi/fcoe/libfcoe.c @@ -495,6 +495,7 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip, /** * fcoe_ctlr_encaps() - Encapsulate an ELS frame for FIP, without sending it * @fip: The FCoE controller for the ELS frame + * @lport: local port * @dtype: The FIP descriptor type for the frame * @skb: The FCoE ELS frame including FC header but no FCoE headers * @@ 
-1532,6 +1533,7 @@ static void fcoe_ctlr_recv_work(struct work_struct *recv_work) /** * fcoe_ctlr_recv_flogi() - Snoop pre-FIP receipt of FLOGI response * @fip: The FCoE controller + * @lport: not in use * @fp: The FC frame to snoop * * Snoop potential response to FLOGI or even incoming FLOGI. @@ -1648,7 +1650,7 @@ EXPORT_SYMBOL_GPL(fcoe_wwn_from_mac); /** * fcoe_libfc_config() - Sets up libfc related properties for local port - * @lp: The local port to configure libfc for + * @lport: The local port to configure libfc for * @tt: The libfc function template * * Returns : 0 for success diff --git a/vmkdrivers/src_92/drivers/scsi/libfc/fc_disc.c b/vmkdrivers/src_92/drivers/scsi/libfc/fc_disc.c index aa2779fb5848eb12d61a891f9666d72f477e8cad..82475f95022eb42840108358163d6c447012fdff 100644 --- a/vmkdrivers/src_92/drivers/scsi/libfc/fc_disc.c +++ b/vmkdrivers/src_92/drivers/scsi/libfc/fc_disc.c @@ -76,6 +76,7 @@ void fc_disc_stop_rports(struct fc_disc *disc) * fc_disc_recv_rscn_req() - Handle Registered State Change Notification (RSCN) * @sp: The sequence of the RSCN exchange * @fp: The RSCN frame + * @disc: The discovery context of local port. * @lport: The local port that the request will be sent on * * Locking Note: This function expects that the disc_mutex is locked @@ -342,9 +343,9 @@ static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp) } } -/** +/* * fc_disc_gpn_ft_req() - Send Get Port Names by FC-4 type (GPN_FT) request - * @lport: The discovery context + * @disc: The discovery context * * Locking Note: This function expects that the disc_mutex is locked * before it is called. @@ -376,9 +377,9 @@ err: fc_disc_error(disc, NULL); } -/** +/* * fc_disc_gpn_ft_parse() - Parse the body of the dNS GPN_FT response. - * @lport: The local port the GPN_FT was received on + * @disc: The discovery context * @buf: The GPN_FT response buffer * @len: The size of response buffer * @@ -495,11 +496,11 @@ static void fc_disc_timeout(struct work_struct *work) mutex_unlock(&disc->disc_mutex); } -/** +/* * fc_disc_gpn_ft_resp() - Handle a response frame from Get Port Names (GPN_FT) * @sp: The sequence that the GPN_FT response was received on * @fp: The GPN_FT response frame - * @lp_arg: The discovery context + * @disc_arg: The discovery context * * Locking Note: This function is called without disc mutex held, and * should do all its processing with the mutex held diff --git a/vmkdrivers/src_92/drivers/scsi/libfc/fc_fcp.c b/vmkdrivers/src_92/drivers/scsi/libfc/fc_fcp.c index cc95d8b5d7d6b5493a832594fd074e64f5a3dcd5..242bd7637b277099d8495b6e90f43cb82cf0647d 100644 --- a/vmkdrivers/src_92/drivers/scsi/libfc/fc_fcp.c +++ b/vmkdrivers/src_92/drivers/scsi/libfc/fc_fcp.c @@ -615,7 +615,7 @@ err: /** * fc_fcp_send_data() - Send SCSI data to a target * @fsp: The FCP packet the data is on - * @sp: The sequence the data is to be sent on + * @seq: The sequence the data is to be sent on * @offset: The starting offset for this data request * @seq_blen: The burst length for this data request * @@ -1767,7 +1767,7 @@ static void fc_fcp_recovery(struct fc_fcp_pkt *fsp) fc_fcp_send_abort(fsp); } -/** +/* * fc_fcp_srr() - Send a SRR request (Sequence Retransmission Request) * @fsp: The FCP packet the SRR is to be sent on * @r_ctl: The R_CTL field for the SRR request @@ -1912,7 +1912,7 @@ static inline int fc_fcp_lport_queue_ready(struct fc_lport *lport) /** * fc_queuecommand() - The queuecommand function of the SCSI template - * @cmd: The scsi_cmnd to be executed + * @sc_cmd: The scsi_cmnd to be executed * @done: 
The callback function to be called when the scsi_cmnd is complete * * This is the i/o strategy routine, called by the SCSI layer. This routine diff --git a/vmkdrivers/src_92/drivers/scsi/libfc/fc_lport.c b/vmkdrivers/src_92/drivers/scsi/libfc/fc_lport.c index 4d4988e393a19ad6c3035205e00fd39ef3c6ac8b..dbd20244047c92814616ee137a551a131fa38068 100644 --- a/vmkdrivers/src_92/drivers/scsi/libfc/fc_lport.c +++ b/vmkdrivers/src_92/drivers/scsi/libfc/fc_lport.c @@ -406,7 +406,7 @@ static void fc_lport_recv_rlir_req(struct fc_seq *sp, struct fc_frame *fp, /** * fc_lport_recv_echo_req() - Handle received ECHO request * @sp: The sequence in the ECHO exchange - * @fp: ECHO request frame + * @in_fp: ECHO request frame * @lport: The local port recieving the ECHO * * Locking Note: The lport lock is expected to be held before calling @@ -448,7 +448,7 @@ static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp, /** * fc_lport_recv_rnid_req() - Handle received Request Node ID data request * @sp: The sequence in the RNID exchange - * @fp: The RNID request frame + * @in_fp: The RNID request frame * @lport: The local port recieving the RNID * * Locking Note: The lport lock is expected to be held before calling @@ -1205,9 +1205,10 @@ static void fc_lport_enter_scr(struct fc_lport *lport) fc_lport_error(lport, NULL); } -/** +/* * fc_lport_enter_ns() - register some object with the name server * @lport: Fibre Channel local port to register + * @state: The local port state * * Locking Note: The lport lock is expected to be held before calling * this routine. @@ -1705,6 +1706,7 @@ static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp, * @job: The BSG Passthrough job * @lport: The local port sending the request * @did: The destination port id + * @tov: The timeout period for the frame (in msecs) * * Locking Note: The lport lock is expected to be held before calling * this routine. diff --git a/vmkdrivers/src_92/drivers/scsi/libfc/fc_rport.c b/vmkdrivers/src_92/drivers/scsi/libfc/fc_rport.c index 834fe3c6d288e97715d31b441b1329f6c7597223..5dc34add358e90cd1d06d480eddf8212ccf63c0e 100644 --- a/vmkdrivers/src_92/drivers/scsi/libfc/fc_rport.c +++ b/vmkdrivers/src_92/drivers/scsi/libfc/fc_rport.c @@ -109,6 +109,7 @@ static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport, /** * fc_rport_create() - Create a new remote port * @lport: The local port this remote port will be associated with + * @port_id: The port ID of the remote port to be created. * @ids: The identifiers for the new remote port * * The remote port will start in the INIT state. 
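The documentation hunks in this patch all apply one pattern from the Linux kernel-doc convention: a comment that opens with "/**" is extracted by the documentation tools, so it must name the function and carry an @name line that matches each real parameter, while blocks demoted to a plain "/*" opening are skipped. A minimal sketch of that convention follows; the function fc_doc_example_send and its body are illustrative only and are not part of the disclosed sources:

#include <scsi/libfc.h>

/**
 * fc_doc_example_send() - Send one frame on a local port (illustrative)
 * @lport: The local port the frame is sent on
 * @fp: The FC frame to send
 *
 * Every parameter in the signature below has a matching @name line,
 * which is the property the parameter renames in this patch restore.
 */
static inline void fc_doc_example_send(struct fc_lport *lport,
                                       struct fc_frame *fp)
{
	/* Hand the frame to the transport through the libfc template. */
	lport->tt.frame_send(lport, fp);
}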
diff --git a/vmkdrivers/src_92/drivers/usb/core/pci-quirks.c b/vmkdrivers/src_92/drivers/usb/core/pci-quirks.c index c20fa271ec75a56d425de7a65a9942b8eecb4b29..969f7bce16f45aa6f91e92702abb75d067a746e2 100644 --- a/vmkdrivers/src_92/drivers/usb/core/pci-quirks.c +++ b/vmkdrivers/src_92/drivers/usb/core/pci-quirks.c @@ -18,8 +18,8 @@ #endif #include "pci-quirks.h" #if defined(__VMKLNX__) -#include "../host/xhci/xhci-ext-caps.h" -#else +//#include "../host/xhci/xhci-ext-caps.h" +//#else #include "xhci-ext-caps.h" #endif diff --git a/vmkdrivers/src_92/include/linux/ata.h b/vmkdrivers/src_92/include/linux/ata.h index cad647b6a238fd2523e82f45f123ccdf29504116..6606235a90c91b7cda412378c85ae0c2443dc56e 100644 --- a/vmkdrivers/src_92/include/linux/ata.h +++ b/vmkdrivers/src_92/include/linux/ata.h @@ -994,7 +994,7 @@ static inline int ata_id_is_cfa(const u16 *id) /** * ata_drive_40wire - Check if device is a 40 wire IDE drive - * @id: the pointer to IDENTIFY DEVICE data + * @dev_id: the pointer to IDENTIFY DEVICE data * * Check if the device is a compact flash device. * @@ -1053,7 +1053,7 @@ static inline int atapi_id_dmadir(const u16 *dev_id) /** * ata_set_lba_range_entries - Construct TRIM command data buffer * @_buffer: TRIM data buffer - * @buf_zie: data buffer size + * @buf_size: data buffer size * @sector: starting sector to be TRIMed * @count: number of sectors * diff --git a/vmkdrivers/src_92/include/scsi/fc_encode.h b/vmkdrivers/src_92/include/scsi/fc_encode.h index 79d1e8262b3e9610dee503dc40686155eeff6d53..88bb26e8e97fbd3b594198f60aa864b6d7621832 100644 --- a/vmkdrivers/src_92/include/scsi/fc_encode.h +++ b/vmkdrivers/src_92/include/scsi/fc_encode.h @@ -41,6 +41,16 @@ struct fc_ct_req { /** * fill FC header fields in specified fc_frame + * @fp: fc frame where header will be placed. + * @r_ctl: FC header R_CTL value. + * @did: FC destination ID. + * @sid: FC source ID. + * @type: FC-4 type. + * @f_ctl: FC header F_CTL value. + * @parm_offset: parameter offset. + * + * RETURN VALUE: + * None. */ /* _VMKLNX_CODECHECK_: fc_fill_fc_hdr */ static inline void fc_fill_fc_hdr(struct fc_frame *fp, enum fc_rctl r_ctl, @@ -81,7 +91,12 @@ static inline void fc_adisc_fill(struct fc_lport *lport, struct fc_frame *fp) /** * fc_ct_hdr_fill- fills ct header and reset ct payload - * returns pointer to ct request. + * @fp: fc frame where the CT header will be placed. + * @op: CT opcode. + * @req_size: size of request frame. + * + * RETURN VALUE: + * Pointer to the CT request. */ static inline struct fc_ct_req *fc_ct_hdr_fill(const struct fc_frame *fp, unsigned int op, size_t req_size) @@ -180,10 +195,13 @@ static inline int fc_ct_fill(struct fc_lport *lport, return 0; } -/** +/* * fc_plogi_fill - Fill in plogi request frame + * + * @lport: local port. + * @fp: fc frame where the PLOGI request will be placed. + * @op: opcode. */ -/* _VMKLNX_CODECHECK_: fc_plogi_fill */ static inline void fc_plogi_fill(struct fc_lport *lport, struct fc_frame *fp, unsigned int op) { @@ -214,8 +232,11 @@ static inline void fc_plogi_fill(struct fc_lport *lport, struct fc_frame *fp, cp->cp_open_seq = 1; } -/** +/* * fc_flogi_fill - Fill in a flogi request frame. + * + * @lport: local port + * @fp: fc frame where the FLOGI request will be placed. */ static inline void fc_flogi_fill(struct fc_lport *lport, struct fc_frame *fp) { @@ -239,8 +260,11 @@ static inline void fc_flogi_fill(struct fc_lport *lport, struct fc_frame *fp) sp->sp_features = htons(FC_SP_FT_NPIV); } -/** +/* * fc_fdisc_fill - Fill in a fdisc request frame. 
+ * + * @lport: local port. + * @fp: fc frame where the FDISC request will be placed. */ static inline void fc_fdisc_fill(struct fc_lport *lport, struct fc_frame *fp) { @@ -262,8 +286,11 @@ static inline void fc_fdisc_fill(struct fc_lport *lport, struct fc_frame *fp) cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ); } -/** +/* * fc_logo_fill - Fill in a logo request frame. + * + * @lport: local port + * @fp: fc frame where the LOGO request will be placed. */ static inline void fc_logo_fill(struct fc_lport *lport, struct fc_frame *fp) { @@ -276,8 +303,11 @@ static inline void fc_logo_fill(struct fc_lport *lport, struct fc_frame *fp) logo->fl_n_port_wwn = htonll(lport->wwpn); } -/** +/* * fc_rtv_fill - Fill in RTV (read timeout value) request frame. + * + * @lport: local port + * @fp: fc frame where the RTV request will be placed. */ static inline void fc_rtv_fill(struct fc_lport *lport, struct fc_frame *fp) { @@ -288,8 +318,11 @@ static inline void fc_rtv_fill(struct fc_lport *lport, struct fc_frame *fp) rtv->rtv_cmd = ELS_RTV; } -/** +/* * fc_rec_fill - Fill in rec request frame + * + * @lport: local port + * @fp: fc frame where the REC request will be placed. */ static inline void fc_rec_fill(struct fc_lport *lport, struct fc_frame *fp) { @@ -304,8 +337,12 @@ static inline void fc_rec_fill(struct fc_lport *lport, struct fc_frame *fp) rec->rec_rx_id = htons(ep->rxid); } -/** +/* * fc_prli_fill - Fill in prli request frame + * + * @lport: local port + * @fp: fc frame where the PRLI request will be placed. + * */ static inline void fc_prli_fill(struct fc_lport *lport, struct fc_frame *fp) { @@ -324,8 +361,12 @@ static inline void fc_prli_fill(struct fc_lport *lport, struct fc_frame *fp) pp->spp.spp_params = htonl(lport->service_params); } -/** +/* * fc_scr_fill - Fill in a scr request frame. + * + * @lport: local port + * @fp: fc frame where the SCR request will be placed. + * */ static inline void fc_scr_fill(struct fc_lport *lport, struct fc_frame *fp) { @@ -337,8 +378,18 @@ static inline void fc_scr_fill(struct fc_lport *lport, struct fc_frame *fp) scr->scr_reg_func = ELS_SCRF_FULL; } -/** +/* * fc_els_fill - Fill in an ELS request frame + * @lport: local port. + * @did: not in use. + * @fp: fc frame where the ELS request will be placed. + * @op: operation code. + * @r_ctl: pointer to ELS request R_CTL. + * @fh_type: pointer to ELS type. + * + * RETURN VALUE: + * 0 on success. + * -EINVAL on failure. 
*/ /* _VMKLNX_CODECHECK_: fc_els_fill */ static inline int fc_els_fill(struct fc_lport *lport, diff --git a/vmkdrivers/src_92/include/scsi/libfc.h b/vmkdrivers/src_92/include/scsi/libfc.h index f6be291c424c895b35213dad9dfe9a8edbf6d2ae..09a79b48bcc6f2ae4f2428e1fb35ffbb74160d0a 100644 --- a/vmkdrivers/src_92/include/scsi/libfc.h +++ b/vmkdrivers/src_92/include/scsi/libfc.h @@ -72,12 +72,15 @@ p[2] = ((v) & 0xFF); \ } while (0) -/** +/* * enum fc_lport_state - Local port states * @LPORT_ST_DISABLED: Disabled * @LPORT_ST_FLOGI: Fabric login (FLOGI) sent * @LPORT_ST_DNS: Waiting for name server remote port to become ready * @LPORT_ST_RPN_ID: Register port name by ID (RPN_ID) sent + * @LPORT_ST_RNN_ID: Register node name by ID (RNN_ID) sent + * @LPORT_ST_RSNN_NN: Register symbolic node name (RSNN_NN) sent + * @LPORT_ST_RSPN_ID: Register symbolic port name by ID (RSPN_ID) sent * @LPORT_ST_RFT_ID: Register Fibre Channel types by ID (RFT_ID) sent * @LPORT_ST_RFF_ID: Register FC-4 Features by ID (RFF_ID) sent * @LPORT_ST_SCR: State Change Register (SCR) sent @@ -194,7 +197,7 @@ struct fc_rport_libfc_priv { unsigned int r_a_tov; }; -/** +/* * struct fc_rport_priv - libfc remote port and discovery info * @local_port: The associated local port * @rport: The FC transport remote port @@ -212,6 +215,13 @@ struct fc_rport_libfc_priv { * @r_a_tov: Resource allocation timeout value (in msec) * @rp_mutex: The mutex that protects the remote port * @retry_work: Handle for retries + * @event: The current remote port event + * @ops: The remote port operations + * @peers: The node on the local port's list of remote ports + * @event_work: Handle for queued events + * @sp_features: Service parameter features + * @spp_type: FC-4 type from the service parameter page + * @supported_classes: Supported classes of service * @event_callback: Callback when READY, FAILED or LOGO states complete */ struct fc_rport_priv { @@ -304,7 +314,7 @@ struct fc_seq_els_data { enum fc_els_rjt_explan explan; }; -/** +/* * struct fc_fcp_pkt - FCP request structure (one for each scsi_cmnd request) * @lp: The associated local port * @state: The state of the I/O @@ -338,6 +348,8 @@ struct fc_seq_els_data { * @seq_ptr: The sequence that will carry the SCSI command * @recov_retry: Number of recovery retries * @recov_seq: The sequence for REC or SRR + * @sg: The scatter-gather list for the data transfer + * @vmksgel: The VMkernel scatter/gather element list */ struct fc_fcp_pkt { /* Housekeeping information */ @@ -800,7 +812,7 @@ struct libfc_function_template { * struct fc_disc - Discovery context * @retry_count: Number of retries * @pending: 1 if discovery is pending, 0 if not - * @requesting: 1 if discovery has been requested, 0 if not + * @requested: 1 if discovery has been requested, 0 if not * @seq_count: Number of sequences used for discovery * @buf_len: Length of the discovery buffer * @disc_id: Discovery ID @@ -830,7 +842,7 @@ struct fc_disc { enum fc_disc_event); }; -/** +/* * struct fc_lport - Local port * @host: The SCSI host associated with a local port * @ema_list: Exchange manager anchor list @@ -864,6 +876,8 @@ struct fc_disc { * @mfs: The maximum Fibre Channel payload size * @max_retry_count: The maximum retry attempts * @max_rport_retry_count: The maximum remote port retry attempts + * @link_speed: The current link speed + * @link_supported_speeds: The supported link speeds * @lro_xid: The maximum XID for LRO * @lso_max: The maximum large offload send size * @fcts: FC-4 type mask diff --git a/vmkdrivers/src_92/include/scsi/libfcoe.h b/vmkdrivers/src_92/include/scsi/libfcoe.h index 806a6380f4fa5344c0eef8539d8d2750a5011741..0a9cda051dd8a3e702bf5078687c0e787936454d 100644 --- a/vmkdrivers/src_92/include/scsi/libfcoe.h +++ b/vmkdrivers/src_92/include/scsi/libfcoe.h @@ -55,7 +55,7 @@ enum fip_state { FIP_ST_ENABLED, }; -/** +/* * struct fcoe_ctlr - FCoE Controller and FIP state * @state: internal FIP state for network link and FIP or non-FIP mode. * @mode: LLD-selected mode. 
@@ -74,12 +74,14 @@ enum fip_state { * @user_mfs: configured maximum FC frame size, including FC header. * @flogi_oxid: exchange ID of most recent fabric login. * @flogi_count: number of FLOGI attempts in AUTO mode. + * @reset_req: * @map_dest: use the FC_MAP mode for destination MAC addresses. * @spma: supports SPMA server-provided MACs mode * @send_ctlr_ka: need to send controller keep alive * @send_port_ka: need to send port keep alives * @dest_addr: MAC address of the selected FC forwarder. * @ctl_src_addr: the native MAC address of our local port. + * @vlan_id: * @send: LLD-supplied function to handle sending FIP Ethernet frames * @update_mac: LLD-supplied function to handle changes to MAC addresses. * @get_src_addr: LLD-supplied function to supply a source MAC address. @@ -132,7 +134,7 @@ struct fcoe_ctlr { spinlock_t lock; }; -/** +/* * struct fcoe_fcf - Fibre-Channel Forwarder * @list: list linkage * @time: system time (jiffies) when an advertisement was last received @@ -144,6 +146,7 @@ struct fcoe_ctlr { * @pri: selection priority, smaller values are better * @flags: flags received from advertisement * @fka_period: keep-alive period, in jiffies + * @fd_flags: * * A Fibre-Channel Forwarder (FCF) is the entity on the Ethernet that * passes FCoE frames on to an FC fabric. This structure represents diff --git a/vmkdrivers/src_92/include/scsi/scsi_cmnd.h b/vmkdrivers/src_92/include/scsi/scsi_cmnd.h index 30cdf4328742617e6773b34f750db04b4943c8dc..f75ff88429dbb681c208b4b3ddb8c13452d24ed9 100644 --- a/vmkdrivers/src_92/include/scsi/scsi_cmnd.h +++ b/vmkdrivers/src_92/include/scsi/scsi_cmnd.h @@ -248,6 +248,8 @@ static inline int scsi_get_resid(struct scsi_cmnd *cmd) * scsi_for_each_sg - Loop over scsi_cmd's SG list * @cmd: a pointer to struct scsi_cmnd * @nseg: number of elements in the list + * @sg: scatter-gather list + * @__i: index of sg element * * Loop over @cmd scsi_cmd's SG list. * diff --git a/vmkdrivers/src_92/include/scsi/scsi_tcq.h b/vmkdrivers/src_92/include/scsi/scsi_tcq.h index 71d50c7bc67ac8009627b525fd8a17915fddd7b1..394fcec8ea7a72ce3b5251395971901ecf91c082 100644 --- a/vmkdrivers/src_92/include/scsi/scsi_tcq.h +++ b/vmkdrivers/src_92/include/scsi/scsi_tcq.h @@ -108,17 +108,16 @@ static inline void scsi_activate_tcq(struct scsi_device *sdev, int depth) /** * scsi_deactivate_tcq - turn off tag command queueing - * @SDpnt: device to turn off TCQ for + * @sdev: SCSI Device to turn off TCQ for + * @depth: number of commands the low level driver can queue up in non-tagged mode. * - * Turns off tag command queueing + * RETURN VALUE: + * None * * ESX Deviation Notes: * blk layer is not supported/affected. 
- * - * RETURN VALUE: - * None */ -/* _VMKLNX_CODECHECK_: scsi_deactivate_tcq */ +/* _VMKLNX_CODECHECK_: scsi_deactivate_tcq*/ static inline void scsi_deactivate_tcq(struct scsi_device *sdev, int depth) { #if !defined(__VMKLNX__) @@ -130,7 +129,7 @@ static inline void scsi_deactivate_tcq(struct scsi_device *sdev, int depth) /** * scsi_populate_tag_msg - place a tag message in a buffer - * @SCpnt: pointer to the Scsi_Cmnd for the tag + * @cmd: pointer to the Scsi_Cmnd for the tag * @msg: pointer to the area to place the tag * * Create the correct type of tag message for the @@ -190,14 +189,14 @@ static inline int scsi_populate_tag_msg(struct scsi_cmnd *cmd, char *msg) #endif } -/** +/* * scsi_find_tag - find a tagged command by device - * @SDpnt: pointer to the ScSI device + * @sdev: pointer to the SCSI device * @tag: the tag number * * Notes: * Only works with tags allocated by the generic blk layer. - **/ + */ static inline struct scsi_cmnd *scsi_find_tag(struct scsi_device *sdev, int tag) { @@ -212,7 +211,7 @@ static inline struct scsi_cmnd *scsi_find_tag(struct scsi_device *sdev, int tag) return sdev->current_cmnd; } -/** +/* * scsi_init_shared_tag_map - create a shared tag map * @shost: the host to share the tag map among all devices * @depth: the total depth of the map diff --git a/vmkdrivers/src_92/vmklinux_92/linux/drivers/scsi/scsi_error.c b/vmkdrivers/src_92/vmklinux_92/linux/drivers/scsi/scsi_error.c index 8b6fa0058e3840f3999d6bd50ec6259919119121..c05698a8bebdfb25c1822a0a1a7aed7c4f339be3 100644 --- a/vmkdrivers/src_92/vmklinux_92/linux/drivers/scsi/scsi_error.c +++ b/vmkdrivers/src_92/vmklinux_92/linux/drivers/scsi/scsi_error.c @@ -502,14 +502,14 @@ static void scsi_eh_done(struct scsi_cmnd *scmd) complete(eh_action); } -/** +/* * scsi_send_eh_cmnd - send a cmd to a device as part of error recovery. * @scmd: SCSI Cmd to send. * @timeout: Timeout for cmd. * * Return value: * SUCCESS or FAILED or NEEDS_RETRY - **/ + */ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd, int cmnd_size, int timeout, int copy_sense) { @@ -842,7 +842,7 @@ retry_tur: } } -/** +/* * scsi_eh_abort_cmds - abort canceled commands. * @shost: scsi host being recovered. * @eh_done_q: list_head for processed commands. @@ -853,7 +853,7 @@ retry_tur: * command that has timed out. if the command simply failed, it makes * no sense to try and abort the command, since as far as the shost * adapter is concerned, it isn't running. - **/ + */ static int scsi_eh_abort_cmds(struct list_head *work_q, struct list_head *done_q) { @@ -990,17 +990,17 @@ static int scsi_eh_stu(struct Scsi_Host *shost, } -/** +/* * scsi_eh_bus_device_reset - send bdr if needed * @shost: scsi host being recovered. - * @eh_done_q: list_head for processed commands. + * @done_q: list_head for processed commands. * * Notes: * Try a bus device reset. still, look to see whether we have multiple * devices that are jammed or not - if we have multiple devices, it * makes no sense to try bus_device_reset - we really would need to try * a bus_reset instead. - **/ + */ static int scsi_eh_bus_device_reset(struct Scsi_Host *shost, struct list_head *work_q, struct list_head *done_q) @@ -1115,11 +1115,11 @@ static int scsi_try_host_reset(struct scsi_cmnd *scmd) return rtn; } -/** +/* * scsi_eh_bus_reset - send a bus reset * @shost: scsi host being recovered. * @eh_done_q: list_head for processed commands. 
- **/ + */ static int scsi_eh_bus_reset(struct Scsi_Host *shost, struct list_head *work_q, struct list_head *done_q) @@ -1500,12 +1500,12 @@ static void scsi_restart_operations(struct Scsi_Host *shost) } #if !defined(__VMKLNX__) -/** +/* * scsi_eh_ready_devs - check device ready state and recover if not. * @shost: host to be recovered. * @eh_done_q: list_head for processed commands. * - **/ + */ static void scsi_eh_ready_devs(struct Scsi_Host *shost, struct list_head *work_q, struct list_head *done_q) diff --git a/vmkdrivers/src_92/vmklinux_92/vmware/linux_block.c b/vmkdrivers/src_92/vmklinux_92/vmware/linux_block.c index 93c893fddf3afed8a61442b064a64412ec1c0237..bc30116d5e50856112e9b4bad807d013df7f6410 100644 --- a/vmkdrivers/src_92/vmklinux_92/vmware/linux_block.c +++ b/vmkdrivers/src_92/vmklinux_92/vmware/linux_block.c @@ -2707,7 +2707,7 @@ __generic_unplug_device(request_queue_t *q, void *data) VMKAPI_MODULE_CALL_VOID(BLOCK_GET_ID(dev), q->request_fn, q); } -/** +/* * generic_unplug_device - fire a request queue * @q: The &request_queue_t in question * @@ -2717,7 +2717,7 @@ __generic_unplug_device(request_queue_t *q, void *data) * is still adding and merging requests on the queue. Once the queue * gets unplugged, the request_fn defined for the queue is invoked and * transfers started. - **/ + */ void generic_unplug_device(request_queue_t *q, void *data) { diff --git a/vmkdrivers/src_92/vmklinux_92/vmware/linux_pci.c b/vmkdrivers/src_92/vmklinux_92/vmware/linux_pci.c index e8f36fd7a5b9236aa57ab7dae8af81402254d899..0baeefaf48598b97b7b088c8cd9e12bf3b55eccb 100644 --- a/vmkdrivers/src_92/vmklinux_92/vmware/linux_pci.c +++ b/vmkdrivers/src_92/vmklinux_92/vmware/linux_pci.c @@ -179,6 +179,11 @@ LinuxPCILegacyIntrVectorSet(LinuxPCIDevExt *pciDevExt) if (status != VMK_OK) { VMKLNX_WARN("Could not allocate legacy PCI interrupt for device %s", pciDevExt->linuxDev.dev.bus_id); + /* + * Set the irq field to zero so that no attempt is made later + * to free the interrupt cookie. + */ + pciDevExt->linuxDev.irq = 0; return; } @@ -196,6 +201,17 @@ LinuxPCIIntrVectorFree(LinuxPCIDevExt *pciDevExt) vmk_IntrCookie intrCookie; VMK_ReturnStatus status; + + /* + * Check whether the device even has a currently valid, + * allocated interrupt. We attempt to allocate a legacy interrupt + * on device-insert, but not all devices support legacy interrupts. + */ + if (pciDevExt->linuxDev.irq == 0) { + VMK_ASSERT(pciDevExt->linuxDev.msi_enabled == 0); + return; + } + /* * Get the associated intrCookie for the irq. */ @@ -494,14 +510,13 @@ LinuxPCIDeviceRemoved(vmk_PCIDevice vmkDev) VMKAPI_MODULE_CALL_VOID(pciDevExt->moduleID, devres_release_all, &linuxDev->dev); } - /* free the legacy interrupt setup during LinuxPCIDeviceInserted() */ - LinuxPCIIntrVectorFree(pciDevExt); - linuxDev->driver = NULL; linuxDev->dev.driver = NULL; LinuxPCI_DeviceUnclaimed(pciDevExt); quit: + /* free the legacy interrupt setup during LinuxPCIDeviceInserted() */ + LinuxPCIIntrVectorFree(pciDevExt); /* * If device is physically removed, free up the structures. Otherwise, @@ -727,8 +742,8 @@ LinuxPCI_EnableMSI(struct pci_dev* dev) VMK_ASSERT(pciDevExt->vmkDev); /* - * Remove the previous legacy interrupt before requesting - * MSI interrupt. + * Remove the previous legacy interrupt, if it exists, + * before requesting an MSI interrupt. 
*/ LinuxPCIIntrVectorFree(pciDevExt); diff --git a/vmkdrivers/src_92/vmklinux_92/vmware/linux_scsi_lld_if.c b/vmkdrivers/src_92/vmklinux_92/vmware/linux_scsi_lld_if.c index bc69112c3a59106af29c515c9659aa7c1ba18e31..0734e1fb5f9aa1f1741d18861c975e58188af0f9 100644 --- a/vmkdrivers/src_92/vmklinux_92/vmware/linux_scsi_lld_if.c +++ b/vmkdrivers/src_92/vmklinux_92/vmware/linux_scsi_lld_if.c @@ -155,24 +155,6 @@ static void scsi_offline_device(struct scsi_device *sdev); static void vmklnx_scsi_update_lun_path(struct scsi_device *sdev, void *data); static inline void vmklnx_init_scmd(struct scsi_cmnd *scmd, struct scsi_device *dev); -/** - ********************************************************************** - * \globalfn scsi_host_alloc -- allocate a SCSI Host structure - * - * \param pointer to scsi host template - * \param additional size to be allocated as requested by the driver - * \return On Success pointer to newly allocated Scsi_Host structure - * \par Include: - * scsi/scsi_host.h - * \par ESX Deviation Notes: - * This interface will assume a default value for - * Scsi_Host->dma_boundary to be 0 if the Scsi Host template does - * not specify a value for dma_boundary. This is different from - * the linux behavior which defaults to a 4G boundary in a similar - * situation. - * \sa None. - ********************************************************************** - */ /** * scsi_host_alloc - allocate a Scsi_Host structure * @sht: pointer to scsi host template @@ -316,18 +298,18 @@ failed_sh_alloc: EXPORT_SYMBOL(scsi_host_alloc); /** - ********************************************************************** - * \internalfn scsi_setup_command_freelist -- Setup the command freelist + * Setup the command freelist + * @sh: host to allocate the freelist for + * + * RETURN VALUE: + * 0 on success * - * \param shost - host to allocate the freelist for - * \return 0 on success - * \par Include: + * Include: * scsi/scsi_host.h - * \par ESX Deviation Notes: + * + * ESX Deviation Notes: * Our scsi_cmnd cache also includes space for the maximally sized * scatterlist array. - * \sa None. - ********************************************************************** */ static int scsi_setup_command_freelist(struct Scsi_Host *sh) { @@ -929,17 +911,6 @@ void vmklnx_scsi_register_poll_handler(struct Scsi_Host *sh, } EXPORT_SYMBOL(vmklnx_scsi_register_poll_handler); -/** - ********************************************************************** - * \globalfn vmklnx_scsi_set_path_maxsectors -- - * Set the max. transfer size for a path - * - * \param[in] sdev - the scsi_device struct representing the target path - * \param[in] max_sectors - the max. 
transfer size in 512-byte sectors - * \return None - * - ********************************************************************** - */ /** * vmklnx_scsi_set_path_maxsectors - set the max transfer size for a path * @sdev: a pointer to scsi_device struct representing the target path @@ -971,18 +942,15 @@ vmklnx_scsi_set_path_maxsectors(struct scsi_device *sdev, EXPORT_SYMBOL(vmklnx_scsi_set_path_maxsectors); /** - ********************************************************************** - * \internalfn scsi_destroy_command_freelist -- Release the command freelist + * scsi_destroy_command_freelist -- Release the command freelist * for a scsi host + * @sh: host that's freelist is going to be destroyed * - * \param shost - host that's freelist is going to be destroyed - * \return None - * \par Include: - * scsi/scsi_host.h - * \par ESX Deviation Notes: + * RETURN VALUE: * None - * \sa None. - ********************************************************************** + * + * Include: + * scsi/scsi_host.h */ static void scsi_destroy_command_freelist(struct Scsi_Host *sh) @@ -1004,9 +972,8 @@ scsi_destroy_command_freelist(struct Scsi_Host *sh) } /* - * \sa All scanning functions needs PSA backend support. Will depend on + * All scanning functions needs PSA backend support. Will depend on * completion of PR166189 - ********************************************************************** */ /** * scsi_scan_host - issue wild card scan for the given SCSI host @@ -1097,8 +1064,6 @@ scsi_scan_host(struct Scsi_Host *sh) EXPORT_SYMBOL(scsi_scan_host); /* - *---------------------------------------------------------------------- - * * vmklnx_scsi_update_lun_path -- * * Callback function used in scsi_scan_target which updates @@ -1109,8 +1074,6 @@ EXPORT_SYMBOL(scsi_scan_host); * * Side effects: * None - * - *---------------------------------------------------------------------- */ static void vmklnx_scsi_update_lun_path(struct scsi_device *sdev, void *data) @@ -1212,18 +1175,14 @@ target_scan: EXPORT_SYMBOL(scsi_scan_target); /** - ********************************************************************** - * \globalfn scsi_register -- Old style passive scsi registration + * Old style passive scsi registration + * @sht: scsi_host_template + * @privsize: size of the private data area * - * \param scsi_host_template, privatesize - * \return None - * \par Include: - * - * \par ESX Deviation Notes: - * - * \sa None. - ********************************************************************** + * RETURN VALUE: + * Pointer to the newly registered Scsi_Host */ +/* _VMKLNX_CODECHECK_: scsi_register*/ struct Scsi_Host * scsi_register(struct scsi_host_template *sht, int privsize) { @@ -1245,17 +1204,11 @@ scsi_register(struct scsi_host_template *sht, int privsize) } /** - ********************************************************************** - * \globalfn scsi_unregister -- Old style passive scsi unregistration + * Old style passive scsi unregistration + * @sh: Scsi_Host to unregister * - * \param scsi_host_template, privatesize - * \return None - * \par Include: - * - * \par ESX Deviation Notes: - * - * \sa None. - ********************************************************************** + * RETURN VALUE: + * None */ void scsi_unregister(struct Scsi_Host *sh) @@ -1271,19 +1224,17 @@ scsi_unregister(struct Scsi_Host *sh) } /** - ********************************************************************** - * \globalfn ScsiModifyQueueDepth -- Called by drivers to notify the vmkernel + * Called by drivers to notify the vmkernel + * of the new queue depth. 
* - * \param arg - work queue payload containing vmkAdapter, channel, id, lun, + * @work: work queue payload containing vmkAdapter, channel, id, lun, * queue depth - * \return None - * \par Include: - * scsi/scsi_host.h - * \par ESX Deviation Notes: + * + * RETURN VALUE: * None - * \sa None - ********************************************************************** + * + * Include: + * scsi/scsi_host.h */ static void vmklnx_scsi_modify_queue_depth(struct work_struct *work) @@ -1424,6 +1375,7 @@ EXPORT_SYMBOL(scsi_adjust_queue_depth); * @channel: channel number * @id: target id number * @lun: logical unit number + * @hostdata: passed to scsi_alloc_sdev() * * Create new scsi device instance * @@ -1708,6 +1660,11 @@ EXPORT_SYMBOL(scsi_bios_ptable); /** * scsi_partsize - non-operational function in release build. + * @buf: partition table, see scsi_bios_ptable + * @capacity: size of the disk in sectors + * @cyls: cylinders + * @hds: heads + * @secs: sectors * * This is a non-operational function in release build, but can cause panic if being called in non-release mode. * @@ -1733,18 +1690,20 @@ scsi_partsize(unsigned char *buf, unsigned long capacity, EXPORT_SYMBOL(scsi_partsize); -/** - ********************************************************************** - * \internalfn vmklnx_scsi_alloc_target_conditionally -- Allocate a target +/* + * vmklnx_scsi_alloc_target_conditionally -- Allocate a target + * + * @parent: parent of the target (need not be a scsi host) + * @channel: target channel number (zero if no channels) + * @id: target id number + * @force_alloc_flag: if set, force allocation of a scsi target. + * @old_target: output value; set to 1 if the target already exists, 0 otherwise. + * + * RETURN VALUE: + * Pointer to an existing scsi target or a new scsi target. + * - * \param parent, channel, id - * \return None - * \par Include: + * Include: * scsi/scsi_host.h - * \par ESX Deviation Notes: - * Needs - * \sa None. - ********************************************************************** */ struct scsi_target * vmklnx_scsi_alloc_target_conditionally(struct device *parent, int channel, uint id, @@ -1818,16 +1777,13 @@ alloc_and_return: } /** - ********************************************************************** - * \internalfn scsi_alloc_target -- Allocate a new or find an existing target - * - * \param parent, channel, id - * \return None - * \par Include: - * \par ESX Deviation Notes: - * Needs - * \sa None. - ********************************************************************** + * Allocate a new or find an existing target + * @parent: parent of the target (need not be a scsi host) + * @channel: target channel number (zero if no channels) + * @id: target id number + * + * RETURN VALUE: + * Pointer to scsi target */ struct scsi_target * vmklnx_scsi_alloc_target(struct device *parent, int channel, uint id) @@ -1840,7 +1796,7 @@ EXPORT_SYMBOL(vmklnx_scsi_alloc_target); EXPORT_SYMBOL_ALIASED(vmklnx_scsi_alloc_target, scsi_alloc_target); /** - * scsi_alloc_target - Allocate a new or find an existing target + * Allocate a new or find an existing target + * @parent: parent device * @channel: target id * @id: target id @@ -1848,7 +1804,7 @@ EXPORT_SYMBOL_ALIASED(vmklnx_scsi_alloc_target, scsi_alloc_target); * Allocate a new or find an existing target * * RETURN VALUE: - * scsi_target. + * pointer to scsi target. 
*/ /* _VMKLNX_CODECHECK_: scsi_alloc_target */ struct scsi_target * @@ -1948,17 +1904,16 @@ scsi_alloc_target(struct device *parent, int channel, uint id) } /** - ********************************************************************** - * \internalfn vmklnx_scsi_find_target -- Find a matching target + * Find a matching target + * @sh: scsi host + * @channel: channel number + * @id: target id * - * \param sh, channel, id - * \return None - * \par Include: + * RETURN VALUE: + * Pointer to the matching scsi target, or NULL if none is found + * + * Include: * scsi/scsi_host.h - * \par ESX Deviation Notes: - * Needs - * \sa None. - ********************************************************************** */ struct scsi_target * vmklnx_scsi_find_target(struct Scsi_Host *sh, @@ -1983,20 +1938,17 @@ vmklnx_scsi_find_target(struct Scsi_Host *sh, EXPORT_SYMBOL(vmklnx_scsi_find_target); -/** - ********************************************************************** - * \globalfn __scsi_device_lookup_by_target -- Find a matching device +/* + * Find a matching device * for given target * - * \param starget, lun - * \return None - * \par Include: + * RETURN VALUE: + * Pointer to the matching scsi device, or NULL if not found + * + * Include: * scsi/scsi_host.h - * \par ESX Deviation Notes: - * Needs - * \sa None. - * \comments - Usually called from interrupt context - ********************************************************************** + * + * comments - Usually called from interrupt context */ struct scsi_device * __scsi_device_lookup_by_target(struct scsi_target *starget, uint lun) @@ -2012,7 +1964,7 @@ __scsi_device_lookup_by_target(struct scsi_target *starget, uint lun) } /** - * scsi_device_lookup_by_target - find a device given the target + * find a device given the target * @starget: SCSI target pointer * @lun: Logical Unit Number * @@ -2046,20 +1998,20 @@ EXPORT_SYMBOL(scsi_device_lookup_by_target); /** - ********************************************************************** - * \globalfn scsi_alloc_sdev -- Allocate and set up a scsi device + * Allocate and set up a scsi device + * @starget: which target to allocate a scsi_device for + * @lun: which lun + * @hostdata: usually NULL and set by ->slave_alloc instead * - * \param starget, lun, hostdata - * \return On failure to alloc sdev, return NULL + * RETURN VALUE: + * On failure to alloc sdev, return NULL * On other failures, return ERR_PTR(-errno) * On Success, return pointer to sdev (which fails IF_ERR) - * \par Include: + * + * Include: * scsi/scsi_device.h - * \par ESX Deviation Notes: - * Needs - * \sa None. - * \comments - Gets the device ready for IO - ********************************************************************** + * + * comments - Gets the device ready for IO */ struct scsi_device * scsi_alloc_sdev(struct scsi_target *starget, unsigned int lun, void *hostdata) @@ -2181,18 +2133,16 @@ out_device_del: } /** - ********************************************************************** - * \globalfn scsi_destroy_sdev -- Destroy a scsi device + * scsi_destroy_sdev -- Destroy a scsi device + * @sdev: scsi device to destroy + * + * RETURN VALUE: + * None * - * \param sdevice - * \return Pointer to sdev - * \par Include: + * Include: * scsi/scsi_device.h - * \par ESX Deviation Notes: - * Needs - * \sa None. 
- * \comments - Removes from all the lists as well - ********************************************************************** + * + * comments - Removes from all the lists as well */ void scsi_destroy_sdev(struct scsi_device *sdev) @@ -2211,19 +2161,20 @@ scsi_destroy_sdev(struct scsi_device *sdev) } -/** - ********************************************************************** - * \globalfn __scsi_device_lookup -- Look up for a scsi device given BTL +/* + * __scsi_device_lookup -- Look up a scsi device given BTL + * @sh: SCSI host pointer + * @channel: SCSI channel (zero if only one channel) + * @id: SCSI target number (physical unit number) + * @lun: SCSI Logical Unit Number * - * \param sh, channel, id, lun - * \return Pointer to sdev - * \par Include: + * RETURN VALUE: + * Pointer to scsi device + * + * Include: * scsi/scsi_device.h - * \par ESX Deviation Notes: - * Needs - * \sa None. - * \comments - Called from IRQ context or with lock held - ********************************************************************** + * + * comments - Called from IRQ context or with lock held */ struct scsi_device * __scsi_device_lookup(struct Scsi_Host *sh, uint channel, uint id, uint lun) @@ -2272,18 +2223,16 @@ EXPORT_SYMBOL(scsi_device_lookup); /** - ********************************************************************** - * \globalfn __scsi_get_command -- Return a Scsi_Cmnd + * __scsi_get_command -- Return a Scsi_Cmnd + * + * @sh: scsi host + * @gfp_mask: allocator flags + * + * RETURN VALUE: + * Pointer to a scsi_cmnd + * - * \param sh, mask - * \return Pointer to sdev - * \par Include: + * Include: * scsi/scsi_device.h - * \par ESX Deviation Notes: - * Needs - * \sa None. - * \comments - - ********************************************************************** */ struct scsi_cmnd * __scsi_get_command(struct Scsi_Host *sh, gfp_t gfp_mask) @@ -2925,17 +2874,15 @@ struct Scsi_Host *scsi_host_lookup(unsigned short hostnum) EXPORT_SYMBOL(scsi_host_lookup); /** - ********************************************************************** - * \internalfn scsi_forget_host -- Notify all LUNs that a host is going down + * scsi_forget_host -- Notify all LUNs that a host is going down * - * \param shost - host that is being removed - * \return None - * \par Include: - * scsi/scsi_host.h - * \par ESX Deviation Notes: + * @sh: host that is being removed + * + * RETURN VALUE: * None - * \sa None. - ********************************************************************** + * + * Include: + * scsi/scsi_host.h */ static void scsi_forget_host(struct Scsi_Host *sh) { @@ -3069,7 +3016,7 @@ EXPORT_SYMBOL(scsi_execute_req); * @timeout: command timeout * @retries: number of retries before failing * @data: returns a structure abstracting the mode header data - * @sense: place to put sense data (or NULL if no sense to be collected). + * @sshdr: place to put sense data (or NULL if no sense to be collected). * must be SCSI_SENSE_BUFFERSIZE big. * * Returns zero if unsuccessful, or the header offset (either 4 @@ -3169,19 +3116,6 @@ retry: return result; } -/** - ********************************************************************** - * \globalfn scsi_is_target_device -- Check if this is target type device - * - * \param device struct associated with this target - * \return 0 if not a valid device - * \par Include: - * scsi/scsi_device.h - * \par ESX Deviation Notes: - * None - * \sa None. 
- ********************************************************************** - */ /** * scsi_is_target_device - Check if this is target type device * @dev: device struct associated with this target @@ -3597,17 +3531,14 @@ scsi_rescan_device(struct device *dev) EXPORT_SYMBOL(scsi_rescan_device); /** - ************************************************************************** - * \globalfn scsi_device_set_state - Take the given device through the device + * scsi_device_set_state - Take the given device through the device * state model. - * \param sdev scsi device to change the state of. - * \param state state to change to. - * \return zero if unsuccessful or an error if the requested transition + * @sdev: scsi device to change the state of. + * @state: state to change to. + * + * RETURN VALUE: + * Zero if successful, or an error if the requested transition * is illegal. - * \par ESX Deviation Notes: - * None - * \sa None - ************************************************************************** **/ /** * scsi_device_set_state - Set scsi state to the given scsi device @@ -3633,14 +3564,11 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state) EXPORT_SYMBOL(scsi_device_set_state); /** - ************************************************************************** - * \globalfn vmklnx_get_vmhba_name - Provide the vmhba name - * \param shost - Scsi_Host for the adapter - * \return vmhba Name - * \par ESX Deviation Notes: - * None - * \sa None - ************************************************************************** + * vmklnx_get_vmhba_name - Provide the vmhba name + * @sh: Scsi_Host for the adapter + * + * RETURN VALUE: + * The vmhba name **/ char * vmklnx_get_vmhba_name(struct Scsi_Host *sh) @@ -3662,8 +3590,6 @@ vmklnx_get_vmhba_name(struct Scsi_Host *sh) EXPORT_SYMBOL(vmklnx_get_vmhba_name); /* - *---------------------------------------------------------------------- - * * vmklnx_scsi_free_host_resources -- * * Frees the common host resources @@ -3673,8 +3599,6 @@ EXPORT_SYMBOL(vmklnx_get_vmhba_name); * * Side effects: * None - * - *---------------------------------------------------------------------- */ static void vmklnx_scsi_free_host_resources(struct Scsi_Host *sh) @@ -3697,8 +3621,6 @@ vmklnx_scsi_free_host_resources(struct Scsi_Host *sh) /* - *---------------------------------------------------------------------- - * * scsi_host_dev_release -- * * This function is called when the ref count on the adapter goes to zero @@ -3710,8 +3632,6 @@ vmklnx_scsi_free_host_resources(struct Scsi_Host *sh) * * Side effects: * None - * - *---------------------------------------------------------------------- */ static void scsi_host_dev_release(struct device *dev) { @@ -3780,8 +3700,6 @@ vmklnx_destroy_adapter_tls(struct vmklnx_ScsiAdapter *vmklnx26ScsiAdapter) } /* - *---------------------------------------------------------------------- - * * vmklnx_scsi_unregister_host -- * * This function is called from a WQ. 
This fn unregisters ourselves @@ -3792,8 +3710,6 @@ vmklnx_destroy_adapter_tls(struct vmklnx_ScsiAdapter *vmklnx26ScsiAdapter) * * Side effects: * None - * - *---------------------------------------------------------------------- */ static void vmklnx_scsi_unregister_host(struct work_struct *work) @@ -3853,8 +3769,6 @@ vmklnx_scsi_unregister_host(struct work_struct *work) /* - *---------------------------------------------------------------------- - * * scsi_target_dev_release -- * * This function is called when the ref count on the target goes to zero @@ -3865,8 +3779,6 @@ vmklnx_scsi_unregister_host(struct work_struct *work) * * Side effects: * None - * - *---------------------------------------------------------------------- */ static void scsi_target_dev_release(struct device *dev) { @@ -3896,8 +3808,6 @@ static void scsi_target_dev_release(struct device *dev) } /* - *---------------------------------------------------------------------- - * * scsi_device_dev_release -- * * This function is called when the ref count on the device goes to zero @@ -3909,8 +3819,6 @@ static void scsi_target_dev_release(struct device *dev) * * Side effects: * None - * - *---------------------------------------------------------------------- */ static void scsi_device_dev_release(struct device *dev) { @@ -3990,7 +3898,7 @@ void scsi_finish_command(struct scsi_cmnd *cmd) /** * scsi_req_abort_cmd -- Request command recovery for the specified command - * cmd: pointer to the SCSI command of interest + * @cmd: pointer to the SCSI command of interest * * This function requests that SCSI Core start recovery for the * command by deleting the timer and adding the command to the eh @@ -3998,6 +3906,7 @@ void scsi_finish_command(struct scsi_cmnd *cmd) * implement their own error recovery MAY ignore the timeout event if * they generated scsi_req_abort_cmd. **/ + /* _VMKLNX_CODECHECK_: scsi_req_abort_cmd*/ void scsi_req_abort_cmd(struct scsi_cmnd *cmd) { VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE); @@ -4023,6 +3932,7 @@ void __scsi_done(struct scsi_cmnd *cmd) * Returns zero if successful or an error if the requested * transition is illegal. 
**/ + /* _VMKLNX_CODECHECK_: scsi_host_set_state*/ int scsi_host_set_state(struct Scsi_Host *shost, enum scsi_host_state state) { VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE); @@ -4078,17 +3988,12 @@ vmklnx_scsi_target_offline(struct device *dev) EXPORT_SYMBOL(vmklnx_scsi_target_offline); EXPORT_SYMBOL_ALIASED(vmklnx_scsi_target_offline, scsi_target_offline); -/** - ********************************************************************** - * \globalfn scsi_offline_device -- Mark the device offline +/* + * scsi_offline_device -- Mark the device offline + * @sdev: scsi device * - * \param sh - * \return None - * \par Include: - * \par ESX Deviation Notes: + * RETURN VALUE: * None - * \sa None - ********************************************************************** */ static void scsi_offline_device(struct scsi_device *sdev) @@ -4146,19 +4051,18 @@ scsi_offline_device(struct scsi_device *sdev) } /** - ********************************************************************** - * \globalfn vmklnx_scsi_get_num_ioqueue -- get num of queues to create - * - * \param Maximum queues adapter can create - * \return Number of queues to be created, returns 0 for no additional - * queues in adapter - * \par Include: - * \par ESX Deviation Notes: + * get num of queues to create + * @maxQueue: Maximum queues adapter can create + * + * RETURN VALUE: + * Number of queues to be created, or 0 for no additional + * queues in the adapter + * + * ESX Deviation Notes: * ESX specific API to query VMKernel for the number of queues to * be created - * \sa None - ********************************************************************** */ +/* _VMKLNX_CODECHECK_: vmklnx_scsi_get_num_ioqueue*/ int vmklnx_scsi_get_num_ioqueue(unsigned int maxQueue) { @@ -4171,19 +4075,19 @@ vmklnx_scsi_get_num_ioqueue(unsigned int maxQueue) EXPORT_SYMBOL(vmklnx_scsi_get_num_ioqueue); /** - ********************************************************************** - * \globalfn vmklnx_scsi_get_cmd_ioqueue_handle -- returns queue handle - * - * \param scsi_cmd, sh - * \return ioqueue handle - * \par Include: - * \par ESX Deviation Notes: + * returns queue handle + * @cmd: scsi command + * @sh: scsi host + * + * RETURN VALUE: + * ioqueue handle + * + * ESX Deviation Notes: * ESX specific API to query VMKernel for the ioqueues to be used * to issue the given scsi command * None - * \sa None - ********************************************************************** */ +/* _VMKLNX_CODECHECK_: vmklnx_scsi_get_cmd_ioqueue_handle*/ void * vmklnx_scsi_get_cmd_ioqueue_handle(struct scsi_cmnd *cmd, struct Scsi_Host *sh) @@ -4204,21 +4108,20 @@ vmklnx_scsi_get_cmd_ioqueue_handle(struct scsi_cmnd *cmd, EXPORT_SYMBOL(vmklnx_scsi_get_cmd_ioqueue_handle); /** - ********************************************************************** - * \globalfn vmklnx_scsi_register_ioqueue-- pass queue info to vmkernel - * - * \param sh, Number of I/O queues, scsi_ioqueue_info for each queue - * \return zero if successful, error code otherwise - * \par Include: - * \par ESX Deviation Notes: - * ESX specific API to register all the ioqueues in the adapter - * with VMKernel. For each ioqueue the driver should provide the - * handle and interrupt vector. The handle will be returned by - * vmkernel when vmklnx_scsi_get_cmd_ioqueue_handle() is invoked - * to select queue for a scsi_cmd - * \sa None - ********************************************************************** + * vmklnx_scsi_register_ioqueue -- pass queue info to vmkernel. 
+ * ESX specific API to register all the ioqueues in the adapter + * with VMKernel. For each ioqueue the driver should provide the + * handle and interrupt vector. The handle will be returned by + * vmkernel when vmklnx_scsi_get_cmd_ioqueue_handle() is invoked + * to select queue for a scsi_cmd. + * @sh: scsi host + * @numIoQueue: Number of I/O queues + * @q_info: scsi_ioqueue_info for each queue + * + * RETURN VALUE: + * Zero if successful, an error code otherwise */ +/* _VMKLNX_CODECHECK_: vmklnx_scsi_register_ioqueue*/ int vmklnx_scsi_register_ioqueue(struct Scsi_Host *sh, unsigned int numIoQueue, struct vmklnx_scsi_ioqueue_info q_info[]) @@ -4361,8 +4264,6 @@ SCSILinux_InitLLD(void) } /* - *---------------------------------------------------------------------- - * * SCSILinux_CleanupLLD * * Entry point for SCSI LLD-specific teardown. @@ -4373,8 +4274,6 @@ SCSILinux_InitLLD(void) * * Side effects: * Cleans up SCSI LLD log. - * - *---------------------------------------------------------------------- */ void SCSILinux_CleanupLLD(void) @@ -4384,7 +4283,7 @@ SCSILinux_CleanupLLD(void) /** * scsi_device_reprobe - Rediscover device status by reprobing if needed - * sdev: Pointer to scsi_device which needs status update + * @sdev: Pointer to scsi_device which needs status update * * This function will either hide a device which doesn't need exposure to upper * layers anymore or will initiate a scan to rediscover new devices on the @@ -4418,16 +4317,14 @@ EXPORT_SYMBOL(scsi_device_reprobe); /** - *********************************************************************** * vmklnx_scsi_cmd_get_sensedata - Get sense data length from SCSI cmd - * scmd: SCSI command - * buf: Buffer that will contain sense data - * bufLen: Length of the sense buffer + * @scmd: SCSI command + * @buf: Buffer that will contain sense data + * @bufLen: Length of the sense buffer * * Command is identified by scmd. The buffer buf is filled with sense data. * The length of the input buffer is passed through bufLen. * - ********************************************************************** */ /* _VMKLNX_CODECHECK_: vmklnx_scsi_cmd_get_sensedata */ int @@ -4441,19 +4338,16 @@ vmklnx_scsi_cmd_get_sensedata(struct scsi_cmnd *scmd, EXPORT_SYMBOL(vmklnx_scsi_cmd_get_sensedata); /** - *********************************************************************** * vmklnx_scsi_cmd_set_sensedata - Set sense data of SCSI cmd - * buf: Buffer that contains sense data to set - * scmd: SCSI command - * bufLen: Number of bytes to copy + * @buf: Buffer that contains sense data to set + * @scmd: SCSI command + * @bufLen: Number of bytes to copy * * Command is identified by scmd. The number of bytes to copy from buf * is passed as bufLen. If the number of bytes to copy is less than the * sense buffer size of SCSI cmd(obtained by calling * vmklnx_scsi_cmd_get_supportedsensedata_size), the remaining bytes in * SCSI cmd's sense buffer are set to 0. - * - ********************************************************************** */ /* _VMKLNX_CODECHECK_: vmklnx_scsi_cmd_set_sensedata */ int @@ -4467,13 +4361,11 @@ vmklnx_scsi_cmd_set_sensedata(uint8_t *buf, EXPORT_SYMBOL(vmklnx_scsi_cmd_set_sensedata); /** - *********************************************************************** * vmklnx_scsi_cmd_clear_sensedata - Clear sense data of SCSI cmd - * scmd: SCSI command + * @scmd: SCSI command * * Command is identified by scmd. 
* - ********************************************************************** */ /* _VMKLNX_CODECHECK_: vmklnx_scsi_cmd_clear_sensedata */ int @@ -4484,10 +4376,8 @@ vmklnx_scsi_cmd_clear_sensedata(struct scsi_cmnd *scmd) EXPORT_SYMBOL(vmklnx_scsi_cmd_clear_sensedata); /** - *********************************************************************** * vmklnx_scsi_cmd_get_supportedsensedata_size - Get scmd's sense buffer size * - ********************************************************************** */ /* _VMKLNX_CODECHECK_: vmklnx_scsi_cmd_get_supportedsensedata_size */ int diff --git a/vmkdrivers/src_92/vmklinux_92/vmware/linux_scsi_transport.c b/vmkdrivers/src_92/vmklinux_92/vmware/linux_scsi_transport.c index cbadbdc1a78a686e37484738c66f88bfa0b0070f..e17e0c367aab8c3bfcf5e38aba4fcdece22406d4 100644 --- a/vmkdrivers/src_92/vmklinux_92/vmware/linux_scsi_transport.c +++ b/vmkdrivers/src_92/vmklinux_92/vmware/linux_scsi_transport.c @@ -144,19 +144,17 @@ static int fc_host_setup(struct Scsi_Host *shost); static void fc_host_free(struct Scsi_Host *shost); static int sas_remove_scsi_target(struct device *dev, void *data); -/** - ********************************************************************** - * vmklnx_alloc_scsimod -- */ /** - * - * \brief alloc and init a vmklnx_ScsiModule +/* + * vmklnx_alloc_scsimod + * + * alloc and init a vmklnx_ScsiModule + * @type: transport type + * @data: transport data * - * \param type transport type - * \param data transport data + * RETURN VALUE: + * Initialized vmklnx_ScsiModule on success + * NULL if out of memory * - * \retval initialized vmklnx_ScsiModule - * \retval NULL out of memory * - ********************************************************************** */ static struct vmklnx_ScsiModule * vmklnx_alloc_scsimod(vmklnx_ScsiTransportType type, void *data) @@ -230,16 +228,13 @@ vmklnx_generic_san_attach_transport( EXPORT_SYMBOL(vmklnx_generic_san_attach_transport); /** - * vmklnx_generic_san_release_transport - Release generic transport - * @t: transport template - * * Releases a generic SAN transport previously registered with * vmklnx_generic_san_attach_transport. Caller must have previously * allocated generic transport with vmklnx_generic_san_attach_transport. + * @t: transport template * * RETURN VALUE: * None. - * */ /* _VMKLNX_CODECHECK_: vmklnx_generic_san_release_transport */ void @@ -257,19 +252,13 @@ vmklnx_generic_san_release_transport(struct scsi_transport_template *t) EXPORT_SYMBOL(vmklnx_generic_san_release_transport); /* - *---------------------------------------------------------------------- - * - * vmklnx_xsan_host_setup -- - * - * Initialize generic SAN host attributes + * Initialize generic SAN host attributes * * Results: * None. * * Side effects: * None. - * - *---------------------------------------------------------------------- */ int vmklnx_xsan_host_setup(struct Scsi_Host *shost) @@ -296,19 +285,13 @@ vmklnx_xsan_host_setup(struct Scsi_Host *shost) } /* - *---------------------------------------------------------------------- - * - * xsan_setup_transport_attrs -- - * - * Initialize generic SAN transport attributes + * Initialize generic SAN transport attributes * * Results: * None. * * Side effects: * None. 
- * - *---------------------------------------------------------------------- */ int xsan_setup_transport_attrs( struct Scsi_Host *shost, struct scsi_target *starget) @@ -324,19 +307,18 @@ xsan_setup_transport_attrs( struct Scsi_Host *shost, struct scsi_target *starget } /** - * spi_attach_transport - Attach pSCSI transport - * @ft: Pointer to the spi_function_template - * + * Attach pSCSI transport * Allocate and initialize all the vmklinux data structures and attach the * passed in pointer to the spi_function_template. + * @ft: Pointer to the spi_function_template * - * ESX Deviation Notes: - * This function also does the necessary initialization of data structures for - * the vmklinux storage stack - * * RETURN VALUE: * non-NULL is a success and is a pointer to the new template * NULL is a failure. + * + * ESX Deviation Notes: + * This function also does the necessary initialization of data structures for + * the vmklinux storage stack */ /* _VMKLNX_CODECHECK_: spi_attach_transport */ struct scsi_transport_template * @@ -369,11 +351,9 @@ spi_attach_transport(struct spi_function_template *ft) EXPORT_SYMBOL(spi_attach_transport); /** - * spi_release_transport - Releases pSCSI transport + * Releases pSCSI transport * @t: pointer to scsi_transport_template * - * Releases pSCSI transport - * * RETURN VALUE: * None */ @@ -396,19 +376,13 @@ void spi_release_transport(struct scsi_transport_template *t) EXPORT_SYMBOL(spi_release_transport); /* - *---------------------------------------------------------------------- - * - * spi_setup_transport_attrs -- - * - * Initialize pSCSI transport attributes + * Initialize pSCSI transport attributes * * Results: * None. * * Side effects: * None. - * - *---------------------------------------------------------------------- */ int spi_setup_transport_attrs(struct scsi_target *starget) @@ -507,6 +481,7 @@ EXPORT_SYMBOL(spi_dv_device); /** * scsi_device_quiesce - Block user issued commands. * @sdev: scsi device to quiesce. + * @ref: not in use. * * This works by trying to transition to the SDEV_QUIESCE state * (which must be a legal transition). When the device is in this @@ -542,6 +517,7 @@ vmklnx_scsi_device_quiesce(struct scsi_device *sdev, void *ref) /** * scsi_device_resume - Restart user issued commands to a quiesced device. * @sdev: scsi device to resume. + * @ref: not in use. * * Moves the device from quiesced back to running and restarts the * queues. @@ -571,13 +547,14 @@ scsi_target_resume(struct scsi_target *starget) } /** - * starget_for_each_device - helper to walk all devices of a target - * @starget: target whose devices we want to iterate over. - * + * helper to walk all devices of a target. * Using host_lock instead of reference counting * This traverses over each devices of @shost. The devices have * a reference that must be released by scsi_host_put when breaking * out of the loop. host_lock can not be held on this + * @stgt: target whose devices we want to iterate over. + * @data: Opaque data passed to each function call. + * @fn: Function to call on each device. */ /* _VMKLNX_CODECHECK_: starget_for_each_device */ void @@ -602,8 +579,6 @@ starget_for_each_device(struct scsi_target *stgt, void * data, EXPORT_SYMBOL(starget_for_each_device); /* - *---------------------------------------------------------------------- - * * spi_dv_device_internal -- * * Process DV for pSCSI @@ -613,8 +588,6 @@ EXPORT_SYMBOL(starget_for_each_device); * * Side effects: * None. 
- * - *---------------------------------------------------------------------- */ static void spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer) @@ -730,11 +703,9 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer) } /* - *---------------------------------------------------------------------- - * * spi_dv_retrain -- * - * Perform various DV + * Perform various DV * * Results: * None. @@ -742,7 +713,6 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer) * Side effects: * None. * - *---------------------------------------------------------------------- */ static enum spi_compare_returns spi_dv_retrain(struct scsi_device *sdev, u8 *buffer, u8 *ptr, @@ -819,11 +789,7 @@ spi_dv_retrain(struct scsi_device *sdev, u8 *buffer, u8 *ptr, } /* - *---------------------------------------------------------------------- - * - * spi_dv_device_compare_inquiry -- - * - * This is for the simplest form of Domain Validation: a read test + * This is for the simplest form of Domain Validation: a read test * on the inquiry data from the device * * Results: @@ -832,7 +798,6 @@ spi_dv_retrain(struct scsi_device *sdev, u8 *buffer, u8 *ptr, * Side effects: * None. * - *---------------------------------------------------------------------- */ static enum spi_compare_returns spi_dv_device_compare_inquiry(struct scsi_device *sdev, u8 *buffer, @@ -878,25 +843,13 @@ spi_dv_device_compare_inquiry(struct scsi_device *sdev, u8 *buffer, } /* - *---------------------------------------------------------------------- - * - * spi_execute -- - * - * Send down Commands for DV - * - * Results: - * None. - * - * Side effects: - * None. - * - *---------------------------------------------------------------------- + * Send down Commands for DV */ -static int +static int spi_execute(struct scsi_device *sdev, const void *cmd, - enum dma_data_direction dir, - void *buffer, unsigned bufflen, - struct scsi_sense_hdr *sshdr) + enum dma_data_direction dir, + void *buffer, unsigned bufflen, + struct scsi_sense_hdr *sshdr) { int i, result; unsigned char sense[SCSI_SENSE_BUFFERSIZE]; @@ -905,18 +858,18 @@ spi_execute(struct scsi_device *sdev, const void *cmd, for(i = 0; i < DV_RETRIES; i++) { result = scsi_execute(sdev, cmd, dir, buffer, bufflen, - sense, DV_TIMEOUT, /* retries */ 1, - REQ_FAILFAST); + sense, DV_TIMEOUT, /* retries */ 1, + REQ_FAILFAST); if (result & DRIVER_SENSE) { struct scsi_sense_hdr sshdr_tmp; - if (!sshdr) { - sshdr = &sshdr_tmp; + if (!sshdr) { + sshdr = &sshdr_tmp; } if (scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr) - && sshdr->sense_key == UNIT_ATTENTION) { - continue; - } + && sshdr->sense_key == UNIT_ATTENTION) { + continue; + } } break; } @@ -924,19 +877,13 @@ spi_execute(struct scsi_device *sdev, const void *cmd, } /* - *---------------------------------------------------------------------- - * - * sprint_frac -- - * - * Print the fraction details + * Print the fraction details * * Results: * None. * * Side effects: * None. - * - *---------------------------------------------------------------------- */ static int sprint_frac(char *dest, int value, int denom) @@ -961,12 +908,11 @@ sprint_frac(char *dest, int value, int denom) } /** - * spi_display_xfer_agreement - Prints transfer details - * @starget: pointer to scsi_target - * + * Prints transfer details. * Each SPI port is required to maintain a transfer agreement for each * other port on the bus. 
This function prints a one-line summary of * the current agreement; + * @starget: pointer to scsi_target * * RETURN VALUE: * None @@ -1031,21 +977,20 @@ spi_display_xfer_agreement(struct scsi_target *starget) } EXPORT_SYMBOL(spi_display_xfer_agreement); -/** - ********************************************************************** - * \globalfn spi_populate_ppr_msg -- Populate the message fields - * - * \param Pointer to message field - * \param Period - * \param Offset - * \param Width - * \param Options - * \return 8 - * \par Include +/* + * spi_populate_ppr_msg -- Populate the message fields + * + * @msg: Pointer to message field + * @period: Period + * @offset: Offset + * @width: Width + * @options: Options + * + * RETURN VALUE: + * return 8 + * + * Include * scsi/scsi_transport_spi.h - * \par ESX Deviation Notes None - * \sa None. - ********************************************************************** */ int spi_populate_ppr_msg(unsigned char *msg, int period, int offset, @@ -1064,18 +1009,17 @@ spi_populate_ppr_msg(unsigned char *msg, int period, int offset, } EXPORT_SYMBOL(spi_populate_ppr_msg); -/** - ********************************************************************** - * \globalfn spi_populate_width_msg -- Populate the width message fields +/* + * spi_populate_width_msg -- Populate the width message fields * - * \param Pointer to message field - * \param Width - * \return 4 - * \par Include + * @msg: Pointer to message field + * @width: Width + * + * RETURN VALUE: + * return 4 + * + * Include * scsi/scsi_transport_spi.h - * \par ESX Deviation Notes None - * \sa None. - ********************************************************************** */ int spi_populate_width_msg(unsigned char *msg, int width) @@ -1089,19 +1033,17 @@ spi_populate_width_msg(unsigned char *msg, int width) } EXPORT_SYMBOL(spi_populate_width_msg); -/** - ********************************************************************** - * \globalfn spi_populate_sync_msg -- Populate the sync message fields - * - * \param Pointer to message field - * \param Period - * \param Offset - * \return 5 - * \par Include +/* + * spi_populate_sync_msg -- Populate the sync message fields + * @msg: Pointer to message field + * @period: Period + * @offset: Offset + * + * RETURN VALUE: + * return 5 + * + * Include * scsi/scsi_transport_spi.h - * \par ESX Deviation Notes None - * \sa None. - ********************************************************************** */ int spi_populate_sync_msg(unsigned char *msg, int period, int offset) @@ -1169,10 +1111,8 @@ fc_attach_transport(struct fc_function_template *ft) EXPORT_SYMBOL(fc_attach_transport); /** - * fc_release_transport - releases an FC transport - * @t: scsi_transport_template as returned by fc_attach_transport - * * Releases an FC transport previously registered with fc_attach_transport + * @t: scsi_transport_template as returned by fc_attach_transport * * SEE ALSO: * fc_attach_transport @@ -1203,8 +1143,6 @@ void fc_release_transport(struct scsi_transport_template *t) EXPORT_SYMBOL(fc_release_transport); /* - *---------------------------------------------------------------------- - * * vmklnx_fc_host_setup * * Slightly deviated version of fc_host_setup. 
Exports information @@ -1215,8 +1153,6 @@ EXPORT_SYMBOL(fc_release_transport); * * Side effects: * None - * - *---------------------------------------------------------------------- */ int fc_host_setup(struct Scsi_Host *shost) @@ -1303,8 +1239,6 @@ fc_host_setup(struct Scsi_Host *shost) } /* - *---------------------------------------------------------------------- - * * vmklnx_fc_host_setup * * Sets ups FC adapters and corresponding mgmt information @@ -1314,7 +1248,6 @@ fc_host_setup(struct Scsi_Host *shost) * * Side effects: * None - *---------------------------------------------------------------------- */ int vmklnx_fc_host_setup(struct Scsi_Host *shost) { @@ -1339,8 +1272,6 @@ int vmklnx_fc_host_setup(struct Scsi_Host *shost) } /* - *---------------------------------------------------------------------- - * * vmklnx_fcoe_host_setup * * Sets ups FCoE adapters and corresponding mgmt information @@ -1350,7 +1281,6 @@ int vmklnx_fc_host_setup(struct Scsi_Host *shost) * * Side effects: * None - *---------------------------------------------------------------------- */ int vmklnx_fcoe_host_setup(struct Scsi_Host *shost) { @@ -1376,8 +1306,6 @@ int vmklnx_fcoe_host_setup(struct Scsi_Host *shost) } /* - *---------------------------------------------------------------------- - * * fc_host_free * * Free's up resources allocated to the FC host @@ -1387,8 +1315,6 @@ int vmklnx_fcoe_host_setup(struct Scsi_Host *shost) * * Side effects: * None - * - *---------------------------------------------------------------------- */ void fc_host_free(struct Scsi_Host *shost) @@ -1421,8 +1347,6 @@ fc_host_free(struct Scsi_Host *shost) /* - *---------------------------------------------------------------------- - * * vmklnx_fc_host_free * * Frees up fc host information @@ -1432,7 +1356,6 @@ fc_host_free(struct Scsi_Host *shost) * * Side effects: * None - *---------------------------------------------------------------------- */ void vmklnx_fc_host_free(struct Scsi_Host *shost) @@ -1446,8 +1369,6 @@ vmklnx_fc_host_free(struct Scsi_Host *shost) } /* - *---------------------------------------------------------------------- - * * vmklnx_fcoe_host_free * * Frees up fcoe host information @@ -1457,7 +1378,6 @@ vmklnx_fc_host_free(struct Scsi_Host *shost) * * Side effects: * None - *---------------------------------------------------------------------- */ void vmklnx_fcoe_host_free(struct Scsi_Host *shost) @@ -1471,18 +1391,17 @@ vmklnx_fcoe_host_free(struct Scsi_Host *shost) } /** - * fc_remove_host - Called to terminate any fc_transport related elements - * for a scsi host + * Called to terminate any fc_transport related elements + * for a scsi host. + * This routine is expected to be called immediately preceeding the + * call from the driver to scsi_remove_host(). * @shost: Pointer to struct Scsi_Host * - * This routine is expected to be called immediately preceeding the - * call from the driver to scsi_remove_host() + * RETURN VALUE: + * None * * ESX Deviation Notes: * Removes vports along with rports - * - * RETURN VALUE: - * None */ /* _VMKLNX_CODECHECK_: fc_remove_host */ void @@ -1545,10 +1464,7 @@ fc_remove_host(struct Scsi_Host *shost) EXPORT_SYMBOL(fc_remove_host); /* - *---------------------------------------------------------------------- - * - * fc_rport_create --- - * allocates and creates a remote FC port. + * allocates and creates a remote FC port. 
* * Results: * Pointer to new rport that is created @@ -1556,8 +1472,8 @@ EXPORT_SYMBOL(fc_remove_host); * Side effects: * None * - *---------------------------------------------------------------------- */ +/* _VMKLNX_CODECHECK_: fc_rport_create*/ struct fc_rport * fc_rport_create(struct Scsi_Host *shost, int channel, struct fc_rport_identifiers *ids) @@ -1660,12 +1576,8 @@ exit_rport_create: } /** - * fc_remote_port_add - notifies the fc transport of the existence - * of a remote FC port - * @shost: scsi host the remote port is connected to - * @channel: Channel on shost port connected to - * @ids: The world wide names, fc address, and FC4 port - * roles for the remote port + * notifies the fc transport of the existence + * of a remote FC port. * * The LLDD calls this routine to notify the transport of the existence * of a remote port. The LLDD provides the unique identifiers (wwpn,wwn) @@ -1689,6 +1601,10 @@ exit_rport_create: * internally on behalf of consistent target id mappings. If found, the * remote port structure will be reused. Otherwise, a new remote port * structure will be allocated. + * @shost: scsi host the remote port is connected to + * @channel: Channel on shost port connected to + * @ids: The world wide names, fc address, and FC4 port + * roles for the remote port * * RETURN VALUE: * Returns a remote port structure @@ -1894,9 +1810,8 @@ EXPORT_SYMBOL(fc_remote_port_add); /** - * fc_remote_port_delete - notifies the fc transport that a remote - * port is no longer in existence - * @rport: The remote port that no longer exists + * notifies the fc transport that a remote + * port is no longer in existence * * The LLDD calls this routine to notify the transport that a remote * port is no longer part of the topology. Although a port @@ -1940,6 +1855,10 @@ EXPORT_SYMBOL(fc_remote_port_add); * * This function cannot be called from interrupt context and assumes no * locks are held on entry. + * @rport: The remote port that no longer exists + * + * RETURN VALUE: + * None * * ESX Deviation Notes: * The link timeout can be set by the driver in @@ -1947,10 +1866,6 @@ EXPORT_SYMBOL(fc_remote_port_add); * The lesser of fast IO fail timeout of the rport and the * link timeout is selected as the time period to wait before * the rport is freed. - * - * RETURN VALUE: - * None - * **/ /* _VMKLNX_CODECHECK_: fc_remote_port_delete */ void @@ -2053,10 +1968,8 @@ fc_remote_port_delete(struct fc_rport *rport) EXPORT_SYMBOL(fc_remote_port_delete); /** - * fc_remote_port_rolechg - notifies the fc transport that the roles - * on a remote may have changed - * @rport: The remote port that changed - * @roles: Private (Transport-managed) Attribute + * notifies the fc transport that the roles + * on a remote may have changed. * * The LLDD calls this routine to notify the transport that the roles * on a remote port may have changed. The largest effect of this is @@ -2070,6 +1983,10 @@ EXPORT_SYMBOL(fc_remote_port_delete); * * Should not be called from interrupt context. * + * @rport: The remote port that changed + * @roles: Private (Transport-managed) Attribute + * + * * Notes: * This routine assumes no locks are held on entry * @@ -2156,8 +2073,6 @@ fc_remote_port_rolechg(struct fc_rport *rport, u32 roles) EXPORT_SYMBOL(fc_remote_port_rolechg); /* - *---------------------------------------------------------------------- - * * fc_timeout_deleted_rport - Timeout handler for a deleted remote port that * was a SCSI target (thus was blocked), and failed * to return in the alloted time. 
@@ -2167,8 +2082,6 @@ EXPORT_SYMBOL(fc_remote_port_rolechg); * * Side effects: * None - * - *---------------------------------------------------------------------- */ static void fc_timeout_deleted_rport(struct work_struct *work) @@ -2279,8 +2192,6 @@ fc_timeout_deleted_rport(struct work_struct *work) } /* - *---------------------------------------------------------------------- - * * fc_timeout_fail_rport_io - Timeout handler for a fast io failing on a * disconnected SCSI target. * @@ -2289,8 +2200,6 @@ fc_timeout_deleted_rport(struct work_struct *work) * * Side effects: * None - * - *---------------------------------------------------------------------- */ static void fc_timeout_fail_rport_io(struct work_struct *work) @@ -2319,8 +2228,6 @@ fc_timeout_fail_rport_io(struct work_struct *work) } /* - *---------------------------------------------------------------------- - * * fc_scsi_scan_rport - called to perform a scsi scan on a remote port. * * Results: @@ -2328,8 +2235,6 @@ fc_timeout_fail_rport_io(struct work_struct *work) * * Side effects: * None - * - *---------------------------------------------------------------------- */ static void fc_scsi_scan_rport(struct work_struct *work) @@ -2363,7 +2268,6 @@ fc_scsi_scan_rport(struct work_struct *work) * * RETURN VALUE: * TRUE if the device is a fc rport, FALSE otherwise - *---------------------------------------------------------------------- */ /* _VMKLNX_CODECHECK_: scsi_is_fc_rport */ int @@ -2392,16 +2296,14 @@ fc_get_event_number(void) EXPORT_SYMBOL(fc_get_event_number); /** - * fc_host_post_vendor_event - non-operational function + * This function is a non-operational function provided to help reduce + * kernel ifdefs. It is not supported in this release of ESX. * @shost: ignored * @event_number: ignored * @data_len: ignored * @data_buf: ignored * @vendor_id: ignored * - * This function is a non-operational function provided to help reduce - * kernel ifdefs. It is not supported in this release of ESX. - * * ESX Deviation Notes: * This function is a non-operational function provided to help reduce * kernel ifdefs. It is not supported in this release of ESX. @@ -2425,23 +2327,21 @@ fc_host_post_vendor_event(struct Scsi_Host *shost, u32 event_number, EXPORT_SYMBOL(fc_host_post_vendor_event); /** - * fc_host_post_event - non-operational function - * @shost: ignored - * @event_number: ignored - * @event_code: ignored - * @event_data: ignored - * * This function is not implemented. * * This function is a non-operational function provided to help reduce * kernel ifdefs. It is not supported in this release of ESX. + * @shost: ignored + * @event_number: ignored + * @event_code: ignored + * @event_data: ignored + * + * RETURN VALUE: + * This function does not return a value * * ESX Deviation Notes: * This function is a non-operational function provided to help reduce * kernel ifdefs. It is not supported in this release of ESX. - * - * RETURN VALUE: - * This function does not return a value */ /* _VMKLNX_CODECHECK_: fc_host_post_event */ void @@ -2460,8 +2360,6 @@ EXPORT_SYMBOL(fc_host_post_event); /* - *---------------------------------------------------------------------- - * * fc_queue_work -- * * Queue work to the fc_host workqueue. 
@@ -2473,8 +2371,6 @@ EXPORT_SYMBOL(fc_host_post_event); * * Side effects: * None - * - *---------------------------------------------------------------------- */ static int fc_queue_work(struct Scsi_Host *shost, struct work_struct *work) @@ -2492,8 +2388,6 @@ fc_queue_work(struct Scsi_Host *shost, struct work_struct *work) } /* - *---------------------------------------------------------------------- - * * fc_flush_work -- * * Flush a fc_host's workqueue. @@ -2503,8 +2397,6 @@ fc_queue_work(struct Scsi_Host *shost, struct work_struct *work) * * Side effects: * None - * - *---------------------------------------------------------------------- */ static void fc_flush_work(struct Scsi_Host *shost) @@ -2520,8 +2412,6 @@ fc_flush_work(struct Scsi_Host *shost) } /* - *---------------------------------------------------------------------- - * * fc_queue_devloss_work -- * * Schedule work for the fc_host devloss workqueue. @@ -2531,8 +2421,6 @@ fc_flush_work(struct Scsi_Host *shost) * * Side effects: * None - * - *---------------------------------------------------------------------- */ static int fc_queue_devloss_work(struct Scsi_Host *shost, struct delayed_work *dwork, @@ -2560,8 +2448,6 @@ fc_queue_devloss_work(struct Scsi_Host *shost, struct delayed_work *dwork, } /* - *---------------------------------------------------------------------- - * * fc_flush_devloss --- * * Flush a fc_host's devloss workqueue. @@ -2571,8 +2457,6 @@ fc_queue_devloss_work(struct Scsi_Host *shost, struct delayed_work *dwork, * * Side effects: * None - * - *---------------------------------------------------------------------- */ static void fc_flush_devloss(struct Scsi_Host *shost) @@ -2590,8 +2474,6 @@ fc_flush_devloss(struct Scsi_Host *shost) /* - *---------------------------------------------------------------------- - * * fc_starget_delete --- * * Called to delete the scsi decendents of an rport (target and all sdevs) @@ -2601,8 +2483,6 @@ fc_flush_devloss(struct Scsi_Host *shost) * * Side effects: * None - * - *---------------------------------------------------------------------- */ static void fc_starget_delete(struct work_struct *work) @@ -2666,8 +2546,6 @@ fc_starget_delete(struct work_struct *work) /* - *---------------------------------------------------------------------- - * * fc_rport_final_delete --- * finish rport termination and delete it. * @@ -2676,8 +2554,6 @@ fc_starget_delete(struct work_struct *work) * * Side effects: * None - * - *---------------------------------------------------------------------- */ static void fc_rport_final_delete(struct work_struct *work) @@ -2726,14 +2602,13 @@ fc_rport_final_delete(struct work_struct *work) * NPIV support code */ -/** +/* * vmk_fc_vport_create - allocates and creates a FC virtual port. * Allocates and creates the vport structure, calls the parent host * to instantiate the vport, the completes w/ class and sysfs creation. - * shost - physical host to create vport on - * pdev - parent device - * args - virtual port data passed in, wwn, etc. - * vport_shost - pointer to callers vport_shost + * @shost: physical host to create vport on + * @pdev: parent device + * @vport_shost: pointer to callers vport_shost * * Notes: * This routine assumes no locks are held on entry. @@ -2895,7 +2770,7 @@ fc_vport_sched_delete(struct work_struct *work) -/** +/* * Calls the LLDD vport_delete() function, then deallocates and removes * the vport from the shost and object tree. 
* @@ -3135,19 +3010,15 @@ vmk_fc_vport_suspend(struct Scsi_Host *shost, int suspend) * SAS host attributes */ -/** +/* + * vmklnx_sas_host_setup -- initialize sas_host_attrs + * @shost: pointer to a Scsi_Host structure * - * \globalfn vmklnx_sas_host_setup -- initialize sas_host_attrs + * RETURN VALUE: + * 0 for SUCCESS; negative values if failed * - * \param shost pointer to a Scsi_Host structure - * \return 0 for SUCCESS; negative values if failed - * \par Include: + * Include: * scsi/scsi_transport_sas.h - * \par ESX Deviation Notes: - * - * \sa None. - * \comments - - * */ int vmklnx_sas_host_setup(struct Scsi_Host *shost) { @@ -3282,7 +3153,7 @@ void sas_rphy_free(struct sas_rphy *rphy) } EXPORT_SYMBOL(sas_rphy_free); -/** +/* * sas_scsi_target_reparent - reparent the scsi_target to new rphy passed in */ static int @@ -3320,7 +3191,7 @@ sas_scsi_target_reparent(struct device *dev, void *data) return 0; } -/** +/* * sas_assign_scsi_target_id - assign a scsi target id to the SAS device * * if previously removed dev @@ -3385,11 +3256,11 @@ sas_assign_scsi_target_id(struct sas_rphy *rphy, } /** - * sas_rphy_add - add a SAS remote PHY to the device hierachy - * @rphy: The remote PHY to be added + * Add a SAS remote PHY to the device hierachy * * Publishes a SAS remote PHY to the rest of the system. * Assumes sas_host->lock is held as needed + * @rphy: The remote PHY to be added * * RETURN VALUE: * 0 on success @@ -3444,10 +3315,8 @@ int sas_rphy_add(struct sas_rphy *rphy) EXPORT_SYMBOL(sas_rphy_add); /** - * sas_rphy_delete - remove SAS remote PHY - * @rphy: SAS remote PHY to remove - * * Removes the specified SAS remote PHY. + * @rphy: SAS remote PHY to remove * * RETURN VALUE: * None @@ -3496,11 +3365,9 @@ static void sas_rphy_initialize(struct sas_rphy *rphy) /** - * sas_end_device_alloc - allocate a SAS rphy and connect to its parent device + * Allocate a SAS rphy and connect to its parent device * @parent: parent to connect SAS rphy port to * - * Allocates a SAS remote PHY structure, connected to @parent - * */ /* _VMKLNX_CODECHECK_: sas_end_device_alloc */ struct sas_rphy *sas_end_device_alloc(struct sas_port *parent) @@ -3536,12 +3403,12 @@ struct sas_rphy *sas_end_device_alloc(struct sas_port *parent) EXPORT_SYMBOL(sas_end_device_alloc); /** - * sas_expander_alloc - allocate an rphy for an end device - * @parent: parent to connect SAS rphy to - * @type: device type + * Allocate an rphy for an end device. * * Allocates a SAS remote PHY structure, connected to @parent and sets its type. * Valid types are: SAS_EDGE_EXPANDER_DEVICE and SAS_FANOUT_EXPANDER_DEVICE + * @parent: parent to connect SAS rphy to + * @type: device type * */ /* _VMKLNX_CODECHECK_: sas_expander_alloc */ @@ -3581,19 +3448,18 @@ struct sas_rphy *sas_expander_alloc(struct sas_port *parent, } EXPORT_SYMBOL(sas_expander_alloc); -/** +/* + * sas_is_sas_port -- check if it is a port device + * @dev: a pointer to a struct device * - * \globalfn sas_is_sas_port -- check if it is a port device + * RETURN VALUE: + * 1 if it is a SAS remote PHY; 0 otherwise * - * \param dev a pointer to a struct device - * \return 1 if it is a SAS remote PHY; 0 otherwise - * \par Include: + * Include: * scsi/scsi_transport_sas.h - * \par ESX Deviation Notes: - * It uses device instead. - * \sa None. - * \comments - * + * ESX Deviation Notes: + * It uses device instead. 
*/ int scsi_is_sas_port(const struct device *dev) { @@ -3604,19 +3470,18 @@ int scsi_is_sas_port(const struct device *dev) } } -/** +/* + * sas_is_sas_phy -- check if it is a phy device + * @dev: a pointer to a struct device * - * \globalfn sas_is_sas_phy -- check if it is a phy device + * RETURN VALUE: + * 1 if it is a SAS PHY; 0 otherwise * - * \param dev a pointer to a struct device - * \return 1 if it is a SAS PHY; 0 otherwise - * \par Include: + * Include: * scsi/scsi_transport_sas.h - * \par ESX Deviation Notes: - * It uses device instead. - * \sa None. - * \comments - * + * ESX Deviation Notes: + * It uses device instead. */ int scsi_is_sas_phy(const struct device *dev) { @@ -3650,25 +3515,12 @@ int scsi_is_sas_rphy(const struct device *dev) } EXPORT_SYMBOL(scsi_is_sas_rphy); -/** - * - * \globalfn sas_attach_transport -- Attach SAS transport - * - * \param scmd - * \return None - * \par Include: - * scsi/scsi_transport_sas.h - * \par ESX Deviation Notes: - * - * \sa None. - * \comments - * - */ /** - * sas_attach_transport - instantiate SAS transport template + * instantiate SAS transport template * @ft: SAS transport class function template * - * + * RETURN VALUE: + * Pointer to SAS transport template */ /* _VMKLNX_CODECHECK_: sas_attach_transport */ struct scsi_transport_template * @@ -3702,24 +3554,12 @@ sas_attach_transport(struct sas_function_template *ft) } EXPORT_SYMBOL(sas_attach_transport); -/** - * - * \globalfn sas_release_transport -- Release SAS transport - * - * \param scmd - * \return None - * \par Include: - * scsi/scsi_transport_sas.h - * \par ESX Deviation Notes: - * - * \sa None. - * \comments - * - */ /** * sas_release_transport - Release SAS transport template instance * @t: transport template instance * + * RETURN VALUE: + * None. */ /* _VMKLNX_CODECHECK_: sas_release_transport */ void sas_release_transport(struct scsi_transport_template *t) @@ -3767,12 +3607,10 @@ static int do_sas_phy_delete(struct device *dev, void *data) } /** - * sas_read_port_mode_page - Issue Mode Sense for Page 0x19 to specifed scsi_device - * @sdev: Pointer to SCSI device of type struct scsi_device - * * Issues SCSI Mode Sense command for page 0x19 to the passed in scsi_device. * Figures out the associated SAS end device and populates appropriate fields * on success. + * @sdev: Pointer to SCSI device of type struct scsi_device * * RETURN VALUE: * 0 on Sucess, non-zero otherwise @@ -3827,18 +3665,17 @@ int sas_read_port_mode_page(struct scsi_device *sdev) } EXPORT_SYMBOL(sas_read_port_mode_page); -/** +/* + * tear down a device SAS data structure + * @dev: a pointer to device belonging to the SAS object * - * \globalfn sas_remove_children -- tear down a device SAS data structure + * RETURN VALUE: + * void * - * \param dev a pointer to device belonging to the SAS object - * \return void - * \par Include: + * Include: * scsi/scsi_transport_sas.h - * \par ESX Deviation Notes: - * - * \sa None. - * \comments + * + * comments * It must be called just before scsi_remove_host for SAS HBAs. 
* */ @@ -3897,19 +3734,17 @@ void sas_remove_host(struct Scsi_Host *shost) } EXPORT_SYMBOL(sas_remove_host); -/** +/* + * sas_port_alloc -- allocate and initialize a SAS port structure * - * \globalfn sas_port_alloc -- allocate and initialize a SAS port structure + * @parent: a pointer to the parent device + * @port_id: a port number * - * \param parent a pointer to the parent device - * \param port_id a port number - * \return a pointer to the allocated PHY structure; NULL if failed - * \par Include: - * scsi/scsi_transport_sas.h - * \par ESX Deviation Notes: + * RETURN VALUE: + * a pointer to the allocated PHY structure; NULL if failed * - * \sa None. - * \comments - + * Include: + * scsi/scsi_transport_sas.h * */ struct sas_port *sas_port_alloc(struct device *parent, int port_id) @@ -3939,14 +3774,12 @@ struct sas_port *sas_port_alloc(struct device *parent, int port_id) } /** - * sas_port_alloc_num - allocate and initialize a SAS port structure - * @parent: a pointer to the parent device - * * Allocates a SAS port structure and a number to go with it. This interface is * really for adapters where the port number has no meaning, so the sas class * should manage them. It will be added to the device tree below the device * specified by @parent which must be either a Scsi_Host or a * sas_expander_device. + * @parent: a pointer to the parent device * * RETURN VALUE: * Returns a pointer to the allocated PHY structure; Returns NULL on error. @@ -3979,10 +3812,8 @@ struct sas_port *sas_port_alloc_num(struct device *parent) EXPORT_SYMBOL(sas_port_alloc_num); /** - * sas_port_mark_backlink - mark the sas port backlink - * @port: a pointer to struct sas_port - * * Marks the port as a backlink. + * @port: a pointer to struct sas_port * * RETURN VALUE: * This function does not return a value. @@ -4003,10 +3834,8 @@ void sas_port_mark_backlink(struct sas_port *port) EXPORT_SYMBOL(sas_port_mark_backlink); /** - * sas_port_add - Add a SAS port to the device hierarchy - * @port: Pointer to SAS port of type struct sas_port - * * Add the passed in SAS port to the device hierarchy + * @port: Pointer to SAS port of type struct sas_port * * RETURN VALUE: * 0 on success and non-zero on error @@ -4029,17 +3858,14 @@ int sas_port_add(struct sas_port *port) EXPORT_SYMBOL(sas_port_add); /** - * sas_port_add_phy - Add a PHY to a SAS port structure - * @port: Pointer to SAS port of type struct sas_port - * @phy: Pointer to a PHY of type struct sas_phy - * * Add the passed in PHY to the passed in SAS port. * This API is usually called by SAS drivers that discover the SAS ports and * want to associate the PHY with the SAS port. + * @port: Pointer to SAS port of type struct sas_port + * @phy: Pointer to a PHY of type struct sas_phy * * ESX Deviation Notes: * No sysfs links are created on ESXi - * */ /* _VMKLNX_CODECHECK_: sas_port_add_phy */ void sas_port_add_phy(struct sas_port *port, struct sas_phy *phy) @@ -4072,12 +3898,10 @@ void sas_port_add_phy(struct sas_port *port, struct sas_phy *phy) EXPORT_SYMBOL(sas_port_add_phy); /** - * sas_port_delete_phy - Remove a PHY from a SAS port - * @port: Pointer to a SAS port - * @phy: Pointer to the phy to be removed from the SAS port - * * Removes the passed in PHY from the passed in SAS port. * This operation is usually done as part of tearing down ports. 
+ * @port: Pointer to a SAS port + * @phy: Pointer to the phy to be removed from the SAS port * */ /* _VMKLNX_CODECHECK_: sas_port_delete_phy */ @@ -4094,12 +3918,10 @@ void sas_port_delete_phy(struct sas_port *port, struct sas_phy *phy) EXPORT_SYMBOL(sas_port_delete_phy); /** - * sas_port_delete - Remove SAS port - * @port: Pointer to a SAS port (of type struct sas_port) to be removed - * * Removes the specified SAS port. * If any PHYs are found hanging from the passed in SAS port, * those are cleaned up as well. + * @port: Pointer to a SAS port (of type struct sas_port) to be removed * */ /* _VMKLNX_CODECHECK_: sas_port_delete */ @@ -4131,18 +3953,14 @@ void sas_port_delete(struct sas_port *port) } EXPORT_SYMBOL(sas_port_delete); -/** - ********************************************************************** - * \internalfn sas_find_rphy -- Find a matching rphy +/* + * sas_find_rphy -- Find a matching rphy + * + * RETURN VALUE: + * None * - * \param sh, id - * \return None - * \par Include: + * Include: * scsi/scsi_host.h - * \par ESX Deviation Notes: - * Needs - * \sa None. - ********************************************************************** */ struct sas_rphy * sas_find_rphy(struct Scsi_Host *sh, uint id) @@ -4216,7 +4034,9 @@ static void sas_expander_release(struct device *dev) } /** - * vmklnx_init_fcoe_attribs - + * Copies the given values to the Vmkernel storage management structure. + * The given MAC address pointers are permitted to be NULL, for which + * the corresponding management information will not be set. * @shost: Pointer to shost structure * @netdevName: Pointer to CNA net_device name * @vid: VLAN ID @@ -4224,10 +4044,6 @@ static void sas_expander_release(struct device *dev) * @vnportMac: VNPort MAC address * @fcfMac: FCF MAC address * - * Copies the given values to the Vmkernel storage management structure. - * The given MAC address pointers are permitted to be NULL, for which - * the corresponding management information will not be set. - * * ESX Deviation Notes: * This API is not present in Linux. This should be called immediately * after FLOGI is completed. @@ -4274,8 +4090,6 @@ vmklnx_init_fcoe_attribs(struct Scsi_Host *shost, EXPORT_SYMBOL(vmklnx_init_fcoe_attribs); /* - *---------------------------------------------------------------------- - * * SCSILinux_InitTransport * * Entry point for SCSI Transport-specific initialization. @@ -4286,8 +4100,6 @@ EXPORT_SYMBOL(vmklnx_init_fcoe_attribs); * * Side effects: * Initializes SCSI Transport log. - * - *---------------------------------------------------------------------- */ void SCSILinux_InitTransport(void) @@ -4296,8 +4108,6 @@ SCSILinux_InitTransport(void) } /* - *---------------------------------------------------------------------- - * * SCSILinux_CleanupTransport * * Entry point for SCSI Transport-specific teardown. @@ -4308,8 +4118,6 @@ SCSILinux_InitTransport(void) * * Side effects: * Cleans up SCSI Transport log. 
- * - *---------------------------------------------------------------------- */ void SCSILinux_CleanupTransport(void) diff --git a/vmkdrivers/src_92/vmklinux_92/vmware/linux_scsi_vmk_if.c b/vmkdrivers/src_92/vmklinux_92/vmware/linux_scsi_vmk_if.c index 0ba2c9d30914039a86e0402e0e8c7d75a55dbf32..fc016ced5c36133b49fc0d9432505f864334c43b 100644 --- a/vmkdrivers/src_92/vmklinux_92/vmware/linux_scsi_vmk_if.c +++ b/vmkdrivers/src_92/vmklinux_92/vmware/linux_scsi_vmk_if.c @@ -1617,6 +1617,14 @@ FcLinuxPortAttributes(void *clientData, vmk_uint32 portId, vmk_FcPortAttributes return VMK_NOT_FOUND; } + if (ft->get_host_port_id) { + VMKAPI_MODULE_CALL_VOID(SCSI_GET_MODULE_ID(shost), ft->get_host_port_id, shost); + } + + if (ft->get_host_speed) { + VMKAPI_MODULE_CALL_VOID(SCSI_GET_MODULE_ID(shost), ft->get_host_speed, shost); + } + lport = shost_priv(shost); memset(portAttrib, 0, sizeof(vmk_FcPortAttributes));