/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2011 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along
  with this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#ifndef _IXGBE_H_
#define _IXGBE_H_

#include <linux/pci.h>
#include <linux/netdevice.h>
#ifdef HAVE_IRQ_AFFINITY_HINT
#include <linux/cpumask.h>
#endif /* HAVE_IRQ_AFFINITY_HINT */
#include <linux/vmalloc.h>

#ifdef __VMKLNX__
#define NODE_ADDRESS_SIZE ETH_ALEN
#endif /* __VMKLNX__ */

#ifdef SIOCETHTOOL
#include <linux/ethtool.h>
#endif
#ifdef NETIF_F_HW_VLAN_TX
#include <linux/if_vlan.h>
#endif
#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
#define IXGBE_DCA
#include <linux/dca.h>
#endif
#include "ixgbe_dcb.h"

#include "kcompat.h"

#ifdef __VMKLNX__
#include "kcompat_esx.h"
#endif /* __VMKLNX__ */

#ifdef HAVE_SCTP
#include <linux/sctp.h>
#endif

#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
#define IXGBE_FCOE
#include "ixgbe_fcoe.h"
#endif /* CONFIG_FCOE or CONFIG_FCOE_MODULE */

#include "ixgbe_api.h"

#ifdef __VMKLNX__
#include
#endif

#define PFX "ixgbe: "
#define DPRINTK(nlevel, klevel, fmt, args...) \
	((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
	printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
		__FUNCTION__ , ## args)))

#ifdef __VMKLNX__
#define NETIF_MSG_VIRT 0x8000
#endif /* __VMKLNX__ */

/* TX/RX descriptor defines */
#ifdef __VMKLNX__
#define IXGBE_DEFAULT_TXD		1024
#define IXGBE_DEFAULT_TX_WORK		1024
#else
#define IXGBE_DEFAULT_TXD		512
#define IXGBE_DEFAULT_TX_WORK		256
#endif /* __VMKLNX__ */
#define IXGBE_MAX_TXD			4096
#define IXGBE_MIN_TXD			64

#define IXGBE_DEFAULT_RXD		512
#define IXGBE_DEFAULT_RX_WORK		256
#define IXGBE_MAX_RXD			4096
#define IXGBE_MIN_RXD			64

#ifdef __VMKLNX__
#define IXGBE_JUMBO_FRAME_DEFAULT_RXD	512
#define IXGBE_ESX_RSS_QUEUES		4
#define IXGBE_ESX_HW_QUEUES_PER_POOL	4
#endif

/* flow control */
#define IXGBE_MIN_FCRTL			0x40
#define IXGBE_MAX_FCRTL			0x7FF80
#define IXGBE_MIN_FCRTH			0x600
#define IXGBE_MAX_FCRTH			0x7FFF0
#define IXGBE_DEFAULT_FCPAUSE		0xFFFF
#define IXGBE_MIN_FCPAUSE		0
#define IXGBE_MAX_FCPAUSE		0xFFFF

/* Supported Rx Buffer Sizes */
#define IXGBE_RXBUFFER_512	512	/* Used for packet split */
#define IXGBE_RXBUFFER_1536	1536
#define IXGBE_RXBUFFER_2K	2048
#define IXGBE_RXBUFFER_3K	3072
#define IXGBE_RXBUFFER_4K	4096
#define IXGBE_RXBUFFER_7K	7168
#define IXGBE_RXBUFFER_8K	8192
#define IXGBE_RXBUFFER_15K	15360
#define IXGBE_MAX_RXBUFFER	16384	/* largest size for single descriptor */

/*
 * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
 * reserve 2 more, and skb_shared_info adds an additional 384 bytes more,
 * this adds up to 512 bytes of extra data meaning the smallest allocation
 * we could have is 1K.
 * i.e. RXBUFFER_512 --> size-1024 slab
 */
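/*
 * Worked example of the sizing note above (illustrative, not part of the
 * original driver): 64 bytes of netdev_alloc_skb headroom + 2 bytes for
 * NET_IP_ALIGN + 384 bytes of skb_shared_info comes to 450 bytes, which
 * the slab allocator rounds up to 512.  A 512 byte header buffer therefore
 * costs 512 + 512 = 1024 bytes:
 *
 *	skb = netdev_alloc_skb_ip_align(netdev, IXGBE_RX_HDR_SIZE);
 *	// the allocation is served from the size-1024 slab
 */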
#define IXGBE_RX_HDR_SIZE	IXGBE_RXBUFFER_512

#define MAXIMUM_ETHERNET_VLAN_SIZE	(VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)

/* How many Rx Buffers do we bundle into one write to the hardware? */
#define IXGBE_RX_BUFFER_WRITE	16	/* Must be power of 2 */

#define IXGBE_TX_FLAGS_CSUM		(u32)(1)
#define IXGBE_TX_FLAGS_HW_VLAN		(u32)(1 << 1)
#define IXGBE_TX_FLAGS_SW_VLAN		(u32)(1 << 2)
#define IXGBE_TX_FLAGS_TSO		(u32)(1 << 3)
#define IXGBE_TX_FLAGS_IPV4		(u32)(1 << 4)
#define IXGBE_TX_FLAGS_FCOE		(u32)(1 << 5)
#define IXGBE_TX_FLAGS_FSO		(u32)(1 << 6)
#define IXGBE_TX_FLAGS_TXSW		(u32)(1 << 7)
#define IXGBE_TX_FLAGS_MAPPED_AS_PAGE	(u32)(1 << 8)
#define IXGBE_TX_FLAGS_TSTAMP		(u32)(1 << 9)
#define IXGBE_TX_FLAGS_VLAN_MASK	0xffff0000
#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK	0xe0000000
#define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT	29
#define IXGBE_TX_FLAGS_VLAN_SHIFT	16

#define IXGBE_MAX_RX_DESC_POLL		10

#define IXGBE_MAX_VF_MC_ENTRIES		30
#define IXGBE_MAX_VF_FUNCTIONS		64
#define IXGBE_MAX_VFTA_ENTRIES		128
#define MAX_EMULATION_MAC_ADDRS		16
#define IXGBE_MAX_PF_MACVLANS		15
#define IXGBE_82599_VF_DEVICE_ID	0x10ED
#define IXGBE_X540_VF_DEVICE_ID		0x1515
#define VMDQ_P(p)	((p) + adapter->num_vfs)

#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter)	\
	{							\
		u32 current_counter = IXGBE_READ_REG(hw, reg);	\
		if (current_counter < last_counter)		\
			counter += 0x100000000LL;		\
		last_counter = current_counter;			\
		counter &= 0xFFFFFFFF00000000LL;		\
		counter |= current_counter;			\
	}

#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
	{								 \
		u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb);	 \
		u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb);	 \
		u64 current_counter = (current_counter_msb << 32) |	 \
			current_counter_lsb;				 \
		if (current_counter < last_counter)			 \
			counter += 0x1000000000LL;			 \
		last_counter = current_counter;				 \
		counter &= 0xFFFFFFF000000000LL;			 \
		counter |= current_counter;				 \
	}
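/*
 * Illustrative walk-through of UPDATE_VF_COUNTER_32bit above (sketch with
 * hypothetical values, not driver code): the macro widens a free-running
 * 32-bit hardware counter into a 64-bit software total.  If last_counter
 * is 0xFFFFFFF0 and the register now reads 0x00000010, current < last, so
 * 2^32 (0x100000000LL) is added to the running total before its low 32
 * bits are replaced with the fresh reading:
 *
 *	UPDATE_VF_COUNTER_32bit(reg, last_vfstats.gprc, vfstats.gprc);
 */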
struct vf_stats {
	u64 gprc;
	u64 gorc;
	u64 gptc;
	u64 gotc;
	u64 mprc;
};

#ifdef __VMKLNX__
struct vf_vlan {
	bool add;
	int vid;
};
#endif /* __VMKLNX__ */

struct vf_data_storage {
	unsigned char vf_mac_addresses[ETH_ALEN];
	u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
	u16 num_vf_mc_hashes;
	u16 default_vf_vlan_id;
	u16 vlans_enabled;
	bool clear_to_send;
#ifdef __VMKLNX__
	bool allocated;
	bool init;
	u32 buffer_mode;
	u32 coml_method;
	u32 mtu;
	u32 irq_rate;
	u16 num_queue_pairs;
	int rar;
#endif
	struct vf_stats vfstats;
	struct vf_stats last_vfstats;
	struct vf_stats saved_rst_vfstats;
#ifndef __VMKLNX__
	bool pf_set_mac;
#endif /* __VMKLNX__ */
	u16 pf_vlan;	/* When set, guest VLAN config not allowed. */
	u16 pf_qos;
};

struct vf_macvlans {
	struct list_head l;
	int vf;
	bool free;
	bool is_macvlan;
	u8 vf_macvlan[ETH_ALEN];
};

#define IXGBE_MAX_TXD_PWR	14
#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S)	DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
#ifdef MAX_SKB_FRAGS
#define DESC_NEEDED	((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)
#else
#define DESC_NEEDED	4
#endif

/* wrapper around a pointer to a socket buffer,
 * so a DMA handle can be stored along with the buffer */
struct ixgbe_tx_buffer {
	union ixgbe_adv_tx_desc *next_to_watch;
	unsigned long time_stamp;
	struct sk_buff *skb;
	unsigned int bytecount;
	unsigned short gso_segs;
	__be16 protocol;
	dma_addr_t dma;
	unsigned int length;
	u32 tx_flags;
};

struct ixgbe_rx_buffer {
	struct sk_buff *skb;
	dma_addr_t dma;
	struct page *page;
	dma_addr_t page_dma;
	unsigned int page_offset;
};

struct ixgbe_queue_stats {
	u64 packets;
	u64 bytes;
};

struct ixgbe_tx_queue_stats {
	u64 restart_queue;
	u64 tx_busy;
	u64 completed;
	u64 tx_done_old;
};

struct ixgbe_rx_queue_stats {
	u64 rsc_count;
	u64 rsc_flush;
	u64 non_eop_descs;
	u64 alloc_rx_page_failed;
	u64 alloc_rx_buff_failed;
	u64 csum_err;
	u64 rx_hdr_split;
};

enum ixgbe_ring_state_t {
	__IXGBE_TX_FDIR_INIT_DONE,
	__IXGBE_TX_DETECT_HANG,
	__IXGBE_HANG_CHECK_ARMED,
	__IXGBE_RX_PS_ENABLED,
	__IXGBE_RX_RSC_ENABLED,
#ifndef HAVE_NDO_SET_FEATURES
	__IXGBE_RX_CSUM_ENABLED,
#endif
	__IXGBE_RX_CSUM_UDP_ZERO_ERR,
#ifdef __VMKLNX__
	__IXGBE_RING_ALLOCATED,
	__IXGBE_RING_CLEAN_BUSY,
	__IXGBE_RING_LATENCY,
	__IXGBE_RING_RSS,
#endif
	__IXGBE_RING_NETDEV_CNA,
};

#define ring_is_ps_enabled(ring) \
	test_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
#define set_ring_ps_enabled(ring) \
	set_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
#define clear_ring_ps_enabled(ring) \
	clear_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
#define check_for_tx_hang(ring) \
	test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
#define set_check_for_tx_hang(ring) \
	set_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
#define clear_check_for_tx_hang(ring) \
	clear_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
#ifndef IXGBE_NO_HW_RSC
#define ring_is_rsc_enabled(ring) \
	test_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
#else
#define ring_is_rsc_enabled(ring)	false
#endif
#define set_ring_rsc_enabled(ring) \
	set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
#define clear_ring_rsc_enabled(ring) \
	clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
#ifdef __VMKLNX__
#define ring_is_allocated(ring) \
	test_bit(__IXGBE_RING_ALLOCATED, &(ring)->state)
#define set_ring_allocated(ring) \
	set_bit(__IXGBE_RING_ALLOCATED, &(ring)->state)
#define clear_ring_allocated(ring) \
	clear_bit(__IXGBE_RING_ALLOCATED, &(ring)->state)
#define ring_is_latency_enabled(ring) \
	test_bit(__IXGBE_RING_LATENCY, &(ring)->state)
#define set_ring_latency_enabled(ring) \
	set_bit(__IXGBE_RING_LATENCY, &(ring)->state)
#define clear_ring_latency_enabled(ring) \
	clear_bit(__IXGBE_RING_LATENCY, &(ring)->state)
#define ring_is_rss_enabled(ring) \
	test_bit(__IXGBE_RING_RSS, &(ring)->state)
#define set_ring_rss_enabled(ring) \
	set_bit(__IXGBE_RING_RSS, &(ring)->state)
#define clear_ring_rss_enabled(ring) \
	clear_bit(__IXGBE_RING_RSS, &(ring)->state)
#endif
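/*
 * Illustrative usage of the ring-state helpers above (sketch, not driver
 * code): each helper wraps an atomic bitop on ring->state, so a mode can
 * be flipped and tested without any extra locking:
 *
 *	set_ring_ps_enabled(rx_ring);
 *	if (ring_is_ps_enabled(rx_ring))
 *		rx_ring->rx_buf_len = IXGBE_RX_HDR_SIZE;
 */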
/*
 * queues are split up into 2 distinct pools. This internal
 * resource split is due to the multiple clients of queues.
 */
#define ring_type_is_cna(ring) \
	test_bit(__IXGBE_RING_NETDEV_CNA, &(ring)->state)
#define set_ring_type_cna(ring) \
	set_bit(__IXGBE_RING_NETDEV_CNA, &(ring)->state)
#define clear_ring_type_cna(ring) \
	clear_bit(__IXGBE_RING_NETDEV_CNA, &(ring)->state)

#define netdev_ring(ring)	(ring->netdev)
#define ring_queue_index(ring)	(ring->queue_index)

struct ixgbe_ring {
	struct ixgbe_ring *next;	/* pointer to next ring in q_vector */
	void *desc;			/* descriptor ring memory */
	struct device *dev;		/* device for DMA mapping */
	struct net_device *netdev;	/* netdev ring belongs to */
	union {
		struct ixgbe_tx_buffer *tx_buffer_info;
		struct ixgbe_rx_buffer *rx_buffer_info;
	};
	unsigned long state;
	u8 __iomem *tail;

	u16 count;	/* amount of descriptors */
	u16 rx_buf_len;

	u8 queue_index;	/* needed for multiqueue queue management */
	u8 reg_idx;	/* holds the special value that gets
			 * the hardware register offset
			 * associated with this ring, which is
			 * different for DCB and RSS modes */

	u8 atr_sample_rate;
	u8 atr_count;

	u16 next_to_use;
	u16 next_to_clean;

	u8 dcb_tc;
#ifdef __VMKLNX__
	u8 active;
#endif /* __VMKLNX__ */
	struct ixgbe_queue_stats stats;
	union {
		struct ixgbe_tx_queue_stats tx_stats;
		struct ixgbe_rx_queue_stats rx_stats;
	};
	int numa_node;
	unsigned int size;	/* length in bytes */
	dma_addr_t dma;		/* phys. address of descriptor ring */
	struct ixgbe_q_vector *q_vector; /* backpointer to host q_vector */
} ____cacheline_internodealigned_in_smp;

enum ixgbe_ring_f_enum {
	RING_F_NONE = 0,
	RING_F_VMDQ,	/* SR-IOV uses the same ring feature */
	RING_F_RSS,
	RING_F_FDIR,
#ifdef IXGBE_FCOE
	RING_F_FCOE,
#endif /* IXGBE_FCOE */
	RING_F_ARRAY_SIZE	/* must be last in enum set */
};

#define IXGBE_MAX_DCB_INDICES	8
#define IXGBE_MAX_RSS_INDICES	16
#define IXGBE_MAX_VMDQ_INDICES	64
#define IXGBE_MAX_FDIR_INDICES	64
#ifdef IXGBE_FCOE
#define IXGBE_MAX_FCOE_INDICES	8
#define MAX_RX_QUEUES	(IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES)
#define MAX_TX_QUEUES	(IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES)
#else
#define MAX_RX_QUEUES	IXGBE_MAX_FDIR_INDICES
#define MAX_TX_QUEUES	IXGBE_MAX_FDIR_INDICES
#endif /* IXGBE_FCOE */

struct ixgbe_ring_feature {
	int indices;
	int mask;
};

struct ixgbe_ring_container {
	struct ixgbe_ring *ring;	/* pointer to linked list of rings */
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_packets;	/* total packets processed this int */
	u16 work_limit;			/* total work allowed per interrupt */
	u8 count;			/* total number of rings in vector */
	u8 itr;				/* current ITR setting for ring */
};

#define MAX_RX_PACKET_BUFFERS \
	((adapter->flags & IXGBE_FLAG_DCB_ENABLED) ? 8 : 1)
#define MAX_TX_PACKET_BUFFERS	MAX_RX_PACKET_BUFFERS
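/*
 * Illustrative sketch (not driver code): rings are chained onto a
 * q_vector's ixgbe_ring_container through ixgbe_ring.next, so per-vector
 * processing walks a singly linked list:
 *
 *	struct ixgbe_ring *ring;
 *	for (ring = q_vector->tx.ring; ring; ring = ring->next)
 *		; // clean this Tx ring, bounded by tx.work_limit
 */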
/* MAX_MSIX_Q_VECTORS of these are allocated,
 * but we only use one per queue-specific vector.
 */
struct ixgbe_q_vector {
	struct ixgbe_adapter *adapter;
	int cpu;	/* CPU for DCA */
	u16 v_idx;	/* index of q_vector within array, also used for
			 * finding the bit in EICR and friends that
			 * represents the vector for this ring */
	u16 itr;	/* Interrupt throttle rate written to EITR */
	struct ixgbe_ring_container rx, tx;

#ifdef CONFIG_IXGBE_NAPI
	struct napi_struct napi;
#endif
#ifndef HAVE_NETDEV_NAPI_LIST
	struct net_device poll_dev;
#endif
#ifdef HAVE_IRQ_AFFINITY_HINT
	cpumask_var_t affinity_mask;
#endif
	char name[IFNAMSIZ + 9];
} ____cacheline_internodealigned_in_smp;

/*
 * microsecond values for various ITR rates shifted by 2 to fit itr register
 * with the first 3 bits reserved 0
 */
#define IXGBE_MIN_RSC_ITR	24
#define IXGBE_100K_ITR		40
#define IXGBE_20K_ITR		200
#define IXGBE_16K_ITR		248
#define IXGBE_10K_ITR		400
#define IXGBE_8K_ITR		500

/* ixgbe_test_staterr - tests bits in Rx descriptor status and error fields */
static inline __le32 ixgbe_test_staterr(union ixgbe_adv_rx_desc *rx_desc,
					const u32 stat_err_bits)
{
	return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
}

/* ixgbe_desc_unused - calculate if we have unused descriptors */
static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
{
	u16 ntc = ring->next_to_clean;
	u16 ntu = ring->next_to_use;

	return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
}
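/*
 * Worked example for ixgbe_desc_unused() above (illustrative values):
 * with count = 512, next_to_clean = 10 and next_to_use = 500, ntc <= ntu,
 * so the result is 512 + 10 - 500 - 1 = 21 free descriptors.  The "- 1"
 * keeps one slot permanently empty so that next_to_use == next_to_clean
 * always means "ring empty" rather than "ring full".
 */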
#define IXGBE_RX_DESC(R, i) \
	(&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
#define IXGBE_TX_DESC(R, i) \
	(&(((union ixgbe_adv_tx_desc *)((R)->desc))[i]))
#define IXGBE_TX_CTXTDESC(R, i) \
	(&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))

#define IXGBE_MAX_JUMBO_FRAME_SIZE	16128
#ifdef IXGBE_FCOE
/* use 3K as the baby jumbo frame size for FCoE */
#define IXGBE_FCOE_JUMBO_FRAME_SIZE	3072
#endif /* IXGBE_FCOE */

#ifdef IXGBE_TCP_TIMER
#define TCP_TIMER_VECTOR	1
#else
#define TCP_TIMER_VECTOR	0
#endif
#define OTHER_VECTOR	1
#define NON_Q_VECTORS	(OTHER_VECTOR + TCP_TIMER_VECTOR)

#define IXGBE_MAX_MSIX_VECTORS_82599	64
#define IXGBE_MAX_MSIX_Q_VECTORS_82599	64
#define IXGBE_MAX_MSIX_Q_VECTORS_82598	16
#define IXGBE_MAX_MSIX_VECTORS_82598	18

struct ixgbe_mac_addr {
	u8 addr[ETH_ALEN];
	u16 queue;
	u16 state;	/* bitmask */
};
#define IXGBE_MAC_STATE_DEFAULT		0x1
#define IXGBE_MAC_STATE_MODIFIED	0x2
#define IXGBE_MAC_STATE_IN_USE		0x4

/*
 * Only for array allocations in our adapter struct. On 82598, there will be
 * unused entries in the array, but that's not a big deal. Also, in 82599,
 * we can actually assign 64 queue vectors based on our extended-extended
 * interrupt registers. This is different than 82598, which is limited to 16.
 */
#define MAX_MSIX_Q_VECTORS	IXGBE_MAX_MSIX_Q_VECTORS_82599
#define MAX_MSIX_COUNT		IXGBE_MAX_MSIX_VECTORS_82599

#define MIN_MSIX_Q_FCOE_VECTORS	2
#define MIN_MSIX_Q_VECTORS	(1 + MIN_MSIX_Q_FCOE_VECTORS)
#define MIN_MSIX_COUNT		(MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)

/* default to trying for four seconds */
#define IXGBE_TRY_LINK_TIMEOUT	(4 * HZ)

/* board specific private data structure */
struct ixgbe_adapter {
#ifdef NETIF_F_HW_VLAN_TX
#ifdef HAVE_VLAN_RX_REGISTER
	struct vlan_group *vlgrp;	/* must be first, see ixgbe_receive_skb */
#else
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
#endif
#endif /* NETIF_F_HW_VLAN_TX */
	/* OS defined structs */
	struct net_device *netdev;
	struct net_device *cnadev;
	struct pci_dev *pdev;

	unsigned long state;

	/* Some features need tri-state capability,
	 * thus the additional *_CAPABLE flags.
	 */
	u32 flags;
#define IXGBE_FLAG_MSI_CAPABLE			(u32)(1 << 0)
#define IXGBE_FLAG_MSI_ENABLED			(u32)(1 << 1)
#define IXGBE_FLAG_MSIX_CAPABLE			(u32)(1 << 2)
#define IXGBE_FLAG_MSIX_ENABLED			(u32)(1 << 3)
#ifndef IXGBE_NO_LLI
#define IXGBE_FLAG_LLI_PUSH			(u32)(1 << 4)
#endif
#define IXGBE_FLAG_RX_1BUF_CAPABLE		(u32)(1 << 5)
#define IXGBE_FLAG_RX_PS_CAPABLE		(u32)(1 << 6)
#define IXGBE_FLAG_RX_PS_ENABLED		(u32)(1 << 7)
#define IXGBE_FLAG_IN_NETPOLL			(u32)(1 << 8)
#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
#define IXGBE_FLAG_DCA_ENABLED			(u32)(1 << 9)
#define IXGBE_FLAG_DCA_CAPABLE			(u32)(1 << 10)
#define IXGBE_FLAG_DCA_ENABLED_DATA		(u32)(1 << 11)
#else
#define IXGBE_FLAG_DCA_ENABLED			(u32)0
#define IXGBE_FLAG_DCA_CAPABLE			(u32)0
#define IXGBE_FLAG_DCA_ENABLED_DATA		(u32)0
#endif
#define IXGBE_FLAG_MQ_CAPABLE			(u32)(1 << 12)
#define IXGBE_FLAG_DCB_ENABLED			(u32)(1 << 13)
#define IXGBE_FLAG_DCB_CAPABLE			(u32)(1 << 14)
#define IXGBE_FLAG_RSS_ENABLED			(u32)(1 << 15)
#define IXGBE_FLAG_RSS_CAPABLE			(u32)(1 << 16)
#define IXGBE_FLAG_VMDQ_CAPABLE			(u32)(1 << 17)
#define IXGBE_FLAG_VMDQ_ENABLED			(u32)(1 << 18)
#define IXGBE_FLAG_FAN_FAIL_CAPABLE		(u32)(1 << 19)
#define IXGBE_FLAG_NEED_LINK_UPDATE		(u32)(1 << 20)
#define IXGBE_FLAG_NEED_LINK_CONFIG		(u32)(1 << 21)
#define IXGBE_FLAG_FDIR_HASH_CAPABLE		(u32)(1 << 22)
#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE		(u32)(1 << 23)
#ifdef IXGBE_FCOE
#define IXGBE_FLAG_FCOE_CAPABLE			(u32)(1 << 24)
#define IXGBE_FLAG_FCOE_ENABLED			(u32)(1 << 25)
#endif /* IXGBE_FCOE */
#define IXGBE_FLAG_SRIOV_CAPABLE		(u32)(1 << 26)
#define IXGBE_FLAG_SRIOV_ENABLED		(u32)(1 << 27)
#define IXGBE_FLAG_SRIOV_REPLICATION_ENABLE	(u32)(1 << 28)
#define IXGBE_FLAG_SRIOV_L2SWITCH_ENABLE	(u32)(1 << 29)
#define IXGBE_FLAG_SRIOV_L2LOOPBACK_ENABLE	(u32)(1 << 30)
#define IXGBE_FLAG_RX_BB_CAPABLE		(u32)(1 << 31)

	u32 flags2;
#ifndef IXGBE_NO_HW_RSC
#define IXGBE_FLAG2_RSC_CAPABLE			(u32)(1)
#define IXGBE_FLAG2_RSC_ENABLED			(u32)(1 << 1)
#else
#define IXGBE_FLAG2_RSC_CAPABLE			0
#define IXGBE_FLAG2_RSC_ENABLED			0
#endif
#define IXGBE_FLAG2_VMDQ_DEFAULT_OVERRIDE	(u32)(1 << 2)
#define IXGBE_FLAG2_CNA_ENABLED			(u32)(1 << 3)
#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE		(u32)(1 << 4)
#define IXGBE_FLAG2_TEMP_SENSOR_EVENT		(u32)(1 << 5)
#define IXGBE_FLAG2_SEARCH_FOR_SFP		(u32)(1 << 6)
#define IXGBE_FLAG2_SFP_NEEDS_RESET		(u32)(1 << 7)
#define IXGBE_FLAG2_RESET_REQUESTED		(u32)(1 << 8)
#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT	(u32)(1 << 9)
#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP		(u32)(1 << 10)
#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP		(u32)(1 << 11)
#ifdef __VMKLNX__
#define IXGBE_FLAG2_LATENCY_ENABLED		(u32)(1 << 12)
#define IXGBE_FLAG2_DYNAMIC_NETQ_ENABLED	(u32)(1 << 13)
#endif
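	/*
	 * Illustrative example of the tri-state flags above (sketch, not
	 * driver code): a feature is active only when the *_CAPABLE and
	 * *_ENABLED halves of its pair are both set:
	 *
	 *	if ((adapter->flags & IXGBE_FLAG_MSIX_CAPABLE) &&
	 *	    (adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
	 *		; // MSI-X vectors were requested and granted
	 */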
	/* Tx fast path data */
	int num_tx_queues;
	u16 tx_itr_setting;
	u16 tx_work_limit;

	/* Rx fast path data */
	int num_rx_queues;
	u16 rx_itr_setting;
	u16 rx_work_limit;

	/* TX */
	struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
	u64 restart_queue;
	u64 lsc_int;
	u32 tx_timeout_count;

	/* RX */
	struct ixgbe_ring *rx_ring[MAX_RX_QUEUES];
	int num_rx_pools;		/* == num_rx_queues in 82598 */
	int num_rx_queues_per_pool;	/* 1 if 82598, can be many if 82599 */
	u64 hw_csum_rx_error;
	u64 hw_rx_no_dma_resources;
	u64 rsc_total_count;
	u64 rsc_total_flush;
	u64 non_eop_descs;
#ifndef CONFIG_IXGBE_NAPI
	u64 rx_dropped_backlog;		/* count drops from rx intr handler */
#endif
	u64 rx_hdr_split;
	u32 alloc_rx_page_failed;
	u32 alloc_rx_buff_failed;

	struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS];

	struct ixgbe_dcb_config dcb_cfg;
	struct ixgbe_dcb_config temp_dcb_cfg;
	u8 dcb_set_bitmap;
	u8 dcbx_cap;
#ifndef HAVE_MQPRIO
	u8 tc;
#endif
	enum ixgbe_fc_mode last_lfc_mode;

	int num_msix_vectors;
	int max_msix_q_vectors;	/* true count of q_vectors for device */
	struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE];
	struct msix_entry *msix_entries;
#ifdef IXGBE_TCP_TIMER
	irqreturn_t (*msix_handlers[MAX_MSIX_COUNT])(int irq, void *data,
						     struct pt_regs *regs);
#endif
#ifndef HAVE_NETDEV_STATS_IN_NETDEV
	struct net_device_stats net_stats;
#endif
#ifdef ETHTOOL_TEST
	u32 test_icr;
	struct ixgbe_ring test_tx_ring;
	struct ixgbe_ring test_rx_ring;
#endif

	/* structs defined in ixgbe_hw.h */
	struct ixgbe_hw hw;
	u16 msg_enable;
	struct ixgbe_hw_stats stats;
#ifndef IXGBE_NO_LLI
	u32 lli_port;
	u32 lli_size;
	u32 lli_etype;
	u32 lli_vlan_pri;
#endif /* IXGBE_NO_LLI */
	u32 *config_space;
	u64 tx_busy;
#ifdef __VMKLNX__
	u32 n_rx_queues_allocated;
	u32 n_tx_queues_allocated;
#endif
	unsigned int tx_ring_count;
	unsigned int rx_ring_count;

	u32 link_speed;
	bool link_up;
	unsigned long link_check_timeout;

	struct timer_list service_timer;
	struct work_struct service_task;

	struct hlist_head fdir_filter_list;
	unsigned long fdir_overflow;	/* number of times ATR was backed off */
	union ixgbe_atr_input fdir_mask;
	int fdir_filter_count;
	u32 fdir_pballoc;
	u32 atr_sample_rate;
	spinlock_t fdir_perfect_lock;
#ifdef IXGBE_FCOE
	struct ixgbe_fcoe fcoe;
#endif /* IXGBE_FCOE */
	u32 wol;

	u16 bd_number;

	u16 eeprom_verh;
	u16 eeprom_verl;
	u16 eeprom_cap;
	bool netdev_registered;
#ifdef IXGBE_TCP_TIMER
	char tcp_timer_name[IFNAMSIZ + 9];
#endif
	u32 interrupt_event;
#ifdef HAVE_ETHTOOL_SET_PHYS_ID
	u32 led_reg;
#endif

	DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
	unsigned int num_vfs;
	struct vf_data_storage *vfinfo;
	struct vf_macvlans vf_mvs;
	struct vf_macvlans *mv_list;
	bool antispoofing_enabled;

	int node;
	u32 timer_event_accumulator;
	u32 vferr_refcount;
	struct ixgbe_mac_addr *mac_table;
	struct ieee_pfc *ixgbe_ieee_pfc;
	struct ieee_ets *ixgbe_ieee_ets;
};

struct ixgbe_fdir_filter {
	struct hlist_node fdir_node;
	union ixgbe_atr_input filter;
	u16 sw_idx;
	u16 action;
};

enum ixgbe_state_t {
	__IXGBE_TESTING,
	__IXGBE_RESETTING,
	__IXGBE_DOWN,
	__IXGBE_SERVICE_SCHED,
	__IXGBE_IN_SFP_INIT,
};

struct ixgbe_cb {
	union {			/* Union defining head/tail partner */
		struct sk_buff *head;
		struct sk_buff *tail;
	};
	dma_addr_t dma;
#ifdef HAVE_VLAN_RX_REGISTER
	u16 vid;		/* VLAN tag */
#endif
	u16 append_cnt;		/* number of skb's appended */
	bool delay_unmap;
};
#define IXGBE_CB(skb) ((struct ixgbe_cb *)(skb)->cb)
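/*
 * Illustrative usage of IXGBE_CB() above (sketch, not driver code): the
 * macro overlays struct ixgbe_cb on the skb->cb scratch area, giving the
 * driver per-packet state without any extra allocation:
 *
 *	IXGBE_CB(skb)->append_cnt = 0;
 *	IXGBE_CB(skb)->delay_unmap = false;
 */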
extern struct dcbnl_rtnl_ops dcbnl_ops;
extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
			      struct ixgbe_dcb_config *dst_dcb_cfg,
			      int tc_max);

extern u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 index);

/* needed by ixgbe_main.c */
extern int ixgbe_validate_mac_addr(u8 *mc_addr);
extern void ixgbe_check_options(struct ixgbe_adapter *adapter);
extern void ixgbe_assign_netdev_ops(struct net_device *netdev);

/* needed by ixgbe_ethtool.c */
extern char ixgbe_driver_name[];
extern const char ixgbe_driver_version[];

extern void ixgbe_up(struct ixgbe_adapter *adapter);
extern void ixgbe_down(struct ixgbe_adapter *adapter);
extern void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
extern void ixgbe_reset(struct ixgbe_adapter *adapter);
extern void ixgbe_set_ethtool_ops(struct net_device *netdev);
extern int ixgbe_setup_rx_resources(struct ixgbe_ring *);
extern int ixgbe_setup_tx_resources(struct ixgbe_ring *);
extern void ixgbe_free_rx_resources(struct ixgbe_ring *);
extern void ixgbe_free_tx_resources(struct ixgbe_ring *);
extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,
				    struct ixgbe_ring *);
extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,
				    struct ixgbe_ring *);
extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
extern bool ixgbe_is_ixgbe(struct pci_dev *pcidev);
extern netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *,
					 struct ixgbe_adapter *,
					 struct ixgbe_ring *);
extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
					     struct ixgbe_tx_buffer *);
extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
extern void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
				   struct ixgbe_ring *);
extern void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
			       struct ixgbe_ring *);
extern void ixgbe_set_rx_mode(struct net_device *netdev);
extern int ixgbe_write_mc_addr_list(struct net_device *netdev);
extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
extern void ixgbe_do_reset(struct net_device *netdev);
extern void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector);
extern void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
				   struct ixgbe_ring *);
extern void ixgbe_vlan_stripping_enable(struct ixgbe_adapter *adapter);
extern void ixgbe_vlan_stripping_disable(struct ixgbe_adapter *adapter);
#ifdef ETHTOOL_OPS_COMPAT
extern int ethtool_ioctl(struct ifreq *ifr);
#endif

#ifdef IXGBE_FCOE
extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
extern int ixgbe_fso(struct ixgbe_ring *tx_ring,
		     struct ixgbe_tx_buffer *first, u8 *hdr_len);
extern void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter);
extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
			  union ixgbe_adv_rx_desc *rx_desc,
			  struct sk_buff *skb);
extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
			      struct scatterlist *sgl, unsigned int sgc);
#ifdef HAVE_NETDEV_OPS_FCOE_DDP_TARGET
extern int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
				 struct scatterlist *sgl, unsigned int sgc);
#endif /* HAVE_NETDEV_OPS_FCOE_DDP_TARGET */
extern int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
#ifdef HAVE_NETDEV_OPS_FCOE_ENABLE
extern int ixgbe_fcoe_enable(struct net_device *netdev);
extern int ixgbe_fcoe_disable(struct net_device *netdev);
#endif /* HAVE_NETDEV_OPS_FCOE_ENABLE */
#ifdef CONFIG_DCB
#ifdef HAVE_DCBNL_OPS_GETAPP
extern u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter);
#endif /* HAVE_DCBNL_OPS_GETAPP */
extern u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
#endif /* CONFIG_DCB */
#ifdef HAVE_NETDEV_OPS_FCOE_GETWWN
extern int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
#endif
#endif /* IXGBE_FCOE */

#ifdef CONFIG_DCB
s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max_frame);
#endif /* CONFIG_DCB */

extern void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring);
extern int ixgbe_get_settings(struct net_device *netdev,
			      struct ethtool_cmd *ecmd);
extern int ixgbe_write_uc_addr_list(struct ixgbe_adapter *adapter,
				    struct net_device *netdev, int vfn);
extern void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter);
extern int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
				u8 *addr, u16 queue);
#ifdef __VMKLNX__
extern void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter);
extern void ixgbe_del_mac_filter_by_index(struct ixgbe_adapter *adapter,
					  int index);
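/*
 * Illustrative usage of the software MAC table API declared here (sketch
 * with a hypothetical caller, not driver code): entries are tracked with
 * the IXGBE_MAC_STATE_* bits defined earlier in this header:
 *
 *	if (ixgbe_available_rars(adapter) > 0)
 *		ixgbe_add_mac_filter(adapter, mac_addr, queue);
 */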
extern VMK_ReturnStatus ixgbe_passthru_config(struct ixgbe_adapter *adapter,
					      u32 vfIdx, int change,
					      void *data);
#endif
extern int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
				u8 *addr, u16 queue);
extern int ixgbe_available_rars(struct ixgbe_adapter *adapter);
#ifndef HAVE_VLAN_RX_REGISTER
extern void ixgbe_vlan_mode(struct net_device *, u32);
#endif

#ifndef ixgbe_get_netdev_tc_txq
#define ixgbe_get_netdev_tc_txq(dev, tc) (&dev->tc_to_txq[tc])
#endif
#endif /* _IXGBE_H_ */