diff --git a/README b/README index 9cd2ac9..a461c2c 100755 --- a/README +++ b/README @@ -597,22 +597,7 @@ Additional Configurations NAPI ---- - NAPI (Rx polling mode) is supported in the ixgbe driver. NAPI is enabled - or disabled based on the configuration of the kernel. To override the - default, use the following compile-time flags. - - You can tell if NAPI is enabled in the driver by looking at the version - number of the driver. It will contain the string -NAPI if NAPI is enabled. - - To enable NAPI, compile the driver module, passing in a configuration option: - - make CFLAGS_EXTRA=-DIXGBE_NAPI install - NOTE: This will not do anything if NAPI is disabled in the kernel. - - To disable NAPI, compile the driver module, passing in a configuration - option: - - make CFLAGS_EXTRA=-DIXGBE_NO_NAPI install + NAPI (Rx polling mode) is supported in the ixgbe driver. See ftp://robur.slu.se/pub/Linux/net-development/NAPI/usenix-paper.tgz for more information on NAPI. diff --git a/SUMS b/SUMS index e065372..a761026 100644 --- a/SUMS +++ b/SUMS @@ -1,45 +1,46 @@ -20527 5 ixgbe-3.9.17/pci.updates -42124 3 ixgbe-3.9.17/scripts/set_irq_affinity.sh -21465 8 ixgbe-3.9.17/ixgbe.7 -07975 48 ixgbe-3.9.17/src/ixgbe_phy.c -54190 90 ixgbe-3.9.17/src/ixgbe_ethtool.c -45722 4 ixgbe-3.9.17/src/ixgbe_dcb_82598.h -59625 27 ixgbe-3.9.17/src/ixgbe_sriov.c -65030 3 ixgbe-3.9.17/src/ixgbe_x540.h -64412 2 ixgbe-3.9.17/src/ixgbe_82598.h -61801 37 ixgbe-3.9.17/src/ixgbe_82598.c -35340 28 ixgbe-3.9.17/src/kcompat.c -45974 112 ixgbe-3.9.17/src/ixgbe_common.c -06064 26 ixgbe-3.9.17/src/ixgbe_fcoe.c -29670 10 ixgbe-3.9.17/src/ixgbe_dcb_82598.c -05385 2 ixgbe-3.9.17/src/ixgbe_ptp.c -51122 125 ixgbe-3.9.17/src/ixgbe_type.h -40273 3 ixgbe-3.9.17/src/ixgbe_82599.h -35951 18 ixgbe-3.9.17/src/ixgbe_mbx.c -60213 7 ixgbe-3.9.17/src/ixgbe_common.h -59585 30 ixgbe-3.9.17/src/ixgbe_sysfs.c -63071 281 ixgbe-3.9.17/src/ixgbe_main.c -17466 91 ixgbe-3.9.17/src/kcompat.h -00346 6 ixgbe-3.9.17/src/ixgbe_phy.h -26351 5 ixgbe-3.9.17/src/ixgbe_dcb_82599.h -12190 1 ixgbe-3.9.17/src/Module.supported -02272 33 ixgbe-3.9.17/src/ixgbe_param.c -21004 34 ixgbe-3.9.17/src/ixgbe_api.c -09647 24 ixgbe-3.9.17/src/ixgbe_dcb_nl.c -40007 20 ixgbe-3.9.17/src/ixgbe_dcb.c -60194 25 ixgbe-3.9.17/src/ixgbe_procfs.c -13798 8 ixgbe-3.9.17/src/ixgbe_api.h -37802 6 ixgbe-3.9.17/src/ixgbe_dcb.h -65172 3 ixgbe-3.9.17/src/ixgbe_fcoe.h -50953 5 ixgbe-3.9.17/src/ixgbe_mbx.h -21262 26 ixgbe-3.9.17/src/ixgbe_x540.c -45057 69 ixgbe-3.9.17/src/ixgbe_82599.c -40248 16 ixgbe-3.9.17/src/ixgbe_dcb_82599.c -19629 28 ixgbe-3.9.17/src/ixgbe.h -24711 4 ixgbe-3.9.17/src/ixgbe_sriov.h -06906 5 ixgbe-3.9.17/src/ixgbe_osdep.h -52269 30 ixgbe-3.9.17/src/kcompat_ethtool.c -08395 12 ixgbe-3.9.17/src/Makefile -39773 19 ixgbe-3.9.17/COPYING -03629 10 ixgbe-3.9.17/ixgbe.spec -57606 40 ixgbe-3.9.17/README +62184 5 ixgbe-3.10.16/pci.updates +42124 3 ixgbe-3.10.16/scripts/set_irq_affinity.sh +21465 8 ixgbe-3.10.16/ixgbe.7 +65473 48 ixgbe-3.10.16/src/ixgbe_phy.c +13823 89 ixgbe-3.10.16/src/ixgbe_ethtool.c +45722 4 ixgbe-3.10.16/src/ixgbe_dcb_82598.h +32170 30 ixgbe-3.10.16/src/ixgbe_sriov.c +65030 3 ixgbe-3.10.16/src/ixgbe_x540.h +64412 2 ixgbe-3.10.16/src/ixgbe_82598.h +61801 37 ixgbe-3.10.16/src/ixgbe_82598.c +56906 28 ixgbe-3.10.16/src/kcompat.c +05886 112 ixgbe-3.10.16/src/ixgbe_common.c +56677 35 ixgbe-3.10.16/src/ixgbe_lib.c +48614 26 ixgbe-3.10.16/src/ixgbe_fcoe.c +29670 10 ixgbe-3.10.16/src/ixgbe_dcb_82598.c +05385 2 ixgbe-3.10.16/src/ixgbe_ptp.c +59613 125 ixgbe-3.10.16/src/ixgbe_type.h 
+40273 3 ixgbe-3.10.16/src/ixgbe_82599.h +35951 18 ixgbe-3.10.16/src/ixgbe_mbx.c +60213 7 ixgbe-3.10.16/src/ixgbe_common.h +31886 30 ixgbe-3.10.16/src/ixgbe_sysfs.c +47883 248 ixgbe-3.10.16/src/ixgbe_main.c +08971 93 ixgbe-3.10.16/src/kcompat.h +47797 6 ixgbe-3.10.16/src/ixgbe_phy.h +26351 5 ixgbe-3.10.16/src/ixgbe_dcb_82599.h +12190 1 ixgbe-3.10.16/src/Module.supported +25450 29 ixgbe-3.10.16/src/ixgbe_param.c +44865 34 ixgbe-3.10.16/src/ixgbe_api.c +05434 23 ixgbe-3.10.16/src/ixgbe_dcb_nl.c +03102 20 ixgbe-3.10.16/src/ixgbe_dcb.c +60194 25 ixgbe-3.10.16/src/ixgbe_procfs.c +13798 8 ixgbe-3.10.16/src/ixgbe_api.h +52926 6 ixgbe-3.10.16/src/ixgbe_dcb.h +63796 3 ixgbe-3.10.16/src/ixgbe_fcoe.h +50953 5 ixgbe-3.10.16/src/ixgbe_mbx.h +21262 26 ixgbe-3.10.16/src/ixgbe_x540.c +09063 66 ixgbe-3.10.16/src/ixgbe_82599.c +40248 16 ixgbe-3.10.16/src/ixgbe_dcb_82599.c +30402 28 ixgbe-3.10.16/src/ixgbe.h +11362 3 ixgbe-3.10.16/src/ixgbe_sriov.h +06906 5 ixgbe-3.10.16/src/ixgbe_osdep.h +52269 30 ixgbe-3.10.16/src/kcompat_ethtool.c +17868 12 ixgbe-3.10.16/src/Makefile +39773 19 ixgbe-3.10.16/COPYING +24272 10 ixgbe-3.10.16/ixgbe.spec +32092 40 ixgbe-3.10.16/README diff --git a/ixgbe.spec b/ixgbe.spec index 52f9270..4aa6253 100644 --- a/ixgbe.spec +++ b/ixgbe.spec @@ -1,6 +1,6 @@ Name: ixgbe Summary: Intel(R) 10GbE PCI Express Ethernet Connection -Version: 3.9.17 +Version: 3.10.16 Release: 1 Source: %{name}-%{version}.tar.gz Vendor: Intel Corporation diff --git a/pci.updates b/pci.updates index 357a06f..c308b22 100644 --- a/pci.updates +++ b/pci.updates @@ -81,8 +81,11 @@ 8086 5003 Ethernet 10G 2P X540-t Adapter 1529 82599 10 Gigabit Dual Port Backplane Connection with FCoE 152a 82599 10 Gigabit Dual port Network Connection with FCoE + 154a Ethernet Converged Network Adapter X520-4 + 8086 011a Ethernet Converged Network Adapter X520-4 + 8086 011b Ethernet Converged Network Adapter X520-4 + 8086 011c Ethernet Converged Network Adapter X520-4 154d 82599EB 10-Gigabit SFP+ Network Connection 8086 7b11 Ethernet 10G 2P X520 Adapter 154f 82599EB 10Gigabit Dual Port Network Connection 1557 82599EN Intel(R) 82599 10 Gigabit Network Connection - 1558 82599 QSFP Ethernet Network Adapter diff --git a/src/Makefile b/src/Makefile index 05c3c27..f25935d 100644 --- a/src/Makefile +++ b/src/Makefile @@ -30,9 +30,8 @@ # core driver files CFILES = ixgbe_main.c ixgbe_common.c ixgbe_api.c ixgbe_param.c \ - ixgbe_ethtool.c kcompat.c ixgbe_82598.c \ - ixgbe_82599.c ixgbe_ptp.c \ - ixgbe_x540.c \ + ixgbe_lib.c ixgbe_ethtool.c kcompat.c ixgbe_82598.c \ + ixgbe_82599.c ixgbe_ptp.c ixgbe_x540.c \ ixgbe_sriov.c ixgbe_mbx.c \ ixgbe_dcb.c ixgbe_dcb_82598.c \ ixgbe_dcb_82599.c \ diff --git a/src/ixgbe.h b/src/ixgbe.h index b188028..d5c2601 100644 --- a/src/ixgbe.h +++ b/src/ixgbe.h @@ -62,10 +62,6 @@ #include "ixgbe_fcoe.h" #endif /* CONFIG_FCOE or CONFIG_FCOE_MODULE */ -#if defined(CONFIG_PTP_1588_CLOCK) || defined(CONFIG_PTP_1588_CLOCK_MODULE) -#define HAVE_IXGBE_PTP -#endif - #include "ixgbe_api.h" #define PFX "ixgbe: " @@ -74,6 +70,7 @@ printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \ __func__ , ## args))) + /* TX/RX descriptor defines */ #define IXGBE_DEFAULT_TXD 512 #define IXGBE_DEFAULT_TX_WORK 256 @@ -96,7 +93,7 @@ #define IXGBE_MAX_FCPAUSE 0xFFFF /* Supported Rx Buffer Sizes */ -#define IXGBE_RXBUFFER_512 512 /* Used for packet split */ +#define IXGBE_RXBUFFER_256 256 /* Used for skb receive header */ #ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT #define IXGBE_RXBUFFER_1536 1536 #define IXGBE_RXBUFFER_2K 2048 @@ 
-109,13 +106,14 @@ #define IXGBE_MAX_RXBUFFER 16384 /* largest size for single descriptor */ /* - * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN mans we - * reserve 2 more, and skb_shared_info adds an additional 384 bytes more, - * this adds up to 512 bytes of extra data meaning the smallest allocation - * we could have is 1K. - * i.e. RXBUFFER_512 --> size-1024 slab + * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we + * reserve 64 more, and skb_shared_info adds an additional 320 bytes more, + * this adds up to 448 bytes of extra data. + * + * Since netdev_alloc_skb now allocates a page fragment we can use a value + * of 256 and the resultant skb will have a truesize of 960 or less. */ -#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_512 +#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_256 #define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) @@ -147,7 +145,7 @@ #define IXGBE_X540_VF_DEVICE_ID 0x1515 #ifdef CONFIG_PCI_IOV -#define VMDQ_P(p) ((p) + adapter->num_vfs) +#define VMDQ_P(p) ((p) + adapter->ring_feature[RING_F_VMDQ].offset) #else #define VMDQ_P(p) (p) #endif @@ -199,7 +197,6 @@ struct vf_data_storage { u16 tx_rate; u16 vlan_count; u8 spoofchk_enabled; - struct pci_dev *vfdev; }; struct vf_macvlans { @@ -304,7 +301,7 @@ enum ixgbe_ring_state_t { #endif __IXGBE_RX_CSUM_UDP_ZERO_ERR, #ifdef IXGBE_FCOE - __IXGBE_RX_FCOE_BUFSZ, + __IXGBE_RX_FCOE, #endif }; @@ -398,10 +395,16 @@ enum ixgbe_ring_f_enum { #define MAX_TX_QUEUES IXGBE_MAX_FDIR_INDICES #endif /* IXGBE_FCOE */ struct ixgbe_ring_feature { - int indices; - int mask; + u16 limit; /* upper limit on feature indices */ + u16 indices; /* current value of indices */ + u16 mask; /* Mask used for feature to ring mapping */ + u16 offset; /* offset to start of feature */ }; +#define IXGBE_82599_VMDQ_8Q_MASK 0x78 +#define IXGBE_82599_VMDQ_4Q_MASK 0x7C +#define IXGBE_82599_VMDQ_2Q_MASK 0x7E + #ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT /* * FCoE requires that all Rx buffers be over 2200 bytes in length. Since @@ -411,7 +414,7 @@ struct ixgbe_ring_feature { #if defined(IXGBE_FCOE) && (PAGE_SIZE < 8192) static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring) { - return test_bit(__IXGBE_RX_FCOE_BUFSZ, &ring->state) ? 1 : 0; + return test_bit(__IXGBE_RX_FCOE, &ring->state) ? 
1 : 0; } #else #define ixgbe_rx_pg_order(_ring) 0 @@ -449,9 +452,7 @@ struct ixgbe_q_vector { u16 itr; /* Interrupt throttle rate written to EITR */ struct ixgbe_ring_container rx, tx; -#ifdef CONFIG_IXGBE_NAPI struct napi_struct napi; -#endif #ifndef HAVE_NETDEV_NAPI_LIST struct net_device poll_dev; #endif @@ -573,56 +574,52 @@ struct ixgbe_adapter { #ifndef IXGBE_NO_LLI #define IXGBE_FLAG_LLI_PUSH (u32)(1 << 4) #endif -#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 8) +#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 5) #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) -#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 9) -#define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 10) -#define IXGBE_FLAG_DCA_ENABLED_DATA (u32)(1 << 11) +#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 6) +#define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 7) +#define IXGBE_FLAG_DCA_ENABLED_DATA (u32)(1 << 8) #else #define IXGBE_FLAG_DCA_ENABLED (u32)0 #define IXGBE_FLAG_DCA_CAPABLE (u32)0 #define IXGBE_FLAG_DCA_ENABLED_DATA (u32)0 #endif -#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 12) -#define IXGBE_FLAG_DCB_ENABLED (u32)(1 << 13) -#define IXGBE_FLAG_DCB_CAPABLE (u32)(1 << 14) -#define IXGBE_FLAG_RSS_ENABLED (u32)(1 << 15) -#define IXGBE_FLAG_RSS_CAPABLE (u32)(1 << 16) -#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 18) -#define IXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 19) -#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 20) -#define IXGBE_FLAG_NEED_LINK_CONFIG (u32)(1 << 21) -#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 22) -#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 23) +#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 9) +#define IXGBE_FLAG_DCB_ENABLED (u32)(1 << 10) +#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 11) +#define IXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 12) +#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 13) +#define IXGBE_FLAG_NEED_LINK_CONFIG (u32)(1 << 14) +#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 15) +#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 16) #ifdef IXGBE_FCOE -#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 24) -#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 25) +#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 17) +#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 18) #endif /* IXGBE_FCOE */ -#define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 26) -#define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 27) -#define IXGBE_FLAG_SRIOV_REPLICATION_ENABLE (u32)(1 << 28) -#define IXGBE_FLAG_SRIOV_L2SWITCH_ENABLE (u32)(1 << 29) -#define IXGBE_FLAG_SRIOV_L2LOOPBACK_ENABLE (u32)(1 << 30) -#define IXGBE_FLAG_RX_BB_CAPABLE (u32)(1 << 31) +#define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 19) +#define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 20) +#define IXGBE_FLAG_SRIOV_REPLICATION_ENABLE (u32)(1 << 21) +#define IXGBE_FLAG_SRIOV_L2SWITCH_ENABLE (u32)(1 << 22) +#define IXGBE_FLAG_SRIOV_L2LOOPBACK_ENABLE (u32)(1 << 23) u32 flags2; #ifndef IXGBE_NO_HW_RSC -#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1) +#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1 << 0) #define IXGBE_FLAG2_RSC_ENABLED (u32)(1 << 1) #else #define IXGBE_FLAG2_RSC_CAPABLE 0 #define IXGBE_FLAG2_RSC_ENABLED 0 #endif -#define IXGBE_FLAG2_VMDQ_DEFAULT_OVERRIDE (u32)(1 << 2) -#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE (u32)(1 << 4) -#define IXGBE_FLAG2_TEMP_SENSOR_EVENT (u32)(1 << 5) -#define IXGBE_FLAG2_SEARCH_FOR_SFP (u32)(1 << 6) -#define IXGBE_FLAG2_SFP_NEEDS_RESET (u32)(1 << 7) -#define IXGBE_FLAG2_RESET_REQUESTED (u32)(1 << 8) -#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT (u32)(1 << 9) -#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP (u32)(1 << 10) -#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 11) -#define IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED 
(u32)(1 << 12) +#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE (u32)(1 << 3) +#define IXGBE_FLAG2_TEMP_SENSOR_EVENT (u32)(1 << 4) +#define IXGBE_FLAG2_SEARCH_FOR_SFP (u32)(1 << 5) +#define IXGBE_FLAG2_SFP_NEEDS_RESET (u32)(1 << 6) +#define IXGBE_FLAG2_RESET_REQUESTED (u32)(1 << 7) +#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT (u32)(1 << 8) +#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP (u32)(1 << 9) +#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 10) +#define IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED (u32)(1 << 11) +#define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 12) /* Tx fast path data */ int num_tx_queues; @@ -650,9 +647,6 @@ struct ixgbe_adapter { u64 rsc_total_count; u64 rsc_total_flush; u64 non_eop_descs; -#ifndef CONFIG_IXGBE_NAPI - u64 rx_dropped_backlog; /* count drops from rx intr handler */ -#endif u32 alloc_rx_page_failed; u32 alloc_rx_buff_failed; @@ -671,8 +665,8 @@ struct ixgbe_adapter { #endif enum ixgbe_fc_mode last_lfc_mode; - int num_msix_vectors; - int max_msix_q_vectors; /* true count of q_vectors for device */ + int num_q_vectors; /* current number of q_vectors for device */ + int max_q_vectors; /* upper limit of q_vectors for device */ struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE]; struct msix_entry *msix_entries; @@ -799,6 +793,8 @@ struct ixgbe_cb { }; #define IXGBE_CB(skb) ((struct ixgbe_cb *)(skb)->cb) +/* ESX ixgbe CIM IOCTL definition */ + #ifdef IXGBE_SYSFS void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter); int ixgbe_sysfs_init(struct ixgbe_adapter *adapter); @@ -854,11 +850,10 @@ extern void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter, extern void ixgbe_set_rx_mode(struct net_device *netdev); extern int ixgbe_write_mc_addr_list(struct net_device *netdev); extern int ixgbe_setup_tc(struct net_device *dev, u8 tc); -#ifdef IXGBE_FCOE extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32); -#endif /* IXGBE_FCOE */ extern void ixgbe_do_reset(struct net_device *netdev); extern void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector); +extern int ixgbe_poll(struct napi_struct *napi, int budget); extern void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_ring *); extern void ixgbe_vlan_stripping_enable(struct ixgbe_adapter *adapter); @@ -872,7 +867,6 @@ extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter); extern int ixgbe_fso(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first, u8 *hdr_len); -extern void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter); extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb); @@ -883,9 +877,14 @@ extern int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid, struct scatterlist *sgl, unsigned int sgc); #endif /* HAVE_NETDEV_OPS_FCOE_DDP_TARGET */ extern int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid); +extern int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter); +extern void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter); #ifdef HAVE_NETDEV_OPS_FCOE_ENABLE extern int ixgbe_fcoe_enable(struct net_device *netdev); extern int ixgbe_fcoe_disable(struct net_device *netdev); +#else +int ixgbe_fcoe_ddp_enable(struct ixgbe_adapter *adapter); +void ixgbe_fcoe_ddp_disable(struct ixgbe_adapter *adapter); #endif /* HAVE_NETDEV_OPS_FCOE_ENABLE */ #ifdef CONFIG_DCB #ifdef HAVE_DCBNL_OPS_GETAPP @@ -893,6 +892,7 @@ extern u8 ixgbe_fcoe_getapp(struct net_device *netdev); #endif /* HAVE_DCBNL_OPS_GETAPP */ extern u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up); #endif /* CONFIG_DCB */ +extern 
u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter); #ifdef HAVE_NETDEV_OPS_FCOE_GETWWN extern int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type); #endif @@ -904,6 +904,8 @@ s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max_frame); #endif /* HAVE_DCBNL_IEEE */ #endif /* CONFIG_DCB */ +extern int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, + u16 subdevice_id); extern void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring); extern int ixgbe_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd); @@ -918,8 +920,7 @@ extern int ixgbe_available_rars(struct ixgbe_adapter *adapter); #ifndef HAVE_VLAN_RX_REGISTER extern void ixgbe_vlan_mode(struct net_device *, u32); #endif -#ifndef ixgbe_get_netdev_tc_txq -#define ixgbe_get_netdev_tc_txq(dev, tc) (&dev->tc_to_txq[tc]) -#endif + + extern void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter); #endif /* _IXGBE_H_ */ diff --git a/src/ixgbe_82599.c b/src/ixgbe_82599.c index c57d84e..678aac4 100644 --- a/src/ixgbe_82599.c +++ b/src/ixgbe_82599.c @@ -40,10 +40,6 @@ static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw, u16 offset, u16 *data); static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data); -static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, - u8 dev_addr, u8 *data); -static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, - u8 dev_addr, u8 data); void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) { @@ -92,25 +88,7 @@ s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw) struct ixgbe_mac_info *mac = &hw->mac; struct ixgbe_phy_info *phy = &hw->phy; s32 ret_val = 0; - u32 esdp; - - if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) { - /* Store flag indicating I2C bus access control unit. */ - hw->phy.qsfp_shared_i2c_bus = TRUE; - - /* Initialize access to QSFP+ I2C bus */ - esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); - esdp |= IXGBE_ESDP_SDP0_DIR; - esdp &= ~IXGBE_ESDP_SDP1_DIR; - esdp &= ~IXGBE_ESDP_SDP0; - esdp &= ~IXGBE_ESDP_SDP0_NATIVE; - esdp &= ~IXGBE_ESDP_SDP1_NATIVE; - IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); - IXGBE_WRITE_FLUSH(hw); - phy->ops.read_i2c_byte = &ixgbe_read_i2c_byte_82599; - phy->ops.write_i2c_byte = &ixgbe_write_i2c_byte_82599; - } /* Identify the PHY or SFP module */ ret_val = phy->ops.identify(hw); if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED) @@ -429,6 +407,7 @@ enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw) case IXGBE_DEV_ID_82599_SFP_FCOE: case IXGBE_DEV_ID_82599_SFP_EM: case IXGBE_DEV_ID_82599_SFP_SF2: + case IXGBE_DEV_ID_82599_SFP_SF_QP: case IXGBE_DEV_ID_82599EN_SFP: media_type = ixgbe_media_type_fiber; break; @@ -441,9 +420,6 @@ enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw) case IXGBE_DEV_ID_82599_LS: media_type = ixgbe_media_type_fiber_lco; break; - case IXGBE_DEV_ID_82599_QSFP_SF_QP: - media_type = ixgbe_media_type_fiber_qsfp; - break; default: media_type = ixgbe_media_type_unknown; break; @@ -2202,113 +2178,4 @@ static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw, return ret_val; } -/** - * ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C - * @hw: pointer to hardware structure - * @byte_offset: byte offset to read - * @data: value read - * - * Performs byte read operation to SFP module's EEPROM over I2C interface at - * a specified device address. 
- **/ -static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, - u8 dev_addr, u8 *data) -{ - u32 esdp; - s32 status; - s32 timeout = 200; - - if (hw->phy.qsfp_shared_i2c_bus == TRUE) { - /* Acquire I2C bus ownership. */ - esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); - esdp |= IXGBE_ESDP_SDP0; - IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); - IXGBE_WRITE_FLUSH(hw); - - while (timeout) { - esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); - if (esdp & IXGBE_ESDP_SDP1) - break; - - msleep(5); - timeout--; - } - - if (!timeout) { - hw_dbg(hw, "Driver can't access resource," - " acquiring I2C bus timeout.\n"); - status = IXGBE_ERR_I2C; - goto release_i2c_access; - } - } - - status = ixgbe_read_i2c_byte_generic(hw, byte_offset, dev_addr, data); - -release_i2c_access: - - if (hw->phy.qsfp_shared_i2c_bus == TRUE) { - /* Release I2C bus ownership. */ - esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); - esdp &= ~IXGBE_ESDP_SDP0; - IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); - IXGBE_WRITE_FLUSH(hw); - } - - return status; -} - -/** - * ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C - * @hw: pointer to hardware structure - * @byte_offset: byte offset to write - * @data: value to write - * - * Performs byte write operation to SFP module's EEPROM over I2C interface at - * a specified device address. - **/ -static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, - u8 dev_addr, u8 data) -{ - u32 esdp; - s32 status; - s32 timeout = 200; - - if (hw->phy.qsfp_shared_i2c_bus == TRUE) { - /* Acquire I2C bus ownership. */ - esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); - esdp |= IXGBE_ESDP_SDP0; - IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); - IXGBE_WRITE_FLUSH(hw); - - while (timeout) { - esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); - if (esdp & IXGBE_ESDP_SDP1) - break; - - msleep(5); - timeout--; - } - - if (!timeout) { - hw_dbg(hw, "Driver can't access resource," - " acquiring I2C bus timeout.\n"); - status = IXGBE_ERR_I2C; - goto release_i2c_access; - } - } - - status = ixgbe_write_i2c_byte_generic(hw, byte_offset, dev_addr, data); - -release_i2c_access: - - if (hw->phy.qsfp_shared_i2c_bus == TRUE) { - /* Release I2C bus ownership. 
*/ - esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); - esdp &= ~IXGBE_ESDP_SDP0; - IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); - IXGBE_WRITE_FLUSH(hw); - } - - return status; -} diff --git a/src/ixgbe_api.c b/src/ixgbe_api.c index 9a0a43e..4e3804e 100644 --- a/src/ixgbe_api.c +++ b/src/ixgbe_api.c @@ -104,7 +104,7 @@ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw) case IXGBE_DEV_ID_82599_SFP_FCOE: case IXGBE_DEV_ID_82599_SFP_EM: case IXGBE_DEV_ID_82599_SFP_SF2: - case IXGBE_DEV_ID_82599_QSFP_SF_QP: + case IXGBE_DEV_ID_82599_SFP_SF_QP: case IXGBE_DEV_ID_82599EN_SFP: case IXGBE_DEV_ID_82599_CX4: case IXGBE_DEV_ID_82599_LS: diff --git a/src/ixgbe_common.c b/src/ixgbe_common.c index a0a0046..8646bbd 100644 --- a/src/ixgbe_common.c +++ b/src/ixgbe_common.c @@ -3155,17 +3155,14 @@ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) **/ s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq) { - u32 mpsar; u32 rar = hw->mac.san_mac_rar_index; if (vmdq < 32) { - mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); - mpsar |= 1 << vmdq; - IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0); } else { - mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); - mpsar |= 1 << (vmdq - 32); - IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32)); } return 0; @@ -3601,20 +3598,22 @@ void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf) * PFVFSPOOF register array is size 8 with 8 bits assigned to * MAC anti-spoof enables in each register array element. */ - for (j = 0; j < IXGBE_PFVFSPOOF_REG_COUNT; j++) + for (j = 0; j < pf_target_reg; j++) IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof); - /* If not enabling anti-spoofing then done */ - if (!enable) - return; - /* * The PF should be allowed to spoof so that it can support - * emulation mode NICs. Reset the bit assigned to the PF + * emulation mode NICs. Do not set the bits assigned to the PF + */ + pfvfspoof &= (1 << pf_target_shift) - 1; + IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof); + + /* + * Remaining pools belong to the PF so they do not need to have + * anti-spoofing enabled. 
*/ - pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg)); - pfvfspoof ^= (1 << pf_target_shift); - IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg), pfvfspoof); + for (j++; j < IXGBE_PFVFSPOOF_REG_COUNT; j++) + IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), 0); } /** diff --git a/src/ixgbe_dcb.c b/src/ixgbe_dcb.c index f687afb..f515fd2 100644 --- a/src/ixgbe_dcb.c +++ b/src/ixgbe_dcb.c @@ -177,8 +177,8 @@ s32 ixgbe_dcb_calculate_tc_credits_cee(struct ixgbe_hw *hw, return ret_val; } -/* ixgbe_dcb_unpack_pfc_cee - Unpack dcb_config PFC info - * +/** + * ixgbe_dcb_unpack_pfc_cee - Unpack dcb_config PFC info * @cfg: dcb configuration to unpack into hardware consumable fields * @map: user priority to traffic class map * @pfc_up: u8 to store user priority PFC bitmask @@ -190,111 +190,88 @@ s32 ixgbe_dcb_calculate_tc_credits_cee(struct ixgbe_hw *hw, */ void ixgbe_dcb_unpack_pfc_cee(struct ixgbe_dcb_config *cfg, u8 *map, u8 *pfc_up) { - int tc, up; - - *pfc_up = 0; - for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) { - if (cfg->tc_config[tc].pfc == ixgbe_dcb_pfc_disabled) - continue; + struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; + int up; - /* PFC is enabled on 'tc' mark each user priority bit - * in 'pfc_up' that has a user priority mapped to this - * traffic class. - */ - for (up = 0; up < IXGBE_DCB_MAX_USER_PRIORITY; up++) { - if (map[up] == tc) - *pfc_up |= 1 << up; - } + /* + * If the TC for this user priority has PFC enabled then set the + * matching bit in 'pfc_up' to reflect that PFC is enabled. + */ + for (*pfc_up = 0, up = 0; up < IXGBE_DCB_MAX_USER_PRIORITY; up++) { + if (tc_config[map[up]].pfc != ixgbe_dcb_pfc_disabled) + *pfc_up |= 1 << up; } } void ixgbe_dcb_unpack_refill_cee(struct ixgbe_dcb_config *cfg, int direction, u16 *refill) { - struct ixgbe_dcb_tc_path *p; - int i; + struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; + int tc; - for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { - p = &cfg->tc_config[i].path[direction]; - refill[i] = p->data_credits_refill; - } + for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) + refill[tc] = tc_config[tc].path[direction].data_credits_refill; } void ixgbe_dcb_unpack_max_cee(struct ixgbe_dcb_config *cfg, u16 *max) { - int i; + struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; + int tc; - for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) - max[i] = cfg->tc_config[i].desc_credits_max; + for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) + max[tc] = tc_config[tc].desc_credits_max; } void ixgbe_dcb_unpack_bwgid_cee(struct ixgbe_dcb_config *cfg, int direction, u8 *bwgid) { - struct ixgbe_dcb_tc_path *p; - int i; + struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; + int tc; - for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { - p = &cfg->tc_config[i].path[direction]; - bwgid[i] = p->bwg_id; - } + for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) + bwgid[tc] = tc_config[tc].path[direction].bwg_id; } void ixgbe_dcb_unpack_tsa_cee(struct ixgbe_dcb_config *cfg, int direction, u8 *tsa) { - struct ixgbe_dcb_tc_path *p; - int i; + struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; + int tc; - for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { - p = &cfg->tc_config[i].path[direction]; - tsa[i] = p->tsa; - } + for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) + tsa[tc] = tc_config[tc].path[direction].tsa; } -/* ixgbe_dcb_bitmap_to_up - find first set user priority of bitmap - * @bitmap: bitmap with each bit representing a UP - * - * returns the user priority of the bit set 1-8 or 
0 if no bits - * are set. - */ -static u8 ixgbe_dcb_bitmap_to_up(u8 bitmap) +u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *cfg, int direction, u8 up) { - int j; - int up = -1; + struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; + u8 prio_mask = 1 << up; + u8 tc = cfg->num_tcs.pg_tcs; - for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) { - if (bitmap & (1 << j)) { - up = j; + /* If tc is 0 then DCB is likely not enabled or supported */ + if (!tc) + goto out; + + /* + * Test from maximum TC to 1 and report the first match we find. If + * we find no match we can assume that the TC is 0 since the TC must + * be set for all user priorities + */ + for (tc--; tc; tc--) { + if (prio_mask & tc_config[tc].path[direction].up_to_tc_bitmap) break; - } } - - return up < 0 ? 0 : up + 1; +out: + return tc; } void ixgbe_dcb_unpack_map_cee(struct ixgbe_dcb_config *cfg, int direction, u8 *map) { - struct ixgbe_dcb_tc_path *p; - int up; - u8 i; - u8 tmap; + u8 up; - for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { - up = 0; - p = &cfg->tc_config[i].path[direction]; - tmap = p->up_to_tc_bitmap; - - up = ixgbe_dcb_bitmap_to_up(tmap); - - while (up > 0) { - up--; - map[up] = i; - tmap &= ~(1 << up); - up = ixgbe_dcb_bitmap_to_up(tmap); - } - } + for (up = 0; up < IXGBE_DCB_MAX_USER_PRIORITY; up++) + map[up] = ixgbe_dcb_get_tc_from_up(cfg, direction, up); } /** diff --git a/src/ixgbe_dcb.h b/src/ixgbe_dcb.h index a669045..ae6bd69 100644 --- a/src/ixgbe_dcb.h +++ b/src/ixgbe_dcb.h @@ -161,6 +161,7 @@ void ixgbe_dcb_unpack_max_cee(struct ixgbe_dcb_config *, u16 *); void ixgbe_dcb_unpack_bwgid_cee(struct ixgbe_dcb_config *, int, u8 *); void ixgbe_dcb_unpack_tsa_cee(struct ixgbe_dcb_config *, int, u8 *); void ixgbe_dcb_unpack_map_cee(struct ixgbe_dcb_config *, int, u8 *); +u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *, int, u8); /* DCB initialization */ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, u16 *, u16 *, u8 *, u8 *, u8 *); diff --git a/src/ixgbe_dcb_nl.c b/src/ixgbe_dcb_nl.c index 38866d5..8fb3aa8 100644 --- a/src/ixgbe_dcb_nl.c +++ b/src/ixgbe_dcb_nl.c @@ -46,30 +46,6 @@ #define DCB_NO_HW_CHG 1 /* DCB configuration did not change */ #define DCB_HW_CHG 2 /* DCB configuration changed, no reset */ -/** - * ixgbe_get_tc_from_up - get the TC UP is mapped to - * @netdev : the corresponding netdev - * @up: the 802.1p user priority value - * - * Returns : TC, UP is mapped to - */ -static u8 ixgbe_get_tc_from_up(struct net_device *netdev, u8 up) -{ - struct ixgbe_adapter *adapter; - struct ixgbe_dcb_tc_config *tc_cfg; - u32 i; - - adapter = netdev_priv(netdev); - /* cache should contain current or pending configuration */ - for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { - tc_cfg = &adapter->temp_dcb_cfg.tc_config[i]; - if (tc_cfg->path[0].up_to_tc_bitmap & (1 << up)) - return i; - } - return 0; -} - - int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max) { struct ixgbe_dcb_config *scfg = &adapter->temp_dcb_cfg; @@ -80,13 +56,12 @@ int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max) int tx = IXGBE_DCB_TX_CONFIG; int rx = IXGBE_DCB_RX_CONFIG; int changes = 0; -#ifdef IXGBE_FCOE - u8 up = ixgbe_fcoe_getapp(adapter->netdev); - if (up && !(up & (1 << adapter->fcoe.up_set))) +#ifdef IXGBE_FCOE + if (adapter->fcoe.up_set != adapter->fcoe.up) changes |= BIT_APP_UPCHG; -#endif /* IXGBE_FCOE */ +#endif /* IXGBE_FCOE */ for (i = DCB_PG_ATTR_TC_0; i < tc_max + DCB_PG_ATTR_TC_0; i++) { src = &scfg->tc_config[i - DCB_PG_ATTR_TC_0]; dst = &dcfg->tc_config[i - 
DCB_PG_ATTR_TC_0]; @@ -174,32 +149,21 @@ static u8 ixgbe_dcbnl_get_state(struct net_device *netdev) static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state) { - u8 err = 0; - u8 prio_tc[IXGBE_DCB_MAX_USER_PRIORITY] = {0}; - int i; struct ixgbe_adapter *adapter = netdev_priv(netdev); + int err = 0; /* Fail command if not in CEE mode */ if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) return 1; /* verify there is something to do, if not then exit */ - if (!!state != !(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) - return err; - - if (state > 0) { - err = ixgbe_setup_tc(netdev, adapter->dcb_cfg.num_tcs.pg_tcs); - ixgbe_dcb_unpack_map_cee(&adapter->dcb_cfg, - IXGBE_DCB_TX_CONFIG, - prio_tc); - } else { - err = ixgbe_setup_tc(netdev, 0); - } + if (!state == !(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) + goto out; - for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) - netdev_set_prio_tc_map(netdev, i, prio_tc[i]); - - return err; + err = ixgbe_setup_tc(netdev, + state ? adapter->dcb_cfg.num_tcs.pg_tcs : 0); +out: + return !!err; } static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev, @@ -316,28 +280,22 @@ static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id, *bw_pct = adapter->dcb_cfg.bw_percentage[1][bwg_id]; } -static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority, - u8 setting) +static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int up, u8 pfc) { - u8 tc = priority; struct ixgbe_adapter *adapter = netdev_priv(netdev); + u8 tc = ixgbe_dcb_get_tc_from_up(&adapter->temp_dcb_cfg, 0, up); - tc = ixgbe_get_tc_from_up(netdev, priority); - - adapter->temp_dcb_cfg.tc_config[tc].pfc = setting; + adapter->temp_dcb_cfg.tc_config[tc].pfc = pfc; if (adapter->temp_dcb_cfg.tc_config[tc].pfc != adapter->dcb_cfg.tc_config[tc].pfc) adapter->temp_dcb_cfg.pfc_mode_enable = true; } -static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority, - u8 *setting) +static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int up, u8 *pfc) { - u8 tc = priority; struct ixgbe_adapter *adapter = netdev_priv(netdev); - - tc = ixgbe_get_tc_from_up(netdev, priority); - *setting = adapter->dcb_cfg.tc_config[tc].pfc; + u8 tc = ixgbe_dcb_get_tc_from_up(&adapter->dcb_cfg, 0, up); + *pfc = adapter->dcb_cfg.tc_config[tc].pfc; } #ifdef IXGBE_FCOE @@ -367,8 +325,8 @@ static void ixgbe_dcbnl_devreset(struct net_device *dev) clear_bit(__IXGBE_RESETTING, &adapter->state); } -#endif +#endif static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); @@ -382,7 +340,7 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) return ret; adapter->dcb_set_bitmap |= ixgbe_copy_dcb_cfg(adapter, - IXGBE_DCB_MAX_TRAFFIC_CLASS); + IXGBE_DCB_MAX_TRAFFIC_CLASS); if (!adapter->dcb_set_bitmap) return ret; @@ -434,9 +392,7 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) * changes or the up2tc mapping is updated. 
*/ if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) { - u8 up = ixgbe_fcoe_getapp(adapter->netdev); - - adapter->fcoe.up_set = ffs(up) - 1; + adapter->fcoe.up_set = adapter->fcoe.up; ixgbe_dcbnl_devreset(netdev); ret = DCB_HW_CHG_RST; } @@ -585,7 +541,7 @@ static u8 ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id) case DCB_APP_IDTYPE_ETHTYPE: #ifdef IXGBE_FCOE if (id == ETH_P_FCOE) - rval = ixgbe_fcoe_getapp(netdev_priv(netdev)); + rval = ixgbe_fcoe_getapp(netdev); #endif break; case DCB_APP_IDTYPE_PORTNUM: @@ -664,7 +620,7 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev, { struct ixgbe_adapter *adapter = netdev_priv(dev); int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN; - int i; + int i, err = 0; __u8 max_tc = 0; if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) @@ -691,12 +647,14 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev, return -EINVAL; if (max_tc != netdev_get_num_tc(dev)) - ixgbe_setup_tc(dev, max_tc); + err = ixgbe_setup_tc(dev, max_tc); - for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) - netdev_set_prio_tc_map(dev, i, ets->prio_tc[i]); + if (err) + goto err_out; - return ixgbe_dcb_hw_ets(&adapter->hw, ets, max_frame); + err = ixgbe_dcb_hw_ets(&adapter->hw, ets, max_frame); +err_out: + return err; } static int ixgbe_dcbnl_ieee_getpfc(struct net_device *dev, @@ -805,7 +763,7 @@ static int ixgbe_dcbnl_ieee_delapp(struct net_device *dev, return err; adapter->fcoe.up = app_mask ? - ffs(app_mask) - 1 : IXGBE_FCOE_DEFTC; + ffs(app_mask) - 1 : IXGBE_FCOE_DEFUP; ixgbe_dcbnl_devreset(dev); } #endif diff --git a/src/ixgbe_ethtool.c b/src/ixgbe_ethtool.c index 8764ee7..124ade9 100644 --- a/src/ixgbe_ethtool.c +++ b/src/ixgbe_ethtool.c @@ -95,9 +95,6 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = { IXGBE_STAT("lsc_int", lsc_int), IXGBE_STAT("tx_busy", tx_busy), IXGBE_STAT("non_eop_descs", non_eop_descs), -#ifndef CONFIG_IXGBE_NAPI - IXGBE_STAT("rx_dropped_backlog", rx_dropped_backlog), -#endif IXGBE_STAT("broadcast", stats.bprc), IXGBE_STAT("rx_no_buffer_count", stats.rnbc[0]) , IXGBE_STAT("tx_timeout_count", tx_timeout_count), @@ -2015,55 +2012,15 @@ static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter, struct ethtool_wolinfo *wol) { struct ixgbe_hw *hw = &adapter->hw; - int retval = 1; - u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK; - - /* WOL not supported except for the following */ - switch (hw->device_id) { - case IXGBE_DEV_ID_82599_SFP: - /* Only these subdevice could supports WOL */ - switch (hw->subsystem_device_id) { - case IXGBE_SUBDEV_ID_82599_560FLR: - /* only support first port */ - if (hw->bus.func != 0) { - wol->supported = 0; - break; - } - case IXGBE_SUBDEV_ID_82599_SFP: - retval = 0; - break; - default: - wol->supported = 0; - break; - } - break; - case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: - /* All except this subdevice support WOL */ - if (hw->subsystem_device_id == - IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) { - wol->supported = 0; - break; - } - retval = 0; - break; - case IXGBE_DEV_ID_82599_KX4: - retval = 0; - break; - case IXGBE_DEV_ID_X540T: - /* check eeprom to see if enabled wol */ - if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) || - ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) && - (hw->bus.func == 0))) { - retval = 0; - break; - } + int retval = 0; - /* All others not supported */ - wol->supported = 0; - break; - default: + /* WOL not supported for all devices */ + if (!ixgbe_wol_supported(adapter, hw->device_id, + hw->subsystem_device_id)) { + retval = 1; wol->supported = 0; } + return retval; } @@ 
-2185,9 +2142,6 @@ static int ixgbe_get_coalesce(struct net_device *netdev, struct ixgbe_adapter *adapter = netdev_priv(netdev); ec->tx_max_coalesced_frames_irq = adapter->tx_work_limit; -#ifndef CONFIG_IXGBE_NAPI - ec->rx_max_coalesced_frames_irq = adapter->rx_work_limit; -#endif /* CONFIG_IXGBE_NAPI */ /* only valid if in constant ITR mode */ if (adapter->rx_itr_setting <= 1) ec->rx_coalesce_usecs = adapter->rx_itr_setting; @@ -2249,7 +2203,6 @@ static int ixgbe_set_coalesce(struct net_device *netdev, struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_q_vector *q_vector; int i; - int num_vectors; u16 tx_itr_param, rx_itr_param; bool need_reset = false; @@ -2261,11 +2214,6 @@ static int ixgbe_set_coalesce(struct net_device *netdev, if (ec->tx_max_coalesced_frames_irq) adapter->tx_work_limit = ec->tx_max_coalesced_frames_irq; -#ifndef CONFIG_IXGBE_NAPI - if (ec->rx_max_coalesced_frames_irq) - adapter->rx_work_limit = ec->rx_max_coalesced_frames_irq; - -#endif if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) || (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2))) return -EINVAL; @@ -2293,12 +2241,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev, /* check the old value and enable RSC if necessary */ need_reset = ixgbe_update_rsc(adapter); - if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) - num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - else - num_vectors = 1; - - for (i = 0; i < num_vectors; i++) { + for (i = 0; i < adapter->num_q_vectors; i++) { q_vector = adapter->q_vector[i]; q_vector->tx.work_limit = adapter->tx_work_limit; q_vector->rx.work_limit = adapter->rx_work_limit; @@ -2442,10 +2385,6 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data) return -EINVAL; #endif -#ifdef NETIF_F_RXHASH - if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) - supported_flags |= ETH_FLAG_RXHASH; -#endif #ifdef IXGBE_NO_LRO if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) #endif @@ -2460,6 +2399,10 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data) break; } +#endif +#ifdef NETIF_F_RXHASH + supported_flags |= ETH_FLAG_RXHASH; + #endif rc = ethtool_op_set_flags(netdev, data, supported_flags); if (rc) @@ -2497,20 +2440,40 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data) * Check if Flow Director n-tuple support was enabled or disabled. If * the state changed, we need to reset. 
*/ - if (!(netdev->features & NETIF_F_NTUPLE)) { - if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) { - /* turn off Flow Director, set ATR and reset */ - if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) && - !(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) - adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; - need_reset = true; - } - adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; - } else if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) { + switch (netdev->features & NETIF_F_NTUPLE) { + case NETIF_F_NTUPLE: /* turn off ATR, enable perfect filters and reset */ + if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) + need_reset = true; + adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; - need_reset = true; + break; + default: + /* turn off perfect filters, enable ATR and reset */ + if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) + need_reset = true; + + adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; + + /* We cannot enable ATR if VMDq is enabled */ + if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) + break; + + /* We cannot enable ATR if we have 2 or more traffic classes */ + if (netdev_get_num_tc(netdev) > 1) + break; + + /* We cannot enable ATR if RSS is disabled */ + if (adapter->ring_feature[RING_F_RSS].limit <= 1) + break; + + /* A sample rate of 0 indicates ATR disabled */ + if (!adapter->atr_sample_rate) + break; + + adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; + break; } #endif /* ETHTOOL_GRXRINGS */ @@ -2621,10 +2584,6 @@ static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter, { cmd->data = 0; - /* if RSS is disabled then report no hashing */ - if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) - return 0; - /* Report default options for RSS on ixgbe */ switch (cmd->flow_type) { case TCP_V4_FLOW: @@ -3132,3 +3091,4 @@ void ixgbe_set_ethtool_ops(struct net_device *netdev) SET_ETHTOOL_OPS(netdev, &ixgbe_ethtool_ops); } #endif /* SIOCETHTOOL */ + diff --git a/src/ixgbe_fcoe.c b/src/ixgbe_fcoe.c index 9cbd403..21ec1ff 100644 --- a/src/ixgbe_fcoe.c +++ b/src/ixgbe_fcoe.c @@ -108,10 +108,10 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid) udelay(100); } if (ddp->sgl) - pci_unmap_sg(adapter->pdev, ddp->sgl, ddp->sgc, + dma_unmap_sg(&adapter->pdev->dev, ddp->sgl, ddp->sgc, DMA_FROM_DEVICE); if (ddp->pool) { - pci_pool_free(ddp->pool, ddp->udl, ddp->udp); + dma_pool_free(ddp->pool, ddp->udl, ddp->udp); ddp->pool = NULL; } @@ -138,6 +138,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, struct ixgbe_hw *hw; struct ixgbe_fcoe *fcoe; struct ixgbe_fcoe_ddp *ddp; + struct ixgbe_fcoe_ddp_pool *ddp_pool; struct scatterlist *sg; unsigned int i, j, dmacount; unsigned int len; @@ -148,8 +149,6 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, unsigned int thislen = 0; u32 fcbuff, fcdmarw, fcfltrw, fcrxctl; dma_addr_t addr = 0; - struct pci_pool *pool; - unsigned int cpu; if (!netdev || !sgl || !sgc) return 0; @@ -166,11 +165,6 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, return 0; fcoe = &adapter->fcoe; - if (!fcoe->pool) { - e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid); - return 0; - } - ddp = &fcoe->ddp[xid]; if (ddp->sgl) { e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n", @@ -179,21 +173,32 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, } ixgbe_fcoe_clear_ddp(ddp); + + if (!fcoe->ddp_pool) { + e_warn(drv, "No ddp_pool resources allocated\n"); + return 0; + } + + ddp_pool = per_cpu_ptr(fcoe->ddp_pool, 
get_cpu()); + if (!ddp_pool->pool) { + e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid); + goto out_noddp; + } + /* setup dma from scsi command sgl */ - dmacount = pci_map_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE); + dmacount = dma_map_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE); if (dmacount == 0) { e_err(drv, "xid 0x%x DMA map error\n", xid); - return 0; + goto out_noddp; } /* alloc the udl from per cpu ddp pool */ - pool = *per_cpu_ptr(fcoe->pool, get_cpu()); - ddp->udl = pci_pool_alloc(pool, GFP_ATOMIC, &ddp->udp); + ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp); if (!ddp->udl) { e_err(drv, "failed allocated ddp context\n"); goto out_noddp_unmap; } - ddp->pool = pool; + ddp->pool = ddp_pool->pool; ddp->sgl = sgl; ddp->sgc = sgc; @@ -204,8 +209,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, while (len) { /* max number of buffers allowed in one DDP context */ if (j >= IXGBE_BUFFCNT_MAX) { - cpu = get_cpu(); - *per_cpu_ptr(fcoe->pcpu_noddp, cpu) += 1; + ddp_pool->noddp++; goto out_noddp_free; } @@ -246,8 +250,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, */ if (lastsize == bufflen) { if (j >= IXGBE_BUFFCNT_MAX) { - cpu = get_cpu(); - *per_cpu_ptr(fcoe->pcpu_noddp_ext_buff, cpu) += 1; + ddp_pool->noddp_ext_buff++; goto out_noddp_free; } @@ -299,12 +302,12 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, return 1; out_noddp_free: - put_cpu(); - pci_pool_free(pool, ddp->udl, ddp->udp); + dma_pool_free(ddp->pool, ddp->udl, ddp->udp); ixgbe_fcoe_clear_ddp(ddp); out_noddp_unmap: - pci_unmap_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE); + dma_unmap_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE); +out_noddp: put_cpu(); return 0; } @@ -417,7 +420,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, break; /* unmap the sg list when FCPRSP is received */ case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP): - pci_unmap_sg(adapter->pdev, ddp->sgl, + dma_unmap_sg(&adapter->pdev->dev, ddp->sgl, ddp->sgc, DMA_FROM_DEVICE); ddp->err = ddp_err; ddp->sgl = NULL; @@ -446,6 +449,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, if ((fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA) && (fctl & FC_FC_END_SEQ)) { struct fcoe_crc_eof *crc; + skb_linearize(skb); crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc)); crc->fcoe_eof = FC_EOF_T; } @@ -573,44 +577,37 @@ int ixgbe_fso(struct ixgbe_ring *tx_ring, return 0; } -static void ixgbe_fcoe_ddp_pools_free(struct ixgbe_fcoe *fcoe) +static void ixgbe_fcoe_dma_pool_free(struct ixgbe_fcoe *fcoe, unsigned int cpu) { - unsigned int cpu; - struct pci_pool **pool; + struct ixgbe_fcoe_ddp_pool *ddp_pool; - for_each_possible_cpu(cpu) { - pool = per_cpu_ptr(fcoe->pool, cpu); - if (*pool) - pci_pool_destroy(*pool); - } - free_percpu(fcoe->pool); - fcoe->pool = NULL; + ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); + if (ddp_pool->pool) + dma_pool_destroy(ddp_pool->pool); + ddp_pool->pool = NULL; } -static void ixgbe_fcoe_ddp_pools_alloc(struct ixgbe_adapter *adapter) +static int ixgbe_fcoe_dma_pool_alloc(struct ixgbe_fcoe *fcoe, + struct device *dev, + unsigned int cpu) { - struct ixgbe_fcoe *fcoe = &adapter->fcoe; - unsigned int cpu; - struct pci_pool **pool; + struct ixgbe_fcoe_ddp_pool *ddp_pool; + struct dma_pool *pool; char pool_name[32]; - fcoe->pool = alloc_percpu(struct pci_pool *); - if (!fcoe->pool) - return; + snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%d", cpu); - /* allocate pci pool for each cpu */ - for_each_possible_cpu(cpu) { - snprintf(pool_name, 
32, "ixgbe_fcoe_ddp_%d", cpu); - pool = per_cpu_ptr(fcoe->pool, cpu); - *pool = pci_pool_create(pool_name, - adapter->pdev, IXGBE_FCPTR_MAX, - IXGBE_FCPTR_ALIGN, PAGE_SIZE); - if (!*pool) { - e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu); - ixgbe_fcoe_ddp_pools_free(fcoe); - return; - } - } + pool = dma_pool_create(pool_name, dev, IXGBE_FCPTR_MAX, + IXGBE_FCPTR_ALIGN, PAGE_SIZE); + if (!pool) + return -ENOMEM; + + ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); + ddp_pool->pool = pool; + ddp_pool->noddp = 0; + ddp_pool->noddp_ext_buff = 0; + + return 0; } /** @@ -623,155 +620,180 @@ static void ixgbe_fcoe_ddp_pools_alloc(struct ixgbe_adapter *adapter) */ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) { - int i, fcoe_q, fcoe_i; + struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE]; struct ixgbe_hw *hw = &adapter->hw; - struct ixgbe_fcoe *fcoe = &adapter->fcoe; - struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; - unsigned int cpu; -#ifdef CONFIG_DCB - u8 tc; - u32 up2tc; - -#endif /* CONFIG_DCB */ - if (!fcoe->pool) { - spin_lock_init(&fcoe->lock); - - ixgbe_fcoe_ddp_pools_alloc(adapter); - if (!fcoe->pool) { - e_err(drv, "failed to alloc percpu fcoe DDP pools\n"); - return; - } - - /* Extra buffer to be shared by all DDPs for HW work around */ - fcoe->extra_ddp_buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC); - if (fcoe->extra_ddp_buffer == NULL) { - e_err(drv, "failed to allocated extra DDP buffer\n"); - goto out_ddp_pools; - } + int i, fcoe_q, fcoe_i; + u32 etqf; - fcoe->extra_ddp_buffer_dma = - dma_map_single(&adapter->pdev->dev, - fcoe->extra_ddp_buffer, - IXGBE_FCBUFF_MIN, - DMA_FROM_DEVICE); - if (dma_mapping_error(&adapter->pdev->dev, - fcoe->extra_ddp_buffer_dma)) { - e_err(drv, "failed to map extra DDP buffer\n"); - goto out_extra_ddp_buffer; - } + /* Minimal funcionality for FCoE requires at least CRC offloads */ + if (!(adapter->netdev->features & NETIF_F_FCOE_CRC)) + return; - /* Alloc per cpu mem to count the ddp alloc failure number */ - fcoe->pcpu_noddp = alloc_percpu(u64); - if (!fcoe->pcpu_noddp) { - e_err(drv, "failed to alloc noddp counter\n"); - goto out_pcpu_noddp_alloc_fail; - } + /* Enable L2 EtherType filter for FCoE, needed for FCoE CRC and DDP */ + etqf = ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN; + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { + etqf |= IXGBE_ETQF_POOL_ENABLE; + etqf |= VMDQ_P(0) << IXGBE_ETQF_POOL_SHIFT; + } + IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE), etqf); + IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0); - fcoe->pcpu_noddp_ext_buff = alloc_percpu(u64); - if (!fcoe->pcpu_noddp_ext_buff) { - e_err(drv, "failed to alloc noddp extra buff cnt\n"); - goto out_pcpu_noddp_extra_buff_alloc_fail; - } + /* leave remaining registers unconfigued if FCoE is disabled */ + if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) + return; - for_each_possible_cpu(cpu) { - *per_cpu_ptr(fcoe->pcpu_noddp, cpu) = 0; - *per_cpu_ptr(fcoe->pcpu_noddp_ext_buff, cpu) = 0; - } + /* Use one or more Rx queues for FCoE by redirection table */ + for (i = 0; i < IXGBE_FCRETA_SIZE; i++) { + fcoe_i = fcoe->offset + (i % fcoe->indices); + fcoe_i &= IXGBE_FCRETA_ENTRY_MASK; + fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx; + IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q); } + IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA); - /* Enable L2 eth type filter for FCoE */ - IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE), - (ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN)); - /* Enable L2 
eth type filter for FIP */ - IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP), - (ETH_P_FIP | IXGBE_ETQF_FILTER_EN)); - if (adapter->ring_feature[RING_F_FCOE].indices) { - /* Use multiple rx queues for FCoE by redirection table */ - for (i = 0; i < IXGBE_FCRETA_SIZE; i++) { - fcoe_i = f->mask + i % f->indices; - fcoe_i &= IXGBE_FCRETA_ENTRY_MASK; - fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx; - IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q); - } - IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA); - IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0); - } else { - /* Use single rx queue for FCoE */ - fcoe_i = f->mask; - fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx; - IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, 0); - IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), - IXGBE_ETQS_QUEUE_EN | - (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT)); + /* Enable L2 EtherType filter for FIP */ + etqf = ETH_P_FIP | IXGBE_ETQF_FILTER_EN; + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { + etqf |= IXGBE_ETQF_POOL_ENABLE; + etqf |= VMDQ_P(0) << IXGBE_ETQF_POOL_SHIFT; } - /* Enable L2 eth type filter for FIP */ - IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP), - (ETH_P_FIP | IXGBE_ETQF_FILTER_EN)); - /* send FIP frames to the first FCoE queue */ - fcoe_i = f->mask; - fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx; + IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP), etqf); + + /* Send FIP frames to the first FCoE queue */ + fcoe_q = adapter->rx_ring[fcoe->offset]->reg_idx; IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP), IXGBE_ETQS_QUEUE_EN | (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT)); + /* Configure FCoE Rx control */ IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, IXGBE_FCRXCTRL_FCOELLI | IXGBE_FCRXCTRL_FCCRCBO | (FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT)); -#ifdef CONFIG_DCB - - up2tc = IXGBE_READ_REG(&adapter->hw, IXGBE_RTTUP2TC); - for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++) { - tc = (u8)(up2tc >> (i * IXGBE_RTTUP2TC_UP_SHIFT)); - tc &= (IXGBE_DCB_MAX_TRAFFIC_CLASS - 1); - if (fcoe->tc == tc) { - fcoe->up = i; - break; - } - } -#endif /* CONFIG_DCB */ - - return; - -out_pcpu_noddp_extra_buff_alloc_fail: - free_percpu(fcoe->pcpu_noddp); -out_pcpu_noddp_alloc_fail: - dma_unmap_single(&adapter->pdev->dev, - fcoe->extra_ddp_buffer_dma, - IXGBE_FCBUFF_MIN, - DMA_FROM_DEVICE); -out_extra_ddp_buffer: - kfree(fcoe->extra_ddp_buffer); -out_ddp_pools: - ixgbe_fcoe_ddp_pools_free(fcoe); } /** - * ixgbe_cleanup_fcoe - release all fcoe ddp context resources + * ixgbe_free_fcoe_ddp_resources - release all fcoe ddp context resources * @adapter : ixgbe adapter * * Cleans up outstanding ddp context resources * * Returns : none */ -void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter) +void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter) { - int i; struct ixgbe_fcoe *fcoe = &adapter->fcoe; + int cpu, i; - if (!fcoe->pool) + /* do nothing if no DDP pools were allocated */ + if (!fcoe->ddp_pool) return; for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++) ixgbe_fcoe_ddp_put(adapter->netdev, i); + + for_each_possible_cpu(cpu) + ixgbe_fcoe_dma_pool_free(fcoe, cpu); + dma_unmap_single(&adapter->pdev->dev, fcoe->extra_ddp_buffer_dma, IXGBE_FCBUFF_MIN, DMA_FROM_DEVICE); kfree(fcoe->extra_ddp_buffer); - free_percpu(fcoe->pcpu_noddp); - free_percpu(fcoe->pcpu_noddp_ext_buff); - ixgbe_fcoe_ddp_pools_free(fcoe); + + fcoe->extra_ddp_buffer = NULL; + fcoe->extra_ddp_buffer_dma = 0; +} + +/** + * ixgbe_setup_fcoe_ddp_resources - setup all fcoe ddp context resources + * @adapter: ixgbe adapter + * + * Sets up ddp context 
resouces + * + * Returns : 0 indicates success or -EINVAL on failure + */ +int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter) +{ + struct ixgbe_fcoe *fcoe = &adapter->fcoe; + struct device *dev = &adapter->pdev->dev; + void *buffer; + dma_addr_t dma; + unsigned int cpu; + + /* do nothing if no DDP pools were allocated */ + if (!fcoe->ddp_pool) + return 0; + + /* Extra buffer to be shared by all DDPs for HW work around */ + buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC); + if (!buffer) { + e_err(drv, "failed to allocate extra DDP buffer\n"); + return -ENOMEM; + } + + dma = dma_map_single(dev, buffer, IXGBE_FCBUFF_MIN, DMA_FROM_DEVICE); + if (dma_mapping_error(dev, dma)) { + e_err(drv, "failed to map extra DDP buffer\n"); + kfree(buffer); + return -ENOMEM; + } + + fcoe->extra_ddp_buffer = buffer; + fcoe->extra_ddp_buffer_dma = dma; + + /* allocate pci pool for each cpu */ + for_each_possible_cpu(cpu) { + int err = ixgbe_fcoe_dma_pool_alloc(fcoe, dev, cpu); + if (!err) + continue; + + e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu); + ixgbe_free_fcoe_ddp_resources(adapter); + return -ENOMEM; + } + + return 0; +} + +#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE +int ixgbe_fcoe_ddp_enable(struct ixgbe_adapter *adapter) +#else +static int ixgbe_fcoe_ddp_enable(struct ixgbe_adapter *adapter) +#endif +{ + struct ixgbe_fcoe *fcoe = &adapter->fcoe; + + if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE)) + return -EINVAL; + + fcoe->ddp_pool = alloc_percpu(struct ixgbe_fcoe_ddp_pool); + + if (!fcoe->ddp_pool) { + e_err(drv, "failed to allocate percpu DDP resources\n"); + return -ENOMEM; + } + + adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1; + + return 0; +} + +#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE +void ixgbe_fcoe_ddp_disable(struct ixgbe_adapter *adapter) +#else +static void ixgbe_fcoe_ddp_disable(struct ixgbe_adapter *adapter) +#endif +{ + struct ixgbe_fcoe *fcoe = &adapter->fcoe; + + adapter->netdev->fcoe_ddp_xid = 0; + + if (!fcoe->ddp_pool) + return; + + free_percpu(fcoe->ddp_pool); + fcoe->ddp_pool = NULL; } #ifdef HAVE_NETDEV_OPS_FCOE_ENABLE @@ -785,40 +807,37 @@ void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter) */ int ixgbe_fcoe_enable(struct net_device *netdev) { - int rc = -EINVAL; struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_fcoe *fcoe = &adapter->fcoe; + atomic_inc(&fcoe->refcnt); if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE)) - goto out_enable; + return -EINVAL; - atomic_inc(&fcoe->refcnt); if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) - goto out_enable; + return -EINVAL; e_info(drv, "Enabling FCoE offload features.\n"); if (netif_running(netdev)) netdev->netdev_ops->ndo_stop(netdev); - ixgbe_clear_interrupt_scheme(adapter); + /* Allocate per CPU memory to track DDP pools */ + ixgbe_fcoe_ddp_enable(adapter); + /* enable FCoE and notify stack */ adapter->flags |= IXGBE_FLAG_FCOE_ENABLED; - adapter->ring_feature[RING_F_FCOE].indices = IXGBE_FCRETA_SIZE; - netdev->features |= NETIF_F_FCOE_CRC; - netdev->features |= NETIF_F_FSO; netdev->features |= NETIF_F_FCOE_MTU; - netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1; + netdev_features_change(netdev); + /* release existing queues and reallocate them */ + ixgbe_clear_interrupt_scheme(adapter); ixgbe_init_interrupt_scheme(adapter); - netdev_features_change(netdev); if (netif_running(netdev)) netdev->netdev_ops->ndo_open(netdev); - rc = 0; -out_enable: - return rc; + return 0; } /** @@ -831,39 +850,35 @@ int ixgbe_fcoe_enable(struct net_device *netdev) */ int ixgbe_fcoe_disable(struct net_device *netdev) 
{ - int rc = -EINVAL; struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_fcoe *fcoe = &adapter->fcoe; - if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) || - !(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) - goto out_disable; + if (!atomic_dec_and_test(&adapter->fcoe.refcnt)) + return -EINVAL; - if (!atomic_dec_and_test(&fcoe->refcnt)) - goto out_disable; + if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) + return -EINVAL; e_info(drv, "Disabling FCoE offload features.\n"); - netdev->features &= ~NETIF_F_FCOE_CRC; - netdev->features &= ~NETIF_F_FSO; - netdev->features &= ~NETIF_F_FCOE_MTU; - netdev->fcoe_ddp_xid = 0; - netdev_features_change(netdev); - if (netif_running(netdev)) netdev->netdev_ops->ndo_stop(netdev); - ixgbe_clear_interrupt_scheme(adapter); + /* Free per CPU memory to track DDP pools */ + ixgbe_fcoe_ddp_disable(adapter); + + /* disable FCoE and notify stack */ adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; - adapter->ring_feature[RING_F_FCOE].indices = 0; - ixgbe_cleanup_fcoe(adapter); + netdev->features &= ~NETIF_F_FCOE_MTU; + + netdev_features_change(netdev); + + /* release existing queues and reallocate them */ + ixgbe_clear_interrupt_scheme(adapter); ixgbe_init_interrupt_scheme(adapter); if (netif_running(netdev)) netdev->netdev_ops->ndo_open(netdev); - rc = 0; -out_disable: - return rc; + return 0; } #endif /* HAVE_NETDEV_OPS_FCOE_ENABLE */ @@ -887,7 +902,6 @@ u8 ixgbe_fcoe_getapp(struct net_device *netdev) #endif /* HAVE_DCBNL_OPS_GETAPP */ #endif /* CONFIG_DCB */ - #ifdef HAVE_NETDEV_OPS_FCOE_GETWWN /** * ixgbe_fcoe_get_wwn - get world wide name for the node or the port @@ -932,5 +946,16 @@ int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type) } return rc; } + #endif /* HAVE_NETDEV_OPS_FCOE_GETWWN */ +/** + * ixgbe_fcoe_get_tc - get the current TC that fcoe is mapped to + * @adapter - pointer to the device adapter structure + * + * Return : TC that FCoE is mapped to + */ +u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter) +{ + return netdev_get_prio_tc_map(adapter->netdev, adapter->fcoe.up); +} #endif /* IXGBE_FCOE */ diff --git a/src/ixgbe_fcoe.h b/src/ixgbe_fcoe.h index cad2862..2c7a00e 100644 --- a/src/ixgbe_fcoe.h +++ b/src/ixgbe_fcoe.h @@ -48,8 +48,8 @@ #define IXGBE_FCBUFF_MIN 4096 /* 4KB min */ #define IXGBE_FCOE_DDP_MAX 512 /* 9 bits xid */ -/* Default traffic class to use for FCoE */ -#define IXGBE_FCOE_DEFTC 3 +/* Default user priority to use for FCoE */ +#define IXGBE_FCOE_DEFUP 3 /* fcerr */ #define IXGBE_FCERR_BADCRC 0x00100000 @@ -69,20 +69,24 @@ struct ixgbe_fcoe_ddp { struct scatterlist *sgl; dma_addr_t udp; u64 *udl; - struct pci_pool *pool; + struct dma_pool *pool; +}; + +/* per cpu variables */ +struct ixgbe_fcoe_ddp_pool { + struct dma_pool *pool; + u64 noddp; + u64 noddp_ext_buff; }; struct ixgbe_fcoe { - struct pci_pool **pool; + struct ixgbe_fcoe_ddp_pool __percpu *ddp_pool; atomic_t refcnt; spinlock_t lock; struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX]; - unsigned char *extra_ddp_buffer; + void *extra_ddp_buffer; dma_addr_t extra_ddp_buffer_dma; - u64 __percpu *pcpu_noddp; - u64 __percpu *pcpu_noddp_ext_buff; unsigned long mode; - u8 tc; u8 up; u8 up_set; }; diff --git a/src/ixgbe_lib.c b/src/ixgbe_lib.c new file mode 100644 index 0000000..36a858a --- /dev/null +++ b/src/ixgbe_lib.c @@ -0,0 +1,1269 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2012 Intel Corporation. 
+ + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "ixgbe.h" +#include "ixgbe_sriov.h" + +#ifdef HAVE_TX_MQ +/** + * ixgbe_cache_ring_dcb_vmdq - Descriptor ring to register mapping for VMDq + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets for VMDq to the assigned rings. It + * will also try to cache the proper offsets if RSS/FCoE are enabled along + * with VMDq. + * + **/ +static bool ixgbe_cache_ring_dcb_vmdq(struct ixgbe_adapter *adapter) +{ +#ifdef IXGBE_FCOE + struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE]; +#endif /* IXGBE_FCOE */ + struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + int i; + u16 reg_idx; + u8 tcs = netdev_get_num_tc(adapter->netdev); + + /* verify we have DCB enabled before proceeding */ + if (tcs <= 1) + return false; + + /* verify we have VMDq enabled before proceeding */ + if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)) + return false; + + switch (adapter->hw.mac.type) { + case ixgbe_mac_82598EB: + /* + * The bits on the 82598 are reversed compared to the other + * adapters. The DCB bits are the higher order bits and the + * lower bits belong to the VMDq pool. 
In order to sort + * this out we have to swap the bits to get the correct layout + */ + for (i = 0; i < adapter->num_rx_queues; i++) { + u8 reg_idx = ((i >> 3) | (i << 3)) & 0x3F; + adapter->rx_ring[i]->reg_idx = reg_idx; + } + for (i = 0; i < adapter->num_tx_queues; i++) { + u8 reg_idx = ((i >> 4) | (i << 2)) & 0x1F; + adapter->tx_ring[i]->reg_idx = reg_idx; + } + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + /* start at VMDq register offset for SR-IOV enabled setups */ + reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); + for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) { + /* If we are greater than indices move to next pool */ + if ((reg_idx & ~vmdq->mask) >= tcs) + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); + adapter->rx_ring[i]->reg_idx = reg_idx; + } + + reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); + for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) { + /* If we are greater than indices move to next pool */ + if ((reg_idx & ~vmdq->mask) >= tcs) + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); + adapter->tx_ring[i]->reg_idx = reg_idx; + } + + break; + default: + break; + } + +#ifdef IXGBE_FCOE + /* nothing to do if FCoE is disabled */ + if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) + return true; + + /* The work is already done if the FCoE ring is shared */ + if (fcoe->offset < tcs) + return true; + + /* The FCoE rings exist separately, we need to move their reg_idx */ + if (fcoe->indices) { + u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask); + u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter); + + reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool; + for (i = fcoe->offset; i < adapter->num_rx_queues; i++) { + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc; + adapter->rx_ring[i]->reg_idx = reg_idx; + reg_idx++; + } + + reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool; + for (i = fcoe->offset; i < adapter->num_tx_queues; i++) { + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc; + adapter->tx_ring[i]->reg_idx = reg_idx; + reg_idx++; + } + } + +#endif /* IXGBE_FCOE */ + return true; +} + +/* ixgbe_get_first_reg_idx - Return first register index associated with ring */ +static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc, + unsigned int *tx, unsigned int *rx) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct net_device *dev = adapter->netdev; + u8 num_tcs = netdev_get_num_tc(dev); + + *tx = 0; + *rx = 0; + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + /* TxQs/TC: 4 RxQs/TC: 8 */ + *tx = tc << 2; /* 0, 4, 8, 12, 16, 20, 24, 28 */ + *rx = tc << 3; /* 0, 8, 16, 24, 32, 40, 48, 56 */ + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + if (num_tcs > 4) { + /* + * TCs : TC0/1 TC2/3 TC4-7 + * TxQs/TC: 32 16 8 + * RxQs/TC: 16 16 16 + */ + *rx = tc << 4; + if (tc < 3) + *tx = tc << 5; /* 0, 32, 64 */ + else if (tc < 5) + *tx = (tc + 2) << 4; /* 80, 96 */ + else + *tx = (tc + 8) << 3; /* 104, 112, 120 */ + } else { + /* + * TCs : TC0 TC1 TC2/3 + * TxQs/TC: 64 32 16 + * RxQs/TC: 32 32 32 + */ + *rx = tc << 5; + if (tc < 2) + *tx = tc << 6; /* 0, 64 */ + else + *tx = (tc + 4) << 4; /* 96, 112 */ + } + default: + break; + } +} + +/** + * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets for DCB to the assigned rings. 
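+ *
+ * For example, on 82599/X540 in 4 TC mode the first ring of TC 1 is
+ * cached at Rx register index 32 (tc << 5) and Tx register index 64
+ * (tc << 6), with the remaining rss_i - 1 rings of that TC taking the
+ * consecutive register indices that follow.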
+ * + **/ +static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) +{ + int tc, offset, rss_i, i; + unsigned int tx_idx, rx_idx; + struct net_device *dev = adapter->netdev; + u8 num_tcs = netdev_get_num_tc(dev); + + if (num_tcs <= 1) + return false; + + rss_i = adapter->ring_feature[RING_F_RSS].indices; + + for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) { + ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx); + for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) { + adapter->tx_ring[offset + i]->reg_idx = tx_idx; + adapter->rx_ring[offset + i]->reg_idx = rx_idx; + adapter->tx_ring[offset + i]->dcb_tc = tc; + adapter->rx_ring[offset + i]->dcb_tc = tc; + } + } + + return true; +} + +#endif /* HAVE_TX_MQ */ +/** + * ixgbe_cache_ring_vmdq - Descriptor ring to register mapping for VMDq + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets for VMDq to the assigned rings. It + * will also try to cache the proper offsets if RSS/FCoE are enabled along + * with VMDq. + * + **/ +static bool ixgbe_cache_ring_vmdq(struct ixgbe_adapter *adapter) +{ +#ifdef IXGBE_FCOE + struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE]; +#endif /* IXGBE_FCOE */ + struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS]; + int i; + u16 reg_idx; + + /* only proceed if VMDq is enabled */ + if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)) + return false; + + /* start at VMDq register offset for SR-IOV enabled setups */ + reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); + for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) { +#ifdef IXGBE_FCOE + /* Allow first FCoE queue to be mapped as RSS */ + if (fcoe->offset && (i > fcoe->offset)) + break; +#endif + /* If we are greater than indices move to next pool */ + if ((reg_idx & ~vmdq->mask) >= rss->indices) + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); + adapter->rx_ring[i]->reg_idx = reg_idx; + } + +#ifdef IXGBE_FCOE + /* FCoE uses a linear block of queues so just assigning 1:1 */ + for (; i < adapter->num_rx_queues; i++, reg_idx++) + adapter->rx_ring[i]->reg_idx = reg_idx; + +#endif + reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); + for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) { +#ifdef IXGBE_FCOE + /* Allow first FCoE queue to be mapped as RSS */ + if (fcoe->offset && (i > fcoe->offset)) + break; +#endif + /* If we are greater than indices move to next pool */ + if ((reg_idx & rss->mask) >= rss->indices) + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); + adapter->tx_ring[i]->reg_idx = reg_idx; + } + +#ifdef IXGBE_FCOE + /* FCoE uses a linear block of queues so just assigning 1:1 */ + for (; i < adapter->num_tx_queues; i++, reg_idx++) + adapter->tx_ring[i]->reg_idx = reg_idx; + +#endif + + + return true; +} + +/** + * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets for RSS, ATR, FCoE, and SR-IOV. 
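+ *
+ * In this base case the mapping is simply the identity: Rx ring i and
+ * Tx ring i both use hardware register index i.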
+ * + **/ +static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->reg_idx = i; + + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i]->reg_idx = i; + + return true; +} + +/** + * ixgbe_cache_ring_register - Descriptor ring to register mapping + * @adapter: board private structure to initialize + * + * Once we know the feature-set enabled for the device, we'll cache + * the register offset the descriptor ring is assigned to. + * + * Note, the order the various feature calls is important. It must start with + * the "most" features enabled at the same time, then trickle down to the + * least amount of features turned on at once. + **/ +static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) +{ +#ifdef HAVE_TX_MQ + if (ixgbe_cache_ring_dcb_vmdq(adapter)) + return; + + if (ixgbe_cache_ring_dcb(adapter)) + return; + +#endif + if (ixgbe_cache_ring_vmdq(adapter)) + return; + + ixgbe_cache_ring_rss(adapter); +} + +#define IXGBE_RSS_16Q_MASK 0xF +#define IXGBE_RSS_8Q_MASK 0x7 +#define IXGBE_RSS_4Q_MASK 0x3 +#define IXGBE_RSS_2Q_MASK 0x1 +#define IXGBE_RSS_DISABLED_MASK 0x0 + +#ifdef HAVE_TX_MQ +/** + * ixgbe_set_dcb_vmdq_queues: Allocate queues for VMDq devices w/ DCB + * @adapter: board private structure to initialize + * + * When VMDq (Virtual Machine Devices queue) is enabled, allocate queues + * and VM pools where appropriate. Also assign queues based on DCB + * priorities and map accordingly.. + * + **/ +static bool ixgbe_set_dcb_vmdq_queues(struct ixgbe_adapter *adapter) +{ + int i; + u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit; + u16 vmdq_m = 0; + u8 tcs = netdev_get_num_tc(adapter->netdev); +#ifdef IXGBE_FCOE + u16 fcoe_i = 0; +#endif + + /* verify we have DCB enabled before proceeding */ + if (tcs <= 1) + return false; + + /* verify we have VMDq enabled before proceeding */ + if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)) + return false; + + switch (adapter->hw.mac.type) { + case ixgbe_mac_82598EB: + /* 4 pools w/ 8TC per pool */ + vmdq_i = min_t(u16, vmdq_i, 4); + vmdq_m = 0x7; + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + /* Add starting offset to total pool count */ + vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset; + + /* 16 pools w/ 8 TC per pool */ + if (tcs > 4) { + vmdq_i = min_t(u16, vmdq_i, 16); + vmdq_m = IXGBE_82599_VMDQ_8Q_MASK; + /* 32 pools w/ 4 TC per pool */ + } else { + vmdq_i = min_t(u16, vmdq_i, 32); + vmdq_m = IXGBE_82599_VMDQ_4Q_MASK; + } + +#ifdef IXGBE_FCOE + /* queues in the remaining pools are available for FCoE */ + fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i; + +#endif + /* remove the starting offset from the pool count */ + vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset; + + break; + default: + /* unknown hardware, only support one pool w/ one queue */ + vmdq_i = 1; + tcs = 1; + break; + } + + /* save features for later use */ + adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i; + adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m; + + /* + * We do not support DCB, VMDq, and RSS all simultaneously + * so we will disable RSS since it is the lowest priority + */ + adapter->ring_feature[RING_F_RSS].indices = 1; + adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK; + + adapter->num_rx_pools = vmdq_i; + adapter->num_rx_queues_per_pool = tcs; + + adapter->num_tx_queues = vmdq_i * tcs; + adapter->num_rx_queues = vmdq_i * tcs; + + /* disable ATR as it is not supported when VMDq is enabled */ + 
adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; + +#ifdef IXGBE_FCOE + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { + struct ixgbe_ring_feature *fcoe; + + fcoe = &adapter->ring_feature[RING_F_FCOE]; + + /* limit ourselves based on feature limits */ + fcoe_i = min_t(u16, fcoe_i, num_online_cpus()); + fcoe_i = min_t(u16, fcoe_i, fcoe->limit); + + if (fcoe_i) { + /* alloc queues for FCoE separately */ + fcoe->indices = fcoe_i; + fcoe->offset = vmdq_i * tcs; + + /* add queues to adapter */ + adapter->num_tx_queues += fcoe_i; + adapter->num_rx_queues += fcoe_i; + } else if (tcs > 1) { + /* use queue belonging to FcoE TC */ + fcoe->indices = 1; + fcoe->offset = ixgbe_fcoe_get_tc(adapter); + } else { + adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; + + fcoe->indices = 0; + fcoe->offset = 0; + } + } + +#endif /* IXGBE_FCOE */ + /* configure TC to queue mapping */ + for (i = 0; i < tcs; i++) + netdev_set_tc_queue(adapter->netdev, i, 1, i); + + return true; +} + +/** + * ixgbe_set_dcb_queues: Allocate queues for a DCB-enabled device + * @adapter: board private structure to initialize + * + * When DCB (Data Center Bridging) is enabled, allocate queues for + * each traffic class. If multiqueue isn't available,then abort DCB + * initialization. + * + * This function handles all combinations of DCB, RSS, and FCoE. + * + **/ +static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + struct ixgbe_ring_feature *f; + int rss_i, rss_m, i; + int tcs; + + /* Map queue offset and counts onto allocated tx queues */ + tcs = netdev_get_num_tc(dev); + + if (tcs <= 1) + return false; + + /* determine the upper limit for our current DCB mode */ + rss_i = dev->num_tx_queues / tcs; + if (adapter->hw.mac.type == ixgbe_mac_82598EB) { + /* 8 TC w/ 4 queues per TC */ + rss_i = min_t(u16, rss_i, 4); + rss_m = IXGBE_RSS_4Q_MASK; + } else if (tcs > 4) { + /* 8 TC w/ 8 queues per TC */ + rss_i = min_t(u16, rss_i, 8); + rss_m = IXGBE_RSS_8Q_MASK; + } else { + /* 4 TC w/ 16 queues per TC */ + rss_i = min_t(u16, rss_i, 16); + rss_m = IXGBE_RSS_16Q_MASK; + } + + /* set RSS mask and indices */ + f = &adapter->ring_feature[RING_F_RSS]; + rss_i = min_t(u16, rss_i, f->limit); + f->indices = rss_i; + f->mask = rss_m; + + /* disable ATR as it is not supported when DCB is enabled */ + adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; + +#ifdef IXGBE_FCOE + /* + * FCoE enabled queues require special configuration indexed + * by feature specific indices and mask. Here we map FCoE + * indices onto the DCB queue pairs allowing FCoE to own + * configuration later. + */ + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { + u8 tc = ixgbe_fcoe_get_tc(adapter); + + f = &adapter->ring_feature[RING_F_FCOE]; + f->indices = min_t(u16, rss_i, f->limit); + f->offset = rss_i * tc; + } + +#endif /* IXGBE_FCOE */ + for (i = 0; i < tcs; i++) + netdev_set_tc_queue(dev, i, rss_i, rss_i * i); + + adapter->num_tx_queues = rss_i * tcs; + adapter->num_rx_queues = rss_i * tcs; + + return true; +} + +#endif +/** + * ixgbe_set_vmdq_queues: Allocate queues for VMDq devices + * @adapter: board private structure to initialize + * + * When VMDq (Virtual Machine Devices queue) is enabled, allocate queues + * and VM pools where appropriate. If RSS is available, then also try and + * enable RSS and map accordingly. 
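+ *
+ * For example, on 82599/X540 a request for 32 or fewer pools with at
+ * least 4 RSS queues selects 32-pool mode with 4 queues per pool, while
+ * more than 32 pools (or fewer than 4 RSS queues) falls back to 64-pool
+ * mode with 2 queues per pool.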
+ * + **/ +static bool ixgbe_set_vmdq_queues(struct ixgbe_adapter *adapter) +{ + u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit; + u16 vmdq_m = 0; + u16 rss_i = adapter->ring_feature[RING_F_RSS].limit; + u16 rss_m = IXGBE_RSS_DISABLED_MASK; +#ifdef IXGBE_FCOE + u16 fcoe_i = 0; +#endif + + /* only proceed if VMDq is enabled */ + if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)) + return false; + + + switch (adapter->hw.mac.type) { + case ixgbe_mac_82598EB: + vmdq_i = min_t(u16, vmdq_i, 16); + /* 16 pool mode with 1 queue per pool */ + if ((vmdq_i > 4) || (rss_i == 1)) { + vmdq_m = 0x0F; + rss_i = 1; + /* 4 pool mode with 8 queue per pool */ + } else { + vmdq_m = 0x18; + rss_m = IXGBE_RSS_8Q_MASK; + rss_i = min_t(u16, rss_i, 8); + } + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + /* Add starting offset to total pool count */ + vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset; + + /* double check we are limited to maximum pools */ + vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i); + + /* 64 pool mode with 2 queues per pool */ + if ((vmdq_i > 32) || (rss_i < 4)) { + vmdq_m = IXGBE_82599_VMDQ_2Q_MASK; + rss_m = IXGBE_RSS_2Q_MASK; + rss_i = min_t(u16, rss_i, 2); + /* 32 pool mode with 4 queues per pool */ + } else { + vmdq_m = IXGBE_82599_VMDQ_4Q_MASK; + rss_m = IXGBE_RSS_4Q_MASK; + rss_i = 4; + } + +#ifdef IXGBE_FCOE + /* queues in the remaining pools are available for FCoE */ + fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m)); + +#endif + /* remove the starting offset from the pool count */ + vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset; + + break; + default: + /* unknown hardware, support one pool w/ one queue */ + vmdq_i = 1; + rss_i = 1; + break; + } + + /* save features for later use */ + adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i; + adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m; + + /* limit RSS based on user input and save for later use */ + adapter->ring_feature[RING_F_RSS].indices = rss_i; + adapter->ring_feature[RING_F_RSS].mask = rss_m; + + adapter->num_rx_pools = vmdq_i; + adapter->num_rx_queues_per_pool = rss_i; + + adapter->num_rx_queues = vmdq_i * rss_i; +#ifdef HAVE_TX_MQ + adapter->num_tx_queues = vmdq_i * rss_i; +#else + adapter->num_tx_queues = vmdq_i; +#endif /* HAVE_TX_MQ */ + + /* disable ATR as it is not supported when VMDq is enabled */ + adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; + +#ifdef IXGBE_FCOE + /* + * FCoE can use rings from adjacent buffers to allow RSS + * like behavior. To account for this we need to add the + * FCoE indices to the total ring count. 
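+ *
+ * When FCoE is given its own rings (vmdq_i > 1), the FCoE block is
+ * placed immediately after the VMDq rings at offset vmdq_i * rss_i and
+ * the adapter Tx/Rx ring counts grow by the FCoE ring count.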
+ */ + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { + struct ixgbe_ring_feature *fcoe; + + fcoe = &adapter->ring_feature[RING_F_FCOE]; + + /* limit ourselves based on feature limits */ + fcoe_i = min_t(u16, fcoe_i, fcoe->limit); + + if (vmdq_i > 1 && fcoe_i) { + /* reserve no more than number of CPUs */ + fcoe_i = min_t(u16, fcoe_i, num_online_cpus()); + + /* alloc queues for FCoE separately */ + fcoe->indices = fcoe_i; + fcoe->offset = vmdq_i * rss_i; + } else { + /* merge FCoE queues with RSS queues */ + fcoe_i = min_t(u16, fcoe_i + rss_i, num_online_cpus()); + + /* limit indices to rss_i if MSI-X is disabled */ + if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) + fcoe_i = rss_i; + + /* attempt to reserve some queues for just FCoE */ + fcoe->indices = min_t(u16, fcoe_i, fcoe->limit); + fcoe->offset = fcoe_i - fcoe->indices; + fcoe_i -= rss_i; + } + + /* add queues to adapter */ + adapter->num_tx_queues += fcoe_i; + adapter->num_rx_queues += fcoe_i; + } + +#endif + return true; +} + +/** + * ixgbe_set_rss_queues: Allocate queues for RSS + * @adapter: board private structure to initialize + * + * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try + * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU. + * + **/ +static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) +{ + struct ixgbe_ring_feature *f; + u16 rss_i; + + /* set mask for 16 queue limit of RSS */ + f = &adapter->ring_feature[RING_F_RSS]; + rss_i = f->limit; + + f->indices = rss_i; + f->mask = IXGBE_RSS_16Q_MASK; + + /* disable ATR by default, it will be configured below */ + adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; + + /* + * Use Flow Director in addition to RSS to ensure the best + * distribution of flows across cores, even when an FDIR flow + * isn't matched. + */ + if (rss_i > 1 && adapter->atr_sample_rate) { + f = &adapter->ring_feature[RING_F_FDIR]; + + f->indices = min_t(u16, num_online_cpus(), f->limit); + rss_i = max_t(u16, rss_i, f->indices); + + if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) + adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; + } + +#ifdef IXGBE_FCOE + /* + * FCoE can exist on the same rings as standard network traffic + * however it is preferred to avoid that if possible. In order + * to get the best performance we allocate as many FCoE queues + * as we can and we place them at the end of the ring array to + * avoid sharing queues with standard RSS on systems with 24 or + * more CPUs. + */ + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { + u16 fcoe_i; + + f = &adapter->ring_feature[RING_F_FCOE]; + + /* merge FCoE queues with RSS queues */ + fcoe_i = min_t(u16, f->limit + rss_i, num_online_cpus()); + + /* limit indices to rss_i if MSI-X is disabled */ + if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) + fcoe_i = rss_i; + + /* attempt to reserve some queues for just FCoE */ + f->indices = min_t(u16, fcoe_i, f->limit); + f->offset = fcoe_i - f->indices; + rss_i = max_t(u16, fcoe_i, rss_i); + } + +#endif /* IXGBE_FCOE */ + adapter->num_rx_queues = rss_i; +#ifdef HAVE_TX_MQ + adapter->num_tx_queues = rss_i; +#endif + + return true; +} + +/* + * ixgbe_set_num_queues: Allocate queues for device, feature dependent + * @adapter: board private structure to initialize + * + * This is the top level queue allocation routine. The order here is very + * important, starting with the "most" number of features turned on at once, + * and ending with the smallest set of features. 
This way large combinations + * can be allocated if they're turned on, and smaller combinations are the + * fallthrough conditions. + * + **/ +static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter) +{ + /* Start with base case */ + adapter->num_rx_queues = 1; + adapter->num_tx_queues = 1; + adapter->num_rx_pools = adapter->num_rx_queues; + adapter->num_rx_queues_per_pool = 1; + +#ifdef HAVE_TX_MQ + if (ixgbe_set_dcb_vmdq_queues(adapter)) + return; + + if (ixgbe_set_dcb_queues(adapter)) + return; + +#endif + if (ixgbe_set_vmdq_queues(adapter)) + return; + + ixgbe_set_rss_queues(adapter); +} + +static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, + int vectors) +{ + int err, vector_threshold; + + /* + * We'll want at least 2 (vector_threshold): + * 1) TxQ[0] + RxQ[0] handler + * 2) Other (Link Status Change, etc.) + */ + vector_threshold = MIN_MSIX_COUNT; + + /* + * The more we get, the more we will assign to Tx/Rx Cleanup + * for the separate queues...where Rx Cleanup >= Tx Cleanup. + * Right now, we simply care about how many we'll get; we'll + * set them up later while requesting irq's. + */ + while (vectors >= vector_threshold) { + err = pci_enable_msix(adapter->pdev, adapter->msix_entries, + vectors); + if (!err) /* Success in acquiring all requested vectors. */ + break; + else if (err < 0) + vectors = 0; /* Nasty failure, quit now */ + else /* err == number of vectors we should try again with */ + vectors = err; + } + + if (vectors < vector_threshold) { + /* Can't allocate enough MSI-X interrupts? Oh well. + * This just means we'll go with either a single MSI + * vector or fall back to legacy interrupts. + */ + e_warn(hw, "Unable to allocate MSI-X interrupts\n"); + adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + } else { + adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */ + /* + * Adjust for only the vectors we'll use, which is minimum + * of max_q_vectors, or the number of vectors we were allocated. + */ + vectors -= NON_Q_VECTORS; + adapter->num_q_vectors = min(vectors, adapter->max_q_vectors); + } +} + +static void ixgbe_add_ring(struct ixgbe_ring *ring, + struct ixgbe_ring_container *head) +{ + ring->next = head->ring; + head->ring = ring; + head->count++; +} + +/** + * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector + * @adapter: board private structure to initialize + * @v_count: q_vectors allocated on adapter, used for ring interleaving + * @v_idx: index of vector in adapter struct + * @txr_count: total number of Tx rings to allocate + * @txr_idx: index of first Tx ring to allocate + * @rxr_count: total number of Rx rings to allocate + * @rxr_idx: index of first Rx ring to allocate + * + * We allocate one q_vector. If allocation fails we return -ENOMEM. 
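+ *
+ * Rings are interleaved across vectors: this vector owns Tx rings
+ * txr_idx, txr_idx + v_count, txr_idx + 2 * v_count, and so on (and
+ * likewise for Rx). For example, with 4 q_vectors and 8 Tx rings,
+ * vector 0 owns Tx rings 0 and 4.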
+ **/ +static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, + unsigned int v_count, unsigned int v_idx, + unsigned int txr_count, unsigned int txr_idx, + unsigned int rxr_count, unsigned int rxr_idx) +{ + struct ixgbe_q_vector *q_vector; + struct ixgbe_ring *ring; + int node = -1; +#ifdef HAVE_IRQ_AFFINITY_HINT + int cpu = -1; +#endif + int ring_count, size; + + ring_count = txr_count + rxr_count; + size = sizeof(struct ixgbe_q_vector) + + (sizeof(struct ixgbe_ring) * ring_count); + +#ifdef HAVE_IRQ_AFFINITY_HINT + /* customize cpu for Flow Director mapping */ + if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { + if (cpu_online(v_idx)) { + cpu = v_idx; + node = cpu_to_node(cpu); + } + } + +#endif + /* allocate q_vector and rings */ + q_vector = kzalloc_node(size, GFP_KERNEL, node); + if (!q_vector) + q_vector = kzalloc(size, GFP_KERNEL); + if (!q_vector) + return -ENOMEM; + + /* setup affinity mask and node */ +#ifdef HAVE_IRQ_AFFINITY_HINT + if (cpu != -1) + cpumask_set_cpu(cpu, &q_vector->affinity_mask); + else + cpumask_copy(&q_vector->affinity_mask, cpu_online_mask); +#endif + q_vector->numa_node = node; + +#ifndef IXGBE_NO_LRO + /* initialize LRO */ + __skb_queue_head_init(&q_vector->lrolist.active); + +#endif + /* initialize NAPI */ + netif_napi_add(adapter->netdev, &q_vector->napi, + ixgbe_poll, 64); + + /* tie q_vector and adapter together */ + adapter->q_vector[v_idx] = q_vector; + q_vector->adapter = adapter; + q_vector->v_idx = v_idx; + + /* initialize work limits */ + q_vector->tx.work_limit = adapter->tx_work_limit; + q_vector->rx.work_limit = adapter->rx_work_limit; + + /* initialize pointer to rings */ + ring = q_vector->ring; + + while (txr_count) { + /* assign generic ring traits */ + ring->dev = pci_dev_to_dev(adapter->pdev); + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Tx values */ + ixgbe_add_ring(ring, &q_vector->tx); + + /* apply Tx specific ring traits */ + ring->count = adapter->tx_ring_count; + ring->queue_index = txr_idx; + + /* assign ring to adapter */ + adapter->tx_ring[txr_idx] = ring; + + /* update count and index */ + txr_count--; + txr_idx += v_count; + + /* push pointer to next ring */ + ring++; + } + + while (rxr_count) { + /* assign generic ring traits */ + ring->dev = pci_dev_to_dev(adapter->pdev); + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Rx values */ + ixgbe_add_ring(ring, &q_vector->rx); + + /* + * 82599 errata, UDP frames with a 0 checksum + * can be marked as checksum errors. 
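+ * Setting __IXGBE_RX_CSUM_UDP_ZERO_ERR on the ring lets the Rx
+ * checksum handling tolerate those frames rather than report them
+ * as hardware checksum errors.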
+ */ + if (adapter->hw.mac.type == ixgbe_mac_82599EB) + set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state); + +#ifndef HAVE_NDO_SET_FEATURES + /* enable rx csum by default */ + set_bit(__IXGBE_RX_CSUM_ENABLED, &ring->state); + +#endif +#ifdef IXGBE_FCOE + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { + struct ixgbe_ring_feature *f; + f = &adapter->ring_feature[RING_F_FCOE]; + + if ((rxr_idx >= f->offset) && + (rxr_idx < f->offset + f->indices)) { + set_bit(__IXGBE_RX_FCOE, &ring->state); + } + } + +#endif + /* apply Rx specific ring traits */ + ring->count = adapter->rx_ring_count; + ring->queue_index = rxr_idx; + + /* assign ring to adapter */ + adapter->rx_ring[rxr_idx] = ring; + + /* update count and index */ + rxr_count--; + rxr_idx += v_count; + + /* push pointer to next ring */ + ring++; + } + + return 0; +} + +/** + * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector + * @adapter: board private structure to initialize + * @v_idx: Index of vector to be freed + * + * This function frees the memory allocated to the q_vector. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. + **/ +static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx) +{ + struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx]; + struct ixgbe_ring *ring; + + ixgbe_for_each_ring(ring, q_vector->tx) + adapter->tx_ring[ring->queue_index] = NULL; + + ixgbe_for_each_ring(ring, q_vector->rx) + adapter->rx_ring[ring->queue_index] = NULL; + + adapter->q_vector[v_idx] = NULL; + netif_napi_del(&q_vector->napi); +#ifndef IXGBE_NO_LRO + __skb_queue_purge(&q_vector->lrolist.active); +#endif + kfree(q_vector); +} + +/** + * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors + * @adapter: board private structure to initialize + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. + **/ +static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) +{ + unsigned int q_vectors = adapter->num_q_vectors; + unsigned int rxr_remaining = adapter->num_rx_queues; + unsigned int txr_remaining = adapter->num_tx_queues; + unsigned int rxr_idx = 0, txr_idx = 0, v_idx = 0; + int err; + + if (q_vectors >= (rxr_remaining + txr_remaining)) { + for (; rxr_remaining; v_idx++) { + err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx, + 0, 0, 1, rxr_idx); + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining--; + rxr_idx++; + } + } + + for (; v_idx < q_vectors; v_idx++) { + int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); + int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); + err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx, + tqpv, txr_idx, + rqpv, rxr_idx); + + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining -= rqpv; + txr_remaining -= tqpv; + rxr_idx++; + txr_idx++; + } + + return 0; + +err_out: + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) + ixgbe_free_q_vector(adapter, v_idx); + + return -ENOMEM; +} + +/** + * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors + * @adapter: board private structure to initialize + * + * This function frees the memory allocated to the q_vectors. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. 
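+ *
+ * Note the queue and vector counts are cleared before the q_vectors
+ * themselves are freed in reverse index order.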
+ **/ +static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter) +{ + int v_idx = adapter->num_q_vectors; + + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) + ixgbe_free_q_vector(adapter, v_idx); +} + +static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter) +{ + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { + adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { + adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED; + pci_disable_msi(adapter->pdev); + } +} + +/** + * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported + * @adapter: board private structure to initialize + * + * Attempt to configure the interrupts using the best available + * capabilities of the hardware and the kernel. + **/ +static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int err = 0; + int vector, v_budget; + + if (!(adapter->flags & IXGBE_FLAG_MSIX_CAPABLE)) + goto try_msi; + + /* + * It's easy to be greedy for MSI-X vectors, but it really + * doesn't do us much good if we have a lot more vectors + * than CPU's. So let's be conservative and only ask for + * (roughly) the same number of vectors as there are CPU's. + * the default is to use pairs of vectors + */ + v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues); + v_budget = min_t(int, v_budget, num_online_cpus()); + v_budget += NON_Q_VECTORS; + + /* + * At the same time, hardware can only support a maximum of + * hw.mac->max_msix_vectors vectors. With features + * such as RSS and VMDq, we can easily surpass the number of Rx and Tx + * descriptor queues supported by our device. Thus, we cap it off in + * those rare cases where the cpu count also exceeds our vector limit. + */ + v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors); + + /* A failure in MSI-X entry allocation isn't fatal, but it does + * mean we disable MSI-X capabilities of the adapter. */ + adapter->msix_entries = kcalloc(v_budget, + sizeof(struct msix_entry), GFP_KERNEL); + if (adapter->msix_entries) { + for (vector = 0; vector < v_budget; vector++) + adapter->msix_entries[vector].entry = vector; + + ixgbe_acquire_msix_vectors(adapter, v_budget); + + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) + return; + } + +try_msi: + /* disable DCB if number of TCs exceeds 1 */ + if (netdev_get_num_tc(adapter->netdev) > 1) { + e_err(probe, "num TCs exceeds number of queues - disabling DCB\n"); + netdev_reset_tc(adapter->netdev); + + if (adapter->hw.mac.type == ixgbe_mac_82598EB) + adapter->hw.fc.requested_mode = adapter->last_lfc_mode; + + adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; + adapter->temp_dcb_cfg.pfc_mode_enable = false; + adapter->dcb_cfg.pfc_mode_enable = false; + } + adapter->dcb_cfg.num_tcs.pg_tcs = 1; + adapter->dcb_cfg.num_tcs.pfc_tcs = 1; + + /* disable VMDq */ + adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED; + +#ifdef CONFIG_PCI_IOV + /* disable SR-IOV */ + ixgbe_disable_sriov(adapter); + +#endif /* CONFIG_PCI_IOV */ + /* disable RSS */ + adapter->ring_feature[RING_F_RSS].limit = 1; + + ixgbe_set_num_queues(adapter); + adapter->num_q_vectors = 1; + + if (!(adapter->flags & IXGBE_FLAG_MSI_CAPABLE)) + return; + + err = pci_enable_msi(adapter->pdev); + if (err) { + e_warn(hw, "Unable to allocate MSI interrupt, " + "falling back to legacy. 
Error: %d\n", err); + return; + } + adapter->flags |= IXGBE_FLAG_MSI_ENABLED; +} + +/** + * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme + * @adapter: board private structure to initialize + * + * We determine which interrupt scheme to use based on... + * - Kernel support (MSI, MSI-X) + * - which can be user-defined (via MODULE_PARAM) + * - Hardware queue count (num_*_queues) + * - defined by miscellaneous hardware support/features (RSS, etc.) + **/ +int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter) +{ + int err; + + /* Number of supported queues */ + ixgbe_set_num_queues(adapter); + + /* Set interrupt mode */ + ixgbe_set_interrupt_capability(adapter); + + /* Allocate memory for queues */ + err = ixgbe_alloc_q_vectors(adapter); + if (err) { + e_err(probe, "Unable to allocate memory for queue vectors\n"); + ixgbe_reset_interrupt_capability(adapter); + return err; + } + + ixgbe_cache_ring_register(adapter); + + set_bit(__IXGBE_DOWN, &adapter->state); + + return 0; +} + +/** + * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings + * @adapter: board private structure to clear interrupt scheme on + * + * We go through and clear interrupt specific resources and reset the structure + * to pre-load conditions + **/ +void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter) +{ + ixgbe_free_q_vectors(adapter); + ixgbe_reset_interrupt_capability(adapter); +} + +void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens, + u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx) +{ + struct ixgbe_adv_tx_context_desc *context_desc; + u16 i = tx_ring->next_to_use; + + context_desc = IXGBE_TX_CTXTDESC(tx_ring, i); + + i++; + tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; + + /* set bits to identify this as an advanced context descriptor */ + type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; + + context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); + context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof); + context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); + context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); +} + diff --git a/src/ixgbe_main.c b/src/ixgbe_main.c index d2a043b..b04c4cb 100644 --- a/src/ixgbe_main.c +++ b/src/ixgbe_main.c @@ -64,21 +64,17 @@ static const char ixgbe_driver_string[] = "Intel(R) 10 Gigabit PCI Express Network Driver"; #define DRV_HW_PERF -#ifndef CONFIG_IXGBE_NAPI -#define DRIVERNAPI -#else -#define DRIVERNAPI "-NAPI" -#endif - #define FPGA +#define DRIVERIOV + #define VMDQ_TAG #define MAJ 3 -#define MIN 9 -#define BUILD 17 +#define MIN 10 +#define BUILD 16 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." 
\ - __stringify(BUILD) DRIVERNAPI DRV_HW_PERF FPGA VMDQ_TAG + __stringify(BUILD) DRIVERIOV DRV_HW_PERF FPGA VMDQ_TAG const char ixgbe_driver_version[] = DRV_VERSION; static const char ixgbe_copyright[] = "Copyright (c) 1999-2012 Intel Corporation."; @@ -119,7 +115,7 @@ DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = { {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2)}, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS)}, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP)}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_QSFP_SF_QP)}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP)}, /* required last entry */ {0, } }; @@ -680,8 +676,7 @@ static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector) static void ixgbe_setup_dca(struct ixgbe_adapter *adapter) { - int num_q_vectors; - int i; + int v_idx; if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED)) return; @@ -689,14 +684,9 @@ static void ixgbe_setup_dca(struct ixgbe_adapter *adapter) /* always use CB2 mode, difference is masked in the CB driver */ IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2); - if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) - num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - else - num_q_vectors = 1; - - for (i = 0; i < num_q_vectors; i++) { - adapter->q_vector[i]->cpu = -1; - ixgbe_update_dca(adapter->q_vector[i]); + for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { + adapter->q_vector[v_idx]->cpu = -1; + ixgbe_update_dca(adapter->q_vector[v_idx]); } } @@ -745,17 +735,17 @@ static inline void ixgbe_rx_hash(struct ixgbe_ring *ring, #ifdef IXGBE_FCOE /** * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type - * @adapter: address of board private structure + * @ring: structure containing ring specific data * @rx_desc: advanced rx descriptor * * Returns : true if it is FCoE pkt */ -static inline bool ixgbe_rx_is_fcoe(struct ixgbe_adapter *adapter, +static inline bool ixgbe_rx_is_fcoe(struct ixgbe_ring *ring, union ixgbe_adv_rx_desc *rx_desc) { __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; - return (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && + return test_bit(__IXGBE_RX_FCOE, &ring->state) && ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) == (cpu_to_le16(IXGBE_ETQF_FILTER_FCOE << IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT))); @@ -882,7 +872,7 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring, /* alloc new page for storage */ if (likely(!page)) { - page = alloc_pages(GFP_ATOMIC | __GFP_COLD, + page = alloc_pages(GFP_ATOMIC | __GFP_COLD | __GFP_COMP, ixgbe_rx_pg_order(rx_ring)); if (unlikely(!page)) { rx_ring->rx_stats.alloc_rx_page_failed++; @@ -1062,27 +1052,17 @@ static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector, struct vlan_group **vlgrp = netdev_priv(skb->dev); if (!*vlgrp) dev_kfree_skb_any(skb); -#ifdef CONFIG_IXGBE_NAPI else if (adapter->flags & IXGBE_FLAG_IN_NETPOLL) vlan_hwaccel_rx(skb, *vlgrp, vlan_tag); else vlan_gro_receive(&q_vector->napi, *vlgrp, vlan_tag, skb); -#else - else if (vlan_hwaccel_rx(skb, *vlgrp, vlan_tag) == NET_RX_DROP) - adapter->rx_dropped_backlog++; -#endif } else { #endif /* NETIF_F_HW_VLAN_TX */ -#ifdef CONFIG_IXGBE_NAPI if (adapter->flags & IXGBE_FLAG_IN_NETPOLL) netif_rx(skb); else napi_gro_receive(&q_vector->napi, skb); -#else - if (netif_rx(skb) == NET_RX_DROP) - adapter->rx_dropped_backlog++; -#endif #ifdef NETIF_F_HW_VLAN_TX } #endif /* NETIF_F_HW_VLAN_TX */ @@ -1194,12 +1174,7 @@ static void ixgbe_lro_flush(struct ixgbe_q_vector *q_vector, #ifdef HAVE_VLAN_RX_REGISTER ixgbe_receive_skb(q_vector, skb); #else -#ifdef CONFIG_IXGBE_NAPI 
napi_gro_receive(&q_vector->napi, skb); -#else - if (netif_rx(skb) == NET_RX_DROP) - q_vector->adapter->rx_dropped_backlog++; -#endif #endif lrolist->stats.flushed++; } @@ -1462,12 +1437,7 @@ static void ixgbe_lro_receive(struct ixgbe_q_vector *q_vector, #ifdef HAVE_VLAN_RX_REGISTER ixgbe_receive_skb(q_vector, new_skb); #else -#ifdef CONFIG_IXGBE_NAPI napi_gro_receive(&q_vector->napi, new_skb); -#else - if (netif_rx(new_skb) == NET_RX_DROP) - q_vector->adapter->rx_dropped_backlog++; -#endif #endif /* HAVE_VLAN_RX_REGISTER */ } @@ -1494,6 +1464,7 @@ static unsigned int ixgbe_get_headlen(unsigned char *data, struct vlan_hdr *vlan; /* l3 headers */ struct iphdr *ipv4; + struct ipv6hdr *ipv6; } hdr; __be16 protocol; u8 nexthdr = 0; /* default to not TCP */ @@ -1534,6 +1505,15 @@ static unsigned int ixgbe_get_headlen(unsigned char *data, /* record next protocol */ nexthdr = hdr.ipv4->protocol; hdr.network += hlen; +#ifdef NETIF_F_TSO6 + } else if (protocol == __constant_htons(ETH_P_IPV6)) { + if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr))) + return max_len; + + /* record next protocol */ + nexthdr = hdr.ipv6->nexthdr; + hdr.network += sizeof(struct ipv6hdr); +#endif /* NETIF_F_TSO6 */ #ifdef IXGBE_FCOE } else if (protocol == __constant_htons(ETH_P_FCOE)) { if ((hdr.network - data) > (max_len - FCOE_HEADER_LEN)) @@ -1544,7 +1524,7 @@ static unsigned int ixgbe_get_headlen(unsigned char *data, return hdr.network - data; } - /* finally sort out TCP */ + /* finally sort out TCP/UDP */ if (nexthdr == IPPROTO_TCP) { if ((hdr.network - data) > (max_len - sizeof(struct tcphdr))) return max_len; @@ -1557,6 +1537,11 @@ static unsigned int ixgbe_get_headlen(unsigned char *data, return hdr.network - data; hdr.network += hlen; + } else if (nexthdr == IPPROTO_UDP) { + if ((hdr.network - data) > (max_len - sizeof(struct udphdr))) + return max_len; + + hdr.network += sizeof(struct udphdr); } /* @@ -1629,6 +1614,22 @@ static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring, IXGBE_CB(skb)->append_cnt = 0; } +static void ixgbe_rx_vlan(struct ixgbe_ring *ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + if ((netdev_ring(ring)->features & NETIF_F_HW_VLAN_RX) && + ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) +#ifndef HAVE_VLAN_RX_REGISTER + __vlan_hwaccel_put_tag(skb, + le16_to_cpu(rx_desc->wb.upper.vlan)); +#else + IXGBE_CB(skb)->vid = le16_to_cpu(rx_desc->wb.upper.vlan); + else + IXGBE_CB(skb)->vid = 0; +#endif +} + /** * ixgbe_process_skb_fields - Populate skb header fields from Rx descriptor * @rx_ring: rx descriptor ring packet is being transacted on @@ -1650,16 +1651,7 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, #endif /* NETIF_F_RXHASH */ ixgbe_rx_checksum(rx_ring, rx_desc, skb); - if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) { - u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan); -#ifdef HAVE_VLAN_RX_REGISTER - IXGBE_CB(skb)->vid = vid; - } else { - IXGBE_CB(skb)->vid = 0; -#else - __vlan_hwaccel_put_tag(skb, vid); -#endif - } + ixgbe_rx_vlan(rx_ring, rx_desc, skb); skb_record_rx_queue(skb, ring_queue_index(rx_ring)); @@ -1679,12 +1671,7 @@ static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector, #ifdef HAVE_VLAN_RX_REGISTER ixgbe_receive_skb(q_vector, skb); #else -#ifdef CONFIG_IXGBE_NAPI napi_gro_receive(&q_vector->napi, skb); -#else - if (netif_rx(skb) == NET_RX_DROP) - q_vector->adapter->rx_dropped_backlog++; -#endif #endif #ifndef NETIF_F_GRO @@ -1803,8 +1790,8 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring, * 60 
bytes if the skb->len is less than 60 for skb_pad. */ pull_len = skb_frag_size(frag); - if (pull_len > 256) - pull_len = ixgbe_get_headlen(va, pull_len); + if (pull_len > IXGBE_RX_HDR_SIZE) + pull_len = ixgbe_get_headlen(va, IXGBE_RX_HDR_SIZE); /* align pull length to size of long to optimize memcpy performance */ skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); @@ -1826,6 +1813,12 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring, skb->truesize -= ixgbe_rx_bufsz(rx_ring); } +#ifdef IXGBE_FCOE + /* do not attempt to pad FCoE Frames as this will disrupt DDP */ + if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) + return false; + +#endif /* if skb_pad returns an error the skb was freed */ if (unlikely(skb->len < 60)) { int pad_len = 60 - skb->len; @@ -1925,20 +1918,12 @@ static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring, * * Returns true if all work is completed without reaching budget **/ -#ifdef IXGBE_NO_NAPI -static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, - struct ixgbe_ring *rx_ring) -{ - unsigned int budget = q_vector->rx.work_limit; -#else static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, struct ixgbe_ring *rx_ring, int budget) { -#endif unsigned int total_rx_bytes = 0, total_rx_packets = 0; #ifdef IXGBE_FCOE - struct ixgbe_adapter *adapter = q_vector->adapter; int ddp_bytes = 0; #endif /* IXGBE_FCOE */ u16 cleaned_count = ixgbe_desc_unused(rx_ring); @@ -2059,8 +2044,9 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, #ifdef IXGBE_FCOE /* if ddp, not passing to ULD unless for FCP_RSP or error */ - if (ixgbe_rx_is_fcoe(adapter, rx_desc)) { - ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb); + if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) { + ddp_bytes = ixgbe_fcoe_ddp(q_vector->adapter, + rx_desc, skb); if (!ddp_bytes) { dev_kfree_skb_any(skb); #ifndef NETIF_F_GRO @@ -2120,20 +2106,12 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, * * Returns true if all work is completed without reaching budget **/ -#ifdef IXGBE_NO_NAPI -static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, - struct ixgbe_ring *rx_ring) -{ - unsigned int budget = q_vector->rx.work_limit; -#else static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, struct ixgbe_ring *rx_ring, int budget) { -#endif unsigned int total_rx_bytes = 0, total_rx_packets = 0; #ifdef IXGBE_FCOE - struct ixgbe_adapter *adapter = q_vector->adapter; int ddp_bytes = 0; #endif /* IXGBE_FCOE */ u16 cleaned_count = ixgbe_desc_unused(rx_ring); @@ -2227,8 +2205,9 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, #ifdef IXGBE_FCOE /* if ddp, not passing to ULD unless for FCP_RSP or error */ - if (ixgbe_rx_is_fcoe(adapter, rx_desc)) { - ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb); + if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) { + ddp_bytes = ixgbe_fcoe_ddp(q_vector->adapter, + rx_desc, skb); if (!ddp_bytes) { dev_kfree_skb_any(skb); #ifndef NETIF_F_GRO @@ -2285,12 +2264,9 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, **/ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) { - struct ixgbe_q_vector *q_vector; - int q_vectors, v_idx; + int v_idx; u32 mask; - q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - /* Populate MSIX to EITR Select */ if (adapter->num_vfs >= 32) { u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1; @@ -2301,9 +2277,9 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) * Populate the IVAR table and set the ITR values to the * corresponding register. 
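 * Each Rx and Tx ring attached to a vector has its register index
 * written into the IVAR table, so events on those rings raise that
 * vector's MSI-X interrupt.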
*/ - for (v_idx = 0; v_idx < q_vectors; v_idx++) { + for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { + struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx]; struct ixgbe_ring *ring; - q_vector = adapter->q_vector[v_idx]; ixgbe_for_each_ring(ring, q_vector->rx) ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx); @@ -2801,41 +2777,6 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data) return IRQ_HANDLED; } -#ifndef CONFIG_IXGBE_NAPI -static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data) -{ - struct ixgbe_q_vector *q_vector = data; - struct ixgbe_adapter *adapter = q_vector->adapter; - struct ixgbe_ring *ring; - bool clean_complete = true; - - if (!q_vector->tx.ring && !q_vector->rx.ring) - return IRQ_HANDLED; - -#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) - if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) - ixgbe_update_dca(q_vector); -#endif - - ixgbe_for_each_ring(ring, q_vector->tx) - clean_complete &= ixgbe_clean_tx_irq(q_vector, ring); - - ixgbe_for_each_ring(ring, q_vector->rx) - clean_complete &= ixgbe_clean_rx_irq(q_vector, ring); - - if (adapter->rx_itr_setting == 1) - ixgbe_set_itr(q_vector); - - if (!test_bit(__IXGBE_DOWN, &adapter->state)) { - u64 eics = ((u64)1 << q_vector->v_idx); - ixgbe_irq_enable_queues(adapter, eics); - if (!clean_complete) - ixgbe_irq_rearm_queues(adapter, eics); - } - - return IRQ_HANDLED; -} -#else /* CONFIG_IXGBE_NAPI */ static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data) { struct ixgbe_q_vector *q_vector = data; @@ -2855,7 +2796,7 @@ static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data) * * This function will clean all queues associated with a q_vector. **/ -static int ixgbe_poll(struct napi_struct *napi, int budget) +int ixgbe_poll(struct napi_struct *napi, int budget) { struct ixgbe_q_vector *q_vector = container_of(napi, struct ixgbe_q_vector, napi); @@ -2901,7 +2842,6 @@ static int ixgbe_poll(struct napi_struct *napi, int budget) return 0; } -#endif /* CONFIG_IXGBE_NAPI */ /** * ixgbe_request_msix_irqs - Initialize MSI-X interrupts @@ -2913,11 +2853,10 @@ static int ixgbe_poll(struct napi_struct *napi, int budget) static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) { struct net_device *netdev = adapter->netdev; - int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; int vector, err; int ri = 0, ti = 0; - for (vector = 0; vector < q_vectors; vector++) { + for (vector = 0; vector < adapter->num_q_vectors; vector++) { struct ixgbe_q_vector *q_vector = adapter->q_vector[vector]; struct msix_entry *entry = &adapter->msix_entries[vector]; @@ -3030,7 +2969,6 @@ static irqreturn_t ixgbe_intr(int irq, void *data) ixgbe_check_fan_failure(adapter, eicr); -#ifdef CONFIG_IXGBE_NAPI /* would disable interrupts here but EIAM disabled it */ napi_schedule(&q_vector->napi); @@ -3040,22 +2978,7 @@ static irqreturn_t ixgbe_intr(int irq, void *data) */ if (!test_bit(__IXGBE_DOWN, &adapter->state)) ixgbe_irq_enable(adapter, false, false); -#else - ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]); - ixgbe_clean_rx_irq(q_vector, adapter->rx_ring[0]); - - /* dynamically adjust throttle */ - if (adapter->rx_itr_setting == 1) - ixgbe_set_itr(q_vector); - /* - * Workaround of Silicon errata #26 on 82598. 
Unmask - * the interrupt that we masked before the EICR read - * no flush of the re-enable is necessary here - */ - if (!test_bit(__IXGBE_DOWN, &adapter->state)) - ixgbe_irq_enable(adapter, true, false); -#endif return IRQ_HANDLED; } @@ -3088,32 +3011,30 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter) static void ixgbe_free_irq(struct ixgbe_adapter *adapter) { - if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { - int i, q_vectors; + int vector; - q_vectors = adapter->num_msix_vectors; - i = q_vectors - 1; - free_irq(adapter->msix_entries[i].vector, adapter); - i--; + if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { + free_irq(adapter->pdev->irq, adapter); + return; + } - for (; i >= 0; i--) { - /* free only the irqs that were actually requested */ - if (!adapter->q_vector[i]->rx.ring && - !adapter->q_vector[i]->tx.ring) - continue; + for (vector = 0; vector < adapter->num_q_vectors; vector++) { + struct ixgbe_q_vector *q_vector = adapter->q_vector[vector]; + struct msix_entry *entry = &adapter->msix_entries[vector]; + + /* free only the irqs that were actually requested */ + if (!q_vector->rx.ring && !q_vector->tx.ring) + continue; #ifdef HAVE_IRQ_AFFINITY_HINT - /* clear the affinity_mask in the IRQ descriptor */ - irq_set_affinity_hint(adapter->msix_entries[i].vector, - NULL); + /* clear the affinity_mask in the IRQ descriptor */ + irq_set_affinity_hint(entry->vector, NULL); #endif - free_irq(adapter->msix_entries[i].vector, - adapter->q_vector[i]); - } - } else { - free_irq(adapter->pdev->irq, adapter); + free_irq(entry->vector, q_vector); } + + free_irq(adapter->msix_entries[vector++].vector, adapter); } /** @@ -3137,9 +3058,12 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter) } IXGBE_WRITE_FLUSH(&adapter->hw); if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { - int i; - for (i = 0; i < adapter->num_msix_vectors; i++) - synchronize_irq(adapter->msix_entries[i].vector); + int vector; + + for (vector = 0; vector < adapter->num_q_vectors; vector++) + synchronize_irq(adapter->msix_entries[vector].vector); + + synchronize_irq(adapter->msix_entries[vector++].vector); } else { synchronize_irq(adapter->pdev->irq); } @@ -3226,8 +3150,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, 32; /* PTHRESH = 32 */ /* reinitialize flowdirector state */ - if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) && - adapter->atr_sample_rate) { + if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { ring->atr_sample_rate = adapter->atr_sample_rate; ring->atr_count = 0; set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state); @@ -3257,9 +3180,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; - u32 rttdcs; - u32 mask; - u32 reg; + u32 rttdcs, mtqc; u8 tcs = netdev_get_num_tc(adapter->netdev); if (hw->mac.type == ixgbe_mac_82598EB) @@ -3271,46 +3192,32 @@ static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); /* set transmit pool layout */ - mask = IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_VMDQ_ENABLED; - mask |= IXGBE_FLAG_DCB_ENABLED; - switch (adapter->flags & mask) { - - case IXGBE_FLAG_VMDQ_ENABLED: - case IXGBE_FLAG_SRIOV_ENABLED: - case (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_VMDQ_ENABLED): - IXGBE_WRITE_REG(hw, IXGBE_MTQC, - (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF)); - break; - case (IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_DCB_ENABLED): - case (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_DCB_ENABLED): - case 
(IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_VMDQ_ENABLED - | IXGBE_FLAG_DCB_ENABLED): - IXGBE_WRITE_REG(hw, IXGBE_MTQC, - (IXGBE_MTQC_RT_ENA - | IXGBE_MTQC_VT_ENA - | IXGBE_MTQC_4TC_4TQ)); - break; - - case IXGBE_FLAG_DCB_ENABLED: - if (!tcs) - reg = IXGBE_MTQC_64Q_1PB; - else if (tcs <= 4) - reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ; + if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) { + mtqc = IXGBE_MTQC_VT_ENA; + if (tcs > 4) + mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ; + else if (tcs > 1) + mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ; + else if (adapter->ring_feature[RING_F_RSS].indices == 4) + mtqc |= IXGBE_MTQC_32VF; + else + mtqc |= IXGBE_MTQC_64VF; + } else { + if (tcs > 4) + mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ; + else if (tcs > 1) + mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ; else - reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ; + mtqc = IXGBE_MTQC_64Q_1PB; + } - IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg); + IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc); - /* Enable Security TX Buffer IFG for multiple pb */ - if (tcs) { - reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG); - reg |= IXGBE_SECTX_DCB; - IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg); - } - break; - default: - IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB); - break; + /* Enable Security TX Buffer IFG for multiple pb */ + if (tcs) { + u32 sectx = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG); + sectx |= IXGBE_SECTX_DCB; + IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, sectx); } /* re-enable the arbiter */ @@ -3413,59 +3320,28 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, u32 srrctl; u8 reg_idx = rx_ring->reg_idx; - switch (hw->mac.type) { - case ixgbe_mac_82598EB: { - struct ixgbe_ring_feature *feature = adapter->ring_feature; - /* program one srrctl register per VMDq index */ - if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) { - unsigned long mask; - long shift, len; - mask = (unsigned long) feature[RING_F_VMDQ].mask; - len = sizeof(feature[RING_F_VMDQ].mask) * 8; - shift = find_first_bit(&mask, len); - reg_idx = (reg_idx & mask) >> shift; - } else { - /* - * if VMDq is not active we must program one srrctl - * register per RSS queue since we have enabled - * RDRXCTL.MVMEN - */ - const int mask = feature[RING_F_RSS].mask; - reg_idx = reg_idx & mask; - } - } - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - default: - break; - } + if (hw->mac.type == ixgbe_mac_82598EB) { + u16 mask = adapter->ring_feature[RING_F_RSS].mask; - srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx)); + /* program one srrctl register per VMDq index */ + if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) + mask = adapter->ring_feature[RING_F_VMDQ].mask; - srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; - srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; - srrctl &= ~IXGBE_SRRCTL_DROP_EN; + /* + * if VMDq is not active we must program one srrctl register + * per RSS queue since we have enabled RDRXCTL.MVMEN + */ + reg_idx &= mask; - /* - * We should set the drop enable bit if: - * SR-IOV is enabled - * or - * Flow Control is disabled and number of RX queues > 1 - * - * This allows us to avoid head of line blocking for security - * and performance reasons. 
- */ - if (adapter->num_vfs || - (adapter->num_rx_queues > 1 && - (hw->fc.disable_fc_autoneg || - hw->fc.requested_mode == ixgbe_fc_none || - hw->fc.requested_mode == ixgbe_fc_rx_pause))) - srrctl |= IXGBE_SRRCTL_DROP_EN; + /* divide by the first bit of the mask to get the indices */ + if (reg_idx) + reg_idx /= ((~mask) + 1) & mask; + } - srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & - IXGBE_SRRCTL_BSIZEHDR_MASK; + /* configure header buffer length, needed for RSC */ + srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT; + /* configure the packet buffer length */ #ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; @@ -3476,6 +3352,8 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; #endif #endif + + /* configure descriptor type */ srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl); @@ -3490,13 +3368,15 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) u32 mrqc = 0, reta = 0; u32 rxcsum; int i, j; - int maxq = adapter->ring_feature[RING_F_RSS].indices; - u8 tcs = netdev_get_num_tc(adapter->netdev); + u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; -#ifdef HAVE_MQPRIO - if (tcs) - maxq = min(maxq, adapter->num_rx_queues / tcs); -#endif + /* + * Program table for at least 2 queues w/ SR-IOV so that VFs can + * make full use of any rings they may have. We will use the + * PSRTYPE register to control how many rings we use within the PF. + */ + if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 2)) + rss_i = 2; /* Fill out hash function seeds */ for (i = 0; i < 10; i++) @@ -3504,7 +3384,7 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) /* Fill out redirection table */ for (i = 0, j = 0; i < 128; i++, j++) { - if (j == maxq) + if (j == rss_i) j = 0; /* reta = 4-byte sliding window of * 0x00..(indices-1)(indices-1)00..etc. 
*/ @@ -3518,64 +3398,36 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) rxcsum |= IXGBE_RXCSUM_PCSD; IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); - if (adapter->hw.mac.type == ixgbe_mac_82598EB && - (adapter->flags & IXGBE_FLAG_RSS_ENABLED)) { - mrqc = IXGBE_MRQC_RSSEN; + if (adapter->hw.mac.type == ixgbe_mac_82598EB) { + if (adapter->ring_feature[RING_F_RSS].mask) + mrqc = IXGBE_MRQC_RSSEN; } else { - int mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED - | IXGBE_FLAG_DCB_ENABLED - | IXGBE_FLAG_VMDQ_ENABLED - | IXGBE_FLAG_SRIOV_ENABLED); + u8 tcs = netdev_get_num_tc(adapter->netdev); - switch (mask) { - case (IXGBE_FLAG_RSS_ENABLED): - mrqc = IXGBE_MRQC_RSSEN; - break; - case (IXGBE_FLAG_SRIOV_ENABLED): - mrqc = IXGBE_MRQC_VMDQEN; - break; - case (IXGBE_FLAG_VMDQ_ENABLED): - case (IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED): - mrqc = IXGBE_MRQC_VMDQEN; - break; - case (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_VMDQ_ENABLED): - if (adapter->ring_feature[RING_F_RSS].indices == 4) - mrqc = IXGBE_MRQC_VMDQRSS32EN; - else if (adapter->ring_feature[RING_F_RSS].indices == 2) - mrqc = IXGBE_MRQC_VMDQRSS64EN; - else - mrqc = IXGBE_MRQC_VMDQEN; - break; - case (IXGBE_FLAG_DCB_ENABLED | IXGBE_FLAG_VMDQ_ENABLED): - case (IXGBE_FLAG_DCB_ENABLED | IXGBE_FLAG_SRIOV_ENABLED): - case (IXGBE_FLAG_DCB_ENABLED | IXGBE_FLAG_VMDQ_ENABLED - | IXGBE_FLAG_SRIOV_ENABLED): - if (tcs <= 4) + if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) { + if (tcs > 4) + mrqc = IXGBE_MRQC_VMDQRT8TCEN; /* 8 TCs */ + else if (tcs > 1) mrqc = IXGBE_MRQC_VMDQRT4TCEN; /* 4 TCs */ + else if (adapter->ring_feature[RING_F_RSS].indices == 4) + mrqc = IXGBE_MRQC_VMDQRSS32EN; else - mrqc = IXGBE_MRQC_VMDQRT8TCEN; /* 8 TCs */ - break; - case (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_DCB_ENABLED): - if (!tcs) - mrqc = IXGBE_MRQC_RSSEN; - else if (tcs <= 4) + mrqc = IXGBE_MRQC_VMDQRSS64EN; + } else { + if (tcs > 4) + mrqc = IXGBE_MRQC_RTRSS8TCEN; + else if (tcs > 1) mrqc = IXGBE_MRQC_RTRSS4TCEN; else - mrqc = IXGBE_MRQC_RTRSS8TCEN; - break; - case (IXGBE_FLAG_DCB_ENABLED): - mrqc = IXGBE_MRQC_RT8TCEN; - break; - default: - break; + mrqc = IXGBE_MRQC_RSSEN; } } /* Perform hash on these packet types */ - mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 - | IXGBE_MRQC_RSS_FIELD_IPV4_TCP - | IXGBE_MRQC_RSS_FIELD_IPV6 - | IXGBE_MRQC_RSS_FIELD_IPV6_TCP; + mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 | + IXGBE_MRQC_RSS_FIELD_IPV4_TCP | + IXGBE_MRQC_RSS_FIELD_IPV6 | + IXGBE_MRQC_RSS_FIELD_IPV6_TCP; if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP) mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; @@ -3768,6 +3620,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; + int rss_i = adapter->ring_feature[RING_F_RSS].indices; int p; /* PSRTYPE must be initialized in non 82598 adapters */ @@ -3780,13 +3633,10 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter) if (hw->mac.type == ixgbe_mac_82598EB) return; - if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { - int rss_i = adapter->num_rx_queues_per_pool; - if (rss_i > 3) - psrtype |= 2 << 29; - else if (rss_i > 1) - psrtype |= 1 << 29; - } + if (rss_i > 3) + psrtype |= 2 << 29; + else if (rss_i > 1) + psrtype |= 1 << 29; for (p = 0; p < adapter->num_rx_pools; p++) IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(p)), psrtype); @@ -3795,55 +3645,51 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter) static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = 
&adapter->hw; -#ifdef HAVE_VF_SPOOFCHK_CONFIGURE + u32 reg_offset, vf_shift; + u32 gcr_ext, vmdctl; int i; -#endif - u32 gcr_ext; - u32 vt_reg; - u32 vt_reg_bits; - u32 pool; - u32 vmdctl; - if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED || - adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)) return; switch (hw->mac.type) { case ixgbe_mac_82598EB: - vt_reg = IXGBE_VMD_CTL; - vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN; - vmdctl = IXGBE_READ_REG(hw, vt_reg); - IXGBE_WRITE_REG(hw, vt_reg, vmdctl | vt_reg_bits); + vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL); + vmdctl |= IXGBE_VMD_CTL_VMDQ_EN; + IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl); break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: - vt_reg = IXGBE_VT_CTL; - vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN; - if (adapter->num_vfs) { - vt_reg_bits &= ~IXGBE_VT_CTL_POOL_MASK; - vt_reg_bits |= (adapter->num_vfs << - IXGBE_VT_CTL_POOL_SHIFT); - vt_reg_bits |= IXGBE_VT_CTL_REPLEN; - } - vmdctl = IXGBE_READ_REG(hw, vt_reg); - IXGBE_WRITE_REG(hw, vt_reg, vmdctl | vt_reg_bits); - for (pool = 1; pool < adapter->num_rx_pools; pool++) { + vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); + vmdctl |= IXGBE_VT_CTL_VT_ENABLE; + vmdctl &= ~IXGBE_VT_CTL_POOL_MASK; + vmdctl |= VMDQ_P(0) << IXGBE_VT_CTL_POOL_SHIFT; + if (adapter->num_vfs) + vmdctl |= IXGBE_VT_CTL_REPLEN; + IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl); + + for (i = 1; i < adapter->num_rx_pools; i++) { u32 vmolr; - int vmdq_pool = VMDQ_P(pool); + int pool = VMDQ_P(i); /* * accept untagged packets until a vlan tag * is specifically set for the VMDQ queue/pool */ - vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vmdq_pool)); + vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool)); vmolr |= IXGBE_VMOLR_AUPE; vmolr |= IXGBE_VMOLR_BAM; - IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vmdq_pool), vmolr); + IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr); } - IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0xFFFFFFFF); - IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0xFFFFFFFF); - IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0xFFFFFFFF); - IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0xFFFFFFFF); + + vf_shift = VMDQ_P(0) % 32; + reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0; + + /* Enable only the PF pools for Tx/Rx */ + IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (~0) << vf_shift); + IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1); + IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (~0) << vf_shift); + IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1); break; default: break; @@ -3856,9 +3702,18 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) * Set up VF register offsets for selected VT Mode, * i.e. 
32 or 64 VFs for SR-IOV */ - gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT); - gcr_ext |= IXGBE_GCR_EXT_MSIX_EN; - gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64; + switch (adapter->ring_feature[RING_F_VMDQ].mask) { + case IXGBE_82599_VMDQ_8Q_MASK: + gcr_ext = IXGBE_GCR_EXT_VT_MODE_16; + break; + case IXGBE_82599_VMDQ_4Q_MASK: + gcr_ext = IXGBE_GCR_EXT_VT_MODE_32; + break; + default: + gcr_ext = IXGBE_GCR_EXT_VT_MODE_64; + break; + } + IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext); /* enable Tx loopback for VF/PF communication */ @@ -3947,7 +3802,7 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) rx_ring->rx_buf_len = rx_buf_len; #ifdef IXGBE_FCOE - if (test_bit(__IXGBE_RX_FCOE_BUFSZ, &rx_ring->state) && + if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state) && (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE)) rx_ring->rx_buf_len = IXGBE_FCOE_JUMBO_FRAME_SIZE; #endif /* IXGBE_FCOE */ @@ -4043,7 +3898,7 @@ static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; - int pool_ndx = adapter->num_vfs; + int pool_ndx = VMDQ_P(0); /* add VID to filter table */ if (hw->mac.ops.set_vfta) { @@ -4097,7 +3952,7 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; - int pool_ndx = adapter->num_vfs; + int pool_ndx = VMDQ_P(0); /* User is not allowed to remove vlan ID 0 */ if (!vid) @@ -4283,10 +4138,12 @@ static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq) #else struct dev_mc_list *mc_ptr; #endif +#ifdef CONFIG_PCI_IOV struct ixgbe_adapter *adapter = hw->back; +#endif /* CONFIG_PCI_IOV */ u8 *addr = *mc_addr_ptr; - *vmdq = adapter->num_vfs; + *vmdq = VMDQ_P(0); #ifdef NETDEV_HW_ADDR_T_MULTICAST mc_ptr = container_of(addr, struct netdev_hw_addr, addr[0]); @@ -4411,11 +4268,12 @@ int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue) int i; if (is_zero_ether_addr(addr)) - return 0; + return -EINVAL; for (i = 0; i < hw->mac.num_rar_entries; i++) { - if (adapter->mac_table[i].state & IXGBE_MAC_STATE_IN_USE) + if (adapter->mac_table[i].state & IXGBE_MAC_STATE_IN_USE) { continue; + } adapter->mac_table[i].state |= (IXGBE_MAC_STATE_MODIFIED | IXGBE_MAC_STATE_IN_USE); memcpy(adapter->mac_table[i].addr, addr, ETH_ALEN); @@ -4456,7 +4314,8 @@ int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, u8* addr, u16 queue) struct ixgbe_hw *hw = &adapter->hw; if (is_zero_ether_addr(addr)) - return 0; + return -EINVAL; + for (i = 0; i < hw->mac.num_rar_entries; i++) { if (!compare_ether_addr(addr, adapter->mac_table[i].addr) && adapter->mac_table[i].queue == queue) { @@ -4573,8 +4432,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev) * sufficient space to store all the addresses then enable * unicast promiscuous mode */ - count = ixgbe_write_uc_addr_list(adapter, netdev, - adapter->num_vfs); + count = ixgbe_write_uc_addr_list(adapter, netdev, VMDQ_P(0)); if (count < 0) { fctrl |= IXGBE_FCTRL_UPE; vmolr |= IXGBE_VMOLR_ROPE; @@ -4583,10 +4441,10 @@ void ixgbe_set_rx_mode(struct net_device *netdev) } if (hw->mac.type != ixgbe_mac_82598EB) { - vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(adapter->num_vfs)) & + vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) & ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE); - IXGBE_WRITE_REG(hw, IXGBE_VMOLR(adapter->num_vfs), vmolr); + IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(0)), vmolr); } IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, 
vlnctrl); @@ -4595,38 +4453,24 @@ void ixgbe_set_rx_mode(struct net_device *netdev) static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) { -#ifdef CONFIG_IXGBE_NAPI - int q_idx; struct ixgbe_q_vector *q_vector; - int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - - /* legacy and MSI only use one vector */ - if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) - q_vectors = 1; + int q_idx; - for (q_idx = 0; q_idx < q_vectors; q_idx++) { + for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) { q_vector = adapter->q_vector[q_idx]; napi_enable(&q_vector->napi); } -#endif /* CONFIG_IXGBE_NAPI */ } static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter) { -#ifdef CONFIG_IXGBE_NAPI - int q_idx; struct ixgbe_q_vector *q_vector; - int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - - /* legacy and MSI only use one vector */ - if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) - q_vectors = 1; + int q_idx; - for (q_idx = 0; q_idx < q_vectors; q_idx++) { + for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) { q_vector = adapter->q_vector[q_idx]; napi_disable(&q_vector->napi); } -#endif } #ifdef HAVE_DCBNL_IEEE @@ -4694,7 +4538,6 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) IXGBE_FCOE_JUMBO_FRAME_SIZE); #endif /* IXGBE_FCOE */ - #ifdef HAVE_DCBNL_IEEE if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) { if (adapter->ixgbe_ieee_ets) @@ -4724,24 +4567,16 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) /* Enable RSS Hash per TC */ if (hw->mac.type != ixgbe_mac_82598EB) { - int i; - u32 reg = 0; - - for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { - u8 msb = 0; -#ifdef HAVE_MQPRIO - u8 cnt; - struct netdev_tc_txq *tc_to_txq = - ixgbe_get_netdev_tc_txq(dev, i); - - cnt = tc_to_txq->count; - while (cnt >>= 1) - msb++; -#endif + u32 msb = 0; + u16 rss_i = adapter->ring_feature[RING_F_RSS].indices - 1; - reg |= msb << IXGBE_RQTC_SHIFT_TC(i); + while (rss_i) { + msb++; + rss_i >>= 1; } - IXGBE_WRITE_REG(hw, IXGBE_RQTC, reg); + + /* write msb to all 8 TCs in one write */ + IXGBE_WRITE_REG(hw, IXGBE_RQTC, msb * 0x11111111); } } @@ -4894,16 +4729,12 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb) #ifdef IXGBE_FCOE /* FCoE traffic class uses FCOE jumbo frames */ - if (dev->features & NETIF_F_FCOE_MTU) { - int fcoe_pb = 0; + if ((dev->features & NETIF_F_FCOE_MTU) && + (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) && + (pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up))) + tc = IXGBE_FCOE_JUMBO_FRAME_SIZE; - fcoe_pb = netdev_get_prio_tc_map(dev, adapter->fcoe.up); - - if (fcoe_pb == pb && tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) - tc = IXGBE_FCOE_JUMBO_FRAME_SIZE; - } #endif - /* Calculate delay value for device */ switch (hw->mac.type) { case ixgbe_mac_X540: @@ -4956,16 +4787,12 @@ static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb) #ifdef IXGBE_FCOE /* FCoE traffic class uses FCOE jumbo frames */ - if (dev->features & NETIF_F_FCOE_MTU) { - int fcoe_pb = 0; - - fcoe_pb = netdev_get_prio_tc_map(dev, adapter->fcoe.up); + if ((dev->features & NETIF_F_FCOE_MTU) && + (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) && + (pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up))) + tc = IXGBE_FCOE_JUMBO_FRAME_SIZE; - if (fcoe_pb == pb && tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) - tc = IXGBE_FCOE_JUMBO_FRAME_SIZE; - } #endif - /* Calculate delay value for device */ switch (hw->mac.type) { case ixgbe_mac_X540: @@ -5053,17 +4880,17 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter) ixgbe_configure_pb(adapter); ixgbe_configure_dcb(adapter); 
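/*
 * A minimal stand-alone sketch of the RQTC packing performed inside
 * ixgbe_configure_dcb() above.  example_rqtc_value() is a hypothetical
 * helper used only for illustration, not part of the driver; it assumes
 * rss_i is the per-TC RSS queue count (1..16), as in the hunk above.
 */
static inline u32 example_rqtc_value(u16 rss_i)
{
	u32 msb = 0;

	/* msb becomes ceil(log2(rss_i)): the number of queue-index bits
	 * each traffic class needs */
	rss_i--;
	while (rss_i) {
		msb++;
		rss_i >>= 1;
	}

	/* RQTC holds one 4-bit field per TC; multiplying the nibble by
	 * 0x11111111 replicates it into all eight fields in one write */
	return msb * 0x11111111;
}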
+ /* + * We must restore virtualization before VLANs or else + * the VLVF registers will not be populated + */ + ixgbe_configure_virtualization(adapter); + ixgbe_set_rx_mode(adapter->netdev); #ifdef NETIF_F_HW_VLAN_TX ixgbe_restore_vlan(adapter); #endif -#ifdef IXGBE_FCOE - if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) - ixgbe_configure_fcoe(adapter); - -#endif /* IXGBE_FCOE */ - if (adapter->hw.mac.type != ixgbe_mac_82598EB) hw->mac.ops.disable_sec_rx_path(hw); @@ -5079,8 +4906,11 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter) if (adapter->hw.mac.type != ixgbe_mac_82598EB) hw->mac.ops.enable_sec_rx_path(hw); - ixgbe_configure_virtualization(adapter); +#ifdef IXGBE_FCOE + /* configure FCoE L2 filters, redirection table, and Rx control */ + ixgbe_configure_fcoe(adapter); +#endif /* IXGBE_FCOE */ ixgbe_configure_tx(adapter); ixgbe_configure_rx(adapter); } @@ -5203,7 +5033,6 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter) if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD; -#ifdef CONFIG_IXGBE_NAPI gpie |= IXGBE_GPIE_EIAME; /* * use EIAM to auto-mask when MSI-X interrupt is asserted @@ -5224,7 +5053,6 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter) /* legacy interrupts, use EIAM to auto-mask when reading EICR, * specifically only auto mask tx and rx interrupts */ IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); -#endif } /* XXX: to interrupt immediately for EICS writes, enable this */ @@ -5232,7 +5060,18 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter) if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { gpie &= ~IXGBE_GPIE_VTMODE_MASK; - gpie |= IXGBE_GPIE_VTMODE_64; + + switch (adapter->ring_feature[RING_F_VMDQ].mask) { + case IXGBE_82599_VMDQ_8Q_MASK: + gpie |= IXGBE_GPIE_VTMODE_16; + break; + case IXGBE_82599_VMDQ_4Q_MASK: + gpie |= IXGBE_GPIE_VTMODE_32; + break; + default: + gpie |= IXGBE_GPIE_VTMODE_64; + break; + } } /* Enable Thermal over heat sensor interrupt */ @@ -5392,12 +5231,16 @@ void ixgbe_reset(struct ixgbe_adapter *adapter) ixgbe_flush_sw_mac_table(adapter); memcpy(&adapter->mac_table[0].addr, hw->mac.perm_addr, netdev->addr_len); - adapter->mac_table[0].queue = adapter->num_vfs; + adapter->mac_table[0].queue = VMDQ_P(0); adapter->mac_table[0].state = (IXGBE_MAC_STATE_DEFAULT | IXGBE_MAC_STATE_IN_USE); hw->mac.ops.set_rar(hw, 0, adapter->mac_table[0].addr, adapter->mac_table[0].queue, IXGBE_RAH_AV); + + /* update SAN MAC vmdq pool selection */ + if (hw->mac.san_mac_rar_index) + hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0)); } #ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT @@ -5653,1098 +5496,23 @@ void ixgbe_down(struct ixgbe_adapter *adapter) #endif } - -/* Artificial max queue cap per traffic class in DCB mode */ -#define DCB_QUEUE_CAP 8 - /** - * ixgbe_set_dcb_queues: Allocate queues for a DCB-enabled device + * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter) * @adapter: board private structure to initialize * - * When DCB (Data Center Bridging) is enabled, allocate queues for - * each traffic class. If multiqueue isn't available,then abort DCB - * initialization. - * - * This function handles all combinations of DCB, RSS, and FCoE. - * + * ixgbe_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). 
**/ -static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) +static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) { - int tcs; -#ifdef HAVE_MQPRIO - int rss_i, i, offset = 0; - struct net_device *dev = adapter->netdev; - - /* Map queue offset and counts onto allocated tx queues */ - tcs = netdev_get_num_tc(dev); + struct ixgbe_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + struct ixgbe_dcb_tc_config *tc; + int j, bwg_pct; + int err; - if (!tcs) - return false; - - rss_i = min_t(int, dev->num_tx_queues / tcs, num_online_cpus()); - - if (rss_i > DCB_QUEUE_CAP) - rss_i = DCB_QUEUE_CAP; - - for (i = 0; i < tcs; i++) { - netdev_set_tc_queue(dev, i, rss_i, offset); - offset += rss_i; - } - - adapter->num_tx_queues = rss_i * tcs; - adapter->num_rx_queues = rss_i * tcs; - -#ifdef IXGBE_FCOE - /* FCoE enabled queues require special configuration indexed - * by feature specific indices and mask. Here we map FCoE - * indices onto the DCB queue pairs allowing FCoE to own - * configuration later. - */ - - if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { - struct ixgbe_ring_feature *f; - int tc; - u8 prio_tc[IXGBE_DCB_MAX_USER_PRIORITY] = {0}; - - ixgbe_dcb_unpack_map_cee(&adapter->dcb_cfg, - IXGBE_DCB_TX_CONFIG, - prio_tc); - tc = prio_tc[adapter->fcoe.up]; - - f = &adapter->ring_feature[RING_F_FCOE]; - f->indices = min_t(int, rss_i, f->indices); - f->mask = rss_i * tc; - } -#endif /* IXGBE_FCOE */ -#else - if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) - return false; - - /* Enable one Queue per traffic class */ - tcs = adapter->tc; - if (!tcs) - return false; - -#ifdef IXGBE_FCOE - if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { - struct ixgbe_ring_feature *f; - int tc = netdev_get_prio_tc_map(adapter->netdev, - adapter->fcoe.up); - - f = &adapter->ring_feature[RING_F_FCOE]; - - /* - * We have max 8 queues for FCoE, where 8 the is - * FCoE redirection table size. We must also share - * ring resources with network traffic so if FCoE TC is - * 4 or greater and we are in 8 TC mode we can only use - * 7 queues. - */ - if ((tcs > 4) && (tc >= 4) && (f->indices > 7)) - f->indices = 7; - - f->indices = min_t(int, num_online_cpus(), f->indices); - f->mask = tcs; - - adapter->num_rx_queues = f->indices + tcs; - adapter->num_tx_queues = f->indices + tcs; - - return true; - } - -#endif /* IXGBE_FCOE */ - adapter->num_rx_queues = tcs; - adapter->num_tx_queues = tcs; -#endif /* HAVE_MQ */ - - return true; -} - -/** - * ixgbe_set_vmdq_queues: Allocate queues for VMDq devices - * @adapter: board private structure to initialize - * - * When VMDq (Virtual Machine Devices queue) is enabled, allocate queues - * and VM pools where appropriate. If RSS is available, then also try and - * enable RSS and map accordingly. 
- * - **/ -static bool ixgbe_set_vmdq_queues(struct ixgbe_adapter *adapter) -{ - int vmdq_i = adapter->ring_feature[RING_F_VMDQ].indices; - int vmdq_m = 0; - int rss_i = adapter->ring_feature[RING_F_RSS].indices; - unsigned long i; - int rss_shift; - bool ret = false; - - - switch (adapter->flags & (IXGBE_FLAG_RSS_ENABLED - | IXGBE_FLAG_DCB_ENABLED - | IXGBE_FLAG_VMDQ_ENABLED)) { - - case (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_VMDQ_ENABLED): - switch (adapter->hw.mac.type) { - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - vmdq_i = min(IXGBE_MAX_VMDQ_INDICES, vmdq_i); - if (vmdq_i > 32) - rss_i = 2; - else - rss_i = 4; - i = rss_i; - rss_shift = find_first_bit(&i, sizeof(i) * 8); - vmdq_m = ((IXGBE_MAX_VMDQ_INDICES - 1) << - rss_shift) & (MAX_RX_QUEUES - 1); - break; - default: - break; - } - adapter->num_rx_queues = vmdq_i * rss_i; - adapter->num_tx_queues = min(MAX_TX_QUEUES, vmdq_i * rss_i); - ret = true; - break; - - case (IXGBE_FLAG_VMDQ_ENABLED): - switch (adapter->hw.mac.type) { - case ixgbe_mac_82598EB: - vmdq_m = (IXGBE_MAX_VMDQ_INDICES - 1); - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - vmdq_m = (IXGBE_MAX_VMDQ_INDICES - 1) << 1; - break; - default: - break; - } - adapter->num_rx_queues = vmdq_i; - adapter->num_tx_queues = vmdq_i; - ret = true; - break; - - default: - ret = false; - goto vmdq_queues_out; - } - - if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) { - adapter->num_rx_pools = vmdq_i; - adapter->num_rx_queues_per_pool = adapter->num_rx_queues / - vmdq_i; - } else { - adapter->num_rx_pools = adapter->num_rx_queues; - adapter->num_rx_queues_per_pool = 1; - } - /* save the mask for later use */ - adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m; -vmdq_queues_out: - return ret; -} - -/** - * ixgbe_set_rss_queues: Allocate queues for RSS - * @adapter: board private structure to initialize - * - * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try - * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU. - * - **/ -static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) -{ - struct ixgbe_ring_feature *f; - - if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) { - adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; - return false; - } - - /* set mask for 16 queue limit of RSS */ - f = &adapter->ring_feature[RING_F_RSS]; - f->mask = 0xF; - - /* - * Use Flow Director in addition to RSS to ensure the best - * distribution of flows across cores, even when an FDIR flow - * isn't matched. - */ - if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { - f = &adapter->ring_feature[RING_F_FDIR]; - - f->indices = min_t(int, num_online_cpus(), f->indices); - f->mask = 0; - } - - adapter->num_rx_queues = f->indices; -#ifdef HAVE_TX_MQ - adapter->num_tx_queues = f->indices; -#endif - - return true; -} - -#ifdef IXGBE_FCOE -/** - * ixgbe_set_fcoe_queues: Allocate queues for Fiber Channel over Ethernet (FCoE) - * @adapter: board private structure to initialize - * - * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges. - * The ring feature mask is not used as a mask for FCoE, as it can take any 8 - * rx queues out of the max number of rx queues, instead, it is used as the - * index of the first rx queue used by FCoE. 
- * - **/ -static bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter) -{ - struct ixgbe_ring_feature *f; - - if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) - return false; - - ixgbe_set_rss_queues(adapter); - - f = &adapter->ring_feature[RING_F_FCOE]; - f->indices = min_t(int, num_online_cpus(), f->indices); - - /* adding FCoE queues */ - f->mask = adapter->num_rx_queues; - adapter->num_rx_queues += f->indices; - adapter->num_tx_queues += f->indices; - - return true; -} - -#endif /* IXGBE_FCOE */ -/* - * ixgbe_set_num_queues: Allocate queues for device, feature dependent - * @adapter: board private structure to initialize - * - * This is the top level queue allocation routine. The order here is very - * important, starting with the "most" number of features turned on at once, - * and ending with the smallest set of features. This way large combinations - * can be allocated if they're turned on, and smaller combinations are the - * fallthrough conditions. - * - **/ -static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter) -{ - /* Start with base case */ - adapter->num_rx_queues = 1; - adapter->num_tx_queues = 1; - adapter->num_rx_pools = adapter->num_rx_queues; - adapter->num_rx_queues_per_pool = 1; - - if (ixgbe_set_vmdq_queues(adapter)) - return; - - if (ixgbe_set_dcb_queues(adapter)) - return; - -#ifdef IXGBE_FCOE - if (ixgbe_set_fcoe_queues(adapter)) - return; - -#endif /* IXGBE_FCOE */ - ixgbe_set_rss_queues(adapter); -} - -static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, - int vectors) -{ - int err, vector_threshold; - - /* - * We'll want at least 2 (vector_threshold): - * 1) TxQ[0] + RxQ[0] handler - * 2) Other (Link Status Change, etc.) - */ - vector_threshold = MIN_MSIX_COUNT; - - /* - * The more we get, the more we will assign to Tx/Rx Cleanup - * for the separate queues...where Rx Cleanup >= Tx Cleanup. - * Right now, we simply care about how many we'll get; we'll - * set them up later while requesting irq's. - */ - while (vectors >= vector_threshold) { - err = pci_enable_msix(adapter->pdev, adapter->msix_entries, - vectors); - if (!err) /* Success in acquiring all requested vectors. */ - break; - else if (err < 0) - vectors = 0; /* Nasty failure, quit now */ - else /* err == number of vectors we should try again with */ - vectors = err; - } - - if (vectors < vector_threshold) { - /* Can't allocate enough MSI-X interrupts? Oh well. - * This just means we'll go with either a single MSI - * vector or fall back to legacy interrupts. - */ - e_warn(hw, "Unable to allocate MSI-X interrupts\n"); - adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; - kfree(adapter->msix_entries); - adapter->msix_entries = NULL; - } else { - adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */ - /* - * Adjust for only the vectors we'll use, which is minimum - * of max_msix_q_vectors + NON_Q_VECTORS, or the number of - * vectors we were allocated. - */ - adapter->num_msix_vectors = min(vectors, - adapter->max_msix_q_vectors + NON_Q_VECTORS); - } -} - -/** - * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS - * @adapter: board private structure to initialize - * - * Cache the descriptor ring offsets for RSS to the assigned rings. 
- * - **/ -static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) -{ - int i; - - if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) - return false; - - for (i = 0; i < adapter->num_rx_queues; i++) - adapter->rx_ring[i]->reg_idx = i; - for (i = 0; i < adapter->num_tx_queues; i++) - adapter->tx_ring[i]->reg_idx = i; - - return true; -} - -/* ixgbe_get_first_reg_idx - Return first register index associated with ring */ -static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc, - unsigned int *tx, unsigned int *rx) -{ - struct ixgbe_hw *hw = &adapter->hw; - struct net_device *dev = adapter->netdev; - u8 num_tcs = netdev_get_num_tc(dev); - - *tx = 0; - *rx = 0; - - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - /* TxQs/TC: 4 RxQs/TC: 8 */ - *tx = tc << 2; /* 0, 4, 8, 12, 16, 20, 24, 28 */ - *rx = tc << 3; /* 0, 8, 16, 24, 32, 40, 48, 56 */ - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - if (num_tcs > 4) { - /* - * TCs : TC0/1 TC2/3 TC4-7 - * TxQs/TC: 32 16 8 - * RxQs/TC: 16 16 16 - */ - *rx = tc << 4; - if (tc < 3) - *tx = tc << 5; /* 0, 32, 64 */ - else if (tc < 5) - *tx = (tc + 2) << 4; /* 80, 96 */ - else - *tx = (tc + 8) << 3; /* 104, 112, 120 */ - } else { - /* - * TCs : TC0 TC1 TC2/3 - * TxQs/TC: 64 32 16 - * RxQs/TC: 32 32 32 - */ - *rx = tc << 5; - if (tc < 2) - *tx = tc << 6; /* 0, 64 */ - else - *tx = (tc + 4) << 4; /* 96, 112 */ - } - default: - break; - } -} - -/** - * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB - * @adapter: board private structure to initialize - * - * Cache the descriptor ring offsets for DCB to the assigned rings. - * - **/ -static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) -{ - int tc, offset, rss_i, i; - unsigned int tx_idx, rx_idx; - struct net_device *dev = adapter->netdev; - u8 num_tcs = netdev_get_num_tc(dev); - - if (!num_tcs) - return false; - -#ifdef HAVE_MQPRIO - /* - * Since all TCs have the same number of queues, we just need to - * check one TC in order to detmerine what the RSS indices is. - */ - rss_i = ixgbe_get_netdev_tc_txq(dev, 0)->count; -#else - rss_i = 1; -#endif - - for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) { - ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx); - for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) { - adapter->tx_ring[offset + i]->reg_idx = tx_idx; - adapter->rx_ring[offset + i]->reg_idx = rx_idx; - adapter->tx_ring[offset + i]->dcb_tc = tc; - adapter->rx_ring[offset + i]->dcb_tc = tc; - } - } - -#ifndef HAVE_MQPRIO -#ifdef IXGBE_FCOE - if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { - struct ixgbe_ring_feature *f; - int tc = netdev_get_prio_tc_map(dev, adapter->fcoe.up); - - ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx); - - f = &adapter->ring_feature[RING_F_FCOE]; - - /* start at the 2nd index to avoid false sharing with 1st */ - tx_idx++; - rx_idx++; - - for (i = 0; i < f->indices; i++, tx_idx++, rx_idx++) { - adapter->tx_ring[f->mask + i]->reg_idx = tx_idx; - adapter->rx_ring[f->mask + i]->reg_idx = rx_idx; - adapter->tx_ring[f->mask + i]->dcb_tc = tc; - adapter->rx_ring[f->mask + i]->dcb_tc = tc; - } - } - -#endif /* IXGBE_FCOE */ -#endif /* HAVE_MQPRIO */ - return true; -} - -/** - * ixgbe_cache_ring_vmdq - Descriptor ring to register mapping for VMDq - * @adapter: board private structure to initialize - * - * Cache the descriptor ring offsets for VMDq to the assigned rings. It - * will also try to cache the proper offsets if RSS/DCB/CNA are all - * enabled along with VMDq. 
- * - **/ -static bool ixgbe_cache_ring_vmdq(struct ixgbe_adapter *adapter) -{ - int i; - bool ret = false; -#ifdef IXGBE_FCOE - struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; -#endif /* IXGBE_FCOE */ - u8 tc = netdev_get_num_tc(adapter->netdev); - - switch (adapter->flags & (IXGBE_FLAG_RSS_ENABLED - | IXGBE_FLAG_DCB_ENABLED - | IXGBE_FLAG_VMDQ_ENABLED)) { - - case (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_VMDQ_ENABLED): - switch (adapter->hw.mac.type) { - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - /* since the # of rss queues per vmdq pool is - * limited to either 2 or 4, there is no index - * skipping and we can set them up with no - * funky mapping - */ - for (i = 0; i < adapter->num_rx_queues; i++) - adapter->rx_ring[i]->reg_idx = i; - for (i = 0; i < adapter->num_tx_queues; i++) - adapter->tx_ring[i]->reg_idx = i; - ret = true; - break; - default: - break; - } - break; - - case (IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_DCB_ENABLED): - switch (adapter->hw.mac.type) { - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - for (i = 0; i < adapter->num_rx_queues; i++) { - adapter->rx_ring[i]->reg_idx = VMDQ_P(i) * tc; -#ifdef IXGBE_FCOE - if (i >= adapter->num_rx_queues - f->indices) - adapter->rx_ring[i]->reg_idx += - adapter->fcoe.tc; -#endif /* IXGBE_FCOE */ - } - for (i = 0; i < adapter->num_tx_queues; i++) { - adapter->tx_ring[i]->reg_idx = VMDQ_P(i) * tc; -#ifdef IXGBE_FCOE - if (i >= adapter->num_tx_queues - f->indices) - adapter->tx_ring[i]->reg_idx += - adapter->fcoe.tc; -#endif /* IXGBE_FCOE */ - } - ret = true; - break; - default: - break; - } - break; - - case (IXGBE_FLAG_VMDQ_ENABLED): - switch (adapter->hw.mac.type) { - case ixgbe_mac_82598EB: - for (i = 0; i < adapter->num_rx_queues; i++) - adapter->rx_ring[i]->reg_idx = i; - for (i = 0; i < adapter->num_tx_queues; i++) - adapter->tx_ring[i]->reg_idx = i; - ret = true; - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - /* even without rss, there are 2 queues per - * pool, the odd numbered ones are unused. - */ - for (i = 0; i < adapter->num_rx_queues; i++) - adapter->rx_ring[i]->reg_idx = VMDQ_P(i) * 2; - for (i = 0; i < adapter->num_tx_queues; i++) - adapter->tx_ring[i]->reg_idx = VMDQ_P(i) * 2; - ret = true; - break; - default: - break; - } - break; - } - - return ret; -} - -#ifdef IXGBE_FCOE -/** - * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE - * @adapter: board private structure to initialize - * - * Cache the descriptor ring offsets for FCoE mode to the assigned rings. - * - */ -static bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter) -{ - struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; - int i; - unsigned int fcoe_rx_i = 0, fcoe_tx_i = 0; - - if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) - return false; - - if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { - ixgbe_cache_ring_rss(adapter); - - fcoe_rx_i = f->mask; - fcoe_tx_i = f->mask; - } - for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) { - adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i; - adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i; - } - return true; -} - -#endif /* IXGBE_FCOE */ -/** - * ixgbe_cache_ring_register - Descriptor ring to register mapping - * @adapter: board private structure to initialize - * - * Once we know the feature-set enabled for the device, we'll cache - * the register offset the descriptor ring is assigned to. - * - * Note, the order the various feature calls is important. 
It must start with - * the "most" features enabled at the same time, then trickle down to the - * least amount of features turned on at once. - **/ -static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) -{ - /* handle single queue, address posible case of SR-IOV enabled */ - adapter->rx_ring[0]->reg_idx = adapter->num_vfs * 2; - adapter->tx_ring[0]->reg_idx = adapter->num_vfs * 2; - - if (ixgbe_cache_ring_vmdq(adapter)) - return; - - if (ixgbe_cache_ring_dcb(adapter)) - return; - -#ifdef IXGBE_FCOE - if (ixgbe_cache_ring_fcoe(adapter)) - return; - -#endif /* IXGBE_FCOE */ - ixgbe_cache_ring_rss(adapter); -} - -/** - * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported - * @adapter: board private structure to initialize - * - * Attempt to configure the interrupts using the best available - * capabilities of the hardware and the kernel. - **/ -static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - int err = 0; - int vector, v_budget; - - if (!(adapter->flags & IXGBE_FLAG_MSIX_CAPABLE)) - goto try_msi; - - /* - * It's easy to be greedy for MSI-X vectors, but it really - * doesn't do us much good if we have a lot more vectors - * than CPU's. So let's be conservative and only ask for - * (roughly) the same number of vectors as there are CPU's. - * the default is to use pairs of vectors - */ - v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues); - v_budget = min_t(int, v_budget, num_online_cpus()); - v_budget += NON_Q_VECTORS; - - /* - * At the same time, hardware can only support a maximum of - * hw.mac->max_msix_vectors vectors. With features - * such as RSS and VMDq, we can easily surpass the number of Rx and Tx - * descriptor queues supported by our device. Thus, we cap it off in - * those rare cases where the cpu count also exceeds our vector limit. - */ - v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors); - - /* A failure in MSI-X entry allocation isn't fatal, but it does - * mean we disable MSI-X capabilities of the adapter. */ - adapter->msix_entries = kcalloc(v_budget, - sizeof(struct msix_entry), GFP_KERNEL); - if (adapter->msix_entries) { - for (vector = 0; vector < v_budget; vector++) - adapter->msix_entries[vector].entry = vector; - - ixgbe_acquire_msix_vectors(adapter, v_budget); - - if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) - goto out; - } - - adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; - adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE; - if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { - e_err(probe, - "Flow Director is not supported while multiple " - "queues are disabled. Disabling Flow Director\n"); - } - adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; - adapter->atr_sample_rate = 0; - adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED; -#ifdef CONFIG_PCI_IOV - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { - e_err(probe, "MSI-X interrupt not available - disabling " - "SR-IOV\n"); - ixgbe_disable_sriov(adapter); - } -#endif /* CONFIG_PCI_IOV */ - - adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; - ixgbe_set_num_queues(adapter); - -try_msi: - if (!(adapter->flags & IXGBE_FLAG_MSI_CAPABLE)) - goto out; - - err = pci_enable_msi(adapter->pdev); - if (!err) { - adapter->flags |= IXGBE_FLAG_MSI_ENABLED; - } else { - e_warn(hw, "Unable to allocate MSI interrupt, " - "falling back to legacy. Error: %d\n", err); - /* reset err */ - err = 0; - } - -out: - /* Notify the stack of the (possibly) reduced Tx Queue count. 
*/ - netif_set_real_num_tx_queues(adapter->netdev, - adapter->num_rx_pools > 1 ? 1 : - adapter->num_tx_queues); - return err; -} - -static void ixgbe_add_ring(struct ixgbe_ring *ring, - struct ixgbe_ring_container *head) -{ - ring->next = head->ring; - head->ring = ring; - head->count++; -} - -/** - * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector - * @adapter: board private structure to initialize - * @v_count: q_vectors allocated on adapter, used for ring interleaving - * @v_idx: index of vector in adapter struct - * @txr_count: total number of Tx rings to allocate - * @txr_idx: index of first Tx ring to allocate - * @rxr_count: total number of Rx rings to allocate - * @rxr_idx: index of first Rx ring to allocate - * - * We allocate one q_vector. If allocation fails we return -ENOMEM. - **/ -static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, - int v_count, int v_idx, - int txr_count, int txr_idx, - int rxr_count, int rxr_idx) -{ - struct ixgbe_q_vector *q_vector; - struct ixgbe_ring *ring; - int node = -1; -#ifdef HAVE_IRQ_AFFINITY_HINT - int cpu = -1; -#endif - int ring_count, size; - - ring_count = txr_count + rxr_count; - size = sizeof(struct ixgbe_q_vector) + - (sizeof(struct ixgbe_ring) * ring_count); - -#ifdef HAVE_IRQ_AFFINITY_HINT - /* customize cpu for Flow Director mapping */ - if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { - if (cpu_online(v_idx)) { - cpu = v_idx; - node = cpu_to_node(cpu); - } - } - -#endif - /* allocate q_vector and rings */ - q_vector = kzalloc_node(size, GFP_KERNEL, node); - if (!q_vector) - q_vector = kzalloc(size, GFP_KERNEL); - if (!q_vector) - return -ENOMEM; - - /* setup affinity mask and node */ -#ifdef HAVE_IRQ_AFFINITY_HINT - if (cpu != -1) - cpumask_set_cpu(cpu, &q_vector->affinity_mask); - else - cpumask_copy(&q_vector->affinity_mask, cpu_online_mask); -#endif - q_vector->numa_node = node; - -#ifndef IXGBE_NO_LRO - /* initialize LRO */ - __skb_queue_head_init(&q_vector->lrolist.active); - -#endif -#ifdef CONFIG_IXGBE_NAPI - /* initialize NAPI */ - netif_napi_add(adapter->netdev, &q_vector->napi, - ixgbe_poll, 64); - -#endif /* CONFIG_IXGBE_NAPI */ - /* tie q_vector and adapter together */ - adapter->q_vector[v_idx] = q_vector; - q_vector->adapter = adapter; - q_vector->v_idx = v_idx; - - /* initialize work limits */ - q_vector->tx.work_limit = adapter->tx_work_limit; - q_vector->rx.work_limit = adapter->rx_work_limit; - - /* initialize pointer to rings */ - ring = q_vector->ring; - - while (txr_count) { - /* assign generic ring traits */ - ring->dev = pci_dev_to_dev(adapter->pdev); - ring->netdev = adapter->netdev; - - /* configure backlink on ring */ - ring->q_vector = q_vector; - - /* update q_vector Tx values */ - ixgbe_add_ring(ring, &q_vector->tx); - - /* apply Tx specific ring traits */ - ring->count = adapter->tx_ring_count; - ring->queue_index = txr_idx; - - /* assign ring to adapter */ - adapter->tx_ring[txr_idx] = ring; - - /* update count and index */ - txr_count--; - txr_idx += v_count; - - /* push pointer to next ring */ - ring++; - } - - while (rxr_count) { - /* assign generic ring traits */ - ring->dev = pci_dev_to_dev(adapter->pdev); - ring->netdev = adapter->netdev; - - /* configure backlink on ring */ - ring->q_vector = q_vector; - - /* update q_vector Rx values */ - ixgbe_add_ring(ring, &q_vector->rx); - - /* - * 82599 errata, UDP frames with a 0 checksum - * can be marked as checksum errors. 
- */ - if (adapter->hw.mac.type == ixgbe_mac_82599EB) - set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state); - -#ifndef HAVE_NDO_SET_FEATURES - /* enable rx csum by default */ - set_bit(__IXGBE_RX_CSUM_ENABLED, &ring->state); - -#endif -#ifdef IXGBE_FCOE - if (adapter->netdev->features & NETIF_F_FCOE_MTU) { - struct ixgbe_ring_feature *f; - f = &adapter->ring_feature[RING_F_FCOE]; - - if ((rxr_idx >= f->mask) && - (rxr_idx < f->mask + f->indices)) - set_bit(__IXGBE_RX_FCOE_BUFSZ, &ring->state); - } - -#endif - /* apply Rx specific ring traits */ - ring->count = adapter->rx_ring_count; - ring->queue_index = rxr_idx; - - /* assign ring to adapter */ - adapter->rx_ring[rxr_idx] = ring; - - /* update count and index */ - rxr_count--; - rxr_idx += v_count; - - /* push pointer to next ring */ - ring++; - } - - return 0; -} - -/** - * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector - * @adapter: board private structure to initialize - * @v_idx: Index of vector to be freed - * - * This function frees the memory allocated to the q_vector. In addition if - * NAPI is enabled it will delete any references to the NAPI struct prior - * to freeing the q_vector. - **/ -static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx) -{ - struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx]; - struct ixgbe_ring *ring; - - ixgbe_for_each_ring(ring, q_vector->tx) - adapter->tx_ring[ring->queue_index] = NULL; - - ixgbe_for_each_ring(ring, q_vector->rx) - adapter->rx_ring[ring->queue_index] = NULL; - - adapter->q_vector[v_idx] = NULL; -#ifdef CONFIG_IXGBE_NAPI - netif_napi_del(&q_vector->napi); -#endif -#ifndef IXGBE_NO_LRO - __skb_queue_purge(&q_vector->lrolist.active); -#endif - kfree(q_vector); -} - -/** - * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors - * @adapter: board private structure to initialize - * - * We allocate one q_vector per queue interrupt. If allocation fails we - * return -ENOMEM. - **/ -static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) -{ - int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - int rxr_remaining = adapter->num_rx_queues; - int txr_remaining = adapter->num_tx_queues; - int rxr_idx = 0, txr_idx = 0, v_idx = 0; - int err; - - /* only one q_vector if MSI-X is disabled. */ - if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) - q_vectors = 1; - - if (q_vectors >= (rxr_remaining + txr_remaining)) { - for (; rxr_remaining; v_idx++) { - err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx, - 0, 0, 1, rxr_idx); - if (err) - goto err_out; - - /* update counts and index */ - rxr_remaining--; - rxr_idx++; - } - } - - for (; v_idx < q_vectors; v_idx++) { - int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); - int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); - err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx, - tqpv, txr_idx, - rqpv, rxr_idx); - - if (err) - goto err_out; - - /* update counts and index */ - rxr_remaining -= rqpv; - txr_remaining -= tqpv; - rxr_idx++; - txr_idx++; - } - - return 0; - -err_out: - while (v_idx) { - v_idx--; - ixgbe_free_q_vector(adapter, v_idx); - } - - return -ENOMEM; -} - -/** - * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors - * @adapter: board private structure to initialize - * - * This function frees the memory allocated to the q_vectors. In addition if - * NAPI is enabled it will delete any references to the NAPI struct prior - * to freeing the q_vector. 
- **/ -static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter) -{ - int v_idx, q_vectors; - - if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) - q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - else - q_vectors = 1; - - for (v_idx = 0; v_idx < q_vectors; v_idx++) - ixgbe_free_q_vector(adapter, v_idx); -} - -static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter) -{ - if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { - adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; - pci_disable_msix(adapter->pdev); - kfree(adapter->msix_entries); - adapter->msix_entries = NULL; - } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { - adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED; - pci_disable_msi(adapter->pdev); - } -} - -/** - * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme - * @adapter: board private structure to initialize - * - * We determine which interrupt scheme to use based on... - * - Kernel support (MSI, MSI-X) - * - which can be user-defined (via MODULE_PARAM) - * - Hardware queue count (num_*_queues) - * - defined by miscellaneous hardware support/features (RSS, etc.) - **/ -int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter) -{ - int err; - - /* Number of supported queues */ - ixgbe_set_num_queues(adapter); - - err = ixgbe_set_interrupt_capability(adapter); - if (err) { - e_err(probe, "Unable to setup interrupt capabilities\n"); - goto err_set_interrupt; - } - - err = ixgbe_alloc_q_vectors(adapter); - if (err) { - e_err(probe, "Unable to allocate memory for queue vectors\n"); - goto err_alloc_q_vectors; - } - - ixgbe_cache_ring_register(adapter); - - set_bit(__IXGBE_DOWN, &adapter->state); - - return 0; - -err_alloc_q_vectors: - ixgbe_reset_interrupt_capability(adapter); -err_set_interrupt: - return err; -} - -/** - * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings - * @adapter: board private structure to clear interrupt scheme on - * - * We go through and clear interrupt specific resources and reset the structure - * to pre-load conditions - **/ -void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter) -{ - adapter->num_tx_queues = 0; - adapter->num_rx_queues = 0; - - ixgbe_free_q_vectors(adapter); - ixgbe_reset_interrupt_capability(adapter); -} - -/** - * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter) - * @adapter: board private structure to initialize - * - * ixgbe_sw_init initializes the Adapter private data structure. - * Fields are initialized based on PCI device information and - * OS network device settings (MTU size). 
- **/ -static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - struct pci_dev *pdev = adapter->pdev; - int err; - - /* PCI config space info */ + /* PCI config space info */ hw->vendor_id = pdev->vendor; hw->device_id = pdev->device; @@ -6765,9 +5533,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) case ixgbe_mac_82598EB: adapter->flags |= IXGBE_FLAG_MSI_CAPABLE | IXGBE_FLAG_MSIX_CAPABLE | - IXGBE_FLAG_MQ_CAPABLE | - IXGBE_FLAG_RSS_CAPABLE; - adapter->flags |= IXGBE_FLAG_DCB_CAPABLE; + IXGBE_FLAG_MQ_CAPABLE; #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) adapter->flags |= IXGBE_FLAG_DCA_CAPABLE; #endif @@ -6777,16 +5543,14 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) if (hw->device_id == IXGBE_DEV_ID_82598AT) adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE; - adapter->max_msix_q_vectors = IXGBE_MAX_MSIX_Q_VECTORS_82598; + adapter->max_q_vectors = IXGBE_MAX_MSIX_Q_VECTORS_82598; break; case ixgbe_mac_X540: adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; case ixgbe_mac_82599EB: adapter->flags |= IXGBE_FLAG_MSI_CAPABLE | IXGBE_FLAG_MSIX_CAPABLE | - IXGBE_FLAG_MQ_CAPABLE | - IXGBE_FLAG_RSS_CAPABLE; - adapter->flags |= IXGBE_FLAG_DCB_CAPABLE; + IXGBE_FLAG_MQ_CAPABLE; #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) adapter->flags |= IXGBE_FLAG_DCA_CAPABLE; #endif @@ -6795,14 +5559,14 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) #ifdef IXGBE_FCOE adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE; adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; - adapter->ring_feature[RING_F_FCOE].indices = 0; #ifdef CONFIG_DCB /* Default traffic class to use for FCoE */ - adapter->fcoe.tc = IXGBE_FCOE_DEFTC; - adapter->fcoe.up = IXGBE_FCOE_DEFTC; - adapter->fcoe.up_set = IXGBE_FCOE_DEFTC; + adapter->fcoe.up = IXGBE_FCOE_DEFUP; + adapter->fcoe.up_set = IXGBE_FCOE_DEFUP; #endif #endif + adapter->ring_feature[RING_F_FDIR].limit = + IXGBE_MAX_FDIR_INDICES; if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; #ifndef IXGBE_NO_SMART_SPEED @@ -6810,59 +5574,68 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) #else hw->phy.smart_speed = ixgbe_smart_speed_off; #endif - adapter->max_msix_q_vectors = IXGBE_MAX_MSIX_Q_VECTORS_82599; + adapter->max_q_vectors = IXGBE_MAX_MSIX_Q_VECTORS_82599; + default: + break; + } + +#ifdef IXGBE_FCOE + /* FCoE support exists, always init the FCoE lock */ + spin_lock_init(&adapter->fcoe.lock); + +#endif + /* n-tuple support exists, always init our spinlock */ + spin_lock_init(&adapter->fdir_perfect_lock); + + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + case ixgbe_mac_82599EB: + adapter->dcb_cfg.num_tcs.pg_tcs = 8; + adapter->dcb_cfg.num_tcs.pfc_tcs = 8; + break; + case ixgbe_mac_X540: + adapter->dcb_cfg.num_tcs.pg_tcs = 4; + adapter->dcb_cfg.num_tcs.pfc_tcs = 4; + break; default: + adapter->dcb_cfg.num_tcs.pg_tcs = 1; + adapter->dcb_cfg.num_tcs.pfc_tcs = 1; break; } - /* n-tuple support exists, always init our spinlock */ - spin_lock_init(&adapter->fdir_perfect_lock); + /* Configure DCB traffic classes */ + bwg_pct = 100 / adapter->dcb_cfg.num_tcs.pg_tcs; + for (j = 0; j < adapter->dcb_cfg.num_tcs.pg_tcs; j++) { + tc = &adapter->dcb_cfg.tc_config[j]; + tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = 0; + tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = bwg_pct; + tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = 0; + tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = bwg_pct; + tc->pfc = ixgbe_dcb_pfc_disabled; + } + /* 
reset back to TC 0 */ + tc = &adapter->dcb_cfg.tc_config[0]; - if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE) { - int j; - struct ixgbe_dcb_tc_config *tc; - int dcb_i = IXGBE_DCB_MAX_TRAFFIC_CLASS; - - - adapter->dcb_cfg.num_tcs.pg_tcs = dcb_i; - adapter->dcb_cfg.num_tcs.pfc_tcs = dcb_i; - for (j = 0; j < dcb_i; j++) { - tc = &adapter->dcb_cfg.tc_config[j]; - tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = 0; - tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 100 / dcb_i; - tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = 0; - tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 100 / dcb_i; - tc->pfc = ixgbe_dcb_pfc_disabled; - if (j == 0) { - /* total of all TCs bandwidth needs to be 100 */ - tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent += - 100 % dcb_i; - tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent += - 100 % dcb_i; - } - } + /* total of all TCs bandwidth needs to be 100 */ + bwg_pct += 100 % adapter->dcb_cfg.num_tcs.pg_tcs; + tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = bwg_pct; + tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = bwg_pct; - /* Initialize default user to priority mapping, UPx->TC0 */ - tc = &adapter->dcb_cfg.tc_config[0]; - tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF; - tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF; + /* Initialize default user to priority mapping, UPx->TC0 */ + tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF; + tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF; - adapter->dcb_cfg.bw_percentage[IXGBE_DCB_TX_CONFIG][0] = 100; - adapter->dcb_cfg.bw_percentage[IXGBE_DCB_RX_CONFIG][0] = 100; - adapter->dcb_cfg.rx_pba_cfg = ixgbe_dcb_pba_equal; - adapter->dcb_cfg.pfc_mode_enable = false; - adapter->dcb_cfg.round_robin_enable = false; - adapter->dcb_set_bitmap = 0x00; + adapter->dcb_cfg.bw_percentage[IXGBE_DCB_TX_CONFIG][0] = 100; + adapter->dcb_cfg.bw_percentage[IXGBE_DCB_RX_CONFIG][0] = 100; + adapter->dcb_cfg.rx_pba_cfg = ixgbe_dcb_pba_equal; + adapter->dcb_cfg.pfc_mode_enable = false; + adapter->dcb_cfg.round_robin_enable = false; + adapter->dcb_set_bitmap = 0x00; #ifdef CONFIG_DCB - adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE; + adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE; #endif /* CONFIG_DCB */ - - if (hw->mac.type == ixgbe_mac_X540) { - adapter->dcb_cfg.num_tcs.pg_tcs = 4; - adapter->dcb_cfg.num_tcs.pfc_tcs = 4; - } - } #ifdef CONFIG_DCB /* XXX does this need to be initialized even w/o DCB? 
*/ memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg, @@ -6963,10 +5736,16 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter) err = ixgbe_setup_tx_resources(adapter->tx_ring[i]); if (!err) continue; + e_err(probe, "Allocation for Tx Queue %u failed\n", i); - break; + goto err_setup_tx; } + return 0; +err_setup_tx: + /* rewind the index freeing the rings as we go */ + while (i--) + ixgbe_free_tx_resources(adapter->tx_ring[i]); return err; } @@ -7040,10 +5819,20 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter) err = ixgbe_setup_rx_resources(adapter->rx_ring[i]); if (!err) continue; + e_err(probe, "Allocation for Rx Queue %u failed\n", i); - break; + goto err_setup_rx; } +#ifdef IXGBE_FCOE + err = ixgbe_setup_fcoe_ddp_resources(adapter); + if (!err) +#endif + return 0; +err_setup_rx: + /* rewind the index freeing the rings as we go */ + while (i--) + ixgbe_free_rx_resources(adapter->rx_ring[i]); return err; } @@ -7066,7 +5855,6 @@ void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring) dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc, tx_ring->dma); - tx_ring->desc = NULL; } @@ -7081,8 +5869,7 @@ static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter) int i; for (i = 0; i < adapter->num_tx_queues; i++) - if (adapter->tx_ring[i]->desc) - ixgbe_free_tx_resources(adapter->tx_ring[i]); + ixgbe_free_tx_resources(adapter->tx_ring[i]); } /** @@ -7118,9 +5905,12 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter) { int i; +#ifdef IXGBE_FCOE + ixgbe_free_fcoe_ddp_resources(adapter); + +#endif for (i = 0; i < adapter->num_rx_queues; i++) - if (adapter->rx_ring[i]->desc) - ixgbe_free_rx_resources(adapter->rx_ring[i]); + ixgbe_free_rx_resources(adapter->rx_ring[i]); } /** @@ -7199,15 +5989,28 @@ static int ixgbe_open(struct net_device *netdev) if (err) goto err_req_irq; + /* Notify the stack of the actual queue counts. */ + netif_set_real_num_tx_queues(netdev, + adapter->num_rx_pools > 1 ? 1 : + adapter->num_tx_queues); + + err = netif_set_real_num_rx_queues(netdev, + adapter->num_rx_pools > 1 ? 
1 : + adapter->num_rx_queues); + if (err) + goto err_set_queues; + ixgbe_up_complete(adapter); return 0; +err_set_queues: + ixgbe_free_irq(adapter); err_req_irq: -err_setup_rx: ixgbe_free_all_rx_resources(adapter); -err_setup_tx: +err_setup_rx: ixgbe_free_all_tx_resources(adapter); +err_setup_tx: ixgbe_reset(adapter); return err; @@ -7265,21 +6068,20 @@ static int ixgbe_resume(struct pci_dev *pdev) pci_wake_from_d3(pdev, false); - err = ixgbe_init_interrupt_scheme(adapter); - if (err) { - e_dev_err("Cannot initialize interrupts for device\n"); - return err; - } - ixgbe_reset(adapter); IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); - if (netif_running(netdev)) { + rtnl_lock(); + + err = ixgbe_init_interrupt_scheme(adapter); + if (!err && netif_running(netdev)) err = ixgbe_open(netdev); - if (err) - return err; - } + + rtnl_unlock(); + + if (err) + return err; netif_device_attach(netdev); @@ -7306,17 +6108,20 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) netif_device_detach(netdev); + rtnl_lock(); + if (netif_running(netdev)) { - rtnl_lock(); ixgbe_down(adapter); ixgbe_free_irq(adapter); + ixgbe_free_all_tx_resources(adapter); ixgbe_free_all_rx_resources(adapter); - rtnl_unlock(); } ixgbe_clear_interrupt_scheme(adapter); + rtnl_unlock(); + #ifdef CONFIG_PM retval = pci_save_state(pdev); if (retval) @@ -7452,24 +6257,12 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) u64 bytes = 0, packets = 0, hw_csum_rx_error = 0; #ifndef IXGBE_NO_LRO u32 flushed = 0, coal = 0; - int num_q_vectors = 1; #endif -#ifdef IXGBE_FCOE - struct ixgbe_fcoe *fcoe = &adapter->fcoe; - unsigned int cpu; - u64 fcoe_noddp_counts_sum = 0, fcoe_noddp_ext_buff_counts_sum = 0; -#endif /* IXGBE_FCOE */ - if (test_bit(__IXGBE_DOWN, &adapter->state) || test_bit(__IXGBE_RESETTING, &adapter->state)) return; -#ifndef IXGBE_NO_LRO - if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) - num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - -#endif if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { u64 rsc_count = 0; u64 rsc_flush = 0; @@ -7482,7 +6275,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) } #ifndef IXGBE_NO_LRO - for (i = 0; i < num_q_vectors; i++) { + for (i = 0; i < adapter->num_q_vectors; i++) { struct ixgbe_q_vector *q_vector = adapter->q_vector[i]; if (!q_vector) continue; @@ -7610,18 +6403,20 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); - /* Add up per cpu counters for total ddp aloc fail */ - if (fcoe && fcoe->pcpu_noddp && fcoe->pcpu_noddp_ext_buff) { + /* Add up per cpu counters for total ddp alloc fail */ + if (adapter->fcoe.ddp_pool) { + struct ixgbe_fcoe *fcoe = &adapter->fcoe; + struct ixgbe_fcoe_ddp_pool *ddp_pool; + unsigned int cpu; + u64 noddp = 0, noddp_ext_buff = 0; for_each_possible_cpu(cpu) { - fcoe_noddp_counts_sum += - *per_cpu_ptr(fcoe->pcpu_noddp, cpu); - fcoe_noddp_ext_buff_counts_sum += - *per_cpu_ptr(fcoe-> - pcpu_noddp_ext_buff, cpu); + ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); + noddp += ddp_pool->noddp; + noddp_ext_buff += ddp_pool->noddp_ext_buff; } + hwstats->fcoe_noddp = noddp; + hwstats->fcoe_noddp_ext_buff = noddp_ext_buff; } - hwstats->fcoe_noddp = fcoe_noddp_counts_sum; - hwstats->fcoe_noddp_ext_buff = fcoe_noddp_ext_buff_counts_sum; #endif /* IXGBE_FCOE */ break; @@ -7779,8 +6574,7 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter) 
(IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER)); } else { /* get one bit for every active tx/rx interrupt vector */ - for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; - i++) { + for (i = 0; i < adapter->num_q_vectors; i++) { struct ixgbe_q_vector *qv = adapter->q_vector[i]; if (qv->rx.ring || qv->tx.ring) eics |= ((u64)1 << i); @@ -7893,6 +6687,9 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) ixgbe_check_vf_rate_limit(adapter); #endif /* IFLA_VF_MAX */ netif_tx_wake_all_queues(netdev); + + /* ping all the active vfs to let them know link has changed */ + ixgbe_ping_all_vfs(adapter); } /** @@ -7919,6 +6716,9 @@ static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter) e_info(drv, "NIC Link is Down\n"); netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); + + /* ping all the active vfs to let them know link has changed */ + ixgbe_ping_all_vfs(adapter); } /** @@ -8222,32 +7022,6 @@ static void ixgbe_service_task(struct work_struct *work) ixgbe_service_event_complete(adapter); } -#ifdef IXGBE_FCOE -void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens, - u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx) -#else -static void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens, - u32 fcoe_sof_eof, u32 type_tucmd, - u32 mss_l4len_idx) -#endif -{ - struct ixgbe_adv_tx_context_desc *context_desc; - u16 i = tx_ring->next_to_use; - - context_desc = IXGBE_TX_CTXTDESC(tx_ring, i); - - i++; - tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; - - /* set bits to identify this as an advanced context descriptor */ - type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; - - context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); - context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof); - context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); - context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); -} - static int ixgbe_tso(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first, u8 *hdr_len) @@ -8727,11 +7501,12 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) (protocol == __constant_htons(ETH_P_FIP))) { if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { struct ixgbe_ring_feature *f; + f = &adapter->ring_feature[RING_F_FCOE]; while (txq >= f->indices) txq -= f->indices; - txq += f->mask; + txq += f->offset; return txq; } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { @@ -8743,11 +7518,17 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) #endif /* IXGBE_FCOE */ #ifndef HAVE_MQPRIO if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { + struct ixgbe_ring_feature *f; if (skb->priority == TC_PRIO_CONTROL) txq = adapter->tc - 1; else txq = (skb->vlan_tci & IXGBE_TX_FLAGS_VLAN_PRIO_MASK) >> 13; + + f = &adapter->ring_feature[RING_F_RSS]; + txq *= f->indices; + txq += __skb_tx_hash(dev, skb, f->indices); + return txq; } @@ -8758,7 +7539,8 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) return txq; } - return skb_tx_hash(dev, skb); + return __skb_tx_hash(dev, skb, + adapter->ring_feature[RING_F_RSS].indices); } #endif /* HAVE_NETDEV_SELECT_QUEUE */ @@ -8868,7 +7650,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, #ifdef IXGBE_FCOE /* setup tx offload for FCoE */ if ((protocol == __constant_htons(ETH_P_FCOE)) && - (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) { + (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) { tso = ixgbe_fso(tx_ring, first, &hdr_len); if (tso < 0) goto out_drop; @@ -8958,15 +7740,13 @@ 
static int ixgbe_set_mac(struct net_device *netdev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - ixgbe_del_mac_filter(adapter, hw->mac.addr, - adapter->num_vfs); + ixgbe_del_mac_filter(adapter, hw->mac.addr, VMDQ_P(0)); memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); /* set the correct pool for the new PF MAC address in entry 0 */ - ret = ixgbe_add_mac_filter(adapter, hw->mac.addr, - adapter->num_vfs); + ret = ixgbe_add_mac_filter(adapter, hw->mac.addr, VMDQ_P(0)); return (ret > 0 ? 0 : ret); } @@ -8982,12 +7762,16 @@ static int ixgbe_add_sanmac_netdev(struct net_device *dev) { int err = 0; struct ixgbe_adapter *adapter = netdev_priv(dev); - struct ixgbe_mac_info *mac = &adapter->hw.mac; + struct ixgbe_hw *hw = &adapter->hw; - if (is_valid_ether_addr(mac->san_addr)) { + if (is_valid_ether_addr(hw->mac.san_addr)) { rtnl_lock(); - err = dev_addr_add(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN); + err = dev_addr_add(dev, hw->mac.san_addr, + NETDEV_HW_ADDR_T_SAN); rtnl_unlock(); + + /* update SAN MAC vmdq pool selection */ + hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0)); } return err; } @@ -9028,6 +7812,7 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) case SIOCETHTOOL: return ethtool_ioctl(ifr); #endif + default: return -EOPNOTSUPP; } @@ -9042,35 +7827,25 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) static void ixgbe_netpoll(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - int i; /* if interface is down do nothing */ if (test_bit(__IXGBE_DOWN, &adapter->state)) return; -#ifndef CONFIG_IXGBE_NAPI - ixgbe_irq_disable(adapter); -#endif adapter->flags |= IXGBE_FLAG_IN_NETPOLL; if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { - int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - for (i = 0; i < num_q_vectors; i++) { - struct ixgbe_q_vector *q_vector = adapter->q_vector[i]; - ixgbe_msix_clean_rings(0, q_vector); - } + int i; + for (i = 0; i < adapter->num_q_vectors; i++) + ixgbe_msix_clean_rings(0, adapter->q_vector[i]); } else { ixgbe_intr(0, adapter); } adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL; -#ifndef CONFIG_IXGBE_NAPI - ixgbe_irq_enable(adapter, true, true); -#endif } - -#endif +#endif /* CONFIG_NET_POLL_CONTROLLER */ /* ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid. 
- * #adapter: pointer to ixgbe_adapter + * @adapter: pointer to ixgbe_adapter * @tc: number of traffic classes currently enabled * * Configure a valid 802.1Qp to Rx packet buffer mapping ie confirm @@ -9105,7 +7880,35 @@ static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc) return; } -/* ixgbe_setup_tc - routine to configure net_device for multiple traffic +/** + * ixgbe_set_prio_tc_map - Configure netdev prio tc map + * @adapter: Pointer to adapter struct + * + * Populate the netdev user priority to tc map + */ +static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter) +{ +#ifdef HAVE_DCBNL_IEEE + struct net_device *dev = adapter->netdev; + struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg; + struct ieee_ets *ets = adapter->ixgbe_ieee_ets; + u8 prio; + + for (prio = 0; prio < IXGBE_DCB_MAX_USER_PRIORITY; prio++) { + u8 tc = 0; + + if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) + tc = ixgbe_dcb_get_tc_from_up(dcb_cfg, 0, prio); + else if (ets) + tc = ets->prio_tc[prio]; + + netdev_set_prio_tc_map(dev, prio, tc); + } +#endif +} + +/** + * ixgbe_setup_tc - routine to configure net_device for multiple traffic * classes. * * @netdev: net device to configure @@ -9116,12 +7919,6 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) struct ixgbe_adapter *adapter = netdev_priv(dev); struct ixgbe_hw *hw = &adapter->hw; - /* Multiple traffic classes requires multiple queues */ - if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { - e_err(drv, "Enable failed, needs MSI-X\n"); - return -EINVAL; - } - /* Hardware supports up to 8 traffic classes */ if (tc > adapter->dcb_cfg.num_tcs.pg_tcs || (hw->mac.type == ixgbe_mac_82598EB && @@ -9136,34 +7933,23 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) ixgbe_close(dev); ixgbe_clear_interrupt_scheme(adapter); -#ifndef HAVE_MQPRIO - adapter->tc = tc; - -#endif if (tc) { -#ifdef HAVE_MQPRIO netdev_set_num_tc(dev, tc); -#else - adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; -#endif + ixgbe_set_prio_tc_map(adapter); + adapter->flags |= IXGBE_FLAG_DCB_ENABLED; - adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; if (adapter->hw.mac.type == ixgbe_mac_82598EB) { adapter->last_lfc_mode = adapter->hw.fc.requested_mode; adapter->hw.fc.requested_mode = ixgbe_fc_none; } } else { -#ifdef HAVE_MQPRIO netdev_reset_tc(dev); -#else - adapter->flags |= IXGBE_FLAG_RSS_ENABLED; -#endif + if (adapter->hw.mac.type == ixgbe_mac_82598EB) adapter->hw.fc.requested_mode = adapter->last_lfc_mode; adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; - adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; adapter->temp_dcb_cfg.pfc_mode_enable = false; adapter->dcb_cfg.pfc_mode_enable = false; @@ -9191,17 +7977,15 @@ void ixgbe_do_reset(struct net_device *netdev) static netdev_features_t ixgbe_fix_features(struct net_device *netdev, netdev_features_t features) { +#if defined(CONFIG_DCB) || defined(IXGBE_NO_LRO) struct ixgbe_adapter *adapter = netdev_priv(netdev); +#endif #ifdef CONFIG_DCB if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) features |= NETIF_F_HW_VLAN_RX; #endif - /* return error if RXHASH is being enabled when RSS is not supported */ - if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) - features &= ~NETIF_F_RXHASH; - /* If Rx checksum is disabled, then RSC/LRO should also be disabled */ if (!(features & NETIF_F_RXCSUM)) features &= ~NETIF_F_LRO; @@ -9247,22 +8031,47 @@ static int ixgbe_set_features(struct net_device *netdev, * Check if Flow Director n-tuple support was enabled or disabled. If * the state changed, we need to reset. 
*/ - if (!(features & NETIF_F_NTUPLE)) { - if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) { - /* turn off Flow Director, set ATR and reset */ - if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) && - !(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) - adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; - need_reset = true; - } - adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; - } else if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) { + switch (features & NETIF_F_NTUPLE) { + case NETIF_F_NTUPLE: /* turn off ATR, enable perfect filters and reset */ + if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) + need_reset = true; + adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; - need_reset = true; + break; + default: + /* turn off perfect filters, enable ATR and reset */ + if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) + need_reset = true; + + adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; + + /* We cannot enable ATR if VMDq is enabled */ + if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) + break; + + /* We cannot enable ATR if we have 2 or more traffic classes */ + if (netdev_get_num_tc(netdev) > 1) + break; + + /* We cannot enable ATR if RSS is disabled */ + if (adapter->ring_feature[RING_F_RSS].limit <= 1) + break; + + /* A sample rate of 0 indicates ATR disabled */ + if (!adapter->atr_sample_rate) + break; + + adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; + break; } + if (features & NETIF_F_HW_VLAN_RX) + ixgbe_vlan_stripping_enable(adapter); + else + ixgbe_vlan_stripping_disable(adapter); + if (need_reset) ixgbe_do_reset(netdev); @@ -9365,17 +8174,56 @@ void ixgbe_assign_netdev_ops(struct net_device *dev) dev->watchdog_timeo = 5 * HZ; } -static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter) + +/** + * ixgbe_wol_supported - Check whether device supports WoL + * @hw: hw specific details + * @device_id: the device ID + * @subdev_id: the subsystem device ID + * + * This function is used by probe and ethtool to determine + * which devices have WoL support + * + **/ +int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, + u16 subdevice_id) { -#ifdef CONFIG_PCI_IOV + int is_wol_supported = 0; + struct ixgbe_hw *hw = &adapter->hw; + u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK; - /* The 82599 supports up to 64 VFs per physical function - * but this implementation limits allocation to 63 so that - * basic networking resources are still available to the - * physical function - */ - ixgbe_enable_sriov(adapter); -#endif /* CONFIG_PCI_IOV */ + switch (device_id) { + case IXGBE_DEV_ID_82599_SFP: + /* Only these subdevices could supports WOL */ + switch (subdevice_id) { + case IXGBE_SUBDEV_ID_82599_560FLR: + /* only support first port */ + if (hw->bus.func != 0) + break; + case IXGBE_SUBDEV_ID_82599_SFP: + is_wol_supported = 1; + break; + } + break; + case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: + /* All except this subdevice support WOL */ + if (subdevice_id != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) + is_wol_supported = 1; + break; + case IXGBE_DEV_ID_82599_KX4: + is_wol_supported = 1; + break; + case IXGBE_DEV_ID_X540T: + /* check eeprom to see if enabled wol */ + if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) || + ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) && + (hw->bus.func == 0))) { + is_wol_supported = 1; + } + break; + } + + return is_wol_supported; } /** @@ -9410,7 +8258,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, #ifdef IXGBE_FCOE u16 device_caps; #endif - u16 wol_cap; err 
= pci_enable_device_mem(pdev); if (err) @@ -9474,22 +8321,23 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, #ifdef HAVE_TX_MQ #ifdef CONFIG_DCB -#ifdef HAVE_MQPRIO indices *= IXGBE_DCB_MAX_TRAFFIC_CLASS; -#else - indices = max_t(unsigned int, indices, IXGBE_MAX_DCB_INDICES); -#endif /* HAVE_MQPRIO */ #endif /* CONFIG_DCB */ +#ifdef IXGBE_FCOE + indices += min_t(unsigned int, num_possible_cpus(), + IXGBE_MAX_FCOE_INDICES); +#endif + if (mac_type == ixgbe_mac_82598EB) +#ifdef CONFIG_DCB + indices = min_t(unsigned int, indices, IXGBE_MAX_DCB_INDICES * 4); +#else indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES); +#endif else indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES); -#ifdef IXGBE_FCOE - indices += min_t(unsigned int, num_possible_cpus(), - IXGBE_MAX_FCOE_INDICES); -#endif netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices); #else /* HAVE_TX_MQ */ netdev = alloc_etherdev(sizeof(struct ixgbe_adapter)); @@ -9510,10 +8358,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, hw->back = adapter; adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; -#ifdef HAVE_DEVICE_NUMA_NODE - e_info(tx_err, "my (original) node was: %d\n", dev_to_node(&pdev->dev)); - -#endif /* HAVE_DEVICE_NUMA_NODE */ #ifdef HAVE_PCI_ERS /* * call save state here in standalone driver because it relies on @@ -9584,9 +8428,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, goto err_sw_init; } - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) - ixgbe_probe_vf(adapter); +#ifdef CONFIG_PCI_IOV + ixgbe_enable_sriov(adapter); +#endif /* CONFIG_PCI_IOV */ #ifdef MAX_SKB_FRAGS netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; @@ -9654,27 +8499,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, NETIF_F_TSO6; #endif /* HAVE_NETDEV_VLAN_FEATURES */ - /* - * If perfect filters were enabled in check_options(), enable them - * on the netdevice too. - */ - if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) - netdev->features |= NETIF_F_NTUPLE; - if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) - adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; - if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) - adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; - if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) { - adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; - /* clear n-tuple support in the netdev unconditionally */ - netdev->features &= ~NETIF_F_NTUPLE; - } - -#ifdef NETIF_F_RXHASH - if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) - netdev->features &= ~NETIF_F_RXHASH; - -#endif /* NETIF_F_RXHASH */ if (netdev->features & NETIF_F_LRO) { if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) && ((adapter->rx_itr_setting == 1) || @@ -9682,11 +8506,11 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; } else if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) { #ifdef IXGBE_NO_LRO - e_info(probe, "InterruptThrottleRate set too high, " - "disabling RSC\n"); + e_dev_info("InterruptThrottleRate set too high, " + "disabling RSC\n"); #else - e_info(probe, "InterruptThrottleRate set too high, " - "falling back to software LRO\n"); + e_dev_info("InterruptThrottleRate set too high, " + "falling back to software LRO\n"); #endif } } @@ -9701,20 +8525,20 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS) { adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; - e_info(probe, "FCoE offload feature is not available. 
" - "Disabling FCoE offload feature\n"); - } + e_dev_info("FCoE offload feature is not available. " + "Disabling FCoE offload feature\n"); + } else { + netdev->features |= NETIF_F_FSO | + NETIF_F_FCOE_CRC; #ifndef HAVE_NETDEV_OPS_FCOE_ENABLE - else { + ixgbe_fcoe_ddp_enable(adapter); adapter->flags |= IXGBE_FLAG_FCOE_ENABLED; - adapter->ring_feature[RING_F_FCOE].indices = - IXGBE_FCRETA_SIZE; - netdev->features |= NETIF_F_FSO | - NETIF_F_FCOE_CRC | - NETIF_F_FCOE_MTU; - netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1; - } + netdev->features |= NETIF_F_FCOE_MTU; #endif /* HAVE_NETDEV_OPS_FCOE_ENABLE */ + } + + adapter->ring_feature[RING_F_FCOE].limit = IXGBE_FCRETA_SIZE; + #ifdef HAVE_NETDEV_VLAN_FEATURES netdev->vlan_features |= NETIF_F_FSO | NETIF_F_FCOE_CRC | @@ -9729,8 +8553,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, netdev->vlan_features |= NETIF_F_HIGHDMA; #endif /* HAVE_NETDEV_VLAN_FEATURES */ } - #endif /* MAX_SKB_FRAGS */ + /* make sure the EEPROM is good */ if (hw->eeprom.ops.validate_checksum && (hw->eeprom.ops.validate_checksum(hw, NULL) < 0)) { @@ -9757,7 +8581,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, #endif memcpy(&adapter->mac_table[0].addr, hw->mac.perm_addr, netdev->addr_len); - adapter->mac_table[0].queue = adapter->num_vfs; + adapter->mac_table[0].queue = VMDQ_P(0); adapter->mac_table[0].state = (IXGBE_MAC_STATE_DEFAULT | IXGBE_MAC_STATE_IN_USE); hw->mac.ops.set_rar(hw, 0, adapter->mac_table[0].addr, @@ -9775,39 +8599,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, goto err_sw_init; adapter->wol = 0; - /* WOL not supported for all but the following */ - switch (pdev->device) { - case IXGBE_DEV_ID_82599_SFP: - /* Only these subdevice supports WOL */ - switch (pdev->subsystem_device) { - case IXGBE_SUBDEV_ID_82599_560FLR: - /* only support first port */ - if (hw->bus.func != 0) - break; - case IXGBE_SUBDEV_ID_82599_SFP: - adapter->wol = IXGBE_WUFC_MAG; - break; - } - break; - case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: - /* All except this subdevice support WOL */ - if (pdev->subsystem_device != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) - adapter->wol = IXGBE_WUFC_MAG; - break; - case IXGBE_DEV_ID_82599_KX4: + if (ixgbe_wol_supported(adapter, pdev->device, pdev->subsystem_device)) adapter->wol = IXGBE_WUFC_MAG; - break; - case IXGBE_DEV_ID_X540T: - /* Check eeprom to see if it is enabled */ - ixgbe_read_eeprom(hw, 0x2c, &adapter->eeprom_cap); - wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK; - if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) || - ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) && - (hw->bus.func == 0))) - adapter->wol = IXGBE_WUFC_MAG; - break; - } device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); @@ -9817,7 +8611,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, */ ixgbe_read_eeprom(hw, 0x2e, &eeprom_verh); ixgbe_read_eeprom(hw, 0x2d, &eeprom_verl); - etrack_id = (eeprom_verh << 16) | eeprom_verl; ixgbe_read_eeprom(hw, 0x17, &offset); @@ -9950,12 +8743,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, #endif if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) i_s_var += sprintf(i_s_var, "FdirHash "); - if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) - i_s_var += sprintf(i_s_var, "FdirPerfect "); if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) i_s_var += sprintf(i_s_var, "DCB "); - if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) - i_s_var += sprintf(i_s_var, "RSS "); if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) i_s_var += sprintf(i_s_var, "DCA "); if (adapter->flags2 & 
IXGBE_FLAG2_RSC_ENABLED) @@ -10005,8 +8794,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, ixgbe_release_hw_control(adapter); err_sw_init: #ifdef CONFIG_PCI_IOV - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) - ixgbe_disable_sriov(adapter); + ixgbe_disable_sriov(adapter); #endif /* CONFIG_PCI_IOV */ adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP; kfree(adapter->mac_table); @@ -10056,11 +8844,6 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev) #endif /* IXGBE_PROCFS */ #endif /* IXGBE-SYSFS */ -#ifdef IXGBE_FCOE - if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) - ixgbe_cleanup_fcoe(adapter); - -#endif /* IXGBE_FCOE */ #if defined(HAVE_NETDEV_STORAGE_ADDRESS) && defined(NETDEV_HW_ADDR_T_SAN) /* remove the added san mac */ ixgbe_del_sanmac_netdev(netdev); @@ -10072,15 +8855,15 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev) } #ifdef CONFIG_PCI_IOV - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { - if (!(ixgbe_check_vf_assignment(adapter))) - ixgbe_disable_sriov(adapter); - else - e_dev_warn("Unloading driver while VFs are assigned " - "- VFs will not be deallocated\n"); - } + ixgbe_disable_sriov(adapter); #endif /* CONFIG_PCI_IOV */ +#ifdef IXGBE_FCOE +#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE + ixgbe_fcoe_ddp_disable(adapter); + +#endif +#endif /* IXGBE_FCOE */ ixgbe_clear_interrupt_scheme(adapter); ixgbe_release_hw_control(adapter); diff --git a/src/ixgbe_param.c b/src/ixgbe_param.c index 6e3114b..ae8d7a4 100644 --- a/src/ixgbe_param.c +++ b/src/ixgbe_param.c @@ -128,20 +128,18 @@ IXGBE_PARAM(MQ, "Disable or enable Multiple Queues, default 1"); IXGBE_PARAM(DCA, "Disable or enable Direct Cache Access, 0=disabled, " "1=descriptor only, 2=descriptor and data"); - #endif /* RSS - Receive-Side Scaling (RSS) Descriptor Queues * * Valid Range: 0-16 - * - 0 - disables RSS - * - 1 - enables RSS and sets the Desc. Q's to min(16, num_online_cpus()). - * - 2-16 - enables RSS and sets the Desc. Q's to the specified value. + * - 0 - enables RSS and sets the Desc. Q's to min(16, num_online_cpus()). + * - 1-16 - enables RSS and sets the Desc. Q's to the specified value. 
* - * Default Value: 1 + * Default Value: 0 */ IXGBE_PARAM(RSS, "Number of Receive-Side Scaling Descriptor Queues, " - "default 1=number of cpus"); + "default 0=number of cpus"); /* VMDQ - Virtual Machine Device Queues (VMDQ) * @@ -266,26 +264,9 @@ IXGBE_PARAM(LLIVLANP, "Low Latency Interrupt on VLAN priority threshold"); #endif /* IXGBE_NO_LLI */ #ifdef HAVE_TX_MQ -/* Flow Director filtering mode - * - * Valid Range: 0-2 0 = off, 1 = Hashing (ATR), and 2 = perfect filters - * - * Default Value: 1 (ATR) - */ -IXGBE_PARAM(FdirMode, "Flow Director filtering modes:\n" - "\t\t\t0 = Filtering off\n" - "\t\t\t1 = Signature Hashing filters (SW ATR)\n" - "\t\t\t2 = Perfect Filters"); - -#define IXGBE_FDIR_FILTER_OFF 0 -#define IXGBE_FDIR_FILTER_HASH 1 -#define IXGBE_FDIR_FILTER_PERFECT 2 -#define IXGBE_DEFAULT_FDIR_FILTER IXGBE_FDIR_FILTER_HASH - /* Flow Director packet buffer allocation level * - * Valid Range: 0-3 - * 0 = No memory allocation, + * Valid Range: 1-3 * 1 = 8k hash/2k perfect, * 2 = 16k hash/4k perfect, * 3 = 32k hash/8k perfect @@ -311,6 +292,7 @@ IXGBE_PARAM(AtrSampleRate, "Software ATR Tx packet sample rate"); #define IXGBE_MIN_ATR_SAMPLE_RATE 1 #define IXGBE_ATR_SAMPLE_RATE_OFF 0 #define IXGBE_DEFAULT_ATR_SAMPLE_RATE 20 + #endif /* HAVE_TX_MQ */ #ifdef IXGBE_FCOE /* FCoE - Fibre Channel over Ethernet Offload Enable/Disable @@ -470,14 +452,12 @@ void __devinit ixgbe_check_options(struct ixgbe_adapter *adapter) "support unavailable\n"); } else { *aflags &= ~IXGBE_FLAG_MSIX_CAPABLE; - *aflags &= ~IXGBE_FLAG_DCB_CAPABLE; } break; case IXGBE_INT_LEGACY: default: *aflags &= ~IXGBE_FLAG_MSIX_CAPABLE; *aflags &= ~IXGBE_FLAG_MSI_CAPABLE; - *aflags &= ~IXGBE_FLAG_DCB_CAPABLE; break; } #ifdef module_param_array @@ -491,11 +471,9 @@ void __devinit ixgbe_check_options(struct ixgbe_adapter *adapter) *aflags & IXGBE_FLAG_MSI_CAPABLE) { *aflags &= ~IXGBE_FLAG_MSIX_CAPABLE; *aflags |= IXGBE_FLAG_MSI_CAPABLE; - *aflags &= ~IXGBE_FLAG_DCB_CAPABLE; } else { *aflags &= ~IXGBE_FLAG_MSIX_CAPABLE; *aflags &= ~IXGBE_FLAG_MSI_CAPABLE; - *aflags &= ~IXGBE_FLAG_DCB_CAPABLE; } } #endif @@ -582,8 +560,8 @@ void __devinit ixgbe_check_options(struct ixgbe_adapter *adapter) .type = range_option, .name = "Receive-Side Scaling (RSS)", .err = "using default.", - .def = OPTION_ENABLED, - .arg = { .r = { .min = OPTION_DISABLED, + .def = 0, + .arg = { .r = { .min = 0, .max = IXGBE_MAX_RSS_INDICES} } }; unsigned int rss = RSS[bd]; @@ -591,55 +569,29 @@ void __devinit ixgbe_check_options(struct ixgbe_adapter *adapter) #ifdef module_param_array if (num_RSS > bd) { #endif - if (rss != OPTION_ENABLED) - ixgbe_validate_option(&rss, &opt); - /* - * we cannot use an else since validate option may - * have changed the state of RSS - */ - if (rss == OPTION_ENABLED) { - /* - * Base it off num_online_cpus() with - * a hardware limit cap. - */ + ixgbe_validate_option(&rss, &opt); + /* base it off num_online_cpus() with hardware limit */ + if (!rss) rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus()); - } - feature[RING_F_RSS].indices = rss ? 
: 1; - if (rss) - *aflags |= IXGBE_FLAG_RSS_ENABLED; else - *aflags &= ~IXGBE_FLAG_RSS_ENABLED; + feature[RING_F_FDIR].limit = rss; + + feature[RING_F_RSS].limit = rss; #ifdef module_param_array - } else { - if (opt.def == OPTION_DISABLED) { - *aflags &= ~IXGBE_FLAG_RSS_ENABLED; - } else { - rss = min_t(int, IXGBE_MAX_RSS_INDICES, - num_online_cpus()); - feature[RING_F_RSS].indices = rss; - if (rss) - *aflags |= IXGBE_FLAG_RSS_ENABLED; - else - *aflags &= ~IXGBE_FLAG_RSS_ENABLED; - } + } else if (opt.def == 0) { + rss = min_t(int, IXGBE_MAX_RSS_INDICES, + num_online_cpus()); + feature[RING_F_RSS].limit = rss; } #endif /* Check Interoperability */ - if (*aflags & IXGBE_FLAG_RSS_ENABLED) { - if (!(*aflags & IXGBE_FLAG_RSS_CAPABLE)) { - DPRINTK(PROBE, INFO, - "RSS is not supported on this " - "hardware. Disabling RSS.\n"); - *aflags &= ~IXGBE_FLAG_RSS_ENABLED; - feature[RING_F_RSS].indices = 0; - } else if (!(*aflags & IXGBE_FLAG_MQ_CAPABLE)) { + if (rss > 1) { + if (!(*aflags & IXGBE_FLAG_MQ_CAPABLE)) { DPRINTK(PROBE, INFO, - "RSS is not supported while multiple " - "queues are disabled. " - "Disabling RSS.\n"); - *aflags &= ~IXGBE_FLAG_RSS_ENABLED; - feature[RING_F_RSS].indices = 0; + "Multiqueue is disabled. " + "Limiting RSS.\n"); + feature[RING_F_RSS].limit = 1; } } } @@ -654,28 +606,38 @@ void __devinit ixgbe_check_options(struct ixgbe_adapter *adapter) } } }; + switch (adapter->hw.mac.type) { + case ixgbe_mac_82598EB: + /* 82598 only supports up to 16 pools */ + opt.arg.r.max = 16; + break; + default: + break; + } + #ifdef module_param_array if (num_VMDQ > bd) { #endif unsigned int vmdq = VMDQ[bd]; + ixgbe_validate_option(&vmdq, &opt); - feature[RING_F_VMDQ].indices = vmdq; - adapter->flags2 |= IXGBE_FLAG2_VMDQ_DEFAULT_OVERRIDE; + /* zero or one both mean disabled from our driver's * perspective */ if (vmdq > 1) *aflags |= IXGBE_FLAG_VMDQ_ENABLED; else *aflags &= ~IXGBE_FLAG_VMDQ_ENABLED; + + feature[RING_F_VMDQ].limit = vmdq; #ifdef module_param_array } else { - if (opt.def == OPTION_DISABLED) { + if (opt.def == OPTION_DISABLED) *aflags &= ~IXGBE_FLAG_VMDQ_ENABLED; - } else { - feature[RING_F_VMDQ].indices = - IXGBE_DEFAULT_NUM_VMDQ; + else *aflags |= IXGBE_FLAG_VMDQ_ENABLED; - } + + feature[RING_F_VMDQ].limit = opt.def; } #endif /* Check Interoperability */ @@ -686,18 +648,8 @@ void __devinit ixgbe_check_options(struct ixgbe_adapter *adapter) "queues are disabled. 
" "Disabling VMDQ.\n"); *aflags &= ~IXGBE_FLAG_VMDQ_ENABLED; - feature[RING_F_VMDQ].indices = 0; + feature[RING_F_VMDQ].limit = 0; } - - if (adapter->hw.mac.type == ixgbe_mac_82598EB) - feature[RING_F_VMDQ].indices = - min(feature[RING_F_VMDQ].indices, 16); - - /* Disable incompatible features */ - *aflags &= ~IXGBE_FLAG_RSS_CAPABLE; - *aflags &= ~IXGBE_FLAG_RSS_ENABLED; - *aflags &= ~IXGBE_FLAG_DCB_CAPABLE; - *aflags &= ~IXGBE_FLAG_DCB_ENABLED; } } #ifdef CONFIG_PCI_IOV @@ -755,9 +707,6 @@ void __devinit ixgbe_check_options(struct ixgbe_adapter *adapter) "Disabling IOV.\n"); *aflags &= ~IXGBE_FLAG_SRIOV_ENABLED; adapter->num_vfs = 0; - } else { - *aflags &= ~IXGBE_FLAG_RSS_CAPABLE; - adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE; } } } @@ -968,74 +917,6 @@ void __devinit ixgbe_check_options(struct ixgbe_adapter *adapter) } #endif /* IXGBE_NO_LLI */ #ifdef HAVE_TX_MQ - { /* Flow Director filtering mode */ - unsigned int fdir_filter_mode; - static struct ixgbe_option opt = { - .type = range_option, - .name = "Flow Director filtering mode", - .err = "using default of " - __MODULE_STRING(IXGBE_DEFAULT_FDIR_FILTER), - .def = IXGBE_DEFAULT_FDIR_FILTER, - .arg = {.r = {.min = IXGBE_FDIR_FILTER_OFF, - .max = IXGBE_FDIR_FILTER_PERFECT} } - }; - - *aflags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; - *aflags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; - feature[RING_F_FDIR].indices = IXGBE_MAX_FDIR_INDICES; - - if (adapter->hw.mac.type == ixgbe_mac_82598EB) - goto no_flow_director; - - if (num_FdirMode > bd) { - fdir_filter_mode = FdirMode[bd]; - ixgbe_validate_option(&fdir_filter_mode, &opt); - - switch (fdir_filter_mode) { - case IXGBE_FDIR_FILTER_PERFECT: -#ifdef ETHTOOL_GRXRINGS - *aflags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; - DPRINTK(PROBE, INFO, "Flow Director perfect " - "filtering enabled\n"); -#else /* ETHTOOL_GRXRINGS */ - DPRINTK(PROBE, INFO, "No ethtool support for " - "Flow Director perfect filtering.\n"); -#endif /* ETHTOOL_GRXRINGS */ - break; - case IXGBE_FDIR_FILTER_HASH: - *aflags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; - DPRINTK(PROBE, INFO, "Flow Director hash " - "filtering enabled\n"); - break; - case IXGBE_FDIR_FILTER_OFF: - default: - DPRINTK(PROBE, INFO, "Flow Director " - "disabled\n"); - break; - } - } else { - *aflags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; - } - /* Check interoperability */ - if ((*aflags & (IXGBE_FLAG_FDIR_HASH_CAPABLE | - IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) && - !(*aflags & IXGBE_FLAG_MQ_CAPABLE)) { - DPRINTK(PROBE, INFO, - "Flow Director is not supported while " - "multiple queues are disabled. 
" - "Disabling Flow Director\n"); - *aflags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; - *aflags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; - } - - /* limit the number of queues for FDIR using RSS param */ - if (feature[RING_F_RSS].indices && num_RSS > bd && RSS[bd]) - feature[RING_F_FDIR].indices = - feature[RING_F_RSS].indices; - -no_flow_director: - /* empty code line with semi-colon */ ; - } { /* Flow Director packet buffer allocation */ unsigned int fdir_pballoc_mode; static struct ixgbe_option opt = { @@ -1049,25 +930,24 @@ void __devinit ixgbe_check_options(struct ixgbe_adapter *adapter) }; char pstring[10]; - if (adapter->hw.mac.type == ixgbe_mac_82598EB) - goto no_fdir_pballoc; - if (num_FdirPballoc > bd) { + if (adapter->hw.mac.type == ixgbe_mac_82598EB) { + adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_NONE; + } else if (num_FdirPballoc > bd) { fdir_pballoc_mode = FdirPballoc[bd]; ixgbe_validate_option(&fdir_pballoc_mode, &opt); switch (fdir_pballoc_mode) { - case IXGBE_FDIR_PBALLOC_64K: - adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K; - sprintf(pstring, "64kB"); + case IXGBE_FDIR_PBALLOC_256K: + adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_256K; + sprintf(pstring, "256kB"); break; case IXGBE_FDIR_PBALLOC_128K: adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_128K; sprintf(pstring, "128kB"); break; - case IXGBE_FDIR_PBALLOC_256K: - adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_256K; - sprintf(pstring, "256kB"); - break; + case IXGBE_FDIR_PBALLOC_64K: default: + adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K; + sprintf(pstring, "64kB"); break; } DPRINTK(PROBE, INFO, "Flow Director will be allocated " @@ -1075,8 +955,6 @@ void __devinit ixgbe_check_options(struct ixgbe_adapter *adapter) } else { adapter->fdir_pballoc = opt.def; } -no_fdir_pballoc: - /* empty code line with semi-colon */ ; } { /* Flow Director ATR Tx sample packet rate */ static struct ixgbe_option opt = { @@ -1091,11 +969,9 @@ void __devinit ixgbe_check_options(struct ixgbe_adapter *adapter) static const char atr_string[] = "ATR Tx Packet sample rate set to"; - adapter->atr_sample_rate = IXGBE_ATR_SAMPLE_RATE_OFF; - if (adapter->hw.mac.type == ixgbe_mac_82598EB) - goto no_fdir_sample; - - if (num_AtrSampleRate > bd) { + if (adapter->hw.mac.type == ixgbe_mac_82598EB) { + adapter->atr_sample_rate = IXGBE_ATR_SAMPLE_RATE_OFF; + } else if (num_AtrSampleRate > bd) { adapter->atr_sample_rate = AtrSampleRate[bd]; if (adapter->atr_sample_rate) { @@ -1107,8 +983,6 @@ void __devinit ixgbe_check_options(struct ixgbe_adapter *adapter) } else { adapter->atr_sample_rate = opt.def; } -no_fdir_sample: - /* empty code line with semi-colon */ ; } #endif /* HAVE_TX_MQ */ #ifdef IXGBE_FCOE @@ -1137,11 +1011,6 @@ void __devinit ixgbe_check_options(struct ixgbe_adapter *adapter) if (opt.def == OPTION_ENABLED) *aflags |= IXGBE_FLAG_FCOE_CAPABLE; } -#endif -#ifdef CONFIG_PCI_IOV - if (*aflags & (IXGBE_FLAG_SRIOV_ENABLED | - IXGBE_FLAG_VMDQ_ENABLED)) - *aflags &= ~IXGBE_FLAG_FCOE_CAPABLE; #endif DPRINTK(PROBE, INFO, "FCoE Offload feature %sabled\n", (*aflags & IXGBE_FLAG_FCOE_CAPABLE) ? 
diff --git a/src/ixgbe_phy.c b/src/ixgbe_phy.c index e3f5275..4de5040 100644 --- a/src/ixgbe_phy.c +++ b/src/ixgbe_phy.c @@ -886,9 +886,6 @@ s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw) status = ixgbe_identify_sfp_module_generic(hw); break; - case ixgbe_media_type_fiber_qsfp: - status = ixgbe_identify_qsfp_module_generic(hw); - break; default: hw->phy.sfp_type = ixgbe_sfp_type_not_present; @@ -1190,23 +1187,6 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) return IXGBE_ERR_SFP_NOT_PRESENT; } -/** - * ixgbe_identify_qsfp_module_generic - Identifies QSFP modules - * @hw: pointer to hardware structure - * - * Searches for and identifies the QSFP module and assigns appropriate PHY type - **/ -s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) -{ - s32 status = 0; - - if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber_qsfp) { - hw->phy.sfp_type = ixgbe_sfp_type_not_present; - status = IXGBE_ERR_SFP_NOT_PRESENT; - } - - return status; -} /** diff --git a/src/ixgbe_phy.h b/src/ixgbe_phy.h index bbe5a9e..4caf281 100644 --- a/src/ixgbe_phy.h +++ b/src/ixgbe_phy.h @@ -120,7 +120,6 @@ s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw); s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw); s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw); -s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw); s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, u16 *list_offset, u16 *data_offset); diff --git a/src/ixgbe_sriov.c b/src/ixgbe_sriov.c index 6d00afa..e04295e 100644 --- a/src/ixgbe_sriov.c +++ b/src/ixgbe_sriov.c @@ -42,50 +42,75 @@ #include "ixgbe_sriov.h" #ifdef CONFIG_PCI_IOV -static int ixgbe_find_enabled_vfs(struct ixgbe_adapter *adapter) -{ - int vfs_found = 0; -#ifdef HAVE_PCI_DEV_FLAGS_ASSIGNED - struct pci_dev *pdev = adapter->pdev; - struct pci_dev *pvfdev; - u16 vf_devfn = 0; - int device_id; +#if defined(IFLA_VF_MAX) && defined(__VMKLNX__) +#define pci_enable_sriov(dev,vfs) \ + (vmklnx_enable_vfs((dev), (vfs), NULL, NULL) != (vfs) ? -ENOTSUPP : 0) - switch (adapter->hw.mac.type) { - case ixgbe_mac_82599EB: - device_id = IXGBE_DEV_ID_82599_VF; +#define pci_disable_sriov(dev) \ + vmklnx_disable_vfs((dev), adapter->num_vfs, NULL, NULL) + +static VMK_ReturnStatus ixgbe_passthru_ops(struct net_device *netdev, + vmk_NetPTOP op, + void *pargs) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + VMK_ReturnStatus ret; + + switch (op) { + case VMK_NETPTOP_VF_SET_MAC: + { + vmk_NetPTOPVFSetMacArgs *args = pargs; + + if (is_zero_ether_addr(args->mac)) { + /* Remove the VF mac address */ + ixgbe_del_mac_filter(adapter, + adapter->vfinfo[args->vf].vf_mac_addresses, + args->vf); + memset(adapter->vfinfo[args->vf].vf_mac_addresses, + 0, ETH_ALEN); + adapter->vfinfo[args->vf].pf_set_mac = false; + ret = VMK_OK; + } else { + if (ixgbe_ndo_set_vf_mac(netdev, + args->vf, args->mac) < 0) + ret = VMK_FAILURE; + else + ret = VMK_OK; + } break; - case ixgbe_mac_X540: - device_id = IXGBE_DEV_ID_X540_VF; + } + case VMK_NETPTOP_VF_SET_DEFAULT_VLAN: + { + vmk_NetPTOPVFSetDefaultVlanArgs *args = pargs; + + if (args->enable) { + adapter->vfinfo[args->vf].pf_set_vlan = true; + ret = ixgbe_ndo_set_vf_vlan(netdev, args->vf, args->vid, + args->prio) ? VMK_FAILURE : VMK_OK; + } else { + adapter->vfinfo[args->vf].pf_set_vlan = false; + ret = ixgbe_ndo_set_vf_vlan(netdev, args->vf, 0, 0) ? 
+ VMK_FAILURE : VMK_OK; + } break; + } default: - device_id = 0; + e_err(probe, "Unhandled OP %d\n", op); + ret = VMK_FAILURE; break; } - - vf_devfn = pdev->devfn + 0x80; - pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, device_id, NULL); - while (pvfdev) { - if (pvfdev->devfn == vf_devfn) - vfs_found++; - vf_devfn += 2; - pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, - device_id, pvfdev); - } - -#endif - return vfs_found; + return ret; } +#endif void ixgbe_enable_sriov(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; - int err = 0; int num_vf_macvlans, i; struct vf_macvlans *mv_list; int pre_existing_vfs = 0; - pre_existing_vfs = ixgbe_find_enabled_vfs(adapter); + pre_existing_vfs = pci_num_vf(adapter->pdev); if (!pre_existing_vfs && !adapter->num_vfs) return; @@ -104,16 +129,33 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter) "enabled for this device - Please reload all " "VF drivers to avoid spoofed packet errors\n"); } else { + int err; + /* + * The 82599 supports up to 64 VFs per physical function + * but this implementation limits allocation to 63 so that + * basic networking resources are still available to the + * physical function + */ + + adapter->num_vfs = min_t(unsigned int, adapter->num_vfs, 63); + err = pci_enable_sriov(adapter->pdev, adapter->num_vfs); + if (err) { + e_err(probe, "Failed to enable PCI sriov: %d\n", err); + adapter->num_vfs = 0; + return; + } } - if (err) { - e_err(probe, "Failed to enable PCI sriov: %d\n", err); - goto err_novfs; - } - adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED; + adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED; e_info(probe, "SR-IOV enabled with %d VFs\n", adapter->num_vfs); + /* Enable VMDq flag so device will be set in VM mode */ + adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED; + if (!adapter->ring_feature[RING_F_VMDQ].limit) + adapter->ring_feature[RING_F_VMDQ].limit = 1; + adapter->ring_feature[RING_F_VMDQ].offset = adapter->num_vfs; + num_vf_macvlans = hw->mac.num_rar_entries - (IXGBE_MAX_PF_MACVLANS + 1 + adapter->num_vfs); @@ -138,18 +180,44 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter) kcalloc(adapter->num_vfs, sizeof(struct vf_data_storage), GFP_KERNEL); if (adapter->vfinfo) { + /* enable L2 switch and replication */ adapter->flags |= IXGBE_FLAG_SRIOV_L2SWITCH_ENABLE | IXGBE_FLAG_SRIOV_REPLICATION_ENABLE; - /* RSS not compatible with SR-IOV operation */ - adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; + /* limit traffic classes based on VFs enabled */ + if (adapter->num_vfs < 32) { + adapter->dcb_cfg.num_tcs.pg_tcs = 4; + adapter->dcb_cfg.num_tcs.pfc_tcs = 4; + } else { + adapter->dcb_cfg.num_tcs.pg_tcs = 1; + adapter->dcb_cfg.num_tcs.pfc_tcs = 1; + } + adapter->dcb_cfg.vt_mode = true; + + /* We do not support RSS w/ SR-IOV */ + adapter->ring_feature[RING_F_RSS].limit = 1; - /* Disable RSC when in SR-IOV mode */ + /* disable RSC when in SR-IOV mode */ adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE | IXGBE_FLAG2_RSC_ENABLED); - +#ifdef IXGBE_FCOE + /* + * When SR-IOV is enabled 82599 cannot support jumbo frames + * so we must disable FCoE because we cannot support FCoE MTU. 
+ */ + if (adapter->hw.mac.type == ixgbe_mac_82599EB) + adapter->flags &= ~(IXGBE_FLAG_FCOE_ENABLED | + IXGBE_FLAG_FCOE_CAPABLE); +#endif +#if defined(IFLA_VF_MAX) && defined(__VMKLNX__) + /* Register control callback */ + e_info(probe, "Registered Passthru Ops\n"); + VMK_REGISTER_PT_OPS(adapter->netdev, ixgbe_passthru_ops); +#endif + /* enable spoof checking for all VFs */ for (i = 0; i < adapter->num_vfs; i++) adapter->vfinfo[i].spoofchk_enabled = true; + return; } @@ -157,29 +225,85 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter) e_err(probe, "Unable to allocate memory for VF Data Storage - " "SRIOV disabled\n"); ixgbe_disable_sriov(adapter); +} -err_novfs: - adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; - adapter->num_vfs = 0; +static bool ixgbe_vfs_are_assigned(struct ixgbe_adapter *adapter) +{ +#ifdef HAVE_PCI_DEV_FLAGS_ASSIGNED + struct pci_dev *pdev = adapter->pdev; + struct pci_dev *vfdev; + int dev_id; + + switch (adapter->hw.mac.type) { + case ixgbe_mac_82599EB: + dev_id = IXGBE_DEV_ID_82599_VF; + break; + case ixgbe_mac_X540: + dev_id = IXGBE_DEV_ID_X540_VF; + break; + default: + return false; + } + + /* loop through all the VFs to see if we own any that are assigned */ + vfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, dev_id, NULL); + while (vfdev) { + /* if we don't own it we don't care */ + if (vfdev->is_virtfn && vfdev->physfn == pdev) { + /* if it is assigned we cannot release it */ + if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) + return true; + } + + vfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, dev_id, vfdev); + } + +#endif + return false; } #endif /* CONFIG_PCI_IOV */ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; - u32 gcr; u32 gpie; u32 vmdctl; + /* if SR-IOV is already disabled then there is nothing to do */ + if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + return; + + /* set num VFs to 0 to prevent access to vfinfo */ + adapter->num_vfs = 0; + + if (adapter->vfinfo) { + kfree(adapter->vfinfo); + adapter->vfinfo = NULL; + } + + if (adapter->mv_list) { + kfree(adapter->mv_list); + adapter->mv_list = NULL; + } + #ifdef CONFIG_PCI_IOV + /* + * If our VFs are assigned we cannot shut down SR-IOV + * without causing issues, so just leave the hardware + * available but disabled + */ + if (ixgbe_vfs_are_assigned(adapter)) { + e_dev_warn("Unloading driver while VFs are assigned " + "- VFs will not be deallocated\n"); + return; + } + /* disable iov and allow time for transactions to clear */ pci_disable_sriov(adapter->pdev); #endif /* turn off device IOV mode */ - gcr = IXGBE_READ_REG(hw, IXGBE_GCR_EXT); - gcr &= ~(IXGBE_GCR_EXT_SRIOV); - IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr); + IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, 0); gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); gpie &= ~IXGBE_GPIE_VTMODE_MASK; IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); @@ -190,20 +314,14 @@ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl); IXGBE_WRITE_FLUSH(hw); + /* Disable VMDq flag so device will be set in VM mode */ + if (adapter->ring_feature[RING_F_VMDQ].limit == 1) + adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED; + adapter->ring_feature[RING_F_VMDQ].offset = 0; + /* take a breather then clean up driver data */ msleep(100); - if (adapter->vfinfo) { - kfree(adapter->vfinfo); - adapter->vfinfo = NULL; - } - - if (adapter->mv_list) { - kfree(adapter->mv_list); - adapter->mv_list = NULL; - } - - adapter->num_vfs = 0; adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; } @@ -278,6 +396,10 @@ void 
ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter) int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf) { + /* VLAN 0 is a special case, don't allow it to be removed */ + if (!vid && !add) + return 0; + return ixgbe_set_vfta(&adapter->hw, vid, vf, (bool)add); } @@ -344,11 +466,11 @@ inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) VLAN_PRIO_SHIFT)), vf); ixgbe_set_vmolr(hw, vf, false); } else { + ixgbe_set_vf_vlan(adapter, true, 0, vf); ixgbe_set_vmvir(adapter, 0, vf); ixgbe_set_vmolr(hw, vf, true); } - /* reset multicast table array for vf */ adapter->vfinfo[vf].num_vf_mc_hashes = 0; @@ -361,12 +483,15 @@ inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, int vf, unsigned char *mac_addr) { - + s32 retval = 0; ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf); - memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, 6); - ixgbe_add_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf); + retval = ixgbe_add_mac_filter(adapter, mac_addr, vf); + if (retval >= 0) + memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, ETH_ALEN); + else + memset(adapter->vfinfo[vf].vf_mac_addresses, 0, ETH_ALEN); - return 0; + return retval; } static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter, @@ -374,6 +499,7 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter, { struct list_head *pos; struct vf_macvlans *entry; + s32 retval = 0; if (index <= 1) { list_for_each(pos, &adapter->vf_mvs.l) { @@ -414,26 +540,15 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter, if (!entry || !entry->free) return -ENOSPC; - entry->free = false; - entry->is_macvlan = true; - entry->vf = vf; - memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN); - ixgbe_add_mac_filter(adapter, mac_addr, vf); - - return 0; -} - -int ixgbe_check_vf_assignment(struct ixgbe_adapter *adapter) -{ -#ifdef HAVE_PCI_DEV_FLAGS_ASSIGNED - int i; - for (i = 0; i < adapter->num_vfs; i++) { - if (adapter->vfinfo[i].vfdev->dev_flags & - PCI_DEV_FLAGS_ASSIGNED) - return true; + retval = ixgbe_add_mac_filter(adapter, mac_addr, vf); + if (retval >= 0) { + entry->free = false; + entry->is_macvlan = true; + entry->vf = vf; + memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN); } -#endif - return false; + + return retval; } #ifdef CONFIG_PCI_IOV @@ -442,12 +557,6 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask) unsigned char vf_mac_addr[6]; struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); unsigned int vfn = (event_mask & 0x3f); -#ifdef HAVE_PCI_DEV_FLAGS_ASSIGNED - struct pci_dev *pvfdev; - unsigned int device_id; - u16 thisvf_devfn = (pdev->devfn + 0x80 + (vfn << 1)) | - (pdev->devfn & 1); -#endif bool enable = ((event_mask & 0x10000000U) != 0); if (enable) { @@ -461,39 +570,12 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask) * for it later. 
*/ memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6); -#ifdef HAVE_PCI_DEV_FLAGS_ASSIGNED - - switch (adapter->hw.mac.type) { - case ixgbe_mac_82599EB: - device_id = IXGBE_DEV_ID_82599_VF; - break; - case ixgbe_mac_X540: - device_id = IXGBE_DEV_ID_X540_VF; - break; - default: - device_id = 0; - break; - } - - pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, device_id, NULL); - while (pvfdev) { - if (pvfdev->devfn == thisvf_devfn) - break; - pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, - device_id, pvfdev); - } - if (pvfdev) - adapter->vfinfo[vfn].vfdev = pvfdev; - else - e_err(drv, "Couldn't find pci dev ptr for VF %4.4x\n", - thisvf_devfn); -#endif } return 0; } +#endif /* CONFIG_PCI_IOV */ -#endif inline void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) { struct ixgbe_hw *hw = &adapter->hw; @@ -519,13 +601,14 @@ inline void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) reg |= (1 << vf_shift); IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg); - /* Reset the VFs TDWBAL and TDWBAH registers - * which are not cleared by an FLR - */ - for (i = 0; i < q_per_pool; i++) { - IXGBE_WRITE_REG(hw, IXGBE_PVFTDWBAHn(q_per_pool, vf, i), 0); - IXGBE_WRITE_REG(hw, IXGBE_PVFTDWBALn(q_per_pool, vf, i), 0); - } + /* + * Reset the VFs TDWBAL and TDWBAH registers + * which are not cleared by an FLR + */ + for (i = 0; i < q_per_pool; i++) { + IXGBE_WRITE_REG(hw, IXGBE_PVFTDWBAHn(q_per_pool, vf, i), 0); + IXGBE_WRITE_REG(hw, IXGBE_PVFTDWBALn(q_per_pool, vf, i), 0); + } ixgbe_vf_reset_event(adapter, vf); } @@ -561,13 +644,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) adapter->vfinfo[vf].clear_to_send = false; ixgbe_vf_reset_msg(adapter, vf); adapter->vfinfo[vf].clear_to_send = true; - - if (is_valid_ether_addr(new_mac) && - !adapter->vfinfo[vf].pf_set_mac) - ixgbe_set_vf_mac(adapter, vf, new_mac); - else - ixgbe_set_vf_mac(adapter, - vf, adapter->vfinfo[vf].vf_mac_addresses); + ixgbe_set_vf_mac(adapter, vf, vf_mac); /* reply to reset with ack and vf mac address */ msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK; @@ -575,7 +652,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) /* Piggyback the multicast filter type so VF can compute the * correct vectors */ msgbuf[3] = hw->mac.mc_filter_type; - ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf); + retval = ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf); return retval; } @@ -591,8 +668,11 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) new_mac = ((u8 *)(&msgbuf[1])); if (is_valid_ether_addr(new_mac) && !adapter->vfinfo[vf].pf_set_mac) { - ixgbe_set_vf_mac(adapter, vf, new_mac); e_info(probe, "Set MAC msg received from VF %d\n", vf); + if (ixgbe_set_vf_mac(adapter, vf, new_mac) >= 0) + retval = 0 ; + else + retval = -1; } else if (memcmp(adapter->vfinfo[vf].vf_mac_addresses, new_mac, ETH_ALEN)) { e_warn(drv, "VF %d attempted to override " @@ -642,7 +722,6 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) retval = -1; break; } - #ifdef HAVE_VF_SPOOFCHK_CONFIGURE /* * If the VF is allowed to set MAC filters then turn off @@ -737,18 +816,24 @@ void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter) #ifdef IFLA_VF_MAX int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) { + s32 retval = 0; struct ixgbe_adapter *adapter = netdev_priv(netdev); if (!is_valid_ether_addr(mac) || (vf >= adapter->num_vfs)) return -EINVAL; adapter->vfinfo[vf].pf_set_mac = true; dev_info(&adapter->pdev->dev, 
"setting MAC %pM on VF %d\n", mac, vf); - dev_info(&adapter->pdev->dev, "Reload the VF driver to make this" - " change effective."); - if (test_bit(__IXGBE_DOWN, &adapter->state)) { - dev_warn(&adapter->pdev->dev, "The VF MAC address has been set, but the PF device is not up.\n"); - dev_warn(&adapter->pdev->dev, "Bring the PF device up before attempting to use the VF device.\n"); + dev_info(&adapter->pdev->dev, "Reload the VF driver to make this change effective.\n"); + retval = ixgbe_set_vf_mac(adapter, vf, mac); + if (retval >= 0) { + adapter->vfinfo[vf].pf_set_mac = true; + if (test_bit(__IXGBE_DOWN, &adapter->state)) { + dev_warn(&adapter->pdev->dev, "The VF MAC address has been set, but the PF device is not up.\n"); + dev_warn(&adapter->pdev->dev, "Bring the PF device up before attempting to use the VF device.\n"); + } + } else { + dev_warn(&adapter->pdev->dev, "The VF MAC address was NOT set due to invalid or duplicate MAC address.\n"); } - return ixgbe_set_vf_mac(adapter, vf, mac); + return retval; } static int ixgbe_enable_port_vlan(struct ixgbe_adapter *adapter, @@ -826,9 +911,9 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) return err; } -static int ixgbe_link_mbps(int internal_link_speed) +static int ixgbe_link_mbps(struct ixgbe_adapter *adapter) { - switch (internal_link_speed) { + switch (adapter->link_speed) { case IXGBE_LINK_SPEED_100_FULL: return 100; case IXGBE_LINK_SPEED_1GB_FULL: @@ -840,27 +925,30 @@ static int ixgbe_link_mbps(int internal_link_speed) } } -static void ixgbe_set_vf_rate_limit(struct ixgbe_hw *hw, int vf, int tx_rate, - int link_speed) +static void ixgbe_set_vf_rate_limit(struct ixgbe_adapter *adapter, int vf) { - int rf_dec, rf_int; - u32 bcnrc_val; + struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + struct ixgbe_hw *hw = &adapter->hw; + u32 bcnrc_val = 0; + u16 queue, queues_per_pool; + u16 tx_rate = adapter->vfinfo[vf].tx_rate; + + if (tx_rate) { + /* start with base link speed value */ + bcnrc_val = adapter->vf_rate_link_speed; - if (tx_rate != 0) { /* Calculate the rate factor values to set */ - rf_int = link_speed / tx_rate; - rf_dec = (link_speed - (rf_int * tx_rate)); - rf_dec = (rf_dec * (1<mask); + + /* write value for all Tx queues belonging to VF */ + for (queue = 0; queue < queues_per_pool; queue++) { + unsigned int reg_idx = (vf * queues_per_pool) + queue; + + IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, reg_idx); + IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val); + } } void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter) { - int actual_link_speed, i; - bool reset_rate = false; + int i; - /* VF TX rate limit was not set */ - if (adapter->vf_rate_link_speed == 0) + /* VF Tx rate limit was not set */ + if (!adapter->vf_rate_link_speed) return; - actual_link_speed = ixgbe_link_mbps(adapter->link_speed); - if (actual_link_speed != adapter->vf_rate_link_speed) { - reset_rate = true; + if (ixgbe_link_mbps(adapter) != adapter->vf_rate_link_speed) { adapter->vf_rate_link_speed = 0; dev_info(&adapter->pdev->dev, - "Link speed has been changed. VF Transmit rate " - "is disabled\n"); + "Link speed has been changed. 
VF Transmit rate is disabled\n"); } for (i = 0; i < adapter->num_vfs; i++) { - if (reset_rate) + if (!adapter->vf_rate_link_speed) adapter->vfinfo[i].tx_rate = 0; - ixgbe_set_vf_rate_limit(&adapter->hw, i, - adapter->vfinfo[i].tx_rate, - actual_link_speed); + ixgbe_set_vf_rate_limit(adapter, i); } } int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - int actual_link_speed; + int link_speed; - actual_link_speed = ixgbe_link_mbps(adapter->link_speed); - if ((vf >= adapter->num_vfs) || (!adapter->link_up) || - (tx_rate > actual_link_speed) || (actual_link_speed != 10000) || - ((tx_rate != 0) && (tx_rate <= 10))) - /* rate limit cannot be set to 10Mb or less in 10Gb adapters */ + /* verify VF is active */ + if (vf >= adapter->num_vfs) return -EINVAL; - adapter->vf_rate_link_speed = actual_link_speed; - adapter->vfinfo[vf].tx_rate = (u16)tx_rate; - ixgbe_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed); + /* verify link is up */ + if (!adapter->link_up) + return -EINVAL; + + /* verify we are linked at 10Gbps */ + link_speed = ixgbe_link_mbps(adapter); + if (link_speed != 10000) + return -EINVAL; + + /* rate limit cannot be less than 10Mbs or greater than link speed */ + if (tx_rate && ((tx_rate <= 10) || (tx_rate > link_speed))) + return -EINVAL; + + /* store values */ + adapter->vf_rate_link_speed = link_speed; + adapter->vfinfo[vf].tx_rate = tx_rate; + + /* update hardware configuration */ + ixgbe_set_vf_rate_limit(adapter, vf); return 0; } @@ -954,7 +1057,7 @@ int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting) return 0; } -#endif +#endif /* HAVE_VF_SPOOFCHK_CONFIGURE */ int ixgbe_ndo_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivi) { @@ -971,4 +1074,4 @@ int ixgbe_ndo_get_vf_config(struct net_device *netdev, #endif return 0; } -#endif +#endif /* IFLA_VF_MAX */ diff --git a/src/ixgbe_sriov.h b/src/ixgbe_sriov.h index b1cc9d0..8dc6a11 100644 --- a/src/ixgbe_sriov.h +++ b/src/ixgbe_sriov.h @@ -51,13 +51,12 @@ int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting); #endif int ixgbe_ndo_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivi); -#endif +#endif /* IFLA_VF_MAX */ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter); #ifdef CONFIG_PCI_IOV int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask); void ixgbe_enable_sriov(struct ixgbe_adapter *adapter); #endif -int ixgbe_check_vf_assignment(struct ixgbe_adapter *adapter); #ifdef IFLA_VF_MAX void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter); #endif /* IFLA_VF_MAX */ diff --git a/src/ixgbe_sysfs.c b/src/ixgbe_sysfs.c index 862088e..8e3c18e 100644 --- a/src/ixgbe_sysfs.c +++ b/src/ixgbe_sysfs.c @@ -89,6 +89,7 @@ static struct ixgbe_adapter *ixgbe_get_adapter(struct kobject *kobj) return adapter; } + static bool ixgbe_thermal_present(struct kobject *kobj) { s32 status; @@ -793,6 +794,7 @@ static ssize_t ixgbe_sysfs_cautionthresh(struct kobject *kobj, } /* Initialize the attributes */ + static struct kobj_attribute ixgbe_sysfs_location_attr = __ATTR(location, 0444, ixgbe_sysfs_location, NULL); static struct kobj_attribute ixgbe_sysfs_temp_attr = @@ -876,6 +878,7 @@ static struct kobj_attribute ixgbe_sysfs_pciebnbr_attr = __ATTR(pciebnbr, 0444, ixgbe_pciebnbr, NULL); /* Add the attributes into an array, to be added to a group */ + static struct attribute *therm_attrs[] = { 
&ixgbe_sysfs_location_attr.attr, &ixgbe_sysfs_temp_attr.attr, @@ -925,6 +928,7 @@ static struct attribute *attrs[] = { }; /* add attributes to a group */ + static struct attribute_group therm_attr_group = { .attrs = therm_attrs, }; @@ -965,7 +969,6 @@ int ixgbe_sysfs_init(struct ixgbe_adapter *adapter) struct net_device *netdev; int rc = 0; int i; - char buf[16]; if (adapter == NULL) goto err; @@ -991,6 +994,8 @@ int ixgbe_sysfs_init(struct ixgbe_adapter *adapter) for (i = 0; i < IXGBE_MAX_SENSORS; i++) { + char buf[16]; + /* * Likewise only create individual kobjs that have * meaningful data. diff --git a/src/ixgbe_type.h b/src/ixgbe_type.h index 6b21c87..6e8b2fa 100644 --- a/src/ixgbe_type.h +++ b/src/ixgbe_type.h @@ -60,7 +60,7 @@ #define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529 #define IXGBE_DEV_ID_82599_SFP_EM 0x1507 #define IXGBE_DEV_ID_82599_SFP_SF2 0x154D -#define IXGBE_DEV_ID_82599_QSFP_SF_QP 0x1558 +#define IXGBE_DEV_ID_82599_SFP_SF_QP 0x154A #define IXGBE_DEV_ID_82599EN_SFP 0x1557 #define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC #define IXGBE_DEV_ID_82599_T3_LOM 0x151C @@ -836,6 +836,7 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_GCR_EXT_VT_MODE_64 0x00000003 #define IXGBE_GCR_EXT_SRIOV (IXGBE_GCR_EXT_MSIX_EN | \ IXGBE_GCR_EXT_VT_MODE_64) +#define IXGBE_GCR_EXT_VT_MODE_MASK 0x00000003 /* Time Sync Registers */ #define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */ #define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */ @@ -856,6 +857,8 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_TRGTTIMH0 0x08C28 /* Target Time Register 0 High - RW */ #define IXGBE_TRGTTIML1 0x08C2C /* Target Time Register 1 Low - RW */ #define IXGBE_TRGTTIMH1 0x08C30 /* Target Time Register 1 High - RW */ +#define IXGBE_CLKTIML 0x08C34 /* Clock Out Time Register Low - RW */ +#define IXGBE_CLKTIMH 0x08C38 /* Clock Out Time Register High - RW */ #define IXGBE_FREQOUT0 0x08C34 /* Frequency Out 0 Control register - RW */ #define IXGBE_FREQOUT1 0x08C38 /* Frequency Out 1 Control register - RW */ #define IXGBE_AUXSTMPL0 0x08C3C /* Auxiliary Time Stamp 0 register Low - RO */ @@ -1388,6 +1391,7 @@ enum { #define IXGBE_EICR_LINKSEC 0x00200000 /* PN Threshold */ #define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */ #define IXGBE_EICR_TS 0x00800000 /* Thermal Sensor Event */ +#define IXGBE_EICR_TIMESYNC 0x01000000 /* Timesync Event */ #define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */ #define IXGBE_EICR_GPI_SDP1 0x02000000 /* Gen Purpose Interrupt on SDP1 */ #define IXGBE_EICR_GPI_SDP2 0x04000000 /* Gen Purpose Interrupt on SDP2 */ @@ -1405,6 +1409,7 @@ enum { #define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ #define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */ #define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ +#define IXGBE_EICS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */ #define IXGBE_EICS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ #define IXGBE_EICS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ #define IXGBE_EICS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ @@ -1423,6 +1428,7 @@ enum { #define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */ #define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ #define IXGBE_EIMS_TS IXGBE_EICR_TS /* Thermal Sensor Event */ +#define IXGBE_EIMS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */ #define IXGBE_EIMS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ #define IXGBE_EIMS_GPI_SDP1 
IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ #define IXGBE_EIMS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ @@ -1440,6 +1446,7 @@ enum { #define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ #define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */ #define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ +#define IXGBE_EIMC_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */ #define IXGBE_EIMC_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ #define IXGBE_EIMC_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ #define IXGBE_EIMC_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ @@ -1525,6 +1532,7 @@ enum { #define IXGBE_ETQF_1588 0x40000000 /* bit 30 */ #define IXGBE_ETQF_FILTER_EN 0x80000000 /* bit 31 */ #define IXGBE_ETQF_POOL_ENABLE (1 << 26) /* bit 26 */ +#define IXGBE_ETQF_POOL_SHIFT 20 #define IXGBE_ETQS_RX_QUEUE 0x007F0000 /* bits 22:16 */ #define IXGBE_ETQS_RX_QUEUE_SHIFT 16 @@ -1966,6 +1974,10 @@ enum { #define IXGBE_RXDCTL_RLPML_EN 0x00008000 #define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */ +#define IXGBE_TSAUXC_EN_CLK 0x00000004 +#define IXGBE_TSAUXC_SYNCLK 0x00000008 +#define IXGBE_TSAUXC_SDP0_INT 0x00000040 + #define IXGBE_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */ #define IXGBE_TSYNCTXCTL_ENABLED 0x00000010 /* Tx timestamping enabled */ @@ -2814,7 +2826,6 @@ enum ixgbe_sfp_type { enum ixgbe_media_type { ixgbe_media_type_unknown = 0, ixgbe_media_type_fiber, - ixgbe_media_type_fiber_qsfp, ixgbe_media_type_fiber_lco, ixgbe_media_type_copper, ixgbe_media_type_backplane, @@ -3154,7 +3165,6 @@ struct ixgbe_phy_info { bool smart_speed_active; bool multispeed_fiber; bool reset_if_overtemp; - bool qsfp_shared_i2c_bus; }; #include "ixgbe_mbx.h" diff --git a/src/kcompat.c b/src/kcompat.c index 9a3f943..b366648 100644 --- a/src/kcompat.c +++ b/src/kcompat.c @@ -1051,70 +1051,32 @@ void _kc_skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, } #endif /* < 2.6.28 */ -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) ) -#ifdef HAVE_NETDEV_SELECT_QUEUE -#include -static u32 _kc_simple_tx_hashrnd; -static u32 _kc_simple_tx_hashrnd_initialized; - -u16 _kc_skb_tx_hash(struct net_device *dev, struct sk_buff *skb) +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) ) +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0)) +int _kc_pci_num_vf(struct pci_dev *dev) { - u32 addr1, addr2, ports; - u32 hash, ihl; - u8 ip_proto = 0; + int num_vf = 0; +#ifdef CONFIG_PCI_IOV + struct pci_dev *vfdev; - if (unlikely(!_kc_simple_tx_hashrnd_initialized)) { - get_random_bytes(&_kc_simple_tx_hashrnd, 4); - _kc_simple_tx_hashrnd_initialized = 1; - } + /* loop through all ethernet devices starting at PF dev */ + vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, NULL); + while (vfdev) { + if (vfdev->is_virtfn && vfdev->physfn == dev) + num_vf++; - switch (skb->protocol) { - case htons(ETH_P_IP): - if (!(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET))) - ip_proto = ip_hdr(skb)->protocol; - addr1 = ip_hdr(skb)->saddr; - addr2 = ip_hdr(skb)->daddr; - ihl = ip_hdr(skb)->ihl; - break; -#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) - case htons(ETH_P_IPV6): - ip_proto = ipv6_hdr(skb)->nexthdr; - addr1 = ipv6_hdr(skb)->saddr.s6_addr32[3]; - addr2 = ipv6_hdr(skb)->daddr.s6_addr32[3]; - ihl = (40 >> 2); - break; -#endif - default: - return 0; + vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, vfdev); } - - switch (ip_proto) { - case 
IPPROTO_TCP: - case IPPROTO_UDP: - case IPPROTO_DCCP: - case IPPROTO_ESP: - case IPPROTO_AH: - case IPPROTO_SCTP: - case IPPROTO_UDPLITE: - ports = *((u32 *) (skb_network_header(skb) + (ihl * 4))); - break; - - default: - ports = 0; - break; - } - - hash = jhash_3words(addr1, addr2, ports, _kc_simple_tx_hashrnd); - - return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32); +#endif + return num_vf; } -#endif /* HAVE_NETDEV_SELECT_QUEUE */ -#endif /* < 2.6.30 */ +#endif /* RHEL_RELEASE_CODE */ +#endif /* < 2.6.34 */ #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) ) #ifdef HAVE_TX_MQ +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0))) #ifndef CONFIG_NETDEVICES_MULTIQUEUE void _kc_netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) { @@ -1139,6 +1101,7 @@ void _kc_netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) } } #endif /* CONFIG_NETDEVICES_MULTIQUEUE */ +#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */ #endif /* HAVE_TX_MQ */ #endif /* < 2.6.35 */ @@ -1163,6 +1126,46 @@ int _kc_ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported) } #endif /* < 2.6.36 */ +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) ) +#ifdef HAVE_NETDEV_SELECT_QUEUE +#include +static u32 _kc_simple_tx_hashrnd; +static u32 _kc_simple_tx_hashrnd_initialized; + +u16 ___kc_skb_tx_hash(struct net_device *dev, const struct sk_buff *skb, + u16 num_tx_queues) +{ + u32 hash; + + if (skb_rx_queue_recorded(skb)) { + hash = skb_get_rx_queue(skb); + while (unlikely(hash >= num_tx_queues)) + hash -= num_tx_queues; + return hash; + } + + if (unlikely(!_kc_simple_tx_hashrnd_initialized)) { + get_random_bytes(&_kc_simple_tx_hashrnd, 4); + _kc_simple_tx_hashrnd_initialized = 1; + } + + if (skb->sk && skb->sk->sk_hash) + hash = skb->sk->sk_hash; + else +#ifdef NETIF_F_RXHASH + hash = (__force u16) skb->protocol ^ skb->rxhash; +#else + hash = skb->protocol; +#endif + + hash = jhash_1word(hash, _kc_simple_tx_hashrnd); + + return (u16) (((u64) hash * num_tx_queues) >> 32); +} +#endif /* HAVE_NETDEV_SELECT_QUEUE */ +#endif /* < 2.6.38 */ + /******************************************************************************/ #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) ) #if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0))) @@ -1175,20 +1178,23 @@ u8 _kc_netdev_get_num_tc(struct net_device *dev) return 0; } -u8 _kc_netdev_get_prio_tc_map(struct net_device *dev, u8 up) +int _kc_netdev_set_num_tc(struct net_device *dev, u8 num_tc) { struct adapter_struct *kc_adapter = netdev_priv(dev); - int tc; - u8 map; - for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) { - map = kc_adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap; + if (num_tc > IXGBE_DCB_MAX_TRAFFIC_CLASS) + return -EINVAL; - if (map & (1 << up)) - return tc; - } + kc_adapter->tc = num_tc; return 0; } + +u8 _kc_netdev_get_prio_tc_map(struct net_device *dev, u8 up) +{ + struct adapter_struct *kc_adapter = netdev_priv(dev); + + return ixgbe_dcb_get_tc_from_up(&kc_adapter->dcb_cfg, 0, up); +} #endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */ #endif /* < 2.6.39 */ diff --git a/src/kcompat.h b/src/kcompat.h index b1bb8d0..8bb73f4 100644 --- a/src/kcompat.h +++ b/src/kcompat.h @@ -56,32 +56,14 @@ #include /* NAPI enable/disable flags here */ -/* enable NAPI for ixgbe by default */ -#undef CONFIG_IXGBE_NAPI -#define CONFIG_IXGBE_NAPI #define NAPI -#ifdef CONFIG_IXGBE_NAPI 
-#undef NAPI -#define NAPI -#endif /* CONFIG_IXGBE_NAPI */ -#ifdef IXGBE_NAPI -#undef NAPI -#define NAPI -#endif /* IXGBE_NAPI */ -#ifdef IXGBE_NO_NAPI -#undef NAPI -#endif /* IXGBE_NO_NAPI */ #define adapter_struct ixgbe_adapter #define adapter_q_vector ixgbe_q_vector /* and finally set defines so that the code sees the changes */ #ifdef NAPI -#ifndef CONFIG_IXGBE_NAPI -#define CONFIG_IXGBE_NAPI -#endif #else -#undef CONFIG_IXGBE_NAPI #endif /* NAPI */ /* packet split disable/enable */ @@ -284,7 +266,7 @@ struct msix_entry { #if !defined(IXGBE_DCA) && !defined(IGB_DCA) #define dca_get_tag(b) 0 #define dca_add_requester(a) -1 -#define dca_remove_requester(b) do { } while(0) +#define dca_remove_requester(b) do { } while(0) #define DCA_PROVIDER_ADD 0x0001 #define DCA_PROVIDER_REMOVE 0x0002 #endif @@ -331,6 +313,10 @@ struct _kc_vlan_hdr { #define __GFP_COLD 0 #endif +#ifndef __GFP_COMP +#define __GFP_COMP 0 +#endif + /*****************************************************************************/ /* Installations with ethtool version without eeprom, adapter id, or statistics * support */ @@ -996,7 +982,6 @@ struct vlan_ethhdr { /* we won't support NAPI on less than 2.4.20 */ #ifdef NAPI #undef NAPI -#undef CONFIG_IXGBE_NAPI #endif #endif /* 2.4.20 => 2.4.19 */ @@ -1156,6 +1141,11 @@ static inline struct device *pci_dev_to_dev(struct pci_dev *pdev) #define dma_unmap_single(dev,a,b,c) \ pci_unmap_single(to_pci_dev(dev),(a),(b),(c)) +#define dma_map_sg(dev, sg, nents, dir) \ + pci_map_sg(to_pci_dev(dev), (sg), (nents), (dir) +#define dma_unmap_sg(dev, sg, nents, dir) \ + pci_unmap_sg(to_pci_dev(dev), (sg), (nents), (dir) + #define dma_sync_single(dev,a,b,c) \ pci_dma_sync_single(to_pci_dev(dev),(a),(b),(c)) @@ -1325,6 +1315,17 @@ extern size_t _kc_strlcpy(char *dest, const char *src, size_t size); #endif /* 2.6.0 => 2.5.28 */ +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) ) +#define dma_pool pci_pool +#define dma_pool_destroy pci_pool_destroy +#define dma_pool_alloc pci_pool_alloc +#define dma_pool_free pci_pool_free + +#define dma_pool_create(name,dev,size,align,allocation) \ + pci_pool_create((name),to_pci_dev(dev),(size),(align),(allocation)) +#endif /* < 2.6.3 */ + /*****************************************************************************/ /* 2.6.4 => 2.6.0 */ #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) @@ -1541,6 +1542,7 @@ static inline void *_kc_skb_header_pointer(const struct sk_buff *skb, extern DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES); #undef node_online_map #define node_online_map _kcompat_node_online_map +#define pci_get_class pci_find_class #endif /* < 2.6.10 */ /*****************************************************************************/ @@ -1813,6 +1815,9 @@ static inline int _kc_skb_padto(struct sk_buff *skb, unsigned int len) #ifndef DIV_ROUND_UP #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) #endif +#ifndef __ALIGN_MASK +#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask)) +#endif #if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) ) #if (!((RHEL_RELEASE_CODE && \ ((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,4) && \ @@ -2403,11 +2408,10 @@ extern void _kc_skb_add_rx_frag(struct sk_buff *, int, struct page *, int, int); #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) ) #define skb_rx_queue_recorded(a) false #define skb_get_rx_queue(a) 0 +#define skb_record_rx_queue(a, b) do {} while (0) +#define skb_tx_hash(n, s) ___kc_skb_tx_hash((n), (s), 
(n)->real_num_tx_queues) #undef CONFIG_FCOE #undef CONFIG_FCOE_MODULE -extern u16 _kc_skb_tx_hash(struct net_device *dev, struct sk_buff *skb); -#define skb_tx_hash(n, s) _kc_skb_tx_hash(n, s) -#define skb_record_rx_queue(a, b) do {} while (0) #ifndef CONFIG_PCI_IOV #undef pci_enable_sriov #define pci_enable_sriov(a, b) -ENOTSUPP @@ -2538,6 +2542,13 @@ extern u16 _kc_skb_tx_hash(struct net_device *dev, struct sk_buff *skb); /*****************************************************************************/ #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) ) +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0)) +#ifndef pci_num_vf +#define pci_num_vf(pdev) _kc_pci_num_vf(pdev) +extern int _kc_pci_num_vf(struct pci_dev *dev); +#endif +#endif /* RHEL_RELEASE_CODE */ + #ifndef ETH_FLAG_NTUPLE #define ETH_FLAG_NTUPLE NETIF_F_NTUPLE #endif @@ -2686,7 +2697,7 @@ do { \ #endif /* for_each_set_bit */ #ifndef DEFINE_DMA_UNMAP_ADDR -#define DEFINE_DMA_UNMAP_ADDR DECLARE_PCI_UNMAP_ADDR +#define DEFINE_DMA_UNMAP_ADDR DECLARE_PCI_UNMAP_ADDR #define DEFINE_DMA_UNMAP_LEN DECLARE_PCI_UNMAP_LEN #define dma_unmap_addr pci_unmap_addr #define dma_unmap_addr_set pci_unmap_addr_set @@ -2709,15 +2720,17 @@ do { \ #ifdef HAVE_TX_MQ #include #ifndef CONFIG_NETDEVICES_MULTIQUEUE +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0))) void _kc_netif_set_real_num_tx_queues(struct net_device *, unsigned int); #define netif_set_real_num_tx_queues _kc_netif_set_real_num_tx_queues +#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */ #else /* CONFIG_NETDEVICES_MULTI_QUEUE */ #define netif_set_real_num_tx_queues(_netdev, _count) \ do { \ (_netdev)->egress_subqueue_count = _count; \ } while (0) #endif /* CONFIG_NETDEVICES_MULTI_QUEUE */ -#else +#else /* HAVE_TX_MQ */ #define netif_set_real_num_tx_queues(_netdev, _count) do {} while(0) #endif /* HAVE_TX_MQ */ #ifndef ETH_FLAG_RXHASH @@ -2780,7 +2793,7 @@ do { \ } while (0) #undef usleep_range -#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000)) +#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000)) #else /* < 2.6.36 */ #define HAVE_PM_QOS_REQUEST_ACTIVE @@ -2790,6 +2803,15 @@ do { \ /*****************************************************************************/ #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) ) +#ifndef netif_set_real_num_rx_queues +static inline int __kc_netif_set_real_num_rx_queues(struct net_device *dev, + unsigned int rxq) +{ + return 0; +} +#define netif_set_real_num_rx_queues(dev, rxq) \ + __kc_netif_set_real_num_rx_queues((dev), (rxq)) +#endif #ifndef ETHTOOL_RXNTUPLE_ACTION_CLEAR #define ETHTOOL_RXNTUPLE_ACTION_CLEAR (-2) #endif @@ -2884,6 +2906,8 @@ static inline int _kc_skb_checksum_start_offset(const struct sk_buff *skb) #define DCB_CAP_DCBX_STATIC 0x10 #endif #endif /* CONFIG_DCB */ +extern u16 ___kc_skb_tx_hash(struct net_device *, const struct sk_buff *, u16); +#define __skb_tx_hash(n, s, q) ___kc_skb_tx_hash((n), (s), (q)) #else /* < 2.6.38 */ #endif /* < 2.6.38 */ @@ -2898,6 +2922,10 @@ static inline int _kc_skb_checksum_start_offset(const struct sk_buff *skb) #if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0))) extern u8 _kc_netdev_get_num_tc(struct net_device *dev); #define netdev_get_num_tc(dev) _kc_netdev_get_num_tc(dev) +extern int _kc_netdev_set_num_tc(struct net_device *dev, u8 num_tc); +#define netdev_set_num_tc(dev, tc) _kc_netdev_set_num_tc((dev), (tc)) +#define netdev_reset_tc(dev) _kc_netdev_set_num_tc((dev), 0) +#define netdev_set_tc_queue(dev, tc, cnt, off) 
do {} while (0) extern u8 _kc_netdev_get_prio_tc_map(struct net_device *dev, u8 up); #define netdev_get_prio_tc_map(dev, up) _kc_netdev_get_prio_tc_map(dev, up) #define netdev_set_prio_tc_map(dev, up, tc) do {} while (0) @@ -2946,7 +2974,7 @@ extern u8 _kc_netdev_get_prio_tc_map(struct net_device *dev, u8 up); /* use < 2.6.40 because of a Fedora 15 kernel update where they * updated the kernel version to 2.6.40.x and they back-ported 3.0 features * like set_phys_id for ethtool. - */ + */ #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,40) ) #ifdef ETHTOOL_GRXRINGS #ifndef FLOW_EXT @@ -2990,6 +3018,18 @@ struct _kc_ethtool_rx_flow_spec { #define HAVE_ETHTOOL_SET_PHYS_ID #endif /* < 2.6.40 */ +/*****************************************************************************/ +#undef CONFIG_IXGBE_PTP +#ifdef IXGBE_PTP +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) ) && (defined(CONFIG_PTP_1588_CLOCK) || defined(CONFIG_PTP_1588_CLOCK_MODULE)) +#define CONFIG_IXGBE_PTP +#else +#error Cannot enable PTP Hardware Clock due to insufficient kernel support +#endif +#endif /* IXGBE_PTP */ + +/*****************************************************************************/ + /*****************************************************************************/ #if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) ) #ifndef __netdev_alloc_skb_ip_align @@ -3062,6 +3102,13 @@ static inline void __kc_skb_frag_unref(skb_frag_t *frag) put_page(skb_frag_page(frag)); } #endif /* __skb_frag_unref */ + +#ifndef SPEED_UNKNOWN +#define SPEED_UNKNOWN -1 +#endif +#ifndef DUPLEX_UNKNOWN +#define DUPLEX_UNKNOWN 0xff +#endif #else /* < 3.2.0 */ #ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED #define HAVE_PCI_DEV_FLAGS_ASSIGNED @@ -3069,9 +3116,7 @@ static inline void __kc_skb_frag_unref(skb_frag_t *frag) #endif #endif /* < 3.2.0 */ -#if (RHEL_RELEASE_CODE && \ - (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)) && \ - (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6,2)) #undef ixgbe_get_netdev_tc_txq #define ixgbe_get_netdev_tc_txq(dev, tc) (&netdev_extended(dev)->qos_data.tc_to_txq[tc]) #endif @@ -3102,7 +3147,9 @@ typedef u32 netdev_features_t; /*****************************************************************************/ #if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) ) +#define skb_tx_timestamp(skb) do {} while (0) #else #define HAVE_FDB_OPS +#define HAVE_ETHTOOL_GET_TS_INFO #endif /* < 3.5.0 */ #endif /* _KCOMPAT_H_ */
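
Notes on selected changes
-------------------------

The reworked VF rate-limit path programs RTTBCNRC with a single fixed-point
"rate factor" derived from the link speed divided by the requested Tx rate
(the older code computed separate integer and decimal parts from the same
ratio). The stand-alone sketch below only illustrates that arithmetic; the
shift, masks, and enable bit are assumptions made for the example and are not
copied from ixgbe_type.h.

    /*
     * Illustrative sketch of the fixed-point rate-factor calculation used
     * when limiting a VF's Tx rate.  Register-layout constants below are
     * assumptions for the example, not the driver's definitions.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define RF_INT_SHIFT   14          /* assumed: fractional field width  */
    #define RF_DEC_MASK    0x00003FFFu /* assumed: fractional-part mask    */
    #define RF_INT_MASK    (RF_DEC_MASK << RF_INT_SHIFT)
    #define RS_ENA         0x80000000u /* assumed: rate-scheduler enable   */

    static uint32_t rate_factor(uint32_t link_mbps, uint32_t tx_rate_mbps)
    {
            uint32_t bcnrc = 0;

            if (tx_rate_mbps) {
                    /* rate factor = link speed / Tx rate, in fixed point */
                    bcnrc = link_mbps;
                    bcnrc <<= RF_INT_SHIFT;
                    bcnrc /= tx_rate_mbps;

                    /* keep only the rate-factor bits, then enable limiting */
                    bcnrc &= RF_INT_MASK | RF_DEC_MASK;
                    bcnrc |= RS_ENA;
            }
            return bcnrc;
    }

    int main(void)
    {
            /* e.g. limit a VF to 1000 Mbps on a 10000 Mbps link */
            printf("register value: 0x%08x\n", (unsigned)rate_factor(10000, 1000));
            return 0;
    }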
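The ___kc_skb_tx_hash() backport maps a 32-bit flow hash onto the range of
real Tx queues with a 64-bit multiply and right shift instead of a modulo,
which avoids a division on the transmit path. A minimal illustration of that
scaling step follows; the mixing function is only a stand-in for
jhash_1word(), and all names here are invented for the example.

    /* Illustrative sketch of hash-to-queue scaling (not kernel code). */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t mix32(uint32_t x, uint32_t seed)
    {
            /* stand-in for jhash_1word(); any decent 32-bit mixer works */
            x ^= seed;
            x *= 0x9e3779b1u;
            x ^= x >> 16;
            return x;
    }

    static uint16_t pick_tx_queue(uint32_t flow_hash, uint16_t num_tx_queues,
                                  uint32_t seed)
    {
            uint32_t hash = mix32(flow_hash, seed);

            /* map a full-range 32-bit hash onto [0, num_tx_queues) */
            return (uint16_t)(((uint64_t)hash * num_tx_queues) >> 32);
    }

    int main(void)
    {
            printf("queue = %u\n", (unsigned)pick_tx_queue(0x12345678u, 8, 0xdeadbeefu));
            return 0;
    }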
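kcompat.h now supplies __ALIGN_MASK() when the kernel headers do not define
it. The macro rounds a value up to the next multiple of (mask + 1), where
mask is one less than a power of two, as the small demonstration below shows.

    /* Demonstration of the __ALIGN_MASK() fallback added to kcompat.h. */
    #include <stdio.h>

    #define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))

    int main(void)
    {
            /* round 18 up to a multiple of 8: (18 + 7) & ~7 = 24 */
            printf("%d\n", __ALIGN_MASK(18, 7));
            return 0;
    }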
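Several of the kcompat additions (for example the pci_num_vf() fallback)
follow the same pattern: guard on LINUX_VERSION_CODE and, where needed,
RHEL_RELEASE_CODE, provide a _kc_-prefixed implementation, and map the
upstream name onto it so the rest of the driver can call the modern API
unconditionally. A toy, user-space rendering of that pattern follows; the
helper name and version threshold are invented, and the version macros are
stand-ins defined locally for the demonstration.

    /* Illustrative sketch of a version-gated compatibility fallback. */
    #include <stdio.h>

    /* stand-ins for the kernel's version macros, for demonstration only */
    #define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
    #define LINUX_VERSION_CODE      KERNEL_VERSION(2, 6, 32)

    #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 34))
    /* older "kernel": provide a prefixed fallback and alias the real name */
    static int _kc_count_virtfns(void)
    {
            return 0;   /* pretend the feature does not exist yet */
    }
    #define count_virtfns() _kc_count_virtfns()
    #endif

    int main(void)
    {
            printf("virtual functions: %d\n", count_virtfns());
            return 0;
    }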