From 0404249fedb1fae08271db11e218226dc478b856 Mon Sep 17 00:00:00 2001
From: Petr Oros
Date: Tue, 19 Jul 2016 15:46:10 +0200
Subject: [PATCH] backport ixgbe for RHEL7.2GA

Adapt the upstream ixgbe driver to the RHEL 7.2 GA kernel: drop the L2
forwarding offload (macvlan dfwd) and VF trust support, restore the older
ndo callback signatures and netdev feature flag names, and add local
page_is_pfmemalloc()/page_ref_inc() compatibility helpers.

Signed-off-by: Petr Oros
---
 drivers/net/ethernet/intel/ixgbe/ixgbe.h       |  30 +-
 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c   |  15 +-
 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c  | 511 ++++---------------------
 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c |  60 +--
 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h |   1 -
 5 files changed, 118 insertions(+), 499 deletions(-)

diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index c72c6d6..f98e081 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -150,7 +150,6 @@ struct vf_data_storage {
 	u16 tx_rate;
 	u8 spoofchk_enabled;
 	bool rss_query_enabled;
-	u8 trusted;
 	int xcast_mode;
 	unsigned int vf_api;
 };
@@ -234,15 +233,6 @@ enum ixgbe_ring_state_t {
 	__IXGBE_RX_FCOE,
 };
 
-struct ixgbe_fwd_adapter {
-	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
-	struct net_device *netdev;
-	struct ixgbe_adapter *real_adapter;
-	unsigned int tx_base_queue;
-	unsigned int rx_base_queue;
-	int pool;
-};
-
 #define check_for_tx_hang(ring) \
 	test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
 #define set_check_for_tx_hang(ring) \
@@ -260,7 +250,6 @@ struct ixgbe_ring {
 	struct ixgbe_q_vector *q_vector; /* backpointer to host q_vector */
 	struct net_device *netdev;	/* netdev ring belongs to */
 	struct device *dev;		/* device for DMA mapping */
-	struct ixgbe_fwd_adapter *l2_accel_priv;
 	void *desc;			/* descriptor ring memory */
 	union {
 		struct ixgbe_tx_buffer *tx_buffer_info;
@@ -337,6 +326,25 @@ struct ixgbe_ring_feature {
 #define IXGBE_82599_VMDQ_2Q_MASK 0x7E
 
 /*
+ * Return true only if the page has been allocated with
+ * ALLOC_NO_WATERMARKS and the low watermark was not
+ * met implying that the system is under some pressure.
+ */
+static inline bool page_is_pfmemalloc(struct page *page)
+{
+	/*
+	 * Page index cannot be this large so this must be
+	 * a pfmemalloc page.
+	 */
+	return page->index == -1UL;
+}
+
+static inline void page_ref_inc(struct page *page)
+{
+	atomic_inc(&page->_count);
+}
+
+/*
  * FCoE requires that all Rx buffers be over 2200 bytes in length. Since
  * this is twice the size of a half page we need to double the page order
  * for FCoE enabled Rx queues.
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 6303278..20d297b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -502,7 +502,6 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
 #ifdef IXGBE_FCOE
 	u16 fcoe_i = 0;
 #endif
-	bool pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);
 
 	/* only proceed if SR-IOV is enabled */
 	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
@@ -515,7 +514,7 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
 	vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);
 
 	/* 64 pool mode with 2 queues per pool */
-	if ((vmdq_i > 32) || (rss_i < 4) || (vmdq_i > 16 && pools)) {
+	if ((vmdq_i > 32) || (rss_i < 4)) {
 		vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
 		rss_m = IXGBE_RSS_2Q_MASK;
 		rss_i = min_t(u16, rss_i, 2);
@@ -885,11 +884,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
 
 		/* apply Tx specific ring traits */
 		ring->count = adapter->tx_ring_count;
-		if (adapter->num_rx_pools > 1)
-			ring->queue_index =
-				txr_idx % adapter->num_rx_queues_per_pool;
-		else
-			ring->queue_index = txr_idx;
+		ring->queue_index = txr_idx;
 
 		/* assign ring to adapter */
 		adapter->tx_ring[txr_idx] = ring;
@@ -932,11 +927,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
 #endif /* IXGBE_FCOE */
 		/* apply Rx specific ring traits */
 		ring->count = adapter->rx_ring_count;
-		if (adapter->num_rx_pools > 1)
-			ring->queue_index =
-				rxr_idx % adapter->num_rx_queues_per_pool;
-		else
-			ring->queue_index = rxr_idx;
+		ring->queue_index = rxr_idx;
 
 		/* assign ring to adapter */
 		adapter->rx_ring[rxr_idx] = ring;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index b28281c..8cd9b78 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -46,7 +46,6 @@
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
@@ -1049,18 +1048,11 @@ static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
 
 static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
 {
-	struct ixgbe_adapter *adapter;
-	struct ixgbe_hw *hw;
-	u32 head, tail;
-
-	if (ring->l2_accel_priv)
-		adapter = ring->l2_accel_priv->real_adapter;
-	else
-		adapter = netdev_priv(ring->netdev);
+	struct ixgbe_adapter *adapter = netdev_priv(ring->netdev);
+	struct ixgbe_hw *hw = &adapter->hw;
 
-	hw = &adapter->hw;
-	head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
-	tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));
+	u32 head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
+	u32 tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));
 
 	if (head != tail)
 		return (head < tail) ?
@@ -3176,7 +3168,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
 		struct ixgbe_q_vector *q_vector = ring->q_vector;
 
 		if (q_vector)
-			netif_set_xps_queue(ring->netdev,
+			netif_set_xps_queue(adapter->netdev,
 					    &q_vector->affinity_mask,
 					    ring->queue_index);
 	}
@@ -3698,7 +3690,7 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 	int rss_i = adapter->ring_feature[RING_F_RSS].indices;
-	u16 pool;
+	int p;
 
 	/* PSRTYPE must be initialized in non 82598 adapters */
 	u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
@@ -3715,8 +3707,9 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
 	else if (rss_i > 1)
 		psrtype |= 1u << 29;
 
-	for_each_set_bit(pool, &adapter->fwd_bitmask, 32)
-		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
+	for (p = 0; p < adapter->num_rx_pools; p++)
+		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(p)),
+				psrtype);
 }
 
 static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
@@ -4009,11 +4002,7 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
 	case ixgbe_mac_X550EM_x:
 	case ixgbe_mac_x550em_a:
 		for (i = 0; i < adapter->num_rx_queues; i++) {
-			struct ixgbe_ring *ring = adapter->rx_ring[i];
-
-			if (ring->l2_accel_priv)
-				continue;
-			j = ring->reg_idx;
+			j = adapter->rx_ring[i]->reg_idx;
 			vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
 			vlnctrl &= ~IXGBE_RXDCTL_VME;
 			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
@@ -4046,11 +4035,7 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
 	case ixgbe_mac_X550EM_x:
 	case ixgbe_mac_x550em_a:
 		for (i = 0; i < adapter->num_rx_queues; i++) {
-			struct ixgbe_ring *ring = adapter->rx_ring[i];
-
-			if (ring->l2_accel_priv)
-				continue;
-			j = ring->reg_idx;
+			j = adapter->rx_ring[i]->reg_idx;
 			vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
 			vlnctrl |= IXGBE_RXDCTL_VME;
 			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
@@ -4380,34 +4365,6 @@ int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
 	return -ENOMEM;
 }
 
-/**
- * ixgbe_write_uc_addr_list - write unicast addresses to RAR table
- * @netdev: network interface device structure
- *
- * Writes unicast address list to the RAR table.
- * Returns: -ENOMEM on failure/insufficient address space
- *          0 on no addresses written
- *          X on writing X addresses to the RAR table
- **/
-static int ixgbe_write_uc_addr_list(struct net_device *netdev, int vfn)
-{
-	struct ixgbe_adapter *adapter = netdev_priv(netdev);
-	int count = 0;
-
-	/* return ENOMEM indicating insufficient memory for addresses */
-	if (netdev_uc_count(netdev) > ixgbe_available_rars(adapter, vfn))
-		return -ENOMEM;
-
-	if (!netdev_uc_empty(netdev)) {
-		struct netdev_hw_addr *ha;
-		netdev_for_each_uc_addr(ha, netdev) {
-			ixgbe_del_mac_filter(adapter, ha->addr, vfn);
-			ixgbe_add_mac_filter(adapter, ha->addr, vfn);
-			count++;
-		}
-	}
-	return count;
-}
 
 static int ixgbe_uc_sync(struct net_device *netdev, const unsigned char *addr)
 {
@@ -4790,217 +4747,6 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
 	spin_unlock(&adapter->fdir_perfect_lock);
 }
 
-static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool,
-				      struct ixgbe_adapter *adapter)
-{
-	struct ixgbe_hw *hw = &adapter->hw;
-	u32 vmolr;
-
-	/* No unicast promiscuous support for VMDQ devices. */
-	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool));
-	vmolr |= (IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE);
-
-	/* clear the affected bit */
-	vmolr &= ~IXGBE_VMOLR_MPE;
-
-	if (dev->flags & IFF_ALLMULTI) {
-		vmolr |= IXGBE_VMOLR_MPE;
-	} else {
-		vmolr |= IXGBE_VMOLR_ROMPE;
-		hw->mac.ops.update_mc_addr_list(hw, dev);
-	}
-	ixgbe_write_uc_addr_list(adapter->netdev, pool);
-	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
-}
-
-static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter)
-{
-	struct ixgbe_adapter *adapter = vadapter->real_adapter;
-	int rss_i = vadapter->netdev->real_num_rx_queues;
-	struct ixgbe_hw *hw = &adapter->hw;
-	u16 pool = vadapter->pool;
-	u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
-		      IXGBE_PSRTYPE_UDPHDR |
-		      IXGBE_PSRTYPE_IPV4HDR |
-		      IXGBE_PSRTYPE_L2HDR |
-		      IXGBE_PSRTYPE_IPV6HDR;
-
-	if (hw->mac.type == ixgbe_mac_82598EB)
-		return;
-
-	if (rss_i > 3)
-		psrtype |= 2u << 29;
-	else if (rss_i > 1)
-		psrtype |= 1u << 29;
-
-	IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
-}
-
-/**
- * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
- * @rx_ring: ring to free buffers from
- **/
-static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
-{
-	struct device *dev = rx_ring->dev;
-	unsigned long size;
-	u16 i;
-
-	/* ring already cleared, nothing to do */
-	if (!rx_ring->rx_buffer_info)
-		return;
-
-	/* Free all the Rx ring sk_buffs */
-	for (i = 0; i < rx_ring->count; i++) {
-		struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
-
-		if (rx_buffer->skb) {
-			struct sk_buff *skb = rx_buffer->skb;
-			if (IXGBE_CB(skb)->page_released)
-				dma_unmap_page(dev,
-					       IXGBE_CB(skb)->dma,
-					       ixgbe_rx_bufsz(rx_ring),
-					       DMA_FROM_DEVICE);
-			dev_kfree_skb(skb);
-			rx_buffer->skb = NULL;
-		}
-
-		if (!rx_buffer->page)
-			continue;
-
-		dma_unmap_page(dev, rx_buffer->dma,
-			       ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
-		__free_pages(rx_buffer->page, ixgbe_rx_pg_order(rx_ring));
-
-		rx_buffer->page = NULL;
-	}
-
-	size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
-	memset(rx_ring->rx_buffer_info, 0, size);
-
-	/* Zero out the descriptor ring */
-	memset(rx_ring->desc, 0, rx_ring->size);
-
-	rx_ring->next_to_alloc = 0;
-	rx_ring->next_to_clean = 0;
-	rx_ring->next_to_use = 0;
-}
-
-static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter,
-				   struct ixgbe_ring *rx_ring)
-{
-	struct ixgbe_adapter *adapter = vadapter->real_adapter;
-	int index = rx_ring->queue_index + vadapter->rx_base_queue;
-
-	/* shutdown specific queue receive and wait for dma to settle */
-	ixgbe_disable_rx_queue(adapter, rx_ring);
-	usleep_range(10000, 20000);
-	ixgbe_irq_disable_queues(adapter, BIT_ULL(index));
-	ixgbe_clean_rx_ring(rx_ring);
-	rx_ring->l2_accel_priv = NULL;
-}
-
-int ixgbe_fwd_ring_down(struct net_device *vdev,
-			struct ixgbe_fwd_adapter *accel)
-{
-	struct ixgbe_adapter *adapter = accel->real_adapter;
-	unsigned int rxbase = accel->rx_base_queue;
-	unsigned int txbase = accel->tx_base_queue;
-	int i;
-
-	netif_tx_stop_all_queues(vdev);
-
-	for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
-		ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]);
-		adapter->rx_ring[rxbase + i]->netdev = adapter->netdev;
-	}
-
-	for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
-		adapter->tx_ring[txbase + i]->l2_accel_priv = NULL;
-		adapter->tx_ring[txbase + i]->netdev = adapter->netdev;
-	}
-
-
-	return 0;
-}
-
-static int ixgbe_fwd_ring_up(struct net_device *vdev,
-			     struct ixgbe_fwd_adapter *accel)
-{
-	struct ixgbe_adapter *adapter = accel->real_adapter;
-	unsigned int rxbase, txbase, queues;
-	int i, baseq, err = 0;
-
-	if (!test_bit(accel->pool, &adapter->fwd_bitmask))
-		return 0;
-
-	baseq = accel->pool * adapter->num_rx_queues_per_pool;
-	netdev_dbg(vdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
-		   accel->pool, adapter->num_rx_pools,
-		   baseq, baseq + adapter->num_rx_queues_per_pool,
-		   adapter->fwd_bitmask);
-
-	accel->netdev = vdev;
-	accel->rx_base_queue = rxbase = baseq;
-	accel->tx_base_queue = txbase = baseq;
-
-	for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
-		ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]);
-
-	for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
-		adapter->rx_ring[rxbase + i]->netdev = vdev;
-		adapter->rx_ring[rxbase + i]->l2_accel_priv = accel;
-		ixgbe_configure_rx_ring(adapter, adapter->rx_ring[rxbase + i]);
-	}
-
-	for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
-		adapter->tx_ring[txbase + i]->netdev = vdev;
-		adapter->tx_ring[txbase + i]->l2_accel_priv = accel;
-	}
-
-	queues = min_t(unsigned int,
-		       adapter->num_rx_queues_per_pool, vdev->num_tx_queues);
-	err = netif_set_real_num_tx_queues(vdev, queues);
-	if (err)
-		goto fwd_queue_err;
-
-	queues = min_t(unsigned int,
-		       adapter->num_rx_queues_per_pool, vdev->num_rx_queues);
-	err = netif_set_real_num_rx_queues(vdev, queues);
-	if (err)
-		goto fwd_queue_err;
-
-	if (is_valid_ether_addr(vdev->dev_addr))
-		ixgbe_add_mac_filter(adapter, vdev->dev_addr, accel->pool);
-
-	ixgbe_fwd_psrtype(accel);
-	ixgbe_macvlan_set_rx_mode(vdev, accel->pool, adapter);
-	return err;
-fwd_queue_err:
-	ixgbe_fwd_ring_down(vdev, accel);
-	return err;
-}
-
-static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter)
-{
-	struct net_device *upper;
-	struct list_head *iter;
-	int err;
-
-	netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
-		if (netif_is_macvlan(upper)) {
-			struct macvlan_dev *dfwd = netdev_priv(upper);
-			struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv;
-
-			if (dfwd->fwd_priv) {
-				err = ixgbe_fwd_ring_up(upper, vadapter);
-				if (err)
-					continue;
-			}
-		}
-	}
-}
-
 static void ixgbe_configure(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
@@ -5058,7 +4804,6 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
 #endif /* IXGBE_FCOE */
 	ixgbe_configure_tx(adapter);
 	ixgbe_configure_rx(adapter);
-	ixgbe_configure_dfwd(adapter);
 }
 
 /**
@@ -5196,8 +4941,6 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
 static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
-	struct net_device *upper;
-	struct list_head *iter;
 	int err;
 	u32 ctrl_ext;
 
@@ -5245,16 +4988,6 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
 	/* enable transmits */
 	netif_tx_start_all_queues(adapter->netdev);
 
-	/* enable any upper devices */
-	netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
-		if (netif_is_macvlan(upper)) {
-			struct macvlan_dev *vlan = netdev_priv(upper);
-
-			if (vlan->fwd_priv)
-				netif_tx_start_all_queues(upper);
-		}
-	}
-
 	/* bring the link up in the watchdog, this could race with our first
 	 * link up interrupt but shouldn't be a problem */
 	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
@@ -5360,6 +5093,56 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
 }
 
 /**
+ * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
+ * @rx_ring: ring to free buffers from
+ **/
+static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
+{
+	struct device *dev = rx_ring->dev;
+	unsigned long size;
+	u16 i;
+
+	/* ring already cleared, nothing to do */
+	if (!rx_ring->rx_buffer_info)
+		return;
+
+	/* Free all the Rx ring sk_buffs */
+	for (i = 0; i < rx_ring->count; i++) {
+		struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
+
+		if (rx_buffer->skb) {
+			struct sk_buff *skb = rx_buffer->skb;
+			if (IXGBE_CB(skb)->page_released)
+				dma_unmap_page(dev,
+					       IXGBE_CB(skb)->dma,
+					       ixgbe_rx_bufsz(rx_ring),
+					       DMA_FROM_DEVICE);
+			dev_kfree_skb(skb);
+			rx_buffer->skb = NULL;
+		}
+
+		if (!rx_buffer->page)
+			continue;
+
+		dma_unmap_page(dev, rx_buffer->dma,
+			       ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
+		__free_pages(rx_buffer->page, ixgbe_rx_pg_order(rx_ring));
+
+		rx_buffer->page = NULL;
+	}
+
+	size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
+	memset(rx_ring->rx_buffer_info, 0, size);
+
+	/* Zero out the descriptor ring */
+	memset(rx_ring->desc, 0, rx_ring->size);
+
+	rx_ring->next_to_alloc = 0;
+	rx_ring->next_to_clean = 0;
+	rx_ring->next_to_use = 0;
+}
+
+/**
  * ixgbe_clean_tx_ring - Free Tx Buffers
  * @tx_ring: ring to be cleaned
 **/
@@ -5436,8 +5219,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
 	struct ixgbe_hw *hw = &adapter->hw;
-	struct net_device *upper;
-	struct list_head *iter;
 	int i;
 
 	/* signal that we are down to the interrupt handler */
@@ -5459,19 +5240,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
 	netif_carrier_off(netdev);
 	netif_tx_disable(netdev);
 
-	/* disable any upper devices */
-	netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
-		if (netif_is_macvlan(upper)) {
-			struct macvlan_dev *vlan = netdev_priv(upper);
-
-			if (vlan->fwd_priv) {
-				netif_tx_stop_all_queues(upper);
-				netif_carrier_off(upper);
-				netif_tx_disable(upper);
-			}
-		}
-	}
-
 	ixgbe_irq_disable(adapter);
 
 	ixgbe_napi_disable_all(adapter);
@@ -5760,8 +5528,6 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
 		return -EIO;
 	}
 
-	/* PF holds first pool slot */
-	set_bit(0, &adapter->fwd_bitmask);
 	set_bit(__IXGBE_DOWN, &adapter->state);
 
 	return 0;
@@ -6068,7 +5834,7 @@ int ixgbe_open(struct net_device *netdev)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_hw *hw = &adapter->hw;
-	int err, queues;
+	int err;
 
 	/* disallow open during test */
 	if (test_bit(__IXGBE_TESTING, &adapter->state))
@@ -6093,21 +5859,15 @@ int ixgbe_open(struct net_device *netdev)
 		goto err_req_irq;
 
 	/* Notify the stack of the actual queue counts. */
-	if (adapter->num_rx_pools > 1)
-		queues = adapter->num_rx_queues_per_pool;
-	else
-		queues = adapter->num_tx_queues;
-
-	err = netif_set_real_num_tx_queues(netdev, queues);
+	err = netif_set_real_num_tx_queues(netdev,
+					   adapter->num_rx_pools > 1 ? 1 :
+					   adapter->num_tx_queues);
 	if (err)
 		goto err_set_queues;
 
-	if (adapter->num_rx_pools > 1 &&
-	    adapter->num_rx_queues > IXGBE_MAX_L2A_QUEUES)
-		queues = IXGBE_MAX_L2A_QUEUES;
-	else
-		queues = adapter->num_rx_queues;
-	err = netif_set_real_num_rx_queues(netdev, queues);
+	err = netif_set_real_num_rx_queues(netdev,
+					   adapter->num_rx_pools > 1 ? 1 :
+					   adapter->num_rx_queues);
 	if (err)
 		goto err_set_queues;
 
@@ -7713,8 +7473,7 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
 					      input, common, ring->queue_index);
 }
 
-static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
-			      void *accel_priv, select_queue_fallback_t fallback)
+static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
 {
 #if 0 /* RHEL - ixgbe_fwd_adapter not defined now */
 	struct ixgbe_fwd_adapter *fwd_adapter = accel_priv;
@@ -7744,7 +7503,7 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
 		if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
 			break;
 	default:
-		return fallback(dev, skb);
+		return __netdev_pick_tx(dev, skb);
 	}
 
 	f = &adapter->ring_feature[RING_F_FCOE];
@@ -7757,7 +7516,7 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
 
 	return txq + f->offset;
 #else
-	return fallback(dev, skb);
+	return __netdev_pick_tx(dev, skb);
 #endif
 }
 
@@ -7895,9 +7654,8 @@ out_drop:
 	return NETDEV_TX_OK;
 }
 
-static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb,
-				      struct net_device *netdev,
-				      struct ixgbe_ring *ring)
+static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
+				    struct net_device *netdev)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_ring *tx_ring;
@@ -7909,17 +7667,10 @@ static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb,
 	if (skb_put_padto(skb, 17))
 		return NETDEV_TX_OK;
 
-	tx_ring = ring ? ring : adapter->tx_ring[skb->queue_mapping];
-
+	tx_ring = adapter->tx_ring[skb->queue_mapping];
 	return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
 }
 
-static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
-				    struct net_device *netdev)
-{
-	return __ixgbe_xmit_frame(skb, netdev, NULL);
-}
-
 /**
  * ixgbe_set_mac - Change the Ethernet Address of the NIC
  * @netdev: network interface device structure
@@ -8173,7 +7924,6 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
 	struct ixgbe_hw *hw = &adapter->hw;
-	bool pools;
 
 	/* Hardware supports up to 8 traffic classes */
 	if (tc > adapter->dcb_cfg.num_tcs.pg_tcs)
@@ -8182,10 +7932,6 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
 	if (hw->mac.type == ixgbe_mac_82598EB && tc && tc < MAX_TRAFFIC_CLASS)
 		return -EINVAL;
 
-	pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);
-	if (tc && pools && adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS)
-		return -EBUSY;
-
 	/* Hardware has to reinitialize queues and interrupts to
 	 * match packet buffer alignment. Unfortunately, the
 	 * hardware is not flexible enough to do this dynamically.
@@ -8231,16 +7977,6 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
 	return 0;
 }
 
-static int __ixgbe_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
-			    struct tc_to_netdev *tc)
-{
-	/* Only support egress tc setup for now */
-	if (tc->type != TC_SETUP_MQPRIO)
-		return -EINVAL;
-
-	return ixgbe_setup_tc(dev, tc->tc);
-}
-
 #ifdef CONFIG_PCI_IOV
 void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter)
 {
@@ -8428,7 +8164,7 @@ static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
 
 static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
 			     struct net_device *dev,
-			     const unsigned char *addr, u16 vid,
+			     const unsigned char *addr,
 			     u16 flags)
 {
 	/* guarantee we can provide a unique filter for the unicast address */
@@ -8440,7 +8176,7 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
 		return -ENOMEM;
 	}
 
-	return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
+	return ndo_dflt_fdb_add(ndm, tb, dev, addr, flags);
 }
 
 /**
@@ -8518,7 +8254,7 @@ static int ixgbe_configure_bridge_mode(struct ixgbe_adapter *adapter,
 }
 
 static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
-				    struct nlmsghdr *nlh, u16 flags)
+				    struct nlmsghdr *nlh)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
 	struct nlattr *attr, *br_spec;
@@ -8554,7 +8290,7 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
 
 static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
 				    struct net_device *dev,
-				    u32 filter_mask, int nlflags)
+				    u32 filter_mask)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
 
@@ -8562,81 +8298,7 @@ static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
 		return 0;
 
 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
-				       adapter->bridge_mode, 0, 0, nlflags);
-}
-
-static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
-{
-	struct ixgbe_fwd_adapter *fwd_adapter = NULL;
-	struct ixgbe_adapter *adapter = netdev_priv(pdev);
-	int pool, err;
-
-	/* Check for hardware restriction on number of rx/tx queues */
-	if (vdev->num_rx_queues != vdev->num_tx_queues ||
-	    vdev->num_tx_queues > IXGBE_MAX_L2A_QUEUES ||
-	    vdev->num_tx_queues == IXGBE_BAD_L2A_QUEUE) {
-		netdev_info(pdev,
-			    "%s: Supports RX/TX Queue counts 1,2, and 4\n",
-			    pdev->name);
-		return ERR_PTR(-EINVAL);
-	}
-
-	if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
-	      adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS - 1) ||
-	    (adapter->num_rx_pools > IXGBE_MAX_MACVLANS))
-		return ERR_PTR(-EBUSY);
-
-	fwd_adapter = kcalloc(1, sizeof(struct ixgbe_fwd_adapter), GFP_KERNEL);
-	if (!fwd_adapter)
-		return ERR_PTR(-ENOMEM);
-
-	pool = find_first_zero_bit(&adapter->fwd_bitmask, 32);
-	adapter->num_rx_pools++;
-	set_bit(pool, &adapter->fwd_bitmask);
-
-	/* Enable VMDq flag so device will be set in VM mode */
-	adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED;
-	adapter->ring_feature[RING_F_VMDQ].limit = adapter->num_rx_pools;
-	adapter->ring_feature[RING_F_RSS].limit = vdev->num_rx_queues;
-
-	/* Force reinit of ring allocation with VMDQ enabled */
-	err = ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev));
-	if (err)
-		goto fwd_add_err;
-	fwd_adapter->pool = pool;
-	fwd_adapter->real_adapter = adapter;
-	err = ixgbe_fwd_ring_up(vdev, fwd_adapter);
-	if (err)
-		goto fwd_add_err;
-	netif_tx_start_all_queues(vdev);
-	return fwd_adapter;
-fwd_add_err:
-	/* unwind counter and free adapter struct */
-	netdev_info(pdev,
-		    "%s: dfwd hardware acceleration failed\n", vdev->name);
-	clear_bit(pool, &adapter->fwd_bitmask);
-	adapter->num_rx_pools--;
-	kfree(fwd_adapter);
-	return ERR_PTR(err);
-}
-
-static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
-{
-	struct ixgbe_fwd_adapter *fwd_adapter = priv;
-	struct ixgbe_adapter *adapter = fwd_adapter->real_adapter;
-
-	clear_bit(fwd_adapter->pool, &adapter->fwd_bitmask);
-	adapter->num_rx_pools--;
-
-	adapter->ring_feature[RING_F_VMDQ].limit = adapter->num_rx_pools;
-	ixgbe_fwd_ring_down(fwd_adapter->netdev, fwd_adapter);
-	ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev));
-	netdev_dbg(pdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
-		   fwd_adapter->pool, adapter->num_rx_pools,
-		   fwd_adapter->rx_base_queue,
-		   fwd_adapter->rx_base_queue + adapter->num_rx_queues_per_pool,
-		   adapter->fwd_bitmask);
-	kfree(fwd_adapter);
+				       adapter->bridge_mode, 0, 0);
 }
 
 #define IXGBE_MAX_TUNNEL_HDR_LEN 80
@@ -8649,13 +8311,12 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
 
 	if (unlikely(skb_inner_mac_header(skb) - skb_transport_header(skb) >
 		     IXGBE_MAX_TUNNEL_HDR_LEN))
-		return features & ~NETIF_F_CSUM_MASK;
+		return features & ~NETIF_F_ALL_CSUM;
 
 	return features;
 }
 
 static const struct net_device_ops ixgbe_netdev_ops = {
-	.ndo_size		= sizeof(struct net_device_ops),
 	.ndo_open		= ixgbe_open,
 	.ndo_stop		= ixgbe_close,
 	.ndo_start_xmit		= ixgbe_xmit_frame,
@@ -8673,11 +8334,10 @@ static const struct net_device_ops ixgbe_netdev_ops = {
 	.ndo_set_vf_rate	= ixgbe_ndo_set_vf_bw,
 	.ndo_set_vf_spoofchk	= ixgbe_ndo_set_vf_spoofchk,
 	.ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en,
-	.extended.ndo_set_vf_trust = ixgbe_ndo_set_vf_trust,
 	.ndo_get_vf_config	= ixgbe_ndo_get_vf_config,
 	.ndo_get_stats64	= ixgbe_get_stats64,
 #ifdef CONFIG_IXGBE_DCB
-	.ndo_setup_tc		= __ixgbe_setup_tc,
+	.ndo_setup_tc		= ixgbe_setup_tc,
 #endif
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= ixgbe_netpoll,
@@ -8704,8 +8364,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
 	.ndo_del_vxlan_port	= ixgbe_del_vxlan_port,
 #endif /* CONFIG_IXGBE_VXLAN */
 	.ndo_features_check	= ixgbe_features_check,
-	.extended.ndo_dfwd_add_station = ixgbe_fwd_add,
-	.extended.ndo_dfwd_del_station = ixgbe_fwd_del,
 };
 
 /**
@@ -9048,12 +8706,11 @@ skip_sriov:
 			   NETIF_F_HW_VLAN_CTAG_FILTER;
 
 	if (hw->mac.type >= ixgbe_mac_82599EB)
-		netdev->features |= NETIF_F_SCTP_CRC;
+		netdev->features |= NETIF_F_SCTP_CSUM;
 
 	/* copy netdev features into list of user selectable features */
 	netdev->hw_features |= netdev->features;
-	netdev->hw_features |= NETIF_F_RXALL |
-			       NETIF_F_HW_L2FW_DOFFLOAD;
+	netdev->hw_features |= NETIF_F_RXALL;
 
 	if (hw->mac.type >= ixgbe_mac_82599EB)
 		netdev->hw_features |= NETIF_F_NTUPLE;
@@ -9062,7 +8719,7 @@ skip_sriov:
 			   NETIF_F_TSO |
 			   NETIF_F_TSO6 |
 			   NETIF_F_HW_CSUM |
-			   NETIF_F_SCTP_CRC;
+			   NETIF_F_SCTP_CSUM;
 
 	netdev->mpls_features |= NETIF_F_HW_CSUM;
 	netdev->hw_enc_features |= NETIF_F_HW_CSUM;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 1a90fbe..637e98a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -117,9 +117,6 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
 		 */
 		adapter->vfinfo[i].rss_query_enabled = 0;
 
-		/* Untrust all VFs */
-		adapter->vfinfo[i].trusted = false;
-
 		/* set the default xcast mode */
 		adapter->vfinfo[i].xcast_mode = IXGBEVF_XCAST_MODE_NONE;
 	}
@@ -280,19 +277,18 @@ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
 	IXGBE_WRITE_FLUSH(hw);
 
 	/* Disable VMDq flag so device will be set in VM mode */
-	if (adapter->ring_feature[RING_F_VMDQ].limit == 1) {
+	if (adapter->ring_feature[RING_F_VMDQ].limit == 1)
 		adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
-		adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
-		rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());
-	} else {
-		rss = min_t(int, IXGBE_MAX_L2A_QUEUES, num_online_cpus());
-	}
-
 	adapter->ring_feature[RING_F_VMDQ].offset = 0;
+
+	rss = min_t(int, ixgbe_max_rss_indices(adapter),
+		    num_online_cpus());
 	adapter->ring_feature[RING_F_RSS].limit = rss;
 
 	/* take a breather then clean up driver data */
 	msleep(100);
+
+	adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
 	return 0;
 }
 
@@ -353,10 +349,13 @@ static int ixgbe_pci_sriov_disable(struct pci_dev *dev)
 	err = ixgbe_disable_sriov(adapter);
 
 	/* Only reinit if no error and state changed */
+	if (!err && current_flags != adapter->flags) {
+		/* ixgbe_disable_sriov() doesn't clear VMDQ flag */
+		adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
 #ifdef CONFIG_PCI_IOV
-	if (!err && current_flags != adapter->flags)
 		ixgbe_sriov_reinit(adapter);
 #endif
+	}
 
 	return err;
 }
@@ -892,7 +891,7 @@ static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter,
 		return -1;
 	}
 
-	if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted &&
+	if (adapter->vfinfo[vf].pf_set_mac &&
 	    !ether_addr_equal(adapter->vfinfo[vf].vf_mac_addresses, new_mac)) {
 		e_warn(drv,
 		       "VF %d attempted to override administratively set MAC address\n"
@@ -1090,8 +1089,7 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter,
 			return -EOPNOTSUPP;
 	}
 
-	if (xcast_mode > IXGBEVF_XCAST_MODE_MULTI &&
-	    !adapter->vfinfo[vf].trusted) {
+	if (xcast_mode > IXGBEVF_XCAST_MODE_MULTI) {
 		xcast_mode = IXGBEVF_XCAST_MODE_MULTI;
 	}
 
@@ -1254,17 +1252,6 @@ void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter)
 	IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
 }
 
-static inline void ixgbe_ping_vf(struct ixgbe_adapter *adapter, int vf)
-{
-	struct ixgbe_hw *hw = &adapter->hw;
-	u32 ping;
-
-	ping = IXGBE_PF_CONTROL_MSG;
-	if (adapter->vfinfo[vf].clear_to_send)
-		ping |= IXGBE_VT_MSGTYPE_CTS;
-	ixgbe_write_mbx(hw, &ping, 1, vf);
-}
-
 void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
@@ -1563,28 +1550,6 @@ int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf,
 	return 0;
 }
 
-int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
-{
-	struct ixgbe_adapter *adapter = netdev_priv(netdev);
-
-	if (vf >= adapter->num_vfs)
-		return -EINVAL;
-
-	/* nothing to do */
-	if (adapter->vfinfo[vf].trusted == setting)
-		return 0;
-
-	adapter->vfinfo[vf].trusted = setting;
-
-	/* reset VF to reconfigure features */
-	adapter->vfinfo[vf].clear_to_send = false;
-	ixgbe_ping_vf(adapter, vf);
-
-	e_info(drv, "VF %u is %strusted\n", vf, setting ? "" : "not ");
-
-	return 0;
-}
-
 int ixgbe_ndo_get_vf_config(struct net_device *netdev,
 			    int vf, struct ifla_vf_info *ivi)
 {
@@ -1599,6 +1564,5 @@ int ixgbe_ndo_get_vf_config(struct net_device *netdev,
 	ivi->qos = adapter->vfinfo[vf].pf_qos;
 	ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled;
 	ivi->rss_query_en = adapter->vfinfo[vf].rss_query_enabled;
-	ivi->trusted = adapter->vfinfo[vf].trusted;
 	return 0;
 }
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index dad9257..2c197e6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -49,7 +49,6 @@ int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
 int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
 int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf,
 				  bool setting);
-int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting);
 int ixgbe_ndo_get_vf_config(struct net_device *netdev,
 			    int vf, struct ifla_vf_info *ivi);
 void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter);
-- 
1.8.3.1