Commit c437d888 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking updates from David Miller:

 1) ax88796 does 64-bit divides which cause link errors on ARM, fix
    from Arnd Bergmann.

 2) Once an improper offload setting is detected on an SKB, the log
    message is not rate limited, so we can very easily livelock.  From
    Ben Greear.  (See the ratelimit sketch after this list.)

 3) Openvswitch cannot report vport configuration changes reliably
    because it didn't preallocate the netlink notification message
    before changing state.  From Jesse Gross.

 4) The effective UID/GID SCM credentials fix, from Linus.

 5) When a user explicitly asks for wireless authentication, cfg80211
    isn't told about the AP detachment, leaving inconsistent state.
    Fix from Johannes Berg.

 6) Fix self-MAC checks in batman-adv on multi-mesh nodes, from Antonio
    Quartulli.

 7) Revert build_skb() changes in IGB driver, as they can result in
    memory corruption.  From Alexander Duyck.

 8) Fix setting VLANs on virtual functions in IXGBE, from Greg Rose.

 9) Fix TSO races in qlcnic driver, from Sritej Velaga.

10) In bnx2x the kernel driver and UNDI firmware can try to program the
    chip at the same time, resulting in corruption.  Add proper
    synchronization.  From Dmitry Kravkov.

11) Fix corruption of status block in firmware RAM in bnx2x, from Ariel
    Elior.

12) Fix load balancing hash regression of bonding driver in forwarding
    configurations, from Eric Dumazet.

13) Fix TS ECR regression in TCP by calling tcp_replace_ts_recent() in
    all the right spots, from Eric Dumazet.

14) Fix several bonding bugs having to do with address maintenance,
    including not removing addresses when configuration operations
    encounter errors, missing locking on the address lists, missing
    refcounting on VLAN objects, etc.  All from Nikolay Aleksandrov.

15) Add workarounds for firmware bugs in LTE qmi_wwan devices, wherein
    the devices fail to add a proper ethernet header while on LTE
    networks but otherwise properly do so on 2G and 3G ones.  From Bjørn
    Mork.
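
For the rate-limit fix in item 2, the generic guard looks like this (a
minimal sketch with a hypothetical warning site, not the actual patch):

    #include <linux/net.h>      /* net_ratelimit() */
    #include <linux/printk.h>
    #include <linux/skbuff.h>

    /* Called once per bad skb; without the guard, packets arriving at
     * line rate turn the warning itself into a livelock. */
    static void warn_bad_offload(const struct sk_buff *skb)
    {
            if (net_ratelimit())    /* allow only a bounded burst */
                    pr_warn("bad offload settings on skb %p\n", skb);
    }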

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (38 commits)
  net: fix incorrect credentials passing
  net: rate-limit warn-bad-offload splats.
  net: ax88796: avoid 64 bit arithmetic
  qlge: Update version to 1.00.00.32.
  qlge: Fix ethtool autoneg advertising.
  qlge: Fix receive path to drop error frames
  net: qmi_wwan: prevent duplicate mac address on link (firmware bug workaround)
  net: qmi_wwan: fixup destination address (firmware bug workaround)
  net: qmi_wwan: fixup missing ethernet header (firmware bug workaround)
  bonding: in bond_mc_swap() bond's mc addr list is walked without lock
  bonding: disable netpoll on enslave failure
  bonding: primary_slave & curr_active_slave are not cleaned on enslave failure
  bonding: vlans don't get deleted on enslave failure
  bonding: mc addresses don't get deleted on enslave failure
  pkt_sched: fix error return code in fw_change_attrs()
  irda: small read past the end of array in debug code
  tcp: call tcp_replace_ts_recent() from tcp_ack()
  netfilter: xt_rpfilter: skip locally generated broadcast/multicast, too
  netfilter: ipset: bitmap:ip,mac: fix listing with timeout
  bonding: fix l23 and l34 load balancing in forwarding path
  ...
parents f068f5e1 83f1b4ba
@@ -846,8 +846,10 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active,
         if (bond->dev->flags & IFF_ALLMULTI)
                 dev_set_allmulti(old_active->dev, -1);
 
+        netif_addr_lock_bh(bond->dev);
         netdev_for_each_mc_addr(ha, bond->dev)
                 dev_mc_del(old_active->dev, ha->addr);
+        netif_addr_unlock_bh(bond->dev);
     }
 
     if (new_active) {
@@ -858,8 +860,10 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active,
         if (bond->dev->flags & IFF_ALLMULTI)
                 dev_set_allmulti(new_active->dev, 1);
 
+        netif_addr_lock_bh(bond->dev);
         netdev_for_each_mc_addr(ha, bond->dev)
                 dev_mc_add(new_active->dev, ha->addr);
+        netif_addr_unlock_bh(bond->dev);
     }
 }
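
The rule these two hunks enforce, in isolation (a minimal sketch, not the
driver code): any traversal of a device's multicast list must hold the
address-list lock with bottom halves disabled, because dev_set_rx_mode()
and friends can rewrite the list concurrently.

    #include <linux/netdevice.h>

    /* Sketch: propagate bond_dev's multicast list to a slave while the
     * list is protected against concurrent modification. */
    static void mc_sync_to_slave(struct net_device *bond_dev,
                                 struct net_device *slave_dev)
    {
            struct netdev_hw_addr *ha;

            netif_addr_lock_bh(bond_dev);
            netdev_for_each_mc_addr(ha, bond_dev)
                    dev_mc_add(slave_dev, ha->addr);
            netif_addr_unlock_bh(bond_dev);
    }
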
@@ -1901,9 +1905,26 @@ err_dest_symlinks:
     bond_destroy_slave_symlinks(bond_dev, slave_dev);
 
 err_detach:
+    if (!USES_PRIMARY(bond->params.mode)) {
+        netif_addr_lock_bh(bond_dev);
+        bond_mc_list_flush(bond_dev, slave_dev);
+        netif_addr_unlock_bh(bond_dev);
+    }
+    bond_del_vlans_from_slave(bond, slave_dev);
     write_lock_bh(&bond->lock);
     bond_detach_slave(bond, new_slave);
+    if (bond->primary_slave == new_slave)
+        bond->primary_slave = NULL;
     write_unlock_bh(&bond->lock);
+    if (bond->curr_active_slave == new_slave) {
+        read_lock(&bond->lock);
+        write_lock_bh(&bond->curr_slave_lock);
+        bond_change_active_slave(bond, NULL);
+        bond_select_active_slave(bond);
+        write_unlock_bh(&bond->curr_slave_lock);
+        read_unlock(&bond->lock);
+    }
+    slave_disable_netpoll(new_slave);
 
 err_close:
     slave_dev->priv_flags &= ~IFF_BONDING;
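
The err_detach additions follow the usual kernel unwind idiom: each error
label undoes exactly what had been set up by the time control jumped to
it, in reverse order. Schematically (hypothetical step_*/undo_* helpers,
not bonding code):

    /* A failure at step N jumps to the label that tears down steps
     * N-1 .. 1, so a failed setup leaves no stale state behind. */
    static int enslave_like_setup(void)
    {
            int err;

            err = step_one();       /* e.g. copy mc addresses over */
            if (err)
                    goto out;
            err = step_two();       /* e.g. add vlans to the slave */
            if (err)
                    goto undo_one;
            err = step_three();     /* e.g. enable netpoll */
            if (err)
                    goto undo_two;
            return 0;

    undo_two:
            undo_step_two();
    undo_one:
            undo_step_one();
    out:
            return err;
    }
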
@@ -3296,20 +3317,22 @@ static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count)
  */
 static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count)
 {
-    struct ethhdr *data = (struct ethhdr *)skb->data;
-    struct iphdr *iph;
-    struct ipv6hdr *ipv6h;
+    const struct ethhdr *data;
+    const struct iphdr *iph;
+    const struct ipv6hdr *ipv6h;
     u32 v6hash;
-    __be32 *s, *d;
+    const __be32 *s, *d;
 
     if (skb->protocol == htons(ETH_P_IP) &&
-        skb_network_header_len(skb) >= sizeof(*iph)) {
+        pskb_network_may_pull(skb, sizeof(*iph))) {
         iph = ip_hdr(skb);
+        data = (struct ethhdr *)skb->data;
         return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^
             (data->h_dest[5] ^ data->h_source[5])) % count;
     } else if (skb->protocol == htons(ETH_P_IPV6) &&
-           skb_network_header_len(skb) >= sizeof(*ipv6h)) {
+           pskb_network_may_pull(skb, sizeof(*ipv6h))) {
         ipv6h = ipv6_hdr(skb);
+        data = (struct ethhdr *)skb->data;
         s = &ipv6h->saddr.s6_addr32[0];
         d = &ipv6h->daddr.s6_addr32[0];
         v6hash = (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]);
@@ -3328,33 +3351,36 @@ static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count)
 static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count)
 {
     u32 layer4_xor = 0;
-    struct iphdr *iph;
-    struct ipv6hdr *ipv6h;
-    __be32 *s, *d;
-    __be16 *layer4hdr;
+    const struct iphdr *iph;
+    const struct ipv6hdr *ipv6h;
+    const __be32 *s, *d;
+    const __be16 *l4 = NULL;
+    __be16 _l4[2];
+    int noff = skb_network_offset(skb);
+    int poff;
 
     if (skb->protocol == htons(ETH_P_IP) &&
-        skb_network_header_len(skb) >= sizeof(*iph)) {
+        pskb_may_pull(skb, noff + sizeof(*iph))) {
         iph = ip_hdr(skb);
-        if (!ip_is_fragment(iph) &&
-            (iph->protocol == IPPROTO_TCP ||
-             iph->protocol == IPPROTO_UDP) &&
-            (skb_headlen(skb) - skb_network_offset(skb) >=
-             iph->ihl * sizeof(u32) + sizeof(*layer4hdr) * 2)) {
-            layer4hdr = (__be16 *)((u32 *)iph + iph->ihl);
-            layer4_xor = ntohs(*layer4hdr ^ *(layer4hdr + 1));
+        poff = proto_ports_offset(iph->protocol);
+
+        if (!ip_is_fragment(iph) && poff >= 0) {
+            l4 = skb_header_pointer(skb, noff + (iph->ihl << 2) + poff,
+                                    sizeof(_l4), &_l4);
+            if (l4)
+                layer4_xor = ntohs(l4[0] ^ l4[1]);
         }
         return (layer4_xor ^
             ((ntohl(iph->saddr ^ iph->daddr)) & 0xffff)) % count;
     } else if (skb->protocol == htons(ETH_P_IPV6) &&
-           skb_network_header_len(skb) >= sizeof(*ipv6h)) {
+           pskb_may_pull(skb, noff + sizeof(*ipv6h))) {
         ipv6h = ipv6_hdr(skb);
-        if ((ipv6h->nexthdr == IPPROTO_TCP ||
-             ipv6h->nexthdr == IPPROTO_UDP) &&
-            (skb_headlen(skb) - skb_network_offset(skb) >=
-             sizeof(*ipv6h) + sizeof(*layer4hdr) * 2)) {
-            layer4hdr = (__be16 *)(ipv6h + 1);
-            layer4_xor = ntohs(*layer4hdr ^ *(layer4hdr + 1));
+        poff = proto_ports_offset(ipv6h->nexthdr);
+        if (poff >= 0) {
+            l4 = skb_header_pointer(skb, noff + sizeof(*ipv6h) + poff,
+                                    sizeof(_l4), &_l4);
+            if (l4)
+                layer4_xor = ntohs(l4[0] ^ l4[1]);
         }
         s = &ipv6h->saddr.s6_addr32[0];
         d = &ipv6h->daddr.s6_addr32[0];
......
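
Both hash-policy hunks above replace open-coded pointer arithmetic with
skb_header_pointer(), which works even when the transport header sits in
paged (non-linear) skb data: it copies the requested bytes into a
caller-supplied buffer when needed and returns NULL only if the packet is
genuinely too short. A minimal sketch of that contract (hypothetical
helper, same API):

    #include <linux/skbuff.h>

    /* XOR of the 16-bit source and destination ports located "poff"
     * bytes past the network header; 0 if the packet is too short. */
    static u32 ports_xor(const struct sk_buff *skb, int poff)
    {
            __be16 _ports[2];
            const __be16 *ports;

            ports = skb_header_pointer(skb, skb_network_offset(skb) + poff,
                                       sizeof(_ports), &_ports);
            return ports ? ntohs(ports[0] ^ ports[1]) : 0;
    }
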
@@ -828,7 +828,7 @@ static int ax_probe(struct platform_device *pdev)
     struct ei_device *ei_local;
     struct ax_device *ax;
     struct resource *irq, *mem, *mem2;
-    resource_size_t mem_size, mem2_size = 0;
+    unsigned long mem_size, mem2_size = 0;
     int ret = 0;
 
     dev = ax__alloc_ei_netdev(sizeof(struct ax_device));
......
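
Context for the ax88796 hunk above: resource_size_t is 64-bit whenever
physical addresses are (e.g. ARM LPAE), and a plain "/" or "%" on a
64-bit value makes the compiler call libgcc helpers such as
__aeabi_uldivmod, which the kernel does not link against. Here the sizes
fit in unsigned long; where a value genuinely must stay 64-bit, the
kernel's div_u64() helper is the portable spelling (a sketch with a
hypothetical region/block pair):

    #include <linux/math64.h>   /* div_u64() */

    /* How many blocks fit in the region, without pulling in the
     * out-of-line 64-bit division helpers from libgcc. */
    static u64 blocks_of(u64 region_size, u32 block_size)
    {
            return div_u64(region_size, block_size);
    }
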
@@ -2614,6 +2614,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
         }
     }
 
+    /* initialize FW coalescing state machines in RAM */
+    bnx2x_update_coalesce(bp);
+
     /* setup the leading queue */
     rc = bnx2x_setup_leading(bp);
     if (rc) {
@@ -4580,11 +4583,11 @@ static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
     u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
     u32 addr = BAR_CSTRORM_INTMEM +
            CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
-    u16 flags = REG_RD16(bp, addr);
+    u8 flags = REG_RD8(bp, addr);
     /* clear and set */
     flags &= ~HC_INDEX_DATA_HC_ENABLED;
     flags |= enable_flag;
-    REG_WR16(bp, addr, flags);
+    REG_WR8(bp, addr, flags);
     DP(NETIF_MSG_IFUP,
        "port %x fw_sb_id %d sb_index %d disable %d\n",
        port, fw_sb_id, sb_index, disable);
......
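
Why the hunk above narrows the access width: the HC_ENABLED flags occupy
a single byte of the status block in firmware RAM, and a 16-bit
read-modify-write also rewrites the neighbouring byte, which the firmware
owns, so a concurrent firmware update can be silently undone. A
self-contained illustration of that hazard (plain C, not driver code):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Two adjacent bytes, as in the status block: the driver owns
     * "flags", the firmware owns "fw_byte". */
    struct sb {
            uint8_t flags;
            uint8_t fw_byte;
    };

    int main(void)
    {
            struct sb sb = { .flags = 0x00, .fw_byte = 0x11 };
            uint16_t word;

            memcpy(&word, &sb, sizeof(word)); /* 16-bit read ... */
            word |= 0x01;                     /* ... to flip one flag */
            sb.fw_byte = 0x22;                /* firmware writes meanwhile */
            memcpy(&sb, &word, sizeof(sb));   /* 16-bit write-back undoes it */

            printf("fw_byte = 0x%02x, firmware wrote 0x22\n", sb.fw_byte);
            return 0;
    }
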
@@ -9878,6 +9878,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
             REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);
         }
     }
+    if (!CHIP_IS_E1x(bp))
+        /* block FW from writing to host */
+        REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
+
     /* wait until BRB is empty */
     tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
     while (timer_count) {
......
@@ -284,18 +284,10 @@ struct igb_q_vector {
 enum e1000_ring_flags_t {
     IGB_RING_FLAG_RX_SCTP_CSUM,
     IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
-    IGB_RING_FLAG_RX_BUILD_SKB_ENABLED,
     IGB_RING_FLAG_TX_CTX_IDX,
     IGB_RING_FLAG_TX_DETECT_HANG
 };
 
-#define ring_uses_build_skb(ring) \
-    test_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
-#define set_ring_build_skb_enabled(ring) \
-    set_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
-#define clear_ring_build_skb_enabled(ring) \
-    clear_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
-
 #define IGB_TXD_DCMD    (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)
 
 #define IGB_RX_DESC(R, i)       \
......
@@ -3350,20 +3350,6 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
     wr32(E1000_RXDCTL(reg_idx), rxdctl);
 }
 
-static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
-                                  struct igb_ring *rx_ring)
-{
-#define IGB_MAX_BUILD_SKB_SIZE \
-    (SKB_WITH_OVERHEAD(IGB_RX_BUFSZ) - \
-     (NET_SKB_PAD + NET_IP_ALIGN + IGB_TS_HDR_LEN))
-
-    /* set build_skb flag */
-    if (adapter->max_frame_size <= IGB_MAX_BUILD_SKB_SIZE)
-        set_ring_build_skb_enabled(rx_ring);
-    else
-        clear_ring_build_skb_enabled(rx_ring);
-}
-
 /**
  * igb_configure_rx - Configure receive Unit after Reset
  * @adapter: board private structure
@@ -3383,11 +3369,8 @@ static void igb_configure_rx(struct igb_adapter *adapter)
 
     /* Setup the HW Rx Head and Tail Descriptor Pointers and
      * the Base and Length of the Rx Descriptor Ring */
-    for (i = 0; i < adapter->num_rx_queues; i++) {
-        struct igb_ring *rx_ring = adapter->rx_ring[i];
-        igb_set_rx_buffer_len(adapter, rx_ring);
-        igb_configure_rx_ring(adapter, rx_ring);
-    }
+    for (i = 0; i < adapter->num_rx_queues; i++)
+        igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
 }
 
 /**
@@ -6203,78 +6186,6 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
     return igb_can_reuse_rx_page(rx_buffer, page, truesize);
 }
 
-static struct sk_buff *igb_build_rx_buffer(struct igb_ring *rx_ring,
-                                           union e1000_adv_rx_desc *rx_desc)
-{
-    struct igb_rx_buffer *rx_buffer;
-    struct sk_buff *skb;
-    struct page *page;
-    void *page_addr;
-    unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
-#if (PAGE_SIZE < 8192)
-    unsigned int truesize = IGB_RX_BUFSZ;
-#else
-    unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
-                            SKB_DATA_ALIGN(NET_SKB_PAD +
-                                           NET_IP_ALIGN +
-                                           size);
-#endif
-
-    /* If we spanned a buffer we have a huge mess so test for it */
-    BUG_ON(unlikely(!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)));
-
-    rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
-    page = rx_buffer->page;
-    prefetchw(page);
-
-    page_addr = page_address(page) + rx_buffer->page_offset;
-
-    /* prefetch first cache line of first page */
-    prefetch(page_addr + NET_SKB_PAD + NET_IP_ALIGN);
-#if L1_CACHE_BYTES < 128
-    prefetch(page_addr + L1_CACHE_BYTES + NET_SKB_PAD + NET_IP_ALIGN);
-#endif
-
-    /* build an skb to around the page buffer */
-    skb = build_skb(page_addr, truesize);
-    if (unlikely(!skb)) {
-        rx_ring->rx_stats.alloc_failed++;
-        return NULL;
-    }
-
-    /* we are reusing so sync this buffer for CPU use */
-    dma_sync_single_range_for_cpu(rx_ring->dev,
-                                  rx_buffer->dma,
-                                  rx_buffer->page_offset,
-                                  IGB_RX_BUFSZ,
-                                  DMA_FROM_DEVICE);
-
-    /* update pointers within the skb to store the data */
-    skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD);
-    __skb_put(skb, size);
-
-    /* pull timestamp out of packet data */
-    if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
-        igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
-        __skb_pull(skb, IGB_TS_HDR_LEN);
-    }
-
-    if (igb_can_reuse_rx_page(rx_buffer, page, truesize)) {
-        /* hand second half of page back to the ring */
-        igb_reuse_rx_page(rx_ring, rx_buffer);
-    } else {
-        /* we are not reusing the buffer so unmap it */
-        dma_unmap_page(rx_ring->dev, rx_buffer->dma,
-                       PAGE_SIZE, DMA_FROM_DEVICE);
-    }
-
-    /* clear contents of buffer_info */
-    rx_buffer->dma = 0;
-    rx_buffer->page = NULL;
-
-    return skb;
-}
-
 static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
                                            union e1000_adv_rx_desc *rx_desc,
                                            struct sk_buff *skb)
@@ -6690,9 +6601,6 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
         rmb();
 
         /* retrieve a buffer from the ring */
-        if (ring_uses_build_skb(rx_ring))
-            skb = igb_build_rx_buffer(rx_ring, rx_desc);
-        else
-            skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
+        skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
 
         /* exit if we failed to retrieve a buffer */
@@ -6780,14 +6688,6 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
     return true;
 }
 
-static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring)
-{
-    if (ring_uses_build_skb(rx_ring))
-        return NET_SKB_PAD + NET_IP_ALIGN;
-    else
-        return 0;
-}
-
 /**
  * igb_alloc_rx_buffers - Replace used receive buffers; packet split
  * @adapter: address of board private structure
@@ -6814,9 +6714,7 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
          * Refresh the desc even if buffer_addrs didn't change
          * because each write-back erases this info.
          */
-        rx_desc->read.pkt_addr = cpu_to_le64(bi->dma +
-                                             bi->page_offset +
-                                             igb_rx_offset(rx_ring));
+        rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
 
         rx_desc++;
         bi++;
......
@@ -1049,6 +1049,12 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
     if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
         return -EINVAL;
     if (vlan || qos) {
+        if (adapter->vfinfo[vf].pf_vlan)
+            err = ixgbe_set_vf_vlan(adapter, false,
+                                    adapter->vfinfo[vf].pf_vlan,
+                                    vf);
+        if (err)
+            goto out;
         err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
         if (err)
             goto out;
......
@@ -1500,6 +1500,12 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
         }
     } while ((adapter->ahw->linkup && ahw->has_link_events) != 1);
 
+    /* Make sure carrier is off and queue is stopped during loopback */
+    if (netif_running(netdev)) {
+        netif_carrier_off(netdev);
+        netif_stop_queue(netdev);
+    }
+
     ret = qlcnic_do_lb_test(adapter, mode);
 
     qlcnic_83xx_clear_lb_mode(adapter, mode);
@@ -2780,6 +2786,7 @@ static u64 *qlcnic_83xx_fill_stats(struct qlcnic_adapter *adapter,
 void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data)
 {
     struct qlcnic_cmd_args cmd;
+    struct net_device *netdev = adapter->netdev;
     int ret = 0;
 
     qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_STATISTICS);
@@ -2789,7 +2796,7 @@ void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data)
     data = qlcnic_83xx_fill_stats(adapter, &cmd, data,
                                   QLC_83XX_STAT_TX, &ret);
     if (ret) {
-        dev_info(&adapter->pdev->dev, "Error getting MAC stats\n");
+        netdev_err(netdev, "Error getting Tx stats\n");
         goto out;
     }
     /* Get MAC stats */
@@ -2799,8 +2806,7 @@ void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data)
     data = qlcnic_83xx_fill_stats(adapter, &cmd, data,
                                   QLC_83XX_STAT_MAC, &ret);
     if (ret) {
-        dev_info(&adapter->pdev->dev,
-             "Error getting Rx stats\n");
+        netdev_err(netdev, "Error getting MAC stats\n");
         goto out;
     }
     /* Get Rx stats */
@@ -2810,8 +2816,7 @@ void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data)
     data = qlcnic_83xx_fill_stats(adapter, &cmd, data,
                                   QLC_83XX_STAT_RX, &ret);
     if (ret)
-        dev_info(&adapter->pdev->dev,
-             "Error getting Tx stats\n");
+        netdev_err(netdev, "Error getting Rx stats\n");
 out:
     qlcnic_free_mbx_args(&cmd);
 }
......
@@ -358,8 +358,7 @@ set_flags:
         memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
     }
     opcode = TX_ETHER_PKT;
-    if ((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
-        skb_shinfo(skb)->gso_size > 0) {
+    if (skb_is_gso(skb)) {
         hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
         first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
         first_desc->total_hdr_length = hdr_len;
......
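
The qlcnic hunk above keys the LSO path off the skb itself rather than
the netdev feature bits: a GSO packet already queued when TSO is toggled
off still carries a non-zero gso_size and still needs the LSO descriptor
fields. The helper is essentially this (paraphrased from
<linux/skbuff.h>):

    /* An skb is GSO iff a non-zero segment size was recorded in its
     * shared info, independent of currently advertised features. */
    static inline bool skb_is_gso(const struct sk_buff *skb)
    {
            return skb_shinfo(skb)->gso_size;
    }
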
@@ -18,7 +18,7 @@
  */
 #define DRV_NAME    "qlge"
 #define DRV_STRING  "QLogic 10 Gigabit PCI-E Ethernet Driver "
-#define DRV_VERSION "v1.00.00.31"
+#define DRV_VERSION "v1.00.00.32"
 
 #define WQ_ADDR_ALIGN   0x3 /* 4 byte alignment */
......
@@ -379,13 +379,13 @@ static int ql_get_settings(struct net_device *ndev,
     ecmd->supported = SUPPORTED_10000baseT_Full;
     ecmd->advertising = ADVERTISED_10000baseT_Full;
-    ecmd->autoneg = AUTONEG_ENABLE;
     ecmd->transceiver = XCVR_EXTERNAL;
     if ((qdev->link_status & STS_LINK_TYPE_MASK) ==
                 STS_LINK_TYPE_10GBASET) {
         ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
         ecmd->advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg);
         ecmd->port = PORT_TP;
+        ecmd->autoneg = AUTONEG_ENABLE;
     } else {
         ecmd->supported |= SUPPORTED_FIBRE;
         ecmd->advertising |= ADVERTISED_FIBRE;
......
@@ -1434,11 +1434,13 @@ map_error:
 }
 
 /* Categorizing receive firmware frame errors */
-static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err)
+static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
+                                 struct rx_ring *rx_ring)
 {
     struct nic_stats *stats = &qdev->nic_stats;
 
     stats->rx_err_count++;
+    rx_ring->rx_errors++;
 
     switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
     case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
@@ -1474,6 +1476,12 @@ static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
     struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
     struct napi_struct *napi = &rx_ring->napi;
 
+    /* Frame error, so drop the packet. */
+    if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+        ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
+        put_page(lbq_desc->p.pg_chunk.page);
+        return;
+    }
     napi->dev = qdev->ndev;
 
     skb = napi_get_frags(napi);
@@ -1529,6 +1537,12 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
     addr = lbq_desc->p.pg_chunk.va;
     prefetch(addr);
 
+    /* Frame error, so drop the packet. */
+    if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+        ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
+        goto err_out;
+    }
+
     /* The max framesize filter on this chip is set higher than
      * MTU since FCoE uses 2k frames.
      */
@@ -1614,6 +1628,13 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
     memcpy(skb_put(new_skb, length), skb->data, length);
     skb = new_skb;
 
+    /* Frame error, so drop the packet. */
+    if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+        ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
+        dev_kfree_skb_any(skb);
+        return;
+    }
+
     /* loopback self test for ethtool */
     if (test_bit(QL_SELFTEST, &qdev->flags)) {
         ql_check_lb_frame(qdev, skb);
@@ -1919,6 +1940,13 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
         return;
     }
 
+    /* Frame error, so drop the packet. */
+    if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+        ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
+        dev_kfree_skb_any(skb);
+        return;
+    }
+
     /* The max framesize filter on this chip is set higher than
      * MTU since FCoE uses 2k frames.
      */
@@ -2000,12 +2028,6 @@ static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
     QL_DUMP_IB_MAC_RSP(ib_mac_rsp);