Commit 726ba84b authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) Fix BPF divides by zero, from Eric Dumazet and Alexei Starovoitov.

 2) Reject stores into bpf context via st and xadd, from Daniel
    Borkmann.

 3) Fix a memory leak in TUN, from Cong Wang.

 4) Disable RX aggregation on a specific troublesome configuration of
    r8152 in a Dell TB16 dock.

 5) Fix sw_ctx leak in tls, from Sabrina Dubroca.

 6) Fix program replacement in cls_bpf, from Daniel Borkmann.

 7) Fix uninitialized station_info structures in cfg80211, from Johannes
    Berg.

 8) Fix miscalculation of transport header offset field in flow
    dissector, from Eric Dumazet.

 9) Fix LPM tree leak on failure in mlxsw driver, from Ido Schimmel.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (29 commits)
  ibmvnic: Fix IPv6 packet descriptors
  ibmvnic: Fix IP offload control buffer
  ipv6: don't let tb6_root node share routes with other node
  ip6_gre: init dev->mtu and dev->hard_header_len correctly
  mlxsw: spectrum_router: Free LPM tree upon failure
  flow_dissector: properly cap thoff field
  fm10k: mark PM functions as __maybe_unused
  cfg80211: fix station info handling bugs
  netlink: reset extack earlier in netlink_rcv_skb
  can: af_can: canfd_rcv(): replace WARN_ONCE by pr_warn_once
  can: af_can: can_rcv(): replace WARN_ONCE by pr_warn_once
  bpf: mark dst unknown on inconsistent {s, u}bounds adjustments
  bpf: fix cls_bpf on filter replace
  Net: ethernet: ti: netcp: Fix inbound ping crash if MTU size is greater than 1500
  tls: reset crypto_info when do_tls_setsockopt_tx fails
  tls: return -EBUSY if crypto_info is already set
  tls: fix sw_ctx leak
  net/tls: Only attach to sockets in ESTABLISHED state
  net: fs_enet: do not call phy_stop() in interrupts
  r8152: disable RX aggregation on Dell TB16 dock
  ...
parents dda3e152 a0dca10f
...@@ -148,7 +148,8 @@ static inline int epilogue_offset(const struct jit_ctx *ctx) ...@@ -148,7 +148,8 @@ static inline int epilogue_offset(const struct jit_ctx *ctx)
/* Stack must be multiples of 16B */ /* Stack must be multiples of 16B */
#define STACK_ALIGN(sz) (((sz) + 15) & ~15) #define STACK_ALIGN(sz) (((sz) + 15) & ~15)
#define PROLOGUE_OFFSET 8 /* Tail call offset to jump into */
#define PROLOGUE_OFFSET 7
static int build_prologue(struct jit_ctx *ctx) static int build_prologue(struct jit_ctx *ctx)
{ {
...@@ -200,19 +201,19 @@ static int build_prologue(struct jit_ctx *ctx) ...@@ -200,19 +201,19 @@ static int build_prologue(struct jit_ctx *ctx)
/* Initialize tail_call_cnt */ /* Initialize tail_call_cnt */
emit(A64_MOVZ(1, tcc, 0, 0), ctx); emit(A64_MOVZ(1, tcc, 0, 0), ctx);
/* 4 byte extra for skb_copy_bits buffer */
ctx->stack_size = prog->aux->stack_depth + 4;
ctx->stack_size = STACK_ALIGN(ctx->stack_size);
/* Set up function call stack */
emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
cur_offset = ctx->idx - idx0; cur_offset = ctx->idx - idx0;
if (cur_offset != PROLOGUE_OFFSET) { if (cur_offset != PROLOGUE_OFFSET) {
pr_err_once("PROLOGUE_OFFSET = %d, expected %d!\n", pr_err_once("PROLOGUE_OFFSET = %d, expected %d!\n",
cur_offset, PROLOGUE_OFFSET); cur_offset, PROLOGUE_OFFSET);
return -1; return -1;
} }
/* 4 byte extra for skb_copy_bits buffer */
ctx->stack_size = prog->aux->stack_depth + 4;
ctx->stack_size = STACK_ALIGN(ctx->stack_size);
/* Set up function call stack */
emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
return 0; return 0;
} }
...@@ -260,11 +261,12 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) ...@@ -260,11 +261,12 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
emit(A64_LDR64(prg, tmp, prg), ctx); emit(A64_LDR64(prg, tmp, prg), ctx);
emit(A64_CBZ(1, prg, jmp_offset), ctx); emit(A64_CBZ(1, prg, jmp_offset), ctx);
/* goto *(prog->bpf_func + prologue_size); */ /* goto *(prog->bpf_func + prologue_offset); */
off = offsetof(struct bpf_prog, bpf_func); off = offsetof(struct bpf_prog, bpf_func);
emit_a64_mov_i64(tmp, off, ctx); emit_a64_mov_i64(tmp, off, ctx);
emit(A64_LDR64(tmp, prg, tmp), ctx); emit(A64_LDR64(tmp, prg, tmp), ctx);
emit(A64_ADD_I(1, tmp, tmp, sizeof(u32) * PROLOGUE_OFFSET), ctx); emit(A64_ADD_I(1, tmp, tmp, sizeof(u32) * PROLOGUE_OFFSET), ctx);
emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
emit(A64_BR(tmp), ctx); emit(A64_BR(tmp), ctx);
/* out: */ /* out: */
......
...@@ -55,7 +55,7 @@ config BCMA_DRIVER_PCI ...@@ -55,7 +55,7 @@ config BCMA_DRIVER_PCI
config BCMA_DRIVER_PCI_HOSTMODE config BCMA_DRIVER_PCI_HOSTMODE
bool "Driver for PCI core working in hostmode" bool "Driver for PCI core working in hostmode"
depends on MIPS && BCMA_DRIVER_PCI depends on MIPS && BCMA_DRIVER_PCI && PCI_DRIVERS_LEGACY
help help
PCI core hostmode operation (external PCI bus). PCI core hostmode operation (external PCI bus).
......
...@@ -184,7 +184,7 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail) ...@@ -184,7 +184,7 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
void *cmd_head = pcan_usb_fd_cmd_buffer(dev); void *cmd_head = pcan_usb_fd_cmd_buffer(dev);
int err = 0; int err = 0;
u8 *packet_ptr; u8 *packet_ptr;
int i, n = 1, packet_len; int packet_len;
ptrdiff_t cmd_len; ptrdiff_t cmd_len;
/* usb device unregistered? */ /* usb device unregistered? */
...@@ -201,17 +201,13 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail) ...@@ -201,17 +201,13 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
} }
packet_ptr = cmd_head; packet_ptr = cmd_head;
packet_len = cmd_len;
/* firmware is not able to re-assemble 512 bytes buffer in full-speed */ /* firmware is not able to re-assemble 512 bytes buffer in full-speed */
if ((dev->udev->speed != USB_SPEED_HIGH) && if (unlikely(dev->udev->speed != USB_SPEED_HIGH))
(cmd_len > PCAN_UFD_LOSPD_PKT_SIZE)) { packet_len = min(packet_len, PCAN_UFD_LOSPD_PKT_SIZE);
packet_len = PCAN_UFD_LOSPD_PKT_SIZE;
n += cmd_len / packet_len;
} else {
packet_len = cmd_len;
}
for (i = 0; i < n; i++) { do {
err = usb_bulk_msg(dev->udev, err = usb_bulk_msg(dev->udev,
usb_sndbulkpipe(dev->udev, usb_sndbulkpipe(dev->udev,
PCAN_USBPRO_EP_CMDOUT), PCAN_USBPRO_EP_CMDOUT),
...@@ -224,7 +220,12 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail) ...@@ -224,7 +220,12 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
} }
packet_ptr += packet_len; packet_ptr += packet_len;
} cmd_len -= packet_len;
if (cmd_len < PCAN_UFD_LOSPD_PKT_SIZE)
packet_len = cmd_len;
} while (packet_len > 0);
return err; return err;
} }
......
...@@ -613,9 +613,11 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -613,9 +613,11 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK; return NETDEV_TX_OK;
} }
static void fs_timeout(struct net_device *dev) static void fs_timeout_work(struct work_struct *work)
{ {
struct fs_enet_private *fep = netdev_priv(dev); struct fs_enet_private *fep = container_of(work, struct fs_enet_private,
timeout_work);
struct net_device *dev = fep->ndev;
unsigned long flags; unsigned long flags;
int wake = 0; int wake = 0;
...@@ -627,7 +629,6 @@ static void fs_timeout(struct net_device *dev) ...@@ -627,7 +629,6 @@ static void fs_timeout(struct net_device *dev)
phy_stop(dev->phydev); phy_stop(dev->phydev);
(*fep->ops->stop)(dev); (*fep->ops->stop)(dev);
(*fep->ops->restart)(dev); (*fep->ops->restart)(dev);
phy_start(dev->phydev);
} }
phy_start(dev->phydev); phy_start(dev->phydev);
...@@ -639,6 +640,13 @@ static void fs_timeout(struct net_device *dev) ...@@ -639,6 +640,13 @@ static void fs_timeout(struct net_device *dev)
netif_wake_queue(dev); netif_wake_queue(dev);
} }
static void fs_timeout(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
schedule_work(&fep->timeout_work);
}
/*----------------------------------------------------------------------------- /*-----------------------------------------------------------------------------
* generic link-change handler - should be sufficient for most cases * generic link-change handler - should be sufficient for most cases
*-----------------------------------------------------------------------------*/ *-----------------------------------------------------------------------------*/
...@@ -759,6 +767,7 @@ static int fs_enet_close(struct net_device *dev) ...@@ -759,6 +767,7 @@ static int fs_enet_close(struct net_device *dev)
netif_stop_queue(dev); netif_stop_queue(dev);
netif_carrier_off(dev); netif_carrier_off(dev);
napi_disable(&fep->napi); napi_disable(&fep->napi);
cancel_work_sync(&fep->timeout_work);
phy_stop(dev->phydev); phy_stop(dev->phydev);
spin_lock_irqsave(&fep->lock, flags); spin_lock_irqsave(&fep->lock, flags);
...@@ -1019,6 +1028,7 @@ static int fs_enet_probe(struct platform_device *ofdev) ...@@ -1019,6 +1028,7 @@ static int fs_enet_probe(struct platform_device *ofdev)
ndev->netdev_ops = &fs_enet_netdev_ops; ndev->netdev_ops = &fs_enet_netdev_ops;
ndev->watchdog_timeo = 2 * HZ; ndev->watchdog_timeo = 2 * HZ;
INIT_WORK(&fep->timeout_work, fs_timeout_work);
netif_napi_add(ndev, &fep->napi, fs_enet_napi, fpi->napi_weight); netif_napi_add(ndev, &fep->napi, fs_enet_napi, fpi->napi_weight);
ndev->ethtool_ops = &fs_ethtool_ops; ndev->ethtool_ops = &fs_ethtool_ops;
......
...@@ -125,6 +125,7 @@ struct fs_enet_private { ...@@ -125,6 +125,7 @@ struct fs_enet_private {
spinlock_t lock; /* during all ops except TX pckt processing */ spinlock_t lock; /* during all ops except TX pckt processing */
spinlock_t tx_lock; /* during fs_start_xmit and fs_tx */ spinlock_t tx_lock; /* during fs_start_xmit and fs_tx */
struct fs_platform_info *fpi; struct fs_platform_info *fpi;
struct work_struct timeout_work;
const struct fs_ops *ops; const struct fs_ops *ops;
int rx_ring, tx_ring; int rx_ring, tx_ring;
dma_addr_t ring_mem_addr; dma_addr_t ring_mem_addr;
......
...@@ -1276,6 +1276,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) ...@@ -1276,6 +1276,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
unsigned char *dst; unsigned char *dst;
u64 *handle_array; u64 *handle_array;
int index = 0; int index = 0;
u8 proto = 0;
int ret = 0; int ret = 0;
if (adapter->resetting) { if (adapter->resetting) {
...@@ -1364,17 +1365,18 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) ...@@ -1364,17 +1365,18 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
} }
if (skb->protocol == htons(ETH_P_IP)) { if (skb->protocol == htons(ETH_P_IP)) {
if (ip_hdr(skb)->version == 4) tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4; proto = ip_hdr(skb)->protocol;
else if (ip_hdr(skb)->version == 6) } else if (skb->protocol == htons(ETH_P_IPV6)) {
tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6; tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
proto = ipv6_hdr(skb)->nexthdr;
if (ip_hdr(skb)->protocol == IPPROTO_TCP)
tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
else if (ip_hdr(skb)->protocol != IPPROTO_TCP)
tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
} }
if (proto == IPPROTO_TCP)
tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
else if (proto == IPPROTO_UDP)
tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
if (skb->ip_summed == CHECKSUM_PARTIAL) { if (skb->ip_summed == CHECKSUM_PARTIAL) {
tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD; tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
hdrs += 2; hdrs += 2;
...@@ -3346,7 +3348,11 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter) ...@@ -3346,7 +3348,11 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
return; return;
} }
adapter->ip_offload_ctrl.len =
cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB); adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
adapter->ip_offload_ctrl.ipv4_chksum = buf->ipv4_chksum;
adapter->ip_offload_ctrl.ipv6_chksum = buf->ipv6_chksum;
adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum; adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum; adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum; adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
......
...@@ -2463,7 +2463,6 @@ static int fm10k_handle_resume(struct fm10k_intfc *interface) ...@@ -2463,7 +2463,6 @@ static int fm10k_handle_resume(struct fm10k_intfc *interface)
return err; return err;
} }
#ifdef CONFIG_PM
/** /**
* fm10k_resume - Generic PM resume hook * fm10k_resume - Generic PM resume hook
* @dev: generic device structure * @dev: generic device structure
...@@ -2472,7 +2471,7 @@ static int fm10k_handle_resume(struct fm10k_intfc *interface) ...@@ -2472,7 +2471,7 @@ static int fm10k_handle_resume(struct fm10k_intfc *interface)
* suspend or hibernation. This function does not need to handle lower PCIe * suspend or hibernation. This function does not need to handle lower PCIe
* device state as the stack takes care of that for us. * device state as the stack takes care of that for us.
**/ **/
static int fm10k_resume(struct device *dev) static int __maybe_unused fm10k_resume(struct device *dev)
{ {
struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev)); struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev));
struct net_device *netdev = interface->netdev; struct net_device *netdev = interface->netdev;
...@@ -2499,7 +2498,7 @@ static int fm10k_resume(struct device *dev) ...@@ -2499,7 +2498,7 @@ static int fm10k_resume(struct device *dev)
* system suspend or hibernation. This function does not need to handle lower * system suspend or hibernation. This function does not need to handle lower
* PCIe device state as the stack takes care of that for us. * PCIe device state as the stack takes care of that for us.
**/ **/
static int fm10k_suspend(struct device *dev) static int __maybe_unused fm10k_suspend(struct device *dev)
{ {
struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev)); struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev));
struct net_device *netdev = interface->netdev; struct net_device *netdev = interface->netdev;
...@@ -2511,8 +2510,6 @@ static int fm10k_suspend(struct device *dev) ...@@ -2511,8 +2510,6 @@ static int fm10k_suspend(struct device *dev)
return 0; return 0;
} }
#endif /* CONFIG_PM */
/** /**
* fm10k_io_error_detected - called when PCI error is detected * fm10k_io_error_detected - called when PCI error is detected
* @pdev: Pointer to PCI device * @pdev: Pointer to PCI device
...@@ -2643,11 +2640,9 @@ static struct pci_driver fm10k_driver = { ...@@ -2643,11 +2640,9 @@ static struct pci_driver fm10k_driver = {
.id_table = fm10k_pci_tbl, .id_table = fm10k_pci_tbl,
.probe = fm10k_probe, .probe = fm10k_probe,
.remove = fm10k_remove, .remove = fm10k_remove,
#ifdef CONFIG_PM
.driver = { .driver = {
.pm = &fm10k_pm_ops, .pm = &fm10k_pm_ops,
}, },
#endif /* CONFIG_PM */
.sriov_configure = fm10k_iov_configure, .sriov_configure = fm10k_iov_configure,
.err_handler = &fm10k_err_handler .err_handler = &fm10k_err_handler
}; };
......
...@@ -821,13 +821,18 @@ static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp, ...@@ -821,13 +821,18 @@ static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree; struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
int err; int err;
err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
if (err)
return err;
fib->lpm_tree = new_tree; fib->lpm_tree = new_tree;
mlxsw_sp_lpm_tree_hold(new_tree); mlxsw_sp_lpm_tree_hold(new_tree);
err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
if (err)
goto err_tree_bind;
mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree); mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
return 0; return 0;
err_tree_bind:
mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
fib->lpm_tree = old_tree;
return err;
} }
static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp, static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
...@@ -868,11 +873,14 @@ static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp, ...@@ -868,11 +873,14 @@ static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
return err; return err;
no_replace: no_replace:
err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
if (err)
return err;
fib->lpm_tree = new_tree; fib->lpm_tree = new_tree;
mlxsw_sp_lpm_tree_hold(new_tree); mlxsw_sp_lpm_tree_hold(new_tree);
err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
if (err) {
mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
fib->lpm_tree = NULL;
return err;
}
return 0; return 0;
} }
......
...@@ -715,7 +715,7 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp) ...@@ -715,7 +715,7 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
/* warning!!!! We are retrieving the virtual ptr in the sw_data /* warning!!!! We are retrieving the virtual ptr in the sw_data
* field as a 32bit value. Will not work on 64bit machines * field as a 32bit value. Will not work on 64bit machines
*/ */
page = (struct page *)GET_SW_DATA0(desc); page = (struct page *)GET_SW_DATA0(ndesc);
if (likely(dma_buff && buf_len && page)) { if (likely(dma_buff && buf_len && page)) {
dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE, dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE,
......
...@@ -611,6 +611,14 @@ static void tun_queue_purge(struct tun_file *tfile) ...@@ -611,6 +611,14 @@ static void tun_queue_purge(struct tun_file *tfile)
skb_queue_purge(&tfile->sk.sk_error_queue); skb_queue_purge(&tfile->sk.sk_error_queue);
} }
static void tun_cleanup_tx_array(struct tun_file *tfile)
{
if (tfile->tx_array.ring.queue) {
skb_array_cleanup(&tfile->tx_array);
memset(&tfile->tx_array, 0, sizeof(tfile->tx_array));
}
}
static void __tun_detach(struct tun_file *tfile, bool clean) static void __tun_detach(struct tun_file *tfile, bool clean)
{ {
struct tun_file *ntfile; struct tun_file *ntfile;
...@@ -657,8 +665,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean) ...@@ -657,8 +665,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
tun->dev->reg_state == NETREG_REGISTERED) tun->dev->reg_state == NETREG_REGISTERED)
unregister_netdevice(tun->dev); unregister_netdevice(tun->dev);
} }
if (tun) tun_cleanup_tx_array(tfile);
skb_array_cleanup(&tfile->tx_array);
sock_put(&tfile->sk); sock_put(&tfile->sk);
} }
} }
...@@ -700,11 +707,13 @@ static void tun_detach_all(struct net_device *dev) ...@@ -700,11 +707,13 @@ static void tun_detach_all(struct net_device *dev)
/* Drop read queue */ /* Drop read queue */
tun_queue_purge(tfile); tun_queue_purge(tfile);
sock_put(&tfile->sk); sock_put(&tfile->sk);
tun_cleanup_tx_array(tfile);
} }
list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) { list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
tun_enable_queue(tfile); tun_enable_queue(tfile);
tun_queue_purge(tfile); tun_queue_purge(tfile);
sock_put(&tfile->sk); sock_put(&tfile->sk);
tun_cleanup_tx_array(tfile);
} }
BUG_ON(tun->numdisabled != 0); BUG_ON(tun->numdisabled != 0);
...@@ -2851,6 +2860,8 @@ static int tun_chr_open(struct inode *inode, struct file * file) ...@@ -2851,6 +2860,8 @@ static int tun_chr_open(struct inode *inode, struct file * file)
sock_set_flag(&tfile->sk, SOCK_ZEROCOPY); sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
memset(&tfile->tx_array, 0, sizeof(tfile->tx_array));
return 0; return 0;
} }
......
...@@ -606,6 +606,7 @@ enum rtl8152_flags { ...@@ -606,6 +606,7 @@ enum rtl8152_flags {
PHY_RESET, PHY_RESET,
SCHEDULE_NAPI, SCHEDULE_NAPI,
GREEN_ETHERNET, GREEN_ETHERNET,
DELL_TB_RX_AGG_BUG,
}; };
/* Define these values to match your device */ /* Define these values to match your device */
...@@ -1798,6 +1799,9 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg) ...@@ -1798,6 +1799,9 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
remain = agg_buf_sz - (int)(tx_agg_align(tx_data) - agg->head); remain = agg_buf_sz - (int)(tx_agg_align(tx_data) - agg->head);
if (test_bit(DELL_TB_RX_AGG_BUG, &tp->flags))
break;
} }
if (!skb_queue_empty(&skb_head)) { if (!skb_queue_empty(&skb_head)) {
...@@ -4133,6 +4137,9 @@ static void r8153_init(struct r8152 *tp) ...@@ -4133,6 +4137,9 @@ static void r8153_init(struct r8152 *tp)
/* rx aggregation */ /* rx aggregation */
ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN); ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN);
if (test_bit(DELL_TB_RX_AGG_BUG, &tp->flags))
ocp_data |= RX_AGG_DISABLE;
ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data); ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
rtl_tally_reset(tp); rtl_tally_reset(tp);
...@@ -5207,6 +5214,12 @@ static int rtl8152_probe(struct usb_interface *intf, ...@@ -5207,6 +5214,12 @@ static int rtl8152_probe(struct usb_interface *intf,
netdev->hw_features &= ~NETIF_F_RXCSUM; netdev->hw_features &= ~NETIF_F_RXCSUM;
} }
if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 &&
udev->serial && !strcmp(udev->serial, "000001000000")) {
dev_info(&udev->dev, "Dell TB16 Dock, disable RX aggregation");
set_bit(DELL_TB_RX_AGG_BUG, &tp->flags);
}
netdev->ethtool_ops = &ops; netdev->ethtool_ops = &ops;
netif_set_gso_max_size(netdev, RTL_LIMITED_TSO_SIZE); netif_set_gso_max_size(netdev, RTL_LIMITED_TSO_SIZE);
......
...@@ -182,12 +182,9 @@ static int brcmf_c_process_clm_blob(struct brcmf_if *ifp) ...@@ -182,12 +182,9 @@ static int brcmf_c_process_clm_blob(struct brcmf_if *ifp)
err = request_firmware(&clm, clm_name, dev); err = request_firmware(&clm, clm_name, dev);
if (err) { if (err) {
if (err == -ENOENT) { brcmf_info("no clm_blob available(err=%d), device may have limited channels available\n",
brcmf_dbg(INFO, "continue with CLM data currently present in firmware\n"); err);
return 0; return 0;
}
brcmf_err("request CLM blob file failed (%d)\n", err);
return err;
} }
chunk_buf = kzalloc(sizeof(*chunk_buf) + MAX_CHUNK_LEN - 1, GFP_KERNEL); chunk_buf = kzalloc(sizeof(*chunk_buf) + MAX_CHUNK_LEN - 1, GFP_KERNEL);
......
...@@ -32,7 +32,7 @@ config SSB_BLOCKIO ...@@ -32,7 +32,7 @@ config SSB_BLOCKIO
config SSB_PCIHOST_POSSIBLE config SSB_PCIHOST_POSSIBLE
bool