Commit fc08b197 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) MLX5 bug fixes from Saeed Mahameed et al:
     - wrong resources released when a firmware timeout happens
     - fix wrong check for encapsulation size limits
     - UAR memory leak
     - ETHTOOL_GRXCLSRLALL failed to fill in info->data

 2) Don't cache l3mdev on mis-matched local routes, which causes net
    devices to leak refs. From Robert Shearman.

 3) Handle fragmented SKBs properly in the macsec driver; the problem is
    that we were mis-sizing the sgvec table (see the sketch after this
    list). From Jason A. Donenfeld.

 4) We cannot have checksum offload enabled for inner UDP tunneled
    packets during IPSEC, from Ansis Atteka.

 5) Fix double SKB free in ravb driver, from Dan Carpenter.

 6) Fix CPU port handling in b53 DSA driver, from Florian Fainelli.

 7) Don't use on-stack buffers for usb_control_msg() in CAN usb driver,
    from Maksim Salau.

 8) Fix device leak in macvlan driver, from Herbert Xu. We have to purge
    the broadcast queue properly on port destroy.

 9) Fix tx ring entry limit on EF10 devices in sfc driver. From Bert
    Kenward.

10) Fix memory leaks in team driver, from Pan Bian.

11) Don't set up ipv6_stub before it can actually be used, from Paolo
    Abeni.

12) Fix tipc socket flow control accounting, from Parthasarathy
    Bhuvaragan.

13) Fix crash on module unload in hso driver, from Andreas Kemnade.

14) Fix purging of bridge multicast entries, the problem is that if we
    don't defer it to ndo_uninit it's possible for new entries to get
    added after we purge. Fix from Xin Long.

15) Don't return garbage for PACKET_HDRLEN getsockopt, from Alexander
    Potapenko.

16) Fix autoneg stall properly in PHY layer, and revert micrel driver
    change that was papering over it. From Alexander Kochetkov.

17) Don't dereference an ipv4 route as an ipv6 one in the ip6_tunnel
    code, from Cong Wang.

18) Clear out the congestion control private data of the TCP socket in
    all of the right places, from Wei Wang.

19) rawv6_ioctl measures SKB length incorrectly, fix from Jamie
    Bainbridge.
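
Aside on item 3: a minimal sketch of the dynamic-sgvec pattern the macsec
fix adopts, assuming an skb in the crypto path; names here only loosely
follow the actual patch:

	struct scatterlist *sg;
	struct sk_buff *trailer;
	int nents = skb_cow_data(skb, 0, &trailer);	/* segment count */

	if (nents < 0)
		return nents;
	/* heap allocation replaces a fixed sg[MAX_SKB_FRAGS + 1] array,
	 * which overflows for heavily fragmented skbs */
	sg = kmalloc_array(nents, sizeof(*sg), GFP_ATOMIC);
	if (!sg)
		return -ENOMEM;
	sg_init_table(sg, nents);
	skb_to_sgvec(skb, sg, 0, skb->len);
	/* ... attach sg to the aead request, kfree(sg) when done ... */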

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (41 commits)
  ipv6: check raw payload size correctly in ioctl
  tcp: memset ca_priv data to 0 properly
  ipv6: check skb->protocol before lookup for nexthop
  net: core: Prevent from dereferencing null pointer when releasing SKB
  macsec: dynamically allocate space for sglist
  Revert "phy: micrel: Disable auto negotiation on startup"
  net: phy: fix auto-negotiation stall due to unavailable interrupt
  net/packet: check length in getsockopt() called with PACKET_HDRLEN
  net: ipv6: regenerate host route if moved to gc list
  bridge: move bridge multicast cleanup to ndo_uninit
  ipv6: fix source routing
  qed: Fix error in the dcbx app meta data initialization.
  netvsc: fix calculation of available send sections
  net: hso: fix module unloading
  tipc: fix socket flow control accounting error at tipc_recv_stream
  tipc: fix socket flow control accounting error at tipc_send_stream
  ipv6: move stub initialization after ipv6 setup completion
  team: fix memory leaks
  sfc: tx ring can only have 2048 entries for all EF10 NICs
  macvlan: Fix device ref leak when purging bc_queue
  ...
parents ea3a8596 105f5528
@@ -72,6 +72,8 @@ config CAN_PEAK_USB
 	  PCAN-USB Pro dual CAN 2.0b channels USB adapter
 	  PCAN-USB FD single CAN-FD channel USB adapter
 	  PCAN-USB Pro FD dual CAN-FD channels USB adapter
+	  PCAN-Chip USB CAN-FD to USB stamp module
+	  PCAN-USB X6 6 CAN-FD channels USB adapter
 	  (see also http://www.peak-system.com).
......
@@ -739,13 +739,18 @@ static const struct net_device_ops gs_usb_netdev_ops = {
 static int gs_usb_set_identify(struct net_device *netdev, bool do_identify)
 {
 	struct gs_can *dev = netdev_priv(netdev);
-	struct gs_identify_mode imode;
+	struct gs_identify_mode *imode;
 	int rc;
 
+	imode = kmalloc(sizeof(*imode), GFP_KERNEL);
+
+	if (!imode)
+		return -ENOMEM;
+
 	if (do_identify)
-		imode.mode = GS_CAN_IDENTIFY_ON;
+		imode->mode = GS_CAN_IDENTIFY_ON;
 	else
-		imode.mode = GS_CAN_IDENTIFY_OFF;
+		imode->mode = GS_CAN_IDENTIFY_OFF;
 
 	rc = usb_control_msg(interface_to_usbdev(dev->iface),
 			     usb_sndctrlpipe(interface_to_usbdev(dev->iface),
@@ -755,10 +760,12 @@ static int gs_usb_set_identify(struct net_device *netdev, bool do_identify)
 			     USB_RECIP_INTERFACE,
 			     dev->channel,
 			     0,
-			     &imode,
-			     sizeof(imode),
+			     imode,
+			     sizeof(*imode),
 			     100);
 
+	kfree(imode);
+
 	return (rc > 0) ? 0 : rc;
 }
......
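
The gs_usb hunk above is the standard remedy for item 7: buffers passed to
usb_control_msg() may be DMA-mapped, and on-stack memory is not guaranteed
to be DMA-able (with VMAP_STACK it cannot be). A minimal sketch of the
pattern with a hypothetical vendor request, not code from the patch:

	static int send_one_byte(struct usb_device *udev, u8 request, u8 value)
	{
		u8 *buf = kmalloc(1, GFP_KERNEL);	/* heap, hence DMA-able */
		int rc;

		if (!buf)
			return -ENOMEM;
		*buf = value;
		rc = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), request,
				     USB_TYPE_VENDOR | USB_RECIP_DEVICE,
				     0, 0, buf, 1, 100);
		kfree(buf);
		return rc < 0 ? rc : 0;
	}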
@@ -39,6 +39,7 @@ static struct usb_device_id peak_usb_table[] = {
 	{USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPRO_PRODUCT_ID)},
 	{USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBFD_PRODUCT_ID)},
 	{USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPROFD_PRODUCT_ID)},
+	{USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBCHIP_PRODUCT_ID)},
 	{USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBX6_PRODUCT_ID)},
 	{} /* Terminating entry */
 };
@@ -51,6 +52,7 @@ static const struct peak_usb_adapter *const peak_usb_adapters_list[] = {
 	&pcan_usb_pro,
 	&pcan_usb_fd,
 	&pcan_usb_pro_fd,
+	&pcan_usb_chip,
 	&pcan_usb_x6,
 };
......
@@ -27,6 +27,7 @@
 #define PCAN_USBPRO_PRODUCT_ID		0x000d
 #define PCAN_USBPROFD_PRODUCT_ID	0x0011
 #define PCAN_USBFD_PRODUCT_ID		0x0012
+#define PCAN_USBCHIP_PRODUCT_ID		0x0013
 #define PCAN_USBX6_PRODUCT_ID		0x0014
 
 #define PCAN_USB_DRIVER_NAME		"peak_usb"
@@ -90,6 +91,7 @@ struct peak_usb_adapter {
 extern const struct peak_usb_adapter pcan_usb;
 extern const struct peak_usb_adapter pcan_usb_pro;
 extern const struct peak_usb_adapter pcan_usb_fd;
+extern const struct peak_usb_adapter pcan_usb_chip;
 extern const struct peak_usb_adapter pcan_usb_pro_fd;
 extern const struct peak_usb_adapter pcan_usb_x6;
......
@@ -1061,6 +1061,78 @@ const struct peak_usb_adapter pcan_usb_fd = {
 	.do_get_berr_counter = pcan_usb_fd_get_berr_counter,
 };
 
+/* describes the PCAN-CHIP USB */
+static const struct can_bittiming_const pcan_usb_chip_const = {
+	.name = "pcan_chip_usb",
+	.tseg1_min = 1,
+	.tseg1_max = (1 << PUCAN_TSLOW_TSGEG1_BITS),
+	.tseg2_min = 1,
+	.tseg2_max = (1 << PUCAN_TSLOW_TSGEG2_BITS),
+	.sjw_max = (1 << PUCAN_TSLOW_SJW_BITS),
+	.brp_min = 1,
+	.brp_max = (1 << PUCAN_TSLOW_BRP_BITS),
+	.brp_inc = 1,
+};
+
+static const struct can_bittiming_const pcan_usb_chip_data_const = {
+	.name = "pcan_chip_usb",
+	.tseg1_min = 1,
+	.tseg1_max = (1 << PUCAN_TFAST_TSGEG1_BITS),
+	.tseg2_min = 1,
+	.tseg2_max = (1 << PUCAN_TFAST_TSGEG2_BITS),
+	.sjw_max = (1 << PUCAN_TFAST_SJW_BITS),
+	.brp_min = 1,
+	.brp_max = (1 << PUCAN_TFAST_BRP_BITS),
+	.brp_inc = 1,
+};
+
+const struct peak_usb_adapter pcan_usb_chip = {
+	.name = "PCAN-Chip USB",
+	.device_id = PCAN_USBCHIP_PRODUCT_ID,
+	.ctrl_count = PCAN_USBFD_CHANNEL_COUNT,
+	.ctrlmode_supported = CAN_CTRLMODE_FD |
+		CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY,
+	.clock = {
+		.freq = PCAN_UFD_CRYSTAL_HZ,
+	},
+	.bittiming_const = &pcan_usb_chip_const,
+	.data_bittiming_const = &pcan_usb_chip_data_const,
+
+	/* size of device private data */
+	.sizeof_dev_private = sizeof(struct pcan_usb_fd_device),
+
+	/* timestamps usage */
+	.ts_used_bits = 32,
+	.ts_period = 1000000, /* calibration period in ts. */
+	.us_per_ts_scale = 1, /* us = (ts * scale) >> shift */
+	.us_per_ts_shift = 0,
+
+	/* give here messages in/out endpoints */
+	.ep_msg_in = PCAN_USBPRO_EP_MSGIN,
+	.ep_msg_out = {PCAN_USBPRO_EP_MSGOUT_0},
+
+	/* size of rx/tx usb buffers */
+	.rx_buffer_size = PCAN_UFD_RX_BUFFER_SIZE,
+	.tx_buffer_size = PCAN_UFD_TX_BUFFER_SIZE,
+
+	/* device callbacks */
+	.intf_probe = pcan_usb_pro_probe,	/* same as PCAN-USB Pro */
+	.dev_init = pcan_usb_fd_init,
+	.dev_exit = pcan_usb_fd_exit,
+	.dev_free = pcan_usb_fd_free,
+	.dev_set_bus = pcan_usb_fd_set_bus,
+	.dev_set_bittiming = pcan_usb_fd_set_bittiming_slow,
+	.dev_set_data_bittiming = pcan_usb_fd_set_bittiming_fast,
+	.dev_decode_buf = pcan_usb_fd_decode_buf,
+	.dev_start = pcan_usb_fd_start,
+	.dev_stop = pcan_usb_fd_stop,
+	.dev_restart_async = pcan_usb_fd_restart_async,
+	.dev_encode_msg = pcan_usb_fd_encode_msg,
+	.do_get_berr_counter = pcan_usb_fd_get_berr_counter,
+};
+
 /* describes the PCAN-USB Pro FD adapter */
 static const struct can_bittiming_const pcan_usb_pro_fd_const = {
 	.name = "pcan_usb_pro_fd",
......
@@ -326,6 +326,7 @@ static void b53_get_vlan_entry(struct b53_device *dev, u16 vid,
 static void b53_set_forwarding(struct b53_device *dev, int enable)
 {
+	struct dsa_switch *ds = dev->ds;
 	u8 mgmt;
 
 	b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
@@ -336,6 +337,15 @@ static void b53_set_forwarding(struct b53_device *dev, int enable)
 		mgmt &= ~SM_SW_FWD_EN;
 
 	b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
+
+	/* Include IMP port in dumb forwarding mode when no tagging protocol is
+	 * set
+	 */
+	if (ds->ops->get_tag_protocol(ds) == DSA_TAG_PROTO_NONE) {
+		b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, &mgmt);
+		mgmt |= B53_MII_DUMB_FWDG_EN;
+		b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt);
+	}
 }
 
 static void b53_enable_vlan(struct b53_device *dev, bool enable)
@@ -598,7 +608,8 @@ static void b53_switch_reset_gpio(struct b53_device *dev)
 static int b53_switch_reset(struct b53_device *dev)
 {
-	u8 mgmt;
+	unsigned int timeout = 1000;
+	u8 mgmt, reg;
 
 	b53_switch_reset_gpio(dev);
@@ -607,6 +618,28 @@ static int b53_switch_reset(struct b53_device *dev)
 		b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x00);
 	}
 
+	/* This is specific to 58xx devices here, do not use is58xx() which
+	 * covers the larger Starfigther 2 family, including 7445/7278 which
+	 * still use this driver as a library and need to perform the reset
+	 * earlier.
+	 */
+	if (dev->chip_id == BCM58XX_DEVICE_ID) {
+		b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, &reg);
+		reg |= SW_RST | EN_SW_RST | EN_CH_RST;
+		b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, reg);
+
+		do {
+			b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, &reg);
+			if (!(reg & SW_RST))
+				break;
+
+			usleep_range(1000, 2000);
+		} while (timeout-- > 0);
+
+		if (timeout == 0)
+			return -ETIMEDOUT;
+	}
+
 	b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
 
 	if (!(mgmt & SM_SW_FWD_EN)) {
@@ -1731,7 +1764,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
 		.vlans = 4096,
 		.enabled_ports = 0x1ff,
 		.arl_entries = 4,
-		.cpu_port = B53_CPU_PORT_25,
+		.cpu_port = B53_CPU_PORT,
 		.vta_regs = B53_VTA_REGS,
 		.duplex_reg = B53_DUPLEX_STAT_GE,
 		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
......
@@ -104,6 +104,10 @@
 #define  B53_UC_FWD_EN			BIT(6)
 #define  B53_MC_FWD_EN			BIT(7)
 
+/* Switch control (8 bit) */
+#define B53_SWITCH_CTRL			0x22
+#define  B53_MII_DUMB_FWDG_EN		BIT(6)
+
 /* (16 bit) */
 #define B53_UC_FLOOD_MASK		0x32
 #define B53_MC_FLOOD_MASK		0x34
@@ -139,6 +143,7 @@
 /* Software reset register (8 bit) */
 #define B53_SOFTRESET			0x79
 #define  SW_RST				BIT(7)
+#define  EN_CH_RST			BIT(6)
 #define  EN_SW_RST			BIT(4)
 
 /* Fast Aging Control register (8 bit) */
......
@@ -90,7 +90,7 @@
 #define MLX5E_VALID_NUM_MTTS(num_mtts) (MLX5_MTT_OCTW(num_mtts) - 1 <= U16_MAX)
 #define MLX5_UMR_ALIGN				(2048)
-#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD	(128)
+#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD	(256)
 
 #define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ		(64 * 1024)
 #define MLX5E_DEFAULT_LRO_TIMEOUT		32
......
@@ -564,6 +564,7 @@ int mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv, struct ethtool_rxnfc *info,
 	int idx = 0;
 	int err = 0;
 
+	info->data = MAX_NUM_OF_ETHTOOL_RULES;
 	while ((!err || err == -ENOENT) && idx < info->rule_cnt) {
 		err = mlx5e_ethtool_get_flow(priv, info, location);
 		if (!err)
......
@@ -174,7 +174,7 @@ static void mlx5e_tx_timeout_work(struct work_struct *work)
 
 static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
 {
-	struct mlx5e_sw_stats *s = &priv->stats.sw;
+	struct mlx5e_sw_stats temp, *s = &temp;
 	struct mlx5e_rq_stats *rq_stats;
 	struct mlx5e_sq_stats *sq_stats;
 	u64 tx_offload_none = 0;
@@ -229,6 +229,7 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
 	s->link_down_events_phy = MLX5_GET(ppcnt_reg,
 				priv->stats.pport.phy_counters,
 				counter_set.phys_layer_cntrs.link_down_events);
+	memcpy(&priv->stats.sw, s, sizeof(*s));
 }
 
 static void mlx5e_update_vport_counters(struct mlx5e_priv *priv)
@@ -243,7 +244,6 @@ static void mlx5e_update_vport_counters(struct mlx5e_priv *priv)
 	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
 	MLX5_SET(query_vport_counter_in, in, other_vport, 0);
 
-	memset(out, 0, outlen);
 	mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
 }
......
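
The mlx5e_update_sw_counters() change above is a snapshot-and-publish
pattern: totals are accumulated in a stack-local struct and copied into
the visible stats in one memcpy(), so concurrent readers never see the
shared struct mid-update. A generic sketch with hypothetical types, not
the driver code:

	struct sw_stats { u64 rx_packets, tx_packets; };

	static void update_stats(struct sw_stats *published)
	{
		struct sw_stats temp = {};	/* private accumulator */

		/* ... sum per-queue counters into temp ... */
		memcpy(published, &temp, sizeof(temp));	/* publish once */
	}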
@@ -639,7 +639,8 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
 	if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH) &&
 	    rep->vport != FDB_UPLINK_VPORT) {
-		if (min_inline > esw->offloads.inline_mode) {
+		if (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
+		    esw->offloads.inline_mode < min_inline) {
 			netdev_warn(priv->netdev,
 				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
 				    min_inline, esw->offloads.inline_mode);
@@ -785,16 +786,15 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
 	return 0;
 }
 
-static int gen_vxlan_header_ipv4(struct net_device *out_dev,
-				 char buf[],
-				 unsigned char h_dest[ETH_ALEN],
-				 int ttl,
-				 __be32 daddr,
-				 __be32 saddr,
-				 __be16 udp_dst_port,
-				 __be32 vx_vni)
+static void gen_vxlan_header_ipv4(struct net_device *out_dev,
+				  char buf[], int encap_size,
+				  unsigned char h_dest[ETH_ALEN],
+				  int ttl,
+				  __be32 daddr,
+				  __be32 saddr,
+				  __be16 udp_dst_port,
+				  __be32 vx_vni)
 {
-	int encap_size = VXLAN_HLEN + sizeof(struct iphdr) + ETH_HLEN;
 	struct ethhdr *eth = (struct ethhdr *)buf;
 	struct iphdr *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
 	struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr));
@@ -817,20 +817,17 @@ static int gen_vxlan_header_ipv4(struct net_device *out_dev,
 	udp->dest = udp_dst_port;
 	vxh->vx_flags = VXLAN_HF_VNI;
 	vxh->vx_vni = vxlan_vni_field(vx_vni);
-
-	return encap_size;
 }
 
-static int gen_vxlan_header_ipv6(struct net_device *out_dev,
-				 char buf[],
-				 unsigned char h_dest[ETH_ALEN],
-				 int ttl,
-				 struct in6_addr *daddr,
-				 struct in6_addr *saddr,
-				 __be16 udp_dst_port,
-				 __be32 vx_vni)
+static void gen_vxlan_header_ipv6(struct net_device *out_dev,
+				  char buf[], int encap_size,
+				  unsigned char h_dest[ETH_ALEN],
+				  int ttl,
+				  struct in6_addr *daddr,
+				  struct in6_addr *saddr,
+				  __be16 udp_dst_port,
+				  __be32 vx_vni)
 {
-	int encap_size = VXLAN_HLEN + sizeof(struct ipv6hdr) + ETH_HLEN;
 	struct ethhdr *eth = (struct ethhdr *)buf;
 	struct ipv6hdr *ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr));
 	struct udphdr *udp = (struct udphdr *)((char *)ip6h + sizeof(struct ipv6hdr));
@@ -852,8 +849,6 @@ static int gen_vxlan_header_ipv6(struct net_device *out_dev,
 	udp->dest = udp_dst_port;
 	vxh->vx_flags = VXLAN_HF_VNI;
 	vxh->vx_vni = vxlan_vni_field(vx_vni);
-
-	return encap_size;
 }
 
 static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
@@ -862,13 +857,20 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
 					  struct net_device **out_dev)
 {
 	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
+	int ipv4_encap_size = ETH_HLEN + sizeof(struct iphdr) + VXLAN_HLEN;
 	struct ip_tunnel_key *tun_key = &e->tun_info.key;
-	int encap_size, ttl, err;
 	struct neighbour *n = NULL;
 	struct flowi4 fl4 = {};
 	char *encap_header;
+	int ttl, err;
+
+	if (max_encap_size < ipv4_encap_size) {
+		mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
+			       ipv4_encap_size, max_encap_size);
+		return -EOPNOTSUPP;
+	}
 
-	encap_header = kzalloc(max_encap_size, GFP_KERNEL);
+	encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL);
 	if (!encap_header)
 		return -ENOMEM;
@@ -903,11 +905,11 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
 
 	switch (e->tunnel_type) {
 	case MLX5_HEADER_TYPE_VXLAN:
-		encap_size = gen_vxlan_header_ipv4(*out_dev, encap_header,
-						   e->h_dest, ttl,
-						   fl4.daddr,
-						   fl4.saddr, tun_key->tp_dst,
-						   tunnel_id_to_key32(tun_key->tun_id));
+		gen_vxlan_header_ipv4(*out_dev, encap_header,
+				      ipv4_encap_size, e->h_dest, ttl,
+				      fl4.daddr,
+				      fl4.saddr, tun_key->tp_dst,
+				      tunnel_id_to_key32(tun_key->tun_id));
 		break;
 	default:
 		err = -EOPNOTSUPP;
@@ -915,7 +917,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
 	}
 
 	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
-			       encap_size, encap_header, &e->encap_id);
+			       ipv4_encap_size, encap_header, &e->encap_id);
 out:
 	if (err && n)
 		neigh_release(n);
@@ -930,13 +932,20 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
 {
 	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
+	int ipv6_encap_size = ETH_HLEN + sizeof(struct ipv6hdr) + VXLAN_HLEN;
 	struct ip_tunnel_key *tun_key = &e->tun_info.key;
-	int encap_size, err, ttl = 0;
 	struct neighbour *n = NULL;
 	struct flowi6 fl6 = {};
 	char *encap_header;
+	int err, ttl = 0;
+
+	if (max_encap_size < ipv6_encap_size) {
+		mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
+			       ipv6_encap_size, max_encap_size);
+		return -EOPNOTSUPP;
+	}
 
-	encap_header = kzalloc(max_encap_size, GFP_KERNEL);
+	encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
 	if (!encap_header)
 		return -ENOMEM;
@@ -972,11 +981,11 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
 
 	switch (e->tunnel_type) {
 	case MLX5_HEADER_TYPE_VXLAN:
-		encap_size = gen_vxlan_header_ipv6(*out_dev, encap_header,
-						   e->h_dest, ttl,
-						   &fl6.daddr,
-						   &fl6.saddr, tun_key->tp_dst,
-						   tunnel_id_to_key32(tun_key->tun_id));
+		gen_vxlan_header_ipv6(*out_dev, encap_header,
+				      ipv6_encap_size, e->h_dest, ttl,
+				      &fl6.daddr,
+				      &fl6.saddr, tun_key->tp_dst,
+				      tunnel_id_to_key32(tun_key->tun_id));
 		break;
 	default:
 		err = -EOPNOTSUPP;
@@ -984,7 +993,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
 	}
 
 	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
-			       encap_size, encap_header, &e->encap_id);
+			       ipv6_encap_size, encap_header, &e->encap_id);
 out:
 	if (err && n)
 		neigh_release(n);
......
@@ -911,8 +911,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
 	struct mlx5_core_dev *dev = devlink_priv(devlink);
 	struct mlx5_eswitch *esw = dev->priv.eswitch;
 	int num_vports = esw->enabled_vports;
-	int err;
-	int vport;
+	int err, vport;
 	u8 mlx5_mode;
 
 	if (!MLX5_CAP_GEN(dev, vport_group_manager))
@@ -921,9 +920,17 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
 	if (esw->mode == SRIOV_NONE)
 		return -EOPNOTSUPP;
 
-	if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
-	    MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
+	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
+	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
+		if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
+			return 0;
+		/* fall through */
+	case MLX5_CAP_INLINE_MODE_L2:
+		esw_warn(dev, "Inline mode can't be set\n");
 		return -EOPNOTSUPP;
+	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
+		break;
+	}
 
 	if (esw->offloads.num_flows > 0) {
 		esw_warn(dev, "Can't set inline mode when flows are configured\n");
@@ -966,18 +973,14 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
 	if (esw->mode == SRIOV_NONE)
 		return -EOPNOTSUPP;
 
-	if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
-	    MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
-		return -EOPNOTSUPP;
-
 	return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
 }
 
 int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
 {
-	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
 	struct mlx5_core_dev *dev = esw->dev;
 	int vport;
+	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
 
 	if (!MLX5_CAP_GEN(dev, vport_group_manager))
 		return -EOPNOTSUPP;
@@ -985,10 +988,18 @@ int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
 	if (esw->mode == SRIOV_NONE)
 		return -EOPNOTSUPP;
 
-	if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
-	    MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
-		return -EOPNOTSUPP;
+	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
+	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
+		mlx5_mode = MLX5_INLINE_MODE_NONE;
+		goto out;
+	case MLX5_CAP_INLINE_MODE_L2:
+		mlx5_mode = MLX5_INLINE_MODE_L2;
+		goto out;
+	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
+		goto query_vports;
+	}
 
+query_vports:
 	for (vport = 1; vport <= nvfs; vport++) {
 		mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
 		if (vport > 1 && prev_mlx5_mode != mlx5_mode)
@@ -996,6 +1007,7 @@ int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
 		prev_mlx5_mode = mlx5_mode;
 	}
 
+out:
 	*mode = mlx5_mode;
 	return 0;
 }
......
@@ -1029,7 +1029,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
 	if (err) {
 		dev_err(&dev->pdev->dev, "Firmware over %d MS in initializing state, aborting\n",
 			FW_INIT_TIMEOUT_MILI);
-		goto out_err;
+		goto err_cmd_cleanup;
 	}
 
 	err = mlx5_core_enable_hca(dev, 0);
......
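
The mlx5_load_one() one-liner above is a goto-ladder fix: by the time the
firmware-init wait can time out, the command interface has already been
brought up, so the error path has to unwind through the matching label
rather than the generic one. Schematically, with hypothetical
setup/teardown names (not the mlx5 code):

	err = setup_a(dev);
	if (err)
		goto out_err;		/* nothing to unwind yet */

	err = setup_cmd_if(dev);	/* brought up before the fw wait */
	if (err)
		goto out_err;

	err = wait_fw_init(dev);
	if (err)
		goto err_cmd_cleanup;	/* must undo setup_cmd_if() */

	return 0;

err_cmd_cleanup:
	teardown_cmd_if(dev);
out_err:
	return err;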
@@ -87,6 +87,7 @@ static void up_rel_func(struct kref *kref)
 	struct mlx5_uars_page *up = container_of(kref, struct mlx5_uars_page, ref_count);
 
 	list_del(&up->list);
+	iounmap(up->map);
 	if (mlx5_cmd_free_uar(up->mdev, up->index))
 		mlx5_core_warn(up->mdev, "failed to free uar index %d\n", up->index);
 	kfree(up->reg_bitmap);
......
@@ -64,11 +64,11 @@
 	((u32)(prio_tc_tbl >> ((7 - prio) * 4)) & 0x7)
 
 static const struct qed_dcbx_app_metadata qed_dcbx_app_update[] = {
-	{DCBX_PROTOCOL_ISCSI, "ISCSI", QED_PCI_DEFAULT},
-	{DCBX_PROTOCOL_FCOE, "FCOE", QED_PCI_DEFAULT},
-	{DCBX_PROTOCOL_ROCE, "ROCE", QED_PCI_DEFAULT},
-	{DCBX_PROTOCOL_ROCE_V2, "ROCE_V2", QED_PCI_DEFAULT},
-	{DCBX_PROTOCOL_ETH, "ETH", QED_PCI_ETH}
+	{DCBX_PROTOCOL_ISCSI, "ISCSI", QED_PCI_ISCSI},
+	{DCBX_PROTOCOL_FCOE, "FCOE", QED_PCI_FCOE},
+	{DCBX_PROTOCOL_ROCE, "ROCE", QED_PCI_ETH_ROCE},
+	{DCBX_PROTOCOL_ROCE_V2, "ROCE_V2", QED_PCI_ETH_ROCE},
+	{DCBX_PROTOCOL_ETH, "ETH", QED_PCI_ETH},
 };
 
 static bool qed_dcbx_app_ethtype(u32 app_info_bitmap)
......
@@ -1516,11 +1516,12 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		spin_unlock_irqrestore(&priv->lock, flags);
 		return NETDEV_TX_BUSY;
 	}
-	entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * NUM_TX_DESC);
-	priv->tx_skb[q][entry / NUM_TX_DESC] = skb;
 
 	if (skb_put_padto(skb, ETH_ZLEN))
-		goto drop;
+		goto exit;
+
+	entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * NUM_TX_DESC);
+	priv->tx_skb[q][entry / NUM_TX_DESC] = skb;
 
 	buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
 		 entry / NUM_TX_DESC * DPTR_ALIGN;
......
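
The ravb reordering matters because skb_put_padto() frees the skb itself
when it fails: jumping to a label that frees it again was a double free,
and recording the skb in the tx ring before the call left a stale pointer
behind. The safe call shape, sketched:

	/* skb_put_padto() consumes the skb on error, so don't free it
	 * again and don't record it anywhere until it has succeeded */
	if (skb_put_padto(skb, ETH_ZLEN))
		goto exit;	/* no dev_kfree_skb() on this path */
	/* only after success: store skb in the tx ring bookkeeping */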
@@ -74,7 +74,10 @@ void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
 #define EFX_RXQ_MIN_ENT		128U
 #define EFX_TXQ_MIN_ENT(efx)	(2 * efx_tx_max_skb_descs(efx))
 
-#define EFX_TXQ_MAX_ENT(efx)	(EFX_WORKAROUND_35388(efx) ? \
+/* All EF10 architecture NICs steal one bit of the DMAQ size for various
+ * other purposes when counting TxQ entries, so we halve the queue size.
+ */
+#define EFX_TXQ_MAX_ENT(efx)	(EFX_WORKAROUND_EF10(efx) ? \
 				 EFX_MAX_DMAQ_SIZE / 2 : EFX_MAX_DMAQ_SIZE)
 
 static inline bool efx_rss_enabled(struct efx_nic *efx)
......
@@ -16,6 +16,7 @@
  */
 #define EFX_WORKAROUND_SIENA(efx) (efx_nic_rev(efx) == EFX_REV_SIENA_A0)
+#define EFX_WORKAROUND_EF10(efx) (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
 #define EFX_WORKAROUND_10G(efx) 1
 
 /* Bit-bashed I2C reads cause performance drop */
......
@@ -1017,8 +1017,8 @@ tc35815_free_queues(struct net_device *dev)
 			BUG_ON(lp->tx_skbs[i].skb != skb);
 #endif
 			if (skb) {
-				dev_kfree_skb(skb);
 				pci_unmap_single(lp->pci_dev, lp->tx_skbs[i].skb_dma, skb->len, PCI_DMA_TODEVICE);
+				dev_kfree_skb(skb);
 				lp->tx_skbs[i].skb = NULL;
 				lp->tx_skbs[i].skb_dma = 0;
 			}
......
@@ -751,7 +751,6 @@ struct netvsc_device {
 	u32 send_section_cnt;
 	u32 send_section_size;
 	unsigned long *send_section_map;
-	int map_words;
 
 	/* Used for NetVSP initialization protocol */
 	struct completion channel_init_wait;
......
@@ -236,6 +236,7 @@ static int netvsc_init_buf(struct hv_device *device)
 	struct netvsc_device *net_device;
 	struct nvsp_message *init_packet;
 	struct net_device *ndev;
+	size_t map_words;
 	int node;
 
 	net_device = get_outbound_net_device(device);
@@ -401,11 +402,9 @@ static int netvsc_init_buf(struct hv_device *device)
 		 net_device->send_section_size, net_device->send_section_cnt);
 
 	/* Setup state for managing the send buffer. */
-	net_device->map_words = DIV_ROUND_UP(net_device->send_section_cnt,
-					     BITS_PER_LONG);
+	map_words = DIV_ROUND_UP(net_device->send_section_cnt, BITS_PER_LONG);
 
-	net_device->send_section_map = kcalloc(net_device->map_words,
-					       sizeof(ulong), GFP_KERNEL);
+	net_device->send_section_map = kcalloc(map_words, sizeof(ulong), GFP_KERNEL);
 	if (net_device->send_section_map == NULL) {
 		ret = -ENOMEM;
 		goto cleanup;
@@ -683,7 +682,7 @@ static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
 	unsigned long *map_addr = net_device->send_section_map;
 	unsigned int i;
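
For reference, the bitmap-sizing idiom the netvsc change keeps (map_words
simply no longer needs to live in the device struct, since it is only used
at allocation time); a standalone sketch, not the driver code:

	/* one bit per send section, rounded up to whole longs */
	size_t map_words = DIV_ROUND_UP(section_cnt, BITS_PER_LONG);
	unsigned long *map = kcalloc(map_words, sizeof(unsigned long),
				     GFP_KERNEL);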