Commit 9140d8bd authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Doug Ledford:
 "No beating around the bush: this is a monster pull request for an -rc5
  kernel. Intel hit me with a series of fixes for TID processing.
  Mellanox hit me with a series for their UMR memory support.

  And we had one fix for siw that addresses the 32-bit build warnings.
  Because of the number of casts that had to be changed to properly
  silence the warnings, that one patch alone accounts for a full 40% of
  the LOC of this entire pull request. Given that this is the initial
  release kernel for siw, I'm trying to fix anything in it that we can,
  which adds to the impetus to take fixes like this one.

  I had to do a rebase early in the week. Jason thought he had put a
  patch on the rc queue that he needed so he could base some work off
  of it, but it had actually not been placed there. So he asked me (on
  Tuesday) to fix that up before pushing my wip branch to the official
  rc branch. I did, and that's why the early patches look like they
  were all committed at the same time on Tuesday. That bunch had been
  in my queue beforehand.

  The various patches all pass my test for being legitimate fixes and
  not attempts to slide new features or development into a late rc.
  Well, they were all fixes with the exception of a couple of cleanup
  patches people wrote to improve the fixes they also wrote (like a
  cleanup patch that moves UMR checking into a function so that the
  remaining UMR fix patches can reference it), so I left those in
  place too.

  My apologies for the LOC count and the number of patches here; it's
  just how the cards fell this cycle.

  Summary:

   - Fix siw buffer mapping issue

   - Fix siw 32/64 casting issues

   - Fix a KASAN access issue in bnxt_re

   - Fix several memory leaks (hfi1, mlx4)

   - Fix a NULL deref in cma_cleanup

   - Fixes for UMR memory support in mlx5 (4 patch series)

   - Fix namespace check for restrack

   - Fixes for counter support

   - Fixes for hfi1 TID processing (5 patch series)

   - Fix potential NULL deref in siw

   - Fix memory page calculations in mlx5"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (21 commits)
  RDMA/siw: Fix 64/32bit pointer inconsistency
  RDMA/siw: Fix SGL mapping issues
  RDMA/bnxt_re: Fix stack-out-of-bounds in bnxt_qplib_rcfw_send_message
  infiniband: hfi1: fix memory leaks
  infiniband: hfi1: fix a memory leak bug
  IB/mlx4: Fix memory leaks
  RDMA/cma: fix null-ptr-deref Read in cma_cleanup
  IB/mlx5: Block MR WR if UMR is not possible
  IB/mlx5: Fix MR re-registration flow to use UMR properly
  IB/mlx5: Report and handle ODP support properly
  IB/mlx5: Consolidate use_umr checks into single function
  RDMA/restrack: Rewrite PID namespace check to be reliable
  RDMA/counters: Properly implement PID checks
  IB/core: Fix NULL pointer dereference when bind QP to counter
  IB/hfi1: Drop stale TID RDMA packets that cause TIDErr
  IB/hfi1: Add additional checks when handling TID RDMA WRITE DATA packet
  IB/hfi1: Add additional checks when handling TID RDMA READ RESP packet
  IB/hfi1: Unsafe PSN checking for TID RDMA READ Resp packet
  IB/hfi1: Drop stale TID RDMA packets
  RDMA/siw: Fix potential NULL de-ref
  ...
parents b9bd6806 c536277e
@@ -4724,10 +4724,14 @@ static int __init cma_init(void)
 	if (ret)
 		goto err;
 
-	cma_configfs_init();
+	ret = cma_configfs_init();
+	if (ret)
+		goto err_ib;
 
 	return 0;
 
+err_ib:
+	ib_unregister_client(&cma_client);
 err:
 	unregister_netdevice_notifier(&cma_nb);
 	ib_sa_unregister_client(&sa_client);
...
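The cma_init() fix reads as the kernel's standard goto-unwind idiom: the return value of cma_configfs_init() is no longer ignored, and a failure now unregisters the IB client registered just before it, which appears to be how the cma_cleanup() NULL deref arose. A minimal sketch of the idiom, with hypothetical step_a()/step_b()/undo_a() standing in for the real registration calls:

    /* Sketch of the goto-unwind idiom; step_a/step_b/undo_a are
     * hypothetical stand-ins, not functions from the patch.
     */
    int step_a(void);
    int step_b(void);
    void undo_a(void);

    static int init_chain(void)
    {
        int ret;

        ret = step_a();
        if (ret)
            return ret;

        ret = step_b();     /* previously its result was dropped */
        if (ret)
            goto err_a;     /* unwind in reverse order of setup */

        return 0;

    err_a:
        undo_a();
        return ret;
    }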
@@ -149,13 +149,11 @@ static bool auto_mode_match(struct ib_qp *qp, struct rdma_counter *counter,
 	struct auto_mode_param *param = &counter->mode.param;
 	bool match = true;
 
-	if (rdma_is_kernel_res(&counter->res) != rdma_is_kernel_res(&qp->res))
+	if (!rdma_is_visible_in_pid_ns(&qp->res))
 		return false;
 
-	/* Ensure that counter belong to right PID */
-	if (!rdma_is_kernel_res(&counter->res) &&
-	    !rdma_is_kernel_res(&qp->res) &&
-	    (task_pid_vnr(counter->res.task) != current->pid))
+	/* Ensure that counter belongs to the right PID */
+	if (task_pid_nr(counter->res.task) != task_pid_nr(qp->res.task))
 		return false;
 
 	if (auto_mask & RDMA_COUNTER_MASK_QP_TYPE)
@@ -424,7 +422,7 @@ static struct ib_qp *rdma_counter_get_qp(struct ib_device *dev, u32 qp_num)
 	return qp;
 
 err:
-	rdma_restrack_put(&qp->res);
+	rdma_restrack_put(res);
 	return NULL;
 }
...
@@ -382,8 +382,7 @@ static int fill_res_info(struct sk_buff *msg, struct ib_device *device)
 	for (i = 0; i < RDMA_RESTRACK_MAX; i++) {
 		if (!names[i])
 			continue;
-		curr = rdma_restrack_count(device, i,
-					   task_active_pid_ns(current));
+		curr = rdma_restrack_count(device, i);
 		ret = fill_res_info_entry(msg, names[i], curr);
 		if (ret)
 			goto err;
...
@@ -107,10 +107,8 @@ void rdma_restrack_clean(struct ib_device *dev)
  * rdma_restrack_count() - the current usage of specific object
  * @dev:  IB device
  * @type: actual type of object to operate
- * @ns:   PID namespace
  */
-int rdma_restrack_count(struct ib_device *dev, enum rdma_restrack_type type,
-			struct pid_namespace *ns)
+int rdma_restrack_count(struct ib_device *dev, enum rdma_restrack_type type)
 {
 	struct rdma_restrack_root *rt = &dev->res[type];
 	struct rdma_restrack_entry *e;
@@ -119,10 +117,9 @@ int rdma_restrack_count(struct ib_device *dev, enum rdma_restrack_type type,
 
 	xa_lock(&rt->xa);
 	xas_for_each(&xas, e, U32_MAX) {
-		if (ns == &init_pid_ns ||
-		    (!rdma_is_kernel_res(e) &&
-		     ns == task_active_pid_ns(e->task)))
-			cnt++;
+		if (!rdma_is_visible_in_pid_ns(e))
+			continue;
+		cnt++;
 	}
 	xa_unlock(&rt->xa);
 	return cnt;
@@ -360,5 +357,7 @@ bool rdma_is_visible_in_pid_ns(struct rdma_restrack_entry *res)
 	 */
 	if (rdma_is_kernel_res(res))
 		return task_active_pid_ns(current) == &init_pid_ns;
-	return task_active_pid_ns(current) == task_active_pid_ns(res->task);
+
+	/* PID 0 means that resource is not found in current namespace */
+	return task_pid_vnr(res->task);
 }
@@ -379,14 +379,9 @@ EXPORT_SYMBOL(ib_umem_release);
 
 int ib_umem_page_count(struct ib_umem *umem)
 {
-	int i;
-	int n;
+	int i, n = 0;
 	struct scatterlist *sg;
 
-	if (umem->is_odp)
-		return ib_umem_num_pages(umem);
-
-	n = 0;
 	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i)
 		n += sg_dma_len(sg) >> PAGE_SHIFT;
...
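With the ODP special case gone, ib_umem_page_count() is a plain sum over the DMA-mapped scatterlist, where each entry contributes sg_dma_len(sg) >> PAGE_SHIFT pages. A worked sketch with hypothetical entry lengths and 4 KiB pages (PAGE_SHIFT == 12):

    /* Hypothetical DMA lengths, not data from the patch. */
    static const unsigned int dma_len[] = { 8192, 4096, 16384 };

    static int page_count(void)
    {
        int i, n = 0;

        for (i = 0; i < 3; i++)
            n += dma_len[i] >> 12;  /* 2 + 1 + 4 pages */
        return n;                   /* == 7 */
    }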
@@ -136,6 +136,13 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
 		spin_unlock_irqrestore(&cmdq->lock, flags);
 		return -EBUSY;
 	}
+
+	size = req->cmd_size;
+	/* change the cmd_size to the number of 16byte cmdq unit.
+	 * req->cmd_size is modified here
+	 */
+	bnxt_qplib_set_cmd_slots(req);
+
 	memset(resp, 0, sizeof(*resp));
 	crsqe->resp = (struct creq_qp_event *)resp;
 	crsqe->resp->cookie = req->cookie;
@@ -150,7 +157,6 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
 
 	cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr;
 	preq = (u8 *)req;
-	size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS;
 	do {
 		/* Locate the next cmdq slot */
 		sw_prod = HWQ_CMP(cmdq->prod, cmdq);
...
@@ -55,9 +55,7 @@
 	do {								\
 		memset(&(req), 0, sizeof((req)));			\
 		(req).opcode = CMDQ_BASE_OPCODE_##CMD;			\
-		(req).cmd_size = (sizeof((req)) +			\
-				  BNXT_QPLIB_CMDQE_UNITS - 1) /		\
-				 BNXT_QPLIB_CMDQE_UNITS;		\
+		(req).cmd_size = sizeof((req));				\
 		(req).flags = cpu_to_le16(cmd_flags);			\
 	} while (0)
@@ -95,6 +93,13 @@ static inline u32 bnxt_qplib_cmdqe_cnt_per_pg(u32 depth)
 		 BNXT_QPLIB_CMDQE_UNITS);
 }
 
+/* Set the cmd_size to a factor of CMDQE unit */
+static inline void bnxt_qplib_set_cmd_slots(struct cmdq_base *req)
+{
+	req->cmd_size = (req->cmd_size + BNXT_QPLIB_CMDQE_UNITS - 1) /
+			BNXT_QPLIB_CMDQE_UNITS;
+}
+
 #define MAX_CMDQ_IDX(depth)		((depth) - 1)
 
 static inline u32 bnxt_qplib_max_cmdq_idx_per_pg(u32 depth)
...
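Taken together with the first bnxt_re hunk, the diff suggests how the KASAN stack-out-of-bounds arose: RCFW_CMD_PREP used to store cmd_size pre-divided into CMDQE units, and __send_message then multiplied it back up (size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS), so the copy length was the on-stack struct size rounded up to a unit multiple rather than the struct size itself. The fix keeps cmd_size in bytes until send time, snapshots the exact byte count, and only then converts to slots. A small sketch of the slot math, assuming the 16-byte unit named in the "16byte cmdq unit" comment above:

    /* Assumes BNXT_QPLIB_CMDQE_UNITS == 16, per the comment in the
     * first hunk; illustrative only.
     */
    #define CMDQE_UNITS 16U

    static inline unsigned int cmd_slots(unsigned int bytes)
    {
        return (bytes + CMDQE_UNITS - 1) / CMDQE_UNITS; /* ceil */
    }

    /* cmd_slots(34) == 3: a 34-byte command occupies three 16-byte
     * slots, yet only 34 bytes may be read from the source struct.
     */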
@@ -141,12 +141,14 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf,
 	if (!data)
 		return -ENOMEM;
 	copy = min(len, datalen - 1);
-	if (copy_from_user(data, buf, copy))
-		return -EFAULT;
+	if (copy_from_user(data, buf, copy)) {
+		ret = -EFAULT;
+		goto free_data;
+	}
 
 	ret = debugfs_file_get(file->f_path.dentry);
 	if (unlikely(ret))
-		return ret;
+		goto free_data;
 	ptr = data;
 	token = ptr;
 	for (ptr = data; *ptr; ptr = end + 1, token = ptr) {
@@ -195,6 +197,7 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf,
 	ret = len;
 
 	debugfs_file_put(file->f_path.dentry);
+free_data:
 	kfree(data);
 	return ret;
 }
@@ -214,7 +217,7 @@ static ssize_t fault_opcodes_read(struct file *file, char __user *buf,
 		return -ENOMEM;
 	ret = debugfs_file_get(file->f_path.dentry);
 	if (unlikely(ret))
-		return ret;
+		goto free_data;
 	bit = find_first_bit(fault->opcodes, bitsize);
 	while (bit < bitsize) {
 		zero = find_next_zero_bit(fault->opcodes, bitsize, bit);
@@ -232,6 +235,7 @@ static ssize_t fault_opcodes_read(struct file *file, char __user *buf,
 	data[size - 1] = '\n';
 	data[size] = '\0';
 	ret = simple_read_from_buffer(buf, len, pos, data, size);
+free_data:
 	kfree(data);
 	return ret;
 }
...
@@ -2574,18 +2574,9 @@ void hfi1_kern_read_tid_flow_free(struct rvt_qp *qp)
 	hfi1_kern_clear_hw_flow(priv->rcd, qp);
 }
 
-static bool tid_rdma_tid_err(struct hfi1_ctxtdata *rcd,
-			     struct hfi1_packet *packet, u8 rcv_type,
-			     u8 opcode)
+static bool tid_rdma_tid_err(struct hfi1_packet *packet, u8 rcv_type)
 {
 	struct rvt_qp *qp = packet->qp;
-	struct hfi1_qp_priv *qpriv = qp->priv;
-	u32 ipsn;
-	struct ib_other_headers *ohdr = packet->ohdr;
-	struct rvt_ack_entry *e;
-	struct tid_rdma_request *req;
-	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
-	u32 i;
 
 	if (rcv_type >= RHF_RCV_TYPE_IB)
 		goto done;
@@ -2602,41 +2593,9 @@ static bool tid_rdma_tid_err(struct hfi1_ctxtdata *rcd,
 	if (rcv_type == RHF_RCV_TYPE_EAGER) {
 		hfi1_restart_rc(qp, qp->s_last_psn + 1, 1);
 		hfi1_schedule_send(qp);
-		goto done_unlock;
 	}
 
-	/*
-	 * For TID READ response, error out QP after freeing the tid
-	 * resources.
-	 */
-	if (opcode == TID_OP(READ_RESP)) {
-		ipsn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.r_rsp.verbs_psn));
-		if (cmp_psn(ipsn, qp->s_last_psn) > 0 &&
-		    cmp_psn(ipsn, qp->s_psn) < 0) {
-			hfi1_kern_read_tid_flow_free(qp);
-			spin_unlock(&qp->s_lock);
-			rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
-			goto done;
-		}
-		goto done_unlock;
-	}
-
-	/*
-	 * Error out the qp for TID RDMA WRITE
-	 */
-	hfi1_kern_clear_hw_flow(qpriv->rcd, qp);
-	for (i = 0; i < rvt_max_atomic(rdi); i++) {
-		e = &qp->s_ack_queue[i];
-		if (e->opcode == TID_OP(WRITE_REQ)) {
-			req = ack_to_tid_req(e);
-			hfi1_kern_exp_rcv_clear_all(req);
-		}
-	}
-	spin_unlock(&qp->s_lock);
-	rvt_rc_error(qp, IB_WC_LOC_LEN_ERR);
-	goto done;
-
-done_unlock:
+	/* Since no payload is delivered, just drop the packet */
 	spin_unlock(&qp->s_lock);
 done:
 	return true;
@@ -2687,12 +2646,12 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
 	u32 fpsn;
 
 	lockdep_assert_held(&qp->r_lock);
+	spin_lock(&qp->s_lock);
 	/* If the psn is out of valid range, drop the packet */
 	if (cmp_psn(ibpsn, qp->s_last_psn) < 0 ||
 	    cmp_psn(ibpsn, qp->s_psn) > 0)
-		return ret;
+		goto s_unlock;
 
-	spin_lock(&qp->s_lock);
 	/*
 	 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
 	 * requests and implicitly NAK RDMA read and atomic requests issued
@@ -2740,9 +2699,12 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
 
 			wqe = do_rc_completion(qp, wqe, ibp);
 			if (qp->s_acked == qp->s_tail)
-				break;
+				goto s_unlock;
 		}
+
+		if (qp->s_acked == qp->s_tail)
+			goto s_unlock;
+
 		/* Handle the eflags for the request */
 		if (wqe->wr.opcode != IB_WR_TID_RDMA_READ)
 			goto s_unlock;
@@ -2922,7 +2884,7 @@ bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
 		if (lnh == HFI1_LRH_GRH)
 			goto r_unlock;
 
-		if (tid_rdma_tid_err(rcd, packet, rcv_type, opcode))
+		if (tid_rdma_tid_err(packet, rcv_type))
 			goto r_unlock;
 	}
@@ -2942,8 +2904,15 @@ bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
 	 */
 	spin_lock(&qp->s_lock);
 	qpriv = qp->priv;
+	if (qpriv->r_tid_tail == HFI1_QP_WQE_INVALID ||
+	    qpriv->r_tid_tail == qpriv->r_tid_head)
+		goto unlock;
 	e = &qp->s_ack_queue[qpriv->r_tid_tail];
+	if (e->opcode != TID_OP(WRITE_REQ))
+		goto unlock;
 	req = ack_to_tid_req(e);
+	if (req->comp_seg == req->cur_seg)
+		goto unlock;
 	flow = &req->flows[req->clear_tail];
 	trace_hfi1_eflags_err_write(qp, rcv_type, rte, psn);
 	trace_hfi1_rsp_handle_kdeth_eflags(qp, psn);
@@ -4509,7 +4478,7 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
 	struct rvt_swqe *wqe;
 	struct tid_rdma_request *req;
 	struct tid_rdma_flow *flow;
-	u32 aeth, psn, req_psn, ack_psn, resync_psn, ack_kpsn;
+	u32 aeth, psn, req_psn, ack_psn, flpsn, resync_psn, ack_kpsn;
 	unsigned long flags;
 	u16 fidx;
@@ -4538,6 +4507,9 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
 		ack_kpsn--;
 	}
 
+	if (unlikely(qp->s_acked == qp->s_tail))
+		goto ack_op_err;
+
 	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
 	if (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE)
@@ -4550,7 +4522,8 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
 	trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow);
 
 	/* Drop stale ACK/NAK */
-	if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.spsn)) < 0)
+	if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.spsn)) < 0 ||
+	    cmp_psn(req_psn, flow->flow_state.resp_ib_psn) < 0)
 		goto ack_op_err;
 
 	while (cmp_psn(ack_kpsn,
@@ -4712,7 +4685,12 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
 		switch ((aeth >> IB_AETH_CREDIT_SHIFT) &
 			IB_AETH_CREDIT_MASK) {
 		case 0: /* PSN sequence error */
+			if (!req->flows)
+				break;
 			flow = &req->flows[req->acked_tail];
+			flpsn = full_flow_psn(flow, flow->flow_state.lpsn);
+			if (cmp_psn(psn, flpsn) > 0)
+				break;
 			trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail,
 							flow);
 			req->r_ack_psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
...
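Several of the checks added above (the stale ACK/NAK test and the flpsn comparison) hinge on wraparound-safe PSN ordering via cmp_psn(). As a rough illustration of how such a comparison is conventionally implemented for a 24-bit PSN space (an assumption about the semantics, not the driver's exact definition), the masked difference is sign-extended so that ordering survives wraparound:

    /* Illustrative 24-bit PSN comparison; returns <0, 0, >0 like
     * strcmp. Assumes two's complement int and PSN distances < 2^23.
     */
    static int psn_cmp24(unsigned int a, unsigned int b)
    {
        return (int)((a - b) << 8) >> 8; /* sign-extend 24-bit delta */
    }

    /* psn_cmp24(0x000001, 0xFFFFFF) > 0: PSN 1 follows 0xFFFFFF after
     * the counter wraps, even though 1 < 0xFFFFFF numerically.
     */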
@@ -1677,8 +1677,6 @@ static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
 				    tx_buf_size, DMA_TO_DEVICE);
 		kfree(tun_qp->tx_ring[i].buf.addr);
 	}
-	kfree(tun_qp->tx_ring);
-	tun_qp->tx_ring = NULL;
 	i = MLX4_NUM_TUNNEL_BUFS;
 err:
 	while (i > 0) {
@@ -1687,6 +1685,8 @@ static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
 				    rx_buf_size, DMA_FROM_DEVICE);
 		kfree(tun_qp->ring[i].addr);
 	}
+	kfree(tun_qp->tx_ring);
+	tun_qp->tx_ring = NULL;
 	kfree(tun_qp->ring);
 	tun_qp->ring = NULL;
 	return -ENOMEM;
...
@@ -1023,7 +1023,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
 	props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;
 
 	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
-		if (MLX5_CAP_GEN(mdev, pg))
+		if (dev->odp_caps.general_caps & IB_ODP_SUPPORT)
 			props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
 		props->odp_caps = dev->odp_caps;
 	}
@@ -6139,6 +6139,8 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
 		dev->port[i].roce.last_port_state = IB_PORT_DOWN;
 	}
 
+	mlx5_ib_internal_fill_odp_caps(dev);
+
 	err = mlx5_ib_init_multiport_master(dev);
 	if (err)
 		return err;
@@ -6563,8 +6565,6 @@ static void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev)
 
 static int mlx5_ib_stage_odp_init(struct mlx5_ib_dev *dev)
 {
-	mlx5_ib_internal_fill_odp_caps(dev);
-
 	return mlx5_ib_odp_init_one(dev);
 }
...
@@ -57,9 +57,10 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
 	int entry;
 
 	if (umem->is_odp) {
-		unsigned int page_shift = to_ib_umem_odp(umem)->page_shift;
+		struct ib_umem_odp *odp = to_ib_umem_odp(umem);
+		unsigned int page_shift = odp->page_shift;
 
-		*ncont = ib_umem_page_count(umem);
+		*ncont = ib_umem_odp_num_pages(odp);
 		*count = *ncont << (page_shift - PAGE_SHIFT);
 		*shift = page_shift;
 		if (order)
...
@@ -1475,4 +1475,18 @@ int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
 			bool dyn_bfreg);
 
 int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter);
+
+static inline bool mlx5_ib_can_use_umr(struct mlx5_ib_dev *dev,
+				       bool do_modify_atomic)
+{
+	if (MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
+		return false;
+
+	if (do_modify_atomic &&
+	    MLX5_CAP_GEN(dev->mdev, atomic) &&
+	    MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
+		return false;
+
+	return true;
+}
 #endif /* MLX5_IB_H */
@@ -1293,9 +1293,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	if (err < 0)
 		return ERR_PTR(err);
 
-	use_umr = !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled) &&
-		  (!MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled) ||
-		   !MLX5_CAP_GEN(dev->mdev, atomic));
+	use_umr = mlx5_ib_can_use_umr(dev, true);
 
 	if (order <= mr_cache_max_order(dev) && use_umr) {
 		mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
@@ -1448,7 +1446,8 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
 		goto err;
 	}
 
-	if (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len)) {
+	if (!mlx5_ib_can_use_umr(dev, true) ||
+	    (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len))) {
 		/*
 		 * UMR can't be used - MKey needs to be replaced.
 		 */
...
@@ -301,7 +301,8 @@ void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
 
 	memset(caps, 0, sizeof(*caps));
 
-	if (!MLX5_CAP_GEN(dev->mdev, pg))
+	if (!MLX5_CAP_GEN(dev->mdev, pg) ||
+	    !mlx5_ib_can_use_umr(dev, true))
 		return;
 
 	caps->general_caps = IB_ODP_SUPPORT;
@@ -355,7 +356,8 @@ void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
 
 	if (MLX5_CAP_GEN(dev->mdev, fixed_buffer_size) &&
 	    MLX5_CAP_GEN(dev->mdev, null_mkey) &&
-	    MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
+	    MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset) &&
+	    !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled))
 		caps->general_caps |= IB_ODP_SUPPORT_IMPLICIT;
 
 	return;
@@ -1622,8 +1624,10 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
 {
 	int ret = 0;
 
-	if (dev->odp_caps.general_caps & IB_ODP_SUPPORT)
-		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_odp_ops);
+	if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT))
+		return ret;
+
+	ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_odp_ops);
 
 	if (dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT) {
 		ret = mlx5_cmd_null_mkey(dev->mdev, &dev->null_mkey);
@@ -1633,9 +1637,6 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
 		}
 	}
 
-	if (!MLX5_CAP_GEN(dev->mdev, pg))
-		return ret;
-
 	ret = mlx5_ib_create_pf_eq(dev, &dev->odp_pf_eq);
 
 	return ret;
@@ -1643,7 +1644,7 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
 
 void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *dev)
 {
-	if (!MLX5_CAP_GEN(dev->mdev, pg))
+	if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT))
 		return;
 
 	mlx5_ib_destroy_pf_eq(dev, &dev->odp_pf_eq);
...
@@ -4162,7 +4162,7 @@ static u64 get_xlt_octo(u64 bytes)
 	       MLX5_IB_UMR_OCTOWORD;
 }
 
-static __be64 frwr_mkey_mask(void)
+static __be64 frwr_mkey_mask(bool atomic)
 {
 	u64 result;
 
@@ -4175,10 +4175,12 @@ static __be64 frwr_mkey_mask(void)
 		MLX5_MKEY_MASK_LW		|
 		MLX5_MKEY_MASK_RR		|
 		MLX5_MKEY_MASK_RW		|
-		MLX5_MKEY_MASK_A		|
 		MLX5_MKEY_MASK_SMALL_FENCE	|
 		MLX5_MKEY_MASK_FREE;
 
+	if (atomic)
+		result |= MLX5_MKEY_MASK_A;
+
 	return cpu_to_be64(result);
 }
 
@@ -4204,7 +4206,7 @@ static __be64 sig_mkey_mask(void)
 }
 
 static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
-			    struct mlx5_ib_mr *mr, u8 flags)
+			    struct mlx5_ib_mr *mr, u8 flags, bool atomic)
 {
 	int size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;
 
@@ -4212,7 +4214,7 @@ static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
 
 	umr->flags = flags;
 	umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size));
-	umr->mkey_mask = frwr_mkey_mask();
+	umr->mkey_mask = frwr_mkey_mask(atomic);
 }
 
 static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr)
@@ -4811,10 +4813,22 @@ static int set_reg_wr(struct mlx5_ib_qp *qp,
 {
 	struct mlx5_ib_mr *mr = to_mmr(wr->mr);
 	struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd);
+	struct mlx5_ib_dev *dev = to_mdev(pd->ibpd.device);
 	int mr_list_size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;
 	bool umr_inline = mr_list_size <= MLX5_IB_SQ_UMR_INLINE_THRESHOLD;
+	bool atomic = wr->access & IB_ACCESS_REMOTE_ATOMIC;
 	u8 flags = 0;
 
+	if (!mlx5_ib_can_use_umr(dev, atomic)) {
+		mlx5_ib_warn(to_mdev(qp->ibqp.device),
+			     "Fast update of %s for MR is disabled\n",
+			     (MLX5_CAP_GEN(dev->mdev,
+					   umr_modify_entity_size_disabled)) ?
+				     "entity size" :
+				     "atomic access");
+		return -EINVAL;
+	}
+
 	if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) {
 		mlx5_ib_warn(to_mdev(qp->ibqp.device),
 			     "Invalid IB_SEND_INLINE send flag\n");
@@ -4826,7 +4840,7 @@ static int set_reg_wr(struct mlx5_ib_qp *qp,
 	if (umr_inline)
 		flags |= MLX5_UMR_INLINE;
 
-	set_reg_umr_seg(*seg, mr, flags);
+	set_reg_umr_seg(*seg, mr, flags, atomic);
 	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
 	*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
 	handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
...
@@ -138,9 +138,9 @@ struct siw_umem {
 };
 
 struct siw_pble {
-	u64 addr; /* Address of assigned user buffer */
-	u64 size; /* Size of this entry */
-	u64 pbl_off; /* Total offset from start of PBL */
+	dma_addr_t addr;	/* Address of assigned buffer */
+	unsigned int size;	/* Size of this entry */
+	unsigned long pbl_off;	/* Total offset from start of PBL */
 };
 
 struct siw_pbl {
@@ -734,7 +734,7 @@ static inline void siw_crc_skb(struct siw_rx_stream *srx, unsigned int len)
 		  "MEM[0x%08x] %s: " fmt, mem->stag, __func__, ##__VA_ARGS__)
 
 #define siw_dbg_cep(cep, fmt, ...)					\
-	ibdev_dbg(&cep->sdev->base_dev, "CEP[0x%p] %s: " fmt,		\
+	ibdev_dbg(&cep->sdev->base_dev, "CEP[0x%pK] %s: " fmt,		\
 		  cep, __func__, ##__VA_ARGS__)
 
 void siw_cq_flush(struct siw_cq *cq);
...
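This siw_pble change is the heart of the 64/32-bit fix called out in the merge text: storing addresses and offsets as u64 and casting them to pointers trips -Wint-to-pointer-cast on 32-bit builds, where pointers are 4 bytes wide. Giving each field its natural type (dma_addr_t, unsigned int, unsigned long) removes most of the casts at the source. A minimal sketch of the warning and the conventional uintptr_t remedy, using a hypothetical struct rather than the driver's:

    #include <stdint.h>

    struct entry {              /* hypothetical, not from the patch */
        uint64_t addr;          /* fixed-width storage of an address */
    };

    static void *as_pointer(const struct entry *e)
    {
        /* (void *)e->addr warns on 32-bit targets: the u64 value is
         * narrowed to a 32-bit pointer. Casting through uintptr_t
         * makes the narrowing explicit and silences the warning.
         */
        return (void *)(uintptr_t)e->addr;
    }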
@@ -355,8 +355,8 @@ static int siw_cm_upcall(struct siw_cep *cep, enum iw_cm_event_type reason,
 		getname_local(cep->sock, &event.local_addr);
 		getname_peer(cep->sock, &event.remote_addr);
 	}
-	siw_dbg_cep(cep, "[QP %u]: id 0x%p, reason=%d, status=%d\n",
-		    cep->qp ? qp_id(cep->qp) : -1, id, reason, status);
+	siw_dbg_cep(cep, "[QP %u]: reason=%d, status=%d\n",
+		    cep->qp ? qp_id(cep->qp) : UINT_MAX, reason, status);