author     Slava Shwartsman <slavash@FreeBSD.org>    2018-12-05 13:24:12 +0000
committer  Slava Shwartsman <slavash@FreeBSD.org>    2018-12-05 13:24:12 +0000
commit     5ace00dffe729c3c98248820210596da18a4bd88 (patch)
tree       ec09142a89c30ed5e8727bc0d72b96ae1124e88c /sys/ofed
parent     0231669450f728f84b351ae7a987ddb6d89dac0e (diff)
ibcore: Fix sleeping in atomic when RoCE is used
A couple of places in the CM do

    spin_lock_irq(&cm_id_priv->lock);
    ...
    if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))

However when the underlying transport is RoCE, this leads to a sleeping
function being called with the lock held - the callchain is

    cm_alloc_response_msg() ->
      ib_create_ah_from_wc() ->
        ib_init_ah_from_wc() ->
          rdma_addr_find_l2_eth_by_grh() ->
            rdma_resolve_ip()

and rdma_resolve_ip() starts out by doing

    req = kzalloc(sizeof *req, GFP_KERNEL);

not to mention rdma_addr_find_l2_eth_by_grh() doing

    wait_for_completion(&ctx.comp);

to wait for the task that rdma_resolve_ip() queues up.

Fix this by moving the AH creation out of the lock.

Linux commit:
c76161181193985087cd716fdf69b5cb6cf9ee85

Submitted by:   hselasky@
Approved by:    hselasky (mentor)
MFC after:      1 week
Sponsored by:   Mellanox Technologies
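To make the bug concrete, here is a condensed sketch of the duplicate-DREQ response path before the fix. The identifiers come from the commit message and the diff below, but the assembled fragment is an illustration of the pattern, not the verbatim FreeBSD source:

    /*
     * Before the fix: the whole response path, including the AH creation
     * hidden inside cm_alloc_response_msg(), ran with cm_id_priv->lock
     * held and interrupts disabled.
     */
    spin_lock_irq(&cm_id_priv->lock);

    /*
     * On RoCE this call ends up in rdma_resolve_ip(), which performs a
     * GFP_KERNEL allocation, and in rdma_addr_find_l2_eth_by_grh(), which
     * waits for a completion - both may sleep, which is illegal while a
     * spinlock is held with IRQs off ("sleeping in atomic context").
     */
    if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
            goto unlock;

    cm_format_drep((struct cm_drep_msg *)msg->mad, cm_id_priv,
                   cm_id_priv->private_data, cm_id_priv->private_data_len);
    spin_unlock_irq(&cm_id_priv->lock);

    if (ib_post_send_mad(msg, NULL))
            cm_free_msg(msg);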
Notes:
    svn path=/head/; revision=341532
Diffstat (limited to 'sys/ofed')
-rw-r--r--    sys/ofed/drivers/infiniband/core/ib_cm.c    63
1 file changed, 44 insertions(+), 19 deletions(-)
diff --git a/sys/ofed/drivers/infiniband/core/ib_cm.c b/sys/ofed/drivers/infiniband/core/ib_cm.c
index da3998b9e3a2..753d3ca00e46 100644
--- a/sys/ofed/drivers/infiniband/core/ib_cm.c
+++ b/sys/ofed/drivers/infiniband/core/ib_cm.c
@@ -332,11 +332,19 @@ out:
return ret;
}
-static int cm_alloc_response_msg(struct cm_port *port,
- struct ib_mad_recv_wc *mad_recv_wc,
- struct ib_mad_send_buf **msg)
+static struct ib_mad_send_buf *cm_alloc_response_msg_no_ah(struct cm_port *port,
+ struct ib_mad_recv_wc *mad_recv_wc)
+{
+ return ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
+ 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
+ GFP_ATOMIC,
+ IB_MGMT_BASE_VERSION);
+}
+
+static int cm_create_response_msg_ah(struct cm_port *port,
+ struct ib_mad_recv_wc *mad_recv_wc,
+ struct ib_mad_send_buf *msg)
{
- struct ib_mad_send_buf *m;
struct ib_ah *ah;
ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
@@ -344,27 +352,40 @@ static int cm_alloc_response_msg(struct cm_port *port,
if (IS_ERR(ah))
return PTR_ERR(ah);
- m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
- 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
- GFP_ATOMIC,
- IB_MGMT_BASE_VERSION);
- if (IS_ERR(m)) {
- ib_destroy_ah(ah);
- return PTR_ERR(m);
- }
- m->ah = ah;
- *msg = m;
+ msg->ah = ah;
return 0;
}
static void cm_free_msg(struct ib_mad_send_buf *msg)
{
- ib_destroy_ah(msg->ah);
+ if (msg->ah)
+ ib_destroy_ah(msg->ah);
if (msg->context[0])
cm_deref_id(msg->context[0]);
ib_free_send_mad(msg);
}
+static int cm_alloc_response_msg(struct cm_port *port,
+ struct ib_mad_recv_wc *mad_recv_wc,
+ struct ib_mad_send_buf **msg)
+{
+ struct ib_mad_send_buf *m;
+ int ret;
+
+ m = cm_alloc_response_msg_no_ah(port, mad_recv_wc);
+ if (IS_ERR(m))
+ return PTR_ERR(m);
+
+ ret = cm_create_response_msg_ah(port, mad_recv_wc, m);
+ if (ret) {
+ cm_free_msg(m);
+ return ret;
+ }
+
+ *msg = m;
+ return 0;
+}
+
static void * cm_copy_private_data(const void *private_data,
u8 private_data_len)
{
@@ -2331,7 +2352,8 @@ static int cm_dreq_handler(struct cm_work *work)
case IB_CM_TIMEWAIT:
atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
counter[CM_DREQ_COUNTER]);
- if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
+ msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc);
+ if (IS_ERR(msg))
goto unlock;
cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
@@ -2339,7 +2361,8 @@ static int cm_dreq_handler(struct cm_work *work)
cm_id_priv->private_data_len);
spin_unlock_irq(&cm_id_priv->lock);
- if (ib_post_send_mad(msg, NULL))
+ if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
+ ib_post_send_mad(msg, NULL))
cm_free_msg(msg);
goto deref;
case IB_CM_DREQ_RCVD:
@@ -2884,7 +2907,8 @@ static int cm_lap_handler(struct cm_work *work)
case IB_CM_MRA_LAP_SENT:
atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
counter[CM_LAP_COUNTER]);
- if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
+ msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc);
+ if (IS_ERR(msg))
goto unlock;
cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
@@ -2894,7 +2918,8 @@ static int cm_lap_handler(struct cm_work *work)
cm_id_priv->private_data_len);
spin_unlock_irq(&cm_id_priv->lock);
- if (ib_post_send_mad(msg, NULL))
+ if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
+ ib_post_send_mad(msg, NULL))
cm_free_msg(msg);
goto deref;
case IB_CM_LAP_RCVD:
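Taken together, the hunks above give the duplicate-DREQ (and, analogously, duplicate-LAP) path the following shape - a condensed, assumed sketch rather than verbatim source: only the GFP_ATOMIC send-buffer allocation remains under the lock, and the potentially sleeping AH creation is deferred until after spin_unlock_irq():

    spin_lock_irq(&cm_id_priv->lock);

    /* GFP_ATOMIC allocation only - safe while the spinlock is held. */
    msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc);
    if (IS_ERR(msg))
            goto unlock;

    cm_format_drep((struct cm_drep_msg *)msg->mad, cm_id_priv,
                   cm_id_priv->private_data, cm_id_priv->private_data_len);
    spin_unlock_irq(&cm_id_priv->lock);

    /*
     * The lock is no longer held, so the AH creation (which may sleep on
     * RoCE) is now legal.  If it fails, or if posting the MAD fails, the
     * message is freed; the new NULL check in cm_free_msg() makes that
     * safe even though msg->ah was never set.
     */
    if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
        ib_post_send_mad(msg, NULL))
            cm_free_msg(msg);
    goto deref;

The original cm_alloc_response_msg() is kept as a thin wrapper around the two new helpers for callers that do not hold the lock.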