1// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2/*
3 * Copyright (c) 2004-2007 Intel Corporation.  All rights reserved.
4 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
5 * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
6 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
7 * Copyright (c) 2019, Mellanox Technologies inc.  All rights reserved.
8 */
9
10#include <linux/completion.h>
11#include <linux/dma-mapping.h>
12#include <linux/device.h>
13#include <linux/module.h>
14#include <linux/err.h>
15#include <linux/idr.h>
16#include <linux/interrupt.h>
17#include <linux/random.h>
18#include <linux/rbtree.h>
19#include <linux/spinlock.h>
20#include <linux/slab.h>
21#include <linux/sysfs.h>
22#include <linux/workqueue.h>
23#include <linux/kdev_t.h>
24#include <linux/etherdevice.h>
25
26#include <rdma/ib_cache.h>
27#include <rdma/ib_cm.h>
28#include <rdma/ib_sysfs.h>
29#include "cm_msgs.h"
30#include "core_priv.h"
31#include "cm_trace.h"
32
33MODULE_AUTHOR("Sean Hefty");
34MODULE_DESCRIPTION("InfiniBand CM");
35MODULE_LICENSE("Dual BSD/GPL");
36
37static const char * const ibcm_rej_reason_strs[] = {
38	[IB_CM_REJ_NO_QP]			= "no QP",
39	[IB_CM_REJ_NO_EEC]			= "no EEC",
40	[IB_CM_REJ_NO_RESOURCES]		= "no resources",
41	[IB_CM_REJ_TIMEOUT]			= "timeout",
42	[IB_CM_REJ_UNSUPPORTED]			= "unsupported",
43	[IB_CM_REJ_INVALID_COMM_ID]		= "invalid comm ID",
44	[IB_CM_REJ_INVALID_COMM_INSTANCE]	= "invalid comm instance",
45	[IB_CM_REJ_INVALID_SERVICE_ID]		= "invalid service ID",
46	[IB_CM_REJ_INVALID_TRANSPORT_TYPE]	= "invalid transport type",
47	[IB_CM_REJ_STALE_CONN]			= "stale conn",
48	[IB_CM_REJ_RDC_NOT_EXIST]		= "RDC not exist",
49	[IB_CM_REJ_INVALID_GID]			= "invalid GID",
50	[IB_CM_REJ_INVALID_LID]			= "invalid LID",
51	[IB_CM_REJ_INVALID_SL]			= "invalid SL",
52	[IB_CM_REJ_INVALID_TRAFFIC_CLASS]	= "invalid traffic class",
53	[IB_CM_REJ_INVALID_HOP_LIMIT]		= "invalid hop limit",
54	[IB_CM_REJ_INVALID_PACKET_RATE]		= "invalid packet rate",
55	[IB_CM_REJ_INVALID_ALT_GID]		= "invalid alt GID",
56	[IB_CM_REJ_INVALID_ALT_LID]		= "invalid alt LID",
57	[IB_CM_REJ_INVALID_ALT_SL]		= "invalid alt SL",
58	[IB_CM_REJ_INVALID_ALT_TRAFFIC_CLASS]	= "invalid alt traffic class",
59	[IB_CM_REJ_INVALID_ALT_HOP_LIMIT]	= "invalid alt hop limit",
60	[IB_CM_REJ_INVALID_ALT_PACKET_RATE]	= "invalid alt packet rate",
61	[IB_CM_REJ_PORT_CM_REDIRECT]		= "port CM redirect",
62	[IB_CM_REJ_PORT_REDIRECT]		= "port redirect",
63	[IB_CM_REJ_INVALID_MTU]			= "invalid MTU",
64	[IB_CM_REJ_INSUFFICIENT_RESP_RESOURCES]	= "insufficient resp resources",
65	[IB_CM_REJ_CONSUMER_DEFINED]		= "consumer defined",
66	[IB_CM_REJ_INVALID_RNR_RETRY]		= "invalid RNR retry",
67	[IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID]	= "duplicate local comm ID",
68	[IB_CM_REJ_INVALID_CLASS_VERSION]	= "invalid class version",
69	[IB_CM_REJ_INVALID_FLOW_LABEL]		= "invalid flow label",
70	[IB_CM_REJ_INVALID_ALT_FLOW_LABEL]	= "invalid alt flow label",
71	[IB_CM_REJ_VENDOR_OPTION_NOT_SUPPORTED] =
72		"vendor option is not supported",
73};
74
75const char *__attribute_const__ ibcm_reject_msg(int reason)
76{
77	size_t index = reason;
78
79	if (index < ARRAY_SIZE(ibcm_rej_reason_strs) &&
80	    ibcm_rej_reason_strs[index])
81		return ibcm_rej_reason_strs[index];
82	else
83		return "unrecognized reason";
84}
85EXPORT_SYMBOL(ibcm_reject_msg);
86
87struct cm_id_private;
88struct cm_work;
89static int cm_add_one(struct ib_device *device);
90static void cm_remove_one(struct ib_device *device, void *client_data);
91static void cm_process_work(struct cm_id_private *cm_id_priv,
92			    struct cm_work *work);
93static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
94				   struct ib_cm_sidr_rep_param *param);
95static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv,
96			       const void *private_data, u8 private_data_len);
97static int cm_send_drep_locked(struct cm_id_private *cm_id_priv,
98			       void *private_data, u8 private_data_len);
99static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
100			      enum ib_cm_rej_reason reason, void *ari,
101			      u8 ari_length, const void *private_data,
102			      u8 private_data_len);
103
104static struct ib_client cm_client = {
105	.name   = "cm",
106	.add    = cm_add_one,
107	.remove = cm_remove_one
108};
109
110static struct ib_cm {
111	spinlock_t lock;
112	struct list_head device_list;
113	rwlock_t device_lock;
114	struct rb_root listen_service_table;
115	u64 listen_service_id;
116	/* struct rb_root peer_service_table; todo: fix peer to peer */
117	struct rb_root remote_qp_table;
118	struct rb_root remote_id_table;
119	struct rb_root remote_sidr_table;
120	struct xarray local_id_table;
121	u32 local_id_next;
122	__be32 random_id_operand;
123	struct list_head timewait_list;
124	struct workqueue_struct *wq;
125} cm;
126
127/* Counter indexes ordered by attribute ID */
128enum {
129	CM_REQ_COUNTER,
130	CM_MRA_COUNTER,
131	CM_REJ_COUNTER,
132	CM_REP_COUNTER,
133	CM_RTU_COUNTER,
134	CM_DREQ_COUNTER,
135	CM_DREP_COUNTER,
136	CM_SIDR_REQ_COUNTER,
137	CM_SIDR_REP_COUNTER,
138	CM_LAP_COUNTER,
139	CM_APR_COUNTER,
140	CM_ATTR_COUNT,
141	CM_ATTR_ID_OFFSET = 0x0010,
142};
143
144enum {
145	CM_XMIT,
146	CM_XMIT_RETRIES,
147	CM_RECV,
148	CM_RECV_DUPLICATES,
149	CM_COUNTER_GROUPS
150};
151
152struct cm_counter_attribute {
153	struct ib_port_attribute attr;
154	unsigned short group;
155	unsigned short index;
156};
157
158struct cm_port {
159	struct cm_device *cm_dev;
160	struct ib_mad_agent *mad_agent;
161	u32 port_num;
162	atomic_long_t counters[CM_COUNTER_GROUPS][CM_ATTR_COUNT];
163};
164
165struct cm_device {
166	struct kref kref;
167	struct list_head list;
168	spinlock_t mad_agent_lock;
169	struct ib_device *ib_device;
170	u8 ack_delay;
171	int going_down;
172	struct cm_port *port[];
173};
174
175struct cm_av {
176	struct cm_port *port;
177	struct rdma_ah_attr ah_attr;
178	u16 dlid_datapath;
179	u16 pkey_index;
180	u8 timeout;
181};
182
183struct cm_work {
184	struct delayed_work work;
185	struct list_head list;
186	struct cm_port *port;
187	struct ib_mad_recv_wc *mad_recv_wc;	/* Received MADs */
188	__be32 local_id;			/* Established / timewait */
189	__be32 remote_id;
190	struct ib_cm_event cm_event;
191	struct sa_path_rec path[];
192};
193
194struct cm_timewait_info {
195	struct cm_work work;
196	struct list_head list;
197	struct rb_node remote_qp_node;
198	struct rb_node remote_id_node;
199	__be64 remote_ca_guid;
200	__be32 remote_qpn;
201	u8 inserted_remote_qp;
202	u8 inserted_remote_id;
203};
204
205struct cm_id_private {
206	struct ib_cm_id	id;
207
208	struct rb_node service_node;
209	struct rb_node sidr_id_node;
210	u32 sidr_slid;
211	spinlock_t lock;	/* Do not acquire inside cm.lock */
212	struct completion comp;
213	refcount_t refcount;
214	/* Number of clients sharing this ib_cm_id. Only valid for listeners.
215	 * Protected by the cm.lock spinlock.
216	 */
217	int listen_sharecount;
218	struct rcu_head rcu;
219
220	struct ib_mad_send_buf *msg;
221	struct cm_timewait_info *timewait_info;
222	/* todo: use alternate port on send failure */
223	struct cm_av av;
224	struct cm_av alt_av;
225
226	void *private_data;
227	__be64 tid;
228	__be32 local_qpn;
229	__be32 remote_qpn;
230	enum ib_qp_type qp_type;
231	__be32 sq_psn;
232	__be32 rq_psn;
233	int timeout_ms;
234	enum ib_mtu path_mtu;
235	__be16 pkey;
236	u8 private_data_len;
237	u8 max_cm_retries;
238	u8 responder_resources;
239	u8 initiator_depth;
240	u8 retry_count;
241	u8 rnr_retry_count;
242	u8 service_timeout;
243	u8 target_ack_delay;
244
245	struct list_head work_list;
246	atomic_t work_count;
247
248	struct rdma_ucm_ece ece;
249};
250
251static void cm_dev_release(struct kref *kref)
252{
253	struct cm_device *cm_dev = container_of(kref, struct cm_device, kref);
254	u32 i;
255
256	rdma_for_each_port(cm_dev->ib_device, i)
257		kfree(cm_dev->port[i - 1]);
258
259	kfree(cm_dev);
260}
261
262static void cm_device_put(struct cm_device *cm_dev)
263{
264	kref_put(&cm_dev->kref, cm_dev_release);
265}
266
267static void cm_work_handler(struct work_struct *work);
268
269static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
270{
271	if (refcount_dec_and_test(&cm_id_priv->refcount))
272		complete(&cm_id_priv->comp);
273}
274
275static struct ib_mad_send_buf *cm_alloc_msg(struct cm_id_private *cm_id_priv)
276{
277	struct ib_mad_agent *mad_agent;
278	struct ib_mad_send_buf *m;
279	struct ib_ah *ah;
280
281	lockdep_assert_held(&cm_id_priv->lock);
282
283	if (!cm_id_priv->av.port)
284		return ERR_PTR(-EINVAL);
285
286	spin_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
287	mad_agent = cm_id_priv->av.port->mad_agent;
288	if (!mad_agent) {
289		m = ERR_PTR(-EINVAL);
290		goto out;
291	}
292
293	ah = rdma_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr, 0);
294	if (IS_ERR(ah)) {
295		m = ERR_CAST(ah);
296		goto out;
297	}
298
299	m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
300			       cm_id_priv->av.pkey_index,
301			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
302			       GFP_ATOMIC,
303			       IB_MGMT_BASE_VERSION);
304	if (IS_ERR(m)) {
305		rdma_destroy_ah(ah, 0);
306		goto out;
307	}
308
309	/* Timeout set by caller if response is expected. */
310	m->ah = ah;
311	m->retries = cm_id_priv->max_cm_retries;
312
313	refcount_inc(&cm_id_priv->refcount);
314	m->context[0] = cm_id_priv;
315
316out:
317	spin_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
318	return m;
319}
320
321static void cm_free_msg(struct ib_mad_send_buf *msg)
322{
323	struct cm_id_private *cm_id_priv = msg->context[0];
324
325	if (msg->ah)
326		rdma_destroy_ah(msg->ah, 0);
327	cm_deref_id(cm_id_priv);
328	ib_free_send_mad(msg);
329}
330
331static struct ib_mad_send_buf *
332cm_alloc_priv_msg(struct cm_id_private *cm_id_priv)
333{
334	struct ib_mad_send_buf *msg;
335
336	lockdep_assert_held(&cm_id_priv->lock);
337
338	msg = cm_alloc_msg(cm_id_priv);
339	if (IS_ERR(msg))
340		return msg;
341	cm_id_priv->msg = msg;
342	return msg;
343}
344
345static void cm_free_priv_msg(struct ib_mad_send_buf *msg)
346{
347	struct cm_id_private *cm_id_priv = msg->context[0];
348
349	lockdep_assert_held(&cm_id_priv->lock);
350
351	if (!WARN_ON(cm_id_priv->msg != msg))
352		cm_id_priv->msg = NULL;
353
354	if (msg->ah)
355		rdma_destroy_ah(msg->ah, 0);
356	cm_deref_id(cm_id_priv);
357	ib_free_send_mad(msg);
358}
359
360static struct ib_mad_send_buf *cm_alloc_response_msg_no_ah(struct cm_port *port,
361							   struct ib_mad_recv_wc *mad_recv_wc)
362{
363	return ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
364				  0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
365				  GFP_ATOMIC,
366				  IB_MGMT_BASE_VERSION);
367}
368
369static int cm_create_response_msg_ah(struct cm_port *port,
370				     struct ib_mad_recv_wc *mad_recv_wc,
371				     struct ib_mad_send_buf *msg)
372{
373	struct ib_ah *ah;
374
375	ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
376				  mad_recv_wc->recv_buf.grh, port->port_num);
377	if (IS_ERR(ah))
378		return PTR_ERR(ah);
379
380	msg->ah = ah;
381	return 0;
382}
383
384static int cm_alloc_response_msg(struct cm_port *port,
385				 struct ib_mad_recv_wc *mad_recv_wc,
386				 struct ib_mad_send_buf **msg)
387{
388	struct ib_mad_send_buf *m;
389	int ret;
390
391	m = cm_alloc_response_msg_no_ah(port, mad_recv_wc);
392	if (IS_ERR(m))
393		return PTR_ERR(m);
394
395	ret = cm_create_response_msg_ah(port, mad_recv_wc, m);
396	if (ret) {
397		ib_free_send_mad(m);
398		return ret;
399	}
400
401	*msg = m;
402	return 0;
403}
404
405static void cm_free_response_msg(struct ib_mad_send_buf *msg)
406{
407	if (msg->ah)
408		rdma_destroy_ah(msg->ah, 0);
409	ib_free_send_mad(msg);
410}
411
412static void *cm_copy_private_data(const void *private_data, u8 private_data_len)
413{
414	void *data;
415
416	if (!private_data || !private_data_len)
417		return NULL;
418
419	data = kmemdup(private_data, private_data_len, GFP_KERNEL);
420	if (!data)
421		return ERR_PTR(-ENOMEM);
422
423	return data;
424}
425
426static void cm_set_private_data(struct cm_id_private *cm_id_priv,
427				 void *private_data, u8 private_data_len)
428{
429	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
430		kfree(cm_id_priv->private_data);
431
432	cm_id_priv->private_data = private_data;
433	cm_id_priv->private_data_len = private_data_len;
434}
435
436static void cm_set_av_port(struct cm_av *av, struct cm_port *port)
437{
438	struct cm_port *old_port = av->port;
439
440	if (old_port == port)
441		return;
442
443	av->port = port;
444	if (old_port)
445		cm_device_put(old_port->cm_dev);
446	if (port)
447		kref_get(&port->cm_dev->kref);
448}
449
450static void cm_init_av_for_lap(struct cm_port *port, struct ib_wc *wc,
451			       struct rdma_ah_attr *ah_attr, struct cm_av *av)
452{
453	cm_set_av_port(av, port);
454	av->pkey_index = wc->pkey_index;
455	rdma_move_ah_attr(&av->ah_attr, ah_attr);
456}
457
458static int cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
459				   struct ib_grh *grh, struct cm_av *av)
460{
461	cm_set_av_port(av, port);
462	av->pkey_index = wc->pkey_index;
463	return ib_init_ah_attr_from_wc(port->cm_dev->ib_device,
464				       port->port_num, wc,
465				       grh, &av->ah_attr);
466}
467
468static struct cm_port *
469get_cm_port_from_path(struct sa_path_rec *path, const struct ib_gid_attr *attr)
470{
471	struct cm_device *cm_dev;
472	struct cm_port *port = NULL;
473	unsigned long flags;
474
475	if (attr) {
476		read_lock_irqsave(&cm.device_lock, flags);
477		list_for_each_entry(cm_dev, &cm.device_list, list) {
478			if (cm_dev->ib_device == attr->device) {
479				port = cm_dev->port[attr->port_num - 1];
480				break;
481			}
482		}
483		read_unlock_irqrestore(&cm.device_lock, flags);
484	} else {
485		/* SGID attribute can be NULL in following
486		 * conditions.
487		 * (a) Alternative path
488		 * (b) IB link layer without GRH
489		 * (c) LAP send messages
490		 */
491		read_lock_irqsave(&cm.device_lock, flags);
492		list_for_each_entry(cm_dev, &cm.device_list, list) {
493			attr = rdma_find_gid(cm_dev->ib_device,
494					     &path->sgid,
495					     sa_conv_pathrec_to_gid_type(path),
496					     NULL);
497			if (!IS_ERR(attr)) {
498				port = cm_dev->port[attr->port_num - 1];
499				break;
500			}
501		}
502		read_unlock_irqrestore(&cm.device_lock, flags);
503		if (port)
504			rdma_put_gid_attr(attr);
505	}
506	return port;
507}
508
509static int cm_init_av_by_path(struct sa_path_rec *path,
510			      const struct ib_gid_attr *sgid_attr,
511			      struct cm_av *av)
512{
513	struct rdma_ah_attr new_ah_attr;
514	struct cm_device *cm_dev;
515	struct cm_port *port;
516	int ret;
517
518	port = get_cm_port_from_path(path, sgid_attr);
519	if (!port)
520		return -EINVAL;
521	cm_dev = port->cm_dev;
522
523	ret = ib_find_cached_pkey(cm_dev->ib_device, port->port_num,
524				  be16_to_cpu(path->pkey), &av->pkey_index);
525	if (ret)
526		return ret;
527
528	cm_set_av_port(av, port);
529
530	/*
531	 * av->ah_attr might be initialized based on wc or during
532	 * request processing time which might have reference to sgid_attr.
533	 * So initialize a new ah_attr on stack.
534	 * If initialization fails, old ah_attr is used for sending any
535	 * responses. If initialization is successful, than new ah_attr
536	 * is used by overwriting the old one. So that right ah_attr
537	 * can be used to return an error response.
538	 */
539	ret = ib_init_ah_attr_from_path(cm_dev->ib_device, port->port_num, path,
540					&new_ah_attr, sgid_attr);
541	if (ret)
542		return ret;
543
544	av->timeout = path->packet_life_time + 1;
545	rdma_move_ah_attr(&av->ah_attr, &new_ah_attr);
546	return 0;
547}
548
549/* Move av created by cm_init_av_by_path(), so av.dgid is not moved */
550static void cm_move_av_from_path(struct cm_av *dest, struct cm_av *src)
551{
552	cm_set_av_port(dest, src->port);
553	cm_set_av_port(src, NULL);
554	dest->pkey_index = src->pkey_index;
555	rdma_move_ah_attr(&dest->ah_attr, &src->ah_attr);
556	dest->timeout = src->timeout;
557}
558
559static void cm_destroy_av(struct cm_av *av)
560{
561	rdma_destroy_ah_attr(&av->ah_attr);
562	cm_set_av_port(av, NULL);
563}
564
565static u32 cm_local_id(__be32 local_id)
566{
567	return (__force u32) (local_id ^ cm.random_id_operand);
568}
569
570static struct cm_id_private *cm_acquire_id(__be32 local_id, __be32 remote_id)
571{
572	struct cm_id_private *cm_id_priv;
573
574	rcu_read_lock();
575	cm_id_priv = xa_load(&cm.local_id_table, cm_local_id(local_id));
576	if (!cm_id_priv || cm_id_priv->id.remote_id != remote_id ||
577	    !refcount_inc_not_zero(&cm_id_priv->refcount))
578		cm_id_priv = NULL;
579	rcu_read_unlock();
580
581	return cm_id_priv;
582}
583
584/*
585 * Trivial helpers to strip endian annotation and compare; the
586 * endianness doesn't actually matter since we just need a stable
587 * order for the RB tree.
588 */
589static int be32_lt(__be32 a, __be32 b)
590{
591	return (__force u32) a < (__force u32) b;
592}
593
594static int be32_gt(__be32 a, __be32 b)
595{
596	return (__force u32) a > (__force u32) b;
597}
598
599static int be64_lt(__be64 a, __be64 b)
600{
601	return (__force u64) a < (__force u64) b;
602}
603
604static int be64_gt(__be64 a, __be64 b)
605{
606	return (__force u64) a > (__force u64) b;
607}
608
609/*
610 * Inserts a new cm_id_priv into the listen_service_table. Returns cm_id_priv
611 * if the new ID was inserted, NULL if it could not be inserted due to a
612 * collision, or the existing cm_id_priv ready for shared usage.
613 */
614static struct cm_id_private *cm_insert_listen(struct cm_id_private *cm_id_priv,
615					      ib_cm_handler shared_handler)
616{
617	struct rb_node **link = &cm.listen_service_table.rb_node;
618	struct rb_node *parent = NULL;
619	struct cm_id_private *cur_cm_id_priv;
620	__be64 service_id = cm_id_priv->id.service_id;
621	unsigned long flags;
622
623	spin_lock_irqsave(&cm.lock, flags);
624	while (*link) {
625		parent = *link;
626		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
627					  service_node);
628
629		if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
630			link = &(*link)->rb_left;
631		else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
632			link = &(*link)->rb_right;
633		else if (be64_lt(service_id, cur_cm_id_priv->id.service_id))
634			link = &(*link)->rb_left;
635		else if (be64_gt(service_id, cur_cm_id_priv->id.service_id))
636			link = &(*link)->rb_right;
637		else {
638			/*
639			 * Sharing an ib_cm_id with different handlers is not
640			 * supported
641			 */
642			if (cur_cm_id_priv->id.cm_handler != shared_handler ||
643			    cur_cm_id_priv->id.context ||
644			    WARN_ON(!cur_cm_id_priv->id.cm_handler)) {
645				spin_unlock_irqrestore(&cm.lock, flags);
646				return NULL;
647			}
648			refcount_inc(&cur_cm_id_priv->refcount);
649			cur_cm_id_priv->listen_sharecount++;
650			spin_unlock_irqrestore(&cm.lock, flags);
651			return cur_cm_id_priv;
652		}
653	}
654	cm_id_priv->listen_sharecount++;
655	rb_link_node(&cm_id_priv->service_node, parent, link);
656	rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
657	spin_unlock_irqrestore(&cm.lock, flags);
658	return cm_id_priv;
659}
660
661static struct cm_id_private *cm_find_listen(struct ib_device *device,
662					    __be64 service_id)
663{
664	struct rb_node *node = cm.listen_service_table.rb_node;
665	struct cm_id_private *cm_id_priv;
666
667	while (node) {
668		cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
669
670		if (device < cm_id_priv->id.device)
671			node = node->rb_left;
672		else if (device > cm_id_priv->id.device)
673			node = node->rb_right;
674		else if (be64_lt(service_id, cm_id_priv->id.service_id))
675			node = node->rb_left;
676		else if (be64_gt(service_id, cm_id_priv->id.service_id))
677			node = node->rb_right;
678		else {
679			refcount_inc(&cm_id_priv->refcount);
680			return cm_id_priv;
681		}
682	}
683	return NULL;
684}
685
686static struct cm_timewait_info *
687cm_insert_remote_id(struct cm_timewait_info *timewait_info)
688{
689	struct rb_node **link = &cm.remote_id_table.rb_node;
690	struct rb_node *parent = NULL;
691	struct cm_timewait_info *cur_timewait_info;
692	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
693	__be32 remote_id = timewait_info->work.remote_id;
694
695	while (*link) {
696		parent = *link;
697		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
698					     remote_id_node);
699		if (be32_lt(remote_id, cur_timewait_info->work.remote_id))
700			link = &(*link)->rb_left;
701		else if (be32_gt(remote_id, cur_timewait_info->work.remote_id))
702			link = &(*link)->rb_right;
703		else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
704			link = &(*link)->rb_left;
705		else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
706			link = &(*link)->rb_right;
707		else
708			return cur_timewait_info;
709	}
710	timewait_info->inserted_remote_id = 1;
711	rb_link_node(&timewait_info->remote_id_node, parent, link);
712	rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
713	return NULL;
714}
715
716static struct cm_id_private *cm_find_remote_id(__be64 remote_ca_guid,
717					       __be32 remote_id)
718{
719	struct rb_node *node = cm.remote_id_table.rb_node;
720	struct cm_timewait_info *timewait_info;
721	struct cm_id_private *res = NULL;
722
723	spin_lock_irq(&cm.lock);
724	while (node) {
725		timewait_info = rb_entry(node, struct cm_timewait_info,
726					 remote_id_node);
727		if (be32_lt(remote_id, timewait_info->work.remote_id))
728			node = node->rb_left;
729		else if (be32_gt(remote_id, timewait_info->work.remote_id))
730			node = node->rb_right;
731		else if (be64_lt(remote_ca_guid, timewait_info->remote_ca_guid))
732			node = node->rb_left;
733		else if (be64_gt(remote_ca_guid, timewait_info->remote_ca_guid))
734			node = node->rb_right;
735		else {
736			res = cm_acquire_id(timewait_info->work.local_id,
737					     timewait_info->work.remote_id);
738			break;
739		}
740	}
741	spin_unlock_irq(&cm.lock);
742	return res;
743}
744
745static struct cm_timewait_info *
746cm_insert_remote_qpn(struct cm_timewait_info *timewait_info)
747{
748	struct rb_node **link = &cm.remote_qp_table.rb_node;
749	struct rb_node *parent = NULL;
750	struct cm_timewait_info *cur_timewait_info;
751	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
752	__be32 remote_qpn = timewait_info->remote_qpn;
753
754	while (*link) {
755		parent = *link;
756		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
757					     remote_qp_node);
758		if (be32_lt(remote_qpn, cur_timewait_info->remote_qpn))
759			link = &(*link)->rb_left;
760		else if (be32_gt(remote_qpn, cur_timewait_info->remote_qpn))
761			link = &(*link)->rb_right;
762		else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
763			link = &(*link)->rb_left;
764		else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
765			link = &(*link)->rb_right;
766		else
767			return cur_timewait_info;
768	}
769	timewait_info->inserted_remote_qp = 1;
770	rb_link_node(&timewait_info->remote_qp_node, parent, link);
771	rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
772	return NULL;
773}
774
775static struct cm_id_private *
776cm_insert_remote_sidr(struct cm_id_private *cm_id_priv)
777{
778	struct rb_node **link = &cm.remote_sidr_table.rb_node;
779	struct rb_node *parent = NULL;
780	struct cm_id_private *cur_cm_id_priv;
781	__be32 remote_id = cm_id_priv->id.remote_id;
782
783	while (*link) {
784		parent = *link;
785		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
786					  sidr_id_node);
787		if (be32_lt(remote_id, cur_cm_id_priv->id.remote_id))
788			link = &(*link)->rb_left;
789		else if (be32_gt(remote_id, cur_cm_id_priv->id.remote_id))
790			link = &(*link)->rb_right;
791		else {
792			if (cur_cm_id_priv->sidr_slid < cm_id_priv->sidr_slid)
793				link = &(*link)->rb_left;
794			else if (cur_cm_id_priv->sidr_slid > cm_id_priv->sidr_slid)
795				link = &(*link)->rb_right;
796			else
797				return cur_cm_id_priv;
798		}
799	}
800	rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
801	rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
802	return NULL;
803}
804
805static struct cm_id_private *cm_alloc_id_priv(struct ib_device *device,
806					      ib_cm_handler cm_handler,
807					      void *context)
808{
809	struct cm_id_private *cm_id_priv;
810	u32 id;
811	int ret;
812
813	cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
814	if (!cm_id_priv)
815		return ERR_PTR(-ENOMEM);
816
817	cm_id_priv->id.state = IB_CM_IDLE;
818	cm_id_priv->id.device = device;
819	cm_id_priv->id.cm_handler = cm_handler;
820	cm_id_priv->id.context = context;
821	cm_id_priv->id.remote_cm_qpn = 1;
822
823	RB_CLEAR_NODE(&cm_id_priv->service_node);
824	RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
825	spin_lock_init(&cm_id_priv->lock);
826	init_completion(&cm_id_priv->comp);
827	INIT_LIST_HEAD(&cm_id_priv->work_list);
828	atomic_set(&cm_id_priv->work_count, -1);
829	refcount_set(&cm_id_priv->refcount, 1);
830
831	ret = xa_alloc_cyclic(&cm.local_id_table, &id, NULL, xa_limit_32b,
832			      &cm.local_id_next, GFP_KERNEL);
833	if (ret < 0)
834		goto error;
835	cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
836
837	return cm_id_priv;
838
839error:
840	kfree(cm_id_priv);
841	return ERR_PTR(ret);
842}
843
844/*
845 * Make the ID visible to the MAD handlers and other threads that use the
846 * xarray.
847 */
848static void cm_finalize_id(struct cm_id_private *cm_id_priv)
849{
850	xa_store(&cm.local_id_table, cm_local_id(cm_id_priv->id.local_id),
851		 cm_id_priv, GFP_ATOMIC);
852}
853
854struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
855				 ib_cm_handler cm_handler,
856				 void *context)
857{
858	struct cm_id_private *cm_id_priv;
859
860	cm_id_priv = cm_alloc_id_priv(device, cm_handler, context);
861	if (IS_ERR(cm_id_priv))
862		return ERR_CAST(cm_id_priv);
863
864	cm_finalize_id(cm_id_priv);
865	return &cm_id_priv->id;
866}
867EXPORT_SYMBOL(ib_create_cm_id);
868
869static struct cm_work *cm_dequeue_work(struct cm_id_private *cm_id_priv)
870{
871	struct cm_work *work;
872
873	if (list_empty(&cm_id_priv->work_list))
874		return NULL;
875
876	work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
877	list_del(&work->list);
878	return work;
879}
880
881static void cm_free_work(struct cm_work *work)
882{
883	if (work->mad_recv_wc)
884		ib_free_recv_mad(work->mad_recv_wc);
885	kfree(work);
886}
887
888static void cm_queue_work_unlock(struct cm_id_private *cm_id_priv,
889				 struct cm_work *work)
890	__releases(&cm_id_priv->lock)
891{
892	bool immediate;
893
894	/*
895	 * To deliver the event to the user callback we have the drop the
896	 * spinlock, however, we need to ensure that the user callback is single
897	 * threaded and receives events in the temporal order. If there are
898	 * already events being processed then thread new events onto a list,
899	 * the thread currently processing will pick them up.
900	 */
901	immediate = atomic_inc_and_test(&cm_id_priv->work_count);
902	if (!immediate) {
903		list_add_tail(&work->list, &cm_id_priv->work_list);
904		/*
905		 * This routine always consumes incoming reference. Once queued
906		 * to the work_list then a reference is held by the thread
907		 * currently running cm_process_work() and this reference is not
908		 * needed.
909		 */
910		cm_deref_id(cm_id_priv);
911	}
912	spin_unlock_irq(&cm_id_priv->lock);
913
914	if (immediate)
915		cm_process_work(cm_id_priv, work);
916}
917
918static inline int cm_convert_to_ms(int iba_time)
919{
920	/* approximate conversion to ms from 4.096us x 2^iba_time */
921	return 1 << max(iba_time - 8, 0);
922}
923
924/*
925 * calculate: 4.096x2^ack_timeout = 4.096x2^ack_delay + 2x4.096x2^life_time
926 * Because of how ack_timeout is stored, adding one doubles the timeout.
927 * To avoid large timeouts, select the max(ack_delay, life_time + 1), and
928 * increment it (round up) only if the other is within 50%.
929 */
930static u8 cm_ack_timeout(u8 ca_ack_delay, u8 packet_life_time)
931{
932	int ack_timeout = packet_life_time + 1;
933
934	if (ack_timeout >= ca_ack_delay)
935		ack_timeout += (ca_ack_delay >= (ack_timeout - 1));
936	else
937		ack_timeout = ca_ack_delay +
938			      (ack_timeout >= (ca_ack_delay - 1));
939
940	return min(31, ack_timeout);
941}
942
943static void cm_remove_remote(struct cm_id_private *cm_id_priv)
944{
945	struct cm_timewait_info *timewait_info = cm_id_priv->timewait_info;
946
947	if (timewait_info->inserted_remote_id) {
948		rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
949		timewait_info->inserted_remote_id = 0;
950	}
951
952	if (timewait_info->inserted_remote_qp) {
953		rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
954		timewait_info->inserted_remote_qp = 0;
955	}
956}
957
958static struct cm_timewait_info *cm_create_timewait_info(__be32 local_id)
959{
960	struct cm_timewait_info *timewait_info;
961
962	timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
963	if (!timewait_info)
964		return ERR_PTR(-ENOMEM);
965
966	timewait_info->work.local_id = local_id;
967	INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
968	timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
969	return timewait_info;
970}
971
972static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
973{
974	int wait_time;
975	unsigned long flags;
976	struct cm_device *cm_dev;
977
978	lockdep_assert_held(&cm_id_priv->lock);
979
980	cm_dev = ib_get_client_data(cm_id_priv->id.device, &cm_client);
981	if (!cm_dev)
982		return;
983
984	spin_lock_irqsave(&cm.lock, flags);
985	cm_remove_remote(cm_id_priv);
986	list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list);
987	spin_unlock_irqrestore(&cm.lock, flags);
988
989	/*
990	 * The cm_id could be destroyed by the user before we exit timewait.
991	 * To protect against this, we search for the cm_id after exiting
992	 * timewait before notifying the user that we've exited timewait.
993	 */
994	cm_id_priv->id.state = IB_CM_TIMEWAIT;
995	wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);
996
997	/* Check if the device started its remove_one */
998	spin_lock_irqsave(&cm.lock, flags);
999	if (!cm_dev->going_down)
1000		queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
1001				   msecs_to_jiffies(wait_time));
1002	spin_unlock_irqrestore(&cm.lock, flags);
1003
1004	/*
1005	 * The timewait_info is converted into a work and gets freed during
1006	 * cm_free_work() in cm_timewait_handler().
1007	 */
1008	BUILD_BUG_ON(offsetof(struct cm_timewait_info, work) != 0);
1009	cm_id_priv->timewait_info = NULL;
1010}
1011
1012static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
1013{
1014	unsigned long flags;
1015
1016	lockdep_assert_held(&cm_id_priv->lock);
1017
1018	cm_id_priv->id.state = IB_CM_IDLE;
1019	if (cm_id_priv->timewait_info) {
1020		spin_lock_irqsave(&cm.lock, flags);
1021		cm_remove_remote(cm_id_priv);
1022		spin_unlock_irqrestore(&cm.lock, flags);
1023		kfree(cm_id_priv->timewait_info);
1024		cm_id_priv->timewait_info = NULL;
1025	}
1026}
1027
1028static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
1029{
1030	struct cm_id_private *cm_id_priv;
1031	struct cm_work *work;
1032
1033	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1034	spin_lock_irq(&cm_id_priv->lock);
1035retest:
1036	switch (cm_id->state) {
1037	case IB_CM_LISTEN:
1038		spin_lock(&cm.lock);
1039		if (--cm_id_priv->listen_sharecount > 0) {
1040			/* The id is still shared. */
1041			WARN_ON(refcount_read(&cm_id_priv->refcount) == 1);
1042			spin_unlock(&cm.lock);
1043			spin_unlock_irq(&cm_id_priv->lock);
1044			cm_deref_id(cm_id_priv);
1045			return;
1046		}
1047		cm_id->state = IB_CM_IDLE;
1048		rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
1049		RB_CLEAR_NODE(&cm_id_priv->service_node);
1050		spin_unlock(&cm.lock);
1051		break;
1052	case IB_CM_SIDR_REQ_SENT:
1053		cm_id->state = IB_CM_IDLE;
1054		ib_cancel_mad(cm_id_priv->msg);
1055		break;
1056	case IB_CM_SIDR_REQ_RCVD:
1057		cm_send_sidr_rep_locked(cm_id_priv,
1058					&(struct ib_cm_sidr_rep_param){
1059						.status = IB_SIDR_REJECT });
1060		/* cm_send_sidr_rep_locked will not move to IDLE if it fails */
1061		cm_id->state = IB_CM_IDLE;
1062		break;
1063	case IB_CM_REQ_SENT:
1064	case IB_CM_MRA_REQ_RCVD:
1065		ib_cancel_mad(cm_id_priv->msg);
1066		cm_send_rej_locked(cm_id_priv, IB_CM_REJ_TIMEOUT,
1067				   &cm_id_priv->id.device->node_guid,
1068				   sizeof(cm_id_priv->id.device->node_guid),
1069				   NULL, 0);
1070		break;
1071	case IB_CM_REQ_RCVD:
1072		if (err == -ENOMEM) {
1073			/* Do not reject to allow future retries. */
1074			cm_reset_to_idle(cm_id_priv);
1075		} else {
1076			cm_send_rej_locked(cm_id_priv,
1077					   IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
1078					   NULL, 0);
1079		}
1080		break;
1081	case IB_CM_REP_SENT:
1082	case IB_CM_MRA_REP_RCVD:
1083		ib_cancel_mad(cm_id_priv->msg);
1084		cm_send_rej_locked(cm_id_priv, IB_CM_REJ_CONSUMER_DEFINED, NULL,
1085				   0, NULL, 0);
1086		goto retest;
1087	case IB_CM_MRA_REQ_SENT:
1088	case IB_CM_REP_RCVD:
1089	case IB_CM_MRA_REP_SENT:
1090		cm_send_rej_locked(cm_id_priv, IB_CM_REJ_CONSUMER_DEFINED, NULL,
1091				   0, NULL, 0);
1092		break;
1093	case IB_CM_ESTABLISHED:
1094		if (cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
1095			cm_id->state = IB_CM_IDLE;
1096			break;
1097		}
1098		cm_send_dreq_locked(cm_id_priv, NULL, 0);
1099		goto retest;
1100	case IB_CM_DREQ_SENT:
1101		ib_cancel_mad(cm_id_priv->msg);
1102		cm_enter_timewait(cm_id_priv);
1103		goto retest;
1104	case IB_CM_DREQ_RCVD:
1105		cm_send_drep_locked(cm_id_priv, NULL, 0);
1106		WARN_ON(cm_id->state != IB_CM_TIMEWAIT);
1107		goto retest;
1108	case IB_CM_TIMEWAIT:
1109		/*
1110		 * The cm_acquire_id in cm_timewait_handler will stop working
1111		 * once we do xa_erase below, so just move to idle here for
1112		 * consistency.
1113		 */
1114		cm_id->state = IB_CM_IDLE;
1115		break;
1116	case IB_CM_IDLE:
1117		break;
1118	}
1119	WARN_ON(cm_id->state != IB_CM_IDLE);
1120
1121	spin_lock(&cm.lock);
1122	/* Required for cleanup paths related cm_req_handler() */
1123	if (cm_id_priv->timewait_info) {
1124		cm_remove_remote(cm_id_priv);
1125		kfree(cm_id_priv->timewait_info);
1126		cm_id_priv->timewait_info = NULL;
1127	}
1128
1129	WARN_ON(cm_id_priv->listen_sharecount);
1130	WARN_ON(!RB_EMPTY_NODE(&cm_id_priv->service_node));
1131	if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node))
1132		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
1133	spin_unlock(&cm.lock);
1134	spin_unlock_irq(&cm_id_priv->lock);
1135
1136	xa_erase(&cm.local_id_table, cm_local_id(cm_id->local_id));
1137	cm_deref_id(cm_id_priv);
1138	wait_for_completion(&cm_id_priv->comp);
1139	while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
1140		cm_free_work(work);
1141
1142	cm_destroy_av(&cm_id_priv->av);
1143	cm_destroy_av(&cm_id_priv->alt_av);
1144	kfree(cm_id_priv->private_data);
1145	kfree_rcu(cm_id_priv, rcu);
1146}
1147
1148void ib_destroy_cm_id(struct ib_cm_id *cm_id)
1149{
1150	cm_destroy_id(cm_id, 0);
1151}
1152EXPORT_SYMBOL(ib_destroy_cm_id);
1153
1154static int cm_init_listen(struct cm_id_private *cm_id_priv, __be64 service_id)
1155{
1156	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
1157	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
1158		return -EINVAL;
1159
1160	if (service_id == IB_CM_ASSIGN_SERVICE_ID)
1161		cm_id_priv->id.service_id = cpu_to_be64(cm.listen_service_id++);
1162	else
1163		cm_id_priv->id.service_id = service_id;
1164
1165	return 0;
1166}
1167
1168/**
1169 * ib_cm_listen - Initiates listening on the specified service ID for
1170 *   connection and service ID resolution requests.
1171 * @cm_id: Connection identifier associated with the listen request.
1172 * @service_id: Service identifier matched against incoming connection
1173 *   and service ID resolution requests.  The service ID should be specified
1174 *   network-byte order.  If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
1175 *   assign a service ID to the caller.
1176 */
1177int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id)
1178{
1179	struct cm_id_private *cm_id_priv =
1180		container_of(cm_id, struct cm_id_private, id);
1181	unsigned long flags;
1182	int ret;
1183
1184	spin_lock_irqsave(&cm_id_priv->lock, flags);
1185	if (cm_id_priv->id.state != IB_CM_IDLE) {
1186		ret = -EINVAL;
1187		goto out;
1188	}
1189
1190	ret = cm_init_listen(cm_id_priv, service_id);
1191	if (ret)
1192		goto out;
1193
1194	if (!cm_insert_listen(cm_id_priv, NULL)) {
1195		ret = -EBUSY;
1196		goto out;
1197	}
1198
1199	cm_id_priv->id.state = IB_CM_LISTEN;
1200	ret = 0;
1201
1202out:
1203	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1204	return ret;
1205}
1206EXPORT_SYMBOL(ib_cm_listen);
1207
1208/**
1209 * ib_cm_insert_listen - Create a new listening ib_cm_id and listen on
1210 *			 the given service ID.
1211 *
1212 * If there's an existing ID listening on that same device and service ID,
1213 * return it.
1214 *
1215 * @device: Device associated with the cm_id.  All related communication will
1216 * be associated with the specified device.
1217 * @cm_handler: Callback invoked to notify the user of CM events.
1218 * @service_id: Service identifier matched against incoming connection
1219 *   and service ID resolution requests.  The service ID should be specified
1220 *   network-byte order.  If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
1221 *   assign a service ID to the caller.
1222 *
1223 * Callers should call ib_destroy_cm_id when done with the listener ID.
1224 */
1225struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
1226				     ib_cm_handler cm_handler,
1227				     __be64 service_id)
1228{
1229	struct cm_id_private *listen_id_priv;
1230	struct cm_id_private *cm_id_priv;
1231	int err = 0;
1232
1233	/* Create an ID in advance, since the creation may sleep */
1234	cm_id_priv = cm_alloc_id_priv(device, cm_handler, NULL);
1235	if (IS_ERR(cm_id_priv))
1236		return ERR_CAST(cm_id_priv);
1237
1238	err = cm_init_listen(cm_id_priv, service_id);
1239	if (err) {
1240		ib_destroy_cm_id(&cm_id_priv->id);
1241		return ERR_PTR(err);
1242	}
1243
1244	spin_lock_irq(&cm_id_priv->lock);
1245	listen_id_priv = cm_insert_listen(cm_id_priv, cm_handler);
1246	if (listen_id_priv != cm_id_priv) {
1247		spin_unlock_irq(&cm_id_priv->lock);
1248		ib_destroy_cm_id(&cm_id_priv->id);
1249		if (!listen_id_priv)
1250			return ERR_PTR(-EINVAL);
1251		return &listen_id_priv->id;
1252	}
1253	cm_id_priv->id.state = IB_CM_LISTEN;
1254	spin_unlock_irq(&cm_id_priv->lock);
1255
1256	/*
1257	 * A listen ID does not need to be in the xarray since it does not
1258	 * receive mads, is not placed in the remote_id or remote_qpn rbtree,
1259	 * and does not enter timewait.
1260	 */
1261
1262	return &cm_id_priv->id;
1263}
1264EXPORT_SYMBOL(ib_cm_insert_listen);
1265
1266static __be64 cm_form_tid(struct cm_id_private *cm_id_priv)
1267{
1268	u64 hi_tid = 0, low_tid;
1269
1270	lockdep_assert_held(&cm_id_priv->lock);
1271
1272	low_tid = (u64)cm_id_priv->id.local_id;
1273	if (!cm_id_priv->av.port)
1274		return cpu_to_be64(low_tid);
1275
1276	spin_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
1277	if (cm_id_priv->av.port->mad_agent)
1278		hi_tid = ((u64)cm_id_priv->av.port->mad_agent->hi_tid) << 32;
1279	spin_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
1280	return cpu_to_be64(hi_tid | low_tid);
1281}
1282
1283static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
1284			      __be16 attr_id, __be64 tid)
1285{
1286	hdr->base_version  = IB_MGMT_BASE_VERSION;
1287	hdr->mgmt_class	   = IB_MGMT_CLASS_CM;
1288	hdr->class_version = IB_CM_CLASS_VERSION;
1289	hdr->method	   = IB_MGMT_METHOD_SEND;
1290	hdr->attr_id	   = attr_id;
1291	hdr->tid	   = tid;
1292}
1293
1294static void cm_format_mad_ece_hdr(struct ib_mad_hdr *hdr, __be16 attr_id,
1295				  __be64 tid, u32 attr_mod)
1296{
1297	cm_format_mad_hdr(hdr, attr_id, tid);
1298	hdr->attr_mod = cpu_to_be32(attr_mod);
1299}
1300
1301static void cm_format_req(struct cm_req_msg *req_msg,
1302			  struct cm_id_private *cm_id_priv,
1303			  struct ib_cm_req_param *param)
1304{
1305	struct sa_path_rec *pri_path = param->primary_path;
1306	struct sa_path_rec *alt_path = param->alternate_path;
1307	bool pri_ext = false;
1308	__be16 lid;
1309
1310	if (pri_path->rec_type == SA_PATH_REC_TYPE_OPA)
1311		pri_ext = opa_is_extended_lid(pri_path->opa.dlid,
1312					      pri_path->opa.slid);
1313
1314	cm_format_mad_ece_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
1315			      cm_form_tid(cm_id_priv), param->ece.attr_mod);
1316
1317	IBA_SET(CM_REQ_LOCAL_COMM_ID, req_msg,
1318		be32_to_cpu(cm_id_priv->id.local_id));
1319	IBA_SET(CM_REQ_SERVICE_ID, req_msg, be64_to_cpu(param->service_id));
1320	IBA_SET(CM_REQ_LOCAL_CA_GUID, req_msg,
1321		be64_to_cpu(cm_id_priv->id.device->node_guid));
1322	IBA_SET(CM_REQ_LOCAL_QPN, req_msg, param->qp_num);
1323	IBA_SET(CM_REQ_INITIATOR_DEPTH, req_msg, param->initiator_depth);
1324	IBA_SET(CM_REQ_REMOTE_CM_RESPONSE_TIMEOUT, req_msg,
1325		param->remote_cm_response_timeout);
1326	cm_req_set_qp_type(req_msg, param->qp_type);
1327	IBA_SET(CM_REQ_END_TO_END_FLOW_CONTROL, req_msg, param->flow_control);
1328	IBA_SET(CM_REQ_STARTING_PSN, req_msg, param->starting_psn);
1329	IBA_SET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg,
1330		param->local_cm_response_timeout);
1331	IBA_SET(CM_REQ_PARTITION_KEY, req_msg,
1332		be16_to_cpu(param->primary_path->pkey));
1333	IBA_SET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg,
1334		param->primary_path->mtu);
1335	IBA_SET(CM_REQ_MAX_CM_RETRIES, req_msg, param->max_cm_retries);
1336
1337	if (param->qp_type != IB_QPT_XRC_INI) {
1338		IBA_SET(CM_REQ_RESPONDER_RESOURCES, req_msg,
1339			param->responder_resources);
1340		IBA_SET(CM_REQ_RETRY_COUNT, req_msg, param->retry_count);
1341		IBA_SET(CM_REQ_RNR_RETRY_COUNT, req_msg,
1342			param->rnr_retry_count);
1343		IBA_SET(CM_REQ_SRQ, req_msg, param->srq);
1344	}
1345
1346	*IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg) =
1347		pri_path->sgid;
1348	*IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg) =
1349		pri_path->dgid;
1350	if (pri_ext) {
1351		IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg)
1352			->global.interface_id =
1353			OPA_MAKE_ID(be32_to_cpu(pri_path->opa.slid));
1354		IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg)
1355			->global.interface_id =
1356			OPA_MAKE_ID(be32_to_cpu(pri_path->opa.dlid));
1357	}
1358	if (pri_path->hop_limit <= 1) {
1359		IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
1360			be16_to_cpu(pri_ext ? 0 :
1361					      htons(ntohl(sa_path_get_slid(
1362						      pri_path)))));
1363		IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
1364			be16_to_cpu(pri_ext ? 0 :
1365					      htons(ntohl(sa_path_get_dlid(
1366						      pri_path)))));
1367	} else {
1368
1369		if (param->primary_path_inbound) {
1370			lid = param->primary_path_inbound->ib.dlid;
1371			IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
1372				be16_to_cpu(lid));
1373		} else
1374			IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
1375				be16_to_cpu(IB_LID_PERMISSIVE));
1376
1377		/* Work-around until there's a way to obtain remote LID info */
1378		IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
1379			be16_to_cpu(IB_LID_PERMISSIVE));
1380	}
1381	IBA_SET(CM_REQ_PRIMARY_FLOW_LABEL, req_msg,
1382		be32_to_cpu(pri_path->flow_label));
1383	IBA_SET(CM_REQ_PRIMARY_PACKET_RATE, req_msg, pri_path->rate);
1384	IBA_SET(CM_REQ_PRIMARY_TRAFFIC_CLASS, req_msg, pri_path->traffic_class);
1385	IBA_SET(CM_REQ_PRIMARY_HOP_LIMIT, req_msg, pri_path->hop_limit);
1386	IBA_SET(CM_REQ_PRIMARY_SL, req_msg, pri_path->sl);
1387	IBA_SET(CM_REQ_PRIMARY_SUBNET_LOCAL, req_msg,
1388		(pri_path->hop_limit <= 1));
1389	IBA_SET(CM_REQ_PRIMARY_LOCAL_ACK_TIMEOUT, req_msg,
1390		cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
1391			       pri_path->packet_life_time));
1392
1393	if (alt_path) {
1394		bool alt_ext = false;
1395
1396		if (alt_path->rec_type == SA_PATH_REC_TYPE_OPA)
1397			alt_ext = opa_is_extended_lid(alt_path->opa.dlid,
1398						      alt_path->opa.slid);
1399
1400		*IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg) =
1401			alt_path->sgid;
1402		*IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg) =
1403			alt_path->dgid;
1404		if (alt_ext) {
1405			IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID,
1406					req_msg)
1407				->global.interface_id =
1408				OPA_MAKE_ID(be32_to_cpu(alt_path->opa.slid));
1409			IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_REMOTE_PORT_GID,
1410					req_msg)
1411				->global.interface_id =
1412				OPA_MAKE_ID(be32_to_cpu(alt_path->opa.dlid));
1413		}
1414		if (alt_path->hop_limit <= 1) {
1415			IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg,
1416				be16_to_cpu(
1417					alt_ext ? 0 :
1418						  htons(ntohl(sa_path_get_slid(
1419							  alt_path)))));
1420			IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg,
1421				be16_to_cpu(
1422					alt_ext ? 0 :
1423						  htons(ntohl(sa_path_get_dlid(
1424							  alt_path)))));
1425		} else {
1426			IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg,
1427				be16_to_cpu(IB_LID_PERMISSIVE));
1428			IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg,
1429				be16_to_cpu(IB_LID_PERMISSIVE));
1430		}
1431		IBA_SET(CM_REQ_ALTERNATE_FLOW_LABEL, req_msg,
1432			be32_to_cpu(alt_path->flow_label));
1433		IBA_SET(CM_REQ_ALTERNATE_PACKET_RATE, req_msg, alt_path->rate);
1434		IBA_SET(CM_REQ_ALTERNATE_TRAFFIC_CLASS, req_msg,
1435			alt_path->traffic_class);
1436		IBA_SET(CM_REQ_ALTERNATE_HOP_LIMIT, req_msg,
1437			alt_path->hop_limit);
1438		IBA_SET(CM_REQ_ALTERNATE_SL, req_msg, alt_path->sl);
1439		IBA_SET(CM_REQ_ALTERNATE_SUBNET_LOCAL, req_msg,
1440			(alt_path->hop_limit <= 1));
1441		IBA_SET(CM_REQ_ALTERNATE_LOCAL_ACK_TIMEOUT, req_msg,
1442			cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
1443				       alt_path->packet_life_time));
1444	}
1445	IBA_SET(CM_REQ_VENDOR_ID, req_msg, param->ece.vendor_id);
1446
1447	if (param->private_data && param->private_data_len)
1448		IBA_SET_MEM(CM_REQ_PRIVATE_DATA, req_msg, param->private_data,
1449			    param->private_data_len);
1450}
1451
1452static int cm_validate_req_param(struct ib_cm_req_param *param)
1453{
1454	if (!param->primary_path)
1455		return -EINVAL;
1456
1457	if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC &&
1458	    param->qp_type != IB_QPT_XRC_INI)
1459		return -EINVAL;
1460
1461	if (param->private_data &&
1462	    param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
1463		return -EINVAL;
1464
1465	if (param->alternate_path &&
1466	    (param->alternate_path->pkey != param->primary_path->pkey ||
1467	     param->alternate_path->mtu != param->primary_path->mtu))
1468		return -EINVAL;
1469
1470	return 0;
1471}
1472
1473int ib_send_cm_req(struct ib_cm_id *cm_id,
1474		   struct ib_cm_req_param *param)
1475{
1476	struct cm_av av = {}, alt_av = {};
1477	struct cm_id_private *cm_id_priv;
1478	struct ib_mad_send_buf *msg;
1479	struct cm_req_msg *req_msg;
1480	unsigned long flags;
1481	int ret;
1482
1483	ret = cm_validate_req_param(param);
1484	if (ret)
1485		return ret;
1486
1487	/* Verify that we're not in timewait. */
1488	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1489	spin_lock_irqsave(&cm_id_priv->lock, flags);
1490	if (cm_id->state != IB_CM_IDLE || WARN_ON(cm_id_priv->timewait_info)) {
1491		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1492		return -EINVAL;
1493	}
1494	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1495
1496	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
1497							    id.local_id);
1498	if (IS_ERR(cm_id_priv->timewait_info)) {
1499		ret = PTR_ERR(cm_id_priv->timewait_info);
1500		cm_id_priv->timewait_info = NULL;
1501		return ret;
1502	}
1503
1504	ret = cm_init_av_by_path(param->primary_path,
1505				 param->ppath_sgid_attr, &av);
1506	if (ret)
1507		return ret;
1508	if (param->alternate_path) {
1509		ret = cm_init_av_by_path(param->alternate_path, NULL,
1510					 &alt_av);
1511		if (ret) {
1512			cm_destroy_av(&av);
1513			return ret;
1514		}
1515	}
1516	cm_id->service_id = param->service_id;
1517	cm_id_priv->timeout_ms = cm_convert_to_ms(
1518				    param->primary_path->packet_life_time) * 2 +
1519				 cm_convert_to_ms(
1520				    param->remote_cm_response_timeout);
1521	cm_id_priv->max_cm_retries = param->max_cm_retries;
1522	cm_id_priv->initiator_depth = param->initiator_depth;
1523	cm_id_priv->responder_resources = param->responder_resources;
1524	cm_id_priv->retry_count = param->retry_count;
1525	cm_id_priv->path_mtu = param->primary_path->mtu;
1526	cm_id_priv->pkey = param->primary_path->pkey;
1527	cm_id_priv->qp_type = param->qp_type;
1528
1529	spin_lock_irqsave(&cm_id_priv->lock, flags);
1530
1531	cm_move_av_from_path(&cm_id_priv->av, &av);
1532	if (param->primary_path_outbound)
1533		cm_id_priv->av.dlid_datapath =
1534			be16_to_cpu(param->primary_path_outbound->ib.dlid);
1535
1536	if (param->alternate_path)
1537		cm_move_av_from_path(&cm_id_priv->alt_av, &alt_av);
1538
1539	msg = cm_alloc_priv_msg(cm_id_priv);
1540	if (IS_ERR(msg)) {
1541		ret = PTR_ERR(msg);
1542		goto out_unlock;
1543	}
1544
1545	req_msg = (struct cm_req_msg *)msg->mad;
1546	cm_format_req(req_msg, cm_id_priv, param);
1547	cm_id_priv->tid = req_msg->hdr.tid;
1548	msg->timeout_ms = cm_id_priv->timeout_ms;
1549	msg->context[1] = (void *)(unsigned long)IB_CM_REQ_SENT;
1550
1551	cm_id_priv->local_qpn = cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg));
1552	cm_id_priv->rq_psn = cpu_to_be32(IBA_GET(CM_REQ_STARTING_PSN, req_msg));
1553
1554	trace_icm_send_req(&cm_id_priv->id);
1555	ret = ib_post_send_mad(msg, NULL);
1556	if (ret)
1557		goto out_free;
1558	BUG_ON(cm_id->state != IB_CM_IDLE);
1559	cm_id->state = IB_CM_REQ_SENT;
1560	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1561	return 0;
1562out_free:
1563	cm_free_priv_msg(msg);
1564out_unlock:
1565	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1566	return ret;
1567}
1568EXPORT_SYMBOL(ib_send_cm_req);
1569
1570static int cm_issue_rej(struct cm_port *port,
1571			struct ib_mad_recv_wc *mad_recv_wc,
1572			enum ib_cm_rej_reason reason,
1573			enum cm_msg_response msg_rejected,
1574			void *ari, u8 ari_length)
1575{
1576	struct ib_mad_send_buf *msg = NULL;
1577	struct cm_rej_msg *rej_msg, *rcv_msg;
1578	int ret;
1579
1580	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
1581	if (ret)
1582		return ret;
1583
1584	/* We just need common CM header information.  Cast to any message. */
1585	rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
1586	rej_msg = (struct cm_rej_msg *) msg->mad;
1587
1588	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
1589	IBA_SET(CM_REJ_REMOTE_COMM_ID, rej_msg,
1590		IBA_GET(CM_REJ_LOCAL_COMM_ID, rcv_msg));
1591	IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
1592		IBA_GET(CM_REJ_REMOTE_COMM_ID, rcv_msg));
1593	IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, msg_rejected);
1594	IBA_SET(CM_REJ_REASON, rej_msg, reason);
1595
1596	if (ari && ari_length) {
1597		IBA_SET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg, ari_length);
1598		IBA_SET_MEM(CM_REJ_ARI, rej_msg, ari, ari_length);
1599	}
1600
1601	trace_icm_issue_rej(
1602		IBA_GET(CM_REJ_LOCAL_COMM_ID, rcv_msg),
1603		IBA_GET(CM_REJ_REMOTE_COMM_ID, rcv_msg));
1604	ret = ib_post_send_mad(msg, NULL);
1605	if (ret)
1606		cm_free_response_msg(msg);
1607
1608	return ret;
1609}
1610
1611static bool cm_req_has_alt_path(struct cm_req_msg *req_msg)
1612{
1613	return ((cpu_to_be16(
1614			IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg))) ||
1615		(ib_is_opa_gid(IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID,
1616					       req_msg))));
1617}
1618
1619static void cm_path_set_rec_type(struct ib_device *ib_device, u32 port_num,
1620				 struct sa_path_rec *path, union ib_gid *gid)
1621{
1622	if (ib_is_opa_gid(gid) && rdma_cap_opa_ah(ib_device, port_num))
1623		path->rec_type = SA_PATH_REC_TYPE_OPA;
1624	else
1625		path->rec_type = SA_PATH_REC_TYPE_IB;
1626}
1627
1628static void cm_format_path_lid_from_req(struct cm_req_msg *req_msg,
1629					struct sa_path_rec *primary_path,
1630					struct sa_path_rec *alt_path,
1631					struct ib_wc *wc)
1632{
1633	u32 lid;
1634
1635	if (primary_path->rec_type != SA_PATH_REC_TYPE_OPA) {
1636		sa_path_set_dlid(primary_path, wc->slid);
1637		sa_path_set_slid(primary_path,
1638				 IBA_GET(CM_REQ_PRIMARY_REMOTE_PORT_LID,
1639					 req_msg));
1640	} else {
1641		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
1642			CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg));
1643		sa_path_set_dlid(primary_path, lid);
1644
1645		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
1646			CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg));
1647		sa_path_set_slid(primary_path, lid);
1648	}
1649
1650	if (!cm_req_has_alt_path(req_msg))
1651		return;
1652
1653	if (alt_path->rec_type != SA_PATH_REC_TYPE_OPA) {
1654		sa_path_set_dlid(alt_path,
1655				 IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID,
1656					 req_msg));
1657		sa_path_set_slid(alt_path,
1658				 IBA_GET(CM_REQ_ALTERNATE_REMOTE_PORT_LID,
1659					 req_msg));
1660	} else {
1661		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
1662			CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg));
1663		sa_path_set_dlid(alt_path, lid);
1664
1665		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
1666			CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg));
1667		sa_path_set_slid(alt_path, lid);
1668	}
1669}
1670
1671static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
1672				     struct sa_path_rec *primary_path,
1673				     struct sa_path_rec *alt_path,
1674				     struct ib_wc *wc)
1675{
1676	primary_path->dgid =
1677		*IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg);
1678	primary_path->sgid =
1679		*IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg);
1680	primary_path->flow_label =
1681		cpu_to_be32(IBA_GET(CM_REQ_PRIMARY_FLOW_LABEL, req_msg));
1682	primary_path->hop_limit = IBA_GET(CM_REQ_PRIMARY_HOP_LIMIT, req_msg);
1683	primary_path->traffic_class =
1684		IBA_GET(CM_REQ_PRIMARY_TRAFFIC_CLASS, req_msg);
1685	primary_path->reversible = 1;
1686	primary_path->pkey =
1687		cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg));
1688	primary_path->sl = IBA_GET(CM_REQ_PRIMARY_SL, req_msg);
1689	primary_path->mtu_selector = IB_SA_EQ;
1690	primary_path->mtu = IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg);
1691	primary_path->rate_selector = IB_SA_EQ;
1692	primary_path->rate = IBA_GET(CM_REQ_PRIMARY_PACKET_RATE, req_msg);
1693	primary_path->packet_life_time_selector = IB_SA_EQ;
1694	primary_path->packet_life_time =
1695		IBA_GET(CM_REQ_PRIMARY_LOCAL_ACK_TIMEOUT, req_msg);
1696	primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
1697	primary_path->service_id =
1698		cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));
1699	if (sa_path_is_roce(primary_path))
1700		primary_path->roce.route_resolved = false;
1701
1702	if (cm_req_has_alt_path(req_msg)) {
1703		alt_path->dgid = *IBA_GET_MEM_PTR(
1704			CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg);
1705		alt_path->sgid = *IBA_GET_MEM_PTR(
1706			CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg);
1707		alt_path->flow_label = cpu_to_be32(
1708			IBA_GET(CM_REQ_ALTERNATE_FLOW_LABEL, req_msg));
1709		alt_path->hop_limit =
1710			IBA_GET(CM_REQ_ALTERNATE_HOP_LIMIT, req_msg);
1711		alt_path->traffic_class =
1712			IBA_GET(CM_REQ_ALTERNATE_TRAFFIC_CLASS, req_msg);
1713		alt_path->reversible = 1;
1714		alt_path->pkey =
1715			cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg));
1716		alt_path->sl = IBA_GET(CM_REQ_ALTERNATE_SL, req_msg);
1717		alt_path->mtu_selector = IB_SA_EQ;
1718		alt_path->mtu =
1719			IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg);
1720		alt_path->rate_selector = IB_SA_EQ;
1721		alt_path->rate = IBA_GET(CM_REQ_ALTERNATE_PACKET_RATE, req_msg);
1722		alt_path->packet_life_time_selector = IB_SA_EQ;
1723		alt_path->packet_life_time =
1724			IBA_GET(CM_REQ_ALTERNATE_LOCAL_ACK_TIMEOUT, req_msg);
1725		alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
1726		alt_path->service_id =
1727			cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));
1728
1729		if (sa_path_is_roce(alt_path))
1730			alt_path->roce.route_resolved = false;
1731	}
1732	cm_format_path_lid_from_req(req_msg, primary_path, alt_path, wc);
1733}
1734
1735static u16 cm_get_bth_pkey(struct cm_work *work)
1736{
1737	struct ib_device *ib_dev = work->port->cm_dev->ib_device;
1738	u32 port_num = work->port->port_num;
1739	u16 pkey_index = work->mad_recv_wc->wc->pkey_index;
1740	u16 pkey;
1741	int ret;
1742
1743	ret = ib_get_cached_pkey(ib_dev, port_num, pkey_index, &pkey);
1744	if (ret) {
1745		dev_warn_ratelimited(&ib_dev->dev, "ib_cm: Couldn't retrieve pkey for incoming request (port %u, pkey index %u). %d\n",
1746				     port_num, pkey_index, ret);
1747		return 0;
1748	}
1749
1750	return pkey;
1751}
1752
1753/**
1754 * cm_opa_to_ib_sgid - Convert OPA SGID to IB SGID
1755 * ULPs (such as IPoIB) do not understand OPA GIDs and will
1756 * reject them as the local_gid will not match the sgid. Therefore,
1757 * change the pathrec's SGID to an IB SGID.
1758 *
1759 * @work: Work completion
1760 * @path: Path record
1761 */
1762static void cm_opa_to_ib_sgid(struct cm_work *work,
1763			      struct sa_path_rec *path)
1764{
1765	struct ib_device *dev = work->port->cm_dev->ib_device;
1766	u32 port_num = work->port->port_num;
1767
1768	if (rdma_cap_opa_ah(dev, port_num) &&
1769	    (ib_is_opa_gid(&path->sgid))) {
1770		union ib_gid sgid;
1771
1772		if (rdma_query_gid(dev, port_num, 0, &sgid)) {
1773			dev_warn(&dev->dev,
1774				 "Error updating sgid in CM request\n");
1775			return;
1776		}
1777
1778		path->sgid = sgid;
1779	}
1780}
1781
1782static void cm_format_req_event(struct cm_work *work,
1783				struct cm_id_private *cm_id_priv,
1784				struct ib_cm_id *listen_id)
1785{
1786	struct cm_req_msg *req_msg;
1787	struct ib_cm_req_event_param *param;
1788
1789	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
1790	param = &work->cm_event.param.req_rcvd;
1791	param->listen_id = listen_id;
1792	param->bth_pkey = cm_get_bth_pkey(work);
1793	param->port = cm_id_priv->av.port->port_num;
1794	param->primary_path = &work->path[0];
1795	cm_opa_to_ib_sgid(work, param->primary_path);
1796	if (cm_req_has_alt_path(req_msg)) {
1797		param->alternate_path = &work->path[1];
1798		cm_opa_to_ib_sgid(work, param->alternate_path);
1799	} else {
1800		param->alternate_path = NULL;
1801	}
1802	param->remote_ca_guid =
1803		cpu_to_be64(IBA_GET(CM_REQ_LOCAL_CA_GUID, req_msg));
1804	param->remote_qkey = IBA_GET(CM_REQ_LOCAL_Q_KEY, req_msg);
1805	param->remote_qpn = IBA_GET(CM_REQ_LOCAL_QPN, req_msg);
1806	param->qp_type = cm_req_get_qp_type(req_msg);
1807	param->starting_psn = IBA_GET(CM_REQ_STARTING_PSN, req_msg);
1808	param->responder_resources = IBA_GET(CM_REQ_INITIATOR_DEPTH, req_msg);
1809	param->initiator_depth = IBA_GET(CM_REQ_RESPONDER_RESOURCES, req_msg);
1810	param->local_cm_response_timeout =
1811		IBA_GET(CM_REQ_REMOTE_CM_RESPONSE_TIMEOUT, req_msg);
1812	param->flow_control = IBA_GET(CM_REQ_END_TO_END_FLOW_CONTROL, req_msg);
1813	param->remote_cm_response_timeout =
1814		IBA_GET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg);
1815	param->retry_count = IBA_GET(CM_REQ_RETRY_COUNT, req_msg);
1816	param->rnr_retry_count = IBA_GET(CM_REQ_RNR_RETRY_COUNT, req_msg);
1817	param->srq = IBA_GET(CM_REQ_SRQ, req_msg);
1818	param->ppath_sgid_attr = cm_id_priv->av.ah_attr.grh.sgid_attr;
1819	param->ece.vendor_id = IBA_GET(CM_REQ_VENDOR_ID, req_msg);
1820	param->ece.attr_mod = be32_to_cpu(req_msg->hdr.attr_mod);
1821
1822	work->cm_event.private_data =
1823		IBA_GET_MEM_PTR(CM_REQ_PRIVATE_DATA, req_msg);
1824}
1825
1826static void cm_process_work(struct cm_id_private *cm_id_priv,
1827			    struct cm_work *work)
1828{
1829	int ret;
1830
1831	/* We will typically only have the current event to report. */
1832	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
1833	cm_free_work(work);
1834
1835	while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
1836		spin_lock_irq(&cm_id_priv->lock);
1837		work = cm_dequeue_work(cm_id_priv);
1838		spin_unlock_irq(&cm_id_priv->lock);
1839		if (!work)
1840			return;
1841
1842		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
1843						&work->cm_event);
1844		cm_free_work(work);
1845	}
1846	cm_deref_id(cm_id_priv);
1847	if (ret)
1848		cm_destroy_id(&cm_id_priv->id, ret);
1849}
1850
1851static void cm_format_mra(struct cm_mra_msg *mra_msg,
1852			  struct cm_id_private *cm_id_priv,
1853			  enum cm_msg_response msg_mraed, u8 service_timeout,
1854			  const void *private_data, u8 private_data_len)
1855{
1856	cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
1857	IBA_SET(CM_MRA_MESSAGE_MRAED, mra_msg, msg_mraed);
1858	IBA_SET(CM_MRA_LOCAL_COMM_ID, mra_msg,
1859		be32_to_cpu(cm_id_priv->id.local_id));
1860	IBA_SET(CM_MRA_REMOTE_COMM_ID, mra_msg,
1861		be32_to_cpu(cm_id_priv->id.remote_id));
1862	IBA_SET(CM_MRA_SERVICE_TIMEOUT, mra_msg, service_timeout);
1863
1864	if (private_data && private_data_len)
1865		IBA_SET_MEM(CM_MRA_PRIVATE_DATA, mra_msg, private_data,
1866			    private_data_len);
1867}
1868
1869static void cm_format_rej(struct cm_rej_msg *rej_msg,
1870			  struct cm_id_private *cm_id_priv,
1871			  enum ib_cm_rej_reason reason, void *ari,
1872			  u8 ari_length, const void *private_data,
1873			  u8 private_data_len, enum ib_cm_state state)
1874{
1875	lockdep_assert_held(&cm_id_priv->lock);
1876
1877	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
1878	IBA_SET(CM_REJ_REMOTE_COMM_ID, rej_msg,
1879		be32_to_cpu(cm_id_priv->id.remote_id));
1880
1881	switch (state) {
1882	case IB_CM_REQ_RCVD:
1883		IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg, be32_to_cpu(0));
1884		IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REQ);
1885		break;
1886	case IB_CM_MRA_REQ_SENT:
1887		IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
1888			be32_to_cpu(cm_id_priv->id.local_id));
1889		IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REQ);
1890		break;
1891	case IB_CM_REP_RCVD:
1892	case IB_CM_MRA_REP_SENT:
1893		IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
1894			be32_to_cpu(cm_id_priv->id.local_id));
1895		IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REP);
1896		break;
1897	default:
1898		IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
1899			be32_to_cpu(cm_id_priv->id.local_id));
1900		IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg,
1901			CM_MSG_RESPONSE_OTHER);
1902		break;
1903	}
1904
1905	IBA_SET(CM_REJ_REASON, rej_msg, reason);
1906	if (ari && ari_length) {
1907		IBA_SET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg, ari_length);
1908		IBA_SET_MEM(CM_REJ_ARI, rej_msg, ari, ari_length);
1909	}
1910
1911	if (private_data && private_data_len)
1912		IBA_SET_MEM(CM_REJ_PRIVATE_DATA, rej_msg, private_data,
1913			    private_data_len);
1914}
1915
1916static void cm_dup_req_handler(struct cm_work *work,
1917			       struct cm_id_private *cm_id_priv)
1918{
1919	struct ib_mad_send_buf *msg = NULL;
1920	int ret;
1921
1922	atomic_long_inc(
1923		&work->port->counters[CM_RECV_DUPLICATES][CM_REQ_COUNTER]);
1924
1925	/* Quick state check to discard duplicate REQs. */
1926	spin_lock_irq(&cm_id_priv->lock);
1927	if (cm_id_priv->id.state == IB_CM_REQ_RCVD) {
1928		spin_unlock_irq(&cm_id_priv->lock);
1929		return;
1930	}
1931	spin_unlock_irq(&cm_id_priv->lock);
1932
1933	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
1934	if (ret)
1935		return;
1936
1937	spin_lock_irq(&cm_id_priv->lock);
1938	switch (cm_id_priv->id.state) {
1939	case IB_CM_MRA_REQ_SENT:
1940		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
1941			      CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
1942			      cm_id_priv->private_data,
1943			      cm_id_priv->private_data_len);
1944		break;
1945	case IB_CM_TIMEWAIT:
1946		cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv,
1947			      IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0,
1948			      IB_CM_TIMEWAIT);
1949		break;
1950	default:
1951		goto unlock;
1952	}
1953	spin_unlock_irq(&cm_id_priv->lock);
1954
1955	trace_icm_send_dup_req(&cm_id_priv->id);
1956	ret = ib_post_send_mad(msg, NULL);
1957	if (ret)
1958		goto free;
1959	return;
1960
1961unlock:	spin_unlock_irq(&cm_id_priv->lock);
1962free:	cm_free_response_msg(msg);
1963}
1964
1965static struct cm_id_private *cm_match_req(struct cm_work *work,
1966					  struct cm_id_private *cm_id_priv)
1967{
1968	struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
1969	struct cm_timewait_info *timewait_info;
1970	struct cm_req_msg *req_msg;
1971
1972	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
1973
1974	/* Check for possible duplicate REQ. */
1975	spin_lock_irq(&cm.lock);
1976	timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
1977	if (timewait_info) {
1978		cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
1979					   timewait_info->work.remote_id);
1980		spin_unlock_irq(&cm.lock);
1981		if (cur_cm_id_priv) {
1982			cm_dup_req_handler(work, cur_cm_id_priv);
1983			cm_deref_id(cur_cm_id_priv);
1984		}
1985		return NULL;
1986	}
1987
1988	/* Check for stale connections. */
1989	timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
1990	if (timewait_info) {
1991		cm_remove_remote(cm_id_priv);
1992		cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
1993					   timewait_info->work.remote_id);
1994
1995		spin_unlock_irq(&cm.lock);
1996		cm_issue_rej(work->port, work->mad_recv_wc,
1997			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
1998			     NULL, 0);
1999		if (cur_cm_id_priv) {
2000			ib_send_cm_dreq(&cur_cm_id_priv->id, NULL, 0);
2001			cm_deref_id(cur_cm_id_priv);
2002		}
2003		return NULL;
2004	}
2005
2006	/* Find matching listen request. */
2007	listen_cm_id_priv = cm_find_listen(
2008		cm_id_priv->id.device,
2009		cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg)));
2010	if (!listen_cm_id_priv) {
2011		cm_remove_remote(cm_id_priv);
2012		spin_unlock_irq(&cm.lock);
2013		cm_issue_rej(work->port, work->mad_recv_wc,
2014			     IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
2015			     NULL, 0);
2016		return NULL;
2017	}
2018	spin_unlock_irq(&cm.lock);
2019	return listen_cm_id_priv;
2020}
2021
2022/*
2023 * Work-around for inter-subnet connections.  If the LIDs are permissive,
2024 * we need to override the LID/SL data in the REQ with the LID information
2025 * in the work completion.
2026 */
2027static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc)
2028{
2029	if (!IBA_GET(CM_REQ_PRIMARY_SUBNET_LOCAL, req_msg)) {
2030		if (cpu_to_be16(IBA_GET(CM_REQ_PRIMARY_LOCAL_PORT_LID,
2031					req_msg)) == IB_LID_PERMISSIVE) {
2032			IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
2033				be16_to_cpu(ib_lid_be16(wc->slid)));
2034			IBA_SET(CM_REQ_PRIMARY_SL, req_msg, wc->sl);
2035		}
2036
2037		if (cpu_to_be16(IBA_GET(CM_REQ_PRIMARY_REMOTE_PORT_LID,
2038					req_msg)) == IB_LID_PERMISSIVE)
2039			IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
2040				wc->dlid_path_bits);
2041	}
2042
2043	if (!IBA_GET(CM_REQ_ALTERNATE_SUBNET_LOCAL, req_msg)) {
2044		if (cpu_to_be16(IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID,
2045					req_msg)) == IB_LID_PERMISSIVE) {
2046			IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg,
2047				be16_to_cpu(ib_lid_be16(wc->slid)));
2048			IBA_SET(CM_REQ_ALTERNATE_SL, req_msg, wc->sl);
2049		}
2050
2051		if (cpu_to_be16(IBA_GET(CM_REQ_ALTERNATE_REMOTE_PORT_LID,
2052					req_msg)) == IB_LID_PERMISSIVE)
2053			IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg,
2054				wc->dlid_path_bits);
2055	}
2056}
2057
2058static int cm_req_handler(struct cm_work *work)
2059{
2060	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
2061	struct cm_req_msg *req_msg;
2062	const struct ib_global_route *grh;
2063	const struct ib_gid_attr *gid_attr;
2064	int ret;
2065
2066	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
2067
2068	cm_id_priv =
2069		cm_alloc_id_priv(work->port->cm_dev->ib_device, NULL, NULL);
2070	if (IS_ERR(cm_id_priv))
2071		return PTR_ERR(cm_id_priv);
2072
2073	cm_id_priv->id.remote_id =
2074		cpu_to_be32(IBA_GET(CM_REQ_LOCAL_COMM_ID, req_msg));
2075	cm_id_priv->id.service_id =
2076		cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));
2077	cm_id_priv->tid = req_msg->hdr.tid;
2078	cm_id_priv->timeout_ms = cm_convert_to_ms(
2079		IBA_GET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg));
2080	cm_id_priv->max_cm_retries = IBA_GET(CM_REQ_MAX_CM_RETRIES, req_msg);
2081	cm_id_priv->remote_qpn =
2082		cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg));
2083	cm_id_priv->initiator_depth =
2084		IBA_GET(CM_REQ_RESPONDER_RESOURCES, req_msg);
2085	cm_id_priv->responder_resources =
2086		IBA_GET(CM_REQ_INITIATOR_DEPTH, req_msg);
2087	cm_id_priv->path_mtu = IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg);
2088	cm_id_priv->pkey = cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg));
2089	cm_id_priv->sq_psn = cpu_to_be32(IBA_GET(CM_REQ_STARTING_PSN, req_msg));
2090	cm_id_priv->retry_count = IBA_GET(CM_REQ_RETRY_COUNT, req_msg);
2091	cm_id_priv->rnr_retry_count = IBA_GET(CM_REQ_RNR_RETRY_COUNT, req_msg);
2092	cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);
2093
2094	ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
2095				      work->mad_recv_wc->recv_buf.grh,
2096				      &cm_id_priv->av);
2097	if (ret)
2098		goto destroy;
2099	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
2100							    id.local_id);
2101	if (IS_ERR(cm_id_priv->timewait_info)) {
2102		ret = PTR_ERR(cm_id_priv->timewait_info);
2103		cm_id_priv->timewait_info = NULL;
2104		goto destroy;
2105	}
2106	cm_id_priv->timewait_info->work.remote_id = cm_id_priv->id.remote_id;
2107	cm_id_priv->timewait_info->remote_ca_guid =
2108		cpu_to_be64(IBA_GET(CM_REQ_LOCAL_CA_GUID, req_msg));
2109	cm_id_priv->timewait_info->remote_qpn = cm_id_priv->remote_qpn;
2110
2111	/*
2112	 * Note that the ID pointer is not in the xarray at this point,
2113	 * so this set is only visible to the local thread.
2114	 */
2115	cm_id_priv->id.state = IB_CM_REQ_RCVD;
2116
2117	listen_cm_id_priv = cm_match_req(work, cm_id_priv);
2118	if (!listen_cm_id_priv) {
2119		trace_icm_no_listener_err(&cm_id_priv->id);
2120		cm_id_priv->id.state = IB_CM_IDLE;
2121		ret = -EINVAL;
2122		goto destroy;
2123	}
2124
2125	memset(&work->path[0], 0, sizeof(work->path[0]));
2126	if (cm_req_has_alt_path(req_msg))
2127		memset(&work->path[1], 0, sizeof(work->path[1]));
2128	grh = rdma_ah_read_grh(&cm_id_priv->av.ah_attr);
2129	gid_attr = grh->sgid_attr;
2130
2131	if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE) {
2132		work->path[0].rec_type =
2133			sa_conv_gid_to_pathrec_type(gid_attr->gid_type);
2134	} else {
2135		cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
2136		cm_path_set_rec_type(
2137			work->port->cm_dev->ib_device, work->port->port_num,
2138			&work->path[0],
2139			IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID,
2140					req_msg));
2141	}
2142	if (cm_req_has_alt_path(req_msg))
2143		work->path[1].rec_type = work->path[0].rec_type;
2144	cm_format_paths_from_req(req_msg, &work->path[0],
2145				 &work->path[1], work->mad_recv_wc->wc);
2146	if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE)
2147		sa_path_set_dmac(&work->path[0],
2148				 cm_id_priv->av.ah_attr.roce.dmac);
2149	work->path[0].hop_limit = grh->hop_limit;
2150
2151	/* This destroy call is needed to pair with cm_init_av_for_response */
2152	cm_destroy_av(&cm_id_priv->av);
2153	ret = cm_init_av_by_path(&work->path[0], gid_attr, &cm_id_priv->av);
2154	if (ret) {
2155		int err;
2156
2157		err = rdma_query_gid(work->port->cm_dev->ib_device,
2158				     work->port->port_num, 0,
2159				     &work->path[0].sgid);
2160		if (err)
2161			ib_send_cm_rej(&cm_id_priv->id, IB_CM_REJ_INVALID_GID,
2162				       NULL, 0, NULL, 0);
2163		else
2164			ib_send_cm_rej(&cm_id_priv->id, IB_CM_REJ_INVALID_GID,
2165				       &work->path[0].sgid,
2166				       sizeof(work->path[0].sgid),
2167				       NULL, 0);
2168		goto rejected;
2169	}
2170	if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_IB)
2171		cm_id_priv->av.dlid_datapath =
2172			IBA_GET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg);
2173
2174	if (cm_req_has_alt_path(req_msg)) {
2175		ret = cm_init_av_by_path(&work->path[1], NULL,
2176					 &cm_id_priv->alt_av);
2177		if (ret) {
2178			ib_send_cm_rej(&cm_id_priv->id,
2179				       IB_CM_REJ_INVALID_ALT_GID,
2180				       &work->path[0].sgid,
2181				       sizeof(work->path[0].sgid), NULL, 0);
2182			goto rejected;
2183		}
2184	}
2185
2186	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
2187	cm_id_priv->id.context = listen_cm_id_priv->id.context;
2188	cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
2189
2190	/* Now MAD handlers can see the new ID */
2191	spin_lock_irq(&cm_id_priv->lock);
2192	cm_finalize_id(cm_id_priv);
2193
2194	/* Refcount belongs to the event, pairs with cm_process_work() */
2195	refcount_inc(&cm_id_priv->refcount);
2196	cm_queue_work_unlock(cm_id_priv, work);
2197	/*
2198	 * Since this ID was just created and was not made visible to other MAD
2199	 * handlers until the cm_finalize_id() above we know that the
2200	 * cm_process_work() will deliver the event and the listen_cm_id
2201	 * embedded in the event can be derefed here.
2202	 */
2203	cm_deref_id(listen_cm_id_priv);
2204	return 0;
2205
2206rejected:
2207	cm_deref_id(listen_cm_id_priv);
2208destroy:
2209	ib_destroy_cm_id(&cm_id_priv->id);
2210	return ret;
2211}
2212
2213static void cm_format_rep(struct cm_rep_msg *rep_msg,
2214			  struct cm_id_private *cm_id_priv,
2215			  struct ib_cm_rep_param *param)
2216{
2217	cm_format_mad_ece_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid,
2218			      param->ece.attr_mod);
2219	IBA_SET(CM_REP_LOCAL_COMM_ID, rep_msg,
2220		be32_to_cpu(cm_id_priv->id.local_id));
2221	IBA_SET(CM_REP_REMOTE_COMM_ID, rep_msg,
2222		be32_to_cpu(cm_id_priv->id.remote_id));
2223	IBA_SET(CM_REP_STARTING_PSN, rep_msg, param->starting_psn);
2224	IBA_SET(CM_REP_RESPONDER_RESOURCES, rep_msg,
2225		param->responder_resources);
2226	IBA_SET(CM_REP_TARGET_ACK_DELAY, rep_msg,
2227		cm_id_priv->av.port->cm_dev->ack_delay);
2228	IBA_SET(CM_REP_FAILOVER_ACCEPTED, rep_msg, param->failover_accepted);
2229	IBA_SET(CM_REP_RNR_RETRY_COUNT, rep_msg, param->rnr_retry_count);
2230	IBA_SET(CM_REP_LOCAL_CA_GUID, rep_msg,
2231		be64_to_cpu(cm_id_priv->id.device->node_guid));
2232
2233	if (cm_id_priv->qp_type != IB_QPT_XRC_TGT) {
2234		IBA_SET(CM_REP_INITIATOR_DEPTH, rep_msg,
2235			param->initiator_depth);
2236		IBA_SET(CM_REP_END_TO_END_FLOW_CONTROL, rep_msg,
2237			param->flow_control);
2238		IBA_SET(CM_REP_SRQ, rep_msg, param->srq);
2239		IBA_SET(CM_REP_LOCAL_QPN, rep_msg, param->qp_num);
2240	} else {
2241		IBA_SET(CM_REP_SRQ, rep_msg, 1);
2242		IBA_SET(CM_REP_LOCAL_EE_CONTEXT_NUMBER, rep_msg, param->qp_num);
2243	}
2244
2245	IBA_SET(CM_REP_VENDOR_ID_L, rep_msg, param->ece.vendor_id);
2246	IBA_SET(CM_REP_VENDOR_ID_M, rep_msg, param->ece.vendor_id >> 8);
2247	IBA_SET(CM_REP_VENDOR_ID_H, rep_msg, param->ece.vendor_id >> 16);
2248
2249	if (param->private_data && param->private_data_len)
2250		IBA_SET_MEM(CM_REP_PRIVATE_DATA, rep_msg, param->private_data,
2251			    param->private_data_len);
2252}
2253
2254int ib_send_cm_rep(struct ib_cm_id *cm_id,
2255		   struct ib_cm_rep_param *param)
2256{
2257	struct cm_id_private *cm_id_priv;
2258	struct ib_mad_send_buf *msg;
2259	struct cm_rep_msg *rep_msg;
2260	unsigned long flags;
2261	int ret;
2262
2263	if (param->private_data &&
2264	    param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
2265		return -EINVAL;
2266
2267	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2268	spin_lock_irqsave(&cm_id_priv->lock, flags);
2269	if (cm_id->state != IB_CM_REQ_RCVD &&
2270	    cm_id->state != IB_CM_MRA_REQ_SENT) {
2271		trace_icm_send_rep_err(cm_id_priv->id.local_id, cm_id->state);
2272		ret = -EINVAL;
2273		goto out;
2274	}
2275
2276	msg = cm_alloc_priv_msg(cm_id_priv);
2277	if (IS_ERR(msg)) {
2278		ret = PTR_ERR(msg);
2279		goto out;
2280	}
2281
2282	rep_msg = (struct cm_rep_msg *) msg->mad;
2283	cm_format_rep(rep_msg, cm_id_priv, param);
2284	msg->timeout_ms = cm_id_priv->timeout_ms;
2285	msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;
2286
2287	trace_icm_send_rep(cm_id);
2288	ret = ib_post_send_mad(msg, NULL);
2289	if (ret)
2290		goto out_free;
2291
2292	cm_id->state = IB_CM_REP_SENT;
2293	cm_id_priv->initiator_depth = param->initiator_depth;
2294	cm_id_priv->responder_resources = param->responder_resources;
2295	cm_id_priv->rq_psn = cpu_to_be32(IBA_GET(CM_REP_STARTING_PSN, rep_msg));
2296	WARN_ONCE(param->qp_num & 0xFF000000,
2297		  "IBTA declares QPN to be 24 bits, but it is 0x%X\n",
2298		  param->qp_num);
2299	cm_id_priv->local_qpn = cpu_to_be32(param->qp_num & 0xFFFFFF);
2300	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2301	return 0;
2302
2303out_free:
2304	cm_free_priv_msg(msg);
2305out:
2306	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2307	return ret;
2308}
2309EXPORT_SYMBOL(ib_send_cm_rep);
2310
2311static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
2312			  struct cm_id_private *cm_id_priv,
2313			  const void *private_data,
2314			  u8 private_data_len)
2315{
2316	cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
2317	IBA_SET(CM_RTU_LOCAL_COMM_ID, rtu_msg,
2318		be32_to_cpu(cm_id_priv->id.local_id));
2319	IBA_SET(CM_RTU_REMOTE_COMM_ID, rtu_msg,
2320		be32_to_cpu(cm_id_priv->id.remote_id));
2321
2322	if (private_data && private_data_len)
2323		IBA_SET_MEM(CM_RTU_PRIVATE_DATA, rtu_msg, private_data,
2324			    private_data_len);
2325}
2326
2327int ib_send_cm_rtu(struct ib_cm_id *cm_id,
2328		   const void *private_data,
2329		   u8 private_data_len)
2330{
2331	struct cm_id_private *cm_id_priv;
2332	struct ib_mad_send_buf *msg;
2333	unsigned long flags;
2334	void *data;
2335	int ret;
2336
2337	if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
2338		return -EINVAL;
2339
2340	data = cm_copy_private_data(private_data, private_data_len);
2341	if (IS_ERR(data))
2342		return PTR_ERR(data);
2343
2344	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2345	spin_lock_irqsave(&cm_id_priv->lock, flags);
2346	if (cm_id->state != IB_CM_REP_RCVD &&
2347	    cm_id->state != IB_CM_MRA_REP_SENT) {
2348		trace_icm_send_cm_rtu_err(cm_id);
2349		ret = -EINVAL;
2350		goto error;
2351	}
2352
2353	msg = cm_alloc_msg(cm_id_priv);
2354	if (IS_ERR(msg)) {
2355		ret = PTR_ERR(msg);
2356		goto error;
2357	}
2358
2359	cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
2360		      private_data, private_data_len);
2361
2362	trace_icm_send_rtu(cm_id);
2363	ret = ib_post_send_mad(msg, NULL);
2364	if (ret) {
2365		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2366		cm_free_msg(msg);
2367		kfree(data);
2368		return ret;
2369	}
2370
2371	cm_id->state = IB_CM_ESTABLISHED;
2372	cm_set_private_data(cm_id_priv, data, private_data_len);
2373	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2374	return 0;
2375
2376error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2377	kfree(data);
2378	return ret;
2379}
2380EXPORT_SYMBOL(ib_send_cm_rtu);
2381
2382static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type)
2383{
2384	struct cm_rep_msg *rep_msg;
2385	struct ib_cm_rep_event_param *param;
2386
2387	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
2388	param = &work->cm_event.param.rep_rcvd;
2389	param->remote_ca_guid =
2390		cpu_to_be64(IBA_GET(CM_REP_LOCAL_CA_GUID, rep_msg));
2391	param->remote_qkey = IBA_GET(CM_REP_LOCAL_Q_KEY, rep_msg);
2392	param->remote_qpn = be32_to_cpu(cm_rep_get_qpn(rep_msg, qp_type));
2393	param->starting_psn = IBA_GET(CM_REP_STARTING_PSN, rep_msg);
2394	param->responder_resources = IBA_GET(CM_REP_INITIATOR_DEPTH, rep_msg);
2395	param->initiator_depth = IBA_GET(CM_REP_RESPONDER_RESOURCES, rep_msg);
2396	param->target_ack_delay = IBA_GET(CM_REP_TARGET_ACK_DELAY, rep_msg);
2397	param->failover_accepted = IBA_GET(CM_REP_FAILOVER_ACCEPTED, rep_msg);
2398	param->flow_control = IBA_GET(CM_REP_END_TO_END_FLOW_CONTROL, rep_msg);
2399	param->rnr_retry_count = IBA_GET(CM_REP_RNR_RETRY_COUNT, rep_msg);
2400	param->srq = IBA_GET(CM_REP_SRQ, rep_msg);
2401	param->ece.vendor_id = IBA_GET(CM_REP_VENDOR_ID_H, rep_msg) << 16;
2402	param->ece.vendor_id |= IBA_GET(CM_REP_VENDOR_ID_M, rep_msg) << 8;
2403	param->ece.vendor_id |= IBA_GET(CM_REP_VENDOR_ID_L, rep_msg);
2404	param->ece.attr_mod = be32_to_cpu(rep_msg->hdr.attr_mod);
2405
2406	work->cm_event.private_data =
2407		IBA_GET_MEM_PTR(CM_REP_PRIVATE_DATA, rep_msg);
2408}
2409
2410static void cm_dup_rep_handler(struct cm_work *work)
2411{
2412	struct cm_id_private *cm_id_priv;
2413	struct cm_rep_msg *rep_msg;
2414	struct ib_mad_send_buf *msg = NULL;
2415	int ret;
2416
2417	rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
2418	cm_id_priv = cm_acquire_id(
2419		cpu_to_be32(IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)),
2420		cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg)));
2421	if (!cm_id_priv)
2422		return;
2423
2424	atomic_long_inc(
2425		&work->port->counters[CM_RECV_DUPLICATES][CM_REP_COUNTER]);
2426	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
2427	if (ret)
2428		goto deref;
2429
2430	spin_lock_irq(&cm_id_priv->lock);
2431	if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
2432		cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
2433			      cm_id_priv->private_data,
2434			      cm_id_priv->private_data_len);
2435	else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
2436		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2437			      CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
2438			      cm_id_priv->private_data,
2439			      cm_id_priv->private_data_len);
2440	else
2441		goto unlock;
2442	spin_unlock_irq(&cm_id_priv->lock);
2443
2444	trace_icm_send_dup_rep(&cm_id_priv->id);
2445	ret = ib_post_send_mad(msg, NULL);
2446	if (ret)
2447		goto free;
2448	goto deref;
2449
2450unlock:	spin_unlock_irq(&cm_id_priv->lock);
2451free:	cm_free_response_msg(msg);
2452deref:	cm_deref_id(cm_id_priv);
2453}
2454
2455static int cm_rep_handler(struct cm_work *work)
2456{
2457	struct cm_id_private *cm_id_priv;
2458	struct cm_rep_msg *rep_msg;
2459	int ret;
2460	struct cm_id_private *cur_cm_id_priv;
2461	struct cm_timewait_info *timewait_info;
2462
2463	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
2464	cm_id_priv = cm_acquire_id(
2465		cpu_to_be32(IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)), 0);
2466	if (!cm_id_priv) {
2467		cm_dup_rep_handler(work);
2468		trace_icm_remote_no_priv_err(
2469			 IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
2470		return -EINVAL;
2471	}
2472
2473	cm_format_rep_event(work, cm_id_priv->qp_type);
2474
2475	spin_lock_irq(&cm_id_priv->lock);
2476	switch (cm_id_priv->id.state) {
2477	case IB_CM_REQ_SENT:
2478	case IB_CM_MRA_REQ_RCVD:
2479		break;
2480	default:
2481		ret = -EINVAL;
2482		trace_icm_rep_unknown_err(
2483			IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg),
2484			IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg),
2485			cm_id_priv->id.state);
2486		spin_unlock_irq(&cm_id_priv->lock);
2487		goto error;
2488	}
2489
2490	cm_id_priv->timewait_info->work.remote_id =
2491		cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg));
2492	cm_id_priv->timewait_info->remote_ca_guid =
2493		cpu_to_be64(IBA_GET(CM_REP_LOCAL_CA_GUID, rep_msg));
2494	cm_id_priv->timewait_info->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
2495
2496	spin_lock(&cm.lock);
2497	/* Check for duplicate REP. */
2498	if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
2499		spin_unlock(&cm.lock);
2500		spin_unlock_irq(&cm_id_priv->lock);
2501		ret = -EINVAL;
2502		trace_icm_insert_failed_err(
2503			 IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
2504		goto error;
2505	}
2506	/* Check for a stale connection. */
2507	timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
2508	if (timewait_info) {
2509		cm_remove_remote(cm_id_priv);
2510		cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
2511					   timewait_info->work.remote_id);
2512
2513		spin_unlock(&cm.lock);
2514		spin_unlock_irq(&cm_id_priv->lock);
2515		cm_issue_rej(work->port, work->mad_recv_wc,
2516			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
2517			     NULL, 0);
2518		ret = -EINVAL;
2519		trace_icm_staleconn_err(
2520			IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg),
2521			IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
2522
2523		if (cur_cm_id_priv) {
2524			ib_send_cm_dreq(&cur_cm_id_priv->id, NULL, 0);
2525			cm_deref_id(cur_cm_id_priv);
2526		}
2527
2528		goto error;
2529	}
2530	spin_unlock(&cm.lock);
2531
2532	cm_id_priv->id.state = IB_CM_REP_RCVD;
2533	cm_id_priv->id.remote_id =
2534		cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg));
2535	cm_id_priv->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
2536	cm_id_priv->initiator_depth =
2537		IBA_GET(CM_REP_RESPONDER_RESOURCES, rep_msg);
2538	cm_id_priv->responder_resources =
2539		IBA_GET(CM_REP_INITIATOR_DEPTH, rep_msg);
2540	cm_id_priv->sq_psn = cpu_to_be32(IBA_GET(CM_REP_STARTING_PSN, rep_msg));
2541	cm_id_priv->rnr_retry_count = IBA_GET(CM_REP_RNR_RETRY_COUNT, rep_msg);
2542	cm_id_priv->target_ack_delay =
2543		IBA_GET(CM_REP_TARGET_ACK_DELAY, rep_msg);
2544	cm_id_priv->av.timeout =
2545			cm_ack_timeout(cm_id_priv->target_ack_delay,
2546				       cm_id_priv->av.timeout - 1);
2547	cm_id_priv->alt_av.timeout =
2548			cm_ack_timeout(cm_id_priv->target_ack_delay,
2549				       cm_id_priv->alt_av.timeout - 1);
2550
2551	ib_cancel_mad(cm_id_priv->msg);
2552	cm_queue_work_unlock(cm_id_priv, work);
2553	return 0;
2554
2555error:
2556	cm_deref_id(cm_id_priv);
2557	return ret;
2558}
2559
2560static int cm_establish_handler(struct cm_work *work)
2561{
2562	struct cm_id_private *cm_id_priv;
2563
2564	/* See comment in cm_establish about lookup. */
2565	cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
2566	if (!cm_id_priv)
2567		return -EINVAL;
2568
2569	spin_lock_irq(&cm_id_priv->lock);
2570	if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
2571		spin_unlock_irq(&cm_id_priv->lock);
2572		goto out;
2573	}
2574
2575	ib_cancel_mad(cm_id_priv->msg);
2576	cm_queue_work_unlock(cm_id_priv, work);
2577	return 0;
2578out:
2579	cm_deref_id(cm_id_priv);
2580	return -EINVAL;
2581}
2582
2583static int cm_rtu_handler(struct cm_work *work)
2584{
2585	struct cm_id_private *cm_id_priv;
2586	struct cm_rtu_msg *rtu_msg;
2587
2588	rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
2589	cm_id_priv = cm_acquire_id(
2590		cpu_to_be32(IBA_GET(CM_RTU_REMOTE_COMM_ID, rtu_msg)),
2591		cpu_to_be32(IBA_GET(CM_RTU_LOCAL_COMM_ID, rtu_msg)));
2592	if (!cm_id_priv)
2593		return -EINVAL;
2594
2595	work->cm_event.private_data =
2596		IBA_GET_MEM_PTR(CM_RTU_PRIVATE_DATA, rtu_msg);
2597
2598	spin_lock_irq(&cm_id_priv->lock);
2599	if (cm_id_priv->id.state != IB_CM_REP_SENT &&
2600	    cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
2601		spin_unlock_irq(&cm_id_priv->lock);
2602		atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
2603						     [CM_RTU_COUNTER]);
2604		goto out;
2605	}
2606	cm_id_priv->id.state = IB_CM_ESTABLISHED;
2607
2608	ib_cancel_mad(cm_id_priv->msg);
2609	cm_queue_work_unlock(cm_id_priv, work);
2610	return 0;
2611out:
2612	cm_deref_id(cm_id_priv);
2613	return -EINVAL;
2614}
2615
2616static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
2617			  struct cm_id_private *cm_id_priv,
2618			  const void *private_data,
2619			  u8 private_data_len)
2620{
2621	cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
2622			  cm_form_tid(cm_id_priv));
2623	IBA_SET(CM_DREQ_LOCAL_COMM_ID, dreq_msg,
2624		be32_to_cpu(cm_id_priv->id.local_id));
2625	IBA_SET(CM_DREQ_REMOTE_COMM_ID, dreq_msg,
2626		be32_to_cpu(cm_id_priv->id.remote_id));
2627	IBA_SET(CM_DREQ_REMOTE_QPN_EECN, dreq_msg,
2628		be32_to_cpu(cm_id_priv->remote_qpn));
2629
2630	if (private_data && private_data_len)
2631		IBA_SET_MEM(CM_DREQ_PRIVATE_DATA, dreq_msg, private_data,
2632			    private_data_len);
2633}
2634
2635static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv,
2636			       const void *private_data, u8 private_data_len)
2637{
2638	struct ib_mad_send_buf *msg;
2639	int ret;
2640
2641	lockdep_assert_held(&cm_id_priv->lock);
2642
2643	if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
2644		return -EINVAL;
2645
2646	if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
2647		trace_icm_dreq_skipped(&cm_id_priv->id);
2648		return -EINVAL;
2649	}
2650
2651	if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
2652	    cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
2653		ib_cancel_mad(cm_id_priv->msg);
2654
2655	msg = cm_alloc_priv_msg(cm_id_priv);
2656	if (IS_ERR(msg)) {
2657		cm_enter_timewait(cm_id_priv);
2658		return PTR_ERR(msg);
2659	}
2660
2661	cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
2662		       private_data, private_data_len);
2663	msg->timeout_ms = cm_id_priv->timeout_ms;
2664	msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;
2665
2666	trace_icm_send_dreq(&cm_id_priv->id);
2667	ret = ib_post_send_mad(msg, NULL);
2668	if (ret) {
2669		cm_enter_timewait(cm_id_priv);
2670		cm_free_priv_msg(msg);
2671		return ret;
2672	}
2673
2674	cm_id_priv->id.state = IB_CM_DREQ_SENT;
2675	return 0;
2676}
2677
2678int ib_send_cm_dreq(struct ib_cm_id *cm_id, const void *private_data,
2679		    u8 private_data_len)
2680{
2681	struct cm_id_private *cm_id_priv =
2682		container_of(cm_id, struct cm_id_private, id);
2683	unsigned long flags;
2684	int ret;
2685
2686	spin_lock_irqsave(&cm_id_priv->lock, flags);
2687	ret = cm_send_dreq_locked(cm_id_priv, private_data, private_data_len);
2688	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2689	return ret;
2690}
2691EXPORT_SYMBOL(ib_send_cm_dreq);
2692
2693static void cm_format_drep(struct cm_drep_msg *drep_msg,
2694			  struct cm_id_private *cm_id_priv,
2695			  const void *private_data,
2696			  u8 private_data_len)
2697{
2698	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
2699	IBA_SET(CM_DREP_LOCAL_COMM_ID, drep_msg,
2700		be32_to_cpu(cm_id_priv->id.local_id));
2701	IBA_SET(CM_DREP_REMOTE_COMM_ID, drep_msg,
2702		be32_to_cpu(cm_id_priv->id.remote_id));
2703
2704	if (private_data && private_data_len)
2705		IBA_SET_MEM(CM_DREP_PRIVATE_DATA, drep_msg, private_data,
2706			    private_data_len);
2707}
2708
2709static int cm_send_drep_locked(struct cm_id_private *cm_id_priv,
2710			       void *private_data, u8 private_data_len)
2711{
2712	struct ib_mad_send_buf *msg;
2713	int ret;
2714
2715	lockdep_assert_held(&cm_id_priv->lock);
2716
2717	if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
2718		return -EINVAL;
2719
2720	if (cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
2721		trace_icm_send_drep_err(&cm_id_priv->id);
2722		kfree(private_data);
2723		return -EINVAL;
2724	}
2725
2726	cm_set_private_data(cm_id_priv, private_data, private_data_len);
2727	cm_enter_timewait(cm_id_priv);
2728
2729	msg = cm_alloc_msg(cm_id_priv);
2730	if (IS_ERR(msg))
2731		return PTR_ERR(msg);
2732
2733	cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
2734		       private_data, private_data_len);
2735
2736	trace_icm_send_drep(&cm_id_priv->id);
2737	ret = ib_post_send_mad(msg, NULL);
2738	if (ret) {
2739		cm_free_msg(msg);
2740		return ret;
2741	}
2742	return 0;
2743}
2744
2745int ib_send_cm_drep(struct ib_cm_id *cm_id, const void *private_data,
2746		    u8 private_data_len)
2747{
2748	struct cm_id_private *cm_id_priv =
2749		container_of(cm_id, struct cm_id_private, id);
2750	unsigned long flags;
2751	void *data;
2752	int ret;
2753
2754	data = cm_copy_private_data(private_data, private_data_len);
2755	if (IS_ERR(data))
2756		return PTR_ERR(data);
2757
2758	spin_lock_irqsave(&cm_id_priv->lock, flags);
2759	ret = cm_send_drep_locked(cm_id_priv, data, private_data_len);
2760	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2761	return ret;
2762}
2763EXPORT_SYMBOL(ib_send_cm_drep);
2764
2765static int cm_issue_drep(struct cm_port *port,
2766			 struct ib_mad_recv_wc *mad_recv_wc)
2767{
2768	struct ib_mad_send_buf *msg = NULL;
2769	struct cm_dreq_msg *dreq_msg;
2770	struct cm_drep_msg *drep_msg;
2771	int ret;
2772
2773	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
2774	if (ret)
2775		return ret;
2776
2777	dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad;
2778	drep_msg = (struct cm_drep_msg *) msg->mad;
2779
2780	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid);
2781	IBA_SET(CM_DREP_REMOTE_COMM_ID, drep_msg,
2782		IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg));
2783	IBA_SET(CM_DREP_LOCAL_COMM_ID, drep_msg,
2784		IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg));
2785
2786	trace_icm_issue_drep(
2787		IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg),
2788		IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg));
2789	ret = ib_post_send_mad(msg, NULL);
2790	if (ret)
2791		cm_free_response_msg(msg);
2792
2793	return ret;
2794}
2795
2796static int cm_dreq_handler(struct cm_work *work)
2797{
2798	struct cm_id_private *cm_id_priv;
2799	struct cm_dreq_msg *dreq_msg;
2800	struct ib_mad_send_buf *msg = NULL;
2801
2802	dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
2803	cm_id_priv = cm_acquire_id(
2804		cpu_to_be32(IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg)),
2805		cpu_to_be32(IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg)));
2806	if (!cm_id_priv) {
2807		atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
2808						     [CM_DREQ_COUNTER]);
2809		cm_issue_drep(work->port, work->mad_recv_wc);
2810		trace_icm_no_priv_err(
2811			IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg),
2812			IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg));
2813		return -EINVAL;
2814	}
2815
2816	work->cm_event.private_data =
2817		IBA_GET_MEM_PTR(CM_DREQ_PRIVATE_DATA, dreq_msg);
2818
2819	spin_lock_irq(&cm_id_priv->lock);
2820	if (cm_id_priv->local_qpn !=
2821	    cpu_to_be32(IBA_GET(CM_DREQ_REMOTE_QPN_EECN, dreq_msg)))
2822		goto unlock;
2823
2824	switch (cm_id_priv->id.state) {
2825	case IB_CM_REP_SENT:
2826	case IB_CM_DREQ_SENT:
2827	case IB_CM_MRA_REP_RCVD:
2828		ib_cancel_mad(cm_id_priv->msg);
2829		break;
2830	case IB_CM_ESTABLISHED:
2831		if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
2832		    cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
2833			ib_cancel_mad(cm_id_priv->msg);
2834		break;
2835	case IB_CM_TIMEWAIT:
2836		atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
2837						     [CM_DREQ_COUNTER]);
2838		msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc);
2839		if (IS_ERR(msg))
2840			goto unlock;
2841
2842		cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
2843			       cm_id_priv->private_data,
2844			       cm_id_priv->private_data_len);
2845		spin_unlock_irq(&cm_id_priv->lock);
2846
2847		if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
2848		    ib_post_send_mad(msg, NULL))
2849			cm_free_response_msg(msg);
2850		goto deref;
2851	case IB_CM_DREQ_RCVD:
2852		atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
2853						     [CM_DREQ_COUNTER]);
2854		goto unlock;
2855	default:
2856		trace_icm_dreq_unknown_err(&cm_id_priv->id);
2857		goto unlock;
2858	}
2859	cm_id_priv->id.state = IB_CM_DREQ_RCVD;
2860	cm_id_priv->tid = dreq_msg->hdr.tid;
2861	cm_queue_work_unlock(cm_id_priv, work);
2862	return 0;
2863
2864unlock:	spin_unlock_irq(&cm_id_priv->lock);
2865deref:	cm_deref_id(cm_id_priv);
2866	return -EINVAL;
2867}
2868
2869static int cm_drep_handler(struct cm_work *work)
2870{
2871	struct cm_id_private *cm_id_priv;
2872	struct cm_drep_msg *drep_msg;
2873
2874	drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
2875	cm_id_priv = cm_acquire_id(
2876		cpu_to_be32(IBA_GET(CM_DREP_REMOTE_COMM_ID, drep_msg)),
2877		cpu_to_be32(IBA_GET(CM_DREP_LOCAL_COMM_ID, drep_msg)));
2878	if (!cm_id_priv)
2879		return -EINVAL;
2880
2881	work->cm_event.private_data =
2882		IBA_GET_MEM_PTR(CM_DREP_PRIVATE_DATA, drep_msg);
2883
2884	spin_lock_irq(&cm_id_priv->lock);
2885	if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
2886	    cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
2887		spin_unlock_irq(&cm_id_priv->lock);
2888		goto out;
2889	}
2890	cm_enter_timewait(cm_id_priv);
2891
2892	ib_cancel_mad(cm_id_priv->msg);
2893	cm_queue_work_unlock(cm_id_priv, work);
2894	return 0;
2895out:
2896	cm_deref_id(cm_id_priv);
2897	return -EINVAL;
2898}
2899
2900static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
2901			      enum ib_cm_rej_reason reason, void *ari,
2902			      u8 ari_length, const void *private_data,
2903			      u8 private_data_len)
2904{
2905	enum ib_cm_state state = cm_id_priv->id.state;
2906	struct ib_mad_send_buf *msg;
2907	int ret;
2908
2909	lockdep_assert_held(&cm_id_priv->lock);
2910
2911	if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
2912	    (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
2913		return -EINVAL;
2914
2915	trace_icm_send_rej(&cm_id_priv->id, reason);
2916
2917	switch (state) {
2918	case IB_CM_REQ_SENT:
2919	case IB_CM_MRA_REQ_RCVD:
2920	case IB_CM_REQ_RCVD:
2921	case IB_CM_MRA_REQ_SENT:
2922	case IB_CM_REP_RCVD:
2923	case IB_CM_MRA_REP_SENT:
2924		cm_reset_to_idle(cm_id_priv);
2925		msg = cm_alloc_msg(cm_id_priv);
2926		if (IS_ERR(msg))
2927			return PTR_ERR(msg);
2928		cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason,
2929			      ari, ari_length, private_data, private_data_len,
2930			      state);
2931		break;
2932	case IB_CM_REP_SENT:
2933	case IB_CM_MRA_REP_RCVD:
2934		cm_enter_timewait(cm_id_priv);
2935		msg = cm_alloc_msg(cm_id_priv);
2936		if (IS_ERR(msg))
2937			return PTR_ERR(msg);
2938		cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason,
2939			      ari, ari_length, private_data, private_data_len,
2940			      state);
2941		break;
2942	default:
2943		trace_icm_send_unknown_rej_err(&cm_id_priv->id);
2944		return -EINVAL;
2945	}
2946
2947	ret = ib_post_send_mad(msg, NULL);
2948	if (ret) {
2949		cm_free_msg(msg);
2950		return ret;
2951	}
2952
2953	return 0;
2954}
2955
2956int ib_send_cm_rej(struct ib_cm_id *cm_id, enum ib_cm_rej_reason reason,
2957		   void *ari, u8 ari_length, const void *private_data,
2958		   u8 private_data_len)
2959{
2960	struct cm_id_private *cm_id_priv =
2961		container_of(cm_id, struct cm_id_private, id);
2962	unsigned long flags;
2963	int ret;
2964
2965	spin_lock_irqsave(&cm_id_priv->lock, flags);
2966	ret = cm_send_rej_locked(cm_id_priv, reason, ari, ari_length,
2967				 private_data, private_data_len);
2968	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2969	return ret;
2970}
2971EXPORT_SYMBOL(ib_send_cm_rej);
2972
2973static void cm_format_rej_event(struct cm_work *work)
2974{
2975	struct cm_rej_msg *rej_msg;
2976	struct ib_cm_rej_event_param *param;
2977
2978	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
2979	param = &work->cm_event.param.rej_rcvd;
2980	param->ari = IBA_GET_MEM_PTR(CM_REJ_ARI, rej_msg);
2981	param->ari_length = IBA_GET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg);
2982	param->reason = IBA_GET(CM_REJ_REASON, rej_msg);
2983	work->cm_event.private_data =
2984		IBA_GET_MEM_PTR(CM_REJ_PRIVATE_DATA, rej_msg);
2985}
2986
2987static struct cm_id_private *cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
2988{
2989	struct cm_id_private *cm_id_priv;
2990	__be32 remote_id;
2991
2992	remote_id = cpu_to_be32(IBA_GET(CM_REJ_LOCAL_COMM_ID, rej_msg));
2993
2994	if (IBA_GET(CM_REJ_REASON, rej_msg) == IB_CM_REJ_TIMEOUT) {
2995		cm_id_priv = cm_find_remote_id(
2996			*((__be64 *)IBA_GET_MEM_PTR(CM_REJ_ARI, rej_msg)),
2997			remote_id);
2998	} else if (IBA_GET(CM_REJ_MESSAGE_REJECTED, rej_msg) ==
2999		   CM_MSG_RESPONSE_REQ)
3000		cm_id_priv = cm_acquire_id(
3001			cpu_to_be32(IBA_GET(CM_REJ_REMOTE_COMM_ID, rej_msg)),
3002			0);
3003	else
3004		cm_id_priv = cm_acquire_id(
3005			cpu_to_be32(IBA_GET(CM_REJ_REMOTE_COMM_ID, rej_msg)),
3006			remote_id);
3007
3008	return cm_id_priv;
3009}
3010
3011static int cm_rej_handler(struct cm_work *work)
3012{
3013	struct cm_id_private *cm_id_priv;
3014	struct cm_rej_msg *rej_msg;
3015
3016	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
3017	cm_id_priv = cm_acquire_rejected_id(rej_msg);
3018	if (!cm_id_priv)
3019		return -EINVAL;
3020
3021	cm_format_rej_event(work);
3022
3023	spin_lock_irq(&cm_id_priv->lock);
3024	switch (cm_id_priv->id.state) {
3025	case IB_CM_REQ_SENT:
3026	case IB_CM_MRA_REQ_RCVD:
3027	case IB_CM_REP_SENT:
3028	case IB_CM_MRA_REP_RCVD:
3029		ib_cancel_mad(cm_id_priv->msg);
3030		fallthrough;
3031	case IB_CM_REQ_RCVD:
3032	case IB_CM_MRA_REQ_SENT:
3033		if (IBA_GET(CM_REJ_REASON, rej_msg) == IB_CM_REJ_STALE_CONN)
3034			cm_enter_timewait(cm_id_priv);
3035		else
3036			cm_reset_to_idle(cm_id_priv);
3037		break;
3038	case IB_CM_DREQ_SENT:
3039		ib_cancel_mad(cm_id_priv->msg);
3040		fallthrough;
3041	case IB_CM_REP_RCVD:
3042	case IB_CM_MRA_REP_SENT:
3043		cm_enter_timewait(cm_id_priv);
3044		break;
3045	case IB_CM_ESTABLISHED:
3046		if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT ||
3047		    cm_id_priv->id.lap_state == IB_CM_LAP_SENT) {
3048			if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT)
3049				ib_cancel_mad(cm_id_priv->msg);
3050			cm_enter_timewait(cm_id_priv);
3051			break;
3052		}
3053		fallthrough;
3054	default:
3055		trace_icm_rej_unknown_err(&cm_id_priv->id);
3056		spin_unlock_irq(&cm_id_priv->lock);
3057		goto out;
3058	}
3059
3060	cm_queue_work_unlock(cm_id_priv, work);
3061	return 0;
3062out:
3063	cm_deref_id(cm_id_priv);
3064	return -EINVAL;
3065}
3066
3067int ib_send_cm_mra(struct ib_cm_id *cm_id,
3068		   u8 service_timeout,
3069		   const void *private_data,
3070		   u8 private_data_len)
3071{
3072	struct cm_id_private *cm_id_priv;
3073	struct ib_mad_send_buf *msg;
3074	enum ib_cm_state cm_state;
3075	enum ib_cm_lap_state lap_state;
3076	enum cm_msg_response msg_response;
3077	void *data;
3078	unsigned long flags;
3079	int ret;
3080
3081	if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
3082		return -EINVAL;
3083
3084	data = cm_copy_private_data(private_data, private_data_len);
3085	if (IS_ERR(data))
3086		return PTR_ERR(data);
3087
3088	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3089
3090	spin_lock_irqsave(&cm_id_priv->lock, flags);
3091	switch (cm_id_priv->id.state) {
3092	case IB_CM_REQ_RCVD:
3093		cm_state = IB_CM_MRA_REQ_SENT;
3094		lap_state = cm_id->lap_state;
3095		msg_response = CM_MSG_RESPONSE_REQ;
3096		break;
3097	case IB_CM_REP_RCVD:
3098		cm_state = IB_CM_MRA_REP_SENT;
3099		lap_state = cm_id->lap_state;
3100		msg_response = CM_MSG_RESPONSE_REP;
3101		break;
3102	case IB_CM_ESTABLISHED:
3103		if (cm_id->lap_state == IB_CM_LAP_RCVD) {
3104			cm_state = cm_id->state;
3105			lap_state = IB_CM_MRA_LAP_SENT;
3106			msg_response = CM_MSG_RESPONSE_OTHER;
3107			break;
3108		}
3109		fallthrough;
3110	default:
3111		trace_icm_send_mra_unknown_err(&cm_id_priv->id);
3112		ret = -EINVAL;
3113		goto error_unlock;
3114	}
3115
3116	if (!(service_timeout & IB_CM_MRA_FLAG_DELAY)) {
3117		msg = cm_alloc_msg(cm_id_priv);
3118		if (IS_ERR(msg)) {
3119			ret = PTR_ERR(msg);
3120			goto error_unlock;
3121		}
3122
3123		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
3124			      msg_response, service_timeout,
3125			      private_data, private_data_len);
3126		trace_icm_send_mra(cm_id);
3127		ret = ib_post_send_mad(msg, NULL);
3128		if (ret)
3129			goto error_free_msg;
3130	}
3131
3132	cm_id->state = cm_state;
3133	cm_id->lap_state = lap_state;
3134	cm_id_priv->service_timeout = service_timeout;
3135	cm_set_private_data(cm_id_priv, data, private_data_len);
3136	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3137	return 0;
3138
3139error_free_msg:
3140	cm_free_msg(msg);
3141error_unlock:
3142	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3143	kfree(data);
3144	return ret;
3145}
3146EXPORT_SYMBOL(ib_send_cm_mra);
3147
3148static struct cm_id_private *cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
3149{
3150	switch (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg)) {
3151	case CM_MSG_RESPONSE_REQ:
3152		return cm_acquire_id(
3153			cpu_to_be32(IBA_GET(CM_MRA_REMOTE_COMM_ID, mra_msg)),
3154			0);
3155	case CM_MSG_RESPONSE_REP:
3156	case CM_MSG_RESPONSE_OTHER:
3157		return cm_acquire_id(
3158			cpu_to_be32(IBA_GET(CM_MRA_REMOTE_COMM_ID, mra_msg)),
3159			cpu_to_be32(IBA_GET(CM_MRA_LOCAL_COMM_ID, mra_msg)));
3160	default:
3161		return NULL;
3162	}
3163}
3164
3165static int cm_mra_handler(struct cm_work *work)
3166{
3167	struct cm_id_private *cm_id_priv;
3168	struct cm_mra_msg *mra_msg;
3169	int timeout;
3170
3171	mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
3172	cm_id_priv = cm_acquire_mraed_id(mra_msg);
3173	if (!cm_id_priv)
3174		return -EINVAL;
3175
3176	work->cm_event.private_data =
3177		IBA_GET_MEM_PTR(CM_MRA_PRIVATE_DATA, mra_msg);
3178	work->cm_event.param.mra_rcvd.service_timeout =
3179		IBA_GET(CM_MRA_SERVICE_TIMEOUT, mra_msg);
3180	timeout = cm_convert_to_ms(IBA_GET(CM_MRA_SERVICE_TIMEOUT, mra_msg)) +
3181		  cm_convert_to_ms(cm_id_priv->av.timeout);
3182
3183	spin_lock_irq(&cm_id_priv->lock);
3184	switch (cm_id_priv->id.state) {
3185	case IB_CM_REQ_SENT:
3186		if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
3187			    CM_MSG_RESPONSE_REQ ||
3188		    ib_modify_mad(cm_id_priv->msg, timeout))
3189			goto out;
3190		cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
3191		break;
3192	case IB_CM_REP_SENT:
3193		if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
3194			    CM_MSG_RESPONSE_REP ||
3195		    ib_modify_mad(cm_id_priv->msg, timeout))
3196			goto out;
3197		cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
3198		break;
3199	case IB_CM_ESTABLISHED:
3200		if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
3201			    CM_MSG_RESPONSE_OTHER ||
3202		    cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
3203		    ib_modify_mad(cm_id_priv->msg, timeout)) {
3204			if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
3205				atomic_long_inc(
3206					&work->port->counters[CM_RECV_DUPLICATES]
3207							     [CM_MRA_COUNTER]);
3208			goto out;
3209		}
3210		cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
3211		break;
3212	case IB_CM_MRA_REQ_RCVD:
3213	case IB_CM_MRA_REP_RCVD:
3214		atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
3215						     [CM_MRA_COUNTER]);
3216		fallthrough;
3217	default:
3218		trace_icm_mra_unknown_err(&cm_id_priv->id);
3219		goto out;
3220	}
3221
3222	cm_id_priv->msg->context[1] = (void *) (unsigned long)
3223				      cm_id_priv->id.state;
3224	cm_queue_work_unlock(cm_id_priv, work);
3225	return 0;
3226out:
3227	spin_unlock_irq(&cm_id_priv->lock);
3228	cm_deref_id(cm_id_priv);
3229	return -EINVAL;
3230}
3231
3232static void cm_format_path_lid_from_lap(struct cm_lap_msg *lap_msg,
3233					struct sa_path_rec *path)
3234{
3235	u32 lid;
3236
3237	if (path->rec_type != SA_PATH_REC_TYPE_OPA) {
3238		sa_path_set_dlid(path, IBA_GET(CM_LAP_ALTERNATE_LOCAL_PORT_LID,
3239					       lap_msg));
3240		sa_path_set_slid(path, IBA_GET(CM_LAP_ALTERNATE_REMOTE_PORT_LID,
3241					       lap_msg));
3242	} else {
3243		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
3244			CM_LAP_ALTERNATE_LOCAL_PORT_GID, lap_msg));
3245		sa_path_set_dlid(path, lid);
3246
3247		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
3248			CM_LAP_ALTERNATE_REMOTE_PORT_GID, lap_msg));
3249		sa_path_set_slid(path, lid);
3250	}
3251}
3252
3253static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
3254				    struct sa_path_rec *path,
3255				    struct cm_lap_msg *lap_msg)
3256{
3257	path->dgid = *IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_LOCAL_PORT_GID, lap_msg);
3258	path->sgid =
3259		*IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_REMOTE_PORT_GID, lap_msg);
3260	path->flow_label =
3261		cpu_to_be32(IBA_GET(CM_LAP_ALTERNATE_FLOW_LABEL, lap_msg));
3262	path->hop_limit = IBA_GET(CM_LAP_ALTERNATE_HOP_LIMIT, lap_msg);
3263	path->traffic_class = IBA_GET(CM_LAP_ALTERNATE_TRAFFIC_CLASS, lap_msg);
3264	path->reversible = 1;
3265	path->pkey = cm_id_priv->pkey;
3266	path->sl = IBA_GET(CM_LAP_ALTERNATE_SL, lap_msg);
3267	path->mtu_selector = IB_SA_EQ;
3268	path->mtu = cm_id_priv->path_mtu;
3269	path->rate_selector = IB_SA_EQ;
3270	path->rate = IBA_GET(CM_LAP_ALTERNATE_PACKET_RATE, lap_msg);
3271	path->packet_life_time_selector = IB_SA_EQ;
3272	path->packet_life_time =
3273		IBA_GET(CM_LAP_ALTERNATE_LOCAL_ACK_TIMEOUT, lap_msg);
3274	path->packet_life_time -= (path->packet_life_time > 0);
3275	cm_format_path_lid_from_lap(lap_msg, path);
3276}
3277
3278static int cm_lap_handler(struct cm_work *work)
3279{
3280	struct cm_id_private *cm_id_priv;
3281	struct cm_lap_msg *lap_msg;
3282	struct ib_cm_lap_event_param *param;
3283	struct ib_mad_send_buf *msg = NULL;
3284	struct rdma_ah_attr ah_attr;
3285	struct cm_av alt_av = {};
3286	int ret;
3287
3288	/* Currently Alternate path messages are not supported for
3289	 * RoCE link layer.
3290	 */
3291	if (rdma_protocol_roce(work->port->cm_dev->ib_device,
3292			       work->port->port_num))
3293		return -EINVAL;
3294
3295	/* todo: verify LAP request and send reject APR if invalid. */
3296	lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
3297	cm_id_priv = cm_acquire_id(
3298		cpu_to_be32(IBA_GET(CM_LAP_REMOTE_COMM_ID, lap_msg)),
3299		cpu_to_be32(IBA_GET(CM_LAP_LOCAL_COMM_ID, lap_msg)));
3300	if (!cm_id_priv)
3301		return -EINVAL;
3302
3303	param = &work->cm_event.param.lap_rcvd;
3304	memset(&work->path[0], 0, sizeof(work->path[1]));
3305	cm_path_set_rec_type(work->port->cm_dev->ib_device,
3306			     work->port->port_num, &work->path[0],
3307			     IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_LOCAL_PORT_GID,
3308					     lap_msg));
3309	param->alternate_path = &work->path[0];
3310	cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
3311	work->cm_event.private_data =
3312		IBA_GET_MEM_PTR(CM_LAP_PRIVATE_DATA, lap_msg);
3313
3314	ret = ib_init_ah_attr_from_wc(work->port->cm_dev->ib_device,
3315				      work->port->port_num,
3316				      work->mad_recv_wc->wc,
3317				      work->mad_recv_wc->recv_buf.grh,
3318				      &ah_attr);
3319	if (ret)
3320		goto deref;
3321
3322	ret = cm_init_av_by_path(param->alternate_path, NULL, &alt_av);
3323	if (ret) {
3324		rdma_destroy_ah_attr(&ah_attr);
3325		goto deref;
3326	}
3327
3328	spin_lock_irq(&cm_id_priv->lock);
3329	cm_init_av_for_lap(work->port, work->mad_recv_wc->wc,
3330			   &ah_attr, &cm_id_priv->av);
3331	cm_move_av_from_path(&cm_id_priv->alt_av, &alt_av);
3332
3333	if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
3334		goto unlock;
3335
3336	switch (cm_id_priv->id.lap_state) {
3337	case IB_CM_LAP_UNINIT:
3338	case IB_CM_LAP_IDLE:
3339		break;
3340	case IB_CM_MRA_LAP_SENT:
3341		atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
3342						     [CM_LAP_COUNTER]);
3343		msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc);
3344		if (IS_ERR(msg))
3345			goto unlock;
3346
3347		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
3348			      CM_MSG_RESPONSE_OTHER,
3349			      cm_id_priv->service_timeout,
3350			      cm_id_priv->private_data,
3351			      cm_id_priv->private_data_len);
3352		spin_unlock_irq(&cm_id_priv->lock);
3353
3354		if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
3355		    ib_post_send_mad(msg, NULL))
3356			cm_free_response_msg(msg);
3357		goto deref;
3358	case IB_CM_LAP_RCVD:
3359		atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
3360						     [CM_LAP_COUNTER]);
3361		goto unlock;
3362	default:
3363		goto unlock;
3364	}
3365
3366	cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
3367	cm_id_priv->tid = lap_msg->hdr.tid;
3368	cm_queue_work_unlock(cm_id_priv, work);
3369	return 0;
3370
3371unlock:	spin_unlock_irq(&cm_id_priv->lock);
3372deref:	cm_deref_id(cm_id_priv);
3373	return -EINVAL;
3374}
3375
3376static int cm_apr_handler(struct cm_work *work)
3377{
3378	struct cm_id_private *cm_id_priv;
3379	struct cm_apr_msg *apr_msg;
3380
3381	/* Currently Alternate path messages are not supported for
3382	 * RoCE link layer.
3383	 */
3384	if (rdma_protocol_roce(work->port->cm_dev->ib_device,
3385			       work->port->port_num))
3386		return -EINVAL;
3387
3388	apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
3389	cm_id_priv = cm_acquire_id(
3390		cpu_to_be32(IBA_GET(CM_APR_REMOTE_COMM_ID, apr_msg)),
3391		cpu_to_be32(IBA_GET(CM_APR_LOCAL_COMM_ID, apr_msg)));
3392	if (!cm_id_priv)
3393		return -EINVAL; /* Unmatched reply. */
3394
3395	work->cm_event.param.apr_rcvd.ap_status =
3396		IBA_GET(CM_APR_AR_STATUS, apr_msg);
3397	work->cm_event.param.apr_rcvd.apr_info =
3398		IBA_GET_MEM_PTR(CM_APR_ADDITIONAL_INFORMATION, apr_msg);
3399	work->cm_event.param.apr_rcvd.info_len =
3400		IBA_GET(CM_APR_ADDITIONAL_INFORMATION_LENGTH, apr_msg);
3401	work->cm_event.private_data =
3402		IBA_GET_MEM_PTR(CM_APR_PRIVATE_DATA, apr_msg);
3403
3404	spin_lock_irq(&cm_id_priv->lock);
3405	if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
3406	    (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
3407	     cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
3408		spin_unlock_irq(&cm_id_priv->lock);
3409		goto out;
3410	}
3411	cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
3412	ib_cancel_mad(cm_id_priv->msg);
3413	cm_queue_work_unlock(cm_id_priv, work);
3414	return 0;
3415out:
3416	cm_deref_id(cm_id_priv);
3417	return -EINVAL;
3418}
3419
3420static int cm_timewait_handler(struct cm_work *work)
3421{
3422	struct cm_timewait_info *timewait_info;
3423	struct cm_id_private *cm_id_priv;
3424
3425	timewait_info = container_of(work, struct cm_timewait_info, work);
3426	spin_lock_irq(&cm.lock);
3427	list_del(&timewait_info->list);
3428	spin_unlock_irq(&cm.lock);
3429
3430	cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
3431				   timewait_info->work.remote_id);
3432	if (!cm_id_priv)
3433		return -EINVAL;
3434
3435	spin_lock_irq(&cm_id_priv->lock);
3436	if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
3437	    cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
3438		spin_unlock_irq(&cm_id_priv->lock);
3439		goto out;
3440	}
3441	cm_id_priv->id.state = IB_CM_IDLE;
3442	cm_queue_work_unlock(cm_id_priv, work);
3443	return 0;
3444out:
3445	cm_deref_id(cm_id_priv);
3446	return -EINVAL;
3447}
3448
3449static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
3450			       struct cm_id_private *cm_id_priv,
3451			       struct ib_cm_sidr_req_param *param)
3452{
3453	cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
3454			  cm_form_tid(cm_id_priv));
3455	IBA_SET(CM_SIDR_REQ_REQUESTID, sidr_req_msg,
3456		be32_to_cpu(cm_id_priv->id.local_id));
3457	IBA_SET(CM_SIDR_REQ_PARTITION_KEY, sidr_req_msg,
3458		be16_to_cpu(param->path->pkey));
3459	IBA_SET(CM_SIDR_REQ_SERVICEID, sidr_req_msg,
3460		be64_to_cpu(param->service_id));
3461
3462	if (param->private_data && param->private_data_len)
3463		IBA_SET_MEM(CM_SIDR_REQ_PRIVATE_DATA, sidr_req_msg,
3464			    param->private_data, param->private_data_len);
3465}
3466
3467int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
3468			struct ib_cm_sidr_req_param *param)
3469{
3470	struct cm_id_private *cm_id_priv;
3471	struct ib_mad_send_buf *msg;
3472	struct cm_av av = {};
3473	unsigned long flags;
3474	int ret;
3475
3476	if (!param->path || (param->private_data &&
3477	     param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
3478		return -EINVAL;
3479
3480	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3481	ret = cm_init_av_by_path(param->path, param->sgid_attr, &av);
3482	if (ret)
3483		return ret;
3484
3485	spin_lock_irqsave(&cm_id_priv->lock, flags);
3486	cm_move_av_from_path(&cm_id_priv->av, &av);
3487	cm_id->service_id = param->service_id;
3488	cm_id_priv->timeout_ms = param->timeout_ms;
3489	cm_id_priv->max_cm_retries = param->max_cm_retries;
3490	if (cm_id->state != IB_CM_IDLE) {
3491		ret = -EINVAL;
3492		goto out_unlock;
3493	}
3494
3495	msg = cm_alloc_priv_msg(cm_id_priv);
3496	if (IS_ERR(msg)) {
3497		ret = PTR_ERR(msg);
3498		goto out_unlock;
3499	}
3500
3501	cm_format_sidr_req((struct cm_sidr_req_msg *)msg->mad, cm_id_priv,
3502			   param);
3503	msg->timeout_ms = cm_id_priv->timeout_ms;
3504	msg->context[1] = (void *)(unsigned long)IB_CM_SIDR_REQ_SENT;
3505
3506	trace_icm_send_sidr_req(&cm_id_priv->id);
3507	ret = ib_post_send_mad(msg, NULL);
3508	if (ret)
3509		goto out_free;
3510	cm_id->state = IB_CM_SIDR_REQ_SENT;
3511	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3512	return 0;
3513out_free:
3514	cm_free_priv_msg(msg);
3515out_unlock:
3516	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3517	return ret;
3518}
3519EXPORT_SYMBOL(ib_send_cm_sidr_req);
3520
3521static void cm_format_sidr_req_event(struct cm_work *work,
3522				     const struct cm_id_private *rx_cm_id,
3523				     struct ib_cm_id *listen_id)
3524{
3525	struct cm_sidr_req_msg *sidr_req_msg;
3526	struct ib_cm_sidr_req_event_param *param;
3527
3528	sidr_req_msg = (struct cm_sidr_req_msg *)
3529				work->mad_recv_wc->recv_buf.mad;
3530	param = &work->cm_event.param.sidr_req_rcvd;
3531	param->pkey = IBA_GET(CM_SIDR_REQ_PARTITION_KEY, sidr_req_msg);
3532	param->listen_id = listen_id;
3533	param->service_id =
3534		cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg));
3535	param->bth_pkey = cm_get_bth_pkey(work);
3536	param->port = work->port->port_num;
3537	param->sgid_attr = rx_cm_id->av.ah_attr.grh.sgid_attr;
3538	work->cm_event.private_data =
3539		IBA_GET_MEM_PTR(CM_SIDR_REQ_PRIVATE_DATA, sidr_req_msg);
3540}
3541
3542static int cm_sidr_req_handler(struct cm_work *work)
3543{
3544	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
3545	struct cm_sidr_req_msg *sidr_req_msg;
3546	struct ib_wc *wc;
3547	int ret;
3548
3549	cm_id_priv =
3550		cm_alloc_id_priv(work->port->cm_dev->ib_device, NULL, NULL);
3551	if (IS_ERR(cm_id_priv))
3552		return PTR_ERR(cm_id_priv);
3553
3554	/* Record SGID/SLID and request ID for lookup. */
3555	sidr_req_msg = (struct cm_sidr_req_msg *)
3556				work->mad_recv_wc->recv_buf.mad;
3557
3558	cm_id_priv->id.remote_id =
3559		cpu_to_be32(IBA_GET(CM_SIDR_REQ_REQUESTID, sidr_req_msg));
3560	cm_id_priv->id.service_id =
3561		cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg));
3562	cm_id_priv->tid = sidr_req_msg->hdr.tid;
3563
3564	wc = work->mad_recv_wc->wc;
3565	cm_id_priv->sidr_slid = wc->slid;
3566	ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
3567				      work->mad_recv_wc->recv_buf.grh,
3568				      &cm_id_priv->av);
3569	if (ret)
3570		goto out;
3571
3572	spin_lock_irq(&cm.lock);
3573	listen_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
3574	if (listen_cm_id_priv) {
3575		spin_unlock_irq(&cm.lock);
3576		atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
3577						     [CM_SIDR_REQ_COUNTER]);
3578		goto out; /* Duplicate message. */
3579	}
3580	cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
3581	listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
3582					   cm_id_priv->id.service_id);
3583	if (!listen_cm_id_priv) {
3584		spin_unlock_irq(&cm.lock);
3585		ib_send_cm_sidr_rep(&cm_id_priv->id,
3586				    &(struct ib_cm_sidr_rep_param){
3587					    .status = IB_SIDR_UNSUPPORTED });
3588		goto out; /* No match. */
3589	}
3590	spin_unlock_irq(&cm.lock);
3591
3592	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
3593	cm_id_priv->id.context = listen_cm_id_priv->id.context;
3594
3595	/*
3596	 * A SIDR ID does not need to be in the xarray since it does not receive
3597	 * mads, is not placed in the remote_id or remote_qpn rbtree, and does
3598	 * not enter timewait.
3599	 */
3600
3601	cm_format_sidr_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
3602	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
3603	cm_free_work(work);
3604	/*
3605	 * A pointer to the listen_cm_id is held in the event, so this deref
3606	 * must be after the event is delivered above.
3607	 */
3608	cm_deref_id(listen_cm_id_priv);
3609	if (ret)
3610		cm_destroy_id(&cm_id_priv->id, ret);
3611	return 0;
3612out:
3613	ib_destroy_cm_id(&cm_id_priv->id);
3614	return -EINVAL;
3615}
3616
3617static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
3618			       struct cm_id_private *cm_id_priv,
3619			       struct ib_cm_sidr_rep_param *param)
3620{
3621	cm_format_mad_ece_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
3622			      cm_id_priv->tid, param->ece.attr_mod);
3623	IBA_SET(CM_SIDR_REP_REQUESTID, sidr_rep_msg,
3624		be32_to_cpu(cm_id_priv->id.remote_id));
3625	IBA_SET(CM_SIDR_REP_STATUS, sidr_rep_msg, param->status);
3626	IBA_SET(CM_SIDR_REP_QPN, sidr_rep_msg, param->qp_num);
3627	IBA_SET(CM_SIDR_REP_SERVICEID, sidr_rep_msg,
3628		be64_to_cpu(cm_id_priv->id.service_id));
3629	IBA_SET(CM_SIDR_REP_Q_KEY, sidr_rep_msg, param->qkey);
3630	IBA_SET(CM_SIDR_REP_VENDOR_ID_L, sidr_rep_msg,
3631		param->ece.vendor_id & 0xFF);
3632	IBA_SET(CM_SIDR_REP_VENDOR_ID_H, sidr_rep_msg,
3633		(param->ece.vendor_id >> 8) & 0xFF);
3634
3635	if (param->info && param->info_length)
3636		IBA_SET_MEM(CM_SIDR_REP_ADDITIONAL_INFORMATION, sidr_rep_msg,
3637			    param->info, param->info_length);
3638
3639	if (param->private_data && param->private_data_len)
3640		IBA_SET_MEM(CM_SIDR_REP_PRIVATE_DATA, sidr_rep_msg,
3641			    param->private_data, param->private_data_len);
3642}
3643
3644static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
3645				   struct ib_cm_sidr_rep_param *param)
3646{
3647	struct ib_mad_send_buf *msg;
3648	unsigned long flags;
3649	int ret;
3650
3651	lockdep_assert_held(&cm_id_priv->lock);
3652
3653	if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
3654	    (param->private_data &&
3655	     param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
3656		return -EINVAL;
3657
3658	if (cm_id_priv->id.state != IB_CM_SIDR_REQ_RCVD)
3659		return -EINVAL;
3660
3661	msg = cm_alloc_msg(cm_id_priv);
3662	if (IS_ERR(msg))
3663		return PTR_ERR(msg);
3664
3665	cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
3666			   param);
3667	trace_icm_send_sidr_rep(&cm_id_priv->id);
3668	ret = ib_post_send_mad(msg, NULL);
3669	if (ret) {
3670		cm_free_msg(msg);
3671		return ret;
3672	}
3673	cm_id_priv->id.state = IB_CM_IDLE;
3674	spin_lock_irqsave(&cm.lock, flags);
3675	if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
3676		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
3677		RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
3678	}
3679	spin_unlock_irqrestore(&cm.lock, flags);
3680	return 0;
3681}
3682
3683int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
3684			struct ib_cm_sidr_rep_param *param)
3685{
3686	struct cm_id_private *cm_id_priv =
3687		container_of(cm_id, struct cm_id_private, id);
3688	unsigned long flags;
3689	int ret;
3690
3691	spin_lock_irqsave(&cm_id_priv->lock, flags);
3692	ret = cm_send_sidr_rep_locked(cm_id_priv, param);
3693	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3694	return ret;
3695}
3696EXPORT_SYMBOL(ib_send_cm_sidr_rep);
3697
3698static void cm_format_sidr_rep_event(struct cm_work *work,
3699				     const struct cm_id_private *cm_id_priv)
3700{
3701	struct cm_sidr_rep_msg *sidr_rep_msg;
3702	struct ib_cm_sidr_rep_event_param *param;
3703
3704	sidr_rep_msg = (struct cm_sidr_rep_msg *)
3705				work->mad_recv_wc->recv_buf.mad;
3706	param = &work->cm_event.param.sidr_rep_rcvd;
3707	param->status = IBA_GET(CM_SIDR_REP_STATUS, sidr_rep_msg);
3708	param->qkey = IBA_GET(CM_SIDR_REP_Q_KEY, sidr_rep_msg);
3709	param->qpn = IBA_GET(CM_SIDR_REP_QPN, sidr_rep_msg);
3710	param->info = IBA_GET_MEM_PTR(CM_SIDR_REP_ADDITIONAL_INFORMATION,
3711				      sidr_rep_msg);
3712	param->info_len = IBA_GET(CM_SIDR_REP_ADDITIONAL_INFORMATION_LENGTH,
3713				  sidr_rep_msg);
3714	param->sgid_attr = cm_id_priv->av.ah_attr.grh.sgid_attr;
3715	work->cm_event.private_data =
3716		IBA_GET_MEM_PTR(CM_SIDR_REP_PRIVATE_DATA, sidr_rep_msg);
3717}
3718
3719static int cm_sidr_rep_handler(struct cm_work *work)
3720{
3721	struct cm_sidr_rep_msg *sidr_rep_msg;
3722	struct cm_id_private *cm_id_priv;
3723
3724	sidr_rep_msg = (struct cm_sidr_rep_msg *)
3725				work->mad_recv_wc->recv_buf.mad;
3726	cm_id_priv = cm_acquire_id(
3727		cpu_to_be32(IBA_GET(CM_SIDR_REP_REQUESTID, sidr_rep_msg)), 0);
3728	if (!cm_id_priv)
3729		return -EINVAL; /* Unmatched reply. */
3730
3731	spin_lock_irq(&cm_id_priv->lock);
3732	if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
3733		spin_unlock_irq(&cm_id_priv->lock);
3734		goto out;
3735	}
3736	cm_id_priv->id.state = IB_CM_IDLE;
3737	ib_cancel_mad(cm_id_priv->msg);
3738	spin_unlock_irq(&cm_id_priv->lock);
3739
3740	cm_format_sidr_rep_event(work, cm_id_priv);
3741	cm_process_work(cm_id_priv, work);
3742	return 0;
3743out:
3744	cm_deref_id(cm_id_priv);
3745	return -EINVAL;
3746}
3747
3748static void cm_process_send_error(struct cm_id_private *cm_id_priv,
3749				  struct ib_mad_send_buf *msg,
3750				  enum ib_cm_state state,
3751				  enum ib_wc_status wc_status)
3752{
3753	struct ib_cm_event cm_event = {};
3754	int ret;
3755
3756	/* Discard old sends or ones without a response. */
3757	spin_lock_irq(&cm_id_priv->lock);
3758	if (msg != cm_id_priv->msg) {
3759		spin_unlock_irq(&cm_id_priv->lock);
3760		cm_free_msg(msg);
3761		return;
3762	}
3763	cm_free_priv_msg(msg);
3764
3765	if (state != cm_id_priv->id.state || wc_status == IB_WC_SUCCESS ||
3766	    wc_status == IB_WC_WR_FLUSH_ERR)
3767		goto out_unlock;
3768
3769	trace_icm_mad_send_err(state, wc_status);
3770	switch (state) {
3771	case IB_CM_REQ_SENT:
3772	case IB_CM_MRA_REQ_RCVD:
3773		cm_reset_to_idle(cm_id_priv);
3774		cm_event.event = IB_CM_REQ_ERROR;
3775		break;
3776	case IB_CM_REP_SENT:
3777	case IB_CM_MRA_REP_RCVD:
3778		cm_reset_to_idle(cm_id_priv);
3779		cm_event.event = IB_CM_REP_ERROR;
3780		break;
3781	case IB_CM_DREQ_SENT:
3782		cm_enter_timewait(cm_id_priv);
3783		cm_event.event = IB_CM_DREQ_ERROR;
3784		break;
3785	case IB_CM_SIDR_REQ_SENT:
3786		cm_id_priv->id.state = IB_CM_IDLE;
3787		cm_event.event = IB_CM_SIDR_REQ_ERROR;
3788		break;
3789	default:
3790		goto out_unlock;
3791	}
3792	spin_unlock_irq(&cm_id_priv->lock);
3793	cm_event.param.send_status = wc_status;
3794
3795	/* No other events can occur on the cm_id at this point. */
3796	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
3797	if (ret)
3798		ib_destroy_cm_id(&cm_id_priv->id);
3799	return;
3800out_unlock:
3801	spin_unlock_irq(&cm_id_priv->lock);
3802}
3803
3804static void cm_send_handler(struct ib_mad_agent *mad_agent,
3805			    struct ib_mad_send_wc *mad_send_wc)
3806{
3807	struct ib_mad_send_buf *msg = mad_send_wc->send_buf;
3808	struct cm_id_private *cm_id_priv = msg->context[0];
3809	enum ib_cm_state state =
3810		(enum ib_cm_state)(unsigned long)msg->context[1];
3811	struct cm_port *port;
3812	u16 attr_index;
3813
3814	port = mad_agent->context;
3815	attr_index = be16_to_cpu(((struct ib_mad_hdr *)
3816				  msg->mad)->attr_id) - CM_ATTR_ID_OFFSET;
3817
3818	/*
3819	 * If the send was in response to a received message (context[0] is not
3820	 * set to a cm_id), and is not a REJ, then it is a send that was
3821	 * manually retried.
3822	 */
3823	if (!cm_id_priv && (attr_index != CM_REJ_COUNTER))
3824		msg->retries = 1;
3825
3826	atomic_long_add(1 + msg->retries, &port->counters[CM_XMIT][attr_index]);
3827	if (msg->retries)
3828		atomic_long_add(msg->retries,
3829				&port->counters[CM_XMIT_RETRIES][attr_index]);
3830
3831	if (cm_id_priv)
3832		cm_process_send_error(cm_id_priv, msg, state,
3833				      mad_send_wc->status);
3834	else
3835		cm_free_response_msg(msg);
3836}
3837
3838static void cm_work_handler(struct work_struct *_work)
3839{
3840	struct cm_work *work = container_of(_work, struct cm_work, work.work);
3841	int ret;
3842
3843	switch (work->cm_event.event) {
3844	case IB_CM_REQ_RECEIVED:
3845		ret = cm_req_handler(work);
3846		break;
3847	case IB_CM_MRA_RECEIVED:
3848		ret = cm_mra_handler(work);
3849		break;
3850	case IB_CM_REJ_RECEIVED:
3851		ret = cm_rej_handler(work);
3852		break;
3853	case IB_CM_REP_RECEIVED:
3854		ret = cm_rep_handler(work);
3855		break;
3856	case IB_CM_RTU_RECEIVED:
3857		ret = cm_rtu_handler(work);
3858		break;
3859	case IB_CM_USER_ESTABLISHED:
3860		ret = cm_establish_handler(work);
3861		break;
3862	case IB_CM_DREQ_RECEIVED:
3863		ret = cm_dreq_handler(work);
3864		break;
3865	case IB_CM_DREP_RECEIVED:
3866		ret = cm_drep_handler(work);
3867		break;
3868	case IB_CM_SIDR_REQ_RECEIVED:
3869		ret = cm_sidr_req_handler(work);
3870		break;
3871	case IB_CM_SIDR_REP_RECEIVED:
3872		ret = cm_sidr_rep_handler(work);
3873		break;
3874	case IB_CM_LAP_RECEIVED:
3875		ret = cm_lap_handler(work);
3876		break;
3877	case IB_CM_APR_RECEIVED:
3878		ret = cm_apr_handler(work);
3879		break;
3880	case IB_CM_TIMEWAIT_EXIT:
3881		ret = cm_timewait_handler(work);
3882		break;
3883	default:
3884		trace_icm_handler_err(work->cm_event.event);
3885		ret = -EINVAL;
3886		break;
3887	}
3888	if (ret)
3889		cm_free_work(work);
3890}
3891
3892static int cm_establish(struct ib_cm_id *cm_id)
3893{
3894	struct cm_id_private *cm_id_priv;
3895	struct cm_work *work;
3896	unsigned long flags;
3897	int ret = 0;
3898	struct cm_device *cm_dev;
3899
3900	cm_dev = ib_get_client_data(cm_id->device, &cm_client);
3901	if (!cm_dev)
3902		return -ENODEV;
3903
3904	work = kmalloc(sizeof *work, GFP_ATOMIC);
3905	if (!work)
3906		return -ENOMEM;
3907
3908	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3909	spin_lock_irqsave(&cm_id_priv->lock, flags);
3910	switch (cm_id->state) {
3911	case IB_CM_REP_SENT:
3912	case IB_CM_MRA_REP_RCVD:
3913		cm_id->state = IB_CM_ESTABLISHED;
3914		break;
3915	case IB_CM_ESTABLISHED:
3916		ret = -EISCONN;
3917		break;
3918	default:
3919		trace_icm_establish_err(cm_id);
3920		ret = -EINVAL;
3921		break;
3922	}
3923	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3924
3925	if (ret) {
3926		kfree(work);
3927		goto out;
3928	}
3929
3930	/*
3931	 * The CM worker thread may try to destroy the cm_id before it
3932	 * can execute this work item.  To prevent potential deadlock,
3933	 * we need to find the cm_id once we're in the context of the
3934	 * worker thread, rather than holding a reference on it.
3935	 */
3936	INIT_DELAYED_WORK(&work->work, cm_work_handler);
3937	work->local_id = cm_id->local_id;
3938	work->remote_id = cm_id->remote_id;
3939	work->mad_recv_wc = NULL;
3940	work->cm_event.event = IB_CM_USER_ESTABLISHED;
3941
3942	/* Check if the device started its remove_one */
3943	spin_lock_irqsave(&cm.lock, flags);
3944	if (!cm_dev->going_down) {
3945		queue_delayed_work(cm.wq, &work->work, 0);
3946	} else {
3947		kfree(work);
3948		ret = -ENODEV;
3949	}
3950	spin_unlock_irqrestore(&cm.lock, flags);
3951
3952out:
3953	return ret;
3954}
3955
3956static int cm_migrate(struct ib_cm_id *cm_id)
3957{
3958	struct cm_id_private *cm_id_priv;
3959	unsigned long flags;
3960	int ret = 0;
3961
3962	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3963	spin_lock_irqsave(&cm_id_priv->lock, flags);
3964	if (cm_id->state == IB_CM_ESTABLISHED &&
3965	    (cm_id->lap_state == IB_CM_LAP_UNINIT ||
3966	     cm_id->lap_state == IB_CM_LAP_IDLE)) {
3967		cm_id->lap_state = IB_CM_LAP_IDLE;
3968		cm_id_priv->av = cm_id_priv->alt_av;
3969	} else
3970		ret = -EINVAL;
3971	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3972
3973	return ret;
3974}
3975
3976int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event)
3977{
3978	int ret;
3979
3980	switch (event) {
3981	case IB_EVENT_COMM_EST:
3982		ret = cm_establish(cm_id);
3983		break;
3984	case IB_EVENT_PATH_MIG:
3985		ret = cm_migrate(cm_id);
3986		break;
3987	default:
3988		ret = -EINVAL;
3989	}
3990	return ret;
3991}
3992EXPORT_SYMBOL(ib_cm_notify);
3993
3994static void cm_recv_handler(struct ib_mad_agent *mad_agent,
3995			    struct ib_mad_send_buf *send_buf,
3996			    struct ib_mad_recv_wc *mad_recv_wc)
3997{
3998	struct cm_port *port = mad_agent->context;
3999	struct cm_work *work;
4000	enum ib_cm_event_type event;
4001	bool alt_path = false;
4002	u16 attr_id;
4003	int paths = 0;
4004	int going_down = 0;
4005
4006	switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
4007	case CM_REQ_ATTR_ID:
4008		alt_path = cm_req_has_alt_path((struct cm_req_msg *)
4009						mad_recv_wc->recv_buf.mad);
4010		paths = 1 + (alt_path != 0);
4011		event = IB_CM_REQ_RECEIVED;
4012		break;
4013	case CM_MRA_ATTR_ID:
4014		event = IB_CM_MRA_RECEIVED;
4015		break;
4016	case CM_REJ_ATTR_ID:
4017		event = IB_CM_REJ_RECEIVED;
4018		break;
4019	case CM_REP_ATTR_ID:
4020		event = IB_CM_REP_RECEIVED;
4021		break;
4022	case CM_RTU_ATTR_ID:
4023		event = IB_CM_RTU_RECEIVED;
4024		break;
4025	case CM_DREQ_ATTR_ID:
4026		event = IB_CM_DREQ_RECEIVED;
4027		break;
4028	case CM_DREP_ATTR_ID:
4029		event = IB_CM_DREP_RECEIVED;
4030		break;
4031	case CM_SIDR_REQ_ATTR_ID:
4032		event = IB_CM_SIDR_REQ_RECEIVED;
4033		break;
4034	case CM_SIDR_REP_ATTR_ID:
4035		event = IB_CM_SIDR_REP_RECEIVED;
4036		break;
4037	case CM_LAP_ATTR_ID:
4038		paths = 1;
4039		event = IB_CM_LAP_RECEIVED;
4040		break;
4041	case CM_APR_ATTR_ID:
4042		event = IB_CM_APR_RECEIVED;
4043		break;
4044	default:
4045		ib_free_recv_mad(mad_recv_wc);
4046		return;
4047	}
4048
4049	attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
4050	atomic_long_inc(&port->counters[CM_RECV][attr_id - CM_ATTR_ID_OFFSET]);
4051
4052	work = kmalloc(struct_size(work, path, paths), GFP_KERNEL);
4053	if (!work) {
4054		ib_free_recv_mad(mad_recv_wc);
4055		return;
4056	}
4057
4058	INIT_DELAYED_WORK(&work->work, cm_work_handler);
4059	work->cm_event.event = event;
4060	work->mad_recv_wc = mad_recv_wc;
4061	work->port = port;
4062
4063	/* Check if the device started its remove_one */
4064	spin_lock_irq(&cm.lock);
4065	if (!port->cm_dev->going_down)
4066		queue_delayed_work(cm.wq, &work->work, 0);
4067	else
4068		going_down = 1;
4069	spin_unlock_irq(&cm.lock);
4070
4071	if (going_down) {
4072		kfree(work);
4073		ib_free_recv_mad(mad_recv_wc);
4074	}
4075}
4076
4077static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
4078				struct ib_qp_attr *qp_attr,
4079				int *qp_attr_mask)
4080{
4081	unsigned long flags;
4082	int ret;
4083
4084	spin_lock_irqsave(&cm_id_priv->lock, flags);
4085	switch (cm_id_priv->id.state) {
4086	case IB_CM_REQ_SENT:
4087	case IB_CM_MRA_REQ_RCVD:
4088	case IB_CM_REQ_RCVD:
4089	case IB_CM_MRA_REQ_SENT:
4090	case IB_CM_REP_RCVD:
4091	case IB_CM_MRA_REP_SENT:
4092	case IB_CM_REP_SENT:
4093	case IB_CM_MRA_REP_RCVD:
4094	case IB_CM_ESTABLISHED:
4095		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
4096				IB_QP_PKEY_INDEX | IB_QP_PORT;
4097		qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
4098		if (cm_id_priv->responder_resources) {
4099			struct ib_device *ib_dev = cm_id_priv->id.device;
4100			u64 support_flush = ib_dev->attrs.device_cap_flags &
4101			  (IB_DEVICE_FLUSH_GLOBAL | IB_DEVICE_FLUSH_PERSISTENT);
4102			u32 flushable = support_flush ?
4103					(IB_ACCESS_FLUSH_GLOBAL |
4104					 IB_ACCESS_FLUSH_PERSISTENT) : 0;
4105
4106			qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
4107						    IB_ACCESS_REMOTE_ATOMIC |
4108						    flushable;
4109		}
4110		qp_attr->pkey_index = cm_id_priv->av.pkey_index;
4111		if (cm_id_priv->av.port)
4112			qp_attr->port_num = cm_id_priv->av.port->port_num;
4113		ret = 0;
4114		break;
4115	default:
4116		trace_icm_qp_init_err(&cm_id_priv->id);
4117		ret = -EINVAL;
4118		break;
4119	}
4120	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
4121	return ret;
4122}
4123
4124static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
4125			       struct ib_qp_attr *qp_attr,
4126			       int *qp_attr_mask)
4127{
4128	unsigned long flags;
4129	int ret;
4130
4131	spin_lock_irqsave(&cm_id_priv->lock, flags);
4132	switch (cm_id_priv->id.state) {
4133	case IB_CM_REQ_RCVD:
4134	case IB_CM_MRA_REQ_SENT:
4135	case IB_CM_REP_RCVD:
4136	case IB_CM_MRA_REP_SENT:
4137	case IB_CM_REP_SENT:
4138	case IB_CM_MRA_REP_RCVD:
4139	case IB_CM_ESTABLISHED:
4140		*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
4141				IB_QP_DEST_QPN | IB_QP_RQ_PSN;
4142		qp_attr->ah_attr = cm_id_priv->av.ah_attr;
4143		if ((qp_attr->ah_attr.type == RDMA_AH_ATTR_TYPE_IB) &&
4144		    cm_id_priv->av.dlid_datapath &&
4145		    (cm_id_priv->av.dlid_datapath != 0xffff))
4146			qp_attr->ah_attr.ib.dlid = cm_id_priv->av.dlid_datapath;
4147		qp_attr->path_mtu = cm_id_priv->path_mtu;
4148		qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
4149		qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
4150		if (cm_id_priv->qp_type == IB_QPT_RC ||
4151		    cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
4152			*qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
4153					 IB_QP_MIN_RNR_TIMER;
4154			qp_attr->max_dest_rd_atomic =
4155					cm_id_priv->responder_resources;
4156			qp_attr->min_rnr_timer = 0;
4157		}
4158		if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr) &&
4159		    cm_id_priv->alt_av.port) {
4160			*qp_attr_mask |= IB_QP_ALT_PATH;
4161			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
4162			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
4163			qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
4164			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
4165		}
4166		ret = 0;
4167		break;
4168	default:
4169		trace_icm_qp_rtr_err(&cm_id_priv->id);
4170		ret = -EINVAL;
4171		break;
4172	}
4173	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
4174	return ret;
4175}
4176
4177static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
4178			       struct ib_qp_attr *qp_attr,
4179			       int *qp_attr_mask)
4180{
4181	unsigned long flags;
4182	int ret;
4183
4184	spin_lock_irqsave(&cm_id_priv->lock, flags);
4185	switch (cm_id_priv->id.state) {
4186	/* Allow transition to RTS before sending REP */
4187	case IB_CM_REQ_RCVD:
4188	case IB_CM_MRA_REQ_SENT:
4189
4190	case IB_CM_REP_RCVD:
4191	case IB_CM_MRA_REP_SENT:
4192	case IB_CM_REP_SENT:
4193	case IB_CM_MRA_REP_RCVD:
4194	case IB_CM_ESTABLISHED:
4195		if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
4196			*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
4197			qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
4198			switch (cm_id_priv->qp_type) {
4199			case IB_QPT_RC:
4200			case IB_QPT_XRC_INI:
4201				*qp_attr_mask |= IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
4202						 IB_QP_MAX_QP_RD_ATOMIC;
4203				qp_attr->retry_cnt = cm_id_priv->retry_count;
4204				qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
4205				qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
4206				fallthrough;
4207			case IB_QPT_XRC_TGT:
4208				*qp_attr_mask |= IB_QP_TIMEOUT;
4209				qp_attr->timeout = cm_id_priv->av.timeout;
4210				break;
4211			default:
4212				break;
4213			}
4214			if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr)) {
4215				*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
4216				qp_attr->path_mig_state = IB_MIG_REARM;
4217			}
4218		} else {
4219			*qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
4220			if (cm_id_priv->alt_av.port)
4221				qp_attr->alt_port_num =
4222					cm_id_priv->alt_av.port->port_num;
4223			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
4224			qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
4225			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
4226			qp_attr->path_mig_state = IB_MIG_REARM;
4227		}
4228		ret = 0;
4229		break;
4230	default:
4231		trace_icm_qp_rts_err(&cm_id_priv->id);
4232		ret = -EINVAL;
4233		break;
4234	}
4235	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
4236	return ret;
4237}
4238
4239int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
4240		       struct ib_qp_attr *qp_attr,
4241		       int *qp_attr_mask)
4242{
4243	struct cm_id_private *cm_id_priv;
4244	int ret;
4245
4246	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
4247	switch (qp_attr->qp_state) {
4248	case IB_QPS_INIT:
4249		ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
4250		break;
4251	case IB_QPS_RTR:
4252		ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
4253		break;
4254	case IB_QPS_RTS:
4255		ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
4256		break;
4257	default:
4258		ret = -EINVAL;
4259		break;
4260	}
4261	return ret;
4262}
4263EXPORT_SYMBOL(ib_cm_init_qp_attr);
4264
4265static ssize_t cm_show_counter(struct ib_device *ibdev, u32 port_num,
4266			       struct ib_port_attribute *attr, char *buf)
4267{
4268	struct cm_counter_attribute *cm_attr =
4269		container_of(attr, struct cm_counter_attribute, attr);
4270	struct cm_device *cm_dev = ib_get_client_data(ibdev, &cm_client);
4271
4272	if (WARN_ON(!cm_dev))
4273		return -EINVAL;
4274
4275	return sysfs_emit(
4276		buf, "%ld\n",
4277		atomic_long_read(
4278			&cm_dev->port[port_num - 1]
4279				 ->counters[cm_attr->group][cm_attr->index]));
4280}
4281
4282#define CM_COUNTER_ATTR(_name, _group, _index)                                 \
4283	{                                                                      \
4284		.attr = __ATTR(_name, 0444, cm_show_counter, NULL),            \
4285		.group = _group, .index = _index                               \
4286	}
4287
4288#define CM_COUNTER_GROUP(_group, _name)                                        \
4289	static struct cm_counter_attribute cm_counter_attr_##_group[] = {      \
4290		CM_COUNTER_ATTR(req, _group, CM_REQ_COUNTER),                  \
4291		CM_COUNTER_ATTR(mra, _group, CM_MRA_COUNTER),                  \
4292		CM_COUNTER_ATTR(rej, _group, CM_REJ_COUNTER),                  \
4293		CM_COUNTER_ATTR(rep, _group, CM_REP_COUNTER),                  \
4294		CM_COUNTER_ATTR(rtu, _group, CM_RTU_COUNTER),                  \
4295		CM_COUNTER_ATTR(dreq, _group, CM_DREQ_COUNTER),                \
4296		CM_COUNTER_ATTR(drep, _group, CM_DREP_COUNTER),                \
4297		CM_COUNTER_ATTR(sidr_req, _group, CM_SIDR_REQ_COUNTER),        \
4298		CM_COUNTER_ATTR(sidr_rep, _group, CM_SIDR_REP_COUNTER),        \
4299		CM_COUNTER_ATTR(lap, _group, CM_LAP_COUNTER),                  \
4300		CM_COUNTER_ATTR(apr, _group, CM_APR_COUNTER),                  \
4301	};                                                                     \
4302	static struct attribute *cm_counter_attrs_##_group[] = {               \
4303		&cm_counter_attr_##_group[0].attr.attr,                        \
4304		&cm_counter_attr_##_group[1].attr.attr,                        \
4305		&cm_counter_attr_##_group[2].attr.attr,                        \
4306		&cm_counter_attr_##_group[3].attr.attr,                        \
4307		&cm_counter_attr_##_group[4].attr.attr,                        \
4308		&cm_counter_attr_##_group[5].attr.attr,                        \
4309		&cm_counter_attr_##_group[6].attr.attr,                        \
4310		&cm_counter_attr_##_group[7].attr.attr,                        \
4311		&cm_counter_attr_##_group[8].attr.attr,                        \
4312		&cm_counter_attr_##_group[9].attr.attr,                        \
4313		&cm_counter_attr_##_group[10].attr.attr,                       \
4314		NULL,                                                          \
4315	};                                                                     \
4316	static const struct attribute_group cm_counter_group_##_group = {      \
4317		.name = _name,                                                 \
4318		.attrs = cm_counter_attrs_##_group,                            \
4319	};
4320
4321CM_COUNTER_GROUP(CM_XMIT, "cm_tx_msgs")
4322CM_COUNTER_GROUP(CM_XMIT_RETRIES, "cm_tx_retries")
4323CM_COUNTER_GROUP(CM_RECV, "cm_rx_msgs")
4324CM_COUNTER_GROUP(CM_RECV_DUPLICATES, "cm_rx_duplicates")
4325
4326static const struct attribute_group *cm_counter_groups[] = {
4327	&cm_counter_group_CM_XMIT,
4328	&cm_counter_group_CM_XMIT_RETRIES,
4329	&cm_counter_group_CM_RECV,
4330	&cm_counter_group_CM_RECV_DUPLICATES,
4331	NULL,
4332};
4333
4334static int cm_add_one(struct ib_device *ib_device)
4335{
4336	struct cm_device *cm_dev;
4337	struct cm_port *port;
4338	struct ib_mad_reg_req reg_req = {
4339		.mgmt_class = IB_MGMT_CLASS_CM,
4340		.mgmt_class_version = IB_CM_CLASS_VERSION,
4341	};
4342	struct ib_port_modify port_modify = {
4343		.set_port_cap_mask = IB_PORT_CM_SUP
4344	};
4345	unsigned long flags;
4346	int ret;
4347	int count = 0;
4348	u32 i;
4349
4350	cm_dev = kzalloc(struct_size(cm_dev, port, ib_device->phys_port_cnt),
4351			 GFP_KERNEL);
4352	if (!cm_dev)
4353		return -ENOMEM;
4354
4355	kref_init(&cm_dev->kref);
4356	spin_lock_init(&cm_dev->mad_agent_lock);
4357	cm_dev->ib_device = ib_device;
4358	cm_dev->ack_delay = ib_device->attrs.local_ca_ack_delay;
4359	cm_dev->going_down = 0;
4360
4361	ib_set_client_data(ib_device, &cm_client, cm_dev);
4362
4363	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
4364	rdma_for_each_port (ib_device, i) {
4365		if (!rdma_cap_ib_cm(ib_device, i))
4366			continue;
4367
4368		port = kzalloc(sizeof *port, GFP_KERNEL);
4369		if (!port) {
4370			ret = -ENOMEM;
4371			goto error1;
4372		}
4373
4374		cm_dev->port[i-1] = port;
4375		port->cm_dev = cm_dev;
4376		port->port_num = i;
4377
4378		ret = ib_port_register_client_groups(ib_device, i,
4379						     cm_counter_groups);
4380		if (ret)
4381			goto error1;
4382
4383		port->mad_agent = ib_register_mad_agent(ib_device, i,
4384							IB_QPT_GSI,
4385							&reg_req,
4386							0,
4387							cm_send_handler,
4388							cm_recv_handler,
4389							port,
4390							0);
4391		if (IS_ERR(port->mad_agent)) {
4392			ret = PTR_ERR(port->mad_agent);
4393			goto error2;
4394		}
4395
4396		ret = ib_modify_port(ib_device, i, 0, &port_modify);
4397		if (ret)
4398			goto error3;
4399
4400		count++;
4401	}
4402
4403	if (!count) {
4404		ret = -EOPNOTSUPP;
4405		goto free;
4406	}
4407
4408	write_lock_irqsave(&cm.device_lock, flags);
4409	list_add_tail(&cm_dev->list, &cm.device_list);
4410	write_unlock_irqrestore(&cm.device_lock, flags);
4411	return 0;
4412
4413error3:
4414	ib_unregister_mad_agent(port->mad_agent);
4415error2:
4416	ib_port_unregister_client_groups(ib_device, i, cm_counter_groups);
4417error1:
4418	port_modify.set_port_cap_mask = 0;
4419	port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
4420	while (--i) {
4421		if (!rdma_cap_ib_cm(ib_device, i))
4422			continue;
4423
4424		port = cm_dev->port[i-1];
4425		ib_modify_port(ib_device, port->port_num, 0, &port_modify);
4426		ib_unregister_mad_agent(port->mad_agent);
4427		ib_port_unregister_client_groups(ib_device, i,
4428						 cm_counter_groups);
4429	}
4430free:
4431	cm_device_put(cm_dev);
4432	return ret;
4433}
4434
4435static void cm_remove_one(struct ib_device *ib_device, void *client_data)
4436{
4437	struct cm_device *cm_dev = client_data;
4438	struct cm_port *port;
4439	struct ib_port_modify port_modify = {
4440		.clr_port_cap_mask = IB_PORT_CM_SUP
4441	};
4442	unsigned long flags;
4443	u32 i;
4444
4445	write_lock_irqsave(&cm.device_lock, flags);
4446	list_del(&cm_dev->list);
4447	write_unlock_irqrestore(&cm.device_lock, flags);
4448
4449	spin_lock_irq(&cm.lock);
4450	cm_dev->going_down = 1;
4451	spin_unlock_irq(&cm.lock);
4452
4453	rdma_for_each_port (ib_device, i) {
4454		struct ib_mad_agent *mad_agent;
4455
4456		if (!rdma_cap_ib_cm(ib_device, i))
4457			continue;
4458
4459		port = cm_dev->port[i-1];
4460		mad_agent = port->mad_agent;
4461		ib_modify_port(ib_device, port->port_num, 0, &port_modify);
4462		/*
4463		 * We flush the queue here after the going_down set, this
4464		 * verify that no new works will be queued in the recv handler,
4465		 * after that we can call the unregister_mad_agent
4466		 */
4467		flush_workqueue(cm.wq);
4468		/*
4469		 * The above ensures no call paths from the work are running,
4470		 * the remaining paths all take the mad_agent_lock.
4471		 */
4472		spin_lock(&cm_dev->mad_agent_lock);
4473		port->mad_agent = NULL;
4474		spin_unlock(&cm_dev->mad_agent_lock);
4475		ib_unregister_mad_agent(mad_agent);
4476		ib_port_unregister_client_groups(ib_device, i,
4477						 cm_counter_groups);
4478	}
4479
4480	cm_device_put(cm_dev);
4481}
4482
4483static int __init ib_cm_init(void)
4484{
4485	int ret;
4486
4487	INIT_LIST_HEAD(&cm.device_list);
4488	rwlock_init(&cm.device_lock);
4489	spin_lock_init(&cm.lock);
4490	cm.listen_service_table = RB_ROOT;
4491	cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
4492	cm.remote_id_table = RB_ROOT;
4493	cm.remote_qp_table = RB_ROOT;
4494	cm.remote_sidr_table = RB_ROOT;
4495	xa_init_flags(&cm.local_id_table, XA_FLAGS_ALLOC);
4496	get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
4497	INIT_LIST_HEAD(&cm.timewait_list);
4498
4499	cm.wq = alloc_workqueue("ib_cm", 0, 1);
4500	if (!cm.wq) {
4501		ret = -ENOMEM;
4502		goto error2;
4503	}
4504
4505	ret = ib_register_client(&cm_client);
4506	if (ret)
4507		goto error3;
4508
4509	return 0;
4510error3:
4511	destroy_workqueue(cm.wq);
4512error2:
4513	return ret;
4514}
4515
4516static void __exit ib_cm_cleanup(void)
4517{
4518	struct cm_timewait_info *timewait_info, *tmp;
4519
4520	spin_lock_irq(&cm.lock);
4521	list_for_each_entry(timewait_info, &cm.timewait_list, list)
4522		cancel_delayed_work(&timewait_info->work.work);
4523	spin_unlock_irq(&cm.lock);
4524
4525	ib_unregister_client(&cm_client);
4526	destroy_workqueue(cm.wq);
4527
4528	list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) {
4529		list_del(&timewait_info->list);
4530		kfree(timewait_info);
4531	}
4532
4533	WARN_ON(!xa_empty(&cm.local_id_table));
4534}
4535
4536module_init(ib_cm_init);
4537module_exit(ib_cm_cleanup);
4538