/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/vport.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_pma.h>
#include "mlx5_ib.h"
#include "cmd.h"

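/* Mellanox vendor-specific MAD management classes that are passed
 * through to firmware via the MAD_IFC command.
 */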
enum {
	MLX5_IB_VENDOR_CLASS1 = 0x9,
	MLX5_IB_VENDOR_CLASS2 = 0xa
};

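/* SMPs (LID-routed or directed-route) may only be forwarded to firmware
 * on ports that expose an SMI; every other management class is allowed
 * unconditionally.
 */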
static bool can_do_mad_ifc(struct mlx5_ib_dev *dev, u8 port_num,
			   struct ib_mad *in_mad)
{
	if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED &&
	    in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		return true;
	return dev->mdev->port_caps[port_num - 1].has_smi;
}

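/* Execute the MAD_IFC firmware command. Bit 0 of op_modifier suppresses
 * the M_Key check and bit 1 the B_Key check; both are forced on when no
 * in_wc is available (see the comment below).
 */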
static int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey,
			int ignore_bkey, u8 port, const struct ib_wc *in_wc,
			const struct ib_grh *in_grh, const void *in_mad,
			void *response_mad)
{
	u8 op_modifier = 0;

	if (!can_do_mad_ifc(dev, port, (struct ib_mad *)in_mad))
		return -EPERM;

	/* Key check traps can't be generated unless we have in_wc to
	 * tell us where to send the trap.
	 */
	if (ignore_mkey || !in_wc)
		op_modifier |= 0x1;
	if (ignore_bkey || !in_wc)
		op_modifier |= 0x2;

	return mlx5_cmd_mad_ifc(dev->mdev, in_mad, response_mad, op_modifier,
				port);
}

static void pma_cnt_ext_assign(struct ib_pma_portcounters_ext *pma_cnt_ext,
			       void *out)
{
#define MLX5_SUM_CNT(p, cntr1, cntr2)	\
	(MLX5_GET64(query_vport_counter_out, p, cntr1) + \
	MLX5_GET64(query_vport_counter_out, p, cntr2))

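	/* PortXmitData/PortRcvData are defined in units of 32-bit words,
	 * so the firmware octet counts (unicast + multicast) are divided
	 * by four.
	 */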
	pma_cnt_ext->port_xmit_data =
		cpu_to_be64(MLX5_SUM_CNT(out, transmitted_ib_unicast.octets,
					 transmitted_ib_multicast.octets) >> 2);
	pma_cnt_ext->port_rcv_data =
		cpu_to_be64(MLX5_SUM_CNT(out, received_ib_unicast.octets,
					 received_ib_multicast.octets) >> 2);
	pma_cnt_ext->port_xmit_packets =
		cpu_to_be64(MLX5_SUM_CNT(out, transmitted_ib_unicast.packets,
					 transmitted_ib_multicast.packets));
	pma_cnt_ext->port_rcv_packets =
		cpu_to_be64(MLX5_SUM_CNT(out, received_ib_unicast.packets,
					 received_ib_multicast.packets));
	pma_cnt_ext->port_unicast_xmit_packets =
		MLX5_GET64_BE(query_vport_counter_out,
			      out, transmitted_ib_unicast.packets);
	pma_cnt_ext->port_unicast_rcv_packets =
		MLX5_GET64_BE(query_vport_counter_out,
			      out, received_ib_unicast.packets);
	pma_cnt_ext->port_multicast_xmit_packets =
		MLX5_GET64_BE(query_vport_counter_out,
			      out, transmitted_ib_multicast.packets);
	pma_cnt_ext->port_multicast_rcv_packets =
		MLX5_GET64_BE(query_vport_counter_out,
			      out, received_ib_multicast.packets);
}

static void pma_cnt_assign(struct ib_pma_portcounters *pma_cnt,
			   void *out)
{
	/* Traffic counters are reported in their 64-bit form via
	 * ib_pma_portcounters_ext, so the 32-bit traffic fields are
	 * intentionally left unset here.
	 */
	void *out_pma = MLX5_ADDR_OF(ppcnt_reg, out,
				     counter_set);

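/* Copy one counter from the PPCNT IB port counters group, byte-swapped
 * and truncated to the width of the destination PMA field.
 */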
#define MLX5_ASSIGN_PMA_CNTR(counter_var, counter_name)	{		\
	counter_var = MLX5_GET_BE(typeof(counter_var),			\
				  ib_port_cntrs_grp_data_layout,	\
				  out_pma, counter_name);		\
	}

	MLX5_ASSIGN_PMA_CNTR(pma_cnt->symbol_error_counter,
			     symbol_error_counter);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->link_error_recovery_counter,
			     link_error_recovery_counter);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->link_downed_counter,
			     link_downed_counter);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_errors,
			     port_rcv_errors);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_remphys_errors,
			     port_rcv_remote_physical_errors);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_switch_relay_errors,
			     port_rcv_switch_relay_errors);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_xmit_discards,
			     port_xmit_discards);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_xmit_constraint_errors,
			     port_xmit_constraint_errors);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_xmit_wait,
			     port_xmit_wait);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_constraint_errors,
			     port_rcv_constraint_errors);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->link_overrun_errors,
			     link_overrun_errors);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->vl15_dropped,
			     vl_15_dropped);
}

static int process_pma_cmd(struct mlx5_ib_dev *dev, u8 port_num,
			   const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	struct mlx5_core_dev *mdev;
	bool native_port = true;
	u8 mdev_port_num;
	void *out_cnt;
	int err;

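	/* PM attribute payloads start at byte 64 of the MAD, i.e. 40 bytes
	 * into the ib_mad data area (which follows the 24-byte MAD header);
	 * hence the "+ 40" offsets below.
	 */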
	mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
	if (!mdev) {
		/* Failed to get the native port, likely because the 2nd
		 * port is still unaffiliated. In that case default to the
		 * 1st port and the attached PF device.
		 */
		native_port = false;
		mdev = dev->mdev;
		mdev_port_num = 1;
	}
	if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1) {
		/* Set local port to one for a Function-Per-Port HCA. */
		mdev = dev->mdev;
		mdev_port_num = 1;
	}

	/* Declare support for extended counters */
	if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO) {
		struct ib_class_port_info cpi = {};

		cpi.capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
		memcpy((out_mad->data + 40), &cpi, sizeof(cpi));
		err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
		goto done;
	}

	if (in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT) {
		struct ib_pma_portcounters_ext *pma_cnt_ext =
			(struct ib_pma_portcounters_ext *)(out_mad->data + 40);
		int sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);

		out_cnt = kvzalloc(sz, GFP_KERNEL);
		if (!out_cnt) {
			err = IB_MAD_RESULT_FAILURE;
			goto done;
		}

		err = mlx5_core_query_vport_counter(mdev, 0, 0, mdev_port_num,
						    out_cnt);
		if (!err)
			pma_cnt_ext_assign(pma_cnt_ext, out_cnt);
	} else {
		struct ib_pma_portcounters *pma_cnt =
			(struct ib_pma_portcounters *)(out_mad->data + 40);
		int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

		out_cnt = kvzalloc(sz, GFP_KERNEL);
		if (!out_cnt) {
			err = IB_MAD_RESULT_FAILURE;
			goto done;
		}

		err = mlx5_core_query_ib_ppcnt(mdev, mdev_port_num,
					       out_cnt, sz);
		if (!err)
			pma_cnt_assign(pma_cnt, out_cnt);
	}
	kvfree(out_cnt);
	err = err ? IB_MAD_RESULT_FAILURE :
		    IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
done:
	if (native_port)
		mlx5_ib_put_native_port_mdev(dev, port_num);
	return err;
}

int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			const struct ib_mad *in, struct ib_mad *out,
			size_t *out_mad_size, u16 *out_mad_pkey_index)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	u8 mgmt_class = in->mad_hdr.mgmt_class;
	u8 method = in->mad_hdr.method;
	u16 slid;
	int err;

	slid = in_wc ? ib_lid_cpu16(in_wc->slid) :
		       be16_to_cpu(IB_LID_PERMISSIVE);

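	/* A zero SLID marks a locally generated trap; consume it without
	 * passing it to firmware.
	 */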
	if (method == IB_MGMT_METHOD_TRAP && !slid)
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

	switch (mgmt_class) {
	case IB_MGMT_CLASS_SUBN_LID_ROUTED:
	case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
		if (method != IB_MGMT_METHOD_GET &&
		    method != IB_MGMT_METHOD_SET &&
		    method != IB_MGMT_METHOD_TRAP_REPRESS)
			return IB_MAD_RESULT_SUCCESS;

		/* Don't process SMInfo queries -- the SMA can't handle them.
		 */
		if (in->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
			return IB_MAD_RESULT_SUCCESS;
		break;
	case IB_MGMT_CLASS_PERF_MGMT:
		if (MLX5_CAP_GEN(dev->mdev, vport_counters) &&
		    method == IB_MGMT_METHOD_GET)
			return process_pma_cmd(dev, port_num, in, out);
		fallthrough;
	case MLX5_IB_VENDOR_CLASS1:
	case MLX5_IB_VENDOR_CLASS2:
	case IB_MGMT_CLASS_CONG_MGMT:
		if (method != IB_MGMT_METHOD_GET &&
		    method != IB_MGMT_METHOD_SET)
			return IB_MAD_RESULT_SUCCESS;
		break;
	default:
		return IB_MAD_RESULT_SUCCESS;
	}

	err = mlx5_MAD_IFC(to_mdev(ibdev), mad_flags & IB_MAD_IGNORE_MKEY,
			   mad_flags & IB_MAD_IGNORE_BKEY, port_num, in_wc,
			   in_grh, in, out);
	if (err)
		return IB_MAD_RESULT_FAILURE;

	/* Set the direction (return-path) bit in the status field of
	 * directed-route responses.
	 */
	if (mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		out->mad_hdr.status |= cpu_to_be16(1 << 15);

	if (method == IB_MGMT_METHOD_TRAP_REPRESS)
		/* No response is sent for trap repress */
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}

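/* Probe the Mellanox vendor-specific ExtendedPortInfo attribute and
 * cache whether the port supports it; this is later used to detect
 * FDR-10 link speed in mlx5_query_mad_ifc_port().
 */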
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	u16 packet_error;

	in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);

	packet_error = be16_to_cpu(out_mad->status);

	dev->mdev->port_caps[port - 1].ext_port_cap = (!err && !packet_error) ?
		MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO : 0;

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

316
317int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
318					  struct ib_smp *out_mad)
319{
320	struct ib_smp *in_mad = NULL;
321	int err = -ENOMEM;
322
323	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
324	if (!in_mad)
325		return -ENOMEM;
326
327	init_query_mad(in_mad);
328	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
329
330	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad,
331			   out_mad);
332
333	kfree(in_mad);
334	return err;
335}
336
337int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
338					 __be64 *sys_image_guid)
339{
340	struct ib_smp *out_mad = NULL;
341	int err = -ENOMEM;
342
343	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
344	if (!out_mad)
345		return -ENOMEM;
346
347	err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
348	if (err)
349		goto out;
350
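	/* SystemImageGUID is the 8-byte field at offset 4 of NodeInfo */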
	memcpy(sys_image_guid, out_mad->data + 4, 8);

out:
	kfree(out_mad);

	return err;
}

int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
				 u16 *max_pkeys)
{
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!out_mad)
		return -ENOMEM;

	err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
	if (err)
		goto out;

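	/* PartitionCap is the 16-bit field at offset 28 of NodeInfo */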
	*max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28));

out:
	kfree(out_mad);

	return err;
}

int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
				 u32 *vendor_id)
{
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!out_mad)
		return -ENOMEM;

	err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
	if (err)
		goto out;

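	/* The dword at offset 36 of NodeInfo holds LocalPortNum in its
	 * high byte followed by the 24-bit VendorID; the mask keeps the
	 * low 16 bits of that ID.
	 */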
	*vendor_id = be32_to_cpup((__be32 *)(out_mad->data + 36)) & 0xffff;

out:
	kfree(out_mad);

	return err;
}

int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;

	err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);
out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

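	/* NodeGUID is the 8-byte field at offset 12 of NodeInfo */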
	memcpy(node_guid, out_mad->data + 12, 8);
out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
			    u16 *pkey)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

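	/* Each P_KeyTable block holds 32 entries; attr_mod selects the
	 * block and index % 32 the entry within it.
	 */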
	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
	in_mad->attr_mod = cpu_to_be32(index / 32);

	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
			   out_mad);
	if (err)
		goto out;

	*pkey = be16_to_cpu(((__be16 *)out_mad->data)[index % 32]);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
			    union ib_gid *gid)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

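	/* PortInfo supplies the 8-byte GID prefix (bytes 8..15 of the
	 * attribute); GUIDInfo, read in blocks of 8 GUIDs, supplies the
	 * low 8 bytes of the GID.
	 */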
	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
			   out_mad);
	if (err)
		goto out;

	memcpy(gid->raw, out_mad->data + 8, 8);

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
	in_mad->attr_mod = cpu_to_be32(index / 8);

	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
			   out_mad);
	if (err)
		goto out;

	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
			    struct ib_port_attr *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int ext_active_speed;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	/* props is zeroed by the caller; avoid zeroing it again here */

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx5_MAD_IFC(dev, 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err) {
		mlx5_ib_warn(dev, "err %d\n", err);
		goto out;
	}

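	/* Decode the PortInfo attribute; the numbers below are byte
	 * offsets into the attribute data.
	 */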
	props->lid		= be16_to_cpup((__be16 *)(out_mad->data + 16));
	props->lmc		= out_mad->data[34] & 0x7;
	props->sm_lid		= be16_to_cpup((__be16 *)(out_mad->data + 18));
	props->sm_sl		= out_mad->data[36] & 0xf;
	props->state		= out_mad->data[32] & 0xf;
	props->phys_state	= out_mad->data[33] >> 4;
	props->port_cap_flags	= be32_to_cpup((__be32 *)(out_mad->data + 20));
	props->gid_tbl_len	= out_mad->data[50];
	props->max_msg_sz	= 1 << MLX5_CAP_GEN(mdev, log_max_msg);
	props->pkey_tbl_len	= mdev->port_caps[port - 1].pkey_table_len;
	props->bad_pkey_cntr	= be16_to_cpup((__be16 *)(out_mad->data + 46));
	props->qkey_viol_cntr	= be16_to_cpup((__be16 *)(out_mad->data + 48));
	props->active_width	= out_mad->data[31] & 0xf;
	props->active_speed	= out_mad->data[35] >> 4;
	props->max_mtu		= out_mad->data[41] & 0xf;
	props->active_mtu	= out_mad->data[36] >> 4;
	props->subnet_timeout	= out_mad->data[51] & 0x1f;
	props->max_vl_num	= out_mad->data[37] >> 4;
	props->init_type_reply	= out_mad->data[41] >> 4;

	if (props->port_cap_flags & IB_PORT_CAP_MASK2_SUP) {
		props->port_cap_flags2 =
			be16_to_cpup((__be16 *)(out_mad->data + 60));

		if (props->port_cap_flags2 & IB_PORT_LINK_WIDTH_2X_SUP)
			props->active_width = out_mad->data[31] & 0x1f;
	}

	/* Check if extended speeds (EDR/FDR/...) are supported */
	if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
		ext_active_speed = out_mad->data[62] >> 4;

		switch (ext_active_speed) {
		case 1:
			props->active_speed = 16; /* FDR */
			break;
		case 2:
			props->active_speed = 32; /* EDR */
			break;
		case 4:
			if (props->port_cap_flags & IB_PORT_CAP_MASK2_SUP &&
			    props->port_cap_flags2 & IB_PORT_LINK_SPEED_HDR_SUP)
				props->active_speed = IB_SPEED_HDR;
			break;
		}
	}

	/* If the reported active speed is QDR, check whether the link is
	 * actually running FDR-10 (a Mellanox extension reported via the
	 * ExtendedPortInfo attribute).
	 */
	if (props->active_speed == 4) {
		if (mdev->port_caps[port - 1].ext_port_cap &
		    MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) {
			init_query_mad(in_mad);
			in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
			in_mad->attr_mod = cpu_to_be32(port);

			err = mlx5_MAD_IFC(dev, 1, 1, port,
					   NULL, NULL, in_mad, out_mad);
			if (err)
				goto out;

			/* Check LinkSpeedActive for FDR-10 */
			if (out_mad->data[15] & 0x1)
				props->active_speed = 8;
		}
	}

out:
	kfree(in_mad);
	kfree(out_mad);

	return err;
}