/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/firmware.h>

#include "hfi.h"
#include "efivar.h"
#include "eprom.h"

#define DEFAULT_PLATFORM_CONFIG_NAME "hfi1_platform.dat"

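/*
 * Validate the configuration bitmap checksum stored in the ASIC scratch
 * registers.  The bitmap version from scratch 0 and every 16-bit word of
 * scratch registers 1..(ASIC_NUM_SCRATCH - 1) are summed, and any carry is
 * folded back into the low 16 bits (an end-around-carry sum, assuming
 * CHECKSUM_MASK covers the low 16 bits).  The stored CHECKSUM field is
 * expected to be the one's complement of that sum, so sum + stored == 0xFFFF
 * on success.
 *
 * Worked example with made-up numbers: a running sum of 0x2FFFE folds to
 * 0xFFFE + 0x2 = 0x10000, then to 0x0 + 0x1 = 0x1; a stored checksum of
 * 0xFFFE would then pass the check.
 *
 * Returns 1 if the checksum is valid, 0 otherwise.
 */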
static int validate_scratch_checksum(struct hfi1_devdata *dd)
{
	u64 checksum = 0, temp_scratch = 0;
	int i, j, version;

	temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH);
	version = (temp_scratch & BITMAP_VERSION_SMASK) >> BITMAP_VERSION_SHIFT;

	/* Prevent the power-on default of all zeroes from passing checksum */
	if (!version) {
		dd_dev_err(dd, "%s: Config bitmap uninitialized\n", __func__);
		dd_dev_err(dd,
			   "%s: Please update your BIOS to support active channels\n",
			   __func__);
		return 0;
	}

	/*
	 * ASIC scratch 0 only contains the checksum and bitmap version as
	 * fields of interest, both of which are handled separately from the
	 * loop below, so skip it
	 */
	checksum += version;
	for (i = 1; i < ASIC_NUM_SCRATCH; i++) {
		temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH + (8 * i));
		for (j = sizeof(u64); j != 0; j -= 2) {
			checksum += (temp_scratch & 0xFFFF);
			temp_scratch >>= 16;
		}
	}

	while (checksum >> 16)
		checksum = (checksum & CHECKSUM_MASK) + (checksum >> 16);

	temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH);
	temp_scratch &= CHECKSUM_SMASK;
	temp_scratch >>= CHECKSUM_SHIFT;

	if (checksum + temp_scratch == 0xFFFF)
		return 1;

	dd_dev_err(dd, "%s: Configuration bitmap corrupted\n", __func__);
	return 0;
}

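/*
 * For integrated devices the platform configuration of interest is packed
 * into the ASIC scratch registers by the BIOS rather than delivered as a
 * file: the per-port port type and attenuation fields live in
 * ASIC_CFG_SCRATCH_1, while the TX/RX preset indices and QSFP max power
 * class for this HFI live in ASIC_CFG_SCRATCH_2 (port 0) or
 * ASIC_CFG_SCRATCH_3 (port 1).  Unpack those fields into the per-port data
 * and record that the configuration came from scratch registers instead of
 * a platform config file.
 */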
static void save_platform_config_fields(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd = dd->pport;
	u64 temp_scratch = 0, temp_dest = 0;

	temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH_1);

	temp_dest = temp_scratch &
		    (dd->hfi1_id ? PORT1_PORT_TYPE_SMASK :
		     PORT0_PORT_TYPE_SMASK);
	ppd->port_type = temp_dest >>
			 (dd->hfi1_id ? PORT1_PORT_TYPE_SHIFT :
			  PORT0_PORT_TYPE_SHIFT);

	temp_dest = temp_scratch &
		    (dd->hfi1_id ? PORT1_LOCAL_ATTEN_SMASK :
		     PORT0_LOCAL_ATTEN_SMASK);
	ppd->local_atten = temp_dest >>
			   (dd->hfi1_id ? PORT1_LOCAL_ATTEN_SHIFT :
			    PORT0_LOCAL_ATTEN_SHIFT);

	temp_dest = temp_scratch &
		    (dd->hfi1_id ? PORT1_REMOTE_ATTEN_SMASK :
		     PORT0_REMOTE_ATTEN_SMASK);
	ppd->remote_atten = temp_dest >>
			    (dd->hfi1_id ? PORT1_REMOTE_ATTEN_SHIFT :
			     PORT0_REMOTE_ATTEN_SHIFT);

	temp_dest = temp_scratch &
		    (dd->hfi1_id ? PORT1_DEFAULT_ATTEN_SMASK :
		     PORT0_DEFAULT_ATTEN_SMASK);
	ppd->default_atten = temp_dest >>
			     (dd->hfi1_id ? PORT1_DEFAULT_ATTEN_SHIFT :
			      PORT0_DEFAULT_ATTEN_SHIFT);

	temp_scratch = read_csr(dd, dd->hfi1_id ? ASIC_CFG_SCRATCH_3 :
				ASIC_CFG_SCRATCH_2);

	ppd->tx_preset_eq = (temp_scratch & TX_EQ_SMASK) >> TX_EQ_SHIFT;
	ppd->tx_preset_noeq = (temp_scratch & TX_NO_EQ_SMASK) >> TX_NO_EQ_SHIFT;
	ppd->rx_preset = (temp_scratch & RX_SMASK) >> RX_SHIFT;

	ppd->max_power_class = (temp_scratch & QSFP_MAX_POWER_SMASK) >>
				QSFP_MAX_POWER_SHIFT;

	ppd->config_from_scratch = true;
}

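/*
 * Locate the platform configuration in order of preference:
 *   1. Integrated devices: the scratch register bitmap, provided its
 *      checksum validates.
 *   2. Discrete devices: the platform config partition in the EPROM.
 *   3. Fallback for both: the generic hfi1_platform.dat firmware file,
 *      which carries sub-optimal defaults.
 * The EPROM and firmware-file paths both leave a kmalloc'ed copy in
 * dd->platform_config so free_platform_config() can release either one
 * the same way.
 */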
void get_platform_config(struct hfi1_devdata *dd)
{
	int ret = 0;
	u8 *temp_platform_config = NULL;
	u32 esize;
	const struct firmware *platform_config_file = NULL;

	if (is_integrated(dd)) {
		if (validate_scratch_checksum(dd)) {
			save_platform_config_fields(dd);
			return;
		}
	} else {
		ret = eprom_read_platform_config(dd,
						 (void **)&temp_platform_config,
						 &esize);
		if (!ret) {
			/* success */
			dd->platform_config.data = temp_platform_config;
			dd->platform_config.size = esize;
			return;
		}
	}
	dd_dev_err(dd,
		   "%s: Failed to get platform config, falling back to sub-optimal default file\n",
		   __func__);

	ret = request_firmware(&platform_config_file,
			       DEFAULT_PLATFORM_CONFIG_NAME,
			       &dd->pcidev->dev);
	if (ret) {
		dd_dev_err(dd,
			   "%s: No default platform config file found\n",
			   __func__);
		return;
	}

	/*
	 * Allocate a separate memory block to store the data and free the
	 * firmware structure. This allows free_platform_config to treat
	 * EPROM and fallback configs in the same manner.
	 */
	dd->platform_config.data = kmemdup(platform_config_file->data,
					   platform_config_file->size,
					   GFP_KERNEL);
	/* on allocation failure, leave size at 0 so the config is absent */
	if (dd->platform_config.data)
		dd->platform_config.size = platform_config_file->size;
	release_firmware(platform_config_file);
}

void free_platform_config(struct hfi1_devdata *dd)
{
	/* Release memory allocated for eprom or fallback file read. */
	kfree(dd->platform_config.data);
	dd->platform_config.data = NULL;
}

void get_port_type(struct hfi1_pportdata *ppd)
{
	int ret;
	u32 temp;

	ret = get_platform_config_field(ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
					PORT_TABLE_PORT_TYPE, &temp,
					4);
	if (ret) {
		ppd->port_type = PORT_TYPE_UNKNOWN;
		return;
	}
	ppd->port_type = temp;
}

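/*
 * Enable or disable the QSFP transmitters.  The low four bits of the TX
 * control byte are per-lane TX_DISABLE controls, so 0x0 enables and 0xF
 * disables all four lanes.  qsfp_write() returns the number of bytes
 * written; exactly one byte is expected here.
 */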
int set_qsfp_tx(struct hfi1_pportdata *ppd, int on)
{
	u8 tx_ctrl_byte = on ? 0x0 : 0xF;
	int ret = 0;

	ret = qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_TX_CTRL_BYTE_OFFS,
			 &tx_ctrl_byte, 1);
	/* we expected 1, so consider 0 an error */
	if (ret == 0)
		ret = -EIO;
	else if (ret == 1)
		ret = 0;
	return ret;
}

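/*
 * Qualify the cable's power requirements against the platform's policy.
 * If the module's advertised power class exceeds the system table's
 * maximum, the port is disabled with a POWER_POLICY link-down reason and
 * -EPERM is returned so tuning stops here.
 */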
static int qual_power(struct hfi1_pportdata *ppd)
{
	u32 cable_power_class = 0, power_class_max = 0;
	u8 *cache = ppd->qsfp_info.cache;
	int ret = 0;

	ret = get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_SYSTEM_TABLE, 0,
		SYSTEM_TABLE_QSFP_POWER_CLASS_MAX, &power_class_max, 4);
	if (ret)
		return ret;

	cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);

	if (cable_power_class > power_class_max)
		ppd->offline_disabled_reason =
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_POWER_POLICY);

	if (ppd->offline_disabled_reason ==
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_POWER_POLICY)) {
		dd_dev_err(
			ppd->dd,
			"%s: Port disabled due to system power restrictions\n",
			__func__);
		ret = -EPERM;
	}
	return ret;
}

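/*
 * Qualify the cable's nominal bit rate against the enabled link speeds.
 * The two QSFP nominal bit rate bytes used here are encoded in units of
 * 250 Mbps and 100 Mbps respectively (per the _250/_100 offset names), so
 * the thresholds work out to:
 *   0x64 = 100 * 250 Mbps = 25 Gbps   (for OPA_LINK_SPEED_25G)
 *   0x7D = 125 * 100 Mbps = 12.5 Gbps (for OPA_LINK_SPEED_12_5G)
 * A cable slower than an enabled speed disables the port with a
 * LINKSPEED_POLICY link-down reason.
 */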
static int qual_bitrate(struct hfi1_pportdata *ppd)
{
	u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled;
	u8 *cache = ppd->qsfp_info.cache;

	if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G) &&
	    cache[QSFP_NOM_BIT_RATE_250_OFFS] < 0x64)
		ppd->offline_disabled_reason =
			   HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY);

	if ((lss & OPA_LINK_SPEED_12_5G) && (lse & OPA_LINK_SPEED_12_5G) &&
	    cache[QSFP_NOM_BIT_RATE_100_OFFS] < 0x7D)
		ppd->offline_disabled_reason =
			   HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY);

	if (ppd->offline_disabled_reason ==
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY)) {
		dd_dev_err(
			ppd->dd,
			"%s: Cable failed bitrate check, disabling port\n",
			__func__);
		return -EPERM;
	}
	return 0;
}

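/*
 * Raise the module out of low power mode when its power class requires it.
 * In the power control byte as used here, setting bit 0 asserts the power
 * override, clearing bit 1 deasserts low power mode, and bit 2 (set only
 * for power classes 5-7) enables the high power classes.  The 300 ms sleep
 * observes the SFF-8679 LPMode deassert time before the module is used at
 * full power.
 */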
static int set_qsfp_high_power(struct hfi1_pportdata *ppd)
{
	u8 cable_power_class = 0, power_ctrl_byte = 0;
	u8 *cache = ppd->qsfp_info.cache;
	int ret;

	cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);

	if (cable_power_class > QSFP_POWER_CLASS_1) {
		power_ctrl_byte = cache[QSFP_PWR_CTRL_BYTE_OFFS];

		power_ctrl_byte |= 1;
		power_ctrl_byte &= ~(0x2);

		ret = qsfp_write(ppd, ppd->dd->hfi1_id,
				 QSFP_PWR_CTRL_BYTE_OFFS,
				 &power_ctrl_byte, 1);
		if (ret != 1)
			return -EIO;

		if (cable_power_class > QSFP_POWER_CLASS_4) {
			power_ctrl_byte |= (1 << 2);
			ret = qsfp_write(ppd, ppd->dd->hfi1_id,
					 QSFP_PWR_CTRL_BYTE_OFFS,
					 &power_ctrl_byte, 1);
			if (ret != 1)
				return -EIO;
		}

		/* SFF 8679 rev 1.7 LPMode Deassert time */
		msleep(300);
	}
	return 0;
}

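/*
 * The QSFP CDR control byte holds per-lane enables: the low nibble
 * controls the RX CDRs and the high nibble the TX CDRs.  CDR settings are
 * only touched when the module advertises both a CDR and CDR bypass; for
 * power classes 1-3 the CDRs are simply forced on, otherwise the platform
 * RX/TX preset tables decide whether to enable or bypass them.
 */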
static void apply_rx_cdr(struct hfi1_pportdata *ppd,
			 u32 rx_preset_index,
			 u8 *cdr_ctrl_byte)
{
	u32 rx_preset;
	u8 *cache = ppd->qsfp_info.cache;
	int cable_power_class;

	if (!((cache[QSFP_MOD_PWR_OFFS] & 0x4) &&
	      (cache[QSFP_CDR_INFO_OFFS] & 0x40)))
		return;

	/* RX CDR present, bypass supported */
	cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);

	if (cable_power_class <= QSFP_POWER_CLASS_3) {
		/* Power class <= 3, ignore config & turn RX CDR on */
		*cdr_ctrl_byte |= 0xF;
		return;
	}

	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
		rx_preset_index, RX_PRESET_TABLE_QSFP_RX_CDR_APPLY,
		&rx_preset, 4);

	if (!rx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: RX_CDR_APPLY is set to disabled\n",
			__func__);
		return;
	}
	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
		rx_preset_index, RX_PRESET_TABLE_QSFP_RX_CDR,
		&rx_preset, 4);

	/* Expand cdr setting to all 4 lanes */
	rx_preset = (rx_preset | (rx_preset << 1) |
			(rx_preset << 2) | (rx_preset << 3));

	if (rx_preset) {
		*cdr_ctrl_byte |= rx_preset;
	} else {
		*cdr_ctrl_byte &= rx_preset;
		/* Preserve current TX CDR status */
		*cdr_ctrl_byte |= (cache[QSFP_CDR_CTRL_BYTE_OFFS] & 0xF0);
	}
}

static void apply_tx_cdr(struct hfi1_pportdata *ppd,
			 u32 tx_preset_index,
			 u8 *cdr_ctrl_byte)
{
	u32 tx_preset;
	u8 *cache = ppd->qsfp_info.cache;
	int cable_power_class;

	if (!((cache[QSFP_MOD_PWR_OFFS] & 0x8) &&
	      (cache[QSFP_CDR_INFO_OFFS] & 0x80)))
		return;

	/* TX CDR present, bypass supported */
	cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);

	if (cable_power_class <= QSFP_POWER_CLASS_3) {
		/* Power class <= 3, ignore config & turn TX CDR on */
		*cdr_ctrl_byte |= 0xF0;
		return;
	}

	get_platform_config_field(
		ppd->dd,
		PLATFORM_CONFIG_TX_PRESET_TABLE, tx_preset_index,
		TX_PRESET_TABLE_QSFP_TX_CDR_APPLY, &tx_preset, 4);

	if (!tx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: TX_CDR_APPLY is set to disabled\n",
			__func__);
		return;
	}
	get_platform_config_field(
		ppd->dd,
		PLATFORM_CONFIG_TX_PRESET_TABLE,
		tx_preset_index,
		TX_PRESET_TABLE_QSFP_TX_CDR, &tx_preset, 4);

	/* Expand cdr setting to all 4 lanes */
	tx_preset = (tx_preset | (tx_preset << 1) |
			(tx_preset << 2) | (tx_preset << 3));

	if (tx_preset)
		*cdr_ctrl_byte |= (tx_preset << 4);
	else
		/* Preserve current/determined RX CDR status */
		*cdr_ctrl_byte &= ((tx_preset << 4) | 0xF);
}

static void apply_cdr_settings(
		struct hfi1_pportdata *ppd, u32 rx_preset_index,
		u32 tx_preset_index)
{
	u8 *cache = ppd->qsfp_info.cache;
	u8 cdr_ctrl_byte = cache[QSFP_CDR_CTRL_BYTE_OFFS];

	apply_rx_cdr(ppd, rx_preset_index, &cdr_ctrl_byte);

	apply_tx_cdr(ppd, tx_preset_index, &cdr_ctrl_byte);

	qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_CDR_CTRL_BYTE_OFFS,
		   &cdr_ctrl_byte, 1);
}

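/*
 * The equalization helpers below program the module's TX EQ, RX emphasis,
 * and RX amplitude controls, which live in upper page 03 of the QSFP
 * memory map.  In the layout assumed here, each control byte packs two
 * lanes as 4-bit nibbles, so a single per-port preset value is replicated
 * into both nibbles before being written.  Requested values are clamped to
 * what the module advertises as supported (cached upper page 03,
 * bytes 224/225).
 */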
static void apply_tx_eq_auto(struct hfi1_pportdata *ppd)
{
	u8 *cache = ppd->qsfp_info.cache;
	u8 tx_eq;

	if (!(cache[QSFP_EQ_INFO_OFFS] & 0x8))
		return;
	/* Disable adaptive TX EQ if present */
	tx_eq = cache[(128 * 3) + 241];
	tx_eq &= 0xF0;
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 241, &tx_eq, 1);
}

static void apply_tx_eq_prog(struct hfi1_pportdata *ppd, u32 tx_preset_index)
{
	u8 *cache = ppd->qsfp_info.cache;
	u32 tx_preset;
	u8 tx_eq;

	if (!(cache[QSFP_EQ_INFO_OFFS] & 0x4))
		return;

	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
		tx_preset_index, TX_PRESET_TABLE_QSFP_TX_EQ_APPLY,
		&tx_preset, 4);
	if (!tx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: TX_EQ_APPLY is set to disabled\n",
			__func__);
		return;
	}
	get_platform_config_field(
			ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
			tx_preset_index, TX_PRESET_TABLE_QSFP_TX_EQ,
			&tx_preset, 4);

	if (((cache[(128 * 3) + 224] & 0xF0) >> 4) < tx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: TX EQ %x unsupported\n",
			__func__, tx_preset);

		dd_dev_info(
			ppd->dd,
			"%s: Applying EQ %x\n",
			__func__, cache[608] & 0xF0);

		tx_preset = (cache[608] & 0xF0) >> 4;
	}

	tx_eq = tx_preset | (tx_preset << 4);
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 234, &tx_eq, 1);
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 235, &tx_eq, 1);
}

static void apply_rx_eq_emp(struct hfi1_pportdata *ppd, u32 rx_preset_index)
{
	u32 rx_preset;
	u8 rx_eq, *cache = ppd->qsfp_info.cache;

	if (!(cache[QSFP_EQ_INFO_OFFS] & 0x2))
		return;
	get_platform_config_field(
			ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
			rx_preset_index, RX_PRESET_TABLE_QSFP_RX_EMP_APPLY,
			&rx_preset, 4);

	if (!rx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: RX_EMP_APPLY is set to disabled\n",
			__func__);
		return;
	}
	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
		rx_preset_index, RX_PRESET_TABLE_QSFP_RX_EMP,
		&rx_preset, 4);

	if ((cache[(128 * 3) + 224] & 0xF) < rx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: Requested RX EMP %x\n",
			__func__, rx_preset);

		dd_dev_info(
			ppd->dd,
			"%s: Applying supported EMP %x\n",
			__func__, cache[608] & 0xF);

		rx_preset = cache[608] & 0xF;
	}

	rx_eq = rx_preset | (rx_preset << 4);

	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 236, &rx_eq, 1);
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 237, &rx_eq, 1);
}

static void apply_eq_settings(struct hfi1_pportdata *ppd,
			      u32 rx_preset_index, u32 tx_preset_index)
{
	u8 *cache = ppd->qsfp_info.cache;

	/* no point going on w/o a page 3 */
	if (cache[2] & 4) {
		dd_dev_info(ppd->dd,
			    "%s: Upper page 03 not present\n",
			    __func__);
		return;
	}

	apply_tx_eq_auto(ppd);

	apply_tx_eq_prog(ppd, tx_preset_index);

	apply_rx_eq_emp(ppd, rx_preset_index);
}

static void apply_rx_amplitude_settings(
		struct hfi1_pportdata *ppd, u32 rx_preset_index,
		u32 tx_preset_index)
{
	u32 rx_preset;
	u8 rx_amp = 0, i = 0, preferred = 0, *cache = ppd->qsfp_info.cache;

	/* no point going on w/o a page 3 */
	if (cache[2] & 4) {
		dd_dev_info(ppd->dd,
			    "%s: Upper page 03 not present\n",
			    __func__);
		return;
	}
	if (!(cache[QSFP_EQ_INFO_OFFS] & 0x1)) {
		dd_dev_info(ppd->dd,
			    "%s: RX amplitude programming not supported by cable\n",
			    __func__);
		return;
	}

	get_platform_config_field(ppd->dd,
				  PLATFORM_CONFIG_RX_PRESET_TABLE,
				  rx_preset_index,
				  RX_PRESET_TABLE_QSFP_RX_AMP_APPLY,
				  &rx_preset, 4);

	if (!rx_preset) {
		dd_dev_info(ppd->dd,
			    "%s: RX_AMP_APPLY is set to disabled\n",
			    __func__);
		return;
	}
	get_platform_config_field(ppd->dd,
				  PLATFORM_CONFIG_RX_PRESET_TABLE,
				  rx_preset_index,
				  RX_PRESET_TABLE_QSFP_RX_AMP,
				  &rx_preset, 4);

	dd_dev_info(ppd->dd,
		    "%s: Requested RX AMP %x\n",
		    __func__,
		    rx_preset);

	for (i = 0; i < 4; i++) {
		if (cache[(128 * 3) + 225] & (1 << i)) {
			preferred = i;
			if (preferred == rx_preset)
				break;
		}
	}

	/*
	 * Verify that preferred RX amplitude is not just a
	 * fall through of the default
	 */
	if (!preferred && !(cache[(128 * 3) + 225] & 0x1)) {
		dd_dev_info(ppd->dd, "No supported RX AMP, not applying\n");
		return;
	}

	dd_dev_info(ppd->dd,
		    "%s: Applying RX AMP %x\n", __func__, preferred);

	rx_amp = preferred | (preferred << 4);
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 238, &rx_amp, 1);
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 239, &rx_amp, 1);
}

#define OPA_INVALID_INDEX 0xFFF

static void apply_tx_lanes(struct hfi1_pportdata *ppd, u8 field_id,
			   u32 config_data, const char *message)
{
	u8 i;
	int ret;

	for (i = 0; i < 4; i++) {
		ret = load_8051_config(ppd->dd, field_id, i, config_data);
		if (ret != HCMD_SUCCESS) {
			dd_dev_err(
				ppd->dd,
				"%s: %s for lane %u failed\n",
				message, __func__, i);
		}
	}
}

/*
 * Return a special SerDes setting for low power AOC cables.  The power class
 * threshold and setting being used were all found by empirical testing.
 *
 * Summary of the logic:
 *
 * if (QSFP and QSFP_TYPE == AOC and QSFP_POWER_CLASS < 4)
 *     return 0xe
 * return 0; // leave at default
 */
static u8 aoc_low_power_setting(struct hfi1_pportdata *ppd)
{
	u8 *cache = ppd->qsfp_info.cache;
	int power_class;

	/* QSFP only */
	if (ppd->port_type != PORT_TYPE_QSFP)
		return 0; /* leave at default */

	/* active optical cables only */
	switch ((cache[QSFP_MOD_TECH_OFFS] & 0xF0) >> 4) {
	case 0x0 ... 0x9: fallthrough;
	case 0xC: fallthrough;
	case 0xE:
		/* active AOC */
		power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);
		if (power_class < QSFP_POWER_CLASS_4)
			return 0xe;
	}
	return 0; /* leave at default */
}

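/*
 * Hand the tuning results to the 8051 firmware.  The GENERAL_CONFIG
 * LINK_TUNING_PARAMETERS field carries the tuning method, the low byte of
 * DC_HOST_COMM_SETTINGS carries a bitmask of cable capabilities,
 * CHANNEL_LOSS_SETTINGS packs the total attenuation into bits 16-23 and
 * 24-31 (the same loss is used for TX and RX), and TX_EQ_SETTINGS packs
 * precursor | attenuation << 8 | postcursor << 16 | AOC setting << 24.
 * The precursor/attenuation/postcursor values come from the platform TX
 * preset table and apply to limiting active channels only.
 */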
static void apply_tunings(
		struct hfi1_pportdata *ppd, u32 tx_preset_index,
		u8 tuning_method, u32 total_atten, u8 limiting_active)
{
	int ret = 0;
	u32 config_data = 0, tx_preset = 0;
	u8 precur = 0, attn = 0, postcur = 0, external_device_config = 0;
	u8 *cache = ppd->qsfp_info.cache;

	/* Pass tuning method to 8051 */
	read_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG,
			 &config_data);
	config_data &= ~(0xff << TUNING_METHOD_SHIFT);
	config_data |= ((u32)tuning_method << TUNING_METHOD_SHIFT);
	ret = load_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG,
			       config_data);
	if (ret != HCMD_SUCCESS)
		dd_dev_err(ppd->dd, "%s: Failed to set tuning method\n",
			   __func__);

	/* Set same channel loss for both TX and RX */
	config_data = 0 | (total_atten << 16) | (total_atten << 24);
	apply_tx_lanes(ppd, CHANNEL_LOSS_SETTINGS, config_data,
		       "Setting channel loss");

	/* Inform 8051 of cable capabilities */
	if (ppd->qsfp_info.cache_valid) {
		external_device_config =
			((cache[QSFP_MOD_PWR_OFFS] & 0x4) << 3) |
			((cache[QSFP_MOD_PWR_OFFS] & 0x8) << 2) |
			((cache[QSFP_EQ_INFO_OFFS] & 0x2) << 1) |
			(cache[QSFP_EQ_INFO_OFFS] & 0x4);
		ret = read_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS,
				       GENERAL_CONFIG, &config_data);
		/* Clear, then set the external device config field */
		config_data &= ~(u32)0xFF;
		config_data |= external_device_config;
		ret = load_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS,
				       GENERAL_CONFIG, config_data);
		if (ret != HCMD_SUCCESS)
			dd_dev_err(ppd->dd,
				   "%s: Failed to set ext device config params\n",
				   __func__);
	}

	if (tx_preset_index == OPA_INVALID_INDEX) {
		if (ppd->port_type == PORT_TYPE_QSFP && limiting_active)
			dd_dev_err(ppd->dd, "%s: Invalid Tx preset index\n",
				   __func__);
		return;
	}

	/* Following for limiting active channels only */
	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE, tx_preset_index,
		TX_PRESET_TABLE_PRECUR, &tx_preset, 4);
	precur = tx_preset;

	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
		tx_preset_index, TX_PRESET_TABLE_ATTN, &tx_preset, 4);
	attn = tx_preset;

	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
		tx_preset_index, TX_PRESET_TABLE_POSTCUR, &tx_preset, 4);
	postcur = tx_preset;

	/*
	 * NOTES:
	 * o The aoc_low_power_setting is applied to all lanes even
	 *   though only lane 0's value is examined by the firmware.
	 * o A lingering low power setting after a cable swap does
	 *   not occur.  On cable unplug the 8051 is reset and
	 *   restarted on cable insert.  This resets all settings to
	 *   their default, erasing any previous low power setting.
	 */
	config_data = precur | (attn << 8) | (postcur << 16) |
			(aoc_low_power_setting(ppd) << 24);

	apply_tx_lanes(ppd, TX_EQ_SETTINGS, config_data,
		       "Applying TX settings");
}

/* Must be holding the QSFP i2c resource */
static int tune_active_qsfp(struct hfi1_pportdata *ppd, u32 *ptr_tx_preset,
			    u32 *ptr_rx_preset, u32 *ptr_total_atten)
{
	int ret;
	u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled;
	u8 *cache = ppd->qsfp_info.cache;

	ppd->qsfp_info.limiting_active = 1;

	ret = set_qsfp_tx(ppd, 0);
	if (ret)
		return ret;

	ret = qual_power(ppd);
	if (ret)
		return ret;

	ret = qual_bitrate(ppd);
	if (ret)
		return ret;

	/*
	 * We'll change the QSFP memory contents from here on out, thus we set a
	 * flag here to remind ourselves to reset the QSFP module. This prevents
	 * reuse of stale settings established in our previous pass through.
	 */
	if (ppd->qsfp_info.reset_needed) {
		ret = reset_qsfp(ppd);
		if (ret)
			return ret;
		refresh_qsfp_cache(ppd, &ppd->qsfp_info);
	} else {
		ppd->qsfp_info.reset_needed = 1;
	}

	ret = set_qsfp_high_power(ppd);
	if (ret)
		return ret;

	if (cache[QSFP_EQ_INFO_OFFS] & 0x4) {
		ret = get_platform_config_field(
			ppd->dd,
			PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_TX_PRESET_IDX_ACTIVE_EQ,
			ptr_tx_preset, 4);
		if (ret) {
			*ptr_tx_preset = OPA_INVALID_INDEX;
			return ret;
		}
	} else {
		ret = get_platform_config_field(
			ppd->dd,
			PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_TX_PRESET_IDX_ACTIVE_NO_EQ,
			ptr_tx_preset, 4);
		if (ret) {
			*ptr_tx_preset = OPA_INVALID_INDEX;
			return ret;
		}
	}

	ret = get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
		PORT_TABLE_RX_PRESET_IDX, ptr_rx_preset, 4);
	if (ret) {
		*ptr_rx_preset = OPA_INVALID_INDEX;
		return ret;
	}

	if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G))
		get_platform_config_field(
			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_LOCAL_ATTEN_25G, ptr_total_atten, 4);
	else if ((lss & OPA_LINK_SPEED_12_5G) && (lse & OPA_LINK_SPEED_12_5G))
		get_platform_config_field(
			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_LOCAL_ATTEN_12G, ptr_total_atten, 4);

	apply_cdr_settings(ppd, *ptr_rx_preset, *ptr_tx_preset);

	apply_eq_settings(ppd, *ptr_rx_preset, *ptr_tx_preset);

	apply_rx_amplitude_settings(ppd, *ptr_rx_preset, *ptr_tx_preset);

	ret = set_qsfp_tx(ppd, 1);

	return ret;
}

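/*
 * Dispatch cable tuning based on the transmitter technology nibble from
 * the QSFP memory.  Technology values 0xA-0xB are treated as passive
 * copper: the total attenuation is the sum of the platform, cable, and
 * remote attenuation and the 8051 is told to use passive tuning.  The
 * remaining handled values are treated as active cables and go through
 * the full tune_active_qsfp() sequence; anything else (0xD, 0xF) is
 * reported as unknown/unsupported.
 */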
static int tune_qsfp(struct hfi1_pportdata *ppd,
		     u32 *ptr_tx_preset, u32 *ptr_rx_preset,
		     u8 *ptr_tuning_method, u32 *ptr_total_atten)
{
	u32 cable_atten = 0, remote_atten = 0, platform_atten = 0;
	u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled;
	int ret = 0;
	u8 *cache = ppd->qsfp_info.cache;

	switch ((cache[QSFP_MOD_TECH_OFFS] & 0xF0) >> 4) {
	case 0xA ... 0xB:
		ret = get_platform_config_field(
			ppd->dd,
			PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_LOCAL_ATTEN_25G,
			&platform_atten, 4);
		if (ret)
			return ret;

		if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G))
			cable_atten = cache[QSFP_CU_ATTEN_12G_OFFS];
		else if ((lss & OPA_LINK_SPEED_12_5G) &&
			 (lse & OPA_LINK_SPEED_12_5G))
			cable_atten = cache[QSFP_CU_ATTEN_7G_OFFS];

		/* Fall back to configured attenuation if cable memory is bad */
		if (cable_atten == 0 || cable_atten > 36) {
			ret = get_platform_config_field(
				ppd->dd,
				PLATFORM_CONFIG_SYSTEM_TABLE, 0,
				SYSTEM_TABLE_QSFP_ATTENUATION_DEFAULT_25G,
				&cable_atten, 4);
			if (ret)
				return ret;
		}

		ret = get_platform_config_field(
			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_REMOTE_ATTEN_25G, &remote_atten, 4);
		if (ret)
			return ret;

		*ptr_total_atten = platform_atten + cable_atten + remote_atten;

		*ptr_tuning_method = OPA_PASSIVE_TUNING;
		break;
	case 0x0 ... 0x9: fallthrough;
	case 0xC: fallthrough;
	case 0xE:
		ret = tune_active_qsfp(ppd, ptr_tx_preset, ptr_rx_preset,
				       ptr_total_atten);
		if (ret)
			return ret;

		*ptr_tuning_method = OPA_ACTIVE_TUNING;
		break;
	case 0xD: fallthrough;
	case 0xF:
	default:
		dd_dev_warn(ppd->dd, "%s: Unknown/unsupported cable\n",
			    __func__);
		break;
	}
	return ret;
}

/*
 * This function communicates its success or failure via
 * ppd->driver_link_ready.  Thus, it depends on its association with
 * start_link(...), which checks driver_link_ready before proceeding with
 * the link negotiation and initialization process.
 */
void tune_serdes(struct hfi1_pportdata *ppd)
{
	int ret = 0;
	u32 total_atten = 0;
	u32 remote_atten = 0, platform_atten = 0;
	u32 rx_preset_index, tx_preset_index;
	u8 tuning_method = 0, limiting_active = 0;
	struct hfi1_devdata *dd = ppd->dd;

	rx_preset_index = OPA_INVALID_INDEX;
	tx_preset_index = OPA_INVALID_INDEX;

	/* the link defaults to enabled */
	ppd->link_enabled = 1;
	/* the driver link ready state defaults to not ready */
	ppd->driver_link_ready = 0;
	ppd->offline_disabled_reason = HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);

	/* Skip the tuning for testing (loopback != none) and simulations */
	if (loopback != LOOPBACK_NONE ||
	    ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
		ppd->driver_link_ready = 1;

		if (qsfp_mod_present(ppd)) {
			ret = acquire_chip_resource(ppd->dd,
						    qsfp_resource(ppd->dd),
						    QSFP_WAIT);
			if (ret) {
				dd_dev_err(ppd->dd, "%s: hfi%d: cannot lock i2c chain\n",
					   __func__, (int)ppd->dd->hfi1_id);
				goto bail;
			}

			refresh_qsfp_cache(ppd, &ppd->qsfp_info);
			release_chip_resource(ppd->dd, qsfp_resource(ppd->dd));
		}

		return;
	}

	switch (ppd->port_type) {
	case PORT_TYPE_DISCONNECTED:
		ppd->offline_disabled_reason =
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_DISCONNECTED);
		dd_dev_warn(dd, "%s: Port disconnected, disabling port\n",
			    __func__);
		goto bail;
	case PORT_TYPE_FIXED:
		/* platform_atten, remote_atten pre-zeroed to catch error */
		get_platform_config_field(
			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_LOCAL_ATTEN_25G, &platform_atten, 4);

		get_platform_config_field(
			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_REMOTE_ATTEN_25G, &remote_atten, 4);

		total_atten = platform_atten + remote_atten;

		tuning_method = OPA_PASSIVE_TUNING;
		break;
	case PORT_TYPE_VARIABLE:
		if (qsfp_mod_present(ppd)) {
			/*
			 * platform_atten, remote_atten pre-zeroed to
			 * catch error
			 */
			get_platform_config_field(
				ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
				PORT_TABLE_LOCAL_ATTEN_25G,
				&platform_atten, 4);

			get_platform_config_field(
				ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
				PORT_TABLE_REMOTE_ATTEN_25G,
				&remote_atten, 4);

			total_atten = platform_atten + remote_atten;

			tuning_method = OPA_PASSIVE_TUNING;
		} else {
			ppd->offline_disabled_reason =
			     HFI1_ODR_MASK(OPA_LINKDOWN_REASON_CHASSIS_CONFIG);
			goto bail;
		}
		break;
	case PORT_TYPE_QSFP:
		if (qsfp_mod_present(ppd)) {
			ret = acquire_chip_resource(ppd->dd,
						    qsfp_resource(ppd->dd),
						    QSFP_WAIT);
			if (ret) {
				dd_dev_err(ppd->dd, "%s: hfi%d: cannot lock i2c chain\n",
					   __func__, (int)ppd->dd->hfi1_id);
				goto bail;
			}
			refresh_qsfp_cache(ppd, &ppd->qsfp_info);

			if (ppd->qsfp_info.cache_valid) {
				ret = tune_qsfp(ppd,
						&tx_preset_index,
						&rx_preset_index,
						&tuning_method,
						&total_atten);

				/*
				 * We may have modified the QSFP memory, so
				 * update the cache to reflect the changes
				 */
				refresh_qsfp_cache(ppd, &ppd->qsfp_info);
				limiting_active =
						ppd->qsfp_info.limiting_active;
			} else {
				dd_dev_err(dd,
					   "%s: Reading QSFP memory failed\n",
					   __func__);
				ret = -EINVAL; /* a fail indication */
			}
			release_chip_resource(ppd->dd, qsfp_resource(ppd->dd));
			if (ret)
				goto bail;
		} else {
			ppd->offline_disabled_reason =
			   HFI1_ODR_MASK(
				OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
			goto bail;
		}
		break;
	default:
		dd_dev_warn(ppd->dd, "%s: Unknown port type\n", __func__);
		ppd->port_type = PORT_TYPE_UNKNOWN;
		tuning_method = OPA_UNKNOWN_TUNING;
		total_atten = 0;
		limiting_active = 0;
		tx_preset_index = OPA_INVALID_INDEX;
		break;
	}

	if (ppd->offline_disabled_reason ==
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
		apply_tunings(ppd, tx_preset_index, tuning_method,
			      total_atten, limiting_active);

	if (!ret)
		ppd->driver_link_ready = 1;

	return;
bail:
	ppd->driver_link_ready = 0;
}