// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/pci_ids.h>

#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "icp_qat_hal.h"
#include "icp_qat_uclo.h"

#define BAD_REGADDR	       0xffff
#define MAX_RETRY_TIMES	   10000
#define INIT_CTX_ARB_VALUE	0x0
#define INIT_CTX_ENABLE_VALUE     0x0
#define INIT_PC_VALUE	     0x0
#define INIT_WAKEUP_EVENTS_VALUE  0x1
#define INIT_SIG_EVENTS_VALUE     0x1
#define INIT_CCENABLE_VALUE       0x2000
#define RST_CSR_QAT_LSB	   20
#define RST_CSR_AE_LSB		  0
#define MC_TIMESTAMP_ENABLE       (0x1 << 7)

#define IGNORE_W1C_MASK ((~(1 << CE_BREAKPOINT_BITPOS)) & \
	(~(1 << CE_CNTL_STORE_PARITY_ERROR_BITPOS)) & \
	(~(1 << CE_REG_PAR_ERR_BITPOS)))
#define INSERT_IMMED_GPRA_CONST(inst, const_val) \
	(inst = ((inst & 0xFFFF00C03FFull) | \
		((((const_val) << 12) & 0x0FF00000ull) | \
		(((const_val) << 10) & 0x0003FC00ull))))
#define INSERT_IMMED_GPRB_CONST(inst, const_val) \
	(inst = ((inst & 0xFFFF00FFF00ull) | \
		((((const_val) << 12) & 0x0FF00000ull) | \
		(((const_val) <<  0) & 0x000000FFull))))

#define AE(handle, ae) handle->hal_handle->aes[ae]

static const u64 inst_4b[] = {
	0x0F0400C0000ull, 0x0F4400C0000ull, 0x0F040000300ull, 0x0F440000300ull,
	0x0FC066C0000ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
	0x0A021000000ull
};

static const u64 inst[] = {
	0x0F0000C0000ull, 0x0F000000380ull, 0x0D805000011ull, 0x0FC082C0300ull,
	0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
	0x0A0643C0000ull, 0x0BAC0000301ull, 0x0D802000101ull, 0x0F0000C0001ull,
	0x0FC066C0001ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
	0x0F000400300ull, 0x0A0610C0000ull, 0x0BAC0000301ull, 0x0D804400101ull,
	0x0A0580C0000ull, 0x0A0581C0000ull, 0x0A0582C0000ull, 0x0A0583C0000ull,
	0x0A0584C0000ull, 0x0A0585C0000ull, 0x0A0586C0000ull, 0x0A0587C0000ull,
	0x0A0588C0000ull, 0x0A0589C0000ull, 0x0A058AC0000ull, 0x0A058BC0000ull,
	0x0A058CC0000ull, 0x0A058DC0000ull, 0x0A058EC0000ull, 0x0A058FC0000ull,
	0x0A05C0C0000ull, 0x0A05C1C0000ull, 0x0A05C2C0000ull, 0x0A05C3C0000ull,
	0x0A05C4C0000ull, 0x0A05C5C0000ull, 0x0A05C6C0000ull, 0x0A05C7C0000ull,
	0x0A05C8C0000ull, 0x0A05C9C0000ull, 0x0A05CAC0000ull, 0x0A05CBC0000ull,
	0x0A05CCC0000ull, 0x0A05CDC0000ull, 0x0A05CEC0000ull, 0x0A05CFC0000ull,
	0x0A0400C0000ull, 0x0B0400C0000ull, 0x0A0401C0000ull, 0x0B0401C0000ull,
	0x0A0402C0000ull, 0x0B0402C0000ull, 0x0A0403C0000ull, 0x0B0403C0000ull,
	0x0A0404C0000ull, 0x0B0404C0000ull, 0x0A0405C0000ull, 0x0B0405C0000ull,
	0x0A0406C0000ull, 0x0B0406C0000ull, 0x0A0407C0000ull, 0x0B0407C0000ull,
	0x0A0408C0000ull, 0x0B0408C0000ull, 0x0A0409C0000ull, 0x0B0409C0000ull,
	0x0A040AC0000ull, 0x0B040AC0000ull, 0x0A040BC0000ull, 0x0B040BC0000ull,
	0x0A040CC0000ull, 0x0B040CC0000ull, 0x0A040DC0000ull, 0x0B040DC0000ull,
	0x0A040EC0000ull, 0x0B040EC0000ull, 0x0A040FC0000ull, 0x0B040FC0000ull,
	0x0D81581C010ull, 0x0E000010000ull, 0x0E000010000ull,
};

void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle,
			  unsigned char ae, unsigned int ctx_mask)
{
	AE(handle, ae).live_ctx_mask = ctx_mask;
}

#define CSR_RETRY_TIMES 500
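/*
 * Reads and writes of AE local CSRs can transiently fail; both helpers
 * below retry the access until LOCAL_CSR_STATUS no longer flags an error,
 * up to CSR_RETRY_TIMES attempts.
 */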
static int qat_hal_rd_ae_csr(struct icp_qat_fw_loader_handle *handle,
			     unsigned char ae, unsigned int csr)
{
	unsigned int iterations = CSR_RETRY_TIMES;
	int value;

	do {
		value = GET_AE_CSR(handle, ae, csr);
		if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS))
			return value;
	} while (iterations--);

	pr_err("QAT: Read CSR timeout\n");
	return 0;
}

static int qat_hal_wr_ae_csr(struct icp_qat_fw_loader_handle *handle,
			     unsigned char ae, unsigned int csr,
			     unsigned int value)
{
	unsigned int iterations = CSR_RETRY_TIMES;

	do {
		SET_AE_CSR(handle, ae, csr, value);
		if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS))
			return 0;
	} while (iterations--);

	pr_err("QAT: Write CSR timeout\n");
	return -EFAULT;
}

static void qat_hal_get_wakeup_event(struct icp_qat_fw_loader_handle *handle,
				     unsigned char ae, unsigned char ctx,
				     unsigned int *events)
{
	unsigned int cur_ctx;

	cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
	*events = qat_hal_rd_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT);
	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
}

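/*
 * Wait for at least 'cycles' AE clock cycles, measured through the 16 bit
 * PROFILE_COUNT CSR.  When chk_inactive is set, also require the ACS_ABO
 * bit of ACTIVE_CTX_STATUS to be clear before returning success.
 */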
static int qat_hal_wait_cycles(struct icp_qat_fw_loader_handle *handle,
			       unsigned char ae, unsigned int cycles,
			       int chk_inactive)
{
	unsigned int base_cnt = 0, cur_cnt = 0;
	unsigned int csr = (1 << ACS_ABO_BITPOS);
	int times = MAX_RETRY_TIMES;
	int elapsed_cycles = 0;

	base_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
	base_cnt &= 0xffff;
	while ((int)cycles > elapsed_cycles && times--) {
		if (chk_inactive)
			csr = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);

		cur_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
		cur_cnt &= 0xffff;
		elapsed_cycles = cur_cnt - base_cnt;

		if (elapsed_cycles < 0)
			elapsed_cycles += 0x10000;

		/* ensure at least 8 cycles have elapsed in wait_cycles */
		if (elapsed_cycles >= 8 && !(csr & (1 << ACS_ABO_BITPOS)))
			return 0;
	}
	if (times < 0) {
		pr_err("QAT: wait_num_cycles timed out\n");
		return -EFAULT;
	}
	return 0;
}

#define CLR_BIT(wrd, bit) (wrd & ~(1 << bit))
#define SET_BIT(wrd, bit) (wrd | 1 << bit)

int qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle,
			    unsigned char ae, unsigned char mode)
{
	unsigned int csr, new_csr;

	if ((mode != 4) && (mode != 8)) {
		pr_err("QAT: bad ctx mode=%d\n", mode);
		return -EINVAL;
	}

	/* Sets the acceleration engine context mode to either four or eight */
	csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
	csr = IGNORE_W1C_MASK & csr;
	new_csr = (mode == 4) ?
		SET_BIT(csr, CE_INUSE_CONTEXTS_BITPOS) :
		CLR_BIT(csr, CE_INUSE_CONTEXTS_BITPOS);
	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
	return 0;
}

int qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle *handle,
			   unsigned char ae, unsigned char mode)
{
	unsigned int csr, new_csr;

	csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
	csr &= IGNORE_W1C_MASK;

	new_csr = (mode) ?
		SET_BIT(csr, CE_NN_MODE_BITPOS) :
		CLR_BIT(csr, CE_NN_MODE_BITPOS);

	if (new_csr != csr)
		qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);

	return 0;
}

int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle,
			   unsigned char ae, enum icp_qat_uof_regtype lm_type,
			   unsigned char mode)
{
	unsigned int csr, new_csr;

	csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
	csr &= IGNORE_W1C_MASK;
	switch (lm_type) {
	case ICP_LMEM0:
		new_csr = (mode) ?
			SET_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS) :
			CLR_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS);
		break;
	case ICP_LMEM1:
		new_csr = (mode) ?
			SET_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS) :
			CLR_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS);
		break;
	default:
		pr_err("QAT: invalid lmType = 0x%x\n", lm_type);
		return -EINVAL;
	}

	if (new_csr != csr)
		qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
	return 0;
}

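/*
 * Translate a UOF register type and register number into the encoded
 * register address used by the micro instructions and transfer register
 * accessors.  Returns BAD_REGADDR for unsupported register types.
 */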
static unsigned short qat_hal_get_reg_addr(unsigned int type,
					   unsigned short reg_num)
{
	unsigned short reg_addr;

	switch (type) {
	case ICP_GPA_ABS:
	case ICP_GPB_ABS:
		reg_addr = 0x80 | (reg_num & 0x7f);
		break;
	case ICP_GPA_REL:
	case ICP_GPB_REL:
		reg_addr = reg_num & 0x1f;
		break;
	case ICP_SR_RD_REL:
	case ICP_SR_WR_REL:
	case ICP_SR_REL:
		reg_addr = 0x180 | (reg_num & 0x1f);
		break;
	case ICP_SR_ABS:
		reg_addr = 0x140 | ((reg_num & 0x3) << 1);
		break;
	case ICP_DR_RD_REL:
	case ICP_DR_WR_REL:
	case ICP_DR_REL:
		reg_addr = 0x1c0 | (reg_num & 0x1f);
		break;
	case ICP_DR_ABS:
		reg_addr = 0x100 | ((reg_num & 0x3) << 1);
		break;
	case ICP_NEIGH_REL:
		reg_addr = 0x280 | (reg_num & 0x1f);
		break;
	case ICP_LMEM0:
		reg_addr = 0x200;
		break;
	case ICP_LMEM1:
		reg_addr = 0x220;
		break;
	case ICP_NO_DEST:
		reg_addr = 0x300 | (reg_num & 0xff);
		break;
	default:
		reg_addr = BAD_REGADDR;
		break;
	}
	return reg_addr;
}

void qat_hal_reset(struct icp_qat_fw_loader_handle *handle)
{
	unsigned int ae_reset_csr;

	ae_reset_csr = GET_GLB_CSR(handle, ICP_RESET);
	ae_reset_csr |= handle->hal_handle->ae_mask << RST_CSR_AE_LSB;
	ae_reset_csr |= handle->hal_handle->slice_mask << RST_CSR_QAT_LSB;
	SET_GLB_CSR(handle, ICP_RESET, ae_reset_csr);
}

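/*
 * Indirect (per-context) CSRs are reached by first selecting the context
 * through CSR_CTX_POINTER; the helpers below save and restore the current
 * pointer around the access so callers do not observe it changing.
 */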
static void qat_hal_wr_indr_csr(struct icp_qat_fw_loader_handle *handle,
				unsigned char ae, unsigned int ctx_mask,
				unsigned int ae_csr, unsigned int csr_val)
{
	unsigned int ctx, cur_ctx;

	cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);

	for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
		if (!(ctx_mask & (1 << ctx)))
			continue;
		qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
		qat_hal_wr_ae_csr(handle, ae, ae_csr, csr_val);
	}

	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
}

static unsigned int qat_hal_rd_indr_csr(struct icp_qat_fw_loader_handle *handle,
				unsigned char ae, unsigned char ctx,
				unsigned int ae_csr)
{
	unsigned int cur_ctx, csr_val;

	cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
	csr_val = qat_hal_rd_ae_csr(handle, ae, ae_csr);
	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);

	return csr_val;
}

static void qat_hal_put_sig_event(struct icp_qat_fw_loader_handle *handle,
				  unsigned char ae, unsigned int ctx_mask,
				  unsigned int events)
{
	unsigned int ctx, cur_ctx;

	cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
	for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
		if (!(ctx_mask & (1 << ctx)))
			continue;
		qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
		qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_INDIRECT, events);
	}
	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
}

static void qat_hal_put_wakeup_event(struct icp_qat_fw_loader_handle *handle,
				     unsigned char ae, unsigned int ctx_mask,
				     unsigned int events)
{
	unsigned int ctx, cur_ctx;

	cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
	for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
		if (!(ctx_mask & (1 << ctx)))
			continue;
		qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
		qat_hal_wr_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT,
				  events);
	}
	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
}

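/*
 * An AE is considered alive if its PROFILE_COUNT CSR keeps advancing.
 * Return -EFAULT if any enabled AE fails to make progress within
 * MAX_RETRY_TIMES polls.
 */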
static int qat_hal_check_ae_alive(struct icp_qat_fw_loader_handle *handle)
{
	unsigned long ae_mask = handle->hal_handle->ae_mask;
	unsigned int base_cnt, cur_cnt;
	unsigned char ae;
	int times = MAX_RETRY_TIMES;

	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
		base_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
		base_cnt &= 0xffff;

		do {
			cur_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
			cur_cnt &= 0xffff;
		} while (times-- && (cur_cnt == base_cnt));

		if (times < 0) {
			pr_err("QAT: AE%d is inactive!!\n", ae);
			return -EFAULT;
		}
	}

	return 0;
}

int qat_hal_check_ae_active(struct icp_qat_fw_loader_handle *handle,
			    unsigned int ae)
{
	unsigned int enable = 0, active = 0;

	enable = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
	active = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
	if ((enable & (0xff << CE_ENABLE_BITPOS)) ||
	    (active & (1 << ACS_ABO_BITPOS)))
		return 1;
	else
		return 0;
}

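/*
 * Zero the per-AE timestamp counters.  The global timestamp enable bit is
 * cleared while the TIMESTAMP_LOW/HIGH CSRs are written and re-enabled
 * afterwards so all AEs restart counting from a consistent value.
 */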
static void qat_hal_reset_timestamp(struct icp_qat_fw_loader_handle *handle)
{
	unsigned long ae_mask = handle->hal_handle->ae_mask;
	unsigned int misc_ctl;
	unsigned char ae;

	/* stop the timestamp timers */
	misc_ctl = GET_GLB_CSR(handle, MISC_CONTROL);
	if (misc_ctl & MC_TIMESTAMP_ENABLE)
		SET_GLB_CSR(handle, MISC_CONTROL, misc_ctl &
			    (~MC_TIMESTAMP_ENABLE));

	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
		qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_LOW, 0);
		qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_HIGH, 0);
	}
	/* start timestamp timers */
	SET_GLB_CSR(handle, MISC_CONTROL, misc_ctl | MC_TIMESTAMP_ENABLE);
}

#define ESRAM_AUTO_TINIT	BIT(2)
#define ESRAM_AUTO_TINIT_DONE	BIT(3)
#define ESRAM_AUTO_INIT_USED_CYCLES (1640)
#define ESRAM_AUTO_INIT_CSR_OFFSET 0xC1C
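/*
 * On DH895xCC devices the embedded SRAM must be auto-initialized before
 * use: set ESRAM_AUTO_TINIT and poll for ESRAM_AUTO_TINIT_DONE.  Other
 * devices do not need this step and return immediately.
 */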
static int qat_hal_init_esram(struct icp_qat_fw_loader_handle *handle)
{
	void __iomem *csr_addr =
			(void __iomem *)((uintptr_t)handle->hal_ep_csr_addr_v +
			ESRAM_AUTO_INIT_CSR_OFFSET);
	unsigned int csr_val;
	int times = 30;

	if (handle->pci_dev->device != PCI_DEVICE_ID_INTEL_QAT_DH895XCC)
		return 0;

	csr_val = ADF_CSR_RD(csr_addr, 0);
	if ((csr_val & ESRAM_AUTO_TINIT) && (csr_val & ESRAM_AUTO_TINIT_DONE))
		return 0;

	csr_val = ADF_CSR_RD(csr_addr, 0);
	csr_val |= ESRAM_AUTO_TINIT;
	ADF_CSR_WR(csr_addr, 0, csr_val);

	do {
		qat_hal_wait_cycles(handle, 0, ESRAM_AUTO_INIT_USED_CYCLES, 0);
		csr_val = ADF_CSR_RD(csr_addr, 0);
	} while (!(csr_val & ESRAM_AUTO_TINIT_DONE) && times--);
	if (times < 0) {
		pr_err("QAT: Failed to init eSRAM!\n");
		return -EFAULT;
	}
	return 0;
}

#define SHRAM_INIT_CYCLES 2060
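/*
 * Bring the AEs and QAT accelerators out of reset: clear the reset bits,
 * re-enable their clocks, verify the AEs are executing, program sane
 * power-up defaults for every enabled AE, then run the eSRAM and shared
 * RAM initialization and restart the timestamp counters.
 */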
int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle)
{
	unsigned long ae_mask = handle->hal_handle->ae_mask;
	unsigned int ae_reset_csr;
	unsigned char ae;
	unsigned int clk_csr;
	unsigned int times = 100;
	unsigned int csr;

	/* write to the reset csr */
	ae_reset_csr = GET_GLB_CSR(handle, ICP_RESET);
	ae_reset_csr &= ~(handle->hal_handle->ae_mask << RST_CSR_AE_LSB);
	ae_reset_csr &= ~(handle->hal_handle->slice_mask << RST_CSR_QAT_LSB);
	do {
		SET_GLB_CSR(handle, ICP_RESET, ae_reset_csr);
		if (!(times--))
			goto out_err;
		csr = GET_GLB_CSR(handle, ICP_RESET);
	} while ((handle->hal_handle->ae_mask |
		 (handle->hal_handle->slice_mask << RST_CSR_QAT_LSB)) & csr);
	/* enable clock */
	clk_csr = GET_GLB_CSR(handle, ICP_GLOBAL_CLK_ENABLE);
	clk_csr |= handle->hal_handle->ae_mask << 0;
	clk_csr |= handle->hal_handle->slice_mask << 20;
	SET_GLB_CSR(handle, ICP_GLOBAL_CLK_ENABLE, clk_csr);
	if (qat_hal_check_ae_alive(handle))
		goto out_err;

	/* Set undefined power-up/reset states to reasonable default values */
	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
		qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES,
				  INIT_CTX_ENABLE_VALUE);
		qat_hal_wr_indr_csr(handle, ae, ICP_QAT_UCLO_AE_ALL_CTX,
				    CTX_STS_INDIRECT,
				    handle->hal_handle->upc_mask &
				    INIT_PC_VALUE);
		qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE);
		qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE);
		qat_hal_put_wakeup_event(handle, ae,
					 ICP_QAT_UCLO_AE_ALL_CTX,
					 INIT_WAKEUP_EVENTS_VALUE);
		qat_hal_put_sig_event(handle, ae,
				      ICP_QAT_UCLO_AE_ALL_CTX,
				      INIT_SIG_EVENTS_VALUE);
	}
	if (qat_hal_init_esram(handle))
		goto out_err;
	if (qat_hal_wait_cycles(handle, 0, SHRAM_INIT_CYCLES, 0))
		goto out_err;
	qat_hal_reset_timestamp(handle);

	return 0;
out_err:
	pr_err("QAT: failed to get device out of reset\n");
	return -EFAULT;
}

static void qat_hal_disable_ctx(struct icp_qat_fw_loader_handle *handle,
				unsigned char ae, unsigned int ctx_mask)
{
	unsigned int ctx;

	ctx = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
	ctx &= IGNORE_W1C_MASK &
		(~((ctx_mask & ICP_QAT_UCLO_AE_ALL_CTX) << CE_ENABLE_BITPOS));
	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx);
}

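/*
 * Micro store words carry a 7 bit ECC in bits 44-50 (0x2C-0x32).
 * qat_hal_set_uword_ecc() recomputes those bits from fixed parity masks
 * before a word is written to the ustore.
 */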
static u64 qat_hal_parity_64bit(u64 word)
{
	word ^= word >> 1;
	word ^= word >> 2;
	word ^= word >> 4;
	word ^= word >> 8;
	word ^= word >> 16;
	word ^= word >> 32;
	return word & 1;
}

static u64 qat_hal_set_uword_ecc(u64 uword)
{
	u64 bit0_mask = 0xff800007fffULL, bit1_mask = 0x1f801ff801fULL,
		bit2_mask = 0xe387e0781e1ULL, bit3_mask = 0x7cb8e388e22ULL,
		bit4_mask = 0xaf5b2c93244ULL, bit5_mask = 0xf56d5525488ULL,
		bit6_mask = 0xdaf69a46910ULL;

	/* clear the ecc bits */
	uword &= ~(0x7fULL << 0x2C);
	uword |= qat_hal_parity_64bit(bit0_mask & uword) << 0x2C;
	uword |= qat_hal_parity_64bit(bit1_mask & uword) << 0x2D;
	uword |= qat_hal_parity_64bit(bit2_mask & uword) << 0x2E;
	uword |= qat_hal_parity_64bit(bit3_mask & uword) << 0x2F;
	uword |= qat_hal_parity_64bit(bit4_mask & uword) << 0x30;
	uword |= qat_hal_parity_64bit(bit5_mask & uword) << 0x31;
	uword |= qat_hal_parity_64bit(bit6_mask & uword) << 0x32;
	return uword;
}

void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle,
		       unsigned char ae, unsigned int uaddr,
		       unsigned int words_num, u64 *uword)
{
	unsigned int ustore_addr;
	unsigned int i;

	ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
	uaddr |= UA_ECS;
	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
	for (i = 0; i < words_num; i++) {
		unsigned int uwrd_lo, uwrd_hi;
		u64 tmp;

		tmp = qat_hal_set_uword_ecc(uword[i]);
		uwrd_lo = (unsigned int)(tmp & 0xffffffff);
		uwrd_hi = (unsigned int)(tmp >> 0x20);
		qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
		qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
	}
	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
}

static void qat_hal_enable_ctx(struct icp_qat_fw_loader_handle *handle,
			       unsigned char ae, unsigned int ctx_mask)
{
	unsigned int ctx;

	ctx = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
	ctx &= IGNORE_W1C_MASK;
	ctx_mask &= (ctx & CE_INUSE_CONTEXTS) ? 0x55 : 0xFF;
	ctx |= (ctx_mask << CE_ENABLE_BITPOS);
	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx);
}

static void qat_hal_clear_xfer(struct icp_qat_fw_loader_handle *handle)
{
	unsigned long ae_mask = handle->hal_handle->ae_mask;
	unsigned char ae;
	unsigned short reg;

	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
		for (reg = 0; reg < ICP_QAT_UCLO_MAX_GPR_REG; reg++) {
			qat_hal_init_rd_xfer(handle, ae, 0, ICP_SR_RD_ABS,
					     reg, 0);
			qat_hal_init_rd_xfer(handle, ae, 0, ICP_DR_RD_ABS,
					     reg, 0);
		}
	}
}

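/*
 * Clear the GPRs of every enabled AE by loading the 'inst' microprogram
 * into the ustore, running it on all contexts, waiting for the AE to go
 * inactive, and then restoring the original context state.
 */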
static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle)
{
	unsigned long ae_mask = handle->hal_handle->ae_mask;
	unsigned char ae;
	unsigned int ctx_mask = ICP_QAT_UCLO_AE_ALL_CTX;
	int times = MAX_RETRY_TIMES;
	unsigned int csr_val = 0;
	unsigned int savctx = 0;
	int ret = 0;

	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
		csr_val = qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL);
		csr_val &= ~(1 << MMC_SHARE_CS_BITPOS);
		qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, csr_val);
		csr_val = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
		csr_val &= IGNORE_W1C_MASK;
		csr_val |= CE_NN_MODE;
		qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, csr_val);
		qat_hal_wr_uwords(handle, ae, 0, ARRAY_SIZE(inst),
				  (u64 *)inst);
		qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
				    handle->hal_handle->upc_mask &
				    INIT_PC_VALUE);
		savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
		qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, 0);
		qat_hal_put_wakeup_event(handle, ae, ctx_mask, XCWE_VOLUNTARY);
		qat_hal_wr_indr_csr(handle, ae, ctx_mask,
				    CTX_SIG_EVENTS_INDIRECT, 0);
		qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0);
		qat_hal_enable_ctx(handle, ae, ctx_mask);
	}
	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
		/* wait for AE to finish */
		do {
			ret = qat_hal_wait_cycles(handle, ae, 20, 1);
		} while (ret && times--);

		if (times < 0) {
			pr_err("QAT: clear GPR of AE %d failed\n", ae);
			return -EINVAL;
		}
		qat_hal_disable_ctx(handle, ae, ctx_mask);
		qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
				  savctx & ACS_ACNO);
		qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES,
				  INIT_CTX_ENABLE_VALUE);
		qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
				    handle->hal_handle->upc_mask &
				    INIT_PC_VALUE);
		qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE);
		qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE);
		qat_hal_put_wakeup_event(handle, ae, ctx_mask,
					 INIT_WAKEUP_EVENTS_VALUE);
		qat_hal_put_sig_event(handle, ae, ctx_mask,
				      INIT_SIG_EVENTS_VALUE);
	}
	return 0;
}

#define ICP_QAT_AE_OFFSET	0x20000
#define ICP_QAT_CAP_OFFSET       (ICP_QAT_AE_OFFSET + 0x10000)
#define LOCAL_TO_XFER_REG_OFFSET    0x800
#define ICP_QAT_EP_OFFSET	0x3a000
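/*
 * Allocate and populate the firmware loader handle: map the AE transfer,
 * capability and endpoint CSR regions from the MISC BAR, record the AE and
 * accelerator masks, take the AEs out of reset, and clear their transfer
 * registers (and GPRs when firmware authentication is not in use).
 */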
int qat_hal_init(struct adf_accel_dev *accel_dev)
{
	unsigned char ae;
	unsigned int max_en_ae_id = 0;
	struct icp_qat_fw_loader_handle *handle;
	struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_bar *misc_bar =
			&pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)];
	unsigned long ae_mask = hw_data->ae_mask;
	unsigned int csr_val = 0;
	struct adf_bar *sram_bar;

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle)
		return -ENOMEM;

	handle->hal_cap_g_ctl_csr_addr_v =
		(void __iomem *)((uintptr_t)misc_bar->virt_addr +
				 ICP_QAT_CAP_OFFSET);
	handle->hal_cap_ae_xfer_csr_addr_v =
		(void __iomem *)((uintptr_t)misc_bar->virt_addr +
				 ICP_QAT_AE_OFFSET);
	handle->hal_ep_csr_addr_v =
		(void __iomem *)((uintptr_t)misc_bar->virt_addr +
				 ICP_QAT_EP_OFFSET);
	handle->hal_cap_ae_local_csr_addr_v =
		(void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v +
				 LOCAL_TO_XFER_REG_OFFSET);
	handle->pci_dev = pci_info->pci_dev;
	if (handle->pci_dev->device == PCI_DEVICE_ID_INTEL_QAT_DH895XCC) {
		sram_bar =
			&pci_info->pci_bars[hw_data->get_sram_bar_id(hw_data)];
		handle->hal_sram_addr_v = sram_bar->virt_addr;
	}
	handle->fw_auth = (handle->pci_dev->device ==
			   PCI_DEVICE_ID_INTEL_QAT_DH895XCC) ? false : true;
	handle->hal_handle = kzalloc(sizeof(*handle->hal_handle), GFP_KERNEL);
	if (!handle->hal_handle)
		goto out_hal_handle;
	handle->hal_handle->revision_id = accel_dev->accel_pci_dev.revid;
	handle->hal_handle->ae_mask = hw_data->ae_mask;
	handle->hal_handle->slice_mask = hw_data->accel_mask;
	/* create AE objects */
	handle->hal_handle->upc_mask = 0x1ffff;
	handle->hal_handle->max_ustore = 0x4000;
	for_each_set_bit(ae, &ae_mask, ICP_QAT_UCLO_MAX_AE) {
		handle->hal_handle->aes[ae].free_addr = 0;
		handle->hal_handle->aes[ae].free_size =
		    handle->hal_handle->max_ustore;
		handle->hal_handle->aes[ae].ustore_size =
		    handle->hal_handle->max_ustore;
		handle->hal_handle->aes[ae].live_ctx_mask =
						ICP_QAT_UCLO_AE_ALL_CTX;
		max_en_ae_id = ae;
	}
	handle->hal_handle->ae_max_num = max_en_ae_id + 1;
	/* take all AEs out of reset */
	if (qat_hal_clr_reset(handle)) {
		dev_err(&GET_DEV(accel_dev), "qat_hal_clr_reset error\n");
		goto out_err;
	}
	qat_hal_clear_xfer(handle);
	if (!handle->fw_auth) {
		if (qat_hal_clear_gpr(handle))
			goto out_err;
	}

	/* Set SIGNATURE_ENABLE[0] to 0x1 in order to enable ALU_OUT csr */
	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
		csr_val = qat_hal_rd_ae_csr(handle, ae, SIGNATURE_ENABLE);
		csr_val |= 0x1;
		qat_hal_wr_ae_csr(handle, ae, SIGNATURE_ENABLE, csr_val);
	}
	accel_dev->fw_loader->fw_loader = handle;
	return 0;

out_err:
	kfree(handle->hal_handle);
out_hal_handle:
	kfree(handle);
	return -EFAULT;
}

void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle)
{
	if (!handle)
		return;
	kfree(handle->hal_handle);
	kfree(handle);
}

void qat_hal_start(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
		   unsigned int ctx_mask)
{
	int retry = 0;
	unsigned int fcu_sts = 0;

	if (handle->fw_auth) {
		SET_CAP_CSR(handle, FCU_CONTROL, FCU_CTRL_CMD_START);
		do {
			msleep(FW_AUTH_WAIT_PERIOD);
			fcu_sts = GET_CAP_CSR(handle, FCU_STATUS);
			if (((fcu_sts >> FCU_STS_DONE_POS) & 0x1))
				return;
		} while (retry++ < FW_AUTH_MAX_RETRY);
		pr_err("QAT: start error (AE 0x%x FCU_STS = 0x%x)\n", ae,
		       fcu_sts);
	} else {
		qat_hal_put_wakeup_event(handle, ae, (~ctx_mask) &
				 ICP_QAT_UCLO_AE_ALL_CTX, 0x10000);
		qat_hal_enable_ctx(handle, ae, ctx_mask);
	}
}

void qat_hal_stop(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
		  unsigned int ctx_mask)
{
	if (!handle->fw_auth)
		qat_hal_disable_ctx(handle, ae, ctx_mask);
}

void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle,
		    unsigned char ae, unsigned int ctx_mask, unsigned int upc)
{
	qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
			    handle->hal_handle->upc_mask & upc);
}

static void qat_hal_get_uwords(struct icp_qat_fw_loader_handle *handle,
			       unsigned char ae, unsigned int uaddr,
			       unsigned int words_num, u64 *uword)
{
	unsigned int i, uwrd_lo, uwrd_hi;
	unsigned int ustore_addr, misc_control;

	misc_control = qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL);
	qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL,
			  misc_control & 0xfffffffb);
	ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
	uaddr |= UA_ECS;
	for (i = 0; i < words_num; i++) {
		qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
		uaddr++;
		uwrd_lo = qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_LOWER);
		uwrd_hi = qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_UPPER);
		uword[i] = uwrd_hi;
		uword[i] = (uword[i] << 0x20) | uwrd_lo;
	}
	qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, misc_control);
	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
}

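/*
 * Write 'words_num' 32 bit entries into the AE's umem through the ustore
 * interface, spreading each value across the lower/upper ustore words and
 * adding the per-halfword parity bits the hardware expects.
 */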
void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle,
		     unsigned char ae, unsigned int uaddr,
		     unsigned int words_num, unsigned int *data)
{
	unsigned int i, ustore_addr;

	ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
	uaddr |= UA_ECS;
	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
	for (i = 0; i < words_num; i++) {
		unsigned int uwrd_lo, uwrd_hi, tmp;

		uwrd_lo = ((data[i] & 0xfff0000) << 4) | (0x3 << 18) |
			  ((data[i] & 0xff00) << 2) |
			  (0x3 << 8) | (data[i] & 0xff);
		uwrd_hi = (0xf << 4) | ((data[i] & 0xf0000000) >> 28);
		uwrd_hi |= (hweight32(data[i] & 0xffff) & 0x1) << 8;
		tmp = ((data[i] >> 0x10) & 0xffff);
		uwrd_hi |= (hweight32(tmp) & 0x1) << 9;
		qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
		qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
	}
	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
}

#define MAX_EXEC_INST 100
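/*
 * Execute an arbitrary microprogram on one context of an AE: the relevant
 * context state (LM addresses, PC, wakeup/signal events, CC enable, arb
 * control and, when inst_num <= MAX_EXEC_INST, the overwritten ustore
 * words) is saved, the supplied instructions are run to completion, and
 * everything is restored afterwards.
 */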
static int qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle,
				   unsigned char ae, unsigned char ctx,
				   u64 *micro_inst, unsigned int inst_num,
				   int code_off, unsigned int max_cycle,
				   unsigned int *endpc)
{
	u64 savuwords[MAX_EXEC_INST];
	unsigned int ind_lm_addr0, ind_lm_addr1;
	unsigned int ind_lm_addr_byte0, ind_lm_addr_byte1;
	unsigned int ind_cnt_sig;
	unsigned int ind_sig, act_sig;
	unsigned int csr_val = 0, newcsr_val;
	unsigned int savctx;
	unsigned int savcc, wakeup_events, savpc;
	unsigned int ctxarb_ctl, ctx_enables;

	if ((inst_num > handle->hal_handle->max_ustore) || !micro_inst) {
		pr_err("QAT: invalid instruction num %d\n", inst_num);
		return -EINVAL;
	}
	/* save current context */
	ind_lm_addr0 = qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_0_INDIRECT);
	ind_lm_addr1 = qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_1_INDIRECT);
	ind_lm_addr_byte0 = qat_hal_rd_indr_csr(handle, ae, ctx,
						INDIRECT_LM_ADDR_0_BYTE_INDEX);
	ind_lm_addr_byte1 = qat_hal_rd_indr_csr(handle, ae, ctx,
						INDIRECT_LM_ADDR_1_BYTE_INDEX);
	if (inst_num <= MAX_EXEC_INST)
		qat_hal_get_uwords(handle, ae, 0, inst_num, savuwords);
	qat_hal_get_wakeup_event(handle, ae, ctx, &wakeup_events);
	savpc = qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT);
	savpc = (savpc & handle->hal_handle->upc_mask) >> 0;
	ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
	ctx_enables &= IGNORE_W1C_MASK;
	savcc = qat_hal_rd_ae_csr(handle, ae, CC_ENABLE);
	savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
	ctxarb_ctl = qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL);
	ind_cnt_sig = qat_hal_rd_indr_csr(handle, ae, ctx,
					  FUTURE_COUNT_SIGNAL_INDIRECT);
	ind_sig = qat_hal_rd_indr_csr(handle, ae, ctx,
				      CTX_SIG_EVENTS_INDIRECT);
	act_sig = qat_hal_rd_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE);
	/* execute micro codes */
	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
	qat_hal_wr_uwords(handle, ae, 0, inst_num, micro_inst);
	qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT, 0);
	qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, ctx & ACS_ACNO);
	if (code_off)
		qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc & 0xffffdfff);
	qat_hal_put_wakeup_event(handle, ae, (1 << ctx), XCWE_VOLUNTARY);
	qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_SIG_EVENTS_INDIRECT, 0);
	qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0);
	qat_hal_enable_ctx(handle, ae, (1 << ctx));
	/* wait for micro codes to finish */
	if (qat_hal_wait_cycles(handle, ae, max_cycle, 1) != 0)
		return -EFAULT;
	if (endpc) {
		unsigned int ctx_status;

		ctx_status = qat_hal_rd_indr_csr(handle, ae, ctx,
						 CTX_STS_INDIRECT);
		*endpc = ctx_status & handle->hal_handle->upc_mask;
	}
	/* restore to saved context */
	qat_hal_disable_ctx(handle, ae, (1 << ctx));
	if (inst_num <= MAX_EXEC_INST)
		qat_hal_wr_uwords(handle, ae, 0, inst_num, savuwords);
	qat_hal_put_wakeup_event(handle, ae, (1 << ctx), wakeup_events);
	qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT,
			    handle->hal_handle->upc_mask & savpc);
	csr_val = qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL);
	newcsr_val = CLR_BIT(csr_val, MMC_SHARE_CS_BITPOS);
	qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, newcsr_val);
	qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc);
	qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, savctx & ACS_ACNO);
	qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_ctl);
	qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
			    LM_ADDR_0_INDIRECT, ind_lm_addr0);
	qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
			    LM_ADDR_1_INDIRECT, ind_lm_addr1);
	qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
			    INDIRECT_LM_ADDR_0_BYTE_INDEX, ind_lm_addr_byte0);
	qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
			    INDIRECT_LM_ADDR_1_BYTE_INDEX, ind_lm_addr_byte1);
	qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
			    FUTURE_COUNT_SIGNAL_INDIRECT, ind_cnt_sig);
	qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
			    CTX_SIG_EVENTS_INDIRECT, ind_sig);
	qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, act_sig);
	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);

	return 0;
}

static int qat_hal_rd_rel_reg(struct icp_qat_fw_loader_handle *handle,
			      unsigned char ae, unsigned char ctx,
			      enum icp_qat_uof_regtype reg_type,
			      unsigned short reg_num, unsigned int *data)
{
	unsigned int savctx, uaddr, uwrd_lo, uwrd_hi;
	unsigned int ctxarb_cntl, ustore_addr, ctx_enables;
	unsigned short reg_addr;
	int status = 0;
	u64 insts, savuword;

	reg_addr = qat_hal_get_reg_addr(reg_type, reg_num);
	if (reg_addr == BAD_REGADDR) {
		pr_err("QAT: bad regaddr=0x%x\n", reg_addr);
		return -EINVAL;
	}
	switch (reg_type) {
	case ICP_GPA_REL:
		insts = 0xA070000000ull | (reg_addr & 0x3ff);
		break;
	default:
		insts = (u64)0xA030000000ull | ((reg_addr & 0x3ff) << 10);
		break;
	}
	savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
	ctxarb_cntl = qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL);
	ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
	ctx_enables &= IGNORE_W1C_MASK;
	if (ctx != (savctx & ACS_ACNO))
		qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
				  ctx & ACS_ACNO);
	qat_hal_get_uwords(handle, ae, 0, 1, &savuword);
	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
	ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
	uaddr = UA_ECS;
	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
	insts = qat_hal_set_uword_ecc(insts);
	uwrd_lo = (unsigned int)(insts & 0xffffffff);
	uwrd_hi = (unsigned int)(insts >> 0x20);
	qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
	qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
	/* delay for at least 8 cycles */
	qat_hal_wait_cycles(handle, ae, 0x8, 0);
	/*
	 * read ALU output
	 * the instruction should have been executed
	 * prior to clearing the ECS in putUwords
	 */
	*data = qat_hal_rd_ae_csr(handle, ae, ALU_OUT);
	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
	qat_hal_wr_uwords(handle, ae, 0, 1, &savuword);
	if (ctx != (savctx & ACS_ACNO))
		qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
				  savctx & ACS_ACNO);
	qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_cntl);
	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);

	return status;
}

static int qat_hal_wr_rel_reg(struct icp_qat_fw_loader_handle *handle,
			      unsigned char ae, unsigned char ctx,
			      enum icp_qat_uof_regtype reg_type,
			      unsigned short reg_num, unsigned int data)
{
	unsigned short src_hiaddr, src_lowaddr, dest_addr, data16hi, data16lo;
	u64 insts[] = {
		0x0F440000000ull,
		0x0F040000000ull,
		0x0F0000C0300ull,
		0x0E000010000ull
	};
	const int num_inst = ARRAY_SIZE(insts), code_off = 1;
	const int imm_w1 = 0, imm_w0 = 1;

	dest_addr = qat_hal_get_reg_addr(reg_type, reg_num);
	if (dest_addr == BAD_REGADDR) {
		pr_err("QAT: bad destAddr=0x%x\n", dest_addr);
		return -EINVAL;
	}

	data16lo = 0xffff & data;
	data16hi = 0xffff & (data >> 0x10);
	src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST, (unsigned short)
					  (0xff & data16hi));
	src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST, (unsigned short)
					   (0xff & data16lo));
	switch (reg_type) {
	case ICP_GPA_REL:
		insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) |
		    ((src_hiaddr & 0x3ff) << 10) | (dest_addr & 0x3ff);
		insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) |
		    ((src_lowaddr & 0x3ff) << 10) | (dest_addr & 0x3ff);
		break;
	default:
		insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) |
		    ((dest_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff);

		insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) |
		    ((dest_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff);
		break;
	}

	return qat_hal_exec_micro_inst(handle, ae, ctx, insts, num_inst,
				       code_off, num_inst * 0x5, NULL);
}

int qat_hal_get_ins_num(void)
{
	return ARRAY_SIZE(inst_4b);
}

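/*
 * Append a patched copy of the inst_4b template to micro_inst: the
 * INSERT_IMMED_GPR*_CONST() fixups encode the local memory address and the
 * 32 bit value to store into the immediate fields of the template.
 * Returns the number of instructions added.
 */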
static int qat_hal_concat_micro_code(u64 *micro_inst,
				     unsigned int inst_num, unsigned int size,
				     unsigned int addr, unsigned int *value)
{
	int i;
	unsigned int cur_value;
	const u64 *inst_arr;
	int fixup_offset;
	int usize = 0;
	int orig_num;

	orig_num = inst_num;
	cur_value = value[0];
	inst_arr = inst_4b;
	usize = ARRAY_SIZE(inst_4b);
	fixup_offset = inst_num;
	for (i = 0; i < usize; i++)
		micro_inst[inst_num++] = inst_arr[i];
	INSERT_IMMED_GPRA_CONST(micro_inst[fixup_offset], (addr));
	fixup_offset++;
	INSERT_IMMED_GPRA_CONST(micro_inst[fixup_offset], 0);
	fixup_offset++;
	INSERT_IMMED_GPRB_CONST(micro_inst[fixup_offset], (cur_value >> 0));
	fixup_offset++;
	INSERT_IMMED_GPRB_CONST(micro_inst[fixup_offset], (cur_value >> 0x10));

	return inst_num - orig_num;
}

static int qat_hal_exec_micro_init_lm(struct icp_qat_fw_loader_handle *handle,
				      unsigned char ae, unsigned char ctx,
				      int *pfirst_exec, u64 *micro_inst,
				      unsigned int inst_num)
{
	int stat = 0;
	unsigned int gpra0 = 0, gpra1 = 0, gpra2 = 0;
	unsigned int gprb0 = 0, gprb1 = 0;

	if (*pfirst_exec) {
		qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, &gpra0);
		qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x1, &gpra1);
		qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, &gpra2);
		qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, &gprb0);
		qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0x1, &gprb1);
		*pfirst_exec = 0;
	}
	stat = qat_hal_exec_micro_inst(handle, ae, ctx, micro_inst, inst_num, 1,
				       inst_num * 0x5, NULL);
	if (stat != 0)
		return -EFAULT;
	qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, gpra0);
	qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x1, gpra1);
	qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, gpra2);
	qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, gprb0);
	qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0x1, gprb1);

	return 0;
}

int qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle,
			unsigned char ae,
			struct icp_qat_uof_batch_init *lm_init_header)
{
	struct icp_qat_uof_batch_init *plm_init;
	u64 *micro_inst_arry;
	int micro_inst_num;
	int alloc_inst_size;
	int first_exec = 1;
	int stat = 0;

	plm_init = lm_init_header->next;
	alloc_inst_size = lm_init_header->size;
	if ((unsigned int)alloc_inst_size > handle->hal_handle->max_ustore)
		alloc_inst_size = handle->hal_handle->max_ustore;
	micro_inst_arry = kmalloc_array(alloc_inst_size, sizeof(u64),
					GFP_KERNEL);
	if (!micro_inst_arry)
		return -ENOMEM;
	micro_inst_num = 0;
	while (plm_init) {
		unsigned int addr, *value, size;

		ae = plm_init->ae;
		addr = plm_init->addr;
		value = plm_init->value;
		size = plm_init->size;
		micro_inst_num += qat_hal_concat_micro_code(micro_inst_arry,
							    micro_inst_num,
							    size, addr, value);
		plm_init = plm_init->next;
	}
	/* exec micro codes */
	if (micro_inst_arry && (micro_inst_num > 0)) {
		micro_inst_arry[micro_inst_num++] = 0x0E000010000ull;
		stat = qat_hal_exec_micro_init_lm(handle, ae, 0, &first_exec,
						  micro_inst_arry,
						  micro_inst_num);
	}
	kfree(micro_inst_arry);
	return stat;
}

static int qat_hal_put_rel_rd_xfer(struct icp_qat_fw_loader_handle *handle,
				   unsigned char ae, unsigned char ctx,
				   enum icp_qat_uof_regtype reg_type,
				   unsigned short reg_num, unsigned int val)
{
	int status = 0;
	unsigned int reg_addr;
	unsigned int ctx_enables;
	unsigned short mask;
	unsigned short dr_offset = 0x10;

	ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
	if (CE_INUSE_CONTEXTS & ctx_enables) {
		if (ctx & 0x1) {
			pr_err("QAT: bad 4-ctx mode, ctx=0x%x\n", ctx);
			return -EINVAL;
		}
		mask = 0x1f;
		dr_offset = 0x20;
	} else {
		mask = 0x0f;
	}
	if (reg_num & ~mask)
		return -EINVAL;
	reg_addr = reg_num + (ctx << 0x5);
	switch (reg_type) {
	case ICP_SR_RD_REL:
	case ICP_SR_REL:
		SET_AE_XFER(handle, ae, reg_addr, val);
		break;
	case ICP_DR_RD_REL:
	case ICP_DR_REL:
		SET_AE_XFER(handle, ae, (reg_addr + dr_offset), val);
		break;
	default:
		status = -EINVAL;
		break;
	}
	return status;
}

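/*
 * Write-transfer registers cannot be written directly from the host, so a
 * small microprogram is run on the AE instead: the 32 bit value is first
 * assembled in a scratch GPB register and then moved into the target
 * transfer register.  The scratch GPR is saved and restored around the run.
 */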
static int qat_hal_put_rel_wr_xfer(struct icp_qat_fw_loader_handle *handle,
				   unsigned char ae, unsigned char ctx,
				   enum icp_qat_uof_regtype reg_type,
				   unsigned short reg_num, unsigned int data)
{
	unsigned int gprval, ctx_enables;
	unsigned short src_hiaddr, src_lowaddr, gpr_addr, xfr_addr, data16hi,
	    data16low;
	unsigned short reg_mask;
	int status = 0;
	u64 micro_inst[] = {
		0x0F440000000ull,
		0x0F040000000ull,
		0x0A000000000ull,
		0x0F0000C0300ull,
		0x0E000010000ull
	};
	const int num_inst = ARRAY_SIZE(micro_inst), code_off = 1;
	const unsigned short gprnum = 0, dly = num_inst * 0x5;

	ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
	if (CE_INUSE_CONTEXTS & ctx_enables) {
		if (ctx & 0x1) {
			pr_err("QAT: bad 4-ctx mode, ctx=0x%x\n", ctx);
			return -EINVAL;
		}
		reg_mask = (unsigned short)~0x1f;
	} else {
		reg_mask = (unsigned short)~0xf;
	}
	if (reg_num & reg_mask)
		return -EINVAL;
	xfr_addr = qat_hal_get_reg_addr(reg_type, reg_num);
	if (xfr_addr == BAD_REGADDR) {
		pr_err("QAT: bad xfrAddr=0x%x\n", xfr_addr);
		return -EINVAL;
	}
	status = qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, &gprval);
	if (status) {
		pr_err("QAT: failed to read register\n");
		return status;
	}
	gpr_addr = qat_hal_get_reg_addr(ICP_GPB_REL, gprnum);
	data16low = 0xffff & data;
	data16hi = 0xffff & (data >> 0x10);
	src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST,
					  (unsigned short)(0xff & data16hi));
	src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST,
					   (unsigned short)(0xff & data16low));
	micro_inst[0] = micro_inst[0x0] | ((data16hi >> 8) << 20) |
	    ((gpr_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff);
	micro_inst[1] = micro_inst[0x1] | ((data16low >> 8) << 20) |
	    ((gpr_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff);
	micro_inst[0x2] = micro_inst[0x2] |
	    ((xfr_addr & 0x3ff) << 20) | ((gpr_addr & 0x3ff) << 10);
	status = qat_hal_exec_micro_inst(handle, ae, ctx, micro_inst, num_inst,
					 code_off, dly, NULL);
	qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, gprval);
	return status;
}

static int qat_hal_put_rel_nn(struct icp_qat_fw_loader_handle *handle,
			      unsigned char ae, unsigned char ctx,
			      unsigned short nn, unsigned int val)
{
	unsigned int ctx_enables;
	int stat = 0;

	ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
	ctx_enables &= IGNORE_W1C_MASK;
	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables | CE_NN_MODE);

	stat = qat_hal_put_rel_wr_xfer(handle, ae, ctx, ICP_NEIGH_REL, nn, val);
	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
	return stat;
}

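/*
 * Map an absolute register number to the equivalent relative register
 * number and owning context, taking the 4-context vs 8-context mode of the
 * AE into account.
 */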
static int qat_hal_convert_abs_to_rel(struct icp_qat_fw_loader_handle
				      *handle, unsigned char ae,
				      unsigned short absreg_num,
				      unsigned short *relreg,
				      unsigned char *ctx)
{
	unsigned int ctx_enables;

	ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
	if (ctx_enables & CE_INUSE_CONTEXTS) {
		/* 4-ctx mode */
		*relreg = absreg_num & 0x1F;
		*ctx = (absreg_num >> 0x4) & 0x6;
	} else {
		/* 8-ctx mode */
		*relreg = absreg_num & 0x0F;
		*ctx = (absreg_num >> 0x4) & 0x7;
	}
	return 0;
}

int qat_hal_init_gpr(struct icp_qat_fw_loader_handle *handle,
		     unsigned char ae, unsigned char ctx_mask,
		     enum icp_qat_uof_regtype reg_type,
		     unsigned short reg_num, unsigned int regdata)
{
	int stat = 0;
	unsigned short reg;
	unsigned char ctx = 0;
	enum icp_qat_uof_regtype type;

	if (reg_num >= ICP_QAT_UCLO_MAX_GPR_REG)
		return -EINVAL;

	do {
		if (ctx_mask == 0) {
			qat_hal_convert_abs_to_rel(handle, ae, reg_num, &reg,
						   &ctx);
			type = reg_type - 1;
		} else {
			reg = reg_num;
			type = reg_type;
			if (!test_bit(ctx, (unsigned long *)&ctx_mask))
				continue;
		}
		stat = qat_hal_wr_rel_reg(handle, ae, ctx, type, reg, regdata);
		if (stat) {
			pr_err("QAT: write gpr fail\n");
			return -EINVAL;
		}
	} while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));

	return 0;
}

int qat_hal_init_wr_xfer(struct icp_qat_fw_loader_handle *handle,
			 unsigned char ae, unsigned char ctx_mask,
			 enum icp_qat_uof_regtype reg_type,
			 unsigned short reg_num, unsigned int regdata)
{
	int stat = 0;
	unsigned short reg;
	unsigned char ctx = 0;
	enum icp_qat_uof_regtype type;

	if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG)
		return -EINVAL;

	do {
		if (ctx_mask == 0) {
			qat_hal_convert_abs_to_rel(handle, ae, reg_num, &reg,
						   &ctx);
			type = reg_type - 3;
		} else {
			reg = reg_num;
			type = reg_type;
			if (!test_bit(ctx, (unsigned long *)&ctx_mask))
				continue;
		}
		stat = qat_hal_put_rel_wr_xfer(handle, ae, ctx, type, reg,
					       regdata);
		if (stat) {
			pr_err("QAT: write wr xfer fail\n");
			return -EINVAL;
		}
	} while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));

	return 0;
}

int qat_hal_init_rd_xfer(struct icp_qat_fw_loader_handle *handle,
			 unsigned char ae, unsigned char ctx_mask,
			 enum icp_qat_uof_regtype reg_type,
			 unsigned short reg_num, unsigned int regdata)
{
	int stat = 0;
	unsigned short reg;
	unsigned char ctx = 0;
	enum icp_qat_uof_regtype type;

	if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG)
		return -EINVAL;

	do {
		if (ctx_mask == 0) {
			qat_hal_convert_abs_to_rel(handle, ae, reg_num, &reg,
						   &ctx);
			type = reg_type - 3;
		} else {
			reg = reg_num;
			type = reg_type;
			if (!test_bit(ctx, (unsigned long *)&ctx_mask))
				continue;
		}
		stat = qat_hal_put_rel_rd_xfer(handle, ae, ctx, type, reg,
					       regdata);
		if (stat) {
			pr_err("QAT: write rd xfer fail\n");
			return -EINVAL;
		}
	} while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));

	return 0;
}

int qat_hal_init_nn(struct icp_qat_fw_loader_handle *handle,
		    unsigned char ae, unsigned char ctx_mask,
		    unsigned short reg_num, unsigned int regdata)
{
	int stat = 0;
	unsigned char ctx;

	if (ctx_mask == 0)
		return -EINVAL;

	for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
		if (!test_bit(ctx, (unsigned long *)&ctx_mask))
			continue;
		stat = qat_hal_put_rel_nn(handle, ae, ctx, reg_num, regdata);
		if (stat) {
			pr_err("QAT: write neigh error\n");
			return -EINVAL;
		}
	}

	return 0;
}