/* SPDX-License-Identifier: GPL-2.0-only
 * Copyright (C) 2020 Marvell.
 */

#ifndef __OTX2_CPT_REQMGR_H
#define __OTX2_CPT_REQMGR_H

#include "otx2_cpt_common.h"

/* Completion code size and initial value */
#define OTX2_CPT_COMPLETION_CODE_SIZE 8
#define OTX2_CPT_COMPLETION_CODE_INIT OTX2_CPT_COMP_E_NOTDONE
/*
 * The maximum total number of SG buffers is 100; it is divided
 * equally between input and output.
 */
#define OTX2_CPT_MAX_SG_IN_CNT  50
#define OTX2_CPT_MAX_SG_OUT_CNT 50

/* DMA mode direct or SG */
#define OTX2_CPT_DMA_MODE_DIRECT 0
#define OTX2_CPT_DMA_MODE_SG     1

/* Context source CPTR or DPTR */
#define OTX2_CPT_FROM_CPTR 0
#define OTX2_CPT_FROM_DPTR 1

#define OTX2_CPT_MAX_REQ_SIZE 65535
union otx2_cpt_opcode {
	u16 flags;
	struct {
		u8 major;
		u8 minor;
	} s;
};

struct otx2_cptvf_request {
	u32 param1;
	u32 param2;
	u16 dlen;
	union otx2_cpt_opcode opcode;
};

/*
 * CPT_INST_S software command definitions
 * Words EI (0-3)
 */
union otx2_cpt_iq_cmd_word0 {
	u64 u;
	struct {
		__be16 opcode;
		__be16 param1;
		__be16 param2;
		__be16 dlen;
	} s;
};

union otx2_cpt_iq_cmd_word3 {
	u64 u;
	struct {
		u64 cptr:61;
		u64 grp:3;
	} s;
};

struct otx2_cpt_iq_command {
	union otx2_cpt_iq_cmd_word0 cmd;
	u64 dptr;
	u64 rptr;
	union otx2_cpt_iq_cmd_word3 cptr;
};
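
/*
 * Illustrative sketch only: one way the big-endian fields of word0 could
 * be filled from a core-specific request. The helper name is an
 * assumption for the example, not part of the request-manager API.
 */
static inline void
otx2_cpt_fill_word0_example(union otx2_cpt_iq_cmd_word0 *word0,
			    const struct otx2_cptvf_request *req)
{
	word0->s.opcode = cpu_to_be16(req->opcode.flags); /* Major/minor opcode */
	word0->s.param1 = cpu_to_be16(req->param1);
	word0->s.param2 = cpu_to_be16(req->param2);
	word0->s.dlen   = cpu_to_be16(req->dlen);         /* Input data length */
}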

struct otx2_cpt_pending_entry {
	void *completion_addr;	/* Completion address */
	void *info;
	/* Kernel async request callback */
	void (*callback)(int status, void *arg1, void *arg2);
	struct crypto_async_request *areq; /* Async request callback arg */
	u8 resume_sender;	/* Notify sender to resume sending requests */
	u8 busy;		/* Entry status (free/busy) */
};

struct otx2_cpt_pending_queue {
	struct otx2_cpt_pending_entry *head; /* Head of the queue */
	u32 front;		/* Process work from here */
	u32 rear;		/* Append new work here */
	u32 pending_count;	/* Pending requests count */
	u32 qlen;		/* Queue length */
	spinlock_t lock;	/* Queue lock */
};
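
/*
 * Illustrative sketch only: the pending queue is used as a ring of qlen
 * entries, so an index advance helper could look like this. This helper
 * is an assumption for the example; the real queue handling lives in the
 * request-manager implementation.
 */
static inline u32 otx2_cpt_pq_next_idx_example(struct otx2_cpt_pending_queue *pq,
					       u32 idx)
{
	return (idx + 1) % pq->qlen;	/* Wrap around at the queue length */
}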

struct otx2_cpt_buf_ptr {
	u8 *vptr;
	dma_addr_t dma_addr;
	u16 size;
};

union otx2_cpt_ctrl_info {
	u32 flags;
	struct {
#if defined(__BIG_ENDIAN_BITFIELD)
		u32 reserved_6_31:26;
		u32 grp:3;	/* Group bits */
		u32 dma_mode:2;	/* DMA mode */
		u32 se_req:1;	/* To SE core */
#else
		u32 se_req:1;	/* To SE core */
		u32 dma_mode:2;	/* DMA mode */
		u32 grp:3;	/* Group bits */
		u32 reserved_6_31:26;
#endif
	} s;
};
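
/*
 * Illustrative sketch only: how the control word might be composed for a
 * scatter-gather request routed to an SE core. The helper name and the
 * chosen values are assumptions for the example, not driver API.
 */
static inline void otx2_cpt_fill_ctrl_example(union otx2_cpt_ctrl_info *ctrl,
					      u8 eng_grp)
{
	ctrl->flags = 0;				/* Clear reserved bits */
	ctrl->s.se_req = 1;				/* Route to an SE core */
	ctrl->s.dma_mode = OTX2_CPT_DMA_MODE_SG;	/* Use scatter-gather lists */
	ctrl->s.grp = eng_grp;				/* Engine group number */
}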

struct otx2_cpt_req_info {
	/* Kernel async request callback */
	void (*callback)(int status, void *arg1, void *arg2);
	struct crypto_async_request *areq; /* Async request callback arg */
	struct otx2_cptvf_request req;/* Request information (core specific) */
	union otx2_cpt_ctrl_info ctrl;/* User control information */
	struct otx2_cpt_buf_ptr in[OTX2_CPT_MAX_SG_IN_CNT];
	struct otx2_cpt_buf_ptr out[OTX2_CPT_MAX_SG_OUT_CNT];
	u8 *iv_out;     /* IV to send back */
	u16 rlen;	/* Output length */
	u8 in_cnt;	/* Number of input buffers */
	u8 out_cnt;	/* Number of output buffers */
	u8 req_type;	/* Type of request */
	u8 is_enc;	/* Whether the request is an encryption request */
	u8 is_trunc_hmac; /* Whether a truncated HMAC is used */
};

struct otx2_cpt_inst_info {
	struct otx2_cpt_pending_entry *pentry;
	struct otx2_cpt_req_info *req;
	struct pci_dev *pdev;
	void *completion_addr;
	u8 *out_buffer;
	u8 *in_buffer;
	dma_addr_t dptr_baddr;
	dma_addr_t rptr_baddr;
	dma_addr_t comp_baddr;
	unsigned long time_in;
	u32 dlen;
	u32 dma_len;
	u8 extra_time;
};

struct otx2_cpt_sglist_component {
	__be16 len0;
	__be16 len1;
	__be16 len2;
	__be16 len3;
	__be64 ptr0;
	__be64 ptr1;
	__be64 ptr2;
	__be64 ptr3;
};
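
/*
 * Illustrative sketch only: one 4-entry scatter-gather component filled
 * from otx2_cpt_buf_ptr entries, mirroring the big-endian field types
 * above. The helper is an assumption for the example, not driver API.
 */
static inline void
otx2_cpt_fill_sg_comp_example(struct otx2_cpt_sglist_component *sg,
			      const struct otx2_cpt_buf_ptr *buf)
{
	sg->len0 = cpu_to_be16(buf[0].size);
	sg->ptr0 = cpu_to_be64(buf[0].dma_addr);
	sg->len1 = cpu_to_be16(buf[1].size);
	sg->ptr1 = cpu_to_be64(buf[1].dma_addr);
	/* len2/ptr2 and len3/ptr3 follow the same pattern */
}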

/*
 * Unmap any DMA mappings taken for the request's input/output buffers and
 * for the instruction's DPTR area, then free the instruction info.
 */
static inline void otx2_cpt_info_destroy(struct pci_dev *pdev,
					 struct otx2_cpt_inst_info *info)
{
	struct otx2_cpt_req_info *req;
	int i;

	if (info->dptr_baddr)
		dma_unmap_single(&pdev->dev, info->dptr_baddr,
				 info->dma_len, DMA_BIDIRECTIONAL);

	if (info->req) {
		req = info->req;
		for (i = 0; i < req->out_cnt; i++) {
			if (req->out[i].dma_addr)
				dma_unmap_single(&pdev->dev,
						 req->out[i].dma_addr,
						 req->out[i].size,
						 DMA_BIDIRECTIONAL);
		}

		for (i = 0; i < req->in_cnt; i++) {
			if (req->in[i].dma_addr)
				dma_unmap_single(&pdev->dev,
						 req->in[i].dma_addr,
						 req->in[i].size,
						 DMA_BIDIRECTIONAL);
		}
	}
	kfree(info);
}

struct otx2_cptlf_wqe;
int otx2_cpt_do_request(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
			int cpu_num);
void otx2_cpt_post_process(struct otx2_cptlf_wqe *wqe);
int otx2_cpt_get_kcrypto_eng_grp_num(struct pci_dev *pdev);
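
/*
 * Illustrative sketch only: a minimal example of preparing a request and
 * submitting it with otx2_cpt_do_request(). The helper name and the
 * control-word choices are assumptions for the example, not part of this
 * header's contract; buffer setup is left to the caller.
 */
static inline int otx2_cpt_submit_example(struct pci_dev *pdev,
					  struct otx2_cpt_req_info *req,
					  void (*done)(int status, void *arg1,
						       void *arg2),
					  struct crypto_async_request *areq,
					  int cpu_num)
{
	req->callback = done;		/* Called when the CPT completes */
	req->areq = areq;		/* Passed back to the callback */
	req->ctrl.s.dma_mode = OTX2_CPT_DMA_MODE_SG;
	req->ctrl.s.se_req = 1;		/* Route the request to an SE core */
	req->ctrl.s.grp = otx2_cpt_get_kcrypto_eng_grp_num(pdev);

	return otx2_cpt_do_request(pdev, req, cpu_num);
}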

#endif /* __OTX2_CPT_REQMGR_H */