/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef IB_SRP_H
#define IB_SRP_H

#include <linux/types.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>

#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/srp.h>	/* struct srp_cmd, srp_imm_buf, srp_direct_buf */

#include <rdma/ib_verbs.h>
#include <rdma/ib_sa.h>
#include <rdma/ib_cm.h>
#include <rdma/rdma_cm.h>

enum {
	SRP_PATH_REC_TIMEOUT_MS	= 1000,
	SRP_ABORT_TIMEOUT_MS	= 5000,

	SRP_PORT_REDIRECT	= 1,
	SRP_DLID_REDIRECT	= 2,
	SRP_STALE_CONN		= 3,

	SRP_DEF_SG_TABLESIZE	= 12,

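	/*
	 * Send queue sizing: SRP_DEFAULT_QUEUE_SIZE (64) entries, of which
	 * one is set aside for SRP_RSP IUs and one for task management
	 * IUs, leaving SRP_DEFAULT_CMD_SQ_SIZE (62) entries for SCSI
	 * commands.
	 */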
	SRP_DEFAULT_QUEUE_SIZE	= 1 << 6,
	SRP_RSP_SQ_SIZE		= 1,
	SRP_TSK_MGMT_SQ_SIZE	= 1,
	SRP_DEFAULT_CMD_SQ_SIZE = SRP_DEFAULT_QUEUE_SIZE - SRP_RSP_SQ_SIZE -
				  SRP_TSK_MGMT_SQ_SIZE,

	SRP_MAX_PAGES_PER_MR	= 512,

	SRP_MAX_ADD_CDB_LEN	= 16,

	SRP_MAX_IMM_SGE		= 2,
	SRP_MAX_SGE		= SRP_MAX_IMM_SGE + 1,
	/*
	 * Choose the immediate data offset such that a 32-byte CDB still fits.
	 */
	SRP_IMM_DATA_OFFSET	= sizeof(struct srp_cmd) +
				  SRP_MAX_ADD_CDB_LEN +
				  sizeof(struct srp_imm_buf),
};

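/*
 * Special tag values: SRP_TAG_NO_REQ denotes an IU that has no associated
 * request; SRP_TAG_TSK_MGMT is set in the tag of task management IUs so
 * that their responses can be distinguished from SCSI command responses.
 */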
enum {
	SRP_TAG_NO_REQ		= ~0U,
	SRP_TAG_TSK_MGMT	= BIT(31),
};

enum srp_target_state {
	SRP_TARGET_SCANNING,
	SRP_TARGET_LIVE,
	SRP_TARGET_REMOVED,
};

enum srp_iu_type {
	SRP_IU_CMD,
	SRP_IU_TSK_MGMT,
	SRP_IU_RSP,
};

/*
 * @mr_page_mask: HCA memory registration page mask.
 * @mr_page_size: HCA memory registration page size.
 * @mr_max_size: Maximum size in bytes of a single FR registration request.
 */
struct srp_device {
	struct list_head	dev_list;
	struct ib_device       *dev;
	struct ib_pd	       *pd;
	u32			global_rkey;
	u64			mr_page_mask;
	int			mr_page_size;
	int			mr_max_size;
	int			max_pages_per_mr;
	bool			has_fr;
	bool			use_fast_reg;
};
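
/*
 * Illustrative sketch, assuming the usual convention that
 * mr_page_mask == ~((u64)mr_page_size - 1) (an assumption, not something
 * this header defines): the registration page containing a DMA address
 * and the offset into that page can then be derived as
 *
 *	dma_addr_t page_start = dma_addr & dev->mr_page_mask;
 *	unsigned int page_off = dma_addr & ~dev->mr_page_mask;
 */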

struct srp_host {
	struct srp_device      *srp_dev;
	u8			port;
	struct device		dev;
	struct list_head	target_list;
	spinlock_t		target_lock;	/* protects target_list */
	struct completion	released;
	struct list_head	list;
	struct mutex		add_target_mutex;
};

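/*
 * Per-command state: one srp_request is associated with each outstanding
 * SCSI command. fr_list holds the fast registration descriptors used to
 * map the command's data buffer, and indirect_desc points to the indirect
 * descriptor table, which is mapped for DMA at indirect_dma_addr.
 */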
struct srp_request {
	struct scsi_cmnd       *scmnd;
	struct srp_iu	       *cmd;
	struct srp_fr_desc     **fr_list;
	struct srp_direct_buf  *indirect_desc;
	dma_addr_t		indirect_dma_addr;
	short			nmdesc;
	struct ib_cqe		reg_cqe;
};

/**
 * struct srp_rdma_ch
 * @comp_vector: Completion vector used by this RDMA channel.
 * @max_it_iu_len: Maximum initiator-to-target information unit length.
 * @max_ti_iu_len: Maximum target-to-initiator information unit length.
 */
struct srp_rdma_ch {
	/* These are RW in the hot path, and commonly used together */
	struct list_head	free_tx;
	spinlock_t		lock;
	s32			req_lim;

	/* These are read-only in the hot path */
	struct srp_target_port *target ____cacheline_aligned_in_smp;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct ib_qp	       *qp;
	struct srp_fr_pool     *fr_pool;
	uint32_t		max_it_iu_len;
	uint32_t		max_ti_iu_len;
	u8			max_imm_sge;
	bool			use_imm_data;

	/* Everything above this point is used in the hot path of
	 * command processing. Try to keep these members packed into
	 * cache lines.
	 */

	struct completion	done;
	int			status;

	union {
		struct ib_cm {
			struct sa_path_rec	path;
			struct ib_sa_query	*path_query;
			int			path_query_id;
			struct ib_cm_id		*cm_id;
		} ib_cm;
		struct rdma_cm {
			struct rdma_cm_id	*cm_id;
		} rdma_cm;
	};

	struct srp_iu	      **tx_ring;
	struct srp_iu	      **rx_ring;
	int			comp_vector;

	u64			tsk_mgmt_tag;
	struct completion	tsk_mgmt_done;
	u8			tsk_mgmt_status;
	bool			connected;
};
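
/*
 * Illustrative sketch, based on the member grouping above rather than on
 * anything defined in this header: a send slot is typically claimed by
 * popping an IU off free_tx under the channel lock and, when the IU
 * consumes a request limit credit, decrementing req_lim:
 *
 *	spin_lock_irqsave(&ch->lock, flags);
 *	if (ch->req_lim > 0 && !list_empty(&ch->free_tx)) {
 *		iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
 *		list_del(&iu->list);
 *		--ch->req_lim;
 *	}
 *	spin_unlock_irqrestore(&ch->lock, flags);
 */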

/**
 * struct srp_target_port
 * @comp_vector: Completion vector used by the first RDMA channel created for
 *   this target port.
 */
struct srp_target_port {
	/* read and written in the hot path */
	spinlock_t		lock;

	/* read only in the hot path */
	u32			global_rkey;
	struct srp_rdma_ch	*ch;
	struct net		*net;
	u32			ch_count;
	u32			lkey;
	enum srp_target_state	state;
	uint32_t		max_it_iu_size;
	unsigned int		cmd_sg_cnt;
	unsigned int		indirect_size;
	bool			allow_ext_sg;

	/* other member variables */
	union ib_gid		sgid;
	__be64			id_ext;
	__be64			ioc_guid;
	__be64			initiator_ext;
	u16			io_class;
	struct srp_host	       *srp_host;
	struct Scsi_Host       *scsi_host;
	struct srp_rport       *rport;
	char			target_name[32];
	unsigned int		scsi_id;
	unsigned int		sg_tablesize;
	unsigned int		target_can_queue;
	int			mr_pool_size;
	int			mr_per_cmd;
	int			queue_size;
	int			comp_vector;
	int			tl_retry_count;

	bool			using_rdma_cm;

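	/*
	 * Connection addressing: which member of the union below is valid
	 * depends on whether the RDMA CM or the IB CM is used, i.e. on
	 * using_rdma_cm.
	 */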
	union {
		struct {
			__be64			service_id;
			union ib_gid		orig_dgid;
			__be16			pkey;
		} ib_cm;
		struct {
			union {
				struct sockaddr_in	ip4;
				struct sockaddr_in6	ip6;
				struct sockaddr		sa;
				struct sockaddr_storage ss;
			} src;
			union {
				struct sockaddr_in	ip4;
				struct sockaddr_in6	ip6;
				struct sockaddr		sa;
				struct sockaddr_storage ss;
			} dst;
			bool src_specified;
		} rdma_cm;
	};

	u32			rq_tmo_jiffies;

	int			zero_req_lim;

	struct work_struct	tl_err_work;
	struct work_struct	remove_work;

	struct list_head	list;
	bool			qp_in_error;
};

struct srp_iu {
	struct list_head	list;
	u64			dma;
	void		       *buf;
	size_t			size;
	enum dma_data_direction	direction;
	u32			num_sge;
	struct ib_sge		sge[SRP_MAX_SGE];
	struct ib_cqe		cqe;
};

/**
 * struct srp_fr_desc - fast registration work request arguments
 * @entry: Entry in srp_fr_pool.free_list.
 * @mr:    Memory region.
 */
struct srp_fr_desc {
	struct list_head		entry;
	struct ib_mr			*mr;
};

/**
 * struct srp_fr_pool - pool of fast registration descriptors
 *
 * An entry is available for allocation if and only if it occurs in @free_list.
 *
 * @size:      Number of descriptors in this pool.
 * @max_page_list_len: Maximum fast registration work request page list length.
 * @lock:      Protects free_list.
 * @free_list: List of free descriptors.
 * @desc:      Fast registration descriptor pool.
 */
struct srp_fr_pool {
	int			size;
	int			max_page_list_len;
	spinlock_t		lock;
	struct list_head	free_list;
	struct srp_fr_desc	desc[];
};
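
/*
 * Since desc[] is a flexible array member, a pool with room for pool_size
 * descriptors would typically be allocated in one go, for example
 * (illustrative only, not a helper provided by this header):
 *
 *	pool = kzalloc(struct_size(pool, desc, pool_size), GFP_KERNEL);
 */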

/**
 * struct srp_map_state - per-request DMA memory mapping state
 * @desc:	    Pointer to the element of the SRP buffer descriptor array
 *		    that is being filled in.
 * @pages:	    Array with DMA addresses of pages being considered for
 *		    memory registration.
 * @base_dma_addr:  DMA address of the first page that has not yet been mapped.
 * @dma_len:	    Number of bytes that will be registered with the next FR
 *		    memory registration call.
 * @total_len:	    Total number of bytes in the sg-list being mapped.
 * @npages:	    Number of page addresses in the pages[] array.
 * @nmdesc:	    Number of FR memory descriptors used for mapping.
 * @ndesc:	    Number of SRP buffer descriptors that have been filled in.
 */
struct srp_map_state {
	union {
		struct {
			struct srp_fr_desc **next;
			struct srp_fr_desc **end;
		} fr;
		struct {
			void		   **next;
			void		   **end;
		} gen;
	};
	struct srp_direct_buf  *desc;
	union {
		u64			*pages;
		struct scatterlist	*sg;
	};
	dma_addr_t		base_dma_addr;
	u32			dma_len;
	u32			total_len;
	unsigned int		npages;
	unsigned int		nmdesc;
	unsigned int		ndesc;
};

#endif /* IB_SRP_H */