/*
 * Copyright (c) 2012-2016 VMware, Inc.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of EITHER the GNU General Public License
 * version 2 as published by the Free Software Foundation or the BSD
 * 2-Clause License. This program is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
 * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License version 2 for more details at
 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program available in the file COPYING in the main
 * directory of this source tree.
 *
 * The BSD 2-Clause License
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __PVRDMA_H__
#define __PVRDMA_H__

#include <linux/compiler.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/semaphore.h>
#include <linux/workqueue.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>
#include <rdma/vmw_pvrdma-abi.h>

#include "pvrdma_ring.h"
#include "pvrdma_dev_api.h"
#include "pvrdma_verbs.h"
/*
 * NOT the same as BIT_MASK(): given a single-bit flag value n, this
 * yields a mask covering that bit and every lower bit.
 */
#define PVRDMA_MASK(n) (((n) << 1) - 1)
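/* For example, PVRDMA_MASK(0x8) == 0xf (illustrative arithmetic only). */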

/*
 * VMware PVRDMA PCI device id.
 */
#define PCI_DEVICE_ID_VMWARE_PVRDMA	0x0820

#define PVRDMA_NUM_RING_PAGES		4
#define PVRDMA_QP_NUM_HEADER_PAGES	1

struct pvrdma_dev;

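/*
 * Two-level page directory (a sketch of the layout as used in this
 * driver): @dir is a single DMA-able page of table addresses, each of
 * the @ntables tables holds u64 DMA addresses of data pages, and
 * @pages caches kernel mappings of the @npages data pages when the
 * driver allocates them itself.
 */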
struct pvrdma_page_dir {
	dma_addr_t dir_dma;
	u64 *dir;
	int ntables;
	u64 **tables;
	u64 npages;
	void **pages;
};

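/*
 * The CQ, SRQ and QP objects below share a teardown pattern: @refcnt
 * counts outstanding users, and destroy paths wait on @free, which is
 * completed when the last reference is dropped.
 */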
struct pvrdma_cq {
	struct ib_cq ibcq;
	int offset;
	spinlock_t cq_lock; /* Poll lock. */
	struct pvrdma_uar_map *uar;
	struct ib_umem *umem;
	struct pvrdma_ring_state *ring_state;
	struct pvrdma_page_dir pdir;
	u32 cq_handle;
	bool is_kernel;
	refcount_t refcnt;
	struct completion free;
};

struct pvrdma_id_table {
	u32 last;
	u32 top;
	u32 max;
	u32 mask;
	spinlock_t lock; /* Table lock. */
	unsigned long *table;
};

struct pvrdma_uar_map {
	unsigned long pfn;
	void __iomem *map;
	int index;
};

struct pvrdma_uar_table {
	struct pvrdma_id_table tbl;
	int size;
};

struct pvrdma_ucontext {
	struct ib_ucontext ibucontext;
	struct pvrdma_dev *dev;
	struct pvrdma_uar_map uar;
	u64 ctx_handle;
};

struct pvrdma_pd {
	struct ib_pd ibpd;
	u32 pdn;
	u32 pd_handle;
	int privileged;
};

struct pvrdma_mr {
	u32 mr_handle;
	u64 iova;
	u64 size;
};

struct pvrdma_user_mr {
	struct ib_mr ibmr;
	struct ib_umem *umem;
	struct pvrdma_mr mmr;
	struct pvrdma_page_dir pdir;
	u64 *pages;
	u32 npages;
	u32 max_pages;
	u32 page_shift;
};

struct pvrdma_wq {
	struct pvrdma_ring *ring;
	spinlock_t lock; /* Work queue lock. */
	int wqe_cnt;
	int wqe_size;
	int max_sg;
	int offset;
};

struct pvrdma_ah {
	struct ib_ah ibah;
	struct pvrdma_av av;
};

struct pvrdma_srq {
	struct ib_srq ibsrq;
	int offset;
	spinlock_t lock; /* SRQ lock. */
	int wqe_cnt;
	int wqe_size;
	int max_gs;
	struct ib_umem *umem;
	struct pvrdma_ring_state *ring;
	struct pvrdma_page_dir pdir;
	u32 srq_handle;
	int npages;
	refcount_t refcnt;
	struct completion free;
};

struct pvrdma_qp {
	struct ib_qp ibqp;
	u32 qp_handle;
	u32 qkey;
	struct pvrdma_wq sq;
	struct pvrdma_wq rq;
	struct ib_umem *rumem;
	struct ib_umem *sumem;
	struct pvrdma_page_dir pdir;
	struct pvrdma_srq *srq;
	int npages;
	int npages_send;
	int npages_recv;
	u32 flags;
	u8 port;
	u8 state;
	bool is_kernel;
	struct mutex mutex; /* QP state mutex. */
	refcount_t refcnt;
	struct completion free;
};

struct pvrdma_dev {
	/* PCI device-related information. */
	struct ib_device ib_dev;
	struct pci_dev *pdev;
	void __iomem *regs;
	struct pvrdma_device_shared_region *dsr; /* Shared region pointer. */
	dma_addr_t dsrbase; /* Shared region base address. */
	void *cmd_slot;
	void *resp_slot;
	unsigned long flags;
	struct list_head device_link;
	unsigned int dsr_version;

	/* Locking and interrupt information. */
	spinlock_t cmd_lock; /* Command lock. */
	struct semaphore cmd_sema;
	struct completion cmd_done;
	unsigned int nr_vectors;

	/* RDMA-related device information. */
	union ib_gid *sgid_tbl;
	struct pvrdma_ring_state *async_ring_state;
	struct pvrdma_page_dir async_pdir;
	struct pvrdma_ring_state *cq_ring_state;
	struct pvrdma_page_dir cq_pdir;
	struct pvrdma_cq **cq_tbl;
	spinlock_t cq_tbl_lock;
	struct pvrdma_srq **srq_tbl;
	spinlock_t srq_tbl_lock;
	struct pvrdma_qp **qp_tbl;
	spinlock_t qp_tbl_lock;
	struct pvrdma_uar_table uar_table;
	struct pvrdma_uar_map driver_uar;
	__be64 sys_image_guid;
	spinlock_t desc_lock; /* Device modification lock. */
	u32 port_cap_mask;
	struct mutex port_mutex; /* Port modification mutex. */
	bool ib_active;
	atomic_t num_qps;
	atomic_t num_cqs;
	atomic_t num_srqs;
	atomic_t num_pds;
	atomic_t num_ahs;

	/* Network device information. */
	struct net_device *netdev;
	struct notifier_block nb_netdev;
};

struct pvrdma_netdevice_work {
	struct work_struct work;
	struct net_device *event_netdev;
	unsigned long event;
};

static inline struct pvrdma_dev *to_vdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct pvrdma_dev, ib_dev);
}

static inline struct pvrdma_ucontext *
to_vucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct pvrdma_ucontext, ibucontext);
}

static inline struct pvrdma_pd *to_vpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct pvrdma_pd, ibpd);
}

static inline struct pvrdma_cq *to_vcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct pvrdma_cq, ibcq);
}

static inline struct pvrdma_srq *to_vsrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct pvrdma_srq, ibsrq);
}

static inline struct pvrdma_user_mr *to_vmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct pvrdma_user_mr, ibmr);
}

static inline struct pvrdma_qp *to_vqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct pvrdma_qp, ibqp);
}

static inline struct pvrdma_ah *to_vah(struct ib_ah *ibah)
{
	return container_of(ibah, struct pvrdma_ah, ibah);
}
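
/*
 * Usage sketch (hypothetical caller, not part of this header): a verbs
 * callback recovers the driver-private object from the embedded ib_*
 * struct via the to_*() helpers above, e.g.
 *
 *	struct pvrdma_cq *cq = to_vcq(ibcq);
 *	struct pvrdma_dev *dev = to_vdev(ibcq->device);
 */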

static inline void pvrdma_write_reg(struct pvrdma_dev *dev, u32 reg, u32 val)
{
	writel(cpu_to_le32(val), dev->regs + reg);
}

static inline u32 pvrdma_read_reg(struct pvrdma_dev *dev, u32 reg)
{
	return le32_to_cpu(readl(dev->regs + reg));
}
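
/*
 * Example (assumes the register offsets defined in pvrdma_dev_api.h):
 *
 *	u32 version = pvrdma_read_reg(dev, PVRDMA_REG_VERSION);
 *
 * Device registers are little-endian regardless of host byte order.
 */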

static inline void pvrdma_write_uar_cq(struct pvrdma_dev *dev, u32 val)
{
	writel(cpu_to_le32(val), dev->driver_uar.map + PVRDMA_UAR_CQ_OFFSET);
}

static inline void pvrdma_write_uar_qp(struct pvrdma_dev *dev, u32 val)
{
	writel(cpu_to_le32(val), dev->driver_uar.map + PVRDMA_UAR_QP_OFFSET);
}

static inline void *pvrdma_page_dir_get_ptr(struct pvrdma_page_dir *pdir,
					    u64 offset)
{
	return pdir->pages[offset / PAGE_SIZE] + (offset % PAGE_SIZE);
}
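
/*
 * For example, with 4 KiB pages an offset of 5000 resolves to byte 904
 * of pdir->pages[1] (5000 / 4096 == 1, 5000 % 4096 == 904).
 */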

static inline enum pvrdma_mtu ib_mtu_to_pvrdma(enum ib_mtu mtu)
{
	return (enum pvrdma_mtu)mtu;
}

static inline enum ib_mtu pvrdma_mtu_to_ib(enum pvrdma_mtu mtu)
{
	return (enum ib_mtu)mtu;
}

static inline enum pvrdma_port_state ib_port_state_to_pvrdma(
					enum ib_port_state state)
{
	return (enum pvrdma_port_state)state;
}

static inline enum ib_port_state pvrdma_port_state_to_ib(
					enum pvrdma_port_state state)
{
	return (enum ib_port_state)state;
}

static inline int ib_port_cap_flags_to_pvrdma(int flags)
{
	return flags & PVRDMA_MASK(PVRDMA_PORT_CAP_FLAGS_MAX);
}

static inline int pvrdma_port_cap_flags_to_ib(int flags)
{
	return flags;
}

static inline enum pvrdma_port_width ib_port_width_to_pvrdma(
					enum ib_port_width width)
{
	return (enum pvrdma_port_width)width;
}

static inline enum ib_port_width pvrdma_port_width_to_ib(
					enum pvrdma_port_width width)
{
	return (enum ib_port_width)width;
}

static inline enum pvrdma_port_speed ib_port_speed_to_pvrdma(
					enum ib_port_speed speed)
{
	return (enum pvrdma_port_speed)speed;
}

static inline enum ib_port_speed pvrdma_port_speed_to_ib(
					enum pvrdma_port_speed speed)
{
	return (enum ib_port_speed)speed;
}

static inline int ib_qp_attr_mask_to_pvrdma(int attr_mask)
{
	return attr_mask & PVRDMA_MASK(PVRDMA_QP_ATTR_MASK_MAX);
}

static inline enum pvrdma_mig_state ib_mig_state_to_pvrdma(
					enum ib_mig_state state)
{
	return (enum pvrdma_mig_state)state;
}

static inline enum ib_mig_state pvrdma_mig_state_to_ib(
					enum pvrdma_mig_state state)
{
	return (enum ib_mig_state)state;
}

static inline int ib_access_flags_to_pvrdma(int flags)
{
	return flags;
}

static inline int pvrdma_access_flags_to_ib(int flags)
{
	return flags & PVRDMA_MASK(PVRDMA_ACCESS_FLAGS_MAX);
}

static inline enum pvrdma_qp_type ib_qp_type_to_pvrdma(enum ib_qp_type type)
{
	return (enum pvrdma_qp_type)type;
}

static inline enum ib_qp_type pvrdma_qp_type_to_ib(enum pvrdma_qp_type type)
{
	return (enum ib_qp_type)type;
}

static inline enum pvrdma_qp_state ib_qp_state_to_pvrdma(enum ib_qp_state state)
{
	return (enum pvrdma_qp_state)state;
}

static inline enum ib_qp_state pvrdma_qp_state_to_ib(enum pvrdma_qp_state state)
{
	return (enum ib_qp_state)state;
}

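/*
 * Map IB work-request opcodes onto PVRDMA wire opcodes; anything the
 * device does not understand maps to PVRDMA_WR_ERROR so callers can
 * reject unsupported requests.
 */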
static inline enum pvrdma_wr_opcode ib_wr_opcode_to_pvrdma(enum ib_wr_opcode op)
{
	switch (op) {
	case IB_WR_RDMA_WRITE:
		return PVRDMA_WR_RDMA_WRITE;
	case IB_WR_RDMA_WRITE_WITH_IMM:
		return PVRDMA_WR_RDMA_WRITE_WITH_IMM;
	case IB_WR_SEND:
		return PVRDMA_WR_SEND;
	case IB_WR_SEND_WITH_IMM:
		return PVRDMA_WR_SEND_WITH_IMM;
	case IB_WR_RDMA_READ:
		return PVRDMA_WR_RDMA_READ;
	case IB_WR_ATOMIC_CMP_AND_SWP:
		return PVRDMA_WR_ATOMIC_CMP_AND_SWP;
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		return PVRDMA_WR_ATOMIC_FETCH_AND_ADD;
	case IB_WR_LSO:
		return PVRDMA_WR_LSO;
	case IB_WR_SEND_WITH_INV:
		return PVRDMA_WR_SEND_WITH_INV;
	case IB_WR_RDMA_READ_WITH_INV:
		return PVRDMA_WR_RDMA_READ_WITH_INV;
	case IB_WR_LOCAL_INV:
		return PVRDMA_WR_LOCAL_INV;
	case IB_WR_REG_MR:
		return PVRDMA_WR_FAST_REG_MR;
	case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
		return PVRDMA_WR_MASKED_ATOMIC_CMP_AND_SWP;
	case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
		return PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD;
	case IB_WR_REG_MR_INTEGRITY:
		return PVRDMA_WR_REG_SIG_MR;
	default:
		return PVRDMA_WR_ERROR;
	}
}

static inline enum ib_wc_status pvrdma_wc_status_to_ib(
					enum pvrdma_wc_status status)
{
	return (enum ib_wc_status)status;
}

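/*
 * Map PVRDMA completion opcodes back to IB ones; unrecognized opcodes
 * fall back to IB_WC_SEND rather than an error value.
 */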
static inline int pvrdma_wc_opcode_to_ib(unsigned int opcode)
{
	switch (opcode) {
	case PVRDMA_WC_SEND:
		return IB_WC_SEND;
	case PVRDMA_WC_RDMA_WRITE:
		return IB_WC_RDMA_WRITE;
	case PVRDMA_WC_RDMA_READ:
		return IB_WC_RDMA_READ;
	case PVRDMA_WC_COMP_SWAP:
		return IB_WC_COMP_SWAP;
	case PVRDMA_WC_FETCH_ADD:
		return IB_WC_FETCH_ADD;
	case PVRDMA_WC_LOCAL_INV:
		return IB_WC_LOCAL_INV;
	case PVRDMA_WC_FAST_REG_MR:
		return IB_WC_REG_MR;
	case PVRDMA_WC_MASKED_COMP_SWAP:
		return IB_WC_MASKED_COMP_SWAP;
	case PVRDMA_WC_MASKED_FETCH_ADD:
		return IB_WC_MASKED_FETCH_ADD;
	case PVRDMA_WC_RECV:
		return IB_WC_RECV;
	case PVRDMA_WC_RECV_RDMA_WITH_IMM:
		return IB_WC_RECV_RDMA_WITH_IMM;
	default:
		return IB_WC_SEND;
	}
}

static inline int pvrdma_wc_flags_to_ib(int flags)
{
	return flags;
}

static inline int ib_send_flags_to_pvrdma(int flags)
{
	return flags & PVRDMA_MASK(PVRDMA_SEND_FLAGS_MAX);
}

static inline int pvrdma_network_type_to_ib(enum pvrdma_network_type type)
{
	switch (type) {
	case PVRDMA_NETWORK_ROCE_V1:
		return RDMA_NETWORK_ROCE_V1;
	case PVRDMA_NETWORK_IPV4:
		return RDMA_NETWORK_IPV4;
	case PVRDMA_NETWORK_IPV6:
		return RDMA_NETWORK_IPV6;
	default:
		return RDMA_NETWORK_IPV6;
	}
}

void pvrdma_qp_cap_to_ib(struct ib_qp_cap *dst,
			 const struct pvrdma_qp_cap *src);
void ib_qp_cap_to_pvrdma(struct pvrdma_qp_cap *dst,
			 const struct ib_qp_cap *src);
void pvrdma_gid_to_ib(union ib_gid *dst, const union pvrdma_gid *src);
void ib_gid_to_pvrdma(union pvrdma_gid *dst, const union ib_gid *src);
void pvrdma_global_route_to_ib(struct ib_global_route *dst,
			       const struct pvrdma_global_route *src);
void ib_global_route_to_pvrdma(struct pvrdma_global_route *dst,
			       const struct ib_global_route *src);
void pvrdma_ah_attr_to_rdma(struct rdma_ah_attr *dst,
			    const struct pvrdma_ah_attr *src);
void rdma_ah_attr_to_pvrdma(struct pvrdma_ah_attr *dst,
			    const struct rdma_ah_attr *src);
u8 ib_gid_type_to_pvrdma(enum ib_gid_type gid_type);

int pvrdma_uar_table_init(struct pvrdma_dev *dev);
void pvrdma_uar_table_cleanup(struct pvrdma_dev *dev);

int pvrdma_uar_alloc(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar);
void pvrdma_uar_free(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar);

void _pvrdma_flush_cqe(struct pvrdma_qp *qp, struct pvrdma_cq *cq);

int pvrdma_page_dir_init(struct pvrdma_dev *dev, struct pvrdma_page_dir *pdir,
			 u64 npages, bool alloc_pages);
void pvrdma_page_dir_cleanup(struct pvrdma_dev *dev,
			     struct pvrdma_page_dir *pdir);
int pvrdma_page_dir_insert_dma(struct pvrdma_page_dir *pdir, u64 idx,
			       dma_addr_t daddr);
int pvrdma_page_dir_insert_umem(struct pvrdma_page_dir *pdir,
				struct ib_umem *umem, u64 offset);
dma_addr_t pvrdma_page_dir_get_dma(struct pvrdma_page_dir *pdir, u64 idx);
int pvrdma_page_dir_insert_page_list(struct pvrdma_page_dir *pdir,
				     u64 *page_list, int num_pages);

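/*
 * Post a command to the device and wait for its response. A minimal
 * sketch of the calling convention (command structs and opcodes are
 * assumed to follow pvrdma_dev_api.h):
 *
 *	union pvrdma_cmd_req req = {};
 *	union pvrdma_cmd_resp rsp;
 *
 *	req.query_port.hdr.cmd = PVRDMA_CMD_QUERY_PORT;
 *	req.query_port.port_num = 1;
 *	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_QUERY_PORT_RESP);
 */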
int pvrdma_cmd_post(struct pvrdma_dev *dev, union pvrdma_cmd_req *req,
		    union pvrdma_cmd_resp *rsp, unsigned int resp_code);

#endif /* __PVRDMA_H__ */