/*
 * Copyright (c) 2012-2016 VMware, Inc.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of EITHER the GNU General Public License
 * version 2 as published by the Free Software Foundation or the BSD
 * 2-Clause License. This program is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
 * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License version 2 for more details at
 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program available in the file COPYING in the main
 * directory of this source tree.
 *
 * The BSD 2-Clause License
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/errno.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <net/addrconf.h>

#include "pvrdma.h"

#define DRV_NAME	"vmw_pvrdma"
#define DRV_VERSION	"1.0.1.0-k"

static DEFINE_MUTEX(pvrdma_device_list_lock);
static LIST_HEAD(pvrdma_device_list);
static struct workqueue_struct *event_wq;

static int pvrdma_add_gid(const struct ib_gid_attr *attr, void **context);
static int pvrdma_del_gid(const struct ib_gid_attr *attr, void **context);

static ssize_t hca_type_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "VMW_PVRDMA-%s\n", DRV_VERSION);
}
static DEVICE_ATTR_RO(hca_type);

static ssize_t hw_rev_show(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", PVRDMA_REV_ID);
}
static DEVICE_ATTR_RO(hw_rev);

static ssize_t board_id_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", PVRDMA_BOARD_ID);
}
static DEVICE_ATTR_RO(board_id);

static struct attribute *pvrdma_class_attributes[] = {
	&dev_attr_hw_rev.attr,
	&dev_attr_hca_type.attr,
	&dev_attr_board_id.attr,
	NULL,
};

static const struct attribute_group pvrdma_attr_group = {
	.attrs = pvrdma_class_attributes,
};

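/*
 * Format the device firmware version for the RDMA core. The 64-bit
 * fw_ver capability is decoded as major.minor.patch (upper 32 bits,
 * then two 16-bit fields).
 */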
static void pvrdma_get_fw_ver_str(struct ib_device *device, char *str)
{
	struct pvrdma_dev *dev =
		container_of(device, struct pvrdma_dev, ib_dev);
	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d\n",
		 (int) (dev->dsr->caps.fw_ver >> 32),
		 (int) (dev->dsr->caps.fw_ver >> 16) & 0xffff,
		 (int) dev->dsr->caps.fw_ver & 0xffff);
}

static int pvrdma_init_device(struct pvrdma_dev *dev)
{
	/* Initialize the command channel and resource counters. */
	spin_lock_init(&dev->cmd_lock);
	sema_init(&dev->cmd_sema, 1);
	atomic_set(&dev->num_qps, 0);
	atomic_set(&dev->num_srqs, 0);
	atomic_set(&dev->num_cqs, 0);
	atomic_set(&dev->num_pds, 0);
	atomic_set(&dev->num_ahs, 0);

	return 0;
}

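/*
 * Fill in the immutable port attributes. The RoCE version advertised
 * to the RDMA core follows the GID types reported by the device.
 */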
static int pvrdma_port_immutable(struct ib_device *ibdev, u8 port_num,
				 struct ib_port_immutable *immutable)
{
	struct pvrdma_dev *dev = to_vdev(ibdev);
	struct ib_port_attr attr;
	int err;

	if (dev->dsr->caps.gid_types == PVRDMA_GID_TYPE_FLAG_ROCE_V1)
		immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE;
	else if (dev->dsr->caps.gid_types == PVRDMA_GID_TYPE_FLAG_ROCE_V2)
		immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
	return 0;
}

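/*
 * Verbs entry points common to all devices. SRQ ops are added
 * separately, only when the backend reports SRQ support.
 */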
static const struct ib_device_ops pvrdma_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_VMW_PVRDMA,
	.uverbs_abi_ver = PVRDMA_UVERBS_ABI_VERSION,

	.add_gid = pvrdma_add_gid,
	.alloc_mr = pvrdma_alloc_mr,
	.alloc_pd = pvrdma_alloc_pd,
	.alloc_ucontext = pvrdma_alloc_ucontext,
	.create_ah = pvrdma_create_ah,
	.create_cq = pvrdma_create_cq,
	.create_qp = pvrdma_create_qp,
	.dealloc_pd = pvrdma_dealloc_pd,
	.dealloc_ucontext = pvrdma_dealloc_ucontext,
	.del_gid = pvrdma_del_gid,
	.dereg_mr = pvrdma_dereg_mr,
	.destroy_ah = pvrdma_destroy_ah,
	.destroy_cq = pvrdma_destroy_cq,
	.destroy_qp = pvrdma_destroy_qp,
	.get_dev_fw_str = pvrdma_get_fw_ver_str,
	.get_dma_mr = pvrdma_get_dma_mr,
	.get_link_layer = pvrdma_port_link_layer,
	.get_port_immutable = pvrdma_port_immutable,
	.map_mr_sg = pvrdma_map_mr_sg,
	.mmap = pvrdma_mmap,
	.modify_port = pvrdma_modify_port,
	.modify_qp = pvrdma_modify_qp,
	.poll_cq = pvrdma_poll_cq,
	.post_recv = pvrdma_post_recv,
	.post_send = pvrdma_post_send,
	.query_device = pvrdma_query_device,
	.query_gid = pvrdma_query_gid,
	.query_pkey = pvrdma_query_pkey,
	.query_port = pvrdma_query_port,
	.query_qp = pvrdma_query_qp,
	.reg_user_mr = pvrdma_reg_user_mr,
	.req_notify_cq = pvrdma_req_notify_cq,

	INIT_RDMA_OBJ_SIZE(ib_ah, pvrdma_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_cq, pvrdma_cq, ibcq),
	INIT_RDMA_OBJ_SIZE(ib_pd, pvrdma_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, pvrdma_ucontext, ibucontext),
};

static const struct ib_device_ops pvrdma_dev_srq_ops = {
	.create_srq = pvrdma_create_srq,
	.destroy_srq = pvrdma_destroy_srq,
	.modify_srq = pvrdma_modify_srq,
	.query_srq = pvrdma_query_srq,

	INIT_RDMA_OBJ_SIZE(ib_srq, pvrdma_srq, ibsrq),
};

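/*
 * Register the device with the RDMA core: set up the verbs ops and
 * uverbs command mask, allocate the CQ/QP (and, if supported, SRQ)
 * lookup tables, bind the paired netdev and call ib_register_device().
 */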
static int pvrdma_register_device(struct pvrdma_dev *dev)
{
	int ret = -1;

	dev->ib_dev.node_guid = dev->dsr->caps.node_guid;
	dev->sys_image_guid = dev->dsr->caps.sys_image_guid;
	dev->flags = 0;
	dev->ib_dev.num_comp_vectors = 1;
	dev->ib_dev.dev.parent = &dev->pdev->dev;
	dev->ib_dev.uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)	|
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT)		|
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD)		|
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)		|
		(1ull << IB_USER_VERBS_CMD_REG_MR)		|
		(1ull << IB_USER_VERBS_CMD_DEREG_MR)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)	|
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ)		|
		(1ull << IB_USER_VERBS_CMD_POLL_CQ)		|
		(1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)	|
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_QP)		|
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_QP)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP)		|
		(1ull << IB_USER_VERBS_CMD_POST_SEND)		|
		(1ull << IB_USER_VERBS_CMD_POST_RECV)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_AH)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_AH);

	dev->ib_dev.node_type = RDMA_NODE_IB_CA;
	dev->ib_dev.phys_port_cnt = dev->dsr->caps.phys_port_cnt;

	ib_set_device_ops(&dev->ib_dev, &pvrdma_dev_ops);

	mutex_init(&dev->port_mutex);
	spin_lock_init(&dev->desc_lock);

	dev->cq_tbl = kcalloc(dev->dsr->caps.max_cq, sizeof(struct pvrdma_cq *),
			      GFP_KERNEL);
	if (!dev->cq_tbl)
		return ret;
	spin_lock_init(&dev->cq_tbl_lock);

	dev->qp_tbl = kcalloc(dev->dsr->caps.max_qp, sizeof(struct pvrdma_qp *),
			      GFP_KERNEL);
	if (!dev->qp_tbl)
		goto err_cq_free;
	spin_lock_init(&dev->qp_tbl_lock);

	/* Check if SRQ is supported by backend */
	if (dev->dsr->caps.max_srq) {
		dev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_CREATE_SRQ)	|
			(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)	|
			(1ull << IB_USER_VERBS_CMD_QUERY_SRQ)	|
			(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)	|
			(1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);

		ib_set_device_ops(&dev->ib_dev, &pvrdma_dev_srq_ops);

		dev->srq_tbl = kcalloc(dev->dsr->caps.max_srq,
				       sizeof(struct pvrdma_srq *),
				       GFP_KERNEL);
		if (!dev->srq_tbl)
			goto err_qp_free;
	}
	ret = ib_device_set_netdev(&dev->ib_dev, dev->netdev, 1);
	if (ret)
		goto err_srq_free;
	spin_lock_init(&dev->srq_tbl_lock);
	rdma_set_device_sysfs_group(&dev->ib_dev, &pvrdma_attr_group);

	ret = ib_register_device(&dev->ib_dev, "vmw_pvrdma%d", &dev->pdev->dev);
	if (ret)
		goto err_srq_free;

	dev->ib_active = true;

	return 0;

err_srq_free:
	kfree(dev->srq_tbl);
err_qp_free:
	kfree(dev->qp_tbl);
err_cq_free:
	kfree(dev->cq_tbl);

	return ret;
}

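/* Interrupt 0 signals completion of a command posted to the device. */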
static irqreturn_t pvrdma_intr0_handler(int irq, void *dev_id)
{
	u32 icr = PVRDMA_INTR_CAUSE_RESPONSE;
	struct pvrdma_dev *dev = dev_id;

	dev_dbg(&dev->pdev->dev, "interrupt 0 (response) handler\n");

	if (!dev->pdev->msix_enabled) {
		/* Legacy interrupt: read and check the interrupt cause. */
		icr = pvrdma_read_reg(dev, PVRDMA_REG_ICR);
		if (icr == 0)
			return IRQ_NONE;
	}

	if (icr == PVRDMA_INTR_CAUSE_RESPONSE)
		complete(&dev->cmd_done);

	return IRQ_HANDLED;
}

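/*
 * Deliver an asynchronous event to the QP's event handler. The QP is
 * looked up under qp_tbl_lock and pinned with a reference so it cannot
 * be freed while the handler runs; the same pattern is used for CQs
 * and SRQs below.
 */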
static void pvrdma_qp_event(struct pvrdma_dev *dev, u32 qpn, int type)
{
	struct pvrdma_qp *qp;
	unsigned long flags;

	spin_lock_irqsave(&dev->qp_tbl_lock, flags);
	qp = dev->qp_tbl[qpn % dev->dsr->caps.max_qp];
	if (qp)
		refcount_inc(&qp->refcnt);
	spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);

	if (qp && qp->ibqp.event_handler) {
		struct ib_qp *ibqp = &qp->ibqp;
		struct ib_event e;

		e.device = ibqp->device;
		e.element.qp = ibqp;
		e.event = type; /* 1:1 mapping for now. */
		ibqp->event_handler(&e, ibqp->qp_context);
	}
	if (qp) {
		if (refcount_dec_and_test(&qp->refcnt))
			complete(&qp->free);
	}
}

static void pvrdma_cq_event(struct pvrdma_dev *dev, u32 cqn, int type)
{
	struct pvrdma_cq *cq;
	unsigned long flags;

	spin_lock_irqsave(&dev->cq_tbl_lock, flags);
	cq = dev->cq_tbl[cqn % dev->dsr->caps.max_cq];
	if (cq)
		refcount_inc(&cq->refcnt);
	spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);

	if (cq && cq->ibcq.event_handler) {
		struct ib_cq *ibcq = &cq->ibcq;
		struct ib_event e;

		e.device = ibcq->device;
		e.element.cq = ibcq;
		e.event = type; /* 1:1 mapping for now. */
		ibcq->event_handler(&e, ibcq->cq_context);
	}
	if (cq) {
		if (refcount_dec_and_test(&cq->refcnt))
			complete(&cq->free);
	}
}

static void pvrdma_srq_event(struct pvrdma_dev *dev, u32 srqn, int type)
{
	struct pvrdma_srq *srq;
	unsigned long flags;

	spin_lock_irqsave(&dev->srq_tbl_lock, flags);
	if (dev->srq_tbl)
		srq = dev->srq_tbl[srqn % dev->dsr->caps.max_srq];
	else
		srq = NULL;
	if (srq)
		refcount_inc(&srq->refcnt);
	spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);

	if (srq && srq->ibsrq.event_handler) {
		struct ib_srq *ibsrq = &srq->ibsrq;
		struct ib_event e;

		e.device = ibsrq->device;
		e.element.srq = ibsrq;
		e.event = type; /* 1:1 mapping for now. */
		ibsrq->event_handler(&e, ibsrq->srq_context);
	}
	if (srq) {
		if (refcount_dec_and_test(&srq->refcnt))
			complete(&srq->free);
	}
}

static void pvrdma_dispatch_event(struct pvrdma_dev *dev, int port,
				  enum ib_event_type event)
{
	struct ib_event ib_event;

	memset(&ib_event, 0, sizeof(ib_event));
	ib_event.device = &dev->ib_dev;
	ib_event.element.port_num = port;
	ib_event.event = event;
	ib_dispatch_event(&ib_event);
}

static void pvrdma_dev_event(struct pvrdma_dev *dev, u8 port, int type)
{
	if (port < 1 || port > dev->dsr->caps.phys_port_cnt) {
		dev_warn(&dev->pdev->dev, "event on invalid port %d\n", port);
		return;
	}

	pvrdma_dispatch_event(dev, port, type);
}

static inline struct pvrdma_eqe *get_eqe(struct pvrdma_dev *dev, unsigned int i)
{
	return (struct pvrdma_eqe *)pvrdma_page_dir_get_ptr(
					&dev->async_pdir,
					PAGE_SIZE +
					sizeof(struct pvrdma_eqe) * i);
}

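/*
 * Interrupt 1 drains the asynchronous event ring and fans the events
 * out to the affected QP, CQ, SRQ or port.
 */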
static irqreturn_t pvrdma_intr1_handler(int irq, void *dev_id)
{
	struct pvrdma_dev *dev = dev_id;
	struct pvrdma_ring *ring = &dev->async_ring_state->rx;
	int ring_slots = (dev->dsr->async_ring_pages.num_pages - 1) *
			 PAGE_SIZE / sizeof(struct pvrdma_eqe);
	unsigned int head;

	dev_dbg(&dev->pdev->dev, "interrupt 1 (async event) handler\n");

	/*
	 * Don't process events until the IB device is registered. Otherwise
	 * we'll try to ib_dispatch_event() on an invalid device.
	 */
	if (!dev->ib_active)
		return IRQ_HANDLED;

	while (pvrdma_idx_ring_has_data(ring, ring_slots, &head) > 0) {
		struct pvrdma_eqe *eqe;

		eqe = get_eqe(dev, head);

		switch (eqe->type) {
		case PVRDMA_EVENT_QP_FATAL:
		case PVRDMA_EVENT_QP_REQ_ERR:
		case PVRDMA_EVENT_QP_ACCESS_ERR:
		case PVRDMA_EVENT_COMM_EST:
		case PVRDMA_EVENT_SQ_DRAINED:
		case PVRDMA_EVENT_PATH_MIG:
		case PVRDMA_EVENT_PATH_MIG_ERR:
		case PVRDMA_EVENT_QP_LAST_WQE_REACHED:
			pvrdma_qp_event(dev, eqe->info, eqe->type);
			break;

		case PVRDMA_EVENT_CQ_ERR:
			pvrdma_cq_event(dev, eqe->info, eqe->type);
			break;

		case PVRDMA_EVENT_SRQ_ERR:
		case PVRDMA_EVENT_SRQ_LIMIT_REACHED:
			pvrdma_srq_event(dev, eqe->info, eqe->type);
			break;

		case PVRDMA_EVENT_PORT_ACTIVE:
		case PVRDMA_EVENT_PORT_ERR:
		case PVRDMA_EVENT_LID_CHANGE:
		case PVRDMA_EVENT_PKEY_CHANGE:
		case PVRDMA_EVENT_SM_CHANGE:
		case PVRDMA_EVENT_CLIENT_REREGISTER:
		case PVRDMA_EVENT_GID_CHANGE:
			pvrdma_dev_event(dev, eqe->info, eqe->type);
			break;

		case PVRDMA_EVENT_DEVICE_FATAL:
			pvrdma_dev_event(dev, 1, eqe->type);
			break;

		default:
			break;
		}

		pvrdma_idx_ring_inc(&ring->cons_head, ring_slots);
	}

	return IRQ_HANDLED;
}

static inline struct pvrdma_cqne *get_cqne(struct pvrdma_dev *dev,
					   unsigned int i)
{
	return (struct pvrdma_cqne *)pvrdma_page_dir_get_ptr(
					&dev->cq_pdir,
					PAGE_SIZE +
					sizeof(struct pvrdma_cqne) * i);
}

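/*
 * The remaining vectors drain the CQ notification ring and invoke the
 * completion handler of each referenced CQ.
 */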
static irqreturn_t pvrdma_intrx_handler(int irq, void *dev_id)
{
	struct pvrdma_dev *dev = dev_id;
	struct pvrdma_ring *ring = &dev->cq_ring_state->rx;
	int ring_slots = (dev->dsr->cq_ring_pages.num_pages - 1) * PAGE_SIZE /
			 sizeof(struct pvrdma_cqne);
	unsigned int head;
	unsigned long flags;

	dev_dbg(&dev->pdev->dev, "interrupt x (completion) handler\n");

	while (pvrdma_idx_ring_has_data(ring, ring_slots, &head) > 0) {
		struct pvrdma_cqne *cqne;
		struct pvrdma_cq *cq;

		cqne = get_cqne(dev, head);
		spin_lock_irqsave(&dev->cq_tbl_lock, flags);
		cq = dev->cq_tbl[cqne->info % dev->dsr->caps.max_cq];
		if (cq)
			refcount_inc(&cq->refcnt);
		spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);

		if (cq && cq->ibcq.comp_handler)
			cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
		if (cq) {
			if (refcount_dec_and_test(&cq->refcnt))
				complete(&cq->free);
		}
		pvrdma_idx_ring_inc(&ring->cons_head, ring_slots);
	}

	return IRQ_HANDLED;
}

static void pvrdma_free_irq(struct pvrdma_dev *dev)
{
	int i;

	dev_dbg(&dev->pdev->dev, "freeing interrupts\n");
	for (i = 0; i < dev->nr_vectors; i++)
		free_irq(pci_irq_vector(dev->pdev, i), dev);
}

static void pvrdma_enable_intrs(struct pvrdma_dev *dev)
{
	dev_dbg(&dev->pdev->dev, "enable interrupts\n");
	pvrdma_write_reg(dev, PVRDMA_REG_IMR, 0);
}

static void pvrdma_disable_intrs(struct pvrdma_dev *dev)
{
	dev_dbg(&dev->pdev->dev, "disable interrupts\n");
	pvrdma_write_reg(dev, PVRDMA_REG_IMR, ~0);
}

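/*
 * Allocate interrupt vectors, preferring MSI-X with a fallback to
 * MSI/legacy, then request vector 0 for command responses and the
 * remaining vectors for async events and completions.
 */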
static int pvrdma_alloc_intrs(struct pvrdma_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;
	int ret = 0, i;

	ret = pci_alloc_irq_vectors(pdev, 1, PVRDMA_MAX_INTERRUPTS,
			PCI_IRQ_MSIX);
	if (ret < 0) {
		ret = pci_alloc_irq_vectors(pdev, 1, 1,
				PCI_IRQ_MSI | PCI_IRQ_LEGACY);
		if (ret < 0)
			return ret;
	}
	dev->nr_vectors = ret;

	ret = request_irq(pci_irq_vector(dev->pdev, 0), pvrdma_intr0_handler,
			pdev->msix_enabled ? 0 : IRQF_SHARED, DRV_NAME, dev);
	if (ret) {
		dev_err(&dev->pdev->dev,
			"failed to request interrupt 0\n");
		goto out_free_vectors;
	}

	for (i = 1; i < dev->nr_vectors; i++) {
		ret = request_irq(pci_irq_vector(dev->pdev, i),
				i == 1 ? pvrdma_intr1_handler :
					 pvrdma_intrx_handler,
				0, DRV_NAME, dev);
		if (ret) {
			dev_err(&dev->pdev->dev,
				"failed to request interrupt %d\n", i);
			goto free_irqs;
		}
	}

	return 0;

free_irqs:
	while (--i >= 0)
		free_irq(pci_irq_vector(dev->pdev, i), dev);
out_free_vectors:
	pci_free_irq_vectors(pdev);
	return ret;
}

static void pvrdma_free_slots(struct pvrdma_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;

	if (dev->resp_slot)
		dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->resp_slot,
				  dev->dsr->resp_slot_dma);
	if (dev->cmd_slot)
		dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->cmd_slot,
				  dev->dsr->cmd_slot_dma);
}

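/*
 * Program a GID into the device SGID table at the given index via a
 * CREATE_BIND command and mirror it in the local sgid_tbl copy.
 */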
static int pvrdma_add_gid_at_index(struct pvrdma_dev *dev,
				   const union ib_gid *gid,
				   u8 gid_type,
				   int index)
{
	int ret;
	union pvrdma_cmd_req req;
	struct pvrdma_cmd_create_bind *cmd_bind = &req.create_bind;

	if (!dev->sgid_tbl) {
		dev_warn(&dev->pdev->dev, "sgid table not initialized\n");
		return -EINVAL;
	}

	memset(cmd_bind, 0, sizeof(*cmd_bind));
	cmd_bind->hdr.cmd = PVRDMA_CMD_CREATE_BIND;
	memcpy(cmd_bind->new_gid, gid->raw, 16);
	cmd_bind->mtu = ib_mtu_enum_to_int(IB_MTU_1024);
	cmd_bind->vlan = 0xfff;
	cmd_bind->index = index;
	cmd_bind->gid_type = gid_type;

	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not create binding, error: %d\n", ret);
		return -EFAULT;
	}
	memcpy(&dev->sgid_tbl[index], gid, sizeof(*gid));
	return 0;
}

static int pvrdma_add_gid(const struct ib_gid_attr *attr, void **context)
{
	struct pvrdma_dev *dev = to_vdev(attr->device);

	return pvrdma_add_gid_at_index(dev, &attr->gid,
				       ib_gid_type_to_pvrdma(attr->gid_type),
				       attr->index);
}

static int pvrdma_del_gid_at_index(struct pvrdma_dev *dev, int index)
{
	int ret;
	union pvrdma_cmd_req req;
	struct pvrdma_cmd_destroy_bind *cmd_dest = &req.destroy_bind;

	/* Update sgid table. */
	if (!dev->sgid_tbl) {
		dev_warn(&dev->pdev->dev, "sgid table not initialized\n");
		return -EINVAL;
	}

	memset(cmd_dest, 0, sizeof(*cmd_dest));
	cmd_dest->hdr.cmd = PVRDMA_CMD_DESTROY_BIND;
	memcpy(cmd_dest->dest_gid, &dev->sgid_tbl[index], 16);
	cmd_dest->index = index;

	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not destroy binding, error: %d\n", ret);
		return ret;
	}
	memset(&dev->sgid_tbl[index], 0, 16);
	return 0;
}

static int pvrdma_del_gid(const struct ib_gid_attr *attr, void **context)
{
	struct pvrdma_dev *dev = to_vdev(attr->device);

	dev_dbg(&dev->pdev->dev, "removing gid at index %u from %s",
		attr->index, dev->netdev->name);

	return pvrdma_del_gid_at_index(dev, attr->index);
}

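/*
 * React to netdevice events on the paired vmxnet3 interface: propagate
 * link changes as port events and track registration/unregistration of
 * the underlying netdev.
 */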
static void pvrdma_netdevice_event_handle(struct pvrdma_dev *dev,
					  struct net_device *ndev,
					  unsigned long event)
{
	struct pci_dev *pdev_net;
	unsigned int slot;

	switch (event) {
	case NETDEV_REBOOT:
	case NETDEV_DOWN:
		pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);
		break;
	case NETDEV_UP:
		pvrdma_write_reg(dev, PVRDMA_REG_CTL,
				 PVRDMA_DEVICE_CTL_UNQUIESCE);

		mb();

		if (pvrdma_read_reg(dev, PVRDMA_REG_ERR))
			dev_err(&dev->pdev->dev,
				"failed to activate device during link up\n");
		else
			pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
		break;
	case NETDEV_UNREGISTER:
		ib_device_set_netdev(&dev->ib_dev, NULL, 1);
		dev_put(dev->netdev);
		dev->netdev = NULL;
		break;
	case NETDEV_REGISTER:
		/* Paired vmxnet3 shares our bus and slot; its function is 0 */
		slot = PCI_SLOT(dev->pdev->devfn);
		pdev_net = pci_get_slot(dev->pdev->bus,
					PCI_DEVFN(slot, 0));
		if ((dev->netdev == NULL) &&
		    (pci_get_drvdata(pdev_net) == ndev)) {
			/* this is our netdev */
			ib_device_set_netdev(&dev->ib_dev, ndev, 1);
			dev->netdev = ndev;
			dev_hold(ndev);
		}
		pci_dev_put(pdev_net);
		break;

	default:
		dev_dbg(&dev->pdev->dev, "ignore netdevice event %ld on %s\n",
			event, dev_name(&dev->ib_dev.dev));
		break;
	}
}

static void pvrdma_netdevice_event_work(struct work_struct *work)
{
	struct pvrdma_netdevice_work *netdev_work;
	struct pvrdma_dev *dev;

	netdev_work = container_of(work, struct pvrdma_netdevice_work, work);

	mutex_lock(&pvrdma_device_list_lock);
	list_for_each_entry(dev, &pvrdma_device_list, device_link) {
		if ((netdev_work->event == NETDEV_REGISTER) ||
		    (dev->netdev == netdev_work->event_netdev)) {
			pvrdma_netdevice_event_handle(dev,
						      netdev_work->event_netdev,
						      netdev_work->event);
			break;
		}
	}
	mutex_unlock(&pvrdma_device_list_lock);

	kfree(netdev_work);
}

static int pvrdma_netdevice_event(struct notifier_block *this,
				  unsigned long event, void *ptr)
{
	struct net_device *event_netdev = netdev_notifier_info_to_dev(ptr);
	struct pvrdma_netdevice_work *netdev_work;

	netdev_work = kmalloc(sizeof(*netdev_work), GFP_ATOMIC);
	if (!netdev_work)
		return NOTIFY_BAD;

	INIT_WORK(&netdev_work->work, pvrdma_netdevice_event_work);
	netdev_work->event_netdev = event_netdev;
	netdev_work->event = event;
	queue_work(event_wq, &netdev_work->work);

	return NOTIFY_DONE;
}

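/*
 * Probe a PVRDMA PCI function: map the register and UAR BARs, allocate
 * and publish the driver shared region (DSR), locate the paired
 * vmxnet3 netdev, set up interrupts and finally register the IB device.
 */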
static int pvrdma_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *id)
{
	struct pci_dev *pdev_net;
	struct pvrdma_dev *dev;
	int ret;
	unsigned long start;
	unsigned long len;
	dma_addr_t slot_dma = 0;

	dev_dbg(&pdev->dev, "initializing driver %s\n", pci_name(pdev));

	/* Allocate a zeroed-out IB device. */
	dev = ib_alloc_device(pvrdma_dev, ib_dev);
	if (!dev) {
		dev_err(&pdev->dev, "failed to allocate IB device\n");
		return -ENOMEM;
	}

	mutex_lock(&pvrdma_device_list_lock);
	list_add(&dev->device_link, &pvrdma_device_list);
	mutex_unlock(&pvrdma_device_list_lock);

	ret = pvrdma_init_device(dev);
	if (ret)
		goto err_free_device;

	dev->pdev = pdev;
	pci_set_drvdata(pdev, dev);

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto err_free_device;
	}

	dev_dbg(&pdev->dev, "PCI resource flags BAR0 %#lx\n",
		pci_resource_flags(pdev, 0));
	dev_dbg(&pdev->dev, "PCI resource len %#llx\n",
		(unsigned long long)pci_resource_len(pdev, 0));
	dev_dbg(&pdev->dev, "PCI resource start %#llx\n",
		(unsigned long long)pci_resource_start(pdev, 0));
	dev_dbg(&pdev->dev, "PCI resource flags BAR1 %#lx\n",
		pci_resource_flags(pdev, 1));
	dev_dbg(&pdev->dev, "PCI resource len %#llx\n",
		(unsigned long long)pci_resource_len(pdev, 1));
	dev_dbg(&pdev->dev, "PCI resource start %#llx\n",
		(unsigned long long)pci_resource_start(pdev, 1));

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
	    !(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "PCI BAR region not MMIO\n");
		ret = -ENOMEM;
		goto err_disable_pdev;
	}

	ret = pci_request_regions(pdev, DRV_NAME);
	if (ret) {
		dev_err(&pdev->dev, "cannot request PCI resources\n");
		goto err_disable_pdev;
	}

	/* Enable 64-bit DMA, falling back to 32-bit if unavailable. */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (ret != 0) {
			dev_err(&pdev->dev,
				"pci_set_consistent_dma_mask failed\n");
			goto err_free_resource;
		}
	} else {
		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (ret != 0) {
			dev_err(&pdev->dev,
				"pci_set_dma_mask failed\n");
			goto err_free_resource;
		}
	}
	dma_set_max_seg_size(&pdev->dev, UINT_MAX);
	pci_set_master(pdev);

	/* Map register space */
	start = pci_resource_start(dev->pdev, PVRDMA_PCI_RESOURCE_REG);
	len = pci_resource_len(dev->pdev, PVRDMA_PCI_RESOURCE_REG);
	dev->regs = ioremap(start, len);
	if (!dev->regs) {
		dev_err(&pdev->dev, "register mapping failed\n");
		ret = -ENOMEM;
		goto err_free_resource;
	}

	/* Setup per-device UAR. */
	dev->driver_uar.index = 0;
	dev->driver_uar.pfn =
		pci_resource_start(dev->pdev, PVRDMA_PCI_RESOURCE_UAR) >>
		PAGE_SHIFT;
	dev->driver_uar.map =
		ioremap(dev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!dev->driver_uar.map) {
		dev_err(&pdev->dev, "failed to remap UAR pages\n");
		ret = -ENOMEM;
		goto err_unmap_regs;
	}

	dev->dsr_version = pvrdma_read_reg(dev, PVRDMA_REG_VERSION);
	dev_info(&pdev->dev, "device version %d, driver version %d\n",
		 dev->dsr_version, PVRDMA_VERSION);

	dev->dsr = dma_alloc_coherent(&pdev->dev, sizeof(*dev->dsr),
				      &dev->dsrbase, GFP_KERNEL);
	if (!dev->dsr) {
		dev_err(&pdev->dev, "failed to allocate shared region\n");
		ret = -ENOMEM;
		goto err_uar_unmap;
	}

	/* Setup the shared region */
	dev->dsr->driver_version = PVRDMA_VERSION;
	dev->dsr->gos_info.gos_bits = sizeof(void *) == 4 ?
		PVRDMA_GOS_BITS_32 :
		PVRDMA_GOS_BITS_64;
	dev->dsr->gos_info.gos_type = PVRDMA_GOS_TYPE_LINUX;
	dev->dsr->gos_info.gos_ver = 1;

	if (dev->dsr_version < PVRDMA_PPN64_VERSION)
		dev->dsr->uar_pfn = dev->driver_uar.pfn;
	else
		dev->dsr->uar_pfn64 = dev->driver_uar.pfn;

	/* Command slot. */
	dev->cmd_slot = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
					   &slot_dma, GFP_KERNEL);
	if (!dev->cmd_slot) {
		ret = -ENOMEM;
		goto err_free_dsr;
	}

	dev->dsr->cmd_slot_dma = (u64)slot_dma;

	/* Response slot. */
	dev->resp_slot = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
					    &slot_dma, GFP_KERNEL);
	if (!dev->resp_slot) {
		ret = -ENOMEM;
		goto err_free_slots;
	}

	dev->dsr->resp_slot_dma = (u64)slot_dma;

	/* Async event ring */
	dev->dsr->async_ring_pages.num_pages = PVRDMA_NUM_RING_PAGES;
	ret = pvrdma_page_dir_init(dev, &dev->async_pdir,
				   dev->dsr->async_ring_pages.num_pages, true);
	if (ret)
		goto err_free_slots;
	dev->async_ring_state = dev->async_pdir.pages[0];
	dev->dsr->async_ring_pages.pdir_dma = dev->async_pdir.dir_dma;

	/* CQ notification ring */
	dev->dsr->cq_ring_pages.num_pages = PVRDMA_NUM_RING_PAGES;
	ret = pvrdma_page_dir_init(dev, &dev->cq_pdir,
				   dev->dsr->cq_ring_pages.num_pages, true);
	if (ret)
		goto err_free_async_ring;
	dev->cq_ring_state = dev->cq_pdir.pages[0];
	dev->dsr->cq_ring_pages.pdir_dma = dev->cq_pdir.dir_dma;

	/*
	 * Write the PA of the shared region to the device. The writes must be
	 * ordered such that the high bits are written last. When the writes
	 * complete, the device will have filled out the capabilities.
	 */

	pvrdma_write_reg(dev, PVRDMA_REG_DSRLOW, (u32)dev->dsrbase);
	pvrdma_write_reg(dev, PVRDMA_REG_DSRHIGH,
			 (u32)((u64)(dev->dsrbase) >> 32));

	/* Make sure the write is complete before reading status. */
	mb();

	/* The driver supports RoCE V1 and V2. */
	if (!PVRDMA_SUPPORTED(dev)) {
		dev_err(&pdev->dev, "driver needs RoCE v1 or v2 support\n");
		ret = -EFAULT;
		goto err_free_cq_ring;
	}

	/* The paired vmxnet3 shares our bus and slot; its function is 0. */
	pdev_net = pci_get_slot(pdev->bus, PCI_DEVFN(PCI_SLOT(pdev->devfn), 0));
	if (!pdev_net) {
		dev_err(&pdev->dev, "failed to find paired net device\n");
		ret = -ENODEV;
		goto err_free_cq_ring;
	}

	if (pdev_net->vendor != PCI_VENDOR_ID_VMWARE ||
	    pdev_net->device != PCI_DEVICE_ID_VMWARE_VMXNET3) {
		dev_err(&pdev->dev, "failed to find paired vmxnet3 device\n");
		pci_dev_put(pdev_net);
		ret = -ENODEV;
		goto err_free_cq_ring;
	}

	dev->netdev = pci_get_drvdata(pdev_net);
	pci_dev_put(pdev_net);
	if (!dev->netdev) {
		dev_err(&pdev->dev, "failed to get vmxnet3 device\n");
		ret = -ENODEV;
		goto err_free_cq_ring;
	}
	dev_hold(dev->netdev);

	dev_info(&pdev->dev, "paired device to %s\n", dev->netdev->name);

	/* Interrupt setup */
	ret = pvrdma_alloc_intrs(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to allocate interrupts\n");
		ret = -ENOMEM;
		goto err_free_cq_ring;
	}

	/* Allocate UAR table. */
	ret = pvrdma_uar_table_init(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to allocate UAR table\n");
		ret = -ENOMEM;
		goto err_free_intrs;
	}

	/* Allocate GID table */
	dev->sgid_tbl = kcalloc(dev->dsr->caps.gid_tbl_len,
				sizeof(union ib_gid), GFP_KERNEL);
	if (!dev->sgid_tbl) {
		ret = -ENOMEM;
		goto err_free_uar_table;
	}
	dev_dbg(&pdev->dev, "gid table len %d\n", dev->dsr->caps.gid_tbl_len);

	pvrdma_enable_intrs(dev);

	/* Activate pvrdma device */
	pvrdma_write_reg(dev, PVRDMA_REG_CTL, PVRDMA_DEVICE_CTL_ACTIVATE);

	/* Make sure the write is complete before reading status. */
	mb();

	/* Check if device was successfully activated */
	ret = pvrdma_read_reg(dev, PVRDMA_REG_ERR);
	if (ret != 0) {
		dev_err(&pdev->dev, "failed to activate device\n");
		ret = -EFAULT;
		goto err_disable_intr;
	}

	/* Register IB device */
	ret = pvrdma_register_device(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register IB device\n");
		goto err_disable_intr;
	}

	dev->nb_netdev.notifier_call = pvrdma_netdevice_event;
	ret = register_netdevice_notifier(&dev->nb_netdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register netdevice events\n");
		goto err_unreg_ibdev;
	}

	dev_info(&pdev->dev, "attached to device\n");
	return 0;

err_unreg_ibdev:
	ib_unregister_device(&dev->ib_dev);
err_disable_intr:
	pvrdma_disable_intrs(dev);
	kfree(dev->sgid_tbl);
err_free_uar_table:
	pvrdma_uar_table_cleanup(dev);
err_free_intrs:
	pvrdma_free_irq(dev);
	pci_free_irq_vectors(pdev);
err_free_cq_ring:
	if (dev->netdev) {
		dev_put(dev->netdev);
		dev->netdev = NULL;
	}
	pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
err_free_async_ring:
	pvrdma_page_dir_cleanup(dev, &dev->async_pdir);
err_free_slots:
	pvrdma_free_slots(dev);
err_free_dsr:
	dma_free_coherent(&pdev->dev, sizeof(*dev->dsr), dev->dsr,
			  dev->dsrbase);
err_uar_unmap:
	iounmap(dev->driver_uar.map);
err_unmap_regs:
	iounmap(dev->regs);
err_free_resource:
	pci_release_regions(pdev);
err_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
err_free_device:
	mutex_lock(&pvrdma_device_list_lock);
	list_del(&dev->device_link);
	mutex_unlock(&pvrdma_device_list_lock);
	ib_dealloc_device(&dev->ib_dev);
	return ret;
}

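/* Tear down everything set up in pvrdma_pci_probe(), largely in reverse. */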
static void pvrdma_pci_remove(struct pci_dev *pdev)
{
	struct pvrdma_dev *dev = pci_get_drvdata(pdev);

	if (!dev)
		return;

	dev_info(&pdev->dev, "detaching from device\n");

	unregister_netdevice_notifier(&dev->nb_netdev);
	dev->nb_netdev.notifier_call = NULL;

	flush_workqueue(event_wq);

	if (dev->netdev) {
		dev_put(dev->netdev);
		dev->netdev = NULL;
	}

	/* Unregister ib device */
	ib_unregister_device(&dev->ib_dev);

	mutex_lock(&pvrdma_device_list_lock);
	list_del(&dev->device_link);
	mutex_unlock(&pvrdma_device_list_lock);

	pvrdma_disable_intrs(dev);
	pvrdma_free_irq(dev);
	pci_free_irq_vectors(pdev);

	/* Deactivate pvrdma device */
	pvrdma_write_reg(dev, PVRDMA_REG_CTL, PVRDMA_DEVICE_CTL_RESET);
	pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
	pvrdma_page_dir_cleanup(dev, &dev->async_pdir);
	pvrdma_free_slots(dev);
	dma_free_coherent(&pdev->dev, sizeof(*dev->dsr), dev->dsr,
			  dev->dsrbase);

	iounmap(dev->regs);
	kfree(dev->sgid_tbl);
	kfree(dev->cq_tbl);
	kfree(dev->srq_tbl);
	kfree(dev->qp_tbl);
	pvrdma_uar_table_cleanup(dev);
	iounmap(dev->driver_uar.map);

	ib_dealloc_device(&dev->ib_dev);

	/* Free pci resources */
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static const struct pci_device_id pvrdma_pci_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_VMWARE, PCI_DEVICE_ID_VMWARE_PVRDMA), },
	{ 0 },
};

MODULE_DEVICE_TABLE(pci, pvrdma_pci_table);

static struct pci_driver pvrdma_driver = {
	.name		= DRV_NAME,
	.id_table	= pvrdma_pci_table,
	.probe		= pvrdma_pci_probe,
	.remove		= pvrdma_pci_remove,
};

static int __init pvrdma_init(void)
{
	int err;

	event_wq = alloc_ordered_workqueue("pvrdma_event_wq", WQ_MEM_RECLAIM);
	if (!event_wq)
		return -ENOMEM;

	err = pci_register_driver(&pvrdma_driver);
	if (err)
		destroy_workqueue(event_wq);

	return err;
}

static void __exit pvrdma_cleanup(void)
{
	pci_unregister_driver(&pvrdma_driver);

	destroy_workqueue(event_wq);
}

module_init(pvrdma_init);
module_exit(pvrdma_cleanup);

MODULE_AUTHOR("VMware, Inc");
MODULE_DESCRIPTION("VMware Paravirtual RDMA driver");
MODULE_LICENSE("Dual BSD/GPL");