1/*
2 * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
3 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
4 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses.  You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 *     Redistribution and use in source and binary forms, with or
13 *     without modification, are permitted provided that the following
14 *     conditions are met:
15 *
16 *      - Redistributions of source code must retain the above
17 *        copyright notice, this list of conditions and the following
18 *        disclaimer.
19 *
20 *      - Redistributions in binary form must reproduce the above
21 *        copyright notice, this list of conditions and the following
22 *        disclaimer in the documentation and/or other materials
23 *        provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/pci.h>
36#include <linux/poll.h>
37#include <linux/cdev.h>
38#include <linux/swap.h>
39#include <linux/vmalloc.h>
40#include <linux/highmem.h>
41#include <linux/io.h>
42#include <linux/jiffies.h>
43#include <linux/delay.h>
44#include <linux/export.h>
45#include <linux/uio.h>
46#include <linux/pgtable.h>
47
48#include <rdma/ib.h>
49
50#include "qib.h"
51#include "qib_common.h"
52#include "qib_user_sdma.h"
53
54#undef pr_fmt
55#define pr_fmt(fmt) QIB_DRV_NAME ": " fmt
56
57static int qib_open(struct inode *, struct file *);
58static int qib_close(struct inode *, struct file *);
59static ssize_t qib_write(struct file *, const char __user *, size_t, loff_t *);
60static ssize_t qib_write_iter(struct kiocb *, struct iov_iter *);
61static __poll_t qib_poll(struct file *, struct poll_table_struct *);
62static int qib_mmapf(struct file *, struct vm_area_struct *);
63
/*
 * Note that write() and writev() on this device have completely
 * unrelated semantics; this is an unfortunate quirk of the historical
 * userland ABI that must be preserved.
 */
69static const struct file_operations qib_file_ops = {
70	.owner = THIS_MODULE,
71	.write = qib_write,
72	.write_iter = qib_write_iter,
73	.open = qib_open,
74	.release = qib_close,
75	.poll = qib_poll,
76	.mmap = qib_mmapf,
77	.llseek = noop_llseek,
78};
79
80/*
81 * Convert kernel virtual addresses to physical addresses so they don't
82 * potentially conflict with the chip addresses used as mmap offsets.
83 * It doesn't really matter what mmap offset we use as long as we can
84 * interpret it correctly.
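 * mmap_kvaddr() later compares the user-supplied mmap offset against
 * cvt_kvaddr() of each candidate buffer to decide which one to map.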
85 */
86static u64 cvt_kvaddr(void *p)
87{
88	struct page *page;
89	u64 paddr = 0;
90
91	page = vmalloc_to_page(p);
92	if (page)
93		paddr = page_to_pfn(page) << PAGE_SHIFT;
94
95	return paddr;
96}
97
98static int qib_get_base_info(struct file *fp, void __user *ubase,
99			     size_t ubase_size)
100{
101	struct qib_ctxtdata *rcd = ctxt_fp(fp);
102	int ret = 0;
103	struct qib_base_info *kinfo = NULL;
104	struct qib_devdata *dd = rcd->dd;
105	struct qib_pportdata *ppd = rcd->ppd;
106	unsigned subctxt_cnt;
107	int shared, master;
108	size_t sz;
109
110	subctxt_cnt = rcd->subctxt_cnt;
111	if (!subctxt_cnt) {
112		shared = 0;
113		master = 0;
114		subctxt_cnt = 1;
115	} else {
116		shared = 1;
117		master = !subctxt_fp(fp);
118	}
119
120	sz = sizeof(*kinfo);
121	/* If context sharing is not requested, allow the old size structure */
122	if (!shared)
123		sz -= 7 * sizeof(u64);
124	if (ubase_size < sz) {
125		ret = -EINVAL;
126		goto bail;
127	}
128
129	kinfo = kzalloc(sizeof(*kinfo), GFP_KERNEL);
130	if (kinfo == NULL) {
131		ret = -ENOMEM;
132		goto bail;
133	}
134
135	ret = dd->f_get_base_info(rcd, kinfo);
136	if (ret < 0)
137		goto bail;
138
139	kinfo->spi_rcvhdr_cnt = dd->rcvhdrcnt;
140	kinfo->spi_rcvhdrent_size = dd->rcvhdrentsize;
141	kinfo->spi_tidegrcnt = rcd->rcvegrcnt;
142	kinfo->spi_rcv_egrbufsize = dd->rcvegrbufsize;
143	/*
144	 * have to mmap whole thing
145	 */
146	kinfo->spi_rcv_egrbuftotlen =
147		rcd->rcvegrbuf_chunks * rcd->rcvegrbuf_size;
148	kinfo->spi_rcv_egrperchunk = rcd->rcvegrbufs_perchunk;
149	kinfo->spi_rcv_egrchunksize = kinfo->spi_rcv_egrbuftotlen /
150		rcd->rcvegrbuf_chunks;
151	kinfo->spi_tidcnt = dd->rcvtidcnt / subctxt_cnt;
152	if (master)
153		kinfo->spi_tidcnt += dd->rcvtidcnt % subctxt_cnt;
	/*
	 * For this use, this may be cfgctxts summed over all chips that
	 * are configured and present.
	 */
158	kinfo->spi_nctxts = dd->cfgctxts;
159	/* unit (chip/board) our context is on */
160	kinfo->spi_unit = dd->unit;
161	kinfo->spi_port = ppd->port;
162	/* for now, only a single page */
163	kinfo->spi_tid_maxsize = PAGE_SIZE;
164
165	/*
166	 * Doing this per context, and based on the skip value, etc.  This has
167	 * to be the actual buffer size, since the protocol code treats it
168	 * as an array.
169	 *
170	 * These have to be set to user addresses in the user code via mmap.
171	 * These values are used on return to user code for the mmap target
172	 * addresses only.  For 32 bit, same 44 bit address problem, so use
173	 * the physical address, not virtual.  Before 2.6.11, using the
174	 * page_address() macro worked, but in 2.6.11, even that returns the
175	 * full 64 bit address (upper bits all 1's).  So far, using the
176	 * physical addresses (or chip offsets, for chip mapping) works, but
177	 * no doubt some future kernel release will change that, and we'll be
178	 * on to yet another method of dealing with this.
179	 * Normally only one of rcvhdr_tailaddr or rhf_offset is useful
180	 * since the chips with non-zero rhf_offset don't normally
181	 * enable tail register updates to host memory, but for testing,
182	 * both can be enabled and used.
183	 */
184	kinfo->spi_rcvhdr_base = (u64) rcd->rcvhdrq_phys;
185	kinfo->spi_rcvhdr_tailaddr = (u64) rcd->rcvhdrqtailaddr_phys;
186	kinfo->spi_rhf_offset = dd->rhf_offset;
187	kinfo->spi_rcv_egrbufs = (u64) rcd->rcvegr_phys;
188	kinfo->spi_pioavailaddr = (u64) dd->pioavailregs_phys;
189	/* setup per-unit (not port) status area for user programs */
190	kinfo->spi_status = (u64) kinfo->spi_pioavailaddr +
191		(char *) ppd->statusp -
192		(char *) dd->pioavailregs_dma;
193	kinfo->spi_uregbase = (u64) dd->uregbase + dd->ureg_align * rcd->ctxt;
194	if (!shared) {
195		kinfo->spi_piocnt = rcd->piocnt;
196		kinfo->spi_piobufbase = (u64) rcd->piobufs;
197		kinfo->spi_sendbuf_status = cvt_kvaddr(rcd->user_event_mask);
198	} else if (master) {
199		kinfo->spi_piocnt = (rcd->piocnt / subctxt_cnt) +
200				    (rcd->piocnt % subctxt_cnt);
201		/* Master's PIO buffers are after all the slave's */
202		kinfo->spi_piobufbase = (u64) rcd->piobufs +
203			dd->palign *
204			(rcd->piocnt - kinfo->spi_piocnt);
205	} else {
206		unsigned slave = subctxt_fp(fp) - 1;
207
208		kinfo->spi_piocnt = rcd->piocnt / subctxt_cnt;
209		kinfo->spi_piobufbase = (u64) rcd->piobufs +
210			dd->palign * kinfo->spi_piocnt * slave;
211	}
212
213	if (shared) {
214		kinfo->spi_sendbuf_status =
215			cvt_kvaddr(&rcd->user_event_mask[subctxt_fp(fp)]);
216		/* only spi_subctxt_* fields should be set in this block! */
217		kinfo->spi_subctxt_uregbase = cvt_kvaddr(rcd->subctxt_uregbase);
218
219		kinfo->spi_subctxt_rcvegrbuf =
220			cvt_kvaddr(rcd->subctxt_rcvegrbuf);
221		kinfo->spi_subctxt_rcvhdr_base =
222			cvt_kvaddr(rcd->subctxt_rcvhdr_base);
223	}
224
225	/*
226	 * All user buffers are 2KB buffers.  If we ever support
227	 * giving 4KB buffers to user processes, this will need some
228	 * work.  Can't use piobufbase directly, because it has
229	 * both 2K and 4K buffer base values.
230	 */
231	kinfo->spi_pioindex = (kinfo->spi_piobufbase - dd->pio2k_bufbase) /
232		dd->palign;
233	kinfo->spi_pioalign = dd->palign;
234	kinfo->spi_qpair = QIB_KD_QP;
235	/*
236	 * user mode PIO buffers are always 2KB, even when 4KB can
237	 * be received, and sent via the kernel; this is ibmaxlen
238	 * for 2K MTU.
239	 */
240	kinfo->spi_piosize = dd->piosize2k - 2 * sizeof(u32);
241	kinfo->spi_mtu = ppd->ibmaxlen; /* maxlen, not ibmtu */
242	kinfo->spi_ctxt = rcd->ctxt;
243	kinfo->spi_subctxt = subctxt_fp(fp);
244	kinfo->spi_sw_version = QIB_KERN_SWVERSION;
245	kinfo->spi_sw_version |= 1U << 31; /* QLogic-built, not kernel.org */
246	kinfo->spi_hw_version = dd->revision;
247
248	if (master)
249		kinfo->spi_runtime_flags |= QIB_RUNTIME_MASTER;
250
251	sz = (ubase_size < sizeof(*kinfo)) ? ubase_size : sizeof(*kinfo);
252	if (copy_to_user(ubase, kinfo, sz))
253		ret = -EFAULT;
254bail:
255	kfree(kinfo);
256	return ret;
257}
258
259/**
260 * qib_tid_update - update a context TID
261 * @rcd: the context
262 * @fp: the qib device file
263 * @ti: the TID information
264 *
265 * The new implementation as of Oct 2004 is that the driver assigns
266 * the tid and returns it to the caller.   To reduce search time, we
267 * keep a cursor for each context, walking the shadow tid array to find
268 * one that's not in use.
269 *
270 * For now, if we can't allocate the full list, we fail, although
271 * in the long run, we'll allocate as many as we can, and the
272 * caller will deal with that by trying the remaining pages later.
273 * That means that when we fail, we have to mark the tids as not in
274 * use again, in our shadow copy.
275 *
276 * It's up to the caller to free the tids when they are done.
277 * We'll unlock the pages as they free them.
278 *
279 * Also, right now we are locking one page at a time, but since
280 * the intended use of this routine is for a single group of
281 * virtually contiguous pages, that should change to improve
282 * performance.
283 */
284static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp,
285			  const struct qib_tid_info *ti)
286{
287	int ret = 0, ntids;
288	u32 tid, ctxttid, cnt, i, tidcnt, tidoff;
289	u16 *tidlist;
290	struct qib_devdata *dd = rcd->dd;
291	u64 physaddr;
292	unsigned long vaddr;
293	u64 __iomem *tidbase;
294	unsigned long tidmap[8];
295	struct page **pagep = NULL;
296	unsigned subctxt = subctxt_fp(fp);
297
298	if (!dd->pageshadow) {
299		ret = -ENOMEM;
300		goto done;
301	}
302
303	cnt = ti->tidcnt;
304	if (!cnt) {
305		ret = -EFAULT;
306		goto done;
307	}
308	ctxttid = rcd->ctxt * dd->rcvtidcnt;
309	if (!rcd->subctxt_cnt) {
310		tidcnt = dd->rcvtidcnt;
311		tid = rcd->tidcursor;
312		tidoff = 0;
313	} else if (!subctxt) {
314		tidcnt = (dd->rcvtidcnt / rcd->subctxt_cnt) +
315			 (dd->rcvtidcnt % rcd->subctxt_cnt);
316		tidoff = dd->rcvtidcnt - tidcnt;
317		ctxttid += tidoff;
318		tid = tidcursor_fp(fp);
319	} else {
320		tidcnt = dd->rcvtidcnt / rcd->subctxt_cnt;
321		tidoff = tidcnt * (subctxt - 1);
322		ctxttid += tidoff;
323		tid = tidcursor_fp(fp);
324	}
325	if (cnt > tidcnt) {
326		/* make sure it all fits in tid_pg_list */
327		qib_devinfo(dd->pcidev,
328			"Process tried to allocate %u TIDs, only trying max (%u)\n",
329			cnt, tidcnt);
330		cnt = tidcnt;
331	}
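	/*
	 * rcd->tid_pg_list was allocated at open time (see setup_ctxt())
	 * with room for dd->rcvtidcnt page pointers plus dd->rcvtidcnt u16
	 * entries; use the first part as the page-pointer array and the
	 * rest as the TID list, then advance both to this subcontext's
	 * slice.
	 */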
332	pagep = (struct page **) rcd->tid_pg_list;
333	tidlist = (u16 *) &pagep[dd->rcvtidcnt];
334	pagep += tidoff;
335	tidlist += tidoff;
336
337	memset(tidmap, 0, sizeof(tidmap));
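	/*
	 * tidmap records which TID slots this call programs; it drives the
	 * error-cleanup loop below and is also copied back to the user,
	 * who later hands it back to qib_tid_free().
	 */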
338	/* before decrement; chip actual # */
339	ntids = tidcnt;
340	tidbase = (u64 __iomem *) (((char __iomem *) dd->kregbase) +
341				   dd->rcvtidbase +
342				   ctxttid * sizeof(*tidbase));
343
344	/* virtual address of first page in transfer */
345	vaddr = ti->tidvaddr;
346	if (!access_ok((void __user *) vaddr,
347		       cnt * PAGE_SIZE)) {
348		ret = -EFAULT;
349		goto done;
350	}
351	ret = qib_get_user_pages(vaddr, cnt, pagep);
352	if (ret) {
353		/*
354		 * if (ret == -EBUSY)
355		 * We can't continue because the pagep array won't be
356		 * initialized. This should never happen,
357		 * unless perhaps the user has mpin'ed the pages
358		 * themselves.
359		 */
360		qib_devinfo(
361			dd->pcidev,
362			"Failed to lock addr %p, %u pages: errno %d\n",
363			(void *) vaddr, cnt, -ret);
364		goto done;
365	}
366	for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
367		dma_addr_t daddr;
368
369		for (; ntids--; tid++) {
370			if (tid == tidcnt)
371				tid = 0;
372			if (!dd->pageshadow[ctxttid + tid])
373				break;
374		}
375		if (ntids < 0) {
376			/*
377			 * Oops, wrapped all the way through their TIDs,
378			 * and didn't have enough free; see comments at
379			 * start of routine
380			 */
381			i--;    /* last tidlist[i] not filled in */
382			ret = -ENOMEM;
383			break;
384		}
385		ret = qib_map_page(dd->pcidev, pagep[i], &daddr);
386		if (ret)
387			break;
388
389		tidlist[i] = tid + tidoff;
390		/* we "know" system pages and TID pages are same size */
391		dd->pageshadow[ctxttid + tid] = pagep[i];
392		dd->physshadow[ctxttid + tid] = daddr;
393		/*
394		 * don't need atomic or it's overhead
395		 */
396		__set_bit(tid, tidmap);
397		physaddr = dd->physshadow[ctxttid + tid];
398		/* PERFORMANCE: below should almost certainly be cached */
399		dd->f_put_tid(dd, &tidbase[tid],
400				  RCVHQ_RCV_TYPE_EXPECTED, physaddr);
401		/*
402		 * don't check this tid in qib_ctxtshadow, since we
403		 * just filled it in; start with the next one.
404		 */
405		tid++;
406	}
407
408	if (ret) {
409		u32 limit;
410cleanup:
411		/* jump here if copy out of updated info failed... */
412		/* same code that's in qib_free_tid() */
413		limit = sizeof(tidmap) * BITS_PER_BYTE;
414		if (limit > tidcnt)
415			/* just in case size changes in future */
416			limit = tidcnt;
417		tid = find_first_bit((const unsigned long *)tidmap, limit);
418		for (; tid < limit; tid++) {
419			if (!test_bit(tid, tidmap))
420				continue;
421			if (dd->pageshadow[ctxttid + tid]) {
422				dma_addr_t phys;
423
424				phys = dd->physshadow[ctxttid + tid];
425				dd->physshadow[ctxttid + tid] = dd->tidinvalid;
426				/* PERFORMANCE: below should almost certainly
427				 * be cached
428				 */
429				dd->f_put_tid(dd, &tidbase[tid],
430					      RCVHQ_RCV_TYPE_EXPECTED,
431					      dd->tidinvalid);
432				pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
433					       PCI_DMA_FROMDEVICE);
434				dd->pageshadow[ctxttid + tid] = NULL;
435			}
436		}
437		qib_release_user_pages(pagep, cnt);
438	} else {
		/*
		 * Copy the updated array, with qib_tid's filled in, back
		 * to user.  Since we did the copy in already, this "should
		 * never fail".  If it does, we have to clean up...
		 */
444		if (copy_to_user((void __user *)
445				 (unsigned long) ti->tidlist,
446				 tidlist, cnt * sizeof(*tidlist))) {
447			ret = -EFAULT;
448			goto cleanup;
449		}
450		if (copy_to_user(u64_to_user_ptr(ti->tidmap),
451				 tidmap, sizeof(tidmap))) {
452			ret = -EFAULT;
453			goto cleanup;
454		}
455		if (tid == tidcnt)
456			tid = 0;
457		if (!rcd->subctxt_cnt)
458			rcd->tidcursor = tid;
459		else
460			tidcursor_fp(fp) = tid;
461	}
462
463done:
464	return ret;
465}
466
467/**
468 * qib_tid_free - free a context TID
469 * @rcd: the context
470 * @subctxt: the subcontext
471 * @ti: the TID info
472 *
473 * right now we are unlocking one page at a time, but since
474 * the intended use of this routine is for a single group of
475 * virtually contiguous pages, that should change to improve
476 * performance.  We check that the TID is in range for this context
477 * but otherwise don't check validity; if user has an error and
478 * frees the wrong tid, it's only their own data that can thereby
 * be corrupted.  We do check that the TID was in use, for sanity.
 * We always use our idea of the saved address, not the address that
481 * they pass in to us.
482 */
483static int qib_tid_free(struct qib_ctxtdata *rcd, unsigned subctxt,
484			const struct qib_tid_info *ti)
485{
486	int ret = 0;
487	u32 tid, ctxttid, cnt, limit, tidcnt;
488	struct qib_devdata *dd = rcd->dd;
489	u64 __iomem *tidbase;
490	unsigned long tidmap[8];
491
492	if (!dd->pageshadow) {
493		ret = -ENOMEM;
494		goto done;
495	}
496
497	if (copy_from_user(tidmap, u64_to_user_ptr(ti->tidmap),
498			   sizeof(tidmap))) {
499		ret = -EFAULT;
500		goto done;
501	}
502
503	ctxttid = rcd->ctxt * dd->rcvtidcnt;
504	if (!rcd->subctxt_cnt)
505		tidcnt = dd->rcvtidcnt;
506	else if (!subctxt) {
507		tidcnt = (dd->rcvtidcnt / rcd->subctxt_cnt) +
508			 (dd->rcvtidcnt % rcd->subctxt_cnt);
509		ctxttid += dd->rcvtidcnt - tidcnt;
510	} else {
511		tidcnt = dd->rcvtidcnt / rcd->subctxt_cnt;
512		ctxttid += tidcnt * (subctxt - 1);
513	}
514	tidbase = (u64 __iomem *) ((char __iomem *)(dd->kregbase) +
515				   dd->rcvtidbase +
516				   ctxttid * sizeof(*tidbase));
517
518	limit = sizeof(tidmap) * BITS_PER_BYTE;
519	if (limit > tidcnt)
520		/* just in case size changes in future */
521		limit = tidcnt;
522	tid = find_first_bit(tidmap, limit);
523	for (cnt = 0; tid < limit; tid++) {
524		/*
525		 * small optimization; if we detect a run of 3 or so without
526		 * any set, use find_first_bit again.  That's mainly to
527		 * accelerate the case where we wrapped, so we have some at
528		 * the beginning, and some at the end, and a big gap
529		 * in the middle.
530		 */
531		if (!test_bit(tid, tidmap))
532			continue;
533		cnt++;
534		if (dd->pageshadow[ctxttid + tid]) {
535			struct page *p;
536			dma_addr_t phys;
537
538			p = dd->pageshadow[ctxttid + tid];
539			dd->pageshadow[ctxttid + tid] = NULL;
540			phys = dd->physshadow[ctxttid + tid];
541			dd->physshadow[ctxttid + tid] = dd->tidinvalid;
542			/* PERFORMANCE: below should almost certainly be
543			 * cached
544			 */
545			dd->f_put_tid(dd, &tidbase[tid],
546				      RCVHQ_RCV_TYPE_EXPECTED, dd->tidinvalid);
547			pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
548				       PCI_DMA_FROMDEVICE);
549			qib_release_user_pages(&p, 1);
550		}
551	}
552done:
553	return ret;
554}
555
556/**
557 * qib_set_part_key - set a partition key
558 * @rcd: the context
559 * @key: the key
560 *
561 * We can have up to 4 active at a time (other than the default, which is
562 * always allowed).  This is somewhat tricky, since multiple contexts may set
563 * the same key, so we reference count them, and clean up at exit.  All 4
564 * partition keys are packed into a single qlogic_ib register.  It's an
565 * error for a process to set the same pkey multiple times.  We provide no
566 * mechanism to de-allocate a pkey at this time, we may eventually need to
567 * do that.  I've used the atomic operations, and no locking, and only make
568 * a single pass through what's available.  This should be more than
569 * adequate for some time. I'll think about spinlocks or the like if and as
570 * it's necessary.
571 */
572static int qib_set_part_key(struct qib_ctxtdata *rcd, u16 key)
573{
574	struct qib_pportdata *ppd = rcd->ppd;
575	int i, pidx = -1;
576	bool any = false;
577	u16 lkey = key & 0x7FFF;
578
579	if (lkey == (QIB_DEFAULT_P_KEY & 0x7FFF))
580		/* nothing to do; this key always valid */
581		return 0;
582
583	if (!lkey)
584		return -EINVAL;
585
586	/*
587	 * Set the full membership bit, because it has to be
588	 * set in the register or the packet, and it seems
589	 * cleaner to set in the register than to force all
590	 * callers to set it.
591	 */
592	key |= 0x8000;
593
594	for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
595		if (!rcd->pkeys[i] && pidx == -1)
596			pidx = i;
597		if (rcd->pkeys[i] == key)
598			return -EEXIST;
599	}
600	if (pidx == -1)
601		return -EBUSY;
602	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
603		if (!ppd->pkeys[i]) {
604			any = true;
605			continue;
606		}
607		if (ppd->pkeys[i] == key) {
608			atomic_t *pkrefs = &ppd->pkeyrefs[i];
609
610			if (atomic_inc_return(pkrefs) > 1) {
611				rcd->pkeys[pidx] = key;
612				return 0;
613			}
614			/*
615			 * lost race, decrement count, catch below
616			 */
617			atomic_dec(pkrefs);
618			any = true;
619		}
620		if ((ppd->pkeys[i] & 0x7FFF) == lkey)
621			/*
622			 * It makes no sense to have both the limited and
623			 * full membership PKEY set at the same time since
624			 * the unlimited one will disable the limited one.
625			 */
626			return -EEXIST;
627	}
628	if (!any)
629		return -EBUSY;
630	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
631		if (!ppd->pkeys[i] &&
632		    atomic_inc_return(&ppd->pkeyrefs[i]) == 1) {
633			rcd->pkeys[pidx] = key;
634			ppd->pkeys[i] = key;
635			(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
636			return 0;
637		}
638	}
639	return -EBUSY;
640}
641
642/**
643 * qib_manage_rcvq - manage a context's receive queue
644 * @rcd: the context
645 * @subctxt: the subcontext
646 * @start_stop: action to carry out
647 *
648 * start_stop == 0 disables receive on the context, for use in queue
649 * overflow conditions.  start_stop==1 re-enables, to be used to
650 * re-init the software copy of the head register
651 */
652static int qib_manage_rcvq(struct qib_ctxtdata *rcd, unsigned subctxt,
653			   int start_stop)
654{
655	struct qib_devdata *dd = rcd->dd;
656	unsigned int rcvctrl_op;
657
658	if (subctxt)
659		goto bail;
660	/* atomically clear receive enable ctxt. */
661	if (start_stop) {
662		/*
663		 * On enable, force in-memory copy of the tail register to
664		 * 0, so that protocol code doesn't have to worry about
665		 * whether or not the chip has yet updated the in-memory
666		 * copy or not on return from the system call. The chip
		 * always resets its tail register back to 0 on a
668		 * transition from disabled to enabled.
669		 */
670		if (rcd->rcvhdrtail_kvaddr)
671			qib_clear_rcvhdrtail(rcd);
672		rcvctrl_op = QIB_RCVCTRL_CTXT_ENB;
673	} else
674		rcvctrl_op = QIB_RCVCTRL_CTXT_DIS;
675	dd->f_rcvctrl(rcd->ppd, rcvctrl_op, rcd->ctxt);
676	/* always; new head should be equal to new tail; see above */
677bail:
678	return 0;
679}
680
681static void qib_clean_part_key(struct qib_ctxtdata *rcd,
682			       struct qib_devdata *dd)
683{
684	int i, j, pchanged = 0;
685	struct qib_pportdata *ppd = rcd->ppd;
686
687	for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
688		if (!rcd->pkeys[i])
689			continue;
690		for (j = 0; j < ARRAY_SIZE(ppd->pkeys); j++) {
691			/* check for match independent of the global bit */
692			if ((ppd->pkeys[j] & 0x7fff) !=
693			    (rcd->pkeys[i] & 0x7fff))
694				continue;
695			if (atomic_dec_and_test(&ppd->pkeyrefs[j])) {
696				ppd->pkeys[j] = 0;
697				pchanged++;
698			}
699			break;
700		}
701		rcd->pkeys[i] = 0;
702	}
703	if (pchanged)
704		(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
705}
706
707/* common code for the mappings on dma_alloc_coherent mem */
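/* Chip MMIO ranges are handled separately by mmap_ureg() and mmap_piobufs(). */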
708static int qib_mmap_mem(struct vm_area_struct *vma, struct qib_ctxtdata *rcd,
709			unsigned len, void *kvaddr, u32 write_ok, char *what)
710{
711	struct qib_devdata *dd = rcd->dd;
712	unsigned long pfn;
713	int ret;
714
715	if ((vma->vm_end - vma->vm_start) > len) {
716		qib_devinfo(dd->pcidev,
717			 "FAIL on %s: len %lx > %x\n", what,
718			 vma->vm_end - vma->vm_start, len);
719		ret = -EFAULT;
720		goto bail;
721	}
722
723	/*
	 * Shared context user code requires the rcvhdrq mapped r/w; others
	 * are only allowed a readonly mapping.
726	 */
727	if (!write_ok) {
728		if (vma->vm_flags & VM_WRITE) {
729			qib_devinfo(dd->pcidev,
730				 "%s must be mapped readonly\n", what);
731			ret = -EPERM;
732			goto bail;
733		}
734
735		/* don't allow them to later change with mprotect */
736		vma->vm_flags &= ~VM_MAYWRITE;
737	}
738
739	pfn = virt_to_phys(kvaddr) >> PAGE_SHIFT;
740	ret = remap_pfn_range(vma, vma->vm_start, pfn,
741			      len, vma->vm_page_prot);
742	if (ret)
743		qib_devinfo(dd->pcidev,
744			"%s ctxt%u mmap of %lx, %x bytes failed: %d\n",
745			what, rcd->ctxt, pfn, len, ret);
746bail:
747	return ret;
748}
749
750static int mmap_ureg(struct vm_area_struct *vma, struct qib_devdata *dd,
751		     u64 ureg)
752{
753	unsigned long phys;
754	unsigned long sz;
755	int ret;
756
757	/*
758	 * This is real hardware, so use io_remap.  This is the mechanism
759	 * for the user process to update the head registers for their ctxt
760	 * in the chip.
761	 */
762	sz = dd->flags & QIB_HAS_HDRSUPP ? 2 * PAGE_SIZE : PAGE_SIZE;
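	/*
	 * Chips with header suppression (QIB_HAS_HDRSUPP) allow a two-page
	 * user register mapping; others allow a single page.
	 */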
763	if ((vma->vm_end - vma->vm_start) > sz) {
764		qib_devinfo(dd->pcidev,
765			"FAIL mmap userreg: reqlen %lx > PAGE\n",
766			vma->vm_end - vma->vm_start);
767		ret = -EFAULT;
768	} else {
769		phys = dd->physaddr + ureg;
770		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
771
772		vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
773		ret = io_remap_pfn_range(vma, vma->vm_start,
774					 phys >> PAGE_SHIFT,
775					 vma->vm_end - vma->vm_start,
776					 vma->vm_page_prot);
777	}
778	return ret;
779}
780
781static int mmap_piobufs(struct vm_area_struct *vma,
782			struct qib_devdata *dd,
783			struct qib_ctxtdata *rcd,
784			unsigned piobufs, unsigned piocnt)
785{
786	unsigned long phys;
787	int ret;
788
789	/*
790	 * When we map the PIO buffers in the chip, we want to map them as
791	 * writeonly, no read possible; unfortunately, x86 doesn't allow
792	 * for this in hardware, but we still prevent users from asking
793	 * for it.
794	 */
795	if ((vma->vm_end - vma->vm_start) > (piocnt * dd->palign)) {
796		qib_devinfo(dd->pcidev,
797			"FAIL mmap piobufs: reqlen %lx > PAGE\n",
798			 vma->vm_end - vma->vm_start);
799		ret = -EINVAL;
800		goto bail;
801	}
802
803	phys = dd->physaddr + piobufs;
804
805#if defined(__powerpc__)
806	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
807#endif
808
809	/*
810	 * don't allow them to later change to readable with mprotect (for when
811	 * not initially mapped readable, as is normally the case)
812	 */
813	vma->vm_flags &= ~VM_MAYREAD;
814	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
815
816	/* We used PAT if wc_cookie == 0 */
817	if (!dd->wc_cookie)
818		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
819
820	ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
821				 vma->vm_end - vma->vm_start,
822				 vma->vm_page_prot);
823bail:
824	return ret;
825}
826
827static int mmap_rcvegrbufs(struct vm_area_struct *vma,
828			   struct qib_ctxtdata *rcd)
829{
830	struct qib_devdata *dd = rcd->dd;
831	unsigned long start, size;
832	size_t total_size, i;
833	unsigned long pfn;
834	int ret;
835
836	size = rcd->rcvegrbuf_size;
837	total_size = rcd->rcvegrbuf_chunks * size;
838	if ((vma->vm_end - vma->vm_start) > total_size) {
839		qib_devinfo(dd->pcidev,
840			"FAIL on egr bufs: reqlen %lx > actual %lx\n",
841			 vma->vm_end - vma->vm_start,
842			 (unsigned long) total_size);
843		ret = -EINVAL;
844		goto bail;
845	}
846
847	if (vma->vm_flags & VM_WRITE) {
848		qib_devinfo(dd->pcidev,
849			"Can't map eager buffers as writable (flags=%lx)\n",
850			vma->vm_flags);
851		ret = -EPERM;
852		goto bail;
853	}
854	/* don't allow them to later change to writeable with mprotect */
855	vma->vm_flags &= ~VM_MAYWRITE;
856
857	start = vma->vm_start;
858
859	for (i = 0; i < rcd->rcvegrbuf_chunks; i++, start += size) {
860		pfn = virt_to_phys(rcd->rcvegrbuf[i]) >> PAGE_SHIFT;
861		ret = remap_pfn_range(vma, start, pfn, size,
862				      vma->vm_page_prot);
863		if (ret < 0)
864			goto bail;
865	}
866	ret = 0;
867
868bail:
869	return ret;
870}
871
872/*
873 * qib_file_vma_fault - handle a VMA page fault.
874 */
875static vm_fault_t qib_file_vma_fault(struct vm_fault *vmf)
876{
877	struct page *page;
878
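	/*
	 * mmap_kvaddr() stored the kernel virtual pfn of the vmalloc'ed
	 * shared-context buffer in vm_pgoff, so vmf->pgoff << PAGE_SHIFT
	 * is the kernel virtual address of the faulting page.
	 */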
879	page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
880	if (!page)
881		return VM_FAULT_SIGBUS;
882
883	get_page(page);
884	vmf->page = page;
885
886	return 0;
887}
888
889static const struct vm_operations_struct qib_file_vm_ops = {
890	.fault = qib_file_vma_fault,
891};
892
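/*
 * Map one of the vmalloc'ed shared-context buffers if the requested mmap
 * offset matches one of them.  Returns 1 if the offset matched and the VMA
 * was set up (faults are then served by qib_file_vma_fault()), 0 if it
 * matched no kernel buffer, or a negative errno on failure.
 */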
893static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
894		       struct qib_ctxtdata *rcd, unsigned subctxt)
895{
896	struct qib_devdata *dd = rcd->dd;
897	unsigned subctxt_cnt;
898	unsigned long len;
899	void *addr;
900	size_t size;
901	int ret = 0;
902
903	subctxt_cnt = rcd->subctxt_cnt;
904	size = rcd->rcvegrbuf_chunks * rcd->rcvegrbuf_size;
905
906	/*
907	 * Each process has all the subctxt uregbase, rcvhdrq, and
908	 * rcvegrbufs mmapped - as an array for all the processes,
909	 * and also separately for this process.
910	 */
911	if (pgaddr == cvt_kvaddr(rcd->subctxt_uregbase)) {
912		addr = rcd->subctxt_uregbase;
913		size = PAGE_SIZE * subctxt_cnt;
914	} else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvhdr_base)) {
915		addr = rcd->subctxt_rcvhdr_base;
916		size = rcd->rcvhdrq_size * subctxt_cnt;
917	} else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvegrbuf)) {
918		addr = rcd->subctxt_rcvegrbuf;
919		size *= subctxt_cnt;
920	} else if (pgaddr == cvt_kvaddr(rcd->subctxt_uregbase +
921					PAGE_SIZE * subctxt)) {
922		addr = rcd->subctxt_uregbase + PAGE_SIZE * subctxt;
923		size = PAGE_SIZE;
924	} else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvhdr_base +
925					rcd->rcvhdrq_size * subctxt)) {
926		addr = rcd->subctxt_rcvhdr_base +
927			rcd->rcvhdrq_size * subctxt;
928		size = rcd->rcvhdrq_size;
929	} else if (pgaddr == cvt_kvaddr(&rcd->user_event_mask[subctxt])) {
930		addr = rcd->user_event_mask;
931		size = PAGE_SIZE;
932	} else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvegrbuf +
933					size * subctxt)) {
934		addr = rcd->subctxt_rcvegrbuf + size * subctxt;
935		/* rcvegrbufs are read-only on the slave */
936		if (vma->vm_flags & VM_WRITE) {
937			qib_devinfo(dd->pcidev,
938				 "Can't map eager buffers as writable (flags=%lx)\n",
939				 vma->vm_flags);
940			ret = -EPERM;
941			goto bail;
942		}
943		/*
944		 * Don't allow permission to later change to writeable
945		 * with mprotect.
946		 */
947		vma->vm_flags &= ~VM_MAYWRITE;
948	} else
949		goto bail;
950	len = vma->vm_end - vma->vm_start;
951	if (len > size) {
952		ret = -EINVAL;
953		goto bail;
954	}
955
956	vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
957	vma->vm_ops = &qib_file_vm_ops;
958	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
959	ret = 1;
960
961bail:
962	return ret;
963}
964
965/**
966 * qib_mmapf - mmap various structures into user space
967 * @fp: the file pointer
968 * @vma: the VM area
969 *
970 * We use this to have a shared buffer between the kernel and the user code
971 * for the rcvhdr queue, egr buffers, and the per-context user regs and pio
972 * buffers in the chip.  We have the open and close entries so we can bump
973 * the ref count and keep the driver from being unloaded while still mapped.
974 */
975static int qib_mmapf(struct file *fp, struct vm_area_struct *vma)
976{
977	struct qib_ctxtdata *rcd;
978	struct qib_devdata *dd;
979	u64 pgaddr, ureg;
980	unsigned piobufs, piocnt;
981	int ret, match = 1;
982
983	rcd = ctxt_fp(fp);
984	if (!rcd || !(vma->vm_flags & VM_SHARED)) {
985		ret = -EINVAL;
986		goto bail;
987	}
988	dd = rcd->dd;
989
990	/*
991	 * This is the qib_do_user_init() code, mapping the shared buffers
992	 * and per-context user registers into the user process. The address
993	 * referred to by vm_pgoff is the file offset passed via mmap().
994	 * For shared contexts, this is the kernel vmalloc() address of the
995	 * pages to share with the master.
996	 * For non-shared or master ctxts, this is a physical address.
997	 * We only do one mmap for each space mapped.
998	 */
999	pgaddr = vma->vm_pgoff << PAGE_SHIFT;
1000
1001	/*
1002	 * Check for 0 in case one of the allocations failed, but user
1003	 * called mmap anyway.
1004	 */
1005	if (!pgaddr)  {
1006		ret = -EINVAL;
1007		goto bail;
1008	}
1009
1010	/*
1011	 * Physical addresses must fit in 40 bits for our hardware.
1012	 * Check for kernel virtual addresses first, anything else must
1013	 * match a HW or memory address.
1014	 */
1015	ret = mmap_kvaddr(vma, pgaddr, rcd, subctxt_fp(fp));
1016	if (ret) {
1017		if (ret > 0)
1018			ret = 0;
1019		goto bail;
1020	}
1021
1022	ureg = dd->uregbase + dd->ureg_align * rcd->ctxt;
1023	if (!rcd->subctxt_cnt) {
1024		/* ctxt is not shared */
1025		piocnt = rcd->piocnt;
1026		piobufs = rcd->piobufs;
1027	} else if (!subctxt_fp(fp)) {
1028		/* caller is the master */
1029		piocnt = (rcd->piocnt / rcd->subctxt_cnt) +
1030			 (rcd->piocnt % rcd->subctxt_cnt);
1031		piobufs = rcd->piobufs +
1032			dd->palign * (rcd->piocnt - piocnt);
1033	} else {
1034		unsigned slave = subctxt_fp(fp) - 1;
1035
1036		/* caller is a slave */
1037		piocnt = rcd->piocnt / rcd->subctxt_cnt;
1038		piobufs = rcd->piobufs + dd->palign * piocnt * slave;
1039	}
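	/* This mirrors the master/slave PIO buffer split in qib_get_base_info(). */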
1040
1041	if (pgaddr == ureg)
1042		ret = mmap_ureg(vma, dd, ureg);
1043	else if (pgaddr == piobufs)
1044		ret = mmap_piobufs(vma, dd, rcd, piobufs, piocnt);
1045	else if (pgaddr == dd->pioavailregs_phys)
1046		/* in-memory copy of pioavail registers */
1047		ret = qib_mmap_mem(vma, rcd, PAGE_SIZE,
1048				   (void *) dd->pioavailregs_dma, 0,
1049				   "pioavail registers");
1050	else if (pgaddr == rcd->rcvegr_phys)
1051		ret = mmap_rcvegrbufs(vma, rcd);
1052	else if (pgaddr == (u64) rcd->rcvhdrq_phys)
1053		/*
1054		 * The rcvhdrq itself; multiple pages, contiguous
1055		 * from an i/o perspective.  Shared contexts need
1056		 * to map r/w, so we allow writing.
1057		 */
1058		ret = qib_mmap_mem(vma, rcd, rcd->rcvhdrq_size,
1059				   rcd->rcvhdrq, 1, "rcvhdrq");
1060	else if (pgaddr == (u64) rcd->rcvhdrqtailaddr_phys)
1061		/* in-memory copy of rcvhdrq tail register */
1062		ret = qib_mmap_mem(vma, rcd, PAGE_SIZE,
1063				   rcd->rcvhdrtail_kvaddr, 0,
1064				   "rcvhdrq tail");
1065	else
1066		match = 0;
1067	if (!match)
1068		ret = -EINVAL;
1069
1070	vma->vm_private_data = NULL;
1071
1072	if (ret < 0)
1073		qib_devinfo(dd->pcidev,
1074			 "mmap Failure %d: off %llx len %lx\n",
1075			 -ret, (unsigned long long)pgaddr,
1076			 vma->vm_end - vma->vm_start);
1077bail:
1078	return ret;
1079}
1080
1081static __poll_t qib_poll_urgent(struct qib_ctxtdata *rcd,
1082				    struct file *fp,
1083				    struct poll_table_struct *pt)
1084{
1085	struct qib_devdata *dd = rcd->dd;
1086	__poll_t pollflag;
1087
1088	poll_wait(fp, &rcd->wait, pt);
1089
1090	spin_lock_irq(&dd->uctxt_lock);
1091	if (rcd->urgent != rcd->urgent_poll) {
1092		pollflag = EPOLLIN | EPOLLRDNORM;
1093		rcd->urgent_poll = rcd->urgent;
1094	} else {
1095		pollflag = 0;
1096		set_bit(QIB_CTXT_WAITING_URG, &rcd->flag);
1097	}
1098	spin_unlock_irq(&dd->uctxt_lock);
1099
1100	return pollflag;
1101}
1102
1103static __poll_t qib_poll_next(struct qib_ctxtdata *rcd,
1104				  struct file *fp,
1105				  struct poll_table_struct *pt)
1106{
1107	struct qib_devdata *dd = rcd->dd;
1108	__poll_t pollflag;
1109
1110	poll_wait(fp, &rcd->wait, pt);
1111
1112	spin_lock_irq(&dd->uctxt_lock);
1113	if (dd->f_hdrqempty(rcd)) {
1114		set_bit(QIB_CTXT_WAITING_RCV, &rcd->flag);
1115		dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_INTRAVAIL_ENB, rcd->ctxt);
1116		pollflag = 0;
1117	} else
1118		pollflag = EPOLLIN | EPOLLRDNORM;
1119	spin_unlock_irq(&dd->uctxt_lock);
1120
1121	return pollflag;
1122}
1123
1124static __poll_t qib_poll(struct file *fp, struct poll_table_struct *pt)
1125{
1126	struct qib_ctxtdata *rcd;
1127	__poll_t pollflag;
1128
1129	rcd = ctxt_fp(fp);
1130	if (!rcd)
1131		pollflag = EPOLLERR;
1132	else if (rcd->poll_type == QIB_POLL_TYPE_URGENT)
1133		pollflag = qib_poll_urgent(rcd, fp, pt);
1134	else  if (rcd->poll_type == QIB_POLL_TYPE_ANYRCV)
1135		pollflag = qib_poll_next(rcd, fp, pt);
1136	else /* invalid */
1137		pollflag = EPOLLERR;
1138
1139	return pollflag;
1140}
1141
1142static void assign_ctxt_affinity(struct file *fp, struct qib_devdata *dd)
1143{
1144	struct qib_filedata *fd = fp->private_data;
1145	const unsigned int weight = current->nr_cpus_allowed;
1146	const struct cpumask *local_mask = cpumask_of_pcibus(dd->pcidev->bus);
1147	int local_cpu;
1148
1149	/*
	 * If the process has NOT already set its affinity, select and
1151	 * reserve a processor for it on the local NUMA node.
1152	 */
1153	if ((weight >= qib_cpulist_count) &&
1154		(cpumask_weight(local_mask) <= qib_cpulist_count)) {
1155		for_each_cpu(local_cpu, local_mask)
1156			if (!test_and_set_bit(local_cpu, qib_cpulist)) {
1157				fd->rec_cpu_num = local_cpu;
1158				return;
1159			}
1160	}
1161
1162	/*
	 * If the process has NOT already set its affinity, select and
	 * reserve a processor for it, as a rendezvous for all
	 * users of the driver.  If they don't actually later
	 * set affinity to this cpu, or set it to some other cpu,
	 * it just means that sooner or later we don't recommend
	 * a cpu, and let the scheduler do its best.
1169	 */
1170	if (weight >= qib_cpulist_count) {
1171		int cpu;
1172
1173		cpu = find_first_zero_bit(qib_cpulist,
1174					  qib_cpulist_count);
1175		if (cpu == qib_cpulist_count)
1176			qib_dev_err(dd,
1177			"no cpus avail for affinity PID %u\n",
1178			current->pid);
1179		else {
1180			__set_bit(cpu, qib_cpulist);
1181			fd->rec_cpu_num = cpu;
1182		}
1183	}
1184}
1185
1186/*
1187 * Check that userland and driver are compatible for subcontexts.
1188 */
1189static int qib_compatible_subctxts(int user_swmajor, int user_swminor)
1190{
1191	/* this code is written long-hand for clarity */
1192	if (QIB_USER_SWMAJOR != user_swmajor) {
1193		/* no promise of compatibility if major mismatch */
1194		return 0;
1195	}
1196	if (QIB_USER_SWMAJOR == 1) {
1197		switch (QIB_USER_SWMINOR) {
1198		case 0:
1199		case 1:
1200		case 2:
1201			/* no subctxt implementation so cannot be compatible */
1202			return 0;
1203		case 3:
1204			/* 3 is only compatible with itself */
1205			return user_swminor == 3;
1206		default:
1207			/* >= 4 are compatible (or are expected to be) */
1208			return user_swminor <= QIB_USER_SWMINOR;
1209		}
1210	}
1211	/* make no promises yet for future major versions */
1212	return 0;
1213}
1214
1215static int init_subctxts(struct qib_devdata *dd,
1216			 struct qib_ctxtdata *rcd,
1217			 const struct qib_user_info *uinfo)
1218{
1219	int ret = 0;
1220	unsigned num_subctxts;
1221	size_t size;
1222
1223	/*
1224	 * If the user is requesting zero subctxts,
1225	 * skip the subctxt allocation.
1226	 */
1227	if (uinfo->spu_subctxt_cnt <= 0)
1228		goto bail;
1229	num_subctxts = uinfo->spu_subctxt_cnt;
1230
1231	/* Check for subctxt compatibility */
1232	if (!qib_compatible_subctxts(uinfo->spu_userversion >> 16,
1233		uinfo->spu_userversion & 0xffff)) {
1234		qib_devinfo(dd->pcidev,
1235			 "Mismatched user version (%d.%d) and driver version (%d.%d) while context sharing. Ensure that driver and library are from the same release.\n",
1236			 (int) (uinfo->spu_userversion >> 16),
1237			 (int) (uinfo->spu_userversion & 0xffff),
1238			 QIB_USER_SWMAJOR, QIB_USER_SWMINOR);
1239		goto bail;
1240	}
1241	if (num_subctxts > QLOGIC_IB_MAX_SUBCTXT) {
1242		ret = -EINVAL;
1243		goto bail;
1244	}
1245
1246	rcd->subctxt_uregbase = vmalloc_user(PAGE_SIZE * num_subctxts);
1247	if (!rcd->subctxt_uregbase) {
1248		ret = -ENOMEM;
1249		goto bail;
1250	}
1251	/* Note: rcd->rcvhdrq_size isn't initialized yet. */
1252	size = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize *
1253		     sizeof(u32), PAGE_SIZE) * num_subctxts;
1254	rcd->subctxt_rcvhdr_base = vmalloc_user(size);
1255	if (!rcd->subctxt_rcvhdr_base) {
1256		ret = -ENOMEM;
1257		goto bail_ureg;
1258	}
1259
1260	rcd->subctxt_rcvegrbuf = vmalloc_user(rcd->rcvegrbuf_chunks *
1261					      rcd->rcvegrbuf_size *
1262					      num_subctxts);
1263	if (!rcd->subctxt_rcvegrbuf) {
1264		ret = -ENOMEM;
1265		goto bail_rhdr;
1266	}
1267
1268	rcd->subctxt_cnt = uinfo->spu_subctxt_cnt;
1269	rcd->subctxt_id = uinfo->spu_subctxt_id;
1270	rcd->active_slaves = 1;
1271	rcd->redirect_seq_cnt = 1;
1272	set_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag);
1273	goto bail;
1274
1275bail_rhdr:
1276	vfree(rcd->subctxt_rcvhdr_base);
1277bail_ureg:
1278	vfree(rcd->subctxt_uregbase);
1279	rcd->subctxt_uregbase = NULL;
1280bail:
1281	return ret;
1282}
1283
1284static int setup_ctxt(struct qib_pportdata *ppd, int ctxt,
1285		      struct file *fp, const struct qib_user_info *uinfo)
1286{
1287	struct qib_filedata *fd = fp->private_data;
1288	struct qib_devdata *dd = ppd->dd;
1289	struct qib_ctxtdata *rcd;
1290	void *ptmp = NULL;
1291	int ret;
1292	int numa_id;
1293
1294	assign_ctxt_affinity(fp, dd);
1295
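	/*
	 * When qib_numa_aware is set, allocate the context on the NUMA node
	 * of the CPU reserved above (or the current node if none was
	 * reserved); otherwise use the node assigned to the device.
	 */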
1296	numa_id = qib_numa_aware ? ((fd->rec_cpu_num != -1) ?
1297		cpu_to_node(fd->rec_cpu_num) :
1298		numa_node_id()) : dd->assigned_node_id;
1299
1300	rcd = qib_create_ctxtdata(ppd, ctxt, numa_id);
1301
1302	/*
1303	 * Allocate memory for use in qib_tid_update() at open to
1304	 * reduce cost of expected send setup per message segment
1305	 */
1306	if (rcd)
1307		ptmp = kmalloc(dd->rcvtidcnt * sizeof(u16) +
1308			       dd->rcvtidcnt * sizeof(struct page **),
1309			       GFP_KERNEL);
1310
1311	if (!rcd || !ptmp) {
1312		qib_dev_err(dd,
1313			"Unable to allocate ctxtdata memory, failing open\n");
1314		ret = -ENOMEM;
1315		goto bailerr;
1316	}
1317	rcd->userversion = uinfo->spu_userversion;
1318	ret = init_subctxts(dd, rcd, uinfo);
1319	if (ret)
1320		goto bailerr;
1321	rcd->tid_pg_list = ptmp;
1322	rcd->pid = current->pid;
1323	init_waitqueue_head(&dd->rcd[ctxt]->wait);
1324	strlcpy(rcd->comm, current->comm, sizeof(rcd->comm));
1325	ctxt_fp(fp) = rcd;
1326	qib_stats.sps_ctxts++;
1327	dd->freectxts--;
1328	ret = 0;
1329	goto bail;
1330
1331bailerr:
1332	if (fd->rec_cpu_num != -1)
1333		__clear_bit(fd->rec_cpu_num, qib_cpulist);
1334
1335	dd->rcd[ctxt] = NULL;
1336	kfree(rcd);
1337	kfree(ptmp);
1338bail:
1339	return ret;
1340}
1341
1342static inline int usable(struct qib_pportdata *ppd)
1343{
1344	struct qib_devdata *dd = ppd->dd;
1345
1346	return dd && (dd->flags & QIB_PRESENT) && dd->kregbase && ppd->lid &&
1347		(ppd->lflags & QIBL_LINKACTIVE);
1348}
1349
1350/*
1351 * Select a context on the given device, either using a requested port
1352 * or the port based on the context number.
1353 */
1354static int choose_port_ctxt(struct file *fp, struct qib_devdata *dd, u32 port,
1355			    const struct qib_user_info *uinfo)
1356{
1357	struct qib_pportdata *ppd = NULL;
1358	int ret, ctxt;
1359
1360	if (port) {
1361		if (!usable(dd->pport + port - 1)) {
1362			ret = -ENETDOWN;
1363			goto done;
1364		} else
1365			ppd = dd->pport + port - 1;
1366	}
1367	for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts && dd->rcd[ctxt];
1368	     ctxt++)
1369		;
1370	if (ctxt == dd->cfgctxts) {
1371		ret = -EBUSY;
1372		goto done;
1373	}
1374	if (!ppd) {
1375		u32 pidx = ctxt % dd->num_pports;
1376
1377		if (usable(dd->pport + pidx))
1378			ppd = dd->pport + pidx;
1379		else {
1380			for (pidx = 0; pidx < dd->num_pports && !ppd;
1381			     pidx++)
1382				if (usable(dd->pport + pidx))
1383					ppd = dd->pport + pidx;
1384		}
1385	}
1386	ret = ppd ? setup_ctxt(ppd, ctxt, fp, uinfo) : -ENETDOWN;
1387done:
1388	return ret;
1389}
1390
1391static int find_free_ctxt(int unit, struct file *fp,
1392			  const struct qib_user_info *uinfo)
1393{
1394	struct qib_devdata *dd = qib_lookup(unit);
1395	int ret;
1396
1397	if (!dd || (uinfo->spu_port && uinfo->spu_port > dd->num_pports))
1398		ret = -ENODEV;
1399	else
1400		ret = choose_port_ctxt(fp, dd, uinfo->spu_port, uinfo);
1401
1402	return ret;
1403}
1404
1405static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo,
1406		      unsigned alg)
1407{
1408	struct qib_devdata *udd = NULL;
1409	int ret = 0, devmax, npresent, nup, ndev, dusable = 0, i;
1410	u32 port = uinfo->spu_port, ctxt;
1411
1412	devmax = qib_count_units(&npresent, &nup);
1413	if (!npresent) {
1414		ret = -ENXIO;
1415		goto done;
1416	}
1417	if (nup == 0) {
1418		ret = -ENETDOWN;
1419		goto done;
1420	}
1421
1422	if (alg == QIB_PORT_ALG_ACROSS) {
1423		unsigned inuse = ~0U;
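		/* start above any possible count so the first usable device wins */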
1424
1425		/* find device (with ACTIVE ports) with fewest ctxts in use */
1426		for (ndev = 0; ndev < devmax; ndev++) {
1427			struct qib_devdata *dd = qib_lookup(ndev);
1428			unsigned cused = 0, cfree = 0, pusable = 0;
1429
1430			if (!dd)
1431				continue;
1432			if (port && port <= dd->num_pports &&
1433			    usable(dd->pport + port - 1))
1434				pusable = 1;
1435			else
1436				for (i = 0; i < dd->num_pports; i++)
1437					if (usable(dd->pport + i))
1438						pusable++;
1439			if (!pusable)
1440				continue;
1441			for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts;
1442			     ctxt++)
1443				if (dd->rcd[ctxt])
1444					cused++;
1445				else
1446					cfree++;
1447			if (cfree && cused < inuse) {
1448				udd = dd;
1449				inuse = cused;
1450			}
1451		}
1452		if (udd) {
1453			ret = choose_port_ctxt(fp, udd, port, uinfo);
1454			goto done;
1455		}
1456	} else {
1457		for (ndev = 0; ndev < devmax; ndev++) {
1458			struct qib_devdata *dd = qib_lookup(ndev);
1459
1460			if (dd) {
1461				ret = choose_port_ctxt(fp, dd, port, uinfo);
1462				if (!ret)
1463					goto done;
1464				if (ret == -EBUSY)
1465					dusable++;
1466			}
1467		}
1468	}
1469	ret = dusable ? -EBUSY : -ENETDOWN;
1470
1471done:
1472	return ret;
1473}
1474
1475static int find_shared_ctxt(struct file *fp,
1476			    const struct qib_user_info *uinfo)
1477{
1478	int devmax, ndev, i;
1479	int ret = 0;
1480
1481	devmax = qib_count_units(NULL, NULL);
1482
1483	for (ndev = 0; ndev < devmax; ndev++) {
1484		struct qib_devdata *dd = qib_lookup(ndev);
1485
1486		/* device portion of usable() */
1487		if (!(dd && (dd->flags & QIB_PRESENT) && dd->kregbase))
1488			continue;
1489		for (i = dd->first_user_ctxt; i < dd->cfgctxts; i++) {
1490			struct qib_ctxtdata *rcd = dd->rcd[i];
1491
1492			/* Skip ctxts which are not yet open */
1493			if (!rcd || !rcd->cnt)
1494				continue;
1495			/* Skip ctxt if it doesn't match the requested one */
1496			if (rcd->subctxt_id != uinfo->spu_subctxt_id)
1497				continue;
1498			/* Verify the sharing process matches the master */
1499			if (rcd->subctxt_cnt != uinfo->spu_subctxt_cnt ||
1500			    rcd->userversion != uinfo->spu_userversion ||
1501			    rcd->cnt >= rcd->subctxt_cnt) {
1502				ret = -EINVAL;
1503				goto done;
1504			}
1505			ctxt_fp(fp) = rcd;
1506			subctxt_fp(fp) = rcd->cnt++;
1507			rcd->subpid[subctxt_fp(fp)] = current->pid;
1508			tidcursor_fp(fp) = 0;
1509			rcd->active_slaves |= 1 << subctxt_fp(fp);
1510			ret = 1;
1511			goto done;
1512		}
1513	}
1514
1515done:
1516	return ret;
1517}
1518
1519static int qib_open(struct inode *in, struct file *fp)
1520{
1521	/* The real work is performed later in qib_assign_ctxt() */
1522	fp->private_data = kzalloc(sizeof(struct qib_filedata), GFP_KERNEL);
1523	if (fp->private_data) /* no cpu affinity by default */
1524		((struct qib_filedata *)fp->private_data)->rec_cpu_num = -1;
1525	return fp->private_data ? 0 : -ENOMEM;
1526}
1527
1528static int find_hca(unsigned int cpu, int *unit)
1529{
1530	int ret = 0, devmax, npresent, nup, ndev;
1531
1532	*unit = -1;
1533
1534	devmax = qib_count_units(&npresent, &nup);
1535	if (!npresent) {
1536		ret = -ENXIO;
1537		goto done;
1538	}
1539	if (!nup) {
1540		ret = -ENETDOWN;
1541		goto done;
1542	}
1543	for (ndev = 0; ndev < devmax; ndev++) {
1544		struct qib_devdata *dd = qib_lookup(ndev);
1545
1546		if (dd) {
1547			if (pcibus_to_node(dd->pcidev->bus) < 0) {
1548				ret = -EINVAL;
1549				goto done;
1550			}
1551			if (cpu_to_node(cpu) ==
1552				pcibus_to_node(dd->pcidev->bus)) {
1553				*unit = ndev;
1554				goto done;
1555			}
1556		}
1557	}
1558done:
1559	return ret;
1560}
1561
1562static int do_qib_user_sdma_queue_create(struct file *fp)
1563{
1564	struct qib_filedata *fd = fp->private_data;
1565	struct qib_ctxtdata *rcd = fd->rcd;
1566	struct qib_devdata *dd = rcd->dd;
1567
1568	if (dd->flags & QIB_HAS_SEND_DMA) {
1569
1570		fd->pq = qib_user_sdma_queue_create(&dd->pcidev->dev,
1571						    dd->unit,
1572						    rcd->ctxt,
1573						    fd->subctxt);
1574		if (!fd->pq)
1575			return -ENOMEM;
1576	}
1577
1578	return 0;
1579}
1580
1581/*
1582 * Get ctxt early, so can set affinity prior to memory allocation.
1583 */
1584static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo)
1585{
1586	int ret;
1587	int i_minor;
1588	unsigned swmajor, swminor, alg = QIB_PORT_ALG_ACROSS;
1589
1590	/* Check to be sure we haven't already initialized this file */
1591	if (ctxt_fp(fp)) {
1592		ret = -EINVAL;
1593		goto done;
1594	}
1595
1596	/* for now, if major version is different, bail */
1597	swmajor = uinfo->spu_userversion >> 16;
1598	if (swmajor != QIB_USER_SWMAJOR) {
1599		ret = -ENODEV;
1600		goto done;
1601	}
1602
1603	swminor = uinfo->spu_userversion & 0xffff;
1604
1605	if (swminor >= 11 && uinfo->spu_port_alg < QIB_PORT_ALG_COUNT)
1606		alg = uinfo->spu_port_alg;
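	/* spu_port_alg is only honored for user ABI minor versions >= 11 */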
1607
1608	mutex_lock(&qib_mutex);
1609
1610	if (qib_compatible_subctxts(swmajor, swminor) &&
1611	    uinfo->spu_subctxt_cnt) {
1612		ret = find_shared_ctxt(fp, uinfo);
1613		if (ret > 0) {
1614			ret = do_qib_user_sdma_queue_create(fp);
1615			if (!ret)
1616				assign_ctxt_affinity(fp, (ctxt_fp(fp))->dd);
1617			goto done_ok;
1618		}
1619	}
1620
1621	i_minor = iminor(file_inode(fp)) - QIB_USER_MINOR_BASE;
1622	if (i_minor)
1623		ret = find_free_ctxt(i_minor - 1, fp, uinfo);
1624	else {
1625		int unit;
1626		const unsigned int cpu = cpumask_first(current->cpus_ptr);
1627		const unsigned int weight = current->nr_cpus_allowed;
1628
1629		if (weight == 1 && !test_bit(cpu, qib_cpulist))
1630			if (!find_hca(cpu, &unit) && unit >= 0)
1631				if (!find_free_ctxt(unit, fp, uinfo)) {
1632					ret = 0;
1633					goto done_chk_sdma;
1634				}
1635		ret = get_a_ctxt(fp, uinfo, alg);
1636	}
1637
1638done_chk_sdma:
1639	if (!ret)
1640		ret = do_qib_user_sdma_queue_create(fp);
1641done_ok:
1642	mutex_unlock(&qib_mutex);
1643
1644done:
1645	return ret;
1646}
1647
1648
1649static int qib_do_user_init(struct file *fp,
1650			    const struct qib_user_info *uinfo)
1651{
1652	int ret;
1653	struct qib_ctxtdata *rcd = ctxt_fp(fp);
1654	struct qib_devdata *dd;
1655	unsigned uctxt;
1656
1657	/* Subctxts don't need to initialize anything since master did it. */
1658	if (subctxt_fp(fp)) {
1659		ret = wait_event_interruptible(rcd->wait,
1660			!test_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag));
1661		goto bail;
1662	}
1663
1664	dd = rcd->dd;
1665
1666	/* some ctxts may get extra buffers, calculate that here */
1667	uctxt = rcd->ctxt - dd->first_user_ctxt;
1668	if (uctxt < dd->ctxts_extrabuf) {
1669		rcd->piocnt = dd->pbufsctxt + 1;
1670		rcd->pio_base = rcd->piocnt * uctxt;
1671	} else {
1672		rcd->piocnt = dd->pbufsctxt;
1673		rcd->pio_base = rcd->piocnt * uctxt +
1674			dd->ctxts_extrabuf;
1675	}
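	/*
	 * pio_base is now this context's starting index into the 2K PIO
	 * buffer array; the first ctxts_extrabuf contexts got one extra
	 * buffer each.
	 */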
1676
1677	/*
1678	 * All user buffers are 2KB buffers.  If we ever support
1679	 * giving 4KB buffers to user processes, this will need some
1680	 * work.  Can't use piobufbase directly, because it has
1681	 * both 2K and 4K buffer base values.  So check and handle.
1682	 */
1683	if ((rcd->pio_base + rcd->piocnt) > dd->piobcnt2k) {
1684		if (rcd->pio_base >= dd->piobcnt2k) {
1685			qib_dev_err(dd,
1686				    "%u:ctxt%u: no 2KB buffers available\n",
1687				    dd->unit, rcd->ctxt);
1688			ret = -ENOBUFS;
1689			goto bail;
1690		}
1691		rcd->piocnt = dd->piobcnt2k - rcd->pio_base;
1692		qib_dev_err(dd, "Ctxt%u: would use 4KB bufs, using %u\n",
1693			    rcd->ctxt, rcd->piocnt);
1694	}
1695
1696	rcd->piobufs = dd->pio2k_bufbase + rcd->pio_base * dd->palign;
1697	qib_chg_pioavailkernel(dd, rcd->pio_base, rcd->piocnt,
1698			       TXCHK_CHG_TYPE_USER, rcd);
1699	/*
	 * Try to ensure that processes start up with a consistent avail
	 * update for their own range, at least.  If the system is very
	 * quiet, the in-memory copy may be out of date at startup for this
	 * range of buffers when a context gets re-used.  Do this after the
	 * chg_pioavail and before the rest of setup, so it's "almost
	 * certain" the dma will have occurred (can't 100% guarantee, but
	 * should be many decimals of 9s, with this ordering), given how
	 * much else happens after this.
1708	 */
1709	dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
1710
1711	/*
	 * Now allocate the rcvhdr Q and eager TIDs; skip the TID
	 * array for the time being.  If rcd->ctxt ever exceeds what the
	 * chip supports, we will someday need extra handling here to
	 * overflow through ctxt 0.
1716	 */
1717	ret = qib_create_rcvhdrq(dd, rcd);
1718	if (!ret)
1719		ret = qib_setup_eagerbufs(rcd);
1720	if (ret)
1721		goto bail_pio;
1722
1723	rcd->tidcursor = 0; /* start at beginning after open */
1724
1725	/* initialize poll variables... */
1726	rcd->urgent = 0;
1727	rcd->urgent_poll = 0;
1728
1729	/*
1730	 * Now enable the ctxt for receive.
	 * Some chips are set to DMA the tail register to memory when it
	 * changes (and when the update bit transitions from 0 to 1), so
	 * for those chips we turn it off and then back on.
1734	 * This will (very briefly) affect any other open ctxts, but the
1735	 * duration is very short, and therefore isn't an issue.  We
1736	 * explicitly set the in-memory tail copy to 0 beforehand, so we
1737	 * don't have to wait to be sure the DMA update has happened
1738	 * (chip resets head/tail to 0 on transition to enable).
1739	 */
1740	if (rcd->rcvhdrtail_kvaddr)
1741		qib_clear_rcvhdrtail(rcd);
1742
1743	dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_CTXT_ENB | QIB_RCVCTRL_TIDFLOW_ENB,
1744		      rcd->ctxt);
1745
1746	/* Notify any waiting slaves */
1747	if (rcd->subctxt_cnt) {
1748		clear_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag);
1749		wake_up(&rcd->wait);
1750	}
1751	return 0;
1752
1753bail_pio:
1754	qib_chg_pioavailkernel(dd, rcd->pio_base, rcd->piocnt,
1755			       TXCHK_CHG_TYPE_KERN, rcd);
1756bail:
1757	return ret;
1758}
1759
1760/**
 * unlock_expected_tids - unlock any expected TID entries the context still had in use
1762 * @rcd: ctxt
1763 *
1764 * We don't actually update the chip here, because we do a bulk update
1765 * below, using f_clear_tids.
1766 */
1767static void unlock_expected_tids(struct qib_ctxtdata *rcd)
1768{
1769	struct qib_devdata *dd = rcd->dd;
1770	int ctxt_tidbase = rcd->ctxt * dd->rcvtidcnt;
1771	int i, cnt = 0, maxtid = ctxt_tidbase + dd->rcvtidcnt;
1772
1773	for (i = ctxt_tidbase; i < maxtid; i++) {
1774		struct page *p = dd->pageshadow[i];
1775		dma_addr_t phys;
1776
1777		if (!p)
1778			continue;
1779
1780		phys = dd->physshadow[i];
1781		dd->physshadow[i] = dd->tidinvalid;
1782		dd->pageshadow[i] = NULL;
1783		pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
1784			       PCI_DMA_FROMDEVICE);
1785		qib_release_user_pages(&p, 1);
1786		cnt++;
1787	}
1788}
1789
1790static int qib_close(struct inode *in, struct file *fp)
1791{
1792	struct qib_filedata *fd;
1793	struct qib_ctxtdata *rcd;
1794	struct qib_devdata *dd;
1795	unsigned long flags;
1796	unsigned ctxt;
1797
1798	mutex_lock(&qib_mutex);
1799
1800	fd = fp->private_data;
1801	fp->private_data = NULL;
1802	rcd = fd->rcd;
1803	if (!rcd) {
1804		mutex_unlock(&qib_mutex);
1805		goto bail;
1806	}
1807
1808	dd = rcd->dd;
1809
1810	/* ensure all pio buffer writes in progress are flushed */
1811	qib_flush_wc();
1812
1813	/* drain user sdma queue */
1814	if (fd->pq) {
1815		qib_user_sdma_queue_drain(rcd->ppd, fd->pq);
1816		qib_user_sdma_queue_destroy(fd->pq);
1817	}
1818
1819	if (fd->rec_cpu_num != -1)
1820		__clear_bit(fd->rec_cpu_num, qib_cpulist);
1821
1822	if (--rcd->cnt) {
1823		/*
1824		 * XXX If the master closes the context before the slave(s),
1825		 * revoke the mmap for the eager receive queue so
1826		 * the slave(s) don't wait for receive data forever.
1827		 */
1828		rcd->active_slaves &= ~(1 << fd->subctxt);
1829		rcd->subpid[fd->subctxt] = 0;
1830		mutex_unlock(&qib_mutex);
1831		goto bail;
1832	}
1833
1834	/* early; no interrupt users after this */
1835	spin_lock_irqsave(&dd->uctxt_lock, flags);
1836	ctxt = rcd->ctxt;
1837	dd->rcd[ctxt] = NULL;
1838	rcd->pid = 0;
1839	spin_unlock_irqrestore(&dd->uctxt_lock, flags);
1840
1841	if (rcd->rcvwait_to || rcd->piowait_to ||
1842	    rcd->rcvnowait || rcd->pionowait) {
1843		rcd->rcvwait_to = 0;
1844		rcd->piowait_to = 0;
1845		rcd->rcvnowait = 0;
1846		rcd->pionowait = 0;
1847	}
1848	if (rcd->flag)
1849		rcd->flag = 0;
1850
1851	if (dd->kregbase) {
1852		/* atomically clear receive enable ctxt and intr avail. */
1853		dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_CTXT_DIS |
1854				  QIB_RCVCTRL_INTRAVAIL_DIS, ctxt);
1855
1856		/* clean up the pkeys for this ctxt user */
1857		qib_clean_part_key(rcd, dd);
1858		qib_disarm_piobufs(dd, rcd->pio_base, rcd->piocnt);
1859		qib_chg_pioavailkernel(dd, rcd->pio_base,
1860				       rcd->piocnt, TXCHK_CHG_TYPE_KERN, NULL);
1861
1862		dd->f_clear_tids(dd, rcd);
1863
1864		if (dd->pageshadow)
1865			unlock_expected_tids(rcd);
1866		qib_stats.sps_ctxts--;
1867		dd->freectxts++;
1868	}
1869
1870	mutex_unlock(&qib_mutex);
1871	qib_free_ctxtdata(dd, rcd); /* after releasing the mutex */
1872
1873bail:
1874	kfree(fd);
1875	return 0;
1876}
1877
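/*
 * Fill in a qib_ctxt_info snapshot (unit, port, ctxt, subctxt, counts)
 * and copy it to the user buffer supplied with QIB_CMD_CTXT_INFO.
 */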
1878static int qib_ctxt_info(struct file *fp, struct qib_ctxt_info __user *uinfo)
1879{
1880	struct qib_ctxt_info info;
1881	int ret;
1882	size_t sz;
1883	struct qib_ctxtdata *rcd = ctxt_fp(fp);
1884	struct qib_filedata *fd;
1885
1886	fd = fp->private_data;
1887
1888	info.num_active = qib_count_active_units();
1889	info.unit = rcd->dd->unit;
1890	info.port = rcd->ppd->port;
1891	info.ctxt = rcd->ctxt;
1892	info.subctxt = subctxt_fp(fp);
1893	/* Number of user ctxts available for this device. */
1894	info.num_ctxts = rcd->dd->cfgctxts - rcd->dd->first_user_ctxt;
1895	info.num_subctxts = rcd->subctxt_cnt;
1896	info.rec_cpu = fd->rec_cpu_num;
1897	sz = sizeof(info);
1898
1899	if (copy_to_user(uinfo, &info, sz)) {
1900		ret = -EFAULT;
1901		goto bail;
1902	}
1903	ret = 0;
1904
1905bail:
1906	return ret;
1907}
1908
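/* Report the user SDMA in-flight counter for this queue to user space. */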
1909static int qib_sdma_get_inflight(struct qib_user_sdma_queue *pq,
1910				 u32 __user *inflightp)
1911{
1912	const u32 val = qib_user_sdma_inflight_counter(pq);
1913
1914	if (put_user(val, inflightp))
1915		return -EFAULT;
1916
1917	return 0;
1918}
1919
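/*
 * Make progress on the user SDMA queue, then report its completion
 * counter back to user space.
 */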
1920static int qib_sdma_get_complete(struct qib_pportdata *ppd,
1921				 struct qib_user_sdma_queue *pq,
1922				 u32 __user *completep)
1923{
1924	u32 val;
1925	int err;
1926
1927	if (!pq)
1928		return -EINVAL;
1929
1930	err = qib_user_sdma_make_progress(ppd, pq);
1931	if (err < 0)
1932		return err;
1933
1934	val = qib_user_sdma_complete_counter(pq);
1935	if (put_user(val, completep))
1936		return -EFAULT;
1937
1938	return 0;
1939}
1940
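/*
 * If the port is not usable, set the DISARM_BUFS event bit for the base
 * context and every subcontext so user space re-issues the request, wait
 * up to 30 seconds for the link to recover, and return -ENETDOWN so the
 * caller retries.
 */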
1941static int disarm_req_delay(struct qib_ctxtdata *rcd)
1942{
1943	int ret = 0;
1944
1945	if (!usable(rcd->ppd)) {
1946		int i;
1947		/*
1948		 * If the link is down, or otherwise not usable, delay
1949		 * the caller up to 30 seconds so we don't thrash while
1950		 * trying to get the chip back to ACTIVE, and set a flag
1951		 * so the caller knows to make the call again.
1952		 */
1953		if (rcd->user_event_mask) {
1954			/*
1955			 * subctxt_cnt is 0 if the context is not shared, so handle
1956			 * the base context first, then any remaining subcontexts.
1957			 */
1958			set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
1959				&rcd->user_event_mask[0]);
1960			for (i = 1; i < rcd->subctxt_cnt; i++)
1961				set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
1962					&rcd->user_event_mask[i]);
1963		}
1964		for (i = 0; !usable(rcd->ppd) && i < 300; i++)
1965			msleep(100);
1966		ret = -ENETDOWN;
1967	}
1968	return ret;
1969}
1970
1971/*
1972 * Find all user contexts in use and set the specified bit in their
1973 * event mask.
1974 * See also find_ctxt() for a similar use that is specific to send buffers.
1975 */
1976int qib_set_uevent_bits(struct qib_pportdata *ppd, const int evtbit)
1977{
1978	struct qib_ctxtdata *rcd;
1979	unsigned ctxt;
1980	int ret = 0;
1981	unsigned long flags;
1982
1983	spin_lock_irqsave(&ppd->dd->uctxt_lock, flags);
1984	for (ctxt = ppd->dd->first_user_ctxt; ctxt < ppd->dd->cfgctxts;
1985	     ctxt++) {
1986		rcd = ppd->dd->rcd[ctxt];
1987		if (!rcd)
1988			continue;
1989		if (rcd->user_event_mask) {
1990			int i;
1991			/*
1992			 * subctxt_cnt is 0 if the context is not shared, so handle
1993			 * the base context first, then any remaining subcontexts.
1994			 */
1995			set_bit(evtbit, &rcd->user_event_mask[0]);
1996			for (i = 1; i < rcd->subctxt_cnt; i++)
1997				set_bit(evtbit, &rcd->user_event_mask[i]);
1998		}
1999		ret = 1;
2000		break;
2001	}
2002	spin_unlock_irqrestore(&ppd->dd->uctxt_lock, flags);
2003
2004	return ret;
2005}
2006
2007/*
2008 * Clear the event notifier events for this context.
2009 * For the DISARM_BUFS case, we also take action (this obsoletes
2010 * the older QIB_CMD_DISARM_BUFS, but we keep it for backwards
2011 * compatibility).
2012 * Other bits don't currently require action; they are just cleared
2013 * atomically.  The user process then performs whatever action is
2014 * appropriate to the bit that was set, if desired, and checks again later.
2015 */
2016static int qib_user_event_ack(struct qib_ctxtdata *rcd, int subctxt,
2017			      unsigned long events)
2018{
2019	int ret = 0, i;
2020
2021	for (i = 0; i <= _QIB_MAX_EVENT_BIT; i++) {
2022		if (!test_bit(i, &events))
2023			continue;
2024		if (i == _QIB_EVENT_DISARM_BUFS_BIT) {
2025			(void)qib_disarm_piobufs_ifneeded(rcd);
2026			ret = disarm_req_delay(rcd);
2027		} else
2028			clear_bit(i, &rcd->user_event_mask[subctxt]);
2029	}
2030	return ret;
2031}
2032
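/*
 * write() entry point: parse a single struct qib_cmd (a command type
 * followed by the payload that type requires) and dispatch it.
 *
 * Purely as an illustrative sketch (not driver code), a user process
 * with an already-open device descriptor "fd" might issue a command
 * roughly like this; the meaning of the value 1 (start the receive
 * queue) is an assumption about the flag passed to qib_manage_rcvq():
 *
 *	struct qib_cmd c = { .type = QIB_CMD_RECV_CTRL };
 *
 *	c.cmd.recv_ctrl = 1;
 *	if (write(fd, &c, sizeof(c)) < 0)
 *		perror("QIB_CMD_RECV_CTRL");
 *
 * The only framing relied on below is "type first, payload after".
 */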
2033static ssize_t qib_write(struct file *fp, const char __user *data,
2034			 size_t count, loff_t *off)
2035{
2036	const struct qib_cmd __user *ucmd;
2037	struct qib_ctxtdata *rcd;
2038	const void __user *src;
2039	size_t consumed, copy = 0;
2040	struct qib_cmd cmd;
2041	ssize_t ret = 0;
2042	void *dest;
2043
2044	if (!ib_safe_file_access(fp)) {
2045		pr_err_once("qib_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
2046			    task_tgid_vnr(current), current->comm);
2047		return -EACCES;
2048	}
2049
2050	if (count < sizeof(cmd.type)) {
2051		ret = -EINVAL;
2052		goto bail;
2053	}
2054
2055	ucmd = (const struct qib_cmd __user *) data;
2056
2057	if (copy_from_user(&cmd.type, &ucmd->type, sizeof(cmd.type))) {
2058		ret = -EFAULT;
2059		goto bail;
2060	}
2061
2062	consumed = sizeof(cmd.type);
2063
2064	switch (cmd.type) {
2065	case QIB_CMD_ASSIGN_CTXT:
2066	case QIB_CMD_USER_INIT:
2067		copy = sizeof(cmd.cmd.user_info);
2068		dest = &cmd.cmd.user_info;
2069		src = &ucmd->cmd.user_info;
2070		break;
2071
2072	case QIB_CMD_RECV_CTRL:
2073		copy = sizeof(cmd.cmd.recv_ctrl);
2074		dest = &cmd.cmd.recv_ctrl;
2075		src = &ucmd->cmd.recv_ctrl;
2076		break;
2077
2078	case QIB_CMD_CTXT_INFO:
2079		copy = sizeof(cmd.cmd.ctxt_info);
2080		dest = &cmd.cmd.ctxt_info;
2081		src = &ucmd->cmd.ctxt_info;
2082		break;
2083
2084	case QIB_CMD_TID_UPDATE:
2085	case QIB_CMD_TID_FREE:
2086		copy = sizeof(cmd.cmd.tid_info);
2087		dest = &cmd.cmd.tid_info;
2088		src = &ucmd->cmd.tid_info;
2089		break;
2090
2091	case QIB_CMD_SET_PART_KEY:
2092		copy = sizeof(cmd.cmd.part_key);
2093		dest = &cmd.cmd.part_key;
2094		src = &ucmd->cmd.part_key;
2095		break;
2096
2097	case QIB_CMD_DISARM_BUFS:
2098	case QIB_CMD_PIOAVAILUPD: /* force an update of PIOAvail reg */
2099		copy = 0;
2100		src = NULL;
2101		dest = NULL;
2102		break;
2103
2104	case QIB_CMD_POLL_TYPE:
2105		copy = sizeof(cmd.cmd.poll_type);
2106		dest = &cmd.cmd.poll_type;
2107		src = &ucmd->cmd.poll_type;
2108		break;
2109
2110	case QIB_CMD_ARMLAUNCH_CTRL:
2111		copy = sizeof(cmd.cmd.armlaunch_ctrl);
2112		dest = &cmd.cmd.armlaunch_ctrl;
2113		src = &ucmd->cmd.armlaunch_ctrl;
2114		break;
2115
2116	case QIB_CMD_SDMA_INFLIGHT:
2117		copy = sizeof(cmd.cmd.sdma_inflight);
2118		dest = &cmd.cmd.sdma_inflight;
2119		src = &ucmd->cmd.sdma_inflight;
2120		break;
2121
2122	case QIB_CMD_SDMA_COMPLETE:
2123		copy = sizeof(cmd.cmd.sdma_complete);
2124		dest = &cmd.cmd.sdma_complete;
2125		src = &ucmd->cmd.sdma_complete;
2126		break;
2127
2128	case QIB_CMD_ACK_EVENT:
2129		copy = sizeof(cmd.cmd.event_mask);
2130		dest = &cmd.cmd.event_mask;
2131		src = &ucmd->cmd.event_mask;
2132		break;
2133
2134	default:
2135		ret = -EINVAL;
2136		goto bail;
2137	}
2138
2139	if (copy) {
2140		if ((count - consumed) < copy) {
2141			ret = -EINVAL;
2142			goto bail;
2143		}
2144		if (copy_from_user(dest, src, copy)) {
2145			ret = -EFAULT;
2146			goto bail;
2147		}
2148		consumed += copy;
2149	}
2150
2151	rcd = ctxt_fp(fp);
2152	if (!rcd && cmd.type != QIB_CMD_ASSIGN_CTXT) {
2153		ret = -EINVAL;
2154		goto bail;
2155	}
2156
2157	switch (cmd.type) {
2158	case QIB_CMD_ASSIGN_CTXT:
2159		if (rcd) {
2160			ret = -EINVAL;
2161			goto bail;
2162		}
2163
2164		ret = qib_assign_ctxt(fp, &cmd.cmd.user_info);
2165		if (ret)
2166			goto bail;
2167		break;
2168
2169	case QIB_CMD_USER_INIT:
2170		ret = qib_do_user_init(fp, &cmd.cmd.user_info);
2171		if (ret)
2172			goto bail;
2173		ret = qib_get_base_info(fp, u64_to_user_ptr(
2174					  cmd.cmd.user_info.spu_base_info),
2175					cmd.cmd.user_info.spu_base_info_size);
2176		break;
2177
2178	case QIB_CMD_RECV_CTRL:
2179		ret = qib_manage_rcvq(rcd, subctxt_fp(fp), cmd.cmd.recv_ctrl);
2180		break;
2181
2182	case QIB_CMD_CTXT_INFO:
2183		ret = qib_ctxt_info(fp, (struct qib_ctxt_info __user *)
2184				    (unsigned long) cmd.cmd.ctxt_info);
2185		break;
2186
2187	case QIB_CMD_TID_UPDATE:
2188		ret = qib_tid_update(rcd, fp, &cmd.cmd.tid_info);
2189		break;
2190
2191	case QIB_CMD_TID_FREE:
2192		ret = qib_tid_free(rcd, subctxt_fp(fp), &cmd.cmd.tid_info);
2193		break;
2194
2195	case QIB_CMD_SET_PART_KEY:
2196		ret = qib_set_part_key(rcd, cmd.cmd.part_key);
2197		break;
2198
2199	case QIB_CMD_DISARM_BUFS:
2200		(void)qib_disarm_piobufs_ifneeded(rcd);
2201		ret = disarm_req_delay(rcd);
2202		break;
2203
2204	case QIB_CMD_PIOAVAILUPD:
2205		qib_force_pio_avail_update(rcd->dd);
2206		break;
2207
2208	case QIB_CMD_POLL_TYPE:
2209		rcd->poll_type = cmd.cmd.poll_type;
2210		break;
2211
2212	case QIB_CMD_ARMLAUNCH_CTRL:
2213		rcd->dd->f_set_armlaunch(rcd->dd, cmd.cmd.armlaunch_ctrl);
2214		break;
2215
2216	case QIB_CMD_SDMA_INFLIGHT:
2217		ret = qib_sdma_get_inflight(user_sdma_queue_fp(fp),
2218					    (u32 __user *) (unsigned long)
2219					    cmd.cmd.sdma_inflight);
2220		break;
2221
2222	case QIB_CMD_SDMA_COMPLETE:
2223		ret = qib_sdma_get_complete(rcd->ppd,
2224					    user_sdma_queue_fp(fp),
2225					    (u32 __user *) (unsigned long)
2226					    cmd.cmd.sdma_complete);
2227		break;
2228
2229	case QIB_CMD_ACK_EVENT:
2230		ret = qib_user_event_ack(rcd, subctxt_fp(fp),
2231					 cmd.cmd.event_mask);
2232		break;
2233	}
2234
2235	if (ret >= 0)
2236		ret = consumed;
2237
2238bail:
2239	return ret;
2240}
2241
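/*
 * writev() entry point: hand the caller's iovec array directly to the
 * user SDMA engine.  Requires a plain iovec iterator and a per-fd user
 * SDMA queue to already exist.
 */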
2242static ssize_t qib_write_iter(struct kiocb *iocb, struct iov_iter *from)
2243{
2244	struct qib_filedata *fp = iocb->ki_filp->private_data;
2245	struct qib_ctxtdata *rcd = ctxt_fp(iocb->ki_filp);
2246	struct qib_user_sdma_queue *pq = fp->pq;
2247
2248	if (!iter_is_iovec(from) || !from->nr_segs || !pq)
2249		return -EINVAL;
2250
2251	return qib_user_sdma_writev(rcd, pq, from->iov, from->nr_segs);
2252}
2253
2254static struct class *qib_class;
2255static dev_t qib_dev;
2256
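/*
 * Register a character device at the given minor under the driver's
 * chrdev major and create the matching device node via the "ipath"
 * class.  On any failure, nothing is left registered and the caller's
 * pointers are returned NULL.
 */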
2257int qib_cdev_init(int minor, const char *name,
2258		  const struct file_operations *fops,
2259		  struct cdev **cdevp, struct device **devp)
2260{
2261	const dev_t dev = MKDEV(MAJOR(qib_dev), minor);
2262	struct cdev *cdev;
2263	struct device *device = NULL;
2264	int ret;
2265
2266	cdev = cdev_alloc();
2267	if (!cdev) {
2268		pr_err("Could not allocate cdev for minor %d, %s\n",
2269		       minor, name);
2270		ret = -ENOMEM;
2271		goto done;
2272	}
2273
2274	cdev->owner = THIS_MODULE;
2275	cdev->ops = fops;
2276	kobject_set_name(&cdev->kobj, name);
2277
2278	ret = cdev_add(cdev, dev, 1);
2279	if (ret < 0) {
2280		pr_err("Could not add cdev for minor %d, %s (err %d)\n",
2281		       minor, name, -ret);
2282		goto err_cdev;
2283	}
2284
2285	device = device_create(qib_class, NULL, dev, NULL, "%s", name);
2286	if (!IS_ERR(device))
2287		goto done;
2288	ret = PTR_ERR(device);
2289	device = NULL;
2290	pr_err("Could not create device for minor %d, %s (err %d)\n",
2291	       minor, name, -ret);
2292err_cdev:
2293	cdev_del(cdev);
2294	cdev = NULL;
2295done:
2296	*cdevp = cdev;
2297	*devp = device;
2298	return ret;
2299}
2300
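/*
 * Undo qib_cdev_init(): unregister the device node, delete the cdev,
 * and clear both caller-owned pointers.
 */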
2301void qib_cdev_cleanup(struct cdev **cdevp, struct device **devp)
2302{
2303	struct device *device = *devp;
2304
2305	if (device) {
2306		device_unregister(device);
2307		*devp = NULL;
2308	}
2309
2310	if (*cdevp) {
2311		cdev_del(*cdevp);
2312		*cdevp = NULL;
2313	}
2314}
2315
2316static struct cdev *wildcard_cdev;
2317static struct device *wildcard_device;
2318
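/*
 * Module-load setup for the driver's character devices: reserve
 * QIB_NMINORS minors and create the "ipath" class that the device
 * nodes are created under.
 */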
2319int __init qib_dev_init(void)
2320{
2321	int ret;
2322
2323	ret = alloc_chrdev_region(&qib_dev, 0, QIB_NMINORS, QIB_DRV_NAME);
2324	if (ret < 0) {
2325		pr_err("Could not allocate chrdev region (err %d)\n", -ret);
2326		goto done;
2327	}
2328
2329	qib_class = class_create(THIS_MODULE, "ipath");
2330	if (IS_ERR(qib_class)) {
2331		ret = PTR_ERR(qib_class);
2332		pr_err("Could not create device class (err %d)\n", -ret);
2333		unregister_chrdev_region(qib_dev, QIB_NMINORS);
2334	}
2335
2336done:
2337	return ret;
2338}
2339
2340void qib_dev_cleanup(void)
2341{
2342	if (qib_class) {
2343		class_destroy(qib_class);
2344		qib_class = NULL;
2345	}
2346
2347	unregister_chrdev_region(qib_dev, QIB_NMINORS);
2348}
2349
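/*
 * user_count tracks how many units currently have user device nodes.
 * The first qib_user_add() also creates the shared wildcard node
 * ("ipath", minor 0); the last qib_user_remove() tears it down again.
 */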
2350static atomic_t user_count = ATOMIC_INIT(0);
2351
2352static void qib_user_remove(struct qib_devdata *dd)
2353{
2354	if (atomic_dec_return(&user_count) == 0)
2355		qib_cdev_cleanup(&wildcard_cdev, &wildcard_device);
2356
2357	qib_cdev_cleanup(&dd->user_cdev, &dd->user_device);
2358}
2359
2360static int qib_user_add(struct qib_devdata *dd)
2361{
2362	char name[10];
2363	int ret;
2364
2365	if (atomic_inc_return(&user_count) == 1) {
2366		ret = qib_cdev_init(0, "ipath", &qib_file_ops,
2367				    &wildcard_cdev, &wildcard_device);
2368		if (ret)
2369			goto done;
2370	}
2371
2372	snprintf(name, sizeof(name), "ipath%d", dd->unit);
2373	ret = qib_cdev_init(dd->unit + 1, name, &qib_file_ops,
2374			    &dd->user_cdev, &dd->user_device);
2375	if (ret)
2376		qib_user_remove(dd);
2377done:
2378	return ret;
2379}
2380
2381/*
2382 * Create per-unit files in /dev
2383 */
2384int qib_device_create(struct qib_devdata *dd)
2385{
2386	int r, ret;
2387
2388	r = qib_user_add(dd);
2389	ret = qib_diag_add(dd);
2390	if (r && !ret)
2391		ret = r;
2392	return ret;
2393}
2394
2395/*
2396 * Remove per-unit files in /dev.
2397 * Returns nothing; the core kernel device-removal paths report no errors.
2398 */
2399void qib_device_remove(struct qib_devdata *dd)
2400{
2401	qib_user_remove(dd);
2402	qib_diag_remove(dd);
2403}
2404