// SPDX-License-Identifier: GPL-2.0-only
// Copyright 2014 Cisco Systems, Inc.  All rights reserved.
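
/*
 * snic_io.c: snic work queue (WQ) descriptor posting, completion handling,
 * and per-request resource management (allocation, DMA unmap, free).
 */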

#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/slab.h>

#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/mempool.h>
#include <scsi/scsi_tcq.h>

#include "snic_io.h"
#include "snic.h"
#include "cq_enet_desc.h"
#include "snic_fwint.h"

static void
snic_wq_cmpl_frame_send(struct vnic_wq *wq,
			    struct cq_desc *cq_desc,
			    struct vnic_wq_buf *buf,
			    void *opaque)
{
	struct snic *snic = svnic_dev_priv(wq->vdev);

	SNIC_BUG_ON(buf->os_buf == NULL);

	if (snic_log_level & SNIC_DESC_LOGGING)
		SNIC_HOST_INFO(snic->shost,
			       "Ack received for snic_host_req %p.\n",
			       buf->os_buf);

	SNIC_TRC(snic->shost->host_no, 0, 0,
		 ((ulong)(buf->os_buf) - sizeof(struct snic_req_info)), 0, 0,
		 0);

	buf->os_buf = NULL;
}

static int
snic_wq_cmpl_handler_cont(struct vnic_dev *vdev,
			  struct cq_desc *cq_desc,
			  u8 type,
			  u16 q_num,
			  u16 cmpl_idx,
			  void *opaque)
{
	struct snic *snic = svnic_dev_priv(vdev);
	unsigned long flags;

	SNIC_BUG_ON(q_num != 0);

	spin_lock_irqsave(&snic->wq_lock[q_num], flags);
	svnic_wq_service(&snic->wq[q_num],
			 cq_desc,
			 cmpl_idx,
			 snic_wq_cmpl_frame_send,
			 NULL);
	spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);

	return 0;
} /* end of snic_wq_cmpl_handler_cont */

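/*
 * snic_wq_cmpl_handler: Services the completion queue of every WQ and
 * acknowledges completed firmware requests. Returns the total number of
 * completion entries processed; each CQ is serviced for at most
 * work_to_do entries.
 */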
int
snic_wq_cmpl_handler(struct snic *snic, int work_to_do)
{
	unsigned int work_done = 0;
	unsigned int i;

	snic->s_stats.misc.last_ack_time = jiffies;
	for (i = 0; i < snic->wq_count; i++) {
		work_done += svnic_cq_service(&snic->cq[i],
					      work_to_do,
					      snic_wq_cmpl_handler_cont,
					      NULL);
	}

	return work_done;
} /* end of snic_wq_cmpl_handler */

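/*
 * snic_free_wq_buf: Frees a request buffer still posted on the WQ.
 * Unmaps the request's DMA mapping, unlinks the owning request from
 * spl_cmd_list if it is queued there, and releases the request memory.
 */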
void
snic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct snic_host_req *req = buf->os_buf;
	struct snic *snic = svnic_dev_priv(wq->vdev);
	struct snic_req_info *rqi = NULL;
	unsigned long flags;

	dma_unmap_single(&snic->pdev->dev, buf->dma_addr, buf->len,
			 DMA_TO_DEVICE);

	rqi = req_to_rqi(req);
	spin_lock_irqsave(&snic->spl_cmd_lock, flags);
	if (list_empty(&rqi->list)) {
		spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
		goto end;
	}

	SNIC_BUG_ON(rqi->list.next == NULL); /* if not added to spl_cmd_list */
	list_del_init(&rqi->list);
	spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);

	if (rqi->sge_va) {
		snic_pci_unmap_rsp_buf(snic, rqi);
		kfree((void *)rqi->sge_va);
		rqi->sge_va = 0;
	}
	snic_req_free(snic, rqi);
	SNIC_HOST_INFO(snic->shost, "snic_free_wq_buf .. freed.\n");

end:
	return;
}

/* Criteria to select work queue in multi queue mode */
static int
snic_select_wq(struct snic *snic)
{
	/* No multi queue support for now */
	BUILD_BUG_ON(SNIC_WQ_MAX > 1);

	return 0;
}

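/*
 * snic_wqdesc_avail: Returns the number of WQ descriptors available on
 * queue q_num. One descriptor is held in reserve so that an HBA reset
 * can always be posted; only an SNIC_REQ_HBA_RESET request may use it.
 */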
static int
snic_wqdesc_avail(struct snic *snic, int q_num, int req_type)
{
	int nr_wqdesc = snic->config.wq_enet_desc_count;

	if (q_num > 0) {
		/*
		 * Multi Queue case, additional care is required.
		 * Per WQ active requests need to be maintained.
		 */
		SNIC_HOST_INFO(snic->shost, "desc_avail: Multi Queue case.\n");
		SNIC_BUG_ON(q_num > 0);

		return -1;
	}

	nr_wqdesc -= atomic64_read(&snic->s_stats.fw.actv_reqs);

	return ((req_type == SNIC_REQ_HBA_RESET) ? nr_wqdesc : nr_wqdesc - 1);
}

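/*
 * snic_queue_wq_desc: DMA-maps the request buffer and posts it on the
 * selected WQ. Returns -ENOMEM if the mapping fails or no WQ descriptor
 * is available; on success the firmware active-request count is bumped.
 */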
int
snic_queue_wq_desc(struct snic *snic, void *os_buf, u16 len)
{
	dma_addr_t pa = 0;
	unsigned long flags;
	struct snic_fw_stats *fwstats = &snic->s_stats.fw;
	struct snic_host_req *req = (struct snic_host_req *) os_buf;
	long act_reqs;
	long desc_avail = 0;
	int q_num = 0;

	snic_print_desc(__func__, os_buf, len);

	/* Map request buffer */
	pa = dma_map_single(&snic->pdev->dev, os_buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&snic->pdev->dev, pa)) {
		SNIC_HOST_ERR(snic->shost, "qdesc: PCI DMA Mapping Fail.\n");

		return -ENOMEM;
	}

	req->req_pa = (ulong)pa;

	q_num = snic_select_wq(snic);

	spin_lock_irqsave(&snic->wq_lock[q_num], flags);
	desc_avail = snic_wqdesc_avail(snic, q_num, req->hdr.type);
	if (desc_avail <= 0) {
		dma_unmap_single(&snic->pdev->dev, pa, len, DMA_TO_DEVICE);
		req->req_pa = 0;
		spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);
		atomic64_inc(&snic->s_stats.misc.wq_alloc_fail);
		SNIC_DBG("host = %d, WQ is Full\n", snic->shost->host_no);

		return -ENOMEM;
	}

	snic_queue_wq_eth_desc(&snic->wq[q_num], os_buf, pa, len, 0, 0, 1);
	/*
	 * Update stats
	 * note: when multi queue enabled, fw actv_reqs should be per queue.
	 */
	act_reqs = atomic64_inc_return(&fwstats->actv_reqs);
	spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);

	if (act_reqs > atomic64_read(&fwstats->max_actv_reqs))
		atomic64_set(&fwstats->max_actv_reqs, act_reqs);

	return 0;
} /* end of snic_queue_wq_desc() */
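
/*
 * A sketch of the typical untagged-request flow this file supports,
 * assuming a caller shaped like the discovery path (illustrative only,
 * not a verbatim copy of any in-tree caller):
 *
 *	struct snic_req_info *rqi = snic_req_init(snic, 0);
 *
 *	if (!rqi)
 *		return -ENOMEM;
 *	... fill in rqi->req header and payload ...
 *	snic_handle_untagged_req(snic, rqi);
 *	if (snic_queue_wq_desc(snic, rqi->req, rqi->req_len))
 *		snic_release_untagged_req(snic, rqi);
 */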

/*
 * snic_handle_untagged_req: Adds snic-specific requests to spl_cmd_list.
 * Purpose: Used during driver unload to clean up the requests.
 */
void
snic_handle_untagged_req(struct snic *snic, struct snic_req_info *rqi)
{
	unsigned long flags;

	INIT_LIST_HEAD(&rqi->list);

	spin_lock_irqsave(&snic->spl_cmd_lock, flags);
	list_add_tail(&rqi->list, &snic->spl_cmd_list);
	spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
}

/*
 * snic_req_init:
 * Allocates snic_req_info + snic_host_req + sgl data, and initializes.
 */
struct snic_req_info *
snic_req_init(struct snic *snic, int sg_cnt)
{
	u8 typ;
	struct snic_req_info *rqi = NULL;

	typ = (sg_cnt <= SNIC_REQ_CACHE_DFLT_SGL) ?
		SNIC_REQ_CACHE_DFLT_SGL : SNIC_REQ_CACHE_MAX_SGL;

	rqi = mempool_alloc(snic->req_pool[typ], GFP_ATOMIC);
	if (!rqi) {
		atomic64_inc(&snic->s_stats.io.alloc_fail);
		SNIC_HOST_ERR(snic->shost,
			      "Failed to allocate memory from snic req pool id = %d\n",
			      typ);
		return rqi;
	}

	memset(rqi, 0, sizeof(*rqi));
	rqi->rq_pool_type = typ;
	rqi->start_time = jiffies;
	rqi->req = (struct snic_host_req *) (rqi + 1);
	rqi->req_len = sizeof(struct snic_host_req);
	rqi->snic = snic;

	if (sg_cnt == 0)
		goto end;

	rqi->req_len += (sg_cnt * sizeof(struct snic_sg_desc));

	if (sg_cnt > atomic64_read(&snic->s_stats.io.max_sgl))
		atomic64_set(&snic->s_stats.io.max_sgl, sg_cnt);

	SNIC_BUG_ON(sg_cnt > SNIC_MAX_SG_DESC_CNT);
	atomic64_inc(&snic->s_stats.io.sgl_cnt[sg_cnt - 1]);

end:
	memset(rqi->req, 0, rqi->req_len);

	/* pre initialization of init_ctx to support req_to_rqi */
	rqi->req->hdr.init_ctx = (ulong) rqi;

	SNIC_SCSI_DBG(snic->shost, "Req_alloc:rqi = %p allocated.\n", rqi);

	return rqi;
} /* end of snic_req_init */

/*
 * snic_abort_req_init: Inits abort request.
 */
struct snic_host_req *
snic_abort_req_init(struct snic *snic, struct snic_req_info *rqi)
{
	struct snic_host_req *req = NULL;

	SNIC_BUG_ON(!rqi);

	/* If abort to be issued second time, then reuse */
	if (rqi->abort_req)
		return rqi->abort_req;

	req = mempool_alloc(snic->req_pool[SNIC_REQ_TM_CACHE], GFP_ATOMIC);
	if (!req) {
		SNIC_HOST_ERR(snic->shost, "abts:Failed to alloc tm req.\n");
		WARN_ON_ONCE(1);

		return NULL;
	}

	rqi->abort_req = req;
	memset(req, 0, sizeof(struct snic_host_req));
	/* pre initialization of init_ctx to support req_to_rqi */
	req->hdr.init_ctx = (ulong) rqi;

	return req;
} /* end of snic_abort_req_init */

/*
 * snic_dr_req_init: Inits device reset req
 */
struct snic_host_req *
snic_dr_req_init(struct snic *snic, struct snic_req_info *rqi)
{
	struct snic_host_req *req = NULL;

	SNIC_BUG_ON(!rqi);

	req = mempool_alloc(snic->req_pool[SNIC_REQ_TM_CACHE], GFP_ATOMIC);
	if (!req) {
		SNIC_HOST_ERR(snic->shost, "dr:Failed to alloc tm req.\n");
		WARN_ON_ONCE(1);

		return NULL;
	}

	SNIC_BUG_ON(rqi->dr_req != NULL);
	rqi->dr_req = req;
	memset(req, 0, sizeof(struct snic_host_req));
	/* pre initialization of init_ctx to support req_to_rqi */
	req->hdr.init_ctx = (ulong) rqi;

	return req;
} /* end of snic_dr_req_init */
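
/*
 * Note: the abort_req and dr_req allocated above are owned by the parent
 * rqi and are released along with it in snic_req_free() below.
 */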

/* frees snic_req_info and snic_host_req */
void
snic_req_free(struct snic *snic, struct snic_req_info *rqi)
{
	SNIC_BUG_ON(rqi->req == rqi->abort_req);
	SNIC_BUG_ON(rqi->req == rqi->dr_req);
	SNIC_BUG_ON(rqi->sge_va != 0);

	SNIC_SCSI_DBG(snic->shost,
		      "Req_free:rqi %p:ioreq %p:abt %p:dr %p\n",
		      rqi, rqi->req, rqi->abort_req, rqi->dr_req);

	if (rqi->abort_req) {
		if (rqi->abort_req->req_pa)
			dma_unmap_single(&snic->pdev->dev,
					 rqi->abort_req->req_pa,
					 sizeof(struct snic_host_req),
					 DMA_TO_DEVICE);

		mempool_free(rqi->abort_req, snic->req_pool[SNIC_REQ_TM_CACHE]);
	}

	if (rqi->dr_req) {
		if (rqi->dr_req->req_pa)
			dma_unmap_single(&snic->pdev->dev,
					 rqi->dr_req->req_pa,
					 sizeof(struct snic_host_req),
					 DMA_TO_DEVICE);

		mempool_free(rqi->dr_req, snic->req_pool[SNIC_REQ_TM_CACHE]);
	}

	if (rqi->req->req_pa)
		dma_unmap_single(&snic->pdev->dev,
				 rqi->req->req_pa,
				 rqi->req_len,
				 DMA_TO_DEVICE);

	mempool_free(rqi, snic->req_pool[rqi->rq_pool_type]);
}

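/*
 * snic_pci_unmap_rsp_buf: Unmaps the single response SGL buffer that was
 * mapped DMA_FROM_DEVICE for an internal (untagged) request.
 */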
void
snic_pci_unmap_rsp_buf(struct snic *snic, struct snic_req_info *rqi)
{
	struct snic_sg_desc *sgd;

	sgd = req_to_sgl(rqi_to_req(rqi));
	SNIC_BUG_ON(sgd[0].addr == 0);
	dma_unmap_single(&snic->pdev->dev,
			 le64_to_cpu(sgd[0].addr),
			 le32_to_cpu(sgd[0].len),
			 DMA_FROM_DEVICE);
}

/*
 * snic_free_all_untagged_reqs: Walks through untagged reqs and frees them.
 */
void
snic_free_all_untagged_reqs(struct snic *snic)
{
	struct snic_req_info *rqi;
	struct list_head *cur, *nxt;
	unsigned long flags;

	spin_lock_irqsave(&snic->spl_cmd_lock, flags);
	list_for_each_safe(cur, nxt, &snic->spl_cmd_list) {
		rqi = list_entry(cur, struct snic_req_info, list);
		list_del_init(&rqi->list);
		if (rqi->sge_va) {
			snic_pci_unmap_rsp_buf(snic, rqi);
			kfree((void *)rqi->sge_va);
			rqi->sge_va = 0;
		}

		snic_req_free(snic, rqi);
	}
	spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
}

/*
 * snic_release_untagged_req: Unlinks the untagged req and frees it.
 */
void
snic_release_untagged_req(struct snic *snic, struct snic_req_info *rqi)
{
	unsigned long flags;

	spin_lock_irqsave(&snic->snic_lock, flags);
	if (snic->in_remove) {
		spin_unlock_irqrestore(&snic->snic_lock, flags);
		goto end;
	}
	spin_unlock_irqrestore(&snic->snic_lock, flags);

	spin_lock_irqsave(&snic->spl_cmd_lock, flags);
	if (list_empty(&rqi->list)) {
		spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
		goto end;
	}
	list_del_init(&rqi->list);
	spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
	snic_req_free(snic, rqi);

end:
	return;
}

/* dump buf in hex fmt */
void
snic_hex_dump(char *pfx, char *data, int len)
{
	SNIC_INFO("%s Dumping Data of Len = %d\n", pfx, len);
	print_hex_dump_bytes(pfx, DUMP_PREFIX_NONE, data, len);
}

#define	LINE_BUFSZ	128	/* for snic_print_desc fn */
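/*
 * snic_dump_desc: Decodes a host or firmware descriptor header and logs a
 * one-line summary. For response/completion types the request context is
 * recovered from the firmware header's init_ctx.
 */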
static void
snic_dump_desc(const char *fn, char *os_buf, int len)
{
	struct snic_host_req *req = (struct snic_host_req *) os_buf;
	struct snic_fw_req *fwreq = (struct snic_fw_req *) os_buf;
	struct snic_req_info *rqi = NULL;
	char line[LINE_BUFSZ] = { '\0' };
	char *cmd_str = NULL;

	if (req->hdr.type >= SNIC_RSP_REPORT_TGTS_CMPL)
		rqi = (struct snic_req_info *) fwreq->hdr.init_ctx;
	else
		rqi = (struct snic_req_info *) req->hdr.init_ctx;

	SNIC_BUG_ON(rqi == NULL || rqi->req == NULL);
	switch (req->hdr.type) {
	case SNIC_REQ_REPORT_TGTS:
		cmd_str = "report-tgt : ";
		snprintf(line, LINE_BUFSZ, "SNIC_REQ_REPORT_TGTS :");
		break;

	case SNIC_REQ_ICMND:
		cmd_str = "icmnd : ";
		snprintf(line, LINE_BUFSZ, "SNIC_REQ_ICMND : 0x%x :",
			 req->u.icmnd.cdb[0]);
		break;

	case SNIC_REQ_ITMF:
		cmd_str = "itmf : ";
		snprintf(line, LINE_BUFSZ, "SNIC_REQ_ITMF :");
		break;

	case SNIC_REQ_HBA_RESET:
		cmd_str = "hba reset :";
		snprintf(line, LINE_BUFSZ, "SNIC_REQ_HBA_RESET :");
		break;

	case SNIC_REQ_EXCH_VER:
		cmd_str = "exch ver : ";
		snprintf(line, LINE_BUFSZ, "SNIC_REQ_EXCH_VER :");
		break;

	case SNIC_REQ_TGT_INFO:
		cmd_str = "tgt info : ";
		break;

	case SNIC_RSP_REPORT_TGTS_CMPL:
		cmd_str = "report tgt cmpl : ";
		snprintf(line, LINE_BUFSZ, "SNIC_RSP_REPORT_TGTS_CMPL :");
		break;

	case SNIC_RSP_ICMND_CMPL:
		cmd_str = "icmnd_cmpl : ";
		snprintf(line, LINE_BUFSZ, "SNIC_RSP_ICMND_CMPL : 0x%x :",
			 rqi->req->u.icmnd.cdb[0]);
		break;

	case SNIC_RSP_ITMF_CMPL:
		cmd_str = "itmf_cmpl : ";
		snprintf(line, LINE_BUFSZ, "SNIC_RSP_ITMF_CMPL :");
		break;

	case SNIC_RSP_HBA_RESET_CMPL:
		cmd_str = "hba_reset_cmpl : ";
		snprintf(line, LINE_BUFSZ, "SNIC_RSP_HBA_RESET_CMPL :");
		break;

	case SNIC_RSP_EXCH_VER_CMPL:
		cmd_str = "exch_ver_cmpl : ";
		snprintf(line, LINE_BUFSZ, "SNIC_RSP_EXCH_VER_CMPL :");
		break;

	case SNIC_MSG_ACK:
		cmd_str = "msg ack : ";
		snprintf(line, LINE_BUFSZ, "SNIC_MSG_ACK :");
		break;

	case SNIC_MSG_ASYNC_EVNOTIFY:
		cmd_str = "async notify : ";
		snprintf(line, LINE_BUFSZ, "SNIC_MSG_ASYNC_EVNOTIFY :");
		break;

	default:
		cmd_str = "unknown : ";
		SNIC_BUG_ON(1);
		break;
	}

	SNIC_INFO("%s:%s >>cmndid=%x:sg_cnt = %x:status = %x:ctx = %lx.\n",
		  fn, line, req->hdr.cmnd_id, req->hdr.sg_cnt, req->hdr.status,
		  req->hdr.init_ctx);

	/* Enable it, to dump byte stream */
	if (snic_log_level & 0x20)
		snic_hex_dump(cmd_str, os_buf, len);
} /* end of snic_dump_desc */

void
snic_print_desc(const char *fn, char *os_buf, int len)
{
	if (snic_log_level & SNIC_DESC_LOGGING)
		snic_dump_desc(fn, os_buf, len);
}

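/*
 * snic_calc_io_process_time: Records how long (in jiffies) the request
 * took to complete and updates the running maximum in the IO stats.
 */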
void
snic_calc_io_process_time(struct snic *snic, struct snic_req_info *rqi)
{
	u64 duration;

	duration = jiffies - rqi->start_time;

	if (duration > atomic64_read(&snic->s_stats.io.max_time))
		atomic64_set(&snic->s_stats.io.max_time, duration);
}
