xref: /kernel/linux/linux-5.10/drivers/dma/idxd/irq.c (revision 8c2ecf20)
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmaengine.h>
#include <uapi/linux/idxd.h>
#include "../dmaengine.h"
#include "idxd.h"
#include "registers.h"

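/*
 * Recover the device after a software-triggered halt: reset it, reapply
 * the saved configuration, and re-enable any workqueue that was in the
 * enabled state. Runs as deferred work (queued from
 * process_misc_interrupts()) so that device command completions can
 * still be serviced via interrupts during recovery.
 */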
static void idxd_device_reinit(struct work_struct *work)
{
	struct idxd_device *idxd = container_of(work, struct idxd_device, work);
	struct device *dev = &idxd->pdev->dev;
	int rc, i;

	idxd_device_reset(idxd);
	rc = idxd_device_config(idxd);
	if (rc < 0)
		goto out;

	rc = idxd_device_enable(idxd);
	if (rc < 0)
		goto out;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		if (wq->state == IDXD_WQ_ENABLED) {
			rc = idxd_wq_enable(wq);
			if (rc < 0) {
				dev_warn(dev, "Unable to re-enable wq %s\n",
					 dev_name(&wq->conf_dev));
			}
		}
	}

	return;

 out:
	idxd_device_wqs_clear_state(idxd);
}

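/*
 * Top-half interrupt handler: mask the MSIX vector for this interrupt
 * entry and defer all real work to the threaded handler.
 */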
irqreturn_t idxd_irq_handler(int vec, void *data)
{
	struct idxd_irq_entry *irq_entry = data;
	struct idxd_device *idxd = irq_entry->idxd;

	idxd_mask_msix_vector(idxd, irq_entry->id);
	return IRQ_WAKE_THREAD;
}

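/*
 * Handle the non-descriptor interrupt causes: software errors, device
 * command completion, and the (currently unused) occupancy and perfmon
 * overflow causes. Returns -ENXIO if the device has halted and needs an
 * FLR or a system reset, 0 otherwise.
 */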
static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
{
	struct device *dev = &idxd->pdev->dev;
	union gensts_reg gensts;
	u32 val = 0;
	int i;
	bool err = false;

	if (cause & IDXD_INTC_ERR) {
		spin_lock_bh(&idxd->dev_lock);
		for (i = 0; i < 4; i++)
			idxd->sw_err.bits[i] = ioread64(idxd->reg_base +
					IDXD_SWERR_OFFSET + i * sizeof(u64));

		iowrite64(idxd->sw_err.bits[0] & IDXD_SWERR_ACK,
			  idxd->reg_base + IDXD_SWERR_OFFSET);

		if (idxd->sw_err.valid && idxd->sw_err.wq_idx_valid) {
			int id = idxd->sw_err.wq_idx;
			struct idxd_wq *wq = &idxd->wqs[id];

			if (wq->type == IDXD_WQT_USER)
				wake_up_interruptible(&wq->err_queue);
		} else {
			int i;

			for (i = 0; i < idxd->max_wqs; i++) {
				struct idxd_wq *wq = &idxd->wqs[i];

				if (wq->type == IDXD_WQT_USER)
					wake_up_interruptible(&wq->err_queue);
			}
		}

		spin_unlock_bh(&idxd->dev_lock);
		val |= IDXD_INTC_ERR;

		for (i = 0; i < 4; i++)
			dev_warn(dev, "err[%d]: %#16.16llx\n",
				 i, idxd->sw_err.bits[i]);
		err = true;
	}

	if (cause & IDXD_INTC_CMD) {
		val |= IDXD_INTC_CMD;
		complete(idxd->cmd_done);
	}

	if (cause & IDXD_INTC_OCCUPY) {
		/* Driver does not utilize occupancy interrupt */
		val |= IDXD_INTC_OCCUPY;
	}

	if (cause & IDXD_INTC_PERFMON_OVFL) {
		/*
		 * Driver does not utilize perfmon counter overflow interrupt
		 * yet.
		 */
		val |= IDXD_INTC_PERFMON_OVFL;
	}

	val ^= cause;
	if (val)
		dev_warn_once(dev, "Unexpected interrupt cause bits set: %#x\n",
			      val);

	if (!err)
		return 0;

	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
	if (gensts.state == IDXD_DEVICE_STATE_HALT) {
		idxd->state = IDXD_DEV_HALTED;
		if (gensts.reset_type == IDXD_DEVICE_RESET_SOFTWARE) {
			/*
			 * If we need a software reset, queue the work on the
			 * device's workqueue so that interrupts remain
			 * available for the device command completions.
			 */
			INIT_WORK(&idxd->work, idxd_device_reinit);
			queue_work(idxd->wq, &idxd->work);
		} else {
			spin_lock_bh(&idxd->dev_lock);
			idxd_device_wqs_clear_state(idxd);
			dev_err(&idxd->pdev->dev,
				"idxd halted, need %s.\n",
				gensts.reset_type == IDXD_DEVICE_RESET_FLR ?
				"FLR" : "system reset");
			spin_unlock_bh(&idxd->dev_lock);
			return -ENXIO;
		}
	}

	return 0;
}

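/*
 * Threaded handler for the misc interrupt vector: read and acknowledge
 * the interrupt cause register, process the reported causes, and repeat
 * until no new cause bits are pending (or a fatal halt is reported),
 * then unmask the vector.
 */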
irqreturn_t idxd_misc_thread(int vec, void *data)
{
	struct idxd_irq_entry *irq_entry = data;
	struct idxd_device *idxd = irq_entry->idxd;
	int rc;
	u32 cause;

	cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
	if (cause)
		iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);

	while (cause) {
		rc = process_misc_interrupts(idxd, cause);
		if (rc < 0)
			break;
		cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
		if (cause)
			iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
	}

	idxd_unmask_msix_vector(idxd, irq_entry->id);
	return IRQ_HANDLED;
}

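/*
 * Drain the lockless pending_llist: complete and free descriptors whose
 * completion status is set, and move the rest onto work_list. Returns
 * the number of descriptors still waiting on hardware; *processed is
 * set to the number completed.
 */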
static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
				     int *processed)
{
	struct idxd_desc *desc, *t;
	struct llist_node *head;
	int queued = 0;

	*processed = 0;
	head = llist_del_all(&irq_entry->pending_llist);
	if (!head)
		return 0;

	llist_for_each_entry_safe(desc, t, head, llnode) {
		if (desc->completion->status) {
			idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL);
			idxd_free_desc(desc->wq, desc);
			(*processed)++;
		} else {
			list_add_tail(&desc->list, &irq_entry->work_list);
			queued++;
		}
	}

	return queued;
}

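/*
 * Walk work_list and complete any descriptor whose completion status is
 * set, removing it from the list. Returns the number of descriptors
 * still pending; *processed is set to the number completed.
 */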
static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
				 int *processed)
{
	struct list_head *node, *next;
	int queued = 0;

	*processed = 0;
	if (list_empty(&irq_entry->work_list))
		return 0;

	list_for_each_safe(node, next, &irq_entry->work_list) {
		struct idxd_desc *desc =
			container_of(node, struct idxd_desc, list);

		if (desc->completion->status) {
			list_del(&desc->list);
			/* process and callback */
			idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL);
			idxd_free_desc(desc->wq, desc);
			(*processed)++;
		} else {
			queued++;
		}
	}

	return queued;
}

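/*
 * Process completed descriptors for this interrupt entry by draining
 * both the work_list and the pending_llist (see the ordering described
 * in the comment below). Returns the total number of descriptors
 * completed.
 */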
static int idxd_desc_process(struct idxd_irq_entry *irq_entry)
{
	int rc, processed, total = 0;

	/*
	 * There are two lists we are processing. The pending_llist is where
	 * the submitter adds all the submitted descriptors after sending them
	 * to the workqueue. It's a lockless singly linked list. The work_list
	 * is the common Linux doubly linked list. We are in a scenario of
	 * multiple producers and a single consumer. The producers are all
	 * the kernel submitters of descriptors, and the consumer is the
	 * kernel irq handler thread for the msix vector when using threaded
	 * irq. To work within the restrictions of llist and remain lockless,
	 * we are doing the following steps:
	 * 1. Iterate through the work_list and process any completed
	 *    descriptor. Delete the completed entries during iteration.
	 * 2. llist_del_all() from the pending list.
	 * 3. Iterate through the llist that was deleted from the pending list
	 *    and process the completed entries.
	 * 4. If the entry is still waiting on hardware, list_add_tail() to
	 *    the work_list.
	 * 5. Repeat until no more descriptors.
	 */
	do {
		rc = irq_process_work_list(irq_entry, &processed);
		total += processed;
		if (rc != 0)
			continue;

		rc = irq_process_pending_llist(irq_entry, &processed);
		total += processed;
	} while (rc != 0);

	return total;
}

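/*
 * Threaded handler for a workqueue completion vector: process completed
 * descriptors and unmask the vector. Returns IRQ_NONE if nothing was
 * processed so the irq core can account for spurious interrupts.
 */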
irqreturn_t idxd_wq_thread(int irq, void *data)
{
	struct idxd_irq_entry *irq_entry = data;
	int processed;

	processed = idxd_desc_process(irq_entry);
	idxd_unmask_msix_vector(irq_entry->idxd, irq_entry->id);

	if (processed == 0)
		return IRQ_NONE;

	return IRQ_HANDLED;
}
278