1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright IBM Corp. 2016, 2023
4 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
5 *
6 * Adjunct processor bus, queue related code.
7 */
8
9#define KMSG_COMPONENT "ap"
10#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
11
12#include <linux/init.h>
13#include <linux/slab.h>
14#include <asm/facility.h>
15
16#include "ap_bus.h"
17#include "ap_debug.h"
18
19static void __ap_flush_queue(struct ap_queue *aq);
20
21/*
22 * some AP queue helper functions
23 */
24
25static inline bool ap_q_supports_bind(struct ap_queue *aq)
26{
27	return ap_test_bit(&aq->card->functions, AP_FUNC_EP11) ||
28		ap_test_bit(&aq->card->functions, AP_FUNC_ACCEL);
29}
30
31static inline bool ap_q_supports_assoc(struct ap_queue *aq)
32{
33	return ap_test_bit(&aq->card->functions, AP_FUNC_EP11);
34}
35
/**
 * ap_queue_enable_irq(): Enable interrupt support on this AP queue.
 * @aq: The AP queue
 * @ind: the notification indicator byte
 *
 * Requests interruption on the AP queue via ap_aqic(). Whether the
 * interrupts actually got switched on is checked later with TAPQ
 * (see ap_sm_setirq_wait()).
 *
 * Returns 0 on success, -EPERM on an asynchronous error indication,
 * -EOPNOTSUPP when the queue is unavailable/deconfigured/checkstopped
 * or the address is invalid, and -EBUSY when the request should be
 * retried later.
 */
static int ap_queue_enable_irq(struct ap_queue *aq, void *ind)
{
	union ap_qirq_ctrl qirqctrl = { .value = 0 };
	struct ap_queue_status status;

	/* Request interruption (ir) on our interrupt subclass (isc). */
	qirqctrl.ir = 1;
	qirqctrl.isc = AP_ISC;
	status = ap_aqic(aq->qid, qirqctrl, virt_to_phys(ind));
	if (status.async)
		return -EPERM;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_OTHERWISE_CHANGED:
		return 0;
	case AP_RESPONSE_Q_NOT_AVAIL:
	case AP_RESPONSE_DECONFIGURED:
	case AP_RESPONSE_CHECKSTOPPED:
	case AP_RESPONSE_INVALID_ADDRESS:
		pr_err("Registering adapter interrupts for AP device %02x.%04x failed\n",
		       AP_QID_CARD(aq->qid),
		       AP_QID_QUEUE(aq->qid));
		return -EOPNOTSUPP;
	case AP_RESPONSE_RESET_IN_PROGRESS:
	case AP_RESPONSE_BUSY:
	default:
		return -EBUSY;
	}
}
73
74/**
75 * __ap_send(): Send message to adjunct processor queue.
76 * @qid: The AP queue number
77 * @psmid: The program supplied message identifier
78 * @msg: The message text
79 * @msglen: The message length
80 * @special: Special Bit
81 *
82 * Returns AP queue status structure.
83 * Condition code 1 on NQAP can't happen because the L bit is 1.
84 * Condition code 2 on NQAP also means the send is incomplete,
85 * because a segment boundary was reached. The NQAP is repeated.
86 */
87static inline struct ap_queue_status
88__ap_send(ap_qid_t qid, unsigned long psmid, void *msg, size_t msglen,
89	  int special)
90{
91	if (special)
92		qid |= 0x400000UL;
93	return ap_nqap(qid, psmid, msg, msglen);
94}
95
96/* State machine definitions and helpers */
97
98static enum ap_sm_wait ap_sm_nop(struct ap_queue *aq)
99{
100	return AP_SM_WAIT_NONE;
101}
102
/**
 * ap_sm_recv(): Receive pending reply messages from an AP queue but do
 *	not change the state of the device.
 * @aq: pointer to the AP queue
 *
 * Returns the ap_queue_status of the final (or only) DQAP invocation.
 */
static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;
	bool found = false;
	size_t reslen;
	unsigned long resgr0 = 0;
	int parts = 0;

	/*
	 * DQAP loop until response code and resgr0 indicate that
	 * the msg is totally received. As we use the very same buffer
	 * the msg is overwritten with each invocation. That's intended
	 * and the receiver of the msg is informed with a msg rc code
	 * of EMSGSIZE in such a case.
	 */
	do {
		status = ap_dqap(aq->qid, &aq->reply->psmid,
				 aq->reply->msg, aq->reply->bufsize,
				 &aq->reply->len, &reslen, &resgr0);
		parts++;
	} while (status.response_code == 0xFF && resgr0 != 0);

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		/* One reply fetched; re-arm the timeout while work remains. */
		aq->queue_count = max_t(int, 0, aq->queue_count - 1);
		if (!status.queue_empty && !aq->queue_count)
			aq->queue_count++;
		if (aq->queue_count > 0)
			mod_timer(&aq->timeout,
				  jiffies + aq->request_timeout);
		/* Match the reply to a pending request by psmid. */
		list_for_each_entry(ap_msg, &aq->pendingq, list) {
			if (ap_msg->psmid != aq->reply->psmid)
				continue;
			list_del_init(&ap_msg->list);
			aq->pendingq_count--;
			if (parts > 1) {
				/* Multi-part reply overwrote the buffer. */
				ap_msg->rc = -EMSGSIZE;
				ap_msg->receive(aq, ap_msg, NULL);
			} else {
				ap_msg->receive(aq, ap_msg, aq->reply);
			}
			found = true;
			break;
		}
		if (!found) {
			AP_DBF_WARN("%s unassociated reply psmid=0x%016lx on 0x%02x.%04x\n",
				    __func__, aq->reply->psmid,
				    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		}
		fallthrough;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (!status.queue_empty || aq->queue_count <= 0)
			break;
		/* The card shouldn't forget requests but who knows. */
		aq->queue_count = 0;
		list_splice_init(&aq->pendingq, &aq->requestq);
		aq->requestq_count += aq->pendingq_count;
		aq->pendingq_count = 0;
		break;
	default:
		break;
	}
	return status;
}
175
/**
 * ap_sm_read(): Receive pending reply messages from an AP queue.
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
 */
static enum ap_sm_wait ap_sm_read(struct ap_queue *aq)
{
	struct ap_queue_status status;

	/* Without a reply buffer there is nothing we could receive into. */
	if (!aq->reply)
		return AP_SM_WAIT_NONE;
	status = ap_sm_recv(aq);
	if (status.async)
		return AP_SM_WAIT_NONE;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		/* Got a reply; poll again while requests are outstanding. */
		if (aq->queue_count > 0) {
			aq->sm_state = AP_SM_STATE_WORKING;
			return AP_SM_WAIT_AGAIN;
		}
		aq->sm_state = AP_SM_STATE_IDLE;
		return AP_SM_WAIT_NONE;
	case AP_RESPONSE_NO_PENDING_REPLY:
		/* Replies outstanding: wait for irq or poll after timeout. */
		if (aq->queue_count > 0)
			return aq->interrupt ?
				AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_HIGH_TIMEOUT;
		aq->sm_state = AP_SM_STATE_IDLE;
		return AP_SM_WAIT_NONE;
	default:
		/* Unexpected response code: take the queue out of service. */
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}
214
/**
 * ap_sm_write(): Send messages from the request queue to an AP queue.
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
 */
static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;
	ap_qid_t qid = aq->qid;

	if (aq->requestq_count <= 0)
		return AP_SM_WAIT_NONE;

	/* Start the next request on the queue. */
	ap_msg = list_entry(aq->requestq.next, struct ap_message, list);
	status = __ap_send(qid, ap_msg->psmid,
			   ap_msg->msg, ap_msg->len,
			   ap_msg->flags & AP_MSG_FLAG_SPECIAL);
	if (status.async)
		return AP_SM_WAIT_NONE;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		/* Sent: move msg to pendingq and arm timeout on first one. */
		aq->queue_count = max_t(int, 1, aq->queue_count + 1);
		if (aq->queue_count == 1)
			mod_timer(&aq->timeout, jiffies + aq->request_timeout);
		list_move_tail(&ap_msg->list, &aq->pendingq);
		aq->requestq_count--;
		aq->pendingq_count++;
		if (aq->queue_count < aq->card->queue_depth) {
			aq->sm_state = AP_SM_STATE_WORKING;
			return AP_SM_WAIT_AGAIN;
		}
		/* Queue depth reached: treat like a full queue. */
		fallthrough;
	case AP_RESPONSE_Q_FULL:
		aq->sm_state = AP_SM_STATE_QUEUE_FULL;
		return aq->interrupt ?
			AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_HIGH_TIMEOUT;
	case AP_RESPONSE_RESET_IN_PROGRESS:
		aq->sm_state = AP_SM_STATE_RESET_WAIT;
		return AP_SM_WAIT_LOW_TIMEOUT;
	case AP_RESPONSE_INVALID_DOMAIN:
		AP_DBF_WARN("%s RESPONSE_INVALID_DOMAIN on NQAP\n", __func__);
		fallthrough;
	case AP_RESPONSE_MESSAGE_TOO_BIG:
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		/* Per-message failure: complete this msg with -EINVAL. */
		list_del_init(&ap_msg->list);
		aq->requestq_count--;
		ap_msg->rc = -EINVAL;
		ap_msg->receive(aq, ap_msg, NULL);
		return AP_SM_WAIT_AGAIN;
	default:
		/* Unexpected response code: take the queue out of service. */
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}
276
277/**
278 * ap_sm_read_write(): Send and receive messages to/from an AP queue.
279 * @aq: pointer to the AP queue
280 *
281 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
282 */
283static enum ap_sm_wait ap_sm_read_write(struct ap_queue *aq)
284{
285	return min(ap_sm_read(aq), ap_sm_write(aq));
286}
287
/**
 * ap_sm_reset(): Reset an AP queue.
 * @aq: The AP queue
 *
 * Submit the Reset command (RAPQ) to an AP queue.
 *
 * Returns AP_SM_WAIT_LOW_TIMEOUT when the reset was accepted or is
 * already in progress, AP_SM_WAIT_NONE otherwise.
 */
static enum ap_sm_wait ap_sm_reset(struct ap_queue *aq)
{
	struct ap_queue_status status;

	status = ap_rapq(aq->qid, aq->rapq_fbit);
	if (status.async)
		return AP_SM_WAIT_NONE;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		/* Reset clears the irq registration and the F bit request. */
		aq->sm_state = AP_SM_STATE_RESET_WAIT;
		aq->interrupt = false;
		aq->rapq_fbit = 0;
		return AP_SM_WAIT_LOW_TIMEOUT;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}
317
/**
 * ap_sm_reset_wait(): Test queue for completion of the reset operation
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_AGAIN, AP_SM_WAIT_LOW_TIMEOUT or AP_SM_WAIT_NONE.
 */
static enum ap_sm_wait ap_sm_reset_wait(struct ap_queue *aq)
{
	struct ap_queue_status status;
	void *lsi_ptr;

	if (aq->queue_count > 0 && aq->reply)
		/* Try to read a completed message and get the status */
		status = ap_sm_recv(aq);
	else
		/* Get the status with TAPQ */
		status = ap_tapq(aq->qid, NULL);

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		/* Reset done; try to (re-)enable interrupts if available. */
		lsi_ptr = ap_airq_ptr();
		if (lsi_ptr && ap_queue_enable_irq(aq, lsi_ptr) == 0)
			aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
		else
			aq->sm_state = (aq->queue_count > 0) ?
				AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
		return AP_SM_WAIT_AGAIN;
	case AP_RESPONSE_BUSY:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return AP_SM_WAIT_LOW_TIMEOUT;
	case AP_RESPONSE_Q_NOT_AVAIL:
	case AP_RESPONSE_DECONFIGURED:
	case AP_RESPONSE_CHECKSTOPPED:
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}
360
/**
 * ap_sm_setirq_wait(): Test queue for completion of the irq enablement
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_AGAIN, AP_SM_WAIT_LOW_TIMEOUT or AP_SM_WAIT_NONE.
 */
static enum ap_sm_wait ap_sm_setirq_wait(struct ap_queue *aq)
{
	struct ap_queue_status status;

	if (aq->queue_count > 0 && aq->reply)
		/* Try to read a completed message and get the status */
		status = ap_sm_recv(aq);
	else
		/* Get the status with TAPQ */
		status = ap_tapq(aq->qid, NULL);

	if (status.irq_enabled == 1) {
		/* Irqs are now enabled */
		aq->interrupt = true;
		aq->sm_state = (aq->queue_count > 0) ?
			AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
	}

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		if (aq->queue_count > 0)
			return AP_SM_WAIT_AGAIN;
		fallthrough;
	case AP_RESPONSE_NO_PENDING_REPLY:
		/* Irq enablement may still be pending: poll again later. */
		return AP_SM_WAIT_LOW_TIMEOUT;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}
401
/**
 * ap_sm_assoc_wait(): Test queue for completion of a pending
 *		       association request.
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE when the association completed or failed,
 * AP_SM_WAIT_LOW_TIMEOUT while it is still pending.
 */
static enum ap_sm_wait ap_sm_assoc_wait(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_tapq_gr2 info;

	status = ap_test_queue(aq->qid, 1, &info);
	/* handle asynchronous error on this queue */
	if (status.async && status.response_code) {
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s asynch RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
	if (status.response_code > AP_RESPONSE_BUSY) {
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}

	/* check bs bits */
	switch (info.bs) {
	case AP_BS_Q_USABLE:
		/* association is through */
		aq->sm_state = AP_SM_STATE_IDLE;
		AP_DBF_DBG("%s queue 0x%02x.%04x associated with %u\n",
			   __func__, AP_QID_CARD(aq->qid),
			   AP_QID_QUEUE(aq->qid), aq->assoc_idx);
		return AP_SM_WAIT_NONE;
	case AP_BS_Q_USABLE_NO_SECURE_KEY:
		/* association still pending */
		return AP_SM_WAIT_LOW_TIMEOUT;
	default:
		/* reset from 'outside' happened or no idea at all */
		aq->assoc_idx = ASSOC_IDX_INVALID;
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s bs 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, info.bs,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}
454
/*
 * AP state machine jump table: maps (sm_state, event) to the handler
 * function run by ap_sm_event(). Every state handles AP_SM_EVENT_POLL;
 * AP_SM_EVENT_TIMEOUT triggers a reset only for states with work
 * potentially stuck on the queue (WORKING, QUEUE_FULL, ASSOC_WAIT).
 */
static ap_func_t *ap_jumptable[NR_AP_SM_STATES][NR_AP_SM_EVENTS] = {
	[AP_SM_STATE_RESET_START] = {
		[AP_SM_EVENT_POLL] = ap_sm_reset,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_RESET_WAIT] = {
		[AP_SM_EVENT_POLL] = ap_sm_reset_wait,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_SETIRQ_WAIT] = {
		[AP_SM_EVENT_POLL] = ap_sm_setirq_wait,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_IDLE] = {
		[AP_SM_EVENT_POLL] = ap_sm_write,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_WORKING] = {
		[AP_SM_EVENT_POLL] = ap_sm_read_write,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
	},
	[AP_SM_STATE_QUEUE_FULL] = {
		[AP_SM_EVENT_POLL] = ap_sm_read,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
	},
	[AP_SM_STATE_ASSOC_WAIT] = {
		[AP_SM_EVENT_POLL] = ap_sm_assoc_wait,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
	},
};
488
489enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event)
490{
491	if (aq->config && !aq->chkstop &&
492	    aq->dev_state > AP_DEV_STATE_UNINITIATED)
493		return ap_jumptable[aq->sm_state][event](aq);
494	else
495		return AP_SM_WAIT_NONE;
496}
497
498enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event)
499{
500	enum ap_sm_wait wait;
501
502	while ((wait = ap_sm_event(aq, event)) == AP_SM_WAIT_AGAIN)
503		;
504	return wait;
505}
506
507/*
508 * AP queue related attributes.
509 */
510static ssize_t request_count_show(struct device *dev,
511				  struct device_attribute *attr,
512				  char *buf)
513{
514	struct ap_queue *aq = to_ap_queue(dev);
515	bool valid = false;
516	u64 req_cnt;
517
518	spin_lock_bh(&aq->lock);
519	if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
520		req_cnt = aq->total_request_count;
521		valid = true;
522	}
523	spin_unlock_bh(&aq->lock);
524
525	if (valid)
526		return sysfs_emit(buf, "%llu\n", req_cnt);
527	else
528		return sysfs_emit(buf, "-\n");
529}
530
/* Writing anything to the request_count attribute resets the counter. */
static ssize_t request_count_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&aq->lock);
	aq->total_request_count = 0;
	spin_unlock_bh(&aq->lock);

	return count;
}

static DEVICE_ATTR_RW(request_count);
545
546static ssize_t requestq_count_show(struct device *dev,
547				   struct device_attribute *attr, char *buf)
548{
549	struct ap_queue *aq = to_ap_queue(dev);
550	unsigned int reqq_cnt = 0;
551
552	spin_lock_bh(&aq->lock);
553	if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
554		reqq_cnt = aq->requestq_count;
555	spin_unlock_bh(&aq->lock);
556	return sysfs_emit(buf, "%d\n", reqq_cnt);
557}
558
559static DEVICE_ATTR_RO(requestq_count);
560
561static ssize_t pendingq_count_show(struct device *dev,
562				   struct device_attribute *attr, char *buf)
563{
564	struct ap_queue *aq = to_ap_queue(dev);
565	unsigned int penq_cnt = 0;
566
567	spin_lock_bh(&aq->lock);
568	if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
569		penq_cnt = aq->pendingq_count;
570	spin_unlock_bh(&aq->lock);
571	return sysfs_emit(buf, "%d\n", penq_cnt);
572}
573
574static DEVICE_ATTR_RO(pendingq_count);
575
576static ssize_t reset_show(struct device *dev,
577			  struct device_attribute *attr, char *buf)
578{
579	struct ap_queue *aq = to_ap_queue(dev);
580	int rc = 0;
581
582	spin_lock_bh(&aq->lock);
583	switch (aq->sm_state) {
584	case AP_SM_STATE_RESET_START:
585	case AP_SM_STATE_RESET_WAIT:
586		rc = sysfs_emit(buf, "Reset in progress.\n");
587		break;
588	case AP_SM_STATE_WORKING:
589	case AP_SM_STATE_QUEUE_FULL:
590		rc = sysfs_emit(buf, "Reset Timer armed.\n");
591		break;
592	default:
593		rc = sysfs_emit(buf, "No Reset Timer set.\n");
594	}
595	spin_unlock_bh(&aq->lock);
596	return rc;
597}
598
/*
 * Writing to the reset attribute flushes all queued messages and
 * restarts the state machine at RESET_START, triggering a RAPQ.
 */
static ssize_t reset_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&aq->lock);
	__ap_flush_queue(aq);
	aq->sm_state = AP_SM_STATE_RESET_START;
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
	spin_unlock_bh(&aq->lock);

	AP_DBF_INFO("%s reset queue=%02x.%04x triggered by user\n",
		    __func__, AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));

	return count;
}

static DEVICE_ATTR_RW(reset);
618
619static ssize_t interrupt_show(struct device *dev,
620			      struct device_attribute *attr, char *buf)
621{
622	struct ap_queue *aq = to_ap_queue(dev);
623	int rc = 0;
624
625	spin_lock_bh(&aq->lock);
626	if (aq->sm_state == AP_SM_STATE_SETIRQ_WAIT)
627		rc = sysfs_emit(buf, "Enable Interrupt pending.\n");
628	else if (aq->interrupt)
629		rc = sysfs_emit(buf, "Interrupts enabled.\n");
630	else
631		rc = sysfs_emit(buf, "Interrupts disabled.\n");
632	spin_unlock_bh(&aq->lock);
633	return rc;
634}
635
636static DEVICE_ATTR_RO(interrupt);
637
638static ssize_t config_show(struct device *dev,
639			   struct device_attribute *attr, char *buf)
640{
641	struct ap_queue *aq = to_ap_queue(dev);
642	int rc;
643
644	spin_lock_bh(&aq->lock);
645	rc = sysfs_emit(buf, "%d\n", aq->config ? 1 : 0);
646	spin_unlock_bh(&aq->lock);
647	return rc;
648}
649
650static DEVICE_ATTR_RO(config);
651
652static ssize_t chkstop_show(struct device *dev,
653			    struct device_attribute *attr, char *buf)
654{
655	struct ap_queue *aq = to_ap_queue(dev);
656	int rc;
657
658	spin_lock_bh(&aq->lock);
659	rc = sysfs_emit(buf, "%d\n", aq->chkstop ? 1 : 0);
660	spin_unlock_bh(&aq->lock);
661	return rc;
662}
663
664static DEVICE_ATTR_RO(chkstop);
665
/* Show the facility bits of the queue as reported by TAPQ GR2. */
static ssize_t ap_functions_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_gr2 info;

	status = ap_test_queue(aq->qid, 1, &info);
	/* Response codes above BUSY indicate a real failure. */
	if (status.response_code > AP_RESPONSE_BUSY) {
		AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			   __func__, status.response_code,
			   AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}

	return sysfs_emit(buf, "0x%08X\n", info.fac);
}

static DEVICE_ATTR_RO(ap_functions);
685
686#ifdef CONFIG_ZCRYPT_DEBUG
/*
 * Debug attribute: show the queue device state and, for initialized
 * devices, the state machine state appended on the same line.
 */
static ssize_t states_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc = 0;

	spin_lock_bh(&aq->lock);
	/* queue device state */
	switch (aq->dev_state) {
	case AP_DEV_STATE_UNINITIATED:
		/* UNINITIATED is final output, hence the newline here. */
		rc = sysfs_emit(buf, "UNINITIATED\n");
		break;
	case AP_DEV_STATE_OPERATING:
		rc = sysfs_emit(buf, "OPERATING");
		break;
	case AP_DEV_STATE_SHUTDOWN:
		rc = sysfs_emit(buf, "SHUTDOWN");
		break;
	case AP_DEV_STATE_ERROR:
		rc = sysfs_emit(buf, "ERROR");
		break;
	default:
		rc = sysfs_emit(buf, "UNKNOWN");
	}
	/* state machine state (appended at offset rc, adds the newline) */
	if (aq->dev_state) {
		switch (aq->sm_state) {
		case AP_SM_STATE_RESET_START:
			rc += sysfs_emit_at(buf, rc, " [RESET_START]\n");
			break;
		case AP_SM_STATE_RESET_WAIT:
			rc += sysfs_emit_at(buf, rc, " [RESET_WAIT]\n");
			break;
		case AP_SM_STATE_SETIRQ_WAIT:
			rc += sysfs_emit_at(buf, rc, " [SETIRQ_WAIT]\n");
			break;
		case AP_SM_STATE_IDLE:
			rc += sysfs_emit_at(buf, rc, " [IDLE]\n");
			break;
		case AP_SM_STATE_WORKING:
			rc += sysfs_emit_at(buf, rc, " [WORKING]\n");
			break;
		case AP_SM_STATE_QUEUE_FULL:
			rc += sysfs_emit_at(buf, rc, " [FULL]\n");
			break;
		case AP_SM_STATE_ASSOC_WAIT:
			rc += sysfs_emit_at(buf, rc, " [ASSOC_WAIT]\n");
			break;
		default:
			rc += sysfs_emit_at(buf, rc, " [UNKNOWN]\n");
		}
	}
	spin_unlock_bh(&aq->lock);

	return rc;
}
static DEVICE_ATTR_RO(states);
744
/* Debug attribute: show the last error response code as symbolic text. */
static ssize_t last_err_rc_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc;

	/* Snapshot the rc under the lock, format outside of it. */
	spin_lock_bh(&aq->lock);
	rc = aq->last_err_rc;
	spin_unlock_bh(&aq->lock);

	switch (rc) {
	case AP_RESPONSE_NORMAL:
		return sysfs_emit(buf, "NORMAL\n");
	case AP_RESPONSE_Q_NOT_AVAIL:
		return sysfs_emit(buf, "Q_NOT_AVAIL\n");
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return sysfs_emit(buf, "RESET_IN_PROGRESS\n");
	case AP_RESPONSE_DECONFIGURED:
		return sysfs_emit(buf, "DECONFIGURED\n");
	case AP_RESPONSE_CHECKSTOPPED:
		return sysfs_emit(buf, "CHECKSTOPPED\n");
	case AP_RESPONSE_BUSY:
		return sysfs_emit(buf, "BUSY\n");
	case AP_RESPONSE_INVALID_ADDRESS:
		return sysfs_emit(buf, "INVALID_ADDRESS\n");
	case AP_RESPONSE_OTHERWISE_CHANGED:
		return sysfs_emit(buf, "OTHERWISE_CHANGED\n");
	case AP_RESPONSE_Q_FULL:
		return sysfs_emit(buf, "Q_FULL/NO_PENDING_REPLY\n");
	case AP_RESPONSE_INDEX_TOO_BIG:
		return sysfs_emit(buf, "INDEX_TOO_BIG\n");
	case AP_RESPONSE_NO_FIRST_PART:
		return sysfs_emit(buf, "NO_FIRST_PART\n");
	case AP_RESPONSE_MESSAGE_TOO_BIG:
		return sysfs_emit(buf, "MESSAGE_TOO_BIG\n");
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		return sysfs_emit(buf, "REQ_FAC_NOT_INST\n");
	default:
		return sysfs_emit(buf, "response code %d\n", rc);
	}
}
static DEVICE_ATTR_RO(last_err_rc);
787#endif
788
/* Default sysfs attributes of every AP queue device. */
static struct attribute *ap_queue_dev_attrs[] = {
	&dev_attr_request_count.attr,
	&dev_attr_requestq_count.attr,
	&dev_attr_pendingq_count.attr,
	&dev_attr_reset.attr,
	&dev_attr_interrupt.attr,
	&dev_attr_config.attr,
	&dev_attr_chkstop.attr,
	&dev_attr_ap_functions.attr,
#ifdef CONFIG_ZCRYPT_DEBUG
	&dev_attr_states.attr,
	&dev_attr_last_err_rc.attr,
#endif
	NULL
};
804
/* Attribute group wiring for the "ap_queue" device type. */
static struct attribute_group ap_queue_dev_attr_group = {
	.attrs = ap_queue_dev_attrs
};

static const struct attribute_group *ap_queue_dev_attr_groups[] = {
	&ap_queue_dev_attr_group,
	NULL
};

static struct device_type ap_queue_type = {
	.name = "ap_queue",
	.groups = ap_queue_dev_attr_groups,
};
818
/*
 * Show the SE bind state of the queue ("bound"/"unbound" derived from
 * the TAPQ GR2 bs bits), or "-" when the queue does not support bind.
 */
static ssize_t se_bind_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_gr2 info;

	if (!ap_q_supports_bind(aq))
		return sysfs_emit(buf, "-\n");

	status = ap_test_queue(aq->qid, 1, &info);
	/* Response codes above BUSY indicate a real failure. */
	if (status.response_code > AP_RESPONSE_BUSY) {
		AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			   __func__, status.response_code,
			   AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}
	switch (info.bs) {
	case AP_BS_Q_USABLE:
	case AP_BS_Q_USABLE_NO_SECURE_KEY:
		return sysfs_emit(buf, "bound\n");
	default:
		return sysfs_emit(buf, "unbound\n");
	}
}
844
/*
 * Bind (1) or unbind (0) the queue: bind issues a BAPQ, unbind flushes
 * the queue and triggers a RAPQ with the F bit set.
 */
static ssize_t se_bind_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	bool value;
	int rc;

	if (!ap_q_supports_bind(aq))
		return -EINVAL;

	/* only 0 (unbind) and 1 (bind) allowed */
	rc = kstrtobool(buf, &value);
	if (rc)
		return rc;

	if (value) {
		/* bind, do BAPQ */
		spin_lock_bh(&aq->lock);
		/* Refuse while the state machine is still below IDLE. */
		if (aq->sm_state < AP_SM_STATE_IDLE) {
			spin_unlock_bh(&aq->lock);
			return -EBUSY;
		}
		status = ap_bapq(aq->qid);
		spin_unlock_bh(&aq->lock);
		if (status.response_code) {
			AP_DBF_WARN("%s RC 0x%02x on bapq(0x%02x.%04x)\n",
				    __func__, status.response_code,
				    AP_QID_CARD(aq->qid),
				    AP_QID_QUEUE(aq->qid));
			return -EIO;
		}
	} else {
		/* unbind, set F bit arg and trigger RAPQ */
		spin_lock_bh(&aq->lock);
		__ap_flush_queue(aq);
		aq->rapq_fbit = 1;
		aq->assoc_idx = ASSOC_IDX_INVALID;
		aq->sm_state = AP_SM_STATE_RESET_START;
		ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
		spin_unlock_bh(&aq->lock);
	}

	return count;
}

static DEVICE_ATTR_RW(se_bind);
893
/*
 * Show the SE association state derived from the TAPQ GR2 bs bits and
 * the stored assoc_idx, or "-" when association is not supported.
 */
static ssize_t se_associate_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_gr2 info;

	if (!ap_q_supports_assoc(aq))
		return sysfs_emit(buf, "-\n");

	status = ap_test_queue(aq->qid, 1, &info);
	/* Response codes above BUSY indicate a real failure. */
	if (status.response_code > AP_RESPONSE_BUSY) {
		AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			   __func__, status.response_code,
			   AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}

	switch (info.bs) {
	case AP_BS_Q_USABLE:
		/* bs says usable, so a valid assoc_idx must be recorded */
		if (aq->assoc_idx == ASSOC_IDX_INVALID) {
			AP_DBF_WARN("%s AP_BS_Q_USABLE but invalid assoc_idx\n", __func__);
			return -EIO;
		}
		return sysfs_emit(buf, "associated %u\n", aq->assoc_idx);
	case AP_BS_Q_USABLE_NO_SECURE_KEY:
		if (aq->assoc_idx != ASSOC_IDX_INVALID)
			return sysfs_emit(buf, "association pending\n");
		fallthrough;
	default:
		return sysfs_emit(buf, "unassociated\n");
	}
}
927
/*
 * Trigger an asynchronous association request (AAPQ) with the given
 * association index and let the state machine wait for completion.
 */
static ssize_t se_associate_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	unsigned int value;
	int rc;

	if (!ap_q_supports_assoc(aq))
		return -EINVAL;

	/* association index needs to be >= 0 */
	rc = kstrtouint(buf, 0, &value);
	if (rc)
		return rc;
	if (value >= ASSOC_IDX_INVALID)
		return -EINVAL;

	spin_lock_bh(&aq->lock);

	/* sm should be in idle state */
	if (aq->sm_state != AP_SM_STATE_IDLE) {
		spin_unlock_bh(&aq->lock);
		return -EBUSY;
	}

	/* already associated or association pending ? */
	if (aq->assoc_idx != ASSOC_IDX_INVALID) {
		spin_unlock_bh(&aq->lock);
		return -EINVAL;
	}

	/* trigger the asynchronous association request */
	status = ap_aapq(aq->qid, value);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_STATE_CHANGE_IN_PROGRESS:
		/* accepted: let ap_sm_assoc_wait poll for completion */
		aq->sm_state = AP_SM_STATE_ASSOC_WAIT;
		aq->assoc_idx = value;
		ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
		spin_unlock_bh(&aq->lock);
		break;
	default:
		spin_unlock_bh(&aq->lock);
		AP_DBF_WARN("%s RC 0x%02x on aapq(0x%02x.%04x)\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}

	return count;
}

static DEVICE_ATTR_RW(se_associate);
983
/* Optional SE (secure execution) bind/associate attributes, see
 * ap_queue_create() for when this group gets attached. */
static struct attribute *ap_queue_dev_sb_attrs[] = {
	&dev_attr_se_bind.attr,
	&dev_attr_se_associate.attr,
	NULL
};

static struct attribute_group ap_queue_dev_sb_attr_group = {
	.attrs = ap_queue_dev_sb_attrs
};

static const struct attribute_group *ap_queue_dev_sb_attr_groups[] = {
	&ap_queue_dev_sb_attr_group,
	NULL
};
998
/* Device release callback: unhash the queue and free its memory. */
static void ap_queue_device_release(struct device *dev)
{
	struct ap_queue *aq = to_ap_queue(dev);

	/* Remove from the ap_queues hash before the memory goes away. */
	spin_lock_bh(&ap_queues_lock);
	hash_del(&aq->hnode);
	spin_unlock_bh(&ap_queues_lock);

	kfree(aq);
}
1009
/**
 * ap_queue_create(): Allocate and initialize a new AP queue device.
 * @qid: The AP queue number
 * @device_type: The device type recorded in the embedded ap_device
 *
 * Returns the new ap_queue (not yet registered) or NULL on allocation
 * failure. The caller owns the device and is responsible for
 * registering it; freeing happens via ap_queue_device_release().
 */
struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
{
	struct ap_queue *aq;

	aq = kzalloc(sizeof(*aq), GFP_KERNEL);
	if (!aq)
		return NULL;
	aq->ap_dev.device.release = ap_queue_device_release;
	aq->ap_dev.device.type = &ap_queue_type;
	aq->ap_dev.device_type = device_type;
	// add optional SE secure binding attributes group
	if (ap_sb_available() && is_prot_virt_guest())
		aq->ap_dev.device.groups = ap_queue_dev_sb_attr_groups;
	aq->qid = qid;
	aq->interrupt = false;
	spin_lock_init(&aq->lock);
	INIT_LIST_HEAD(&aq->pendingq);
	INIT_LIST_HEAD(&aq->requestq);
	timer_setup(&aq->timeout, ap_request_timeout, 0);

	return aq;
}
1032
/* Attach the reply message buffer to the queue and kick the state machine. */
void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *reply)
{
	aq->reply = reply;

	spin_lock_bh(&aq->lock);
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_init_reply);
1042
/**
 * ap_queue_message(): Queue a request to an AP device.
 * @aq: The AP device to queue the message to
 * @ap_msg: The message that is to be added
 *
 * Returns 0 when the message was enqueued, -ENODEV when the queue
 * device is not in OPERATING state. In either case the state machine
 * is run to push as many requests as possible.
 */
int ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
{
	int rc = 0;

	/* msg needs to have a valid receive-callback */
	BUG_ON(!ap_msg->receive);

	spin_lock_bh(&aq->lock);

	/* only allow to queue new messages if device state is ok */
	if (aq->dev_state == AP_DEV_STATE_OPERATING) {
		list_add_tail(&ap_msg->list, &aq->requestq);
		aq->requestq_count++;
		aq->total_request_count++;
		atomic64_inc(&aq->card->total_request_count);
	} else {
		rc = -ENODEV;
	}

	/* Send/receive as many request from the queue as possible. */
	ap_wait(ap_sm_event_loop(aq, AP_SM_EVENT_POLL));

	spin_unlock_bh(&aq->lock);

	return rc;
}
EXPORT_SYMBOL(ap_queue_message);
1075
/**
 * ap_cancel_message(): Cancel a crypto request.
 * @aq: The AP device that has the message queued
 * @ap_msg: The message that is to be removed
 *
 * Cancel a crypto request. This is done by removing the request
 * from the device pending or request queue. Note that the
 * request stays on the AP queue. When it finishes the message
 * reply will be discarded because the psmid can't be found.
 */
void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg)
{
	struct ap_message *tmp;

	spin_lock_bh(&aq->lock);
	if (!list_empty(&ap_msg->list)) {
		/*
		 * Message is on either pendingq or requestq; scan pendingq
		 * to decide which counter to decrement before unlinking.
		 */
		list_for_each_entry(tmp, &aq->pendingq, list)
			if (tmp->psmid == ap_msg->psmid) {
				aq->pendingq_count--;
				goto found;
			}
		aq->requestq_count--;
found:
		list_del_init(&ap_msg->list);
	}
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_cancel_message);
1104
1105/**
1106 * __ap_flush_queue(): Flush requests.
1107 * @aq: Pointer to the AP queue
1108 *
1109 * Flush all requests from the request/pending queue of an AP device.
1110 */
1111static void __ap_flush_queue(struct ap_queue *aq)
1112{
1113	struct ap_message *ap_msg, *next;
1114
1115	list_for_each_entry_safe(ap_msg, next, &aq->pendingq, list) {
1116		list_del_init(&ap_msg->list);
1117		aq->pendingq_count--;
1118		ap_msg->rc = -EAGAIN;
1119		ap_msg->receive(aq, ap_msg, NULL);
1120	}
1121	list_for_each_entry_safe(ap_msg, next, &aq->requestq, list) {
1122		list_del_init(&ap_msg->list);
1123		aq->requestq_count--;
1124		ap_msg->rc = -EAGAIN;
1125		ap_msg->receive(aq, ap_msg, NULL);
1126	}
1127	aq->queue_count = 0;
1128}
1129
/* Flush all requests of an AP queue, taking the queue lock. */
void ap_flush_queue(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	__ap_flush_queue(aq);
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_flush_queue);
1137
/* First stage of queue removal: flush messages, mark SHUTDOWN, stop timer. */
void ap_queue_prepare_remove(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	/* flush queue */
	__ap_flush_queue(aq);
	/* move queue device state to SHUTDOWN in progress */
	aq->dev_state = AP_DEV_STATE_SHUTDOWN;
	spin_unlock_bh(&aq->lock);
	/* timer may still be armed; wait for a running handler, too */
	del_timer_sync(&aq->timeout);
}
1148
/* Second stage of queue removal, see ap_queue_prepare_remove(). */
void ap_queue_remove(struct ap_queue *aq)
{
	/*
	 * all messages have been flushed and the device state
	 * is SHUTDOWN. Now reset with zero which also clears
	 * the irq registration and move the device state
	 * to the initial value AP_DEV_STATE_UNINITIATED.
	 */
	spin_lock_bh(&aq->lock);
	ap_zapq(aq->qid, 0);
	aq->dev_state = AP_DEV_STATE_UNINITIATED;
	spin_unlock_bh(&aq->lock);
}
1162
/* Reset queue to OPERATING/RESET_START and kick the state machine.
 * Caller must hold the queue lock (see ap_queue_init_state()). */
void _ap_queue_init_state(struct ap_queue *aq)
{
	aq->dev_state = AP_DEV_STATE_OPERATING;
	aq->sm_state = AP_SM_STATE_RESET_START;
	aq->last_err_rc = 0;
	aq->assoc_idx = ASSOC_IDX_INVALID;
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
}
1171
/* Locked wrapper around _ap_queue_init_state(). */
void ap_queue_init_state(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	_ap_queue_init_state(aq);
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_init_state);
1179