// SPDX-License-Identifier: GPL-2.0
/*
 * ISM driver for s390.
 *
 * Copyright IBM Corp. 2018
 */
#define KMSG_COMPONENT "ism"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/processor.h>

#include "ism.h"

MODULE_DESCRIPTION("ISM driver for s390");
MODULE_LICENSE("GPL");

#define PCI_DEVICE_ID_IBM_ISM 0x04ED
#define DRV_NAME "ism"

static const struct pci_device_id ism_device_table[] = {
	{ PCI_VDEVICE(IBM, PCI_DEVICE_ID_IBM_ISM), 0 },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, ism_device_table);

static debug_info_t *ism_debug_info;

#define NO_CLIENT		0xff		/* must be >= MAX_CLIENTS */
static struct ism_client *clients[MAX_CLIENTS];	/* use an array rather than */
						/* a list for fast mapping  */
static u8 max_client;
static DEFINE_MUTEX(clients_lock);
struct ism_dev_list {
	struct list_head list;
	struct mutex mutex; /* protects ism device list */
};

static struct ism_dev_list ism_dev_list = {
	.list = LIST_HEAD_INIT(ism_dev_list.list),
	.mutex = __MUTEX_INITIALIZER(ism_dev_list.mutex),
};

static void ism_setup_forwarding(struct ism_client *client, struct ism_dev *ism)
{
	unsigned long flags;

	spin_lock_irqsave(&ism->lock, flags);
	ism->subs[client->id] = client;
	spin_unlock_irqrestore(&ism->lock, flags);
}

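/*
 * Register an ISM client. Assigns the first free slot in the clients[]
 * array as the client id and calls the client's add() callback for every
 * ISM device that is already known, enabling IRQ/event forwarding for it.
 */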
int ism_register_client(struct ism_client *client)
{
	struct ism_dev *ism;
	int i, rc = -ENOSPC;

	mutex_lock(&ism_dev_list.mutex);
	mutex_lock(&clients_lock);
	for (i = 0; i < MAX_CLIENTS; ++i) {
		if (!clients[i]) {
			clients[i] = client;
			client->id = i;
			if (i == max_client)
				max_client++;
			rc = 0;
			break;
		}
	}
	mutex_unlock(&clients_lock);

	if (i < MAX_CLIENTS) {
		/* initialize with all devices that we got so far */
		list_for_each_entry(ism, &ism_dev_list.list, list) {
			ism->priv[i] = NULL;
			client->add(ism);
			ism_setup_forwarding(client, ism);
		}
	}
	mutex_unlock(&ism_dev_list.mutex);

	return rc;
}
EXPORT_SYMBOL_GPL(ism_register_client);

int ism_unregister_client(struct ism_client *client)
{
	struct ism_dev *ism;
	unsigned long flags;
	int rc = 0;

	mutex_lock(&ism_dev_list.mutex);
	list_for_each_entry(ism, &ism_dev_list.list, list) {
		spin_lock_irqsave(&ism->lock, flags);
		/* Stop forwarding IRQs and events */
		ism->subs[client->id] = NULL;
		for (int i = 0; i < ISM_NR_DMBS; ++i) {
			if (ism->sba_client_arr[i] == client->id) {
				WARN(1, "%s: attempt to unregister '%s' with registered dmb(s)\n",
				     __func__, client->name);
				rc = -EBUSY;
				goto err_reg_dmb;
			}
		}
		spin_unlock_irqrestore(&ism->lock, flags);
	}
	mutex_unlock(&ism_dev_list.mutex);

	mutex_lock(&clients_lock);
	clients[client->id] = NULL;
	if (client->id + 1 == max_client)
		max_client--;
	mutex_unlock(&clients_lock);
	return rc;

err_reg_dmb:
	spin_unlock_irqrestore(&ism->lock, flags);
	mutex_unlock(&ism_dev_list.mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(ism_unregister_client);

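/*
 * Issue a device command: the request payload is written first, then the
 * request header. The response return code is preset to ISM_ERROR so that a
 * command which does not complete reads back as a failure; on success the
 * response payload is read after the response header.
 */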
static int ism_cmd(struct ism_dev *ism, void *cmd)
{
	struct ism_req_hdr *req = cmd;
	struct ism_resp_hdr *resp = cmd;

	__ism_write_cmd(ism, req + 1, sizeof(*req), req->len - sizeof(*req));
	__ism_write_cmd(ism, req, 0, sizeof(*req));

	WRITE_ONCE(resp->ret, ISM_ERROR);

	__ism_read_cmd(ism, resp, 0, sizeof(*resp));
	if (resp->ret) {
		debug_text_event(ism_debug_info, 0, "cmd failure");
		debug_event(ism_debug_info, 0, resp, sizeof(*resp));
		goto out;
	}
	__ism_read_cmd(ism, resp + 1, sizeof(*resp), resp->len - sizeof(*resp));
out:
	return resp->ret;
}

static int ism_cmd_simple(struct ism_dev *ism, u32 cmd_code)
{
	union ism_cmd_simple cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = cmd_code;
	cmd.request.hdr.len = sizeof(cmd.request);

	return ism_cmd(ism, &cmd);
}

static int query_info(struct ism_dev *ism)
{
	union ism_qi cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_QUERY_INFO;
	cmd.request.hdr.len = sizeof(cmd.request);

	if (ism_cmd(ism, &cmd))
		goto out;

	debug_text_event(ism_debug_info, 3, "query info");
	debug_event(ism_debug_info, 3, &cmd.response, sizeof(cmd.response));
out:
	return 0;
}

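/*
 * Allocate one page of coherent DMA memory for the SBA and register its
 * DMA address with the device. register_ieq() below does the same for the
 * event queue (IEQ).
 */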
static int register_sba(struct ism_dev *ism)
{
	union ism_reg_sba cmd;
	dma_addr_t dma_handle;
	struct ism_sba *sba;

	sba = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle,
				 GFP_KERNEL);
	if (!sba)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_REG_SBA;
	cmd.request.hdr.len = sizeof(cmd.request);
	cmd.request.sba = dma_handle;

	if (ism_cmd(ism, &cmd)) {
		dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, sba, dma_handle);
		return -EIO;
	}

	ism->sba = sba;
	ism->sba_dma_addr = dma_handle;

	return 0;
}

static int register_ieq(struct ism_dev *ism)
{
	union ism_reg_ieq cmd;
	dma_addr_t dma_handle;
	struct ism_eq *ieq;

	ieq = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle,
				 GFP_KERNEL);
	if (!ieq)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_REG_IEQ;
	cmd.request.hdr.len = sizeof(cmd.request);
	cmd.request.ieq = dma_handle;
	cmd.request.len = sizeof(*ieq);

	if (ism_cmd(ism, &cmd)) {
		dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, ieq, dma_handle);
		return -EIO;
	}

	ism->ieq = ieq;
	ism->ieq_idx = -1;
	ism->ieq_dma_addr = dma_handle;

	return 0;
}

static int unregister_sba(struct ism_dev *ism)
{
	int ret;

	if (!ism->sba)
		return 0;

	ret = ism_cmd_simple(ism, ISM_UNREG_SBA);
	if (ret && ret != ISM_ERROR)
		return -EIO;

	dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
			  ism->sba, ism->sba_dma_addr);

	ism->sba = NULL;
	ism->sba_dma_addr = 0;

	return 0;
}

static int unregister_ieq(struct ism_dev *ism)
{
	int ret;

	if (!ism->ieq)
		return 0;

	ret = ism_cmd_simple(ism, ISM_UNREG_IEQ);
	if (ret && ret != ISM_ERROR)
		return -EIO;

	dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
			  ism->ieq, ism->ieq_dma_addr);

	ism->ieq = NULL;
	ism->ieq_dma_addr = 0;

	return 0;
}

static int ism_read_local_gid(struct ism_dev *ism)
{
	union ism_read_gid cmd;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_READ_GID;
	cmd.request.hdr.len = sizeof(cmd.request);

	ret = ism_cmd(ism, &cmd);
	if (ret)
		goto out;

	ism->local_gid = cmd.response.gid;
out:
	return ret;
}

static void ism_free_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
{
	clear_bit(dmb->sba_idx, ism->sba_bitmap);
	dma_free_coherent(&ism->pdev->dev, dmb->dmb_len,
			  dmb->cpu_addr, dmb->dma_addr);
}

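/*
 * Reserve an SBA index for the DMB (either the caller-supplied dmb->sba_idx
 * or the next free bit at or above ISM_DMB_BIT_OFFSET) and allocate the
 * coherent DMA buffer that backs it.
 */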
static int ism_alloc_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
{
	unsigned long bit;

	if (PAGE_ALIGN(dmb->dmb_len) > dma_get_max_seg_size(&ism->pdev->dev))
		return -EINVAL;

	if (!dmb->sba_idx) {
		bit = find_next_zero_bit(ism->sba_bitmap, ISM_NR_DMBS,
					 ISM_DMB_BIT_OFFSET);
		if (bit == ISM_NR_DMBS)
			return -ENOSPC;

		dmb->sba_idx = bit;
	}
	if (dmb->sba_idx < ISM_DMB_BIT_OFFSET ||
	    test_and_set_bit(dmb->sba_idx, ism->sba_bitmap))
		return -EINVAL;

	dmb->cpu_addr = dma_alloc_coherent(&ism->pdev->dev, dmb->dmb_len,
					   &dmb->dma_addr,
					   GFP_KERNEL | __GFP_NOWARN |
					   __GFP_NOMEMALLOC | __GFP_NORETRY);
	if (!dmb->cpu_addr)
		clear_bit(dmb->sba_idx, ism->sba_bitmap);

	return dmb->cpu_addr ? 0 : -ENOMEM;
}

int ism_register_dmb(struct ism_dev *ism, struct ism_dmb *dmb,
		     struct ism_client *client)
{
	union ism_reg_dmb cmd;
	unsigned long flags;
	int ret;

	ret = ism_alloc_dmb(ism, dmb);
	if (ret)
		goto out;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_REG_DMB;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.dmb = dmb->dma_addr;
	cmd.request.dmb_len = dmb->dmb_len;
	cmd.request.sba_idx = dmb->sba_idx;
	cmd.request.vlan_valid = dmb->vlan_valid;
	cmd.request.vlan_id = dmb->vlan_id;
	cmd.request.rgid = dmb->rgid;

	ret = ism_cmd(ism, &cmd);
	if (ret) {
		ism_free_dmb(ism, dmb);
		goto out;
	}
	dmb->dmb_tok = cmd.response.dmb_tok;
	spin_lock_irqsave(&ism->lock, flags);
	ism->sba_client_arr[dmb->sba_idx - ISM_DMB_BIT_OFFSET] = client->id;
	spin_unlock_irqrestore(&ism->lock, flags);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(ism_register_dmb);

int ism_unregister_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
{
	union ism_unreg_dmb cmd;
	unsigned long flags;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_UNREG_DMB;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.dmb_tok = dmb->dmb_tok;

	spin_lock_irqsave(&ism->lock, flags);
	ism->sba_client_arr[dmb->sba_idx - ISM_DMB_BIT_OFFSET] = NO_CLIENT;
	spin_unlock_irqrestore(&ism->lock, flags);

	ret = ism_cmd(ism, &cmd);
	if (ret && ret != ISM_ERROR)
		goto out;

	ism_free_dmb(ism, dmb);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(ism_unregister_dmb);

static int ism_add_vlan_id(struct ism_dev *ism, u64 vlan_id)
{
	union ism_set_vlan_id cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_ADD_VLAN_ID;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.vlan_id = vlan_id;

	return ism_cmd(ism, &cmd);
}

static int ism_del_vlan_id(struct ism_dev *ism, u64 vlan_id)
{
	union ism_set_vlan_id cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_DEL_VLAN_ID;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.vlan_id = vlan_id;

	return ism_cmd(ism, &cmd);
}

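/*
 * Return how many of @len bytes can be copied starting at @start without
 * crossing a @boundary-sized boundary.
 */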
static unsigned int max_bytes(unsigned int start, unsigned int len,
			      unsigned int boundary)
{
	return min(boundary - (start & (boundary - 1)), len);
}

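/*
 * Copy @size bytes at @offset into the remote DMB identified by @dmb_tok,
 * split into chunks that never cross a page boundary. The signal flag @sf
 * is only applied to the final chunk.
 */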
int ism_move(struct ism_dev *ism, u64 dmb_tok, unsigned int idx, bool sf,
	     unsigned int offset, void *data, unsigned int size)
{
	unsigned int bytes;
	u64 dmb_req;
	int ret;

	while (size) {
		bytes = max_bytes(offset, size, PAGE_SIZE);
		dmb_req = ISM_CREATE_REQ(dmb_tok, idx, size == bytes ? sf : 0,
					 offset);

		ret = __ism_move(ism, dmb_req, data, bytes);
		if (ret)
			return ret;

		size -= bytes;
		data += bytes;
		offset += bytes;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ism_move);

static struct ism_systemeid SYSTEM_EID = {
	.seid_string = "IBM-SYSZ-ISMSEID00000000",
	.serial_number = "0000",
	.type = "0000",
};

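/*
 * Overwrite the '0000' placeholders in SYSTEM_EID with the low 16 bits of
 * the CPU identification number and the machine type. A non-'0' serial
 * number or type therefore indicates that a system EID was created, i.e.
 * that the hardware is SMC-Dv2 capable (see ism_dev_init() and
 * smcd_supports_v2()).
 */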
static void ism_create_system_eid(void)
{
	struct cpuid id;
	u16 ident_tail;
	char tmp[5];

	get_cpu_id(&id);
	ident_tail = (u16)(id.ident & ISM_IDENT_MASK);
	snprintf(tmp, 5, "%04X", ident_tail);
	memcpy(&SYSTEM_EID.serial_number, tmp, 4);
	snprintf(tmp, 5, "%04X", id.machine);
	memcpy(&SYSTEM_EID.type, tmp, 4);
}

u8 *ism_get_seid(void)
{
	return SYSTEM_EID.seid_string;
}
EXPORT_SYMBOL_GPL(ism_get_seid);

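/*
 * Consume all new entries from the event queue and forward each one to
 * every subscribed client's handle_event() callback.
 */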
static void ism_handle_event(struct ism_dev *ism)
{
	struct ism_event *entry;
	struct ism_client *clt;
	int i;

	while ((ism->ieq_idx + 1) != READ_ONCE(ism->ieq->header.idx)) {
		if (++(ism->ieq_idx) == ARRAY_SIZE(ism->ieq->entry))
			ism->ieq_idx = 0;

		entry = &ism->ieq->entry[ism->ieq_idx];
		debug_event(ism_debug_info, 2, entry, sizeof(*entry));
		for (i = 0; i < max_client; ++i) {
			clt = ism->subs[i];
			if (clt)
				clt->handle_event(ism, entry);
		}
	}
}

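/*
 * MSI handler: clear the summary bit, scan the DMB bit vector in the SBA,
 * and forward each pending DMB notification together with its dmbemask to
 * the client that owns that DMB. Finally process pending events, if any.
 */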
static irqreturn_t ism_handle_irq(int irq, void *data)
{
	struct ism_dev *ism = data;
	unsigned long bit, end;
	unsigned long *bv;
	u16 dmbemask;
	u8 client_id;

	bv = (void *) &ism->sba->dmb_bits[ISM_DMB_WORD_OFFSET];
	end = sizeof(ism->sba->dmb_bits) * BITS_PER_BYTE - ISM_DMB_BIT_OFFSET;

	spin_lock(&ism->lock);
	ism->sba->s = 0;
	barrier();
	for (bit = 0;;) {
		bit = find_next_bit_inv(bv, end, bit);
		if (bit >= end)
			break;

		clear_bit_inv(bit, bv);
		dmbemask = ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET];
		ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0;
		barrier();
		client_id = ism->sba_client_arr[bit];
		if (unlikely(client_id == NO_CLIENT || !ism->subs[client_id]))
			continue;
		ism->subs[client_id]->handle_irq(ism, bit + ISM_DMB_BIT_OFFSET, dmbemask);
	}

	if (ism->sba->e) {
		ism->sba->e = 0;
		barrier();
		ism_handle_event(ism);
	}
	spin_unlock(&ism->lock);
	return IRQ_HANDLED;
}

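/*
 * Bring up one ISM device: allocate the MSI vector and the DMB-to-client
 * mapping, request the IRQ, register SBA and event queue, read the local
 * GID, and probe for SMC-Dv2 capability by adding the reserved VLAN id.
 * Finally announce the device to all registered clients and add it to the
 * device list.
 */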
static int ism_dev_init(struct ism_dev *ism)
{
	struct pci_dev *pdev = ism->pdev;
	int i, ret;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
	if (ret <= 0)
		goto out;

	ism->sba_client_arr = kzalloc(ISM_NR_DMBS, GFP_KERNEL);
	if (!ism->sba_client_arr) {
		ret = -ENOMEM;
		goto free_vectors;
	}
	memset(ism->sba_client_arr, NO_CLIENT, ISM_NR_DMBS);

	ret = request_irq(pci_irq_vector(pdev, 0), ism_handle_irq, 0,
			  pci_name(pdev), ism);
	if (ret)
		goto free_client_arr;

	ret = register_sba(ism);
	if (ret)
		goto free_irq;

	ret = register_ieq(ism);
	if (ret)
		goto unreg_sba;

	ret = ism_read_local_gid(ism);
	if (ret)
		goto unreg_ieq;

	if (!ism_add_vlan_id(ism, ISM_RESERVED_VLANID))
		/* hardware is V2 capable */
		ism_create_system_eid();

	mutex_lock(&ism_dev_list.mutex);
	mutex_lock(&clients_lock);
	for (i = 0; i < max_client; ++i) {
		if (clients[i]) {
			clients[i]->add(ism);
			ism_setup_forwarding(clients[i], ism);
		}
	}
	mutex_unlock(&clients_lock);

	list_add(&ism->list, &ism_dev_list.list);
	mutex_unlock(&ism_dev_list.mutex);

	query_info(ism);
	return 0;

unreg_ieq:
	unregister_ieq(ism);
unreg_sba:
	unregister_sba(ism);
free_irq:
	free_irq(pci_irq_vector(pdev, 0), ism);
free_client_arr:
	kfree(ism->sba_client_arr);
free_vectors:
	pci_free_irq_vectors(pdev);
out:
	return ret;
}

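/*
 * PCI probe: create the ism device, enable the PCI function, claim its
 * memory regions, set up 64-bit DMA with segments limited to 1 MB, and
 * initialize the ISM-specific parts via ism_dev_init().
 */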
static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ism_dev *ism;
	int ret;

	ism = kzalloc(sizeof(*ism), GFP_KERNEL);
	if (!ism)
		return -ENOMEM;

	spin_lock_init(&ism->lock);
	dev_set_drvdata(&pdev->dev, ism);
	ism->pdev = pdev;
	ism->dev.parent = &pdev->dev;
	device_initialize(&ism->dev);
	dev_set_name(&ism->dev, "%s", dev_name(&pdev->dev));
	ret = device_add(&ism->dev);
	if (ret)
		goto err_dev;

	ret = pci_enable_device_mem(pdev);
	if (ret)
		goto err;

	ret = pci_request_mem_regions(pdev, DRV_NAME);
	if (ret)
		goto err_disable;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret)
		goto err_resource;

	dma_set_seg_boundary(&pdev->dev, SZ_1M - 1);
	dma_set_max_seg_size(&pdev->dev, SZ_1M);
	pci_set_master(pdev);

	ret = ism_dev_init(ism);
	if (ret)
		goto err_resource;

	return 0;

err_resource:
	pci_release_mem_regions(pdev);
err_disable:
	pci_disable_device(pdev);
err:
	device_del(&ism->dev);
err_dev:
	dev_set_drvdata(&pdev->dev, NULL);
	kfree(ism);

	return ret;
}

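/*
 * Tear down one ISM device in reverse order of ism_dev_init(): stop
 * forwarding, run each client's remove() callback, drop the reserved VLAN
 * id if a system EID was created, unregister event queue and SBA, release
 * the IRQ resources and take the device off the device list.
 */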
static void ism_dev_exit(struct ism_dev *ism)
{
	struct pci_dev *pdev = ism->pdev;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ism->lock, flags);
	for (i = 0; i < max_client; ++i)
		ism->subs[i] = NULL;
	spin_unlock_irqrestore(&ism->lock, flags);

	mutex_lock(&ism_dev_list.mutex);
	mutex_lock(&clients_lock);
	for (i = 0; i < max_client; ++i) {
		if (clients[i])
			clients[i]->remove(ism);
	}
	mutex_unlock(&clients_lock);

	if (SYSTEM_EID.serial_number[0] != '0' ||
	    SYSTEM_EID.type[0] != '0')
		ism_del_vlan_id(ism, ISM_RESERVED_VLANID);
	unregister_ieq(ism);
	unregister_sba(ism);
	free_irq(pci_irq_vector(pdev, 0), ism);
	kfree(ism->sba_client_arr);
	pci_free_irq_vectors(pdev);
	list_del_init(&ism->list);
	mutex_unlock(&ism_dev_list.mutex);
}

static void ism_remove(struct pci_dev *pdev)
{
	struct ism_dev *ism = dev_get_drvdata(&pdev->dev);

	ism_dev_exit(ism);

	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
	device_del(&ism->dev);
	dev_set_drvdata(&pdev->dev, NULL);
	kfree(ism);
}

static struct pci_driver ism_driver = {
	.name	  = DRV_NAME,
	.id_table = ism_device_table,
	.probe	  = ism_probe,
	.remove	  = ism_remove,
};

static int __init ism_init(void)
{
	int ret;

	ism_debug_info = debug_register("ism", 2, 1, 16);
	if (!ism_debug_info)
		return -ENODEV;

	memset(clients, 0, sizeof(clients));
	max_client = 0;
	debug_register_view(ism_debug_info, &debug_hex_ascii_view);
	ret = pci_register_driver(&ism_driver);
	if (ret)
		debug_unregister(ism_debug_info);

	return ret;
}

static void __exit ism_exit(void)
{
	pci_unregister_driver(&ism_driver);
	debug_unregister(ism_debug_info);
}

module_init(ism_init);
module_exit(ism_exit);

/*************************** SMC-D Implementation *****************************/

#if IS_ENABLED(CONFIG_SMC)
static int ism_query_rgid(struct ism_dev *ism, u64 rgid, u32 vid_valid,
			  u32 vid)
{
	union ism_query_rgid cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_QUERY_RGID;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.rgid = rgid;
	cmd.request.vlan_valid = vid_valid;
	cmd.request.vlan_id = vid;

	return ism_cmd(ism, &cmd);
}

static int smcd_query_rgid(struct smcd_dev *smcd, u64 rgid, u32 vid_valid,
			   u32 vid)
{
	return ism_query_rgid(smcd->priv, rgid, vid_valid, vid);
}

static int smcd_register_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb,
			     struct ism_client *client)
{
	return ism_register_dmb(smcd->priv, (struct ism_dmb *)dmb, client);
}

static int smcd_unregister_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
{
	return ism_unregister_dmb(smcd->priv, (struct ism_dmb *)dmb);
}

static int smcd_add_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
{
	return ism_add_vlan_id(smcd->priv, vlan_id);
}

static int smcd_del_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
{
	return ism_del_vlan_id(smcd->priv, vlan_id);
}

static int smcd_set_vlan_required(struct smcd_dev *smcd)
{
	return ism_cmd_simple(smcd->priv, ISM_SET_VLAN);
}

static int smcd_reset_vlan_required(struct smcd_dev *smcd)
{
	return ism_cmd_simple(smcd->priv, ISM_RESET_VLAN);
}

static int ism_signal_ieq(struct ism_dev *ism, u64 rgid, u32 trigger_irq,
			  u32 event_code, u64 info)
{
	union ism_sig_ieq cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_SIGNAL_IEQ;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.rgid = rgid;
	cmd.request.trigger_irq = trigger_irq;
	cmd.request.event_code = event_code;
	cmd.request.info = info;

	return ism_cmd(ism, &cmd);
}

static int smcd_signal_ieq(struct smcd_dev *smcd, u64 rgid, u32 trigger_irq,
			   u32 event_code, u64 info)
{
	return ism_signal_ieq(smcd->priv, rgid, trigger_irq, event_code, info);
}

static int smcd_move(struct smcd_dev *smcd, u64 dmb_tok, unsigned int idx,
		     bool sf, unsigned int offset, void *data,
		     unsigned int size)
{
	return ism_move(smcd->priv, dmb_tok, idx, sf, offset, data, size);
}

static int smcd_supports_v2(void)
{
	return SYSTEM_EID.serial_number[0] != '0' ||
		SYSTEM_EID.type[0] != '0';
}

static u64 ism_get_local_gid(struct ism_dev *ism)
{
	return ism->local_gid;
}

static u64 smcd_get_local_gid(struct smcd_dev *smcd)
{
	return ism_get_local_gid(smcd->priv);
}

static u16 ism_get_chid(struct ism_dev *ism)
{
	if (!ism || !ism->pdev)
		return 0;

	return to_zpci(ism->pdev)->pchid;
}

static u16 smcd_get_chid(struct smcd_dev *smcd)
{
	return ism_get_chid(smcd->priv);
}

static inline struct device *smcd_get_dev(struct smcd_dev *dev)
{
	struct ism_dev *ism = dev->priv;

	return &ism->dev;
}

static const struct smcd_ops ism_ops = {
	.query_remote_gid = smcd_query_rgid,
	.register_dmb = smcd_register_dmb,
	.unregister_dmb = smcd_unregister_dmb,
	.add_vlan_id = smcd_add_vlan_id,
	.del_vlan_id = smcd_del_vlan_id,
	.set_vlan_required = smcd_set_vlan_required,
	.reset_vlan_required = smcd_reset_vlan_required,
	.signal_event = smcd_signal_ieq,
	.move_data = smcd_move,
	.supports_v2 = smcd_supports_v2,
	.get_system_eid = ism_get_seid,
	.get_local_gid = smcd_get_local_gid,
	.get_chid = smcd_get_chid,
	.get_dev = smcd_get_dev,
};

const struct smcd_ops *ism_get_smcd_ops(void)
{
	return &ism_ops;
}
EXPORT_SYMBOL_GPL(ism_get_smcd_ops);
#endif