xref: /kernel/linux/linux-5.10/drivers/dma/idxd/sysfs.c (revision 8c2ecf20)
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
#include "idxd.h"

static char *idxd_wq_type_names[] = {
	[IDXD_WQT_NONE]		= "none",
	[IDXD_WQT_KERNEL]	= "kernel",
	[IDXD_WQT_USER]		= "user",
};

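/*
 * The conf_dev devices registered in this file are embedded in
 * idxd-owned structures whose lifetime is tied to the parent idxd
 * device, so the release callback has nothing to free; it only logs
 * for debugging.
 */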
static void idxd_conf_device_release(struct device *dev)
{
	dev_dbg(dev, "%s for %s\n", __func__, dev_name(dev));
}

static struct device_type idxd_group_device_type = {
	.name = "group",
	.release = idxd_conf_device_release,
};

static struct device_type idxd_wq_device_type = {
	.name = "wq",
	.release = idxd_conf_device_release,
};

static struct device_type idxd_engine_device_type = {
	.name = "engine",
	.release = idxd_conf_device_release,
};

static struct device_type dsa_device_type = {
	.name = "dsa",
	.release = idxd_conf_device_release,
};

static inline bool is_dsa_dev(struct device *dev)
{
	return dev ? dev->type == &dsa_device_type : false;
}

static inline bool is_idxd_dev(struct device *dev)
{
	return is_dsa_dev(dev);
}

static inline bool is_idxd_wq_dev(struct device *dev)
{
	return dev ? dev->type == &idxd_wq_device_type : false;
}

static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
{
	if (wq->type == IDXD_WQT_KERNEL &&
	    strcmp(wq->name, "dmaengine") == 0)
		return true;
	return false;
}

static inline bool is_idxd_wq_cdev(struct idxd_wq *wq)
{
	return wq->type == IDXD_WQT_USER;
}

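/*
 * A conf_dev matches the dsa driver only when binding it makes sense:
 * the idxd device must have finished probe (IDXD_DEV_CONF_READY), and
 * a wq must still be disabled.  Group and engine conf_devs never match
 * and therefore can never be bound.
 */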
static int idxd_config_bus_match(struct device *dev,
				 struct device_driver *drv)
{
	int matched = 0;

	if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);

		if (idxd->state != IDXD_DEV_CONF_READY)
			return 0;
		matched = 1;
	} else if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);
		struct idxd_device *idxd = wq->idxd;

		if (idxd->state < IDXD_DEV_CONF_READY)
			return 0;

		if (wq->state != IDXD_WQ_DISABLED) {
			dev_dbg(dev, "%s not disabled\n", dev_name(dev));
			return 0;
		}
		matched = 1;
	}

	if (matched)
		dev_dbg(dev, "%s matched\n", dev_name(dev));

	return matched;
}

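/*
 * Probe on this bus doubles as the enable path.  For an idxd device it
 * writes the staged configuration to the hardware and enables the
 * device; for a wq it allocates resources, writes the config, enables
 * the wq, maps its portal and registers the dmaengine channel or cdev
 * that exposes it to clients.
 */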
static int idxd_config_bus_probe(struct device *dev)
{
	int rc;
	unsigned long flags;

	dev_dbg(dev, "%s called\n", __func__);

	if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);

		if (idxd->state != IDXD_DEV_CONF_READY) {
			dev_warn(dev, "Device not ready for config\n");
			return -EBUSY;
		}

		if (!try_module_get(THIS_MODULE))
			return -ENXIO;

		/* Perform IDXD configuration and enabling */
		spin_lock_irqsave(&idxd->dev_lock, flags);
		rc = idxd_device_config(idxd);
		spin_unlock_irqrestore(&idxd->dev_lock, flags);
		if (rc < 0) {
			module_put(THIS_MODULE);
			dev_warn(dev, "Device config failed: %d\n", rc);
			return rc;
		}

		/* start device */
		rc = idxd_device_enable(idxd);
		if (rc < 0) {
			module_put(THIS_MODULE);
			dev_warn(dev, "Device enable failed: %d\n", rc);
			return rc;
		}

		dev_info(dev, "Device %s enabled\n", dev_name(dev));

		rc = idxd_register_dma_device(idxd);
		if (rc < 0) {
			module_put(THIS_MODULE);
			dev_dbg(dev, "Failed to register dmaengine device\n");
			return rc;
		}
		return 0;
	} else if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);
		struct idxd_device *idxd = wq->idxd;

		mutex_lock(&wq->wq_lock);

		if (idxd->state != IDXD_DEV_ENABLED) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "Enabling while device not enabled.\n");
			return -EPERM;
		}

		if (wq->state != IDXD_WQ_DISABLED) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ %d already enabled.\n", wq->id);
			return -EBUSY;
		}

		if (!wq->group) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ not attached to group.\n");
			return -EINVAL;
		}

		if (strlen(wq->name) == 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ name not set.\n");
			return -EINVAL;
		}

		rc = idxd_wq_alloc_resources(wq);
		if (rc < 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ resource alloc failed\n");
			return rc;
		}

		spin_lock_irqsave(&idxd->dev_lock, flags);
		rc = idxd_device_config(idxd);
		spin_unlock_irqrestore(&idxd->dev_lock, flags);
		if (rc < 0) {
			/* don't leak the resources allocated above */
			idxd_wq_free_resources(wq);
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "Writing WQ %d config failed: %d\n",
				 wq->id, rc);
			return rc;
		}

		rc = idxd_wq_enable(wq);
		if (rc < 0) {
			idxd_wq_free_resources(wq);
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ %d enabling failed: %d\n",
				 wq->id, rc);
			return rc;
		}

		rc = idxd_wq_map_portal(wq);
		if (rc < 0) {
			dev_warn(dev, "wq portal mapping failed: %d\n", rc);
			/*
			 * Keep the mapping error as the probe result; a
			 * successful disable must not turn this into success.
			 */
			if (idxd_wq_disable(wq) < 0)
				dev_warn(dev, "IDXD wq disable failed\n");
			idxd_wq_free_resources(wq);
			mutex_unlock(&wq->wq_lock);
			return rc;
		}

		wq->client_count = 0;

		dev_info(dev, "wq %s enabled\n", dev_name(&wq->conf_dev));

		if (is_idxd_wq_dmaengine(wq)) {
			rc = idxd_register_dma_channel(wq);
			if (rc < 0) {
				dev_dbg(dev, "DMA channel register failed\n");
				mutex_unlock(&wq->wq_lock);
				return rc;
			}
		} else if (is_idxd_wq_cdev(wq)) {
			rc = idxd_wq_add_cdev(wq);
			if (rc < 0) {
				dev_dbg(dev, "Cdev creation failed\n");
				mutex_unlock(&wq->wq_lock);
				return rc;
			}
		}

		mutex_unlock(&wq->wq_lock);
		return 0;
	}

	return -ENODEV;
}

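/*
 * Tear a wq down in roughly the reverse order of probe: remove the
 * client interface (dmaengine channel or cdev), warn if clients still
 * hold a reference, unmap the portal, then drain and reset the wq
 * before freeing its resources.
 */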
static void disable_wq(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;

	mutex_lock(&wq->wq_lock);
	dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(&wq->conf_dev));
	if (wq->state == IDXD_WQ_DISABLED) {
		mutex_unlock(&wq->wq_lock);
		return;
	}

	if (is_idxd_wq_dmaengine(wq))
		idxd_unregister_dma_channel(wq);
	else if (is_idxd_wq_cdev(wq))
		idxd_wq_del_cdev(wq);

	if (idxd_wq_refcount(wq))
		dev_warn(dev, "Clients have a claim on wq %d: %d\n",
			 wq->id, idxd_wq_refcount(wq));

	idxd_wq_unmap_portal(wq);

	idxd_wq_drain(wq);
	idxd_wq_reset(wq);

	idxd_wq_free_resources(wq);
	wq->client_count = 0;
	mutex_unlock(&wq->wq_lock);

	dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
}

static int idxd_config_bus_remove(struct device *dev)
{
	int rc;

	dev_dbg(dev, "%s called for %s\n", __func__, dev_name(dev));

	/* disable workqueue here */
	if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);

		disable_wq(wq);
	} else if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);
		int i;

		dev_dbg(dev, "%s removing dev %s\n", __func__,
			dev_name(&idxd->conf_dev));
		for (i = 0; i < idxd->max_wqs; i++) {
			struct idxd_wq *wq = &idxd->wqs[i];

			if (wq->state == IDXD_WQ_DISABLED)
				continue;
			dev_warn(dev, "wq %d still active while disabling %s.\n",
				 i, dev_name(&idxd->conf_dev));
			device_release_driver(&wq->conf_dev);
		}

		idxd_unregister_dma_device(idxd);
		rc = idxd_device_disable(idxd);
		for (i = 0; i < idxd->max_wqs; i++) {
			struct idxd_wq *wq = &idxd->wqs[i];

			mutex_lock(&wq->wq_lock);
			idxd_wq_disable_cleanup(wq);
			mutex_unlock(&wq->wq_lock);
		}
		module_put(THIS_MODULE);
		if (rc < 0)
			dev_warn(dev, "Device disable failed\n");
		else
			dev_info(dev, "Device %s disabled\n", dev_name(dev));

	}

	return 0;
}

static void idxd_config_bus_shutdown(struct device *dev)
{
	dev_dbg(dev, "%s called\n", __func__);
}

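/*
 * The dsa bus is the configuration interface for idxd: the attributes
 * below stage a configuration while a device is unbound, and binding
 * the device to the "dsa" driver applies and enables it.  A typical
 * userspace flow looks roughly like this (a sketch; device and wq
 * names depend on enumeration order):
 *
 *	echo 16 > /sys/bus/dsa/devices/dsa0/wq0.0/size
 *	echo dedicated > /sys/bus/dsa/devices/dsa0/wq0.0/mode
 *	echo user > /sys/bus/dsa/devices/dsa0/wq0.0/type
 *	echo app0 > /sys/bus/dsa/devices/dsa0/wq0.0/name
 *	echo 0 > /sys/bus/dsa/devices/dsa0/wq0.0/group_id
 *	echo dsa0 > /sys/bus/dsa/drivers/dsa/bind
 *	echo wq0.0 > /sys/bus/dsa/drivers/dsa/bind
 */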
struct bus_type dsa_bus_type = {
	.name = "dsa",
	.match = idxd_config_bus_match,
	.probe = idxd_config_bus_probe,
	.remove = idxd_config_bus_remove,
	.shutdown = idxd_config_bus_shutdown,
};

static struct bus_type *idxd_bus_types[] = {
	&dsa_bus_type
};

static struct idxd_device_driver dsa_drv = {
	.drv = {
		.name = "dsa",
		.bus = &dsa_bus_type,
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
	},
};

static struct idxd_device_driver *idxd_drvs[] = {
	&dsa_drv
};

struct bus_type *idxd_get_bus_type(struct idxd_device *idxd)
{
	return idxd_bus_types[idxd->type];
}

static struct device_type *idxd_get_device_type(struct idxd_device *idxd)
{
	if (idxd->type == IDXD_TYPE_DSA)
		return &dsa_device_type;
	else
		return NULL;
}

/* IDXD generic driver setup */
int idxd_register_driver(void)
{
	int i, rc;

	for (i = 0; i < IDXD_TYPE_MAX; i++) {
		rc = driver_register(&idxd_drvs[i]->drv);
		if (rc < 0)
			goto drv_fail;
	}

	return 0;

drv_fail:
	while (--i >= 0)
		driver_unregister(&idxd_drvs[i]->drv);
	return rc;
}

void idxd_unregister_driver(void)
{
	int i;

	for (i = 0; i < IDXD_TYPE_MAX; i++)
		driver_unregister(&idxd_drvs[i]->drv);
}

/* IDXD engine attributes */
static ssize_t engine_group_id_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_engine *engine =
		container_of(dev, struct idxd_engine, conf_dev);

	if (engine->group)
		return sprintf(buf, "%d\n", engine->group->id);
	else
		return sprintf(buf, "%d\n", -1);
}

static ssize_t engine_group_id_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct idxd_engine *engine =
		container_of(dev, struct idxd_engine, conf_dev);
	struct idxd_device *idxd = engine->idxd;
	long id;
	int rc;
	struct idxd_group *prevg;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (engine->group) {
			engine->group->num_engines--;
			engine->group = NULL;
		}
		return count;
	}

	prevg = engine->group;

	if (prevg)
		prevg->num_engines--;
	engine->group = &idxd->groups[id];
	engine->group->num_engines++;

	return count;
}

static struct device_attribute dev_attr_engine_group =
		__ATTR(group_id, 0644, engine_group_id_show,
		       engine_group_id_store);

static struct attribute *idxd_engine_attributes[] = {
	&dev_attr_engine_group.attr,
	NULL,
};

static const struct attribute_group idxd_engine_attribute_group = {
	.attrs = idxd_engine_attributes,
};

static const struct attribute_group *idxd_engine_attribute_groups[] = {
	&idxd_engine_attribute_group,
	NULL,
};

/* Group attributes */

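/*
 * Recompute the pool of unreserved bandwidth tokens: whatever remains
 * of max_tokens once every group's reservation has been subtracted.
 */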
static void idxd_set_free_tokens(struct idxd_device *idxd)
{
	int i, tokens;

	for (i = 0, tokens = 0; i < idxd->max_groups; i++) {
		struct idxd_group *g = &idxd->groups[i];

		tokens += g->tokens_reserved;
	}

	idxd->nr_tokens = idxd->max_tokens - tokens;
}

static ssize_t group_tokens_reserved_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->tokens_reserved);
}

static ssize_t group_tokens_reserved_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val > idxd->max_tokens)
		return -EINVAL;

	if (val > idxd->nr_tokens + group->tokens_reserved)
		return -EINVAL;

	group->tokens_reserved = val;
	idxd_set_free_tokens(idxd);
	return count;
}

static struct device_attribute dev_attr_group_tokens_reserved =
		__ATTR(tokens_reserved, 0644, group_tokens_reserved_show,
		       group_tokens_reserved_store);

static ssize_t group_tokens_allowed_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->tokens_allowed);
}

static ssize_t group_tokens_allowed_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 4 * group->num_engines ||
	    val > group->tokens_reserved + idxd->nr_tokens)
		return -EINVAL;

	group->tokens_allowed = val;
	return count;
}

static struct device_attribute dev_attr_group_tokens_allowed =
		__ATTR(tokens_allowed, 0644, group_tokens_allowed_show,
		       group_tokens_allowed_store);

static ssize_t group_use_token_limit_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->use_token_limit);
}

static ssize_t group_use_token_limit_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->token_limit == 0)
		return -EPERM;

	group->use_token_limit = !!val;
	return count;
}

static struct device_attribute dev_attr_group_use_token_limit =
		__ATTR(use_token_limit, 0644, group_use_token_limit_show,
		       group_use_token_limit_store);

static ssize_t group_engines_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	int i, rc = 0;
	char *tmp = buf;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		if (!engine->group)
			continue;

		if (engine->group->id == group->id)
			rc += sprintf(tmp + rc, "engine%d.%d ",
					idxd->id, engine->id);
	}

	/*
	 * Replace the trailing space with a newline, guarding the case of
	 * a group with no engines so we never write before buf.
	 */
	if (rc)
		rc--;
	rc += sprintf(tmp + rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_engines =
		__ATTR(engines, 0444, group_engines_show, NULL);

static ssize_t group_work_queues_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	int i, rc = 0;
	char *tmp = buf;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		if (!wq->group)
			continue;

		if (wq->group->id == group->id)
			rc += sprintf(tmp + rc, "wq%d.%d ",
					idxd->id, wq->id);
	}

	/*
	 * Replace the trailing space with a newline, guarding the case of
	 * a group with no wqs so we never write before buf.
	 */
	if (rc)
		rc--;
	rc += sprintf(tmp + rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_work_queues =
		__ATTR(work_queues, 0444, group_work_queues_show, NULL);

static ssize_t group_traffic_class_a_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%d\n", group->tc_a);
}

static ssize_t group_traffic_class_a_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_a = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_a =
		__ATTR(traffic_class_a, 0644, group_traffic_class_a_show,
		       group_traffic_class_a_store);

static ssize_t group_traffic_class_b_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%d\n", group->tc_b);
}

static ssize_t group_traffic_class_b_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_b = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_b =
		__ATTR(traffic_class_b, 0644, group_traffic_class_b_show,
		       group_traffic_class_b_store);

static struct attribute *idxd_group_attributes[] = {
	&dev_attr_group_work_queues.attr,
	&dev_attr_group_engines.attr,
	&dev_attr_group_use_token_limit.attr,
	&dev_attr_group_tokens_allowed.attr,
	&dev_attr_group_tokens_reserved.attr,
	&dev_attr_group_traffic_class_a.attr,
	&dev_attr_group_traffic_class_b.attr,
	NULL,
};

static const struct attribute_group idxd_group_attribute_group = {
	.attrs = idxd_group_attributes,
};

static const struct attribute_group *idxd_group_attribute_groups[] = {
	&idxd_group_attribute_group,
	NULL,
};

/* IDXD work queue attribs */
static ssize_t wq_clients_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%d\n", wq->client_count);
}

static struct device_attribute dev_attr_wq_clients =
		__ATTR(clients, 0444, wq_clients_show, NULL);

static ssize_t wq_state_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	switch (wq->state) {
	case IDXD_WQ_DISABLED:
		return sprintf(buf, "disabled\n");
	case IDXD_WQ_ENABLED:
		return sprintf(buf, "enabled\n");
	}

	return sprintf(buf, "unknown\n");
}

static struct device_attribute dev_attr_wq_state =
		__ATTR(state, 0444, wq_state_show, NULL);

static ssize_t wq_group_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	if (wq->group)
		return sprintf(buf, "%u\n", wq->group->id);
	else
		return sprintf(buf, "-1\n");
}

static ssize_t wq_group_id_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	long id;
	int rc;
	struct idxd_group *prevg, *group;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (wq->group) {
			wq->group->num_wqs--;
			wq->group = NULL;
		}
		return count;
	}

	group = &idxd->groups[id];
	prevg = wq->group;

	if (prevg)
		prevg->num_wqs--;
	wq->group = group;
	group->num_wqs++;
	return count;
}

static struct device_attribute dev_attr_wq_group_id =
		__ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store);

static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%s\n",
			wq_dedicated(wq) ? "dedicated" : "shared");
}

static ssize_t wq_mode_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (sysfs_streq(buf, "dedicated")) {
		set_bit(WQ_FLAG_DEDICATED, &wq->flags);
		wq->threshold = 0;
	} else {
		return -EINVAL;
	}

	return count;
}

static struct device_attribute dev_attr_wq_mode =
		__ATTR(mode, 0644, wq_mode_show, wq_mode_store);

static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->size);
}

static int total_claimed_wq_size(struct idxd_device *idxd)
{
	int i;
	int wq_size = 0;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		wq_size += wq->size;
	}

	return wq_size;
}

static ssize_t wq_size_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	unsigned long size;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &size);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
		return -EINVAL;

	wq->size = size;
	return count;
}

static struct device_attribute dev_attr_wq_size =
		__ATTR(size, 0644, wq_size_show, wq_size_store);

static ssize_t wq_priority_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->priority);
}

static ssize_t wq_priority_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	unsigned long prio;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &prio);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (prio > IDXD_MAX_PRIORITY)
		return -EINVAL;

	wq->priority = prio;
	return count;
}

static struct device_attribute dev_attr_wq_priority =
		__ATTR(priority, 0644, wq_priority_show, wq_priority_store);

static ssize_t wq_type_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	switch (wq->type) {
	case IDXD_WQT_KERNEL:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_KERNEL]);
	case IDXD_WQT_USER:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_USER]);
	case IDXD_WQT_NONE:
	default:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_NONE]);
	}

	return -EINVAL;
}

static ssize_t wq_type_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	enum idxd_wq_type old_type;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	old_type = wq->type;
	if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_NONE]))
		wq->type = IDXD_WQT_NONE;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
		wq->type = IDXD_WQT_KERNEL;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER]))
		wq->type = IDXD_WQT_USER;
	else
		return -EINVAL;

	/* If we are changing queue type, clear the name */
	if (wq->type != old_type)
		memset(wq->name, 0, WQ_NAME_SIZE + 1);

	return count;
}

static struct device_attribute dev_attr_wq_type =
		__ATTR(type, 0644, wq_type_show, wq_type_store);

static ssize_t wq_name_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%s\n", wq->name);
}

static ssize_t wq_name_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
		return -EINVAL;

	memset(wq->name, 0, WQ_NAME_SIZE + 1);
	strncpy(wq->name, buf, WQ_NAME_SIZE);
	strreplace(wq->name, '\n', '\0');
	return count;
}

static struct device_attribute dev_attr_wq_name =
		__ATTR(name, 0644, wq_name_show, wq_name_store);

static ssize_t wq_cdev_minor_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	int minor = -1;

	mutex_lock(&wq->wq_lock);
	if (wq->idxd_cdev)
		minor = wq->idxd_cdev->minor;
	mutex_unlock(&wq->wq_lock);

	if (minor == -1)
		return -ENXIO;
	return sysfs_emit(buf, "%d\n", minor);
}

static struct device_attribute dev_attr_wq_cdev_minor =
		__ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL);

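/*
 * Common parsing for the wq max_transfer_size and max_batch_size
 * stores: accept a non-zero u64 and round it up to the next power of
 * two, since the hardware expresses these limits as powers of two.
 */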
static int __get_sysfs_u64(const char *buf, u64 *val)
{
	int rc;

	rc = kstrtou64(buf, 0, val);
	if (rc < 0)
		return -EINVAL;

	if (*val == 0)
		return -EINVAL;

	*val = roundup_pow_of_two(*val);
	return 0;
}

static ssize_t wq_max_transfer_size_show(struct device *dev, struct device_attribute *attr,
					 char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%llu\n", wq->max_xfer_bytes);
}

static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	u64 xfer_size;
	int rc;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	rc = __get_sysfs_u64(buf, &xfer_size);
	if (rc < 0)
		return rc;

	if (xfer_size > idxd->max_xfer_bytes)
		return -EINVAL;

	wq->max_xfer_bytes = xfer_size;

	return count;
}

static struct device_attribute dev_attr_wq_max_transfer_size =
		__ATTR(max_transfer_size, 0644,
		       wq_max_transfer_size_show, wq_max_transfer_size_store);

static ssize_t wq_max_batch_size_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->max_batch_size);
}

static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	u64 batch_size;
	int rc;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	rc = __get_sysfs_u64(buf, &batch_size);
	if (rc < 0)
		return rc;

	if (batch_size > idxd->max_batch_size)
		return -EINVAL;

	wq->max_batch_size = (u32)batch_size;

	return count;
}

static struct device_attribute dev_attr_wq_max_batch_size =
		__ATTR(max_batch_size, 0644, wq_max_batch_size_show, wq_max_batch_size_store);

static struct attribute *idxd_wq_attributes[] = {
	&dev_attr_wq_clients.attr,
	&dev_attr_wq_state.attr,
	&dev_attr_wq_group_id.attr,
	&dev_attr_wq_mode.attr,
	&dev_attr_wq_size.attr,
	&dev_attr_wq_priority.attr,
	&dev_attr_wq_type.attr,
	&dev_attr_wq_name.attr,
	&dev_attr_wq_cdev_minor.attr,
	&dev_attr_wq_max_transfer_size.attr,
	&dev_attr_wq_max_batch_size.attr,
	NULL,
};

static const struct attribute_group idxd_wq_attribute_group = {
	.attrs = idxd_wq_attributes,
};

static const struct attribute_group *idxd_wq_attribute_groups[] = {
	&idxd_wq_attribute_group,
	NULL,
};

/* IDXD device attribs */
static ssize_t version_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%#x\n", idxd->hw.version);
}
static DEVICE_ATTR_RO(version);

static ssize_t max_work_queues_size_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_wq_size);
}
static DEVICE_ATTR_RO(max_work_queues_size);

static ssize_t max_groups_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_groups);
}
static DEVICE_ATTR_RO(max_groups);

static ssize_t max_work_queues_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_wqs);
}
static DEVICE_ATTR_RO(max_work_queues);

static ssize_t max_engines_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_engines);
}
static DEVICE_ATTR_RO(max_engines);

static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
}
static DEVICE_ATTR_RO(numa_node);

static ssize_t max_batch_size_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_batch_size);
}
static DEVICE_ATTR_RO(max_batch_size);

static ssize_t max_transfer_size_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%llu\n", idxd->max_xfer_bytes);
}
static DEVICE_ATTR_RO(max_transfer_size);

static ssize_t op_cap_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	int i, rc = 0;

	for (i = 0; i < 4; i++)
		rc += sysfs_emit_at(buf, rc, "%#llx ", idxd->hw.opcap.bits[i]);

	rc--;
	rc += sysfs_emit_at(buf, rc, "\n");
	return rc;
}
static DEVICE_ATTR_RO(op_cap);

static ssize_t gen_cap_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%#llx\n", idxd->hw.gen_cap.bits);
}
static DEVICE_ATTR_RO(gen_cap);

static ssize_t configurable_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n",
			test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
}
static DEVICE_ATTR_RO(configurable);

static ssize_t clients_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	unsigned long flags;
	int count = 0, i;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		count += wq->client_count;
	}
	spin_unlock_irqrestore(&idxd->dev_lock, flags);

	return sprintf(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(clients);

static ssize_t state_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	switch (idxd->state) {
	case IDXD_DEV_DISABLED:
	case IDXD_DEV_CONF_READY:
		return sprintf(buf, "disabled\n");
	case IDXD_DEV_ENABLED:
		return sprintf(buf, "enabled\n");
	case IDXD_DEV_HALTED:
		return sprintf(buf, "halted\n");
	}

	return sprintf(buf, "unknown\n");
}
static DEVICE_ATTR_RO(state);

static ssize_t errors_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	int i, out = 0;
	unsigned long flags;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < 4; i++)
		out += sprintf(buf + out, "%#018llx ", idxd->sw_err.bits[i]);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
	out--;
	out += sprintf(buf + out, "\n");
	return out;
}
static DEVICE_ATTR_RO(errors);

static ssize_t max_tokens_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_tokens);
}
static DEVICE_ATTR_RO(max_tokens);

static ssize_t token_limit_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->token_limit);
}

static ssize_t token_limit_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (!idxd->hw.group_cap.token_limit)
		return -EPERM;

	if (val > idxd->hw.group_cap.total_tokens)
		return -EINVAL;

	idxd->token_limit = val;
	return count;
}
static DEVICE_ATTR_RW(token_limit);

static ssize_t cdev_major_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->major);
}
static DEVICE_ATTR_RO(cdev_major);

static ssize_t cmd_status_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%#x\n", idxd->cmd_status);
}
static DEVICE_ATTR_RO(cmd_status);

static struct attribute *idxd_device_attributes[] = {
	&dev_attr_version.attr,
	&dev_attr_max_groups.attr,
	&dev_attr_max_work_queues.attr,
	&dev_attr_max_work_queues_size.attr,
	&dev_attr_max_engines.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_max_batch_size.attr,
	&dev_attr_max_transfer_size.attr,
	&dev_attr_op_cap.attr,
	&dev_attr_gen_cap.attr,
	&dev_attr_configurable.attr,
	&dev_attr_clients.attr,
	&dev_attr_state.attr,
	&dev_attr_errors.attr,
	&dev_attr_max_tokens.attr,
	&dev_attr_token_limit.attr,
	&dev_attr_cdev_major.attr,
	&dev_attr_cmd_status.attr,
	NULL,
};

static const struct attribute_group idxd_device_attribute_group = {
	.attrs = idxd_device_attributes,
};

static const struct attribute_group *idxd_attribute_groups[] = {
	&idxd_device_attribute_group,
	NULL,
};

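/*
 * The conf_dev hierarchy mirrors the hardware: the idxd device conf_dev
 * is the parent, with one child conf_dev per engine, group and wq.  Each
 * idxd_setup_*_sysfs() helper below registers one class of children and
 * unwinds its own registrations on failure.
 */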
static int idxd_setup_engine_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		engine->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&engine->conf_dev, "engine%d.%d",
			     idxd->id, engine->id);
		engine->conf_dev.bus = idxd_get_bus_type(idxd);
		engine->conf_dev.groups = idxd_engine_attribute_groups;
		engine->conf_dev.type = &idxd_engine_device_type;
		dev_dbg(dev, "Engine device register: %s\n",
			dev_name(&engine->conf_dev));
		rc = device_register(&engine->conf_dev);
		if (rc < 0) {
			put_device(&engine->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_engine *engine = &idxd->engines[i];

		device_unregister(&engine->conf_dev);
	}
	return rc;
}

static int idxd_setup_group_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		group->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&group->conf_dev, "group%d.%d",
			     idxd->id, group->id);
		group->conf_dev.bus = idxd_get_bus_type(idxd);
		group->conf_dev.groups = idxd_group_attribute_groups;
		group->conf_dev.type = &idxd_group_device_type;
		dev_dbg(dev, "Group device register: %s\n",
			dev_name(&group->conf_dev));
		rc = device_register(&group->conf_dev);
		if (rc < 0) {
			put_device(&group->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_group *group = &idxd->groups[i];

		device_unregister(&group->conf_dev);
	}
	return rc;
}

static int idxd_setup_wq_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		wq->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
		wq->conf_dev.bus = idxd_get_bus_type(idxd);
		wq->conf_dev.groups = idxd_wq_attribute_groups;
		wq->conf_dev.type = &idxd_wq_device_type;
		dev_dbg(dev, "WQ device register: %s\n",
			dev_name(&wq->conf_dev));
		rc = device_register(&wq->conf_dev);
		if (rc < 0) {
			put_device(&wq->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_wq *wq = &idxd->wqs[i];

		device_unregister(&wq->conf_dev);
	}
	return rc;
}

static int idxd_setup_device_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc;
	char devname[IDXD_NAME_SIZE];

	sprintf(devname, "%s%d", idxd_get_dev_name(idxd), idxd->id);
	idxd->conf_dev.parent = dev;
	dev_set_name(&idxd->conf_dev, "%s", devname);
	idxd->conf_dev.bus = idxd_get_bus_type(idxd);
	idxd->conf_dev.groups = idxd_attribute_groups;
	idxd->conf_dev.type = idxd_get_device_type(idxd);

	dev_dbg(dev, "IDXD device register: %s\n", dev_name(&idxd->conf_dev));
	rc = device_register(&idxd->conf_dev);
	if (rc < 0) {
		put_device(&idxd->conf_dev);
		return rc;
	}

	return 0;
}

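/*
 * Top-level sysfs setup: register the device conf_dev first (it is the
 * parent of everything else), then the wq, group and engine conf_devs.
 * A failure partway through is only logged and returned; the conf_devs
 * registered by the earlier steps are left in place, as the
 * "unregister conf dev" markers below call out.
 */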
int idxd_setup_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc;

	rc = idxd_setup_device_sysfs(idxd);
	if (rc < 0) {
		dev_dbg(dev, "Device sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_wq_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Work Queue sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_group_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Group sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_engine_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Engine sysfs registering failed: %d\n", rc);
		return rc;
	}

	return 0;
}

void idxd_cleanup_sysfs(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		device_unregister(&wq->conf_dev);
	}

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		device_unregister(&engine->conf_dev);
	}

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		device_unregister(&group->conf_dev);
	}

	device_unregister(&idxd->conf_dev);
}

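/*
 * Bus registration happens once at module load, one bus per supported
 * device type (currently only dsa_bus_type); a partial failure
 * unregisters the buses that were already registered.
 */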
int idxd_register_bus_type(void)
{
	int i, rc;

	for (i = 0; i < IDXD_TYPE_MAX; i++) {
		rc = bus_register(idxd_bus_types[i]);
		if (rc < 0)
			goto bus_err;
	}

	return 0;

bus_err:
	while (--i >= 0)
		bus_unregister(idxd_bus_types[i]);
	return rc;
}

void idxd_unregister_bus_type(void)
{
	int i;

	for (i = 0; i < IDXD_TYPE_MAX; i++)
		bus_unregister(idxd_bus_types[i]);
}