Lines Matching defs:idxd (IDXD driver sysfs configuration code)

9 #include <uapi/linux/idxd.h>
11 #include "idxd.h"
78 struct idxd_device *idxd = confdev_to_idxd(dev);
80 if (idxd->state != IDXD_DEV_CONF_READY)
85 struct idxd_device *idxd = wq->idxd;
87 if (idxd->state < IDXD_DEV_CONF_READY)
111 struct idxd_device *idxd = confdev_to_idxd(dev);
113 if (idxd->state != IDXD_DEV_CONF_READY) {
122 spin_lock_irqsave(&idxd->dev_lock, flags);
123 rc = idxd_device_config(idxd);
124 spin_unlock_irqrestore(&idxd->dev_lock, flags);
132 rc = idxd_device_enable(idxd);
141 rc = idxd_register_dma_device(idxd);
150 struct idxd_device *idxd = wq->idxd;
154 if (idxd->state != IDXD_DEV_ENABLED) {
185 spin_lock_irqsave(&idxd->dev_lock, flags);
186 rc = idxd_device_config(idxd);
187 spin_unlock_irqrestore(&idxd->dev_lock, flags);
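
The matches from lines 111-187 come from the configuration-bus probe path. Read together, the device branch checks that the device has reached IDXD_DEV_CONF_READY, commits the configuration to hardware under dev_lock, enables the device, and finally registers it with dmaengine; the work-queue branch (lines 150-187) repeats the locked idxd_device_config() step once the parent device is enabled. A simplified reconstruction of the device branch, using only calls that appear in the matches (error codes and the rollback are assumptions):

    /* Simplified sketch of the device branch of the probe path (lines 111-141).
     * Error codes and the rollback on DMA-registration failure are assumptions;
     * only the sequence of calls is taken from the matched lines. */
    static int idxd_probe_device_sketch(struct device *dev)
    {
        struct idxd_device *idxd = confdev_to_idxd(dev);
        unsigned long flags;
        int rc;

        if (idxd->state != IDXD_DEV_CONF_READY)     /* line 113 */
            return -EBUSY;

        /* Configuration is written to hardware under dev_lock (lines 122-124). */
        spin_lock_irqsave(&idxd->dev_lock, flags);
        rc = idxd_device_config(idxd);
        spin_unlock_irqrestore(&idxd->dev_lock, flags);
        if (rc < 0)
            return rc;

        rc = idxd_device_enable(idxd);              /* line 132 */
        if (rc < 0)
            return rc;

        rc = idxd_register_dma_device(idxd);        /* line 141 */
        if (rc < 0)
            idxd_device_disable(idxd);              /* assumed rollback */

        return rc;
    }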
242 struct idxd_device *idxd = wq->idxd;
243 struct device *dev = &idxd->pdev->dev;
285 struct idxd_device *idxd = confdev_to_idxd(dev);
289 dev_name(&idxd->conf_dev));
290 for (i = 0; i < idxd->max_wqs; i++) {
291 struct idxd_wq *wq = &idxd->wqs[i];
296 dev_name(&idxd->conf_dev));
300 idxd_unregister_dma_device(idxd);
301 rc = idxd_device_disable(idxd);
302 for (i = 0; i < idxd->max_wqs; i++) {
303 struct idxd_wq *wq = &idxd->wqs[i];
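
The remove path (lines 285-303) mirrors this: the device is pulled out of dmaengine and disabled, with a pass over the work queues before and after. Only the loop headers and locals appear in the matches, so the per-queue work is elided in the sketch below:

    /* Rough shape of the remove path (lines 285-303); the per-wq bodies are
     * elided because only the loop headers are matched above. */
    static void idxd_remove_device_sketch(struct idxd_device *idxd)
    {
        int i;

        for (i = 0; i < idxd->max_wqs; i++) {
            /* &idxd->wqs[i]: quiesce each wq (body not in the matches) */
        }

        idxd_unregister_dma_device(idxd);   /* line 300 */
        idxd_device_disable(idxd);          /* line 301; return value checked in the real code */

        for (i = 0; i < idxd->max_wqs; i++) {
            /* &idxd->wqs[i]: per-wq cleanup after the device is disabled (not shown) */
        }
    }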
350 struct bus_type *idxd_get_bus_type(struct idxd_device *idxd)
352 return idxd_bus_types[idxd->type];
355 static struct device_type *idxd_get_device_type(struct idxd_device *idxd)
357 if (idxd->type == IDXD_TYPE_DSA)
409 struct idxd_device *idxd = engine->idxd;
418 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
421 if (id > idxd->max_groups - 1 || id < -1)
436 engine->group = &idxd->groups[id];
462 static void idxd_set_free_tokens(struct idxd_device *idxd)
466 for (i = 0, tokens = 0; i < idxd->max_groups; i++) {
467 struct idxd_group *g = &idxd->groups[i];
472 idxd->nr_tokens = idxd->max_tokens - tokens;
491 struct idxd_device *idxd = group->idxd;
499 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
502 if (idxd->state == IDXD_DEV_ENABLED)
505 if (val > idxd->max_tokens)
508 if (val > idxd->nr_tokens + group->tokens_reserved)
512 idxd_set_free_tokens(idxd);
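
idxd_set_free_tokens() (lines 462-472) recomputes the pool of unreserved tokens as max_tokens minus the sum of every group's tokens_reserved, and the tokens_reserved store handler (lines 491-512) only accepts a value that fits in that pool once the group's own current reservation is handed back. A small stand-alone model of the arithmetic (the struct layout and numbers are invented; only the checks mirror the matched lines):

    /* Stand-alone model of the token accounting implied by lines 462-512.
     * free tokens = max_tokens - sum of per-group reservations, and a new
     * reservation may not exceed the free pool plus the group's current
     * reservation. */
    #include <stdio.h>

    #define MAX_GROUPS 4

    struct group_model  { unsigned int tokens_reserved; };
    struct device_model {
        unsigned int max_tokens;
        unsigned int nr_tokens;     /* free (unreserved) tokens */
        struct group_model groups[MAX_GROUPS];
    };

    static void set_free_tokens(struct device_model *d)
    {
        unsigned int i, reserved = 0;

        for (i = 0; i < MAX_GROUPS; i++)
            reserved += d->groups[i].tokens_reserved;
        d->nr_tokens = d->max_tokens - reserved;
    }

    /* Returns 0 on success, -1 if the request violates the checks at lines 505-508. */
    static int reserve_tokens(struct device_model *d, unsigned int grp, unsigned int val)
    {
        if (val > d->max_tokens)
            return -1;
        if (val > d->nr_tokens + d->groups[grp].tokens_reserved)
            return -1;

        d->groups[grp].tokens_reserved = val;
        set_free_tokens(d);
        return 0;
    }

    int main(void)
    {
        struct device_model d = { .max_tokens = 96 };

        set_free_tokens(&d);
        printf("reserve 40 for group 0: %d\n", reserve_tokens(&d, 0, 40)); /* ok */
        printf("reserve 80 for group 1: %d\n", reserve_tokens(&d, 1, 80)); /* fails: only 56 free */
        printf("free tokens: %u\n", d.nr_tokens);                          /* 56 */
        return 0;
    }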
536 struct idxd_device *idxd = group->idxd;
544 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
547 if (idxd->state == IDXD_DEV_ENABLED)
551 val > group->tokens_reserved + idxd->nr_tokens)
578 struct idxd_device *idxd = group->idxd;
586 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
589 if (idxd->state == IDXD_DEV_ENABLED)
592 if (idxd->token_limit == 0)
610 struct idxd_device *idxd = group->idxd;
612 for (i = 0; i < idxd->max_engines; i++) {
613 struct idxd_engine *engine = &idxd->engines[i];
620 idxd->id, engine->id);
639 struct idxd_device *idxd = group->idxd;
641 for (i = 0; i < idxd->max_wqs; i++) {
642 struct idxd_wq *wq = &idxd->wqs[i];
649 idxd->id, wq->id);
677 struct idxd_device *idxd = group->idxd;
685 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
688 if (idxd->state == IDXD_DEV_ENABLED)
718 struct idxd_device *idxd = group->idxd;
726 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
729 if (idxd->state == IDXD_DEV_ENABLED)
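
A guard repeats throughout the store handlers above (lines 499-502, 544-547, 586-589, 685-688, 726-729, and again for the work-queue attributes further down): writes are rejected unless the hardware advertised software configurability, and rejected while the device is enabled, so group and work-queue configuration can only change offline. The common shape, with illustrative error codes:

    /* Common write-side guard seen in the store handlers above.
     * The error codes are illustrative; only the two tests are
     * taken from the matched lines. */
    if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
        return -EPERM;      /* hardware is not software-configurable */

    if (idxd->state == IDXD_DEV_ENABLED)
        return -EPERM;      /* no reconfiguration while the device is live */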
809 struct idxd_device *idxd = wq->idxd;
818 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
824 if (id > idxd->max_groups - 1 || id < -1)
835 group = &idxd->groups[id];
862 struct idxd_device *idxd = wq->idxd;
864 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
891 static int total_claimed_wq_size(struct idxd_device *idxd)
896 for (i = 0; i < idxd->max_wqs; i++) {
897 struct idxd_wq *wq = &idxd->wqs[i];
911 struct idxd_device *idxd = wq->idxd;
918 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
921 if (idxd->state == IDXD_DEV_ENABLED)
924 if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
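
The size check at line 924 works on totals rather than on a single queue: the requested size is accepted only if, after swapping out this queue's current claim, the sum over all work queues still fits within max_wq_size. For example (numbers invented for illustration), with max_wq_size = 128 and two queues currently sized 64 and 32, growing the second queue to 64 passes (64 + 64 = 128) while 65 is rejected.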
948 struct idxd_device *idxd = wq->idxd;
955 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
1097 struct idxd_device *idxd = wq->idxd;
1101 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
1111 if (xfer_size > idxd->max_xfer_bytes)
1134 struct idxd_device *idxd = wq->idxd;
1138 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
1148 if (batch_size > idxd->max_batch_size)
1187 struct idxd_device *idxd =
1190 return sprintf(buf, "%#x\n", idxd->hw.version);
1198 struct idxd_device *idxd =
1201 return sprintf(buf, "%u\n", idxd->max_wq_size);
1208 struct idxd_device *idxd =
1211 return sprintf(buf, "%u\n", idxd->max_groups);
1218 struct idxd_device *idxd =
1221 return sprintf(buf, "%u\n", idxd->max_wqs);
1228 struct idxd_device *idxd =
1231 return sprintf(buf, "%u\n", idxd->max_engines);
1238 struct idxd_device *idxd =
1241 return sprintf(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
1248 struct idxd_device *idxd =
1251 return sprintf(buf, "%u\n", idxd->max_batch_size);
1259 struct idxd_device *idxd =
1262 return sprintf(buf, "%llu\n", idxd->max_xfer_bytes);
1269 struct idxd_device *idxd =
1274 rc += sysfs_emit_at(buf, rc, "%#llx ", idxd->hw.opcap.bits[i]);
1285 struct idxd_device *idxd =
1288 return sprintf(buf, "%#llx\n", idxd->hw.gen_cap.bits);
1295 struct idxd_device *idxd =
1299 test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
1306 struct idxd_device *idxd =
1311 spin_lock_irqsave(&idxd->dev_lock, flags);
1312 for (i = 0; i < idxd->max_wqs; i++) {
1313 struct idxd_wq *wq = &idxd->wqs[i];
1317 spin_unlock_irqrestore(&idxd->dev_lock, flags);
1326 struct idxd_device *idxd =
1329 switch (idxd->state) {
1346 struct idxd_device *idxd =
1351 spin_lock_irqsave(&idxd->dev_lock, flags);
1353 out += sprintf(buf + out, "%#018llx ", idxd->sw_err.bits[i]);
1354 spin_unlock_irqrestore(&idxd->dev_lock, flags);
1364 struct idxd_device *idxd =
1367 return sprintf(buf, "%u\n", idxd->max_tokens);
1374 struct idxd_device *idxd =
1377 return sprintf(buf, "%u\n", idxd->token_limit);
1384 struct idxd_device *idxd =
1393 if (idxd->state == IDXD_DEV_ENABLED)
1396 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
1399 if (!idxd->hw.group_cap.token_limit)
1402 if (val > idxd->hw.group_cap.total_tokens)
1405 idxd->token_limit = val;
1413 struct idxd_device *idxd =
1416 return sprintf(buf, "%u\n", idxd->major);
1423 struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev);
1425 return sprintf(buf, "%#x\n", idxd->cmd_status);
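
Every device-level attribute callback above recovers the idxd_device from the struct device that sysfs hands back, via container_of() on the embedded conf_dev; line 1423 shows the idiom on one line, and the split declarations at lines 1187, 1198, 1208 and onward are the same thing wrapped over two lines. Reassembled from lines 1187-1190 (the function name is inferred from the version format string; only the body appears in the matches):

    /* Reassembled from lines 1187-1190: the sysfs core passes the embedded
     * conf_dev back in, and container_of() walks out to the enclosing
     * idxd_device.  confdev_to_idxd() (lines 78, 111, 285) presumably wraps
     * the same computation. */
    static ssize_t version_show(struct device *dev, struct device_attribute *attr,
                                char *buf)
    {
        struct idxd_device *idxd =
            container_of(dev, struct idxd_device, conf_dev);

        return sprintf(buf, "%#x\n", idxd->hw.version);
    }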
1460 static int idxd_setup_engine_sysfs(struct idxd_device *idxd)
1462 struct device *dev = &idxd->pdev->dev;
1465 for (i = 0; i < idxd->max_engines; i++) {
1466 struct idxd_engine *engine = &idxd->engines[i];
1468 engine->conf_dev.parent = &idxd->conf_dev;
1470 idxd->id, engine->id);
1471 engine->conf_dev.bus = idxd_get_bus_type(idxd);
1487 struct idxd_engine *engine = &idxd->engines[i];
1494 static int idxd_setup_group_sysfs(struct idxd_device *idxd)
1496 struct device *dev = &idxd->pdev->dev;
1499 for (i = 0; i < idxd->max_groups; i++) {
1500 struct idxd_group *group = &idxd->groups[i];
1502 group->conf_dev.parent = &idxd->conf_dev;
1504 idxd->id, group->id);
1505 group->conf_dev.bus = idxd_get_bus_type(idxd);
1521 struct idxd_group *group = &idxd->groups[i];
1528 static int idxd_setup_wq_sysfs(struct idxd_device *idxd)
1530 struct device *dev = &idxd->pdev->dev;
1533 for (i = 0; i < idxd->max_wqs; i++) {
1534 struct idxd_wq *wq = &idxd->wqs[i];
1536 wq->conf_dev.parent = &idxd->conf_dev;
1537 dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
1538 wq->conf_dev.bus = idxd_get_bus_type(idxd);
1554 struct idxd_wq *wq = &idxd->wqs[i];
1561 static int idxd_setup_device_sysfs(struct idxd_device *idxd)
1563 struct device *dev = &idxd->pdev->dev;
1567 sprintf(devname, "%s%d", idxd_get_dev_name(idxd), idxd->id);
1568 idxd->conf_dev.parent = dev;
1569 dev_set_name(&idxd->conf_dev, "%s", devname);
1570 idxd->conf_dev.bus = idxd_get_bus_type(idxd);
1571 idxd->conf_dev.groups = idxd_attribute_groups;
1572 idxd->conf_dev.type = idxd_get_device_type(idxd);
1574 dev_dbg(dev, "IDXD device register: %s\n", dev_name(&idxd->conf_dev));
1575 rc = device_register(&idxd->conf_dev);
1577 put_device(&idxd->conf_dev);
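
One detail worth noting in idxd_setup_device_sysfs() (lines 1561-1577): once device_register() has been called on the embedded conf_dev it holds a reference, so the failure path must drop it with put_device() rather than freeing the idxd_device directly, which is what line 1577 does. Condensed below; the intermediate devname buffer built at line 1567 is folded into dev_set_name() for brevity:

    /* Condensed from lines 1567-1577. */
    idxd->conf_dev.parent = dev;
    dev_set_name(&idxd->conf_dev, "%s%d", idxd_get_dev_name(idxd), idxd->id);
    idxd->conf_dev.bus = idxd_get_bus_type(idxd);
    idxd->conf_dev.groups = idxd_attribute_groups;
    idxd->conf_dev.type = idxd_get_device_type(idxd);

    rc = device_register(&idxd->conf_dev);
    if (rc < 0) {
        /* device_register() took a reference; release it instead of kfree() */
        put_device(&idxd->conf_dev);
        return rc;
    }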
1584 int idxd_setup_sysfs(struct idxd_device *idxd)
1586 struct device *dev = &idxd->pdev->dev;
1589 rc = idxd_setup_device_sysfs(idxd);
1595 rc = idxd_setup_wq_sysfs(idxd);
1602 rc = idxd_setup_group_sysfs(idxd);
1609 rc = idxd_setup_engine_sysfs(idxd);
1619 void idxd_cleanup_sysfs(struct idxd_device *idxd)
1623 for (i = 0; i < idxd->max_wqs; i++) {
1624 struct idxd_wq *wq = &idxd->wqs[i];
1629 for (i = 0; i < idxd->max_engines; i++) {
1630 struct idxd_engine *engine = &idxd->engines[i];
1635 for (i = 0; i < idxd->max_groups; i++) {
1636 struct idxd_group *group = &idxd->groups[i];
1641 device_unregister(&idxd->conf_dev);
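
Finally, idxd_cleanup_sysfs() (lines 1619-1641) unwinds the hierarchy built by idxd_setup_sysfs(): the child conf_devs for the work queues, engines and groups go first, and the device's own conf_dev, which is their sysfs parent, goes last (line 1641). Only the loop headers are matched above; the calls inside them are presumably device_unregister(), matching the explicit call at the end:

    /* Presumed shape of idxd_cleanup_sysfs(): children first, parent last.
     * The device_unregister() calls inside the loops are an assumption; the
     * matches show only the loop headers and the final call at line 1641. */
    void idxd_cleanup_sysfs(struct idxd_device *idxd)
    {
        int i;

        for (i = 0; i < idxd->max_wqs; i++)
            device_unregister(&idxd->wqs[i].conf_dev);

        for (i = 0; i < idxd->max_engines; i++)
            device_unregister(&idxd->engines[i].conf_dev);

        for (i = 0; i < idxd->max_groups; i++)
            device_unregister(&idxd->groups[i].conf_dev);

        device_unregister(&idxd->conf_dev);
    }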