Lines Matching refs:device

26 * Each device has a channels list, which runs unlocked but is never modified
27 * once the device is registered, it's just setup by the driver.
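
The two comment lines above (26-27) state the core locking rule: a provider fills the device's channels list exactly once, before registration, and the core then walks it without taking a lock. A minimal sketch of that setup step, assuming a hypothetical foo driver; the list fields and dma_async_device_register() are real dmaengine API, but real drivers embed struct dma_chan inside their own channel structure and do proper error unwinding:

#include <linux/dmaengine.h>
#include <linux/slab.h>

static int foo_setup_channels(struct dma_device *dd, int nr_chans)
{
        int i;

        /* dd->dev and the mandatory callbacks are assumed to be set already */
        INIT_LIST_HEAD(&dd->channels);
        for (i = 0; i < nr_chans; i++) {
                struct dma_chan *chan = kzalloc(sizeof(*chan), GFP_KERNEL);

                if (!chan)
                        return -ENOMEM;   /* unwinding omitted in this sketch */
                chan->device = dd;
                /* linked once here; never modified after registration */
                list_add_tail(&chan->device_node, &dd->channels);
        }
        /* from here on the core may traverse dd->channels unlocked */
        return dma_async_device_register(dd);
}
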
39 #include <linux/device.h>
149 * dev_to_dma_chan - convert a device pointer to its sysfs container object
150 * @dev: device node
154 static struct dma_chan *dev_to_dma_chan(struct device *dev)
158 chan_dev = container_of(dev, typeof(*chan_dev), device);
162 static ssize_t memcpy_count_show(struct device *dev,
184 static ssize_t bytes_transferred_show(struct device *dev,
206 static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
232 static void chan_dev_release(struct device *dev)
236 chan_dev = container_of(dev, typeof(*chan_dev), device);
246 /* --- client and device registration --- */
304 int node = dev_to_node(chan->device->dev);
322 struct dma_device *device;
327 list_for_each_entry(device, &dma_device_list, global_node) {
328 if (!dma_has_cap(cap, device->cap_mask) ||
329 dma_has_cap(DMA_PRIVATE, device->cap_mask))
331 list_for_each_entry(chan, &device->channels, device_node) {
364 struct dma_device *device;
373 list_for_each_entry(device, &dma_device_list, global_node) {
374 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
376 list_for_each_entry(chan, &device->channels, device_node)
392 static int dma_device_satisfies_mask(struct dma_device *device,
397 bitmap_and(has.bits, want->bits, device->cap_mask.bits,
404 return chan->device->owner;
425 struct dma_device *device = container_of(ref, struct dma_device, ref);
427 list_del_rcu(&device->global_node);
430 if (device->device_release)
431 device->device_release(device);
434 static void dma_device_put(struct dma_device *device)
437 kref_put(&device->ref, dma_device_release);
461 ret = kref_get_unless_zero(&chan->device->ref);
468 if (chan->device->device_alloc_chan_resources) {
469 ret = chan->device->device_alloc_chan_resources(chan);
476 if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
482 dma_device_put(chan->device);
503 if (!chan->client_count && chan->device->device_free_chan_resources) {
506 chan->device->device_free_chan_resources(chan);
516 dma_device_put(chan->device);
529 dev_err(chan->device->dev, "%s: timeout!\n", __func__);
556 struct dma_device *device;
560 list_for_each_entry_rcu(device, &dma_device_list, global_node) {
561 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
563 list_for_each_entry(chan, &device->channels, device_node)
565 device->device_issue_pending(chan);
573 struct dma_device *device;
578 device = chan->device;
581 if (!(test_bit(DMA_SLAVE, device->cap_mask.bits) ||
582 test_bit(DMA_CYCLIC, device->cap_mask.bits)))
590 if (!device->directions)
593 caps->src_addr_widths = device->src_addr_widths;
594 caps->dst_addr_widths = device->dst_addr_widths;
595 caps->directions = device->directions;
596 caps->min_burst = device->min_burst;
597 caps->max_burst = device->max_burst;
598 caps->max_sg_burst = device->max_sg_burst;
599 caps->residue_granularity = device->residue_granularity;
600 caps->descriptor_reuse = device->descriptor_reuse;
601 caps->cmd_pause = !!device->device_pause;
602 caps->cmd_resume = !!device->device_resume;
603 caps->cmd_terminate = !!device->device_terminate_all;
606 * DMA engine device might be configured with non-uniformly
607 * distributed slave capabilities per device channels. In this
612 if (device->device_caps)
613 device->device_caps(chan, caps);
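
Lines 593-613 show how the core copies the device-wide limits and then lets the optional device_caps() callback override them, because capabilities may differ between channels of a single device. A consumer-side sketch of reading those per-channel capabilities through dma_get_slave_caps(); my_check_chan() is a hypothetical helper, the structure and flag names are real:

#include <linux/dmaengine.h>

static bool my_check_chan(struct dma_chan *chan)
{
        struct dma_slave_caps caps;

        if (dma_get_slave_caps(chan, &caps))
                return false;

        /* accept only channels that can write 4-byte words towards a device */
        return (caps.directions & BIT(DMA_MEM_TO_DEV)) &&
               (caps.dst_addr_widths & BIT(DMA_SLAVE_BUSWIDTH_4_BYTES));
}
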
656 static struct dma_chan *find_candidate(struct dma_device *device,
660 struct dma_chan *chan = private_candidate(mask, device, fn, fn_param);
669 dma_cap_set(DMA_PRIVATE, device->cap_mask);
670 device->privatecnt++;
675 dev_dbg(device->dev, "%s: %s module removed\n",
677 list_del_rcu(&device->global_node);
679 dev_dbg(device->dev,
683 if (--device->privatecnt == 0)
684 dma_cap_clear(DMA_PRIVATE, device->cap_mask);
705 struct dma_device *device = chan->device;
707 dma_cap_set(DMA_PRIVATE, device->cap_mask);
708 device->privatecnt++;
711 dev_dbg(chan->device->dev,
715 if (--device->privatecnt == 0)
716 dma_cap_clear(DMA_PRIVATE, device->cap_mask);
728 struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
739 chan = find_candidate(device, &mask, NULL, NULL);
752 * @np: device node to look for DMA channels
760 struct dma_device *device, *_d;
765 list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
766 /* Finds a DMA controller with matching device node */
767 if (np && device->dev->of_node && np != device->dev->of_node)
770 chan = find_candidate(device, mask, fn, fn_param);
787 static const struct dma_slave_map *dma_filter_match(struct dma_device *device,
789 struct device *dev)
793 if (!device->filter.mapcnt)
796 for (i = 0; i < device->filter.mapcnt; i++) {
797 const struct dma_slave_map *map = &device->filter.map[i];
809 * @dev: pointer to client device structure
814 struct dma_chan *dma_request_chan(struct device *dev, const char *name)
819 /* If device-tree is present get slave info from here */
823 /* If device was enumerated by ACPI get slave info from here */
867 if (sysfs_create_link(&chan->dev->device.kobj, &dev->kobj,
870 if (sysfs_create_link(&dev->kobj, &chan->dev->device.kobj, chan->name))
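
Lines 809-870 belong to dma_request_chan(): it resolves a channel by name via device tree first, then ACPI, then the dma_slave_map filter table (lines 787-797), and finally creates the sysfs links shown above. A typical client-side call, sketched with a hypothetical probe function and an assumed channel name "rx":

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/err.h>

static int my_client_probe(struct device *dev)
{
        struct dma_chan *chan;

        chan = dma_request_chan(dev, "rx");   /* never NULL, only ERR_PTR on failure */
        if (IS_ERR(chan))
                return dev_err_probe(dev, PTR_ERR(chan), "no rx channel\n");

        /* ... configure with dmaengine_slave_config(), submit descriptors ... */

        dma_release_channel(chan);
        return 0;
}
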
911 if (--chan->device->privatecnt == 0)
912 dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
915 sysfs_remove_link(&chan->dev->device.kobj, DMA_SLAVE_NAME);
935 struct dma_device *device, *_d;
943 list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
944 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
946 list_for_each_entry(chan, &device->channels, device_node) {
950 list_del_rcu(&device->global_node);
953 dev_dbg(chan->device->dev,
974 struct dma_device *device, *_d;
981 list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
982 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
984 list_for_each_entry(chan, &device->channels, device_node)
991 static bool device_has_all_tx_types(struct dma_device *device)
993 /* A device that satisfies this test has channels that will never cause
998 if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
1003 if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
1008 if (!dma_has_cap(DMA_XOR, device->cap_mask))
1012 if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
1018 if (!dma_has_cap(DMA_PQ, device->cap_mask))
1022 if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
1030 static int get_dma_id(struct dma_device *device)
1036 device->dev_id = rc;
1040 static int __dma_async_device_channel_register(struct dma_device *device,
1058 mutex_lock(&device->chan_mutex);
1059 chan->chan_id = ida_alloc(&device->chan_ida, GFP_KERNEL);
1060 mutex_unlock(&device->chan_mutex);
1068 chan->dev->device.class = &dma_devclass;
1069 chan->dev->device.parent = device->dev;
1071 chan->dev->dev_id = device->dev_id;
1072 dev_set_name(&chan->dev->device, "dma%dchan%d",
1073 device->dev_id, chan->chan_id);
1074 rc = device_register(&chan->dev->device);
1078 device->chancnt++;
1083 mutex_lock(&device->chan_mutex);
1084 ida_free(&device->chan_ida, chan->chan_id);
1085 mutex_unlock(&device->chan_mutex);
1094 int dma_async_device_channel_register(struct dma_device *device,
1099 rc = __dma_async_device_channel_register(device, chan);
1108 static void __dma_async_device_channel_unregister(struct dma_device *device,
1114 WARN_ONCE(!device->device_release && chan->client_count,
1118 device->chancnt--;
1121 mutex_lock(&device->chan_mutex);
1122 ida_free(&device->chan_ida, chan->chan_id);
1123 mutex_unlock(&device->chan_mutex);
1124 device_unregister(&chan->dev->device);
1128 void dma_async_device_channel_unregister(struct dma_device *device,
1131 __dma_async_device_channel_unregister(device, chan);
1138 * @device: pointer to &struct dma_device
1144 int dma_async_device_register(struct dma_device *device)
1149 if (!device)
1152 /* validate device routines */
1153 if (!device->dev) {
1158 device->owner = device->dev->driver->owner;
1160 if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) {
1161 dev_err(device->dev,
1167 if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) {
1168 dev_err(device->dev,
1174 if (dma_has_cap(DMA_XOR_VAL, device->cap_mask) && !device->device_prep_dma_xor_val) {
1175 dev_err(device->dev,
1181 if (dma_has_cap(DMA_PQ, device->cap_mask) && !device->device_prep_dma_pq) {
1182 dev_err(device->dev,
1188 if (dma_has_cap(DMA_PQ_VAL, device->cap_mask) && !device->device_prep_dma_pq_val) {
1189 dev_err(device->dev,
1195 if (dma_has_cap(DMA_MEMSET, device->cap_mask) && !device->device_prep_dma_memset) {
1196 dev_err(device->dev,
1202 if (dma_has_cap(DMA_INTERRUPT, device->cap_mask) && !device->device_prep_dma_interrupt) {
1203 dev_err(device->dev,
1209 if (dma_has_cap(DMA_CYCLIC, device->cap_mask) && !device->device_prep_dma_cyclic) {
1210 dev_err(device->dev,
1216 if (dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && !device->device_prep_interleaved_dma) {
1217 dev_err(device->dev,
1224 if (!device->device_tx_status) {
1225 dev_err(device->dev, "Device tx_status is not defined\n");
1230 if (!device->device_issue_pending) {
1231 dev_err(device->dev, "Device issue_pending is not defined\n");
1235 if (!device->device_release)
1236 dev_dbg(device->dev,
1239 kref_init(&device->ref);
1244 if (device_has_all_tx_types(device))
1245 dma_cap_set(DMA_ASYNC_TX, device->cap_mask);
1247 rc = get_dma_id(device);
1251 mutex_init(&device->chan_mutex);
1252 ida_init(&device->chan_ida);
1255 list_for_each_entry(chan, &device->channels, device_node) {
1256 rc = __dma_async_device_channel_register(device, chan);
1263 if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
1264 list_for_each_entry(chan, &device->channels, device_node) {
1278 list_add_tail_rcu(&device->global_node, &dma_device_list);
1279 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
1280 device->privatecnt++; /* Always private */
1284 dmaengine_debug_register(device);
1290 if (!device->chancnt) {
1291 ida_free(&dma_ida, device->dev_id);
1295 list_for_each_entry(chan, &device->channels, device_node) {
1301 device_unregister(&chan->dev->device);
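
Lines 1144-1301 are the provider entry point, dma_async_device_register(), and its validation: each capability bit must be backed by the matching prep callback, device_tx_status and device_issue_pending are always required, and a missing device_release only produces a dev_dbg note. A sketch of a memcpy-only provider filling in just enough to pass those checks; the foo_* callbacks are hypothetical and their bodies are assumed to live elsewhere in the driver. The devres-managed wrapper listed further down (dmaenginem_async_device_register(), lines 1351-1363) takes the same structure and removes the need for an explicit unregister on the remove path.

#include <linux/dmaengine.h>

/* hypothetical provider callbacks, defined elsewhere in the driver */
static struct dma_async_tx_descriptor *foo_prep_memcpy(struct dma_chan *chan,
                dma_addr_t dst, dma_addr_t src, size_t len, unsigned long flags);
static enum dma_status foo_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
                struct dma_tx_state *state);
static void foo_issue_pending(struct dma_chan *chan);
static void foo_release(struct dma_device *dd);

static int foo_register(struct device *parent, struct dma_device *dd)
{
        dma_cap_set(DMA_MEMCPY, dd->cap_mask);
        dd->dev = parent;                             /* checked: must not be NULL  */
        dd->device_prep_dma_memcpy = foo_prep_memcpy; /* required by DMA_MEMCPY cap */
        dd->device_tx_status = foo_tx_status;         /* always required            */
        dd->device_issue_pending = foo_issue_pending; /* always required            */
        dd->device_release = foo_release;             /* optional, only dev_dbg'd   */

        return dma_async_device_register(dd);
}
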
1309 * dma_async_device_unregister - unregister a DMA device
1310 * @device: pointer to &struct dma_device
1315 void dma_async_device_unregister(struct dma_device *device)
1319 dmaengine_debug_unregister(device);
1321 list_for_each_entry_safe(chan, n, &device->channels, device_node)
1322 __dma_async_device_channel_unregister(device, chan);
1326 * setting DMA_PRIVATE ensures the device being torn down will not
1329 dma_cap_set(DMA_PRIVATE, device->cap_mask);
1331 ida_free(&dma_ida, device->dev_id);
1332 dma_device_put(device);
1337 static void dmam_device_release(struct device *dev, void *res)
1339 struct dma_device *device;
1341 device = *(struct dma_device **)res;
1342 dma_async_device_unregister(device);
1347 * @device: pointer to &struct dma_device
1351 int dmaenginem_async_device_register(struct dma_device *device)
1360 ret = dma_async_device_register(device);
1362 *(struct dma_device **)p = device;
1363 devres_add(device->dev, p);
1413 struct device *dev = unmap->dev;
1484 dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
1598 dev_err(tx->chan->device->dev,
1646 chan->device->device_issue_pending(chan);