Lines Matching refs:device
26 * Each device has a channels list, which runs unlocked but is never modified
27 * once the device is registered; it is just set up by the driver.
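The invariant in the comment above is what lets the core iterate a device's channels without locking: the driver populates the list once, before registration, and never touches it afterwards. A minimal driver-side sketch, assuming a hypothetical foo driver whose foo->chan[] is an array of struct dma_chan and foo->ddev is its struct dma_device:

	struct dma_device *dd = &foo->ddev;	/* foo, NR_FOO_CHANNELS are illustrative names */
	int i, ret;

	INIT_LIST_HEAD(&dd->channels);
	for (i = 0; i < NR_FOO_CHANNELS; i++) {
		foo->chan[i].device = dd;
		list_add_tail(&foo->chan[i].device_node, &dd->channels);
	}
	/* from here on the list is only read, never modified */
	ret = dma_async_device_register(dd);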
39 #include <linux/device.h>
149 * dev_to_dma_chan - convert a device pointer to its sysfs container object
150 * @dev: device node
154 static struct dma_chan *dev_to_dma_chan(struct device *dev)
158 chan_dev = container_of(dev, typeof(*chan_dev), device);
162 static ssize_t memcpy_count_show(struct device *dev,
184 static ssize_t bytes_transferred_show(struct device *dev,
206 static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
232 static void chan_dev_release(struct device *dev)
236 chan_dev = container_of(dev, typeof(*chan_dev), device);
246 /* --- client and device registration --- */
304 int node = dev_to_node(chan->device->dev);
322 struct dma_device *device;
327 list_for_each_entry(device, &dma_device_list, global_node) {
328 if (!dma_has_cap(cap, device->cap_mask) ||
329 dma_has_cap(DMA_PRIVATE, device->cap_mask))
331 list_for_each_entry(chan, &device->channels, device_node) {
364 struct dma_device *device;
373 list_for_each_entry(device, &dma_device_list, global_node) {
374 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
376 list_for_each_entry(chan, &device->channels, device_node)
392 static int dma_device_satisfies_mask(struct dma_device *device,
397 bitmap_and(has.bits, want->bits, device->cap_mask.bits,
404 return chan->device->owner;
425 struct dma_device *device = container_of(ref, struct dma_device, ref);
427 list_del_rcu(&device->global_node);
430 if (device->device_release)
431 device->device_release(device);
434 static void dma_device_put(struct dma_device *device)
437 kref_put(&device->ref, dma_device_release);
461 ret = kref_get_unless_zero(&chan->device->ref);
468 if (chan->device->device_alloc_chan_resources) {
469 ret = chan->device->device_alloc_chan_resources(chan);
476 if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
482 dma_device_put(chan->device);
503 if (!chan->client_count && chan->device->device_free_chan_resources) {
506 chan->device->device_free_chan_resources(chan);
516 dma_device_put(chan->device);
529 dev_err(chan->device->dev, "%s: timeout!\n", __func__);
556 struct dma_device *device;
560 list_for_each_entry_rcu(device, &dma_device_list, global_node) {
561 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
563 list_for_each_entry(chan, &device->channels, device_node)
565 device->device_issue_pending(chan);
573 struct dma_device *device;
578 device = chan->device;
581 if (!(test_bit(DMA_SLAVE, device->cap_mask.bits) ||
582 test_bit(DMA_CYCLIC, device->cap_mask.bits)))
590 if (!device->directions)
593 caps->src_addr_widths = device->src_addr_widths;
594 caps->dst_addr_widths = device->dst_addr_widths;
595 caps->directions = device->directions;
596 caps->min_burst = device->min_burst;
597 caps->max_burst = device->max_burst;
598 caps->max_sg_burst = device->max_sg_burst;
599 caps->residue_granularity = device->residue_granularity;
600 caps->descriptor_reuse = device->descriptor_reuse;
601 caps->cmd_pause = !!device->device_pause;
602 caps->cmd_resume = !!device->device_resume;
603 caps->cmd_terminate = !!device->device_terminate_all;
606 * DMA engine device might be configured with non-uniformly
607 * distributed slave capabilities per device channels. In this
612 if (device->device_caps)
613 device->device_caps(chan, caps);
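Taken together, the dma_get_slave_caps() lines above show the device-wide defaults being copied into the caps structure and then optionally refined per channel through the device_caps() callback. A client-side sketch, assuming chan is an already-requested channel:

	struct dma_slave_caps caps;
	int ret;

	ret = dma_get_slave_caps(chan, &caps);
	if (ret)
		return ret;		/* channel is not slave/cyclic capable */
	if (caps.cmd_pause)
		dmaengine_pause(chan);	/* only advertised when device_pause is implemented */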
656 static struct dma_chan *find_candidate(struct dma_device *device,
660 struct dma_chan *chan = private_candidate(mask, device, fn, fn_param);
669 dma_cap_set(DMA_PRIVATE, device->cap_mask);
670 device->privatecnt++;
675 dev_dbg(device->dev, "%s: %s module removed\n",
677 list_del_rcu(&device->global_node);
679 dev_dbg(device->dev,
683 if (--device->privatecnt == 0)
684 dma_cap_clear(DMA_PRIVATE, device->cap_mask);
703 struct dma_device *device = chan->device;
706 dma_cap_set(DMA_PRIVATE, device->cap_mask);
707 device->privatecnt++;
710 dev_dbg(chan->device->dev,
714 if (--device->privatecnt == 0)
715 dma_cap_clear(DMA_PRIVATE, device->cap_mask);
727 struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
738 chan = find_candidate(device, &mask, NULL, NULL);
751 * @np: device node to look for DMA channels
759 struct dma_device *device, *_d;
764 list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
765 /* Finds a DMA controller with matching device node */
766 if (np && device->dev->of_node && np != device->dev->of_node)
769 chan = find_candidate(device, mask, fn, fn_param);
786 static const struct dma_slave_map *dma_filter_match(struct dma_device *device,
788 struct device *dev)
792 if (!device->filter.mapcnt)
795 for (i = 0; i < device->filter.mapcnt; i++) {
796 const struct dma_slave_map *map = &device->filter.map[i];
808 * @dev: pointer to client device structure
813 struct dma_chan *dma_request_chan(struct device *dev, const char *name)
818 /* If device-tree is present get slave info from here */
822 /* If device was enumerated by ACPI get slave info from here */
866 if (sysfs_create_link(&chan->dev->device.kobj, &dev->kobj,
869 if (sysfs_create_link(&dev->kobj, &chan->dev->device.kobj, chan->name))
910 if (--chan->device->privatecnt == 0)
911 dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
914 sysfs_remove_link(&chan->dev->device.kobj, DMA_SLAVE_NAME);
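The lookup order hinted at in the comments above (device tree first, then ACPI, with the driver's slave map as fallback) is hidden behind dma_request_chan(). A typical consumer looks roughly like the following, where pdev stands for the client's own platform device:

	struct dma_chan *chan;

	chan = dma_request_chan(&pdev->dev, "rx");	/* "rx" must match a DT/ACPI/slave-map entry */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* ... configure and use the channel ... */

	dma_release_channel(chan);	/* drops the reference and the sysfs links created above */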
934 struct dma_device *device, *_d;
942 list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
943 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
945 list_for_each_entry(chan, &device->channels, device_node) {
949 list_del_rcu(&device->global_node);
952 dev_dbg(chan->device->dev,
973 struct dma_device *device, *_d;
980 list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
981 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
983 list_for_each_entry(chan, &device->channels, device_node)
990 static bool device_has_all_tx_types(struct dma_device *device)
992 /* A device that satisfies this test has channels that will never cause
997 if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
1002 if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
1007 if (!dma_has_cap(DMA_XOR, device->cap_mask))
1011 if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
1017 if (!dma_has_cap(DMA_PQ, device->cap_mask))
1021 if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
1029 static int get_dma_id(struct dma_device *device)
1035 device->dev_id = rc;
1039 static int __dma_async_device_channel_register(struct dma_device *device,
1057 chan->chan_id = ida_alloc(&device->chan_ida, GFP_KERNEL);
1065 chan->dev->device.class = &dma_devclass;
1066 chan->dev->device.parent = device->dev;
1068 chan->dev->dev_id = device->dev_id;
1069 dev_set_name(&chan->dev->device, "dma%dchan%d",
1070 device->dev_id, chan->chan_id);
1071 rc = device_register(&chan->dev->device);
1075 device->chancnt++;
1080 ida_free(&device->chan_ida, chan->chan_id);
1089 int dma_async_device_channel_register(struct dma_device *device,
1094 rc = __dma_async_device_channel_register(device, chan);
1103 static void __dma_async_device_channel_unregister(struct dma_device *device,
1109 WARN_ONCE(!device->device_release && chan->client_count,
1113 device->chancnt--;
1116 ida_free(&device->chan_ida, chan->chan_id);
1117 device_unregister(&chan->dev->device);
1121 void dma_async_device_channel_unregister(struct dma_device *device,
1124 __dma_async_device_channel_unregister(device, chan);
1131 * @device: pointer to &struct dma_device
1137 int dma_async_device_register(struct dma_device *device)
1142 if (!device)
1145 /* validate device routines */
1146 if (!device->dev) {
1151 device->owner = device->dev->driver->owner;
1155 if (dma_has_cap(_type, device->cap_mask) && !device->device_prep_##_name) { \
1156 dev_err(device->dev, \
1175 if (!device->device_tx_status) {
1176 dev_err(device->dev, "Device tx_status is not defined\n");
1181 if (!device->device_issue_pending) {
1182 dev_err(device->dev, "Device issue_pending is not defined\n");
1186 if (!device->device_release)
1187 dev_dbg(device->dev,
1190 kref_init(&device->ref);
1195 if (device_has_all_tx_types(device))
1196 dma_cap_set(DMA_ASYNC_TX, device->cap_mask);
1198 rc = get_dma_id(device);
1202 ida_init(&device->chan_ida);
1205 list_for_each_entry(chan, &device->channels, device_node) {
1206 rc = __dma_async_device_channel_register(device, chan);
1213 if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
1214 list_for_each_entry(chan, &device->channels, device_node) {
1228 list_add_tail_rcu(&device->global_node, &dma_device_list);
1229 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
1230 device->privatecnt++; /* Always private */
1234 dmaengine_debug_register(device);
1240 if (!device->chancnt) {
1241 ida_free(&dma_ida, device->dev_id);
1245 list_for_each_entry(chan, &device->channels, device_node) {
1251 device_unregister(&chan->dev->device);
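The validation at the top of dma_async_device_register() translates into a small set of hard requirements on the driver before registering; a sketch under the same hypothetical foo naming as above:

	dd->dev = &pdev->dev;				/* mandatory backing struct device */
	dma_cap_set(DMA_MEMCPY, dd->cap_mask);		/* each capability set needs its prep hook... */
	dd->device_prep_dma_memcpy = foo_prep_memcpy;	/* ...like this one */
	dd->device_tx_status	   = foo_tx_status;	/* mandatory */
	dd->device_issue_pending   = foo_issue_pending;	/* mandatory */
	dd->device_release	   = foo_release;	/* optional, but the core complains at tear-down without it */

	ret = dma_async_device_register(dd);
	if (ret)
		return ret;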
1259 * dma_async_device_unregister - unregister a DMA device
1260 * @device: pointer to &struct dma_device
1265 void dma_async_device_unregister(struct dma_device *device)
1269 dmaengine_debug_unregister(device);
1271 list_for_each_entry_safe(chan, n, &device->channels, device_node)
1272 __dma_async_device_channel_unregister(device, chan);
1276 * setting DMA_PRIVATE ensures the device being torn down will not
1279 dma_cap_set(DMA_PRIVATE, device->cap_mask);
1281 ida_free(&dma_ida, device->dev_id);
1282 dma_device_put(device);
1287 static void dmaenginem_async_device_unregister(void *device)
1289 dma_async_device_unregister(device);
1294 * @device: pointer to &struct dma_device
1298 int dmaenginem_async_device_register(struct dma_device *device)
1302 ret = dma_async_device_register(device);
1306 return devm_add_action_or_reset(device->dev, dmaenginem_async_device_unregister, device);
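The devm_add_action_or_reset() call above is what makes the managed variant self-cleaning; in a probe() routine, registration then reduces to roughly:

	ret = dmaenginem_async_device_register(dd);
	if (ret)
		return ret;
	/* on success, dma_async_device_unregister() runs automatically at driver detach */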
1351 struct device *dev = unmap->dev;
1422 dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
1536 dev_err(tx->chan->device->dev,
1584 chan->device->device_issue_pending(chan);
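Both issue-pending paths, the global dma_issue_pending_all() walk earlier in the file and the per-channel call on the last line, end up in the driver's device_issue_pending hook. From a client's point of view the usual sequence is roughly the following, where buf_dma and len are an already-mapped DMA buffer and its size:

	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	desc = dmaengine_prep_slave_single(chan, buf_dma, len,
					   DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);	/* ends up calling chan->device->device_issue_pending() */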