Lines matching refs:dev (DMA mapping API, include/linux/dma-mapping.h)

96 void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
97 void debug_dma_map_single(struct device *dev, const void *addr, unsigned long len);
99 static inline void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
102 static inline void debug_dma_map_single(struct device *dev, const void *addr, unsigned long len)
108 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
110 debug_dma_mapping_error(dev, dma_addr);
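
dma_mapping_error() above is the mandatory failure check for every streaming mapping. A minimal sketch of the pattern, assuming a driver-owned, kmalloc'd buffer "buf" of "len" bytes (both names are hypothetical):

#include <linux/dma-mapping.h>

static int example_map_tx(struct device *dev, void *buf, size_t len,
                          dma_addr_t *handle)
{
        /* Map a CPU buffer for a CPU -> device transfer. */
        *handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

        /* The returned handle must be checked before it is handed to the
         * hardware; on failure nothing was mapped, so nothing is unmapped.
         */
        if (dma_mapping_error(dev, *handle))
                return -ENOMEM;
        return 0;
}
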
118 dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page, size_t offset, size_t size,
120 void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir,
122 int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir,
124 void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir,
126 dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr, size_t size, enum dma_data_direction dir,
128 void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir,
130 void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir);
131 void dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir);
132 void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction dir);
133 void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction dir);
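
For mappings that stay in place across several transfers, the dma_sync_*_for_cpu()/_for_device() calls above pass buffer ownership back and forth between CPU and device. A sketch for a receive buffer assumed to have been mapped earlier with DMA_FROM_DEVICE:

#include <linux/dma-mapping.h>

static void example_recycle_rx(struct device *dev, dma_addr_t addr,
                               size_t len)
{
        /* Give the buffer to the CPU so the received data may be read. */
        dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);

        /* ... copy or parse the received data here ... */

        /* Return ownership to the device before reusing the buffer. */
        dma_sync_single_for_device(dev, addr, len, DMA_FROM_DEVICE);
}
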
134 void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs);
135 void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs);
136 void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs);
137 void dmam_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle);
138 int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr, size_t size,
140 int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size,
142 bool dma_can_mmap(struct device *dev);
143 int dma_supported(struct device *dev, u64 mask);
144 int dma_set_mask(struct device *dev, u64 mask);
145 int dma_set_coherent_mask(struct device *dev, u64 mask);
146 u64 dma_get_required_mask(struct device *dev);
147 size_t dma_max_mapping_size(struct device *dev);
148 bool dma_need_sync(struct device *dev, dma_addr_t dma_addr);
149 unsigned long dma_get_merge_boundary(struct device *dev);
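
The declarations above form the core streaming API when CONFIG_HAS_DMA is enabled. As an illustration only, a page-sized bidirectional mapping built directly on dma_map_page_attrs(); the page is assumed to have been allocated by the caller, and attrs is simply 0:

#include <linux/dma-mapping.h>
#include <linux/mm.h>

static int example_map_one_page(struct device *dev, struct page *page)
{
        dma_addr_t addr;

        addr = dma_map_page_attrs(dev, page, 0, PAGE_SIZE,
                                  DMA_BIDIRECTIONAL, 0);
        if (dma_mapping_error(dev, addr))
                return -ENOMEM;

        /* ... hand "addr" to the device and wait for completion ... */

        dma_unmap_page_attrs(dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL, 0);
        return 0;
}
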
151 static inline dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page, size_t offset, size_t size,
156 static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir,
160 static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir,
165 static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
169 static inline dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr, size_t size,
174 static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir,
178 static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
182 static inline void dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
186 static inline void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
190 static inline void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
194 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
198 static inline void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag,
203 static void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
206 static inline void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp,
211 static inline void dmam_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
214 static inline int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr,
219 static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr,
224 static inline bool dma_can_mmap(struct device *dev)
228 static inline int dma_supported(struct device *dev, u64 mask)
232 static inline int dma_set_mask(struct device *dev, u64 mask)
236 static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
240 static inline u64 dma_get_required_mask(struct device *dev)
244 static inline size_t dma_max_mapping_size(struct device *dev)
248 static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
252 static inline unsigned long dma_get_merge_boundary(struct device *dev)
258 struct page *dma_alloc_pages(struct device *dev, size_t size, dma_addr_t *dma_handle, enum dma_data_direction dir,
260 void dma_free_pages(struct device *dev, size_t size, struct page *page, dma_addr_t dma_handle,
262 void *dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle, enum dma_data_direction dir,
264 void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle,
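
dma_alloc_noncoherent() and dma_alloc_pages() return long-lived, possibly non-coherent buffers that still require explicit syncs around device access. A hedged sketch for an outbound buffer (GFP_KERNEL and the memset fill are illustrative choices):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/string.h>

static int example_noncoherent_tx(struct device *dev, size_t size)
{
        dma_addr_t handle;
        void *vaddr;

        vaddr = dma_alloc_noncoherent(dev, size, &handle,
                                      DMA_TO_DEVICE, GFP_KERNEL);
        if (!vaddr)
                return -ENOMEM;

        memset(vaddr, 0, size);         /* fill with the payload */
        /* Ownership must be handed to the device before the transfer. */
        dma_sync_single_for_device(dev, handle, size, DMA_TO_DEVICE);

        /* ... start the transfer and wait for it to finish ... */

        dma_free_noncoherent(dev, size, vaddr, handle, DMA_TO_DEVICE);
        return 0;
}
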
267 static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir,
271 if (dev_WARN_ONCE(dev, is_vmalloc_addr(ptr), "rejecting DMA map of vmalloc memory\n")) {
274 debug_dma_map_single(dev, ptr, size);
275 return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr), size, dir, attrs);
278 static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir,
281 return dma_unmap_page_attrs(dev, addr, size, dir, attrs);
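
As the dev_WARN_ONCE() above shows, dma_map_single_attrs() only accepts linearly mapped memory such as kmalloc() allocations, never vmalloc or on-stack buffers. A reduced round trip through the attrs-free wrappers (the buffer and its size are purely illustrative):

#include <linux/dma-mapping.h>
#include <linux/slab.h>

static int example_rx_once(struct device *dev, size_t len)
{
        dma_addr_t addr;
        void *buf;
        int ret = 0;

        /* Must be linearly mapped memory: kmalloc() is fine, vmalloc()
         * or stack buffers are rejected (see the dev_WARN_ONCE() above).
         */
        buf = kmalloc(len, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        addr = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, addr)) {
                ret = -ENOMEM;
                goto out_free;
        }

        /* ... let the device fill the buffer ... */

        dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
        /* The CPU may read "buf" only after the unmap (or a sync). */
out_free:
        kfree(buf);
        return ret;
}
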
284 static inline void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t addr, unsigned long offset, size_t size,
287 return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
290 static inline void dma_sync_single_range_for_device(struct device *dev, dma_addr_t addr, unsigned long offset,
293 return dma_sync_single_for_device(dev, addr + offset, size, dir);
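
The _range variants above sync only a sub-range of a mapping, which is useful when a small header or status block at a known offset changes while the rest of the buffer stays untouched. Sketch (the offset and length are made up):

#include <linux/dma-mapping.h>

/* Assume "addr" maps a large RX buffer and only a 64-byte status block
 * at offset 0 needs to be re-read by the CPU.
 */
static void example_peek_status(struct device *dev, dma_addr_t addr)
{
        dma_sync_single_range_for_cpu(dev, addr, 0, 64, DMA_FROM_DEVICE);
        /* ... read the status block ... */
        dma_sync_single_range_for_device(dev, addr, 0, 64, DMA_FROM_DEVICE);
}
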
298 * @dev: The device for which to perform the DMA operation
304 * object for the @dir DMA operation by the @dev device. After success the
312 static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt, enum dma_data_direction dir,
317 nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
327 * @dev: The device for which to perform the DMA operation
333 * object for the @dir DMA operation by the @dev device. After this function
336 static inline void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt, enum dma_data_direction dir,
339 dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
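
dma_map_sgtable() wraps dma_map_sg_attrs() for callers that keep their scatterlist in a struct sg_table; it returns 0 on success and stores the number of DMA segments in sgt->nents. A sketch, assuming the table was already populated by the caller (for example via sg_alloc_table_from_pages()) and using for_each_sgtable_dma_sg() from <linux/scatterlist.h> to walk the result:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int example_map_table(struct device *dev, struct sg_table *sgt)
{
        struct scatterlist *sg;
        int i, ret;

        ret = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
        if (ret)
                return ret;     /* 0 on success, negative errno on failure */

        /* Walk the DMA-mapped segments; an IOMMU may have merged entries,
         * so there can be fewer of them than orig_nents.
         */
        for_each_sgtable_dma_sg(sgt, sg, i) {
                dev_dbg(dev, "seg %d: %pad + %u\n", i,
                        &sg_dma_address(sg), sg_dma_len(sg));
        }

        dma_unmap_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
        return 0;
}
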
344 * @dev: The device for which to perform the DMA operation
354 static inline void dma_sync_sgtable_for_cpu(struct device *dev, struct sg_table *sgt, enum dma_data_direction dir)
356 dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
361 * @dev: The device for which to perform the DMA operation
370 static inline void dma_sync_sgtable_for_device(struct device *dev, struct sg_table *sgt, enum dma_data_direction dir)
372 dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
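
The sgtable sync helpers mirror the single-buffer variants for a table that stays mapped across transfers. Sketch, assuming "sgt" was mapped earlier with dma_map_sgtable(..., DMA_FROM_DEVICE, 0):

#include <linux/dma-mapping.h>

static void example_sync_table(struct device *dev, struct sg_table *sgt)
{
        dma_sync_sgtable_for_cpu(dev, sgt, DMA_FROM_DEVICE);
        /* ... the CPU reads the received data ... */
        dma_sync_sgtable_for_device(dev, sgt, DMA_FROM_DEVICE);
}
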
384 static inline void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp)
386 return dma_alloc_attrs(dev, size, dma_handle, gfp, (gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
389 static inline void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_handle)
391 return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
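
dma_alloc_coherent() is the simple front end to dma_alloc_attrs() for coherent (consistent) memory such as descriptor rings; no sync calls are needed for such buffers. A minimal sketch, with the ring size picked arbitrarily:

#include <linux/dma-mapping.h>

#define EXAMPLE_RING_BYTES 4096         /* hypothetical descriptor ring size */

static int example_alloc_ring(struct device *dev, void **ring,
                              dma_addr_t *ring_dma)
{
        *ring = dma_alloc_coherent(dev, EXAMPLE_RING_BYTES, ring_dma,
                                   GFP_KERNEL);
        if (!*ring)
                return -ENOMEM;

        /* ... tell the hardware where the ring lives via *ring_dma ... */
        return 0;
}

static void example_free_ring(struct device *dev, void *ring,
                              dma_addr_t ring_dma)
{
        dma_free_coherent(dev, EXAMPLE_RING_BYTES, ring, ring_dma);
}
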
394 static inline u64 dma_get_mask(struct device *dev)
396 if (dev->dma_mask && *dev->dma_mask) {
397 return *dev->dma_mask;
408 static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
410 int rc = dma_set_mask(dev, mask);
412 dma_set_coherent_mask(dev, mask);
419 * does not have dev->dma_mask appropriately setup.
421 static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
423 dev->dma_mask = &dev->coherent_dma_mask;
424 return dma_set_mask_and_coherent(dev, mask);
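
dma_set_mask_and_coherent() is what most drivers call once during probe to declare how many address bits the device can drive; falling back to a narrower mask on failure is the usual pattern. A sketch (the 64/32-bit choice is only an example):

#include <linux/device.h>
#include <linux/dma-mapping.h>

static int example_setup_masks(struct device *dev)
{
        /* Try the full 64-bit mask first, then fall back to 32 bits. */
        if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
            dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
                dev_err(dev, "no usable DMA addressing mode\n");
                return -ENODEV;
        }
        return 0;
}
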
429 * @dev: device to check
435 static inline bool dma_addressing_limited(struct device *dev)
437 return min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) < dma_get_required_mask(dev);
440 static inline unsigned int dma_get_max_seg_size(struct device *dev)
442 if (dev->dma_parms && dev->dma_parms->max_segment_size) {
443 return dev->dma_parms->max_segment_size;
448 static inline int dma_set_max_seg_size(struct device *dev, unsigned int size)
450 if (dev->dma_parms) {
451 dev->dma_parms->max_segment_size = size;
457 static inline unsigned long dma_get_seg_boundary(struct device *dev)
459 if (dev->dma_parms && dev->dma_parms->segment_boundary_mask) {
460 return dev->dma_parms->segment_boundary_mask;
467 * @dev: device to query the boundary for
473 * If @dev is NULL a boundary of U32_MAX is assumed, this case is just for
476 static inline unsigned long dma_get_seg_boundary_nr_pages(struct device *dev, unsigned int page_shift)
478 if (!dev) {
481 return (dma_get_seg_boundary(dev) >> page_shift) + 1;
484 static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
486 if (dev->dma_parms) {
487 dev->dma_parms->segment_boundary_mask = mask;
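
dma_set_max_seg_size() and dma_set_seg_boundary() advertise the device's scatter-gather limits through dev->dma_parms, which the bus code is assumed to have allocated already (the PCI core does this for its devices). An illustrative probe-time snippet with made-up limits:

#include <linux/dma-mapping.h>
#include <linux/sizes.h>

static void example_set_sg_limits(struct device *dev)
{
        /* Hypothetical hardware limits: 64 KiB segments that must not
         * cross a 4 GiB boundary.
         */
        dma_set_max_seg_size(dev, SZ_64K);
        dma_set_seg_boundary(dev, DMA_BIT_MASK(32));
}
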
493 static inline unsigned int dma_get_min_align_mask(struct device *dev)
495 if (dev->dma_parms) {
496 return dev->dma_parms->min_align_mask;
501 static inline int dma_set_min_align_mask(struct device *dev, unsigned int min_align_mask)
503 if (WARN_ON_ONCE(!dev->dma_parms)) {
506 dev->dma_parms->min_align_mask = min_align_mask;
518 static inline void *dmam_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp)
520 return dmam_alloc_attrs(dev, size, dma_handle, gfp, (gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
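
dmam_alloc_coherent() is the device-managed (devres) flavour: the allocation is released automatically when the driver detaches, so no explicit free is needed on the error or remove paths. Sketch:

#include <linux/dma-mapping.h>

static int example_probe_ring(struct device *dev, size_t size,
                              dma_addr_t *ring_dma)
{
        void *ring;

        ring = dmam_alloc_coherent(dev, size, ring_dma, GFP_KERNEL);
        if (!ring)
                return -ENOMEM;

        /* No matching free call: devres releases the buffer when the
         * driver is unbound.
         */
        return 0;
}
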
523 static inline void *dma_alloc_wc(struct device *dev, size_t size, dma_addr_t *dma_addr, gfp_t gfp)
531 return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
534 static inline void dma_free_wc(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_addr)
536 return dma_free_attrs(dev, size, cpu_addr, dma_addr, DMA_ATTR_WRITE_COMBINE);
539 static inline int dma_mmap_wc(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr,
542 return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, DMA_ATTR_WRITE_COMBINE);
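
The _wc helpers allocate and map write-combining memory, typically frame-buffer style data that user space later maps through the driver's mmap file operation. A reduced sketch of that handler, assuming the buffer was obtained earlier with dma_alloc_wc() and its addresses are stashed in a hypothetical private structure:

#include <linux/dma-mapping.h>
#include <linux/mm.h>

struct example_priv {                   /* hypothetical driver state */
        struct device *dev;
        void *vaddr;
        dma_addr_t dma_addr;
        size_t size;
};

static int example_mmap(struct example_priv *priv,
                        struct vm_area_struct *vma)
{
        /* Re-export the write-combining buffer to user space. */
        return dma_mmap_wc(priv->dev, vma, priv->vaddr,
                           priv->dma_addr, priv->size);
}
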
569 int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start, dma_addr_t dma_start, u64 size);