// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/dma-map-ops.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>

/*
 * ARCH specific callbacks for generic noncoherent DMA ops
 *  - hardware IOC not available (or "dma-coherent" not set for device in DT)
 *  - But still handle both coherent and non-coherent requests from caller
 *
 * For DMA coherent hardware (IOC) generic code suffices
 */

void arch_dma_prep_coherent(struct page *page, size_t size)
{
	/*
	 * Evict any existing L1 and/or L2 lines for the backing page
	 * in case it was used earlier as a normal "cached" page.
	 * Yeah this bit us - STAR 9000898266
	 *
	 * Although core does call flush_cache_vmap(), it gets kvaddr hence
	 * can't be used to efficiently flush L1 and/or L2 which need paddr
	 * Currently flush_cache_vmap nukes the L1 cache completely which
	 * will be optimized as a separate commit
	 */
	dma_cache_wback_inv(page_to_phys(page), size);
}

/*
 * Cache operations depending on function and direction argument, inspired by
 * https://lkml.org/lkml/2018/5/18/979
 * "dma_sync_*_for_cpu and direction=TO_DEVICE (was Re: [PATCH 02/20]
 * dma-mapping: provide a generic dma-noncoherent implementation)"
 *
 *          |   map          ==  for_device     |   unmap     ==  for_cpu
 *          |----------------------------------------------------------------
 * TO_DEV   |   writeback        writeback      |   none          none
 * FROM_DEV |   invalidate       invalidate     |   invalidate*   invalidate*
 * BIDIR    |   writeback+inv    writeback+inv  |   invalidate    invalidate
 *
 *     [*] needed for CPU speculative prefetches
 *
 * NOTE: we don't check the validity of direction argument as it is done in
 * upper layer functions (in include/linux/dma-mapping.h)
 */

void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		dma_cache_wback(paddr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(paddr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(paddr, size);
		break;

	default:
		break;
	}
}

void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		break;

	/* FROM_DEVICE invalidate needed if speculative CPU prefetch only */
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		dma_cache_inv(paddr, size);
		break;

	default:
		break;
	}
}

/*
 * Plug in direct dma map ops.
 */
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	/*
	 * IOC hardware snoops all DMA traffic keeping the caches consistent
	 * with memory - eliding need for any explicit cache maintenance of
	 * DMA buffers.
	 */
	if (is_isa_arcv2() && ioc_enable && coherent)
		dev->dma_coherent = true;

	dev_info(dev, "use %scoherent DMA ops\n",
		 dev->dma_coherent ? "" : "non");
}
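
/*
 * Illustrative sketch (not part of the upstream file): how a driver-level
 * dma_map_single()/dma_unmap_single() pair reaches the callbacks above on
 * non-coherent hardware (no IOC snoop), per the direction table. Buffer and
 * length names are hypothetical and shown only to clarify the call flow:
 *
 *	void *buf = kmalloc(len, GFP_KERNEL);
 *	dma_addr_t handle;
 *
 *	// map == for_device: TO_DEV writes back any dirty CPU cache lines
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	//   -> arch_sync_dma_for_device(paddr, len, DMA_TO_DEVICE)
 *	//   -> dma_cache_wback(paddr, len)
 *
 *	// ... device consumes the buffer ...
 *
 *	// unmap == for_cpu: nothing to do for TO_DEV, per the table
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 *	//   -> arch_sync_dma_for_cpu(paddr, len, DMA_TO_DEVICE) is a no-op
 */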