// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06	 Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/highmem.h>

#include <asm/cache.h>
#include <asm/cpu-type.h>
#include <asm/io.h>

/*
 * The affected CPUs below in 'cpu_needs_post_dma_flush()' can speculatively
 * fill random cachelines with stale data at any time, requiring an extra
 * flush post-DMA.
 *
 * Warning on the terminology - Linux calls an uncached area coherent; MIPS
 * terminology calls memory areas with hardware maintained coherency coherent.
 *
 * Note that the R14000 and R16000 should also be checked for in this
 * condition.  However, this function is only called on non-I/O-coherent
 * systems, and only the R10000 and R12000 are used in such systems: the
 * SGI IP28 Indigo² and the SGI IP32 aka O2, respectively.
 */
static inline bool cpu_needs_post_dma_flush(void)
{
	switch (boot_cpu_type()) {
	case CPU_R10000:
	case CPU_R12000:
	case CPU_BMIPS5000:
	case CPU_LOONGSON2EF:
	case CPU_XBURST:
		return true;
	default:
		/*
		 * Presence of MAARs suggests that the CPU supports
		 * speculatively prefetching data, and therefore requires
		 * the post-DMA flush/invalidate.
		 */
		return cpu_has_maar;
	}
}

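/*
 * Prepare a buffer for coherent (uncached) use: write back and invalidate
 * any cached lines covering it so the uncached alias and the device see
 * consistent data.
 */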
void arch_dma_prep_coherent(struct page *page, size_t size)
{
	dma_cache_wback_inv((unsigned long)page_address(page), size);
}

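/*
 * Return the uncached alias of @addr by rebasing its physical address into
 * the uncached segment at UNCAC_BASE.
 */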
void *arch_dma_set_uncached(void *addr, size_t size)
{
	return (void *)(__pa(addr) + UNCAC_BASE);
}

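/*
 * Cache maintenance on a kernel-virtual region before the device accesses
 * it: write back dirty lines for DMA_TO_DEVICE, invalidate stale lines for
 * DMA_FROM_DEVICE, and do both for DMA_BIDIRECTIONAL.
 */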
static inline void dma_sync_virt_for_device(void *addr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		dma_cache_wback((unsigned long)addr, size);
		break;
	case DMA_FROM_DEVICE:
		dma_cache_inv((unsigned long)addr, size);
		break;
	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv((unsigned long)addr, size);
		break;
	default:
		BUG();
	}
}

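/*
 * Cache maintenance after the device has written the region: invalidate any
 * lines the CPU may have (speculatively) refilled while the DMA was in
 * flight.  Nothing is needed for DMA_TO_DEVICE.
 */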
static inline void dma_sync_virt_for_cpu(void *addr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		break;
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		dma_cache_inv((unsigned long)addr, size);
		break;
	default:
		BUG();
	}
}

/*
 * A single sg entry may refer to multiple physically contiguous pages.  But
 * we still need to process highmem pages individually.  If highmem is not
 * configured then the bulk of this loop gets optimized out.
 */
static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir, bool for_device)
{
	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
	unsigned long offset = paddr & ~PAGE_MASK;
	size_t left = size;

	do {
		size_t len = left;
		void *addr;

		if (PageHighMem(page)) {
			if (offset + len > PAGE_SIZE)
				len = PAGE_SIZE - offset;
		}

		addr = kmap_atomic(page);
		if (for_device)
			dma_sync_virt_for_device(addr + offset, len, dir);
		else
			dma_sync_virt_for_cpu(addr + offset, len, dir);
		kunmap_atomic(addr);

		offset = 0;
		page++;
		left -= len;
	} while (left);
}

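/*
 * Make @paddr..@paddr + @size visible to the device before DMA starts
 * (writeback and/or invalidate, depending on the transfer direction).
 */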
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	dma_sync_phys(paddr, size, dir, true);
}

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
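/*
 * Make DMA-written data visible to the CPU; only needed on CPUs that may
 * have speculatively refilled cachelines while the transfer was in flight.
 */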
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	if (cpu_needs_post_dma_flush())
		dma_sync_phys(paddr, size, dir, false);
}
#endif

#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
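/*
 * Record whether the device is DMA-coherent so the generic DMA code can
 * skip the cache maintenance above for coherent devices.
 */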
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
		const struct iommu_ops *iommu, bool coherent)
{
	dev->dma_coherent = coherent;
}
#endif