// SPDX-License-Identifier: GPL-2.0-only
/*
 * Microblaze support for cache consistent memory.
 * Copyright (C) 2010 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2010 PetaLogix
 * Copyright (C) 2005 John Williams <jwilliams@itee.uq.edu.au>
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/dma-map-ops.h>
#include <asm/cpuinfo.h>
#include <asm/cacheflush.h>

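/*
 * Flush the data cache over the buffer before it is handed out as a
 * coherent/uncached mapping, so that no stale or dirty cache lines cover
 * memory the device will read from or write to behind the CPU's back.
 */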
void arch_dma_prep_coherent(struct page *page, size_t size)
{
	phys_addr_t paddr = page_to_phys(page);

	flush_dcache_range(paddr, paddr + size);
}

#ifndef CONFIG_MMU
/*
 * Consistent memory allocators. Used for DMA devices that want to share
 * uncached memory with the processor core.  My crufty no-MMU approach is
 * simple.  In the HW platform we can optionally mirror the DDR up above the
 * processor's cacheable region, so memory accessed through this mirror region
 * is not cached.  It is allocated from the same pool as normal memory, but the
 * handle we return is shifted up into the uncached region.  This will no doubt
 * cause big problems if memory allocated here is not also freed properly. -- JW
 *
 * I have to use the dcache values because I can't rely on the RAM size:
 */
#ifdef CONFIG_XILINX_UNCACHED_SHADOW
#define UNCACHED_SHADOW_MASK (cpuinfo.dcache_high - cpuinfo.dcache_base + 1)
#else
#define UNCACHED_SHADOW_MASK 0
#endif /* CONFIG_XILINX_UNCACHED_SHADOW */
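/*
 * Illustrative example with made-up addresses: if the cacheable DDR spans
 * 0x80000000..0x8fffffff (dcache_base/dcache_high), UNCACHED_SHADOW_MASK is
 * 0x10000000, and ORing it into a kernel pointer such as 0x80001000 yields
 * 0x90001000 - the same byte of DDR seen through the uncached shadow window.
 */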

void *arch_dma_set_uncached(void *ptr, size_t size)
{
	unsigned long addr = (unsigned long)ptr;

	/* Shift the address up into the uncached shadow region. */
	addr |= UNCACHED_SHADOW_MASK;
	if (addr >= cpuinfo.dcache_base && addr <= cpuinfo.dcache_high)
		pr_warn("ERROR: Your cache coherent area is CACHED!!!\n");
	return (void *)addr;
}
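/*
 * Usage sketch (not part of this file; the device pointer and buffer size
 * below are made up): a driver normally reaches this path through the
 * generic DMA API, e.g.
 *
 *	dma_addr_t dma_handle;
 *	void *vaddr = dma_alloc_coherent(dev, SZ_4K, &dma_handle, GFP_KERNEL);
 *
 * On !MMU MicroBlaze the dma-direct allocator is expected to flush the
 * buffer via arch_dma_prep_coherent() and then return the pointer produced
 * by arch_dma_set_uncached(), so vaddr points into the uncached shadow.
 */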
#endif /* !CONFIG_MMU */