xref: /kernel/linux/linux-5.10/arch/arc/mm/ioremap.c (revision 8c2ecf20)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/cache.h>

/*
 * ARCompact hardwires an upper slice of the 32-bit space as uncached;
 * ARCv2 instead has an uncached peripheral window whose bounds are kept
 * in perip_base/perip_end (set up during boot).
 */
static inline bool arc_uncached_addr_space(phys_addr_t paddr)
{
	if (is_isa_arcompact()) {
		if (paddr >= ARC_UNCACHED_ADDR_SPACE)
			return true;
	} else if (paddr >= perip_base && paddr <= perip_end) {
		return true;
	}

	return false;
}
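
/*
 * Illustrative sketch, not part of the original file: how the helper above
 * splits the physical space.  example_uncached_check() is hypothetical, and
 * it assumes perip_base holds the lower bound of the h/w uncached window on
 * the running ISA (on ARCompact it defaults to ARC_UNCACHED_ADDR_SPACE).
 */
static void __maybe_unused example_uncached_check(void)
{
	/* Inside the h/w uncached window: ioremap() can skip the MMU */
	WARN_ON(!arc_uncached_addr_space(perip_base));

	/* Just below it: an uncached MMU mapping has to be created */
	WARN_ON(arc_uncached_addr_space(perip_base - PAGE_SIZE));
}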

void __iomem *ioremap(phys_addr_t paddr, unsigned long size)
{
	phys_addr_t end;

	/* Don't allow wraparound or zero size */
	end = paddr + size - 1;
	if (!size || (end < paddr))
		return NULL;

	/*
	 * If the region is h/w uncached, the MMU mapping can be elided as an
	 * optimization.
	 * The cast to u32 is fine as this region can only be inside 4GB.
	 */
	if (arc_uncached_addr_space(paddr))
		return (void __iomem *)(u32)paddr;

	return ioremap_prot(paddr, size, PAGE_KERNEL_NO_CACHE);
}
EXPORT_SYMBOL(ioremap);
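
/*
 * Usage sketch, not part of the original file: the driver-side pattern
 * ioremap()/iounmap() serve.  The function, MMIO base and register offset
 * are all hypothetical.
 */
static int __maybe_unused example_map_device(void)
{
	void __iomem *regs;

	regs = ioremap(0xf0001000, PAGE_SIZE);	/* hypothetical MMIO window */
	if (!regs)
		return -ENOMEM;

	writel(1, regs + 0x10);			/* hypothetical ctrl register */

	iounmap(regs);
	return 0;
}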

/*
 * ioremap with access flags
 * Cache semantics wise it is the same as ioremap - "forced" uncached.
 * However unlike vanilla ioremap, which bypasses the ARC MMU for addresses
 * in the ARC hardware uncached region, this one still goes through the MMU,
 * as the caller might need finer access control (R/W/X).
 */
void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
			   unsigned long flags)
{
	unsigned int off;
	unsigned long vaddr;
	struct vm_struct *area;
	phys_addr_t end;
	pgprot_t prot = __pgprot(flags);

	/* Don't allow wraparound or zero size */
	end = paddr + size - 1;
	if (!size || (end < paddr))
		return NULL;

	/* An early platform driver might end up here */
	if (!slab_is_available())
		return NULL;

	/* force uncached */
	prot = pgprot_noncached(prot);

	/*
	 * Mappings have to be page-aligned.  With 4K pages, e.g.
	 * paddr 0x1000_0804 / size 0x1000 becomes off = 0x804,
	 * paddr = 0x1000_0000, size = 0x2000 (two whole pages).
	 */
	off = paddr & ~PAGE_MASK;
	paddr &= PAGE_MASK_PHYS;
	size = PAGE_ALIGN(end + 1) - paddr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = paddr;
	vaddr = (unsigned long)area->addr;
	if (ioremap_page_range(vaddr, vaddr + size, paddr, prot)) {
		vunmap((void __force *)vaddr);
		return NULL;
	}
	return (void __iomem *)(off + (char __iomem *)vaddr);
}
EXPORT_SYMBOL(ioremap_prot);
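
/*
 * Usage sketch, not part of the original file: a caller wanting finer
 * access control, e.g. a read-only mapping of a device ROM window.  The
 * helper is hypothetical; _PAGE_WRITE is ARC's write-permission PTE flag,
 * and the mapping still comes out uncached per the function above.
 */
static void __iomem * __maybe_unused
example_map_rom(phys_addr_t rom_base, unsigned long rom_size)
{
	unsigned long flags = pgprot_val(PAGE_KERNEL) & ~_PAGE_WRITE;

	return ioremap_prot(rom_base, rom_size, flags);
}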

void iounmap(const volatile void __iomem *addr)
{
	/* weird double cast to handle phys_addr_t > 32 bits */
	if (arc_uncached_addr_space((phys_addr_t)(u32)addr))
		return;

	vfree((void *)(PAGE_MASK & (unsigned long __force)addr));
}
EXPORT_SYMBOL(iounmap);
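
/*
 * Sketch, not part of the original file: the early return above is the
 * mirror image of the fast path in ioremap().  For a h/w uncached address
 * the "mapping" was just the physical address cast to a pointer, so there
 * is no vmalloc area to tear down:
 *
 *	void __iomem *p = ioremap(perip_base, 4);	// returns perip_base
 *	iounmap(p);					// no-op
 */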