// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/security.h>
#include <asm/cacheflush.h>
#include <asm/machdep.h>
#include <asm/mman.h>
#include <asm/tlb.h>

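/*
 * Flush the TLB entries for the hugetlb page mapped at @vmaddr on all
 * CPUs, using the page size of the hstate backing the VMA's file.
 */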
void radix__flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	int psize;
	struct hstate *hstate = hstate_file(vma->vm_file);

	psize = hstate_get_psize(hstate);
	radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, psize);
}

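/*
 * As above, but only invalidate the local CPU's TLB entries for the
 * hugetlb page at @vmaddr.
 */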
void radix__local_flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	int psize;
	struct hstate *hstate = hstate_file(vma->vm_file);

	psize = hstate_get_psize(hstate);
	radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, psize);
}

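/*
 * Flush the TLB entries covering @start..@end of the VMA, using the
 * huge page size of the backing hstate rather than the base page size.
 */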
void radix__flush_hugetlb_tlb_range(struct vm_area_struct *vma, unsigned long start,
				   unsigned long end)
{
	int psize;
	struct hstate *hstate = hstate_file(vma->vm_file);

	psize = hstate_get_psize(hstate);
	radix__flush_tlb_range_psize(vma->vm_mm, start, end, psize);
}

/*
 * A variant of hugetlb_get_unmapped_area() doing a topdown search.
 * FIXME!! Should we do as x86 or the non-hugetlb area does, i.e.
 * choose topdown or bottom-up based on a mmap_is_legacy() check?
 */
unsigned long
radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
				unsigned long len, unsigned long pgoff,
				unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct hstate *h = hstate_file(file);
	int fixed = (flags & MAP_FIXED);
	unsigned long high_limit;
	struct vm_unmapped_area_info info;

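	/*
	 * By default we only hand out addresses below DEFAULT_MAP_WINDOW.
	 * A caller opts in to the full TASK_SIZE address space by passing
	 * a hint (or MAP_FIXED address) above that boundary.
	 */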
	high_limit = DEFAULT_MAP_WINDOW;
	if (addr >= high_limit || (fixed && (addr + len > high_limit)))
		high_limit = TASK_SIZE;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > high_limit)
		return -ENOMEM;

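	/*
	 * For MAP_FIXED the placement is the caller's responsibility;
	 * just check that the range fits below the limit and that both
	 * address and length are suitably aligned for a huge page.
	 */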
	if (fixed) {
		if (addr > high_limit - len)
			return -ENOMEM;
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

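	/*
	 * Try the caller's hint first: round it up to the huge page size
	 * and use it if the gap there is large enough and unoccupied.
	 */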
	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (high_limit - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}
	/*
	 * We always do a topdown search here; the slice code does
	 * the same.
	 */
	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
	/* mmap_base was computed against DEFAULT_MAP_WINDOW; shift it if we may map above. */
	info.high_limit = mm->mmap_base + (high_limit - DEFAULT_MAP_WINDOW);
	/* Require the returned address to be huge page aligned. */
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;

	return vm_unmapped_area(&info);
}

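/*
 * Commit the new PTE for a hugetlb protection change (the commit half
 * of the ptep_modify_prot start/commit pair on radix).
 */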
void radix__huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep,
					 pte_t old_pte, pte_t pte)
{
	struct mm_struct *mm = vma->vm_mm;

	/*
	 * To avoid a nest MMU (NMMU) hang while relaxing access, we need
	 * to flush the TLB before we set the new value.
	 */
	if (is_pte_rw_upgrade(pte_val(old_pte), pte_val(pte)) &&
	    (atomic_read(&mm->context.copros) > 0))
		radix__flush_hugetlb_page(vma, addr);

	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}