/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 Ralf Baechle
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <linux/mm_types.h>
#include <linux/mmzone.h>
#ifdef CONFIG_32BIT
#include <asm/pgtable-32.h>
#endif
#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#endif

#include <asm/cmpxchg.h>
#include <asm/io.h>
#include <asm/pgtable-bits.h>
#include <asm/cpu-features.h>

struct mm_struct;
struct vm_area_struct;

#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_NO_READ | \
				 _page_cachable_default)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | \
				 _page_cachable_default)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_NO_EXEC | \
				 _page_cachable_default)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | \
				 _page_cachable_default)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _page_cachable_default)
#define PAGE_KERNEL_NC	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
			__WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)

/*
 * If _PAGE_NO_EXEC is not defined, we can't enforce execute protection,
 * so execute permission is treated as equivalent to read. Also, write
 * permission implies read permission. This is the closest we can get
 * by reasonable means.
 */

/*
 * Dummy values to fill the table in mmap.c.
 * The real values are generated at runtime; see the sketch below.
 */
#define __P000 __pgprot(0)
#define __P001 __pgprot(0)
#define __P010 __pgprot(0)
#define __P011 __pgprot(0)
#define __P100 __pgprot(0)
#define __P101 __pgprot(0)
#define __P110 __pgprot(0)
#define __P111 __pgprot(0)

#define __S000 __pgprot(0)
#define __S001 __pgprot(0)
#define __S010 __pgprot(0)
#define __S011 __pgprot(0)
#define __S100 __pgprot(0)
#define __S101 __pgprot(0)
#define __S110 __pgprot(0)
#define __S111 __pgprot(0)
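
/*
 * Illustrative sketch, not part of the original header: the zero
 * entries above are overwritten during boot, once the CPU has been
 * probed and _page_cachable_default is known.  The runtime setup
 * lives in the arch/mips/mm code; the entries below are an assumed
 * approximation built from the PAGE_* semantics defined above, not
 * the exact table:
 *
 *	static void setup_protection_map(void)
 *	{
 *		// PROT_NONE: present but neither readable nor executable
 *		protection_map[0] = __pgprot(_page_cachable_default |
 *			_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
 *		// PROT_READ: present and readable, not executable
 *		protection_map[1] = __pgprot(_page_cachable_default |
 *			_PAGE_PRESENT | _PAGE_NO_EXEC);
 *		// the remaining 14 entries follow the same pattern,
 *		// adding _PAGE_WRITE for the shared-writable cases
 *	}
 */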

extern unsigned long _page_cachable_default;

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
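
/*
 * Illustrative note, not part of the original header: on CPUs whose
 * caches can alias, the zero page is replicated once per cache colour
 * and zero_page_mask covers the colour bits of the virtual address,
 * so callers get a zero page of the matching colour:
 *
 *	struct page *a = ZERO_PAGE(0x00000000);
 *	struct page *b = ZERO_PAGE(0x00001000);	// may differ from a
 *
 * On cache-coherent CPUs zero_page_mask is 0 and every call returns
 * the same page.
 */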

extern void paging_init(void);

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define pmd_phys(pmd)		virt_to_phys((void *)pmd_val(pmd))

#define __pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_page(pmd)		__pmd_page(pmd)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE  */

#define pmd_page_vaddr(pmd)	pmd_val(pmd)

#define htw_stop()							\
do {									\
	unsigned long flags;						\
									\
	if (cpu_has_htw) {						\
		local_irq_save(flags);					\
		if (!raw_current_cpu_data.htw_seq++) {			\
			write_c0_pwctl(read_c0_pwctl() &		\
				       ~(1 << MIPS_PWCTL_PWEN_SHIFT));	\
			back_to_back_c0_hazard();			\
		}							\
		local_irq_restore(flags);				\
	}								\
} while(0)

#define htw_start()							\
do {									\
	unsigned long flags;						\
									\
	if (cpu_has_htw) {						\
		local_irq_save(flags);					\
		if (!--raw_current_cpu_data.htw_seq) {			\
			write_c0_pwctl(read_c0_pwctl() |		\
				       (1 << MIPS_PWCTL_PWEN_SHIFT));	\
			back_to_back_c0_hazard();			\
		}							\
		local_irq_restore(flags);				\
	}								\
} while(0)
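
/*
 * Illustrative note, not part of the original header: htw_stop() and
 * htw_start() nest through the per-CPU htw_seq counter, so the
 * hardware page table walker is only touched at the outermost pair:
 *
 *	htw_stop();	// htw_seq 0 -> 1, PWEn cleared, walker off
 *	htw_stop();	// htw_seq 1 -> 2, no register access
 *	htw_start();	// htw_seq 2 -> 1, no register access
 *	htw_start();	// htw_seq 1 -> 0, PWEn set, walker back on
 */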

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval);

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

#ifdef CONFIG_XPA
# define pte_none(pte)		(!(((pte).pte_high) & ~_PAGE_GLOBAL))
#else
# define pte_none(pte)		(!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
#endif

#define pte_present(pte)	((pte).pte_low & _PAGE_PRESENT)
#define pte_no_exec(pte)	((pte).pte_low & _PAGE_NO_EXEC)

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;

#ifdef CONFIG_XPA
	if (pte.pte_high & _PAGE_GLOBAL) {
#else
	if (pte.pte_low & _PAGE_GLOBAL) {
#endif
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
		if (pte_none(*buddy)) {
			if (!IS_ENABLED(CONFIG_XPA))
				buddy->pte_low |= _PAGE_GLOBAL;
			buddy->pte_high |= _PAGE_GLOBAL;
		}
	}
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t null = __pte(0);

	htw_stop();
	/* Preserve global status for the pair */
	if (IS_ENABLED(CONFIG_XPA)) {
		if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL)
			null.pte_high = _PAGE_GLOBAL;
	} else {
		if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
			null.pte_low = null.pte_high = _PAGE_GLOBAL;
	}

	set_pte_at(mm, addr, ptep, null);
	htw_start();
}
#else

#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
#define pte_no_exec(pte)	(pte_val(pte) & _PAGE_NO_EXEC)

/*
 * Certain architectures need to do special things when pte's
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
#if !defined(CONFIG_CPU_R3K_TLB)
	if (pte_val(pteval) & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
# if defined(CONFIG_PHYS_ADDR_T_64BIT) && !defined(CONFIG_CPU_MIPS32)
		cmpxchg64(&buddy->pte, 0, _PAGE_GLOBAL);
# else
		cmpxchg(&buddy->pte, 0, _PAGE_GLOBAL);
# endif
	}
#endif
}
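
/*
 * Illustrative note, not part of the original header: a MIPS TLB entry
 * maps an even/odd pair of pages, and ptep_buddy() names the other PTE
 * of that pair, so the global bit must agree across both.  The
 * cmpxchg() above atomically upgrades only a none PTE (raw value 0) to
 * bare _PAGE_GLOBAL and leaves a populated buddy alone, with no need
 * for the page table lock:
 *
 *	old = cmpxchg(&buddy->pte, 0, _PAGE_GLOBAL);
 *	// old == 0: buddy was none, now carries the global bit
 *	// old != 0: buddy is populated and must already be global
 */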

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	htw_stop();
#if !defined(CONFIG_CPU_R3K_TLB)
	/* Preserve global status for the pair */
	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
		set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
	else
#endif
		set_pte_at(mm, addr, ptep, __pte(0));
	htw_start();
}
#endif

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	extern void __update_cache(unsigned long address, pte_t pte);

	if (!pte_present(pteval))
		goto cache_sync_done;

	if (pte_present(*ptep) && (pte_pfn(*ptep) == pte_pfn(pteval)))
		goto cache_sync_done;

	__update_cache(addr, pteval);
cache_sync_done:
	set_pte(ptep, pteval);
}
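
/*
 * Illustrative sketch, not part of the original header: set_pte_at()
 * is the interface generic mm code uses to install a translation; a
 * simplified, assumed caller from a fault path looks like:
 *
 *	pte_t entry = mk_pte(page, vma->vm_page_prot);
 *	set_pte_at(vma->vm_mm, address, ptep, entry);
 *
 * The __update_cache() step above only runs when a present PTE for a
 * new pfn is installed; clearing a PTE, or rewriting the same pfn,
 * skips straight to set_pte().
 */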

/*
 * (pmds are folded into puds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while(0)

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * (puds are folded into pgds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while(0)
#endif

#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2	(__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)

/*
 * We used to declare this array with its size, but gcc 3.3 and older
 * are not able to see that the size expression is a constant, so the
 * size is dropped.
 */
extern pgd_t swapper_pg_dir[];

/*
 * Platform specific pte_special() and pte_mkspecial() definitions
 * are required only when ARCH_HAS_PTE_SPECIAL is enabled.
 */
#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline int pte_special(pte_t pte)
{
	return pte.pte_low & _PAGE_SPECIAL;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte.pte_low |= _PAGE_SPECIAL;
	return pte;
}
#else
static inline int pte_special(pte_t pte)
{
	return pte_val(pte) & _PAGE_SPECIAL;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}
#endif
#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline int pte_write(pte_t pte)	{ return pte.pte_low & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte.pte_low & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte.pte_low & _PAGE_ACCESSED; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_WRITE;
	if (!IS_ENABLED(CONFIG_XPA))
		pte.pte_low &= ~_PAGE_SILENT_WRITE;
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_MODIFIED;
	if (!IS_ENABLED(CONFIG_XPA))
		pte.pte_low &= ~_PAGE_SILENT_WRITE;
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_ACCESSED;
	if (!IS_ENABLED(CONFIG_XPA))
		pte.pte_low &= ~_PAGE_SILENT_READ;
	pte.pte_high &= ~_PAGE_SILENT_READ;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte.pte_low |= _PAGE_WRITE;
	if (pte.pte_low & _PAGE_MODIFIED) {
		if (!IS_ENABLED(CONFIG_XPA))
			pte.pte_low |= _PAGE_SILENT_WRITE;
		pte.pte_high |= _PAGE_SILENT_WRITE;
	}
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte.pte_low |= _PAGE_MODIFIED;
	if (pte.pte_low & _PAGE_WRITE) {
		if (!IS_ENABLED(CONFIG_XPA))
			pte.pte_low |= _PAGE_SILENT_WRITE;
		pte.pte_high |= _PAGE_SILENT_WRITE;
	}
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte.pte_low |= _PAGE_ACCESSED;
	if (!(pte.pte_low & _PAGE_NO_READ)) {
		if (!IS_ENABLED(CONFIG_XPA))
			pte.pte_low |= _PAGE_SILENT_READ;
		pte.pte_high |= _PAGE_SILENT_READ;
	}
	return pte;
}
#else
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED | _PAGE_SOFT_DIRTY;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	if (!(pte_val(pte) & _PAGE_NO_READ))
		pte_val(pte) |= _PAGE_SILENT_READ;
	return pte;
}

#define pte_sw_mkyoung	pte_mkyoung

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }

static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_HUGE;
	return pte;
}
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline bool pte_soft_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_SOFT_DIRTY;
}
#define pte_swp_soft_dirty pte_soft_dirty

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_mksoft_dirty pte_mksoft_dirty

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_SOFT_DIRTY);
	return pte;
}
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

#endif

/*
 * Macro to mark a page protection value as "uncacheable".  Note
 * that "protection" is really a misnomer here as the protection value
 * contains the memory attribute bits, dirty bits, and various other
 * bits as well.
 */
#define pgprot_noncached pgprot_noncached

static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;

	return __pgprot(prot);
}

#define pgprot_writecombine pgprot_writecombine

static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	/* cpu_data[0].writecombine is already shifted by _CACHE_SHIFT */
	prot = (prot & ~_CACHE_MASK) | cpu_data[0].writecombine;

	return __pgprot(prot);
}
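
/*
 * Illustrative sketch, not part of the original header: a driver's
 * mmap handler would typically apply one of these helpers before
 * mapping device memory (assumed driver context; remap_pfn_range()
 * is the generic mm helper):
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	return remap_pfn_range(vma, vma->vm_start, pfn,
 *			       vma->vm_end - vma->vm_start,
 *			       vma->vm_page_prot);
 */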

static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
						unsigned long address)
{
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (!pte_same(*ptep, entry))
		set_pte_at(vma->vm_mm, address, ptep, entry);
	/*
	 * update_mmu_cache will unconditionally execute, handling both
	 * the case that the PTE changed and the spurious fault case.
	 */
	return true;
}
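
/*
 * Illustrative sketch, not part of the original header: generic mm
 * calls ptep_set_access_flags() from the fault path when only the
 * access bits need updating; simplified, assumed caller:
 *
 *	entry = pte_mkyoung(*ptep);
 *	if (write_fault)
 *		entry = pte_mkdirty(pte_mkwrite(entry));
 *	if (ptep_set_access_flags(vma, address, ptep, entry, write_fault))
 *		update_mmu_cache(vma, address, ptep);
 *
 * Always returning true forces the update_mmu_cache() call, which
 * rewrites the TLB entry even for a spurious fault.
 */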

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

#if defined(CONFIG_XPA)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low  &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK);
	pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
	pte.pte_low  |= pgprot_val(newprot) & ~_PFNX_MASK;
	pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
	return pte;
}
#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low  &= _PAGE_CHG_MASK;
	pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
	pte.pte_low  |= pgprot_val(newprot);
	pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
	return pte;
}
#else
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot) & ~_PAGE_CHG_MASK;
	if ((pte_val(pte) & _PAGE_ACCESSED) && !(pte_val(pte) & _PAGE_NO_READ))
		pte_val(pte) |= _PAGE_SILENT_READ;
	return pte;
}
#endif
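
/*
 * Illustrative sketch, not part of the original header: pte_modify()
 * keeps the pfn and the state bits covered by _PAGE_CHG_MASK while
 * replacing the protection bits, which is what mprotect()-style
 * changes need; simplified, assumed caller:
 *
 *	pte_t pte = ptep_get_and_clear(mm, addr, ptep);
 *	pte = pte_modify(pte, newprot);
 *	set_pte_at(mm, addr, ptep, pte);
 */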


extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
	pte_t pte);

static inline void update_mmu_cache(struct vm_area_struct *vma,
	unsigned long address, pte_t *ptep)
{
	pte_t pte = *ptep;
	__update_tlb(vma, address, pte);
}

#define	__HAVE_ARCH_UPDATE_MMU_TLB
#define update_mmu_tlb	update_mmu_cache

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
	unsigned long address, pmd_t *pmdp)
{
	pte_t pte = *(pte_t *)pmdp;

	__update_tlb(vma, address, pte);
}

#define kern_addr_valid(addr)	(1)

/*
 * Allow physical addresses to be fixed up to help 36-bit peripherals.
 */
#ifdef CONFIG_MIPS_FIXUP_BIGPHYS_ADDR
phys_addr_t fixup_bigphys_addr(phys_addr_t addr, phys_addr_t size);
int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long vaddr,
		unsigned long pfn, unsigned long size, pgprot_t prot);
#define io_remap_pfn_range io_remap_pfn_range
#else
#define fixup_bigphys_addr(addr, size)	(addr)
#endif /* CONFIG_MIPS_FIXUP_BIGPHYS_ADDR */
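
/*
 * Illustrative sketch, not part of the original header: with
 * CONFIG_MIPS_FIXUP_BIGPHYS_ADDR, the MIPS io_remap_pfn_range() can
 * rewrite the physical address so that 36-bit peripherals are
 * reachable from a 32-bit kernel; a driver uses it exactly like
 * remap_pfn_range() (assumed driver context):
 *
 *	return io_remap_pfn_range(vma, vma->vm_start, pfn,
 *				  vma->vm_end - vma->vm_start,
 *				  pgprot_noncached(vma->vm_page_prot));
 */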

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/* We don't have hardware dirty/accessed bits, generic_pmdp_establish is fine. */
#define pmdp_establish generic_pmdp_establish

#define has_transparent_hugepage has_transparent_hugepage
extern int has_transparent_hugepage(void);

static inline int pmd_trans_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_HUGE;

	return pmd;
}

extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t pmd);

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_WRITE);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_WRITE;
	if (pmd_val(pmd) & _PAGE_MODIFIED)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_MODIFIED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_MODIFIED | _PAGE_SOFT_DIRTY;
	if (pmd_val(pmd) & _PAGE_WRITE)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}

static inline int pmd_young(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_ACCESSED|_PAGE_SILENT_READ);

	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_ACCESSED;

	if (!(pmd_val(pmd) & _PAGE_NO_READ))
		pmd_val(pmd) |= _PAGE_SILENT_READ;

	return pmd;
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pmd_soft_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_SOFT_DIRTY;
	return pmd;
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_SOFT_DIRTY);
	return pmd;
}

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

/* Extern to avoid header file madness */
extern pmd_t mk_pmd(struct page *page, pgprot_t prot);

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return pmd_val(pmd) >> _PFN_SHIFT;
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (pmd_trans_huge(pmd))
		return pfn_to_page(pmd_pfn(pmd));

	return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) = (pmd_val(pmd) & (_PAGE_CHG_MASK | _PAGE_HUGE)) |
		       (pgprot_val(newprot) & ~_PAGE_CHG_MASK);
	return pmd;
}

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY);

	return pmd;
}

/*
 * The generic pmdp_huge_get_and_clear() uses a version of pmd_clear()
 * with a different prototype, so we provide our own.
 */
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	pmd_t old = *pmdp;

	pmd_clear(pmdp);

	return old;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef _PAGE_HUGE
#define pmd_leaf(pmd)	((pmd_val(pmd) & _PAGE_HUGE) != 0)
#define pud_leaf(pud)	((pud_val(pud) & _PAGE_HUGE) != 0)
#endif

#define gup_fast_permitted(start, end)	(!cpu_has_dc_aliases)

/*
 * We provide our own get_unmapped_area() to cope with the virtual
 * aliasing constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#endif /* _ASM_PGTABLE_H */