Lines Matching defs:pfn

580  * to pfn. To get around this restriction, we allow arbitrary mappings so long
599 unsigned long pfn = pte_pfn(pte);
608 if (is_zero_pfn(pfn))
621 if (!pfn_valid(pfn))
627 if (pfn == vma->vm_pgoff + off)
634 if (is_zero_pfn(pfn))
638 if (unlikely(pfn > highest_memmap_pfn)) {
648 return pfn_to_page(pfn);
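
The block at lines 580-648 is vm_normal_page(), which maps a (vma, address, pte) triple back to its struct page while filtering out the zero page and special pfn mappings. A minimal sketch of the usual caller pattern, assuming the pte lock is held; get_backing_page() is a hypothetical wrapper, not a kernel function:

static struct page *get_backing_page(struct vm_area_struct *vma,
                                     unsigned long addr, pte_t pte)
{
        struct page *page;

        page = vm_normal_page(vma, addr, pte);
        if (!page)              /* special/zero mapping: no struct page to use */
                return NULL;
        get_page(page);         /* pin it before the pte lock is dropped */
        return page;
}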
655 unsigned long pfn = pmd_pfn(pmd);
664 if (!pfn_valid(pfn))
670 if (pfn == vma->vm_pgoff + off)
681 if (unlikely(pfn > highest_memmap_pfn))
689 return pfn_to_page(pfn);
1960 pfn_t pfn, pgprot_t prot, bool mkwrite)
1981 if (pte_pfn(*pte) != pfn_t_to_pfn(pfn)) {
1994 if (pfn_t_devmap(pfn))
1995 entry = pte_mkdevmap(pfn_t_pte(pfn, prot));
1997 entry = pte_mkspecial(pfn_t_pte(pfn, prot));
2013 * vmf_insert_pfn_prot - insert single pfn into user vma with specified pgprot
2016 * @pfn: source kernel pfn
2034 unsigned long pfn, pgprot_t pgprot)
2046 BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
2051 if (!pfn_modify_allowed(pfn, pgprot))
2054 track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV));
2056 return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot,
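
vmf_insert_pfn_prot() (lines 2013-2056) installs a single pfn with a caller-supplied pgprot instead of vma->vm_page_prot. A hedged sketch of the common MMIO use; mmio_fault() and its pfn argument are illustrative, only the vmf_insert_pfn_prot() call reflects the signature at line 2034:

static vm_fault_t mmio_fault(struct vm_fault *vmf, unsigned long pfn)
{
        /* map one device register page uncached */
        return vmf_insert_pfn_prot(vmf->vma, vmf->address, pfn,
                                   pgprot_noncached(vmf->vma->vm_page_prot));
}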
2062 * vmf_insert_pfn - insert single pfn into user vma
2065 * @pfn: source kernel pfn
2082 unsigned long pfn)
2084 return vmf_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
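
vmf_insert_pfn() (lines 2062-2084) is the plain variant that passes vma->vm_page_prot through unchanged. A minimal fault-handler sketch for a VM_PFNMAP vma; struct my_dev and bar_base_pfn are assumptions, not kernel names:

static vm_fault_t my_dev_fault(struct vm_fault *vmf)
{
        struct my_dev *dev = vmf->vma->vm_private_data;     /* hypothetical */
        unsigned long pfn = dev->bar_base_pfn + vmf->pgoff; /* hypothetical */

        /* returns VM_FAULT_NOPAGE on success, a VM_FAULT_* error otherwise */
        return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
}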
2088 static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn)
2093 if (pfn_t_devmap(pfn))
2095 if (pfn_t_special(pfn))
2097 if (is_zero_pfn(pfn_t_to_pfn(pfn)))
2103 unsigned long addr, pfn_t pfn, pgprot_t pgprot,
2108 BUG_ON(!vm_mixed_ok(vma, pfn));
2113 track_pfn_insert(vma, &pgprot, pfn);
2115 if (!pfn_modify_allowed(pfn_t_to_pfn(pfn), pgprot))
2126 !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) {
2134 page = pfn_to_page(pfn_t_to_pfn(pfn));
2137 return insert_pfn(vma, addr, pfn, pgprot, mkwrite);
2149 * vmf_insert_mixed_prot - insert single pfn into user vma with specified pgprot
2152 * @pfn: source kernel pfn
2175 pfn_t pfn, pgprot_t pgprot)
2177 return __vm_insert_mixed(vma, addr, pfn, pgprot, false);
2182 pfn_t pfn)
2184 return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, false);
2194 unsigned long addr, pfn_t pfn)
2196 return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, true);
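
The vmf_insert_mixed*() wrappers at lines 2175-2196 all funnel into __vm_insert_mixed(); the _mkwrite variant additionally installs the pte dirty and writable. A sketch of the DAX-style dispatch between them, assuming the pfn_t was already obtained from the device; dax_like_fault() is hypothetical:

static vm_fault_t dax_like_fault(struct vm_fault *vmf, pfn_t pfn)
{
        if (vmf->flags & FAULT_FLAG_WRITE)
                /* write fault: map writable and dirty in one step */
                return vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
        return vmf_insert_mixed(vmf->vma, vmf->address, pfn);
}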
2207 unsigned long pfn, pgprot_t prot)
2219 if (!pfn_modify_allowed(pfn, prot)) {
2223 set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
2224 pfn++;
2233 unsigned long pfn, pgprot_t prot)
2239 pfn -= addr >> PAGE_SHIFT;
2247 pfn + (addr >> PAGE_SHIFT), prot);
2256 unsigned long pfn, pgprot_t prot)
2262 pfn -= addr >> PAGE_SHIFT;
2269 pfn + (addr >> PAGE_SHIFT), prot);
2278 unsigned long pfn, pgprot_t prot)
2284 pfn -= addr >> PAGE_SHIFT;
2291 pfn + (addr >> PAGE_SHIFT), prot);
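
The remap_pte/pmd/pud/p4d_range helpers above repeat one biasing trick: each level subtracts addr >> PAGE_SHIFT from pfn once (lines 2239, 2262, 2284) so the pfn for any address in the range can later be recovered as pfn + (addr >> PAGE_SHIFT) (lines 2247, 2269, 2291). A hypothetical distillation of that arithmetic, not kernel code:

static unsigned long remap_target_pfn(unsigned long first_pfn,
                                      unsigned long first_addr,
                                      unsigned long addr)
{
        /* bias once, as remap_pmd_range() does at line 2239 */
        unsigned long base = first_pfn - (first_addr >> PAGE_SHIFT);

        /* the target pfn advances by exactly one per page of addr */
        return base + (addr >> PAGE_SHIFT);
}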
2302 * @pfn: page frame number of kernel physical memory address
2311 unsigned long pfn, unsigned long size, pgprot_t prot)
2317 unsigned long remap_pfn = pfn;
2344 vma->vm_pgoff = pfn;
2354 pfn -= addr >> PAGE_SHIFT;
2360 pfn + (addr >> PAGE_SHIFT), prot);
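
remap_pfn_range() (lines 2302-2360) is the public entry point for the cascade above and the classic building block for a driver ->mmap(). A minimal sketch; struct my_dev and phys_base are assumptions, while the vm_pgoff offsetting is the usual driver convention:

static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct my_dev *dev = file->private_data;        /* hypothetical */
        unsigned long pfn = (dev->phys_base >> PAGE_SHIFT) + vma->vm_pgoff;

        return remap_pfn_range(vma, vma->vm_start, pfn,
                               vma->vm_end - vma->vm_start,
                               vma->vm_page_prot);
}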
2389 unsigned long vm_len, pfn, pages;
2400 pfn = start >> PAGE_SHIFT;
2402 if (pfn + pages < pfn)
2408 pfn += vma->vm_pgoff;
2417 return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
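
vm_iomap_memory() (lines 2389-2417) wraps io_remap_pfn_range() and performs the pgoff, length, and overflow checks (note the pfn + pages wrap test at line 2402) that drivers would otherwise open-code. A sketch of the simpler ->mmap() it enables; bar_start and bar_len are hypothetical device fields:

static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct my_dev *dev = file->private_data;        /* hypothetical */

        /* bounds-checks the requested window against bar_len itself */
        return vm_iomap_memory(vma, dev->bar_start, dev->bar_len);
}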
4351 * the pfn may be screwed if the read is non atomic.
4972 * @pfn: location to store found PFN
4979 * Return: zero and the pfn at @pfn on success, -ve otherwise.
4982 unsigned long *pfn)
4994 *pfn = pte_pfn(*ptep);
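
follow_pfn() (lines 4972-4994) reports the pfn behind a user address through the @pfn out-parameter, taking no reference on anything, so the result can go stale once the caller drops its locks. A hedged sketch of a caller, assuming the mmap lock is held; lookup_user_pfn() is hypothetical:

static int lookup_user_pfn(struct vm_area_struct *vma, unsigned long addr,
                           unsigned long *pfn)
{
        /* follow_pfn() only succeeds on VM_IO/VM_PFNMAP mappings */
        if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
                return -EINVAL;
        return follow_pfn(vma, addr, pfn);      /* 0 and *pfn set on success */
}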