Lines Matching refs:paddr
35 void (*_cache_line_loop_ic_fn)(phys_addr_t paddr, unsigned long vaddr,
174 * Programming model requires both paddr and vaddr irrespective of aliasing
177 * - paddr in {I,D}C_PTAG
181 * - D$ / Non-aliasing I$: only paddr in {I,D}C_IV?L
189 void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
204 * and have @paddr - aligned to cache line and integral @num_lines.
206 * -@paddr will be cache-line aligned already (being page aligned)
210 sz += paddr & ~CACHE_LINE_MASK;
211 paddr &= CACHE_LINE_MASK;
217 * MMUv3, cache ops require paddr in PTAG reg
221 write_aux_reg(aux_tag, paddr);
226 * - upper 8 bits of paddr need to be written into PTAG_HI
231 write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
235 write_aux_reg(aux_tag, paddr);
236 paddr += L1_CACHE_BYTES;
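Pieced together from the v3 matches above, a rough sketch of the per-line flow (register selection, the full_page fast path and the PAE40 special case are left out; the helper name and the DIV_ROUND_UP use are mine, the rest mirrors the listed lines):

    /* sketch only: per-line op under the MMUv3 model, where vaddr goes into
     * the {I,D}C_IV?L index reg and paddr into the matching PTAG reg
     */
    static void v3_line_loop_sketch(phys_addr_t paddr, unsigned long vaddr,
                                    unsigned long sz, unsigned int aux_cmd,
                                    unsigned int aux_tag)
    {
            int num_lines;

            /* floor addresses to a line boundary; grow sz to cover the gap */
            sz += paddr & ~CACHE_LINE_MASK;
            paddr &= CACHE_LINE_MASK;
            vaddr &= CACHE_LINE_MASK;
            num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

            while (num_lines-- > 0) {
                    write_aux_reg(aux_tag, paddr);  /* physical tag first */
                    paddr += L1_CACHE_BYTES;
                    write_aux_reg(aux_cmd, vaddr);  /* index reg issues the op */
                    vaddr += L1_CACHE_BYTES;
            }
    }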
249 void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
263 * and have @paddr - aligned to cache line and integral @num_lines.
265 * -@paddr will be cache-line aligned already (being page aligned)
269 sz += paddr & ~CACHE_LINE_MASK;
270 paddr &= CACHE_LINE_MASK;
277 * - upper 8 bits of paddr need to be written into PTAG_HI
286 write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
288 write_aux_reg(ARC_REG_DC_PTAG_HI, (u64)paddr >> 32);
292 write_aux_reg(aux_cmd, paddr);
293 paddr += L1_CACHE_BYTES;
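The v4 matches boil down to a single aux register written with paddr per line, plus a one-off PTAG_HI write for the upper 8 bits under PAE40. A hedged sketch (the helper name and the for_icache switch are invented; is_pae40_enabled() is assumed from the surrounding kernel source):

    static void v4_line_loop_sketch(phys_addr_t paddr, unsigned long sz,
                                    unsigned int aux_cmd, bool for_icache)
    {
            int num_lines;

            sz += paddr & ~CACHE_LINE_MASK;
            paddr &= CACHE_LINE_MASK;
            num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

            /* PAE40: upper 8 bits of the 40-bit paddr go into PTAG_HI once */
            if (is_pae40_enabled())
                    write_aux_reg(for_icache ? ARC_REG_IC_PTAG_HI
                                             : ARC_REG_DC_PTAG_HI,
                                  (u64)paddr >> 32);

            while (num_lines-- > 0) {
                    write_aux_reg(aux_cmd, paddr);  /* paddr alone indexes the line */
                    paddr += L1_CACHE_BYTES;
            }
    }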
303 void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
318 /* for any leading gap between @paddr and start of cache line */
319 sz += paddr & ~CACHE_LINE_MASK;
320 paddr &= CACHE_LINE_MASK;
332 write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
334 write_aux_reg(ARC_REG_DC_PTAG_HI, (u64)paddr >> 32);
338 write_aux_reg(e, paddr + sz); /* ENDR is exclusive */
339 write_aux_reg(s, paddr);
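The second __cache_line_loop_v4 variant (lines 303-339) replaces the loop with a region op: ENDR is programmed first because it is exclusive, then the START write, which appears to be what launches the operation. Sketch, with s/e standing for the selected start/end aux registers as in the listing:

    static void v4_region_op_sketch(phys_addr_t paddr, unsigned long sz,
                                    unsigned int s, unsigned int e,
                                    bool for_icache)
    {
            /* cover any leading gap between paddr and its cache line */
            sz += paddr & ~CACHE_LINE_MASK;
            paddr &= CACHE_LINE_MASK;

            if (is_pae40_enabled())
                    write_aux_reg(for_icache ? ARC_REG_IC_PTAG_HI
                                             : ARC_REG_DC_PTAG_HI,
                                  (u64)paddr >> 32);

            write_aux_reg(e, paddr + sz);   /* ENDR is exclusive, set first */
            write_aux_reg(s, paddr);        /* START write starts the region op */
    }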
455 /* For kernel mappings cache operation: index is same as paddr */
461 static inline void __dc_line_op(phys_addr_t paddr, unsigned long vaddr,
471 __cache_line_loop(paddr, vaddr, sz, op, full_page);
483 #define __dc_line_op(paddr, vaddr, sz, op)
484 #define __dc_line_op_k(paddr, sz, op)
497 __ic_line_inv_vaddr_local(phys_addr_t paddr, unsigned long vaddr,
504 (*_cache_line_loop_ic_fn)(paddr, vaddr, sz, OP_INV_IC, full_page);
515 phys_addr_t paddr, vaddr;
523 __ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
526 static void __ic_line_inv_vaddr(phys_addr_t paddr, unsigned long vaddr,
530 .paddr = paddr,
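Lines 497-530 are SMP plumbing: the I$ invalidate must run on every CPU, so paddr/vaddr/sz are packed into a struct and handed to a cross-call. A reconstruction around the matched lines (the struct and helper names here are guesses; on_each_cpu() is the generic kernel API):

    struct ic_inv_args {
            phys_addr_t paddr, vaddr;
            int sz;
    };

    static void __ic_line_inv_vaddr_helper(void *info)
    {
            struct ic_inv_args *ic_inv = info;

            __ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
    }

    static void __ic_line_inv_vaddr(phys_addr_t paddr, unsigned long vaddr,
                                    unsigned long sz)
    {
            struct ic_inv_args ic_inv = {
                    .paddr = paddr,
                    .vaddr = vaddr,
                    .sz    = sz
            };

            /* run the local invalidate on every CPU and wait for completion */
            on_each_cpu(__ic_line_inv_vaddr_helper, &ic_inv, 1);
    }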
547 static noinline void slc_op_rgn(phys_addr_t paddr, unsigned long sz, const int op)
590 end = paddr + sz + l2_line_sz - 1;
597 write_aux_reg(ARC_REG_SLC_RGN_START1, upper_32_bits(paddr));
599 write_aux_reg(ARC_REG_SLC_RGN_START, lower_32_bits(paddr));
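slc_op_rgn() drives the SLC (L2) region registers: the end is padded out by one L2 line, and with wide physical addresses the high words go into the *1 registers. A partial sketch of just the address programming (the RGN_END register names and the surrounding lock/ctrl handling are assumptions):

    unsigned long end = paddr + sz + l2_line_sz - 1;    /* pad to an L2 line */

    if (is_pae40_enabled())
            write_aux_reg(ARC_REG_SLC_RGN_END1, upper_32_bits(end));
    write_aux_reg(ARC_REG_SLC_RGN_END, lower_32_bits(end));

    if (is_pae40_enabled())
            write_aux_reg(ARC_REG_SLC_RGN_START1, upper_32_bits(paddr));
    write_aux_reg(ARC_REG_SLC_RGN_START, lower_32_bits(paddr));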
610 static __maybe_unused noinline void slc_op_line(phys_addr_t paddr, unsigned long sz, const int op)
641 sz += paddr & ~SLC_LINE_MASK;
642 paddr &= SLC_LINE_MASK;
647 write_aux_reg(cmd, paddr);
648 paddr += l2_line_sz;
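slc_op_line() is the per-line fallback: the same floor-the-address trick as the L1 loops, but at SLC line granularity. Sketch, with cmd standing for the selected SLC command register as on line 647:

    static void slc_line_loop_sketch(phys_addr_t paddr, unsigned long sz,
                                     unsigned int cmd)
    {
            int num_lines;

            sz += paddr & ~SLC_LINE_MASK;
            paddr &= SLC_LINE_MASK;
            num_lines = DIV_ROUND_UP(sz, l2_line_sz);

            while (num_lines-- > 0) {
                    write_aux_reg(cmd, paddr);      /* operate on one SLC line */
                    paddr += l2_line_sz;
            }
    }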
660 #define slc_op(paddr, sz, op) slc_op_rgn(paddr, sz, op)
740 phys_addr_t paddr = (unsigned long)folio_address(folio);
747 if (addr_not_cache_congruent(paddr, vaddr))
748 __flush_dcache_pages(paddr, vaddr,
847 * The 2nd arg despite being paddr will be used to index icache
882 * @paddr is phy addr of region
885 * builtin kernel code) @vaddr will be paddr only, meaning CDU operation will
886 * use a paddr to index the cache (despite VIPT). This is fine since a
890 void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len)
892 __dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
893 __ic_line_inv_vaddr(paddr, vaddr, len);
897 void __inv_icache_pages(phys_addr_t paddr, unsigned long vaddr, unsigned nr)
899 __ic_line_inv_vaddr(paddr, vaddr, nr * PAGE_SIZE);
904 * For kernel mappings @vaddr == @paddr
906 void __flush_dcache_pages(phys_addr_t paddr, unsigned long vaddr, unsigned nr)
908 __dc_line_op(paddr, vaddr & PAGE_MASK, nr * PAGE_SIZE, OP_FLUSH_N_INV);
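Lines 882-908 are the exported helpers: keeping I$ coherent means a writeback+invalidate of the D$ lines followed by an I$ invalidate of the same range, and for kernel mappings the same address doubles as paddr and vaddr. A hypothetical caller, e.g. after writing instructions into a kernel buffer:

    /* hypothetical usage: make freshly written instructions at kaddr visible
     * to instruction fetch; kernel mapping, so the address serves as both
     * paddr and vaddr (per the comment at line 904)
     */
    static void sync_patched_text_sketch(void *kaddr, int len)
    {
            phys_addr_t addr = (phys_addr_t)(unsigned long)kaddr;

            __sync_icache_dcache(addr, (unsigned long)kaddr, len);
    }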
934 phys_addr_t paddr = pfn << PAGE_SHIFT;
938 __flush_dcache_pages(paddr, u_vaddr, 1);
941 __inv_icache_pages(paddr, u_vaddr, 1);
992 * here as well (given that both vaddr/paddr are available).
1124 * pair to provide vaddr/paddr respectively, just as in MMU v3