Lines Matching refs:paddr

31 void (*_cache_line_loop_ic_fn)(phys_addr_t paddr, unsigned long vaddr,
210 * ARC VIPT I-cache uses vaddr to index into cache and paddr to match the tag.
211 * The orig Cache Management Module "CDU" only required paddr to invalidate a
217 * paddr alone could not be used to correctly index the cache.
226 * of vaddr could easily be "stuffed" in the paddr as bits [4:0] since the
227 * orig 5 bits of paddr were anyway ignored by CDU line ops, as they
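
A minimal user-space sketch of the "stuffing" trick described above; the 8K PAGE_SHIFT and the 5-bit don't-care mask are assumptions for illustration, not taken from this listing:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT  13       /* assumption: 8K pages, the ARC default */
    #define CDU_DC_MASK 0x1FUL   /* assumption: low 5 paddr bits are don't-care */

    /* Fold the 5 aliasing vaddr bits into the ignored low bits of paddr,
     * so a single register write carries both index and tag information. */
    static uint32_t stuff_vaddr_bits(uint32_t paddr, uint32_t vaddr)
    {
        return paddr | ((vaddr >> PAGE_SHIFT) & CDU_DC_MASK);
    }

    int main(void)
    {
        /* vaddr bits [17:13] = 3 end up as paddr bits [4:0] */
        printf("0x%08x\n", stuff_vaddr_bits(0x80002000u, 0x20006000u));
        return 0;
    }

This is exactly the shape of the paddr |= (vaddr >> PAGE_SHIFT) & 0x1F statement at line 278 below.
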
250 void __cache_line_loop_v2(phys_addr_t paddr, unsigned long vaddr,
264 * and have @paddr aligned to a cache line and an integral @num_lines.
266 * -@paddr will be cache-line aligned already (being page aligned)
270 sz += paddr & ~CACHE_LINE_MASK;
271 paddr &= CACHE_LINE_MASK;
277 /* MMUv2 and before: paddr contains stuffed vaddrs bits */
278 paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
281 write_aux_reg(aux_cmd, paddr);
282 paddr += L1_CACHE_BYTES;
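
The floor/ceil at lines 270-271 repeats in every loop variant; a runnable sketch of the arithmetic (the 64-byte line size is an assumption):

    #include <stdio.h>

    #define L1_CACHE_BYTES  64UL   /* assumption: 64B L1 lines */
    #define CACHE_LINE_MASK (~(L1_CACHE_BYTES - 1))

    /* Absorb the leading gap into sz, floor paddr to a line boundary,
     * then round the byte count up to whole cache lines. */
    static unsigned long lines_to_touch(unsigned long *paddr, unsigned long sz)
    {
        sz += *paddr & ~CACHE_LINE_MASK;   /* gap between line start and paddr */
        *paddr &= CACHE_LINE_MASK;         /* floor to the line boundary */
        return (sz + L1_CACHE_BYTES - 1) / L1_CACHE_BYTES;
    }

    int main(void)
    {
        unsigned long paddr = 0x80001030UL;
        unsigned long n = lines_to_touch(&paddr, 100);
        printf("lines=%lu paddr=0x%lx\n", n, paddr);  /* lines=3 paddr=0x80001000 */
        return 0;
    }
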
288 * - ARC700 programming model requires paddr and vaddr be passed in separate
295 void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
310 * and have @paddr aligned to a cache line and an integral @num_lines.
312 * -@paddr will be cache-line aligned already (being page aligned)
316 sz += paddr & ~CACHE_LINE_MASK;
317 paddr &= CACHE_LINE_MASK;
323 * MMUv3, cache ops require paddr in PTAG reg
327 write_aux_reg(aux_tag, paddr);
332 * - upper 8 bits of paddr need to be written into PTAG_HI
337 write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
341 write_aux_reg(aux_tag, paddr);
342 paddr += L1_CACHE_BYTES;
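
The v3 lines above split each operation into a tag write and an index write; a print-stub sketch of that shape (the register names and the full_page short-cut are reconstructed from context, not quoted from this listing):

    #include <stdio.h>
    #include <stdint.h>

    #define L1_CACHE_BYTES 64   /* assumption: 64B L1 lines */

    static void write_aux_reg(const char *reg, uint64_t val)  /* print stub */
    {
        printf("%-6s <- 0x%llx\n", reg, (unsigned long long)val);
    }

    /* MMUv3 shape: paddr feeds the PTAG (tag-match) register, vaddr the
     * index register.  For a full page the tag never changes, so PTAG
     * can be written once, outside the loop. */
    static void line_loop_v3(uint64_t paddr, unsigned long vaddr,
                             int num_lines, int full_page)
    {
        if (full_page)
            write_aux_reg("PTAG", paddr);

        while (num_lines-- > 0) {
            if (!full_page) {
                write_aux_reg("PTAG", paddr);
                paddr += L1_CACHE_BYTES;
            }
            write_aux_reg("IVIL", vaddr);  /* per-line op, vaddr-indexed */
            vaddr += L1_CACHE_BYTES;
        }
    }

    int main(void)
    {
        line_loop_v3(0x80001000ULL, 0x20006000UL, 2, 0);
        return 0;
    }
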
356 * - D-cache: only paddr needed (in DC_IVDL/DC_FLDL)
357 * - I-cache Non Aliasing: Despite VIPT, only paddr needed (in IC_IVIL)
358 * - I-cache Aliasing: Both vaddr and paddr needed (in IC_IVIL, IC_PTAG
366 void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
380 * and have @paddr aligned to a cache line and an integral @num_lines.
382 * -@paddr will be cache-line aligned already (being page aligned)
386 sz += paddr & ~CACHE_LINE_MASK;
387 paddr &= CACHE_LINE_MASK;
394 * - upper 8 bits of paddr need to be written into PTAG_HI
403 write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
405 write_aux_reg(ARC_REG_DC_PTAG_HI, (u64)paddr >> 32);
409 write_aux_reg(aux_cmd, paddr);
410 paddr += L1_CACHE_BYTES;
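
For contrast with v3, a sketch of the paddr-only v4 loop, including the PAE40 PTAG_HI write seen at lines 403/405 (register names abbreviated, 64B lines assumed):

    #include <stdio.h>
    #include <stdint.h>

    #define L1_CACHE_BYTES 64   /* assumption: 64B L1 lines */

    static void write_aux_reg(const char *reg, uint64_t val)  /* print stub */
    {
        printf("%-8s <- 0x%llx\n", reg, (unsigned long long)val);
    }

    /* v4 shape: physically tagged (and, for D$ or a non-aliasing I$, also
     * physically indexed) caches need only paddr.  Under PAE40 the upper
     * 8 bits of the 40-bit paddr go into PTAG_HI once, up front. */
    static void line_loop_v4(uint64_t paddr, int num_lines, int pae40)
    {
        if (pae40)
            write_aux_reg("PTAG_HI", paddr >> 32);

        while (num_lines-- > 0) {
            write_aux_reg("IVDL", (uint32_t)paddr);  /* paddr-only line op */
            paddr += L1_CACHE_BYTES;
        }
    }

    int main(void)
    {
        line_loop_v4(0x180001000ULL, 2, 1);
        return 0;
    }
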
420 void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
435 /* for any leading gap between @paddr and start of cache line */
436 sz += paddr & ~CACHE_LINE_MASK;
437 paddr &= CACHE_LINE_MASK;
449 write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
451 write_aux_reg(ARC_REG_DC_PTAG_HI, (u64)paddr >> 32);
455 write_aux_reg(e, paddr + sz); /* ENDR is exclusive */
456 write_aux_reg(s, paddr);
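
The second v4 variant above trades the loop for a range: a sketch of the END/START programming at lines 455-456. END is exclusive per the comment; that the START write triggers the operation is an assumption inferred from the write order:

    #include <stdio.h>
    #include <stdint.h>

    static void write_aux_reg(const char *reg, uint64_t val)  /* print stub */
    {
        printf("%-8s <- 0x%llx\n", reg, (unsigned long long)val);
    }

    /* Region form: program an exclusive END and a START, and let the
     * hardware walk the lines itself.  END goes first; the START write
     * is what kicks off the operation. */
    static void region_op(uint64_t paddr, unsigned long sz, int pae40)
    {
        if (pae40)
            write_aux_reg("PTAG_HI", paddr >> 32);

        write_aux_reg("ENDR",   (uint32_t)(paddr + sz));  /* exclusive end */
        write_aux_reg("STARTR", (uint32_t)paddr);         /* triggers op   */
    }

    int main(void)
    {
        region_op(0x80001000ULL, 0x2000, 0);
        return 0;
    }
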
574 /* For kernel mappings cache operation: index is same as paddr */
580 static inline void __dc_line_op(phys_addr_t paddr, unsigned long vaddr,
590 __cache_line_loop(paddr, vaddr, sz, op, full_page);
602 #define __dc_line_op(paddr, vaddr, sz, op)
603 #define __dc_line_op_k(paddr, sz, op)
616 __ic_line_inv_vaddr_local(phys_addr_t paddr, unsigned long vaddr,
623 (*_cache_line_loop_ic_fn)(paddr, vaddr, sz, OP_INV_IC, full_page);
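
A sketch of the runtime dispatch behind _cache_line_loop_ic_fn (declared at line 31): one probe at boot picks the loop variant, and the call at line 623 goes indirectly through the pointer. The probe value below is hypothetical:

    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t phys_addr_t;

    static void loop_v2(phys_addr_t p, unsigned long v, unsigned long sz,
                        const int op, const int full_page) { puts("v2"); }
    static void loop_v3(phys_addr_t p, unsigned long v, unsigned long sz,
                        const int op, const int full_page) { puts("v3"); }

    /* Set once during cache init, then used for every icache invalidate. */
    static void (*cache_line_loop_ic_fn)(phys_addr_t, unsigned long,
                                         unsigned long, const int, const int);

    int main(void)
    {
        int mmu_ver = 3;   /* hypothetical: value probed from the MMU BCR */

        cache_line_loop_ic_fn = (mmu_ver >= 3) ? loop_v3 : loop_v2;
        cache_line_loop_ic_fn(0x80001000ULL, 0x20006000UL, 4096, 0, 1);
        return 0;
    }
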
634 phys_addr_t paddr, vaddr;
642 __ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
645 static void __ic_line_inv_vaddr(phys_addr_t paddr, unsigned long vaddr,
649 .paddr = paddr,
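
Lines 634-649 follow the standard cross-CPU broadcast shape: pack the arguments into a struct and hand it to on_each_cpu(). A user-space reconstruction with the IPI machinery stubbed out:

    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t phys_addr_t;

    /* Stub: the real on_each_cpu() IPIs every core; here we run locally. */
    static void on_each_cpu(void (*fn)(void *), void *info, int wait)
    {
        (void)wait;
        fn(info);
    }

    struct ic_inv_args {
        phys_addr_t paddr, vaddr;
        int sz;
    };

    static void ic_inv_local(phys_addr_t paddr, phys_addr_t vaddr, int sz)
    {
        printf("inv I$ paddr=0x%llx vaddr=0x%llx sz=%d\n",
               (unsigned long long)paddr, (unsigned long long)vaddr, sz);
    }

    static void ic_inv_helper(void *info)    /* runs on each CPU */
    {
        struct ic_inv_args *a = info;

        ic_inv_local(a->paddr, a->vaddr, a->sz);
    }

    static void ic_inv_all_cpus(phys_addr_t paddr, phys_addr_t vaddr, int sz)
    {
        struct ic_inv_args a = { .paddr = paddr, .vaddr = vaddr, .sz = sz };

        on_each_cpu(ic_inv_helper, &a, 1);   /* wait=1: synchronous */
    }

    int main(void)
    {
        ic_inv_all_cpus(0x80001000ULL, 0x20006000ULL, 4096);
        return 0;
    }
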
666 noinline void slc_op_rgn(phys_addr_t paddr, unsigned long sz, const int op)
709 end = paddr + sz + l2_line_sz - 1;
716 write_aux_reg(ARC_REG_SLC_RGN_START1, upper_32_bits(paddr));
718 write_aux_reg(ARC_REG_SLC_RGN_START, lower_32_bits(paddr));
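
A sketch of the SLC (L2) region programming around lines 709-718; the end bump by one L2 line and the END-before-START ordering are reconstructed from the surrounding code, and the 128B line size is an assumption:

    #include <stdio.h>
    #include <stdint.h>

    static void write_aux_reg(const char *reg, uint32_t val)  /* print stub */
    {
        printf("%-14s <- 0x%x\n", reg, val);
    }

    /* END is padded by one line so the last partial line is covered; the
     * *1 registers carry the upper 32 bits under PAE40; END is programmed
     * before START since the START write starts the operation. */
    static void slc_rgn_op(uint64_t paddr, unsigned long sz, unsigned l2_line_sz)
    {
        uint64_t end = paddr + sz + l2_line_sz - 1;

        write_aux_reg("SLC_RGN_END1",   (uint32_t)(end >> 32));
        write_aux_reg("SLC_RGN_END",    (uint32_t)end);
        write_aux_reg("SLC_RGN_START1", (uint32_t)(paddr >> 32));
        write_aux_reg("SLC_RGN_START",  (uint32_t)paddr);
    }

    int main(void)
    {
        slc_rgn_op(0x80001000ULL, 100, 128);  /* assumption: 128B L2 lines */
        return 0;
    }
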
729 noinline void slc_op_line(phys_addr_t paddr, unsigned long sz, const int op)
760 sz += paddr & ~SLC_LINE_MASK;
761 paddr &= SLC_LINE_MASK;
766 write_aux_reg(cmd, paddr);
767 paddr += l2_line_sz;
779 #define slc_op(paddr, sz, op) slc_op_rgn(paddr, sz, op)
860 phys_addr_t paddr = (unsigned long)page_address(page);
863 if (addr_not_cache_congruent(paddr, vaddr))
864 __flush_dcache_page(paddr, vaddr);
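
The congruence check at line 863 decides whether the kernel alias and the user mapping can disagree in an aliasing VIPT D$. A sketch of that test; the page size and colour count are assumptions:

    #include <stdio.h>

    #define PAGE_SHIFT       13  /* assumption: 8K pages */
    #define CACHE_COLORS_MSK 3   /* assumption: 4 aliasing colours */

    /* Two addresses are congruent iff the index bits just above the page
     * offset (the cache "colour") agree; only then do they share sets. */
    static int addr_not_cache_congruent(unsigned long a1, unsigned long a2)
    {
        return ((a1 >> PAGE_SHIFT) & CACHE_COLORS_MSK) !=
               ((a2 >> PAGE_SHIFT) & CACHE_COLORS_MSK);
    }

    int main(void)
    {
        /* colours 1 vs 3 -> not congruent, so the flush at line 864 runs */
        printf("%d\n", addr_not_cache_congruent(0x80002000UL, 0x20006000UL));
        return 0;
    }
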
956 * The 2nd arg, despite being paddr, will be used to index the icache
991 * @paddr is phy addr of region
994 * builtin kernel code) @vaddr will be paddr only, meaning CDU operation will
995 * use a paddr to index the cache (despite VIPT). This is fine since a
999 void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len)
1001 __dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
1002 __ic_line_inv_vaddr(paddr, vaddr, len);
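
The order at lines 1001-1002 matters: dirty D$ lines must reach memory before the I$ refetches them. User space gets the same pairing through the compiler's __builtin___clear_cache(); a sketch (the JIT-style buffer and helper name are hypothetical):

    #include <string.h>

    /* After writing code bytes, flush D$ and invalidate I$ over the
     * range, i.e. the same D-then-I sync the kernel performs above. */
    static void publish_code(void *buf, const void *code, size_t len)
    {
        memcpy(buf, code, len);   /* dirties D$ lines */
        __builtin___clear_cache((char *)buf, (char *)buf + len);
    }
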
1006 void __inv_icache_page(phys_addr_t paddr, unsigned long vaddr)
1008 __ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
1013 * For kernel mappings @vaddr == @paddr
1015 void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr)
1017 __dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
1043 phys_addr_t paddr = pfn << PAGE_SHIFT;
1047 __flush_dcache_page(paddr, u_vaddr);
1050 __inv_icache_page(paddr, u_vaddr);
1099 * here as well (given that both vaddr/paddr are available).
1232 * pair to provide vaddr/paddr respectively, just as in MMU v3