Lines matching references to ppa (pblk core)
31 struct ppa_addr *ppa = line_ws->priv;
34 ret = nvm_set_chunk_meta(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
39 line = pblk_ppa_to_line(pblk, *ppa);
40 pos = pblk_ppa_to_pos(&dev->geo, *ppa);
46 kfree(ppa);
55 struct ppa_addr *ppa;
70 ppa = kmalloc(sizeof(struct ppa_addr), GFP_ATOMIC);
71 if (!ppa)
74 *ppa = ppa_addr;
75 pblk_gen_run_ws(pblk, NULL, ppa, pblk_line_mark_bb,
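The group at lines 31-75 hands a physical address from a non-sleeping path to deferred work: the address is heap-copied with GFP_ATOMIC, pblk_gen_run_ws() queues the work, and the worker frees the copy after marking the chunk grown-bad (lines 34 and 46). A minimal userspace sketch of that ownership hand-off; defer_mark_bad and mark_bad_worker are hypothetical names and a direct call stands in for the workqueue:

    #include <stdio.h>
    #include <stdlib.h>

    /* Toy model of lines 31-75: the caller makes a heap copy of the address
     * (GFP_ATOMIC in the kernel, since it cannot sleep) and passes it to a
     * worker, which consumes and frees it. Here the "queue" is a direct call;
     * in pblk it goes through pblk_gen_run_ws() and a workqueue. */
    struct ppa { unsigned long long raw; };

    static void mark_bad_worker(void *priv)
    {
        struct ppa *ppa = priv;                /* same pointer handed in below */

        printf("marking chunk at 0x%llx as grown bad\n", ppa->raw);
        free(ppa);                             /* worker owns and frees the copy */
    }

    static int defer_mark_bad(struct ppa ppa_addr)
    {
        struct ppa *ppa = malloc(sizeof(*ppa));

        if (!ppa)
            return -1;

        *ppa = ppa_addr;                       /* value copy outlives the caller */
        mark_bad_worker(ppa);                  /* stands in for queueing the work */
        return 0;
    }

    int main(void)
    {
        struct ppa p = { .raw = 0x00010203 };
        return defer_mark_bad(p);
    }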
131 struct ppa_addr ppa;
135 ppa.ppa = 0;
142 ret = nvm_get_chunk_meta(dev, ppa, geo->all_chunks, meta);
153 struct ppa_addr ppa)
157 int ch_off = ppa.m.grp * geo->num_chk * geo->num_lun;
158 int lun_off = ppa.m.pu * geo->num_chk;
159 int chk_off = ppa.m.chk;
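Lines 157-159 flatten a chunk address into an index: group, then parallel unit, then chunk, each scaled by the geometry. A compilable sketch of the same arithmetic; the two structs and chunk_meta_index() are simplified stand-ins, not the kernel types:

    #include <stdio.h>

    /* Simplified stand-ins for the geometry and the ppa's chunk-based (.m)
     * view; field names mirror the listing. */
    struct geo  { int num_chk; int num_lun; };
    struct mppa { int grp; int pu; int chk; };

    /* Flat index into the per-device chunk-metadata array, as computed at
     * lines 157-159: group, then parallel unit, then chunk. */
    static int chunk_meta_index(const struct geo *geo, struct mppa p)
    {
        int ch_off  = p.grp * geo->num_chk * geo->num_lun;
        int lun_off = p.pu  * geo->num_chk;
        int chk_off = p.chk;

        return ch_off + lun_off + chk_off;
    }

    int main(void)
    {
        struct geo g = { .num_chk = 1024, .num_lun = 4 };
        struct mppa p = { .grp = 1, .pu = 2, .chk = 7 };

        /* 1*1024*4 + 2*1024 + 7 = 6151 */
        printf("chunk meta index = %d\n", chunk_meta_index(&g, p));
        return 0;
    }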
204 void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
210 /* Callers must ensure that the ppa points to a device address */
211 BUG_ON(pblk_addr_in_cache(ppa));
212 BUG_ON(pblk_ppa_empty(ppa));
215 line = pblk_ppa_to_line(pblk, ppa);
216 paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);
228 struct ppa_addr ppa;
230 ppa = pblk_trans_map_get(pblk, lba);
232 if (!pblk_addr_in_cache(ppa) && !pblk_ppa_empty(ppa))
233 pblk_map_invalidate(pblk, ppa);
235 pblk_ppa_set_empty(&ppa);
236 pblk_trans_map_set(pblk, lba, ppa);
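Lines 228-236 show the unmap pattern: read the current entry from the translation map, invalidate it only if it is a real device address (neither empty nor a cache address), then store the empty sentinel. A toy, compilable model of that flow; the sentinel value, cache bit, and helper names are assumptions, not pblk's actual encoding:

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Toy model of lines 228-236. ADDR_EMPTY and the cache bit are
     * illustrative; invalidate() stands in for pblk_map_invalidate(). */
    #define ADDR_EMPTY      UINT64_MAX
    #define ADDR_CACHE_BIT  (1ULL << 63)
    #define NR_LBAS         16

    static uint64_t l2p[NR_LBAS];              /* toy translation map */

    static bool addr_in_cache(uint64_t ppa) { return ppa & ADDR_CACHE_BIT; }
    static bool addr_empty(uint64_t ppa)    { return ppa == ADDR_EMPTY; }

    static void invalidate(uint64_t ppa)
    {
        printf("invalidated device ppa 0x%llx\n", (unsigned long long)ppa);
    }

    static void map_set_empty(unsigned int lba)
    {
        uint64_t ppa = l2p[lba];

        if (!addr_in_cache(ppa) && !addr_empty(ppa))
            invalidate(ppa);                   /* only real device addresses */

        l2p[lba] = ADDR_EMPTY;                 /* lba is now unmapped */
    }

    int main(void)
    {
        for (unsigned int i = 0; i < NR_LBAS; i++)
            l2p[i] = ADDR_EMPTY;

        l2p[3] = 0x1234;                       /* pretend lba 3 lives on media */
        map_set_empty(3);
        return 0;
    }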
531 struct ppa_addr *ppa = &ppa_list[i];
532 struct nvm_chk_meta *chunk = pblk_dev_ppa_to_chunk(pblk, *ppa);
533 u64 caddr = pblk_dev_ppa_to_chunk_addr(pblk, *ppa);
537 ppa, NVM_CHK_ST_OPEN);
540 ppa, NVM_CHK_ST_CLOSED);
620 /* logic error: ppa out-of-bounds. Prevent generating bad address */
805 struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, line_id);
806 int pos = pblk_ppa_to_pos(geo, ppa);
818 ppa = addr_to_gen_ppa(pblk, paddr, line_id);
819 pos = pblk_ppa_to_pos(geo, ppa);
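Lines 805-819 (together with the out-of-bounds guard commented at line 620) recompute the generic ppa and its position after every step so that positions flagged as bad can be stepped over before the address is used. A toy model of that skip loop; the geometry, bitmap, and paddr_to_pos() are invented for illustration:

    #include <stdio.h>
    #include <stdbool.h>

    /* Toy model of the skip loop at lines 805-819: each paddr is turned into
     * a position index, and positions flagged in the bad-chunk bitmap are
     * skipped before the address is used. */
    #define NR_POS   8      /* parallel units in the toy line */
    #define MIN_SECS 4      /* sectors consumed per position per step */

    static bool bad[NR_POS] = { [2] = true, [5] = true };

    /* stand-in for addr_to_gen_ppa() + pblk_ppa_to_pos() */
    static int paddr_to_pos(int paddr)
    {
        return (paddr / MIN_SECS) % NR_POS;
    }

    int main(void)
    {
        int paddr = 0;

        for (int i = 0; i < NR_POS; i++, paddr += MIN_SECS) {
            int pos = paddr_to_pos(paddr);

            while (bad[pos]) {              /* skip grown-bad positions */
                paddr += MIN_SECS;
                pos = paddr_to_pos(paddr);
            }
            printf("use paddr %d (pos %d)\n", paddr, pos);
        }
        return 0;
    }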
856 struct ppa_addr ppa)
859 rqd->ppa_addr = ppa;
865 static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
870 trace_pblk_chunk_reset(pblk_disk_name(pblk), &ppa,
873 pblk_setup_e_rq(pblk, &rqd, ppa);
888 struct ppa_addr ppa;
901 ppa = pblk->luns[bit].bppa; /* set ch and lun */
902 ppa.a.blk = line->id;
908 ret = pblk_blk_erase_sync(pblk, ppa);
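Lines 888-908 erase a line by walking its LUNs: each LUN's base address already carries the channel and LUN fields (the "set ch and lun" comment at line 901), and the line id is written into the block field before the synchronous erase is issued. A compilable sketch of how those erase addresses are composed; geometry and field layout are simplified:

    #include <stdio.h>

    /* Toy model of lines 888-908: every parallel unit contributes one chunk
     * to a line, so erasing the line means taking each LUN's base address
     * and plugging the line id into the block field. */
    struct ppa { int ch, lun, blk; };

    #define NR_LUNS 4

    int main(void)
    {
        struct ppa luns[NR_LUNS];
        int line_id = 42;

        for (int i = 0; i < NR_LUNS; i++) {
            luns[i].ch  = i % 2;              /* toy geometry: 2 channels */
            luns[i].lun = i / 2;
            luns[i].blk = -1;                 /* not meaningful in the base addr */
        }

        for (int bit = 0; bit < NR_LUNS; bit++) {
            struct ppa ppa = luns[bit];       /* set ch and lun */
            ppa.blk = line_id;                /* one chunk per LUN per line */
            printf("erase ch=%d lun=%d blk=%d\n", ppa.ch, ppa.lun, ppa.blk);
        }
        return 0;
    }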
1436 void pblk_ppa_to_line_put(struct pblk *pblk, struct ppa_addr ppa)
1440 line = pblk_ppa_to_line(pblk, ppa);
1689 int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
1696 pblk_setup_e_rq(pblk, rqd, ppa);
1702 &ppa, PBLK_CHUNK_RESET_START);
1713 pblk_ppa_to_line_id(ppa),
1714 pblk_ppa_to_pos(geo, ppa));
1897 void pblk_down_chunk(struct pblk *pblk, struct ppa_addr ppa)
1901 int pos = pblk_ppa_to_pos(geo, ppa);
1906 void pblk_down_rq(struct pblk *pblk, struct ppa_addr ppa,
1911 int pos = pblk_ppa_to_pos(geo, ppa);
1922 void pblk_up_chunk(struct pblk *pblk, struct ppa_addr ppa)
1927 int pos = pblk_ppa_to_pos(geo, ppa);
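Lines 1897-1927 reduce a ppa to a parallel-unit position and use that position to pick the per-LUN synchronization object taken around operations on the unit. A sketch of the indexing, assuming a lun-major position layout and substituting a pthread mutex for pblk's per-LUN semaphore; ppa_to_pos, down_chunk, and up_chunk are illustrative names:

    #include <pthread.h>
    #include <stdio.h>

    /* Toy model of pblk_down_chunk()/pblk_up_chunk() at lines 1897-1927:
     * the ppa's position selects the per-LUN lock for that parallel unit. */
    #define NR_CH  2
    #define NR_LUN 4                           /* LUNs per channel */
    #define NR_POS (NR_CH * NR_LUN)

    struct ppa { int ch, lun; };

    static pthread_mutex_t lun_lock[NR_POS];

    /* stand-in for pblk_ppa_to_pos(): flat index of the parallel unit */
    static int ppa_to_pos(struct ppa p)
    {
        return p.lun * NR_CH + p.ch;
    }

    static void down_chunk(struct ppa p) { pthread_mutex_lock(&lun_lock[ppa_to_pos(p)]); }
    static void up_chunk(struct ppa p)   { pthread_mutex_unlock(&lun_lock[ppa_to_pos(p)]); }

    int main(void)
    {
        struct ppa p = { .ch = 1, .lun = 2 };

        for (int i = 0; i < NR_POS; i++)
            pthread_mutex_init(&lun_lock[i], NULL);

        down_chunk(p);                         /* serialize access to this unit */
        printf("holding lock for pos %d\n", ppa_to_pos(p));
        up_chunk(p);
        return 0;
    }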
1947 void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
1963 pblk_trans_map_set(pblk, lba, ppa);
1967 void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
1971 /* Callers must ensure that the ppa points to a cache address */
1972 BUG_ON(!pblk_addr_in_cache(ppa));
1973 BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
1976 pblk_update_map(pblk, lba, ppa);
1986 /* Callers must ensure that the ppa points to a cache address */
2023 /* Callers must ensure that the ppa points to a device address */
2047 * the mapped ppa must be invalidated
2071 struct ppa_addr ppa;
2073 ppa = ppas[i] = pblk_trans_map_get(pblk, blba + i);
2076 if (!pblk_ppa_empty(ppa) && !pblk_addr_in_cache(ppa)) {
2077 struct pblk_line *line = pblk_ppa_to_line(pblk, ppa);
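Lines 2071-2077 translate each logical block of a read and, for sectors resident on the device (not empty, not cached), resolve the owning line; pblk_ppa_to_line_put() at line 1436 suggests the lookup side takes a matching line reference so the line cannot be recycled mid-read. A toy model of that lookup; the encodings, geometry, and plain integer refcount are assumptions:

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Toy model of lines 2071-2077: translate each lba in the request and,
     * for device-resident sectors, pin the owning line. */
    #define ADDR_EMPTY     UINT64_MAX
    #define ADDR_CACHE_BIT (1ULL << 63)
    #define SECS_PER_LINE  1024

    struct line { int id; int ref; };

    static struct line lines[4];
    static uint64_t l2p[64];

    static bool in_cache(uint64_t ppa) { return ppa & ADDR_CACHE_BIT; }
    static bool is_empty(uint64_t ppa) { return ppa == ADDR_EMPTY; }

    /* stand-in for pblk_ppa_to_line(): owning line from the device address */
    static struct line *ppa_to_line(uint64_t ppa)
    {
        return &lines[ppa / SECS_PER_LINE];
    }

    int main(void)
    {
        unsigned int blba = 8, nr_secs = 4;

        for (unsigned int i = 0; i < 64; i++)
            l2p[i] = ADDR_EMPTY;
        for (unsigned int i = 0; i < 4; i++)
            lines[i].id = (int)i;

        l2p[9]  = 100;                   /* on media, line 0 */
        l2p[10] = 2000;                  /* on media, line 1 */
        l2p[11] = ADDR_CACHE_BIT | 7;    /* still in the write buffer */

        for (unsigned int i = 0; i < nr_secs; i++) {
            uint64_t ppa = l2p[blba + i];

            if (!is_empty(ppa) && !in_cache(ppa)) {
                struct line *line = ppa_to_line(ppa);

                line->ref++;             /* kref_get() in the kernel */
                printf("lba %u -> line %d (ref %d)\n", blba + i, line->id, line->ref);
            }
        }
        return 0;
    }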