Lines matching refs:pblk in pblk-core.c
(each entry: pblk-core.c line number, then the matching source line)

16  * pblk-core.c - pblk's core functionality
22 #include "pblk.h"
23 #include "pblk-trace.h"
29 struct pblk *pblk = line_ws->pblk;
30 struct nvm_tgt_dev *dev = pblk->dev;
39 line = pblk_ppa_to_line(pblk, *ppa);
42 pblk_err(pblk, "failed to mark bb, line:%d, pos:%d\n",
47 mempool_free(line_ws, &pblk->gen_ws_pool);
50 static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
53 struct nvm_tgt_dev *dev = pblk->dev;
58 pblk_debug(pblk, "erase failed: line:%d, pos:%d\n", line->id, pos);
59 atomic_long_inc(&pblk->erase_failed);
63 pblk_err(pblk, "attempted to erase bb: line:%d, pos:%d\n",
75 pblk_gen_run_ws(pblk, NULL, ppa, pblk_line_mark_bb,
76 GFP_ATOMIC, pblk->bb_wq);
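
Fragments 50-76 above belong to pblk_mark_bb(), which logs an erase failure and defers the actual bad-block mark to pblk->bb_wq via the generic work-item helper. A best-effort sketch of how they fit together; lines not in the match list are filled in from context, not quoted:

    static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
                             struct ppa_addr ppa_addr)
    {
            struct nvm_tgt_dev *dev = pblk->dev;
            struct nvm_geo *geo = &dev->geo;
            struct ppa_addr *ppa;
            int pos = pblk_ppa_to_pos(geo, ppa_addr); /* filled in from context */

            pblk_debug(pblk, "erase failed: line:%d, pos:%d\n", line->id, pos);
            atomic_long_inc(&pblk->erase_failed);

            if (test_and_set_bit(pos, line->blk_bitmap))
                    pblk_err(pblk, "attempted to erase bb: line:%d, pos:%d\n",
                             line->id, pos);

            /* Completion context, hence GFP_ATOMIC; the mark itself is
             * punted to the bb workqueue. */
            ppa = kmalloc(sizeof(struct ppa_addr), GFP_ATOMIC);
            if (!ppa)
                    return;

            *ppa = ppa_addr;
            pblk_gen_run_ws(pblk, NULL, ppa, pblk_line_mark_bb,
                            GFP_ATOMIC, pblk->bb_wq);
    }
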
79 static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
81 struct nvm_tgt_dev *dev = pblk->dev;
87 line = pblk_ppa_to_line(pblk, rqd->ppa_addr);
94 trace_pblk_chunk_reset(pblk_disk_name(pblk),
98 pblk_mark_bb(pblk, line, rqd->ppa_addr);
100 trace_pblk_chunk_reset(pblk_disk_name(pblk),
106 trace_pblk_chunk_state(pblk_disk_name(pblk), &rqd->ppa_addr,
109 atomic_dec(&pblk->inflight_io);
115 struct pblk *pblk = rqd->private;
117 __pblk_end_io_erase(pblk, rqd);
118 mempool_free(rqd, &pblk->e_rq_pool);
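
Fragments 115-118 give the asynchronous erase completion callback essentially in full (only the signature is filled in from context): the request is post-processed and returned to the erase mempool.

    static void pblk_end_io_erase(struct nvm_rq *rqd)
    {
            struct pblk *pblk = rqd->private;

            __pblk_end_io_erase(pblk, rqd);
            mempool_free(rqd, &pblk->e_rq_pool);
    }
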
126 struct nvm_chk_meta *pblk_get_chunk_meta(struct pblk *pblk)
128 struct nvm_tgt_dev *dev = pblk->dev;
151 struct nvm_chk_meta *pblk_chunk_get_off(struct pblk *pblk,
155 struct nvm_tgt_dev *dev = pblk->dev;
164 void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
167 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
178 WARN_ONCE(1, "pblk: double invalidate\n");
185 move_list = pblk_line_gc_list(pblk, line);
204 void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
215 line = pblk_ppa_to_line(pblk, ppa);
216 paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);
218 __pblk_map_invalidate(pblk, line, paddr);
221 static void pblk_invalidate_range(struct pblk *pblk, sector_t slba,
226 spin_lock(&pblk->trans_lock);
230 ppa = pblk_trans_map_get(pblk, lba);
233 pblk_map_invalidate(pblk, ppa);
236 pblk_trans_map_set(pblk, lba, ppa);
238 spin_unlock(&pblk->trans_lock);
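
Fragments 221-238 are pblk_invalidate_range(), the discard path's bulk L2P clear. A sketch; the loop skeleton and the empty-PPA reset are filled in from context:

    static void pblk_invalidate_range(struct pblk *pblk, sector_t slba,
                                      unsigned int nr_secs)
    {
            sector_t lba;

            spin_lock(&pblk->trans_lock);
            for (lba = slba; lba < slba + nr_secs; lba++) {
                    struct ppa_addr ppa;

                    ppa = pblk_trans_map_get(pblk, lba);

                    /* only media addresses carry valid-sector counts to drop */
                    if (!pblk_addr_in_cache(ppa) && !pblk_ppa_empty(ppa))
                            pblk_map_invalidate(pblk, ppa);

                    pblk_ppa_set_empty(&ppa);
                    pblk_trans_map_set(pblk, lba, ppa);
            }
            spin_unlock(&pblk->trans_lock);
    }
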
241 int pblk_alloc_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd)
243 struct nvm_tgt_dev *dev = pblk->dev;
253 rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size(pblk);
254 rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size(pblk);
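
Fragments 241-254 show the trick in pblk_alloc_rqd_meta(): the PPA list and the per-sector OOB metadata share a single DMA allocation, with the PPA list starting pblk_dma_meta_size() bytes in. Sketch; the allocation call and the single-sector early-out are filled in from context:

    int pblk_alloc_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd)
    {
            struct nvm_tgt_dev *dev = pblk->dev;

            rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
                                               &rqd->dma_meta_list);
            if (!rqd->meta_list)
                    return -ENOMEM;

            if (rqd->nr_ppas == 1)
                    return 0;

            /* PPA list lives in the same DMA buffer, after the meta area */
            rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size(pblk);
            rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size(pblk);

            return 0;
    }
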
259 void pblk_free_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd)
261 struct nvm_tgt_dev *dev = pblk->dev;
269 struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type)
278 pool = &pblk->w_rq_pool;
282 pool = &pblk->r_rq_pool;
286 pool = &pblk->e_rq_pool;
297 void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)
306 pool = &pblk->w_rq_pool;
309 pool = &pblk->r_rq_pool;
312 pool = &pblk->e_rq_pool;
315 pblk_err(pblk, "trying to free unknown rqd type\n");
319 pblk_free_rqd_meta(pblk, rqd);
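
Fragments 269-319 cover pblk_alloc_rqd()/pblk_free_rqd(): one mempool per request type (write, read, erase). A sketch of the allocation side; the switch skeleton is filled in from context, and pblk_w_rq_size/pblk_g_rq_size are assumed to be the element sizes from pblk.h:

    struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type)
    {
            mempool_t *pool;
            struct nvm_rq *rqd;
            int rq_size;

            switch (type) {
            case PBLK_WRITE:
            case PBLK_WRITE_INT:
                    pool = &pblk->w_rq_pool;
                    rq_size = pblk_w_rq_size;
                    break;
            case PBLK_READ:
                    pool = &pblk->r_rq_pool;
                    rq_size = pblk_g_rq_size;
                    break;
            default:
                    pool = &pblk->e_rq_pool;
                    rq_size = pblk_g_rq_size;
            }

            rqd = mempool_alloc(pool, GFP_KERNEL);
            memset(rqd, 0, rq_size);

            return rqd;
    }
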
323 void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
335 mempool_free(page++, &pblk->page_bio_pool);
339 int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
342 struct request_queue *q = pblk->dev->q;
347 page = mempool_alloc(&pblk->page_bio_pool, flags);
351 pblk_err(pblk, "could not add page to bio\n");
352 mempool_free(page, &pblk->page_bio_pool);
359 pblk_bio_free_pages(pblk, bio, (bio->bi_vcnt - i), i);
363 void pblk_write_kick(struct pblk *pblk)
365 wake_up_process(pblk->writer_ts);
366 mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(1000));
371 struct pblk *pblk = from_timer(pblk, t, wtimer);
374 pblk_write_kick(pblk);
377 void pblk_write_should_kick(struct pblk *pblk)
379 unsigned int secs_avail = pblk_rb_read_count(&pblk->rwb);
381 if (secs_avail >= pblk->min_write_pgs_data)
382 pblk_write_kick(pblk);
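
Fragments 363-382 give both kick helpers nearly verbatim: wake the writer thread immediately and re-arm a one-second timer as a backstop, but only kick once at least one full data write unit is buffered in the ring buffer.

    void pblk_write_kick(struct pblk *pblk)
    {
            wake_up_process(pblk->writer_ts);
            mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(1000));
    }

    void pblk_write_should_kick(struct pblk *pblk)
    {
            unsigned int secs_avail = pblk_rb_read_count(&pblk->rwb);

            if (secs_avail >= pblk->min_write_pgs_data)
                    pblk_write_kick(pblk);
    }
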
385 static void pblk_wait_for_meta(struct pblk *pblk)
388 if (!atomic_read(&pblk->inflight_io))
395 static void pblk_flush_writer(struct pblk *pblk)
397 pblk_rb_flush(&pblk->rwb);
399 if (!pblk_rb_sync_count(&pblk->rwb))
402 pblk_write_kick(pblk);
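
Fragments 395-402 are pblk_flush_writer(), which flushes the ring buffer and then waits, yielding the CPU, until all synced entries drain. The loop structure below is filled in from context and is best-effort:

    static void pblk_flush_writer(struct pblk *pblk)
    {
            pblk_rb_flush(&pblk->rwb);
            do {
                    if (!pblk_rb_sync_count(&pblk->rwb))
                            break;

                    pblk_write_kick(pblk);
                    schedule();     /* yield while the writer drains */
            } while (1);
    }
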
407 struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
409 struct pblk_line_meta *lm = &pblk->lm;
410 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
412 int packed_meta = (le32_to_cpu(*line->vsc) / pblk->min_write_pgs_data)
413 * (pblk->min_write_pgs - pblk->min_write_pgs_data);
422 pblk_rl_werr_line_in(&pblk->rl);
451 trace_pblk_line_state(pblk_disk_name(pblk), line->id,
456 pblk_err(pblk, "corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
465 void pblk_discard(struct pblk *pblk, struct bio *bio)
470 pblk_invalidate_range(pblk, slba, nr_secs);
473 void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd)
475 atomic_long_inc(&pblk->write_failed);
477 pblk_print_failed_rqd(pblk, rqd, rqd->error);
481 void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd)
485 atomic_long_inc(&pblk->read_empty);
491 atomic_long_inc(&pblk->read_high_ecc);
495 atomic_long_inc(&pblk->read_failed);
498 pblk_err(pblk, "unknown read error:%d\n", rqd->error);
501 pblk_print_failed_rqd(pblk, rqd, rqd->error);
505 void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write)
507 pblk->sec_per_write = sec_per_write;
510 int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd, void *buf)
512 struct nvm_tgt_dev *dev = pblk->dev;
514 atomic_inc(&pblk->inflight_io);
517 if (pblk_check_io(pblk, rqd))
524 void pblk_check_chunk_state_update(struct pblk *pblk, struct nvm_rq *rqd)
532 struct nvm_chk_meta *chunk = pblk_dev_ppa_to_chunk(pblk, *ppa);
533 u64 caddr = pblk_dev_ppa_to_chunk_addr(pblk, *ppa);
536 trace_pblk_chunk_state(pblk_disk_name(pblk),
539 trace_pblk_chunk_state(pblk_disk_name(pblk),
544 int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd, void *buf)
546 struct nvm_tgt_dev *dev = pblk->dev;
549 atomic_inc(&pblk->inflight_io);
552 if (pblk_check_io(pblk, rqd))
560 pblk_check_chunk_state_update(pblk, rqd);
565 static int pblk_submit_io_sync_sem(struct pblk *pblk, struct nvm_rq *rqd,
571 pblk_down_chunk(pblk, ppa_list[0]);
572 ret = pblk_submit_io_sync(pblk, rqd, buf);
573 pblk_up_chunk(pblk, ppa_list[0]);
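
Fragments 565-573 are the semaphore-guarded synchronous submit in full: serialize on the chunk (LUN) semaphore of the first PPA around the I/O. Only the ppa_list declaration is filled in from context:

    static int pblk_submit_io_sync_sem(struct pblk *pblk, struct nvm_rq *rqd,
                                       void *buf)
    {
            struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
            int ret;

            pblk_down_chunk(pblk, ppa_list[0]);
            ret = pblk_submit_io_sync(pblk, rqd, buf);
            pblk_up_chunk(pblk, ppa_list[0]);

            return ret;
    }
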
578 int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
581 int max = pblk->sec_per_write;
582 int min = pblk->min_write_pgs;
585 if (skip_meta && pblk->min_write_pgs_data != pblk->min_write_pgs)
586 min = max = pblk->min_write_pgs_data;
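
Fragments 578-586 set the bounds in pblk_calc_secs(): writes are sized between min_write_pgs and sec_per_write, and when metadata is skipped on packed-meta devices both bounds collapse to min_write_pgs_data. The selection ladder below is filled in from context:

    int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
                       unsigned long secs_to_flush, bool skip_meta)
    {
            int max = pblk->sec_per_write;
            int min = pblk->min_write_pgs;
            int secs_to_sync = 0;

            if (skip_meta && pblk->min_write_pgs_data != pblk->min_write_pgs)
                    min = max = pblk->min_write_pgs_data;

            if (secs_avail >= max)
                    secs_to_sync = max;
            else if (secs_avail >= min)
                    secs_to_sync = min * (secs_avail / min);
            else if (secs_to_flush)
                    secs_to_sync = min;

            return secs_to_sync;
    }
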
598 void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
605 pblk->lm.sec_per_line, line->cur_sec);
613 u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
621 if (line->cur_sec + nr_secs > pblk->lm.sec_per_line) {
622 WARN(1, "pblk: page allocation out of bounds\n");
623 nr_secs = pblk->lm.sec_per_line - line->cur_sec;
627 pblk->lm.sec_per_line, line->cur_sec);
634 u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
642 addr = __pblk_alloc_page(pblk, line, nr_secs);
644 WARN(line->left_msecs < 0, "pblk: page allocation out of bounds\n");
650 u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line)
656 pblk->lm.sec_per_line, line->cur_sec);
662 u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
664 struct nvm_tgt_dev *dev = pblk->dev;
666 struct pblk_line_meta *lm = &pblk->lm;
677 int pblk_line_smeta_read(struct pblk *pblk, struct pblk_line *line)
679 struct pblk_line_meta *lm = &pblk->lm;
682 u64 paddr = pblk_line_smeta_start(pblk, line);
687 ret = pblk_alloc_rqd_meta(pblk, &rqd);
697 ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
699 ret = pblk_submit_io_sync(pblk, &rqd, line->smeta);
701 pblk_err(pblk, "smeta I/O submission failed: %d\n", ret);
705 atomic_dec(&pblk->inflight_io);
708 pblk_log_read_err(pblk, &rqd);
713 pblk_free_rqd_meta(pblk, &rqd);
717 static int pblk_line_smeta_write(struct pblk *pblk, struct pblk_line *line,
720 struct pblk_line_meta *lm = &pblk->lm;
723 __le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf);
729 ret = pblk_alloc_rqd_meta(pblk, &rqd);
739 struct pblk_sec_meta *meta = pblk_get_meta(pblk,
742 ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
746 ret = pblk_submit_io_sync_sem(pblk, &rqd, line->smeta);
748 pblk_err(pblk, "smeta I/O submission failed: %d\n", ret);
752 atomic_dec(&pblk->inflight_io);
755 pblk_log_write_err(pblk, &rqd);
760 pblk_free_rqd_meta(pblk, &rqd);
764 int pblk_line_emeta_read(struct pblk *pblk, struct pblk_line *line,
767 struct nvm_tgt_dev *dev = pblk->dev;
769 struct pblk_line_meta *lm = &pblk->lm;
775 int min = pblk->min_write_pgs;
787 ppa_list_buf = meta_list + pblk_dma_meta_size(pblk);
788 dma_ppa_list = dma_meta_list + pblk_dma_meta_size(pblk);
793 rq_ppas = pblk_calc_secs(pblk, left_ppas, 0, false);
805 struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, line_id);
808 if (pblk_io_aligned(pblk, rq_ppas))
813 if (pblk_boundary_paddr_checks(pblk, paddr)) {
818 ppa = addr_to_gen_ppa(pblk, paddr, line_id);
822 if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
828 ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line_id);
831 ret = pblk_submit_io_sync(pblk, &rqd, emeta_buf);
833 pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
837 atomic_dec(&pblk->inflight_io);
840 pblk_log_read_err(pblk, &rqd);
855 static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
865 static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
870 trace_pblk_chunk_reset(pblk_disk_name(pblk), &ppa,
873 pblk_setup_e_rq(pblk, &rqd, ppa);
878 ret = pblk_submit_io_sync(pblk, &rqd, NULL);
879 rqd.private = pblk;
880 __pblk_end_io_erase(pblk, &rqd);
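
Fragments 865-880 form pblk_blk_erase_sync() almost in full. Note that after the synchronous submit the completion helper is called directly rather than via the async callback; only the local rqd declaration is filled in from context:

    static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
    {
            struct nvm_rq rqd = {NULL};
            int ret;

            trace_pblk_chunk_reset(pblk_disk_name(pblk), &ppa,
                                   PBLK_CHUNK_RESET_START);

            pblk_setup_e_rq(pblk, &rqd, ppa);

            /* Erases are scheduled by the write thread, so no LUN
             * semaphore is taken here. */
            ret = pblk_submit_io_sync(pblk, &rqd, NULL);
            rqd.private = pblk;
            __pblk_end_io_erase(pblk, &rqd);

            return ret;
    }
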
885 int pblk_line_erase(struct pblk *pblk, struct pblk_line *line)
887 struct pblk_line_meta *lm = &pblk->lm;
901 ppa = pblk->luns[bit].bppa; /* set ch and lun */
908 ret = pblk_blk_erase_sync(pblk, ppa);
910 pblk_err(pblk, "failed to erase line %d\n", line->id);
951 static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
954 struct nvm_tgt_dev *dev = pblk->dev;
956 struct pblk_line_meta *lm = &pblk->lm;
957 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
972 trace_pblk_line_state(pblk_disk_name(pblk), line->id,
979 pblk_debug(pblk, "line %d is bad\n", line->id);
991 guid_copy((guid_t *)&smeta_buf->header.uuid, &pblk->instance_uuid);
1012 pblk_calc_meta_header_crc(pblk, &smeta_buf->header));
1013 smeta_buf->crc = cpu_to_le32(pblk_calc_smeta_crc(pblk, smeta_buf));
1022 pblk_calc_meta_header_crc(pblk, &emeta_buf->header));
1034 static int pblk_line_alloc_bitmaps(struct pblk *pblk, struct pblk_line *line)
1036 struct pblk_line_meta *lm = &pblk->lm;
1037 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1059 static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
1062 struct nvm_tgt_dev *dev = pblk->dev;
1064 struct pblk_line_meta *lm = &pblk->lm;
1065 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1090 if (init && pblk_line_smeta_write(pblk, line, off)) {
1091 pblk_debug(pblk, "line smeta I/O failed. Retry\n");
1120 trace_pblk_line_state(pblk_disk_name(pblk), line->id,
1125 pblk_err(pblk, "unexpected line %d is bad\n", line->id);
1133 static int pblk_prepare_new_line(struct pblk *pblk, struct pblk_line *line)
1135 struct pblk_line_meta *lm = &pblk->lm;
1136 struct nvm_tgt_dev *dev = pblk->dev;
1142 struct pblk_lun *rlun = &pblk->luns[i];
1157 static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
1159 struct pblk_line_meta *lm = &pblk->lm;
1172 blk_to_erase = pblk_prepare_new_line(pblk, line);
1174 trace_pblk_line_state(pblk_disk_name(pblk), line->id,
1186 WARN(1, "pblk: corrupted line %d, state %d\n",
1193 trace_pblk_line_state(pblk_disk_name(pblk), line->id,
1209 int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
1211 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1218 ret = pblk_line_prepare(pblk, line);
1226 ret = pblk_line_alloc_bitmaps(pblk, line);
1230 if (!pblk_line_init_bb(pblk, line, 0)) {
1235 pblk_rl_free_lines_dec(&pblk->rl, line, true);
1246 void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
1248 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1268 struct pblk *pblk = line->pblk;
1269 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1277 struct pblk_line *pblk_line_get(struct pblk *pblk)
1279 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1280 struct pblk_line_meta *lm = &pblk->lm;
1288 pblk_err(pblk, "no free lines\n");
1300 trace_pblk_line_state(pblk_disk_name(pblk), line->id,
1306 pblk_debug(pblk, "line %d is bad\n", line->id);
1310 ret = pblk_line_prepare(pblk, line);
1320 pblk_err(pblk, "failed to prepare line %d\n", line->id);
1330 static struct pblk_line *pblk_line_retry(struct pblk *pblk,
1333 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1338 retry_line = pblk_line_get(pblk);
1356 pblk_rl_free_lines_dec(&pblk->rl, line, false);
1358 if (pblk_line_erase(pblk, retry_line))
1364 static void pblk_set_space_limit(struct pblk *pblk)
1366 struct pblk_rl *rl = &pblk->rl;
1371 struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
1373 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1377 line = pblk_line_get(pblk);
1387 pblk_line_setup_metadata(line, l_mg, &pblk->lm);
1390 l_mg->data_next = pblk_line_get(pblk);
1396 pblk_set_space_limit(pblk);
1405 if (pblk_line_alloc_bitmaps(pblk, line))
1408 if (pblk_line_erase(pblk, line)) {
1409 line = pblk_line_retry(pblk, line);
1415 if (!pblk_line_init_metadata(pblk, line, NULL)) {
1416 line = pblk_line_retry(pblk, line);
1423 if (!pblk_line_init_bb(pblk, line, 1)) {
1424 line = pblk_line_retry(pblk, line);
1431 pblk_rl_free_lines_dec(&pblk->rl, line, true);
1436 void pblk_ppa_to_line_put(struct pblk *pblk, struct ppa_addr ppa)
1440 line = pblk_ppa_to_line(pblk, ppa);
1444 void pblk_rq_to_line_put(struct pblk *pblk, struct nvm_rq *rqd)
1450 pblk_ppa_to_line_put(pblk, ppa_list[i]);
1453 static void pblk_stop_writes(struct pblk *pblk, struct pblk_line *line)
1455 lockdep_assert_held(&pblk->l_mg.free_lock);
1457 pblk_set_space_limit(pblk);
1458 pblk->state = PBLK_STATE_STOPPING;
1459 trace_pblk_state(pblk_disk_name(pblk), pblk->state);
1462 static void pblk_line_close_meta_sync(struct pblk *pblk)
1464 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1465 struct pblk_line_meta *lm = &pblk->lm;
1484 ret = pblk_submit_meta_io(pblk, line);
1486 pblk_err(pblk, "sync meta line %d failed (%d)\n",
1493 pblk_wait_for_meta(pblk);
1494 flush_workqueue(pblk->close_wq);
1497 void __pblk_pipeline_flush(struct pblk *pblk)
1499 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1503 if (pblk->state == PBLK_STATE_RECOVERING ||
1504 pblk->state == PBLK_STATE_STOPPED) {
1508 pblk->state = PBLK_STATE_RECOVERING;
1509 trace_pblk_state(pblk_disk_name(pblk), pblk->state);
1512 pblk_flush_writer(pblk);
1513 pblk_wait_for_meta(pblk);
1515 ret = pblk_recov_pad(pblk);
1517 pblk_err(pblk, "could not close data on teardown(%d)\n", ret);
1521 flush_workqueue(pblk->bb_wq);
1522 pblk_line_close_meta_sync(pblk);
1525 void __pblk_pipeline_stop(struct pblk *pblk)
1527 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1530 pblk->state = PBLK_STATE_STOPPED;
1531 trace_pblk_state(pblk_disk_name(pblk), pblk->state);
1537 void pblk_pipeline_stop(struct pblk *pblk)
1539 __pblk_pipeline_flush(pblk);
1540 __pblk_pipeline_stop(pblk);
1543 struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
1545 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1557 pblk_line_setup_metadata(new, l_mg, &pblk->lm);
1565 if (pblk_line_erase(pblk, new))
1573 if (pblk_line_alloc_bitmaps(pblk, new))
1577 if (!pblk_line_init_metadata(pblk, new, cur)) {
1578 new = pblk_line_retry(pblk, new);
1585 if (!pblk_line_init_bb(pblk, new, 1)) {
1586 new = pblk_line_retry(pblk, new);
1593 pblk_rl_free_lines_dec(&pblk->rl, new, true);
1597 l_mg->data_next = pblk_line_get(pblk);
1603 pblk_stop_writes(pblk, new);
1615 static void __pblk_line_put(struct pblk *pblk, struct pblk_line *line)
1617 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1618 struct pblk_gc *gc = &pblk->gc;
1624 pblk_err(pblk, "line %d had errors during GC\n", line->id);
1625 pblk_put_line_back(pblk, line);
1631 trace_pblk_line_state(pblk_disk_name(pblk), line->id,
1637 pblk_rl_werr_line_out(&pblk->rl);
1649 pblk_rl_free_lines_inc(&pblk->rl, line);
1656 struct pblk *pblk = line_put_ws->pblk;
1659 __pblk_line_put(pblk, line);
1660 mempool_free(line_put_ws, &pblk->gen_ws_pool);
1666 struct pblk *pblk = line->pblk;
1668 __pblk_line_put(pblk, line);
1674 struct pblk *pblk = line->pblk;
1677 line_put_ws = mempool_alloc(&pblk->gen_ws_pool, GFP_ATOMIC);
1681 line_put_ws->pblk = pblk;
1686 queue_work(pblk->r_end_wq, &line_put_ws->ws);
1689 int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
1694 rqd = pblk_alloc_rqd(pblk, PBLK_ERASE);
1696 pblk_setup_e_rq(pblk, rqd, ppa);
1699 rqd->private = pblk;
1701 trace_pblk_chunk_reset(pblk_disk_name(pblk),
1707 err = pblk_submit_io(pblk, rqd, NULL);
1709 struct nvm_tgt_dev *dev = pblk->dev;
1712 pblk_err(pblk, "could not async erase line:%d,blk:%d\n",
1720 struct pblk_line *pblk_line_get_data(struct pblk *pblk)
1722 return pblk->l_mg.data_line;
1726 struct pblk_line *pblk_line_get_erase(struct pblk *pblk)
1728 return pblk->l_mg.data_next;
1736 static void pblk_line_should_sync_meta(struct pblk *pblk)
1738 if (pblk_rl_is_limit(&pblk->rl))
1739 pblk_line_close_meta_sync(pblk);
1742 void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
1744 struct nvm_tgt_dev *dev = pblk->dev;
1746 struct pblk_line_meta *lm = &pblk->lm;
1747 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1753 "pblk: corrupt closed line %d\n", line->id);
1764 move_list = pblk_line_gc_list(pblk, line);
1773 struct pblk_lun *rlun = &pblk->luns[i];
1784 trace_pblk_line_state(pblk_disk_name(pblk), line->id,
1788 void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
1790 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1791 struct pblk_line_meta *lm = &pblk->lm;
1797 memcpy(emeta_to_vsc(pblk, emeta_buf), l_mg->vsc_list, lm->vsc_list_len);
1800 wa->user = cpu_to_le64(atomic64_read(&pblk->user_wa));
1801 wa->pad = cpu_to_le64(atomic64_read(&pblk->pad_wa));
1802 wa->gc = cpu_to_le64(atomic64_read(&pblk->gc_wa));
1807 &pblk->instance_uuid);
1813 pblk_calc_meta_header_crc(pblk, &emeta_buf->header));
1817 emeta_buf->crc = cpu_to_le32(pblk_calc_emeta_crc(pblk, emeta_buf));
1832 pblk_line_should_sync_meta(pblk);
1835 static void pblk_save_lba_list(struct pblk *pblk, struct pblk_line *line)
1837 struct pblk_line_meta *lm = &pblk->lm;
1843 memcpy(w_err_gc->lba_list, emeta_to_lbas(pblk, emeta->buf),
1851 struct pblk *pblk = line_ws->pblk;
1859 pblk_save_lba_list(pblk, line);
1861 pblk_line_close(pblk, line);
1862 mempool_free(line_ws, &pblk->gen_ws_pool);
1865 void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
1871 line_ws = mempool_alloc(&pblk->gen_ws_pool, gfp_mask);
1873 line_ws->pblk = pblk;
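
Fragments 1865-1873 are pblk_gen_run_ws(), the generic work-item spawner used by pblk_mark_bb() and the line-put paths above: a pblk_line_ws carrier is drawn from the shared gen_ws_pool, the context is stashed, and the work is queued. The INIT_WORK/queue_work tail is filled in from context:

    void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
                         void (*work)(struct work_struct *), gfp_t gfp_mask,
                         struct workqueue_struct *wq)
    {
            struct pblk_line_ws *line_ws;

            line_ws = mempool_alloc(&pblk->gen_ws_pool, gfp_mask);

            line_ws->pblk = pblk;
            line_ws->line = line;
            line_ws->priv = priv;

            INIT_WORK(&line_ws->ws, work);
            queue_work(wq, &line_ws->ws);
    }
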
1881 static void __pblk_down_chunk(struct pblk *pblk, int pos)
1883 struct pblk_lun *rlun = &pblk->luns[pos];
1893 pblk_err(pblk, "taking lun semaphore timed out: err %d\n",
1897 void pblk_down_chunk(struct pblk *pblk, struct ppa_addr ppa)
1899 struct nvm_tgt_dev *dev = pblk->dev;
1903 __pblk_down_chunk(pblk, pos);
1906 void pblk_down_rq(struct pblk *pblk, struct ppa_addr ppa,
1909 struct nvm_tgt_dev *dev = pblk->dev;
1919 __pblk_down_chunk(pblk, pos);
1922 void pblk_up_chunk(struct pblk *pblk, struct ppa_addr ppa)
1924 struct nvm_tgt_dev *dev = pblk->dev;
1929 rlun = &pblk->luns[pos];
1933 void pblk_up_rq(struct pblk *pblk, unsigned long *lun_bitmap)
1935 struct nvm_tgt_dev *dev = pblk->dev;
1942 rlun = &pblk->luns[bit];
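
Fragments 1933-1942 release the per-LUN write semaphores recorded in a request's lun_bitmap. The find_next_bit() walk is filled in from context and is best-effort:

    void pblk_up_rq(struct pblk *pblk, unsigned long *lun_bitmap)
    {
            struct nvm_tgt_dev *dev = pblk->dev;
            struct nvm_geo *geo = &dev->geo;
            struct pblk_lun *rlun;
            int num_lun = geo->all_luns;
            int bit = -1;

            while ((bit = find_next_bit(lun_bitmap, num_lun, bit + 1)) < num_lun) {
                    rlun = &pblk->luns[bit];
                    up(&rlun->wr_sem);
            }
    }
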
1947 void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
1952 if (!(lba < pblk->capacity)) {
1953 WARN(1, "pblk: corrupted L2P map request\n");
1957 spin_lock(&pblk->trans_lock);
1958 ppa_l2p = pblk_trans_map_get(pblk, lba);
1961 pblk_map_invalidate(pblk, ppa_l2p);
1963 pblk_trans_map_set(pblk, lba, ppa);
1964 spin_unlock(&pblk->trans_lock);
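
Fragments 1947-1964 are pblk_update_map() nearly verbatim: bounds-check the LBA, then under trans_lock invalidate any previous on-media mapping before installing the new one. Only the declaration and the in-cache/empty test are filled in from context:

    void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
    {
            struct ppa_addr ppa_l2p;

            /* logic error: lba out-of-bounds */
            if (!(lba < pblk->capacity)) {
                    WARN(1, "pblk: corrupted L2P map request\n");
                    return;
            }

            spin_lock(&pblk->trans_lock);
            ppa_l2p = pblk_trans_map_get(pblk, lba);

            if (!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p))
                    pblk_map_invalidate(pblk, ppa_l2p);

            pblk_trans_map_set(pblk, lba, ppa);
            spin_unlock(&pblk->trans_lock);
    }
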
1967 void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
1973 BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
1976 pblk_update_map(pblk, lba, ppa);
1979 int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa_new,
1988 BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa_new)));
1992 if (!(lba < pblk->capacity)) {
1993 WARN(1, "pblk: corrupted L2P map request\n");
1997 spin_lock(&pblk->trans_lock);
1998 ppa_l2p = pblk_trans_map_get(pblk, lba);
1999 ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, gc_line->id);
2004 "pblk: corrupted GC update");
2011 pblk_trans_map_set(pblk, lba, ppa_new);
2013 spin_unlock(&pblk->trans_lock);
2017 void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
2028 atomic64_inc(&pblk->pad_wa);
2030 atomic_long_inc(&pblk->padded_wb);
2033 pblk_map_invalidate(pblk, ppa_mapped);
2038 if (!(lba < pblk->capacity)) {
2039 WARN(1, "pblk: corrupted L2P map request\n");
2043 spin_lock(&pblk->trans_lock);
2044 ppa_l2p = pblk_trans_map_get(pblk, lba);
2051 pblk_map_invalidate(pblk, ppa_mapped);
2059 pblk_trans_map_set(pblk, lba, ppa_mapped);
2061 spin_unlock(&pblk->trans_lock);
2064 int pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
2069 spin_lock(&pblk->trans_lock);
2073 ppa = ppas[i] = pblk_trans_map_get(pblk, blba + i);
2077 struct pblk_line *line = pblk_ppa_to_line(pblk, ppa);
2090 spin_unlock(&pblk->trans_lock);
2094 void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
2100 spin_lock(&pblk->trans_lock);
2105 if (!(lba < pblk->capacity)) {
2106 WARN(1, "pblk: corrupted L2P map request\n");
2109 ppas[i] = pblk_trans_map_get(pblk, lba);
2112 spin_unlock(&pblk->trans_lock);
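
Fragments 2094-2112 are the random-access L2P lookup: the same trans_lock pattern as above, but driven by an explicit lba_list and skipping ADDR_EMPTY slots. The loop skeleton is filled in from context:

    void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
                              u64 *lba_list, int nr_secs)
    {
            u64 lba;
            int i;

            spin_lock(&pblk->trans_lock);
            for (i = 0; i < nr_secs; i++) {
                    lba = lba_list[i];
                    if (lba != ADDR_EMPTY) {
                            /* logic error: lba out-of-bounds */
                            if (!(lba < pblk->capacity)) {
                                    WARN(1, "pblk: corrupted L2P map request\n");
                                    continue;
                            }
                            ppas[i] = pblk_trans_map_get(pblk, lba);
                    }
            }
            spin_unlock(&pblk->trans_lock);
    }
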
2115 void *pblk_get_meta_for_writes(struct pblk *pblk, struct nvm_rq *rqd)
2119 if (pblk_is_oob_meta_supported(pblk)) {
2133 void pblk_get_packed_meta(struct pblk *pblk, struct nvm_rq *rqd)
2139 if (pblk_is_oob_meta_supported(pblk))
2145 memcpy(pblk_get_meta(pblk, meta_list, i),