Lines matching refs:pblk in pblk-write.c
16 * pblk-write.c - pblk's write path from write buffer to media
19 #include "pblk.h"
20 #include "pblk-trace.h"
22 static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
26 struct pblk_rb *rwb = &pblk->rwb;
53 pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
57 atomic_long_add(rqd->nr_ppas, &pblk->sync_writes);
60 ret = pblk_rb_sync_advance(&pblk->rwb, c_ctx->nr_valid);
63 pblk_free_rqd(pblk, rqd, PBLK_WRITE);
68 static unsigned long pblk_end_queued_w_bio(struct pblk *pblk,
73 return pblk_end_w_bio(pblk, rqd, c_ctx);
76 static void pblk_complete_write(struct pblk *pblk, struct nvm_rq *rqd,
84 atomic_long_sub(c_ctx->nr_valid, &pblk->inflight_writes);
86 pblk_up_rq(pblk, c_ctx->lun_bitmap);
88 pos = pblk_rb_sync_init(&pblk->rwb, &flags);
90 pos = pblk_end_w_bio(pblk, rqd, c_ctx);
93 list_for_each_entry_safe(c, r, &pblk->compl_list, list) {
96 pos = pblk_end_queued_w_bio(pblk, rqd, c);
102 list_add_tail(&c_ctx->list, &pblk->compl_list);
104 pblk_rb_sync_end(&pblk->rwb, &flags);
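
Note: pblk retires writes strictly in ring order. pblk_end_w_bio() frees the pages and the request and advances the ring's sync pointer; pblk_complete_write() parks any completion that arrives ahead of the current sync position on pblk->compl_list and drains that list each time the in-order completion retires. A minimal userspace model of the idea, with invented names (not the kernel API):

    #include <stdio.h>

    #define RB_SIZE 8

    static unsigned int sync_pos;       /* next ring position to retire */
    static int completed[RB_SIZE];      /* out-of-order completion flags */

    static void complete_write(unsigned int pos)
    {
            completed[pos % RB_SIZE] = 1;

            /* Retire every contiguous completion starting at sync_pos;
             * anything past a gap stays parked, like pblk->compl_list. */
            while (completed[sync_pos % RB_SIZE]) {
                    completed[sync_pos % RB_SIZE] = 0;
                    printf("retired %u\n", sync_pos);
                    sync_pos++;         /* pblk_rb_sync_advance() step */
            }
    }

    int main(void)
    {
            complete_write(1);          /* ahead of sync_pos: parked */
            complete_write(0);          /* retires 0, then drains 1 */
            return 0;
    }
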
108 static void pblk_map_remaining(struct pblk *pblk, struct ppa_addr *ppa,
119 line = pblk_ppa_to_line(pblk, *ppa);
120 lba_list = emeta_to_lbas(pblk, line->emeta->buf);
125 paddr = pblk_dev_ppa_to_line_addr(pblk, map_ppa);
138 done = nvm_next_ppa_in_chk(pblk->dev, &map_ppa);
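
Note: on a write failure, pblk_map_remaining() walks from the failing PPA to the end of the chunk, pointing the affected L2P entries back at the write buffer and invalidating the media-side copies; nvm_next_ppa_in_chk() signals when the walk crosses the chunk boundary. A simplified model of that iteration, with an invented two-field geometry:

    #include <stdbool.h>
    #include <stdio.h>

    struct geo { int clba; };           /* sectors per chunk (assumed) */
    struct ppa { int chk; int sec; };   /* simplified flash address */

    /* Advance to the next sector in the same chunk; true when the
     * chunk is exhausted, like nvm_next_ppa_in_chk(). */
    static bool next_ppa_in_chk(const struct geo *geo, struct ppa *p)
    {
            if (++p->sec < geo->clba)
                    return false;
            p->sec = 0;
            return true;                /* crossed the chunk boundary */
    }

    int main(void)
    {
            struct geo geo = { .clba = 4 };
            struct ppa p = { .chk = 7, .sec = 1 };  /* failed at sector 1 */
            bool done = false;

            while (!done) {
                    printf("invalidate chk %d sec %d\n", p.chk, p.sec);
                    done = next_ppa_in_chk(&geo, &p);
            }
            return 0;
    }
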
147 static void pblk_prepare_resubmit(struct pblk *pblk, unsigned int sentry,
150 struct pblk_rb *rb = &pblk->rwb;
158 spin_lock(&pblk->trans_lock);
165 ppa_l2p = pblk_trans_map_get(pblk, w_ctx->lba);
179 line = pblk_ppa_to_line(pblk, w_ctx->ppa);
183 spin_unlock(&pblk->trans_lock);
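
Note: pblk_prepare_resubmit() holds pblk->trans_lock while it re-reads each failed entry's LBA in the L2P map: only entries the map still resolves to their write-buffer cache line are scheduled again; entries whose LBA was overwritten in the meantime are dropped (and, in the original, the superseded media sector is invalidated). A sketch of the re-check, with assumed types:

    #include <stdbool.h>

    struct w_entry {
            unsigned int lba;           /* logical address of the data */
            unsigned int cacheline;     /* its slot in the write buffer */
    };

    /* trans_map[lba] models pblk_trans_map_get(): the current location
     * of each LBA, here encoded as a cache-line id. */
    static bool entry_still_current(const unsigned int *trans_map,
                                    const struct w_entry *w)
    {
            /* An overwritten LBA no longer points at this cache line,
             * so resubmitting it would write stale data. */
            return trans_map[w->lba] == w->cacheline;
    }
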
186 static void pblk_queue_resubmit(struct pblk *pblk, struct pblk_c_ctx *c_ctx)
199 spin_lock(&pblk->resubmit_lock);
200 list_add_tail(&r_ctx->list, &pblk->resubmit_list);
201 spin_unlock(&pblk->resubmit_lock);
204 atomic_long_add(c_ctx->nr_valid, &pblk->recov_writes);
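
Note: pblk_queue_resubmit() hands the failed context back to the writer thread through a spinlock-protected FIFO (pblk->resubmit_list), which the writer drains before taking fresh buffer data. The same pattern in userspace form, with illustrative names and a mutex standing in for the spinlock:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct r_ctx {
            unsigned int sentry;        /* ring position of failed data */
            unsigned int nr_valid;      /* sectors to resubmit */
            struct r_ctx *next;
    };

    static pthread_mutex_t resubmit_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct r_ctx *resubmit_head;
    static struct r_ctx **resubmit_tail = &resubmit_head;

    /* Completion side: append in order (list_add_tail() in pblk). */
    static void queue_resubmit(unsigned int sentry, unsigned int nr_valid)
    {
            struct r_ctx *r = malloc(sizeof(*r));

            if (!r)
                    return;
            r->sentry = sentry;
            r->nr_valid = nr_valid;
            r->next = NULL;
            pthread_mutex_lock(&resubmit_lock);
            *resubmit_tail = r;
            resubmit_tail = &r->next;
            pthread_mutex_unlock(&resubmit_lock);
    }

    /* Writer side: failed data has priority over new buffer entries. */
    static struct r_ctx *take_resubmit(void)
    {
            struct r_ctx *r;

            pthread_mutex_lock(&resubmit_lock);
            r = resubmit_head;
            if (r) {
                    resubmit_head = r->next;
                    if (!resubmit_head)
                            resubmit_tail = &resubmit_head;
            }
            pthread_mutex_unlock(&resubmit_lock);
            return r;
    }

    int main(void)
    {
            struct r_ctx *r;

            queue_resubmit(32, 8);
            r = take_resubmit();
            if (r)
                    printf("resubmit %u sectors at pos %u\n",
                           r->nr_valid, r->sentry);
            free(r);
            return 0;
    }
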
212 struct pblk *pblk = recovery->pblk;
217 pblk_log_write_err(pblk, rqd);
219 pblk_map_remaining(pblk, ppa_list, rqd->nr_ppas);
220 pblk_queue_resubmit(pblk, c_ctx);
222 pblk_up_rq(pblk, c_ctx->lun_bitmap);
224 pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
227 pblk_free_rqd(pblk, rqd, PBLK_WRITE);
228 mempool_free(recovery, &pblk->rec_pool);
230 atomic_dec(&pblk->inflight_io);
231 pblk_write_kick(pblk);
235 static void pblk_end_w_fail(struct pblk *pblk, struct nvm_rq *rqd)
239 recovery = mempool_alloc(&pblk->rec_pool, GFP_ATOMIC);
241 pblk_err(pblk, "could not allocate recovery work\n");
245 recovery->pblk = pblk;
249 queue_work(pblk->close_wq, &recovery->ws_rec);
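
Note: pblk_end_w_fail() runs in bio completion context, where sleeping is not allowed, so it only takes a pre-sized element from rec_pool with GFP_ATOMIC and defers the actual recovery (pblk_submit_rec() above) to close_wq. A kernel-style sketch of that defer pattern; the struct layout and helper names are invented, while mempool_alloc()/mempool_free(), INIT_WORK() and queue_work() are the real APIs:

    #include <linux/mempool.h>
    #include <linux/workqueue.h>

    struct dev_ctx {                    /* stand-in for struct pblk */
            mempool_t *rec_pool;
            struct workqueue_struct *close_wq;
    };

    struct rec_entry {
            struct dev_ctx *dev;
            void *rqd;                  /* the failed request */
            struct work_struct ws_rec;
    };

    static void rec_worker(struct work_struct *work)
    {
            struct rec_entry *rec = container_of(work, struct rec_entry,
                                                 ws_rec);

            /* Process context: safe to remap sectors, queue the
             * resubmission, release per-LUN semaphores, then return
             * the element to the pool. */
            mempool_free(rec, rec->dev->rec_pool);
    }

    static void end_w_fail(struct dev_ctx *dev, void *rqd)
    {
            /* GFP_ATOMIC: no sleeping here; the mempool's reserve
             * keeps recovery possible even under memory pressure. */
            struct rec_entry *rec = mempool_alloc(dev->rec_pool,
                                                  GFP_ATOMIC);

            if (!rec)
                    return;             /* pblk only logs an error */

            rec->dev = dev;
            rec->rqd = rqd;
            INIT_WORK(&rec->ws_rec, rec_worker);
            queue_work(dev->close_wq, &rec->ws_rec);
    }
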
254 struct pblk *pblk = rqd->private;
258 pblk_end_w_fail(pblk, rqd);
262 pblk_check_chunk_state_update(pblk, rqd);
264 WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
268 pblk_complete_write(pblk, rqd, c_ctx);
269 atomic_dec(&pblk->inflight_io);
274 struct pblk *pblk = rqd->private;
281 pblk_up_chunk(pblk, ppa_list[0]);
284 pblk_log_write_err(pblk, rqd);
285 pblk_err(pblk, "metadata I/O failed. Line %d\n", line->id);
289 pblk_check_chunk_state_update(pblk, rqd);
294 pblk_gen_run_ws(pblk, line, NULL, pblk_line_close_ws,
295 GFP_ATOMIC, pblk->close_wq);
297 pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
299 atomic_dec(&pblk->inflight_io);
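
Note: pblk_end_io_write_meta() releases the chunk semaphore and, once the last emeta sectors of a line have been synced, defers the line close to close_wq (pblk_line_close_ws via pblk_gen_run_ws). The "exactly one completion triggers the close" part can be modeled with an atomic sync counter; names here are invented:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct line_model {
            atomic_int emeta_synced;    /* emeta sectors written so far */
            int emeta_secs;             /* total emeta sectors in line */
    };

    /* Returns true for exactly one caller: the completion accounting
     * for the final emeta sectors, which then schedules the close. */
    static bool emeta_done(struct line_model *line, int nr_secs)
    {
            int prev = atomic_fetch_add(&line->emeta_synced, nr_secs);

            return prev + nr_secs == line->emeta_secs;
    }
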
302 static int pblk_alloc_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
309 rqd->private = pblk;
312 return pblk_alloc_rqd_meta(pblk, rqd);
315 static int pblk_setup_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
318 struct pblk_line_meta *lm = &pblk->lm;
319 struct pblk_line *e_line = pblk_line_get_erase(pblk);
332 ret = pblk_alloc_w_rq(pblk, rqd, nr_secs, pblk_end_io_write);
339 ret = pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap,
342 ret = pblk_map_erase_rq(pblk, rqd, c_ctx->sentry, lun_bitmap,
348 static int pblk_calc_secs_to_sync(struct pblk *pblk, unsigned int secs_avail,
353 secs_to_sync = pblk_calc_secs(pblk, secs_avail, secs_to_flush, true);
359 pblk_err(pblk, "bad sector calculation (a:%d,s:%d,f:%d)\n",
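
Note: pblk_calc_secs_to_sync() wraps pblk_calc_secs() and adds the debug check at line 359. The device only accepts writes in multiples of min_write_pgs, so available sectors are rounded down to whole units unless a flush point forces a padded minimum-size write. A sketch of that rounding rule (mirroring pblk_calc_secs() as read from the source, so treat the details as an approximation):

    static int calc_secs_to_sync(int secs_avail, int secs_to_flush,
                                 int min_write_pgs, int max_write_pgs)
    {
            if (secs_avail >= max_write_pgs)
                    return max_write_pgs;   /* full-sized request */
            if (secs_avail >= min_write_pgs) /* whole units only */
                    return min_write_pgs * (secs_avail / min_write_pgs);
            if (secs_to_flush)
                    return min_write_pgs;   /* pad up for the flush */
            return 0;                       /* wait for more data */
    }
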
367 int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
369 struct nvm_tgt_dev *dev = pblk->dev;
371 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
372 struct pblk_line_meta *lm = &pblk->lm;
379 int rq_ppas = pblk->min_write_pgs;
385 rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);
393 ret = pblk_alloc_w_rq(pblk, rqd, rq_ppas, pblk_end_io_write_meta);
400 paddr = __pblk_alloc_page(pblk, meta_line, rq_ppas);
403 ppa_list[i] = addr_to_gen_ppa(pblk, paddr, id);
412 pblk_down_chunk(pblk, ppa_list[0]);
414 ret = pblk_submit_io(pblk, rqd, data);
416 pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
423 pblk_up_chunk(pblk, ppa_list[0]);
425 pblk_dealloc_page(pblk, meta_line, rq_ppas);
429 pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
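
Note: pblk_submit_meta_io() builds an internal request for the next rq_ppas sectors of the emeta region, maps them to generic PPAs, takes the per-chunk write semaphore, and submits; on failure everything is undone in reverse order. That reverse-order unwind is the classic kernel goto ladder, sketched here with stub helpers standing in for the pblk calls named in the comments:

    /* Stand-ins for the real pblk calls (see comments). */
    static int alloc_rqd(void)           { return 0; } /* pblk_alloc_rqd() */
    static int alloc_w_rq(void)          { return 0; } /* pblk_alloc_w_rq() */
    static int submit_io(void)           { return 0; } /* pblk_submit_io() */
    static void alloc_meta_pages(void)   { }  /* __pblk_alloc_page() */
    static void down_chunk(void)         { }  /* pblk_down_chunk() */
    static void up_chunk(void)           { }  /* pblk_up_chunk() */
    static void dealloc_meta_pages(void) { }  /* pblk_dealloc_page() */
    static void free_rqd(void)           { }  /* pblk_free_rqd() */

    static int submit_meta_io(void)
    {
            int ret;

            ret = alloc_rqd();
            if (ret)
                    return ret;
            ret = alloc_w_rq();
            if (ret)
                    goto fail_free_rqd;
            alloc_meta_pages();
            down_chunk();

            ret = submit_io();
            if (ret)
                    goto fail_rollback;
            return 0;

    fail_rollback:
            /* Unwind in strict reverse order of setup. */
            up_chunk();
            dealloc_meta_pages();
    fail_free_rqd:
            free_rqd();
            return ret;
    }
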
433 static inline bool pblk_valid_meta_ppa(struct pblk *pblk,
437 struct nvm_tgt_dev *dev = pblk->dev;
440 struct pblk_line *data_line = pblk_line_get_data(pblk);
446 * with regard to the number of LUNs forming the pblk instance. This
454 paddr = pblk_lookup_page(pblk, meta_line);
455 ppa = addr_to_gen_ppa(pblk, paddr, 0);
456 ppa_opt = addr_to_gen_ppa(pblk, paddr + data_line->meta_distance, 0);
469 static struct pblk_line *pblk_should_submit_meta_io(struct pblk *pblk,
472 struct pblk_line_meta *lm = &pblk->lm;
473 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
488 if (!pblk_valid_meta_ppa(pblk, meta_line, data_rqd))
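
Note: pblk_should_submit_meta_io() picks the next line with outstanding emeta and then asks pblk_valid_meta_ppa() whether writing it now would land on a LUN the in-flight data request already occupies (the comment around line 446 explains the LUN-distance reasoning); if so, the metadata write is deferred rather than serialized behind the data. The collision test reduces to a membership check, roughly:

    /* Would the metadata PPA contend with any LUN held by the
     * in-flight data request? Mapping PPAs to LUN ids is device
     * geometry; here it is abstracted into plain ints. */
    static int meta_lun_is_free(int meta_lun, const int *data_luns,
                                int nr_data_luns)
    {
            int i;

            for (i = 0; i < nr_data_luns; i++)
                    if (data_luns[i] == meta_lun)
                            return 0;   /* defer the metadata write */
            return 1;
    }
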
494 static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
503 err = pblk_setup_w_rq(pblk, rqd, &erase_ppa);
505 pblk_err(pblk, "could not setup write request: %d\n", err);
509 meta_line = pblk_should_submit_meta_io(pblk, rqd);
512 err = pblk_submit_io(pblk, rqd, NULL);
514 pblk_err(pblk, "data I/O submission failed: %d\n", err);
520 if (pblk_blk_erase_async(pblk, erase_ppa)) {
521 struct pblk_line *e_line = pblk_line_get_erase(pblk);
522 struct nvm_tgt_dev *dev = pblk->dev;
534 err = pblk_submit_meta_io(pblk, meta_line);
536 pblk_err(pblk, "metadata I/O submission failed: %d\n",
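
Note: pblk_submit_io_set() fixes the ordering: the write request is mapped first (which may select a line to erase), the metadata decision is taken before the data I/O is put in flight, and only then are the erase and the metadata I/O kicked; a failed async erase marks the block bad instead of failing the write. Sketched with stub helpers:

    #include <stdbool.h>

    /* Illustrative stand-ins for the pblk steps named in comments. */
    static int  setup_w_rq(bool *need_erase) /* pblk_setup_w_rq() */
    { *need_erase = false; return 0; }
    static bool should_submit_meta(void)     { return false; }
    static int  submit_data_io(void)         { return 0; }
    static void erase_async(void)            { } /* may mark block bad */
    static int  submit_meta_io(void)         { return 0; }

    static int submit_io_set(void)
    {
            bool need_erase;
            bool need_meta;
            int err;

            err = setup_w_rq(&need_erase); /* map data, may pick erase */
            if (err)
                    return err;

            /* Decide on metadata before the data I/O is in flight:
             * the answer depends on which LUNs the data occupies. */
            need_meta = should_submit_meta();

            err = submit_data_io();
            if (err)
                    return err;

            if (need_erase)
                    erase_async();
            if (need_meta)
                    return submit_meta_io();
            return 0;
    }
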
545 static void pblk_free_write_rqd(struct pblk *pblk, struct nvm_rq *rqd)
551 pblk_bio_free_pages(pblk, bio, c_ctx->nr_valid,
555 static int pblk_submit_write(struct pblk *pblk, int *secs_left)
566 spin_lock(&pblk->resubmit_lock);
567 resubmit = !list_empty(&pblk->resubmit_list);
568 spin_unlock(&pblk->resubmit_lock);
574 spin_lock(&pblk->resubmit_lock);
575 r_ctx = list_first_entry(&pblk->resubmit_list,
578 spin_unlock(&pblk->resubmit_lock);
583 pblk_prepare_resubmit(pblk, pos, secs_avail);
584 secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail,
593 secs_avail = pblk_rb_read_count(&pblk->rwb);
597 secs_to_flush = pblk_rb_flush_point_count(&pblk->rwb);
598 if (!secs_to_flush && secs_avail < pblk->min_write_pgs_data)
601 secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail,
603 if (secs_to_sync > pblk->max_write_pgs) {
604 pblk_err(pblk, "bad buffer sync calculation\n");
610 pos = pblk_rb_read_commit(&pblk->rwb, secs_to_com);
613 packed_meta_pgs = (pblk->min_write_pgs - pblk->min_write_pgs_data);
619 rqd = pblk_alloc_rqd(pblk, PBLK_WRITE);
622 if (pblk_rb_read_to_bio(&pblk->rwb, rqd, pos, secs_to_sync,
624 pblk_err(pblk, "corrupted write bio\n");
628 if (pblk_submit_io_set(pblk, rqd))
632 atomic_long_add(secs_to_sync, &pblk->sub_writes);
639 pblk_free_write_rqd(pblk, rqd);
642 pblk_free_rqd(pblk, rqd, PBLK_WRITE);
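
Note: pblk_submit_write() prefers the resubmit list and otherwise reads what the ring has; the subtle distinction at lines 601-610 is between secs_to_sync (the padded request size) and secs_to_com (the sectors actually consumed from the ring). A flush can make the request larger than the data present, and only real data may move the read pointer. A toy model of that commit step:

    struct rb_model {
            unsigned int read_pos;      /* ring read pointer */
    };

    static unsigned int rb_read_commit(struct rb_model *rb, int nr_entries)
    {
            unsigned int pos = rb->read_pos;

            rb->read_pos += nr_entries; /* claim entries for this pass */
            return pos;                 /* start of the claimed region */
    }

    static unsigned int commit_write(struct rb_model *rb, int secs_to_sync,
                                     int secs_avail)
    {
            /* A flush may pad the request (secs_to_sync) beyond the
             * data present; only real data (secs_to_com) leaves the
             * ring. */
            int secs_to_com = secs_to_sync > secs_avail ?
                                            secs_avail : secs_to_sync;

            return rb_read_commit(rb, secs_to_com);
    }
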
649 struct pblk *pblk = data;
655 write_failure = pblk_submit_write(pblk, &secs_left);
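
Note: pblk_write_ts() is the writer kthread's main loop: it keeps calling pblk_submit_write() while sectors remain, and parks itself interruptibly when a pass makes no progress, until pblk_write_kick() (a wake_up_process()) restarts it. A kernel-style sketch of the loop shape; kthread_should_stop(), set_current_state() and io_schedule() are the real APIs, the submit helper is a stand-in:

    #include <linux/kthread.h>
    #include <linux/sched.h>

    /* Stand-in for pblk_submit_write(): reports sectors left over. */
    static void submit_pass(void *pblk, int *secs_left)
    {
            *secs_left = 0;
    }

    static int writer_thread(void *data)
    {
            int secs_left = 0;

            while (!kthread_should_stop()) {
                    submit_pass(data, &secs_left);
                    if (secs_left)
                            continue;   /* more buffered data to write */

                    /* Nothing writable: sleep until a producer or a
                     * completion kicks the thread awake. */
                    set_current_state(TASK_INTERRUPTIBLE);
                    io_schedule();
            }
            return 0;
    }
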