Lines matching refs: req (NVMe target zoned-namespace backend, drivers/nvme/target/zns.c)
73 void nvmet_execute_identify_ctrl_zns(struct nvmet_req *req)
75 u8 zasl = req->sq->ctrl->subsys->zasl;
76 struct nvmet_ctrl *ctrl = req->sq->ctrl;
91 status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
95 nvmet_req_complete(req, status);
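For context, lines 73-95 belong to the ZNS Identify Controller handler. Below is a minimal sketch of how the handler plausibly fits around the matched lines; the kzalloc/kfree bookkeeping, the get_mdts() clamp and the NVME_SC_* status values are assumptions drawn from common nvmet conventions, not part of this listing.

    /* Sketch: report ZASL, clamped to the transport MDTS when the fabric
     * driver provides one, then copy the id structure into the host SGL.
     */
    static void identify_ctrl_zns_sketch(struct nvmet_req *req)
    {
        u8 zasl = req->sq->ctrl->subsys->zasl;
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvme_id_ctrl_zns *id;
        u16 status;

        id = kzalloc(sizeof(*id), GFP_KERNEL);
        if (!id) {
            status = NVME_SC_INTERNAL;
            goto out;
        }

        /* never advertise a Zone Append size the transport cannot carry */
        if (ctrl->ops->get_mdts)
            id->zasl = min_t(u8, ctrl->ops->get_mdts(ctrl), zasl);
        else
            id->zasl = zasl;

        status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
        kfree(id);
    out:
        nvmet_req_complete(req, status);
    }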
98 void nvmet_execute_identify_ns_zns(struct nvmet_req *req)
105 if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
106 req->error_loc = offsetof(struct nvme_identify, nsid);
117 status = nvmet_req_find_ns(req);
121 if (nvmet_ns_revalidate(req->ns)) {
122 mutex_lock(&req->ns->subsys->lock);
123 nvmet_ns_changed(req->ns->subsys, req->ns->nsid);
124 mutex_unlock(&req->ns->subsys->lock);
127 if (!bdev_is_zoned(req->ns->bdev)) {
129 req->error_loc = offsetof(struct nvme_identify, nsid);
133 zsze = (bdev_zone_sectors(req->ns->bdev) << 9) >>
134 req->ns->blksize_shift;
137 mor = bdev_max_open_zones(req->ns->bdev);
144 mar = bdev_max_active_zones(req->ns->bdev);
152 status = nvmet_copy_to_sgl(req, 0, id_zns, sizeof(*id_zns));
155 nvmet_req_complete(req, status);
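Lines 98-155 compute the ZNS Identify Namespace fields from block-layer zone geometry. The sketch below isolates that arithmetic: zone size converted from 512-byte sectors to logical blocks, and the 0's-based open/active zone limits. The helper name, the lbafe[0] slot and the U32_MAX "no limit" convention are assumptions, not part of the listing.

    /* Sketch: translate block-layer zone geometry into struct nvme_id_ns_zns. */
    static void fill_id_ns_zns_sketch(struct nvmet_req *req,
                                      struct nvme_id_ns_zns *id_zns)
    {
        u64 zsze;
        u32 mor, mar;

        /* zone size: 512-byte sectors -> bytes -> logical blocks */
        zsze = (bdev_zone_sectors(req->ns->bdev) << 9) >>
                        req->ns->blksize_shift;
        id_zns->lbafe[0].zsze = cpu_to_le64(zsze);

        /* MOR/MAR are 0's based; 0 from the block layer means "no limit" */
        mor = bdev_max_open_zones(req->ns->bdev);
        id_zns->mor = cpu_to_le32(mor ? mor - 1 : U32_MAX);

        mar = bdev_max_active_zones(req->ns->bdev);
        id_zns->mar = cpu_to_le32(mar ? mar - 1 : U32_MAX);
    }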
158 static u16 nvmet_bdev_validate_zone_mgmt_recv(struct nvmet_req *req)
160 sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);
161 u32 out_bufsize = (le32_to_cpu(req->cmd->zmr.numd) + 1) << 2;
163 if (sect >= get_capacity(req->ns->bdev->bd_disk)) {
164 req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, slba);
169 req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, numd);
173 if (req->cmd->zmr.zra != NVME_ZRA_ZONE_REPORT) {
174 req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, zra);
178 switch (req->cmd->zmr.pr) {
183 req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, pr);
187 switch (req->cmd->zmr.zrasf) {
198 req->error_loc =
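Lines 158-198 validate a Zone Management Receive command before any I/O is issued. Below is a sketch of that flow, assuming the usual NVMe status codes; note that numd is a 0's-based dword count, so (numd + 1) << 2 is the output buffer size in bytes. The trailing pr/zrasf switches are summarised in a comment rather than spelled out.

    /* Sketch of the Zone Management Receive validation flow. */
    static u16 validate_zmr_sketch(struct nvmet_req *req)
    {
        sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);
        u32 out_bufsize = (le32_to_cpu(req->cmd->zmr.numd) + 1) << 2;

        if (sect >= get_capacity(req->ns->bdev->bd_disk)) {
            req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, slba);
            return NVME_SC_LBA_RANGE | NVME_SC_DNR;
        }

        /* the buffer must at least hold the 64-byte report header */
        if (out_bufsize < sizeof(struct nvme_zone_report)) {
            req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, numd);
            return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
        }

        /* only the plain zone-report action is supported */
        if (req->cmd->zmr.zra != NVME_ZRA_ZONE_REPORT) {
            req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, zra);
            return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
        }

        /* remaining checks: pr must be full/partial (error_loc pr, line 183)
         * and zrasf must name a known state filter (error_loc zrasf, line 198)
         */
        return NVME_SC_SUCCESS;
    }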
207 struct nvmet_req *req;
235 zdesc.zcap = nvmet_sect_to_lba(rz->req->ns, z->capacity);
236 zdesc.zslba = nvmet_sect_to_lba(rz->req->ns, z->start);
237 zdesc.wp = nvmet_sect_to_lba(rz->req->ns, z->wp);
242 status = nvmet_copy_to_sgl(rz->req, rz->out_buf_offset, &zdesc,
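Lines 207-242 are part of the blkdev_report_zones() callback that turns each struct blk_zone into an NVMe zone descriptor. A sketch follows; the struct name and layout are inferred from the rz fields used in the listing, the za/zs/zt encodings are assumptions, and the zrasf state filter is omitted for brevity.

    /* Layout inferred from the listing: request, output cursor and counters. */
    struct report_zone_data_sketch {
        struct nvmet_req *req;
        u64 out_buf_offset;
        u32 out_nr_zones;
        u32 nr_zones;
        u8  zrasf;
    };

    /* Sketch: emit one NVMe zone descriptor per reported zone. */
    static int report_zone_cb_sketch(struct blk_zone *z, unsigned int i, void *d)
    {
        struct report_zone_data_sketch *rz = d;
        struct nvme_zone_descriptor zdesc = { };
        u16 status;

        if (rz->nr_zones >= rz->out_nr_zones) {
            rz->nr_zones++;        /* still counted for the report header */
            return 0;
        }

        zdesc.zcap  = nvmet_sect_to_lba(rz->req->ns, z->capacity);
        zdesc.zslba = nvmet_sect_to_lba(rz->req->ns, z->start);
        zdesc.wp    = nvmet_sect_to_lba(rz->req->ns, z->wp);
        zdesc.zs    = z->cond << 4;    /* zone state lives in the upper nibble */
        zdesc.zt    = z->type;

        status = nvmet_copy_to_sgl(rz->req, rz->out_buf_offset, &zdesc,
                                   sizeof(zdesc));
        if (status)
            return -EINVAL;

        rz->out_buf_offset += sizeof(zdesc);
        rz->nr_zones++;
        return 0;
    }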
255 static unsigned long nvmet_req_nr_zones_from_slba(struct nvmet_req *req)
257 unsigned int sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);
259 return bdev_nr_zones(req->ns->bdev) - bdev_zone_no(req->ns->bdev, sect);
262 static unsigned long get_nr_zones_from_buf(struct nvmet_req *req, u32 bufsize)
273 struct nvmet_req *req = container_of(w, struct nvmet_req, z.zmgmt_work);
274 sector_t start_sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);
275 unsigned long req_slba_nr_zones = nvmet_req_nr_zones_from_slba(req);
276 u32 out_bufsize = (le32_to_cpu(req->cmd->zmr.numd) + 1) << 2;
281 .out_nr_zones = get_nr_zones_from_buf(req, out_bufsize),
284 .zrasf = req->cmd->zmr.zrasf,
286 .req = req,
289 status = nvmet_bdev_validate_zone_mgmt_recv(req);
298 ret = blkdev_report_zones(req->ns->bdev, start_sect, req_slba_nr_zones,
309 if (req->cmd->zmr.pr)
313 status = nvmet_copy_to_sgl(req, 0, &nr_zones, sizeof(nr_zones));
316 nvmet_req_complete(req, status);
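Lines 255-316 make up the deferred report work: validate, ask the block layer for zones starting at the zone containing SLBA, then write the zone count into the report header at offset 0. The sketch below wires those pieces together, reusing the callback and struct sketched above; the error statuses and the partial-report clamp are assumptions.

    /* Sketch of the zone-report work item. */
    static void zmgmt_recv_work_sketch(struct work_struct *w)
    {
        struct nvmet_req *req = container_of(w, struct nvmet_req, z.zmgmt_work);
        sector_t start_sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);
        unsigned long nr_from_slba = nvmet_req_nr_zones_from_slba(req);
        u32 out_bufsize = (le32_to_cpu(req->cmd->zmr.numd) + 1) << 2;
        struct report_zone_data_sketch rz_data = {
            .out_nr_zones   = get_nr_zones_from_buf(req, out_bufsize),
            .out_buf_offset = sizeof(struct nvme_zone_report),
            .zrasf          = req->cmd->zmr.zrasf,
            .req            = req,
        };
        __le64 nr_zones;
        u16 status;
        int ret;

        status = nvmet_bdev_validate_zone_mgmt_recv(req);
        if (status)
            goto out;

        ret = blkdev_report_zones(req->ns->bdev, start_sect, nr_from_slba,
                                  report_zone_cb_sketch, &rz_data);
        if (ret < 0) {
            status = NVME_SC_INTERNAL;
            goto out;
        }

        /* with the partial bit set, report only what was actually copied */
        if (req->cmd->zmr.pr)
            rz_data.nr_zones = min(rz_data.nr_zones, rz_data.out_nr_zones);

        nr_zones = cpu_to_le64(rz_data.nr_zones);
        status = nvmet_copy_to_sgl(req, 0, &nr_zones, sizeof(nr_zones));
    out:
        nvmet_req_complete(req, status);
    }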
319 void nvmet_bdev_execute_zone_mgmt_recv(struct nvmet_req *req)
321 INIT_WORK(&req->z.zmgmt_work, nvmet_bdev_zone_zmgmt_recv_work);
322 queue_work(zbd_wq, &req->z.zmgmt_work);
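Lines 319-322 only queue work because blkdev_report_zones() and blkdev_zone_mgmt() can sleep and issue their own I/O, which the command submission path must not do; the Zone Management Send entry point at lines 519-522 uses the same pattern. A sketch of the deferral, with the workqueue allocation shown as an assumption (only zbd_wq itself appears in the listing).

    /* Sketch: defer blocking zone work to a dedicated workqueue. */
    static struct workqueue_struct *zbd_wq;

    static int zbd_wq_setup_sketch(void)
    {
        zbd_wq = alloc_workqueue("nvmet-zbd-wq", WQ_MEM_RECLAIM, 0);
        return zbd_wq ? 0 : -ENOMEM;
    }

    static void zone_mgmt_recv_sketch(struct nvmet_req *req)
    {
        /* runs in the submission path: hand off instead of blocking here */
        INIT_WORK(&req->z.zmgmt_work, zmgmt_recv_work_sketch);
        queue_work(zbd_wq, &req->z.zmgmt_work);
    }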
356 struct nvmet_req *req;
363 switch (zsa_req_op(data->req->cmd->zms.zsa)) {
400 static u16 nvmet_bdev_zone_mgmt_emulate_all(struct nvmet_req *req)
402 struct block_device *bdev = req->ns->bdev;
408 .req = req,
432 zsa_req_op(req->cmd->zms.zsa) | REQ_SYNC,
452 static u16 nvmet_bdev_execute_zmgmt_send_all(struct nvmet_req *req)
456 switch (zsa_req_op(req->cmd->zms.zsa)) {
458 ret = blkdev_zone_mgmt(req->ns->bdev, REQ_OP_ZONE_RESET, 0,
459 get_capacity(req->ns->bdev->bd_disk),
467 return nvmet_bdev_zone_mgmt_emulate_all(req);
470 req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa);
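Lines 452-470 dispatch the "select all" variant of Zone Management Send: resetting every zone maps to one blkdev_zone_mgmt() call over the whole capacity, while open/close/finish have no all-zones primitive and fall back to the per-zone emulation at lines 400-432. In the sketch the inline errno-to-status handling is an assumption, and the trailing GFP argument matches the blkdev_zone_mgmt() signature this listing appears to be built against (newer kernels dropped it).

    /* Sketch of the "select all" dispatch. */
    static u16 zmgmt_send_all_sketch(struct nvmet_req *req)
    {
        int ret;

        switch (zsa_req_op(req->cmd->zms.zsa)) {
        case REQ_OP_ZONE_RESET:
            /* the block layer can reset every zone in one call */
            ret = blkdev_zone_mgmt(req->ns->bdev, REQ_OP_ZONE_RESET, 0,
                                   get_capacity(req->ns->bdev->bd_disk),
                                   GFP_KERNEL);
            if (ret < 0)
                return NVME_SC_INTERNAL;
            break;
        case REQ_OP_ZONE_OPEN:
        case REQ_OP_ZONE_CLOSE:
        case REQ_OP_ZONE_FINISH:
            /* no "all zones" primitive: walk the zones and emulate */
            return nvmet_bdev_zone_mgmt_emulate_all(req);
        default:
            req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa);
            return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
        }

        return NVME_SC_SUCCESS;
    }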
479 struct nvmet_req *req = container_of(w, struct nvmet_req, z.zmgmt_work);
480 sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zms.slba);
481 enum req_op op = zsa_req_op(req->cmd->zms.zsa);
482 struct block_device *bdev = req->ns->bdev;
488 req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa);
494 if (req->cmd->zms.select_all) {
495 status = nvmet_bdev_execute_zmgmt_send_all(req);
500 req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba);
506 req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba);
516 nvmet_req_complete(req, status);
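Lines 479-516 handle the single-zone case: reject an unknown ZSA, honour select_all, range- and alignment-check the SLBA (zone sizes are a power of two, hence the mask test), then issue one zone-management operation. In the sketch the REQ_OP_LAST sentinel for unknown actions, the status codes and the GFP argument are assumptions.

    /* Sketch of the Zone Management Send work item. */
    static void zmgmt_send_work_sketch(struct work_struct *w)
    {
        struct nvmet_req *req = container_of(w, struct nvmet_req, z.zmgmt_work);
        sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zms.slba);
        enum req_op op = zsa_req_op(req->cmd->zms.zsa);
        struct block_device *bdev = req->ns->bdev;
        sector_t zone_sectors = bdev_zone_sectors(bdev);
        u16 status = NVME_SC_SUCCESS;
        int ret;

        if (op == REQ_OP_LAST) {    /* unmapped ZSA value */
            req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa);
            status = NVME_SC_ZONE_INVALID_TRANSITION | NVME_SC_DNR;
            goto out;
        }

        /* with the select-all bit set, slba is ignored */
        if (req->cmd->zms.select_all) {
            status = nvmet_bdev_execute_zmgmt_send_all(req);
            goto out;
        }

        if (sect >= get_capacity(bdev->bd_disk)) {
            req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba);
            status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
            goto out;
        }

        /* slba must point at the first block of a zone */
        if (sect & (zone_sectors - 1)) {
            req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba);
            status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
            goto out;
        }

        ret = blkdev_zone_mgmt(bdev, op, sect, zone_sectors, GFP_KERNEL);
        if (ret < 0)
            status = NVME_SC_INTERNAL;
    out:
        nvmet_req_complete(req, status);
    }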
519 void nvmet_bdev_execute_zone_mgmt_send(struct nvmet_req *req)
521 INIT_WORK(&req->z.zmgmt_work, nvmet_bdev_zmgmt_send_work);
522 queue_work(zbd_wq, &req->z.zmgmt_work);
527 struct nvmet_req *req = bio->bi_private;
530 req->cqe->result.u64 =
531 nvmet_sect_to_lba(req->ns, bio->bi_iter.bi_sector);
534 nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));
535 nvmet_req_bio_put(req, bio);
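Lines 527-535 are the Zone Append completion: on success the block layer reports the sector actually written in bio->bi_iter.bi_sector, which is converted back to an LBA and returned to the host in the CQE result (the assigned LBA). Only the BLK_STS_OK check is added here as an assumption; the rest mirrors the listing.

    /* Sketch of the zone-append completion callback. */
    static void zone_append_bio_done_sketch(struct bio *bio)
    {
        struct nvmet_req *req = bio->bi_private;

        if (bio->bi_status == BLK_STS_OK) {
            /* hand the assigned write location back in the CQE */
            req->cqe->result.u64 =
                nvmet_sect_to_lba(req->ns, bio->bi_iter.bi_sector);
        }

        nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));
        nvmet_req_bio_put(req, bio);
    }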
538 void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
540 sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);
549 if (!nvmet_check_transfer_len(req, nvmet_rw_data_len(req)))
552 if (!req->sg_cnt) {
553 nvmet_req_complete(req, 0);
557 if (sect >= get_capacity(req->ns->bdev->bd_disk)) {
558 req->error_loc = offsetof(struct nvme_rw_command, slba);
563 if (sect & (bdev_zone_sectors(req->ns->bdev) - 1)) {
564 req->error_loc = offsetof(struct nvme_rw_command, slba);
569 if (nvmet_use_inline_bvec(req)) {
570 bio = &req->z.inline_bio;
571 bio_init(bio, req->ns->bdev, req->inline_bvec,
572 ARRAY_SIZE(req->inline_bvec), opf);
574 bio = bio_alloc(req->ns->bdev, req->sg_cnt, opf, GFP_KERNEL);
579 bio->bi_private = req;
580 if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
583 for_each_sg(req->sg, sg, req->sg_cnt, sg_cnt) {
597 if (total_len != nvmet_rw_data_len(req)) {
606 nvmet_req_bio_put(req, bio);
608 nvmet_req_complete(req, status);
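Lines 538-608 build and submit the Zone Append bio: an inline bio_vec fast path versus bio_alloc(), REQ_OP_ZONE_APPEND with FUA passed through, and a page-by-page mapping of the request scatterlist with a total-length cross-check. In the sketch the opf flags, bio_add_zone_append_page() and the status codes are assumptions; the checks and the sg walk mirror the listing.

    /* Sketch of the zone-append data path. */
    static void zone_append_sketch(struct nvmet_req *req)
    {
        sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);
        const blk_opf_t opf = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE;
        unsigned int total_len = 0;
        u16 status = NVME_SC_SUCCESS;
        struct scatterlist *sg;
        struct bio *bio;
        int sg_cnt;

        if (!nvmet_check_transfer_len(req, nvmet_rw_data_len(req)))
            return;        /* request already completed on length mismatch */

        if (!req->sg_cnt) {    /* zero-length append */
            nvmet_req_complete(req, 0);
            return;
        }

        /* slba bounds and zone-start alignment checked as in lines 557-564 */

        if (nvmet_use_inline_bvec(req)) {
            bio = &req->z.inline_bio;
            bio_init(bio, req->ns->bdev, req->inline_bvec,
                     ARRAY_SIZE(req->inline_bvec), opf);
        } else {
            bio = bio_alloc(req->ns->bdev, req->sg_cnt, opf, GFP_KERNEL);
        }

        bio->bi_end_io = zone_append_bio_done_sketch;
        bio->bi_iter.bi_sector = sect;
        bio->bi_private = req;
        if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
            bio->bi_opf |= REQ_FUA;

        for_each_sg(req->sg, sg, req->sg_cnt, sg_cnt) {
            unsigned int len = sg->length;

            /* zone-append bios must not be split, so use the dedicated helper */
            if (bio_add_zone_append_page(bio, sg_page(sg), len,
                                         sg->offset) != len) {
                status = NVME_SC_INTERNAL;
                goto out_put;
            }
            total_len += len;
        }

        if (total_len != nvmet_rw_data_len(req)) {
            status = NVME_SC_INTERNAL | NVME_SC_DNR;
            goto out_put;
        }

        submit_bio(bio);
        return;

    out_put:
        nvmet_req_bio_put(req, bio);
        nvmet_req_complete(req, status);
    }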
611 u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req)
613 struct nvme_command *cmd = req->cmd;
617 req->execute = nvmet_bdev_execute_zone_append;
620 req->execute = nvmet_bdev_execute_zone_mgmt_recv;
623 req->execute = nvmet_bdev_execute_zone_mgmt_send;
626 return nvmet_bdev_parse_io_cmd(req);
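Lines 611-626 are the ZNS I/O dispatcher: the three zoned opcodes get their execute handlers and everything else (reads, writes, flush) falls through to the regular bdev parser. In the sketch the nvme_cmd_zone_* case labels are assumed; only the assigned handlers appear in the listing.

    /* Sketch of the ZNS I/O command dispatch. */
    static u16 zns_parse_io_cmd_sketch(struct nvmet_req *req)
    {
        switch (req->cmd->common.opcode) {
        case nvme_cmd_zone_append:
            req->execute = nvmet_bdev_execute_zone_append;
            return 0;
        case nvme_cmd_zone_mgmt_recv:
            req->execute = nvmet_bdev_execute_zone_mgmt_recv;
            return 0;
        case nvme_cmd_zone_mgmt_send:
            req->execute = nvmet_bdev_execute_zone_mgmt_send;
            return 0;
        default:
            /* reads, writes, flush etc. share the regular bdev path */
            return nvmet_bdev_parse_io_cmd(req);
        }
    }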