Lines matching references to req (NVMe target, file-backed namespace I/O handlers); each entry is the source line number followed by the matching line.

102 static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
105 struct kiocb *iocb = &req->f.iocb;
110 if (req->cmd->rw.opcode == nvme_cmd_write) {
111 if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
113 call_iter = req->ns->file->f_op->write_iter;
116 call_iter = req->ns->file->f_op->read_iter;
120 iov_iter_bvec(&iter, rw, req->f.bvec, nr_segs, count);
123 iocb->ki_filp = req->ns->file;
124 iocb->ki_flags = ki_flags | iocb_flags(req->ns->file);
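
The matches above, from nvmet_file_submit_bvec() (lines 102-124), show how a request becomes vectored file I/O: the target picks the backing file's ->write_iter or ->read_iter based on the NVMe opcode, checks the FUA bit on writes (line 111), wraps req->f.bvec in an iov_iter (line 120), and fills in the kiocb with the backing file and I/O flags (lines 123-124) before calling the chosen iterator. A rough user-space analogue of that pattern, vectored positional I/O with a per-write durability flag, is sketched below; the descriptor, iovec array, and the fua argument are hypothetical and RWF_DSYNC merely plays the role of the kernel's FUA handling.

#define _GNU_SOURCE
#include <sys/uio.h>

/* Illustrative only: vectored write at an explicit offset, with an optional
 * FUA-like durability flag (RWF_DSYNC stands in for the kernel-side flag). */
static ssize_t submit_vectored_write(int fd, struct iovec *iov, int nr_segs,
				     off_t pos, int fua)
{
	return pwritev2(fd, iov, nr_segs, pos, fua ? RWF_DSYNC : 0);
}
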
131 struct nvmet_req *req = container_of(iocb, struct nvmet_req, f.iocb);
134 if (req->f.bvec != req->inline_bvec) {
135 if (likely(req->f.mpool_alloc == false))
136 kfree(req->f.bvec);
138 mempool_free(req->f.bvec, req->ns->bvec_pool);
141 if (unlikely(ret != req->transfer_len))
142 status = errno_to_nvme_status(req, ret);
143 nvmet_req_complete(req, status);
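
Lines 131-143 are the completion side: nvmet_file_io_done() recovers the request from the embedded kiocb with container_of(), releases the bvec array (kfree() for a normal allocation, mempool_free() when it came from the emergency pool, nothing when the inline array was used), turns a short transfer into an NVMe status, and completes the request. A minimal, self-contained illustration of the container_of() step follows; fake_req and fake_iocb are invented stand-in types, not nvmet structures.

#include <stddef.h>
#include <stdio.h>

/* Same pointer arithmetic the kernel macro performs. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_iocb { long ki_pos; };

struct fake_req {
	int id;
	struct fake_iocb iocb;	/* embedded, like req->f.iocb */
};

static void io_done(struct fake_iocb *iocb)
{
	struct fake_req *req = container_of(iocb, struct fake_req, iocb);

	printf("completing request %d\n", req->id);
}

int main(void)
{
	struct fake_req r = { .id = 42, .iocb = { .ki_pos = 0 } };

	io_done(&r.iocb);	/* prints: completing request 42 */
	return 0;
}
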
146 static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
148 ssize_t nr_bvec = req->sg_cnt;
157 if (req->f.mpool_alloc && nr_bvec > NVMET_MAX_MPOOL_BVEC)
160 pos = le64_to_cpu(req->cmd->rw.slba) << req->ns->blksize_shift;
161 if (unlikely(pos + req->transfer_len > req->ns->size)) {
162 nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
166 memset(&req->f.iocb, 0, sizeof(struct kiocb));
167 for_each_sg(req->sg, sg, req->sg_cnt, i) {
168 nvmet_file_init_bvec(&req->f.bvec[bv_cnt], sg);
169 len += req->f.bvec[bv_cnt].bv_len;
170 total_len += req->f.bvec[bv_cnt].bv_len;
177 ret = nvmet_file_submit_bvec(req, pos, bv_cnt, len, 0);
188 if (WARN_ON_ONCE(total_len != req->transfer_len)) {
203 req->f.iocb.ki_complete = nvmet_file_io_done;
205 ret = nvmet_file_submit_bvec(req, pos, bv_cnt, total_len, ki_flags);
226 nvmet_file_io_done(&req->f.iocb, ret, 0);
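
nvmet_file_execute_io() (lines 146-226) does the real work: it converts the command's starting LBA into a byte offset by shifting with the namespace block-size shift (line 160), rejects transfers that would run past the backing file with ENOSPC (lines 161-162), builds one bio_vec per scatterlist element (lines 167-168), caps how many bvecs are submitted at once when the array came from the mempool (lines 157, 177), and on the synchronous path finishes by calling nvmet_file_io_done() directly (line 226). The offset math and the bounds check can be stated as a tiny pure function, sketched below with hypothetical names.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative only: blksize_shift is log2 of the logical block size
 * (e.g. 12 for 4 KiB blocks), mirroring req->ns->blksize_shift. */
static bool rw_in_bounds(uint64_t slba, unsigned int blksize_shift,
			 uint64_t transfer_len, uint64_t ns_size,
			 uint64_t *pos)
{
	*pos = slba << blksize_shift;		/* byte offset into the file */

	return *pos + transfer_len <= ns_size;	/* false -> fail with ENOSPC */
}
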
232 struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
234 nvmet_file_execute_io(req, 0);
237 static void nvmet_file_submit_buffered_io(struct nvmet_req *req)
239 INIT_WORK(&req->f.work, nvmet_file_buffered_io_work);
240 queue_work(buffered_io_wq, &req->f.work);
243 static void nvmet_file_execute_rw(struct nvmet_req *req)
245 ssize_t nr_bvec = req->sg_cnt;
247 if (!nvmet_check_transfer_len(req, nvmet_rw_data_len(req)))
250 if (!req->sg_cnt || !nr_bvec) {
251 nvmet_req_complete(req, 0);
256 req->f.bvec = kmalloc_array(nr_bvec, sizeof(struct bio_vec),
259 req->f.bvec = req->inline_bvec;
261 if (unlikely(!req->f.bvec)) {
263 req->f.bvec = mempool_alloc(req->ns->bvec_pool, GFP_KERNEL);
264 req->f.mpool_alloc = true;
266 req->f.mpool_alloc = false;
268 if (req->ns->buffered_io) {
269 if (likely(!req->f.mpool_alloc) &&
270 (req->ns->file->f_mode & FMODE_NOWAIT) &&
271 nvmet_file_execute_io(req, IOCB_NOWAIT))
273 nvmet_file_submit_buffered_io(req);
275 nvmet_file_execute_io(req, 0);
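
nvmet_file_execute_rw() (lines 243-275) sets that path up. Zero-length requests complete immediately (lines 250-251), and the bvec array is sized to req->sg_cnt with a three-step strategy: the inline array embedded in the request when it is large enough (line 259), kmalloc_array() otherwise (line 256), and the preallocated mempool as a last resort when that fails (line 263), with req->f.mpool_alloc recording which case applied. Dispatch then prefers a non-blocking IOCB_NOWAIT attempt for buffered namespaces whose file supports FMODE_NOWAIT, falls back to the buffered-I/O workqueue, and issues direct I/O inline. The allocation fallback is the interesting pattern; a stand-alone sketch with invented names follows.

#include <stdbool.h>
#include <stdlib.h>

#define INLINE_SEGS 8			/* invented; plays the role of the inline bvec array */

struct seg { void *base; size_t len; };

struct io_ctx {
	struct seg inline_segs[INLINE_SEGS];
	struct seg *segs;
	bool from_reserve;
};

/* Illustrative only: use the inline array if it fits, the heap otherwise,
 * and a caller-provided emergency reserve when the heap allocation fails
 * (the role the bvec mempool plays for the nvmet file backend). */
static bool alloc_segs(struct io_ctx *ctx, size_t nr, struct seg *reserve)
{
	ctx->from_reserve = false;

	if (nr <= INLINE_SEGS) {
		ctx->segs = ctx->inline_segs;
		return true;
	}

	ctx->segs = calloc(nr, sizeof(*ctx->segs));
	if (ctx->segs)
		return true;

	ctx->segs = reserve;		/* guaranteed forward progress */
	ctx->from_reserve = true;
	return reserve != NULL;
}
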
278 u16 nvmet_file_flush(struct nvmet_req *req)
280 return errno_to_nvme_status(req, vfs_fsync(req->ns->file, 1));
285 struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
287 nvmet_req_complete(req, nvmet_file_flush(req));
290 static void nvmet_file_execute_flush(struct nvmet_req *req)
292 if (!nvmet_check_transfer_len(req, 0))
294 INIT_WORK(&req->f.work, nvmet_file_flush_work);
295 schedule_work(&req->f.work);
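
The flush path (lines 278-295) is short: nvmet_file_flush() maps vfs_fsync(file, 1), i.e. data-sync semantics, to an NVMe status, and nvmet_file_execute_flush() defers it to a work item so the blocking sync runs in process context. The user-space counterpart of that vfs_fsync(..., 1) call is fdatasync(), sketched below with a hypothetical descriptor.

#include <unistd.h>
#include <errno.h>

/* Illustrative only: datasync flush of a backing file, the user-space
 * counterpart of vfs_fsync(req->ns->file, 1). Returns 0 or -errno. */
static int flush_backing_file(int fd)
{
	return fdatasync(fd) ? -errno : 0;
}
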
298 static void nvmet_file_execute_discard(struct nvmet_req *req)
307 for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
308 status = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
313 offset = le64_to_cpu(range.slba) << req->ns->blksize_shift;
315 len <<= req->ns->blksize_shift;
316 if (offset + len > req->ns->size) {
317 req->error_slba = le64_to_cpu(range.slba);
318 status = errno_to_nvme_status(req, -ENOSPC);
322 ret = vfs_fallocate(req->ns->file, mode, offset, len);
324 req->error_slba = le64_to_cpu(range.slba);
325 status = errno_to_nvme_status(req, ret);
330 nvmet_req_complete(req, status);
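
nvmet_file_execute_discard() (lines 298-330) walks the nr+1 DSM ranges the host supplied (the NVMe range count is zero-based, hence the <= on line 307), copies each range out of the SGL, shifts its SLBA and length into byte units, bounds-checks it against the namespace size, and punches it out of the backing file with vfs_fallocate(); the offending SLBA is recorded in req->error_slba on failure. The fallocate mode is not visible in these matches, but discarding a byte range from user space uses hole punching with the size kept unchanged, as sketched below (descriptor and range are hypothetical).

#define _GNU_SOURCE
#include <fcntl.h>
#include <errno.h>

/* Illustrative only: discard a byte range by punching a hole; the file size
 * must not change, hence FALLOC_FL_KEEP_SIZE. */
static int discard_range(int fd, off_t offset, off_t len)
{
	int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;

	return fallocate(fd, mode, offset, len) ? -errno : 0;
}
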
335 struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
337 switch (le32_to_cpu(req->cmd->dsm.attributes)) {
339 nvmet_file_execute_discard(req);
345 nvmet_req_complete(req, 0);
350 static void nvmet_file_execute_dsm(struct nvmet_req *req)
352 if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))
354 INIT_WORK(&req->f.work, nvmet_file_dsm_work);
355 schedule_work(&req->f.work);
360 struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
361 struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
367 offset = le64_to_cpu(write_zeroes->slba) << req->ns->blksize_shift;
369 req->ns->blksize_shift);
371 if (unlikely(offset + len > req->ns->size)) {
372 nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
376 ret = vfs_fallocate(req->ns->file, mode, offset, len);
377 nvmet_req_complete(req, ret < 0 ? errno_to_nvme_status(req, ret) : 0);
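
The Write Zeroes handler (lines 360-377) follows the same shape: the SLBA is shifted into a byte offset (line 367), the zero-based block count is converted to a byte length using the same shift (line 369), the range is bounds-checked against the namespace size, and the work is handed to vfs_fallocate(), with any error translated to an NVMe status on completion (line 377). From user space, zeroing a range without changing the file size is typically done as below; the only difference from the discard sketch above is the fallocate mode, and the descriptor and range are again hypothetical.

#define _GNU_SOURCE
#include <fcntl.h>
#include <errno.h>

/* Illustrative only: zero a byte range in place; FALLOC_FL_KEEP_SIZE keeps
 * the file length unchanged, matching a namespace whose size is fixed. */
static int zero_range(int fd, off_t offset, off_t len)
{
	int mode = FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE;

	return fallocate(fd, mode, offset, len) ? -errno : 0;
}
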
380 static void nvmet_file_execute_write_zeroes(struct nvmet_req *req)
382 if (!nvmet_check_transfer_len(req, 0))
384 INIT_WORK(&req->f.work, nvmet_file_write_zeroes_work);
385 schedule_work(&req->f.work);
388 u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
390 struct nvme_command *cmd = req->cmd;
395 req->execute = nvmet_file_execute_rw;
398 req->execute = nvmet_file_execute_flush;
401 req->execute = nvmet_file_execute_dsm;
404 req->execute = nvmet_file_execute_write_zeroes;
408 cmd->common.opcode, req->sq->qid);
409 req->error_loc = offsetof(struct nvme_common_command, opcode);
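
Finally, nvmet_file_parse_io_cmd() (lines 388-409) is the dispatcher: it switches on the command opcode and installs the matching ->execute handler on the request (read/write, flush, DSM, write zeroes); an unrecognized opcode is logged together with the queue id (line 408) and the error location is pointed at the opcode field (line 409). The pattern is plain dispatch through a function pointer, sketched below with invented opcode values, handlers, and request type.

#include <stdint.h>
#include <stdio.h>

/* Invented opcodes and request type, for illustration only. */
enum { OP_READ = 0x02, OP_WRITE = 0x01, OP_FLUSH = 0x00 };

struct request {
	uint8_t opcode;
	void (*execute)(struct request *req);
};

static void execute_rw(struct request *req)    { (void)req; puts("rw"); }
static void execute_flush(struct request *req) { (void)req; puts("flush"); }

static int parse_io_cmd(struct request *req)
{
	switch (req->opcode) {
	case OP_READ:
	case OP_WRITE:
		req->execute = execute_rw;
		return 0;
	case OP_FLUSH:
		req->execute = execute_flush;
		return 0;
	default:
		fprintf(stderr, "unhandled opcode 0x%02x\n", req->opcode);
		return -1;
	}
}
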