Lines Matching refs:req
131 * from the req->port address in case the port in question listens
134 static void nvmet_set_disc_traddr(struct nvmet_req *req, struct nvmet_port *port,
137 if (req->ops->disc_traddr)
138 req->ops->disc_traddr(req, port, traddr);
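
These matches appear to come from the NVMe target discovery handling (drivers/nvme/target/discovery.c). Lines 134-138 only show the transport-override branch of the traddr helper; a minimal sketch of the whole helper, with the memcpy fallback and NVMF_TRADDR_SIZE assumed from the upstream driver rather than taken from the matched lines:

/*
 * Let the transport override the advertised address (e.g. when the port
 * listens on a wildcard address); otherwise fall back to the configured
 * port address. The else branch is an assumption, not part of the match.
 */
static void nvmet_set_disc_traddr(struct nvmet_req *req, struct nvmet_port *port,
		char *traddr)
{
	if (req->ops->disc_traddr)
		req->ops->disc_traddr(req, port, traddr);
	else
		memcpy(traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
}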
143 static size_t discovery_log_entries(struct nvmet_req *req)
145 struct nvmet_ctrl *ctrl = req->sq->ctrl;
150 list_for_each_entry(p, &req->port->subsystems, entry) {
155 list_for_each_entry(r, &req->port->referrals, entry)
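
Lines 143-155 show the per-port entry counting used to size the discovery log buffer: one entry per subsystem exported on the port (filtered by host access) plus one per referral. A sketch, assuming nvmet_host_allowed() as the access filter (the filter is not visible in the matched lines):

static size_t discovery_log_entries(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_subsys_link *p;
	struct nvmet_port *r;
	size_t entries = 0;

	/* One discovery log entry per subsystem this host may access ... */
	list_for_each_entry(p, &req->port->subsystems, entry) {
		if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
			continue;
		entries++;
	}
	/* ... plus one per referral configured on the port. */
	list_for_each_entry(r, &req->port->referrals, entry)
		entries++;
	return entries;
}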
160 static void nvmet_execute_disc_get_log_page(struct nvmet_req *req)
163 struct nvmet_ctrl *ctrl = req->sq->ctrl;
165 u64 offset = nvmet_get_log_page_offset(req->cmd);
166 size_t data_len = nvmet_get_log_page_len(req->cmd);
174 if (!nvmet_check_transfer_len(req, data_len))
177 if (req->cmd->get_log_page.lid != NVME_LOG_DISC) {
178 req->error_loc =
186 req->error_loc =
198 alloc_len = sizeof(*hdr) + entry_size * discovery_log_entries(req);
207 list_for_each_entry(p, &req->port->subsystems, entry) {
213 nvmet_set_disc_traddr(req, req->port, traddr);
214 nvmet_format_discovery_entry(hdr, req->port,
220 list_for_each_entry(r, &req->port->referrals, entry) {
232 nvmet_clear_aen_bit(req, NVME_AEN_BIT_DISC_CHANGE);
236 status = nvmet_copy_to_sgl(req, 0, buffer + offset, data_len);
239 nvmet_req_complete(req, status);
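
Lines 160-239 are the Get Log Page (Discovery) handler. The flow visible in the matches: validate the transfer length, reject any log identifier other than NVME_LOG_DISC, size a buffer with discovery_log_entries(), emit one formatted entry per allowed subsystem (using nvmet_set_disc_traddr() for the address) and per referral, clear the discovery-change AEN bit, then copy the requested window to the host SGL. A condensed sketch; configuration locking, the dword-alignment check on the offset, and the exact status codes are elided or assumed:

static void nvmet_execute_disc_get_log_page(struct nvmet_req *req)
{
	const int entry_size = sizeof(struct nvmf_disc_rsp_page_entry);
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u64 offset = nvmet_get_log_page_offset(req->cmd);
	size_t data_len = nvmet_get_log_page_len(req->cmd);
	struct nvmf_disc_rsp_page_hdr *hdr;
	struct nvmet_subsys_link *p;
	struct nvmet_port *r;
	u32 numrec = 0;
	u16 status = 0;
	size_t alloc_len;
	void *buffer;

	if (!nvmet_check_transfer_len(req, data_len))
		return;

	if (req->cmd->get_log_page.lid != NVME_LOG_DISC) {
		req->error_loc =
			offsetof(struct nvme_get_log_page_command, lid);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto out;
	}

	/* Header plus one entry per allowed subsystem and referral. */
	alloc_len = sizeof(*hdr) + entry_size * discovery_log_entries(req);
	buffer = kzalloc(alloc_len, GFP_KERNEL);
	if (!buffer) {
		status = NVME_SC_INTERNAL;
		goto out;
	}
	hdr = buffer;

	list_for_each_entry(p, &req->port->subsystems, entry) {
		char traddr[NVMF_TRADDR_SIZE];

		if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
			continue;
		nvmet_set_disc_traddr(req, req->port, traddr);
		nvmet_format_discovery_entry(hdr, req->port,
				p->subsys->subsysnqn, traddr,
				NVME_NQN_NVME, numrec++);
	}

	list_for_each_entry(r, &req->port->referrals, entry) {
		nvmet_format_discovery_entry(hdr, r,
				NVME_DISC_SUBSYS_NAME, r->disc_addr.traddr,
				NVME_NQN_DISC, numrec++);
	}

	hdr->genctr = cpu_to_le64(nvmet_genctr);
	hdr->numrec = cpu_to_le64(numrec);
	hdr->recfmt = cpu_to_le16(0);

	/* A read of the log clears the pending discovery-change AEN. */
	nvmet_clear_aen_bit(req, NVME_AEN_BIT_DISC_CHANGE);

	status = nvmet_copy_to_sgl(req, 0, buffer + offset, data_len);
	kfree(buffer);
out:
	nvmet_req_complete(req, status);
}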
242 static void nvmet_execute_disc_identify(struct nvmet_req *req)
244 struct nvmet_ctrl *ctrl = req->sq->ctrl;
249 if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
252 if (req->cmd->identify.cns != NVME_ID_CNS_CTRL) {
253 req->error_loc = offsetof(struct nvme_identify, cns);
284 if (req->port->inline_data_size)
291 status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
295 nvmet_req_complete(req, status);
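
Lines 242-295 are the discovery controller's Identify handler: it accepts only CNS 01h (Identify Controller), fills in an nvme_id_ctrl page, and advertises in-capsule SGL data only when the port is configured with a non-zero inline_data_size. A heavily condensed sketch; most identity fields (version, keep-alive granularity, firmware string, subnqn, ...) are elided:

static void nvmet_execute_disc_identify(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_id_ctrl *id;
	u16 status = 0;

	if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
		return;

	if (req->cmd->identify.cns != NVME_ID_CNS_CTRL) {
		req->error_loc = offsetof(struct nvme_identify, cns);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto out;
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	id->cntlid = cpu_to_le16(ctrl->cntlid);
	/* ... version, kas, firmware revision, subnqn, etc. elided ... */

	id->sgls = cpu_to_le32(1 << 0);		/* SGLs always supported */
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(1 << 20);	/* in-capsule data */

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
	kfree(id);
out:
	nvmet_req_complete(req, status);
}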
298 static void nvmet_execute_disc_set_features(struct nvmet_req *req)
300 u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
303 if (!nvmet_check_transfer_len(req, 0))
308 stat = nvmet_set_feat_kato(req);
311 stat = nvmet_set_feat_async_event(req,
315 req->error_loc =
321 nvmet_req_complete(req, stat);
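
Lines 298-321 show Set Features: on a discovery controller only the Keep Alive Timer and Asynchronous Event Configuration features are writable; anything else sets error_loc to cdw10 and fails. A sketch, with the AEN configuration mask name taken as an assumption from the upstream driver:

static void nvmet_execute_disc_set_features(struct nvmet_req *req)
{
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 stat;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_KATO:
		stat = nvmet_set_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		stat = nvmet_set_feat_async_event(req,
						  NVMET_DISC_AEN_CFG_OPTIONAL);
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_common_command, cdw10);
		stat = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, stat);
}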
324 static void nvmet_execute_disc_get_features(struct nvmet_req *req)
326 u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
329 if (!nvmet_check_transfer_len(req, 0))
334 nvmet_get_feat_kato(req);
337 nvmet_get_feat_async_event(req);
340 req->error_loc =
346 nvmet_req_complete(req, stat);
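
Lines 324-346 are the matching Get Features handler, which mirrors the set path for the same two features:

static void nvmet_execute_disc_get_features(struct nvmet_req *req)
{
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 stat = 0;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_KATO:
		nvmet_get_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		nvmet_get_feat_async_event(req);
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_common_command, cdw10);
		stat = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, stat);
}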
349 u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
351 struct nvme_command *cmd = req->cmd;
353 if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
356 req->error_loc =
363 req->execute = nvmet_execute_disc_set_features;
366 req->execute = nvmet_execute_disc_get_features;
369 req->execute = nvmet_execute_async_event;
372 req->execute = nvmet_execute_keep_alive;
375 req->execute = nvmet_execute_disc_get_log_page;
378 req->execute = nvmet_execute_disc_identify;
382 req->error_loc = offsetof(struct nvme_common_command, opcode);
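
Lines 349-382 are the discovery command parser: commands are rejected until the controller is ready (CSTS.RDY), and only Set/Get Features, Async Event Request, Keep Alive, Get Log Page, and Identify are dispatched, in that order, to the handlers above. A sketch; the log messages and exact status codes are approximations:

u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
		pr_err("got cmd %d while not ready\n", cmd->common.opcode);
		req->error_loc =
			offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}

	switch (cmd->common.opcode) {
	case nvme_admin_set_features:
		req->execute = nvmet_execute_disc_set_features;
		return 0;
	case nvme_admin_get_features:
		req->execute = nvmet_execute_disc_get_features;
		return 0;
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		return 0;
	case nvme_admin_keep_alive:
		req->execute = nvmet_execute_keep_alive;
		return 0;
	case nvme_admin_get_log_page:
		req->execute = nvmet_execute_disc_get_log_page;
		return 0;
	case nvme_admin_identify:
		req->execute = nvmet_execute_disc_identify;
		return 0;
	default:
		pr_debug("unhandled cmd %d\n", cmd->common.opcode);
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
}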