// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe admin command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/rculist.h>
#include <linux/part_stat.h>

#include <generated/utsrelease.h>
#include <asm/unaligned.h>
#include "nvmet.h"

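/*
 * Illustrative note: NUMD is the 0's based number of dwords to transfer,
 * split across NUMDU (upper 16 bits) and NUMDL (lower 16 bits).  A command
 * with NUMDU = 0 and NUMDL = 0x3ff therefore asks for 0x400 dwords, i.e. a
 * 4096 byte log page transfer.
 */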
u32 nvmet_get_log_page_len(struct nvme_command *cmd)
{
	u32 len = le16_to_cpu(cmd->get_log_page.numdu);

	len <<= 16;
	len += le16_to_cpu(cmd->get_log_page.numdl);
	/* NUMD is a 0's based value */
	len += 1;
	len *= sizeof(u32);

	return len;
}

static u32 nvmet_feat_data_len(struct nvmet_req *req, u32 cdw10)
{
	switch (cdw10 & 0xff) {
	case NVME_FEAT_HOST_ID:
		return sizeof(req->sq->ctrl->hostid);
	default:
		return 0;
	}
}

u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
{
	return le64_to_cpu(cmd->get_log_page.lpo);
}

static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
{
	nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->transfer_len));
}

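/*
 * The Error Information log is returned most recent entry first: start at
 * the slot of the last recorded error (err_counter modulo the number of
 * slots) and walk the circular slot array backwards, wrapping at slot 0.
 */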
static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	unsigned long flags;
	off_t offset = 0;
	u64 slot;
	u64 i;

	spin_lock_irqsave(&ctrl->error_lock, flags);
	slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS;

	for (i = 0; i < NVMET_ERROR_LOG_SLOTS; i++) {
		if (nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
				sizeof(struct nvme_error_slot)))
			break;

		if (slot == 0)
			slot = NVMET_ERROR_LOG_SLOTS - 1;
		else
			slot--;
		offset += sizeof(struct nvme_error_slot);
	}
	spin_unlock_irqrestore(&ctrl->error_lock, flags);
	nvmet_req_complete(req, 0);
}

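/*
 * Data Units Read/Written in the SMART / Health log are reported in units
 * of 1000 512-byte sectors, rounded up, hence the DIV_ROUND_UP(sectors,
 * 1000) on the block layer per-partition counters below.
 */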
static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	struct nvmet_ns *ns;
	u64 host_reads, host_writes, data_units_read, data_units_written;

	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid);
	if (!ns) {
		pr_err("Could not find namespace id: %u\n",
				le32_to_cpu(req->cmd->get_log_page.nsid));
		req->error_loc = offsetof(struct nvme_rw_command, nsid);
		return NVME_SC_INVALID_NS;
	}

	/* we don't have the right data for file backed ns */
	if (!ns->bdev)
		goto out;

	host_reads = part_stat_read(ns->bdev->bd_part, ios[READ]);
	data_units_read = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part,
		sectors[READ]), 1000);
	host_writes = part_stat_read(ns->bdev->bd_part, ios[WRITE]);
	data_units_written = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part,
		sectors[WRITE]), 1000);

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
out:
	nvmet_put_namespace(ns);

	return NVME_SC_SUCCESS;
}

static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	u64 host_reads = 0, host_writes = 0;
	u64 data_units_read = 0, data_units_written = 0;
	struct nvmet_ns *ns;
	struct nvmet_ctrl *ctrl;
	unsigned long idx;

	ctrl = req->sq->ctrl;
	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
		/* we don't have the right data for file backed ns */
		if (!ns->bdev)
			continue;
		host_reads += part_stat_read(ns->bdev->bd_part, ios[READ]);
		data_units_read += DIV_ROUND_UP(
			part_stat_read(ns->bdev->bd_part, sectors[READ]), 1000);
		host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]);
		data_units_written += DIV_ROUND_UP(
			part_stat_read(ns->bdev->bd_part, sectors[WRITE]), 1000);
	}

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

	return NVME_SC_SUCCESS;
}

static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
{
	struct nvme_smart_log *log;
	u16 status = NVME_SC_INTERNAL;
	unsigned long flags;

	if (req->transfer_len != sizeof(*log))
		goto out;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		goto out;

	if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
		status = nvmet_get_smart_log_all(req, log);
	else
		status = nvmet_get_smart_log_nsid(req, log);
	if (status)
		goto out_free_log;

	spin_lock_irqsave(&req->sq->ctrl->error_lock, flags);
	put_unaligned_le64(req->sq->ctrl->err_counter,
			&log->num_err_log_entries);
	spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags);

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
out_free_log:
	kfree(log);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
{
	u16 status = NVME_SC_INTERNAL;
	struct nvme_effects_log *log;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		goto out;

	log->acs[nvme_admin_get_log_page]	= cpu_to_le32(1 << 0);
	log->acs[nvme_admin_identify]		= cpu_to_le32(1 << 0);
	log->acs[nvme_admin_abort_cmd]		= cpu_to_le32(1 << 0);
	log->acs[nvme_admin_set_features]	= cpu_to_le32(1 << 0);
	log->acs[nvme_admin_get_features]	= cpu_to_le32(1 << 0);
	log->acs[nvme_admin_async_event]	= cpu_to_le32(1 << 0);
	log->acs[nvme_admin_keep_alive]		= cpu_to_le32(1 << 0);

	log->iocs[nvme_cmd_read]		= cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_write]		= cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_flush]		= cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_dsm]			= cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_write_zeroes]	= cpu_to_le32(1 << 0);

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));

	kfree(log);
out:
	nvmet_req_complete(req, status);
}

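/*
 * Changed Namespace List log: the core records up to
 * NVME_MAX_CHANGED_NAMESPACES NSIDs.  On overflow it sets nr_changed_ns to
 * U32_MAX and stores a single NVME_NSID_ALL entry, so only the first dword
 * is copied in that case; the remainder of the log page is zero filled.
 */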
static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = NVME_SC_INTERNAL;
	size_t len;

	if (req->transfer_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
		goto out;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_changed_ns == U32_MAX)
		len = sizeof(__le32);
	else
		len = ctrl->nr_changed_ns * sizeof(__le32);
	status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
	if (!status)
		status = nvmet_zero_sgl(req, len, req->transfer_len - len);
	ctrl->nr_changed_ns = 0;
	nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
	mutex_unlock(&ctrl->lock);
out:
	nvmet_req_complete(req, status);
}

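/*
 * Build one ANA Group Descriptor in place: group id, namespace count,
 * change count and ANA state, followed by the NSIDs that belong to the
 * group.  When the host sets the RGO (Return Groups Only) flag in LSP the
 * NSID list is omitted and nnsids stays zero.
 */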
static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
		struct nvme_ana_group_desc *desc)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	unsigned long idx;
	u32 count = 0;

	if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
		xa_for_each(&ctrl->subsys->namespaces, idx, ns)
			if (ns->anagrpid == grpid)
				desc->nsids[count++] = cpu_to_le32(ns->nsid);
	}

	desc->grpid = cpu_to_le32(grpid);
	desc->nnsids = cpu_to_le32(count);
	desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	desc->state = req->port->ana_state[grpid];
	memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
	return sizeof(struct nvme_ana_group_desc) + count * sizeof(__le32);
}

static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
{
	struct nvme_ana_rsp_hdr hdr = { 0, };
	struct nvme_ana_group_desc *desc;
	size_t offset = sizeof(struct nvme_ana_rsp_hdr); /* start beyond hdr */
	size_t len;
	u32 grpid;
	u16 ngrps = 0;
	u16 status;

	status = NVME_SC_INTERNAL;
	desc = kmalloc(sizeof(struct nvme_ana_group_desc) +
			NVMET_MAX_NAMESPACES * sizeof(__le32), GFP_KERNEL);
	if (!desc)
		goto out;

	down_read(&nvmet_ana_sem);
	for (grpid = 1; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (!nvmet_ana_group_enabled[grpid])
			continue;
		len = nvmet_format_ana_group(req, grpid, desc);
		status = nvmet_copy_to_sgl(req, offset, desc, len);
		if (status)
			break;
		offset += len;
		ngrps++;
	}
	for ( ; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (nvmet_ana_group_enabled[grpid])
			ngrps++;
	}

	hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	hdr.ngrps = cpu_to_le16(ngrps);
	nvmet_clear_aen_bit(req, NVME_AEN_BIT_ANA_CHANGE);
	up_read(&nvmet_ana_sem);

	kfree(desc);

	/* copy the header last once we know the number of groups */
	status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr));
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_page(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, nvmet_get_log_page_len(req->cmd)))
		return;

	switch (req->cmd->get_log_page.lid) {
	case NVME_LOG_ERROR:
		return nvmet_execute_get_log_page_error(req);
	case NVME_LOG_SMART:
		return nvmet_execute_get_log_page_smart(req);
	case NVME_LOG_FW_SLOT:
		/*
		 * We only support a single firmware slot which always is
		 * active, so we can zero out the whole firmware slot log and
		 * still claim to fully implement this mandatory log page.
		 */
		return nvmet_execute_get_log_page_noop(req);
	case NVME_LOG_CHANGED_NS:
		return nvmet_execute_get_log_changed_ns(req);
	case NVME_LOG_CMD_EFFECTS:
		return nvmet_execute_get_log_cmd_effects_ns(req);
	case NVME_LOG_ANA:
		return nvmet_execute_get_log_page_ana(req);
	}
	pr_debug("unhandled lid %d on qid %d\n",
	       req->cmd->get_log_page.lid, req->sq->qid);
	req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
}

static void nvmet_id_set_model_number(struct nvme_id_ctrl *id,
				      struct nvmet_subsys *subsys)
{
	const char *model = NVMET_DEFAULT_CTRL_MODEL;
	struct nvmet_subsys_model *subsys_model;

	rcu_read_lock();
	subsys_model = rcu_dereference(subsys->model);
	if (subsys_model)
		model = subsys_model->number;
	memcpy_and_pad(id->mn, sizeof(id->mn), model, strlen(model), ' ');
	rcu_read_unlock();
}

static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_id_ctrl *id;
	u32 cmd_capsule_size;
	u16 status = 0;

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* XXX: figure out how to assign real vendor IDs. */
	id->vid = 0;
	id->ssvid = 0;

	memset(id->sn, ' ', sizeof(id->sn));
	bin2hex(id->sn, &ctrl->subsys->serial,
		min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
	nvmet_id_set_model_number(id, ctrl->subsys);
	memcpy_and_pad(id->fr, sizeof(id->fr),
		       UTS_RELEASE, strlen(UTS_RELEASE), ' ');

	id->rab = 6;

	/*
	 * XXX: figure out how we can assign an IEEE OUI, but until then
	 * the safest is to leave it as zeroes.
	 */

	/* we support multiple ports, multiple hosts and ANA: */
	id->cmic = (1 << 0) | (1 << 1) | (1 << 3);

	/* Limit MDTS according to transport capability */
	if (ctrl->ops->get_mdts)
		id->mdts = ctrl->ops->get_mdts(ctrl);
	else
		id->mdts = 0;

	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);

	/* XXX: figure out what to do about RTD3R/RTD3 */
	id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
	id->ctratt = cpu_to_le32(NVME_CTRL_ATTR_HID_128_BIT |
		NVME_CTRL_ATTR_TBKAS);

	id->oacs = 0;

	/*
	 * We don't really have a practical limit on the number of abort
	 * commands.  But we don't do anything useful for abort either, so
	 * no point in allowing more abort commands than the spec requires.
	 */
	id->acl = 3;

	id->aerl = NVMET_ASYNC_EVENTS - 1;

	/* first slot is read-only, only one slot supported */
	id->frmw = (1 << 0) | (1 << 1);
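	/*
	 * LPA: bit 0 - per-namespace SMART / Health log, bit 1 - Commands
	 * Supported and Effects log, bit 2 - extended data for Get Log Page
	 * (LPO and a 32-bit NUMD).  ELPE is 0's based, so this advertises
	 * NVMET_ERROR_LOG_SLOTS error log entries.
	 */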
	id->lpa = (1 << 0) | (1 << 1) | (1 << 2);
	id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
	id->npss = 0;

	/* We support keep-alive timeout in granularity of seconds */
	id->kas = cpu_to_le16(NVMET_KAS);

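	/*
	 * SQES/CQES: the lower nibble is the required (minimum) entry size
	 * and the upper nibble the maximum, both as a power of two: 6 means
	 * 64 byte submission queue entries, 4 means 16 byte completion
	 * queue entries.
	 */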
	id->sqes = (0x6 << 4) | 0x6;
	id->cqes = (0x4 << 4) | 0x4;

	/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

	id->nn = cpu_to_le32(ctrl->subsys->max_nsid);
	id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
	id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
			NVME_CTRL_ONCS_WRITE_ZEROES);

	/* XXX: don't report vwc if the underlying device is write through */
	id->vwc = NVME_CTRL_VWC_PRESENT;

	/*
	 * We can't support atomic writes bigger than an LBA without support
	 * from the backend device.
	 */
	id->awun = 0;
	id->awupf = 0;

	id->sgls = cpu_to_le32(1 << 0);	/* we always support SGLs */
	if (ctrl->ops->flags & NVMF_KEYED_SGLS)
		id->sgls |= cpu_to_le32(1 << 2);
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(1 << 20);

	strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));

	/*
	 * Max command capsule size is sqe + in-capsule data size.
	 * Disable in-capsule data for Metadata capable controllers.
	 */
	cmd_capsule_size = sizeof(struct nvme_command);
	if (!ctrl->pi_support)
		cmd_capsule_size += req->port->inline_data_size;
	id->ioccsz = cpu_to_le32(cmd_capsule_size / 16);
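	/*
	 * e.g. a 64 byte SQE with 8k of inline data gives
	 * ioccsz = (64 + 8192) / 16 = 516.
	 */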

	/* Max response capsule size is cqe */
	id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

	id->msdbd = ctrl->ops->msdbd;

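	/*
	 * ANACAP bits 0-4: the controller can report the optimized,
	 * non-optimized, inaccessible, persistent loss and change ANA
	 * states, respectively.
	 */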
	id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
	id->anatt = 10; /* random value */
	id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS);
	id->nanagrpid = cpu_to_le32(NVMET_MAX_ANAGRPS);

	/*
	 * Meh, we don't really support any power state.  Fake up the same
	 * values that qemu does.
	 */
	id->psd[0].max_power = cpu_to_le16(0x9c4);
	id->psd[0].entry_lat = cpu_to_le32(0x10);
	id->psd[0].exit_lat = cpu_to_le32(0x4);

	id->nwpc = 1 << 0; /* write protect and no write protect */

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_ns(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_id_ns *id;
	u16 status = 0;

	if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* return an all zeroed buffer if we can't find an active namespace */
	req->ns = nvmet_find_namespace(ctrl, req->cmd->identify.nsid);
	if (!req->ns) {
		status = 0;
		goto done;
	}

	nvmet_ns_revalidate(req->ns);

	/*
	 * nuse = ncap = nsze isn't always true, but we have no way to find
	 * that out from the underlying device.
	 */
	id->ncap = id->nsze =
		cpu_to_le64(req->ns->size >> req->ns->blksize_shift);
	switch (req->port->ana_state[req->ns->anagrpid]) {
	case NVME_ANA_INACCESSIBLE:
	case NVME_ANA_PERSISTENT_LOSS:
		break;
	default:
		id->nuse = id->nsze;
		break;
	}

	if (req->ns->bdev)
		nvmet_bdev_set_limits(req->ns->bdev, id);

	/*
	 * We just provide a single LBA format that matches what the
	 * underlying device reports.
	 */
	id->nlbaf = 0;
	id->flbas = 0;

	/*
	 * Our namespace might always be shared.  Not just with other
	 * controllers, but also with any other user of the block device.
	 */
	id->nmic = (1 << 0);
	id->anagrpid = cpu_to_le32(req->ns->anagrpid);

	memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid));

	id->lbaf[0].ds = req->ns->blksize_shift;

	if (ctrl->pi_support && nvmet_ns_has_pi(req->ns)) {
		id->dpc = NVME_NS_DPC_PI_FIRST | NVME_NS_DPC_PI_LAST |
			  NVME_NS_DPC_PI_TYPE1 | NVME_NS_DPC_PI_TYPE2 |
			  NVME_NS_DPC_PI_TYPE3;
		id->mc = NVME_MC_EXTENDED_LBA;
		id->dps = req->ns->pi_type;
		id->flbas = NVME_NS_FLBAS_META_EXT;
		id->lbaf[0].ms = cpu_to_le16(req->ns->metadata_size);
	}

	if (req->ns->readonly)
		id->nsattr |= (1 << 0);
done:
	if (!status)
		status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

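/*
 * CNS 02h: Active Namespace ID list.  The list holds up to 1024 NSIDs in
 * ascending order, all greater than the NSID given in the command, which
 * lets the host walk a large namespace set in chunks.
 */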
static void nvmet_execute_identify_nslist(struct nvmet_req *req)
{
	static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	unsigned long idx;
	u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
	__le32 *list;
	u16 status = 0;
	int i = 0;

	list = kzalloc(buf_size, GFP_KERNEL);
	if (!list) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
		if (ns->nsid <= min_nsid)
			continue;
		list[i++] = cpu_to_le32(ns->nsid);
		if (i == buf_size / sizeof(__le32))
			break;
	}

	status = nvmet_copy_to_sgl(req, 0, list, buf_size);

	kfree(list);
out:
	nvmet_req_complete(req, status);
}

static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
				    void *id, off_t *off)
{
	struct nvme_ns_id_desc desc = {
		.nidt = type,
		.nidl = len,
	};
	u16 status;

	status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc));
	if (status)
		return status;
	*off += sizeof(desc);

	status = nvmet_copy_to_sgl(req, *off, id, len);
	if (status)
		return status;
	*off += len;

	return 0;
}

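/*
 * CNS 03h: Namespace Identification Descriptor list.  Each descriptor is a
 * (struct nvme_ns_id_desc, payload) pair; the list is terminated by a
 * descriptor with NIDT cleared to zero, which is why the remainder of the
 * 4k buffer is zeroed below.
 */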
static void nvmet_execute_identify_desclist(struct nvmet_req *req)
{
	struct nvmet_ns *ns;
	u16 status = 0;
	off_t off = 0;

	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
	if (!ns) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	if (memchr_inv(&ns->uuid, 0, sizeof(ns->uuid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
						  NVME_NIDT_UUID_LEN,
						  &ns->uuid, &off);
		if (status)
			goto out_put_ns;
	}
	if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
						  NVME_NIDT_NGUID_LEN,
						  &ns->nguid, &off);
		if (status)
			goto out_put_ns;
	}

	if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
			off) != NVME_IDENTIFY_DATA_SIZE - off)
		status = NVME_SC_INTERNAL | NVME_SC_DNR;
out_put_ns:
	nvmet_put_namespace(ns);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
		return;

	switch (req->cmd->identify.cns) {
	case NVME_ID_CNS_NS:
		return nvmet_execute_identify_ns(req);
	case NVME_ID_CNS_CTRL:
		return nvmet_execute_identify_ctrl(req);
	case NVME_ID_CNS_NS_ACTIVE_LIST:
		return nvmet_execute_identify_nslist(req);
	case NVME_ID_CNS_NS_DESC_LIST:
		return nvmet_execute_identify_desclist(req);
	}

	pr_debug("unhandled identify cns %d on qid %d\n",
	       req->cmd->identify.cns, req->sq->qid);
	req->error_loc = offsetof(struct nvme_identify, cns);
	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
}

/*
 * A "minimum viable" abort implementation: the command is mandatory in the
 * spec, but we are not required to do any useful work.  We couldn't really
 * do a useful abort, so don't bother even waiting for the command to be
 * executed and return immediately, indicating that the command to abort
 * wasn't found.
 */
static void nvmet_execute_abort(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, 0))
		return;
	nvmet_set_result(req, 1);
	nvmet_req_complete(req, 0);
}

static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
{
	u16 status;

	if (req->ns->file)
		status = nvmet_file_flush(req);
	else
		status = nvmet_bdev_flush(req);

	if (status)
		pr_err("write protect flush failed nsid: %u\n", req->ns->nsid);
	return status;
}

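/*
 * Namespace Write Protect feature: mark the namespace read only and then
 * flush the backing store so that previously written data is durable; the
 * state change is rolled back if the flush fails.
 */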
static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
{
	u32 write_protect = le32_to_cpu(req->cmd->common.cdw11);
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u16 status = NVME_SC_FEATURE_NOT_CHANGEABLE;

	req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->rw.nsid);
	if (unlikely(!req->ns)) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return status;
	}

	mutex_lock(&subsys->lock);
	switch (write_protect) {
	case NVME_NS_WRITE_PROTECT:
		req->ns->readonly = true;
		status = nvmet_write_protect_flush_sync(req);
		if (status)
			req->ns->readonly = false;
		break;
	case NVME_NS_NO_WRITE_PROTECT:
		req->ns->readonly = false;
		status = 0;
		break;
	default:
		break;
	}

	if (!status)
		nvmet_ns_changed(subsys, req->ns->nsid);
	mutex_unlock(&subsys->lock);
	return status;
}

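/*
 * KATO is specified by the host in milliseconds but kept internally in
 * seconds, so e.g. a requested value of 15000 becomes a 15 second
 * keep-alive interval (and is reported back as 15000 by Get Features).
 */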
u16 nvmet_set_feat_kato(struct nvmet_req *req)
{
	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

	nvmet_stop_keep_alive_timer(req->sq->ctrl);
	req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
	nvmet_start_keep_alive_timer(req->sq->ctrl);

	nvmet_set_result(req, req->sq->ctrl->kato);

	return 0;
}

u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
{
	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

	if (val32 & ~mask) {
		req->error_loc = offsetof(struct nvme_common_command, cdw11);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
	nvmet_set_result(req, val32);

	return 0;
}

void nvmet_execute_set_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
	u16 status = 0;
	u16 nsqr;
	u16 ncqr;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_NUM_QUEUES:
		ncqr = (cdw11 >> 16) & 0xffff;
		nsqr = cdw11 & 0xffff;
		if (ncqr == 0xffff || nsqr == 0xffff) {
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}
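		/*
		 * The completion dword returns NSQA in the lower 16 bits and
		 * NCQA in the upper 16 bits, both 0's based, hence the
		 * max_qid - 1 below.
		 */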
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_KATO:
		status = nvmet_set_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL);
		break;
	case NVME_FEAT_HOST_ID:
		status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_set_feat_write_protect(req);
		break;
	default:
		req->error_loc = offsetof(struct nvme_common_command, cdw10);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 result;

	req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->common.nsid);
	if (!req->ns) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return NVME_SC_INVALID_NS | NVME_SC_DNR;
	}
	mutex_lock(&subsys->lock);
	if (req->ns->readonly)
		result = NVME_NS_WRITE_PROTECT;
	else
		result = NVME_NS_NO_WRITE_PROTECT;
	nvmet_set_result(req, result);
	mutex_unlock(&subsys->lock);

	return 0;
}

void nvmet_get_feat_kato(struct nvmet_req *req)
{
	nvmet_set_result(req, req->sq->ctrl->kato * 1000);
}

void nvmet_get_feat_async_event(struct nvmet_req *req)
{
	nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
}

void nvmet_execute_get_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 status = 0;

	if (!nvmet_check_transfer_len(req, nvmet_feat_data_len(req, cdw10)))
		return;

	switch (cdw10 & 0xff) {
	/*
	 * These features are mandatory in the spec, but we don't
	 * have a useful way to implement them.  We'll eventually
	 * need to come up with some fake values for these.
	 */
#if 0
	case NVME_FEAT_ARBITRATION:
		break;
	case NVME_FEAT_POWER_MGMT:
		break;
	case NVME_FEAT_TEMP_THRESH:
		break;
	case NVME_FEAT_ERR_RECOVERY:
		break;
	case NVME_FEAT_IRQ_COALESCE:
		break;
	case NVME_FEAT_IRQ_CONFIG:
		break;
	case NVME_FEAT_WRITE_ATOMIC:
		break;
#endif
	case NVME_FEAT_ASYNC_EVENT:
		nvmet_get_feat_async_event(req);
		break;
	case NVME_FEAT_VOLATILE_WC:
		nvmet_set_result(req, 1);
		break;
	case NVME_FEAT_NUM_QUEUES:
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_KATO:
		nvmet_get_feat_kato(req);
		break;
	case NVME_FEAT_HOST_ID:
		/* need 128-bit host identifier flag */
		if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
			req->error_loc =
				offsetof(struct nvme_common_command, cdw11);
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}

		status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
				sizeof(req->sq->ctrl->hostid));
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_get_feat_write_protect(req);
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_common_command, cdw10);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

void nvmet_execute_async_event(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR);
		return;
	}
	ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
	mutex_unlock(&ctrl->lock);

	schedule_work(&ctrl->async_event_work);
}

void nvmet_execute_keep_alive(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	pr_debug("ctrl %d update keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);

	mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
	nvmet_req_complete(req, 0);
}

u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;
	u16 ret;

	if (nvme_is_fabrics(cmd))
		return nvmet_parse_fabrics_cmd(req);
	if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
		return nvmet_parse_discovery_cmd(req);

	ret = nvmet_check_ctrl_status(req, cmd);
	if (unlikely(ret))
		return ret;

	if (nvmet_req_passthru_ctrl(req))
		return nvmet_parse_passthru_admin_cmd(req);

	switch (cmd->common.opcode) {
	case nvme_admin_get_log_page:
		req->execute = nvmet_execute_get_log_page;
		return 0;
	case nvme_admin_identify:
		req->execute = nvmet_execute_identify;
		return 0;
	case nvme_admin_abort_cmd:
		req->execute = nvmet_execute_abort;
		return 0;
	case nvme_admin_set_features:
		req->execute = nvmet_execute_set_features;
		return 0;
	case nvme_admin_get_features:
		req->execute = nvmet_execute_get_features;
		return 0;
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		return 0;
	case nvme_admin_keep_alive:
		req->execute = nvmet_execute_keep_alive;
		return 0;
	}

	pr_debug("unhandled cmd %d on qid %d\n", cmd->common.opcode,
	       req->sq->qid);
	req->error_loc = offsetof(struct nvme_common_command, opcode);
	return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}