1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * QLogic Fibre Channel HBA Driver
4 * Copyright (c)  2003-2014 QLogic Corporation
5 */
6#include "qla_def.h"
7
8#include <linux/debugfs.h>
9#include <linux/seq_file.h>
10
/* Top-level "qla2xxx" debugfs directory, shared by every host instance. */
static struct dentry *qla2x00_dfs_root;
/* Count of per-host dirs under the root; root is torn down when it hits 0. */
static atomic_t qla2x00_dfs_root_count;

/* Attribute selector passed to qla_dfs_rport_get()/qla_dfs_rport_set(). */
#define QLA_DFS_RPORT_DEVLOSS_TMO	1
15
16static int
17qla_dfs_rport_get(struct fc_port *fp, int attr_id, u64 *val)
18{
19	switch (attr_id) {
20	case QLA_DFS_RPORT_DEVLOSS_TMO:
21		/* Only supported for FC-NVMe devices that are registered. */
22		if (!(fp->nvme_flag & NVME_FLAG_REGISTERED))
23			return -EIO;
24		*val = fp->nvme_remote_port->dev_loss_tmo;
25		break;
26	default:
27		return -EINVAL;
28	}
29	return 0;
30}
31
/*
 * Write one rport debugfs attribute.
 *
 * Returns 0 or a negative errno: -EINVAL for an unknown attribute ID (or
 * when CONFIG_NVME_FC is disabled), -EIO when the rport is not a
 * registered FC-NVMe device, otherwise whatever the NVMe-FC transport
 * returns.
 */
static int
qla_dfs_rport_set(struct fc_port *fp, int attr_id, u64 val)
{
	switch (attr_id) {
	case QLA_DFS_RPORT_DEVLOSS_TMO:
		/* Only supported for FC-NVMe devices that are registered. */
		if (!(fp->nvme_flag & NVME_FLAG_REGISTERED))
			return -EIO;
#if (IS_ENABLED(CONFIG_NVME_FC))
		/* Delegate the timeout update to the NVMe-FC transport. */
		return nvme_fc_set_remoteport_devloss(fp->nvme_remote_port,
						      val);
#else /* CONFIG_NVME_FC */
		return -EINVAL;
#endif /* CONFIG_NVME_FC */
	default:
		return -EINVAL;
	}
	/* Not reached: every case above returns. */
	return 0;
}
51
/*
 * Generate the get/set shims and a DEFINE_DEBUGFS_ATTRIBUTE fops for one
 * read/write rport attribute.
 *
 * _attr_id : QLA_DFS_RPORT_* selector forwarded to qla_dfs_rport_get/set().
 * _attr    : Token used to name the generated qla_dfs_rport_<_attr>_fops.
 */
#define DEFINE_QLA_DFS_RPORT_RW_ATTR(_attr_id, _attr)		\
static int qla_dfs_rport_##_attr##_get(void *data, u64 *val)	\
{								\
	struct fc_port *fp = data;				\
	return qla_dfs_rport_get(fp, _attr_id, val);		\
}								\
static int qla_dfs_rport_##_attr##_set(void *data, u64 val)	\
{								\
	struct fc_port *fp = data;				\
	return qla_dfs_rport_set(fp, _attr_id, val);		\
}								\
DEFINE_DEBUGFS_ATTRIBUTE(qla_dfs_rport_##_attr##_fops,		\
		qla_dfs_rport_##_attr##_get,			\
		qla_dfs_rport_##_attr##_set, "%llu\n")
66
/*
 * Generate a read-only getter plus fops for one fc_port field.
 *
 * _attr    : Token used to name qla_dfs_rport_field_<_attr>_fops.
 * _get_val : Expression (may reference the local "fp") whose value is
 *            reported through the debugfs file.
 */
#define DEFINE_QLA_DFS_RPORT_FIELD_GET(_attr, _get_val)			\
static int qla_dfs_rport_field_##_attr##_get(void *data, u64 *val)	\
{									\
	struct fc_port *fp = data;					\
	*val = _get_val;						\
	return 0;							\
}									\
DEFINE_DEBUGFS_ATTRIBUTE(qla_dfs_rport_field_##_attr##_fops,		\
		qla_dfs_rport_field_##_attr##_get,			\
		NULL, "%llu\n")

/* NOTE(review): no users of this alias are visible in this file. */
#define DEFINE_QLA_DFS_RPORT_ACCESS(_attr, _get_val) \
	DEFINE_QLA_DFS_RPORT_FIELD_GET(_attr, _get_val)

/* Shorthand for the common case where the value is the fp field itself. */
#define DEFINE_QLA_DFS_RPORT_FIELD(_attr) \
	DEFINE_QLA_DFS_RPORT_FIELD_GET(_attr, fp->_attr)
89
/* Writable attribute: NVMe dev_loss_tmo (see qla_dfs_rport_get/set). */
DEFINE_QLA_DFS_RPORT_RW_ATTR(QLA_DFS_RPORT_DEVLOSS_TMO, dev_loss_tmo);

/* Read-only views of fc_port state, one debugfs file per field. */
DEFINE_QLA_DFS_RPORT_FIELD(disc_state);
DEFINE_QLA_DFS_RPORT_FIELD(scan_state);
DEFINE_QLA_DFS_RPORT_FIELD(fw_login_state);
DEFINE_QLA_DFS_RPORT_FIELD(login_pause);
DEFINE_QLA_DFS_RPORT_FIELD(flags);
DEFINE_QLA_DFS_RPORT_FIELD(nvme_flag);
DEFINE_QLA_DFS_RPORT_FIELD(last_rscn_gen);
DEFINE_QLA_DFS_RPORT_FIELD(rscn_gen);
DEFINE_QLA_DFS_RPORT_FIELD(login_gen);
DEFINE_QLA_DFS_RPORT_FIELD(loop_id);
/* Fields needing a custom accessor expression. */
DEFINE_QLA_DFS_RPORT_FIELD_GET(port_id, fp->d_id.b24);
DEFINE_QLA_DFS_RPORT_FIELD_GET(sess_kref, kref_read(&fp->sess_kref));
104
105void
106qla2x00_dfs_create_rport(scsi_qla_host_t *vha, struct fc_port *fp)
107{
108	char wwn[32];
109
110#define QLA_CREATE_RPORT_FIELD_ATTR(_attr)			\
111	debugfs_create_file(#_attr, 0400, fp->dfs_rport_dir,	\
112		fp, &qla_dfs_rport_field_##_attr##_fops)
113
114	if (!vha->dfs_rport_root || fp->dfs_rport_dir)
115		return;
116
117	sprintf(wwn, "pn-%016llx", wwn_to_u64(fp->port_name));
118	fp->dfs_rport_dir = debugfs_create_dir(wwn, vha->dfs_rport_root);
119	if (IS_ERR(fp->dfs_rport_dir))
120		return;
121	if (NVME_TARGET(vha->hw, fp))
122		debugfs_create_file("dev_loss_tmo", 0600, fp->dfs_rport_dir,
123				    fp, &qla_dfs_rport_dev_loss_tmo_fops);
124
125	QLA_CREATE_RPORT_FIELD_ATTR(disc_state);
126	QLA_CREATE_RPORT_FIELD_ATTR(scan_state);
127	QLA_CREATE_RPORT_FIELD_ATTR(fw_login_state);
128	QLA_CREATE_RPORT_FIELD_ATTR(login_pause);
129	QLA_CREATE_RPORT_FIELD_ATTR(flags);
130	QLA_CREATE_RPORT_FIELD_ATTR(nvme_flag);
131	QLA_CREATE_RPORT_FIELD_ATTR(last_rscn_gen);
132	QLA_CREATE_RPORT_FIELD_ATTR(rscn_gen);
133	QLA_CREATE_RPORT_FIELD_ATTR(login_gen);
134	QLA_CREATE_RPORT_FIELD_ATTR(loop_id);
135	QLA_CREATE_RPORT_FIELD_ATTR(port_id);
136	QLA_CREATE_RPORT_FIELD_ATTR(sess_kref);
137}
138
139void
140qla2x00_dfs_remove_rport(scsi_qla_host_t *vha, struct fc_port *fp)
141{
142	if (!vha->dfs_rport_root || !fp->dfs_rport_dir)
143		return;
144	debugfs_remove_recursive(fp->dfs_rport_dir);
145	fp->dfs_rport_dir = NULL;
146}
147
148static int
149qla2x00_dfs_tgt_sess_show(struct seq_file *s, void *unused)
150{
151	scsi_qla_host_t *vha = s->private;
152	struct qla_hw_data *ha = vha->hw;
153	unsigned long flags;
154	struct fc_port *sess = NULL;
155	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
156
157	seq_printf(s, "%s\n", vha->host_str);
158	if (tgt) {
159		seq_puts(s, "Port ID   Port Name                Handle\n");
160
161		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
162		list_for_each_entry(sess, &vha->vp_fcports, list)
163			seq_printf(s, "%02x:%02x:%02x  %8phC  %d\n",
164			    sess->d_id.b.domain, sess->d_id.b.area,
165			    sess->d_id.b.al_pa, sess->port_name,
166			    sess->loop_id);
167		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
168	}
169
170	return 0;
171}
172
173DEFINE_SHOW_ATTRIBUTE(qla2x00_dfs_tgt_sess);
174
175static int
176qla2x00_dfs_tgt_port_database_show(struct seq_file *s, void *unused)
177{
178	scsi_qla_host_t *vha = s->private;
179	struct qla_hw_data *ha = vha->hw;
180	struct gid_list_info *gid_list;
181	dma_addr_t gid_list_dma;
182	fc_port_t fc_port;
183	char *id_iter;
184	int rc, i;
185	uint16_t entries, loop_id;
186
187	seq_printf(s, "%s\n", vha->host_str);
188	gid_list = dma_alloc_coherent(&ha->pdev->dev,
189				      qla2x00_gid_list_size(ha),
190				      &gid_list_dma, GFP_KERNEL);
191	if (!gid_list) {
192		ql_dbg(ql_dbg_user, vha, 0x7018,
193		       "DMA allocation failed for %u\n",
194		       qla2x00_gid_list_size(ha));
195		return 0;
196	}
197
198	rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma,
199				  &entries);
200	if (rc != QLA_SUCCESS)
201		goto out_free_id_list;
202
203	id_iter = (char *)gid_list;
204
205	seq_puts(s, "Port Name	Port ID		Loop ID\n");
206
207	for (i = 0; i < entries; i++) {
208		struct gid_list_info *gid =
209			(struct gid_list_info *)id_iter;
210		loop_id = le16_to_cpu(gid->loop_id);
211		memset(&fc_port, 0, sizeof(fc_port_t));
212
213		fc_port.loop_id = loop_id;
214
215		rc = qla24xx_gpdb_wait(vha, &fc_port, 0);
216		seq_printf(s, "%8phC  %02x%02x%02x  %d\n",
217			   fc_port.port_name, fc_port.d_id.b.domain,
218			   fc_port.d_id.b.area, fc_port.d_id.b.al_pa,
219			   fc_port.loop_id);
220		id_iter += ha->gid_list_info_size;
221	}
222out_free_id_list:
223	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
224			  gid_list, gid_list_dma);
225
226	return 0;
227}
228
229DEFINE_SHOW_ATTRIBUTE(qla2x00_dfs_tgt_port_database);
230
/*
 * seq_file show routine for "fw_resource_count": dump firmware exchange,
 * IOCB and XCB buffer counters from the resource-count mailbox command,
 * plus (when IOCB limiting is enabled) the driver's own usage estimate.
 */
static int
qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
{
	struct scsi_qla_host *vha = s->private;
	uint16_t mb[MAX_IOCB_MB_REG];
	int rc;
	struct qla_hw_data *ha = vha->hw;
	u16 iocbs_used, i;

	rc = qla24xx_res_count_wait(vha, mb, SIZEOF_IOCB_MB_REG);
	if (rc != QLA_SUCCESS) {
		seq_printf(s, "Mailbox Command failed %d, mb %#x", rc, mb[0]);
	} else {
		/* Mailbox register layout per the resource-count command. */
		seq_puts(s, "FW Resource count\n\n");
		seq_printf(s, "Original TGT exchg count[%d]\n", mb[1]);
		seq_printf(s, "Current TGT exchg count[%d]\n", mb[2]);
		seq_printf(s, "Current Initiator Exchange count[%d]\n", mb[3]);
		seq_printf(s, "Original Initiator Exchange count[%d]\n", mb[6]);
		seq_printf(s, "Current IOCB count[%d]\n", mb[7]);
		seq_printf(s, "Original IOCB count[%d]\n", mb[10]);
		seq_printf(s, "MAX VP count[%d]\n", mb[11]);
		seq_printf(s, "MAX FCF count[%d]\n", mb[12]);
		seq_printf(s, "Current free pageable XCB buffer cnt[%d]\n",
		    mb[20]);
		seq_printf(s, "Original Initiator fast XCB buffer cnt[%d]\n",
		    mb[21]);
		seq_printf(s, "Current free Initiator fast XCB buffer cnt[%d]\n",
		    mb[22]);
		seq_printf(s, "Original Target fast XCB buffer cnt[%d]\n",
		    mb[23]);
	}

	if (ql2xenforce_iocb_limit) {
		/* No lock required: the sum is only an estimate. */
		iocbs_used = ha->base_qpair->fwres.iocbs_used;
		for (i = 0; i < ha->max_qpairs; i++) {
			if (ha->queue_pair_map[i])
				iocbs_used += ha->queue_pair_map[i]->fwres.iocbs_used;
		}

		seq_printf(s, "Driver: estimate iocb used [%d] high water limit [%d]\n",
			   iocbs_used, ha->base_qpair->fwres.iocbs_limit);
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(qla_dfs_fw_resource_cnt);
279
/*
 * seq_file show routine for "tgt_counters": sum target-mode command/CTIO
 * counters across the base qpair and every mapped qpair, then print them
 * together with the host's DIF statistics.
 *
 * Counters are read without locking, so the totals are a best-effort
 * snapshot, not an atomically consistent view.
 */
static int
qla_dfs_tgt_counters_show(struct seq_file *s, void *unused)
{
	struct scsi_qla_host *vha = s->private;
	struct qla_qpair *qpair = vha->hw->base_qpair;
	uint64_t qla_core_sbt_cmd, core_qla_que_buf, qla_core_ret_ctio,
		core_qla_snd_status, qla_core_ret_sta_ctio, core_qla_free_cmd,
		num_q_full_sent, num_alloc_iocb_failed, num_term_xchg_sent;
	u16 i;

	/* Seed the totals from the base qpair. */
	qla_core_sbt_cmd = qpair->tgt_counters.qla_core_sbt_cmd;
	core_qla_que_buf = qpair->tgt_counters.core_qla_que_buf;
	qla_core_ret_ctio = qpair->tgt_counters.qla_core_ret_ctio;
	core_qla_snd_status = qpair->tgt_counters.core_qla_snd_status;
	qla_core_ret_sta_ctio = qpair->tgt_counters.qla_core_ret_sta_ctio;
	core_qla_free_cmd = qpair->tgt_counters.core_qla_free_cmd;
	num_q_full_sent = qpair->tgt_counters.num_q_full_sent;
	num_alloc_iocb_failed = qpair->tgt_counters.num_alloc_iocb_failed;
	num_term_xchg_sent = qpair->tgt_counters.num_term_xchg_sent;

	/* Accumulate every additional mapped qpair (gaps are skipped). */
	for (i = 0; i < vha->hw->max_qpairs; i++) {
		qpair = vha->hw->queue_pair_map[i];
		if (!qpair)
			continue;
		qla_core_sbt_cmd += qpair->tgt_counters.qla_core_sbt_cmd;
		core_qla_que_buf += qpair->tgt_counters.core_qla_que_buf;
		qla_core_ret_ctio += qpair->tgt_counters.qla_core_ret_ctio;
		core_qla_snd_status += qpair->tgt_counters.core_qla_snd_status;
		qla_core_ret_sta_ctio +=
		    qpair->tgt_counters.qla_core_ret_sta_ctio;
		core_qla_free_cmd += qpair->tgt_counters.core_qla_free_cmd;
		num_q_full_sent += qpair->tgt_counters.num_q_full_sent;
		num_alloc_iocb_failed +=
		    qpair->tgt_counters.num_alloc_iocb_failed;
		num_term_xchg_sent += qpair->tgt_counters.num_term_xchg_sent;
	}

	seq_puts(s, "Target Counters\n");
	seq_printf(s, "qla_core_sbt_cmd = %lld\n",
		qla_core_sbt_cmd);
	seq_printf(s, "qla_core_ret_sta_ctio = %lld\n",
		qla_core_ret_sta_ctio);
	seq_printf(s, "qla_core_ret_ctio = %lld\n",
		qla_core_ret_ctio);
	seq_printf(s, "core_qla_que_buf = %lld\n",
		core_qla_que_buf);
	seq_printf(s, "core_qla_snd_status = %lld\n",
		core_qla_snd_status);
	seq_printf(s, "core_qla_free_cmd = %lld\n",
		core_qla_free_cmd);
	seq_printf(s, "num alloc iocb failed = %lld\n",
		num_alloc_iocb_failed);
	seq_printf(s, "num term exchange sent = %lld\n",
		num_term_xchg_sent);
	seq_printf(s, "num Q full sent = %lld\n",
		num_q_full_sent);

	/* DIF stats */
	seq_printf(s, "DIF Inp Bytes = %lld\n",
		vha->qla_stats.qla_dif_stats.dif_input_bytes);
	seq_printf(s, "DIF Outp Bytes = %lld\n",
		vha->qla_stats.qla_dif_stats.dif_output_bytes);
	seq_printf(s, "DIF Inp Req = %lld\n",
		vha->qla_stats.qla_dif_stats.dif_input_requests);
	seq_printf(s, "DIF Outp Req = %lld\n",
		vha->qla_stats.qla_dif_stats.dif_output_requests);
	seq_printf(s, "DIF Guard err = %d\n",
		vha->qla_stats.qla_dif_stats.dif_guard_err);
	seq_printf(s, "DIF Ref tag err = %d\n",
		vha->qla_stats.qla_dif_stats.dif_ref_tag_err);
	seq_printf(s, "DIF App tag err = %d\n",
		vha->qla_stats.qla_dif_stats.dif_app_tag_err);
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(qla_dfs_tgt_counters);
356
/*
 * seq_file show routine for "fce": dump the FCE (Fibre Channel Event)
 * trace buffer — write pointer, DMA base, enable registers, then the
 * buffer contents as 32-bit words, eight per line prefixed with the
 * DMA address of the first word.
 *
 * fce_mutex serializes against open/release, which pause and restart
 * tracing around the read.
 */
static int
qla2x00_dfs_fce_show(struct seq_file *s, void *unused)
{
	scsi_qla_host_t *vha = s->private;
	uint32_t cnt;
	uint32_t *fce;
	uint64_t fce_start;
	struct qla_hw_data *ha = vha->hw;

	mutex_lock(&ha->fce_mutex);

	seq_puts(s, "FCE Trace Buffer\n");
	seq_printf(s, "In Pointer = %llx\n\n", (unsigned long long)ha->fce_wr);
	seq_printf(s, "Base = %llx\n\n", (unsigned long long) ha->fce_dma);
	seq_puts(s, "FCE Enable Registers\n");
	seq_printf(s, "%08x %08x %08x %08x %08x %08x\n",
	    ha->fce_mb[0], ha->fce_mb[2], ha->fce_mb[3], ha->fce_mb[4],
	    ha->fce_mb[5], ha->fce_mb[6]);

	fce = (uint32_t *) ha->fce;
	fce_start = (unsigned long long) ha->fce_dma;
	/* fce_calc_size() is in bytes; iterate 32-bit words. */
	for (cnt = 0; cnt < fce_calc_size(ha->fce_bufs) / 4; cnt++) {
		if (cnt % 8 == 0)
			seq_printf(s, "\n%llx: ",
			    (unsigned long long)((cnt * 4) + fce_start));
		else
			seq_putc(s, ' ');
		seq_printf(s, "%08x", *fce++);
	}

	seq_puts(s, "\nEnd\n");

	mutex_unlock(&ha->fce_mutex);

	return 0;
}
393
/*
 * open() for the "fce" node: pause FCE tracing (so the buffer is stable
 * while userspace reads it) and hand off to single_open(). Tracing is
 * restarted by qla2x00_dfs_fce_release().
 */
static int
qla2x00_dfs_fce_open(struct inode *inode, struct file *file)
{
	scsi_qla_host_t *vha = inode->i_private;
	struct qla_hw_data *ha = vha->hw;
	int rval;

	/* Tracing already stopped: the buffer is stable, just open. */
	if (!ha->flags.fce_enabled)
		goto out;

	mutex_lock(&ha->fce_mutex);

	/* Pause tracing to flush FCE buffers. */
	rval = qla2x00_disable_fce_trace(vha, &ha->fce_wr, &ha->fce_rd);
	if (rval)
		ql_dbg(ql_dbg_user, vha, 0x705c,
		    "DebugFS: Unable to disable FCE (%d).\n", rval);

	/* Marked disabled even if the mailbox failed; release() re-arms. */
	ha->flags.fce_enabled = 0;

	mutex_unlock(&ha->fce_mutex);
out:
	return single_open(file, qla2x00_dfs_fce_show, vha);
}
418
/*
 * release() for the "fce" node: clear the trace buffer and re-enable FCE
 * tracing (paused by qla2x00_dfs_fce_open()), then release the seq_file.
 */
static int
qla2x00_dfs_fce_release(struct inode *inode, struct file *file)
{
	scsi_qla_host_t *vha = inode->i_private;
	struct qla_hw_data *ha = vha->hw;
	int rval;

	/* Tracing never got disabled on open; nothing to restart. */
	if (ha->flags.fce_enabled)
		goto out;

	mutex_lock(&ha->fce_mutex);

	/* Re-enable FCE tracing. */
	ha->flags.fce_enabled = 1;
	memset(ha->fce, 0, fce_calc_size(ha->fce_bufs));
	rval = qla2x00_enable_fce_trace(vha, ha->fce_dma, ha->fce_bufs,
	    ha->fce_mb, &ha->fce_bufs);
	if (rval) {
		ql_dbg(ql_dbg_user, vha, 0x700d,
		    "DebugFS: Unable to reinitialize FCE (%d).\n", rval);
		/* Roll the flag back so state reflects reality. */
		ha->flags.fce_enabled = 0;
	}

	mutex_unlock(&ha->fce_mutex);
out:
	return single_release(inode, file);
}
446
/* File operations for the "fce" node; open/release pause and resume
 * FCE tracing around the seq_file read. */
static const struct file_operations dfs_fce_ops = {
	.open		= qla2x00_dfs_fce_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= qla2x00_dfs_fce_release,
};
453
454static int
455qla_dfs_naqp_show(struct seq_file *s, void *unused)
456{
457	struct scsi_qla_host *vha = s->private;
458	struct qla_hw_data *ha = vha->hw;
459
460	seq_printf(s, "%d\n", ha->tgt.num_act_qpairs);
461	return 0;
462}
463
464static int
465qla_dfs_naqp_open(struct inode *inode, struct file *file)
466{
467	struct scsi_qla_host *vha = inode->i_private;
468
469	return single_open(file, qla_dfs_naqp_show, vha);
470}
471
472static ssize_t
473qla_dfs_naqp_write(struct file *file, const char __user *buffer,
474    size_t count, loff_t *pos)
475{
476	struct seq_file *s = file->private_data;
477	struct scsi_qla_host *vha = s->private;
478	struct qla_hw_data *ha = vha->hw;
479	char *buf;
480	int rc = 0;
481	unsigned long num_act_qp;
482
483	if (!(IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))) {
484		pr_err("host%ld: this adapter does not support Multi Q.",
485		    vha->host_no);
486		return -EINVAL;
487	}
488
489	if (!vha->flags.qpairs_available) {
490		pr_err("host%ld: Driver is not setup with Multi Q.",
491		    vha->host_no);
492		return -EINVAL;
493	}
494	buf = memdup_user_nul(buffer, count);
495	if (IS_ERR(buf)) {
496		pr_err("host%ld: fail to copy user buffer.",
497		    vha->host_no);
498		return PTR_ERR(buf);
499	}
500
501	num_act_qp = simple_strtoul(buf, NULL, 0);
502
503	if (num_act_qp >= vha->hw->max_qpairs) {
504		pr_err("User set invalid number of qpairs %lu. Max = %d",
505		    num_act_qp, vha->hw->max_qpairs);
506		rc = -EINVAL;
507		goto out_free;
508	}
509
510	if (num_act_qp != ha->tgt.num_act_qpairs) {
511		ha->tgt.num_act_qpairs = num_act_qp;
512		qlt_clr_qp_table(vha);
513	}
514	rc = count;
515out_free:
516	kfree(buf);
517	return rc;
518}
519
/* File operations for the "naqp" node (read current value / write new). */
static const struct file_operations dfs_naqp_ops = {
	.open		= qla_dfs_naqp_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= qla_dfs_naqp_write,
};
527
528
/*
 * Create this host's debugfs hierarchy:
 *
 *   qla2xxx/                     (shared root, refcounted)
 *     <host_str>/                (per-host dir)
 *       fw_resource_count, tgt_counters, tgt_port_database,
 *       fce, tgt_sess, naqp (Multi-Q adapters only), rports/
 *
 * Only runs on adapters with FCE support and an allocated FCE buffer.
 * Always returns 0; creation failures are logged, not propagated.
 */
int
qla2x00_dfs_setup(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		goto out;
	if (!ha->fce)
		goto out;

	/* The root dir is shared across hosts; create it only once. */
	if (qla2x00_dfs_root)
		goto create_dir;

	atomic_set(&qla2x00_dfs_root_count, 0);
	qla2x00_dfs_root = debugfs_create_dir(QLA2XXX_DRIVER_NAME, NULL);

create_dir:
	if (ha->dfs_dir)
		goto create_nodes;

	mutex_init(&ha->fce_mutex);
	ha->dfs_dir = debugfs_create_dir(vha->host_str, qla2x00_dfs_root);

	/* One root reference per per-host dir; dropped in dfs_remove(). */
	atomic_inc(&qla2x00_dfs_root_count);

create_nodes:
	ha->dfs_fw_resource_cnt = debugfs_create_file("fw_resource_count",
	    S_IRUSR, ha->dfs_dir, vha, &qla_dfs_fw_resource_cnt_fops);

	ha->dfs_tgt_counters = debugfs_create_file("tgt_counters", S_IRUSR,
	    ha->dfs_dir, vha, &qla_dfs_tgt_counters_fops);

	ha->tgt.dfs_tgt_port_database = debugfs_create_file("tgt_port_database",
	    S_IRUSR,  ha->dfs_dir, vha, &qla2x00_dfs_tgt_port_database_fops);

	ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha,
	    &dfs_fce_ops);

	ha->tgt.dfs_tgt_sess = debugfs_create_file("tgt_sess",
		S_IRUSR, ha->dfs_dir, vha, &qla2x00_dfs_tgt_sess_fops);

	/* "naqp" only makes sense where Multi Q is supported. */
	if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha)) {
		ha->tgt.dfs_naqp = debugfs_create_file("naqp",
		    0400, ha->dfs_dir, vha, &dfs_naqp_ops);
		if (IS_ERR(ha->tgt.dfs_naqp)) {
			ql_log(ql_log_warn, vha, 0xd011,
			       "Unable to create debugFS naqp node.\n");
			goto out;
		}
	}
	vha->dfs_rport_root = debugfs_create_dir("rports", ha->dfs_dir);
	if (IS_ERR(vha->dfs_rport_root)) {
		ql_log(ql_log_warn, vha, 0xd012,
		       "Unable to create debugFS rports node.\n");
		goto out;
	}
out:
	return 0;
}
589
590int
591qla2x00_dfs_remove(scsi_qla_host_t *vha)
592{
593	struct qla_hw_data *ha = vha->hw;
594
595	if (ha->tgt.dfs_naqp) {
596		debugfs_remove(ha->tgt.dfs_naqp);
597		ha->tgt.dfs_naqp = NULL;
598	}
599
600	if (ha->tgt.dfs_tgt_sess) {
601		debugfs_remove(ha->tgt.dfs_tgt_sess);
602		ha->tgt.dfs_tgt_sess = NULL;
603	}
604
605	if (ha->tgt.dfs_tgt_port_database) {
606		debugfs_remove(ha->tgt.dfs_tgt_port_database);
607		ha->tgt.dfs_tgt_port_database = NULL;
608	}
609
610	if (ha->dfs_fw_resource_cnt) {
611		debugfs_remove(ha->dfs_fw_resource_cnt);
612		ha->dfs_fw_resource_cnt = NULL;
613	}
614
615	if (ha->dfs_tgt_counters) {
616		debugfs_remove(ha->dfs_tgt_counters);
617		ha->dfs_tgt_counters = NULL;
618	}
619
620	if (ha->dfs_fce) {
621		debugfs_remove(ha->dfs_fce);
622		ha->dfs_fce = NULL;
623	}
624
625	if (vha->dfs_rport_root) {
626		debugfs_remove_recursive(vha->dfs_rport_root);
627		vha->dfs_rport_root = NULL;
628	}
629
630	if (ha->dfs_dir) {
631		debugfs_remove(ha->dfs_dir);
632		ha->dfs_dir = NULL;
633		atomic_dec(&qla2x00_dfs_root_count);
634	}
635
636	if (atomic_read(&qla2x00_dfs_root_count) == 0 &&
637	    qla2x00_dfs_root) {
638		debugfs_remove(qla2x00_dfs_root);
639		qla2x00_dfs_root = NULL;
640	}
641
642	return 0;
643}
644