// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Western Digital Corporation

#include <linux/err.h>
#include <linux/string.h>
#include <linux/bitfield.h>
#include <asm/unaligned.h>

#include <ufs/ufs.h>
#include "ufs-sysfs.h"
#include "ufshcd-priv.h"

static const char *ufshcd_uic_link_state_to_string(
			enum uic_link_state state)
{
	switch (state) {
	case UIC_LINK_OFF_STATE:	return "OFF";
	case UIC_LINK_ACTIVE_STATE:	return "ACTIVE";
	case UIC_LINK_HIBERN8_STATE:	return "HIBERN8";
	case UIC_LINK_BROKEN_STATE:	return "BROKEN";
	default:			return "UNKNOWN";
	}
}

static const char *ufshcd_ufs_dev_pwr_mode_to_string(
			enum ufs_dev_pwr_mode state)
{
	switch (state) {
	case UFS_ACTIVE_PWR_MODE:	return "ACTIVE";
	case UFS_SLEEP_PWR_MODE:	return "SLEEP";
	case UFS_POWERDOWN_PWR_MODE:	return "POWERDOWN";
	case UFS_DEEPSLEEP_PWR_MODE:	return "DEEPSLEEP";
	default:			return "UNKNOWN";
	}
}

static inline ssize_t ufs_sysfs_pm_lvl_store(struct device *dev,
					     struct device_attribute *attr,
					     const char *buf, size_t count,
					     bool rpm)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_dev_info *dev_info = &hba->dev_info;
	unsigned long flags, value;

	if (kstrtoul(buf, 0, &value))
		return -EINVAL;

	if (value >= UFS_PM_LVL_MAX)
		return -EINVAL;

	if (ufs_pm_lvl_states[value].dev_state == UFS_DEEPSLEEP_PWR_MODE &&
	    (!(hba->caps & UFSHCD_CAP_DEEPSLEEP) ||
	     !(dev_info->wspecversion >= 0x310)))
		return -EINVAL;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (rpm)
		hba->rpm_lvl = value;
	else
		hba->spm_lvl = value;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return count;
}
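/*
 * Note (illustrative, not part of the original source): rpm_lvl and spm_lvl
 * are indexes into ufs_pm_lvl_states[], so each level selects a (device
 * power mode, UIC link state) pair, as the *_target_dev_state and
 * *_target_link_state attributes below show. For example, assuming the level
 * is below UFS_PM_LVL_MAX, "echo 3 > .../rpm_lvl" (exact path depends on the
 * platform device) selects the fourth entry of that table for runtime PM.
 */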

static ssize_t rpm_lvl_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n", hba->rpm_lvl);
}

static ssize_t rpm_lvl_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	return ufs_sysfs_pm_lvl_store(dev, attr, buf, count, true);
}

static ssize_t rpm_target_dev_state_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", ufshcd_ufs_dev_pwr_mode_to_string(
			ufs_pm_lvl_states[hba->rpm_lvl].dev_state));
}

static ssize_t rpm_target_link_state_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", ufshcd_uic_link_state_to_string(
			ufs_pm_lvl_states[hba->rpm_lvl].link_state));
}

static ssize_t spm_lvl_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n", hba->spm_lvl);
}

static ssize_t spm_lvl_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	return ufs_sysfs_pm_lvl_store(dev, attr, buf, count, false);
}

static ssize_t spm_target_dev_state_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", ufshcd_ufs_dev_pwr_mode_to_string(
				ufs_pm_lvl_states[hba->spm_lvl].dev_state));
}

static ssize_t spm_target_link_state_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", ufshcd_uic_link_state_to_string(
				ufs_pm_lvl_states[hba->spm_lvl].link_state));
}

/* Convert Auto-Hibernate Idle Timer register value to microseconds */
static int ufshcd_ahit_to_us(u32 ahit)
{
	int timer = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK, ahit);
	int scale = FIELD_GET(UFSHCI_AHIBERN8_SCALE_MASK, ahit);

	for (; scale > 0; --scale)
		timer *= UFSHCI_AHIBERN8_SCALE_FACTOR;

	return timer;
}

/* Convert microseconds to Auto-Hibernate Idle Timer register value */
static u32 ufshcd_us_to_ahit(unsigned int timer)
{
	unsigned int scale;

	for (scale = 0; timer > UFSHCI_AHIBERN8_TIMER_MASK; ++scale)
		timer /= UFSHCI_AHIBERN8_SCALE_FACTOR;

	return FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, timer) |
	       FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, scale);
}
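/*
 * Worked example (illustrative; assumes a 10-bit timer field and a scale
 * factor of 100, as defined by UFSHCI for this register): a request of
 * 150000 us does not fit in the timer field, so it is divided by the scale
 * factor twice, giving timer = 15 and scale = 2. Decoding reverses this:
 * 15 * 100 * 100 = 150000 us. Values that are not an exact multiple of the
 * chosen scale are rounded down by the integer division above.
 */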

static ssize_t auto_hibern8_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	u32 ahit;
	int ret;
	struct ufs_hba *hba = dev_get_drvdata(dev);

	if (!ufshcd_is_auto_hibern8_supported(hba))
		return -EOPNOTSUPP;

	down(&hba->host_sem);
	if (!ufshcd_is_user_access_allowed(hba)) {
		ret = -EBUSY;
		goto out;
	}

	pm_runtime_get_sync(hba->dev);
	ufshcd_hold(hba);
	ahit = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER);
	ufshcd_release(hba);
	pm_runtime_put_sync(hba->dev);

	ret = sysfs_emit(buf, "%d\n", ufshcd_ahit_to_us(ahit));

out:
	up(&hba->host_sem);
	return ret;
}

static ssize_t auto_hibern8_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned int timer;
	int ret = 0;

	if (!ufshcd_is_auto_hibern8_supported(hba))
		return -EOPNOTSUPP;

	if (kstrtouint(buf, 0, &timer))
		return -EINVAL;

	if (timer > UFSHCI_AHIBERN8_MAX)
		return -EINVAL;

	down(&hba->host_sem);
	if (!ufshcd_is_user_access_allowed(hba)) {
		ret = -EBUSY;
		goto out;
	}

	ufshcd_auto_hibern8_update(hba, ufshcd_us_to_ahit(timer));

out:
	up(&hba->host_sem);
	return ret ? ret : count;
}

static ssize_t wb_on_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n", hba->dev_info.wb_enabled);
}

static ssize_t wb_on_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned int wb_enable;
	ssize_t res;

	if (!ufshcd_is_wb_allowed(hba) || (ufshcd_is_clkscaling_supported(hba)
		&& ufshcd_enable_wb_if_scaling_up(hba))) {
		/*
		 * If the platform supports UFSHCD_CAP_CLK_SCALING,
		 * WriteBooster is turned on/off automatically while scaling
		 * the clocks up/down, so it must not be configured here.
		 */
		dev_warn(dev, "It is not allowed to configure WB!\n");
		return -EOPNOTSUPP;
	}

	if (kstrtouint(buf, 0, &wb_enable))
		return -EINVAL;

	if (wb_enable != 0 && wb_enable != 1)
		return -EINVAL;

	down(&hba->host_sem);
	if (!ufshcd_is_user_access_allowed(hba)) {
		res = -EBUSY;
		goto out;
	}

	ufshcd_rpm_get_sync(hba);
	res = ufshcd_wb_toggle(hba, wb_enable);
	ufshcd_rpm_put_sync(hba);
out:
	up(&hba->host_sem);
	return res < 0 ? res : count;
}

static ssize_t enable_wb_buf_flush_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n", hba->dev_info.wb_buf_flush_enabled);
}

static ssize_t enable_wb_buf_flush_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned int enable_wb_buf_flush;
	ssize_t res;

	if (!ufshcd_is_wb_buf_flush_allowed(hba)) {
		dev_warn(dev, "It is not allowed to configure WB buf flushing!\n");
		return -EOPNOTSUPP;
	}

	if (kstrtouint(buf, 0, &enable_wb_buf_flush))
		return -EINVAL;

	if (enable_wb_buf_flush != 0 && enable_wb_buf_flush != 1)
		return -EINVAL;

	down(&hba->host_sem);
	if (!ufshcd_is_user_access_allowed(hba)) {
		res = -EBUSY;
		goto out;
	}

	ufshcd_rpm_get_sync(hba);
	res = ufshcd_wb_toggle_buf_flush(hba, enable_wb_buf_flush);
	ufshcd_rpm_put_sync(hba);

out:
	up(&hba->host_sem);
	return res < 0 ? res : count;
}

static ssize_t wb_flush_threshold_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", hba->vps->wb_flush_threshold);
}

static ssize_t wb_flush_threshold_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned int wb_flush_threshold;

	if (kstrtouint(buf, 0, &wb_flush_threshold))
		return -EINVAL;

	/* The range of values for wb_flush_threshold is (0,10] */
	if (wb_flush_threshold > UFS_WB_BUF_REMAIN_PERCENT(100) ||
	    wb_flush_threshold == 0) {
		dev_err(dev, "The value of wb_flush_threshold is invalid!\n");
		return -EINVAL;
	}

	hba->vps->wb_flush_threshold = wb_flush_threshold;

	return count;
}
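/*
 * Note (illustrative assumption): given the (0,10] range checked above and
 * UFS_WB_BUF_REMAIN_PERCENT(100) evaluating to 10, each step of
 * wb_flush_threshold appears to represent 10% of the available WriteBooster
 * buffer, e.g. a value of 4 roughly means "treat the buffer as needing a
 * flush once less than about 40% of it remains available".
 */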

static DEVICE_ATTR_RW(rpm_lvl);
static DEVICE_ATTR_RO(rpm_target_dev_state);
static DEVICE_ATTR_RO(rpm_target_link_state);
static DEVICE_ATTR_RW(spm_lvl);
static DEVICE_ATTR_RO(spm_target_dev_state);
static DEVICE_ATTR_RO(spm_target_link_state);
static DEVICE_ATTR_RW(auto_hibern8);
static DEVICE_ATTR_RW(wb_on);
static DEVICE_ATTR_RW(enable_wb_buf_flush);
static DEVICE_ATTR_RW(wb_flush_threshold);

static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
	&dev_attr_rpm_lvl.attr,
	&dev_attr_rpm_target_dev_state.attr,
	&dev_attr_rpm_target_link_state.attr,
	&dev_attr_spm_lvl.attr,
	&dev_attr_spm_target_dev_state.attr,
	&dev_attr_spm_target_link_state.attr,
	&dev_attr_auto_hibern8.attr,
	&dev_attr_wb_on.attr,
	&dev_attr_enable_wb_buf_flush.attr,
	&dev_attr_wb_flush_threshold.attr,
	NULL
};

static const struct attribute_group ufs_sysfs_default_group = {
	.attrs = ufs_sysfs_ufshcd_attrs,
};

static ssize_t clock_scaling_show(struct device *dev, struct device_attribute *attr,
				  char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n", ufshcd_is_clkscaling_supported(hba));
}

static ssize_t write_booster_show(struct device *dev, struct device_attribute *attr,
				  char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n", ufshcd_is_wb_allowed(hba));
}

static DEVICE_ATTR_RO(clock_scaling);
static DEVICE_ATTR_RO(write_booster);

/*
 * See Documentation/ABI/testing/sysfs-driver-ufs for the semantics of this
 * group.
 */
static struct attribute *ufs_sysfs_capabilities_attrs[] = {
	&dev_attr_clock_scaling.attr,
	&dev_attr_write_booster.attr,
	NULL
};

static const struct attribute_group ufs_sysfs_capabilities_group = {
	.name = "capabilities",
	.attrs = ufs_sysfs_capabilities_attrs,
};

static ssize_t monitor_enable_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n", hba->monitor.enabled);
}

static ssize_t monitor_enable_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned long value, flags;

	if (kstrtoul(buf, 0, &value))
		return -EINVAL;

	value = !!value;
	spin_lock_irqsave(hba->host->host_lock, flags);
	if (value == hba->monitor.enabled)
		goto out_unlock;

	if (!value) {
		memset(&hba->monitor, 0, sizeof(hba->monitor));
	} else {
		hba->monitor.enabled = true;
		hba->monitor.enabled_ts = ktime_get();
	}

out_unlock:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return count;
}

static ssize_t monitor_chunk_size_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", hba->monitor.chunk_size);
}

static ssize_t monitor_chunk_size_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned long value, flags;

	if (kstrtoul(buf, 0, &value))
		return -EINVAL;

	spin_lock_irqsave(hba->host->host_lock, flags);
	/* Only allow chunk size change when monitor is disabled */
	if (!hba->monitor.enabled)
		hba->monitor.chunk_size = value;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return count;
}
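/*
 * Note (illustrative): per the monitor group documented in
 * Documentation/ABI/testing/sysfs-driver-ufs, a non-zero chunk_size restricts
 * the statistics below to requests of exactly that size, while 0 monitors
 * requests of any size. The accounting itself happens in the request
 * issue/completion path, not in this file; the attributes here only report
 * the counters kept in hba->monitor.
 */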

static ssize_t read_total_sectors_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", hba->monitor.nr_sec_rw[READ]);
}

static ssize_t read_total_busy_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%llu\n",
			  ktime_to_us(hba->monitor.total_busy[READ]));
}

static ssize_t read_nr_requests_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", hba->monitor.nr_req[READ]);
}

static ssize_t read_req_latency_avg_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_hba_monitor *m = &hba->monitor;

	return sysfs_emit(buf, "%llu\n", div_u64(ktime_to_us(m->lat_sum[READ]),
						 m->nr_req[READ]));
}

static ssize_t read_req_latency_max_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%llu\n",
			  ktime_to_us(hba->monitor.lat_max[READ]));
}

static ssize_t read_req_latency_min_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%llu\n",
			  ktime_to_us(hba->monitor.lat_min[READ]));
}

static ssize_t read_req_latency_sum_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%llu\n",
			  ktime_to_us(hba->monitor.lat_sum[READ]));
}

static ssize_t write_total_sectors_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", hba->monitor.nr_sec_rw[WRITE]);
}

static ssize_t write_total_busy_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%llu\n",
			  ktime_to_us(hba->monitor.total_busy[WRITE]));
}

static ssize_t write_nr_requests_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", hba->monitor.nr_req[WRITE]);
}

static ssize_t write_req_latency_avg_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_hba_monitor *m = &hba->monitor;

	return sysfs_emit(buf, "%llu\n", div_u64(ktime_to_us(m->lat_sum[WRITE]),
						 m->nr_req[WRITE]));
}

static ssize_t write_req_latency_max_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%llu\n",
			  ktime_to_us(hba->monitor.lat_max[WRITE]));
}

static ssize_t write_req_latency_min_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%llu\n",
			  ktime_to_us(hba->monitor.lat_min[WRITE]));
}

static ssize_t write_req_latency_sum_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%llu\n",
			  ktime_to_us(hba->monitor.lat_sum[WRITE]));
}

static DEVICE_ATTR_RW(monitor_enable);
static DEVICE_ATTR_RW(monitor_chunk_size);
static DEVICE_ATTR_RO(read_total_sectors);
static DEVICE_ATTR_RO(read_total_busy);
static DEVICE_ATTR_RO(read_nr_requests);
static DEVICE_ATTR_RO(read_req_latency_avg);
static DEVICE_ATTR_RO(read_req_latency_max);
static DEVICE_ATTR_RO(read_req_latency_min);
static DEVICE_ATTR_RO(read_req_latency_sum);
static DEVICE_ATTR_RO(write_total_sectors);
static DEVICE_ATTR_RO(write_total_busy);
static DEVICE_ATTR_RO(write_nr_requests);
static DEVICE_ATTR_RO(write_req_latency_avg);
static DEVICE_ATTR_RO(write_req_latency_max);
static DEVICE_ATTR_RO(write_req_latency_min);
static DEVICE_ATTR_RO(write_req_latency_sum);

static struct attribute *ufs_sysfs_monitor_attrs[] = {
	&dev_attr_monitor_enable.attr,
	&dev_attr_monitor_chunk_size.attr,
	&dev_attr_read_total_sectors.attr,
	&dev_attr_read_total_busy.attr,
	&dev_attr_read_nr_requests.attr,
	&dev_attr_read_req_latency_avg.attr,
	&dev_attr_read_req_latency_max.attr,
	&dev_attr_read_req_latency_min.attr,
	&dev_attr_read_req_latency_sum.attr,
	&dev_attr_write_total_sectors.attr,
	&dev_attr_write_total_busy.attr,
	&dev_attr_write_nr_requests.attr,
	&dev_attr_write_req_latency_avg.attr,
	&dev_attr_write_req_latency_max.attr,
	&dev_attr_write_req_latency_min.attr,
	&dev_attr_write_req_latency_sum.attr,
	NULL
};

static const struct attribute_group ufs_sysfs_monitor_group = {
	.name = "monitor",
	.attrs = ufs_sysfs_monitor_attrs,
};

static ssize_t ufs_sysfs_read_desc_param(struct ufs_hba *hba,
				  enum desc_idn desc_id,
				  u8 desc_index,
				  u8 param_offset,
				  u8 *sysfs_buf,
				  u8 param_size)
{
	u8 desc_buf[8] = {0};
	int ret;

	if (param_size > 8)
		return -EINVAL;

	down(&hba->host_sem);
	if (!ufshcd_is_user_access_allowed(hba)) {
		ret = -EBUSY;
		goto out;
	}

	ufshcd_rpm_get_sync(hba);
	ret = ufshcd_read_desc_param(hba, desc_id, desc_index,
				param_offset, desc_buf, param_size);
	ufshcd_rpm_put_sync(hba);
	if (ret) {
		ret = -EINVAL;
		goto out;
	}

	switch (param_size) {
	case 1:
		ret = sysfs_emit(sysfs_buf, "0x%02X\n", *desc_buf);
		break;
	case 2:
		ret = sysfs_emit(sysfs_buf, "0x%04X\n",
			get_unaligned_be16(desc_buf));
		break;
	case 4:
		ret = sysfs_emit(sysfs_buf, "0x%08X\n",
			get_unaligned_be32(desc_buf));
		break;
	case 8:
		ret = sysfs_emit(sysfs_buf, "0x%016llX\n",
			get_unaligned_be64(desc_buf));
		break;
	}

out:
	up(&hba->host_sem);
	return ret;
}
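/*
 * Note (illustrative): descriptor parameters are big-endian on the wire,
 * hence the get_unaligned_be16/32/64 conversions above, and every value is
 * emitted as a fixed-width hexadecimal string, e.g. a 2-byte parameter reads
 * back as "0x0310\n". Sizes other than 1/2/4/8 are accepted by the size
 * check but not formatted by the switch, which would yield an empty read;
 * none of the callers below pass such a size.
 */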

#define UFS_DESC_PARAM(_name, _puname, _duname, _size)			\
static ssize_t _name##_show(struct device *dev,				\
	struct device_attribute *attr, char *buf)			\
{									\
	struct ufs_hba *hba = dev_get_drvdata(dev);			\
	return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_##_duname,	\
		0, _duname##_DESC_PARAM##_puname, buf, _size);		\
}									\
static DEVICE_ATTR_RO(_name)

#define UFS_DEVICE_DESC_PARAM(_name, _uname, _size)			\
	UFS_DESC_PARAM(_name, _uname, DEVICE, _size)
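/*
 * Expansion example (illustrative): UFS_DEVICE_DESC_PARAM(device_type,
 * _DEVICE_TYPE, 1) below generates a device_type_show() routine that reads
 * the 1-byte DEVICE_DESC_PARAM_DEVICE_TYPE field of QUERY_DESC_IDN_DEVICE
 * via ufs_sysfs_read_desc_param(), plus a read-only dev_attr_device_type
 * sysfs attribute.
 */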

UFS_DEVICE_DESC_PARAM(device_type, _DEVICE_TYPE, 1);
UFS_DEVICE_DESC_PARAM(device_class, _DEVICE_CLASS, 1);
UFS_DEVICE_DESC_PARAM(device_sub_class, _DEVICE_SUB_CLASS, 1);
UFS_DEVICE_DESC_PARAM(protocol, _PRTCL, 1);
UFS_DEVICE_DESC_PARAM(number_of_luns, _NUM_LU, 1);
UFS_DEVICE_DESC_PARAM(number_of_wluns, _NUM_WLU, 1);
UFS_DEVICE_DESC_PARAM(boot_enable, _BOOT_ENBL, 1);
UFS_DEVICE_DESC_PARAM(descriptor_access_enable, _DESC_ACCSS_ENBL, 1);
UFS_DEVICE_DESC_PARAM(initial_power_mode, _INIT_PWR_MODE, 1);
UFS_DEVICE_DESC_PARAM(high_priority_lun, _HIGH_PR_LUN, 1);
UFS_DEVICE_DESC_PARAM(secure_removal_type, _SEC_RMV_TYPE, 1);
UFS_DEVICE_DESC_PARAM(support_security_lun, _SEC_LU, 1);
UFS_DEVICE_DESC_PARAM(bkops_termination_latency, _BKOP_TERM_LT, 1);
UFS_DEVICE_DESC_PARAM(initial_active_icc_level, _ACTVE_ICC_LVL, 1);
UFS_DEVICE_DESC_PARAM(specification_version, _SPEC_VER, 2);
UFS_DEVICE_DESC_PARAM(manufacturing_date, _MANF_DATE, 2);
UFS_DEVICE_DESC_PARAM(manufacturer_id, _MANF_ID, 2);
UFS_DEVICE_DESC_PARAM(rtt_capability, _RTT_CAP, 1);
UFS_DEVICE_DESC_PARAM(rtc_update, _FRQ_RTC, 2);
UFS_DEVICE_DESC_PARAM(ufs_features, _UFS_FEAT, 1);
UFS_DEVICE_DESC_PARAM(ffu_timeout, _FFU_TMT, 1);
UFS_DEVICE_DESC_PARAM(queue_depth, _Q_DPTH, 1);
UFS_DEVICE_DESC_PARAM(device_version, _DEV_VER, 2);
UFS_DEVICE_DESC_PARAM(number_of_secure_wpa, _NUM_SEC_WPA, 1);
UFS_DEVICE_DESC_PARAM(psa_max_data_size, _PSA_MAX_DATA, 4);
UFS_DEVICE_DESC_PARAM(psa_state_timeout, _PSA_TMT, 1);
UFS_DEVICE_DESC_PARAM(ext_feature_sup, _EXT_UFS_FEATURE_SUP, 4);
UFS_DEVICE_DESC_PARAM(wb_presv_us_en, _WB_PRESRV_USRSPC_EN, 1);
UFS_DEVICE_DESC_PARAM(wb_type, _WB_TYPE, 1);
UFS_DEVICE_DESC_PARAM(wb_shared_alloc_units, _WB_SHARED_ALLOC_UNITS, 4);

static struct attribute *ufs_sysfs_device_descriptor[] = {
	&dev_attr_device_type.attr,
	&dev_attr_device_class.attr,
	&dev_attr_device_sub_class.attr,
	&dev_attr_protocol.attr,
	&dev_attr_number_of_luns.attr,
	&dev_attr_number_of_wluns.attr,
	&dev_attr_boot_enable.attr,
	&dev_attr_descriptor_access_enable.attr,
	&dev_attr_initial_power_mode.attr,
	&dev_attr_high_priority_lun.attr,
	&dev_attr_secure_removal_type.attr,
	&dev_attr_support_security_lun.attr,
	&dev_attr_bkops_termination_latency.attr,
	&dev_attr_initial_active_icc_level.attr,
	&dev_attr_specification_version.attr,
	&dev_attr_manufacturing_date.attr,
	&dev_attr_manufacturer_id.attr,
	&dev_attr_rtt_capability.attr,
	&dev_attr_rtc_update.attr,
	&dev_attr_ufs_features.attr,
	&dev_attr_ffu_timeout.attr,
	&dev_attr_queue_depth.attr,
	&dev_attr_device_version.attr,
	&dev_attr_number_of_secure_wpa.attr,
	&dev_attr_psa_max_data_size.attr,
	&dev_attr_psa_state_timeout.attr,
	&dev_attr_ext_feature_sup.attr,
	&dev_attr_wb_presv_us_en.attr,
	&dev_attr_wb_type.attr,
	&dev_attr_wb_shared_alloc_units.attr,
	NULL,
};

static const struct attribute_group ufs_sysfs_device_descriptor_group = {
	.name = "device_descriptor",
	.attrs = ufs_sysfs_device_descriptor,
};

#define UFS_INTERCONNECT_DESC_PARAM(_name, _uname, _size)		\
	UFS_DESC_PARAM(_name, _uname, INTERCONNECT, _size)

UFS_INTERCONNECT_DESC_PARAM(unipro_version, _UNIPRO_VER, 2);
UFS_INTERCONNECT_DESC_PARAM(mphy_version, _MPHY_VER, 2);

static struct attribute *ufs_sysfs_interconnect_descriptor[] = {
	&dev_attr_unipro_version.attr,
	&dev_attr_mphy_version.attr,
	NULL,
};

static const struct attribute_group ufs_sysfs_interconnect_descriptor_group = {
	.name = "interconnect_descriptor",
	.attrs = ufs_sysfs_interconnect_descriptor,
};

#define UFS_GEOMETRY_DESC_PARAM(_name, _uname, _size)			\
	UFS_DESC_PARAM(_name, _uname, GEOMETRY, _size)

UFS_GEOMETRY_DESC_PARAM(raw_device_capacity, _DEV_CAP, 8);
UFS_GEOMETRY_DESC_PARAM(max_number_of_luns, _MAX_NUM_LUN, 1);
UFS_GEOMETRY_DESC_PARAM(segment_size, _SEG_SIZE, 4);
UFS_GEOMETRY_DESC_PARAM(allocation_unit_size, _ALLOC_UNIT_SIZE, 1);
UFS_GEOMETRY_DESC_PARAM(min_addressable_block_size, _MIN_BLK_SIZE, 1);
UFS_GEOMETRY_DESC_PARAM(optimal_read_block_size, _OPT_RD_BLK_SIZE, 1);
UFS_GEOMETRY_DESC_PARAM(optimal_write_block_size, _OPT_WR_BLK_SIZE, 1);
UFS_GEOMETRY_DESC_PARAM(max_in_buffer_size, _MAX_IN_BUF_SIZE, 1);
UFS_GEOMETRY_DESC_PARAM(max_out_buffer_size, _MAX_OUT_BUF_SIZE, 1);
UFS_GEOMETRY_DESC_PARAM(rpmb_rw_size, _RPMB_RW_SIZE, 1);
UFS_GEOMETRY_DESC_PARAM(dyn_capacity_resource_policy, _DYN_CAP_RSRC_PLC, 1);
UFS_GEOMETRY_DESC_PARAM(data_ordering, _DATA_ORDER, 1);
UFS_GEOMETRY_DESC_PARAM(max_number_of_contexts, _MAX_NUM_CTX, 1);
UFS_GEOMETRY_DESC_PARAM(sys_data_tag_unit_size, _TAG_UNIT_SIZE, 1);
UFS_GEOMETRY_DESC_PARAM(sys_data_tag_resource_size, _TAG_RSRC_SIZE, 1);
UFS_GEOMETRY_DESC_PARAM(secure_removal_types, _SEC_RM_TYPES, 1);
UFS_GEOMETRY_DESC_PARAM(memory_types, _MEM_TYPES, 2);
UFS_GEOMETRY_DESC_PARAM(sys_code_memory_max_alloc_units,
	_SCM_MAX_NUM_UNITS, 4);
UFS_GEOMETRY_DESC_PARAM(sys_code_memory_capacity_adjustment_factor,
	_SCM_CAP_ADJ_FCTR, 2);
UFS_GEOMETRY_DESC_PARAM(non_persist_memory_max_alloc_units,
	_NPM_MAX_NUM_UNITS, 4);
UFS_GEOMETRY_DESC_PARAM(non_persist_memory_capacity_adjustment_factor,
	_NPM_CAP_ADJ_FCTR, 2);
UFS_GEOMETRY_DESC_PARAM(enh1_memory_max_alloc_units,
	_ENM1_MAX_NUM_UNITS, 4);
UFS_GEOMETRY_DESC_PARAM(enh1_memory_capacity_adjustment_factor,
	_ENM1_CAP_ADJ_FCTR, 2);
UFS_GEOMETRY_DESC_PARAM(enh2_memory_max_alloc_units,
	_ENM2_MAX_NUM_UNITS, 4);
UFS_GEOMETRY_DESC_PARAM(enh2_memory_capacity_adjustment_factor,
	_ENM2_CAP_ADJ_FCTR, 2);
UFS_GEOMETRY_DESC_PARAM(enh3_memory_max_alloc_units,
	_ENM3_MAX_NUM_UNITS, 4);
UFS_GEOMETRY_DESC_PARAM(enh3_memory_capacity_adjustment_factor,
	_ENM3_CAP_ADJ_FCTR, 2);
UFS_GEOMETRY_DESC_PARAM(enh4_memory_max_alloc_units,
	_ENM4_MAX_NUM_UNITS, 4);
UFS_GEOMETRY_DESC_PARAM(enh4_memory_capacity_adjustment_factor,
	_ENM4_CAP_ADJ_FCTR, 2);
UFS_GEOMETRY_DESC_PARAM(wb_max_alloc_units, _WB_MAX_ALLOC_UNITS, 4);
UFS_GEOMETRY_DESC_PARAM(wb_max_wb_luns, _WB_MAX_WB_LUNS, 1);
UFS_GEOMETRY_DESC_PARAM(wb_buff_cap_adj, _WB_BUFF_CAP_ADJ, 1);
UFS_GEOMETRY_DESC_PARAM(wb_sup_red_type, _WB_SUP_RED_TYPE, 1);
UFS_GEOMETRY_DESC_PARAM(wb_sup_wb_type, _WB_SUP_WB_TYPE, 1);

static struct attribute *ufs_sysfs_geometry_descriptor[] = {
	&dev_attr_raw_device_capacity.attr,
	&dev_attr_max_number_of_luns.attr,
	&dev_attr_segment_size.attr,
	&dev_attr_allocation_unit_size.attr,
	&dev_attr_min_addressable_block_size.attr,
	&dev_attr_optimal_read_block_size.attr,
	&dev_attr_optimal_write_block_size.attr,
	&dev_attr_max_in_buffer_size.attr,
	&dev_attr_max_out_buffer_size.attr,
	&dev_attr_rpmb_rw_size.attr,
	&dev_attr_dyn_capacity_resource_policy.attr,
	&dev_attr_data_ordering.attr,
	&dev_attr_max_number_of_contexts.attr,
	&dev_attr_sys_data_tag_unit_size.attr,
	&dev_attr_sys_data_tag_resource_size.attr,
	&dev_attr_secure_removal_types.attr,
	&dev_attr_memory_types.attr,
	&dev_attr_sys_code_memory_max_alloc_units.attr,
	&dev_attr_sys_code_memory_capacity_adjustment_factor.attr,
	&dev_attr_non_persist_memory_max_alloc_units.attr,
	&dev_attr_non_persist_memory_capacity_adjustment_factor.attr,
	&dev_attr_enh1_memory_max_alloc_units.attr,
	&dev_attr_enh1_memory_capacity_adjustment_factor.attr,
	&dev_attr_enh2_memory_max_alloc_units.attr,
	&dev_attr_enh2_memory_capacity_adjustment_factor.attr,
	&dev_attr_enh3_memory_max_alloc_units.attr,
	&dev_attr_enh3_memory_capacity_adjustment_factor.attr,
	&dev_attr_enh4_memory_max_alloc_units.attr,
	&dev_attr_enh4_memory_capacity_adjustment_factor.attr,
	&dev_attr_wb_max_alloc_units.attr,
	&dev_attr_wb_max_wb_luns.attr,
	&dev_attr_wb_buff_cap_adj.attr,
	&dev_attr_wb_sup_red_type.attr,
	&dev_attr_wb_sup_wb_type.attr,
	NULL,
};

static const struct attribute_group ufs_sysfs_geometry_descriptor_group = {
	.name = "geometry_descriptor",
	.attrs = ufs_sysfs_geometry_descriptor,
};

#define UFS_HEALTH_DESC_PARAM(_name, _uname, _size)			\
	UFS_DESC_PARAM(_name, _uname, HEALTH, _size)

UFS_HEALTH_DESC_PARAM(eol_info, _EOL_INFO, 1);
UFS_HEALTH_DESC_PARAM(life_time_estimation_a, _LIFE_TIME_EST_A, 1);
UFS_HEALTH_DESC_PARAM(life_time_estimation_b, _LIFE_TIME_EST_B, 1);

static struct attribute *ufs_sysfs_health_descriptor[] = {
	&dev_attr_eol_info.attr,
	&dev_attr_life_time_estimation_a.attr,
	&dev_attr_life_time_estimation_b.attr,
	NULL,
};

static const struct attribute_group ufs_sysfs_health_descriptor_group = {
	.name = "health_descriptor",
	.attrs = ufs_sysfs_health_descriptor,
};

#define UFS_POWER_DESC_PARAM(_name, _uname, _index)			\
static ssize_t _name##_index##_show(struct device *dev,			\
	struct device_attribute *attr, char *buf)			\
{									\
	struct ufs_hba *hba = dev_get_drvdata(dev);			\
	return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0,	\
		PWR_DESC##_uname##_0 + _index * 2, buf, 2);		\
}									\
static DEVICE_ATTR_RO(_name##_index)
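/*
 * Expansion example (illustrative): because _name and _index are pasted
 * together, UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 0)
 * below generates active_icc_levels_vcc0_show(), which reads the 2-byte
 * entry at offset PWR_DESC_ACTIVE_LVLS_VCC_0 + 0 * 2 of the power
 * descriptor.
 */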

UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 0);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 1);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 2);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 3);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 4);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 5);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 6);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 7);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 8);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 9);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 10);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 11);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 12);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 13);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 14);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 15);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 0);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 1);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 2);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 3);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 4);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 5);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 6);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 7);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 8);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 9);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 10);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 11);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 12);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 13);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 14);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 15);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 0);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 1);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 2);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 3);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 4);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 5);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 6);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 7);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 8);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 9);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 10);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 11);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 12);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 13);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 14);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 15);

static struct attribute *ufs_sysfs_power_descriptor[] = {
	&dev_attr_active_icc_levels_vcc0.attr,
	&dev_attr_active_icc_levels_vcc1.attr,
	&dev_attr_active_icc_levels_vcc2.attr,
	&dev_attr_active_icc_levels_vcc3.attr,
	&dev_attr_active_icc_levels_vcc4.attr,
	&dev_attr_active_icc_levels_vcc5.attr,
	&dev_attr_active_icc_levels_vcc6.attr,
	&dev_attr_active_icc_levels_vcc7.attr,
	&dev_attr_active_icc_levels_vcc8.attr,
	&dev_attr_active_icc_levels_vcc9.attr,
	&dev_attr_active_icc_levels_vcc10.attr,
	&dev_attr_active_icc_levels_vcc11.attr,
	&dev_attr_active_icc_levels_vcc12.attr,
	&dev_attr_active_icc_levels_vcc13.attr,
	&dev_attr_active_icc_levels_vcc14.attr,
	&dev_attr_active_icc_levels_vcc15.attr,
	&dev_attr_active_icc_levels_vccq0.attr,
	&dev_attr_active_icc_levels_vccq1.attr,
	&dev_attr_active_icc_levels_vccq2.attr,
	&dev_attr_active_icc_levels_vccq3.attr,
	&dev_attr_active_icc_levels_vccq4.attr,
	&dev_attr_active_icc_levels_vccq5.attr,
	&dev_attr_active_icc_levels_vccq6.attr,
	&dev_attr_active_icc_levels_vccq7.attr,
	&dev_attr_active_icc_levels_vccq8.attr,
	&dev_attr_active_icc_levels_vccq9.attr,
	&dev_attr_active_icc_levels_vccq10.attr,
	&dev_attr_active_icc_levels_vccq11.attr,
	&dev_attr_active_icc_levels_vccq12.attr,
	&dev_attr_active_icc_levels_vccq13.attr,
	&dev_attr_active_icc_levels_vccq14.attr,
	&dev_attr_active_icc_levels_vccq15.attr,
	&dev_attr_active_icc_levels_vccq20.attr,
	&dev_attr_active_icc_levels_vccq21.attr,
	&dev_attr_active_icc_levels_vccq22.attr,
	&dev_attr_active_icc_levels_vccq23.attr,
	&dev_attr_active_icc_levels_vccq24.attr,
	&dev_attr_active_icc_levels_vccq25.attr,
	&dev_attr_active_icc_levels_vccq26.attr,
	&dev_attr_active_icc_levels_vccq27.attr,
	&dev_attr_active_icc_levels_vccq28.attr,
	&dev_attr_active_icc_levels_vccq29.attr,
	&dev_attr_active_icc_levels_vccq210.attr,
	&dev_attr_active_icc_levels_vccq211.attr,
	&dev_attr_active_icc_levels_vccq212.attr,
	&dev_attr_active_icc_levels_vccq213.attr,
	&dev_attr_active_icc_levels_vccq214.attr,
	&dev_attr_active_icc_levels_vccq215.attr,
	NULL,
};

static const struct attribute_group ufs_sysfs_power_descriptor_group = {
	.name = "power_descriptor",
	.attrs = ufs_sysfs_power_descriptor,
};

#define UFS_STRING_DESCRIPTOR(_name, _pname)				\
static ssize_t _name##_show(struct device *dev,				\
	struct device_attribute *attr, char *buf)			\
{									\
	u8 index;							\
	struct ufs_hba *hba = dev_get_drvdata(dev);			\
	int ret;							\
	int desc_len = QUERY_DESC_MAX_SIZE;				\
	u8 *desc_buf;							\
									\
	down(&hba->host_sem);						\
	if (!ufshcd_is_user_access_allowed(hba)) {			\
		up(&hba->host_sem);					\
		return -EBUSY;						\
	}								\
	desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_ATOMIC);		\
	if (!desc_buf) {						\
		up(&hba->host_sem);					\
		return -ENOMEM;						\
	}								\
	ufshcd_rpm_get_sync(hba);					\
	ret = ufshcd_query_descriptor_retry(hba,			\
		UPIU_QUERY_OPCODE_READ_DESC, QUERY_DESC_IDN_DEVICE,	\
		0, 0, desc_buf, &desc_len);				\
	if (ret) {							\
		ret = -EINVAL;						\
		goto out;						\
	}								\
	index = desc_buf[DEVICE_DESC_PARAM##_pname];			\
	kfree(desc_buf);						\
	desc_buf = NULL;						\
	ret = ufshcd_read_string_desc(hba, index, &desc_buf,		\
				      SD_ASCII_STD);			\
	if (ret < 0)							\
		goto out;						\
	ret = sysfs_emit(buf, "%s\n", desc_buf);			\
out:									\
	ufshcd_rpm_put_sync(hba);					\
	kfree(desc_buf);						\
	up(&hba->host_sem);						\
	return ret;							\
}									\
static DEVICE_ATTR_RO(_name)
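/*
 * Note (illustrative): each string attribute is read in two steps. The
 * device descriptor is queried first to find the string descriptor index
 * (e.g. DEVICE_DESC_PARAM_SN for serial_number), and that index is then
 * passed to ufshcd_read_string_desc() to fetch the ASCII string itself.
 */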

UFS_STRING_DESCRIPTOR(manufacturer_name, _MANF_NAME);
UFS_STRING_DESCRIPTOR(product_name, _PRDCT_NAME);
UFS_STRING_DESCRIPTOR(oem_id, _OEM_ID);
UFS_STRING_DESCRIPTOR(serial_number, _SN);
UFS_STRING_DESCRIPTOR(product_revision, _PRDCT_REV);

static struct attribute *ufs_sysfs_string_descriptors[] = {
	&dev_attr_manufacturer_name.attr,
	&dev_attr_product_name.attr,
	&dev_attr_oem_id.attr,
	&dev_attr_serial_number.attr,
	&dev_attr_product_revision.attr,
	NULL,
};

static const struct attribute_group ufs_sysfs_string_descriptors_group = {
	.name = "string_descriptors",
	.attrs = ufs_sysfs_string_descriptors,
};

static inline bool ufshcd_is_wb_flags(enum flag_idn idn)
{
	return idn >= QUERY_FLAG_IDN_WB_EN &&
		idn <= QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8;
}

#define UFS_FLAG(_name, _uname)						\
static ssize_t _name##_show(struct device *dev,				\
	struct device_attribute *attr, char *buf)			\
{									\
	bool flag;							\
	u8 index = 0;							\
	int ret;							\
	struct ufs_hba *hba = dev_get_drvdata(dev);			\
									\
	down(&hba->host_sem);						\
	if (!ufshcd_is_user_access_allowed(hba)) {			\
		up(&hba->host_sem);					\
		return -EBUSY;						\
	}								\
	if (ufshcd_is_wb_flags(QUERY_FLAG_IDN##_uname))			\
		index = ufshcd_wb_get_query_index(hba);			\
	ufshcd_rpm_get_sync(hba);					\
	ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,	\
		QUERY_FLAG_IDN##_uname, index, &flag);			\
	ufshcd_rpm_put_sync(hba);					\
	if (ret) {							\
		ret = -EINVAL;						\
		goto out;						\
	}								\
	ret = sysfs_emit(buf, "%s\n", flag ? "true" : "false");		\
out:									\
	up(&hba->host_sem);						\
	return ret;							\
}									\
static DEVICE_ATTR_RO(_name)
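/*
 * Expansion example (illustrative): UFS_FLAG(wb_enable, _WB_EN) below
 * generates wb_enable_show(), which issues a READ_FLAG query for
 * QUERY_FLAG_IDN_WB_EN and prints "true" or "false". The WriteBooster flags
 * additionally use ufshcd_wb_get_query_index() to pick the index the query
 * is addressed to.
 */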

UFS_FLAG(device_init, _FDEVICEINIT);
UFS_FLAG(permanent_wpe, _PERMANENT_WPE);
UFS_FLAG(power_on_wpe, _PWR_ON_WPE);
UFS_FLAG(bkops_enable, _BKOPS_EN);
UFS_FLAG(life_span_mode_enable, _LIFE_SPAN_MODE_ENABLE);
UFS_FLAG(phy_resource_removal, _FPHYRESOURCEREMOVAL);
UFS_FLAG(busy_rtc, _BUSY_RTC);
UFS_FLAG(disable_fw_update, _PERMANENTLY_DISABLE_FW_UPDATE);
UFS_FLAG(wb_enable, _WB_EN);
UFS_FLAG(wb_flush_en, _WB_BUFF_FLUSH_EN);
UFS_FLAG(wb_flush_during_h8, _WB_BUFF_FLUSH_DURING_HIBERN8);

static struct attribute *ufs_sysfs_device_flags[] = {
	&dev_attr_device_init.attr,
	&dev_attr_permanent_wpe.attr,
	&dev_attr_power_on_wpe.attr,
	&dev_attr_bkops_enable.attr,
	&dev_attr_life_span_mode_enable.attr,
	&dev_attr_phy_resource_removal.attr,
	&dev_attr_busy_rtc.attr,
	&dev_attr_disable_fw_update.attr,
	&dev_attr_wb_enable.attr,
	&dev_attr_wb_flush_en.attr,
	&dev_attr_wb_flush_during_h8.attr,
	NULL,
};

static const struct attribute_group ufs_sysfs_flags_group = {
	.name = "flags",
	.attrs = ufs_sysfs_device_flags,
};

static inline bool ufshcd_is_wb_attrs(enum attr_idn idn)
{
	return idn >= QUERY_ATTR_IDN_WB_FLUSH_STATUS &&
		idn <= QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE;
}

#define UFS_ATTRIBUTE(_name, _uname)					\
static ssize_t _name##_show(struct device *dev,				\
	struct device_attribute *attr, char *buf)			\
{									\
	struct ufs_hba *hba = dev_get_drvdata(dev);			\
	u32 value;							\
	int ret;							\
	u8 index = 0;							\
									\
	down(&hba->host_sem);						\
	if (!ufshcd_is_user_access_allowed(hba)) {			\
		up(&hba->host_sem);					\
		return -EBUSY;						\
	}								\
	if (ufshcd_is_wb_attrs(QUERY_ATTR_IDN##_uname))			\
		index = ufshcd_wb_get_query_index(hba);			\
	ufshcd_rpm_get_sync(hba);					\
	ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,	\
		QUERY_ATTR_IDN##_uname, index, 0, &value);		\
	ufshcd_rpm_put_sync(hba);					\
	if (ret) {							\
		ret = -EINVAL;						\
		goto out;						\
	}								\
	ret = sysfs_emit(buf, "0x%08X\n", value);			\
out:									\
	up(&hba->host_sem);						\
	return ret;							\
}									\
static DEVICE_ATTR_RO(_name)
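/*
 * Expansion example (illustrative): UFS_ATTRIBUTE(bkops_status,
 * _BKOPS_STATUS) below generates bkops_status_show(), which issues a
 * READ_ATTR query for QUERY_ATTR_IDN_BKOPS_STATUS and prints the raw value
 * as a 32-bit hexadecimal number (e.g. "0x00000001").
 */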

UFS_ATTRIBUTE(boot_lun_enabled, _BOOT_LU_EN);
UFS_ATTRIBUTE(current_power_mode, _POWER_MODE);
UFS_ATTRIBUTE(active_icc_level, _ACTIVE_ICC_LVL);
UFS_ATTRIBUTE(ooo_data_enabled, _OOO_DATA_EN);
UFS_ATTRIBUTE(bkops_status, _BKOPS_STATUS);
UFS_ATTRIBUTE(purge_status, _PURGE_STATUS);
UFS_ATTRIBUTE(max_data_in_size, _MAX_DATA_IN);
UFS_ATTRIBUTE(max_data_out_size, _MAX_DATA_OUT);
UFS_ATTRIBUTE(reference_clock_frequency, _REF_CLK_FREQ);
UFS_ATTRIBUTE(configuration_descriptor_lock, _CONF_DESC_LOCK);
UFS_ATTRIBUTE(max_number_of_rtt, _MAX_NUM_OF_RTT);
UFS_ATTRIBUTE(exception_event_control, _EE_CONTROL);
UFS_ATTRIBUTE(exception_event_status, _EE_STATUS);
UFS_ATTRIBUTE(ffu_status, _FFU_STATUS);
UFS_ATTRIBUTE(psa_state, _PSA_STATE);
UFS_ATTRIBUTE(psa_data_size, _PSA_DATA_SIZE);
UFS_ATTRIBUTE(wb_flush_status, _WB_FLUSH_STATUS);
UFS_ATTRIBUTE(wb_avail_buf, _AVAIL_WB_BUFF_SIZE);
UFS_ATTRIBUTE(wb_life_time_est, _WB_BUFF_LIFE_TIME_EST);
UFS_ATTRIBUTE(wb_cur_buf, _CURR_WB_BUFF_SIZE);

static struct attribute *ufs_sysfs_attributes[] = {
	&dev_attr_boot_lun_enabled.attr,
	&dev_attr_current_power_mode.attr,
	&dev_attr_active_icc_level.attr,
	&dev_attr_ooo_data_enabled.attr,
	&dev_attr_bkops_status.attr,
	&dev_attr_purge_status.attr,
	&dev_attr_max_data_in_size.attr,
	&dev_attr_max_data_out_size.attr,
	&dev_attr_reference_clock_frequency.attr,
	&dev_attr_configuration_descriptor_lock.attr,
	&dev_attr_max_number_of_rtt.attr,
	&dev_attr_exception_event_control.attr,
	&dev_attr_exception_event_status.attr,
	&dev_attr_ffu_status.attr,
	&dev_attr_psa_state.attr,
	&dev_attr_psa_data_size.attr,
	&dev_attr_wb_flush_status.attr,
	&dev_attr_wb_avail_buf.attr,
	&dev_attr_wb_life_time_est.attr,
	&dev_attr_wb_cur_buf.attr,
	NULL,
};

static const struct attribute_group ufs_sysfs_attributes_group = {
	.name = "attributes",
	.attrs = ufs_sysfs_attributes,
};

static const struct attribute_group *ufs_sysfs_groups[] = {
	&ufs_sysfs_default_group,
	&ufs_sysfs_capabilities_group,
	&ufs_sysfs_monitor_group,
	&ufs_sysfs_device_descriptor_group,
	&ufs_sysfs_interconnect_descriptor_group,
	&ufs_sysfs_geometry_descriptor_group,
	&ufs_sysfs_health_descriptor_group,
	&ufs_sysfs_power_descriptor_group,
	&ufs_sysfs_string_descriptors_group,
	&ufs_sysfs_flags_group,
	&ufs_sysfs_attributes_group,
	NULL,
};

#define UFS_LUN_DESC_PARAM(_pname, _puname, _duname, _size)		\
static ssize_t _pname##_show(struct device *dev,			\
	struct device_attribute *attr, char *buf)			\
{									\
	struct scsi_device *sdev = to_scsi_device(dev);			\
	struct ufs_hba *hba = shost_priv(sdev->host);			\
	u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun);			\
	if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun))		\
		return -EINVAL;						\
	return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_##_duname,	\
		lun, _duname##_DESC_PARAM##_puname, buf, _size);	\
}									\
static DEVICE_ATTR_RO(_pname)

#define UFS_UNIT_DESC_PARAM(_name, _uname, _size)			\
	UFS_LUN_DESC_PARAM(_name, _uname, UNIT, _size)
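/*
 * Expansion example (illustrative): unlike the device-wide descriptors
 * above, these attributes hang off each SCSI device, so
 * UFS_UNIT_DESC_PARAM(lu_enable, _LU_ENABLE, 1) below generates
 * lu_enable_show(), which derives the UPIU LUN from sdev->lun and reads the
 * 1-byte UNIT_DESC_PARAM_LU_ENABLE field of that LUN's unit descriptor.
 */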

UFS_UNIT_DESC_PARAM(lu_enable, _LU_ENABLE, 1);
UFS_UNIT_DESC_PARAM(boot_lun_id, _BOOT_LUN_ID, 1);
UFS_UNIT_DESC_PARAM(lun_write_protect, _LU_WR_PROTECT, 1);
UFS_UNIT_DESC_PARAM(lun_queue_depth, _LU_Q_DEPTH, 1);
UFS_UNIT_DESC_PARAM(psa_sensitive, _PSA_SENSITIVE, 1);
UFS_UNIT_DESC_PARAM(lun_memory_type, _MEM_TYPE, 1);
UFS_UNIT_DESC_PARAM(data_reliability, _DATA_RELIABILITY, 1);
UFS_UNIT_DESC_PARAM(logical_block_size, _LOGICAL_BLK_SIZE, 1);
UFS_UNIT_DESC_PARAM(logical_block_count, _LOGICAL_BLK_COUNT, 8);
UFS_UNIT_DESC_PARAM(erase_block_size, _ERASE_BLK_SIZE, 4);
UFS_UNIT_DESC_PARAM(provisioning_type, _PROVISIONING_TYPE, 1);
UFS_UNIT_DESC_PARAM(physical_memory_resourse_count, _PHY_MEM_RSRC_CNT, 8);
UFS_UNIT_DESC_PARAM(context_capabilities, _CTX_CAPABILITIES, 2);
UFS_UNIT_DESC_PARAM(large_unit_granularity, _LARGE_UNIT_SIZE_M1, 1);
UFS_UNIT_DESC_PARAM(wb_buf_alloc_units, _WB_BUF_ALLOC_UNITS, 4);

static struct attribute *ufs_sysfs_unit_descriptor[] = {
	&dev_attr_lu_enable.attr,
	&dev_attr_boot_lun_id.attr,
	&dev_attr_lun_write_protect.attr,
	&dev_attr_lun_queue_depth.attr,
	&dev_attr_psa_sensitive.attr,
	&dev_attr_lun_memory_type.attr,
	&dev_attr_data_reliability.attr,
	&dev_attr_logical_block_size.attr,
	&dev_attr_logical_block_count.attr,
	&dev_attr_erase_block_size.attr,
	&dev_attr_provisioning_type.attr,
	&dev_attr_physical_memory_resourse_count.attr,
	&dev_attr_context_capabilities.attr,
	&dev_attr_large_unit_granularity.attr,
	&dev_attr_wb_buf_alloc_units.attr,
	NULL,
};

static umode_t ufs_unit_descriptor_is_visible(struct kobject *kobj, struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct scsi_device *sdev = to_scsi_device(dev);
	u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun);
	umode_t mode = attr->mode;

	if (lun == UFS_UPIU_BOOT_WLUN || lun == UFS_UPIU_UFS_DEVICE_WLUN)
		/* Boot and device WLUN have no unit descriptors */
		mode = 0;
	if (lun == UFS_UPIU_RPMB_WLUN && attr == &dev_attr_wb_buf_alloc_units.attr)
		mode = 0;

	return mode;
}

const struct attribute_group ufs_sysfs_unit_descriptor_group = {
	.name = "unit_descriptor",
	.attrs = ufs_sysfs_unit_descriptor,
	.is_visible = ufs_unit_descriptor_is_visible,
};

static ssize_t dyn_cap_needed_attribute_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	u32 value;
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ufs_hba *hba = shost_priv(sdev->host);
	u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun);
	int ret;

	down(&hba->host_sem);
	if (!ufshcd_is_user_access_allowed(hba)) {
		ret = -EBUSY;
		goto out;
	}

	ufshcd_rpm_get_sync(hba);
	ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
		QUERY_ATTR_IDN_DYN_CAP_NEEDED, lun, 0, &value);
	ufshcd_rpm_put_sync(hba);
	if (ret) {
		ret = -EINVAL;
		goto out;
	}

	ret = sysfs_emit(buf, "0x%08X\n", value);

out:
	up(&hba->host_sem);
	return ret;
}
static DEVICE_ATTR_RO(dyn_cap_needed_attribute);

static struct attribute *ufs_sysfs_lun_attributes[] = {
	&dev_attr_dyn_cap_needed_attribute.attr,
	NULL,
};

const struct attribute_group ufs_sysfs_lun_attributes_group = {
	.attrs = ufs_sysfs_lun_attributes,
};

void ufs_sysfs_add_nodes(struct device *dev)
{
	int ret;

	ret = sysfs_create_groups(&dev->kobj, ufs_sysfs_groups);
	if (ret)
		dev_err(dev,
			"%s: sysfs groups creation failed (err = %d)\n",
			__func__, ret);
}

void ufs_sysfs_remove_nodes(struct device *dev)
{
	sysfs_remove_groups(&dev->kobj, ufs_sysfs_groups);
}