// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Universal Flash Storage Host controller PCI glue driver
 *
 * This code is based on drivers/scsi/ufs/ufshcd-pci.c
 * Copyright (C) 2011-2013 Samsung India Software Operations
 *
 * Authors:
 *	Santosh Yaraganavi <santosh.sy@samsung.com>
 *	Vinayak Holikatti <h.vinayak@samsung.com>
 */

#include "ufshcd.h"
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
#include <linux/debugfs.h>

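/*
 * Per-controller data for the Intel variants: cached copies of the
 * active/idle LTR registers and the debugfs directory that exposes them.
 */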
struct intel_host {
	u32		active_ltr;
	u32		idle_ltr;
	struct dentry	*debugfs_root;
};

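/*
 * Disable the host's local TX LCC (line control) before link startup if it
 * is currently enabled; LCC is typically turned off because some UFS devices
 * do not handle it correctly.
 */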
static int ufs_intel_disable_lcc(struct ufs_hba *hba)
{
	u32 attr = UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE);
	u32 lcc_enable = 0;

	ufshcd_dme_get(hba, attr, &lcc_enable);
	if (lcc_enable)
		ufshcd_disable_host_tx_lcc(hba);

	return 0;
}

static int ufs_intel_link_startup_notify(struct ufs_hba *hba,
					 enum ufs_notify_change_status status)
{
	int err = 0;

	switch (status) {
	case PRE_CHANGE:
		err = ufs_intel_disable_lcc(hba);
		break;
	case POST_CHANGE:
		break;
	default:
		break;
	}

	return err;
}

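/* Intel host controller Latency Tolerance Reporting (LTR) registers */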
#define INTEL_ACTIVELTR		0x804
#define INTEL_IDLELTR		0x808

#define INTEL_LTR_REQ		BIT(15)
#define INTEL_LTR_SCALE_MASK	GENMASK(11, 10)
#define INTEL_LTR_SCALE_1US	(2 << 10)
#define INTEL_LTR_SCALE_32US	(3 << 10)
#define INTEL_LTR_VALUE_MASK	GENMASK(9, 0)

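/* Cache the current active and idle LTR register values in the host data. */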
static void intel_cache_ltr(struct ufs_hba *hba)
{
	struct intel_host *host = ufshcd_get_variant(hba);

	host->active_ltr = readl(hba->mmio_base + INTEL_ACTIVELTR);
	host->idle_ltr = readl(hba->mmio_base + INTEL_IDLELTR);
}

static void intel_ltr_set(struct device *dev, s32 val)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct intel_host *host = ufshcd_get_variant(hba);
	u32 ltr;

	pm_runtime_get_sync(dev);

	/*
	 * Program latency tolerance (LTR) according to what has been asked
	 * by the PM QoS layer, or disable it in case we were passed a
	 * negative value or PM_QOS_LATENCY_ANY.
	 */
	ltr = readl(hba->mmio_base + INTEL_ACTIVELTR);

	if (val == PM_QOS_LATENCY_ANY || val < 0) {
		ltr &= ~INTEL_LTR_REQ;
	} else {
		ltr |= INTEL_LTR_REQ;
		ltr &= ~INTEL_LTR_SCALE_MASK;
		ltr &= ~INTEL_LTR_VALUE_MASK;

		if (val > INTEL_LTR_VALUE_MASK) {
			val >>= 5;
			if (val > INTEL_LTR_VALUE_MASK)
				val = INTEL_LTR_VALUE_MASK;
			ltr |= INTEL_LTR_SCALE_32US | val;
		} else {
			ltr |= INTEL_LTR_SCALE_1US | val;
		}
	}

	if (ltr == host->active_ltr)
		goto out;

	writel(ltr, hba->mmio_base + INTEL_ACTIVELTR);
	writel(ltr, hba->mmio_base + INTEL_IDLELTR);

	/* Cache the values into intel_host structure */
	intel_cache_ltr(hba);
out:
	pm_runtime_put(dev);
}

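/*
 * Hook the device into the PM QoS latency tolerance framework so that
 * intel_ltr_set() runs whenever the requested latency tolerance changes.
 */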
static void intel_ltr_expose(struct device *dev)
{
	dev->power.set_latency_tolerance = intel_ltr_set;
	dev_pm_qos_expose_latency_tolerance(dev);
}

static void intel_ltr_hide(struct device *dev)
{
	dev_pm_qos_hide_latency_tolerance(dev);
	dev->power.set_latency_tolerance = NULL;
}

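/* Expose the cached active/idle LTR values as read-only debugfs entries. */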
static void intel_add_debugfs(struct ufs_hba *hba)
{
	struct dentry *dir = debugfs_create_dir(dev_name(hba->dev), NULL);
	struct intel_host *host = ufshcd_get_variant(hba);

	intel_cache_ltr(hba);

	host->debugfs_root = dir;
	debugfs_create_x32("active_ltr", 0444, dir, &host->active_ltr);
	debugfs_create_x32("idle_ltr", 0444, dir, &host->idle_ltr);
}

static void intel_remove_debugfs(struct ufs_hba *hba)
{
	struct intel_host *host = ufshcd_get_variant(hba);

	debugfs_remove_recursive(host->debugfs_root);
}

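/*
 * Common init for the Intel variants: enable runtime PM autosuspend,
 * allocate the variant data, expose LTR via PM QoS and create the debugfs
 * entries.
 */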
static int ufs_intel_common_init(struct ufs_hba *hba)
{
	struct intel_host *host;

	hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;

	host = devm_kzalloc(hba->dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;
	ufshcd_set_variant(hba, host);
	intel_ltr_expose(hba->dev);
	intel_add_debugfs(hba);
	return 0;
}

static void ufs_intel_common_exit(struct ufs_hba *hba)
{
	intel_remove_debugfs(hba);
	intel_ltr_hide(hba->dev);
}

static int ufs_intel_resume(struct ufs_hba *hba, enum ufs_pm_op op)
{
	/*
	 * To support S4 (suspend-to-disk) with spm_lvl other than 5, the base
	 * address registers must be restored because the restore kernel can
	 * have used different addresses.
	 */
	ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
		      REG_UTP_TRANSFER_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
		      REG_UTP_TRANSFER_REQ_LIST_BASE_H);
	ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
		      REG_UTP_TASK_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
		      REG_UTP_TASK_REQ_LIST_BASE_H);

	if (ufshcd_is_link_hibern8(hba)) {
		int ret = ufshcd_uic_hibern8_exit(hba);

		if (!ret) {
			ufshcd_set_link_active(hba);
		} else {
			dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
				__func__, ret);
			/*
			 * Force reset and restore. Any other actions can lead
			 * to an unrecoverable state.
			 */
			ufshcd_set_link_off(hba);
		}
	}

	return 0;
}

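/*
 * EHL (Elkhart Lake) controllers use the common init but additionally need
 * the broken auto-hibern8 quirk.
 */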
static int ufs_intel_ehl_init(struct ufs_hba *hba)
{
	hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
	return ufs_intel_common_init(hba);
}

static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = {
	.name                   = "intel-pci",
	.init			= ufs_intel_common_init,
	.exit			= ufs_intel_common_exit,
	.link_startup_notify	= ufs_intel_link_startup_notify,
	.resume			= ufs_intel_resume,
};

static struct ufs_hba_variant_ops ufs_intel_ehl_hba_vops = {
	.name                   = "intel-pci",
	.init			= ufs_intel_ehl_init,
	.exit			= ufs_intel_common_exit,
	.link_startup_notify	= ufs_intel_link_startup_notify,
	.resume			= ufs_intel_resume,
};

#ifdef CONFIG_PM_SLEEP
/**
 * ufshcd_pci_suspend - suspend power management function
 * @dev: pointer to PCI device handle
 *
 * Returns 0 if successful
 * Returns non-zero otherwise
 */
static int ufshcd_pci_suspend(struct device *dev)
{
	return ufshcd_system_suspend(dev_get_drvdata(dev));
}

/**
 * ufshcd_pci_resume - resume power management function
 * @dev: pointer to PCI device handle
 *
 * Returns 0 if successful
 * Returns non-zero otherwise
 */
static int ufshcd_pci_resume(struct device *dev)
{
	return ufshcd_system_resume(dev_get_drvdata(dev));
}

/**
 * ufshcd_pci_poweroff - suspend-to-disk poweroff function
 * @dev: pointer to PCI device handle
 *
 * Returns 0 if successful
 * Returns non-zero otherwise
 */
static int ufshcd_pci_poweroff(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	int spm_lvl = hba->spm_lvl;
	int ret;

	/*
	 * For poweroff we need to set the UFS device to PowerDown mode.
	 * Force spm_lvl to ensure that.
	 */
	hba->spm_lvl = 5;
	ret = ufshcd_system_suspend(hba);
	hba->spm_lvl = spm_lvl;
	return ret;
}

#endif /* !CONFIG_PM_SLEEP */

#ifdef CONFIG_PM
static int ufshcd_pci_runtime_suspend(struct device *dev)
{
	return ufshcd_runtime_suspend(dev_get_drvdata(dev));
}

static int ufshcd_pci_runtime_resume(struct device *dev)
{
	return ufshcd_runtime_resume(dev_get_drvdata(dev));
}

static int ufshcd_pci_runtime_idle(struct device *dev)
{
	return ufshcd_runtime_idle(dev_get_drvdata(dev));
}
#endif /* !CONFIG_PM */

/**
 * ufshcd_pci_shutdown - main function to put the controller in reset state
 * @pdev: pointer to PCI device handle
 */
static void ufshcd_pci_shutdown(struct pci_dev *pdev)
{
	ufshcd_shutdown((struct ufs_hba *)pci_get_drvdata(pdev));
}

/**
 * ufshcd_pci_remove - de-allocate the PCI/SCSI host and host memory space
 *		data structures
 * @pdev: pointer to PCI handle
 */
static void ufshcd_pci_remove(struct pci_dev *pdev)
{
	struct ufs_hba *hba = pci_get_drvdata(pdev);

	pm_runtime_forbid(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
	ufshcd_remove(hba);
	ufshcd_dealloc_host(hba);
}

/**
 * ufshcd_pci_probe - probe routine of the driver
 * @pdev: pointer to PCI device handle
 * @id: PCI device id
 *
 * Returns 0 on success, non-zero value on failure
 */
static int
ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ufs_hba *hba;
	void __iomem *mmio_base;
	int err;

	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "pcim_enable_device failed\n");
		return err;
	}

	pci_set_master(pdev);

	err = pcim_iomap_regions(pdev, 1 << 0, UFSHCD);
	if (err < 0) {
		dev_err(&pdev->dev, "request and iomap failed\n");
		return err;
	}

	mmio_base = pcim_iomap_table(pdev)[0];

	err = ufshcd_alloc_host(&pdev->dev, &hba);
	if (err) {
		dev_err(&pdev->dev, "Allocation failed\n");
		return err;
	}

	hba->vops = (struct ufs_hba_variant_ops *)id->driver_data;

	err = ufshcd_init(hba, mmio_base, pdev->irq);
	if (err) {
		dev_err(&pdev->dev, "Initialization failed\n");
		ufshcd_dealloc_host(hba);
		return err;
	}

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_allow(&pdev->dev);

	return 0;
}

static const struct dev_pm_ops ufshcd_pci_pm_ops = {
#ifdef CONFIG_PM_SLEEP
	.suspend	= ufshcd_pci_suspend,
	.resume		= ufshcd_pci_resume,
	.freeze		= ufshcd_pci_suspend,
	.thaw		= ufshcd_pci_resume,
	.poweroff	= ufshcd_pci_poweroff,
	.restore	= ufshcd_pci_resume,
#endif
	SET_RUNTIME_PM_OPS(ufshcd_pci_runtime_suspend,
			   ufshcd_pci_runtime_resume,
			   ufshcd_pci_runtime_idle)
};

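/*
 * For the Intel entries, driver_data holds a pointer to the variant ops that
 * ufshcd_pci_probe() installs as hba->vops.
 */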
static const struct pci_device_id ufshcd_pci_tbl[] = {
	{ PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ PCI_VDEVICE(INTEL, 0x9DFA), (kernel_ulong_t)&ufs_intel_cnl_hba_vops },
	{ PCI_VDEVICE(INTEL, 0x4B41), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
	{ PCI_VDEVICE(INTEL, 0x4B43), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
	{ }	/* terminate list */
};

MODULE_DEVICE_TABLE(pci, ufshcd_pci_tbl);

static struct pci_driver ufshcd_pci_driver = {
	.name = UFSHCD,
	.id_table = ufshcd_pci_tbl,
	.probe = ufshcd_pci_probe,
	.remove = ufshcd_pci_remove,
	.shutdown = ufshcd_pci_shutdown,
	.driver = {
		.pm = &ufshcd_pci_pm_ops
	},
};

module_pci_driver(ufshcd_pci_driver);

MODULE_AUTHOR("Santosh Yaraganavi <santosh.sy@samsung.com>");
MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
MODULE_DESCRIPTION("UFS host controller PCI glue driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(UFSHCD_DRIVER_VERSION);