// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 */

#include <asm/div64.h>
#include <linux/interconnect-provider.h>
#include <linux/list_sort.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

#include <soc/qcom/rpmh.h>
#include <soc/qcom/tcs.h>

#include "bcm-voter.h"
#include "icc-rpmh.h"

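/* All registered BCM voters, protected by bcm_voter_lock. */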
static LIST_HEAD(bcm_voters);
static DEFINE_MUTEX(bcm_voter_lock);

/**
 * struct bcm_voter - Bus Clock Manager voter
 * @dev: reference to the device that communicates with the BCM
 * @np: reference to the device node to match bcm voters
 * @lock: mutex to protect commit and wake/sleep lists in the voter
 * @commit_list: list containing bcms to be committed to hardware
 * @ws_list: list containing bcms that have different wake/sleep votes
 * @voter_node: list of bcm voters
 * @tcs_wait: mask for which buckets require TCS completion
 */
struct bcm_voter {
	struct device *dev;
	struct device_node *np;
	struct mutex lock;
	struct list_head commit_list;
	struct list_head ws_list;
	struct list_head voter_node;
	u32 tcs_wait;
};

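/* list_sort() comparator: order BCMs by ascending VCD. */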
static int cmp_vcd(void *priv, const struct list_head *a, const struct list_head *b)
{
	const struct qcom_icc_bcm *bcm_a =
			list_entry(a, struct qcom_icc_bcm, list);
	const struct qcom_icc_bcm *bcm_b =
			list_entry(b, struct qcom_icc_bcm, list);

	if (bcm_a->aux_data.vcd < bcm_b->aux_data.vcd)
		return -1;
	else if (bcm_a->aux_data.vcd == bcm_b->aux_data.vcd)
		return 0;
	else
		return 1;
}

static u64 bcm_div(u64 num, u32 base)
{
	/* Ensure that small votes aren't lost. */
	if (num && num < base)
		return 1;

	do_div(num, base);

	return num;
}

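/*
 * Aggregate the bandwidth votes of every node attached to a BCM into the
 * BCM's per-bucket vote_x (average) and vote_y (peak) values. Each node's
 * request is scaled by the BCM data width and divided by the node's buswidth
 * (and channel count for the average vote); the maximum across all nodes is
 * then converted into BCM units using vote_scale and aux_data.unit. BCMs
 * with an enable_mask are effectively on/off, and keepalive BCMs always
 * retain a minimal AMC/WAKE vote.
 */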
static void bcm_aggregate(struct qcom_icc_bcm *bcm)
{
	struct qcom_icc_node *node;
	size_t i, bucket;
	u64 agg_avg[QCOM_ICC_NUM_BUCKETS] = {0};
	u64 agg_peak[QCOM_ICC_NUM_BUCKETS] = {0};
	u64 temp;

	for (bucket = 0; bucket < QCOM_ICC_NUM_BUCKETS; bucket++) {
		for (i = 0; i < bcm->num_nodes; i++) {
			node = bcm->nodes[i];
			temp = bcm_div(node->sum_avg[bucket] * bcm->aux_data.width,
				       node->buswidth * node->channels);
			agg_avg[bucket] = max(agg_avg[bucket], temp);

			temp = bcm_div(node->max_peak[bucket] * bcm->aux_data.width,
				       node->buswidth);
			agg_peak[bucket] = max(agg_peak[bucket], temp);
		}

		temp = agg_avg[bucket] * bcm->vote_scale;
		bcm->vote_x[bucket] = bcm_div(temp, bcm->aux_data.unit);

		temp = agg_peak[bucket] * bcm->vote_scale;
		bcm->vote_y[bucket] = bcm_div(temp, bcm->aux_data.unit);

		if (bcm->enable_mask && (bcm->vote_x[bucket] || bcm->vote_y[bucket])) {
			bcm->vote_x[bucket] = 0;
			bcm->vote_y[bucket] = bcm->enable_mask;
		}
	}

	if (bcm->keepalive && bcm->vote_x[QCOM_ICC_BUCKET_AMC] == 0 &&
	    bcm->vote_y[QCOM_ICC_BUCKET_AMC] == 0) {
		bcm->vote_x[QCOM_ICC_BUCKET_AMC] = 1;
		bcm->vote_x[QCOM_ICC_BUCKET_WAKE] = 1;
		bcm->vote_y[QCOM_ICC_BUCKET_AMC] = 1;
		bcm->vote_y[QCOM_ICC_BUCKET_WAKE] = 1;
	}
}

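/*
 * Pack a single pair of BCM votes into a TCS command: votes are clamped to
 * BCM_TCS_CMD_VOTE_MASK, and an all-zero vote clears the valid bit in the
 * generated command data.
 */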
static inline void tcs_cmd_gen(struct tcs_cmd *cmd, u64 vote_x, u64 vote_y,
			       u32 addr, bool commit, bool wait)
{
	bool valid = true;

	if (!cmd)
		return;

	memset(cmd, 0, sizeof(*cmd));

	if (vote_x == 0 && vote_y == 0)
		valid = false;

	if (vote_x > BCM_TCS_CMD_VOTE_MASK)
		vote_x = BCM_TCS_CMD_VOTE_MASK;

	if (vote_y > BCM_TCS_CMD_VOTE_MASK)
		vote_y = BCM_TCS_CMD_VOTE_MASK;

	cmd->addr = addr;
	cmd->data = BCM_TCS_CMD(commit, valid, vote_x, vote_y);

	/*
	 * Set the wait-for-completion flag on commands that need to complete
	 * before the next command is sent.
	 */
	cmd->wait = wait;
}

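/*
 * Build the TCS command list for one bucket from the VCD-sorted commit list.
 * One command is generated per BCM, the commit bit is only set on the last
 * BCM of each VCD group, and n[] records how many commands fall into each
 * RPMh payload so that a VCD group is never split across payloads.
 */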
static void tcs_list_gen(struct bcm_voter *voter, int bucket,
			 struct tcs_cmd tcs_list[MAX_VCD],
			 int n[MAX_VCD + 1])
{
	struct list_head *bcm_list = &voter->commit_list;
	struct qcom_icc_bcm *bcm;
	bool commit, wait;
	size_t idx = 0, batch = 0, cur_vcd_size = 0;

	memset(n, 0, sizeof(int) * (MAX_VCD + 1));

	list_for_each_entry(bcm, bcm_list, list) {
		commit = false;
		cur_vcd_size++;
		if ((list_is_last(&bcm->list, bcm_list)) ||
		    bcm->aux_data.vcd != list_next_entry(bcm, list)->aux_data.vcd) {
			commit = true;
			cur_vcd_size = 0;
		}

		wait = commit && (voter->tcs_wait & BIT(bucket));

		tcs_cmd_gen(&tcs_list[idx], bcm->vote_x[bucket],
			    bcm->vote_y[bucket], bcm->addr, commit, wait);
		idx++;
		n[batch]++;
		/*
		 * Batch the BCMs in such a way that we do not split them
		 * across multiple payloads when they are under the same VCD.
		 * This is to ensure that every BCM is committed, since we only
		 * set the commit bit on the last BCM request of every VCD.
		 */
		if (n[batch] >= MAX_RPMH_PAYLOAD) {
			if (!commit) {
				n[batch] -= cur_vcd_size;
				n[batch + 1] = cur_vcd_size;
			}
			batch++;
		}
	}
}

/**
 * of_bcm_voter_get - gets a bcm voter handle from DT node
 * @dev: device pointer for the consumer device
 * @name: name of the requested bcm voter (optional)
 *
 * This function will match a device_node pointer for the phandle
 * specified in the device DT and return a bcm_voter handle on success.
 *
 * Returns a bcm_voter pointer on success, or ERR_PTR() on error.
 * EPROBE_DEFER is returned until the matching bcm voter has been registered.
 */
struct bcm_voter *of_bcm_voter_get(struct device *dev, const char *name)
{
	struct bcm_voter *voter = ERR_PTR(-EPROBE_DEFER);
	struct bcm_voter *temp;
	struct device_node *np, *node;
	int idx = 0;

	if (!dev || !dev->of_node)
		return ERR_PTR(-ENODEV);

	np = dev->of_node;

	if (name) {
		idx = of_property_match_string(np, "qcom,bcm-voter-names", name);
		if (idx < 0)
			return ERR_PTR(idx);
	}

	node = of_parse_phandle(np, "qcom,bcm-voters", idx);

	mutex_lock(&bcm_voter_lock);
	list_for_each_entry(temp, &bcm_voters, voter_node) {
		if (temp->np == node) {
			voter = temp;
			break;
		}
	}
	mutex_unlock(&bcm_voter_lock);

	of_node_put(node);
	return voter;
}
EXPORT_SYMBOL_GPL(of_bcm_voter_get);
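
/*
 * A minimal usage sketch (hypothetical consumer code, not part of this
 * driver): an RPMh interconnect provider typically looks up its voter during
 * probe and defers until the voter has registered:
 *
 *	struct bcm_voter *voter;
 *
 *	voter = of_bcm_voter_get(&pdev->dev, NULL);
 *	if (IS_ERR(voter))
 *		return PTR_ERR(voter);
 */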

/**
 * qcom_icc_bcm_voter_add - queues up the bcm nodes that require updates
 * @voter: voter that the bcms are being added to
 * @bcm: bcm to add to the commit and wake/sleep lists
 */
void qcom_icc_bcm_voter_add(struct bcm_voter *voter, struct qcom_icc_bcm *bcm)
{
	if (!voter)
		return;

	mutex_lock(&voter->lock);
	if (list_empty(&bcm->list))
		list_add_tail(&bcm->list, &voter->commit_list);

	if (list_empty(&bcm->ws_list))
		list_add_tail(&bcm->ws_list, &voter->ws_list);

	mutex_unlock(&voter->lock);
}
EXPORT_SYMBOL_GPL(qcom_icc_bcm_voter_add);

/**
 * qcom_icc_bcm_voter_commit - generates and commits tcs cmds based on bcms
 * @voter: voter that needs flushing
 *
 * This function generates a set of AMC commands and flushes them to the BCM
 * device associated with the voter. It conditionally generates WAKE and SLEEP
 * commands based on the deltas between the WAKE and SLEEP requirements. The
 * ws_list persists across multiple commit requests, and bcm nodes are removed
 * from it only when their WAKE requirements match their SLEEP requirements.
 *
 * Returns 0 on success, or an appropriate error code otherwise.
 */
int qcom_icc_bcm_voter_commit(struct bcm_voter *voter)
{
	struct qcom_icc_bcm *bcm;
	struct qcom_icc_bcm *bcm_tmp;
	int commit_idx[MAX_VCD + 1];
	struct tcs_cmd cmds[MAX_BCMS];
	int ret = 0;

	if (!voter)
		return 0;

	mutex_lock(&voter->lock);
	list_for_each_entry(bcm, &voter->commit_list, list)
		bcm_aggregate(bcm);

	/*
	 * Pre-sort the BCMs based on VCD for ease of generating a command list
	 * that groups the BCMs with the same VCD together. VCDs are numbered
	 * with the lowest being the most expensive time-wise, ensuring that
	 * those commands are sent earliest in the queue. This list needs to be
	 * sorted on every commit since we can't guarantee the order in which
	 * the BCMs are added to it.
	 */
	list_sort(NULL, &voter->commit_list, cmp_vcd);

	/*
	 * Construct the command list from the list of BCMs pre-sorted by VCD.
	 */
	tcs_list_gen(voter, QCOM_ICC_BUCKET_AMC, cmds, commit_idx);
	if (!commit_idx[0])
		goto out;

	rpmh_invalidate(voter->dev);

	ret = rpmh_write_batch(voter->dev, RPMH_ACTIVE_ONLY_STATE,
			       cmds, commit_idx);
	if (ret) {
		pr_err("Error sending AMC RPMH requests (%d)\n", ret);
		goto out;
	}

	list_for_each_entry_safe(bcm, bcm_tmp, &voter->commit_list, list)
		list_del_init(&bcm->list);

	list_for_each_entry_safe(bcm, bcm_tmp, &voter->ws_list, ws_list) {
		/*
		 * Only generate WAKE and SLEEP commands if a resource's
		 * requirements change as the execution environment transitions
		 * between different power states.
		 */
		if (bcm->vote_x[QCOM_ICC_BUCKET_WAKE] !=
		    bcm->vote_x[QCOM_ICC_BUCKET_SLEEP] ||
		    bcm->vote_y[QCOM_ICC_BUCKET_WAKE] !=
		    bcm->vote_y[QCOM_ICC_BUCKET_SLEEP])
			list_add_tail(&bcm->list, &voter->commit_list);
		else
			list_del_init(&bcm->ws_list);
	}

	if (list_empty(&voter->commit_list))
		goto out;

	list_sort(NULL, &voter->commit_list, cmp_vcd);

	tcs_list_gen(voter, QCOM_ICC_BUCKET_WAKE, cmds, commit_idx);

	ret = rpmh_write_batch(voter->dev, RPMH_WAKE_ONLY_STATE, cmds, commit_idx);
	if (ret) {
		pr_err("Error sending WAKE RPMH requests (%d)\n", ret);
		goto out;
	}

	tcs_list_gen(voter, QCOM_ICC_BUCKET_SLEEP, cmds, commit_idx);

	ret = rpmh_write_batch(voter->dev, RPMH_SLEEP_STATE, cmds, commit_idx);
	if (ret) {
		pr_err("Error sending SLEEP RPMH requests (%d)\n", ret);
		goto out;
	}

out:
	list_for_each_entry_safe(bcm, bcm_tmp, &voter->commit_list, list)
		list_del_init(&bcm->list);

	mutex_unlock(&voter->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(qcom_icc_bcm_voter_commit);
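
/*
 * A minimal usage sketch (hypothetical provider code, not part of this
 * driver): a provider's bandwidth-set path typically queues every BCM touched
 * by the request and then flushes them in a single commit; "qn" and "voter"
 * stand for the provider's node and voter handles:
 *
 *	for (i = 0; i < qn->num_bcms; i++)
 *		qcom_icc_bcm_voter_add(voter, qn->bcms[i]);
 *
 *	ret = qcom_icc_bcm_voter_commit(voter);
 */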

/*
 * One voter is registered for each "qcom,bcm-voter" node in the devicetree;
 * consumers reference it through the "qcom,bcm-voters" phandles resolved by
 * of_bcm_voter_get() above. The optional "qcom,tcs-wait" property selects
 * which buckets wait for TCS completion.
 */
static int qcom_icc_bcm_voter_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct bcm_voter *voter;

	voter = devm_kzalloc(&pdev->dev, sizeof(*voter), GFP_KERNEL);
	if (!voter)
		return -ENOMEM;

	voter->dev = &pdev->dev;
	voter->np = np;

	if (of_property_read_u32(np, "qcom,tcs-wait", &voter->tcs_wait))
		voter->tcs_wait = QCOM_ICC_TAG_ACTIVE_ONLY;

	mutex_init(&voter->lock);
	INIT_LIST_HEAD(&voter->commit_list);
	INIT_LIST_HEAD(&voter->ws_list);

	mutex_lock(&bcm_voter_lock);
	list_add_tail(&voter->voter_node, &bcm_voters);
	mutex_unlock(&bcm_voter_lock);

	return 0;
}

static const struct of_device_id bcm_voter_of_match[] = {
	{ .compatible = "qcom,bcm-voter" },
	{ }
};
MODULE_DEVICE_TABLE(of, bcm_voter_of_match);

static struct platform_driver qcom_icc_bcm_voter_driver = {
	.probe = qcom_icc_bcm_voter_probe,
	.driver = {
		.name		= "bcm_voter",
		.of_match_table = bcm_voter_of_match,
	},
};
module_platform_driver(qcom_icc_bcm_voter_driver);

MODULE_AUTHOR("David Dai <daidavid1@codeaurora.org>");
MODULE_DESCRIPTION("Qualcomm BCM Voter interconnect driver");
MODULE_LICENSE("GPL v2");