// SPDX-License-Identifier: GPL-2.0
/* Texas Instruments K3 AM65 Ethernet QoS submodule
 * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
 *
 * quality of service module includes:
 * Enhanced Scheduled Traffic (EST - P802.1Qbv/D2.2)
 */

#include <linux/pm_runtime.h>
#include <linux/time.h>

#include "am65-cpsw-nuss.h"
#include "am65-cpsw-qos.h"
#include "am65-cpts.h"

#define AM65_CPSW_REG_CTL			0x004
#define AM65_CPSW_PN_REG_CTL			0x004
#define AM65_CPSW_PN_REG_FIFO_STATUS		0x050
#define AM65_CPSW_PN_REG_EST_CTL		0x060

/* AM65_CPSW_REG_CTL register fields */
#define AM65_CPSW_CTL_EST_EN			BIT(18)

/* AM65_CPSW_PN_REG_CTL register fields */
#define AM65_CPSW_PN_CTL_EST_PORT_EN		BIT(17)

/* AM65_CPSW_PN_REG_EST_CTL register fields */
#define AM65_CPSW_PN_EST_ONEBUF			BIT(0)
#define AM65_CPSW_PN_EST_BUFSEL			BIT(1)
#define AM65_CPSW_PN_EST_TS_EN			BIT(2)
#define AM65_CPSW_PN_EST_TS_FIRST		BIT(3)
#define AM65_CPSW_PN_EST_ONEPRI			BIT(4)
#define AM65_CPSW_PN_EST_TS_PRI_MSK		GENMASK(7, 5)

/* AM65_CPSW_PN_REG_FIFO_STATUS register fields */
#define AM65_CPSW_PN_FST_TX_PRI_ACTIVE_MSK	GENMASK(7, 0)
#define AM65_CPSW_PN_FST_TX_E_MAC_ALLOW_MSK	GENMASK(15, 8)
#define AM65_CPSW_PN_FST_EST_CNT_ERR		BIT(16)
#define AM65_CPSW_PN_FST_EST_ADD_ERR		BIT(17)
#define AM65_CPSW_PN_FST_EST_BUFACT		BIT(18)

/* EST FETCH COMMAND RAM */
#define AM65_CPSW_FETCH_RAM_CMD_NUM		0x80
#define AM65_CPSW_FETCH_CNT_MSK			GENMASK(21, 8)
#define AM65_CPSW_FETCH_CNT_MAX			(AM65_CPSW_FETCH_CNT_MSK >> 8)
#define AM65_CPSW_FETCH_CNT_OFFSET		8
#define AM65_CPSW_FETCH_ALLOW_MSK		GENMASK(7, 0)
#define AM65_CPSW_FETCH_ALLOW_MAX		AM65_CPSW_FETCH_ALLOW_MSK
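
/* Worked example (illustrative, not used by the driver): per the masks
 * above, a fetch command word packs the fetch count into bits 21:8 and the
 * priority allow mask into bits 7:0, so a count of 1250 with gates open
 * for priorities 0-2 would be encoded as:
 *
 *	cmd = (1250 << AM65_CPSW_FETCH_CNT_OFFSET) | 0x07;	== 0x4e207
 */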

enum timer_act {
	TACT_PROG,		/* need to program timer */
	TACT_NEED_STOP,		/* need to stop timer first */
	TACT_SKIP_PROG,		/* only the buffer can be updated */
};

static int am65_cpsw_port_est_enabled(struct am65_cpsw_port *port)
{
	return port->qos.est_oper || port->qos.est_admin;
}

static void am65_cpsw_est_enable(struct am65_cpsw_common *common, int enable)
{
	u32 val;

	val = readl(common->cpsw_base + AM65_CPSW_REG_CTL);

	if (enable)
		val |= AM65_CPSW_CTL_EST_EN;
	else
		val &= ~AM65_CPSW_CTL_EST_EN;

	writel(val, common->cpsw_base + AM65_CPSW_REG_CTL);
	common->est_enabled = enable;
}

static void am65_cpsw_port_est_enable(struct am65_cpsw_port *port, int enable)
{
	u32 val;

	val = readl(port->port_base + AM65_CPSW_PN_REG_CTL);
	if (enable)
		val |= AM65_CPSW_PN_CTL_EST_PORT_EN;
	else
		val &= ~AM65_CPSW_PN_CTL_EST_PORT_EN;

	writel(val, port->port_base + AM65_CPSW_PN_REG_CTL);
}

/* target new EST RAM buffer, actual toggle happens after cycle completion */
static void am65_cpsw_port_est_assign_buf_num(struct net_device *ndev,
					      int buf_num)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	u32 val;

	val = readl(port->port_base + AM65_CPSW_PN_REG_EST_CTL);
	if (buf_num)
		val |= AM65_CPSW_PN_EST_BUFSEL;
	else
		val &= ~AM65_CPSW_PN_EST_BUFSEL;

	writel(val, port->port_base + AM65_CPSW_PN_REG_EST_CTL);
}
/* am65_cpsw_port_est_is_swapped() - Indicate whether h/w has transitioned
 * admin -> oper or not
 *
 * Return true if already transitioned, i.e. oper is equal to admin and the
 * buf numbers match (est_oper->buf matches est_admin->buf).
 * Return false if before the transition, i.e. oper is not equal to admin (a
 * previous admin command is waiting to be transitioned to the oper state
 * and est_oper->buf does not match est_admin->buf).
 */
static int am65_cpsw_port_est_is_swapped(struct net_device *ndev, int *oper,
					 int *admin)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	u32 val;

	val = readl(port->port_base + AM65_CPSW_PN_REG_FIFO_STATUS);
	*oper = !!(val & AM65_CPSW_PN_FST_EST_BUFACT);

	val = readl(port->port_base + AM65_CPSW_PN_REG_EST_CTL);
	*admin = !!(val & AM65_CPSW_PN_EST_BUFSEL);

	return *admin == *oper;
}

/* am65_cpsw_port_est_get_free_buf_num() - Get a free buffer number for
 * the admin side to program the new schedule.
 *
 * The logic is as follows:
 * If oper is the same as admin, return the other buffer (!oper) as the
 * admin buffer.  If oper is not the same, the driver lets the current oper
 * continue, as it is in the process of transitioning from admin -> oper.
 * Keep the oper buffer by writing the same buffer number to the EST_BUFSEL
 * bit in the EST CTL register.  On the second iteration they will match and
 * the code returns.  The actual buffer to write the commands to is selected
 * later, before the schedule is updated.
 */
static int am65_cpsw_port_est_get_free_buf_num(struct net_device *ndev)
{
	int oper, admin;
	int roll = 2;

	while (roll--) {
		if (am65_cpsw_port_est_is_swapped(ndev, &oper, &admin))
			return !oper;

		/* admin is not yet active, so hold off the transition by
		 * targeting the same oper buf; the in-flight memory must
		 * not be touched.
		 */
		am65_cpsw_port_est_assign_buf_num(ndev, oper);

		dev_info(&ndev->dev,
			 "Prev. EST admin cycle is in transit %d -> %d\n",
			 oper, admin);
	}

	return admin;
}
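
/* Illustrative walk-through of the roll above (assuming a swap was pending):
 * with oper == 0 and admin == 1, the first iteration re-targets buf 0, which
 * withdraws the pending swap; on the second iteration oper == admin == 0,
 * is_swapped() returns true and !oper == 1 is handed out as the free buffer.
 */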

static void am65_cpsw_admin_to_oper(struct net_device *ndev)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);

	if (port->qos.est_oper)
		devm_kfree(&ndev->dev, port->qos.est_oper);

	port->qos.est_oper = port->qos.est_admin;
	port->qos.est_admin = NULL;
}

static void am65_cpsw_port_est_get_buf_num(struct net_device *ndev,
					   struct am65_cpsw_est *est_new)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	u32 val;

	val = readl(port->port_base + AM65_CPSW_PN_REG_EST_CTL);
	val &= ~AM65_CPSW_PN_EST_ONEBUF;
	writel(val, port->port_base + AM65_CPSW_PN_REG_EST_CTL);

	est_new->buf = am65_cpsw_port_est_get_free_buf_num(ndev);

	/* a rolled buf num means the buf changed while configuring */
	if (port->qos.est_oper && port->qos.est_admin &&
	    est_new->buf == port->qos.est_oper->buf)
		am65_cpsw_admin_to_oper(ndev);
}

static void am65_cpsw_est_set(struct net_device *ndev, int enable)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct am65_cpsw_common *common = port->common;
	int common_enable = 0;
	int i;

	am65_cpsw_port_est_enable(port, enable);

	for (i = 0; i < common->port_num; i++)
		common_enable |= am65_cpsw_port_est_enabled(&common->ports[i]);

	common_enable |= enable;
	am65_cpsw_est_enable(common, common_enable);
}

/* This update is supposed to be used in any routine before reading the real
 * state of the admin -> oper transition; in particular, it is meant to be
 * used in a generic routine that provides the real state to the Taprio
 * Qdisc.
 */
static void am65_cpsw_est_update_state(struct net_device *ndev)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	int oper, admin;

	if (!port->qos.est_admin)
		return;

	if (!am65_cpsw_port_est_is_swapped(ndev, &oper, &admin))
		return;

	am65_cpsw_admin_to_oper(ndev);
}

/* The fetch command count is a number of bytes in Gigabit mode or nibbles in
 * 10/100Mb mode.  So, given the link speed and a time in ns, convert the ns
 * to the number of bytes/nibbles that can be transmitted at that speed.
 */
static int am65_est_cmd_ns_to_cnt(u64 ns, int link_speed)
{
	u64 temp;

	temp = ns * link_speed;
	if (link_speed < SPEED_1000)
		temp <<= 1;

	return DIV_ROUND_UP(temp, 8 * 1000);
}
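
/* Worked example (illustrative): at SPEED_1000 a 10000 ns window gives
 * DIV_ROUND_UP(10000 * 1000, 8000) = 1250 byte times; at SPEED_100 the
 * same window gives DIV_ROUND_UP(10000 * 100 * 2, 8000) = 250 nibble
 * times, since sub-gigabit speeds count nibbles (hence the << 1).
 */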

static void __iomem *am65_cpsw_est_set_sched_cmds(void __iomem *addr,
						  int fetch_cnt,
						  int fetch_allow)
{
	u32 prio_mask, cmd_fetch_cnt, cmd;

	do {
		if (fetch_cnt > AM65_CPSW_FETCH_CNT_MAX) {
			fetch_cnt -= AM65_CPSW_FETCH_CNT_MAX;
			cmd_fetch_cnt = AM65_CPSW_FETCH_CNT_MAX;
		} else {
			cmd_fetch_cnt = fetch_cnt;
			/* fetch count can't be less than 16? */
			if (cmd_fetch_cnt && cmd_fetch_cnt < 16)
				cmd_fetch_cnt = 16;

			fetch_cnt = 0;
		}

		prio_mask = fetch_allow & AM65_CPSW_FETCH_ALLOW_MSK;
		cmd = (cmd_fetch_cnt << AM65_CPSW_FETCH_CNT_OFFSET) | prio_mask;

		writel(cmd, addr);
		addr += 4;
	} while (fetch_cnt);

	return addr;
}
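
/* Illustrative split (AM65_CPSW_FETCH_CNT_MAX == 0x3fff per the masks
 * above): a fetch_cnt of 20000 is emitted as two commands, the first with
 * a count of 16383 and the second with the remaining 3617, both carrying
 * the same priority allow mask.
 */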

static int am65_cpsw_est_calc_cmd_num(struct net_device *ndev,
				      struct tc_taprio_qopt_offload *taprio,
				      int link_speed)
{
	int i, cmd_cnt, cmd_sum = 0;
	u32 fetch_cnt;

	for (i = 0; i < taprio->num_entries; i++) {
		if (taprio->entries[i].command != TC_TAPRIO_CMD_SET_GATES) {
			dev_err(&ndev->dev, "Only SET command is supported");
			return -EINVAL;
		}

		fetch_cnt = am65_est_cmd_ns_to_cnt(taprio->entries[i].interval,
						   link_speed);

		cmd_cnt = DIV_ROUND_UP(fetch_cnt, AM65_CPSW_FETCH_CNT_MAX);
		if (!cmd_cnt)
			cmd_cnt++;

		cmd_sum += cmd_cnt;

		if (!fetch_cnt)
			break;
	}

	return cmd_sum;
}

static int am65_cpsw_est_check_scheds(struct net_device *ndev,
				      struct am65_cpsw_est *est_new)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	int cmd_num;

	cmd_num = am65_cpsw_est_calc_cmd_num(ndev, &est_new->taprio,
					     port->qos.link_speed);
	if (cmd_num < 0)
		return cmd_num;

	if (cmd_num > AM65_CPSW_FETCH_RAM_CMD_NUM / 2) {
		dev_err(&ndev->dev, "No fetch RAM");
		return -ENOMEM;
	}

	return 0;
}
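
/* Note (per the check above): the fetch RAM holds AM65_CPSW_FETCH_RAM_CMD_NUM
 * commands in total but is double-buffered, so a single schedule may occupy
 * at most half of it, i.e. 0x40 commands.
 */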

static void am65_cpsw_est_set_sched_list(struct net_device *ndev,
					 struct am65_cpsw_est *est_new)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	u32 fetch_cnt, fetch_allow, all_fetch_allow = 0;
	void __iomem *ram_addr, *max_ram_addr;
	struct tc_taprio_sched_entry *entry;
	int i, ram_size;

	ram_addr = port->fetch_ram_base;
	ram_size = AM65_CPSW_FETCH_RAM_CMD_NUM * 2;
	ram_addr += est_new->buf * ram_size;

	max_ram_addr = ram_size + ram_addr;
	for (i = 0; i < est_new->taprio.num_entries; i++) {
		entry = &est_new->taprio.entries[i];

		fetch_cnt = am65_est_cmd_ns_to_cnt(entry->interval,
						   port->qos.link_speed);
		fetch_allow = entry->gate_mask;
		if (fetch_allow > AM65_CPSW_FETCH_ALLOW_MAX)
			dev_dbg(&ndev->dev, "fetch_allow > 8 bits: %d\n",
				fetch_allow);

		ram_addr = am65_cpsw_est_set_sched_cmds(ram_addr, fetch_cnt,
							fetch_allow);

		if (!fetch_cnt && i < est_new->taprio.num_entries - 1) {
			dev_info(&ndev->dev,
				 "next scheds after %d have no impact", i + 1);
			break;
		}

		all_fetch_allow |= fetch_allow;
	}

	/* end cmd, enabling non-timed queues for potential over cycle time */
	if (ram_addr < max_ram_addr)
		writel(~all_fetch_allow & AM65_CPSW_FETCH_ALLOW_MSK, ram_addr);
}
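
/* Example of the end command (illustrative): if the timed entries only ever
 * open gates for priorities 0-2 (all_fetch_allow == 0x07), the word written
 * above is ~0x07 & 0xff == 0xf8, i.e. a zero-count command that leaves the
 * remaining priorities open for whatever is left of the cycle.
 */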

/* Enable ESTf periodic output, set cycle start time and interval. */
static int am65_cpsw_timer_set(struct net_device *ndev,
			       struct am65_cpsw_est *est_new)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct am65_cpsw_common *common = port->common;
	struct am65_cpts *cpts = common->cpts;
	struct am65_cpts_estf_cfg cfg;

	cfg.ns_period = est_new->taprio.cycle_time;
	cfg.ns_start = est_new->taprio.base_time;

	return am65_cpts_estf_enable(cpts, port->port_id - 1, &cfg);
}

static void am65_cpsw_timer_stop(struct net_device *ndev)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct am65_cpts *cpts = port->common->cpts;

	am65_cpts_estf_disable(cpts, port->port_id - 1);
}

static enum timer_act am65_cpsw_timer_act(struct net_device *ndev,
					  struct am65_cpsw_est *est_new)
{
	struct tc_taprio_qopt_offload *taprio_oper, *taprio_new;
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct am65_cpts *cpts = port->common->cpts;
	u64 cur_time;
	s64 diff;

	if (!port->qos.est_oper)
		return TACT_PROG;

	taprio_new = &est_new->taprio;
	taprio_oper = &port->qos.est_oper->taprio;

	if (taprio_new->cycle_time != taprio_oper->cycle_time)
		return TACT_NEED_STOP;

	/* in order to avoid a timer reset, get base_time from the oper taprio */
	if (!taprio_new->base_time && taprio_oper)
		taprio_new->base_time = taprio_oper->base_time;

	if (taprio_new->base_time == taprio_oper->base_time)
		return TACT_SKIP_PROG;

	/* base times are cycle synchronized */
	diff = taprio_new->base_time - taprio_oper->base_time;
	diff = diff < 0 ? -diff : diff;
	if (diff % taprio_new->cycle_time)
		return TACT_NEED_STOP;

	cur_time = am65_cpts_ns_gettime(cpts);
	if (taprio_new->base_time <= cur_time + taprio_new->cycle_time)
		return TACT_SKIP_PROG;

	/* TODO: Admin schedule at a future time is not currently supported */
	return TACT_NEED_STOP;
}
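
/* Decision example (illustrative): with an oper cycle of 1 ms and base_time
 * B, a new schedule with base_time B + 5 * 1ms differs by an exact multiple
 * of the cycle time; if that base time is also not more than one cycle in
 * the future, the timer keeps running and only the schedule buffer is
 * reprogrammed (TACT_SKIP_PROG).  A mismatched cycle time or an unaligned
 * base time forces TACT_NEED_STOP instead.
 */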

static void am65_cpsw_stop_est(struct net_device *ndev)
{
	am65_cpsw_est_set(ndev, 0);
	am65_cpsw_timer_stop(ndev);
}

static void am65_cpsw_purge_est(struct net_device *ndev)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);

	am65_cpsw_stop_est(ndev);

	if (port->qos.est_admin)
		devm_kfree(&ndev->dev, port->qos.est_admin);

	if (port->qos.est_oper)
		devm_kfree(&ndev->dev, port->qos.est_oper);

	port->qos.est_oper = NULL;
	port->qos.est_admin = NULL;
}

static int am65_cpsw_configure_taprio(struct net_device *ndev,
				      struct am65_cpsw_est *est_new)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpts *cpts = common->cpts;
	int ret = 0, tact = TACT_PROG;

	am65_cpsw_est_update_state(ndev);

	if (!est_new->taprio.enable) {
		am65_cpsw_stop_est(ndev);
		return ret;
	}

	ret = am65_cpsw_est_check_scheds(ndev, est_new);
	if (ret < 0)
		return ret;

	tact = am65_cpsw_timer_act(ndev, est_new);
	if (tact == TACT_NEED_STOP) {
		dev_err(&ndev->dev,
			"Can't toggle estf timer, stop taprio first");
		return -EINVAL;
	}

	if (tact == TACT_PROG)
		am65_cpsw_timer_stop(ndev);

	if (!est_new->taprio.base_time)
		est_new->taprio.base_time = am65_cpts_ns_gettime(cpts);

	am65_cpsw_port_est_get_buf_num(ndev, est_new);
	am65_cpsw_est_set_sched_list(ndev, est_new);
	am65_cpsw_port_est_assign_buf_num(ndev, est_new->buf);

	am65_cpsw_est_set(ndev, est_new->taprio.enable);

	if (tact == TACT_PROG) {
		ret = am65_cpsw_timer_set(ndev, est_new);
		if (ret) {
			dev_err(&ndev->dev, "Failed to set cycle time");
			return ret;
		}
	}

	return ret;
}

static void am65_cpsw_cp_taprio(struct tc_taprio_qopt_offload *from,
				struct tc_taprio_qopt_offload *to)
{
	int i;

	*to = *from;
	for (i = 0; i < from->num_entries; i++)
		to->entries[i] = from->entries[i];
}

static int am65_cpsw_set_taprio(struct net_device *ndev, void *type_data)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct tc_taprio_qopt_offload *taprio = type_data;
	struct am65_cpsw_est *est_new;
	int ret = 0;

	if (taprio->cycle_time_extension) {
		dev_err(&ndev->dev, "Failed to set cycle time extension");
		return -EOPNOTSUPP;
	}

	est_new = devm_kzalloc(&ndev->dev,
			       struct_size(est_new, taprio.entries, taprio->num_entries),
			       GFP_KERNEL);
	if (!est_new)
		return -ENOMEM;

	am65_cpsw_cp_taprio(taprio, &est_new->taprio);
	ret = am65_cpsw_configure_taprio(ndev, est_new);
	if (!ret) {
		if (taprio->enable) {
			if (port->qos.est_admin)
				devm_kfree(&ndev->dev, port->qos.est_admin);

			port->qos.est_admin = est_new;
		} else {
			devm_kfree(&ndev->dev, est_new);
			am65_cpsw_purge_est(ndev);
		}
	} else {
		devm_kfree(&ndev->dev, est_new);
	}

	return ret;
}
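
/* Usage sketch (illustrative; the device name and schedule values are
 * assumptions, not part of this driver): a full-offload taprio schedule
 * reaches the handler above via ndo_setup_tc(), e.g.:
 *
 *	tc qdisc replace dev eth0 parent root handle 100 taprio \
 *		num_tc 3 map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 \
 *		queues 1@0 1@1 1@2 base-time 1000000 \
 *		sched-entry S 7 25000 sched-entry S 4 50000 flags 2
 */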

static void am65_cpsw_est_link_up(struct net_device *ndev, int link_speed)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	ktime_t cur_time;
	s64 delta;

	port->qos.link_speed = link_speed;
	if (!am65_cpsw_port_est_enabled(port))
		return;

	if (port->qos.link_down_time) {
		cur_time = ktime_get();
		delta = ktime_us_delta(cur_time, port->qos.link_down_time);
		if (delta > USEC_PER_SEC) {
			dev_err(&ndev->dev,
				"Link has been lost too long, stopping TAS");
			goto purge_est;
		}
	}

	return;

purge_est:
	am65_cpsw_purge_est(ndev);
}

static int am65_cpsw_setup_taprio(struct net_device *ndev, void *type_data)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct am65_cpsw_common *common = port->common;

	if (!IS_ENABLED(CONFIG_TI_AM65_CPSW_TAS))
		return -ENODEV;

	if (!netif_running(ndev)) {
		dev_err(&ndev->dev, "interface is down, link speed unknown\n");
		return -ENETDOWN;
	}

	if (common->pf_p0_rx_ptype_rrobin) {
		dev_err(&ndev->dev,
			"p0-rx-ptype-rrobin flag conflicts with taprio qdisc\n");
		return -EINVAL;
	}

	if (port->qos.link_speed == SPEED_UNKNOWN)
		return -ENOLINK;

	return am65_cpsw_set_taprio(ndev, type_data);
}

int am65_cpsw_qos_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
			       void *type_data)
{
	switch (type) {
	case TC_SETUP_QDISC_TAPRIO:
		return am65_cpsw_setup_taprio(ndev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

void am65_cpsw_qos_link_up(struct net_device *ndev, int link_speed)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);

	if (!IS_ENABLED(CONFIG_TI_AM65_CPSW_TAS))
		return;

	am65_cpsw_est_link_up(ndev, link_speed);
	port->qos.link_down_time = 0;
}

void am65_cpsw_qos_link_down(struct net_device *ndev)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);

	if (!IS_ENABLED(CONFIG_TI_AM65_CPSW_TAS))
		return;

	if (!port->qos.link_down_time)
		port->qos.link_down_time = ktime_get();

	port->qos.link_speed = SPEED_UNKNOWN;
}