// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
 */
#include "sja1105.h"

#define SJA1105_TAS_CLKSRC_DISABLED	0
#define SJA1105_TAS_CLKSRC_STANDALONE	1
#define SJA1105_TAS_CLKSRC_AS6802	2
#define SJA1105_TAS_CLKSRC_PTP		3
#define SJA1105_GATE_MASK		GENMASK_ULL(SJA1105_NUM_TC - 1, 0)

#define work_to_sja1105_tas(d) \
	container_of((d), struct sja1105_tas_data, tas_work)
#define tas_to_sja1105(d) \
	container_of((d), struct sja1105_private, tas_data)

static int sja1105_tas_set_runtime_params(struct sja1105_private *priv)
{
	struct sja1105_tas_data *tas_data = &priv->tas_data;
	struct sja1105_gating_config *gating_cfg = &tas_data->gating_cfg;
	struct dsa_switch *ds = priv->ds;
	s64 earliest_base_time = S64_MAX;
	s64 latest_base_time = 0;
	s64 its_cycle_time = 0;
	s64 max_cycle_time = 0;
	int port;

	tas_data->enabled = false;

	for (port = 0; port < SJA1105_NUM_PORTS; port++) {
		const struct tc_taprio_qopt_offload *offload;

		offload = tas_data->offload[port];
		if (!offload)
			continue;

		tas_data->enabled = true;

		if (max_cycle_time < offload->cycle_time)
			max_cycle_time = offload->cycle_time;
		if (latest_base_time < offload->base_time)
			latest_base_time = offload->base_time;
		if (earliest_base_time > offload->base_time) {
			earliest_base_time = offload->base_time;
			its_cycle_time = offload->cycle_time;
		}
	}

	if (!list_empty(&gating_cfg->entries)) {
		tas_data->enabled = true;

		if (max_cycle_time < gating_cfg->cycle_time)
			max_cycle_time = gating_cfg->cycle_time;
		if (latest_base_time < gating_cfg->base_time)
			latest_base_time = gating_cfg->base_time;
		if (earliest_base_time > gating_cfg->base_time) {
			earliest_base_time = gating_cfg->base_time;
			its_cycle_time = gating_cfg->cycle_time;
		}
	}

	if (!tas_data->enabled)
		return 0;

	/* Roll the earliest base time over until it is in a comparable
	 * time base with the latest, then compare their deltas.
	 * We want to enforce that all ports' base times are within
	 * SJA1105_TAS_MAX_DELTA deltas (of 200 ns each) of one another.
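	 *
	 * Illustrative numbers (not taken from any real configuration):
	 * with base times of 1.0 s and 5.0 s and an earliest cycle time of
	 * 2.0 s, the earliest base time rolls forward to exactly 5.0 s, the
	 * resulting difference is 0 and the check below passes.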
	 */
	earliest_base_time = future_base_time(earliest_base_time,
					      its_cycle_time,
					      latest_base_time);
	while (earliest_base_time > latest_base_time)
		earliest_base_time -= its_cycle_time;
	if (latest_base_time - earliest_base_time >
	    sja1105_delta_to_ns(SJA1105_TAS_MAX_DELTA)) {
		dev_err(ds->dev,
			"Base times too far apart: min %llu max %llu\n",
			earliest_base_time, latest_base_time);
		return -ERANGE;
	}

	tas_data->earliest_base_time = earliest_base_time;
	tas_data->max_cycle_time = max_cycle_time;

	dev_dbg(ds->dev, "earliest base time %lld ns\n", earliest_base_time);
	dev_dbg(ds->dev, "latest base time %lld ns\n", latest_base_time);
	dev_dbg(ds->dev, "longest cycle time %lld ns\n", max_cycle_time);

	return 0;
}

/* Lo and behold: the egress scheduler from hell.
 *
 * At the hardware level, the Time-Aware Shaper holds a global linear array of
 * all schedule entries for all ports. These are the Gate Control List (GCL)
 * entries, let's call them "timeslots" for short. This linear array of
 * timeslots is held in BLK_IDX_SCHEDULE.
 *
 * Then there are a maximum of 8 "execution threads" inside the switch, which
 * iterate cyclically through the "schedule". Each "cycle" has an entry point
 * and an exit point, both being timeslot indices in the schedule table. The
 * hardware calls each cycle a "subschedule".
 *
 * Subschedule (cycle) i starts when
 *   ptpclkval >= ptpschtm + BLK_IDX_SCHEDULE_ENTRY_POINTS[i].delta.
 *
 * The hardware scheduler iterates BLK_IDX_SCHEDULE with a k ranging from
 *   k = BLK_IDX_SCHEDULE_ENTRY_POINTS[i].address to
 *   k = BLK_IDX_SCHEDULE_PARAMS.subscheind[i]
 *
 * For each schedule entry (timeslot) k, the engine executes the gate control
 * list entry for the duration of BLK_IDX_SCHEDULE[k].delta.
 *
 * +---------+
 * |         | BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS
 * +---------+
 *      |
 *      +-------------------------+
 *                                | .actsubsch
 *  BLK_IDX_SCHEDULE_ENTRY_POINTS v
 *                   +-------+-------+
 *                   |cycle 0|cycle 1|
 *                   +-------+-------+
 *                     |    |      | |
 *  +------------------+    |      | +------------------------------------+
 *  |   .subschindx         |      |            .subschindx               |
 *  |                       |      +--------------+                       |
 *  |             .address  |        .address     |                       |
 *  |                       |                     |                       |
 *  |                       |                     |                       |
 *  |  BLK_IDX_SCHEDULE     v                     v                       |
 *  |                    +-------+-------+-------+-------+-------+------+ |
 *  |                    |entry 0|entry 1|entry 2|entry 3|entry 4|entry5| |
 *  |                    +-------+-------+-------+-------+-------+------+ |
 *  |                                        ^                    ^ ^ ^   |
 *  |                                        |                    | | |   |
 *  |         +------------------------------+                    | | |   |
 *  |         |              +------------------------------------+ | |   |
 *  |         |              |              +-----------------------+ |   |
 *  |         |              |              |                         |   |
 *  |  +---------------------------------------------------------------+  |
 *  |  |subscheind[0]<=subscheind[1]<=subscheind[2]<=...<=subscheind[7]|  |
 *  |  +---------------------------------------------------------------+  |
 *  |        ^                            ^   BLK_IDX_SCHEDULE_PARAMS     |
 *  |        |                            |                                |
 *  +--------+                            +--------------------------------+
 *
 * In the above picture there are two subschedules (cycles):
 *
 * - cycle 0: iterates the schedule table from 0 to 2 (and back)
 * - cycle 1: iterates the schedule table from 3 to 5 (and back)
 *
 * All other possible execution threads must be marked as unused by making
 * their "subschedule end index" (subscheind) equal to the last valid
 * subschedule's end index (in this case 5).
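 *
 * So, for the picture above, the subschedule end index table would read
 * subscheind = { 2, 5, 5, 5, 5, 5, 5, 5 }: the first two entries belong to
 * cycles 0 and 1, and the remaining ones mark the unused execution threads.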
 */
int sja1105_init_scheduling(struct sja1105_private *priv)
{
	struct sja1105_schedule_entry_points_entry *schedule_entry_points;
	struct sja1105_schedule_entry_points_params_entry
					*schedule_entry_points_params;
	struct sja1105_schedule_params_entry *schedule_params;
	struct sja1105_tas_data *tas_data = &priv->tas_data;
	struct sja1105_gating_config *gating_cfg = &tas_data->gating_cfg;
	struct sja1105_schedule_entry *schedule;
	struct sja1105_table *table;
	int schedule_start_idx;
	s64 entry_point_delta;
	int schedule_end_idx;
	int num_entries = 0;
	int num_cycles = 0;
	int cycle = 0;
	int i, k = 0;
	int port, rc;

	rc = sja1105_tas_set_runtime_params(priv);
	if (rc < 0)
		return rc;

	/* Discard previous Schedule Table */
	table = &priv->static_config.tables[BLK_IDX_SCHEDULE];
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	/* Discard previous Schedule Entry Points Parameters Table */
	table = &priv->static_config.tables[BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS];
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	/* Discard previous Schedule Parameters Table */
	table = &priv->static_config.tables[BLK_IDX_SCHEDULE_PARAMS];
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	/* Discard previous Schedule Entry Points Table */
	table = &priv->static_config.tables[BLK_IDX_SCHEDULE_ENTRY_POINTS];
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	/* Figure out the dimensioning of the problem */
	for (port = 0; port < SJA1105_NUM_PORTS; port++) {
		if (tas_data->offload[port]) {
			num_entries += tas_data->offload[port]->num_entries;
			num_cycles++;
		}
	}

	if (!list_empty(&gating_cfg->entries)) {
		num_entries += gating_cfg->num_entries;
		num_cycles++;
	}

	/* Nothing to do */
	if (!num_cycles)
		return 0;

	/* Pre-allocate space in the static config tables */

	/* Schedule Table */
	table = &priv->static_config.tables[BLK_IDX_SCHEDULE];
	table->entries = kcalloc(num_entries, table->ops->unpacked_entry_size,
				 GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;
	table->entry_count = num_entries;
	schedule = table->entries;

	/* Schedule Points Parameters Table */
	table = &priv->static_config.tables[BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS];
	table->entries = kcalloc(SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		/* Previously allocated memory will be freed automatically in
		 * sja1105_static_config_free. This is true for all early
		 * returns below.
		 */
		return -ENOMEM;
	table->entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT;
	schedule_entry_points_params = table->entries;

	/* Schedule Parameters Table */
	table = &priv->static_config.tables[BLK_IDX_SCHEDULE_PARAMS];
	table->entries = kcalloc(SJA1105_MAX_SCHEDULE_PARAMS_COUNT,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;
	table->entry_count = SJA1105_MAX_SCHEDULE_PARAMS_COUNT;
	schedule_params = table->entries;

	/* Schedule Entry Points Table */
	table = &priv->static_config.tables[BLK_IDX_SCHEDULE_ENTRY_POINTS];
	table->entries = kcalloc(num_cycles, table->ops->unpacked_entry_size,
				 GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;
	table->entry_count = num_cycles;
	schedule_entry_points = table->entries;

	/* Finally start populating the static config tables */
	schedule_entry_points_params->clksrc = SJA1105_TAS_CLKSRC_PTP;
	schedule_entry_points_params->actsubsch = num_cycles - 1;

	for (port = 0; port < SJA1105_NUM_PORTS; port++) {
		const struct tc_taprio_qopt_offload *offload;
		/* Relative base time */
		s64 rbt;

		offload = tas_data->offload[port];
		if (!offload)
			continue;

		schedule_start_idx = k;
		schedule_end_idx = k + offload->num_entries - 1;
		/* This is the base time expressed as a number of TAS ticks
		 * relative to PTPSCHTM, which we'll (perhaps improperly) call
		 * the operational base time.
		 */
		rbt = future_base_time(offload->base_time,
				       offload->cycle_time,
				       tas_data->earliest_base_time);
		rbt -= tas_data->earliest_base_time;
		/* UM10944.pdf 4.2.2. Schedule Entry Points table says that
		 * delta cannot be zero, which is shitty. Advance all relative
		 * base times by 1 TAS delta, so that even the earliest base
		 * time becomes 1 in relative terms. Then start the operational
		 * base time (PTPSCHTM) one TAS delta earlier than planned.
		 */
		entry_point_delta = ns_to_sja1105_delta(rbt) + 1;
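		/* Worked example (illustrative numbers, not from the
		 * datasheet): a port whose base time is 200000 ns after the
		 * earliest one gets rbt = 200000 ns = 1000 deltas of 200 ns,
		 * so entry_point_delta = 1001. The port owning the earliest
		 * base time gets rbt = 0 and entry_point_delta = 1.
		 */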
		schedule_entry_points[cycle].subschindx = cycle;
		schedule_entry_points[cycle].delta = entry_point_delta;
		schedule_entry_points[cycle].address = schedule_start_idx;

		/* The subschedule end indices need to be
		 * monotonically increasing.
		 */
		for (i = cycle; i < 8; i++)
			schedule_params->subscheind[i] = schedule_end_idx;

		for (i = 0; i < offload->num_entries; i++, k++) {
			s64 delta_ns = offload->entries[i].interval;

			schedule[k].delta = ns_to_sja1105_delta(delta_ns);
			schedule[k].destports = BIT(port);
			schedule[k].resmedia_en = true;
			schedule[k].resmedia = SJA1105_GATE_MASK &
					~offload->entries[i].gate_mask;
		}
		cycle++;
	}

	if (!list_empty(&gating_cfg->entries)) {
		struct sja1105_gate_entry *e;

		/* Relative base time */
		s64 rbt;

		schedule_start_idx = k;
		schedule_end_idx = k + gating_cfg->num_entries - 1;
		rbt = future_base_time(gating_cfg->base_time,
				       gating_cfg->cycle_time,
				       tas_data->earliest_base_time);
		rbt -= tas_data->earliest_base_time;
		entry_point_delta = ns_to_sja1105_delta(rbt) + 1;

		schedule_entry_points[cycle].subschindx = cycle;
		schedule_entry_points[cycle].delta = entry_point_delta;
		schedule_entry_points[cycle].address = schedule_start_idx;

		for (i = cycle; i < 8; i++)
			schedule_params->subscheind[i] = schedule_end_idx;

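		/* Each tc-gate entry below becomes one timeslot: the
		 * destination ports and virtual link index are taken from
		 * the associated rule (e->rule->vl), and the gate state maps
		 * to a window start (winst) or window end (winend)
		 * instruction.
		 */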
		list_for_each_entry(e, &gating_cfg->entries, list) {
			schedule[k].delta = ns_to_sja1105_delta(e->interval);
			schedule[k].destports = e->rule->vl.destports;
			schedule[k].setvalid = true;
			schedule[k].txen = true;
			schedule[k].vlindex = e->rule->vl.sharindx;
			schedule[k].winstindex = e->rule->vl.sharindx;
			if (e->gate_state) /* Gate open */
				schedule[k].winst = true;
			else /* Gate closed */
				schedule[k].winend = true;
			k++;
		}
	}

	return 0;
}

/* Suppose there are 2 port subschedules, each executing an arbitrary number
 * of gate open/close events cyclically.
 * None of those gate events must ever occur at the exact same time, otherwise
 * the switch is known to act in exotically strange ways.
 * However the hardware doesn't bother performing these integrity checks.
 * So here we are with the task of validating whether the new @admin offload
 * has any conflict with the already established TAS configuration in
 * tas_data->offload. We already know the other ports are in harmony with one
 * another, otherwise we wouldn't have saved them.
 * Each gate event executes periodically, with a period of @cycle_time and a
 * phase given by its cycle's @base_time plus its offset within the cycle
 * (which in turn is given by the length of the events prior to it).
 * There are two aspects to possible collisions:
 * - Collisions within one cycle's (actually the longest cycle's) time frame.
 *   For that, we need to compare the cartesian product of each possible
 *   occurrence of each event within one cycle time.
 * - Collisions in the future. Events may not collide within one cycle time,
 *   but if two port schedules don't have the same periodicity (aka the cycle
 *   times aren't multiples of one another), they surely will some time in the
 *   future (actually they will collide an infinite amount of times).
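 *
 * For example (illustrative numbers): cycle times of 200 us and 300 us are
 * not integer multiples of one another (300000 % 200000 != 0), so the check
 * below reports a conflict immediately, without comparing individual gate
 * events.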
 */
static bool
sja1105_tas_check_conflicts(struct sja1105_private *priv, int port,
			    const struct tc_taprio_qopt_offload *admin)
{
	struct sja1105_tas_data *tas_data = &priv->tas_data;
	const struct tc_taprio_qopt_offload *offload;
	s64 max_cycle_time, min_cycle_time;
	s64 delta1, delta2;
	s64 rbt1, rbt2;
	s64 stop_time;
	s64 t1, t2;
	int i, j;
	s32 rem;

	offload = tas_data->offload[port];
	if (!offload)
		return false;

	/* Check if the two cycle times are multiples of one another.
	 * If they aren't, then they will surely collide.
	 */
	max_cycle_time = max(offload->cycle_time, admin->cycle_time);
	min_cycle_time = min(offload->cycle_time, admin->cycle_time);
	div_s64_rem(max_cycle_time, min_cycle_time, &rem);
	if (rem)
		return true;

	/* Calculate the "reduced" base time of each of the two cycles
	 * (transposed back as close to 0 as possible) by taking the
	 * remainder of a division by the cycle time.
	 */
	div_s64_rem(offload->base_time, offload->cycle_time, &rem);
	rbt1 = rem;

	div_s64_rem(admin->base_time, admin->cycle_time, &rem);
	rbt2 = rem;

	stop_time = max_cycle_time + max(rbt1, rbt2);
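	/* Illustrative numbers: a base time of 1000300 ns with a 500000 ns
	 * cycle reduces to rbt = 300 ns. With reduced base times of 300 ns
	 * and 100 ns and a longest cycle of 500000 ns, stop_time is
	 * 500300 ns, which bounds how far the nested loops below advance
	 * t1 and t2.
	 */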

	/* delta1 is the relative base time of each GCL entry within
	 * the established ports' TAS config.
	 */
	for (i = 0, delta1 = 0;
	     i < offload->num_entries;
	     delta1 += offload->entries[i].interval, i++) {
		/* delta2 is the relative base time of each GCL entry
		 * within the newly added TAS config.
		 */
		for (j = 0, delta2 = 0;
		     j < admin->num_entries;
		     delta2 += admin->entries[j].interval, j++) {
			/* t1 follows all possible occurrences of the
			 * established ports' GCL entry i within the
			 * first cycle time.
			 */
			for (t1 = rbt1 + delta1;
			     t1 <= stop_time;
			     t1 += offload->cycle_time) {
				/* t2 follows all possible occurrences
				 * of the newly added GCL entry j
				 * within the first cycle time.
				 */
				for (t2 = rbt2 + delta2;
				     t2 <= stop_time;
				     t2 += admin->cycle_time) {
					if (t1 == t2) {
						dev_warn(priv->ds->dev,
							 "GCL entry %d collides with entry %d of port %d\n",
							 j, i, port);
						return true;
					}
				}
			}
		}
	}

	return false;
}

/* Check the tc-taprio configuration on @port for conflicts with the tc-gate
 * global subschedule. If @port is -1, check it against all ports.
 * To reuse the sja1105_tas_check_conflicts logic without refactoring it,
 * convert the gating configuration to a dummy tc-taprio offload structure.
 */
bool sja1105_gating_check_conflicts(struct sja1105_private *priv, int port,
				    struct netlink_ext_ack *extack)
{
	struct sja1105_gating_config *gating_cfg = &priv->tas_data.gating_cfg;
	size_t num_entries = gating_cfg->num_entries;
	struct tc_taprio_qopt_offload *dummy;
	struct sja1105_gate_entry *e;
	bool conflict;
	int i = 0;

	if (list_empty(&gating_cfg->entries))
		return false;

	dummy = kzalloc(struct_size(dummy, entries, num_entries), GFP_KERNEL);
	if (!dummy) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to allocate memory");
		return true;
	}

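	/* Only the timing fields of the dummy are populated below:
	 * sja1105_tas_check_conflicts() never reads the gate masks, so the
	 * intervals, base time and cycle time are enough.
	 */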
	dummy->num_entries = num_entries;
	dummy->base_time = gating_cfg->base_time;
	dummy->cycle_time = gating_cfg->cycle_time;

	list_for_each_entry(e, &gating_cfg->entries, list)
		dummy->entries[i++].interval = e->interval;

	if (port != -1) {
		conflict = sja1105_tas_check_conflicts(priv, port, dummy);
	} else {
		for (port = 0; port < SJA1105_NUM_PORTS; port++) {
			conflict = sja1105_tas_check_conflicts(priv, port,
							       dummy);
			if (conflict)
				break;
		}
	}

	kfree(dummy);

	return conflict;
}

int sja1105_setup_tc_taprio(struct dsa_switch *ds, int port,
			    struct tc_taprio_qopt_offload *admin)
{
	struct sja1105_private *priv = ds->priv;
	struct sja1105_tas_data *tas_data = &priv->tas_data;
	int other_port, rc, i;

	/* Can't change an already configured port (must delete qdisc first).
	 * Can't delete the qdisc from an unconfigured port.
	 */
	if (!!tas_data->offload[port] == admin->enable)
		return -EINVAL;

	if (!admin->enable) {
		taprio_offload_free(tas_data->offload[port]);
		tas_data->offload[port] = NULL;

		rc = sja1105_init_scheduling(priv);
		if (rc < 0)
			return rc;

		return sja1105_static_config_reload(priv, SJA1105_SCHEDULING);
	}

	/* The cycle time extension is the amount of time the last cycle from
	 * the old OPER needs to be extended in order to phase-align with the
	 * base time of the ADMIN when that becomes the new OPER.
	 * But of course our switch needs to be reset to switch-over between
	 * the ADMIN and the OPER configs - so much for a seamless transition.
	 * So don't add insult to injury and just say we don't support cycle
	 * time extension.
	 */
	if (admin->cycle_time_extension)
		return -ENOTSUPP;

	for (i = 0; i < admin->num_entries; i++) {
		s64 delta_ns = admin->entries[i].interval;
		s64 delta_cycles = ns_to_sja1105_delta(delta_ns);
		bool too_long, too_short;

		too_long = (delta_cycles >= SJA1105_TAS_MAX_DELTA);
		too_short = (delta_cycles == 0);
		if (too_long || too_short) {
			dev_err(priv->ds->dev,
				"Interval %llu too %s for GCL entry %d\n",
				delta_ns, too_long ? "long" : "short", i);
			return -ERANGE;
		}
	}

	for (other_port = 0; other_port < SJA1105_NUM_PORTS; other_port++) {
		if (other_port == port)
			continue;

		if (sja1105_tas_check_conflicts(priv, other_port, admin))
			return -ERANGE;
	}

	if (sja1105_gating_check_conflicts(priv, port, NULL)) {
		dev_err(ds->dev, "Conflict with tc-gate schedule\n");
		return -ERANGE;
	}

	tas_data->offload[port] = taprio_offload_get(admin);

	rc = sja1105_init_scheduling(priv);
	if (rc < 0)
		return rc;

	return sja1105_static_config_reload(priv, SJA1105_SCHEDULING);
}

static int sja1105_tas_check_running(struct sja1105_private *priv)
{
	struct sja1105_tas_data *tas_data = &priv->tas_data;
	struct dsa_switch *ds = priv->ds;
	struct sja1105_ptp_cmd cmd = {0};
	int rc;

	rc = sja1105_ptp_commit(ds, &cmd, SPI_READ);
	if (rc < 0)
		return rc;

	if (cmd.ptpstrtsch == 1)
		/* Schedule successfully started */
		tas_data->state = SJA1105_TAS_STATE_RUNNING;
	else if (cmd.ptpstopsch == 1)
		/* Schedule is stopped */
		tas_data->state = SJA1105_TAS_STATE_DISABLED;
	else
		/* Schedule is probably not configured with PTP clock source */
		rc = -EINVAL;

	return rc;
}

/* Write to PTPCLKCORP */
static int sja1105_tas_adjust_drift(struct sja1105_private *priv,
				    u64 correction)
{
	const struct sja1105_regs *regs = priv->info->regs;
	u32 ptpclkcorp = ns_to_sja1105_ticks(correction);

	return sja1105_xfer_u32(priv, SPI_WRITE, regs->ptpclkcorp,
				&ptpclkcorp, NULL);
}

/* Write to PTPSCHTM */
static int sja1105_tas_set_base_time(struct sja1105_private *priv,
				     u64 base_time)
{
	const struct sja1105_regs *regs = priv->info->regs;
	u64 ptpschtm = ns_to_sja1105_ticks(base_time);

	return sja1105_xfer_u64(priv, SPI_WRITE, regs->ptpschtm,
				&ptpschtm, NULL);
}

static int sja1105_tas_start(struct sja1105_private *priv)
{
	struct sja1105_tas_data *tas_data = &priv->tas_data;
	struct sja1105_ptp_cmd *cmd = &priv->ptp_data.cmd;
	struct dsa_switch *ds = priv->ds;
	int rc;

	dev_dbg(ds->dev, "Starting the TAS\n");

	if (tas_data->state == SJA1105_TAS_STATE_ENABLED_NOT_RUNNING ||
	    tas_data->state == SJA1105_TAS_STATE_RUNNING) {
		dev_err(ds->dev, "TAS already started\n");
		return -EINVAL;
	}

	cmd->ptpstrtsch = 1;
	cmd->ptpstopsch = 0;

	rc = sja1105_ptp_commit(ds, cmd, SPI_WRITE);
	if (rc < 0)
		return rc;

	tas_data->state = SJA1105_TAS_STATE_ENABLED_NOT_RUNNING;

	return 0;
}

static int sja1105_tas_stop(struct sja1105_private *priv)
{
	struct sja1105_tas_data *tas_data = &priv->tas_data;
	struct sja1105_ptp_cmd *cmd = &priv->ptp_data.cmd;
	struct dsa_switch *ds = priv->ds;
	int rc;

	dev_dbg(ds->dev, "Stopping the TAS\n");

	if (tas_data->state == SJA1105_TAS_STATE_DISABLED) {
		dev_err(ds->dev, "TAS already disabled\n");
		return -EINVAL;
	}

	cmd->ptpstopsch = 1;
	cmd->ptpstrtsch = 0;

	rc = sja1105_ptp_commit(ds, cmd, SPI_WRITE);
	if (rc < 0)
		return rc;

	tas_data->state = SJA1105_TAS_STATE_DISABLED;

	return 0;
}

/* The schedule engine and the PTP clock are driven by the same oscillator, and
 * they run in parallel. But whilst the PTP clock can keep an absolute
 * time-of-day, the schedule engine is only running in 'ticks' (25 ticks make
 * up a delta, which is 200ns), and wrapping around at the end of each cycle.
 * The schedule engine is started when the PTP clock reaches the PTPSCHTM time
 * (in PTP domain).
 * Because the PTP clock can be rate-corrected (accelerated or slowed down) by
 * a software servo, and the schedule engine clock runs in parallel to the PTP
 * clock, there is logic internal to the switch that periodically keeps the
 * schedule engine from drifting away.
 * The frequency with which this internal syntonization happens is the PTP
 * clock correction period (PTPCLKCORP). It is a value also in the PTP clock
 * domain, and is also rate-corrected.
 * To be precise, during a correction period, there is logic to determine by
 * how many scheduler clock ticks the PTP clock has drifted. At the end of
 * each correction period/beginning of new one, the length of a delta is
 * shrunk or expanded with an integer number of ticks, compared with the
 * typical 25. So a delta lasts for 200ns (or 25 ticks) only on average.
 * Sometimes it is longer, sometimes it is shorter. The internal syntonization
 * logic can adjust for at most 5 ticks each 20 ticks.
 *
 * The first implication is that you should choose your schedule correction
 * period to be an integer multiple of the schedule length. Preferably one.
 * In case there are schedules of multiple ports active, then the correction
 * period needs to be a multiple of them all. Given the restriction that the
 * cycle times have to be multiples of one another anyway, this means the
 * correction period can simply be the largest cycle time, hence the current
 * choice. This way, the updates are always synchronous to the transmission
 * cycle, and therefore predictable.
 *
 * The second implication is that at the beginning of a correction period, the
 * first few deltas will be modulated in time, until the schedule engine is
 * properly phase-aligned with the PTP clock. For this reason, you should place
 * your best-effort traffic at the beginning of a cycle, and your
 * time-triggered traffic afterwards.
 *
 * The third implication is that once the schedule engine is started, it can
 * only adjust for so much drift within a correction period. In the servo you
 * can only change the PTPCLKRATE, but not step the clock (PTPCLKADD). If you
 * want to do the latter, you need to stop and restart the schedule engine,
 * which is what the state machine handles.
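 *
 * To put illustrative numbers on it (derived from the 200 ns / 25 tick delta
 * mentioned above, i.e. an 8 ns clock tick): a correction period equal to a
 * 10 ms maximum cycle time corresponds to 1,250,000 clock ticks.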
 */
static void sja1105_tas_state_machine(struct work_struct *work)
{
	struct sja1105_tas_data *tas_data = work_to_sja1105_tas(work);
	struct sja1105_private *priv = tas_to_sja1105(tas_data);
	struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
	struct timespec64 base_time_ts, now_ts;
	struct dsa_switch *ds = priv->ds;
	struct timespec64 diff;
	s64 base_time, now;
	int rc = 0;

	mutex_lock(&ptp_data->lock);

	switch (tas_data->state) {
	case SJA1105_TAS_STATE_DISABLED:
		/* Can't do anything at all if clock is still being stepped */
		if (tas_data->last_op != SJA1105_PTP_ADJUSTFREQ)
			break;

		rc = sja1105_tas_adjust_drift(priv, tas_data->max_cycle_time);
		if (rc < 0)
			break;

		rc = __sja1105_ptp_gettimex(ds, &now, NULL);
		if (rc < 0)
			break;

		/* Plan to start the earliest schedule first. The others
		 * will be started in hardware, by way of their respective
		 * entry points delta.
		 * Try our best to avoid fringe cases (race condition between
		 * ptpschtm and ptpstrtsch) by pushing the oper_base_time at
		 * least one second in the future from now. This is not ideal,
		 * but this only needs to buy us time until the
		 * sja1105_tas_start command below gets executed.
		 */
		base_time = future_base_time(tas_data->earliest_base_time,
					     tas_data->max_cycle_time,
					     now + 1ull * NSEC_PER_SEC);
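		/* See sja1105_init_scheduling(): all entry point deltas were
		 * advanced by one delta, so PTPSCHTM must start one delta
		 * (200 ns) earlier than the planned operational base time.
		 */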
		base_time -= sja1105_delta_to_ns(1);

		rc = sja1105_tas_set_base_time(priv, base_time);
		if (rc < 0)
			break;

		tas_data->oper_base_time = base_time;

		rc = sja1105_tas_start(priv);
		if (rc < 0)
			break;

		base_time_ts = ns_to_timespec64(base_time);
		now_ts = ns_to_timespec64(now);

		dev_dbg(ds->dev, "OPER base time %lld.%09ld (now %lld.%09ld)\n",
			base_time_ts.tv_sec, base_time_ts.tv_nsec,
			now_ts.tv_sec, now_ts.tv_nsec);

		break;

	case SJA1105_TAS_STATE_ENABLED_NOT_RUNNING:
		if (tas_data->last_op != SJA1105_PTP_ADJUSTFREQ) {
			/* Clock was stepped... bad news for TAS */
			sja1105_tas_stop(priv);
			break;
		}

		/* Check if TAS has actually started, by comparing the
		 * scheduled start time with the SJA1105 PTP clock
		 */
		rc = __sja1105_ptp_gettimex(ds, &now, NULL);
		if (rc < 0)
			break;

		if (now < tas_data->oper_base_time) {
			/* TAS has not started yet */
			diff = ns_to_timespec64(tas_data->oper_base_time - now);
			dev_dbg(ds->dev, "time to start: [%lld.%09ld]",
				diff.tv_sec, diff.tv_nsec);
			break;
		}

		/* Time elapsed, what happened? */
		rc = sja1105_tas_check_running(priv);
		if (rc < 0)
			break;

		if (tas_data->state != SJA1105_TAS_STATE_RUNNING)
			/* TAS should have started by now, but has not */
			dev_err(ds->dev,
				"TAS not started despite time elapsed\n");

		break;

	case SJA1105_TAS_STATE_RUNNING:
		/* Clock was stepped... bad news for TAS */
		if (tas_data->last_op != SJA1105_PTP_ADJUSTFREQ) {
			sja1105_tas_stop(priv);
			break;
		}

		rc = sja1105_tas_check_running(priv);
		if (rc < 0)
			break;

		if (tas_data->state != SJA1105_TAS_STATE_RUNNING)
			dev_err(ds->dev, "TAS surprisingly stopped\n");

		break;

	default:
		if (net_ratelimit())
			dev_err(ds->dev, "TAS in an invalid state (incorrect use of API)!\n");
	}

	if (rc && net_ratelimit())
		dev_err(ds->dev, "An operation returned %d\n", rc);

	mutex_unlock(&ptp_data->lock);
}

void sja1105_tas_clockstep(struct dsa_switch *ds)
{
	struct sja1105_private *priv = ds->priv;
	struct sja1105_tas_data *tas_data = &priv->tas_data;

	if (!tas_data->enabled)
		return;

	tas_data->last_op = SJA1105_PTP_CLOCKSTEP;
	schedule_work(&tas_data->tas_work);
}

void sja1105_tas_adjfreq(struct dsa_switch *ds)
{
	struct sja1105_private *priv = ds->priv;
	struct sja1105_tas_data *tas_data = &priv->tas_data;

	if (!tas_data->enabled)
		return;

	/* No reason to schedule the workqueue, nothing changed */
	if (tas_data->state == SJA1105_TAS_STATE_RUNNING)
		return;

	tas_data->last_op = SJA1105_PTP_ADJUSTFREQ;
	schedule_work(&tas_data->tas_work);
}

void sja1105_tas_setup(struct dsa_switch *ds)
{
	struct sja1105_private *priv = ds->priv;
	struct sja1105_tas_data *tas_data = &priv->tas_data;

	INIT_WORK(&tas_data->tas_work, sja1105_tas_state_machine);
	tas_data->state = SJA1105_TAS_STATE_DISABLED;
	tas_data->last_op = SJA1105_PTP_NONE;

	INIT_LIST_HEAD(&tas_data->gating_cfg.entries);
}

void sja1105_tas_teardown(struct dsa_switch *ds)
{
	struct sja1105_private *priv = ds->priv;
	struct tc_taprio_qopt_offload *offload;
	int port;

	cancel_work_sync(&priv->tas_data.tas_work);

	for (port = 0; port < SJA1105_NUM_PORTS; port++) {
		offload = priv->tas_data.offload[port];
		if (!offload)
			continue;

		taprio_offload_free(offload);
	}
}