Lines matching refs:core (drivers/clk/clk.c)

50 	struct clk_core		*core;
98 struct clk_core *core;
109 static int clk_pm_runtime_get(struct clk_core *core)
111 if (!core->rpm_enabled)
114 return pm_runtime_resume_and_get(core->dev);
117 static void clk_pm_runtime_put(struct clk_core *core)
119 if (!core->rpm_enabled)
122 pm_runtime_put_sync(core->dev);
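The pair above is a no-op whenever the provider has no runtime-PM-enabled device (rpm_enabled is false), so core code brackets every hardware access unconditionally. A minimal sketch of the guard pattern, mirroring its use at lines 210-212 (function name hypothetical):

    static bool foo_is_prepared(struct clk_core *core)
    {
            bool ret = false;

            /* returns 0 immediately when the clk has no RPM device */
            if (!clk_pm_runtime_get(core)) {
                    ret = core->ops->is_prepared(core->hw);
                    clk_pm_runtime_put(core);   /* drop the RPM reference */
            }

            return ret;
    }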
194 static bool clk_core_rate_is_protected(struct clk_core *core)
196 return core->protect_count;
199 static bool clk_core_is_prepared(struct clk_core *core)
207 if (!core->ops->is_prepared)
208 return core->prepare_count;
210 if (!clk_pm_runtime_get(core)) {
211 ret = core->ops->is_prepared(core->hw);
212 clk_pm_runtime_put(core);
218 static bool clk_core_is_enabled(struct clk_core *core)
226 if (!core->ops->is_enabled)
227 return core->enable_count;
239 if (core->rpm_enabled) {
240 pm_runtime_get_noresume(core->dev);
241 if (!pm_runtime_active(core->dev)) {
252 if ((core->flags & CLK_OPS_PARENT_ENABLE) && core->parent)
253 if (!clk_core_is_enabled(core->parent)) {
258 ret = core->ops->is_enabled(core->hw);
260 if (core->rpm_enabled)
261 pm_runtime_put(core->dev);
270 return !clk ? NULL : clk->core->name;
276 return hw->core->name;
282 return !clk ? NULL : clk->core->hw;
288 return hw->core->num_parents;
294 return hw->core->parent ? hw->core->parent->hw : NULL;
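Lines 270-294 are the read-only accessors providers use instead of reaching into struct clk_core directly. A hedged sketch of a provider callback built on them (the divide-by-two is hypothetical):

    /* hypothetical .recalc_rate for a fixed divide-by-two clock */
    static unsigned long foo_recalc_rate(struct clk_hw *hw,
                                         unsigned long parent_rate)
    {
            pr_debug("%s: %u possible parents\n",
                     clk_hw_get_name(hw), clk_hw_get_num_parents(hw));

            return parent_rate / 2;
    }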
299 struct clk_core *core)
304 if (!strcmp(core->name, name))
305 return core;
307 hlist_for_each_entry(child, &core->children, child_node) {
362 * @core: clk to find parent of
396 static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
398 const char *name = core->parents[p_index].fw_name;
399 int index = core->parents[p_index].index;
401 struct device *dev = core->dev;
403 struct device_node *np = core->of_node;
424 return hw->core;
427 static void clk_core_fill_parent_index(struct clk_core *core, u8 index)
429 struct clk_parent_map *entry = &core->parents[index];
433 parent = entry->hw->core;
435 parent = clk_core_get(core, index);
450 entry->core = parent;
453 static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core,
456 if (!core || index >= core->num_parents || !core->parents)
459 if (!core->parents[index].core)
460 clk_core_fill_parent_index(core, index);
462 return core->parents[index].core;
470 parent = clk_core_get_parent_by_index(hw->core, index);
478 return !clk ? 0 : clk->core->enable_count;
481 static unsigned long clk_core_get_rate_nolock(struct clk_core *core)
483 if (!core)
486 if (!core->num_parents || core->parent)
487 return core->rate;
499 return clk_core_get_rate_nolock(hw->core);
503 static unsigned long clk_core_get_accuracy_no_lock(struct clk_core *core)
505 if (!core)
508 return core->accuracy;
513 return hw->core->flags;
519 return clk_core_is_prepared(hw->core);
525 return clk_core_rate_is_protected(hw->core);
531 return clk_core_is_enabled(hw->core);
540 return clk_core_is_enabled(clk->core);
553 static void clk_core_init_rate_req(struct clk_core * const core,
557 static int clk_core_round_rate_nolock(struct clk_core *core,
560 static bool clk_core_has_parent(struct clk_core *core, const struct clk_core *parent)
566 if (core->parent == parent)
569 for (i = 0; i < core->num_parents; i++) {
570 tmp = clk_core_get_parent_by_index(core, i);
582 clk_core_forward_rate_req(struct clk_core *core,
588 if (WARN_ON(!clk_core_has_parent(core, parent)))
604 struct clk_core *core = hw->core;
605 struct clk_core *parent = core->parent;
609 if (core->flags & CLK_SET_RATE_PARENT) {
617 clk_core_forward_rate_req(core, req, parent, &parent_req,
632 best = clk_core_get_rate_nolock(core);
645 struct clk_core *core = hw->core, *parent, *best_parent = NULL;
650 if (core->flags & CLK_SET_RATE_NO_REPARENT)
654 num_parents = core->num_parents;
658 parent = clk_core_get_parent_by_index(core, i);
662 if (core->flags & CLK_SET_RATE_PARENT) {
665 clk_core_forward_rate_req(core, req, parent, &parent_req, req->rate);
700 struct clk_core *core = clk_core_lookup(name);
702 return !core ? NULL : core->hw->clk;
705 static void clk_core_get_boundaries(struct clk_core *core,
713 *min_rate = core->min_rate;
714 *max_rate = core->max_rate;
716 hlist_for_each_entry(clk_user, &core->clks, clks_node)
719 hlist_for_each_entry(clk_user, &core->clks, clks_node)
735 clk_core_get_boundaries(hw->core, min_rate, max_rate);
739 static bool clk_core_check_boundaries(struct clk_core *core,
747 if (min_rate > core->max_rate || max_rate < core->min_rate)
750 hlist_for_each_entry(user, &core->clks, clks_node)
760 hw->core->min_rate = min_rate;
761 hw->core->max_rate = max_rate;
811 static void clk_core_rate_unprotect(struct clk_core *core)
815 if (!core)
818 if (WARN(core->protect_count == 0,
819 "%s already unprotected\n", core->name))
822 if (--core->protect_count > 0)
825 clk_core_rate_unprotect(core->parent);
828 static int clk_core_rate_nuke_protect(struct clk_core *core)
834 if (!core)
837 if (core->protect_count == 0)
840 ret = core->protect_count;
841 core->protect_count = 1;
842 clk_core_rate_unprotect(core);
879 clk_core_rate_unprotect(clk->core);
886 static void clk_core_rate_protect(struct clk_core *core)
890 if (!core)
893 if (core->protect_count == 0)
894 clk_core_rate_protect(core->parent);
896 core->protect_count++;
899 static void clk_core_rate_restore_protect(struct clk_core *core, int count)
903 if (!core)
909 clk_core_rate_protect(core);
910 core->protect_count = count;
937 clk_core_rate_protect(clk->core);
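Lines 828-842 and 899-910 form a save/restore pair: clk_core_rate_nuke_protect() collapses protect_count to zero in a single unprotect walk and hands back the old count, which clk_core_rate_restore_protect() later reinstates. The core uses this to lift protection temporarily, e.g. around rounding at lines 2401 and 2414:

    int cnt;

    cnt = clk_core_rate_nuke_protect(core);   /* save count, drop protection */
    /* ... act on the clock as if it were unprotected ... */
    clk_core_rate_restore_protect(core, cnt); /* put the old count back */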
945 static void clk_core_unprepare(struct clk_core *core)
949 if (!core)
952 if (WARN(core->prepare_count == 0,
953 "%s already unprepared\n", core->name))
956 if (WARN(core->prepare_count == 1 && core->flags & CLK_IS_CRITICAL,
957 "Unpreparing critical %s\n", core->name))
960 if (core->flags & CLK_SET_RATE_GATE)
961 clk_core_rate_unprotect(core);
963 if (--core->prepare_count > 0)
966 WARN(core->enable_count > 0, "Unpreparing enabled %s\n", core->name);
968 trace_clk_unprepare(core);
970 if (core->ops->unprepare)
971 core->ops->unprepare(core->hw);
973 trace_clk_unprepare_complete(core);
974 clk_core_unprepare(core->parent);
975 clk_pm_runtime_put(core);
978 static void clk_core_unprepare_lock(struct clk_core *core)
981 clk_core_unprepare(core);
1001 clk_core_unprepare_lock(clk->core);
1005 static int clk_core_prepare(struct clk_core *core)
1011 if (!core)
1014 if (core->prepare_count == 0) {
1015 ret = clk_pm_runtime_get(core);
1019 ret = clk_core_prepare(core->parent);
1023 trace_clk_prepare(core);
1025 if (core->ops->prepare)
1026 ret = core->ops->prepare(core->hw);
1028 trace_clk_prepare_complete(core);
1034 core->prepare_count++;
1043 if (core->flags & CLK_SET_RATE_GATE)
1044 clk_core_rate_protect(core);
1048 clk_core_unprepare(core->parent);
1050 clk_pm_runtime_put(core);
1054 static int clk_core_prepare_lock(struct clk_core *core)
1059 ret = clk_core_prepare(core);
1082 return clk_core_prepare_lock(clk->core);
1086 static void clk_core_disable(struct clk_core *core)
1090 if (!core)
1093 if (WARN(core->enable_count == 0, "%s already disabled\n", core->name))
1096 if (WARN(core->enable_count == 1 && core->flags & CLK_IS_CRITICAL,
1097 "Disabling critical %s\n", core->name))
1100 if (--core->enable_count > 0)
1103 trace_clk_disable(core);
1105 if (core->ops->disable)
1106 core->ops->disable(core->hw);
1108 trace_clk_disable_complete(core);
1110 clk_core_disable(core->parent);
1113 static void clk_core_disable_lock(struct clk_core *core)
1118 clk_core_disable(core);
1139 clk_core_disable_lock(clk->core);
1143 static int clk_core_enable(struct clk_core *core)
1149 if (!core)
1152 if (WARN(core->prepare_count == 0,
1153 "Enabling unprepared %s\n", core->name))
1156 if (core->enable_count == 0) {
1157 ret = clk_core_enable(core->parent);
1162 trace_clk_enable(core);
1164 if (core->ops->enable)
1165 ret = core->ops->enable(core->hw);
1167 trace_clk_enable_complete(core);
1170 clk_core_disable(core->parent);
1175 core->enable_count++;
1179 static int clk_core_enable_lock(struct clk_core *core)
1185 ret = clk_core_enable(core);
1203 struct clk_core *core = hw->core;
1205 if (core->enable_count)
1206 core->ops->enable(hw);
1208 core->ops->disable(hw);
1212 static int clk_core_save_context(struct clk_core *core)
1217 hlist_for_each_entry(child, &core->children, child_node) {
1223 if (core->ops && core->ops->save_context)
1224 ret = core->ops->save_context(core->hw);
1229 static void clk_core_restore_context(struct clk_core *core)
1233 if (core->ops && core->ops->restore_context)
1234 core->ops->restore_context(core->hw);
1236 hlist_for_each_entry(child, &core->children, child_node)
1276 struct clk_core *core;
1278 hlist_for_each_entry(core, &clk_root_list, child_node)
1279 clk_core_restore_context(core);
1281 hlist_for_each_entry(core, &clk_orphan_list, child_node)
1282 clk_core_restore_context(core);
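clk_save_context()/clk_restore_context() (lines 1212-1282) recurse over both clk_root_list and clk_orphan_list so every registered clock is visited. A hedged sketch of the intended call sites, platform sleep hooks for an SoC that loses clock state across suspend (hook names hypothetical):

    static int foo_pm_suspend(void)
    {
            return clk_save_context();   /* snapshot state via ops->save_context */
    }

    static void foo_pm_resume(void)
    {
            clk_restore_context();       /* replay it via ops->restore_context */
    }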
1304 return clk_core_enable_lock(clk->core);
1325 return clk && !(clk->core->ops->enable && clk->core->ops->disable);
1329 static int clk_core_prepare_enable(struct clk_core *core)
1333 ret = clk_core_prepare_lock(core);
1337 ret = clk_core_enable_lock(core);
1339 clk_core_unprepare_lock(core);
1344 static void clk_core_disable_unprepare(struct clk_core *core)
1346 clk_core_disable_lock(core);
1347 clk_core_unprepare_lock(core);
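The two internal helpers above mirror the public consumer idiom clk_prepare_enable()/clk_disable_unprepare(). A hedged consumer-side sketch (device pointer and "bus" con_id hypothetical):

    struct clk *clk;
    int ret;

    clk = devm_clk_get(dev, "bus");      /* hypothetical clock name */
    if (IS_ERR(clk))
            return PTR_ERR(clk);

    ret = clk_prepare_enable(clk);       /* prepare may sleep; enable is atomic */
    if (ret)
            return ret;

    /* ... clocked register accesses ... */

    clk_disable_unprepare(clk);          /* tear down in strict reverse order */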
1350 static void __init clk_unprepare_unused_subtree(struct clk_core *core)
1356 hlist_for_each_entry(child, &core->children, child_node)
1359 if (core->prepare_count)
1362 if (core->flags & CLK_IGNORE_UNUSED)
1365 if (clk_pm_runtime_get(core))
1368 if (clk_core_is_prepared(core)) {
1369 trace_clk_unprepare(core);
1370 if (core->ops->unprepare_unused)
1371 core->ops->unprepare_unused(core->hw);
1372 else if (core->ops->unprepare)
1373 core->ops->unprepare(core->hw);
1374 trace_clk_unprepare_complete(core);
1377 clk_pm_runtime_put(core);
1380 static void __init clk_disable_unused_subtree(struct clk_core *core)
1387 hlist_for_each_entry(child, &core->children, child_node)
1390 if (core->flags & CLK_OPS_PARENT_ENABLE)
1391 clk_core_prepare_enable(core->parent);
1393 if (clk_pm_runtime_get(core))
1398 if (core->enable_count)
1401 if (core->flags & CLK_IGNORE_UNUSED)
1409 if (clk_core_is_enabled(core)) {
1410 trace_clk_disable(core);
1411 if (core->ops->disable_unused)
1412 core->ops->disable_unused(core->hw);
1413 else if (core->ops->disable)
1414 core->ops->disable(core->hw);
1415 trace_clk_disable_complete(core);
1420 clk_pm_runtime_put(core);
1422 if (core->flags & CLK_OPS_PARENT_ENABLE)
1423 clk_core_disable_unprepare(core->parent);
1436 struct clk_core *core;
1447 hlist_for_each_entry(core, &clk_root_list, child_node)
1448 clk_disable_unused_subtree(core);
1450 hlist_for_each_entry(core, &clk_orphan_list, child_node)
1451 clk_disable_unused_subtree(core);
1453 hlist_for_each_entry(core, &clk_root_list, child_node)
1454 clk_unprepare_unused_subtree(core);
1456 hlist_for_each_entry(core, &clk_orphan_list, child_node)
1457 clk_unprepare_unused_subtree(core);
1465 static int clk_core_determine_round_nolock(struct clk_core *core,
1472 if (!core)
1485 __func__, core->name);
1490 * At this point, core protection will be disabled
1495 if (clk_core_rate_is_protected(core)) {
1496 req->rate = core->rate;
1497 } else if (core->ops->determine_rate) {
1498 return core->ops->determine_rate(core->hw, req);
1499 } else if (core->ops->round_rate) {
1500 rate = core->ops->round_rate(core->hw, req->rate,
1513 static void clk_core_init_rate_req(struct clk_core * const core,
1525 if (!core)
1528 req->core = core;
1530 clk_core_get_boundaries(core, &req->min_rate, &req->max_rate);
1532 parent = core->parent;
1558 clk_core_init_rate_req(hw->core, req, rate);
1582 clk_core_forward_rate_req(hw->core, old_req,
1583 parent->core, req,
1588 static bool clk_core_can_round(struct clk_core * const core)
1590 return core->ops->determine_rate || core->ops->round_rate;
1593 static int clk_core_round_rate_nolock(struct clk_core *core,
1600 if (!core) {
1605 if (clk_core_can_round(core))
1606 return clk_core_determine_round_nolock(core, req);
1608 if (core->flags & CLK_SET_RATE_PARENT) {
1611 clk_core_forward_rate_req(core, req, core->parent, &parent_req, req->rate);
1615 ret = clk_core_round_rate_nolock(core->parent, &parent_req);
1627 req->rate = core->rate;
1645 return clk_core_round_rate_nolock(hw->core, req);
1669 clk_core_init_rate_req(hw->core, &req, rate);
1673 ret = clk_core_round_rate_nolock(hw->core, &req);
1703 clk_core_rate_unprotect(clk->core);
1705 clk_core_init_rate_req(clk->core, &req, rate);
1709 ret = clk_core_round_rate_nolock(clk->core, &req);
1714 clk_core_rate_protect(clk->core);
1727 * @core: clk that is changing rate
1739 static int __clk_notify(struct clk_core *core, unsigned long msg,
1750 if (cn->clk->core == core) {
1764 * @core: first clk in the subtree
1771 static void __clk_recalc_accuracies(struct clk_core *core)
1778 if (core->parent)
1779 parent_accuracy = core->parent->accuracy;
1781 if (core->ops->recalc_accuracy)
1782 core->accuracy = core->ops->recalc_accuracy(core->hw,
1785 core->accuracy = parent_accuracy;
1787 hlist_for_each_entry(child, &core->children, child_node)
1791 static long clk_core_get_accuracy_recalc(struct clk_core *core)
1793 if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE))
1794 __clk_recalc_accuracies(core);
1796 return clk_core_get_accuracy_no_lock(core);
1816 accuracy = clk_core_get_accuracy_recalc(clk->core);
1823 static unsigned long clk_recalc(struct clk_core *core,
1828 if (core->ops->recalc_rate && !clk_pm_runtime_get(core)) {
1829 rate = core->ops->recalc_rate(core->hw, parent_rate);
1830 clk_pm_runtime_put(core);
1837 * @core: first clk in the subtree
1848 static void __clk_recalc_rates(struct clk_core *core, bool update_req,
1857 old_rate = core->rate;
1859 if (core->parent)
1860 parent_rate = core->parent->rate;
1862 core->rate = clk_recalc(core, parent_rate);
1864 core->req_rate = core->rate;
1870 if (core->notifier_count && msg)
1871 __clk_notify(core, msg, old_rate, core->rate);
1873 hlist_for_each_entry(child, &core->children, child_node)
1877 static unsigned long clk_core_get_rate_recalc(struct clk_core *core)
1879 if (core && (core->flags & CLK_GET_RATE_NOCACHE))
1880 __clk_recalc_rates(core, false, 0);
1882 return clk_core_get_rate_nolock(core);
1902 rate = clk_core_get_rate_recalc(clk->core);
1909 static int clk_fetch_parent_index(struct clk_core *core,
1917 for (i = 0; i < core->num_parents; i++) {
1919 if (core->parents[i].core == parent)
1923 if (core->parents[i].core)
1926 /* Maybe core hasn't been cached but the hw is all we know? */
1927 if (core->parents[i].hw) {
1928 if (core->parents[i].hw == parent->hw)
1936 if (parent == clk_core_get(core, i))
1940 if (core->parents[i].name &&
1941 !strcmp(parent->name, core->parents[i].name))
1945 if (i == core->num_parents)
1948 core->parents[i].core = parent;
1966 return clk_fetch_parent_index(hw->core, parent->core);
1971 * Update the orphan status of @core and all its children.
1973 static void clk_core_update_orphan_status(struct clk_core *core, bool is_orphan)
1977 core->orphan = is_orphan;
1979 hlist_for_each_entry(child, &core->children, child_node)
1983 static void clk_reparent(struct clk_core *core, struct clk_core *new_parent)
1985 bool was_orphan = core->orphan;
1987 hlist_del(&core->child_node);
1993 if (new_parent->new_child == core)
1996 hlist_add_head(&core->child_node, &new_parent->children);
1999 clk_core_update_orphan_status(core, becomes_orphan);
2001 hlist_add_head(&core->child_node, &clk_orphan_list);
2003 clk_core_update_orphan_status(core, true);
2006 core->parent = new_parent;
2009 static struct clk_core *__clk_set_parent_before(struct clk_core *core,
2013 struct clk_core *old_parent = core->parent;
2036 if (core->flags & CLK_OPS_PARENT_ENABLE) {
2042 if (core->prepare_count) {
2044 clk_core_enable_lock(core);
2049 clk_reparent(core, parent);
2055 static void __clk_set_parent_after(struct clk_core *core,
2063 if (core->prepare_count) {
2064 clk_core_disable_lock(core);
2069 if (core->flags & CLK_OPS_PARENT_ENABLE) {
2075 static int __clk_set_parent(struct clk_core *core, struct clk_core *parent,
2082 old_parent = __clk_set_parent_before(core, parent);
2084 trace_clk_set_parent(core, parent);
2087 if (parent && core->ops->set_parent)
2088 ret = core->ops->set_parent(core->hw, p_index);
2090 trace_clk_set_parent_complete(core, parent);
2094 clk_reparent(core, old_parent);
2097 __clk_set_parent_after(core, old_parent, parent);
2102 __clk_set_parent_after(core, parent, old_parent);
2109 * @core: first clk in the subtree
2121 static int __clk_speculate_rates(struct clk_core *core,
2130 new_rate = clk_recalc(core, parent_rate);
2133 if (core->notifier_count)
2134 ret = __clk_notify(core, PRE_RATE_CHANGE, core->rate, new_rate);
2138 __func__, core->name, ret);
2142 hlist_for_each_entry(child, &core->children, child_node) {
2152 static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate,
2157 core->new_rate = new_rate;
2158 core->new_parent = new_parent;
2159 core->new_parent_index = p_index;
2161 core->new_child = NULL;
2162 if (new_parent && new_parent != core->parent)
2163 new_parent->new_child = core;
2165 hlist_for_each_entry(child, &core->children, child_node) {
2175 static struct clk_core *clk_calc_new_rates(struct clk_core *core,
2178 struct clk_core *top = core;
2188 if (IS_ERR_OR_NULL(core))
2192 parent = old_parent = core->parent;
2196 clk_core_get_boundaries(core, &min_rate, &max_rate);
2199 if (clk_core_can_round(core)) {
2202 clk_core_init_rate_req(core, &req, rate);
2206 ret = clk_core_determine_round_nolock(core, &req);
2214 parent = req.best_parent_hw ? req.best_parent_hw->core : NULL;
2218 } else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) {
2220 core->new_rate = core->rate;
2231 (core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
2233 __func__, core->name);
2238 if (parent && core->num_parents > 1) {
2239 p_index = clk_fetch_parent_index(core, parent);
2242 __func__, parent->name, core->name);
2247 if ((core->flags & CLK_SET_RATE_PARENT) && parent &&
2252 clk_calc_subtree(core, new_rate, parent, p_index);
2262 static struct clk_core *clk_propagate_rate_change(struct clk_core *core,
2268 if (core->rate == core->new_rate)
2271 if (core->notifier_count) {
2272 ret = __clk_notify(core, event, core->rate, core->new_rate);
2274 fail_clk = core;
2277 hlist_for_each_entry(child, &core->children, child_node) {
2279 if (child->new_parent && child->new_parent != core)
2286 /* handle the new child who might not be in core->children yet */
2287 if (core->new_child) {
2288 tmp_clk = clk_propagate_rate_change(core->new_child, event);
2300 static void clk_change_rate(struct clk_core *core)
2310 old_rate = core->rate;
2312 if (core->new_parent) {
2313 parent = core->new_parent;
2314 best_parent_rate = core->new_parent->rate;
2315 } else if (core->parent) {
2316 parent = core->parent;
2317 best_parent_rate = core->parent->rate;
2320 if (clk_pm_runtime_get(core))
2323 if (core->flags & CLK_SET_RATE_UNGATE) {
2324 clk_core_prepare(core);
2325 clk_core_enable_lock(core);
2328 if (core->new_parent && core->new_parent != core->parent) {
2329 old_parent = __clk_set_parent_before(core, core->new_parent);
2330 trace_clk_set_parent(core, core->new_parent);
2332 if (core->ops->set_rate_and_parent) {
2334 core->ops->set_rate_and_parent(core->hw, core->new_rate,
2336 core->new_parent_index);
2337 } else if (core->ops->set_parent) {
2338 core->ops->set_parent(core->hw, core->new_parent_index);
2341 trace_clk_set_parent_complete(core, core->new_parent);
2342 __clk_set_parent_after(core, core->new_parent, old_parent);
2345 if (core->flags & CLK_OPS_PARENT_ENABLE)
2348 trace_clk_set_rate(core, core->new_rate);
2350 if (!skip_set_rate && core->ops->set_rate)
2351 core->ops->set_rate(core->hw, core->new_rate, best_parent_rate);
2353 trace_clk_set_rate_complete(core, core->new_rate);
2355 core->rate = clk_recalc(core, best_parent_rate);
2357 if (core->flags & CLK_SET_RATE_UNGATE) {
2358 clk_core_disable_lock(core);
2359 clk_core_unprepare(core);
2362 if (core->flags & CLK_OPS_PARENT_ENABLE)
2365 if (core->notifier_count && old_rate != core->rate)
2366 __clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate);
2368 if (core->flags & CLK_RECALC_NEW_RATES)
2369 (void)clk_calc_new_rates(core, core->new_rate);
2375 hlist_for_each_entry_safe(child, tmp, &core->children, child_node) {
2377 if (child->new_parent && child->new_parent != core)
2382 /* handle the new child who might not be in core->children yet */
2383 if (core->new_child)
2384 clk_change_rate(core->new_child);
2386 clk_pm_runtime_put(core);
2389 static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core,
2397 if (!core)
2401 cnt = clk_core_rate_nuke_protect(core);
2405 clk_core_init_rate_req(core, &req, req_rate);
2409 ret = clk_core_round_rate_nolock(core, &req);
2414 clk_core_rate_restore_protect(core, cnt);
2419 static int clk_core_set_rate_nolock(struct clk_core *core,
2426 if (!core)
2429 rate = clk_core_req_round_rate_nolock(core, req_rate);
2432 if (rate == clk_core_get_rate_nolock(core))
2436 if (clk_core_rate_is_protected(core))
2440 top = clk_calc_new_rates(core, req_rate);
2444 ret = clk_pm_runtime_get(core);
2461 core->req_rate = req_rate;
2463 clk_pm_runtime_put(core);
2500 clk_core_rate_unprotect(clk->core);
2502 ret = clk_core_set_rate_nolock(clk->core, rate);
2505 clk_core_rate_protect(clk->core);
2548 ret = clk_core_set_rate_nolock(clk->core, rate);
2550 clk_core_rate_protect(clk->core);
2572 trace_clk_set_rate_range(clk->core, min, max);
2576 __func__, clk->core->name, clk->dev_id, clk->con_id,
2582 clk_core_rate_unprotect(clk->core);
2590 if (!clk_core_check_boundaries(clk->core, min, max)) {
2595 rate = clk->core->req_rate;
2596 if (clk->core->flags & CLK_GET_RATE_NOCACHE)
2597 rate = clk_core_get_rate_recalc(clk->core);
2617 ret = clk_core_set_rate_nolock(clk->core, rate);
2626 clk_core_rate_protect(clk->core);
2668 trace_clk_set_min_rate(clk->core, rate);
2686 trace_clk_set_max_rate(clk->core, rate);
2707 parent = !clk->core->parent ? NULL : clk->core->parent->hw->clk;
2714 static struct clk_core *__clk_init_parent(struct clk_core *core)
2718 if (core->num_parents > 1 && core->ops->get_parent)
2719 index = core->ops->get_parent(core->hw);
2721 return clk_core_get_parent_by_index(core, index);
2724 static void clk_core_reparent(struct clk_core *core,
2727 clk_reparent(core, new_parent);
2728 __clk_recalc_accuracies(core);
2729 __clk_recalc_rates(core, true, POST_RATE_CHANGE);
2737 clk_core_reparent(hw->core, !new_parent ? NULL : new_parent->core);
2756 return clk_core_has_parent(clk->core, parent->core);
2760 static int clk_core_set_parent_nolock(struct clk_core *core,
2769 if (!core)
2772 if (core->parent == parent)
2776 if (core->num_parents > 1 && !core->ops->set_parent)
2780 if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count)
2783 if (clk_core_rate_is_protected(core))
2788 p_index = clk_fetch_parent_index(core, parent);
2791 __func__, parent->name, core->name);
2797 ret = clk_pm_runtime_get(core);
2802 ret = __clk_speculate_rates(core, p_rate);
2809 ret = __clk_set_parent(core, parent, p_index);
2813 __clk_recalc_rates(core, true, ABORT_RATE_CHANGE);
2815 __clk_recalc_rates(core, true, POST_RATE_CHANGE);
2816 __clk_recalc_accuracies(core);
2820 clk_pm_runtime_put(core);
2827 return clk_core_set_parent_nolock(hw->core, parent->core);
2858 clk_core_rate_unprotect(clk->core);
2860 ret = clk_core_set_parent_nolock(clk->core,
2861 parent ? parent->core : NULL);
2864 clk_core_rate_protect(clk->core);
2872 static int clk_core_set_phase_nolock(struct clk_core *core, int degrees)
2878 if (!core)
2881 if (clk_core_rate_is_protected(core))
2884 trace_clk_set_phase(core, degrees);
2886 if (core->ops->set_phase) {
2887 ret = core->ops->set_phase(core->hw, degrees);
2889 core->phase = degrees;
2892 trace_clk_set_phase_complete(core, degrees);
2932 clk_core_rate_unprotect(clk->core);
2934 ret = clk_core_set_phase_nolock(clk->core, degrees);
2937 clk_core_rate_protect(clk->core);
2945 static int clk_core_get_phase(struct clk_core *core)
2950 if (!core->ops->get_phase)
2954 ret = core->ops->get_phase(core->hw);
2956 core->phase = ret;
2976 ret = clk_core_get_phase(clk->core);
2983 static void clk_core_reset_duty_cycle_nolock(struct clk_core *core)
2986 core->duty.num = 1;
2987 core->duty.den = 2;
2990 static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core);
2992 static int clk_core_update_duty_cycle_nolock(struct clk_core *core)
2994 struct clk_duty *duty = &core->duty;
2997 if (!core->ops->get_duty_cycle)
2998 return clk_core_update_duty_cycle_parent_nolock(core);
3000 ret = core->ops->get_duty_cycle(core->hw, duty);
3013 clk_core_reset_duty_cycle_nolock(core);
3017 static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core)
3021 if (core->parent &&
3022 core->flags & CLK_DUTY_CYCLE_PARENT) {
3023 ret = clk_core_update_duty_cycle_nolock(core->parent);
3024 memcpy(&core->duty, &core->parent->duty, sizeof(core->duty));
3026 clk_core_reset_duty_cycle_nolock(core);
3032 static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core,
3035 static int clk_core_set_duty_cycle_nolock(struct clk_core *core,
3042 if (clk_core_rate_is_protected(core))
3045 trace_clk_set_duty_cycle(core, duty);
3047 if (!core->ops->set_duty_cycle)
3048 return clk_core_set_duty_cycle_parent_nolock(core, duty);
3050 ret = core->ops->set_duty_cycle(core->hw, duty);
3052 memcpy(&core->duty, duty, sizeof(*duty));
3054 trace_clk_set_duty_cycle_complete(core, duty);
3059 static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core,
3064 if (core->parent &&
3065 core->flags & (CLK_DUTY_CYCLE_PARENT | CLK_SET_RATE_PARENT)) {
3066 ret = clk_core_set_duty_cycle_nolock(core->parent, duty);
3067 memcpy(&core->duty, &core->parent->duty, sizeof(core->duty));
3102 clk_core_rate_unprotect(clk->core);
3104 ret = clk_core_set_duty_cycle_nolock(clk->core, &duty);
3107 clk_core_rate_protect(clk->core);
3115 static int clk_core_get_scaled_duty_cycle(struct clk_core *core,
3118 struct clk_duty *duty = &core->duty;
3123 ret = clk_core_update_duty_cycle_nolock(core);
3145 return clk_core_get_scaled_duty_cycle(clk->core, scale);
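Duty cycle is tracked as a num/den pair (reset to 1/2 at lines 2986-2987) and is forwarded to the parent when CLK_DUTY_CYCLE_PARENT is set. A hedged consumer sketch (the 1/3 request is illustrative):

    int ret, pct;

    ret = clk_set_duty_cycle(clk, 1, 3);                /* request 1/3 duty */
    if (!ret)
            pct = clk_get_scaled_duty_cycle(clk, 100);  /* read back as percent */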
3166 /* true if clk->core pointers match. Avoid dereferencing garbage */
3168 if (p->core == q->core)
3323 struct clk_core *core = data;
3327 ret = clk_core_set_rate_nolock(core, val);
3337 struct clk_core *core = data;
3341 ret = clk_prepare_enable(core->hw->clk);
3343 clk_disable_unprepare(core->hw->clk);
3350 struct clk_core *core = data;
3352 *val = core->enable_count && core->prepare_count;
3366 struct clk_core *core = data;
3369 *val = clk_core_get_rate_recalc(core);
3399 struct clk_core *core = s->private;
3400 unsigned long flags = core->flags;
3418 static void possible_parent_show(struct seq_file *s, struct clk_core *core,
3436 parent = clk_core_get_parent_by_index(core, i);
3439 } else if (core->parents[i].name) {
3440 seq_puts(s, core->parents[i].name);
3441 } else if (core->parents[i].fw_name) {
3442 seq_printf(s, "<%s>(fw)", core->parents[i].fw_name);
3444 if (core->parents[i].index >= 0)
3445 name = of_clk_get_parent_name(core->of_node, core->parents[i].index);
3457 struct clk_core *core = s->private;
3460 for (i = 0; i < core->num_parents - 1; i++)
3461 possible_parent_show(s, core, i, ' ');
3463 possible_parent_show(s, core, i, '\n');
3471 struct clk_core *core = s->private;
3473 if (core->parent)
3474 seq_printf(s, "%s\n", core->parent->name);
3485 struct clk_core *core = s->private;
3494 parent = clk_core_get_parent_by_index(core, idx);
3499 err = clk_core_set_parent_nolock(core, parent);
3518 struct clk_core *core = s->private;
3519 struct clk_duty *duty = &core->duty;
3529 struct clk_core *core = s->private;
3533 clk_core_get_boundaries(core, &min_rate, &max_rate);
3543 struct clk_core *core = s->private;
3547 clk_core_get_boundaries(core, &min_rate, &max_rate);
3555 static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
3559 if (!core || !pdentry)
3562 root = debugfs_create_dir(core->name, pdentry);
3563 core->dentry = root;
3565 debugfs_create_file("clk_rate", clk_rate_mode, root, core,
3567 debugfs_create_file("clk_min_rate", 0444, root, core, &clk_min_rate_fops);
3568 debugfs_create_file("clk_max_rate", 0444, root, core, &clk_max_rate_fops);
3569 debugfs_create_ulong("clk_accuracy", 0444, root, &core->accuracy);
3570 debugfs_create_u32("clk_phase", 0444, root, &core->phase);
3571 debugfs_create_file("clk_flags", 0444, root, core, &clk_flags_fops);
3572 debugfs_create_u32("clk_prepare_count", 0444, root, &core->prepare_count);
3573 debugfs_create_u32("clk_enable_count", 0444, root, &core->enable_count);
3574 debugfs_create_u32("clk_protect_count", 0444, root, &core->protect_count);
3575 debugfs_create_u32("clk_notifier_count", 0444, root, &core->notifier_count);
3576 debugfs_create_file("clk_duty_cycle", 0444, root, core,
3579 debugfs_create_file("clk_prepare_enable", 0644, root, core,
3582 if (core->num_parents > 1)
3583 debugfs_create_file("clk_parent", 0644, root, core,
3587 if (core->num_parents > 0)
3588 debugfs_create_file("clk_parent", 0444, root, core,
3591 if (core->num_parents > 1)
3592 debugfs_create_file("clk_possible_parents", 0444, root, core,
3595 if (core->ops->debug_init)
3596 core->ops->debug_init(core->hw, core->dentry);
3601 * @core: the clk being added to the debugfs clk directory
3607 static void clk_debug_register(struct clk_core *core)
3610 hlist_add_head(&core->debug_node, &clk_debug_list);
3612 clk_debug_create_one(core, rootdir);
3618 * @core: the clk being removed from the debugfs clk directory
3624 static void clk_debug_unregister(struct clk_core *core)
3627 hlist_del_init(&core->debug_node);
3628 debugfs_remove_recursive(core->dentry);
3629 core->dentry = NULL;
3644 struct clk_core *core;
3676 hlist_for_each_entry(core, &clk_debug_list, debug_node)
3677 clk_debug_create_one(core, rootdir);
3686 static inline void clk_debug_register(struct clk_core *core) { }
3687 static inline void clk_debug_unregister(struct clk_core *core)
3735 * @core: clk_core being initialized
3740 static int __clk_core_init(struct clk_core *core)
3750 * Set hw->core after grabbing the prepare_lock to synchronize with
3751 * callers of clk_core_fill_parent_index() where we treat hw->core
3755 core->hw->core = core;
3757 ret = clk_pm_runtime_get(core);
3762 if (clk_core_lookup(core->name)) {
3764 __func__, core->name);
3770 if (core->ops->set_rate &&
3771 !((core->ops->round_rate || core->ops->determine_rate) &&
3772 core->ops->recalc_rate)) {
3774 __func__, core->name);
3779 if (core->ops->set_parent && !core->ops->get_parent) {
3781 __func__, core->name);
3786 if (core->ops->set_parent && !core->ops->determine_rate) {
3788 __func__, core->name);
3793 if (core->num_parents > 1 && !core->ops->get_parent) {
3795 __func__, core->name);
3800 if (core->ops->set_rate_and_parent &&
3801 !(core->ops->set_parent && core->ops->set_rate)) {
3803 __func__, core->name);
3822 if (core->ops->init) {
3823 ret = core->ops->init(core->hw);
3828 parent = core->parent = __clk_init_parent(core);
3831 * Populate core->parent if parent has already been clk_core_init'd. If
3841 hlist_add_head(&core->child_node, &parent->children);
3842 core->orphan = parent->orphan;
3843 } else if (!core->num_parents) {
3844 hlist_add_head(&core->child_node, &clk_root_list);
3845 core->orphan = false;
3847 hlist_add_head(&core->child_node, &clk_orphan_list);
3848 core->orphan = true;
3858 if (core->ops->recalc_accuracy)
3859 core->accuracy = core->ops->recalc_accuracy(core->hw,
3862 core->accuracy = parent->accuracy;
3864 core->accuracy = 0;
3871 phase = clk_core_get_phase(core);
3875 core->name);
3882 clk_core_update_duty_cycle_nolock(core);
3890 if (core->ops->recalc_rate)
3891 rate = core->ops->recalc_rate(core->hw,
3897 core->rate = core->req_rate = rate;
3904 if (core->flags & CLK_IS_CRITICAL) {
3905 ret = clk_core_prepare(core);
3908 __func__, core->name);
3912 ret = clk_core_enable_lock(core);
3915 __func__, core->name);
3916 clk_core_unprepare(core);
3923 kref_init(&core->ref);
3925 clk_pm_runtime_put(core);
3928 hlist_del_init(&core->child_node);
3929 core->hw->core = NULL;
3935 clk_debug_register(core);
3942 * @core: clk to add consumer to
3945 static void clk_core_link_consumer(struct clk_core *core, struct clk *clk)
3948 hlist_add_head(&clk->clks_node, &core->clks);
3964 * @core: clk to allocate a consumer for
3970 static struct clk *alloc_clk(struct clk_core *core, const char *dev_id,
3979 clk->core = core;
4016 struct clk_core *core;
4022 core = hw->core;
4023 clk = alloc_clk(core, dev_id, con_id);
4028 if (!try_module_get(core->owner)) {
4033 kref_get(&core->ref);
4034 clk_core_link_consumer(core, clk);
4051 struct device *dev = hw->core->dev;
4075 static int clk_core_populate_parent_map(struct clk_core *core,
4093 core->parents = parents;
4104 __func__, core->name);
4137 static void clk_core_free_parent_map(struct clk_core *core)
4139 int i = core->num_parents;
4141 if (!core->num_parents)
4145 kfree_const(core->parents[i].name);
4146 kfree_const(core->parents[i].fw_name);
4149 kfree(core->parents);
4156 struct clk_core *core;
4162 * we catch use of hw->init early on in the core.
4166 core = kzalloc(sizeof(*core), GFP_KERNEL);
4167 if (!core) {
4172 core->name = kstrdup_const(init->name, GFP_KERNEL);
4173 if (!core->name) {
4182 core->ops = init->ops;
4185 core->rpm_enabled = true;
4186 core->dev = dev;
4187 core->of_node = np;
4189 core->owner = dev->driver->owner;
4190 core->hw = hw;
4191 core->flags = init->flags;
4192 core->num_parents = init->num_parents;
4193 core->min_rate = 0;
4194 core->max_rate = ULONG_MAX;
4196 ret = clk_core_populate_parent_map(core, init);
4200 INIT_HLIST_HEAD(&core->clks);
4206 hw->clk = alloc_clk(core, NULL, NULL);
4212 clk_core_link_consumer(core, hw->clk);
4214 ret = __clk_core_init(core);
4226 clk_core_free_parent_map(core);
4229 kfree_const(core->name);
4231 kfree(core);
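Lines 4156-4231 are the allocation path behind registration: the clk_core is kzalloc'd, the init data copied (hw->init is deliberately invalidated, cf. the comment at line 4162), the parent map populated, and __clk_core_init() run. A hedged provider-side sketch of the public clk_hw_register() entry point (ops table hypothetical):

    static const struct clk_ops foo_ops = {
            /* e.g. .enable/.disable for a gate; empty passes the op checks */
    };

    static struct clk_init_data foo_init = {
            .name = "foo",          /* must be unique, cf. the lookup at 3762 */
            .ops = &foo_ops,
            .num_parents = 0,       /* a root clock: lands on clk_root_list */
    };
    static struct clk_hw foo_hw = { .init = &foo_init };

    ret = clk_hw_register(dev, &foo_hw);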
4314 struct clk_core *core = container_of(ref, struct clk_core, ref);
4318 clk_core_free_parent_map(core);
4319 kfree_const(core->name);
4320 kfree(core);
4372 if (root->parents[i].core == target)
4373 root->parents[i].core = NULL;
4380 static void clk_core_evict_parent_cache(struct clk_core *core)
4389 clk_core_evict_parent_cache_subtree(root, core);
4405 clk_debug_unregister(clk->core);
4409 ops = clk->core->ops;
4412 clk->core->name);
4420 clk->core->ops = &clk_nodrv_ops;
4424 ops->terminate(clk->core->hw);
4426 if (!hlist_empty(&clk->core->children)) {
4431 hlist_for_each_entry_safe(child, t, &clk->core->children,
4436 clk_core_evict_parent_cache(clk->core);
4438 hlist_del_init(&clk->core->child_node);
4440 if (clk->core->prepare_count)
4442 __func__, clk->core->name);
4444 if (clk->core->protect_count)
4446 __func__, clk->core->name);
4448 kref_put(&clk->core->ref, __clk_release);
4561 WARN_ON_ONCE(dev != hw->core->dev);
4599 clk->core->protect_count -= (clk->exclusive_count - 1);
4600 clk_core_rate_unprotect(clk->core);
4610 owner = clk->core->owner;
4611 kref_put(&clk->core->ref, __clk_release);
4670 clk->core->notifier_count++;
4704 clk->core->notifier_count--;
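The notifier_count adjusted at lines 4670 and 4704 is what gates the __clk_notify() calls during rate changes (lines 2134, 2272, 2366). A hedged consumer sketch of a rate-change notifier (callback body illustrative):

    static int foo_clk_notify(struct notifier_block *nb,
                              unsigned long event, void *data)
    {
            struct clk_notifier_data *ndata = data;

            if (event == POST_RATE_CHANGE)
                    pr_info("clk rate %lu -> %lu\n",
                            ndata->old_rate, ndata->new_rate);

            return NOTIFY_OK;
    }

    static struct notifier_block foo_nb = { .notifier_call = foo_clk_notify };

    /* later, against a struct clk handle: */
    ret = clk_notifier_register(clk, &foo_nb);   /* bumps notifier_count */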