Lines Matching defs:clk

6  * Standard functionality for the common clock API.  See Documentation/driver-api/clk.rst
9 #include <linux/clk.h>
10 #include <linux/clk-provider.h>
11 #include <linux/clk/clk-conf.h>
25 #include "clk.h"
100 #include <trace/events/clk.h>
102 struct clk {
138 * clk_pm_runtime_get_all() - Runtime "get" all clk provider devices
140 * Call clk_pm_runtime_get() on all runtime PM enabled clks in the clk tree so
172 pr_err("clk: Failed to runtime PM get '%s' for clk '%s'\n",
193 * clk_pm_runtime_put_all() - Runtime "put" all clk provider devices
363 const char *__clk_get_name(const struct clk *clk)
365 return !clk ? NULL : clk->core->name;
375 struct clk_hw *__clk_get_hw(struct clk *clk)
377 return !clk ? NULL : clk->core->hw;
419 /* search the 'proper' clk tree first */
456 * clk_core_get - Find the clk_core parent of a clk
457 * @core: clk to find parent of
460 * This is the preferred method for clk providers to find the parent of a
461 * clk when that parent is external to the clk controller. The parent_names
464 * dev_name() in a clk_lookup. This allows clk providers to use their own
471 * the xtal clk.
485 * Returns: -ENOENT when the provider can't be found or the clk doesn't
487 * in a clkdev lookup. NULL when the provider knows about the clk but it
489 * A valid clk_core pointer when the clk can be found in the provider.
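The clk_core_get() fragments above describe how a provider names an external parent: through the consumer's DT clocks/clock-names properties (fw_name) or a clkdev lookup, rather than a globally unique string. A minimal provider-side sketch, assuming a hypothetical gate with an external "xtal" input (my_gate_ops and all names are illustrative):

#include <linux/clk-provider.h>
#include <linux/kernel.h>

/* Illustrative only: .fw_name is matched against the consumer's
 * clock-names property, .name is a legacy global-name fallback. */
static const struct clk_ops my_gate_ops = { /* gate callbacks elided */ };

static const struct clk_parent_data my_gate_parents[] = {
	{ .fw_name = "xtal", .name = "xtal" },
};

static const struct clk_init_data my_gate_init = {
	.name		= "my_gate",
	.ops		= &my_gate_ops,
	.parent_data	= my_gate_parents,
	.num_parents	= ARRAY_SIZE(my_gate_parents),
};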
570 unsigned int __clk_get_enable_count(struct clk *clk)
572 return !clk ? 0 : clk->core->enable_count;
585 * known yet. Best to return 0 as the rate of this clk until we can
629 bool __clk_is_enabled(struct clk *clk)
631 if (!clk)
634 return clk_core_is_enabled(clk->core);
711 struct clk *__clk_lookup(const char *name)
715 return !core ? NULL : core->hw->clk;
722 struct clk *clk_user;
740 struct clk *user;
763 * __clk_mux_determine_rate - clk_ops::determine_rate implementation for a mux type clk
764 * @hw: mux type clk to determine rate on
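For providers, the helper above can be plugged straight into a mux's clk_ops so the core picks whichever parent gets closest to the requested rate (honouring CLK_SET_RATE_NO_REPARENT). A sketch with stubbed-out hardware accessors; my_mux_get_parent/my_mux_set_parent are placeholders:

#include <linux/clk-provider.h>

static u8 my_mux_get_parent(struct clk_hw *hw)
{
	/* read the mux select field from hardware; elided here */
	return 0;
}

static int my_mux_set_parent(struct clk_hw *hw, u8 index)
{
	/* program the mux select field; elided here */
	return 0;
}

static const struct clk_ops my_mux_ops = {
	.get_parent	= my_mux_get_parent,
	.set_parent	= my_mux_set_parent,
	.determine_rate	= __clk_mux_determine_rate,
};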
787 /*** clk api ***/
827 * @clk: the clk over which the exclusivity is released
843 void clk_rate_exclusive_put(struct clk *clk)
845 if (!clk)
854 if (WARN_ON(clk->exclusive_count <= 0))
857 clk_core_rate_unprotect(clk->core);
858 clk->exclusive_count--;
892 * clk_rate_exclusive_get - get exclusivity over the clk rate control
893 * @clk: the clk over which the exclusivity of rate control is requested
909 int clk_rate_exclusive_get(struct clk *clk)
911 if (!clk)
915 clk_core_rate_protect(clk->core);
916 clk->exclusive_count++;
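Consumer-side, clk_rate_exclusive_get()/clk_rate_exclusive_put() bracket a section during which no other consumer may change the rate. A sketch (the "core" con_id and the 500 MHz figure are made up):

#include <linux/clk.h>
#include <linux/err.h>

static int do_rate_critical_work(struct device *dev)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(dev, "core");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_rate_exclusive_get(clk);	/* pin the rate */
	if (ret)
		return ret;

	ret = clk_set_rate(clk, 500000000);
	/* ... rate-sensitive work ... */

	clk_rate_exclusive_put(clk);		/* release the claim */
	return ret;
}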
965 * @clk: the clk being unprepared
968 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
969 * if the operation may sleep. One example is a clk which is accessed over
970 I2C. In the complex case a clk gate operation may require a fast and a slow
974 void clk_unprepare(struct clk *clk)
976 if (IS_ERR_OR_NULL(clk))
979 clk_core_unprepare_lock(clk->core);
1045 * @clk: the clk being prepared
1048 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
1049 operation may sleep. One example is a clk which is accessed over I2C. In
1050 * the complex case a clk ungate operation may require a fast and a slow part.
1055 int clk_prepare(struct clk *clk)
1057 if (!clk)
1060 return clk_core_prepare_lock(clk->core);
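clk_prepare() is the sleepable half of ungating; it belongs in process context (probe, resume), leaving only the atomic clk_enable() for hot paths. A sketch, with a hypothetical "bus" con_id:

#include <linux/clk.h>
#include <linux/err.h>

static int my_prepare_bus_clk(struct device *dev, struct clk **out)
{
	struct clk *clk = devm_clk_get(dev, "bus");

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	*out = clk;
	return clk_prepare(clk);	/* paired with clk_unprepare() */
}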
1102 * @clk: the clk being gated
1106 * clk if the operation is fast and will never sleep. One example is a
1107 * SoC-internal clk which is controlled via simple register writes. In the
1108 * complex case a clk gate operation may require a fast and a slow part. It is
1112 void clk_disable(struct clk *clk)
1114 if (IS_ERR_OR_NULL(clk))
1117 clk_core_disable_lock(clk->core);
1227 struct clk_core *clk;
1230 hlist_for_each_entry(clk, &clk_root_list, child_node) {
1231 ret = clk_core_save_context(clk);
1236 hlist_for_each_entry(clk, &clk_orphan_list, child_node) {
1237 ret = clk_core_save_context(clk);
1266 * @clk: the clk being ungated
1269 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
1270 * if the operation will never sleep. One example is a SoC-internal clk which
1271 * is controlled via simple register writes. In the complex case a clk ungate
1277 int clk_enable(struct clk *clk)
1279 if (!clk)
1282 return clk_core_enable_lock(clk->core);
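Because clk_enable()/clk_disable() never sleep, they may be called from atomic context once the clock has been prepared; when both steps happen in process context the clk_prepare_enable()/clk_disable_unprepare() helpers combine them. A sketch with a hypothetical driver-private struct:

#include <linux/clk.h>
#include <linux/interrupt.h>

struct my_priv {
	struct clk *gate;	/* prepared earlier, in probe */
};

static irqreturn_t my_irq(int irq, void *data)
{
	struct my_priv *priv = data;

	if (clk_enable(priv->gate))	/* atomic-safe */
		return IRQ_HANDLED;

	/* ... touch the hardware ... */

	clk_disable(priv->gate);
	return IRQ_HANDLED;
}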
1387 pr_warn("clk: Not disabling unused clocks\n");
1391 pr_info("clk: Disabling unused clocks\n");
1397 * Grab the prepare lock to keep the clk topology stable while iterating
1519 * clk_hw_round_rate() - round the given rate for a hw clk
1520 * @hw: the hw clk for which we are rounding a rate
1523 * Takes in a rate as input and rounds it to a rate that the clk can actually
1527 * For clk providers to call from within clk_ops such as .round_rate,
1530 * Return: returns rounded rate of hw clk if clk supports round_rate operation
1550 * clk_round_rate - round the given rate for a clk
1551 * @clk: the clk for which we are rounding a rate
1554 * Takes in a rate as input and rounds it to a rate that the clk can actually
1555 use, which is then returned. If clk doesn't support round_rate operation
1558 long clk_round_rate(struct clk *clk, unsigned long rate)
1563 if (!clk)
1568 if (clk->exclusive_count)
1569 clk_core_rate_unprotect(clk->core);
1571 clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate);
1574 ret = clk_core_round_rate_nolock(clk->core, &req);
1576 if (clk->exclusive_count)
1577 clk_core_rate_protect(clk->core);
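clk_round_rate() lets a consumer ask what the hardware would actually deliver before committing. A sketch (the 148.5 MHz pixel-clock target is illustrative):

#include <linux/clk.h>
#include <linux/errno.h>

static int pick_pixel_rate(struct clk *pixclk)
{
	long rounded = clk_round_rate(pixclk, 148500000);

	if (rounded <= 0)	/* 0 or a negative errno: nothing usable */
		return -EINVAL;

	return clk_set_rate(pixclk, rounded);
}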
1589 * __clk_notify - call clk notifier chain
1590 * @core: clk that is changing rate
1591 * @msg: clk notifier type (see include/linux/clk.h)
1592 * @old_rate: old clk rate
1593 * @new_rate: new clk rate
1595 * Triggers a notifier call chain on the clk rate-change notification
1596 * for 'clk'. Passes a pointer to the struct clk and the previous
1613 if (cn->clk->core == core) {
1614 cnd.clk = cn->clk;
1627 * @core: first clk in the subtree
1629 * Walks the subtree of clks starting with clk and recalculates accuracies as
1630 * it goes. Note that if a clk does not implement the .recalc_accuracy
1663 * clk_get_accuracy - return the accuracy of clk
1664 * @clk: the clk whose accuracy is being returned
1666 * Simply returns the cached accuracy of the clk, unless
1669 * If clk is NULL then returns 0.
1671 long clk_get_accuracy(struct clk *clk)
1675 if (!clk)
1679 accuracy = clk_core_get_accuracy_recalc(clk->core);
1700 * @core: first clk in the subtree
1701 * @msg: notification type (see include/linux/clk.h)
1703 * Walks the subtree of clks starting with clk and recalculates rates as it
1704 * goes. Note that if a clk does not implement the .recalc_rate callback then
1745 * clk_get_rate - return the rate of clk
1746 * @clk: the clk whose rate is being returned
1748 * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
1750 * If clk is NULL then returns 0.
1752 unsigned long clk_get_rate(struct clk *clk)
1756 if (!clk)
1760 rate = clk_core_get_rate_recalc(clk->core);
1812 * @hw: clk_hw associated with the clk being consumed
1905 /* update the clk tree topology */
1966 * @core: first clk in the subtree
1967 * @parent_rate: the "future" rate of clk's parent
1969 * Walks the subtree of clks starting with clk, speculating rates as it
1974 * subtree have subscribed to the notifications. Note that if a clk does not
1994 pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n",
2017 /* include clk in new parent's PRE_RATE_CHANGE notifications */
2055 /* find the closest rate and parent clk/rate */
2098 pr_debug("%s: clk %s can not be parent of clk %s\n",
2331 * clk_set_rate - specify a new rate for clk
2332 * @clk: the clk whose rate is being changed
2333 * @rate: the new rate for clk
2335 * In the simplest case clk_set_rate will only adjust the rate of clk.
2338 * propagate up to clk's parent; whether or not this happens depends on the
2339 * outcome of clk's .round_rate implementation. If *parent_rate is unchanged
2341 * *parent_rate comes back with a new rate for clk's parent then we propagate
2342 * up to clk's parent and set its rate. Upward propagation will continue
2343 * until either a clk does not support the CLK_SET_RATE_PARENT flag or
2344 * .round_rate stops requesting changes to clk's parent_rate.
2351 int clk_set_rate(struct clk *clk, unsigned long rate)
2355 if (!clk)
2361 if (clk->exclusive_count)
2362 clk_core_rate_unprotect(clk->core);
2364 ret = clk_core_set_rate_nolock(clk->core, rate);
2366 if (clk->exclusive_count)
2367 clk_core_rate_protect(clk->core);
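From the consumer's point of view clk_set_rate() is a single call; whether the request propagates to the parent depends entirely on the provider's CLK_SET_RATE_PARENT flag and its .round_rate/.determine_rate behaviour. A sketch (48 MHz is an arbitrary example):

#include <linux/clk.h>
#include <linux/printk.h>

static int set_uart_base_rate(struct clk *uartclk)
{
	int ret = clk_set_rate(uartclk, 48000000);

	if (ret)
		pr_warn("uart clk rate request rejected: %d\n", ret);

	return ret;
}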
2377 * @clk: the clk whose rate is being changed
2378 * @rate: the new rate for clk
2394 int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
2398 if (!clk)
2410 ret = clk_core_set_rate_nolock(clk->core, rate);
2412 clk_core_rate_protect(clk->core);
2413 clk->exclusive_count++;
2424 * @clk: clock source
2430 int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
2435 if (!clk)
2439 pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n",
2440 __func__, clk->core->name, clk->dev_id, clk->con_id,
2447 if (clk->exclusive_count)
2448 clk_core_rate_unprotect(clk->core);
2451 old_min = clk->min_rate;
2452 old_max = clk->max_rate;
2453 clk->min_rate = min;
2454 clk->max_rate = max;
2456 if (!clk_core_check_boundaries(clk->core, min, max)) {
2461 rate = clk_core_get_rate_nolock(clk->core);
2481 ret = clk_core_set_rate_nolock(clk->core, rate);
2484 clk->min_rate = old_min;
2485 clk->max_rate = old_max;
2490 if (clk->exclusive_count)
2491 clk_core_rate_protect(clk->core);
2501 * @clk: clock source
2506 int clk_set_min_rate(struct clk *clk, unsigned long rate)
2508 if (!clk)
2511 return clk_set_rate_range(clk, rate, clk->max_rate);
2517 * @clk: clock source
2522 int clk_set_max_rate(struct clk *clk, unsigned long rate)
2524 if (!clk)
2527 return clk_set_rate_range(clk, clk->min_rate, rate);
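clk_set_rate_range() and the one-sided clk_set_min_rate()/clk_set_max_rate() wrappers above record a per-consumer constraint; the framework then keeps the clock inside the intersection of all consumers' windows. A sketch with an illustrative 200-400 MHz window:

#include <linux/clk.h>

static int constrain_bus_clk(struct clk *busclk)
{
	/* one-sided equivalents: clk_set_min_rate(), clk_set_max_rate() */
	return clk_set_rate_range(busclk, 200000000, 400000000);
}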
2532 * clk_get_parent - return the parent of a clk
2533 * @clk: the clk whose parent gets returned
2535 * Simply returns clk->parent. Returns NULL if clk is NULL.
2537 struct clk *clk_get_parent(struct clk *clk)
2539 struct clk *parent;
2541 if (!clk)
2545 /* TODO: Create a per-user clk and change callers to call clk_put */
2546 parent = !clk->core->parent ? NULL : clk->core->parent->hw->clk;
2581 * @clk: clock source
2587 * Returns true if @parent is a possible parent for @clk, false otherwise.
2589 bool clk_has_parent(struct clk *clk, struct clk *parent)
2595 if (!clk || !parent)
2598 core = clk->core;
2643 pr_debug("%s: clk %s can not be parent of clk %s\n",
2685 * clk_set_parent - switch the parent of a mux clk
2686 * @clk: the mux clk whose input we are switching
2687 * @parent: the new input to clk
2689 * Re-parent clk to use parent as its new input source. If clk is in
2690 * prepared state, the clk will get enabled for the duration of this call. If
2691 that's not acceptable for a specific clk (e.g. the consumer can't handle
2693 * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
2695 * After successfully changing clk's parent clk_set_parent will update the
2696 * clk topology, sysfs topology and propagate rate recalculation via
2701 int clk_set_parent(struct clk *clk, struct clk *parent)
2705 if (!clk)
2710 if (clk->exclusive_count)
2711 clk_core_rate_unprotect(clk->core);
2713 ret = clk_core_set_parent_nolock(clk->core,
2716 if (clk->exclusive_count)
2717 clk_core_rate_protect(clk->core);
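A consumer reparenting a mux will typically validate the candidate with clk_has_parent() first; clk_set_parent() then updates the topology and triggers rate recalculation. A sketch:

#include <linux/clk.h>
#include <linux/errno.h>

static int switch_to_pll(struct clk *mux, struct clk *pll)
{
	if (!clk_has_parent(mux, pll))
		return -EINVAL;	/* provider does not list pll as a parent */

	return clk_set_parent(mux, pll);
}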
2752 * @clk: clock signal source
2770 int clk_set_phase(struct clk *clk, int degrees)
2774 if (!clk)
2784 if (clk->exclusive_count)
2785 clk_core_rate_unprotect(clk->core);
2787 ret = clk_core_set_phase_nolock(clk->core, degrees);
2789 if (clk->exclusive_count)
2790 clk_core_rate_protect(clk->core);
2816 * @clk: clock signal source
2821 int clk_get_phase(struct clk *clk)
2825 if (!clk)
2829 ret = clk_core_get_phase(clk->core);
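A small sketch of the phase API: shift a sample clock by 90 degrees (a common SD/MMC tuning step) and read the cached value back.

#include <linux/clk.h>

static int quadrature_shift(struct clk *sample)
{
	int ret = clk_set_phase(sample, 90);

	return ret ? ret : clk_get_phase(sample);
}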
2928 * @clk: clock signal source
2937 int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den)
2942 if (!clk)
2954 if (clk->exclusive_count)
2955 clk_core_rate_unprotect(clk->core);
2957 ret = clk_core_set_duty_cycle_nolock(clk->core, &duty);
2959 if (clk->exclusive_count)
2960 clk_core_rate_protect(clk->core);
2987 * @clk: clock signal source
2993 int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale)
2995 if (!clk)
2998 return clk_core_get_scaled_duty_cycle(clk->core, scale);
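And the duty-cycle API: request a 1/3 ratio, then read it back scaled to a percentage.

#include <linux/clk.h>

static int set_one_third_duty(struct clk *clk)
{
	int ret = clk_set_duty_cycle(clk, 1, 3);

	return ret ? ret : clk_get_scaled_duty_cycle(clk, 100);
}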
3003 * clk_is_match - check if two clk's point to the same hardware clock
3004 * @p: clk compared against q
3005 * @q: clk compared against p
3007 * Returns true if the two struct clk pointers both point to the same hardware
3008 * clock node. Put differently, returns true if struct clk *p and struct clk *q
3013 bool clk_is_match(const struct clk *p, const struct clk *q)
3015 /* trivial case: identical struct clk's or both NULL */
3019 /* true if clk->core pointers match. Avoid dereferencing garbage */
3185 ret = clk_prepare_enable(core->hw->clk);
3187 clk_disable_unprepare(core->hw->clk);
3401 * clk_debug_register - add a clk node to the debugfs clk directory
3402 * @core: the clk being added to the debugfs clk directory
3404 * Dynamically adds a clk to the debugfs clk directory if debugfs has been
3405 * initialized. Otherwise it bails out early since the debugfs clk directory
3418 * clk_debug_unregister - remove a clk node from the debugfs clk directory
3419 * @core: the clk being removed from the debugfs clk directory
3421 * Dynamically removes a clk and all its child nodes from the
3422 * debugfs clk directory if clk->dentry points to debugfs created by
3435 * clk_debug_init - lazily populate the debugfs clk directory
3439 * populates the debugfs clk directory once at boot-time when we know that
3452 pr_warn("** WRITEABLE clk DebugFS SUPPORT HAS BEEN ENABLED IN THIS KERNEL **\n");
3454 pr_warn("** This means that this kernel is built to expose clk operations **\n");
3465 rootdir = debugfs_create_dir("clk", NULL);
3512 /* update the clk tree topology */
3556 * being NULL as the clk not being registered yet. This is crucial so
3567 pr_debug("%s: clk %s already initialized\n",
3573 /* check that clk_ops are sane. See Documentation/driver-api/clk.rst */
3629 * parent has not yet been clk_core_init'd then place clk in the orphan
3630 * list. If clk doesn't have any parents then place it in the root
3631 * clk list.
3633 * Every time a new clk is clk_init'd then we walk the list of orphan
3649 * Set clk's accuracy. The preferred method is to use
3664 * Set clk's phase by clk_core_get_phase() caching the phase.
3671 pr_warn("%s: Failed to get phase for clk '%s'\n", __func__,
3677 * Set clk's duty cycle.
3682 * Set clk's rate. The preferred method is to use .recalc_rate. For
3706 pr_warn("%s: critical clk '%s' failed to prepare\n",
3715 pr_warn("%s: critical clk '%s' failed to enable\n",
3740 * clk_core_link_consumer - Add a clk consumer to the list of consumers in a clk_core
3741 * @core: clk to add consumer to
3742 * @clk: consumer to link to a clk
3744 static void clk_core_link_consumer(struct clk_core *core, struct clk *clk)
3747 hlist_add_head(&clk->clks_node, &core->clks);
3752 * clk_core_unlink_consumer - Remove a clk consumer from the list of consumers in a clk_core
3753 * @clk: consumer to unlink
3755 static void clk_core_unlink_consumer(struct clk *clk)
3758 hlist_del(&clk->clks_node);
3762 * alloc_clk - Allocate a clk consumer, but leave it unlinked to the clk_core
3763 * @core: clk to allocate a consumer for
3767 * Returns: clk consumer left unlinked from the consumer list
3769 static struct clk *alloc_clk(struct clk_core *core, const char *dev_id,
3772 struct clk *clk;
3774 clk = kzalloc(sizeof(*clk), GFP_KERNEL);
3775 if (!clk)
3778 clk->core = core;
3779 clk->dev_id = dev_id;
3780 clk->con_id = kstrdup_const(con_id, GFP_KERNEL);
3781 clk->max_rate = ULONG_MAX;
3783 return clk;
3787 * free_clk - Free a clk consumer
3788 * @clk: clk consumer to free
3790 * Note, this assumes the clk has been unlinked from the clk_core consumer
3793 static void free_clk(struct clk *clk)
3795 kfree_const(clk->con_id);
3796 kfree(clk);
3800 * clk_hw_create_clk: Allocate and link a clk consumer to a clk_core given
3802 * @dev: clk consumer device
3803 * @hw: clk_hw associated with the clk being consumed
3807 * This is the main function used to create a clk pointer for use by clk
3809 * used by the framework and clk provider respectively.
3811 struct clk *clk_hw_create_clk(struct device *dev, struct clk_hw *hw,
3814 struct clk *clk;
3822 clk = alloc_clk(core, dev_id, con_id);
3823 if (IS_ERR(clk))
3824 return clk;
3825 clk->dev = dev;
3828 free_clk(clk);
3833 clk_core_link_consumer(core, clk);
3835 return clk;
3948 static struct clk *
4003 hw->clk = alloc_clk(core, NULL, NULL);
4004 if (IS_ERR(hw->clk)) {
4005 ret = PTR_ERR(hw->clk);
4009 clk_core_link_consumer(core, hw->clk);
4013 return hw->clk;
4016 clk_core_unlink_consumer(hw->clk);
4019 free_clk(hw->clk);
4020 hw->clk = NULL;
4061 * Returns: a pointer to the newly allocated struct clk which
4066 struct clk *clk_register(struct device *dev, struct clk_hw *hw)
4109 * consumer calls clk_put() and the struct clk object is freed.
4155 /* Remove this clk from all parent caches */
4171 * @clk: clock to unregister
4173 void clk_unregister(struct clk *clk)
4178 if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
4181 clk_debug_unregister(clk->core);
4185 ops = clk->core->ops;
4188 clk->core->name);
4196 clk->core->ops = &clk_nodrv_ops;
4200 ops->terminate(clk->core->hw);
4202 if (!hlist_empty(&clk->core->children)) {
4207 hlist_for_each_entry_safe(child, t, &clk->core->children,
4212 clk_core_evict_parent_cache(clk->core);
4214 hlist_del_init(&clk->core->child_node);
4216 if (clk->core->prepare_count)
4218 __func__, clk->core->name);
4220 if (clk->core->protect_count)
4222 __func__, clk->core->name);
4224 kref_put(&clk->core->ref, __clk_release);
4225 free_clk(clk);
4237 clk_unregister(hw->clk);
4243 clk_unregister(*(struct clk **)res);
4261 struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
4263 struct clk *clk;
4264 struct clk **clkp;
4270 clk = clk_register(dev, hw);
4271 if (!IS_ERR(clk)) {
4272 *clkp = clk;
4278 return clk;
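Provider-side, devm_clk_register() wraps clk_register() so the clock is dropped automatically on driver detach (new code generally prefers the clk_hw based devm_clk_hw_register()). A sketch; my_gate_ops, the "pclk" parent and the wrapper struct are assumptions:

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/kernel.h>

static const struct clk_ops my_gate_ops = { /* gate callbacks elided */ };
static const char * const my_gate_parents[] = { "pclk" };

struct my_gate {
	struct clk_hw hw;
};

static int my_gate_register(struct device *dev, struct my_gate *g)
{
	const struct clk_init_data init = {
		.name		= "my_gate",
		.ops		= &my_gate_ops,
		.parent_names	= my_gate_parents,
		.num_parents	= ARRAY_SIZE(my_gate_parents),
	};
	struct clk *clk;

	g->hw.init = &init;	/* the core copies what it needs */
	clk = devm_clk_register(dev, &g->hw);

	return PTR_ERR_OR_ZERO(clk);
}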
4314 struct clk *c = res;
4332 * @clk: clock to unregister
4338 void devm_clk_unregister(struct device *dev, struct clk *clk)
4340 WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk));
4364 void __clk_put(struct clk *clk)
4368 if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
4378 if (WARN_ON(clk->exclusive_count)) {
4380 clk->core->protect_count -= (clk->exclusive_count - 1);
4381 clk_core_rate_unprotect(clk->core);
4382 clk->exclusive_count = 0;
4385 hlist_del(&clk->clks_node);
4386 if (clk->min_rate > clk->core->req_rate ||
4387 clk->max_rate < clk->core->req_rate)
4388 clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
4390 owner = clk->core->owner;
4391 kref_put(&clk->core->ref, __clk_release);
4397 free_clk(clk);
4400 /*** clk rate change notifiers ***/
4403 * clk_notifier_register - add a clk rate change notifier
4404 * @clk: struct clk * to watch
4407 * Request notification when clk's rate changes. This uses an SRCU
4410 * re-enter into the clk framework by calling any top-level clk APIs;
4422 int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
4427 if (!clk || !nb)
4432 /* search the list of notifiers for this clk */
4434 if (cn->clk == clk)
4437 /* if clk wasn't in the notifier list, allocate new clk_notifier */
4442 cn->clk = clk;
4450 clk->core->notifier_count++;
4460 * clk_notifier_unregister - remove a clk rate change notifier
4461 * @clk: struct clk *
4464 * Request no further notification for changes to 'clk' and frees memory
4470 int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
4475 if (!clk || !nb)
4481 if (cn->clk == clk) {
4484 clk->core->notifier_count--;
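A consumer sketch for the notifier API: the callback receives a struct clk_notifier_data carrying the old and new rates, and the event is one of PRE_RATE_CHANGE, POST_RATE_CHANGE or ABORT_RATE_CHANGE.

#include <linux/clk.h>
#include <linux/notifier.h>
#include <linux/printk.h>

static int my_rate_cb(struct notifier_block *nb, unsigned long event,
		      void *data)
{
	struct clk_notifier_data *ndata = data;

	if (event == POST_RATE_CHANGE)
		pr_info("clk rate changed: %lu -> %lu\n",
			ndata->old_rate, ndata->new_rate);

	return NOTIFY_OK;
}

static struct notifier_block my_rate_nb = {
	.notifier_call = my_rate_cb,
};

static int watch_rate(struct clk *clk)
{
	return clk_notifier_register(clk, &my_rate_nb);
}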
4514 * @get: Get clock callback. Returns NULL or a struct clk for the
4524 struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
4536 struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
4549 struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
4587 struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
4679 * devm_of_clk_add_hw_provider() - Managed clk provider node registration
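A legacy struct clk based provider can expose an array of clocks to DT consumers through the generic one-cell getter shown above; the consumer's clock specifier cell is then the index into that array. A sketch (my_clks is assumed to have been filled with already-registered clocks):

#include <linux/clk-provider.h>
#include <linux/kernel.h>
#include <linux/of.h>

static struct clk *my_clks[4];

static struct clk_onecell_data my_clk_data = {
	.clks		= my_clks,
	.clk_num	= ARRAY_SIZE(my_clks),
};

static int my_register_provider(struct device_node *np)
{
	return of_clk_add_provider(np, of_clk_src_onecell_get, &my_clk_data);
}

Newer providers typically use the clk_hw based equivalents, of_clk_add_hw_provider() with of_clk_hw_onecell_get(), which avoid handing struct clk pointers out of the provider.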
4843 struct clk *clk;
4848 clk = provider->get(clkspec, provider->data);
4849 if (IS_ERR(clk))
4850 return ERR_CAST(clk);
4851 return __clk_get_hw(clk);
4880 * This function looks up a struct clk from the registered list of clock
4884 struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
4909 static struct clk *__of_clk_get(struct device_node *np,
4918 struct clk *of_clk_get(struct device_node *np, int index)
4930 * and uses them to look up the struct clk from the registered list of clock
4933 struct clk *of_clk_get_by_name(struct device_node *np, const char *name)
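Consumer-side lookup directly from a device_node, for code that has no struct device yet; the "ref" name is illustrative and must match a clock-names entry:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/of.h>

static struct clk *get_ref_clk(struct device_node *np)
{
	struct clk *clk = of_clk_get_by_name(np, "ref");

	if (IS_ERR(clk))
		clk = of_clk_get(np, 0);	/* fall back to the first entry */

	return clk;
}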
4969 struct clk *clk;
5002 clk = of_clk_get_from_provider(&clkspec);
5003 if (IS_ERR(clk)) {
5009 clk_name = __clk_get_name(clk);
5010 clk_put(clk);
5057 struct clk *clk = of_clk_get(np, i);
5060 if (!IS_ERR(clk)) {
5061 clk_put(clk);
5067 if (PTR_ERR(clk) == -EPROBE_DEFER)